/* Subroutines used for code generation on IBM RS/6000.
   Copyright (C) 1991-2019 Free Software Foundation, Inc.
   Contributed by Richard Kenner (kenner@vlsi1.ultra.nyu.edu)

   This file is part of GCC.

   GCC is free software; you can redistribute it and/or modify it
   under the terms of the GNU General Public License as published
   by the Free Software Foundation; either version 3, or (at your
   option) any later version.

   GCC is distributed in the hope that it will be useful, but WITHOUT
   ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
   or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public
   License for more details.

   You should have received a copy of the GNU General Public License
   along with GCC; see the file COPYING3.  If not see
   <http://www.gnu.org/licenses/>.  */

#define IN_TARGET_CODE 1

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "backend.h"
#include "rtl.h"
#include "tree.h"
#include "memmodel.h"
#include "gimple.h"
#include "cfghooks.h"
#include "cfgloop.h"
#include "df.h"
#include "tm_p.h"
#include "stringpool.h"
#include "expmed.h"
#include "optabs.h"
#include "regs.h"
#include "ira.h"
#include "recog.h"
#include "cgraph.h"
#include "diagnostic-core.h"
#include "insn-attr.h"
#include "flags.h"
#include "alias.h"
#include "fold-const.h"
#include "attribs.h"
#include "stor-layout.h"
#include "calls.h"
#include "print-tree.h"
#include "varasm.h"
#include "explow.h"
#include "expr.h"
#include "output.h"
#include "dbxout.h"
#include "common/common-target.h"
#include "langhooks.h"
#include "reload.h"
#include "sched-int.h"
#include "gimplify.h"
#include "gimple-fold.h"
#include "gimple-iterator.h"
#include "gimple-ssa.h"
#include "gimple-walk.h"
#include "intl.h"
#include "params.h"
#include "tm-constrs.h"
#include "tree-vectorizer.h"
#include "target-globals.h"
#include "builtins.h"
#include "tree-vector-builder.h"
#include "context.h"
#include "tree-pass.h"
#include "except.h"
#if TARGET_XCOFF
#include "xcoffout.h"  /* get declarations of xcoff_*_section_name */
#endif
#if TARGET_MACHO
#include "gstab.h"  /* for N_SLINE */
#endif
#include "case-cfn-macros.h"
#include "ppc-auxv.h"
#include "tree-ssa-propagate.h"
#include "tree-vrp.h"
#include "tree-ssanames.h"
#include "rs6000-internal.h"

/* This file should be included last.  */
#include "target-def.h"

#ifndef TARGET_NO_PROTOTYPE
#define TARGET_NO_PROTOTYPE 0
#endif

/* Set -mabi=ieeelongdouble on some old targets.  In the future, power server
   systems will also set long double to be IEEE 128-bit.  AIX and Darwin
   explicitly redefine TARGET_IEEEQUAD and TARGET_IEEEQUAD_DEFAULT to 0, so
   those systems will not pick up this default.  This needs to be after all
   of the include files, so that POWERPC_LINUX and POWERPC_FREEBSD are
   properly defined.  */
#ifndef TARGET_IEEEQUAD_DEFAULT
#if !defined (POWERPC_LINUX) && !defined (POWERPC_FREEBSD)
#define TARGET_IEEEQUAD_DEFAULT 1
#else
#define TARGET_IEEEQUAD_DEFAULT 0
#endif
#endif

static pad_direction rs6000_function_arg_padding (machine_mode, const_tree);

/* Support targetm.vectorize.builtin_mask_for_load.  */
static GTY(()) tree altivec_builtin_mask_for_load;

/* Set to nonzero once AIX common-mode calls have been defined.  */
static GTY(()) int common_mode_defined;

#ifdef USING_ELFOS_H
/* Counter for labels which are to be placed in .fixup.  */
int fixuplabelno = 0;
#endif

/* Whether to use variant of AIX ABI for PowerPC64 Linux.  */
int dot_symbols;

/* Specify the machine mode that pointers have.  After generation of rtl, the
   compiler makes no further distinction between pointers and any other objects
   of this machine mode.  */
scalar_int_mode rs6000_pmode;

#if TARGET_ELF
/* Note whether IEEE 128-bit floating point was passed or returned, either as
   the __float128/_Float128 explicit type, or when long double is IEEE 128-bit
   floating point.  We changed the default C++ mangling for these types and we
   may want to generate a weak alias of the old mangling (U10__float128) to the
   new mangling (u9__ieee128).  */
static bool rs6000_passes_ieee128;
#endif

/* Generate the mangled name (i.e. U10__float128) used in GCC 8.1, and not the
   name used in current releases (i.e. u9__ieee128).  */
static bool ieee128_mangling_gcc_8_1;

/* Width in bits of a pointer.  */
unsigned rs6000_pointer_size;

#ifdef HAVE_AS_GNU_ATTRIBUTE
# ifndef HAVE_LD_PPC_GNU_ATTR_LONG_DOUBLE
# define HAVE_LD_PPC_GNU_ATTR_LONG_DOUBLE 0
# endif
/* Flag whether floating point values have been passed/returned.
   Note that this doesn't say whether fprs are used, since the
   Tag_GNU_Power_ABI_FP .gnu.attributes value this flag controls
   should be set for soft-float values passed in gprs and ieee128
   values passed in vsx registers.  */
static bool rs6000_passes_float;
static bool rs6000_passes_long_double;
/* Flag whether vector values have been passed/returned.  */
static bool rs6000_passes_vector;
/* Flag whether small (<= 8 byte) structures have been returned.  */
static bool rs6000_returns_struct;
#endif

/* Value is TRUE if register/mode pair is acceptable.  */
static bool rs6000_hard_regno_mode_ok_p
  [NUM_MACHINE_MODES][FIRST_PSEUDO_REGISTER];

/* Maximum number of registers needed for a given register class and mode.  */
unsigned char rs6000_class_max_nregs[NUM_MACHINE_MODES][LIM_REG_CLASSES];

/* How many registers are needed for a given register and mode.  */
unsigned char rs6000_hard_regno_nregs[NUM_MACHINE_MODES][FIRST_PSEUDO_REGISTER];

/* Map register number to register class.  */
enum reg_class rs6000_regno_regclass[FIRST_PSEUDO_REGISTER];

static int dbg_cost_ctrl;

/* Built in types.  */
tree rs6000_builtin_types[RS6000_BTI_MAX];
tree rs6000_builtin_decls[RS6000_BUILTIN_COUNT];

/* Flag to say the TOC is initialized.  */
int toc_initialized, need_toc_init;
char toc_label_name[10];

/* Cached value of rs6000_variable_issue.  This is cached in
   rs6000_variable_issue hook and returned from rs6000_sched_reorder2.  */
static short cached_can_issue_more;

static GTY(()) section *read_only_data_section;
static GTY(()) section *private_data_section;
static GTY(()) section *tls_data_section;
static GTY(()) section *tls_private_data_section;
static GTY(()) section *read_only_private_data_section;
static GTY(()) section *sdata2_section;

extern GTY(()) section *toc_section;
section *toc_section = 0;

struct builtin_description
{
  const HOST_WIDE_INT mask;
  const enum insn_code icode;
  const char *const name;
  const enum rs6000_builtins code;
};

/* Describe the vector unit used for modes.  */
enum rs6000_vector rs6000_vector_unit[NUM_MACHINE_MODES];
enum rs6000_vector rs6000_vector_mem[NUM_MACHINE_MODES];

/* Register classes for various constraints that are based on the target
   switches.  */
enum reg_class rs6000_constraints[RS6000_CONSTRAINT_MAX];

/* Describe the alignment of a vector.  */
int rs6000_vector_align[NUM_MACHINE_MODES];

/* Map selected modes to types for builtins.  */
static GTY(()) tree builtin_mode_to_type[MAX_MACHINE_MODE][2];

/* What modes to automatically generate reciprocal divide estimate (fre) and
   reciprocal sqrt (frsqrte) for.  */
unsigned char rs6000_recip_bits[MAX_MACHINE_MODE];

/* Masks to determine which reciprocal estimate instructions to generate
   automatically.  */
enum rs6000_recip_mask {
  RECIP_SF_DIV     = 0x001,   /* Use divide estimate */
  RECIP_DF_DIV     = 0x002,
  RECIP_V4SF_DIV   = 0x004,
  RECIP_V2DF_DIV   = 0x008,

  RECIP_SF_RSQRT   = 0x010,   /* Use reciprocal sqrt estimate.  */
  RECIP_DF_RSQRT   = 0x020,
  RECIP_V4SF_RSQRT = 0x040,
  RECIP_V2DF_RSQRT = 0x080,

  /* Various combinations of flags for -mrecip=xxx.  */
  RECIP_NONE           = 0,
  RECIP_ALL            = (RECIP_SF_DIV | RECIP_DF_DIV | RECIP_V4SF_DIV
                          | RECIP_V2DF_DIV | RECIP_SF_RSQRT | RECIP_DF_RSQRT
                          | RECIP_V4SF_RSQRT | RECIP_V2DF_RSQRT),

  RECIP_HIGH_PRECISION = RECIP_ALL,

  /* On low precision machines like the power5, don't enable double precision
     reciprocal square root estimate, since it isn't accurate enough.  */
  RECIP_LOW_PRECISION  = (RECIP_ALL & ~(RECIP_DF_RSQRT | RECIP_V2DF_RSQRT))
};

/* -mrecip options.  */
static struct
{
  const char *string;   /* option name */
  unsigned int mask;    /* mask bits to set */
} recip_options[] = {
  { "all",    RECIP_ALL },
  { "none",   RECIP_NONE },
  { "div",    (RECIP_SF_DIV | RECIP_DF_DIV | RECIP_V4SF_DIV
               | RECIP_V2DF_DIV) },
  { "divf",   (RECIP_SF_DIV | RECIP_V4SF_DIV) },
  { "divd",   (RECIP_DF_DIV | RECIP_V2DF_DIV) },
  { "rsqrt",  (RECIP_SF_RSQRT | RECIP_DF_RSQRT | RECIP_V4SF_RSQRT
               | RECIP_V2DF_RSQRT) },
  { "rsqrtf", (RECIP_SF_RSQRT | RECIP_V4SF_RSQRT) },
  { "rsqrtd", (RECIP_DF_RSQRT | RECIP_V2DF_RSQRT) },
};
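
/* Illustrative sketch of how this table is consumed (option parsing is
   handled later in this file): a command line such as -mrecip=divf,rsqrtd
   would match the "divf" and "rsqrtd" strings above and OR
   RECIP_SF_DIV|RECIP_V4SF_DIV and RECIP_DF_RSQRT|RECIP_V2DF_RSQRT into the
   set of reciprocal estimate instructions to generate.  */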

/* Used by __builtin_cpu_is(), mapping from PLATFORM names to values.  */
static const struct
{
  const char *cpu;
  unsigned int cpuid;
} cpu_is_info[] = {
  { "power9",      PPC_PLATFORM_POWER9 },
  { "power8",      PPC_PLATFORM_POWER8 },
  { "power7",      PPC_PLATFORM_POWER7 },
  { "power6x",     PPC_PLATFORM_POWER6X },
  { "power6",      PPC_PLATFORM_POWER6 },
  { "power5+",     PPC_PLATFORM_POWER5_PLUS },
  { "power5",      PPC_PLATFORM_POWER5 },
  { "ppc970",      PPC_PLATFORM_PPC970 },
  { "power4",      PPC_PLATFORM_POWER4 },
  { "ppca2",       PPC_PLATFORM_PPCA2 },
  { "ppc476",      PPC_PLATFORM_PPC476 },
  { "ppc464",      PPC_PLATFORM_PPC464 },
  { "ppc440",      PPC_PLATFORM_PPC440 },
  { "ppc405",      PPC_PLATFORM_PPC405 },
  { "ppc-cell-be", PPC_PLATFORM_CELL_BE }
};
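
/* Illustrative (hypothetical) user code resolved through this table:

     if (__builtin_cpu_is ("power9"))
       use_power9_path ();

   compares the AT_PLATFORM value that the LIBC caches in the TCB against
   PPC_PLATFORM_POWER9; use_power9_path is a placeholder.  */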

/* Used by __builtin_cpu_supports(), mapping from HWCAP names to masks.  */
static const struct
{
  const char *hwcap;
  int mask;
  unsigned int id;
} cpu_supports_info[] = {
  /* AT_HWCAP masks.  */
  { "4xxmac",         PPC_FEATURE_HAS_4xxMAC,       0 },
  { "altivec",        PPC_FEATURE_HAS_ALTIVEC,      0 },
  { "arch_2_05",      PPC_FEATURE_ARCH_2_05,        0 },
  { "arch_2_06",      PPC_FEATURE_ARCH_2_06,        0 },
  { "archpmu",        PPC_FEATURE_PERFMON_COMPAT,   0 },
  { "booke",          PPC_FEATURE_BOOKE,            0 },
  { "cellbe",         PPC_FEATURE_CELL_BE,          0 },
  { "dfp",            PPC_FEATURE_HAS_DFP,          0 },
  { "efpdouble",      PPC_FEATURE_HAS_EFP_DOUBLE,   0 },
  { "efpsingle",      PPC_FEATURE_HAS_EFP_SINGLE,   0 },
  { "fpu",            PPC_FEATURE_HAS_FPU,          0 },
  { "ic_snoop",       PPC_FEATURE_ICACHE_SNOOP,     0 },
  { "mmu",            PPC_FEATURE_HAS_MMU,          0 },
  { "notb",           PPC_FEATURE_NO_TB,            0 },
  { "pa6t",           PPC_FEATURE_PA6T,             0 },
  { "power4",         PPC_FEATURE_POWER4,           0 },
  { "power5",         PPC_FEATURE_POWER5,           0 },
  { "power5+",        PPC_FEATURE_POWER5_PLUS,      0 },
  { "power6x",        PPC_FEATURE_POWER6_EXT,       0 },
  { "ppc32",          PPC_FEATURE_32,               0 },
  { "ppc601",         PPC_FEATURE_601_INSTR,        0 },
  { "ppc64",          PPC_FEATURE_64,               0 },
  { "ppcle",          PPC_FEATURE_PPC_LE,           0 },
  { "smt",            PPC_FEATURE_SMT,              0 },
  { "spe",            PPC_FEATURE_HAS_SPE,          0 },
  { "true_le",        PPC_FEATURE_TRUE_LE,          0 },
  { "ucache",         PPC_FEATURE_UNIFIED_CACHE,    0 },
  { "vsx",            PPC_FEATURE_HAS_VSX,          0 },

  /* AT_HWCAP2 masks.  */
  { "arch_2_07",      PPC_FEATURE2_ARCH_2_07,       1 },
  { "dscr",           PPC_FEATURE2_HAS_DSCR,        1 },
  { "ebb",            PPC_FEATURE2_HAS_EBB,         1 },
  { "htm",            PPC_FEATURE2_HAS_HTM,         1 },
  { "htm-nosc",       PPC_FEATURE2_HTM_NOSC,        1 },
  { "htm-no-suspend", PPC_FEATURE2_HTM_NO_SUSPEND,  1 },
  { "isel",           PPC_FEATURE2_HAS_ISEL,        1 },
  { "tar",            PPC_FEATURE2_HAS_TAR,         1 },
  { "vcrypto",        PPC_FEATURE2_HAS_VEC_CRYPTO,  1 },
  { "arch_3_00",      PPC_FEATURE2_ARCH_3_00,       1 },
  { "ieee128",        PPC_FEATURE2_HAS_IEEE128,     1 },
  { "darn",           PPC_FEATURE2_DARN,            1 },
  { "scv",            PPC_FEATURE2_SCV,             1 }
};
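
/* Illustrative (hypothetical) user code resolved through this table:

     if (__builtin_cpu_supports ("vsx"))
       ...

   tests PPC_FEATURE_HAS_VSX against the AT_HWCAP word cached in the TCB;
   entries with id 1 above are tested against AT_HWCAP2 instead.  */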

/* On PowerPC, we have a limited number of target clones that we care about,
   which means we can use an array to hold the options, rather than having more
   elaborate data structures to identify each possible variation.  Order the
   clones from the default to the highest ISA.  */
enum {
  CLONE_DEFAULT = 0,        /* default clone.  */
  CLONE_ISA_2_05,           /* ISA 2.05 (power6).  */
  CLONE_ISA_2_06,           /* ISA 2.06 (power7).  */
  CLONE_ISA_2_07,           /* ISA 2.07 (power8).  */
  CLONE_ISA_3_00,           /* ISA 3.00 (power9).  */
  CLONE_MAX
};

/* Map compiler ISA bits into HWCAP names.  */
struct clone_map {
  HOST_WIDE_INT isa_mask;   /* rs6000_isa mask */
  const char *name;         /* name to use in __builtin_cpu_supports.  */
};

static const struct clone_map rs6000_clone_map[CLONE_MAX] = {
  { 0,                       "" },           /* Default options.  */
  { OPTION_MASK_CMPB,        "arch_2_05" },  /* ISA 2.05 (power6).  */
  { OPTION_MASK_POPCNTD,     "arch_2_06" },  /* ISA 2.06 (power7).  */
  { OPTION_MASK_P8_VECTOR,   "arch_2_07" },  /* ISA 2.07 (power8).  */
  { OPTION_MASK_P9_VECTOR,   "arch_3_00" },  /* ISA 3.00 (power9).  */
};
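
/* These entries back function multiversioning; a declaration such as the
   following (a sketch, assuming the documented target_clones syntax)

     __attribute__ ((target_clones ("cpu=power9,default")))
     long foo (long a, long b);

   dispatches between the CLONE_DEFAULT and CLONE_ISA_3_00 bodies at run
   time via __builtin_cpu_supports ("arch_3_00").  */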


/* Newer LIBCs explicitly export this symbol to declare that they provide
   the AT_PLATFORM and AT_HWCAP/AT_HWCAP2 values in the TCB.  We emit a
   reference to this symbol whenever we expand a CPU builtin, so that
   we never link against an old LIBC.  */
const char *tcb_verification_symbol = "__parse_hwcap_and_convert_at_platform";

/* True if we have expanded a CPU builtin.  */
bool cpu_builtin_p;

/* Pointer to function (in rs6000-c.c) that can define or undefine target
   macros that have changed.  Languages that don't support the preprocessor
   don't link in rs6000-c.c, so we can't call it directly.  */
void (*rs6000_target_modify_macros_ptr) (bool, HOST_WIDE_INT, HOST_WIDE_INT);

/* Simplify register classes into simpler classifications.  We assume
   GPR_REG_TYPE - FPR_REG_TYPE are ordered so that we can use a simple range
   check for standard register classes (gpr/floating/altivec/vsx) and
   floating/vector classes (float/altivec/vsx).  */

enum rs6000_reg_type {
  NO_REG_TYPE,
  PSEUDO_REG_TYPE,
  GPR_REG_TYPE,
  VSX_REG_TYPE,
  ALTIVEC_REG_TYPE,
  FPR_REG_TYPE,
  SPR_REG_TYPE,
  CR_REG_TYPE
};

/* Map register class to register type.  */
static enum rs6000_reg_type reg_class_to_reg_type[N_REG_CLASSES];

/* First/last register type for the 'normal' register types (i.e. general
   purpose, floating point, altivec, and VSX registers).  */
#define IS_STD_REG_TYPE(RTYPE) IN_RANGE(RTYPE, GPR_REG_TYPE, FPR_REG_TYPE)

#define IS_FP_VECT_REG_TYPE(RTYPE) IN_RANGE(RTYPE, VSX_REG_TYPE, FPR_REG_TYPE)


/* Register classes we care about in secondary reload or when checking for a
   legitimate address.  We only need to worry about GPR, FPR, and Altivec
   registers here, along with an ANY field that is the OR of the 3 register
   classes.  */

enum rs6000_reload_reg_type {
  RELOAD_REG_GPR,       /* General purpose registers.  */
  RELOAD_REG_FPR,       /* Traditional floating point regs.  */
  RELOAD_REG_VMX,       /* Altivec (VMX) registers.  */
  RELOAD_REG_ANY,       /* OR of GPR, FPR, Altivec masks.  */
  N_RELOAD_REG
};

/* For setting up register classes, loop through the 3 register classes mapping
   into real registers, and skip the ANY class, which is just an OR of the
   bits.  */
#define FIRST_RELOAD_REG_CLASS  RELOAD_REG_GPR
#define LAST_RELOAD_REG_CLASS   RELOAD_REG_VMX

/* Map reload register type to a register in the register class.  */
struct reload_reg_map_type {
  const char *name;     /* Register class name.  */
  int reg;              /* Register in the register class.  */
};

static const struct reload_reg_map_type reload_reg_map[N_RELOAD_REG] = {
  { "Gpr",  FIRST_GPR_REGNO },      /* RELOAD_REG_GPR.  */
  { "Fpr",  FIRST_FPR_REGNO },      /* RELOAD_REG_FPR.  */
  { "VMX",  FIRST_ALTIVEC_REGNO },  /* RELOAD_REG_VMX.  */
  { "Any",  -1 },                   /* RELOAD_REG_ANY.  */
};

/* Mask bits for each register class, indexed per mode.  Historically the
   compiler has been more restrictive about which types can do PRE_MODIFY
   instead of PRE_INC and PRE_DEC, so keep track of separate bits for these
   two.  */
typedef unsigned char addr_mask_type;

#define RELOAD_REG_VALID       0x01  /* Mode valid in register.  */
#define RELOAD_REG_MULTIPLE    0x02  /* Mode takes multiple registers.  */
#define RELOAD_REG_INDEXED     0x04  /* Reg+reg addressing.  */
#define RELOAD_REG_OFFSET      0x08  /* Reg+offset addressing.  */
#define RELOAD_REG_PRE_INCDEC  0x10  /* PRE_INC/PRE_DEC valid.  */
#define RELOAD_REG_PRE_MODIFY  0x20  /* PRE_MODIFY valid.  */
#define RELOAD_REG_AND_M16     0x40  /* AND -16 addressing.  */
#define RELOAD_REG_QUAD_OFFSET 0x80  /* quad offset is limited.  */

/* Per-mode addressing information: reload insn codes, plus masks of the
   addressing modes that are valid for each register type.  */
struct rs6000_reg_addr {
  enum insn_code reload_load;    /* INSN to reload for loading.  */
  enum insn_code reload_store;   /* INSN to reload for storing.  */
  enum insn_code reload_fpr_gpr; /* INSN to move from FPR to GPR.  */
  enum insn_code reload_gpr_vsx; /* INSN to move from GPR to VSX.  */
  enum insn_code reload_vsx_gpr; /* INSN to move from VSX to GPR.  */
  addr_mask_type addr_mask[(int)N_RELOAD_REG]; /* Valid address masks.  */
  bool scalar_in_vmx_p;          /* Scalar value can go in VMX.  */
};

static struct rs6000_reg_addr reg_addr[NUM_MACHINE_MODES];

/* Helper function to say whether a mode supports PRE_INC or PRE_DEC.  */
static inline bool
mode_supports_pre_incdec_p (machine_mode mode)
{
  return ((reg_addr[mode].addr_mask[RELOAD_REG_ANY] & RELOAD_REG_PRE_INCDEC)
          != 0);
}

/* Helper function to say whether a mode supports PRE_MODIFY.  */
static inline bool
mode_supports_pre_modify_p (machine_mode mode)
{
  return ((reg_addr[mode].addr_mask[RELOAD_REG_ANY] & RELOAD_REG_PRE_MODIFY)
          != 0);
}

/* Return true if we have D-form addressing in altivec registers.  */
static inline bool
mode_supports_vmx_dform (machine_mode mode)
{
  return ((reg_addr[mode].addr_mask[RELOAD_REG_VMX] & RELOAD_REG_OFFSET) != 0);
}

/* Return true if we have D-form addressing in VSX registers.  This addressing
   is more limited than normal d-form addressing in that the offset must be
   aligned on a 16-byte boundary.  */
static inline bool
mode_supports_dq_form (machine_mode mode)
{
  return ((reg_addr[mode].addr_mask[RELOAD_REG_ANY] & RELOAD_REG_QUAD_OFFSET)
          != 0);
}


/* Given that there exists at least one variable that is set (produced)
   by OUT_INSN and read (consumed) by IN_INSN, return true iff
   IN_INSN represents one or more memory store operations and none of
   the variables set by OUT_INSN is used by IN_INSN as the address of a
   store operation.  If either IN_INSN or OUT_INSN does not represent
   a "single" RTL SET expression (as loosely defined by the
   implementation of the single_set function) or a PARALLEL with only
   SETs, CLOBBERs, and USEs inside, this function returns false.

   This rs6000-specific version of store_data_bypass_p checks for
   certain conditions that result in assertion failures (and internal
   compiler errors) in the generic store_data_bypass_p function and
   returns false rather than calling store_data_bypass_p if one of the
   problematic conditions is detected.  */

int
rs6000_store_data_bypass_p (rtx_insn *out_insn, rtx_insn *in_insn)
{
  rtx out_set, in_set;
  rtx out_pat, in_pat;
  rtx out_exp, in_exp;
  int i, j;

  in_set = single_set (in_insn);
  if (in_set)
    {
      if (MEM_P (SET_DEST (in_set)))
        {
          out_set = single_set (out_insn);
          if (!out_set)
            {
              out_pat = PATTERN (out_insn);
              if (GET_CODE (out_pat) == PARALLEL)
                {
                  for (i = 0; i < XVECLEN (out_pat, 0); i++)
                    {
                      out_exp = XVECEXP (out_pat, 0, i);
                      if ((GET_CODE (out_exp) == CLOBBER)
                          || (GET_CODE (out_exp) == USE))
                        continue;
                      else if (GET_CODE (out_exp) != SET)
                        return false;
                    }
                }
            }
        }
    }
  else
    {
      in_pat = PATTERN (in_insn);
      if (GET_CODE (in_pat) != PARALLEL)
        return false;

      for (i = 0; i < XVECLEN (in_pat, 0); i++)
        {
          in_exp = XVECEXP (in_pat, 0, i);
          if ((GET_CODE (in_exp) == CLOBBER) || (GET_CODE (in_exp) == USE))
            continue;
          else if (GET_CODE (in_exp) != SET)
            return false;

          if (MEM_P (SET_DEST (in_exp)))
            {
              out_set = single_set (out_insn);
              if (!out_set)
                {
                  out_pat = PATTERN (out_insn);
                  if (GET_CODE (out_pat) != PARALLEL)
                    return false;
                  for (j = 0; j < XVECLEN (out_pat, 0); j++)
                    {
                      out_exp = XVECEXP (out_pat, 0, j);
                      if ((GET_CODE (out_exp) == CLOBBER)
                          || (GET_CODE (out_exp) == USE))
                        continue;
                      else if (GET_CODE (out_exp) != SET)
                        return false;
                    }
                }
            }
        }
    }
  return store_data_bypass_p (out_insn, in_insn);
}
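
/* For illustration (schematic RTL; register numbers are placeholders):
   an OUT_INSN of the form

     (parallel [(set (reg:DI 3) (plus:DI (reg:DI 4) (reg:DI 5)))
                (clobber (reg:CC 100))])

   contains only SETs and CLOBBERs, so the checks above fall through to
   the generic store_data_bypass_p; a PARALLEL containing any other kind
   of element makes this wrapper return false instead of risking an
   assertion failure in the generic code.  */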

\f
/* Processor costs (relative to an add) */

const struct processor_costs *rs6000_cost;

/* Instruction size costs on 32-bit processors.  */
static const
struct processor_costs size32_cost = {
  COSTS_N_INSNS (1),    /* mulsi */
  COSTS_N_INSNS (1),    /* mulsi_const */
  COSTS_N_INSNS (1),    /* mulsi_const9 */
  COSTS_N_INSNS (1),    /* muldi */
  COSTS_N_INSNS (1),    /* divsi */
  COSTS_N_INSNS (1),    /* divdi */
  COSTS_N_INSNS (1),    /* fp */
  COSTS_N_INSNS (1),    /* dmul */
  COSTS_N_INSNS (1),    /* sdiv */
  COSTS_N_INSNS (1),    /* ddiv */
  32,                   /* cache line size */
  0,                    /* l1 cache */
  0,                    /* l2 cache */
  0,                    /* streams */
  0,                    /* SF->DF convert */
};

/* Instruction size costs on 64-bit processors.  */
static const
struct processor_costs size64_cost = {
  COSTS_N_INSNS (1),    /* mulsi */
  COSTS_N_INSNS (1),    /* mulsi_const */
  COSTS_N_INSNS (1),    /* mulsi_const9 */
  COSTS_N_INSNS (1),    /* muldi */
  COSTS_N_INSNS (1),    /* divsi */
  COSTS_N_INSNS (1),    /* divdi */
  COSTS_N_INSNS (1),    /* fp */
  COSTS_N_INSNS (1),    /* dmul */
  COSTS_N_INSNS (1),    /* sdiv */
  COSTS_N_INSNS (1),    /* ddiv */
  128,                  /* cache line size */
  0,                    /* l1 cache */
  0,                    /* l2 cache */
  0,                    /* streams */
  0,                    /* SF->DF convert */
};

/* Instruction costs on RS64A processors.  */
static const
struct processor_costs rs64a_cost = {
  COSTS_N_INSNS (20),   /* mulsi */
  COSTS_N_INSNS (12),   /* mulsi_const */
  COSTS_N_INSNS (8),    /* mulsi_const9 */
  COSTS_N_INSNS (34),   /* muldi */
  COSTS_N_INSNS (65),   /* divsi */
  COSTS_N_INSNS (67),   /* divdi */
  COSTS_N_INSNS (4),    /* fp */
  COSTS_N_INSNS (4),    /* dmul */
  COSTS_N_INSNS (31),   /* sdiv */
  COSTS_N_INSNS (31),   /* ddiv */
  128,                  /* cache line size */
  128,                  /* l1 cache */
  2048,                 /* l2 cache */
  1,                    /* streams */
  0,                    /* SF->DF convert */
};

/* Instruction costs on MPCCORE processors.  */
static const
struct processor_costs mpccore_cost = {
  COSTS_N_INSNS (2),    /* mulsi */
  COSTS_N_INSNS (2),    /* mulsi_const */
  COSTS_N_INSNS (2),    /* mulsi_const9 */
  COSTS_N_INSNS (2),    /* muldi */
  COSTS_N_INSNS (6),    /* divsi */
  COSTS_N_INSNS (6),    /* divdi */
  COSTS_N_INSNS (4),    /* fp */
  COSTS_N_INSNS (5),    /* dmul */
  COSTS_N_INSNS (10),   /* sdiv */
  COSTS_N_INSNS (17),   /* ddiv */
  32,                   /* cache line size */
  4,                    /* l1 cache */
  16,                   /* l2 cache */
  1,                    /* streams */
  0,                    /* SF->DF convert */
};

/* Instruction costs on PPC403 processors.  */
static const
struct processor_costs ppc403_cost = {
  COSTS_N_INSNS (4),    /* mulsi */
  COSTS_N_INSNS (4),    /* mulsi_const */
  COSTS_N_INSNS (4),    /* mulsi_const9 */
  COSTS_N_INSNS (4),    /* muldi */
  COSTS_N_INSNS (33),   /* divsi */
  COSTS_N_INSNS (33),   /* divdi */
  COSTS_N_INSNS (11),   /* fp */
  COSTS_N_INSNS (11),   /* dmul */
  COSTS_N_INSNS (11),   /* sdiv */
  COSTS_N_INSNS (11),   /* ddiv */
  32,                   /* cache line size */
  4,                    /* l1 cache */
  16,                   /* l2 cache */
  1,                    /* streams */
  0,                    /* SF->DF convert */
};

/* Instruction costs on PPC405 processors.  */
static const
struct processor_costs ppc405_cost = {
  COSTS_N_INSNS (5),    /* mulsi */
  COSTS_N_INSNS (4),    /* mulsi_const */
  COSTS_N_INSNS (3),    /* mulsi_const9 */
  COSTS_N_INSNS (5),    /* muldi */
  COSTS_N_INSNS (35),   /* divsi */
  COSTS_N_INSNS (35),   /* divdi */
  COSTS_N_INSNS (11),   /* fp */
  COSTS_N_INSNS (11),   /* dmul */
  COSTS_N_INSNS (11),   /* sdiv */
  COSTS_N_INSNS (11),   /* ddiv */
  32,                   /* cache line size */
  16,                   /* l1 cache */
  128,                  /* l2 cache */
  1,                    /* streams */
  0,                    /* SF->DF convert */
};

/* Instruction costs on PPC440 processors.  */
static const
struct processor_costs ppc440_cost = {
  COSTS_N_INSNS (3),    /* mulsi */
  COSTS_N_INSNS (2),    /* mulsi_const */
  COSTS_N_INSNS (2),    /* mulsi_const9 */
  COSTS_N_INSNS (3),    /* muldi */
  COSTS_N_INSNS (34),   /* divsi */
  COSTS_N_INSNS (34),   /* divdi */
  COSTS_N_INSNS (5),    /* fp */
  COSTS_N_INSNS (5),    /* dmul */
  COSTS_N_INSNS (19),   /* sdiv */
  COSTS_N_INSNS (33),   /* ddiv */
  32,                   /* cache line size */
  32,                   /* l1 cache */
  256,                  /* l2 cache */
  1,                    /* streams */
  0,                    /* SF->DF convert */
};

/* Instruction costs on PPC476 processors.  */
static const
struct processor_costs ppc476_cost = {
  COSTS_N_INSNS (4),    /* mulsi */
  COSTS_N_INSNS (4),    /* mulsi_const */
  COSTS_N_INSNS (4),    /* mulsi_const9 */
  COSTS_N_INSNS (4),    /* muldi */
  COSTS_N_INSNS (11),   /* divsi */
  COSTS_N_INSNS (11),   /* divdi */
  COSTS_N_INSNS (6),    /* fp */
  COSTS_N_INSNS (6),    /* dmul */
  COSTS_N_INSNS (19),   /* sdiv */
  COSTS_N_INSNS (33),   /* ddiv */
  32,                   /* l1 cache line size */
  32,                   /* l1 cache */
  512,                  /* l2 cache */
  1,                    /* streams */
  0,                    /* SF->DF convert */
};

/* Instruction costs on PPC601 processors.  */
static const
struct processor_costs ppc601_cost = {
  COSTS_N_INSNS (5),    /* mulsi */
  COSTS_N_INSNS (5),    /* mulsi_const */
  COSTS_N_INSNS (5),    /* mulsi_const9 */
  COSTS_N_INSNS (5),    /* muldi */
  COSTS_N_INSNS (36),   /* divsi */
  COSTS_N_INSNS (36),   /* divdi */
  COSTS_N_INSNS (4),    /* fp */
  COSTS_N_INSNS (5),    /* dmul */
  COSTS_N_INSNS (17),   /* sdiv */
  COSTS_N_INSNS (31),   /* ddiv */
  32,                   /* cache line size */
  32,                   /* l1 cache */
  256,                  /* l2 cache */
  1,                    /* streams */
  0,                    /* SF->DF convert */
};

/* Instruction costs on PPC603 processors.  */
static const
struct processor_costs ppc603_cost = {
  COSTS_N_INSNS (5),    /* mulsi */
  COSTS_N_INSNS (3),    /* mulsi_const */
  COSTS_N_INSNS (2),    /* mulsi_const9 */
  COSTS_N_INSNS (5),    /* muldi */
  COSTS_N_INSNS (37),   /* divsi */
  COSTS_N_INSNS (37),   /* divdi */
  COSTS_N_INSNS (3),    /* fp */
  COSTS_N_INSNS (4),    /* dmul */
  COSTS_N_INSNS (18),   /* sdiv */
  COSTS_N_INSNS (33),   /* ddiv */
  32,                   /* cache line size */
  8,                    /* l1 cache */
  64,                   /* l2 cache */
  1,                    /* streams */
  0,                    /* SF->DF convert */
};

/* Instruction costs on PPC604 processors.  */
static const
struct processor_costs ppc604_cost = {
  COSTS_N_INSNS (4),    /* mulsi */
  COSTS_N_INSNS (4),    /* mulsi_const */
  COSTS_N_INSNS (4),    /* mulsi_const9 */
  COSTS_N_INSNS (4),    /* muldi */
  COSTS_N_INSNS (20),   /* divsi */
  COSTS_N_INSNS (20),   /* divdi */
  COSTS_N_INSNS (3),    /* fp */
  COSTS_N_INSNS (3),    /* dmul */
  COSTS_N_INSNS (18),   /* sdiv */
  COSTS_N_INSNS (32),   /* ddiv */
  32,                   /* cache line size */
  16,                   /* l1 cache */
  512,                  /* l2 cache */
  1,                    /* streams */
  0,                    /* SF->DF convert */
};

/* Instruction costs on PPC604e processors.  */
static const
struct processor_costs ppc604e_cost = {
  COSTS_N_INSNS (2),    /* mulsi */
  COSTS_N_INSNS (2),    /* mulsi_const */
  COSTS_N_INSNS (2),    /* mulsi_const9 */
  COSTS_N_INSNS (2),    /* muldi */
  COSTS_N_INSNS (20),   /* divsi */
  COSTS_N_INSNS (20),   /* divdi */
  COSTS_N_INSNS (3),    /* fp */
  COSTS_N_INSNS (3),    /* dmul */
  COSTS_N_INSNS (18),   /* sdiv */
  COSTS_N_INSNS (32),   /* ddiv */
  32,                   /* cache line size */
  32,                   /* l1 cache */
  1024,                 /* l2 cache */
  1,                    /* streams */
  0,                    /* SF->DF convert */
};

/* Instruction costs on PPC620 processors.  */
static const
struct processor_costs ppc620_cost = {
  COSTS_N_INSNS (5),    /* mulsi */
  COSTS_N_INSNS (4),    /* mulsi_const */
  COSTS_N_INSNS (3),    /* mulsi_const9 */
  COSTS_N_INSNS (7),    /* muldi */
  COSTS_N_INSNS (21),   /* divsi */
  COSTS_N_INSNS (37),   /* divdi */
  COSTS_N_INSNS (3),    /* fp */
  COSTS_N_INSNS (3),    /* dmul */
  COSTS_N_INSNS (18),   /* sdiv */
  COSTS_N_INSNS (32),   /* ddiv */
  128,                  /* cache line size */
  32,                   /* l1 cache */
  1024,                 /* l2 cache */
  1,                    /* streams */
  0,                    /* SF->DF convert */
};

/* Instruction costs on PPC630 processors.  */
static const
struct processor_costs ppc630_cost = {
  COSTS_N_INSNS (5),    /* mulsi */
  COSTS_N_INSNS (4),    /* mulsi_const */
  COSTS_N_INSNS (3),    /* mulsi_const9 */
  COSTS_N_INSNS (7),    /* muldi */
  COSTS_N_INSNS (21),   /* divsi */
  COSTS_N_INSNS (37),   /* divdi */
  COSTS_N_INSNS (3),    /* fp */
  COSTS_N_INSNS (3),    /* dmul */
  COSTS_N_INSNS (17),   /* sdiv */
  COSTS_N_INSNS (21),   /* ddiv */
  128,                  /* cache line size */
  64,                   /* l1 cache */
  1024,                 /* l2 cache */
  1,                    /* streams */
  0,                    /* SF->DF convert */
};

/* Instruction costs on Cell processor.  */
/* COSTS_N_INSNS (1) ~ one add.  */
static const
struct processor_costs ppccell_cost = {
  COSTS_N_INSNS (9/2)+2,    /* mulsi */
  COSTS_N_INSNS (6/2),      /* mulsi_const */
  COSTS_N_INSNS (6/2),      /* mulsi_const9 */
  COSTS_N_INSNS (15/2)+2,   /* muldi */
  COSTS_N_INSNS (38/2),     /* divsi */
  COSTS_N_INSNS (70/2),     /* divdi */
  COSTS_N_INSNS (10/2),     /* fp */
  COSTS_N_INSNS (10/2),     /* dmul */
  COSTS_N_INSNS (74/2),     /* sdiv */
  COSTS_N_INSNS (74/2),     /* ddiv */
  128,                  /* cache line size */
  32,                   /* l1 cache */
  512,                  /* l2 cache */
  6,                    /* streams */
  0,                    /* SF->DF convert */
};

/* Instruction costs on PPC750 and PPC7400 processors.  */
static const
struct processor_costs ppc750_cost = {
  COSTS_N_INSNS (5),    /* mulsi */
  COSTS_N_INSNS (3),    /* mulsi_const */
  COSTS_N_INSNS (2),    /* mulsi_const9 */
  COSTS_N_INSNS (5),    /* muldi */
  COSTS_N_INSNS (17),   /* divsi */
  COSTS_N_INSNS (17),   /* divdi */
  COSTS_N_INSNS (3),    /* fp */
  COSTS_N_INSNS (3),    /* dmul */
  COSTS_N_INSNS (17),   /* sdiv */
  COSTS_N_INSNS (31),   /* ddiv */
  32,                   /* cache line size */
  32,                   /* l1 cache */
  512,                  /* l2 cache */
  1,                    /* streams */
  0,                    /* SF->DF convert */
};

/* Instruction costs on PPC7450 processors.  */
static const
struct processor_costs ppc7450_cost = {
  COSTS_N_INSNS (4),    /* mulsi */
  COSTS_N_INSNS (3),    /* mulsi_const */
  COSTS_N_INSNS (3),    /* mulsi_const9 */
  COSTS_N_INSNS (4),    /* muldi */
  COSTS_N_INSNS (23),   /* divsi */
  COSTS_N_INSNS (23),   /* divdi */
  COSTS_N_INSNS (5),    /* fp */
  COSTS_N_INSNS (5),    /* dmul */
  COSTS_N_INSNS (21),   /* sdiv */
  COSTS_N_INSNS (35),   /* ddiv */
  32,                   /* cache line size */
  32,                   /* l1 cache */
  1024,                 /* l2 cache */
  1,                    /* streams */
  0,                    /* SF->DF convert */
};

/* Instruction costs on PPC8540 processors.  */
static const
struct processor_costs ppc8540_cost = {
  COSTS_N_INSNS (4),    /* mulsi */
  COSTS_N_INSNS (4),    /* mulsi_const */
  COSTS_N_INSNS (4),    /* mulsi_const9 */
  COSTS_N_INSNS (4),    /* muldi */
  COSTS_N_INSNS (19),   /* divsi */
  COSTS_N_INSNS (19),   /* divdi */
  COSTS_N_INSNS (4),    /* fp */
  COSTS_N_INSNS (4),    /* dmul */
  COSTS_N_INSNS (29),   /* sdiv */
  COSTS_N_INSNS (29),   /* ddiv */
  32,                   /* cache line size */
  32,                   /* l1 cache */
  256,                  /* l2 cache */
  1,                    /* prefetch streams */
  0,                    /* SF->DF convert */
};

/* Instruction costs on E300C2 and E300C3 cores.  */
static const
struct processor_costs ppce300c2c3_cost = {
  COSTS_N_INSNS (4),    /* mulsi */
  COSTS_N_INSNS (4),    /* mulsi_const */
  COSTS_N_INSNS (4),    /* mulsi_const9 */
  COSTS_N_INSNS (4),    /* muldi */
  COSTS_N_INSNS (19),   /* divsi */
  COSTS_N_INSNS (19),   /* divdi */
  COSTS_N_INSNS (3),    /* fp */
  COSTS_N_INSNS (4),    /* dmul */
  COSTS_N_INSNS (18),   /* sdiv */
  COSTS_N_INSNS (33),   /* ddiv */
  32,                   /* cache line size */
  16,                   /* l1 cache */
  16,                   /* l2 cache */
  1,                    /* prefetch streams */
  0,                    /* SF->DF convert */
};

/* Instruction costs on PPCE500MC processors.  */
static const
struct processor_costs ppce500mc_cost = {
  COSTS_N_INSNS (4),    /* mulsi */
  COSTS_N_INSNS (4),    /* mulsi_const */
  COSTS_N_INSNS (4),    /* mulsi_const9 */
  COSTS_N_INSNS (4),    /* muldi */
  COSTS_N_INSNS (14),   /* divsi */
  COSTS_N_INSNS (14),   /* divdi */
  COSTS_N_INSNS (8),    /* fp */
  COSTS_N_INSNS (10),   /* dmul */
  COSTS_N_INSNS (36),   /* sdiv */
  COSTS_N_INSNS (66),   /* ddiv */
  64,                   /* cache line size */
  32,                   /* l1 cache */
  128,                  /* l2 cache */
  1,                    /* prefetch streams */
  0,                    /* SF->DF convert */
};

/* Instruction costs on PPCE500MC64 processors.  */
static const
struct processor_costs ppce500mc64_cost = {
  COSTS_N_INSNS (4),    /* mulsi */
  COSTS_N_INSNS (4),    /* mulsi_const */
  COSTS_N_INSNS (4),    /* mulsi_const9 */
  COSTS_N_INSNS (4),    /* muldi */
  COSTS_N_INSNS (14),   /* divsi */
  COSTS_N_INSNS (14),   /* divdi */
  COSTS_N_INSNS (4),    /* fp */
  COSTS_N_INSNS (10),   /* dmul */
  COSTS_N_INSNS (36),   /* sdiv */
  COSTS_N_INSNS (66),   /* ddiv */
  64,                   /* cache line size */
  32,                   /* l1 cache */
  128,                  /* l2 cache */
  1,                    /* prefetch streams */
  0,                    /* SF->DF convert */
};

/* Instruction costs on PPCE5500 processors.  */
static const
struct processor_costs ppce5500_cost = {
  COSTS_N_INSNS (5),    /* mulsi */
  COSTS_N_INSNS (5),    /* mulsi_const */
  COSTS_N_INSNS (4),    /* mulsi_const9 */
  COSTS_N_INSNS (5),    /* muldi */
  COSTS_N_INSNS (14),   /* divsi */
  COSTS_N_INSNS (14),   /* divdi */
  COSTS_N_INSNS (7),    /* fp */
  COSTS_N_INSNS (10),   /* dmul */
  COSTS_N_INSNS (36),   /* sdiv */
  COSTS_N_INSNS (66),   /* ddiv */
  64,                   /* cache line size */
  32,                   /* l1 cache */
  128,                  /* l2 cache */
  1,                    /* prefetch streams */
  0,                    /* SF->DF convert */
};

/* Instruction costs on PPCE6500 processors.  */
static const
struct processor_costs ppce6500_cost = {
  COSTS_N_INSNS (5),    /* mulsi */
  COSTS_N_INSNS (5),    /* mulsi_const */
  COSTS_N_INSNS (4),    /* mulsi_const9 */
  COSTS_N_INSNS (5),    /* muldi */
  COSTS_N_INSNS (14),   /* divsi */
  COSTS_N_INSNS (14),   /* divdi */
  COSTS_N_INSNS (7),    /* fp */
  COSTS_N_INSNS (10),   /* dmul */
  COSTS_N_INSNS (36),   /* sdiv */
  COSTS_N_INSNS (66),   /* ddiv */
  64,                   /* cache line size */
  32,                   /* l1 cache */
  128,                  /* l2 cache */
  1,                    /* prefetch streams */
  0,                    /* SF->DF convert */
};

/* Instruction costs on AppliedMicro Titan processors.  */
static const
struct processor_costs titan_cost = {
  COSTS_N_INSNS (5),    /* mulsi */
  COSTS_N_INSNS (5),    /* mulsi_const */
  COSTS_N_INSNS (5),    /* mulsi_const9 */
  COSTS_N_INSNS (5),    /* muldi */
  COSTS_N_INSNS (18),   /* divsi */
  COSTS_N_INSNS (18),   /* divdi */
  COSTS_N_INSNS (10),   /* fp */
  COSTS_N_INSNS (10),   /* dmul */
  COSTS_N_INSNS (46),   /* sdiv */
  COSTS_N_INSNS (72),   /* ddiv */
  32,                   /* cache line size */
  32,                   /* l1 cache */
  512,                  /* l2 cache */
  1,                    /* prefetch streams */
  0,                    /* SF->DF convert */
};

/* Instruction costs on POWER4 and POWER5 processors.  */
static const
struct processor_costs power4_cost = {
  COSTS_N_INSNS (3),    /* mulsi */
  COSTS_N_INSNS (2),    /* mulsi_const */
  COSTS_N_INSNS (2),    /* mulsi_const9 */
  COSTS_N_INSNS (4),    /* muldi */
  COSTS_N_INSNS (18),   /* divsi */
  COSTS_N_INSNS (34),   /* divdi */
  COSTS_N_INSNS (3),    /* fp */
  COSTS_N_INSNS (3),    /* dmul */
  COSTS_N_INSNS (17),   /* sdiv */
  COSTS_N_INSNS (17),   /* ddiv */
  128,                  /* cache line size */
  32,                   /* l1 cache */
  1024,                 /* l2 cache */
  8,                    /* prefetch streams */
  0,                    /* SF->DF convert */
};

/* Instruction costs on POWER6 processors.  */
static const
struct processor_costs power6_cost = {
  COSTS_N_INSNS (8),    /* mulsi */
  COSTS_N_INSNS (8),    /* mulsi_const */
  COSTS_N_INSNS (8),    /* mulsi_const9 */
  COSTS_N_INSNS (8),    /* muldi */
  COSTS_N_INSNS (22),   /* divsi */
  COSTS_N_INSNS (28),   /* divdi */
  COSTS_N_INSNS (3),    /* fp */
  COSTS_N_INSNS (3),    /* dmul */
  COSTS_N_INSNS (13),   /* sdiv */
  COSTS_N_INSNS (16),   /* ddiv */
  128,                  /* cache line size */
  64,                   /* l1 cache */
  2048,                 /* l2 cache */
  16,                   /* prefetch streams */
  0,                    /* SF->DF convert */
};

/* Instruction costs on POWER7 processors.  */
static const
struct processor_costs power7_cost = {
  COSTS_N_INSNS (2),    /* mulsi */
  COSTS_N_INSNS (2),    /* mulsi_const */
  COSTS_N_INSNS (2),    /* mulsi_const9 */
  COSTS_N_INSNS (2),    /* muldi */
  COSTS_N_INSNS (18),   /* divsi */
  COSTS_N_INSNS (34),   /* divdi */
  COSTS_N_INSNS (3),    /* fp */
  COSTS_N_INSNS (3),    /* dmul */
  COSTS_N_INSNS (13),   /* sdiv */
  COSTS_N_INSNS (16),   /* ddiv */
  128,                  /* cache line size */
  32,                   /* l1 cache */
  256,                  /* l2 cache */
  12,                   /* prefetch streams */
  COSTS_N_INSNS (3),    /* SF->DF convert */
};

/* Instruction costs on POWER8 processors.  */
static const
struct processor_costs power8_cost = {
  COSTS_N_INSNS (3),    /* mulsi */
  COSTS_N_INSNS (3),    /* mulsi_const */
  COSTS_N_INSNS (3),    /* mulsi_const9 */
  COSTS_N_INSNS (3),    /* muldi */
  COSTS_N_INSNS (19),   /* divsi */
  COSTS_N_INSNS (35),   /* divdi */
  COSTS_N_INSNS (3),    /* fp */
  COSTS_N_INSNS (3),    /* dmul */
  COSTS_N_INSNS (14),   /* sdiv */
  COSTS_N_INSNS (17),   /* ddiv */
  128,                  /* cache line size */
  32,                   /* l1 cache */
  256,                  /* l2 cache */
  12,                   /* prefetch streams */
  COSTS_N_INSNS (3),    /* SF->DF convert */
};

/* Instruction costs on POWER9 processors.  */
static const
struct processor_costs power9_cost = {
  COSTS_N_INSNS (3),    /* mulsi */
  COSTS_N_INSNS (3),    /* mulsi_const */
  COSTS_N_INSNS (3),    /* mulsi_const9 */
  COSTS_N_INSNS (3),    /* muldi */
  COSTS_N_INSNS (8),    /* divsi */
  COSTS_N_INSNS (12),   /* divdi */
  COSTS_N_INSNS (3),    /* fp */
  COSTS_N_INSNS (3),    /* dmul */
  COSTS_N_INSNS (13),   /* sdiv */
  COSTS_N_INSNS (18),   /* ddiv */
  128,                  /* cache line size */
  32,                   /* l1 cache */
  512,                  /* l2 cache */
  8,                    /* prefetch streams */
  COSTS_N_INSNS (3),    /* SF->DF convert */
};

/* Instruction costs on POWER A2 processors.  */
static const
struct processor_costs ppca2_cost = {
  COSTS_N_INSNS (16),   /* mulsi */
  COSTS_N_INSNS (16),   /* mulsi_const */
  COSTS_N_INSNS (16),   /* mulsi_const9 */
  COSTS_N_INSNS (16),   /* muldi */
  COSTS_N_INSNS (22),   /* divsi */
  COSTS_N_INSNS (28),   /* divdi */
  COSTS_N_INSNS (3),    /* fp */
  COSTS_N_INSNS (3),    /* dmul */
  COSTS_N_INSNS (59),   /* sdiv */
  COSTS_N_INSNS (72),   /* ddiv */
  64,                   /* cache line size */
  16,                   /* l1 cache */
  2048,                 /* l2 cache */
  16,                   /* prefetch streams */
  0,                    /* SF->DF convert */
};

\f
/* Table that classifies rs6000 builtin functions (pure, const, etc.).  */
#undef RS6000_BUILTIN_0
#undef RS6000_BUILTIN_1
#undef RS6000_BUILTIN_2
#undef RS6000_BUILTIN_3
#undef RS6000_BUILTIN_A
#undef RS6000_BUILTIN_D
#undef RS6000_BUILTIN_H
#undef RS6000_BUILTIN_P
#undef RS6000_BUILTIN_X

#define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

struct rs6000_builtin_info_type {
  const char *name;
  const enum insn_code icode;
  const HOST_WIDE_INT mask;
  const unsigned attr;
};

static const struct rs6000_builtin_info_type rs6000_builtin_info[] =
{
#include "rs6000-builtin.def"
};
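
/* The table above is filled in with the X-macro technique: including
   rs6000-builtin.def between the braces expands each
   RS6000_BUILTIN_n (ENUM, NAME, MASK, ATTR, ICODE) entry through the
   macro definitions above into an initializer of the form
   { NAME, ICODE, MASK, ATTR }.  */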

#undef RS6000_BUILTIN_0
#undef RS6000_BUILTIN_1
#undef RS6000_BUILTIN_2
#undef RS6000_BUILTIN_3
#undef RS6000_BUILTIN_A
#undef RS6000_BUILTIN_D
#undef RS6000_BUILTIN_H
#undef RS6000_BUILTIN_P
#undef RS6000_BUILTIN_X

/* Support for -mveclibabi=<xxx> to control which vector library to use.  */
static tree (*rs6000_veclib_handler) (combined_fn, tree, tree);

\f
static bool rs6000_debug_legitimate_address_p (machine_mode, rtx, bool);
static tree rs6000_handle_longcall_attribute (tree *, tree, tree, int, bool *);
static tree rs6000_handle_altivec_attribute (tree *, tree, tree, int, bool *);
static tree rs6000_handle_struct_attribute (tree *, tree, tree, int, bool *);
static tree rs6000_builtin_vectorized_libmass (combined_fn, tree, tree);
static void rs6000_emit_set_long_const (rtx, HOST_WIDE_INT);
static int rs6000_memory_move_cost (machine_mode, reg_class_t, bool);
static bool rs6000_debug_rtx_costs (rtx, machine_mode, int, int, int *, bool);
static int rs6000_debug_address_cost (rtx, machine_mode, addr_space_t,
                                      bool);
static int rs6000_debug_adjust_cost (rtx_insn *, int, rtx_insn *, int,
                                     unsigned int);
static bool is_microcoded_insn (rtx_insn *);
static bool is_nonpipeline_insn (rtx_insn *);
static bool is_cracked_insn (rtx_insn *);
static bool is_load_insn (rtx, rtx *);
static bool is_store_insn (rtx, rtx *);
static bool set_to_load_agen (rtx_insn *, rtx_insn *);
static bool insn_terminates_group_p (rtx_insn *, enum group_termination);
static bool insn_must_be_first_in_group (rtx_insn *);
static bool insn_must_be_last_in_group (rtx_insn *);
static void altivec_init_builtins (void);
static tree builtin_function_type (machine_mode, machine_mode,
                                   machine_mode, machine_mode,
                                   enum rs6000_builtins, const char *name);
static void rs6000_common_init_builtins (void);
static void htm_init_builtins (void);
int easy_vector_constant (rtx, machine_mode);
static rtx rs6000_debug_legitimize_address (rtx, rtx, machine_mode);
static rtx rs6000_legitimize_tls_address (rtx, enum tls_model);
static rtx rs6000_darwin64_record_arg (CUMULATIVE_ARGS *, const_tree,
                                       bool, bool);
#if TARGET_MACHO
static void macho_branch_islands (void);
static tree get_prev_label (tree);
#endif
static bool rs6000_mode_dependent_address (const_rtx);
static bool rs6000_debug_mode_dependent_address (const_rtx);
static bool rs6000_offsettable_memref_p (rtx, machine_mode, bool);
static enum reg_class rs6000_secondary_reload_class (enum reg_class,
                                                     machine_mode, rtx);
static enum reg_class rs6000_debug_secondary_reload_class (enum reg_class,
                                                           machine_mode,
                                                           rtx);
static enum reg_class rs6000_preferred_reload_class (rtx, enum reg_class);
static enum reg_class rs6000_debug_preferred_reload_class (rtx,
                                                           enum reg_class);
static bool rs6000_debug_secondary_memory_needed (machine_mode,
                                                  reg_class_t,
                                                  reg_class_t);
static bool rs6000_debug_can_change_mode_class (machine_mode,
                                                machine_mode,
                                                reg_class_t);
static rtx rs6000_internal_arg_pointer (void);

static bool (*rs6000_mode_dependent_address_ptr) (const_rtx)
  = rs6000_mode_dependent_address;

enum reg_class (*rs6000_secondary_reload_class_ptr) (enum reg_class,
                                                     machine_mode, rtx)
  = rs6000_secondary_reload_class;

enum reg_class (*rs6000_preferred_reload_class_ptr) (rtx, enum reg_class)
  = rs6000_preferred_reload_class;

const int INSN_NOT_AVAILABLE = -1;

static void rs6000_print_isa_options (FILE *, int, const char *,
                                      HOST_WIDE_INT);
static void rs6000_print_builtin_options (FILE *, int, const char *,
                                          HOST_WIDE_INT);
static HOST_WIDE_INT rs6000_disable_incompatible_switches (void);

static enum rs6000_reg_type register_to_reg_type (rtx, bool *);
static bool rs6000_secondary_reload_move (enum rs6000_reg_type,
                                          enum rs6000_reg_type,
                                          machine_mode,
                                          secondary_reload_info *,
                                          bool);
rtl_opt_pass *make_pass_analyze_swaps (gcc::context*);
static bool rs6000_keep_leaf_when_profiled () __attribute__ ((unused));
static tree rs6000_fold_builtin (tree, int, tree *, bool);

/* Hash table stuff for keeping track of TOC entries.  */

struct GTY((for_user)) toc_hash_struct
{
  /* `key' will satisfy CONSTANT_P; in fact, it will satisfy
     ASM_OUTPUT_SPECIAL_POOL_ENTRY_P.  */
  rtx key;
  machine_mode key_mode;
  int labelno;
};

struct toc_hasher : ggc_ptr_hash<toc_hash_struct>
{
  static hashval_t hash (toc_hash_struct *);
  static bool equal (toc_hash_struct *, toc_hash_struct *);
};

static GTY (()) hash_table<toc_hasher> *toc_hash_table;

/* Hash table to keep track of the argument types for builtin functions.  */

struct GTY((for_user)) builtin_hash_struct
{
  tree type;
  machine_mode mode[4];    /* return value + 3 arguments.  */
  unsigned char uns_p[4];  /* and whether the types are unsigned.  */
};

struct builtin_hasher : ggc_ptr_hash<builtin_hash_struct>
{
  static hashval_t hash (builtin_hash_struct *);
  static bool equal (builtin_hash_struct *, builtin_hash_struct *);
};

static GTY (()) hash_table<builtin_hasher> *builtin_hash_table;

\f
/* Default register names.  */
char rs6000_reg_names[][8] =
{
  /* GPRs */
  "0", "1", "2", "3", "4", "5", "6", "7",
  "8", "9", "10", "11", "12", "13", "14", "15",
  "16", "17", "18", "19", "20", "21", "22", "23",
  "24", "25", "26", "27", "28", "29", "30", "31",
  /* FPRs */
  "0", "1", "2", "3", "4", "5", "6", "7",
  "8", "9", "10", "11", "12", "13", "14", "15",
  "16", "17", "18", "19", "20", "21", "22", "23",
  "24", "25", "26", "27", "28", "29", "30", "31",
  /* VRs */
  "0", "1", "2", "3", "4", "5", "6", "7",
  "8", "9", "10", "11", "12", "13", "14", "15",
  "16", "17", "18", "19", "20", "21", "22", "23",
  "24", "25", "26", "27", "28", "29", "30", "31",
  /* lr ctr ca ap */
  "lr", "ctr", "ca", "ap",
  /* cr0..cr7 */
  "0", "1", "2", "3", "4", "5", "6", "7",
  /* vrsave vscr sfp */
  "vrsave", "vscr", "sfp",
};

#ifdef TARGET_REGNAMES
static const char alt_reg_names[][8] =
{
  /* GPRs */
  "%r0", "%r1", "%r2", "%r3", "%r4", "%r5", "%r6", "%r7",
  "%r8", "%r9", "%r10", "%r11", "%r12", "%r13", "%r14", "%r15",
  "%r16", "%r17", "%r18", "%r19", "%r20", "%r21", "%r22", "%r23",
  "%r24", "%r25", "%r26", "%r27", "%r28", "%r29", "%r30", "%r31",
  /* FPRs */
  "%f0", "%f1", "%f2", "%f3", "%f4", "%f5", "%f6", "%f7",
  "%f8", "%f9", "%f10", "%f11", "%f12", "%f13", "%f14", "%f15",
  "%f16", "%f17", "%f18", "%f19", "%f20", "%f21", "%f22", "%f23",
  "%f24", "%f25", "%f26", "%f27", "%f28", "%f29", "%f30", "%f31",
  /* VRs */
  "%v0", "%v1", "%v2", "%v3", "%v4", "%v5", "%v6", "%v7",
  "%v8", "%v9", "%v10", "%v11", "%v12", "%v13", "%v14", "%v15",
  "%v16", "%v17", "%v18", "%v19", "%v20", "%v21", "%v22", "%v23",
  "%v24", "%v25", "%v26", "%v27", "%v28", "%v29", "%v30", "%v31",
  /* lr ctr ca ap */
  "lr", "ctr", "ca", "ap",
  /* cr0..cr7 */
  "%cr0", "%cr1", "%cr2", "%cr3", "%cr4", "%cr5", "%cr6", "%cr7",
  /* vrsave vscr sfp */
  "vrsave", "vscr", "sfp",
};
#endif

/* Table of valid machine attributes.  */

static const struct attribute_spec rs6000_attribute_table[] =
{
  /* { name, min_len, max_len, decl_req, type_req, fn_type_req,
       affects_type_identity, handler, exclude } */
  { "altivec",    1, 1, false, true,  false, false,
    rs6000_handle_altivec_attribute, NULL },
  { "longcall",   0, 0, false, true,  true,  false,
    rs6000_handle_longcall_attribute, NULL },
  { "shortcall",  0, 0, false, true,  true,  false,
    rs6000_handle_longcall_attribute, NULL },
  { "ms_struct",  0, 0, false, false, false, false,
    rs6000_handle_struct_attribute, NULL },
  { "gcc_struct", 0, 0, false, false, false, false,
    rs6000_handle_struct_attribute, NULL },
#ifdef SUBTARGET_ATTRIBUTE_TABLE
  SUBTARGET_ATTRIBUTE_TABLE,
#endif
  { NULL,         0, 0, false, false, false, false, NULL, NULL }
};
\f
#ifndef TARGET_PROFILE_KERNEL
#define TARGET_PROFILE_KERNEL 0
#endif
\f
/* Initialize the GCC target structure.  */
#undef TARGET_ATTRIBUTE_TABLE
#define TARGET_ATTRIBUTE_TABLE rs6000_attribute_table
#undef TARGET_SET_DEFAULT_TYPE_ATTRIBUTES
#define TARGET_SET_DEFAULT_TYPE_ATTRIBUTES rs6000_set_default_type_attributes
#undef TARGET_ATTRIBUTE_TAKES_IDENTIFIER_P
#define TARGET_ATTRIBUTE_TAKES_IDENTIFIER_P rs6000_attribute_takes_identifier_p

#undef TARGET_ASM_ALIGNED_DI_OP
#define TARGET_ASM_ALIGNED_DI_OP DOUBLE_INT_ASM_OP

/* Default unaligned ops are only provided for ELF.  Find the ops needed
   for non-ELF systems.  */
#ifndef OBJECT_FORMAT_ELF
#if TARGET_XCOFF
/* For XCOFF.  rs6000_assemble_integer will handle unaligned DIs on
   64-bit targets.  */
#undef TARGET_ASM_UNALIGNED_HI_OP
#define TARGET_ASM_UNALIGNED_HI_OP "\t.vbyte\t2,"
#undef TARGET_ASM_UNALIGNED_SI_OP
#define TARGET_ASM_UNALIGNED_SI_OP "\t.vbyte\t4,"
#undef TARGET_ASM_UNALIGNED_DI_OP
#define TARGET_ASM_UNALIGNED_DI_OP "\t.vbyte\t8,"
#else
/* For Darwin.  */
#undef TARGET_ASM_UNALIGNED_HI_OP
#define TARGET_ASM_UNALIGNED_HI_OP "\t.short\t"
#undef TARGET_ASM_UNALIGNED_SI_OP
#define TARGET_ASM_UNALIGNED_SI_OP "\t.long\t"
#undef TARGET_ASM_UNALIGNED_DI_OP
#define TARGET_ASM_UNALIGNED_DI_OP "\t.quad\t"
#undef TARGET_ASM_ALIGNED_DI_OP
#define TARGET_ASM_ALIGNED_DI_OP "\t.quad\t"
#endif
#endif

/* This hook deals with fixups for relocatable code and DI-mode objects
   in 64-bit code.  */
#undef TARGET_ASM_INTEGER
#define TARGET_ASM_INTEGER rs6000_assemble_integer

#if defined (HAVE_GAS_HIDDEN) && !TARGET_MACHO
#undef TARGET_ASM_ASSEMBLE_VISIBILITY
#define TARGET_ASM_ASSEMBLE_VISIBILITY rs6000_assemble_visibility
#endif

#undef TARGET_SET_UP_BY_PROLOGUE
#define TARGET_SET_UP_BY_PROLOGUE rs6000_set_up_by_prologue

#undef TARGET_SHRINK_WRAP_GET_SEPARATE_COMPONENTS
#define TARGET_SHRINK_WRAP_GET_SEPARATE_COMPONENTS rs6000_get_separate_components
#undef TARGET_SHRINK_WRAP_COMPONENTS_FOR_BB
#define TARGET_SHRINK_WRAP_COMPONENTS_FOR_BB rs6000_components_for_bb
#undef TARGET_SHRINK_WRAP_DISQUALIFY_COMPONENTS
#define TARGET_SHRINK_WRAP_DISQUALIFY_COMPONENTS rs6000_disqualify_components
#undef TARGET_SHRINK_WRAP_EMIT_PROLOGUE_COMPONENTS
#define TARGET_SHRINK_WRAP_EMIT_PROLOGUE_COMPONENTS rs6000_emit_prologue_components
#undef TARGET_SHRINK_WRAP_EMIT_EPILOGUE_COMPONENTS
#define TARGET_SHRINK_WRAP_EMIT_EPILOGUE_COMPONENTS rs6000_emit_epilogue_components
#undef TARGET_SHRINK_WRAP_SET_HANDLED_COMPONENTS
#define TARGET_SHRINK_WRAP_SET_HANDLED_COMPONENTS rs6000_set_handled_components

#undef TARGET_EXTRA_LIVE_ON_ENTRY
#define TARGET_EXTRA_LIVE_ON_ENTRY rs6000_live_on_entry

#undef TARGET_INTERNAL_ARG_POINTER
#define TARGET_INTERNAL_ARG_POINTER rs6000_internal_arg_pointer

#undef TARGET_HAVE_TLS
#define TARGET_HAVE_TLS HAVE_AS_TLS

#undef TARGET_CANNOT_FORCE_CONST_MEM
#define TARGET_CANNOT_FORCE_CONST_MEM rs6000_cannot_force_const_mem

#undef TARGET_DELEGITIMIZE_ADDRESS
#define TARGET_DELEGITIMIZE_ADDRESS rs6000_delegitimize_address

#undef TARGET_CONST_NOT_OK_FOR_DEBUG_P
#define TARGET_CONST_NOT_OK_FOR_DEBUG_P rs6000_const_not_ok_for_debug_p

#undef TARGET_LEGITIMATE_COMBINED_INSN
#define TARGET_LEGITIMATE_COMBINED_INSN rs6000_legitimate_combined_insn

#undef TARGET_ASM_FUNCTION_PROLOGUE
#define TARGET_ASM_FUNCTION_PROLOGUE rs6000_output_function_prologue
#undef TARGET_ASM_FUNCTION_EPILOGUE
#define TARGET_ASM_FUNCTION_EPILOGUE rs6000_output_function_epilogue

#undef TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA
#define TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA rs6000_output_addr_const_extra

#undef TARGET_LEGITIMIZE_ADDRESS
#define TARGET_LEGITIMIZE_ADDRESS rs6000_legitimize_address

#undef TARGET_SCHED_VARIABLE_ISSUE
#define TARGET_SCHED_VARIABLE_ISSUE rs6000_variable_issue

#undef TARGET_SCHED_ISSUE_RATE
#define TARGET_SCHED_ISSUE_RATE rs6000_issue_rate
#undef TARGET_SCHED_ADJUST_COST
#define TARGET_SCHED_ADJUST_COST rs6000_adjust_cost
#undef TARGET_SCHED_ADJUST_PRIORITY
#define TARGET_SCHED_ADJUST_PRIORITY rs6000_adjust_priority
#undef TARGET_SCHED_IS_COSTLY_DEPENDENCE
#define TARGET_SCHED_IS_COSTLY_DEPENDENCE rs6000_is_costly_dependence
#undef TARGET_SCHED_INIT
#define TARGET_SCHED_INIT rs6000_sched_init
#undef TARGET_SCHED_FINISH
#define TARGET_SCHED_FINISH rs6000_sched_finish
#undef TARGET_SCHED_REORDER
#define TARGET_SCHED_REORDER rs6000_sched_reorder
#undef TARGET_SCHED_REORDER2
#define TARGET_SCHED_REORDER2 rs6000_sched_reorder2

#undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD
#define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD rs6000_use_sched_lookahead

#undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD_GUARD
#define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD_GUARD rs6000_use_sched_lookahead_guard

#undef TARGET_SCHED_ALLOC_SCHED_CONTEXT
#define TARGET_SCHED_ALLOC_SCHED_CONTEXT rs6000_alloc_sched_context
#undef TARGET_SCHED_INIT_SCHED_CONTEXT
#define TARGET_SCHED_INIT_SCHED_CONTEXT rs6000_init_sched_context
#undef TARGET_SCHED_SET_SCHED_CONTEXT
#define TARGET_SCHED_SET_SCHED_CONTEXT rs6000_set_sched_context
#undef TARGET_SCHED_FREE_SCHED_CONTEXT
#define TARGET_SCHED_FREE_SCHED_CONTEXT rs6000_free_sched_context

#undef TARGET_SCHED_CAN_SPECULATE_INSN
#define TARGET_SCHED_CAN_SPECULATE_INSN rs6000_sched_can_speculate_insn

#undef TARGET_VECTORIZE_BUILTIN_MASK_FOR_LOAD
#define TARGET_VECTORIZE_BUILTIN_MASK_FOR_LOAD rs6000_builtin_mask_for_load
#undef TARGET_VECTORIZE_SUPPORT_VECTOR_MISALIGNMENT
#define TARGET_VECTORIZE_SUPPORT_VECTOR_MISALIGNMENT \
  rs6000_builtin_support_vector_misalignment
#undef TARGET_VECTORIZE_VECTOR_ALIGNMENT_REACHABLE
#define TARGET_VECTORIZE_VECTOR_ALIGNMENT_REACHABLE rs6000_vector_alignment_reachable
#undef TARGET_VECTORIZE_BUILTIN_VECTORIZATION_COST
#define TARGET_VECTORIZE_BUILTIN_VECTORIZATION_COST \
  rs6000_builtin_vectorization_cost
#undef TARGET_VECTORIZE_PREFERRED_SIMD_MODE
#define TARGET_VECTORIZE_PREFERRED_SIMD_MODE \
  rs6000_preferred_simd_mode
#undef TARGET_VECTORIZE_INIT_COST
#define TARGET_VECTORIZE_INIT_COST rs6000_init_cost
#undef TARGET_VECTORIZE_ADD_STMT_COST
#define TARGET_VECTORIZE_ADD_STMT_COST rs6000_add_stmt_cost
#undef TARGET_VECTORIZE_FINISH_COST
#define TARGET_VECTORIZE_FINISH_COST rs6000_finish_cost
#undef TARGET_VECTORIZE_DESTROY_COST_DATA
#define TARGET_VECTORIZE_DESTROY_COST_DATA rs6000_destroy_cost_data

#undef TARGET_INIT_BUILTINS
#define TARGET_INIT_BUILTINS rs6000_init_builtins
#undef TARGET_BUILTIN_DECL
#define TARGET_BUILTIN_DECL rs6000_builtin_decl

#undef TARGET_FOLD_BUILTIN
#define TARGET_FOLD_BUILTIN rs6000_fold_builtin
#undef TARGET_GIMPLE_FOLD_BUILTIN
#define TARGET_GIMPLE_FOLD_BUILTIN rs6000_gimple_fold_builtin

#undef TARGET_EXPAND_BUILTIN
#define TARGET_EXPAND_BUILTIN rs6000_expand_builtin

#undef TARGET_MANGLE_TYPE
#define TARGET_MANGLE_TYPE rs6000_mangle_type

#undef TARGET_INIT_LIBFUNCS
#define TARGET_INIT_LIBFUNCS rs6000_init_libfuncs

#if TARGET_MACHO
#undef TARGET_BINDS_LOCAL_P
#define TARGET_BINDS_LOCAL_P darwin_binds_local_p
#endif

#undef TARGET_MS_BITFIELD_LAYOUT_P
#define TARGET_MS_BITFIELD_LAYOUT_P rs6000_ms_bitfield_layout_p

#undef TARGET_ASM_OUTPUT_MI_THUNK
#define TARGET_ASM_OUTPUT_MI_THUNK rs6000_output_mi_thunk

#undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
#define TARGET_ASM_CAN_OUTPUT_MI_THUNK hook_bool_const_tree_hwi_hwi_const_tree_true

#undef TARGET_FUNCTION_OK_FOR_SIBCALL
#define TARGET_FUNCTION_OK_FOR_SIBCALL rs6000_function_ok_for_sibcall

#undef TARGET_REGISTER_MOVE_COST
#define TARGET_REGISTER_MOVE_COST rs6000_register_move_cost
#undef TARGET_MEMORY_MOVE_COST
#define TARGET_MEMORY_MOVE_COST rs6000_memory_move_cost
#undef TARGET_IRA_CHANGE_PSEUDO_ALLOCNO_CLASS
#define TARGET_IRA_CHANGE_PSEUDO_ALLOCNO_CLASS \
  rs6000_ira_change_pseudo_allocno_class
#undef TARGET_CANNOT_COPY_INSN_P
#define TARGET_CANNOT_COPY_INSN_P rs6000_cannot_copy_insn_p
#undef TARGET_RTX_COSTS
#define TARGET_RTX_COSTS rs6000_rtx_costs
#undef TARGET_ADDRESS_COST
#define TARGET_ADDRESS_COST hook_int_rtx_mode_as_bool_0
#undef TARGET_INSN_COST
#define TARGET_INSN_COST rs6000_insn_cost

#undef TARGET_INIT_DWARF_REG_SIZES_EXTRA
#define TARGET_INIT_DWARF_REG_SIZES_EXTRA rs6000_init_dwarf_reg_sizes_extra

#undef TARGET_PROMOTE_FUNCTION_MODE
#define TARGET_PROMOTE_FUNCTION_MODE rs6000_promote_function_mode

#undef TARGET_RETURN_IN_MEMORY
#define TARGET_RETURN_IN_MEMORY rs6000_return_in_memory

#undef TARGET_RETURN_IN_MSB
#define TARGET_RETURN_IN_MSB rs6000_return_in_msb

#undef TARGET_SETUP_INCOMING_VARARGS
#define TARGET_SETUP_INCOMING_VARARGS setup_incoming_varargs

/* Always strict argument naming on rs6000.  */
#undef TARGET_STRICT_ARGUMENT_NAMING
#define TARGET_STRICT_ARGUMENT_NAMING hook_bool_CUMULATIVE_ARGS_true
#undef TARGET_PRETEND_OUTGOING_VARARGS_NAMED
#define TARGET_PRETEND_OUTGOING_VARARGS_NAMED hook_bool_CUMULATIVE_ARGS_true
#undef TARGET_SPLIT_COMPLEX_ARG
#define TARGET_SPLIT_COMPLEX_ARG hook_bool_const_tree_true
#undef TARGET_MUST_PASS_IN_STACK
#define TARGET_MUST_PASS_IN_STACK rs6000_must_pass_in_stack
#undef TARGET_PASS_BY_REFERENCE
#define TARGET_PASS_BY_REFERENCE rs6000_pass_by_reference
#undef TARGET_ARG_PARTIAL_BYTES
1693 #define TARGET_ARG_PARTIAL_BYTES rs6000_arg_partial_bytes
1694 #undef TARGET_FUNCTION_ARG_ADVANCE
1695 #define TARGET_FUNCTION_ARG_ADVANCE rs6000_function_arg_advance
1696 #undef TARGET_FUNCTION_ARG
1697 #define TARGET_FUNCTION_ARG rs6000_function_arg
1698 #undef TARGET_FUNCTION_ARG_PADDING
1699 #define TARGET_FUNCTION_ARG_PADDING rs6000_function_arg_padding
1700 #undef TARGET_FUNCTION_ARG_BOUNDARY
1701 #define TARGET_FUNCTION_ARG_BOUNDARY rs6000_function_arg_boundary
1702
1703 #undef TARGET_BUILD_BUILTIN_VA_LIST
1704 #define TARGET_BUILD_BUILTIN_VA_LIST rs6000_build_builtin_va_list
1705
1706 #undef TARGET_EXPAND_BUILTIN_VA_START
1707 #define TARGET_EXPAND_BUILTIN_VA_START rs6000_va_start
1708
1709 #undef TARGET_GIMPLIFY_VA_ARG_EXPR
1710 #define TARGET_GIMPLIFY_VA_ARG_EXPR rs6000_gimplify_va_arg
1711
1712 #undef TARGET_EH_RETURN_FILTER_MODE
1713 #define TARGET_EH_RETURN_FILTER_MODE rs6000_eh_return_filter_mode
1714
1715 #undef TARGET_TRANSLATE_MODE_ATTRIBUTE
1716 #define TARGET_TRANSLATE_MODE_ATTRIBUTE rs6000_translate_mode_attribute
1717
1718 #undef TARGET_SCALAR_MODE_SUPPORTED_P
1719 #define TARGET_SCALAR_MODE_SUPPORTED_P rs6000_scalar_mode_supported_p
1720
1721 #undef TARGET_VECTOR_MODE_SUPPORTED_P
1722 #define TARGET_VECTOR_MODE_SUPPORTED_P rs6000_vector_mode_supported_p
1723
1724 #undef TARGET_FLOATN_MODE
1725 #define TARGET_FLOATN_MODE rs6000_floatn_mode
1726
1727 #undef TARGET_INVALID_ARG_FOR_UNPROTOTYPED_FN
1728 #define TARGET_INVALID_ARG_FOR_UNPROTOTYPED_FN invalid_arg_for_unprototyped_fn
1729
1730 #undef TARGET_MD_ASM_ADJUST
1731 #define TARGET_MD_ASM_ADJUST rs6000_md_asm_adjust
1732
1733 #undef TARGET_OPTION_OVERRIDE
1734 #define TARGET_OPTION_OVERRIDE rs6000_option_override
1735
1736 #undef TARGET_VECTORIZE_BUILTIN_VECTORIZED_FUNCTION
1737 #define TARGET_VECTORIZE_BUILTIN_VECTORIZED_FUNCTION \
1738 rs6000_builtin_vectorized_function
1739
1740 #undef TARGET_VECTORIZE_BUILTIN_MD_VECTORIZED_FUNCTION
1741 #define TARGET_VECTORIZE_BUILTIN_MD_VECTORIZED_FUNCTION \
1742 rs6000_builtin_md_vectorized_function
1743
1744 #undef TARGET_STACK_PROTECT_GUARD
1745 #define TARGET_STACK_PROTECT_GUARD rs6000_init_stack_protect_guard
1746
1747 #if !TARGET_MACHO
1748 #undef TARGET_STACK_PROTECT_FAIL
1749 #define TARGET_STACK_PROTECT_FAIL rs6000_stack_protect_fail
1750 #endif
1751
1752 #ifdef HAVE_AS_TLS
1753 #undef TARGET_ASM_OUTPUT_DWARF_DTPREL
1754 #define TARGET_ASM_OUTPUT_DWARF_DTPREL rs6000_output_dwarf_dtprel
1755 #endif
1756
1757 /* Use a 32-bit anchor range. This leads to sequences like:
1758
1759 addis tmp,anchor,high
1760 add dest,tmp,low
1761
1762 where tmp itself acts as an anchor, and can be shared between
1763 accesses to the same 64k page. */
1764 #undef TARGET_MIN_ANCHOR_OFFSET
1765 #define TARGET_MIN_ANCHOR_OFFSET -0x7fffffff - 1
1766 #undef TARGET_MAX_ANCHOR_OFFSET
1767 #define TARGET_MAX_ANCHOR_OFFSET 0x7fffffff
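/* Editor's illustration (not from the original source): with section
   anchors the same ADDIS result can serve several nearby accesses, e.g.

	addis 9,2,.LANCHOR0@toc@ha
	lwz 10,.LANCHOR0@toc@l(9)
	lwz 11,.LANCHOR0@toc@l+4(9)

   where r9 is the shared anchor for the surrounding 64k page.  */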
1768 #undef TARGET_USE_BLOCKS_FOR_CONSTANT_P
1769 #define TARGET_USE_BLOCKS_FOR_CONSTANT_P rs6000_use_blocks_for_constant_p
1770 #undef TARGET_USE_BLOCKS_FOR_DECL_P
1771 #define TARGET_USE_BLOCKS_FOR_DECL_P rs6000_use_blocks_for_decl_p
1772
1773 #undef TARGET_BUILTIN_RECIPROCAL
1774 #define TARGET_BUILTIN_RECIPROCAL rs6000_builtin_reciprocal
1775
1776 #undef TARGET_SECONDARY_RELOAD
1777 #define TARGET_SECONDARY_RELOAD rs6000_secondary_reload
1778 #undef TARGET_SECONDARY_MEMORY_NEEDED
1779 #define TARGET_SECONDARY_MEMORY_NEEDED rs6000_secondary_memory_needed
1780 #undef TARGET_SECONDARY_MEMORY_NEEDED_MODE
1781 #define TARGET_SECONDARY_MEMORY_NEEDED_MODE rs6000_secondary_memory_needed_mode
1782
1783 #undef TARGET_LEGITIMATE_ADDRESS_P
1784 #define TARGET_LEGITIMATE_ADDRESS_P rs6000_legitimate_address_p
1785
1786 #undef TARGET_MODE_DEPENDENT_ADDRESS_P
1787 #define TARGET_MODE_DEPENDENT_ADDRESS_P rs6000_mode_dependent_address_p
1788
1789 #undef TARGET_COMPUTE_PRESSURE_CLASSES
1790 #define TARGET_COMPUTE_PRESSURE_CLASSES rs6000_compute_pressure_classes
1791
1792 #undef TARGET_CAN_ELIMINATE
1793 #define TARGET_CAN_ELIMINATE rs6000_can_eliminate
1794
1795 #undef TARGET_CONDITIONAL_REGISTER_USAGE
1796 #define TARGET_CONDITIONAL_REGISTER_USAGE rs6000_conditional_register_usage
1797
1798 #undef TARGET_SCHED_REASSOCIATION_WIDTH
1799 #define TARGET_SCHED_REASSOCIATION_WIDTH rs6000_reassociation_width
1800
1801 #undef TARGET_TRAMPOLINE_INIT
1802 #define TARGET_TRAMPOLINE_INIT rs6000_trampoline_init
1803
1804 #undef TARGET_FUNCTION_VALUE
1805 #define TARGET_FUNCTION_VALUE rs6000_function_value
1806
1807 #undef TARGET_OPTION_VALID_ATTRIBUTE_P
1808 #define TARGET_OPTION_VALID_ATTRIBUTE_P rs6000_valid_attribute_p
1809
1810 #undef TARGET_OPTION_SAVE
1811 #define TARGET_OPTION_SAVE rs6000_function_specific_save
1812
1813 #undef TARGET_OPTION_RESTORE
1814 #define TARGET_OPTION_RESTORE rs6000_function_specific_restore
1815
1816 #undef TARGET_OPTION_PRINT
1817 #define TARGET_OPTION_PRINT rs6000_function_specific_print
1818
1819 #undef TARGET_CAN_INLINE_P
1820 #define TARGET_CAN_INLINE_P rs6000_can_inline_p
1821
1822 #undef TARGET_SET_CURRENT_FUNCTION
1823 #define TARGET_SET_CURRENT_FUNCTION rs6000_set_current_function
1824
1825 #undef TARGET_LEGITIMATE_CONSTANT_P
1826 #define TARGET_LEGITIMATE_CONSTANT_P rs6000_legitimate_constant_p
1827
1828 #undef TARGET_VECTORIZE_VEC_PERM_CONST
1829 #define TARGET_VECTORIZE_VEC_PERM_CONST rs6000_vectorize_vec_perm_const
1830
1831 #undef TARGET_CAN_USE_DOLOOP_P
1832 #define TARGET_CAN_USE_DOLOOP_P can_use_doloop_if_innermost
1833
1834 #undef TARGET_PREDICT_DOLOOP_P
1835 #define TARGET_PREDICT_DOLOOP_P rs6000_predict_doloop_p
1836
1837 #undef TARGET_ATOMIC_ASSIGN_EXPAND_FENV
1838 #define TARGET_ATOMIC_ASSIGN_EXPAND_FENV rs6000_atomic_assign_expand_fenv
1839
1840 #undef TARGET_LIBGCC_CMP_RETURN_MODE
1841 #define TARGET_LIBGCC_CMP_RETURN_MODE rs6000_abi_word_mode
1842 #undef TARGET_LIBGCC_SHIFT_COUNT_MODE
1843 #define TARGET_LIBGCC_SHIFT_COUNT_MODE rs6000_abi_word_mode
1844 #undef TARGET_UNWIND_WORD_MODE
1845 #define TARGET_UNWIND_WORD_MODE rs6000_abi_word_mode
1846
1847 #undef TARGET_OFFLOAD_OPTIONS
1848 #define TARGET_OFFLOAD_OPTIONS rs6000_offload_options
1849
1850 #undef TARGET_C_MODE_FOR_SUFFIX
1851 #define TARGET_C_MODE_FOR_SUFFIX rs6000_c_mode_for_suffix
1852
1853 #undef TARGET_INVALID_BINARY_OP
1854 #define TARGET_INVALID_BINARY_OP rs6000_invalid_binary_op
1855
1856 #undef TARGET_OPTAB_SUPPORTED_P
1857 #define TARGET_OPTAB_SUPPORTED_P rs6000_optab_supported_p
1858
1859 #undef TARGET_CUSTOM_FUNCTION_DESCRIPTORS
1860 #define TARGET_CUSTOM_FUNCTION_DESCRIPTORS 1
1861
1862 #undef TARGET_COMPARE_VERSION_PRIORITY
1863 #define TARGET_COMPARE_VERSION_PRIORITY rs6000_compare_version_priority
1864
1865 #undef TARGET_GENERATE_VERSION_DISPATCHER_BODY
1866 #define TARGET_GENERATE_VERSION_DISPATCHER_BODY \
1867 rs6000_generate_version_dispatcher_body
1868
1869 #undef TARGET_GET_FUNCTION_VERSIONS_DISPATCHER
1870 #define TARGET_GET_FUNCTION_VERSIONS_DISPATCHER \
1871 rs6000_get_function_versions_dispatcher
1872
1873 #undef TARGET_OPTION_FUNCTION_VERSIONS
1874 #define TARGET_OPTION_FUNCTION_VERSIONS common_function_versions
1875
1876 #undef TARGET_HARD_REGNO_NREGS
1877 #define TARGET_HARD_REGNO_NREGS rs6000_hard_regno_nregs_hook
1878 #undef TARGET_HARD_REGNO_MODE_OK
1879 #define TARGET_HARD_REGNO_MODE_OK rs6000_hard_regno_mode_ok
1880
1881 #undef TARGET_MODES_TIEABLE_P
1882 #define TARGET_MODES_TIEABLE_P rs6000_modes_tieable_p
1883
1884 #undef TARGET_HARD_REGNO_CALL_PART_CLOBBERED
1885 #define TARGET_HARD_REGNO_CALL_PART_CLOBBERED \
1886 rs6000_hard_regno_call_part_clobbered
1887
1888 #undef TARGET_SLOW_UNALIGNED_ACCESS
1889 #define TARGET_SLOW_UNALIGNED_ACCESS rs6000_slow_unaligned_access
1890
1891 #undef TARGET_CAN_CHANGE_MODE_CLASS
1892 #define TARGET_CAN_CHANGE_MODE_CLASS rs6000_can_change_mode_class
1893
1894 #undef TARGET_CONSTANT_ALIGNMENT
1895 #define TARGET_CONSTANT_ALIGNMENT rs6000_constant_alignment
1896
1897 #undef TARGET_STARTING_FRAME_OFFSET
1898 #define TARGET_STARTING_FRAME_OFFSET rs6000_starting_frame_offset
1899
1900 #if TARGET_ELF && RS6000_WEAK
1901 #undef TARGET_ASM_GLOBALIZE_DECL_NAME
1902 #define TARGET_ASM_GLOBALIZE_DECL_NAME rs6000_globalize_decl_name
1903 #endif
1904
1905 #undef TARGET_SETJMP_PRESERVES_NONVOLATILE_REGS_P
1906 #define TARGET_SETJMP_PRESERVES_NONVOLATILE_REGS_P hook_bool_void_true
1907
1908 #undef TARGET_MANGLE_DECL_ASSEMBLER_NAME
1909 #define TARGET_MANGLE_DECL_ASSEMBLER_NAME rs6000_mangle_decl_assembler_name
1910 \f
1911
1912 /* Processor table. */
1913 struct rs6000_ptt
1914 {
1915 const char *const name; /* Canonical processor name. */
1916 const enum processor_type processor; /* Processor type enum value. */
1917 const HOST_WIDE_INT target_enable; /* Target flags to enable. */
1918 };
1919
1920 static struct rs6000_ptt const processor_target_table[] =
1921 {
1922 #define RS6000_CPU(NAME, CPU, FLAGS) { NAME, CPU, FLAGS },
1923 #include "rs6000-cpus.def"
1924 #undef RS6000_CPU
1925 };
1926
1927 /* Look up a processor name for -mcpu=xxx and -mtune=xxx. Return -1 if the
1928 name is invalid. */
1929
1930 static int
1931 rs6000_cpu_name_lookup (const char *name)
1932 {
1933 size_t i;
1934
1935 if (name != NULL)
1936 {
1937 for (i = 0; i < ARRAY_SIZE (processor_target_table); i++)
1938 if (! strcmp (name, processor_target_table[i].name))
1939 return (int)i;
1940 }
1941
1942 return -1;
1943 }
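
/* A minimal usage sketch (editor's illustration, not part of the original
   file): validating a -mcpu=/-mtune= argument.  The helper name is
   hypothetical; the real option handling appears later in this file.  */

static inline bool
example_cpu_name_valid_p (const char *name)
{
  /* rs6000_cpu_name_lookup returns the table index, or -1 if unknown.  */
  return rs6000_cpu_name_lookup (name) >= 0;
}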
1944
1945 \f
1946 /* Return number of consecutive hard regs needed starting at reg REGNO
1947 to hold something of mode MODE.
1948 This is ordinarily the length in words of a value of mode MODE
1949 but can be less for certain modes in special long registers.
1950
1951 POWER and PowerPC GPRs hold 32 bits worth;
1952 PowerPC64 GPRs and FPRs hold 64 bits worth. */
1953
1954 static int
1955 rs6000_hard_regno_nregs_internal (int regno, machine_mode mode)
1956 {
1957 unsigned HOST_WIDE_INT reg_size;
1958
1959 /* 128-bit floating point usually takes 2 registers, unless it is IEEE
1960 128-bit floating point that can go in vector registers, which has VSX
1961 memory addressing. */
1962 if (FP_REGNO_P (regno))
1963 reg_size = (VECTOR_MEM_VSX_P (mode) || FLOAT128_VECTOR_P (mode)
1964 ? UNITS_PER_VSX_WORD
1965 : UNITS_PER_FP_WORD);
1966
1967 else if (ALTIVEC_REGNO_P (regno))
1968 reg_size = UNITS_PER_ALTIVEC_WORD;
1969
1970 else
1971 reg_size = UNITS_PER_WORD;
1972
1973 return (GET_MODE_SIZE (mode) + reg_size - 1) / reg_size;
1974 }
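
/* Worked example of the ceiling division above (editor's note): a 16-byte
   V2DFmode value in a VSX-capable FPR needs (16 + 16 - 1) / 16 = 1
   register, while the same 16 bytes held in 32-bit GPRs need
   (16 + 4 - 1) / 4 = 4 registers.  */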
1975
1976 /* Value is 1 if hard register REGNO can hold a value of machine-mode
1977 MODE. */
1978 static int
1979 rs6000_hard_regno_mode_ok_uncached (int regno, machine_mode mode)
1980 {
1981 int last_regno = regno + rs6000_hard_regno_nregs[mode][regno] - 1;
1982
1983 if (COMPLEX_MODE_P (mode))
1984 mode = GET_MODE_INNER (mode);
1985
1986 /* PTImode can only go in GPRs. Quad word memory operations require even/odd
1987 register combinations, and PTImode is the mode we use for them. Don't
1988 allow quad words in the argument or frame pointer registers, just
1989 registers 0..31. */
1990 if (mode == PTImode)
1991 return (IN_RANGE (regno, FIRST_GPR_REGNO, LAST_GPR_REGNO)
1992 && IN_RANGE (last_regno, FIRST_GPR_REGNO, LAST_GPR_REGNO)
1993 && ((regno & 1) == 0));
1994
1995 /* VSX registers that overlap the FPR registers are larger than the FPRs on
1996 non-VSX implementations. Don't allow an item to be split between a FP register
1997 and an Altivec register. Allow TImode in all VSX registers if the user
1998 asked for it. */
1999 if (TARGET_VSX && VSX_REGNO_P (regno)
2000 && (VECTOR_MEM_VSX_P (mode)
2001 || FLOAT128_VECTOR_P (mode)
2002 || reg_addr[mode].scalar_in_vmx_p
2003 || mode == TImode
2004 || (TARGET_VADDUQM && mode == V1TImode)))
2005 {
2006 if (FP_REGNO_P (regno))
2007 return FP_REGNO_P (last_regno);
2008
2009 if (ALTIVEC_REGNO_P (regno))
2010 {
2011 if (GET_MODE_SIZE (mode) != 16 && !reg_addr[mode].scalar_in_vmx_p)
2012 return 0;
2013
2014 return ALTIVEC_REGNO_P (last_regno);
2015 }
2016 }
2017
2018 /* The GPRs can hold any mode, but values bigger than one register
2019 cannot go past R31. */
2020 if (INT_REGNO_P (regno))
2021 return INT_REGNO_P (last_regno);
2022
2023 /* The float registers (except for VSX vector modes) can only hold floating
2024 modes and DImode (plus SImode on ISA 2.07 and QI/HImode on ISA 3.0). */
2025 if (FP_REGNO_P (regno))
2026 {
2027 if (FLOAT128_VECTOR_P (mode))
2028 return false;
2029
2030 if (SCALAR_FLOAT_MODE_P (mode)
2031 && (mode != TDmode || (regno % 2) == 0)
2032 && FP_REGNO_P (last_regno))
2033 return 1;
2034
2035 if (GET_MODE_CLASS (mode) == MODE_INT)
2036 {
2037 if (GET_MODE_SIZE (mode) == UNITS_PER_FP_WORD)
2038 return 1;
2039
2040 if (TARGET_P8_VECTOR && (mode == SImode))
2041 return 1;
2042
2043 if (TARGET_P9_VECTOR && (mode == QImode || mode == HImode))
2044 return 1;
2045 }
2046
2047 return 0;
2048 }
2049
2050 /* The CR register can only hold CC modes. */
2051 if (CR_REGNO_P (regno))
2052 return GET_MODE_CLASS (mode) == MODE_CC;
2053
2054 if (CA_REGNO_P (regno))
2055 return mode == Pmode || mode == SImode;
2056
2057 /* AltiVec modes can only go in AltiVec registers. */
2058 if (ALTIVEC_REGNO_P (regno))
2059 return (VECTOR_MEM_ALTIVEC_OR_VSX_P (mode)
2060 || mode == V1TImode);
2061
2062 /* We cannot put non-VSX TImode or PTImode anywhere except in the general
2063 registers, and it must be able to fit within the register set. */
2064
2065 return GET_MODE_SIZE (mode) <= UNITS_PER_WORD;
2066 }
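
/* Editor's note, concrete cases of the checks above: TDmode is accepted in
   the FPRs only at an even register number; PTImode needs an even/odd GPR
   pair; CC modes go only in the CR registers.  */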
2067
2068 /* Implement TARGET_HARD_REGNO_NREGS. */
2069
2070 static unsigned int
2071 rs6000_hard_regno_nregs_hook (unsigned int regno, machine_mode mode)
2072 {
2073 return rs6000_hard_regno_nregs[mode][regno];
2074 }
2075
2076 /* Implement TARGET_HARD_REGNO_MODE_OK. */
2077
2078 static bool
2079 rs6000_hard_regno_mode_ok (unsigned int regno, machine_mode mode)
2080 {
2081 return rs6000_hard_regno_mode_ok_p[mode][regno];
2082 }
2083
2084 /* Implement TARGET_MODES_TIEABLE_P.
2085
2086 PTImode cannot tie with other modes because PTImode is restricted to even
2087 GPRs, while TImode can go in any GPR as well as VSX registers (PR
2088 57744).
2089
2090 Altivec/VSX vector tests were moved ahead of scalar float mode, so that IEEE
2091 128-bit floating point on VSX systems ties with other vectors. */
2092
2093 static bool
2094 rs6000_modes_tieable_p (machine_mode mode1, machine_mode mode2)
2095 {
2096 if (mode1 == PTImode)
2097 return mode2 == PTImode;
2098 if (mode2 == PTImode)
2099 return false;
2100
2101 if (ALTIVEC_OR_VSX_VECTOR_MODE (mode1))
2102 return ALTIVEC_OR_VSX_VECTOR_MODE (mode2);
2103 if (ALTIVEC_OR_VSX_VECTOR_MODE (mode2))
2104 return false;
2105
2106 if (SCALAR_FLOAT_MODE_P (mode1))
2107 return SCALAR_FLOAT_MODE_P (mode2);
2108 if (SCALAR_FLOAT_MODE_P (mode2))
2109 return false;
2110
2111 if (GET_MODE_CLASS (mode1) == MODE_CC)
2112 return GET_MODE_CLASS (mode2) == MODE_CC;
2113 if (GET_MODE_CLASS (mode2) == MODE_CC)
2114 return false;
2115
2116 return true;
2117 }
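
/* Editor's note on the ordering above: SFmode and DFmode tie (both scalar
   float); TImode and PTImode never tie; and because the vector test comes
   first, IEEE 128-bit floating point ties with the other Altivec/VSX vector
   modes on VSX systems rather than with the scalar floats.  */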
2118
2119 /* Implement TARGET_HARD_REGNO_CALL_PART_CLOBBERED. */
2120
2121 static bool
2122 rs6000_hard_regno_call_part_clobbered (rtx_insn *insn ATTRIBUTE_UNUSED,
2123 unsigned int regno, machine_mode mode)
2124 {
2125 if (TARGET_32BIT
2126 && TARGET_POWERPC64
2127 && GET_MODE_SIZE (mode) > 4
2128 && INT_REGNO_P (regno))
2129 return true;
2130
2131 if (TARGET_VSX
2132 && FP_REGNO_P (regno)
2133 && GET_MODE_SIZE (mode) > 8
2134 && !FLOAT128_2REG_P (mode))
2135 return true;
2136
2137 return false;
2138 }
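
/* Editor's note with a concrete case: under -m32 -mpowerpc64 a DImode value
   occupies one 64-bit GPR, but the 32-bit ABI only preserves the low 32
   bits of nonvolatile GPRs across calls, so the high half is clobbered.
   Similarly, with VSX only the low 8 bytes of a traditional FPR are saved,
   so any wider mode kept there is partially clobbered.  */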
2139
2140 /* Print interesting facts about registers. */
2141 static void
2142 rs6000_debug_reg_print (int first_regno, int last_regno, const char *reg_name)
2143 {
2144 int r, m;
2145
2146 for (r = first_regno; r <= last_regno; ++r)
2147 {
2148 const char *comma = "";
2149 int len;
2150
2151 if (first_regno == last_regno)
2152 fprintf (stderr, "%s:\t", reg_name);
2153 else
2154 fprintf (stderr, "%s%d:\t", reg_name, r - first_regno);
2155
2156 len = 8;
2157 for (m = 0; m < NUM_MACHINE_MODES; ++m)
2158 if (rs6000_hard_regno_mode_ok_p[m][r] && rs6000_hard_regno_nregs[m][r])
2159 {
2160 if (len > 70)
2161 {
2162 fprintf (stderr, ",\n\t");
2163 len = 8;
2164 comma = "";
2165 }
2166
2167 if (rs6000_hard_regno_nregs[m][r] > 1)
2168 len += fprintf (stderr, "%s%s/%d", comma, GET_MODE_NAME (m),
2169 rs6000_hard_regno_nregs[m][r]);
2170 else
2171 len += fprintf (stderr, "%s%s", comma, GET_MODE_NAME (m));
2172
2173 comma = ", ";
2174 }
2175
2176 if (call_used_regs[r])
2177 {
2178 if (len > 70)
2179 {
2180 fprintf (stderr, ",\n\t");
2181 len = 8;
2182 comma = "";
2183 }
2184
2185 len += fprintf (stderr, "%s%s", comma, "call-used");
2186 comma = ", ";
2187 }
2188
2189 if (fixed_regs[r])
2190 {
2191 if (len > 70)
2192 {
2193 fprintf (stderr, ",\n\t");
2194 len = 8;
2195 comma = "";
2196 }
2197
2198 len += fprintf (stderr, "%s%s", comma, "fixed");
2199 comma = ", ";
2200 }
2201
2202 if (len > 70)
2203 {
2204 fprintf (stderr, ",\n\t");
2205 comma = "";
2206 }
2207
2208 len += fprintf (stderr, "%sreg-class = %s", comma,
2209 reg_class_names[(int)rs6000_regno_regclass[r]]);
2210 comma = ", ";
2211
2212 if (len > 70)
2213 {
2214 fprintf (stderr, ",\n\t");
2215 comma = "";
2216 }
2217
2218 fprintf (stderr, "%sregno = %d\n", comma, r);
2219 }
2220 }
2221
2222 static const char *
2223 rs6000_debug_vector_unit (enum rs6000_vector v)
2224 {
2225 const char *ret;
2226
2227 switch (v)
2228 {
2229 case VECTOR_NONE: ret = "none"; break;
2230 case VECTOR_ALTIVEC: ret = "altivec"; break;
2231 case VECTOR_VSX: ret = "vsx"; break;
2232 case VECTOR_P8_VECTOR: ret = "p8_vector"; break;
2233 default: ret = "unknown"; break;
2234 }
2235
2236 return ret;
2237 }
2238
2239 /* Inner function printing just the address mask for a particular reload
2240 register class. */
2241 DEBUG_FUNCTION char *
2242 rs6000_debug_addr_mask (addr_mask_type mask, bool keep_spaces)
2243 {
2244 static char ret[8];
2245 char *p = ret;
2246
2247 if ((mask & RELOAD_REG_VALID) != 0)
2248 *p++ = 'v';
2249 else if (keep_spaces)
2250 *p++ = ' ';
2251
2252 if ((mask & RELOAD_REG_MULTIPLE) != 0)
2253 *p++ = 'm';
2254 else if (keep_spaces)
2255 *p++ = ' ';
2256
2257 if ((mask & RELOAD_REG_INDEXED) != 0)
2258 *p++ = 'i';
2259 else if (keep_spaces)
2260 *p++ = ' ';
2261
2262 if ((mask & RELOAD_REG_QUAD_OFFSET) != 0)
2263 *p++ = 'O';
2264 else if ((mask & RELOAD_REG_OFFSET) != 0)
2265 *p++ = 'o';
2266 else if (keep_spaces)
2267 *p++ = ' ';
2268
2269 if ((mask & RELOAD_REG_PRE_INCDEC) != 0)
2270 *p++ = '+';
2271 else if (keep_spaces)
2272 *p++ = ' ';
2273
2274 if ((mask & RELOAD_REG_PRE_MODIFY) != 0)
2275 *p++ = '+';
2276 else if (keep_spaces)
2277 *p++ = ' ';
2278
2279 if ((mask & RELOAD_REG_AND_M16) != 0)
2280 *p++ = '&';
2281 else if (keep_spaces)
2282 *p++ = ' ';
2283
2284 *p = '\0';
2285
2286 return ret;
2287 }
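
/* Example output (editor's illustration): a mask of RELOAD_REG_VALID
   | RELOAD_REG_INDEXED | RELOAD_REG_OFFSET renders as "v io   " when
   KEEP_SPACES is true and as "vio" otherwise.  */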
2288
2289 /* Print the address masks in a human readable fashion. */
2290 DEBUG_FUNCTION void
2291 rs6000_debug_print_mode (ssize_t m)
2292 {
2293 ssize_t rc;
2294 int spaces = 0;
2295
2296 fprintf (stderr, "Mode: %-5s", GET_MODE_NAME (m));
2297 for (rc = 0; rc < N_RELOAD_REG; rc++)
2298 fprintf (stderr, " %s: %s", reload_reg_map[rc].name,
2299 rs6000_debug_addr_mask (reg_addr[m].addr_mask[rc], true));
2300
2301 if ((reg_addr[m].reload_store != CODE_FOR_nothing)
2302 || (reg_addr[m].reload_load != CODE_FOR_nothing))
2303 {
2304 fprintf (stderr, "%*s Reload=%c%c", spaces, "",
2305 (reg_addr[m].reload_store != CODE_FOR_nothing) ? 's' : '*',
2306 (reg_addr[m].reload_load != CODE_FOR_nothing) ? 'l' : '*');
2307 spaces = 0;
2308 }
2309 else
2310 spaces += sizeof (" Reload=sl") - 1;
2311
2312 if (reg_addr[m].scalar_in_vmx_p)
2313 {
2314 fprintf (stderr, "%*s Upper=y", spaces, "");
2315 spaces = 0;
2316 }
2317 else
2318 spaces += sizeof (" Upper=y") - 1;
2319
2320 if (rs6000_vector_unit[m] != VECTOR_NONE
2321 || rs6000_vector_mem[m] != VECTOR_NONE)
2322 {
2323 fprintf (stderr, "%*s vector: arith=%-10s mem=%s",
2324 spaces, "",
2325 rs6000_debug_vector_unit (rs6000_vector_unit[m]),
2326 rs6000_debug_vector_unit (rs6000_vector_mem[m]));
2327 }
2328
2329 fputs ("\n", stderr);
2330 }
2331
2332 #define DEBUG_FMT_ID "%-32s= "
2333 #define DEBUG_FMT_D DEBUG_FMT_ID "%d\n"
2334 #define DEBUG_FMT_WX DEBUG_FMT_ID "%#.12" HOST_WIDE_INT_PRINT "x: "
2335 #define DEBUG_FMT_S DEBUG_FMT_ID "%s\n"
2336
2337 /* Print various interesting information with -mdebug=reg. */
2338 static void
2339 rs6000_debug_reg_global (void)
2340 {
2341 static const char *const tf[2] = { "false", "true" };
2342 const char *nl = (const char *)0;
2343 int m;
2344 size_t m1, m2, v;
2345 char costly_num[20];
2346 char nop_num[20];
2347 char flags_buffer[40];
2348 const char *costly_str;
2349 const char *nop_str;
2350 const char *trace_str;
2351 const char *abi_str;
2352 const char *cmodel_str;
2353 struct cl_target_option cl_opts;
2354
2355 /* Modes we want tieable information on. */
2356 static const machine_mode print_tieable_modes[] = {
2357 QImode,
2358 HImode,
2359 SImode,
2360 DImode,
2361 TImode,
2362 PTImode,
2363 SFmode,
2364 DFmode,
2365 TFmode,
2366 IFmode,
2367 KFmode,
2368 SDmode,
2369 DDmode,
2370 TDmode,
2371 V16QImode,
2372 V8HImode,
2373 V4SImode,
2374 V2DImode,
2375 V1TImode,
2376 V32QImode,
2377 V16HImode,
2378 V8SImode,
2379 V4DImode,
2380 V2TImode,
2381 V4SFmode,
2382 V2DFmode,
2383 V8SFmode,
2384 V4DFmode,
2385 CCmode,
2386 CCUNSmode,
2387 CCEQmode,
2388 };
2389
2390 /* Virtual regs we are interested in. */
2391 static const struct {
2392 int regno; /* register number. */
2393 const char *name; /* register name. */
2394 } virtual_regs[] = {
2395 { STACK_POINTER_REGNUM, "stack pointer:" },
2396 { TOC_REGNUM, "toc: " },
2397 { STATIC_CHAIN_REGNUM, "static chain: " },
2398 { RS6000_PIC_OFFSET_TABLE_REGNUM, "pic offset: " },
2399 { HARD_FRAME_POINTER_REGNUM, "hard frame: " },
2400 { ARG_POINTER_REGNUM, "arg pointer: " },
2401 { FRAME_POINTER_REGNUM, "frame pointer:" },
2402 { FIRST_PSEUDO_REGISTER, "first pseudo: " },
2403 { FIRST_VIRTUAL_REGISTER, "first virtual:" },
2404 { VIRTUAL_INCOMING_ARGS_REGNUM, "incoming_args:" },
2405 { VIRTUAL_STACK_VARS_REGNUM, "stack_vars: " },
2406 { VIRTUAL_STACK_DYNAMIC_REGNUM, "stack_dynamic:" },
2407 { VIRTUAL_OUTGOING_ARGS_REGNUM, "outgoing_args:" },
2408 { VIRTUAL_CFA_REGNUM, "cfa (frame): " },
2409 { VIRTUAL_PREFERRED_STACK_BOUNDARY_REGNUM, "stack boundary:" },
2410 { LAST_VIRTUAL_REGISTER, "last virtual: " },
2411 };
2412
2413 fputs ("\nHard register information:\n", stderr);
2414 rs6000_debug_reg_print (FIRST_GPR_REGNO, LAST_GPR_REGNO, "gr");
2415 rs6000_debug_reg_print (FIRST_FPR_REGNO, LAST_FPR_REGNO, "fp");
2416 rs6000_debug_reg_print (FIRST_ALTIVEC_REGNO,
2417 LAST_ALTIVEC_REGNO,
2418 "vs");
2419 rs6000_debug_reg_print (LR_REGNO, LR_REGNO, "lr");
2420 rs6000_debug_reg_print (CTR_REGNO, CTR_REGNO, "ctr");
2421 rs6000_debug_reg_print (CR0_REGNO, CR7_REGNO, "cr");
2422 rs6000_debug_reg_print (CA_REGNO, CA_REGNO, "ca");
2423 rs6000_debug_reg_print (VRSAVE_REGNO, VRSAVE_REGNO, "vrsave");
2424 rs6000_debug_reg_print (VSCR_REGNO, VSCR_REGNO, "vscr");
2425
2426 fputs ("\nVirtual/stack/frame registers:\n", stderr);
2427 for (v = 0; v < ARRAY_SIZE (virtual_regs); v++)
2428 fprintf (stderr, "%s regno = %3d\n", virtual_regs[v].name, virtual_regs[v].regno);
2429
2430 fprintf (stderr,
2431 "\n"
2432 "d reg_class = %s\n"
2433 "f reg_class = %s\n"
2434 "v reg_class = %s\n"
2435 "wa reg_class = %s\n"
2436 "we reg_class = %s\n"
2437 "wr reg_class = %s\n"
2438 "wx reg_class = %s\n"
2439 "wA reg_class = %s\n"
2440 "\n",
2441 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_d]],
2442 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_f]],
2443 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_v]],
2444 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wa]],
2445 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_we]],
2446 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wr]],
2447 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wx]],
2448 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wA]]);
2449
2450 nl = "\n";
2451 for (m = 0; m < NUM_MACHINE_MODES; ++m)
2452 rs6000_debug_print_mode (m);
2453
2454 fputs ("\n", stderr);
2455
2456 for (m1 = 0; m1 < ARRAY_SIZE (print_tieable_modes); m1++)
2457 {
2458 machine_mode mode1 = print_tieable_modes[m1];
2459 bool first_time = true;
2460
2461 nl = (const char *)0;
2462 for (m2 = 0; m2 < ARRAY_SIZE (print_tieable_modes); m2++)
2463 {
2464 machine_mode mode2 = print_tieable_modes[m2];
2465 if (mode1 != mode2 && rs6000_modes_tieable_p (mode1, mode2))
2466 {
2467 if (first_time)
2468 {
2469 fprintf (stderr, "Tieable modes %s:", GET_MODE_NAME (mode1));
2470 nl = "\n";
2471 first_time = false;
2472 }
2473
2474 fprintf (stderr, " %s", GET_MODE_NAME (mode2));
2475 }
2476 }
2477
2478 if (!first_time)
2479 fputs ("\n", stderr);
2480 }
2481
2482 if (nl)
2483 fputs (nl, stderr);
2484
2485 if (rs6000_recip_control)
2486 {
2487 fprintf (stderr, "\nReciprocal mask = 0x%x\n", rs6000_recip_control);
2488
2489 for (m = 0; m < NUM_MACHINE_MODES; ++m)
2490 if (rs6000_recip_bits[m])
2491 {
2492 fprintf (stderr,
2493 "Reciprocal estimate mode: %-5s divide: %s rsqrt: %s\n",
2494 GET_MODE_NAME (m),
2495 (RS6000_RECIP_AUTO_RE_P (m)
2496 ? "auto"
2497 : (RS6000_RECIP_HAVE_RE_P (m) ? "have" : "none")),
2498 (RS6000_RECIP_AUTO_RSQRTE_P (m)
2499 ? "auto"
2500 : (RS6000_RECIP_HAVE_RSQRTE_P (m) ? "have" : "none")));
2501 }
2502
2503 fputs ("\n", stderr);
2504 }
2505
2506 if (rs6000_cpu_index >= 0)
2507 {
2508 const char *name = processor_target_table[rs6000_cpu_index].name;
2509 HOST_WIDE_INT flags
2510 = processor_target_table[rs6000_cpu_index].target_enable;
2511
2512 sprintf (flags_buffer, "-mcpu=%s flags", name);
2513 rs6000_print_isa_options (stderr, 0, flags_buffer, flags);
2514 }
2515 else
2516 fprintf (stderr, DEBUG_FMT_S, "cpu", "<none>");
2517
2518 if (rs6000_tune_index >= 0)
2519 {
2520 const char *name = processor_target_table[rs6000_tune_index].name;
2521 HOST_WIDE_INT flags
2522 = processor_target_table[rs6000_tune_index].target_enable;
2523
2524 sprintf (flags_buffer, "-mtune=%s flags", name);
2525 rs6000_print_isa_options (stderr, 0, flags_buffer, flags);
2526 }
2527 else
2528 fprintf (stderr, DEBUG_FMT_S, "tune", "<none>");
2529
2530 cl_target_option_save (&cl_opts, &global_options);
2531 rs6000_print_isa_options (stderr, 0, "rs6000_isa_flags",
2532 rs6000_isa_flags);
2533
2534 rs6000_print_isa_options (stderr, 0, "rs6000_isa_flags_explicit",
2535 rs6000_isa_flags_explicit);
2536
2537 rs6000_print_builtin_options (stderr, 0, "rs6000_builtin_mask",
2538 rs6000_builtin_mask);
2539
2540 rs6000_print_isa_options (stderr, 0, "TARGET_DEFAULT", TARGET_DEFAULT);
2541
2542 fprintf (stderr, DEBUG_FMT_S, "--with-cpu default",
2543 OPTION_TARGET_CPU_DEFAULT ? OPTION_TARGET_CPU_DEFAULT : "<none>");
2544
2545 switch (rs6000_sched_costly_dep)
2546 {
2547 case max_dep_latency:
2548 costly_str = "max_dep_latency";
2549 break;
2550
2551 case no_dep_costly:
2552 costly_str = "no_dep_costly";
2553 break;
2554
2555 case all_deps_costly:
2556 costly_str = "all_deps_costly";
2557 break;
2558
2559 case true_store_to_load_dep_costly:
2560 costly_str = "true_store_to_load_dep_costly";
2561 break;
2562
2563 case store_to_load_dep_costly:
2564 costly_str = "store_to_load_dep_costly";
2565 break;
2566
2567 default:
2568 costly_str = costly_num;
2569 sprintf (costly_num, "%d", (int)rs6000_sched_costly_dep);
2570 break;
2571 }
2572
2573 fprintf (stderr, DEBUG_FMT_S, "sched_costly_dep", costly_str);
2574
2575 switch (rs6000_sched_insert_nops)
2576 {
2577 case sched_finish_regroup_exact:
2578 nop_str = "sched_finish_regroup_exact";
2579 break;
2580
2581 case sched_finish_pad_groups:
2582 nop_str = "sched_finish_pad_groups";
2583 break;
2584
2585 case sched_finish_none:
2586 nop_str = "sched_finish_none";
2587 break;
2588
2589 default:
2590 nop_str = nop_num;
2591 sprintf (nop_num, "%d", (int)rs6000_sched_insert_nops);
2592 break;
2593 }
2594
2595 fprintf (stderr, DEBUG_FMT_S, "sched_insert_nops", nop_str);
2596
2597 switch (rs6000_sdata)
2598 {
2599 default:
2600 case SDATA_NONE:
2601 break;
2602
2603 case SDATA_DATA:
2604 fprintf (stderr, DEBUG_FMT_S, "sdata", "data");
2605 break;
2606
2607 case SDATA_SYSV:
2608 fprintf (stderr, DEBUG_FMT_S, "sdata", "sysv");
2609 break;
2610
2611 case SDATA_EABI:
2612 fprintf (stderr, DEBUG_FMT_S, "sdata", "eabi");
2613 break;
2614
2615 }
2616
2617 switch (rs6000_traceback)
2618 {
2619 case traceback_default: trace_str = "default"; break;
2620 case traceback_none: trace_str = "none"; break;
2621 case traceback_part: trace_str = "part"; break;
2622 case traceback_full: trace_str = "full"; break;
2623 default: trace_str = "unknown"; break;
2624 }
2625
2626 fprintf (stderr, DEBUG_FMT_S, "traceback", trace_str);
2627
2628 switch (rs6000_current_cmodel)
2629 {
2630 case CMODEL_SMALL: cmodel_str = "small"; break;
2631 case CMODEL_MEDIUM: cmodel_str = "medium"; break;
2632 case CMODEL_LARGE: cmodel_str = "large"; break;
2633 default: cmodel_str = "unknown"; break;
2634 }
2635
2636 fprintf (stderr, DEBUG_FMT_S, "cmodel", cmodel_str);
2637
2638 switch (rs6000_current_abi)
2639 {
2640 case ABI_NONE: abi_str = "none"; break;
2641 case ABI_AIX: abi_str = "aix"; break;
2642 case ABI_ELFv2: abi_str = "ELFv2"; break;
2643 case ABI_V4: abi_str = "V4"; break;
2644 case ABI_DARWIN: abi_str = "darwin"; break;
2645 default: abi_str = "unknown"; break;
2646 }
2647
2648 fprintf (stderr, DEBUG_FMT_S, "abi", abi_str);
2649
2650 if (rs6000_altivec_abi)
2651 fprintf (stderr, DEBUG_FMT_S, "altivec_abi", "true");
2652
2653 if (rs6000_darwin64_abi)
2654 fprintf (stderr, DEBUG_FMT_S, "darwin64_abi", "true");
2655
2656 fprintf (stderr, DEBUG_FMT_S, "soft_float",
2657 (TARGET_SOFT_FLOAT ? "true" : "false"));
2658
2659 if (TARGET_LINK_STACK)
2660 fprintf (stderr, DEBUG_FMT_S, "link_stack", "true");
2661
2662 if (TARGET_P8_FUSION)
2663 {
2664 char options[80];
2665
2666 strcpy (options, "power8");
2667 if (TARGET_P8_FUSION_SIGN)
2668 strcat (options, ", sign");
2669
2670 fprintf (stderr, DEBUG_FMT_S, "fusion", options);
2671 }
2672
2673 fprintf (stderr, DEBUG_FMT_S, "plt-format",
2674 TARGET_SECURE_PLT ? "secure" : "bss");
2675 fprintf (stderr, DEBUG_FMT_S, "struct-return",
2676 aix_struct_return ? "aix" : "sysv");
2677 fprintf (stderr, DEBUG_FMT_S, "always_hint", tf[!!rs6000_always_hint]);
2678 fprintf (stderr, DEBUG_FMT_S, "sched_groups", tf[!!rs6000_sched_groups]);
2679 fprintf (stderr, DEBUG_FMT_S, "align_branch",
2680 tf[!!rs6000_align_branch_targets]);
2681 fprintf (stderr, DEBUG_FMT_D, "tls_size", rs6000_tls_size);
2682 fprintf (stderr, DEBUG_FMT_D, "long_double_size",
2683 rs6000_long_double_type_size);
2684 if (rs6000_long_double_type_size > 64)
2685 {
2686 fprintf (stderr, DEBUG_FMT_S, "long double type",
2687 TARGET_IEEEQUAD ? "IEEE" : "IBM");
2688 fprintf (stderr, DEBUG_FMT_S, "default long double type",
2689 TARGET_IEEEQUAD_DEFAULT ? "IEEE" : "IBM");
2690 }
2691 fprintf (stderr, DEBUG_FMT_D, "sched_restricted_insns_priority",
2692 (int)rs6000_sched_restricted_insns_priority);
2693 fprintf (stderr, DEBUG_FMT_D, "Number of standard builtins",
2694 (int)END_BUILTINS);
2695 fprintf (stderr, DEBUG_FMT_D, "Number of rs6000 builtins",
2696 (int)RS6000_BUILTIN_COUNT);
2697
2698 fprintf (stderr, DEBUG_FMT_D, "Enable float128 on VSX",
2699 (int)TARGET_FLOAT128_ENABLE_TYPE);
2700
2701 if (TARGET_VSX)
2702 fprintf (stderr, DEBUG_FMT_D, "VSX easy 64-bit scalar element",
2703 (int)VECTOR_ELEMENT_SCALAR_64BIT);
2704
2705 if (TARGET_DIRECT_MOVE_128)
2706 fprintf (stderr, DEBUG_FMT_D, "VSX easy 64-bit mfvsrld element",
2707 (int)VECTOR_ELEMENT_MFVSRLD_64BIT);
2708 }
2709
2710 \f
2711 /* Update the addr mask bits in reg_addr to help the secondary reload and
2712 legitimate address support figure out the appropriate addressing to
2713 use. */
2714
2715 static void
2716 rs6000_setup_reg_addr_masks (void)
2717 {
2718 ssize_t rc, reg, m, nregs;
2719 addr_mask_type any_addr_mask, addr_mask;
2720
2721 for (m = 0; m < NUM_MACHINE_MODES; ++m)
2722 {
2723 machine_mode m2 = (machine_mode) m;
2724 bool complex_p = false;
2725 bool small_int_p = (m2 == QImode || m2 == HImode || m2 == SImode);
2726 size_t msize;
2727
2728 if (COMPLEX_MODE_P (m2))
2729 {
2730 complex_p = true;
2731 m2 = GET_MODE_INNER (m2);
2732 }
2733
2734 msize = GET_MODE_SIZE (m2);
2735
2736 /* SDmode is special in that we want to access it only via REG+REG
2737 addressing on power7 and above, since we want to use the LFIWZX and
2738 STFIWX instructions to load and store it. */
2739 bool indexed_only_p = (m == SDmode && TARGET_NO_SDMODE_STACK);
2740
2741 any_addr_mask = 0;
2742 for (rc = FIRST_RELOAD_REG_CLASS; rc <= LAST_RELOAD_REG_CLASS; rc++)
2743 {
2744 addr_mask = 0;
2745 reg = reload_reg_map[rc].reg;
2746
2747 /* Can mode values go in the GPR/FPR/Altivec registers? */
2748 if (reg >= 0 && rs6000_hard_regno_mode_ok_p[m][reg])
2749 {
2750 bool small_int_vsx_p = (small_int_p
2751 && (rc == RELOAD_REG_FPR
2752 || rc == RELOAD_REG_VMX));
2753
2754 nregs = rs6000_hard_regno_nregs[m][reg];
2755 addr_mask |= RELOAD_REG_VALID;
2756
2757 /* Indicate if the mode takes more than 1 physical register. If
2758 it takes a single register, indicate it can do REG+REG
2759 addressing. Small integers in VSX registers can only do
2760 REG+REG addressing. */
2761 if (small_int_vsx_p)
2762 addr_mask |= RELOAD_REG_INDEXED;
2763 else if (nregs > 1 || m == BLKmode || complex_p)
2764 addr_mask |= RELOAD_REG_MULTIPLE;
2765 else
2766 addr_mask |= RELOAD_REG_INDEXED;
2767
2768 /* Figure out if we can do PRE_INC, PRE_DEC, or PRE_MODIFY
2769 addressing. If we allow scalars into Altivec registers,
2770 don't allow PRE_INC, PRE_DEC, or PRE_MODIFY.
2771
2772 For VSX systems, we don't allow update addressing for
2773 DFmode/SFmode if those registers can go in both the
2774 traditional floating point registers and Altivec registers.
2775 The load/store instructions for the Altivec registers do not
2776 have update forms. If we allowed update addressing, it seems
2777 to break IV-OPT code using floating point if the index type is
2778 int instead of long (PR target/81550 and target/84042). */
2779
2780 if (TARGET_UPDATE
2781 && (rc == RELOAD_REG_GPR || rc == RELOAD_REG_FPR)
2782 && msize <= 8
2783 && !VECTOR_MODE_P (m2)
2784 && !FLOAT128_VECTOR_P (m2)
2785 && !complex_p
2786 && (m != E_DFmode || !TARGET_VSX)
2787 && (m != E_SFmode || !TARGET_P8_VECTOR)
2788 && !small_int_vsx_p)
2789 {
2790 addr_mask |= RELOAD_REG_PRE_INCDEC;
2791
2792 /* PRE_MODIFY is more restricted than PRE_INC/PRE_DEC in that
2793 we don't allow PRE_MODIFY for some multi-register
2794 operations. */
2795 switch (m)
2796 {
2797 default:
2798 addr_mask |= RELOAD_REG_PRE_MODIFY;
2799 break;
2800
2801 case E_DImode:
2802 if (TARGET_POWERPC64)
2803 addr_mask |= RELOAD_REG_PRE_MODIFY;
2804 break;
2805
2806 case E_DFmode:
2807 case E_DDmode:
2808 if (TARGET_HARD_FLOAT)
2809 addr_mask |= RELOAD_REG_PRE_MODIFY;
2810 break;
2811 }
2812 }
2813 }
2814
2815 /* GPR and FPR registers can do REG+OFFSET addressing, except
2816 possibly for SDmode. ISA 3.0 (i.e. power9) adds D-form addressing
2817 for 64-bit scalars and 32-bit SFmode to Altivec registers. */
2818 if ((addr_mask != 0) && !indexed_only_p
2819 && msize <= 8
2820 && (rc == RELOAD_REG_GPR
2821 || ((msize == 8 || m2 == SFmode)
2822 && (rc == RELOAD_REG_FPR
2823 || (rc == RELOAD_REG_VMX && TARGET_P9_VECTOR)))))
2824 addr_mask |= RELOAD_REG_OFFSET;
2825
2826 /* VSX registers can do REG+OFFSET addressing if ISA 3.0
2827 instructions are enabled. The offset field for 128-bit VSX registers is
2828 only 12 bits. While GPRs can handle the full offset range, VSX
2829 registers can only handle the restricted range. */
2830 else if ((addr_mask != 0) && !indexed_only_p
2831 && msize == 16 && TARGET_P9_VECTOR
2832 && (ALTIVEC_OR_VSX_VECTOR_MODE (m2)
2833 || (m2 == TImode && TARGET_VSX)))
2834 {
2835 addr_mask |= RELOAD_REG_OFFSET;
2836 if (rc == RELOAD_REG_FPR || rc == RELOAD_REG_VMX)
2837 addr_mask |= RELOAD_REG_QUAD_OFFSET;
2838 }
2839
2840 /* VMX registers can do (REG & -16) and ((REG+REG) & -16)
2841 addressing on 128-bit types. */
2842 if (rc == RELOAD_REG_VMX && msize == 16
2843 && (addr_mask & RELOAD_REG_VALID) != 0)
2844 addr_mask |= RELOAD_REG_AND_M16;
2845
2846 reg_addr[m].addr_mask[rc] = addr_mask;
2847 any_addr_mask |= addr_mask;
2848 }
2849
2850 reg_addr[m].addr_mask[RELOAD_REG_ANY] = any_addr_mask;
2851 }
2852 }
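
/* A minimal consumer sketch (editor's illustration, not part of the
   original file): querying the precomputed masks.  The helper name is
   hypothetical.  */

static inline bool
example_gpr_offset_addressing_p (machine_mode mode)
{
  /* True if MODE supports REG+OFFSET addressing in the GPRs.  */
  return (reg_addr[mode].addr_mask[RELOAD_REG_GPR]
	  & RELOAD_REG_OFFSET) != 0;
}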
2853
2854 \f
2855 /* Initialize the various global tables that are based on register size. */
2856 static void
2857 rs6000_init_hard_regno_mode_ok (bool global_init_p)
2858 {
2859 ssize_t r, m, c;
2860 int align64;
2861 int align32;
2862
2863 /* Precalculate REGNO_REG_CLASS. */
2864 rs6000_regno_regclass[0] = GENERAL_REGS;
2865 for (r = 1; r < 32; ++r)
2866 rs6000_regno_regclass[r] = BASE_REGS;
2867
2868 for (r = 32; r < 64; ++r)
2869 rs6000_regno_regclass[r] = FLOAT_REGS;
2870
2871 for (r = 64; HARD_REGISTER_NUM_P (r); ++r)
2872 rs6000_regno_regclass[r] = NO_REGS;
2873
2874 for (r = FIRST_ALTIVEC_REGNO; r <= LAST_ALTIVEC_REGNO; ++r)
2875 rs6000_regno_regclass[r] = ALTIVEC_REGS;
2876
2877 rs6000_regno_regclass[CR0_REGNO] = CR0_REGS;
2878 for (r = CR1_REGNO; r <= CR7_REGNO; ++r)
2879 rs6000_regno_regclass[r] = CR_REGS;
2880
2881 rs6000_regno_regclass[LR_REGNO] = LINK_REGS;
2882 rs6000_regno_regclass[CTR_REGNO] = CTR_REGS;
2883 rs6000_regno_regclass[CA_REGNO] = NO_REGS;
2884 rs6000_regno_regclass[VRSAVE_REGNO] = VRSAVE_REGS;
2885 rs6000_regno_regclass[VSCR_REGNO] = VRSAVE_REGS;
2886 rs6000_regno_regclass[ARG_POINTER_REGNUM] = BASE_REGS;
2887 rs6000_regno_regclass[FRAME_POINTER_REGNUM] = BASE_REGS;
2888
2889 /* Precalculate register class to simpler reload register class. We don't
2890 need all of the register classes that are combinations of different
2891 classes, just the simple ones that have constraint letters. */
2892 for (c = 0; c < N_REG_CLASSES; c++)
2893 reg_class_to_reg_type[c] = NO_REG_TYPE;
2894
2895 reg_class_to_reg_type[(int)GENERAL_REGS] = GPR_REG_TYPE;
2896 reg_class_to_reg_type[(int)BASE_REGS] = GPR_REG_TYPE;
2897 reg_class_to_reg_type[(int)VSX_REGS] = VSX_REG_TYPE;
2898 reg_class_to_reg_type[(int)VRSAVE_REGS] = SPR_REG_TYPE;
2899 reg_class_to_reg_type[(int)VSCR_REGS] = SPR_REG_TYPE;
2900 reg_class_to_reg_type[(int)LINK_REGS] = SPR_REG_TYPE;
2901 reg_class_to_reg_type[(int)CTR_REGS] = SPR_REG_TYPE;
2902 reg_class_to_reg_type[(int)LINK_OR_CTR_REGS] = SPR_REG_TYPE;
2903 reg_class_to_reg_type[(int)CR_REGS] = CR_REG_TYPE;
2904 reg_class_to_reg_type[(int)CR0_REGS] = CR_REG_TYPE;
2905
2906 if (TARGET_VSX)
2907 {
2908 reg_class_to_reg_type[(int)FLOAT_REGS] = VSX_REG_TYPE;
2909 reg_class_to_reg_type[(int)ALTIVEC_REGS] = VSX_REG_TYPE;
2910 }
2911 else
2912 {
2913 reg_class_to_reg_type[(int)FLOAT_REGS] = FPR_REG_TYPE;
2914 reg_class_to_reg_type[(int)ALTIVEC_REGS] = ALTIVEC_REG_TYPE;
2915 }
2916
2917 /* Precalculate the valid memory formats as well as the vector information;
2918 this must be set up before the rs6000_hard_regno_nregs_internal calls
2919 below. */
2920 gcc_assert ((int)VECTOR_NONE == 0);
2921 memset ((void *) &rs6000_vector_unit[0], '\0', sizeof (rs6000_vector_unit));
2922 memset ((void *) &rs6000_vector_mem[0], '\0', sizeof (rs6000_vector_mem));
2923
2924 gcc_assert ((int)CODE_FOR_nothing == 0);
2925 memset ((void *) &reg_addr[0], '\0', sizeof (reg_addr));
2926
2927 gcc_assert ((int)NO_REGS == 0);
2928 memset ((void *) &rs6000_constraints[0], '\0', sizeof (rs6000_constraints));
2929
2930 /* The VSX hardware allows native alignment for vectors; TARGET_VSX_ALIGN_128
2931 controls whether the compiler may use native alignment or must keep 128-bit. */
2932 if (TARGET_VSX && !TARGET_VSX_ALIGN_128)
2933 {
2934 align64 = 64;
2935 align32 = 32;
2936 }
2937 else
2938 {
2939 align64 = 128;
2940 align32 = 128;
2941 }
2942
2943 /* KF mode (IEEE 128-bit in VSX registers). We do not have arithmetic, so
2944 only set the memory modes. Include TFmode if -mabi=ieeelongdouble. */
2945 if (TARGET_FLOAT128_TYPE)
2946 {
2947 rs6000_vector_mem[KFmode] = VECTOR_VSX;
2948 rs6000_vector_align[KFmode] = 128;
2949
2950 if (FLOAT128_IEEE_P (TFmode))
2951 {
2952 rs6000_vector_mem[TFmode] = VECTOR_VSX;
2953 rs6000_vector_align[TFmode] = 128;
2954 }
2955 }
2956
2957 /* V2DF mode, VSX only. */
2958 if (TARGET_VSX)
2959 {
2960 rs6000_vector_unit[V2DFmode] = VECTOR_VSX;
2961 rs6000_vector_mem[V2DFmode] = VECTOR_VSX;
2962 rs6000_vector_align[V2DFmode] = align64;
2963 }
2964
2965 /* V4SF mode, either VSX or Altivec. */
2966 if (TARGET_VSX)
2967 {
2968 rs6000_vector_unit[V4SFmode] = VECTOR_VSX;
2969 rs6000_vector_mem[V4SFmode] = VECTOR_VSX;
2970 rs6000_vector_align[V4SFmode] = align32;
2971 }
2972 else if (TARGET_ALTIVEC)
2973 {
2974 rs6000_vector_unit[V4SFmode] = VECTOR_ALTIVEC;
2975 rs6000_vector_mem[V4SFmode] = VECTOR_ALTIVEC;
2976 rs6000_vector_align[V4SFmode] = align32;
2977 }
2978
2979 /* V16QImode, V8HImode, V4SImode are Altivec only, but possibly do VSX loads
2980 and stores. */
2981 if (TARGET_ALTIVEC)
2982 {
2983 rs6000_vector_unit[V4SImode] = VECTOR_ALTIVEC;
2984 rs6000_vector_unit[V8HImode] = VECTOR_ALTIVEC;
2985 rs6000_vector_unit[V16QImode] = VECTOR_ALTIVEC;
2986 rs6000_vector_align[V4SImode] = align32;
2987 rs6000_vector_align[V8HImode] = align32;
2988 rs6000_vector_align[V16QImode] = align32;
2989
2990 if (TARGET_VSX)
2991 {
2992 rs6000_vector_mem[V4SImode] = VECTOR_VSX;
2993 rs6000_vector_mem[V8HImode] = VECTOR_VSX;
2994 rs6000_vector_mem[V16QImode] = VECTOR_VSX;
2995 }
2996 else
2997 {
2998 rs6000_vector_mem[V4SImode] = VECTOR_ALTIVEC;
2999 rs6000_vector_mem[V8HImode] = VECTOR_ALTIVEC;
3000 rs6000_vector_mem[V16QImode] = VECTOR_ALTIVEC;
3001 }
3002 }
3003
3004 /* V2DImode, full mode depends on ISA 2.07 vector mode. Allow under VSX to
3005 do insert/splat/extract. Altivec doesn't have 64-bit integer support. */
3006 if (TARGET_VSX)
3007 {
3008 rs6000_vector_mem[V2DImode] = VECTOR_VSX;
3009 rs6000_vector_unit[V2DImode]
3010 = (TARGET_P8_VECTOR) ? VECTOR_P8_VECTOR : VECTOR_NONE;
3011 rs6000_vector_align[V2DImode] = align64;
3012
3013 rs6000_vector_mem[V1TImode] = VECTOR_VSX;
3014 rs6000_vector_unit[V1TImode]
3015 = (TARGET_P8_VECTOR) ? VECTOR_P8_VECTOR : VECTOR_NONE;
3016 rs6000_vector_align[V1TImode] = 128;
3017 }
3018
3019 /* DFmode, see if we want to use the VSX unit. Memory is handled
3020 differently, so don't set rs6000_vector_mem. */
3021 if (TARGET_VSX)
3022 {
3023 rs6000_vector_unit[DFmode] = VECTOR_VSX;
3024 rs6000_vector_align[DFmode] = 64;
3025 }
3026
3027 /* SFmode, see if we want to use the VSX unit. */
3028 if (TARGET_P8_VECTOR)
3029 {
3030 rs6000_vector_unit[SFmode] = VECTOR_VSX;
3031 rs6000_vector_align[SFmode] = 32;
3032 }
3033
3034 /* Allow TImode in VSX registers and set the VSX memory macros. */
3035 if (TARGET_VSX)
3036 {
3037 rs6000_vector_mem[TImode] = VECTOR_VSX;
3038 rs6000_vector_align[TImode] = align64;
3039 }
3040
3041 /* Register class constraints for the constraints that depend on compile
3042 switches. When the VSX code was added, different constraints were added
3043 based on the type (DFmode, V2DFmode, V4SFmode). For the vector types, all
3044 of the VSX registers are used. The register classes for scalar floating
3045 point types are set based on whether we allow that type into the upper
3046 (Altivec) registers. GCC has register classes to target the Altivec
3047 registers for load/store operations, so that a VSX memory operation can be
3048 selected instead of the traditional floating point operation. The
3049 constraints are:
3050
3051 d - Register class to use with traditional DFmode instructions.
3052 f - Register class to use with traditional SFmode instructions.
3053 v - Altivec register.
3054 wa - Any VSX register.
3055 wc - Reserved to represent individual CR bits (used in LLVM).
3056 wn - always NO_REGS.
3057 wr - GPR if 64-bit mode is permitted.
3058 wx - Float register if we can do 32-bit int stores. */
3059
3060 if (TARGET_HARD_FLOAT)
3061 {
3062 rs6000_constraints[RS6000_CONSTRAINT_f] = FLOAT_REGS; /* SFmode */
3063 rs6000_constraints[RS6000_CONSTRAINT_d] = FLOAT_REGS; /* DFmode */
3064 }
3065
3066 if (TARGET_VSX)
3067 rs6000_constraints[RS6000_CONSTRAINT_wa] = VSX_REGS;
3068
3069 /* Add conditional constraints based on various options, to allow us to
3070 collapse multiple insn patterns. */
3071 if (TARGET_ALTIVEC)
3072 rs6000_constraints[RS6000_CONSTRAINT_v] = ALTIVEC_REGS;
3073
3074 if (TARGET_POWERPC64)
3075 {
3076 rs6000_constraints[RS6000_CONSTRAINT_wr] = GENERAL_REGS;
3077 rs6000_constraints[RS6000_CONSTRAINT_wA] = BASE_REGS;
3078 }
3079
3080 if (TARGET_STFIWX)
3081 rs6000_constraints[RS6000_CONSTRAINT_wx] = FLOAT_REGS; /* DImode */
3082
3083 /* Support for new direct moves (ISA 3.0 + 64-bit). */
3084 if (TARGET_DIRECT_MOVE_128)
3085 rs6000_constraints[RS6000_CONSTRAINT_we] = VSX_REGS;
3086
3087 /* Set up the reload helper and direct move functions. */
3088 if (TARGET_VSX || TARGET_ALTIVEC)
3089 {
3090 if (TARGET_64BIT)
3091 {
3092 reg_addr[V16QImode].reload_store = CODE_FOR_reload_v16qi_di_store;
3093 reg_addr[V16QImode].reload_load = CODE_FOR_reload_v16qi_di_load;
3094 reg_addr[V8HImode].reload_store = CODE_FOR_reload_v8hi_di_store;
3095 reg_addr[V8HImode].reload_load = CODE_FOR_reload_v8hi_di_load;
3096 reg_addr[V4SImode].reload_store = CODE_FOR_reload_v4si_di_store;
3097 reg_addr[V4SImode].reload_load = CODE_FOR_reload_v4si_di_load;
3098 reg_addr[V2DImode].reload_store = CODE_FOR_reload_v2di_di_store;
3099 reg_addr[V2DImode].reload_load = CODE_FOR_reload_v2di_di_load;
3100 reg_addr[V1TImode].reload_store = CODE_FOR_reload_v1ti_di_store;
3101 reg_addr[V1TImode].reload_load = CODE_FOR_reload_v1ti_di_load;
3102 reg_addr[V4SFmode].reload_store = CODE_FOR_reload_v4sf_di_store;
3103 reg_addr[V4SFmode].reload_load = CODE_FOR_reload_v4sf_di_load;
3104 reg_addr[V2DFmode].reload_store = CODE_FOR_reload_v2df_di_store;
3105 reg_addr[V2DFmode].reload_load = CODE_FOR_reload_v2df_di_load;
3106 reg_addr[DFmode].reload_store = CODE_FOR_reload_df_di_store;
3107 reg_addr[DFmode].reload_load = CODE_FOR_reload_df_di_load;
3108 reg_addr[DDmode].reload_store = CODE_FOR_reload_dd_di_store;
3109 reg_addr[DDmode].reload_load = CODE_FOR_reload_dd_di_load;
3110 reg_addr[SFmode].reload_store = CODE_FOR_reload_sf_di_store;
3111 reg_addr[SFmode].reload_load = CODE_FOR_reload_sf_di_load;
3112
3113 if (FLOAT128_VECTOR_P (KFmode))
3114 {
3115 reg_addr[KFmode].reload_store = CODE_FOR_reload_kf_di_store;
3116 reg_addr[KFmode].reload_load = CODE_FOR_reload_kf_di_load;
3117 }
3118
3119 if (FLOAT128_VECTOR_P (TFmode))
3120 {
3121 reg_addr[TFmode].reload_store = CODE_FOR_reload_tf_di_store;
3122 reg_addr[TFmode].reload_load = CODE_FOR_reload_tf_di_load;
3123 }
3124
3125 /* Only provide a reload handler for SDmode if lfiwzx/stfiwx are
3126 available. */
3127 if (TARGET_NO_SDMODE_STACK)
3128 {
3129 reg_addr[SDmode].reload_store = CODE_FOR_reload_sd_di_store;
3130 reg_addr[SDmode].reload_load = CODE_FOR_reload_sd_di_load;
3131 }
3132
3133 if (TARGET_VSX)
3134 {
3135 reg_addr[TImode].reload_store = CODE_FOR_reload_ti_di_store;
3136 reg_addr[TImode].reload_load = CODE_FOR_reload_ti_di_load;
3137 }
3138
3139 if (TARGET_DIRECT_MOVE && !TARGET_DIRECT_MOVE_128)
3140 {
3141 reg_addr[TImode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxti;
3142 reg_addr[V1TImode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxv1ti;
3143 reg_addr[V2DFmode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxv2df;
3144 reg_addr[V2DImode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxv2di;
3145 reg_addr[V4SFmode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxv4sf;
3146 reg_addr[V4SImode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxv4si;
3147 reg_addr[V8HImode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxv8hi;
3148 reg_addr[V16QImode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxv16qi;
3149 reg_addr[SFmode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxsf;
3150
3151 reg_addr[TImode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprti;
3152 reg_addr[V1TImode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprv1ti;
3153 reg_addr[V2DFmode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprv2df;
3154 reg_addr[V2DImode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprv2di;
3155 reg_addr[V4SFmode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprv4sf;
3156 reg_addr[V4SImode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprv4si;
3157 reg_addr[V8HImode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprv8hi;
3158 reg_addr[V16QImode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprv16qi;
3159 reg_addr[SFmode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprsf;
3160
3161 if (FLOAT128_VECTOR_P (KFmode))
3162 {
3163 reg_addr[KFmode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxkf;
3164 reg_addr[KFmode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprkf;
3165 }
3166
3167 if (FLOAT128_VECTOR_P (TFmode))
3168 {
3169 reg_addr[TFmode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxtf;
3170 reg_addr[TFmode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprtf;
3171 }
3172 }
3173 }
3174 else
3175 {
3176 reg_addr[V16QImode].reload_store = CODE_FOR_reload_v16qi_si_store;
3177 reg_addr[V16QImode].reload_load = CODE_FOR_reload_v16qi_si_load;
3178 reg_addr[V8HImode].reload_store = CODE_FOR_reload_v8hi_si_store;
3179 reg_addr[V8HImode].reload_load = CODE_FOR_reload_v8hi_si_load;
3180 reg_addr[V4SImode].reload_store = CODE_FOR_reload_v4si_si_store;
3181 reg_addr[V4SImode].reload_load = CODE_FOR_reload_v4si_si_load;
3182 reg_addr[V2DImode].reload_store = CODE_FOR_reload_v2di_si_store;
3183 reg_addr[V2DImode].reload_load = CODE_FOR_reload_v2di_si_load;
3184 reg_addr[V1TImode].reload_store = CODE_FOR_reload_v1ti_si_store;
3185 reg_addr[V1TImode].reload_load = CODE_FOR_reload_v1ti_si_load;
3186 reg_addr[V4SFmode].reload_store = CODE_FOR_reload_v4sf_si_store;
3187 reg_addr[V4SFmode].reload_load = CODE_FOR_reload_v4sf_si_load;
3188 reg_addr[V2DFmode].reload_store = CODE_FOR_reload_v2df_si_store;
3189 reg_addr[V2DFmode].reload_load = CODE_FOR_reload_v2df_si_load;
3190 reg_addr[DFmode].reload_store = CODE_FOR_reload_df_si_store;
3191 reg_addr[DFmode].reload_load = CODE_FOR_reload_df_si_load;
3192 reg_addr[DDmode].reload_store = CODE_FOR_reload_dd_si_store;
3193 reg_addr[DDmode].reload_load = CODE_FOR_reload_dd_si_load;
3194 reg_addr[SFmode].reload_store = CODE_FOR_reload_sf_si_store;
3195 reg_addr[SFmode].reload_load = CODE_FOR_reload_sf_si_load;
3196
3197 if (FLOAT128_VECTOR_P (KFmode))
3198 {
3199 reg_addr[KFmode].reload_store = CODE_FOR_reload_kf_si_store;
3200 reg_addr[KFmode].reload_load = CODE_FOR_reload_kf_si_load;
3201 }
3202
3203 if (FLOAT128_IEEE_P (TFmode))
3204 {
3205 reg_addr[TFmode].reload_store = CODE_FOR_reload_tf_si_store;
3206 reg_addr[TFmode].reload_load = CODE_FOR_reload_tf_si_load;
3207 }
3208
3209 /* Only provide a reload handler for SDmode if lfiwzx/stfiwx are
3210 available. */
3211 if (TARGET_NO_SDMODE_STACK)
3212 {
3213 reg_addr[SDmode].reload_store = CODE_FOR_reload_sd_si_store;
3214 reg_addr[SDmode].reload_load = CODE_FOR_reload_sd_si_load;
3215 }
3216
3217 if (TARGET_VSX)
3218 {
3219 reg_addr[TImode].reload_store = CODE_FOR_reload_ti_si_store;
3220 reg_addr[TImode].reload_load = CODE_FOR_reload_ti_si_load;
3221 }
3222
3223 if (TARGET_DIRECT_MOVE)
3224 {
3225 reg_addr[DImode].reload_fpr_gpr = CODE_FOR_reload_fpr_from_gprdi;
3226 reg_addr[DDmode].reload_fpr_gpr = CODE_FOR_reload_fpr_from_gprdd;
3227 reg_addr[DFmode].reload_fpr_gpr = CODE_FOR_reload_fpr_from_gprdf;
3228 }
3229 }
3230
3231 reg_addr[DFmode].scalar_in_vmx_p = true;
3232 reg_addr[DImode].scalar_in_vmx_p = true;
3233
3234 if (TARGET_P8_VECTOR)
3235 {
3236 reg_addr[SFmode].scalar_in_vmx_p = true;
3237 reg_addr[SImode].scalar_in_vmx_p = true;
3238
3239 if (TARGET_P9_VECTOR)
3240 {
3241 reg_addr[HImode].scalar_in_vmx_p = true;
3242 reg_addr[QImode].scalar_in_vmx_p = true;
3243 }
3244 }
3245 }
3246
3247 /* Precalculate HARD_REGNO_NREGS. */
3248 for (r = 0; HARD_REGISTER_NUM_P (r); ++r)
3249 for (m = 0; m < NUM_MACHINE_MODES; ++m)
3250 rs6000_hard_regno_nregs[m][r]
3251 = rs6000_hard_regno_nregs_internal (r, (machine_mode) m);
3252
3253 /* Precalculate TARGET_HARD_REGNO_MODE_OK. */
3254 for (r = 0; HARD_REGISTER_NUM_P (r); ++r)
3255 for (m = 0; m < NUM_MACHINE_MODES; ++m)
3256 rs6000_hard_regno_mode_ok_p[m][r]
3257 = rs6000_hard_regno_mode_ok_uncached (r, (machine_mode) m);
3258
3259 /* Precalculate CLASS_MAX_NREGS sizes. */
3260 for (c = 0; c < LIM_REG_CLASSES; ++c)
3261 {
3262 int reg_size;
3263
3264 if (TARGET_VSX && VSX_REG_CLASS_P (c))
3265 reg_size = UNITS_PER_VSX_WORD;
3266
3267 else if (c == ALTIVEC_REGS)
3268 reg_size = UNITS_PER_ALTIVEC_WORD;
3269
3270 else if (c == FLOAT_REGS)
3271 reg_size = UNITS_PER_FP_WORD;
3272
3273 else
3274 reg_size = UNITS_PER_WORD;
3275
3276 for (m = 0; m < NUM_MACHINE_MODES; ++m)
3277 {
3278 machine_mode m2 = (machine_mode)m;
3279 int reg_size2 = reg_size;
3280
3281 /* TDmode & IBM 128-bit floating point always take 2 registers, even
3282 in VSX. */
3283 if (TARGET_VSX && VSX_REG_CLASS_P (c) && FLOAT128_2REG_P (m))
3284 reg_size2 = UNITS_PER_FP_WORD;
3285
3286 rs6000_class_max_nregs[m][c]
3287 = (GET_MODE_SIZE (m2) + reg_size2 - 1) / reg_size2;
3288 }
3289 }
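
/* Worked example (editor's note): IFmode, the IBM double-double format, is
   FLOAT128_2REG_P, so in a VSX class it uses reg_size2 = UNITS_PER_FP_WORD
   = 8 and needs (16 + 8 - 1) / 8 = 2 registers; IEEE 128-bit KFmode keeps
   reg_size = 16 and fits in (16 + 16 - 1) / 16 = 1 VSX register.  */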
3290
3291 /* Calculate the modes for which to automatically generate code using the
3292 reciprocal divide and square root instructions. In the future, possibly
3293 automatically generate the instructions even if the user did not specify
3294 -mrecip. The older machines' double precision reciprocal sqrt estimate is
3295 not accurate enough. */
3296 memset (rs6000_recip_bits, 0, sizeof (rs6000_recip_bits));
3297 if (TARGET_FRES)
3298 rs6000_recip_bits[SFmode] = RS6000_RECIP_MASK_HAVE_RE;
3299 if (TARGET_FRE)
3300 rs6000_recip_bits[DFmode] = RS6000_RECIP_MASK_HAVE_RE;
3301 if (VECTOR_UNIT_ALTIVEC_OR_VSX_P (V4SFmode))
3302 rs6000_recip_bits[V4SFmode] = RS6000_RECIP_MASK_HAVE_RE;
3303 if (VECTOR_UNIT_VSX_P (V2DFmode))
3304 rs6000_recip_bits[V2DFmode] = RS6000_RECIP_MASK_HAVE_RE;
3305
3306 if (TARGET_FRSQRTES)
3307 rs6000_recip_bits[SFmode] |= RS6000_RECIP_MASK_HAVE_RSQRTE;
3308 if (TARGET_FRSQRTE)
3309 rs6000_recip_bits[DFmode] |= RS6000_RECIP_MASK_HAVE_RSQRTE;
3310 if (VECTOR_UNIT_ALTIVEC_OR_VSX_P (V4SFmode))
3311 rs6000_recip_bits[V4SFmode] |= RS6000_RECIP_MASK_HAVE_RSQRTE;
3312 if (VECTOR_UNIT_VSX_P (V2DFmode))
3313 rs6000_recip_bits[V2DFmode] |= RS6000_RECIP_MASK_HAVE_RSQRTE;
3314
3315 if (rs6000_recip_control)
3316 {
3317 if (!flag_finite_math_only)
3318 warning (0, "%qs requires %qs or %qs", "-mrecip",
3319 "-ffinite-math-only", "-ffast-math");
3320 if (flag_trapping_math)
3321 warning (0, "%qs requires %qs or %qs", "-mrecip",
3322 "-fno-trapping-math", "-ffast-math");
3323 if (!flag_reciprocal_math)
3324 warning (0, "%qs requires %qs or %qs", "-mrecip", "-freciprocal-math",
3325 "-ffast-math");
3326 if (flag_finite_math_only && !flag_trapping_math && flag_reciprocal_math)
3327 {
3328 if (RS6000_RECIP_HAVE_RE_P (SFmode)
3329 && (rs6000_recip_control & RECIP_SF_DIV) != 0)
3330 rs6000_recip_bits[SFmode] |= RS6000_RECIP_MASK_AUTO_RE;
3331
3332 if (RS6000_RECIP_HAVE_RE_P (DFmode)
3333 && (rs6000_recip_control & RECIP_DF_DIV) != 0)
3334 rs6000_recip_bits[DFmode] |= RS6000_RECIP_MASK_AUTO_RE;
3335
3336 if (RS6000_RECIP_HAVE_RE_P (V4SFmode)
3337 && (rs6000_recip_control & RECIP_V4SF_DIV) != 0)
3338 rs6000_recip_bits[V4SFmode] |= RS6000_RECIP_MASK_AUTO_RE;
3339
3340 if (RS6000_RECIP_HAVE_RE_P (V2DFmode)
3341 && (rs6000_recip_control & RECIP_V2DF_DIV) != 0)
3342 rs6000_recip_bits[V2DFmode] |= RS6000_RECIP_MASK_AUTO_RE;
3343
3344 if (RS6000_RECIP_HAVE_RSQRTE_P (SFmode)
3345 && (rs6000_recip_control & RECIP_SF_RSQRT) != 0)
3346 rs6000_recip_bits[SFmode] |= RS6000_RECIP_MASK_AUTO_RSQRTE;
3347
3348 if (RS6000_RECIP_HAVE_RSQRTE_P (DFmode)
3349 && (rs6000_recip_control & RECIP_DF_RSQRT) != 0)
3350 rs6000_recip_bits[DFmode] |= RS6000_RECIP_MASK_AUTO_RSQRTE;
3351
3352 if (RS6000_RECIP_HAVE_RSQRTE_P (V4SFmode)
3353 && (rs6000_recip_control & RECIP_V4SF_RSQRT) != 0)
3354 rs6000_recip_bits[V4SFmode] |= RS6000_RECIP_MASK_AUTO_RSQRTE;
3355
3356 if (RS6000_RECIP_HAVE_RSQRTE_P (V2DFmode)
3357 && (rs6000_recip_control & RECIP_V2DF_RSQRT) != 0)
3358 rs6000_recip_bits[V2DFmode] |= RS6000_RECIP_MASK_AUTO_RSQRTE;
3359 }
3360 }
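/* Putting the pieces together: -mrecip -ffast-math on a VSX target passes
   the three checks above, so the AUTO_RE/AUTO_RSQRTE bits are set alongside
   the HAVE bits for whichever of SF/DF/V4SF/V2DF the rs6000_recip_control
   mask selected. */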
3361
3362 /* Update the addr mask bits in reg_addr to help the secondary reload and
3363 legitimate-address support figure out the appropriate addressing to
3364 use. */
3365 rs6000_setup_reg_addr_masks ();
3366
3367 if (global_init_p || TARGET_DEBUG_TARGET)
3368 {
3369 if (TARGET_DEBUG_REG)
3370 rs6000_debug_reg_global ();
3371
3372 if (TARGET_DEBUG_COST || TARGET_DEBUG_REG)
3373 fprintf (stderr,
3374 "SImode variable mult cost = %d\n"
3375 "SImode constant mult cost = %d\n"
3376 "SImode short constant mult cost = %d\n"
3377 "DImode multipliciation cost = %d\n"
3378 "SImode division cost = %d\n"
3379 "DImode division cost = %d\n"
3380 "Simple fp operation cost = %d\n"
3381 "DFmode multiplication cost = %d\n"
3382 "SFmode division cost = %d\n"
3383 "DFmode division cost = %d\n"
3384 "cache line size = %d\n"
3385 "l1 cache size = %d\n"
3386 "l2 cache size = %d\n"
3387 "simultaneous prefetches = %d\n"
3388 "\n",
3389 rs6000_cost->mulsi,
3390 rs6000_cost->mulsi_const,
3391 rs6000_cost->mulsi_const9,
3392 rs6000_cost->muldi,
3393 rs6000_cost->divsi,
3394 rs6000_cost->divdi,
3395 rs6000_cost->fp,
3396 rs6000_cost->dmul,
3397 rs6000_cost->sdiv,
3398 rs6000_cost->ddiv,
3399 rs6000_cost->cache_line_size,
3400 rs6000_cost->l1_cache_size,
3401 rs6000_cost->l2_cache_size,
3402 rs6000_cost->simultaneous_prefetches);
3403 }
3404 }
3405
3406 #if TARGET_MACHO
3407 /* The Darwin version of SUBTARGET_OVERRIDE_OPTIONS. */
3408
3409 static void
3410 darwin_rs6000_override_options (void)
3411 {
3412 /* The Darwin ABI always includes AltiVec; it can't be (validly) turned
3413 off. */
3414 rs6000_altivec_abi = 1;
3415 TARGET_ALTIVEC_VRSAVE = 1;
3416 rs6000_current_abi = ABI_DARWIN;
3417
3418 if (DEFAULT_ABI == ABI_DARWIN
3419 && TARGET_64BIT)
3420 darwin_one_byte_bool = 1;
3421
3422 if (TARGET_64BIT && ! TARGET_POWERPC64)
3423 {
3424 rs6000_isa_flags |= OPTION_MASK_POWERPC64;
3425 warning (0, "%qs requires PowerPC64 architecture, enabling", "-m64");
3426 }
3427 if (flag_mkernel)
3428 {
3429 rs6000_default_long_calls = 1;
3430 rs6000_isa_flags |= OPTION_MASK_SOFT_FLOAT;
3431 }
3432
3433 /* Make -m64 imply -maltivec. Darwin's 64-bit ABI includes
3434 Altivec. */
3435 if (!flag_mkernel && !flag_apple_kext
3436 && TARGET_64BIT
3437 && ! (rs6000_isa_flags_explicit & OPTION_MASK_ALTIVEC))
3438 rs6000_isa_flags |= OPTION_MASK_ALTIVEC;
3439
3440 /* Unless the user (not the configurer) has explicitly overridden
3441 it with -mcpu=G3 or -mno-altivec, 10.5+ targets default to
3442 G4 unless targeting the kernel. */
3443 if (!flag_mkernel
3444 && !flag_apple_kext
3445 && strverscmp (darwin_macosx_version_min, "10.5") >= 0
3446 && ! (rs6000_isa_flags_explicit & OPTION_MASK_ALTIVEC)
3447 && ! global_options_set.x_rs6000_cpu_index)
3448 {
3449 rs6000_isa_flags |= OPTION_MASK_ALTIVEC;
3450 }
3451 }
3452 #endif
3453
3454 /* If not otherwise specified by a target, make 'long double' equivalent to
3455 'double'. */
3456
3457 #ifndef RS6000_DEFAULT_LONG_DOUBLE_SIZE
3458 #define RS6000_DEFAULT_LONG_DOUBLE_SIZE 64
3459 #endif
3460
3461 /* Return the builtin mask of the various options used that could affect which
3462 builtins were used. In the past we used target_flags, but we've run out of
3463 bits, and some options are no longer in target_flags. */
3464
3465 HOST_WIDE_INT
3466 rs6000_builtin_mask_calculate (void)
3467 {
3468 return (((TARGET_ALTIVEC) ? RS6000_BTM_ALTIVEC : 0)
3469 | ((TARGET_CMPB) ? RS6000_BTM_CMPB : 0)
3470 | ((TARGET_VSX) ? RS6000_BTM_VSX : 0)
3471 | ((TARGET_FRE) ? RS6000_BTM_FRE : 0)
3472 | ((TARGET_FRES) ? RS6000_BTM_FRES : 0)
3473 | ((TARGET_FRSQRTE) ? RS6000_BTM_FRSQRTE : 0)
3474 | ((TARGET_FRSQRTES) ? RS6000_BTM_FRSQRTES : 0)
3475 | ((TARGET_POPCNTD) ? RS6000_BTM_POPCNTD : 0)
3476 | ((rs6000_cpu == PROCESSOR_CELL) ? RS6000_BTM_CELL : 0)
3477 | ((TARGET_P8_VECTOR) ? RS6000_BTM_P8_VECTOR : 0)
3478 | ((TARGET_P9_VECTOR) ? RS6000_BTM_P9_VECTOR : 0)
3479 | ((TARGET_P9_MISC) ? RS6000_BTM_P9_MISC : 0)
3480 | ((TARGET_MODULO) ? RS6000_BTM_MODULO : 0)
3481 | ((TARGET_64BIT) ? RS6000_BTM_64BIT : 0)
3482 | ((TARGET_POWERPC64) ? RS6000_BTM_POWERPC64 : 0)
3483 | ((TARGET_CRYPTO) ? RS6000_BTM_CRYPTO : 0)
3484 | ((TARGET_HTM) ? RS6000_BTM_HTM : 0)
3485 | ((TARGET_DFP) ? RS6000_BTM_DFP : 0)
3486 | ((TARGET_HARD_FLOAT) ? RS6000_BTM_HARD_FLOAT : 0)
3487 | ((TARGET_LONG_DOUBLE_128
3488 && TARGET_HARD_FLOAT
3489 && !TARGET_IEEEQUAD) ? RS6000_BTM_LDBL128 : 0)
3490 | ((TARGET_FLOAT128_TYPE) ? RS6000_BTM_FLOAT128 : 0)
3491 | ((TARGET_FLOAT128_HW) ? RS6000_BTM_FLOAT128_HW : 0));
3492 }
3493
3494 /* Implement TARGET_MD_ASM_ADJUST. All asm statements are considered
3495 to clobber the XER[CA] bit because clobbering that bit without telling
3496 the compiler worked just fine with versions of GCC before GCC 5, and
3497 breaking a lot of older code in ways that are hard to track down is
3498 not such a great idea. */
3499
3500 static rtx_insn *
3501 rs6000_md_asm_adjust (vec<rtx> &/*outputs*/, vec<rtx> &/*inputs*/,
3502 vec<const char *> &/*constraints*/,
3503 vec<rtx> &clobbers, HARD_REG_SET &clobbered_regs)
3504 {
3505 clobbers.safe_push (gen_rtx_REG (SImode, CA_REGNO));
3506 SET_HARD_REG_BIT (clobbered_regs, CA_REGNO);
3507 return NULL;
3508 }
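/* For example, an old inline asm like
     asm ("addic %0,%1,1" : "=r" (x) : "r" (y));
   updates XER[CA] without declaring it; the unconditional clobber added
   above keeps such code correct without requiring source changes. */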
3509
3510 /* Override command line options.
3511
3512 Combine build-specific configuration information with options
3513 specified on the command line to set various state variables which
3514 influence code generation, optimization, and expansion of built-in
3515 functions. Assure that command-line configuration preferences are
3516 compatible with each other and with the build configuration; issue
3517 warnings while adjusting configuration or error messages while
3518 rejecting configuration.
3519
3520 Upon entry to this function:
3521
3522 This function is called once at the beginning of
3523 compilation, and then again at the start and end of compiling
3524 each section of code that has a different configuration, as
3525 indicated, for example, by adding the
3526
3527 __attribute__((__target__("cpu=power9")))
3528
3529 qualifier to a function definition or, for example, by bracketing
3530 code between
3531
3532 #pragma GCC target("altivec")
3533
3534 and
3535
3536 #pragma GCC reset_options
3537
3538 directives. Parameter global_init_p is true for the initial
3539 invocation, which initializes global variables, and false for all
3540 subsequent invocations.
3541
3542
3543 Various global state information is assumed to be valid. This
3544 includes OPTION_TARGET_CPU_DEFAULT, representing the name of the
3545 default CPU specified at build configure time, TARGET_DEFAULT,
3546 representing the default set of option flags for the default
3547 target, and global_options_set.x_rs6000_isa_flags, representing
3548 which options were requested on the command line.
3549
3550 Upon return from this function:
3551
3552 rs6000_isa_flags_explicit has a non-zero bit for each flag that
3553 was set by name on the command line. Additionally, if certain
3554 attributes are automatically enabled or disabled by this function
3555 in order to assure compatibility between options and
3556 configuration, the flags associated with those attributes are
3557 also set. By setting these "explicit bits", we avoid the risk
3558 that other code might accidentally overwrite these particular
3559 attributes with "default values".
3560
3561 The various bits of rs6000_isa_flags are set to indicate the
3562 target options that have been selected for the most current
3563 compilation efforts. This has the effect of also turning on the
3564 associated TARGET_XXX values since these are macros which are
3565 generally defined to test the corresponding bit of the
3566 rs6000_isa_flags variable.
3567
3568 The variable rs6000_builtin_mask is set to represent the target
3569 options for the most current compilation efforts, consistent with
3570 the current contents of rs6000_isa_flags. This variable controls
3571 expansion of built-in functions.
3572
3573 Various other global variables and fields of global structures
3574 (over 50 in all) are initialized to reflect the desired options
3575 for the most current compilation efforts. */
3576
3577 static bool
3578 rs6000_option_override_internal (bool global_init_p)
3579 {
3580 bool ret = true;
3581
3582 HOST_WIDE_INT set_masks;
3583 HOST_WIDE_INT ignore_masks;
3584 int cpu_index = -1;
3585 int tune_index;
3586 struct cl_target_option *main_target_opt
3587 = ((global_init_p || target_option_default_node == NULL)
3588 ? NULL : TREE_TARGET_OPTION (target_option_default_node));
3589
3590 /* Print defaults. */
3591 if ((TARGET_DEBUG_REG || TARGET_DEBUG_TARGET) && global_init_p)
3592 rs6000_print_isa_options (stderr, 0, "TARGET_DEFAULT", TARGET_DEFAULT);
3593
3594 /* Remember the explicit arguments. */
3595 if (global_init_p)
3596 rs6000_isa_flags_explicit = global_options_set.x_rs6000_isa_flags;
3597
3598 /* On 64-bit Darwin, power alignment is ABI-incompatible with some C
3599 library functions, so warn about it. The flag may be useful for
3600 performance studies from time to time though, so don't disable it
3601 entirely. */
3602 if (global_options_set.x_rs6000_alignment_flags
3603 && rs6000_alignment_flags == MASK_ALIGN_POWER
3604 && DEFAULT_ABI == ABI_DARWIN
3605 && TARGET_64BIT)
3606 warning (0, "%qs is not supported for 64-bit Darwin;"
3607 " it is incompatible with the installed C and C++ libraries",
3608 "-malign-power");
3609
3610 /* Numerous experiments show that IRA-based loop pressure
3611 calculation works better for RTL loop-invariant motion on targets
3612 with enough (>= 32) registers. It is an expensive optimization,
3613 so it is enabled only when optimizing for peak performance. */
3614 if (optimize >= 3 && global_init_p
3615 && !global_options_set.x_flag_ira_loop_pressure)
3616 flag_ira_loop_pressure = 1;
3617
3618 /* -fsanitize=address needs to turn on -fasynchronous-unwind-tables in order
3619 for tracebacks to be complete but not if any -fasynchronous-unwind-tables
3620 options were already specified. */
3621 if (flag_sanitize & SANITIZE_USER_ADDRESS
3622 && !global_options_set.x_flag_asynchronous_unwind_tables)
3623 flag_asynchronous_unwind_tables = 1;
3624
3625 /* Set the pointer size. */
3626 if (TARGET_64BIT)
3627 {
3628 rs6000_pmode = DImode;
3629 rs6000_pointer_size = 64;
3630 }
3631 else
3632 {
3633 rs6000_pmode = SImode;
3634 rs6000_pointer_size = 32;
3635 }
3636
3637 /* Some OSs don't support saving the high part of 64-bit registers on context
3638 switch. Other OSs don't support saving Altivec registers. On those OSs,
3639 we don't touch the OPTION_MASK_POWERPC64 or OPTION_MASK_ALTIVEC settings;
3640 if the user wants either, the user must explicitly specify them and we
3641 won't interfere with the user's specification. */
3642
3643 set_masks = POWERPC_MASKS;
3644 #ifdef OS_MISSING_POWERPC64
3645 if (OS_MISSING_POWERPC64)
3646 set_masks &= ~OPTION_MASK_POWERPC64;
3647 #endif
3648 #ifdef OS_MISSING_ALTIVEC
3649 if (OS_MISSING_ALTIVEC)
3650 set_masks &= ~(OPTION_MASK_ALTIVEC | OPTION_MASK_VSX
3651 | OTHER_VSX_VECTOR_MASKS);
3652 #endif
3653
3654 /* Don't override by the processor default if given explicitly. */
3655 set_masks &= ~rs6000_isa_flags_explicit;
3656
3657 if (global_init_p && rs6000_dejagnu_cpu_index >= 0)
3658 rs6000_cpu_index = rs6000_dejagnu_cpu_index;
3659
3660 /* Process the -mcpu=<xxx> and -mtune=<xxx> arguments. If the user changed
3661 the cpu in a target attribute or pragma, but did not specify a tuning
3662 option, use the cpu for the tuning option rather than the option specified
3663 with -mtune on the command line. Process a '--with-cpu' configuration
3664 request as an implicit --cpu. */
3665 if (rs6000_cpu_index >= 0)
3666 cpu_index = rs6000_cpu_index;
3667 else if (main_target_opt != NULL && main_target_opt->x_rs6000_cpu_index >= 0)
3668 cpu_index = main_target_opt->x_rs6000_cpu_index;
3669 else if (OPTION_TARGET_CPU_DEFAULT)
3670 cpu_index = rs6000_cpu_name_lookup (OPTION_TARGET_CPU_DEFAULT);
3671
3672 /* If we have a cpu, either through an explicit -mcpu=<xxx> or if the
3673 compiler was configured with --with-cpu=<xxx>, replace all of the ISA bits
3674 with those from the cpu, except for options that were explicitly set. If
3675 we don't have a cpu, do not override the target bits set in
3676 TARGET_DEFAULT. */
3677 if (cpu_index >= 0)
3678 {
3679 rs6000_cpu_index = cpu_index;
3680 rs6000_isa_flags &= ~set_masks;
3681 rs6000_isa_flags |= (processor_target_table[cpu_index].target_enable
3682 & set_masks);
3683 }
3684 else
3685 {
3686 /* If no -mcpu=<xxx>, inherit any default options that were cleared via
3687 POWERPC_MASKS. Originally, TARGET_DEFAULT was used to initialize
3688 target_flags via the TARGET_DEFAULT_TARGET_FLAGS hook. Since we switched
3689 to using rs6000_isa_flags, we need to do the initialization here.
3690
3691 If there is a TARGET_DEFAULT, use that. Otherwise fall back to using
3692 -mcpu=powerpc, -mcpu=powerpc64, or -mcpu=powerpc64le defaults. */
3693 HOST_WIDE_INT flags;
3694 if (TARGET_DEFAULT)
3695 flags = TARGET_DEFAULT;
3696 else
3697 {
3698 /* PowerPC 64-bit LE requires at least ISA 2.07. */
3699 const char *default_cpu = (!TARGET_POWERPC64
3700 ? "powerpc"
3701 : (BYTES_BIG_ENDIAN
3702 ? "powerpc64"
3703 : "powerpc64le"));
3704 int default_cpu_index = rs6000_cpu_name_lookup (default_cpu);
3705 flags = processor_target_table[default_cpu_index].target_enable;
3706 }
3707 rs6000_isa_flags |= (flags & ~rs6000_isa_flags_explicit);
3708 }
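/* Concretely: a compiler configured with --with-cpu=power8 resolves
   cpu_index above and copies that table entry's target_enable bits (minus
   explicit user overrides), while a bare default build falls through to
   TARGET_DEFAULT or the generic powerpc/powerpc64/powerpc64le entries. */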
3709
3710 if (rs6000_tune_index >= 0)
3711 tune_index = rs6000_tune_index;
3712 else if (cpu_index >= 0)
3713 rs6000_tune_index = tune_index = cpu_index;
3714 else
3715 {
3716 size_t i;
3717 enum processor_type tune_proc
3718 = (TARGET_POWERPC64 ? PROCESSOR_DEFAULT64 : PROCESSOR_DEFAULT);
3719
3720 tune_index = -1;
3721 for (i = 0; i < ARRAY_SIZE (processor_target_table); i++)
3722 if (processor_target_table[i].processor == tune_proc)
3723 {
3724 tune_index = i;
3725 break;
3726 }
3727 }
3728
3729 if (cpu_index >= 0)
3730 rs6000_cpu = processor_target_table[cpu_index].processor;
3731 else
3732 rs6000_cpu = TARGET_POWERPC64 ? PROCESSOR_DEFAULT64 : PROCESSOR_DEFAULT;
3733
3734 gcc_assert (tune_index >= 0);
3735 rs6000_tune = processor_target_table[tune_index].processor;
3736
3737 if (rs6000_cpu == PROCESSOR_PPCE300C2 || rs6000_cpu == PROCESSOR_PPCE300C3
3738 || rs6000_cpu == PROCESSOR_PPCE500MC || rs6000_cpu == PROCESSOR_PPCE500MC64
3739 || rs6000_cpu == PROCESSOR_PPCE5500)
3740 {
3741 if (TARGET_ALTIVEC)
3742 error ("AltiVec not supported in this target");
3743 }
3744
3745 /* If we are optimizing big endian systems for space, use the load/store
3746 multiple instructions. */
3747 if (BYTES_BIG_ENDIAN && optimize_size)
3748 rs6000_isa_flags |= ~rs6000_isa_flags_explicit & OPTION_MASK_MULTIPLE;
3749
3750 /* Don't allow -mmultiple on little endian systems unless the cpu is a 750,
3751 because the hardware doesn't support the instructions used in little
3752 endian mode, and using them causes an alignment trap. The 750 does not
3753 cause an alignment trap (except when the target is unaligned). */
3754
3755 if (!BYTES_BIG_ENDIAN && rs6000_cpu != PROCESSOR_PPC750 && TARGET_MULTIPLE)
3756 {
3757 rs6000_isa_flags &= ~OPTION_MASK_MULTIPLE;
3758 if ((rs6000_isa_flags_explicit & OPTION_MASK_MULTIPLE) != 0)
3759 warning (0, "%qs is not supported on little endian systems",
3760 "-mmultiple");
3761 }
3762
3763 /* If little-endian, default to -mstrict-align on older processors.
3764 Testing for htm matches power8 and later. */
3765 if (!BYTES_BIG_ENDIAN
3766 && !(processor_target_table[tune_index].target_enable & OPTION_MASK_HTM))
3767 rs6000_isa_flags |= ~rs6000_isa_flags_explicit & OPTION_MASK_STRICT_ALIGN;
3768
3769 if (!rs6000_fold_gimple)
3770 fprintf (stderr,
3771 "gimple folding of rs6000 builtins has been disabled.\n");
3772
3773 /* Add some warnings for VSX. */
3774 if (TARGET_VSX)
3775 {
3776 const char *msg = NULL;
3777 if (!TARGET_HARD_FLOAT)
3778 {
3779 if (rs6000_isa_flags_explicit & OPTION_MASK_VSX)
3780 msg = N_("%<-mvsx%> requires hardware floating point");
3781 else
3782 {
3783 rs6000_isa_flags &= ~ OPTION_MASK_VSX;
3784 rs6000_isa_flags_explicit |= OPTION_MASK_VSX;
3785 }
3786 }
3787 else if (TARGET_AVOID_XFORM > 0)
3788 msg = N_("%<-mvsx%> needs indexed addressing");
3789 else if (!TARGET_ALTIVEC && (rs6000_isa_flags_explicit
3790 & OPTION_MASK_ALTIVEC))
3791 {
3792 if (rs6000_isa_flags_explicit & OPTION_MASK_VSX)
3793 msg = N_("%<-mvsx%> and %<-mno-altivec%> are incompatible");
3794 else
3795 msg = N_("%<-mno-altivec%> disables vsx");
3796 }
3797
3798 if (msg)
3799 {
3800 warning (0, msg);
3801 rs6000_isa_flags &= ~ OPTION_MASK_VSX;
3802 rs6000_isa_flags_explicit |= OPTION_MASK_VSX;
3803 }
3804 }
3805
3806 /* If hard-float/altivec/vsx were explicitly turned off then don't allow
3807 the -mcpu setting to enable options that conflict. */
3808 if ((!TARGET_HARD_FLOAT || !TARGET_ALTIVEC || !TARGET_VSX)
3809 && (rs6000_isa_flags_explicit & (OPTION_MASK_SOFT_FLOAT
3810 | OPTION_MASK_ALTIVEC
3811 | OPTION_MASK_VSX)) != 0)
3812 rs6000_isa_flags &= ~((OPTION_MASK_P8_VECTOR | OPTION_MASK_CRYPTO
3813 | OPTION_MASK_DIRECT_MOVE)
3814 & ~rs6000_isa_flags_explicit);
3815
3816 if (TARGET_DEBUG_REG || TARGET_DEBUG_TARGET)
3817 rs6000_print_isa_options (stderr, 0, "before defaults", rs6000_isa_flags);
3818
3819 /* Handle explicit -mno-{altivec,vsx,power8-vector,power9-vector} and turn
3820 off all of the options that depend on those flags. */
3821 ignore_masks = rs6000_disable_incompatible_switches ();
3822
3823 /* For the newer switches (vsx, dfp, etc.) set some of the older options,
3824 unless the user explicitly used the -mno-<option> to disable the code. */
3825 if (TARGET_P9_VECTOR || TARGET_MODULO || TARGET_P9_MISC)
3826 rs6000_isa_flags |= (ISA_3_0_MASKS_SERVER & ~ignore_masks);
3827 else if (TARGET_P9_MINMAX)
3828 {
3829 if (cpu_index >= 0)
3830 {
3831 if (processor_target_table[cpu_index].processor == PROCESSOR_POWER9)
3832 {
3833 /* Legacy behavior: allow -mcpu=power9 with certain
3834 capabilities explicitly disabled. */
3835 rs6000_isa_flags |= (ISA_3_0_MASKS_SERVER & ~ignore_masks);
3836 }
3837 else
3838 error ("power9 target option is incompatible with %<%s=<xxx>%> "
3839 "for <xxx> less than power9", "-mcpu");
3840 }
3841 else if ((ISA_3_0_MASKS_SERVER & rs6000_isa_flags_explicit)
3842 != (ISA_3_0_MASKS_SERVER & rs6000_isa_flags
3843 & rs6000_isa_flags_explicit))
3844 /* Enforce that none of the ISA_3_0_MASKS_SERVER flags
3845 were explicitly cleared. */
3846 error ("%qs incompatible with explicitly disabled options",
3847 "-mpower9-minmax");
3848 else
3849 rs6000_isa_flags |= ISA_3_0_MASKS_SERVER;
3850 }
3851 else if (TARGET_P8_VECTOR || TARGET_DIRECT_MOVE || TARGET_CRYPTO)
3852 rs6000_isa_flags |= (ISA_2_7_MASKS_SERVER & ~ignore_masks);
3853 else if (TARGET_VSX)
3854 rs6000_isa_flags |= (ISA_2_6_MASKS_SERVER & ~ignore_masks);
3855 else if (TARGET_POPCNTD)
3856 rs6000_isa_flags |= (ISA_2_6_MASKS_EMBEDDED & ~ignore_masks);
3857 else if (TARGET_DFP)
3858 rs6000_isa_flags |= (ISA_2_5_MASKS_SERVER & ~ignore_masks);
3859 else if (TARGET_CMPB)
3860 rs6000_isa_flags |= (ISA_2_5_MASKS_EMBEDDED & ~ignore_masks);
3861 else if (TARGET_FPRND)
3862 rs6000_isa_flags |= (ISA_2_4_MASKS & ~ignore_masks);
3863 else if (TARGET_POPCNTB)
3864 rs6000_isa_flags |= (ISA_2_2_MASKS & ~ignore_masks);
3865 else if (TARGET_ALTIVEC)
3866 rs6000_isa_flags |= (OPTION_MASK_PPC_GFXOPT & ~ignore_masks);
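/* For instance, a lone -mvsx selects the ISA_2_6_MASKS_SERVER arm above,
   which implicitly turns on Altivec and the other 2.06 server options,
   while a lone -maltivec only adds OPTION_MASK_PPC_GFXOPT; in both cases
   ignore_masks keeps explicitly disabled bits off. */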
3867
3868 if (TARGET_CRYPTO && !TARGET_ALTIVEC)
3869 {
3870 if (rs6000_isa_flags_explicit & OPTION_MASK_CRYPTO)
3871 error ("%qs requires %qs", "-mcrypto", "-maltivec");
3872 rs6000_isa_flags &= ~OPTION_MASK_CRYPTO;
3873 }
3874
3875 if (TARGET_DIRECT_MOVE && !TARGET_VSX)
3876 {
3877 if (rs6000_isa_flags_explicit & OPTION_MASK_DIRECT_MOVE)
3878 error ("%qs requires %qs", "-mdirect-move", "-mvsx");
3879 rs6000_isa_flags &= ~OPTION_MASK_DIRECT_MOVE;
3880 }
3881
3882 if (TARGET_P8_VECTOR && !TARGET_ALTIVEC)
3883 {
3884 if (rs6000_isa_flags_explicit & OPTION_MASK_P8_VECTOR)
3885 error ("%qs requires %qs", "-mpower8-vector", "-maltivec");
3886 rs6000_isa_flags &= ~OPTION_MASK_P8_VECTOR;
3887 }
3888
3889 if (TARGET_P8_VECTOR && !TARGET_VSX)
3890 {
3891 if ((rs6000_isa_flags_explicit & OPTION_MASK_P8_VECTOR)
3892 && (rs6000_isa_flags_explicit & OPTION_MASK_VSX))
3893 error ("%qs requires %qs", "-mpower8-vector", "-mvsx");
3894 else if ((rs6000_isa_flags_explicit & OPTION_MASK_P8_VECTOR) == 0)
3895 {
3896 rs6000_isa_flags &= ~OPTION_MASK_P8_VECTOR;
3897 if (rs6000_isa_flags_explicit & OPTION_MASK_VSX)
3898 rs6000_isa_flags_explicit |= OPTION_MASK_P8_VECTOR;
3899 }
3900 else
3901 {
3902 /* OPTION_MASK_P8_VECTOR is explicit, and OPTION_MASK_VSX is
3903 not explicit. */
3904 rs6000_isa_flags |= OPTION_MASK_VSX;
3905 rs6000_isa_flags_explicit |= OPTION_MASK_VSX;
3906 }
3907 }
3908
3909 if (TARGET_DFP && !TARGET_HARD_FLOAT)
3910 {
3911 if (rs6000_isa_flags_explicit & OPTION_MASK_DFP)
3912 error ("%qs requires %qs", "-mhard-dfp", "-mhard-float");
3913 rs6000_isa_flags &= ~OPTION_MASK_DFP;
3914 }
3915
3916 /* The quad memory instructions only work in 64-bit mode. In 32-bit mode,
3917 silently turn off quad memory mode. */
3918 if ((TARGET_QUAD_MEMORY || TARGET_QUAD_MEMORY_ATOMIC) && !TARGET_POWERPC64)
3919 {
3920 if ((rs6000_isa_flags_explicit & OPTION_MASK_QUAD_MEMORY) != 0)
3921 warning (0, N_("%<-mquad-memory%> requires 64-bit mode"));
3922
3923 if ((rs6000_isa_flags_explicit & OPTION_MASK_QUAD_MEMORY_ATOMIC) != 0)
3924 warning (0, N_("%<-mquad-memory-atomic%> requires 64-bit mode"));
3925
3926 rs6000_isa_flags &= ~(OPTION_MASK_QUAD_MEMORY
3927 | OPTION_MASK_QUAD_MEMORY_ATOMIC);
3928 }
3929
3930 /* Non-atomic quad memory loads/stores are disabled for little endian, since
3931 the words are reversed, but atomic operations can still be done by
3932 swapping the words. */
3933 if (TARGET_QUAD_MEMORY && !WORDS_BIG_ENDIAN)
3934 {
3935 if ((rs6000_isa_flags_explicit & OPTION_MASK_QUAD_MEMORY) != 0)
3936 warning (0, N_("%<-mquad-memory%> is not available in little endian "
3937 "mode"));
3938
3939 rs6000_isa_flags &= ~OPTION_MASK_QUAD_MEMORY;
3940 }
3941
3942 /* Assume that if the user asked for normal quad memory instructions, they
3943 want the atomic versions as well, unless they explicitly told us not to
3944 use quad word atomic instructions. */
3945 if (TARGET_QUAD_MEMORY
3946 && !TARGET_QUAD_MEMORY_ATOMIC
3947 && ((rs6000_isa_flags_explicit & OPTION_MASK_QUAD_MEMORY_ATOMIC) == 0))
3948 rs6000_isa_flags |= OPTION_MASK_QUAD_MEMORY_ATOMIC;
3949
3950 /* If we can shrink-wrap the TOC register save separately, then use
3951 -msave-toc-indirect unless explicitly disabled. */
3952 if ((rs6000_isa_flags_explicit & OPTION_MASK_SAVE_TOC_INDIRECT) == 0
3953 && flag_shrink_wrap_separate
3954 && optimize_function_for_speed_p (cfun))
3955 rs6000_isa_flags |= OPTION_MASK_SAVE_TOC_INDIRECT;
3956
3957 /* Enable power8 fusion if we are tuning for power8, even if we aren't
3958 generating power8 instructions. Power9 does not optimize power8 fusion
3959 cases. */
3960 if (!(rs6000_isa_flags_explicit & OPTION_MASK_P8_FUSION))
3961 {
3962 if (processor_target_table[tune_index].processor == PROCESSOR_POWER8)
3963 rs6000_isa_flags |= OPTION_MASK_P8_FUSION;
3964 else
3965 rs6000_isa_flags &= ~OPTION_MASK_P8_FUSION;
3966 }
3967
3968 /* Setting additional fusion flags turns on base fusion. */
3969 if (!TARGET_P8_FUSION && TARGET_P8_FUSION_SIGN)
3970 {
3971 if (rs6000_isa_flags_explicit & OPTION_MASK_P8_FUSION)
3972 {
3973 if (TARGET_P8_FUSION_SIGN)
3974 error ("%qs requires %qs", "-mpower8-fusion-sign",
3975 "-mpower8-fusion");
3976
3977 rs6000_isa_flags &= ~OPTION_MASK_P8_FUSION;
3978 }
3979 else
3980 rs6000_isa_flags |= OPTION_MASK_P8_FUSION;
3981 }
3982
3983 /* Power8 does not fuse sign-extended loads with the addis instruction. If
3984 we are optimizing at high levels for speed, convert a sign-extended load
3985 into a zero-extending load and an explicit sign extension. */
3986 if (TARGET_P8_FUSION
3987 && !(rs6000_isa_flags_explicit & OPTION_MASK_P8_FUSION_SIGN)
3988 && optimize_function_for_speed_p (cfun)
3989 && optimize >= 3)
3990 rs6000_isa_flags |= OPTION_MASK_P8_FUSION_SIGN;
3991
3992 /* ISA 3.0 vector instructions include ISA 2.07. */
3993 if (TARGET_P9_VECTOR && !TARGET_P8_VECTOR)
3994 {
3995 /* We prefer to not mention undocumented options in
3996 error messages. However, if users have managed to select
3997 power9-vector without selecting power8-vector, they
3998 already know about undocumented flags. */
3999 if ((rs6000_isa_flags_explicit & OPTION_MASK_P9_VECTOR)
4000 && (rs6000_isa_flags_explicit & OPTION_MASK_P8_VECTOR))
4001 error ("%qs requires %qs", "-mpower9-vector", "-mpower8-vector");
4002 else if ((rs6000_isa_flags_explicit & OPTION_MASK_P9_VECTOR) == 0)
4003 {
4004 rs6000_isa_flags &= ~OPTION_MASK_P9_VECTOR;
4005 if (rs6000_isa_flags_explicit & OPTION_MASK_P8_VECTOR)
4006 rs6000_isa_flags_explicit |= OPTION_MASK_P9_VECTOR;
4007 }
4008 else
4009 {
4010 /* OPTION_MASK_P9_VECTOR is explicit and
4011 OPTION_MASK_P8_VECTOR is not explicit. */
4012 rs6000_isa_flags |= OPTION_MASK_P8_VECTOR;
4013 rs6000_isa_flags_explicit |= OPTION_MASK_P8_VECTOR;
4014 }
4015 }
4016
4017 /* Set -mallow-movmisalign explicitly on if we have full ISA 2.07
4018 support. If we only have ISA 2.06 support, and the user did not specify
4019 the switch, leave it set to -1 so the movmisalign patterns are enabled,
4020 but we don't enable the full vectorization support. */
4021 if (TARGET_ALLOW_MOVMISALIGN == -1 && TARGET_P8_VECTOR && TARGET_DIRECT_MOVE)
4022 TARGET_ALLOW_MOVMISALIGN = 1;
4023
4024 else if (TARGET_ALLOW_MOVMISALIGN && !TARGET_VSX)
4025 {
4026 if (TARGET_ALLOW_MOVMISALIGN > 0
4027 && global_options_set.x_TARGET_ALLOW_MOVMISALIGN)
4028 error ("%qs requires %qs", "-mallow-movmisalign", "-mvsx");
4029
4030 TARGET_ALLOW_MOVMISALIGN = 0;
4031 }
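/* The resulting tri-state: -1 leaves only the movmisalign patterns enabled
   (the ISA 2.06 default), 0 disables them (no VSX, or by user request), and
   1 enables full unaligned vector support (ISA 2.07). */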
4032
4033 /* Determine when unaligned vector accesses are permitted, and when
4034 they are preferred over masked Altivec loads. Note that if
4035 TARGET_ALLOW_MOVMISALIGN has been disabled by the user, then
4036 TARGET_EFFICIENT_UNALIGNED_VSX must be as well. The converse is
4037 not true. */
4038 if (TARGET_EFFICIENT_UNALIGNED_VSX)
4039 {
4040 if (!TARGET_VSX)
4041 {
4042 if (rs6000_isa_flags_explicit & OPTION_MASK_EFFICIENT_UNALIGNED_VSX)
4043 error ("%qs requires %qs", "-mefficient-unaligned-vsx", "-mvsx");
4044
4045 rs6000_isa_flags &= ~OPTION_MASK_EFFICIENT_UNALIGNED_VSX;
4046 }
4047
4048 else if (!TARGET_ALLOW_MOVMISALIGN)
4049 {
4050 if (rs6000_isa_flags_explicit & OPTION_MASK_EFFICIENT_UNALIGNED_VSX)
4051 error ("%qs requires %qs", "-mefficient-unaligned-vsx",
4052 "-mallow-movmisalign");
4053
4054 rs6000_isa_flags &= ~OPTION_MASK_EFFICIENT_UNALIGNED_VSX;
4055 }
4056 }
4057
4058 /* Use long double size to select the appropriate long double. We use
4059 TYPE_PRECISION to differentiate the 3 different long double types. We map
4060 128 into the precision used for TFmode. */
4061 int default_long_double_size = (RS6000_DEFAULT_LONG_DOUBLE_SIZE == 64
4062 ? 64
4063 : FLOAT_PRECISION_TFmode);
4064
4065 /* Set long double size before the IEEE 128-bit tests. */
4066 if (!global_options_set.x_rs6000_long_double_type_size)
4067 {
4068 if (main_target_opt != NULL
4069 && (main_target_opt->x_rs6000_long_double_type_size
4070 != default_long_double_size))
4071 error ("target attribute or pragma changes %<long double%> size");
4072 else
4073 rs6000_long_double_type_size = default_long_double_size;
4074 }
4075 else if (rs6000_long_double_type_size == 128)
4076 rs6000_long_double_type_size = FLOAT_PRECISION_TFmode;
4077 else if (global_options_set.x_rs6000_ieeequad)
4078 {
4079 if (global_options.x_rs6000_ieeequad)
4080 error ("%qs requires %qs", "-mabi=ieeelongdouble", "-mlong-double-128");
4081 else
4082 error ("%qs requires %qs", "-mabi=ibmlongdouble", "-mlong-double-128");
4083 }
4084
4085 /* Set -mabi=ieeelongdouble on some old targets. In the future, power server
4086 systems will also set long double to be IEEE 128-bit. AIX and Darwin
4087 explicitly redefine TARGET_IEEEQUAD and TARGET_IEEEQUAD_DEFAULT to 0, so
4088 those systems will not pick up this default. Warn if the user changes the
4089 default unless -Wno-psabi. */
4090 if (!global_options_set.x_rs6000_ieeequad)
4091 rs6000_ieeequad = TARGET_IEEEQUAD_DEFAULT;
4092
4093 else
4094 {
4095 if (global_options.x_rs6000_ieeequad
4096 && (!TARGET_POPCNTD || !TARGET_VSX))
4097 error ("%qs requires full ISA 2.06 support", "-mabi=ieeelongdouble");
4098
4099 if (rs6000_ieeequad != TARGET_IEEEQUAD_DEFAULT && TARGET_LONG_DOUBLE_128)
4100 {
4101 static bool warned_change_long_double;
4102 if (!warned_change_long_double)
4103 {
4104 warned_change_long_double = true;
4105 if (TARGET_IEEEQUAD)
4106 warning (OPT_Wpsabi, "using IEEE extended precision "
4107 "%<long double%>");
4108 else
4109 warning (OPT_Wpsabi, "using IBM extended precision "
4110 "%<long double%>");
4111 }
4112 }
4113 }
4114
4115 /* Enable the default support for IEEE 128-bit floating point on Linux VSX
4116 systems. In GCC 7, we would enable the IEEE 128-bit floating point
4117 infrastructure (-mfloat128-type) but not enable the actual __float128 type
4118 unless the user used the explicit -mfloat128. In GCC 8, we enable both
4119 the keyword and the type. */
4120 TARGET_FLOAT128_TYPE = TARGET_FLOAT128_ENABLE_TYPE && TARGET_VSX;
4121
4122 /* IEEE 128-bit floating point requires VSX support. */
4123 if (TARGET_FLOAT128_KEYWORD)
4124 {
4125 if (!TARGET_VSX)
4126 {
4127 if ((rs6000_isa_flags_explicit & OPTION_MASK_FLOAT128_KEYWORD) != 0)
4128 error ("%qs requires VSX support", "%<-mfloat128%>");
4129
4130 TARGET_FLOAT128_TYPE = 0;
4131 rs6000_isa_flags &= ~(OPTION_MASK_FLOAT128_KEYWORD
4132 | OPTION_MASK_FLOAT128_HW);
4133 }
4134 else if (!TARGET_FLOAT128_TYPE)
4135 {
4136 TARGET_FLOAT128_TYPE = 1;
4137 warning (0, "The %<-mfloat128%> option may not be fully supported");
4138 }
4139 }
4140
4141 /* Enable the __float128 keyword under Linux by default. */
4142 if (TARGET_FLOAT128_TYPE && !TARGET_FLOAT128_KEYWORD
4143 && (rs6000_isa_flags_explicit & OPTION_MASK_FLOAT128_KEYWORD) == 0)
4144 rs6000_isa_flags |= OPTION_MASK_FLOAT128_KEYWORD;
4145
4146 /* If we are supporting the float128 type and have full ISA 3.0 support,
4147 enable -mfloat128-hardware by default. However, don't enable the
4148 __float128 keyword if it was explicitly turned off. 64-bit mode is needed
4149 because sometimes the compiler wants to put things in an integer
4150 container, and if we don't have __int128 support, it is impossible. */
4151 if (TARGET_FLOAT128_TYPE && !TARGET_FLOAT128_HW && TARGET_64BIT
4152 && (rs6000_isa_flags & ISA_3_0_MASKS_IEEE) == ISA_3_0_MASKS_IEEE
4153 && !(rs6000_isa_flags_explicit & OPTION_MASK_FLOAT128_HW))
4154 rs6000_isa_flags |= OPTION_MASK_FLOAT128_HW;
4155
4156 if (TARGET_FLOAT128_HW
4157 && (rs6000_isa_flags & ISA_3_0_MASKS_IEEE) != ISA_3_0_MASKS_IEEE)
4158 {
4159 if ((rs6000_isa_flags_explicit & OPTION_MASK_FLOAT128_HW) != 0)
4160 error ("%qs requires full ISA 3.0 support", "%<-mfloat128-hardware%>");
4161
4162 rs6000_isa_flags &= ~OPTION_MASK_FLOAT128_HW;
4163 }
4164
4165 if (TARGET_FLOAT128_HW && !TARGET_64BIT)
4166 {
4167 if ((rs6000_isa_flags_explicit & OPTION_MASK_FLOAT128_HW) != 0)
4168 error ("%qs requires %qs", "%<-mfloat128-hardware%>", "-m64");
4169
4170 rs6000_isa_flags &= ~OPTION_MASK_FLOAT128_HW;
4171 }
4172
4173 /* -mprefixed-addr (and hence -mpcrel) requires -mcpu=future. */
4174 if (TARGET_PREFIXED_ADDR && !TARGET_FUTURE)
4175 {
4176 if ((rs6000_isa_flags_explicit & OPTION_MASK_PCREL) != 0)
4177 error ("%qs requires %qs", "-mpcrel", "-mcpu=future");
4178 else if ((rs6000_isa_flags_explicit & OPTION_MASK_PREFIXED_ADDR) != 0)
4179 error ("%qs requires %qs", "-mprefixed-addr", "-mcpu=future");
4180
4181 rs6000_isa_flags &= ~(OPTION_MASK_PCREL | OPTION_MASK_PREFIXED_ADDR);
4182 }
4183
4184 /* -mpcrel requires prefixed load/store addressing. */
4185 if (TARGET_PCREL && !TARGET_PREFIXED_ADDR)
4186 {
4187 if ((rs6000_isa_flags_explicit & OPTION_MASK_PCREL) != 0)
4188 error ("%qs requires %qs", "-mpcrel", "-mprefixed-addr");
4189
4190 rs6000_isa_flags &= ~OPTION_MASK_PCREL;
4191 }
4192
4193 /* Print the options after updating the defaults. */
4194 if (TARGET_DEBUG_REG || TARGET_DEBUG_TARGET)
4195 rs6000_print_isa_options (stderr, 0, "after defaults", rs6000_isa_flags);
4196
4197 /* E500mc does "better" if we inline more aggressively. Respect the
4198 user's opinion, though. */
4199 if (rs6000_block_move_inline_limit == 0
4200 && (rs6000_tune == PROCESSOR_PPCE500MC
4201 || rs6000_tune == PROCESSOR_PPCE500MC64
4202 || rs6000_tune == PROCESSOR_PPCE5500
4203 || rs6000_tune == PROCESSOR_PPCE6500))
4204 rs6000_block_move_inline_limit = 128;
4205
4206 /* store_one_arg depends on expand_block_move to handle at least the
4207 size of reg_parm_stack_space. */
4208 if (rs6000_block_move_inline_limit < (TARGET_POWERPC64 ? 64 : 32))
4209 rs6000_block_move_inline_limit = (TARGET_POWERPC64 ? 64 : 32);
4210
4211 if (global_init_p)
4212 {
4213 /* If the appropriate debug option is enabled, replace the target hooks
4214 with debug versions that call the real version and then prints
4215 debugging information. */
4216 if (TARGET_DEBUG_COST)
4217 {
4218 targetm.rtx_costs = rs6000_debug_rtx_costs;
4219 targetm.address_cost = rs6000_debug_address_cost;
4220 targetm.sched.adjust_cost = rs6000_debug_adjust_cost;
4221 }
4222
4223 if (TARGET_DEBUG_ADDR)
4224 {
4225 targetm.legitimate_address_p = rs6000_debug_legitimate_address_p;
4226 targetm.legitimize_address = rs6000_debug_legitimize_address;
4227 rs6000_secondary_reload_class_ptr
4228 = rs6000_debug_secondary_reload_class;
4229 targetm.secondary_memory_needed
4230 = rs6000_debug_secondary_memory_needed;
4231 targetm.can_change_mode_class
4232 = rs6000_debug_can_change_mode_class;
4233 rs6000_preferred_reload_class_ptr
4234 = rs6000_debug_preferred_reload_class;
4235 rs6000_mode_dependent_address_ptr
4236 = rs6000_debug_mode_dependent_address;
4237 }
4238
4239 if (rs6000_veclibabi_name)
4240 {
4241 if (strcmp (rs6000_veclibabi_name, "mass") == 0)
4242 rs6000_veclib_handler = rs6000_builtin_vectorized_libmass;
4243 else
4244 {
4245 error ("unknown vectorization library ABI type (%qs) for "
4246 "%qs switch", rs6000_veclibabi_name, "-mveclibabi=");
4247 ret = false;
4248 }
4249 }
4250 }
4251
4252 /* Disable VSX and Altivec silently if the user switched cpus to power7 in a
4253 target attribute or pragma which automatically enables both options,
4254 unless the altivec ABI was set. This is set by default for 64-bit, but
4255 not for 32-bit. */
4256 if (main_target_opt != NULL && !main_target_opt->x_rs6000_altivec_abi)
4257 {
4258 TARGET_FLOAT128_TYPE = 0;
4259 rs6000_isa_flags &= ~((OPTION_MASK_VSX | OPTION_MASK_ALTIVEC
4260 | OPTION_MASK_FLOAT128_KEYWORD)
4261 & ~rs6000_isa_flags_explicit);
4262 }
4263
4264 /* Enable Altivec ABI for AIX -maltivec. */
4265 if (TARGET_XCOFF && (TARGET_ALTIVEC || TARGET_VSX))
4266 {
4267 if (main_target_opt != NULL && !main_target_opt->x_rs6000_altivec_abi)
4268 error ("target attribute or pragma changes AltiVec ABI");
4269 else
4270 rs6000_altivec_abi = 1;
4271 }
4272
4273 /* The AltiVec ABI is the default for PowerPC-64 GNU/Linux. For
4274 PowerPC-32 GNU/Linux, -maltivec implies the AltiVec ABI. It can
4275 be explicitly overridden in either case. */
4276 if (TARGET_ELF)
4277 {
4278 if (!global_options_set.x_rs6000_altivec_abi
4279 && (TARGET_64BIT || TARGET_ALTIVEC || TARGET_VSX))
4280 {
4281 if (main_target_opt != NULL &&
4282 !main_target_opt->x_rs6000_altivec_abi)
4283 error ("target attribute or pragma changes AltiVec ABI");
4284 else
4285 rs6000_altivec_abi = 1;
4286 }
4287 }
4288
4289 /* Set the Darwin64 ABI as default for 64-bit Darwin.
4290 So far, the only darwin64 targets are also MACH-O. */
4291 if (TARGET_MACHO
4292 && DEFAULT_ABI == ABI_DARWIN
4293 && TARGET_64BIT)
4294 {
4295 if (main_target_opt != NULL && !main_target_opt->x_rs6000_darwin64_abi)
4296 error ("target attribute or pragma changes darwin64 ABI");
4297 else
4298 {
4299 rs6000_darwin64_abi = 1;
4300 /* Default to natural alignment, for better performance. */
4301 rs6000_alignment_flags = MASK_ALIGN_NATURAL;
4302 }
4303 }
4304
4305 /* Place FP constants in the constant pool instead of TOC
4306 if section anchors are enabled. */
4307 if (flag_section_anchors
4308 && !global_options_set.x_TARGET_NO_FP_IN_TOC)
4309 TARGET_NO_FP_IN_TOC = 1;
4310
4311 if (TARGET_DEBUG_REG || TARGET_DEBUG_TARGET)
4312 rs6000_print_isa_options (stderr, 0, "before subtarget", rs6000_isa_flags);
4313
4314 #ifdef SUBTARGET_OVERRIDE_OPTIONS
4315 SUBTARGET_OVERRIDE_OPTIONS;
4316 #endif
4317 #ifdef SUBSUBTARGET_OVERRIDE_OPTIONS
4318 SUBSUBTARGET_OVERRIDE_OPTIONS;
4319 #endif
4320 #ifdef SUB3TARGET_OVERRIDE_OPTIONS
4321 SUB3TARGET_OVERRIDE_OPTIONS;
4322 #endif
4323
4324 if (TARGET_DEBUG_REG || TARGET_DEBUG_TARGET)
4325 rs6000_print_isa_options (stderr, 0, "after subtarget", rs6000_isa_flags);
4326
4327 rs6000_always_hint = (rs6000_tune != PROCESSOR_POWER4
4328 && rs6000_tune != PROCESSOR_POWER5
4329 && rs6000_tune != PROCESSOR_POWER6
4330 && rs6000_tune != PROCESSOR_POWER7
4331 && rs6000_tune != PROCESSOR_POWER8
4332 && rs6000_tune != PROCESSOR_POWER9
4333 && rs6000_tune != PROCESSOR_FUTURE
4334 && rs6000_tune != PROCESSOR_PPCA2
4335 && rs6000_tune != PROCESSOR_CELL
4336 && rs6000_tune != PROCESSOR_PPC476);
4337 rs6000_sched_groups = (rs6000_tune == PROCESSOR_POWER4
4338 || rs6000_tune == PROCESSOR_POWER5
4339 || rs6000_tune == PROCESSOR_POWER7
4340 || rs6000_tune == PROCESSOR_POWER8);
4341 rs6000_align_branch_targets = (rs6000_tune == PROCESSOR_POWER4
4342 || rs6000_tune == PROCESSOR_POWER5
4343 || rs6000_tune == PROCESSOR_POWER6
4344 || rs6000_tune == PROCESSOR_POWER7
4345 || rs6000_tune == PROCESSOR_POWER8
4346 || rs6000_tune == PROCESSOR_POWER9
4347 || rs6000_tune == PROCESSOR_FUTURE
4348 || rs6000_tune == PROCESSOR_PPCE500MC
4349 || rs6000_tune == PROCESSOR_PPCE500MC64
4350 || rs6000_tune == PROCESSOR_PPCE5500
4351 || rs6000_tune == PROCESSOR_PPCE6500);
4352
4353 /* Allow debug switches to override the above settings. These are set to -1
4354 in rs6000.opt to indicate the user hasn't directly set the switch. */
4355 if (TARGET_ALWAYS_HINT >= 0)
4356 rs6000_always_hint = TARGET_ALWAYS_HINT;
4357
4358 if (TARGET_SCHED_GROUPS >= 0)
4359 rs6000_sched_groups = TARGET_SCHED_GROUPS;
4360
4361 if (TARGET_ALIGN_BRANCH_TARGETS >= 0)
4362 rs6000_align_branch_targets = TARGET_ALIGN_BRANCH_TARGETS;
4363
4364 rs6000_sched_restricted_insns_priority
4365 = (rs6000_sched_groups ? 1 : 0);
4366
4367 /* Handle -msched-costly-dep option. */
4368 rs6000_sched_costly_dep
4369 = (rs6000_sched_groups ? true_store_to_load_dep_costly : no_dep_costly);
4370
4371 if (rs6000_sched_costly_dep_str)
4372 {
4373 if (! strcmp (rs6000_sched_costly_dep_str, "no"))
4374 rs6000_sched_costly_dep = no_dep_costly;
4375 else if (! strcmp (rs6000_sched_costly_dep_str, "all"))
4376 rs6000_sched_costly_dep = all_deps_costly;
4377 else if (! strcmp (rs6000_sched_costly_dep_str, "true_store_to_load"))
4378 rs6000_sched_costly_dep = true_store_to_load_dep_costly;
4379 else if (! strcmp (rs6000_sched_costly_dep_str, "store_to_load"))
4380 rs6000_sched_costly_dep = store_to_load_dep_costly;
4381 else
4382 rs6000_sched_costly_dep = ((enum rs6000_dependence_cost)
4383 atoi (rs6000_sched_costly_dep_str));
4384 }
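/* So -msched-costly-dep accepts "no", "all", "true_store_to_load",
   "store_to_load", or a bare integer cost threshold parsed with atoi. */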
4385
4386 /* Handle -minsert-sched-nops option. */
4387 rs6000_sched_insert_nops
4388 = (rs6000_sched_groups ? sched_finish_regroup_exact : sched_finish_none);
4389
4390 if (rs6000_sched_insert_nops_str)
4391 {
4392 if (! strcmp (rs6000_sched_insert_nops_str, "no"))
4393 rs6000_sched_insert_nops = sched_finish_none;
4394 else if (! strcmp (rs6000_sched_insert_nops_str, "pad"))
4395 rs6000_sched_insert_nops = sched_finish_pad_groups;
4396 else if (! strcmp (rs6000_sched_insert_nops_str, "regroup_exact"))
4397 rs6000_sched_insert_nops = sched_finish_regroup_exact;
4398 else
4399 rs6000_sched_insert_nops = ((enum rs6000_nop_insertion)
4400 atoi (rs6000_sched_insert_nops_str));
4401 }
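/* Likewise -minsert-sched-nops accepts "no", "pad", "regroup_exact", or a
   bare integer mapped onto the rs6000_nop_insertion enumeration. */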
4402
4403 /* Handle stack protector */
4404 if (!global_options_set.x_rs6000_stack_protector_guard)
4405 #ifdef TARGET_THREAD_SSP_OFFSET
4406 rs6000_stack_protector_guard = SSP_TLS;
4407 #else
4408 rs6000_stack_protector_guard = SSP_GLOBAL;
4409 #endif
4410
4411 #ifdef TARGET_THREAD_SSP_OFFSET
4412 rs6000_stack_protector_guard_offset = TARGET_THREAD_SSP_OFFSET;
4413 rs6000_stack_protector_guard_reg = TARGET_64BIT ? 13 : 2;
4414 #endif
4415
4416 if (global_options_set.x_rs6000_stack_protector_guard_offset_str)
4417 {
4418 char *endp;
4419 const char *str = rs6000_stack_protector_guard_offset_str;
4420
4421 errno = 0;
4422 long offset = strtol (str, &endp, 0);
4423 if (!*str || *endp || errno)
4424 error ("%qs is not a valid number in %qs", str,
4425 "-mstack-protector-guard-offset=");
4426
4427 if (!IN_RANGE (offset, -0x8000, 0x7fff)
4428 || (TARGET_64BIT && (offset & 3)))
4429 error ("%qs is not a valid offset in %qs", str,
4430 "-mstack-protector-guard-offset=");
4431
4432 rs6000_stack_protector_guard_offset = offset;
4433 }
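/* For example, -mstack-protector-guard-offset=0x28: strtol with base 0
   accepts decimal, hex, or octal; the value must fit a signed 16-bit
   displacement, and in 64-bit mode it must also be a multiple of 4
   (presumably so DS-form loads can reach it). */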
4434
4435 if (global_options_set.x_rs6000_stack_protector_guard_reg_str)
4436 {
4437 const char *str = rs6000_stack_protector_guard_reg_str;
4438 int reg = decode_reg_name (str);
4439
4440 if (!IN_RANGE (reg, 1, 31))
4441 error ("%qs is not a valid base register in %qs", str,
4442 "-mstack-protector-guard-reg=");
4443
4444 rs6000_stack_protector_guard_reg = reg;
4445 }
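/* For example, -mstack-protector-guard-reg=r13 matches the 64-bit TLS
   default set above; r0 is excluded because it cannot serve as a base
   register. */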
4446
4447 if (rs6000_stack_protector_guard == SSP_TLS
4448 && !IN_RANGE (rs6000_stack_protector_guard_reg, 1, 31))
4449 error ("%qs needs a valid base register", "-mstack-protector-guard=tls");
4450
4451 if (global_init_p)
4452 {
4453 #ifdef TARGET_REGNAMES
4454 /* If the user desires alternate register names, copy in the
4455 alternate names now. */
4456 if (TARGET_REGNAMES)
4457 memcpy (rs6000_reg_names, alt_reg_names, sizeof (rs6000_reg_names));
4458 #endif
4459
4460 /* Set aix_struct_return last, after the ABI is determined.
4461 If -maix-struct-return or -msvr4-struct-return was explicitly
4462 used, don't override with the ABI default. */
4463 if (!global_options_set.x_aix_struct_return)
4464 aix_struct_return = (DEFAULT_ABI != ABI_V4 || DRAFT_V4_STRUCT_RET);
4465
4466 #if 0
4467 /* IBM XL compiler defaults to unsigned bitfields. */
4468 if (TARGET_XL_COMPAT)
4469 flag_signed_bitfields = 0;
4470 #endif
4471
4472 if (TARGET_LONG_DOUBLE_128 && !TARGET_IEEEQUAD)
4473 REAL_MODE_FORMAT (TFmode) = &ibm_extended_format;
4474
4475 ASM_GENERATE_INTERNAL_LABEL (toc_label_name, "LCTOC", 1);
4476
4477 /* We can only guarantee the availability of DI pseudo-ops when
4478 assembling for 64-bit targets. */
4479 if (!TARGET_64BIT)
4480 {
4481 targetm.asm_out.aligned_op.di = NULL;
4482 targetm.asm_out.unaligned_op.di = NULL;
4483 }
4484
4486 /* Set branch target alignment, if not optimizing for size. */
4487 if (!optimize_size)
4488 {
4489 /* Cell wants to be 8-byte aligned for dual issue. Titan wants to be
4490 8-byte aligned to avoid misprediction by the branch predictor. */
4491 if (rs6000_tune == PROCESSOR_TITAN
4492 || rs6000_tune == PROCESSOR_CELL)
4493 {
4494 if (flag_align_functions && !str_align_functions)
4495 str_align_functions = "8";
4496 if (flag_align_jumps && !str_align_jumps)
4497 str_align_jumps = "8";
4498 if (flag_align_loops && !str_align_loops)
4499 str_align_loops = "8";
4500 }
4501 if (rs6000_align_branch_targets)
4502 {
4503 if (flag_align_functions && !str_align_functions)
4504 str_align_functions = "16";
4505 if (flag_align_jumps && !str_align_jumps)
4506 str_align_jumps = "16";
4507 if (flag_align_loops && !str_align_loops)
4508 {
4509 can_override_loop_align = 1;
4510 str_align_loops = "16";
4511 }
4512 }
4513
4514 if (flag_align_jumps && !str_align_jumps)
4515 str_align_jumps = "16";
4516 if (flag_align_loops && !str_align_loops)
4517 str_align_loops = "16";
4518 }
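/* These strings feed the generic -falign-* machinery, so setting "16" here
   is equivalent to the user passing -falign-functions=16 and friends on
   the command line. */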
4519
4520 /* Arrange to save and restore machine status around nested functions. */
4521 init_machine_status = rs6000_init_machine_status;
4522
4523 /* We should always be splitting complex arguments, but we can't break
4524 Linux and Darwin ABIs at the moment. For now, only AIX is fixed. */
4525 if (DEFAULT_ABI == ABI_V4 || DEFAULT_ABI == ABI_DARWIN)
4526 targetm.calls.split_complex_arg = NULL;
4527
4528 /* The AIX and ELFv1 ABIs define standard function descriptors. */
4529 if (DEFAULT_ABI == ABI_AIX)
4530 targetm.calls.custom_function_descriptors = 0;
4531 }
4532
4533 /* Initialize rs6000_cost with the appropriate target costs. */
4534 if (optimize_size)
4535 rs6000_cost = TARGET_POWERPC64 ? &size64_cost : &size32_cost;
4536 else
4537 switch (rs6000_tune)
4538 {
4539 case PROCESSOR_RS64A:
4540 rs6000_cost = &rs64a_cost;
4541 break;
4542
4543 case PROCESSOR_MPCCORE:
4544 rs6000_cost = &mpccore_cost;
4545 break;
4546
4547 case PROCESSOR_PPC403:
4548 rs6000_cost = &ppc403_cost;
4549 break;
4550
4551 case PROCESSOR_PPC405:
4552 rs6000_cost = &ppc405_cost;
4553 break;
4554
4555 case PROCESSOR_PPC440:
4556 rs6000_cost = &ppc440_cost;
4557 break;
4558
4559 case PROCESSOR_PPC476:
4560 rs6000_cost = &ppc476_cost;
4561 break;
4562
4563 case PROCESSOR_PPC601:
4564 rs6000_cost = &ppc601_cost;
4565 break;
4566
4567 case PROCESSOR_PPC603:
4568 rs6000_cost = &ppc603_cost;
4569 break;
4570
4571 case PROCESSOR_PPC604:
4572 rs6000_cost = &ppc604_cost;
4573 break;
4574
4575 case PROCESSOR_PPC604e:
4576 rs6000_cost = &ppc604e_cost;
4577 break;
4578
4579 case PROCESSOR_PPC620:
4580 rs6000_cost = &ppc620_cost;
4581 break;
4582
4583 case PROCESSOR_PPC630:
4584 rs6000_cost = &ppc630_cost;
4585 break;
4586
4587 case PROCESSOR_CELL:
4588 rs6000_cost = &ppccell_cost;
4589 break;
4590
4591 case PROCESSOR_PPC750:
4592 case PROCESSOR_PPC7400:
4593 rs6000_cost = &ppc750_cost;
4594 break;
4595
4596 case PROCESSOR_PPC7450:
4597 rs6000_cost = &ppc7450_cost;
4598 break;
4599
4600 case PROCESSOR_PPC8540:
4601 case PROCESSOR_PPC8548:
4602 rs6000_cost = &ppc8540_cost;
4603 break;
4604
4605 case PROCESSOR_PPCE300C2:
4606 case PROCESSOR_PPCE300C3:
4607 rs6000_cost = &ppce300c2c3_cost;
4608 break;
4609
4610 case PROCESSOR_PPCE500MC:
4611 rs6000_cost = &ppce500mc_cost;
4612 break;
4613
4614 case PROCESSOR_PPCE500MC64:
4615 rs6000_cost = &ppce500mc64_cost;
4616 break;
4617
4618 case PROCESSOR_PPCE5500:
4619 rs6000_cost = &ppce5500_cost;
4620 break;
4621
4622 case PROCESSOR_PPCE6500:
4623 rs6000_cost = &ppce6500_cost;
4624 break;
4625
4626 case PROCESSOR_TITAN:
4627 rs6000_cost = &titan_cost;
4628 break;
4629
4630 case PROCESSOR_POWER4:
4631 case PROCESSOR_POWER5:
4632 rs6000_cost = &power4_cost;
4633 break;
4634
4635 case PROCESSOR_POWER6:
4636 rs6000_cost = &power6_cost;
4637 break;
4638
4639 case PROCESSOR_POWER7:
4640 rs6000_cost = &power7_cost;
4641 break;
4642
4643 case PROCESSOR_POWER8:
4644 rs6000_cost = &power8_cost;
4645 break;
4646
4647 case PROCESSOR_POWER9:
4648 case PROCESSOR_FUTURE:
4649 rs6000_cost = &power9_cost;
4650 break;
4651
4652 case PROCESSOR_PPCA2:
4653 rs6000_cost = &ppca2_cost;
4654 break;
4655
4656 default:
4657 gcc_unreachable ();
4658 }
4659
4660 if (global_init_p)
4661 {
4662 maybe_set_param_value (PARAM_SIMULTANEOUS_PREFETCHES,
4663 rs6000_cost->simultaneous_prefetches,
4664 global_options.x_param_values,
4665 global_options_set.x_param_values);
4666 maybe_set_param_value (PARAM_L1_CACHE_SIZE, rs6000_cost->l1_cache_size,
4667 global_options.x_param_values,
4668 global_options_set.x_param_values);
4669 maybe_set_param_value (PARAM_L1_CACHE_LINE_SIZE,
4670 rs6000_cost->cache_line_size,
4671 global_options.x_param_values,
4672 global_options_set.x_param_values);
4673 maybe_set_param_value (PARAM_L2_CACHE_SIZE, rs6000_cost->l2_cache_size,
4674 global_options.x_param_values,
4675 global_options_set.x_param_values);
4676
4677 /* Increase loop peeling limits based on performance analysis. */
4678 maybe_set_param_value (PARAM_MAX_PEELED_INSNS, 400,
4679 global_options.x_param_values,
4680 global_options_set.x_param_values);
4681 maybe_set_param_value (PARAM_MAX_COMPLETELY_PEELED_INSNS, 400,
4682 global_options.x_param_values,
4683 global_options_set.x_param_values);
4684
4685 /* Use the 'model' -fsched-pressure algorithm by default. */
4686 maybe_set_param_value (PARAM_SCHED_PRESSURE_ALGORITHM,
4687 SCHED_PRESSURE_MODEL,
4688 global_options.x_param_values,
4689 global_options_set.x_param_values);
4690
4691 /* If using typedef char *va_list, signal that
4692 __builtin_va_start (&ap, 0) can be optimized to
4693 ap = __builtin_next_arg (0). */
4694 if (DEFAULT_ABI != ABI_V4)
4695 targetm.expand_builtin_va_start = NULL;
4696 }
4697
4698 /* If not explicitly specified via option, decide whether to generate indexed
4699 load/store instructions. A value of -1 indicates that the
4700 initial value of this variable has not been overwritten. During
4701 compilation, TARGET_AVOID_XFORM is either 0 or 1. */
4702 if (TARGET_AVOID_XFORM == -1)
4703 /* Avoid indexed addressing when targeting Power6 in order to avoid the
4704 DERAT mispredict penalty. However the LVE and STVE altivec instructions
4705 need indexed accesses and the type used is the scalar type of the element
4706 being loaded or stored. */
4707 TARGET_AVOID_XFORM = (rs6000_tune == PROCESSOR_POWER6 && TARGET_CMPB
4708 && !TARGET_ALTIVEC);
4709
4710 /* Set the -mrecip options. */
4711 if (rs6000_recip_name)
4712 {
4713 char *p = ASTRDUP (rs6000_recip_name);
4714 char *q;
4715 unsigned int mask, i;
4716 bool invert;
4717
4718 while ((q = strtok (p, ",")) != NULL)
4719 {
4720 p = NULL;
4721 if (*q == '!')
4722 {
4723 invert = true;
4724 q++;
4725 }
4726 else
4727 invert = false;
4728
4729 if (!strcmp (q, "default"))
4730 mask = ((TARGET_RECIP_PRECISION)
4731 ? RECIP_HIGH_PRECISION : RECIP_LOW_PRECISION);
4732 else
4733 {
4734 for (i = 0; i < ARRAY_SIZE (recip_options); i++)
4735 if (!strcmp (q, recip_options[i].string))
4736 {
4737 mask = recip_options[i].mask;
4738 break;
4739 }
4740
4741 if (i == ARRAY_SIZE (recip_options))
4742 {
4743 error ("unknown option for %<%s=%s%>", "-mrecip", q);
4744 invert = false;
4745 mask = 0;
4746 ret = false;
4747 }
4748 }
4749
4750 if (invert)
4751 rs6000_recip_control &= ~mask;
4752 else
4753 rs6000_recip_control |= mask;
4754 }
4755 }
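/* For example, "-mrecip=default,!<name>" first ORs in the precision-based
   default mask and then clears the bits for <name>; an unrecognized <name>
   is diagnosed and makes this function report failure. */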
4756
4757 /* Set the builtin mask of the various options used that could affect which
4758 builtins were used. In the past we used target_flags, but we've run out
4759 of bits, and some options are no longer in target_flags. */
4760 rs6000_builtin_mask = rs6000_builtin_mask_calculate ();
4761 if (TARGET_DEBUG_BUILTIN || TARGET_DEBUG_TARGET)
4762 rs6000_print_builtin_options (stderr, 0, "builtin mask",
4763 rs6000_builtin_mask);
4764
4765 /* Initialize all of the registers. */
4766 rs6000_init_hard_regno_mode_ok (global_init_p);
4767
4768 /* Save the initial options in case the user uses function-specific options. */
4769 if (global_init_p)
4770 target_option_default_node = target_option_current_node
4771 = build_target_option_node (&global_options);
4772
4773 /* If not explicitly specified via option, decide whether to generate the
4774 extra blr's required to preserve the link stack on some cpus (e.g., 476). */
4775 if (TARGET_LINK_STACK == -1)
4776 SET_TARGET_LINK_STACK (rs6000_tune == PROCESSOR_PPC476 && flag_pic);
4777
4778 /* Deprecate use of -mno-speculate-indirect-jumps. */
4779 if (!rs6000_speculate_indirect_jumps)
4780 warning (0, "%qs is deprecated and not recommended in any circumstances",
4781 "-mno-speculate-indirect-jumps");
4782
4783 return ret;
4784 }
4785
4786 /* Implement TARGET_OPTION_OVERRIDE. On the RS/6000 this is used to
4787 define the target cpu type. */
4788
4789 static void
4790 rs6000_option_override (void)
4791 {
4792 (void) rs6000_option_override_internal (true);
4793 }
4794
4795 \f
4796 /* Implement targetm.vectorize.builtin_mask_for_load. */
4797 static tree
4798 rs6000_builtin_mask_for_load (void)
4799 {
4800 /* Don't use lvsl/vperm for P8 and similarly efficient machines. */
4801 if ((TARGET_ALTIVEC && !TARGET_VSX)
4802 || (TARGET_VSX && !TARGET_EFFICIENT_UNALIGNED_VSX))
4803 return altivec_builtin_mask_for_load;
4804 else
4805 return 0;
4806 }
4807
4808 /* Implement LOOP_ALIGN. */
4809 align_flags
4810 rs6000_loop_align (rtx label)
4811 {
4812 basic_block bb;
4813 int ninsns;
4814
4815 /* Don't override loop alignment if -falign-loops was specified. */
4816 if (!can_override_loop_align)
4817 return align_loops;
4818
4819 bb = BLOCK_FOR_INSN (label);
4820 ninsns = num_loop_insns (bb->loop_father);
4821
4822 /* Align small loops to 32 bytes to fit in an icache sector; otherwise return the default. */
4823 if (ninsns > 4 && ninsns <= 8
4824 && (rs6000_tune == PROCESSOR_POWER4
4825 || rs6000_tune == PROCESSOR_POWER5
4826 || rs6000_tune == PROCESSOR_POWER6
4827 || rs6000_tune == PROCESSOR_POWER7
4828 || rs6000_tune == PROCESSOR_POWER8))
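/* align_flags stores a log2 value, so 5 requests 2**5 == 32-byte
   alignment, matching the icache-sector comment above. */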
4829 return align_flags (5);
4830 else
4831 return align_loops;
4832 }
4833
4834 /* Return true iff a data reference of TYPE can reach vector alignment (16)
4835 after applying N iterations. This routine does not determine how many
4836 iterations are required to reach the desired alignment. */
4837
4838 static bool
4839 rs6000_vector_alignment_reachable (const_tree type ATTRIBUTE_UNUSED, bool is_packed)
4840 {
4841 if (is_packed)
4842 return false;
4843
4844 if (TARGET_32BIT)
4845 {
4846 if (rs6000_alignment_flags == MASK_ALIGN_NATURAL)
4847 return true;
4848
4849 if (rs6000_alignment_flags == MASK_ALIGN_POWER)
4850 return true;
4851
4852 return false;
4853 }
4854 else
4855 {
4856 if (TARGET_MACHO)
4857 return false;
4858
4859 /* Assume that all other types are naturally aligned. CHECKME! */
4860 return true;
4861 }
4862 }
4863
4864 /* Return true if the vector misalignment factor is supported by the
4865 target. */
4866 static bool
4867 rs6000_builtin_support_vector_misalignment (machine_mode mode,
4868 const_tree type,
4869 int misalignment,
4870 bool is_packed)
4871 {
4872 if (TARGET_VSX)
4873 {
4874 if (TARGET_EFFICIENT_UNALIGNED_VSX)
4875 return true;
4876
4877 /* Return false if the movmisalign pattern is not supported for this mode. */
4878 if (optab_handler (movmisalign_optab, mode) == CODE_FOR_nothing)
4879 return false;
4880
4881 if (misalignment == -1)
4882 {
4883 /* Misalignment factor is unknown at compile time but we know
4884 it's word aligned. */
4885 if (rs6000_vector_alignment_reachable (type, is_packed))
4886 {
4887 int element_size = TREE_INT_CST_LOW (TYPE_SIZE (type));
4888
4889 if (element_size == 64 || element_size == 32)
4890 return true;
4891 }
4892
4893 return false;
4894 }
4895
4896 /* VSX supports word-aligned vector accesses. */
4897 if (misalignment % 4 == 0)
4898 return true;
4899 }
4900 return false;
4901 }
4902
4903 /* Implement targetm.vectorize.builtin_vectorization_cost. */
4904 static int
4905 rs6000_builtin_vectorization_cost (enum vect_cost_for_stmt type_of_cost,
4906 tree vectype, int misalign)
4907 {
4908 unsigned elements;
4909 tree elem_type;
4910
4911 switch (type_of_cost)
4912 {
4913 case scalar_stmt:
4914 case scalar_load:
4915 case scalar_store:
4916 case vector_stmt:
4917 case vector_load:
4918 case vector_store:
4919 case vec_to_scalar:
4920 case scalar_to_vec:
4921 case cond_branch_not_taken:
4922 return 1;
4923
4924 case vec_perm:
4925 if (TARGET_VSX)
4926 return 3;
4927 else
4928 return 1;
4929
4930 case vec_promote_demote:
4931 if (TARGET_VSX)
4932 return 4;
4933 else
4934 return 1;
4935
4936 case cond_branch_taken:
4937 return 3;
4938
4939 case unaligned_load:
4940 case vector_gather_load:
4941 if (TARGET_EFFICIENT_UNALIGNED_VSX)
4942 return 1;
4943
4944 if (TARGET_VSX && TARGET_ALLOW_MOVMISALIGN)
4945 {
4946 elements = TYPE_VECTOR_SUBPARTS (vectype);
4947 if (elements == 2)
4948 /* Double word aligned. */
4949 return 2;
4950
4951 if (elements == 4)
4952 {
4953 switch (misalign)
4954 {
4955 case 8:
4956 /* Double word aligned. */
4957 return 2;
4958
4959 case -1:
4960 /* Unknown misalignment. */
4961 case 4:
4962 case 12:
4963 /* Word aligned. */
4964 return 22;
4965
4966 default:
4967 gcc_unreachable ();
4968 }
4969 }
4970 }
4971
4972 if (TARGET_ALTIVEC)
4973 /* Misaligned loads are not supported. */
4974 gcc_unreachable ();
4975
4976 return 2;
4977
4978 case unaligned_store:
4979 case vector_scatter_store:
4980 if (TARGET_EFFICIENT_UNALIGNED_VSX)
4981 return 1;
4982
4983 if (TARGET_VSX && TARGET_ALLOW_MOVMISALIGN)
4984 {
4985 elements = TYPE_VECTOR_SUBPARTS (vectype);
4986 if (elements == 2)
4987 /* Double word aligned. */
4988 return 2;
4989
4990 if (elements == 4)
4991 {
4992 switch (misalign)
4993 {
4994 case 8:
4995 /* Double word aligned. */
4996 return 2;
4997
4998 case -1:
4999 /* Unknown misalignment. */
5000 case 4:
5001 case 12:
5002 /* Word aligned. */
5003 return 23;
5004
5005 default:
5006 gcc_unreachable ();
5007 }
5008 }
5009 }
5010
5011 if (TARGET_ALTIVEC)
5012 /* Misaligned stores are not supported. */
5013 gcc_unreachable ();
5014
5015 return 2;
5016
5017 case vec_construct:
5018 /* This is a rough approximation assuming non-constant elements
5019 constructed into a vector via element insertion. FIXME:
5020 vec_construct is not granular enough for uniformly good
5021 decisions. If the initialization is a splat, this is
5022 cheaper than we estimate. Improve this someday. */
5023 elem_type = TREE_TYPE (vectype);
5024 /* 32-bit floats loaded into registers are stored as double
5025 precision, so we need 2 permutes, 2 converts, and 1 merge
5026 to construct a vector of short floats from them. */
5027 if (SCALAR_FLOAT_TYPE_P (elem_type)
5028 && TYPE_PRECISION (elem_type) == 32)
5029 return 5;
5030 /* On POWER9, integer vector types are built up in GPRs and then
5031 use a direct move (2 cycles). For POWER8 this is even worse,
5032 as we need two direct moves and a merge, and the direct moves
5033 are five cycles. */
5034 else if (INTEGRAL_TYPE_P (elem_type))
5035 {
5036 if (TARGET_P9_VECTOR)
5037 return TYPE_VECTOR_SUBPARTS (vectype) - 1 + 2;
5038 else
5039 return TYPE_VECTOR_SUBPARTS (vectype) - 1 + 5;
5040 }
5041 else
5042 /* V2DFmode doesn't need a direct move. */
5043 return 2;
5044
5045 default:
5046 gcc_unreachable ();
5047 }
5048 }
5049
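/* For example (numbers read off the table above): on a pre-power9 VSX
   target with -mallow-movmisalign, a word-aligned (misalign 4)
   unaligned_load of a 4-element vector costs 22, strongly discouraging
   vectorization that needs such loads, while the same load costs 1 once
   TARGET_EFFICIENT_UNALIGNED_VSX is set.  */
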
5050 /* Implement targetm.vectorize.preferred_simd_mode. */
5051
5052 static machine_mode
5053 rs6000_preferred_simd_mode (scalar_mode mode)
5054 {
5055 if (TARGET_VSX)
5056 switch (mode)
5057 {
5058 case E_DFmode:
5059 return V2DFmode;
5060 default:;
5061 }
5062 if (TARGET_ALTIVEC || TARGET_VSX)
5063 switch (mode)
5064 {
5065 case E_SFmode:
5066 return V4SFmode;
5067 case E_TImode:
5068 return V1TImode;
5069 case E_DImode:
5070 return V2DImode;
5071 case E_SImode:
5072 return V4SImode;
5073 case E_HImode:
5074 return V8HImode;
5075 case E_QImode:
5076 return V16QImode;
5077 default:;
5078 }
5079 return word_mode;
5080 }
5081
5082 typedef struct _rs6000_cost_data
5083 {
5084 struct loop *loop_info;
5085 unsigned cost[3];
5086 } rs6000_cost_data;
5087
5088 /* Test for likely overcommitment of vector hardware resources. If a
5089 loop iteration is relatively large, and too large a percentage of
5090 instructions in the loop are vectorized, the cost model may not
5091 adequately reflect delays from unavailable vector resources.
5092 Penalize the loop body cost for this case. */
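
/* Worked example (invented numbers): with vec_cost 90 and not_vec_cost 5,
   density_pct is 94 and the loop size is 95, so both thresholds below are
   exceeded and the body cost is scaled by DENSITY_PENALTY to
   90 * 110 / 100 = 99.  */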
5093
5094 static void
5095 rs6000_density_test (rs6000_cost_data *data)
5096 {
5097 const int DENSITY_PCT_THRESHOLD = 85;
5098 const int DENSITY_SIZE_THRESHOLD = 70;
5099 const int DENSITY_PENALTY = 10;
5100 struct loop *loop = data->loop_info;
5101 basic_block *bbs = get_loop_body (loop);
5102 int nbbs = loop->num_nodes;
5103 loop_vec_info loop_vinfo = loop_vec_info_for_loop (data->loop_info);
5104 int vec_cost = data->cost[vect_body], not_vec_cost = 0;
5105 int i, density_pct;
5106
5107 for (i = 0; i < nbbs; i++)
5108 {
5109 basic_block bb = bbs[i];
5110 gimple_stmt_iterator gsi;
5111
5112 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
5113 {
5114 gimple *stmt = gsi_stmt (gsi);
5115 stmt_vec_info stmt_info = loop_vinfo->lookup_stmt (stmt);
5116
5117 if (!STMT_VINFO_RELEVANT_P (stmt_info)
5118 && !STMT_VINFO_IN_PATTERN_P (stmt_info))
5119 not_vec_cost++;
5120 }
5121 }
5122
5123 free (bbs);
5124 density_pct = (vec_cost * 100) / (vec_cost + not_vec_cost);
5125
5126 if (density_pct > DENSITY_PCT_THRESHOLD
5127 && vec_cost + not_vec_cost > DENSITY_SIZE_THRESHOLD)
5128 {
5129 data->cost[vect_body] = vec_cost * (100 + DENSITY_PENALTY) / 100;
5130 if (dump_enabled_p ())
5131 dump_printf_loc (MSG_NOTE, vect_location,
5132 "density %d%%, cost %d exceeds threshold, penalizing "
5133 "loop body cost by %d%%", density_pct,
5134 vec_cost + not_vec_cost, DENSITY_PENALTY);
5135 }
5136 }
5137
5138 /* For each vectorized loop, this var holds TRUE iff a non-memory vector
5139 instruction is needed by the vectorization. */
5140 static bool rs6000_vect_nonmem;
5141
5142 /* Implement targetm.vectorize.init_cost. */
5143
5144 static void *
5145 rs6000_init_cost (struct loop *loop_info)
5146 {
5147 rs6000_cost_data *data = XNEW (struct _rs6000_cost_data);
5148 data->loop_info = loop_info;
5149 data->cost[vect_prologue] = 0;
5150 data->cost[vect_body] = 0;
5151 data->cost[vect_epilogue] = 0;
5152 rs6000_vect_nonmem = false;
5153 return data;
5154 }
5155
5156 /* Implement targetm.vectorize.add_stmt_cost. */
5157
5158 static unsigned
5159 rs6000_add_stmt_cost (void *data, int count, enum vect_cost_for_stmt kind,
5160 struct _stmt_vec_info *stmt_info, int misalign,
5161 enum vect_cost_model_location where)
5162 {
5163 rs6000_cost_data *cost_data = (rs6000_cost_data*) data;
5164 unsigned retval = 0;
5165
5166 if (flag_vect_cost_model)
5167 {
5168 tree vectype = stmt_info ? stmt_vectype (stmt_info) : NULL_TREE;
5169 int stmt_cost = rs6000_builtin_vectorization_cost (kind, vectype,
5170 misalign);
5171 /* Statements in an inner loop relative to the loop being
5172 vectorized are weighted more heavily. The value here is
5173 arbitrary and could potentially be improved with analysis. */
5174 if (where == vect_body && stmt_info && stmt_in_inner_loop_p (stmt_info))
5175 count *= 50; /* FIXME. */
5176
5177 retval = (unsigned) (count * stmt_cost);
5178 cost_data->cost[where] += retval;
5179
5180 /* Check whether we're doing something other than just a copy loop.
5181 Not all such loops may be profitably vectorized; see
5182 rs6000_finish_cost. */
5183 if ((kind == vec_to_scalar || kind == vec_perm
5184 || kind == vec_promote_demote || kind == vec_construct
5185 || kind == scalar_to_vec)
5186 || (where == vect_body && kind == vector_stmt))
5187 rs6000_vect_nonmem = true;
5188 }
5189
5190 return retval;
5191 }
5192
5193 /* Implement targetm.vectorize.finish_cost. */
5194
5195 static void
5196 rs6000_finish_cost (void *data, unsigned *prologue_cost,
5197 unsigned *body_cost, unsigned *epilogue_cost)
5198 {
5199 rs6000_cost_data *cost_data = (rs6000_cost_data*) data;
5200
5201 if (cost_data->loop_info)
5202 rs6000_density_test (cost_data);
5203
5204 /* Don't vectorize minimum-vectorization-factor, simple copy loops
5205 that require versioning for any reason. The vectorization is at
5206 best a wash inside the loop, and the versioning checks make
5207 profitability highly unlikely and potentially quite harmful. */
5208 if (cost_data->loop_info)
5209 {
5210 loop_vec_info vec_info = loop_vec_info_for_loop (cost_data->loop_info);
5211 if (!rs6000_vect_nonmem
5212 && LOOP_VINFO_VECT_FACTOR (vec_info) == 2
5213 && LOOP_REQUIRES_VERSIONING (vec_info))
5214 cost_data->cost[vect_body] += 10000;
5215 }
5216
5217 *prologue_cost = cost_data->cost[vect_prologue];
5218 *body_cost = cost_data->cost[vect_body];
5219 *epilogue_cost = cost_data->cost[vect_epilogue];
5220 }
5221
5222 /* Implement targetm.vectorize.destroy_cost_data. */
5223
5224 static void
5225 rs6000_destroy_cost_data (void *data)
5226 {
5227 free (data);
5228 }
5229
5230 /* Handler for the Mathematical Acceleration Subsystem (mass) interface to a
5231 library with vectorized intrinsics. */
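
/* For example, a V2DFmode pow is mapped to the MASS routine "powd2" and a
   V4SFmode powf to "powf4", following the simd "d2"/"f4" naming scheme
   handled below.  */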
5232
5233 static tree
5234 rs6000_builtin_vectorized_libmass (combined_fn fn, tree type_out,
5235 tree type_in)
5236 {
5237 char name[32];
5238 const char *suffix = NULL;
5239 tree fntype, new_fndecl, bdecl = NULL_TREE;
5240 int n_args = 1;
5241 const char *bname;
5242 machine_mode el_mode, in_mode;
5243 int n, in_n;
5244
5245 /* Libmass is suitable only for unsafe math, as it does not correctly support
5246 parts of IEEE that need the full required precision, such as denormals.
5247 Only support it if we have VSX to use the simd d2 or f4 functions.
5248 XXX: Add variable length support. */
5249 if (!flag_unsafe_math_optimizations || !TARGET_VSX)
5250 return NULL_TREE;
5251
5252 el_mode = TYPE_MODE (TREE_TYPE (type_out));
5253 n = TYPE_VECTOR_SUBPARTS (type_out);
5254 in_mode = TYPE_MODE (TREE_TYPE (type_in));
5255 in_n = TYPE_VECTOR_SUBPARTS (type_in);
5256 if (el_mode != in_mode
5257 || n != in_n)
5258 return NULL_TREE;
5259
5260 switch (fn)
5261 {
5262 CASE_CFN_ATAN2:
5263 CASE_CFN_HYPOT:
5264 CASE_CFN_POW:
5265 n_args = 2;
5266 gcc_fallthrough ();
5267
5268 CASE_CFN_ACOS:
5269 CASE_CFN_ACOSH:
5270 CASE_CFN_ASIN:
5271 CASE_CFN_ASINH:
5272 CASE_CFN_ATAN:
5273 CASE_CFN_ATANH:
5274 CASE_CFN_CBRT:
5275 CASE_CFN_COS:
5276 CASE_CFN_COSH:
5277 CASE_CFN_ERF:
5278 CASE_CFN_ERFC:
5279 CASE_CFN_EXP2:
5280 CASE_CFN_EXP:
5281 CASE_CFN_EXPM1:
5282 CASE_CFN_LGAMMA:
5283 CASE_CFN_LOG10:
5284 CASE_CFN_LOG1P:
5285 CASE_CFN_LOG2:
5286 CASE_CFN_LOG:
5287 CASE_CFN_SIN:
5288 CASE_CFN_SINH:
5289 CASE_CFN_SQRT:
5290 CASE_CFN_TAN:
5291 CASE_CFN_TANH:
5292 if (el_mode == DFmode && n == 2)
5293 {
5294 bdecl = mathfn_built_in (double_type_node, fn);
5295 suffix = "d2"; /* pow -> powd2 */
5296 }
5297 else if (el_mode == SFmode && n == 4)
5298 {
5299 bdecl = mathfn_built_in (float_type_node, fn);
5300 suffix = "4"; /* powf -> powf4 */
5301 }
5302 else
5303 return NULL_TREE;
5304 if (!bdecl)
5305 return NULL_TREE;
5306 break;
5307
5308 default:
5309 return NULL_TREE;
5310 }
5311
5312 gcc_assert (suffix != NULL);
5313 bname = IDENTIFIER_POINTER (DECL_NAME (bdecl));
5314 if (!bname)
5315 return NULL_TREE;
5316
5317 strcpy (name, bname + sizeof ("__builtin_") - 1);
5318 strcat (name, suffix);
5319
5320 if (n_args == 1)
5321 fntype = build_function_type_list (type_out, type_in, NULL);
5322 else if (n_args == 2)
5323 fntype = build_function_type_list (type_out, type_in, type_in, NULL);
5324 else
5325 gcc_unreachable ();
5326
5327 /* Build a function declaration for the vectorized function. */
5328 new_fndecl = build_decl (BUILTINS_LOCATION,
5329 FUNCTION_DECL, get_identifier (name), fntype);
5330 TREE_PUBLIC (new_fndecl) = 1;
5331 DECL_EXTERNAL (new_fndecl) = 1;
5332 DECL_IS_NOVOPS (new_fndecl) = 1;
5333 TREE_READONLY (new_fndecl) = 1;
5334
5335 return new_fndecl;
5336 }
5337
5338 /* Returns a function decl for a vectorized version of the builtin function
5339 with builtin function code FN and the result vector type TYPE, or NULL_TREE
5340 if it is not available. */
5341
5342 static tree
5343 rs6000_builtin_vectorized_function (unsigned int fn, tree type_out,
5344 tree type_in)
5345 {
5346 machine_mode in_mode, out_mode;
5347 int in_n, out_n;
5348
5349 if (TARGET_DEBUG_BUILTIN)
5350 fprintf (stderr, "rs6000_builtin_vectorized_function (%s, %s, %s)\n",
5351 combined_fn_name (combined_fn (fn)),
5352 GET_MODE_NAME (TYPE_MODE (type_out)),
5353 GET_MODE_NAME (TYPE_MODE (type_in)));
5354
5355 if (TREE_CODE (type_out) != VECTOR_TYPE
5356 || TREE_CODE (type_in) != VECTOR_TYPE)
5357 return NULL_TREE;
5358
5359 out_mode = TYPE_MODE (TREE_TYPE (type_out));
5360 out_n = TYPE_VECTOR_SUBPARTS (type_out);
5361 in_mode = TYPE_MODE (TREE_TYPE (type_in));
5362 in_n = TYPE_VECTOR_SUBPARTS (type_in);
5363
5364 switch (fn)
5365 {
5366 CASE_CFN_COPYSIGN:
5367 if (VECTOR_UNIT_VSX_P (V2DFmode)
5368 && out_mode == DFmode && out_n == 2
5369 && in_mode == DFmode && in_n == 2)
5370 return rs6000_builtin_decls[VSX_BUILTIN_CPSGNDP];
5371 if (VECTOR_UNIT_VSX_P (V4SFmode)
5372 && out_mode == SFmode && out_n == 4
5373 && in_mode == SFmode && in_n == 4)
5374 return rs6000_builtin_decls[VSX_BUILTIN_CPSGNSP];
5375 if (VECTOR_UNIT_ALTIVEC_P (V4SFmode)
5376 && out_mode == SFmode && out_n == 4
5377 && in_mode == SFmode && in_n == 4)
5378 return rs6000_builtin_decls[ALTIVEC_BUILTIN_COPYSIGN_V4SF];
5379 break;
5380 CASE_CFN_CEIL:
5381 if (VECTOR_UNIT_VSX_P (V2DFmode)
5382 && out_mode == DFmode && out_n == 2
5383 && in_mode == DFmode && in_n == 2)
5384 return rs6000_builtin_decls[VSX_BUILTIN_XVRDPIP];
5385 if (VECTOR_UNIT_VSX_P (V4SFmode)
5386 && out_mode == SFmode && out_n == 4
5387 && in_mode == SFmode && in_n == 4)
5388 return rs6000_builtin_decls[VSX_BUILTIN_XVRSPIP];
5389 if (VECTOR_UNIT_ALTIVEC_P (V4SFmode)
5390 && out_mode == SFmode && out_n == 4
5391 && in_mode == SFmode && in_n == 4)
5392 return rs6000_builtin_decls[ALTIVEC_BUILTIN_VRFIP];
5393 break;
5394 CASE_CFN_FLOOR:
5395 if (VECTOR_UNIT_VSX_P (V2DFmode)
5396 && out_mode == DFmode && out_n == 2
5397 && in_mode == DFmode && in_n == 2)
5398 return rs6000_builtin_decls[VSX_BUILTIN_XVRDPIM];
5399 if (VECTOR_UNIT_VSX_P (V4SFmode)
5400 && out_mode == SFmode && out_n == 4
5401 && in_mode == SFmode && in_n == 4)
5402 return rs6000_builtin_decls[VSX_BUILTIN_XVRSPIM];
5403 if (VECTOR_UNIT_ALTIVEC_P (V4SFmode)
5404 && out_mode == SFmode && out_n == 4
5405 && in_mode == SFmode && in_n == 4)
5406 return rs6000_builtin_decls[ALTIVEC_BUILTIN_VRFIM];
5407 break;
5408 CASE_CFN_FMA:
5409 if (VECTOR_UNIT_VSX_P (V2DFmode)
5410 && out_mode == DFmode && out_n == 2
5411 && in_mode == DFmode && in_n == 2)
5412 return rs6000_builtin_decls[VSX_BUILTIN_XVMADDDP];
5413 if (VECTOR_UNIT_VSX_P (V4SFmode)
5414 && out_mode == SFmode && out_n == 4
5415 && in_mode == SFmode && in_n == 4)
5416 return rs6000_builtin_decls[VSX_BUILTIN_XVMADDSP];
5417 if (VECTOR_UNIT_ALTIVEC_P (V4SFmode)
5418 && out_mode == SFmode && out_n == 4
5419 && in_mode == SFmode && in_n == 4)
5420 return rs6000_builtin_decls[ALTIVEC_BUILTIN_VMADDFP];
5421 break;
5422 CASE_CFN_TRUNC:
5423 if (VECTOR_UNIT_VSX_P (V2DFmode)
5424 && out_mode == DFmode && out_n == 2
5425 && in_mode == DFmode && in_n == 2)
5426 return rs6000_builtin_decls[VSX_BUILTIN_XVRDPIZ];
5427 if (VECTOR_UNIT_VSX_P (V4SFmode)
5428 && out_mode == SFmode && out_n == 4
5429 && in_mode == SFmode && in_n == 4)
5430 return rs6000_builtin_decls[VSX_BUILTIN_XVRSPIZ];
5431 if (VECTOR_UNIT_ALTIVEC_P (V4SFmode)
5432 && out_mode == SFmode && out_n == 4
5433 && in_mode == SFmode && in_n == 4)
5434 return rs6000_builtin_decls[ALTIVEC_BUILTIN_VRFIZ];
5435 break;
5436 CASE_CFN_NEARBYINT:
5437 if (VECTOR_UNIT_VSX_P (V2DFmode)
5438 && flag_unsafe_math_optimizations
5439 && out_mode == DFmode && out_n == 2
5440 && in_mode == DFmode && in_n == 2)
5441 return rs6000_builtin_decls[VSX_BUILTIN_XVRDPI];
5442 if (VECTOR_UNIT_VSX_P (V4SFmode)
5443 && flag_unsafe_math_optimizations
5444 && out_mode == SFmode && out_n == 4
5445 && in_mode == SFmode && in_n == 4)
5446 return rs6000_builtin_decls[VSX_BUILTIN_XVRSPI];
5447 break;
5448 CASE_CFN_RINT:
5449 if (VECTOR_UNIT_VSX_P (V2DFmode)
5450 && !flag_trapping_math
5451 && out_mode == DFmode && out_n == 2
5452 && in_mode == DFmode && in_n == 2)
5453 return rs6000_builtin_decls[VSX_BUILTIN_XVRDPIC];
5454 if (VECTOR_UNIT_VSX_P (V4SFmode)
5455 && !flag_trapping_math
5456 && out_mode == SFmode && out_n == 4
5457 && in_mode == SFmode && in_n == 4)
5458 return rs6000_builtin_decls[VSX_BUILTIN_XVRSPIC];
5459 break;
5460 default:
5461 break;
5462 }
5463
5464 /* Generate calls to libmass if appropriate. */
5465 if (rs6000_veclib_handler)
5466 return rs6000_veclib_handler (combined_fn (fn), type_out, type_in);
5467
5468 return NULL_TREE;
5469 }
5470
5471 /* Implement TARGET_VECTORIZE_BUILTIN_MD_VECTORIZED_FUNCTION. */
5472
5473 static tree
5474 rs6000_builtin_md_vectorized_function (tree fndecl, tree type_out,
5475 tree type_in)
5476 {
5477 machine_mode in_mode, out_mode;
5478 int in_n, out_n;
5479
5480 if (TARGET_DEBUG_BUILTIN)
5481 fprintf (stderr, "rs6000_builtin_md_vectorized_function (%s, %s, %s)\n",
5482 IDENTIFIER_POINTER (DECL_NAME (fndecl)),
5483 GET_MODE_NAME (TYPE_MODE (type_out)),
5484 GET_MODE_NAME (TYPE_MODE (type_in)));
5485
5486 if (TREE_CODE (type_out) != VECTOR_TYPE
5487 || TREE_CODE (type_in) != VECTOR_TYPE)
5488 return NULL_TREE;
5489
5490 out_mode = TYPE_MODE (TREE_TYPE (type_out));
5491 out_n = TYPE_VECTOR_SUBPARTS (type_out);
5492 in_mode = TYPE_MODE (TREE_TYPE (type_in));
5493 in_n = TYPE_VECTOR_SUBPARTS (type_in);
5494
5495 enum rs6000_builtins fn
5496 = (enum rs6000_builtins) DECL_FUNCTION_CODE (fndecl);
5497 switch (fn)
5498 {
5499 case RS6000_BUILTIN_RSQRTF:
5500 if (VECTOR_UNIT_ALTIVEC_OR_VSX_P (V4SFmode)
5501 && out_mode == SFmode && out_n == 4
5502 && in_mode == SFmode && in_n == 4)
5503 return rs6000_builtin_decls[ALTIVEC_BUILTIN_VRSQRTFP];
5504 break;
5505 case RS6000_BUILTIN_RSQRT:
5506 if (VECTOR_UNIT_VSX_P (V2DFmode)
5507 && out_mode == DFmode && out_n == 2
5508 && in_mode == DFmode && in_n == 2)
5509 return rs6000_builtin_decls[VSX_BUILTIN_RSQRT_2DF];
5510 break;
5511 case RS6000_BUILTIN_RECIPF:
5512 if (VECTOR_UNIT_ALTIVEC_OR_VSX_P (V4SFmode)
5513 && out_mode == SFmode && out_n == 4
5514 && in_mode == SFmode && in_n == 4)
5515 return rs6000_builtin_decls[ALTIVEC_BUILTIN_VRECIPFP];
5516 break;
5517 case RS6000_BUILTIN_RECIP:
5518 if (VECTOR_UNIT_VSX_P (V2DFmode)
5519 && out_mode == DFmode && out_n == 2
5520 && in_mode == DFmode && in_n == 2)
5521 return rs6000_builtin_decls[VSX_BUILTIN_RECIP_V2DF];
5522 break;
5523 default:
5524 break;
5525 }
5526 return NULL_TREE;
5527 }
5528 \f
5529 /* Default CPU string for rs6000*_file_start functions. */
5530 static const char *rs6000_default_cpu;
5531
5532 #ifdef USING_ELFOS_H
5533 const char *rs6000_machine;
5534
5535 const char *
5536 rs6000_machine_from_flags (void)
5537 {
5538 if ((rs6000_isa_flags & (ISA_FUTURE_MASKS_SERVER & ~ISA_3_0_MASKS_SERVER))
5539 != 0)
5540 return "future";
5541 if ((rs6000_isa_flags & (ISA_3_0_MASKS_SERVER & ~ISA_2_7_MASKS_SERVER)) != 0)
5542 return "power9";
5543 if ((rs6000_isa_flags & (ISA_2_7_MASKS_SERVER & ~ISA_2_6_MASKS_SERVER)) != 0)
5544 return "power8";
5545 if ((rs6000_isa_flags & (ISA_2_6_MASKS_SERVER & ~ISA_2_5_MASKS_SERVER)) != 0)
5546 return "power7";
5547 if ((rs6000_isa_flags & (ISA_2_5_MASKS_SERVER & ~ISA_2_4_MASKS)) != 0)
5548 return "power6";
5549 if ((rs6000_isa_flags & (ISA_2_4_MASKS & ~ISA_2_1_MASKS)) != 0)
5550 return "power5";
5551 if ((rs6000_isa_flags & ISA_2_1_MASKS) != 0)
5552 return "power4";
5553 if ((rs6000_isa_flags & OPTION_MASK_POWERPC64) != 0)
5554 return "ppc64";
5555 return "ppc";
5556 }
5557
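/* Emit the ".machine" pseudo-op chosen by rs6000_machine_from_flags above;
   e.g. when ISA 3.0 instructions are enabled this prints
   "\t.machine power9".  */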
5558 void
5559 emit_asm_machine (void)
5560 {
5561 fprintf (asm_out_file, "\t.machine %s\n", rs6000_machine);
5562 }
5563 #endif
5564
5565 /* Do anything needed at the start of the asm file. */
5566
5567 static void
5568 rs6000_file_start (void)
5569 {
5570 char buffer[80];
5571 const char *start = buffer;
5572 FILE *file = asm_out_file;
5573
5574 rs6000_default_cpu = TARGET_CPU_DEFAULT;
5575
5576 default_file_start ();
5577
5578 if (flag_verbose_asm)
5579 {
5580 sprintf (buffer, "\n%s rs6000/powerpc options:", ASM_COMMENT_START);
5581
5582 if (rs6000_default_cpu != 0 && rs6000_default_cpu[0] != '\0')
5583 {
5584 fprintf (file, "%s --with-cpu=%s", start, rs6000_default_cpu);
5585 start = "";
5586 }
5587
5588 if (global_options_set.x_rs6000_cpu_index)
5589 {
5590 fprintf (file, "%s -mcpu=%s", start,
5591 processor_target_table[rs6000_cpu_index].name);
5592 start = "";
5593 }
5594
5595 if (global_options_set.x_rs6000_tune_index)
5596 {
5597 fprintf (file, "%s -mtune=%s", start,
5598 processor_target_table[rs6000_tune_index].name);
5599 start = "";
5600 }
5601
5602 if (PPC405_ERRATUM77)
5603 {
5604 fprintf (file, "%s PPC405CR_ERRATUM77", start);
5605 start = "";
5606 }
5607
5608 #ifdef USING_ELFOS_H
5609 switch (rs6000_sdata)
5610 {
5611 case SDATA_NONE: fprintf (file, "%s -msdata=none", start); start = ""; break;
5612 case SDATA_DATA: fprintf (file, "%s -msdata=data", start); start = ""; break;
5613 case SDATA_SYSV: fprintf (file, "%s -msdata=sysv", start); start = ""; break;
5614 case SDATA_EABI: fprintf (file, "%s -msdata=eabi", start); start = ""; break;
5615 }
5616
5617 if (rs6000_sdata && g_switch_value)
5618 {
5619 fprintf (file, "%s -G %d", start,
5620 g_switch_value);
5621 start = "";
5622 }
5623 #endif
5624
5625 if (*start == '\0')
5626 putc ('\n', file);
5627 }
5628
5629 #ifdef USING_ELFOS_H
5630 rs6000_machine = rs6000_machine_from_flags ();
5631 if (!(rs6000_default_cpu && rs6000_default_cpu[0])
5632 && !global_options_set.x_rs6000_cpu_index)
5633 emit_asm_machine ();
5634 #endif
5635
5636 if (DEFAULT_ABI == ABI_ELFv2)
5637 fprintf (file, "\t.abiversion 2\n");
5638 }
5639
5640 \f
5641 /* Return nonzero if this function is known to have a null epilogue. */
5642
5643 int
5644 direct_return (void)
5645 {
5646 if (reload_completed)
5647 {
5648 rs6000_stack_t *info = rs6000_stack_info ();
5649
5650 if (info->first_gp_reg_save == 32
5651 && info->first_fp_reg_save == 64
5652 && info->first_altivec_reg_save == LAST_ALTIVEC_REGNO + 1
5653 && ! info->lr_save_p
5654 && ! info->cr_save_p
5655 && info->vrsave_size == 0
5656 && ! info->push_p)
5657 return 1;
5658 }
5659
5660 return 0;
5661 }
5662
5663 /* Helper for num_insns_constant. Calculate the number of instructions
5664 needed to load VALUE into a single gpr using combinations of addi, addis,
5665 ori, oris and sldi instructions. */
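
/* For example (an illustrative sketch): the 64-bit constant
   0x123456789ABCDEF0 needs the full five-instruction sequence this
   function can return:

	lis   rD,0x1234       # bits 0..15 of the upper word
	ori   rD,rD,0x5678    # bits 16..31 of the upper word
	sldi  rD,rD,32        # shift into the upper doubleword
	oris  rD,rD,0x9ABC    # bits 32..47
	ori   rD,rD,0xDEF0    # bits 48..63  */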
5666
5667 static int
5668 num_insns_constant_gpr (HOST_WIDE_INT value)
5669 {
5670 /* Signed 16-bit constant loadable with a single addi. */
5671 if (((unsigned HOST_WIDE_INT) value + 0x8000) < 0x10000)
5672 return 1;
5673
5674 /* 32-bit constant with zero low 16 bits, loadable with a single addis. */
5675 else if ((value & 0xffff) == 0
5676 && (value >> 31 == -1 || value >> 31 == 0))
5677 return 1;
5678
5679 else if (TARGET_POWERPC64)
5680 {
5681 HOST_WIDE_INT low = ((value & 0xffffffff) ^ 0x80000000) - 0x80000000;
5682 HOST_WIDE_INT high = value >> 31;
5683
5684 if (high == 0 || high == -1)
5685 return 2;
5686
5687 high >>= 1;
5688
5689 if (low == 0)
5690 return num_insns_constant_gpr (high) + 1;
5691 else if (high == 0)
5692 return num_insns_constant_gpr (low) + 1;
5693 else
5694 return (num_insns_constant_gpr (high)
5695 + num_insns_constant_gpr (low) + 1);
5696 }
5697
5698 else
5699 return 2;
5700 }
5701
5702 /* Helper for num_insns_constant. Allow constants formed by the
5703 num_insns_constant_gpr sequences, plus li -1, rldicl/rldicr/rlwinm,
5704 and handle modes that require multiple gprs. */
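
/* E.g. (illustrative): 0x000000FFFFFFFFFF takes three instructions via
   num_insns_constant_gpr, but it is a valid rldicl mask, so it is counted
   as two, as for "li rD,-1" followed by "rldicl rD,rD,0,24".  */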
5705
5706 static int
5707 num_insns_constant_multi (HOST_WIDE_INT value, machine_mode mode)
5708 {
5709 int nregs = (GET_MODE_SIZE (mode) + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
5710 int total = 0;
5711 while (nregs-- > 0)
5712 {
5713 HOST_WIDE_INT low = sext_hwi (value, BITS_PER_WORD);
5714 int insns = num_insns_constant_gpr (low);
5715 if (insns > 2
5716 /* We won't get more than 2 from num_insns_constant_gpr
5717 except when TARGET_POWERPC64 and mode is DImode or
5718 wider, so the register mode must be DImode. */
5719 && rs6000_is_valid_and_mask (GEN_INT (low), DImode))
5720 insns = 2;
5721 total += insns;
5722 value >>= BITS_PER_WORD;
5723 }
5724 return total;
5725 }
5726
5727 /* Return the number of instructions it takes to form a constant in as
5728 many gprs as are needed for MODE. */
5729
5730 int
5731 num_insns_constant (rtx op, machine_mode mode)
5732 {
5733 HOST_WIDE_INT val;
5734
5735 switch (GET_CODE (op))
5736 {
5737 case CONST_INT:
5738 val = INTVAL (op);
5739 break;
5740
5741 case CONST_WIDE_INT:
5742 {
5743 int insns = 0;
5744 for (int i = 0; i < CONST_WIDE_INT_NUNITS (op); i++)
5745 insns += num_insns_constant_multi (CONST_WIDE_INT_ELT (op, i),
5746 DImode);
5747 return insns;
5748 }
5749
5750 case CONST_DOUBLE:
5751 {
5752 const struct real_value *rv = CONST_DOUBLE_REAL_VALUE (op);
5753
5754 if (mode == SFmode || mode == SDmode)
5755 {
5756 long l;
5757
5758 if (mode == SDmode)
5759 REAL_VALUE_TO_TARGET_DECIMAL32 (*rv, l);
5760 else
5761 REAL_VALUE_TO_TARGET_SINGLE (*rv, l);
5762 /* See the first define_split in rs6000.md handling a
5763 const_double_operand. */
5764 val = l;
5765 mode = SImode;
5766 }
5767 else if (mode == DFmode || mode == DDmode)
5768 {
5769 long l[2];
5770
5771 if (mode == DDmode)
5772 REAL_VALUE_TO_TARGET_DECIMAL64 (*rv, l);
5773 else
5774 REAL_VALUE_TO_TARGET_DOUBLE (*rv, l);
5775
5776 /* See the second (32-bit) and third (64-bit) define_split
5777 in rs6000.md handling a const_double_operand. */
5778 val = (unsigned HOST_WIDE_INT) l[WORDS_BIG_ENDIAN ? 0 : 1] << 32;
5779 val |= l[WORDS_BIG_ENDIAN ? 1 : 0] & 0xffffffffUL;
5780 mode = DImode;
5781 }
5782 else if (mode == TFmode || mode == TDmode
5783 || mode == KFmode || mode == IFmode)
5784 {
5785 long l[4];
5786 int insns;
5787
5788 if (mode == TDmode)
5789 REAL_VALUE_TO_TARGET_DECIMAL128 (*rv, l);
5790 else
5791 REAL_VALUE_TO_TARGET_LONG_DOUBLE (*rv, l);
5792
5793 val = (unsigned HOST_WIDE_INT) l[WORDS_BIG_ENDIAN ? 0 : 3] << 32;
5794 val |= l[WORDS_BIG_ENDIAN ? 1 : 2] & 0xffffffffUL;
5795 insns = num_insns_constant_multi (val, DImode);
5796 val = (unsigned HOST_WIDE_INT) l[WORDS_BIG_ENDIAN ? 2 : 1] << 32;
5797 val |= l[WORDS_BIG_ENDIAN ? 3 : 0] & 0xffffffffUL;
5798 insns += num_insns_constant_multi (val, DImode);
5799 return insns;
5800 }
5801 else
5802 gcc_unreachable ();
5803 }
5804 break;
5805
5806 default:
5807 gcc_unreachable ();
5808 }
5809
5810 return num_insns_constant_multi (val, mode);
5811 }
5812
5813 /* Interpret element ELT of the CONST_VECTOR OP as an integer value.
5814 If the mode of OP is MODE_VECTOR_INT, this simply returns the
5815 corresponding element of the vector, but for V4SFmode, the
5816 corresponding "float" is interpreted as an SImode integer. */
5817
5818 HOST_WIDE_INT
5819 const_vector_elt_as_int (rtx op, unsigned int elt)
5820 {
5821 rtx tmp;
5822
5823 /* We can't handle V2DImode and V2DFmode vector constants here yet. */
5824 gcc_assert (GET_MODE (op) != V2DImode
5825 && GET_MODE (op) != V2DFmode);
5826
5827 tmp = CONST_VECTOR_ELT (op, elt);
5828 if (GET_MODE (op) == V4SFmode)
5829 tmp = gen_lowpart (SImode, tmp);
5830 return INTVAL (tmp);
5831 }
5832
5833 /* Return true if OP can be synthesized with a particular vspltisb, vspltish
5834 or vspltisw instruction. OP is a CONST_VECTOR. Which instruction is used
5835 depends on STEP and COPIES, one of which will be 1. If COPIES > 1,
5836 all items are set to the same value and contain COPIES replicas of the
5837 vsplt's operand; if STEP > 1, one in STEP elements is set to the vsplt's
5838 operand and the others are set to the value of the operand's msb. */
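
/* Example (illustrative): the V8HImode constant { 5, 5, 5, 5, 5, 5, 5, 5 }
   is accepted with STEP 1 and COPIES 1 and becomes "vspltish %vD,5"; the
   V4SImode constant with every element 0x00050005 is the same splat seen
   with COPIES 2, since each word holds two replicas of 5.  */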
5839
5840 static bool
5841 vspltis_constant (rtx op, unsigned step, unsigned copies)
5842 {
5843 machine_mode mode = GET_MODE (op);
5844 machine_mode inner = GET_MODE_INNER (mode);
5845
5846 unsigned i;
5847 unsigned nunits;
5848 unsigned bitsize;
5849 unsigned mask;
5850
5851 HOST_WIDE_INT val;
5852 HOST_WIDE_INT splat_val;
5853 HOST_WIDE_INT msb_val;
5854
5855 if (mode == V2DImode || mode == V2DFmode || mode == V1TImode)
5856 return false;
5857
5858 nunits = GET_MODE_NUNITS (mode);
5859 bitsize = GET_MODE_BITSIZE (inner);
5860 mask = GET_MODE_MASK (inner);
5861
5862 val = const_vector_elt_as_int (op, BYTES_BIG_ENDIAN ? nunits - 1 : 0);
5863 splat_val = val;
5864 msb_val = val >= 0 ? 0 : -1;
5865
5866 /* Construct the value to be splatted, if possible. If not, return false. */
5867 for (i = 2; i <= copies; i *= 2)
5868 {
5869 HOST_WIDE_INT small_val;
5870 bitsize /= 2;
5871 small_val = splat_val >> bitsize;
5872 mask >>= bitsize;
5873 if (splat_val != ((HOST_WIDE_INT)
5874 ((unsigned HOST_WIDE_INT) small_val << bitsize)
5875 | (small_val & mask)))
5876 return false;
5877 splat_val = small_val;
5878 }
5879
5880 /* Check if SPLAT_VAL can really be the operand of a vspltis[bhw]. */
5881 if (EASY_VECTOR_15 (splat_val))
5882 ;
5883
5884 /* Also check if we can splat, and then add the result to itself. Do so if
5885 the value is positive, or if the splat instruction is using OP's mode;
5886 for splat_val < 0, the splat and the add should use the same mode. */
5887 else if (EASY_VECTOR_15_ADD_SELF (splat_val)
5888 && (splat_val >= 0 || (step == 1 && copies == 1)))
5889 ;
5890
5891 /* Also check if we are loading up the most significant bit, which can be
5892 done by loading up -1 and shifting the value left by -1. */
5893 else if (EASY_VECTOR_MSB (splat_val, inner))
5894 ;
5895
5896 else
5897 return false;
5898
5899 /* Check if VAL is present in every STEP-th element, and the
5900 other elements are filled with its most significant bit. */
5901 for (i = 1; i < nunits; ++i)
5902 {
5903 HOST_WIDE_INT desired_val;
5904 unsigned elt = BYTES_BIG_ENDIAN ? nunits - 1 - i : i;
5905 if ((i & (step - 1)) == 0)
5906 desired_val = val;
5907 else
5908 desired_val = msb_val;
5909
5910 if (desired_val != const_vector_elt_as_int (op, elt))
5911 return false;
5912 }
5913
5914 return true;
5915 }
5916
5917 /* Like vspltis_constant, but allow the value to be shifted left with a VSLDOI
5918 instruction, filling in the bottom elements with 0 or -1.
5919
5920 Return 0 if the constant cannot be generated with VSLDOI. Return positive
5921 for the number of zeroes to shift in, or negative for the number of 0xff
5922 bytes to shift in.
5923
5924 OP is a CONST_VECTOR. */
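
/* Example (illustrative): the big-endian V16QImode constant
   { 5, 5, 5, 5, 5, 5, 5, 5, 0, 0, 0, 0, 0, 0, 0, 0 } yields 8, i.e.
   "vspltisb vD,5" followed by a VSLDOI shifting in eight zero bytes; a
   trailing run of eight 0xff bytes would yield -8 instead.  */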
5925
5926 int
5927 vspltis_shifted (rtx op)
5928 {
5929 machine_mode mode = GET_MODE (op);
5930 machine_mode inner = GET_MODE_INNER (mode);
5931
5932 unsigned i, j;
5933 unsigned nunits;
5934 unsigned mask;
5935
5936 HOST_WIDE_INT val;
5937
5938 if (mode != V16QImode && mode != V8HImode && mode != V4SImode)
5939 return 0;
5940
5941 /* We need to create pseudo registers to do the shift, so don't recognize
5942 shift vector constants after reload. */
5943 if (!can_create_pseudo_p ())
5944 return 0;
5945
5946 nunits = GET_MODE_NUNITS (mode);
5947 mask = GET_MODE_MASK (inner);
5948
5949 val = const_vector_elt_as_int (op, BYTES_BIG_ENDIAN ? 0 : nunits - 1);
5950
5951 /* Check if the value can really be the operand of a vspltis[bhw]. */
5952 if (EASY_VECTOR_15 (val))
5953 ;
5954
5955 /* Also check if we are loading up the most significant bit, which can be
5956 done by loading up -1 and shifting the value left by -1. */
5957 else if (EASY_VECTOR_MSB (val, inner))
5958 ;
5959
5960 else
5961 return 0;
5962
5963 /* Check that VAL is present in every element until we find the trailing
5964 elements, which must all be 0 or all 1 bits. */
5965 for (i = 1; i < nunits; ++i)
5966 {
5967 unsigned elt = BYTES_BIG_ENDIAN ? i : nunits - 1 - i;
5968 HOST_WIDE_INT elt_val = const_vector_elt_as_int (op, elt);
5969
5970 /* If the value isn't the splat value, check for the remaining elements
5971 being 0/-1. */
5972 if (val != elt_val)
5973 {
5974 if (elt_val == 0)
5975 {
5976 for (j = i+1; j < nunits; ++j)
5977 {
5978 unsigned elt2 = BYTES_BIG_ENDIAN ? j : nunits - 1 - j;
5979 if (const_vector_elt_as_int (op, elt2) != 0)
5980 return 0;
5981 }
5982
5983 return (nunits - i) * GET_MODE_SIZE (inner);
5984 }
5985
5986 else if ((elt_val & mask) == mask)
5987 {
5988 for (j = i+1; j < nunits; ++j)
5989 {
5990 unsigned elt2 = BYTES_BIG_ENDIAN ? j : nunits - 1 - j;
5991 if ((const_vector_elt_as_int (op, elt2) & mask) != mask)
5992 return 0;
5993 }
5994
5995 return -((nunits - i) * GET_MODE_SIZE (inner));
5996 }
5997
5998 else
5999 return 0;
6000 }
6001 }
6002
6003 /* If all elements are equal, we don't need to do VSLDOI. */
6004 return 0;
6005 }
6006
6007
6008 /* Return true if OP is of the given MODE and can be synthesized
6009 with a vspltisb, vspltish or vspltisw. */
6010
6011 bool
6012 easy_altivec_constant (rtx op, machine_mode mode)
6013 {
6014 unsigned step, copies;
6015
6016 if (mode == VOIDmode)
6017 mode = GET_MODE (op);
6018 else if (mode != GET_MODE (op))
6019 return false;
6020
6021 /* V2DI/V2DF were added with VSX. Only allow 0 and all 1's as easy
6022 constants. */
6023 if (mode == V2DFmode)
6024 return zero_constant (op, mode);
6025
6026 else if (mode == V2DImode)
6027 {
6028 if (!CONST_INT_P (CONST_VECTOR_ELT (op, 0))
6029 || !CONST_INT_P (CONST_VECTOR_ELT (op, 1)))
6030 return false;
6031
6032 if (zero_constant (op, mode))
6033 return true;
6034
6035 if (INTVAL (CONST_VECTOR_ELT (op, 0)) == -1
6036 && INTVAL (CONST_VECTOR_ELT (op, 1)) == -1)
6037 return true;
6038
6039 return false;
6040 }
6041
6042 /* V1TImode is a special container for TImode. Ignore for now. */
6043 else if (mode == V1TImode)
6044 return false;
6045
6046 /* Start with a vspltisw. */
6047 step = GET_MODE_NUNITS (mode) / 4;
6048 copies = 1;
6049
6050 if (vspltis_constant (op, step, copies))
6051 return true;
6052
6053 /* Then try with a vspltish. */
6054 if (step == 1)
6055 copies <<= 1;
6056 else
6057 step >>= 1;
6058
6059 if (vspltis_constant (op, step, copies))
6060 return true;
6061
6062 /* And finally a vspltisb. */
6063 if (step == 1)
6064 copies <<= 1;
6065 else
6066 step >>= 1;
6067
6068 if (vspltis_constant (op, step, copies))
6069 return true;
6070
6071 if (vspltis_shifted (op) != 0)
6072 return true;
6073
6074 return false;
6075 }
6076
6077 /* Generate a VEC_DUPLICATE representing a vspltis[bhw] instruction whose
6078 result is OP. Abort if it is not possible. */
6079
6080 rtx
6081 gen_easy_altivec_constant (rtx op)
6082 {
6083 machine_mode mode = GET_MODE (op);
6084 int nunits = GET_MODE_NUNITS (mode);
6085 rtx val = CONST_VECTOR_ELT (op, BYTES_BIG_ENDIAN ? nunits - 1 : 0);
6086 unsigned step = nunits / 4;
6087 unsigned copies = 1;
6088
6089 /* Start with a vspltisw. */
6090 if (vspltis_constant (op, step, copies))
6091 return gen_rtx_VEC_DUPLICATE (V4SImode, gen_lowpart (SImode, val));
6092
6093 /* Then try with a vspltish. */
6094 if (step == 1)
6095 copies <<= 1;
6096 else
6097 step >>= 1;
6098
6099 if (vspltis_constant (op, step, copies))
6100 return gen_rtx_VEC_DUPLICATE (V8HImode, gen_lowpart (HImode, val));
6101
6102 /* And finally a vspltisb. */
6103 if (step == 1)
6104 copies <<= 1;
6105 else
6106 step >>= 1;
6107
6108 if (vspltis_constant (op, step, copies))
6109 return gen_rtx_VEC_DUPLICATE (V16QImode, gen_lowpart (QImode, val));
6110
6111 gcc_unreachable ();
6112 }
6113
6114 /* Return true if OP is of the given MODE and can be synthesized with ISA 3.0
6115 instructions (xxspltib, vupkhsb/vextsb2w/vextsb2d).
6116
6117 Return the number of instructions needed (1 or 2) via the address pointed
6118 to by NUM_INSNS_PTR.
6119
6120 Return the constant that is being split via CONSTANT_PTR. */
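
/* Example (illustrative): a V16QImode splat of 20 is a single
   "xxspltib vD,20"; a V4SImode splat of 20 reports two instructions
   (xxspltib plus a vextsb2w sign extension); a V4SImode splat of 5 is
   rejected here because a single vspltisw is preferable.  */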
6121
6122 bool
6123 xxspltib_constant_p (rtx op,
6124 machine_mode mode,
6125 int *num_insns_ptr,
6126 int *constant_ptr)
6127 {
6128 size_t nunits = GET_MODE_NUNITS (mode);
6129 size_t i;
6130 HOST_WIDE_INT value;
6131 rtx element;
6132
6133 /* Set the returned values to out-of-bounds values. */
6134 *num_insns_ptr = -1;
6135 *constant_ptr = 256;
6136
6137 if (!TARGET_P9_VECTOR)
6138 return false;
6139
6140 if (mode == VOIDmode)
6141 mode = GET_MODE (op);
6142
6143 else if (mode != GET_MODE (op) && GET_MODE (op) != VOIDmode)
6144 return false;
6145
6146 /* Handle (vec_duplicate <constant>). */
6147 if (GET_CODE (op) == VEC_DUPLICATE)
6148 {
6149 if (mode != V16QImode && mode != V8HImode && mode != V4SImode
6150 && mode != V2DImode)
6151 return false;
6152
6153 element = XEXP (op, 0);
6154 if (!CONST_INT_P (element))
6155 return false;
6156
6157 value = INTVAL (element);
6158 if (!IN_RANGE (value, -128, 127))
6159 return false;
6160 }
6161
6162 /* Handle (const_vector [...]). */
6163 else if (GET_CODE (op) == CONST_VECTOR)
6164 {
6165 if (mode != V16QImode && mode != V8HImode && mode != V4SImode
6166 && mode != V2DImode)
6167 return false;
6168
6169 element = CONST_VECTOR_ELT (op, 0);
6170 if (!CONST_INT_P (element))
6171 return false;
6172
6173 value = INTVAL (element);
6174 if (!IN_RANGE (value, -128, 127))
6175 return false;
6176
6177 for (i = 1; i < nunits; i++)
6178 {
6179 element = CONST_VECTOR_ELT (op, i);
6180 if (!CONST_INT_P (element))
6181 return false;
6182
6183 if (value != INTVAL (element))
6184 return false;
6185 }
6186 }
6187
6188 /* Handle integer constants being loaded into the upper part of the VSX
6189 register as a scalar. If the value isn't 0/-1, only allow it if the mode
6190 can go in Altivec registers. Prefer VSPLTISW/VUPKHSW over XXSPLTIB. */
6191 else if (CONST_INT_P (op))
6192 {
6193 if (!SCALAR_INT_MODE_P (mode))
6194 return false;
6195
6196 value = INTVAL (op);
6197 if (!IN_RANGE (value, -128, 127))
6198 return false;
6199
6200 if (!IN_RANGE (value, -1, 0))
6201 {
6202 if (!(reg_addr[mode].addr_mask[RELOAD_REG_VMX] & RELOAD_REG_VALID))
6203 return false;
6204
6205 if (EASY_VECTOR_15 (value))
6206 return false;
6207 }
6208 }
6209
6210 else
6211 return false;
6212
6213 /* See if we could generate vspltisw/vspltish directly instead of xxspltib +
6214 sign extend. Special case 0/-1 to allow getting any VSX register instead
6215 of an Altivec register. */
6216 if ((mode == V4SImode || mode == V8HImode) && !IN_RANGE (value, -1, 0)
6217 && EASY_VECTOR_15 (value))
6218 return false;
6219
6220 /* Return # of instructions and the constant byte for XXSPLTIB. */
6221 if (mode == V16QImode)
6222 *num_insns_ptr = 1;
6223
6224 else if (IN_RANGE (value, -1, 0))
6225 *num_insns_ptr = 1;
6226
6227 else
6228 *num_insns_ptr = 2;
6229
6230 *constant_ptr = (int) value;
6231 return true;
6232 }
6233
6234 const char *
6235 output_vec_const_move (rtx *operands)
6236 {
6237 int shift;
6238 machine_mode mode;
6239 rtx dest, vec;
6240
6241 dest = operands[0];
6242 vec = operands[1];
6243 mode = GET_MODE (dest);
6244
6245 if (TARGET_VSX)
6246 {
6247 bool dest_vmx_p = ALTIVEC_REGNO_P (REGNO (dest));
6248 int xxspltib_value = 256;
6249 int num_insns = -1;
6250
6251 if (zero_constant (vec, mode))
6252 {
6253 if (TARGET_P9_VECTOR)
6254 return "xxspltib %x0,0";
6255
6256 else if (dest_vmx_p)
6257 return "vspltisw %0,0";
6258
6259 else
6260 return "xxlxor %x0,%x0,%x0";
6261 }
6262
6263 if (all_ones_constant (vec, mode))
6264 {
6265 if (TARGET_P9_VECTOR)
6266 return "xxspltib %x0,255";
6267
6268 else if (dest_vmx_p)
6269 return "vspltisw %0,-1";
6270
6271 else if (TARGET_P8_VECTOR)
6272 return "xxlorc %x0,%x0,%x0";
6273
6274 else
6275 gcc_unreachable ();
6276 }
6277
6278 if (TARGET_P9_VECTOR
6279 && xxspltib_constant_p (vec, mode, &num_insns, &xxspltib_value))
6280 {
6281 if (num_insns == 1)
6282 {
6283 operands[2] = GEN_INT (xxspltib_value & 0xff);
6284 return "xxspltib %x0,%2";
6285 }
6286
6287 return "#";
6288 }
6289 }
6290
6291 if (TARGET_ALTIVEC)
6292 {
6293 rtx splat_vec;
6294
6295 gcc_assert (ALTIVEC_REGNO_P (REGNO (dest)));
6296 if (zero_constant (vec, mode))
6297 return "vspltisw %0,0";
6298
6299 if (all_ones_constant (vec, mode))
6300 return "vspltisw %0,-1";
6301
6302 /* Do we need to construct a value using VSLDOI? */
6303 shift = vspltis_shifted (vec);
6304 if (shift != 0)
6305 return "#";
6306
6307 splat_vec = gen_easy_altivec_constant (vec);
6308 gcc_assert (GET_CODE (splat_vec) == VEC_DUPLICATE);
6309 operands[1] = XEXP (splat_vec, 0);
6310 if (!EASY_VECTOR_15 (INTVAL (operands[1])))
6311 return "#";
6312
6313 switch (GET_MODE (splat_vec))
6314 {
6315 case E_V4SImode:
6316 return "vspltisw %0,%1";
6317
6318 case E_V8HImode:
6319 return "vspltish %0,%1";
6320
6321 case E_V16QImode:
6322 return "vspltisb %0,%1";
6323
6324 default:
6325 gcc_unreachable ();
6326 }
6327 }
6328
6329 gcc_unreachable ();
6330 }
6331
6332 /* Initialize vector TARGET to VALS. */
6333
6334 void
6335 rs6000_expand_vector_init (rtx target, rtx vals)
6336 {
6337 machine_mode mode = GET_MODE (target);
6338 machine_mode inner_mode = GET_MODE_INNER (mode);
6339 int n_elts = GET_MODE_NUNITS (mode);
6340 int n_var = 0, one_var = -1;
6341 bool all_same = true, all_const_zero = true;
6342 rtx x, mem;
6343 int i;
6344
6345 for (i = 0; i < n_elts; ++i)
6346 {
6347 x = XVECEXP (vals, 0, i);
6348 if (!(CONST_SCALAR_INT_P (x) || CONST_DOUBLE_P (x) || CONST_FIXED_P (x)))
6349 ++n_var, one_var = i;
6350 else if (x != CONST0_RTX (inner_mode))
6351 all_const_zero = false;
6352
6353 if (i > 0 && !rtx_equal_p (x, XVECEXP (vals, 0, 0)))
6354 all_same = false;
6355 }
6356
6357 if (n_var == 0)
6358 {
6359 rtx const_vec = gen_rtx_CONST_VECTOR (mode, XVEC (vals, 0));
6360 bool int_vector_p = (GET_MODE_CLASS (mode) == MODE_VECTOR_INT);
6361 if ((int_vector_p || TARGET_VSX) && all_const_zero)
6362 {
6363 /* Zero register. */
6364 emit_move_insn (target, CONST0_RTX (mode));
6365 return;
6366 }
6367 else if (int_vector_p && easy_vector_constant (const_vec, mode))
6368 {
6369 /* Splat immediate. */
6370 emit_insn (gen_rtx_SET (target, const_vec));
6371 return;
6372 }
6373 else
6374 {
6375 /* Load from constant pool. */
6376 emit_move_insn (target, const_vec);
6377 return;
6378 }
6379 }
6380
6381 /* Double word values on VSX can use xxpermdi or lxvdsx. */
6382 if (VECTOR_MEM_VSX_P (mode) && (mode == V2DFmode || mode == V2DImode))
6383 {
6384 rtx op[2];
6385 size_t i;
6386 size_t num_elements = all_same ? 1 : 2;
6387 for (i = 0; i < num_elements; i++)
6388 {
6389 op[i] = XVECEXP (vals, 0, i);
6390 /* Just in case there is a SUBREG with a smaller mode, do a
6391 conversion. */
6392 if (GET_MODE (op[i]) != inner_mode)
6393 {
6394 rtx tmp = gen_reg_rtx (inner_mode);
6395 convert_move (tmp, op[i], 0);
6396 op[i] = tmp;
6397 }
6398 /* Allow load with splat double word. */
6399 else if (MEM_P (op[i]))
6400 {
6401 if (!all_same)
6402 op[i] = force_reg (inner_mode, op[i]);
6403 }
6404 else if (!REG_P (op[i]))
6405 op[i] = force_reg (inner_mode, op[i]);
6406 }
6407
6408 if (all_same)
6409 {
6410 if (mode == V2DFmode)
6411 emit_insn (gen_vsx_splat_v2df (target, op[0]));
6412 else
6413 emit_insn (gen_vsx_splat_v2di (target, op[0]));
6414 }
6415 else
6416 {
6417 if (mode == V2DFmode)
6418 emit_insn (gen_vsx_concat_v2df (target, op[0], op[1]));
6419 else
6420 emit_insn (gen_vsx_concat_v2di (target, op[0], op[1]));
6421 }
6422 return;
6423 }
6424
6425 /* Special case initializing vector int if we are on 64-bit systems with
6426 direct move or we have the ISA 3.0 instructions. */
6427 if (mode == V4SImode && VECTOR_MEM_VSX_P (V4SImode)
6428 && TARGET_DIRECT_MOVE_64BIT)
6429 {
6430 if (all_same)
6431 {
6432 rtx element0 = XVECEXP (vals, 0, 0);
6433 if (MEM_P (element0))
6434 element0 = rs6000_force_indexed_or_indirect_mem (element0);
6435 else
6436 element0 = force_reg (SImode, element0);
6437
6438 if (TARGET_P9_VECTOR)
6439 emit_insn (gen_vsx_splat_v4si (target, element0));
6440 else
6441 {
6442 rtx tmp = gen_reg_rtx (DImode);
6443 emit_insn (gen_zero_extendsidi2 (tmp, element0));
6444 emit_insn (gen_vsx_splat_v4si_di (target, tmp));
6445 }
6446 return;
6447 }
6448 else
6449 {
6450 rtx elements[4];
6451 size_t i;
6452
6453 for (i = 0; i < 4; i++)
6454 elements[i] = force_reg (SImode, XVECEXP (vals, 0, i));
6455
6456 emit_insn (gen_vsx_init_v4si (target, elements[0], elements[1],
6457 elements[2], elements[3]));
6458 return;
6459 }
6460 }
6461
6462 /* With single precision floating point on VSX, single precision is
6463 internally represented as a double, so either make 2 V2DF vectors
6464 and convert these vectors to single precision, or do one
6465 conversion and splat the result to the other elements. */
6466 if (mode == V4SFmode && VECTOR_MEM_VSX_P (V4SFmode))
6467 {
6468 if (all_same)
6469 {
6470 rtx element0 = XVECEXP (vals, 0, 0);
6471
6472 if (TARGET_P9_VECTOR)
6473 {
6474 if (MEM_P (element0))
6475 element0 = rs6000_force_indexed_or_indirect_mem (element0);
6476
6477 emit_insn (gen_vsx_splat_v4sf (target, element0));
6478 }
6479
6480 else
6481 {
6482 rtx freg = gen_reg_rtx (V4SFmode);
6483 rtx sreg = force_reg (SFmode, element0);
6484 rtx cvt = (TARGET_XSCVDPSPN
6485 ? gen_vsx_xscvdpspn_scalar (freg, sreg)
6486 : gen_vsx_xscvdpsp_scalar (freg, sreg));
6487
6488 emit_insn (cvt);
6489 emit_insn (gen_vsx_xxspltw_v4sf_direct (target, freg,
6490 const0_rtx));
6491 }
6492 }
6493 else
6494 {
6495 rtx dbl_even = gen_reg_rtx (V2DFmode);
6496 rtx dbl_odd = gen_reg_rtx (V2DFmode);
6497 rtx flt_even = gen_reg_rtx (V4SFmode);
6498 rtx flt_odd = gen_reg_rtx (V4SFmode);
6499 rtx op0 = force_reg (SFmode, XVECEXP (vals, 0, 0));
6500 rtx op1 = force_reg (SFmode, XVECEXP (vals, 0, 1));
6501 rtx op2 = force_reg (SFmode, XVECEXP (vals, 0, 2));
6502 rtx op3 = force_reg (SFmode, XVECEXP (vals, 0, 3));
6503
6504 /* Use VMRGEW if we can instead of doing a permute. */
6505 if (TARGET_P8_VECTOR)
6506 {
6507 emit_insn (gen_vsx_concat_v2sf (dbl_even, op0, op2));
6508 emit_insn (gen_vsx_concat_v2sf (dbl_odd, op1, op3));
6509 emit_insn (gen_vsx_xvcvdpsp (flt_even, dbl_even));
6510 emit_insn (gen_vsx_xvcvdpsp (flt_odd, dbl_odd));
6511 if (BYTES_BIG_ENDIAN)
6512 emit_insn (gen_p8_vmrgew_v4sf_direct (target, flt_even, flt_odd));
6513 else
6514 emit_insn (gen_p8_vmrgew_v4sf_direct (target, flt_odd, flt_even));
6515 }
6516 else
6517 {
6518 emit_insn (gen_vsx_concat_v2sf (dbl_even, op0, op1));
6519 emit_insn (gen_vsx_concat_v2sf (dbl_odd, op2, op3));
6520 emit_insn (gen_vsx_xvcvdpsp (flt_even, dbl_even));
6521 emit_insn (gen_vsx_xvcvdpsp (flt_odd, dbl_odd));
6522 rs6000_expand_extract_even (target, flt_even, flt_odd);
6523 }
6524 }
6525 return;
6526 }
6527
6528 /* Special case initializing vector short/char that are splats if we are on
6529 64-bit systems with direct move. */
6530 if (all_same && TARGET_DIRECT_MOVE_64BIT
6531 && (mode == V16QImode || mode == V8HImode))
6532 {
6533 rtx op0 = XVECEXP (vals, 0, 0);
6534 rtx di_tmp = gen_reg_rtx (DImode);
6535
6536 if (!REG_P (op0))
6537 op0 = force_reg (GET_MODE_INNER (mode), op0);
6538
6539 if (mode == V16QImode)
6540 {
6541 emit_insn (gen_zero_extendqidi2 (di_tmp, op0));
6542 emit_insn (gen_vsx_vspltb_di (target, di_tmp));
6543 return;
6544 }
6545
6546 if (mode == V8HImode)
6547 {
6548 emit_insn (gen_zero_extendhidi2 (di_tmp, op0));
6549 emit_insn (gen_vsx_vsplth_di (target, di_tmp));
6550 return;
6551 }
6552 }
6553
6554 /* Store value to stack temp. Load vector element. Splat. However, splat
6555 of 64-bit items is not supported on Altivec. */
6556 if (all_same && GET_MODE_SIZE (inner_mode) <= 4)
6557 {
6558 mem = assign_stack_temp (mode, GET_MODE_SIZE (inner_mode));
6559 emit_move_insn (adjust_address_nv (mem, inner_mode, 0),
6560 XVECEXP (vals, 0, 0));
6561 x = gen_rtx_UNSPEC (VOIDmode,
6562 gen_rtvec (1, const0_rtx), UNSPEC_LVE);
6563 emit_insn (gen_rtx_PARALLEL (VOIDmode,
6564 gen_rtvec (2,
6565 gen_rtx_SET (target, mem),
6566 x)));
6567 x = gen_rtx_VEC_SELECT (inner_mode, target,
6568 gen_rtx_PARALLEL (VOIDmode,
6569 gen_rtvec (1, const0_rtx)));
6570 emit_insn (gen_rtx_SET (target, gen_rtx_VEC_DUPLICATE (mode, x)));
6571 return;
6572 }
6573
6574 /* One field is non-constant. Load constant then overwrite
6575 varying field. */
6576 if (n_var == 1)
6577 {
6578 rtx copy = copy_rtx (vals);
6579
6580 /* Load constant part of vector, substitute neighboring value for
6581 varying element. */
6582 XVECEXP (copy, 0, one_var) = XVECEXP (vals, 0, (one_var + 1) % n_elts);
6583 rs6000_expand_vector_init (target, copy);
6584
6585 /* Insert variable. */
6586 rs6000_expand_vector_set (target, XVECEXP (vals, 0, one_var), one_var);
6587 return;
6588 }
6589
6590 /* Construct the vector in memory one field at a time
6591 and load the whole vector. */
6592 mem = assign_stack_temp (mode, GET_MODE_SIZE (mode));
6593 for (i = 0; i < n_elts; i++)
6594 emit_move_insn (adjust_address_nv (mem, inner_mode,
6595 i * GET_MODE_SIZE (inner_mode)),
6596 XVECEXP (vals, 0, i));
6597 emit_move_insn (target, mem);
6598 }
6599
6600 /* Set field ELT of TARGET to VAL. */
6601
6602 void
6603 rs6000_expand_vector_set (rtx target, rtx val, int elt)
6604 {
6605 machine_mode mode = GET_MODE (target);
6606 machine_mode inner_mode = GET_MODE_INNER (mode);
6607 rtx reg = gen_reg_rtx (mode);
6608 rtx mask, mem, x;
6609 int width = GET_MODE_SIZE (inner_mode);
6610 int i;
6611
6612 val = force_reg (GET_MODE (val), val);
6613
6614 if (VECTOR_MEM_VSX_P (mode))
6615 {
6616 rtx insn = NULL_RTX;
6617 rtx elt_rtx = GEN_INT (elt);
6618
6619 if (mode == V2DFmode)
6620 insn = gen_vsx_set_v2df (target, target, val, elt_rtx);
6621
6622 else if (mode == V2DImode)
6623 insn = gen_vsx_set_v2di (target, target, val, elt_rtx);
6624
6625 else if (TARGET_P9_VECTOR && TARGET_POWERPC64)
6626 {
6627 if (mode == V4SImode)
6628 insn = gen_vsx_set_v4si_p9 (target, target, val, elt_rtx);
6629 else if (mode == V8HImode)
6630 insn = gen_vsx_set_v8hi_p9 (target, target, val, elt_rtx);
6631 else if (mode == V16QImode)
6632 insn = gen_vsx_set_v16qi_p9 (target, target, val, elt_rtx);
6633 else if (mode == V4SFmode)
6634 insn = gen_vsx_set_v4sf_p9 (target, target, val, elt_rtx);
6635 }
6636
6637 if (insn)
6638 {
6639 emit_insn (insn);
6640 return;
6641 }
6642 }
6643
6644 /* Simplify setting single-element vectors like V1TImode. */
6645 if (GET_MODE_SIZE (mode) == GET_MODE_SIZE (inner_mode) && elt == 0)
6646 {
6647 emit_move_insn (target, gen_lowpart (mode, val));
6648 return;
6649 }
6650
6651 /* Load single variable value. */
6652 mem = assign_stack_temp (mode, GET_MODE_SIZE (inner_mode));
6653 emit_move_insn (adjust_address_nv (mem, inner_mode, 0), val);
6654 x = gen_rtx_UNSPEC (VOIDmode,
6655 gen_rtvec (1, const0_rtx), UNSPEC_LVE);
6656 emit_insn (gen_rtx_PARALLEL (VOIDmode,
6657 gen_rtvec (2,
6658 gen_rtx_SET (reg, mem),
6659 x)));
6660
6661 /* Linear sequence. */
6662 mask = gen_rtx_PARALLEL (V16QImode, rtvec_alloc (16));
6663 for (i = 0; i < 16; ++i)
6664 XVECEXP (mask, 0, i) = GEN_INT (i);
6665
6666 /* Set permute mask to insert element into target. */
6667 for (i = 0; i < width; ++i)
6668 XVECEXP (mask, 0, elt*width + i)
6669 = GEN_INT (i + 0x10);
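/* E.g. (illustrative, big-endian): inserting element 1 of a V4SImode
   target gives mask bytes { 0..3, 0x10..0x13, 8..15 }, so the permute
   keeps TARGET's bytes except bytes 4-7, which are taken from REG.  */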
6670 x = gen_rtx_CONST_VECTOR (V16QImode, XVEC (mask, 0));
6671
6672 if (BYTES_BIG_ENDIAN)
6673 x = gen_rtx_UNSPEC (mode,
6674 gen_rtvec (3, target, reg,
6675 force_reg (V16QImode, x)),
6676 UNSPEC_VPERM);
6677 else
6678 {
6679 if (TARGET_P9_VECTOR)
6680 x = gen_rtx_UNSPEC (mode,
6681 gen_rtvec (3, reg, target,
6682 force_reg (V16QImode, x)),
6683 UNSPEC_VPERMR);
6684 else
6685 {
6686 /* Invert selector. We prefer to generate VNAND on P8 so
6687 that future fusion opportunities can kick in, but must
6688 generate VNOR elsewhere. */
6689 rtx notx = gen_rtx_NOT (V16QImode, force_reg (V16QImode, x));
6690 rtx iorx = (TARGET_P8_VECTOR
6691 ? gen_rtx_IOR (V16QImode, notx, notx)
6692 : gen_rtx_AND (V16QImode, notx, notx));
6693 rtx tmp = gen_reg_rtx (V16QImode);
6694 emit_insn (gen_rtx_SET (tmp, iorx));
6695
6696 /* Permute with operands reversed and adjusted selector. */
6697 x = gen_rtx_UNSPEC (mode, gen_rtvec (3, reg, target, tmp),
6698 UNSPEC_VPERM);
6699 }
6700 }
6701
6702 emit_insn (gen_rtx_SET (target, x));
6703 }
6704
6705 /* Extract field ELT from VEC into TARGET. */
6706
6707 void
6708 rs6000_expand_vector_extract (rtx target, rtx vec, rtx elt)
6709 {
6710 machine_mode mode = GET_MODE (vec);
6711 machine_mode inner_mode = GET_MODE_INNER (mode);
6712 rtx mem;
6713
6714 if (VECTOR_MEM_VSX_P (mode) && CONST_INT_P (elt))
6715 {
6716 switch (mode)
6717 {
6718 default:
6719 break;
6720 case E_V1TImode:
6721 emit_move_insn (target, gen_lowpart (TImode, vec));
6722 break;
6723 case E_V2DFmode:
6724 emit_insn (gen_vsx_extract_v2df (target, vec, elt));
6725 return;
6726 case E_V2DImode:
6727 emit_insn (gen_vsx_extract_v2di (target, vec, elt));
6728 return;
6729 case E_V4SFmode:
6730 emit_insn (gen_vsx_extract_v4sf (target, vec, elt));
6731 return;
6732 case E_V16QImode:
6733 if (TARGET_DIRECT_MOVE_64BIT)
6734 {
6735 emit_insn (gen_vsx_extract_v16qi (target, vec, elt));
6736 return;
6737 }
6738 else
6739 break;
6740 case E_V8HImode:
6741 if (TARGET_DIRECT_MOVE_64BIT)
6742 {
6743 emit_insn (gen_vsx_extract_v8hi (target, vec, elt));
6744 return;
6745 }
6746 else
6747 break;
6748 case E_V4SImode:
6749 if (TARGET_DIRECT_MOVE_64BIT)
6750 {
6751 emit_insn (gen_vsx_extract_v4si (target, vec, elt));
6752 return;
6753 }
6754 break;
6755 }
6756 }
6757 else if (VECTOR_MEM_VSX_P (mode) && !CONST_INT_P (elt)
6758 && TARGET_DIRECT_MOVE_64BIT)
6759 {
6760 if (GET_MODE (elt) != DImode)
6761 {
6762 rtx tmp = gen_reg_rtx (DImode);
6763 convert_move (tmp, elt, 0);
6764 elt = tmp;
6765 }
6766 else if (!REG_P (elt))
6767 elt = force_reg (DImode, elt);
6768
6769 switch (mode)
6770 {
6771 case E_V1TImode:
6772 emit_move_insn (target, gen_lowpart (TImode, vec));
6773 return;
6774
6775 case E_V2DFmode:
6776 emit_insn (gen_vsx_extract_v2df_var (target, vec, elt));
6777 return;
6778
6779 case E_V2DImode:
6780 emit_insn (gen_vsx_extract_v2di_var (target, vec, elt));
6781 return;
6782
6783 case E_V4SFmode:
6784 emit_insn (gen_vsx_extract_v4sf_var (target, vec, elt));
6785 return;
6786
6787 case E_V4SImode:
6788 emit_insn (gen_vsx_extract_v4si_var (target, vec, elt));
6789 return;
6790
6791 case E_V8HImode:
6792 emit_insn (gen_vsx_extract_v8hi_var (target, vec, elt));
6793 return;
6794
6795 case E_V16QImode:
6796 emit_insn (gen_vsx_extract_v16qi_var (target, vec, elt));
6797 return;
6798
6799 default:
6800 gcc_unreachable ();
6801 }
6802 }
6803
6804 /* Allocate mode-sized buffer. */
6805 mem = assign_stack_temp (mode, GET_MODE_SIZE (mode));
6806
6807 emit_move_insn (mem, vec);
6808 if (CONST_INT_P (elt))
6809 {
6810 int modulo_elt = INTVAL (elt) % GET_MODE_NUNITS (mode);
6811
6812 /* Add offset to field within buffer matching vector element. */
6813 mem = adjust_address_nv (mem, inner_mode,
6814 modulo_elt * GET_MODE_SIZE (inner_mode));
6815 emit_move_insn (target, adjust_address_nv (mem, inner_mode, 0));
6816 }
6817 else
6818 {
6819 unsigned int ele_size = GET_MODE_SIZE (inner_mode);
6820 rtx num_ele_m1 = GEN_INT (GET_MODE_NUNITS (mode) - 1);
6821 rtx new_addr;
6822
6823 elt = gen_rtx_AND (Pmode, elt, num_ele_m1);
6824 if (ele_size > 1)
6825 elt = gen_rtx_MULT (Pmode, elt, GEN_INT (ele_size));
6826 new_addr = gen_rtx_PLUS (Pmode, XEXP (mem, 0), elt);
6827 new_addr = change_address (mem, inner_mode, new_addr);
6828 emit_move_insn (target, new_addr);
6829 }
6830 }
6831
6832 /* Adjust a memory address (MEM) of a vector type to point to a scalar field
6833 within the vector (ELEMENT) with a mode (SCALAR_MODE). Use a base register
6834 temporary (BASE_TMP) to fix up the address. Return the new memory address
6835 that is valid for reads or writes to a given register (SCALAR_REG). */
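
/* For instance (illustrative): extracting element 3 of a V4SImode vector
   whose address is (plus rB 16) folds to (plus rB 28), since 16 + 3*4
   still fits a D-form offset; a variable ELEMENT is instead shifted left
   by 2 into BASE_TMP and added to form the new address.  */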
6836
6837 rtx
6838 rs6000_adjust_vec_address (rtx scalar_reg,
6839 rtx mem,
6840 rtx element,
6841 rtx base_tmp,
6842 machine_mode scalar_mode)
6843 {
6844 unsigned scalar_size = GET_MODE_SIZE (scalar_mode);
6845 rtx addr = XEXP (mem, 0);
6846 rtx element_offset;
6847 rtx new_addr;
6848 bool valid_addr_p;
6849
6850 /* Vector addresses should not have PRE_INC, PRE_DEC, or PRE_MODIFY. */
6851 gcc_assert (GET_RTX_CLASS (GET_CODE (addr)) != RTX_AUTOINC);
6852
6853 /* Calculate what we need to add to the address to get the element
6854 address. */
6855 if (CONST_INT_P (element))
6856 element_offset = GEN_INT (INTVAL (element) * scalar_size);
6857 else
6858 {
6859 int byte_shift = exact_log2 (scalar_size);
6860 gcc_assert (byte_shift >= 0);
6861
6862 if (byte_shift == 0)
6863 element_offset = element;
6864
6865 else
6866 {
6867 if (TARGET_POWERPC64)
6868 emit_insn (gen_ashldi3 (base_tmp, element, GEN_INT (byte_shift)));
6869 else
6870 emit_insn (gen_ashlsi3 (base_tmp, element, GEN_INT (byte_shift)));
6871
6872 element_offset = base_tmp;
6873 }
6874 }
6875
6876 /* Create the new address pointing to the element within the vector. If we
6877 are adding 0, we don't have to change the address. */
6878 if (element_offset == const0_rtx)
6879 new_addr = addr;
6880
6881 /* A simple indirect address can be converted into a reg + offset
6882 address. */
6883 else if (REG_P (addr) || SUBREG_P (addr))
6884 new_addr = gen_rtx_PLUS (Pmode, addr, element_offset);
6885
6886 /* Optimize D-FORM addresses with a constant offset and a constant element
6887 number, folding the element offset directly into the address. */
6888 else if (GET_CODE (addr) == PLUS)
6889 {
6890 rtx op0 = XEXP (addr, 0);
6891 rtx op1 = XEXP (addr, 1);
6892 rtx insn;
6893
6894 gcc_assert (REG_P (op0) || SUBREG_P (op0));
6895 if (CONST_INT_P (op1) && CONST_INT_P (element_offset))
6896 {
6897 HOST_WIDE_INT offset = INTVAL (op1) + INTVAL (element_offset);
6898 rtx offset_rtx = GEN_INT (offset);
6899
6900 if (IN_RANGE (offset, -32768, 32767)
6901 && (scalar_size < 8 || (offset & 0x3) == 0))
6902 new_addr = gen_rtx_PLUS (Pmode, op0, offset_rtx);
6903 else
6904 {
6905 emit_move_insn (base_tmp, offset_rtx);
6906 new_addr = gen_rtx_PLUS (Pmode, op0, base_tmp);
6907 }
6908 }
6909 else
6910 {
6911 bool op1_reg_p = (REG_P (op1) || SUBREG_P (op1));
6912 bool ele_reg_p = (REG_P (element_offset) || SUBREG_P (element_offset));
6913
6914 /* Note, ADDI requires the register being added to be a base
6915 register. If the register was R0, load it up into the temporary
6916 and do the add. */
6917 if (op1_reg_p
6918 && (ele_reg_p || reg_or_subregno (op1) != FIRST_GPR_REGNO))
6919 {
6920 insn = gen_add3_insn (base_tmp, op1, element_offset);
6921 gcc_assert (insn != NULL_RTX);
6922 emit_insn (insn);
6923 }
6924
6925 else if (ele_reg_p
6926 && reg_or_subregno (element_offset) != FIRST_GPR_REGNO)
6927 {
6928 insn = gen_add3_insn (base_tmp, element_offset, op1);
6929 gcc_assert (insn != NULL_RTX);
6930 emit_insn (insn);
6931 }
6932
6933 else
6934 {
6935 emit_move_insn (base_tmp, op1);
6936 emit_insn (gen_add2_insn (base_tmp, element_offset));
6937 }
6938
6939 new_addr = gen_rtx_PLUS (Pmode, op0, base_tmp);
6940 }
6941 }
6942
6943 else
6944 {
6945 emit_move_insn (base_tmp, addr);
6946 new_addr = gen_rtx_PLUS (Pmode, base_tmp, element_offset);
6947 }
6948
6949 /* If we have a PLUS, we need to see whether the particular register class
6950 allows for D-FORM or X-FORM addressing. */
6951 if (GET_CODE (new_addr) == PLUS)
6952 {
6953 rtx op1 = XEXP (new_addr, 1);
6954 addr_mask_type addr_mask;
6955 unsigned int scalar_regno = reg_or_subregno (scalar_reg);
6956
6957 gcc_assert (HARD_REGISTER_NUM_P (scalar_regno));
6958 if (INT_REGNO_P (scalar_regno))
6959 addr_mask = reg_addr[scalar_mode].addr_mask[RELOAD_REG_GPR];
6960
6961 else if (FP_REGNO_P (scalar_regno))
6962 addr_mask = reg_addr[scalar_mode].addr_mask[RELOAD_REG_FPR];
6963
6964 else if (ALTIVEC_REGNO_P (scalar_regno))
6965 addr_mask = reg_addr[scalar_mode].addr_mask[RELOAD_REG_VMX];
6966
6967 else
6968 gcc_unreachable ();
6969
6970 if (REG_P (op1) || SUBREG_P (op1))
6971 valid_addr_p = (addr_mask & RELOAD_REG_INDEXED) != 0;
6972 else
6973 valid_addr_p = (addr_mask & RELOAD_REG_OFFSET) != 0;
6974 }
6975
6976 else if (REG_P (new_addr) || SUBREG_P (new_addr))
6977 valid_addr_p = true;
6978
6979 else
6980 valid_addr_p = false;
6981
6982 if (!valid_addr_p)
6983 {
6984 emit_move_insn (base_tmp, new_addr);
6985 new_addr = base_tmp;
6986 }
6987
6988 return change_address (mem, scalar_mode, new_addr);
6989 }
6990
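/* For example, given MEM (reg R9 + 32) for a V4SImode vector and constant
   ELEMENT 3 with SImode scalars, the code above folds the element offset
   into the displacement: 32 + 3 * 4 = 44, giving MEM:SI (reg R9 + 44),
   which is still a valid D-form address. With a variable ELEMENT the byte
   offset is instead computed into BASE_TMP (a shift left by
   log2 (scalar_size)) and added as a register term, producing an X-form
   address; the final D-form/X-form validity check against reg_addr decides
   whether the sum must first be copied into BASE_TMP. */
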
6991 /* Split a variable vec_extract operation into the component instructions. */
6992
6993 void
6994 rs6000_split_vec_extract_var (rtx dest, rtx src, rtx element, rtx tmp_gpr,
6995 rtx tmp_altivec)
6996 {
6997 machine_mode mode = GET_MODE (src);
6998 machine_mode scalar_mode = GET_MODE_INNER (GET_MODE (src));
6999 unsigned scalar_size = GET_MODE_SIZE (scalar_mode);
7000 int byte_shift = exact_log2 (scalar_size);
7001
7002 gcc_assert (byte_shift >= 0);
7003
7004 /* If we are given a memory address, optimize to load just the element. We
7005 don't have to adjust the vector element number on little endian
7006 systems. */
7007 if (MEM_P (src))
7008 {
7009 int num_elements = GET_MODE_NUNITS (mode);
7010 rtx num_ele_m1 = GEN_INT (num_elements - 1);
7011
7012 emit_insn (gen_anddi3 (element, element, num_ele_m1));
7013 gcc_assert (REG_P (tmp_gpr));
7014 emit_move_insn (dest, rs6000_adjust_vec_address (dest, src, element,
7015 tmp_gpr, scalar_mode));
7016 return;
7017 }
7018
7019 else if (REG_P (src) || SUBREG_P (src))
7020 {
7021 int num_elements = GET_MODE_NUNITS (mode);
7022 int bits_in_element = mode_to_bits (GET_MODE_INNER (mode));
7023 int bit_shift = 7 - exact_log2 (num_elements);
7024 rtx element2;
7025 unsigned int dest_regno = reg_or_subregno (dest);
7026 unsigned int src_regno = reg_or_subregno (src);
7027 unsigned int element_regno = reg_or_subregno (element);
7028
7029 gcc_assert (REG_P (tmp_gpr));
7030
7031 /* See if we want to generate VEXTU{B,H,W}{L,R}X if the destination is in
7032 a general purpose register. */
7033 if (TARGET_P9_VECTOR
7034 && (mode == V16QImode || mode == V8HImode || mode == V4SImode)
7035 && INT_REGNO_P (dest_regno)
7036 && ALTIVEC_REGNO_P (src_regno)
7037 && INT_REGNO_P (element_regno))
7038 {
7039 rtx dest_si = gen_rtx_REG (SImode, dest_regno);
7040 rtx element_si = gen_rtx_REG (SImode, element_regno);
7041
7042 if (mode == V16QImode)
7043 emit_insn (BYTES_BIG_ENDIAN
7044 ? gen_vextublx (dest_si, element_si, src)
7045 : gen_vextubrx (dest_si, element_si, src));
7046
7047 else if (mode == V8HImode)
7048 {
7049 rtx tmp_gpr_si = gen_rtx_REG (SImode, REGNO (tmp_gpr));
7050 emit_insn (gen_ashlsi3 (tmp_gpr_si, element_si, const1_rtx));
7051 emit_insn (BYTES_BIG_ENDIAN
7052 ? gen_vextuhlx (dest_si, tmp_gpr_si, src)
7053 : gen_vextuhrx (dest_si, tmp_gpr_si, src));
7054 }
7055
7056
7057 else
7058 {
7059 rtx tmp_gpr_si = gen_rtx_REG (SImode, REGNO (tmp_gpr));
7060 emit_insn (gen_ashlsi3 (tmp_gpr_si, element_si, const2_rtx));
7061 emit_insn (BYTES_BIG_ENDIAN
7062 ? gen_vextuwlx (dest_si, tmp_gpr_si, src)
7063 : gen_vextuwrx (dest_si, tmp_gpr_si, src));
7064 }
7065
7066 return;
7067 }
7068
7069
7070 gcc_assert (REG_P (tmp_altivec));
7071
7072 /* For little endian, adjust element ordering. For V2DI/V2DF, we can use
7073 an XOR, otherwise we need to subtract. The shift amount is so VSLO
7074 will shift the element into the upper position (adding 3 to convert a
7075 byte shift into a bit shift). */
7076 if (scalar_size == 8)
7077 {
7078 if (!BYTES_BIG_ENDIAN)
7079 {
7080 emit_insn (gen_xordi3 (tmp_gpr, element, const1_rtx));
7081 element2 = tmp_gpr;
7082 }
7083 else
7084 element2 = element;
7085
7086 /* Generate RLDIC directly to shift left 6 bits and retrieve 1
7087 bit. */
7088 emit_insn (gen_rtx_SET (tmp_gpr,
7089 gen_rtx_AND (DImode,
7090 gen_rtx_ASHIFT (DImode,
7091 element2,
7092 GEN_INT (6)),
7093 GEN_INT (64))));
7094 }
7095 else
7096 {
7097 if (!BYTES_BIG_ENDIAN)
7098 {
7099 rtx num_ele_m1 = GEN_INT (num_elements - 1);
7100
7101 emit_insn (gen_anddi3 (tmp_gpr, element, num_ele_m1));
7102 emit_insn (gen_subdi3 (tmp_gpr, num_ele_m1, tmp_gpr));
7103 element2 = tmp_gpr;
7104 }
7105 else
7106 element2 = element;
7107
7108 emit_insn (gen_ashldi3 (tmp_gpr, element2, GEN_INT (bit_shift)));
7109 }
7110
7111 /* Get the value into the lower byte of the Altivec register where VSLO
7112 expects it. */
7113 if (TARGET_P9_VECTOR)
7114 emit_insn (gen_vsx_splat_v2di (tmp_altivec, tmp_gpr));
7115 else if (can_create_pseudo_p ())
7116 emit_insn (gen_vsx_concat_v2di (tmp_altivec, tmp_gpr, tmp_gpr));
7117 else
7118 {
7119 rtx tmp_di = gen_rtx_REG (DImode, REGNO (tmp_altivec));
7120 emit_move_insn (tmp_di, tmp_gpr);
7121 emit_insn (gen_vsx_concat_v2di (tmp_altivec, tmp_di, tmp_di));
7122 }
7123
7124 /* Do the VSLO to get the value into the final location. */
7125 switch (mode)
7126 {
7127 case E_V2DFmode:
7128 emit_insn (gen_vsx_vslo_v2df (dest, src, tmp_altivec));
7129 return;
7130
7131 case E_V2DImode:
7132 emit_insn (gen_vsx_vslo_v2di (dest, src, tmp_altivec));
7133 return;
7134
7135 case E_V4SFmode:
7136 {
7137 rtx tmp_altivec_di = gen_rtx_REG (DImode, REGNO (tmp_altivec));
7138 rtx tmp_altivec_v4sf = gen_rtx_REG (V4SFmode, REGNO (tmp_altivec));
7139 rtx src_v2di = gen_rtx_REG (V2DImode, REGNO (src));
7140 emit_insn (gen_vsx_vslo_v2di (tmp_altivec_di, src_v2di,
7141 tmp_altivec));
7142
7143 emit_insn (gen_vsx_xscvspdp_scalar2 (dest, tmp_altivec_v4sf));
7144 return;
7145 }
7146
7147 case E_V4SImode:
7148 case E_V8HImode:
7149 case E_V16QImode:
7150 {
7151 rtx tmp_altivec_di = gen_rtx_REG (DImode, REGNO (tmp_altivec));
7152 rtx src_v2di = gen_rtx_REG (V2DImode, REGNO (src));
7153 rtx tmp_gpr_di = gen_rtx_REG (DImode, REGNO (dest));
7154 emit_insn (gen_vsx_vslo_v2di (tmp_altivec_di, src_v2di,
7155 tmp_altivec));
7156 emit_move_insn (tmp_gpr_di, tmp_altivec_di);
7157 emit_insn (gen_lshrdi3 (tmp_gpr_di, tmp_gpr_di,
7158 GEN_INT (64 - bits_in_element)));
7159 return;
7160 }
7161
7162 default:
7163 gcc_unreachable ();
7164 }
7165
7166 return;
7167 }
7168 else
7169 gcc_unreachable ();
7170 }
7171
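/* For instance, on a little-endian ISA 3.0 target the register case above
   turns a variable extract from a V8HImode vector into roughly:

     slwi  tmp, elt, 1        # convert element number to a byte offset
     vextuhrx dest, tmp, src  # right-indexed variable halfword extract

   while the pre-ISA 3.0 fallback shifts the selected element to the top
   of the register with VSLO and then moves/shifts it into a GPR. */
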
7172 /* Return alignment of TYPE. Existing alignment is ALIGN. HOW
7173 selects whether the alignment is ABI-mandated, optional, or
7174 both ABI-mandated and optional alignment. */
7175
7176 unsigned int
7177 rs6000_data_alignment (tree type, unsigned int align, enum data_align how)
7178 {
7179 if (how != align_opt)
7180 {
7181 if (TREE_CODE (type) == VECTOR_TYPE && align < 128)
7182 align = 128;
7183 }
7184
7185 if (how != align_abi)
7186 {
7187 if (TREE_CODE (type) == ARRAY_TYPE
7188 && TYPE_MODE (TREE_TYPE (type)) == QImode)
7189 {
7190 if (align < BITS_PER_WORD)
7191 align = BITS_PER_WORD;
7192 }
7193 }
7194
7195 return align;
7196 }
7197
7198 /* Implement TARGET_SLOW_UNALIGNED_ACCESS. Altivec vector memory
7199 instructions simply ignore the low bits; VSX memory instructions
7200 are aligned to 4 or 8 bytes. */
7201
7202 static bool
7203 rs6000_slow_unaligned_access (machine_mode mode, unsigned int align)
7204 {
7205 return (STRICT_ALIGNMENT
7206 || (!TARGET_EFFICIENT_UNALIGNED_VSX
7207 && ((SCALAR_FLOAT_MODE_NOT_VECTOR_P (mode) && align < 32)
7208 || ((VECTOR_MODE_P (mode) || FLOAT128_VECTOR_P (mode))
7209 && (int) align < VECTOR_ALIGN (mode)))));
7210 }
7211
7212 /* Previous GCC releases forced all vector types to have 16-byte alignment. */
7213
7214 bool
7215 rs6000_special_adjust_field_align_p (tree type, unsigned int computed)
7216 {
7217 if (TARGET_ALTIVEC && TREE_CODE (type) == VECTOR_TYPE)
7218 {
7219 if (computed != 128)
7220 {
7221 static bool warned;
7222 if (!warned && warn_psabi)
7223 {
7224 warned = true;
7225 inform (input_location,
7226 "the layout of aggregates containing vectors with"
7227 " %d-byte alignment has changed in GCC 5",
7228 computed / BITS_PER_UNIT);
7229 }
7230 }
7231 /* In current GCC there is no special case. */
7232 return false;
7233 }
7234
7235 return false;
7236 }
7237
7238 /* AIX increases natural record alignment to doubleword if the first
7239 field is an FP double while the FP fields remain word aligned. */
7240
7241 unsigned int
7242 rs6000_special_round_type_align (tree type, unsigned int computed,
7243 unsigned int specified)
7244 {
7245 unsigned int align = MAX (computed, specified);
7246 tree field = TYPE_FIELDS (type);
7247
7248 /* Skip all non-field decls. */
7249 while (field != NULL && TREE_CODE (field) != FIELD_DECL)
7250 field = DECL_CHAIN (field);
7251
7252 if (field != NULL && field != type)
7253 {
7254 type = TREE_TYPE (field);
7255 while (TREE_CODE (type) == ARRAY_TYPE)
7256 type = TREE_TYPE (type);
7257
7258 if (type != error_mark_node && TYPE_MODE (type) == DFmode)
7259 align = MAX (align, 64);
7260 }
7261
7262 return align;
7263 }
7264
7265 /* Darwin increases record alignment to the natural alignment of
7266 the first field. */
7267
7268 unsigned int
7269 darwin_rs6000_special_round_type_align (tree type, unsigned int computed,
7270 unsigned int specified)
7271 {
7272 unsigned int align = MAX (computed, specified);
7273
7274 if (TYPE_PACKED (type))
7275 return align;
7276
7277 /* Find the first field, looking down into aggregates. */
7278 do {
7279 tree field = TYPE_FIELDS (type);
7280 /* Skip all non field decls */
7281 while (field != NULL && TREE_CODE (field) != FIELD_DECL)
7282 field = DECL_CHAIN (field);
7283 if (! field)
7284 break;
7285 /* A packed field does not contribute any extra alignment. */
7286 if (DECL_PACKED (field))
7287 return align;
7288 type = TREE_TYPE (field);
7289 while (TREE_CODE (type) == ARRAY_TYPE)
7290 type = TREE_TYPE (type);
7291 } while (AGGREGATE_TYPE_P (type));
7292
7293 if (! AGGREGATE_TYPE_P (type) && type != error_mark_node)
7294 align = MAX (align, TYPE_ALIGN (type));
7295
7296 return align;
7297 }
7298
7299 /* Return 1 for an operand in small memory on V.4/eabi. */
7300
7301 int
7302 small_data_operand (rtx op ATTRIBUTE_UNUSED,
7303 machine_mode mode ATTRIBUTE_UNUSED)
7304 {
7305 #if TARGET_ELF
7306 rtx sym_ref;
7307
7308 if (rs6000_sdata == SDATA_NONE || rs6000_sdata == SDATA_DATA)
7309 return 0;
7310
7311 if (DEFAULT_ABI != ABI_V4)
7312 return 0;
7313
7314 if (SYMBOL_REF_P (op))
7315 sym_ref = op;
7316
7317 else if (GET_CODE (op) != CONST
7318 || GET_CODE (XEXP (op, 0)) != PLUS
7319 || !SYMBOL_REF_P (XEXP (XEXP (op, 0), 0))
7320 || !CONST_INT_P (XEXP (XEXP (op, 0), 1)))
7321 return 0;
7322
7323 else
7324 {
7325 rtx sum = XEXP (op, 0);
7326 HOST_WIDE_INT summand;
7327
7328 /* We have to be careful here, because it is the referenced address
7329 that must be 32k from _SDA_BASE_, not just the symbol. */
7330 summand = INTVAL (XEXP (sum, 1));
7331 if (summand < 0 || summand > g_switch_value)
7332 return 0;
7333
7334 sym_ref = XEXP (sum, 0);
7335 }
7336
7337 return SYMBOL_REF_SMALL_P (sym_ref);
7338 #else
7339 return 0;
7340 #endif
7341 }
7342
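/* For example, (const (plus (symbol_ref "x") (const_int 4))) is accepted
   by small_data_operand only if x is a small-data symbol and the summand
   4 lies within [0, g_switch_value], since it is the referenced address
   x + 4, not just x, that must stay within 32k of _SDA_BASE_. */
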
7343 /* Return true if either operand is a general purpose register. */
7344
7345 bool
7346 gpr_or_gpr_p (rtx op0, rtx op1)
7347 {
7348 return ((REG_P (op0) && INT_REGNO_P (REGNO (op0)))
7349 || (REG_P (op1) && INT_REGNO_P (REGNO (op1))));
7350 }
7351
7352 /* Return true if this is a move direct operation between GPR registers and
7353 floating point/VSX registers. */
7354
7355 bool
7356 direct_move_p (rtx op0, rtx op1)
7357 {
7358 if (!REG_P (op0) || !REG_P (op1))
7359 return false;
7360
7361 if (!TARGET_DIRECT_MOVE)
7362 return false;
7363
7364 int regno0 = REGNO (op0);
7365 int regno1 = REGNO (op1);
7366 if (!HARD_REGISTER_NUM_P (regno0) || !HARD_REGISTER_NUM_P (regno1))
7367 return false;
7368
7369 if (INT_REGNO_P (regno0) && VSX_REGNO_P (regno1))
7370 return true;
7371
7372 if (VSX_REGNO_P (regno0) && INT_REGNO_P (regno1))
7373 return true;
7374
7375 return false;
7376 }
7377
7378 /* Return true if ADDR is an acceptable address for a quad memory
7379 operation of mode MODE (either LQ/STQ for general purpose registers, or
7380 LXV/STXV for vector registers under ISA 3.0). STRICT requests strict
7381 RTL checking, i.e. the base register must be a register that is valid
7382 as a base register after reload. */
7383
7384 bool
7385 quad_address_p (rtx addr, machine_mode mode, bool strict)
7386 {
7387 rtx op0, op1;
7388
7389 if (GET_MODE_SIZE (mode) != 16)
7390 return false;
7391
7392 if (legitimate_indirect_address_p (addr, strict))
7393 return true;
7394
7395 if (VECTOR_MODE_P (mode) && !mode_supports_dq_form (mode))
7396 return false;
7397
7398 if (GET_CODE (addr) != PLUS)
7399 return false;
7400
7401 op0 = XEXP (addr, 0);
7402 if (!REG_P (op0) || !INT_REG_OK_FOR_BASE_P (op0, strict))
7403 return false;
7404
7405 op1 = XEXP (addr, 1);
7406 if (!CONST_INT_P (op1))
7407 return false;
7408
7409 return quad_address_offset_p (INTVAL (op1));
7410 }
7411
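/* For example, (plus (reg r9) (const_int 48)) is a valid LXV/STXV address
   because 48 is a signed 16-bit value with the low four bits clear, while
   (plus (reg r9) (const_int 40)) is not, since DQ-form displacements must
   be multiples of 16. */
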
7412 /* Return true if this is a load or store quad operation. This function does
7413 not handle the atomic quad memory instructions. */
7414
7415 bool
7416 quad_load_store_p (rtx op0, rtx op1)
7417 {
7418 bool ret;
7419
7420 if (!TARGET_QUAD_MEMORY)
7421 ret = false;
7422
7423 else if (REG_P (op0) && MEM_P (op1))
7424 ret = (quad_int_reg_operand (op0, GET_MODE (op0))
7425 && quad_memory_operand (op1, GET_MODE (op1))
7426 && !reg_overlap_mentioned_p (op0, op1));
7427
7428 else if (MEM_P (op0) && REG_P (op1))
7429 ret = (quad_memory_operand (op0, GET_MODE (op0))
7430 && quad_int_reg_operand (op1, GET_MODE (op1)));
7431
7432 else
7433 ret = false;
7434
7435 if (TARGET_DEBUG_ADDR)
7436 {
7437 fprintf (stderr, "\n========== quad_load_store, return %s\n",
7438 ret ? "true" : "false");
7439 debug_rtx (gen_rtx_SET (op0, op1));
7440 }
7441
7442 return ret;
7443 }
7444
7445 /* Given an address, return a constant offset term if one exists. */
7446
7447 static rtx
7448 address_offset (rtx op)
7449 {
7450 if (GET_CODE (op) == PRE_INC
7451 || GET_CODE (op) == PRE_DEC)
7452 op = XEXP (op, 0);
7453 else if (GET_CODE (op) == PRE_MODIFY
7454 || GET_CODE (op) == LO_SUM)
7455 op = XEXP (op, 1);
7456
7457 if (GET_CODE (op) == CONST)
7458 op = XEXP (op, 0);
7459
7460 if (GET_CODE (op) == PLUS)
7461 op = XEXP (op, 1);
7462
7463 if (CONST_INT_P (op))
7464 return op;
7465
7466 return NULL_RTX;
7467 }
7468
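/* For example, address_offset returns (const_int 16) for
   (plus (reg r3) (const_int 16)), and also digs the constant out of
   (lo_sum (reg r3) (const (plus (symbol_ref "x") (const_int 16)))); for a
   plain (reg r3) there is no constant term and it returns NULL_RTX. */
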
7469 /* Return true if the MEM operand is a memory operand suitable for use
7470 with a (full width, possibly multiple) gpr load/store. On
7471 powerpc64 this means the offset must be divisible by 4.
7472 Implements 'Y' constraint.
7473
7474 Accept direct, indexed, offset, lo_sum and tocref. Since this is
7475 a constraint function we know the operand has satisfied a suitable
7476 memory predicate.
7477
7478 Offsetting a lo_sum should not be allowed, except where we know by
7479 alignment that a 32k boundary is not crossed. Note that by
7480 "offsetting" here we mean a further offset to access parts of the
7481 MEM. It's fine to have a lo_sum where the inner address is offset
7482 from a sym, since the same sym+offset will appear in the high part
7483 of the address calculation. */
7484
7485 bool
7486 mem_operand_gpr (rtx op, machine_mode mode)
7487 {
7488 unsigned HOST_WIDE_INT offset;
7489 int extra;
7490 rtx addr = XEXP (op, 0);
7491
7492 /* PR85755: Allow PRE_INC and PRE_DEC addresses. */
7493 if (TARGET_UPDATE
7494 && (GET_CODE (addr) == PRE_INC || GET_CODE (addr) == PRE_DEC)
7495 && mode_supports_pre_incdec_p (mode)
7496 && legitimate_indirect_address_p (XEXP (addr, 0), false))
7497 return true;
7498
7499 /* Don't allow non-offsettable addresses. See PRs 83969 and 84279. */
7500 if (!rs6000_offsettable_memref_p (op, mode, false))
7501 return false;
7502
7503 op = address_offset (addr);
7504 if (op == NULL_RTX)
7505 return true;
7506
7507 offset = INTVAL (op);
7508 if (TARGET_POWERPC64 && (offset & 3) != 0)
7509 return false;
7510
7511 extra = GET_MODE_SIZE (mode) - UNITS_PER_WORD;
7512 if (extra < 0)
7513 extra = 0;
7514
7515 if (GET_CODE (addr) == LO_SUM)
7516 /* For lo_sum addresses, we must allow any offset except one that
7517 causes a wrap, so test only the low 16 bits. */
7518 offset = ((offset & 0xffff) ^ 0x8000) - 0x8000;
7519
7520 return offset + 0x8000 < 0x10000u - extra;
7521 }
7522
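/* A worked example of the range check in mem_operand_gpr: for TImode on
   powerpc64, extra = 16 - 8 = 8, so an offset of 0x7ff0 passes
   (0x7ff0 + 0x8000 = 0xfff0 < 0xfff8) while 0x7ff8 fails, because the
   second doubleword at 0x7ff8 + 8 = 0x8000 would no longer fit in a
   signed 16-bit displacement. */
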
7523 /* As above, but for DS-FORM VSX insns. Unlike mem_operand_gpr,
7524 enforce an offset divisible by 4 even for 32-bit. */
7525
7526 bool
7527 mem_operand_ds_form (rtx op, machine_mode mode)
7528 {
7529 unsigned HOST_WIDE_INT offset;
7530 int extra;
7531 rtx addr = XEXP (op, 0);
7532
7533 if (!offsettable_address_p (false, mode, addr))
7534 return false;
7535
7536 op = address_offset (addr);
7537 if (op == NULL_RTX)
7538 return true;
7539
7540 offset = INTVAL (op);
7541 if ((offset & 3) != 0)
7542 return false;
7543
7544 extra = GET_MODE_SIZE (mode) - UNITS_PER_WORD;
7545 if (extra < 0)
7546 extra = 0;
7547
7548 if (GET_CODE (addr) == LO_SUM)
7549 /* For lo_sum addresses, we must allow any offset except one that
7550 causes a wrap, so test only the low 16 bits. */
7551 offset = ((offset & 0xffff) ^ 0x8000) - 0x8000;
7552
7553 return offset + 0x8000 < 0x10000u - extra;
7554 }
7555 \f
7556 /* Subroutines of rs6000_legitimize_address and rs6000_legitimate_address_p. */
7557
7558 static bool
7559 reg_offset_addressing_ok_p (machine_mode mode)
7560 {
7561 switch (mode)
7562 {
7563 case E_V16QImode:
7564 case E_V8HImode:
7565 case E_V4SFmode:
7566 case E_V4SImode:
7567 case E_V2DFmode:
7568 case E_V2DImode:
7569 case E_V1TImode:
7570 case E_TImode:
7571 case E_TFmode:
7572 case E_KFmode:
7573 /* AltiVec/VSX vector modes. Only reg+reg addressing was valid until the
7574 ISA 3.0 vector d-form addressing mode was added. While TImode is not
7575 a vector mode, if we want to use the VSX registers to move it around,
7576 we need to restrict ourselves to reg+reg addressing. Similarly for
7577 IEEE 128-bit floating point that is passed in a single vector
7578 register. */
7579 if (VECTOR_MEM_ALTIVEC_OR_VSX_P (mode))
7580 return mode_supports_dq_form (mode);
7581 break;
7582
7583 case E_SDmode:
7584 /* If we can do direct load/stores of SDmode, restrict it to reg+reg
7585 addressing for the LFIWZX and STFIWX instructions. */
7586 if (TARGET_NO_SDMODE_STACK)
7587 return false;
7588 break;
7589
7590 default:
7591 break;
7592 }
7593
7594 return true;
7595 }
7596
7597 static bool
7598 virtual_stack_registers_memory_p (rtx op)
7599 {
7600 int regnum;
7601
7602 if (REG_P (op))
7603 regnum = REGNO (op);
7604
7605 else if (GET_CODE (op) == PLUS
7606 && REG_P (XEXP (op, 0))
7607 && CONST_INT_P (XEXP (op, 1)))
7608 regnum = REGNO (XEXP (op, 0));
7609
7610 else
7611 return false;
7612
7613 return (regnum >= FIRST_VIRTUAL_REGISTER
7614 && regnum <= LAST_VIRTUAL_POINTER_REGISTER);
7615 }
7616
7617 /* Return true if a MODE-sized memory access to OP plus OFFSET
7618 is known not to straddle a 32k boundary. This function is used
7619 to determine whether -mcmodel=medium code can use TOC pointer
7620 relative addressing for OP. This means the alignment of the TOC
7621 pointer must also be taken into account, and unfortunately that is
7622 only 8 bytes. */
7623
7624 #ifndef POWERPC64_TOC_POINTER_ALIGNMENT
7625 #define POWERPC64_TOC_POINTER_ALIGNMENT 8
7626 #endif
7627
7628 static bool
7629 offsettable_ok_by_alignment (rtx op, HOST_WIDE_INT offset,
7630 machine_mode mode)
7631 {
7632 tree decl;
7633 unsigned HOST_WIDE_INT dsize, dalign, lsb, mask;
7634
7635 if (!SYMBOL_REF_P (op))
7636 return false;
7637
7638 /* ISA 3.0 vector d-form addressing is restricted, don't allow
7639 SYMBOL_REF. */
7640 if (mode_supports_dq_form (mode))
7641 return false;
7642
7643 dsize = GET_MODE_SIZE (mode);
7644 decl = SYMBOL_REF_DECL (op);
7645 if (!decl)
7646 {
7647 if (dsize == 0)
7648 return false;
7649
7650 /* -fsection-anchors loses the original SYMBOL_REF_DECL when
7651 replacing memory addresses with an anchor plus offset. We
7652 could find the decl by rummaging around in the block->objects
7653 VEC for the given offset but that seems like too much work. */
7654 dalign = BITS_PER_UNIT;
7655 if (SYMBOL_REF_HAS_BLOCK_INFO_P (op)
7656 && SYMBOL_REF_ANCHOR_P (op)
7657 && SYMBOL_REF_BLOCK (op) != NULL)
7658 {
7659 struct object_block *block = SYMBOL_REF_BLOCK (op);
7660
7661 dalign = block->alignment;
7662 offset += SYMBOL_REF_BLOCK_OFFSET (op);
7663 }
7664 else if (CONSTANT_POOL_ADDRESS_P (op))
7665 {
7666 /* It would be nice to have a get_pool_align (). */
7667 machine_mode cmode = get_pool_mode (op);
7668
7669 dalign = GET_MODE_ALIGNMENT (cmode);
7670 }
7671 }
7672 else if (DECL_P (decl))
7673 {
7674 dalign = DECL_ALIGN (decl);
7675
7676 if (dsize == 0)
7677 {
7678 /* Allow BLKmode when the entire object is known to not
7679 cross a 32k boundary. */
7680 if (!DECL_SIZE_UNIT (decl))
7681 return false;
7682
7683 if (!tree_fits_uhwi_p (DECL_SIZE_UNIT (decl)))
7684 return false;
7685
7686 dsize = tree_to_uhwi (DECL_SIZE_UNIT (decl));
7687 if (dsize > 32768)
7688 return false;
7689
7690 dalign /= BITS_PER_UNIT;
7691 if (dalign > POWERPC64_TOC_POINTER_ALIGNMENT)
7692 dalign = POWERPC64_TOC_POINTER_ALIGNMENT;
7693 return dalign >= dsize;
7694 }
7695 }
7696 else
7697 gcc_unreachable ();
7698
7699 /* Find how many bits of the alignment we know for this access. */
7700 dalign /= BITS_PER_UNIT;
7701 if (dalign > POWERPC64_TOC_POINTER_ALIGNMENT)
7702 dalign = POWERPC64_TOC_POINTER_ALIGNMENT;
7703 mask = dalign - 1;
7704 lsb = offset & -offset;
7705 mask &= lsb - 1;
7706 dalign = mask + 1;
7707
7708 return dalign >= dsize;
7709 }
7710
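/* A short example of the mask arithmetic above: with a decl aligned to 16
   bytes, dalign is first capped to POWERPC64_TOC_POINTER_ALIGNMENT (8).
   For offset 12, lsb = 12 & -12 = 4, so the access is only known to be
   4-byte aligned; a 4-byte (SImode) access passes the dalign >= dsize
   test, but an 8-byte (DFmode) access does not, since it could straddle
   a 32k boundary. */
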
7711 static bool
7712 constant_pool_expr_p (rtx op)
7713 {
7714 rtx base, offset;
7715
7716 split_const (op, &base, &offset);
7717 return (SYMBOL_REF_P (base)
7718 && CONSTANT_POOL_ADDRESS_P (base)
7719 && ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (get_pool_constant (base), Pmode));
7720 }
7721
7722 /* These are only used to pass through from print_operand/print_operand_address
7723 to rs6000_output_addr_const_extra over the intervening function
7724 output_addr_const, which is not target code. */
7725 static const_rtx tocrel_base_oac, tocrel_offset_oac;
7726
7727 /* Return true if OP is a toc pointer relative address (the output
7728 of create_TOC_reference). If STRICT, do not match non-split
7729 -mcmodel=large/medium toc pointer relative addresses. If the pointers
7730 are non-NULL, place base and offset pieces in TOCREL_BASE_RET and
7731 TOCREL_OFFSET_RET respectively. */
7732
7733 bool
7734 toc_relative_expr_p (const_rtx op, bool strict, const_rtx *tocrel_base_ret,
7735 const_rtx *tocrel_offset_ret)
7736 {
7737 if (!TARGET_TOC)
7738 return false;
7739
7740 if (TARGET_CMODEL != CMODEL_SMALL)
7741 {
7742 /* When strict, ensure we have everything tidy. */
7743 if (strict
7744 && !(GET_CODE (op) == LO_SUM
7745 && REG_P (XEXP (op, 0))
7746 && INT_REG_OK_FOR_BASE_P (XEXP (op, 0), strict)))
7747 return false;
7748
7749 /* When not strict, allow non-split TOC addresses and also allow
7750 (lo_sum (high ..)) TOC addresses created during reload. */
7751 if (GET_CODE (op) == LO_SUM)
7752 op = XEXP (op, 1);
7753 }
7754
7755 const_rtx tocrel_base = op;
7756 const_rtx tocrel_offset = const0_rtx;
7757
7758 if (GET_CODE (op) == PLUS && add_cint_operand (XEXP (op, 1), GET_MODE (op)))
7759 {
7760 tocrel_base = XEXP (op, 0);
7761 tocrel_offset = XEXP (op, 1);
7762 }
7763
7764 if (tocrel_base_ret)
7765 *tocrel_base_ret = tocrel_base;
7766 if (tocrel_offset_ret)
7767 *tocrel_offset_ret = tocrel_offset;
7768
7769 return (GET_CODE (tocrel_base) == UNSPEC
7770 && XINT (tocrel_base, 1) == UNSPEC_TOCREL
7771 && REG_P (XVECEXP (tocrel_base, 0, 1))
7772 && REGNO (XVECEXP (tocrel_base, 0, 1)) == TOC_REGISTER);
7773 }
7774
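/* The RTL matched above looks like, e.g. for -mcmodel=small:

     (plus (unspec [(symbol_ref "sym") (reg 2)] UNSPEC_TOCREL)
           (const_int 8))

   where reg 2 is the TOC pointer; for the medium/large code models the
   unspec is instead found under a (lo_sum (reg) ...) once the high part
   of the address has been split off. */
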
7775 /* Return true if X is a constant pool address, and also for cmodel=medium
7776 if X is a toc-relative address known to be offsettable within MODE. */
7777
7778 bool
7779 legitimate_constant_pool_address_p (const_rtx x, machine_mode mode,
7780 bool strict)
7781 {
7782 const_rtx tocrel_base, tocrel_offset;
7783 return (toc_relative_expr_p (x, strict, &tocrel_base, &tocrel_offset)
7784 && (TARGET_CMODEL != CMODEL_MEDIUM
7785 || constant_pool_expr_p (XVECEXP (tocrel_base, 0, 0))
7786 || mode == QImode
7787 || offsettable_ok_by_alignment (XVECEXP (tocrel_base, 0, 0),
7788 INTVAL (tocrel_offset), mode)));
7789 }
7790
7791 static bool
7792 legitimate_small_data_p (machine_mode mode, rtx x)
7793 {
7794 return (DEFAULT_ABI == ABI_V4
7795 && !flag_pic && !TARGET_TOC
7796 && (SYMBOL_REF_P (x) || GET_CODE (x) == CONST)
7797 && small_data_operand (x, mode));
7798 }
7799
7800 bool
7801 rs6000_legitimate_offset_address_p (machine_mode mode, rtx x,
7802 bool strict, bool worst_case)
7803 {
7804 unsigned HOST_WIDE_INT offset;
7805 unsigned int extra;
7806
7807 if (GET_CODE (x) != PLUS)
7808 return false;
7809 if (!REG_P (XEXP (x, 0)))
7810 return false;
7811 if (!INT_REG_OK_FOR_BASE_P (XEXP (x, 0), strict))
7812 return false;
7813 if (mode_supports_dq_form (mode))
7814 return quad_address_p (x, mode, strict);
7815 if (!reg_offset_addressing_ok_p (mode))
7816 return virtual_stack_registers_memory_p (x);
7817 if (legitimate_constant_pool_address_p (x, mode, strict || lra_in_progress))
7818 return true;
7819 if (!CONST_INT_P (XEXP (x, 1)))
7820 return false;
7821
7822 offset = INTVAL (XEXP (x, 1));
7823 extra = 0;
7824 switch (mode)
7825 {
7826 case E_DFmode:
7827 case E_DDmode:
7828 case E_DImode:
7829 /* If we are using VSX scalar loads, restrict ourselves to reg+reg
7830 addressing. */
7831 if (VECTOR_MEM_VSX_P (mode))
7832 return false;
7833
7834 if (!worst_case)
7835 break;
7836 if (!TARGET_POWERPC64)
7837 extra = 4;
7838 else if (offset & 3)
7839 return false;
7840 break;
7841
7842 case E_TFmode:
7843 case E_IFmode:
7844 case E_KFmode:
7845 case E_TDmode:
7846 case E_TImode:
7847 case E_PTImode:
7848 extra = 8;
7849 if (!worst_case)
7850 break;
7851 if (!TARGET_POWERPC64)
7852 extra = 12;
7853 else if (offset & 3)
7854 return false;
7855 break;
7856
7857 default:
7858 break;
7859 }
7860
7861 offset += 0x8000;
7862 return offset < 0x10000 - extra;
7863 }
7864
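/* A worked example of the worst-case reservation above: for TFmode on a
   32-bit target, extra = 12, so offset 0x7ff0 is accepted
   (0x7ff0 + 0x8000 = 0xfff0 < 0xfff4) but 0x7ff4 is rejected, keeping
   the last of the four words, at offset + 12, reachable with a 16-bit
   displacement. */
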
7865 bool
7866 legitimate_indexed_address_p (rtx x, int strict)
7867 {
7868 rtx op0, op1;
7869
7870 if (GET_CODE (x) != PLUS)
7871 return false;
7872
7873 op0 = XEXP (x, 0);
7874 op1 = XEXP (x, 1);
7875
7876 return (REG_P (op0) && REG_P (op1)
7877 && ((INT_REG_OK_FOR_BASE_P (op0, strict)
7878 && INT_REG_OK_FOR_INDEX_P (op1, strict))
7879 || (INT_REG_OK_FOR_BASE_P (op1, strict)
7880 && INT_REG_OK_FOR_INDEX_P (op0, strict))));
7881 }
7882
7883 bool
7884 avoiding_indexed_address_p (machine_mode mode)
7885 {
7886 /* Avoid indexed addressing for modes that have non-indexed
7887 load/store instruction forms. */
7888 return (TARGET_AVOID_XFORM && VECTOR_MEM_NONE_P (mode));
7889 }
7890
7891 bool
7892 legitimate_indirect_address_p (rtx x, int strict)
7893 {
7894 return REG_P (x) && INT_REG_OK_FOR_BASE_P (x, strict);
7895 }
7896
7897 bool
7898 macho_lo_sum_memory_operand (rtx x, machine_mode mode)
7899 {
7900 if (!TARGET_MACHO || !flag_pic
7901 || mode != SImode || !MEM_P (x))
7902 return false;
7903 x = XEXP (x, 0);
7904
7905 if (GET_CODE (x) != LO_SUM)
7906 return false;
7907 if (!REG_P (XEXP (x, 0)))
7908 return false;
7909 if (!INT_REG_OK_FOR_BASE_P (XEXP (x, 0), 0))
7910 return false;
7911 x = XEXP (x, 1);
7912
7913 return CONSTANT_P (x);
7914 }
7915
7916 static bool
7917 legitimate_lo_sum_address_p (machine_mode mode, rtx x, int strict)
7918 {
7919 if (GET_CODE (x) != LO_SUM)
7920 return false;
7921 if (!REG_P (XEXP (x, 0)))
7922 return false;
7923 if (!INT_REG_OK_FOR_BASE_P (XEXP (x, 0), strict))
7924 return false;
7925 /* Quad-word addresses are restricted, and we can't use LO_SUM. */
7926 if (mode_supports_dq_form (mode))
7927 return false;
7928 x = XEXP (x, 1);
7929
7930 if (TARGET_ELF || TARGET_MACHO)
7931 {
7932 bool large_toc_ok;
7933
7934 if (DEFAULT_ABI == ABI_V4 && flag_pic)
7935 return false;
7936 /* LRA doesn't use LEGITIMIZE_RELOAD_ADDRESS, which is usually
7937 called via push_reload from the old reload pass code.
7938 LEGITIMIZE_RELOAD_ADDRESS recognizes some LO_SUM addresses as
7939 valid although this function says the opposite. In most cases
7940 LRA's own transformations generate correct code for address
7941 reloads; only some LO_SUM cases defeat it. So we need to add
7942 code here saying that those addresses are still valid. */
7943 large_toc_ok = (lra_in_progress && TARGET_CMODEL != CMODEL_SMALL
7944 && small_toc_ref (x, VOIDmode));
7945 if (TARGET_TOC && ! large_toc_ok)
7946 return false;
7947 if (GET_MODE_NUNITS (mode) != 1)
7948 return false;
7949 if (GET_MODE_SIZE (mode) > UNITS_PER_WORD
7950 && !(/* ??? Assume floating point reg based on mode? */
7951 TARGET_HARD_FLOAT && (mode == DFmode || mode == DDmode)))
7952 return false;
7953
7954 return CONSTANT_P (x) || large_toc_ok;
7955 }
7956
7957 return false;
7958 }
7959
7960
7961 /* Try machine-dependent ways of modifying an illegitimate address
7962 to be legitimate. If we find one, return the new, valid address.
7963 This is used from only one place: `memory_address' in explow.c.
7964
7965 OLDX is the address as it was before break_out_memory_refs was
7966 called. In some cases it is useful to look at this to decide what
7967 needs to be done.
7968
7969 It is always safe for this function to do nothing. It exists to
7970 recognize opportunities to optimize the output.
7971
7972 On RS/6000, first check for the sum of a register with a constant
7973 integer that is out of range. If so, generate code to add the
7974 constant with the low-order 16 bits masked to the register and force
7975 this result into another register (this can be done with `cau').
7976 Then generate an address of REG+(CONST&0xffff), allowing for the
7977 possibility of bit 16 being a one.
7978
7979 Then check for the sum of a register and something not constant, try to
7980 load the other things into a register and return the sum. */
7981
7982 static rtx
7983 rs6000_legitimize_address (rtx x, rtx oldx ATTRIBUTE_UNUSED,
7984 machine_mode mode)
7985 {
7986 unsigned int extra;
7987
7988 if (!reg_offset_addressing_ok_p (mode)
7989 || mode_supports_dq_form (mode))
7990 {
7991 if (virtual_stack_registers_memory_p (x))
7992 return x;
7993
7994 /* In theory we should not be seeing addresses of the form reg+0,
7995 but just in case it is generated, optimize it away. */
7996 if (GET_CODE (x) == PLUS && XEXP (x, 1) == const0_rtx)
7997 return force_reg (Pmode, XEXP (x, 0));
7998
7999 /* For TImode with load/store quad, restrict addresses to just a single
8000 pointer, so it works with both GPRs and VSX registers. */
8001 /* Make sure both operands are registers. */
8002 else if (GET_CODE (x) == PLUS
8003 && (mode != TImode || !TARGET_VSX))
8004 return gen_rtx_PLUS (Pmode,
8005 force_reg (Pmode, XEXP (x, 0)),
8006 force_reg (Pmode, XEXP (x, 1)));
8007 else
8008 return force_reg (Pmode, x);
8009 }
8010 if (SYMBOL_REF_P (x))
8011 {
8012 enum tls_model model = SYMBOL_REF_TLS_MODEL (x);
8013 if (model != 0)
8014 return rs6000_legitimize_tls_address (x, model);
8015 }
8016
8017 extra = 0;
8018 switch (mode)
8019 {
8020 case E_TFmode:
8021 case E_TDmode:
8022 case E_TImode:
8023 case E_PTImode:
8024 case E_IFmode:
8025 case E_KFmode:
8026 /* As in legitimate_offset_address_p we do not assume
8027 worst-case. The mode here is just a hint as to the registers
8028 used. A TImode is usually in gprs, but may actually be in
8029 fprs. Leave worst-case scenario for reload to handle via
8030 insn constraints. PTImode is only GPRs. */
8031 extra = 8;
8032 break;
8033 default:
8034 break;
8035 }
8036
8037 if (GET_CODE (x) == PLUS
8038 && REG_P (XEXP (x, 0))
8039 && CONST_INT_P (XEXP (x, 1))
8040 && ((unsigned HOST_WIDE_INT) (INTVAL (XEXP (x, 1)) + 0x8000)
8041 >= 0x10000 - extra))
8042 {
8043 HOST_WIDE_INT high_int, low_int;
8044 rtx sum;
8045 low_int = ((INTVAL (XEXP (x, 1)) & 0xffff) ^ 0x8000) - 0x8000;
8046 if (low_int >= 0x8000 - extra)
8047 low_int = 0;
8048 high_int = INTVAL (XEXP (x, 1)) - low_int;
8049 sum = force_operand (gen_rtx_PLUS (Pmode, XEXP (x, 0),
8050 GEN_INT (high_int)), 0);
8051 return plus_constant (Pmode, sum, low_int);
8052 }
8053 else if (GET_CODE (x) == PLUS
8054 && REG_P (XEXP (x, 0))
8055 && !CONST_INT_P (XEXP (x, 1))
8056 && GET_MODE_NUNITS (mode) == 1
8057 && (GET_MODE_SIZE (mode) <= UNITS_PER_WORD
8058 || (/* ??? Assume floating point reg based on mode? */
8059 TARGET_HARD_FLOAT && (mode == DFmode || mode == DDmode)))
8060 && !avoiding_indexed_address_p (mode))
8061 {
8062 return gen_rtx_PLUS (Pmode, XEXP (x, 0),
8063 force_reg (Pmode, force_operand (XEXP (x, 1), 0)));
8064 }
8065 else if ((TARGET_ELF
8066 #if TARGET_MACHO
8067 || !MACHO_DYNAMIC_NO_PIC_P
8068 #endif
8069 )
8070 && TARGET_32BIT
8071 && TARGET_NO_TOC
8072 && !flag_pic
8073 && !CONST_INT_P (x)
8074 && !CONST_WIDE_INT_P (x)
8075 && !CONST_DOUBLE_P (x)
8076 && CONSTANT_P (x)
8077 && GET_MODE_NUNITS (mode) == 1
8078 && (GET_MODE_SIZE (mode) <= UNITS_PER_WORD
8079 || (/* ??? Assume floating point reg based on mode? */
8080 TARGET_HARD_FLOAT && (mode == DFmode || mode == DDmode))))
8081 {
8082 rtx reg = gen_reg_rtx (Pmode);
8083 if (TARGET_ELF)
8084 emit_insn (gen_elf_high (reg, x));
8085 else
8086 emit_insn (gen_macho_high (reg, x));
8087 return gen_rtx_LO_SUM (Pmode, reg, x);
8088 }
8089 else if (TARGET_TOC
8090 && SYMBOL_REF_P (x)
8091 && constant_pool_expr_p (x)
8092 && ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (get_pool_constant (x), Pmode))
8093 return create_TOC_reference (x, NULL_RTX);
8094 else
8095 return x;
8096 }
8097
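/* As an example of the high/low split above: for reg + 0x12345 the code
   emits an addis of high_int = 0x10000 into a new register and returns
   reg' + 0x2345, since low_int = ((0x2345 ^ 0x8000) - 0x8000) = 0x2345.
   For reg + 0x18000, low_int works out to -0x8000 and high_int to
   0x20000, exploiting the sign-extension of the 16-bit displacement. */
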
8098 /* Debug version of rs6000_legitimize_address. */
8099 static rtx
8100 rs6000_debug_legitimize_address (rtx x, rtx oldx, machine_mode mode)
8101 {
8102 rtx ret;
8103 rtx_insn *insns;
8104
8105 start_sequence ();
8106 ret = rs6000_legitimize_address (x, oldx, mode);
8107 insns = get_insns ();
8108 end_sequence ();
8109
8110 if (ret != x)
8111 {
8112 fprintf (stderr,
8113 "\nrs6000_legitimize_address: mode %s, old code %s, "
8114 "new code %s, modified\n",
8115 GET_MODE_NAME (mode), GET_RTX_NAME (GET_CODE (x)),
8116 GET_RTX_NAME (GET_CODE (ret)));
8117
8118 fprintf (stderr, "Original address:\n");
8119 debug_rtx (x);
8120
8121 fprintf (stderr, "oldx:\n");
8122 debug_rtx (oldx);
8123
8124 fprintf (stderr, "New address:\n");
8125 debug_rtx (ret);
8126
8127 if (insns)
8128 {
8129 fprintf (stderr, "Insns added:\n");
8130 debug_rtx_list (insns, 20);
8131 }
8132 }
8133 else
8134 {
8135 fprintf (stderr,
8136 "\nrs6000_legitimize_address: mode %s, code %s, no change:\n",
8137 GET_MODE_NAME (mode), GET_RTX_NAME (GET_CODE (x)));
8138
8139 debug_rtx (x);
8140 }
8141
8142 if (insns)
8143 emit_insn (insns);
8144
8145 return ret;
8146 }
8147
8148 /* This is called from dwarf2out.c via TARGET_ASM_OUTPUT_DWARF_DTPREL.
8149 We need to emit DTP-relative relocations. */
8150
8151 static void rs6000_output_dwarf_dtprel (FILE *, int, rtx) ATTRIBUTE_UNUSED;
8152 static void
8153 rs6000_output_dwarf_dtprel (FILE *file, int size, rtx x)
8154 {
8155 switch (size)
8156 {
8157 case 4:
8158 fputs ("\t.long\t", file);
8159 break;
8160 case 8:
8161 fputs (DOUBLE_INT_ASM_OP, file);
8162 break;
8163 default:
8164 gcc_unreachable ();
8165 }
8166 output_addr_const (file, x);
8167 if (TARGET_ELF)
8168 fputs ("@dtprel+0x8000", file);
8169 else if (TARGET_XCOFF && SYMBOL_REF_P (x))
8170 {
8171 switch (SYMBOL_REF_TLS_MODEL (x))
8172 {
8173 case 0:
8174 break;
8175 case TLS_MODEL_LOCAL_EXEC:
8176 fputs ("@le", file);
8177 break;
8178 case TLS_MODEL_INITIAL_EXEC:
8179 fputs ("@ie", file);
8180 break;
8181 case TLS_MODEL_GLOBAL_DYNAMIC:
8182 case TLS_MODEL_LOCAL_DYNAMIC:
8183 fputs ("@m", file);
8184 break;
8185 default:
8186 gcc_unreachable ();
8187 }
8188 }
8189 }
8190
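/* For example, for a 4-byte DTP-relative reference to symbol x on ELF
   this emits:

     .long x@dtprel+0x8000

   the 0x8000 addend matching the bias applied to DTPREL offsets in the
   PowerPC TLS run-time layout. */
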
8191 /* Return true if X is a symbol that refers to real (rather than emulated)
8192 TLS. */
8193
8194 static bool
8195 rs6000_real_tls_symbol_ref_p (rtx x)
8196 {
8197 return (SYMBOL_REF_P (x)
8198 && SYMBOL_REF_TLS_MODEL (x) >= TLS_MODEL_REAL);
8199 }
8200
8201 /* In the name of slightly smaller debug output, and to cater to
8202 general assembler lossage, recognize various UNSPEC sequences
8203 and turn them back into a direct symbol reference. */
8204
8205 static rtx
8206 rs6000_delegitimize_address (rtx orig_x)
8207 {
8208 rtx x, y, offset;
8209
8210 if (GET_CODE (orig_x) == UNSPEC && XINT (orig_x, 1) == UNSPEC_FUSION_GPR)
8211 orig_x = XVECEXP (orig_x, 0, 0);
8212
8213 orig_x = delegitimize_mem_from_attrs (orig_x);
8214
8215 x = orig_x;
8216 if (MEM_P (x))
8217 x = XEXP (x, 0);
8218
8219 y = x;
8220 if (TARGET_CMODEL != CMODEL_SMALL && GET_CODE (y) == LO_SUM)
8221 y = XEXP (y, 1);
8222
8223 offset = NULL_RTX;
8224 if (GET_CODE (y) == PLUS
8225 && GET_MODE (y) == Pmode
8226 && CONST_INT_P (XEXP (y, 1)))
8227 {
8228 offset = XEXP (y, 1);
8229 y = XEXP (y, 0);
8230 }
8231
8232 if (GET_CODE (y) == UNSPEC && XINT (y, 1) == UNSPEC_TOCREL)
8233 {
8234 y = XVECEXP (y, 0, 0);
8235
8236 #ifdef HAVE_AS_TLS
8237 /* Do not associate thread-local symbols with the original
8238 constant pool symbol. */
8239 if (TARGET_XCOFF
8240 && SYMBOL_REF_P (y)
8241 && CONSTANT_POOL_ADDRESS_P (y)
8242 && rs6000_real_tls_symbol_ref_p (get_pool_constant (y)))
8243 return orig_x;
8244 #endif
8245
8246 if (offset != NULL_RTX)
8247 y = gen_rtx_PLUS (Pmode, y, offset);
8248 if (!MEM_P (orig_x))
8249 return y;
8250 else
8251 return replace_equiv_address_nv (orig_x, y);
8252 }
8253
8254 if (TARGET_MACHO
8255 && GET_CODE (orig_x) == LO_SUM
8256 && GET_CODE (XEXP (orig_x, 1)) == CONST)
8257 {
8258 y = XEXP (XEXP (orig_x, 1), 0);
8259 if (GET_CODE (y) == UNSPEC && XINT (y, 1) == UNSPEC_MACHOPIC_OFFSET)
8260 return XVECEXP (y, 0, 0);
8261 }
8262
8263 return orig_x;
8264 }
8265
8266 /* Return true if X shouldn't be emitted into the debug info.
8267 The linker doesn't like .toc section references from
8268 .debug_* sections, so reject .toc section symbols. */
8269
8270 static bool
8271 rs6000_const_not_ok_for_debug_p (rtx x)
8272 {
8273 if (GET_CODE (x) == UNSPEC)
8274 return true;
8275 if (SYMBOL_REF_P (x)
8276 && CONSTANT_POOL_ADDRESS_P (x))
8277 {
8278 rtx c = get_pool_constant (x);
8279 machine_mode cmode = get_pool_mode (x);
8280 if (ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (c, cmode))
8281 return true;
8282 }
8283
8284 return false;
8285 }
8286
8287 /* Implement the TARGET_LEGITIMATE_COMBINED_INSN hook. */
8288
8289 static bool
8290 rs6000_legitimate_combined_insn (rtx_insn *insn)
8291 {
8292 int icode = INSN_CODE (insn);
8293
8294 /* Reject creating doloop insns. Combine should not be allowed
8295 to create these for a number of reasons:
8296 1) In a nested loop, if combine creates one of these in an
8297 outer loop and the register allocator happens to allocate ctr
8298 to the outer loop insn, then the inner loop can't use ctr.
8299 Inner loops ought to be more highly optimized.
8300 2) Combine often wants to create one of these from what was
8301 originally a three insn sequence, first combining the three
8302 insns to two, then to ctrsi/ctrdi. When ctrsi/ctrdi is not
8303 allocated ctr, the splitter takes us back to the three insn
8304 sequence. It's better to stop combine at the two insn
8305 sequence.
8306 3) Faced with not being able to allocate ctr for ctrsi/ctrdi
8307 insns, the register allocator sometimes uses floating point
8308 or vector registers for the pseudo. Since ctrsi/ctrdi is a
8309 jump insn and output reloads are not implemented for jumps,
8310 the ctrsi/ctrdi splitters need to handle all possible cases.
8311 That's a pain, and it gets to be seriously difficult when a
8312 splitter that runs after reload needs memory to transfer from
8313 a gpr to fpr. See PR70098 and PR71763 which are not fixed
8314 for the difficult case. It's better to not create problems
8315 in the first place. */
8316 if (icode != CODE_FOR_nothing
8317 && (icode == CODE_FOR_bdz_si
8318 || icode == CODE_FOR_bdz_di
8319 || icode == CODE_FOR_bdnz_si
8320 || icode == CODE_FOR_bdnz_di
8321 || icode == CODE_FOR_bdztf_si
8322 || icode == CODE_FOR_bdztf_di
8323 || icode == CODE_FOR_bdnztf_si
8324 || icode == CODE_FOR_bdnztf_di))
8325 return false;
8326
8327 return true;
8328 }
8329
8330 /* Construct the SYMBOL_REF for the tls_get_addr function. */
8331
8332 static GTY(()) rtx rs6000_tls_symbol;
8333 static rtx
8334 rs6000_tls_get_addr (void)
8335 {
8336 if (!rs6000_tls_symbol)
8337 rs6000_tls_symbol = init_one_libfunc ("__tls_get_addr");
8338
8339 return rs6000_tls_symbol;
8340 }
8341
8342 /* Construct the SYMBOL_REF for TLS GOT references. */
8343
8344 static GTY(()) rtx rs6000_got_symbol;
8345 rtx
8346 rs6000_got_sym (void)
8347 {
8348 if (!rs6000_got_symbol)
8349 {
8350 rs6000_got_symbol = gen_rtx_SYMBOL_REF (Pmode, "_GLOBAL_OFFSET_TABLE_");
8351 SYMBOL_REF_FLAGS (rs6000_got_symbol) |= SYMBOL_FLAG_LOCAL;
8352 SYMBOL_REF_FLAGS (rs6000_got_symbol) |= SYMBOL_FLAG_EXTERNAL;
8353 }
8354
8355 return rs6000_got_symbol;
8356 }
8357
8358 /* AIX Thread-Local Address support. */
8359
8360 static rtx
8361 rs6000_legitimize_tls_address_aix (rtx addr, enum tls_model model)
8362 {
8363 rtx sym, mem, tocref, tlsreg, tmpreg, dest, tlsaddr;
8364 const char *name;
8365 char *tlsname;
8366
8367 name = XSTR (addr, 0);
8368 /* Append TLS CSECT qualifier, unless the symbol already is qualified
8369 or the symbol will be in TLS private data section. */
8370 if (name[strlen (name) - 1] != ']'
8371 && (TREE_PUBLIC (SYMBOL_REF_DECL (addr))
8372 || bss_initializer_p (SYMBOL_REF_DECL (addr))))
8373 {
8374 tlsname = XALLOCAVEC (char, strlen (name) + 4);
8375 strcpy (tlsname, name);
8376 strcat (tlsname,
8377 bss_initializer_p (SYMBOL_REF_DECL (addr)) ? "[UL]" : "[TL]");
8378 tlsaddr = copy_rtx (addr);
8379 XSTR (tlsaddr, 0) = ggc_strdup (tlsname);
8380 }
8381 else
8382 tlsaddr = addr;
8383
8384 /* Place addr into TOC constant pool. */
8385 sym = force_const_mem (GET_MODE (tlsaddr), tlsaddr);
8386
8387 /* Output the TOC entry and create the MEM referencing the value. */
8388 if (constant_pool_expr_p (XEXP (sym, 0))
8389 && ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (get_pool_constant (XEXP (sym, 0)), Pmode))
8390 {
8391 tocref = create_TOC_reference (XEXP (sym, 0), NULL_RTX);
8392 mem = gen_const_mem (Pmode, tocref);
8393 set_mem_alias_set (mem, get_TOC_alias_set ());
8394 }
8395 else
8396 return sym;
8397
8398 /* Use global-dynamic for local-dynamic. */
8399 if (model == TLS_MODEL_GLOBAL_DYNAMIC
8400 || model == TLS_MODEL_LOCAL_DYNAMIC)
8401 {
8402 /* Create new TOC reference for @m symbol. */
8403 name = XSTR (XVECEXP (XEXP (mem, 0), 0, 0), 0);
8404 tlsname = XALLOCAVEC (char, strlen (name) + 1);
8405 strcpy (tlsname, "*LCM");
8406 strcat (tlsname, name + 3);
8407 rtx modaddr = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (tlsname));
8408 SYMBOL_REF_FLAGS (modaddr) |= SYMBOL_FLAG_LOCAL;
8409 tocref = create_TOC_reference (modaddr, NULL_RTX);
8410 rtx modmem = gen_const_mem (Pmode, tocref);
8411 set_mem_alias_set (modmem, get_TOC_alias_set ());
8412
8413 rtx modreg = gen_reg_rtx (Pmode);
8414 emit_insn (gen_rtx_SET (modreg, modmem));
8415
8416 tmpreg = gen_reg_rtx (Pmode);
8417 emit_insn (gen_rtx_SET (tmpreg, mem));
8418
8419 dest = gen_reg_rtx (Pmode);
8420 if (TARGET_32BIT)
8421 emit_insn (gen_tls_get_addrsi (dest, modreg, tmpreg));
8422 else
8423 emit_insn (gen_tls_get_addrdi (dest, modreg, tmpreg));
8424 return dest;
8425 }
8426 /* Obtain TLS pointer: 32 bit call or 64 bit GPR 13. */
8427 else if (TARGET_32BIT)
8428 {
8429 tlsreg = gen_reg_rtx (SImode);
8430 emit_insn (gen_tls_get_tpointer (tlsreg));
8431 }
8432 else
8433 tlsreg = gen_rtx_REG (DImode, 13);
8434
8435 /* Load the TOC value into temporary register. */
8436 tmpreg = gen_reg_rtx (Pmode);
8437 emit_insn (gen_rtx_SET (tmpreg, mem));
8438 set_unique_reg_note (get_last_insn (), REG_EQUAL,
8439 gen_rtx_MINUS (Pmode, addr, tlsreg));
8440
8441 /* Add TOC symbol value to TLS pointer. */
8442 dest = force_reg (Pmode, gen_rtx_PLUS (Pmode, tmpreg, tlsreg));
8443
8444 return dest;
8445 }
8446
8447 /* Output arg setup instructions for a !TARGET_TLS_MARKERS
8448 __tls_get_addr call. */
8449
8450 void
8451 rs6000_output_tlsargs (rtx *operands)
8452 {
8453 /* Set up operands for output_asm_insn, without modifying OPERANDS. */
8454 rtx op[3];
8455
8456 /* The set dest of the call, i.e. r3, which is also the first arg reg. */
8457 op[0] = operands[0];
8458 /* The TLS symbol from global_tlsarg stashed as CALL operand 2. */
8459 op[1] = XVECEXP (operands[2], 0, 0);
8460 if (XINT (operands[2], 1) == UNSPEC_TLSGD)
8461 {
8462 /* The GOT register. */
8463 op[2] = XVECEXP (operands[2], 0, 1);
8464 if (TARGET_CMODEL != CMODEL_SMALL)
8465 output_asm_insn ("addis %0,%2,%1@got@tlsgd@ha\n\t"
8466 "addi %0,%0,%1@got@tlsgd@l", op);
8467 else
8468 output_asm_insn ("addi %0,%2,%1@got@tlsgd", op);
8469 }
8470 else if (XINT (operands[2], 1) == UNSPEC_TLSLD)
8471 {
8472 if (TARGET_CMODEL != CMODEL_SMALL)
8473 output_asm_insn ("addis %0,%1,%&@got@tlsld@ha\n\t"
8474 "addi %0,%0,%&@got@tlsld@l", op);
8475 else
8476 output_asm_insn ("addi %0,%1,%&@got@tlsld", op);
8477 }
8478 else
8479 gcc_unreachable ();
8480 }
8481
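/* Without TLS marker relocs, the full global-dynamic sequence therefore
   looks like (small code model, 64-bit):

     addi 3,2,x@got@tlsgd
     bl __tls_get_addr
     nop

   whereas with -mtls-markers the argument setup is emitted separately and
   the call itself carries the (x@tlsgd) marker so the linker can pair the
   two for TLS optimization. */
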
8482 /* Passes the tls arg value for global dynamic and local dynamic
8483 emit_library_call_value in rs6000_legitimize_tls_address to
8484 rs6000_call_aix and rs6000_call_sysv. This is used to emit the
8485 marker relocs put on __tls_get_addr calls. */
8486 static rtx global_tlsarg;
8487
8488 /* ADDR contains a thread-local SYMBOL_REF. Generate code to compute
8489 this (thread-local) address. */
8490
8491 static rtx
8492 rs6000_legitimize_tls_address (rtx addr, enum tls_model model)
8493 {
8494 rtx dest, insn;
8495
8496 if (TARGET_XCOFF)
8497 return rs6000_legitimize_tls_address_aix (addr, model);
8498
8499 dest = gen_reg_rtx (Pmode);
8500 if (model == TLS_MODEL_LOCAL_EXEC && rs6000_tls_size == 16)
8501 {
8502 rtx tlsreg;
8503
8504 if (TARGET_64BIT)
8505 {
8506 tlsreg = gen_rtx_REG (Pmode, 13);
8507 insn = gen_tls_tprel_64 (dest, tlsreg, addr);
8508 }
8509 else
8510 {
8511 tlsreg = gen_rtx_REG (Pmode, 2);
8512 insn = gen_tls_tprel_32 (dest, tlsreg, addr);
8513 }
8514 emit_insn (insn);
8515 }
8516 else if (model == TLS_MODEL_LOCAL_EXEC && rs6000_tls_size == 32)
8517 {
8518 rtx tlsreg, tmp;
8519
8520 tmp = gen_reg_rtx (Pmode);
8521 if (TARGET_64BIT)
8522 {
8523 tlsreg = gen_rtx_REG (Pmode, 13);
8524 insn = gen_tls_tprel_ha_64 (tmp, tlsreg, addr);
8525 }
8526 else
8527 {
8528 tlsreg = gen_rtx_REG (Pmode, 2);
8529 insn = gen_tls_tprel_ha_32 (tmp, tlsreg, addr);
8530 }
8531 emit_insn (insn);
8532 if (TARGET_64BIT)
8533 insn = gen_tls_tprel_lo_64 (dest, tmp, addr);
8534 else
8535 insn = gen_tls_tprel_lo_32 (dest, tmp, addr);
8536 emit_insn (insn);
8537 }
8538 else
8539 {
8540 rtx got, tga, tmp1, tmp2;
8541
8542 /* We currently use relocations like @got@tlsgd for tls, which
8543 means the linker will handle allocation of tls entries, placing
8544 them in the .got section. So use a pointer to the .got section,
8545 not one to secondary TOC sections used by 64-bit -mminimal-toc,
8546 or to secondary GOT sections used by 32-bit -fPIC. */
8547 if (TARGET_64BIT)
8548 got = gen_rtx_REG (Pmode, 2);
8549 else
8550 {
8551 if (flag_pic == 1)
8552 got = gen_rtx_REG (Pmode, RS6000_PIC_OFFSET_TABLE_REGNUM);
8553 else
8554 {
8555 rtx gsym = rs6000_got_sym ();
8556 got = gen_reg_rtx (Pmode);
8557 if (flag_pic == 0)
8558 rs6000_emit_move (got, gsym, Pmode);
8559 else
8560 {
8561 rtx mem, lab;
8562
8563 tmp1 = gen_reg_rtx (Pmode);
8564 tmp2 = gen_reg_rtx (Pmode);
8565 mem = gen_const_mem (Pmode, tmp1);
8566 lab = gen_label_rtx ();
8567 emit_insn (gen_load_toc_v4_PIC_1b (gsym, lab));
8568 emit_move_insn (tmp1, gen_rtx_REG (Pmode, LR_REGNO));
8569 if (TARGET_LINK_STACK)
8570 emit_insn (gen_addsi3 (tmp1, tmp1, GEN_INT (4)));
8571 emit_move_insn (tmp2, mem);
8572 rtx_insn *last = emit_insn (gen_addsi3 (got, tmp1, tmp2));
8573 set_unique_reg_note (last, REG_EQUAL, gsym);
8574 }
8575 }
8576 }
8577
8578 if (model == TLS_MODEL_GLOBAL_DYNAMIC)
8579 {
8580 rtx arg = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, addr, got),
8581 UNSPEC_TLSGD);
8582 tga = rs6000_tls_get_addr ();
8583 global_tlsarg = arg;
8584 if (TARGET_TLS_MARKERS)
8585 {
8586 rtx argreg = gen_rtx_REG (Pmode, 3);
8587 emit_insn (gen_rtx_SET (argreg, arg));
8588 emit_library_call_value (tga, dest, LCT_CONST, Pmode,
8589 argreg, Pmode);
8590 }
8591 else
8592 emit_library_call_value (tga, dest, LCT_CONST, Pmode);
8593 global_tlsarg = NULL_RTX;
8594
8595 /* Make a note so that the result of this call can be CSEd. */
8596 rtvec vec = gen_rtvec (1, copy_rtx (arg));
8597 rtx uns = gen_rtx_UNSPEC (Pmode, vec, UNSPEC_TLS_GET_ADDR);
8598 set_unique_reg_note (get_last_insn (), REG_EQUAL, uns);
8599 }
8600 else if (model == TLS_MODEL_LOCAL_DYNAMIC)
8601 {
8602 rtx arg = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, got), UNSPEC_TLSLD);
8603 tga = rs6000_tls_get_addr ();
8604 tmp1 = gen_reg_rtx (Pmode);
8605 global_tlsarg = arg;
8606 if (TARGET_TLS_MARKERS)
8607 {
8608 rtx argreg = gen_rtx_REG (Pmode, 3);
8609 emit_insn (gen_rtx_SET (argreg, arg));
8610 emit_library_call_value (tga, tmp1, LCT_CONST, Pmode,
8611 argreg, Pmode);
8612 }
8613 else
8614 emit_library_call_value (tga, tmp1, LCT_CONST, Pmode);
8615 global_tlsarg = NULL_RTX;
8616
8617 /* Make a note so that the result of this call can be CSEd. */
8618 rtvec vec = gen_rtvec (1, copy_rtx (arg));
8619 rtx uns = gen_rtx_UNSPEC (Pmode, vec, UNSPEC_TLS_GET_ADDR);
8620 set_unique_reg_note (get_last_insn (), REG_EQUAL, uns);
8621
8622 if (rs6000_tls_size == 16)
8623 {
8624 if (TARGET_64BIT)
8625 insn = gen_tls_dtprel_64 (dest, tmp1, addr);
8626 else
8627 insn = gen_tls_dtprel_32 (dest, tmp1, addr);
8628 }
8629 else if (rs6000_tls_size == 32)
8630 {
8631 tmp2 = gen_reg_rtx (Pmode);
8632 if (TARGET_64BIT)
8633 insn = gen_tls_dtprel_ha_64 (tmp2, tmp1, addr);
8634 else
8635 insn = gen_tls_dtprel_ha_32 (tmp2, tmp1, addr);
8636 emit_insn (insn);
8637 if (TARGET_64BIT)
8638 insn = gen_tls_dtprel_lo_64 (dest, tmp2, addr);
8639 else
8640 insn = gen_tls_dtprel_lo_32 (dest, tmp2, addr);
8641 }
8642 else
8643 {
8644 tmp2 = gen_reg_rtx (Pmode);
8645 if (TARGET_64BIT)
8646 insn = gen_tls_got_dtprel_64 (tmp2, got, addr);
8647 else
8648 insn = gen_tls_got_dtprel_32 (tmp2, got, addr);
8649 emit_insn (insn);
8650 insn = gen_rtx_SET (dest, gen_rtx_PLUS (Pmode, tmp2, tmp1));
8651 }
8652 emit_insn (insn);
8653 }
8654 else
8655 {
8656 /* IE, or 64-bit offset LE. */
8657 tmp2 = gen_reg_rtx (Pmode);
8658 if (TARGET_64BIT)
8659 insn = gen_tls_got_tprel_64 (tmp2, got, addr);
8660 else
8661 insn = gen_tls_got_tprel_32 (tmp2, got, addr);
8662 emit_insn (insn);
8663 if (TARGET_64BIT)
8664 insn = gen_tls_tls_64 (dest, tmp2, addr);
8665 else
8666 insn = gen_tls_tls_32 (dest, tmp2, addr);
8667 emit_insn (insn);
8668 }
8669 }
8670
8671 return dest;
8672 }
8673
8674 /* Only create the global variable for the stack protect guard if we are using
8675 the global flavor of that guard. */
8676 static tree
8677 rs6000_init_stack_protect_guard (void)
8678 {
8679 if (rs6000_stack_protector_guard == SSP_GLOBAL)
8680 return default_stack_protect_guard ();
8681
8682 return NULL_TREE;
8683 }
8684
8685 /* Implement TARGET_CANNOT_FORCE_CONST_MEM. */
8686
8687 static bool
8688 rs6000_cannot_force_const_mem (machine_mode mode ATTRIBUTE_UNUSED, rtx x)
8689 {
8690 if (GET_CODE (x) == HIGH
8691 && GET_CODE (XEXP (x, 0)) == UNSPEC)
8692 return true;
8693
8694 /* A TLS symbol in the TOC cannot contain a sum. */
8695 if (GET_CODE (x) == CONST
8696 && GET_CODE (XEXP (x, 0)) == PLUS
8697 && SYMBOL_REF_P (XEXP (XEXP (x, 0), 0))
8698 && SYMBOL_REF_TLS_MODEL (XEXP (XEXP (x, 0), 0)) != 0)
8699 return true;
8700
8701 /* Do not place an ELF TLS symbol in the constant pool. */
8702 return TARGET_ELF && tls_referenced_p (x);
8703 }
8704
8705 /* Return true iff the given SYMBOL_REF refers to a constant pool entry
8706 that we have put in the TOC, or for cmodel=medium, if the SYMBOL_REF
8707 can be addressed relative to the toc pointer. */
8708
8709 static bool
8710 use_toc_relative_ref (rtx sym, machine_mode mode)
8711 {
8712 return ((constant_pool_expr_p (sym)
8713 && ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (get_pool_constant (sym),
8714 get_pool_mode (sym)))
8715 || (TARGET_CMODEL == CMODEL_MEDIUM
8716 && SYMBOL_REF_LOCAL_P (sym)
8717 && GET_MODE_SIZE (mode) <= POWERPC64_TOC_POINTER_ALIGNMENT));
8718 }
8719
8720 /* TARGET_LEGITIMATE_ADDRESS_P recognizes an RTL expression
8721 that is a valid memory address for an instruction.
8722 The MODE argument is the machine mode for the MEM expression
8723 that wants to use this address.
8724
8725 On the RS/6000, there are four valid address forms: a SYMBOL_REF
8726 that refers to a constant pool entry of an address (or the sum of it
8727 plus a constant), a short (16-bit signed) constant plus a register,
8728 the sum of two registers, or a register indirect, possibly with an
8729 auto-increment. For DFmode, DDmode and DImode with a constant plus
8730 register, we must ensure that both words are addressable, or on
8731 PowerPC64 that the offset is word-aligned.
8732
8733 For modes spanning multiple registers (DFmode and DDmode in 32-bit GPRs,
8734 32-bit DImode, TImode, TFmode, TDmode), indexed addressing cannot be used
8735 because adjacent memory cells are accessed by adding word-sized offsets
8736 during assembly output. */
8737 static bool
8738 rs6000_legitimate_address_p (machine_mode mode, rtx x, bool reg_ok_strict)
8739 {
8740 bool reg_offset_p = reg_offset_addressing_ok_p (mode);
8741 bool quad_offset_p = mode_supports_dq_form (mode);
8742
8743 /* If this is an unaligned stvx/ldvx type address, discard the outer AND. */
8744 if (VECTOR_MEM_ALTIVEC_P (mode)
8745 && GET_CODE (x) == AND
8746 && CONST_INT_P (XEXP (x, 1))
8747 && INTVAL (XEXP (x, 1)) == -16)
8748 x = XEXP (x, 0);
8749
8750 if (TARGET_ELF && RS6000_SYMBOL_REF_TLS_P (x))
8751 return 0;
8752 if (legitimate_indirect_address_p (x, reg_ok_strict))
8753 return 1;
8754 if (TARGET_UPDATE
8755 && (GET_CODE (x) == PRE_INC || GET_CODE (x) == PRE_DEC)
8756 && mode_supports_pre_incdec_p (mode)
8757 && legitimate_indirect_address_p (XEXP (x, 0), reg_ok_strict))
8758 return 1;
8759 /* Handle restricted vector d-form offsets in ISA 3.0. */
8760 if (quad_offset_p)
8761 {
8762 if (quad_address_p (x, mode, reg_ok_strict))
8763 return 1;
8764 }
8765 else if (virtual_stack_registers_memory_p (x))
8766 return 1;
8767
8768 else if (reg_offset_p)
8769 {
8770 if (legitimate_small_data_p (mode, x))
8771 return 1;
8772 if (legitimate_constant_pool_address_p (x, mode,
8773 reg_ok_strict || lra_in_progress))
8774 return 1;
8775 }
8776
8777 /* For TImode, if we have TImode in VSX registers, only allow register
8778 indirect addresses. This will allow the values to go in either GPRs
8779 or VSX registers without reloading. The vector types would tend to
8780 go into VSX registers, so we allow REG+REG, while TImode seems
8781 somewhat split, in that some uses are GPR based, and some VSX based. */
8782 /* FIXME: We could loosen this by changing the following to
8783 if (mode == TImode && TARGET_QUAD_MEMORY && TARGET_VSX)
8784 but currently we cannot allow REG+REG addressing for TImode. See
8785 PR72827 for complete details on how this ends up hoodwinking DSE. */
8786 if (mode == TImode && TARGET_VSX)
8787 return 0;
8788 /* If not REG_OK_STRICT (before reload), allow any stack offset. */
8789 if (! reg_ok_strict
8790 && reg_offset_p
8791 && GET_CODE (x) == PLUS
8792 && REG_P (XEXP (x, 0))
8793 && (XEXP (x, 0) == virtual_stack_vars_rtx
8794 || XEXP (x, 0) == arg_pointer_rtx)
8795 && CONST_INT_P (XEXP (x, 1)))
8796 return 1;
8797 if (rs6000_legitimate_offset_address_p (mode, x, reg_ok_strict, false))
8798 return 1;
8799 if (!FLOAT128_2REG_P (mode)
8800 && (TARGET_HARD_FLOAT
8801 || TARGET_POWERPC64
8802 || (mode != DFmode && mode != DDmode))
8803 && (TARGET_POWERPC64 || mode != DImode)
8804 && (mode != TImode || VECTOR_MEM_VSX_P (TImode))
8805 && mode != PTImode
8806 && !avoiding_indexed_address_p (mode)
8807 && legitimate_indexed_address_p (x, reg_ok_strict))
8808 return 1;
8809 if (TARGET_UPDATE && GET_CODE (x) == PRE_MODIFY
8810 && mode_supports_pre_modify_p (mode)
8811 && legitimate_indirect_address_p (XEXP (x, 0), reg_ok_strict)
8812 && (rs6000_legitimate_offset_address_p (mode, XEXP (x, 1),
8813 reg_ok_strict, false)
8814 || (!avoiding_indexed_address_p (mode)
8815 && legitimate_indexed_address_p (XEXP (x, 1), reg_ok_strict)))
8816 && rtx_equal_p (XEXP (XEXP (x, 1), 0), XEXP (x, 0)))
8817 return 1;
8818 if (reg_offset_p && !quad_offset_p
8819 && legitimate_lo_sum_address_p (mode, x, reg_ok_strict))
8820 return 1;
8821 return 0;
8822 }
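
/* Illustrative sketches of the address forms accepted above, assuming
   a 64-bit target and a mode that permits each form:

     (reg:DI 3)				register indirect
     (plus:DI (reg:DI 3) (const_int 8))	reg + 16-bit signed offset
     (plus:DI (reg:DI 3) (reg:DI 4))	indexed (reg + reg)
     (pre_inc:DI (reg:DI 3))		auto-increment

   An offset that does not fit in a signed 16-bit field is rejected
   here and must be legitimized first.  */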
8823
8824 /* Debug version of rs6000_legitimate_address_p. */
8825 static bool
8826 rs6000_debug_legitimate_address_p (machine_mode mode, rtx x,
8827 bool reg_ok_strict)
8828 {
8829 bool ret = rs6000_legitimate_address_p (mode, x, reg_ok_strict);
8830 fprintf (stderr,
8831 "\nrs6000_legitimate_address_p: return = %s, mode = %s, "
8832 "strict = %d, reload = %s, code = %s\n",
8833 ret ? "true" : "false",
8834 GET_MODE_NAME (mode),
8835 reg_ok_strict,
8836 (reload_completed ? "after" : "before"),
8837 GET_RTX_NAME (GET_CODE (x)));
8838 debug_rtx (x);
8839
8840 return ret;
8841 }
8842
8843 /* Implement TARGET_MODE_DEPENDENT_ADDRESS_P. */
8844
8845 static bool
8846 rs6000_mode_dependent_address_p (const_rtx addr,
8847 addr_space_t as ATTRIBUTE_UNUSED)
8848 {
8849 return rs6000_mode_dependent_address_ptr (addr);
8850 }
8851
8852 /* Return true if ADDR (a legitimate address expression)
8853 has an effect that depends on the machine mode it is used for.
8854
8855 On the RS/6000 this is true of all integral offsets (since AltiVec
8856 and VSX modes don't allow them) and of pre-increment and pre-decrement.
8857
8858 ??? Except that due to conceptual problems in offsettable_address_p
8859 we can't really report the problems of integral offsets. So leave
8860 this assuming that the adjustable offset must be valid for the
8861 sub-words of a TFmode operand, which is what we had before. */
8862
8863 static bool
8864 rs6000_mode_dependent_address (const_rtx addr)
8865 {
8866 switch (GET_CODE (addr))
8867 {
8868 case PLUS:
8869 /* Any offset from virtual_stack_vars_rtx and arg_pointer_rtx
8870 is considered a legitimate address before reload, so there
8871 are no offset restrictions in that case. Note that this
8872 condition is safe in strict mode because any address involving
8873 virtual_stack_vars_rtx or arg_pointer_rtx would already have
8874 been rejected as illegitimate. */
8875 if (XEXP (addr, 0) != virtual_stack_vars_rtx
8876 && XEXP (addr, 0) != arg_pointer_rtx
8877 && CONST_INT_P (XEXP (addr, 1)))
8878 {
8879 unsigned HOST_WIDE_INT val = INTVAL (XEXP (addr, 1));
8880 return val + 0x8000 >= 0x10000 - (TARGET_POWERPC64 ? 8 : 12);
8881 }
8882 break;
8883
8884 case LO_SUM:
8885 /* Anything in the constant pool is sufficiently aligned that
8886 all bytes have the same high part address. */
8887 return !legitimate_constant_pool_address_p (addr, QImode, false);
8888
8889 /* Auto-increment cases are now treated generically in recog.c. */
8890 case PRE_MODIFY:
8891 return TARGET_UPDATE;
8892
8893 /* AND is only allowed in Altivec loads. */
8894 case AND:
8895 return true;
8896
8897 default:
8898 break;
8899 }
8900
8901 return false;
8902 }
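
/* A worked sketch of the PLUS range test above: a D-form offset must be
   a signed 16-bit value, and the widest access may reach up to 8
   (64-bit) or 12 (32-bit) bytes past that offset, so the valid range is

     -0x8000 <= val && val <= 0x7fff - (TARGET_POWERPC64 ? 8 : 12)

   The single unsigned comparison
   val + 0x8000 >= 0x10000 - (TARGET_POWERPC64 ? 8 : 12) is true exactly
   when VAL falls outside that range.  */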
8903
8904 /* Debug version of rs6000_mode_dependent_address. */
8905 static bool
8906 rs6000_debug_mode_dependent_address (const_rtx addr)
8907 {
8908 bool ret = rs6000_mode_dependent_address (addr);
8909
8910 fprintf (stderr, "\nrs6000_mode_dependent_address: ret = %s\n",
8911 ret ? "true" : "false");
8912 debug_rtx (addr);
8913
8914 return ret;
8915 }
8916
8917 /* Implement FIND_BASE_TERM. */
8918
8919 rtx
8920 rs6000_find_base_term (rtx op)
8921 {
8922 rtx base;
8923
8924 base = op;
8925 if (GET_CODE (base) == CONST)
8926 base = XEXP (base, 0);
8927 if (GET_CODE (base) == PLUS)
8928 base = XEXP (base, 0);
8929 if (GET_CODE (base) == UNSPEC)
8930 switch (XINT (base, 1))
8931 {
8932 case UNSPEC_TOCREL:
8933 case UNSPEC_MACHOPIC_OFFSET:
8934 /* OP represents SYM [+ OFFSET] - ANCHOR. SYM is the base term
8935 for aliasing purposes. */
8936 return XVECEXP (base, 0, 0);
8937 }
8938
8939 return op;
8940 }
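
/* For example, given the TOC-relative expression

     (const:DI (plus:DI (unspec:DI [(symbol_ref:DI ("x"))] UNSPEC_TOCREL)
			(const_int 4)))

   the code above strips the CONST and the PLUS and returns the
   SYMBOL_REF for "x" as the base term (a sketch of the RTL shape, not
   dumped output).  */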
8941
8942 /* More elaborate version of recog's offsettable_memref_p predicate
8943 that works around the ??? note of rs6000_mode_dependent_address.
8944 In particular it accepts
8945
8946 (mem:DI (plus:SI (reg/f:SI 31 31) (const_int 32760 [0x7ff8])))
8947
8948 in 32-bit mode, which the recog predicate rejects.  */
8949
8950 static bool
8951 rs6000_offsettable_memref_p (rtx op, machine_mode reg_mode, bool strict)
8952 {
8953 bool worst_case;
8954
8955 if (!MEM_P (op))
8956 return false;
8957
8958 /* First mimic offsettable_memref_p. */
8959 if (offsettable_address_p (strict, GET_MODE (op), XEXP (op, 0)))
8960 return true;
8961
8962 /* offsettable_address_p invokes rs6000_mode_dependent_address, but
8963 the latter predicate knows nothing about the mode of the memory
8964 reference and, therefore, assumes that it is the largest supported
8965 mode (TFmode). As a consequence, legitimate offsettable memory
8966 references are rejected. rs6000_legitimate_offset_address_p contains
8967 the correct logic for the PLUS case of rs6000_mode_dependent_address,
8968 at least with a little bit of help here given that we know the
8969 actual registers used. */
8970 worst_case = ((TARGET_POWERPC64 && GET_MODE_CLASS (reg_mode) == MODE_INT)
8971 || GET_MODE_SIZE (reg_mode) == 4);
8972 return rs6000_legitimate_offset_address_p (GET_MODE (op), XEXP (op, 0),
8973 strict, worst_case);
8974 }
8975
8976 /* Determine the reassociation width to be used in reassociate_bb.
8977 This takes into account how many parallel operations we
8978 can actually do of a given type, and also the latency.
8979 P8:
8980 int add/sub 6/cycle
8981 mul 2/cycle
8982 vect add/sub/mul 2/cycle
8983 fp add/sub/mul 2/cycle
8984 dfp 1/cycle
8985 */
8986
8987 static int
8988 rs6000_reassociation_width (unsigned int opc ATTRIBUTE_UNUSED,
8989 machine_mode mode)
8990 {
8991 switch (rs6000_tune)
8992 {
8993 case PROCESSOR_POWER8:
8994 case PROCESSOR_POWER9:
8995 case PROCESSOR_FUTURE:
8996 if (DECIMAL_FLOAT_MODE_P (mode))
8997 return 1;
8998 if (VECTOR_MODE_P (mode))
8999 return 4;
9000 if (INTEGRAL_MODE_P (mode))
9001 return 1;
9002 if (FLOAT_MODE_P (mode))
9003 return 4;
9004 break;
9005 default:
9006 break;
9007 }
9008 return 1;
9009 }
9010
9011 /* Change register usage conditional on target flags. */
9012 static void
9013 rs6000_conditional_register_usage (void)
9014 {
9015 int i;
9016
9017 if (TARGET_DEBUG_TARGET)
9018 fprintf (stderr, "rs6000_conditional_register_usage called\n");
9019
9020 /* 64-bit AIX and Linux reserve GPR13 for thread-private data. */
9021 if (TARGET_64BIT)
9022 fixed_regs[13] = call_used_regs[13]
9023 = call_really_used_regs[13] = 1;
9024
9025 /* Conditionally disable FPRs. */
9026 if (TARGET_SOFT_FLOAT)
9027 for (i = 32; i < 64; i++)
9028 fixed_regs[i] = call_used_regs[i]
9029 = call_really_used_regs[i] = 1;
9030
9031 /* The TOC register is not killed across calls in a way that is
9032 visible to the compiler. */
9033 if (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
9034 call_really_used_regs[2] = 0;
9035
9036 if (DEFAULT_ABI == ABI_V4 && flag_pic == 2)
9037 fixed_regs[RS6000_PIC_OFFSET_TABLE_REGNUM] = 1;
9038
9039 if (DEFAULT_ABI == ABI_V4 && flag_pic == 1)
9040 fixed_regs[RS6000_PIC_OFFSET_TABLE_REGNUM]
9041 = call_used_regs[RS6000_PIC_OFFSET_TABLE_REGNUM]
9042 = call_really_used_regs[RS6000_PIC_OFFSET_TABLE_REGNUM] = 1;
9043
9044 if (DEFAULT_ABI == ABI_DARWIN && flag_pic)
9045 fixed_regs[RS6000_PIC_OFFSET_TABLE_REGNUM]
9046 = call_used_regs[RS6000_PIC_OFFSET_TABLE_REGNUM]
9047 = call_really_used_regs[RS6000_PIC_OFFSET_TABLE_REGNUM] = 1;
9048
9049 if (TARGET_TOC && TARGET_MINIMAL_TOC)
9050 fixed_regs[RS6000_PIC_OFFSET_TABLE_REGNUM]
9051 = call_used_regs[RS6000_PIC_OFFSET_TABLE_REGNUM] = 1;
9052
9053 if (!TARGET_ALTIVEC && !TARGET_VSX)
9054 {
9055 for (i = FIRST_ALTIVEC_REGNO; i <= LAST_ALTIVEC_REGNO; ++i)
9056 fixed_regs[i] = call_used_regs[i] = call_really_used_regs[i] = 1;
9057 call_really_used_regs[VRSAVE_REGNO] = 1;
9058 }
9059
9060 if (TARGET_ALTIVEC || TARGET_VSX)
9061 global_regs[VSCR_REGNO] = 1;
9062
9063 if (TARGET_ALTIVEC_ABI)
9064 {
9065 for (i = FIRST_ALTIVEC_REGNO; i < FIRST_ALTIVEC_REGNO + 20; ++i)
9066 call_used_regs[i] = call_really_used_regs[i] = 1;
9067
9068 /* AIX reserves VR20:31 in non-extended ABI mode. */
9069 if (TARGET_XCOFF)
9070 for (i = FIRST_ALTIVEC_REGNO + 20; i < FIRST_ALTIVEC_REGNO + 32; ++i)
9071 fixed_regs[i] = call_used_regs[i] = call_really_used_regs[i] = 1;
9072 }
9073 }
9074
9075 \f
9076 /* Output insns to set DEST equal to the constant SOURCE as a series of
9077 lis, ori and shl instructions and return TRUE. */
9078
9079 bool
9080 rs6000_emit_set_const (rtx dest, rtx source)
9081 {
9082 machine_mode mode = GET_MODE (dest);
9083 rtx temp, set;
9084 rtx_insn *insn;
9085 HOST_WIDE_INT c;
9086
9087 gcc_checking_assert (CONST_INT_P (source));
9088 c = INTVAL (source);
9089 switch (mode)
9090 {
9091 case E_QImode:
9092 case E_HImode:
9093 emit_insn (gen_rtx_SET (dest, source));
9094 return true;
9095
9096 case E_SImode:
9097 temp = !can_create_pseudo_p () ? dest : gen_reg_rtx (SImode);
9098
9099 emit_insn (gen_rtx_SET (copy_rtx (temp),
9100 GEN_INT (c & ~(HOST_WIDE_INT) 0xffff)));
9101 emit_insn (gen_rtx_SET (dest,
9102 gen_rtx_IOR (SImode, copy_rtx (temp),
9103 GEN_INT (c & 0xffff))));
9104 break;
9105
9106 case E_DImode:
9107 if (!TARGET_POWERPC64)
9108 {
9109 rtx hi, lo;
9110
9111 hi = operand_subword_force (copy_rtx (dest), WORDS_BIG_ENDIAN == 0,
9112 DImode);
9113 lo = operand_subword_force (dest, WORDS_BIG_ENDIAN != 0,
9114 DImode);
9115 emit_move_insn (hi, GEN_INT (c >> 32));
9116 c = ((c & 0xffffffff) ^ 0x80000000) - 0x80000000;
9117 emit_move_insn (lo, GEN_INT (c));
9118 }
9119 else
9120 rs6000_emit_set_long_const (dest, c);
9121 break;
9122
9123 default:
9124 gcc_unreachable ();
9125 }
9126
9127 insn = get_last_insn ();
9128 set = single_set (insn);
9129 if (! CONSTANT_P (SET_SRC (set)))
9130 set_unique_reg_note (insn, REG_EQUAL, GEN_INT (c));
9131
9132 return true;
9133 }
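
/* For instance, setting an SImode register to 0x12345678 produces the
   usual two-instruction sequence (illustrative assembly):

	lis 9,0x1234		# temp = 0x12340000
	ori 9,9,0x5678		# dest = temp | 0x5678  */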
9134
9135 /* Subroutine of rs6000_emit_set_const, handling PowerPC64 DImode.
9136 Output insns to set DEST equal to the constant C as a series of
9137 lis, ori and shl instructions. */
9138
9139 static void
9140 rs6000_emit_set_long_const (rtx dest, HOST_WIDE_INT c)
9141 {
9142 rtx temp;
9143 HOST_WIDE_INT ud1, ud2, ud3, ud4;
9144
9145 ud1 = c & 0xffff;
9146 c = c >> 16;
9147 ud2 = c & 0xffff;
9148 c = c >> 16;
9149 ud3 = c & 0xffff;
9150 c = c >> 16;
9151 ud4 = c & 0xffff;
9152
9153 if ((ud4 == 0xffff && ud3 == 0xffff && ud2 == 0xffff && (ud1 & 0x8000))
9154 || (ud4 == 0 && ud3 == 0 && ud2 == 0 && ! (ud1 & 0x8000)))
9155 emit_move_insn (dest, GEN_INT ((ud1 ^ 0x8000) - 0x8000));
9156
9157 else if ((ud4 == 0xffff && ud3 == 0xffff && (ud2 & 0x8000))
9158 || (ud4 == 0 && ud3 == 0 && ! (ud2 & 0x8000)))
9159 {
9160 temp = !can_create_pseudo_p () ? dest : gen_reg_rtx (DImode);
9161
9162 emit_move_insn (ud1 != 0 ? copy_rtx (temp) : dest,
9163 GEN_INT (((ud2 << 16) ^ 0x80000000) - 0x80000000));
9164 if (ud1 != 0)
9165 emit_move_insn (dest,
9166 gen_rtx_IOR (DImode, copy_rtx (temp),
9167 GEN_INT (ud1)));
9168 }
9169 else if (ud3 == 0 && ud4 == 0)
9170 {
9171 temp = !can_create_pseudo_p () ? dest : gen_reg_rtx (DImode);
9172
9173 gcc_assert (ud2 & 0x8000);
9174 emit_move_insn (copy_rtx (temp),
9175 GEN_INT (((ud2 << 16) ^ 0x80000000) - 0x80000000));
9176 if (ud1 != 0)
9177 emit_move_insn (copy_rtx (temp),
9178 gen_rtx_IOR (DImode, copy_rtx (temp),
9179 GEN_INT (ud1)));
9180 emit_move_insn (dest,
9181 gen_rtx_ZERO_EXTEND (DImode,
9182 gen_lowpart (SImode,
9183 copy_rtx (temp))));
9184 }
9185 else if ((ud4 == 0xffff && (ud3 & 0x8000))
9186 || (ud4 == 0 && ! (ud3 & 0x8000)))
9187 {
9188 temp = !can_create_pseudo_p () ? dest : gen_reg_rtx (DImode);
9189
9190 emit_move_insn (copy_rtx (temp),
9191 GEN_INT (((ud3 << 16) ^ 0x80000000) - 0x80000000));
9192 if (ud2 != 0)
9193 emit_move_insn (copy_rtx (temp),
9194 gen_rtx_IOR (DImode, copy_rtx (temp),
9195 GEN_INT (ud2)));
9196 emit_move_insn (ud1 != 0 ? copy_rtx (temp) : dest,
9197 gen_rtx_ASHIFT (DImode, copy_rtx (temp),
9198 GEN_INT (16)));
9199 if (ud1 != 0)
9200 emit_move_insn (dest,
9201 gen_rtx_IOR (DImode, copy_rtx (temp),
9202 GEN_INT (ud1)));
9203 }
9204 else
9205 {
9206 temp = !can_create_pseudo_p () ? dest : gen_reg_rtx (DImode);
9207
9208 emit_move_insn (copy_rtx (temp),
9209 GEN_INT (((ud4 << 16) ^ 0x80000000) - 0x80000000));
9210 if (ud3 != 0)
9211 emit_move_insn (copy_rtx (temp),
9212 gen_rtx_IOR (DImode, copy_rtx (temp),
9213 GEN_INT (ud3)));
9214
9215 emit_move_insn (ud2 != 0 || ud1 != 0 ? copy_rtx (temp) : dest,
9216 gen_rtx_ASHIFT (DImode, copy_rtx (temp),
9217 GEN_INT (32)));
9218 if (ud2 != 0)
9219 emit_move_insn (ud1 != 0 ? copy_rtx (temp) : dest,
9220 gen_rtx_IOR (DImode, copy_rtx (temp),
9221 GEN_INT (ud2 << 16)));
9222 if (ud1 != 0)
9223 emit_move_insn (dest,
9224 gen_rtx_IOR (DImode, copy_rtx (temp),
9225 GEN_INT (ud1)));
9226 }
9227 }
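
/* A sketch of the general case above for c = 0x1122334455667788
   (ud4/ud3/ud2/ud1 = 0x1122/0x3344/0x5566/0x7788), five instructions:

	lis  9,0x1122		# ud4 << 16
	ori  9,9,0x3344		# merge ud3
	sldi 9,9,32		# shift into the high doubleword
	oris 9,9,0x5566		# merge ud2
	ori  9,9,0x7788		# merge ud1  */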
9228
9229 /* Helper for rs6000_emit_move below.  Get rid of [r+r] memory refs
9230 in cases where they won't work (TImode, TFmode, TDmode, PTImode). */
9231
9232 static void
9233 rs6000_eliminate_indexed_memrefs (rtx operands[2])
9234 {
9235 if (MEM_P (operands[0])
9236 && !REG_P (XEXP (operands[0], 0))
9237 && ! legitimate_constant_pool_address_p (XEXP (operands[0], 0),
9238 GET_MODE (operands[0]), false))
9239 operands[0]
9240 = replace_equiv_address (operands[0],
9241 copy_addr_to_reg (XEXP (operands[0], 0)));
9242
9243 if (MEM_P (operands[1])
9244 && !REG_P (XEXP (operands[1], 0))
9245 && ! legitimate_constant_pool_address_p (XEXP (operands[1], 0),
9246 GET_MODE (operands[1]), false))
9247 operands[1]
9248 = replace_equiv_address (operands[1],
9249 copy_addr_to_reg (XEXP (operands[1], 0)));
9250 }
9251
9252 /* Generate a vector of constants to permute MODE for a little-endian
9253 storage operation by swapping the two halves of a vector. */
9254 static rtvec
9255 rs6000_const_vec (machine_mode mode)
9256 {
9257 int i, subparts;
9258 rtvec v;
9259
9260 switch (mode)
9261 {
9262 case E_V1TImode:
9263 subparts = 1;
9264 break;
9265 case E_V2DFmode:
9266 case E_V2DImode:
9267 subparts = 2;
9268 break;
9269 case E_V4SFmode:
9270 case E_V4SImode:
9271 subparts = 4;
9272 break;
9273 case E_V8HImode:
9274 subparts = 8;
9275 break;
9276 case E_V16QImode:
9277 subparts = 16;
9278 break;
9279 default:
9280 gcc_unreachable ();
9281 }
9282
9283 v = rtvec_alloc (subparts);
9284
9285 for (i = 0; i < subparts / 2; ++i)
9286 RTVEC_ELT (v, i) = gen_rtx_CONST_INT (DImode, i + subparts / 2);
9287 for (i = subparts / 2; i < subparts; ++i)
9288 RTVEC_ELT (v, i) = gen_rtx_CONST_INT (DImode, i - subparts / 2);
9289
9290 return v;
9291 }
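
/* For V4SImode, for example, the vector built above is { 2, 3, 0, 1 },
   i.e. a selector that swaps the two doubleword halves of a vector.  */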
9292
9293 /* Emit an lxvd2x, stxvd2x, or xxpermdi instruction for a VSX load or
9294 store operation. */
9295 void
9296 rs6000_emit_le_vsx_permute (rtx dest, rtx source, machine_mode mode)
9297 {
9298 /* Scalar permutations are easier to express in integer modes than in
9299 floating-point modes, so cast them here.  We use V1TImode instead
9300 of TImode to ensure that the values don't go through GPRs. */
9301 if (FLOAT128_VECTOR_P (mode))
9302 {
9303 dest = gen_lowpart (V1TImode, dest);
9304 source = gen_lowpart (V1TImode, source);
9305 mode = V1TImode;
9306 }
9307
9308 /* Use ROTATE instead of VEC_SELECT if the mode contains only a single
9309 scalar. */
9310 if (mode == TImode || mode == V1TImode)
9311 emit_insn (gen_rtx_SET (dest, gen_rtx_ROTATE (mode, source,
9312 GEN_INT (64))));
9313 else
9314 {
9315 rtx par = gen_rtx_PARALLEL (VOIDmode, rs6000_const_vec (mode));
9316 emit_insn (gen_rtx_SET (dest, gen_rtx_VEC_SELECT (mode, source, par)));
9317 }
9318 }
9319
9320 /* Emit a little-endian load from vector memory location SOURCE to VSX
9321 register DEST in mode MODE. The load is done with two permuting
9322 insns that represent an lxvd2x and xxpermdi.  */
9323 void
9324 rs6000_emit_le_vsx_load (rtx dest, rtx source, machine_mode mode)
9325 {
9326 /* Use V2DImode to do swaps of types with 128-bit scalar parts (TImode,
9327 V1TImode). */
9328 if (mode == TImode || mode == V1TImode)
9329 {
9330 mode = V2DImode;
9331 dest = gen_lowpart (V2DImode, dest);
9332 source = adjust_address (source, V2DImode, 0);
9333 }
9334
9335 rtx tmp = can_create_pseudo_p () ? gen_reg_rtx_and_attrs (dest) : dest;
9336 rs6000_emit_le_vsx_permute (tmp, source, mode);
9337 rs6000_emit_le_vsx_permute (dest, tmp, mode);
9338 }
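
/* An illustrative expansion (not necessarily the exact insns chosen) of
   a little-endian V4SImode load from the address in r3:

	lxvd2x 0,0,3		# doublewords arrive swapped
	xxpermdi 34,0,0,2	# swap them back

   The second permute undoes the element reversal that lxvd2x performs
   on a little-endian target.  */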
9339
9340 /* Emit a little-endian store to vector memory location DEST from VSX
9341 register SOURCE in mode MODE. The store is done with two permuting
9342 insns that represent an xxpermdi and an stxvd2x.  */
9343 void
9344 rs6000_emit_le_vsx_store (rtx dest, rtx source, machine_mode mode)
9345 {
9346 /* This should never be called during or after LRA, because it does
9347 not re-permute the source register. It is intended only for use
9348 during expand. */
9349 gcc_assert (!lra_in_progress && !reload_completed);
9350
9351 /* Use V2DImode to do swaps of types with 128-bit scalar parts (TImode,
9352 V1TImode). */
9353 if (mode == TImode || mode == V1TImode)
9354 {
9355 mode = V2DImode;
9356 dest = adjust_address (dest, V2DImode, 0);
9357 source = gen_lowpart (V2DImode, source);
9358 }
9359
9360 rtx tmp = can_create_pseudo_p () ? gen_reg_rtx_and_attrs (source) : source;
9361 rs6000_emit_le_vsx_permute (tmp, source, mode);
9362 rs6000_emit_le_vsx_permute (dest, tmp, mode);
9363 }
9364
9365 /* Emit a sequence representing a little-endian VSX load or store,
9366 moving data from SOURCE to DEST in mode MODE. This is done
9367 separately from rs6000_emit_move to ensure it is called only
9368 during expand. LE VSX loads and stores introduced later are
9369 handled with a split. The expand-time RTL generation allows
9370 us to optimize away redundant pairs of register-permutes. */
9371 void
9372 rs6000_emit_le_vsx_move (rtx dest, rtx source, machine_mode mode)
9373 {
9374 gcc_assert (!BYTES_BIG_ENDIAN
9375 && VECTOR_MEM_VSX_P (mode)
9376 && !TARGET_P9_VECTOR
9377 && !gpr_or_gpr_p (dest, source)
9378 && (MEM_P (source) ^ MEM_P (dest)));
9379
9380 if (MEM_P (source))
9381 {
9382 gcc_assert (REG_P (dest) || SUBREG_P (dest));
9383 rs6000_emit_le_vsx_load (dest, source, mode);
9384 }
9385 else
9386 {
9387 if (!REG_P (source))
9388 source = force_reg (mode, source);
9389 rs6000_emit_le_vsx_store (dest, source, mode);
9390 }
9391 }
9392
9393 /* Return whether a SFmode or SImode move can be done without converting one
9394 mode to another.  This arises when we have:
9395
9396 (SUBREG:SF (REG:SI ...))
9397 (SUBREG:SI (REG:SF ...))
9398
9399 and one of the values is in a floating point/vector register, where SFmode
9400 scalars are stored in DFmode format. */
9401
9402 bool
9403 valid_sf_si_move (rtx dest, rtx src, machine_mode mode)
9404 {
9405 if (TARGET_ALLOW_SF_SUBREG)
9406 return true;
9407
9408 if (mode != SFmode && GET_MODE_CLASS (mode) != MODE_INT)
9409 return true;
9410
9411 if (!SUBREG_P (src) || !sf_subreg_operand (src, mode))
9412 return true;
9413
9414 /* Allow (set (SUBREG:SI (REG:SF)) (SUBREG:SI (REG:SF))).  */
9415 if (SUBREG_P (dest))
9416 {
9417 rtx dest_subreg = SUBREG_REG (dest);
9418 rtx src_subreg = SUBREG_REG (src);
9419 return GET_MODE (dest_subreg) == GET_MODE (src_subreg);
9420 }
9421
9422 return false;
9423 }
9424
9425
9426 /* Helper function to change moves with:
9427
9428 (SUBREG:SF (REG:SI)) and
9429 (SUBREG:SI (REG:SF))
9430
9431 into separate UNSPEC insns. In the PowerPC architecture, scalar SFmode
9432 values are stored as DFmode values in the VSX registers. We need to convert
9433 the bits before we can use a direct move or operate on the bits in the
9434 vector register as an integer type.
9435
9436 Skip things like (set (SUBREG:SI (...)) (SUBREG:SI (...))).  */
9437
9438 static bool
9439 rs6000_emit_move_si_sf_subreg (rtx dest, rtx source, machine_mode mode)
9440 {
9441 if (TARGET_DIRECT_MOVE_64BIT && !reload_completed
9442 && (!SUBREG_P (dest) || !sf_subreg_operand (dest, mode))
9443 && SUBREG_P (source) && sf_subreg_operand (source, mode))
9444 {
9445 rtx inner_source = SUBREG_REG (source);
9446 machine_mode inner_mode = GET_MODE (inner_source);
9447
9448 if (mode == SImode && inner_mode == SFmode)
9449 {
9450 emit_insn (gen_movsi_from_sf (dest, inner_source));
9451 return true;
9452 }
9453
9454 if (mode == SFmode && inner_mode == SImode)
9455 {
9456 emit_insn (gen_movsf_from_si (dest, inner_source));
9457 return true;
9458 }
9459 }
9460
9461 return false;
9462 }
9463
9464 /* Emit a move from SOURCE to DEST in mode MODE. */
9465 void
9466 rs6000_emit_move (rtx dest, rtx source, machine_mode mode)
9467 {
9468 rtx operands[2];
9469 operands[0] = dest;
9470 operands[1] = source;
9471
9472 if (TARGET_DEBUG_ADDR)
9473 {
9474 fprintf (stderr,
9475 "\nrs6000_emit_move: mode = %s, lra_in_progress = %d, "
9476 "reload_completed = %d, can_create_pseudos = %d.\ndest:\n",
9477 GET_MODE_NAME (mode),
9478 lra_in_progress,
9479 reload_completed,
9480 can_create_pseudo_p ());
9481 debug_rtx (dest);
9482 fprintf (stderr, "source:\n");
9483 debug_rtx (source);
9484 }
9485
9486 /* Check that we get CONST_WIDE_INT only when we should. */
9487 if (CONST_WIDE_INT_P (operands[1])
9488 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT)
9489 gcc_unreachable ();
9490
9491 #ifdef HAVE_AS_GNU_ATTRIBUTE
9492 /* If we use a long double type, set the flags in .gnu_attribute that say
9493 what the long double type is.  This is to allow the linker's warning
9494 message for the wrong long double to be useful, even if the function does
9495 not do a call (for example, doing a 128-bit add on power9 if the long
9496 double type is IEEE 128-bit).  Do not set this if __ibm128 or __float128
9497 are used when they aren't the default long double type.  */
9498 if (rs6000_gnu_attr && (HAVE_LD_PPC_GNU_ATTR_LONG_DOUBLE || TARGET_64BIT))
9499 {
9500 if (TARGET_LONG_DOUBLE_128 && (mode == TFmode || mode == TCmode))
9501 rs6000_passes_float = rs6000_passes_long_double = true;
9502
9503 else if (!TARGET_LONG_DOUBLE_128 && (mode == DFmode || mode == DCmode))
9504 rs6000_passes_float = rs6000_passes_long_double = true;
9505 }
9506 #endif
9507
9508 /* See if we need to special case SImode/SFmode SUBREG moves. */
9509 if ((mode == SImode || mode == SFmode) && SUBREG_P (source)
9510 && rs6000_emit_move_si_sf_subreg (dest, source, mode))
9511 return;
9512
9513 /* Check if GCC is setting up a block move that will end up using FP
9514 registers as temporaries. We must make sure this is acceptable. */
9515 if (MEM_P (operands[0])
9516 && MEM_P (operands[1])
9517 && mode == DImode
9518 && (rs6000_slow_unaligned_access (DImode, MEM_ALIGN (operands[0]))
9519 || rs6000_slow_unaligned_access (DImode, MEM_ALIGN (operands[1])))
9520 && ! (rs6000_slow_unaligned_access (SImode,
9521 (MEM_ALIGN (operands[0]) > 32
9522 ? 32 : MEM_ALIGN (operands[0])))
9523 || rs6000_slow_unaligned_access (SImode,
9524 (MEM_ALIGN (operands[1]) > 32
9525 ? 32 : MEM_ALIGN (operands[1]))))
9526 && ! MEM_VOLATILE_P (operands [0])
9527 && ! MEM_VOLATILE_P (operands [1]))
9528 {
9529 emit_move_insn (adjust_address (operands[0], SImode, 0),
9530 adjust_address (operands[1], SImode, 0));
9531 emit_move_insn (adjust_address (copy_rtx (operands[0]), SImode, 4),
9532 adjust_address (copy_rtx (operands[1]), SImode, 4));
9533 return;
9534 }
9535
9536 if (can_create_pseudo_p () && MEM_P (operands[0])
9537 && !gpc_reg_operand (operands[1], mode))
9538 operands[1] = force_reg (mode, operands[1]);
9539
9540 /* Recognize the case where operand[1] is a reference to thread-local
9541 data and load its address to a register. */
9542 if (tls_referenced_p (operands[1]))
9543 {
9544 enum tls_model model;
9545 rtx tmp = operands[1];
9546 rtx addend = NULL;
9547
9548 if (GET_CODE (tmp) == CONST && GET_CODE (XEXP (tmp, 0)) == PLUS)
9549 {
9550 addend = XEXP (XEXP (tmp, 0), 1);
9551 tmp = XEXP (XEXP (tmp, 0), 0);
9552 }
9553
9554 gcc_assert (SYMBOL_REF_P (tmp));
9555 model = SYMBOL_REF_TLS_MODEL (tmp);
9556 gcc_assert (model != 0);
9557
9558 tmp = rs6000_legitimize_tls_address (tmp, model);
9559 if (addend)
9560 {
9561 tmp = gen_rtx_PLUS (mode, tmp, addend);
9562 tmp = force_operand (tmp, operands[0]);
9563 }
9564 operands[1] = tmp;
9565 }
9566
9567 /* 128-bit constant floating-point values on Darwin should really be loaded
9568 as two parts. However, this premature splitting is a problem when DFmode
9569 values can go into Altivec registers. */
9570 if (TARGET_MACHO && CONST_DOUBLE_P (operands[1]) && FLOAT128_IBM_P (mode)
9571 && !reg_addr[DFmode].scalar_in_vmx_p)
9572 {
9573 rs6000_emit_move (simplify_gen_subreg (DFmode, operands[0], mode, 0),
9574 simplify_gen_subreg (DFmode, operands[1], mode, 0),
9575 DFmode);
9576 rs6000_emit_move (simplify_gen_subreg (DFmode, operands[0], mode,
9577 GET_MODE_SIZE (DFmode)),
9578 simplify_gen_subreg (DFmode, operands[1], mode,
9579 GET_MODE_SIZE (DFmode)),
9580 DFmode);
9581 return;
9582 }
9583
9584 /* Transform (p0:DD, (SUBREG:DD p1:SD)) to ((SUBREG:SD p0:DD),
9585 p1:SD) if p1 is not of floating point class and p0 is spilled as
9586 we can have no analogous movsd_store for this. */
9587 if (lra_in_progress && mode == DDmode
9588 && REG_P (operands[0]) && !HARD_REGISTER_P (operands[0])
9589 && reg_preferred_class (REGNO (operands[0])) == NO_REGS
9590 && SUBREG_P (operands[1]) && REG_P (SUBREG_REG (operands[1]))
9591 && GET_MODE (SUBREG_REG (operands[1])) == SDmode)
9592 {
9593 enum reg_class cl;
9594 int regno = REGNO (SUBREG_REG (operands[1]));
9595
9596 if (!HARD_REGISTER_NUM_P (regno))
9597 {
9598 cl = reg_preferred_class (regno);
9599 regno = reg_renumber[regno];
9600 if (regno < 0)
9601 regno = cl == NO_REGS ? -1 : ira_class_hard_regs[cl][1];
9602 }
9603 if (regno >= 0 && ! FP_REGNO_P (regno))
9604 {
9605 mode = SDmode;
9606 operands[0] = gen_lowpart_SUBREG (SDmode, operands[0]);
9607 operands[1] = SUBREG_REG (operands[1]);
9608 }
9609 }
9610 if (lra_in_progress
9611 && mode == SDmode
9612 && REG_P (operands[0]) && !HARD_REGISTER_P (operands[0])
9613 && reg_preferred_class (REGNO (operands[0])) == NO_REGS
9614 && (REG_P (operands[1])
9615 || (SUBREG_P (operands[1]) && REG_P (SUBREG_REG (operands[1])))))
9616 {
9617 int regno = reg_or_subregno (operands[1]);
9618 enum reg_class cl;
9619
9620 if (!HARD_REGISTER_NUM_P (regno))
9621 {
9622 cl = reg_preferred_class (regno);
9623 gcc_assert (cl != NO_REGS);
9624 regno = reg_renumber[regno];
9625 if (regno < 0)
9626 regno = ira_class_hard_regs[cl][0];
9627 }
9628 if (FP_REGNO_P (regno))
9629 {
9630 if (GET_MODE (operands[0]) != DDmode)
9631 operands[0] = gen_rtx_SUBREG (DDmode, operands[0], 0);
9632 emit_insn (gen_movsd_store (operands[0], operands[1]));
9633 }
9634 else if (INT_REGNO_P (regno))
9635 emit_insn (gen_movsd_hardfloat (operands[0], operands[1]));
9636 else
9637 gcc_unreachable ();
9638 return;
9639 }
9640 /* Transform ((SUBREG:DD p0:SD), p1:DD) to (p0:SD, (SUBREG:SD
9641 p1:DD)) if p0 is not of floating point class and p1 is spilled as
9642 we can have no analogous movsd_load for this. */
9643 if (lra_in_progress && mode == DDmode
9644 && SUBREG_P (operands[0]) && REG_P (SUBREG_REG (operands[0]))
9645 && GET_MODE (SUBREG_REG (operands[0])) == SDmode
9646 && REG_P (operands[1]) && !HARD_REGISTER_P (operands[1])
9647 && reg_preferred_class (REGNO (operands[1])) == NO_REGS)
9648 {
9649 enum reg_class cl;
9650 int regno = REGNO (SUBREG_REG (operands[0]));
9651
9652 if (!HARD_REGISTER_NUM_P (regno))
9653 {
9654 cl = reg_preferred_class (regno);
9655 regno = reg_renumber[regno];
9656 if (regno < 0)
9657 regno = cl == NO_REGS ? -1 : ira_class_hard_regs[cl][0];
9658 }
9659 if (regno >= 0 && ! FP_REGNO_P (regno))
9660 {
9661 mode = SDmode;
9662 operands[0] = SUBREG_REG (operands[0]);
9663 operands[1] = gen_lowpart_SUBREG (SDmode, operands[1]);
9664 }
9665 }
9666 if (lra_in_progress
9667 && mode == SDmode
9668 && (REG_P (operands[0])
9669 || (SUBREG_P (operands[0]) && REG_P (SUBREG_REG (operands[0]))))
9670 && REG_P (operands[1]) && !HARD_REGISTER_P (operands[1])
9671 && reg_preferred_class (REGNO (operands[1])) == NO_REGS)
9672 {
9673 int regno = reg_or_subregno (operands[0]);
9674 enum reg_class cl;
9675
9676 if (!HARD_REGISTER_NUM_P (regno))
9677 {
9678 cl = reg_preferred_class (regno);
9679 gcc_assert (cl != NO_REGS);
9680 regno = reg_renumber[regno];
9681 if (regno < 0)
9682 regno = ira_class_hard_regs[cl][0];
9683 }
9684 if (FP_REGNO_P (regno))
9685 {
9686 if (GET_MODE (operands[1]) != DDmode)
9687 operands[1] = gen_rtx_SUBREG (DDmode, operands[1], 0);
9688 emit_insn (gen_movsd_load (operands[0], operands[1]));
9689 }
9690 else if (INT_REGNO_P (regno))
9691 emit_insn (gen_movsd_hardfloat (operands[0], operands[1]));
9692 else
9693 gcc_unreachable ();
9694 return;
9695 }
9696
9697 /* FIXME: In the long term, this switch statement should go away
9698 and be replaced by a sequence of tests based on things like
9699 mode == Pmode. */
9700 switch (mode)
9701 {
9702 case E_HImode:
9703 case E_QImode:
9704 if (CONSTANT_P (operands[1])
9705 && !CONST_INT_P (operands[1]))
9706 operands[1] = force_const_mem (mode, operands[1]);
9707 break;
9708
9709 case E_TFmode:
9710 case E_TDmode:
9711 case E_IFmode:
9712 case E_KFmode:
9713 if (FLOAT128_2REG_P (mode))
9714 rs6000_eliminate_indexed_memrefs (operands);
9715 /* fall through */
9716
9717 case E_DFmode:
9718 case E_DDmode:
9719 case E_SFmode:
9720 case E_SDmode:
9721 if (CONSTANT_P (operands[1])
9722 && ! easy_fp_constant (operands[1], mode))
9723 operands[1] = force_const_mem (mode, operands[1]);
9724 break;
9725
9726 case E_V16QImode:
9727 case E_V8HImode:
9728 case E_V4SFmode:
9729 case E_V4SImode:
9730 case E_V2DFmode:
9731 case E_V2DImode:
9732 case E_V1TImode:
9733 if (CONSTANT_P (operands[1])
9734 && !easy_vector_constant (operands[1], mode))
9735 operands[1] = force_const_mem (mode, operands[1]);
9736 break;
9737
9738 case E_SImode:
9739 case E_DImode:
9740 /* Use the default pattern for the address of ELF small data.  */
9741 if (TARGET_ELF
9742 && mode == Pmode
9743 && DEFAULT_ABI == ABI_V4
9744 && (SYMBOL_REF_P (operands[1])
9745 || GET_CODE (operands[1]) == CONST)
9746 && small_data_operand (operands[1], mode))
9747 {
9748 emit_insn (gen_rtx_SET (operands[0], operands[1]));
9749 return;
9750 }
9751
9752 if (DEFAULT_ABI == ABI_V4
9753 && mode == Pmode && mode == SImode
9754 && flag_pic == 1 && got_operand (operands[1], mode))
9755 {
9756 emit_insn (gen_movsi_got (operands[0], operands[1]));
9757 return;
9758 }
9759
9760 if ((TARGET_ELF || DEFAULT_ABI == ABI_DARWIN)
9761 && TARGET_NO_TOC
9762 && ! flag_pic
9763 && mode == Pmode
9764 && CONSTANT_P (operands[1])
9765 && GET_CODE (operands[1]) != HIGH
9766 && !CONST_INT_P (operands[1]))
9767 {
9768 rtx target = (!can_create_pseudo_p ()
9769 ? operands[0]
9770 : gen_reg_rtx (mode));
9771
9772 /* If this is a function address on -mcall-aixdesc,
9773 convert it to the address of the descriptor. */
9774 if (DEFAULT_ABI == ABI_AIX
9775 && SYMBOL_REF_P (operands[1])
9776 && XSTR (operands[1], 0)[0] == '.')
9777 {
9778 const char *name = XSTR (operands[1], 0);
9779 rtx new_ref;
9780 while (*name == '.')
9781 name++;
9782 new_ref = gen_rtx_SYMBOL_REF (Pmode, name);
9783 CONSTANT_POOL_ADDRESS_P (new_ref)
9784 = CONSTANT_POOL_ADDRESS_P (operands[1]);
9785 SYMBOL_REF_FLAGS (new_ref) = SYMBOL_REF_FLAGS (operands[1]);
9786 SYMBOL_REF_USED (new_ref) = SYMBOL_REF_USED (operands[1]);
9787 SYMBOL_REF_DATA (new_ref) = SYMBOL_REF_DATA (operands[1]);
9788 operands[1] = new_ref;
9789 }
9790
9791 if (DEFAULT_ABI == ABI_DARWIN)
9792 {
9793 #if TARGET_MACHO
9794 if (MACHO_DYNAMIC_NO_PIC_P)
9795 {
9796 /* Take care of any required data indirection. */
9797 operands[1] = rs6000_machopic_legitimize_pic_address (
9798 operands[1], mode, operands[0]);
9799 if (operands[0] != operands[1])
9800 emit_insn (gen_rtx_SET (operands[0], operands[1]));
9801 return;
9802 }
9803 #endif
9804 emit_insn (gen_macho_high (target, operands[1]));
9805 emit_insn (gen_macho_low (operands[0], target, operands[1]));
9806 return;
9807 }
9808
9809 emit_insn (gen_elf_high (target, operands[1]));
9810 emit_insn (gen_elf_low (operands[0], target, operands[1]));
9811 return;
9812 }
9813
9814 /* If this is a SYMBOL_REF that refers to a constant pool entry,
9815 and we have put it in the TOC, we just need to make a TOC-relative
9816 reference to it. */
9817 if (TARGET_TOC
9818 && SYMBOL_REF_P (operands[1])
9819 && use_toc_relative_ref (operands[1], mode))
9820 operands[1] = create_TOC_reference (operands[1], operands[0]);
9821 else if (mode == Pmode
9822 && CONSTANT_P (operands[1])
9823 && GET_CODE (operands[1]) != HIGH
9824 && ((REG_P (operands[0])
9825 && FP_REGNO_P (REGNO (operands[0])))
9826 || !CONST_INT_P (operands[1])
9827 || (num_insns_constant (operands[1], mode)
9828 > (TARGET_CMODEL != CMODEL_SMALL ? 3 : 2)))
9829 && !toc_relative_expr_p (operands[1], false, NULL, NULL)
9830 && (TARGET_CMODEL == CMODEL_SMALL
9831 || can_create_pseudo_p ()
9832 || (REG_P (operands[0])
9833 && INT_REG_OK_FOR_BASE_P (operands[0], true))))
9834 {
9835
9836 #if TARGET_MACHO
9837 /* Darwin uses a special PIC legitimizer. */
9838 if (DEFAULT_ABI == ABI_DARWIN && MACHOPIC_INDIRECT)
9839 {
9840 operands[1] =
9841 rs6000_machopic_legitimize_pic_address (operands[1], mode,
9842 operands[0]);
9843 if (operands[0] != operands[1])
9844 emit_insn (gen_rtx_SET (operands[0], operands[1]));
9845 return;
9846 }
9847 #endif
9848
9849 /* If we are to limit the number of things we put in the TOC and
9850 this is a symbol plus a constant we can add in one insn,
9851 just put the symbol in the TOC and add the constant. */
9852 if (GET_CODE (operands[1]) == CONST
9853 && TARGET_NO_SUM_IN_TOC
9854 && GET_CODE (XEXP (operands[1], 0)) == PLUS
9855 && add_operand (XEXP (XEXP (operands[1], 0), 1), mode)
9856 && (GET_CODE (XEXP (XEXP (operands[1], 0), 0)) == LABEL_REF
9857 || SYMBOL_REF_P (XEXP (XEXP (operands[1], 0), 0)))
9858 && ! side_effects_p (operands[0]))
9859 {
9860 rtx sym =
9861 force_const_mem (mode, XEXP (XEXP (operands[1], 0), 0));
9862 rtx other = XEXP (XEXP (operands[1], 0), 1);
9863
9864 sym = force_reg (mode, sym);
9865 emit_insn (gen_add3_insn (operands[0], sym, other));
9866 return;
9867 }
9868
9869 operands[1] = force_const_mem (mode, operands[1]);
9870
9871 if (TARGET_TOC
9872 && SYMBOL_REF_P (XEXP (operands[1], 0))
9873 && use_toc_relative_ref (XEXP (operands[1], 0), mode))
9874 {
9875 rtx tocref = create_TOC_reference (XEXP (operands[1], 0),
9876 operands[0]);
9877 operands[1] = gen_const_mem (mode, tocref);
9878 set_mem_alias_set (operands[1], get_TOC_alias_set ());
9879 }
9880 }
9881 break;
9882
9883 case E_TImode:
9884 if (!VECTOR_MEM_VSX_P (TImode))
9885 rs6000_eliminate_indexed_memrefs (operands);
9886 break;
9887
9888 case E_PTImode:
9889 rs6000_eliminate_indexed_memrefs (operands);
9890 break;
9891
9892 default:
9893 fatal_insn ("bad move", gen_rtx_SET (dest, source));
9894 }
9895
9896 /* Above, we may have called force_const_mem which may have returned
9897 an invalid address. If we can, fix this up; otherwise, reload will
9898 have to deal with it. */
9899 if (MEM_P (operands[1]))
9900 operands[1] = validize_mem (operands[1]);
9901
9902 emit_insn (gen_rtx_SET (operands[0], operands[1]));
9903 }
9904 \f
9905 /* Nonzero if we can use a floating-point register to pass this arg. */
9906 #define USE_FP_FOR_ARG_P(CUM,MODE) \
9907 (SCALAR_FLOAT_MODE_NOT_VECTOR_P (MODE) \
9908 && (CUM)->fregno <= FP_ARG_MAX_REG \
9909 && TARGET_HARD_FLOAT)
9910
9911 /* Nonzero if we can use an AltiVec register to pass this arg. */
9912 #define USE_ALTIVEC_FOR_ARG_P(CUM,MODE,NAMED) \
9913 (ALTIVEC_OR_VSX_VECTOR_MODE (MODE) \
9914 && (CUM)->vregno <= ALTIVEC_ARG_MAX_REG \
9915 && TARGET_ALTIVEC_ABI \
9916 && (NAMED))
9917
9918 /* Walk down the type tree of TYPE counting consecutive base elements.
9919 If *MODEP is VOIDmode, then set it to the first valid floating point
9920 or vector type. If a non-floating point or vector type is found, or
9921 if a floating point or vector type that doesn't match a non-VOIDmode
9922 *MODEP is found, then return -1, otherwise return the count in the
9923 sub-tree. */
9924
9925 static int
9926 rs6000_aggregate_candidate (const_tree type, machine_mode *modep)
9927 {
9928 machine_mode mode;
9929 HOST_WIDE_INT size;
9930
9931 switch (TREE_CODE (type))
9932 {
9933 case REAL_TYPE:
9934 mode = TYPE_MODE (type);
9935 if (!SCALAR_FLOAT_MODE_P (mode))
9936 return -1;
9937
9938 if (*modep == VOIDmode)
9939 *modep = mode;
9940
9941 if (*modep == mode)
9942 return 1;
9943
9944 break;
9945
9946 case COMPLEX_TYPE:
9947 mode = TYPE_MODE (TREE_TYPE (type));
9948 if (!SCALAR_FLOAT_MODE_P (mode))
9949 return -1;
9950
9951 if (*modep == VOIDmode)
9952 *modep = mode;
9953
9954 if (*modep == mode)
9955 return 2;
9956
9957 break;
9958
9959 case VECTOR_TYPE:
9960 if (!TARGET_ALTIVEC_ABI || !TARGET_ALTIVEC)
9961 return -1;
9962
9963 /* Use V4SImode as representative of all 128-bit vector types. */
9964 size = int_size_in_bytes (type);
9965 switch (size)
9966 {
9967 case 16:
9968 mode = V4SImode;
9969 break;
9970 default:
9971 return -1;
9972 }
9973
9974 if (*modep == VOIDmode)
9975 *modep = mode;
9976
9977 /* Vector modes are considered to be opaque: two vectors are
9978 equivalent for the purposes of being homogeneous aggregates
9979 if they are the same size. */
9980 if (*modep == mode)
9981 return 1;
9982
9983 break;
9984
9985 case ARRAY_TYPE:
9986 {
9987 int count;
9988 tree index = TYPE_DOMAIN (type);
9989
9990 /* Can't handle incomplete types nor sizes that are not
9991 fixed. */
9992 if (!COMPLETE_TYPE_P (type)
9993 || TREE_CODE (TYPE_SIZE (type)) != INTEGER_CST)
9994 return -1;
9995
9996 count = rs6000_aggregate_candidate (TREE_TYPE (type), modep);
9997 if (count == -1
9998 || !index
9999 || !TYPE_MAX_VALUE (index)
10000 || !tree_fits_uhwi_p (TYPE_MAX_VALUE (index))
10001 || !TYPE_MIN_VALUE (index)
10002 || !tree_fits_uhwi_p (TYPE_MIN_VALUE (index))
10003 || count < 0)
10004 return -1;
10005
10006 count *= (1 + tree_to_uhwi (TYPE_MAX_VALUE (index))
10007 - tree_to_uhwi (TYPE_MIN_VALUE (index)));
10008
10009 /* There must be no padding. */
10010 if (wi::to_wide (TYPE_SIZE (type))
10011 != count * GET_MODE_BITSIZE (*modep))
10012 return -1;
10013
10014 return count;
10015 }
10016
10017 case RECORD_TYPE:
10018 {
10019 int count = 0;
10020 int sub_count;
10021 tree field;
10022
10023 /* Can't handle incomplete types nor sizes that are not
10024 fixed. */
10025 if (!COMPLETE_TYPE_P (type)
10026 || TREE_CODE (TYPE_SIZE (type)) != INTEGER_CST)
10027 return -1;
10028
10029 for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
10030 {
10031 if (TREE_CODE (field) != FIELD_DECL)
10032 continue;
10033
10034 sub_count = rs6000_aggregate_candidate (TREE_TYPE (field), modep);
10035 if (sub_count < 0)
10036 return -1;
10037 count += sub_count;
10038 }
10039
10040 /* There must be no padding. */
10041 if (wi::to_wide (TYPE_SIZE (type))
10042 != count * GET_MODE_BITSIZE (*modep))
10043 return -1;
10044
10045 return count;
10046 }
10047
10048 case UNION_TYPE:
10049 case QUAL_UNION_TYPE:
10050 {
10051 /* These aren't very interesting except in a degenerate case. */
10052 int count = 0;
10053 int sub_count;
10054 tree field;
10055
10056 /* Can't handle incomplete types nor sizes that are not
10057 fixed. */
10058 if (!COMPLETE_TYPE_P (type)
10059 || TREE_CODE (TYPE_SIZE (type)) != INTEGER_CST)
10060 return -1;
10061
10062 for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
10063 {
10064 if (TREE_CODE (field) != FIELD_DECL)
10065 continue;
10066
10067 sub_count = rs6000_aggregate_candidate (TREE_TYPE (field), modep);
10068 if (sub_count < 0)
10069 return -1;
10070 count = count > sub_count ? count : sub_count;
10071 }
10072
10073 /* There must be no padding. */
10074 if (wi::to_wide (TYPE_SIZE (type))
10075 != count * GET_MODE_BITSIZE (*modep))
10076 return -1;
10077
10078 return count;
10079 }
10080
10081 default:
10082 break;
10083 }
10084
10085 return -1;
10086 }
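
/* Two illustrative cases for the walk above: "struct { double a, b; }"
   yields *MODEP == DFmode and a count of 2, while
   "struct { double d; float f; }" returns -1, because the second
   field's SFmode does not match the DFmode already recorded in
   *MODEP.  */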
10087
10088 /* If an argument, whose type is described by TYPE and MODE, is a homogeneous
10089 float or vector aggregate that shall be passed in FP/vector registers
10090 according to the ELFv2 ABI, return the homogeneous element mode in
10091 *ELT_MODE and the number of elements in *N_ELTS, and return TRUE.
10092
10093 Otherwise, set *ELT_MODE to MODE and *N_ELTS to 1, and return FALSE. */
10094
10095 static bool
10096 rs6000_discover_homogeneous_aggregate (machine_mode mode, const_tree type,
10097 machine_mode *elt_mode,
10098 int *n_elts)
10099 {
10100 /* Note that we do not accept complex types at the top level as
10101 homogeneous aggregates; these types are handled via the
10102 targetm.calls.split_complex_arg mechanism. Complex types
10103 can be elements of homogeneous aggregates, however. */
10104 if (TARGET_HARD_FLOAT && DEFAULT_ABI == ABI_ELFv2 && type
10105 && AGGREGATE_TYPE_P (type))
10106 {
10107 machine_mode field_mode = VOIDmode;
10108 int field_count = rs6000_aggregate_candidate (type, &field_mode);
10109
10110 if (field_count > 0)
10111 {
10112 int reg_size = ALTIVEC_OR_VSX_VECTOR_MODE (field_mode) ? 16 : 8;
10113 int field_size = ROUND_UP (GET_MODE_SIZE (field_mode), reg_size);
10114
10115 /* The ELFv2 ABI allows homogeneous aggregates to occupy
10116 up to AGGR_ARG_NUM_REG registers. */
10117 if (field_count * field_size <= AGGR_ARG_NUM_REG * reg_size)
10118 {
10119 if (elt_mode)
10120 *elt_mode = field_mode;
10121 if (n_elts)
10122 *n_elts = field_count;
10123 return true;
10124 }
10125 }
10126 }
10127
10128 if (elt_mode)
10129 *elt_mode = mode;
10130 if (n_elts)
10131 *n_elts = 1;
10132 return false;
10133 }
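
/* For example, under the ELFv2 ABI "struct { float x, y; }" is
   discovered as a homogeneous aggregate with *ELT_MODE == SFmode and
   *N_ELTS == 2, and so is passed in two floating-point registers
   rather than in a GPR.  */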
10134
10135 /* Return a nonzero value to say to return the function value in
10136 memory, just as large structures are always returned. TYPE will be
10137 the data type of the value, and FNTYPE will be the type of the
10138 function doing the returning, or @code{NULL} for libcalls.
10139
10140 The AIX ABI for the RS/6000 specifies that all structures are
10141 returned in memory. The Darwin ABI does the same.
10142
10143 For the Darwin 64 Bit ABI, a function result can be returned in
10144 registers or in memory, depending on the size of the return data
10145 type. If it is returned in registers, the value occupies the same
10146 registers as it would if it were the first and only function
10147 argument. Otherwise, the function places its result in memory at
10148 the location pointed to by GPR3.
10149
10150 The SVR4 ABI specifies that structures <= 8 bytes are returned in r3/r4,
10151 but a draft put them in memory, and GCC used to implement the draft
10152 instead of the final standard. Therefore, aix_struct_return
10153 controls this instead of DEFAULT_ABI; V.4 targets needing backward
10154 compatibility can change DRAFT_V4_STRUCT_RET to override the
10155 default, and -m switches get the final word. See
10156 rs6000_option_override_internal for more details.
10157
10158 The PPC32 SVR4 ABI uses IEEE double extended for long double, if 128-bit
10159 long double support is enabled. These values are returned in memory.
10160
10161 int_size_in_bytes returns -1 for variable size objects, which go in
10162 memory always. The cast to unsigned makes -1 > 8. */
10163
10164 static bool
10165 rs6000_return_in_memory (const_tree type, const_tree fntype ATTRIBUTE_UNUSED)
10166 {
10167 /* For the Darwin64 ABI, test if we can fit the return value in regs. */
10168 if (TARGET_MACHO
10169 && rs6000_darwin64_abi
10170 && TREE_CODE (type) == RECORD_TYPE
10171 && int_size_in_bytes (type) > 0)
10172 {
10173 CUMULATIVE_ARGS valcum;
10174 rtx valret;
10175
10176 valcum.words = 0;
10177 valcum.fregno = FP_ARG_MIN_REG;
10178 valcum.vregno = ALTIVEC_ARG_MIN_REG;
10179 /* Do a trial code generation as if this were going to be passed
10180 as an argument; if any part goes in memory, we return NULL. */
10181 valret = rs6000_darwin64_record_arg (&valcum, type, true, true);
10182 if (valret)
10183 return false;
10184 /* Otherwise fall through to more conventional ABI rules. */
10185 }
10186
10187 /* The ELFv2 ABI returns homogeneous VFP aggregates in registers.  */
10188 if (rs6000_discover_homogeneous_aggregate (TYPE_MODE (type), type,
10189 NULL, NULL))
10190 return false;
10191
10192 /* The ELFv2 ABI returns aggregates up to 16 bytes in registers.  */
10193 if (DEFAULT_ABI == ABI_ELFv2 && AGGREGATE_TYPE_P (type)
10194 && (unsigned HOST_WIDE_INT) int_size_in_bytes (type) <= 16)
10195 return false;
10196
10197 if (AGGREGATE_TYPE_P (type)
10198 && (aix_struct_return
10199 || (unsigned HOST_WIDE_INT) int_size_in_bytes (type) > 8))
10200 return true;
10201
10202 /* Allow -maltivec -mabi=no-altivec without warning. Altivec vector
10203 modes only exist for GCC vector types if -maltivec. */
10204 if (TARGET_32BIT && !TARGET_ALTIVEC_ABI
10205 && ALTIVEC_VECTOR_MODE (TYPE_MODE (type)))
10206 return false;
10207
10208 /* Return synthetic vectors in memory. */
10209 if (TREE_CODE (type) == VECTOR_TYPE
10210 && int_size_in_bytes (type) > (TARGET_ALTIVEC_ABI ? 16 : 8))
10211 {
10212 static bool warned_for_return_big_vectors = false;
10213 if (!warned_for_return_big_vectors)
10214 {
10215 warning (OPT_Wpsabi, "GCC vector returned by reference: "
10216 "non-standard ABI extension with no compatibility "
10217 "guarantee");
10218 warned_for_return_big_vectors = true;
10219 }
10220 return true;
10221 }
10222
10223 if (DEFAULT_ABI == ABI_V4 && TARGET_IEEEQUAD
10224 && FLOAT128_IEEE_P (TYPE_MODE (type)))
10225 return true;
10226
10227 return false;
10228 }
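
/* Some illustrative ELFv2 outcomes of the rules above:
   "struct { long a, b; }" (16 bytes) is returned in registers;
   "struct { long a, b, c; }" (24 bytes) is returned in memory;
   "struct { double a, b, c; }" is a homogeneous aggregate and is
   returned in floating-point registers.  */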
10229
10230 /* Specify whether values returned in registers should be at the most
10231 significant end of a register. We want aggregates returned by
10232 value to match the way aggregates are passed to functions. */
10233
10234 static bool
10235 rs6000_return_in_msb (const_tree valtype)
10236 {
10237 return (DEFAULT_ABI == ABI_ELFv2
10238 && BYTES_BIG_ENDIAN
10239 && AGGREGATE_TYPE_P (valtype)
10240 && (rs6000_function_arg_padding (TYPE_MODE (valtype), valtype)
10241 == PAD_UPWARD));
10242 }
10243
10244 #ifdef HAVE_AS_GNU_ATTRIBUTE
10245 /* Return TRUE if a call to function FNDECL may be one that
10246 potentially affects the function calling ABI of the object file. */
10247
10248 static bool
10249 call_ABI_of_interest (tree fndecl)
10250 {
10251 if (rs6000_gnu_attr && symtab->state == EXPANSION)
10252 {
10253 struct cgraph_node *c_node;
10254
10255 /* Libcalls are always interesting. */
10256 if (fndecl == NULL_TREE)
10257 return true;
10258
10259 /* Any call to an external function is interesting. */
10260 if (DECL_EXTERNAL (fndecl))
10261 return true;
10262
10263 /* Interesting functions that we are emitting in this object file. */
10264 c_node = cgraph_node::get (fndecl);
10265 c_node = c_node->ultimate_alias_target ();
10266 return !c_node->only_called_directly_p ();
10267 }
10268 return false;
10269 }
10270 #endif
10271
10272 /* Initialize a variable CUM of type CUMULATIVE_ARGS
10273 for a call to a function whose data type is FNTYPE.
10274 For a library call, FNTYPE is 0 and RETURN_MODE the return value mode.
10275
10276 For incoming args we set the number of arguments in the prototype large
10277 so we never return a PARALLEL. */
10278
10279 void
10280 init_cumulative_args (CUMULATIVE_ARGS *cum, tree fntype,
10281 rtx libname ATTRIBUTE_UNUSED, int incoming,
10282 int libcall, int n_named_args,
10283 tree fndecl,
10284 machine_mode return_mode ATTRIBUTE_UNUSED)
10285 {
10286 static CUMULATIVE_ARGS zero_cumulative;
10287
10288 *cum = zero_cumulative;
10289 cum->words = 0;
10290 cum->fregno = FP_ARG_MIN_REG;
10291 cum->vregno = ALTIVEC_ARG_MIN_REG;
10292 cum->prototype = (fntype && prototype_p (fntype));
10293 cum->call_cookie = ((DEFAULT_ABI == ABI_V4 && libcall)
10294 ? CALL_LIBCALL : CALL_NORMAL);
10295 cum->sysv_gregno = GP_ARG_MIN_REG;
10296 cum->stdarg = stdarg_p (fntype);
10297 cum->libcall = libcall;
10298
10299 cum->nargs_prototype = 0;
10300 if (incoming || cum->prototype)
10301 cum->nargs_prototype = n_named_args;
10302
10303 /* Check for a longcall attribute. */
10304 if ((!fntype && rs6000_default_long_calls)
10305 || (fntype
10306 && lookup_attribute ("longcall", TYPE_ATTRIBUTES (fntype))
10307 && !lookup_attribute ("shortcall", TYPE_ATTRIBUTES (fntype))))
10308 cum->call_cookie |= CALL_LONG;
10309 else if (DEFAULT_ABI != ABI_DARWIN)
10310 {
10311 bool is_local = (fndecl
10312 && !DECL_EXTERNAL (fndecl)
10313 && !DECL_WEAK (fndecl)
10314 && (*targetm.binds_local_p) (fndecl));
10315 if (is_local)
10316 ;
10317 else if (flag_plt)
10318 {
10319 if (fntype
10320 && lookup_attribute ("noplt", TYPE_ATTRIBUTES (fntype)))
10321 cum->call_cookie |= CALL_LONG;
10322 }
10323 else
10324 {
10325 if (!(fntype
10326 && lookup_attribute ("plt", TYPE_ATTRIBUTES (fntype))))
10327 cum->call_cookie |= CALL_LONG;
10328 }
10329 }
10330
10331 if (TARGET_DEBUG_ARG)
10332 {
10333 fprintf (stderr, "\ninit_cumulative_args:");
10334 if (fntype)
10335 {
10336 tree ret_type = TREE_TYPE (fntype);
10337 fprintf (stderr, " ret code = %s,",
10338 get_tree_code_name (TREE_CODE (ret_type)));
10339 }
10340
10341 if (cum->call_cookie & CALL_LONG)
10342 fprintf (stderr, " longcall,");
10343
10344 fprintf (stderr, " proto = %d, nargs = %d\n",
10345 cum->prototype, cum->nargs_prototype);
10346 }
10347
10348 #ifdef HAVE_AS_GNU_ATTRIBUTE
10349 if (TARGET_ELF && (TARGET_64BIT || DEFAULT_ABI == ABI_V4))
10350 {
10351 cum->escapes = call_ABI_of_interest (fndecl);
10352 if (cum->escapes)
10353 {
10354 tree return_type;
10355
10356 if (fntype)
10357 {
10358 return_type = TREE_TYPE (fntype);
10359 return_mode = TYPE_MODE (return_type);
10360 }
10361 else
10362 return_type = lang_hooks.types.type_for_mode (return_mode, 0);
10363
10364 if (return_type != NULL)
10365 {
10366 if (TREE_CODE (return_type) == RECORD_TYPE
10367 && TYPE_TRANSPARENT_AGGR (return_type))
10368 {
10369 return_type = TREE_TYPE (first_field (return_type));
10370 return_mode = TYPE_MODE (return_type);
10371 }
10372 if (AGGREGATE_TYPE_P (return_type)
10373 && ((unsigned HOST_WIDE_INT) int_size_in_bytes (return_type)
10374 <= 8))
10375 rs6000_returns_struct = true;
10376 }
10377 if (SCALAR_FLOAT_MODE_P (return_mode))
10378 {
10379 rs6000_passes_float = true;
10380 if ((HAVE_LD_PPC_GNU_ATTR_LONG_DOUBLE || TARGET_64BIT)
10381 && (FLOAT128_IBM_P (return_mode)
10382 || FLOAT128_IEEE_P (return_mode)
10383 || (return_type != NULL
10384 && (TYPE_MAIN_VARIANT (return_type)
10385 == long_double_type_node))))
10386 rs6000_passes_long_double = true;
10387
10388 /* Note if we pass or return an IEEE 128-bit type.  We changed
10389 the mangling for these types, and we may need to make an alias
10390 with the old mangling. */
10391 if (FLOAT128_IEEE_P (return_mode))
10392 rs6000_passes_ieee128 = true;
10393 }
10394 if (ALTIVEC_OR_VSX_VECTOR_MODE (return_mode))
10395 rs6000_passes_vector = true;
10396 }
10397 }
10398 #endif
10399
10400 if (fntype
10401 && !TARGET_ALTIVEC
10402 && TARGET_ALTIVEC_ABI
10403 && ALTIVEC_VECTOR_MODE (TYPE_MODE (TREE_TYPE (fntype))))
10404 {
10405 error ("cannot return value in vector register because"
10406 " altivec instructions are disabled, use %qs"
10407 " to enable them", "-maltivec");
10408 }
10409 }
10410 \f
10411 /* The mode the ABI uses for a word. This is not the same as word_mode
10412 for -m32 -mpowerpc64. This is used to implement various target hooks. */
10413
10414 static scalar_int_mode
10415 rs6000_abi_word_mode (void)
10416 {
10417 return TARGET_32BIT ? SImode : DImode;
10418 }
10419
10420 /* Implement the TARGET_OFFLOAD_OPTIONS hook. */
10421 static char *
10422 rs6000_offload_options (void)
10423 {
10424 if (TARGET_64BIT)
10425 return xstrdup ("-foffload-abi=lp64");
10426 else
10427 return xstrdup ("-foffload-abi=ilp32");
10428 }
10429
10430 /* On rs6000, function arguments are promoted, as are function return
10431 values. */
10432
10433 static machine_mode
10434 rs6000_promote_function_mode (const_tree type ATTRIBUTE_UNUSED,
10435 machine_mode mode,
10436 int *punsignedp ATTRIBUTE_UNUSED,
10437 const_tree, int)
10438 {
10439 PROMOTE_MODE (mode, *punsignedp, type);
10440
10441 return mode;
10442 }
10443
10444 /* Return true if TYPE must be passed on the stack and not in registers. */
10445
10446 static bool
10447 rs6000_must_pass_in_stack (machine_mode mode, const_tree type)
10448 {
10449 if (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2 || TARGET_64BIT)
10450 return must_pass_in_stack_var_size (mode, type);
10451 else
10452 return must_pass_in_stack_var_size_or_pad (mode, type);
10453 }
10454
10455 static inline bool
10456 is_complex_IBM_long_double (machine_mode mode)
10457 {
10458 return mode == ICmode || (mode == TCmode && FLOAT128_IBM_P (TCmode));
10459 }
10460
10461 /* Whether ABI_V4 passes MODE args to a function in floating point
10462 registers. */
10463
10464 static bool
10465 abi_v4_pass_in_fpr (machine_mode mode, bool named)
10466 {
10467 if (!TARGET_HARD_FLOAT)
10468 return false;
10469 if (mode == DFmode)
10470 return true;
10471 if (mode == SFmode && named)
10472 return true;
10473 /* ABI_V4 passes complex IBM long double in 8 gprs.
10474 Stupid, but we can't change the ABI now. */
10475 if (is_complex_IBM_long_double (mode))
10476 return false;
10477 if (FLOAT128_2REG_P (mode))
10478 return true;
10479 if (DECIMAL_FLOAT_MODE_P (mode))
10480 return true;
10481 return false;
10482 }
10483
10484 /* Implement TARGET_FUNCTION_ARG_PADDING.
10485
10486 For the AIX ABI structs are always stored left shifted in their
10487 argument slot. */
10488
10489 static pad_direction
10490 rs6000_function_arg_padding (machine_mode mode, const_tree type)
10491 {
10492 #ifndef AGGREGATE_PADDING_FIXED
10493 #define AGGREGATE_PADDING_FIXED 0
10494 #endif
10495 #ifndef AGGREGATES_PAD_UPWARD_ALWAYS
10496 #define AGGREGATES_PAD_UPWARD_ALWAYS 0
10497 #endif
10498
10499 if (!AGGREGATE_PADDING_FIXED)
10500 {
10501 /* GCC used to pass structures of the same size as integer types as
10502 if they were in fact integers, ignoring TARGET_FUNCTION_ARG_PADDING.
10503 i.e. Structures of size 1 or 2 (or 4 when TARGET_64BIT) were
10504 passed padded downward, except that -mstrict-align further
10505 muddied the water in that multi-component structures of 2 and 4
10506 bytes in size were passed padded upward.
10507
10508 The following arranges for best compatibility with previous
10509 versions of gcc, but removes the -mstrict-align dependency. */
10510 if (BYTES_BIG_ENDIAN)
10511 {
10512 HOST_WIDE_INT size = 0;
10513
10514 if (mode == BLKmode)
10515 {
10516 if (type && TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST)
10517 size = int_size_in_bytes (type);
10518 }
10519 else
10520 size = GET_MODE_SIZE (mode);
10521
10522 if (size == 1 || size == 2 || size == 4)
10523 return PAD_DOWNWARD;
10524 }
10525 return PAD_UPWARD;
10526 }
10527
10528 if (AGGREGATES_PAD_UPWARD_ALWAYS)
10529 {
10530 if (type != 0 && AGGREGATE_TYPE_P (type))
10531 return PAD_UPWARD;
10532 }
10533
10534 /* Fall back to the default. */
10535 return default_function_arg_padding (mode, type);
10536 }
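
/* Worked example (editorial, assuming a big-endian target without
   AGGREGATE_PADDING_FIXED): a 2-byte struct gets PAD_DOWNWARD, i.e.
   its data sits at the high-address end of its slot, just as a 2-byte
   integer would:

     32-bit slot, bytes 0..3:  [pad][pad][data][data]

   A 3-byte struct, by contrast, gets PAD_UPWARD and starts at byte 0.  */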
10537
10538 /* If defined, a C expression that gives the alignment boundary, in bits,
10539 of an argument with the specified mode and type. If it is not defined,
10540 PARM_BOUNDARY is used for all arguments.
10541
10542 V.4 wants long longs and doubles to be double word aligned. Just
10543 testing the mode size is a boneheaded way to do this as it means
10544 that other types such as complex int are also double word aligned.
10545 However, we're stuck with this because changing the ABI might break
10546 existing library interfaces.
10547
10548 Quadword align Altivec/VSX vectors.
10549 Quadword align large synthetic vector types. */
10550
10551 static unsigned int
10552 rs6000_function_arg_boundary (machine_mode mode, const_tree type)
10553 {
10554 machine_mode elt_mode;
10555 int n_elts;
10556
10557 rs6000_discover_homogeneous_aggregate (mode, type, &elt_mode, &n_elts);
10558
10559 if (DEFAULT_ABI == ABI_V4
10560 && (GET_MODE_SIZE (mode) == 8
10561 || (TARGET_HARD_FLOAT
10562 && !is_complex_IBM_long_double (mode)
10563 && FLOAT128_2REG_P (mode))))
10564 return 64;
10565 else if (FLOAT128_VECTOR_P (mode))
10566 return 128;
10567 else if (type && TREE_CODE (type) == VECTOR_TYPE
10568 && int_size_in_bytes (type) >= 8
10569 && int_size_in_bytes (type) < 16)
10570 return 64;
10571 else if (ALTIVEC_OR_VSX_VECTOR_MODE (elt_mode)
10572 || (type && TREE_CODE (type) == VECTOR_TYPE
10573 && int_size_in_bytes (type) >= 16))
10574 return 128;
10575
10576 /* Aggregate types that need > 8 byte alignment are quadword-aligned
10577 in the parameter area in the ELFv2 ABI, and in the AIX ABI unless
10578 -mcompat-align-parm is used. */
10579 if (((DEFAULT_ABI == ABI_AIX && !rs6000_compat_align_parm)
10580 || DEFAULT_ABI == ABI_ELFv2)
10581 && type && TYPE_ALIGN (type) > 64)
10582 {
10583 /* "Aggregate" means any AGGREGATE_TYPE except for single-element
10584 or homogeneous float/vector aggregates here. We already handled
10585 vector aggregates above, but still need to check for float here. */
10586 bool aggregate_p = (AGGREGATE_TYPE_P (type)
10587 && !SCALAR_FLOAT_MODE_P (elt_mode));
10588
10589 /* We used to check for BLKmode instead of the above aggregate type
10590 check. Warn when this results in any difference to the ABI. */
10591 if (aggregate_p != (mode == BLKmode))
10592 {
10593 static bool warned;
10594 if (!warned && warn_psabi)
10595 {
10596 warned = true;
10597 inform (input_location,
10598 "the ABI of passing aggregates with %d-byte alignment"
10599 " has changed in GCC 5",
10600 (int) TYPE_ALIGN (type) / BITS_PER_UNIT);
10601 }
10602 }
10603
10604 if (aggregate_p)
10605 return 128;
10606 }
10607
10608 /* Similar for the Darwin64 ABI. Note that for historical reasons we
10609 implement the "aggregate type" check as a BLKmode check here; this
10610 means certain aggregate types are in fact not aligned. */
10611 if (TARGET_MACHO && rs6000_darwin64_abi
10612 && mode == BLKmode
10613 && type && TYPE_ALIGN (type) > 64)
10614 return 128;
10615
10616 return PARM_BOUNDARY;
10617 }
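
/* A few illustrative results (editorial; assuming a 64-bit ELFv2
   target):

     double, long long                 -> PARM_BOUNDARY (64 bits)
     vector double (V2DF), __float128  -> 128 bits
     struct with 16-byte alignment,
       not homogeneous float/vector    -> 128 bits

   Under ABI_V4, doubles and IBM long double take the 64-bit branch at
   the top instead.  */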
10618
10619 /* The offset in words to the start of the parameter save area. */
10620
10621 static unsigned int
10622 rs6000_parm_offset (void)
10623 {
10624 return (DEFAULT_ABI == ABI_V4 ? 2
10625 : DEFAULT_ABI == ABI_ELFv2 ? 4
10626 : 6);
10627 }
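
/* In byte terms this is the size of the fixed stack header below the
   parameter save area (an editorial note): 8 bytes for ABI_V4 (back
   chain + LR, two 4-byte words), 32 bytes for ELFv2 (back chain, CR,
   LR and TOC doublewords), and 48 bytes for 64-bit AIX, which adds
   two reserved doublewords.  */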
10628
10629 /* For a function parm of MODE and TYPE, return the starting word in
10630 the parameter area. NWORDS of the parameter area are already used. */
10631
10632 static unsigned int
10633 rs6000_parm_start (machine_mode mode, const_tree type,
10634 unsigned int nwords)
10635 {
10636 unsigned int align;
10637
10638 align = rs6000_function_arg_boundary (mode, type) / PARM_BOUNDARY - 1;
10639 return nwords + (-(rs6000_parm_offset () + nwords) & align);
10640 }
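
/* A minimal standalone sketch of the alignment arithmetic above
   (editorial; `boundary_words' plays the role of
   rs6000_function_arg_boundary () / PARM_BOUNDARY):  */
#if 0
static unsigned int
parm_start_sketch (unsigned int parm_offset_words, unsigned int nwords,
		   unsigned int boundary_words)
{
  /* Mask of low bits that must be zero in the parameter's absolute
     word position, i.e. parm_offset_words + result.  */
  unsigned int align = boundary_words - 1;
  return nwords + (-(parm_offset_words + nwords) & align);
}

/* E.g. under ABI_V4 (offset 2 words), with one word already used and a
   doubleword-aligned arg: parm_start_sketch (2, 1, 2) == 2, skipping
   one padding word so the arg lands at absolute word 4.  */
#endif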
10641
10642 /* Compute the size (in words) of a function argument. */
10643
10644 static unsigned long
10645 rs6000_arg_size (machine_mode mode, const_tree type)
10646 {
10647 unsigned long size;
10648
10649 if (mode != BLKmode)
10650 size = GET_MODE_SIZE (mode);
10651 else
10652 size = int_size_in_bytes (type);
10653
10654 if (TARGET_32BIT)
10655 return (size + 3) >> 2;
10656 else
10657 return (size + 7) >> 3;
10658 }
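
/* The rounding above is ceiling division by the register size, e.g. a
   10-byte BLKmode argument occupies (10 + 3) >> 2 == 3 words on a
   32-bit target and (10 + 7) >> 3 == 2 doublewords on a 64-bit one
   (editorial example).  */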
10659 \f
10660 /* Use this to flush pending int fields. */
10661
10662 static void
10663 rs6000_darwin64_record_arg_advance_flush (CUMULATIVE_ARGS *cum,
10664 HOST_WIDE_INT bitpos, int final)
10665 {
10666 unsigned int startbit, endbit;
10667 int intregs, intoffset;
10668
10669 /* Handle the situation where a float is taking up the first half
10670 of the GPR, and the other half is empty (typically due to
10671 alignment restrictions). We can detect this by an 8-byte-aligned
10672 int field, or by seeing that this is the final flush for this
10673 argument. Count the word and continue on. */
10674 if (cum->floats_in_gpr == 1
10675 && (cum->intoffset % 64 == 0
10676 || (cum->intoffset == -1 && final)))
10677 {
10678 cum->words++;
10679 cum->floats_in_gpr = 0;
10680 }
10681
10682 if (cum->intoffset == -1)
10683 return;
10684
10685 intoffset = cum->intoffset;
10686 cum->intoffset = -1;
10687 cum->floats_in_gpr = 0;
10688
10689 if (intoffset % BITS_PER_WORD != 0)
10690 {
10691 unsigned int bits = BITS_PER_WORD - intoffset % BITS_PER_WORD;
10692 if (!int_mode_for_size (bits, 0).exists ())
10693 {
10694 /* We couldn't find an appropriate mode, which happens,
10695 e.g., in packed structs when there are 3 bytes to load.
10696 Move intoffset back to the beginning of the word in this
10697 case. */
10698 intoffset = ROUND_DOWN (intoffset, BITS_PER_WORD);
10699 }
10700 }
10701
10702 startbit = ROUND_DOWN (intoffset, BITS_PER_WORD);
10703 endbit = ROUND_UP (bitpos, BITS_PER_WORD);
10704 intregs = (endbit - startbit) / BITS_PER_WORD;
10705 cum->words += intregs;
10706 /* words should be unsigned. */
10707 if ((unsigned)cum->words < (endbit/BITS_PER_WORD))
10708 {
10709 int pad = (endbit/BITS_PER_WORD) - cum->words;
10710 cum->words += pad;
10711 }
10712 }
10713
10714 /* The darwin64 ABI calls for us to recurse down through structs,
10715 looking for elements passed in registers. Unfortunately, we have
10716 to track int register count here also because of misalignments
10717 in powerpc alignment mode. */
10718
10719 static void
10720 rs6000_darwin64_record_arg_advance_recurse (CUMULATIVE_ARGS *cum,
10721 const_tree type,
10722 HOST_WIDE_INT startbitpos)
10723 {
10724 tree f;
10725
10726 for (f = TYPE_FIELDS (type); f ; f = DECL_CHAIN (f))
10727 if (TREE_CODE (f) == FIELD_DECL)
10728 {
10729 HOST_WIDE_INT bitpos = startbitpos;
10730 tree ftype = TREE_TYPE (f);
10731 machine_mode mode;
10732 if (ftype == error_mark_node)
10733 continue;
10734 mode = TYPE_MODE (ftype);
10735
10736 if (DECL_SIZE (f) != 0
10737 && tree_fits_uhwi_p (bit_position (f)))
10738 bitpos += int_bit_position (f);
10739
10740 /* ??? FIXME: else assume zero offset. */
10741
10742 if (TREE_CODE (ftype) == RECORD_TYPE)
10743 rs6000_darwin64_record_arg_advance_recurse (cum, ftype, bitpos);
10744 else if (USE_FP_FOR_ARG_P (cum, mode))
10745 {
10746 unsigned n_fpregs = (GET_MODE_SIZE (mode) + 7) >> 3;
10747 rs6000_darwin64_record_arg_advance_flush (cum, bitpos, 0);
10748 cum->fregno += n_fpregs;
10749 /* Single-precision floats present a special problem for
10750 us, because they are smaller than an 8-byte GPR, and so
10751 the structure-packing rules combined with the standard
10752 varargs behavior mean that we want to pack float/float
10753 and float/int combinations into a single register's
10754 space. This is complicated by the arg advance flushing,
10755 which works on arbitrarily large groups of int-type
10756 fields. */
10757 if (mode == SFmode)
10758 {
10759 if (cum->floats_in_gpr == 1)
10760 {
10761 /* Two floats in a word; count the word and reset
10762 the float count. */
10763 cum->words++;
10764 cum->floats_in_gpr = 0;
10765 }
10766 else if (bitpos % 64 == 0)
10767 {
10768 /* A float at the beginning of an 8-byte word;
10769 count it and put off adjusting cum->words until
10770 we see if an arg advance flush is going to do it
10771 for us. */
10772 cum->floats_in_gpr++;
10773 }
10774 else
10775 {
10776 /* The float is at the end of a word, preceded
10777 by integer fields, so the arg advance flush
10778 just above has already set cum->words and
10779 everything is taken care of. */
10780 }
10781 }
10782 else
10783 cum->words += n_fpregs;
10784 }
10785 else if (USE_ALTIVEC_FOR_ARG_P (cum, mode, 1))
10786 {
10787 rs6000_darwin64_record_arg_advance_flush (cum, bitpos, 0);
10788 cum->vregno++;
10789 cum->words += 2;
10790 }
10791 else if (cum->intoffset == -1)
10792 cum->intoffset = bitpos;
10793 }
10794 }
10795
10796 /* Check for an item that needs to be considered specially under the darwin 64
10797 bit ABI. These are record types where the mode is BLK or the structure is
10798 8 bytes in size. */
10799 static int
10800 rs6000_darwin64_struct_check_p (machine_mode mode, const_tree type)
10801 {
10802 return rs6000_darwin64_abi
10803 && ((mode == BLKmode
10804 && TREE_CODE (type) == RECORD_TYPE
10805 && int_size_in_bytes (type) > 0)
10806 || (type && TREE_CODE (type) == RECORD_TYPE
10807 && int_size_in_bytes (type) == 8)) ? 1 : 0;
10808 }
10809
10810 /* Update the data in CUM to advance over an argument
10811 of mode MODE and data type TYPE.
10812 (TYPE is null for libcalls where that information may not be available.)
10813
10814 Note that for args passed by reference, function_arg will be called
10815 with MODE and TYPE set to that of the pointer to the arg, not the arg
10816 itself. */
10817
10818 static void
10819 rs6000_function_arg_advance_1 (CUMULATIVE_ARGS *cum, machine_mode mode,
10820 const_tree type, bool named, int depth)
10821 {
10822 machine_mode elt_mode;
10823 int n_elts;
10824
10825 rs6000_discover_homogeneous_aggregate (mode, type, &elt_mode, &n_elts);
10826
10827 /* Only tick off an argument if we're not recursing. */
10828 if (depth == 0)
10829 cum->nargs_prototype--;
10830
10831 #ifdef HAVE_AS_GNU_ATTRIBUTE
10832 if (TARGET_ELF && (TARGET_64BIT || DEFAULT_ABI == ABI_V4)
10833 && cum->escapes)
10834 {
10835 if (SCALAR_FLOAT_MODE_P (mode))
10836 {
10837 rs6000_passes_float = true;
10838 if ((HAVE_LD_PPC_GNU_ATTR_LONG_DOUBLE || TARGET_64BIT)
10839 && (FLOAT128_IBM_P (mode)
10840 || FLOAT128_IEEE_P (mode)
10841 || (type != NULL
10842 && TYPE_MAIN_VARIANT (type) == long_double_type_node)))
10843 rs6000_passes_long_double = true;
10844
10845 /* Note if we pass or return an IEEE 128-bit type. We changed the
10846 mangling for these types, and we may need to make an alias with
10847 the old mangling. */
10848 if (FLOAT128_IEEE_P (mode))
10849 rs6000_passes_ieee128 = true;
10850 }
10851 if (named && ALTIVEC_OR_VSX_VECTOR_MODE (mode))
10852 rs6000_passes_vector = true;
10853 }
10854 #endif
10855
10856 if (TARGET_ALTIVEC_ABI
10857 && (ALTIVEC_OR_VSX_VECTOR_MODE (elt_mode)
10858 || (type && TREE_CODE (type) == VECTOR_TYPE
10859 && int_size_in_bytes (type) == 16)))
10860 {
10861 bool stack = false;
10862
10863 if (USE_ALTIVEC_FOR_ARG_P (cum, elt_mode, named))
10864 {
10865 cum->vregno += n_elts;
10866
10867 if (!TARGET_ALTIVEC)
10868 error ("cannot pass argument in vector register because"
10869 " altivec instructions are disabled, use %qs"
10870 " to enable them", "-maltivec");
10871
10872 /* PowerPC64 Linux and AIX allocate GPRs for a vector argument
10873 even if it is going to be passed in a vector register.
10874 Darwin does the same for variable-argument functions. */
10875 if (((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
10876 && TARGET_64BIT)
10877 || (cum->stdarg && DEFAULT_ABI != ABI_V4))
10878 stack = true;
10879 }
10880 else
10881 stack = true;
10882
10883 if (stack)
10884 {
10885 int align;
10886
10887 /* Vector parameters must be 16-byte aligned. In 32-bit
10888 mode this means we need to take into account the offset
10889 to the parameter save area. In 64-bit mode, they just
10890 have to start on an even word, since the parameter save
10891 area is 16-byte aligned. */
10892 if (TARGET_32BIT)
10893 align = -(rs6000_parm_offset () + cum->words) & 3;
10894 else
10895 align = cum->words & 1;
10896 cum->words += align + rs6000_arg_size (mode, type);
10897
10898 if (TARGET_DEBUG_ARG)
10899 {
10900 fprintf (stderr, "function_adv: words = %2d, align=%d, ",
10901 cum->words, align);
10902 fprintf (stderr, "nargs = %4d, proto = %d, mode = %4s\n",
10903 cum->nargs_prototype, cum->prototype,
10904 GET_MODE_NAME (mode));
10905 }
10906 }
10907 }
10908 else if (TARGET_MACHO && rs6000_darwin64_struct_check_p (mode, type))
10909 {
10910 int size = int_size_in_bytes (type);
10911 /* Variable sized types have size == -1 and are
10912 treated as if consisting entirely of ints.
10913 Pad to 16 byte boundary if needed. */
10914 if (TYPE_ALIGN (type) >= 2 * BITS_PER_WORD
10915 && (cum->words % 2) != 0)
10916 cum->words++;
10917 /* For varargs, we can just go up by the size of the struct. */
10918 if (!named)
10919 cum->words += (size + 7) / 8;
10920 else
10921 {
10922 /* It is tempting to say int register count just goes up by
10923 sizeof(type)/8, but this is wrong in a case such as
10924 { int; double; int; } [powerpc alignment]. We have to
10925 grovel through the fields for these too. */
10926 cum->intoffset = 0;
10927 cum->floats_in_gpr = 0;
10928 rs6000_darwin64_record_arg_advance_recurse (cum, type, 0);
10929 rs6000_darwin64_record_arg_advance_flush (cum,
10930 size * BITS_PER_UNIT, 1);
10931 }
10932 if (TARGET_DEBUG_ARG)
10933 {
10934 fprintf (stderr, "function_adv: words = %2d, align=%d, size=%d",
10935 cum->words, TYPE_ALIGN (type), size);
10936 fprintf (stderr,
10937 "nargs = %4d, proto = %d, mode = %4s (darwin64 abi)\n",
10938 cum->nargs_prototype, cum->prototype,
10939 GET_MODE_NAME (mode));
10940 }
10941 }
10942 else if (DEFAULT_ABI == ABI_V4)
10943 {
10944 if (abi_v4_pass_in_fpr (mode, named))
10945 {
10946 /* _Decimal128 must use an even/odd register pair. This assumes
10947 that the register number is odd when fregno is odd. */
10948 if (mode == TDmode && (cum->fregno % 2) == 1)
10949 cum->fregno++;
10950
10951 if (cum->fregno + (FLOAT128_2REG_P (mode) ? 1 : 0)
10952 <= FP_ARG_V4_MAX_REG)
10953 cum->fregno += (GET_MODE_SIZE (mode) + 7) >> 3;
10954 else
10955 {
10956 cum->fregno = FP_ARG_V4_MAX_REG + 1;
10957 if (mode == DFmode || FLOAT128_IBM_P (mode)
10958 || mode == DDmode || mode == TDmode)
10959 cum->words += cum->words & 1;
10960 cum->words += rs6000_arg_size (mode, type);
10961 }
10962 }
10963 else
10964 {
10965 int n_words = rs6000_arg_size (mode, type);
10966 int gregno = cum->sysv_gregno;
10967
10968 /* Long long is put in (r3,r4), (r5,r6), (r7,r8) or (r9,r10).
10969 As is any other 2 word item such as complex int, due to a
10970 historical mistake. */
10971 if (n_words == 2)
10972 gregno += (1 - gregno) & 1;
10973
10974 /* Multi-reg args are not split between registers and stack. */
10975 if (gregno + n_words - 1 > GP_ARG_MAX_REG)
10976 {
10977 /* Long long is aligned on the stack. So are other 2 word
10978 items such as complex int due to a historical mistake. */
10979 if (n_words == 2)
10980 cum->words += cum->words & 1;
10981 cum->words += n_words;
10982 }
10983
10984 /* Note: we keep accumulating gregno even after we have started
10985 spilling to the stack; this tells expand_builtin_saveregs
10986 that spilling has begun. */
10987 cum->sysv_gregno = gregno + n_words;
10988 }
10989
10990 if (TARGET_DEBUG_ARG)
10991 {
10992 fprintf (stderr, "function_adv: words = %2d, fregno = %2d, ",
10993 cum->words, cum->fregno);
10994 fprintf (stderr, "gregno = %2d, nargs = %4d, proto = %d, ",
10995 cum->sysv_gregno, cum->nargs_prototype, cum->prototype);
10996 fprintf (stderr, "mode = %4s, named = %d\n",
10997 GET_MODE_NAME (mode), named);
10998 }
10999 }
11000 else
11001 {
11002 int n_words = rs6000_arg_size (mode, type);
11003 int start_words = cum->words;
11004 int align_words = rs6000_parm_start (mode, type, start_words);
11005
11006 cum->words = align_words + n_words;
11007
11008 if (SCALAR_FLOAT_MODE_P (elt_mode) && TARGET_HARD_FLOAT)
11009 {
11010 /* _Decimal128 must be passed in an even/odd float register pair.
11011 This assumes that the register number is odd when fregno is
11012 odd. */
11013 if (elt_mode == TDmode && (cum->fregno % 2) == 1)
11014 cum->fregno++;
11015 cum->fregno += n_elts * ((GET_MODE_SIZE (elt_mode) + 7) >> 3);
11016 }
11017
11018 if (TARGET_DEBUG_ARG)
11019 {
11020 fprintf (stderr, "function_adv: words = %2d, fregno = %2d, ",
11021 cum->words, cum->fregno);
11022 fprintf (stderr, "nargs = %4d, proto = %d, mode = %4s, ",
11023 cum->nargs_prototype, cum->prototype, GET_MODE_NAME (mode));
11024 fprintf (stderr, "named = %d, align = %d, depth = %d\n",
11025 named, align_words - start_words, depth);
11026 }
11027 }
11028 }
11029
11030 static void
11031 rs6000_function_arg_advance (cumulative_args_t cum, machine_mode mode,
11032 const_tree type, bool named)
11033 {
11034 rs6000_function_arg_advance_1 (get_cumulative_args (cum), mode, type, named,
11035 0);
11036 }
11037
11038 /* A subroutine of rs6000_darwin64_record_arg. Assign the bits of the
11039 structure between cum->intoffset and bitpos to integer registers. */
11040
11041 static void
11042 rs6000_darwin64_record_arg_flush (CUMULATIVE_ARGS *cum,
11043 HOST_WIDE_INT bitpos, rtx rvec[], int *k)
11044 {
11045 machine_mode mode;
11046 unsigned int regno;
11047 unsigned int startbit, endbit;
11048 int this_regno, intregs, intoffset;
11049 rtx reg;
11050
11051 if (cum->intoffset == -1)
11052 return;
11053
11054 intoffset = cum->intoffset;
11055 cum->intoffset = -1;
11056
11057 /* If this is the trailing part of a word, try to only load that
11058 much into the register. Otherwise load the whole register. Note
11059 that in the latter case we may pick up unwanted bits. It's not a
11060 problem at the moment, but we may wish to revisit it. */
11061
11062 if (intoffset % BITS_PER_WORD != 0)
11063 {
11064 unsigned int bits = BITS_PER_WORD - intoffset % BITS_PER_WORD;
11065 if (!int_mode_for_size (bits, 0).exists (&mode))
11066 {
11067 /* We couldn't find an appropriate mode, which happens,
11068 e.g., in packed structs when there are 3 bytes to load.
11069 Move intoffset back to the beginning of the word in this
11070 case. */
11071 intoffset = ROUND_DOWN (intoffset, BITS_PER_WORD);
11072 mode = word_mode;
11073 }
11074 }
11075 else
11076 mode = word_mode;
11077
11078 startbit = ROUND_DOWN (intoffset, BITS_PER_WORD);
11079 endbit = ROUND_UP (bitpos, BITS_PER_WORD);
11080 intregs = (endbit - startbit) / BITS_PER_WORD;
11081 this_regno = cum->words + intoffset / BITS_PER_WORD;
11082
11083 if (intregs > 0 && intregs > GP_ARG_NUM_REG - this_regno)
11084 cum->use_stack = 1;
11085
11086 intregs = MIN (intregs, GP_ARG_NUM_REG - this_regno);
11087 if (intregs <= 0)
11088 return;
11089
11090 intoffset /= BITS_PER_UNIT;
11091 do
11092 {
11093 regno = GP_ARG_MIN_REG + this_regno;
11094 reg = gen_rtx_REG (mode, regno);
11095 rvec[(*k)++] =
11096 gen_rtx_EXPR_LIST (VOIDmode, reg, GEN_INT (intoffset));
11097
11098 this_regno += 1;
11099 intoffset = (intoffset | (UNITS_PER_WORD-1)) + 1;
11100 mode = word_mode;
11101 intregs -= 1;
11102 }
11103 while (intregs > 0);
11104 }
11105
11106 /* Recursive workhorse for the following. */
11107
11108 static void
11109 rs6000_darwin64_record_arg_recurse (CUMULATIVE_ARGS *cum, const_tree type,
11110 HOST_WIDE_INT startbitpos, rtx rvec[],
11111 int *k)
11112 {
11113 tree f;
11114
11115 for (f = TYPE_FIELDS (type); f ; f = DECL_CHAIN (f))
11116 if (TREE_CODE (f) == FIELD_DECL)
11117 {
11118 HOST_WIDE_INT bitpos = startbitpos;
11119 tree ftype = TREE_TYPE (f);
11120 machine_mode mode;
11121 if (ftype == error_mark_node)
11122 continue;
11123 mode = TYPE_MODE (ftype);
11124
11125 if (DECL_SIZE (f) != 0
11126 && tree_fits_uhwi_p (bit_position (f)))
11127 bitpos += int_bit_position (f);
11128
11129 /* ??? FIXME: else assume zero offset. */
11130
11131 if (TREE_CODE (ftype) == RECORD_TYPE)
11132 rs6000_darwin64_record_arg_recurse (cum, ftype, bitpos, rvec, k);
11133 else if (cum->named && USE_FP_FOR_ARG_P (cum, mode))
11134 {
11135 unsigned n_fpreg = (GET_MODE_SIZE (mode) + 7) >> 3;
11136 #if 0
11137 switch (mode)
11138 {
11139 case E_SCmode: mode = SFmode; break;
11140 case E_DCmode: mode = DFmode; break;
11141 case E_TCmode: mode = TFmode; break;
11142 default: break;
11143 }
11144 #endif
11145 rs6000_darwin64_record_arg_flush (cum, bitpos, rvec, k);
11146 if (cum->fregno + n_fpreg > FP_ARG_MAX_REG + 1)
11147 {
11148 gcc_assert (cum->fregno == FP_ARG_MAX_REG
11149 && (mode == TFmode || mode == TDmode));
11150 /* Long double or _Decimal128 split over regs and memory. */
11151 mode = DECIMAL_FLOAT_MODE_P (mode) ? DDmode : DFmode;
11152 cum->use_stack = 1;
11153 }
11154 rvec[(*k)++]
11155 = gen_rtx_EXPR_LIST (VOIDmode,
11156 gen_rtx_REG (mode, cum->fregno++),
11157 GEN_INT (bitpos / BITS_PER_UNIT));
11158 if (FLOAT128_2REG_P (mode))
11159 cum->fregno++;
11160 }
11161 else if (cum->named && USE_ALTIVEC_FOR_ARG_P (cum, mode, 1))
11162 {
11163 rs6000_darwin64_record_arg_flush (cum, bitpos, rvec, k);
11164 rvec[(*k)++]
11165 = gen_rtx_EXPR_LIST (VOIDmode,
11166 gen_rtx_REG (mode, cum->vregno++),
11167 GEN_INT (bitpos / BITS_PER_UNIT));
11168 }
11169 else if (cum->intoffset == -1)
11170 cum->intoffset = bitpos;
11171 }
11172 }
11173
11174 /* For the darwin64 ABI, we want to construct a PARALLEL consisting of
11175 the register(s) to be used for each field and subfield of a struct
11176 being passed by value, along with the offset of where the
11177 register's value may be found in the block. FP fields go in FP
11178 registers, vector fields go in vector registers, and everything
11179 else goes in int registers, packed as in memory.
11180
11181 This code is also used for function return values. RETVAL indicates
11182 whether this is the case.
11183
11184 Much of this is taken from the SPARC V9 port, which has a similar
11185 calling convention. */
11186
11187 static rtx
11188 rs6000_darwin64_record_arg (CUMULATIVE_ARGS *orig_cum, const_tree type,
11189 bool named, bool retval)
11190 {
11191 rtx rvec[FIRST_PSEUDO_REGISTER];
11192 int k = 1, kbase = 1;
11193 HOST_WIDE_INT typesize = int_size_in_bytes (type);
11194 /* This is a copy; modifications are not visible to our caller. */
11195 CUMULATIVE_ARGS copy_cum = *orig_cum;
11196 CUMULATIVE_ARGS *cum = &copy_cum;
11197
11198 /* Pad to 16 byte boundary if needed. */
11199 if (!retval && TYPE_ALIGN (type) >= 2 * BITS_PER_WORD
11200 && (cum->words % 2) != 0)
11201 cum->words++;
11202
11203 cum->intoffset = 0;
11204 cum->use_stack = 0;
11205 cum->named = named;
11206
11207 /* Put entries into rvec[] for individual FP and vector fields, and
11208 for the chunks of memory that go in int regs. Note we start at
11209 element 1; 0 is reserved for an indication of using memory, and
11210 may or may not be filled in below. */
11211 rs6000_darwin64_record_arg_recurse (cum, type, /* startbit pos= */ 0, rvec, &k);
11212 rs6000_darwin64_record_arg_flush (cum, typesize * BITS_PER_UNIT, rvec, &k);
11213
11214 /* If any part of the struct went on the stack, put all of it there.
11215 This hack is because the generic code for
11216 FUNCTION_ARG_PARTIAL_NREGS cannot handle cases where the register
11217 parts of the struct are not at the beginning. */
11218 if (cum->use_stack)
11219 {
11220 if (retval)
11221 return NULL_RTX; /* doesn't go in registers at all */
11222 kbase = 0;
11223 rvec[0] = gen_rtx_EXPR_LIST (VOIDmode, NULL_RTX, const0_rtx);
11224 }
11225 if (k > 1 || cum->use_stack)
11226 return gen_rtx_PARALLEL (BLKmode, gen_rtvec_v (k - kbase, &rvec[kbase]));
11227 else
11228 return NULL_RTX;
11229 }
11230
11231 /* Determine where to place an argument in 64-bit mode with 32-bit ABI. */
11232
11233 static rtx
11234 rs6000_mixed_function_arg (machine_mode mode, const_tree type,
11235 int align_words)
11236 {
11237 int n_units;
11238 int i, k;
11239 rtx rvec[GP_ARG_NUM_REG + 1];
11240
11241 if (align_words >= GP_ARG_NUM_REG)
11242 return NULL_RTX;
11243
11244 n_units = rs6000_arg_size (mode, type);
11245
11246 /* Optimize the simple case where the arg fits in one gpr, except in
11247 the case of BLKmode due to assign_parms assuming that registers are
11248 BITS_PER_WORD wide. */
11249 if (n_units == 0
11250 || (n_units == 1 && mode != BLKmode))
11251 return gen_rtx_REG (mode, GP_ARG_MIN_REG + align_words);
11252
11253 k = 0;
11254 if (align_words + n_units > GP_ARG_NUM_REG)
11255 /* Not all of the arg fits in gprs. Say that it goes in memory too,
11256 using a magic NULL_RTX component.
11257 This is not strictly correct. Only some of the arg belongs in
11258 memory, not all of it. However, the normal scheme using
11259 function_arg_partial_nregs can result in unusual subregs, e.g.
11260 (subreg:SI (reg:DF) 4), which are not handled well. The code to
11261 store the whole arg to memory is often more efficient than code
11262 to store pieces, and we know that space is available in the right
11263 place for the whole arg. */
11264 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, NULL_RTX, const0_rtx);
11265
11266 i = 0;
11267 do
11268 {
11269 rtx r = gen_rtx_REG (SImode, GP_ARG_MIN_REG + align_words);
11270 rtx off = GEN_INT (i++ * 4);
11271 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, r, off);
11272 }
11273 while (++align_words < GP_ARG_NUM_REG && --n_units != 0);
11274
11275 return gen_rtx_PARALLEL (mode, gen_rtvec_v (k, rvec));
11276 }
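
/* Editorial example of the result: a 12-byte BLKmode argument starting
   at align_words == 6 has only two of its three words in r9 and r10,
   so the PARALLEL built above looks roughly like

     (parallel:BLK [(expr_list (nil) (const_int 0))
                    (expr_list (reg:SI 9) (const_int 0))
                    (expr_list (reg:SI 10) (const_int 4))])

   where the leading nil element records that part of the argument also
   lives in memory.  */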
11277
11278 /* We have an argument of MODE and TYPE that goes into FPRs or VRs,
11279 but must also be copied into the parameter save area starting at
11280 offset ALIGN_WORDS. Fill in RVEC with the elements corresponding
11281 to the GPRs and/or memory. Return the number of elements used. */
11282
11283 static int
11284 rs6000_psave_function_arg (machine_mode mode, const_tree type,
11285 int align_words, rtx *rvec)
11286 {
11287 int k = 0;
11288
11289 if (align_words < GP_ARG_NUM_REG)
11290 {
11291 int n_words = rs6000_arg_size (mode, type);
11292
11293 if (align_words + n_words > GP_ARG_NUM_REG
11294 || mode == BLKmode
11295 || (TARGET_32BIT && TARGET_POWERPC64))
11296 {
11297 /* If this is partially on the stack, then we only
11298 include the portion actually in registers here. */
11299 machine_mode rmode = TARGET_32BIT ? SImode : DImode;
11300 int i = 0;
11301
11302 if (align_words + n_words > GP_ARG_NUM_REG)
11303 {
11304 /* Not all of the arg fits in gprs. Say that it goes in memory
11305 too, using a magic NULL_RTX component. Also see comment in
11306 rs6000_mixed_function_arg for why the normal
11307 function_arg_partial_nregs scheme doesn't work in this case. */
11308 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, NULL_RTX, const0_rtx);
11309 }
11310
11311 do
11312 {
11313 rtx r = gen_rtx_REG (rmode, GP_ARG_MIN_REG + align_words);
11314 rtx off = GEN_INT (i++ * GET_MODE_SIZE (rmode));
11315 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, r, off);
11316 }
11317 while (++align_words < GP_ARG_NUM_REG && --n_words != 0);
11318 }
11319 else
11320 {
11321 /* The whole arg fits in gprs. */
11322 rtx r = gen_rtx_REG (mode, GP_ARG_MIN_REG + align_words);
11323 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, r, const0_rtx);
11324 }
11325 }
11326 else
11327 {
11328 /* It's entirely in memory. */
11329 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, NULL_RTX, const0_rtx);
11330 }
11331
11332 return k;
11333 }
11334
11335 /* RVEC is a vector of K components of an argument of mode MODE.
11336 Construct the final function_arg return value from it. */
11337
11338 static rtx
11339 rs6000_finish_function_arg (machine_mode mode, rtx *rvec, int k)
11340 {
11341 gcc_assert (k >= 1);
11342
11343 /* Avoid returning a PARALLEL in the trivial cases. */
11344 if (k == 1)
11345 {
11346 if (XEXP (rvec[0], 0) == NULL_RTX)
11347 return NULL_RTX;
11348
11349 if (GET_MODE (XEXP (rvec[0], 0)) == mode)
11350 return XEXP (rvec[0], 0);
11351 }
11352
11353 return gen_rtx_PARALLEL (mode, gen_rtvec_v (k, rvec));
11354 }
11355
11356 /* Determine where to put an argument to a function.
11357 Value is zero to push the argument on the stack,
11358 or a hard register in which to store the argument.
11359
11360 MODE is the argument's machine mode.
11361 TYPE is the data type of the argument (as a tree).
11362 This is null for libcalls where that information may
11363 not be available.
11364 CUM is a variable of type CUMULATIVE_ARGS which gives info about
11365 the preceding args and about the function being called. It is
11366 not modified in this routine.
11367 NAMED is nonzero if this argument is a named parameter
11368 (otherwise it is an extra parameter matching an ellipsis).
11369
11370 On RS/6000 the first eight words of non-FP are normally in registers
11371 and the rest are pushed. Under AIX, the first 13 FP args are in registers.
11372 Under V.4, the first 8 FP args are in registers.
11373
11374 If this is floating-point and no prototype is specified, we use
11375 both an FP and integer register (or possibly FP reg and stack). Library
11376 functions (when CALL_LIBCALL is set) always have the proper types for args,
11377 so we can pass the FP value just in one register. emit_library_call
11378 doesn't support PARALLEL anyway.
11379
11380 Note that for args passed by reference, function_arg will be called
11381 with MODE and TYPE set to that of the pointer to the arg, not the arg
11382 itself. */
11383
11384 static rtx
11385 rs6000_function_arg (cumulative_args_t cum_v, machine_mode mode,
11386 const_tree type, bool named)
11387 {
11388 CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
11389 enum rs6000_abi abi = DEFAULT_ABI;
11390 machine_mode elt_mode;
11391 int n_elts;
11392
11393 /* Return a marker to indicate whether we need to set or clear in CR1
11394 the bit that V.4 uses to say fp args were passed in registers.
11395 Assume that we don't need the marker for software floating point,
11396 or compiler generated library calls. */
11397 if (mode == VOIDmode)
11398 {
11399 if (abi == ABI_V4
11400 && (cum->call_cookie & CALL_LIBCALL) == 0
11401 && (cum->stdarg
11402 || (cum->nargs_prototype < 0
11403 && (cum->prototype || TARGET_NO_PROTOTYPE)))
11404 && TARGET_HARD_FLOAT)
11405 return GEN_INT (cum->call_cookie
11406 | ((cum->fregno == FP_ARG_MIN_REG)
11407 ? CALL_V4_SET_FP_ARGS
11408 : CALL_V4_CLEAR_FP_ARGS));
11409
11410 return GEN_INT (cum->call_cookie & ~CALL_LIBCALL);
11411 }
11412
11413 rs6000_discover_homogeneous_aggregate (mode, type, &elt_mode, &n_elts);
11414
11415 if (TARGET_MACHO && rs6000_darwin64_struct_check_p (mode, type))
11416 {
11417 rtx rslt = rs6000_darwin64_record_arg (cum, type, named, /*retval= */false);
11418 if (rslt != NULL_RTX)
11419 return rslt;
11420 /* Else fall through to usual handling. */
11421 }
11422
11423 if (USE_ALTIVEC_FOR_ARG_P (cum, elt_mode, named))
11424 {
11425 rtx rvec[GP_ARG_NUM_REG + AGGR_ARG_NUM_REG + 1];
11426 rtx r, off;
11427 int i, k = 0;
11428
11429 /* Do we also need to pass this argument in the parameter save area?
11430 Library support functions for IEEE 128-bit are assumed to not need the
11431 value passed both in GPRs and in vector registers. */
11432 if (TARGET_64BIT && !cum->prototype
11433 && (!cum->libcall || !FLOAT128_VECTOR_P (elt_mode)))
11434 {
11435 int align_words = ROUND_UP (cum->words, 2);
11436 k = rs6000_psave_function_arg (mode, type, align_words, rvec);
11437 }
11438
11439 /* Describe where this argument goes in the vector registers. */
11440 for (i = 0; i < n_elts && cum->vregno + i <= ALTIVEC_ARG_MAX_REG; i++)
11441 {
11442 r = gen_rtx_REG (elt_mode, cum->vregno + i);
11443 off = GEN_INT (i * GET_MODE_SIZE (elt_mode));
11444 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, r, off);
11445 }
11446
11447 return rs6000_finish_function_arg (mode, rvec, k);
11448 }
11449 else if (TARGET_ALTIVEC_ABI
11450 && (ALTIVEC_OR_VSX_VECTOR_MODE (mode)
11451 || (type && TREE_CODE (type) == VECTOR_TYPE
11452 && int_size_in_bytes (type) == 16)))
11453 {
11454 if (named || abi == ABI_V4)
11455 return NULL_RTX;
11456 else
11457 {
11458 /* Vector parameters to varargs functions under AIX or Darwin
11459 get passed in memory and possibly also in GPRs. */
11460 int align, align_words, n_words;
11461 machine_mode part_mode;
11462
11463 /* Vector parameters must be 16-byte aligned. In 32-bit
11464 mode this means we need to take into account the offset
11465 to the parameter save area. In 64-bit mode, they just
11466 have to start on an even word, since the parameter save
11467 area is 16-byte aligned. */
11468 if (TARGET_32BIT)
11469 align = -(rs6000_parm_offset () + cum->words) & 3;
11470 else
11471 align = cum->words & 1;
11472 align_words = cum->words + align;
11473
11474 /* Out of registers? Memory, then. */
11475 if (align_words >= GP_ARG_NUM_REG)
11476 return NULL_RTX;
11477
11478 if (TARGET_32BIT && TARGET_POWERPC64)
11479 return rs6000_mixed_function_arg (mode, type, align_words);
11480
11481 /* The vector value goes in GPRs. Only the part of the
11482 value in GPRs is reported here. */
11483 part_mode = mode;
11484 n_words = rs6000_arg_size (mode, type);
11485 if (align_words + n_words > GP_ARG_NUM_REG)
11486 /* Fortunately, there are only two possibilities: the value
11487 is either wholly in GPRs or half in GPRs and half not. */
11488 part_mode = DImode;
11489
11490 return gen_rtx_REG (part_mode, GP_ARG_MIN_REG + align_words);
11491 }
11492 }
11493
11494 else if (abi == ABI_V4)
11495 {
11496 if (abi_v4_pass_in_fpr (mode, named))
11497 {
11498 /* _Decimal128 must use an even/odd register pair. This assumes
11499 that the register number is odd when fregno is odd. */
11500 if (mode == TDmode && (cum->fregno % 2) == 1)
11501 cum->fregno++;
11502
11503 if (cum->fregno + (FLOAT128_2REG_P (mode) ? 1 : 0)
11504 <= FP_ARG_V4_MAX_REG)
11505 return gen_rtx_REG (mode, cum->fregno);
11506 else
11507 return NULL_RTX;
11508 }
11509 else
11510 {
11511 int n_words = rs6000_arg_size (mode, type);
11512 int gregno = cum->sysv_gregno;
11513
11514 /* Long long is put in (r3,r4), (r5,r6), (r7,r8) or (r9,r10).
11515 As is any other 2 word item such as complex int, due to a
11516 historical mistake. */
11517 if (n_words == 2)
11518 gregno += (1 - gregno) & 1;
11519
11520 /* Multi-reg args are not split between registers and stack. */
11521 if (gregno + n_words - 1 > GP_ARG_MAX_REG)
11522 return NULL_RTX;
11523
11524 if (TARGET_32BIT && TARGET_POWERPC64)
11525 return rs6000_mixed_function_arg (mode, type,
11526 gregno - GP_ARG_MIN_REG);
11527 return gen_rtx_REG (mode, gregno);
11528 }
11529 }
11530 else
11531 {
11532 int align_words = rs6000_parm_start (mode, type, cum->words);
11533
11534 /* _Decimal128 must be passed in an even/odd float register pair.
11535 This assumes that the register number is odd when fregno is odd. */
11536 if (elt_mode == TDmode && (cum->fregno % 2) == 1)
11537 cum->fregno++;
11538
11539 if (USE_FP_FOR_ARG_P (cum, elt_mode)
11540 && !(TARGET_AIX && !TARGET_ELF
11541 && type != NULL && AGGREGATE_TYPE_P (type)))
11542 {
11543 rtx rvec[GP_ARG_NUM_REG + AGGR_ARG_NUM_REG + 1];
11544 rtx r, off;
11545 int i, k = 0;
11546 unsigned long n_fpreg = (GET_MODE_SIZE (elt_mode) + 7) >> 3;
11547 int fpr_words;
11548
11549 /* Do we also need to pass this argument in the parameter
11550 save area? */
11551 if (type && (cum->nargs_prototype <= 0
11552 || ((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
11553 && TARGET_XL_COMPAT
11554 && align_words >= GP_ARG_NUM_REG)))
11555 k = rs6000_psave_function_arg (mode, type, align_words, rvec);
11556
11557 /* Describe where this argument goes in the fprs. */
11558 for (i = 0; i < n_elts
11559 && cum->fregno + i * n_fpreg <= FP_ARG_MAX_REG; i++)
11560 {
11561 /* Check if the argument is split over registers and memory.
11562 This can only ever happen for long double or _Decimal128;
11563 complex types are handled via split_complex_arg. */
11564 machine_mode fmode = elt_mode;
11565 if (cum->fregno + (i + 1) * n_fpreg > FP_ARG_MAX_REG + 1)
11566 {
11567 gcc_assert (FLOAT128_2REG_P (fmode));
11568 fmode = DECIMAL_FLOAT_MODE_P (fmode) ? DDmode : DFmode;
11569 }
11570
11571 r = gen_rtx_REG (fmode, cum->fregno + i * n_fpreg);
11572 off = GEN_INT (i * GET_MODE_SIZE (elt_mode));
11573 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, r, off);
11574 }
11575
11576 /* If there were not enough FPRs to hold the argument, the rest
11577 usually goes into memory. However, if the current position
11578 is still within the register parameter area, a portion may
11579 actually have to go into GPRs.
11580
11581 Note that it may happen that the portion of the argument
11582 passed in the first "half" of the first GPR was already
11583 passed in the last FPR as well.
11584
11585 For unnamed arguments, we already set up GPRs to cover the
11586 whole argument in rs6000_psave_function_arg, so there is
11587 nothing further to do at this point. */
11588 fpr_words = (i * GET_MODE_SIZE (elt_mode)) / (TARGET_32BIT ? 4 : 8);
11589 if (i < n_elts && align_words + fpr_words < GP_ARG_NUM_REG
11590 && cum->nargs_prototype > 0)
11591 {
11592 static bool warned;
11593
11594 machine_mode rmode = TARGET_32BIT ? SImode : DImode;
11595 int n_words = rs6000_arg_size (mode, type);
11596
11597 align_words += fpr_words;
11598 n_words -= fpr_words;
11599
11600 do
11601 {
11602 r = gen_rtx_REG (rmode, GP_ARG_MIN_REG + align_words);
11603 off = GEN_INT (fpr_words++ * GET_MODE_SIZE (rmode));
11604 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, r, off);
11605 }
11606 while (++align_words < GP_ARG_NUM_REG && --n_words != 0);
11607
11608 if (!warned && warn_psabi)
11609 {
11610 warned = true;
11611 inform (input_location,
11612 "the ABI of passing homogeneous %<float%> aggregates"
11613 " has changed in GCC 5");
11614 }
11615 }
11616
11617 return rs6000_finish_function_arg (mode, rvec, k);
11618 }
11619 else if (align_words < GP_ARG_NUM_REG)
11620 {
11621 if (TARGET_32BIT && TARGET_POWERPC64)
11622 return rs6000_mixed_function_arg (mode, type, align_words);
11623
11624 return gen_rtx_REG (mode, GP_ARG_MIN_REG + align_words);
11625 }
11626 else
11627 return NULL_RTX;
11628 }
11629 }
11630 \f
11631 /* For an arg passed partly in registers and partly in memory, this is
11632 the number of bytes passed in registers. For args passed entirely in
11633 registers or entirely in memory, zero. When an arg is described by a
11634 PARALLEL, perhaps using more than one register type, this function
11635 returns the number of bytes used by the first element of the PARALLEL. */
11636
11637 static int
11638 rs6000_arg_partial_bytes (cumulative_args_t cum_v, machine_mode mode,
11639 tree type, bool named)
11640 {
11641 CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
11642 bool passed_in_gprs = true;
11643 int ret = 0;
11644 int align_words;
11645 machine_mode elt_mode;
11646 int n_elts;
11647
11648 rs6000_discover_homogeneous_aggregate (mode, type, &elt_mode, &n_elts);
11649
11650 if (DEFAULT_ABI == ABI_V4)
11651 return 0;
11652
11653 if (USE_ALTIVEC_FOR_ARG_P (cum, elt_mode, named))
11654 {
11655 /* If we are passing this arg in the fixed parameter save area (gprs or
11656 memory) as well as VRs, we do not use the partial bytes mechanism;
11657 instead, rs6000_function_arg will return a PARALLEL including a memory
11658 element as necessary. Library support functions for IEEE 128-bit are
11659 assumed to not need the value passed both in GPRs and in vector
11660 registers. */
11661 if (TARGET_64BIT && !cum->prototype
11662 && (!cum->libcall || !FLOAT128_VECTOR_P (elt_mode)))
11663 return 0;
11664
11665 /* Otherwise, we pass in VRs only. Check for partial copies. */
11666 passed_in_gprs = false;
11667 if (cum->vregno + n_elts > ALTIVEC_ARG_MAX_REG + 1)
11668 ret = (ALTIVEC_ARG_MAX_REG + 1 - cum->vregno) * 16;
11669 }
11670
11671 /* In this complicated case we just disable the partial_nregs code. */
11672 if (TARGET_MACHO && rs6000_darwin64_struct_check_p (mode, type))
11673 return 0;
11674
11675 align_words = rs6000_parm_start (mode, type, cum->words);
11676
11677 if (USE_FP_FOR_ARG_P (cum, elt_mode)
11678 && !(TARGET_AIX && !TARGET_ELF
11679 && type != NULL && AGGREGATE_TYPE_P (type)))
11680 {
11681 unsigned long n_fpreg = (GET_MODE_SIZE (elt_mode) + 7) >> 3;
11682
11683 /* If we are passing this arg in the fixed parameter save area
11684 (gprs or memory) as well as FPRs, we do not use the partial
11685 bytes mechanism; instead, rs6000_function_arg will return a
11686 PARALLEL including a memory element as necessary. */
11687 if (type
11688 && (cum->nargs_prototype <= 0
11689 || ((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
11690 && TARGET_XL_COMPAT
11691 && align_words >= GP_ARG_NUM_REG)))
11692 return 0;
11693
11694 /* Otherwise, we pass in FPRs only. Check for partial copies. */
11695 passed_in_gprs = false;
11696 if (cum->fregno + n_elts * n_fpreg > FP_ARG_MAX_REG + 1)
11697 {
11698 /* Compute number of bytes / words passed in FPRs. If there
11699 is still space available in the register parameter area
11700 *after* that amount, a part of the argument will be passed
11701 in GPRs. In that case, the total amount passed in any
11702 registers is equal to the amount that would have been passed
11703 in GPRs if everything were passed there, so we fall back to
11704 the GPR code below to compute the appropriate value. */
11705 int fpr = ((FP_ARG_MAX_REG + 1 - cum->fregno)
11706 * MIN (8, GET_MODE_SIZE (elt_mode)));
11707 int fpr_words = fpr / (TARGET_32BIT ? 4 : 8);
11708
11709 if (align_words + fpr_words < GP_ARG_NUM_REG)
11710 passed_in_gprs = true;
11711 else
11712 ret = fpr;
11713 }
11714 }
11715
11716 if (passed_in_gprs
11717 && align_words < GP_ARG_NUM_REG
11718 && GP_ARG_NUM_REG < align_words + rs6000_arg_size (mode, type))
11719 ret = (GP_ARG_NUM_REG - align_words) * (TARGET_32BIT ? 4 : 8);
11720
11721 if (ret != 0 && TARGET_DEBUG_ARG)
11722 fprintf (stderr, "rs6000_arg_partial_bytes: %d\n", ret);
11723
11724 return ret;
11725 }
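
/* Editorial example: on a 64-bit AIX/ELFv2 target, a 24-byte aggregate
   whose first word lands in GPR slot 6 (r9) fits only two of its three
   doublewords in r9/r10, so the function returns (8 - 6) * 8 == 16;
   the remaining 8 bytes go to the parameter save area.  */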
11726 \f
11727 /* A C expression that indicates when an argument must be passed by
11728 reference. If nonzero for an argument, a copy of that argument is
11729 made in memory and a pointer to the argument is passed instead of
11730 the argument itself. The pointer is passed in whatever way is
11731 appropriate for passing a pointer to that type.
11732
11733 Under V.4, aggregates and long double are passed by reference.
11734
11735 As an extension to all 32-bit ABIs, AltiVec vectors are passed by
11736 reference unless the AltiVec vector extension ABI is in force.
11737
11738 As an extension to all ABIs, variable sized types are passed by
11739 reference. */
11740
11741 static bool
11742 rs6000_pass_by_reference (cumulative_args_t cum ATTRIBUTE_UNUSED,
11743 machine_mode mode, const_tree type,
11744 bool named ATTRIBUTE_UNUSED)
11745 {
11746 if (!type)
11747 return 0;
11748
11749 if (DEFAULT_ABI == ABI_V4 && TARGET_IEEEQUAD
11750 && FLOAT128_IEEE_P (TYPE_MODE (type)))
11751 {
11752 if (TARGET_DEBUG_ARG)
11753 fprintf (stderr, "function_arg_pass_by_reference: V4 IEEE 128-bit\n");
11754 return 1;
11755 }
11756
11757 if (DEFAULT_ABI == ABI_V4 && AGGREGATE_TYPE_P (type))
11758 {
11759 if (TARGET_DEBUG_ARG)
11760 fprintf (stderr, "function_arg_pass_by_reference: V4 aggregate\n");
11761 return 1;
11762 }
11763
11764 if (int_size_in_bytes (type) < 0)
11765 {
11766 if (TARGET_DEBUG_ARG)
11767 fprintf (stderr, "function_arg_pass_by_reference: variable size\n");
11768 return 1;
11769 }
11770
11771 /* Allow -maltivec -mabi=no-altivec without warning. Altivec vector
11772 modes only exist for GCC vector types if -maltivec. */
11773 if (TARGET_32BIT && !TARGET_ALTIVEC_ABI && ALTIVEC_VECTOR_MODE (mode))
11774 {
11775 if (TARGET_DEBUG_ARG)
11776 fprintf (stderr, "function_arg_pass_by_reference: AltiVec\n");
11777 return 1;
11778 }
11779
11780 /* Pass synthetic vectors in memory. */
11781 if (TREE_CODE (type) == VECTOR_TYPE
11782 && int_size_in_bytes (type) > (TARGET_ALTIVEC_ABI ? 16 : 8))
11783 {
11784 static bool warned_for_pass_big_vectors = false;
11785 if (TARGET_DEBUG_ARG)
11786 fprintf (stderr, "function_arg_pass_by_reference: synthetic vector\n");
11787 if (!warned_for_pass_big_vectors)
11788 {
11789 warning (OPT_Wpsabi, "GCC vector passed by reference: "
11790 "non-standard ABI extension with no compatibility "
11791 "guarantee");
11792 warned_for_pass_big_vectors = true;
11793 }
11794 return 1;
11795 }
11796
11797 return 0;
11798 }
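
/* Editorial examples for a 32-bit ABI_V4 target:

     struct S { int a, b; } s;                  // aggregate: by reference
     long double ld;                            // with -mabi=ieeelongdouble:
                                                //   by reference
     int v __attribute__ ((vector_size (32)));  // synthetic vector:
                                                //   by reference (-Wpsabi)
     double d;                                  // by value  */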
11799
11800 /* Process parameter of type TYPE after ARGS_SO_FAR parameters were
11801 already processed. Return true if the parameter must be passed
11802 (fully or partially) on the stack. */
11803
11804 static bool
11805 rs6000_parm_needs_stack (cumulative_args_t args_so_far, tree type)
11806 {
11807 machine_mode mode;
11808 int unsignedp;
11809 rtx entry_parm;
11810
11811 /* Catch errors. */
11812 if (type == NULL || type == error_mark_node)
11813 return true;
11814
11815 /* Handle types with no storage requirement. */
11816 if (TYPE_MODE (type) == VOIDmode)
11817 return false;
11818
11819 /* Handle complex types. A complex value is passed as two parts of the component type, so deliberately check (and advance ARGS_SO_FAR over) each half; hence the two identical calls below. */
11820 if (TREE_CODE (type) == COMPLEX_TYPE)
11821 return (rs6000_parm_needs_stack (args_so_far, TREE_TYPE (type))
11822 || rs6000_parm_needs_stack (args_so_far, TREE_TYPE (type)));
11823
11824 /* Handle transparent aggregates. */
11825 if ((TREE_CODE (type) == UNION_TYPE || TREE_CODE (type) == RECORD_TYPE)
11826 && TYPE_TRANSPARENT_AGGR (type))
11827 type = TREE_TYPE (first_field (type));
11828
11829 /* See if this arg was passed by invisible reference. */
11830 if (pass_by_reference (get_cumulative_args (args_so_far),
11831 TYPE_MODE (type), type, true))
11832 type = build_pointer_type (type);
11833
11834 /* Find mode as it is passed by the ABI. */
11835 unsignedp = TYPE_UNSIGNED (type);
11836 mode = promote_mode (type, TYPE_MODE (type), &unsignedp);
11837
11838 /* If we must pass in stack, we need a stack. */
11839 if (rs6000_must_pass_in_stack (mode, type))
11840 return true;
11841
11842 /* If there is no incoming register, we need a stack. */
11843 entry_parm = rs6000_function_arg (args_so_far, mode, type, true);
11844 if (entry_parm == NULL)
11845 return true;
11846
11847 /* Likewise if we need to pass both in registers and on the stack. */
11848 if (GET_CODE (entry_parm) == PARALLEL
11849 && XEXP (XVECEXP (entry_parm, 0, 0), 0) == NULL_RTX)
11850 return true;
11851
11852 /* Also true if we're partially in registers and partially not. */
11853 if (rs6000_arg_partial_bytes (args_so_far, mode, type, true) != 0)
11854 return true;
11855
11856 /* Update info on where next arg arrives in registers. */
11857 rs6000_function_arg_advance (args_so_far, mode, type, true);
11858 return false;
11859 }
11860
11861 /* Return true if FUN has no prototype, has a variable argument
11862 list, or passes any parameter in memory. */
11863
11864 static bool
11865 rs6000_function_parms_need_stack (tree fun, bool incoming)
11866 {
11867 tree fntype, result;
11868 CUMULATIVE_ARGS args_so_far_v;
11869 cumulative_args_t args_so_far;
11870
11871 if (!fun)
11872 /* Must be a libcall; libcalls use only reg parms. */
11873 return false;
11874
11875 fntype = fun;
11876 if (!TYPE_P (fun))
11877 fntype = TREE_TYPE (fun);
11878
11879 /* Varargs functions need the parameter save area. */
11880 if ((!incoming && !prototype_p (fntype)) || stdarg_p (fntype))
11881 return true;
11882
11883 INIT_CUMULATIVE_INCOMING_ARGS (args_so_far_v, fntype, NULL_RTX);
11884 args_so_far = pack_cumulative_args (&args_so_far_v);
11885
11886 /* When incoming, we will have been passed the function decl.
11887 It is necessary to use the decl to handle K&R style functions,
11888 where TYPE_ARG_TYPES may not be available. */
11889 if (incoming)
11890 {
11891 gcc_assert (DECL_P (fun));
11892 result = DECL_RESULT (fun);
11893 }
11894 else
11895 result = TREE_TYPE (fntype);
11896
11897 if (result && aggregate_value_p (result, fntype))
11898 {
11899 if (!TYPE_P (result))
11900 result = TREE_TYPE (result);
11901 result = build_pointer_type (result);
11902 rs6000_parm_needs_stack (args_so_far, result);
11903 }
11904
11905 if (incoming)
11906 {
11907 tree parm;
11908
11909 for (parm = DECL_ARGUMENTS (fun);
11910 parm && parm != void_list_node;
11911 parm = TREE_CHAIN (parm))
11912 if (rs6000_parm_needs_stack (args_so_far, TREE_TYPE (parm)))
11913 return true;
11914 }
11915 else
11916 {
11917 function_args_iterator args_iter;
11918 tree arg_type;
11919
11920 FOREACH_FUNCTION_ARGS (fntype, arg_type, args_iter)
11921 if (rs6000_parm_needs_stack (args_so_far, arg_type))
11922 return true;
11923 }
11924
11925 return false;
11926 }
11927
11928 /* Return the size of the REG_PARM_STACK_SPACE area for FUN. This is
11929 usually a constant depending on the ABI. However, in the ELFv2 ABI
11930 the register parameter area is optional when calling a function that
11931 has a prototype in scope, has no variable argument list, and passes
11932 all parameters in registers. */
11933
11934 int
11935 rs6000_reg_parm_stack_space (tree fun, bool incoming)
11936 {
11937 int reg_parm_stack_space;
11938
11939 switch (DEFAULT_ABI)
11940 {
11941 default:
11942 reg_parm_stack_space = 0;
11943 break;
11944
11945 case ABI_AIX:
11946 case ABI_DARWIN:
11947 reg_parm_stack_space = TARGET_64BIT ? 64 : 32;
11948 break;
11949
11950 case ABI_ELFv2:
11951 /* ??? Recomputing this every time is a bit expensive. Is there
11952 a place to cache this information? */
11953 if (rs6000_function_parms_need_stack (fun, incoming))
11954 reg_parm_stack_space = TARGET_64BIT ? 64 : 32;
11955 else
11956 reg_parm_stack_space = 0;
11957 break;
11958 }
11959
11960 return reg_parm_stack_space;
11961 }
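
/* Editorial illustration: 64-bit AIX and Darwin always reserve
   64 bytes (8 doublewords) here, while ELFv2 reserves those 64 bytes
   only for calls that might need the parameter save area, e.g.
   unprototyped or varargs callees, and 0 bytes for a prototyped call
   whose arguments all travel in registers.  */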
11962
11963 static void
11964 rs6000_move_block_from_reg (int regno, rtx x, int nregs)
11965 {
11966 int i;
11967 machine_mode reg_mode = TARGET_32BIT ? SImode : DImode;
11968
11969 if (nregs == 0)
11970 return;
11971
11972 for (i = 0; i < nregs; i++)
11973 {
11974 rtx tem = adjust_address_nv (x, reg_mode, i * GET_MODE_SIZE (reg_mode));
11975 if (reload_completed)
11976 {
11977 if (! strict_memory_address_p (reg_mode, XEXP (tem, 0)))
11978 tem = NULL_RTX;
11979 else
11980 tem = simplify_gen_subreg (reg_mode, x, BLKmode,
11981 i * GET_MODE_SIZE (reg_mode));
11982 }
11983 else
11984 tem = replace_equiv_address (tem, XEXP (tem, 0));
11985
11986 gcc_assert (tem);
11987
11988 emit_move_insn (tem, gen_rtx_REG (reg_mode, regno + i));
11989 }
11990 }
11991 \f
11992 /* Perform any actions needed for a function that is receiving a
11993 variable number of arguments.
11994
11995 CUM is as above.
11996
11997 MODE and TYPE are the mode and type of the current parameter.
11998
11999 PRETEND_SIZE is a variable that should be set to the amount of stack
12000 that must be pushed by the prolog to pretend that our caller pushed
12001 it.
12002
12003 Normally, this macro will push all remaining incoming registers on the
12004 stack and set PRETEND_SIZE to the length of the registers pushed. */
12005
12006 static void
12007 setup_incoming_varargs (cumulative_args_t cum, machine_mode mode,
12008 tree type, int *pretend_size ATTRIBUTE_UNUSED,
12009 int no_rtl)
12010 {
12011 CUMULATIVE_ARGS next_cum;
12012 int reg_size = TARGET_32BIT ? 4 : 8;
12013 rtx save_area = NULL_RTX, mem;
12014 int first_reg_offset;
12015 alias_set_type set;
12016
12017 /* Skip the last named argument. */
12018 next_cum = *get_cumulative_args (cum);
12019 rs6000_function_arg_advance_1 (&next_cum, mode, type, true, 0);
12020
12021 if (DEFAULT_ABI == ABI_V4)
12022 {
12023 first_reg_offset = next_cum.sysv_gregno - GP_ARG_MIN_REG;
12024
12025 if (! no_rtl)
12026 {
12027 int gpr_reg_num = 0, gpr_size = 0, fpr_size = 0;
12028 HOST_WIDE_INT offset = 0;
12029
12030 /* Try to optimize the size of the varargs save area.
12031 The ABI requires that ap.reg_save_area is doubleword
12032 aligned, but we don't need to allocate space for all
12033 the bytes, only those to which we will actually save
12034 anything. */
12035 if (cfun->va_list_gpr_size && first_reg_offset < GP_ARG_NUM_REG)
12036 gpr_reg_num = GP_ARG_NUM_REG - first_reg_offset;
12037 if (TARGET_HARD_FLOAT
12038 && next_cum.fregno <= FP_ARG_V4_MAX_REG
12039 && cfun->va_list_fpr_size)
12040 {
12041 if (gpr_reg_num)
12042 fpr_size = (next_cum.fregno - FP_ARG_MIN_REG)
12043 * UNITS_PER_FP_WORD;
12044 if (cfun->va_list_fpr_size
12045 < FP_ARG_V4_MAX_REG + 1 - next_cum.fregno)
12046 fpr_size += cfun->va_list_fpr_size * UNITS_PER_FP_WORD;
12047 else
12048 fpr_size += (FP_ARG_V4_MAX_REG + 1 - next_cum.fregno)
12049 * UNITS_PER_FP_WORD;
12050 }
12051 if (gpr_reg_num)
12052 {
12053 offset = -((first_reg_offset * reg_size) & ~7);
12054 if (!fpr_size && gpr_reg_num > cfun->va_list_gpr_size)
12055 {
12056 gpr_reg_num = cfun->va_list_gpr_size;
12057 if (reg_size == 4 && (first_reg_offset & 1))
12058 gpr_reg_num++;
12059 }
12060 gpr_size = (gpr_reg_num * reg_size + 7) & ~7;
12061 }
12062 else if (fpr_size)
12063 offset = - (int) (next_cum.fregno - FP_ARG_MIN_REG)
12064 * UNITS_PER_FP_WORD
12065 - (int) (GP_ARG_NUM_REG * reg_size);
12066
12067 if (gpr_size + fpr_size)
12068 {
12069 rtx reg_save_area
12070 = assign_stack_local (BLKmode, gpr_size + fpr_size, 64);
12071 gcc_assert (MEM_P (reg_save_area));
12072 reg_save_area = XEXP (reg_save_area, 0);
12073 if (GET_CODE (reg_save_area) == PLUS)
12074 {
12075 gcc_assert (XEXP (reg_save_area, 0)
12076 == virtual_stack_vars_rtx);
12077 gcc_assert (CONST_INT_P (XEXP (reg_save_area, 1)));
12078 offset += INTVAL (XEXP (reg_save_area, 1));
12079 }
12080 else
12081 gcc_assert (reg_save_area == virtual_stack_vars_rtx);
12082 }
12083
12084 cfun->machine->varargs_save_offset = offset;
12085 save_area = plus_constant (Pmode, virtual_stack_vars_rtx, offset);
12086 }
12087 }
12088 else
12089 {
12090 first_reg_offset = next_cum.words;
12091 save_area = crtl->args.internal_arg_pointer;
12092
12093 if (targetm.calls.must_pass_in_stack (mode, type))
12094 first_reg_offset += rs6000_arg_size (TYPE_MODE (type), type);
12095 }
12096
12097 set = get_varargs_alias_set ();
12098 if (! no_rtl && first_reg_offset < GP_ARG_NUM_REG
12099 && cfun->va_list_gpr_size)
12100 {
12101 int n_gpr, nregs = GP_ARG_NUM_REG - first_reg_offset;
12102
12103 if (va_list_gpr_counter_field)
12104 /* V4 va_list_gpr_size counts number of registers needed. */
12105 n_gpr = cfun->va_list_gpr_size;
12106 else
12107 /* char * va_list instead counts number of bytes needed. */
12108 n_gpr = (cfun->va_list_gpr_size + reg_size - 1) / reg_size;
12109
12110 if (nregs > n_gpr)
12111 nregs = n_gpr;
12112
12113 mem = gen_rtx_MEM (BLKmode,
12114 plus_constant (Pmode, save_area,
12115 first_reg_offset * reg_size));
12116 MEM_NOTRAP_P (mem) = 1;
12117 set_mem_alias_set (mem, set);
12118 set_mem_align (mem, BITS_PER_WORD);
12119
12120 rs6000_move_block_from_reg (GP_ARG_MIN_REG + first_reg_offset, mem,
12121 nregs);
12122 }
12123
12124 /* Save FP registers if needed. */
12125 if (DEFAULT_ABI == ABI_V4
12126 && TARGET_HARD_FLOAT
12127 && ! no_rtl
12128 && next_cum.fregno <= FP_ARG_V4_MAX_REG
12129 && cfun->va_list_fpr_size)
12130 {
12131 int fregno = next_cum.fregno, nregs;
12132 rtx cr1 = gen_rtx_REG (CCmode, CR1_REGNO);
12133 rtx lab = gen_label_rtx ();
12134 int off = (GP_ARG_NUM_REG * reg_size) + ((fregno - FP_ARG_MIN_REG)
12135 * UNITS_PER_FP_WORD);
12136
12137 emit_jump_insn
12138 (gen_rtx_SET (pc_rtx,
12139 gen_rtx_IF_THEN_ELSE (VOIDmode,
12140 gen_rtx_NE (VOIDmode, cr1,
12141 const0_rtx),
12142 gen_rtx_LABEL_REF (VOIDmode, lab),
12143 pc_rtx)));
12144
12145 for (nregs = 0;
12146 fregno <= FP_ARG_V4_MAX_REG && nregs < cfun->va_list_fpr_size;
12147 fregno++, off += UNITS_PER_FP_WORD, nregs++)
12148 {
12149 mem = gen_rtx_MEM (TARGET_HARD_FLOAT ? DFmode : SFmode,
12150 plus_constant (Pmode, save_area, off));
12151 MEM_NOTRAP_P (mem) = 1;
12152 set_mem_alias_set (mem, set);
12153 set_mem_align (mem, GET_MODE_ALIGNMENT (
12154 TARGET_HARD_FLOAT ? DFmode : SFmode));
12155 emit_move_insn (mem, gen_rtx_REG (
12156 TARGET_HARD_FLOAT ? DFmode : SFmode, fregno));
12157 }
12158
12159 emit_label (lab);
12160 }
12161 }
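
/* Editorial sketch of the ABI_V4 register save area laid out above,
   assuming hard float and all registers saved:

     offset  0: r3..r10, GP_ARG_NUM_REG * reg_size = 8 * 4 = 32 bytes
     offset 32: f1..f8, 8 * UNITS_PER_FP_WORD = 8 * 8 = 64 bytes

   which matches the `off' computation used for the FP loop above.  */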
12162
12163 /* Create the va_list data type. */
12164
12165 static tree
12166 rs6000_build_builtin_va_list (void)
12167 {
12168 tree f_gpr, f_fpr, f_res, f_ovf, f_sav, record, type_decl;
12169
12170 /* For AIX, prefer 'char *' because that's what the system
12171 header files like. */
12172 if (DEFAULT_ABI != ABI_V4)
12173 return build_pointer_type (char_type_node);
12174
12175 record = (*lang_hooks.types.make_type) (RECORD_TYPE);
12176 type_decl = build_decl (BUILTINS_LOCATION, TYPE_DECL,
12177 get_identifier ("__va_list_tag"), record);
12178
12179 f_gpr = build_decl (BUILTINS_LOCATION, FIELD_DECL, get_identifier ("gpr"),
12180 unsigned_char_type_node);
12181 f_fpr = build_decl (BUILTINS_LOCATION, FIELD_DECL, get_identifier ("fpr"),
12182 unsigned_char_type_node);
12183 /* Give the two bytes of padding a name, so that -Wpadded won't warn on
12184 every user file. */
12185 f_res = build_decl (BUILTINS_LOCATION, FIELD_DECL,
12186 get_identifier ("reserved"), short_unsigned_type_node);
12187 f_ovf = build_decl (BUILTINS_LOCATION, FIELD_DECL,
12188 get_identifier ("overflow_arg_area"),
12189 ptr_type_node);
12190 f_sav = build_decl (BUILTINS_LOCATION, FIELD_DECL,
12191 get_identifier ("reg_save_area"),
12192 ptr_type_node);
12193
12194 va_list_gpr_counter_field = f_gpr;
12195 va_list_fpr_counter_field = f_fpr;
12196
12197 DECL_FIELD_CONTEXT (f_gpr) = record;
12198 DECL_FIELD_CONTEXT (f_fpr) = record;
12199 DECL_FIELD_CONTEXT (f_res) = record;
12200 DECL_FIELD_CONTEXT (f_ovf) = record;
12201 DECL_FIELD_CONTEXT (f_sav) = record;
12202
12203 TYPE_STUB_DECL (record) = type_decl;
12204 TYPE_NAME (record) = type_decl;
12205 TYPE_FIELDS (record) = f_gpr;
12206 DECL_CHAIN (f_gpr) = f_fpr;
12207 DECL_CHAIN (f_fpr) = f_res;
12208 DECL_CHAIN (f_res) = f_ovf;
12209 DECL_CHAIN (f_ovf) = f_sav;
12210
12211 layout_type (record);
12212
12213 /* The correct type is an array type of one element. */
12214 return build_array_type (record, build_index_type (size_zero_node));
12215 }
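
/* Illustrative only: the record built above corresponds to this
   user-visible V4 va_list sketch (field names match the FIELD_DECLs
   created here; the counters refer to the 8 argument GPRs/FPRs):

     typedef struct __va_list_tag
     {
       unsigned char gpr;          // count of GP arg registers used
       unsigned char fpr;          // count of FP arg registers used
       unsigned short reserved;    // named padding, silences -Wpadded
       void *overflow_arg_area;    // next argument passed on the stack
       void *reg_save_area;        // base of the register save area
     } va_list[1];                 // array of one element, as returned
*/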
12216
12217 /* Implement va_start. */
12218
12219 static void
12220 rs6000_va_start (tree valist, rtx nextarg)
12221 {
12222 HOST_WIDE_INT words, n_gpr, n_fpr;
12223 tree f_gpr, f_fpr, f_res, f_ovf, f_sav;
12224 tree gpr, fpr, ovf, sav, t;
12225
12226 /* Only SVR4 needs something special. */
12227 if (DEFAULT_ABI != ABI_V4)
12228 {
12229 std_expand_builtin_va_start (valist, nextarg);
12230 return;
12231 }
12232
12233 f_gpr = TYPE_FIELDS (TREE_TYPE (va_list_type_node));
12234 f_fpr = DECL_CHAIN (f_gpr);
12235 f_res = DECL_CHAIN (f_fpr);
12236 f_ovf = DECL_CHAIN (f_res);
12237 f_sav = DECL_CHAIN (f_ovf);
12238
12239 valist = build_simple_mem_ref (valist);
12240 gpr = build3 (COMPONENT_REF, TREE_TYPE (f_gpr), valist, f_gpr, NULL_TREE);
12241 fpr = build3 (COMPONENT_REF, TREE_TYPE (f_fpr), unshare_expr (valist),
12242 f_fpr, NULL_TREE);
12243 ovf = build3 (COMPONENT_REF, TREE_TYPE (f_ovf), unshare_expr (valist),
12244 f_ovf, NULL_TREE);
12245 sav = build3 (COMPONENT_REF, TREE_TYPE (f_sav), unshare_expr (valist),
12246 f_sav, NULL_TREE);
12247
12248 /* Count number of gp and fp argument registers used. */
12249 words = crtl->args.info.words;
12250 n_gpr = MIN (crtl->args.info.sysv_gregno - GP_ARG_MIN_REG,
12251 GP_ARG_NUM_REG);
12252 n_fpr = MIN (crtl->args.info.fregno - FP_ARG_MIN_REG,
12253 FP_ARG_NUM_REG);
12254
12255 if (TARGET_DEBUG_ARG)
12256 fprintf (stderr, "va_start: words = " HOST_WIDE_INT_PRINT_DEC", n_gpr = "
12257 HOST_WIDE_INT_PRINT_DEC", n_fpr = " HOST_WIDE_INT_PRINT_DEC"\n",
12258 words, n_gpr, n_fpr);
12259
12260 if (cfun->va_list_gpr_size)
12261 {
12262 t = build2 (MODIFY_EXPR, TREE_TYPE (gpr), gpr,
12263 build_int_cst (NULL_TREE, n_gpr));
12264 TREE_SIDE_EFFECTS (t) = 1;
12265 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
12266 }
12267
12268 if (cfun->va_list_fpr_size)
12269 {
12270 t = build2 (MODIFY_EXPR, TREE_TYPE (fpr), fpr,
12271 build_int_cst (NULL_TREE, n_fpr));
12272 TREE_SIDE_EFFECTS (t) = 1;
12273 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
12274
12275 #ifdef HAVE_AS_GNU_ATTRIBUTE
12276 if (call_ABI_of_interest (cfun->decl))
12277 rs6000_passes_float = true;
12278 #endif
12279 }
12280
12281 /* Find the overflow area. */
12282 t = make_tree (TREE_TYPE (ovf), crtl->args.internal_arg_pointer);
12283 if (words != 0)
12284 t = fold_build_pointer_plus_hwi (t, words * MIN_UNITS_PER_WORD);
12285 t = build2 (MODIFY_EXPR, TREE_TYPE (ovf), ovf, t);
12286 TREE_SIDE_EFFECTS (t) = 1;
12287 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
12288
12289 /* If there were no va_arg invocations, don't set up the register
12290 save area. */
12291 if (!cfun->va_list_gpr_size
12292 && !cfun->va_list_fpr_size
12293 && n_gpr < GP_ARG_NUM_REG
12294 && n_fpr < FP_ARG_V4_MAX_REG)
12295 return;
12296
12297 /* Find the register save area. */
12298 t = make_tree (TREE_TYPE (sav), virtual_stack_vars_rtx);
12299 if (cfun->machine->varargs_save_offset)
12300 t = fold_build_pointer_plus_hwi (t, cfun->machine->varargs_save_offset);
12301 t = build2 (MODIFY_EXPR, TREE_TYPE (sav), sav, t);
12302 TREE_SIDE_EFFECTS (t) = 1;
12303 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
12304 }
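
/* A rough pseudo-C sketch of the V4 expansion above, assuming the
   va_list fields from rs6000_build_builtin_va_list; the arithmetic
   mirrors the trees emitted by this function:

     ap->gpr = n_gpr;    // GP argument registers already consumed
     ap->fpr = n_fpr;    // FP argument registers already consumed
     ap->overflow_arg_area
       = incoming_arg_pointer + words * MIN_UNITS_PER_WORD;
     ap->reg_save_area
       = frame_base + cfun->machine->varargs_save_offset;
*/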
12305
12306 /* Implement va_arg. */
12307
12308 static tree
12309 rs6000_gimplify_va_arg (tree valist, tree type, gimple_seq *pre_p,
12310 gimple_seq *post_p)
12311 {
12312 tree f_gpr, f_fpr, f_res, f_ovf, f_sav;
12313 tree gpr, fpr, ovf, sav, reg, t, u;
12314 int size, rsize, n_reg, sav_ofs, sav_scale;
12315 tree lab_false, lab_over, addr;
12316 int align;
12317 tree ptrtype = build_pointer_type_for_mode (type, ptr_mode, true);
12318 int regalign = 0;
12319 gimple *stmt;
12320
12321 if (pass_by_reference (NULL, TYPE_MODE (type), type, false))
12322 {
12323 t = rs6000_gimplify_va_arg (valist, ptrtype, pre_p, post_p);
12324 return build_va_arg_indirect_ref (t);
12325 }
12326
12327 /* We need to deal with the fact that the darwin ppc64 ABI is defined by an
12328 earlier version of gcc, with the property that it always applied alignment
12329 adjustments to the va-args (even for zero-sized types). The cheapest way
12330 to deal with this is to replicate the effect of the part of
12331 std_gimplify_va_arg_expr that carries out the align adjust, for the case
12332 of relevance.
12333 We don't need to check for pass-by-reference because of the test above.
12334 We can return a simplified answer, since we know there's no offset to add. */
12335
12336 if (((TARGET_MACHO
12337 && rs6000_darwin64_abi)
12338 || DEFAULT_ABI == ABI_ELFv2
12339 || (DEFAULT_ABI == ABI_AIX && !rs6000_compat_align_parm))
12340 && integer_zerop (TYPE_SIZE (type)))
12341 {
12342 unsigned HOST_WIDE_INT align, boundary;
12343 tree valist_tmp = get_initialized_tmp_var (valist, pre_p, NULL);
12344 align = PARM_BOUNDARY / BITS_PER_UNIT;
12345 boundary = rs6000_function_arg_boundary (TYPE_MODE (type), type);
12346 if (boundary > MAX_SUPPORTED_STACK_ALIGNMENT)
12347 boundary = MAX_SUPPORTED_STACK_ALIGNMENT;
12348 boundary /= BITS_PER_UNIT;
12349 if (boundary > align)
12350 {
12351 tree t;
12352 /* This updates arg ptr by the amount that would be necessary
12353 to align the zero-sized (but not zero-alignment) item. */
12354 t = build2 (MODIFY_EXPR, TREE_TYPE (valist), valist_tmp,
12355 fold_build_pointer_plus_hwi (valist_tmp, boundary - 1));
12356 gimplify_and_add (t, pre_p);
12357
12358 t = fold_convert (sizetype, valist_tmp);
12359 t = build2 (MODIFY_EXPR, TREE_TYPE (valist), valist_tmp,
12360 fold_convert (TREE_TYPE (valist),
12361 fold_build2 (BIT_AND_EXPR, sizetype, t,
12362 size_int (-boundary))));
12363 t = build2 (MODIFY_EXPR, TREE_TYPE (valist), valist, t);
12364 gimplify_and_add (t, pre_p);
12365 }
12366 /* Since it is zero-sized there's no increment for the item itself. */
12367 valist_tmp = fold_convert (build_pointer_type (type), valist_tmp);
12368 return build_va_arg_indirect_ref (valist_tmp);
12369 }
12370
12371 if (DEFAULT_ABI != ABI_V4)
12372 {
12373 if (targetm.calls.split_complex_arg && TREE_CODE (type) == COMPLEX_TYPE)
12374 {
12375 tree elem_type = TREE_TYPE (type);
12376 machine_mode elem_mode = TYPE_MODE (elem_type);
12377 int elem_size = GET_MODE_SIZE (elem_mode);
12378
12379 if (elem_size < UNITS_PER_WORD)
12380 {
12381 tree real_part, imag_part;
12382 gimple_seq post = NULL;
12383
12384 real_part = rs6000_gimplify_va_arg (valist, elem_type, pre_p,
12385 &post);
12386 /* Copy the value into a temporary, lest the formal temporary
12387 be reused out from under us. */
12388 real_part = get_initialized_tmp_var (real_part, pre_p, &post);
12389 gimple_seq_add_seq (pre_p, post);
12390
12391 imag_part = rs6000_gimplify_va_arg (valist, elem_type, pre_p,
12392 post_p);
12393
12394 return build2 (COMPLEX_EXPR, type, real_part, imag_part);
12395 }
12396 }
12397
12398 return std_gimplify_va_arg_expr (valist, type, pre_p, post_p);
12399 }
12400
12401 f_gpr = TYPE_FIELDS (TREE_TYPE (va_list_type_node));
12402 f_fpr = DECL_CHAIN (f_gpr);
12403 f_res = DECL_CHAIN (f_fpr);
12404 f_ovf = DECL_CHAIN (f_res);
12405 f_sav = DECL_CHAIN (f_ovf);
12406
12407 gpr = build3 (COMPONENT_REF, TREE_TYPE (f_gpr), valist, f_gpr, NULL_TREE);
12408 fpr = build3 (COMPONENT_REF, TREE_TYPE (f_fpr), unshare_expr (valist),
12409 f_fpr, NULL_TREE);
12410 ovf = build3 (COMPONENT_REF, TREE_TYPE (f_ovf), unshare_expr (valist),
12411 f_ovf, NULL_TREE);
12412 sav = build3 (COMPONENT_REF, TREE_TYPE (f_sav), unshare_expr (valist),
12413 f_sav, NULL_TREE);
12414
12415 size = int_size_in_bytes (type);
12416 rsize = (size + 3) / 4;
12417 int pad = 4 * rsize - size;
12418 align = 1;
12419
12420 machine_mode mode = TYPE_MODE (type);
12421 if (abi_v4_pass_in_fpr (mode, false))
12422 {
12423 /* FP args go in FP registers, if present. */
12424 reg = fpr;
12425 n_reg = (size + 7) / 8;
12426 sav_ofs = (TARGET_HARD_FLOAT ? 8 : 4) * 4;
12427 sav_scale = (TARGET_HARD_FLOAT ? 8 : 4);
12428 if (mode != SFmode && mode != SDmode)
12429 align = 8;
12430 }
12431 else
12432 {
12433 /* Otherwise into GP registers. */
12434 reg = gpr;
12435 n_reg = rsize;
12436 sav_ofs = 0;
12437 sav_scale = 4;
12438 if (n_reg == 2)
12439 align = 8;
12440 }
12441
12442 /* Pull the value out of the saved registers.... */
12443
12444 lab_over = NULL;
12445 addr = create_tmp_var (ptr_type_node, "addr");
12446
12447 /* AltiVec vectors never go in registers when -mabi=altivec. */
12448 if (TARGET_ALTIVEC_ABI && ALTIVEC_VECTOR_MODE (mode))
12449 align = 16;
12450 else
12451 {
12452 lab_false = create_artificial_label (input_location);
12453 lab_over = create_artificial_label (input_location);
12454
12455 /* Long long is aligned in the registers, as is any other 2-GPR
12456 item such as complex int, due to a historical mistake. */
12457 u = reg;
12458 if (n_reg == 2 && reg == gpr)
12459 {
12460 regalign = 1;
12461 u = build2 (BIT_AND_EXPR, TREE_TYPE (reg), unshare_expr (reg),
12462 build_int_cst (TREE_TYPE (reg), n_reg - 1));
12463 u = build2 (POSTINCREMENT_EXPR, TREE_TYPE (reg),
12464 unshare_expr (reg), u);
12465 }
12466 /* _Decimal128 is passed in even/odd fpr pairs; the stored
12467 reg number is 0 for f1, so we want to make it odd. */
12468 else if (reg == fpr && mode == TDmode)
12469 {
12470 t = build2 (BIT_IOR_EXPR, TREE_TYPE (reg), unshare_expr (reg),
12471 build_int_cst (TREE_TYPE (reg), 1));
12472 u = build2 (MODIFY_EXPR, void_type_node, unshare_expr (reg), t);
12473 }
12474
12475 t = fold_convert (TREE_TYPE (reg), size_int (8 - n_reg + 1));
12476 t = build2 (GE_EXPR, boolean_type_node, u, t);
12477 u = build1 (GOTO_EXPR, void_type_node, lab_false);
12478 t = build3 (COND_EXPR, void_type_node, t, u, NULL_TREE);
12479 gimplify_and_add (t, pre_p);
12480
12481 t = sav;
12482 if (sav_ofs)
12483 t = fold_build_pointer_plus_hwi (sav, sav_ofs);
12484
12485 u = build2 (POSTINCREMENT_EXPR, TREE_TYPE (reg), unshare_expr (reg),
12486 build_int_cst (TREE_TYPE (reg), n_reg));
12487 u = fold_convert (sizetype, u);
12488 u = build2 (MULT_EXPR, sizetype, u, size_int (sav_scale));
12489 t = fold_build_pointer_plus (t, u);
12490
12491 /* _Decimal32 varargs are located in the second word of the 64-bit
12492 FP register for 32-bit binaries. */
12493 if (TARGET_32BIT && TARGET_HARD_FLOAT && mode == SDmode)
12494 t = fold_build_pointer_plus_hwi (t, size);
12495
12496 /* Args are passed right-aligned. */
12497 if (BYTES_BIG_ENDIAN)
12498 t = fold_build_pointer_plus_hwi (t, pad);
12499
12500 gimplify_assign (addr, t, pre_p);
12501
12502 gimple_seq_add_stmt (pre_p, gimple_build_goto (lab_over));
12503
12504 stmt = gimple_build_label (lab_false);
12505 gimple_seq_add_stmt (pre_p, stmt);
12506
12507 if ((n_reg == 2 && !regalign) || n_reg > 2)
12508 {
12509 /* Ensure that we don't find any more args in regs.
12510 Alignment has been taken care of for the special cases. */
12511 gimplify_assign (reg, build_int_cst (TREE_TYPE (reg), 8), pre_p);
12512 }
12513 }
12514
12515 /* ... otherwise out of the overflow area. */
12516
12517 /* Care for on-stack alignment if needed. */
12518 t = ovf;
12519 if (align != 1)
12520 {
12521 t = fold_build_pointer_plus_hwi (t, align - 1);
12522 t = build2 (BIT_AND_EXPR, TREE_TYPE (t), t,
12523 build_int_cst (TREE_TYPE (t), -align));
12524 }
12525
12526 /* Args are passed right-aligned. */
12527 if (BYTES_BIG_ENDIAN)
12528 t = fold_build_pointer_plus_hwi (t, pad);
12529
12530 gimplify_expr (&t, pre_p, NULL, is_gimple_val, fb_rvalue);
12531
12532 gimplify_assign (unshare_expr (addr), t, pre_p);
12533
12534 t = fold_build_pointer_plus_hwi (t, size);
12535 gimplify_assign (unshare_expr (ovf), t, pre_p);
12536
12537 if (lab_over)
12538 {
12539 stmt = gimple_build_label (lab_over);
12540 gimple_seq_add_stmt (pre_p, stmt);
12541 }
12542
12543 if (STRICT_ALIGNMENT
12544 && (TYPE_ALIGN (type)
12545 > (unsigned) BITS_PER_UNIT * (align < 4 ? 4 : align)))
12546 {
12547 /* The value (of type complex double, for example) may not be
12548 aligned in memory in the saved registers, so copy via a
12549 temporary. (This is the same code as used for SPARC.) */
12550 tree tmp = create_tmp_var (type, "va_arg_tmp");
12551 tree dest_addr = build_fold_addr_expr (tmp);
12552
12553 tree copy = build_call_expr (builtin_decl_implicit (BUILT_IN_MEMCPY),
12554 3, dest_addr, addr, size_int (rsize * 4));
12555 TREE_ADDRESSABLE (tmp) = 1;
12556
12557 gimplify_and_add (copy, pre_p);
12558 addr = dest_addr;
12559 }
12560
12561 addr = fold_convert (ptrtype, addr);
12562 return build_va_arg_indirect_ref (addr);
12563 }
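
/* For illustration only: for a word-sized integer (n_reg == 1, so the
   register path is taken while gpr < 8), the gimple emitted above
   behaves roughly like this pseudo-C:

     if (ap->gpr >= 8)                          // lab_false: no regs left
       {
         char *p = align (ap->overflow_arg_area, align);
         ap->overflow_arg_area = p + size;      // bump past the argument
         addr = p;
       }
     else
       addr = ap->reg_save_area + sav_ofs + ap->gpr++ * sav_scale;
     return *(type *) addr;                     // plus the BE pad, if any
*/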
12564
12565 /* Builtins. */
12566
12567 static void
12568 def_builtin (const char *name, tree type, enum rs6000_builtins code)
12569 {
12570 tree t;
12571 unsigned classify = rs6000_builtin_info[(int)code].attr;
12572 const char *attr_string = "";
12573
12574 gcc_assert (name != NULL);
12575 gcc_assert (IN_RANGE ((int)code, 0, (int)RS6000_BUILTIN_COUNT));
12576
12577 if (rs6000_builtin_decls[(int)code])
12578 fatal_error (input_location,
12579 "internal error: builtin function %qs already processed",
12580 name);
12581
12582 rs6000_builtin_decls[(int)code] = t =
12583 add_builtin_function (name, type, (int)code, BUILT_IN_MD, NULL, NULL_TREE);
12584
12585 /* Set any special attributes. */
12586 if ((classify & RS6000_BTC_CONST) != 0)
12587 {
12588 /* const function, function only depends on the inputs. */
12589 TREE_READONLY (t) = 1;
12590 TREE_NOTHROW (t) = 1;
12591 attr_string = ", const";
12592 }
12593 else if ((classify & RS6000_BTC_PURE) != 0)
12594 {
12595 /* pure function, function can read global memory, but does not set any
12596 external state. */
12597 DECL_PURE_P (t) = 1;
12598 TREE_NOTHROW (t) = 1;
12599 attr_string = ", pure";
12600 }
12601 else if ((classify & RS6000_BTC_FP) != 0)
12602 {
12603 /* Function is a math function. If rounding mode is on, then treat the
12604 function as not reading global memory, but it can have arbitrary side
12605 effects. If it is off, then assume the function is a const function.
12606 This mimics the ATTR_MATHFN_FPROUNDING attribute in
12607 builtin-attribute.def that is used for the math functions. */
12608 TREE_NOTHROW (t) = 1;
12609 if (flag_rounding_math)
12610 {
12611 DECL_PURE_P (t) = 1;
12612 DECL_IS_NOVOPS (t) = 1;
12613 attr_string = ", fp, pure";
12614 }
12615 else
12616 {
12617 TREE_READONLY (t) = 1;
12618 attr_string = ", fp, const";
12619 }
12620 }
12621 else if ((classify & RS6000_BTC_ATTR_MASK) != 0)
12622 gcc_unreachable ();
12623
12624 if (TARGET_DEBUG_BUILTIN)
12625 fprintf (stderr, "rs6000_builtin, code = %4d, %s%s\n",
12626 (int)code, name, attr_string);
12627 }
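
/* Illustrative only (the builtin name and the type variable are made
   up): a registration such as

     def_builtin ("__builtin_altivec_foo", v4si_ftype_v4si_v4si,
                  ALTIVEC_BUILTIN_FOO);

   creates the decl via add_builtin_function and, when the builtin's
   attr word has RS6000_BTC_CONST set, marks it TREE_READONLY and
   TREE_NOTHROW so later passes may CSE or delete calls to it.  */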
12628
12629 /* Simple ternary operations: VECd = foo (VECa, VECb, VECc). */
12630
12631 #undef RS6000_BUILTIN_0
12632 #undef RS6000_BUILTIN_1
12633 #undef RS6000_BUILTIN_2
12634 #undef RS6000_BUILTIN_3
12635 #undef RS6000_BUILTIN_A
12636 #undef RS6000_BUILTIN_D
12637 #undef RS6000_BUILTIN_H
12638 #undef RS6000_BUILTIN_P
12639 #undef RS6000_BUILTIN_X
12640
12641 #define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE)
12642 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
12643 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
12644 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE) \
12645 { MASK, ICODE, NAME, ENUM },
12646
12647 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
12648 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
12649 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE)
12650 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
12651 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
12652
12653 static const struct builtin_description bdesc_3arg[] =
12654 {
12655 #include "rs6000-builtin.def"
12656 };
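
/* How the table above gets its contents: rs6000-builtin.def is included
   with only RS6000_BUILTIN_3 defined to emit an initializer, so every
   other builtin kind expands to nothing.  As a purely hypothetical
   illustration, an entry of the form

     RS6000_BUILTIN_3 (ALTIVEC_BUILTIN_FOO, "__builtin_altivec_foo",
                       MASK, ATTR, CODE_FOR_foo)

   would contribute the single element

     { MASK, CODE_FOR_foo, "__builtin_altivec_foo", ALTIVEC_BUILTIN_FOO },

   The same undef/redefine pattern builds each bdesc_* table below.  */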
12657
12658 /* DST operations: void foo (void *, const int, const char). */
12659
12660 #undef RS6000_BUILTIN_0
12661 #undef RS6000_BUILTIN_1
12662 #undef RS6000_BUILTIN_2
12663 #undef RS6000_BUILTIN_3
12664 #undef RS6000_BUILTIN_A
12665 #undef RS6000_BUILTIN_D
12666 #undef RS6000_BUILTIN_H
12667 #undef RS6000_BUILTIN_P
12668 #undef RS6000_BUILTIN_X
12669
12670 #define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE)
12671 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
12672 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
12673 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
12674 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
12675 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE) \
12676 { MASK, ICODE, NAME, ENUM },
12677
12678 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE)
12679 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
12680 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
12681
12682 static const struct builtin_description bdesc_dst[] =
12683 {
12684 #include "rs6000-builtin.def"
12685 };
12686
12687 /* Simple binary operations: VECc = foo (VECa, VECb). */
12688
12689 #undef RS6000_BUILTIN_0
12690 #undef RS6000_BUILTIN_1
12691 #undef RS6000_BUILTIN_2
12692 #undef RS6000_BUILTIN_3
12693 #undef RS6000_BUILTIN_A
12694 #undef RS6000_BUILTIN_D
12695 #undef RS6000_BUILTIN_H
12696 #undef RS6000_BUILTIN_P
12697 #undef RS6000_BUILTIN_X
12698
12699 #define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE)
12700 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
12701 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE) \
12702 { MASK, ICODE, NAME, ENUM },
12703
12704 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
12705 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
12706 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
12707 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE)
12708 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
12709 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
12710
12711 static const struct builtin_description bdesc_2arg[] =
12712 {
12713 #include "rs6000-builtin.def"
12714 };
12715
12716 #undef RS6000_BUILTIN_0
12717 #undef RS6000_BUILTIN_1
12718 #undef RS6000_BUILTIN_2
12719 #undef RS6000_BUILTIN_3
12720 #undef RS6000_BUILTIN_A
12721 #undef RS6000_BUILTIN_D
12722 #undef RS6000_BUILTIN_H
12723 #undef RS6000_BUILTIN_P
12724 #undef RS6000_BUILTIN_X
12725
12726 #define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE)
12727 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
12728 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
12729 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
12730 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
12731 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
12732 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE)
12733 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE) \
12734 { MASK, ICODE, NAME, ENUM },
12735
12736 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
12737
12738 /* AltiVec predicates. */
12739
12740 static const struct builtin_description bdesc_altivec_preds[] =
12741 {
12742 #include "rs6000-builtin.def"
12743 };
12744
12745 /* ABS* operations. */
12746
12747 #undef RS6000_BUILTIN_0
12748 #undef RS6000_BUILTIN_1
12749 #undef RS6000_BUILTIN_2
12750 #undef RS6000_BUILTIN_3
12751 #undef RS6000_BUILTIN_A
12752 #undef RS6000_BUILTIN_D
12753 #undef RS6000_BUILTIN_H
12754 #undef RS6000_BUILTIN_P
12755 #undef RS6000_BUILTIN_X
12756
12757 #define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE)
12758 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
12759 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
12760 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
12761 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE) \
12762 { MASK, ICODE, NAME, ENUM },
12763
12764 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
12765 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE)
12766 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
12767 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
12768
12769 static const struct builtin_description bdesc_abs[] =
12770 {
12771 #include "rs6000-builtin.def"
12772 };
12773
12774 /* Simple unary operations: VECb = foo (unsigned literal) or VECb =
12775 foo (VECa). */
12776
12777 #undef RS6000_BUILTIN_0
12778 #undef RS6000_BUILTIN_1
12779 #undef RS6000_BUILTIN_2
12780 #undef RS6000_BUILTIN_3
12781 #undef RS6000_BUILTIN_A
12782 #undef RS6000_BUILTIN_D
12783 #undef RS6000_BUILTIN_H
12784 #undef RS6000_BUILTIN_P
12785 #undef RS6000_BUILTIN_X
12786
12787 #define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE)
12788 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE) \
12789 { MASK, ICODE, NAME, ENUM },
12790
12791 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
12792 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
12793 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
12794 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
12795 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE)
12796 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
12797 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
12798
12799 static const struct builtin_description bdesc_1arg[] =
12800 {
12801 #include "rs6000-builtin.def"
12802 };
12803
12804 /* Simple no-argument operations: result = __builtin_darn_32 (). */
12805
12806 #undef RS6000_BUILTIN_0
12807 #undef RS6000_BUILTIN_1
12808 #undef RS6000_BUILTIN_2
12809 #undef RS6000_BUILTIN_3
12810 #undef RS6000_BUILTIN_A
12811 #undef RS6000_BUILTIN_D
12812 #undef RS6000_BUILTIN_H
12813 #undef RS6000_BUILTIN_P
12814 #undef RS6000_BUILTIN_X
12815
12816 #define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE) \
12817 { MASK, ICODE, NAME, ENUM },
12818
12819 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
12820 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
12821 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
12822 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
12823 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
12824 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE)
12825 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
12826 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
12827
12828 static const struct builtin_description bdesc_0arg[] =
12829 {
12830 #include "rs6000-builtin.def"
12831 };
12832
12833 /* HTM builtins. */
12834 #undef RS6000_BUILTIN_0
12835 #undef RS6000_BUILTIN_1
12836 #undef RS6000_BUILTIN_2
12837 #undef RS6000_BUILTIN_3
12838 #undef RS6000_BUILTIN_A
12839 #undef RS6000_BUILTIN_D
12840 #undef RS6000_BUILTIN_H
12841 #undef RS6000_BUILTIN_P
12842 #undef RS6000_BUILTIN_X
12843
12844 #define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE)
12845 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
12846 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
12847 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
12848 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
12849 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
12850 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE) \
12851 { MASK, ICODE, NAME, ENUM },
12852
12853 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
12854 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
12855
12856 static const struct builtin_description bdesc_htm[] =
12857 {
12858 #include "rs6000-builtin.def"
12859 };
12860
12861 #undef RS6000_BUILTIN_0
12862 #undef RS6000_BUILTIN_1
12863 #undef RS6000_BUILTIN_2
12864 #undef RS6000_BUILTIN_3
12865 #undef RS6000_BUILTIN_A
12866 #undef RS6000_BUILTIN_D
12867 #undef RS6000_BUILTIN_H
12868 #undef RS6000_BUILTIN_P
12869
12870 /* Return true if a builtin function is overloaded. */
12871 bool
12872 rs6000_overloaded_builtin_p (enum rs6000_builtins fncode)
12873 {
12874 return (rs6000_builtin_info[(int)fncode].attr & RS6000_BTC_OVERLOADED) != 0;
12875 }
12876
12877 const char *
12878 rs6000_overloaded_builtin_name (enum rs6000_builtins fncode)
12879 {
12880 return rs6000_builtin_info[(int)fncode].name;
12881 }
12882
12883 /* Expand a call to a builtin function that takes no arguments. */
12884 static rtx
12885 rs6000_expand_zeroop_builtin (enum insn_code icode, rtx target)
12886 {
12887 rtx pat;
12888 machine_mode tmode = insn_data[icode].operand[0].mode;
12889
12890 if (icode == CODE_FOR_nothing)
12891 /* Builtin not supported on this processor. */
12892 return 0;
12893
12894 if (icode == CODE_FOR_rs6000_mffsl
12895 && rs6000_isa_flags & OPTION_MASK_SOFT_FLOAT)
12896 {
12897 error ("%<__builtin_mffsl%> not supported with %<-msoft-float%>");
12898 return const0_rtx;
12899 }
12900
12901 if (target == 0
12902 || GET_MODE (target) != tmode
12903 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
12904 target = gen_reg_rtx (tmode);
12905
12906 pat = GEN_FCN (icode) (target);
12907 if (! pat)
12908 return 0;
12909 emit_insn (pat);
12910
12911 return target;
12912 }
12913
12914
12915 static rtx
12916 rs6000_expand_mtfsf_builtin (enum insn_code icode, tree exp)
12917 {
12918 rtx pat;
12919 tree arg0 = CALL_EXPR_ARG (exp, 0);
12920 tree arg1 = CALL_EXPR_ARG (exp, 1);
12921 rtx op0 = expand_normal (arg0);
12922 rtx op1 = expand_normal (arg1);
12923 machine_mode mode0 = insn_data[icode].operand[0].mode;
12924 machine_mode mode1 = insn_data[icode].operand[1].mode;
12925
12926 if (icode == CODE_FOR_nothing)
12927 /* Builtin not supported on this processor. */
12928 return 0;
12929
12930 /* If we got invalid arguments bail out before generating bad rtl. */
12931 if (arg0 == error_mark_node || arg1 == error_mark_node)
12932 return const0_rtx;
12933
12934 if (!CONST_INT_P (op0)
12935 || INTVAL (op0) > 255
12936 || INTVAL (op0) < 0)
12937 {
12938 error ("argument 1 must be an 8-bit field value");
12939 return const0_rtx;
12940 }
12941
12942 if (! (*insn_data[icode].operand[0].predicate) (op0, mode0))
12943 op0 = copy_to_mode_reg (mode0, op0);
12944
12945 if (! (*insn_data[icode].operand[1].predicate) (op1, mode1))
12946 op1 = copy_to_mode_reg (mode1, op1);
12947
12948 pat = GEN_FCN (icode) (op0, op1);
12949 if (!pat)
12950 return const0_rtx;
12951 emit_insn (pat);
12952
12953 return NULL_RTX;
12954 }
12955
12956 static rtx
12957 rs6000_expand_mtfsb_builtin (enum insn_code icode, tree exp)
12958 {
12959 rtx pat;
12960 tree arg0 = CALL_EXPR_ARG (exp, 0);
12961 rtx op0 = expand_normal (arg0);
12962
12963 if (icode == CODE_FOR_nothing)
12964 /* Builtin not supported on this processor. */
12965 return 0;
12966
12967 if (rs6000_isa_flags & OPTION_MASK_SOFT_FLOAT)
12968 {
12969 error ("%<__builtin_mtfsb0%> and %<__builtin_mtfsb1%> not supported with "
12970 "%<-msoft-float%>");
12971 return const0_rtx;
12972 }
12973
12974 /* If we got invalid arguments bail out before generating bad rtl. */
12975 if (arg0 == error_mark_node)
12976 return const0_rtx;
12977
12978 /* Only allow bit numbers 0 to 31. */
12979 if (!u5bit_cint_operand (op0, VOIDmode))
12980 {
12981 error ("argument must be a constant between 0 and 31");
12982 return const0_rtx;
12983 }
12984
12985 pat = GEN_FCN (icode) (op0);
12986 if (!pat)
12987 return const0_rtx;
12988 emit_insn (pat);
12989
12990 return NULL_RTX;
12991 }
12992
12993 static rtx
12994 rs6000_expand_set_fpscr_rn_builtin (enum insn_code icode, tree exp)
12995 {
12996 rtx pat;
12997 tree arg0 = CALL_EXPR_ARG (exp, 0);
12998 rtx op0 = expand_normal (arg0);
12999 machine_mode mode0 = insn_data[icode].operand[0].mode;
13000
13001 if (icode == CODE_FOR_nothing)
13002 /* Builtin not supported on this processor. */
13003 return 0;
13004
13005 if (rs6000_isa_flags & OPTION_MASK_SOFT_FLOAT)
13006 {
13007 error ("%<__builtin_set_fpscr_rn%> not supported with %<-msoft-float%>");
13008 return const0_rtx;
13009 }
13010
13011 /* If we got invalid arguments bail out before generating bad rtl. */
13012 if (arg0 == error_mark_node)
13013 return const0_rtx;
13014
13015 /* If the argument is a constant, check the range. The argument can only
13016 be a 2-bit value. Unfortunately, we can't check the range of the value
13017 at compile time if the argument is a variable. The least significant
13018 two bits of the argument, regardless of type, are used to set the
13019 rounding mode. All other bits are ignored. */
13020 if (CONST_INT_P (op0) && !const_0_to_3_operand (op0, VOIDmode))
13021 {
13022 error ("argument must be a value between 0 and 3");
13023 return const0_rtx;
13024 }
13025
13026 if (! (*insn_data[icode].operand[0].predicate) (op0, mode0))
13027 op0 = copy_to_mode_reg (mode0, op0);
13028
13029 pat = GEN_FCN (icode) (op0);
13030 if (!pat)
13031 return const0_rtx;
13032 emit_insn (pat);
13033
13034 return NULL_RTX;
13035 }
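
/* Usage sketch, assuming the FPSCR[RN] encoding from the Power ISA
   (0 = round to nearest even, 1 = toward zero, 2 = toward +infinity,
   3 = toward -infinity):

     __builtin_set_fpscr_rn (1);   // switch to round-toward-zero
*/
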
13036 static rtx
13037 rs6000_expand_set_fpscr_drn_builtin (enum insn_code icode, tree exp)
13038 {
13039 rtx pat;
13040 tree arg0 = CALL_EXPR_ARG (exp, 0);
13041 rtx op0 = expand_normal (arg0);
13042 machine_mode mode0 = insn_data[icode].operand[0].mode;
13043
13044 if (TARGET_32BIT)
13045 /* Builtin not supported in 32-bit mode. */
13046 fatal_error (input_location,
13047 "%<__builtin_set_fpscr_drn%> is not supported "
13048 "in 32-bit mode");
13049
13050 if (rs6000_isa_flags & OPTION_MASK_SOFT_FLOAT)
13051 {
13052 error ("%<__builtin_set_fpscr_drn%> not supported with %<-msoft-float%>");
13053 return const0_rtx;
13054 }
13055
13056 if (icode == CODE_FOR_nothing)
13057 /* Builtin not supported on this processor. */
13058 return 0;
13059
13060 /* If we got invalid arguments bail out before generating bad rtl. */
13061 if (arg0 == error_mark_node)
13062 return const0_rtx;
13063
13064 /* If the argument is a constant, check the range. The argument can only
13065 be a 3-bit value. Unfortunately, we can't check the range of the value
13066 at compile time if the argument is a variable. The least significant
13067 three bits of the argument, regardless of type, are used to set the
13068 rounding mode. All other bits are ignored. */
13069 if (CONST_INT_P (op0) && !const_0_to_7_operand (op0, VOIDmode))
13070 {
13071 error ("argument must be a value between 0 and 7");
13072 return const0_rtx;
13073 }
13074
13075 if (! (*insn_data[icode].operand[0].predicate) (op0, mode0))
13076 op0 = copy_to_mode_reg (mode0, op0);
13077
13078 pat = GEN_FCN (icode) (op0);
13079 if (! pat)
13080 return const0_rtx;
13081 emit_insn (pat);
13082
13083 return NULL_RTX;
13084 }
13085
13086 static rtx
13087 rs6000_expand_unop_builtin (enum insn_code icode, tree exp, rtx target)
13088 {
13089 rtx pat;
13090 tree arg0 = CALL_EXPR_ARG (exp, 0);
13091 rtx op0 = expand_normal (arg0);
13092 machine_mode tmode = insn_data[icode].operand[0].mode;
13093 machine_mode mode0 = insn_data[icode].operand[1].mode;
13094
13095 if (icode == CODE_FOR_nothing)
13096 /* Builtin not supported on this processor. */
13097 return 0;
13098
13099 /* If we got invalid arguments bail out before generating bad rtl. */
13100 if (arg0 == error_mark_node)
13101 return const0_rtx;
13102
13103 if (icode == CODE_FOR_altivec_vspltisb
13104 || icode == CODE_FOR_altivec_vspltish
13105 || icode == CODE_FOR_altivec_vspltisw)
13106 {
13107 /* Only allow 5-bit *signed* literals. */
13108 if (!CONST_INT_P (op0)
13109 || INTVAL (op0) > 15
13110 || INTVAL (op0) < -16)
13111 {
13112 error ("argument 1 must be a 5-bit signed literal");
13113 return CONST0_RTX (tmode);
13114 }
13115 }
13116
13117 if (target == 0
13118 || GET_MODE (target) != tmode
13119 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
13120 target = gen_reg_rtx (tmode);
13121
13122 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
13123 op0 = copy_to_mode_reg (mode0, op0);
13124
13125 pat = GEN_FCN (icode) (target, op0);
13126 if (! pat)
13127 return 0;
13128 emit_insn (pat);
13129
13130 return target;
13131 }
13132
13133 static rtx
13134 altivec_expand_abs_builtin (enum insn_code icode, tree exp, rtx target)
13135 {
13136 rtx pat, scratch1, scratch2;
13137 tree arg0 = CALL_EXPR_ARG (exp, 0);
13138 rtx op0 = expand_normal (arg0);
13139 machine_mode tmode = insn_data[icode].operand[0].mode;
13140 machine_mode mode0 = insn_data[icode].operand[1].mode;
13141
13142 /* If we have invalid arguments, bail out before generating bad rtl. */
13143 if (arg0 == error_mark_node)
13144 return const0_rtx;
13145
13146 if (target == 0
13147 || GET_MODE (target) != tmode
13148 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
13149 target = gen_reg_rtx (tmode);
13150
13151 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
13152 op0 = copy_to_mode_reg (mode0, op0);
13153
13154 scratch1 = gen_reg_rtx (mode0);
13155 scratch2 = gen_reg_rtx (mode0);
13156
13157 pat = GEN_FCN (icode) (target, op0, scratch1, scratch2);
13158 if (! pat)
13159 return 0;
13160 emit_insn (pat);
13161
13162 return target;
13163 }
13164
13165 static rtx
13166 rs6000_expand_binop_builtin (enum insn_code icode, tree exp, rtx target)
13167 {
13168 rtx pat;
13169 tree arg0 = CALL_EXPR_ARG (exp, 0);
13170 tree arg1 = CALL_EXPR_ARG (exp, 1);
13171 rtx op0 = expand_normal (arg0);
13172 rtx op1 = expand_normal (arg1);
13173 machine_mode tmode = insn_data[icode].operand[0].mode;
13174 machine_mode mode0 = insn_data[icode].operand[1].mode;
13175 machine_mode mode1 = insn_data[icode].operand[2].mode;
13176
13177 if (icode == CODE_FOR_nothing)
13178 /* Builtin not supported on this processor. */
13179 return 0;
13180
13181 /* If we got invalid arguments bail out before generating bad rtl. */
13182 if (arg0 == error_mark_node || arg1 == error_mark_node)
13183 return const0_rtx;
13184
13185 if (icode == CODE_FOR_unpackv1ti
13186 || icode == CODE_FOR_unpackkf
13187 || icode == CODE_FOR_unpacktf
13188 || icode == CODE_FOR_unpackif
13189 || icode == CODE_FOR_unpacktd)
13190 {
13191 /* Only allow 1-bit unsigned literals. */
13192 STRIP_NOPS (arg1);
13193 if (TREE_CODE (arg1) != INTEGER_CST
13194 || !IN_RANGE (TREE_INT_CST_LOW (arg1), 0, 1))
13195 {
13196 error ("argument 2 must be a 1-bit unsigned literal");
13197 return CONST0_RTX (tmode);
13198 }
13199 }
13200 else if (icode == CODE_FOR_altivec_vspltw)
13201 {
13202 /* Only allow 2-bit unsigned literals. */
13203 STRIP_NOPS (arg1);
13204 if (TREE_CODE (arg1) != INTEGER_CST
13205 || TREE_INT_CST_LOW (arg1) & ~3)
13206 {
13207 error ("argument 2 must be a 2-bit unsigned literal");
13208 return CONST0_RTX (tmode);
13209 }
13210 }
13211 else if (icode == CODE_FOR_altivec_vsplth)
13212 {
13213 /* Only allow 3-bit unsigned literals. */
13214 STRIP_NOPS (arg1);
13215 if (TREE_CODE (arg1) != INTEGER_CST
13216 || TREE_INT_CST_LOW (arg1) & ~7)
13217 {
13218 error ("argument 2 must be a 3-bit unsigned literal");
13219 return CONST0_RTX (tmode);
13220 }
13221 }
13222 else if (icode == CODE_FOR_altivec_vspltb)
13223 {
13224 /* Only allow 4-bit unsigned literals. */
13225 STRIP_NOPS (arg1);
13226 if (TREE_CODE (arg1) != INTEGER_CST
13227 || TREE_INT_CST_LOW (arg1) & ~15)
13228 {
13229 error ("argument 2 must be a 4-bit unsigned literal");
13230 return CONST0_RTX (tmode);
13231 }
13232 }
13233 else if (icode == CODE_FOR_altivec_vcfux
13234 || icode == CODE_FOR_altivec_vcfsx
13235 || icode == CODE_FOR_altivec_vctsxs
13236 || icode == CODE_FOR_altivec_vctuxs)
13237 {
13238 /* Only allow 5-bit unsigned literals. */
13239 STRIP_NOPS (arg1);
13240 if (TREE_CODE (arg1) != INTEGER_CST
13241 || TREE_INT_CST_LOW (arg1) & ~0x1f)
13242 {
13243 error ("argument 2 must be a 5-bit unsigned literal");
13244 return CONST0_RTX (tmode);
13245 }
13246 }
13247 else if (icode == CODE_FOR_dfptstsfi_eq_dd
13248 || icode == CODE_FOR_dfptstsfi_lt_dd
13249 || icode == CODE_FOR_dfptstsfi_gt_dd
13250 || icode == CODE_FOR_dfptstsfi_unordered_dd
13251 || icode == CODE_FOR_dfptstsfi_eq_td
13252 || icode == CODE_FOR_dfptstsfi_lt_td
13253 || icode == CODE_FOR_dfptstsfi_gt_td
13254 || icode == CODE_FOR_dfptstsfi_unordered_td)
13255 {
13256 /* Only allow 6-bit unsigned literals. */
13257 STRIP_NOPS (arg0);
13258 if (TREE_CODE (arg0) != INTEGER_CST
13259 || !IN_RANGE (TREE_INT_CST_LOW (arg0), 0, 63))
13260 {
13261 error ("argument 1 must be a 6-bit unsigned literal");
13262 return CONST0_RTX (tmode);
13263 }
13264 }
13265 else if (icode == CODE_FOR_xststdcqp_kf
13266 || icode == CODE_FOR_xststdcqp_tf
13267 || icode == CODE_FOR_xststdcdp
13268 || icode == CODE_FOR_xststdcsp
13269 || icode == CODE_FOR_xvtstdcdp
13270 || icode == CODE_FOR_xvtstdcsp)
13271 {
13272 /* Only allow 7-bit unsigned literals. */
13273 STRIP_NOPS (arg1);
13274 if (TREE_CODE (arg1) != INTEGER_CST
13275 || !IN_RANGE (TREE_INT_CST_LOW (arg1), 0, 127))
13276 {
13277 error ("argument 2 must be a 7-bit unsigned literal");
13278 return CONST0_RTX (tmode);
13279 }
13280 }
13281
13282 if (target == 0
13283 || GET_MODE (target) != tmode
13284 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
13285 target = gen_reg_rtx (tmode);
13286
13287 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
13288 op0 = copy_to_mode_reg (mode0, op0);
13289 if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
13290 op1 = copy_to_mode_reg (mode1, op1);
13291
13292 pat = GEN_FCN (icode) (target, op0, op1);
13293 if (! pat)
13294 return 0;
13295 emit_insn (pat);
13296
13297 return target;
13298 }
13299
13300 static rtx
13301 altivec_expand_predicate_builtin (enum insn_code icode, tree exp, rtx target)
13302 {
13303 rtx pat, scratch;
13304 tree cr6_form = CALL_EXPR_ARG (exp, 0);
13305 tree arg0 = CALL_EXPR_ARG (exp, 1);
13306 tree arg1 = CALL_EXPR_ARG (exp, 2);
13307 rtx op0 = expand_normal (arg0);
13308 rtx op1 = expand_normal (arg1);
13309 machine_mode tmode = SImode;
13310 machine_mode mode0 = insn_data[icode].operand[1].mode;
13311 machine_mode mode1 = insn_data[icode].operand[2].mode;
13312 int cr6_form_int;
13313
13314 if (TREE_CODE (cr6_form) != INTEGER_CST)
13315 {
13316 error ("argument 1 of %qs must be a constant",
13317 "__builtin_altivec_predicate");
13318 return const0_rtx;
13319 }
13320 else
13321 cr6_form_int = TREE_INT_CST_LOW (cr6_form);
13322
13323 gcc_assert (mode0 == mode1);
13324
13325 /* If we have invalid arguments, bail out before generating bad rtl. */
13326 if (arg0 == error_mark_node || arg1 == error_mark_node)
13327 return const0_rtx;
13328
13329 if (target == 0
13330 || GET_MODE (target) != tmode
13331 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
13332 target = gen_reg_rtx (tmode);
13333
13334 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
13335 op0 = copy_to_mode_reg (mode0, op0);
13336 if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
13337 op1 = copy_to_mode_reg (mode1, op1);
13338
13339 /* Note that for many of the relevant operations (e.g. cmpne or
13340 cmpeq) with float or double operands, it makes more sense for the
13341 mode of the allocated scratch register to be a vector of
13342 integers. But the choice to copy the mode of operand 0 was made
13343 long ago and there are no plans to change it. */
13344 scratch = gen_reg_rtx (mode0);
13345
13346 pat = GEN_FCN (icode) (scratch, op0, op1);
13347 if (! pat)
13348 return 0;
13349 emit_insn (pat);
13350
13351 /* The vec_any* and vec_all* predicates use the same opcodes for two
13352 different operations, but the bits in CR6 will be different
13353 depending on what information we want. So we have to play tricks
13354 with CR6 to get the right bits out.
13355
13356 If you think this is disgusting, look at the specs for the
13357 AltiVec predicates. */
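
/* For reference, assuming the __CR6_* encodings from altivec.h:
   0 (__CR6_EQ) tests "no element satisfied the comparison",
   1 (__CR6_EQ_REV) is its negation ("some element did"),
   2 (__CR6_LT) tests "all elements satisfied the comparison", and
   3 (__CR6_LT_REV) is its negation -- e.g. vec_all_eq passes
   __CR6_LT while vec_any_eq passes __CR6_EQ_REV.  */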
13358
13359 switch (cr6_form_int)
13360 {
13361 case 0:
13362 emit_insn (gen_cr6_test_for_zero (target));
13363 break;
13364 case 1:
13365 emit_insn (gen_cr6_test_for_zero_reverse (target));
13366 break;
13367 case 2:
13368 emit_insn (gen_cr6_test_for_lt (target));
13369 break;
13370 case 3:
13371 emit_insn (gen_cr6_test_for_lt_reverse (target));
13372 break;
13373 default:
13374 error ("argument 1 of %qs is out of range",
13375 "__builtin_altivec_predicate");
13376 break;
13377 }
13378
13379 return target;
13380 }
13381
13382 rtx
13383 swap_endian_selector_for_mode (machine_mode mode)
13384 {
13385 unsigned int swap1[16] = {15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0};
13386 unsigned int swap2[16] = {7,6,5,4,3,2,1,0,15,14,13,12,11,10,9,8};
13387 unsigned int swap4[16] = {3,2,1,0,7,6,5,4,11,10,9,8,15,14,13,12};
13388 unsigned int swap8[16] = {1,0,3,2,5,4,7,6,9,8,11,10,13,12,15,14};
13389
13390 unsigned int *swaparray, i;
13391 rtx perm[16];
13392
13393 switch (mode)
13394 {
13395 case E_V1TImode:
13396 swaparray = swap1;
13397 break;
13398 case E_V2DFmode:
13399 case E_V2DImode:
13400 swaparray = swap2;
13401 break;
13402 case E_V4SFmode:
13403 case E_V4SImode:
13404 swaparray = swap4;
13405 break;
13406 case E_V8HImode:
13407 swaparray = swap8;
13408 break;
13409 default:
13410 gcc_unreachable ();
13411 }
13412
13413 for (i = 0; i < 16; ++i)
13414 perm[i] = GEN_INT (swaparray[i]);
13415
13416 return force_reg (V16QImode, gen_rtx_CONST_VECTOR (V16QImode,
13417 gen_rtvec_v (16, perm)));
13418 }
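
/* Example (matching swap4 above): for V4SImode the returned vperm
   selector is {3,2,1,0, 7,6,5,4, 11,10,9,8, 15,14,13,12}, which, in
   the usual case where both vperm inputs are the same register,
   byte-reverses each 4-byte element -- i.e. converts the vector
   between big- and little-endian element layouts.  */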
13419
13420 static rtx
13421 altivec_expand_lv_builtin (enum insn_code icode, tree exp, rtx target, bool blk)
13422 {
13423 rtx pat, addr;
13424 tree arg0 = CALL_EXPR_ARG (exp, 0);
13425 tree arg1 = CALL_EXPR_ARG (exp, 1);
13426 machine_mode tmode = insn_data[icode].operand[0].mode;
13427 machine_mode mode0 = Pmode;
13428 machine_mode mode1 = Pmode;
13429 rtx op0 = expand_normal (arg0);
13430 rtx op1 = expand_normal (arg1);
13431
13432 if (icode == CODE_FOR_nothing)
13433 /* Builtin not supported on this processor. */
13434 return 0;
13435
13436 /* If we got invalid arguments bail out before generating bad rtl. */
13437 if (arg0 == error_mark_node || arg1 == error_mark_node)
13438 return const0_rtx;
13439
13440 if (target == 0
13441 || GET_MODE (target) != tmode
13442 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
13443 target = gen_reg_rtx (tmode);
13444
13445 op1 = copy_to_mode_reg (mode1, op1);
13446
13447 /* For LVX, express the RTL accurately by ANDing the address with -16.
13448 LVXL and LVE*X expand to use UNSPECs to hide their special behavior,
13449 so the raw address is fine. */
13450 if (icode == CODE_FOR_altivec_lvx_v1ti
13451 || icode == CODE_FOR_altivec_lvx_v2df
13452 || icode == CODE_FOR_altivec_lvx_v2di
13453 || icode == CODE_FOR_altivec_lvx_v4sf
13454 || icode == CODE_FOR_altivec_lvx_v4si
13455 || icode == CODE_FOR_altivec_lvx_v8hi
13456 || icode == CODE_FOR_altivec_lvx_v16qi)
13457 {
13458 rtx rawaddr;
13459 if (op0 == const0_rtx)
13460 rawaddr = op1;
13461 else
13462 {
13463 op0 = copy_to_mode_reg (mode0, op0);
13464 rawaddr = gen_rtx_PLUS (Pmode, op1, op0);
13465 }
13466 addr = gen_rtx_AND (Pmode, rawaddr, gen_rtx_CONST_INT (Pmode, -16));
13467 addr = gen_rtx_MEM (blk ? BLKmode : tmode, addr);
13468
13469 emit_insn (gen_rtx_SET (target, addr));
13470 }
13471 else
13472 {
13473 if (op0 == const0_rtx)
13474 addr = gen_rtx_MEM (blk ? BLKmode : tmode, op1);
13475 else
13476 {
13477 op0 = copy_to_mode_reg (mode0, op0);
13478 addr = gen_rtx_MEM (blk ? BLKmode : tmode,
13479 gen_rtx_PLUS (Pmode, op1, op0));
13480 }
13481
13482 pat = GEN_FCN (icode) (target, addr);
13483 if (! pat)
13484 return 0;
13485 emit_insn (pat);
13486 }
13487
13488 return target;
13489 }
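
/* Note on the AND above: the lvx hardware instruction ignores the low
   four bits of the effective address, so e.g.

     vector int v = vec_ld (0, ptr);   // actually loads from ptr & -16

   Writing the (addr & -16) explicitly in the RTL keeps that truncation
   visible to the optimizers instead of hiding it behind an UNSPEC.  */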
13490
13491 static rtx
13492 altivec_expand_stxvl_builtin (enum insn_code icode, tree exp)
13493 {
13494 rtx pat;
13495 tree arg0 = CALL_EXPR_ARG (exp, 0);
13496 tree arg1 = CALL_EXPR_ARG (exp, 1);
13497 tree arg2 = CALL_EXPR_ARG (exp, 2);
13498 rtx op0 = expand_normal (arg0);
13499 rtx op1 = expand_normal (arg1);
13500 rtx op2 = expand_normal (arg2);
13501 machine_mode mode0 = insn_data[icode].operand[0].mode;
13502 machine_mode mode1 = insn_data[icode].operand[1].mode;
13503 machine_mode mode2 = insn_data[icode].operand[2].mode;
13504
13505 if (icode == CODE_FOR_nothing)
13506 /* Builtin not supported on this processor. */
13507 return NULL_RTX;
13508
13509 /* If we got invalid arguments bail out before generating bad rtl. */
13510 if (arg0 == error_mark_node
13511 || arg1 == error_mark_node
13512 || arg2 == error_mark_node)
13513 return NULL_RTX;
13514
13515 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
13516 op0 = copy_to_mode_reg (mode0, op0);
13517 if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
13518 op1 = copy_to_mode_reg (mode1, op1);
13519 if (! (*insn_data[icode].operand[3].predicate) (op2, mode2))
13520 op2 = copy_to_mode_reg (mode2, op2);
13521
13522 pat = GEN_FCN (icode) (op0, op1, op2);
13523 if (pat)
13524 emit_insn (pat);
13525
13526 return NULL_RTX;
13527 }
13528
13529 static rtx
13530 altivec_expand_stv_builtin (enum insn_code icode, tree exp)
13531 {
13532 tree arg0 = CALL_EXPR_ARG (exp, 0);
13533 tree arg1 = CALL_EXPR_ARG (exp, 1);
13534 tree arg2 = CALL_EXPR_ARG (exp, 2);
13535 rtx op0 = expand_normal (arg0);
13536 rtx op1 = expand_normal (arg1);
13537 rtx op2 = expand_normal (arg2);
13538 rtx pat, addr, rawaddr;
13539 machine_mode tmode = insn_data[icode].operand[0].mode;
13540 machine_mode smode = insn_data[icode].operand[1].mode;
13541 machine_mode mode1 = Pmode;
13542 machine_mode mode2 = Pmode;
13543
13544 /* If we have invalid arguments, bail out before generating bad rtl. */
13545 if (arg0 == error_mark_node
13546 || arg1 == error_mark_node
13547 || arg2 == error_mark_node)
13548 return const0_rtx;
13549
13550 op2 = copy_to_mode_reg (mode2, op2);
13551
13552 /* For STVX, express the RTL accurately by ANDing the address with -16.
13553 STVXL and STVE*X expand to use UNSPECs to hide their special behavior,
13554 so the raw address is fine. */
13555 if (icode == CODE_FOR_altivec_stvx_v2df
13556 || icode == CODE_FOR_altivec_stvx_v2di
13557 || icode == CODE_FOR_altivec_stvx_v4sf
13558 || icode == CODE_FOR_altivec_stvx_v4si
13559 || icode == CODE_FOR_altivec_stvx_v8hi
13560 || icode == CODE_FOR_altivec_stvx_v16qi)
13561 {
13562 if (op1 == const0_rtx)
13563 rawaddr = op2;
13564 else
13565 {
13566 op1 = copy_to_mode_reg (mode1, op1);
13567 rawaddr = gen_rtx_PLUS (Pmode, op2, op1);
13568 }
13569
13570 addr = gen_rtx_AND (Pmode, rawaddr, gen_rtx_CONST_INT (Pmode, -16));
13571 addr = gen_rtx_MEM (tmode, addr);
13572
13573 op0 = copy_to_mode_reg (tmode, op0);
13574
13575 emit_insn (gen_rtx_SET (addr, op0));
13576 }
13577 else
13578 {
13579 if (! (*insn_data[icode].operand[1].predicate) (op0, smode))
13580 op0 = copy_to_mode_reg (smode, op0);
13581
13582 if (op1 == const0_rtx)
13583 addr = gen_rtx_MEM (tmode, op2);
13584 else
13585 {
13586 op1 = copy_to_mode_reg (mode1, op1);
13587 addr = gen_rtx_MEM (tmode, gen_rtx_PLUS (Pmode, op2, op1));
13588 }
13589
13590 pat = GEN_FCN (icode) (addr, op0);
13591 if (pat)
13592 emit_insn (pat);
13593 }
13594
13595 return NULL_RTX;
13596 }
13597
13598 /* Return the appropriate SPR number associated with the given builtin. */
13599 static inline HOST_WIDE_INT
13600 htm_spr_num (enum rs6000_builtins code)
13601 {
13602 if (code == HTM_BUILTIN_GET_TFHAR
13603 || code == HTM_BUILTIN_SET_TFHAR)
13604 return TFHAR_SPR;
13605 else if (code == HTM_BUILTIN_GET_TFIAR
13606 || code == HTM_BUILTIN_SET_TFIAR)
13607 return TFIAR_SPR;
13608 else if (code == HTM_BUILTIN_GET_TEXASR
13609 || code == HTM_BUILTIN_SET_TEXASR)
13610 return TEXASR_SPR;
13611 gcc_assert (code == HTM_BUILTIN_GET_TEXASRU
13612 || code == HTM_BUILTIN_SET_TEXASRU);
13613 return TEXASRU_SPR;
13614 }
13615
13616 /* Return the correct ICODE value depending on whether we are
13617 setting or reading the HTM SPRs. */
13618 static inline enum insn_code
13619 rs6000_htm_spr_icode (bool nonvoid)
13620 {
13621 if (nonvoid)
13622 return (TARGET_POWERPC64) ? CODE_FOR_htm_mfspr_di : CODE_FOR_htm_mfspr_si;
13623 else
13624 return (TARGET_POWERPC64) ? CODE_FOR_htm_mtspr_di : CODE_FOR_htm_mtspr_si;
13625 }
13626
13627 /* Expand the HTM builtin in EXP and store the result in TARGET.
13628 Store true in *EXPANDEDP if we found a builtin to expand. */
13629 static rtx
13630 htm_expand_builtin (tree exp, rtx target, bool * expandedp)
13631 {
13632 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
13633 bool nonvoid = TREE_TYPE (TREE_TYPE (fndecl)) != void_type_node;
13634 enum rs6000_builtins fcode = (enum rs6000_builtins) DECL_FUNCTION_CODE (fndecl);
13635 const struct builtin_description *d;
13636 size_t i;
13637
13638 *expandedp = true;
13639
13640 if (!TARGET_POWERPC64
13641 && (fcode == HTM_BUILTIN_TABORTDC
13642 || fcode == HTM_BUILTIN_TABORTDCI))
13643 {
13644 size_t uns_fcode = (size_t)fcode;
13645 const char *name = rs6000_builtin_info[uns_fcode].name;
13646 error ("builtin %qs is only valid in 64-bit mode", name);
13647 return const0_rtx;
13648 }
13649
13650 /* Expand the HTM builtins. */
13651 d = bdesc_htm;
13652 for (i = 0; i < ARRAY_SIZE (bdesc_htm); i++, d++)
13653 if (d->code == fcode)
13654 {
13655 rtx op[MAX_HTM_OPERANDS], pat;
13656 int nopnds = 0;
13657 tree arg;
13658 call_expr_arg_iterator iter;
13659 unsigned attr = rs6000_builtin_info[fcode].attr;
13660 enum insn_code icode = d->icode;
13661 const struct insn_operand_data *insn_op;
13662 bool uses_spr = (attr & RS6000_BTC_SPR);
13663 rtx cr = NULL_RTX;
13664
13665 if (uses_spr)
13666 icode = rs6000_htm_spr_icode (nonvoid);
13667 insn_op = &insn_data[icode].operand[0];
13668
13669 if (nonvoid)
13670 {
13671 machine_mode tmode = (uses_spr) ? insn_op->mode : E_SImode;
13672 if (!target
13673 || GET_MODE (target) != tmode
13674 || (uses_spr && !(*insn_op->predicate) (target, tmode)))
13675 target = gen_reg_rtx (tmode);
13676 if (uses_spr)
13677 op[nopnds++] = target;
13678 }
13679
13680 FOR_EACH_CALL_EXPR_ARG (arg, iter, exp)
13681 {
13682 if (arg == error_mark_node || nopnds >= MAX_HTM_OPERANDS)
13683 return const0_rtx;
13684
13685 insn_op = &insn_data[icode].operand[nopnds];
13686
13687 op[nopnds] = expand_normal (arg);
13688
13689 if (!(*insn_op->predicate) (op[nopnds], insn_op->mode))
13690 {
13691 if (!strcmp (insn_op->constraint, "n"))
13692 {
13693 int arg_num = (nonvoid) ? nopnds : nopnds + 1;
13694 if (!CONST_INT_P (op[nopnds]))
13695 error ("argument %d must be an unsigned literal", arg_num);
13696 else
13697 error ("argument %d is an unsigned literal that is "
13698 "out of range", arg_num);
13699 return const0_rtx;
13700 }
13701 op[nopnds] = copy_to_mode_reg (insn_op->mode, op[nopnds]);
13702 }
13703
13704 nopnds++;
13705 }
13706
13707 /* Handle the builtins for extended mnemonics. These accept
13708 no arguments, but map to builtins that take arguments. */
13709 switch (fcode)
13710 {
13711 case HTM_BUILTIN_TENDALL: /* Alias for: tend. 1 */
13712 case HTM_BUILTIN_TRESUME: /* Alias for: tsr. 1 */
13713 op[nopnds++] = GEN_INT (1);
13714 if (flag_checking)
13715 attr |= RS6000_BTC_UNARY;
13716 break;
13717 case HTM_BUILTIN_TSUSPEND: /* Alias for: tsr. 0 */
13718 op[nopnds++] = GEN_INT (0);
13719 if (flag_checking)
13720 attr |= RS6000_BTC_UNARY;
13721 break;
13722 default:
13723 break;
13724 }
13725
13726 /* If this builtin accesses SPRs, then pass in the appropriate
13727 SPR number and SPR regno as the last two operands. */
13728 if (uses_spr)
13729 {
13730 machine_mode mode = (TARGET_POWERPC64) ? DImode : SImode;
13731 op[nopnds++] = gen_rtx_CONST_INT (mode, htm_spr_num (fcode));
13732 }
13733 /* If this builtin accesses a CR, then pass in a scratch
13734 CR as the last operand. */
13735 else if (attr & RS6000_BTC_CR)
13736 {
  cr = gen_reg_rtx (CCmode);
13737 op[nopnds++] = cr;
13738 }
13739
13740 if (flag_checking)
13741 {
13742 int expected_nopnds = 0;
13743 if ((attr & RS6000_BTC_TYPE_MASK) == RS6000_BTC_UNARY)
13744 expected_nopnds = 1;
13745 else if ((attr & RS6000_BTC_TYPE_MASK) == RS6000_BTC_BINARY)
13746 expected_nopnds = 2;
13747 else if ((attr & RS6000_BTC_TYPE_MASK) == RS6000_BTC_TERNARY)
13748 expected_nopnds = 3;
13749 if (!(attr & RS6000_BTC_VOID))
13750 expected_nopnds += 1;
13751 if (uses_spr)
13752 expected_nopnds += 1;
13753
13754 gcc_assert (nopnds == expected_nopnds
13755 && nopnds <= MAX_HTM_OPERANDS);
13756 }
13757
13758 switch (nopnds)
13759 {
13760 case 1:
13761 pat = GEN_FCN (icode) (op[0]);
13762 break;
13763 case 2:
13764 pat = GEN_FCN (icode) (op[0], op[1]);
13765 break;
13766 case 3:
13767 pat = GEN_FCN (icode) (op[0], op[1], op[2]);
13768 break;
13769 case 4:
13770 pat = GEN_FCN (icode) (op[0], op[1], op[2], op[3]);
13771 break;
13772 default:
13773 gcc_unreachable ();
13774 }
13775 if (!pat)
13776 return NULL_RTX;
13777 emit_insn (pat);
13778
13779 if (attr & RS6000_BTC_CR)
13780 {
13781 if (fcode == HTM_BUILTIN_TBEGIN)
13782 {
13783 /* Emit code to set TARGET to true or false depending on
13784 whether the tbegin. instruction succeeded or failed
13785 to start a transaction. We do this by placing the 1's
13786 complement of CR's EQ bit into TARGET. */
13787 rtx scratch = gen_reg_rtx (SImode);
13788 emit_insn (gen_rtx_SET (scratch,
13789 gen_rtx_EQ (SImode, cr,
13790 const0_rtx)));
13791 emit_insn (gen_rtx_SET (target,
13792 gen_rtx_XOR (SImode, scratch,
13793 GEN_INT (1))));
13794 }
13795 else
13796 {
13797 /* Emit code to copy the 4-bit condition register field
13798 CR into the least significant end of register TARGET. */
13799 rtx scratch1 = gen_reg_rtx (SImode);
13800 rtx scratch2 = gen_reg_rtx (SImode);
13801 rtx subreg = simplify_gen_subreg (CCmode, scratch1, SImode, 0);
13802 emit_insn (gen_movcc (subreg, cr));
13803 emit_insn (gen_lshrsi3 (scratch2, scratch1, GEN_INT (28)));
13804 emit_insn (gen_andsi3 (target, scratch2, GEN_INT (0xf)));
13805 }
13806 }
13807
13808 if (nonvoid)
13809 return target;
13810 return const0_rtx;
13811 }
13812
13813 *expandedp = false;
13814 return NULL_RTX;
13815 }
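
/* Usage sketch for the CR-returning path above: per the TBEGIN code,
   the builtin yields the complement of the EQ bit of the CR field set
   by tbegin., so a nonzero result means the transaction started:

     if (__builtin_tbegin (0))
       {
         ...transactional code...
         __builtin_tend (0);
       }
     else
       handle_failure ();   // hypothetical fallback, for illustration
*/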
13816
13817 /* Expand the CPU builtin in FCODE and store the result in TARGET. */
13818
13819 static rtx
13820 cpu_expand_builtin (enum rs6000_builtins fcode, tree exp ATTRIBUTE_UNUSED,
13821 rtx target)
13822 {
13823 /* __builtin_cpu_init () is a nop, so expand to nothing. */
13824 if (fcode == RS6000_BUILTIN_CPU_INIT)
13825 return const0_rtx;
13826
13827 if (target == 0 || GET_MODE (target) != SImode)
13828 target = gen_reg_rtx (SImode);
13829
13830 #ifdef TARGET_LIBC_PROVIDES_HWCAP_IN_TCB
13831 tree arg = TREE_OPERAND (CALL_EXPR_ARG (exp, 0), 0);
13832 /* The target clones pass creates an ARRAY_REF instead of a STRING_CST;
13833 convert it back to a STRING_CST. */
13834 if (TREE_CODE (arg) == ARRAY_REF
13835 && TREE_CODE (TREE_OPERAND (arg, 0)) == STRING_CST
13836 && TREE_CODE (TREE_OPERAND (arg, 1)) == INTEGER_CST
13837 && compare_tree_int (TREE_OPERAND (arg, 1), 0) == 0)
13838 arg = TREE_OPERAND (arg, 0);
13839
13840 if (TREE_CODE (arg) != STRING_CST)
13841 {
13842 error ("builtin %qs only accepts a string argument",
13843 rs6000_builtin_info[(size_t) fcode].name);
13844 return const0_rtx;
13845 }
13846
13847 if (fcode == RS6000_BUILTIN_CPU_IS)
13848 {
13849 const char *cpu = TREE_STRING_POINTER (arg);
13850 rtx cpuid = NULL_RTX;
13851 for (size_t i = 0; i < ARRAY_SIZE (cpu_is_info); i++)
13852 if (strcmp (cpu, cpu_is_info[i].cpu) == 0)
13853 {
13854 /* The CPUID value in the TCB is offset by _DL_FIRST_PLATFORM. */
13855 cpuid = GEN_INT (cpu_is_info[i].cpuid + _DL_FIRST_PLATFORM);
13856 break;
13857 }
13858 if (cpuid == NULL_RTX)
13859 {
13860 /* Invalid CPU argument. */
13861 error ("cpu %qs is an invalid argument to builtin %qs",
13862 cpu, rs6000_builtin_info[(size_t) fcode].name);
13863 return const0_rtx;
13864 }
13865
13866 rtx platform = gen_reg_rtx (SImode);
13867 rtx tcbmem = gen_const_mem (SImode,
13868 gen_rtx_PLUS (Pmode,
13869 gen_rtx_REG (Pmode, TLS_REGNUM),
13870 GEN_INT (TCB_PLATFORM_OFFSET)));
13871 emit_move_insn (platform, tcbmem);
13872 emit_insn (gen_eqsi3 (target, platform, cpuid));
13873 }
13874 else if (fcode == RS6000_BUILTIN_CPU_SUPPORTS)
13875 {
13876 const char *hwcap = TREE_STRING_POINTER (arg);
13877 rtx mask = NULL_RTX;
13878 int hwcap_offset;
13879 for (size_t i = 0; i < ARRAY_SIZE (cpu_supports_info); i++)
13880 if (strcmp (hwcap, cpu_supports_info[i].hwcap) == 0)
13881 {
13882 mask = GEN_INT (cpu_supports_info[i].mask);
13883 hwcap_offset = TCB_HWCAP_OFFSET (cpu_supports_info[i].id);
13884 break;
13885 }
13886 if (mask == NULL_RTX)
13887 {
13888 /* Invalid HWCAP argument. */
13889 error ("%s %qs is an invalid argument to builtin %qs",
13890 "hwcap", hwcap, rs6000_builtin_info[(size_t) fcode].name);
13891 return const0_rtx;
13892 }
13893
13894 rtx tcb_hwcap = gen_reg_rtx (SImode);
13895 rtx tcbmem = gen_const_mem (SImode,
13896 gen_rtx_PLUS (Pmode,
13897 gen_rtx_REG (Pmode, TLS_REGNUM),
13898 GEN_INT (hwcap_offset)));
13899 emit_move_insn (tcb_hwcap, tcbmem);
13900 rtx scratch1 = gen_reg_rtx (SImode);
13901 emit_insn (gen_rtx_SET (scratch1, gen_rtx_AND (SImode, tcb_hwcap, mask)));
13902 rtx scratch2 = gen_reg_rtx (SImode);
13903 emit_insn (gen_eqsi3 (scratch2, scratch1, const0_rtx));
13904 emit_insn (gen_rtx_SET (target, gen_rtx_XOR (SImode, scratch2, const1_rtx)));
13905 }
13906 else
13907 gcc_unreachable ();
13908
13909 /* Record that we have expanded a CPU builtin, so that we can later
13910 emit a reference to the special symbol exported by LIBC to ensure we
13911 do not link against an old LIBC that doesn't support this feature. */
13912 cpu_builtin_p = true;
13913
13914 #else
13915 warning (0, "builtin %qs needs GLIBC (2.23 and newer) that exports hardware "
13916 "capability bits", rs6000_builtin_info[(size_t) fcode].name);
13917
13918 /* For old LIBCs, always return FALSE. */
13919 emit_move_insn (target, GEN_INT (0));
13920 #endif /* TARGET_LIBC_PROVIDES_HWCAP_IN_TCB */
13921
13922 return target;
13923 }
13924
13925 static rtx
13926 rs6000_expand_ternop_builtin (enum insn_code icode, tree exp, rtx target)
13927 {
13928 rtx pat;
13929 tree arg0 = CALL_EXPR_ARG (exp, 0);
13930 tree arg1 = CALL_EXPR_ARG (exp, 1);
13931 tree arg2 = CALL_EXPR_ARG (exp, 2);
13932 rtx op0 = expand_normal (arg0);
13933 rtx op1 = expand_normal (arg1);
13934 rtx op2 = expand_normal (arg2);
13935 machine_mode tmode = insn_data[icode].operand[0].mode;
13936 machine_mode mode0 = insn_data[icode].operand[1].mode;
13937 machine_mode mode1 = insn_data[icode].operand[2].mode;
13938 machine_mode mode2 = insn_data[icode].operand[3].mode;
13939
13940 if (icode == CODE_FOR_nothing)
13941 /* Builtin not supported on this processor. */
13942 return 0;
13943
13944 /* If we got invalid arguments bail out before generating bad rtl. */
13945 if (arg0 == error_mark_node
13946 || arg1 == error_mark_node
13947 || arg2 == error_mark_node)
13948 return const0_rtx;
13949
13950 /* Check and prepare argument depending on the instruction code.
13951
     Note that a switch statement instead of the sequence of tests
     would be incorrect as many of the CODE_FOR values could be
     CODE_FOR_nothing, and that would yield multiple case alternatives
     with identical values.  (We would never reach here at runtime in
     that case anyway.)  */
13957 if (icode == CODE_FOR_altivec_vsldoi_v4sf
13958 || icode == CODE_FOR_altivec_vsldoi_v2df
13959 || icode == CODE_FOR_altivec_vsldoi_v4si
13960 || icode == CODE_FOR_altivec_vsldoi_v8hi
13961 || icode == CODE_FOR_altivec_vsldoi_v16qi)
13962 {
13963 /* Only allow 4-bit unsigned literals. */
13964 STRIP_NOPS (arg2);
13965 if (TREE_CODE (arg2) != INTEGER_CST
13966 || TREE_INT_CST_LOW (arg2) & ~0xf)
13967 {
13968 error ("argument 3 must be a 4-bit unsigned literal");
13969 return CONST0_RTX (tmode);
13970 }
13971 }
13972 else if (icode == CODE_FOR_vsx_xxpermdi_v2df
13973 || icode == CODE_FOR_vsx_xxpermdi_v2di
13974 || icode == CODE_FOR_vsx_xxpermdi_v2df_be
13975 || icode == CODE_FOR_vsx_xxpermdi_v2di_be
13976 || icode == CODE_FOR_vsx_xxpermdi_v1ti
13977 || icode == CODE_FOR_vsx_xxpermdi_v4sf
13978 || icode == CODE_FOR_vsx_xxpermdi_v4si
13979 || icode == CODE_FOR_vsx_xxpermdi_v8hi
13980 || icode == CODE_FOR_vsx_xxpermdi_v16qi
13981 || icode == CODE_FOR_vsx_xxsldwi_v16qi
13982 || icode == CODE_FOR_vsx_xxsldwi_v8hi
13983 || icode == CODE_FOR_vsx_xxsldwi_v4si
13984 || icode == CODE_FOR_vsx_xxsldwi_v4sf
13985 || icode == CODE_FOR_vsx_xxsldwi_v2di
13986 || icode == CODE_FOR_vsx_xxsldwi_v2df)
13987 {
13988 /* Only allow 2-bit unsigned literals. */
13989 STRIP_NOPS (arg2);
13990 if (TREE_CODE (arg2) != INTEGER_CST
13991 || TREE_INT_CST_LOW (arg2) & ~0x3)
13992 {
13993 error ("argument 3 must be a 2-bit unsigned literal");
13994 return CONST0_RTX (tmode);
13995 }
13996 }
13997 else if (icode == CODE_FOR_vsx_set_v2df
13998 || icode == CODE_FOR_vsx_set_v2di
13999 || icode == CODE_FOR_bcdadd
14000 || icode == CODE_FOR_bcdadd_lt
14001 || icode == CODE_FOR_bcdadd_eq
14002 || icode == CODE_FOR_bcdadd_gt
14003 || icode == CODE_FOR_bcdsub
14004 || icode == CODE_FOR_bcdsub_lt
14005 || icode == CODE_FOR_bcdsub_eq
14006 || icode == CODE_FOR_bcdsub_gt)
14007 {
14008 /* Only allow 1-bit unsigned literals. */
14009 STRIP_NOPS (arg2);
14010 if (TREE_CODE (arg2) != INTEGER_CST
14011 || TREE_INT_CST_LOW (arg2) & ~0x1)
14012 {
14013 error ("argument 3 must be a 1-bit unsigned literal");
14014 return CONST0_RTX (tmode);
14015 }
14016 }
14017 else if (icode == CODE_FOR_dfp_ddedpd_dd
14018 || icode == CODE_FOR_dfp_ddedpd_td)
14019 {
14020 /* Only allow 2-bit unsigned literals where the value is 0 or 2. */
14021 STRIP_NOPS (arg0);
14022 if (TREE_CODE (arg0) != INTEGER_CST
	  || TREE_INT_CST_LOW (arg0) & ~0x3)
14024 {
14025 error ("argument 1 must be 0 or 2");
14026 return CONST0_RTX (tmode);
14027 }
14028 }
14029 else if (icode == CODE_FOR_dfp_denbcd_dd
14030 || icode == CODE_FOR_dfp_denbcd_td)
14031 {
14032 /* Only allow 1-bit unsigned literals. */
14033 STRIP_NOPS (arg0);
14034 if (TREE_CODE (arg0) != INTEGER_CST
14035 || TREE_INT_CST_LOW (arg0) & ~0x1)
14036 {
14037 error ("argument 1 must be a 1-bit unsigned literal");
14038 return CONST0_RTX (tmode);
14039 }
14040 }
14041 else if (icode == CODE_FOR_dfp_dscli_dd
14042 || icode == CODE_FOR_dfp_dscli_td
14043 || icode == CODE_FOR_dfp_dscri_dd
14044 || icode == CODE_FOR_dfp_dscri_td)
14045 {
14046 /* Only allow 6-bit unsigned literals. */
14047 STRIP_NOPS (arg1);
14048 if (TREE_CODE (arg1) != INTEGER_CST
14049 || TREE_INT_CST_LOW (arg1) & ~0x3f)
14050 {
14051 error ("argument 2 must be a 6-bit unsigned literal");
14052 return CONST0_RTX (tmode);
14053 }
14054 }
14055 else if (icode == CODE_FOR_crypto_vshasigmaw
14056 || icode == CODE_FOR_crypto_vshasigmad)
14057 {
      /* Check whether the 2nd and 3rd arguments are integer constants in
	 range, and prepare the arguments.  */
14060 STRIP_NOPS (arg1);
14061 if (TREE_CODE (arg1) != INTEGER_CST || wi::geu_p (wi::to_wide (arg1), 2))
14062 {
14063 error ("argument 2 must be 0 or 1");
14064 return CONST0_RTX (tmode);
14065 }
14066
14067 STRIP_NOPS (arg2);
14068 if (TREE_CODE (arg2) != INTEGER_CST
14069 || wi::geu_p (wi::to_wide (arg2), 16))
14070 {
14071 error ("argument 3 must be in the range [0, 15]");
14072 return CONST0_RTX (tmode);
14073 }
14074 }
14075
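  /* As an illustrative (hypothetical) example of the literal checks above:
     vec_sld, which maps onto the vsldoi patterns, accepts

	 vec_sld (a, b, 3)

     but rejects a non-constant third argument or a literal outside
     [0, 15] with the "4-bit unsigned literal" error.  */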
14076 if (target == 0
14077 || GET_MODE (target) != tmode
14078 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
14079 target = gen_reg_rtx (tmode);
14080
14081 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
14082 op0 = copy_to_mode_reg (mode0, op0);
14083 if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
14084 op1 = copy_to_mode_reg (mode1, op1);
14085 if (! (*insn_data[icode].operand[3].predicate) (op2, mode2))
14086 op2 = copy_to_mode_reg (mode2, op2);
14087
14088 pat = GEN_FCN (icode) (target, op0, op1, op2);
14089 if (! pat)
14090 return 0;
14091 emit_insn (pat);
14092
14093 return target;
14094 }
14095
14096
14097 /* Expand the dst builtins. */
14098 static rtx
14099 altivec_expand_dst_builtin (tree exp, rtx target ATTRIBUTE_UNUSED,
14100 bool *expandedp)
14101 {
14102 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
14103 enum rs6000_builtins fcode = (enum rs6000_builtins) DECL_FUNCTION_CODE (fndecl);
14104 tree arg0, arg1, arg2;
14105 machine_mode mode0, mode1;
14106 rtx pat, op0, op1, op2;
14107 const struct builtin_description *d;
14108 size_t i;
14109
14110 *expandedp = false;
14111
14112 /* Handle DST variants. */
14113 d = bdesc_dst;
14114 for (i = 0; i < ARRAY_SIZE (bdesc_dst); i++, d++)
14115 if (d->code == fcode)
14116 {
14117 arg0 = CALL_EXPR_ARG (exp, 0);
14118 arg1 = CALL_EXPR_ARG (exp, 1);
14119 arg2 = CALL_EXPR_ARG (exp, 2);
14120 op0 = expand_normal (arg0);
14121 op1 = expand_normal (arg1);
14122 op2 = expand_normal (arg2);
14123 mode0 = insn_data[d->icode].operand[0].mode;
14124 mode1 = insn_data[d->icode].operand[1].mode;
14125
14126 /* Invalid arguments, bail out before generating bad rtl. */
14127 if (arg0 == error_mark_node
14128 || arg1 == error_mark_node
14129 || arg2 == error_mark_node)
14130 return const0_rtx;
14131
14132 *expandedp = true;
14133 STRIP_NOPS (arg2);
14134 if (TREE_CODE (arg2) != INTEGER_CST
14135 || TREE_INT_CST_LOW (arg2) & ~0x3)
14136 {
14137 error ("argument to %qs must be a 2-bit unsigned literal", d->name);
14138 return const0_rtx;
14139 }
14140
14141 if (! (*insn_data[d->icode].operand[0].predicate) (op0, mode0))
14142 op0 = copy_to_mode_reg (Pmode, op0);
14143 if (! (*insn_data[d->icode].operand[1].predicate) (op1, mode1))
14144 op1 = copy_to_mode_reg (mode1, op1);
14145
14146 pat = GEN_FCN (d->icode) (op0, op1, op2);
14147 if (pat != 0)
14148 emit_insn (pat);
14149
14150 return NULL_RTX;
14151 }
14152
14153 return NULL_RTX;
14154 }
14155
14156 /* Expand vec_init builtin. */
14157 static rtx
14158 altivec_expand_vec_init_builtin (tree type, tree exp, rtx target)
14159 {
14160 machine_mode tmode = TYPE_MODE (type);
14161 machine_mode inner_mode = GET_MODE_INNER (tmode);
14162 int i, n_elt = GET_MODE_NUNITS (tmode);
14163
14164 gcc_assert (VECTOR_MODE_P (tmode));
14165 gcc_assert (n_elt == call_expr_nargs (exp));
14166
14167 if (!target || !register_operand (target, tmode))
14168 target = gen_reg_rtx (tmode);
14169
  /* If we have a vector comprised of a single element, such as V1TImode, do
     the initialization directly.  */
14172 if (n_elt == 1 && GET_MODE_SIZE (tmode) == GET_MODE_SIZE (inner_mode))
14173 {
14174 rtx x = expand_normal (CALL_EXPR_ARG (exp, 0));
14175 emit_move_insn (target, gen_lowpart (tmode, x));
14176 }
14177 else
14178 {
14179 rtvec v = rtvec_alloc (n_elt);
14180
14181 for (i = 0; i < n_elt; ++i)
14182 {
14183 rtx x = expand_normal (CALL_EXPR_ARG (exp, i));
14184 RTVEC_ELT (v, i) = gen_lowpart (inner_mode, x);
14185 }
14186
14187 rs6000_expand_vector_init (target, gen_rtx_PARALLEL (tmode, v));
14188 }
14189
14190 return target;
14191 }
14192
14193 /* Return the integer constant in ARG. Constrain it to be in the range
14194 of the subparts of VEC_TYPE; issue an error if not. */
14195
14196 static int
14197 get_element_number (tree vec_type, tree arg)
14198 {
14199 unsigned HOST_WIDE_INT elt, max = TYPE_VECTOR_SUBPARTS (vec_type) - 1;
14200
14201 if (!tree_fits_uhwi_p (arg)
14202 || (elt = tree_to_uhwi (arg), elt > max))
14203 {
14204 error ("selector must be an integer constant in the range [0, %wi]", max);
14205 return 0;
14206 }
14207
14208 return elt;
14209 }
14210
14211 /* Expand vec_set builtin. */
14212 static rtx
14213 altivec_expand_vec_set_builtin (tree exp)
14214 {
14215 machine_mode tmode, mode1;
14216 tree arg0, arg1, arg2;
14217 int elt;
14218 rtx op0, op1;
14219
14220 arg0 = CALL_EXPR_ARG (exp, 0);
14221 arg1 = CALL_EXPR_ARG (exp, 1);
14222 arg2 = CALL_EXPR_ARG (exp, 2);
14223
14224 tmode = TYPE_MODE (TREE_TYPE (arg0));
14225 mode1 = TYPE_MODE (TREE_TYPE (TREE_TYPE (arg0)));
14226 gcc_assert (VECTOR_MODE_P (tmode));
14227
14228 op0 = expand_expr (arg0, NULL_RTX, tmode, EXPAND_NORMAL);
14229 op1 = expand_expr (arg1, NULL_RTX, mode1, EXPAND_NORMAL);
14230 elt = get_element_number (TREE_TYPE (arg0), arg2);
14231
14232 if (GET_MODE (op1) != mode1 && GET_MODE (op1) != VOIDmode)
14233 op1 = convert_modes (mode1, GET_MODE (op1), op1, true);
14234
14235 op0 = force_reg (tmode, op0);
14236 op1 = force_reg (mode1, op1);
14237
14238 rs6000_expand_vector_set (op0, op1, elt);
14239
14240 return op0;
14241 }
14242
14243 /* Expand vec_ext builtin. */
14244 static rtx
14245 altivec_expand_vec_ext_builtin (tree exp, rtx target)
14246 {
14247 machine_mode tmode, mode0;
14248 tree arg0, arg1;
14249 rtx op0;
14250 rtx op1;
14251
14252 arg0 = CALL_EXPR_ARG (exp, 0);
14253 arg1 = CALL_EXPR_ARG (exp, 1);
14254
14255 op0 = expand_normal (arg0);
14256 op1 = expand_normal (arg1);
14257
14258 if (TREE_CODE (arg1) == INTEGER_CST)
14259 {
14260 unsigned HOST_WIDE_INT elt;
14261 unsigned HOST_WIDE_INT size = TYPE_VECTOR_SUBPARTS (TREE_TYPE (arg0));
14262 unsigned int truncated_selector;
      /* Even if !tree_fits_uhwi_p (arg1), TREE_INT_CST_LOW (arg1)
	 returns the low-order bits of the INTEGER_CST for modulo indexing.  */
14265 elt = TREE_INT_CST_LOW (arg1);
14266 truncated_selector = elt % size;
14267 op1 = GEN_INT (truncated_selector);
14268 }
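      /* For example (illustrative): for a V4SI vector, a call such as
	 vec_extract (v, 5) has its selector reduced to 5 % 4 == 1,
	 matching the modulo-indexing semantics described above.  */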
14269
14270 tmode = TYPE_MODE (TREE_TYPE (TREE_TYPE (arg0)));
14271 mode0 = TYPE_MODE (TREE_TYPE (arg0));
14272 gcc_assert (VECTOR_MODE_P (mode0));
14273
14274 op0 = force_reg (mode0, op0);
14275
14276 if (optimize || !target || !register_operand (target, tmode))
14277 target = gen_reg_rtx (tmode);
14278
14279 rs6000_expand_vector_extract (target, op0, op1);
14280
14281 return target;
14282 }
14283
14284 /* Expand the builtin in EXP and store the result in TARGET. Store
14285 true in *EXPANDEDP if we found a builtin to expand. */
14286 static rtx
14287 altivec_expand_builtin (tree exp, rtx target, bool *expandedp)
14288 {
14289 const struct builtin_description *d;
14290 size_t i;
14291 enum insn_code icode;
14292 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
14293 tree arg0, arg1, arg2;
14294 rtx op0, pat;
14295 machine_mode tmode, mode0;
14296 enum rs6000_builtins fcode
14297 = (enum rs6000_builtins) DECL_FUNCTION_CODE (fndecl);
14298
14299 if (rs6000_overloaded_builtin_p (fcode))
14300 {
14301 *expandedp = true;
14302 error ("unresolved overload for Altivec builtin %qF", fndecl);
14303
14304 /* Given it is invalid, just generate a normal call. */
14305 return expand_call (exp, target, false);
14306 }
14307
14308 target = altivec_expand_dst_builtin (exp, target, expandedp);
14309 if (*expandedp)
14310 return target;
14311
14312 *expandedp = true;
14313
14314 switch (fcode)
14315 {
14316 case ALTIVEC_BUILTIN_STVX_V2DF:
14317 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvx_v2df, exp);
14318 case ALTIVEC_BUILTIN_STVX_V2DI:
14319 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvx_v2di, exp);
14320 case ALTIVEC_BUILTIN_STVX_V4SF:
14321 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvx_v4sf, exp);
14322 case ALTIVEC_BUILTIN_STVX:
14323 case ALTIVEC_BUILTIN_STVX_V4SI:
14324 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvx_v4si, exp);
14325 case ALTIVEC_BUILTIN_STVX_V8HI:
14326 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvx_v8hi, exp);
14327 case ALTIVEC_BUILTIN_STVX_V16QI:
14328 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvx_v16qi, exp);
14329 case ALTIVEC_BUILTIN_STVEBX:
14330 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvebx, exp);
14331 case ALTIVEC_BUILTIN_STVEHX:
14332 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvehx, exp);
14333 case ALTIVEC_BUILTIN_STVEWX:
14334 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvewx, exp);
14335 case ALTIVEC_BUILTIN_STVXL_V2DF:
14336 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvxl_v2df, exp);
14337 case ALTIVEC_BUILTIN_STVXL_V2DI:
14338 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvxl_v2di, exp);
14339 case ALTIVEC_BUILTIN_STVXL_V4SF:
14340 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvxl_v4sf, exp);
14341 case ALTIVEC_BUILTIN_STVXL:
14342 case ALTIVEC_BUILTIN_STVXL_V4SI:
14343 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvxl_v4si, exp);
14344 case ALTIVEC_BUILTIN_STVXL_V8HI:
14345 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvxl_v8hi, exp);
14346 case ALTIVEC_BUILTIN_STVXL_V16QI:
14347 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvxl_v16qi, exp);
14348
14349 case ALTIVEC_BUILTIN_STVLX:
14350 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvlx, exp);
14351 case ALTIVEC_BUILTIN_STVLXL:
14352 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvlxl, exp);
14353 case ALTIVEC_BUILTIN_STVRX:
14354 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvrx, exp);
14355 case ALTIVEC_BUILTIN_STVRXL:
14356 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvrxl, exp);
14357
14358 case P9V_BUILTIN_STXVL:
14359 return altivec_expand_stxvl_builtin (CODE_FOR_stxvl, exp);
14360
14361 case P9V_BUILTIN_XST_LEN_R:
14362 return altivec_expand_stxvl_builtin (CODE_FOR_xst_len_r, exp);
14363
14364 case VSX_BUILTIN_STXVD2X_V1TI:
14365 return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v1ti, exp);
14366 case VSX_BUILTIN_STXVD2X_V2DF:
14367 return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v2df, exp);
14368 case VSX_BUILTIN_STXVD2X_V2DI:
14369 return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v2di, exp);
14370 case VSX_BUILTIN_STXVW4X_V4SF:
14371 return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v4sf, exp);
14372 case VSX_BUILTIN_STXVW4X_V4SI:
14373 return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v4si, exp);
14374 case VSX_BUILTIN_STXVW4X_V8HI:
14375 return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v8hi, exp);
14376 case VSX_BUILTIN_STXVW4X_V16QI:
14377 return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v16qi, exp);
14378
14379 /* For the following on big endian, it's ok to use any appropriate
14380 unaligned-supporting store, so use a generic expander. For
14381 little-endian, the exact element-reversing instruction must
14382 be used. */
14383 case VSX_BUILTIN_ST_ELEMREV_V1TI:
14384 {
14385 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_store_v1ti
14386 : CODE_FOR_vsx_st_elemrev_v1ti);
14387 return altivec_expand_stv_builtin (code, exp);
14388 }
14389 case VSX_BUILTIN_ST_ELEMREV_V2DF:
14390 {
14391 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_store_v2df
14392 : CODE_FOR_vsx_st_elemrev_v2df);
14393 return altivec_expand_stv_builtin (code, exp);
14394 }
14395 case VSX_BUILTIN_ST_ELEMREV_V2DI:
14396 {
14397 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_store_v2di
14398 : CODE_FOR_vsx_st_elemrev_v2di);
14399 return altivec_expand_stv_builtin (code, exp);
14400 }
14401 case VSX_BUILTIN_ST_ELEMREV_V4SF:
14402 {
14403 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_store_v4sf
14404 : CODE_FOR_vsx_st_elemrev_v4sf);
14405 return altivec_expand_stv_builtin (code, exp);
14406 }
14407 case VSX_BUILTIN_ST_ELEMREV_V4SI:
14408 {
14409 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_store_v4si
14410 : CODE_FOR_vsx_st_elemrev_v4si);
14411 return altivec_expand_stv_builtin (code, exp);
14412 }
14413 case VSX_BUILTIN_ST_ELEMREV_V8HI:
14414 {
14415 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_store_v8hi
14416 : CODE_FOR_vsx_st_elemrev_v8hi);
14417 return altivec_expand_stv_builtin (code, exp);
14418 }
14419 case VSX_BUILTIN_ST_ELEMREV_V16QI:
14420 {
14421 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_store_v16qi
14422 : CODE_FOR_vsx_st_elemrev_v16qi);
14423 return altivec_expand_stv_builtin (code, exp);
14424 }
14425
14426 case ALTIVEC_BUILTIN_MFVSCR:
14427 icode = CODE_FOR_altivec_mfvscr;
14428 tmode = insn_data[icode].operand[0].mode;
14429
14430 if (target == 0
14431 || GET_MODE (target) != tmode
14432 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
14433 target = gen_reg_rtx (tmode);
14434
14435 pat = GEN_FCN (icode) (target);
14436 if (! pat)
14437 return 0;
14438 emit_insn (pat);
14439 return target;
14440
14441 case ALTIVEC_BUILTIN_MTVSCR:
14442 icode = CODE_FOR_altivec_mtvscr;
14443 arg0 = CALL_EXPR_ARG (exp, 0);
14444 op0 = expand_normal (arg0);
14445 mode0 = insn_data[icode].operand[0].mode;
14446
14447 /* If we got invalid arguments bail out before generating bad rtl. */
14448 if (arg0 == error_mark_node)
14449 return const0_rtx;
14450
14451 if (! (*insn_data[icode].operand[0].predicate) (op0, mode0))
14452 op0 = copy_to_mode_reg (mode0, op0);
14453
14454 pat = GEN_FCN (icode) (op0);
14455 if (pat)
14456 emit_insn (pat);
14457 return NULL_RTX;
14458
14459 case ALTIVEC_BUILTIN_DSSALL:
14460 emit_insn (gen_altivec_dssall ());
14461 return NULL_RTX;
14462
14463 case ALTIVEC_BUILTIN_DSS:
14464 icode = CODE_FOR_altivec_dss;
14465 arg0 = CALL_EXPR_ARG (exp, 0);
14466 STRIP_NOPS (arg0);
14467 op0 = expand_normal (arg0);
14468 mode0 = insn_data[icode].operand[0].mode;
14469
14470 /* If we got invalid arguments bail out before generating bad rtl. */
14471 if (arg0 == error_mark_node)
14472 return const0_rtx;
14473
14474 if (TREE_CODE (arg0) != INTEGER_CST
14475 || TREE_INT_CST_LOW (arg0) & ~0x3)
14476 {
14477 error ("argument to %qs must be a 2-bit unsigned literal", "dss");
14478 return const0_rtx;
14479 }
14480
14481 if (! (*insn_data[icode].operand[0].predicate) (op0, mode0))
14482 op0 = copy_to_mode_reg (mode0, op0);
14483
14484 emit_insn (gen_altivec_dss (op0));
14485 return NULL_RTX;
14486
14487 case ALTIVEC_BUILTIN_VEC_INIT_V4SI:
14488 case ALTIVEC_BUILTIN_VEC_INIT_V8HI:
14489 case ALTIVEC_BUILTIN_VEC_INIT_V16QI:
14490 case ALTIVEC_BUILTIN_VEC_INIT_V4SF:
14491 case VSX_BUILTIN_VEC_INIT_V2DF:
14492 case VSX_BUILTIN_VEC_INIT_V2DI:
14493 case VSX_BUILTIN_VEC_INIT_V1TI:
14494 return altivec_expand_vec_init_builtin (TREE_TYPE (exp), exp, target);
14495
14496 case ALTIVEC_BUILTIN_VEC_SET_V4SI:
14497 case ALTIVEC_BUILTIN_VEC_SET_V8HI:
14498 case ALTIVEC_BUILTIN_VEC_SET_V16QI:
14499 case ALTIVEC_BUILTIN_VEC_SET_V4SF:
14500 case VSX_BUILTIN_VEC_SET_V2DF:
14501 case VSX_BUILTIN_VEC_SET_V2DI:
14502 case VSX_BUILTIN_VEC_SET_V1TI:
14503 return altivec_expand_vec_set_builtin (exp);
14504
14505 case ALTIVEC_BUILTIN_VEC_EXT_V4SI:
14506 case ALTIVEC_BUILTIN_VEC_EXT_V8HI:
14507 case ALTIVEC_BUILTIN_VEC_EXT_V16QI:
14508 case ALTIVEC_BUILTIN_VEC_EXT_V4SF:
14509 case VSX_BUILTIN_VEC_EXT_V2DF:
14510 case VSX_BUILTIN_VEC_EXT_V2DI:
14511 case VSX_BUILTIN_VEC_EXT_V1TI:
14512 return altivec_expand_vec_ext_builtin (exp, target);
14513
14514 case P9V_BUILTIN_VEC_EXTRACT4B:
14515 arg1 = CALL_EXPR_ARG (exp, 1);
14516 STRIP_NOPS (arg1);
14517
14518 /* Generate a normal call if it is invalid. */
14519 if (arg1 == error_mark_node)
14520 return expand_call (exp, target, false);
14521
14522 if (TREE_CODE (arg1) != INTEGER_CST || TREE_INT_CST_LOW (arg1) > 12)
14523 {
	  error ("second argument to %qs must be in the range [0, 12]",
		 "vec_vextract4b");
14525 return expand_call (exp, target, false);
14526 }
14527 break;
14528
14529 case P9V_BUILTIN_VEC_INSERT4B:
14530 arg2 = CALL_EXPR_ARG (exp, 2);
14531 STRIP_NOPS (arg2);
14532
14533 /* Generate a normal call if it is invalid. */
14534 if (arg2 == error_mark_node)
14535 return expand_call (exp, target, false);
14536
14537 if (TREE_CODE (arg2) != INTEGER_CST || TREE_INT_CST_LOW (arg2) > 12)
14538 {
	  error ("third argument to %qs must be in the range [0, 12]",
		 "vec_vinsert4b");
14540 return expand_call (exp, target, false);
14541 }
14542 break;
14543
14544 default:
14545 break;
14547 }
14548
14549 /* Expand abs* operations. */
14550 d = bdesc_abs;
14551 for (i = 0; i < ARRAY_SIZE (bdesc_abs); i++, d++)
14552 if (d->code == fcode)
14553 return altivec_expand_abs_builtin (d->icode, exp, target);
14554
14555 /* Expand the AltiVec predicates. */
14556 d = bdesc_altivec_preds;
14557 for (i = 0; i < ARRAY_SIZE (bdesc_altivec_preds); i++, d++)
14558 if (d->code == fcode)
14559 return altivec_expand_predicate_builtin (d->icode, exp, target);
14560
  /* The LV* builtins are funky; they are initialized differently from
     the other builtins.  */
14562 switch (fcode)
14563 {
14564 case ALTIVEC_BUILTIN_LVSL:
14565 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvsl,
14566 exp, target, false);
14567 case ALTIVEC_BUILTIN_LVSR:
14568 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvsr,
14569 exp, target, false);
14570 case ALTIVEC_BUILTIN_LVEBX:
14571 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvebx,
14572 exp, target, false);
14573 case ALTIVEC_BUILTIN_LVEHX:
14574 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvehx,
14575 exp, target, false);
14576 case ALTIVEC_BUILTIN_LVEWX:
14577 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvewx,
14578 exp, target, false);
14579 case ALTIVEC_BUILTIN_LVXL_V2DF:
14580 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvxl_v2df,
14581 exp, target, false);
14582 case ALTIVEC_BUILTIN_LVXL_V2DI:
14583 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvxl_v2di,
14584 exp, target, false);
14585 case ALTIVEC_BUILTIN_LVXL_V4SF:
14586 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvxl_v4sf,
14587 exp, target, false);
14588 case ALTIVEC_BUILTIN_LVXL:
14589 case ALTIVEC_BUILTIN_LVXL_V4SI:
14590 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvxl_v4si,
14591 exp, target, false);
14592 case ALTIVEC_BUILTIN_LVXL_V8HI:
14593 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvxl_v8hi,
14594 exp, target, false);
14595 case ALTIVEC_BUILTIN_LVXL_V16QI:
14596 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvxl_v16qi,
14597 exp, target, false);
14598 case ALTIVEC_BUILTIN_LVX_V1TI:
14599 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvx_v1ti,
14600 exp, target, false);
14601 case ALTIVEC_BUILTIN_LVX_V2DF:
14602 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvx_v2df,
14603 exp, target, false);
14604 case ALTIVEC_BUILTIN_LVX_V2DI:
14605 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvx_v2di,
14606 exp, target, false);
14607 case ALTIVEC_BUILTIN_LVX_V4SF:
14608 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvx_v4sf,
14609 exp, target, false);
14610 case ALTIVEC_BUILTIN_LVX:
14611 case ALTIVEC_BUILTIN_LVX_V4SI:
14612 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvx_v4si,
14613 exp, target, false);
14614 case ALTIVEC_BUILTIN_LVX_V8HI:
14615 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvx_v8hi,
14616 exp, target, false);
14617 case ALTIVEC_BUILTIN_LVX_V16QI:
14618 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvx_v16qi,
14619 exp, target, false);
14620 case ALTIVEC_BUILTIN_LVLX:
14621 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvlx,
14622 exp, target, true);
14623 case ALTIVEC_BUILTIN_LVLXL:
14624 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvlxl,
14625 exp, target, true);
14626 case ALTIVEC_BUILTIN_LVRX:
14627 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvrx,
14628 exp, target, true);
14629 case ALTIVEC_BUILTIN_LVRXL:
14630 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvrxl,
14631 exp, target, true);
14632 case VSX_BUILTIN_LXVD2X_V1TI:
14633 return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v1ti,
14634 exp, target, false);
14635 case VSX_BUILTIN_LXVD2X_V2DF:
14636 return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v2df,
14637 exp, target, false);
14638 case VSX_BUILTIN_LXVD2X_V2DI:
14639 return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v2di,
14640 exp, target, false);
14641 case VSX_BUILTIN_LXVW4X_V4SF:
14642 return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v4sf,
14643 exp, target, false);
14644 case VSX_BUILTIN_LXVW4X_V4SI:
14645 return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v4si,
14646 exp, target, false);
14647 case VSX_BUILTIN_LXVW4X_V8HI:
14648 return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v8hi,
14649 exp, target, false);
14650 case VSX_BUILTIN_LXVW4X_V16QI:
14651 return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v16qi,
14652 exp, target, false);
14653 /* For the following on big endian, it's ok to use any appropriate
14654 unaligned-supporting load, so use a generic expander. For
14655 little-endian, the exact element-reversing instruction must
14656 be used. */
14657 case VSX_BUILTIN_LD_ELEMREV_V2DF:
14658 {
14659 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_load_v2df
14660 : CODE_FOR_vsx_ld_elemrev_v2df);
14661 return altivec_expand_lv_builtin (code, exp, target, false);
14662 }
14663 case VSX_BUILTIN_LD_ELEMREV_V1TI:
14664 {
14665 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_load_v1ti
14666 : CODE_FOR_vsx_ld_elemrev_v1ti);
14667 return altivec_expand_lv_builtin (code, exp, target, false);
14668 }
14669 case VSX_BUILTIN_LD_ELEMREV_V2DI:
14670 {
14671 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_load_v2di
14672 : CODE_FOR_vsx_ld_elemrev_v2di);
14673 return altivec_expand_lv_builtin (code, exp, target, false);
14674 }
14675 case VSX_BUILTIN_LD_ELEMREV_V4SF:
14676 {
14677 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_load_v4sf
14678 : CODE_FOR_vsx_ld_elemrev_v4sf);
14679 return altivec_expand_lv_builtin (code, exp, target, false);
14680 }
14681 case VSX_BUILTIN_LD_ELEMREV_V4SI:
14682 {
14683 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_load_v4si
14684 : CODE_FOR_vsx_ld_elemrev_v4si);
14685 return altivec_expand_lv_builtin (code, exp, target, false);
14686 }
14687 case VSX_BUILTIN_LD_ELEMREV_V8HI:
14688 {
14689 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_load_v8hi
14690 : CODE_FOR_vsx_ld_elemrev_v8hi);
14691 return altivec_expand_lv_builtin (code, exp, target, false);
14692 }
14693 case VSX_BUILTIN_LD_ELEMREV_V16QI:
14694 {
14695 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_load_v16qi
14696 : CODE_FOR_vsx_ld_elemrev_v16qi);
14697 return altivec_expand_lv_builtin (code, exp, target, false);
14698 }
    default:
      break;
14703 }
14704
14705 *expandedp = false;
14706 return NULL_RTX;
14707 }
14708
14709 /* Check whether a builtin function is supported in this target
14710 configuration. */
14711 bool
14712 rs6000_builtin_is_supported_p (enum rs6000_builtins fncode)
14713 {
  HOST_WIDE_INT fnmask = rs6000_builtin_info[fncode].mask;
  return (fnmask & rs6000_builtin_mask) == fnmask;
14719 }
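/* For instance (illustrative): if a builtin's mask requires both the
   RS6000_BTM_ALTIVEC and RS6000_BTM_VSX bits, the builtin is reported as
   supported only when both bits are also set in rs6000_builtin_mask:

     fnmask = RS6000_BTM_ALTIVEC | RS6000_BTM_VSX;
     (fnmask & rs6000_builtin_mask) == fnmask   => supported

   Compiling without -mvsx clears RS6000_BTM_VSX and the test fails.  */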
14720
14721 /* Raise an error message for a builtin function that is called without the
14722 appropriate target options being set. */
14723
14724 static void
14725 rs6000_invalid_builtin (enum rs6000_builtins fncode)
14726 {
14727 size_t uns_fncode = (size_t) fncode;
14728 const char *name = rs6000_builtin_info[uns_fncode].name;
14729 HOST_WIDE_INT fnmask = rs6000_builtin_info[uns_fncode].mask;
14730
14731 gcc_assert (name != NULL);
14732 if ((fnmask & RS6000_BTM_CELL) != 0)
14733 error ("builtin function %qs is only valid for the cell processor", name);
14734 else if ((fnmask & RS6000_BTM_VSX) != 0)
14735 error ("builtin function %qs requires the %qs option", name, "-mvsx");
14736 else if ((fnmask & RS6000_BTM_HTM) != 0)
14737 error ("builtin function %qs requires the %qs option", name, "-mhtm");
14738 else if ((fnmask & RS6000_BTM_ALTIVEC) != 0)
14739 error ("builtin function %qs requires the %qs option", name, "-maltivec");
14740 else if ((fnmask & (RS6000_BTM_DFP | RS6000_BTM_P8_VECTOR))
14741 == (RS6000_BTM_DFP | RS6000_BTM_P8_VECTOR))
14742 error ("builtin function %qs requires the %qs and %qs options",
14743 name, "-mhard-dfp", "-mpower8-vector");
14744 else if ((fnmask & RS6000_BTM_DFP) != 0)
14745 error ("builtin function %qs requires the %qs option", name, "-mhard-dfp");
14746 else if ((fnmask & RS6000_BTM_P8_VECTOR) != 0)
14747 error ("builtin function %qs requires the %qs option", name,
14748 "-mpower8-vector");
14749 else if ((fnmask & (RS6000_BTM_P9_VECTOR | RS6000_BTM_64BIT))
14750 == (RS6000_BTM_P9_VECTOR | RS6000_BTM_64BIT))
14751 error ("builtin function %qs requires the %qs and %qs options",
14752 name, "-mcpu=power9", "-m64");
14753 else if ((fnmask & RS6000_BTM_P9_VECTOR) != 0)
14754 error ("builtin function %qs requires the %qs option", name,
14755 "-mcpu=power9");
14756 else if ((fnmask & (RS6000_BTM_P9_MISC | RS6000_BTM_64BIT))
14757 == (RS6000_BTM_P9_MISC | RS6000_BTM_64BIT))
14758 error ("builtin function %qs requires the %qs and %qs options",
14759 name, "-mcpu=power9", "-m64");
14760 else if ((fnmask & RS6000_BTM_P9_MISC) == RS6000_BTM_P9_MISC)
14761 error ("builtin function %qs requires the %qs option", name,
14762 "-mcpu=power9");
14763 else if ((fnmask & RS6000_BTM_LDBL128) == RS6000_BTM_LDBL128)
14764 {
14765 if (!TARGET_HARD_FLOAT)
14766 error ("builtin function %qs requires the %qs option", name,
14767 "-mhard-float");
14768 else
14769 error ("builtin function %qs requires the %qs option", name,
14770 TARGET_IEEEQUAD ? "-mabi=ibmlongdouble" : "-mlong-double-128");
14771 }
14772 else if ((fnmask & RS6000_BTM_HARD_FLOAT) != 0)
14773 error ("builtin function %qs requires the %qs option", name,
14774 "-mhard-float");
14775 else if ((fnmask & RS6000_BTM_FLOAT128_HW) != 0)
14776 error ("builtin function %qs requires ISA 3.0 IEEE 128-bit floating point",
14777 name);
14778 else if ((fnmask & RS6000_BTM_FLOAT128) != 0)
14779 error ("builtin function %qs requires the %qs option", name,
14780 "%<-mfloat128%>");
14781 else if ((fnmask & (RS6000_BTM_POPCNTD | RS6000_BTM_POWERPC64))
14782 == (RS6000_BTM_POPCNTD | RS6000_BTM_POWERPC64))
14783 error ("builtin function %qs requires the %qs (or newer), and "
14784 "%qs or %qs options",
14785 name, "-mcpu=power7", "-m64", "-mpowerpc64");
14786 else
14787 error ("builtin function %qs is not supported with the current options",
14788 name);
14789 }
14790
14791 /* Target hook for early folding of built-ins, shamelessly stolen
14792 from ia64.c. */
14793
14794 static tree
14795 rs6000_fold_builtin (tree fndecl ATTRIBUTE_UNUSED,
14796 int n_args ATTRIBUTE_UNUSED,
14797 tree *args ATTRIBUTE_UNUSED,
14798 bool ignore ATTRIBUTE_UNUSED)
14799 {
14800 #ifdef SUBTARGET_FOLD_BUILTIN
14801 return SUBTARGET_FOLD_BUILTIN (fndecl, n_args, args, ignore);
14802 #else
14803 return NULL_TREE;
14804 #endif
14805 }
14806
/* Helper function to sort out which built-ins may be valid without having
   an LHS.  */
14809 static bool
14810 rs6000_builtin_valid_without_lhs (enum rs6000_builtins fn_code)
14811 {
14812 switch (fn_code)
14813 {
14814 case ALTIVEC_BUILTIN_STVX_V16QI:
14815 case ALTIVEC_BUILTIN_STVX_V8HI:
14816 case ALTIVEC_BUILTIN_STVX_V4SI:
14817 case ALTIVEC_BUILTIN_STVX_V4SF:
14818 case ALTIVEC_BUILTIN_STVX_V2DI:
14819 case ALTIVEC_BUILTIN_STVX_V2DF:
14820 case VSX_BUILTIN_STXVW4X_V16QI:
14821 case VSX_BUILTIN_STXVW4X_V8HI:
14822 case VSX_BUILTIN_STXVW4X_V4SF:
14823 case VSX_BUILTIN_STXVW4X_V4SI:
14824 case VSX_BUILTIN_STXVD2X_V2DF:
14825 case VSX_BUILTIN_STXVD2X_V2DI:
14826 return true;
14827 default:
14828 return false;
14829 }
14830 }
14831
14832 /* Helper function to handle the gimple folding of a vector compare
14833 operation. This sets up true/false vectors, and uses the
14834 VEC_COND_EXPR operation.
14835 CODE indicates which comparison is to be made. (EQ, GT, ...).
14836 TYPE indicates the type of the result. */
14837 static tree
14838 fold_build_vec_cmp (tree_code code, tree type,
14839 tree arg0, tree arg1)
14840 {
14841 tree cmp_type = build_same_sized_truth_vector_type (type);
14842 tree zero_vec = build_zero_cst (type);
14843 tree minus_one_vec = build_minus_one_cst (type);
14844 tree cmp = fold_build2 (code, cmp_type, arg0, arg1);
14845 return fold_build3 (VEC_COND_EXPR, type, cmp, minus_one_vec, zero_vec);
14846 }
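/* For example (illustrative): folding an equality compare of two V4SI
   values builds, in GIMPLE terms, roughly

     cmp = arg0 == arg1;
     lhs = VEC_COND_EXPR <cmp, { -1, -1, -1, -1 }, { 0, 0, 0, 0 }>;

   so each true lane becomes all-ones and each false lane becomes zero.  */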
14847
14848 /* Helper function to handle the in-between steps for the
14849 vector compare built-ins. */
14850 static void
14851 fold_compare_helper (gimple_stmt_iterator *gsi, tree_code code, gimple *stmt)
14852 {
14853 tree arg0 = gimple_call_arg (stmt, 0);
14854 tree arg1 = gimple_call_arg (stmt, 1);
14855 tree lhs = gimple_call_lhs (stmt);
14856 tree cmp = fold_build_vec_cmp (code, TREE_TYPE (lhs), arg0, arg1);
14857 gimple *g = gimple_build_assign (lhs, cmp);
14858 gimple_set_location (g, gimple_location (stmt));
14859 gsi_replace (gsi, g, true);
14860 }
14861
14862 /* Helper function to map V2DF and V4SF types to their
14863 integral equivalents (V2DI and V4SI). */
14864 tree map_to_integral_tree_type (tree input_tree_type)
14865 {
14866 if (INTEGRAL_TYPE_P (TREE_TYPE (input_tree_type)))
14867 return input_tree_type;
14868 else
14869 {
14870 if (types_compatible_p (TREE_TYPE (input_tree_type),
14871 TREE_TYPE (V2DF_type_node)))
14872 return V2DI_type_node;
14873 else if (types_compatible_p (TREE_TYPE (input_tree_type),
14874 TREE_TYPE (V4SF_type_node)))
14875 return V4SI_type_node;
14876 else
14877 gcc_unreachable ();
14878 }
14879 }
14880
/* Helper function to handle the vector merge[hl] built-ins.  The
   implementation difference between the h and l versions is in the
   values used when building the permute vector for a high-word versus
   a low-word merge; the variance is keyed off the USE_HIGH parameter.  */
14885 static void
14886 fold_mergehl_helper (gimple_stmt_iterator *gsi, gimple *stmt, int use_high)
14887 {
14888 tree arg0 = gimple_call_arg (stmt, 0);
14889 tree arg1 = gimple_call_arg (stmt, 1);
14890 tree lhs = gimple_call_lhs (stmt);
14891 tree lhs_type = TREE_TYPE (lhs);
14892 int n_elts = TYPE_VECTOR_SUBPARTS (lhs_type);
14893 int midpoint = n_elts / 2;
14894 int offset = 0;
14895
14896 if (use_high == 1)
14897 offset = midpoint;
14898
14899 /* The permute_type will match the lhs for integral types. For double and
14900 float types, the permute type needs to map to the V2 or V4 type that
14901 matches size. */
14902 tree permute_type;
14903 permute_type = map_to_integral_tree_type (lhs_type);
14904 tree_vector_builder elts (permute_type, VECTOR_CST_NELTS (arg0), 1);
14905
14906 for (int i = 0; i < midpoint; i++)
14907 {
14908 elts.safe_push (build_int_cst (TREE_TYPE (permute_type),
14909 offset + i));
14910 elts.safe_push (build_int_cst (TREE_TYPE (permute_type),
14911 offset + n_elts + i));
14912 }
14913
14914 tree permute = elts.build ();
14915
14916 gimple *g = gimple_build_assign (lhs, VEC_PERM_EXPR, arg0, arg1, permute);
14917 gimple_set_location (g, gimple_location (stmt));
14918 gsi_replace (gsi, g, true);
14919 }
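/* For a 4-element vector the loop above yields (illustrative) permute
   selectors { 0, 4, 1, 5 } when USE_HIGH is 0 and { 2, 6, 3, 7 } when
   USE_HIGH is 1, interleaving one half of ARG0 with the corresponding
   half of ARG1.  */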
14920
14921 /* Helper function to handle the vector merge[eo] built-ins. */
14922 static void
14923 fold_mergeeo_helper (gimple_stmt_iterator *gsi, gimple *stmt, int use_odd)
14924 {
14925 tree arg0 = gimple_call_arg (stmt, 0);
14926 tree arg1 = gimple_call_arg (stmt, 1);
14927 tree lhs = gimple_call_lhs (stmt);
14928 tree lhs_type = TREE_TYPE (lhs);
14929 int n_elts = TYPE_VECTOR_SUBPARTS (lhs_type);
14930
14931 /* The permute_type will match the lhs for integral types. For double and
14932 float types, the permute type needs to map to the V2 or V4 type that
14933 matches size. */
14934 tree permute_type;
14935 permute_type = map_to_integral_tree_type (lhs_type);
14936
14937 tree_vector_builder elts (permute_type, VECTOR_CST_NELTS (arg0), 1);
14938
14939 /* Build the permute vector. */
14940 for (int i = 0; i < n_elts / 2; i++)
14941 {
14942 elts.safe_push (build_int_cst (TREE_TYPE (permute_type),
14943 2*i + use_odd));
14944 elts.safe_push (build_int_cst (TREE_TYPE (permute_type),
14945 2*i + use_odd + n_elts));
14946 }
14947
14948 tree permute = elts.build ();
14949
14950 gimple *g = gimple_build_assign (lhs, VEC_PERM_EXPR, arg0, arg1, permute);
14951 gimple_set_location (g, gimple_location (stmt));
14952 gsi_replace (gsi, g, true);
14953 }
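/* For a 4-element vector the loop above yields (illustrative) permute
   selectors { 0, 4, 2, 6 } when USE_ODD is 0 and { 1, 5, 3, 7 } when
   USE_ODD is 1, selecting the even or odd elements of ARG0 and ARG1.  */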
14954
14955 /* Fold a machine-dependent built-in in GIMPLE. (For folding into
14956 a constant, use rs6000_fold_builtin.) */
14957
14958 bool
14959 rs6000_gimple_fold_builtin (gimple_stmt_iterator *gsi)
14960 {
14961 gimple *stmt = gsi_stmt (*gsi);
14962 tree fndecl = gimple_call_fndecl (stmt);
14963 gcc_checking_assert (fndecl && DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_MD);
14964 enum rs6000_builtins fn_code
14965 = (enum rs6000_builtins) DECL_FUNCTION_CODE (fndecl);
14966 tree arg0, arg1, lhs, temp;
14967 enum tree_code bcode;
14968 gimple *g;
14969
14970 size_t uns_fncode = (size_t) fn_code;
14971 enum insn_code icode = rs6000_builtin_info[uns_fncode].icode;
14972 const char *fn_name1 = rs6000_builtin_info[uns_fncode].name;
14973 const char *fn_name2 = (icode != CODE_FOR_nothing)
14974 ? get_insn_name ((int) icode)
14975 : "nothing";
14976
14977 if (TARGET_DEBUG_BUILTIN)
14978 fprintf (stderr, "rs6000_gimple_fold_builtin %d %s %s\n",
14979 fn_code, fn_name1, fn_name2);
14980
14981 if (!rs6000_fold_gimple)
14982 return false;
14983
  /* Prevent gimple folding for code that does not have an LHS, unless it is
     allowed per the rs6000_builtin_valid_without_lhs helper function.  */
14986 if (!gimple_call_lhs (stmt) && !rs6000_builtin_valid_without_lhs (fn_code))
14987 return false;
14988
14989 /* Don't fold invalid builtins, let rs6000_expand_builtin diagnose it. */
14990 HOST_WIDE_INT mask = rs6000_builtin_info[uns_fncode].mask;
14991 bool func_valid_p = (rs6000_builtin_mask & mask) == mask;
14992 if (!func_valid_p)
14993 return false;
14994
14995 switch (fn_code)
14996 {
14997 /* Flavors of vec_add. We deliberately don't expand
14998 P8V_BUILTIN_VADDUQM as it gets lowered from V1TImode to
14999 TImode, resulting in much poorer code generation. */
15000 case ALTIVEC_BUILTIN_VADDUBM:
15001 case ALTIVEC_BUILTIN_VADDUHM:
15002 case ALTIVEC_BUILTIN_VADDUWM:
15003 case P8V_BUILTIN_VADDUDM:
15004 case ALTIVEC_BUILTIN_VADDFP:
15005 case VSX_BUILTIN_XVADDDP:
15006 bcode = PLUS_EXPR;
15007 do_binary:
15008 arg0 = gimple_call_arg (stmt, 0);
15009 arg1 = gimple_call_arg (stmt, 1);
15010 lhs = gimple_call_lhs (stmt);
15011 if (INTEGRAL_TYPE_P (TREE_TYPE (TREE_TYPE (lhs)))
15012 && !TYPE_OVERFLOW_WRAPS (TREE_TYPE (TREE_TYPE (lhs))))
15013 {
	  /* Ensure the binary operation is performed in a type
	     that wraps if it is an integral type.  */
15016 gimple_seq stmts = NULL;
15017 tree type = unsigned_type_for (TREE_TYPE (lhs));
15018 tree uarg0 = gimple_build (&stmts, VIEW_CONVERT_EXPR,
15019 type, arg0);
15020 tree uarg1 = gimple_build (&stmts, VIEW_CONVERT_EXPR,
15021 type, arg1);
15022 tree res = gimple_build (&stmts, gimple_location (stmt), bcode,
15023 type, uarg0, uarg1);
15024 gsi_insert_seq_before (gsi, stmts, GSI_SAME_STMT);
15025 g = gimple_build_assign (lhs, VIEW_CONVERT_EXPR,
15026 build1 (VIEW_CONVERT_EXPR,
15027 TREE_TYPE (lhs), res));
15028 gsi_replace (gsi, g, true);
15029 return true;
15030 }
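      /* For example (illustrative): vec_add on vector signed int is
	 rewritten above as

	   u0  = VIEW_CONVERT_EXPR <vector unsigned int> (arg0);
	   u1  = VIEW_CONVERT_EXPR <vector unsigned int> (arg1);
	   res = u0 + u1;
	   lhs = VIEW_CONVERT_EXPR <vector signed int> (res);

	 so the addition wraps instead of invoking signed-overflow rules.  */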
15031 g = gimple_build_assign (lhs, bcode, arg0, arg1);
15032 gimple_set_location (g, gimple_location (stmt));
15033 gsi_replace (gsi, g, true);
15034 return true;
15035 /* Flavors of vec_sub. We deliberately don't expand
15036 P8V_BUILTIN_VSUBUQM. */
15037 case ALTIVEC_BUILTIN_VSUBUBM:
15038 case ALTIVEC_BUILTIN_VSUBUHM:
15039 case ALTIVEC_BUILTIN_VSUBUWM:
15040 case P8V_BUILTIN_VSUBUDM:
15041 case ALTIVEC_BUILTIN_VSUBFP:
15042 case VSX_BUILTIN_XVSUBDP:
15043 bcode = MINUS_EXPR;
15044 goto do_binary;
15045 case VSX_BUILTIN_XVMULSP:
15046 case VSX_BUILTIN_XVMULDP:
15047 arg0 = gimple_call_arg (stmt, 0);
15048 arg1 = gimple_call_arg (stmt, 1);
15049 lhs = gimple_call_lhs (stmt);
15050 g = gimple_build_assign (lhs, MULT_EXPR, arg0, arg1);
15051 gimple_set_location (g, gimple_location (stmt));
15052 gsi_replace (gsi, g, true);
15053 return true;
15054 /* Even element flavors of vec_mul (signed). */
15055 case ALTIVEC_BUILTIN_VMULESB:
15056 case ALTIVEC_BUILTIN_VMULESH:
15057 case P8V_BUILTIN_VMULESW:
15058 /* Even element flavors of vec_mul (unsigned). */
15059 case ALTIVEC_BUILTIN_VMULEUB:
15060 case ALTIVEC_BUILTIN_VMULEUH:
15061 case P8V_BUILTIN_VMULEUW:
15062 arg0 = gimple_call_arg (stmt, 0);
15063 arg1 = gimple_call_arg (stmt, 1);
15064 lhs = gimple_call_lhs (stmt);
15065 g = gimple_build_assign (lhs, VEC_WIDEN_MULT_EVEN_EXPR, arg0, arg1);
15066 gimple_set_location (g, gimple_location (stmt));
15067 gsi_replace (gsi, g, true);
15068 return true;
15069 /* Odd element flavors of vec_mul (signed). */
15070 case ALTIVEC_BUILTIN_VMULOSB:
15071 case ALTIVEC_BUILTIN_VMULOSH:
15072 case P8V_BUILTIN_VMULOSW:
15073 /* Odd element flavors of vec_mul (unsigned). */
15074 case ALTIVEC_BUILTIN_VMULOUB:
15075 case ALTIVEC_BUILTIN_VMULOUH:
15076 case P8V_BUILTIN_VMULOUW:
15077 arg0 = gimple_call_arg (stmt, 0);
15078 arg1 = gimple_call_arg (stmt, 1);
15079 lhs = gimple_call_lhs (stmt);
15080 g = gimple_build_assign (lhs, VEC_WIDEN_MULT_ODD_EXPR, arg0, arg1);
15081 gimple_set_location (g, gimple_location (stmt));
15082 gsi_replace (gsi, g, true);
15083 return true;
15084 /* Flavors of vec_div (Integer). */
15085 case VSX_BUILTIN_DIV_V2DI:
15086 case VSX_BUILTIN_UDIV_V2DI:
15087 arg0 = gimple_call_arg (stmt, 0);
15088 arg1 = gimple_call_arg (stmt, 1);
15089 lhs = gimple_call_lhs (stmt);
15090 g = gimple_build_assign (lhs, TRUNC_DIV_EXPR, arg0, arg1);
15091 gimple_set_location (g, gimple_location (stmt));
15092 gsi_replace (gsi, g, true);
15093 return true;
15094 /* Flavors of vec_div (Float). */
15095 case VSX_BUILTIN_XVDIVSP:
15096 case VSX_BUILTIN_XVDIVDP:
15097 arg0 = gimple_call_arg (stmt, 0);
15098 arg1 = gimple_call_arg (stmt, 1);
15099 lhs = gimple_call_lhs (stmt);
15100 g = gimple_build_assign (lhs, RDIV_EXPR, arg0, arg1);
15101 gimple_set_location (g, gimple_location (stmt));
15102 gsi_replace (gsi, g, true);
15103 return true;
15104 /* Flavors of vec_and. */
15105 case ALTIVEC_BUILTIN_VAND:
15106 arg0 = gimple_call_arg (stmt, 0);
15107 arg1 = gimple_call_arg (stmt, 1);
15108 lhs = gimple_call_lhs (stmt);
15109 g = gimple_build_assign (lhs, BIT_AND_EXPR, arg0, arg1);
15110 gimple_set_location (g, gimple_location (stmt));
15111 gsi_replace (gsi, g, true);
15112 return true;
15113 /* Flavors of vec_andc. */
15114 case ALTIVEC_BUILTIN_VANDC:
15115 arg0 = gimple_call_arg (stmt, 0);
15116 arg1 = gimple_call_arg (stmt, 1);
15117 lhs = gimple_call_lhs (stmt);
15118 temp = create_tmp_reg_or_ssa_name (TREE_TYPE (arg1));
15119 g = gimple_build_assign (temp, BIT_NOT_EXPR, arg1);
15120 gimple_set_location (g, gimple_location (stmt));
15121 gsi_insert_before (gsi, g, GSI_SAME_STMT);
15122 g = gimple_build_assign (lhs, BIT_AND_EXPR, arg0, temp);
15123 gimple_set_location (g, gimple_location (stmt));
15124 gsi_replace (gsi, g, true);
15125 return true;
15126 /* Flavors of vec_nand. */
15127 case P8V_BUILTIN_VEC_NAND:
15128 case P8V_BUILTIN_NAND_V16QI:
15129 case P8V_BUILTIN_NAND_V8HI:
15130 case P8V_BUILTIN_NAND_V4SI:
15131 case P8V_BUILTIN_NAND_V4SF:
15132 case P8V_BUILTIN_NAND_V2DF:
15133 case P8V_BUILTIN_NAND_V2DI:
15134 arg0 = gimple_call_arg (stmt, 0);
15135 arg1 = gimple_call_arg (stmt, 1);
15136 lhs = gimple_call_lhs (stmt);
15137 temp = create_tmp_reg_or_ssa_name (TREE_TYPE (arg1));
15138 g = gimple_build_assign (temp, BIT_AND_EXPR, arg0, arg1);
15139 gimple_set_location (g, gimple_location (stmt));
15140 gsi_insert_before (gsi, g, GSI_SAME_STMT);
15141 g = gimple_build_assign (lhs, BIT_NOT_EXPR, temp);
15142 gimple_set_location (g, gimple_location (stmt));
15143 gsi_replace (gsi, g, true);
15144 return true;
15145 /* Flavors of vec_or. */
15146 case ALTIVEC_BUILTIN_VOR:
15147 arg0 = gimple_call_arg (stmt, 0);
15148 arg1 = gimple_call_arg (stmt, 1);
15149 lhs = gimple_call_lhs (stmt);
15150 g = gimple_build_assign (lhs, BIT_IOR_EXPR, arg0, arg1);
15151 gimple_set_location (g, gimple_location (stmt));
15152 gsi_replace (gsi, g, true);
15153 return true;
    /* Flavors of vec_orc.  */
15155 case P8V_BUILTIN_ORC_V16QI:
15156 case P8V_BUILTIN_ORC_V8HI:
15157 case P8V_BUILTIN_ORC_V4SI:
15158 case P8V_BUILTIN_ORC_V4SF:
15159 case P8V_BUILTIN_ORC_V2DF:
15160 case P8V_BUILTIN_ORC_V2DI:
15161 arg0 = gimple_call_arg (stmt, 0);
15162 arg1 = gimple_call_arg (stmt, 1);
15163 lhs = gimple_call_lhs (stmt);
15164 temp = create_tmp_reg_or_ssa_name (TREE_TYPE (arg1));
15165 g = gimple_build_assign (temp, BIT_NOT_EXPR, arg1);
15166 gimple_set_location (g, gimple_location (stmt));
15167 gsi_insert_before (gsi, g, GSI_SAME_STMT);
15168 g = gimple_build_assign (lhs, BIT_IOR_EXPR, arg0, temp);
15169 gimple_set_location (g, gimple_location (stmt));
15170 gsi_replace (gsi, g, true);
15171 return true;
15172 /* Flavors of vec_xor. */
15173 case ALTIVEC_BUILTIN_VXOR:
15174 arg0 = gimple_call_arg (stmt, 0);
15175 arg1 = gimple_call_arg (stmt, 1);
15176 lhs = gimple_call_lhs (stmt);
15177 g = gimple_build_assign (lhs, BIT_XOR_EXPR, arg0, arg1);
15178 gimple_set_location (g, gimple_location (stmt));
15179 gsi_replace (gsi, g, true);
15180 return true;
15181 /* Flavors of vec_nor. */
15182 case ALTIVEC_BUILTIN_VNOR:
15183 arg0 = gimple_call_arg (stmt, 0);
15184 arg1 = gimple_call_arg (stmt, 1);
15185 lhs = gimple_call_lhs (stmt);
15186 temp = create_tmp_reg_or_ssa_name (TREE_TYPE (arg1));
15187 g = gimple_build_assign (temp, BIT_IOR_EXPR, arg0, arg1);
15188 gimple_set_location (g, gimple_location (stmt));
15189 gsi_insert_before (gsi, g, GSI_SAME_STMT);
15190 g = gimple_build_assign (lhs, BIT_NOT_EXPR, temp);
15191 gimple_set_location (g, gimple_location (stmt));
15192 gsi_replace (gsi, g, true);
15193 return true;
    /* Flavors of vec_abs.  */
15195 case ALTIVEC_BUILTIN_ABS_V16QI:
15196 case ALTIVEC_BUILTIN_ABS_V8HI:
15197 case ALTIVEC_BUILTIN_ABS_V4SI:
15198 case ALTIVEC_BUILTIN_ABS_V4SF:
15199 case P8V_BUILTIN_ABS_V2DI:
15200 case VSX_BUILTIN_XVABSDP:
15201 arg0 = gimple_call_arg (stmt, 0);
15202 if (INTEGRAL_TYPE_P (TREE_TYPE (TREE_TYPE (arg0)))
15203 && !TYPE_OVERFLOW_WRAPS (TREE_TYPE (TREE_TYPE (arg0))))
15204 return false;
15205 lhs = gimple_call_lhs (stmt);
15206 g = gimple_build_assign (lhs, ABS_EXPR, arg0);
15207 gimple_set_location (g, gimple_location (stmt));
15208 gsi_replace (gsi, g, true);
15209 return true;
    /* Flavors of vec_min.  */
15211 case VSX_BUILTIN_XVMINDP:
15212 case P8V_BUILTIN_VMINSD:
15213 case P8V_BUILTIN_VMINUD:
15214 case ALTIVEC_BUILTIN_VMINSB:
15215 case ALTIVEC_BUILTIN_VMINSH:
15216 case ALTIVEC_BUILTIN_VMINSW:
15217 case ALTIVEC_BUILTIN_VMINUB:
15218 case ALTIVEC_BUILTIN_VMINUH:
15219 case ALTIVEC_BUILTIN_VMINUW:
15220 case ALTIVEC_BUILTIN_VMINFP:
15221 arg0 = gimple_call_arg (stmt, 0);
15222 arg1 = gimple_call_arg (stmt, 1);
15223 lhs = gimple_call_lhs (stmt);
15224 g = gimple_build_assign (lhs, MIN_EXPR, arg0, arg1);
15225 gimple_set_location (g, gimple_location (stmt));
15226 gsi_replace (gsi, g, true);
15227 return true;
    /* Flavors of vec_max.  */
15229 case VSX_BUILTIN_XVMAXDP:
15230 case P8V_BUILTIN_VMAXSD:
15231 case P8V_BUILTIN_VMAXUD:
15232 case ALTIVEC_BUILTIN_VMAXSB:
15233 case ALTIVEC_BUILTIN_VMAXSH:
15234 case ALTIVEC_BUILTIN_VMAXSW:
15235 case ALTIVEC_BUILTIN_VMAXUB:
15236 case ALTIVEC_BUILTIN_VMAXUH:
15237 case ALTIVEC_BUILTIN_VMAXUW:
15238 case ALTIVEC_BUILTIN_VMAXFP:
15239 arg0 = gimple_call_arg (stmt, 0);
15240 arg1 = gimple_call_arg (stmt, 1);
15241 lhs = gimple_call_lhs (stmt);
15242 g = gimple_build_assign (lhs, MAX_EXPR, arg0, arg1);
15243 gimple_set_location (g, gimple_location (stmt));
15244 gsi_replace (gsi, g, true);
15245 return true;
15246 /* Flavors of vec_eqv. */
15247 case P8V_BUILTIN_EQV_V16QI:
15248 case P8V_BUILTIN_EQV_V8HI:
15249 case P8V_BUILTIN_EQV_V4SI:
15250 case P8V_BUILTIN_EQV_V4SF:
15251 case P8V_BUILTIN_EQV_V2DF:
15252 case P8V_BUILTIN_EQV_V2DI:
15253 arg0 = gimple_call_arg (stmt, 0);
15254 arg1 = gimple_call_arg (stmt, 1);
15255 lhs = gimple_call_lhs (stmt);
15256 temp = create_tmp_reg_or_ssa_name (TREE_TYPE (arg1));
15257 g = gimple_build_assign (temp, BIT_XOR_EXPR, arg0, arg1);
15258 gimple_set_location (g, gimple_location (stmt));
15259 gsi_insert_before (gsi, g, GSI_SAME_STMT);
15260 g = gimple_build_assign (lhs, BIT_NOT_EXPR, temp);
15261 gimple_set_location (g, gimple_location (stmt));
15262 gsi_replace (gsi, g, true);
15263 return true;
15264 /* Flavors of vec_rotate_left. */
15265 case ALTIVEC_BUILTIN_VRLB:
15266 case ALTIVEC_BUILTIN_VRLH:
15267 case ALTIVEC_BUILTIN_VRLW:
15268 case P8V_BUILTIN_VRLD:
15269 arg0 = gimple_call_arg (stmt, 0);
15270 arg1 = gimple_call_arg (stmt, 1);
15271 lhs = gimple_call_lhs (stmt);
15272 g = gimple_build_assign (lhs, LROTATE_EXPR, arg0, arg1);
15273 gimple_set_location (g, gimple_location (stmt));
15274 gsi_replace (gsi, g, true);
15275 return true;
15276 /* Flavors of vector shift right algebraic.
15277 vec_sra{b,h,w} -> vsra{b,h,w}. */
15278 case ALTIVEC_BUILTIN_VSRAB:
15279 case ALTIVEC_BUILTIN_VSRAH:
15280 case ALTIVEC_BUILTIN_VSRAW:
15281 case P8V_BUILTIN_VSRAD:
15282 {
15283 arg0 = gimple_call_arg (stmt, 0);
15284 arg1 = gimple_call_arg (stmt, 1);
15285 lhs = gimple_call_lhs (stmt);
15286 tree arg1_type = TREE_TYPE (arg1);
15287 tree unsigned_arg1_type = unsigned_type_for (TREE_TYPE (arg1));
15288 tree unsigned_element_type = unsigned_type_for (TREE_TYPE (arg1_type));
15289 location_t loc = gimple_location (stmt);
	/* Force ARG1 into the range of shift amounts valid for the ARG0
	   element type.  */
15291 /* Build a vector consisting of the max valid bit-size values. */
15292 int n_elts = VECTOR_CST_NELTS (arg1);
15293 tree element_size = build_int_cst (unsigned_element_type,
15294 128 / n_elts);
15295 tree_vector_builder elts (unsigned_arg1_type, n_elts, 1);
15296 for (int i = 0; i < n_elts; i++)
15297 elts.safe_push (element_size);
15298 tree modulo_tree = elts.build ();
15299 /* Modulo the provided shift value against that vector. */
15300 gimple_seq stmts = NULL;
15301 tree unsigned_arg1 = gimple_build (&stmts, VIEW_CONVERT_EXPR,
15302 unsigned_arg1_type, arg1);
15303 tree new_arg1 = gimple_build (&stmts, loc, TRUNC_MOD_EXPR,
15304 unsigned_arg1_type, unsigned_arg1,
15305 modulo_tree);
15306 gsi_insert_seq_before (gsi, stmts, GSI_SAME_STMT);
15307 /* And finally, do the shift. */
15308 g = gimple_build_assign (lhs, RSHIFT_EXPR, arg0, new_arg1);
15309 gimple_set_location (g, loc);
15310 gsi_replace (gsi, g, true);
15311 return true;
15312 }
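    /* For example (illustrative): for vec_sra on V4SI, n_elts is 4, so
       element_size is 128 / 4 == 32 and each shift amount in ARG1 is
       reduced modulo 32 before the RSHIFT_EXPR is emitted.  */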
15313 /* Flavors of vector shift left.
15314 builtin_altivec_vsl{b,h,w} -> vsl{b,h,w}. */
15315 case ALTIVEC_BUILTIN_VSLB:
15316 case ALTIVEC_BUILTIN_VSLH:
15317 case ALTIVEC_BUILTIN_VSLW:
15318 case P8V_BUILTIN_VSLD:
15319 {
15320 location_t loc;
15321 gimple_seq stmts = NULL;
15322 arg0 = gimple_call_arg (stmt, 0);
15323 tree arg0_type = TREE_TYPE (arg0);
15324 if (INTEGRAL_TYPE_P (TREE_TYPE (arg0_type))
15325 && !TYPE_OVERFLOW_WRAPS (TREE_TYPE (arg0_type)))
15326 return false;
15327 arg1 = gimple_call_arg (stmt, 1);
15328 tree arg1_type = TREE_TYPE (arg1);
15329 tree unsigned_arg1_type = unsigned_type_for (TREE_TYPE (arg1));
15330 tree unsigned_element_type = unsigned_type_for (TREE_TYPE (arg1_type));
15331 loc = gimple_location (stmt);
15332 lhs = gimple_call_lhs (stmt);
	/* Force ARG1 into the range of shift amounts valid for the ARG0
	   element type.  */
15334 /* Build a vector consisting of the max valid bit-size values. */
15335 int n_elts = VECTOR_CST_NELTS (arg1);
15336 int tree_size_in_bits = TREE_INT_CST_LOW (size_in_bytes (arg1_type))
15337 * BITS_PER_UNIT;
15338 tree element_size = build_int_cst (unsigned_element_type,
15339 tree_size_in_bits / n_elts);
15340 tree_vector_builder elts (unsigned_type_for (arg1_type), n_elts, 1);
15341 for (int i = 0; i < n_elts; i++)
15342 elts.safe_push (element_size);
15343 tree modulo_tree = elts.build ();
15344 /* Modulo the provided shift value against that vector. */
15345 tree unsigned_arg1 = gimple_build (&stmts, VIEW_CONVERT_EXPR,
15346 unsigned_arg1_type, arg1);
15347 tree new_arg1 = gimple_build (&stmts, loc, TRUNC_MOD_EXPR,
15348 unsigned_arg1_type, unsigned_arg1,
15349 modulo_tree);
15350 gsi_insert_seq_before (gsi, stmts, GSI_SAME_STMT);
15351 /* And finally, do the shift. */
15352 g = gimple_build_assign (lhs, LSHIFT_EXPR, arg0, new_arg1);
15353 gimple_set_location (g, gimple_location (stmt));
15354 gsi_replace (gsi, g, true);
15355 return true;
15356 }
15357 /* Flavors of vector shift right. */
15358 case ALTIVEC_BUILTIN_VSRB:
15359 case ALTIVEC_BUILTIN_VSRH:
15360 case ALTIVEC_BUILTIN_VSRW:
15361 case P8V_BUILTIN_VSRD:
15362 {
15363 arg0 = gimple_call_arg (stmt, 0);
15364 arg1 = gimple_call_arg (stmt, 1);
15365 lhs = gimple_call_lhs (stmt);
15366 tree arg1_type = TREE_TYPE (arg1);
15367 tree unsigned_arg1_type = unsigned_type_for (TREE_TYPE (arg1));
15368 tree unsigned_element_type = unsigned_type_for (TREE_TYPE (arg1_type));
15369 location_t loc = gimple_location (stmt);
15370 gimple_seq stmts = NULL;
15371 /* Convert arg0 to unsigned. */
15372 tree arg0_unsigned
15373 = gimple_build (&stmts, VIEW_CONVERT_EXPR,
15374 unsigned_type_for (TREE_TYPE (arg0)), arg0);
	/* Force ARG1 into the range of shift amounts valid for the ARG0
	   element type.  */
15376 /* Build a vector consisting of the max valid bit-size values. */
15377 int n_elts = VECTOR_CST_NELTS (arg1);
15378 tree element_size = build_int_cst (unsigned_element_type,
15379 128 / n_elts);
15380 tree_vector_builder elts (unsigned_arg1_type, n_elts, 1);
15381 for (int i = 0; i < n_elts; i++)
15382 elts.safe_push (element_size);
15383 tree modulo_tree = elts.build ();
15384 /* Modulo the provided shift value against that vector. */
15385 tree unsigned_arg1 = gimple_build (&stmts, VIEW_CONVERT_EXPR,
15386 unsigned_arg1_type, arg1);
15387 tree new_arg1 = gimple_build (&stmts, loc, TRUNC_MOD_EXPR,
15388 unsigned_arg1_type, unsigned_arg1,
15389 modulo_tree);
15390 /* Do the shift. */
15391 tree res
15392 = gimple_build (&stmts, RSHIFT_EXPR,
15393 TREE_TYPE (arg0_unsigned), arg0_unsigned, new_arg1);
15394 /* Convert result back to the lhs type. */
15395 res = gimple_build (&stmts, VIEW_CONVERT_EXPR, TREE_TYPE (lhs), res);
15396 gsi_insert_seq_before (gsi, stmts, GSI_SAME_STMT);
15397 update_call_from_tree (gsi, res);
15398 return true;
15399 }
15400 /* Vector loads. */
15401 case ALTIVEC_BUILTIN_LVX_V16QI:
15402 case ALTIVEC_BUILTIN_LVX_V8HI:
15403 case ALTIVEC_BUILTIN_LVX_V4SI:
15404 case ALTIVEC_BUILTIN_LVX_V4SF:
15405 case ALTIVEC_BUILTIN_LVX_V2DI:
15406 case ALTIVEC_BUILTIN_LVX_V2DF:
15407 case ALTIVEC_BUILTIN_LVX_V1TI:
15408 {
15409 arg0 = gimple_call_arg (stmt, 0); // offset
15410 arg1 = gimple_call_arg (stmt, 1); // address
15411 lhs = gimple_call_lhs (stmt);
15412 location_t loc = gimple_location (stmt);
15413 /* Since arg1 may be cast to a different type, just use ptr_type_node
15414 here instead of trying to enforce TBAA on pointer types. */
15415 tree arg1_type = ptr_type_node;
15416 tree lhs_type = TREE_TYPE (lhs);
15417 /* POINTER_PLUS_EXPR wants the offset to be of type 'sizetype'. Create
15418 the tree using the value from arg0. The resulting type will match
15419 the type of arg1. */
15420 gimple_seq stmts = NULL;
15421 tree temp_offset = gimple_convert (&stmts, loc, sizetype, arg0);
15422 tree temp_addr = gimple_build (&stmts, loc, POINTER_PLUS_EXPR,
15423 arg1_type, arg1, temp_offset);
15424 /* Mask off any lower bits from the address. */
15425 tree aligned_addr = gimple_build (&stmts, loc, BIT_AND_EXPR,
15426 arg1_type, temp_addr,
15427 build_int_cst (arg1_type, -16));
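	/* In arg1_type, -16 is a mask of all ones except the low four bits,
	   so the BIT_AND_EXPR above yields the 16-byte aligned address that
	   the lvx instruction itself would access.  */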
15428 gsi_insert_seq_before (gsi, stmts, GSI_SAME_STMT);
15429 if (!is_gimple_mem_ref_addr (aligned_addr))
15430 {
15431 tree t = make_ssa_name (TREE_TYPE (aligned_addr));
15432 gimple *g = gimple_build_assign (t, aligned_addr);
15433 gsi_insert_before (gsi, g, GSI_SAME_STMT);
15434 aligned_addr = t;
15435 }
15436 /* Use the build2 helper to set up the mem_ref. The MEM_REF could also
15437 take an offset, but since we've already incorporated the offset
15438 above, here we just pass in a zero. */
15439 gimple *g
15440 = gimple_build_assign (lhs, build2 (MEM_REF, lhs_type, aligned_addr,
15441 build_int_cst (arg1_type, 0)));
15442 gimple_set_location (g, loc);
15443 gsi_replace (gsi, g, true);
15444 return true;
15445 }
15446 /* Vector stores. */
15447 case ALTIVEC_BUILTIN_STVX_V16QI:
15448 case ALTIVEC_BUILTIN_STVX_V8HI:
15449 case ALTIVEC_BUILTIN_STVX_V4SI:
15450 case ALTIVEC_BUILTIN_STVX_V4SF:
15451 case ALTIVEC_BUILTIN_STVX_V2DI:
15452 case ALTIVEC_BUILTIN_STVX_V2DF:
15453 {
15454 arg0 = gimple_call_arg (stmt, 0); /* Value to be stored. */
15455 arg1 = gimple_call_arg (stmt, 1); /* Offset. */
15456 tree arg2 = gimple_call_arg (stmt, 2); /* Store-to address. */
15457 location_t loc = gimple_location (stmt);
15458 tree arg0_type = TREE_TYPE (arg0);
15459 /* Use ptr_type_node (no TBAA) for the arg2_type.
15460 FIXME: (Richard) "A proper fix would be to transition this type as
15461 seen from the frontend to GIMPLE, for example in a similar way we
15462 do for MEM_REFs by piggy-backing that on an extra argument, a
15463 constant zero pointer of the alias pointer type to use (which would
15464 also serve as a type indicator of the store itself). I'd use a
15465 target specific internal function for this (not sure if we can have
15466 those target specific, but I guess if it's folded away then that's
15467 fine) and get away with the overload set." */
15468 tree arg2_type = ptr_type_node;
15469 	/* POINTER_PLUS_EXPR wants the offset to be of type 'sizetype'.  Create
15470 	   the tree using the value from arg1.  The resulting type will match
15471 	   the type of arg2.  */
15472 gimple_seq stmts = NULL;
15473 tree temp_offset = gimple_convert (&stmts, loc, sizetype, arg1);
15474 tree temp_addr = gimple_build (&stmts, loc, POINTER_PLUS_EXPR,
15475 arg2_type, arg2, temp_offset);
15476 /* Mask off any lower bits from the address. */
15477 tree aligned_addr = gimple_build (&stmts, loc, BIT_AND_EXPR,
15478 arg2_type, temp_addr,
15479 build_int_cst (arg2_type, -16));
15480 gsi_insert_seq_before (gsi, stmts, GSI_SAME_STMT);
15481 if (!is_gimple_mem_ref_addr (aligned_addr))
15482 {
15483 tree t = make_ssa_name (TREE_TYPE (aligned_addr));
15484 gimple *g = gimple_build_assign (t, aligned_addr);
15485 gsi_insert_before (gsi, g, GSI_SAME_STMT);
15486 aligned_addr = t;
15487 }
15488 /* The desired gimple result should be similar to:
15489 MEM[(__vector floatD.1407 *)_1] = vf1D.2697; */
15490 gimple *g
15491 = gimple_build_assign (build2 (MEM_REF, arg0_type, aligned_addr,
15492 build_int_cst (arg2_type, 0)), arg0);
15493 gimple_set_location (g, loc);
15494 gsi_replace (gsi, g, true);
15495 return true;
15496 }
15497
15498     /* Unaligned vector loads.  */
15499 case VSX_BUILTIN_LXVW4X_V16QI:
15500 case VSX_BUILTIN_LXVW4X_V8HI:
15501 case VSX_BUILTIN_LXVW4X_V4SF:
15502 case VSX_BUILTIN_LXVW4X_V4SI:
15503 case VSX_BUILTIN_LXVD2X_V2DF:
15504 case VSX_BUILTIN_LXVD2X_V2DI:
15505 {
15506 arg0 = gimple_call_arg (stmt, 0); // offset
15507 arg1 = gimple_call_arg (stmt, 1); // address
15508 lhs = gimple_call_lhs (stmt);
15509 location_t loc = gimple_location (stmt);
15510 /* Since arg1 may be cast to a different type, just use ptr_type_node
15511 here instead of trying to enforce TBAA on pointer types. */
15512 tree arg1_type = ptr_type_node;
15513 tree lhs_type = TREE_TYPE (lhs);
15514 	/* In GIMPLE the type of the MEM_REF specifies the alignment.  The
15515 	   required alignment on Power is 4 bytes regardless of data type.  */
15516 tree align_ltype = build_aligned_type (lhs_type, 4);
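	/* build_aligned_type produces a reduced-alignment variant of
	   lhs_type, so expansion will not assume the natural 16-byte vector
	   alignment for this access.  */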
15517 /* POINTER_PLUS_EXPR wants the offset to be of type 'sizetype'. Create
15518 the tree using the value from arg0. The resulting type will match
15519 the type of arg1. */
15520 gimple_seq stmts = NULL;
15521 tree temp_offset = gimple_convert (&stmts, loc, sizetype, arg0);
15522 tree temp_addr = gimple_build (&stmts, loc, POINTER_PLUS_EXPR,
15523 arg1_type, arg1, temp_offset);
15524 gsi_insert_seq_before (gsi, stmts, GSI_SAME_STMT);
15525 if (!is_gimple_mem_ref_addr (temp_addr))
15526 {
15527 tree t = make_ssa_name (TREE_TYPE (temp_addr));
15528 gimple *g = gimple_build_assign (t, temp_addr);
15529 gsi_insert_before (gsi, g, GSI_SAME_STMT);
15530 temp_addr = t;
15531 }
15532 /* Use the build2 helper to set up the mem_ref. The MEM_REF could also
15533 take an offset, but since we've already incorporated the offset
15534 above, here we just pass in a zero. */
15535 gimple *g;
15536 g = gimple_build_assign (lhs, build2 (MEM_REF, align_ltype, temp_addr,
15537 build_int_cst (arg1_type, 0)));
15538 gimple_set_location (g, loc);
15539 gsi_replace (gsi, g, true);
15540 return true;
15541 }
15542
15543     /* Unaligned vector stores.  */
15544 case VSX_BUILTIN_STXVW4X_V16QI:
15545 case VSX_BUILTIN_STXVW4X_V8HI:
15546 case VSX_BUILTIN_STXVW4X_V4SF:
15547 case VSX_BUILTIN_STXVW4X_V4SI:
15548 case VSX_BUILTIN_STXVD2X_V2DF:
15549 case VSX_BUILTIN_STXVD2X_V2DI:
15550 {
15551 arg0 = gimple_call_arg (stmt, 0); /* Value to be stored. */
15552 arg1 = gimple_call_arg (stmt, 1); /* Offset. */
15553 tree arg2 = gimple_call_arg (stmt, 2); /* Store-to address. */
15554 location_t loc = gimple_location (stmt);
15555 tree arg0_type = TREE_TYPE (arg0);
15556 /* Use ptr_type_node (no TBAA) for the arg2_type. */
15557 tree arg2_type = ptr_type_node;
15558 	/* In GIMPLE the type of the MEM_REF specifies the alignment.  The
15559 	   required alignment on Power is 4 bytes regardless of data type.  */
15560 tree align_stype = build_aligned_type (arg0_type, 4);
15561 /* POINTER_PLUS_EXPR wants the offset to be of type 'sizetype'. Create
15562 the tree using the value from arg1. */
15563 gimple_seq stmts = NULL;
15564 tree temp_offset = gimple_convert (&stmts, loc, sizetype, arg1);
15565 tree temp_addr = gimple_build (&stmts, loc, POINTER_PLUS_EXPR,
15566 arg2_type, arg2, temp_offset);
15567 gsi_insert_seq_before (gsi, stmts, GSI_SAME_STMT);
15568 if (!is_gimple_mem_ref_addr (temp_addr))
15569 {
15570 tree t = make_ssa_name (TREE_TYPE (temp_addr));
15571 gimple *g = gimple_build_assign (t, temp_addr);
15572 gsi_insert_before (gsi, g, GSI_SAME_STMT);
15573 temp_addr = t;
15574 }
15575 gimple *g;
15576 g = gimple_build_assign (build2 (MEM_REF, align_stype, temp_addr,
15577 build_int_cst (arg2_type, 0)), arg0);
15578 gimple_set_location (g, loc);
15579 gsi_replace (gsi, g, true);
15580 return true;
15581 }
15582
15583 /* Vector Fused multiply-add (fma). */
15584 case ALTIVEC_BUILTIN_VMADDFP:
15585 case VSX_BUILTIN_XVMADDDP:
15586 case ALTIVEC_BUILTIN_VMLADDUHM:
15587 {
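	/* Each of these builtins computes arg0 * arg1 + arg2 elementwise
	   (single-rounding for the floating-point forms), so the call can be
	   replaced by the internal FMA function.  */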
15588 arg0 = gimple_call_arg (stmt, 0);
15589 arg1 = gimple_call_arg (stmt, 1);
15590 tree arg2 = gimple_call_arg (stmt, 2);
15591 lhs = gimple_call_lhs (stmt);
15592 gcall *g = gimple_build_call_internal (IFN_FMA, 3, arg0, arg1, arg2);
15593 gimple_call_set_lhs (g, lhs);
15594 gimple_call_set_nothrow (g, true);
15595 gimple_set_location (g, gimple_location (stmt));
15596 gsi_replace (gsi, g, true);
15597 return true;
15598 }
15599
15600 /* Vector compares; EQ, NE, GE, GT, LE. */
15601 case ALTIVEC_BUILTIN_VCMPEQUB:
15602 case ALTIVEC_BUILTIN_VCMPEQUH:
15603 case ALTIVEC_BUILTIN_VCMPEQUW:
15604 case P8V_BUILTIN_VCMPEQUD:
15605 fold_compare_helper (gsi, EQ_EXPR, stmt);
15606 return true;
15607
15608 case P9V_BUILTIN_CMPNEB:
15609 case P9V_BUILTIN_CMPNEH:
15610 case P9V_BUILTIN_CMPNEW:
15611 fold_compare_helper (gsi, NE_EXPR, stmt);
15612 return true;
15613
15614 case VSX_BUILTIN_CMPGE_16QI:
15615 case VSX_BUILTIN_CMPGE_U16QI:
15616 case VSX_BUILTIN_CMPGE_8HI:
15617 case VSX_BUILTIN_CMPGE_U8HI:
15618 case VSX_BUILTIN_CMPGE_4SI:
15619 case VSX_BUILTIN_CMPGE_U4SI:
15620 case VSX_BUILTIN_CMPGE_2DI:
15621 case VSX_BUILTIN_CMPGE_U2DI:
15622 fold_compare_helper (gsi, GE_EXPR, stmt);
15623 return true;
15624
15625 case ALTIVEC_BUILTIN_VCMPGTSB:
15626 case ALTIVEC_BUILTIN_VCMPGTUB:
15627 case ALTIVEC_BUILTIN_VCMPGTSH:
15628 case ALTIVEC_BUILTIN_VCMPGTUH:
15629 case ALTIVEC_BUILTIN_VCMPGTSW:
15630 case ALTIVEC_BUILTIN_VCMPGTUW:
15631 case P8V_BUILTIN_VCMPGTUD:
15632 case P8V_BUILTIN_VCMPGTSD:
15633 fold_compare_helper (gsi, GT_EXPR, stmt);
15634 return true;
15635
15636 case VSX_BUILTIN_CMPLE_16QI:
15637 case VSX_BUILTIN_CMPLE_U16QI:
15638 case VSX_BUILTIN_CMPLE_8HI:
15639 case VSX_BUILTIN_CMPLE_U8HI:
15640 case VSX_BUILTIN_CMPLE_4SI:
15641 case VSX_BUILTIN_CMPLE_U4SI:
15642 case VSX_BUILTIN_CMPLE_2DI:
15643 case VSX_BUILTIN_CMPLE_U2DI:
15644 fold_compare_helper (gsi, LE_EXPR, stmt);
15645 return true;
15646
15647     /* Flavors of vec_splat_[us]{8,16,32}.  */
15648 case ALTIVEC_BUILTIN_VSPLTISB:
15649 case ALTIVEC_BUILTIN_VSPLTISH:
15650 case ALTIVEC_BUILTIN_VSPLTISW:
15651 {
15652 arg0 = gimple_call_arg (stmt, 0);
15653 lhs = gimple_call_lhs (stmt);
15654
15655 	/* Only fold the vec_splat_*() if the low bits of arg0 form a
15656 	   5-bit signed constant in the range -16 to +15.  */
15657 if (TREE_CODE (arg0) != INTEGER_CST
15658 || !IN_RANGE (TREE_INT_CST_LOW (arg0), -16, 15))
15659 return false;
15660 gimple_seq stmts = NULL;
15661 location_t loc = gimple_location (stmt);
15662 tree splat_value = gimple_convert (&stmts, loc,
15663 TREE_TYPE (TREE_TYPE (lhs)), arg0);
15664 gsi_insert_seq_before (gsi, stmts, GSI_SAME_STMT);
15665 tree splat_tree = build_vector_from_val (TREE_TYPE (lhs), splat_value);
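	/* For example, vec_splat_s32 (-5) (ALTIVEC_BUILTIN_VSPLTISW) becomes
	   the vector constant { -5, -5, -5, -5 }.  */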
15666 g = gimple_build_assign (lhs, splat_tree);
15667 gimple_set_location (g, gimple_location (stmt));
15668 gsi_replace (gsi, g, true);
15669 return true;
15670 }
15671
15672 /* Flavors of vec_splat. */
15673 /* a = vec_splat (b, 0x3) becomes a = { b[3],b[3],b[3],...}; */
15674 case ALTIVEC_BUILTIN_VSPLTB:
15675 case ALTIVEC_BUILTIN_VSPLTH:
15676 case ALTIVEC_BUILTIN_VSPLTW:
15677 case VSX_BUILTIN_XXSPLTD_V2DI:
15678 case VSX_BUILTIN_XXSPLTD_V2DF:
15679 {
15680 arg0 = gimple_call_arg (stmt, 0); /* input vector. */
15681 arg1 = gimple_call_arg (stmt, 1); /* index into arg0. */
15682 	/* Only fold vec_splat () if arg1 is both a constant value and
15683 	   a valid index into the arg0 vector.  */
15684 unsigned int n_elts = VECTOR_CST_NELTS (arg0);
15685 if (TREE_CODE (arg1) != INTEGER_CST
15686 	    || TREE_INT_CST_LOW (arg1) > (n_elts - 1))
15687 return false;
15688 lhs = gimple_call_lhs (stmt);
15689 tree lhs_type = TREE_TYPE (lhs);
15690 tree arg0_type = TREE_TYPE (arg0);
15691 tree splat;
15692 if (TREE_CODE (arg0) == VECTOR_CST)
15693 splat = VECTOR_CST_ELT (arg0, TREE_INT_CST_LOW (arg1));
15694 else
15695 {
15696 /* Determine (in bits) the length and start location of the
15697 splat value for a call to the tree_vec_extract helper. */
15698 int splat_elem_size = TREE_INT_CST_LOW (size_in_bytes (arg0_type))
15699 * BITS_PER_UNIT / n_elts;
15700 int splat_start_bit = TREE_INT_CST_LOW (arg1) * splat_elem_size;
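	    /* For instance, splatting element 3 of a V8HI vector gives
	       splat_elem_size == 16 and splat_start_bit == 48.  */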
15701 tree len = build_int_cst (bitsizetype, splat_elem_size);
15702 tree start = build_int_cst (bitsizetype, splat_start_bit);
15703 splat = tree_vec_extract (gsi, TREE_TYPE (lhs_type), arg0,
15704 len, start);
15705 }
15706 /* And finally, build the new vector. */
15707 tree splat_tree = build_vector_from_val (lhs_type, splat);
15708 g = gimple_build_assign (lhs, splat_tree);
15709 gimple_set_location (g, gimple_location (stmt));
15710 gsi_replace (gsi, g, true);
15711 return true;
15712 }
15713
15714 /* vec_mergel (integrals). */
15715 case ALTIVEC_BUILTIN_VMRGLH:
15716 case ALTIVEC_BUILTIN_VMRGLW:
15717 case VSX_BUILTIN_XXMRGLW_4SI:
15718 case ALTIVEC_BUILTIN_VMRGLB:
15719 case VSX_BUILTIN_VEC_MERGEL_V2DI:
15720 case VSX_BUILTIN_XXMRGLW_4SF:
15721 case VSX_BUILTIN_VEC_MERGEL_V2DF:
15722 fold_mergehl_helper (gsi, stmt, 1);
15723 return true;
15724 /* vec_mergeh (integrals). */
15725 case ALTIVEC_BUILTIN_VMRGHH:
15726 case ALTIVEC_BUILTIN_VMRGHW:
15727 case VSX_BUILTIN_XXMRGHW_4SI:
15728 case ALTIVEC_BUILTIN_VMRGHB:
15729 case VSX_BUILTIN_VEC_MERGEH_V2DI:
15730 case VSX_BUILTIN_XXMRGHW_4SF:
15731 case VSX_BUILTIN_VEC_MERGEH_V2DF:
15732 fold_mergehl_helper (gsi, stmt, 0);
15733 return true;
15734
15735 /* Flavors of vec_mergee. */
15736 case P8V_BUILTIN_VMRGEW_V4SI:
15737 case P8V_BUILTIN_VMRGEW_V2DI:
15738 case P8V_BUILTIN_VMRGEW_V4SF:
15739 case P8V_BUILTIN_VMRGEW_V2DF:
15740 fold_mergeeo_helper (gsi, stmt, 0);
15741 return true;
15742 /* Flavors of vec_mergeo. */
15743 case P8V_BUILTIN_VMRGOW_V4SI:
15744 case P8V_BUILTIN_VMRGOW_V2DI:
15745 case P8V_BUILTIN_VMRGOW_V4SF:
15746 case P8V_BUILTIN_VMRGOW_V2DF:
15747 fold_mergeeo_helper (gsi, stmt, 1);
15748 return true;
15749
15750 /* d = vec_pack (a, b) */
15751 case P8V_BUILTIN_VPKUDUM:
15752 case ALTIVEC_BUILTIN_VPKUHUM:
15753 case ALTIVEC_BUILTIN_VPKUWUM:
15754 {
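	/* VEC_PACK_TRUNC_EXPR truncates each element of the two operands to
	   half its width and concatenates the results into a single
	   full-width vector.  */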
15755 arg0 = gimple_call_arg (stmt, 0);
15756 arg1 = gimple_call_arg (stmt, 1);
15757 lhs = gimple_call_lhs (stmt);
15758 gimple *g = gimple_build_assign (lhs, VEC_PACK_TRUNC_EXPR, arg0, arg1);
15759 gimple_set_location (g, gimple_location (stmt));
15760 gsi_replace (gsi, g, true);
15761 return true;
15762 }
15763
15764 /* d = vec_unpackh (a) */
15765     /* Note that the UNPACK_{HI,LO}_EXPR used in the gimple_build_assign
15766        calls below is sensitive to endianness, so the HI/LO choice must be
15767        inverted on little-endian targets.  */
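    /* E.g. on a little-endian target vec_unpackh is expanded with
       VEC_UNPACK_LO_EXPR, since GIMPLE's notion of the HI/LO halves does not
       line up with the big-endian element numbering the AltiVec builtins are
       defined in terms of.  */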
15768 case ALTIVEC_BUILTIN_VUPKHSB:
15769 case ALTIVEC_BUILTIN_VUPKHSH:
15770 case P8V_BUILTIN_VUPKHSW:
15771 {
15772 arg0 = gimple_call_arg (stmt, 0);
15773 lhs = gimple_call_lhs (stmt);
15774 if (BYTES_BIG_ENDIAN)
15775 g = gimple_build_assign (lhs, VEC_UNPACK_HI_EXPR, arg0);
15776 else
15777 g = gimple_build_assign (lhs, VEC_UNPACK_LO_EXPR, arg0);
15778 gimple_set_location (g, gimple_location (stmt));
15779 gsi_replace (gsi, g, true);
15780 return true;
15781 }
15782 /* d = vec_unpackl (a) */
15783 case ALTIVEC_BUILTIN_VUPKLSB:
15784 case ALTIVEC_BUILTIN_VUPKLSH:
15785 case P8V_BUILTIN_VUPKLSW:
15786 {
15787 arg0 = gimple_call_arg (stmt, 0);
15788 lhs = gimple_call_lhs (stmt);
15789 if (BYTES_BIG_ENDIAN)
15790 g = gimple_build_assign (lhs, VEC_UNPACK_LO_EXPR, arg0);
15791 else
15792 g = gimple_build_assign (lhs, VEC_UNPACK_HI_EXPR, arg0);
15793 gimple_set_location (g, gimple_location (stmt));
15794 gsi_replace (gsi, g, true);
15795 return true;
15796 }
15797     /* There is no gimple type corresponding to pixel, so just return.  */
15798 case ALTIVEC_BUILTIN_VUPKHPX:
15799 case ALTIVEC_BUILTIN_VUPKLPX:
15800 return false;
15801
15802 /* vec_perm. */
15803 case ALTIVEC_BUILTIN_VPERM_16QI:
15804 case ALTIVEC_BUILTIN_VPERM_8HI:
15805 case ALTIVEC_BUILTIN_VPERM_4SI:
15806 case ALTIVEC_BUILTIN_VPERM_2DI:
15807 case ALTIVEC_BUILTIN_VPERM_4SF:
15808 case ALTIVEC_BUILTIN_VPERM_2DF:
15809 {
15810 arg0 = gimple_call_arg (stmt, 0);
15811 arg1 = gimple_call_arg (stmt, 1);
15812 tree permute = gimple_call_arg (stmt, 2);
15813 lhs = gimple_call_lhs (stmt);
15814 location_t loc = gimple_location (stmt);
15815 gimple_seq stmts = NULL;
15816 	// Convert arg0 and arg1 to match the type of the permute vector
15817 	// for the VEC_PERM_EXPR operation.
15818 	tree permute_type = TREE_TYPE (permute);
15819 tree arg0_ptype = gimple_convert (&stmts, loc, permute_type, arg0);
15820 tree arg1_ptype = gimple_convert (&stmts, loc, permute_type, arg1);
15821 tree lhs_ptype = gimple_build (&stmts, loc, VEC_PERM_EXPR,
15822 permute_type, arg0_ptype, arg1_ptype,
15823 permute);
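	// Each byte of the permute vector selects one byte from the 32-byte
	// concatenation of arg0_ptype and arg1_ptype (indices taken
	// modulo 32).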
15824 // Convert the result back to the desired lhs type upon completion.
15825 tree temp = gimple_convert (&stmts, loc, TREE_TYPE (lhs), lhs_ptype);
15826 gsi_insert_seq_before (gsi, stmts, GSI_SAME_STMT);
15827 g = gimple_build_assign (lhs, temp);
15828 gimple_set_location (g, loc);
15829 gsi_replace (gsi, g, true);
15830 return true;
15831 }
15832
15833 default:
15834 if (TARGET_DEBUG_BUILTIN)
15835 	fprintf (stderr, "gimple builtin intrinsic not matched: %d %s %s\n",
15836 fn_code, fn_name1, fn_name2);
15837 break;
15838 }
15839
15840 return false;
15841 }
15842
15843 /* Expand an expression EXP that calls a built-in function,
15844 with result going to TARGET if that's convenient
15845 (and in mode MODE if that's convenient).
15846 SUBTARGET may be used as the target for computing one of EXP's operands.
15847 IGNORE is nonzero if the value is to be ignored. */
15848
15849 static rtx
15850 rs6000_expand_builtin (tree exp, rtx target, rtx subtarget ATTRIBUTE_UNUSED,
15851 machine_mode mode ATTRIBUTE_UNUSED,
15852 int ignore ATTRIBUTE_UNUSED)
15853 {
15854 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
15855 enum rs6000_builtins fcode
15856 = (enum rs6000_builtins)DECL_FUNCTION_CODE (fndecl);
15857 size_t uns_fcode = (size_t)fcode;
15858 const struct builtin_description *d;
15859 size_t i;
15860 rtx ret;
15861 bool success;
15862 HOST_WIDE_INT mask = rs6000_builtin_info[uns_fcode].mask;
15863 bool func_valid_p = ((rs6000_builtin_mask & mask) == mask);
15864 enum insn_code icode = rs6000_builtin_info[uns_fcode].icode;
15865
15866 /* We have two different modes (KFmode, TFmode) that are the IEEE 128-bit
15867 floating point type, depending on whether long double is the IBM extended
15868 double (KFmode) or long double is IEEE 128-bit (TFmode). It is simpler if
15869 we only define one variant of the built-in function, and switch the code
15870 when defining it, rather than defining two built-ins and using the
15871 overload table in rs6000-c.c to switch between the two. If we don't have
15872 the proper assembler, don't do this switch because CODE_FOR_*kf* and
15873 CODE_FOR_*tf* will be CODE_FOR_nothing. */
15874 if (FLOAT128_IEEE_P (TFmode))
15875 switch (icode)
15876 {
15877 default:
15878 break;
15879
15880 case CODE_FOR_sqrtkf2_odd: icode = CODE_FOR_sqrttf2_odd; break;
15881 case CODE_FOR_trunckfdf2_odd: icode = CODE_FOR_trunctfdf2_odd; break;
15882 case CODE_FOR_addkf3_odd: icode = CODE_FOR_addtf3_odd; break;
15883 case CODE_FOR_subkf3_odd: icode = CODE_FOR_subtf3_odd; break;
15884 case CODE_FOR_mulkf3_odd: icode = CODE_FOR_multf3_odd; break;
15885 case CODE_FOR_divkf3_odd: icode = CODE_FOR_divtf3_odd; break;
15886 case CODE_FOR_fmakf4_odd: icode = CODE_FOR_fmatf4_odd; break;
15887 case CODE_FOR_xsxexpqp_kf: icode = CODE_FOR_xsxexpqp_tf; break;
15888 case CODE_FOR_xsxsigqp_kf: icode = CODE_FOR_xsxsigqp_tf; break;
15889 case CODE_FOR_xststdcnegqp_kf: icode = CODE_FOR_xststdcnegqp_tf; break;
15890 case CODE_FOR_xsiexpqp_kf: icode = CODE_FOR_xsiexpqp_tf; break;
15891 case CODE_FOR_xsiexpqpf_kf: icode = CODE_FOR_xsiexpqpf_tf; break;
15892 case CODE_FOR_xststdcqp_kf: icode = CODE_FOR_xststdcqp_tf; break;
15893 }
15894
15895 if (TARGET_DEBUG_BUILTIN)
15896 {
15897 const char *name1 = rs6000_builtin_info[uns_fcode].name;
15898 const char *name2 = (icode != CODE_FOR_nothing)
15899 ? get_insn_name ((int) icode)
15900 : "nothing";
15901 const char *name3;
15902
15903 switch (rs6000_builtin_info[uns_fcode].attr & RS6000_BTC_TYPE_MASK)
15904 {
15905 default: name3 = "unknown"; break;
15906 case RS6000_BTC_SPECIAL: name3 = "special"; break;
15907 case RS6000_BTC_UNARY: name3 = "unary"; break;
15908 case RS6000_BTC_BINARY: name3 = "binary"; break;
15909 case RS6000_BTC_TERNARY: name3 = "ternary"; break;
15910 case RS6000_BTC_PREDICATE: name3 = "predicate"; break;
15911 case RS6000_BTC_ABS: name3 = "abs"; break;
15912 case RS6000_BTC_DST: name3 = "dst"; break;
15913 }
15914
15916 fprintf (stderr,
15917 "rs6000_expand_builtin, %s (%d), insn = %s (%d), type=%s%s\n",
15918 (name1) ? name1 : "---", fcode,
15919 (name2) ? name2 : "---", (int) icode,
15920 name3,
15921 func_valid_p ? "" : ", not valid");
15922 }
15923
15924 if (!func_valid_p)
15925 {
15926 rs6000_invalid_builtin (fcode);
15927
15928 /* Given it is invalid, just generate a normal call. */
15929 return expand_call (exp, target, ignore);
15930 }
15931
15932 switch (fcode)
15933 {
15934 case RS6000_BUILTIN_RECIP:
15935 return rs6000_expand_binop_builtin (CODE_FOR_recipdf3, exp, target);
15936
15937 case RS6000_BUILTIN_RECIPF:
15938 return rs6000_expand_binop_builtin (CODE_FOR_recipsf3, exp, target);
15939
15940 case RS6000_BUILTIN_RSQRTF:
15941 return rs6000_expand_unop_builtin (CODE_FOR_rsqrtsf2, exp, target);
15942
15943 case RS6000_BUILTIN_RSQRT:
15944 return rs6000_expand_unop_builtin (CODE_FOR_rsqrtdf2, exp, target);
15945
15946 case POWER7_BUILTIN_BPERMD:
15947 return rs6000_expand_binop_builtin (((TARGET_64BIT)
15948 ? CODE_FOR_bpermd_di
15949 : CODE_FOR_bpermd_si), exp, target);
15950
15951 case RS6000_BUILTIN_GET_TB:
15952 return rs6000_expand_zeroop_builtin (CODE_FOR_rs6000_get_timebase,
15953 target);
15954
15955 case RS6000_BUILTIN_MFTB:
15956 return rs6000_expand_zeroop_builtin (((TARGET_64BIT)
15957 ? CODE_FOR_rs6000_mftb_di
15958 : CODE_FOR_rs6000_mftb_si),
15959 target);
15960
15961 case RS6000_BUILTIN_MFFS:
15962 return rs6000_expand_zeroop_builtin (CODE_FOR_rs6000_mffs, target);
15963
15964 case RS6000_BUILTIN_MTFSB0:
15965 return rs6000_expand_mtfsb_builtin (CODE_FOR_rs6000_mtfsb0, exp);
15966
15967 case RS6000_BUILTIN_MTFSB1:
15968 return rs6000_expand_mtfsb_builtin (CODE_FOR_rs6000_mtfsb1, exp);
15969
15970 case RS6000_BUILTIN_SET_FPSCR_RN:
15971 return rs6000_expand_set_fpscr_rn_builtin (CODE_FOR_rs6000_set_fpscr_rn,
15972 exp);
15973
15974 case RS6000_BUILTIN_SET_FPSCR_DRN:
15975 return
15976 rs6000_expand_set_fpscr_drn_builtin (CODE_FOR_rs6000_set_fpscr_drn,
15977 exp);
15978
15979 case RS6000_BUILTIN_MFFSL:
15980 return rs6000_expand_zeroop_builtin (CODE_FOR_rs6000_mffsl, target);
15981
15982 case RS6000_BUILTIN_MTFSF:
15983 return rs6000_expand_mtfsf_builtin (CODE_FOR_rs6000_mtfsf, exp);
15984
15985 case RS6000_BUILTIN_CPU_INIT:
15986 case RS6000_BUILTIN_CPU_IS:
15987 case RS6000_BUILTIN_CPU_SUPPORTS:
15988 return cpu_expand_builtin (fcode, exp, target);
15989
15990 case MISC_BUILTIN_SPEC_BARRIER:
15991 {
15992 emit_insn (gen_speculation_barrier ());
15993 return NULL_RTX;
15994 }
15995
15996 case ALTIVEC_BUILTIN_MASK_FOR_LOAD:
15997 case ALTIVEC_BUILTIN_MASK_FOR_STORE:
15998 {
15999 int icode2 = (BYTES_BIG_ENDIAN ? (int) CODE_FOR_altivec_lvsr_direct
16000 : (int) CODE_FOR_altivec_lvsl_direct);
16001 machine_mode tmode = insn_data[icode2].operand[0].mode;
16002 machine_mode mode = insn_data[icode2].operand[1].mode;
16003 tree arg;
16004 rtx op, addr, pat;
16005
16006 gcc_assert (TARGET_ALTIVEC);
16007
16008 arg = CALL_EXPR_ARG (exp, 0);
16009 gcc_assert (POINTER_TYPE_P (TREE_TYPE (arg)));
16010 op = expand_expr (arg, NULL_RTX, Pmode, EXPAND_NORMAL);
16011 addr = memory_address (mode, op);
16012 if (fcode == ALTIVEC_BUILTIN_MASK_FOR_STORE)
16013 op = addr;
16014 else
16015 {
16016 	    /* For the load case we need to negate the address.  */
16017 op = gen_reg_rtx (GET_MODE (addr));
16018 emit_insn (gen_rtx_SET (op, gen_rtx_NEG (GET_MODE (addr), addr)));
16019 }
16020 op = gen_rtx_MEM (mode, op);
16021
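	/* Standard expander idiom: reuse TARGET when it is a register of the
	   expected mode that satisfies the operand predicate, and otherwise
	   allocate a fresh pseudo for the result.  */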
16022 if (target == 0
16023 || GET_MODE (target) != tmode
16024 || ! (*insn_data[icode2].operand[0].predicate) (target, tmode))
16025 target = gen_reg_rtx (tmode);
16026
16027 pat = GEN_FCN (icode2) (target, op);
16028 if (!pat)
16029 return 0;
16030 emit_insn (pat);
16031
16032 return target;
16033 }
16034
16035 case ALTIVEC_BUILTIN_VCFUX:
16036 case ALTIVEC_BUILTIN_VCFSX:
16037 case ALTIVEC_BUILTIN_VCTUXS:
16038 case ALTIVEC_BUILTIN_VCTSXS:
16039 /* FIXME: There's got to be a nicer way to handle this case than
16040 constructing a new CALL_EXPR. */
16041 if (call_expr_nargs (exp) == 1)
16042 {
16043 exp = build_call_nary (TREE_TYPE (exp), CALL_EXPR_FN (exp),
16044 2, CALL_EXPR_ARG (exp, 0), integer_zero_node);
16045 }
16046 break;
16047
16048 /* For the pack and unpack int128 routines, fix up the builtin so it
16049 uses the correct IBM128 type. */
16050 case MISC_BUILTIN_PACK_IF:
16051 if (TARGET_LONG_DOUBLE_128 && !TARGET_IEEEQUAD)
16052 {
16053 icode = CODE_FOR_packtf;
16054 fcode = MISC_BUILTIN_PACK_TF;
16055 uns_fcode = (size_t)fcode;
16056 }
16057 break;
16058
16059 case MISC_BUILTIN_UNPACK_IF:
16060 if (TARGET_LONG_DOUBLE_128 && !TARGET_IEEEQUAD)
16061 {
16062 icode = CODE_FOR_unpacktf;
16063 fcode = MISC_BUILTIN_UNPACK_TF;
16064 uns_fcode = (size_t)fcode;
16065 }
16066 break;
16067
16068 default:
16069 break;
16070 }
16071
16072 if (TARGET_ALTIVEC)
16073 {
16074 ret = altivec_expand_builtin (exp, target, &success);
16075
16076 if (success)
16077 return ret;
16078 }
16079 if (TARGET_HTM)
16080 {
16081 ret = htm_expand_builtin (exp, target, &success);
16082
16083 if (success)
16084 return ret;
16085 }
16086
16087 unsigned attr = rs6000_builtin_info[uns_fcode].attr & RS6000_BTC_TYPE_MASK;
16088 /* RS6000_BTC_SPECIAL represents no-operand operators. */
16089 gcc_assert (attr == RS6000_BTC_UNARY
16090 || attr == RS6000_BTC_BINARY
16091 || attr == RS6000_BTC_TERNARY
16092 || attr == RS6000_BTC_SPECIAL);
16093
16094 /* Handle simple unary operations. */
16095 d = bdesc_1arg;
16096 for (i = 0; i < ARRAY_SIZE (bdesc_1arg); i++, d++)
16097 if (d->code == fcode)
16098 return rs6000_expand_unop_builtin (icode, exp, target);
16099
16100 /* Handle simple binary operations. */
16101 d = bdesc_2arg;
16102 for (i = 0; i < ARRAY_SIZE (bdesc_2arg); i++, d++)
16103 if (d->code == fcode)
16104 return rs6000_expand_binop_builtin (icode, exp, target);
16105
16106 /* Handle simple ternary operations. */
16107 d = bdesc_3arg;
16108 for (i = 0; i < ARRAY_SIZE (bdesc_3arg); i++, d++)
16109 if (d->code == fcode)
16110 return rs6000_expand_ternop_builtin (icode, exp, target);
16111
16112 /* Handle simple no-argument operations. */
16113 d = bdesc_0arg;
16114 for (i = 0; i < ARRAY_SIZE (bdesc_0arg); i++, d++)
16115 if (d->code == fcode)
16116 return rs6000_expand_zeroop_builtin (icode, target);
16117
16118 gcc_unreachable ();
16119 }
16120
16121 /* Create a builtin vector type with a name, taking care not to give
16122    the canonical type a name.  */
16123
16124 static tree
16125 rs6000_vector_type (const char *name, tree elt_type, unsigned num_elts)
16126 {
16127 tree result = build_vector_type (elt_type, num_elts);
16128
16129 /* Copy so we don't give the canonical type a name. */
16130 result = build_variant_type_copy (result);
16131
16132 add_builtin_type (name, result);
16133
16134 return result;
16135 }
16136
16137 static void
16138 rs6000_init_builtins (void)
16139 {
16140 tree tdecl;
16141 tree ftype;
16142 machine_mode mode;
16143
16144 if (TARGET_DEBUG_BUILTIN)
16145 fprintf (stderr, "rs6000_init_builtins%s%s\n",
16146 (TARGET_ALTIVEC) ? ", altivec" : "",
16147 (TARGET_VSX) ? ", vsx" : "");
16148
16149 V2DI_type_node = rs6000_vector_type (TARGET_POWERPC64 ? "__vector long"
16150 : "__vector long long",
16151 intDI_type_node, 2);
16152 V2DF_type_node = rs6000_vector_type ("__vector double", double_type_node, 2);
16153 V4SI_type_node = rs6000_vector_type ("__vector signed int",
16154 intSI_type_node, 4);
16155 V4SF_type_node = rs6000_vector_type ("__vector float", float_type_node, 4);
16156 V8HI_type_node = rs6000_vector_type ("__vector signed short",
16157 intHI_type_node, 8);
16158 V16QI_type_node = rs6000_vector_type ("__vector signed char",
16159 intQI_type_node, 16);
16160
16161 unsigned_V16QI_type_node = rs6000_vector_type ("__vector unsigned char",
16162 unsigned_intQI_type_node, 16);
16163 unsigned_V8HI_type_node = rs6000_vector_type ("__vector unsigned short",
16164 unsigned_intHI_type_node, 8);
16165 unsigned_V4SI_type_node = rs6000_vector_type ("__vector unsigned int",
16166 unsigned_intSI_type_node, 4);
16167 unsigned_V2DI_type_node = rs6000_vector_type (TARGET_POWERPC64
16168 ? "__vector unsigned long"
16169 : "__vector unsigned long long",
16170 unsigned_intDI_type_node, 2);
16171
16172 opaque_V4SI_type_node = build_opaque_vector_type (intSI_type_node, 4);
16173
16174 const_str_type_node
16175 = build_pointer_type (build_qualified_type (char_type_node,
16176 TYPE_QUAL_CONST));
16177
16178 /* We use V1TI mode as a special container to hold __int128_t items that
16179 must live in VSX registers. */
16180 if (intTI_type_node)
16181 {
16182 V1TI_type_node = rs6000_vector_type ("__vector __int128",
16183 intTI_type_node, 1);
16184 unsigned_V1TI_type_node
16185 = rs6000_vector_type ("__vector unsigned __int128",
16186 unsigned_intTI_type_node, 1);
16187 }
16188
16189 /* The 'vector bool ...' types must be kept distinct from 'vector unsigned ...'
16190 types, especially in C++ land. Similarly, 'vector pixel' is distinct from
16191 'vector unsigned short'. */
16192
16193 bool_char_type_node = build_distinct_type_copy (unsigned_intQI_type_node);
16194 bool_short_type_node = build_distinct_type_copy (unsigned_intHI_type_node);
16195 bool_int_type_node = build_distinct_type_copy (unsigned_intSI_type_node);
16196 bool_long_long_type_node = build_distinct_type_copy (unsigned_intDI_type_node);
16197 pixel_type_node = build_distinct_type_copy (unsigned_intHI_type_node);
16198
16199 long_integer_type_internal_node = long_integer_type_node;
16200 long_unsigned_type_internal_node = long_unsigned_type_node;
16201 long_long_integer_type_internal_node = long_long_integer_type_node;
16202 long_long_unsigned_type_internal_node = long_long_unsigned_type_node;
16203 intQI_type_internal_node = intQI_type_node;
16204 uintQI_type_internal_node = unsigned_intQI_type_node;
16205 intHI_type_internal_node = intHI_type_node;
16206 uintHI_type_internal_node = unsigned_intHI_type_node;
16207 intSI_type_internal_node = intSI_type_node;
16208 uintSI_type_internal_node = unsigned_intSI_type_node;
16209 intDI_type_internal_node = intDI_type_node;
16210 uintDI_type_internal_node = unsigned_intDI_type_node;
16211 intTI_type_internal_node = intTI_type_node;
16212 uintTI_type_internal_node = unsigned_intTI_type_node;
16213 float_type_internal_node = float_type_node;
16214 double_type_internal_node = double_type_node;
16215 long_double_type_internal_node = long_double_type_node;
16216 dfloat64_type_internal_node = dfloat64_type_node;
16217 dfloat128_type_internal_node = dfloat128_type_node;
16218 void_type_internal_node = void_type_node;
16219
16220 /* 128-bit floating point support. KFmode is IEEE 128-bit floating point.
16221 IFmode is the IBM extended 128-bit format that is a pair of doubles.
16222 TFmode will be either IEEE 128-bit floating point or the IBM double-double
16223 format that uses a pair of doubles, depending on the switches and
16224 defaults.
16225
16226      If we don't have support for either 128-bit IBM double-double or IEEE
16227      128-bit floating point, we need to make sure the type is non-zero, or
16228      else the self-test fails during bootstrap.
16229
16230 Always create __ibm128 as a separate type, even if the current long double
16231 format is IBM extended double.
16232
16233 For IEEE 128-bit floating point, always create the type __ieee128. If the
16234 user used -mfloat128, rs6000-c.c will create a define from __float128 to
16235 __ieee128. */
16236 if (TARGET_FLOAT128_TYPE)
16237 {
16238 if (!TARGET_IEEEQUAD && TARGET_LONG_DOUBLE_128)
16239 ibm128_float_type_node = long_double_type_node;
16240 else
16241 {
16242 ibm128_float_type_node = make_node (REAL_TYPE);
16243 TYPE_PRECISION (ibm128_float_type_node) = 128;
16244 SET_TYPE_MODE (ibm128_float_type_node, IFmode);
16245 layout_type (ibm128_float_type_node);
16246 }
16247
16248 lang_hooks.types.register_builtin_type (ibm128_float_type_node,
16249 "__ibm128");
16250
16251 if (TARGET_IEEEQUAD && TARGET_LONG_DOUBLE_128)
16252 ieee128_float_type_node = long_double_type_node;
16253 else
16254 ieee128_float_type_node = float128_type_node;
16255
16256 lang_hooks.types.register_builtin_type (ieee128_float_type_node,
16257 "__ieee128");
16258 }
16260 else
16261 ieee128_float_type_node = ibm128_float_type_node = long_double_type_node;
16262
16263 /* Initialize the modes for builtin_function_type, mapping a machine mode to
16264 tree type node. */
16265 builtin_mode_to_type[QImode][0] = integer_type_node;
16266 builtin_mode_to_type[HImode][0] = integer_type_node;
16267 builtin_mode_to_type[SImode][0] = intSI_type_node;
16268 builtin_mode_to_type[SImode][1] = unsigned_intSI_type_node;
16269 builtin_mode_to_type[DImode][0] = intDI_type_node;
16270 builtin_mode_to_type[DImode][1] = unsigned_intDI_type_node;
16271 builtin_mode_to_type[TImode][0] = intTI_type_node;
16272 builtin_mode_to_type[TImode][1] = unsigned_intTI_type_node;
16273 builtin_mode_to_type[SFmode][0] = float_type_node;
16274 builtin_mode_to_type[DFmode][0] = double_type_node;
16275 builtin_mode_to_type[IFmode][0] = ibm128_float_type_node;
16276 builtin_mode_to_type[KFmode][0] = ieee128_float_type_node;
16277 builtin_mode_to_type[TFmode][0] = long_double_type_node;
16278 builtin_mode_to_type[DDmode][0] = dfloat64_type_node;
16279 builtin_mode_to_type[TDmode][0] = dfloat128_type_node;
16280 builtin_mode_to_type[V1TImode][0] = V1TI_type_node;
16281 builtin_mode_to_type[V1TImode][1] = unsigned_V1TI_type_node;
16282 builtin_mode_to_type[V2DImode][0] = V2DI_type_node;
16283 builtin_mode_to_type[V2DImode][1] = unsigned_V2DI_type_node;
16284 builtin_mode_to_type[V2DFmode][0] = V2DF_type_node;
16285 builtin_mode_to_type[V4SImode][0] = V4SI_type_node;
16286 builtin_mode_to_type[V4SImode][1] = unsigned_V4SI_type_node;
16287 builtin_mode_to_type[V4SFmode][0] = V4SF_type_node;
16288 builtin_mode_to_type[V8HImode][0] = V8HI_type_node;
16289 builtin_mode_to_type[V8HImode][1] = unsigned_V8HI_type_node;
16290 builtin_mode_to_type[V16QImode][0] = V16QI_type_node;
16291 builtin_mode_to_type[V16QImode][1] = unsigned_V16QI_type_node;
16292
16293 tdecl = add_builtin_type ("__bool char", bool_char_type_node);
16294 TYPE_NAME (bool_char_type_node) = tdecl;
16295
16296 tdecl = add_builtin_type ("__bool short", bool_short_type_node);
16297 TYPE_NAME (bool_short_type_node) = tdecl;
16298
16299 tdecl = add_builtin_type ("__bool int", bool_int_type_node);
16300 TYPE_NAME (bool_int_type_node) = tdecl;
16301
16302 tdecl = add_builtin_type ("__pixel", pixel_type_node);
16303 TYPE_NAME (pixel_type_node) = tdecl;
16304
16305 bool_V16QI_type_node = rs6000_vector_type ("__vector __bool char",
16306 bool_char_type_node, 16);
16307 bool_V8HI_type_node = rs6000_vector_type ("__vector __bool short",
16308 bool_short_type_node, 8);
16309 bool_V4SI_type_node = rs6000_vector_type ("__vector __bool int",
16310 bool_int_type_node, 4);
16311 bool_V2DI_type_node = rs6000_vector_type (TARGET_POWERPC64
16312 ? "__vector __bool long"
16313 : "__vector __bool long long",
16314 bool_long_long_type_node, 2);
16315 pixel_V8HI_type_node = rs6000_vector_type ("__vector __pixel",
16316 pixel_type_node, 8);
16317
16318 /* Create Altivec and VSX builtins on machines with at least the
16319 general purpose extensions (970 and newer) to allow the use of
16320 the target attribute. */
16321 if (TARGET_EXTRA_BUILTINS)
16322 altivec_init_builtins ();
16323 if (TARGET_HTM)
16324 htm_init_builtins ();
16325
16326 if (TARGET_EXTRA_BUILTINS)
16327 rs6000_common_init_builtins ();
16328
16329 ftype = builtin_function_type (DFmode, DFmode, DFmode, VOIDmode,
16330 RS6000_BUILTIN_RECIP, "__builtin_recipdiv");
16331 def_builtin ("__builtin_recipdiv", ftype, RS6000_BUILTIN_RECIP);
16332
16333 ftype = builtin_function_type (SFmode, SFmode, SFmode, VOIDmode,
16334 RS6000_BUILTIN_RECIPF, "__builtin_recipdivf");
16335 def_builtin ("__builtin_recipdivf", ftype, RS6000_BUILTIN_RECIPF);
16336
16337 ftype = builtin_function_type (DFmode, DFmode, VOIDmode, VOIDmode,
16338 RS6000_BUILTIN_RSQRT, "__builtin_rsqrt");
16339 def_builtin ("__builtin_rsqrt", ftype, RS6000_BUILTIN_RSQRT);
16340
16341 ftype = builtin_function_type (SFmode, SFmode, VOIDmode, VOIDmode,
16342 RS6000_BUILTIN_RSQRTF, "__builtin_rsqrtf");
16343 def_builtin ("__builtin_rsqrtf", ftype, RS6000_BUILTIN_RSQRTF);
16344
16345 mode = (TARGET_64BIT) ? DImode : SImode;
16346 ftype = builtin_function_type (mode, mode, mode, VOIDmode,
16347 POWER7_BUILTIN_BPERMD, "__builtin_bpermd");
16348 def_builtin ("__builtin_bpermd", ftype, POWER7_BUILTIN_BPERMD);
16349
16350 ftype = build_function_type_list (unsigned_intDI_type_node,
16351 NULL_TREE);
16352 def_builtin ("__builtin_ppc_get_timebase", ftype, RS6000_BUILTIN_GET_TB);
16353
16354 if (TARGET_64BIT)
16355 ftype = build_function_type_list (unsigned_intDI_type_node,
16356 NULL_TREE);
16357 else
16358 ftype = build_function_type_list (unsigned_intSI_type_node,
16359 NULL_TREE);
16360 def_builtin ("__builtin_ppc_mftb", ftype, RS6000_BUILTIN_MFTB);
16361
16362 ftype = build_function_type_list (double_type_node, NULL_TREE);
16363 def_builtin ("__builtin_mffs", ftype, RS6000_BUILTIN_MFFS);
16364
16365 ftype = build_function_type_list (double_type_node, NULL_TREE);
16366 def_builtin ("__builtin_mffsl", ftype, RS6000_BUILTIN_MFFSL);
16367
16368 ftype = build_function_type_list (void_type_node,
16369 intSI_type_node,
16370 NULL_TREE);
16371 def_builtin ("__builtin_mtfsb0", ftype, RS6000_BUILTIN_MTFSB0);
16372
16373 ftype = build_function_type_list (void_type_node,
16374 intSI_type_node,
16375 NULL_TREE);
16376 def_builtin ("__builtin_mtfsb1", ftype, RS6000_BUILTIN_MTFSB1);
16377
16378 ftype = build_function_type_list (void_type_node,
16379 intDI_type_node,
16380 NULL_TREE);
16381 def_builtin ("__builtin_set_fpscr_rn", ftype, RS6000_BUILTIN_SET_FPSCR_RN);
16382
16383 ftype = build_function_type_list (void_type_node,
16384 intDI_type_node,
16385 NULL_TREE);
16386 def_builtin ("__builtin_set_fpscr_drn", ftype, RS6000_BUILTIN_SET_FPSCR_DRN);
16387
16388 ftype = build_function_type_list (void_type_node,
16389 intSI_type_node, double_type_node,
16390 NULL_TREE);
16391 def_builtin ("__builtin_mtfsf", ftype, RS6000_BUILTIN_MTFSF);
16392
16393 ftype = build_function_type_list (void_type_node, NULL_TREE);
16394 def_builtin ("__builtin_cpu_init", ftype, RS6000_BUILTIN_CPU_INIT);
16395 def_builtin ("__builtin_ppc_speculation_barrier", ftype,
16396 MISC_BUILTIN_SPEC_BARRIER);
16397
16398 ftype = build_function_type_list (bool_int_type_node, const_ptr_type_node,
16399 NULL_TREE);
16400 def_builtin ("__builtin_cpu_is", ftype, RS6000_BUILTIN_CPU_IS);
16401 def_builtin ("__builtin_cpu_supports", ftype, RS6000_BUILTIN_CPU_SUPPORTS);
16402
16403 /* AIX libm provides clog as __clog. */
16404   if (TARGET_XCOFF
16405       && (tdecl = builtin_decl_explicit (BUILT_IN_CLOG)) != NULL_TREE)
16406 set_user_assembler_name (tdecl, "__clog");
16407
16408 #ifdef SUBTARGET_INIT_BUILTINS
16409 SUBTARGET_INIT_BUILTINS;
16410 #endif
16411 }
16412
16413 /* Returns the rs6000 builtin decl for CODE. */
16414
16415 static tree
16416 rs6000_builtin_decl (unsigned code, bool initialize_p ATTRIBUTE_UNUSED)
16417 {
16418 HOST_WIDE_INT fnmask;
16419
16420 if (code >= RS6000_BUILTIN_COUNT)
16421 return error_mark_node;
16422
16423 fnmask = rs6000_builtin_info[code].mask;
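  /* A builtin is available only when every feature bit it requires (fnmask)
     is also set in the current rs6000_builtin_mask.  */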
16424 if ((fnmask & rs6000_builtin_mask) != fnmask)
16425 {
16426 rs6000_invalid_builtin ((enum rs6000_builtins)code);
16427 return error_mark_node;
16428 }
16429
16430 return rs6000_builtin_decls[code];
16431 }
16432
16433 static void
16434 altivec_init_builtins (void)
16435 {
16436 const struct builtin_description *d;
16437 size_t i;
16438 tree ftype;
16439 tree decl;
16440 HOST_WIDE_INT builtin_mask = rs6000_builtin_mask;
16441
16442 tree pvoid_type_node = build_pointer_type (void_type_node);
16443
16444 tree pcvoid_type_node
16445 = build_pointer_type (build_qualified_type (void_type_node,
16446 TYPE_QUAL_CONST));
16447
16448 tree int_ftype_opaque
16449 = build_function_type_list (integer_type_node,
16450 opaque_V4SI_type_node, NULL_TREE);
16451 tree opaque_ftype_opaque
16452 = build_function_type_list (integer_type_node, NULL_TREE);
16453 tree opaque_ftype_opaque_int
16454 = build_function_type_list (opaque_V4SI_type_node,
16455 opaque_V4SI_type_node, integer_type_node, NULL_TREE);
16456 tree opaque_ftype_opaque_opaque_int
16457 = build_function_type_list (opaque_V4SI_type_node,
16458 opaque_V4SI_type_node, opaque_V4SI_type_node,
16459 integer_type_node, NULL_TREE);
16460 tree opaque_ftype_opaque_opaque_opaque
16461 = build_function_type_list (opaque_V4SI_type_node,
16462 opaque_V4SI_type_node, opaque_V4SI_type_node,
16463 opaque_V4SI_type_node, NULL_TREE);
16464 tree opaque_ftype_opaque_opaque
16465 = build_function_type_list (opaque_V4SI_type_node,
16466 opaque_V4SI_type_node, opaque_V4SI_type_node,
16467 NULL_TREE);
16468 tree int_ftype_int_opaque_opaque
16469 = build_function_type_list (integer_type_node,
16470 integer_type_node, opaque_V4SI_type_node,
16471 opaque_V4SI_type_node, NULL_TREE);
16472 tree int_ftype_int_v4si_v4si
16473 = build_function_type_list (integer_type_node,
16474 integer_type_node, V4SI_type_node,
16475 V4SI_type_node, NULL_TREE);
16476 tree int_ftype_int_v2di_v2di
16477 = build_function_type_list (integer_type_node,
16478 integer_type_node, V2DI_type_node,
16479 V2DI_type_node, NULL_TREE);
16480 tree void_ftype_v4si
16481 = build_function_type_list (void_type_node, V4SI_type_node, NULL_TREE);
16482 tree v8hi_ftype_void
16483 = build_function_type_list (V8HI_type_node, NULL_TREE);
16484 tree void_ftype_void
16485 = build_function_type_list (void_type_node, NULL_TREE);
16486 tree void_ftype_int
16487 = build_function_type_list (void_type_node, integer_type_node, NULL_TREE);
16488
16489 tree opaque_ftype_long_pcvoid
16490 = build_function_type_list (opaque_V4SI_type_node,
16491 long_integer_type_node, pcvoid_type_node,
16492 NULL_TREE);
16493 tree v16qi_ftype_long_pcvoid
16494 = build_function_type_list (V16QI_type_node,
16495 long_integer_type_node, pcvoid_type_node,
16496 NULL_TREE);
16497 tree v8hi_ftype_long_pcvoid
16498 = build_function_type_list (V8HI_type_node,
16499 long_integer_type_node, pcvoid_type_node,
16500 NULL_TREE);
16501 tree v4si_ftype_long_pcvoid
16502 = build_function_type_list (V4SI_type_node,
16503 long_integer_type_node, pcvoid_type_node,
16504 NULL_TREE);
16505 tree v4sf_ftype_long_pcvoid
16506 = build_function_type_list (V4SF_type_node,
16507 long_integer_type_node, pcvoid_type_node,
16508 NULL_TREE);
16509 tree v2df_ftype_long_pcvoid
16510 = build_function_type_list (V2DF_type_node,
16511 long_integer_type_node, pcvoid_type_node,
16512 NULL_TREE);
16513 tree v2di_ftype_long_pcvoid
16514 = build_function_type_list (V2DI_type_node,
16515 long_integer_type_node, pcvoid_type_node,
16516 NULL_TREE);
16517 tree v1ti_ftype_long_pcvoid
16518 = build_function_type_list (V1TI_type_node,
16519 long_integer_type_node, pcvoid_type_node,
16520 NULL_TREE);
16521
16522 tree void_ftype_opaque_long_pvoid
16523 = build_function_type_list (void_type_node,
16524 opaque_V4SI_type_node, long_integer_type_node,
16525 pvoid_type_node, NULL_TREE);
16526 tree void_ftype_v4si_long_pvoid
16527 = build_function_type_list (void_type_node,
16528 V4SI_type_node, long_integer_type_node,
16529 pvoid_type_node, NULL_TREE);
16530 tree void_ftype_v16qi_long_pvoid
16531 = build_function_type_list (void_type_node,
16532 V16QI_type_node, long_integer_type_node,
16533 pvoid_type_node, NULL_TREE);
16534
16535 tree void_ftype_v16qi_pvoid_long
16536 = build_function_type_list (void_type_node,
16537 V16QI_type_node, pvoid_type_node,
16538 long_integer_type_node, NULL_TREE);
16539
16540 tree void_ftype_v8hi_long_pvoid
16541 = build_function_type_list (void_type_node,
16542 V8HI_type_node, long_integer_type_node,
16543 pvoid_type_node, NULL_TREE);
16544 tree void_ftype_v4sf_long_pvoid
16545 = build_function_type_list (void_type_node,
16546 V4SF_type_node, long_integer_type_node,
16547 pvoid_type_node, NULL_TREE);
16548 tree void_ftype_v2df_long_pvoid
16549 = build_function_type_list (void_type_node,
16550 V2DF_type_node, long_integer_type_node,
16551 pvoid_type_node, NULL_TREE);
16552 tree void_ftype_v1ti_long_pvoid
16553 = build_function_type_list (void_type_node,
16554 V1TI_type_node, long_integer_type_node,
16555 pvoid_type_node, NULL_TREE);
16556 tree void_ftype_v2di_long_pvoid
16557 = build_function_type_list (void_type_node,
16558 V2DI_type_node, long_integer_type_node,
16559 pvoid_type_node, NULL_TREE);
16560 tree int_ftype_int_v8hi_v8hi
16561 = build_function_type_list (integer_type_node,
16562 integer_type_node, V8HI_type_node,
16563 V8HI_type_node, NULL_TREE);
16564 tree int_ftype_int_v16qi_v16qi
16565 = build_function_type_list (integer_type_node,
16566 integer_type_node, V16QI_type_node,
16567 V16QI_type_node, NULL_TREE);
16568 tree int_ftype_int_v4sf_v4sf
16569 = build_function_type_list (integer_type_node,
16570 integer_type_node, V4SF_type_node,
16571 V4SF_type_node, NULL_TREE);
16572 tree int_ftype_int_v2df_v2df
16573 = build_function_type_list (integer_type_node,
16574 integer_type_node, V2DF_type_node,
16575 V2DF_type_node, NULL_TREE);
16576 tree v2di_ftype_v2di
16577 = build_function_type_list (V2DI_type_node, V2DI_type_node, NULL_TREE);
16578 tree v4si_ftype_v4si
16579 = build_function_type_list (V4SI_type_node, V4SI_type_node, NULL_TREE);
16580 tree v8hi_ftype_v8hi
16581 = build_function_type_list (V8HI_type_node, V8HI_type_node, NULL_TREE);
16582 tree v16qi_ftype_v16qi
16583 = build_function_type_list (V16QI_type_node, V16QI_type_node, NULL_TREE);
16584 tree v4sf_ftype_v4sf
16585 = build_function_type_list (V4SF_type_node, V4SF_type_node, NULL_TREE);
16586 tree v2df_ftype_v2df
16587 = build_function_type_list (V2DF_type_node, V2DF_type_node, NULL_TREE);
16588 tree void_ftype_pcvoid_int_int
16589 = build_function_type_list (void_type_node,
16590 pcvoid_type_node, integer_type_node,
16591 integer_type_node, NULL_TREE);
16592
16593 def_builtin ("__builtin_altivec_mtvscr", void_ftype_v4si, ALTIVEC_BUILTIN_MTVSCR);
16594 def_builtin ("__builtin_altivec_mfvscr", v8hi_ftype_void, ALTIVEC_BUILTIN_MFVSCR);
16595 def_builtin ("__builtin_altivec_dssall", void_ftype_void, ALTIVEC_BUILTIN_DSSALL);
16596 def_builtin ("__builtin_altivec_dss", void_ftype_int, ALTIVEC_BUILTIN_DSS);
16597 def_builtin ("__builtin_altivec_lvsl", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVSL);
16598 def_builtin ("__builtin_altivec_lvsr", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVSR);
16599 def_builtin ("__builtin_altivec_lvebx", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVEBX);
16600 def_builtin ("__builtin_altivec_lvehx", v8hi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVEHX);
16601 def_builtin ("__builtin_altivec_lvewx", v4si_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVEWX);
16602 def_builtin ("__builtin_altivec_lvxl", v4si_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVXL);
16603 def_builtin ("__builtin_altivec_lvxl_v2df", v2df_ftype_long_pcvoid,
16604 ALTIVEC_BUILTIN_LVXL_V2DF);
16605 def_builtin ("__builtin_altivec_lvxl_v2di", v2di_ftype_long_pcvoid,
16606 ALTIVEC_BUILTIN_LVXL_V2DI);
16607 def_builtin ("__builtin_altivec_lvxl_v4sf", v4sf_ftype_long_pcvoid,
16608 ALTIVEC_BUILTIN_LVXL_V4SF);
16609 def_builtin ("__builtin_altivec_lvxl_v4si", v4si_ftype_long_pcvoid,
16610 ALTIVEC_BUILTIN_LVXL_V4SI);
16611 def_builtin ("__builtin_altivec_lvxl_v8hi", v8hi_ftype_long_pcvoid,
16612 ALTIVEC_BUILTIN_LVXL_V8HI);
16613 def_builtin ("__builtin_altivec_lvxl_v16qi", v16qi_ftype_long_pcvoid,
16614 ALTIVEC_BUILTIN_LVXL_V16QI);
16615 def_builtin ("__builtin_altivec_lvx", v4si_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVX);
16616 def_builtin ("__builtin_altivec_lvx_v1ti", v1ti_ftype_long_pcvoid,
16617 ALTIVEC_BUILTIN_LVX_V1TI);
16618 def_builtin ("__builtin_altivec_lvx_v2df", v2df_ftype_long_pcvoid,
16619 ALTIVEC_BUILTIN_LVX_V2DF);
16620 def_builtin ("__builtin_altivec_lvx_v2di", v2di_ftype_long_pcvoid,
16621 ALTIVEC_BUILTIN_LVX_V2DI);
16622 def_builtin ("__builtin_altivec_lvx_v4sf", v4sf_ftype_long_pcvoid,
16623 ALTIVEC_BUILTIN_LVX_V4SF);
16624 def_builtin ("__builtin_altivec_lvx_v4si", v4si_ftype_long_pcvoid,
16625 ALTIVEC_BUILTIN_LVX_V4SI);
16626 def_builtin ("__builtin_altivec_lvx_v8hi", v8hi_ftype_long_pcvoid,
16627 ALTIVEC_BUILTIN_LVX_V8HI);
16628 def_builtin ("__builtin_altivec_lvx_v16qi", v16qi_ftype_long_pcvoid,
16629 ALTIVEC_BUILTIN_LVX_V16QI);
16630 def_builtin ("__builtin_altivec_stvx", void_ftype_v4si_long_pvoid, ALTIVEC_BUILTIN_STVX);
16631 def_builtin ("__builtin_altivec_stvx_v2df", void_ftype_v2df_long_pvoid,
16632 ALTIVEC_BUILTIN_STVX_V2DF);
16633 def_builtin ("__builtin_altivec_stvx_v2di", void_ftype_v2di_long_pvoid,
16634 ALTIVEC_BUILTIN_STVX_V2DI);
16635 def_builtin ("__builtin_altivec_stvx_v4sf", void_ftype_v4sf_long_pvoid,
16636 ALTIVEC_BUILTIN_STVX_V4SF);
16637 def_builtin ("__builtin_altivec_stvx_v4si", void_ftype_v4si_long_pvoid,
16638 ALTIVEC_BUILTIN_STVX_V4SI);
16639 def_builtin ("__builtin_altivec_stvx_v8hi", void_ftype_v8hi_long_pvoid,
16640 ALTIVEC_BUILTIN_STVX_V8HI);
16641 def_builtin ("__builtin_altivec_stvx_v16qi", void_ftype_v16qi_long_pvoid,
16642 ALTIVEC_BUILTIN_STVX_V16QI);
16643 def_builtin ("__builtin_altivec_stvewx", void_ftype_v4si_long_pvoid, ALTIVEC_BUILTIN_STVEWX);
16644 def_builtin ("__builtin_altivec_stvxl", void_ftype_v4si_long_pvoid, ALTIVEC_BUILTIN_STVXL);
16645 def_builtin ("__builtin_altivec_stvxl_v2df", void_ftype_v2df_long_pvoid,
16646 ALTIVEC_BUILTIN_STVXL_V2DF);
16647 def_builtin ("__builtin_altivec_stvxl_v2di", void_ftype_v2di_long_pvoid,
16648 ALTIVEC_BUILTIN_STVXL_V2DI);
16649 def_builtin ("__builtin_altivec_stvxl_v4sf", void_ftype_v4sf_long_pvoid,
16650 ALTIVEC_BUILTIN_STVXL_V4SF);
16651 def_builtin ("__builtin_altivec_stvxl_v4si", void_ftype_v4si_long_pvoid,
16652 ALTIVEC_BUILTIN_STVXL_V4SI);
16653 def_builtin ("__builtin_altivec_stvxl_v8hi", void_ftype_v8hi_long_pvoid,
16654 ALTIVEC_BUILTIN_STVXL_V8HI);
16655 def_builtin ("__builtin_altivec_stvxl_v16qi", void_ftype_v16qi_long_pvoid,
16656 ALTIVEC_BUILTIN_STVXL_V16QI);
16657 def_builtin ("__builtin_altivec_stvebx", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_STVEBX);
16658 def_builtin ("__builtin_altivec_stvehx", void_ftype_v8hi_long_pvoid, ALTIVEC_BUILTIN_STVEHX);
16659 def_builtin ("__builtin_vec_ld", opaque_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LD);
16660 def_builtin ("__builtin_vec_lde", opaque_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LDE);
16661 def_builtin ("__builtin_vec_ldl", opaque_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LDL);
16662 def_builtin ("__builtin_vec_lvsl", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVSL);
16663 def_builtin ("__builtin_vec_lvsr", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVSR);
16664 def_builtin ("__builtin_vec_lvebx", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVEBX);
16665 def_builtin ("__builtin_vec_lvehx", v8hi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVEHX);
16666 def_builtin ("__builtin_vec_lvewx", v4si_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVEWX);
16667 def_builtin ("__builtin_vec_st", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_ST);
16668 def_builtin ("__builtin_vec_ste", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_STE);
16669 def_builtin ("__builtin_vec_stl", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_STL);
16670 def_builtin ("__builtin_vec_stvewx", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_STVEWX);
16671 def_builtin ("__builtin_vec_stvebx", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_STVEBX);
16672 def_builtin ("__builtin_vec_stvehx", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_STVEHX);
16673
16674 def_builtin ("__builtin_vsx_lxvd2x_v2df", v2df_ftype_long_pcvoid,
16675 VSX_BUILTIN_LXVD2X_V2DF);
16676 def_builtin ("__builtin_vsx_lxvd2x_v2di", v2di_ftype_long_pcvoid,
16677 VSX_BUILTIN_LXVD2X_V2DI);
16678 def_builtin ("__builtin_vsx_lxvw4x_v4sf", v4sf_ftype_long_pcvoid,
16679 VSX_BUILTIN_LXVW4X_V4SF);
16680 def_builtin ("__builtin_vsx_lxvw4x_v4si", v4si_ftype_long_pcvoid,
16681 VSX_BUILTIN_LXVW4X_V4SI);
16682 def_builtin ("__builtin_vsx_lxvw4x_v8hi", v8hi_ftype_long_pcvoid,
16683 VSX_BUILTIN_LXVW4X_V8HI);
16684 def_builtin ("__builtin_vsx_lxvw4x_v16qi", v16qi_ftype_long_pcvoid,
16685 VSX_BUILTIN_LXVW4X_V16QI);
16686 def_builtin ("__builtin_vsx_stxvd2x_v2df", void_ftype_v2df_long_pvoid,
16687 VSX_BUILTIN_STXVD2X_V2DF);
16688 def_builtin ("__builtin_vsx_stxvd2x_v2di", void_ftype_v2di_long_pvoid,
16689 VSX_BUILTIN_STXVD2X_V2DI);
16690 def_builtin ("__builtin_vsx_stxvw4x_v4sf", void_ftype_v4sf_long_pvoid,
16691 VSX_BUILTIN_STXVW4X_V4SF);
16692 def_builtin ("__builtin_vsx_stxvw4x_v4si", void_ftype_v4si_long_pvoid,
16693 VSX_BUILTIN_STXVW4X_V4SI);
16694 def_builtin ("__builtin_vsx_stxvw4x_v8hi", void_ftype_v8hi_long_pvoid,
16695 VSX_BUILTIN_STXVW4X_V8HI);
16696 def_builtin ("__builtin_vsx_stxvw4x_v16qi", void_ftype_v16qi_long_pvoid,
16697 VSX_BUILTIN_STXVW4X_V16QI);
16698
16699 def_builtin ("__builtin_vsx_ld_elemrev_v2df", v2df_ftype_long_pcvoid,
16700 VSX_BUILTIN_LD_ELEMREV_V2DF);
16701 def_builtin ("__builtin_vsx_ld_elemrev_v2di", v2di_ftype_long_pcvoid,
16702 VSX_BUILTIN_LD_ELEMREV_V2DI);
16703 def_builtin ("__builtin_vsx_ld_elemrev_v4sf", v4sf_ftype_long_pcvoid,
16704 VSX_BUILTIN_LD_ELEMREV_V4SF);
16705 def_builtin ("__builtin_vsx_ld_elemrev_v4si", v4si_ftype_long_pcvoid,
16706 VSX_BUILTIN_LD_ELEMREV_V4SI);
16707 def_builtin ("__builtin_vsx_ld_elemrev_v8hi", v8hi_ftype_long_pcvoid,
16708 VSX_BUILTIN_LD_ELEMREV_V8HI);
16709 def_builtin ("__builtin_vsx_ld_elemrev_v16qi", v16qi_ftype_long_pcvoid,
16710 VSX_BUILTIN_LD_ELEMREV_V16QI);
16711 def_builtin ("__builtin_vsx_st_elemrev_v2df", void_ftype_v2df_long_pvoid,
16712 VSX_BUILTIN_ST_ELEMREV_V2DF);
16713 def_builtin ("__builtin_vsx_st_elemrev_v1ti", void_ftype_v1ti_long_pvoid,
16714 VSX_BUILTIN_ST_ELEMREV_V1TI);
16715 def_builtin ("__builtin_vsx_st_elemrev_v2di", void_ftype_v2di_long_pvoid,
16716 VSX_BUILTIN_ST_ELEMREV_V2DI);
16717 def_builtin ("__builtin_vsx_st_elemrev_v4sf", void_ftype_v4sf_long_pvoid,
16718 VSX_BUILTIN_ST_ELEMREV_V4SF);
16719 def_builtin ("__builtin_vsx_st_elemrev_v4si", void_ftype_v4si_long_pvoid,
16720 VSX_BUILTIN_ST_ELEMREV_V4SI);
16721 def_builtin ("__builtin_vsx_st_elemrev_v8hi", void_ftype_v8hi_long_pvoid,
16722 VSX_BUILTIN_ST_ELEMREV_V8HI);
16723 def_builtin ("__builtin_vsx_st_elemrev_v16qi", void_ftype_v16qi_long_pvoid,
16724 VSX_BUILTIN_ST_ELEMREV_V16QI);
16725
16726 def_builtin ("__builtin_vec_vsx_ld", opaque_ftype_long_pcvoid,
16727 VSX_BUILTIN_VEC_LD);
16728 def_builtin ("__builtin_vec_vsx_st", void_ftype_opaque_long_pvoid,
16729 VSX_BUILTIN_VEC_ST);
16730 def_builtin ("__builtin_vec_xl", opaque_ftype_long_pcvoid,
16731 VSX_BUILTIN_VEC_XL);
16732 def_builtin ("__builtin_vec_xl_be", opaque_ftype_long_pcvoid,
16733 VSX_BUILTIN_VEC_XL_BE);
16734 def_builtin ("__builtin_vec_xst", void_ftype_opaque_long_pvoid,
16735 VSX_BUILTIN_VEC_XST);
16736 def_builtin ("__builtin_vec_xst_be", void_ftype_opaque_long_pvoid,
16737 VSX_BUILTIN_VEC_XST_BE);
16738
16739 def_builtin ("__builtin_vec_step", int_ftype_opaque, ALTIVEC_BUILTIN_VEC_STEP);
16740 def_builtin ("__builtin_vec_splats", opaque_ftype_opaque, ALTIVEC_BUILTIN_VEC_SPLATS);
16741 def_builtin ("__builtin_vec_promote", opaque_ftype_opaque, ALTIVEC_BUILTIN_VEC_PROMOTE);
16742
16743 def_builtin ("__builtin_vec_sld", opaque_ftype_opaque_opaque_int, ALTIVEC_BUILTIN_VEC_SLD);
16744 def_builtin ("__builtin_vec_splat", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_SPLAT);
16745 def_builtin ("__builtin_vec_extract", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_EXTRACT);
16746 def_builtin ("__builtin_vec_insert", opaque_ftype_opaque_opaque_int, ALTIVEC_BUILTIN_VEC_INSERT);
16747 def_builtin ("__builtin_vec_vspltw", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_VSPLTW);
16748 def_builtin ("__builtin_vec_vsplth", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_VSPLTH);
16749 def_builtin ("__builtin_vec_vspltb", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_VSPLTB);
16750 def_builtin ("__builtin_vec_ctf", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_CTF);
16751 def_builtin ("__builtin_vec_vcfsx", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_VCFSX);
16752 def_builtin ("__builtin_vec_vcfux", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_VCFUX);
16753 def_builtin ("__builtin_vec_cts", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_CTS);
16754 def_builtin ("__builtin_vec_ctu", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_CTU);
16755
16756 def_builtin ("__builtin_vec_adde", opaque_ftype_opaque_opaque_opaque,
16757 ALTIVEC_BUILTIN_VEC_ADDE);
16758 def_builtin ("__builtin_vec_addec", opaque_ftype_opaque_opaque_opaque,
16759 ALTIVEC_BUILTIN_VEC_ADDEC);
16760 def_builtin ("__builtin_vec_cmpne", opaque_ftype_opaque_opaque,
16761 ALTIVEC_BUILTIN_VEC_CMPNE);
16762 def_builtin ("__builtin_vec_mul", opaque_ftype_opaque_opaque,
16763 ALTIVEC_BUILTIN_VEC_MUL);
16764 def_builtin ("__builtin_vec_sube", opaque_ftype_opaque_opaque_opaque,
16765 ALTIVEC_BUILTIN_VEC_SUBE);
16766 def_builtin ("__builtin_vec_subec", opaque_ftype_opaque_opaque_opaque,
16767 ALTIVEC_BUILTIN_VEC_SUBEC);
16768
16769 /* Cell builtins. */
16770 def_builtin ("__builtin_altivec_lvlx", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVLX);
16771 def_builtin ("__builtin_altivec_lvlxl", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVLXL);
16772 def_builtin ("__builtin_altivec_lvrx", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVRX);
16773 def_builtin ("__builtin_altivec_lvrxl", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVRXL);
16774
16775 def_builtin ("__builtin_vec_lvlx", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVLX);
16776 def_builtin ("__builtin_vec_lvlxl", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVLXL);
16777 def_builtin ("__builtin_vec_lvrx", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVRX);
16778 def_builtin ("__builtin_vec_lvrxl", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVRXL);
16779
16780 def_builtin ("__builtin_altivec_stvlx", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_STVLX);
16781 def_builtin ("__builtin_altivec_stvlxl", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_STVLXL);
16782 def_builtin ("__builtin_altivec_stvrx", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_STVRX);
16783 def_builtin ("__builtin_altivec_stvrxl", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_STVRXL);
16784
16785 def_builtin ("__builtin_vec_stvlx", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_VEC_STVLX);
16786 def_builtin ("__builtin_vec_stvlxl", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_VEC_STVLXL);
16787 def_builtin ("__builtin_vec_stvrx", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_VEC_STVRX);
16788 def_builtin ("__builtin_vec_stvrxl", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_VEC_STVRXL);
16789
16790 if (TARGET_P9_VECTOR)
16791 {
16792 def_builtin ("__builtin_altivec_stxvl", void_ftype_v16qi_pvoid_long,
16793 P9V_BUILTIN_STXVL);
16794 def_builtin ("__builtin_xst_len_r", void_ftype_v16qi_pvoid_long,
16795 P9V_BUILTIN_XST_LEN_R);
16796 }
16797
16798 /* Add the DST variants. */
16799 d = bdesc_dst;
16800 for (i = 0; i < ARRAY_SIZE (bdesc_dst); i++, d++)
16801 {
16802 HOST_WIDE_INT mask = d->mask;
16803
16804 /* It is expected that these dst built-in functions may have
16805 d->icode equal to CODE_FOR_nothing. */
16806 if ((mask & builtin_mask) != mask)
16807 {
16808 if (TARGET_DEBUG_BUILTIN)
16809 fprintf (stderr, "altivec_init_builtins, skip dst %s\n",
16810 d->name);
16811 continue;
16812 }
16813 def_builtin (d->name, void_ftype_pcvoid_int_int, d->code);
16814 }
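
/* Illustrative example added for exposition (not in the original source):
   every surviving dst entry is registered with the type
   "void (const void *, int, int)", so user code such as
     __builtin_altivec_dst (p, ctl, 0);
   starts data-stream touch 0 on address p with control word ctl.  */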
16815
16816 /* Initialize the predicates. */
16817 d = bdesc_altivec_preds;
16818 for (i = 0; i < ARRAY_SIZE (bdesc_altivec_preds); i++, d++)
16819 {
16820 machine_mode mode1;
16821 tree type;
16822 HOST_WIDE_INT mask = d->mask;
16823
16824 if ((mask & builtin_mask) != mask)
16825 {
16826 if (TARGET_DEBUG_BUILTIN)
16827 fprintf (stderr, "altivec_init_builtins, skip predicate %s\n",
16828 d->name);
16829 continue;
16830 }
16831
16832 if (rs6000_overloaded_builtin_p (d->code))
16833 mode1 = VOIDmode;
16834 else
16835 {
16836 /* Cannot define builtin if the instruction is disabled. */
16837 gcc_assert (d->icode != CODE_FOR_nothing);
16838 mode1 = insn_data[d->icode].operand[1].mode;
16839 }
16840
16841 switch (mode1)
16842 {
16843 case E_VOIDmode:
16844 type = int_ftype_int_opaque_opaque;
16845 break;
16846 case E_V2DImode:
16847 type = int_ftype_int_v2di_v2di;
16848 break;
16849 case E_V4SImode:
16850 type = int_ftype_int_v4si_v4si;
16851 break;
16852 case E_V8HImode:
16853 type = int_ftype_int_v8hi_v8hi;
16854 break;
16855 case E_V16QImode:
16856 type = int_ftype_int_v16qi_v16qi;
16857 break;
16858 case E_V4SFmode:
16859 type = int_ftype_int_v4sf_v4sf;
16860 break;
16861 case E_V2DFmode:
16862 type = int_ftype_int_v2df_v2df;
16863 break;
16864 default:
16865 gcc_unreachable ();
16866 }
16867
16868 def_builtin (d->name, type, d->code);
16869 }
16870
16871 /* Initialize the abs* operators. */
16872 d = bdesc_abs;
16873 for (i = 0; i < ARRAY_SIZE (bdesc_abs); i++, d++)
16874 {
16875 machine_mode mode0;
16876 tree type;
16877 HOST_WIDE_INT mask = d->mask;
16878
16879 if ((mask & builtin_mask) != mask)
16880 {
16881 if (TARGET_DEBUG_BUILTIN)
16882 fprintf (stderr, "altivec_init_builtins, skip abs %s\n",
16883 d->name);
16884 continue;
16885 }
16886
16887 /* Cannot define builtin if the instruction is disabled. */
16888 gcc_assert (d->icode != CODE_FOR_nothing);
16889 mode0 = insn_data[d->icode].operand[0].mode;
16890
16891 switch (mode0)
16892 {
16893 case E_V2DImode:
16894 type = v2di_ftype_v2di;
16895 break;
16896 case E_V4SImode:
16897 type = v4si_ftype_v4si;
16898 break;
16899 case E_V8HImode:
16900 type = v8hi_ftype_v8hi;
16901 break;
16902 case E_V16QImode:
16903 type = v16qi_ftype_v16qi;
16904 break;
16905 case E_V4SFmode:
16906 type = v4sf_ftype_v4sf;
16907 break;
16908 case E_V2DFmode:
16909 type = v2df_ftype_v2df;
16910 break;
16911 default:
16912 gcc_unreachable ();
16913 }
16914
16915 def_builtin (d->name, type, d->code);
16916 }
16917
16918 /* Initialize target builtin that implements
16919 targetm.vectorize.builtin_mask_for_load. */
16920
16921 decl = add_builtin_function ("__builtin_altivec_mask_for_load",
16922 v16qi_ftype_long_pcvoid,
16923 ALTIVEC_BUILTIN_MASK_FOR_LOAD,
16924 BUILT_IN_MD, NULL, NULL_TREE);
16925 TREE_READONLY (decl) = 1;
16926 /* Record the decl. Will be used by rs6000_builtin_mask_for_load. */
16927 altivec_builtin_mask_for_load = decl;
16928
16929 /* Access to the vec_init patterns. */
16930 ftype = build_function_type_list (V4SI_type_node, integer_type_node,
16931 integer_type_node, integer_type_node,
16932 integer_type_node, NULL_TREE);
16933 def_builtin ("__builtin_vec_init_v4si", ftype, ALTIVEC_BUILTIN_VEC_INIT_V4SI);
16934
16935 ftype = build_function_type_list (V8HI_type_node, short_integer_type_node,
16936 short_integer_type_node,
16937 short_integer_type_node,
16938 short_integer_type_node,
16939 short_integer_type_node,
16940 short_integer_type_node,
16941 short_integer_type_node,
16942 short_integer_type_node, NULL_TREE);
16943 def_builtin ("__builtin_vec_init_v8hi", ftype, ALTIVEC_BUILTIN_VEC_INIT_V8HI);
16944
16945 ftype = build_function_type_list (V16QI_type_node, char_type_node,
16946 char_type_node, char_type_node,
16947 char_type_node, char_type_node,
16948 char_type_node, char_type_node,
16949 char_type_node, char_type_node,
16950 char_type_node, char_type_node,
16951 char_type_node, char_type_node,
16952 char_type_node, char_type_node,
16953 char_type_node, NULL_TREE);
16954 def_builtin ("__builtin_vec_init_v16qi", ftype,
16955 ALTIVEC_BUILTIN_VEC_INIT_V16QI);
16956
16957 ftype = build_function_type_list (V4SF_type_node, float_type_node,
16958 float_type_node, float_type_node,
16959 float_type_node, NULL_TREE);
16960 def_builtin ("__builtin_vec_init_v4sf", ftype, ALTIVEC_BUILTIN_VEC_INIT_V4SF);
16961
16962 /* VSX builtins. */
16963 ftype = build_function_type_list (V2DF_type_node, double_type_node,
16964 double_type_node, NULL_TREE);
16965 def_builtin ("__builtin_vec_init_v2df", ftype, VSX_BUILTIN_VEC_INIT_V2DF);
16966
16967 ftype = build_function_type_list (V2DI_type_node, intDI_type_node,
16968 intDI_type_node, NULL_TREE);
16969 def_builtin ("__builtin_vec_init_v2di", ftype, VSX_BUILTIN_VEC_INIT_V2DI);
16970
16971 /* Access to the vec_set patterns. */
16972 ftype = build_function_type_list (V4SI_type_node, V4SI_type_node,
16973 intSI_type_node,
16974 integer_type_node, NULL_TREE);
16975 def_builtin ("__builtin_vec_set_v4si", ftype, ALTIVEC_BUILTIN_VEC_SET_V4SI);
16976
16977 ftype = build_function_type_list (V8HI_type_node, V8HI_type_node,
16978 intHI_type_node,
16979 integer_type_node, NULL_TREE);
16980 def_builtin ("__builtin_vec_set_v8hi", ftype, ALTIVEC_BUILTIN_VEC_SET_V8HI);
16981
16982 ftype = build_function_type_list (V16QI_type_node, V16QI_type_node,
16983 intQI_type_node,
16984 integer_type_node, NULL_TREE);
16985 def_builtin ("__builtin_vec_set_v16qi", ftype, ALTIVEC_BUILTIN_VEC_SET_V16QI);
16986
16987 ftype = build_function_type_list (V4SF_type_node, V4SF_type_node,
16988 float_type_node,
16989 integer_type_node, NULL_TREE);
16990 def_builtin ("__builtin_vec_set_v4sf", ftype, ALTIVEC_BUILTIN_VEC_SET_V4SF);
16991
16992 ftype = build_function_type_list (V2DF_type_node, V2DF_type_node,
16993 double_type_node,
16994 integer_type_node, NULL_TREE);
16995 def_builtin ("__builtin_vec_set_v2df", ftype, VSX_BUILTIN_VEC_SET_V2DF);
16996
16997 ftype = build_function_type_list (V2DI_type_node, V2DI_type_node,
16998 intDI_type_node,
16999 integer_type_node, NULL_TREE);
17000 def_builtin ("__builtin_vec_set_v2di", ftype, VSX_BUILTIN_VEC_SET_V2DI);
17001
17002 /* Access to the vec_extract patterns. */
17003 ftype = build_function_type_list (intSI_type_node, V4SI_type_node,
17004 integer_type_node, NULL_TREE);
17005 def_builtin ("__builtin_vec_ext_v4si", ftype, ALTIVEC_BUILTIN_VEC_EXT_V4SI);
17006
17007 ftype = build_function_type_list (intHI_type_node, V8HI_type_node,
17008 integer_type_node, NULL_TREE);
17009 def_builtin ("__builtin_vec_ext_v8hi", ftype, ALTIVEC_BUILTIN_VEC_EXT_V8HI);
17010
17011 ftype = build_function_type_list (intQI_type_node, V16QI_type_node,
17012 integer_type_node, NULL_TREE);
17013 def_builtin ("__builtin_vec_ext_v16qi", ftype, ALTIVEC_BUILTIN_VEC_EXT_V16QI);
17014
17015 ftype = build_function_type_list (float_type_node, V4SF_type_node,
17016 integer_type_node, NULL_TREE);
17017 def_builtin ("__builtin_vec_ext_v4sf", ftype, ALTIVEC_BUILTIN_VEC_EXT_V4SF);
17018
17019 ftype = build_function_type_list (double_type_node, V2DF_type_node,
17020 integer_type_node, NULL_TREE);
17021 def_builtin ("__builtin_vec_ext_v2df", ftype, VSX_BUILTIN_VEC_EXT_V2DF);
17022
17023 ftype = build_function_type_list (intDI_type_node, V2DI_type_node,
17024 integer_type_node, NULL_TREE);
17025 def_builtin ("__builtin_vec_ext_v2di", ftype, VSX_BUILTIN_VEC_EXT_V2DI);
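
/* Illustrative example added for exposition (not in the original source):
   given the types registered above, user code such as
     vector int v = __builtin_vec_init_v4si (1, 2, 3, 4);
     v = __builtin_vec_set_v4si (v, 99, 0);
     int x = __builtin_vec_ext_v4si (v, 2);
   builds a V4SI value, replaces element 0, and extracts element 2; the
   element index is always the final integer argument.  */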
17026
17027
17028 if (V1TI_type_node)
17029 {
17030 tree v1ti_ftype_long_pcvoid
17031 = build_function_type_list (V1TI_type_node,
17032 long_integer_type_node, pcvoid_type_node,
17033 NULL_TREE);
17034 tree void_ftype_v1ti_long_pvoid
17035 = build_function_type_list (void_type_node,
17036 V1TI_type_node, long_integer_type_node,
17037 pvoid_type_node, NULL_TREE);
17038 def_builtin ("__builtin_vsx_ld_elemrev_v1ti", v1ti_ftype_long_pcvoid,
17039 VSX_BUILTIN_LD_ELEMREV_V1TI);
17040 def_builtin ("__builtin_vsx_lxvd2x_v1ti", v1ti_ftype_long_pcvoid,
17041 VSX_BUILTIN_LXVD2X_V1TI);
17042 def_builtin ("__builtin_vsx_stxvd2x_v1ti", void_ftype_v1ti_long_pvoid,
17043 VSX_BUILTIN_STXVD2X_V1TI);
17044 ftype = build_function_type_list (V1TI_type_node, intTI_type_node,
17045 NULL_TREE, NULL_TREE);
17046 def_builtin ("__builtin_vec_init_v1ti", ftype, VSX_BUILTIN_VEC_INIT_V1TI);
17047 ftype = build_function_type_list (V1TI_type_node, V1TI_type_node,
17048 intTI_type_node,
17049 integer_type_node, NULL_TREE);
17050 def_builtin ("__builtin_vec_set_v1ti", ftype, VSX_BUILTIN_VEC_SET_V1TI);
17051 ftype = build_function_type_list (intTI_type_node, V1TI_type_node,
17052 integer_type_node, NULL_TREE);
17053 def_builtin ("__builtin_vec_ext_v1ti", ftype, VSX_BUILTIN_VEC_EXT_V1TI);
17054 }
17055
17056 }
17057
17058 static void
17059 htm_init_builtins (void)
17060 {
17061 HOST_WIDE_INT builtin_mask = rs6000_builtin_mask;
17062 const struct builtin_description *d;
17063 size_t i;
17064
17065 d = bdesc_htm;
17066 for (i = 0; i < ARRAY_SIZE (bdesc_htm); i++, d++)
17067 {
17068 tree op[MAX_HTM_OPERANDS], type;
17069 HOST_WIDE_INT mask = d->mask;
17070 unsigned attr = rs6000_builtin_info[d->code].attr;
17071 bool void_func = (attr & RS6000_BTC_VOID);
17072 int attr_args = (attr & RS6000_BTC_TYPE_MASK);
17073 int nopnds = 0;
17074 tree gpr_type_node;
17075 tree rettype;
17076 tree argtype;
17077
17078 /* It is expected that these htm built-in functions may have
17079 d->icode equal to CODE_FOR_nothing. */
17080
17081 if (TARGET_32BIT && TARGET_POWERPC64)
17082 gpr_type_node = long_long_unsigned_type_node;
17083 else
17084 gpr_type_node = long_unsigned_type_node;
17085
17086 if (attr & RS6000_BTC_SPR)
17087 {
17088 rettype = gpr_type_node;
17089 argtype = gpr_type_node;
17090 }
17091 else if (d->code == HTM_BUILTIN_TABORTDC
17092 || d->code == HTM_BUILTIN_TABORTDCI)
17093 {
17094 rettype = unsigned_type_node;
17095 argtype = gpr_type_node;
17096 }
17097 else
17098 {
17099 rettype = unsigned_type_node;
17100 argtype = unsigned_type_node;
17101 }
17102
17103 if ((mask & builtin_mask) != mask)
17104 {
17105 if (TARGET_DEBUG_BUILTIN)
17106 fprintf (stderr, "htm_builtin, skip htm %s\n", d->name);
17107 continue;
17108 }
17109
17110 if (d->name == 0)
17111 {
17112 if (TARGET_DEBUG_BUILTIN)
17113 fprintf (stderr, "htm_builtin, bdesc_htm[%lu] no name\n",
17114 (long unsigned) i);
17115 continue;
17116 }
17117
17118 op[nopnds++] = (void_func) ? void_type_node : rettype;
17119
17120 if (attr_args == RS6000_BTC_UNARY)
17121 op[nopnds++] = argtype;
17122 else if (attr_args == RS6000_BTC_BINARY)
17123 {
17124 op[nopnds++] = argtype;
17125 op[nopnds++] = argtype;
17126 }
17127 else if (attr_args == RS6000_BTC_TERNARY)
17128 {
17129 op[nopnds++] = argtype;
17130 op[nopnds++] = argtype;
17131 op[nopnds++] = argtype;
17132 }
17133
17134 switch (nopnds)
17135 {
17136 case 1:
17137 type = build_function_type_list (op[0], NULL_TREE);
17138 break;
17139 case 2:
17140 type = build_function_type_list (op[0], op[1], NULL_TREE);
17141 break;
17142 case 3:
17143 type = build_function_type_list (op[0], op[1], op[2], NULL_TREE);
17144 break;
17145 case 4:
17146 type = build_function_type_list (op[0], op[1], op[2], op[3],
17147 NULL_TREE);
17148 break;
17149 default:
17150 gcc_unreachable ();
17151 }
17152
17153 def_builtin (d->name, type, d->code);
17154 }
17155 }
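
/* Illustrative example added for exposition (not in the original source):
   an SPR accessor carrying RS6000_BTC_SPR and no argument bits ends up
   with nopnds == 1 and type "unsigned long (void)" (or unsigned long long
   under -m32 -mpowerpc64), e.g. __builtin_get_texasr; TABORTDC and
   TABORTDCI keep an unsigned int result but take gpr-sized arguments.  */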
17156
17157 /* Hash function for builtin functions with up to 3 arguments and a return
17158 type. */
17159 hashval_t
17160 builtin_hasher::hash (builtin_hash_struct *bh)
17161 {
17162 unsigned ret = 0;
17163 int i;
17164
17165 for (i = 0; i < 4; i++)
17166 {
17167 ret = (ret * (unsigned)MAX_MACHINE_MODE) + ((unsigned)bh->mode[i]);
17168 ret = (ret * 2) + bh->uns_p[i];
17169 }
17170
17171 return ret;
17172 }
17173
17174 /* Compare builtin hash entries H1 and H2 for equivalence. */
17175 bool
17176 builtin_hasher::equal (builtin_hash_struct *p1, builtin_hash_struct *p2)
17177 {
17178 return ((p1->mode[0] == p2->mode[0])
17179 && (p1->mode[1] == p2->mode[1])
17180 && (p1->mode[2] == p2->mode[2])
17181 && (p1->mode[3] == p2->mode[3])
17182 && (p1->uns_p[0] == p2->uns_p[0])
17183 && (p1->uns_p[1] == p2->uns_p[1])
17184 && (p1->uns_p[2] == p2->uns_p[2])
17185 && (p1->uns_p[3] == p2->uns_p[3]));
17186 }
17187
17188 /* Map types for builtin functions with an explicit return type and up to 3
17189 arguments. Functions with fewer than 3 arguments pass VOIDmode for the
17190 unused argument modes. */
17191 static tree
17192 builtin_function_type (machine_mode mode_ret, machine_mode mode_arg0,
17193 machine_mode mode_arg1, machine_mode mode_arg2,
17194 enum rs6000_builtins builtin, const char *name)
17195 {
17196 struct builtin_hash_struct h;
17197 struct builtin_hash_struct *h2;
17198 int num_args = 3;
17199 int i;
17200 tree ret_type = NULL_TREE;
17201 tree arg_type[3] = { NULL_TREE, NULL_TREE, NULL_TREE };
17202
17203 /* Create builtin_hash_table. */
17204 if (builtin_hash_table == NULL)
17205 builtin_hash_table = hash_table<builtin_hasher>::create_ggc (1500);
17206
17207 h.type = NULL_TREE;
17208 h.mode[0] = mode_ret;
17209 h.mode[1] = mode_arg0;
17210 h.mode[2] = mode_arg1;
17211 h.mode[3] = mode_arg2;
17212 h.uns_p[0] = 0;
17213 h.uns_p[1] = 0;
17214 h.uns_p[2] = 0;
17215 h.uns_p[3] = 0;
17216
17217 /* If the builtin produces unsigned results or takes unsigned arguments,
17218 and its decl is handed to the vectorizer (such as widening multiplies,
17219 permute), make sure the arguments and return value are type
17220 correct. */
17221 switch (builtin)
17222 {
17223 /* unsigned 1 argument functions. */
17224 case CRYPTO_BUILTIN_VSBOX:
17225 case CRYPTO_BUILTIN_VSBOX_BE:
17226 case P8V_BUILTIN_VGBBD:
17227 case MISC_BUILTIN_CDTBCD:
17228 case MISC_BUILTIN_CBCDTD:
17229 h.uns_p[0] = 1;
17230 h.uns_p[1] = 1;
17231 break;
17232
17233 /* unsigned 2 argument functions. */
17234 case ALTIVEC_BUILTIN_VMULEUB:
17235 case ALTIVEC_BUILTIN_VMULEUH:
17236 case P8V_BUILTIN_VMULEUW:
17237 case ALTIVEC_BUILTIN_VMULOUB:
17238 case ALTIVEC_BUILTIN_VMULOUH:
17239 case P8V_BUILTIN_VMULOUW:
17240 case CRYPTO_BUILTIN_VCIPHER:
17241 case CRYPTO_BUILTIN_VCIPHER_BE:
17242 case CRYPTO_BUILTIN_VCIPHERLAST:
17243 case CRYPTO_BUILTIN_VCIPHERLAST_BE:
17244 case CRYPTO_BUILTIN_VNCIPHER:
17245 case CRYPTO_BUILTIN_VNCIPHER_BE:
17246 case CRYPTO_BUILTIN_VNCIPHERLAST:
17247 case CRYPTO_BUILTIN_VNCIPHERLAST_BE:
17248 case CRYPTO_BUILTIN_VPMSUMB:
17249 case CRYPTO_BUILTIN_VPMSUMH:
17250 case CRYPTO_BUILTIN_VPMSUMW:
17251 case CRYPTO_BUILTIN_VPMSUMD:
17252 case CRYPTO_BUILTIN_VPMSUM:
17253 case MISC_BUILTIN_ADDG6S:
17254 case MISC_BUILTIN_DIVWEU:
17255 case MISC_BUILTIN_DIVDEU:
17256 case VSX_BUILTIN_UDIV_V2DI:
17257 case ALTIVEC_BUILTIN_VMAXUB:
17258 case ALTIVEC_BUILTIN_VMINUB:
17259 case ALTIVEC_BUILTIN_VMAXUH:
17260 case ALTIVEC_BUILTIN_VMINUH:
17261 case ALTIVEC_BUILTIN_VMAXUW:
17262 case ALTIVEC_BUILTIN_VMINUW:
17263 case P8V_BUILTIN_VMAXUD:
17264 case P8V_BUILTIN_VMINUD:
17265 h.uns_p[0] = 1;
17266 h.uns_p[1] = 1;
17267 h.uns_p[2] = 1;
17268 break;
17269
17270 /* unsigned 3 argument functions. */
17271 case ALTIVEC_BUILTIN_VPERM_16QI_UNS:
17272 case ALTIVEC_BUILTIN_VPERM_8HI_UNS:
17273 case ALTIVEC_BUILTIN_VPERM_4SI_UNS:
17274 case ALTIVEC_BUILTIN_VPERM_2DI_UNS:
17275 case ALTIVEC_BUILTIN_VSEL_16QI_UNS:
17276 case ALTIVEC_BUILTIN_VSEL_8HI_UNS:
17277 case ALTIVEC_BUILTIN_VSEL_4SI_UNS:
17278 case ALTIVEC_BUILTIN_VSEL_2DI_UNS:
17279 case VSX_BUILTIN_VPERM_16QI_UNS:
17280 case VSX_BUILTIN_VPERM_8HI_UNS:
17281 case VSX_BUILTIN_VPERM_4SI_UNS:
17282 case VSX_BUILTIN_VPERM_2DI_UNS:
17283 case VSX_BUILTIN_XXSEL_16QI_UNS:
17284 case VSX_BUILTIN_XXSEL_8HI_UNS:
17285 case VSX_BUILTIN_XXSEL_4SI_UNS:
17286 case VSX_BUILTIN_XXSEL_2DI_UNS:
17287 case CRYPTO_BUILTIN_VPERMXOR:
17288 case CRYPTO_BUILTIN_VPERMXOR_V2DI:
17289 case CRYPTO_BUILTIN_VPERMXOR_V4SI:
17290 case CRYPTO_BUILTIN_VPERMXOR_V8HI:
17291 case CRYPTO_BUILTIN_VPERMXOR_V16QI:
17292 case CRYPTO_BUILTIN_VSHASIGMAW:
17293 case CRYPTO_BUILTIN_VSHASIGMAD:
17294 case CRYPTO_BUILTIN_VSHASIGMA:
17295 h.uns_p[0] = 1;
17296 h.uns_p[1] = 1;
17297 h.uns_p[2] = 1;
17298 h.uns_p[3] = 1;
17299 break;
17300
17301 /* signed permute functions with unsigned char mask. */
17302 case ALTIVEC_BUILTIN_VPERM_16QI:
17303 case ALTIVEC_BUILTIN_VPERM_8HI:
17304 case ALTIVEC_BUILTIN_VPERM_4SI:
17305 case ALTIVEC_BUILTIN_VPERM_4SF:
17306 case ALTIVEC_BUILTIN_VPERM_2DI:
17307 case ALTIVEC_BUILTIN_VPERM_2DF:
17308 case VSX_BUILTIN_VPERM_16QI:
17309 case VSX_BUILTIN_VPERM_8HI:
17310 case VSX_BUILTIN_VPERM_4SI:
17311 case VSX_BUILTIN_VPERM_4SF:
17312 case VSX_BUILTIN_VPERM_2DI:
17313 case VSX_BUILTIN_VPERM_2DF:
17314 h.uns_p[3] = 1;
17315 break;
17316
17317 /* unsigned args, signed return. */
17318 case VSX_BUILTIN_XVCVUXDSP:
17319 case VSX_BUILTIN_XVCVUXDDP_UNS:
17320 case ALTIVEC_BUILTIN_UNSFLOAT_V4SI_V4SF:
17321 h.uns_p[1] = 1;
17322 break;
17323
17324 /* signed args, unsigned return. */
17325 case VSX_BUILTIN_XVCVDPUXDS_UNS:
17326 case ALTIVEC_BUILTIN_FIXUNS_V4SF_V4SI:
17327 case MISC_BUILTIN_UNPACK_TD:
17328 case MISC_BUILTIN_UNPACK_V1TI:
17329 h.uns_p[0] = 1;
17330 break;
17331
17332 /* unsigned arguments, bool return (compares). */
17333 case ALTIVEC_BUILTIN_VCMPEQUB:
17334 case ALTIVEC_BUILTIN_VCMPEQUH:
17335 case ALTIVEC_BUILTIN_VCMPEQUW:
17336 case P8V_BUILTIN_VCMPEQUD:
17337 case VSX_BUILTIN_CMPGE_U16QI:
17338 case VSX_BUILTIN_CMPGE_U8HI:
17339 case VSX_BUILTIN_CMPGE_U4SI:
17340 case VSX_BUILTIN_CMPGE_U2DI:
17341 case ALTIVEC_BUILTIN_VCMPGTUB:
17342 case ALTIVEC_BUILTIN_VCMPGTUH:
17343 case ALTIVEC_BUILTIN_VCMPGTUW:
17344 case P8V_BUILTIN_VCMPGTUD:
17345 h.uns_p[1] = 1;
17346 h.uns_p[2] = 1;
17347 break;
17348
17349 /* unsigned arguments for 128-bit pack instructions. */
17350 case MISC_BUILTIN_PACK_TD:
17351 case MISC_BUILTIN_PACK_V1TI:
17352 h.uns_p[1] = 1;
17353 h.uns_p[2] = 1;
17354 break;
17355
17356 /* unsigned second argument (vector shift right). */
17357 case ALTIVEC_BUILTIN_VSRB:
17358 case ALTIVEC_BUILTIN_VSRH:
17359 case ALTIVEC_BUILTIN_VSRW:
17360 case P8V_BUILTIN_VSRD:
17361 h.uns_p[2] = 1;
17362 break;
17363
17364 default:
17365 break;
17366 }
17367
17368 /* Figure out how many args are present. */
17369 while (num_args > 0 && h.mode[num_args] == VOIDmode)
17370 num_args--;
17371
17372 ret_type = builtin_mode_to_type[h.mode[0]][h.uns_p[0]];
17373 if (!ret_type && h.uns_p[0])
17374 ret_type = builtin_mode_to_type[h.mode[0]][0];
17375
17376 if (!ret_type)
17377 fatal_error (input_location,
17378 "internal error: builtin function %qs had an unexpected "
17379 "return type %qs", name, GET_MODE_NAME (h.mode[0]));
17380
17381 for (i = 0; i < (int) ARRAY_SIZE (arg_type); i++)
17382 arg_type[i] = NULL_TREE;
17383
17384 for (i = 0; i < num_args; i++)
17385 {
17386 int m = (int) h.mode[i+1];
17387 int uns_p = h.uns_p[i+1];
17388
17389 arg_type[i] = builtin_mode_to_type[m][uns_p];
17390 if (!arg_type[i] && uns_p)
17391 arg_type[i] = builtin_mode_to_type[m][0];
17392
17393 if (!arg_type[i])
17394 fatal_error (input_location,
17395 "internal error: builtin function %qs, argument %d "
17396 "had unexpected argument type %qs", name, i,
17397 GET_MODE_NAME (m));
17398 }
17399
17400 builtin_hash_struct **found = builtin_hash_table->find_slot (&h, INSERT);
17401 if (*found == NULL)
17402 {
17403 h2 = ggc_alloc<builtin_hash_struct> ();
17404 *h2 = h;
17405 *found = h2;
17406
17407 h2->type = build_function_type_list (ret_type, arg_type[0], arg_type[1],
17408 arg_type[2], NULL_TREE);
17409 }
17410
17411 return (*found)->type;
17412 }
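
/* Illustrative example added for exposition (not in the original source):
   builtin_function_type (V4SImode, V4SImode, V4SImode, VOIDmode, ...)
   drops num_args to 2 because the trailing mode is VOIDmode, and returns
   the tree for "v4si (v4si, v4si)"; a later call with the same modes and
   signedness reuses the hash table entry instead of building a new
   FUNCTION_TYPE.  */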
17413
17414 static void
17415 rs6000_common_init_builtins (void)
17416 {
17417 const struct builtin_description *d;
17418 size_t i;
17419
17420 tree opaque_ftype_opaque = NULL_TREE;
17421 tree opaque_ftype_opaque_opaque = NULL_TREE;
17422 tree opaque_ftype_opaque_opaque_opaque = NULL_TREE;
17423 HOST_WIDE_INT builtin_mask = rs6000_builtin_mask;
17424
17425 /* Create Altivec and VSX builtins on machines with at least the
17426 general purpose extensions (970 and newer) to allow the use of
17427 the target attribute. */
17428
17429 if (TARGET_EXTRA_BUILTINS)
17430 builtin_mask |= RS6000_BTM_COMMON;
17431
17432 /* Add the ternary operators. */
17433 d = bdesc_3arg;
17434 for (i = 0; i < ARRAY_SIZE (bdesc_3arg); i++, d++)
17435 {
17436 tree type;
17437 HOST_WIDE_INT mask = d->mask;
17438
17439 if ((mask & builtin_mask) != mask)
17440 {
17441 if (TARGET_DEBUG_BUILTIN)
17442 fprintf (stderr, "rs6000_builtin, skip ternary %s\n", d->name);
17443 continue;
17444 }
17445
17446 if (rs6000_overloaded_builtin_p (d->code))
17447 {
17448 if (! (type = opaque_ftype_opaque_opaque_opaque))
17449 type = opaque_ftype_opaque_opaque_opaque
17450 = build_function_type_list (opaque_V4SI_type_node,
17451 opaque_V4SI_type_node,
17452 opaque_V4SI_type_node,
17453 opaque_V4SI_type_node,
17454 NULL_TREE);
17455 }
17456 else
17457 {
17458 enum insn_code icode = d->icode;
17459 if (d->name == 0)
17460 {
17461 if (TARGET_DEBUG_BUILTIN)
17462 fprintf (stderr, "rs6000_builtin, bdesc_3arg[%lu] no name\n",
17463 (long unsigned)i);
17464
17465 continue;
17466 }
17467
17468 if (icode == CODE_FOR_nothing)
17469 {
17470 if (TARGET_DEBUG_BUILTIN)
17471 fprintf (stderr, "rs6000_builtin, skip ternary %s (no code)\n",
17472 d->name);
17473
17474 continue;
17475 }
17476
17477 type = builtin_function_type (insn_data[icode].operand[0].mode,
17478 insn_data[icode].operand[1].mode,
17479 insn_data[icode].operand[2].mode,
17480 insn_data[icode].operand[3].mode,
17481 d->code, d->name);
17482 }
17483
17484 def_builtin (d->name, type, d->code);
17485 }
17486
17487 /* Add the binary operators. */
17488 d = bdesc_2arg;
17489 for (i = 0; i < ARRAY_SIZE (bdesc_2arg); i++, d++)
17490 {
17491 machine_mode mode0, mode1, mode2;
17492 tree type;
17493 HOST_WIDE_INT mask = d->mask;
17494
17495 if ((mask & builtin_mask) != mask)
17496 {
17497 if (TARGET_DEBUG_BUILTIN)
17498 fprintf (stderr, "rs6000_builtin, skip binary %s\n", d->name);
17499 continue;
17500 }
17501
17502 if (rs6000_overloaded_builtin_p (d->code))
17503 {
17504 if (! (type = opaque_ftype_opaque_opaque))
17505 type = opaque_ftype_opaque_opaque
17506 = build_function_type_list (opaque_V4SI_type_node,
17507 opaque_V4SI_type_node,
17508 opaque_V4SI_type_node,
17509 NULL_TREE);
17510 }
17511 else
17512 {
17513 enum insn_code icode = d->icode;
17514 if (d->name == 0)
17515 {
17516 if (TARGET_DEBUG_BUILTIN)
17517 fprintf (stderr, "rs6000_builtin, bdesc_2arg[%lu] no name\n",
17518 (long unsigned)i);
17519
17520 continue;
17521 }
17522
17523 if (icode == CODE_FOR_nothing)
17524 {
17525 if (TARGET_DEBUG_BUILTIN)
17526 fprintf (stderr, "rs6000_builtin, skip binary %s (no code)\n",
17527 d->name);
17528
17529 continue;
17530 }
17531
17532 mode0 = insn_data[icode].operand[0].mode;
17533 mode1 = insn_data[icode].operand[1].mode;
17534 mode2 = insn_data[icode].operand[2].mode;
17535
17536 type = builtin_function_type (mode0, mode1, mode2, VOIDmode,
17537 d->code, d->name);
17538 }
17539
17540 def_builtin (d->name, type, d->code);
17541 }
17542
17543 /* Add the simple unary operators. */
17544 d = bdesc_1arg;
17545 for (i = 0; i < ARRAY_SIZE (bdesc_1arg); i++, d++)
17546 {
17547 machine_mode mode0, mode1;
17548 tree type;
17549 HOST_WIDE_INT mask = d->mask;
17550
17551 if ((mask & builtin_mask) != mask)
17552 {
17553 if (TARGET_DEBUG_BUILTIN)
17554 fprintf (stderr, "rs6000_builtin, skip unary %s\n", d->name);
17555 continue;
17556 }
17557
17558 if (rs6000_overloaded_builtin_p (d->code))
17559 {
17560 if (! (type = opaque_ftype_opaque))
17561 type = opaque_ftype_opaque
17562 = build_function_type_list (opaque_V4SI_type_node,
17563 opaque_V4SI_type_node,
17564 NULL_TREE);
17565 }
17566 else
17567 {
17568 enum insn_code icode = d->icode;
17569 if (d->name == 0)
17570 {
17571 if (TARGET_DEBUG_BUILTIN)
17572 fprintf (stderr, "rs6000_builtin, bdesc_1arg[%lu] no name\n",
17573 (long unsigned)i);
17574
17575 continue;
17576 }
17577
17578 if (icode == CODE_FOR_nothing)
17579 {
17580 if (TARGET_DEBUG_BUILTIN)
17581 fprintf (stderr, "rs6000_builtin, skip unary %s (no code)\n",
17582 d->name);
17583
17584 continue;
17585 }
17586
17587 mode0 = insn_data[icode].operand[0].mode;
17588 mode1 = insn_data[icode].operand[1].mode;
17589
17590 type = builtin_function_type (mode0, mode1, VOIDmode, VOIDmode,
17591 d->code, d->name);
17592 }
17593
17594 def_builtin (d->name, type, d->code);
17595 }
17596
17597 /* Add the simple no-argument operators. */
17598 d = bdesc_0arg;
17599 for (i = 0; i < ARRAY_SIZE (bdesc_0arg); i++, d++)
17600 {
17601 machine_mode mode0;
17602 tree type;
17603 HOST_WIDE_INT mask = d->mask;
17604
17605 if ((mask & builtin_mask) != mask)
17606 {
17607 if (TARGET_DEBUG_BUILTIN)
17608 fprintf (stderr, "rs6000_builtin, skip no-argument %s\n", d->name);
17609 continue;
17610 }
17611 if (rs6000_overloaded_builtin_p (d->code))
17612 {
17613 if (!opaque_ftype_opaque)
17614 opaque_ftype_opaque
17615 = build_function_type_list (opaque_V4SI_type_node, NULL_TREE);
17616 type = opaque_ftype_opaque;
17617 }
17618 else
17619 {
17620 enum insn_code icode = d->icode;
17621 if (d->name == 0)
17622 {
17623 if (TARGET_DEBUG_BUILTIN)
17624 fprintf (stderr, "rs6000_builtin, bdesc_0arg[%lu] no name\n",
17625 (long unsigned) i);
17626 continue;
17627 }
17628 if (icode == CODE_FOR_nothing)
17629 {
17630 if (TARGET_DEBUG_BUILTIN)
17631 fprintf (stderr,
17632 "rs6000_builtin, skip no-argument %s (no code)\n",
17633 d->name);
17634 continue;
17635 }
17636 mode0 = insn_data[icode].operand[0].mode;
17637 type = builtin_function_type (mode0, VOIDmode, VOIDmode, VOIDmode,
17638 d->code, d->name);
17639 }
17640 def_builtin (d->name, type, d->code);
17641 }
17642 }
17643
17644 /* Set up AIX/Darwin/64-bit Linux quad floating point routines. */
17645 static void
17646 init_float128_ibm (machine_mode mode)
17647 {
17648 if (!TARGET_XL_COMPAT)
17649 {
17650 set_optab_libfunc (add_optab, mode, "__gcc_qadd");
17651 set_optab_libfunc (sub_optab, mode, "__gcc_qsub");
17652 set_optab_libfunc (smul_optab, mode, "__gcc_qmul");
17653 set_optab_libfunc (sdiv_optab, mode, "__gcc_qdiv");
17654
17655 if (!TARGET_HARD_FLOAT)
17656 {
17657 set_optab_libfunc (neg_optab, mode, "__gcc_qneg");
17658 set_optab_libfunc (eq_optab, mode, "__gcc_qeq");
17659 set_optab_libfunc (ne_optab, mode, "__gcc_qne");
17660 set_optab_libfunc (gt_optab, mode, "__gcc_qgt");
17661 set_optab_libfunc (ge_optab, mode, "__gcc_qge");
17662 set_optab_libfunc (lt_optab, mode, "__gcc_qlt");
17663 set_optab_libfunc (le_optab, mode, "__gcc_qle");
17664 set_optab_libfunc (unord_optab, mode, "__gcc_qunord");
17665
17666 set_conv_libfunc (sext_optab, mode, SFmode, "__gcc_stoq");
17667 set_conv_libfunc (sext_optab, mode, DFmode, "__gcc_dtoq");
17668 set_conv_libfunc (trunc_optab, SFmode, mode, "__gcc_qtos");
17669 set_conv_libfunc (trunc_optab, DFmode, mode, "__gcc_qtod");
17670 set_conv_libfunc (sfix_optab, SImode, mode, "__gcc_qtoi");
17671 set_conv_libfunc (ufix_optab, SImode, mode, "__gcc_qtou");
17672 set_conv_libfunc (sfloat_optab, mode, SImode, "__gcc_itoq");
17673 set_conv_libfunc (ufloat_optab, mode, SImode, "__gcc_utoq");
17674 }
17675 }
17676 else
17677 {
17678 set_optab_libfunc (add_optab, mode, "_xlqadd");
17679 set_optab_libfunc (sub_optab, mode, "_xlqsub");
17680 set_optab_libfunc (smul_optab, mode, "_xlqmul");
17681 set_optab_libfunc (sdiv_optab, mode, "_xlqdiv");
17682 }
17683
17684 /* Add various conversions for IFmode to use the traditional TFmode
17685 names. */
17686 if (mode == IFmode)
17687 {
17688 set_conv_libfunc (sext_optab, mode, SDmode, "__dpd_extendsdtf");
17689 set_conv_libfunc (sext_optab, mode, DDmode, "__dpd_extendddtf");
17690 set_conv_libfunc (trunc_optab, mode, TDmode, "__dpd_trunctdtf");
17691 set_conv_libfunc (trunc_optab, SDmode, mode, "__dpd_trunctfsd");
17692 set_conv_libfunc (trunc_optab, DDmode, mode, "__dpd_trunctfdd");
17693 set_conv_libfunc (sext_optab, TDmode, mode, "__dpd_extendtftd");
17694
17695 if (TARGET_POWERPC64)
17696 {
17697 set_conv_libfunc (sfix_optab, TImode, mode, "__fixtfti");
17698 set_conv_libfunc (ufix_optab, TImode, mode, "__fixunstfti");
17699 set_conv_libfunc (sfloat_optab, mode, TImode, "__floattitf");
17700 set_conv_libfunc (ufloat_optab, mode, TImode, "__floatuntitf");
17701 }
17702 }
17703 }
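
/* Illustrative example added for exposition (not in the original source):
   after init_float128_ibm (IFmode) without -mxl-compat, an IBM
   double-double addition such as "a + b" on __ibm128 operands is lowered
   to a call to __gcc_qadd.  */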
17704
17705 /* Create a decl for either complex long double multiply or complex long double
17706 divide when long double is IEEE 128-bit floating point. We can't use
17707 __multc3 and __divtc3 because the original long double using IBM extended
17708 double used those names. The complex multiply/divide functions are encoded
17709 as builtin functions with a complex result and 4 scalar inputs. */
17710
17711 static void
17712 create_complex_muldiv (const char *name, built_in_function fncode, tree fntype)
17713 {
17714 tree fndecl = add_builtin_function (name, fntype, fncode, BUILT_IN_NORMAL,
17715 name, NULL_TREE);
17716
17717 set_builtin_decl (fncode, fndecl, true);
17718
17719 if (TARGET_DEBUG_BUILTIN)
17720 fprintf (stderr, "create complex %s, fncode: %d\n", name, (int) fncode);
17723 }
17724
17725 /* Set up IEEE 128-bit floating point routines. Use different names if the
17726 arguments can be passed in a vector register. The historical PowerPC
17727 implementation of IEEE 128-bit floating point used _q_<op> for the names, so
17728 continue to use that if we aren't using vector registers to pass IEEE
17729 128-bit floating point. */
17730
17731 static void
17732 init_float128_ieee (machine_mode mode)
17733 {
17734 if (FLOAT128_VECTOR_P (mode))
17735 {
17736 static bool complex_muldiv_init_p = false;
17737
17738 /* Set up to call __mulkc3 and __divkc3 under -mabi=ieeelongdouble. If
17739 we have clone or target attributes, this will be called a second
17740 time. We want to create the built-in function only once. */
17741 if (mode == TFmode && TARGET_IEEEQUAD && !complex_muldiv_init_p)
17742 {
17743 complex_muldiv_init_p = true;
17744 built_in_function fncode_mul =
17745 (built_in_function) (BUILT_IN_COMPLEX_MUL_MIN + TCmode
17746 - MIN_MODE_COMPLEX_FLOAT);
17747 built_in_function fncode_div =
17748 (built_in_function) (BUILT_IN_COMPLEX_DIV_MIN + TCmode
17749 - MIN_MODE_COMPLEX_FLOAT);
17750
17751 tree fntype = build_function_type_list (complex_long_double_type_node,
17752 long_double_type_node,
17753 long_double_type_node,
17754 long_double_type_node,
17755 long_double_type_node,
17756 NULL_TREE);
17757
17758 create_complex_muldiv ("__mulkc3", fncode_mul, fntype);
17759 create_complex_muldiv ("__divkc3", fncode_div, fntype);
17760 }
17761
17762 set_optab_libfunc (add_optab, mode, "__addkf3");
17763 set_optab_libfunc (sub_optab, mode, "__subkf3");
17764 set_optab_libfunc (neg_optab, mode, "__negkf2");
17765 set_optab_libfunc (smul_optab, mode, "__mulkf3");
17766 set_optab_libfunc (sdiv_optab, mode, "__divkf3");
17767 set_optab_libfunc (sqrt_optab, mode, "__sqrtkf2");
17768 set_optab_libfunc (abs_optab, mode, "__abskf2");
17769 set_optab_libfunc (powi_optab, mode, "__powikf2");
17770
17771 set_optab_libfunc (eq_optab, mode, "__eqkf2");
17772 set_optab_libfunc (ne_optab, mode, "__nekf2");
17773 set_optab_libfunc (gt_optab, mode, "__gtkf2");
17774 set_optab_libfunc (ge_optab, mode, "__gekf2");
17775 set_optab_libfunc (lt_optab, mode, "__ltkf2");
17776 set_optab_libfunc (le_optab, mode, "__lekf2");
17777 set_optab_libfunc (unord_optab, mode, "__unordkf2");
17778
17779 set_conv_libfunc (sext_optab, mode, SFmode, "__extendsfkf2");
17780 set_conv_libfunc (sext_optab, mode, DFmode, "__extenddfkf2");
17781 set_conv_libfunc (trunc_optab, SFmode, mode, "__trunckfsf2");
17782 set_conv_libfunc (trunc_optab, DFmode, mode, "__trunckfdf2");
17783
17784 set_conv_libfunc (sext_optab, mode, IFmode, "__trunctfkf2");
17785 if (mode != TFmode && FLOAT128_IBM_P (TFmode))
17786 set_conv_libfunc (sext_optab, mode, TFmode, "__trunctfkf2");
17787
17788 set_conv_libfunc (trunc_optab, IFmode, mode, "__extendkftf2");
17789 if (mode != TFmode && FLOAT128_IBM_P (TFmode))
17790 set_conv_libfunc (trunc_optab, TFmode, mode, "__extendkftf2");
17791
17792 set_conv_libfunc (sext_optab, mode, SDmode, "__dpd_extendsdkf");
17793 set_conv_libfunc (sext_optab, mode, DDmode, "__dpd_extendddkf");
17794 set_conv_libfunc (trunc_optab, mode, TDmode, "__dpd_trunctdkf");
17795 set_conv_libfunc (trunc_optab, SDmode, mode, "__dpd_trunckfsd");
17796 set_conv_libfunc (trunc_optab, DDmode, mode, "__dpd_trunckfdd");
17797 set_conv_libfunc (sext_optab, TDmode, mode, "__dpd_extendkftd");
17798
17799 set_conv_libfunc (sfix_optab, SImode, mode, "__fixkfsi");
17800 set_conv_libfunc (ufix_optab, SImode, mode, "__fixunskfsi");
17801 set_conv_libfunc (sfix_optab, DImode, mode, "__fixkfdi");
17802 set_conv_libfunc (ufix_optab, DImode, mode, "__fixunskfdi");
17803
17804 set_conv_libfunc (sfloat_optab, mode, SImode, "__floatsikf");
17805 set_conv_libfunc (ufloat_optab, mode, SImode, "__floatunsikf");
17806 set_conv_libfunc (sfloat_optab, mode, DImode, "__floatdikf");
17807 set_conv_libfunc (ufloat_optab, mode, DImode, "__floatundikf");
17808
17809 if (TARGET_POWERPC64)
17810 {
17811 set_conv_libfunc (sfix_optab, TImode, mode, "__fixkfti");
17812 set_conv_libfunc (ufix_optab, TImode, mode, "__fixunskfti");
17813 set_conv_libfunc (sfloat_optab, mode, TImode, "__floattikf");
17814 set_conv_libfunc (ufloat_optab, mode, TImode, "__floatuntikf");
17815 }
17816 }
17817
17818 else
17819 {
17820 set_optab_libfunc (add_optab, mode, "_q_add");
17821 set_optab_libfunc (sub_optab, mode, "_q_sub");
17822 set_optab_libfunc (neg_optab, mode, "_q_neg");
17823 set_optab_libfunc (smul_optab, mode, "_q_mul");
17824 set_optab_libfunc (sdiv_optab, mode, "_q_div");
17825 if (TARGET_PPC_GPOPT)
17826 set_optab_libfunc (sqrt_optab, mode, "_q_sqrt");
17827
17828 set_optab_libfunc (eq_optab, mode, "_q_feq");
17829 set_optab_libfunc (ne_optab, mode, "_q_fne");
17830 set_optab_libfunc (gt_optab, mode, "_q_fgt");
17831 set_optab_libfunc (ge_optab, mode, "_q_fge");
17832 set_optab_libfunc (lt_optab, mode, "_q_flt");
17833 set_optab_libfunc (le_optab, mode, "_q_fle");
17834
17835 set_conv_libfunc (sext_optab, mode, SFmode, "_q_stoq");
17836 set_conv_libfunc (sext_optab, mode, DFmode, "_q_dtoq");
17837 set_conv_libfunc (trunc_optab, SFmode, mode, "_q_qtos");
17838 set_conv_libfunc (trunc_optab, DFmode, mode, "_q_qtod");
17839 set_conv_libfunc (sfix_optab, SImode, mode, "_q_qtoi");
17840 set_conv_libfunc (ufix_optab, SImode, mode, "_q_qtou");
17841 set_conv_libfunc (sfloat_optab, mode, SImode, "_q_itoq");
17842 set_conv_libfunc (ufloat_optab, mode, SImode, "_q_utoq");
17843 }
17844 }
17845
17846 static void
17847 rs6000_init_libfuncs (void)
17848 {
17849 /* __float128 support. */
17850 if (TARGET_FLOAT128_TYPE)
17851 {
17852 init_float128_ibm (IFmode);
17853 init_float128_ieee (KFmode);
17854 }
17855
17856 /* AIX/Darwin/64-bit Linux quad floating point routines. */
17857 if (TARGET_LONG_DOUBLE_128)
17858 {
17859 if (!TARGET_IEEEQUAD)
17860 init_float128_ibm (TFmode);
17861
17862 /* IEEE 128-bit including 32-bit SVR4 quad floating point routines. */
17863 else
17864 init_float128_ieee (TFmode);
17865 }
17866 }
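
/* Illustrative example added for exposition (not in the original source):
   with -mfloat128, a __float128 addition lowers to __addkf3 and a
   comparison such as "a < b" to __ltkf2, per the KFmode entries that
   init_float128_ieee registers above.  */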
17867
17868 /* Emit a potentially record-form instruction, setting DST from SRC.
17869 If DOT is 0, that is all; otherwise, set CCREG to the result of the
17870 signed comparison of DST with zero. If DOT is 1, the generated RTL
17871 doesn't care about the DST result; if DOT is 2, it does. If CCREG
17872 is CR0 do a single dot insn (as a PARALLEL); otherwise, do a SET and
17873 a separate COMPARE. */
17874
17875 void
17876 rs6000_emit_dot_insn (rtx dst, rtx src, int dot, rtx ccreg)
17877 {
17878 if (dot == 0)
17879 {
17880 emit_move_insn (dst, src);
17881 return;
17882 }
17883
17884 if (cc_reg_not_cr0_operand (ccreg, CCmode))
17885 {
17886 emit_move_insn (dst, src);
17887 emit_move_insn (ccreg, gen_rtx_COMPARE (CCmode, dst, const0_rtx));
17888 return;
17889 }
17890
17891 rtx ccset = gen_rtx_SET (ccreg, gen_rtx_COMPARE (CCmode, src, const0_rtx));
17892 if (dot == 1)
17893 {
17894 rtx clobber = gen_rtx_CLOBBER (VOIDmode, dst);
17895 emit_insn (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, ccset, clobber)));
17896 }
17897 else
17898 {
17899 rtx set = gen_rtx_SET (dst, src);
17900 emit_insn (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, ccset, set)));
17901 }
17902 }
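
/* Illustrative example added for exposition (not in the original source):
   with DOT == 1 and CCREG == CR0, the function emits the PARALLEL
     [(set ccreg (compare:CC src (const_int 0))) (clobber dst)]
   which matches the record-form ("dot") patterns; with DOT == 2 the
   clobber becomes (set dst src), keeping the GPR result live too.  */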
17903
17904 \f
17905 /* A validation routine: say whether CODE, a condition code, and MODE
17906 match. The other alternatives either don't make sense or should
17907 never be generated. */
17908
17909 void
17910 validate_condition_mode (enum rtx_code code, machine_mode mode)
17911 {
17912 gcc_assert ((GET_RTX_CLASS (code) == RTX_COMPARE
17913 || GET_RTX_CLASS (code) == RTX_COMM_COMPARE)
17914 && GET_MODE_CLASS (mode) == MODE_CC);
17915
17916 /* These don't make sense. */
17917 gcc_assert ((code != GT && code != LT && code != GE && code != LE)
17918 || mode != CCUNSmode);
17919
17920 gcc_assert ((code != GTU && code != LTU && code != GEU && code != LEU)
17921 || mode == CCUNSmode);
17922
17923 gcc_assert (mode == CCFPmode
17924 || (code != ORDERED && code != UNORDERED
17925 && code != UNEQ && code != LTGT
17926 && code != UNGT && code != UNLT
17927 && code != UNGE && code != UNLE));
17928
17929 /* These should never be generated except for
17930 flag_finite_math_only. */
17931 gcc_assert (mode != CCFPmode
17932 || flag_finite_math_only
17933 || (code != LE && code != GE
17934 && code != UNEQ && code != LTGT
17935 && code != UNGT && code != UNLT));
17936
17937 /* These are invalid; the information is not there. */
17938 gcc_assert (mode != CCEQmode || code == EQ || code == NE);
17939 }
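
/* Illustrative example added for exposition (not in the original source):
   validate_condition_mode (GTU, CCUNSmode) passes, while
   validate_condition_mode (GTU, CCmode) trips the assertion that
   unsigned comparisons must use CCUNSmode.  */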
17940
17941 \f
17942 /* Return whether MASK (a CONST_INT) is a valid mask for any rlwinm,
17943 rldicl, rldicr, or rldic instruction in mode MODE. If so, and if E is
17944 non-null, store there the bit offset (counted from the right) where
17945 the single stretch of 1 bits begins; similarly for B, the bit offset
17946 where it ends. */
17947
17948 bool
17949 rs6000_is_valid_mask (rtx mask, int *b, int *e, machine_mode mode)
17950 {
17951 unsigned HOST_WIDE_INT val = INTVAL (mask);
17952 unsigned HOST_WIDE_INT bit;
17953 int nb, ne;
17954 int n = GET_MODE_PRECISION (mode);
17955
17956 if (mode != DImode && mode != SImode)
17957 return false;
17958
17959 if (INTVAL (mask) >= 0)
17960 {
17961 bit = val & -val;
17962 ne = exact_log2 (bit);
17963 nb = exact_log2 (val + bit);
17964 }
17965 else if (val + 1 == 0)
17966 {
17967 nb = n;
17968 ne = 0;
17969 }
17970 else if (val & 1)
17971 {
17972 val = ~val;
17973 bit = val & -val;
17974 nb = exact_log2 (bit);
17975 ne = exact_log2 (val + bit);
17976 }
17977 else
17978 {
17979 bit = val & -val;
17980 ne = exact_log2 (bit);
17981 if (val + bit == 0)
17982 nb = n;
17983 else
17984 nb = 0;
17985 }
17986
17987 nb--;
17988
17989 if (nb < 0 || ne < 0 || nb >= n || ne >= n)
17990 return false;
17991
17992 if (b)
17993 *b = nb;
17994 if (e)
17995 *e = ne;
17996
17997 return true;
17998 }
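
/* Illustrative worked example added for exposition (not in the original
   source): for the SImode mask 0x00ffff00 the single stretch of ones
   starts at bit 8 and ends at bit 23 (counting from the right), so the
   function returns true with *E == 8 and *B == 23.  A mask with two
   stretches, such as 0x00ff00ff, fails the exact_log2 tests and is
   rejected.  */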
17999
18000 /* Return whether MASK (a CONST_INT) is a valid mask for any rlwinm, rldicl,
18001 or rldicr instruction, to implement an AND with it in mode MODE. */
18002
18003 bool
18004 rs6000_is_valid_and_mask (rtx mask, machine_mode mode)
18005 {
18006 int nb, ne;
18007
18008 if (!rs6000_is_valid_mask (mask, &nb, &ne, mode))
18009 return false;
18010
18011 /* For DImode, we need a rldicl, rldicr, or a rlwinm with mask that
18012 does not wrap. */
18013 if (mode == DImode)
18014 return (ne == 0 || nb == 63 || (nb < 32 && ne <= nb));
18015
18016 /* For SImode, rlwinm can do everything. */
18017 if (mode == SImode)
18018 return (nb < 32 && ne < 32);
18019
18020 return false;
18021 }
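
/* Illustrative example added for exposition (not in the original source):
   in DImode, 0x00000000ffffff00 (nb == 31, ne == 8) is accepted, since a
   non-wrapping rlwinm can implement the AND, while 0x0000ffffff000000
   (nb == 47, ne == 24) is rejected here even though rs6000_is_valid_mask
   accepts it: implementing that AND would also require a rotate.  */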
18022
18023 /* Return the instruction template for an AND with mask in mode MODE, with
18024 operands OPERANDS. If DOT is true, make it a record-form instruction. */
18025
18026 const char *
18027 rs6000_insn_for_and_mask (machine_mode mode, rtx *operands, bool dot)
18028 {
18029 int nb, ne;
18030
18031 if (!rs6000_is_valid_mask (operands[2], &nb, &ne, mode))
18032 gcc_unreachable ();
18033
18034 if (mode == DImode && ne == 0)
18035 {
18036 operands[3] = GEN_INT (63 - nb);
18037 if (dot)
18038 return "rldicl. %0,%1,0,%3";
18039 return "rldicl %0,%1,0,%3";
18040 }
18041
18042 if (mode == DImode && nb == 63)
18043 {
18044 operands[3] = GEN_INT (63 - ne);
18045 if (dot)
18046 return "rldicr. %0,%1,0,%3";
18047 return "rldicr %0,%1,0,%3";
18048 }
18049
18050 if (nb < 32 && ne < 32)
18051 {
18052 operands[3] = GEN_INT (31 - nb);
18053 operands[4] = GEN_INT (31 - ne);
18054 if (dot)
18055 return "rlwinm. %0,%1,0,%3,%4";
18056 return "rlwinm %0,%1,0,%3,%4";
18057 }
18058
18059 gcc_unreachable ();
18060 }
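
/* Illustrative example added for exposition (not in the original source):
   for the SImode mask 0x00ffff00 (nb == 23, ne == 8) the function returns
   "rlwinm %0,%1,0,%3,%4" with %3 == 31 - 23 == 8 and %4 == 31 - 8 == 23:
   a rotate by 0 that keeps big-endian bit positions 8..23, i.e. exactly
   the mask.  */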
18061
18062 /* Return whether MASK (a CONST_INT) is a valid mask for any rlw[i]nm,
18063 rld[i]cl, rld[i]cr, or rld[i]c instruction, to implement an AND with
18064 shift SHIFT (a ROTATE, ASHIFT, or LSHIFTRT) in mode MODE. */
18065
18066 bool
18067 rs6000_is_valid_shift_mask (rtx mask, rtx shift, machine_mode mode)
18068 {
18069 int nb, ne;
18070
18071 if (!rs6000_is_valid_mask (mask, &nb, &ne, mode))
18072 return false;
18073
18074 int n = GET_MODE_PRECISION (mode);
18075 int sh = -1;
18076
18077 if (CONST_INT_P (XEXP (shift, 1)))
18078 {
18079 sh = INTVAL (XEXP (shift, 1));
18080 if (sh < 0 || sh >= n)
18081 return false;
18082 }
18083
18084 rtx_code code = GET_CODE (shift);
18085
18086 /* Convert any shift by 0 to a rotate, to simplify the code below. */
18087 if (sh == 0)
18088 code = ROTATE;
18089
18090 /* Convert rotate to simple shift if we can, to make analysis simpler. */
18091 if (code == ROTATE && sh >= 0 && nb >= ne && ne >= sh)
18092 code = ASHIFT;
18093 if (code == ROTATE && sh >= 0 && nb >= ne && nb < sh)
18094 {
18095 code = LSHIFTRT;
18096 sh = n - sh;
18097 }
18098
18099 /* DImode rotates need rld*. */
18100 if (mode == DImode && code == ROTATE)
18101 return (nb == 63 || ne == 0 || ne == sh);
18102
18103 /* SImode rotates need rlw*. */
18104 if (mode == SImode && code == ROTATE)
18105 return (nb < 32 && ne < 32 && sh < 32);
18106
18107 /* Wrap-around masks are only okay for rotates. */
18108 if (ne > nb)
18109 return false;
18110
18111 /* Variable shifts are only okay for rotates. */
18112 if (sh < 0)
18113 return false;
18114
18115 /* Don't allow ASHIFT if the mask is wrong for that. */
18116 if (code == ASHIFT && ne < sh)
18117 return false;
18118
18119 /* If we can do it with an rlw*, we can do it. Don't allow LSHIFTRT
18120 if the mask is wrong for that. */
18121 if (nb < 32 && ne < 32 && sh < 32
18122 && !(code == LSHIFTRT && nb >= 32 - sh))
18123 return true;
18124
18125 /* If we can do it with an rld*, we can do it. Don't allow LSHIFTRT
18126 if the mask is wrong for that. */
18127 if (code == LSHIFTRT)
18128 sh = 64 - sh;
18129 if (nb == 63 || ne == 0 || ne == sh)
18130 return !(code == LSHIFTRT && nb >= sh);
18131
18132 return false;
18133 }
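
/* Illustrative example added for exposition (not in the original source):
   (ashift:SI x 8) with mask 0xffffff00 (nb == 31, ne == 8) is accepted,
   since ne == sh means no ones are shifted out and an rlwinm can do it;
   the same mask with (ashift:SI x 16) is rejected because ne < sh.  */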
18134
18135 /* Return the instruction template for a shift with mask in mode MODE, with
18136 operands OPERANDS. If DOT is true, make it a record-form instruction. */
18137
18138 const char *
18139 rs6000_insn_for_shift_mask (machine_mode mode, rtx *operands, bool dot)
18140 {
18141 int nb, ne;
18142
18143 if (!rs6000_is_valid_mask (operands[3], &nb, &ne, mode))
18144 gcc_unreachable ();
18145
18146 if (mode == DImode && ne == 0)
18147 {
18148 if (GET_CODE (operands[4]) == LSHIFTRT && INTVAL (operands[2]))
18149 operands[2] = GEN_INT (64 - INTVAL (operands[2]));
18150 operands[3] = GEN_INT (63 - nb);
18151 if (dot)
18152 return "rld%I2cl. %0,%1,%2,%3";
18153 return "rld%I2cl %0,%1,%2,%3";
18154 }
18155
18156 if (mode == DImode && nb == 63)
18157 {
18158 operands[3] = GEN_INT (63 - ne);
18159 if (dot)
18160 return "rld%I2cr. %0,%1,%2,%3";
18161 return "rld%I2cr %0,%1,%2,%3";
18162 }
18163
18164 if (mode == DImode
18165 && GET_CODE (operands[4]) != LSHIFTRT
18166 && CONST_INT_P (operands[2])
18167 && ne == INTVAL (operands[2]))
18168 {
18169 operands[3] = GEN_INT (63 - nb);
18170 if (dot)
18171 return "rld%I2c. %0,%1,%2,%3";
18172 return "rld%I2c %0,%1,%2,%3";
18173 }
18174
18175 if (nb < 32 && ne < 32)
18176 {
18177 if (GET_CODE (operands[4]) == LSHIFTRT && INTVAL (operands[2]))
18178 operands[2] = GEN_INT (32 - INTVAL (operands[2]));
18179 operands[3] = GEN_INT (31 - nb);
18180 operands[4] = GEN_INT (31 - ne);
18181 /* This insn can also be a 64-bit rotate with mask that really makes
18182 it just a shift right (with mask); the %h below are to adjust for
18183 that situation (shift count is >= 32 in that case). */
18184 if (dot)
18185 return "rlw%I2nm. %0,%1,%h2,%3,%4";
18186 return "rlw%I2nm %0,%1,%h2,%3,%4";
18187 }
18188
18189 gcc_unreachable ();
18190 }
18191
18192 /* Return whether MASK (a CONST_INT) is a valid mask for any rlwimi or
18193 rldimi instruction, to implement an insert with shift SHIFT (a ROTATE,
18194 ASHIFT, or LSHIFTRT) in mode MODE. */
18195
18196 bool
18197 rs6000_is_valid_insert_mask (rtx mask, rtx shift, machine_mode mode)
18198 {
18199 int nb, ne;
18200
18201 if (!rs6000_is_valid_mask (mask, &nb, &ne, mode))
18202 return false;
18203
18204 int n = GET_MODE_PRECISION (mode);
18205
18206 int sh = INTVAL (XEXP (shift, 1));
18207 if (sh < 0 || sh >= n)
18208 return false;
18209
18210 rtx_code code = GET_CODE (shift);
18211
18212 /* Convert any shift by 0 to a rotate, to simplify the code below. */
18213 if (sh == 0)
18214 code = ROTATE;
18215
18216 /* Convert rotate to simple shift if we can, to make analysis simpler. */
18217 if (code == ROTATE && sh >= 0 && nb >= ne && ne >= sh)
18218 code = ASHIFT;
18219 if (code == ROTATE && sh >= 0 && nb >= ne && nb < sh)
18220 {
18221 code = LSHIFTRT;
18222 sh = n - sh;
18223 }
18224
18225 /* DImode rotates need rldimi. */
18226 if (mode == DImode && code == ROTATE)
18227 return (ne == sh);
18228
18229 /* SImode rotates need rlwimi. */
18230 if (mode == SImode && code == ROTATE)
18231 return (nb < 32 && ne < 32 && sh < 32);
18232
18233 /* Wrap-around masks are only okay for rotates. */
18234 if (ne > nb)
18235 return false;
18236
18237 /* Don't allow ASHIFT if the mask is wrong for that. */
18238 if (code == ASHIFT && ne < sh)
18239 return false;
18240
18241 /* If we can do it with an rlwimi, we can do it. Don't allow LSHIFTRT
18242 if the mask is wrong for that. */
18243 if (nb < 32 && ne < 32 && sh < 32
18244 && !(code == LSHIFTRT && nb >= 32 - sh))
18245 return true;
18246
18247 /* If we can do it with an rldimi, we can do it. Don't allow LSHIFTRT
18248 if the mask is wrong for that. */
18249 if (code == LSHIFTRT)
18250 sh = 64 - sh;
18251 if (ne == sh)
18252 return !(code == LSHIFTRT && nb >= sh);
18253
18254 return false;
18255 }
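
/* Illustrative example added for exposition (not in the original source):
   inserting the low byte of one register into another uses mask 0xff
   with a shift of 0, which is canonicalized to a rotate; nb == 7 and
   ne == 0 == sh, so both rlwimi and rldimi qualify.  */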
18256
18257 /* Return the instruction template for an insert with mask in mode MODE, with
18258 operands OPERANDS. If DOT is true, make it a record-form instruction. */
18259
18260 const char *
18261 rs6000_insn_for_insert_mask (machine_mode mode, rtx *operands, bool dot)
18262 {
18263 int nb, ne;
18264
18265 if (!rs6000_is_valid_mask (operands[3], &nb, &ne, mode))
18266 gcc_unreachable ();
18267
18268 /* Prefer rldimi because rlwimi is cracked. */
18269 if (TARGET_POWERPC64
18270 && (!dot || mode == DImode)
18271 && GET_CODE (operands[4]) != LSHIFTRT
18272 && ne == INTVAL (operands[2]))
18273 {
18274 operands[3] = GEN_INT (63 - nb);
18275 if (dot)
18276 return "rldimi. %0,%1,%2,%3";
18277 return "rldimi %0,%1,%2,%3";
18278 }
18279
18280 if (nb < 32 && ne < 32)
18281 {
18282 if (GET_CODE (operands[4]) == LSHIFTRT && INTVAL (operands[2]))
18283 operands[2] = GEN_INT (32 - INTVAL (operands[2]));
18284 operands[3] = GEN_INT (31 - nb);
18285 operands[4] = GEN_INT (31 - ne);
18286 if (dot)
18287 return "rlwimi. %0,%1,%2,%3,%4";
18288 return "rlwimi %0,%1,%2,%3,%4";
18289 }
18290
18291 gcc_unreachable ();
18292 }
18293
18294 /* Return whether an AND with C (a CONST_INT) in mode MODE can be done
18295 using two machine instructions. */
18296
18297 bool
18298 rs6000_is_valid_2insn_and (rtx c, machine_mode mode)
18299 {
18300 /* There are two kinds of AND we can handle with two insns:
18301 1) those we can do with two rl* insns;
18302 2) ori[s];xori[s].
18303
18304 We do not handle that last case yet. */
18305
18306 /* If there is just one stretch of ones, we can do it. */
18307 if (rs6000_is_valid_mask (c, NULL, NULL, mode))
18308 return true;
18309
18310 /* Otherwise, fill in the lowest "hole"; if we can do the result with
18311 one insn, we can do the whole thing with two. */
18312 unsigned HOST_WIDE_INT val = INTVAL (c);
18313 unsigned HOST_WIDE_INT bit1 = val & -val;
18314 unsigned HOST_WIDE_INT bit2 = (val + bit1) & ~val;
18315 unsigned HOST_WIDE_INT val1 = (val + bit1) & val;
18316 unsigned HOST_WIDE_INT bit3 = val1 & -val1;
18317 return rs6000_is_valid_and_mask (GEN_INT (val + bit3 - bit2), mode);
18318 }
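
/* Illustrative worked example added for exposition (not in the original
   source): for C == 0x0ff00ff0 we get bit1 == 0x10 (lowest set bit),
   bit2 == 0x1000 (lowest clear bit above it), val1 == 0x0ff00000 and
   bit3 == 0x100000, so the tested mask is
   0x0ff00ff0 + 0x100000 - 0x1000 == 0x0ffffff0: the lowest hole filled
   in, a single stretch of ones, hence two rl* insns suffice.  */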
18319
18320 /* Emit the two insns to do an AND in mode MODE, with operands OPERANDS.
18321 If EXPAND is true, split rotate-and-mask instructions we generate to
18322 their constituent parts as well (this is used during expand); if DOT
18323 is 1, make the last insn a record-form instruction clobbering the
18324 destination GPR and setting the CC reg (from operands[3]); if 2, set
18325 that GPR as well as the CC reg. */
18326
18327 void
18328 rs6000_emit_2insn_and (machine_mode mode, rtx *operands, bool expand, int dot)
18329 {
18330 gcc_assert (!(expand && dot));
18331
18332 unsigned HOST_WIDE_INT val = INTVAL (operands[2]);
18333
18334 /* If it is one stretch of ones, it is DImode; shift left, mask, then
18335 shift right. This generates better code than doing the masks without
18336 shifts, or shifting first right and then left. */
18337 int nb, ne;
18338 if (rs6000_is_valid_mask (operands[2], &nb, &ne, mode) && nb >= ne)
18339 {
18340 gcc_assert (mode == DImode);
18341
18342 int shift = 63 - nb;
18343 if (expand)
18344 {
18345 rtx tmp1 = gen_reg_rtx (DImode);
18346 rtx tmp2 = gen_reg_rtx (DImode);
18347 emit_insn (gen_ashldi3 (tmp1, operands[1], GEN_INT (shift)));
18348 emit_insn (gen_anddi3 (tmp2, tmp1, GEN_INT (val << shift)));
18349 emit_insn (gen_lshrdi3 (operands[0], tmp2, GEN_INT (shift)));
18350 }
18351 else
18352 {
18353 rtx tmp = gen_rtx_ASHIFT (mode, operands[1], GEN_INT (shift));
18354 tmp = gen_rtx_AND (mode, tmp, GEN_INT (val << shift));
18355 emit_move_insn (operands[0], tmp);
18356 tmp = gen_rtx_LSHIFTRT (mode, operands[0], GEN_INT (shift));
18357 rs6000_emit_dot_insn (operands[0], tmp, dot, dot ? operands[3] : 0);
18358 }
18359 return;
18360 }
18361
18362 /* Otherwise, make a mask2 that cuts out the lowest "hole", and a mask1
18363 that does the rest. */
18364 unsigned HOST_WIDE_INT bit1 = val & -val;
18365 unsigned HOST_WIDE_INT bit2 = (val + bit1) & ~val;
18366 unsigned HOST_WIDE_INT val1 = (val + bit1) & val;
18367 unsigned HOST_WIDE_INT bit3 = val1 & -val1;
18368
18369 unsigned HOST_WIDE_INT mask1 = -bit3 + bit2 - 1;
18370 unsigned HOST_WIDE_INT mask2 = val + bit3 - bit2;
18371
18372 gcc_assert (rs6000_is_valid_and_mask (GEN_INT (mask2), mode));
18373
18374 /* Two "no-rotate"-and-mask instructions, for SImode. */
18375 if (rs6000_is_valid_and_mask (GEN_INT (mask1), mode))
18376 {
18377 gcc_assert (mode == SImode);
18378
18379 rtx reg = expand ? gen_reg_rtx (mode) : operands[0];
18380 rtx tmp = gen_rtx_AND (mode, operands[1], GEN_INT (mask1));
18381 emit_move_insn (reg, tmp);
18382 tmp = gen_rtx_AND (mode, reg, GEN_INT (mask2));
18383 rs6000_emit_dot_insn (operands[0], tmp, dot, dot ? operands[3] : 0);
18384 return;
18385 }
18386
18387 gcc_assert (mode == DImode);
18388
18389 /* Two "no-rotate"-and-mask instructions, for DImode: both are rlwinm
18390 insns; we have to do the first in SImode, because it wraps. */
18391 if (mask2 <= 0xffffffff
18392 && rs6000_is_valid_and_mask (GEN_INT (mask1), SImode))
18393 {
18394 rtx reg = expand ? gen_reg_rtx (mode) : operands[0];
18395 rtx tmp = gen_rtx_AND (SImode, gen_lowpart (SImode, operands[1]),
18396 GEN_INT (mask1));
18397 rtx reg_low = gen_lowpart (SImode, reg);
18398 emit_move_insn (reg_low, tmp);
18399 tmp = gen_rtx_AND (mode, reg, GEN_INT (mask2));
18400 rs6000_emit_dot_insn (operands[0], tmp, dot, dot ? operands[3] : 0);
18401 return;
18402 }
18403
18404 /* Two rld* insns: rotate, clear the hole in the middle (which now is
18405 at the top end), rotate back and clear the other hole. */
18406 int right = exact_log2 (bit3);
18407 int left = 64 - right;
18408
18409 /* Rotate the mask too. */
18410 mask1 = (mask1 >> right) | ((bit2 - 1) << left);
18411
18412 if (expand)
18413 {
18414 rtx tmp1 = gen_reg_rtx (DImode);
18415 rtx tmp2 = gen_reg_rtx (DImode);
18416 rtx tmp3 = gen_reg_rtx (DImode);
18417 emit_insn (gen_rotldi3 (tmp1, operands[1], GEN_INT (left)));
18418 emit_insn (gen_anddi3 (tmp2, tmp1, GEN_INT (mask1)));
18419 emit_insn (gen_rotldi3 (tmp3, tmp2, GEN_INT (right)));
18420 emit_insn (gen_anddi3 (operands[0], tmp3, GEN_INT (mask2)));
18421 }
18422 else
18423 {
18424 rtx tmp = gen_rtx_ROTATE (mode, operands[1], GEN_INT (left));
18425 tmp = gen_rtx_AND (mode, tmp, GEN_INT (mask1));
18426 emit_move_insn (operands[0], tmp);
18427 tmp = gen_rtx_ROTATE (mode, operands[0], GEN_INT (right));
18428 tmp = gen_rtx_AND (mode, tmp, GEN_INT (mask2));
18429 rs6000_emit_dot_insn (operands[0], tmp, dot, dot ? operands[3] : 0);
18430 }
18431 }
18432 \f
18433 /* Return 1 if REGNO (reg1) == REGNO (reg2) - 1, making them candidates
18434 for lfq and stfq insns, iff the registers are hard registers. */
18435
18436 int
18437 registers_ok_for_quad_peep (rtx reg1, rtx reg2)
18438 {
18439 /* We might have been passed a SUBREG. */
18440 if (!REG_P (reg1) || !REG_P (reg2))
18441 return 0;
18442
18443 /* We might have been passed non-floating-point registers. */
18444 if (!FP_REGNO_P (REGNO (reg1))
18445 || !FP_REGNO_P (REGNO (reg2)))
18446 return 0;
18447
18448 return (REGNO (reg1) == REGNO (reg2) - 1);
18449 }
18450
18451 /* Return 1 if addr1 and addr2 are suitable for lfq or stfq insn.
18452 addr1 and addr2 must be in consecutive memory locations
18453 (addr2 == addr1 + 8). */
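
/* Illustrative example (not from the original source): mem1 =
 (mem (reg 9)) with implicit offset 0 and mem2 =
 (mem (plus (reg 9) (const_int 8))) share base register 9 and their
 offsets differ by exactly 8, so the pair is accepted. */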
18454
18455 int
18456 mems_ok_for_quad_peep (rtx mem1, rtx mem2)
18457 {
18458 rtx addr1, addr2;
18459 unsigned int reg1, reg2;
18460 int offset1, offset2;
18461
18462 /* The mems cannot be volatile. */
18463 if (MEM_VOLATILE_P (mem1) || MEM_VOLATILE_P (mem2))
18464 return 0;
18465
18466 addr1 = XEXP (mem1, 0);
18467 addr2 = XEXP (mem2, 0);
18468
18469 /* Extract an offset (if used) from the first addr. */
18470 if (GET_CODE (addr1) == PLUS)
18471 {
18472 /* If not a REG, return zero. */
18473 if (!REG_P (XEXP (addr1, 0)))
18474 return 0;
18475 else
18476 {
18477 reg1 = REGNO (XEXP (addr1, 0));
18478 /* The offset must be constant! */
18479 if (!CONST_INT_P (XEXP (addr1, 1)))
18480 return 0;
18481 offset1 = INTVAL (XEXP (addr1, 1));
18482 }
18483 }
18484 else if (!REG_P (addr1))
18485 return 0;
18486 else
18487 {
18488 reg1 = REGNO (addr1);
18489 /* This was a simple (mem (reg)) expression. Offset is 0. */
18490 offset1 = 0;
18491 }
18492
18493 /* And now for the second addr. */
18494 if (GET_CODE (addr2) == PLUS)
18495 {
18496 /* If not a REG, return zero. */
18497 if (!REG_P (XEXP (addr2, 0)))
18498 return 0;
18499 else
18500 {
18501 reg2 = REGNO (XEXP (addr2, 0));
18502 /* The offset must be constant. */
18503 if (!CONST_INT_P (XEXP (addr2, 1)))
18504 return 0;
18505 offset2 = INTVAL (XEXP (addr2, 1));
18506 }
18507 }
18508 else if (!REG_P (addr2))
18509 return 0;
18510 else
18511 {
18512 reg2 = REGNO (addr2);
18513 /* This was a simple (mem (reg)) expression. Offset is 0. */
18514 offset2 = 0;
18515 }
18516
18517 /* Both of these must have the same base register. */
18518 if (reg1 != reg2)
18519 return 0;
18520
18521 /* The offset for the second addr must be 8 more than the first addr. */
18522 if (offset2 != offset1 + 8)
18523 return 0;
18524
18525 /* All the tests passed. addr1 and addr2 are valid for lfq or stfq
18526 instructions. */
18527 return 1;
18528 }
18529 \f
18530 /* Implement TARGET_SECONDARY_MEMORY_NEEDED_MODE. For SDmode values we
18531 need to use DDmode; in all other cases we can use the same mode. */
18532 static machine_mode
18533 rs6000_secondary_memory_needed_mode (machine_mode mode)
18534 {
18535 if (lra_in_progress && mode == SDmode)
18536 return DDmode;
18537 return mode;
18538 }
18539
18540 /* Classify a register type. Because the FMRGOW/FMRGEW instructions only work
18541 on traditional floating point registers, and the VMRGOW/VMRGEW instructions
18542 only work on the traditional Altivec registers, note if an Altivec register
18543 was chosen. */
18544
18545 static enum rs6000_reg_type
18546 register_to_reg_type (rtx reg, bool *is_altivec)
18547 {
18548 HOST_WIDE_INT regno;
18549 enum reg_class rclass;
18550
18551 if (SUBREG_P (reg))
18552 reg = SUBREG_REG (reg);
18553
18554 if (!REG_P (reg))
18555 return NO_REG_TYPE;
18556
18557 regno = REGNO (reg);
18558 if (!HARD_REGISTER_NUM_P (regno))
18559 {
18560 if (!lra_in_progress && !reload_completed)
18561 return PSEUDO_REG_TYPE;
18562
18563 regno = true_regnum (reg);
18564 if (regno < 0 || !HARD_REGISTER_NUM_P (regno))
18565 return PSEUDO_REG_TYPE;
18566 }
18567
18568 gcc_assert (regno >= 0);
18569
18570 if (is_altivec && ALTIVEC_REGNO_P (regno))
18571 *is_altivec = true;
18572
18573 rclass = rs6000_regno_regclass[regno];
18574 return reg_class_to_reg_type[(int)rclass];
18575 }
18576
18577 /* Helper function to return the cost of adding a TOC entry address. */
18578
18579 static inline int
18580 rs6000_secondary_reload_toc_costs (addr_mask_type addr_mask)
18581 {
18582 int ret;
18583
18584 if (TARGET_CMODEL != CMODEL_SMALL)
18585 ret = ((addr_mask & RELOAD_REG_OFFSET) == 0) ? 1 : 2;
18586
18587 else
18588 ret = (TARGET_MINIMAL_TOC) ? 6 : 3;
18589
18590 return ret;
18591 }
18592
18593 /* Helper function for rs6000_secondary_reload to determine whether the memory
18594 address (ADDR) with a given register class (RCLASS) and machine mode (MODE)
18595 needs reloading. Return negative if the memory is not handled by the memory
18596 helper functions (so a different reload method should be tried), 0 if no
18597 additional instructions are needed, and positive to give the extra cost for the
18598 memory. */
18599
18600 static int
18601 rs6000_secondary_reload_memory (rtx addr,
18602 enum reg_class rclass,
18603 machine_mode mode)
18604 {
18605 int extra_cost = 0;
18606 rtx reg, and_arg, plus_arg0, plus_arg1;
18607 addr_mask_type addr_mask;
18608 const char *type = NULL;
18609 const char *fail_msg = NULL;
18610
18611 if (GPR_REG_CLASS_P (rclass))
18612 addr_mask = reg_addr[mode].addr_mask[RELOAD_REG_GPR];
18613
18614 else if (rclass == FLOAT_REGS)
18615 addr_mask = reg_addr[mode].addr_mask[RELOAD_REG_FPR];
18616
18617 else if (rclass == ALTIVEC_REGS)
18618 addr_mask = reg_addr[mode].addr_mask[RELOAD_REG_VMX];
18619
18620 /* For the combined VSX_REGS, turn off Altivec AND -16. */
18621 else if (rclass == VSX_REGS)
18622 addr_mask = (reg_addr[mode].addr_mask[RELOAD_REG_VMX]
18623 & ~RELOAD_REG_AND_M16);
18624
18625 /* If the register allocator hasn't made up its mind yet on the register
18626 class to use, settle on defaults. */
18627 else if (rclass == NO_REGS)
18628 {
18629 addr_mask = (reg_addr[mode].addr_mask[RELOAD_REG_ANY]
18630 & ~RELOAD_REG_AND_M16);
18631
18632 if ((addr_mask & RELOAD_REG_MULTIPLE) != 0)
18633 addr_mask &= ~(RELOAD_REG_INDEXED
18634 | RELOAD_REG_PRE_INCDEC
18635 | RELOAD_REG_PRE_MODIFY);
18636 }
18637
18638 else
18639 addr_mask = 0;
18640
18641 /* If the register isn't valid in this register class, just return now. */
18642 if ((addr_mask & RELOAD_REG_VALID) == 0)
18643 {
18644 if (TARGET_DEBUG_ADDR)
18645 {
18646 fprintf (stderr,
18647 "rs6000_secondary_reload_memory: mode = %s, class = %s, "
18648 "not valid in class\n",
18649 GET_MODE_NAME (mode), reg_class_names[rclass]);
18650 debug_rtx (addr);
18651 }
18652
18653 return -1;
18654 }
18655
18656 switch (GET_CODE (addr))
18657 {
18658 /* Does the register class support auto update forms for this mode? We
18659 don't need a scratch register, since the PowerPC only supports
18660 PRE_INC, PRE_DEC, and PRE_MODIFY. */
18661 case PRE_INC:
18662 case PRE_DEC:
18663 reg = XEXP (addr, 0);
18664 if (!base_reg_operand (addr, GET_MODE (reg)))
18665 {
18666 fail_msg = "no base register #1";
18667 extra_cost = -1;
18668 }
18669
18670 else if ((addr_mask & RELOAD_REG_PRE_INCDEC) == 0)
18671 {
18672 extra_cost = 1;
18673 type = "update";
18674 }
18675 break;
18676
18677 case PRE_MODIFY:
18678 reg = XEXP (addr, 0);
18679 plus_arg1 = XEXP (addr, 1);
18680 if (!base_reg_operand (reg, GET_MODE (reg))
18681 || GET_CODE (plus_arg1) != PLUS
18682 || !rtx_equal_p (reg, XEXP (plus_arg1, 0)))
18683 {
18684 fail_msg = "bad PRE_MODIFY";
18685 extra_cost = -1;
18686 }
18687
18688 else if ((addr_mask & RELOAD_REG_PRE_MODIFY) == 0)
18689 {
18690 extra_cost = 1;
18691 type = "update";
18692 }
18693 break;
18694
18695 /* Do we need to simulate AND -16 to clear the bottom address bits used
18696 in VMX load/stores? Only allow the AND for vector sizes. */
18697 case AND:
18698 and_arg = XEXP (addr, 0);
18699 if (GET_MODE_SIZE (mode) != 16
18700 || !CONST_INT_P (XEXP (addr, 1))
18701 || INTVAL (XEXP (addr, 1)) != -16)
18702 {
18703 fail_msg = "bad Altivec AND #1";
18704 extra_cost = -1;
18705 }
18706
18707 if (rclass != ALTIVEC_REGS)
18708 {
18709 if (legitimate_indirect_address_p (and_arg, false))
18710 extra_cost = 1;
18711
18712 else if (legitimate_indexed_address_p (and_arg, false))
18713 extra_cost = 2;
18714
18715 else
18716 {
18717 fail_msg = "bad Altivec AND #2";
18718 extra_cost = -1;
18719 }
18720
18721 type = "and";
18722 }
18723 break;
18724
18725 /* If this is an indirect address, make sure it is a base register. */
18726 case REG:
18727 case SUBREG:
18728 if (!legitimate_indirect_address_p (addr, false))
18729 {
18730 extra_cost = 1;
18731 type = "move";
18732 }
18733 break;
18734
18735 /* If this is an indexed address, make sure the register class can handle
18736 indexed addresses for this mode. */
18737 case PLUS:
18738 plus_arg0 = XEXP (addr, 0);
18739 plus_arg1 = XEXP (addr, 1);
18740
18741 /* (plus (plus (reg) (constant)) (constant)) is generated during
18742 push_reload processing, so handle it now. */
18743 if (GET_CODE (plus_arg0) == PLUS && CONST_INT_P (plus_arg1))
18744 {
18745 if ((addr_mask & RELOAD_REG_OFFSET) == 0)
18746 {
18747 extra_cost = 1;
18748 type = "offset";
18749 }
18750 }
18751
18752 /* (plus (plus (reg) (constant)) (reg)) is also generated during
18753 push_reload processing, so handle it now. */
18754 else if (GET_CODE (plus_arg0) == PLUS && REG_P (plus_arg1))
18755 {
18756 if ((addr_mask & RELOAD_REG_INDEXED) == 0)
18757 {
18758 extra_cost = 1;
18759 type = "indexed #2";
18760 }
18761 }
18762
18763 else if (!base_reg_operand (plus_arg0, GET_MODE (plus_arg0)))
18764 {
18765 fail_msg = "no base register #2";
18766 extra_cost = -1;
18767 }
18768
18769 else if (int_reg_operand (plus_arg1, GET_MODE (plus_arg1)))
18770 {
18771 if ((addr_mask & RELOAD_REG_INDEXED) == 0
18772 || !legitimate_indexed_address_p (addr, false))
18773 {
18774 extra_cost = 1;
18775 type = "indexed";
18776 }
18777 }
18778
18779 else if ((addr_mask & RELOAD_REG_QUAD_OFFSET) != 0
18780 && CONST_INT_P (plus_arg1))
18781 {
18782 if (!quad_address_offset_p (INTVAL (plus_arg1)))
18783 {
18784 extra_cost = 1;
18785 type = "vector d-form offset";
18786 }
18787 }
18788
18789 /* Make sure the register class can handle offset addresses. */
18790 else if (rs6000_legitimate_offset_address_p (mode, addr, false, true))
18791 {
18792 if ((addr_mask & RELOAD_REG_OFFSET) == 0)
18793 {
18794 extra_cost = 1;
18795 type = "offset #2";
18796 }
18797 }
18798
18799 else
18800 {
18801 fail_msg = "bad PLUS";
18802 extra_cost = -1;
18803 }
18804
18805 break;
18806
18807 case LO_SUM:
18808 /* Quad offsets are restricted and can't handle normal addresses. */
18809 if ((addr_mask & RELOAD_REG_QUAD_OFFSET) != 0)
18810 {
18811 extra_cost = -1;
18812 type = "vector d-form lo_sum";
18813 }
18814
18815 else if (!legitimate_lo_sum_address_p (mode, addr, false))
18816 {
18817 fail_msg = "bad LO_SUM";
18818 extra_cost = -1;
18819 }
18820
18821 if ((addr_mask & RELOAD_REG_OFFSET) == 0)
18822 {
18823 extra_cost = 1;
18824 type = "lo_sum";
18825 }
18826 break;
18827
18828 /* Static addresses need to create a TOC entry. */
18829 case CONST:
18830 case SYMBOL_REF:
18831 case LABEL_REF:
18832 if ((addr_mask & RELOAD_REG_QUAD_OFFSET) != 0)
18833 {
18834 extra_cost = -1;
18835 type = "vector d-form lo_sum #2";
18836 }
18837
18838 else
18839 {
18840 type = "address";
18841 extra_cost = rs6000_secondary_reload_toc_costs (addr_mask);
18842 }
18843 break;
18844
18845 /* TOC references look like offsetable memory. */
18846 case UNSPEC:
18847 if (TARGET_CMODEL == CMODEL_SMALL || XINT (addr, 1) != UNSPEC_TOCREL)
18848 {
18849 fail_msg = "bad UNSPEC";
18850 extra_cost = -1;
18851 }
18852
18853 else if ((addr_mask & RELOAD_REG_QUAD_OFFSET) != 0)
18854 {
18855 extra_cost = -1;
18856 type = "vector d-form lo_sum #3";
18857 }
18858
18859 else if ((addr_mask & RELOAD_REG_OFFSET) == 0)
18860 {
18861 extra_cost = 1;
18862 type = "toc reference";
18863 }
18864 break;
18865
18866 default:
18867 {
18868 fail_msg = "bad address";
18869 extra_cost = -1;
18870 }
18871 }
18872
18873 if (TARGET_DEBUG_ADDR /* && extra_cost != 0 */)
18874 {
18875 if (extra_cost < 0)
18876 fprintf (stderr,
18877 "rs6000_secondary_reload_memory error: mode = %s, "
18878 "class = %s, addr_mask = '%s', %s\n",
18879 GET_MODE_NAME (mode),
18880 reg_class_names[rclass],
18881 rs6000_debug_addr_mask (addr_mask, false),
18882 (fail_msg != NULL) ? fail_msg : "<bad address>");
18883
18884 else
18885 fprintf (stderr,
18886 "rs6000_secondary_reload_memory: mode = %s, class = %s, "
18887 "addr_mask = '%s', extra cost = %d, %s\n",
18888 GET_MODE_NAME (mode),
18889 reg_class_names[rclass],
18890 rs6000_debug_addr_mask (addr_mask, false),
18891 extra_cost,
18892 (type) ? type : "<none>");
18893
18894 debug_rtx (addr);
18895 }
18896
18897 return extra_cost;
18898 }
18899
18900 /* Helper function for rs6000_secondary_reload to return true if a move to a
18901 different register class is really a simple move. */
18902
18903 static bool
18904 rs6000_secondary_reload_simple_move (enum rs6000_reg_type to_type,
18905 enum rs6000_reg_type from_type,
18906 machine_mode mode)
18907 {
18908 int size = GET_MODE_SIZE (mode);
18909
18910 /* Add support for various direct moves available. In this function, we only
18911 look at cases where we don't need any extra registers, and one or more
18912 simple move insns are issued. Originally small integers are not allowed
18913 in FPR/VSX registers. Single precision binary floating is not a simple
18914 move because we need to convert to the single precision memory layout.
18915 The 4-byte SDmode can be moved. TDmode values are disallowed since they
18916 need special direct move handling, which we do not support yet. */
18917 if (TARGET_DIRECT_MOVE
18918 && ((to_type == GPR_REG_TYPE && from_type == VSX_REG_TYPE)
18919 || (to_type == VSX_REG_TYPE && from_type == GPR_REG_TYPE)))
18920 {
18921 if (TARGET_POWERPC64)
18922 {
18923 /* ISA 2.07: MTVSRD or MFVSRD. */
18924 if (size == 8)
18925 return true;
18926
18927 /* ISA 3.0: MTVSRDD or MFVSRD + MFVSRLD. */
18928 if (size == 16 && TARGET_P9_VECTOR && mode != TDmode)
18929 return true;
18930 }
18931
18932 /* ISA 2.07: MTVSRWZ or MFVSRWZ. */
18933 if (TARGET_P8_VECTOR)
18934 {
18935 if (mode == SImode)
18936 return true;
18937
18938 if (TARGET_P9_VECTOR && (mode == HImode || mode == QImode))
18939 return true;
18940 }
18941
18942 /* ISA 2.07: MTVSRWZ or MFVSRWZ. */
18943 if (mode == SDmode)
18944 return true;
18945 }
18946
18947 /* Move to/from SPR. */
18948 else if ((size == 4 || (TARGET_POWERPC64 && size == 8))
18949 && ((to_type == GPR_REG_TYPE && from_type == SPR_REG_TYPE)
18950 || (to_type == SPR_REG_TYPE && from_type == GPR_REG_TYPE)))
18951 return true;
18952
18953 return false;
18954 }
18955
18956 /* Direct move helper function for rs6000_secondary_reload. Handle all of
18957 the special direct moves that involve allocating an extra register.
18958 Return true if a reload helper insn exists, recording its insn code and
18959 extra cost in SRI; return false otherwise. */
18960
18961 static bool
18962 rs6000_secondary_reload_direct_move (enum rs6000_reg_type to_type,
18963 enum rs6000_reg_type from_type,
18964 machine_mode mode,
18965 secondary_reload_info *sri,
18966 bool altivec_p)
18967 {
18968 bool ret = false;
18969 enum insn_code icode = CODE_FOR_nothing;
18970 int cost = 0;
18971 int size = GET_MODE_SIZE (mode);
18972
18973 if (TARGET_POWERPC64 && size == 16)
18974 {
18975 /* Handle moving 128-bit values from GPRs to VSX registers on ISA 2.07
18976 and later (power8, power9) when running in 64-bit mode, using
18977 XXPERMDI to glue the two 64-bit values back together. */
18978 if (to_type == VSX_REG_TYPE && from_type == GPR_REG_TYPE)
18979 {
18980 cost = 3; /* 2 mtvsrd's, 1 xxpermdi. */
18981 icode = reg_addr[mode].reload_vsx_gpr;
18982 }
18983
18984 /* Handle moving 128-bit values from VSX registers to GPRs on
18985 ISA 2.07 when running in 64-bit mode using XXPERMDI to get access to the
18986 bottom 64-bit value. */
18987 else if (to_type == GPR_REG_TYPE && from_type == VSX_REG_TYPE)
18988 {
18989 cost = 3; /* 2 mfvsrd's, 1 xxpermdi. */
18990 icode = reg_addr[mode].reload_gpr_vsx;
18991 }
18992 }
18993
18994 else if (TARGET_POWERPC64 && mode == SFmode)
18995 {
18996 if (to_type == GPR_REG_TYPE && from_type == VSX_REG_TYPE)
18997 {
18998 cost = 3; /* xscvdpspn, mfvsrd, and. */
18999 icode = reg_addr[mode].reload_gpr_vsx;
19000 }
19001
19002 else if (to_type == VSX_REG_TYPE && from_type == GPR_REG_TYPE)
19003 {
19004 cost = 2; /* mtvsrwz, xscvspdpn. */
19005 icode = reg_addr[mode].reload_vsx_gpr;
19006 }
19007 }
19008
19009 else if (!TARGET_POWERPC64 && size == 8)
19010 {
19011 /* Handle moving 64-bit values from GPRs to floating point registers on
19012 ISA 2.07 when running in 32-bit mode using FMRGOW to glue the two
19013 32-bit values back together. Altivec register classes must be handled
19014 specially since a different instruction is used, and the secondary
19015 reload support requires a single instruction class in the scratch
19016 register constraint. However, right now TFmode is not allowed in
19017 Altivec registers, so the pattern will never match. */
19018 if (to_type == VSX_REG_TYPE && from_type == GPR_REG_TYPE && !altivec_p)
19019 {
19020 cost = 3; /* 2 mtvsrwz's, 1 fmrgow. */
19021 icode = reg_addr[mode].reload_fpr_gpr;
19022 }
19023 }
19024
19025 if (icode != CODE_FOR_nothing)
19026 {
19027 ret = true;
19028 if (sri)
19029 {
19030 sri->icode = icode;
19031 sri->extra_cost = cost;
19032 }
19033 }
19034
19035 return ret;
19036 }
19037
19038 /* Return whether a move between two register classes can be done either
19039 directly (simple move) or via a pattern that uses a single extra temporary
19040 (using ISA 2.07's direct move in this case). */
19041
19042 static bool
19043 rs6000_secondary_reload_move (enum rs6000_reg_type to_type,
19044 enum rs6000_reg_type from_type,
19045 machine_mode mode,
19046 secondary_reload_info *sri,
19047 bool altivec_p)
19048 {
19049 /* Fall back to load/store reloads if either type is not a register. */
19050 if (to_type == NO_REG_TYPE || from_type == NO_REG_TYPE)
19051 return false;
19052
19053 /* If we haven't allocated registers yet, assume the move can be done for the
19054 standard register types. */
19055 if ((to_type == PSEUDO_REG_TYPE && from_type == PSEUDO_REG_TYPE)
19056 || (to_type == PSEUDO_REG_TYPE && IS_STD_REG_TYPE (from_type))
19057 || (from_type == PSEUDO_REG_TYPE && IS_STD_REG_TYPE (to_type)))
19058 return true;
19059
19060 /* A move within the same set of registers is a simple move for
19061 non-specialized registers. */
19062 if (to_type == from_type && IS_STD_REG_TYPE (to_type))
19063 return true;
19064
19065 /* Check whether a simple move can be done directly. */
19066 if (rs6000_secondary_reload_simple_move (to_type, from_type, mode))
19067 {
19068 if (sri)
19069 {
19070 sri->icode = CODE_FOR_nothing;
19071 sri->extra_cost = 0;
19072 }
19073 return true;
19074 }
19075
19076 /* Now check if we can do it in a few steps. */
19077 return rs6000_secondary_reload_direct_move (to_type, from_type, mode, sri,
19078 altivec_p);
19079 }
19080
19081 /* Inform reload about cases where moving X with a mode MODE to a register in
19082 RCLASS requires an extra scratch or immediate register. Return the class
19083 needed for the immediate register.
19084
19085 For VSX and Altivec, we may need a register to convert sp+offset into
19086 reg+sp.
19087
19088 For misaligned 64-bit gpr loads and stores we need a register to
19089 convert an offset address to indirect. */
19090
19091 static reg_class_t
19092 rs6000_secondary_reload (bool in_p,
19093 rtx x,
19094 reg_class_t rclass_i,
19095 machine_mode mode,
19096 secondary_reload_info *sri)
19097 {
19098 enum reg_class rclass = (enum reg_class) rclass_i;
19099 reg_class_t ret = ALL_REGS;
19100 enum insn_code icode;
19101 bool default_p = false;
19102 bool done_p = false;
19103
19104 /* Allow subreg of memory before/during reload. */
19105 bool memory_p = (MEM_P (x)
19106 || (!reload_completed && SUBREG_P (x)
19107 && MEM_P (SUBREG_REG (x))));
19108
19109 sri->icode = CODE_FOR_nothing;
19110 sri->t_icode = CODE_FOR_nothing;
19111 sri->extra_cost = 0;
19112 icode = ((in_p)
19113 ? reg_addr[mode].reload_load
19114 : reg_addr[mode].reload_store);
19115
19116 if (REG_P (x) || register_operand (x, mode))
19117 {
19118 enum rs6000_reg_type to_type = reg_class_to_reg_type[(int)rclass];
19119 bool altivec_p = (rclass == ALTIVEC_REGS);
19120 enum rs6000_reg_type from_type = register_to_reg_type (x, &altivec_p);
19121
19122 if (!in_p)
19123 std::swap (to_type, from_type);
19124
19125 /* Can we do a direct move of some sort? */
19126 if (rs6000_secondary_reload_move (to_type, from_type, mode, sri,
19127 altivec_p))
19128 {
19129 icode = (enum insn_code)sri->icode;
19130 default_p = false;
19131 done_p = true;
19132 ret = NO_REGS;
19133 }
19134 }
19135
19136 /* Make sure 0.0 is not reloaded or forced into memory. */
19137 if (x == CONST0_RTX (mode) && VSX_REG_CLASS_P (rclass))
19138 {
19139 ret = NO_REGS;
19140 default_p = false;
19141 done_p = true;
19142 }
19143
19144 /* If this is a scalar floating point value and we want to load it into the
19145 traditional Altivec registers, move it via a traditional floating
19146 point register, unless we have D-form addressing. Also make sure that
19147 non-zero constants use an FPR. */
19148 if (!done_p && reg_addr[mode].scalar_in_vmx_p
19149 && !mode_supports_vmx_dform (mode)
19150 && (rclass == VSX_REGS || rclass == ALTIVEC_REGS)
19151 && (memory_p || CONST_DOUBLE_P (x)))
19152 {
19153 ret = FLOAT_REGS;
19154 default_p = false;
19155 done_p = true;
19156 }
19157
19158 /* Handle reload of load/stores if we have reload helper functions. */
19159 if (!done_p && icode != CODE_FOR_nothing && memory_p)
19160 {
19161 int extra_cost = rs6000_secondary_reload_memory (XEXP (x, 0), rclass,
19162 mode);
19163
19164 if (extra_cost >= 0)
19165 {
19166 done_p = true;
19167 ret = NO_REGS;
19168 if (extra_cost > 0)
19169 {
19170 sri->extra_cost = extra_cost;
19171 sri->icode = icode;
19172 }
19173 }
19174 }
19175
19176 /* Handle unaligned loads and stores of integer registers. */
19177 if (!done_p && TARGET_POWERPC64
19178 && reg_class_to_reg_type[(int)rclass] == GPR_REG_TYPE
19179 && memory_p
19180 && GET_MODE_SIZE (GET_MODE (x)) >= UNITS_PER_WORD)
19181 {
19182 rtx addr = XEXP (x, 0);
19183 rtx off = address_offset (addr);
19184
19185 if (off != NULL_RTX)
19186 {
19187 unsigned int extra = GET_MODE_SIZE (GET_MODE (x)) - UNITS_PER_WORD;
19188 unsigned HOST_WIDE_INT offset = INTVAL (off);
19189
19190 /* We need a secondary reload when our legitimate_address_p
19191 says the address is good (as otherwise the entire address
19192 will be reloaded), and the offset is not a multiple of
19193 four or we have an address wrap. Address wrap will only
19194 occur for LO_SUMs since legitimate_offset_address_p
19195 rejects addresses for 16-byte mems that will wrap. */
19196 if (GET_CODE (addr) == LO_SUM
19197 ? (1 /* legitimate_address_p allows any offset for lo_sum */
19198 && ((offset & 3) != 0
19199 || ((offset & 0xffff) ^ 0x8000) >= 0x10000 - extra))
19200 : (offset + 0x8000 < 0x10000 - extra /* legitimate_address_p */
19201 && (offset & 3) != 0))
19202 {
19203 /* -m32 -mpowerpc64 needs to use a 32-bit scratch register. */
19204 if (in_p)
19205 sri->icode = ((TARGET_32BIT) ? CODE_FOR_reload_si_load
19206 : CODE_FOR_reload_di_load);
19207 else
19208 sri->icode = ((TARGET_32BIT) ? CODE_FOR_reload_si_store
19209 : CODE_FOR_reload_di_store);
19210 sri->extra_cost = 2;
19211 ret = NO_REGS;
19212 done_p = true;
19213 }
19214 else
19215 default_p = true;
19216 }
19217 else
19218 default_p = true;
19219 }
19220
19221 if (!done_p && !TARGET_POWERPC64
19222 && reg_class_to_reg_type[(int)rclass] == GPR_REG_TYPE
19223 && memory_p
19224 && GET_MODE_SIZE (GET_MODE (x)) > UNITS_PER_WORD)
19225 {
19226 rtx addr = XEXP (x, 0);
19227 rtx off = address_offset (addr);
19228
19229 if (off != NULL_RTX)
19230 {
19231 unsigned int extra = GET_MODE_SIZE (GET_MODE (x)) - UNITS_PER_WORD;
19232 unsigned HOST_WIDE_INT offset = INTVAL (off);
19233
19234 /* We need a secondary reload when our legitimate_address_p
19235 says the address is good (as otherwise the entire address
19236 will be reloaded), and we have a wrap.
19237
19238 legitimate_lo_sum_address_p allows LO_SUM addresses to
19239 have any offset so test for wrap in the low 16 bits.
19240
19241 legitimate_offset_address_p checks for the range
19242 [-0x8000,0x7fff] for mode size of 8 and [-0x8000,0x7ff7]
19243 for mode size of 16. We wrap at [0x7ffc,0x7fff] and
19244 [0x7ff4,0x7fff] respectively, so test for the
19245 intersection of these ranges, [0x7ffc,0x7fff] and
19246 [0x7ff4,0x7ff7] respectively.
19247
19248 Note that the address we see here may have been
19249 manipulated by legitimize_reload_address. */
19250 if (GET_CODE (addr) == LO_SUM
19251 ? ((offset & 0xffff) ^ 0x8000) >= 0x10000 - extra
19252 : offset - (0x8000 - extra) < UNITS_PER_WORD)
19253 {
19254 if (in_p)
19255 sri->icode = CODE_FOR_reload_si_load;
19256 else
19257 sri->icode = CODE_FOR_reload_si_store;
19258 sri->extra_cost = 2;
19259 ret = NO_REGS;
19260 done_p = true;
19261 }
19262 else
19263 default_p = true;
19264 }
19265 else
19266 default_p = true;
19267 }
19268
19269 if (!done_p)
19270 default_p = true;
19271
19272 if (default_p)
19273 ret = default_secondary_reload (in_p, x, rclass, mode, sri);
19274
19275 gcc_assert (ret != ALL_REGS);
19276
19277 if (TARGET_DEBUG_ADDR)
19278 {
19279 fprintf (stderr,
19280 "\nrs6000_secondary_reload, return %s, in_p = %s, rclass = %s, "
19281 "mode = %s",
19282 reg_class_names[ret],
19283 in_p ? "true" : "false",
19284 reg_class_names[rclass],
19285 GET_MODE_NAME (mode));
19286
19287 if (reload_completed)
19288 fputs (", after reload", stderr);
19289
19290 if (!done_p)
19291 fputs (", done_p not set", stderr);
19292
19293 if (default_p)
19294 fputs (", default secondary reload", stderr);
19295
19296 if (sri->icode != CODE_FOR_nothing)
19297 fprintf (stderr, ", reload func = %s, extra cost = %d",
19298 insn_data[sri->icode].name, sri->extra_cost);
19299
19300 else if (sri->extra_cost > 0)
19301 fprintf (stderr, ", extra cost = %d", sri->extra_cost);
19302
19303 fputs ("\n", stderr);
19304 debug_rtx (x);
19305 }
19306
19307 return ret;
19308 }
19309
19310 /* Better tracing for rs6000_secondary_reload_inner. */
19311
19312 static void
19313 rs6000_secondary_reload_trace (int line, rtx reg, rtx mem, rtx scratch,
19314 bool store_p)
19315 {
19316 rtx set, clobber;
19317
19318 gcc_assert (reg != NULL_RTX && mem != NULL_RTX && scratch != NULL_RTX);
19319
19320 fprintf (stderr, "rs6000_secondary_reload_inner:%d, type = %s\n", line,
19321 store_p ? "store" : "load");
19322
19323 if (store_p)
19324 set = gen_rtx_SET (mem, reg);
19325 else
19326 set = gen_rtx_SET (reg, mem);
19327
19328 clobber = gen_rtx_CLOBBER (VOIDmode, scratch);
19329 debug_rtx (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, set, clobber)));
19330 }
19331
19332 static void rs6000_secondary_reload_fail (int, rtx, rtx, rtx, bool)
19333 ATTRIBUTE_NORETURN;
19334
19335 static void
19336 rs6000_secondary_reload_fail (int line, rtx reg, rtx mem, rtx scratch,
19337 bool store_p)
19338 {
19339 rs6000_secondary_reload_trace (line, reg, mem, scratch, store_p);
19340 gcc_unreachable ();
19341 }
19342
19343 /* Fixup reload addresses for values in GPR, FPR, and VMX registers that have
19344 reload helper functions. These were identified in
19345 rs6000_secondary_reload_memory, and if reload decided to use the secondary
19346 reload, it calls the insns:
19347 reload_<RELOAD:mode>_<P:mptrsize>_store
19348 reload_<RELOAD:mode>_<P:mptrsize>_load
19349
19350 which in turn calls this function, to do whatever is necessary to create
19351 valid addresses. */
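
/* For example (assuming the standard rs6000.md mode attributes, where
 <P:mptrsize> expands to "si" or "di"), a V2DF store reload with 64-bit
 pointers would arrive through a reload_v2df_di_store expander; the
 names above are templates, not literal pattern names. */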
19352
19353 void
19354 rs6000_secondary_reload_inner (rtx reg, rtx mem, rtx scratch, bool store_p)
19355 {
19356 int regno = true_regnum (reg);
19357 machine_mode mode = GET_MODE (reg);
19358 addr_mask_type addr_mask;
19359 rtx addr;
19360 rtx new_addr;
19361 rtx op_reg, op0, op1;
19362 rtx and_op;
19363 rtx cc_clobber;
19364 rtvec rv;
19365
19366 if (regno < 0 || !HARD_REGISTER_NUM_P (regno) || !MEM_P (mem)
19367 || !base_reg_operand (scratch, GET_MODE (scratch)))
19368 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
19369
19370 if (IN_RANGE (regno, FIRST_GPR_REGNO, LAST_GPR_REGNO))
19371 addr_mask = reg_addr[mode].addr_mask[RELOAD_REG_GPR];
19372
19373 else if (IN_RANGE (regno, FIRST_FPR_REGNO, LAST_FPR_REGNO))
19374 addr_mask = reg_addr[mode].addr_mask[RELOAD_REG_FPR];
19375
19376 else if (IN_RANGE (regno, FIRST_ALTIVEC_REGNO, LAST_ALTIVEC_REGNO))
19377 addr_mask = reg_addr[mode].addr_mask[RELOAD_REG_VMX];
19378
19379 else
19380 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
19381
19382 /* Make sure the mode is valid in this register class. */
19383 if ((addr_mask & RELOAD_REG_VALID) == 0)
19384 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
19385
19386 if (TARGET_DEBUG_ADDR)
19387 rs6000_secondary_reload_trace (__LINE__, reg, mem, scratch, store_p);
19388
19389 new_addr = addr = XEXP (mem, 0);
19390 switch (GET_CODE (addr))
19391 {
19392 /* Does the register class support auto update forms for this mode? If
19393 not, do the update now. We don't need a scratch register, since the
19394 PowerPC only supports PRE_INC, PRE_DEC, and PRE_MODIFY. */
19395 case PRE_INC:
19396 case PRE_DEC:
19397 op_reg = XEXP (addr, 0);
19398 if (!base_reg_operand (op_reg, Pmode))
19399 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
19400
19401 if ((addr_mask & RELOAD_REG_PRE_INCDEC) == 0)
19402 {
19403 int delta = GET_MODE_SIZE (mode);
19404 if (GET_CODE (addr) == PRE_DEC)
19405 delta = -delta;
19406 emit_insn (gen_add2_insn (op_reg, GEN_INT (delta)));
19407 new_addr = op_reg;
19408 }
19409 break;
19410
19411 case PRE_MODIFY:
19412 op0 = XEXP (addr, 0);
19413 op1 = XEXP (addr, 1);
19414 if (!base_reg_operand (op0, Pmode)
19415 || GET_CODE (op1) != PLUS
19416 || !rtx_equal_p (op0, XEXP (op1, 0)))
19417 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
19418
19419 if ((addr_mask & RELOAD_REG_PRE_MODIFY) == 0)
19420 {
19421 emit_insn (gen_rtx_SET (op0, op1));
19422 new_addr = reg;
19423 }
19424 break;
19425
19426 /* Do we need to simulate AND -16 to clear the bottom address bits used
19427 in VMX load/stores? */
19428 case AND:
19429 op0 = XEXP (addr, 0);
19430 op1 = XEXP (addr, 1);
19431 if ((addr_mask & RELOAD_REG_AND_M16) == 0)
19432 {
19433 if (REG_P (op0) || SUBREG_P (op0))
19434 op_reg = op0;
19435
19436 else if (GET_CODE (op1) == PLUS)
19437 {
19438 emit_insn (gen_rtx_SET (scratch, op1));
19439 op_reg = scratch;
19440 }
19441
19442 else
19443 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
19444
19445 and_op = gen_rtx_AND (GET_MODE (scratch), op_reg, op1);
19446 cc_clobber = gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (CCmode));
19447 rv = gen_rtvec (2, gen_rtx_SET (scratch, and_op), cc_clobber);
19448 emit_insn (gen_rtx_PARALLEL (VOIDmode, rv));
19449 new_addr = scratch;
19450 }
19451 break;
19452
19453 /* If this is an indirect address, make sure it is a base register. */
19454 case REG:
19455 case SUBREG:
19456 if (!base_reg_operand (addr, GET_MODE (addr)))
19457 {
19458 emit_insn (gen_rtx_SET (scratch, addr));
19459 new_addr = scratch;
19460 }
19461 break;
19462
19463 /* If this is an indexed address, make sure the register class can handle
19464 indexed addresses for this mode. */
19465 case PLUS:
19466 op0 = XEXP (addr, 0);
19467 op1 = XEXP (addr, 1);
19468 if (!base_reg_operand (op0, Pmode))
19469 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
19470
19471 else if (int_reg_operand (op1, Pmode))
19472 {
19473 if ((addr_mask & RELOAD_REG_INDEXED) == 0)
19474 {
19475 emit_insn (gen_rtx_SET (scratch, addr));
19476 new_addr = scratch;
19477 }
19478 }
19479
19480 else if (mode_supports_dq_form (mode) && CONST_INT_P (op1))
19481 {
19482 if (((addr_mask & RELOAD_REG_QUAD_OFFSET) == 0)
19483 || !quad_address_p (addr, mode, false))
19484 {
19485 emit_insn (gen_rtx_SET (scratch, addr));
19486 new_addr = scratch;
19487 }
19488 }
19489
19490 /* Make sure the register class can handle offset addresses. */
19491 else if (rs6000_legitimate_offset_address_p (mode, addr, false, true))
19492 {
19493 if ((addr_mask & RELOAD_REG_OFFSET) == 0)
19494 {
19495 emit_insn (gen_rtx_SET (scratch, addr));
19496 new_addr = scratch;
19497 }
19498 }
19499
19500 else
19501 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
19502
19503 break;
19504
19505 case LO_SUM:
19506 op0 = XEXP (addr, 0);
19507 op1 = XEXP (addr, 1);
19508 if (!base_reg_operand (op0, Pmode))
19509 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
19510
19511 else if (int_reg_operand (op1, Pmode))
19512 {
19513 if ((addr_mask & RELOAD_REG_INDEXED) == 0)
19514 {
19515 emit_insn (gen_rtx_SET (scratch, addr));
19516 new_addr = scratch;
19517 }
19518 }
19519
19520 /* Quad offsets are restricted and can't handle normal addresses. */
19521 else if (mode_supports_dq_form (mode))
19522 {
19523 emit_insn (gen_rtx_SET (scratch, addr));
19524 new_addr = scratch;
19525 }
19526
19527 /* Make sure the register class can handle offset addresses. */
19528 else if (legitimate_lo_sum_address_p (mode, addr, false))
19529 {
19530 if ((addr_mask & RELOAD_REG_OFFSET) == 0)
19531 {
19532 emit_insn (gen_rtx_SET (scratch, addr));
19533 new_addr = scratch;
19534 }
19535 }
19536
19537 else
19538 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
19539
19540 break;
19541
19542 case SYMBOL_REF:
19543 case CONST:
19544 case LABEL_REF:
19545 rs6000_emit_move (scratch, addr, Pmode);
19546 new_addr = scratch;
19547 break;
19548
19549 default:
19550 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
19551 }
19552
19553 /* Adjust the address if it changed. */
19554 if (addr != new_addr)
19555 {
19556 mem = replace_equiv_address_nv (mem, new_addr);
19557 if (TARGET_DEBUG_ADDR)
19558 fprintf (stderr, "\nrs6000_secondary_reload_inner, mem adjusted.\n");
19559 }
19560
19561 /* Now create the move. */
19562 if (store_p)
19563 emit_insn (gen_rtx_SET (mem, reg));
19564 else
19565 emit_insn (gen_rtx_SET (reg, mem));
19566
19567 return;
19568 }
19569
19570 /* Convert reloads involving 64-bit gprs and misaligned offset
19571 addressing, or multiple 32-bit gprs and offsets that are too large,
19572 to use indirect addressing. */
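
/* Illustrative example (not from the original source): for a misaligned
 64-bit load from (mem:DI (plus (reg 1) (const_int 6))), the PLUS is
 copied into the scratch register and the access is rewritten as
 (mem:DI (reg scratch)), which any GPR load can handle. */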
19573
19574 void
19575 rs6000_secondary_reload_gpr (rtx reg, rtx mem, rtx scratch, bool store_p)
19576 {
19577 int regno = true_regnum (reg);
19578 enum reg_class rclass;
19579 rtx addr;
19580 rtx scratch_or_premodify = scratch;
19581
19582 if (TARGET_DEBUG_ADDR)
19583 {
19584 fprintf (stderr, "\nrs6000_secondary_reload_gpr, type = %s\n",
19585 store_p ? "store" : "load");
19586 fprintf (stderr, "reg:\n");
19587 debug_rtx (reg);
19588 fprintf (stderr, "mem:\n");
19589 debug_rtx (mem);
19590 fprintf (stderr, "scratch:\n");
19591 debug_rtx (scratch);
19592 }
19593
19594 gcc_assert (regno >= 0 && HARD_REGISTER_NUM_P (regno));
19595 gcc_assert (MEM_P (mem));
19596 rclass = REGNO_REG_CLASS (regno);
19597 gcc_assert (rclass == GENERAL_REGS || rclass == BASE_REGS);
19598 addr = XEXP (mem, 0);
19599
19600 if (GET_CODE (addr) == PRE_MODIFY)
19601 {
19602 gcc_assert (REG_P (XEXP (addr, 0))
19603 && GET_CODE (XEXP (addr, 1)) == PLUS
19604 && XEXP (XEXP (addr, 1), 0) == XEXP (addr, 0));
19605 scratch_or_premodify = XEXP (addr, 0);
19606 addr = XEXP (addr, 1);
19607 }
19608 gcc_assert (GET_CODE (addr) == PLUS || GET_CODE (addr) == LO_SUM);
19609
19610 rs6000_emit_move (scratch_or_premodify, addr, Pmode);
19611
19612 mem = replace_equiv_address_nv (mem, scratch_or_premodify);
19613
19614 /* Now create the move. */
19615 if (store_p)
19616 emit_insn (gen_rtx_SET (mem, reg));
19617 else
19618 emit_insn (gen_rtx_SET (reg, mem));
19619
19620 return;
19621 }
19622
19623 /* Given an rtx X being reloaded into a reg required to be
19624 in class CLASS, return the class of reg to actually use.
19625 In general this is just CLASS; but on some machines
19626 in some cases it is preferable to use a more restrictive class.
19627
19628 On the RS/6000, we have to return NO_REGS when we want to reload a
19629 floating-point CONST_DOUBLE to force it to be copied to memory.
19630
19631 We also don't want to reload integer values into floating-point
19632 registers if we can at all help it. In fact, this can
19633 cause reload to die, if it tries to generate a reload of CTR
19634 into a FP register and discovers it doesn't have the memory location
19635 required.
19636
19637 ??? Would it be a good idea to have reload do the converse, that is
19638 try to reload floating modes into FP registers if possible?
19639 */
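
/* Illustrative examples (not from the original source): reloading
 CONST0_RTX (V4SImode) into VSX_REGS returns VSX_REGS, since zero can be
 generated in any VSX register; a non-zero SFmode CONST_DOUBLE instead
 returns NO_REGS, forcing the constant to memory. */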
19640
19641 static enum reg_class
19642 rs6000_preferred_reload_class (rtx x, enum reg_class rclass)
19643 {
19644 machine_mode mode = GET_MODE (x);
19645 bool is_constant = CONSTANT_P (x);
19646
19647 /* If a mode can't go in FPR/ALTIVEC/VSX registers, don't return a preferred
19648 reload class for it. */
19649 if ((rclass == ALTIVEC_REGS || rclass == VSX_REGS)
19650 && (reg_addr[mode].addr_mask[RELOAD_REG_VMX] & RELOAD_REG_VALID) == 0)
19651 return NO_REGS;
19652
19653 if ((rclass == FLOAT_REGS || rclass == VSX_REGS)
19654 && (reg_addr[mode].addr_mask[RELOAD_REG_FPR] & RELOAD_REG_VALID) == 0)
19655 return NO_REGS;
19656
19657 /* For VSX, see if we should prefer FLOAT_REGS or ALTIVEC_REGS. Do not allow
19658 the reloading of address expressions using PLUS into floating point
19659 registers. */
19660 if (TARGET_VSX && VSX_REG_CLASS_P (rclass) && GET_CODE (x) != PLUS)
19661 {
19662 if (is_constant)
19663 {
19664 /* Zero is always allowed in all VSX registers. */
19665 if (x == CONST0_RTX (mode))
19666 return rclass;
19667
19668 /* If this is a vector constant that can be formed with a few Altivec
19669 instructions, we want altivec registers. */
19670 if (GET_CODE (x) == CONST_VECTOR && easy_vector_constant (x, mode))
19671 return ALTIVEC_REGS;
19672
19673 /* If this is an integer constant that can easily be loaded into
19674 vector registers, allow it. */
19675 if (CONST_INT_P (x))
19676 {
19677 HOST_WIDE_INT value = INTVAL (x);
19678
19679 /* ISA 2.07 can generate -1 in all registers with XXLORC. ISA
19680 2.06 can generate it in the Altivec registers with
19681 VSPLTI<x>. */
19682 if (value == -1)
19683 {
19684 if (TARGET_P8_VECTOR)
19685 return rclass;
19686 else if (rclass == ALTIVEC_REGS || rclass == VSX_REGS)
19687 return ALTIVEC_REGS;
19688 else
19689 return NO_REGS;
19690 }
19691
19692 /* ISA 3.0 can load -128..127 using the XXSPLTIB instruction and
19693 a sign extend in the Altivec registers. */
19694 if (IN_RANGE (value, -128, 127) && TARGET_P9_VECTOR
19695 && (rclass == ALTIVEC_REGS || rclass == VSX_REGS))
19696 return ALTIVEC_REGS;
19697 }
19698
19699 /* Force constant to memory. */
19700 return NO_REGS;
19701 }
19702
19703 /* D-form addressing can easily reload the value. */
19704 if (mode_supports_vmx_dform (mode)
19705 || mode_supports_dq_form (mode))
19706 return rclass;
19707
19708 /* If this is a scalar floating point value and we don't have D-form
19709 addressing, prefer the traditional floating point registers so that we
19710 can use D-form (register+offset) addressing. */
19711 if (rclass == VSX_REGS
19712 && (mode == SFmode || GET_MODE_SIZE (mode) == 8))
19713 return FLOAT_REGS;
19714
19715 /* Prefer the Altivec registers if Altivec is handling the vector
19716 operations (i.e. V16QI, V8HI, and V4SI), or if we prefer Altivec
19717 loads. */
19718 if (VECTOR_UNIT_ALTIVEC_P (mode) || VECTOR_MEM_ALTIVEC_P (mode)
19719 || mode == V1TImode)
19720 return ALTIVEC_REGS;
19721
19722 return rclass;
19723 }
19724
19725 if (is_constant || GET_CODE (x) == PLUS)
19726 {
19727 if (reg_class_subset_p (GENERAL_REGS, rclass))
19728 return GENERAL_REGS;
19729 if (reg_class_subset_p (BASE_REGS, rclass))
19730 return BASE_REGS;
19731 return NO_REGS;
19732 }
19733
19734 if (GET_MODE_CLASS (mode) == MODE_INT && rclass == GEN_OR_FLOAT_REGS)
19735 return GENERAL_REGS;
19736
19737 return rclass;
19738 }
19739
19740 /* Debug version of rs6000_preferred_reload_class. */
19741 static enum reg_class
19742 rs6000_debug_preferred_reload_class (rtx x, enum reg_class rclass)
19743 {
19744 enum reg_class ret = rs6000_preferred_reload_class (x, rclass);
19745
19746 fprintf (stderr,
19747 "\nrs6000_preferred_reload_class, return %s, rclass = %s, "
19748 "mode = %s, x:\n",
19749 reg_class_names[ret], reg_class_names[rclass],
19750 GET_MODE_NAME (GET_MODE (x)));
19751 debug_rtx (x);
19752
19753 return ret;
19754 }
19755
19756 /* If we are copying between FP or AltiVec registers and anything else, we need
19757 a memory location. The exception is when we are targeting ppc64 and the
19758 move to/from fpr to gpr instructions are available. Also, under VSX, you
19759 can copy vector registers from the FP register set to the Altivec register
19760 set and vice versa. */
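
/* Illustrative example (assuming a target without direct moves): copying
 a DFmode value between GENERAL_REGS and FLOAT_REGS has no simple or
 direct move, and FLOAT_REGS is a floating point register type, so the
 function returns true and the copy goes through memory. */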
19761
19762 static bool
19763 rs6000_secondary_memory_needed (machine_mode mode,
19764 reg_class_t from_class,
19765 reg_class_t to_class)
19766 {
19767 enum rs6000_reg_type from_type, to_type;
19768 bool altivec_p = ((from_class == ALTIVEC_REGS)
19769 || (to_class == ALTIVEC_REGS));
19770
19771 /* If a simple/direct move is available, we don't need secondary memory. */
19772 from_type = reg_class_to_reg_type[(int)from_class];
19773 to_type = reg_class_to_reg_type[(int)to_class];
19774
19775 if (rs6000_secondary_reload_move (to_type, from_type, mode,
19776 (secondary_reload_info *)0, altivec_p))
19777 return false;
19778
19779 /* If we have a floating point or vector register class, we need to use
19780 memory to transfer the data. */
19781 if (IS_FP_VECT_REG_TYPE (from_type) || IS_FP_VECT_REG_TYPE (to_type))
19782 return true;
19783
19784 return false;
19785 }
19786
19787 /* Debug version of rs6000_secondary_memory_needed. */
19788 static bool
19789 rs6000_debug_secondary_memory_needed (machine_mode mode,
19790 reg_class_t from_class,
19791 reg_class_t to_class)
19792 {
19793 bool ret = rs6000_secondary_memory_needed (mode, from_class, to_class);
19794
19795 fprintf (stderr,
19796 "rs6000_secondary_memory_needed, return: %s, from_class = %s, "
19797 "to_class = %s, mode = %s\n",
19798 ret ? "true" : "false",
19799 reg_class_names[from_class],
19800 reg_class_names[to_class],
19801 GET_MODE_NAME (mode));
19802
19803 return ret;
19804 }
19805
19806 /* Return the register class of a scratch register needed to copy IN into
19807 or out of a register in RCLASS in MODE. If it can be done directly,
19808 NO_REGS is returned. */
19809
19810 static enum reg_class
19811 rs6000_secondary_reload_class (enum reg_class rclass, machine_mode mode,
19812 rtx in)
19813 {
19814 int regno;
19815
19816 if (TARGET_ELF || (DEFAULT_ABI == ABI_DARWIN
19817 #if TARGET_MACHO
19818 && MACHOPIC_INDIRECT
19819 #endif
19820 ))
19821 {
19822 /* We cannot copy a symbolic operand directly into anything
19823 other than BASE_REGS for TARGET_ELF. So indicate that a
19824 register from BASE_REGS is needed as an intermediate
19825 register.
19826
19827 On Darwin, pic addresses require a load from memory, which
19828 needs a base register. */
19829 if (rclass != BASE_REGS
19830 && (SYMBOL_REF_P (in)
19831 || GET_CODE (in) == HIGH
19832 || GET_CODE (in) == LABEL_REF
19833 || GET_CODE (in) == CONST))
19834 return BASE_REGS;
19835 }
19836
19837 if (REG_P (in))
19838 {
19839 regno = REGNO (in);
19840 if (!HARD_REGISTER_NUM_P (regno))
19841 {
19842 regno = true_regnum (in);
19843 if (!HARD_REGISTER_NUM_P (regno))
19844 regno = -1;
19845 }
19846 }
19847 else if (SUBREG_P (in))
19848 {
19849 regno = true_regnum (in);
19850 if (!HARD_REGISTER_NUM_P (regno))
19851 regno = -1;
19852 }
19853 else
19854 regno = -1;
19855
19856 /* If we have VSX register moves, prefer moving scalar values between
19857 Altivec registers and GPR by going via an FPR (and then via memory)
19858 instead of reloading the secondary memory address for Altivec moves. */
19859 if (TARGET_VSX
19860 && GET_MODE_SIZE (mode) < 16
19861 && !mode_supports_vmx_dform (mode)
19862 && (((rclass == GENERAL_REGS || rclass == BASE_REGS)
19863 && (regno >= 0 && ALTIVEC_REGNO_P (regno)))
19864 || ((rclass == VSX_REGS || rclass == ALTIVEC_REGS)
19865 && (regno >= 0 && INT_REGNO_P (regno)))))
19866 return FLOAT_REGS;
19867
19868 /* We can place anything into GENERAL_REGS and can put GENERAL_REGS
19869 into anything. */
19870 if (rclass == GENERAL_REGS || rclass == BASE_REGS
19871 || (regno >= 0 && INT_REGNO_P (regno)))
19872 return NO_REGS;
19873
19874 /* Constants, memory, and VSX registers can go into VSX registers (both the
19875 traditional floating point and the altivec registers). */
19876 if (rclass == VSX_REGS
19877 && (regno == -1 || VSX_REGNO_P (regno)))
19878 return NO_REGS;
19879
19880 /* Constants, memory, and FP registers can go into FP registers. */
19881 if ((regno == -1 || FP_REGNO_P (regno))
19882 && (rclass == FLOAT_REGS || rclass == GEN_OR_FLOAT_REGS))
19883 return (mode != SDmode || lra_in_progress) ? NO_REGS : GENERAL_REGS;
19884
19885 /* Memory and AltiVec registers can go into AltiVec registers. */
19886 if ((regno == -1 || ALTIVEC_REGNO_P (regno))
19887 && rclass == ALTIVEC_REGS)
19888 return NO_REGS;
19889
19890 /* We can copy among the CR registers. */
19891 if ((rclass == CR_REGS || rclass == CR0_REGS)
19892 && regno >= 0 && CR_REGNO_P (regno))
19893 return NO_REGS;
19894
19895 /* Otherwise, we need GENERAL_REGS. */
19896 return GENERAL_REGS;
19897 }
19898
19899 /* Debug version of rs6000_secondary_reload_class. */
19900 static enum reg_class
19901 rs6000_debug_secondary_reload_class (enum reg_class rclass,
19902 machine_mode mode, rtx in)
19903 {
19904 enum reg_class ret = rs6000_secondary_reload_class (rclass, mode, in);
19905 fprintf (stderr,
19906 "\nrs6000_secondary_reload_class, return %s, rclass = %s, "
19907 "mode = %s, input rtx:\n",
19908 reg_class_names[ret], reg_class_names[rclass],
19909 GET_MODE_NAME (mode));
19910 debug_rtx (in);
19911
19912 return ret;
19913 }
19914
19915 /* Implement TARGET_CAN_CHANGE_MODE_CLASS. */
19916
19917 static bool
19918 rs6000_can_change_mode_class (machine_mode from,
19919 machine_mode to,
19920 reg_class_t rclass)
19921 {
19922 unsigned from_size = GET_MODE_SIZE (from);
19923 unsigned to_size = GET_MODE_SIZE (to);
19924
19925 if (from_size != to_size)
19926 {
19927 enum reg_class xclass = (TARGET_VSX) ? VSX_REGS : FLOAT_REGS;
19928
19929 if (reg_classes_intersect_p (xclass, rclass))
19930 {
19931 unsigned to_nregs = hard_regno_nregs (FIRST_FPR_REGNO, to);
19932 unsigned from_nregs = hard_regno_nregs (FIRST_FPR_REGNO, from);
19933 bool to_float128_vector_p = FLOAT128_VECTOR_P (to);
19934 bool from_float128_vector_p = FLOAT128_VECTOR_P (from);
19935
19936 /* Don't allow 64-bit types to overlap with 128-bit types that take a
19937 single register under VSX because the scalar part of the register
19938 is in the upper 64 bits, not the lower 64 bits. Types like
19939 TFmode/TDmode that take 2 scalar registers can overlap. 128-bit
19940 IEEE floating point can't overlap, and neither can small
19941 values. */
19942
19943 if (to_float128_vector_p && from_float128_vector_p)
19944 return true;
19945
19946 else if (to_float128_vector_p || from_float128_vector_p)
19947 return false;
19948
19949 /* TDmode in floating-mode registers must always go into a register
19950 pair with the most significant word in the even-numbered register
19951 to match ISA requirements. In little-endian mode, this does not
19952 match subreg numbering, so we cannot allow subregs. */
19953 if (!BYTES_BIG_ENDIAN && (to == TDmode || from == TDmode))
19954 return false;
19955
19956 if (from_size < 8 || to_size < 8)
19957 return false;
19958
19959 if (from_size == 8 && (8 * to_nregs) != to_size)
19960 return false;
19961
19962 if (to_size == 8 && (8 * from_nregs) != from_size)
19963 return false;
19964
19965 return true;
19966 }
19967 else
19968 return true;
19969 }
19970
19971 /* Since the VSX register set includes traditional floating point registers
19972 and altivec registers, just check for the size being different instead of
19973 trying to check whether the modes are vector modes. Otherwise it won't
19974 allow, say, DF and DI to change classes. For types like TFmode and TDmode
19975 that take 2 64-bit registers, rather than a single 128-bit register, don't
19976 allow subregs of those types to other 128-bit types. */
19977 if (TARGET_VSX && VSX_REG_CLASS_P (rclass))
19978 {
19979 unsigned num_regs = (from_size + 15) / 16;
19980 if (hard_regno_nregs (FIRST_FPR_REGNO, to) > num_regs
19981 || hard_regno_nregs (FIRST_FPR_REGNO, from) > num_regs)
19982 return false;
19983
19984 return (from_size == 8 || from_size == 16);
19985 }
19986
19987 if (TARGET_ALTIVEC && rclass == ALTIVEC_REGS
19988 && (ALTIVEC_VECTOR_MODE (from) + ALTIVEC_VECTOR_MODE (to)) == 1)
19989 return false;
19990
19991 return true;
19992 }
19993
19994 /* Debug version of rs6000_can_change_mode_class. */
19995 static bool
19996 rs6000_debug_can_change_mode_class (machine_mode from,
19997 machine_mode to,
19998 reg_class_t rclass)
19999 {
20000 bool ret = rs6000_can_change_mode_class (from, to, rclass);
20001
20002 fprintf (stderr,
20003 "rs6000_can_change_mode_class, return %s, from = %s, "
20004 "to = %s, rclass = %s\n",
20005 ret ? "true" : "false",
20006 GET_MODE_NAME (from), GET_MODE_NAME (to),
20007 reg_class_names[rclass]);
20008
20009 return ret;
20010 }
20011 \f
20012 /* Return a string to do a move operation of 128 bits of data. */
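/* For example, a V2DImode copy between two VSX registers returns
 "xxlor %x0,%x1,%x1" below, while a 128-bit GPR-to-GPR copy returns "#"
 so that the move is split into word-sized moves after reload. */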
20013
20014 const char *
20015 rs6000_output_move_128bit (rtx operands[])
20016 {
20017 rtx dest = operands[0];
20018 rtx src = operands[1];
20019 machine_mode mode = GET_MODE (dest);
20020 int dest_regno;
20021 int src_regno;
20022 bool dest_gpr_p, dest_fp_p, dest_vmx_p, dest_vsx_p;
20023 bool src_gpr_p, src_fp_p, src_vmx_p, src_vsx_p;
20024
20025 if (REG_P (dest))
20026 {
20027 dest_regno = REGNO (dest);
20028 dest_gpr_p = INT_REGNO_P (dest_regno);
20029 dest_fp_p = FP_REGNO_P (dest_regno);
20030 dest_vmx_p = ALTIVEC_REGNO_P (dest_regno);
20031 dest_vsx_p = dest_fp_p | dest_vmx_p;
20032 }
20033 else
20034 {
20035 dest_regno = -1;
20036 dest_gpr_p = dest_fp_p = dest_vmx_p = dest_vsx_p = false;
20037 }
20038
20039 if (REG_P (src))
20040 {
20041 src_regno = REGNO (src);
20042 src_gpr_p = INT_REGNO_P (src_regno);
20043 src_fp_p = FP_REGNO_P (src_regno);
20044 src_vmx_p = ALTIVEC_REGNO_P (src_regno);
20045 src_vsx_p = src_fp_p | src_vmx_p;
20046 }
20047 else
20048 {
20049 src_regno = -1;
20050 src_gpr_p = src_fp_p = src_vmx_p = src_vsx_p = false;
20051 }
20052
20053 /* Register moves. */
20054 if (dest_regno >= 0 && src_regno >= 0)
20055 {
20056 if (dest_gpr_p)
20057 {
20058 if (src_gpr_p)
20059 return "#";
20060
20061 if (TARGET_DIRECT_MOVE_128 && src_vsx_p)
20062 return (WORDS_BIG_ENDIAN
20063 ? "mfvsrd %0,%x1\n\tmfvsrld %L0,%x1"
20064 : "mfvsrd %L0,%x1\n\tmfvsrld %0,%x1");
20065
20066 else if (TARGET_VSX && TARGET_DIRECT_MOVE && src_vsx_p)
20067 return "#";
20068 }
20069
20070 else if (TARGET_VSX && dest_vsx_p)
20071 {
20072 if (src_vsx_p)
20073 return "xxlor %x0,%x1,%x1";
20074
20075 else if (TARGET_DIRECT_MOVE_128 && src_gpr_p)
20076 return (WORDS_BIG_ENDIAN
20077 ? "mtvsrdd %x0,%1,%L1"
20078 : "mtvsrdd %x0,%L1,%1");
20079
20080 else if (TARGET_DIRECT_MOVE && src_gpr_p)
20081 return "#";
20082 }
20083
20084 else if (TARGET_ALTIVEC && dest_vmx_p && src_vmx_p)
20085 return "vor %0,%1,%1";
20086
20087 else if (dest_fp_p && src_fp_p)
20088 return "#";
20089 }
20090
20091 /* Loads. */
20092 else if (dest_regno >= 0 && MEM_P (src))
20093 {
20094 if (dest_gpr_p)
20095 {
20096 if (TARGET_QUAD_MEMORY && quad_load_store_p (dest, src))
20097 return "lq %0,%1";
20098 else
20099 return "#";
20100 }
20101
20102 else if (TARGET_ALTIVEC && dest_vmx_p
20103 && altivec_indexed_or_indirect_operand (src, mode))
20104 return "lvx %0,%y1";
20105
20106 else if (TARGET_VSX && dest_vsx_p)
20107 {
20108 if (mode_supports_dq_form (mode)
20109 && quad_address_p (XEXP (src, 0), mode, true))
20110 return "lxv %x0,%1";
20111
20112 else if (TARGET_P9_VECTOR)
20113 return "lxvx %x0,%y1";
20114
20115 else if (mode == V16QImode || mode == V8HImode || mode == V4SImode)
20116 return "lxvw4x %x0,%y1";
20117
20118 else
20119 return "lxvd2x %x0,%y1";
20120 }
20121
20122 else if (TARGET_ALTIVEC && dest_vmx_p)
20123 return "lvx %0,%y1";
20124
20125 else if (dest_fp_p)
20126 return "#";
20127 }
20128
20129 /* Stores. */
20130 else if (src_regno >= 0 && MEM_P (dest))
20131 {
20132 if (src_gpr_p)
20133 {
20134 if (TARGET_QUAD_MEMORY && quad_load_store_p (dest, src))
20135 return "stq %1,%0";
20136 else
20137 return "#";
20138 }
20139
20140 else if (TARGET_ALTIVEC && src_vmx_p
20141 && altivec_indexed_or_indirect_operand (dest, mode))
20142 return "stvx %1,%y0";
20143
20144 else if (TARGET_VSX && src_vsx_p)
20145 {
20146 if (mode_supports_dq_form (mode)
20147 && quad_address_p (XEXP (dest, 0), mode, true))
20148 return "stxv %x1,%0";
20149
20150 else if (TARGET_P9_VECTOR)
20151 return "stxvx %x1,%y0";
20152
20153 else if (mode == V16QImode || mode == V8HImode || mode == V4SImode)
20154 return "stxvw4x %x1,%y0";
20155
20156 else
20157 return "stxvd2x %x1,%y0";
20158 }
20159
20160 else if (TARGET_ALTIVEC && src_vmx_p)
20161 return "stvx %1,%y0";
20162
20163 else if (src_fp_p)
20164 return "#";
20165 }
20166
20167 /* Constants. */
20168 else if (dest_regno >= 0
20169 && (CONST_INT_P (src)
20170 || CONST_WIDE_INT_P (src)
20171 || CONST_DOUBLE_P (src)
20172 || GET_CODE (src) == CONST_VECTOR))
20173 {
20174 if (dest_gpr_p)
20175 return "#";
20176
20177 else if ((dest_vmx_p && TARGET_ALTIVEC)
20178 || (dest_vsx_p && TARGET_VSX))
20179 return output_vec_const_move (operands);
20180 }
20181
20182 fatal_insn ("Bad 128-bit move", gen_rtx_SET (dest, src));
20183 }
20184
20185 /* Validate a 128-bit move. */
20186 bool
20187 rs6000_move_128bit_ok_p (rtx operands[])
20188 {
20189 machine_mode mode = GET_MODE (operands[0]);
20190 return (gpc_reg_operand (operands[0], mode)
20191 || gpc_reg_operand (operands[1], mode));
20192 }
20193
20194 /* Return true if a 128-bit move needs to be split. */
20195 bool
20196 rs6000_split_128bit_ok_p (rtx operands[])
20197 {
20198 if (!reload_completed)
20199 return false;
20200
20201 if (!gpr_or_gpr_p (operands[0], operands[1]))
20202 return false;
20203
20204 if (quad_load_store_p (operands[0], operands[1]))
20205 return false;
20206
20207 return true;
20208 }
20209
20210 \f
20211 /* Given a comparison operation, return the bit number in CCR to test. We
20212 know this is a valid comparison.
20213
20214 SCC_P is 1 if this is for an scc. That means that %D will have been
20215 used instead of %C, so the bits will be in different places.
20216
20217 Return -1 if OP isn't a valid comparison for some reason. */
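
 /* Illustrative example (not from the original source): for a GT
 comparison on CR2 with SCC_P zero, base_bit is 4 * (CR2 - CR0) = 8 and
 the function returns base_bit + 1 = 9, the GT bit of CR2 within the
 32-bit CCR. */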
20218
20219 int
20220 ccr_bit (rtx op, int scc_p)
20221 {
20222 enum rtx_code code = GET_CODE (op);
20223 machine_mode cc_mode;
20224 int cc_regnum;
20225 int base_bit;
20226 rtx reg;
20227
20228 if (!COMPARISON_P (op))
20229 return -1;
20230
20231 reg = XEXP (op, 0);
20232
20233 if (!REG_P (reg) || !CR_REGNO_P (REGNO (reg)))
20234 return -1;
20235
20236 cc_mode = GET_MODE (reg);
20237 cc_regnum = REGNO (reg);
20238 base_bit = 4 * (cc_regnum - CR0_REGNO);
20239
20240 validate_condition_mode (code, cc_mode);
20241
20242 /* When generating an sCOND operation, only positive conditions are
20243 allowed. */
20244 if (scc_p)
20245 switch (code)
20246 {
20247 case EQ:
20248 case GT:
20249 case LT:
20250 case UNORDERED:
20251 case GTU:
20252 case LTU:
20253 break;
20254 default:
20255 return -1;
20256 }
20257
20258 switch (code)
20259 {
20260 case NE:
20261 return scc_p ? base_bit + 3 : base_bit + 2;
20262 case EQ:
20263 return base_bit + 2;
20264 case GT: case GTU: case UNLE:
20265 return base_bit + 1;
20266 case LT: case LTU: case UNGE:
20267 return base_bit;
20268 case ORDERED: case UNORDERED:
20269 return base_bit + 3;
20270
20271 case GE: case GEU:
20272 /* If scc, we will have done a cror to put the bit in the
20273 unordered position. So test that bit. For integer, this is ! LT
20274 unless this is an scc insn. */
20275 return scc_p ? base_bit + 3 : base_bit;
20276
20277 case LE: case LEU:
20278 return scc_p ? base_bit + 3 : base_bit + 1;
20279
20280 default:
20281 return -1;
20282 }
20283 }
20284 \f
20285 /* Return the GOT register. */
20286
20287 rtx
20288 rs6000_got_register (rtx value ATTRIBUTE_UNUSED)
20289 {
20290 /* The second flow pass currently (June 1999) can't update
20291 regs_ever_live without disturbing other parts of the compiler, so
20292 update it here to make the prolog/epilogue code happy. */
20293 if (!can_create_pseudo_p ()
20294 && !df_regs_ever_live_p (RS6000_PIC_OFFSET_TABLE_REGNUM))
20295 df_set_regs_ever_live (RS6000_PIC_OFFSET_TABLE_REGNUM, true);
20296
20297 crtl->uses_pic_offset_table = 1;
20298
20299 return pic_offset_table_rtx;
20300 }
20301 \f
20302 #define INT_P(X) (CONST_INT_P (X) && GET_MODE (X) == VOIDmode)
20303
20304 /* Write out a function code label. */
20305
20306 void
20307 rs6000_output_function_entry (FILE *file, const char *fname)
20308 {
20309 if (fname[0] != '.')
20310 {
20311 switch (DEFAULT_ABI)
20312 {
20313 default:
20314 gcc_unreachable ();
20315
20316 case ABI_AIX:
20317 if (DOT_SYMBOLS)
20318 putc ('.', file);
20319 else
20320 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "L.");
20321 break;
20322
20323 case ABI_ELFv2:
20324 case ABI_V4:
20325 case ABI_DARWIN:
20326 break;
20327 }
20328 }
20329
20330 RS6000_OUTPUT_BASENAME (file, fname);
20331 }
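/* E.g. for a function "foo" this emits ".foo" on AIX with
   dot-symbols, an "L."-prefixed "foo" (after the internal label
   prefix) on AIX without them, and plain "foo" for ELFv2, V4 and
   Darwin.  */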
20332
20333 /* Print an operand. Recognize special options, documented below. */
20334
20335 #if TARGET_ELF
20336 /* Access to .sdata2 through r2 (see -msdata=eabi in invoke.texi) is
20337 only introduced by the linker, when applying the sda21
20338 relocation. */
20339 #define SMALL_DATA_RELOC ((rs6000_sdata == SDATA_EABI) ? "sda21" : "sdarel")
20340 #define SMALL_DATA_REG ((rs6000_sdata == SDATA_EABI) ? 0 : 13)
20341 #else
20342 #define SMALL_DATA_RELOC "sda21"
20343 #define SMALL_DATA_REG 0
20344 #endif
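/* E.g. a small-data reference printed below comes out as
   "sym@sda21(0)" for -msdata=eabi, where the linker applies the
   sda21 relocation, and as "sym@sdarel(13)" for other ELF small
   data.  */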
20345
20346 void
20347 print_operand (FILE *file, rtx x, int code)
20348 {
20349 int i;
20350 unsigned HOST_WIDE_INT uval;
20351
20352 switch (code)
20353 {
20354 /* %a is output_address. */
20355
20356 /* %c is output_addr_const if a CONSTANT_ADDRESS_P, otherwise
20357 output_operand. */
20358
20359 case 'D':
20360 /* Like 'J' but get to the GT bit only. */
20361 if (!REG_P (x) || !CR_REGNO_P (REGNO (x)))
20362 {
20363 output_operand_lossage ("invalid %%D value");
20364 return;
20365 }
20366
20367 /* Bit 1 is GT bit. */
20368 i = 4 * (REGNO (x) - CR0_REGNO) + 1;
20369
20370 /* Add one for shift count in rlinm for scc. */
20371 fprintf (file, "%d", i + 1);
20372 return;
20373
20374 case 'e':
20375 /* If the low 16 bits are 0, but some other bit is set, write 's'. */
20376 if (! INT_P (x))
20377 {
20378 output_operand_lossage ("invalid %%e value");
20379 return;
20380 }
20381
20382 uval = INTVAL (x);
20383 if ((uval & 0xffff) == 0 && uval != 0)
20384 putc ('s', file);
20385 return;
20386
20387 case 'E':
20388 /* X is a CR register.  Print the number of the EQ bit of the CR.  */
20389 if (!REG_P (x) || !CR_REGNO_P (REGNO (x)))
20390 output_operand_lossage ("invalid %%E value");
20391 else
20392 fprintf (file, "%d", 4 * (REGNO (x) - CR0_REGNO) + 2);
20393 return;
20394
20395 case 'f':
20396 /* X is a CR register. Print the shift count needed to move it
20397 to the high-order four bits. */
20398 if (!REG_P (x) || !CR_REGNO_P (REGNO (x)))
20399 output_operand_lossage ("invalid %%f value");
20400 else
20401 fprintf (file, "%d", 4 * (REGNO (x) - CR0_REGNO));
20402 return;
20403
20404 case 'F':
20405 /* Similar, but print the count for the rotate in the opposite
20406 direction. */
20407 if (!REG_P (x) || !CR_REGNO_P (REGNO (x)))
20408 output_operand_lossage ("invalid %%F value");
20409 else
20410 fprintf (file, "%d", 32 - 4 * (REGNO (x) - CR0_REGNO));
20411 return;
20412
20413 case 'G':
20414 /* X is a constant integer. If it is negative, print "m",
20415 otherwise print "z". This is to make an aze or ame insn. */
20416 if (!CONST_INT_P (x))
20417 output_operand_lossage ("invalid %%G value");
20418 else if (INTVAL (x) >= 0)
20419 putc ('z', file);
20420 else
20421 putc ('m', file);
20422 return;
20423
20424 case 'h':
20425 /* If constant, output low-order five bits. Otherwise, write
20426 normally. */
20427 if (INT_P (x))
20428 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x) & 31);
20429 else
20430 print_operand (file, x, 0);
20431 return;
20432
20433 case 'H':
20434 /* If constant, output low-order six bits. Otherwise, write
20435 normally. */
20436 if (INT_P (x))
20437 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x) & 63);
20438 else
20439 print_operand (file, x, 0);
20440 return;
20441
20442 case 'I':
20443 /* Print `i' if this is a constant, else nothing. */
20444 if (INT_P (x))
20445 putc ('i', file);
20446 return;
20447
20448 case 'j':
20449 /* Write the bit number in CCR for jump. */
20450 i = ccr_bit (x, 0);
20451 if (i == -1)
20452 output_operand_lossage ("invalid %%j code");
20453 else
20454 fprintf (file, "%d", i);
20455 return;
20456
20457 case 'J':
20458 /* Similar, but add one for shift count in rlinm for scc and pass
20459 scc flag to `ccr_bit'. */
20460 i = ccr_bit (x, 1);
20461 if (i == -1)
20462 output_operand_lossage ("invalid %%J code");
20463 else
20464 /* If we want bit 31, write a shift count of zero, not 32. */
20465 fprintf (file, "%d", i == 31 ? 0 : i + 1);
20466 return;
20467
20468 case 'k':
20469 /* X must be a constant. Write the 1's complement of the
20470 constant. */
20471 if (! INT_P (x))
20472 output_operand_lossage ("invalid %%k value");
20473 else
20474 fprintf (file, HOST_WIDE_INT_PRINT_DEC, ~ INTVAL (x));
20475 return;
20476
20477 case 'K':
20478 /* X must be a symbolic constant on ELF. Write an
20479 expression suitable for an 'addi' that adds in the low 16
20480 bits of the MEM. */
20481 if (GET_CODE (x) == CONST)
20482 {
20483 if (GET_CODE (XEXP (x, 0)) != PLUS
20484 || (!SYMBOL_REF_P (XEXP (XEXP (x, 0), 0))
20485 && GET_CODE (XEXP (XEXP (x, 0), 0)) != LABEL_REF)
20486 || !CONST_INT_P (XEXP (XEXP (x, 0), 1)))
20487 output_operand_lossage ("invalid %%K value");
20488 }
20489 print_operand_address (file, x);
20490 fputs ("@l", file);
20491 return;
20492
20493 /* %l is output_asm_label. */
20494
20495 case 'L':
20496 /* Write second word of DImode or DFmode reference. Works on register
20497 or non-indexed memory only. */
20498 if (REG_P (x))
20499 fputs (reg_names[REGNO (x) + 1], file);
20500 else if (MEM_P (x))
20501 {
20502 machine_mode mode = GET_MODE (x);
20503 /* Handle possible auto-increment. Since it is pre-increment and
20504 we have already done it, we can just use an offset of word. */
20505 if (GET_CODE (XEXP (x, 0)) == PRE_INC
20506 || GET_CODE (XEXP (x, 0)) == PRE_DEC)
20507 output_address (mode, plus_constant (Pmode, XEXP (XEXP (x, 0), 0),
20508 UNITS_PER_WORD));
20509 else if (GET_CODE (XEXP (x, 0)) == PRE_MODIFY)
20510 output_address (mode, plus_constant (Pmode, XEXP (XEXP (x, 0), 0),
20511 UNITS_PER_WORD));
20512 else
20513 output_address (mode, XEXP (adjust_address_nv (x, SImode,
20514 UNITS_PER_WORD),
20515 0));
20516
20517 if (small_data_operand (x, GET_MODE (x)))
20518 fprintf (file, "@%s(%s)", SMALL_DATA_RELOC,
20519 reg_names[SMALL_DATA_REG]);
20520 }
20521 return;
20522
20523 case 'N': /* Unused */
20524 /* Write the number of elements in the vector times 4. */
20525 if (GET_CODE (x) != PARALLEL)
20526 output_operand_lossage ("invalid %%N value");
20527 else
20528 fprintf (file, "%d", XVECLEN (x, 0) * 4);
20529 return;
20530
20531 case 'O': /* Unused */
20532 /* Similar, but subtract 1 first. */
20533 if (GET_CODE (x) != PARALLEL)
20534 output_operand_lossage ("invalid %%O value");
20535 else
20536 fprintf (file, "%d", (XVECLEN (x, 0) - 1) * 4);
20537 return;
20538
20539 case 'p':
20540 /* X is a CONST_INT that is a power of two. Output the logarithm. */
20541 if (! INT_P (x)
20542 || INTVAL (x) < 0
20543 || (i = exact_log2 (INTVAL (x))) < 0)
20544 output_operand_lossage ("invalid %%p value");
20545 else
20546 fprintf (file, "%d", i);
20547 return;
20548
20549 case 'P':
20550 /* The operand must be an indirect memory reference. The result
20551 is the register name. */
20552 if (!MEM_P (x) || !REG_P (XEXP (x, 0))
20553 || REGNO (XEXP (x, 0)) >= 32)
20554 output_operand_lossage ("invalid %%P value");
20555 else
20556 fputs (reg_names[REGNO (XEXP (x, 0))], file);
20557 return;
20558
20559 case 'q':
20560 /* This outputs the logical code corresponding to a boolean
20561 expression. The expression may have one or both operands
20562 negated (if one, only the first one). For condition register
20563 logical operations, it will also treat the negated
20564 CR codes as NOTs, but not handle NOTs of them. */
20565 {
20566 const char *const *t = 0;
20567 const char *s;
20568 enum rtx_code code = GET_CODE (x);
20569 static const char * const tbl[3][3] = {
20570 { "and", "andc", "nor" },
20571 { "or", "orc", "nand" },
20572 { "xor", "eqv", "xor" } };
20573
20574 if (code == AND)
20575 t = tbl[0];
20576 else if (code == IOR)
20577 t = tbl[1];
20578 else if (code == XOR)
20579 t = tbl[2];
20580 else
20581 output_operand_lossage ("invalid %%q value");
20582
20583 if (GET_CODE (XEXP (x, 0)) != NOT)
20584 s = t[0];
20585 else
20586 {
20587 if (GET_CODE (XEXP (x, 1)) == NOT)
20588 s = t[2];
20589 else
20590 s = t[1];
20591 }
20592
20593 fputs (s, file);
20594 }
20595 return;
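      /* E.g. (and (not a) b) prints "andc", and (ior (not a) (not b))
	 prints "nand", per the table above.  */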
20596
20597 case 'Q':
20598 if (! TARGET_MFCRF)
20599 return;
20600 fputc (',', file);
20601 /* FALLTHRU */
20602
20603 case 'R':
20604 /* X is a CR register. Print the mask for `mtcrf'. */
20605 if (!REG_P (x) || !CR_REGNO_P (REGNO (x)))
20606 output_operand_lossage ("invalid %%R value");
20607 else
20608 fprintf (file, "%d", 128 >> (REGNO (x) - CR0_REGNO));
20609 return;
20610
20611 case 's':
20612 /* Low 5 bits of 32 - value.  */
20613 if (! INT_P (x))
20614 output_operand_lossage ("invalid %%s value");
20615 else
20616 fprintf (file, HOST_WIDE_INT_PRINT_DEC, (32 - INTVAL (x)) & 31);
20617 return;
20618
20619 case 't':
20620 /* Like 'J' but get to the OVERFLOW/UNORDERED bit. */
20621 if (!REG_P (x) || !CR_REGNO_P (REGNO (x)))
20622 {
20623 output_operand_lossage ("invalid %%t value");
20624 return;
20625 }
20626
20627 /* Bit 3 is OV bit. */
20628 i = 4 * (REGNO (x) - CR0_REGNO) + 3;
20629
20630 /* If we want bit 31, write a shift count of zero, not 32. */
20631 fprintf (file, "%d", i == 31 ? 0 : i + 1);
20632 return;
20633
20634 case 'T':
20635 /* Print the symbolic name of a branch target register. */
20636 if (GET_CODE (x) == UNSPEC && XINT (x, 1) == UNSPEC_PLTSEQ)
20637 x = XVECEXP (x, 0, 0);
20638 if (!REG_P (x) || (REGNO (x) != LR_REGNO
20639 && REGNO (x) != CTR_REGNO))
20640 output_operand_lossage ("invalid %%T value");
20641 else if (REGNO (x) == LR_REGNO)
20642 fputs ("lr", file);
20643 else
20644 fputs ("ctr", file);
20645 return;
20646
20647 case 'u':
20648 /* High-order or low-order 16 bits of constant, whichever is non-zero,
20649 for use in unsigned operand. */
20650 if (! INT_P (x))
20651 {
20652 output_operand_lossage ("invalid %%u value");
20653 return;
20654 }
20655
20656 uval = INTVAL (x);
20657 if ((uval & 0xffff) == 0)
20658 uval >>= 16;
20659
20660 fprintf (file, HOST_WIDE_INT_PRINT_HEX, uval & 0xffff);
20661 return;
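      /* E.g. %u prints 0x1234 for the constant 0x12340000 (the high
	 halfword, because the low 16 bits are zero) and 0x5678 for the
	 constant 0x5678.  */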
20662
20663 case 'v':
20664 /* High-order 16 bits of constant for use in signed operand. */
20665 if (! INT_P (x))
20666 output_operand_lossage ("invalid %%v value");
20667 else
20668 fprintf (file, HOST_WIDE_INT_PRINT_HEX,
20669 (INTVAL (x) >> 16) & 0xffff);
20670 return;
20671
20672 case 'U':
20673 /* Print `u' if this has an auto-increment or auto-decrement. */
20674 if (MEM_P (x)
20675 && (GET_CODE (XEXP (x, 0)) == PRE_INC
20676 || GET_CODE (XEXP (x, 0)) == PRE_DEC
20677 || GET_CODE (XEXP (x, 0)) == PRE_MODIFY))
20678 putc ('u', file);
20679 return;
20680
20681 case 'V':
20682 /* Print the trap code for this operand. */
20683 switch (GET_CODE (x))
20684 {
20685 case EQ:
20686 fputs ("eq", file); /* 4 */
20687 break;
20688 case NE:
20689 fputs ("ne", file); /* 24 */
20690 break;
20691 case LT:
20692 fputs ("lt", file); /* 16 */
20693 break;
20694 case LE:
20695 fputs ("le", file); /* 20 */
20696 break;
20697 case GT:
20698 fputs ("gt", file); /* 8 */
20699 break;
20700 case GE:
20701 fputs ("ge", file); /* 12 */
20702 break;
20703 case LTU:
20704 fputs ("llt", file); /* 2 */
20705 break;
20706 case LEU:
20707 fputs ("lle", file); /* 6 */
20708 break;
20709 case GTU:
20710 fputs ("lgt", file); /* 1 */
20711 break;
20712 case GEU:
20713 fputs ("lge", file); /* 5 */
20714 break;
20715 default:
20716 output_operand_lossage ("invalid %%V value");
20717 }
20718 break;
20719
20720 case 'w':
20721 /* If constant, low-order 16 bits of constant, signed. Otherwise, write
20722 normally. */
20723 if (INT_P (x))
20724 fprintf (file, HOST_WIDE_INT_PRINT_DEC,
20725 ((INTVAL (x) & 0xffff) ^ 0x8000) - 0x8000);
20726 else
20727 print_operand (file, x, 0);
20728 return;
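      /* E.g. %w prints -32768 for the constant 0x8000: the
	 ((v & 0xffff) ^ 0x8000) - 0x8000 expression sign-extends the
	 low halfword.  */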
20729
20730 case 'x':
20731 /* X is a FPR or Altivec register used in a VSX context. */
20732 if (!REG_P (x) || !VSX_REGNO_P (REGNO (x)))
20733 output_operand_lossage ("invalid %%x value");
20734 else
20735 {
20736 int reg = REGNO (x);
20737 int vsx_reg = (FP_REGNO_P (reg)
20738 ? reg - 32
20739 : reg - FIRST_ALTIVEC_REGNO + 32);
20740
20741 #ifdef TARGET_REGNAMES
20742 if (TARGET_REGNAMES)
20743 fprintf (file, "%%vs%d", vsx_reg);
20744 else
20745 #endif
20746 fprintf (file, "%d", vsx_reg);
20747 }
20748 return;
20749
20750 case 'X':
20751 if (MEM_P (x)
20752 && (legitimate_indexed_address_p (XEXP (x, 0), 0)
20753 || (GET_CODE (XEXP (x, 0)) == PRE_MODIFY
20754 && legitimate_indexed_address_p (XEXP (XEXP (x, 0), 1), 0))))
20755 putc ('x', file);
20756 return;
20757
20758 case 'Y':
20759 /* Like 'L', for third word of TImode/PTImode */
20760 if (REG_P (x))
20761 fputs (reg_names[REGNO (x) + 2], file);
20762 else if (MEM_P (x))
20763 {
20764 machine_mode mode = GET_MODE (x);
20765 if (GET_CODE (XEXP (x, 0)) == PRE_INC
20766 || GET_CODE (XEXP (x, 0)) == PRE_DEC)
20767 output_address (mode, plus_constant (Pmode,
20768 XEXP (XEXP (x, 0), 0), 8));
20769 else if (GET_CODE (XEXP (x, 0)) == PRE_MODIFY)
20770 output_address (mode, plus_constant (Pmode,
20771 XEXP (XEXP (x, 0), 0), 8));
20772 else
20773 output_address (mode, XEXP (adjust_address_nv (x, SImode, 8), 0));
20774 if (small_data_operand (x, GET_MODE (x)))
20775 fprintf (file, "@%s(%s)", SMALL_DATA_RELOC,
20776 reg_names[SMALL_DATA_REG]);
20777 }
20778 return;
20779
20780 case 'z':
20781 if (GET_CODE (x) == UNSPEC && XINT (x, 1) == UNSPEC_PLTSEQ)
20782 x = XVECEXP (x, 0, 1);
20783 /* X is a SYMBOL_REF. Write out the name preceded by a
20784 period and without any trailing data in brackets. Used for function
20785 names. If we are configured for System V (or the embedded ABI) on
20786 the PowerPC, do not emit the period, since those systems do not use
20787 TOCs and the like. */
20788 if (!SYMBOL_REF_P (x))
20789 {
20790 output_operand_lossage ("invalid %%z value");
20791 return;
20792 }
20793
20794 /* For macho, check to see if we need a stub. */
20795 if (TARGET_MACHO)
20796 {
20797 const char *name = XSTR (x, 0);
20798 #if TARGET_MACHO
20799 if (darwin_picsymbol_stubs
20800 && MACHOPIC_INDIRECT
20801 && machopic_classify_symbol (x) == MACHOPIC_UNDEFINED_FUNCTION)
20802 name = machopic_indirection_name (x, /*stub_p=*/true);
20803 #endif
20804 assemble_name (file, name);
20805 }
20806 else if (!DOT_SYMBOLS)
20807 assemble_name (file, XSTR (x, 0));
20808 else
20809 rs6000_output_function_entry (file, XSTR (x, 0));
20810 return;
20811
20812 case 'Z':
20813 /* Like 'L', for last word of TImode/PTImode. */
20814 if (REG_P (x))
20815 fputs (reg_names[REGNO (x) + 3], file);
20816 else if (MEM_P (x))
20817 {
20818 machine_mode mode = GET_MODE (x);
20819 if (GET_CODE (XEXP (x, 0)) == PRE_INC
20820 || GET_CODE (XEXP (x, 0)) == PRE_DEC)
20821 output_address (mode, plus_constant (Pmode,
20822 XEXP (XEXP (x, 0), 0), 12));
20823 else if (GET_CODE (XEXP (x, 0)) == PRE_MODIFY)
20824 output_address (mode, plus_constant (Pmode,
20825 XEXP (XEXP (x, 0), 0), 12));
20826 else
20827 output_address (mode, XEXP (adjust_address_nv (x, SImode, 12), 0));
20828 if (small_data_operand (x, GET_MODE (x)))
20829 fprintf (file, "@%s(%s)", SMALL_DATA_RELOC,
20830 reg_names[SMALL_DATA_REG]);
20831 }
20832 return;
20833
20834 /* Print AltiVec memory operand. */
20835 case 'y':
20836 {
20837 rtx tmp;
20838
20839 gcc_assert (MEM_P (x));
20840
20841 tmp = XEXP (x, 0);
20842
20843 if (VECTOR_MEM_ALTIVEC_OR_VSX_P (GET_MODE (x))
20844 && GET_CODE (tmp) == AND
20845 && CONST_INT_P (XEXP (tmp, 1))
20846 && INTVAL (XEXP (tmp, 1)) == -16)
20847 tmp = XEXP (tmp, 0);
20848 else if (VECTOR_MEM_VSX_P (GET_MODE (x))
20849 && GET_CODE (tmp) == PRE_MODIFY)
20850 tmp = XEXP (tmp, 1);
20851 if (REG_P (tmp))
20852 fprintf (file, "0,%s", reg_names[REGNO (tmp)]);
20853 else
20854 {
20855 if (GET_CODE (tmp) != PLUS
20856 || !REG_P (XEXP (tmp, 0))
20857 || !REG_P (XEXP (tmp, 1)))
20858 {
20859 output_operand_lossage ("invalid %%y value, try using the 'Z' constraint");
20860 break;
20861 }
20862
20863 if (REGNO (XEXP (tmp, 0)) == 0)
20864 fprintf (file, "%s,%s", reg_names[ REGNO (XEXP (tmp, 1)) ],
20865 reg_names[ REGNO (XEXP (tmp, 0)) ]);
20866 else
20867 fprintf (file, "%s,%s", reg_names[ REGNO (XEXP (tmp, 0)) ],
20868 reg_names[ REGNO (XEXP (tmp, 1)) ]);
20869 }
20870 break;
20871 }
20872
20873 case 0:
20874 if (REG_P (x))
20875 fprintf (file, "%s", reg_names[REGNO (x)]);
20876 else if (MEM_P (x))
20877 {
20878 /* We need to handle PRE_INC and PRE_DEC here, since we need to
20879 know the width from the mode. */
20880 if (GET_CODE (XEXP (x, 0)) == PRE_INC)
20881 fprintf (file, "%d(%s)", GET_MODE_SIZE (GET_MODE (x)),
20882 reg_names[REGNO (XEXP (XEXP (x, 0), 0))]);
20883 else if (GET_CODE (XEXP (x, 0)) == PRE_DEC)
20884 fprintf (file, "%d(%s)", - GET_MODE_SIZE (GET_MODE (x)),
20885 reg_names[REGNO (XEXP (XEXP (x, 0), 0))]);
20886 else if (GET_CODE (XEXP (x, 0)) == PRE_MODIFY)
20887 output_address (GET_MODE (x), XEXP (XEXP (x, 0), 1));
20888 else
20889 output_address (GET_MODE (x), XEXP (x, 0));
20890 }
20891 else if (toc_relative_expr_p (x, false,
20892 &tocrel_base_oac, &tocrel_offset_oac))
20893 /* This hack along with a corresponding hack in
20894 rs6000_output_addr_const_extra arranges to output addends
20895 where the assembler expects to find them. eg.
20896 (plus (unspec [(symbol_ref ("x")) (reg 2)] tocrel) 4)
20897 without this hack would be output as "x@toc+4". We
20898 want "x+4@toc". */
20899 output_addr_const (file, CONST_CAST_RTX (tocrel_base_oac));
20900 else if (GET_CODE (x) == UNSPEC && XINT (x, 1) == UNSPEC_TLSGD)
20901 output_addr_const (file, XVECEXP (x, 0, 0));
20902 else if (GET_CODE (x) == UNSPEC && XINT (x, 1) == UNSPEC_PLTSEQ)
20903 output_addr_const (file, XVECEXP (x, 0, 1));
20904 else
20905 output_addr_const (file, x);
20906 return;
20907
20908 case '&':
20909 if (const char *name = get_some_local_dynamic_name ())
20910 assemble_name (file, name);
20911 else
20912 output_operand_lossage ("'%%&' used without any "
20913 "local dynamic TLS references");
20914 return;
20915
20916 default:
20917 output_operand_lossage ("invalid %%xn code");
20918 }
20919 }
20920 \f
20921 /* Print the address of an operand. */
20922
20923 void
20924 print_operand_address (FILE *file, rtx x)
20925 {
20926 if (REG_P (x))
20927 fprintf (file, "0(%s)", reg_names[ REGNO (x) ]);
20928
20929 /* Is it a pc-relative address? */
20930 else if (pcrel_address (x, Pmode))
20931 {
20932 HOST_WIDE_INT offset;
20933
20934 if (GET_CODE (x) == CONST)
20935 x = XEXP (x, 0);
20936
20937 if (GET_CODE (x) == PLUS)
20938 {
20939 offset = INTVAL (XEXP (x, 1));
20940 x = XEXP (x, 0);
20941 }
20942 else
20943 offset = 0;
20944
20945 output_addr_const (file, x);
20946
20947 if (offset)
20948 fprintf (file, "%+" PRId64, offset);
20949
20950 fputs ("@pcrel", file);
20951 }
20952 else if (SYMBOL_REF_P (x) || GET_CODE (x) == CONST
20953 || GET_CODE (x) == LABEL_REF)
20954 {
20955 output_addr_const (file, x);
20956 if (small_data_operand (x, GET_MODE (x)))
20957 fprintf (file, "@%s(%s)", SMALL_DATA_RELOC,
20958 reg_names[SMALL_DATA_REG]);
20959 else
20960 gcc_assert (!TARGET_TOC);
20961 }
20962 else if (GET_CODE (x) == PLUS && REG_P (XEXP (x, 0))
20963 && REG_P (XEXP (x, 1)))
20964 {
20965 if (REGNO (XEXP (x, 0)) == 0)
20966 fprintf (file, "%s,%s", reg_names[ REGNO (XEXP (x, 1)) ],
20967 reg_names[ REGNO (XEXP (x, 0)) ]);
20968 else
20969 fprintf (file, "%s,%s", reg_names[ REGNO (XEXP (x, 0)) ],
20970 reg_names[ REGNO (XEXP (x, 1)) ]);
20971 }
20972 else if (GET_CODE (x) == PLUS && REG_P (XEXP (x, 0))
20973 && CONST_INT_P (XEXP (x, 1)))
20974 fprintf (file, HOST_WIDE_INT_PRINT_DEC "(%s)",
20975 INTVAL (XEXP (x, 1)), reg_names[ REGNO (XEXP (x, 0)) ]);
20976 #if TARGET_MACHO
20977 else if (GET_CODE (x) == LO_SUM && REG_P (XEXP (x, 0))
20978 && CONSTANT_P (XEXP (x, 1)))
20979 {
20980 fprintf (file, "lo16(");
20981 output_addr_const (file, XEXP (x, 1));
20982 fprintf (file, ")(%s)", reg_names[ REGNO (XEXP (x, 0)) ]);
20983 }
20984 #endif
20985 #if TARGET_ELF
20986 else if (GET_CODE (x) == LO_SUM && REG_P (XEXP (x, 0))
20987 && CONSTANT_P (XEXP (x, 1)))
20988 {
20989 output_addr_const (file, XEXP (x, 1));
20990 fprintf (file, "@l(%s)", reg_names[ REGNO (XEXP (x, 0)) ]);
20991 }
20992 #endif
20993 else if (toc_relative_expr_p (x, false, &tocrel_base_oac, &tocrel_offset_oac))
20994 {
20995 /* This hack along with a corresponding hack in
20996 rs6000_output_addr_const_extra arranges to output addends
20997 where the assembler expects to find them. eg.
20998 (lo_sum (reg 9)
20999 . (plus (unspec [(symbol_ref ("x")) (reg 2)] tocrel) 8))
21000 without this hack would be output as "x@toc+8@l(9)". We
21001 want "x+8@toc@l(9)". */
21002 output_addr_const (file, CONST_CAST_RTX (tocrel_base_oac));
21003 if (GET_CODE (x) == LO_SUM)
21004 fprintf (file, "@l(%s)", reg_names[REGNO (XEXP (x, 0))]);
21005 else
21006 fprintf (file, "(%s)", reg_names[REGNO (XVECEXP (tocrel_base_oac, 0, 1))]);
21007 }
21008 else
21009 output_addr_const (file, x);
21010 }
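/* E.g. a pc-relative reference to "x" plus 8 prints as "x+8@pcrel",
   and an indexed address (plus (reg 9) (reg 10)) as "9,10".  */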
21011 \f
21012 /* Implement TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA. */
21013
21014 static bool
21015 rs6000_output_addr_const_extra (FILE *file, rtx x)
21016 {
21017 if (GET_CODE (x) == UNSPEC)
21018 switch (XINT (x, 1))
21019 {
21020 case UNSPEC_TOCREL:
21021 gcc_checking_assert (SYMBOL_REF_P (XVECEXP (x, 0, 0))
21022 && REG_P (XVECEXP (x, 0, 1))
21023 && REGNO (XVECEXP (x, 0, 1)) == TOC_REGISTER);
21024 output_addr_const (file, XVECEXP (x, 0, 0));
21025 if (x == tocrel_base_oac && tocrel_offset_oac != const0_rtx)
21026 {
21027 if (INTVAL (tocrel_offset_oac) >= 0)
21028 fprintf (file, "+");
21029 output_addr_const (file, CONST_CAST_RTX (tocrel_offset_oac));
21030 }
21031 if (!TARGET_AIX || (TARGET_ELF && TARGET_MINIMAL_TOC))
21032 {
21033 putc ('-', file);
21034 assemble_name (file, toc_label_name);
21035 need_toc_init = 1;
21036 }
21037 else if (TARGET_ELF)
21038 fputs ("@toc", file);
21039 return true;
21040
21041 #if TARGET_MACHO
21042 case UNSPEC_MACHOPIC_OFFSET:
21043 output_addr_const (file, XVECEXP (x, 0, 0));
21044 putc ('-', file);
21045 machopic_output_function_base_name (file);
21046 return true;
21047 #endif
21048 }
21049 return false;
21050 }
21051 \f
21052 /* Target hook for assembling integer objects. The PowerPC version has
21053 to handle fixup entries for relocatable code if RELOCATABLE_NEEDS_FIXUP
21054 is defined. It also needs to handle DI-mode objects on 64-bit
21055 targets. */
21056
21057 static bool
21058 rs6000_assemble_integer (rtx x, unsigned int size, int aligned_p)
21059 {
21060 #ifdef RELOCATABLE_NEEDS_FIXUP
21061 /* Special handling for SI values. */
21062 if (RELOCATABLE_NEEDS_FIXUP && size == 4 && aligned_p)
21063 {
21064 static int recurse = 0;
21065
21066 /* For -mrelocatable, we mark all addresses that need to be fixed up in
21067 the .fixup section. Since the TOC section is already relocated, we
21068 don't need to mark it here. We used to skip the text section, but it
21069 should never be valid for relocated addresses to be placed in the text
21070 section. */
21071 if (DEFAULT_ABI == ABI_V4
21072 && (TARGET_RELOCATABLE || flag_pic > 1)
21073 && in_section != toc_section
21074 && !recurse
21075 && !CONST_SCALAR_INT_P (x)
21076 && CONSTANT_P (x))
21077 {
21078 char buf[256];
21079
21080 recurse = 1;
21081 ASM_GENERATE_INTERNAL_LABEL (buf, "LCP", fixuplabelno);
21082 fixuplabelno++;
21083 ASM_OUTPUT_LABEL (asm_out_file, buf);
21084 fprintf (asm_out_file, "\t.long\t(");
21085 output_addr_const (asm_out_file, x);
21086 fprintf (asm_out_file, ")@fixup\n");
21087 fprintf (asm_out_file, "\t.section\t\".fixup\",\"aw\"\n");
21088 ASM_OUTPUT_ALIGN (asm_out_file, 2);
21089 fprintf (asm_out_file, "\t.long\t");
21090 assemble_name (asm_out_file, buf);
21091 fprintf (asm_out_file, "\n\t.previous\n");
21092 recurse = 0;
21093 return true;
21094 }
21095 /* Remove initial .'s to turn a -mcall-aixdesc function
21096 address into the address of the descriptor, not the function
21097 itself. */
21098 else if (SYMBOL_REF_P (x)
21099 && XSTR (x, 0)[0] == '.'
21100 && DEFAULT_ABI == ABI_AIX)
21101 {
21102 const char *name = XSTR (x, 0);
21103 while (*name == '.')
21104 name++;
21105
21106 fprintf (asm_out_file, "\t.long\t%s\n", name);
21107 return true;
21108 }
21109 }
21110 #endif /* RELOCATABLE_NEEDS_FIXUP */
21111 return default_assemble_integer (x, size, aligned_p);
21112 }
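/* For -mrelocatable the fixup path above emits something like
	.LCP0:
		.long (sym)@fixup
		.section ".fixup","aw"
		.align 2
		.long .LCP0
		.previous
   assuming internal labels of the ".LCP<n>" form.  */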
21113
21114 /* Return a template string for assembly to emit when making an
21115 external call. FUNOP is the call mem argument operand number. */
21116
21117 static const char *
21118 rs6000_call_template_1 (rtx *operands, unsigned int funop, bool sibcall)
21119 {
21120 /* -Wformat-overflow workaround, without which gcc thinks that %u
21121 might produce 10 digits. */
21122 gcc_assert (funop <= MAX_RECOG_OPERANDS);
21123
21124 char arg[12];
21125 arg[0] = 0;
21126 if (TARGET_TLS_MARKERS && GET_CODE (operands[funop + 1]) == UNSPEC)
21127 {
21128 if (XINT (operands[funop + 1], 1) == UNSPEC_TLSGD)
21129 sprintf (arg, "(%%%u@tlsgd)", funop + 1);
21130 else if (XINT (operands[funop + 1], 1) == UNSPEC_TLSLD)
21131 sprintf (arg, "(%%&@tlsld)");
21132 else
21133 gcc_unreachable ();
21134 }
21135
21136 /* The magic 32768 offset here corresponds to the offset of
21137 r30 in .got2, as given by LCTOC1. See sysv4.h:toc_section. */
21138 char z[11];
21139 sprintf (z, "%%z%u%s", funop,
21140 (DEFAULT_ABI == ABI_V4 && TARGET_SECURE_PLT && flag_pic == 2
21141 ? "+32768" : ""));
21142
21143 static char str[32]; /* 1 spare */
21144 if (rs6000_pcrel_p (cfun))
21145 sprintf (str, "b%s %s@notoc%s", sibcall ? "" : "l", z, arg);
21146 else if (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
21147 sprintf (str, "b%s %s%s%s", sibcall ? "" : "l", z, arg,
21148 sibcall ? "" : "\n\tnop");
21149 else if (DEFAULT_ABI == ABI_V4)
21150 sprintf (str, "b%s %s%s%s", sibcall ? "" : "l", z, arg,
21151 flag_pic ? "@plt" : "");
21152 #if TARGET_MACHO
21153 /* If/when we remove the mlongcall opt, we can share the AIX/ELFv2 case. */
21154 else if (DEFAULT_ABI == ABI_DARWIN)
21155 {
21156 /* The cookie is in operand func+2. */
21157 gcc_checking_assert (GET_CODE (operands[funop + 2]) == CONST_INT);
21158 int cookie = INTVAL (operands[funop + 2]);
21159 if (cookie & CALL_LONG)
21160 {
21161 tree funname = get_identifier (XSTR (operands[funop], 0));
21162 tree labelname = get_prev_label (funname);
21163 gcc_checking_assert (labelname && !sibcall);
21164
21165 /* "jbsr foo, L42" is Mach-O for "Link as 'bl foo' if a 'bl'
21166 instruction will reach 'foo', otherwise link as 'bl L42'".
21167 "L42" should be a 'branch island', that will do a far jump to
21168 'foo'. Branch islands are generated in
21169 macho_branch_islands(). */
21170 sprintf (str, "jbsr %%z%u,%.10s", funop,
21171 IDENTIFIER_POINTER (labelname));
21172 }
21173 else
21174 /* Same as AIX or ELFv2, except to keep backwards compat, no nop
21175 after the call. */
21176 sprintf (str, "b%s %s%s", sibcall ? "" : "l", z, arg);
21177 }
21178 #endif
21179 else
21180 gcc_unreachable ();
21181 return str;
21182 }
21183
21184 const char *
21185 rs6000_call_template (rtx *operands, unsigned int funop)
21186 {
21187 return rs6000_call_template_1 (operands, funop, false);
21188 }
21189
21190 const char *
21191 rs6000_sibcall_template (rtx *operands, unsigned int funop)
21192 {
21193 return rs6000_call_template_1 (operands, funop, true);
21194 }
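/* E.g. a call to "foo" comes out as "bl foo\n\tnop" on AIX/ELFv2
   (the nop is the TOC-restore slot), "bl foo@plt" for pic ABI_V4,
   and "bl foo@notoc" when pc-relative addressing is in use.  */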
21195
21196 /* As above, for indirect calls. */
21197
21198 static const char *
21199 rs6000_indirect_call_template_1 (rtx *operands, unsigned int funop,
21200 bool sibcall)
21201 {
21202 /* -Wformat-overflow workaround, without which gcc thinks that %u
21203 might produce 10 digits. Note that -Wformat-overflow will not
21204 currently warn here for str[], so do not rely on a warning to
21205 ensure str[] is correctly sized. */
21206 gcc_assert (funop <= MAX_RECOG_OPERANDS);
21207
21208 /* Currently, funop is either 0 or 1. The maximum string is always
21209 a !speculate 64-bit __tls_get_addr call.
21210
21211 ABI_ELFv2, pcrel:
21212 . 27 .reloc .,R_PPC64_TLSGD,%2\n\t
21213 . 35 .reloc .,R_PPC64_PLTSEQ_NOTOC,%z1\n\t
21214 . 9 crset 2\n\t
21215 . 27 .reloc .,R_PPC64_TLSGD,%2\n\t
21216 . 36 .reloc .,R_PPC64_PLTCALL_NOTOC,%z1\n\t
21217 . 8 beq%T1l-
21218 .---
21219 .142
21220
21221 ABI_AIX:
21222 . 9 ld 2,%3\n\t
21223 . 27 .reloc .,R_PPC64_TLSGD,%2\n\t
21224 . 29 .reloc .,R_PPC64_PLTSEQ,%z1\n\t
21225 . 9 crset 2\n\t
21226 . 27 .reloc .,R_PPC64_TLSGD,%2\n\t
21227 . 30 .reloc .,R_PPC64_PLTCALL,%z1\n\t
21228 . 10 beq%T1l-\n\t
21229 . 10 ld 2,%4(1)
21230 .---
21231 .151
21232
21233 ABI_ELFv2:
21234 . 27 .reloc .,R_PPC64_TLSGD,%2\n\t
21235 . 29 .reloc .,R_PPC64_PLTSEQ,%z1\n\t
21236 . 9 crset 2\n\t
21237 . 27 .reloc .,R_PPC64_TLSGD,%2\n\t
21238 . 30 .reloc .,R_PPC64_PLTCALL,%z1\n\t
21239 . 10 beq%T1l-\n\t
21240 . 10 ld 2,%3(1)
21241 .---
21242 .142
21243
21244 ABI_V4:
21245 . 27 .reloc .,R_PPC64_TLSGD,%2\n\t
21246 . 35 .reloc .,R_PPC64_PLTSEQ,%z1+32768\n\t
21247 . 9 crset 2\n\t
21248 . 27 .reloc .,R_PPC64_TLSGD,%2\n\t
21249 . 36 .reloc .,R_PPC64_PLTCALL,%z1+32768\n\t
21250 . 8 beq%T1l-
21251 .---
21252 .141 */
21253 static char str[160]; /* 8 spare */
21254 char *s = str;
21255 const char *ptrload = TARGET_64BIT ? "d" : "wz";
21256
21257 if (DEFAULT_ABI == ABI_AIX)
21258 s += sprintf (s,
21259 "l%s 2,%%%u\n\t",
21260 ptrload, funop + 2);
21261
21262 /* We don't need the extra code to stop indirect call speculation if
21263 calling via LR. */
21264 bool speculate = (TARGET_MACHO
21265 || rs6000_speculate_indirect_jumps
21266 || (REG_P (operands[funop])
21267 && REGNO (operands[funop]) == LR_REGNO));
21268
21269 if (TARGET_PLTSEQ && GET_CODE (operands[funop]) == UNSPEC)
21270 {
21271 const char *rel64 = TARGET_64BIT ? "64" : "";
21272 char tls[29];
21273 tls[0] = 0;
21274 if (TARGET_TLS_MARKERS && GET_CODE (operands[funop + 1]) == UNSPEC)
21275 {
21276 if (XINT (operands[funop + 1], 1) == UNSPEC_TLSGD)
21277 sprintf (tls, ".reloc .,R_PPC%s_TLSGD,%%%u\n\t",
21278 rel64, funop + 1);
21279 else if (XINT (operands[funop + 1], 1) == UNSPEC_TLSLD)
21280 sprintf (tls, ".reloc .,R_PPC%s_TLSLD,%%&\n\t",
21281 rel64);
21282 else
21283 gcc_unreachable ();
21284 }
21285
21286 const char *notoc = rs6000_pcrel_p (cfun) ? "_NOTOC" : "";
21287 const char *addend = (DEFAULT_ABI == ABI_V4 && TARGET_SECURE_PLT
21288 && flag_pic == 2 ? "+32768" : "");
21289 if (!speculate)
21290 {
21291 s += sprintf (s,
21292 "%s.reloc .,R_PPC%s_PLTSEQ%s,%%z%u%s\n\t",
21293 tls, rel64, notoc, funop, addend);
21294 s += sprintf (s, "crset 2\n\t");
21295 }
21296 s += sprintf (s,
21297 "%s.reloc .,R_PPC%s_PLTCALL%s,%%z%u%s\n\t",
21298 tls, rel64, notoc, funop, addend);
21299 }
21300 else if (!speculate)
21301 s += sprintf (s, "crset 2\n\t");
21302
21303 if (rs6000_pcrel_p (cfun))
21304 {
21305 if (speculate)
21306 sprintf (s, "b%%T%ul", funop);
21307 else
21308 sprintf (s, "beq%%T%ul-", funop);
21309 }
21310 else if (DEFAULT_ABI == ABI_AIX)
21311 {
21312 if (speculate)
21313 sprintf (s,
21314 "b%%T%ul\n\t"
21315 "l%s 2,%%%u(1)",
21316 funop, ptrload, funop + 3);
21317 else
21318 sprintf (s,
21319 "beq%%T%ul-\n\t"
21320 "l%s 2,%%%u(1)",
21321 funop, ptrload, funop + 3);
21322 }
21323 else if (DEFAULT_ABI == ABI_ELFv2)
21324 {
21325 if (speculate)
21326 sprintf (s,
21327 "b%%T%ul\n\t"
21328 "l%s 2,%%%u(1)",
21329 funop, ptrload, funop + 2);
21330 else
21331 sprintf (s,
21332 "beq%%T%ul-\n\t"
21333 "l%s 2,%%%u(1)",
21334 funop, ptrload, funop + 2);
21335 }
21336 else
21337 {
21338 if (speculate)
21339 sprintf (s,
21340 "b%%T%u%s",
21341 funop, sibcall ? "" : "l");
21342 else
21343 sprintf (s,
21344 "beq%%T%u%s-%s",
21345 funop, sibcall ? "" : "l", sibcall ? "\n\tb $" : "");
21346 }
21347 return str;
21348 }
21349
21350 const char *
21351 rs6000_indirect_call_template (rtx *operands, unsigned int funop)
21352 {
21353 return rs6000_indirect_call_template_1 (operands, funop, false);
21354 }
21355
21356 const char *
21357 rs6000_indirect_sibcall_template (rtx *operands, unsigned int funop)
21358 {
21359 return rs6000_indirect_call_template_1 (operands, funop, true);
21360 }
21361
21362 #if HAVE_AS_PLTSEQ
21363 /* Output indirect call insns. WHICH identifies the type of sequence. */
21364 const char *
21365 rs6000_pltseq_template (rtx *operands, int which)
21366 {
21367 const char *rel64 = TARGET_64BIT ? "64" : "";
21368 char tls[30];
21369 tls[0] = 0;
21370 if (TARGET_TLS_MARKERS && GET_CODE (operands[3]) == UNSPEC)
21371 {
21372 char off = which == RS6000_PLTSEQ_PLT_PCREL34 ? '8' : '4';
21373 if (XINT (operands[3], 1) == UNSPEC_TLSGD)
21374 sprintf (tls, ".reloc .-%c,R_PPC%s_TLSGD,%%3\n\t",
21375 off, rel64);
21376 else if (XINT (operands[3], 1) == UNSPEC_TLSLD)
21377 sprintf (tls, ".reloc .-%c,R_PPC%s_TLSLD,%%&\n\t",
21378 off, rel64);
21379 else
21380 gcc_unreachable ();
21381 }
21382
21383 gcc_assert (DEFAULT_ABI == ABI_ELFv2 || DEFAULT_ABI == ABI_V4);
21384 static char str[96]; /* 10 spare */
21385 char off = WORDS_BIG_ENDIAN ? '2' : '4';
21386 const char *addend = (DEFAULT_ABI == ABI_V4 && TARGET_SECURE_PLT
21387 && flag_pic == 2 ? "+32768" : "");
21388 switch (which)
21389 {
21390 case RS6000_PLTSEQ_TOCSAVE:
21391 sprintf (str,
21392 "st%s\n\t"
21393 "%s.reloc .-4,R_PPC%s_PLTSEQ,%%z2",
21394 TARGET_64BIT ? "d 2,24(1)" : "w 2,12(1)",
21395 tls, rel64);
21396 break;
21397 case RS6000_PLTSEQ_PLT16_HA:
21398 if (DEFAULT_ABI == ABI_V4 && !flag_pic)
21399 sprintf (str,
21400 "lis %%0,0\n\t"
21401 "%s.reloc .-%c,R_PPC%s_PLT16_HA,%%z2",
21402 tls, off, rel64);
21403 else
21404 sprintf (str,
21405 "addis %%0,%%1,0\n\t"
21406 "%s.reloc .-%c,R_PPC%s_PLT16_HA,%%z2%s",
21407 tls, off, rel64, addend);
21408 break;
21409 case RS6000_PLTSEQ_PLT16_LO:
21410 sprintf (str,
21411 "l%s %%0,0(%%1)\n\t"
21412 "%s.reloc .-%c,R_PPC%s_PLT16_LO%s,%%z2%s",
21413 TARGET_64BIT ? "d" : "wz",
21414 tls, off, rel64, TARGET_64BIT ? "_DS" : "", addend);
21415 break;
21416 case RS6000_PLTSEQ_MTCTR:
21417 sprintf (str,
21418 "mtctr %%1\n\t"
21419 "%s.reloc .-4,R_PPC%s_PLTSEQ,%%z2%s",
21420 tls, rel64, addend);
21421 break;
21422 case RS6000_PLTSEQ_PLT_PCREL34:
21423 sprintf (str,
21424 "pl%s %%0,0(0),1\n\t"
21425 "%s.reloc .-8,R_PPC%s_PLT_PCREL34_NOTOC,%%z2",
21426 TARGET_64BIT ? "d" : "wz",
21427 tls, rel64);
21428 break;
21429 default:
21430 gcc_unreachable ();
21431 }
21432 return str;
21433 }
21434 #endif
21435
21436 /* Helper function to return whether a MODE can do prefixed loads/stores.
21437 VOIDmode is used when we are loading the pc-relative address into a base
21438 register, but we are not using it as part of a memory operation. As modes
21439 add support for prefixed memory, they will be added here. */
21440
21441 static bool
21442 mode_supports_prefixed_address_p (machine_mode mode)
21443 {
21444 return mode == VOIDmode;
21445 }
21446
21447 /* Function to return true if ADDR is a valid prefixed memory address that uses
21448 mode MODE. */
21449
21450 bool
21451 rs6000_prefixed_address (rtx addr, machine_mode mode)
21452 {
21453 if (!TARGET_PREFIXED_ADDR || !mode_supports_prefixed_address_p (mode))
21454 return false;
21455
21456 /* Check for PC-relative addresses. */
21457 if (pcrel_address (addr, Pmode))
21458 return true;
21459
21460 /* Check for prefixed memory addresses that have a large numeric offset,
21461 or an offset that can't be used for a DS/DQ-form memory operation. */
21462 if (GET_CODE (addr) == PLUS)
21463 {
21464 rtx op0 = XEXP (addr, 0);
21465 rtx op1 = XEXP (addr, 1);
21466
21467 if (!base_reg_operand (op0, Pmode) || !CONST_INT_P (op1))
21468 return false;
21469
21470 HOST_WIDE_INT value = INTVAL (op1);
21471 if (!SIGNED_34BIT_OFFSET_P (value, 0))
21472 return false;
21473
21474 /* Offset larger than 16-bits? */
21475 if (!SIGNED_16BIT_OFFSET_P (value, 0))
21476 return true;
21477
21478 /* DQ instruction (bottom 4 bits must be 0) for vectors. */
21479 HOST_WIDE_INT mask;
21480 if (GET_MODE_SIZE (mode) >= 16)
21481 mask = 15;
21482
21483 /* DS instruction (bottom 2 bits must be 0). For 32-bit integers, we
21484 need to use DS instructions if we are sign-extending the value with
21485 LWA. For 32-bit floating point, we need DS instructions to load and
21486 store values to the traditional Altivec registers. */
21487 else if (GET_MODE_SIZE (mode) >= 4)
21488 mask = 3;
21489
21490 /* QImode/HImode has no restrictions. */
21491 else
21492 return true;
21493
21494 /* Return true if we must use a prefixed instruction. */
21495 return (value & mask) != 0;
21496 }
21497
21498 return false;
21499 }
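/* E.g. for DImode (a DS-form mask of 3) an offset of 6 needs a
   prefixed instruction because its low 2 bits are nonzero, while an
   offset of 8 does not; any offset outside the signed 16-bit range
   but within 34 bits is prefixed regardless of alignment.  */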
21500 \f
21501 #if defined (HAVE_GAS_HIDDEN) && !TARGET_MACHO
21502 /* Emit an assembler directive to set symbol visibility for DECL to
21503 VISIBILITY_TYPE. */
21504
21505 static void
21506 rs6000_assemble_visibility (tree decl, int vis)
21507 {
21508 if (TARGET_XCOFF)
21509 return;
21510
21511 /* Functions need to have their entry point symbol visibility set as
21512 well as their descriptor symbol visibility. */
21513 if (DEFAULT_ABI == ABI_AIX
21514 && DOT_SYMBOLS
21515 && TREE_CODE (decl) == FUNCTION_DECL)
21516 {
21517 static const char * const visibility_types[] = {
21518 NULL, "protected", "hidden", "internal"
21519 };
21520
21521 const char *name, *type;
21522
21523 name = ((* targetm.strip_name_encoding)
21524 (IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl))));
21525 type = visibility_types[vis];
21526
21527 fprintf (asm_out_file, "\t.%s\t%s\n", type, name);
21528 fprintf (asm_out_file, "\t.%s\t.%s\n", type, name);
21529 }
21530 else
21531 default_assemble_visibility (decl, vis);
21532 }
21533 #endif
21534 \f
21535 enum rtx_code
21536 rs6000_reverse_condition (machine_mode mode, enum rtx_code code)
21537 {
21538 /* Reversal of FP compares needs care -- an ordered compare
21539 becomes an unordered compare and vice versa. */
21540 if (mode == CCFPmode
21541 && (!flag_finite_math_only
21542 || code == UNLT || code == UNLE || code == UNGT || code == UNGE
21543 || code == UNEQ || code == LTGT))
21544 return reverse_condition_maybe_unordered (code);
21545 else
21546 return reverse_condition (code);
21547 }
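/* E.g. reversing UNLT under CCFPmode yields GE via
   reverse_condition_maybe_unordered, so NaN operands still branch
   the right way; a plain integer LT simply becomes GE.  */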
21548
21549 /* Generate a compare for CODE. Return a brand-new rtx that
21550 represents the result of the compare. */
21551
21552 static rtx
21553 rs6000_generate_compare (rtx cmp, machine_mode mode)
21554 {
21555 machine_mode comp_mode;
21556 rtx compare_result;
21557 enum rtx_code code = GET_CODE (cmp);
21558 rtx op0 = XEXP (cmp, 0);
21559 rtx op1 = XEXP (cmp, 1);
21560
21561 if (!TARGET_FLOAT128_HW && FLOAT128_VECTOR_P (mode))
21562 comp_mode = CCmode;
21563 else if (FLOAT_MODE_P (mode))
21564 comp_mode = CCFPmode;
21565 else if (code == GTU || code == LTU
21566 || code == GEU || code == LEU)
21567 comp_mode = CCUNSmode;
21568 else if ((code == EQ || code == NE)
21569 && unsigned_reg_p (op0)
21570 && (unsigned_reg_p (op1)
21571 || (CONST_INT_P (op1) && INTVAL (op1) != 0)))
21572 /* These are unsigned values, perhaps there will be a later
21573 ordering compare that can be shared with this one. */
21574 comp_mode = CCUNSmode;
21575 else
21576 comp_mode = CCmode;
21577
21578 /* If we have an unsigned compare, make sure we don't have a signed value as
21579 an immediate. */
21580 if (comp_mode == CCUNSmode && CONST_INT_P (op1)
21581 && INTVAL (op1) < 0)
21582 {
21583 op0 = copy_rtx_if_shared (op0);
21584 op1 = force_reg (GET_MODE (op0), op1);
21585 cmp = gen_rtx_fmt_ee (code, GET_MODE (cmp), op0, op1);
21586 }
21587
21588 /* First, the compare. */
21589 compare_result = gen_reg_rtx (comp_mode);
21590
21591 /* IEEE 128-bit support in VSX registers when we do not have hardware
21592 support. */
21593 if (!TARGET_FLOAT128_HW && FLOAT128_VECTOR_P (mode))
21594 {
21595 rtx libfunc = NULL_RTX;
21596 bool check_nan = false;
21597 rtx dest;
21598
21599 switch (code)
21600 {
21601 case EQ:
21602 case NE:
21603 libfunc = optab_libfunc (eq_optab, mode);
21604 break;
21605
21606 case GT:
21607 case GE:
21608 libfunc = optab_libfunc (ge_optab, mode);
21609 break;
21610
21611 case LT:
21612 case LE:
21613 libfunc = optab_libfunc (le_optab, mode);
21614 break;
21615
21616 case UNORDERED:
21617 case ORDERED:
21618 libfunc = optab_libfunc (unord_optab, mode);
21619 code = (code == UNORDERED) ? NE : EQ;
21620 break;
21621
21622 case UNGE:
21623 case UNGT:
21624 check_nan = true;
21625 libfunc = optab_libfunc (ge_optab, mode);
21626 code = (code == UNGE) ? GE : GT;
21627 break;
21628
21629 case UNLE:
21630 case UNLT:
21631 check_nan = true;
21632 libfunc = optab_libfunc (le_optab, mode);
21633 code = (code == UNLE) ? LE : LT;
21634 break;
21635
21636 case UNEQ:
21637 case LTGT:
21638 check_nan = true;
21639 libfunc = optab_libfunc (eq_optab, mode);
21640 code = (code == UNEQ) ? EQ : NE;
21641 break;
21642
21643 default:
21644 gcc_unreachable ();
21645 }
21646
21647 gcc_assert (libfunc);
21648
21649 if (!check_nan)
21650 dest = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST,
21651 SImode, op0, mode, op1, mode);
21652
21653 /* The library signals an exception for signalling NaNs, so we need to
21654 handle isgreater, etc. by first checking isordered. */
21655 else
21656 {
21657 rtx ne_rtx, normal_dest, unord_dest;
21658 rtx unord_func = optab_libfunc (unord_optab, mode);
21659 rtx join_label = gen_label_rtx ();
21660 rtx join_ref = gen_rtx_LABEL_REF (VOIDmode, join_label);
21661 rtx unord_cmp = gen_reg_rtx (comp_mode);
21662
21663
21664 /* Test for either value being a NaN. */
21665 gcc_assert (unord_func);
21666 unord_dest = emit_library_call_value (unord_func, NULL_RTX, LCT_CONST,
21667 SImode, op0, mode, op1, mode);
21668
21669 /* Set DEST to 1 if either value is a NaN, then jump to the join
21670 label. */
21671 dest = gen_reg_rtx (SImode);
21672 emit_move_insn (dest, const1_rtx);
21673 emit_insn (gen_rtx_SET (unord_cmp,
21674 gen_rtx_COMPARE (comp_mode, unord_dest,
21675 const0_rtx)));
21676
21677 ne_rtx = gen_rtx_NE (comp_mode, unord_cmp, const0_rtx);
21678 emit_jump_insn (gen_rtx_SET (pc_rtx,
21679 gen_rtx_IF_THEN_ELSE (VOIDmode, ne_rtx,
21680 join_ref,
21681 pc_rtx)));
21682
21683 /* Do the normal comparison, knowing that the values are not
21684 NaNs. */
21685 normal_dest = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST,
21686 SImode, op0, mode, op1, mode);
21687
21688 emit_insn (gen_cstoresi4 (dest,
21689 gen_rtx_fmt_ee (code, SImode, normal_dest,
21690 const0_rtx),
21691 normal_dest, const0_rtx));
21692
21693 /* Join NaN and non-NaN paths. Compare dest against 0. */
21694 emit_label (join_label);
21695 code = NE;
21696 }
21697
21698 emit_insn (gen_rtx_SET (compare_result,
21699 gen_rtx_COMPARE (comp_mode, dest, const0_rtx)));
21700 }
21701
21702 else
21703 {
21704 /* Generate XLC-compatible TFmode compare as PARALLEL with extra
21705 CLOBBERs to match cmptf_internal2 pattern. */
21706 if (comp_mode == CCFPmode && TARGET_XL_COMPAT
21707 && FLOAT128_IBM_P (GET_MODE (op0))
21708 && TARGET_HARD_FLOAT)
21709 emit_insn (gen_rtx_PARALLEL (VOIDmode,
21710 gen_rtvec (10,
21711 gen_rtx_SET (compare_result,
21712 gen_rtx_COMPARE (comp_mode, op0, op1)),
21713 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
21714 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
21715 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
21716 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
21717 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
21718 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
21719 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
21720 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
21721 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (Pmode)))));
21722 else if (GET_CODE (op1) == UNSPEC
21723 && XINT (op1, 1) == UNSPEC_SP_TEST)
21724 {
21725 rtx op1b = XVECEXP (op1, 0, 0);
21726 comp_mode = CCEQmode;
21727 compare_result = gen_reg_rtx (CCEQmode);
21728 if (TARGET_64BIT)
21729 emit_insn (gen_stack_protect_testdi (compare_result, op0, op1b));
21730 else
21731 emit_insn (gen_stack_protect_testsi (compare_result, op0, op1b));
21732 }
21733 else
21734 emit_insn (gen_rtx_SET (compare_result,
21735 gen_rtx_COMPARE (comp_mode, op0, op1)));
21736 }
21737
21738 /* Some kinds of FP comparisons need an OR operation;
21739 under flag_finite_math_only we don't bother. */
21740 if (FLOAT_MODE_P (mode)
21741 && (!FLOAT128_IEEE_P (mode) || TARGET_FLOAT128_HW)
21742 && !flag_finite_math_only
21743 && (code == LE || code == GE
21744 || code == UNEQ || code == LTGT
21745 || code == UNGT || code == UNLT))
21746 {
21747 enum rtx_code or1, or2;
21748 rtx or1_rtx, or2_rtx, compare2_rtx;
21749 rtx or_result = gen_reg_rtx (CCEQmode);
21750
21751 switch (code)
21752 {
21753 case LE: or1 = LT; or2 = EQ; break;
21754 case GE: or1 = GT; or2 = EQ; break;
21755 case UNEQ: or1 = UNORDERED; or2 = EQ; break;
21756 case LTGT: or1 = LT; or2 = GT; break;
21757 case UNGT: or1 = UNORDERED; or2 = GT; break;
21758 case UNLT: or1 = UNORDERED; or2 = LT; break;
21759 default: gcc_unreachable ();
21760 }
21761 validate_condition_mode (or1, comp_mode);
21762 validate_condition_mode (or2, comp_mode);
21763 or1_rtx = gen_rtx_fmt_ee (or1, SImode, compare_result, const0_rtx);
21764 or2_rtx = gen_rtx_fmt_ee (or2, SImode, compare_result, const0_rtx);
21765 compare2_rtx = gen_rtx_COMPARE (CCEQmode,
21766 gen_rtx_IOR (SImode, or1_rtx, or2_rtx),
21767 const_true_rtx);
21768 emit_insn (gen_rtx_SET (or_result, compare2_rtx));
21769
21770 compare_result = or_result;
21771 code = EQ;
21772 }
21773
21774 validate_condition_mode (code, GET_MODE (compare_result));
21775
21776 return gen_rtx_fmt_ee (code, VOIDmode, compare_result, const0_rtx);
21777 }
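/* E.g. a floating-point LE comparison is built above as the OR of
   the LT and EQ CR bits (a cror), and the final branch then tests
   the single CCEQ result.  */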
21778
21779 \f
21780 /* Return the diagnostic message string if the binary operation OP is
21781 not permitted on TYPE1 and TYPE2, NULL otherwise. */
21782
21783 static const char*
21784 rs6000_invalid_binary_op (int op ATTRIBUTE_UNUSED,
21785 const_tree type1,
21786 const_tree type2)
21787 {
21788 machine_mode mode1 = TYPE_MODE (type1);
21789 machine_mode mode2 = TYPE_MODE (type2);
21790
21791 /* For complex modes, use the inner type. */
21792 if (COMPLEX_MODE_P (mode1))
21793 mode1 = GET_MODE_INNER (mode1);
21794
21795 if (COMPLEX_MODE_P (mode2))
21796 mode2 = GET_MODE_INNER (mode2);
21797
21798 /* Don't allow IEEE 754R 128-bit binary floating point and IBM extended
21799 double to intermix unless -mfloat128-convert. */
21800 if (mode1 == mode2)
21801 return NULL;
21802
21803 if (!TARGET_FLOAT128_CVT)
21804 {
21805 if ((mode1 == KFmode && mode2 == IFmode)
21806 || (mode1 == IFmode && mode2 == KFmode))
21807 return N_("__float128 and __ibm128 cannot be used in the same "
21808 "expression");
21809
21810 if (TARGET_IEEEQUAD
21811 && ((mode1 == IFmode && mode2 == TFmode)
21812 || (mode1 == TFmode && mode2 == IFmode)))
21813 return N_("__ibm128 and long double cannot be used in the same "
21814 "expression");
21815
21816 if (!TARGET_IEEEQUAD
21817 && ((mode1 == KFmode && mode2 == TFmode)
21818 || (mode1 == TFmode && mode2 == KFmode)))
21819 return N_("__float128 and long double cannot be used in the same "
21820 "expression");
21821 }
21822
21823 return NULL;
21824 }
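/* E.g. adding a __float128 value to an IBM-format long double
   without -mfloat128-convert is rejected with the "__float128 and
   long double cannot be used in the same expression" message.  */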
21825
21826 \f
21827 /* Expand floating point conversion to/from __float128 and __ibm128. */
21828
21829 void
21830 rs6000_expand_float128_convert (rtx dest, rtx src, bool unsigned_p)
21831 {
21832 machine_mode dest_mode = GET_MODE (dest);
21833 machine_mode src_mode = GET_MODE (src);
21834 convert_optab cvt = unknown_optab;
21835 bool do_move = false;
21836 rtx libfunc = NULL_RTX;
21837 rtx dest2;
21838 typedef rtx (*rtx_2func_t) (rtx, rtx);
21839 rtx_2func_t hw_convert = (rtx_2func_t)0;
21840 size_t kf_or_tf;
21841
21842 struct hw_conv_t {
21843 rtx_2func_t from_df;
21844 rtx_2func_t from_sf;
21845 rtx_2func_t from_si_sign;
21846 rtx_2func_t from_si_uns;
21847 rtx_2func_t from_di_sign;
21848 rtx_2func_t from_di_uns;
21849 rtx_2func_t to_df;
21850 rtx_2func_t to_sf;
21851 rtx_2func_t to_si_sign;
21852 rtx_2func_t to_si_uns;
21853 rtx_2func_t to_di_sign;
21854 rtx_2func_t to_di_uns;
21855 } hw_conversions[2] = {
21856 /* conversions to/from KFmode */
21857 {
21858 gen_extenddfkf2_hw, /* KFmode <- DFmode. */
21859 gen_extendsfkf2_hw, /* KFmode <- SFmode. */
21860 gen_float_kfsi2_hw, /* KFmode <- SImode (signed). */
21861 gen_floatuns_kfsi2_hw, /* KFmode <- SImode (unsigned). */
21862 gen_float_kfdi2_hw, /* KFmode <- DImode (signed). */
21863 gen_floatuns_kfdi2_hw, /* KFmode <- DImode (unsigned). */
21864 gen_trunckfdf2_hw, /* DFmode <- KFmode. */
21865 gen_trunckfsf2_hw, /* SFmode <- KFmode. */
21866 gen_fix_kfsi2_hw, /* SImode <- KFmode (signed). */
21867 gen_fixuns_kfsi2_hw, /* SImode <- KFmode (unsigned). */
21868 gen_fix_kfdi2_hw, /* DImode <- KFmode (signed). */
21869 gen_fixuns_kfdi2_hw, /* DImode <- KFmode (unsigned). */
21870 },
21871
21872 /* conversions to/from TFmode */
21873 {
21874 gen_extenddftf2_hw, /* TFmode <- DFmode. */
21875 gen_extendsftf2_hw, /* TFmode <- SFmode. */
21876 gen_float_tfsi2_hw, /* TFmode <- SImode (signed). */
21877 gen_floatuns_tfsi2_hw, /* TFmode <- SImode (unsigned). */
21878 gen_float_tfdi2_hw, /* TFmode <- DImode (signed). */
21879 gen_floatuns_tfdi2_hw, /* TFmode <- DImode (unsigned). */
21880 gen_trunctfdf2_hw, /* DFmode <- TFmode. */
21881 gen_trunctfsf2_hw, /* SFmode <- TFmode. */
21882 gen_fix_tfsi2_hw, /* SImode <- TFmode (signed). */
21883 gen_fixuns_tfsi2_hw, /* SImode <- TFmode (unsigned). */
21884 gen_fix_tfdi2_hw, /* DImode <- TFmode (signed). */
21885 gen_fixuns_tfdi2_hw, /* DImode <- TFmode (unsigned). */
21886 },
21887 };
21888
21889 if (dest_mode == src_mode)
21890 gcc_unreachable ();
21891
21892 /* Eliminate memory operations. */
21893 if (MEM_P (src))
21894 src = force_reg (src_mode, src);
21895
21896 if (MEM_P (dest))
21897 {
21898 rtx tmp = gen_reg_rtx (dest_mode);
21899 rs6000_expand_float128_convert (tmp, src, unsigned_p);
21900 rs6000_emit_move (dest, tmp, dest_mode);
21901 return;
21902 }
21903
21904 /* Convert to IEEE 128-bit floating point. */
21905 if (FLOAT128_IEEE_P (dest_mode))
21906 {
21907 if (dest_mode == KFmode)
21908 kf_or_tf = 0;
21909 else if (dest_mode == TFmode)
21910 kf_or_tf = 1;
21911 else
21912 gcc_unreachable ();
21913
21914 switch (src_mode)
21915 {
21916 case E_DFmode:
21917 cvt = sext_optab;
21918 hw_convert = hw_conversions[kf_or_tf].from_df;
21919 break;
21920
21921 case E_SFmode:
21922 cvt = sext_optab;
21923 hw_convert = hw_conversions[kf_or_tf].from_sf;
21924 break;
21925
21926 case E_KFmode:
21927 case E_IFmode:
21928 case E_TFmode:
21929 if (FLOAT128_IBM_P (src_mode))
21930 cvt = sext_optab;
21931 else
21932 do_move = true;
21933 break;
21934
21935 case E_SImode:
21936 if (unsigned_p)
21937 {
21938 cvt = ufloat_optab;
21939 hw_convert = hw_conversions[kf_or_tf].from_si_uns;
21940 }
21941 else
21942 {
21943 cvt = sfloat_optab;
21944 hw_convert = hw_conversions[kf_or_tf].from_si_sign;
21945 }
21946 break;
21947
21948 case E_DImode:
21949 if (unsigned_p)
21950 {
21951 cvt = ufloat_optab;
21952 hw_convert = hw_conversions[kf_or_tf].from_di_uns;
21953 }
21954 else
21955 {
21956 cvt = sfloat_optab;
21957 hw_convert = hw_conversions[kf_or_tf].from_di_sign;
21958 }
21959 break;
21960
21961 default:
21962 gcc_unreachable ();
21963 }
21964 }
21965
21966 /* Convert from IEEE 128-bit floating point. */
21967 else if (FLOAT128_IEEE_P (src_mode))
21968 {
21969 if (src_mode == KFmode)
21970 kf_or_tf = 0;
21971 else if (src_mode == TFmode)
21972 kf_or_tf = 1;
21973 else
21974 gcc_unreachable ();
21975
21976 switch (dest_mode)
21977 {
21978 case E_DFmode:
21979 cvt = trunc_optab;
21980 hw_convert = hw_conversions[kf_or_tf].to_df;
21981 break;
21982
21983 case E_SFmode:
21984 cvt = trunc_optab;
21985 hw_convert = hw_conversions[kf_or_tf].to_sf;
21986 break;
21987
21988 case E_KFmode:
21989 case E_IFmode:
21990 case E_TFmode:
21991 if (FLOAT128_IBM_P (dest_mode))
21992 cvt = trunc_optab;
21993 else
21994 do_move = true;
21995 break;
21996
21997 case E_SImode:
21998 if (unsigned_p)
21999 {
22000 cvt = ufix_optab;
22001 hw_convert = hw_conversions[kf_or_tf].to_si_uns;
22002 }
22003 else
22004 {
22005 cvt = sfix_optab;
22006 hw_convert = hw_conversions[kf_or_tf].to_si_sign;
22007 }
22008 break;
22009
22010 case E_DImode:
22011 if (unsigned_p)
22012 {
22013 cvt = ufix_optab;
22014 hw_convert = hw_conversions[kf_or_tf].to_di_uns;
22015 }
22016 else
22017 {
22018 cvt = sfix_optab;
22019 hw_convert = hw_conversions[kf_or_tf].to_di_sign;
22020 }
22021 break;
22022
22023 default:
22024 gcc_unreachable ();
22025 }
22026 }
22027
22028 /* Both IBM format. */
22029 else if (FLOAT128_IBM_P (dest_mode) && FLOAT128_IBM_P (src_mode))
22030 do_move = true;
22031
22032 else
22033 gcc_unreachable ();
22034
22035 /* Handle conversion between TFmode/KFmode/IFmode. */
22036 if (do_move)
22037 emit_insn (gen_rtx_SET (dest, gen_rtx_FLOAT_EXTEND (dest_mode, src)));
22038
22039 /* Handle conversion if we have hardware support. */
22040 else if (TARGET_FLOAT128_HW && hw_convert)
22041 emit_insn ((hw_convert) (dest, src));
22042
22043 /* Call an external function to do the conversion. */
22044 else if (cvt != unknown_optab)
22045 {
22046 libfunc = convert_optab_libfunc (cvt, dest_mode, src_mode);
22047 gcc_assert (libfunc != NULL_RTX);
22048
22049 dest2 = emit_library_call_value (libfunc, dest, LCT_CONST, dest_mode,
22050 src, src_mode);
22051
22052 gcc_assert (dest2 != NULL_RTX);
22053 if (!rtx_equal_p (dest, dest2))
22054 emit_move_insn (dest, dest2);
22055 }
22056
22057 else
22058 gcc_unreachable ();
22059
22060 return;
22061 }
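/* E.g. a DFmode to KFmode conversion uses gen_extenddfkf2_hw when
   IEEE 128-bit hardware support is available, and otherwise falls
   back to the sext_optab libcall (typically __extenddfkf2).  */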
22062
22063 \f
22064 /* Emit RTL that sets a register to zero if OP1 and OP2 are equal. SCRATCH
22065 can be used as that dest register. Return the dest register. */
22066
22067 rtx
22068 rs6000_emit_eqne (machine_mode mode, rtx op1, rtx op2, rtx scratch)
22069 {
22070 if (op2 == const0_rtx)
22071 return op1;
22072
22073 if (GET_CODE (scratch) == SCRATCH)
22074 scratch = gen_reg_rtx (mode);
22075
22076 if (logical_operand (op2, mode))
22077 emit_insn (gen_rtx_SET (scratch, gen_rtx_XOR (mode, op1, op2)));
22078 else
22079 emit_insn (gen_rtx_SET (scratch,
22080 gen_rtx_PLUS (mode, op1, negate_rtx (mode, op2))));
22081
22082 return scratch;
22083 }
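/* E.g. comparing r3 against 5: SCRATCH is set to r3 ^ 5, which is
   zero iff r3 == 5; constants that are not logical_operands are
   handled by adding the negated constant instead.  */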
22084
22085 void
22086 rs6000_emit_sCOND (machine_mode mode, rtx operands[])
22087 {
22088 rtx condition_rtx;
22089 machine_mode op_mode;
22090 enum rtx_code cond_code;
22091 rtx result = operands[0];
22092
22093 condition_rtx = rs6000_generate_compare (operands[1], mode);
22094 cond_code = GET_CODE (condition_rtx);
22095
22096 if (cond_code == NE
22097 || cond_code == GE || cond_code == LE
22098 || cond_code == GEU || cond_code == LEU
22099 || cond_code == ORDERED || cond_code == UNGE || cond_code == UNLE)
22100 {
22101 rtx not_result = gen_reg_rtx (CCEQmode);
22102 rtx not_op, rev_cond_rtx;
22103 machine_mode cc_mode;
22104
22105 cc_mode = GET_MODE (XEXP (condition_rtx, 0));
22106
22107 rev_cond_rtx = gen_rtx_fmt_ee (rs6000_reverse_condition (cc_mode, cond_code),
22108 SImode, XEXP (condition_rtx, 0), const0_rtx);
22109 not_op = gen_rtx_COMPARE (CCEQmode, rev_cond_rtx, const0_rtx);
22110 emit_insn (gen_rtx_SET (not_result, not_op));
22111 condition_rtx = gen_rtx_EQ (VOIDmode, not_result, const0_rtx);
22112 }
22113
22114 op_mode = GET_MODE (XEXP (operands[1], 0));
22115 if (op_mode == VOIDmode)
22116 op_mode = GET_MODE (XEXP (operands[1], 1));
22117
22118 if (TARGET_POWERPC64 && (op_mode == DImode || FLOAT_MODE_P (mode)))
22119 {
22120 PUT_MODE (condition_rtx, DImode);
22121 convert_move (result, condition_rtx, 0);
22122 }
22123 else
22124 {
22125 PUT_MODE (condition_rtx, SImode);
22126 emit_insn (gen_rtx_SET (result, condition_rtx));
22127 }
22128 }
22129
22130 /* Emit a conditional branch: test the comparison in OPERANDS[0] and branch to the label in OPERANDS[3]. */
22131
22132 void
22133 rs6000_emit_cbranch (machine_mode mode, rtx operands[])
22134 {
22135 rtx condition_rtx, loc_ref;
22136
22137 condition_rtx = rs6000_generate_compare (operands[0], mode);
22138 loc_ref = gen_rtx_LABEL_REF (VOIDmode, operands[3]);
22139 emit_jump_insn (gen_rtx_SET (pc_rtx,
22140 gen_rtx_IF_THEN_ELSE (VOIDmode, condition_rtx,
22141 loc_ref, pc_rtx)));
22142 }
22143
22144 /* Return the string to output a conditional branch to LABEL, which is
22145 the operand template of the label, or NULL if the branch is really a
22146 conditional return.
22147
22148 OP is the conditional expression. XEXP (OP, 0) is assumed to be a
22149 condition code register and its mode specifies what kind of
22150 comparison we made.
22151
22152 REVERSED is nonzero if we should reverse the sense of the comparison.
22153
22154 INSN is the insn. */
22155
22156 char *
22157 output_cbranch (rtx op, const char *label, int reversed, rtx_insn *insn)
22158 {
22159 static char string[64];
22160 enum rtx_code code = GET_CODE (op);
22161 rtx cc_reg = XEXP (op, 0);
22162 machine_mode mode = GET_MODE (cc_reg);
22163 int cc_regno = REGNO (cc_reg) - CR0_REGNO;
22164 int need_longbranch = label != NULL && get_attr_length (insn) == 8;
22165 int really_reversed = reversed ^ need_longbranch;
22166 char *s = string;
22167 const char *ccode;
22168 const char *pred;
22169 rtx note;
22170
22171 validate_condition_mode (code, mode);
22172
  /* Work out which way this really branches.  We could always use
     reverse_condition_maybe_unordered here, but distinguishing the cases
     makes the resulting assembler clearer.  */
  if (really_reversed)
    {
      /* Reversing an FP compare takes care: an ordered compare
	 becomes an unordered compare and vice versa.  */
      if (mode == CCFPmode)
	code = reverse_condition_maybe_unordered (code);
      else
	code = reverse_condition (code);
    }

  switch (code)
    {
      /* Not all of these are actually distinct opcodes, but
	 we distinguish them for clarity of the resulting assembler.  */
    case NE: case LTGT:
      ccode = "ne"; break;
    case EQ: case UNEQ:
      ccode = "eq"; break;
    case GE: case GEU:
      ccode = "ge"; break;
    case GT: case GTU: case UNGT:
      ccode = "gt"; break;
    case LE: case LEU:
      ccode = "le"; break;
    case LT: case LTU: case UNLT:
      ccode = "lt"; break;
    case UNORDERED: ccode = "un"; break;
    case ORDERED: ccode = "nu"; break;
    case UNGE: ccode = "nl"; break;
    case UNLE: ccode = "ng"; break;
    default:
      gcc_unreachable ();
    }

  /* Maybe we have a guess as to how likely the branch is.  */
  pred = "";
  note = find_reg_note (insn, REG_BR_PROB, NULL_RTX);
  if (note != NULL_RTX)
    {
      /* PROB is the difference from 50%.  */
      int prob = profile_probability::from_reg_br_prob_note (XINT (note, 0))
		   .to_reg_br_prob_base () - REG_BR_PROB_BASE / 2;

      /* Only hint for highly probable/improbable branches on newer cpus when
	 we have real profile data, as static prediction overrides processor
	 dynamic prediction.  For older cpus we may as well always hint, but
	 assume not taken for branches that are very close to 50% as a
	 mispredicted taken branch is more expensive than a
	 mispredicted not-taken branch.  */
      if (rs6000_always_hint
	  || (abs (prob) > REG_BR_PROB_BASE / 100 * 48
	      && (profile_status_for_fn (cfun) != PROFILE_GUESSED)
	      && br_prob_note_reliable_p (note)))
	{
	  if (abs (prob) > REG_BR_PROB_BASE / 20
	      && ((prob > 0) ^ need_longbranch))
	    pred = "+";
	  else
	    pred = "-";
	}
    }

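  /* PRED becomes a "+" (predict taken) or "-" (predict not taken) suffix on
     the mnemonic, e.g. "bne- 0,.L5".  Its sense is XORed with
     NEED_LONGBRANCH above because a long branch emits a short conditional
     branch around an unconditional "b", which inverts the taken path.  */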
  if (label == NULL)
    s += sprintf (s, "b%slr%s ", ccode, pred);
  else
    s += sprintf (s, "b%s%s ", ccode, pred);

  /* We need to escape any '%' characters in the reg_names string.
     Assume they'd only be the first character....  */
  if (reg_names[cc_regno + CR0_REGNO][0] == '%')
    *s++ = '%';
  s += sprintf (s, "%s", reg_names[cc_regno + CR0_REGNO]);

  if (label != NULL)
    {
      /* If the branch distance was too far, we may have to use an
	 unconditional branch to go the distance.  */
      if (need_longbranch)
	s += sprintf (s, ",$+8\n\tb %s", label);
      else
	s += sprintf (s, ",%s", label);
    }

  return string;
}

/* Emit a VSX or Altivec comparison of OP0 and OP1 and return the register
   holding the result mask, or NULL_RTX if the comparison is not supported
   directly by the hardware.  */

static rtx
rs6000_emit_vector_compare_inner (enum rtx_code code, rtx op0, rtx op1)
{
  rtx mask;
  machine_mode mode = GET_MODE (op0);

  switch (code)
    {
    default:
      break;

    case GE:
      if (GET_MODE_CLASS (mode) == MODE_VECTOR_INT)
	return NULL_RTX;
      /* FALLTHRU */

    case EQ:
    case GT:
    case GTU:
    case ORDERED:
    case UNORDERED:
    case UNEQ:
    case LTGT:
      mask = gen_reg_rtx (mode);
      emit_insn (gen_rtx_SET (mask, gen_rtx_fmt_ee (code, mode, op0, op1)));
      return mask;
    }

  return NULL_RTX;
}

/* Emit vector compare for operands OP0 and OP1 using code RCODE.
   DMODE is expected destination mode.  This is a recursive function.  */

static rtx
rs6000_emit_vector_compare (enum rtx_code rcode,
			    rtx op0, rtx op1,
			    machine_mode dmode)
{
  rtx mask;
  bool swap_operands = false;
  bool try_again = false;

  gcc_assert (VECTOR_UNIT_ALTIVEC_OR_VSX_P (dmode));
  gcc_assert (GET_MODE (op0) == GET_MODE (op1));

  /* See if the comparison works as is.  */
  mask = rs6000_emit_vector_compare_inner (rcode, op0, op1);
  if (mask)
    return mask;

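  /* The underlying Altivec/VSX compare instructions only provide EQ, GT and
     GTU (plus the FP ordered forms), so other codes are rewritten in terms
     of those: LT/LTU swap the operands, NE and the unordered tests
     complement the reversed comparison, and GE/GEU/LE/LEU OR the strict
     comparison with EQ.  */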
  switch (rcode)
    {
    case LT:
      rcode = GT;
      swap_operands = true;
      try_again = true;
      break;
    case LTU:
      rcode = GTU;
      swap_operands = true;
      try_again = true;
      break;
    case NE:
    case UNLE:
    case UNLT:
    case UNGE:
    case UNGT:
      /* Invert condition and try again.
	 e.g., A != B becomes ~(A==B).  */
      {
	enum rtx_code rev_code;
	enum insn_code nor_code;
	rtx mask2;

	rev_code = reverse_condition_maybe_unordered (rcode);
	if (rev_code == UNKNOWN)
	  return NULL_RTX;

	nor_code = optab_handler (one_cmpl_optab, dmode);
	if (nor_code == CODE_FOR_nothing)
	  return NULL_RTX;

	mask2 = rs6000_emit_vector_compare (rev_code, op0, op1, dmode);
	if (!mask2)
	  return NULL_RTX;

	mask = gen_reg_rtx (dmode);
	emit_insn (GEN_FCN (nor_code) (mask, mask2));
	return mask;
      }
      break;
    case GE:
    case GEU:
    case LE:
    case LEU:
      /* Try GT/GTU/LT/LTU OR EQ.  */
      {
	rtx c_rtx, eq_rtx;
	enum insn_code ior_code;
	enum rtx_code new_code;

	switch (rcode)
	  {
	  case GE:
	    new_code = GT;
	    break;

	  case GEU:
	    new_code = GTU;
	    break;

	  case LE:
	    new_code = LT;
	    break;

	  case LEU:
	    new_code = LTU;
	    break;

	  default:
	    gcc_unreachable ();
	  }

	ior_code = optab_handler (ior_optab, dmode);
	if (ior_code == CODE_FOR_nothing)
	  return NULL_RTX;

	c_rtx = rs6000_emit_vector_compare (new_code, op0, op1, dmode);
	if (!c_rtx)
	  return NULL_RTX;

	eq_rtx = rs6000_emit_vector_compare (EQ, op0, op1, dmode);
	if (!eq_rtx)
	  return NULL_RTX;

	mask = gen_reg_rtx (dmode);
	emit_insn (GEN_FCN (ior_code) (mask, c_rtx, eq_rtx));
	return mask;
      }
      break;
    default:
      return NULL_RTX;
    }

  if (try_again)
    {
      if (swap_operands)
	std::swap (op0, op1);

      mask = rs6000_emit_vector_compare_inner (rcode, op0, op1);
      if (mask)
	return mask;
    }

  /* You only get two chances.  */
  return NULL_RTX;
}

/* Emit vector conditional expression.  DEST is destination.  OP_TRUE and
   OP_FALSE are two VEC_COND_EXPR operands.  CC_OP0 and CC_OP1 are the two
   operands for the relation operation COND.  */

int
rs6000_emit_vector_cond_expr (rtx dest, rtx op_true, rtx op_false,
			      rtx cond, rtx cc_op0, rtx cc_op1)
{
  machine_mode dest_mode = GET_MODE (dest);
  machine_mode mask_mode = GET_MODE (cc_op0);
  enum rtx_code rcode = GET_CODE (cond);
  machine_mode cc_mode = CCmode;
  rtx mask;
  rtx cond2;
  bool invert_move = false;

  if (VECTOR_UNIT_NONE_P (dest_mode))
    return 0;

  gcc_assert (GET_MODE_SIZE (dest_mode) == GET_MODE_SIZE (mask_mode)
	      && GET_MODE_NUNITS (dest_mode) == GET_MODE_NUNITS (mask_mode));

  switch (rcode)
    {
      /* Swap operands if we can, and fall back to doing the operation as
	 specified, and doing a NOR to invert the test.  */
    case NE:
    case UNLE:
    case UNLT:
    case UNGE:
    case UNGT:
      /* Invert condition and try again.
	 e.g., A = (B != C) ? D : E becomes A = (B == C) ? E : D.  */
      invert_move = true;
      rcode = reverse_condition_maybe_unordered (rcode);
      if (rcode == UNKNOWN)
	return 0;
      break;

    case GE:
    case LE:
      if (GET_MODE_CLASS (mask_mode) == MODE_VECTOR_INT)
	{
	  /* Invert condition to avoid compound test.  */
	  invert_move = true;
	  rcode = reverse_condition (rcode);
	}
      break;

    case GTU:
    case GEU:
    case LTU:
    case LEU:
      /* Mark unsigned tests with CCUNSmode.  */
      cc_mode = CCUNSmode;

      /* Invert condition to avoid compound test if necessary.  */
      if (rcode == GEU || rcode == LEU)
	{
	  invert_move = true;
	  rcode = reverse_condition (rcode);
	}
      break;

    default:
      break;
    }

  /* Get the vector mask for the given relational operations.  */
  mask = rs6000_emit_vector_compare (rcode, cc_op0, cc_op1, mask_mode);

  if (!mask)
    return 0;

  if (invert_move)
    std::swap (op_true, op_false);

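  /* MASK is all-ones in each element for which the comparison held and
     all-zeros elsewhere, so the vsel/xxsel-style if_then_else emitted at
     the end selects the OP_TRUE lanes wherever MASK is nonzero.  */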
  /* When the arms are the all-ones or all-zeros constant vectors, the mask
     can stand in for them directly.  */
  if (GET_MODE_CLASS (dest_mode) == MODE_VECTOR_INT
      && (GET_CODE (op_true) == CONST_VECTOR
	  || GET_CODE (op_false) == CONST_VECTOR))
    {
      rtx constant_0 = CONST0_RTX (dest_mode);
      rtx constant_m1 = CONSTM1_RTX (dest_mode);

      if (op_true == constant_m1 && op_false == constant_0)
	{
	  emit_move_insn (dest, mask);
	  return 1;
	}

      else if (op_true == constant_0 && op_false == constant_m1)
	{
	  emit_insn (gen_rtx_SET (dest, gen_rtx_NOT (dest_mode, mask)));
	  return 1;
	}

      /* If we can't use the vector comparison directly, perhaps we can use
	 the mask for the true or false fields, instead of loading up a
	 constant.  */
      if (op_true == constant_m1)
	op_true = mask;

      if (op_false == constant_0)
	op_false = mask;
    }

  if (!REG_P (op_true) && !SUBREG_P (op_true))
    op_true = force_reg (dest_mode, op_true);

  if (!REG_P (op_false) && !SUBREG_P (op_false))
    op_false = force_reg (dest_mode, op_false);

  cond2 = gen_rtx_fmt_ee (NE, cc_mode, gen_lowpart (dest_mode, mask),
			  CONST0_RTX (dest_mode));
  emit_insn (gen_rtx_SET (dest,
			  gen_rtx_IF_THEN_ELSE (dest_mode,
						cond2,
						op_true,
						op_false)));
  return 1;
}

/* ISA 3.0 (power9) minmax subcase to emit a XSMAXCDP or XSMINCDP instruction
   for SF/DF scalars.  Move TRUE_COND to DEST if OP applied to the operands
   of the last comparison is nonzero/true, FALSE_COND if it is zero/false.
   Return 0 if the hardware has no such operation.  */

static int
rs6000_emit_p9_fp_minmax (rtx dest, rtx op, rtx true_cond, rtx false_cond)
{
  enum rtx_code code = GET_CODE (op);
  rtx op0 = XEXP (op, 0);
  rtx op1 = XEXP (op, 1);
  machine_mode compare_mode = GET_MODE (op0);
  machine_mode result_mode = GET_MODE (dest);
  bool max_p = false;

  if (result_mode != compare_mode)
    return 0;

  if (code == GE || code == GT)
    max_p = true;
  else if (code == LE || code == LT)
    max_p = false;
  else
    return 0;

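  /* xsmaxcdp/xsmincdp compute the C-style "a > b ? a : b" result, so the
     rewrite is only valid when the select arms are the same pair of values
     as the comparison operands, either in order or swapped.  */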
  if (rtx_equal_p (op0, true_cond) && rtx_equal_p (op1, false_cond))
    ;

  else if (rtx_equal_p (op1, true_cond) && rtx_equal_p (op0, false_cond))
    max_p = !max_p;

  else
    return 0;

  rs6000_emit_minmax (dest, max_p ? SMAX : SMIN, op0, op1);
  return 1;
}

/* ISA 3.0 (power9) conditional move subcase to emit XSCMP{EQ,GE,GT,NE}DP and
   XXSEL instructions for SF/DF scalars.  Move TRUE_COND to DEST if OP
   applied to the operands of the last comparison is nonzero/true, FALSE_COND
   if it is zero/false.  Return 0 if the hardware has no such operation.  */

static int
rs6000_emit_p9_fp_cmove (rtx dest, rtx op, rtx true_cond, rtx false_cond)
{
  enum rtx_code code = GET_CODE (op);
  rtx op0 = XEXP (op, 0);
  rtx op1 = XEXP (op, 1);
  machine_mode result_mode = GET_MODE (dest);
  rtx compare_rtx;
  rtx cmove_rtx;
  rtx clobber_rtx;

  if (!can_create_pseudo_p ())
    return 0;

  switch (code)
    {
    case EQ:
    case GE:
    case GT:
      break;

    case NE:
    case LT:
    case LE:
      code = swap_condition (code);
      std::swap (op0, op1);
      break;

    default:
      return 0;
    }

  /* Generate:	[(parallel [(set (dest)
				 (if_then_else (op (cmp1) (cmp2))
					       (true)
					       (false)))
			    (clobber (scratch))])].  */

  compare_rtx = gen_rtx_fmt_ee (code, CCFPmode, op0, op1);
  cmove_rtx = gen_rtx_SET (dest,
			   gen_rtx_IF_THEN_ELSE (result_mode,
						 compare_rtx,
						 true_cond,
						 false_cond));

  clobber_rtx = gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (V2DImode));
  emit_insn (gen_rtx_PARALLEL (VOIDmode,
			       gen_rtvec (2, cmove_rtx, clobber_rtx)));

  return 1;
}

/* Emit a conditional move: move TRUE_COND to DEST if OP applied to the
   operands of the last comparison is nonzero/true, FALSE_COND if it
   is zero/false.  Return 0 if the hardware has no such operation.  */

int
rs6000_emit_cmove (rtx dest, rtx op, rtx true_cond, rtx false_cond)
{
  enum rtx_code code = GET_CODE (op);
  rtx op0 = XEXP (op, 0);
  rtx op1 = XEXP (op, 1);
  machine_mode compare_mode = GET_MODE (op0);
  machine_mode result_mode = GET_MODE (dest);
  rtx temp;
  bool is_against_zero;

  /* These modes should always match.  */
  if (GET_MODE (op1) != compare_mode
      /* In the isel case however, we can use a compare immediate, so
	 op1 may be a small constant.  */
      && (!TARGET_ISEL || !short_cint_operand (op1, VOIDmode)))
    return 0;
  if (GET_MODE (true_cond) != result_mode)
    return 0;
  if (GET_MODE (false_cond) != result_mode)
    return 0;

  /* See if we can use the ISA 3.0 (power9) min/max/compare functions.  */
  if (TARGET_P9_MINMAX
      && (compare_mode == SFmode || compare_mode == DFmode)
      && (result_mode == SFmode || result_mode == DFmode))
    {
      if (rs6000_emit_p9_fp_minmax (dest, op, true_cond, false_cond))
	return 1;

      if (rs6000_emit_p9_fp_cmove (dest, op, true_cond, false_cond))
	return 1;
    }

  /* Don't allow using floating point comparisons for integer results for
     now.  */
  if (FLOAT_MODE_P (compare_mode) && !FLOAT_MODE_P (result_mode))
    return 0;

  /* First, work out if the hardware can do this at all, or
     if it's too slow....  */
  if (!FLOAT_MODE_P (compare_mode))
    {
      if (TARGET_ISEL)
	return rs6000_emit_int_cmove (dest, op, true_cond, false_cond);
      return 0;
    }

  is_against_zero = op1 == CONST0_RTX (compare_mode);

  /* A floating-point subtract might overflow, underflow, or produce
     an inexact result, thus changing the floating-point flags, so it
     can't be generated if we care about that.  It's safe if one side
     of the construct is zero, since then no subtract will be
     generated.  */
  if (SCALAR_FLOAT_MODE_P (compare_mode)
      && flag_trapping_math && ! is_against_zero)
    return 0;

  /* Eliminate half of the comparisons by switching operands; this
     makes the remaining code simpler.  */
  if (code == UNLT || code == UNGT || code == UNORDERED || code == NE
      || code == LTGT || code == LT || code == UNLE)
    {
      code = reverse_condition_maybe_unordered (code);
      temp = true_cond;
      true_cond = false_cond;
      false_cond = temp;
    }

  /* UNEQ and LTGT take four instructions for a comparison with zero,
     so it'll probably be faster to use a branch here too.  */
  if (code == UNEQ && HONOR_NANS (compare_mode))
    return 0;

  /* We're going to try to implement comparisons by performing
     a subtract, then comparing against zero.  Unfortunately,
     Inf - Inf is NaN which is not zero, and so if we don't
     know that the operand is finite and the comparison
     would treat EQ differently from UNORDERED, we can't do it.  */
  if (HONOR_INFINITIES (compare_mode)
      && code != GT && code != UNGE
      && (!CONST_DOUBLE_P (op1)
	  || real_isinf (CONST_DOUBLE_REAL_VALUE (op1)))
      /* Constructs of the form (a OP b ? a : b) are safe.  */
      && ((! rtx_equal_p (op0, false_cond) && ! rtx_equal_p (op1, false_cond))
	  || (! rtx_equal_p (op0, true_cond)
	      && ! rtx_equal_p (op1, true_cond))))
    return 0;

  /* At this point we know we can use fsel.  */

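  /* fsel picks one of its two source values according to whether a third
     operand is greater than or equal to 0.0, so every remaining comparison
     is massaged below into the form "op0 GE 0".  */
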
  /* Reduce the comparison to a comparison against zero.  */
  if (! is_against_zero)
    {
      temp = gen_reg_rtx (compare_mode);
      emit_insn (gen_rtx_SET (temp, gen_rtx_MINUS (compare_mode, op0, op1)));
      op0 = temp;
      op1 = CONST0_RTX (compare_mode);
    }

  /* If we don't care about NaNs we can reduce some of the comparisons
     down to faster ones.  */
  if (! HONOR_NANS (compare_mode))
    switch (code)
      {
      case GT:
	code = LE;
	temp = true_cond;
	true_cond = false_cond;
	false_cond = temp;
	break;
      case UNGE:
	code = GE;
	break;
      case UNEQ:
	code = EQ;
	break;
      default:
	break;
      }

  /* Now, reduce everything down to a GE.  */
  switch (code)
    {
    case GE:
      break;

    case LE:
      temp = gen_reg_rtx (compare_mode);
      emit_insn (gen_rtx_SET (temp, gen_rtx_NEG (compare_mode, op0)));
      op0 = temp;
      break;

    case ORDERED:
      temp = gen_reg_rtx (compare_mode);
      emit_insn (gen_rtx_SET (temp, gen_rtx_ABS (compare_mode, op0)));
      op0 = temp;
      break;

    case EQ:
      temp = gen_reg_rtx (compare_mode);
      emit_insn (gen_rtx_SET (temp,
			      gen_rtx_NEG (compare_mode,
					   gen_rtx_ABS (compare_mode, op0))));
      op0 = temp;
      break;

    case UNGE:
      /* a UNGE 0 <-> (a GE 0 || -a UNLT 0) */
      temp = gen_reg_rtx (result_mode);
      emit_insn (gen_rtx_SET (temp,
			      gen_rtx_IF_THEN_ELSE (result_mode,
						    gen_rtx_GE (VOIDmode,
								op0, op1),
						    true_cond, false_cond)));
      false_cond = true_cond;
      true_cond = temp;

      temp = gen_reg_rtx (compare_mode);
      emit_insn (gen_rtx_SET (temp, gen_rtx_NEG (compare_mode, op0)));
      op0 = temp;
      break;

    case GT:
      /* a GT 0 <-> (a GE 0 && -a UNLT 0) */
      temp = gen_reg_rtx (result_mode);
      emit_insn (gen_rtx_SET (temp,
			      gen_rtx_IF_THEN_ELSE (result_mode,
						    gen_rtx_GE (VOIDmode,
								op0, op1),
						    true_cond, false_cond)));
      true_cond = false_cond;
      false_cond = temp;

      temp = gen_reg_rtx (compare_mode);
      emit_insn (gen_rtx_SET (temp, gen_rtx_NEG (compare_mode, op0)));
      op0 = temp;
      break;

    default:
      gcc_unreachable ();
    }

  emit_insn (gen_rtx_SET (dest,
			  gen_rtx_IF_THEN_ELSE (result_mode,
						gen_rtx_GE (VOIDmode,
							    op0, op1),
						true_cond, false_cond)));
  return 1;
}

/* Same as above, but for ints (isel).  */

int
rs6000_emit_int_cmove (rtx dest, rtx op, rtx true_cond, rtx false_cond)
{
  rtx condition_rtx, cr;
  machine_mode mode = GET_MODE (dest);
  enum rtx_code cond_code;
  rtx (*isel_func) (rtx, rtx, rtx, rtx, rtx);
  bool signedp;

  if (mode != SImode && (!TARGET_POWERPC64 || mode != DImode))
    return 0;

  /* We still have to do the compare, because isel doesn't do a
     compare, it just looks at the CRx bits set by a previous compare
     instruction.  */
  condition_rtx = rs6000_generate_compare (op, mode);
  cond_code = GET_CODE (condition_rtx);
  cr = XEXP (condition_rtx, 0);
  signedp = GET_MODE (cr) == CCmode;

  isel_func = (mode == SImode
	       ? (signedp ? gen_isel_signed_si : gen_isel_unsigned_si)
	       : (signedp ? gen_isel_signed_di : gen_isel_unsigned_di));

  switch (cond_code)
    {
    case LT: case GT: case LTU: case GTU: case EQ:
      /* isel handles these directly.  */
      break;

    default:
      /* We need to swap the sense of the comparison.  */
      {
	std::swap (false_cond, true_cond);
	PUT_CODE (condition_rtx, reverse_condition (cond_code));
      }
      break;
    }

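  /* isel selects its first arm when the tested CR bit is 1; LT, GT and EQ
     (and their unsigned variants) each correspond to a set bit, while the
     remaining codes are their complements, which is why they are handled
     above by reversing the condition and swapping the arms.  */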
  false_cond = force_reg (mode, false_cond);
  if (true_cond != const0_rtx)
    true_cond = force_reg (mode, true_cond);

  emit_insn (isel_func (dest, condition_rtx, true_cond, false_cond, cr));

  return 1;
}

void
rs6000_emit_minmax (rtx dest, enum rtx_code code, rtx op0, rtx op1)
{
  machine_mode mode = GET_MODE (op0);
  enum rtx_code c;
  rtx target;

  /* VSX/altivec have direct min/max insns.  */
  if ((code == SMAX || code == SMIN)
      && (VECTOR_UNIT_ALTIVEC_OR_VSX_P (mode)
	  || (mode == SFmode && VECTOR_UNIT_VSX_P (DFmode))))
    {
      emit_insn (gen_rtx_SET (dest, gen_rtx_fmt_ee (code, mode, op0, op1)));
      return;
    }

  if (code == SMAX || code == SMIN)
    c = GE;
  else
    c = GEU;

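  /* MAX (a, b) is "a >= b ? a : b" and MIN (a, b) is "a >= b ? b : a", so
     both reduce to a conditional move on GE/GEU with the arms ordered
     appropriately.  */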
  if (code == SMAX || code == UMAX)
    target = emit_conditional_move (dest, c, op0, op1, mode,
				    op0, op1, mode, 0);
  else
    target = emit_conditional_move (dest, c, op0, op1, mode,
				    op1, op0, mode, 0);
  gcc_assert (target);
  if (target != dest)
    emit_move_insn (dest, target);
}

/* A subroutine of the atomic operation splitters.  Jump to LABEL if
   COND is true.  Mark the jump as unlikely to be taken.  */

static void
emit_unlikely_jump (rtx cond, rtx label)
{
  rtx x = gen_rtx_IF_THEN_ELSE (VOIDmode, cond, label, pc_rtx);
  rtx_insn *insn = emit_jump_insn (gen_rtx_SET (pc_rtx, x));
  add_reg_br_prob_note (insn, profile_probability::very_unlikely ());
}

/* A subroutine of the atomic operation splitters.  Emit a load-locked
   instruction in MODE.  For QI/HImode, possibly use a pattern that includes
   the zero_extend operation.  */

static void
emit_load_locked (machine_mode mode, rtx reg, rtx mem)
{
  rtx (*fn) (rtx, rtx) = NULL;

  switch (mode)
    {
    case E_QImode:
      fn = gen_load_lockedqi;
      break;
    case E_HImode:
      fn = gen_load_lockedhi;
      break;
    case E_SImode:
      if (GET_MODE (mem) == QImode)
	fn = gen_load_lockedqi_si;
      else if (GET_MODE (mem) == HImode)
	fn = gen_load_lockedhi_si;
      else
	fn = gen_load_lockedsi;
      break;
    case E_DImode:
      fn = gen_load_lockeddi;
      break;
    case E_TImode:
      fn = gen_load_lockedti;
      break;
    default:
      gcc_unreachable ();
    }
  emit_insn (fn (reg, mem));
}

/* A subroutine of the atomic operation splitters.  Emit a store-conditional
   instruction in MODE.  */

static void
emit_store_conditional (machine_mode mode, rtx res, rtx mem, rtx val)
{
  rtx (*fn) (rtx, rtx, rtx) = NULL;

  switch (mode)
    {
    case E_QImode:
      fn = gen_store_conditionalqi;
      break;
    case E_HImode:
      fn = gen_store_conditionalhi;
      break;
    case E_SImode:
      fn = gen_store_conditionalsi;
      break;
    case E_DImode:
      fn = gen_store_conditionaldi;
      break;
    case E_TImode:
      fn = gen_store_conditionalti;
      break;
    default:
      gcc_unreachable ();
    }

  /* Emit sync before stwcx. to address PPC405 Erratum.  */
  if (PPC405_ERRATUM77)
    emit_insn (gen_hwsync ());

  emit_insn (fn (res, mem, val));
}

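/* All of the atomic expanders below generate variants of the canonical
   PowerPC load-reserved/store-conditional loop, e.g. for SImode:

	  <pre barrier>
       again:
	  lwarx   r, 0, mem
	  <operate on r>
	  stwcx.  r', 0, mem
	  bne-    again
	  <post barrier>  */
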
/* Expand barriers before and after a load_locked/store_cond sequence.  */

static rtx
rs6000_pre_atomic_barrier (rtx mem, enum memmodel model)
{
  rtx addr = XEXP (mem, 0);

  if (!legitimate_indirect_address_p (addr, reload_completed)
      && !legitimate_indexed_address_p (addr, reload_completed))
    {
      addr = force_reg (Pmode, addr);
      mem = replace_equiv_address_nv (mem, addr);
    }

  switch (model)
    {
    case MEMMODEL_RELAXED:
    case MEMMODEL_CONSUME:
    case MEMMODEL_ACQUIRE:
      break;
    case MEMMODEL_RELEASE:
    case MEMMODEL_ACQ_REL:
      emit_insn (gen_lwsync ());
      break;
    case MEMMODEL_SEQ_CST:
      emit_insn (gen_hwsync ());
      break;
    default:
      gcc_unreachable ();
    }
  return mem;
}

static void
rs6000_post_atomic_barrier (enum memmodel model)
{
  switch (model)
    {
    case MEMMODEL_RELAXED:
    case MEMMODEL_CONSUME:
    case MEMMODEL_RELEASE:
      break;
    case MEMMODEL_ACQUIRE:
    case MEMMODEL_ACQ_REL:
    case MEMMODEL_SEQ_CST:
      emit_insn (gen_isync ());
      break;
    default:
      gcc_unreachable ();
    }
}

/* A subroutine of the various atomic expanders.  For sub-word operations,
   we must adjust things to operate on SImode.  Given the original MEM,
   return a new aligned memory.  Also build and return the quantities by
   which to shift and mask.  */

static rtx
rs6000_adjust_atomic_subword (rtx orig_mem, rtx *pshift, rtx *pmask)
{
  rtx addr, align, shift, mask, mem;
  HOST_WIDE_INT shift_mask;
  machine_mode mode = GET_MODE (orig_mem);

  /* For smaller modes, we have to implement this via SImode.  */
  shift_mask = (mode == QImode ? 0x18 : 0x10);

  addr = XEXP (orig_mem, 0);
  addr = force_reg (GET_MODE (addr), addr);

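  /* For example, a QImode access at address A uses the SImode word at
     A & -4.  The shift computed below is 8 * (A & 3) on little-endian
     (XORed with SHIFT_MASK on big-endian, where byte 0 is the most
     significant), and SHIFT_MASK (0x18 for bytes, 0x10 for halfwords)
     bounds that shift amount.  */
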
  /* Aligned memory containing subword.  Generate a new memory.  We
     do not want any of the existing MEM_ATTR data, as we're now
     accessing memory outside the original object.  */
  align = expand_simple_binop (Pmode, AND, addr, GEN_INT (-4),
			       NULL_RTX, 1, OPTAB_LIB_WIDEN);
  mem = gen_rtx_MEM (SImode, align);
  MEM_VOLATILE_P (mem) = MEM_VOLATILE_P (orig_mem);
  if (MEM_ALIAS_SET (orig_mem) == ALIAS_SET_MEMORY_BARRIER)
    set_mem_alias_set (mem, ALIAS_SET_MEMORY_BARRIER);

  /* Shift amount for subword relative to aligned word.  */
  shift = gen_reg_rtx (SImode);
  addr = gen_lowpart (SImode, addr);
  rtx tmp = gen_reg_rtx (SImode);
  emit_insn (gen_ashlsi3 (tmp, addr, GEN_INT (3)));
  emit_insn (gen_andsi3 (shift, tmp, GEN_INT (shift_mask)));
  if (BYTES_BIG_ENDIAN)
    shift = expand_simple_binop (SImode, XOR, shift, GEN_INT (shift_mask),
				 shift, 1, OPTAB_LIB_WIDEN);
  *pshift = shift;

  /* Mask for insertion.  */
  mask = expand_simple_binop (SImode, ASHIFT, GEN_INT (GET_MODE_MASK (mode)),
			      shift, NULL_RTX, 1, OPTAB_LIB_WIDEN);
  *pmask = mask;

  return mem;
}

/* A subroutine of the various atomic expanders.  For sub-word operands,
   combine OLDVAL and NEWVAL via MASK.  Returns a new pseudo.  */

static rtx
rs6000_mask_atomic_subword (rtx oldval, rtx newval, rtx mask)
{
  rtx x;

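  /* Compute (OLDVAL & ~MASK) | NEWVAL, splicing the new field into the old
     word.  NEWVAL was zero-extended and shifted into position by the
     caller, so its bits outside the field are already clear.  */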
  x = gen_reg_rtx (SImode);
  emit_insn (gen_rtx_SET (x, gen_rtx_AND (SImode,
					  gen_rtx_NOT (SImode, mask),
					  oldval)));

  x = expand_simple_binop (SImode, IOR, newval, x, x, 1, OPTAB_LIB_WIDEN);

  return x;
}

/* A subroutine of the various atomic expanders.  For sub-word operands,
   extract WIDE to NARROW via SHIFT.  */

static void
rs6000_finish_atomic_subword (rtx narrow, rtx wide, rtx shift)
{
  wide = expand_simple_binop (SImode, LSHIFTRT, wide, shift,
			      wide, 1, OPTAB_LIB_WIDEN);
  emit_move_insn (narrow, gen_lowpart (GET_MODE (narrow), wide));
}

/* Expand an atomic compare and swap operation.  */

void
rs6000_expand_atomic_compare_and_swap (rtx operands[])
{
  rtx boolval, retval, mem, oldval, newval, cond;
  rtx label1, label2, x, mask, shift;
  machine_mode mode, orig_mode;
  enum memmodel mod_s, mod_f;
  bool is_weak;

  boolval = operands[0];
  retval = operands[1];
  mem = operands[2];
  oldval = operands[3];
  newval = operands[4];
  is_weak = (INTVAL (operands[5]) != 0);
  mod_s = memmodel_base (INTVAL (operands[6]));
  mod_f = memmodel_base (INTVAL (operands[7]));
  orig_mode = mode = GET_MODE (mem);

  mask = shift = NULL_RTX;
  if (mode == QImode || mode == HImode)
    {
      /* Before power8, we didn't have access to lbarx/lharx, so generate a
	 lwarx and shift/mask operations.  With power8, we need to do the
	 comparison in SImode, but the store is still done in QI/HImode.  */
      oldval = convert_modes (SImode, mode, oldval, 1);

      if (!TARGET_SYNC_HI_QI)
	{
	  mem = rs6000_adjust_atomic_subword (mem, &shift, &mask);

	  /* Shift and mask OLDVAL into position within the word.  */
	  oldval = expand_simple_binop (SImode, ASHIFT, oldval, shift,
					NULL_RTX, 1, OPTAB_LIB_WIDEN);

	  /* Shift and mask NEWVAL into position within the word.  */
	  newval = convert_modes (SImode, mode, newval, 1);
	  newval = expand_simple_binop (SImode, ASHIFT, newval, shift,
					NULL_RTX, 1, OPTAB_LIB_WIDEN);
	}

      /* Prepare to adjust the return value.  */
      retval = gen_reg_rtx (SImode);
      mode = SImode;
    }
  else if (reg_overlap_mentioned_p (retval, oldval))
    oldval = copy_to_reg (oldval);

  if (mode != TImode && !reg_or_short_operand (oldval, mode))
    oldval = copy_to_mode_reg (mode, oldval);

  if (reg_overlap_mentioned_p (retval, newval))
    newval = copy_to_reg (newval);

  mem = rs6000_pre_atomic_barrier (mem, mod_s);

  label1 = NULL_RTX;
  if (!is_weak)
    {
      label1 = gen_rtx_LABEL_REF (VOIDmode, gen_label_rtx ());
      emit_label (XEXP (label1, 0));
    }
  label2 = gen_rtx_LABEL_REF (VOIDmode, gen_label_rtx ());

  emit_load_locked (mode, retval, mem);

  x = retval;
  if (mask)
    x = expand_simple_binop (SImode, AND, retval, mask,
			     NULL_RTX, 1, OPTAB_LIB_WIDEN);

  cond = gen_reg_rtx (CCmode);
  /* If we have TImode, synthesize a comparison.  */
  if (mode != TImode)
    x = gen_rtx_COMPARE (CCmode, x, oldval);
  else
    {
      rtx xor1_result = gen_reg_rtx (DImode);
      rtx xor2_result = gen_reg_rtx (DImode);
      rtx or_result = gen_reg_rtx (DImode);
      rtx new_word0 = simplify_gen_subreg (DImode, x, TImode, 0);
      rtx new_word1 = simplify_gen_subreg (DImode, x, TImode, 8);
      rtx old_word0 = simplify_gen_subreg (DImode, oldval, TImode, 0);
      rtx old_word1 = simplify_gen_subreg (DImode, oldval, TImode, 8);

      emit_insn (gen_xordi3 (xor1_result, new_word0, old_word0));
      emit_insn (gen_xordi3 (xor2_result, new_word1, old_word1));
      emit_insn (gen_iordi3 (or_result, xor1_result, xor2_result));
      x = gen_rtx_COMPARE (CCmode, or_result, const0_rtx);
    }

  emit_insn (gen_rtx_SET (cond, x));

  x = gen_rtx_NE (VOIDmode, cond, const0_rtx);
  emit_unlikely_jump (x, label2);

  x = newval;
  if (mask)
    x = rs6000_mask_atomic_subword (retval, newval, mask);

  emit_store_conditional (orig_mode, cond, mem, x);

  if (!is_weak)
    {
      x = gen_rtx_NE (VOIDmode, cond, const0_rtx);
      emit_unlikely_jump (x, label1);
    }

  if (!is_mm_relaxed (mod_f))
    emit_label (XEXP (label2, 0));

  rs6000_post_atomic_barrier (mod_s);

  if (is_mm_relaxed (mod_f))
    emit_label (XEXP (label2, 0));

  if (shift)
    rs6000_finish_atomic_subword (operands[1], retval, shift);
  else if (mode != GET_MODE (operands[1]))
    convert_move (operands[1], retval, 1);

  /* In all cases, CR0 contains EQ on success, and NE on failure.  */
  x = gen_rtx_EQ (SImode, cond, const0_rtx);
  emit_insn (gen_rtx_SET (boolval, x));
}

/* Expand an atomic exchange operation.  */

void
rs6000_expand_atomic_exchange (rtx operands[])
{
  rtx retval, mem, val, cond;
  machine_mode mode;
  enum memmodel model;
  rtx label, x, mask, shift;

  retval = operands[0];
  mem = operands[1];
  val = operands[2];
  model = memmodel_base (INTVAL (operands[3]));
  mode = GET_MODE (mem);

  mask = shift = NULL_RTX;
  if (!TARGET_SYNC_HI_QI && (mode == QImode || mode == HImode))
    {
      mem = rs6000_adjust_atomic_subword (mem, &shift, &mask);

      /* Shift and mask VAL into position within the word.  */
      val = convert_modes (SImode, mode, val, 1);
      val = expand_simple_binop (SImode, ASHIFT, val, shift,
				 NULL_RTX, 1, OPTAB_LIB_WIDEN);

      /* Prepare to adjust the return value.  */
      retval = gen_reg_rtx (SImode);
      mode = SImode;
    }

  mem = rs6000_pre_atomic_barrier (mem, model);

  label = gen_rtx_LABEL_REF (VOIDmode, gen_label_rtx ());
  emit_label (XEXP (label, 0));

  emit_load_locked (mode, retval, mem);

  x = val;
  if (mask)
    x = rs6000_mask_atomic_subword (retval, val, mask);

  cond = gen_reg_rtx (CCmode);
  emit_store_conditional (mode, cond, mem, x);

  x = gen_rtx_NE (VOIDmode, cond, const0_rtx);
  emit_unlikely_jump (x, label);

  rs6000_post_atomic_barrier (model);

  if (shift)
    rs6000_finish_atomic_subword (operands[0], retval, shift);
}

/* Expand an atomic fetch-and-operate pattern.  CODE is the binary operation
   to perform.  MEM is the memory on which to operate.  VAL is the second
   operand of the binary operator.  BEFORE and AFTER are optional locations to
   return the value of MEM either before or after the operation.  MODEL_RTX
   is a CONST_INT containing the memory model to use.  */

void
rs6000_expand_atomic_op (enum rtx_code code, rtx mem, rtx val,
			 rtx orig_before, rtx orig_after, rtx model_rtx)
{
  enum memmodel model = memmodel_base (INTVAL (model_rtx));
  machine_mode mode = GET_MODE (mem);
  machine_mode store_mode = mode;
  rtx label, x, cond, mask, shift;
  rtx before = orig_before, after = orig_after;

  mask = shift = NULL_RTX;
  /* On power8, we want to use SImode for the operation.  On previous systems,
     use the operation in a subword and shift/mask to get the proper byte or
     halfword.  */
  if (mode == QImode || mode == HImode)
    {
      if (TARGET_SYNC_HI_QI)
	{
	  val = convert_modes (SImode, mode, val, 1);

	  /* Prepare to adjust the return value.  */
	  before = gen_reg_rtx (SImode);
	  if (after)
	    after = gen_reg_rtx (SImode);
	  mode = SImode;
	}
      else
	{
	  mem = rs6000_adjust_atomic_subword (mem, &shift, &mask);

	  /* Shift and mask VAL into position within the word.  */
	  val = convert_modes (SImode, mode, val, 1);
	  val = expand_simple_binop (SImode, ASHIFT, val, shift,
				     NULL_RTX, 1, OPTAB_LIB_WIDEN);

	  switch (code)
	    {
	    case IOR:
	    case XOR:
	      /* We've already zero-extended VAL.  That is sufficient to
		 make certain that it does not affect other bits.  */
	      mask = NULL;
	      break;

	    case AND:
	      /* If we make certain that all of the other bits in VAL are
		 set, that will be sufficient to not affect other bits.  */
	      x = gen_rtx_NOT (SImode, mask);
	      x = gen_rtx_IOR (SImode, x, val);
	      emit_insn (gen_rtx_SET (val, x));
	      mask = NULL;
	      break;

	    case NOT:
	    case PLUS:
	    case MINUS:
	      /* These will all affect bits outside the field and need
		 adjustment via MASK within the loop.  */
	      break;

	    default:
	      gcc_unreachable ();
	    }

	  /* Prepare to adjust the return value.  */
	  before = gen_reg_rtx (SImode);
	  if (after)
	    after = gen_reg_rtx (SImode);
	  store_mode = mode = SImode;
	}
    }

  mem = rs6000_pre_atomic_barrier (mem, model);

  label = gen_label_rtx ();
  emit_label (label);
  label = gen_rtx_LABEL_REF (VOIDmode, label);

  if (before == NULL_RTX)
    before = gen_reg_rtx (mode);

  emit_load_locked (mode, before, mem);

  if (code == NOT)
    {
      x = expand_simple_binop (mode, AND, before, val,
			       NULL_RTX, 1, OPTAB_LIB_WIDEN);
      after = expand_simple_unop (mode, NOT, x, after, 1);
    }
  else
    {
      after = expand_simple_binop (mode, code, before, val,
				   after, 1, OPTAB_LIB_WIDEN);
    }

  x = after;
  if (mask)
    {
      x = expand_simple_binop (SImode, AND, after, mask,
			       NULL_RTX, 1, OPTAB_LIB_WIDEN);
      x = rs6000_mask_atomic_subword (before, x, mask);
    }
  else if (store_mode != mode)
    x = convert_modes (store_mode, mode, x, 1);

  cond = gen_reg_rtx (CCmode);
  emit_store_conditional (store_mode, cond, mem, x);

  x = gen_rtx_NE (VOIDmode, cond, const0_rtx);
  emit_unlikely_jump (x, label);

  rs6000_post_atomic_barrier (model);

  if (shift)
    {
      /* QImode/HImode on machines without lbarx/lharx where we do a lwarx
	 and then do the calculations in a SImode register.  */
      if (orig_before)
	rs6000_finish_atomic_subword (orig_before, before, shift);
      if (orig_after)
	rs6000_finish_atomic_subword (orig_after, after, shift);
    }
  else if (store_mode != mode)
    {
      /* QImode/HImode on machines with lbarx/lharx where we do the native
	 operation and then do the calculations in a SImode register.  */
      if (orig_before)
	convert_move (orig_before, before, 1);
      if (orig_after)
	convert_move (orig_after, after, 1);
    }
  else if (orig_after && after != orig_after)
    emit_move_insn (orig_after, after);
}

/* Emit instructions to move SRC to DST.  Called by splitters for
   multi-register moves.  It will emit at most one instruction for
   each register that is accessed; that is, it won't emit li/lis pairs
   (or equivalent for 64-bit code).  One of SRC or DST must be a hard
   register.  */

void
rs6000_split_multireg_move (rtx dst, rtx src)
{
  /* The register number of the first register being moved.  */
  int reg;
  /* The mode that is to be moved.  */
  machine_mode mode;
  /* The mode that the move is being done in, and its size.  */
  machine_mode reg_mode;
  int reg_mode_size;
  /* The number of registers that will be moved.  */
  int nregs;

  reg = REG_P (dst) ? REGNO (dst) : REGNO (src);
  mode = GET_MODE (dst);
  nregs = hard_regno_nregs (reg, mode);
  if (FP_REGNO_P (reg))
    reg_mode = DECIMAL_FLOAT_MODE_P (mode) ? DDmode :
      (TARGET_HARD_FLOAT ? DFmode : SFmode);
  else if (ALTIVEC_REGNO_P (reg))
    reg_mode = V16QImode;
  else
    reg_mode = word_mode;
  reg_mode_size = GET_MODE_SIZE (reg_mode);

  gcc_assert (reg_mode_size * nregs == GET_MODE_SIZE (mode));

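  /* Everything below moves the value one REG_MODE-sized chunk at a time;
     the delicate part is ordering the chunk moves so that a register that
     is both part of the destination and used in the source address is
     written last.  */
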
  /* TDmode residing in FP registers is special, since the ISA requires that
     the lower-numbered word of a register pair is always the most significant
     word, even in little-endian mode.  This does not match the usual subreg
     semantics, so we cannot use simplify_gen_subreg in those cases.  Access
     the appropriate constituent registers "by hand" in little-endian mode.

     Note we do not need to check for destructive overlap here since TDmode
     can only reside in even/odd register pairs.  */
  if (FP_REGNO_P (reg) && DECIMAL_FLOAT_MODE_P (mode) && !BYTES_BIG_ENDIAN)
    {
      rtx p_src, p_dst;
      int i;

      for (i = 0; i < nregs; i++)
	{
	  if (REG_P (src) && FP_REGNO_P (REGNO (src)))
	    p_src = gen_rtx_REG (reg_mode, REGNO (src) + nregs - 1 - i);
	  else
	    p_src = simplify_gen_subreg (reg_mode, src, mode,
					 i * reg_mode_size);

	  if (REG_P (dst) && FP_REGNO_P (REGNO (dst)))
	    p_dst = gen_rtx_REG (reg_mode, REGNO (dst) + nregs - 1 - i);
	  else
	    p_dst = simplify_gen_subreg (reg_mode, dst, mode,
					 i * reg_mode_size);

	  emit_insn (gen_rtx_SET (p_dst, p_src));
	}

      return;
    }

  if (REG_P (src) && REG_P (dst) && (REGNO (src) < REGNO (dst)))
    {
      /* Move register range backwards, if we might have destructive
	 overlap.  */
      int i;
      for (i = nregs - 1; i >= 0; i--)
	emit_insn (gen_rtx_SET (simplify_gen_subreg (reg_mode, dst, mode,
						     i * reg_mode_size),
				simplify_gen_subreg (reg_mode, src, mode,
						     i * reg_mode_size)));
    }
  else
    {
      int i;
      int j = -1;
      bool used_update = false;
      rtx restore_basereg = NULL_RTX;

      if (MEM_P (src) && INT_REGNO_P (reg))
	{
	  rtx breg;

	  if (GET_CODE (XEXP (src, 0)) == PRE_INC
	      || GET_CODE (XEXP (src, 0)) == PRE_DEC)
	    {
	      rtx delta_rtx;
	      breg = XEXP (XEXP (src, 0), 0);
	      delta_rtx = (GET_CODE (XEXP (src, 0)) == PRE_INC
			   ? GEN_INT (GET_MODE_SIZE (GET_MODE (src)))
			   : GEN_INT (-GET_MODE_SIZE (GET_MODE (src))));
	      emit_insn (gen_add3_insn (breg, breg, delta_rtx));
	      src = replace_equiv_address (src, breg);
	    }
	  else if (! rs6000_offsettable_memref_p (src, reg_mode, true))
	    {
	      if (GET_CODE (XEXP (src, 0)) == PRE_MODIFY)
		{
		  rtx basereg = XEXP (XEXP (src, 0), 0);
		  if (TARGET_UPDATE)
		    {
		      rtx ndst = simplify_gen_subreg (reg_mode, dst, mode, 0);
		      emit_insn (gen_rtx_SET (ndst,
					      gen_rtx_MEM (reg_mode,
							   XEXP (src, 0))));
		      used_update = true;
		    }
		  else
		    emit_insn (gen_rtx_SET (basereg,
					    XEXP (XEXP (src, 0), 1)));
		  src = replace_equiv_address (src, basereg);
		}
	      else
		{
		  rtx basereg = gen_rtx_REG (Pmode, reg);
		  emit_insn (gen_rtx_SET (basereg, XEXP (src, 0)));
		  src = replace_equiv_address (src, basereg);
		}
	    }

	  breg = XEXP (src, 0);
	  if (GET_CODE (breg) == PLUS || GET_CODE (breg) == LO_SUM)
	    breg = XEXP (breg, 0);

	  /* If the base register we are using to address memory is
	     also a destination reg, then change that register last.  */
	  if (REG_P (breg)
	      && REGNO (breg) >= REGNO (dst)
	      && REGNO (breg) < REGNO (dst) + nregs)
	    j = REGNO (breg) - REGNO (dst);
	}
      else if (MEM_P (dst) && INT_REGNO_P (reg))
	{
	  rtx breg;

	  if (GET_CODE (XEXP (dst, 0)) == PRE_INC
	      || GET_CODE (XEXP (dst, 0)) == PRE_DEC)
	    {
	      rtx delta_rtx;
	      breg = XEXP (XEXP (dst, 0), 0);
	      delta_rtx = (GET_CODE (XEXP (dst, 0)) == PRE_INC
			   ? GEN_INT (GET_MODE_SIZE (GET_MODE (dst)))
			   : GEN_INT (-GET_MODE_SIZE (GET_MODE (dst))));

	      /* We have to update the breg before doing the store.
		 Use store with update, if available.  */

	      if (TARGET_UPDATE)
		{
		  rtx nsrc = simplify_gen_subreg (reg_mode, src, mode, 0);
		  emit_insn (TARGET_32BIT
			     ? (TARGET_POWERPC64
				? gen_movdi_si_update (breg, breg, delta_rtx, nsrc)
				: gen_movsi_si_update (breg, breg, delta_rtx, nsrc))
			     : gen_movdi_di_update (breg, breg, delta_rtx, nsrc));
		  used_update = true;
		}
	      else
		emit_insn (gen_add3_insn (breg, breg, delta_rtx));
	      dst = replace_equiv_address (dst, breg);
	    }
	  else if (!rs6000_offsettable_memref_p (dst, reg_mode, true)
		   && GET_CODE (XEXP (dst, 0)) != LO_SUM)
	    {
	      if (GET_CODE (XEXP (dst, 0)) == PRE_MODIFY)
		{
		  rtx basereg = XEXP (XEXP (dst, 0), 0);
		  if (TARGET_UPDATE)
		    {
		      rtx nsrc = simplify_gen_subreg (reg_mode, src, mode, 0);
		      emit_insn (gen_rtx_SET (gen_rtx_MEM (reg_mode,
							   XEXP (dst, 0)),
					      nsrc));
		      used_update = true;
		    }
		  else
		    emit_insn (gen_rtx_SET (basereg,
					    XEXP (XEXP (dst, 0), 1)));
		  dst = replace_equiv_address (dst, basereg);
		}
	      else
		{
		  rtx basereg = XEXP (XEXP (dst, 0), 0);
		  rtx offsetreg = XEXP (XEXP (dst, 0), 1);
		  gcc_assert (GET_CODE (XEXP (dst, 0)) == PLUS
			      && REG_P (basereg)
			      && REG_P (offsetreg)
			      && REGNO (basereg) != REGNO (offsetreg));
		  if (REGNO (basereg) == 0)
		    {
		      rtx tmp = offsetreg;
		      offsetreg = basereg;
		      basereg = tmp;
		    }
		  emit_insn (gen_add3_insn (basereg, basereg, offsetreg));
		  restore_basereg = gen_sub3_insn (basereg, basereg, offsetreg);
		  dst = replace_equiv_address (dst, basereg);
		}
	    }
	  else if (GET_CODE (XEXP (dst, 0)) != LO_SUM)
	    gcc_assert (rs6000_offsettable_memref_p (dst, reg_mode, true));
	}

      for (i = 0; i < nregs; i++)
	{
	  /* Calculate index to next subword.  */
	  ++j;
	  if (j == nregs)
	    j = 0;

	  /* If compiler already emitted move of first word by
	     store with update, no need to do anything.  */
	  if (j == 0 && used_update)
	    continue;

	  emit_insn (gen_rtx_SET (simplify_gen_subreg (reg_mode, dst, mode,
						       j * reg_mode_size),
				  simplify_gen_subreg (reg_mode, src, mode,
						       j * reg_mode_size)));
	}
      if (restore_basereg != NULL_RTX)
	emit_insn (restore_basereg);
    }
}

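/* Lazily-created alias set used for all TOC references.  The TOC is
   compiler-generated, read-only constant data, so giving it its own alias
   set tells the alias oracle it never overlaps user-visible memory.  */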
static GTY(()) alias_set_type set = -1;

alias_set_type
get_TOC_alias_set (void)
{
  if (set == -1)
    set = new_alias_set ();
  return set;
}

/* Return the internal arg pointer used for function incoming
   arguments.  When -fsplit-stack, the arg pointer is r12 so we need
   to copy it to a pseudo in order for it to be preserved over calls
   and suchlike.  We'd really like to use a pseudo here for the
   internal arg pointer but data-flow analysis is not prepared to
   accept pseudos as live at the beginning of a function.  */

static rtx
rs6000_internal_arg_pointer (void)
{
  if (flag_split_stack
      && (lookup_attribute ("no_split_stack", DECL_ATTRIBUTES (cfun->decl))
	  == NULL))
    {
      if (cfun->machine->split_stack_arg_pointer == NULL_RTX)
	{
	  rtx pat;

	  cfun->machine->split_stack_arg_pointer = gen_reg_rtx (Pmode);
	  REG_POINTER (cfun->machine->split_stack_arg_pointer) = 1;

	  /* Put the pseudo initialization right after the note at the
	     beginning of the function.  */
	  pat = gen_rtx_SET (cfun->machine->split_stack_arg_pointer,
			     gen_rtx_REG (Pmode, 12));
	  push_topmost_sequence ();
	  emit_insn_after (pat, get_insns ());
	  pop_topmost_sequence ();
	}
      rtx ret = plus_constant (Pmode, cfun->machine->split_stack_arg_pointer,
			       FIRST_PARM_OFFSET (current_function_decl));
      return copy_to_reg (ret);
    }
  return virtual_incoming_args_rtx;
}

/* We may have to tell the dataflow pass that the split stack prologue
   is initializing a register.  */

static void
rs6000_live_on_entry (bitmap regs)
{
  if (flag_split_stack)
    bitmap_set_bit (regs, 12);
}

/* A C compound statement that outputs the assembler code for a thunk
   function, used to implement C++ virtual function calls with
   multiple inheritance.  The thunk acts as a wrapper around a virtual
   function, adjusting the implicit object parameter before handing
   control off to the real function.

   First, emit code to add the integer DELTA to the location that
   contains the incoming first argument.  Assume that this argument
   contains a pointer, and is the one used to pass the `this' pointer
   in C++.  This is the incoming argument *before* the function
   prologue, e.g. `%o0' on a sparc.  The addition must preserve the
   values of all other incoming arguments.

   After the addition, emit code to jump to FUNCTION, which is a
   `FUNCTION_DECL'.  This is a direct pure jump, not a call, and does
   not touch the return address.  Hence returning from FUNCTION will
   return to whoever called the current `thunk'.

   The effect must be as if FUNCTION had been called directly with the
   adjusted first argument.  This macro is responsible for emitting
   all of the code for a thunk function; output_function_prologue()
   and output_function_epilogue() are not invoked.

   The THUNK_FNDECL is redundant.  (DELTA and FUNCTION have already
   been extracted from it.)  It might possibly be useful on some
   targets, but probably not.

   If you do not define this macro, the target-independent code in the
   C++ frontend will generate a less efficient heavyweight thunk that
   calls FUNCTION instead of jumping to it.  The generic approach does
   not support varargs.  */

static void
rs6000_output_mi_thunk (FILE *file, tree thunk_fndecl ATTRIBUTE_UNUSED,
			HOST_WIDE_INT delta, HOST_WIDE_INT vcall_offset,
			tree function)
{
  const char *fnname = IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (thunk_fndecl));
  rtx this_rtx, funexp;
  rtx_insn *insn;

  reload_completed = 1;
  epilogue_completed = 1;

  /* Mark the end of the (empty) prologue.  */
  emit_note (NOTE_INSN_PROLOGUE_END);

  /* Find the "this" pointer.  If the function returns a structure,
     the structure return pointer is in r3.  */
  if (aggregate_value_p (TREE_TYPE (TREE_TYPE (function)), function))
    this_rtx = gen_rtx_REG (Pmode, 4);
  else
    this_rtx = gen_rtx_REG (Pmode, 3);

  /* Apply the constant offset, if required.  */
  if (delta)
    emit_insn (gen_add3_insn (this_rtx, this_rtx, GEN_INT (delta)));

  /* Apply the offset from the vtable, if required.  */
  if (vcall_offset)
    {
      rtx vcall_offset_rtx = GEN_INT (vcall_offset);
      rtx tmp = gen_rtx_REG (Pmode, 12);

      emit_move_insn (tmp, gen_rtx_MEM (Pmode, this_rtx));
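      /* If VCALL_OFFSET does not fit in the signed 16-bit displacement of a
	 D-form load, materialize the sum in TMP first; otherwise fold the
	 offset straight into the load's displacement.  */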
      if (((unsigned HOST_WIDE_INT) vcall_offset) + 0x8000 >= 0x10000)
	{
	  emit_insn (gen_add3_insn (tmp, tmp, vcall_offset_rtx));
	  emit_move_insn (tmp, gen_rtx_MEM (Pmode, tmp));
	}
      else
	{
	  rtx loc = gen_rtx_PLUS (Pmode, tmp, vcall_offset_rtx);

	  emit_move_insn (tmp, gen_rtx_MEM (Pmode, loc));
	}
      emit_insn (gen_add3_insn (this_rtx, this_rtx, tmp));
    }

  /* Generate a tail call to the target function.  */
  if (!TREE_USED (function))
    {
      assemble_external (function);
      TREE_USED (function) = 1;
    }
  funexp = XEXP (DECL_RTL (function), 0);
  funexp = gen_rtx_MEM (FUNCTION_MODE, funexp);

#if TARGET_MACHO
  if (MACHOPIC_INDIRECT)
    funexp = machopic_indirect_call_target (funexp);
#endif

  /* gen_sibcall expects reload to convert scratch pseudo to LR so we must
     generate sibcall RTL explicitly.  */
  insn = emit_call_insn (
	   gen_rtx_PARALLEL (VOIDmode,
			     gen_rtvec (3,
					gen_rtx_CALL (VOIDmode,
						      funexp, const0_rtx),
					gen_rtx_USE (VOIDmode, const0_rtx),
					simple_return_rtx)));
  SIBLING_CALL_P (insn) = 1;
  emit_barrier ();

  /* Run just enough of rest_of_compilation to get the insns emitted.
     There's not really enough bulk here to make other passes such as
     instruction scheduling worthwhile.  */
  insn = get_insns ();
  shorten_branches (insn);
  assemble_start_function (thunk_fndecl, fnname);
  final_start_function (insn, file, 1);
  final (insn, file, 1);
  final_end_function ();
  assemble_end_function (thunk_fndecl, fnname);

  reload_completed = 0;
  epilogue_completed = 0;
}

/* A quick summary of the various types of 'constant-pool tables'
   under PowerPC:

     Target     Flags               Name             One table per

     AIX        (none)              AIX TOC          object file
     AIX        -mfull-toc          AIX TOC          object file
     AIX        -mminimal-toc       AIX minimal TOC  translation unit
     SVR4/EABI  (none)              SVR4 SDATA       object file
     SVR4/EABI  -fpic               SVR4 pic         object file
     SVR4/EABI  -fPIC               SVR4 PIC         translation unit
     SVR4/EABI  -mrelocatable       EABI TOC         function
     SVR4/EABI  -maix               AIX TOC          object file
     SVR4/EABI  -maix -mminimal-toc AIX minimal TOC  translation unit

     Name             Reg.  Reg. set  Entries   Entries contain:
                            by        made by   addrs?  fp?      sum?

     AIX TOC           2    crt0      as        Y       option   option
     AIX minimal TOC  30    prolog    gcc       Y       Y        option
     SVR4 SDATA       13    crt0      gcc       N       Y        N
     SVR4 pic         30    prolog    ld        Y       not yet  N
     SVR4 PIC         30    prolog    gcc       Y       option   option
     EABI TOC         30    prolog    gcc       Y       option   option  */

/* Hash functions for the hash table.  */

static unsigned
rs6000_hash_constant (rtx k)
{
  enum rtx_code code = GET_CODE (k);
  machine_mode mode = GET_MODE (k);
  unsigned result = (code << 3) ^ mode;
  const char *format;
  int flen, fidx;

  format = GET_RTX_FORMAT (code);
  flen = strlen (format);
  fidx = 0;

  switch (code)
    {
    case LABEL_REF:
      return result * 1231 + (unsigned) INSN_UID (XEXP (k, 0));

    case CONST_WIDE_INT:
      {
	int i;
	flen = CONST_WIDE_INT_NUNITS (k);
	for (i = 0; i < flen; i++)
	  result = result * 613 + CONST_WIDE_INT_ELT (k, i);
	return result;
      }

    case CONST_DOUBLE:
      return real_hash (CONST_DOUBLE_REAL_VALUE (k)) * result;

    case CODE_LABEL:
      fidx = 3;
      break;

    default:
      break;
    }

  for (; fidx < flen; fidx++)
    switch (format[fidx])
      {
      case 's':
	{
	  unsigned i, len;
	  const char *str = XSTR (k, fidx);
	  len = strlen (str);
	  result = result * 613 + len;
	  for (i = 0; i < len; i++)
	    result = result * 613 + (unsigned) str[i];
	  break;
	}
      case 'u':
      case 'e':
	result = result * 1231 + rs6000_hash_constant (XEXP (k, fidx));
	break;
      case 'i':
      case 'n':
	result = result * 613 + (unsigned) XINT (k, fidx);
	break;
      case 'w':
	if (sizeof (unsigned) >= sizeof (HOST_WIDE_INT))
	  result = result * 613 + (unsigned) XWINT (k, fidx);
	else
	  {
	    size_t i;
	    for (i = 0; i < sizeof (HOST_WIDE_INT) / sizeof (unsigned); i++)
	      result = result * 613 + (unsigned) (XWINT (k, fidx)
						  >> CHAR_BIT * i);
	  }
	break;
      case '0':
	break;
      default:
	gcc_unreachable ();
      }

  return result;
}

hashval_t
toc_hasher::hash (toc_hash_struct *thc)
{
  return rs6000_hash_constant (thc->key) ^ thc->key_mode;
}

/* Compare H1 and H2 for equivalence.  */

bool
toc_hasher::equal (toc_hash_struct *h1, toc_hash_struct *h2)
{
  rtx r1 = h1->key;
  rtx r2 = h2->key;

  if (h1->key_mode != h2->key_mode)
    return 0;

  return rtx_equal_p (r1, r2);
}

/* These are the names given by the C++ front-end to vtables, and
   vtable-like objects.  Ideally, this logic should not be here;
   instead, there should be some programmatic way of inquiring as
   to whether or not an object is a vtable.  */

#define VTABLE_NAME_P(NAME)				\
  (strncmp ("_vt.", NAME, strlen ("_vt.")) == 0		\
   || strncmp ("_ZTV", NAME, strlen ("_ZTV")) == 0	\
   || strncmp ("_ZTT", NAME, strlen ("_ZTT")) == 0	\
   || strncmp ("_ZTI", NAME, strlen ("_ZTI")) == 0	\
   || strncmp ("_ZTC", NAME, strlen ("_ZTC")) == 0)
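
/* In the Itanium C++ ABI mangling used by GCC, "_ZTV" prefixes vtables,
   "_ZTT" VTTs, "_ZTI" type_info objects and "_ZTC" construction vtables;
   "_vt." is the old GNU v2 vtable mangling.  */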

#ifdef NO_DOLLAR_IN_LABEL
/* Return a GGC-allocated character string translating dollar signs in
   input NAME to underscores.  Used by XCOFF ASM_OUTPUT_LABELREF.  */

const char *
rs6000_xcoff_strip_dollar (const char *name)
{
  char *strip, *p;
  const char *q;
  size_t len;

  q = (const char *) strchr (name, '$');

  if (q == 0 || q == name)
    return name;

  len = strlen (name);
  strip = XALLOCAVEC (char, len + 1);
  strcpy (strip, name);
  p = strip + (q - name);
  while (p)
    {
      *p = '_';
      p = strchr (p + 1, '$');
    }

  return ggc_alloc_string (strip, len);
}
#endif
24031
24032 void
24033 rs6000_output_symbol_ref (FILE *file, rtx x)
24034 {
24035 const char *name = XSTR (x, 0);
24036
24037   /* Currently C++ TOC references to vtables can be emitted before it
24038 is decided whether the vtable is public or private. If this is
24039 the case, then the linker will eventually complain that there is
24040 a reference to an unknown section. Thus, for vtables only,
24041 we emit the TOC reference to reference the identifier and not the
24042 symbol. */
24043 if (VTABLE_NAME_P (name))
24045     RS6000_OUTPUT_BASENAME (file, name);
24047 else
24048 assemble_name (file, name);
24049 }
24050
24051 /* Output a TOC entry. We derive the entry name from what is being
24052 written. */
24053
24054 void
24055 output_toc (FILE *file, rtx x, int labelno, machine_mode mode)
24056 {
24057 char buf[256];
24058 const char *name = buf;
24059 rtx base = x;
24060 HOST_WIDE_INT offset = 0;
24061
24062 gcc_assert (!TARGET_NO_TOC);
24063
24064 /* When the linker won't eliminate them, don't output duplicate
24065 TOC entries (this happens on AIX if there is any kind of TOC,
24066 and on SVR4 under -fPIC or -mrelocatable). Don't do this for
24067 CODE_LABELs. */
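  /* A sketch of what the duplicate path below produces: if the
     constant under label 5 was already entered under label 2, we emit
	.set LC..5,LC..2
     (assuming the XCOFF "LC.." spelling; ASM_OUTPUT_INTERNAL_LABEL_PREFIX
     supplies the target's actual prefix) instead of a second entry.  */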
24068 if (TARGET_TOC && GET_CODE (x) != LABEL_REF)
24069 {
24070 struct toc_hash_struct *h;
24071
24072 /* Create toc_hash_table. This can't be done at TARGET_OPTION_OVERRIDE
24073 time because GGC is not initialized at that point. */
24074 if (toc_hash_table == NULL)
24075 toc_hash_table = hash_table<toc_hasher>::create_ggc (1021);
24076
24077 h = ggc_alloc<toc_hash_struct> ();
24078 h->key = x;
24079 h->key_mode = mode;
24080 h->labelno = labelno;
24081
24082 toc_hash_struct **found = toc_hash_table->find_slot (h, INSERT);
24083 if (*found == NULL)
24084 *found = h;
24085 else /* This is indeed a duplicate.
24086 Set this label equal to that label. */
24087 {
24088 fputs ("\t.set ", file);
24089 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "LC");
24090 fprintf (file, "%d,", labelno);
24091 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "LC");
24092 fprintf (file, "%d\n", ((*found)->labelno));
24093
24094 #ifdef HAVE_AS_TLS
24095 if (TARGET_XCOFF && SYMBOL_REF_P (x)
24096 && (SYMBOL_REF_TLS_MODEL (x) == TLS_MODEL_GLOBAL_DYNAMIC
24097 || SYMBOL_REF_TLS_MODEL (x) == TLS_MODEL_LOCAL_DYNAMIC))
24098 {
24099 fputs ("\t.set ", file);
24100 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "LCM");
24101 fprintf (file, "%d,", labelno);
24102 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "LCM");
24103 fprintf (file, "%d\n", ((*found)->labelno));
24104 }
24105 #endif
24106 return;
24107 }
24108 }
24109
24110 /* If we're going to put a double constant in the TOC, make sure it's
24111 aligned properly when strict alignment is on. */
24112 if ((CONST_DOUBLE_P (x) || CONST_WIDE_INT_P (x))
24113 && STRICT_ALIGNMENT
24114 && GET_MODE_BITSIZE (mode) >= 64
24115       && ! (TARGET_NO_FP_IN_TOC && ! TARGET_MINIMAL_TOC))
24116     ASM_OUTPUT_ALIGN (file, 3);
24118
24119 (*targetm.asm_out.internal_label) (file, "LC", labelno);
24120
24121 /* Handle FP constants specially. Note that if we have a minimal
24122 TOC, things we put here aren't actually in the TOC, so we can allow
24123 FP constants. */
24124 if (CONST_DOUBLE_P (x)
24125 && (GET_MODE (x) == TFmode || GET_MODE (x) == TDmode
24126 || GET_MODE (x) == IFmode || GET_MODE (x) == KFmode))
24127 {
24128 long k[4];
24129
24130 if (DECIMAL_FLOAT_MODE_P (GET_MODE (x)))
24131 REAL_VALUE_TO_TARGET_DECIMAL128 (*CONST_DOUBLE_REAL_VALUE (x), k);
24132 else
24133 REAL_VALUE_TO_TARGET_LONG_DOUBLE (*CONST_DOUBLE_REAL_VALUE (x), k);
24134
24135 if (TARGET_64BIT)
24136 {
24137 if (TARGET_ELF || TARGET_MINIMAL_TOC)
24138 fputs (DOUBLE_INT_ASM_OP, file);
24139 else
24140 fprintf (file, "\t.tc FT_%lx_%lx_%lx_%lx[TC],",
24141 k[0] & 0xffffffff, k[1] & 0xffffffff,
24142 k[2] & 0xffffffff, k[3] & 0xffffffff);
24143 fprintf (file, "0x%lx%08lx,0x%lx%08lx\n",
24144 k[WORDS_BIG_ENDIAN ? 0 : 1] & 0xffffffff,
24145 k[WORDS_BIG_ENDIAN ? 1 : 0] & 0xffffffff,
24146 k[WORDS_BIG_ENDIAN ? 2 : 3] & 0xffffffff,
24147 k[WORDS_BIG_ENDIAN ? 3 : 2] & 0xffffffff);
24148 return;
24149 }
24150 else
24151 {
24152 if (TARGET_ELF || TARGET_MINIMAL_TOC)
24153 fputs ("\t.long ", file);
24154 else
24155 fprintf (file, "\t.tc FT_%lx_%lx_%lx_%lx[TC],",
24156 k[0] & 0xffffffff, k[1] & 0xffffffff,
24157 k[2] & 0xffffffff, k[3] & 0xffffffff);
24158 fprintf (file, "0x%lx,0x%lx,0x%lx,0x%lx\n",
24159 k[0] & 0xffffffff, k[1] & 0xffffffff,
24160 k[2] & 0xffffffff, k[3] & 0xffffffff);
24161 return;
24162 }
24163 }
24164 else if (CONST_DOUBLE_P (x)
24165 && (GET_MODE (x) == DFmode || GET_MODE (x) == DDmode))
24166 {
24167 long k[2];
24168
24169 if (DECIMAL_FLOAT_MODE_P (GET_MODE (x)))
24170 REAL_VALUE_TO_TARGET_DECIMAL64 (*CONST_DOUBLE_REAL_VALUE (x), k);
24171 else
24172 REAL_VALUE_TO_TARGET_DOUBLE (*CONST_DOUBLE_REAL_VALUE (x), k);
24173
24174 if (TARGET_64BIT)
24175 {
24176 if (TARGET_ELF || TARGET_MINIMAL_TOC)
24177 fputs (DOUBLE_INT_ASM_OP, file);
24178 else
24179 fprintf (file, "\t.tc FD_%lx_%lx[TC],",
24180 k[0] & 0xffffffff, k[1] & 0xffffffff);
24181 fprintf (file, "0x%lx%08lx\n",
24182 k[WORDS_BIG_ENDIAN ? 0 : 1] & 0xffffffff,
24183 k[WORDS_BIG_ENDIAN ? 1 : 0] & 0xffffffff);
24184 return;
24185 }
24186 else
24187 {
24188 if (TARGET_ELF || TARGET_MINIMAL_TOC)
24189 fputs ("\t.long ", file);
24190 else
24191 fprintf (file, "\t.tc FD_%lx_%lx[TC],",
24192 k[0] & 0xffffffff, k[1] & 0xffffffff);
24193 fprintf (file, "0x%lx,0x%lx\n",
24194 k[0] & 0xffffffff, k[1] & 0xffffffff);
24195 return;
24196 }
24197 }
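  /* An illustrative sketch of the DFmode branch above, assuming
     64-bit AIX without -mminimal-toc: 1.0 has target words
     k[0] = 0x3ff00000 and k[1] = 0x0, so we emit
	.tc FD_3ff00000_0[TC],0x3ff0000000000000  */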
24198 else if (CONST_DOUBLE_P (x)
24199 && (GET_MODE (x) == SFmode || GET_MODE (x) == SDmode))
24200 {
24201 long l;
24202
24203 if (DECIMAL_FLOAT_MODE_P (GET_MODE (x)))
24204 REAL_VALUE_TO_TARGET_DECIMAL32 (*CONST_DOUBLE_REAL_VALUE (x), l);
24205 else
24206 REAL_VALUE_TO_TARGET_SINGLE (*CONST_DOUBLE_REAL_VALUE (x), l);
24207
24208 if (TARGET_64BIT)
24209 {
24210 if (TARGET_ELF || TARGET_MINIMAL_TOC)
24211 fputs (DOUBLE_INT_ASM_OP, file);
24212 else
24213 fprintf (file, "\t.tc FS_%lx[TC],", l & 0xffffffff);
24214 if (WORDS_BIG_ENDIAN)
24215 fprintf (file, "0x%lx00000000\n", l & 0xffffffff);
24216 else
24217 fprintf (file, "0x%lx\n", l & 0xffffffff);
24218 return;
24219 }
24220 else
24221 {
24222 if (TARGET_ELF || TARGET_MINIMAL_TOC)
24223 fputs ("\t.long ", file);
24224 else
24225 fprintf (file, "\t.tc FS_%lx[TC],", l & 0xffffffff);
24226 fprintf (file, "0x%lx\n", l & 0xffffffff);
24227 return;
24228 }
24229 }
24230 else if (GET_MODE (x) == VOIDmode && CONST_INT_P (x))
24231 {
24232 unsigned HOST_WIDE_INT low;
24233 HOST_WIDE_INT high;
24234
24235 low = INTVAL (x) & 0xffffffff;
24236 high = (HOST_WIDE_INT) INTVAL (x) >> 32;
24237
24238       /* TOC entries are always Pmode-sized, so on big-endian targets
24239 	 smaller integer constants in the TOC need to be padded.
24240 (This is still a win over putting the constants in
24241 a separate constant pool, because then we'd have
24242 to have both a TOC entry _and_ the actual constant.)
24243
24244 For a 32-bit target, CONST_INT values are loaded and shifted
24245 entirely within `low' and can be stored in one TOC entry. */
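
      /* An illustrative sketch for the 64-bit path below: the DImode
	 constant 0x123456789 splits into high = 0x1, low = 0x23456789
	 and, on AIX without -mminimal-toc, is emitted as
	.tc ID_1_23456789[TC],0x123456789  */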
24246
24247       /* It would be easy to make this work, but it isn't implemented now.  */
24248 gcc_assert (!TARGET_64BIT || POINTER_SIZE >= GET_MODE_BITSIZE (mode));
24249
24250 if (WORDS_BIG_ENDIAN && POINTER_SIZE > GET_MODE_BITSIZE (mode))
24251 {
24252 low |= high << 32;
24253 low <<= POINTER_SIZE - GET_MODE_BITSIZE (mode);
24254 high = (HOST_WIDE_INT) low >> 32;
24255 low &= 0xffffffff;
24256 }
24257
24258 if (TARGET_64BIT)
24259 {
24260 if (TARGET_ELF || TARGET_MINIMAL_TOC)
24261 fputs (DOUBLE_INT_ASM_OP, file);
24262 else
24263 fprintf (file, "\t.tc ID_%lx_%lx[TC],",
24264 (long) high & 0xffffffff, (long) low & 0xffffffff);
24265 fprintf (file, "0x%lx%08lx\n",
24266 (long) high & 0xffffffff, (long) low & 0xffffffff);
24267 return;
24268 }
24269 else
24270 {
24271 if (POINTER_SIZE < GET_MODE_BITSIZE (mode))
24272 {
24273 if (TARGET_ELF || TARGET_MINIMAL_TOC)
24274 fputs ("\t.long ", file);
24275 else
24276 fprintf (file, "\t.tc ID_%lx_%lx[TC],",
24277 (long) high & 0xffffffff, (long) low & 0xffffffff);
24278 fprintf (file, "0x%lx,0x%lx\n",
24279 (long) high & 0xffffffff, (long) low & 0xffffffff);
24280 }
24281 else
24282 {
24283 if (TARGET_ELF || TARGET_MINIMAL_TOC)
24284 fputs ("\t.long ", file);
24285 else
24286 fprintf (file, "\t.tc IS_%lx[TC],", (long) low & 0xffffffff);
24287 fprintf (file, "0x%lx\n", (long) low & 0xffffffff);
24288 }
24289 return;
24290 }
24291 }
24292
24293 if (GET_CODE (x) == CONST)
24294 {
24295 gcc_assert (GET_CODE (XEXP (x, 0)) == PLUS
24296 && CONST_INT_P (XEXP (XEXP (x, 0), 1)));
24297
24298 base = XEXP (XEXP (x, 0), 0);
24299 offset = INTVAL (XEXP (XEXP (x, 0), 1));
24300 }
24301
24302 switch (GET_CODE (base))
24303 {
24304 case SYMBOL_REF:
24305 name = XSTR (base, 0);
24306 break;
24307
24308 case LABEL_REF:
24309 ASM_GENERATE_INTERNAL_LABEL (buf, "L",
24310 CODE_LABEL_NUMBER (XEXP (base, 0)));
24311 break;
24312
24313 case CODE_LABEL:
24314 ASM_GENERATE_INTERNAL_LABEL (buf, "L", CODE_LABEL_NUMBER (base));
24315 break;
24316
24317 default:
24318 gcc_unreachable ();
24319 }
24320
24321 if (TARGET_ELF || TARGET_MINIMAL_TOC)
24322 fputs (TARGET_32BIT ? "\t.long " : DOUBLE_INT_ASM_OP, file);
24323 else
24324 {
24325 fputs ("\t.tc ", file);
24326 RS6000_OUTPUT_BASENAME (file, name);
24327
24328 if (offset < 0)
24329 fprintf (file, ".N" HOST_WIDE_INT_PRINT_UNSIGNED, - offset);
24330 else if (offset)
24331 fprintf (file, ".P" HOST_WIDE_INT_PRINT_UNSIGNED, offset);
24332
24333 /* Mark large TOC symbols on AIX with [TE] so they are mapped
24334 after other TOC symbols, reducing overflow of small TOC access
24335 to [TC] symbols. */
24336 fputs (TARGET_XCOFF && TARGET_CMODEL != CMODEL_SMALL
24337 ? "[TE]," : "[TC],", file);
24338 }
24339
24340   /* Currently C++ TOC references to vtables can be emitted before it
24341 is decided whether the vtable is public or private. If this is
24342 the case, then the linker will eventually complain that there is
24343 a TOC reference to an unknown section. Thus, for vtables only,
24344 we emit the TOC reference to reference the symbol and not the
24345 section. */
24346 if (VTABLE_NAME_P (name))
24347 {
24348 RS6000_OUTPUT_BASENAME (file, name);
24349 if (offset < 0)
24350 fprintf (file, HOST_WIDE_INT_PRINT_DEC, offset);
24351 else if (offset > 0)
24352 fprintf (file, "+" HOST_WIDE_INT_PRINT_DEC, offset);
24353 }
24354 else
24355 output_addr_const (file, x);
24356
24357 #if HAVE_AS_TLS
24358 if (TARGET_XCOFF && SYMBOL_REF_P (base))
24359 {
24360 switch (SYMBOL_REF_TLS_MODEL (base))
24361 {
24362 case 0:
24363 break;
24364 case TLS_MODEL_LOCAL_EXEC:
24365 fputs ("@le", file);
24366 break;
24367 case TLS_MODEL_INITIAL_EXEC:
24368 fputs ("@ie", file);
24369 break;
24370 /* Use global-dynamic for local-dynamic. */
24371 case TLS_MODEL_GLOBAL_DYNAMIC:
24372 case TLS_MODEL_LOCAL_DYNAMIC:
24373 putc ('\n', file);
24374 (*targetm.asm_out.internal_label) (file, "LCM", labelno);
24375 fputs ("\t.tc .", file);
24376 RS6000_OUTPUT_BASENAME (file, name);
24377 fputs ("[TC],", file);
24378 output_addr_const (file, x);
24379 fputs ("@m", file);
24380 break;
24381 default:
24382 gcc_unreachable ();
24383 }
24384 }
24385 #endif
24386
24387 putc ('\n', file);
24388 }
24389 \f
24390 /* Output an assembler pseudo-op to write an ASCII string of N characters
24391 starting at P to FILE.
24392
24393 On the RS/6000, we have to do this using the .byte operation and
24394 write out special characters outside the quoted string.
24395 Also, the assembler is broken; very long strings are truncated,
24396 so we must artificially break them up early. */
24397
24398 void
24399 output_ascii (FILE *file, const char *p, int n)
24400 {
24401 char c;
24402 int i, count_string;
24403 const char *for_string = "\t.byte \"";
24404 const char *for_decimal = "\t.byte ";
24405 const char *to_close = NULL;
24406
24407 count_string = 0;
24408 for (i = 0; i < n; i++)
24409 {
24410 c = *p++;
24411 if (c >= ' ' && c < 0177)
24412 {
24413 if (for_string)
24414 fputs (for_string, file);
24415 putc (c, file);
24416
24417 /* Write two quotes to get one. */
24418 if (c == '"')
24419 {
24420 putc (c, file);
24421 ++count_string;
24422 }
24423
24424 for_string = NULL;
24425 for_decimal = "\"\n\t.byte ";
24426 to_close = "\"\n";
24427 ++count_string;
24428
24429 if (count_string >= 512)
24430 {
24431 fputs (to_close, file);
24432
24433 for_string = "\t.byte \"";
24434 for_decimal = "\t.byte ";
24435 to_close = NULL;
24436 count_string = 0;
24437 }
24438 }
24439 else
24440 {
24441 if (for_decimal)
24442 fputs (for_decimal, file);
24443 fprintf (file, "%d", c);
24444
24445 for_string = "\n\t.byte \"";
24446 for_decimal = ", ";
24447 to_close = "\n";
24448 count_string = 0;
24449 }
24450 }
24451
24452 /* Now close the string if we have written one. Then end the line. */
24453 if (to_close)
24454 fputs (to_close, file);
24455 }
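
/* For example (a sketch), output_ascii (file, "Hi\n", 3) emits
	.byte "Hi"
	.byte 10
   printable runs are collected into one quoted .byte directive and
   all other characters are written as decimal byte values.  */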
24456 \f
24457 /* Generate a unique section name for FILENAME for a section type
24458 represented by SECTION_DESC. Output goes into BUF.
24459
24460 SECTION_DESC can be any string, as long as it is different for each
24461 possible section type.
24462
24463 We name the section in the same manner as xlc. The name begins with an
24464 underscore followed by the filename (after stripping any leading directory
24465 names) with the last period replaced by the string SECTION_DESC. If
24466 FILENAME does not contain a period, SECTION_DESC is appended to the end of
24467 the name. */
24468
24469 void
24470 rs6000_gen_section_name (char **buf, const char *filename,
24471 const char *section_desc)
24472 {
24473 const char *q, *after_last_slash, *last_period = 0;
24474 char *p;
24475 int len;
24476
24477 after_last_slash = filename;
24478 for (q = filename; *q; q++)
24479 {
24480 if (*q == '/')
24481 after_last_slash = q + 1;
24482 else if (*q == '.')
24483 last_period = q;
24484 }
24485
24486 len = strlen (after_last_slash) + strlen (section_desc) + 2;
24487 *buf = (char *) xmalloc (len);
24488
24489 p = *buf;
24490 *p++ = '_';
24491
24492 for (q = after_last_slash; *q; q++)
24493 {
24494 if (q == last_period)
24495 {
24496 strcpy (p, section_desc);
24497 p += strlen (section_desc);
24498 break;
24499 }
24500
24501 else if (ISALNUM (*q))
24502 *p++ = *q;
24503 }
24504
24505 if (last_period == 0)
24506 strcpy (p, section_desc);
24507 else
24508 *p = '\0';
24509 }
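
/* E.g. (a sketch): FILENAME "src/foo.c" with SECTION_DESC "_ro_"
   yields "_foo_ro_": the directory part is stripped, alphanumeric
   characters are copied, and the final ".c" is replaced by the
   descriptor.  */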
24510 \f
24511 /* Emit profile function. */
24512
24513 void
24514 output_profile_hook (int labelno ATTRIBUTE_UNUSED)
24515 {
24516 /* Non-standard profiling for kernels, which just saves LR then calls
24517 _mcount without worrying about arg saves. The idea is to change
24518 the function prologue as little as possible as it isn't easy to
24519 account for arg save/restore code added just for _mcount. */
24520 if (TARGET_PROFILE_KERNEL)
24521 return;
24522
24523 if (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
24524 {
24525 #ifndef NO_PROFILE_COUNTERS
24526 # define NO_PROFILE_COUNTERS 0
24527 #endif
24528 if (NO_PROFILE_COUNTERS)
24529 emit_library_call (init_one_libfunc (RS6000_MCOUNT),
24530 LCT_NORMAL, VOIDmode);
24531 else
24532 {
24533 char buf[30];
24534 const char *label_name;
24535 rtx fun;
24536
24537 ASM_GENERATE_INTERNAL_LABEL (buf, "LP", labelno);
24538 label_name = ggc_strdup ((*targetm.strip_name_encoding) (buf));
24539 fun = gen_rtx_SYMBOL_REF (Pmode, label_name);
24540
24541 emit_library_call (init_one_libfunc (RS6000_MCOUNT),
24542 LCT_NORMAL, VOIDmode, fun, Pmode);
24543 }
24544 }
24545 else if (DEFAULT_ABI == ABI_DARWIN)
24546 {
24547 const char *mcount_name = RS6000_MCOUNT;
24548 int caller_addr_regno = LR_REGNO;
24549
24550 /* Be conservative and always set this, at least for now. */
24551 crtl->uses_pic_offset_table = 1;
24552
24553 #if TARGET_MACHO
24554 /* For PIC code, set up a stub and collect the caller's address
24555 from r0, which is where the prologue puts it. */
24556 if (MACHOPIC_INDIRECT
24557 && crtl->uses_pic_offset_table)
24558 caller_addr_regno = 0;
24559 #endif
24560 emit_library_call (gen_rtx_SYMBOL_REF (Pmode, mcount_name),
24561 LCT_NORMAL, VOIDmode,
24562 gen_rtx_REG (Pmode, caller_addr_regno), Pmode);
24563 }
24564 }
24565
24566 /* Write function profiler code. */
24567
24568 void
24569 output_function_profiler (FILE *file, int labelno)
24570 {
24571 char buf[100];
24572
24573 switch (DEFAULT_ABI)
24574 {
24575 default:
24576 gcc_unreachable ();
24577
24578 case ABI_V4:
24579 if (!TARGET_32BIT)
24580 {
24581 warning (0, "no profiling of 64-bit code for this ABI");
24582 return;
24583 }
24584 ASM_GENERATE_INTERNAL_LABEL (buf, "LP", labelno);
24585 fprintf (file, "\tmflr %s\n", reg_names[0]);
24586 if (NO_PROFILE_COUNTERS)
24587 {
24588 asm_fprintf (file, "\tstw %s,4(%s)\n",
24589 reg_names[0], reg_names[1]);
24590 }
24591 else if (TARGET_SECURE_PLT && flag_pic)
24592 {
24593 if (TARGET_LINK_STACK)
24594 {
24595 char name[32];
24596 get_ppc476_thunk_name (name);
24597 asm_fprintf (file, "\tbl %s\n", name);
24598 }
24599 else
24600 asm_fprintf (file, "\tbcl 20,31,1f\n1:\n");
24601 asm_fprintf (file, "\tstw %s,4(%s)\n",
24602 reg_names[0], reg_names[1]);
24603 asm_fprintf (file, "\tmflr %s\n", reg_names[12]);
24604 asm_fprintf (file, "\taddis %s,%s,",
24605 reg_names[12], reg_names[12]);
24606 assemble_name (file, buf);
24607 asm_fprintf (file, "-1b@ha\n\tla %s,", reg_names[0]);
24608 assemble_name (file, buf);
24609 asm_fprintf (file, "-1b@l(%s)\n", reg_names[12]);
24610 }
24611 else if (flag_pic == 1)
24612 {
24613 fputs ("\tbl _GLOBAL_OFFSET_TABLE_@local-4\n", file);
24614 asm_fprintf (file, "\tstw %s,4(%s)\n",
24615 reg_names[0], reg_names[1]);
24616 asm_fprintf (file, "\tmflr %s\n", reg_names[12]);
24617 asm_fprintf (file, "\tlwz %s,", reg_names[0]);
24618 assemble_name (file, buf);
24619 asm_fprintf (file, "@got(%s)\n", reg_names[12]);
24620 }
24621 else if (flag_pic > 1)
24622 {
24623 asm_fprintf (file, "\tstw %s,4(%s)\n",
24624 reg_names[0], reg_names[1]);
24625 /* Now, we need to get the address of the label. */
24626 if (TARGET_LINK_STACK)
24627 {
24628 char name[32];
24629 get_ppc476_thunk_name (name);
24630 asm_fprintf (file, "\tbl %s\n\tb 1f\n\t.long ", name);
24631 assemble_name (file, buf);
24632 fputs ("-.\n1:", file);
24633 asm_fprintf (file, "\tmflr %s\n", reg_names[11]);
24634 asm_fprintf (file, "\taddi %s,%s,4\n",
24635 reg_names[11], reg_names[11]);
24636 }
24637 else
24638 {
24639 fputs ("\tbcl 20,31,1f\n\t.long ", file);
24640 assemble_name (file, buf);
24641 fputs ("-.\n1:", file);
24642 asm_fprintf (file, "\tmflr %s\n", reg_names[11]);
24643 }
24644 asm_fprintf (file, "\tlwz %s,0(%s)\n",
24645 reg_names[0], reg_names[11]);
24646 asm_fprintf (file, "\tadd %s,%s,%s\n",
24647 reg_names[0], reg_names[0], reg_names[11]);
24648 }
24649 else
24650 {
24651 asm_fprintf (file, "\tlis %s,", reg_names[12]);
24652 assemble_name (file, buf);
24653 fputs ("@ha\n", file);
24654 asm_fprintf (file, "\tstw %s,4(%s)\n",
24655 reg_names[0], reg_names[1]);
24656 asm_fprintf (file, "\tla %s,", reg_names[0]);
24657 assemble_name (file, buf);
24658 asm_fprintf (file, "@l(%s)\n", reg_names[12]);
24659 }
24660
24661 /* ABI_V4 saves the static chain reg with ASM_OUTPUT_REG_PUSH. */
24662 fprintf (file, "\tbl %s%s\n",
24663 RS6000_MCOUNT, flag_pic ? "@plt" : "");
24664 break;
24665
24666 case ABI_AIX:
24667 case ABI_ELFv2:
24668 case ABI_DARWIN:
24669 /* Don't do anything, done in output_profile_hook (). */
24670 break;
24671 }
24672 }
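
/* As a sketch (assuming no PIC and label number 3), the ABI_V4 path
   above emits something like
	mflr 0
	lis 12,.LP3@ha
	stw 0,4(1)
	la 0,.LP3@l(12)
	bl _mcount
   where ".LP" is the internal label spelling used on SVR4/ELF targets
   and RS6000_MCOUNT expands to "_mcount" there.  */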
24673
24674 \f
24675
24676 /* The following variable value is the last issued insn. */
24677
24678 static rtx_insn *last_scheduled_insn;
24679
24680 /* The following variable helps to balance issuing of load and
24681    store instructions.  */
24682
24683 static int load_store_pendulum;
24684
24685 /* The following variable helps pair divide insns during scheduling. */
24686 static int divide_cnt;
24687 /* The following variable helps pair and alternate vector and vector load
24688 insns during scheduling. */
24689 static int vec_pairing;
24690
24691
24692 /* Power4 load update and store update instructions are cracked into a
24693 load or store and an integer insn which are executed in the same cycle.
24694 Branches have their own dispatch slot which does not count against the
24695 GCC issue rate, but it changes the program flow so there are no other
24696 instructions to issue in this cycle. */
24697
24698 static int
24699 rs6000_variable_issue_1 (rtx_insn *insn, int more)
24700 {
24701 last_scheduled_insn = insn;
24702 if (GET_CODE (PATTERN (insn)) == USE
24703 || GET_CODE (PATTERN (insn)) == CLOBBER)
24704 {
24705 cached_can_issue_more = more;
24706 return cached_can_issue_more;
24707 }
24708
24709 if (insn_terminates_group_p (insn, current_group))
24710 {
24711 cached_can_issue_more = 0;
24712 return cached_can_issue_more;
24713 }
24714
24715   /* If INSN has no reservation but we reach here anyway, leave the issue capacity unchanged.  */
24716 if (recog_memoized (insn) < 0)
24717 return more;
24718
24719 if (rs6000_sched_groups)
24720 {
24721 if (is_microcoded_insn (insn))
24722 cached_can_issue_more = 0;
24723 else if (is_cracked_insn (insn))
24724 cached_can_issue_more = more > 2 ? more - 2 : 0;
24725 else
24726 cached_can_issue_more = more - 1;
24727
24728 return cached_can_issue_more;
24729 }
24730
24731 if (rs6000_tune == PROCESSOR_CELL && is_nonpipeline_insn (insn))
24732 return 0;
24733
24734 cached_can_issue_more = more - 1;
24735 return cached_can_issue_more;
24736 }
24737
24738 static int
24739 rs6000_variable_issue (FILE *stream, int verbose, rtx_insn *insn, int more)
24740 {
24741 int r = rs6000_variable_issue_1 (insn, more);
24742 if (verbose)
24743 fprintf (stream, "// rs6000_variable_issue (more = %d) = %d\n", more, r);
24744 return r;
24745 }
24746
24747 /* Adjust the cost of a scheduling dependency. Return the new cost of
24748 a dependency LINK or INSN on DEP_INSN. COST is the current cost. */
24749
24750 static int
24751 rs6000_adjust_cost (rtx_insn *insn, int dep_type, rtx_insn *dep_insn, int cost,
24752 unsigned int)
24753 {
24754 enum attr_type attr_type;
24755
24756 if (recog_memoized (insn) < 0 || recog_memoized (dep_insn) < 0)
24757 return cost;
24758
24759 switch (dep_type)
24760 {
24761 case REG_DEP_TRUE:
24762 {
24763 /* Data dependency; DEP_INSN writes a register that INSN reads
24764 some cycles later. */
24765
24766 /* Separate a load from a narrower, dependent store. */
24767 if ((rs6000_sched_groups || rs6000_tune == PROCESSOR_POWER9
24768 || rs6000_tune == PROCESSOR_FUTURE)
24769 && GET_CODE (PATTERN (insn)) == SET
24770 && GET_CODE (PATTERN (dep_insn)) == SET
24771 && MEM_P (XEXP (PATTERN (insn), 1))
24772 && MEM_P (XEXP (PATTERN (dep_insn), 0))
24773 && (GET_MODE_SIZE (GET_MODE (XEXP (PATTERN (insn), 1)))
24774 > GET_MODE_SIZE (GET_MODE (XEXP (PATTERN (dep_insn), 0)))))
24775 return cost + 14;
24776
24777 attr_type = get_attr_type (insn);
24778
24779 switch (attr_type)
24780 {
24781 case TYPE_JMPREG:
24782 /* Tell the first scheduling pass about the latency between
24783 a mtctr and bctr (and mtlr and br/blr). The first
24784 scheduling pass will not know about this latency since
24785 the mtctr instruction, which has the latency associated
24786 to it, will be generated by reload. */
24787 return 4;
24788 case TYPE_BRANCH:
24789 /* Leave some extra cycles between a compare and its
24790 dependent branch, to inhibit expensive mispredicts. */
24791 if ((rs6000_tune == PROCESSOR_PPC603
24792 || rs6000_tune == PROCESSOR_PPC604
24793 || rs6000_tune == PROCESSOR_PPC604e
24794 || rs6000_tune == PROCESSOR_PPC620
24795 || rs6000_tune == PROCESSOR_PPC630
24796 || rs6000_tune == PROCESSOR_PPC750
24797 || rs6000_tune == PROCESSOR_PPC7400
24798 || rs6000_tune == PROCESSOR_PPC7450
24799 || rs6000_tune == PROCESSOR_PPCE5500
24800 || rs6000_tune == PROCESSOR_PPCE6500
24801 || rs6000_tune == PROCESSOR_POWER4
24802 || rs6000_tune == PROCESSOR_POWER5
24803 || rs6000_tune == PROCESSOR_POWER7
24804 || rs6000_tune == PROCESSOR_POWER8
24805 || rs6000_tune == PROCESSOR_POWER9
24806 || rs6000_tune == PROCESSOR_FUTURE
24807 || rs6000_tune == PROCESSOR_CELL)
24808 && recog_memoized (dep_insn)
24809 && (INSN_CODE (dep_insn) >= 0))
24810
24811 switch (get_attr_type (dep_insn))
24812 {
24813 case TYPE_CMP:
24814 case TYPE_FPCOMPARE:
24815 case TYPE_CR_LOGICAL:
24816 return cost + 2;
24817 case TYPE_EXTS:
24818 case TYPE_MUL:
24819 if (get_attr_dot (dep_insn) == DOT_YES)
24820 return cost + 2;
24821 else
24822 break;
24823 case TYPE_SHIFT:
24824 if (get_attr_dot (dep_insn) == DOT_YES
24825 && get_attr_var_shift (dep_insn) == VAR_SHIFT_NO)
24826 return cost + 2;
24827 else
24828 break;
24829 default:
24830 break;
24831 }
24832 break;
24833
24834 case TYPE_STORE:
24835 case TYPE_FPSTORE:
24836 if ((rs6000_tune == PROCESSOR_POWER6)
24837 && recog_memoized (dep_insn)
24838 && (INSN_CODE (dep_insn) >= 0))
24839 {
24841 if (GET_CODE (PATTERN (insn)) != SET)
24842 /* If this happens, we have to extend this to schedule
24843 optimally. Return default for now. */
24844 return cost;
24845
24846 /* Adjust the cost for the case where the value written
24847 by a fixed point operation is used as the address
24848 gen value on a store. */
24849 switch (get_attr_type (dep_insn))
24850 {
24851 case TYPE_LOAD:
24852 case TYPE_CNTLZ:
24853 {
24854 if (! rs6000_store_data_bypass_p (dep_insn, insn))
24855 return get_attr_sign_extend (dep_insn)
24856 == SIGN_EXTEND_YES ? 6 : 4;
24857 break;
24858 }
24859 case TYPE_SHIFT:
24860 {
24861 if (! rs6000_store_data_bypass_p (dep_insn, insn))
24862 return get_attr_var_shift (dep_insn) == VAR_SHIFT_YES ?
24863 6 : 3;
24864 break;
24865 }
24866 case TYPE_INTEGER:
24867 case TYPE_ADD:
24868 case TYPE_LOGICAL:
24869 case TYPE_EXTS:
24870 case TYPE_INSERT:
24871 {
24872 if (! rs6000_store_data_bypass_p (dep_insn, insn))
24873 return 3;
24874 break;
24875 }
24876 case TYPE_STORE:
24877 case TYPE_FPLOAD:
24878 case TYPE_FPSTORE:
24879 {
24880 if (get_attr_update (dep_insn) == UPDATE_YES
24881 && ! rs6000_store_data_bypass_p (dep_insn, insn))
24882 return 3;
24883 break;
24884 }
24885 case TYPE_MUL:
24886 {
24887 if (! rs6000_store_data_bypass_p (dep_insn, insn))
24888 return 17;
24889 break;
24890 }
24891 case TYPE_DIV:
24892 {
24893 if (! rs6000_store_data_bypass_p (dep_insn, insn))
24894 return get_attr_size (dep_insn) == SIZE_32 ? 45 : 57;
24895 break;
24896 }
24897 default:
24898 break;
24899 }
24900 }
24901 break;
24902
24903 case TYPE_LOAD:
24904 if ((rs6000_tune == PROCESSOR_POWER6)
24905 && recog_memoized (dep_insn)
24906 && (INSN_CODE (dep_insn) >= 0))
24907 {
24909 /* Adjust the cost for the case where the value written
24910 by a fixed point instruction is used within the address
24911 gen portion of a subsequent load(u)(x) */
24912 switch (get_attr_type (dep_insn))
24913 {
24914 case TYPE_LOAD:
24915 case TYPE_CNTLZ:
24916 {
24917 if (set_to_load_agen (dep_insn, insn))
24918 return get_attr_sign_extend (dep_insn)
24919 == SIGN_EXTEND_YES ? 6 : 4;
24920 break;
24921 }
24922 case TYPE_SHIFT:
24923 {
24924 if (set_to_load_agen (dep_insn, insn))
24925 return get_attr_var_shift (dep_insn) == VAR_SHIFT_YES ?
24926 6 : 3;
24927 break;
24928 }
24929 case TYPE_INTEGER:
24930 case TYPE_ADD:
24931 case TYPE_LOGICAL:
24932 case TYPE_EXTS:
24933 case TYPE_INSERT:
24934 {
24935 if (set_to_load_agen (dep_insn, insn))
24936 return 3;
24937 break;
24938 }
24939 case TYPE_STORE:
24940 case TYPE_FPLOAD:
24941 case TYPE_FPSTORE:
24942 {
24943 if (get_attr_update (dep_insn) == UPDATE_YES
24944 && set_to_load_agen (dep_insn, insn))
24945 return 3;
24946 break;
24947 }
24948 case TYPE_MUL:
24949 {
24950 if (set_to_load_agen (dep_insn, insn))
24951 return 17;
24952 break;
24953 }
24954 case TYPE_DIV:
24955 {
24956 if (set_to_load_agen (dep_insn, insn))
24957 return get_attr_size (dep_insn) == SIZE_32 ? 45 : 57;
24958 break;
24959 }
24960 default:
24961 break;
24962 }
24963 }
24964 break;
24965
24966 case TYPE_FPLOAD:
24967 if ((rs6000_tune == PROCESSOR_POWER6)
24968 && get_attr_update (insn) == UPDATE_NO
24969 && recog_memoized (dep_insn)
24970 && (INSN_CODE (dep_insn) >= 0)
24971 && (get_attr_type (dep_insn) == TYPE_MFFGPR))
24972 return 2;
24973
24974 default:
24975 break;
24976 }
24977
24978 /* Fall out to return default cost. */
24979 }
24980 break;
24981
24982 case REG_DEP_OUTPUT:
24983 /* Output dependency; DEP_INSN writes a register that INSN writes some
24984 cycles later. */
24985 if ((rs6000_tune == PROCESSOR_POWER6)
24986 && recog_memoized (dep_insn)
24987 && (INSN_CODE (dep_insn) >= 0))
24988 {
24989 attr_type = get_attr_type (insn);
24990
24991 switch (attr_type)
24992 {
24993 case TYPE_FP:
24994 case TYPE_FPSIMPLE:
24995 if (get_attr_type (dep_insn) == TYPE_FP
24996 || get_attr_type (dep_insn) == TYPE_FPSIMPLE)
24997 return 1;
24998 break;
24999 case TYPE_FPLOAD:
25000 if (get_attr_update (insn) == UPDATE_NO
25001 && get_attr_type (dep_insn) == TYPE_MFFGPR)
25002 return 2;
25003 break;
25004 default:
25005 break;
25006 }
25007 }
25008 /* Fall through, no cost for output dependency. */
25009 /* FALLTHRU */
25010
25011 case REG_DEP_ANTI:
25012 /* Anti dependency; DEP_INSN reads a register that INSN writes some
25013 cycles later. */
25014 return 0;
25015
25016 default:
25017 gcc_unreachable ();
25018 }
25019
25020 return cost;
25021 }
25022
25023 /* Debug version of rs6000_adjust_cost. */
25024
25025 static int
25026 rs6000_debug_adjust_cost (rtx_insn *insn, int dep_type, rtx_insn *dep_insn,
25027 int cost, unsigned int dw)
25028 {
25029 int ret = rs6000_adjust_cost (insn, dep_type, dep_insn, cost, dw);
25030
25031 if (ret != cost)
25032 {
25033 const char *dep;
25034
25035 switch (dep_type)
25036 {
25037 default: dep = "unknown depencency"; break;
25038 case REG_DEP_TRUE: dep = "data dependency"; break;
25039 case REG_DEP_OUTPUT: dep = "output dependency"; break;
25040 case REG_DEP_ANTI: dep = "anti depencency"; break;
25041 }
25042
25043 fprintf (stderr,
25044 "\nrs6000_adjust_cost, final cost = %d, orig cost = %d, "
25045 "%s, insn:\n", ret, cost, dep);
25046
25047 debug_rtx (insn);
25048 }
25049
25050 return ret;
25051 }
25052
25053 /* Return true if INSN is microcoded, false otherwise.  */
25055
25056 static bool
25057 is_microcoded_insn (rtx_insn *insn)
25058 {
25059 if (!insn || !NONDEBUG_INSN_P (insn)
25060 || GET_CODE (PATTERN (insn)) == USE
25061 || GET_CODE (PATTERN (insn)) == CLOBBER)
25062 return false;
25063
25064 if (rs6000_tune == PROCESSOR_CELL)
25065 return get_attr_cell_micro (insn) == CELL_MICRO_ALWAYS;
25066
25067 if (rs6000_sched_groups
25068 && (rs6000_tune == PROCESSOR_POWER4 || rs6000_tune == PROCESSOR_POWER5))
25069 {
25070 enum attr_type type = get_attr_type (insn);
25071 if ((type == TYPE_LOAD
25072 && get_attr_update (insn) == UPDATE_YES
25073 && get_attr_sign_extend (insn) == SIGN_EXTEND_YES)
25074 || ((type == TYPE_LOAD || type == TYPE_STORE)
25075 && get_attr_update (insn) == UPDATE_YES
25076 && get_attr_indexed (insn) == INDEXED_YES)
25077 || type == TYPE_MFCR)
25078 return true;
25079 }
25080
25081 return false;
25082 }
25083
25084 /* The function returns true if INSN is cracked into 2 instructions
25085 by the processor (and therefore occupies 2 issue slots). */
25086
25087 static bool
25088 is_cracked_insn (rtx_insn *insn)
25089 {
25090 if (!insn || !NONDEBUG_INSN_P (insn)
25091 || GET_CODE (PATTERN (insn)) == USE
25092 || GET_CODE (PATTERN (insn)) == CLOBBER)
25093 return false;
25094
25095 if (rs6000_sched_groups
25096 && (rs6000_tune == PROCESSOR_POWER4 || rs6000_tune == PROCESSOR_POWER5))
25097 {
25098 enum attr_type type = get_attr_type (insn);
25099 if ((type == TYPE_LOAD
25100 && get_attr_sign_extend (insn) == SIGN_EXTEND_YES
25101 && get_attr_update (insn) == UPDATE_NO)
25102 || (type == TYPE_LOAD
25103 && get_attr_sign_extend (insn) == SIGN_EXTEND_NO
25104 && get_attr_update (insn) == UPDATE_YES
25105 && get_attr_indexed (insn) == INDEXED_NO)
25106 || (type == TYPE_STORE
25107 && get_attr_update (insn) == UPDATE_YES
25108 && get_attr_indexed (insn) == INDEXED_NO)
25109 || ((type == TYPE_FPLOAD || type == TYPE_FPSTORE)
25110 && get_attr_update (insn) == UPDATE_YES)
25111 || (type == TYPE_CR_LOGICAL
25112 && get_attr_cr_logical_3op (insn) == CR_LOGICAL_3OP_YES)
25113 || (type == TYPE_EXTS
25114 && get_attr_dot (insn) == DOT_YES)
25115 || (type == TYPE_SHIFT
25116 && get_attr_dot (insn) == DOT_YES
25117 && get_attr_var_shift (insn) == VAR_SHIFT_NO)
25118 || (type == TYPE_MUL
25119 && get_attr_dot (insn) == DOT_YES)
25120 || type == TYPE_DIV
25121 || (type == TYPE_INSERT
25122 && get_attr_size (insn) == SIZE_32))
25123 return true;
25124 }
25125
25126 return false;
25127 }
25128
25129 /* The function returns true if INSN can be issued only from
25130 the branch slot. */
25131
25132 static bool
25133 is_branch_slot_insn (rtx_insn *insn)
25134 {
25135 if (!insn || !NONDEBUG_INSN_P (insn)
25136 || GET_CODE (PATTERN (insn)) == USE
25137 || GET_CODE (PATTERN (insn)) == CLOBBER)
25138 return false;
25139
25140 if (rs6000_sched_groups)
25141 {
25142 enum attr_type type = get_attr_type (insn);
25143 if (type == TYPE_BRANCH || type == TYPE_JMPREG)
25144 return true;
25145 return false;
25146 }
25147
25148 return false;
25149 }
25150
25151 /* Return true if OUT_INSN sets a value that is used in the address
25152    generation computation of IN_INSN.  */
25153 static bool
25154 set_to_load_agen (rtx_insn *out_insn, rtx_insn *in_insn)
25155 {
25156 rtx out_set, in_set;
25157
25158 /* For performance reasons, only handle the simple case where
25159 both loads are a single_set. */
25160 out_set = single_set (out_insn);
25161 if (out_set)
25162 {
25163 in_set = single_set (in_insn);
25164 if (in_set)
25165 return reg_mentioned_p (SET_DEST (out_set), SET_SRC (in_set));
25166 }
25167
25168 return false;
25169 }
25170
25171 /* Try to determine base/offset/size parts of the given MEM.
25172    Return true if successful, false if any of the values couldn't
25173    be determined.
25174
25175 This function only looks for REG or REG+CONST address forms.
25176 REG+REG address form will return false. */
25177
25178 static bool
25179 get_memref_parts (rtx mem, rtx *base, HOST_WIDE_INT *offset,
25180 HOST_WIDE_INT *size)
25181 {
25182 rtx addr_rtx;
25183   if (MEM_SIZE_KNOWN_P (mem))
25184 *size = MEM_SIZE (mem);
25185 else
25186 return false;
25187
25188 addr_rtx = (XEXP (mem, 0));
25189 if (GET_CODE (addr_rtx) == PRE_MODIFY)
25190 addr_rtx = XEXP (addr_rtx, 1);
25191
25192 *offset = 0;
25193 while (GET_CODE (addr_rtx) == PLUS
25194 && CONST_INT_P (XEXP (addr_rtx, 1)))
25195 {
25196 *offset += INTVAL (XEXP (addr_rtx, 1));
25197 addr_rtx = XEXP (addr_rtx, 0);
25198 }
25199 if (!REG_P (addr_rtx))
25200 return false;
25201
25202 *base = addr_rtx;
25203 return true;
25204 }
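
/* For instance (a sketch), a MEM of the form
     (mem:DI (plus:DI (reg:DI 9) (const_int 16)))
   with a known 8-byte size yields *BASE = (reg 9), *OFFSET = 16 and
   *SIZE = 8, while a reg+reg indexed address makes this return
   false.  */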
25205
25206 /* Return true if the target storage location of MEM1 is adjacent
25207    to the target storage location of MEM2.  */
25209
25210 static bool
25211 adjacent_mem_locations (rtx mem1, rtx mem2)
25212 {
25213 rtx reg1, reg2;
25214 HOST_WIDE_INT off1, size1, off2, size2;
25215
25216 if (get_memref_parts (mem1, &reg1, &off1, &size1)
25217 && get_memref_parts (mem2, &reg2, &off2, &size2))
25218 return ((REGNO (reg1) == REGNO (reg2))
25219 && ((off1 + size1 == off2)
25220 || (off2 + size2 == off1)));
25221
25222 return false;
25223 }
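
/* E.g. (a sketch): an 8-byte store at (reg 9)+0 and another at
   (reg 9)+8 are adjacent; the same offsets from different base
   registers are not, even if the registers happen to hold equal
   values.  */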
25224
25225 /* This function returns true if it can be determined that the two MEM
25226 locations overlap by at least 1 byte based on base reg/offset/size. */
25227
25228 static bool
25229 mem_locations_overlap (rtx mem1, rtx mem2)
25230 {
25231 rtx reg1, reg2;
25232 HOST_WIDE_INT off1, size1, off2, size2;
25233
25234 if (get_memref_parts (mem1, &reg1, &off1, &size1)
25235 && get_memref_parts (mem2, &reg2, &off2, &size2))
25236 return ((REGNO (reg1) == REGNO (reg2))
25237 && (((off1 <= off2) && (off1 + size1 > off2))
25238 || ((off2 <= off1) && (off2 + size2 > off1))));
25239
25240 return false;
25241 }
25242
25243 /* Update the integer scheduling priority INSN_PRIORITY (INSN):
25244    increase the priority to execute INSN earlier, reduce it to
25245    execute INSN later.  Return the (possibly adjusted) priority.  */
25248
25249 static int
25250 rs6000_adjust_priority (rtx_insn *insn ATTRIBUTE_UNUSED, int priority)
25251 {
25252 rtx load_mem, str_mem;
25253 /* On machines (like the 750) which have asymmetric integer units,
25254 where one integer unit can do multiply and divides and the other
25255 can't, reduce the priority of multiply/divide so it is scheduled
25256 before other integer operations. */
25257
25258 #if 0
25259 if (! INSN_P (insn))
25260 return priority;
25261
25262 if (GET_CODE (PATTERN (insn)) == USE)
25263 return priority;
25264
25265 switch (rs6000_tune) {
25266 case PROCESSOR_PPC750:
25267 switch (get_attr_type (insn))
25268 {
25269 default:
25270 break;
25271
25272 case TYPE_MUL:
25273 case TYPE_DIV:
25274 fprintf (stderr, "priority was %#x (%d) before adjustment\n",
25275 priority, priority);
25276 if (priority >= 0 && priority < 0x01000000)
25277 priority >>= 3;
25278 break;
25279 }
25280 }
25281 #endif
25282
25283 if (insn_must_be_first_in_group (insn)
25284 && reload_completed
25285 && current_sched_info->sched_max_insns_priority
25286 && rs6000_sched_restricted_insns_priority)
25287 {
25288
25289 /* Prioritize insns that can be dispatched only in the first
25290 dispatch slot. */
25291 if (rs6000_sched_restricted_insns_priority == 1)
25292 /* Attach highest priority to insn. This means that in
25293 haifa-sched.c:ready_sort(), dispatch-slot restriction considerations
25294 precede 'priority' (critical path) considerations. */
25295 return current_sched_info->sched_max_insns_priority;
25296 else if (rs6000_sched_restricted_insns_priority == 2)
25297 /* Increase priority of insn by a minimal amount. This means that in
25298 haifa-sched.c:ready_sort(), only 'priority' (critical path)
25299 considerations precede dispatch-slot restriction considerations. */
25300 return (priority + 1);
25301 }
25302
25303 if (rs6000_tune == PROCESSOR_POWER6
25304 && ((load_store_pendulum == -2 && is_load_insn (insn, &load_mem))
25305 || (load_store_pendulum == 2 && is_store_insn (insn, &str_mem))))
25306 /* Attach highest priority to insn if the scheduler has just issued two
25307 stores and this instruction is a load, or two loads and this instruction
25308 is a store. Power6 wants loads and stores scheduled alternately
25309      when possible.  */
25310 return current_sched_info->sched_max_insns_priority;
25311
25312 return priority;
25313 }
25314
25315 /* Return true if the instruction is nonpipelined on the Cell. */
25316 static bool
25317 is_nonpipeline_insn (rtx_insn *insn)
25318 {
25319 enum attr_type type;
25320 if (!insn || !NONDEBUG_INSN_P (insn)
25321 || GET_CODE (PATTERN (insn)) == USE
25322 || GET_CODE (PATTERN (insn)) == CLOBBER)
25323 return false;
25324
25325 type = get_attr_type (insn);
25326 if (type == TYPE_MUL
25327 || type == TYPE_DIV
25328 || type == TYPE_SDIV
25329 || type == TYPE_DDIV
25330 || type == TYPE_SSQRT
25331 || type == TYPE_DSQRT
25332 || type == TYPE_MFCR
25333 || type == TYPE_MFCRF
25334 || type == TYPE_MFJMPR)
25335 {
25336 return true;
25337 }
25338 return false;
25339 }
25340
25341
25342 /* Return how many instructions the machine can issue per cycle. */
25343
25344 static int
25345 rs6000_issue_rate (void)
25346 {
25347 /* Unless scheduling for register pressure, use issue rate of 1 for
25348 first scheduling pass to decrease degradation. */
25349 if (!reload_completed && !flag_sched_pressure)
25350 return 1;
25351
25352 switch (rs6000_tune) {
25353 case PROCESSOR_RS64A:
25354 case PROCESSOR_PPC601: /* ? */
25355 case PROCESSOR_PPC7450:
25356 return 3;
25357 case PROCESSOR_PPC440:
25358 case PROCESSOR_PPC603:
25359 case PROCESSOR_PPC750:
25360 case PROCESSOR_PPC7400:
25361 case PROCESSOR_PPC8540:
25362 case PROCESSOR_PPC8548:
25363 case PROCESSOR_CELL:
25364 case PROCESSOR_PPCE300C2:
25365 case PROCESSOR_PPCE300C3:
25366 case PROCESSOR_PPCE500MC:
25367 case PROCESSOR_PPCE500MC64:
25368 case PROCESSOR_PPCE5500:
25369 case PROCESSOR_PPCE6500:
25370 case PROCESSOR_TITAN:
25371 return 2;
25372 case PROCESSOR_PPC476:
25373 case PROCESSOR_PPC604:
25374 case PROCESSOR_PPC604e:
25375 case PROCESSOR_PPC620:
25376 case PROCESSOR_PPC630:
25377 return 4;
25378 case PROCESSOR_POWER4:
25379 case PROCESSOR_POWER5:
25380 case PROCESSOR_POWER6:
25381 case PROCESSOR_POWER7:
25382 return 5;
25383 case PROCESSOR_POWER8:
25384 return 7;
25385 case PROCESSOR_POWER9:
25386 case PROCESSOR_FUTURE:
25387 return 6;
25388 default:
25389 return 1;
25390 }
25391 }
25392
25393 /* Return how many instructions to look ahead for better insn
25394 scheduling. */
25395
25396 static int
25397 rs6000_use_sched_lookahead (void)
25398 {
25399 switch (rs6000_tune)
25400 {
25401 case PROCESSOR_PPC8540:
25402 case PROCESSOR_PPC8548:
25403 return 4;
25404
25405 case PROCESSOR_CELL:
25406 return (reload_completed ? 8 : 0);
25407
25408 default:
25409 return 0;
25410 }
25411 }
25412
25413 /* We are choosing an insn from the ready queue.  Return zero if INSN can be
25414 chosen. */
25415 static int
25416 rs6000_use_sched_lookahead_guard (rtx_insn *insn, int ready_index)
25417 {
25418 if (ready_index == 0)
25419 return 0;
25420
25421 if (rs6000_tune != PROCESSOR_CELL)
25422 return 0;
25423
25424 gcc_assert (insn != NULL_RTX && INSN_P (insn));
25425
25426 if (!reload_completed
25427 || is_nonpipeline_insn (insn)
25428 || is_microcoded_insn (insn))
25429 return 1;
25430
25431 return 0;
25432 }
25433
25434 /* Determine if PAT refers to memory. If so, set MEM_REF to the MEM rtx
25435 and return true. */
25436
25437 static bool
25438 find_mem_ref (rtx pat, rtx *mem_ref)
25439 {
25440 const char * fmt;
25441 int i, j;
25442
25443 /* stack_tie does not produce any real memory traffic. */
25444 if (tie_operand (pat, VOIDmode))
25445 return false;
25446
25447 if (MEM_P (pat))
25448 {
25449 *mem_ref = pat;
25450 return true;
25451 }
25452
25453 /* Recursively process the pattern. */
25454 fmt = GET_RTX_FORMAT (GET_CODE (pat));
25455
25456 for (i = GET_RTX_LENGTH (GET_CODE (pat)) - 1; i >= 0; i--)
25457 {
25458 if (fmt[i] == 'e')
25459 {
25460 if (find_mem_ref (XEXP (pat, i), mem_ref))
25461 return true;
25462 }
25463 else if (fmt[i] == 'E')
25464 for (j = XVECLEN (pat, i) - 1; j >= 0; j--)
25465 {
25466 if (find_mem_ref (XVECEXP (pat, i, j), mem_ref))
25467 return true;
25468 }
25469 }
25470
25471 return false;
25472 }
25473
25474 /* Determine if PAT is a PATTERN of a load insn. */
25475
25476 static bool
25477 is_load_insn1 (rtx pat, rtx *load_mem)
25478 {
25479   if (!pat)
25480 return false;
25481
25482 if (GET_CODE (pat) == SET)
25483 return find_mem_ref (SET_SRC (pat), load_mem);
25484
25485 if (GET_CODE (pat) == PARALLEL)
25486 {
25487 int i;
25488
25489 for (i = 0; i < XVECLEN (pat, 0); i++)
25490 if (is_load_insn1 (XVECEXP (pat, 0, i), load_mem))
25491 return true;
25492 }
25493
25494 return false;
25495 }
25496
25497 /* Determine if INSN loads from memory. */
25498
25499 static bool
25500 is_load_insn (rtx insn, rtx *load_mem)
25501 {
25502 if (!insn || !INSN_P (insn))
25503 return false;
25504
25505 if (CALL_P (insn))
25506 return false;
25507
25508 return is_load_insn1 (PATTERN (insn), load_mem);
25509 }
25510
25511 /* Determine if PAT is a PATTERN of a store insn. */
25512
25513 static bool
25514 is_store_insn1 (rtx pat, rtx *str_mem)
25515 {
25516   if (!pat)
25517 return false;
25518
25519 if (GET_CODE (pat) == SET)
25520 return find_mem_ref (SET_DEST (pat), str_mem);
25521
25522 if (GET_CODE (pat) == PARALLEL)
25523 {
25524 int i;
25525
25526 for (i = 0; i < XVECLEN (pat, 0); i++)
25527 if (is_store_insn1 (XVECEXP (pat, 0, i), str_mem))
25528 return true;
25529 }
25530
25531 return false;
25532 }
25533
25534 /* Determine if INSN stores to memory. */
25535
25536 static bool
25537 is_store_insn (rtx insn, rtx *str_mem)
25538 {
25539 if (!insn || !INSN_P (insn))
25540 return false;
25541
25542 return is_store_insn1 (PATTERN (insn), str_mem);
25543 }
25544
25545 /* Return whether TYPE is a Power9 pairable vector instruction type. */
25546
25547 static bool
25548 is_power9_pairable_vec_type (enum attr_type type)
25549 {
25550 switch (type)
25551 {
25552 case TYPE_VECSIMPLE:
25553 case TYPE_VECCOMPLEX:
25554 case TYPE_VECDIV:
25555 case TYPE_VECCMP:
25556 case TYPE_VECPERM:
25557 case TYPE_VECFLOAT:
25558 case TYPE_VECFDIV:
25559 case TYPE_VECDOUBLE:
25560 return true;
25561 default:
25562 break;
25563 }
25564 return false;
25565 }
25566
25567 /* Returns whether the dependence between INSN and NEXT is considered
25568 costly by the given target. */
25569
25570 static bool
25571 rs6000_is_costly_dependence (dep_t dep, int cost, int distance)
25572 {
25573 rtx insn;
25574 rtx next;
25575 rtx load_mem, str_mem;
25576
25577 /* If the flag is not enabled - no dependence is considered costly;
25578 allow all dependent insns in the same group.
25579 This is the most aggressive option. */
25580 if (rs6000_sched_costly_dep == no_dep_costly)
25581 return false;
25582
25583 /* If the flag is set to 1 - a dependence is always considered costly;
25584 do not allow dependent instructions in the same group.
25585 This is the most conservative option. */
25586 if (rs6000_sched_costly_dep == all_deps_costly)
25587 return true;
25588
25589 insn = DEP_PRO (dep);
25590 next = DEP_CON (dep);
25591
25592 if (rs6000_sched_costly_dep == store_to_load_dep_costly
25593 && is_load_insn (next, &load_mem)
25594 && is_store_insn (insn, &str_mem))
25595 /* Prevent load after store in the same group. */
25596 return true;
25597
25598 if (rs6000_sched_costly_dep == true_store_to_load_dep_costly
25599 && is_load_insn (next, &load_mem)
25600 && is_store_insn (insn, &str_mem)
25601 && DEP_TYPE (dep) == REG_DEP_TRUE
25602 && mem_locations_overlap(str_mem, load_mem))
25603 /* Prevent load after store in the same group if it is a true
25604 dependence. */
25605 return true;
25606
25607 /* The flag is set to X; dependences with latency >= X are considered costly,
25608 and will not be scheduled in the same group. */
25609 if (rs6000_sched_costly_dep <= max_dep_latency
25610 && ((cost - distance) >= (int)rs6000_sched_costly_dep))
25611 return true;
25612
25613 return false;
25614 }
25615
25616 /* Return the next insn after INSN that is found before TAIL is reached,
25617 skipping any "non-active" insns - insns that will not actually occupy
25618 an issue slot. Return NULL_RTX if such an insn is not found. */
25619
25620 static rtx_insn *
25621 get_next_active_insn (rtx_insn *insn, rtx_insn *tail)
25622 {
25623 if (insn == NULL_RTX || insn == tail)
25624 return NULL;
25625
25626 while (1)
25627 {
25628 insn = NEXT_INSN (insn);
25629 if (insn == NULL_RTX || insn == tail)
25630 return NULL;
25631
25632 if (CALL_P (insn)
25633 || JUMP_P (insn) || JUMP_TABLE_DATA_P (insn)
25634 || (NONJUMP_INSN_P (insn)
25635 && GET_CODE (PATTERN (insn)) != USE
25636 && GET_CODE (PATTERN (insn)) != CLOBBER
25637 && INSN_CODE (insn) != CODE_FOR_stack_tie))
25638 break;
25639 }
25640 return insn;
25641 }
25642
25643 /* Do Power9 specific sched_reorder2 reordering of ready list. */
25644
25645 static int
25646 power9_sched_reorder2 (rtx_insn **ready, int lastpos)
25647 {
25648 int pos;
25649 int i;
25650 rtx_insn *tmp;
25651 enum attr_type type, type2;
25652
25653 type = get_attr_type (last_scheduled_insn);
25654
25655 /* Try to issue fixed point divides back-to-back in pairs so they will be
25656 routed to separate execution units and execute in parallel. */
25657 if (type == TYPE_DIV && divide_cnt == 0)
25658 {
25659 /* First divide has been scheduled. */
25660 divide_cnt = 1;
25661
25662 /* Scan the ready list looking for another divide, if found move it
25663 to the end of the list so it is chosen next. */
25664 pos = lastpos;
25665 while (pos >= 0)
25666 {
25667 if (recog_memoized (ready[pos]) >= 0
25668 && get_attr_type (ready[pos]) == TYPE_DIV)
25669 {
25670 tmp = ready[pos];
25671 for (i = pos; i < lastpos; i++)
25672 ready[i] = ready[i + 1];
25673 ready[lastpos] = tmp;
25674 break;
25675 }
25676 pos--;
25677 }
25678 }
25679 else
25680 {
25681 /* Last insn was the 2nd divide or not a divide, reset the counter. */
25682 divide_cnt = 0;
25683
25684 /* The best dispatch throughput for vector and vector load insns can be
25685 achieved by interleaving a vector and vector load such that they'll
25686 dispatch to the same superslice. If this pairing cannot be achieved
25687 then it is best to pair vector insns together and vector load insns
25688 together.
25689
25690 To aid in this pairing, vec_pairing maintains the current state with
25691 the following values:
25692
25693 0 : Initial state, no vecload/vector pairing has been started.
25694
25695 1 : A vecload or vector insn has been issued and a candidate for
25696 pairing has been found and moved to the end of the ready
25697 list. */
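
      /* As a summary sketch of the logic below: the issue pattern this
	 aims for is vecload, vector, vecload, vector, ... so each pair
	 can dispatch to the same superslice; failing that, it settles
	 for vecload/vecload or vector/vector pairs.  */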
25698 if (type == TYPE_VECLOAD)
25699 {
25700 /* Issued a vecload. */
25701 if (vec_pairing == 0)
25702 {
25703 int vecload_pos = -1;
25704 /* We issued a single vecload, look for a vector insn to pair it
25705 with. If one isn't found, try to pair another vecload. */
25706 pos = lastpos;
25707 while (pos >= 0)
25708 {
25709 if (recog_memoized (ready[pos]) >= 0)
25710 {
25711 type2 = get_attr_type (ready[pos]);
25712 if (is_power9_pairable_vec_type (type2))
25713 {
25714 /* Found a vector insn to pair with, move it to the
25715 end of the ready list so it is scheduled next. */
25716 tmp = ready[pos];
25717 for (i = pos; i < lastpos; i++)
25718 ready[i] = ready[i + 1];
25719 ready[lastpos] = tmp;
25720 vec_pairing = 1;
25721 return cached_can_issue_more;
25722 }
25723 else if (type2 == TYPE_VECLOAD && vecload_pos == -1)
25724 /* Remember position of first vecload seen. */
25725 vecload_pos = pos;
25726 }
25727 pos--;
25728 }
25729 if (vecload_pos >= 0)
25730 {
25731 /* Didn't find a vector to pair with but did find a vecload,
25732 move it to the end of the ready list. */
25733 tmp = ready[vecload_pos];
25734 for (i = vecload_pos; i < lastpos; i++)
25735 ready[i] = ready[i + 1];
25736 ready[lastpos] = tmp;
25737 vec_pairing = 1;
25738 return cached_can_issue_more;
25739 }
25740 }
25741 }
25742 else if (is_power9_pairable_vec_type (type))
25743 {
25744 /* Issued a vector operation. */
25745 if (vec_pairing == 0)
25746 {
25747 int vec_pos = -1;
25748 /* We issued a single vector insn, look for a vecload to pair it
25749 with. If one isn't found, try to pair another vector. */
25750 pos = lastpos;
25751 while (pos >= 0)
25752 {
25753 if (recog_memoized (ready[pos]) >= 0)
25754 {
25755 type2 = get_attr_type (ready[pos]);
25756 if (type2 == TYPE_VECLOAD)
25757 {
25758 /* Found a vecload insn to pair with, move it to the
25759 end of the ready list so it is scheduled next. */
25760 tmp = ready[pos];
25761 for (i = pos; i < lastpos; i++)
25762 ready[i] = ready[i + 1];
25763 ready[lastpos] = tmp;
25764 vec_pairing = 1;
25765 return cached_can_issue_more;
25766 }
25767 else if (is_power9_pairable_vec_type (type2)
25768 && vec_pos == -1)
25769 /* Remember position of first vector insn seen. */
25770 vec_pos = pos;
25771 }
25772 pos--;
25773 }
25774 if (vec_pos >= 0)
25775 {
25776 /* Didn't find a vecload to pair with but did find a vector
25777 insn, move it to the end of the ready list. */
25778 tmp = ready[vec_pos];
25779 for (i = vec_pos; i < lastpos; i++)
25780 ready[i] = ready[i + 1];
25781 ready[lastpos] = tmp;
25782 vec_pairing = 1;
25783 return cached_can_issue_more;
25784 }
25785 }
25786 }
25787
25788 /* We've either finished a vec/vecload pair, couldn't find an insn to
25789 continue the current pair, or the last insn had nothing to do with
25790 	 pairing.  In any case, reset the state.  */
25791 vec_pairing = 0;
25792 }
25793
25794 return cached_can_issue_more;
25795 }
25796
25797 /* We are about to begin issuing insns for this clock cycle. */
25798
25799 static int
25800 rs6000_sched_reorder (FILE *dump ATTRIBUTE_UNUSED, int sched_verbose,
25801 rtx_insn **ready ATTRIBUTE_UNUSED,
25802 int *pn_ready ATTRIBUTE_UNUSED,
25803 int clock_var ATTRIBUTE_UNUSED)
25804 {
25805 int n_ready = *pn_ready;
25806
25807 if (sched_verbose)
25808 fprintf (dump, "// rs6000_sched_reorder :\n");
25809
25810   /* Reorder the ready list if the second-to-last ready insn
25811      is a nonpipelined insn.  */
25812 if (rs6000_tune == PROCESSOR_CELL && n_ready > 1)
25813 {
25814 if (is_nonpipeline_insn (ready[n_ready - 1])
25815 && (recog_memoized (ready[n_ready - 2]) > 0))
25816 /* Simply swap first two insns. */
25817 std::swap (ready[n_ready - 1], ready[n_ready - 2]);
25818 }
25819
25820 if (rs6000_tune == PROCESSOR_POWER6)
25821 load_store_pendulum = 0;
25822
25823 return rs6000_issue_rate ();
25824 }
25825
25826 /* Like rs6000_sched_reorder, but called after issuing each insn. */
25827
25828 static int
25829 rs6000_sched_reorder2 (FILE *dump, int sched_verbose, rtx_insn **ready,
25830 int *pn_ready, int clock_var ATTRIBUTE_UNUSED)
25831 {
25832 if (sched_verbose)
25833 fprintf (dump, "// rs6000_sched_reorder2 :\n");
25834
25835 /* For Power6, we need to handle some special cases to try and keep the
25836 store queue from overflowing and triggering expensive flushes.
25837
25838 This code monitors how load and store instructions are being issued
25839 and skews the ready list one way or the other to increase the likelihood
25840 that a desired instruction is issued at the proper time.
25841
25842 A couple of things are done. First, we maintain a "load_store_pendulum"
25843 to track the current state of load/store issue.
25844
25845 - If the pendulum is at zero, then no loads or stores have been
25846 issued in the current cycle so we do nothing.
25847
25848 - If the pendulum is 1, then a single load has been issued in this
25849 cycle and we attempt to locate another load in the ready list to
25850 issue with it.
25851
25852 - If the pendulum is -2, then two stores have already been
25853 issued in this cycle, so we increase the priority of the first load
25854      in the ready list to increase its likelihood of being chosen first
25855 in the next cycle.
25856
25857 - If the pendulum is -1, then a single store has been issued in this
25858 cycle and we attempt to locate another store in the ready list to
25859 issue with it, preferring a store to an adjacent memory location to
25860 facilitate store pairing in the store queue.
25861
25862 - If the pendulum is 2, then two loads have already been
25863 issued in this cycle, so we increase the priority of the first store
25864      in the ready list to increase its likelihood of being chosen first
25865 in the next cycle.
25866
25867 - If the pendulum < -2 or > 2, then do nothing.
25868
25869      Note: This code covers the most common scenarios.  There exist
25870      non-load/store instructions which make use of the LSU and which
25871 would need to be accounted for to strictly model the behavior
25872 of the machine. Those instructions are currently unaccounted
25873 for to help minimize compile time overhead of this code.
25874 */
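  /* A short worked trace (a sketch): from 0, issuing a store moves
     the pendulum to -1 and we scan the ready list for a second,
     ideally adjacent, store; a second store moves it to -2, at which
     point the first load found on the ready list gets a priority
     bump so that a load is likely to issue first next cycle.  */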
25875 if (rs6000_tune == PROCESSOR_POWER6 && last_scheduled_insn)
25876 {
25877 int pos;
25878 int i;
25879 rtx_insn *tmp;
25880 rtx load_mem, str_mem;
25881
25882 if (is_store_insn (last_scheduled_insn, &str_mem))
25883 /* Issuing a store, swing the load_store_pendulum to the left */
25884 load_store_pendulum--;
25885 else if (is_load_insn (last_scheduled_insn, &load_mem))
25886 /* Issuing a load, swing the load_store_pendulum to the right */
25887 load_store_pendulum++;
25888 else
25889 return cached_can_issue_more;
25890
25891 /* If the pendulum is balanced, or there is only one instruction on
25892 the ready list, then all is well, so return. */
25893 if ((load_store_pendulum == 0) || (*pn_ready <= 1))
25894 return cached_can_issue_more;
25895
25896 if (load_store_pendulum == 1)
25897 {
25898 /* A load has been issued in this cycle. Scan the ready list
25899 for another load to issue with it */
25900 pos = *pn_ready-1;
25901
25902 while (pos >= 0)
25903 {
25904 if (is_load_insn (ready[pos], &load_mem))
25905 {
25906 /* Found a load. Move it to the head of the ready list,
25907 		     and adjust its priority so that it is more likely to
25908 		     stay there.  */
25909 		  tmp = ready[pos];
25910 		  for (i = pos; i < *pn_ready - 1; i++)
25911 		    ready[i] = ready[i + 1];
25912 		  ready[*pn_ready - 1] = tmp;
25913
25914 if (!sel_sched_p () && INSN_PRIORITY_KNOWN (tmp))
25915 INSN_PRIORITY (tmp)++;
25916 break;
25917 }
25918 pos--;
25919 }
25920 }
25921 else if (load_store_pendulum == -2)
25922 {
25923 /* Two stores have been issued in this cycle. Increase the
25924 priority of the first load in the ready list to favor it for
25925 issuing in the next cycle. */
25926 pos = *pn_ready-1;
25927
25928 while (pos >= 0)
25929 {
25930 if (is_load_insn (ready[pos], &load_mem)
25931 && !sel_sched_p ()
25932 && INSN_PRIORITY_KNOWN (ready[pos]))
25933 {
25934 INSN_PRIORITY (ready[pos])++;
25935
25936 /* Adjust the pendulum to account for the fact that a load
25937 was found and increased in priority. This is to prevent
25938 increasing the priority of multiple loads. */
25939 load_store_pendulum--;
25940
25941 break;
25942 }
25943 pos--;
25944 }
25945 }
25946 else if (load_store_pendulum == -1)
25947 {
25948 /* A store has been issued in this cycle. Scan the ready list for
25949 another store to issue with it, preferring a store to an adjacent
25950 memory location. */
25951 int first_store_pos = -1;
25952
25953 pos = *pn_ready - 1;
25954
25955 while (pos >= 0)
25956 {
25957 if (is_store_insn (ready[pos], &str_mem))
25958 {
25959 rtx str_mem2;
25960 /* Maintain the index of the first store found on the
25961 list. */
25962 if (first_store_pos == -1)
25963 first_store_pos = pos;
25964
25965 if (is_store_insn (last_scheduled_insn, &str_mem2)
25966 && adjacent_mem_locations (str_mem, str_mem2))
25967 {
25968 /* Found an adjacent store. Move it to the head of the
25969 ready list, and adjust its priority so that it is
25970 more likely to stay there. */
25971 tmp = ready[pos];
25972 for (i = pos; i < *pn_ready - 1; i++)
25973 ready[i] = ready[i + 1];
25974 ready[*pn_ready - 1] = tmp;
25975
25976 if (!sel_sched_p () && INSN_PRIORITY_KNOWN (tmp))
25977 INSN_PRIORITY (tmp)++;
25978
25979 first_store_pos = -1;
25980
25981 break;
25982 }
25983 }
25984 pos--;
25985 }
25986
25987 if (first_store_pos >= 0)
25988 {
25989 /* An adjacent store wasn't found, but a non-adjacent store was,
25990 so move the non-adjacent store to the front of the ready
25991 list, and adjust its priority so that it is more likely to
25992 stay there. */
25993 tmp = ready[first_store_pos];
25994 for (i = first_store_pos; i < *pn_ready - 1; i++)
25995 ready[i] = ready[i + 1];
25996 ready[*pn_ready - 1] = tmp;
25997 if (!sel_sched_p () && INSN_PRIORITY_KNOWN (tmp))
25998 INSN_PRIORITY (tmp)++;
25999 }
26000 }
26001 else if (load_store_pendulum == 2)
26002 {
26003 /* Two loads have been issued in this cycle. Increase the priority
26004 of the first store in the ready list to favor it for issuing in
26005 the next cycle. */
26006 pos = *pn_ready - 1;
26007
26008 while (pos >= 0)
26009 {
26010 if (is_store_insn (ready[pos], &str_mem)
26011 && !sel_sched_p ()
26012 && INSN_PRIORITY_KNOWN (ready[pos]))
26013 {
26014 INSN_PRIORITY (ready[pos])++;
26015
26016 /* Adjust the pendulum to account for the fact that a store
26017 was found and increased in priority. This is to prevent
26018 increasing the priority of multiple stores. */
26019 load_store_pendulum++;
26020
26021 break;
26022 }
26023 pos--;
26024 }
26025 }
26026 }
26027
26028 /* Do Power9-dependent reordering if necessary. */
26029 if (rs6000_tune == PROCESSOR_POWER9 && last_scheduled_insn
26030 && recog_memoized (last_scheduled_insn) >= 0)
26031 return power9_sched_reorder2 (ready, *pn_ready - 1);
26032
26033 return cached_can_issue_more;
26034 }
26035
26036 /* Return whether the presence of INSN causes a dispatch group termination
26037 of group WHICH_GROUP.
26038
26039 If WHICH_GROUP == current_group, this function will return true if INSN
26040 causes the termination of the current group (i.e., the dispatch group to
26041 which INSN belongs). This means that INSN will be the last insn in the
26042 group it belongs to.
26043
26044 If WHICH_GROUP == previous_group, this function will return true if INSN
26045 causes the termination of the previous group (i.e., the dispatch group that
26046 precedes the group to which INSN belongs). This means that INSN will be
26047 the first insn in the group it belongs to. */
26048
26049 static bool
26050 insn_terminates_group_p (rtx_insn *insn, enum group_termination which_group)
26051 {
26052 bool first, last;
26053
26054 if (! insn)
26055 return false;
26056
26057 first = insn_must_be_first_in_group (insn);
26058 last = insn_must_be_last_in_group (insn);
26059
26060 if (first && last)
26061 return true;
26062
26063 if (which_group == current_group)
26064 return last;
26065 else if (which_group == previous_group)
26066 return first;
26067
26068 return false;
26069 }
26070
26071
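/* Return true if INSN must be the first insn in a dispatch group on
the processor selected by -mtune. */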
26072 static bool
26073 insn_must_be_first_in_group (rtx_insn *insn)
26074 {
26075 enum attr_type type;
26076
26077 if (!insn
26078 || NOTE_P (insn)
26079 || DEBUG_INSN_P (insn)
26080 || GET_CODE (PATTERN (insn)) == USE
26081 || GET_CODE (PATTERN (insn)) == CLOBBER)
26082 return false;
26083
26084 switch (rs6000_tune)
26085 {
26086 case PROCESSOR_POWER5:
26087 if (is_cracked_insn (insn))
26088 return true;
26089 /* FALLTHRU */
26090 case PROCESSOR_POWER4:
26091 if (is_microcoded_insn (insn))
26092 return true;
26093
26094 if (!rs6000_sched_groups)
26095 return false;
26096
26097 type = get_attr_type (insn);
26098
26099 switch (type)
26100 {
26101 case TYPE_MFCR:
26102 case TYPE_MFCRF:
26103 case TYPE_MTCR:
26104 case TYPE_CR_LOGICAL:
26105 case TYPE_MTJMPR:
26106 case TYPE_MFJMPR:
26107 case TYPE_DIV:
26108 case TYPE_LOAD_L:
26109 case TYPE_STORE_C:
26110 case TYPE_ISYNC:
26111 case TYPE_SYNC:
26112 return true;
26113 default:
26114 break;
26115 }
26116 break;
26117 case PROCESSOR_POWER6:
26118 type = get_attr_type (insn);
26119
26120 switch (type)
26121 {
26122 case TYPE_EXTS:
26123 case TYPE_CNTLZ:
26124 case TYPE_TRAP:
26125 case TYPE_MUL:
26126 case TYPE_INSERT:
26127 case TYPE_FPCOMPARE:
26128 case TYPE_MFCR:
26129 case TYPE_MTCR:
26130 case TYPE_MFJMPR:
26131 case TYPE_MTJMPR:
26132 case TYPE_ISYNC:
26133 case TYPE_SYNC:
26134 case TYPE_LOAD_L:
26135 case TYPE_STORE_C:
26136 return true;
26137 case TYPE_SHIFT:
26138 if (get_attr_dot (insn) == DOT_NO
26139 || get_attr_var_shift (insn) == VAR_SHIFT_NO)
26140 return true;
26141 else
26142 break;
26143 case TYPE_DIV:
26144 if (get_attr_size (insn) == SIZE_32)
26145 return true;
26146 else
26147 break;
26148 case TYPE_LOAD:
26149 case TYPE_STORE:
26150 case TYPE_FPLOAD:
26151 case TYPE_FPSTORE:
26152 if (get_attr_update (insn) == UPDATE_YES)
26153 return true;
26154 else
26155 break;
26156 default:
26157 break;
26158 }
26159 break;
26160 case PROCESSOR_POWER7:
26161 type = get_attr_type (insn);
26162
26163 switch (type)
26164 {
26165 case TYPE_CR_LOGICAL:
26166 case TYPE_MFCR:
26167 case TYPE_MFCRF:
26168 case TYPE_MTCR:
26169 case TYPE_DIV:
26170 case TYPE_ISYNC:
26171 case TYPE_LOAD_L:
26172 case TYPE_STORE_C:
26173 case TYPE_MFJMPR:
26174 case TYPE_MTJMPR:
26175 return true;
26176 case TYPE_MUL:
26177 case TYPE_SHIFT:
26178 case TYPE_EXTS:
26179 if (get_attr_dot (insn) == DOT_YES)
26180 return true;
26181 else
26182 break;
26183 case TYPE_LOAD:
26184 if (get_attr_sign_extend (insn) == SIGN_EXTEND_YES
26185 || get_attr_update (insn) == UPDATE_YES)
26186 return true;
26187 else
26188 break;
26189 case TYPE_STORE:
26190 case TYPE_FPLOAD:
26191 case TYPE_FPSTORE:
26192 if (get_attr_update (insn) == UPDATE_YES)
26193 return true;
26194 else
26195 break;
26196 default:
26197 break;
26198 }
26199 break;
26200 case PROCESSOR_POWER8:
26201 type = get_attr_type (insn);
26202
26203 switch (type)
26204 {
26205 case TYPE_CR_LOGICAL:
26206 case TYPE_MFCR:
26207 case TYPE_MFCRF:
26208 case TYPE_MTCR:
26209 case TYPE_SYNC:
26210 case TYPE_ISYNC:
26211 case TYPE_LOAD_L:
26212 case TYPE_STORE_C:
26213 case TYPE_VECSTORE:
26214 case TYPE_MFJMPR:
26215 case TYPE_MTJMPR:
26216 return true;
26217 case TYPE_SHIFT:
26218 case TYPE_EXTS:
26219 case TYPE_MUL:
26220 if (get_attr_dot (insn) == DOT_YES)
26221 return true;
26222 else
26223 break;
26224 case TYPE_LOAD:
26225 if (get_attr_sign_extend (insn) == SIGN_EXTEND_YES
26226 || get_attr_update (insn) == UPDATE_YES)
26227 return true;
26228 else
26229 break;
26230 case TYPE_STORE:
26231 if (get_attr_update (insn) == UPDATE_YES
26232 && get_attr_indexed (insn) == INDEXED_YES)
26233 return true;
26234 else
26235 break;
26236 default:
26237 break;
26238 }
26239 break;
26240 default:
26241 break;
26242 }
26243
26244 return false;
26245 }
26246
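/* Return true if INSN must be the last insn in its dispatch group on
the processor selected by -mtune. */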
26247 static bool
26248 insn_must_be_last_in_group (rtx_insn *insn)
26249 {
26250 enum attr_type type;
26251
26252 if (!insn
26253 || NOTE_P (insn)
26254 || DEBUG_INSN_P (insn)
26255 || GET_CODE (PATTERN (insn)) == USE
26256 || GET_CODE (PATTERN (insn)) == CLOBBER)
26257 return false;
26258
26259 switch (rs6000_tune) {
26260 case PROCESSOR_POWER4:
26261 case PROCESSOR_POWER5:
26262 if (is_microcoded_insn (insn))
26263 return true;
26264
26265 if (is_branch_slot_insn (insn))
26266 return true;
26267
26268 break;
26269 case PROCESSOR_POWER6:
26270 type = get_attr_type (insn);
26271
26272 switch (type)
26273 {
26274 case TYPE_EXTS:
26275 case TYPE_CNTLZ:
26276 case TYPE_TRAP:
26277 case TYPE_MUL:
26278 case TYPE_FPCOMPARE:
26279 case TYPE_MFCR:
26280 case TYPE_MTCR:
26281 case TYPE_MFJMPR:
26282 case TYPE_MTJMPR:
26283 case TYPE_ISYNC:
26284 case TYPE_SYNC:
26285 case TYPE_LOAD_L:
26286 case TYPE_STORE_C:
26287 return true;
26288 case TYPE_SHIFT:
26289 if (get_attr_dot (insn) == DOT_NO
26290 || get_attr_var_shift (insn) == VAR_SHIFT_NO)
26291 return true;
26292 else
26293 break;
26294 case TYPE_DIV:
26295 if (get_attr_size (insn) == SIZE_32)
26296 return true;
26297 else
26298 break;
26299 default:
26300 break;
26301 }
26302 break;
26303 case PROCESSOR_POWER7:
26304 type = get_attr_type (insn);
26305
26306 switch (type)
26307 {
26308 case TYPE_ISYNC:
26309 case TYPE_SYNC:
26310 case TYPE_LOAD_L:
26311 case TYPE_STORE_C:
26312 return true;
26313 case TYPE_LOAD:
26314 if (get_attr_sign_extend (insn) == SIGN_EXTEND_YES
26315 && get_attr_update (insn) == UPDATE_YES)
26316 return true;
26317 else
26318 break;
26319 case TYPE_STORE:
26320 if (get_attr_update (insn) == UPDATE_YES
26321 && get_attr_indexed (insn) == INDEXED_YES)
26322 return true;
26323 else
26324 break;
26325 default:
26326 break;
26327 }
26328 break;
26329 case PROCESSOR_POWER8:
26330 type = get_attr_type (insn);
26331
26332 switch (type)
26333 {
26334 case TYPE_MFCR:
26335 case TYPE_MTCR:
26336 case TYPE_ISYNC:
26337 case TYPE_SYNC:
26338 case TYPE_LOAD_L:
26339 case TYPE_STORE_C:
26340 return true;
26341 case TYPE_LOAD:
26342 if (get_attr_sign_extend (insn) == SIGN_EXTEND_YES
26343 && get_attr_update (insn) == UPDATE_YES)
26344 return true;
26345 else
26346 break;
26347 case TYPE_STORE:
26348 if (get_attr_update (insn) == UPDATE_YES
26349 && get_attr_indexed (insn) == INDEXED_YES)
26350 return true;
26351 else
26352 break;
26353 default:
26354 break;
26355 }
26356 break;
26357 default:
26358 break;
26359 }
26360
26361 return false;
26362 }
26363
26364 /* Return true if it is recommended to keep NEXT_INSN "far" (in a separate
26365 dispatch group) from the insns in GROUP_INSNS. Return false otherwise. */
26366
26367 static bool
26368 is_costly_group (rtx *group_insns, rtx next_insn)
26369 {
26370 int i;
26371 int issue_rate = rs6000_issue_rate ();
26372
26373 for (i = 0; i < issue_rate; i++)
26374 {
26375 sd_iterator_def sd_it;
26376 dep_t dep;
26377 rtx insn = group_insns[i];
26378
26379 if (!insn)
26380 continue;
26381
26382 FOR_EACH_DEP (insn, SD_LIST_RES_FORW, sd_it, dep)
26383 {
26384 rtx next = DEP_CON (dep);
26385
26386 if (next == next_insn
26387 && rs6000_is_costly_dependence (dep, dep_cost (dep), 0))
26388 return true;
26389 }
26390 }
26391
26392 return false;
26393 }
26394
26395 /* Utility of the function redefine_groups.
26396 Check if it is too costly to schedule NEXT_INSN together with GROUP_INSNS
26397 in the same dispatch group. If so, insert nops before NEXT_INSN, in order
26398 to keep it "far" (in a separate group) from GROUP_INSNS, following
26399 one of the following schemes, depending on the value of the flag
26400 -minsert-sched-nops=X:
26401 (1) X == sched_finish_regroup_exact: insert exactly as many nops as needed
26402 in order to force NEXT_INSN into a separate group.
26403 (2) X < sched_finish_regroup_exact: insert exactly X nops.
26404 GROUP_END, CAN_ISSUE_MORE and GROUP_COUNT record the state after nop
26405 insertion (has a group just ended, how many vacant issue slots remain in the
26406 last group, and how many dispatch groups were encountered so far). */
26407
26408 static int
26409 force_new_group (int sched_verbose, FILE *dump, rtx *group_insns,
26410 rtx_insn *next_insn, bool *group_end, int can_issue_more,
26411 int *group_count)
26412 {
26413 rtx nop;
26414 bool force;
26415 int issue_rate = rs6000_issue_rate ();
26416 bool end = *group_end;
26417 int i;
26418
26419 if (next_insn == NULL_RTX || DEBUG_INSN_P (next_insn))
26420 return can_issue_more;
26421
26422 if (rs6000_sched_insert_nops > sched_finish_regroup_exact)
26423 return can_issue_more;
26424
26425 force = is_costly_group (group_insns, next_insn);
26426 if (!force)
26427 return can_issue_more;
26428
26429 if (sched_verbose > 6)
26430 fprintf (dump, "force: group count = %d, can_issue_more = %d\n",
26431 *group_count, can_issue_more);
26432
26433 if (rs6000_sched_insert_nops == sched_finish_regroup_exact)
26434 {
26435 if (*group_end)
26436 can_issue_more = 0;
26437
26438 /* Since only a branch can be issued in the last issue_slot, it is
26439 sufficient to insert 'can_issue_more - 1' nops if next_insn is not
26440 a branch. If next_insn is a branch, we insert 'can_issue_more' nops;
26441 in this case the last nop will start a new group and the branch
26442 will be forced to the new group. */
26443 if (can_issue_more && !is_branch_slot_insn (next_insn))
26444 can_issue_more--;
26445
26446 /* Do we have a special group ending nop? */
26447 if (rs6000_tune == PROCESSOR_POWER6 || rs6000_tune == PROCESSOR_POWER7
26448 || rs6000_tune == PROCESSOR_POWER8)
26449 {
26450 nop = gen_group_ending_nop ();
26451 emit_insn_before (nop, next_insn);
26452 can_issue_more = 0;
26453 }
26454 else
26455 while (can_issue_more > 0)
26456 {
26457 nop = gen_nop ();
26458 emit_insn_before (nop, next_insn);
26459 can_issue_more--;
26460 }
26461
26462 *group_end = true;
26463 return 0;
26464 }
26465
26466 if (rs6000_sched_insert_nops < sched_finish_regroup_exact)
26467 {
26468 int n_nops = rs6000_sched_insert_nops;
26469
26470 /* Nops can't be issued from the branch slot, so the effective
26471 issue_rate for nops is 'issue_rate - 1'. */
26472 if (can_issue_more == 0)
26473 can_issue_more = issue_rate;
26474 can_issue_more--;
26475 if (can_issue_more == 0)
26476 {
26477 can_issue_more = issue_rate - 1;
26478 (*group_count)++;
26479 end = true;
26480 for (i = 0; i < issue_rate; i++)
26481 {
26482 group_insns[i] = 0;
26483 }
26484 }
26485
26486 while (n_nops > 0)
26487 {
26488 nop = gen_nop ();
26489 emit_insn_before (nop, next_insn);
26490 if (can_issue_more == issue_rate - 1) /* New group begins. */
26491 end = false;
26492 can_issue_more--;
26493 if (can_issue_more == 0)
26494 {
26495 can_issue_more = issue_rate - 1;
26496 (*group_count)++;
26497 end = true;
26498 for (i = 0; i < issue_rate; i++)
26499 {
26500 group_insns[i] = 0;
26501 }
26502 }
26503 n_nops--;
26504 }
26505
26506 /* Scale back relative to 'issue_rate' (instead of 'issue_rate - 1'). */
26507 can_issue_more++;
26508
26509 /* Is next_insn going to start a new group? */
26510 *group_end
26511 = (end
26512 || (can_issue_more == 1 && !is_branch_slot_insn (next_insn))
26513 || (can_issue_more <= 2 && is_cracked_insn (next_insn))
26514 || (can_issue_more < issue_rate &&
26515 insn_terminates_group_p (next_insn, previous_group)));
26516 if (*group_end && end)
26517 (*group_count)--;
26518
26519 if (sched_verbose > 6)
26520 fprintf (dump, "done force: group count = %d, can_issue_more = %d\n",
26521 *group_count, can_issue_more);
26522 return can_issue_more;
26523 }
26524
26525 return can_issue_more;
26526 }
26527
26528 /* This function tries to synch the dispatch groups that the compiler "sees"
26529 with the dispatch groups that the processor dispatcher is expected to
26530 form in practice. It tries to achieve this synchronization by forcing the
26531 estimated processor grouping on the compiler (as opposed to the function
26532 'pad_groups' which tries to force the scheduler's grouping on the processor).
26533
26534 The function scans the insn sequence between PREV_HEAD_INSN and TAIL and
26535 examines the (estimated) dispatch groups that will be formed by the processor
26536 dispatcher. It marks these group boundaries to reflect the estimated
26537 processor grouping, overriding the grouping that the scheduler had marked.
26538 Depending on the value of the flag '-minsert-sched-nops' this function can
26539 force certain insns into separate groups or force a certain distance between
26540 them by inserting nops, for example, if there exists a "costly dependence"
26541 between the insns.
26542
26543 The function estimates the group boundaries that the processor will form as
26544 follows: It keeps track of how many vacant issue slots are available after
26545 each insn. A subsequent insn will start a new group if one of the following
26546 4 cases applies:
26547 - no more vacant issue slots remain in the current dispatch group.
26548 - only the last issue slot, which is the branch slot, is vacant, but the next
26549 insn is not a branch.
26550 - only the last 2 or fewer issue slots, including the branch slot, are vacant,
26551 which means that a cracked insn (which occupies two issue slots) can't be
26552 issued in this group.
26553 - fewer than 'issue_rate' slots are vacant, and the next insn always needs to
26554 start a new group. */
26555
26556 static int
26557 redefine_groups (FILE *dump, int sched_verbose, rtx_insn *prev_head_insn,
26558 rtx_insn *tail)
26559 {
26560 rtx_insn *insn, *next_insn;
26561 int issue_rate;
26562 int can_issue_more;
26563 int slot, i;
26564 bool group_end;
26565 int group_count = 0;
26566 rtx *group_insns;
26567
26568 /* Initialize. */
26569 issue_rate = rs6000_issue_rate ();
26570 group_insns = XALLOCAVEC (rtx, issue_rate);
26571 for (i = 0; i < issue_rate; i++)
26572 {
26573 group_insns[i] = 0;
26574 }
26575 can_issue_more = issue_rate;
26576 slot = 0;
26577 insn = get_next_active_insn (prev_head_insn, tail);
26578 group_end = false;
26579
26580 while (insn != NULL_RTX)
26581 {
26582 slot = (issue_rate - can_issue_more);
26583 group_insns[slot] = insn;
26584 can_issue_more =
26585 rs6000_variable_issue (dump, sched_verbose, insn, can_issue_more);
26586 if (insn_terminates_group_p (insn, current_group))
26587 can_issue_more = 0;
26588
26589 next_insn = get_next_active_insn (insn, tail);
26590 if (next_insn == NULL_RTX)
26591 return group_count + 1;
26592
26593 /* Is next_insn going to start a new group? */
26594 group_end
26595 = (can_issue_more == 0
26596 || (can_issue_more == 1 && !is_branch_slot_insn (next_insn))
26597 || (can_issue_more <= 2 && is_cracked_insn (next_insn))
26598 || (can_issue_more < issue_rate &&
26599 insn_terminates_group_p (next_insn, previous_group)));
26600
26601 can_issue_more = force_new_group (sched_verbose, dump, group_insns,
26602 next_insn, &group_end, can_issue_more,
26603 &group_count);
26604
26605 if (group_end)
26606 {
26607 group_count++;
26608 can_issue_more = 0;
26609 for (i = 0; i < issue_rate; i++)
26610 {
26611 group_insns[i] = 0;
26612 }
26613 }
26614
26615 if (GET_MODE (next_insn) == TImode && can_issue_more)
26616 PUT_MODE (next_insn, VOIDmode);
26617 else if (!can_issue_more && GET_MODE (next_insn) != TImode)
26618 PUT_MODE (next_insn, TImode);
26619
26620 insn = next_insn;
26621 if (can_issue_more == 0)
26622 can_issue_more = issue_rate;
26623 } /* while */
26624
26625 return group_count;
26626 }
26627
26628 /* Scan the insn sequence between PREV_HEAD_INSN and TAIL and examine the
26629 dispatch group boundaries that the scheduler had marked. Pad with nops
26630 any dispatch groups which have vacant issue slots, in order to force the
26631 scheduler's grouping on the processor dispatcher. The function
26632 returns the number of dispatch groups found. */
26633
26634 static int
26635 pad_groups (FILE *dump, int sched_verbose, rtx_insn *prev_head_insn,
26636 rtx_insn *tail)
26637 {
26638 rtx_insn *insn, *next_insn;
26639 rtx nop;
26640 int issue_rate;
26641 int can_issue_more;
26642 int group_end;
26643 int group_count = 0;
26644
26645 /* Initialize issue_rate. */
26646 issue_rate = rs6000_issue_rate ();
26647 can_issue_more = issue_rate;
26648
26649 insn = get_next_active_insn (prev_head_insn, tail);
26650 next_insn = get_next_active_insn (insn, tail);
26651
26652 while (insn != NULL_RTX)
26653 {
26654 can_issue_more =
26655 rs6000_variable_issue (dump, sched_verbose, insn, can_issue_more);
26656
26657 group_end = (next_insn == NULL_RTX || GET_MODE (next_insn) == TImode);
26658
26659 if (next_insn == NULL_RTX)
26660 break;
26661
26662 if (group_end)
26663 {
26664 /* If the scheduler had marked group termination at this location
26665 (between insn and next_insn), and neither insn nor next_insn will
26666 force group termination, pad the group with nops to force group
26667 termination. */
26668 if (can_issue_more
26669 && (rs6000_sched_insert_nops == sched_finish_pad_groups)
26670 && !insn_terminates_group_p (insn, current_group)
26671 && !insn_terminates_group_p (next_insn, previous_group))
26672 {
26673 if (!is_branch_slot_insn (next_insn))
26674 can_issue_more--;
26675
26676 while (can_issue_more)
26677 {
26678 nop = gen_nop ();
26679 emit_insn_before (nop, next_insn);
26680 can_issue_more--;
26681 }
26682 }
26683
26684 can_issue_more = issue_rate;
26685 group_count++;
26686 }
26687
26688 insn = next_insn;
26689 next_insn = get_next_active_insn (insn, tail);
26690 }
26691
26692 return group_count;
26693 }
26694
26695 /* We're beginning a new block. Initialize data structures as necessary. */
26696
26697 static void
26698 rs6000_sched_init (FILE *dump ATTRIBUTE_UNUSED,
26699 int sched_verbose ATTRIBUTE_UNUSED,
26700 int max_ready ATTRIBUTE_UNUSED)
26701 {
26702 last_scheduled_insn = NULL;
26703 load_store_pendulum = 0;
26704 divide_cnt = 0;
26705 vec_pairing = 0;
26706 }
26707
26708 /* The following function is called at the end of scheduling BB.
26709 After reload, it inserts nops as needed to enforce insn group bundling. */
26710
26711 static void
26712 rs6000_sched_finish (FILE *dump, int sched_verbose)
26713 {
26714 int n_groups;
26715
26716 if (sched_verbose)
26717 fprintf (dump, "=== Finishing schedule.\n");
26718
26719 if (reload_completed && rs6000_sched_groups)
26720 {
26721 /* Do not run the sched_finish hook when selective scheduling is enabled. */
26722 if (sel_sched_p ())
26723 return;
26724
26725 if (rs6000_sched_insert_nops == sched_finish_none)
26726 return;
26727
26728 if (rs6000_sched_insert_nops == sched_finish_pad_groups)
26729 n_groups = pad_groups (dump, sched_verbose,
26730 current_sched_info->prev_head,
26731 current_sched_info->next_tail);
26732 else
26733 n_groups = redefine_groups (dump, sched_verbose,
26734 current_sched_info->prev_head,
26735 current_sched_info->next_tail);
26736
26737 if (sched_verbose >= 6)
26738 {
26739 fprintf (dump, "ngroups = %d\n", n_groups);
26740 print_rtl (dump, current_sched_info->prev_head);
26741 fprintf (dump, "Done finish_sched\n");
26742 }
26743 }
26744 }
26745
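/* Bundle of scheduler state, saved and restored by the scheduling
context hooks below (used e.g. by selective scheduling); mirrors the
corresponding global scheduling variables. */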
26746 struct rs6000_sched_context
26747 {
26748 short cached_can_issue_more;
26749 rtx_insn *last_scheduled_insn;
26750 int load_store_pendulum;
26751 int divide_cnt;
26752 int vec_pairing;
26753 };
26754
26755 typedef struct rs6000_sched_context rs6000_sched_context_def;
26756 typedef rs6000_sched_context_def *rs6000_sched_context_t;
26757
26758 /* Allocate storage for a new scheduling context. */
26759 static void *
26760 rs6000_alloc_sched_context (void)
26761 {
26762 return xmalloc (sizeof (rs6000_sched_context_def));
26763 }
26764
26765 /* If CLEAN_P is true, initialize _SC with clean data; otherwise,
26766 initialize it from the global context. */
26767 static void
26768 rs6000_init_sched_context (void *_sc, bool clean_p)
26769 {
26770 rs6000_sched_context_t sc = (rs6000_sched_context_t) _sc;
26771
26772 if (clean_p)
26773 {
26774 sc->cached_can_issue_more = 0;
26775 sc->last_scheduled_insn = NULL;
26776 sc->load_store_pendulum = 0;
26777 sc->divide_cnt = 0;
26778 sc->vec_pairing = 0;
26779 }
26780 else
26781 {
26782 sc->cached_can_issue_more = cached_can_issue_more;
26783 sc->last_scheduled_insn = last_scheduled_insn;
26784 sc->load_store_pendulum = load_store_pendulum;
26785 sc->divide_cnt = divide_cnt;
26786 sc->vec_pairing = vec_pairing;
26787 }
26788 }
26789
26790 /* Sets the global scheduling context to the one pointed to by _SC. */
26791 static void
26792 rs6000_set_sched_context (void *_sc)
26793 {
26794 rs6000_sched_context_t sc = (rs6000_sched_context_t) _sc;
26795
26796 gcc_assert (sc != NULL);
26797
26798 cached_can_issue_more = sc->cached_can_issue_more;
26799 last_scheduled_insn = sc->last_scheduled_insn;
26800 load_store_pendulum = sc->load_store_pendulum;
26801 divide_cnt = sc->divide_cnt;
26802 vec_pairing = sc->vec_pairing;
26803 }
26804
26805 /* Free _SC. */
26806 static void
26807 rs6000_free_sched_context (void *_sc)
26808 {
26809 gcc_assert (_sc != NULL);
26810
26811 free (_sc);
26812 }
26813
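/* Return false for division and square-root insns, which are too
expensive to execute speculatively; allow all other insn types. */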
26814 static bool
26815 rs6000_sched_can_speculate_insn (rtx_insn *insn)
26816 {
26817 switch (get_attr_type (insn))
26818 {
26819 case TYPE_DIV:
26820 case TYPE_SDIV:
26821 case TYPE_DDIV:
26822 case TYPE_VECDIV:
26823 case TYPE_SSQRT:
26824 case TYPE_DSQRT:
26825 return false;
26826
26827 default:
26828 return true;
26829 }
26830 }
26831 \f
26832 /* Length in bytes of the trampoline for entering a nested function. */
26833
26834 int
26835 rs6000_trampoline_size (void)
26836 {
26837 int ret = 0;
26838
26839 switch (DEFAULT_ABI)
26840 {
26841 default:
26842 gcc_unreachable ();
26843
26844 case ABI_AIX:
26845 ret = (TARGET_32BIT) ? 12 : 24;
26846 break;
26847
26848 case ABI_ELFv2:
26849 gcc_assert (!TARGET_32BIT);
26850 ret = 32;
26851 break;
26852
26853 case ABI_DARWIN:
26854 case ABI_V4:
26855 ret = (TARGET_32BIT) ? 40 : 48;
26856 break;
26857 }
26858
26859 return ret;
26860 }
26861
26862 /* Emit RTL insns to initialize the variable parts of a trampoline.
26863 FNADDR is an RTX for the address of the function's pure code.
26864 CXT is an RTX for the static chain value for the function. */
26865
26866 static void
26867 rs6000_trampoline_init (rtx m_tramp, tree fndecl, rtx cxt)
26868 {
26869 int regsize = (TARGET_32BIT) ? 4 : 8;
26870 rtx fnaddr = XEXP (DECL_RTL (fndecl), 0);
26871 rtx ctx_reg = force_reg (Pmode, cxt);
26872 rtx addr = force_reg (Pmode, XEXP (m_tramp, 0));
26873
26874 switch (DEFAULT_ABI)
26875 {
26876 default:
26877 gcc_unreachable ();
26878
26879 /* Under AIX, just build the 3-word function descriptor. */
26880 case ABI_AIX:
26881 {
26882 rtx fnmem, fn_reg, toc_reg;
26883
26884 if (!TARGET_POINTERS_TO_NESTED_FUNCTIONS)
26885 error ("you cannot take the address of a nested function if you use "
26886 "the %qs option", "-mno-pointers-to-nested-functions");
26887
26888 fnmem = gen_const_mem (Pmode, force_reg (Pmode, fnaddr));
26889 fn_reg = gen_reg_rtx (Pmode);
26890 toc_reg = gen_reg_rtx (Pmode);
26891
26892 /* Macro to shorten the code expansions below. */
26893 # define MEM_PLUS(MEM, OFFSET) adjust_address (MEM, Pmode, OFFSET)
26894
26895 m_tramp = replace_equiv_address (m_tramp, addr);
26896
26897 emit_move_insn (fn_reg, MEM_PLUS (fnmem, 0));
26898 emit_move_insn (toc_reg, MEM_PLUS (fnmem, regsize));
26899 emit_move_insn (MEM_PLUS (m_tramp, 0), fn_reg);
26900 emit_move_insn (MEM_PLUS (m_tramp, regsize), toc_reg);
26901 emit_move_insn (MEM_PLUS (m_tramp, 2*regsize), ctx_reg);
26902
26903 # undef MEM_PLUS
26904 }
26905 break;
26906
26907 /* Under V.4/eabi/darwin, __trampoline_setup does the real work. */
26908 case ABI_ELFv2:
26909 case ABI_DARWIN:
26910 case ABI_V4:
26911 emit_library_call (gen_rtx_SYMBOL_REF (Pmode, "__trampoline_setup"),
26912 LCT_NORMAL, VOIDmode,
26913 addr, Pmode,
26914 GEN_INT (rs6000_trampoline_size ()), SImode,
26915 fnaddr, Pmode,
26916 ctx_reg, Pmode);
26917 break;
26918 }
26919 }
26920
26921 \f
26922 /* Returns TRUE iff the target attribute indicated by ATTR_ID takes a plain
26923 identifier as an argument, so the front end shouldn't look it up. */
26924
26925 static bool
26926 rs6000_attribute_takes_identifier_p (const_tree attr_id)
26927 {
26928 return is_attribute_p ("altivec", attr_id);
26929 }
26930
26931 /* Handle the "altivec" attribute. The attribute may have
26932 arguments as follows:
26933
26934 __attribute__((altivec(vector__)))
26935 __attribute__((altivec(pixel__))) (always followed by 'unsigned short')
26936 __attribute__((altivec(bool__))) (always followed by 'unsigned')
26937
26938 and may appear more than once (e.g., 'vector bool char') in a
26939 given declaration. */
26940
26941 static tree
26942 rs6000_handle_altivec_attribute (tree *node,
26943 tree name ATTRIBUTE_UNUSED,
26944 tree args,
26945 int flags ATTRIBUTE_UNUSED,
26946 bool *no_add_attrs)
26947 {
26948 tree type = *node, result = NULL_TREE;
26949 machine_mode mode;
26950 int unsigned_p;
26951 char altivec_type
26952 = ((args && TREE_CODE (args) == TREE_LIST && TREE_VALUE (args)
26953 && TREE_CODE (TREE_VALUE (args)) == IDENTIFIER_NODE)
26954 ? *IDENTIFIER_POINTER (TREE_VALUE (args))
26955 : '?');
26956
26957 while (POINTER_TYPE_P (type)
26958 || TREE_CODE (type) == FUNCTION_TYPE
26959 || TREE_CODE (type) == METHOD_TYPE
26960 || TREE_CODE (type) == ARRAY_TYPE)
26961 type = TREE_TYPE (type);
26962
26963 mode = TYPE_MODE (type);
26964
26965 /* Check for invalid AltiVec type qualifiers. */
26966 if (type == long_double_type_node)
26967 error ("use of %<long double%> in AltiVec types is invalid");
26968 else if (type == boolean_type_node)
26969 error ("use of boolean types in AltiVec types is invalid");
26970 else if (TREE_CODE (type) == COMPLEX_TYPE)
26971 error ("use of %<complex%> in AltiVec types is invalid");
26972 else if (DECIMAL_FLOAT_MODE_P (mode))
26973 error ("use of decimal floating point types in AltiVec types is invalid");
26974 else if (!TARGET_VSX)
26975 {
26976 if (type == long_unsigned_type_node || type == long_integer_type_node)
26977 {
26978 if (TARGET_64BIT)
26979 error ("use of %<long%> in AltiVec types is invalid for "
26980 "64-bit code without %qs", "-mvsx");
26981 else if (rs6000_warn_altivec_long)
26982 warning (0, "use of %<long%> in AltiVec types is deprecated; "
26983 "use %<int%>");
26984 }
26985 else if (type == long_long_unsigned_type_node
26986 || type == long_long_integer_type_node)
26987 error ("use of %<long long%> in AltiVec types is invalid without %qs",
26988 "-mvsx");
26989 else if (type == double_type_node)
26990 error ("use of %<double%> in AltiVec types is invalid without %qs",
26991 "-mvsx");
26992 }
26993
26994 switch (altivec_type)
26995 {
26996 case 'v':
26997 unsigned_p = TYPE_UNSIGNED (type);
26998 switch (mode)
26999 {
27000 case E_TImode:
27001 result = (unsigned_p ? unsigned_V1TI_type_node : V1TI_type_node);
27002 break;
27003 case E_DImode:
27004 result = (unsigned_p ? unsigned_V2DI_type_node : V2DI_type_node);
27005 break;
27006 case E_SImode:
27007 result = (unsigned_p ? unsigned_V4SI_type_node : V4SI_type_node);
27008 break;
27009 case E_HImode:
27010 result = (unsigned_p ? unsigned_V8HI_type_node : V8HI_type_node);
27011 break;
27012 case E_QImode:
27013 result = (unsigned_p ? unsigned_V16QI_type_node : V16QI_type_node);
27014 break;
27015 case E_SFmode: result = V4SF_type_node; break;
27016 case E_DFmode: result = V2DF_type_node; break;
27017 /* If the user says 'vector int bool', we may be handed the 'bool'
27018 attribute _before_ the 'vector' attribute, and so select the
27019 proper type in the 'b' case below. */
27020 case E_V4SImode: case E_V8HImode: case E_V16QImode: case E_V4SFmode:
27021 case E_V2DImode: case E_V2DFmode:
27022 result = type;
27023 default: break;
27024 }
27025 break;
27026 case 'b':
27027 switch (mode)
27028 {
27029 case E_DImode: case E_V2DImode: result = bool_V2DI_type_node; break;
27030 case E_SImode: case E_V4SImode: result = bool_V4SI_type_node; break;
27031 case E_HImode: case E_V8HImode: result = bool_V8HI_type_node; break;
27032 case E_QImode: case E_V16QImode: result = bool_V16QI_type_node;
27033 default: break;
27034 }
27035 break;
27036 case 'p':
27037 switch (mode)
27038 {
27039 case E_V8HImode: result = pixel_V8HI_type_node;
27040 default: break;
27041 }
27042 default: break;
27043 }
27044
27045 /* Propagate qualifiers attached to the element type
27046 onto the vector type. */
27047 if (result && result != type && TYPE_QUALS (type))
27048 result = build_qualified_type (result, TYPE_QUALS (type));
27049
27050 *no_add_attrs = true; /* No need to hang on to the attribute. */
27051
27052 if (result)
27053 *node = lang_hooks.types.reconstruct_complex_type (*node, result);
27054
27055 return NULL_TREE;
27056 }
27057
27058 /* AltiVec defines five built-in scalar types that serve as vector
27059 elements; we must teach the compiler how to mangle them. The 128-bit
27060 floating point mangling is target-specific as well. */
27061
27062 static const char *
27063 rs6000_mangle_type (const_tree type)
27064 {
27065 type = TYPE_MAIN_VARIANT (type);
27066
27067 if (TREE_CODE (type) != VOID_TYPE && TREE_CODE (type) != BOOLEAN_TYPE
27068 && TREE_CODE (type) != INTEGER_TYPE && TREE_CODE (type) != REAL_TYPE)
27069 return NULL;
27070
27071 if (type == bool_char_type_node) return "U6__boolc";
27072 if (type == bool_short_type_node) return "U6__bools";
27073 if (type == pixel_type_node) return "u7__pixel";
27074 if (type == bool_int_type_node) return "U6__booli";
27075 if (type == bool_long_long_type_node) return "U6__boolx";
27076
27077 if (SCALAR_FLOAT_TYPE_P (type) && FLOAT128_IBM_P (TYPE_MODE (type)))
27078 return "g";
27079 if (SCALAR_FLOAT_TYPE_P (type) && FLOAT128_IEEE_P (TYPE_MODE (type)))
27080 return ieee128_mangling_gcc_8_1 ? "U10__float128" : "u9__ieee128";
27081
27082 /* For all other types, use the default mangling. */
27083 return NULL;
27084 }
27085
27086 /* Handle a "longcall" or "shortcall" attribute; arguments as in
27087 struct attribute_spec.handler. */
27088
27089 static tree
27090 rs6000_handle_longcall_attribute (tree *node, tree name,
27091 tree args ATTRIBUTE_UNUSED,
27092 int flags ATTRIBUTE_UNUSED,
27093 bool *no_add_attrs)
27094 {
27095 if (TREE_CODE (*node) != FUNCTION_TYPE
27096 && TREE_CODE (*node) != FIELD_DECL
27097 && TREE_CODE (*node) != TYPE_DECL)
27098 {
27099 warning (OPT_Wattributes, "%qE attribute only applies to functions",
27100 name);
27101 *no_add_attrs = true;
27102 }
27103
27104 return NULL_TREE;
27105 }
27106
27107 /* Set longcall attributes on all functions declared when
27108 rs6000_default_long_calls is true. */
27109 static void
27110 rs6000_set_default_type_attributes (tree type)
27111 {
27112 if (rs6000_default_long_calls
27113 && (TREE_CODE (type) == FUNCTION_TYPE
27114 || TREE_CODE (type) == METHOD_TYPE))
27115 TYPE_ATTRIBUTES (type) = tree_cons (get_identifier ("longcall"),
27116 NULL_TREE,
27117 TYPE_ATTRIBUTES (type));
27118
27119 #if TARGET_MACHO
27120 darwin_set_default_type_attributes (type);
27121 #endif
27122 }
27123
27124 /* Return a reference suitable for calling a function with the
27125 longcall attribute. */
27126
27127 static rtx
27128 rs6000_longcall_ref (rtx call_ref, rtx arg)
27129 {
27130 /* System V adds '.' to the internal name, so skip any leading periods. */
27131 const char *call_name = XSTR (call_ref, 0);
27132 if (*call_name == '.')
27133 {
27134 while (*call_name == '.')
27135 call_name++;
27136
27137 tree node = get_identifier (call_name);
27138 call_ref = gen_rtx_SYMBOL_REF (VOIDmode, IDENTIFIER_POINTER (node));
27139 }
27140
27141 if (TARGET_PLTSEQ)
27142 {
27143 rtx base = const0_rtx;
27144 int regno = 12;
27145 if (rs6000_pcrel_p (cfun))
27146 {
27147 rtx reg = gen_rtx_REG (Pmode, regno);
27148 rtx u = gen_rtx_UNSPEC (Pmode, gen_rtvec (3, base, call_ref, arg),
27149 UNSPEC_PLT_PCREL);
27150 emit_insn (gen_rtx_SET (reg, u));
27151 return reg;
27152 }
27153
27154 if (DEFAULT_ABI == ABI_ELFv2)
27155 base = gen_rtx_REG (Pmode, TOC_REGISTER);
27156 else
27157 {
27158 if (flag_pic)
27159 base = gen_rtx_REG (Pmode, RS6000_PIC_OFFSET_TABLE_REGNUM);
27160 regno = 11;
27161 }
27162 /* Reg must match that used by linker PLT stubs. For ELFv2, r12
27163 may be used by a function global entry point. For SysV4, r11
27164 is used by __glink_PLTresolve lazy resolver entry. */
27165 rtx reg = gen_rtx_REG (Pmode, regno);
27166 rtx hi = gen_rtx_UNSPEC (Pmode, gen_rtvec (3, base, call_ref, arg),
27167 UNSPEC_PLT16_HA);
27168 rtx lo = gen_rtx_UNSPEC (Pmode, gen_rtvec (3, reg, call_ref, arg),
27169 UNSPEC_PLT16_LO);
27170 emit_insn (gen_rtx_SET (reg, hi));
27171 emit_insn (gen_rtx_SET (reg, lo));
27172 return reg;
27173 }
27174
27175 return force_reg (Pmode, call_ref);
27176 }
27177 \f
27178 #ifndef TARGET_USE_MS_BITFIELD_LAYOUT
27179 #define TARGET_USE_MS_BITFIELD_LAYOUT 0
27180 #endif
27181
27182 /* Handle a "ms_struct" or "gcc_struct" attribute; arguments as in
27183 struct attribute_spec.handler. */
27184 static tree
27185 rs6000_handle_struct_attribute (tree *node, tree name,
27186 tree args ATTRIBUTE_UNUSED,
27187 int flags ATTRIBUTE_UNUSED, bool *no_add_attrs)
27188 {
27189 tree *type = NULL;
27190 if (DECL_P (*node))
27191 {
27192 if (TREE_CODE (*node) == TYPE_DECL)
27193 type = &TREE_TYPE (*node);
27194 }
27195 else
27196 type = node;
27197
27198 if (!(type && (TREE_CODE (*type) == RECORD_TYPE
27199 || TREE_CODE (*type) == UNION_TYPE)))
27200 {
27201 warning (OPT_Wattributes, "%qE attribute ignored", name);
27202 *no_add_attrs = true;
27203 }
27204
27205 else if ((is_attribute_p ("ms_struct", name)
27206 && lookup_attribute ("gcc_struct", TYPE_ATTRIBUTES (*type)))
27207 || ((is_attribute_p ("gcc_struct", name)
27208 && lookup_attribute ("ms_struct", TYPE_ATTRIBUTES (*type)))))
27209 {
27210 warning (OPT_Wattributes, "%qE incompatible attribute ignored",
27211 name);
27212 *no_add_attrs = true;
27213 }
27214
27215 return NULL_TREE;
27216 }
27217
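/* Return true if RECORD_TYPE should use the Microsoft bitfield layout:
either TARGET_USE_MS_BITFIELD_LAYOUT is set and the type is not marked
gcc_struct, or the type carries the ms_struct attribute. */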
27218 static bool
27219 rs6000_ms_bitfield_layout_p (const_tree record_type)
27220 {
27221 return (TARGET_USE_MS_BITFIELD_LAYOUT &&
27222 !lookup_attribute ("gcc_struct", TYPE_ATTRIBUTES (record_type)))
27223 || lookup_attribute ("ms_struct", TYPE_ATTRIBUTES (record_type));
27224 }
27225 \f
27226 #ifdef USING_ELFOS_H
27227
27228 /* A get_unnamed_section callback, used for switching to toc_section. */
27229
27230 static void
27231 rs6000_elf_output_toc_section_asm_op (const void *data ATTRIBUTE_UNUSED)
27232 {
27233 if ((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
27234 && TARGET_MINIMAL_TOC)
27235 {
27236 if (!toc_initialized)
27237 {
27238 fprintf (asm_out_file, "%s\n", TOC_SECTION_ASM_OP);
27239 ASM_OUTPUT_ALIGN (asm_out_file, TARGET_64BIT ? 3 : 2);
27240 (*targetm.asm_out.internal_label) (asm_out_file, "LCTOC", 0);
27241 fprintf (asm_out_file, "\t.tc ");
27242 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (asm_out_file, "LCTOC1[TC],");
27243 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (asm_out_file, "LCTOC1");
27244 fprintf (asm_out_file, "\n");
27245
27246 fprintf (asm_out_file, "%s\n", MINIMAL_TOC_SECTION_ASM_OP);
27247 ASM_OUTPUT_ALIGN (asm_out_file, TARGET_64BIT ? 3 : 2);
27248 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (asm_out_file, "LCTOC1");
27249 fprintf (asm_out_file, " = .+32768\n");
27250 toc_initialized = 1;
27251 }
27252 else
27253 fprintf (asm_out_file, "%s\n", MINIMAL_TOC_SECTION_ASM_OP);
27254 }
27255 else if (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
27256 {
27257 fprintf (asm_out_file, "%s\n", TOC_SECTION_ASM_OP);
27258 if (!toc_initialized)
27259 {
27260 ASM_OUTPUT_ALIGN (asm_out_file, TARGET_64BIT ? 3 : 2);
27261 toc_initialized = 1;
27262 }
27263 }
27264 else
27265 {
27266 fprintf (asm_out_file, "%s\n", MINIMAL_TOC_SECTION_ASM_OP);
27267 if (!toc_initialized)
27268 {
27269 ASM_OUTPUT_ALIGN (asm_out_file, TARGET_64BIT ? 3 : 2);
27270 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (asm_out_file, "LCTOC1");
27271 fprintf (asm_out_file, " = .+32768\n");
27272 toc_initialized = 1;
27273 }
27274 }
27275 }
27276
27277 /* Implement TARGET_ASM_INIT_SECTIONS. */
27278
27279 static void
27280 rs6000_elf_asm_init_sections (void)
27281 {
27282 toc_section
27283 = get_unnamed_section (0, rs6000_elf_output_toc_section_asm_op, NULL);
27284
27285 sdata2_section
27286 = get_unnamed_section (SECTION_WRITE, output_section_asm_op,
27287 SDATA2_SECTION_ASM_OP);
27288 }
27289
27290 /* Implement TARGET_SELECT_RTX_SECTION. */
27291
27292 static section *
27293 rs6000_elf_select_rtx_section (machine_mode mode, rtx x,
27294 unsigned HOST_WIDE_INT align)
27295 {
27296 if (ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (x, mode))
27297 return toc_section;
27298 else
27299 return default_elf_select_rtx_section (mode, x, align);
27300 }
27301 \f
27302 /* For a SYMBOL_REF, set generic flags and then perform some
27303 target-specific processing.
27304
27305 When the AIX ABI is requested on a non-AIX system, replace the
27306 function name with the real name (with a leading .) rather than the
27307 function descriptor name. This saves a lot of overriding code to
27308 read the prefixes. */
27309
27310 static void rs6000_elf_encode_section_info (tree, rtx, int) ATTRIBUTE_UNUSED;
27311 static void
27312 rs6000_elf_encode_section_info (tree decl, rtx rtl, int first)
27313 {
27314 default_encode_section_info (decl, rtl, first);
27315
27316 if (first
27317 && TREE_CODE (decl) == FUNCTION_DECL
27318 && !TARGET_AIX
27319 && DEFAULT_ABI == ABI_AIX)
27320 {
27321 rtx sym_ref = XEXP (rtl, 0);
27322 size_t len = strlen (XSTR (sym_ref, 0));
27323 char *str = XALLOCAVEC (char, len + 2);
27324 str[0] = '.';
27325 memcpy (str + 1, XSTR (sym_ref, 0), len + 1);
27326 XSTR (sym_ref, 0) = ggc_alloc_string (str, len + 1);
27327 }
27328 }
27329
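/* Return true if SECTION names TEMPL itself or a subsection of it,
i.e. SECTION is TEMPL or begins with TEMPL followed by '.'. */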
27330 static inline bool
27331 compare_section_name (const char *section, const char *templ)
27332 {
27333 int len;
27334
27335 len = strlen (templ);
27336 return (strncmp (section, templ, len) == 0
27337 && (section[len] == 0 || section[len] == '.'));
27338 }
27339
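/* Return true if DECL is a candidate for placement in one of the
small data sections (.sdata, .sbss and friends). */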
27340 bool
27341 rs6000_elf_in_small_data_p (const_tree decl)
27342 {
27343 if (rs6000_sdata == SDATA_NONE)
27344 return false;
27345
27346 /* We want to merge strings, so we never consider them small data. */
27347 if (TREE_CODE (decl) == STRING_CST)
27348 return false;
27349
27350 /* Functions are never in the small data area. */
27351 if (TREE_CODE (decl) == FUNCTION_DECL)
27352 return false;
27353
27354 if (TREE_CODE (decl) == VAR_DECL && DECL_SECTION_NAME (decl))
27355 {
27356 const char *section = DECL_SECTION_NAME (decl);
27357 if (compare_section_name (section, ".sdata")
27358 || compare_section_name (section, ".sdata2")
27359 || compare_section_name (section, ".gnu.linkonce.s")
27360 || compare_section_name (section, ".sbss")
27361 || compare_section_name (section, ".sbss2")
27362 || compare_section_name (section, ".gnu.linkonce.sb")
27363 || strcmp (section, ".PPC.EMB.sdata0") == 0
27364 || strcmp (section, ".PPC.EMB.sbss0") == 0)
27365 return true;
27366 }
27367 else
27368 {
27369 /* If we are told not to put readonly data in sdata, then don't. */
27370 if (TREE_READONLY (decl) && rs6000_sdata != SDATA_EABI
27371 && !rs6000_readonly_in_sdata)
27372 return false;
27373
27374 HOST_WIDE_INT size = int_size_in_bytes (TREE_TYPE (decl));
27375
27376 if (size > 0
27377 && size <= g_switch_value
27378 /* If it's not public, and we're not going to reference it there,
27379 there's no need to put it in the small data section. */
27380 && (rs6000_sdata != SDATA_DATA || TREE_PUBLIC (decl)))
27381 return true;
27382 }
27383
27384 return false;
27385 }
27386
27387 #endif /* USING_ELFOS_H */
27388 \f
27389 /* Implement TARGET_USE_BLOCKS_FOR_CONSTANT_P. */
27390
27391 static bool
27392 rs6000_use_blocks_for_constant_p (machine_mode mode, const_rtx x)
27393 {
27394 return !ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (x, mode);
27395 }
27396
27397 /* Do not place thread-local symbols refs in the object blocks. */
27398
27399 static bool
27400 rs6000_use_blocks_for_decl_p (const_tree decl)
27401 {
27402 return !DECL_THREAD_LOCAL_P (decl);
27403 }
27404 \f
27405 /* Return a REG that occurs in ADDR with coefficient 1.
27406 ADDR can be effectively incremented by incrementing REG.
27407
27408 r0 is special and we must not select it as an address
27409 register by this routine since our caller will try to
27410 increment the returned register via an "la" instruction. */
27411
27412 rtx
27413 find_addr_reg (rtx addr)
27414 {
27415 while (GET_CODE (addr) == PLUS)
27416 {
27417 if (REG_P (XEXP (addr, 0))
27418 && REGNO (XEXP (addr, 0)) != 0)
27419 addr = XEXP (addr, 0);
27420 else if (REG_P (XEXP (addr, 1))
27421 && REGNO (XEXP (addr, 1)) != 0)
27422 addr = XEXP (addr, 1);
27423 else if (CONSTANT_P (XEXP (addr, 0)))
27424 addr = XEXP (addr, 1);
27425 else if (CONSTANT_P (XEXP (addr, 1)))
27426 addr = XEXP (addr, 0);
27427 else
27428 gcc_unreachable ();
27429 }
27430 gcc_assert (REG_P (addr) && REGNO (addr) != 0);
27431 return addr;
27432 }
27433
27434 void
27435 rs6000_fatal_bad_address (rtx op)
27436 {
27437 fatal_insn ("bad address", op);
27438 }
27439
27440 #if TARGET_MACHO
27441
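/* One pending branch island: the function it targets, the local label
the island will carry, and the source line number used for the debug
stabs emitted around it. */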
27442 typedef struct branch_island_d {
27443 tree function_name;
27444 tree label_name;
27445 int line_number;
27446 } branch_island;
27447
27448
27449 static vec<branch_island, va_gc> *branch_islands;
27450
27451 /* Remember to generate a branch island for far calls to the given
27452 function. */
27453
27454 static void
27455 add_compiler_branch_island (tree label_name, tree function_name,
27456 int line_number)
27457 {
27458 branch_island bi = {function_name, label_name, line_number};
27459 vec_safe_push (branch_islands, bi);
27460 }
27461
27462 /* Generate far-jump branch islands for everything recorded in
27463 branch_islands. Invoked immediately after the last instruction of
27464 the epilogue has been emitted; the branch islands must be appended
27465 to, and contiguous with, the function body. Mach-O stubs are
27466 generated in machopic_output_stub(). */
27467
27468 static void
27469 macho_branch_islands (void)
27470 {
27471 char tmp_buf[512];
27472
27473 while (!vec_safe_is_empty (branch_islands))
27474 {
27475 branch_island *bi = &branch_islands->last ();
27476 const char *label = IDENTIFIER_POINTER (bi->label_name);
27477 const char *name = IDENTIFIER_POINTER (bi->function_name);
27478 char name_buf[512];
27479 /* Cheap copy of the details from the Darwin ASM_OUTPUT_LABELREF(). */
27480 if (name[0] == '*' || name[0] == '&')
27481 strcpy (name_buf, name+1);
27482 else
27483 {
27484 name_buf[0] = '_';
27485 strcpy (name_buf+1, name);
27486 }
27487 strcpy (tmp_buf, "\n");
27488 strcat (tmp_buf, label);
27489 #if defined (DBX_DEBUGGING_INFO) || defined (XCOFF_DEBUGGING_INFO)
27490 if (write_symbols == DBX_DEBUG || write_symbols == XCOFF_DEBUG)
27491 dbxout_stabd (N_SLINE, bi->line_number);
27492 #endif /* DBX_DEBUGGING_INFO || XCOFF_DEBUGGING_INFO */
27493 if (flag_pic)
27494 {
27495 if (TARGET_LINK_STACK)
27496 {
27497 char name[32];
27498 get_ppc476_thunk_name (name);
27499 strcat (tmp_buf, ":\n\tmflr r0\n\tbl ");
27500 strcat (tmp_buf, name);
27501 strcat (tmp_buf, "\n");
27502 strcat (tmp_buf, label);
27503 strcat (tmp_buf, "_pic:\n\tmflr r11\n");
27504 }
27505 else
27506 {
27507 strcat (tmp_buf, ":\n\tmflr r0\n\tbcl 20,31,");
27508 strcat (tmp_buf, label);
27509 strcat (tmp_buf, "_pic\n");
27510 strcat (tmp_buf, label);
27511 strcat (tmp_buf, "_pic:\n\tmflr r11\n");
27512 }
27513
27514 strcat (tmp_buf, "\taddis r11,r11,ha16(");
27515 strcat (tmp_buf, name_buf);
27516 strcat (tmp_buf, " - ");
27517 strcat (tmp_buf, label);
27518 strcat (tmp_buf, "_pic)\n");
27519
27520 strcat (tmp_buf, "\tmtlr r0\n");
27521
27522 strcat (tmp_buf, "\taddi r12,r11,lo16(");
27523 strcat (tmp_buf, name_buf);
27524 strcat (tmp_buf, " - ");
27525 strcat (tmp_buf, label);
27526 strcat (tmp_buf, "_pic)\n");
27527
27528 strcat (tmp_buf, "\tmtctr r12\n\tbctr\n");
27529 }
27530 else
27531 {
27532 strcat (tmp_buf, ":\n\tlis r12,hi16(");
27533 strcat (tmp_buf, name_buf);
27534 strcat (tmp_buf, ")\n\tori r12,r12,lo16(");
27535 strcat (tmp_buf, name_buf);
27536 strcat (tmp_buf, ")\n\tmtctr r12\n\tbctr");
27537 }
27538 output_asm_insn (tmp_buf, 0);
27539 #if defined (DBX_DEBUGGING_INFO) || defined (XCOFF_DEBUGGING_INFO)
27540 if (write_symbols == DBX_DEBUG || write_symbols == XCOFF_DEBUG)
27541 dbxout_stabd (N_SLINE, bi->line_number);
27542 #endif /* DBX_DEBUGGING_INFO || XCOFF_DEBUGGING_INFO */
27543 branch_islands->pop ();
27544 }
27545 }
27546
27547 /* NO_PREVIOUS_DEF checks whether FUNCTION_NAME already appears in
27548 the list of recorded branch islands. */
27549
27550 static int
27551 no_previous_def (tree function_name)
27552 {
27553 branch_island *bi;
27554 unsigned ix;
27555
27556 FOR_EACH_VEC_SAFE_ELT (branch_islands, ix, bi)
27557 if (function_name == bi->function_name)
27558 return 0;
27559 return 1;
27560 }
27561
27562 /* GET_PREV_LABEL gets the label name from the previous definition of
27563 the function. */
27564
27565 static tree
27566 get_prev_label (tree function_name)
27567 {
27568 branch_island *bi;
27569 unsigned ix;
27570
27571 FOR_EACH_VEC_SAFE_ELT (branch_islands, ix, bi)
27572 if (function_name == bi->function_name)
27573 return bi->label_name;
27574 return NULL_TREE;
27575 }
27576
27577 /* Generate PIC and indirect symbol stubs. */
27578
27579 void
27580 machopic_output_stub (FILE *file, const char *symb, const char *stub)
27581 {
27582 unsigned int length;
27583 char *symbol_name, *lazy_ptr_name;
27584 char *local_label_0;
27585 static unsigned label = 0;
27586
27587 /* Lose our funky encoding stuff so it doesn't contaminate the stub. */
27588 symb = (*targetm.strip_name_encoding) (symb);
27589
27591 length = strlen (symb);
27592 symbol_name = XALLOCAVEC (char, length + 32);
27593 GEN_SYMBOL_NAME_FOR_SYMBOL (symbol_name, symb, length);
27594
27595 lazy_ptr_name = XALLOCAVEC (char, length + 32);
27596 GEN_LAZY_PTR_NAME_FOR_SYMBOL (lazy_ptr_name, symb, length);
27597
27598 if (flag_pic == 2)
27599 switch_to_section (darwin_sections[machopic_picsymbol_stub1_section]);
27600 else
27601 switch_to_section (darwin_sections[machopic_symbol_stub1_section]);
27602
27603 if (flag_pic == 2)
27604 {
27605 fprintf (file, "\t.align 5\n");
27606
27607 fprintf (file, "%s:\n", stub);
27608 fprintf (file, "\t.indirect_symbol %s\n", symbol_name);
27609
27610 label++;
27611 local_label_0 = XALLOCAVEC (char, 16);
27612 sprintf (local_label_0, "L%u$spb", label);
27613
27614 fprintf (file, "\tmflr r0\n");
27615 if (TARGET_LINK_STACK)
27616 {
27617 char name[32];
27618 get_ppc476_thunk_name (name);
27619 fprintf (file, "\tbl %s\n", name);
27620 fprintf (file, "%s:\n\tmflr r11\n", local_label_0);
27621 }
27622 else
27623 {
27624 fprintf (file, "\tbcl 20,31,%s\n", local_label_0);
27625 fprintf (file, "%s:\n\tmflr r11\n", local_label_0);
27626 }
27627 fprintf (file, "\taddis r11,r11,ha16(%s-%s)\n",
27628 lazy_ptr_name, local_label_0);
27629 fprintf (file, "\tmtlr r0\n");
27630 fprintf (file, "\t%s r12,lo16(%s-%s)(r11)\n",
27631 (TARGET_64BIT ? "ldu" : "lwzu"),
27632 lazy_ptr_name, local_label_0);
27633 fprintf (file, "\tmtctr r12\n");
27634 fprintf (file, "\tbctr\n");
27635 }
27636 else
27637 {
27638 fprintf (file, "\t.align 4\n");
27639
27640 fprintf (file, "%s:\n", stub);
27641 fprintf (file, "\t.indirect_symbol %s\n", symbol_name);
27642
27643 fprintf (file, "\tlis r11,ha16(%s)\n", lazy_ptr_name);
27644 fprintf (file, "\t%s r12,lo16(%s)(r11)\n",
27645 (TARGET_64BIT ? "ldu" : "lwzu"),
27646 lazy_ptr_name);
27647 fprintf (file, "\tmtctr r12\n");
27648 fprintf (file, "\tbctr\n");
27649 }
27650
27651 switch_to_section (darwin_sections[machopic_lazy_symbol_ptr_section]);
27652 fprintf (file, "%s:\n", lazy_ptr_name);
27653 fprintf (file, "\t.indirect_symbol %s\n", symbol_name);
27654 fprintf (file, "%sdyld_stub_binding_helper\n",
27655 (TARGET_64BIT ? DOUBLE_INT_ASM_OP : "\t.long\t"));
27656 }
27657
27658 /* Legitimize PIC addresses. If the address is already
27659 position-independent, we return ORIG. Newly generated
27660 position-independent addresses go into a reg. This is REG if
27661 nonzero, otherwise we allocate register(s) as necessary. */
27662
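/* True iff constant X fits in a signed 16-bit immediate field. */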
27663 #define SMALL_INT(X) ((UINTVAL (X) + 0x8000) < 0x10000)
27664
27665 rtx
27666 rs6000_machopic_legitimize_pic_address (rtx orig, machine_mode mode,
27667 rtx reg)
27668 {
27669 rtx base, offset;
27670
27671 if (reg == NULL && !reload_completed)
27672 reg = gen_reg_rtx (Pmode);
27673
27674 if (GET_CODE (orig) == CONST)
27675 {
27676 rtx reg_temp;
27677
27678 if (GET_CODE (XEXP (orig, 0)) == PLUS
27679 && XEXP (XEXP (orig, 0), 0) == pic_offset_table_rtx)
27680 return orig;
27681
27682 gcc_assert (GET_CODE (XEXP (orig, 0)) == PLUS);
27683
27684 /* Use a different reg for the intermediate value, as
27685 it will be marked UNCHANGING. */
27686 reg_temp = !can_create_pseudo_p () ? reg : gen_reg_rtx (Pmode);
27687 base = rs6000_machopic_legitimize_pic_address (XEXP (XEXP (orig, 0), 0),
27688 Pmode, reg_temp);
27689 offset =
27690 rs6000_machopic_legitimize_pic_address (XEXP (XEXP (orig, 0), 1),
27691 Pmode, reg);
27692
27693 if (CONST_INT_P (offset))
27694 {
27695 if (SMALL_INT (offset))
27696 return plus_constant (Pmode, base, INTVAL (offset));
27697 else if (!reload_completed)
27698 offset = force_reg (Pmode, offset);
27699 else
27700 {
27701 rtx mem = force_const_mem (Pmode, orig);
27702 return machopic_legitimize_pic_address (mem, Pmode, reg);
27703 }
27704 }
27705 return gen_rtx_PLUS (Pmode, base, offset);
27706 }
27707
27708 /* Fall back on generic machopic code. */
27709 return machopic_legitimize_pic_address (orig, mode, reg);
27710 }
27711
27712 /* Output a .machine directive for the Darwin assembler, and call
27713 the generic start_file routine. */
27714
27715 static void
27716 rs6000_darwin_file_start (void)
27717 {
27718 static const struct
27719 {
27720 const char *arg;
27721 const char *name;
27722 HOST_WIDE_INT if_set;
27723 } mapping[] = {
27724 { "ppc64", "ppc64", MASK_64BIT },
27725 { "970", "ppc970", MASK_PPC_GPOPT | MASK_MFCRF | MASK_POWERPC64 },
27726 { "power4", "ppc970", 0 },
27727 { "G5", "ppc970", 0 },
27728 { "7450", "ppc7450", 0 },
27729 { "7400", "ppc7400", MASK_ALTIVEC },
27730 { "G4", "ppc7400", 0 },
27731 { "750", "ppc750", 0 },
27732 { "740", "ppc750", 0 },
27733 { "G3", "ppc750", 0 },
27734 { "604e", "ppc604e", 0 },
27735 { "604", "ppc604", 0 },
27736 { "603e", "ppc603", 0 },
27737 { "603", "ppc603", 0 },
27738 { "601", "ppc601", 0 },
27739 { NULL, "ppc", 0 } };
27740 const char *cpu_id = "";
27741 size_t i;
27742
27743 rs6000_file_start ();
27744 darwin_file_start ();
27745
27746 /* Determine the argument to -mcpu=. Default to G3 if not specified. */
27747
27748 if (rs6000_default_cpu != 0 && rs6000_default_cpu[0] != '\0')
27749 cpu_id = rs6000_default_cpu;
27750
27751 if (global_options_set.x_rs6000_cpu_index)
27752 cpu_id = processor_target_table[rs6000_cpu_index].name;
27753
27754 /* Look through the mapping array. Pick the first name that either
27755 matches the argument, has a bit set in IF_SET that is also set
27756 in the target flags, or has a NULL name. */
27757
27758 i = 0;
27759 while (mapping[i].arg != NULL
27760 && strcmp (mapping[i].arg, cpu_id) != 0
27761 && (mapping[i].if_set & rs6000_isa_flags) == 0)
27762 i++;
27763
27764 fprintf (asm_out_file, "\t.machine %s\n", mapping[i].name);
27765 }
27766
27767 #endif /* TARGET_MACHO */
27768
27769 #if TARGET_ELF
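/* Implement TARGET_ASM_RELOC_RW_MASK: bit 0 set means local relocations
force a read-write section, bit 1 means global relocations do. */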
27770 static int
27771 rs6000_elf_reloc_rw_mask (void)
27772 {
27773 if (flag_pic)
27774 return 3;
27775 else if (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
27776 return 2;
27777 else
27778 return 0;
27779 }
27780
27781 /* Record an element in the table of global constructors. SYMBOL is
27782 a SYMBOL_REF of the function to be called; PRIORITY is a number
27783 between 0 and MAX_INIT_PRIORITY.
27784
27785 This differs from default_named_section_asm_out_constructor in
27786 that we have special handling for -mrelocatable. */
27787
27788 static void rs6000_elf_asm_out_constructor (rtx, int) ATTRIBUTE_UNUSED;
27789 static void
27790 rs6000_elf_asm_out_constructor (rtx symbol, int priority)
27791 {
27792 const char *section = ".ctors";
27793 char buf[18];
27794
27795 if (priority != DEFAULT_INIT_PRIORITY)
27796 {
27797 sprintf (buf, ".ctors.%.5u",
27798 /* Invert the numbering so the linker puts us in the proper
27799 order; constructors are run from right to left, and the
27800 linker sorts in increasing order. */
27801 MAX_INIT_PRIORITY - priority);
27802 section = buf;
27803 }
27804
27805 switch_to_section (get_section (section, SECTION_WRITE, NULL));
27806 assemble_align (POINTER_SIZE);
27807
27808 if (DEFAULT_ABI == ABI_V4
27809 && (TARGET_RELOCATABLE || flag_pic > 1))
27810 {
27811 fputs ("\t.long (", asm_out_file);
27812 output_addr_const (asm_out_file, symbol);
27813 fputs (")@fixup\n", asm_out_file);
27814 }
27815 else
27816 assemble_integer (symbol, POINTER_SIZE / BITS_PER_UNIT, POINTER_SIZE, 1);
27817 }
27818
27819 static void rs6000_elf_asm_out_destructor (rtx, int) ATTRIBUTE_UNUSED;
27820 static void
27821 rs6000_elf_asm_out_destructor (rtx symbol, int priority)
27822 {
27823 const char *section = ".dtors";
27824 char buf[18];
27825
27826 if (priority != DEFAULT_INIT_PRIORITY)
27827 {
27828 sprintf (buf, ".dtors.%.5u",
27829 /* Invert the numbering so the linker puts us in the proper
27830 order; constructors are run from right to left, and the
27831 linker sorts in increasing order. */
27832 MAX_INIT_PRIORITY - priority);
27833 section = buf;
27834 }
27835
27836 switch_to_section (get_section (section, SECTION_WRITE, NULL));
27837 assemble_align (POINTER_SIZE);
27838
27839 if (DEFAULT_ABI == ABI_V4
27840 && (TARGET_RELOCATABLE || flag_pic > 1))
27841 {
27842 fputs ("\t.long (", asm_out_file);
27843 output_addr_const (asm_out_file, symbol);
27844 fputs (")@fixup\n", asm_out_file);
27845 }
27846 else
27847 assemble_integer (symbol, POINTER_SIZE / BITS_PER_UNIT, POINTER_SIZE, 1);
27848 }
27849
27850 void
27851 rs6000_elf_declare_function_name (FILE *file, const char *name, tree decl)
27852 {
27853 if (TARGET_64BIT && DEFAULT_ABI != ABI_ELFv2)
27854 {
27855 fputs ("\t.section\t\".opd\",\"aw\"\n\t.align 3\n", file);
27856 ASM_OUTPUT_LABEL (file, name);
27857 fputs (DOUBLE_INT_ASM_OP, file);
27858 rs6000_output_function_entry (file, name);
27859 fputs (",.TOC.@tocbase,0\n\t.previous\n", file);
27860 if (DOT_SYMBOLS)
27861 {
27862 fputs ("\t.size\t", file);
27863 assemble_name (file, name);
27864 fputs (",24\n\t.type\t.", file);
27865 assemble_name (file, name);
27866 fputs (",@function\n", file);
27867 if (TREE_PUBLIC (decl) && ! DECL_WEAK (decl))
27868 {
27869 fputs ("\t.globl\t.", file);
27870 assemble_name (file, name);
27871 putc ('\n', file);
27872 }
27873 }
27874 else
27875 ASM_OUTPUT_TYPE_DIRECTIVE (file, name, "function");
27876 ASM_DECLARE_RESULT (file, DECL_RESULT (decl));
27877 rs6000_output_function_entry (file, name);
27878 fputs (":\n", file);
27879 return;
27880 }
27881
27882 int uses_toc;
27883 if (DEFAULT_ABI == ABI_V4
27884 && (TARGET_RELOCATABLE || flag_pic > 1)
27885 && !TARGET_SECURE_PLT
27886 && (!constant_pool_empty_p () || crtl->profile)
27887 && (uses_toc = uses_TOC ()))
27888 {
27889 char buf[256];
27890
27891 if (uses_toc == 2)
27892 switch_to_other_text_partition ();
27893 (*targetm.asm_out.internal_label) (file, "LCL", rs6000_pic_labelno);
27894
27895 fprintf (file, "\t.long ");
27896 assemble_name (file, toc_label_name);
27897 need_toc_init = 1;
27898 putc ('-', file);
27899 ASM_GENERATE_INTERNAL_LABEL (buf, "LCF", rs6000_pic_labelno);
27900 assemble_name (file, buf);
27901 putc ('\n', file);
27902 if (uses_toc == 2)
27903 switch_to_other_text_partition ();
27904 }
27905
27906 ASM_OUTPUT_TYPE_DIRECTIVE (file, name, "function");
27907 ASM_DECLARE_RESULT (file, DECL_RESULT (decl));
27908
27909 if (TARGET_CMODEL == CMODEL_LARGE
27910 && rs6000_global_entry_point_prologue_needed_p ())
27911 {
27912 char buf[256];
27913
27914 (*targetm.asm_out.internal_label) (file, "LCL", rs6000_pic_labelno);
27915
27916 fprintf (file, "\t.quad .TOC.-");
27917 ASM_GENERATE_INTERNAL_LABEL (buf, "LCF", rs6000_pic_labelno);
27918 assemble_name (file, buf);
27919 putc ('\n', file);
27920 }
27921
27922 if (DEFAULT_ABI == ABI_AIX)
27923 {
27924 const char *desc_name, *orig_name;
27925
27926 orig_name = (*targetm.strip_name_encoding) (name);
27927 desc_name = orig_name;
27928 while (*desc_name == '.')
27929 desc_name++;
27930
27931 if (TREE_PUBLIC (decl))
27932 fprintf (file, "\t.globl %s\n", desc_name);
27933
27934 fprintf (file, "%s\n", MINIMAL_TOC_SECTION_ASM_OP);
27935 fprintf (file, "%s:\n", desc_name);
27936 fprintf (file, "\t.long %s\n", orig_name);
27937 fputs ("\t.long _GLOBAL_OFFSET_TABLE_\n", file);
27938 fputs ("\t.long 0\n", file);
27939 fprintf (file, "\t.previous\n");
27940 }
27941 ASM_OUTPUT_LABEL (file, name);
27942 }
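/* Sketch of the output for a public function "foo" under the 64-bit
   non-ELFv2 ABI with dot symbols (exact directives vary with target
   options):

	.section ".opd","aw"
	.align 3
   foo:
	.quad .foo,.TOC.@tocbase,0
	.previous
	.size foo,24
	.type .foo,@function
	.globl .foo
   .foo:

   Here "foo" names the three-doubleword function descriptor in .opd
   and ".foo" the actual code entry point.  */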
27943
27944 static void rs6000_elf_file_end (void) ATTRIBUTE_UNUSED;
27945 static void
27946 rs6000_elf_file_end (void)
27947 {
27948 #ifdef HAVE_AS_GNU_ATTRIBUTE
27949 /* ??? The value emitted depends on options active at file end.
27950 Assume anyone using #pragma or attributes that might change
27951 options knows what they are doing. */
27952 if ((TARGET_64BIT || DEFAULT_ABI == ABI_V4)
27953 && rs6000_passes_float)
27954 {
27955 int fp;
27956
27957 if (TARGET_HARD_FLOAT)
27958 fp = 1;
27959 else
27960 fp = 2;
27961 if (rs6000_passes_long_double)
27962 {
27963 if (!TARGET_LONG_DOUBLE_128)
27964 fp |= 2 * 4;
27965 else if (TARGET_IEEEQUAD)
27966 fp |= 3 * 4;
27967 else
27968 fp |= 1 * 4;
27969 }
27970 fprintf (asm_out_file, "\t.gnu_attribute 4, %d\n", fp);
27971 }
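  /* As computed above, hard float with 128-bit IBM long double yields
     ".gnu_attribute 4, 5" (1 | 1*4), while soft float with 64-bit
     long double yields ".gnu_attribute 4, 10" (2 | 2*4).  */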
27972 if (TARGET_32BIT && DEFAULT_ABI == ABI_V4)
27973 {
27974 if (rs6000_passes_vector)
27975 fprintf (asm_out_file, "\t.gnu_attribute 8, %d\n",
27976 (TARGET_ALTIVEC_ABI ? 2 : 1));
27977 if (rs6000_returns_struct)
27978 fprintf (asm_out_file, "\t.gnu_attribute 12, %d\n",
27979 aix_struct_return ? 2 : 1);
27980 }
27981 #endif
27982 #if defined (POWERPC_LINUX) || defined (POWERPC_FREEBSD)
27983 if (TARGET_32BIT || DEFAULT_ABI == ABI_ELFv2)
27984 file_end_indicate_exec_stack ();
27985 #endif
27986
27987 if (flag_split_stack)
27988 file_end_indicate_split_stack ();
27989
27990 if (cpu_builtin_p)
27991 {
27992 /* We have expanded a CPU builtin, so we need to emit a reference to
27993 the special symbol that LIBC uses to declare that it supports the
27994 AT_PLATFORM and AT_HWCAP/AT_HWCAP2-in-the-TCB feature. */
27995 switch_to_section (data_section);
27996 fprintf (asm_out_file, "\t.align %u\n", TARGET_32BIT ? 2 : 3);
27997 fprintf (asm_out_file, "\t%s %s\n",
27998 TARGET_32BIT ? ".long" : ".quad", tcb_verification_symbol);
27999 }
28000 }
28001 #endif
28002
28003 #if TARGET_XCOFF
28004
28005 #ifndef HAVE_XCOFF_DWARF_EXTRAS
28006 #define HAVE_XCOFF_DWARF_EXTRAS 0
28007 #endif
28008
28009 static enum unwind_info_type
28010 rs6000_xcoff_debug_unwind_info (void)
28011 {
28012 return UI_NONE;
28013 }
28014
28015 static void
28016 rs6000_xcoff_asm_output_anchor (rtx symbol)
28017 {
28018 char buffer[100];
28019
28020 sprintf (buffer, "$ + " HOST_WIDE_INT_PRINT_DEC,
28021 SYMBOL_REF_BLOCK_OFFSET (symbol));
28022 fprintf (asm_out_file, "%s", SET_ASM_OP);
28023 RS6000_OUTPUT_BASENAME (asm_out_file, XSTR (symbol, 0));
28024 fprintf (asm_out_file, ",");
28025 RS6000_OUTPUT_BASENAME (asm_out_file, buffer);
28026 fprintf (asm_out_file, "\n");
28027 }
28028
28029 static void
28030 rs6000_xcoff_asm_globalize_label (FILE *stream, const char *name)
28031 {
28032 fputs (GLOBAL_ASM_OP, stream);
28033 RS6000_OUTPUT_BASENAME (stream, name);
28034 putc ('\n', stream);
28035 }
28036
28037 /* A get_unnamed_section callback, used for read-only sections. DIRECTIVE
28038 points to the section string variable. */
28039
28040 static void
28041 rs6000_xcoff_output_readonly_section_asm_op (const void *directive)
28042 {
28043 fprintf (asm_out_file, "\t.csect %s[RO],%s\n",
28044 *(const char *const *) directive,
28045 XCOFF_CSECT_DEFAULT_ALIGNMENT_STR);
28046 }
28047
28048 /* Likewise for read-write sections. */
28049
28050 static void
28051 rs6000_xcoff_output_readwrite_section_asm_op (const void *directive)
28052 {
28053 fprintf (asm_out_file, "\t.csect %s[RW],%s\n",
28054 *(const char *const *) directive,
28055 XCOFF_CSECT_DEFAULT_ALIGNMENT_STR);
28056 }
28057
28058 static void
28059 rs6000_xcoff_output_tls_section_asm_op (const void *directive)
28060 {
28061 fprintf (asm_out_file, "\t.csect %s[TL],%s\n",
28062 *(const char *const *) directive,
28063 XCOFF_CSECT_DEFAULT_ALIGNMENT_STR);
28064 }
28065
28066 /* A get_unnamed_section callback, used for switching to toc_section. */
28067
28068 static void
28069 rs6000_xcoff_output_toc_section_asm_op (const void *data ATTRIBUTE_UNUSED)
28070 {
28071 if (TARGET_MINIMAL_TOC)
28072 {
28073 /* toc_section is always selected at least once from
28074 rs6000_xcoff_file_start, so this is guaranteed to
28075 always be defined once and only once in each file. */
28076 if (!toc_initialized)
28077 {
28078 fputs ("\t.toc\nLCTOC..1:\n", asm_out_file);
28079 fputs ("\t.tc toc_table[TC],toc_table[RW]\n", asm_out_file);
28080 toc_initialized = 1;
28081 }
28082 fprintf (asm_out_file, "\t.csect toc_table[RW]%s\n",
28083 (TARGET_32BIT ? "" : ",3"));
28084 }
28085 else
28086 fputs ("\t.toc\n", asm_out_file);
28087 }
28088
28089 /* Implement TARGET_ASM_INIT_SECTIONS. */
28090
28091 static void
28092 rs6000_xcoff_asm_init_sections (void)
28093 {
28094 read_only_data_section
28095 = get_unnamed_section (0, rs6000_xcoff_output_readonly_section_asm_op,
28096 &xcoff_read_only_section_name);
28097
28098 private_data_section
28099 = get_unnamed_section (SECTION_WRITE,
28100 rs6000_xcoff_output_readwrite_section_asm_op,
28101 &xcoff_private_data_section_name);
28102
28103 read_only_private_data_section
28104 = get_unnamed_section (0, rs6000_xcoff_output_readonly_section_asm_op,
28105 &xcoff_private_rodata_section_name);
28106
28107 tls_data_section
28108 = get_unnamed_section (SECTION_TLS,
28109 rs6000_xcoff_output_tls_section_asm_op,
28110 &xcoff_tls_data_section_name);
28111
28112 tls_private_data_section
28113 = get_unnamed_section (SECTION_TLS,
28114 rs6000_xcoff_output_tls_section_asm_op,
28115 &xcoff_private_data_section_name);
28116
28117 toc_section
28118 = get_unnamed_section (0, rs6000_xcoff_output_toc_section_asm_op, NULL);
28119
28120 readonly_data_section = read_only_data_section;
28121 }
28122
28123 static int
28124 rs6000_xcoff_reloc_rw_mask (void)
28125 {
28126 return 3;
28127 }
28128
28129 static void
28130 rs6000_xcoff_asm_named_section (const char *name, unsigned int flags,
28131 tree decl ATTRIBUTE_UNUSED)
28132 {
28133 int smclass;
28134 static const char * const suffix[5] = { "PR", "RO", "RW", "TL", "XO" };
28135
28136 if (flags & SECTION_EXCLUDE)
28137 smclass = 4;
28138 else if (flags & SECTION_DEBUG)
28139 {
28140 fprintf (asm_out_file, "\t.dwsect %s\n", name);
28141 return;
28142 }
28143 else if (flags & SECTION_CODE)
28144 smclass = 0;
28145 else if (flags & SECTION_TLS)
28146 smclass = 3;
28147 else if (flags & SECTION_WRITE)
28148 smclass = 2;
28149 else
28150 smclass = 1;
28151
28152 fprintf (asm_out_file, "\t.csect %s%s[%s],%u\n",
28153 (flags & SECTION_CODE) ? "." : "",
28154 name, suffix[smclass], flags & SECTION_ENTSIZE);
28155 }
28156
28157 #define IN_NAMED_SECTION(DECL) \
28158 ((TREE_CODE (DECL) == FUNCTION_DECL || TREE_CODE (DECL) == VAR_DECL) \
28159 && DECL_SECTION_NAME (DECL) != NULL)
28160
28161 static section *
28162 rs6000_xcoff_select_section (tree decl, int reloc,
28163 unsigned HOST_WIDE_INT align)
28164 {
28165 /* Place variables with alignment stricter than BIGGEST_ALIGNMENT into
28166 named section. */
28167 if (align > BIGGEST_ALIGNMENT)
28168 {
28169 resolve_unique_section (decl, reloc, true);
28170 if (IN_NAMED_SECTION (decl))
28171 return get_named_section (decl, NULL, reloc);
28172 }
28173
28174 if (decl_readonly_section (decl, reloc))
28175 {
28176 if (TREE_PUBLIC (decl))
28177 return read_only_data_section;
28178 else
28179 return read_only_private_data_section;
28180 }
28181 else
28182 {
28183 #if HAVE_AS_TLS
28184 if (TREE_CODE (decl) == VAR_DECL && DECL_THREAD_LOCAL_P (decl))
28185 {
28186 if (TREE_PUBLIC (decl))
28187 return tls_data_section;
28188 else if (bss_initializer_p (decl))
28189 {
28190 /* Convert to COMMON to emit in BSS. */
28191 DECL_COMMON (decl) = 1;
28192 return tls_comm_section;
28193 }
28194 else
28195 return tls_private_data_section;
28196 }
28197 else
28198 #endif
28199 if (TREE_PUBLIC (decl))
28200 return data_section;
28201 else
28202 return private_data_section;
28203 }
28204 }
28205
28206 static void
28207 rs6000_xcoff_unique_section (tree decl, int reloc ATTRIBUTE_UNUSED)
28208 {
28209 const char *name;
28210
28211 /* Use select_section for private data and uninitialized data with
28212 alignment <= BIGGEST_ALIGNMENT. */
28213 if (!TREE_PUBLIC (decl)
28214 || DECL_COMMON (decl)
28215 || (DECL_INITIAL (decl) == NULL_TREE
28216 && DECL_ALIGN (decl) <= BIGGEST_ALIGNMENT)
28217 || DECL_INITIAL (decl) == error_mark_node
28218 || (flag_zero_initialized_in_bss
28219 && initializer_zerop (DECL_INITIAL (decl))))
28220 return;
28221
28222 name = IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl));
28223 name = (*targetm.strip_name_encoding) (name);
28224 set_decl_section_name (decl, name);
28225 }
28226
28227 /* Select section for constant in constant pool.
28228
28229 On RS/6000, all constants are in the private read-only data area.
28230 However, if this is being placed in the TOC it must be output as a
28231 toc entry. */
28232
28233 static section *
28234 rs6000_xcoff_select_rtx_section (machine_mode mode, rtx x,
28235 unsigned HOST_WIDE_INT align ATTRIBUTE_UNUSED)
28236 {
28237 if (ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (x, mode))
28238 return toc_section;
28239 else
28240 return read_only_private_data_section;
28241 }
28242
28243 /* Remove any trailing [DS] or the like from the symbol name. */
28244
28245 static const char *
28246 rs6000_xcoff_strip_name_encoding (const char *name)
28247 {
28248 size_t len;
28249 if (*name == '*')
28250 name++;
28251 len = strlen (name);
28252 if (name[len - 1] == ']')
28253 return ggc_alloc_string (name, len - 4);
28254 else
28255 return name;
28256 }
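/* E.g. "foo[DS]" is stripped to "foo"; the length-4 chop relies on
   every XCOFF mapping-class suffix having the four-character form
   "[XX]".  */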
28257
28258 /* Section attributes. AIX is always PIC. */
28259
28260 static unsigned int
28261 rs6000_xcoff_section_type_flags (tree decl, const char *name, int reloc)
28262 {
28263 unsigned int align;
28264 unsigned int flags = default_section_type_flags (decl, name, reloc);
28265
28266 /* Align to at least UNIT size. */
28267 if ((flags & SECTION_CODE) != 0 || !decl || !DECL_P (decl))
28268 align = MIN_UNITS_PER_WORD;
28269 else
28270 /* Increase alignment of large objects if not already stricter. */
28271 align = MAX ((DECL_ALIGN (decl) / BITS_PER_UNIT),
28272 int_size_in_bytes (TREE_TYPE (decl)) > MIN_UNITS_PER_WORD
28273 ? UNITS_PER_FP_WORD : MIN_UNITS_PER_WORD);
28274
28275 return flags | (exact_log2 (align) & SECTION_ENTSIZE);
28276 }
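/* E.g. a decl aligned to 16 bytes and larger than a word gets
   align == 16, encoded in the returned flags as exact_log2 (16) == 4
   within the SECTION_ENTSIZE bits.  */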
28277
28278 /* Output at beginning of assembler file.
28279
28280 Initialize the section names for the RS/6000 at this point.
28281
28282 Specify filename, including full path, to assembler.
28283
28284 We want to go into the TOC section so at least one .toc will be emitted.
28285 Also, in order to output proper .bs/.es pairs, we need at least one static
28286 [RW] section emitted.
28287
28288 Finally, declare mcount when profiling to make the assembler happy. */
28289
28290 static void
28291 rs6000_xcoff_file_start (void)
28292 {
28293 rs6000_gen_section_name (&xcoff_bss_section_name,
28294 main_input_filename, ".bss_");
28295 rs6000_gen_section_name (&xcoff_private_data_section_name,
28296 main_input_filename, ".rw_");
28297 rs6000_gen_section_name (&xcoff_private_rodata_section_name,
28298 main_input_filename, ".rop_");
28299 rs6000_gen_section_name (&xcoff_read_only_section_name,
28300 main_input_filename, ".ro_");
28301 rs6000_gen_section_name (&xcoff_tls_data_section_name,
28302 main_input_filename, ".tls_");
28303 rs6000_gen_section_name (&xcoff_tbss_section_name,
28304 main_input_filename, ".tbss_[UL]");
28305
28306 fputs ("\t.file\t", asm_out_file);
28307 output_quoted_string (asm_out_file, main_input_filename);
28308 fputc ('\n', asm_out_file);
28309 if (write_symbols != NO_DEBUG)
28310 switch_to_section (private_data_section);
28311 switch_to_section (toc_section);
28312 switch_to_section (text_section);
28313 if (profile_flag)
28314 fprintf (asm_out_file, "\t.extern %s\n", RS6000_MCOUNT);
28315 rs6000_file_start ();
28316 }
28317
28318 /* Output at end of assembler file.
28319 On the RS/6000, referencing data should automatically pull in text. */
28320
28321 static void
28322 rs6000_xcoff_file_end (void)
28323 {
28324 switch_to_section (text_section);
28325 fputs ("_section_.text:\n", asm_out_file);
28326 switch_to_section (data_section);
28327 fputs (TARGET_32BIT
28328 ? "\t.long _section_.text\n" : "\t.llong _section_.text\n",
28329 asm_out_file);
28330 }
28331
28332 struct declare_alias_data
28333 {
28334 FILE *file;
28335 bool function_descriptor;
28336 };
28337
28338 /* Declare alias N. A helper function for call_for_symbol_and_aliases. */
28339
28340 static bool
28341 rs6000_declare_alias (struct symtab_node *n, void *d)
28342 {
28343 struct declare_alias_data *data = (struct declare_alias_data *)d;
28344 /* Main symbol is output specially, because varasm machinery does part of
28345 the job for us - we do not need to declare .globl/lglobs and such. */
28346 if (!n->alias || n->weakref)
28347 return false;
28348
28349 if (lookup_attribute ("ifunc", DECL_ATTRIBUTES (n->decl)))
28350 return false;
28351
28352 /* Prevent assemble_alias from trying to use .set pseudo operation
28353 that does not behave as expected by the middle-end. */
28354 TREE_ASM_WRITTEN (n->decl) = true;
28355
28356 const char *name = IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (n->decl));
28357 char *buffer = (char *) alloca (strlen (name) + 2);
28358 char *p;
28359 int dollar_inside = 0;
28360
28361 strcpy (buffer, name);
28362 p = strchr (buffer, '$');
28363 while (p) {
28364 *p = '_';
28365 dollar_inside++;
28366 p = strchr (p + 1, '$');
28367 }
28368 if (TREE_PUBLIC (n->decl))
28369 {
28370 if (!RS6000_WEAK || !DECL_WEAK (n->decl))
28371 {
28372 if (dollar_inside) {
28373 if (data->function_descriptor)
28374 fprintf(data->file, "\t.rename .%s,\".%s\"\n", buffer, name);
28375 fprintf(data->file, "\t.rename %s,\"%s\"\n", buffer, name);
28376 }
28377 if (data->function_descriptor)
28378 {
28379 fputs ("\t.globl .", data->file);
28380 RS6000_OUTPUT_BASENAME (data->file, buffer);
28381 putc ('\n', data->file);
28382 }
28383 fputs ("\t.globl ", data->file);
28384 RS6000_OUTPUT_BASENAME (data->file, buffer);
28385 putc ('\n', data->file);
28386 }
28387 #ifdef ASM_WEAKEN_DECL
28388 else if (DECL_WEAK (n->decl) && !data->function_descriptor)
28389 ASM_WEAKEN_DECL (data->file, n->decl, name, NULL);
28390 #endif
28391 }
28392 else
28393 {
28394 if (dollar_inside)
28395 {
28396 if (data->function_descriptor)
28397 fprintf(data->file, "\t.rename .%s,\".%s\"\n", buffer, name);
28398 fprintf(data->file, "\t.rename %s,\"%s\"\n", buffer, name);
28399 }
28400 if (data->function_descriptor)
28401 {
28402 fputs ("\t.lglobl .", data->file);
28403 RS6000_OUTPUT_BASENAME (data->file, buffer);
28404 putc ('\n', data->file);
28405 }
28406 fputs ("\t.lglobl ", data->file);
28407 RS6000_OUTPUT_BASENAME (data->file, buffer);
28408 putc ('\n', data->file);
28409 }
28410 if (data->function_descriptor)
28411 fputs (".", data->file);
28412 RS6000_OUTPUT_BASENAME (data->file, buffer);
28413 fputs (":\n", data->file);
28414 return false;
28415 }
28416
28417
28418 #ifdef HAVE_GAS_HIDDEN
28419 /* Helper function to calculate visibility of a DECL
28420 and return the value as a const string. */
28421
28422 static const char *
28423 rs6000_xcoff_visibility (tree decl)
28424 {
28425 static const char * const visibility_types[] = {
28426 "", ",protected", ",hidden", ",internal"
28427 };
28428
28429 enum symbol_visibility vis = DECL_VISIBILITY (decl);
28430 return visibility_types[vis];
28431 }
28432 #endif
28433
28434
28435 /* This macro produces the initial definition of a function name.
28436 On the RS/6000, we need to place an extra '.' in the function name and
28437 output the function descriptor.
28438 Dollar signs are converted to underscores.
28439
28440 The csect for the function will have already been created when
28441 text_section was selected. We do have to go back to that csect, however.
28442
28443 The third and fourth parameters to the .function pseudo-op (16 and 044)
28444 are placeholders which no longer have any use.
28445
28446 Because AIX assembler's .set command has unexpected semantics, we output
28447 all aliases as alternative labels in front of the definition. */
28448
28449 void
28450 rs6000_xcoff_declare_function_name (FILE *file, const char *name, tree decl)
28451 {
28452 char *buffer = (char *) alloca (strlen (name) + 1);
28453 char *p;
28454 int dollar_inside = 0;
28455 struct declare_alias_data data = {file, false};
28456
28457 strcpy (buffer, name);
28458 p = strchr (buffer, '$');
28459 while (p) {
28460 *p = '_';
28461 dollar_inside++;
28462 p = strchr (p + 1, '$');
28463 }
28464 if (TREE_PUBLIC (decl))
28465 {
28466 if (!RS6000_WEAK || !DECL_WEAK (decl))
28467 {
28468 if (dollar_inside) {
28469 fprintf(file, "\t.rename .%s,\".%s\"\n", buffer, name);
28470 fprintf(file, "\t.rename %s,\"%s\"\n", buffer, name);
28471 }
28472 fputs ("\t.globl .", file);
28473 RS6000_OUTPUT_BASENAME (file, buffer);
28474 #ifdef HAVE_GAS_HIDDEN
28475 fputs (rs6000_xcoff_visibility (decl), file);
28476 #endif
28477 putc ('\n', file);
28478 }
28479 }
28480 else
28481 {
28482 if (dollar_inside) {
28483 fprintf(file, "\t.rename .%s,\".%s\"\n", buffer, name);
28484 fprintf(file, "\t.rename %s,\"%s\"\n", buffer, name);
28485 }
28486 fputs ("\t.lglobl .", file);
28487 RS6000_OUTPUT_BASENAME (file, buffer);
28488 putc ('\n', file);
28489 }
28490 fputs ("\t.csect ", file);
28491 RS6000_OUTPUT_BASENAME (file, buffer);
28492 fputs (TARGET_32BIT ? "[DS]\n" : "[DS],3\n", file);
28493 RS6000_OUTPUT_BASENAME (file, buffer);
28494 fputs (":\n", file);
28495 symtab_node::get (decl)->call_for_symbol_and_aliases (rs6000_declare_alias,
28496 &data, true);
28497 fputs (TARGET_32BIT ? "\t.long ." : "\t.llong .", file);
28498 RS6000_OUTPUT_BASENAME (file, buffer);
28499 fputs (", TOC[tc0], 0\n", file);
28500 in_section = NULL;
28501 switch_to_section (function_section (decl));
28502 putc ('.', file);
28503 RS6000_OUTPUT_BASENAME (file, buffer);
28504 fputs (":\n", file);
28505 data.function_descriptor = true;
28506 symtab_node::get (decl)->call_for_symbol_and_aliases (rs6000_declare_alias,
28507 &data, true);
28508 if (!DECL_IGNORED_P (decl))
28509 {
28510 if (write_symbols == DBX_DEBUG || write_symbols == XCOFF_DEBUG)
28511 xcoffout_declare_function (file, decl, buffer);
28512 else if (write_symbols == DWARF2_DEBUG)
28513 {
28514 name = (*targetm.strip_name_encoding) (name);
28515 fprintf (file, "\t.function .%s,.%s,2,0\n", name, name);
28516 }
28517 }
28518 return;
28519 }
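/* Sketch of the output for a public 64-bit function "bar" (alias
   labels and debug directives interleave as described above):

	.globl .bar
	.csect bar[DS],3
   bar:
	.llong .bar, TOC[tc0], 0
	<switch to the function's text csect>
   .bar:

   so "bar" labels the function-descriptor csect and ".bar" the
   code.  */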
28520
28521
28522 /* Output assembly language to globalize a symbol from a DECL,
28523 possibly with visibility. */
28524
28525 void
28526 rs6000_xcoff_asm_globalize_decl_name (FILE *stream, tree decl)
28527 {
28528 const char *name = XSTR (XEXP (DECL_RTL (decl), 0), 0);
28529 fputs (GLOBAL_ASM_OP, stream);
28530 RS6000_OUTPUT_BASENAME (stream, name);
28531 #ifdef HAVE_GAS_HIDDEN
28532 fputs (rs6000_xcoff_visibility (decl), stream);
28533 #endif
28534 putc ('\n', stream);
28535 }
28536
28537 /* Output assembly language to define a symbol as COMMON from a DECL,
28538 possibly with visibility. */
28539
28540 void
28541 rs6000_xcoff_asm_output_aligned_decl_common (FILE *stream,
28542 tree decl ATTRIBUTE_UNUSED,
28543 const char *name,
28544 unsigned HOST_WIDE_INT size,
28545 unsigned HOST_WIDE_INT align)
28546 {
28547 unsigned HOST_WIDE_INT align2 = 2;
28548
28549 if (align > 32)
28550 align2 = floor_log2 (align / BITS_PER_UNIT);
28551 else if (size > 4)
28552 align2 = 3;
28553
28554 fputs (COMMON_ASM_OP, stream);
28555 RS6000_OUTPUT_BASENAME (stream, name);
28556
28557 fprintf (stream,
28558 "," HOST_WIDE_INT_PRINT_UNSIGNED "," HOST_WIDE_INT_PRINT_UNSIGNED,
28559 size, align2);
28560
28561 #ifdef HAVE_GAS_HIDDEN
28562 if (decl != NULL)
28563 fputs (rs6000_xcoff_visibility (decl), stream);
28564 #endif
28565 putc ('\n', stream);
28566 }
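/* Examples of the alignment encoding above: a 16-byte object with
   align <= 32 gets align2 == 3 (2**3 == 8-byte alignment), while an
   object with align == 128 bits gets floor_log2 (128 / 8) == 4.  */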
28567
28568 /* This macro produces the initial definition of an object (variable) name.
28569 Because AIX assembler's .set command has unexpected semantics, we output
28570 all aliases as alternative labels in front of the definition. */
28571
28572 void
28573 rs6000_xcoff_declare_object_name (FILE *file, const char *name, tree decl)
28574 {
28575 struct declare_alias_data data = {file, false};
28576 RS6000_OUTPUT_BASENAME (file, name);
28577 fputs (":\n", file);
28578 symtab_node::get_create (decl)->call_for_symbol_and_aliases (rs6000_declare_alias,
28579 &data, true);
28580 }
28581
28582 /* Override the default 'SYMBOL-.' syntax with AIX compatible 'SYMBOL-$'. */
28583
28584 void
28585 rs6000_asm_output_dwarf_pcrel (FILE *file, int size, const char *label)
28586 {
28587 fputs (integer_asm_op (size, FALSE), file);
28588 assemble_name (file, label);
28589 fputs ("-$", file);
28590 }
28591
28592 /* Output a symbol offset relative to the dbase for the current object.
28593 We use __gcc_unwind_dbase as an arbitrary base for dbase and assume
28594 signed offsets.
28595
28596 __gcc_unwind_dbase is embedded in all executables/libraries through
28597 libgcc/config/rs6000/crtdbase.S. */
28598
28599 void
28600 rs6000_asm_output_dwarf_datarel (FILE *file, int size, const char *label)
28601 {
28602 fputs (integer_asm_op (size, FALSE), file);
28603 assemble_name (file, label);
28604 fputs("-__gcc_unwind_dbase", file);
28605 }
28606
28607 #ifdef HAVE_AS_TLS
28608 static void
28609 rs6000_xcoff_encode_section_info (tree decl, rtx rtl, int first)
28610 {
28611 rtx symbol;
28612 int flags;
28613 const char *symname;
28614
28615 default_encode_section_info (decl, rtl, first);
28616
28617 /* Careful not to prod global register variables. */
28618 if (!MEM_P (rtl))
28619 return;
28620 symbol = XEXP (rtl, 0);
28621 if (!SYMBOL_REF_P (symbol))
28622 return;
28623
28624 flags = SYMBOL_REF_FLAGS (symbol);
28625
28626 if (TREE_CODE (decl) == VAR_DECL && DECL_THREAD_LOCAL_P (decl))
28627 flags &= ~SYMBOL_FLAG_HAS_BLOCK_INFO;
28628
28629 SYMBOL_REF_FLAGS (symbol) = flags;
28630
28631 /* Append mapping class to extern decls. */
28632 symname = XSTR (symbol, 0);
28633 if (decl /* sync condition with assemble_external () */
28634 && DECL_P (decl) && DECL_EXTERNAL (decl) && TREE_PUBLIC (decl)
28635 && ((TREE_CODE (decl) == VAR_DECL && !DECL_THREAD_LOCAL_P (decl))
28636 || TREE_CODE (decl) == FUNCTION_DECL)
28637 && symname[strlen (symname) - 1] != ']')
28638 {
28639 char *newname = (char *) alloca (strlen (symname) + 5);
28640 strcpy (newname, symname);
28641 strcat (newname, (TREE_CODE (decl) == FUNCTION_DECL
28642 ? "[DS]" : "[UA]"));
28643 XSTR (symbol, 0) = ggc_strdup (newname);
28644 }
28645 }
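/* E.g. an extern function "baz" is renamed "baz[DS]" and an extern
   non-TLS variable "qux" becomes "qux[UA]", so references pick up the
   XCOFF mapping class; locally defined symbols keep their names.  */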
28646 #endif /* HAVE_AS_TLS */
28647 #endif /* TARGET_XCOFF */
28648
28649 void
28650 rs6000_asm_weaken_decl (FILE *stream, tree decl,
28651 const char *name, const char *val)
28652 {
28653 fputs ("\t.weak\t", stream);
28654 RS6000_OUTPUT_BASENAME (stream, name);
28655 if (decl && TREE_CODE (decl) == FUNCTION_DECL
28656 && DEFAULT_ABI == ABI_AIX && DOT_SYMBOLS)
28657 {
28658 if (TARGET_XCOFF)
28659 fputs ("[DS]", stream);
28660 #if TARGET_XCOFF && HAVE_GAS_HIDDEN
28661 if (TARGET_XCOFF)
28662 fputs (rs6000_xcoff_visibility (decl), stream);
28663 #endif
28664 fputs ("\n\t.weak\t.", stream);
28665 RS6000_OUTPUT_BASENAME (stream, name);
28666 }
28667 #if TARGET_XCOFF && HAVE_GAS_HIDDEN
28668 if (TARGET_XCOFF)
28669 fputs (rs6000_xcoff_visibility (decl), stream);
28670 #endif
28671 fputc ('\n', stream);
28672 if (val)
28673 {
28674 #ifdef ASM_OUTPUT_DEF
28675 ASM_OUTPUT_DEF (stream, name, val);
28676 #endif
28677 if (decl && TREE_CODE (decl) == FUNCTION_DECL
28678 && DEFAULT_ABI == ABI_AIX && DOT_SYMBOLS)
28679 {
28680 fputs ("\t.set\t.", stream);
28681 RS6000_OUTPUT_BASENAME (stream, name);
28682 fputs (",.", stream);
28683 RS6000_OUTPUT_BASENAME (stream, val);
28684 fputc ('\n', stream);
28685 }
28686 }
28687 }
28688
28689
28690 /* Return true if INSN should not be copied. */
28691
28692 static bool
28693 rs6000_cannot_copy_insn_p (rtx_insn *insn)
28694 {
28695 return recog_memoized (insn) >= 0
28696 && get_attr_cannot_copy (insn);
28697 }
28698
28699 /* Compute a (partial) cost for rtx X. Return true if the complete
28700 cost has been computed, and false if subexpressions should be
28701 scanned. In either case, *TOTAL contains the cost result. */
28702
28703 static bool
28704 rs6000_rtx_costs (rtx x, machine_mode mode, int outer_code,
28705 int opno ATTRIBUTE_UNUSED, int *total, bool speed)
28706 {
28707 int code = GET_CODE (x);
28708
28709 switch (code)
28710 {
28711 /* On the RS/6000, if it is valid in the insn, it is free. */
28712 case CONST_INT:
28713 if (((outer_code == SET
28714 || outer_code == PLUS
28715 || outer_code == MINUS)
28716 && (satisfies_constraint_I (x)
28717 || satisfies_constraint_L (x)))
28718 || (outer_code == AND
28719 && (satisfies_constraint_K (x)
28720 || (mode == SImode
28721 ? satisfies_constraint_L (x)
28722 : satisfies_constraint_J (x))))
28723 || ((outer_code == IOR || outer_code == XOR)
28724 && (satisfies_constraint_K (x)
28725 || (mode == SImode
28726 ? satisfies_constraint_L (x)
28727 : satisfies_constraint_J (x))))
28728 || outer_code == ASHIFT
28729 || outer_code == ASHIFTRT
28730 || outer_code == LSHIFTRT
28731 || outer_code == ROTATE
28732 || outer_code == ROTATERT
28733 || outer_code == ZERO_EXTRACT
28734 || (outer_code == MULT
28735 && satisfies_constraint_I (x))
28736 || ((outer_code == DIV || outer_code == UDIV
28737 || outer_code == MOD || outer_code == UMOD)
28738 && exact_log2 (INTVAL (x)) >= 0)
28739 || (outer_code == COMPARE
28740 && (satisfies_constraint_I (x)
28741 || satisfies_constraint_K (x)))
28742 || ((outer_code == EQ || outer_code == NE)
28743 && (satisfies_constraint_I (x)
28744 || satisfies_constraint_K (x)
28745 || (mode == SImode
28746 ? satisfies_constraint_L (x)
28747 : satisfies_constraint_J (x))))
28748 || (outer_code == GTU
28749 && satisfies_constraint_I (x))
28750 || (outer_code == LTU
28751 && satisfies_constraint_P (x)))
28752 {
28753 *total = 0;
28754 return true;
28755 }
28756 else if ((outer_code == PLUS
28757 && reg_or_add_cint_operand (x, VOIDmode))
28758 || (outer_code == MINUS
28759 && reg_or_sub_cint_operand (x, VOIDmode))
28760 || ((outer_code == SET
28761 || outer_code == IOR
28762 || outer_code == XOR)
28763 && (INTVAL (x)
28764 & ~ (unsigned HOST_WIDE_INT) 0xffffffff) == 0))
28765 {
28766 *total = COSTS_N_INSNS (1);
28767 return true;
28768 }
28769 /* FALLTHRU */
28770
28771 case CONST_DOUBLE:
28772 case CONST_WIDE_INT:
28773 case CONST:
28774 case HIGH:
28775 case SYMBOL_REF:
28776 *total = !speed ? COSTS_N_INSNS (1) + 1 : COSTS_N_INSNS (2);
28777 return true;
28778
28779 case MEM:
28780 /* When optimizing for size, MEM should be slightly more expensive
28781 than generating the address, e.g., (plus (reg) (const)).
28782 L1 cache latency is about two instructions. */
28783 *total = !speed ? COSTS_N_INSNS (1) + 1 : COSTS_N_INSNS (2);
28784 if (rs6000_slow_unaligned_access (mode, MEM_ALIGN (x)))
28785 *total += COSTS_N_INSNS (100);
28786 return true;
28787
28788 case LABEL_REF:
28789 *total = 0;
28790 return true;
28791
28792 case PLUS:
28793 case MINUS:
28794 if (FLOAT_MODE_P (mode))
28795 *total = rs6000_cost->fp;
28796 else
28797 *total = COSTS_N_INSNS (1);
28798 return false;
28799
28800 case MULT:
28801 if (CONST_INT_P (XEXP (x, 1))
28802 && satisfies_constraint_I (XEXP (x, 1)))
28803 {
28804 if (INTVAL (XEXP (x, 1)) >= -256
28805 && INTVAL (XEXP (x, 1)) <= 255)
28806 *total = rs6000_cost->mulsi_const9;
28807 else
28808 *total = rs6000_cost->mulsi_const;
28809 }
28810 else if (mode == SFmode)
28811 *total = rs6000_cost->fp;
28812 else if (FLOAT_MODE_P (mode))
28813 *total = rs6000_cost->dmul;
28814 else if (mode == DImode)
28815 *total = rs6000_cost->muldi;
28816 else
28817 *total = rs6000_cost->mulsi;
28818 return false;
28819
28820 case FMA:
28821 if (mode == SFmode)
28822 *total = rs6000_cost->fp;
28823 else
28824 *total = rs6000_cost->dmul;
28825 break;
28826
28827 case DIV:
28828 case MOD:
28829 if (FLOAT_MODE_P (mode))
28830 {
28831 *total = mode == DFmode ? rs6000_cost->ddiv
28832 : rs6000_cost->sdiv;
28833 return false;
28834 }
28835 /* FALLTHRU */
28836
28837 case UDIV:
28838 case UMOD:
28839 if (CONST_INT_P (XEXP (x, 1))
28840 && exact_log2 (INTVAL (XEXP (x, 1))) >= 0)
28841 {
28842 if (code == DIV || code == MOD)
28843 /* Shift, addze */
28844 *total = COSTS_N_INSNS (2);
28845 else
28846 /* Shift */
28847 *total = COSTS_N_INSNS (1);
28848 }
28849 else
28850 {
28851 if (GET_MODE (XEXP (x, 1)) == DImode)
28852 *total = rs6000_cost->divdi;
28853 else
28854 *total = rs6000_cost->divsi;
28855 }
28856 /* Add in shift and subtract for MOD unless we have a mod instruction. */
28857 if (!TARGET_MODULO && (code == MOD || code == UMOD))
28858 *total += COSTS_N_INSNS (2);
28859 return false;
28860
28861 case CTZ:
28862 *total = COSTS_N_INSNS (TARGET_CTZ ? 1 : 4);
28863 return false;
28864
28865 case FFS:
28866 *total = COSTS_N_INSNS (4);
28867 return false;
28868
28869 case POPCOUNT:
28870 *total = COSTS_N_INSNS (TARGET_POPCNTD ? 1 : 6);
28871 return false;
28872
28873 case PARITY:
28874 *total = COSTS_N_INSNS (TARGET_CMPB ? 2 : 6);
28875 return false;
28876
28877 case NOT:
28878 if (outer_code == AND || outer_code == IOR || outer_code == XOR)
28879 *total = 0;
28880 else
28881 *total = COSTS_N_INSNS (1);
28882 return false;
28883
28884 case AND:
28885 if (CONST_INT_P (XEXP (x, 1)))
28886 {
28887 rtx left = XEXP (x, 0);
28888 rtx_code left_code = GET_CODE (left);
28889
28890 /* rotate-and-mask: 1 insn. */
28891 if ((left_code == ROTATE
28892 || left_code == ASHIFT
28893 || left_code == LSHIFTRT)
28894 && rs6000_is_valid_shift_mask (XEXP (x, 1), left, mode))
28895 {
28896 *total = rtx_cost (XEXP (left, 0), mode, left_code, 0, speed);
28897 if (!CONST_INT_P (XEXP (left, 1)))
28898 *total += rtx_cost (XEXP (left, 1), SImode, left_code, 1, speed);
28899 *total += COSTS_N_INSNS (1);
28900 return true;
28901 }
28902
28903 /* rotate-and-mask (no rotate), andi., andis.: 1 insn. */
28904 HOST_WIDE_INT val = INTVAL (XEXP (x, 1));
28905 if (rs6000_is_valid_and_mask (XEXP (x, 1), mode)
28906 || (val & 0xffff) == val
28907 || (val & 0xffff0000) == val
28908 || ((val & 0xffff) == 0 && mode == SImode))
28909 {
28910 *total = rtx_cost (left, mode, AND, 0, speed);
28911 *total += COSTS_N_INSNS (1);
28912 return true;
28913 }
28914
28915 /* 2 insns. */
28916 if (rs6000_is_valid_2insn_and (XEXP (x, 1), mode))
28917 {
28918 *total = rtx_cost (left, mode, AND, 0, speed);
28919 *total += COSTS_N_INSNS (2);
28920 return true;
28921 }
28922 }
28923
28924 *total = COSTS_N_INSNS (1);
28925 return false;
28926
28927 case IOR:
28928 /* FIXME */
28929 *total = COSTS_N_INSNS (1);
28930 return true;
28931
28932 case CLZ:
28933 case XOR:
28934 case ZERO_EXTRACT:
28935 *total = COSTS_N_INSNS (1);
28936 return false;
28937
28938 case ASHIFT:
28939 /* The EXTSWSLI instruction is a combined instruction; don't count
28940 the sign extend and the shift separately within the insn. */
28941 if (TARGET_EXTSWSLI && mode == DImode
28942 && GET_CODE (XEXP (x, 0)) == SIGN_EXTEND
28943 && GET_MODE (XEXP (XEXP (x, 0), 0)) == SImode)
28944 {
28945 *total = 0;
28946 return false;
28947 }
28948 /* fall through */
28949
28950 case ASHIFTRT:
28951 case LSHIFTRT:
28952 case ROTATE:
28953 case ROTATERT:
28954 /* Handle mul_highpart. */
28955 if (outer_code == TRUNCATE
28956 && GET_CODE (XEXP (x, 0)) == MULT)
28957 {
28958 if (mode == DImode)
28959 *total = rs6000_cost->muldi;
28960 else
28961 *total = rs6000_cost->mulsi;
28962 return true;
28963 }
28964 else if (outer_code == AND)
28965 *total = 0;
28966 else
28967 *total = COSTS_N_INSNS (1);
28968 return false;
28969
28970 case SIGN_EXTEND:
28971 case ZERO_EXTEND:
28972 if (MEM_P (XEXP (x, 0)))
28973 *total = 0;
28974 else
28975 *total = COSTS_N_INSNS (1);
28976 return false;
28977
28978 case COMPARE:
28979 case NEG:
28980 case ABS:
28981 if (!FLOAT_MODE_P (mode))
28982 {
28983 *total = COSTS_N_INSNS (1);
28984 return false;
28985 }
28986 /* FALLTHRU */
28987
28988 case FLOAT:
28989 case UNSIGNED_FLOAT:
28990 case FIX:
28991 case UNSIGNED_FIX:
28992 case FLOAT_TRUNCATE:
28993 *total = rs6000_cost->fp;
28994 return false;
28995
28996 case FLOAT_EXTEND:
28997 if (mode == DFmode)
28998 *total = rs6000_cost->sfdf_convert;
28999 else
29000 *total = rs6000_cost->fp;
29001 return false;
29002
29003 case UNSPEC:
29004 switch (XINT (x, 1))
29005 {
29006 case UNSPEC_FRSP:
29007 *total = rs6000_cost->fp;
29008 return true;
29009
29010 default:
29011 break;
29012 }
29013 break;
29014
29015 case CALL:
29016 case IF_THEN_ELSE:
29017 if (!speed)
29018 {
29019 *total = COSTS_N_INSNS (1);
29020 return true;
29021 }
29022 else if (FLOAT_MODE_P (mode) && TARGET_PPC_GFXOPT && TARGET_HARD_FLOAT)
29023 {
29024 *total = rs6000_cost->fp;
29025 return false;
29026 }
29027 break;
29028
29029 case NE:
29030 case EQ:
29031 case GTU:
29032 case LTU:
29033 /* Carry bit requires mode == Pmode.
29034 NEG or PLUS already counted so only add one. */
29035 if (mode == Pmode
29036 && (outer_code == NEG || outer_code == PLUS))
29037 {
29038 *total = COSTS_N_INSNS (1);
29039 return true;
29040 }
29041 /* FALLTHRU */
29042
29043 case GT:
29044 case LT:
29045 case UNORDERED:
29046 if (outer_code == SET)
29047 {
29048 if (XEXP (x, 1) == const0_rtx)
29049 {
29050 *total = COSTS_N_INSNS (2);
29051 return true;
29052 }
29053 else
29054 {
29055 *total = COSTS_N_INSNS (3);
29056 return false;
29057 }
29058 }
29059 /* CC COMPARE. */
29060 if (outer_code == COMPARE)
29061 {
29062 *total = 0;
29063 return true;
29064 }
29065 break;
29066
29067 default:
29068 break;
29069 }
29070
29071 return false;
29072 }
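/* Example of the CONST_INT handling above: in (plus (reg) (const_int
   12)) the constant is free because 12 satisfies the "I" constraint
   (a 16-bit signed immediate), while (set (reg) (const_int
   0x12345678)) charges COSTS_N_INSNS (1) for materializing a value
   that still fits in 32 bits.  */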
29073
29074 /* Debug form of rs6000_rtx_costs that is selected if -mdebug=cost. */
29075
29076 static bool
29077 rs6000_debug_rtx_costs (rtx x, machine_mode mode, int outer_code,
29078 int opno, int *total, bool speed)
29079 {
29080 bool ret = rs6000_rtx_costs (x, mode, outer_code, opno, total, speed);
29081
29082 fprintf (stderr,
29083 "\nrs6000_rtx_costs, return = %s, mode = %s, outer_code = %s, "
29084 "opno = %d, total = %d, speed = %s, x:\n",
29085 ret ? "complete" : "scan inner",
29086 GET_MODE_NAME (mode),
29087 GET_RTX_NAME (outer_code),
29088 opno,
29089 *total,
29090 speed ? "true" : "false");
29091
29092 debug_rtx (x);
29093
29094 return ret;
29095 }
29096
29097 static int
29098 rs6000_insn_cost (rtx_insn *insn, bool speed)
29099 {
29100 if (recog_memoized (insn) < 0)
29101 return 0;
29102
29103 if (!speed)
29104 return get_attr_length (insn);
29105
29106 int cost = get_attr_cost (insn);
29107 if (cost > 0)
29108 return cost;
29109
29110 int n = get_attr_length (insn) / 4;
29111 enum attr_type type = get_attr_type (insn);
29112
29113 switch (type)
29114 {
29115 case TYPE_LOAD:
29116 case TYPE_FPLOAD:
29117 case TYPE_VECLOAD:
29118 cost = COSTS_N_INSNS (n + 1);
29119 break;
29120
29121 case TYPE_MUL:
29122 switch (get_attr_size (insn))
29123 {
29124 case SIZE_8:
29125 cost = COSTS_N_INSNS (n - 1) + rs6000_cost->mulsi_const9;
29126 break;
29127 case SIZE_16:
29128 cost = COSTS_N_INSNS (n - 1) + rs6000_cost->mulsi_const;
29129 break;
29130 case SIZE_32:
29131 cost = COSTS_N_INSNS (n - 1) + rs6000_cost->mulsi;
29132 break;
29133 case SIZE_64:
29134 cost = COSTS_N_INSNS (n - 1) + rs6000_cost->muldi;
29135 break;
29136 default:
29137 gcc_unreachable ();
29138 }
29139 break;
29140 case TYPE_DIV:
29141 switch (get_attr_size (insn))
29142 {
29143 case SIZE_32:
29144 cost = COSTS_N_INSNS (n - 1) + rs6000_cost->divsi;
29145 break;
29146 case SIZE_64:
29147 cost = COSTS_N_INSNS (n - 1) + rs6000_cost->divdi;
29148 break;
29149 default:
29150 gcc_unreachable ();
29151 }
29152 break;
29153
29154 case TYPE_FP:
29155 cost = n * rs6000_cost->fp;
29156 break;
29157 case TYPE_DMUL:
29158 cost = n * rs6000_cost->dmul;
29159 break;
29160 case TYPE_SDIV:
29161 cost = n * rs6000_cost->sdiv;
29162 break;
29163 case TYPE_DDIV:
29164 cost = n * rs6000_cost->ddiv;
29165 break;
29166
29167 case TYPE_SYNC:
29168 case TYPE_LOAD_L:
29169 case TYPE_MFCR:
29170 case TYPE_MFCRF:
29171 cost = COSTS_N_INSNS (n + 2);
29172 break;
29173
29174 default:
29175 cost = COSTS_N_INSNS (n);
29176 }
29177
29178 return cost;
29179 }
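/* E.g. a single-word load insn (length 4, so n == 1) with no explicit
   cost attribute is charged COSTS_N_INSNS (n + 1) == COSTS_N_INSNS (2),
   reflecting roughly one cycle of issue plus one of load latency.  */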
29180
29181 /* Debug form of ADDRESS_COST that is selected if -mdebug=cost. */
29182
29183 static int
29184 rs6000_debug_address_cost (rtx x, machine_mode mode,
29185 addr_space_t as, bool speed)
29186 {
29187 int ret = TARGET_ADDRESS_COST (x, mode, as, speed);
29188
29189 fprintf (stderr, "\nrs6000_address_cost, return = %d, speed = %s, x:\n",
29190 ret, speed ? "true" : "false");
29191 debug_rtx (x);
29192
29193 return ret;
29194 }
29195
29196
29197 /* A C expression returning the cost of moving data from a register of class
29198 FROM to one of class TO. */
29199
29200 static int
29201 rs6000_register_move_cost (machine_mode mode,
29202 reg_class_t from, reg_class_t to)
29203 {
29204 int ret;
29205 reg_class_t rclass;
29206
29207 if (TARGET_DEBUG_COST)
29208 dbg_cost_ctrl++;
29209
29210 /* If we have VSX, we can easily move between FPR or Altivec registers,
29211 otherwise we can only easily move within classes.
29212 Do this first so we give best-case answers for union classes
29213 containing both gprs and vsx regs. */
29214 HARD_REG_SET to_vsx, from_vsx;
29215 COPY_HARD_REG_SET (to_vsx, reg_class_contents[to]);
29216 AND_HARD_REG_SET (to_vsx, reg_class_contents[VSX_REGS]);
29217 COPY_HARD_REG_SET (from_vsx, reg_class_contents[from]);
29218 AND_HARD_REG_SET (from_vsx, reg_class_contents[VSX_REGS]);
29219 if (!hard_reg_set_empty_p (to_vsx)
29220 && !hard_reg_set_empty_p (from_vsx)
29221 && (TARGET_VSX
29222 || hard_reg_set_intersect_p (to_vsx, from_vsx)))
29223 {
29224 int reg = FIRST_FPR_REGNO;
29225 if (TARGET_VSX
29226 || (TEST_HARD_REG_BIT (to_vsx, FIRST_ALTIVEC_REGNO)
29227 && TEST_HARD_REG_BIT (from_vsx, FIRST_ALTIVEC_REGNO)))
29228 reg = FIRST_ALTIVEC_REGNO;
29229 ret = 2 * hard_regno_nregs (reg, mode);
29230 }
29231
29232 /* Moves from/to GENERAL_REGS. */
29233 else if ((rclass = from, reg_classes_intersect_p (to, GENERAL_REGS))
29234 || (rclass = to, reg_classes_intersect_p (from, GENERAL_REGS)))
29235 {
29236 if (rclass == FLOAT_REGS || rclass == ALTIVEC_REGS || rclass == VSX_REGS)
29237 {
29238 if (TARGET_DIRECT_MOVE)
29239 {
29240 /* Keep the cost for direct moves above that for within
29241 a register class even if the actual processor cost is
29242 comparable. We do this because a direct move insn
29243 can't be a nop, whereas with ideal register
29244 allocation a move within the same class might turn
29245 out to be a nop. */
29246 if (rs6000_tune == PROCESSOR_POWER9
29247 || rs6000_tune == PROCESSOR_FUTURE)
29248 ret = 3 * hard_regno_nregs (FIRST_GPR_REGNO, mode);
29249 else
29250 ret = 4 * hard_regno_nregs (FIRST_GPR_REGNO, mode);
29251 /* SFmode requires a conversion when moving between gprs
29252 and vsx. */
29253 if (mode == SFmode)
29254 ret += 2;
29255 }
29256 else
29257 ret = (rs6000_memory_move_cost (mode, rclass, false)
29258 + rs6000_memory_move_cost (mode, GENERAL_REGS, false));
29259 }
29260
29261 /* It's more expensive to move CR_REGS than CR0_REGS because of the
29262 shift. */
29263 else if (rclass == CR_REGS)
29264 ret = 4;
29265
29266 /* For those processors that have slow LR/CTR moves, make them more
29267 expensive than memory in order to bias spills to memory. */
29268 else if ((rs6000_tune == PROCESSOR_POWER6
29269 || rs6000_tune == PROCESSOR_POWER7
29270 || rs6000_tune == PROCESSOR_POWER8
29271 || rs6000_tune == PROCESSOR_POWER9)
29272 && reg_class_subset_p (rclass, SPECIAL_REGS))
29273 ret = 6 * hard_regno_nregs (FIRST_GPR_REGNO, mode);
29274
29275 else
29276 /* A move will cost one instruction per GPR moved. */
29277 ret = 2 * hard_regno_nregs (FIRST_GPR_REGNO, mode);
29278 }
29279
29280 /* Everything else has to go through GENERAL_REGS. */
29281 else
29282 ret = (rs6000_register_move_cost (mode, GENERAL_REGS, to)
29283 + rs6000_register_move_cost (mode, from, GENERAL_REGS));
29284
29285 if (TARGET_DEBUG_COST)
29286 {
29287 if (dbg_cost_ctrl == 1)
29288 fprintf (stderr,
29289 "rs6000_register_move_cost: ret=%d, mode=%s, from=%s, to=%s\n",
29290 ret, GET_MODE_NAME (mode), reg_class_names[from],
29291 reg_class_names[to]);
29292 dbg_cost_ctrl--;
29293 }
29294
29295 return ret;
29296 }
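/* E.g. on a 64-bit POWER9 with direct moves, moving one DFmode value
   between GENERAL_REGS and VSX_REGS costs 3 (kept above the
   intra-class cost of 2), and SFmode costs 3 + 2 because of the
   format conversion.  */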
29297
29298 /* A C expression returning the cost of moving data of MODE from a register to
29299 or from memory. */
29300
29301 static int
29302 rs6000_memory_move_cost (machine_mode mode, reg_class_t rclass,
29303 bool in ATTRIBUTE_UNUSED)
29304 {
29305 int ret;
29306
29307 if (TARGET_DEBUG_COST)
29308 dbg_cost_ctrl++;
29309
29310 if (reg_classes_intersect_p (rclass, GENERAL_REGS))
29311 ret = 4 * hard_regno_nregs (FIRST_GPR_REGNO, mode);
29312 else if ((reg_classes_intersect_p (rclass, FLOAT_REGS)
29313 || reg_classes_intersect_p (rclass, VSX_REGS)))
29314 ret = 4 * hard_regno_nregs (FIRST_FPR_REGNO, mode);
29315 else if (reg_classes_intersect_p (rclass, ALTIVEC_REGS))
29316 ret = 4 * hard_regno_nregs (FIRST_ALTIVEC_REGNO, mode);
29317 else
29318 ret = 4 + rs6000_register_move_cost (mode, rclass, GENERAL_REGS);
29319
29320 if (TARGET_DEBUG_COST)
29321 {
29322 if (dbg_cost_ctrl == 1)
29323 fprintf (stderr,
29324 "rs6000_memory_move_cost: ret=%d, mode=%s, rclass=%s, in=%d\n",
29325 ret, GET_MODE_NAME (mode), reg_class_names[rclass], in);
29326 dbg_cost_ctrl--;
29327 }
29328
29329 return ret;
29330 }
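/* E.g. a DImode load or store to/from a GPR is costed
   4 * hard_regno_nregs (FIRST_GPR_REGNO, DImode): 4 on a 64-bit
   target, 8 on a 32-bit one where the value spans two registers.  */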
29331
29332 /* Implement TARGET_IRA_CHANGE_PSEUDO_ALLOCNO_CLASS.
29333
29334 The register allocator chooses GEN_OR_VSX_REGS for the allocno
29335 class if GENERAL_REGS and VSX_REGS cost is lower than the memory
29336 cost. This happens a lot when TARGET_DIRECT_MOVE makes the register
29337 move cost between GENERAL_REGS and VSX_REGS low.
29338
29339 It might seem reasonable to use a union class. After all, if usage
29340 of vsr is low and gpr high, it might make sense to spill gpr to vsr
29341 rather than memory. However, in cases where register pressure of
29342 both is high, like the cactus_adm spec test, allowing
29343 GEN_OR_VSX_REGS as the allocno class results in bad decisions in
29344 the first scheduling pass. This is partly due to an allocno of
29345 GEN_OR_VSX_REGS wrongly contributing to the GENERAL_REGS pressure
29346 class, which gives too high a pressure for GENERAL_REGS and too low
29347 for VSX_REGS. So, force a choice of the subclass here.
29348
29349 The best class is also the union if GENERAL_REGS and VSX_REGS have
29350 the same cost. In that case we do use GEN_OR_VSX_REGS as the
29351 allocno class, since trying to narrow down the class by regno mode
29352 is prone to error. For example, SImode is allowed in VSX regs and
29353 in some cases (e.g. gcc.target/powerpc/p9-xxbr-3.c do_bswap32_vect)
29354 it would be wrong to choose an allocno of GENERAL_REGS based on
29355 SImode. */
29356
29357 static reg_class_t
29358 rs6000_ira_change_pseudo_allocno_class (int regno ATTRIBUTE_UNUSED,
29359 reg_class_t allocno_class,
29360 reg_class_t best_class)
29361 {
29362 switch (allocno_class)
29363 {
29364 case GEN_OR_VSX_REGS:
29365 /* best_class must be a subset of allocno_class. */
29366 gcc_checking_assert (best_class == GEN_OR_VSX_REGS
29367 || best_class == GEN_OR_FLOAT_REGS
29368 || best_class == VSX_REGS
29369 || best_class == ALTIVEC_REGS
29370 || best_class == FLOAT_REGS
29371 || best_class == GENERAL_REGS
29372 || best_class == BASE_REGS);
29373 /* Use best_class but choose wider classes when copying from the
29374 wider class to best_class is cheap. This mimics IRA choice
29375 of allocno class. */
29376 if (best_class == BASE_REGS)
29377 return GENERAL_REGS;
29378 if (TARGET_VSX
29379 && (best_class == FLOAT_REGS || best_class == ALTIVEC_REGS))
29380 return VSX_REGS;
29381 return best_class;
29382
29383 default:
29384 break;
29385 }
29386
29387 return allocno_class;
29388 }
29389
29390 /* Returns a code for a target-specific builtin that implements
29391 reciprocal of the function, or NULL_TREE if not available. */
29392
29393 static tree
29394 rs6000_builtin_reciprocal (tree fndecl)
29395 {
29396 switch (DECL_FUNCTION_CODE (fndecl))
29397 {
29398 case VSX_BUILTIN_XVSQRTDP:
29399 if (!RS6000_RECIP_AUTO_RSQRTE_P (V2DFmode))
29400 return NULL_TREE;
29401
29402 return rs6000_builtin_decls[VSX_BUILTIN_RSQRT_2DF];
29403
29404 case VSX_BUILTIN_XVSQRTSP:
29405 if (!RS6000_RECIP_AUTO_RSQRTE_P (V4SFmode))
29406 return NULL_TREE;
29407
29408 return rs6000_builtin_decls[VSX_BUILTIN_RSQRT_4SF];
29409
29410 default:
29411 return NULL_TREE;
29412 }
29413 }
29414
29415 /* Load up a constant. If the mode is a vector mode, splat the value across
29416 all of the vector elements. */
29417
29418 static rtx
29419 rs6000_load_constant_and_splat (machine_mode mode, REAL_VALUE_TYPE dconst)
29420 {
29421 rtx reg;
29422
29423 if (mode == SFmode || mode == DFmode)
29424 {
29425 rtx d = const_double_from_real_value (dconst, mode);
29426 reg = force_reg (mode, d);
29427 }
29428 else if (mode == V4SFmode)
29429 {
29430 rtx d = const_double_from_real_value (dconst, SFmode);
29431 rtvec v = gen_rtvec (4, d, d, d, d);
29432 reg = gen_reg_rtx (mode);
29433 rs6000_expand_vector_init (reg, gen_rtx_PARALLEL (mode, v));
29434 }
29435 else if (mode == V2DFmode)
29436 {
29437 rtx d = const_double_from_real_value (dconst, DFmode);
29438 rtvec v = gen_rtvec (2, d, d);
29439 reg = gen_reg_rtx (mode);
29440 rs6000_expand_vector_init (reg, gen_rtx_PARALLEL (mode, v));
29441 }
29442 else
29443 gcc_unreachable ();
29444
29445 return reg;
29446 }
29447
29448 /* Generate an FMA instruction. */
29449
29450 static void
29451 rs6000_emit_madd (rtx target, rtx m1, rtx m2, rtx a)
29452 {
29453 machine_mode mode = GET_MODE (target);
29454 rtx dst;
29455
29456 dst = expand_ternary_op (mode, fma_optab, m1, m2, a, target, 0);
29457 gcc_assert (dst != NULL);
29458
29459 if (dst != target)
29460 emit_move_insn (target, dst);
29461 }
29462
29463 /* Generate a FNMSUB instruction: dst = -fma(m1, m2, -a). */
29464
29465 static void
29466 rs6000_emit_nmsub (rtx dst, rtx m1, rtx m2, rtx a)
29467 {
29468 machine_mode mode = GET_MODE (dst);
29469 rtx r;
29470
29471 /* This is a tad more complicated, since the fnma_optab is for
29472 a different expression: fma(-m1, m2, a), which is the same
29473 thing except in the case of signed zeros.
29474
29475 Fortunately we know that if FMA is supported that FNMSUB is
29476 also supported in the ISA. Just expand it directly. */
29477
29478 gcc_assert (optab_handler (fma_optab, mode) != CODE_FOR_nothing);
29479
29480 r = gen_rtx_NEG (mode, a);
29481 r = gen_rtx_FMA (mode, m1, m2, r);
29482 r = gen_rtx_NEG (mode, r);
29483 emit_insn (gen_rtx_SET (dst, r));
29484 }
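/* Mathematically this computes dst = -(m1*m2 - a) = a - m1*m2, which
   equals fma(-m1, m2, a) except for the sign of a zero result; the
   divide and square-root expansions below rely on it for their
   residual terms, e.g. e0 = 1 - d*x0.  */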
29485
29486 /* Newton-Raphson approximation of floating point divide DST = N/D. If NOTE_P,
29487 add a reg_note saying that this was a division. Support both scalar and
29488 vector divide. Assumes no trapping math and finite arguments. */
29489
29490 void
29491 rs6000_emit_swdiv (rtx dst, rtx n, rtx d, bool note_p)
29492 {
29493 machine_mode mode = GET_MODE (dst);
29494 rtx one, x0, e0, x1, xprev, eprev, xnext, enext, u, v;
29495 int i;
29496
29497 /* Low precision estimates guarantee 5 bits of accuracy. High
29498 precision estimates guarantee 14 bits of accuracy. SFmode
29499 requires 23 bits of accuracy. DFmode requires 52 bits of
29500 accuracy. Each pass at least doubles the accuracy, leading
29501 to the following. */
29502 int passes = (TARGET_RECIP_PRECISION) ? 1 : 3;
29503 if (mode == DFmode || mode == V2DFmode)
29504 passes++;
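  /* E.g. SFmode with high-precision estimates needs one pass
     (14 -> 28 >= 23 bits), while DFmode with low-precision estimates
     needs all four (5 -> 10 -> 20 -> 40 -> 80 >= 52 bits).  */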
29505
29506 enum insn_code code = optab_handler (smul_optab, mode);
29507 insn_gen_fn gen_mul = GEN_FCN (code);
29508
29509 gcc_assert (code != CODE_FOR_nothing);
29510
29511 one = rs6000_load_constant_and_splat (mode, dconst1);
29512
29513 /* x0 = 1./d estimate */
29514 x0 = gen_reg_rtx (mode);
29515 emit_insn (gen_rtx_SET (x0, gen_rtx_UNSPEC (mode, gen_rtvec (1, d),
29516 UNSPEC_FRES)));
29517
29518 /* Each iteration but the last calculates x_(i+1) = x_i * (2 - d * x_i). */
29519 if (passes > 1) {
29520
29521 /* e0 = 1. - d * x0 */
29522 e0 = gen_reg_rtx (mode);
29523 rs6000_emit_nmsub (e0, d, x0, one);
29524
29525 /* x1 = x0 + e0 * x0 */
29526 x1 = gen_reg_rtx (mode);
29527 rs6000_emit_madd (x1, e0, x0, x0);
29528
29529 for (i = 0, xprev = x1, eprev = e0; i < passes - 2;
29530 ++i, xprev = xnext, eprev = enext) {
29531
29532 /* enext = eprev * eprev */
29533 enext = gen_reg_rtx (mode);
29534 emit_insn (gen_mul (enext, eprev, eprev));
29535
29536 /* xnext = xprev + enext * xprev */
29537 xnext = gen_reg_rtx (mode);
29538 rs6000_emit_madd (xnext, enext, xprev, xprev);
29539 }
29540
29541 } else
29542 xprev = x0;
29543
29544 /* The last iteration calculates x_(i+1) = n * x_i * (2 - d * x_i). */
29545
29546 /* u = n * xprev */
29547 u = gen_reg_rtx (mode);
29548 emit_insn (gen_mul (u, n, xprev));
29549
29550 /* v = n - (d * u) */
29551 v = gen_reg_rtx (mode);
29552 rs6000_emit_nmsub (v, d, u, n);
29553
29554 /* dst = (v * xprev) + u */
29555 rs6000_emit_madd (dst, v, xprev, u);
29556
29557 if (note_p)
29558 add_reg_note (get_last_insn (), REG_EQUAL, gen_rtx_DIV (mode, n, d));
29559 }
29560
29561 /* Goldschmidt's Algorithm for single/double-precision floating point
29562 sqrt and rsqrt. Assumes no trapping math and finite arguments. */
29563
29564 void
29565 rs6000_emit_swsqrt (rtx dst, rtx src, bool recip)
29566 {
29567 machine_mode mode = GET_MODE (src);
29568 rtx e = gen_reg_rtx (mode);
29569 rtx g = gen_reg_rtx (mode);
29570 rtx h = gen_reg_rtx (mode);
29571
29572 /* Low precision estimates guarantee 5 bits of accuracy. High
29573 precision estimates guarantee 14 bits of accuracy. SFmode
29574 requires 23 bits of accuracy. DFmode requires 52 bits of
29575 accuracy. Each pass at least doubles the accuracy, leading
29576 to the following. */
29577 int passes = (TARGET_RECIP_PRECISION) ? 1 : 3;
29578 if (mode == DFmode || mode == V2DFmode)
29579 passes++;
29580
29581 int i;
29582 rtx mhalf;
29583 enum insn_code code = optab_handler (smul_optab, mode);
29584 insn_gen_fn gen_mul = GEN_FCN (code);
29585
29586 gcc_assert (code != CODE_FOR_nothing);
29587
29588 mhalf = rs6000_load_constant_and_splat (mode, dconsthalf);
29589
29590 /* e = rsqrt estimate */
29591 emit_insn (gen_rtx_SET (e, gen_rtx_UNSPEC (mode, gen_rtvec (1, src),
29592 UNSPEC_RSQRT)));
29593
29594 /* If (src == 0.0), filter the infinite rsqrt estimate to zero so g = e*src below gives 0.0 rather than Inf*0 = NaN. */
29595 if (!recip)
29596 {
29597 rtx zero = force_reg (mode, CONST0_RTX (mode));
29598
29599 if (mode == SFmode)
29600 {
29601 rtx target = emit_conditional_move (e, GT, src, zero, mode,
29602 e, zero, mode, 0);
29603 if (target != e)
29604 emit_move_insn (e, target);
29605 }
29606 else
29607 {
29608 rtx cond = gen_rtx_GT (VOIDmode, e, zero);
29609 rs6000_emit_vector_cond_expr (e, e, zero, cond, src, zero);
29610 }
29611 }
29612
29613 /* g = sqrt estimate. */
29614 emit_insn (gen_mul (g, e, src));
29615 /* h = 1/(2*sqrt) estimate. */
29616 emit_insn (gen_mul (h, e, mhalf));
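  /* Refinement invariant (a sketch): with g ~ sqrt(src) and
     h ~ 1/(2*sqrt(src)), each pass below computes t = 1/2 - g*h and
     updates g += g*t, h += h*t, roughly squaring the relative
     error.  */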
29617
29618 if (recip)
29619 {
29620 if (passes == 1)
29621 {
29622 rtx t = gen_reg_rtx (mode);
29623 rs6000_emit_nmsub (t, g, h, mhalf);
29624 /* Apply correction directly to 1/rsqrt estimate. */
29625 rs6000_emit_madd (dst, e, t, e);
29626 }
29627 else
29628 {
29629 for (i = 0; i < passes; i++)
29630 {
29631 rtx t1 = gen_reg_rtx (mode);
29632 rtx g1 = gen_reg_rtx (mode);
29633 rtx h1 = gen_reg_rtx (mode);
29634
29635 rs6000_emit_nmsub (t1, g, h, mhalf);
29636 rs6000_emit_madd (g1, g, t1, g);
29637 rs6000_emit_madd (h1, h, t1, h);
29638
29639 g = g1;
29640 h = h1;
29641 }
29642 /* Multiply by 2 for 1/rsqrt. */
29643 emit_insn (gen_add3_insn (dst, h, h));
29644 }
29645 }
29646 else
29647 {
29648 rtx t = gen_reg_rtx (mode);
29649 rs6000_emit_nmsub (t, g, h, mhalf);
29650 rs6000_emit_madd (dst, g, t, g);
29651 }
29652
29653 return;
29654 }
29655
29656 /* Emit popcount intrinsic on TARGET_POPCNTB (Power5) and TARGET_POPCNTD
29657 (Power7) targets. DST is the target, and SRC is the argument operand. */
29658
29659 void
29660 rs6000_emit_popcount (rtx dst, rtx src)
29661 {
29662 machine_mode mode = GET_MODE (dst);
29663 rtx tmp1, tmp2;
29664
29665 /* Use the PPC ISA 2.06 popcnt{w,d} instruction if we can. */
29666 if (TARGET_POPCNTD)
29667 {
29668 if (mode == SImode)
29669 emit_insn (gen_popcntdsi2 (dst, src));
29670 else
29671 emit_insn (gen_popcntddi2 (dst, src));
29672 return;
29673 }
29674
29675 tmp1 = gen_reg_rtx (mode);
29676
29677 if (mode == SImode)
29678 {
29679 emit_insn (gen_popcntbsi2 (tmp1, src));
29680 tmp2 = expand_mult (SImode, tmp1, GEN_INT (0x01010101),
29681 NULL_RTX, 0);
29682 tmp2 = force_reg (SImode, tmp2);
29683 emit_insn (gen_lshrsi3 (dst, tmp2, GEN_INT (24)));
29684 }
29685 else
29686 {
29687 emit_insn (gen_popcntbdi2 (tmp1, src));
29688 tmp2 = expand_mult (DImode, tmp1,
29689 GEN_INT ((HOST_WIDE_INT)
29690 0x01010101 << 32 | 0x01010101),
29691 NULL_RTX, 0);
29692 tmp2 = force_reg (DImode, tmp2);
29693 emit_insn (gen_lshrdi3 (dst, tmp2, GEN_INT (56)));
29694 }
29695 }
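/* Worked SImode example for the popcntb path: src = 0x01020304 has
   per-byte populations {1,1,2,1}, so popcntb produces 0x01010201;
   multiplying by 0x01010101 accumulates the byte sums into the top
   byte (1+1+2+1 == 5) and the shift by 24 extracts the result.  */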
29696
29697
29698 /* Emit parity intrinsic on TARGET_POPCNTB targets. DST is the
29699 target, and SRC is the argument operand. */
29700
29701 void
29702 rs6000_emit_parity (rtx dst, rtx src)
29703 {
29704 machine_mode mode = GET_MODE (dst);
29705 rtx tmp;
29706
29707 tmp = gen_reg_rtx (mode);
29708
29709 /* Use the PPC ISA 2.05 prtyw/prtyd instruction if we can. */
29710 if (TARGET_CMPB)
29711 {
29712 if (mode == SImode)
29713 {
29714 emit_insn (gen_popcntbsi2 (tmp, src));
29715 emit_insn (gen_paritysi2_cmpb (dst, tmp));
29716 }
29717 else
29718 {
29719 emit_insn (gen_popcntbdi2 (tmp, src));
29720 emit_insn (gen_paritydi2_cmpb (dst, tmp));
29721 }
29722 return;
29723 }
29724
29725 if (mode == SImode)
29726 {
29727 /* Is mult+shift >= shift+xor+shift+xor? */
29728 if (rs6000_cost->mulsi_const >= COSTS_N_INSNS (3))
29729 {
29730 rtx tmp1, tmp2, tmp3, tmp4;
29731
29732 tmp1 = gen_reg_rtx (SImode);
29733 emit_insn (gen_popcntbsi2 (tmp1, src));
29734
29735 tmp2 = gen_reg_rtx (SImode);
29736 emit_insn (gen_lshrsi3 (tmp2, tmp1, GEN_INT (16)));
29737 tmp3 = gen_reg_rtx (SImode);
29738 emit_insn (gen_xorsi3 (tmp3, tmp1, tmp2));
29739
29740 tmp4 = gen_reg_rtx (SImode);
29741 emit_insn (gen_lshrsi3 (tmp4, tmp3, GEN_INT (8)));
29742 emit_insn (gen_xorsi3 (tmp, tmp3, tmp4));
29743 }
29744 else
29745 rs6000_emit_popcount (tmp, src);
29746 emit_insn (gen_andsi3 (dst, tmp, const1_rtx));
29747 }
29748 else
29749 {
29750 /* Is mult+shift >= shift+xor+shift+xor+shift+xor? */
29751 if (rs6000_cost->muldi >= COSTS_N_INSNS (5))
29752 {
29753 rtx tmp1, tmp2, tmp3, tmp4, tmp5, tmp6;
29754
29755 tmp1 = gen_reg_rtx (DImode);
29756 emit_insn (gen_popcntbdi2 (tmp1, src));
29757
29758 tmp2 = gen_reg_rtx (DImode);
29759 emit_insn (gen_lshrdi3 (tmp2, tmp1, GEN_INT (32)));
29760 tmp3 = gen_reg_rtx (DImode);
29761 emit_insn (gen_xordi3 (tmp3, tmp1, tmp2));
29762
29763 tmp4 = gen_reg_rtx (DImode);
29764 emit_insn (gen_lshrdi3 (tmp4, tmp3, GEN_INT (16)));
29765 tmp5 = gen_reg_rtx (DImode);
29766 emit_insn (gen_xordi3 (tmp5, tmp3, tmp4));
29767
29768 tmp6 = gen_reg_rtx (DImode);
29769 emit_insn (gen_lshrdi3 (tmp6, tmp5, GEN_INT (8)));
29770 emit_insn (gen_xordi3 (tmp, tmp5, tmp6));
29771 }
29772 else
29773 rs6000_emit_popcount (tmp, src);
29774 emit_insn (gen_anddi3 (dst, tmp, const1_rtx));
29775 }
29776 }
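/* Illustrative sketch (not part of this file): the shift+xor fallback
   above, in scalar C.  XOR adds the per-byte counts modulo 2, which is
   all parity needs, so folding the word in halves and taking bit 0
   matches the mult+shift variant.

     #include <stdint.h>
     #include <stdio.h>

     int
     main (void)
     {
       uint32_t src = 0x80000001;    // two bits set => even parity
       uint32_t t = 0;               // stand-in for popcntb
       for (int i = 0; i < 4; i++)
         t |= (uint32_t) __builtin_popcount ((src >> (8 * i)) & 0xff)
              << (8 * i);
       t ^= t >> 16;                 // fold halfwords
       t ^= t >> 8;                  // fold bytes
       printf ("%u\n", t & 1);       // prints 0
       return 0;
     }
*/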
29777
29778 /* Expand an Altivec constant permutation for little endian mode.
29779 OP0 and OP1 are the input vectors and TARGET is the output vector.
29780 SEL specifies the constant permutation vector.
29781
29782 There are two issues: First, the two input operands must be
29783 swapped so that together they form a double-wide array in LE
29784 order. Second, the vperm instruction has surprising behavior
29785 in LE mode: it interprets the elements of the source vectors
29786 in BE mode ("left to right") and interprets the elements of
29787 the destination vector in LE mode ("right to left"). To
29788 correct for this, we must subtract each element of the permute
29789 control vector from 31.
29790
29791 For example, suppose we want to concatenate vr10 = {0, 1, 2, 3}
29792 with vr11 = {4, 5, 6, 7} and extract {0, 2, 4, 6} using a vperm.
29793 We place {0,1,2,3,8,9,10,11,16,17,18,19,24,25,26,27} in vr12 to
29794 serve as the permute control vector. Then, in BE mode,
29795
29796 vperm 9,10,11,12
29797
29798 places the desired result in vr9. However, in LE mode the
29799 vector contents will be
29800
29801 vr10 = 00000003 00000002 00000001 00000000
29802 vr11 = 00000007 00000006 00000005 00000004
29803
29804 The result of the vperm using the same permute control vector is
29805
29806 vr9 = 05000000 07000000 01000000 03000000
29807
29808 That is, the leftmost 4 bytes of vr10 are interpreted as the
29809 source for the rightmost 4 bytes of vr9, and so on.
29810
29811 If we change the permute control vector to
29812
29813      vr12 = {31,30,29,28,23,22,21,20,15,14,13,12,7,6,5,4}
29814
29815 and issue
29816
29817 vperm 9,11,10,12
29818
29819 we get the desired
29820
29821 vr9 = 00000006 00000004 00000002 00000000. */
29822
29823 static void
29824 altivec_expand_vec_perm_const_le (rtx target, rtx op0, rtx op1,
29825 const vec_perm_indices &sel)
29826 {
29827 unsigned int i;
29828 rtx perm[16];
29829 rtx constv, unspec;
29830
29831 /* Unpack and adjust the constant selector. */
29832 for (i = 0; i < 16; ++i)
29833 {
29834 unsigned int elt = 31 - (sel[i] & 31);
29835 perm[i] = GEN_INT (elt);
29836 }
29837
29838 /* Expand to a permute, swapping the inputs and using the
29839 adjusted selector. */
29840 if (!REG_P (op0))
29841 op0 = force_reg (V16QImode, op0);
29842 if (!REG_P (op1))
29843 op1 = force_reg (V16QImode, op1);
29844
29845 constv = gen_rtx_CONST_VECTOR (V16QImode, gen_rtvec_v (16, perm));
29846 constv = force_reg (V16QImode, constv);
29847 unspec = gen_rtx_UNSPEC (V16QImode, gen_rtvec (3, op1, op0, constv),
29848 UNSPEC_VPERM);
29849 if (!REG_P (target))
29850 {
29851 rtx tmp = gen_reg_rtx (V16QImode);
29852 emit_move_insn (tmp, unspec);
29853 unspec = tmp;
29854 }
29855
29856 emit_move_insn (target, unspec);
29857 }
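/* Illustrative sketch (not part of this file): the selector adjustment
   performed by the loop above, applied to the BE control vector from the
   long comment.  Printing 31 - e for each element reproduces the vr12
   value quoted there.

     #include <stdio.h>

     int
     main (void)
     {
       unsigned char sel[16] = { 0, 1, 2, 3, 8, 9, 10, 11,
                                 16, 17, 18, 19, 24, 25, 26, 27 };
       for (int i = 0; i < 16; i++)
         printf ("%u%c", 31 - (sel[i] & 31), i == 15 ? '\n' : ',');
       // prints 31,30,29,28,23,22,21,20,15,14,13,12,7,6,5,4
       return 0;
     }
*/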
29858
29859 /* Similarly to altivec_expand_vec_perm_const_le, we must adjust the
29860 permute control vector. But here it's not a constant, so we must
29861 generate a vector NAND or NOR to do the adjustment. */
29862
29863 void
29864 altivec_expand_vec_perm_le (rtx operands[4])
29865 {
29866 rtx notx, iorx, unspec;
29867 rtx target = operands[0];
29868 rtx op0 = operands[1];
29869 rtx op1 = operands[2];
29870 rtx sel = operands[3];
29871 rtx tmp = target;
29872 rtx norreg = gen_reg_rtx (V16QImode);
29873 machine_mode mode = GET_MODE (target);
29874
29875 /* Get everything in regs so the pattern matches. */
29876 if (!REG_P (op0))
29877 op0 = force_reg (mode, op0);
29878 if (!REG_P (op1))
29879 op1 = force_reg (mode, op1);
29880 if (!REG_P (sel))
29881 sel = force_reg (V16QImode, sel);
29882 if (!REG_P (target))
29883 tmp = gen_reg_rtx (mode);
29884
29885 if (TARGET_P9_VECTOR)
29886 {
29887 unspec = gen_rtx_UNSPEC (mode, gen_rtvec (3, op1, op0, sel),
29888 UNSPEC_VPERMR);
29889 }
29890 else
29891 {
29892 /* Invert the selector with a VNAND if available, else a VNOR.
29893 The VNAND is preferred for future fusion opportunities. */
29894 notx = gen_rtx_NOT (V16QImode, sel);
29895 iorx = (TARGET_P8_VECTOR
29896 ? gen_rtx_IOR (V16QImode, notx, notx)
29897 : gen_rtx_AND (V16QImode, notx, notx));
29898 emit_insn (gen_rtx_SET (norreg, iorx));
29899
29900 /* Permute with operands reversed and adjusted selector. */
29901 unspec = gen_rtx_UNSPEC (mode, gen_rtvec (3, op1, op0, norreg),
29902 UNSPEC_VPERM);
29903 }
29904
29905 /* Copy into target, possibly by way of a register. */
29906 if (!REG_P (target))
29907 {
29908 emit_move_insn (tmp, unspec);
29909 unspec = tmp;
29910 }
29911
29912 emit_move_insn (target, unspec);
29913 }
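/* Illustrative sketch (not part of this file): why a plain bitwise NOT
   (the VNAND/VNOR above) is a valid substitute for the constant 31 - e
   adjustment.  vperm consumes only the low five bits of each selector
   byte, and on those bits one's complement and subtraction from 31
   coincide.

     #include <stdio.h>

     int
     main (void)
     {
       for (unsigned e = 0; e < 256; e++)
         if ((~e & 31) != 31 - (e & 31))
           return 1;                 // never reached
       printf ("equivalent for all byte values\n");
       return 0;
     }
*/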
29914
29915 /* Expand an Altivec constant permutation. Return true if we match
29916 an efficient implementation; false to fall back to VPERM.
29917
29918 OP0 and OP1 are the input vectors and TARGET is the output vector.
29919 SEL specifies the constant permutation vector. */
29920
29921 static bool
29922 altivec_expand_vec_perm_const (rtx target, rtx op0, rtx op1,
29923 const vec_perm_indices &sel)
29924 {
29925 struct altivec_perm_insn {
29926 HOST_WIDE_INT mask;
29927 enum insn_code impl;
29928 unsigned char perm[16];
29929 };
29930 static const struct altivec_perm_insn patterns[] = {
29931 { OPTION_MASK_ALTIVEC, CODE_FOR_altivec_vpkuhum_direct,
29932 { 1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31 } },
29933 { OPTION_MASK_ALTIVEC, CODE_FOR_altivec_vpkuwum_direct,
29934 { 2, 3, 6, 7, 10, 11, 14, 15, 18, 19, 22, 23, 26, 27, 30, 31 } },
29935 { OPTION_MASK_ALTIVEC,
29936 (BYTES_BIG_ENDIAN ? CODE_FOR_altivec_vmrghb_direct
29937 : CODE_FOR_altivec_vmrglb_direct),
29938 { 0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23 } },
29939 { OPTION_MASK_ALTIVEC,
29940 (BYTES_BIG_ENDIAN ? CODE_FOR_altivec_vmrghh_direct
29941 : CODE_FOR_altivec_vmrglh_direct),
29942 { 0, 1, 16, 17, 2, 3, 18, 19, 4, 5, 20, 21, 6, 7, 22, 23 } },
29943 { OPTION_MASK_ALTIVEC,
29944 (BYTES_BIG_ENDIAN ? CODE_FOR_altivec_vmrghw_direct
29945 : CODE_FOR_altivec_vmrglw_direct),
29946 { 0, 1, 2, 3, 16, 17, 18, 19, 4, 5, 6, 7, 20, 21, 22, 23 } },
29947 { OPTION_MASK_ALTIVEC,
29948 (BYTES_BIG_ENDIAN ? CODE_FOR_altivec_vmrglb_direct
29949 : CODE_FOR_altivec_vmrghb_direct),
29950 { 8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31 } },
29951 { OPTION_MASK_ALTIVEC,
29952 (BYTES_BIG_ENDIAN ? CODE_FOR_altivec_vmrglh_direct
29953 : CODE_FOR_altivec_vmrghh_direct),
29954 { 8, 9, 24, 25, 10, 11, 26, 27, 12, 13, 28, 29, 14, 15, 30, 31 } },
29955 { OPTION_MASK_ALTIVEC,
29956 (BYTES_BIG_ENDIAN ? CODE_FOR_altivec_vmrglw_direct
29957 : CODE_FOR_altivec_vmrghw_direct),
29958 { 8, 9, 10, 11, 24, 25, 26, 27, 12, 13, 14, 15, 28, 29, 30, 31 } },
29959 { OPTION_MASK_P8_VECTOR,
29960 (BYTES_BIG_ENDIAN ? CODE_FOR_p8_vmrgew_v4sf_direct
29961 : CODE_FOR_p8_vmrgow_v4sf_direct),
29962 { 0, 1, 2, 3, 16, 17, 18, 19, 8, 9, 10, 11, 24, 25, 26, 27 } },
29963 { OPTION_MASK_P8_VECTOR,
29964 (BYTES_BIG_ENDIAN ? CODE_FOR_p8_vmrgow_v4sf_direct
29965 : CODE_FOR_p8_vmrgew_v4sf_direct),
29966 { 4, 5, 6, 7, 20, 21, 22, 23, 12, 13, 14, 15, 28, 29, 30, 31 } }
29967 };
29968
29969 unsigned int i, j, elt, which;
29970 unsigned char perm[16];
29971 rtx x;
29972 bool one_vec;
29973
29974 /* Unpack the constant selector. */
29975 for (i = which = 0; i < 16; ++i)
29976 {
29977 elt = sel[i] & 31;
29978 which |= (elt < 16 ? 1 : 2);
29979 perm[i] = elt;
29980 }
29981
29982 /* Simplify the constant selector based on operands. */
29983 switch (which)
29984 {
29985 default:
29986 gcc_unreachable ();
29987
29988 case 3:
29989 one_vec = false;
29990 if (!rtx_equal_p (op0, op1))
29991 break;
29992 /* FALLTHRU */
29993
29994 case 2:
29995 for (i = 0; i < 16; ++i)
29996 perm[i] &= 15;
29997 op0 = op1;
29998 one_vec = true;
29999 break;
30000
30001 case 1:
30002 op1 = op0;
30003 one_vec = true;
30004 break;
30005 }
30006
30007 /* Look for splat patterns. */
30008 if (one_vec)
30009 {
30010 elt = perm[0];
30011
30012 for (i = 0; i < 16; ++i)
30013 if (perm[i] != elt)
30014 break;
30015 if (i == 16)
30016 {
30017 if (!BYTES_BIG_ENDIAN)
30018 elt = 15 - elt;
30019 emit_insn (gen_altivec_vspltb_direct (target, op0, GEN_INT (elt)));
30020 return true;
30021 }
30022
30023 if (elt % 2 == 0)
30024 {
30025 for (i = 0; i < 16; i += 2)
30026 if (perm[i] != elt || perm[i + 1] != elt + 1)
30027 break;
30028 if (i == 16)
30029 {
30030 int field = BYTES_BIG_ENDIAN ? elt / 2 : 7 - elt / 2;
30031 x = gen_reg_rtx (V8HImode);
30032 emit_insn (gen_altivec_vsplth_direct (x, gen_lowpart (V8HImode, op0),
30033 GEN_INT (field)));
30034 emit_move_insn (target, gen_lowpart (V16QImode, x));
30035 return true;
30036 }
30037 }
30038
30039 if (elt % 4 == 0)
30040 {
30041 for (i = 0; i < 16; i += 4)
30042 if (perm[i] != elt
30043 || perm[i + 1] != elt + 1
30044 || perm[i + 2] != elt + 2
30045 || perm[i + 3] != elt + 3)
30046 break;
30047 if (i == 16)
30048 {
30049 int field = BYTES_BIG_ENDIAN ? elt / 4 : 3 - elt / 4;
30050 x = gen_reg_rtx (V4SImode);
30051 emit_insn (gen_altivec_vspltw_direct (x, gen_lowpart (V4SImode, op0),
30052 GEN_INT (field)));
30053 emit_move_insn (target, gen_lowpart (V16QImode, x));
30054 return true;
30055 }
30056 }
30057 }
30058
30059 /* Look for merge and pack patterns. */
30060 for (j = 0; j < ARRAY_SIZE (patterns); ++j)
30061 {
30062 bool swapped;
30063
30064 if ((patterns[j].mask & rs6000_isa_flags) == 0)
30065 continue;
30066
30067 elt = patterns[j].perm[0];
30068 if (perm[0] == elt)
30069 swapped = false;
30070 else if (perm[0] == elt + 16)
30071 swapped = true;
30072 else
30073 continue;
30074 for (i = 1; i < 16; ++i)
30075 {
30076 elt = patterns[j].perm[i];
30077 if (swapped)
30078 elt = (elt >= 16 ? elt - 16 : elt + 16);
30079 else if (one_vec && elt >= 16)
30080 elt -= 16;
30081 if (perm[i] != elt)
30082 break;
30083 }
30084 if (i == 16)
30085 {
30086 enum insn_code icode = patterns[j].impl;
30087 machine_mode omode = insn_data[icode].operand[0].mode;
30088 machine_mode imode = insn_data[icode].operand[1].mode;
30089
30090 /* For little-endian, don't use vpkuwum and vpkuhum if the
30091 underlying vector type is not V4SI and V8HI, respectively.
30092 For example, using vpkuwum with a V8HI picks up the even
30093 halfwords (BE numbering) when the even halfwords (LE
30094 numbering) are what we need. */
30095 if (!BYTES_BIG_ENDIAN
30096 && icode == CODE_FOR_altivec_vpkuwum_direct
30097 && ((REG_P (op0)
30098 && GET_MODE (op0) != V4SImode)
30099 || (SUBREG_P (op0)
30100 && GET_MODE (XEXP (op0, 0)) != V4SImode)))
30101 continue;
30102 if (!BYTES_BIG_ENDIAN
30103 && icode == CODE_FOR_altivec_vpkuhum_direct
30104 && ((REG_P (op0)
30105 && GET_MODE (op0) != V8HImode)
30106 || (SUBREG_P (op0)
30107 && GET_MODE (XEXP (op0, 0)) != V8HImode)))
30108 continue;
30109
30110 /* For little-endian, the two input operands must be swapped
30111 (or swapped back) to ensure proper right-to-left numbering
30112 from 0 to 2N-1. */
30113 if (swapped ^ !BYTES_BIG_ENDIAN)
30114 std::swap (op0, op1);
30115 if (imode != V16QImode)
30116 {
30117 op0 = gen_lowpart (imode, op0);
30118 op1 = gen_lowpart (imode, op1);
30119 }
30120 if (omode == V16QImode)
30121 x = target;
30122 else
30123 x = gen_reg_rtx (omode);
30124 emit_insn (GEN_FCN (icode) (x, op0, op1));
30125 if (omode != V16QImode)
30126 emit_move_insn (target, gen_lowpart (V16QImode, x));
30127 return true;
30128 }
30129 }
30130
30131 if (!BYTES_BIG_ENDIAN)
30132 {
30133 altivec_expand_vec_perm_const_le (target, op0, op1, sel);
30134 return true;
30135 }
30136
30137 return false;
30138 }
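/* Illustrative sketch (not part of this file): the splat detection used
   above, reduced to the byte case.  A selector all of whose entries
   match its first element can be implemented with a single vspltb
   instead of a full vperm; the helper name is hypothetical.

     #include <stdio.h>

     static int
     byte_splat_elt (const unsigned char sel[16])  // -1 if not a splat
     {
       for (int i = 1; i < 16; i++)
         if (sel[i] != sel[0])
           return -1;
       return sel[0];
     }

     int
     main (void)
     {
       unsigned char s[16] = { 5, 5, 5, 5, 5, 5, 5, 5,
                               5, 5, 5, 5, 5, 5, 5, 5 };
       printf ("%d\n", byte_splat_elt (s));        // prints 5
       return 0;
     }
*/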
30139
30140 /* Expand a VSX Permute Doubleword constant permutation.
30141 Return true if we match an efficient implementation. */
30142
30143 static bool
30144 rs6000_expand_vec_perm_const_1 (rtx target, rtx op0, rtx op1,
30145 unsigned char perm0, unsigned char perm1)
30146 {
30147 rtx x;
30148
30149 /* If both selectors come from the same operand, fold to single op. */
30150 if ((perm0 & 2) == (perm1 & 2))
30151 {
30152 if (perm0 & 2)
30153 op0 = op1;
30154 else
30155 op1 = op0;
30156 }
30157 /* If both operands are equal, fold to simpler permutation. */
30158 if (rtx_equal_p (op0, op1))
30159 {
30160 perm0 = perm0 & 1;
30161 perm1 = (perm1 & 1) + 2;
30162 }
30163 /* If the first selector comes from the second operand, swap. */
30164 else if (perm0 & 2)
30165 {
30166 if (perm1 & 2)
30167 return false;
30168 perm0 -= 2;
30169 perm1 += 2;
30170 std::swap (op0, op1);
30171 }
30172 /* If the second selector does not come from the second operand, fail. */
30173 else if ((perm1 & 2) == 0)
30174 return false;
30175
30176 /* Success! */
30177 if (target != NULL)
30178 {
30179 machine_mode vmode, dmode;
30180 rtvec v;
30181
30182 vmode = GET_MODE (target);
30183 gcc_assert (GET_MODE_NUNITS (vmode) == 2);
30184 dmode = mode_for_vector (GET_MODE_INNER (vmode), 4).require ();
30185 x = gen_rtx_VEC_CONCAT (dmode, op0, op1);
30186 v = gen_rtvec (2, GEN_INT (perm0), GEN_INT (perm1));
30187 x = gen_rtx_VEC_SELECT (vmode, x, gen_rtx_PARALLEL (VOIDmode, v));
30188 emit_insn (gen_rtx_SET (target, x));
30189 }
30190 return true;
30191 }
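/* Illustrative sketch (not part of this file): reference semantics of
   the two-element permute built above.  The selectors index the four
   candidate doublewords op0[0], op0[1], op1[0], op1[1], which is the
   property the perm0/perm1 canonicalization preserves.

     #include <stdio.h>

     static void
     permdi_ref (long d[2], const long a[2], const long b[2],
                 unsigned p0, unsigned p1)
     {
       const long src[4] = { a[0], a[1], b[0], b[1] };
       d[0] = src[p0 & 3];
       d[1] = src[p1 & 3];
     }

     int
     main (void)
     {
       long a[2] = { 10, 11 }, b[2] = { 20, 21 }, d[2];
       permdi_ref (d, a, b, 1, 2);        // high of op0, low of op1
       printf ("%ld %ld\n", d[0], d[1]);  // prints 11 20
       return 0;
     }
*/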
30192
30193 /* Implement TARGET_VECTORIZE_VEC_PERM_CONST. */
30194
30195 static bool
30196 rs6000_vectorize_vec_perm_const (machine_mode vmode, rtx target, rtx op0,
30197 rtx op1, const vec_perm_indices &sel)
30198 {
30199 bool testing_p = !target;
30200
30201 /* AltiVec (and thus VSX) can handle arbitrary permutations. */
30202 if (TARGET_ALTIVEC && testing_p)
30203 return true;
30204
30205 /* Check for ps_merge* or xxpermdi insns. */
30206 if ((vmode == V2DFmode || vmode == V2DImode) && VECTOR_MEM_VSX_P (vmode))
30207 {
30208 if (testing_p)
30209 {
30210 op0 = gen_raw_REG (vmode, LAST_VIRTUAL_REGISTER + 1);
30211 op1 = gen_raw_REG (vmode, LAST_VIRTUAL_REGISTER + 2);
30212 }
30213 if (rs6000_expand_vec_perm_const_1 (target, op0, op1, sel[0], sel[1]))
30214 return true;
30215 }
30216
30217 if (TARGET_ALTIVEC)
30218 {
30219 /* Force the target-independent code to lower to V16QImode. */
30220 if (vmode != V16QImode)
30221 return false;
30222 if (altivec_expand_vec_perm_const (target, op0, op1, sel))
30223 return true;
30224 }
30225
30226 return false;
30227 }
30228
30229 /* A subroutine for rs6000_expand_extract_even & rs6000_expand_interleave.
30230 OP0 and OP1 are the input vectors and TARGET is the output vector.
30231 PERM specifies the constant permutation vector. */
30232
30233 static void
30234 rs6000_do_expand_vec_perm (rtx target, rtx op0, rtx op1,
30235 machine_mode vmode, const vec_perm_builder &perm)
30236 {
30237 rtx x = expand_vec_perm_const (vmode, op0, op1, perm, BLKmode, target);
30238 if (x != target)
30239 emit_move_insn (target, x);
30240 }
30241
30242 /* Expand an extract even operation. */
30243
30244 void
30245 rs6000_expand_extract_even (rtx target, rtx op0, rtx op1)
30246 {
30247 machine_mode vmode = GET_MODE (target);
30248 unsigned i, nelt = GET_MODE_NUNITS (vmode);
30249 vec_perm_builder perm (nelt, nelt, 1);
30250
30251 for (i = 0; i < nelt; i++)
30252 perm.quick_push (i * 2);
30253
30254 rs6000_do_expand_vec_perm (target, op0, op1, vmode, perm);
30255 }
30256
30257 /* Expand a vector interleave operation. */
30258
30259 void
30260 rs6000_expand_interleave (rtx target, rtx op0, rtx op1, bool highp)
30261 {
30262 machine_mode vmode = GET_MODE (target);
30263 unsigned i, high, nelt = GET_MODE_NUNITS (vmode);
30264 vec_perm_builder perm (nelt, nelt, 1);
30265
30266 high = (highp ? 0 : nelt / 2);
30267 for (i = 0; i < nelt / 2; i++)
30268 {
30269 perm.quick_push (i + high);
30270 perm.quick_push (i + nelt + high);
30271 }
30272
30273 rs6000_do_expand_vec_perm (target, op0, op1, vmode, perm);
30274 }
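/* Illustrative sketch (not part of this file): the index vectors the
   two expanders above push, printed for a four-element vector so the
   even-extract and high/low interleave shapes are visible.

     #include <stdio.h>

     int
     main (void)
     {
       unsigned nelt = 4;
       printf ("extract even:");
       for (unsigned i = 0; i < nelt; i++)
         printf (" %u", i * 2);            // 0 2 4 6
       for (int highp = 1; highp >= 0; highp--)
         {
           unsigned high = highp ? 0 : nelt / 2;
           printf ("\ninterleave %s:", highp ? "high" : "low");
           for (unsigned i = 0; i < nelt / 2; i++)
             printf (" %u %u", i + high, i + nelt + high);
         }
       printf ("\n");   // high: 0 4 1 5   low: 2 6 3 7
       return 0;
     }
*/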
30275
30276 /* Scale a V2DF vector SRC by 2**SCALE and place the result in TGT.  */
30277 void
30278 rs6000_scale_v2df (rtx tgt, rtx src, int scale)
30279 {
30280 HOST_WIDE_INT hwi_scale (scale);
30281 REAL_VALUE_TYPE r_pow;
30282 rtvec v = rtvec_alloc (2);
30283 rtx elt;
30284 rtx scale_vec = gen_reg_rtx (V2DFmode);
30285 (void)real_powi (&r_pow, DFmode, &dconst2, hwi_scale);
30286 elt = const_double_from_real_value (r_pow, DFmode);
30287 RTVEC_ELT (v, 0) = elt;
30288 RTVEC_ELT (v, 1) = elt;
30289 rs6000_expand_vector_init (scale_vec, gen_rtx_PARALLEL (V2DFmode, v));
30290 emit_insn (gen_mulv2df3 (tgt, src, scale_vec));
30291 }
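/* Illustrative sketch (not part of this file): the splat-and-multiply
   above in scalar form -- build the constant 2**SCALE once and multiply
   each lane by it.  pow stands in for real_powi here.

     #include <math.h>
     #include <stdio.h>

     int
     main (void)
     {
       int scale = -3;
       double k = pow (2.0, scale);          // the splatted constant
       double src[2] = { 16.0, -4.0 }, tgt[2];
       for (int i = 0; i < 2; i++)
         tgt[i] = src[i] * k;
       printf ("%g %g\n", tgt[0], tgt[1]);   // prints 2 -0.5
       return 0;
     }
*/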
30292
30293 /* Return an RTX representing where to find the function value of a
30294 function returning MODE. */
30295 static rtx
30296 rs6000_complex_function_value (machine_mode mode)
30297 {
30298 unsigned int regno;
30299 rtx r1, r2;
30300 machine_mode inner = GET_MODE_INNER (mode);
30301 unsigned int inner_bytes = GET_MODE_UNIT_SIZE (mode);
30302
30303 if (TARGET_FLOAT128_TYPE
30304 && (mode == KCmode
30305 || (mode == TCmode && TARGET_IEEEQUAD)))
30306 regno = ALTIVEC_ARG_RETURN;
30307
30308 else if (FLOAT_MODE_P (mode) && TARGET_HARD_FLOAT)
30309 regno = FP_ARG_RETURN;
30310
30311 else
30312 {
30313 regno = GP_ARG_RETURN;
30314
30315 /* 32-bit is OK since it'll go in r3/r4. */
30316 if (TARGET_32BIT && inner_bytes >= 4)
30317 return gen_rtx_REG (mode, regno);
30318 }
30319
30320 if (inner_bytes >= 8)
30321 return gen_rtx_REG (mode, regno);
30322
30323 r1 = gen_rtx_EXPR_LIST (inner, gen_rtx_REG (inner, regno),
30324 const0_rtx);
30325 r2 = gen_rtx_EXPR_LIST (inner, gen_rtx_REG (inner, regno + 1),
30326 GEN_INT (inner_bytes));
30327 return gen_rtx_PARALLEL (mode, gen_rtvec (2, r1, r2));
30328 }
30329
30330 /* Return an rtx describing a return value of MODE as a PARALLEL
30331 in N_ELTS registers, each of mode ELT_MODE, starting at REGNO,
30332 stride REG_STRIDE. */
30333
30334 static rtx
30335 rs6000_parallel_return (machine_mode mode,
30336 int n_elts, machine_mode elt_mode,
30337 unsigned int regno, unsigned int reg_stride)
30338 {
30339 rtx par = gen_rtx_PARALLEL (mode, rtvec_alloc (n_elts));
30340
30341 int i;
30342 for (i = 0; i < n_elts; i++)
30343 {
30344 rtx r = gen_rtx_REG (elt_mode, regno);
30345 rtx off = GEN_INT (i * GET_MODE_SIZE (elt_mode));
30346 XVECEXP (par, 0, i) = gen_rtx_EXPR_LIST (VOIDmode, r, off);
30347 regno += reg_stride;
30348 }
30349
30350 return par;
30351 }
30352
30353 /* Target hook for TARGET_FUNCTION_VALUE.
30354
30355 An integer value is in r3 and a floating-point value is in fp1,
30356 unless -msoft-float. */
30357
30358 static rtx
30359 rs6000_function_value (const_tree valtype,
30360 const_tree fn_decl_or_type ATTRIBUTE_UNUSED,
30361 bool outgoing ATTRIBUTE_UNUSED)
30362 {
30363 machine_mode mode;
30364 unsigned int regno;
30365 machine_mode elt_mode;
30366 int n_elts;
30367
30368 /* Special handling for structs in darwin64. */
30369 if (TARGET_MACHO
30370 && rs6000_darwin64_struct_check_p (TYPE_MODE (valtype), valtype))
30371 {
30372 CUMULATIVE_ARGS valcum;
30373 rtx valret;
30374
30375 valcum.words = 0;
30376 valcum.fregno = FP_ARG_MIN_REG;
30377 valcum.vregno = ALTIVEC_ARG_MIN_REG;
30378 /* Do a trial code generation as if this were going to be passed as
30379 an argument; if any part goes in memory, we return NULL. */
30380 valret = rs6000_darwin64_record_arg (&valcum, valtype, true, /* retval= */ true);
30381 if (valret)
30382 return valret;
30383 /* Otherwise fall through to standard ABI rules. */
30384 }
30385
30386 mode = TYPE_MODE (valtype);
30387
30388 /* The ELFv2 ABI returns homogeneous VFP aggregates in registers. */
30389 if (rs6000_discover_homogeneous_aggregate (mode, valtype, &elt_mode, &n_elts))
30390 {
30391 int first_reg, n_regs;
30392
30393 if (SCALAR_FLOAT_MODE_NOT_VECTOR_P (elt_mode))
30394 {
30395 /* _Decimal128 must use even/odd register pairs. */
30396 first_reg = (elt_mode == TDmode) ? FP_ARG_RETURN + 1 : FP_ARG_RETURN;
30397 n_regs = (GET_MODE_SIZE (elt_mode) + 7) >> 3;
30398 }
30399 else
30400 {
30401 first_reg = ALTIVEC_ARG_RETURN;
30402 n_regs = 1;
30403 }
30404
30405 return rs6000_parallel_return (mode, n_elts, elt_mode, first_reg, n_regs);
30406 }
30407
30408   /* Some return value types need to be split in the -mpowerpc64, 32-bit ABI.  */
30409 if (TARGET_32BIT && TARGET_POWERPC64)
30410 switch (mode)
30411 {
30412 default:
30413 break;
30414 case E_DImode:
30415 case E_SCmode:
30416 case E_DCmode:
30417 case E_TCmode:
30418 int count = GET_MODE_SIZE (mode) / 4;
30419 return rs6000_parallel_return (mode, count, SImode, GP_ARG_RETURN, 1);
30420 }
30421
30422 if ((INTEGRAL_TYPE_P (valtype)
30423 && GET_MODE_BITSIZE (mode) < (TARGET_32BIT ? 32 : 64))
30424 || POINTER_TYPE_P (valtype))
30425 mode = TARGET_32BIT ? SImode : DImode;
30426
30427 if (DECIMAL_FLOAT_MODE_P (mode) && TARGET_HARD_FLOAT)
30428 /* _Decimal128 must use an even/odd register pair. */
30429 regno = (mode == TDmode) ? FP_ARG_RETURN + 1 : FP_ARG_RETURN;
30430 else if (SCALAR_FLOAT_TYPE_P (valtype) && TARGET_HARD_FLOAT
30431 && !FLOAT128_VECTOR_P (mode))
30432 regno = FP_ARG_RETURN;
30433 else if (TREE_CODE (valtype) == COMPLEX_TYPE
30434 && targetm.calls.split_complex_arg)
30435 return rs6000_complex_function_value (mode);
30436 /* VSX is a superset of Altivec and adds V2DImode/V2DFmode. Since the same
30437 return register is used in both cases, and we won't see V2DImode/V2DFmode
30438 for pure altivec, combine the two cases. */
30439 else if ((TREE_CODE (valtype) == VECTOR_TYPE || FLOAT128_VECTOR_P (mode))
30440 && TARGET_ALTIVEC && TARGET_ALTIVEC_ABI
30441 && ALTIVEC_OR_VSX_VECTOR_MODE (mode))
30442 regno = ALTIVEC_ARG_RETURN;
30443 else
30444 regno = GP_ARG_RETURN;
30445
30446 return gen_rtx_REG (mode, regno);
30447 }
30448
30449 /* Define how to find the value returned by a library function
30450 assuming the value has mode MODE. */
30451 rtx
30452 rs6000_libcall_value (machine_mode mode)
30453 {
30454 unsigned int regno;
30455
30456   /* A long long return value needs to be split in the -mpowerpc64, 32-bit ABI.  */
30457 if (TARGET_32BIT && TARGET_POWERPC64 && mode == DImode)
30458 return rs6000_parallel_return (mode, 2, SImode, GP_ARG_RETURN, 1);
30459
30460 if (DECIMAL_FLOAT_MODE_P (mode) && TARGET_HARD_FLOAT)
30461 /* _Decimal128 must use an even/odd register pair. */
30462 regno = (mode == TDmode) ? FP_ARG_RETURN + 1 : FP_ARG_RETURN;
30463 else if (SCALAR_FLOAT_MODE_NOT_VECTOR_P (mode) && TARGET_HARD_FLOAT)
30464 regno = FP_ARG_RETURN;
30465 /* VSX is a superset of Altivec and adds V2DImode/V2DFmode. Since the same
30466 return register is used in both cases, and we won't see V2DImode/V2DFmode
30467 for pure altivec, combine the two cases. */
30468 else if (ALTIVEC_OR_VSX_VECTOR_MODE (mode)
30469 && TARGET_ALTIVEC && TARGET_ALTIVEC_ABI)
30470 regno = ALTIVEC_ARG_RETURN;
30471 else if (COMPLEX_MODE_P (mode) && targetm.calls.split_complex_arg)
30472 return rs6000_complex_function_value (mode);
30473 else
30474 regno = GP_ARG_RETURN;
30475
30476 return gen_rtx_REG (mode, regno);
30477 }
30478
30479 /* Compute register pressure classes. We implement the target hook to avoid
30480 IRA picking something like GEN_OR_FLOAT_REGS as a pressure class, which can
30481    lead to incorrect estimates of the number of available registers and
30482    therefore increased register pressure/spill.  */
30483 static int
30484 rs6000_compute_pressure_classes (enum reg_class *pressure_classes)
30485 {
30486 int n;
30487
30488 n = 0;
30489 pressure_classes[n++] = GENERAL_REGS;
30490 if (TARGET_VSX)
30491 pressure_classes[n++] = VSX_REGS;
30492 else
30493 {
30494 if (TARGET_ALTIVEC)
30495 pressure_classes[n++] = ALTIVEC_REGS;
30496 if (TARGET_HARD_FLOAT)
30497 pressure_classes[n++] = FLOAT_REGS;
30498 }
30499 pressure_classes[n++] = CR_REGS;
30500 pressure_classes[n++] = SPECIAL_REGS;
30501
30502 return n;
30503 }
30504
30505 /* Given FROM and TO register numbers, say whether this elimination is allowed.
30506 Frame pointer elimination is automatically handled.
30507
30508 For the RS/6000, if frame pointer elimination is being done, we would like
30509 to convert ap into fp, not sp.
30510
30511 We need r30 if -mminimal-toc was specified, and there are constant pool
30512 references. */
30513
30514 static bool
30515 rs6000_can_eliminate (const int from, const int to)
30516 {
30517 return (from == ARG_POINTER_REGNUM && to == STACK_POINTER_REGNUM
30518 ? ! frame_pointer_needed
30519 : from == RS6000_PIC_OFFSET_TABLE_REGNUM
30520 ? ! TARGET_MINIMAL_TOC || TARGET_NO_TOC
30521 || constant_pool_empty_p ()
30522 : true);
30523 }
30524
30525 /* Define the offset between two registers, FROM to be eliminated and its
30526 replacement TO, at the start of a routine. */
30527 HOST_WIDE_INT
30528 rs6000_initial_elimination_offset (int from, int to)
30529 {
30530 rs6000_stack_t *info = rs6000_stack_info ();
30531 HOST_WIDE_INT offset;
30532
30533 if (from == HARD_FRAME_POINTER_REGNUM && to == STACK_POINTER_REGNUM)
30534 offset = info->push_p ? 0 : -info->total_size;
30535 else if (from == FRAME_POINTER_REGNUM && to == STACK_POINTER_REGNUM)
30536 {
30537 offset = info->push_p ? 0 : -info->total_size;
30538 if (FRAME_GROWS_DOWNWARD)
30539 offset += info->fixed_size + info->vars_size + info->parm_size;
30540 }
30541 else if (from == FRAME_POINTER_REGNUM && to == HARD_FRAME_POINTER_REGNUM)
30542 offset = FRAME_GROWS_DOWNWARD
30543 ? info->fixed_size + info->vars_size + info->parm_size
30544 : 0;
30545 else if (from == ARG_POINTER_REGNUM && to == HARD_FRAME_POINTER_REGNUM)
30546 offset = info->total_size;
30547 else if (from == ARG_POINTER_REGNUM && to == STACK_POINTER_REGNUM)
30548 offset = info->push_p ? info->total_size : 0;
30549 else if (from == RS6000_PIC_OFFSET_TABLE_REGNUM)
30550 offset = 0;
30551 else
30552 gcc_unreachable ();
30553
30554 return offset;
30555 }
30556
30557 /* Fill in sizes of registers used by unwinder. */
30558
30559 static void
30560 rs6000_init_dwarf_reg_sizes_extra (tree address)
30561 {
30562 if (TARGET_MACHO && ! TARGET_ALTIVEC)
30563 {
30564 int i;
30565 machine_mode mode = TYPE_MODE (char_type_node);
30566 rtx addr = expand_expr (address, NULL_RTX, VOIDmode, EXPAND_NORMAL);
30567 rtx mem = gen_rtx_MEM (BLKmode, addr);
30568 rtx value = gen_int_mode (16, mode);
30569
30570 /* On Darwin, libgcc may be built to run on both G3 and G4/5.
30571 The unwinder still needs to know the size of Altivec registers. */
30572
30573 for (i = FIRST_ALTIVEC_REGNO; i < LAST_ALTIVEC_REGNO+1; i++)
30574 {
30575 int column = DWARF_REG_TO_UNWIND_COLUMN
30576 (DWARF2_FRAME_REG_OUT (DWARF_FRAME_REGNUM (i), true));
30577 HOST_WIDE_INT offset = column * GET_MODE_SIZE (mode);
30578
30579 emit_move_insn (adjust_address (mem, mode, offset), value);
30580 }
30581 }
30582 }
30583
30584 /* Map internal gcc register numbers to debug format register numbers.
30585 FORMAT specifies the type of debug register number to use:
30586 0 -- debug information, except for frame-related sections
30587 1 -- DWARF .debug_frame section
30588 2 -- DWARF .eh_frame section */
30589
30590 unsigned int
30591 rs6000_dbx_register_number (unsigned int regno, unsigned int format)
30592 {
30593 /* On some platforms, we use the standard DWARF register
30594 numbering for .debug_info and .debug_frame. */
30595 if ((format == 0 && write_symbols == DWARF2_DEBUG) || format == 1)
30596 {
30597 #ifdef RS6000_USE_DWARF_NUMBERING
30598 if (regno <= 31)
30599 return regno;
30600 if (FP_REGNO_P (regno))
30601 return regno - FIRST_FPR_REGNO + 32;
30602 if (ALTIVEC_REGNO_P (regno))
30603 return regno - FIRST_ALTIVEC_REGNO + 1124;
30604 if (regno == LR_REGNO)
30605 return 108;
30606 if (regno == CTR_REGNO)
30607 return 109;
30608 if (regno == CA_REGNO)
30609 return 101; /* XER */
30610 /* Special handling for CR for .debug_frame: rs6000_emit_prologue has
30611 translated any combination of CR2, CR3, CR4 saves to a save of CR2.
30612 The actual code emitted saves the whole of CR, so we map CR2_REGNO
30613 to the DWARF reg for CR. */
30614 if (format == 1 && regno == CR2_REGNO)
30615 return 64;
30616 if (CR_REGNO_P (regno))
30617 return regno - CR0_REGNO + 86;
30618 if (regno == VRSAVE_REGNO)
30619 return 356;
30620 if (regno == VSCR_REGNO)
30621 return 67;
30622
30623 /* These do not make much sense. */
30624 if (regno == FRAME_POINTER_REGNUM)
30625 return 111;
30626 if (regno == ARG_POINTER_REGNUM)
30627 return 67;
30628 if (regno == 64)
30629 return 100;
30630
30631 gcc_unreachable ();
30632 #endif
30633 }
30634
30635 /* We use the GCC 7 (and before) internal number for non-DWARF debug
30636 information, and also for .eh_frame. */
30637 /* Translate the regnos to their numbers in GCC 7 (and before). */
30638 if (regno <= 31)
30639 return regno;
30640 if (FP_REGNO_P (regno))
30641 return regno - FIRST_FPR_REGNO + 32;
30642 if (ALTIVEC_REGNO_P (regno))
30643 return regno - FIRST_ALTIVEC_REGNO + 77;
30644 if (regno == LR_REGNO)
30645 return 65;
30646 if (regno == CTR_REGNO)
30647 return 66;
30648 if (regno == CA_REGNO)
30649 return 76; /* XER */
30650 if (CR_REGNO_P (regno))
30651 return regno - CR0_REGNO + 68;
30652 if (regno == VRSAVE_REGNO)
30653 return 109;
30654 if (regno == VSCR_REGNO)
30655 return 110;
30656
30657 if (regno == FRAME_POINTER_REGNUM)
30658 return 111;
30659 if (regno == ARG_POINTER_REGNUM)
30660 return 67;
30661 if (regno == 64)
30662 return 64;
30663
30664 gcc_unreachable ();
30665 }
30666
30667 /* target hook eh_return_filter_mode */
30668 static scalar_int_mode
30669 rs6000_eh_return_filter_mode (void)
30670 {
30671 return TARGET_32BIT ? SImode : word_mode;
30672 }
30673
30674 /* Target hook for translate_mode_attribute. */
30675 static machine_mode
30676 rs6000_translate_mode_attribute (machine_mode mode)
30677 {
30678 if ((FLOAT128_IEEE_P (mode)
30679 && ieee128_float_type_node == long_double_type_node)
30680 || (FLOAT128_IBM_P (mode)
30681 && ibm128_float_type_node == long_double_type_node))
30682 return COMPLEX_MODE_P (mode) ? E_TCmode : E_TFmode;
30683 return mode;
30684 }
30685
30686 /* Target hook for scalar_mode_supported_p. */
30687 static bool
30688 rs6000_scalar_mode_supported_p (scalar_mode mode)
30689 {
30690 /* -m32 does not support TImode. This is the default, from
30691 default_scalar_mode_supported_p. For -m32 -mpowerpc64 we want the
30692 same ABI as for -m32. But default_scalar_mode_supported_p allows
30693 integer modes of precision 2 * BITS_PER_WORD, which matches TImode
30694 for -mpowerpc64. */
30695 if (TARGET_32BIT && mode == TImode)
30696 return false;
30697
30698 if (DECIMAL_FLOAT_MODE_P (mode))
30699 return default_decimal_float_supported_p ();
30700 else if (TARGET_FLOAT128_TYPE && (mode == KFmode || mode == IFmode))
30701 return true;
30702 else
30703 return default_scalar_mode_supported_p (mode);
30704 }
30705
30706 /* Target hook for vector_mode_supported_p. */
30707 static bool
30708 rs6000_vector_mode_supported_p (machine_mode mode)
30709 {
30710 /* There is no vector form for IEEE 128-bit. If we return true for IEEE
30711 128-bit, the compiler might try to widen IEEE 128-bit to IBM
30712 double-double. */
30713 if (VECTOR_MEM_ALTIVEC_OR_VSX_P (mode) && !FLOAT128_IEEE_P (mode))
30714 return true;
30715
30716 else
30717 return false;
30718 }
30719
30720 /* Target hook for floatn_mode. */
30721 static opt_scalar_float_mode
30722 rs6000_floatn_mode (int n, bool extended)
30723 {
30724 if (extended)
30725 {
30726 switch (n)
30727 {
30728 case 32:
30729 return DFmode;
30730
30731 case 64:
30732 if (TARGET_FLOAT128_TYPE)
30733 return (FLOAT128_IEEE_P (TFmode)) ? TFmode : KFmode;
30734 else
30735 return opt_scalar_float_mode ();
30736
30737 case 128:
30738 return opt_scalar_float_mode ();
30739
30740 default:
30741 /* Those are the only valid _FloatNx types. */
30742 gcc_unreachable ();
30743 }
30744 }
30745 else
30746 {
30747 switch (n)
30748 {
30749 case 32:
30750 return SFmode;
30751
30752 case 64:
30753 return DFmode;
30754
30755 case 128:
30756 if (TARGET_FLOAT128_TYPE)
30757 return (FLOAT128_IEEE_P (TFmode)) ? TFmode : KFmode;
30758 else
30759 return opt_scalar_float_mode ();
30760
30761 default:
30762 return opt_scalar_float_mode ();
30763 }
30764 }
30765
30766 }
30767
30768 /* Target hook for c_mode_for_suffix. */
30769 static machine_mode
30770 rs6000_c_mode_for_suffix (char suffix)
30771 {
30772 if (TARGET_FLOAT128_TYPE)
30773 {
30774 if (suffix == 'q' || suffix == 'Q')
30775 return (FLOAT128_IEEE_P (TFmode)) ? TFmode : KFmode;
30776
30777 /* At the moment, we are not defining a suffix for IBM extended double.
30778 If/when the default for -mabi=ieeelongdouble is changed, and we want
30779 to support __ibm128 constants in legacy library code, we may need to
30780      re-evaluate this decision.  Currently, c-lex.c only supports 'w' and
30781 'q' as machine dependent suffixes. The x86_64 port uses 'w' for
30782 __float80 constants. */
30783 }
30784
30785 return VOIDmode;
30786 }
30787
30788 /* Target hook for invalid_arg_for_unprototyped_fn. */
30789 static const char *
30790 invalid_arg_for_unprototyped_fn (const_tree typelist, const_tree funcdecl, const_tree val)
30791 {
30792 return (!rs6000_darwin64_abi
30793 && typelist == 0
30794 && TREE_CODE (TREE_TYPE (val)) == VECTOR_TYPE
30795 && (funcdecl == NULL_TREE
30796 || (TREE_CODE (funcdecl) == FUNCTION_DECL
30797 && DECL_BUILT_IN_CLASS (funcdecl) != BUILT_IN_MD)))
30798 ? N_("AltiVec argument passed to unprototyped function")
30799 : NULL;
30800 }
30801
30802 /* For TARGET_SECURE_PLT 32-bit PIC code we can save PIC register
30803 setup by using __stack_chk_fail_local hidden function instead of
30804 calling __stack_chk_fail directly. Otherwise it is better to call
30805 __stack_chk_fail directly. */
30806
30807 static tree ATTRIBUTE_UNUSED
30808 rs6000_stack_protect_fail (void)
30809 {
30810 return (DEFAULT_ABI == ABI_V4 && TARGET_SECURE_PLT && flag_pic)
30811 ? default_hidden_stack_protect_fail ()
30812 : default_external_stack_protect_fail ();
30813 }
30814
30815 /* Implement the TARGET_ASAN_SHADOW_OFFSET hook. */
30816
30817 #if TARGET_ELF
30818 static unsigned HOST_WIDE_INT
30819 rs6000_asan_shadow_offset (void)
30820 {
30821 return (unsigned HOST_WIDE_INT) 1 << (TARGET_64BIT ? 41 : 29);
30822 }
30823 #endif
30824 \f
30825 /* Mask options that we want to support inside of attribute((target)) and
30826 #pragma GCC target operations. Note, we do not include things like
30827 64/32-bit, endianness, hard/soft floating point, etc. that would have
30828 different calling sequences. */
30829
30830 struct rs6000_opt_mask {
30831 const char *name; /* option name */
30832 HOST_WIDE_INT mask; /* mask to set */
30833 bool invert; /* invert sense of mask */
30834 bool valid_target; /* option is a target option */
30835 };
30836
30837 static struct rs6000_opt_mask const rs6000_opt_masks[] =
30838 {
30839 { "altivec", OPTION_MASK_ALTIVEC, false, true },
30840 { "cmpb", OPTION_MASK_CMPB, false, true },
30841 { "crypto", OPTION_MASK_CRYPTO, false, true },
30842 { "direct-move", OPTION_MASK_DIRECT_MOVE, false, true },
30843 { "dlmzb", OPTION_MASK_DLMZB, false, true },
30844 { "efficient-unaligned-vsx", OPTION_MASK_EFFICIENT_UNALIGNED_VSX,
30845 false, true },
30846 { "float128", OPTION_MASK_FLOAT128_KEYWORD, false, true },
30847 { "float128-hardware", OPTION_MASK_FLOAT128_HW, false, true },
30848 { "fprnd", OPTION_MASK_FPRND, false, true },
30849 { "future", OPTION_MASK_FUTURE, false, true },
30850 { "hard-dfp", OPTION_MASK_DFP, false, true },
30851 { "htm", OPTION_MASK_HTM, false, true },
30852 { "isel", OPTION_MASK_ISEL, false, true },
30853 { "mfcrf", OPTION_MASK_MFCRF, false, true },
30854 { "mfpgpr", 0, false, true },
30855 { "modulo", OPTION_MASK_MODULO, false, true },
30856 { "mulhw", OPTION_MASK_MULHW, false, true },
30857 { "multiple", OPTION_MASK_MULTIPLE, false, true },
30858 { "pcrel", OPTION_MASK_PCREL, false, true },
30859 { "popcntb", OPTION_MASK_POPCNTB, false, true },
30860 { "popcntd", OPTION_MASK_POPCNTD, false, true },
30861 { "power8-fusion", OPTION_MASK_P8_FUSION, false, true },
30862 { "power8-fusion-sign", OPTION_MASK_P8_FUSION_SIGN, false, true },
30863 { "power8-vector", OPTION_MASK_P8_VECTOR, false, true },
30864 { "power9-minmax", OPTION_MASK_P9_MINMAX, false, true },
30865 { "power9-misc", OPTION_MASK_P9_MISC, false, true },
30866 { "power9-vector", OPTION_MASK_P9_VECTOR, false, true },
30867 { "powerpc-gfxopt", OPTION_MASK_PPC_GFXOPT, false, true },
30868 { "powerpc-gpopt", OPTION_MASK_PPC_GPOPT, false, true },
30869 { "prefixed-addr", OPTION_MASK_PREFIXED_ADDR, false, true },
30870 { "quad-memory", OPTION_MASK_QUAD_MEMORY, false, true },
30871 { "quad-memory-atomic", OPTION_MASK_QUAD_MEMORY_ATOMIC, false, true },
30872 { "recip-precision", OPTION_MASK_RECIP_PRECISION, false, true },
30873 { "save-toc-indirect", OPTION_MASK_SAVE_TOC_INDIRECT, false, true },
30874 { "string", 0, false, true },
30875 { "update", OPTION_MASK_NO_UPDATE, true , true },
30876 { "vsx", OPTION_MASK_VSX, false, true },
30877 #ifdef OPTION_MASK_64BIT
30878 #if TARGET_AIX_OS
30879 { "aix64", OPTION_MASK_64BIT, false, false },
30880 { "aix32", OPTION_MASK_64BIT, true, false },
30881 #else
30882 { "64", OPTION_MASK_64BIT, false, false },
30883 { "32", OPTION_MASK_64BIT, true, false },
30884 #endif
30885 #endif
30886 #ifdef OPTION_MASK_EABI
30887 { "eabi", OPTION_MASK_EABI, false, false },
30888 #endif
30889 #ifdef OPTION_MASK_LITTLE_ENDIAN
30890 { "little", OPTION_MASK_LITTLE_ENDIAN, false, false },
30891 { "big", OPTION_MASK_LITTLE_ENDIAN, true, false },
30892 #endif
30893 #ifdef OPTION_MASK_RELOCATABLE
30894 { "relocatable", OPTION_MASK_RELOCATABLE, false, false },
30895 #endif
30896 #ifdef OPTION_MASK_STRICT_ALIGN
30897 { "strict-align", OPTION_MASK_STRICT_ALIGN, false, false },
30898 #endif
30899 { "soft-float", OPTION_MASK_SOFT_FLOAT, false, false },
30900 { "string", 0, false, false },
30901 };
30902
30903 /* Builtin mask mapping for printing the flags. */
30904 static struct rs6000_opt_mask const rs6000_builtin_mask_names[] =
30905 {
30906 { "altivec", RS6000_BTM_ALTIVEC, false, false },
30907 { "vsx", RS6000_BTM_VSX, false, false },
30908 { "fre", RS6000_BTM_FRE, false, false },
30909 { "fres", RS6000_BTM_FRES, false, false },
30910 { "frsqrte", RS6000_BTM_FRSQRTE, false, false },
30911 { "frsqrtes", RS6000_BTM_FRSQRTES, false, false },
30912 { "popcntd", RS6000_BTM_POPCNTD, false, false },
30913 { "cell", RS6000_BTM_CELL, false, false },
30914 { "power8-vector", RS6000_BTM_P8_VECTOR, false, false },
30915 { "power9-vector", RS6000_BTM_P9_VECTOR, false, false },
30916 { "power9-misc", RS6000_BTM_P9_MISC, false, false },
30917 { "crypto", RS6000_BTM_CRYPTO, false, false },
30918 { "htm", RS6000_BTM_HTM, false, false },
30919 { "hard-dfp", RS6000_BTM_DFP, false, false },
30920 { "hard-float", RS6000_BTM_HARD_FLOAT, false, false },
30921 { "long-double-128", RS6000_BTM_LDBL128, false, false },
30922 { "powerpc64", RS6000_BTM_POWERPC64, false, false },
30923 { "float128", RS6000_BTM_FLOAT128, false, false },
30924 { "float128-hw", RS6000_BTM_FLOAT128_HW,false, false },
30925 };
30926
30927 /* Option variables that we want to support inside attribute((target)) and
30928 #pragma GCC target operations. */
30929
30930 struct rs6000_opt_var {
30931 const char *name; /* option name */
30932 size_t global_offset; /* offset of the option in global_options. */
30933 size_t target_offset; /* offset of the option in target options. */
30934 };
30935
30936 static struct rs6000_opt_var const rs6000_opt_vars[] =
30937 {
30938 { "friz",
30939 offsetof (struct gcc_options, x_TARGET_FRIZ),
30940 offsetof (struct cl_target_option, x_TARGET_FRIZ), },
30941 { "avoid-indexed-addresses",
30942 offsetof (struct gcc_options, x_TARGET_AVOID_XFORM),
30943 offsetof (struct cl_target_option, x_TARGET_AVOID_XFORM) },
30944 { "longcall",
30945 offsetof (struct gcc_options, x_rs6000_default_long_calls),
30946 offsetof (struct cl_target_option, x_rs6000_default_long_calls), },
30947 { "optimize-swaps",
30948 offsetof (struct gcc_options, x_rs6000_optimize_swaps),
30949 offsetof (struct cl_target_option, x_rs6000_optimize_swaps), },
30950 { "allow-movmisalign",
30951 offsetof (struct gcc_options, x_TARGET_ALLOW_MOVMISALIGN),
30952 offsetof (struct cl_target_option, x_TARGET_ALLOW_MOVMISALIGN), },
30953 { "sched-groups",
30954 offsetof (struct gcc_options, x_TARGET_SCHED_GROUPS),
30955 offsetof (struct cl_target_option, x_TARGET_SCHED_GROUPS), },
30956 { "always-hint",
30957 offsetof (struct gcc_options, x_TARGET_ALWAYS_HINT),
30958 offsetof (struct cl_target_option, x_TARGET_ALWAYS_HINT), },
30959 { "align-branch-targets",
30960 offsetof (struct gcc_options, x_TARGET_ALIGN_BRANCH_TARGETS),
30961 offsetof (struct cl_target_option, x_TARGET_ALIGN_BRANCH_TARGETS), },
30962 { "tls-markers",
30963 offsetof (struct gcc_options, x_tls_markers),
30964 offsetof (struct cl_target_option, x_tls_markers), },
30965 { "sched-prolog",
30966 offsetof (struct gcc_options, x_TARGET_SCHED_PROLOG),
30967 offsetof (struct cl_target_option, x_TARGET_SCHED_PROLOG), },
30968 { "sched-epilog",
30969 offsetof (struct gcc_options, x_TARGET_SCHED_PROLOG),
30970 offsetof (struct cl_target_option, x_TARGET_SCHED_PROLOG), },
30971 { "speculate-indirect-jumps",
30972 offsetof (struct gcc_options, x_rs6000_speculate_indirect_jumps),
30973 offsetof (struct cl_target_option, x_rs6000_speculate_indirect_jumps), },
30974 };
30975
30976 /* Inner function to handle attribute((target("..."))) and #pragma GCC target
30977 parsing. Return true if there were no errors. */
30978
30979 static bool
30980 rs6000_inner_target_options (tree args, bool attr_p)
30981 {
30982 bool ret = true;
30983
30984 if (args == NULL_TREE)
30985 ;
30986
30987 else if (TREE_CODE (args) == STRING_CST)
30988 {
30989 char *p = ASTRDUP (TREE_STRING_POINTER (args));
30990 char *q;
30991
30992 while ((q = strtok (p, ",")) != NULL)
30993 {
30994 bool error_p = false;
30995 bool not_valid_p = false;
30996 const char *cpu_opt = NULL;
30997
30998 p = NULL;
30999 if (strncmp (q, "cpu=", 4) == 0)
31000 {
31001 int cpu_index = rs6000_cpu_name_lookup (q+4);
31002 if (cpu_index >= 0)
31003 rs6000_cpu_index = cpu_index;
31004 else
31005 {
31006 error_p = true;
31007 cpu_opt = q+4;
31008 }
31009 }
31010 else if (strncmp (q, "tune=", 5) == 0)
31011 {
31012 int tune_index = rs6000_cpu_name_lookup (q+5);
31013 if (tune_index >= 0)
31014 rs6000_tune_index = tune_index;
31015 else
31016 {
31017 error_p = true;
31018 cpu_opt = q+5;
31019 }
31020 }
31021 else
31022 {
31023 size_t i;
31024 bool invert = false;
31025 char *r = q;
31026
31027 error_p = true;
31028 if (strncmp (r, "no-", 3) == 0)
31029 {
31030 invert = true;
31031 r += 3;
31032 }
31033
31034 for (i = 0; i < ARRAY_SIZE (rs6000_opt_masks); i++)
31035 if (strcmp (r, rs6000_opt_masks[i].name) == 0)
31036 {
31037 HOST_WIDE_INT mask = rs6000_opt_masks[i].mask;
31038
31039 if (!rs6000_opt_masks[i].valid_target)
31040 not_valid_p = true;
31041 else
31042 {
31043 error_p = false;
31044 rs6000_isa_flags_explicit |= mask;
31045
31046 /* VSX needs altivec, so -mvsx automagically sets
31047 altivec and disables -mavoid-indexed-addresses. */
31048 if (!invert)
31049 {
31050 if (mask == OPTION_MASK_VSX)
31051 {
31052 mask |= OPTION_MASK_ALTIVEC;
31053 TARGET_AVOID_XFORM = 0;
31054 }
31055 }
31056
31057 if (rs6000_opt_masks[i].invert)
31058 invert = !invert;
31059
31060 if (invert)
31061 rs6000_isa_flags &= ~mask;
31062 else
31063 rs6000_isa_flags |= mask;
31064 }
31065 break;
31066 }
31067
31068 if (error_p && !not_valid_p)
31069 {
31070 for (i = 0; i < ARRAY_SIZE (rs6000_opt_vars); i++)
31071 if (strcmp (r, rs6000_opt_vars[i].name) == 0)
31072 {
31073 size_t j = rs6000_opt_vars[i].global_offset;
31074 *((int *) ((char *)&global_options + j)) = !invert;
31075 error_p = false;
31076 not_valid_p = false;
31077 break;
31078 }
31079 }
31080 }
31081
31082 if (error_p)
31083 {
31084 const char *eprefix, *esuffix;
31085
31086 ret = false;
31087 if (attr_p)
31088 {
31089 eprefix = "__attribute__((__target__(";
31090 esuffix = ")))";
31091 }
31092 else
31093 {
31094 eprefix = "#pragma GCC target ";
31095 esuffix = "";
31096 }
31097
31098 if (cpu_opt)
31099 error ("invalid cpu %qs for %s%qs%s", cpu_opt, eprefix,
31100 q, esuffix);
31101 else if (not_valid_p)
31102 error ("%s%qs%s is not allowed", eprefix, q, esuffix);
31103 else
31104 error ("%s%qs%s is invalid", eprefix, q, esuffix);
31105 }
31106 }
31107 }
31108
31109 else if (TREE_CODE (args) == TREE_LIST)
31110 {
31111 do
31112 {
31113 tree value = TREE_VALUE (args);
31114 if (value)
31115 {
31116 bool ret2 = rs6000_inner_target_options (value, attr_p);
31117 if (!ret2)
31118 ret = false;
31119 }
31120 args = TREE_CHAIN (args);
31121 }
31122 while (args != NULL_TREE);
31123 }
31124
31125 else
31126 {
31127 error ("attribute %<target%> argument not a string");
31128 return false;
31129 }
31130
31131 return ret;
31132 }
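/* Illustrative sketch (not part of this file): the strtok idiom used
   above -- hand the buffer to strtok on the first call only, then pass
   NULL to continue the same string, and peel an optional "no-" prefix
   to derive the invert flag.

     #include <stdio.h>
     #include <string.h>

     int
     main (void)
     {
       char buf[] = "vsx,no-htm,cpu=power9";
       char *p = buf, *q;
       while ((q = strtok (p, ",")) != NULL)
         {
           p = NULL;                        // continue the same string
           int invert = strncmp (q, "no-", 3) == 0;
           printf ("opt=%s invert=%d\n", invert ? q + 3 : q, invert);
         }
       return 0;
     }
*/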
31133
31134 /* Print out the target options as a list for -mdebug=target. */
31135
31136 static void
31137 rs6000_debug_target_options (tree args, const char *prefix)
31138 {
31139 if (args == NULL_TREE)
31140 fprintf (stderr, "%s<NULL>", prefix);
31141
31142 else if (TREE_CODE (args) == STRING_CST)
31143 {
31144 char *p = ASTRDUP (TREE_STRING_POINTER (args));
31145 char *q;
31146
31147 while ((q = strtok (p, ",")) != NULL)
31148 {
31149 p = NULL;
31150 fprintf (stderr, "%s\"%s\"", prefix, q);
31151 prefix = ", ";
31152 }
31153 }
31154
31155 else if (TREE_CODE (args) == TREE_LIST)
31156 {
31157 do
31158 {
31159 tree value = TREE_VALUE (args);
31160 if (value)
31161 {
31162 rs6000_debug_target_options (value, prefix);
31163 prefix = ", ";
31164 }
31165 args = TREE_CHAIN (args);
31166 }
31167 while (args != NULL_TREE);
31168 }
31169
31170 else
31171 gcc_unreachable ();
31172
31173 return;
31174 }
31175
31176 \f
31177 /* Hook to validate attribute((target("..."))). */
31178
31179 static bool
31180 rs6000_valid_attribute_p (tree fndecl,
31181 tree ARG_UNUSED (name),
31182 tree args,
31183 int flags)
31184 {
31185 struct cl_target_option cur_target;
31186 bool ret;
31187 tree old_optimize;
31188 tree new_target, new_optimize;
31189 tree func_optimize;
31190
31191 gcc_assert ((fndecl != NULL_TREE) && (args != NULL_TREE));
31192
31193 if (TARGET_DEBUG_TARGET)
31194 {
31195 tree tname = DECL_NAME (fndecl);
31196 fprintf (stderr, "\n==================== rs6000_valid_attribute_p:\n");
31197 if (tname)
31198 fprintf (stderr, "function: %.*s\n",
31199 (int) IDENTIFIER_LENGTH (tname),
31200 IDENTIFIER_POINTER (tname));
31201 else
31202 fprintf (stderr, "function: unknown\n");
31203
31204 fprintf (stderr, "args:");
31205 rs6000_debug_target_options (args, " ");
31206 fprintf (stderr, "\n");
31207
31208 if (flags)
31209 fprintf (stderr, "flags: 0x%x\n", flags);
31210
31211 fprintf (stderr, "--------------------\n");
31212 }
31213
31214 /* attribute((target("default"))) does nothing, beyond
31215 affecting multi-versioning. */
31216 if (TREE_VALUE (args)
31217 && TREE_CODE (TREE_VALUE (args)) == STRING_CST
31218 && TREE_CHAIN (args) == NULL_TREE
31219 && strcmp (TREE_STRING_POINTER (TREE_VALUE (args)), "default") == 0)
31220 return true;
31221
31222 old_optimize = build_optimization_node (&global_options);
31223 func_optimize = DECL_FUNCTION_SPECIFIC_OPTIMIZATION (fndecl);
31224
31225 /* If the function changed the optimization levels as well as setting target
31226 options, start with the optimizations specified. */
31227 if (func_optimize && func_optimize != old_optimize)
31228 cl_optimization_restore (&global_options,
31229 TREE_OPTIMIZATION (func_optimize));
31230
31231 /* The target attributes may also change some optimization flags, so update
31232 the optimization options if necessary. */
31233 cl_target_option_save (&cur_target, &global_options);
31234 rs6000_cpu_index = rs6000_tune_index = -1;
31235 ret = rs6000_inner_target_options (args, true);
31236
31237 /* Set up any additional state. */
31238 if (ret)
31239 {
31240 ret = rs6000_option_override_internal (false);
31241 new_target = build_target_option_node (&global_options);
31242 }
31243 else
31244 new_target = NULL;
31245
31246 new_optimize = build_optimization_node (&global_options);
31247
31248 if (!new_target)
31249 ret = false;
31250
31251 else if (fndecl)
31252 {
31253 DECL_FUNCTION_SPECIFIC_TARGET (fndecl) = new_target;
31254
31255 if (old_optimize != new_optimize)
31256 DECL_FUNCTION_SPECIFIC_OPTIMIZATION (fndecl) = new_optimize;
31257 }
31258
31259 cl_target_option_restore (&global_options, &cur_target);
31260
31261 if (old_optimize != new_optimize)
31262 cl_optimization_restore (&global_options,
31263 TREE_OPTIMIZATION (old_optimize));
31264
31265 return ret;
31266 }
31267
31268 \f
31269 /* Hook to validate the current #pragma GCC target and set the state, and
31270 update the macros based on what was changed. If ARGS is NULL, then
31271 POP_TARGET is used to reset the options. */
31272
31273 bool
31274 rs6000_pragma_target_parse (tree args, tree pop_target)
31275 {
31276 tree prev_tree = build_target_option_node (&global_options);
31277 tree cur_tree;
31278 struct cl_target_option *prev_opt, *cur_opt;
31279 HOST_WIDE_INT prev_flags, cur_flags, diff_flags;
31280 HOST_WIDE_INT prev_bumask, cur_bumask, diff_bumask;
31281
31282 if (TARGET_DEBUG_TARGET)
31283 {
31284 fprintf (stderr, "\n==================== rs6000_pragma_target_parse\n");
31285 fprintf (stderr, "args:");
31286 rs6000_debug_target_options (args, " ");
31287 fprintf (stderr, "\n");
31288
31289 if (pop_target)
31290 {
31291 fprintf (stderr, "pop_target:\n");
31292 debug_tree (pop_target);
31293 }
31294 else
31295 fprintf (stderr, "pop_target: <NULL>\n");
31296
31297 fprintf (stderr, "--------------------\n");
31298 }
31299
31300 if (! args)
31301 {
31302 cur_tree = ((pop_target)
31303 ? pop_target
31304 : target_option_default_node);
31305 cl_target_option_restore (&global_options,
31306 TREE_TARGET_OPTION (cur_tree));
31307 }
31308 else
31309 {
31310 rs6000_cpu_index = rs6000_tune_index = -1;
31311 if (!rs6000_inner_target_options (args, false)
31312 || !rs6000_option_override_internal (false)
31313 || (cur_tree = build_target_option_node (&global_options))
31314 == NULL_TREE)
31315 {
31316 if (TARGET_DEBUG_BUILTIN || TARGET_DEBUG_TARGET)
31317 fprintf (stderr, "invalid pragma\n");
31318
31319 return false;
31320 }
31321 }
31322
31323 target_option_current_node = cur_tree;
31324 rs6000_activate_target_options (target_option_current_node);
31325
31326 /* If we have the preprocessor linked in (i.e. C or C++ languages), possibly
31327 change the macros that are defined. */
31328 if (rs6000_target_modify_macros_ptr)
31329 {
31330 prev_opt = TREE_TARGET_OPTION (prev_tree);
31331 prev_bumask = prev_opt->x_rs6000_builtin_mask;
31332 prev_flags = prev_opt->x_rs6000_isa_flags;
31333
31334 cur_opt = TREE_TARGET_OPTION (cur_tree);
31335 cur_flags = cur_opt->x_rs6000_isa_flags;
31336 cur_bumask = cur_opt->x_rs6000_builtin_mask;
31337
31338 diff_bumask = (prev_bumask ^ cur_bumask);
31339 diff_flags = (prev_flags ^ cur_flags);
31340
31341 if ((diff_flags != 0) || (diff_bumask != 0))
31342 {
31343 /* Delete old macros. */
31344 rs6000_target_modify_macros_ptr (false,
31345 prev_flags & diff_flags,
31346 prev_bumask & diff_bumask);
31347
31348 /* Define new macros. */
31349 rs6000_target_modify_macros_ptr (true,
31350 cur_flags & diff_flags,
31351 cur_bumask & diff_bumask);
31352 }
31353 }
31354
31355 return true;
31356 }
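/* Illustrative sketch (not part of this file): the mask bookkeeping the
   macro update above relies on.  XOR isolates the bits that changed;
   ANDing with the old and new flag words splits them into macros to
   delete and macros to define.

     #include <stdio.h>

     int
     main (void)
     {
       unsigned long prev = 0x0f, cur = 0x3c;
       unsigned long diff = prev ^ cur;
       printf ("delete 0x%lx define 0x%lx\n",
               prev & diff,    // set before, clear now -> 0x3
               cur & diff);    // newly set             -> 0x30
       return 0;
     }
*/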
31357
31358 \f
31359 /* Remember the last target of rs6000_set_current_function. */
31360 static GTY(()) tree rs6000_previous_fndecl;
31361
31362 /* Restore target's globals from NEW_TREE and invalidate the
31363 rs6000_previous_fndecl cache. */
31364
31365 void
31366 rs6000_activate_target_options (tree new_tree)
31367 {
31368 cl_target_option_restore (&global_options, TREE_TARGET_OPTION (new_tree));
31369 if (TREE_TARGET_GLOBALS (new_tree))
31370 restore_target_globals (TREE_TARGET_GLOBALS (new_tree));
31371 else if (new_tree == target_option_default_node)
31372 restore_target_globals (&default_target_globals);
31373 else
31374 TREE_TARGET_GLOBALS (new_tree) = save_target_globals_default_opts ();
31375 rs6000_previous_fndecl = NULL_TREE;
31376 }
31377
31378 /* Establish appropriate back-end context for processing the function
31379 FNDECL. The argument might be NULL to indicate processing at top
31380 level, outside of any function scope. */
31381 static void
31382 rs6000_set_current_function (tree fndecl)
31383 {
31384 if (TARGET_DEBUG_TARGET)
31385 {
31386 fprintf (stderr, "\n==================== rs6000_set_current_function");
31387
31388 if (fndecl)
31389 fprintf (stderr, ", fndecl %s (%p)",
31390 (DECL_NAME (fndecl)
31391 ? IDENTIFIER_POINTER (DECL_NAME (fndecl))
31392 : "<unknown>"), (void *)fndecl);
31393
31394 if (rs6000_previous_fndecl)
31395 fprintf (stderr, ", prev_fndecl (%p)", (void *)rs6000_previous_fndecl);
31396
31397 fprintf (stderr, "\n");
31398 }
31399
31400 /* Only change the context if the function changes. This hook is called
31401 several times in the course of compiling a function, and we don't want to
31402 slow things down too much or call target_reinit when it isn't safe. */
31403 if (fndecl == rs6000_previous_fndecl)
31404 return;
31405
31406 tree old_tree;
31407 if (rs6000_previous_fndecl == NULL_TREE)
31408 old_tree = target_option_current_node;
31409 else if (DECL_FUNCTION_SPECIFIC_TARGET (rs6000_previous_fndecl))
31410 old_tree = DECL_FUNCTION_SPECIFIC_TARGET (rs6000_previous_fndecl);
31411 else
31412 old_tree = target_option_default_node;
31413
31414 tree new_tree;
31415 if (fndecl == NULL_TREE)
31416 {
31417 if (old_tree != target_option_current_node)
31418 new_tree = target_option_current_node;
31419 else
31420 new_tree = NULL_TREE;
31421 }
31422 else
31423 {
31424 new_tree = DECL_FUNCTION_SPECIFIC_TARGET (fndecl);
31425 if (new_tree == NULL_TREE)
31426 new_tree = target_option_default_node;
31427 }
31428
31429 if (TARGET_DEBUG_TARGET)
31430 {
31431 if (new_tree)
31432 {
31433 fprintf (stderr, "\nnew fndecl target specific options:\n");
31434 debug_tree (new_tree);
31435 }
31436
31437 if (old_tree)
31438 {
31439 fprintf (stderr, "\nold fndecl target specific options:\n");
31440 debug_tree (old_tree);
31441 }
31442
31443 if (old_tree != NULL_TREE || new_tree != NULL_TREE)
31444 fprintf (stderr, "--------------------\n");
31445 }
31446
31447 if (new_tree && old_tree != new_tree)
31448 rs6000_activate_target_options (new_tree);
31449
31450 if (fndecl)
31451 rs6000_previous_fndecl = fndecl;
31452 }
31453
31454 \f
31455 /* Save the current options */
31456
31457 static void
31458 rs6000_function_specific_save (struct cl_target_option *ptr,
31459 struct gcc_options *opts)
31460 {
31461 ptr->x_rs6000_isa_flags = opts->x_rs6000_isa_flags;
31462 ptr->x_rs6000_isa_flags_explicit = opts->x_rs6000_isa_flags_explicit;
31463 }
31464
31465 /* Restore the current options */
31466
31467 static void
31468 rs6000_function_specific_restore (struct gcc_options *opts,
31469 struct cl_target_option *ptr)
31470
31471 {
31472 opts->x_rs6000_isa_flags = ptr->x_rs6000_isa_flags;
31473 opts->x_rs6000_isa_flags_explicit = ptr->x_rs6000_isa_flags_explicit;
31474 (void) rs6000_option_override_internal (false);
31475 }
31476
31477 /* Print the current options */
31478
31479 static void
31480 rs6000_function_specific_print (FILE *file, int indent,
31481 struct cl_target_option *ptr)
31482 {
31483 rs6000_print_isa_options (file, indent, "Isa options set",
31484 ptr->x_rs6000_isa_flags);
31485
31486 rs6000_print_isa_options (file, indent, "Isa options explicit",
31487 ptr->x_rs6000_isa_flags_explicit);
31488 }
31489
31490 /* Helper function to print the current isa or misc options on a line. */
31491
31492 static void
31493 rs6000_print_options_internal (FILE *file,
31494 int indent,
31495 const char *string,
31496 HOST_WIDE_INT flags,
31497 const char *prefix,
31498 const struct rs6000_opt_mask *opts,
31499 size_t num_elements)
31500 {
31501 size_t i;
31502 size_t start_column = 0;
31503 size_t cur_column;
31504 size_t max_column = 120;
31505 size_t prefix_len = strlen (prefix);
31506 size_t comma_len = 0;
31507 const char *comma = "";
31508
31509 if (indent)
31510 start_column += fprintf (file, "%*s", indent, "");
31511
31512 if (!flags)
31513 {
31514 fprintf (file, DEBUG_FMT_S, string, "<none>");
31515 return;
31516 }
31517
31518 start_column += fprintf (file, DEBUG_FMT_WX, string, flags);
31519
31520 /* Print the various mask options. */
31521 cur_column = start_column;
31522 for (i = 0; i < num_elements; i++)
31523 {
31524 bool invert = opts[i].invert;
31525 const char *name = opts[i].name;
31526 const char *no_str = "";
31527 HOST_WIDE_INT mask = opts[i].mask;
31528 size_t len = comma_len + prefix_len + strlen (name);
31529
31530 if (!invert)
31531 {
31532 if ((flags & mask) == 0)
31533 {
31534 no_str = "no-";
31535 len += sizeof ("no-") - 1;
31536 }
31537
31538 flags &= ~mask;
31539 }
31540
31541 else
31542 {
31543 if ((flags & mask) != 0)
31544 {
31545 no_str = "no-";
31546 len += sizeof ("no-") - 1;
31547 }
31548
31549 flags |= mask;
31550 }
31551
31552 cur_column += len;
31553 if (cur_column > max_column)
31554 {
31555 fprintf (file, ", \\\n%*s", (int)start_column, "");
31556 cur_column = start_column + len;
31557 comma = "";
31558 }
31559
31560 fprintf (file, "%s%s%s%s", comma, prefix, no_str, name);
31561 comma = ", ";
31562 comma_len = sizeof (", ") - 1;
31563 }
31564
31565 fputs ("\n", file);
31566 }
31567
31568 /* Helper function to print the current isa options on a line. */
31569
31570 static void
31571 rs6000_print_isa_options (FILE *file, int indent, const char *string,
31572 HOST_WIDE_INT flags)
31573 {
31574 rs6000_print_options_internal (file, indent, string, flags, "-m",
31575 &rs6000_opt_masks[0],
31576 ARRAY_SIZE (rs6000_opt_masks));
31577 }
31578
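/* Helper function to print the current builtin options on a line.  */
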
31579 static void
31580 rs6000_print_builtin_options (FILE *file, int indent, const char *string,
31581 HOST_WIDE_INT flags)
31582 {
31583 rs6000_print_options_internal (file, indent, string, flags, "",
31584 &rs6000_builtin_mask_names[0],
31585 ARRAY_SIZE (rs6000_builtin_mask_names));
31586 }
31587
31588 /* If the user used -mno-vsx, we need to turn off all of the implicit ISA 2.06,
31589 2.07, and 3.0 options that relate to the vector unit (-mdirect-move,
31590 -mupper-regs-df, etc.).
31591
31592 If the user used -mno-power8-vector, we need to turn off all of the implicit
31593 ISA 2.07 and 3.0 options that relate to the vector unit.
31594
31595 If the user used -mno-power9-vector, we need to turn off all of the implicit
31596 ISA 3.0 options that relate to the vector unit.
31597
31598 This function does not handle explicit options such as the user specifying
31599 -mdirect-move. These are handled in rs6000_option_override_internal, and
31600 the appropriate error is given if needed.
31601
31602 We return a mask of all of the implicit options that should not be enabled
31603 by default. */
31604
31605 static HOST_WIDE_INT
31606 rs6000_disable_incompatible_switches (void)
31607 {
31608 HOST_WIDE_INT ignore_masks = rs6000_isa_flags_explicit;
31609 size_t i, j;
31610
31611 static const struct {
31612 const HOST_WIDE_INT no_flag; /* flag explicitly turned off. */
31613 const HOST_WIDE_INT dep_flags; /* flags that depend on this option. */
31614 const char *const name; /* name of the switch. */
31615 } flags[] = {
31616 { OPTION_MASK_FUTURE, OTHER_FUTURE_MASKS, "future" },
31617 { OPTION_MASK_P9_VECTOR, OTHER_P9_VECTOR_MASKS, "power9-vector" },
31618 { OPTION_MASK_P8_VECTOR, OTHER_P8_VECTOR_MASKS, "power8-vector" },
31619 { OPTION_MASK_VSX, OTHER_VSX_VECTOR_MASKS, "vsx" },
31620 };
31621
31622 for (i = 0; i < ARRAY_SIZE (flags); i++)
31623 {
31624 HOST_WIDE_INT no_flag = flags[i].no_flag;
31625
31626 if ((rs6000_isa_flags & no_flag) == 0
31627 && (rs6000_isa_flags_explicit & no_flag) != 0)
31628 {
31629 HOST_WIDE_INT dep_flags = flags[i].dep_flags;
31630 HOST_WIDE_INT set_flags = (rs6000_isa_flags_explicit
31631 & rs6000_isa_flags
31632 & dep_flags);
31633
31634 if (set_flags)
31635 {
31636 for (j = 0; j < ARRAY_SIZE (rs6000_opt_masks); j++)
31637 if ((set_flags & rs6000_opt_masks[j].mask) != 0)
31638 {
31639 set_flags &= ~rs6000_opt_masks[j].mask;
31640 error ("%<-mno-%s%> turns off %<-m%s%>",
31641 flags[i].name,
31642 rs6000_opt_masks[j].name);
31643 }
31644
31645 gcc_assert (!set_flags);
31646 }
31647
31648 rs6000_isa_flags &= ~dep_flags;
31649 ignore_masks |= no_flag | dep_flags;
31650 }
31651 }
31652
31653 return ignore_masks;
31654 }
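
/* For illustration only (hypothetical command line, not taken from this
   file): compiling with "-mcpu=power9 -mno-vsx -mpower8-vector" trips the
   loop above, since -mpower8-vector was requested explicitly yet depends
   on VSX, giving a diagnostic along the lines of:

       error: '-mno-vsx' turns off '-mpower8-vector'

   Implicitly enabled dependent options, by contrast, are silently cleared
   from rs6000_isa_flags and folded into the returned ignore mask.  */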
31655
31656 \f
31657 /* Helper function for printing the function name when debugging. */
31658
31659 static const char *
31660 get_decl_name (tree fn)
31661 {
31662 tree name;
31663
31664 if (!fn)
31665 return "<null>";
31666
31667 name = DECL_NAME (fn);
31668 if (!name)
31669 return "<no-name>";
31670
31671 return IDENTIFIER_POINTER (name);
31672 }
31673
31674 /* Return the clone id of the target we are compiling code for in a target
31675 clone. The clone id is ordered from 0 (default) to CLONE_MAX-1 and gives
31676 the priority list for the target clones (ordered from lowest to
31677 highest). */
31678
31679 static int
31680 rs6000_clone_priority (tree fndecl)
31681 {
31682 tree fn_opts = DECL_FUNCTION_SPECIFIC_TARGET (fndecl);
31683 HOST_WIDE_INT isa_masks;
31684 int ret = CLONE_DEFAULT;
31685 tree attrs = lookup_attribute ("target", DECL_ATTRIBUTES (fndecl));
31686 const char *attrs_str = NULL;
31687
31688 attrs = TREE_VALUE (TREE_VALUE (attrs));
31689 attrs_str = TREE_STRING_POINTER (attrs);
31690
31691 /* Return priority zero for default function. Return the ISA needed for the
31692 function if it is not the default. */
31693 if (strcmp (attrs_str, "default") != 0)
31694 {
31695 if (fn_opts == NULL_TREE)
31696 fn_opts = target_option_default_node;
31697
31698 if (!fn_opts || !TREE_TARGET_OPTION (fn_opts))
31699 isa_masks = rs6000_isa_flags;
31700 else
31701 isa_masks = TREE_TARGET_OPTION (fn_opts)->x_rs6000_isa_flags;
31702
31703 for (ret = CLONE_MAX - 1; ret != 0; ret--)
31704 if ((rs6000_clone_map[ret].isa_mask & isa_masks) != 0)
31705 break;
31706 }
31707
31708 if (TARGET_DEBUG_TARGET)
31709 fprintf (stderr, "rs6000_clone_priority (%s) => %d\n",
31710 get_decl_name (fndecl), ret);
31711
31712 return ret;
31713 }
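
/* Purely illustrative user code (not part of this file) exercising the
   clone priorities above, using the documented PowerPC target_clones
   syntax:

     __attribute__ ((target_clones ("cpu=power9,default")))
     long mod3 (long a) { return a % 3; }

   The "default" version keeps priority CLONE_DEFAULT (0); the power9
   version maps to a higher clone id, so the dispatcher tests it first.  */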
31714
31715 /* This compares the priority of target features in function DECL1 and DECL2.
31716 It returns positive value if DECL1 is higher priority, negative value if
31717 DECL2 is higher priority and 0 if they are the same. Note, priorities are
31718 ordered from lowest (CLONE_DEFAULT) to highest (currently CLONE_ISA_3_0). */
31719
31720 static int
31721 rs6000_compare_version_priority (tree decl1, tree decl2)
31722 {
31723 int priority1 = rs6000_clone_priority (decl1);
31724 int priority2 = rs6000_clone_priority (decl2);
31725 int ret = priority1 - priority2;
31726
31727 if (TARGET_DEBUG_TARGET)
31728 fprintf (stderr, "rs6000_compare_version_priority (%s, %s) => %d\n",
31729 get_decl_name (decl1), get_decl_name (decl2), ret);
31730
31731 return ret;
31732 }
31733
31734 /* Make a dispatcher declaration for the multi-versioned function DECL.
31735 Calls to the DECL function will be replaced with calls to the dispatcher
31736 by the front-end. Returns the decl of the dispatcher function. */
31737
31738 static tree
31739 rs6000_get_function_versions_dispatcher (void *decl)
31740 {
31741 tree fn = (tree) decl;
31742 struct cgraph_node *node = NULL;
31743 struct cgraph_node *default_node = NULL;
31744 struct cgraph_function_version_info *node_v = NULL;
31745 struct cgraph_function_version_info *first_v = NULL;
31746
31747 tree dispatch_decl = NULL;
31748
31749 struct cgraph_function_version_info *default_version_info = NULL;
31750 gcc_assert (fn != NULL && DECL_FUNCTION_VERSIONED (fn));
31751
31752 if (TARGET_DEBUG_TARGET)
31753 fprintf (stderr, "rs6000_get_function_versions_dispatcher (%s)\n",
31754 get_decl_name (fn));
31755
31756 node = cgraph_node::get (fn);
31757 gcc_assert (node != NULL);
31758
31759 node_v = node->function_version ();
31760 gcc_assert (node_v != NULL);
31761
31762 if (node_v->dispatcher_resolver != NULL)
31763 return node_v->dispatcher_resolver;
31764
31765 /* Find the default version and make it the first node. */
31766 first_v = node_v;
31767 /* Go to the beginning of the chain. */
31768 while (first_v->prev != NULL)
31769 first_v = first_v->prev;
31770
31771 default_version_info = first_v;
31772 while (default_version_info != NULL)
31773 {
31774 const tree decl2 = default_version_info->this_node->decl;
31775 if (is_function_default_version (decl2))
31776 break;
31777 default_version_info = default_version_info->next;
31778 }
31779
31780 /* If there is no default node, just return NULL. */
31781 if (default_version_info == NULL)
31782 return NULL;
31783
31784 /* Make default info the first node. */
31785 if (first_v != default_version_info)
31786 {
31787 default_version_info->prev->next = default_version_info->next;
31788 if (default_version_info->next)
31789 default_version_info->next->prev = default_version_info->prev;
31790 first_v->prev = default_version_info;
31791 default_version_info->next = first_v;
31792 default_version_info->prev = NULL;
31793 }
31794
31795 default_node = default_version_info->this_node;
31796
31797 #ifndef TARGET_LIBC_PROVIDES_HWCAP_IN_TCB
31798 error_at (DECL_SOURCE_LOCATION (default_node->decl),
31799 "%<target_clones%> attribute needs GLIBC (2.23 and newer) that "
31800 "exports hardware capability bits");
31801 #else
31802
31803 if (targetm.has_ifunc_p ())
31804 {
31805 struct cgraph_function_version_info *it_v = NULL;
31806 struct cgraph_node *dispatcher_node = NULL;
31807 struct cgraph_function_version_info *dispatcher_version_info = NULL;
31808
31809 /* Right now, the dispatching is done via ifunc. */
31810 dispatch_decl = make_dispatcher_decl (default_node->decl);
31811
31812 dispatcher_node = cgraph_node::get_create (dispatch_decl);
31813 gcc_assert (dispatcher_node != NULL);
31814 dispatcher_node->dispatcher_function = 1;
31815 dispatcher_version_info
31816 = dispatcher_node->insert_new_function_version ();
31817 dispatcher_version_info->next = default_version_info;
31818 dispatcher_node->definition = 1;
31819
31820 /* Set the dispatcher for all the versions. */
31821 it_v = default_version_info;
31822 while (it_v != NULL)
31823 {
31824 it_v->dispatcher_resolver = dispatch_decl;
31825 it_v = it_v->next;
31826 }
31827 }
31828 else
31829 {
31830 error_at (DECL_SOURCE_LOCATION (default_node->decl),
31831 "multiversioning needs ifunc which is not supported "
31832 "on this target");
31833 }
31834 #endif
31835
31836 return dispatch_decl;
31837 }
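
/* Schematic of the resulting dispatch scheme, for orientation (the clone
   names and the "arch_3_00" feature string below are illustrative
   assumptions): for a versioned function "foo", the decl returned above is
   emitted as a GNU ifunc whose resolver (built later by
   rs6000_generate_version_dispatcher_body) returns the most capable clone
   the running CPU supports, conceptually:

     foo = ifunc (foo.resolver);
     foo.resolver:
       if (__builtin_cpu_supports ("arch_3_00")) return &foo.power9;
       ...
       return &foo.default;  */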
31838
31839 /* Make the resolver function decl to dispatch the versions of a multi-
31840 versioned function, DEFAULT_DECL. Create an empty basic block in the
31841 resolver and store the pointer in EMPTY_BB. Return the decl of the resolver
31842 function. */
31843
31844 static tree
31845 make_resolver_func (const tree default_decl,
31846 const tree dispatch_decl,
31847 basic_block *empty_bb)
31848 {
31849 /* Make the resolver function static. The resolver function returns
31850 void *. */
31851 tree decl_name = clone_function_name (default_decl, "resolver");
31852 const char *resolver_name = IDENTIFIER_POINTER (decl_name);
31853 tree type = build_function_type_list (ptr_type_node, NULL_TREE);
31854 tree decl = build_fn_decl (resolver_name, type);
31855 SET_DECL_ASSEMBLER_NAME (decl, decl_name);
31856
31857 DECL_NAME (decl) = decl_name;
31858 TREE_USED (decl) = 1;
31859 DECL_ARTIFICIAL (decl) = 1;
31860 DECL_IGNORED_P (decl) = 0;
31861 TREE_PUBLIC (decl) = 0;
31862 DECL_UNINLINABLE (decl) = 1;
31863
31864 /* Resolver is not external, body is generated. */
31865 DECL_EXTERNAL (decl) = 0;
31866 DECL_EXTERNAL (dispatch_decl) = 0;
31867
31868 DECL_CONTEXT (decl) = NULL_TREE;
31869 DECL_INITIAL (decl) = make_node (BLOCK);
31870 DECL_STATIC_CONSTRUCTOR (decl) = 0;
31871
31872 /* Build result decl and add to function_decl. */
31873 tree t = build_decl (UNKNOWN_LOCATION, RESULT_DECL, NULL_TREE, ptr_type_node);
31874 DECL_CONTEXT (t) = decl;
31875 DECL_ARTIFICIAL (t) = 1;
31876 DECL_IGNORED_P (t) = 1;
31877 DECL_RESULT (decl) = t;
31878
31879 gimplify_function_tree (decl);
31880 push_cfun (DECL_STRUCT_FUNCTION (decl));
31881 *empty_bb = init_lowered_empty_function (decl, false,
31882 profile_count::uninitialized ());
31883
31884 cgraph_node::add_new_function (decl, true);
31885 symtab->call_cgraph_insertion_hooks (cgraph_node::get_create (decl));
31886
31887 pop_cfun ();
31888
31889 /* Mark dispatch_decl as "ifunc" with resolver as resolver_name. */
31890 DECL_ATTRIBUTES (dispatch_decl)
31891 = make_attribute ("ifunc", resolver_name, DECL_ATTRIBUTES (dispatch_decl));
31892
31893 cgraph_node::create_same_body_alias (dispatch_decl, decl);
31894
31895 return decl;
31896 }
31897
31898 /* This adds a condition to the basic_block NEW_BB in function FUNCTION_DECL to
31899 return a pointer to VERSION_DECL if we are running on a machine that
31900 supports the hardware architecture bits selected by index CLONE_ISA.  This function will
31901 be called during version dispatch to decide which function version to
31902 execute. It returns the basic block at the end, to which more conditions
31903 can be added. */
31904
31905 static basic_block
31906 add_condition_to_bb (tree function_decl, tree version_decl,
31907 int clone_isa, basic_block new_bb)
31908 {
31909 push_cfun (DECL_STRUCT_FUNCTION (function_decl));
31910
31911 gcc_assert (new_bb != NULL);
31912 gimple_seq gseq = bb_seq (new_bb);
31913
31915 tree convert_expr = build1 (CONVERT_EXPR, ptr_type_node,
31916 build_fold_addr_expr (version_decl));
31917 tree result_var = create_tmp_var (ptr_type_node);
31918 gimple *convert_stmt = gimple_build_assign (result_var, convert_expr);
31919 gimple *return_stmt = gimple_build_return (result_var);
31920
31921 if (clone_isa == CLONE_DEFAULT)
31922 {
31923 gimple_seq_add_stmt (&gseq, convert_stmt);
31924 gimple_seq_add_stmt (&gseq, return_stmt);
31925 set_bb_seq (new_bb, gseq);
31926 gimple_set_bb (convert_stmt, new_bb);
31927 gimple_set_bb (return_stmt, new_bb);
31928 pop_cfun ();
31929 return new_bb;
31930 }
31931
31932 tree bool_zero = build_int_cst (bool_int_type_node, 0);
31933 tree cond_var = create_tmp_var (bool_int_type_node);
31934 tree predicate_decl = rs6000_builtin_decls [(int) RS6000_BUILTIN_CPU_SUPPORTS];
31935 const char *arg_str = rs6000_clone_map[clone_isa].name;
31936 tree predicate_arg = build_string_literal (strlen (arg_str) + 1, arg_str);
31937 gimple *call_cond_stmt = gimple_build_call (predicate_decl, 1, predicate_arg);
31938 gimple_call_set_lhs (call_cond_stmt, cond_var);
31939
31940 gimple_set_block (call_cond_stmt, DECL_INITIAL (function_decl));
31941 gimple_set_bb (call_cond_stmt, new_bb);
31942 gimple_seq_add_stmt (&gseq, call_cond_stmt);
31943
31944 gimple *if_else_stmt = gimple_build_cond (NE_EXPR, cond_var, bool_zero,
31945 NULL_TREE, NULL_TREE);
31946 gimple_set_block (if_else_stmt, DECL_INITIAL (function_decl));
31947 gimple_set_bb (if_else_stmt, new_bb);
31948 gimple_seq_add_stmt (&gseq, if_else_stmt);
31949
31950 gimple_seq_add_stmt (&gseq, convert_stmt);
31951 gimple_seq_add_stmt (&gseq, return_stmt);
31952 set_bb_seq (new_bb, gseq);
31953
31954 basic_block bb1 = new_bb;
31955 edge e12 = split_block (bb1, if_else_stmt);
31956 basic_block bb2 = e12->dest;
31957 e12->flags &= ~EDGE_FALLTHRU;
31958 e12->flags |= EDGE_TRUE_VALUE;
31959
31960 edge e23 = split_block (bb2, return_stmt);
31961 gimple_set_bb (convert_stmt, bb2);
31962 gimple_set_bb (return_stmt, bb2);
31963
31964 basic_block bb3 = e23->dest;
31965 make_edge (bb1, bb3, EDGE_FALSE_VALUE);
31966
31967 remove_edge (e23);
31968 make_edge (bb2, EXIT_BLOCK_PTR_FOR_FN (cfun), 0);
31969
31970 pop_cfun ();
31971 return bb3;
31972 }
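
/* Shape of the GIMPLE appended for one non-default clone, purely
   illustrative (the "arch_2_07" string is an assumed rs6000_clone_map
   entry):

     bb1:  cond = __builtin_cpu_supports ("arch_2_07");
           if (cond != 0) goto bb2; else goto bb3;
     bb2:  return (void *) &foo.power8;
     bb3:  <returned; the next, lower-priority test is appended here>  */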
31973
31974 /* This function generates the dispatch function for multi-versioned functions.
31975 DISPATCH_DECL is the function which will contain the dispatch logic.
31976 FNDECLS are the function choices for dispatch, and is a tree chain.
31977 EMPTY_BB is the basic block pointer in DISPATCH_DECL in which the dispatch
31978 code is generated. */
31979
31980 static int
31981 dispatch_function_versions (tree dispatch_decl,
31982 void *fndecls_p,
31983 basic_block *empty_bb)
31984 {
31985 int ix;
31986 tree ele;
31987 vec<tree> *fndecls;
31988 tree clones[CLONE_MAX];
31989
31990 if (TARGET_DEBUG_TARGET)
31991 fputs ("dispatch_function_versions, top\n", stderr);
31992
31993 gcc_assert (dispatch_decl != NULL
31994 && fndecls_p != NULL
31995 && empty_bb != NULL);
31996
31997 /* fndecls_p is actually a vector. */
31998 fndecls = static_cast<vec<tree> *> (fndecls_p);
31999
32000 /* At least one more version other than the default. */
32001 gcc_assert (fndecls->length () >= 2);
32002
32003 /* The first version in the vector is the default decl. */
32004 memset ((void *) clones, '\0', sizeof (clones));
32005 clones[CLONE_DEFAULT] = (*fndecls)[0];
32006
32007 /* On the PowerPC, we do not need to call __builtin_cpu_init, which is a NOP
32008 on the PowerPC (on the x86_64, it is not a NOP). The builtin function
32009 __builtin_cpu_supports ensures that the TOC fields are set up by requiring a
32010 recent glibc. If we ever need to call __builtin_cpu_init, we would need
32011 to insert the code here to do the call. */
32012
32013 for (ix = 1; fndecls->iterate (ix, &ele); ++ix)
32014 {
32015 int priority = rs6000_clone_priority (ele);
32016 if (!clones[priority])
32017 clones[priority] = ele;
32018 }
32019
32020 for (ix = CLONE_MAX - 1; ix >= 0; ix--)
32021 if (clones[ix])
32022 {
32023 if (TARGET_DEBUG_TARGET)
32024 fprintf (stderr, "dispatch_function_versions, clone %d, %s\n",
32025 ix, get_decl_name (clones[ix]));
32026
32027 *empty_bb = add_condition_to_bb (dispatch_decl, clones[ix], ix,
32028 *empty_bb);
32029 }
32030
32031 return 0;
32032 }
32033
32034 /* Generate the dispatching code body to dispatch multi-versioned function
32035 DECL. The target hook is called to process the "target" attributes and
32036 provide the code to dispatch the right function at run-time. NODE points
32037 to the dispatcher decl whose body will be created. */
32038
32039 static tree
32040 rs6000_generate_version_dispatcher_body (void *node_p)
32041 {
32042 tree resolver;
32043 basic_block empty_bb;
32044 struct cgraph_node *node = (cgraph_node *) node_p;
32045 struct cgraph_function_version_info *ninfo = node->function_version ();
32046
32047 if (ninfo->dispatcher_resolver)
32048 return ninfo->dispatcher_resolver;
32049
32050 /* node is going to be an alias, so remove the finalized bit. */
32051 node->definition = false;
32052
32053 /* The first version in the chain corresponds to the default version. */
32054 ninfo->dispatcher_resolver = resolver
32055 = make_resolver_func (ninfo->next->this_node->decl, node->decl, &empty_bb);
32056
32057 if (TARGET_DEBUG_TARGET)
32058 fprintf (stderr, "rs6000_generate_version_dispatcher_body, %s\n",
32059 get_decl_name (resolver));
32060
32061 push_cfun (DECL_STRUCT_FUNCTION (resolver));
32062 auto_vec<tree, 2> fn_ver_vec;
32063
32064 for (struct cgraph_function_version_info *vinfo = ninfo->next;
32065 vinfo;
32066 vinfo = vinfo->next)
32067 {
32068 struct cgraph_node *version = vinfo->this_node;
32069 /* Check for virtual functions here again, as by this time it should
32070 have been determined if this function needs a vtable index or
32071 not. This happens for methods in derived classes that override
32072 virtual methods in base classes but are not explicitly marked as
32073 virtual. */
32074 if (DECL_VINDEX (version->decl))
32075 sorry ("Virtual function multiversioning not supported");
32076
32077 fn_ver_vec.safe_push (version->decl);
32078 }
32079
32080 dispatch_function_versions (resolver, &fn_ver_vec, &empty_bb);
32081 cgraph_edge::rebuild_edges ();
32082 pop_cfun ();
32083 return resolver;
32084 }
32085
32086 \f
32087 /* Hook to determine if one function can safely inline another. */
32088
32089 static bool
32090 rs6000_can_inline_p (tree caller, tree callee)
32091 {
32092 bool ret = false;
32093 tree caller_tree = DECL_FUNCTION_SPECIFIC_TARGET (caller);
32094 tree callee_tree = DECL_FUNCTION_SPECIFIC_TARGET (callee);
32095
32096 /* If callee has no option attributes, then it is ok to inline. */
32097 if (!callee_tree)
32098 ret = true;
32099
32100 /* If caller has no option attributes, but callee does then it is not ok to
32101 inline. */
32102 else if (!caller_tree)
32103 ret = false;
32104
32105 else
32106 {
32107 struct cl_target_option *caller_opts = TREE_TARGET_OPTION (caller_tree);
32108 struct cl_target_option *callee_opts = TREE_TARGET_OPTION (callee_tree);
32109
32110 /* Callee's options should be a subset of the caller's, i.e. a vsx function
32111 can inline an altivec function but a non-vsx function can't inline a
32112 vsx function. */
32113 if ((caller_opts->x_rs6000_isa_flags & callee_opts->x_rs6000_isa_flags)
32114 == callee_opts->x_rs6000_isa_flags)
32115 ret = true;
32116 }
32117
32118 if (TARGET_DEBUG_TARGET)
32119 fprintf (stderr, "rs6000_can_inline_p: caller %s, callee %s, %s inline\n",
32120 get_decl_name (caller), get_decl_name (callee),
32121 (ret ? "can" : "cannot"));
32122
32123 return ret;
32124 }
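
/* Worked example of the subset test above, with made-up flag sets: if the
   caller's isa flags contain ALTIVEC | VSX and the callee's contain only
   ALTIVEC, then (caller & callee) == callee holds and the callee may be
   inlined.  With the roles reversed the caller lacks VSX, the equality
   fails, and inlining is refused.  */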
32125 \f
32126 /* Allocate a stack temp and fixup the address so it meets the particular
32127 memory requirements (either offsettable or REG+REG addressing).  */
32128
32129 rtx
32130 rs6000_allocate_stack_temp (machine_mode mode,
32131 bool offsettable_p,
32132 bool reg_reg_p)
32133 {
32134 rtx stack = assign_stack_temp (mode, GET_MODE_SIZE (mode));
32135 rtx addr = XEXP (stack, 0);
32136 int strict_p = reload_completed;
32137
32138 if (!legitimate_indirect_address_p (addr, strict_p))
32139 {
32140 if (offsettable_p
32141 && !rs6000_legitimate_offset_address_p (mode, addr, strict_p, true))
32142 stack = replace_equiv_address (stack, copy_addr_to_reg (addr));
32143
32144 else if (reg_reg_p && !legitimate_indexed_address_p (addr, strict_p))
32145 stack = replace_equiv_address (stack, copy_addr_to_reg (addr));
32146 }
32147
32148 return stack;
32149 }
32150
32151 /* Given a memory reference, if it does not use reg or reg+reg addressing,
32152 convert it to such a form, to deal with memory reference instructions
32153 like STFIWX and LDBRX that only take reg+reg addressing.  */
32154
32155 rtx
32156 rs6000_force_indexed_or_indirect_mem (rtx x)
32157 {
32158 machine_mode mode = GET_MODE (x);
32159
32160 gcc_assert (MEM_P (x));
32161 if (can_create_pseudo_p () && !indexed_or_indirect_operand (x, mode))
32162 {
32163 rtx addr = XEXP (x, 0);
32164 if (GET_CODE (addr) == PRE_INC || GET_CODE (addr) == PRE_DEC)
32165 {
32166 rtx reg = XEXP (addr, 0);
32167 HOST_WIDE_INT size = GET_MODE_SIZE (GET_MODE (x));
32168 rtx size_rtx = GEN_INT ((GET_CODE (addr) == PRE_DEC) ? -size : size);
32169 gcc_assert (REG_P (reg));
32170 emit_insn (gen_add3_insn (reg, reg, size_rtx));
32171 addr = reg;
32172 }
32173 else if (GET_CODE (addr) == PRE_MODIFY)
32174 {
32175 rtx reg = XEXP (addr, 0);
32176 rtx expr = XEXP (addr, 1);
32177 gcc_assert (REG_P (reg));
32178 gcc_assert (GET_CODE (expr) == PLUS);
32179 emit_insn (gen_add3_insn (reg, XEXP (expr, 0), XEXP (expr, 1)));
32180 addr = reg;
32181 }
32182
32183 x = replace_equiv_address (x, force_reg (Pmode, addr));
32184 }
32185
32186 return x;
32187 }
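
/* For example (illustrative RTL): given the auto-increment reference
     (mem:DI (pre_inc:DI (reg:DI 9)))
   the code above emits "r9 = r9 + 8" and rewrites the reference as
     (mem:DI (reg:DI 9))
   which satisfies the reg or reg+reg forms that STFIWX and LDBRX need.  */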
32188
32189 /* Implement TARGET_LEGITIMATE_CONSTANT_P.
32190
32191 On the RS/6000, all integer constants are acceptable, though most won't be
32192 valid for particular insns.  Only easy FP constants are acceptable.
32193
32194 static bool
32195 rs6000_legitimate_constant_p (machine_mode mode, rtx x)
32196 {
32197 if (TARGET_ELF && tls_referenced_p (x))
32198 return false;
32199
32200 if (CONST_DOUBLE_P (x))
32201 return easy_fp_constant (x, mode);
32202
32203 if (GET_CODE (x) == CONST_VECTOR)
32204 return easy_vector_constant (x, mode);
32205
32206 return true;
32207 }
32208
32209 \f
32210 /* Return TRUE iff the sequence ending in LAST sets the static chain. */
32211
32212 static bool
32213 chain_already_loaded (rtx_insn *last)
32214 {
32215 for (; last != NULL; last = PREV_INSN (last))
32216 {
32217 if (NONJUMP_INSN_P (last))
32218 {
32219 rtx patt = PATTERN (last);
32220
32221 if (GET_CODE (patt) == SET)
32222 {
32223 rtx lhs = XEXP (patt, 0);
32224
32225 if (REG_P (lhs) && REGNO (lhs) == STATIC_CHAIN_REGNUM)
32226 return true;
32227 }
32228 }
32229 }
32230 return false;
32231 }
32232
32233 /* Expand code to perform a call under the AIX or ELFv2 ABI. */
32234
32235 void
32236 rs6000_call_aix (rtx value, rtx func_desc, rtx tlsarg, rtx cookie)
32237 {
32238 rtx func = func_desc;
32239 rtx toc_reg = gen_rtx_REG (Pmode, TOC_REGNUM);
32240 rtx toc_load = NULL_RTX;
32241 rtx toc_restore = NULL_RTX;
32242 rtx func_addr;
32243 rtx abi_reg = NULL_RTX;
32244 rtx call[4];
32245 int n_call;
32246 rtx insn;
32247 bool is_pltseq_longcall;
32248
32249 if (global_tlsarg)
32250 tlsarg = global_tlsarg;
32251
32252 /* Handle longcall attributes. */
32253 is_pltseq_longcall = false;
32254 if ((INTVAL (cookie) & CALL_LONG) != 0
32255 && GET_CODE (func_desc) == SYMBOL_REF)
32256 {
32257 func = rs6000_longcall_ref (func_desc, tlsarg);
32258 if (TARGET_PLTSEQ)
32259 is_pltseq_longcall = true;
32260 }
32261
32262 /* Handle indirect calls. */
32263 if (!SYMBOL_REF_P (func)
32264 || (DEFAULT_ABI == ABI_AIX && !SYMBOL_REF_FUNCTION_P (func)))
32265 {
32266 if (!rs6000_pcrel_p (cfun))
32267 {
32268 /* Save the TOC into its reserved slot before the call,
32269 and prepare to restore it after the call. */
32270 rtx stack_toc_offset = GEN_INT (RS6000_TOC_SAVE_SLOT);
32271 rtx stack_toc_unspec = gen_rtx_UNSPEC (Pmode,
32272 gen_rtvec (1, stack_toc_offset),
32273 UNSPEC_TOCSLOT);
32274 toc_restore = gen_rtx_SET (toc_reg, stack_toc_unspec);
32275
32276 /* Can we optimize saving the TOC in the prologue or
32277 do we need to do it at every call? */
32278 if (TARGET_SAVE_TOC_INDIRECT && !cfun->calls_alloca)
32279 cfun->machine->save_toc_in_prologue = true;
32280 else
32281 {
32282 rtx stack_ptr = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
32283 rtx stack_toc_mem = gen_frame_mem (Pmode,
32284 gen_rtx_PLUS (Pmode, stack_ptr,
32285 stack_toc_offset));
32286 MEM_VOLATILE_P (stack_toc_mem) = 1;
32287 if (is_pltseq_longcall)
32288 {
32289 rtvec v = gen_rtvec (3, toc_reg, func_desc, tlsarg);
32290 rtx mark_toc_reg = gen_rtx_UNSPEC (Pmode, v, UNSPEC_PLTSEQ);
32291 emit_insn (gen_rtx_SET (stack_toc_mem, mark_toc_reg));
32292 }
32293 else
32294 emit_move_insn (stack_toc_mem, toc_reg);
32295 }
32296 }
32297
32298 if (DEFAULT_ABI == ABI_ELFv2)
32299 {
32300 /* A function pointer in the ELFv2 ABI is just a plain address, but
32301 the ABI requires it to be loaded into r12 before the call. */
32302 func_addr = gen_rtx_REG (Pmode, 12);
32303 if (!rtx_equal_p (func_addr, func))
32304 emit_move_insn (func_addr, func);
32305 abi_reg = func_addr;
32306 /* Indirect calls via CTR are strongly preferred over indirect
32307 calls via LR, so move the address there. Needed to mark
32308 this insn for linker plt sequence editing too. */
32309 func_addr = gen_rtx_REG (Pmode, CTR_REGNO);
32310 if (is_pltseq_longcall)
32311 {
32312 rtvec v = gen_rtvec (3, abi_reg, func_desc, tlsarg);
32313 rtx mark_func = gen_rtx_UNSPEC (Pmode, v, UNSPEC_PLTSEQ);
32314 emit_insn (gen_rtx_SET (func_addr, mark_func));
32315 v = gen_rtvec (2, func_addr, func_desc);
32316 func_addr = gen_rtx_UNSPEC (Pmode, v, UNSPEC_PLTSEQ);
32317 }
32318 else
32319 emit_move_insn (func_addr, abi_reg);
32320 }
32321 else
32322 {
32323 /* A function pointer under AIX is a pointer to a data area whose
32324 first word contains the actual address of the function, whose
32325 second word contains a pointer to its TOC, and whose third word
32326 contains a value to place in the static chain register (r11).
32327 Note that if we load the static chain, our "trampoline" need
32328 not have any executable code. */
32329
32330 /* Load up address of the actual function. */
32331 func = force_reg (Pmode, func);
32332 func_addr = gen_reg_rtx (Pmode);
32333 emit_move_insn (func_addr, gen_rtx_MEM (Pmode, func));
32334
32335 /* Indirect calls via CTR are strongly preferred over indirect
32336 calls via LR, so move the address there. */
32337 rtx ctr_reg = gen_rtx_REG (Pmode, CTR_REGNO);
32338 emit_move_insn (ctr_reg, func_addr);
32339 func_addr = ctr_reg;
32340
32341 /* Prepare to load the TOC of the called function. Note that the
32342 TOC load must happen immediately before the actual call so
32343 that unwinding the TOC registers works correctly. See the
32344 comment in frob_update_context. */
32345 rtx func_toc_offset = GEN_INT (GET_MODE_SIZE (Pmode));
32346 rtx func_toc_mem = gen_rtx_MEM (Pmode,
32347 gen_rtx_PLUS (Pmode, func,
32348 func_toc_offset));
32349 toc_load = gen_rtx_USE (VOIDmode, func_toc_mem);
32350
32351 /* If we have a static chain, load it up. But, if the call was
32352 originally direct, the 3rd word has not been written since no
32353 trampoline has been built, so we ought not to load it, lest we
32354 override a static chain value. */
32355 if (!(GET_CODE (func_desc) == SYMBOL_REF
32356 && SYMBOL_REF_FUNCTION_P (func_desc))
32357 && TARGET_POINTERS_TO_NESTED_FUNCTIONS
32358 && !chain_already_loaded (get_current_sequence ()->next->last))
32359 {
32360 rtx sc_reg = gen_rtx_REG (Pmode, STATIC_CHAIN_REGNUM);
32361 rtx func_sc_offset = GEN_INT (2 * GET_MODE_SIZE (Pmode));
32362 rtx func_sc_mem = gen_rtx_MEM (Pmode,
32363 gen_rtx_PLUS (Pmode, func,
32364 func_sc_offset));
32365 emit_move_insn (sc_reg, func_sc_mem);
32366 abi_reg = sc_reg;
32367 }
32368 }
32369 }
32370 else
32371 {
32372 /* No TOC register needed for calls from PC-relative callers. */
32373 if (!rs6000_pcrel_p (cfun))
32374 /* Direct calls use the TOC: for local calls, the callee will
32375 assume the TOC register is set; for non-local calls, the
32376 PLT stub needs the TOC register. */
32377 abi_reg = toc_reg;
32378 func_addr = func;
32379 }
32380
32381 /* Create the call. */
32382 call[0] = gen_rtx_CALL (VOIDmode, gen_rtx_MEM (SImode, func_addr), tlsarg);
32383 if (value != NULL_RTX)
32384 call[0] = gen_rtx_SET (value, call[0]);
32385 n_call = 1;
32386
32387 if (toc_load)
32388 call[n_call++] = toc_load;
32389 if (toc_restore)
32390 call[n_call++] = toc_restore;
32391
32392 call[n_call++] = gen_hard_reg_clobber (Pmode, LR_REGNO);
32393
32394 insn = gen_rtx_PARALLEL (VOIDmode, gen_rtvec_v (n_call, call));
32395 insn = emit_call_insn (insn);
32396
32397 /* Mention all registers defined by the ABI to hold information
32398 as uses in CALL_INSN_FUNCTION_USAGE. */
32399 if (abi_reg)
32400 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), abi_reg);
32401 }
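
/* Rough shape of what the above expands to for an indirect call under the
   64-bit AIX ABI (illustrative assembly only; rD stands for the register
   holding the descriptor address, and the TOC save offset is symbolic):

     std   2,TOC_SAVE(1)  # save our TOC, unless done once in the prologue
     ld    0,0(rD)        # word 0: the real function address
     mtctr 0
     ld    11,16(rD)      # word 2: static chain, only when it may be set
     ld    2,8(rD)        # word 1: callee's TOC (the toc_load USE above)
     bctrl
     ld    2,TOC_SAVE(1)  # restore our TOC (the toc_restore SET above)  */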
32402
32403 /* Expand code to perform a sibling call under the AIX or ELFv2 ABI. */
32404
32405 void
32406 rs6000_sibcall_aix (rtx value, rtx func_desc, rtx tlsarg, rtx cookie)
32407 {
32408 rtx call[2];
32409 rtx insn;
32410
32411 gcc_assert (INTVAL (cookie) == 0);
32412
32413 if (global_tlsarg)
32414 tlsarg = global_tlsarg;
32415
32416 /* Create the call. */
32417 call[0] = gen_rtx_CALL (VOIDmode, gen_rtx_MEM (SImode, func_desc), tlsarg);
32418 if (value != NULL_RTX)
32419 call[0] = gen_rtx_SET (value, call[0]);
32420
32421 call[1] = simple_return_rtx;
32422
32423 insn = gen_rtx_PARALLEL (VOIDmode, gen_rtvec_v (2, call));
32424 insn = emit_call_insn (insn);
32425
32426 /* Note use of the TOC register. */
32427 if (!rs6000_pcrel_p (cfun))
32428 use_reg (&CALL_INSN_FUNCTION_USAGE (insn),
32429 gen_rtx_REG (Pmode, TOC_REGNUM));
32430 }
32431
32432 /* Expand code to perform a call under the SYSV4 ABI. */
32433
32434 void
32435 rs6000_call_sysv (rtx value, rtx func_desc, rtx tlsarg, rtx cookie)
32436 {
32437 rtx func = func_desc;
32438 rtx func_addr;
32439 rtx call[4];
32440 rtx insn;
32441 rtx abi_reg = NULL_RTX;
32442 int n;
32443
32444 if (global_tlsarg)
32445 tlsarg = global_tlsarg;
32446
32447 /* Handle longcall attributes. */
32448 if ((INTVAL (cookie) & CALL_LONG) != 0
32449 && GET_CODE (func_desc) == SYMBOL_REF)
32450 {
32451 func = rs6000_longcall_ref (func_desc, tlsarg);
32452 /* If the longcall was implemented as an inline PLT call using
32453 PLT unspecs then func will be REG:r11. If not, func will be
32454 a pseudo reg. The inline PLT call sequence supports lazy
32455 linking (and longcalls to functions in dlopen'd libraries).
32456 The other style of longcall doesn't.  The lazy linking entry
32457 to the dynamic symbol resolver requires r11 be the function
32458 address (as it is for linker generated PLT stubs). Ensure
32459 r11 stays valid to the bctrl by marking r11 used by the call. */
32460 if (TARGET_PLTSEQ)
32461 abi_reg = func;
32462 }
32463
32464 /* Handle indirect calls. */
32465 if (GET_CODE (func) != SYMBOL_REF)
32466 {
32467 func = force_reg (Pmode, func);
32468
32469 /* Indirect calls via CTR are strongly preferred over indirect
32470 calls via LR, so move the address there. That can't be left
32471 to reload because we want to mark every instruction in an
32472 inline PLT call sequence with a reloc, enabling the linker to
32473 edit the sequence back to a direct call when that makes sense. */
32474 func_addr = gen_rtx_REG (Pmode, CTR_REGNO);
32475 if (abi_reg)
32476 {
32477 rtvec v = gen_rtvec (3, func, func_desc, tlsarg);
32478 rtx mark_func = gen_rtx_UNSPEC (Pmode, v, UNSPEC_PLTSEQ);
32479 emit_insn (gen_rtx_SET (func_addr, mark_func));
32480 v = gen_rtvec (2, func_addr, func_desc);
32481 func_addr = gen_rtx_UNSPEC (Pmode, v, UNSPEC_PLTSEQ);
32482 }
32483 else
32484 emit_move_insn (func_addr, func);
32485 }
32486 else
32487 func_addr = func;
32488
32489 /* Create the call. */
32490 call[0] = gen_rtx_CALL (VOIDmode, gen_rtx_MEM (SImode, func_addr), tlsarg);
32491 if (value != NULL_RTX)
32492 call[0] = gen_rtx_SET (value, call[0]);
32493
32494 call[1] = gen_rtx_USE (VOIDmode, cookie);
32495 n = 2;
32496 if (TARGET_SECURE_PLT
32497 && flag_pic
32498 && GET_CODE (func_addr) == SYMBOL_REF
32499 && !SYMBOL_REF_LOCAL_P (func_addr))
32500 call[n++] = gen_rtx_USE (VOIDmode, pic_offset_table_rtx);
32501
32502 call[n++] = gen_hard_reg_clobber (Pmode, LR_REGNO);
32503
32504 insn = gen_rtx_PARALLEL (VOIDmode, gen_rtvec_v (n, call));
32505 insn = emit_call_insn (insn);
32506 if (abi_reg)
32507 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), abi_reg);
32508 }
32509
32510 /* Expand code to perform a sibling call under the SysV4 ABI. */
32511
32512 void
32513 rs6000_sibcall_sysv (rtx value, rtx func_desc, rtx tlsarg, rtx cookie)
32514 {
32515 rtx func = func_desc;
32516 rtx func_addr;
32517 rtx call[3];
32518 rtx insn;
32519 rtx abi_reg = NULL_RTX;
32520
32521 if (global_tlsarg)
32522 tlsarg = global_tlsarg;
32523
32524 /* Handle longcall attributes. */
32525 if ((INTVAL (cookie) & CALL_LONG) != 0
32526 && GET_CODE (func_desc) == SYMBOL_REF)
32527 {
32528 func = rs6000_longcall_ref (func_desc, tlsarg);
32529 /* If the longcall was implemented as an inline PLT call using
32530 PLT unspecs then func will be REG:r11. If not, func will be
32531 a pseudo reg. The inline PLT call sequence supports lazy
32532 linking (and longcalls to functions in dlopen'd libraries).
32533 The other style of longcall doesn't.  The lazy linking entry
32534 to the dynamic symbol resolver requires r11 be the function
32535 address (as it is for linker generated PLT stubs). Ensure
32536 r11 stays valid to the bctr by marking r11 used by the call. */
32537 if (TARGET_PLTSEQ)
32538 abi_reg = func;
32539 }
32540
32541 /* Handle indirect calls. */
32542 if (GET_CODE (func) != SYMBOL_REF)
32543 {
32544 func = force_reg (Pmode, func);
32545
32546 /* Indirect sibcalls must go via CTR. That can't be left to
32547 reload because we want to mark every instruction in an inline
32548 PLT call sequence with a reloc, enabling the linker to edit
32549 the sequence back to a direct call when that makes sense. */
32550 func_addr = gen_rtx_REG (Pmode, CTR_REGNO);
32551 if (abi_reg)
32552 {
32553 rtvec v = gen_rtvec (3, func, func_desc, tlsarg);
32554 rtx mark_func = gen_rtx_UNSPEC (Pmode, v, UNSPEC_PLTSEQ);
32555 emit_insn (gen_rtx_SET (func_addr, mark_func));
32556 v = gen_rtvec (2, func_addr, func_desc);
32557 func_addr = gen_rtx_UNSPEC (Pmode, v, UNSPEC_PLTSEQ);
32558 }
32559 else
32560 emit_move_insn (func_addr, func);
32561 }
32562 else
32563 func_addr = func;
32564
32565 /* Create the call. */
32566 call[0] = gen_rtx_CALL (VOIDmode, gen_rtx_MEM (SImode, func_addr), tlsarg);
32567 if (value != NULL_RTX)
32568 call[0] = gen_rtx_SET (value, call[0]);
32569
32570 call[1] = gen_rtx_USE (VOIDmode, cookie);
32571 call[2] = simple_return_rtx;
32572
32573 insn = gen_rtx_PARALLEL (VOIDmode, gen_rtvec_v (3, call));
32574 insn = emit_call_insn (insn);
32575 if (abi_reg)
32576 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), abi_reg);
32577 }
32578
32579 #if TARGET_MACHO
32580
32581 /* Expand code to perform a call under the Darwin ABI.
32582 Modulo handling of mlongcall, this is much the same as sysv.
32583 if/when the longcall optimisation is removed, we could drop this
32584 code and use the sysv case (taking care to avoid the tls stuff).
32585
32586 We can use this for sibcalls too, if needed. */
32587
32588 void
32589 rs6000_call_darwin_1 (rtx value, rtx func_desc, rtx tlsarg,
32590 rtx cookie, bool sibcall)
32591 {
32592 rtx func = func_desc;
32593 rtx func_addr;
32594 rtx call[3];
32595 rtx insn;
32596 int cookie_val = INTVAL (cookie);
32597 bool make_island = false;
32598
32599 /* Handle longcall attributes; there are two cases for Darwin:
32600 1) Newer linkers are capable of synthesising any branch islands needed.
32601 2) We need a helper branch island synthesised by the compiler.
32602 The second case has mostly been retired and we don't use it for m64.
32603 In fact, it is only an optimisation; we could just indirect as sysv does,
32604 but we keep it for backwards compatibility for now.
32605 If we're going to use this, then we need to keep the CALL_LONG bit set,
32606 so that we can pick up the special insn form later. */
32607 if ((cookie_val & CALL_LONG) != 0
32608 && GET_CODE (func_desc) == SYMBOL_REF)
32609 {
32610 /* FIXME: the longcall opt should not hang off picsymbol stubs. */
32611 if (darwin_picsymbol_stubs && TARGET_32BIT)
32612 make_island = true; /* Do nothing yet, retain the CALL_LONG flag. */
32613 else
32614 {
32615 /* The linker is capable of doing this, but the user explicitly
32616 asked for -mlongcall, so we'll do the 'normal' version. */
32617 func = rs6000_longcall_ref (func_desc, NULL_RTX);
32618 cookie_val &= ~CALL_LONG; /* Handled, zap it. */
32619 }
32620 }
32621
32622 /* Handle indirect calls. */
32623 if (GET_CODE (func) != SYMBOL_REF)
32624 {
32625 func = force_reg (Pmode, func);
32626
32627 /* Indirect calls via CTR are strongly preferred over indirect
32628 calls via LR, and are required for indirect sibcalls, so move
32629 the address there. */
32630 func_addr = gen_rtx_REG (Pmode, CTR_REGNO);
32631 emit_move_insn (func_addr, func);
32632 }
32633 else
32634 func_addr = func;
32635
32636 /* Create the call. */
32637 call[0] = gen_rtx_CALL (VOIDmode, gen_rtx_MEM (SImode, func_addr), tlsarg);
32638 if (value != NULL_RTX)
32639 call[0] = gen_rtx_SET (value, call[0]);
32640
32641 call[1] = gen_rtx_USE (VOIDmode, GEN_INT (cookie_val));
32642
32643 if (sibcall)
32644 call[2] = simple_return_rtx;
32645 else
32646 call[2] = gen_hard_reg_clobber (Pmode, LR_REGNO);
32647
32648 insn = gen_rtx_PARALLEL (VOIDmode, gen_rtvec_v (3, call));
32649 insn = emit_call_insn (insn);
32650 /* Now that we have the debug info in the insn, we can set up the branch island
32651 if we're using one. */
32652 if (make_island)
32653 {
32654 tree funname = get_identifier (XSTR (func_desc, 0));
32655
32656 if (no_previous_def (funname))
32657 {
32658 rtx label_rtx = gen_label_rtx ();
32659 char *label_buf, temp_buf[256];
32660 ASM_GENERATE_INTERNAL_LABEL (temp_buf, "L",
32661 CODE_LABEL_NUMBER (label_rtx));
32662 label_buf = temp_buf[0] == '*' ? temp_buf + 1 : temp_buf;
32663 tree labelname = get_identifier (label_buf);
32664 add_compiler_branch_island (labelname, funname,
32665 insn_line ((const rtx_insn*)insn));
32666 }
32667 }
32668 }
32669 #endif
32670
32671 void
32672 rs6000_call_darwin (rtx value ATTRIBUTE_UNUSED, rtx func_desc ATTRIBUTE_UNUSED,
32673 rtx tlsarg ATTRIBUTE_UNUSED, rtx cookie ATTRIBUTE_UNUSED)
32674 {
32675 #if TARGET_MACHO
32676 rs6000_call_darwin_1 (value, func_desc, tlsarg, cookie, false);
32677 #else
32678 gcc_unreachable();
32679 #endif
32680 }
32681
32682
32683 void
32684 rs6000_sibcall_darwin (rtx value ATTRIBUTE_UNUSED, rtx func_desc ATTRIBUTE_UNUSED,
32685 rtx tlsarg ATTRIBUTE_UNUSED, rtx cookie ATTRIBUTE_UNUSED)
32686 {
32687 #if TARGET_MACHO
32688 rs6000_call_darwin_1 (value, func_desc, tlsarg, cookie, true);
32689 #else
32690 gcc_unreachable();
32691 #endif
32692 }
32693
32694 /* Return whether we should generate PC-relative code for FNDECL. */
32695 bool
32696 rs6000_fndecl_pcrel_p (const_tree fndecl)
32697 {
32698 if (DEFAULT_ABI != ABI_ELFv2)
32699 return false;
32700
32701 struct cl_target_option *opts = target_opts_for_fn (fndecl);
32702
32703 return ((opts->x_rs6000_isa_flags & OPTION_MASK_PCREL) != 0
32704 && TARGET_CMODEL == CMODEL_MEDIUM);
32705 }
32706
32707 /* Return whether we should generate PC-relative code for *FN. */
32708 bool
32709 rs6000_pcrel_p (struct function *fn)
32710 {
32711 if (DEFAULT_ABI != ABI_ELFv2)
32712 return false;
32713
32714 /* Optimize usual case. */
32715 if (fn == cfun)
32716 return ((rs6000_isa_flags & OPTION_MASK_PCREL) != 0
32717 && TARGET_CMODEL == CMODEL_MEDIUM);
32718
32719 return rs6000_fndecl_pcrel_p (fn->decl);
32720 }
32721
32722 #ifdef HAVE_GAS_HIDDEN
32723 # define USE_HIDDEN_LINKONCE 1
32724 #else
32725 # define USE_HIDDEN_LINKONCE 0
32726 #endif
32727
32728 /* Fills in the label name that should be used for a 476 link stack thunk. */
32729
32730 void
32731 get_ppc476_thunk_name (char name[32])
32732 {
32733 gcc_assert (TARGET_LINK_STACK);
32734
32735 if (USE_HIDDEN_LINKONCE)
32736 sprintf (name, "__ppc476.get_thunk");
32737 else
32738 ASM_GENERATE_INTERNAL_LABEL (name, "LPPC476_", 0);
32739 }
32740
32741 /* This function emits the simple thunk routine that is used to preserve
32742 the link stack on the 476 cpu. */
32743
32744 static void rs6000_code_end (void) ATTRIBUTE_UNUSED;
32745 static void
32746 rs6000_code_end (void)
32747 {
32748 char name[32];
32749 tree decl;
32750
32751 if (!TARGET_LINK_STACK)
32752 return;
32753
32754 get_ppc476_thunk_name (name);
32755
32756 decl = build_decl (BUILTINS_LOCATION, FUNCTION_DECL, get_identifier (name),
32757 build_function_type_list (void_type_node, NULL_TREE));
32758 DECL_RESULT (decl) = build_decl (BUILTINS_LOCATION, RESULT_DECL,
32759 NULL_TREE, void_type_node);
32760 TREE_PUBLIC (decl) = 1;
32761 TREE_STATIC (decl) = 1;
32762
32763 #if RS6000_WEAK
32764 if (USE_HIDDEN_LINKONCE && !TARGET_XCOFF)
32765 {
32766 cgraph_node::create (decl)->set_comdat_group (DECL_ASSEMBLER_NAME (decl));
32767 targetm.asm_out.unique_section (decl, 0);
32768 switch_to_section (get_named_section (decl, NULL, 0));
32769 DECL_WEAK (decl) = 1;
32770 ASM_WEAKEN_DECL (asm_out_file, decl, name, 0);
32771 targetm.asm_out.globalize_label (asm_out_file, name);
32772 targetm.asm_out.assemble_visibility (decl, VISIBILITY_HIDDEN);
32773 ASM_DECLARE_FUNCTION_NAME (asm_out_file, name, decl);
32774 }
32775 else
32776 #endif
32777 {
32778 switch_to_section (text_section);
32779 ASM_OUTPUT_LABEL (asm_out_file, name);
32780 }
32781
32782 DECL_INITIAL (decl) = make_node (BLOCK);
32783 current_function_decl = decl;
32784 allocate_struct_function (decl, false);
32785 init_function_start (decl);
32786 first_function_block_is_cold = false;
32787 /* Make sure unwind info is emitted for the thunk if needed. */
32788 final_start_function (emit_barrier (), asm_out_file, 1);
32789
32790 fputs ("\tblr\n", asm_out_file);
32791
32792 final_end_function ();
32793 init_insn_lengths ();
32794 free_after_compilation (cfun);
32795 set_cfun (NULL);
32796 current_function_decl = NULL;
32797 }
32798
32799 /* Add r30 to hard reg set if the prologue sets it up and it is not
32800 pic_offset_table_rtx. */
32801
32802 static void
32803 rs6000_set_up_by_prologue (struct hard_reg_set_container *set)
32804 {
32805 if (!TARGET_SINGLE_PIC_BASE
32806 && TARGET_TOC
32807 && TARGET_MINIMAL_TOC
32808 && !constant_pool_empty_p ())
32809 add_to_hard_reg_set (&set->set, Pmode, RS6000_PIC_OFFSET_TABLE_REGNUM);
32810 if (cfun->machine->split_stack_argp_used)
32811 add_to_hard_reg_set (&set->set, Pmode, 12);
32812
32813 /* Make sure the hard reg set doesn't include r2, which was possibly added
32814 via PIC_OFFSET_TABLE_REGNUM. */
32815 if (TARGET_TOC)
32816 remove_from_hard_reg_set (&set->set, Pmode, TOC_REGNUM);
32817 }
32818
32819 \f
32820 /* Helper function for rs6000_split_logical to emit a logical instruction after
32821 splitting the operation into single GPR registers.
32822
32823 DEST is the destination register.
32824 OP1 and OP2 are the input source registers.
32825 CODE is the base operation (AND, IOR, XOR, NOT).
32826 MODE is the machine mode.
32827 If COMPLEMENT_FINAL_P is true, wrap the whole operation with NOT.
32828 If COMPLEMENT_OP1_P is true, wrap operand1 with NOT.
32829 If COMPLEMENT_OP2_P is true, wrap operand2 with NOT. */
32830
32831 static void
32832 rs6000_split_logical_inner (rtx dest,
32833 rtx op1,
32834 rtx op2,
32835 enum rtx_code code,
32836 machine_mode mode,
32837 bool complement_final_p,
32838 bool complement_op1_p,
32839 bool complement_op2_p)
32840 {
32841 rtx bool_rtx;
32842
32843 /* Optimize AND of 0/0xffffffff and IOR/XOR of 0. */
32844 if (op2 && CONST_INT_P (op2)
32845 && (mode == SImode || (mode == DImode && TARGET_POWERPC64))
32846 && !complement_final_p && !complement_op1_p && !complement_op2_p)
32847 {
32848 HOST_WIDE_INT mask = GET_MODE_MASK (mode);
32849 HOST_WIDE_INT value = INTVAL (op2) & mask;
32850
32851 /* Optimize AND of 0 to just set 0. Optimize AND of -1 to be a move. */
32852 if (code == AND)
32853 {
32854 if (value == 0)
32855 {
32856 emit_insn (gen_rtx_SET (dest, const0_rtx));
32857 return;
32858 }
32859
32860 else if (value == mask)
32861 {
32862 if (!rtx_equal_p (dest, op1))
32863 emit_insn (gen_rtx_SET (dest, op1));
32864 return;
32865 }
32866 }
32867
32868 /* Optimize IOR/XOR of 0 to be a simple move. Split large operations
32869 into separate ORI/ORIS or XORI/XORIS instructions.  */
32870 else if (code == IOR || code == XOR)
32871 {
32872 if (value == 0)
32873 {
32874 if (!rtx_equal_p (dest, op1))
32875 emit_insn (gen_rtx_SET (dest, op1));
32876 return;
32877 }
32878 }
32879 }
32880
32881 if (code == AND && mode == SImode
32882 && !complement_final_p && !complement_op1_p && !complement_op2_p)
32883 {
32884 emit_insn (gen_andsi3 (dest, op1, op2));
32885 return;
32886 }
32887
32888 if (complement_op1_p)
32889 op1 = gen_rtx_NOT (mode, op1);
32890
32891 if (complement_op2_p)
32892 op2 = gen_rtx_NOT (mode, op2);
32893
32894 /* For canonical RTL, if only one arm is inverted it is the first. */
32895 if (!complement_op1_p && complement_op2_p)
32896 std::swap (op1, op2);
32897
32898 bool_rtx = ((code == NOT)
32899 ? gen_rtx_NOT (mode, op1)
32900 : gen_rtx_fmt_ee (code, mode, op1, op2));
32901
32902 if (complement_final_p)
32903 bool_rtx = gen_rtx_NOT (mode, bool_rtx);
32904
32905 emit_insn (gen_rtx_SET (dest, bool_rtx));
32906 }
32907
32908 /* Split a DImode AND/IOR/XOR with a constant on a 32-bit system. These
32909 operations are split immediately during RTL generation to allow for more
32910 optimizations of the AND/IOR/XOR.
32911
32912 OPERANDS is an array containing the destination and two input operands.
32913 CODE is the base operation (AND, IOR, XOR, NOT).
32915 If COMPLEMENT_FINAL_P is true, wrap the whole operation with NOT.
32916 If COMPLEMENT_OP1_P is true, wrap operand1 with NOT.
32917 If COMPLEMENT_OP2_P is true, wrap operand2 with NOT.  */
32920
32921 static void
32922 rs6000_split_logical_di (rtx operands[3],
32923 enum rtx_code code,
32924 bool complement_final_p,
32925 bool complement_op1_p,
32926 bool complement_op2_p)
32927 {
32928 const HOST_WIDE_INT lower_32bits = HOST_WIDE_INT_C(0xffffffff);
32929 const HOST_WIDE_INT upper_32bits = ~ lower_32bits;
32930 const HOST_WIDE_INT sign_bit = HOST_WIDE_INT_C(0x80000000);
32931 enum hi_lo { hi = 0, lo = 1 };
32932 rtx op0_hi_lo[2], op1_hi_lo[2], op2_hi_lo[2];
32933 size_t i;
32934
32935 op0_hi_lo[hi] = gen_highpart (SImode, operands[0]);
32936 op1_hi_lo[hi] = gen_highpart (SImode, operands[1]);
32937 op0_hi_lo[lo] = gen_lowpart (SImode, operands[0]);
32938 op1_hi_lo[lo] = gen_lowpart (SImode, operands[1]);
32939
32940 if (code == NOT)
32941 op2_hi_lo[hi] = op2_hi_lo[lo] = NULL_RTX;
32942 else
32943 {
32944 if (!CONST_INT_P (operands[2]))
32945 {
32946 op2_hi_lo[hi] = gen_highpart_mode (SImode, DImode, operands[2]);
32947 op2_hi_lo[lo] = gen_lowpart (SImode, operands[2]);
32948 }
32949 else
32950 {
32951 HOST_WIDE_INT value = INTVAL (operands[2]);
32952 HOST_WIDE_INT value_hi_lo[2];
32953
32954 gcc_assert (!complement_final_p);
32955 gcc_assert (!complement_op1_p);
32956 gcc_assert (!complement_op2_p);
32957
32958 value_hi_lo[hi] = value >> 32;
32959 value_hi_lo[lo] = value & lower_32bits;
32960
32961 for (i = 0; i < 2; i++)
32962 {
32963 HOST_WIDE_INT sub_value = value_hi_lo[i];
32964
32965 if (sub_value & sign_bit)
32966 sub_value |= upper_32bits;
32967
32968 op2_hi_lo[i] = GEN_INT (sub_value);
32969
32970 /* If this is an AND instruction, check to see if we need to load
32971 the value in a register. */
32972 if (code == AND && sub_value != -1 && sub_value != 0
32973 && !and_operand (op2_hi_lo[i], SImode))
32974 op2_hi_lo[i] = force_reg (SImode, op2_hi_lo[i]);
32975 }
32976 }
32977 }
32978
32979 for (i = 0; i < 2; i++)
32980 {
32981 /* Split large IOR/XOR operations. */
32982 if ((code == IOR || code == XOR)
32983 && CONST_INT_P (op2_hi_lo[i])
32984 && !complement_final_p
32985 && !complement_op1_p
32986 && !complement_op2_p
32987 && !logical_const_operand (op2_hi_lo[i], SImode))
32988 {
32989 HOST_WIDE_INT value = INTVAL (op2_hi_lo[i]);
32990 HOST_WIDE_INT hi_16bits = value & HOST_WIDE_INT_C(0xffff0000);
32991 HOST_WIDE_INT lo_16bits = value & HOST_WIDE_INT_C(0x0000ffff);
32992 rtx tmp = gen_reg_rtx (SImode);
32993
32994 /* Make sure the constant is sign extended. */
32995 if ((hi_16bits & sign_bit) != 0)
32996 hi_16bits |= upper_32bits;
32997
32998 rs6000_split_logical_inner (tmp, op1_hi_lo[i], GEN_INT (hi_16bits),
32999 code, SImode, false, false, false);
33000
33001 rs6000_split_logical_inner (op0_hi_lo[i], tmp, GEN_INT (lo_16bits),
33002 code, SImode, false, false, false);
33003 }
33004 else
33005 rs6000_split_logical_inner (op0_hi_lo[i], op1_hi_lo[i], op2_hi_lo[i],
33006 code, SImode, complement_final_p,
33007 complement_op1_p, complement_op2_p);
33008 }
33009
33010 return;
33011 }
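
/* Worked example (illustrative): XORing the 32-bit register pair rHI:rLO
   with the DImode constant 0x12345678 leaves the high word alone (XOR with
   0 collapses to a move, or to nothing when the registers match), while
   the low word's constant is not a logical_const_operand and is split as
   above into:

     xoris rLO,rLO,0x1234
     xori  rLO,rLO,0x5678  */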
33012
33013 /* Split the insns that make up boolean operations operating on multiple GPR
33014 registers. The boolean MD patterns ensure that the inputs either are
33015 exactly the same as the output registers, or there is no overlap.
33016
33017 OPERANDS is an array containing the destination and two input operands.
33018 CODE is the base operation (AND, IOR, XOR, NOT).
33019 If COMPLEMENT_FINAL_P is true, wrap the whole operation with NOT.
33020 If COMPLEMENT_OP1_P is true, wrap operand1 with NOT.
33021 If COMPLEMENT_OP2_P is true, wrap operand2 with NOT. */
33022
33023 void
33024 rs6000_split_logical (rtx operands[3],
33025 enum rtx_code code,
33026 bool complement_final_p,
33027 bool complement_op1_p,
33028 bool complement_op2_p)
33029 {
33030 machine_mode mode = GET_MODE (operands[0]);
33031 machine_mode sub_mode;
33032 rtx op0, op1, op2;
33033 int sub_size, regno0, regno1, nregs, i;
33034
33035 /* If this is DImode, use the specialized version that can run before
33036 register allocation. */
33037 if (mode == DImode && !TARGET_POWERPC64)
33038 {
33039 rs6000_split_logical_di (operands, code, complement_final_p,
33040 complement_op1_p, complement_op2_p);
33041 return;
33042 }
33043
33044 op0 = operands[0];
33045 op1 = operands[1];
33046 op2 = (code == NOT) ? NULL_RTX : operands[2];
33047 sub_mode = (TARGET_POWERPC64) ? DImode : SImode;
33048 sub_size = GET_MODE_SIZE (sub_mode);
33049 regno0 = REGNO (op0);
33050 regno1 = REGNO (op1);
33051
33052 gcc_assert (reload_completed);
33053 gcc_assert (IN_RANGE (regno0, FIRST_GPR_REGNO, LAST_GPR_REGNO));
33054 gcc_assert (IN_RANGE (regno1, FIRST_GPR_REGNO, LAST_GPR_REGNO));
33055
33056 nregs = rs6000_hard_regno_nregs[(int)mode][regno0];
33057 gcc_assert (nregs > 1);
33058
33059 if (op2 && REG_P (op2))
33060 gcc_assert (IN_RANGE (REGNO (op2), FIRST_GPR_REGNO, LAST_GPR_REGNO));
33061
33062 for (i = 0; i < nregs; i++)
33063 {
33064 int offset = i * sub_size;
33065 rtx sub_op0 = simplify_subreg (sub_mode, op0, mode, offset);
33066 rtx sub_op1 = simplify_subreg (sub_mode, op1, mode, offset);
33067 rtx sub_op2 = ((code == NOT)
33068 ? NULL_RTX
33069 : simplify_subreg (sub_mode, op2, mode, offset));
33070
33071 rs6000_split_logical_inner (sub_op0, sub_op1, sub_op2, code, sub_mode,
33072 complement_final_p, complement_op1_p,
33073 complement_op2_p);
33074 }
33075
33076 return;
33077 }
33078
33079 \f
33080 /* Return true if the peephole2 pass can combine an addis instruction and a
33081 load with an offset into a sequence that can be fused together on a
33082 power8.  */
33083
33084 bool
33085 fusion_gpr_load_p (rtx addis_reg, /* register set via addis. */
33086 rtx addis_value, /* addis value. */
33087 rtx target, /* target register that is loaded. */
33088 rtx mem) /* bottom part of the memory addr. */
33089 {
33090 rtx addr;
33091 rtx base_reg;
33092
33093 /* Validate arguments. */
33094 if (!base_reg_operand (addis_reg, GET_MODE (addis_reg)))
33095 return false;
33096
33097 if (!base_reg_operand (target, GET_MODE (target)))
33098 return false;
33099
33100 if (!fusion_gpr_addis (addis_value, GET_MODE (addis_value)))
33101 return false;
33102
33103 /* Allow sign/zero extension. */
33104 if (GET_CODE (mem) == ZERO_EXTEND
33105 || (GET_CODE (mem) == SIGN_EXTEND && TARGET_P8_FUSION_SIGN))
33106 mem = XEXP (mem, 0);
33107
33108 if (!MEM_P (mem))
33109 return false;
33110
33111 if (!fusion_gpr_mem_load (mem, GET_MODE (mem)))
33112 return false;
33113
33114 addr = XEXP (mem, 0); /* either PLUS or LO_SUM. */
33115 if (GET_CODE (addr) != PLUS && GET_CODE (addr) != LO_SUM)
33116 return false;
33117
33118 /* Validate that the register used to load the high value is either the
33119 register being loaded, or we can safely replace its use.
33120
33121 This function is only called from the peephole2 pass and we assume that
33122 there are 2 instructions in the peephole (addis and load), so we want to
33123 check that the target register was not used in the memory address and
33124 that the register holding the addis result is dead after the peephole.  */
33125 if (REGNO (addis_reg) != REGNO (target))
33126 {
33127 if (reg_mentioned_p (target, mem))
33128 return false;
33129
33130 if (!peep2_reg_dead_p (2, addis_reg))
33131 return false;
33132
33133 /* If the target register being loaded is the stack pointer, we must
33134 avoid loading any other value into it, even temporarily. */
33135 if (REG_P (target) && REGNO (target) == STACK_POINTER_REGNUM)
33136 return false;
33137 }
33138
33139 base_reg = XEXP (addr, 0);
33140 return REGNO (addis_reg) == REGNO (base_reg);
33141 }
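
/* A typical candidate pair for this peephole looks like (illustrative):

     addis 9,2,.LC0@toc@ha    # addis_reg = r9, addis_value = the high part
     lwz  10,.LC0@toc@l(9)    # target = r10, mem's address uses r9

   Because r9 != r10 here, the checks above also require that r9 be dead
   after the load, since the fused replacement retargets the addis to r10
   and never sets r9.  */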
33142
33143 /* During the peephole2 pass, adjust and expand the insns for a load fusion
33144 sequence. We adjust the addis register to use the target register. If the
33145 load sign extends, we adjust the code to do a zero-extending load followed
33146 by an explicit sign extension, since the fusion only covers zero-extending
33147 loads.
33148
33149 The operands are:
33150 operands[0] register set with addis (to be replaced with target)
33151 operands[1] value set via addis
33152 operands[2] target register being loaded
33153 operands[3] D-form memory reference using operands[0]. */
33154
33155 void
33156 expand_fusion_gpr_load (rtx *operands)
33157 {
33158 rtx addis_value = operands[1];
33159 rtx target = operands[2];
33160 rtx orig_mem = operands[3];
33161 rtx new_addr, new_mem, orig_addr, offset;
33162 enum rtx_code plus_or_lo_sum;
33163 machine_mode target_mode = GET_MODE (target);
33164 machine_mode extend_mode = target_mode;
33165 machine_mode ptr_mode = Pmode;
33166 enum rtx_code extend = UNKNOWN;
33167
33168 if (GET_CODE (orig_mem) == ZERO_EXTEND
33169 || (TARGET_P8_FUSION_SIGN && GET_CODE (orig_mem) == SIGN_EXTEND))
33170 {
33171 extend = GET_CODE (orig_mem);
33172 orig_mem = XEXP (orig_mem, 0);
33173 target_mode = GET_MODE (orig_mem);
33174 }
33175
33176 gcc_assert (MEM_P (orig_mem));
33177
33178 orig_addr = XEXP (orig_mem, 0);
33179 plus_or_lo_sum = GET_CODE (orig_addr);
33180 gcc_assert (plus_or_lo_sum == PLUS || plus_or_lo_sum == LO_SUM);
33181
33182 offset = XEXP (orig_addr, 1);
33183 new_addr = gen_rtx_fmt_ee (plus_or_lo_sum, ptr_mode, addis_value, offset);
33184 new_mem = replace_equiv_address_nv (orig_mem, new_addr, false);
33185
33186 if (extend != UNKNOWN)
33187 new_mem = gen_rtx_fmt_e (ZERO_EXTEND, extend_mode, new_mem);
33188
33189 new_mem = gen_rtx_UNSPEC (extend_mode, gen_rtvec (1, new_mem),
33190 UNSPEC_FUSION_GPR);
33191 emit_insn (gen_rtx_SET (target, new_mem));
33192
33193 if (extend == SIGN_EXTEND)
33194 {
33195 int sub_off = ((BYTES_BIG_ENDIAN)
33196 ? GET_MODE_SIZE (extend_mode) - GET_MODE_SIZE (target_mode)
33197 : 0);
33198 rtx sign_reg
33199 = simplify_subreg (target_mode, target, extend_mode, sub_off);
33200
33201 emit_insn (gen_rtx_SET (target,
33202 gen_rtx_SIGN_EXTEND (extend_mode, sign_reg)));
33203 }
33204
33205 return;
33206 }
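
/* For illustration only (not part of the original source): a sign-extending
   HImode load that is fused here is rewritten as a zero-extending fused load
   plus a separate sign extension, roughly

	addis 3,2,sym@toc@ha
	lhz 3,sym@toc@l(3)	<- fused zero-extending pair
	extsh 3,3		<- explicit sign extension added afterwards  */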

/* Emit the addis instruction that will be part of a fused instruction
   sequence.  */

void
emit_fusion_addis (rtx target, rtx addis_value)
{
  rtx fuse_ops[10];
  const char *addis_str = NULL;

  /* Emit the addis instruction.  */
  fuse_ops[0] = target;
  if (satisfies_constraint_L (addis_value))
    {
      fuse_ops[1] = addis_value;
      addis_str = "lis %0,%v1";
    }

  else if (GET_CODE (addis_value) == PLUS)
    {
      rtx op0 = XEXP (addis_value, 0);
      rtx op1 = XEXP (addis_value, 1);

      if (REG_P (op0) && CONST_INT_P (op1)
	  && satisfies_constraint_L (op1))
	{
	  fuse_ops[1] = op0;
	  fuse_ops[2] = op1;
	  addis_str = "addis %0,%1,%v2";
	}
    }

  else if (GET_CODE (addis_value) == HIGH)
    {
      rtx value = XEXP (addis_value, 0);
      if (GET_CODE (value) == UNSPEC && XINT (value, 1) == UNSPEC_TOCREL)
	{
	  fuse_ops[1] = XVECEXP (value, 0, 0);	/* symbol ref.  */
	  fuse_ops[2] = XVECEXP (value, 0, 1);	/* TOC register.  */
	  if (TARGET_ELF)
	    addis_str = "addis %0,%2,%1@toc@ha";

	  else if (TARGET_XCOFF)
	    addis_str = "addis %0,%1@u(%2)";

	  else
	    gcc_unreachable ();
	}

      else if (GET_CODE (value) == PLUS)
	{
	  rtx op0 = XEXP (value, 0);
	  rtx op1 = XEXP (value, 1);

	  if (GET_CODE (op0) == UNSPEC
	      && XINT (op0, 1) == UNSPEC_TOCREL
	      && CONST_INT_P (op1))
	    {
	      fuse_ops[1] = XVECEXP (op0, 0, 0);	/* symbol ref.  */
	      fuse_ops[2] = XVECEXP (op0, 0, 1);	/* TOC register.  */
	      fuse_ops[3] = op1;
	      if (TARGET_ELF)
		addis_str = "addis %0,%2,%1+%3@toc@ha";

	      else if (TARGET_XCOFF)
		addis_str = "addis %0,%1+%3@u(%2)";

	      else
		gcc_unreachable ();
	    }
	}

      else if (satisfies_constraint_L (value))
	{
	  fuse_ops[1] = value;
	  addis_str = "lis %0,%v1";
	}

      else if (TARGET_ELF && !TARGET_POWERPC64 && CONSTANT_P (value))
	{
	  fuse_ops[1] = value;
	  addis_str = "lis %0,%1@ha";
	}
    }

  if (!addis_str)
    fatal_insn ("Could not generate addis value for fusion", addis_value);

  output_asm_insn (addis_str, fuse_ops);
}
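
/* For illustration only (not part of the original source): depending on the
   form of ADDIS_VALUE, the templates above print, e.g.,

	lis 9,0x1234		<- constant 0x12340000 (L constraint)
	addis 9,3,32		<- register plus a high-part constant
	addis 9,2,sym@toc@ha	<- high part of a TOC-relative address  */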

/* Emit a D-form load or store instruction that is the second instruction
   of a fusion sequence.  */

static void
emit_fusion_load (rtx load_reg, rtx addis_reg, rtx offset, const char *insn_str)
{
  rtx fuse_ops[10];
  char insn_template[80];

  fuse_ops[0] = load_reg;
  fuse_ops[1] = addis_reg;

  if (CONST_INT_P (offset) && satisfies_constraint_I (offset))
    {
      sprintf (insn_template, "%s %%0,%%2(%%1)", insn_str);
      fuse_ops[2] = offset;
      output_asm_insn (insn_template, fuse_ops);
    }

  else if (GET_CODE (offset) == UNSPEC
	   && XINT (offset, 1) == UNSPEC_TOCREL)
    {
      if (TARGET_ELF)
	sprintf (insn_template, "%s %%0,%%2@toc@l(%%1)", insn_str);

      else if (TARGET_XCOFF)
	sprintf (insn_template, "%s %%0,%%2@l(%%1)", insn_str);

      else
	gcc_unreachable ();

      fuse_ops[2] = XVECEXP (offset, 0, 0);
      output_asm_insn (insn_template, fuse_ops);
    }

  else if (GET_CODE (offset) == PLUS
	   && GET_CODE (XEXP (offset, 0)) == UNSPEC
	   && XINT (XEXP (offset, 0), 1) == UNSPEC_TOCREL
	   && CONST_INT_P (XEXP (offset, 1)))
    {
      rtx tocrel_unspec = XEXP (offset, 0);
      if (TARGET_ELF)
	sprintf (insn_template, "%s %%0,%%2+%%3@toc@l(%%1)", insn_str);

      else if (TARGET_XCOFF)
	sprintf (insn_template, "%s %%0,%%2+%%3@l(%%1)", insn_str);

      else
	gcc_unreachable ();

      fuse_ops[2] = XVECEXP (tocrel_unspec, 0, 0);
      fuse_ops[3] = XEXP (offset, 1);
      output_asm_insn (insn_template, fuse_ops);
    }

  else if (TARGET_ELF && !TARGET_POWERPC64 && CONSTANT_P (offset))
    {
      sprintf (insn_template, "%s %%0,%%2@l(%%1)", insn_str);

      fuse_ops[2] = offset;
      output_asm_insn (insn_template, fuse_ops);
    }

  else
    fatal_insn ("Unable to generate load/store offset for fusion", offset);

  return;
}
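
/* For illustration only (not part of the original source): with INSN_STR
   "lwz", the templates above print, e.g.,

	lwz 3,8(9)		<- constant offset
	lwz 3,sym@toc@l(9)	<- TOC-relative low part (ELF)
	lwz 3,sym+4@toc@l(9)	<- TOC-relative low part plus a constant  */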

/* Given an address, convert it into the addis and load offset parts.  Addresses
   created during the peephole2 process look like:
	(lo_sum (high (unspec [(sym)] UNSPEC_TOCREL))
		(unspec [(...)] UNSPEC_TOCREL))  */

static void
fusion_split_address (rtx addr, rtx *p_hi, rtx *p_lo)
{
  rtx hi, lo;

  if (GET_CODE (addr) == PLUS || GET_CODE (addr) == LO_SUM)
    {
      hi = XEXP (addr, 0);
      lo = XEXP (addr, 1);
    }
  else
    gcc_unreachable ();

  *p_hi = hi;
  *p_lo = lo;
}

/* Return a string to fuse an addis instruction with a GPR load into the same
   register that the addis instruction set.  The address used is the logical
   address that was formed during peephole2:
	(lo_sum (high) (low-part))

   The code is complicated, so we call output_asm_insn directly, and just
   return "".  */

const char *
emit_fusion_gpr_load (rtx target, rtx mem)
{
  rtx addis_value;
  rtx addr;
  rtx load_offset;
  const char *load_str = NULL;
  machine_mode mode;

  if (GET_CODE (mem) == ZERO_EXTEND)
    mem = XEXP (mem, 0);

  gcc_assert (REG_P (target) && MEM_P (mem));

  addr = XEXP (mem, 0);
  fusion_split_address (addr, &addis_value, &load_offset);

  /* Now emit the load instruction to the same register.  */
  mode = GET_MODE (mem);
  switch (mode)
    {
    case E_QImode:
      load_str = "lbz";
      break;

    case E_HImode:
      load_str = "lhz";
      break;

    case E_SImode:
    case E_SFmode:
      load_str = "lwz";
      break;

    case E_DImode:
    case E_DFmode:
      gcc_assert (TARGET_POWERPC64);
      load_str = "ld";
      break;

    default:
      fatal_insn ("Bad GPR fusion", gen_rtx_SET (target, mem));
    }

  /* Emit the addis instruction.  */
  emit_fusion_addis (target, addis_value);

  /* Emit the D-form load instruction.  */
  emit_fusion_load (target, target, load_offset, load_str);

  return "";
}
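
/* For illustration only (not part of the original source): for an SImode TOC
   load into r3 this prints the fused pair

	addis 3,2,.LC0@toc@ha
	lwz 3,.LC0@toc@l(3)

   where both instructions use the same target register.  */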
\f

#ifdef RS6000_GLIBC_ATOMIC_FENV
/* Function declarations for rs6000_atomic_assign_expand_fenv.  */
static tree atomic_hold_decl, atomic_clear_decl, atomic_update_decl;
#endif

/* Implement TARGET_ATOMIC_ASSIGN_EXPAND_FENV hook.  */

static void
rs6000_atomic_assign_expand_fenv (tree *hold, tree *clear, tree *update)
{
  if (!TARGET_HARD_FLOAT)
    {
#ifdef RS6000_GLIBC_ATOMIC_FENV
      if (atomic_hold_decl == NULL_TREE)
	{
	  atomic_hold_decl
	    = build_decl (BUILTINS_LOCATION, FUNCTION_DECL,
			  get_identifier ("__atomic_feholdexcept"),
			  build_function_type_list (void_type_node,
						    double_ptr_type_node,
						    NULL_TREE));
	  TREE_PUBLIC (atomic_hold_decl) = 1;
	  DECL_EXTERNAL (atomic_hold_decl) = 1;
	}

      if (atomic_clear_decl == NULL_TREE)
	{
	  atomic_clear_decl
	    = build_decl (BUILTINS_LOCATION, FUNCTION_DECL,
			  get_identifier ("__atomic_feclearexcept"),
			  build_function_type_list (void_type_node,
						    NULL_TREE));
	  TREE_PUBLIC (atomic_clear_decl) = 1;
	  DECL_EXTERNAL (atomic_clear_decl) = 1;
	}

      tree const_double = build_qualified_type (double_type_node,
						TYPE_QUAL_CONST);
      tree const_double_ptr = build_pointer_type (const_double);
      if (atomic_update_decl == NULL_TREE)
	{
	  atomic_update_decl
	    = build_decl (BUILTINS_LOCATION, FUNCTION_DECL,
			  get_identifier ("__atomic_feupdateenv"),
			  build_function_type_list (void_type_node,
						    const_double_ptr,
						    NULL_TREE));
	  TREE_PUBLIC (atomic_update_decl) = 1;
	  DECL_EXTERNAL (atomic_update_decl) = 1;
	}

      tree fenv_var = create_tmp_var_raw (double_type_node);
      TREE_ADDRESSABLE (fenv_var) = 1;
      tree fenv_addr = build1 (ADDR_EXPR, double_ptr_type_node, fenv_var);

      *hold = build_call_expr (atomic_hold_decl, 1, fenv_addr);
      *clear = build_call_expr (atomic_clear_decl, 0);
      *update = build_call_expr (atomic_update_decl, 1,
				 fold_convert (const_double_ptr, fenv_addr));
#endif
      return;
    }

  tree mffs = rs6000_builtin_decls[RS6000_BUILTIN_MFFS];
  tree mtfsf = rs6000_builtin_decls[RS6000_BUILTIN_MTFSF];
  tree call_mffs = build_call_expr (mffs, 0);

  /* Generates the equivalent of feholdexcept (&fenv_var):

	double fenv_var = __builtin_mffs ();
	double fenv_hold;
	*(uint64_t*)&fenv_hold = *(uint64_t*)&fenv_var & 0xffffffff00000007LL;
	__builtin_mtfsf (0xff, fenv_hold);  */

  /* Mask to clear everything except for the rounding modes and non-IEEE
     arithmetic flag.  */
  const unsigned HOST_WIDE_INT hold_exception_mask =
    HOST_WIDE_INT_C (0xffffffff00000007);

  tree fenv_var = create_tmp_var_raw (double_type_node);

  tree hold_mffs = build2 (MODIFY_EXPR, void_type_node, fenv_var, call_mffs);

  tree fenv_llu = build1 (VIEW_CONVERT_EXPR, uint64_type_node, fenv_var);
  tree fenv_llu_and = build2 (BIT_AND_EXPR, uint64_type_node, fenv_llu,
			      build_int_cst (uint64_type_node,
					     hold_exception_mask));

  tree fenv_hold_mtfsf = build1 (VIEW_CONVERT_EXPR, double_type_node,
				 fenv_llu_and);

  tree hold_mtfsf = build_call_expr (mtfsf, 2,
				     build_int_cst (unsigned_type_node, 0xff),
				     fenv_hold_mtfsf);

  *hold = build2 (COMPOUND_EXPR, void_type_node, hold_mffs, hold_mtfsf);

  /* Generates the equivalent of feclearexcept (FE_ALL_EXCEPT):

	double fenv_clear = __builtin_mffs ();
	*(uint64_t*)&fenv_clear &= 0xffffffff00000000LL;
	__builtin_mtfsf (0xff, fenv_clear);  */

  /* Mask that clears the entire FPSCR image in the low word, including the
     exception status and enable bits.  */
  const unsigned HOST_WIDE_INT clear_exception_mask =
    HOST_WIDE_INT_C (0xffffffff00000000);

  tree fenv_clear = create_tmp_var_raw (double_type_node);

  tree clear_mffs = build2 (MODIFY_EXPR, void_type_node, fenv_clear, call_mffs);

  tree fenv_clean_llu = build1 (VIEW_CONVERT_EXPR, uint64_type_node, fenv_clear);
  tree fenv_clear_llu_and = build2 (BIT_AND_EXPR, uint64_type_node,
				    fenv_clean_llu,
				    build_int_cst (uint64_type_node,
						   clear_exception_mask));

  tree fenv_clear_mtfsf = build1 (VIEW_CONVERT_EXPR, double_type_node,
				  fenv_clear_llu_and);

  tree clear_mtfsf = build_call_expr (mtfsf, 2,
				      build_int_cst (unsigned_type_node, 0xff),
				      fenv_clear_mtfsf);

  *clear = build2 (COMPOUND_EXPR, void_type_node, clear_mffs, clear_mtfsf);

  /* Generates the equivalent of feupdateenv (&fenv_var):

	double old_fenv = __builtin_mffs ();
	double fenv_update;
	*(uint64_t*)&fenv_update = (*(uint64_t*)&old_fenv & 0xffffffff1fffff00LL)
				   | (*(uint64_t*)&fenv_var & 0x1ff80fff);
	__builtin_mtfsf (0xff, fenv_update);  */

  const unsigned HOST_WIDE_INT update_exception_mask =
    HOST_WIDE_INT_C (0xffffffff1fffff00);
  const unsigned HOST_WIDE_INT new_exception_mask =
    HOST_WIDE_INT_C (0x1ff80fff);

  tree old_fenv = create_tmp_var_raw (double_type_node);
  tree update_mffs = build2 (MODIFY_EXPR, void_type_node, old_fenv, call_mffs);

  tree old_llu = build1 (VIEW_CONVERT_EXPR, uint64_type_node, old_fenv);
  tree old_llu_and = build2 (BIT_AND_EXPR, uint64_type_node, old_llu,
			     build_int_cst (uint64_type_node,
					    update_exception_mask));

  tree new_llu_and = build2 (BIT_AND_EXPR, uint64_type_node, fenv_llu,
			     build_int_cst (uint64_type_node,
					    new_exception_mask));

  tree new_llu_mask = build2 (BIT_IOR_EXPR, uint64_type_node,
			      old_llu_and, new_llu_and);

  tree fenv_update_mtfsf = build1 (VIEW_CONVERT_EXPR, double_type_node,
				   new_llu_mask);

  tree update_mtfsf = build_call_expr (mtfsf, 2,
				       build_int_cst (unsigned_type_node, 0xff),
				       fenv_update_mtfsf);

  *update = build2 (COMPOUND_EXPR, void_type_node, update_mffs, update_mtfsf);
}
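
/* For illustration only (not part of the original source): the middle end
   uses the three sequences built above when expanding an atomic compound
   assignment such as

	_Atomic double d;
	d += x;

   roughly as follows: HOLD before entering the compare-and-swap loop, CLEAR
   inside the loop whenever a compare-and-swap attempt fails (discarding any
   exceptions raised by the failed attempt), and UPDATE once the loop
   succeeds.  */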

void
rs6000_generate_float2_double_code (rtx dst, rtx src1, rtx src2)
{
  rtx rtx_tmp0, rtx_tmp1, rtx_tmp2, rtx_tmp3;

  rtx_tmp0 = gen_reg_rtx (V2DFmode);
  rtx_tmp1 = gen_reg_rtx (V2DFmode);

  /* The destination of the vmrgew instruction layout is:
     rtx_tmp2[0] rtx_tmp3[0] rtx_tmp2[1] rtx_tmp3[1].
     Set up rtx_tmp0 and rtx_tmp1 to ensure the order of the elements after
     the vmrgew instruction will be correct.  */
  if (BYTES_BIG_ENDIAN)
    {
      emit_insn (gen_vsx_xxpermdi_v2df_be (rtx_tmp0, src1, src2,
					   GEN_INT (0)));
      emit_insn (gen_vsx_xxpermdi_v2df_be (rtx_tmp1, src1, src2,
					   GEN_INT (3)));
    }
  else
    {
      emit_insn (gen_vsx_xxpermdi_v2df (rtx_tmp0, src1, src2, GEN_INT (3)));
      emit_insn (gen_vsx_xxpermdi_v2df (rtx_tmp1, src1, src2, GEN_INT (0)));
    }

  rtx_tmp2 = gen_reg_rtx (V4SFmode);
  rtx_tmp3 = gen_reg_rtx (V4SFmode);

  emit_insn (gen_vsx_xvcdpsp (rtx_tmp2, rtx_tmp0));
  emit_insn (gen_vsx_xvcdpsp (rtx_tmp3, rtx_tmp1));

  if (BYTES_BIG_ENDIAN)
    emit_insn (gen_p8_vmrgew_v4sf (dst, rtx_tmp2, rtx_tmp3));
  else
    emit_insn (gen_p8_vmrgew_v4sf (dst, rtx_tmp3, rtx_tmp2));
}
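
/* For illustration only (not part of the original source): with
   src1 = { a0, a1 } and src2 = { b0, b1 } (V2DFmode), the sequence above is
   intended to compute

	dst = { (float) a0, (float) a1, (float) b0, (float) b1 }

   The xxpermdi insns interleave the inputs, the conversions narrow each pair
   to single precision, and vmrgew merges the converted words back into
   natural element order.  */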

void
rs6000_generate_float2_code (bool signed_convert, rtx dst, rtx src1, rtx src2)
{
  rtx rtx_tmp0, rtx_tmp1, rtx_tmp2, rtx_tmp3;

  rtx_tmp0 = gen_reg_rtx (V2DImode);
  rtx_tmp1 = gen_reg_rtx (V2DImode);

  /* The destination of the vmrgew instruction layout is:
     rtx_tmp2[0] rtx_tmp3[0] rtx_tmp2[1] rtx_tmp3[1].
     Set up rtx_tmp0 and rtx_tmp1 to ensure the order of the elements after
     the vmrgew instruction will be correct.  */
  if (BYTES_BIG_ENDIAN)
    {
      emit_insn (gen_vsx_xxpermdi_v2di_be (rtx_tmp0, src1, src2, GEN_INT (0)));
      emit_insn (gen_vsx_xxpermdi_v2di_be (rtx_tmp1, src1, src2, GEN_INT (3)));
    }
  else
    {
      emit_insn (gen_vsx_xxpermdi_v2di (rtx_tmp0, src1, src2, GEN_INT (3)));
      emit_insn (gen_vsx_xxpermdi_v2di (rtx_tmp1, src1, src2, GEN_INT (0)));
    }

  rtx_tmp2 = gen_reg_rtx (V4SFmode);
  rtx_tmp3 = gen_reg_rtx (V4SFmode);

  if (signed_convert)
    {
      emit_insn (gen_vsx_xvcvsxdsp (rtx_tmp2, rtx_tmp0));
      emit_insn (gen_vsx_xvcvsxdsp (rtx_tmp3, rtx_tmp1));
    }
  else
    {
      emit_insn (gen_vsx_xvcvuxdsp (rtx_tmp2, rtx_tmp0));
      emit_insn (gen_vsx_xvcvuxdsp (rtx_tmp3, rtx_tmp1));
    }

  if (BYTES_BIG_ENDIAN)
    emit_insn (gen_p8_vmrgew_v4sf (dst, rtx_tmp2, rtx_tmp3));
  else
    emit_insn (gen_p8_vmrgew_v4sf (dst, rtx_tmp3, rtx_tmp2));
}

void
rs6000_generate_vsigned2_code (bool signed_convert, rtx dst, rtx src1,
			       rtx src2)
{
  rtx rtx_tmp0, rtx_tmp1, rtx_tmp2, rtx_tmp3;

  rtx_tmp0 = gen_reg_rtx (V2DFmode);
  rtx_tmp1 = gen_reg_rtx (V2DFmode);

  emit_insn (gen_vsx_xxpermdi_v2df (rtx_tmp0, src1, src2, GEN_INT (0)));
  emit_insn (gen_vsx_xxpermdi_v2df (rtx_tmp1, src1, src2, GEN_INT (3)));

  rtx_tmp2 = gen_reg_rtx (V4SImode);
  rtx_tmp3 = gen_reg_rtx (V4SImode);

  if (signed_convert)
    {
      emit_insn (gen_vsx_xvcvdpsxws (rtx_tmp2, rtx_tmp0));
      emit_insn (gen_vsx_xvcvdpsxws (rtx_tmp3, rtx_tmp1));
    }
  else
    {
      emit_insn (gen_vsx_xvcvdpuxws (rtx_tmp2, rtx_tmp0));
      emit_insn (gen_vsx_xvcvdpuxws (rtx_tmp3, rtx_tmp1));
    }

  emit_insn (gen_p8_vmrgew_v4si (dst, rtx_tmp2, rtx_tmp3));
}
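
/* For illustration only (not part of the original source): these two helpers
   follow the same interleave/convert/merge scheme as the V2DF case above.
   E.g. with src1 = { d0, d1 } and src2 = { d2, d3 } (V2DFmode), the vsigned2
   sequence is intended to produce

	dst = { (int) d0, (int) d1, (int) d2, (int) d3 }

   using xvcvdpsxws for signed or xvcvdpuxws for unsigned conversions.  */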

/* Implement the TARGET_OPTAB_SUPPORTED_P hook.  */

static bool
rs6000_optab_supported_p (int op, machine_mode mode1, machine_mode,
			  optimization_type opt_type)
{
  switch (op)
    {
    case rsqrt_optab:
      return (opt_type == OPTIMIZE_FOR_SPEED
	      && RS6000_RECIP_AUTO_RSQRTE_P (mode1));

    default:
      return true;
    }
}

/* Implement TARGET_CONSTANT_ALIGNMENT.  */

static HOST_WIDE_INT
rs6000_constant_alignment (const_tree exp, HOST_WIDE_INT align)
{
  if (TREE_CODE (exp) == STRING_CST
      && (STRICT_ALIGNMENT || !optimize_size))
    return MAX (align, BITS_PER_WORD);
  return align;
}

/* Implement TARGET_STARTING_FRAME_OFFSET.  */

static HOST_WIDE_INT
rs6000_starting_frame_offset (void)
{
  if (FRAME_GROWS_DOWNWARD)
    return 0;
  return RS6000_STARTING_FRAME_OFFSET;
}
\f

/* Create an alias for a mangled name where we have changed the mangling (in
   GCC 8.1, we used U10__float128, and now we use u9__ieee128).  This is called
   via the target hook TARGET_ASM_GLOBALIZE_DECL_NAME.  */

#if TARGET_ELF && RS6000_WEAK
static void
rs6000_globalize_decl_name (FILE * stream, tree decl)
{
  const char *name = XSTR (XEXP (DECL_RTL (decl), 0), 0);

  targetm.asm_out.globalize_label (stream, name);

  if (rs6000_passes_ieee128 && name[0] == '_' && name[1] == 'Z')
    {
      tree save_asm_name = DECL_ASSEMBLER_NAME (decl);
      const char *old_name;

      ieee128_mangling_gcc_8_1 = true;
      lang_hooks.set_decl_assembler_name (decl);
      old_name = IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl));
      SET_DECL_ASSEMBLER_NAME (decl, save_asm_name);
      ieee128_mangling_gcc_8_1 = false;

      if (strcmp (name, old_name) != 0)
	{
	  fprintf (stream, "\t.weak %s\n", old_name);
	  fprintf (stream, "\t.set %s,%s\n", old_name, name);
	}
    }
}
#endif
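
/* For illustration only (not part of the original source): for a C++
   function foo taking a __float128 argument, this emits a weak alias from
   the old GCC 8.1 mangled name to the current one, roughly

	.weak _Z3fooU10__float128
	.set _Z3fooU10__float128,_Z3foou9__ieee128  */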

\f
/* On 64-bit Linux and FreeBSD systems, possibly switch the long double library
   function names from <foo>l to <foo>f128 if the default long double type is
   IEEE 128-bit.  Typically, with the C and C++ languages, the standard math.h
   include file switches the names on systems that support long double as IEEE
   128-bit, but that doesn't work if the user uses __builtin_<foo>l directly.
   In the future, glibc will export names like __ieee128_sinf128 and we can
   switch to using those instead of using sinf128, which pollutes the user's
   namespace.

   This will switch the names for the Fortran math functions as well (Fortran
   doesn't use math.h).  However, Fortran needs other changes to the compiler
   and library before you can switch the real*16 type at compile time.

   We use the TARGET_MANGLE_DECL_ASSEMBLER_NAME hook to change this name.  We
   only do this if the default is that long double is IBM extended double, and
   the user asked for IEEE 128-bit.  */

static tree
rs6000_mangle_decl_assembler_name (tree decl, tree id)
{
  if (!TARGET_IEEEQUAD_DEFAULT && TARGET_IEEEQUAD && TARGET_LONG_DOUBLE_128
      && TREE_CODE (decl) == FUNCTION_DECL && DECL_IS_BUILTIN (decl))
    {
      size_t len = IDENTIFIER_LENGTH (id);
      const char *name = IDENTIFIER_POINTER (id);

      if (name[len - 1] == 'l')
	{
	  bool uses_ieee128_p = false;
	  tree type = TREE_TYPE (decl);
	  machine_mode ret_mode = TYPE_MODE (type);

	  /* See if the function returns an IEEE 128-bit floating point type or
	     complex type.  */
	  if (ret_mode == TFmode || ret_mode == TCmode)
	    uses_ieee128_p = true;
	  else
	    {
	      function_args_iterator args_iter;
	      tree arg;

	      /* See if the function passes an IEEE 128-bit floating point type
		 or complex type.  */
	      FOREACH_FUNCTION_ARGS (type, arg, args_iter)
		{
		  machine_mode arg_mode = TYPE_MODE (arg);
		  if (arg_mode == TFmode || arg_mode == TCmode)
		    {
		      uses_ieee128_p = true;
		      break;
		    }
		}
	    }

	  /* If we passed or returned an IEEE 128-bit floating point type,
	     change the name.  */
	  if (uses_ieee128_p)
	    {
	      char *name2 = (char *) alloca (len + 4);
	      memcpy (name2, name, len - 1);
	      strcpy (name2 + len - 1, "f128");
	      id = get_identifier (name2);
	    }
	}
    }

  return id;
}
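
/* For illustration only (not part of the original source): when compiling
   with -mabi=ieeelongdouble on a system whose default long double is IBM
   extended double, a call to __builtin_sinl (whose argument and result then
   have TFmode, i.e. IEEE 128-bit) is renamed here from "sinl" to
   "sinf128".  */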

/* Predict whether the given loop in gimple will be transformed in the RTL
   doloop_optimize pass.  */

static bool
rs6000_predict_doloop_p (struct loop *loop)
{
  gcc_assert (loop);

  /* On rs6000, targetm.can_use_doloop_p is actually
     can_use_doloop_if_innermost.  Just ensure the loop is innermost.  */
  if (loop->inner != NULL)
    {
      if (dump_file && (dump_flags & TDF_DETAILS))
	fprintf (dump_file, "Predict doloop failure due to"
			    " loop nesting.\n");
      return false;
    }

  return true;
}
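
/* For illustration only (not part of the original source): an innermost
   counted loop such as

	for (i = 0; i < n; i++)
	  a[i] = b[i];

   is predicted to become a hardware count-register loop (mtctr / bdnz),
   while any loop that contains another loop is predicted not to.  */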

struct gcc_target targetm = TARGET_INITIALIZER;

#include "gt-rs6000.h"