+2011-05-10 Michael Meissner <meissner@linux.vnet.ibm.com>
+
+ PR target/48857, 48495
+ * config/rs6000/rs6000.h (VSX_SCALAR_MODE): Delete.
+ (VSX_MODE): Ditto.
+ (VSX_MOVE_MODE): Ditto.
+ (ALTIVEC_OR_VSX_VECTOR_MODE): New macro, combine all Altivec and
+ VSX vector modes.  Add V2DImode.
+ (HARD_REGNO_CALLER_SAVE_MODE): Use it instead of
+ ALTIVEC_VECTOR_MODE and VSX_VECTOR_MODE calls.
+ (MODES_TIEABLE_P): Ditto.
+
+ * config/rs6000/rs6000.c (rs6000_emit_move): Use
+ ALTIVEC_OR_VSX_VECTOR_MODE instead of ALTIVEC_VECTOR_MODE and
+ VSX_VECTOR_MODE.
+ (init_cumulative_args): Ditto.
+ (rs6000_function_arg_boundary): Ditto.
+ (rs6000_function_arg_advance_1): Ditto.
+ (rs6000_function_arg): Ditto.
+ (rs6000_function_ok_for_sibcall): Ditto.
+ (emit_frame_save): Ditto.
+ (rs6000_function_value): Ditto.
+ (rs6000_libcall_value): Ditto.
+
2011-05-10 Joseph Myers <joseph@codesourcery.com>

* config.gcc (i[34567]86-*-darwin*, x86_64-*-darwin*): Add
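
For reference while reading the hunks that follow: the core of the patch is the
combined mode predicate added to config/rs6000/rs6000.h.  It folds the old
ALTIVEC_VECTOR_MODE/VSX_VECTOR_MODE pairs at each call site into a single test
and explicitly covers V2DImode (the mode of vector long long), which neither of
the old macros matched.  The definition, reproduced here from the rs6000.h hunk
below:

#define ALTIVEC_OR_VSX_VECTOR_MODE(MODE) \
  (ALTIVEC_VECTOR_MODE (MODE) || VSX_VECTOR_MODE (MODE) \
   || (MODE) == V2DImode)

Most of the rs6000.c changes below are mechanical substitutions of this
predicate for the old two-macro disjunction.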
/* Nonzero if we can use an AltiVec register to pass this arg. */
#define USE_ALTIVEC_FOR_ARG_P(CUM,MODE,TYPE,NAMED) \
- ((ALTIVEC_VECTOR_MODE (MODE) || VSX_VECTOR_MODE (MODE)) \
+ (ALTIVEC_OR_VSX_VECTOR_MODE (MODE) \
&& (CUM)->vregno <= ALTIVEC_ARG_MAX_REG \
&& TARGET_ALTIVEC_ABI \
&& (NAMED))
}
if (SCALAR_FLOAT_MODE_P (return_mode))
rs6000_passes_float = true;
- else if (ALTIVEC_VECTOR_MODE (return_mode)
- || VSX_VECTOR_MODE (return_mode)
+ else if (ALTIVEC_OR_VSX_VECTOR_MODE (return_mode)
|| SPE_VECTOR_MODE (return_mode))
rs6000_passes_vector = true;
}
existing library interfaces.
Doubleword align SPE vectors.
- Quadword align Altivec vectors.
+ Quadword align Altivec/VSX vectors.
Quadword align large synthetic vector types. */
static unsigned int
&& int_size_in_bytes (type) >= 8
&& int_size_in_bytes (type) < 16))
return 64;
- else if ((ALTIVEC_VECTOR_MODE (mode) || VSX_VECTOR_MODE (mode))
+ else if (ALTIVEC_OR_VSX_VECTOR_MODE (mode)
|| (type && TREE_CODE (type) == VECTOR_TYPE
&& int_size_in_bytes (type) >= 16))
return 128;
{
if (SCALAR_FLOAT_MODE_P (mode))
rs6000_passes_float = true;
- else if (named && (ALTIVEC_VECTOR_MODE (mode) || VSX_VECTOR_MODE (mode)))
+ else if (named && ALTIVEC_OR_VSX_VECTOR_MODE (mode))
rs6000_passes_vector = true;
else if (SPE_VECTOR_MODE (mode)
&& !cum->stdarg
#endif
if (TARGET_ALTIVEC_ABI
- && (ALTIVEC_VECTOR_MODE (mode)
- || VSX_VECTOR_MODE (mode)
+ && (ALTIVEC_OR_VSX_VECTOR_MODE (mode)
|| (type && TREE_CODE (type) == VECTOR_TYPE
&& int_size_in_bytes (type) == 16)))
{
else
return gen_rtx_REG (mode, cum->vregno);
else if (TARGET_ALTIVEC_ABI
- && (ALTIVEC_VECTOR_MODE (mode)
- || VSX_VECTOR_MODE (mode)
+ && (ALTIVEC_OR_VSX_VECTOR_MODE (mode)
|| (type && TREE_CODE (type) == VECTOR_TYPE
&& int_size_in_bytes (type) == 16)))
{
here. */
FOREACH_FUNCTION_ARGS(fntype, type, args_iter)
if (TREE_CODE (type) == VECTOR_TYPE
- && (ALTIVEC_VECTOR_MODE (TYPE_MODE (type))
- || VSX_VECTOR_MODE (TYPE_MODE (type))))
+ && ALTIVEC_OR_VSX_VECTOR_MODE (TYPE_MODE (type)))
nvreg++;
FOREACH_FUNCTION_ARGS(TREE_TYPE (current_function_decl), type, args_iter)
if (TREE_CODE (type) == VECTOR_TYPE
- && (ALTIVEC_VECTOR_MODE (TYPE_MODE (type))
- || VSX_VECTOR_MODE (TYPE_MODE (type))))
+ && ALTIVEC_OR_VSX_VECTOR_MODE (TYPE_MODE (type)))
nvreg--;
if (nvreg > 0)
/* Some cases that need register indexed addressing. */
if ((TARGET_ALTIVEC_ABI && ALTIVEC_VECTOR_MODE (mode))
- || (TARGET_VSX && VSX_VECTOR_MODE (mode))
+ || (TARGET_VSX && ALTIVEC_OR_VSX_VECTOR_MODE (mode))
|| (TARGET_E500_DOUBLE && mode == DFmode)
|| (TARGET_SPE_ABI
&& SPE_VECTOR_MODE (mode)
else if (TREE_CODE (valtype) == COMPLEX_TYPE
&& targetm.calls.split_complex_arg)
return rs6000_complex_function_value (mode);
+ /* VSX is a superset of Altivec and adds V2DImode/V2DFmode. Since the same
+ return register is used in both cases, and we won't see V2DImode/V2DFmode
+ for pure altivec, combine the two cases. */
else if (TREE_CODE (valtype) == VECTOR_TYPE
&& TARGET_ALTIVEC && TARGET_ALTIVEC_ABI
- && ALTIVEC_VECTOR_MODE (mode))
- regno = ALTIVEC_ARG_RETURN;
- else if (TREE_CODE (valtype) == VECTOR_TYPE
- && TARGET_VSX && TARGET_ALTIVEC_ABI
- && VSX_VECTOR_MODE (mode))
+ && ALTIVEC_OR_VSX_VECTOR_MODE (mode))
regno = ALTIVEC_ARG_RETURN;
else if (TARGET_E500_DOUBLE && TARGET_HARD_FLOAT
&& (mode == DFmode || mode == DCmode
&& TARGET_HARD_FLOAT && TARGET_FPRS
&& ((TARGET_SINGLE_FLOAT && mode == SFmode) || TARGET_DOUBLE_FLOAT))
regno = FP_ARG_RETURN;
- else if (ALTIVEC_VECTOR_MODE (mode)
+ /* VSX is a superset of Altivec and adds V2DImode/V2DFmode. Since the same
+ return register is used in both cases, and we won't see V2DImode/V2DFmode
+ for pure altivec, combine the two cases. */
+ else if (ALTIVEC_OR_VSX_VECTOR_MODE (mode)
&& TARGET_ALTIVEC && TARGET_ALTIVEC_ABI)
regno = ALTIVEC_ARG_RETURN;
- else if (VSX_VECTOR_MODE (mode)
- && TARGET_VSX && TARGET_ALTIVEC_ABI)
- regno = ALTIVEC_ARG_RETURN;
else if (COMPLEX_MODE_P (mode) && targetm.calls.split_complex_arg)
return rs6000_complex_function_value (mode);
else if (TARGET_E500_DOUBLE && TARGET_HARD_FLOAT
/* When setting up caller-save slots (MODE == VOIDmode) ensure we allocate
enough space to account for vectors in FP regs. */
-#define HARD_REGNO_CALLER_SAVE_MODE(REGNO, NREGS, MODE) \
- (TARGET_VSX \
- && ((MODE) == VOIDmode || VSX_VECTOR_MODE (MODE) \
- || ALTIVEC_VECTOR_MODE (MODE)) \
+#define HARD_REGNO_CALLER_SAVE_MODE(REGNO, NREGS, MODE) \
+ (TARGET_VSX \
+ && ((MODE) == VOIDmode || ALTIVEC_OR_VSX_VECTOR_MODE (MODE)) \
&& FP_REGNO_P (REGNO) \
? V2DFmode \
: choose_hard_reg_mode ((REGNO), (NREGS), false))
((MODE) == V4SFmode \
|| (MODE) == V2DFmode) \
-#define VSX_SCALAR_MODE(MODE) \
- ((MODE) == DFmode)
-
-#define VSX_MODE(MODE) \
- (VSX_VECTOR_MODE (MODE) \
- || VSX_SCALAR_MODE (MODE))
-
-#define VSX_MOVE_MODE(MODE) \
- (VSX_VECTOR_MODE (MODE) \
- || VSX_SCALAR_MODE (MODE) \
- || ALTIVEC_VECTOR_MODE (MODE) \
- || (MODE) == TImode)
-
#define ALTIVEC_VECTOR_MODE(MODE) \
((MODE) == V16QImode \
|| (MODE) == V8HImode \
|| (MODE) == V4SFmode \
|| (MODE) == V4SImode)
+#define ALTIVEC_OR_VSX_VECTOR_MODE(MODE) \
+ (ALTIVEC_VECTOR_MODE (MODE) || VSX_VECTOR_MODE (MODE) \
+ || (MODE) == V2DImode)
+
#define SPE_VECTOR_MODE(MODE) \
((MODE) == V4HImode \
|| (MODE) == V2SFmode \
? ALTIVEC_VECTOR_MODE (MODE2) \
: ALTIVEC_VECTOR_MODE (MODE2) \
? ALTIVEC_VECTOR_MODE (MODE1) \
- : VSX_VECTOR_MODE (MODE1) \
- ? VSX_VECTOR_MODE (MODE2) \
- : VSX_VECTOR_MODE (MODE2) \
- ? VSX_VECTOR_MODE (MODE1) \
+ : ALTIVEC_OR_VSX_VECTOR_MODE (MODE1) \
+ ? ALTIVEC_OR_VSX_VECTOR_MODE (MODE2) \
+ : ALTIVEC_OR_VSX_VECTOR_MODE (MODE2) \
+ ? ALTIVEC_OR_VSX_VECTOR_MODE (MODE1) \
: 1)
/* Post-reload, we can't use any new AltiVec registers, as we already
--- /dev/null
+/* { dg-do compile { target { powerpc*-*-* } } } */
+/* { dg-skip-if "" { powerpc*-*-darwin* } { "*" } { "" } } */
+/* { dg-require-effective-target powerpc_vsx_ok } */
+/* { dg-options "-O2 -mcpu=power7 -mabi=altivec" } */
+/* { dg-final { scan-assembler-times "lxvd2x" 1 } } */
+/* { dg-final { scan-assembler-times "stxvd2x" 1 } } */
+/* { dg-final { scan-assembler-not "ld" } } */
+/* { dg-final { scan-assembler-not "lwz" } } */
+/* { dg-final { scan-assembler-not "stw" } } */
+/* { dg-final { scan-assembler-not "addi" } } */
+
+typedef vector long long v2di_type;
+
+v2di_type
+return_v2di (v2di_type *ptr)
+{
+ return *ptr; /* should generate lxvd2x 34,0,3. */
+}
+
+void
+pass_v2di (v2di_type arg, v2di_type *ptr)
+{
+ *ptr = arg; /* should generate stxvd2x 34,0,{3,5}. */
+}
+
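+
The test scans each function separately; a quick way to see the combined effect
is a caller that chains the two.  The sketch below is illustrative only (it is
not part of the patch or the testsuite): copy_v2di is a hypothetical helper,
assumed to be built with the same options as the dg-options line above.  With
the fix, the V2DImode value is expected to move from the lxvd2x to the stxvd2x
entirely in a VSX register, without the GPR loads/stores that the
scan-assembler-not directives rule out.

typedef vector long long v2di_type;

/* Prototypes matching the two functions in the test above.  */
extern v2di_type return_v2di (v2di_type *ptr);
extern void pass_v2di (v2di_type arg, v2di_type *ptr);

/* Hypothetical helper, not part of the patch: copy one V2DImode value
   through the vector-register argument/return path the test exercises.  */
void
copy_v2di (v2di_type *src, v2di_type *dst)
{
  pass_v2di (return_v2di (src), dst);
}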