+2014-08-28 Bill Schmidt <wschmidt@linux.vnet.ibm.com>
+
+ * config/rs6000/altivec.h (vec_xl): New #define.
+ (vec_xst): Likewise.
+ * config/rs6000/rs6000-builtin.def (XXSPLTD_V2DF): New built-in.
+ (XXSPLTD_V2DI): Likewise.
+ (DIV_V2DI): Likewise.
+ (UDIV_V2DI): Likewise.
+ (MUL_V2DI): Likewise.
+ * config/rs6000/rs6000-c.c (altivec_overloaded_builtins): Add
+ entries for VSX_BUILTIN_XVRDPI, VSX_BUILTIN_DIV_V2DI,
+ VSX_BUILTIN_UDIV_V2DI, VSX_BUILTIN_MUL_V2DI,
+	VSX_BUILTIN_XXSPLTD_V2DF, and VSX_BUILTIN_XXSPLTD_V2DI.
+ * config/rs6000/vsx.md (UNSPEC_VSX_XXSPLTD): New unspec.
+ (UNSPEC_VSX_DIVSD): Likewise.
+ (UNSPEC_VSX_DIVUD): Likewise.
+ (UNSPEC_VSX_MULSD): Likewise.
+ (vsx_mul_v2di): New insn-and-split.
+ (vsx_div_v2di): Likewise.
+ (vsx_udiv_v2di): Likewise.
+ (vsx_xxspltd_<mode>): New insn.
+
2014-08-28 David Malcolm <dmalcolm@redhat.com>
* rtl.h (RTX_PREV): Added checked casts to uses of PREV_INSN and
#define vec_sqrt __builtin_vec_sqrt
#define vec_vsx_ld __builtin_vec_vsx_ld
#define vec_vsx_st __builtin_vec_vsx_st
+#define vec_xl __builtin_vec_vsx_ld
+#define vec_xst __builtin_vec_vsx_st
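(Note: vec_xl and vec_xst are defined here as straight aliases for the
existing vec_vsx_ld/vec_vsx_st built-ins, so they perform vector loads and
stores with no 16-byte alignment requirement.  A minimal caller-side sketch,
illustrative only and not part of the patch; the names are invented:

#include <altivec.h>

double src[2] = { 1.0, 2.0 };
double dst[2];

void
copy_pair (void)
{
  vector double v = vec_xl (0, src);  /* unaligned-capable load  */
  vec_xst (v, 0, dst);                /* unaligned-capable store */
}
)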
/* Note, xxsldi and xxpermdi were added as __builtin_vsx_<xxx> functions
   instead of __builtin_vec_<xxx>.  */
BU_VSX_2 (VEC_MERGEL_V2DI, "mergel_2di", CONST, vsx_mergel_v2di)
BU_VSX_2 (VEC_MERGEH_V2DF, "mergeh_2df", CONST, vsx_mergeh_v2df)
BU_VSX_2 (VEC_MERGEH_V2DI, "mergeh_2di", CONST, vsx_mergeh_v2di)
+BU_VSX_2 (XXSPLTD_V2DF, "xxspltd_2df", CONST, vsx_xxspltd_v2df)
+BU_VSX_2 (XXSPLTD_V2DI, "xxspltd_2di", CONST, vsx_xxspltd_v2di)
+BU_VSX_2 (DIV_V2DI, "div_2di", CONST, vsx_div_v2di)
+BU_VSX_2 (UDIV_V2DI, "udiv_2di", CONST, vsx_udiv_v2di)
+BU_VSX_2 (MUL_V2DI, "mul_2di", CONST, vsx_mul_v2di)
/* VSX abs builtin functions. */
BU_VSX_A (XVABSDP, "xvabsdp", CONST, absv2df2)
RS6000_BTI_V2DF, RS6000_BTI_V2DF, 0, 0 },
{ ALTIVEC_BUILTIN_VEC_ROUND, ALTIVEC_BUILTIN_VRFIN,
RS6000_BTI_V4SF, RS6000_BTI_V4SF, 0, 0 },
+ { ALTIVEC_BUILTIN_VEC_ROUND, VSX_BUILTIN_XVRDPI,
+ RS6000_BTI_V2DF, RS6000_BTI_V2DF, 0, 0 },
{ ALTIVEC_BUILTIN_VEC_RECIP, ALTIVEC_BUILTIN_VRECIPFP,
RS6000_BTI_V4SF, RS6000_BTI_V4SF, RS6000_BTI_V4SF, 0 },
{ ALTIVEC_BUILTIN_VEC_RECIP, VSX_BUILTIN_RECIP_V2DF,
    RS6000_BTI_V2DF, RS6000_BTI_V2DF, RS6000_BTI_V2DF, 0 },
{ VSX_BUILTIN_VEC_DIV, VSX_BUILTIN_XVDIVDP,
RS6000_BTI_V2DF, RS6000_BTI_V2DF, RS6000_BTI_V2DF, 0 },
+ { VSX_BUILTIN_VEC_DIV, VSX_BUILTIN_DIV_V2DI,
+ RS6000_BTI_V2DI, RS6000_BTI_V2DI, RS6000_BTI_V2DI, 0 },
+ { VSX_BUILTIN_VEC_DIV, VSX_BUILTIN_UDIV_V2DI,
+ RS6000_BTI_unsigned_V2DI, RS6000_BTI_unsigned_V2DI, RS6000_BTI_unsigned_V2DI, 0 },
{ ALTIVEC_BUILTIN_VEC_LD, ALTIVEC_BUILTIN_LVX_V2DF,
RS6000_BTI_V2DF, RS6000_BTI_INTSI, ~RS6000_BTI_V2DF, 0 },
{ ALTIVEC_BUILTIN_VEC_LD, ALTIVEC_BUILTIN_LVX_V2DI,
    RS6000_BTI_V2DI, RS6000_BTI_INTSI, ~RS6000_BTI_V2DI, 0 },
{ VSX_BUILTIN_VEC_MUL, VSX_BUILTIN_XVMULDP,
RS6000_BTI_V2DF, RS6000_BTI_V2DF, RS6000_BTI_V2DF, 0 },
+ { VSX_BUILTIN_VEC_MUL, VSX_BUILTIN_MUL_V2DI,
+ RS6000_BTI_V2DI, RS6000_BTI_V2DI, RS6000_BTI_V2DI, 0 },
+ { VSX_BUILTIN_VEC_MUL, VSX_BUILTIN_MUL_V2DI,
+ RS6000_BTI_unsigned_V2DI, RS6000_BTI_unsigned_V2DI, RS6000_BTI_unsigned_V2DI, 0 },
{ ALTIVEC_BUILTIN_VEC_MULE, ALTIVEC_BUILTIN_VMULEUB,
RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, 0 },
{ ALTIVEC_BUILTIN_VEC_MULE, ALTIVEC_BUILTIN_VMULESB,
    RS6000_BTI_V8HI, RS6000_BTI_V16QI, RS6000_BTI_V16QI, 0 },
{ ALTIVEC_BUILTIN_VEC_SPLAT, ALTIVEC_BUILTIN_VSPLTW,
RS6000_BTI_bool_V4SI, RS6000_BTI_bool_V4SI, RS6000_BTI_INTSI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SPLAT, VSX_BUILTIN_XXSPLTD_V2DF,
+ RS6000_BTI_V2DF, RS6000_BTI_V2DF, RS6000_BTI_INTSI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SPLAT, VSX_BUILTIN_XXSPLTD_V2DI,
+ RS6000_BTI_V2DI, RS6000_BTI_V2DI, RS6000_BTI_INTSI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SPLAT, VSX_BUILTIN_XXSPLTD_V2DI,
+ RS6000_BTI_unsigned_V2DI, RS6000_BTI_unsigned_V2DI, RS6000_BTI_INTSI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SPLAT, VSX_BUILTIN_XXSPLTD_V2DI,
+ RS6000_BTI_bool_V2DI, RS6000_BTI_bool_V2DI, RS6000_BTI_INTSI, 0 },
{ ALTIVEC_BUILTIN_VEC_VSPLTW, ALTIVEC_BUILTIN_VSPLTW,
RS6000_BTI_V4SF, RS6000_BTI_V4SF, RS6000_BTI_INTSI, 0 },
{ ALTIVEC_BUILTIN_VEC_VSPLTW, ALTIVEC_BUILTIN_VSPLTW,
UNSPEC_VSX_ROUND_IC
UNSPEC_VSX_SLDWI
UNSPEC_VSX_XXSPLTW
+ UNSPEC_VSX_XXSPLTD
+ UNSPEC_VSX_DIVSD
+ UNSPEC_VSX_DIVUD
+ UNSPEC_VSX_MULSD
])
;; VSX moves
[(set_attr "type" "<VStype_simple>")
(set_attr "fp_type" "<VSfptype_mul>")])
+;; Emulate vector multiply with scalar operations for vec_mul in V2DImode.
+(define_insn_and_split "vsx_mul_v2di"
+ [(set (match_operand:V2DI 0 "vsx_register_operand" "=wa")
+ (unspec:V2DI [(match_operand:V2DI 1 "vsx_register_operand" "wa")
+ (match_operand:V2DI 2 "vsx_register_operand" "wa")]
+ UNSPEC_VSX_MULSD))]
+ "VECTOR_MEM_VSX_P (V2DImode)"
+ "#"
+ "VECTOR_MEM_VSX_P (V2DImode) && !reload_completed && !reload_in_progress"
+ [(const_int 0)]
+ "
+{
+ rtx op0 = operands[0];
+ rtx op1 = operands[1];
+ rtx op2 = operands[2];
+ rtx op3 = gen_reg_rtx (DImode);
+ rtx op4 = gen_reg_rtx (DImode);
+ rtx op5 = gen_reg_rtx (DImode);
+ emit_insn (gen_vsx_extract_v2di (op3, op1, GEN_INT (0)));
+ emit_insn (gen_vsx_extract_v2di (op4, op2, GEN_INT (0)));
+ emit_insn (gen_muldi3 (op5, op3, op4));
+ emit_insn (gen_vsx_extract_v2di (op3, op1, GEN_INT (1)));
+ emit_insn (gen_vsx_extract_v2di (op4, op2, GEN_INT (1)));
+ emit_insn (gen_muldi3 (op3, op3, op4));
+ emit_insn (gen_vsx_concat_v2di (op0, op5, op3));
+}"
+ [(set_attr "type" "mul")])
+
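(The split above emulates the V2DI multiply with scalars: ISA 2.07 has no
vector doubleword multiply instruction, so each 64-bit element is extracted,
multiplied via muldi3, and the two products are reassembled with
vsx_concat_v2di.  A conceptual C equivalent, illustration only and not part
of the patch; the function name is invented and GNU vector subscripting is
assumed:

/* Element-by-element view of what vsx_mul_v2di computes.  */
vector long long
mul_v2di_conceptual (vector long long a, vector long long b)
{
  vector long long r;
  r[0] = a[0] * b[0];  /* extract both element 0's, muldi3 */
  r[1] = a[1] * b[1];  /* extract both element 1's, muldi3 */
  return r;
}
)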
(define_insn "*vsx_div<mode>3"
[(set (match_operand:VSX_F 0 "vsx_register_operand" "=<VSr>,?<VSa>")
(div:VSX_F (match_operand:VSX_F 1 "vsx_register_operand" "<VSr>,<VSa>")
[(set_attr "type" "<VStype_div>")
(set_attr "fp_type" "<VSfptype_div>")])
+;; Emulate vector divide with scalar operations for vec_div in V2DImode.
+(define_insn_and_split "vsx_div_v2di"
+ [(set (match_operand:V2DI 0 "vsx_register_operand" "=wa")
+ (unspec:V2DI [(match_operand:V2DI 1 "vsx_register_operand" "wa")
+ (match_operand:V2DI 2 "vsx_register_operand" "wa")]
+ UNSPEC_VSX_DIVSD))]
+ "VECTOR_MEM_VSX_P (V2DImode)"
+ "#"
+ "VECTOR_MEM_VSX_P (V2DImode) && !reload_completed && !reload_in_progress"
+ [(const_int 0)]
+ "
+{
+ rtx op0 = operands[0];
+ rtx op1 = operands[1];
+ rtx op2 = operands[2];
+ rtx op3 = gen_reg_rtx (DImode);
+ rtx op4 = gen_reg_rtx (DImode);
+ rtx op5 = gen_reg_rtx (DImode);
+ emit_insn (gen_vsx_extract_v2di (op3, op1, GEN_INT (0)));
+ emit_insn (gen_vsx_extract_v2di (op4, op2, GEN_INT (0)));
+ emit_insn (gen_divdi3 (op5, op3, op4));
+ emit_insn (gen_vsx_extract_v2di (op3, op1, GEN_INT (1)));
+ emit_insn (gen_vsx_extract_v2di (op4, op2, GEN_INT (1)));
+ emit_insn (gen_divdi3 (op3, op3, op4));
+ emit_insn (gen_vsx_concat_v2di (op0, op5, op3));
+}"
+ [(set_attr "type" "div")])
+
+(define_insn_and_split "vsx_udiv_v2di"
+ [(set (match_operand:V2DI 0 "vsx_register_operand" "=wa")
+ (unspec:V2DI [(match_operand:V2DI 1 "vsx_register_operand" "wa")
+ (match_operand:V2DI 2 "vsx_register_operand" "wa")]
+ UNSPEC_VSX_DIVUD))]
+ "VECTOR_MEM_VSX_P (V2DImode)"
+ "#"
+ "VECTOR_MEM_VSX_P (V2DImode) && !reload_completed && !reload_in_progress"
+ [(const_int 0)]
+ "
+{
+ rtx op0 = operands[0];
+ rtx op1 = operands[1];
+ rtx op2 = operands[2];
+ rtx op3 = gen_reg_rtx (DImode);
+ rtx op4 = gen_reg_rtx (DImode);
+ rtx op5 = gen_reg_rtx (DImode);
+ emit_insn (gen_vsx_extract_v2di (op3, op1, GEN_INT (0)));
+ emit_insn (gen_vsx_extract_v2di (op4, op2, GEN_INT (0)));
+ emit_insn (gen_udivdi3 (op5, op3, op4));
+ emit_insn (gen_vsx_extract_v2di (op3, op1, GEN_INT (1)));
+ emit_insn (gen_vsx_extract_v2di (op4, op2, GEN_INT (1)));
+ emit_insn (gen_udivdi3 (op3, op3, op4));
+ emit_insn (gen_vsx_concat_v2di (op0, op5, op3));
+}"
+ [(set_attr "type" "div")])
+
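(Both divide splitters follow the same extract/operate/concat shape as the
multiply above; divdi3 and udivdi3 give truncating signed and unsigned 64-bit
division, so each element behaves exactly like scalar C division.
Illustration only, with an invented function name:

/* Element-by-element view of vsx_div_v2di; vsx_udiv_v2di is the
   unsigned analogue.  */
vector long long
div_v2di_conceptual (vector long long a, vector long long b)
{
  vector long long r;
  r[0] = a[0] / b[0];  /* divdi3: truncates toward zero */
  r[1] = a[1] / b[1];
  return r;
}
)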
;; *tdiv* instruction returning the FG flag
(define_expand "vsx_tdiv<mode>3_fg"
[(set (match_dup 3)
"xxspltw %x0,%x1,%2"
[(set_attr "type" "vecperm")])
+;; V2DF/V2DI splat for use by vec_splat builtin
+(define_insn "vsx_xxspltd_<mode>"
+ [(set (match_operand:VSX_D 0 "vsx_register_operand" "=wa")
+ (unspec:VSX_D [(match_operand:VSX_D 1 "vsx_register_operand" "wa")
+ (match_operand:QI 2 "u5bit_cint_operand" "i")]
+ UNSPEC_VSX_XXSPLTD))]
+ "VECTOR_MEM_VSX_P (<MODE>mode)"
+{
+ if ((VECTOR_ELT_ORDER_BIG && INTVAL (operands[2]) == 0)
+ || (!VECTOR_ELT_ORDER_BIG && INTVAL (operands[2]) == 1))
+ return "xxpermdi %x0,%x1,%x1,0";
+ else
+ return "xxpermdi %x0,%x1,%x1,3";
+}
+ [(set_attr "type" "vecperm")])
+
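(In "xxpermdi %x0,%x1,%x1,DM" the two-bit DM immediate selects one doubleword
from each source, so with identical sources DM=0 replicates doubleword 0 and
DM=3 replicates doubleword 1; the VECTOR_ELT_ORDER_BIG test maps the user's
element number onto the right hardware doubleword on both endiannesses.  From
the caller's side, illustration only:

vector double v  = { 10.0, 20.0 };
vector double s0 = vec_splat (v, 0);  /* { 10.0, 10.0 } */
vector double s1 = vec_splat (v, 1);  /* { 20.0, 20.0 } */
)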
;; V4SF/V4SI interleave
(define_insn "vsx_xxmrghw_<mode>"
[(set (match_operand:VSX_W 0 "vsx_register_operand" "=wf,?<VSa>")
+2014-08-28 Bill Schmidt <wschmidt@linux.vnet.ibm.com>
+
+ * gcc.target/powerpc/builtins-1.c: Add tests for vec_xl, vec_xst,
+ vec_round, vec_splat, vec_div, and vec_mul.
+ * gcc.target/powerpc/builtins-2.c: New test.
+
2014-08-28 Richard Biener <rguenther@suse.de>
PR tree-optimization/62283
#include <altivec.h>
+vector double y = { 2.0, 4.0 };
+vector double z;
+
int main ()
{
vector float fa = {1.0, 2.0, 3.0, -4.0};
vector signed char scb = vec_cntlz (sca);
vector unsigned char cb = vec_cntlz (ca);
+ vector double dd = vec_xl (0, &y);
+ vec_xst (dd, 0, &z);
+
+ vector double de = vec_round (dd);
+
+ vector double df = vec_splat (de, 0);
+ vector double dg = vec_splat (de, 1);
+ vector long long l3 = vec_splat (l2, 0);
+ vector long long l4 = vec_splat (l2, 1);
+ vector unsigned long long u3 = vec_splat (u2, 0);
+ vector unsigned long long u4 = vec_splat (u2, 1);
+ vector bool long long l5 = vec_splat (ld, 0);
+ vector bool long long l6 = vec_splat (ld, 1);
+
+ vector long long l7 = vec_div (l3, l4);
+ vector unsigned long long u5 = vec_div (u3, u4);
+
+ vector long long l8 = vec_mul (l3, l4);
+ vector unsigned long long u6 = vec_mul (u3, u4);
+
return 0;
}
--- /dev/null
+/* { dg-do run { target { powerpc64le-*-* } } } */
+/* { dg-options "-mcpu=power8" } */
+
+#include <altivec.h>
+
+void abort (void);
+
+int main ()
+{
+ vector long long sa = {27L, -14L};
+ vector long long sb = {-9L, -2L};
+
+ vector unsigned long long ua = {27L, 14L};
+ vector unsigned long long ub = {9L, 2L};
+
+ vector long long sc = vec_div (sa, sb);
+ vector unsigned long long uc = vec_div (ua, ub);
+
+ if (sc[0] != -3L || sc[1] != 7L || uc[0] != 3L || uc[1] != 7L)
+ abort ();
+
+ vector long long sd = vec_mul (sa, sb);
+ vector unsigned long long ud = vec_mul (ua, ub);
+
+ if (sd[0] != -243L || sd[1] != 28L || ud[0] != 243L || ud[1] != 28L)
+ abort ();
+
+ vector long long se = vec_splat (sa, 0);
+ vector long long sf = vec_splat (sa, 1);
+ vector unsigned long long ue = vec_splat (ua, 0);
+ vector unsigned long long uf = vec_splat (ua, 1);
+
+ if (se[0] != 27L || se[1] != 27L || sf[0] != -14L || sf[1] != -14L
+ || ue[0] != 27L || ue[1] != 27L || uf[0] != 14L || uf[1] != 14L)
+ abort ();
+
+ return 0;
+}