+2017-07-06 Carl Love <cel@us.ibm.com>
+
+ * config/rs6000/rs6000-c.c: Add support for built-in functions
+ vector signed int vec_subc (vector signed int, vector signed int);
+ vector signed __int128 vec_subc (vector signed __int128,
+ vector signed __int128);
+ vector unsigned __int128 vec_subc (vector unsigned __int128,
+ vector unsigned __int128);
+ vector signed int vec_sube (vector signed int, vector signed int,
+ vector signed int);
+ vector unsigned int vec_sube (vector unsigned int,
+ vector unsigned int,
+ vector unsigned int);
+ vector signed __int128 vec_sube (vector signed __int128,
+ vector signed __int128,
+ vector signed __int128);
+ vector unsigned __int128 vec_sube (vector unsigned __int128,
+ vector unsigned __int128,
+ vector unsigned __int128);
+ vector signed int vec_subec (vector signed int, vector signed int,
+ vector signed int);
+ vector unsigned int vec_subec (vector unsigned int,
+ vector unsigned int,
+ vector unsigned int);
+ vector signed __int128 vec_subec (vector signed __int128,
+ vector signed __int128,
+ vector signed __int128);
+ vector unsigned __int128 vec_subec (vector unsigned __int128,
+ vector unsigned __int128,
+ vector unsigned __int128);
+ * config/rs6000/rs6000.c (ALTIVEC_BUILTIN_VEC_SUBE,
+ ALTIVEC_BUILTIN_VEC_SUBEC): Add def_builtins.
+ * config/rs6000/rs6000-builtin.def (SUBE, SUBEC): Add
+ BU_ALTIVEC_OVERLOAD_X definitions.
+ * config/rs6000/altivec.h (vec_sube, vec_subec): Add builtin defines.
+ * doc/extend.texi: Update the built-in documentation file for the new
+ built-in functions.
+
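For context (not part of the patch): the new subtract built-ins mirror the existing vec_addc/vec_adde/vec_addec carry chain. A minimal usage sketch, assuming a POWER8-or-later target and <altivec.h>; the function and variable names are illustrative only:

#include <altivec.h>

/* Two-limb (256-bit) subtraction: propagate the borrow from the low
   128-bit limb into the high limb.  vec_subc returns 1 per element
   when no borrow occurs, 0 when the subtraction borrows.  */
void
sub256 (vector unsigned __int128 a_hi, vector unsigned __int128 a_lo,
        vector unsigned __int128 b_hi, vector unsigned __int128 b_lo,
        vector unsigned __int128 *r_hi, vector unsigned __int128 *r_lo)
{
  vector unsigned __int128 no_borrow = vec_subc (a_lo, b_lo);

  *r_lo = vec_sub (a_lo, b_lo);
  *r_hi = vec_sube (a_hi, b_hi, no_borrow);  /* high limb minus borrow */
}
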
2017-07-06 David Malcolm <dmalcolm@redhat.com>
PR c++/79300
(linemap_client_expand_location_to_spelling_point): Add "aspect"
param, and pass it to expand_location_1.
2017-07-06 Sebastian Peryt <sebastian.peryt@intel.com>
* config/i386/avx512fintrin.h (_mm_mask_getexp_round_ss,
#define vec_unsignedo __builtin_vec_vunsignedo
#define vec_vsubfp __builtin_vec_vsubfp
#define vec_subc __builtin_vec_subc
+#define vec_sube __builtin_vec_sube
+#define vec_subec __builtin_vec_subec
#define vec_vsubsws __builtin_vec_vsubsws
#define vec_vsubshs __builtin_vec_vsubshs
#define vec_vsubsbs __builtin_vec_vsubsbs
BU_ALTIVEC_OVERLOAD_X (STVLXL, "stvlxl")
BU_ALTIVEC_OVERLOAD_X (STVRX, "stvrx")
BU_ALTIVEC_OVERLOAD_X (STVRXL, "stvrxl")
+BU_ALTIVEC_OVERLOAD_X (SUBE, "sube")
+BU_ALTIVEC_OVERLOAD_X (SUBEC, "subec")
BU_ALTIVEC_OVERLOAD_X (VCFSX, "vcfsx")
BU_ALTIVEC_OVERLOAD_X (VCFUX, "vcfux")
BU_ALTIVEC_OVERLOAD_X (VSPLTB, "vspltb")
RS6000_BTI_unsigned_V16QI, RS6000_BTI_bool_V16QI, RS6000_BTI_unsigned_V16QI, 0 },
{ ALTIVEC_BUILTIN_VEC_VSUBUBM, ALTIVEC_BUILTIN_VSUBUBM,
RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_bool_V16QI, 0 },
+
+ { ALTIVEC_BUILTIN_VEC_SUBC, ALTIVEC_BUILTIN_VSUBCUW,
+ RS6000_BTI_V4SI, RS6000_BTI_V4SI, RS6000_BTI_V4SI, 0 },
{ ALTIVEC_BUILTIN_VEC_SUBC, ALTIVEC_BUILTIN_VSUBCUW,
RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SUBC, P8V_BUILTIN_VSUBCUQ,
+ RS6000_BTI_unsigned_V1TI, RS6000_BTI_unsigned_V1TI,
+ RS6000_BTI_unsigned_V1TI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SUBC, P8V_BUILTIN_VSUBCUQ,
+ RS6000_BTI_V1TI, RS6000_BTI_V1TI, RS6000_BTI_V1TI, 0 },
+
{ ALTIVEC_BUILTIN_VEC_SUBS, ALTIVEC_BUILTIN_VSUBUBS,
RS6000_BTI_unsigned_V16QI, RS6000_BTI_bool_V16QI, RS6000_BTI_unsigned_V16QI, 0 },
{ ALTIVEC_BUILTIN_VEC_SUBS, ALTIVEC_BUILTIN_VSUBUBS,
/* else, fall through and process the Power9 alternative below */
}
- if (fcode == ALTIVEC_BUILTIN_VEC_ADDE)
+ if (fcode == ALTIVEC_BUILTIN_VEC_ADDE
+ || fcode == ALTIVEC_BUILTIN_VEC_SUBE)
{
- /* vec_adde needs to be special cased because there is no instruction
- for the {un}signed int version. */
+ /* vec_adde and vec_sube need to be special cased because there is no
+ instruction for the {un}signed int version. */
if (nargs != 3)
{
- error ("vec_adde only accepts 3 arguments");
+ const char *name = fcode == ALTIVEC_BUILTIN_VEC_ADDE ?
+ "vec_adde": "vec_sube";
+ error ("%s only accepts 3 arguments", name);
return error_mark_node;
}
{
/* For {un}signed ints,
vec_adde (va, vb, carryv) == vec_add (vec_add (va, vb),
- vec_and (carryv, 0x1)). */
+ vec_and (carryv, 1)).
+ vec_sube (va, vb, carryv) == vec_sub (vec_sub (va, vb),
+ vec_and (carryv, 1)). */
case SImode:
{
+ tree add_sub_builtin;
+
vec<tree, va_gc> *params = make_tree_vector ();
vec_safe_push (params, arg0);
vec_safe_push (params, arg1);
- tree add_builtin = rs6000_builtin_decls[ALTIVEC_BUILTIN_VEC_ADD];
- tree call = altivec_resolve_overloaded_builtin (loc, add_builtin,
+
+ if (fcode == ALTIVEC_BUILTIN_VEC_ADDE)
+ add_sub_builtin = rs6000_builtin_decls[ALTIVEC_BUILTIN_VEC_ADD];
+ else
+ add_sub_builtin = rs6000_builtin_decls[ALTIVEC_BUILTIN_VEC_SUB];
+
+ tree call = altivec_resolve_overloaded_builtin (loc,
+ add_sub_builtin,
params);
tree const1 = build_int_cstu (TREE_TYPE (arg0_type), 1);
tree ones_vector = build_vector_from_val (arg0_type, const1);
params = make_tree_vector ();
vec_safe_push (params, call);
vec_safe_push (params, and_expr);
- return altivec_resolve_overloaded_builtin (loc, add_builtin,
+ return altivec_resolve_overloaded_builtin (loc, add_sub_builtin,
params);
}
- /* For {un}signed __int128s use the vaddeuqm instruction
- directly. */
+ /* For {un}signed __int128s use the vaddeuqm/vsubeuqm instructions
+ directly. */
case TImode:
{
- tree adde_bii = rs6000_builtin_decls[P8V_BUILTIN_VEC_VADDEUQM];
- return altivec_resolve_overloaded_builtin (loc, adde_bii,
- arglist);
+ tree bii;
+
+ if (fcode == ALTIVEC_BUILTIN_VEC_ADDE)
+ bii = rs6000_builtin_decls[P8V_BUILTIN_VEC_VADDEUQM];
+
+ else
+ bii = rs6000_builtin_decls[P8V_BUILTIN_VEC_VSUBEUQM];
+
+ return altivec_resolve_overloaded_builtin (loc, bii, arglist);
}
/* Types other than {un}signed int and {un}signed __int128
}
}
- if (fcode == ALTIVEC_BUILTIN_VEC_ADDEC)
+ if (fcode == ALTIVEC_BUILTIN_VEC_ADDEC
+ || fcode == ALTIVEC_BUILTIN_VEC_SUBEC)
{
- /* vec_addec needs to be special cased because there is no instruction
- for the {un}signed int version. */
+ /* vec_addec and vec_subec need to be special cased because there is
+ no instruction for the {un}signed int version. */
if (nargs != 3)
{
- error ("vec_addec only accepts 3 arguments");
+ const char *name = fcode == ALTIVEC_BUILTIN_VEC_ADDEC ?
+ "vec_addec": "vec_subec";
+ error ("%s only accepts 3 arguments", name);
return error_mark_node;
}
/* Use save_expr to ensure that operands used more than once
that may have side effects (like calls) are only evaluated
once. */
+ tree as_builtin;
+ tree as_c_builtin;
+
arg0 = save_expr (arg0);
arg1 = save_expr (arg1);
vec<tree, va_gc> *params = make_tree_vector ();
vec_safe_push (params, arg0);
vec_safe_push (params, arg1);
- tree addc_builtin = rs6000_builtin_decls[ALTIVEC_BUILTIN_VEC_ADDC];
- tree call1 = altivec_resolve_overloaded_builtin (loc, addc_builtin,
+
+ if (fcode == ALTIVEC_BUILTIN_VEC_ADDEC)
+ as_c_builtin = rs6000_builtin_decls[ALTIVEC_BUILTIN_VEC_ADDC];
+ else
+ as_c_builtin = rs6000_builtin_decls[ALTIVEC_BUILTIN_VEC_SUBC];
+
+ tree call1 = altivec_resolve_overloaded_builtin (loc, as_c_builtin,
params);
params = make_tree_vector ();
vec_safe_push (params, arg0);
vec_safe_push (params, arg1);
- tree add_builtin = rs6000_builtin_decls[ALTIVEC_BUILTIN_VEC_ADD];
- tree call2 = altivec_resolve_overloaded_builtin (loc, add_builtin,
+
+ if (fcode == ALTIVEC_BUILTIN_VEC_ADDEC)
+ as_builtin = rs6000_builtin_decls[ALTIVEC_BUILTIN_VEC_ADD];
+ else
+ as_builtin = rs6000_builtin_decls[ALTIVEC_BUILTIN_VEC_SUB];
+
+ tree call2 = altivec_resolve_overloaded_builtin (loc, as_builtin,
params);
tree const1 = build_int_cstu (TREE_TYPE (arg0_type), 1);
tree ones_vector = build_vector_from_val (arg0_type, const1);
params = make_tree_vector ();
vec_safe_push (params, call2);
vec_safe_push (params, and_expr);
- call2 = altivec_resolve_overloaded_builtin (loc, addc_builtin,
+ call2 = altivec_resolve_overloaded_builtin (loc, as_c_builtin,
params);
params = make_tree_vector ();
vec_safe_push (params, call1);
return altivec_resolve_overloaded_builtin (loc, or_builtin,
params);
}
- /* For {un}signed __int128s use the vaddecuq instruction. */
+ /* For {un}signed __int128s use the vaddecuq/vsubecuq
+ instructions. */
case TImode:
{
- tree VADDECUQ_bii = rs6000_builtin_decls[P8V_BUILTIN_VEC_VADDECUQ];
- return altivec_resolve_overloaded_builtin (loc, VADDECUQ_bii,
- arglist);
+ tree bii;
+
+ if (fcode == ALTIVEC_BUILTIN_VEC_ADDEC)
+ bii = rs6000_builtin_decls[P8V_BUILTIN_VEC_VADDECUQ];
+
+ else
+ bii = rs6000_builtin_decls[P8V_BUILTIN_VEC_VSUBECUQ];
+
+ return altivec_resolve_overloaded_builtin (loc, bii, arglist);
}
/* Types other than {un}signed int and {un}signed __int128
are errors. */
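
As a reading aid (not from the patch): for vector {un}signed int there is no single sube/subec instruction, so the resolver above open-codes the operations. In source terms the SImode paths are equivalent to the following sketch; the helper names are made up:

#include <altivec.h>

/* Equivalent of the SImode vec_sube expansion:
   vec_sub (vec_sub (va, vb), vec_and (carryv, 1)).  */
static inline vector unsigned int
sube_si_sketch (vector unsigned int va, vector unsigned int vb,
                vector unsigned int carryv)
{
  vector unsigned int ones = vec_splats (1u);
  return vec_sub (vec_sub (va, vb), vec_and (carryv, ones));
}

/* Equivalent of the SImode vec_subec expansion: OR of the carry-out of
   (va - vb) and the carry-out of subtracting the low carry bit from
   that difference.  */
static inline vector unsigned int
subec_si_sketch (vector unsigned int va, vector unsigned int vb,
                 vector unsigned int carryv)
{
  vector unsigned int ones = vec_splats (1u);
  vector unsigned int diff = vec_sub (va, vb);
  return vec_or (vec_subc (va, vb),
                 vec_subc (diff, vec_and (carryv, ones)));
}
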
ALTIVEC_BUILTIN_VEC_CMPNE);
def_builtin ("__builtin_vec_mul", opaque_ftype_opaque_opaque,
ALTIVEC_BUILTIN_VEC_MUL);
+ def_builtin ("__builtin_vec_sube", opaque_ftype_opaque_opaque_opaque,
+ ALTIVEC_BUILTIN_VEC_SUBE);
+ def_builtin ("__builtin_vec_subec", opaque_ftype_opaque_opaque_opaque,
+ ALTIVEC_BUILTIN_VEC_SUBEC);
/* Cell builtins. */
def_builtin ("__builtin_altivec_lvlx", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVLX);
vector unsigned char vec_vsububm (vector unsigned char,
vector unsigned char);
+vector signed int vec_subc (vector signed int, vector signed int);
vector unsigned int vec_subc (vector unsigned int, vector unsigned int);
+vector signed __int128 vec_subc (vector signed __int128,
+ vector signed __int128);
+vector unsigned __int128 vec_subc (vector unsigned __int128,
+ vector unsigned __int128);
+
+vector signed int vec_sube (vector signed int, vector signed int,
+ vector signed int);
+vector unsigned int vec_sube (vector unsigned int, vector unsigned int,
+ vector unsigned int);
+vector signed __int128 vec_sube (vector signed __int128,
+ vector signed __int128,
+ vector signed __int128);
+vector unsigned __int128 vec_sube (vector unsigned __int128,
+ vector unsigned __int128,
+ vector unsigned __int128);
+
+vector signed int vec_subec (vector signed int, vector signed int,
+ vector signed int);
+vector unsigned int vec_subec (vector unsigned int, vector unsigned int,
+ vector unsigned int);
+vector signed __int128 vec_subec (vector signed __int128,
+ vector signed __int128,
+ vector signed __int128);
+vector unsigned __int128 vec_subec (vector unsigned __int128,
+ vector unsigned __int128,
+ vector unsigned __int128);
vector unsigned char vec_subs (vector bool char, vector unsigned char);
vector unsigned char vec_subs (vector unsigned char, vector bool char);
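
To illustrate where vec_subec fits (it produces the borrow leaving a middle limb, for chains longer than two), a hedged extension of the two-limb sketch shown earlier; names are illustrative, not from the patch:

#include <altivec.h>

/* Three-limb (384-bit) subtraction, least-significant limb first.  */
void
sub384 (const vector unsigned __int128 a[3],
        const vector unsigned __int128 b[3],
        vector unsigned __int128 r[3])
{
  vector unsigned __int128 borrow0 = vec_subc (a[0], b[0]);
  vector unsigned __int128 borrow1 = vec_subec (a[1], b[1], borrow0);

  r[0] = vec_sub (a[0], b[0]);
  r[1] = vec_sube (a[1], b[1], borrow0);
  r[2] = vec_sube (a[2], b[2], borrow1);
}
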
+
+2017-07-06 Carl Love <cel@us.ibm.com>
+
+ * gcc.target/powerpc/p8vector-builtin-8.c (foo): Add test cases for
+ the new vec_subc, vec_sube, vec_subec built-ins. Add the missing test
+ cases for the vec_addc, vec_adde and vec_addec built-ins.
+
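The test above only scans the generated assembly. A separate, hedged runtime sanity check of the borrow convention, assuming an AltiVec-enabled target and GCC's vector subscripting; the element values are arbitrary:

#include <altivec.h>
#include <assert.h>

int
main (void)
{
  vector unsigned int a = { 5, 0, 7, 1 };
  vector unsigned int b = { 3, 1, 7, 2 };

  /* vec_subc yields 1 in each element where a >= b (no borrow)
     and 0 where the subtraction borrows.  */
  vector unsigned int c = vec_subc (a, b);

  assert (c[0] == 1 && c[1] == 0 && c[2] == 1 && c[3] == 0);
  return 0;
}
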
2017-07-06 David Malcolm <dmalcolm@redhat.com>
PR c++/79300
* gcc.dg/spellcheck-fields-2.c (test_macro): Update expected
underlining within macro expansion.
2017-07-06 Sebastian Peryt <sebastian.peryt@intel.com>
* gcc.target/i386/avx512f-vgetexpsd-1.c (_mm_mask_getexp_sd,
vector bool char vbca, vbcb;
vector unsigned short vusa, vusb;
vector bool short vbsa, vbsb;
-vector unsigned int vuia, vuib;
+vector signed int vsia, vsib, vsic;
+vector unsigned int vuia, vuib, vuic;
vector bool int vbia, vbib;
vector signed long long vsla, vslb;
vector unsigned long long vula, vulb, vulc;
vector bool char *vbcr,
vector unsigned short *vusr,
vector bool short *vbsr,
+ vector signed int *vsir,
vector unsigned int *vuir,
vector bool int *vbir,
vector unsigned long long *vulr,
vector unsigned __int128 *vuxr,
vector double *vdr)
{
+ *vsir++ = vec_addc (vsia, vsib);
+ *vuir++ = vec_addc (vuia, vuib);
*vsxr++ = vec_addc (vsxa, vsxb);
*vuxr++ = vec_addc (vuxa, vuxb);
+ *vsir++ = vec_adde (vsia, vsib, vsic);
+ *vuir++ = vec_adde (vuia, vuib, vuic);
*vsxr++ = vec_adde (vsxa, vsxb, vsxc);
*vuxr++ = vec_adde (vuxa, vuxb, vuxc);
+ *vsir++ = vec_addec (vsia, vsib, vsic);
+ *vuir++ = vec_addec (vuia, vuib, vuic);
*vsxr++ = vec_addec (vsxa, vsxb, vsxc);
*vuxr++ = vec_addec (vuxa, vuxb, vuxc);
*vucr++ = vec_bperm (vuca, vucb);
*vuxr++ = vec_pmsum_be (vula, vulb);
*vuir++ = vec_shasigma_be (vuia, 0, 1);
*vulr++ = vec_shasigma_be (vula, 0, 1);
+ *vsir++ = vec_subc (vsia, vsib);
+ *vuir++ = vec_subc (vuia, vuib);
+ *vsxr++ = vec_subc (vsxa, vsxb);
+ *vuxr++ = vec_subc (vuxa, vuxb);
+ *vsir++ = vec_sube (vsia, vsib, vsic);
+ *vuir++ = vec_sube (vuia, vuib, vuic);
+ *vsxr++ = vec_sube (vsxa, vsxb, vsxc);
+ *vuxr++ = vec_sube (vuxa, vuxb, vuxc);
+ *vsir++ = vec_subec (vsia, vsib, vsic);
+ *vuir++ = vec_subec (vuia, vuib, vuic);
+ *vsxr++ = vec_subec (vsxa, vsxb, vsxc);
+ *vuxr++ = vec_subec (vuxa, vuxb, vuxc);
}
/* { dg-final { scan-assembler-times "vaddcuq" 2 } } */
/* { dg-final { scan-assembler-times "vaddeuqm" 2 } } */
/* { dg-final { scan-assembler-times "vaddecuq" 2 } } */
+/* { dg-final { scan-assembler-times "vaddcuw" 6 } } */
+/* { dg-final { scan-assembler-times "vadduwm" 4 } } */
+/* { dg-final { scan-assembler-times "vsubcuq" 2 } } */
+/* { dg-final { scan-assembler-times "vsubeuqm" 2 } } */
+/* { dg-final { scan-assembler-times "vsubecuq" 2 } } */
+/* { dg-final { scan-assembler-times "vsubcuw" 4 } } */
+/* { dg-final { scan-assembler-times "vsubuwm" 4 } } */
/* { dg-final { scan-assembler-times "vbpermq" 2 } } */
/* { dg-final { scan-assembler-times "xxleqv" 4 } } */
/* { dg-final { scan-assembler-times "vgbbd" 1 } } */