+2017-06-29 Carl Love <cel@us.ibm.com>
+
+ * config/rs6000/rs6000-c.c: Add support for built-in functions
+ vector signed int vec_signed (vector float);
+ vector signed long long vec_signed (vector double);
+ vector signed int vec_signed2 (vector double, vector double);
+ vector signed int vec_signede (vector double);
+ vector signed int vec_signedo (vector double);
+ vector unsigned int vec_unsigned (vector float);
+ vector unsigned long long vec_unsigned (vector double);
+ vector unsigned int vec_unsigned2 (vector double, vector double);
+ vector unsigned int vec_unsignede (vector double);
+ vector unsigned int vec_unsignedo (vector double);
+ * config/rs6000/rs6000.c (rs6000_generate_vsigned2_code): Add
+ instruction generator.
+ * config/rs6000/vsx.md (UNSPEC_VSX_XVCVSPSXWS, UNSPEC_VSX_XVCVSPSXDS,
+ UNSPEC_VSX_VSIGNED2): Add UNSPECs.
+ (vsx_xvcvspsxws, vsx_xvcvspuxws): Add define_insns.
+ (vsigned2_v2df, vsignedo_v2df, vsignede_v2df, vunsigned2_v2df,
+ vunsignedo_v2df, vunsignede_v2df): Add define_expands.
+ * config/rs6000/rs6000-builtin.def (VEC_SIGNED, VEC_UNSIGNED,
+ VEC_SIGNED2, VEC_UNSIGNED2, VEC_SIGNEDE, VEC_UNSIGNEDE, VEC_SIGNEDO,
+ VEC_UNSIGNEDO): Add definitions.
+ * config/rs6000/altivec.h (vec_signed, vec_signed2,
+ vec_signede and vec_signedo, vec_unsigned, vec_unsigned2,
+ vec_unsignede, vec_unsignedo): Add builtin defines.
+ * config/rs6000/rs6000-protos.h (rs6000_generate_vsigned2_code): Add
+ extern declaration.
+ * doc/extend.texi: Update the built-in documentation file for the
+ new built-in functions.
+
2017-06-29 Richard Biener <rguenther@suse.de>
* tree-vect-loop.c (vect_analyze_scalar_cycles_1): Do not add
#define vec_rlnm(a,b,c) (__builtin_vec_rlnm((a),((b)<<8)|(c)))
#define vec_rsqrt __builtin_vec_rsqrt
#define vec_rsqrte __builtin_vec_rsqrte
+#define vec_signed __builtin_vec_vsigned
+#define vec_signed2 __builtin_vec_vsigned2
+#define vec_signede __builtin_vec_vsignede
+#define vec_signedo __builtin_vec_vsignedo
+#define vec_unsigned __builtin_vec_vunsigned
+#define vec_unsigned2 __builtin_vec_vunsigned2
+#define vec_unsignede __builtin_vec_vunsignede
+#define vec_unsignedo __builtin_vec_vunsignedo
#define vec_vsubfp __builtin_vec_vsubfp
#define vec_subc __builtin_vec_subc
#define vec_vsubsws __builtin_vec_vsubsws
BU_VSX_2 (FLOAT2_V2DI, "float2_v2di", CONST, float2_v2di)
BU_VSX_2 (UNS_FLOAT2_V2DI, "uns_float2_v2di", CONST, uns_float2_v2di)
+BU_VSX_2 (VEC_VSIGNED2_V2DF, "vsigned2_v2df", CONST, vsigned2_v2df)
+BU_VSX_2 (VEC_VUNSIGNED2_V2DF, "vunsigned2_v2df", CONST, vunsigned2_v2df)
+
/* VSX abs builtin functions. */
BU_VSX_A (XVABSDP, "xvabsdp", CONST, absv2df2)
BU_VSX_A (XVNABSDP, "xvnabsdp", CONST, vsx_nabsv2df2)
BU_VSX_1 (DOUBLEL_V4SF, "doublel_v4sf", CONST, doublelv4sf2)
BU_VSX_1 (UNS_DOUBLEL_V4SI, "uns_doublel_v4si", CONST, unsdoublelv4si2)
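+
+/* VSX float/double to signed and unsigned int conversion builtin functions. */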
+BU_VSX_1 (VEC_VSIGNED_V4SF, "vsigned_v4sf", CONST, vsx_xvcvspsxws)
+BU_VSX_1 (VEC_VSIGNED_V2DF, "vsigned_v2df", CONST, vsx_xvcvdpsxds)
+BU_VSX_1 (VEC_VSIGNEDE_V2DF, "vsignede_v2df", CONST, vsignede_v2df)
+BU_VSX_1 (VEC_VSIGNEDO_V2DF, "vsignedo_v2df", CONST, vsignedo_v2df)
+
+BU_VSX_1 (VEC_VUNSIGNED_V4SF, "vunsigned_v4sf", CONST, vsx_xvcvspuxws)
+BU_VSX_1 (VEC_VUNSIGNED_V2DF, "vunsigned_v2df", CONST, vsx_xvcvdpuxds)
+BU_VSX_1 (VEC_VUNSIGNEDE_V2DF, "vunsignede_v2df", CONST, vunsignede_v2df)
+BU_VSX_1 (VEC_VUNSIGNEDO_V2DF, "vunsignedo_v2df", CONST, vunsignedo_v2df)
+
/* VSX predicate functions. */
BU_VSX_P (XVCMPEQSP_P, "xvcmpeqsp_p", CONST, vector_eq_v4sf_p)
BU_VSX_P (XVCMPGESP_P, "xvcmpgesp_p", CONST, vector_ge_v4sf_p)
BU_VSX_OVERLOAD_2 (XXSPLTW, "xxspltw")
BU_VSX_OVERLOAD_2 (FLOAT2, "float2")
BU_VSX_OVERLOAD_2 (UNS_FLOAT2, "uns_float2")
+BU_VSX_OVERLOAD_2 (VSIGNED2, "vsigned2")
+BU_VSX_OVERLOAD_2 (VUNSIGNED2, "vunsigned2")
/* 1 argument VSX overloaded builtin functions. */
BU_VSX_OVERLOAD_1 (DOUBLE, "double")
BU_VSX_OVERLOAD_1 (FLOATE, "floate")
BU_VSX_OVERLOAD_1 (FLOATO, "floato")
+BU_VSX_OVERLOAD_1 (VSIGNED, "vsigned")
+BU_VSX_OVERLOAD_1 (VSIGNEDE, "vsignede")
+BU_VSX_OVERLOAD_1 (VSIGNEDO, "vsignedo")
+
+BU_VSX_OVERLOAD_1 (VUNSIGNED, "vunsigned")
+BU_VSX_OVERLOAD_1 (VUNSIGNEDE, "vunsignede")
+BU_VSX_OVERLOAD_1 (VUNSIGNEDO, "vunsignedo")
+
/* VSX builtins that are handled as special cases. */
BU_VSX_OVERLOAD_X (LD, "ld")
BU_VSX_OVERLOAD_X (ST, "st")
{ ALTIVEC_BUILTIN_VEC_VREVE, ALTIVEC_BUILTIN_VREVE_V16QI,
RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, 0, 0 },
+ { VSX_BUILTIN_VEC_VSIGNED, VSX_BUILTIN_VEC_VSIGNED_V4SF,
+ RS6000_BTI_V4SI, RS6000_BTI_V4SF, 0, 0 },
+ { VSX_BUILTIN_VEC_VSIGNED, VSX_BUILTIN_VEC_VSIGNED_V2DF,
+ RS6000_BTI_V2DI, RS6000_BTI_V2DF, 0, 0 },
+ { VSX_BUILTIN_VEC_VSIGNEDE, VSX_BUILTIN_VEC_VSIGNEDE_V2DF,
+ RS6000_BTI_V4SI, RS6000_BTI_V2DF, 0, 0 },
+ { VSX_BUILTIN_VEC_VSIGNEDO, VSX_BUILTIN_VEC_VSIGNEDO_V2DF,
+ RS6000_BTI_V4SI, RS6000_BTI_V2DF, 0, 0 },
+ { VSX_BUILTIN_VEC_VSIGNED2, VSX_BUILTIN_VEC_VSIGNED2_V2DF,
+ RS6000_BTI_V4SI, RS6000_BTI_V2DF, RS6000_BTI_V2DF, 0 },
+
+ { VSX_BUILTIN_VEC_VUNSIGNED, VSX_BUILTIN_VEC_VUNSIGNED_V4SF,
+ RS6000_BTI_unsigned_V4SI, RS6000_BTI_V4SF, 0, 0 },
+ { VSX_BUILTIN_VEC_VUNSIGNED, VSX_BUILTIN_VEC_VUNSIGNED_V2DF,
+ RS6000_BTI_unsigned_V2DI, RS6000_BTI_V2DF, 0, 0 },
+ { VSX_BUILTIN_VEC_VUNSIGNEDE, VSX_BUILTIN_VEC_VUNSIGNEDE_V2DF,
+ RS6000_BTI_unsigned_V4SI, RS6000_BTI_V2DF, 0, 0 },
+ { VSX_BUILTIN_VEC_VUNSIGNEDO, VSX_BUILTIN_VEC_VUNSIGNEDO_V2DF,
+ RS6000_BTI_unsigned_V4SI, RS6000_BTI_V2DF, 0, 0 },
+ { VSX_BUILTIN_VEC_VUNSIGNED2, VSX_BUILTIN_VEC_VUNSIGNED2_V2DF,
+ RS6000_BTI_unsigned_V4SI, RS6000_BTI_V2DF,
+ RS6000_BTI_V2DF, 0 },
+
/* Crypto builtins. */
{ CRYPTO_BUILTIN_VPERMXOR, CRYPTO_BUILTIN_VPERMXOR_V16QI,
RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI,
extern void rs6000_expand_interleave (rtx, rtx, rtx, bool);
extern void rs6000_scale_v2df (rtx, rtx, int);
extern void rs6000_generate_float2_code (bool, rtx, rtx, rtx);
+extern void rs6000_generate_vsigned2_code (bool, rtx, rtx, rtx);
extern int expand_block_clear (rtx[]);
extern int expand_block_move (rtx[]);
extern bool expand_block_compare (rtx[]);
emit_insn (gen_p8_vmrgew_v4sf (dst, rtx_tmp3, rtx_tmp2));
}
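+
+/* Convert the elements of the two V2DF inputs SRC1 and SRC2 to 32-bit
+   integers and collect the four results in the V4SI vector DST.  The
+   conversion is signed when SIGNED_CONVERT is true and unsigned otherwise.
+   The elements are gathered with xxpermdi, converted with xvcvdp[su]xws,
+   and the even words of the two partial results are merged with vmrgew.  */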
+void
+rs6000_generate_vsigned2_code (bool signed_convert, rtx dst, rtx src1,
+ rtx src2)
+{
+ rtx rtx_tmp0, rtx_tmp1, rtx_tmp2, rtx_tmp3;
+
+ rtx_tmp0 = gen_reg_rtx (V2DFmode);
+ rtx_tmp1 = gen_reg_rtx (V2DFmode);
+
+ emit_insn (gen_vsx_xxpermdi_v2df (rtx_tmp0, src1, src2, GEN_INT (0)));
+ emit_insn (gen_vsx_xxpermdi_v2df (rtx_tmp1, src1, src2, GEN_INT (3)));
+
+ rtx_tmp2 = gen_reg_rtx (V4SImode);
+ rtx_tmp3 = gen_reg_rtx (V4SImode);
+
+ if (signed_convert)
+ {
+ emit_insn (gen_vsx_xvcvdpsxws (rtx_tmp2, rtx_tmp0));
+ emit_insn (gen_vsx_xvcvdpsxws (rtx_tmp3, rtx_tmp1));
+ }
+ else
+ {
+ emit_insn (gen_vsx_xvcvdpuxws (rtx_tmp2, rtx_tmp0));
+ emit_insn (gen_vsx_xvcvdpuxws (rtx_tmp3, rtx_tmp1));
+ }
+
+ emit_insn (gen_p8_vmrgew_v4si (dst, rtx_tmp2, rtx_tmp3));
+}
+
/* Implement the TARGET_OPTAB_SUPPORTED_P hook. */
static bool
UNSPEC_VSX_XVCVDPSXDS
UNSPEC_VSX_XVCVDPUXDS
UNSPEC_VSX_SIGN_EXTEND
+ UNSPEC_VSX_XVCVSPSXWS
+ UNSPEC_VSX_XVCVSPSXDS
UNSPEC_VSX_VSLO
UNSPEC_VSX_EXTRACT
UNSPEC_VSX_SXEXPDP
UNSPEC_VSX_VIEXP
UNSPEC_VSX_VTSTDC
UNSPEC_VSX_VEC_INIT
+ UNSPEC_VSX_VSIGNED2
UNSPEC_LXVL
UNSPEC_STXVL
UNSPEC_VCLZLSBB
DONE;
})
+;; Convert a vector of 64-bit floating point numbers to a vector of
+;; 64-bit signed integers.
(define_insn "vsx_xvcvdpsxds"
[(set (match_operand:V2DI 0 "vsx_register_operand" "=wa")
(unspec:V2DI [(match_operand:V2DF 1 "vsx_register_operand" "wa")]
"xvcvdpsxds %x0,%x1"
[(set_attr "type" "vecdouble")])
+;; Convert a vector of 32-bit floating point numbers to a vector of
+;; 32-bit signed integers.
+(define_insn "vsx_xvcvspsxws"
+ [(set (match_operand:V4SI 0 "vsx_register_operand" "=wa")
+ (unspec:V4SI [(match_operand:V4SF 1 "vsx_register_operand" "wa")]
+ UNSPEC_VSX_XVCVSPSXWS))]
+ "VECTOR_UNIT_VSX_P (V4SFmode)"
+ "xvcvspsxws %x0,%x1"
+ [(set_attr "type" "vecfloat")])
+
+;; Convert a vector of 64-bit floating point numbers to a vector of
+;; 64-bit unsigned integers.
(define_expand "vsx_xvcvdpuxds_scale"
[(match_operand:V2DI 0 "vsx_register_operand" "")
(match_operand:V2DF 1 "vsx_register_operand" "")
DONE;
})
+;; Convert a vector of 32-bit floating point numbers to a vector of
+;; 32-bit unsigned integers.
+(define_insn "vsx_xvcvspuxws"
+ [(set (match_operand:V4SI 0 "vsx_register_operand" "=wa")
+ (unspec:V4SI [(match_operand:V4SF 1 "vsx_register_operand" "wa")]
+ UNSPEC_VSX_XVCVSPSXWS))]
+ "VECTOR_UNIT_VSX_P (V4SFmode)"
+ "xvcvspuxws %x0,%x1"
+ [(set_attr "type" "vecfloat")])
+
(define_insn "vsx_xvcvdpuxds"
[(set (match_operand:V2DI 0 "vsx_register_operand" "=wa")
(unspec:V2DI [(match_operand:V2DF 1 "vsx_register_operand" "wa")]
DONE;
})
+;; Generate vsigned2
+;; Convert two vectors of double-precision floats to a single vector
+;; of 32-bit signed ints.
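+;; The elements of the two inputs are gathered with xxpermdi, converted to
+;; words with xvcvdpsxws, and the even words of the two partial results are
+;; merged with vmrgew; see rs6000_generate_vsigned2_code in rs6000.c.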
+(define_expand "vsigned2_v2df"
+ [(match_operand:V4SI 0 "register_operand" "=wa")
+ (unspec:V4SI [(match_operand:V2DF 1 "register_operand" "wa")
+ (match_operand:V2DF 2 "register_operand" "wa")]
+ UNSPEC_VSX_VSIGNED2)]
+ "TARGET_VSX"
+{
+ rtx rtx_src1, rtx_src2, rtx_dst;
+ bool signed_convert = true;
+
+ rtx_dst = operands[0];
+ rtx_src1 = operands[1];
+ rtx_src2 = operands[2];
+
+ rs6000_generate_vsigned2_code (signed_convert, rtx_dst, rtx_src1, rtx_src2);
+ DONE;
+})
+
+;; Generate vsignedo_v2df
+;; Convert a vector of double-precision floats to signed ints, placing
+;; the results in the odd words of the V4SI result.
+(define_expand "vsignedo_v2df"
+ [(set (match_operand:V4SI 0 "register_operand" "=wa")
+ (match_operand:V2DF 1 "register_operand" "wa"))]
+ "TARGET_VSX"
+{
+ if (VECTOR_ELT_ORDER_BIG)
+ {
+ rtx rtx_tmp;
+ rtx rtx_val = GEN_INT (12);
+ rtx_tmp = gen_reg_rtx (V4SImode);
+
+ emit_insn (gen_vsx_xvcvdpsxws (rtx_tmp, operands[1]));
+
+ /* Big endian word numbering for the words in the operand is 0 1 2 3.
+ Take (rtx_tmp rtx_tmp) and shift left three words
+ 0 1 2 3 0 1 2 3 => 3 0 1 2
+ The converted values are now in words 1 and 3 of the result. */
+
+ emit_insn (gen_altivec_vsldoi_v4si (operands[0], rtx_tmp,
+ rtx_tmp, rtx_val));
+ }
+ else
+ /* Little endian word numbering for operand is 3 2 1 0.
+ Result words 3 and 1 are where they need to be. */
+ emit_insn (gen_vsx_xvcvdpsxws (operands[0], operands[1]));
+
+ DONE;
+}
+ [(set_attr "type" "veccomplex")])
+
+;; Generate vsignede_v2df
+;; Convert a vector of double-precision floats to signed ints, placing
+;; the results in the even words of the V4SI result.
+(define_expand "vsignede_v2df"
+ [(set (match_operand:V4SI 0 "register_operand" "=v")
+ (match_operand:V2DF 1 "register_operand" "v"))]
+ "TARGET_VSX"
+{
+ if (VECTOR_ELT_ORDER_BIG)
+ /* Big endian word numbering for the words in the operand is 0 1 2 3.
+ The converted values are already in words 0 and 2 of the result,
+ where they need to be. */
+ emit_insn (gen_vsx_xvcvdpsxws (operands[0], operands[1]));
+
+ else
+ {
+ rtx rtx_tmp;
+ rtx rtx_val = GEN_INT (12);
+ rtx_tmp = gen_reg_rtx (V4SImode);
+
+ emit_insn (gen_vsx_xvcvdpsxws (rtx_tmp, operands[1]));
+
+ /* Little endian word numbering for the operand is 3 2 1 0.
+ Take (rtx_tmp rtx_tmp) and shift left three words
+ 0 1 2 3 0 1 2 3 => 3 0 1 2
+ Words 0 and 2 are now where they need to be for the result. */
+ emit_insn (gen_altivec_vsldoi_v4si (operands[0], rtx_tmp,
+ rtx_tmp, rtx_val));
+ }
+ DONE;
+}
+ [(set_attr "type" "veccomplex")])
+
+;; Generate vunsigned2
+;; Convert two vectors of double-precision floats to a single vector
+;; of 32-bit unsigned ints.
+(define_expand "vunsigned2_v2df"
+ [(match_operand:V4SI 0 "register_operand" "=v")
+ (unspec:V4SI [(match_operand:V2DF 1 "register_operand" "v")
+ (match_operand:V2DF 2 "register_operand" "v")]
+ UNSPEC_VSX_VSIGNED2)]
+ "TARGET_VSX"
+{
+ rtx rtx_src1, rtx_src2, rtx_dst;
+ bool signed_convert = false;
+
+ rtx_dst = operands[0];
+ rtx_src1 = operands[1];
+ rtx_src2 = operands[2];
+
+ rs6000_generate_vsigned2_code (signed_convert, rtx_dst, rtx_src1, rtx_src2);
+ DONE;
+})
+
+;; Generate vunsignedo_v2df
+;; Convert a vector of double-precision floats to unsigned ints, placing
+;; the results in the odd words of the V4SI result.
+(define_expand "vunsignedo_v2df"
+ [(set (match_operand:V4SI 0 "register_operand" "=v")
+ (match_operand:V2DF 1 "register_operand" "v"))]
+ "TARGET_VSX"
+{
+ if (VECTOR_ELT_ORDER_BIG)
+ {
+ rtx rtx_tmp;
+ rtx rtx_val = GEN_INT (12);
+ rtx_tmp = gen_reg_rtx (V4SImode);
+
+ emit_insn (gen_vsx_xvcvdpuxws (rtx_tmp, operands[1]));
+
+ /* Big endian word numbering for the words in the operand is 0 1 2 3.
+ Take (rtx_tmp rtx_tmp) and shift left three words
+ 0 1 2 3 0 1 2 3 => 3 0 1 2
+ The converted values are now in words 1 and 3 of the result. */
+
+ emit_insn (gen_altivec_vsldoi_v4si (operands[0], rtx_tmp,
+ rtx_tmp, rtx_val));
+ }
+ else
+ /* Little endian word numbering for operand is 3 2 1 0.
+ Result words 3 and 1 are where they need to be. */
+ emit_insn (gen_vsx_xvcvdpuxws (operands[0], operands[1]));
+
+ DONE;
+}
+ [(set_attr "type" "veccomplex")])
+
+;; Generate vunsignede_v2df
+;; Convert a vector of double-precision floats to unsigned ints, placing
+;; the results in the even words of the V4SI result.
+(define_expand "vunsignede_v2df"
+ [(set (match_operand:V4SI 0 "register_operand" "=v")
+ (match_operand:V2DF 1 "register_operand" "v"))]
+ "TARGET_VSX"
+{
+ if (VECTOR_ELT_ORDER_BIG)
+ /* Big endian word numbering for the words in the operand is 0 1 2 3.
+ The converted values are already in words 0 and 2 of the result,
+ where they need to be. */
+ emit_insn (gen_vsx_xvcvdpuxws (operands[0], operands[1]));
+
+ else
+ {
+ rtx rtx_tmp;
+ rtx rtx_val = GEN_INT (12);
+ rtx_tmp = gen_reg_rtx (V4SImode);
+
+ emit_insn (gen_vsx_xvcvdpuxws (rtx_tmp, operands[1]));
+
+ /* Little endian word numbering for the operand is 3 2 1 0.
+ Take (rtx_tmp rtx_tmp) and shift left three words
+ 0 1 2 3 0 1 2 3 => 3 0 1 2
+ Words 0 and 2 are now where they need to be for the result. */
+ emit_insn (gen_altivec_vsldoi_v4si (operands[0], rtx_tmp,
+ rtx_tmp, rtx_val));
+ }
+ DONE;
+}
+ [(set_attr "type" "veccomplex")])
+
;; Only optimize (float (fix x)) -> frz if we are in fast-math mode, since
;; since the xvrdpiz instruction does not truncate the value if the floating
;; point value is < LONG_MIN or > LONG_MAX.
vector bool char,
vector unsigned char);
+vector signed long long vec_signed (vector double);
+vector signed int vec_signed (vector float);
+
+vector signed int vec_signede (vector double);
+vector signed int vec_signedo (vector double);
+vector signed int vec_signed2 (vector double, vector double);
+
vector signed char vec_sl (vector signed char,
vector unsigned char);
vector unsigned char vec_sl (vector unsigned char,
vector float vec_trunc (vector float);
+vector unsigned long long vec_unsigned (vector double);
+vector unsigned int vec_unsigned (vector float);
+
+vector unsigned int vec_unsignede (vector double);
+vector unsigned int vec_unsignedo (vector double);
+vector unsigned int vec_unsigned2 (vector double, vector double);
+
vector signed short vec_unpackh (vector signed char);
vector bool short vec_unpackh (vector bool char);
vector signed int vec_unpackh (vector signed short);
+2017-06-29 Carl Love <cel@us.ibm.com>
+
+ * gcc.target/powerpc/builtins-3-runnable.c (test_int_result,
+ test_unsigned_int_result, test_ll_int_result,
+ test_ll_unsigned_int_result): Add result checking functions, add
+ debug support.
+ (main): Add builtin function tests.
+
2017-06-29 Carl Love <cel@us.ibm.com>
* gcc.target/powerpc/builtins-3-vec_reve-runnable.c (dg-options,
#include <altivec.h> // vector
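+
+/* Compile with -DDEBUG to report mismatches with printf instead of
+   calling abort. */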
+#ifdef DEBUG
+#include <stdio.h>
+#endif
+
#define ALL 1
#define EVEN 2
#define ODD 3
void abort (void);
-void test_result_sp(int check, vector float vec_result, vector float vec_expected)
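+/* Compare vec_result to vec_expected element by element.  The check
+   argument selects whether all, only the even, or only the odd word
+   elements are compared. */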
+void test_int_result(int check, vector int vec_result, vector int vec_expected)
{
int i;
- for(i = 0; i<4; i++) {
- switch (check) {
- case ALL:
- break;
- case EVEN:
- if (i%2 == 0)
+ for (i = 0; i < 4; i++) {
+ switch (check) {
+ case ALL:
break;
- else
- continue;
- case ODD:
- if (i%2 != 0)
+ case EVEN:
+ if (i%2 == 0)
+ break;
+ else
+ continue;
+ case ODD:
+ if (i%2 != 0)
+ break;
+ else
+ continue;
+ }
+
+ if (vec_result[i] != vec_expected[i]) {
+#ifdef DEBUG
+ printf("Test_int_result: ");
+ printf("vec_result[%d] (%d) != vec_expected[%d] (%d)\n",
+ i, vec_result[i], i, vec_expected[i]);
+#else
+ abort();
+#endif
+ }
+ }
+}
+
+void test_unsigned_int_result(int check, vector unsigned int vec_result,
+ vector unsigned int vec_expected)
+{
+ int i;
+
+ for (i = 0; i < 4; i++) {
+ switch (check) {
+ case ALL:
break;
- else
- continue;
+ case EVEN:
+ if (i%2 == 0)
+ break;
+ else
+ continue;
+ case ODD:
+ if (i%2 != 0)
+ break;
+ else
+ continue;
+ }
+
+ if (vec_result[i] != vec_expected[i]) {
+#ifdef DEBUG
+ printf("Test_unsigned_int_result: ");
+ printf("vec_result[%d] (%u) != vec_expected[%d] (%u)\n",
+ i, vec_result[i], i, vec_expected[i]);
+#else
+ abort();
+#endif
+ }
+
}
+}
- if (vec_result[i] != vec_expected[i])
- abort();
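+/* Compare the two elements of the long long vectors vec_result and
+   vec_expected, reporting any mismatch. */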
+void test_ll_int_result(vector long long int vec_result,
+ vector long long int vec_expected)
+{
+ int i;
+
+ for (i = 0; i < 2; i++)
+ if (vec_result[i] != vec_expected[i]) {
+#ifdef DEBUG
+ printf("Test_ll_int_result: ");
+ printf("vec_result[%d] (%lld) != vec_expected[%d] (%lld)\n",
+ i, vec_result[i], i, vec_expected[i]);
+#else
+ abort();
+#endif
+ }
+}
+
+void test_ll_unsigned_int_result(vector long long unsigned int vec_result,
+ vector long long unsigned int vec_expected)
+{
+ int i;
+
+ for (i = 0; i < 2; i++)
+ if (vec_result[i] != vec_expected[i]) {
+#ifdef DEBUG
+ printf("Test_ll_unsigned_int_result: ");
+ printf("vec_result[%d] (%llu) != vec_expected[%d] (%llu)\n",
+ i, vec_result[i], i, vec_expected[i]);
+#else
+ abort();
+#endif
+ }
+}
+
+void test_result_sp(int check, vector float vec_result,
+ vector float vec_expected)
+{
+ int i;
+ for(i = 0; i<4; i++) {
+
+ switch (check) {
+ case ALL:
+ break;
+ case EVEN:
+ if (i%2 == 0)
+ break;
+ else
+ continue;
+ case ODD:
+ if (i%2 != 0)
+ break;
+ else
+ continue;
+ }
+
+ if (vec_result[i] != vec_expected[i]) {
+#ifdef DEBUG
+ printf("Test_result_sp: ");
+ printf("vec_result[%d] (%f) != vec_expected[%d] (%f)\n",
+ i, vec_result[i], i, vec_expected[i]);
+#else
+ abort();
+#endif
+ }
}
}
void test_result_dp(vector double vec_result, vector double vec_expected)
{
- if (vec_result[0] != vec_expected[0])
+ if (vec_result[0] != vec_expected[0]) {
+#ifdef DEBUG
+ printf("Test_result_dp: ");
+ printf("vec_result[0] (%f) != vec_expected[0] (%f)\n",
+ vec_result[0], vec_expected[0]);
+#else
abort();
+#endif
+ }
- if (vec_result[1] != vec_expected[1])
+ if (vec_result[1] != vec_expected[1]) {
+#ifdef DEBUG
+ printf("Test_result_dp: ");
+ printf("vec_result[1] (%f) != vec_expected[1] (%f)\n",
+ vec_result[1], vec_expected[1]);
+#else
abort();
+#endif
+ }
}
int main()
{
int i;
- vector unsigned int vec_unint;
- vector signed int vec_int;
+ vector unsigned int vec_unint, vec_uns_int_expected, vec_uns_int_result;
+ vector signed int vec_int, vec_int_expected, vec_int_result;
vector long long int vec_ll_int0, vec_ll_int1;
+ vector long long int vec_ll_int_expected, vec_ll_int_result;
vector long long unsigned int vec_ll_uns_int0, vec_ll_uns_int1;
+ vector long long unsigned int vec_ll_uns_int_expected, vec_ll_uns_int_result;
vector float vec_flt, vec_flt_result, vec_flt_expected;
vector double vec_dble0, vec_dble1, vec_dble_result, vec_dble_expected;
vec_flt_expected = (vector float){0.00, 34.00, 0.00, 97.00};
vec_flt_result = vec_floato (vec_dble0);
test_result_sp(ODD, vec_flt_result, vec_flt_expected);
+
+ /* Convert single precision float to int */
+ vec_flt = (vector float){-14.30, 34.00, 22.00, 97.00};
+ vec_int_expected = (vector signed int){-14, 34, 22, 97};
+ vec_int_result = vec_signed (vec_flt);
+ test_int_result (ALL, vec_int_result, vec_int_expected);
+
+ /* Convert double precision float to long long int */
+ vec_dble0 = (vector double){-124.930, 81234.49};
+ vec_ll_int_expected = (vector long long signed int){-124, 81234};
+ vec_ll_int_result = vec_signed (vec_dble0);
+ test_ll_int_result (vec_ll_int_result, vec_ll_int_expected);
+
+ /* Convert two double precision vector float to vector int */
+ vec_dble0 = (vector double){-124.930, 81234.49};
+ vec_dble1 = (vector double){-24.370, 8354.99};
+ vec_int_expected = (vector signed int){-124, 81234, -24, 8354};
+ vec_int_result = vec_signed2 (vec_dble0, vec_dble1);
+ test_int_result (ALL, vec_int_result, vec_int_expected);
+
+ /* Convert double precision vector float to vector int, even words */
+ vec_dble0 = (vector double){-124.930, 81234.49};
+ vec_int_expected = (vector signed int){-124, 0, 81234, 0};
+ vec_int_result = vec_signede (vec_dble0);
+ test_int_result (EVEN, vec_int_result, vec_int_expected);
+
+ /* Convert double precision vector float to vector int, odd words */
+ vec_dble0 = (vector double){-124.930, 81234.49};
+ vec_int_expected = (vector signed int){0, -124, 0, 81234};
+ vec_int_result = vec_signedo (vec_dble0);
+ test_int_result (ODD, vec_int_result, vec_int_expected);
+
+ /* Convert double precision float to long long unsigned int */
+ vec_dble0 = (vector double){124.930, 8134.49};
+ vec_ll_uns_int_expected = (vector long long unsigned int){124, 8134};
+ vec_ll_uns_int_result = vec_unsigned (vec_dble0);
+ test_ll_unsigned_int_result (vec_ll_uns_int_result,
+ vec_ll_uns_int_expected);
+
+ /* Convert two double precision vector float to vector unsigned int */
+ vec_dble0 = (vector double){124.930, 8134.49};
+ vec_dble1 = (vector double){24.370, 834.99};
+ vec_uns_int_expected = (vector unsigned int){124, 8134, 24, 834};
+ vec_uns_int_result = vec_unsigned2 (vec_dble0, vec_dble1);
+ test_unsigned_int_result (ALL, vec_uns_int_result,
+ vec_uns_int_expected);
+
+ /* Convert double precision vector float to vector unsigned int,
+ even words */
+ vec_dble0 = (vector double){3124.930, 8234.49};
+ vec_uns_int_expected = (vector unsigned int){3124, 0, 8234, 0};
+ vec_uns_int_result = vec_unsignede (vec_dble0);
+ test_unsigned_int_result (EVEN, vec_uns_int_result,
+ vec_uns_int_expected);
+
+ /* Convert double precision vector float to vector unsigned int,
+ odd words */
+ vec_dble0 = (vector double){1924.930, 81234.49};
+ vec_uns_int_expected = (vector unsigned int){0, 1924, 0, 81234};
+ vec_uns_int_result = vec_unsignedo (vec_dble0);
+ test_unsigned_int_result (ODD, vec_uns_int_result,
+ vec_uns_int_expected);
}
+