~RS6000_BTI_unsigned_V16QI, 0 },
{ VSX_BUILTIN_VEC_XL, VSX_BUILTIN_LD_ELEMREV_V16QI,
RS6000_BTI_unsigned_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_UINTQI, 0 },
+ { VSX_BUILTIN_VEC_XL_BE, VSX_BUILTIN_XL_BE_V16QI,
+ RS6000_BTI_unsigned_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_UINTQI, 0 },
+ { VSX_BUILTIN_VEC_XL_BE, VSX_BUILTIN_XL_BE_V16QI,
+ RS6000_BTI_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_INTQI, 0 },
+ { VSX_BUILTIN_VEC_XL_BE, VSX_BUILTIN_XL_BE_V8HI,
+ RS6000_BTI_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_INTHI, 0 },
+ { VSX_BUILTIN_VEC_XL_BE, VSX_BUILTIN_XL_BE_V8HI,
+ RS6000_BTI_unsigned_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_UINTHI, 0 },
+ { VSX_BUILTIN_VEC_XL_BE, VSX_BUILTIN_XL_BE_V4SI,
+ RS6000_BTI_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_INTSI, 0 },
+ { VSX_BUILTIN_VEC_XL_BE, VSX_BUILTIN_XL_BE_V4SI,
+ RS6000_BTI_unsigned_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_UINTSI, 0 },
+ { VSX_BUILTIN_VEC_XL_BE, VSX_BUILTIN_XL_BE_V2DI,
+ RS6000_BTI_V2DI, RS6000_BTI_INTSI, ~RS6000_BTI_long_long, 0 },
+ { VSX_BUILTIN_VEC_XL_BE, VSX_BUILTIN_XL_BE_V2DI,
+ RS6000_BTI_unsigned_V2DI, RS6000_BTI_INTSI, ~RS6000_BTI_unsigned_long_long, 0 },
+ { VSX_BUILTIN_VEC_XL_BE, VSX_BUILTIN_XL_BE_V4SF,
+ RS6000_BTI_V4SF, RS6000_BTI_INTSI, ~RS6000_BTI_float, 0 },
+ { VSX_BUILTIN_VEC_XL_BE, VSX_BUILTIN_XL_BE_V2DF,
+ RS6000_BTI_V2DF, RS6000_BTI_INTSI, ~RS6000_BTI_double, 0 },
{ ALTIVEC_BUILTIN_VEC_XOR, ALTIVEC_BUILTIN_VXOR,
RS6000_BTI_V4SF, RS6000_BTI_V4SF, RS6000_BTI_V4SF, 0 },
{ ALTIVEC_BUILTIN_VEC_XOR, ALTIVEC_BUILTIN_VXOR,
return target;
}
+/* Expand an XL_BE built-in: load a vector whose elements always appear
+   in big-endian order, regardless of target endianness.  ICODE is the
+   underlying vector load insn; EXP is the CALL_EXPR whose two arguments
+   are (offset, base address); TARGET is a suggested destination rtx;
+   BLK selects a BLKmode memory reference.  Returns the result register,
+   0 if the builtin is unsupported or RTL generation fails, or
+   const0_rtx on invalid arguments.  */
+static rtx
+altivec_expand_xl_be_builtin (enum insn_code icode, tree exp, rtx target, bool blk)
+{
+ rtx pat, addr;
+ tree arg0 = CALL_EXPR_ARG (exp, 0);
+ tree arg1 = CALL_EXPR_ARG (exp, 1);
+ machine_mode tmode = insn_data[icode].operand[0].mode;
+ machine_mode mode0 = Pmode;
+ machine_mode mode1 = Pmode;
+ rtx op0 = expand_normal (arg0);
+ rtx op1 = expand_normal (arg1);
+
+ if (icode == CODE_FOR_nothing)
+ /* Builtin not supported on this processor.  */
+ return 0;
+
+ /* If we got invalid arguments bail out before generating bad rtl.  */
+ if (arg0 == error_mark_node || arg1 == error_mark_node)
+ return const0_rtx;
+
+ /* Reuse TARGET only if it has the right mode and satisfies the insn's
+    destination predicate; otherwise allocate a fresh pseudo.  */
+ if (target == 0
+ || GET_MODE (target) != tmode
+ || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
+ target = gen_reg_rtx (tmode);
+
+ op1 = copy_to_mode_reg (mode1, op1);
+
+ /* Form the memory address as base (op1) plus offset (op0), eliding
+    the addition when the offset is a literal zero.  */
+ if (op0 == const0_rtx)
+ addr = gen_rtx_MEM (blk ? BLKmode : tmode, op1);
+ else
+ {
+ op0 = copy_to_mode_reg (mode0, op0);
+ addr = gen_rtx_MEM (blk ? BLKmode : tmode,
+ gen_rtx_PLUS (Pmode, op1, op0));
+ }
+
+ pat = GEN_FCN (icode) (target, addr);
+ if (!pat)
+ return 0;
+
+ emit_insn (pat);
+ /* On a little-endian target, reverse the element order with a VPERM
+    so the result is always in big-endian element order.  */
+ if (!VECTOR_ELT_ORDER_BIG)
+ {
+ rtx sel = swap_selector_for_mode (tmode);
+ rtx vperm = gen_rtx_UNSPEC (tmode, gen_rtvec (3, target, target, sel),
+ UNSPEC_VPERM);
+ emit_insn (gen_rtx_SET (target, vperm));
+ }
+ return target;
+}
+
static rtx
paired_expand_stv_builtin (enum insn_code icode, tree exp)
{
/* Fall through. */
}
+ /* The XL_BE built-ins always load elements in big-endian order;
+ expand them here via altivec_expand_xl_be_builtin. */
+ switch (fcode)
+ {
+ case VSX_BUILTIN_XL_BE_V2DI:
+ {
+ enum insn_code code = CODE_FOR_vsx_load_v2di;
+ return altivec_expand_xl_be_builtin (code, exp, target, false);
+ }
+ break;
+ case VSX_BUILTIN_XL_BE_V4SI:
+ {
+ enum insn_code code = CODE_FOR_vsx_load_v4si;
+ return altivec_expand_xl_be_builtin (code, exp, target, false);
+ }
+ break;
+ case VSX_BUILTIN_XL_BE_V8HI:
+ {
+ enum insn_code code = CODE_FOR_vsx_load_v8hi;
+ return altivec_expand_xl_be_builtin (code, exp, target, false);
+ }
+ break;
+ case VSX_BUILTIN_XL_BE_V16QI:
+ {
+ enum insn_code code = CODE_FOR_vsx_load_v16qi;
+ return altivec_expand_xl_be_builtin (code, exp, target, false);
+ }
+ break;
+ case VSX_BUILTIN_XL_BE_V2DF:
+ {
+ enum insn_code code = CODE_FOR_vsx_load_v2df;
+ return altivec_expand_xl_be_builtin (code, exp, target, false);
+ }
+ break;
+ case VSX_BUILTIN_XL_BE_V4SF:
+ {
+ enum insn_code code = CODE_FOR_vsx_load_v4sf;
+ return altivec_expand_xl_be_builtin (code, exp, target, false);
+ }
+ break;
+ default:
+ /* Not an XL_BE built-in; handled by the generic code below. */
+ break;
+ }
+
*expandedp = false;
return NULL_RTX;
}
def_builtin ("__builtin_vsx_st_elemrev_v4si", void_ftype_v4si_long_pvoid,
VSX_BUILTIN_ST_ELEMREV_V4SI);
+ def_builtin ("__builtin_vsx_le_be_v8hi", v8hi_ftype_long_pcvoid,
+ VSX_BUILTIN_XL_BE_V8HI);
+ def_builtin ("__builtin_vsx_le_be_v4si", v4si_ftype_long_pcvoid,
+ VSX_BUILTIN_XL_BE_V4SI);
+ def_builtin ("__builtin_vsx_le_be_v2di", v2di_ftype_long_pcvoid,
+ VSX_BUILTIN_XL_BE_V2DI);
+ def_builtin ("__builtin_vsx_le_be_v4sf", v4sf_ftype_long_pcvoid,
+ VSX_BUILTIN_XL_BE_V4SF);
+ def_builtin ("__builtin_vsx_le_be_v2df", v2df_ftype_long_pcvoid,
+ VSX_BUILTIN_XL_BE_V2DF);
+ def_builtin ("__builtin_vsx_le_be_v16qi", v16qi_ftype_long_pcvoid,
+ VSX_BUILTIN_XL_BE_V16QI);
+
if (TARGET_P9_VECTOR)
{
def_builtin ("__builtin_vsx_ld_elemrev_v8hi", v8hi_ftype_long_pcvoid,
VSX_BUILTIN_VEC_ST);
def_builtin ("__builtin_vec_xl", opaque_ftype_long_pcvoid,
VSX_BUILTIN_VEC_XL);
+ def_builtin ("__builtin_vec_xl_be", opaque_ftype_long_pcvoid,
+ VSX_BUILTIN_VEC_XL_BE);
def_builtin ("__builtin_vec_xst", void_ftype_opaque_long_pvoid,
VSX_BUILTIN_VEC_XST);
--- /dev/null
+/* { dg-do run } */
+/* { dg-require-effective-target powerpc_vsx_hw } */
+/* { dg-options "-maltivec -mvsx" } */
+
+#include <inttypes.h>
+#include <altivec.h> /* For the vector keyword and vec_xl_be. */
+#include <stdio.h>
+
+void abort (void);
+
+/* Exercise vec_xl_be for every supported element type: the loaded
+   elements must appear in big-endian order on both BE and LE targets,
+   with both a zero and a non-zero displacement.  */
+int main() {
+ int i;
+ signed char data_c[100];
+ unsigned char data_uc[100];
+
+ signed short int data_ssi[100];
+ unsigned short int data_usi[100];
+
+ signed int data_si[100];
+ unsigned int data_ui[100];
+
+ signed long long data_sll[100];
+ unsigned long long data_ull[100];
+
+ float data_f[100];
+ double data_d[100];
+
+ signed long long disp;
+
+ vector signed char vec_c_expected1, vec_c_expected2, vec_c_result1, vec_c_result2;
+ vector unsigned char vec_uc_expected1, vec_uc_expected2,
+ vec_uc_result1, vec_uc_result2;
+ vector signed short int vec_ssi_expected1, vec_ssi_expected2,
+ vec_ssi_result1, vec_ssi_result2;
+ vector unsigned short int vec_usi_expected1, vec_usi_expected2,
+ vec_usi_result1, vec_usi_result2;
+ vector signed int vec_si_expected1, vec_si_expected2, vec_si_result1,
+ vec_si_result2;
+ vector unsigned int vec_ui_expected1, vec_ui_expected2, vec_ui_result1,
+ vec_ui_result2;
+ vector signed long long vec_sll_expected1, vec_sll_expected2,
+ vec_sll_result1, vec_sll_result2;
+ vector unsigned long long vec_ull_expected1, vec_ull_expected2,
+ vec_ull_result1, vec_ull_result2;
+ vector float vec_f_expected1, vec_f_expected2, vec_f_result1, vec_f_result2;
+ vector double vec_d_expected1, vec_d_expected2, vec_d_result1, vec_d_result2;
+ signed long long zero = (signed long long) 0;
+
+ /* Fill each source array with distinct, type-specific values so any
+    misload or mis-ordered load is detectable.  */
+ for (i = 0; i < 100; i++)
+ {
+ data_c[i] = i;
+ data_uc[i] = i+1;
+ data_ssi[i] = i+10;
+ data_usi[i] = i+11;
+ data_si[i] = i+100;
+ data_ui[i] = i+101;
+ data_sll[i] = i+1000;
+ data_ull[i] = i+1001;
+ data_f[i] = i+100000.0;
+ data_d[i] = i+1000000.0;
+ }
+
+ /* Signed/unsigned char loads.  */
+#ifdef __BIG_ENDIAN__
+ printf("BIG ENDIAN\n");
+ vec_c_expected1 = (vector signed char){0, 1, 2, 3, 4, 5, 6, 7,
+ 8, 9, 10, 11, 12, 13, 14, 15};
+#else
+ printf("LITTLE ENDIAN\n");
+ vec_c_expected1 = (vector signed char){15, 14, 13, 12, 11, 10, 9, 8,
+ 7, 6, 5, 4, 3, 2, 1, 0};
+#endif
+ vec_c_result1 = vec_xl_be (0, data_c);
+
+ disp = 1;
+
+#ifdef __BIG_ENDIAN__
+ vec_c_expected2 = (vector signed char){1, 2, 3, 4, 5, 6, 7, 8,
+ 9, 10, 11, 12, 13, 14, 15, 16};
+#else
+ vec_c_expected2 = (vector signed char){16, 15, 14, 13, 12, 11, 10, 9,
+ 8, 7, 6, 5, 4, 3, 2, 1};
+#endif
+
+ vec_c_result2 = vec_xl_be (disp, data_c);
+
+#ifdef __BIG_ENDIAN__
+ vec_uc_expected1 = (vector unsigned char){1, 2, 3, 4, 5, 6, 7, 8,
+ 9, 10, 11, 12, 13, 14, 15, 16};
+#else
+ vec_uc_expected1 = (vector unsigned char){16, 15, 14, 13, 12, 11, 10, 9,
+ 8, 7, 6, 5, 4, 3, 2, 1};
+#endif
+
+ vec_uc_result1 = vec_xl_be (0, data_uc);
+
+#ifdef __BIG_ENDIAN__
+ vec_uc_expected2 = (vector unsigned char){2, 3, 4, 5, 6, 7, 8, 9,
+ 10, 11, 12, 13, 14, 15, 16, 17};
+#else
+ vec_uc_expected2 = (vector unsigned char){17, 16, 15, 14, 13, 12, 11, 10,
+ 9, 8, 7, 6, 5, 4, 3, 2};
+#endif
+
+ vec_uc_result2 = vec_xl_be (disp, data_uc);
+
+ for (i = 0; i < 16; i++)
+ {
+ if (vec_c_result1[i] != vec_c_expected1[i])
+ abort ();
+
+ if (vec_c_result2[i] != vec_c_expected2[i])
+ abort ();
+
+ if (vec_uc_result1[i] != vec_uc_expected1[i])
+ abort ();
+
+ if (vec_uc_result2[i] != vec_uc_expected2[i])
+ abort ();
+ }
+
+ /* Signed/unsigned short loads.  */
+ vec_ssi_result1 = vec_xl_be (zero, data_ssi);
+
+#ifdef __BIG_ENDIAN__
+ vec_ssi_expected1 = (vector signed short){10, 11, 12, 13, 14, 15, 16, 17};
+#else
+ vec_ssi_expected1 = (vector signed short){17, 16, 15, 14, 13, 12, 11, 10};
+#endif
+
+ disp = 2;
+ vec_ssi_result2 = vec_xl_be (disp, data_ssi);
+
+#ifdef __BIG_ENDIAN__
+ vec_ssi_expected2 = (vector signed short){11, 12, 13, 14, 15, 16, 17, 18};
+#else
+ vec_ssi_expected2 = (vector signed short){18, 17, 16, 15, 14, 13, 12, 11};
+#endif
+
+ vec_usi_result1 = vec_xl_be (zero, data_usi);
+
+#ifdef __BIG_ENDIAN__
+ vec_usi_expected1 = (vector unsigned short){11, 12, 13, 14, 15, 16, 17, 18};
+#else
+ vec_usi_expected1 = (vector unsigned short){18, 17, 16, 15, 14, 13, 12, 11};
+#endif
+
+ disp = 2;
+ vec_usi_result2 = vec_xl_be (disp, data_usi);
+
+#ifdef __BIG_ENDIAN__
+ vec_usi_expected2 = (vector unsigned short){12, 13, 14, 15, 16, 17, 18, 19};
+#else
+ vec_usi_expected2 = (vector unsigned short){19, 18, 17, 16, 15, 14, 13, 12};
+#endif
+
+ for (i = 0; i < 8; i++)
+ {
+ if (vec_ssi_result1[i] != vec_ssi_expected1[i])
+ abort ();
+
+ if (vec_ssi_result2[i] != vec_ssi_expected2[i])
+ abort ();
+
+ if (vec_usi_result1[i] != vec_usi_expected1[i])
+ abort ();
+
+ if (vec_usi_result2[i] != vec_usi_expected2[i])
+ abort ();
+ }
+
+ /* Signed/unsigned int loads.  */
+ vec_si_result1 = vec_xl_be (zero, data_si);
+
+#ifdef __BIG_ENDIAN__
+ vec_si_expected1 = (vector int){100, 101, 102, 103};
+#else
+ vec_si_expected1 = (vector int){103, 102, 101, 100};
+#endif
+
+ disp = 4;
+ vec_si_result2 = vec_xl_be (disp, data_si);
+
+#ifdef __BIG_ENDIAN__
+ vec_si_expected2 = (vector int){101, 102, 103, 104};
+#else
+ vec_si_expected2 = (vector int){104, 103, 102, 101};
+#endif
+
+ vec_ui_result1 = vec_xl_be (zero, data_ui);
+
+#ifdef __BIG_ENDIAN__
+ vec_ui_expected1 = (vector unsigned int){101, 102, 103, 104};
+#else
+ vec_ui_expected1 = (vector unsigned int){104, 103, 102, 101};
+#endif
+
+ disp = 4;
+ vec_ui_result2 = vec_xl_be (disp, data_ui);
+
+#ifdef __BIG_ENDIAN__
+ vec_ui_expected2 = (vector unsigned int){102, 103, 104, 105};
+#else
+ vec_ui_expected2 = (vector unsigned int){105, 104, 103, 102};
+#endif
+
+
+ for (i = 0; i < 4; i++)
+ {
+ if (vec_si_result1[i] != vec_si_expected1[i])
+ abort ();
+
+ if (vec_si_result2[i] != vec_si_expected2[i])
+ abort ();
+
+ if (vec_ui_result1[i] != vec_ui_expected1[i])
+ abort ();
+
+ if (vec_ui_result2[i] != vec_ui_expected2[i])
+ abort ();
+ }
+
+ /* Signed/unsigned long long loads.  */
+ vec_sll_result1 = vec_xl_be (zero, data_sll);
+
+#ifdef __BIG_ENDIAN__
+ vec_sll_expected1 = (vector signed long long){1000, 1001};
+#else
+ vec_sll_expected1 = (vector signed long long){1001, 1000};
+#endif
+
+ disp = 8;
+ vec_sll_result2 = vec_xl_be (disp, data_sll);
+
+#ifdef __BIG_ENDIAN__
+ vec_sll_expected2 = (vector signed long long){1001, 1002};
+#else
+ vec_sll_expected2 = (vector signed long long){1002, 1001};
+#endif
+
+ vec_ull_result1 = vec_xl_be (zero, data_ull);
+
+#ifdef __BIG_ENDIAN__
+ vec_ull_expected1 = (vector unsigned long long){1001, 1002};
+#else
+ vec_ull_expected1 = (vector unsigned long long){1002, 1001};
+#endif
+
+ disp = 8;
+ vec_ull_result2 = vec_xl_be (disp, data_ull);
+
+#ifdef __BIG_ENDIAN__
+ vec_ull_expected2 = (vector unsigned long long){1002, 1003};
+#else
+ vec_ull_expected2 = (vector unsigned long long){1003, 1002};
+#endif
+
+
+ for (i = 0; i < 2; i++)
+ {
+ if (vec_sll_result1[i] != vec_sll_expected1[i])
+ abort ();
+
+ if (vec_sll_result2[i] != vec_sll_expected2[i])
+ abort ();
+
+ if (vec_ull_result1[i] != vec_ull_expected1[i])
+ abort ();
+
+ if (vec_ull_result2[i] != vec_ull_expected2[i])
+ abort ();
+ }
+
+ /* Float loads.  */
+ vec_f_result1 = vec_xl_be (zero, data_f);
+
+#ifdef __BIG_ENDIAN__
+ vec_f_expected1 = (vector float){100000.0, 100001.0, 100002.0, 100003.0};
+#else
+ vec_f_expected1 = (vector float){100003.0, 100002.0, 100001.0, 100000.0};
+#endif
+
+ disp = 4;
+ vec_f_result2 = vec_xl_be (disp, data_f);
+
+#ifdef __BIG_ENDIAN__
+ vec_f_expected2 = (vector float){100001.0, 100002.0, 100003.0, 100004.0};
+#else
+ vec_f_expected2 = (vector float){100004.0, 100003.0, 100002.0, 100001.0};
+#endif
+
+ for (i = 0; i < 4; i++)
+ {
+ if (vec_f_result1[i] != vec_f_expected1[i])
+ abort ();
+ if (vec_f_result2[i] != vec_f_expected2[i])
+ abort ();
+ }
+
+ /* Double loads.  */
+ vec_d_result1 = vec_xl_be (zero, data_d);
+
+#ifdef __BIG_ENDIAN__
+ vec_d_expected1 = (vector double){1000000.0, 1000001.0};
+#else
+ vec_d_expected1 = (vector double){1000001.0, 1000000.0};
+#endif
+
+ disp = 8;
+ vec_d_result2 = vec_xl_be (disp, data_d);
+
+#ifdef __BIG_ENDIAN__
+ vec_d_expected2 = (vector double){1000001.0, 1000002.0};
+#else
+ vec_d_expected2 = (vector double){1000002.0, 1000001.0};
+#endif
+
+ for (i = 0; i < 2; i++)
+ {
+ if (vec_d_result1[i] != vec_d_expected1[i])
+ abort ();
+ if (vec_d_result2[i] != vec_d_expected2[i])
+ abort ();
+ }
+}