+2016-11-04 Bill Schmidt <wschmidt@linux.vnet.ibm.com>
+
+ * config/rs6000/rs6000.c (gimple-ssa.h): New #include.
+ (TARGET_GIMPLE_FOLD_BUILTIN): Define as
+ rs6000_gimple_fold_builtin.
+ (rs6000_gimple_fold_builtin): New function. Add handling for
+ early expansion of vector addition builtins.
+
2016-11-04 Eric Botcazou <ebotcazou@adacore.com>
* expr.h (copy_blkmode_from_reg): Delete.
#include "sched-int.h"
#include "gimplify.h"
#include "gimple-iterator.h"
+#include "gimple-ssa.h"
#include "gimple-walk.h"
#include "intl.h"
#include "params.h"
#undef TARGET_FOLD_BUILTIN
#define TARGET_FOLD_BUILTIN rs6000_fold_builtin
+#undef TARGET_GIMPLE_FOLD_BUILTIN
+#define TARGET_GIMPLE_FOLD_BUILTIN rs6000_gimple_fold_builtin
#undef TARGET_EXPAND_BUILTIN
#define TARGET_EXPAND_BUILTIN rs6000_expand_builtin
#endif
}
+/* Fold a machine-dependent built-in in GIMPLE. (For folding into
+ a constant, use rs6000_fold_builtin.) */
+
+bool
+rs6000_gimple_fold_builtin (gimple_stmt_iterator *gsi)
+{
+ gimple *stmt = gsi_stmt (*gsi);
+ tree fndecl = gimple_call_fndecl (stmt);
+ gcc_checking_assert (fndecl && DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_MD);
+ enum rs6000_builtins fn_code
+ = (enum rs6000_builtins) DECL_FUNCTION_CODE (fndecl);
+ tree arg0, arg1, lhs;
+
+ switch (fn_code)
+ {
+ /* Flavors of vec_add. We deliberately don't expand
+ P8V_BUILTIN_VADDUQM as it gets lowered from V1TImode to
+ TImode, resulting in much poorer code generation. */
+ case ALTIVEC_BUILTIN_VADDUBM:
+ case ALTIVEC_BUILTIN_VADDUHM:
+ case ALTIVEC_BUILTIN_VADDUWM:
+ case P8V_BUILTIN_VADDUDM:
+ case ALTIVEC_BUILTIN_VADDFP:
+ case VSX_BUILTIN_XVADDDP:
+ {
+ arg0 = gimple_call_arg (stmt, 0);
+ arg1 = gimple_call_arg (stmt, 1);
+ lhs = gimple_call_lhs (stmt);
+ gimple *g = gimple_build_assign (lhs, PLUS_EXPR, arg0, arg1);
+ gimple_set_location (g, gimple_location (stmt));
+ gsi_replace (gsi, g, true);
+ return true;
+ }
+ default:
+ break;
+ }
+
+ return false;
+}
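+
+/* For example, with the folding above a function such as
+
+     vector int
+     add_vi (vector int x, vector int y)
+     {
+       return vec_add (x, y);
+     }
+
+   no longer reaches RTL expansion with a call to
+   __builtin_altivec_vadduwm; the GIMPLE call statement is replaced
+   by a plain PLUS_EXPR (x + y), which later passes can optimize
+   like any other vector addition.  (Illustrative sketch only;
+   add_vi is a hypothetical name, not part of the test suite.)  */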
+
/* Expand an expression EXP that calls a built-in function,
with result going to TARGET if that's convenient
(and in mode MODE if that's convenient).
+2016-11-04 Bill Schmidt <wschmidt@linux.vnet.ibm.com>
+
+ * gcc.target/powerpc/fold-vec-add-1.c: New.
+ * gcc.target/powerpc/fold-vec-add-2.c: New.
+ * gcc.target/powerpc/fold-vec-add-3.c: New.
+ * gcc.target/powerpc/fold-vec-add-4.c: New.
+ * gcc.target/powerpc/fold-vec-add-5.c: New.
+ * gcc.target/powerpc/fold-vec-add-6.c: New.
+ * gcc.target/powerpc/fold-vec-add-7.c: New.
+
2016-11-04 Toma Tabacu <toma.tabacu@imgtec.com>
* gcc.target/mips/mips.exp (mips-dg-options): Downgrade to R5
--- /dev/null
+/* Verify that overloaded built-ins for vec_add with char
+ inputs produce the right results. */
+
+/* { dg-do compile } */
+/* { dg-require-effective-target powerpc_altivec_ok } */
+/* { dg-options "-maltivec" } */
+
+#include <altivec.h>
+
+vector signed char
+test1 (vector bool char x, vector signed char y)
+{
+ return vec_add (x, y);
+}
+
+vector signed char
+test2 (vector signed char x, vector bool char y)
+{
+ return vec_add (x, y);
+}
+
+vector signed char
+test3 (vector signed char x, vector signed char y)
+{
+ return vec_add (x, y);
+}
+
+vector unsigned char
+test4 (vector bool char x, vector unsigned char y)
+{
+ return vec_add (x, y);
+}
+
+vector unsigned char
+test5 (vector unsigned char x, vector bool char y)
+{
+ return vec_add (x, y);
+}
+
+vector unsigned char
+test6 (vector unsigned char x, vector unsigned char y)
+{
+ return vec_add (x, y);
+}
+
+/* { dg-final { scan-assembler-times "vaddubm" 6 } } */
--- /dev/null
+/* Verify that overloaded built-ins for vec_add with short
+ inputs produce the right results. */
+
+/* { dg-do compile } */
+/* { dg-require-effective-target powerpc_altivec_ok } */
+/* { dg-options "-maltivec" } */
+
+#include <altivec.h>
+
+vector signed short
+test1 (vector bool short x, vector signed short y)
+{
+ return vec_add (x, y);
+}
+
+vector signed short
+test2 (vector signed short x, vector bool short y)
+{
+ return vec_add (x, y);
+}
+
+vector signed short
+test3 (vector signed short x, vector signed short y)
+{
+ return vec_add (x, y);
+}
+
+vector unsigned short
+test4 (vector bool short x, vector unsigned short y)
+{
+ return vec_add (x, y);
+}
+
+vector unsigned short
+test5 (vector unsigned short x, vector bool short y)
+{
+ return vec_add (x, y);
+}
+
+vector unsigned short
+test6 (vector unsigned short x, vector unsigned short y)
+{
+ return vec_add (x, y);
+}
+
+/* { dg-final { scan-assembler-times "vadduhm" 6 } } */
--- /dev/null
+/* Verify that overloaded built-ins for vec_add with int
+ inputs produce the right results. */
+
+/* { dg-do compile } */
+/* { dg-require-effective-target powerpc_altivec_ok } */
+/* { dg-options "-maltivec" } */
+
+#include <altivec.h>
+
+vector signed int
+test1 (vector bool int x, vector signed int y)
+{
+ return vec_add (x, y);
+}
+
+vector signed int
+test2 (vector signed int x, vector bool int y)
+{
+ return vec_add (x, y);
+}
+
+vector signed int
+test3 (vector signed int x, vector signed int y)
+{
+ return vec_add (x, y);
+}
+
+vector unsigned int
+test4 (vector bool int x, vector unsigned int y)
+{
+ return vec_add (x, y);
+}
+
+vector unsigned int
+test5 (vector unsigned int x, vector bool int y)
+{
+ return vec_add (x, y);
+}
+
+vector unsigned int
+test6 (vector unsigned int x, vector unsigned int y)
+{
+ return vec_add (x, y);
+}
+
+/* { dg-final { scan-assembler-times "vadduwm" 6 } } */
--- /dev/null
+/* Verify that overloaded built-ins for vec_add with long long
+ inputs produce the right results. */
+
+/* { dg-do compile } */
+/* { dg-require-effective-target powerpc_p8vector_ok } */
+/* { dg-options "-mpower8-vector" } */
+
+#include <altivec.h>
+
+vector signed long long
+test1 (vector bool long long x, vector signed long long y)
+{
+ return vec_add (x, y);
+}
+
+vector signed long long
+test2 (vector signed long long x, vector bool long long y)
+{
+ return vec_add (x, y);
+}
+
+vector signed long long
+test3 (vector signed long long x, vector signed long long y)
+{
+ return vec_add (x, y);
+}
+
+vector unsigned long long
+test4 (vector bool long long x, vector unsigned long long y)
+{
+ return vec_add (x, y);
+}
+
+vector unsigned long long
+test5 (vector unsigned long long x, vector bool long long y)
+{
+ return vec_add (x, y);
+}
+
+vector unsigned long long
+test6 (vector unsigned long long x, vector unsigned long long y)
+{
+ return vec_add (x, y);
+}
+
+/* { dg-final { scan-assembler-times "vaddudm" 6 } } */
--- /dev/null
+/* Verify that overloaded built-ins for vec_add with float
+ inputs produce the right results. */
+
+/* { dg-do compile } */
+/* { dg-require-effective-target powerpc_altivec_ok } */
+/* { dg-options "-maltivec -mno-vsx" } */
+
+#include <altivec.h>
+
+vector float
+test1 (vector float x, vector float y)
+{
+ return vec_add (x, y);
+}
+
+/* { dg-final { scan-assembler-times "vaddfp" 1 } } */
--- /dev/null
+/* Verify that overloaded built-ins for vec_add with float and
+ double inputs for VSX produce the right results. */
+
+/* { dg-do compile } */
+/* { dg-require-effective-target powerpc_vsx_ok } */
+/* { dg-options "-mvsx" } */
+
+#include <altivec.h>
+
+vector float
+test1 (vector float x, vector float y)
+{
+ return vec_add (x, y);
+}
+
+vector double
+test2 (vector double x, vector double y)
+{
+ return vec_add (x, y);
+}
+
+/* { dg-final { scan-assembler-times "xvaddsp" 1 } } */
+/* { dg-final { scan-assembler-times "xvadddp" 1 } } */
--- /dev/null
+/* Verify that overloaded built-ins for vec_add with __int128
+ inputs produce the right results. */
+
+/* { dg-do compile } */
+/* { dg-require-effective-target powerpc_p8vector_ok } */
+/* { dg-options "-mpower8-vector" } */
+
+#include "altivec.h"
+
+vector signed __int128
+test1 (vector signed __int128 x, vector signed __int128 y)
+{
+ return vec_add (x, y);
+}
+
+vector unsigned __int128
+test2 (vector unsigned __int128 x, vector unsigned __int128 y)
+{
+ return vec_add (x, y);
+}
+
+/* { dg-final { scan-assembler-times "vadduqm" 2 } } */
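+
+/* These tests live under gcc.target/powerpc and can typically be run
+   from the GCC build tree with something like
+     make check-gcc RUNTESTFLAGS="powerpc.exp=fold-vec-add*"
+   (the exact invocation may vary with your build setup).  */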