From d5d9f7834ab809841c4ccc90bca74808b4bcaf8d Mon Sep 17 00:00:00 2001
From: Richard Biener <rguenther@suse.de>
Date: Thu, 2 Jul 2020 11:12:51 +0200
Subject: [PATCH] tree-optimization/96022 - fix ICE with vectorized shift

This fixes lane extraction for internal def vectorized shifts with
an effective scalar shift operand by always using lane zero of the
first vector stmt.  It also fixes a SLP build issue noticed on the
testcase where we end up building unary vector ops with the only
operand built from scalars which isn't profitable by itself.  The
exception is for stores.

2020-07-02  Richard Biener  <rguenther@suse.de>

	PR tree-optimization/96022
	* tree-vect-stmts.c (vectorizable_shift): Only use the
	first vector stmt when extracting the scalar shift amount.
	* tree-vect-slp.c (vect_build_slp_tree_2): Also build
	unary nodes with all-scalar children from scalars but
	not stores.
	(vect_analyze_slp_instance): Mark the node not failed.

	* g++.dg/vect/pr96022.cc: New testcase.
---
 gcc/testsuite/g++.dg/vect/pr96022.cc | 12 ++++++++++++
 gcc/tree-vect-slp.c                  | 11 ++++++-----
 gcc/tree-vect-stmts.c                |  6 ++++--
 3 files changed, 22 insertions(+), 7 deletions(-)
 create mode 100644 gcc/testsuite/g++.dg/vect/pr96022.cc

diff --git a/gcc/testsuite/g++.dg/vect/pr96022.cc b/gcc/testsuite/g++.dg/vect/pr96022.cc
new file mode 100644
index 00000000000..ca6b27696f5
--- /dev/null
+++ b/gcc/testsuite/g++.dg/vect/pr96022.cc
@@ -0,0 +1,12 @@
+// { dg-do compile }
+// { dg-additional-options "-O3" }
+
+extern int arr_6[];
+extern char arr_7[] __attribute__((aligned));
+void test(short a, bool, int p8) {
+  for (bool b = 0; b < (bool)p8; b = 1)
+    for (short c = 0; c < 5; c++) {
+      arr_6[c] = (long)2 << a - 30574;
+      arr_7[c] = 0;
+    }
+}
diff --git a/gcc/tree-vect-slp.c b/gcc/tree-vect-slp.c
index 532809d2667..af123b5ab1d 100644
--- a/gcc/tree-vect-slp.c
+++ b/gcc/tree-vect-slp.c
@@ -1530,11 +1530,11 @@ fail:
 
   vect_free_oprnd_info (oprnds_info);
 
-  /* If we have all children of a non-unary child built up from
-     uniform scalars then just throw that away, causing it built up
-     from scalars.  */
-  if (nops > 1
-      && is_a <bb_vec_info> (vinfo)
+  /* If we have all children of a child built up from uniform scalars
+     then just throw that away, causing it built up from scalars.
+     The exception is the SLP node for the vector store.  */
+  if (is_a <bb_vec_info> (vinfo)
+      && !STMT_VINFO_GROUPED_ACCESS (stmt_info)
       /* ??? Rejecting patterns this way doesn't work.  We'd have to
 	 do extra work to cancel the pattern so the uses see the
 	 scalar version.  */
@@ -2230,6 +2230,7 @@ vect_analyze_slp_instance (vec_info *vinfo,
 	  return false;
 	}
       /* Fatal mismatch.  */
+      matches[0] = true;
       matches[group_size / const_max_nunits * const_max_nunits] = false;
       vect_free_slp_tree (node, false);
     }
diff --git a/gcc/tree-vect-stmts.c b/gcc/tree-vect-stmts.c
index 9b7b04ce2d3..d68547ed1b5 100644
--- a/gcc/tree-vect-stmts.c
+++ b/gcc/tree-vect-stmts.c
@@ -5403,7 +5403,7 @@ vectorizable_shift (vec_info *vinfo,
       if (!op1_vectype)
 	op1_vectype = get_vectype_for_scalar_type (vinfo,
 						   TREE_TYPE (op1),
-						   slp_node);
+						   slp_op1);
 
       /* Unlike the other binary operators, shifts/rotates have
 	 the rhs being int, instead of the same type as the lhs,
@@ -5575,11 +5575,11 @@ vectorizable_shift (vec_info *vinfo,
       /* Arguments are ready.  Create the new vector stmt.  */
       FOR_EACH_VEC_ELT (vec_oprnds0, i, vop0)
 	{
-	  vop1 = vec_oprnds1[i];
 	  /* For internal defs where we need to use a scalar shift arg
	     extract the first lane.  */
 	  if (scalar_shift_arg && dt[1] == vect_internal_def)
 	    {
+	      vop1 = vec_oprnds1[0];
 	      new_temp = make_ssa_name (TREE_TYPE (TREE_TYPE (vop1)));
 	      gassign *new_stmt
 		= gimple_build_assign (new_temp,
@@ -5590,6 +5590,8 @@ vectorizable_shift (vec_info *vinfo,
 	      vect_finish_stmt_generation (vinfo, stmt_info, new_stmt, gsi);
 	      vop1 = new_temp;
 	    }
+	  else
+	    vop1 = vec_oprnds1[i];
 	  gassign *new_stmt = gimple_build_assign (vec_dest, code, vop0, vop1);
 	  new_temp = make_ssa_name (vec_dest, new_stmt);
 	  gimple_assign_set_lhs (new_stmt, new_temp);
-- 
2.30.2