+2015-01-30 Richard Biener <rguenther@suse.de>
+
+ PR tree-optimization/64829
+ * tree-vect-patterns.c (vect_handle_widen_op_by_const): Do
+ not add a widening conversion pattern but hand off extra
+ widenings to callers.
+ (vect_recog_widen_mult_pattern): Handle extra widening produced
+ by vect_handle_widen_op_by_const.
+ (vect_recog_widen_shift_pattern): Likewise.
+ (vect_pattern_recog_1): Remove excess vertical space in dumping.
+ * tree-vect-stmts.c (vect_mark_stmts_to_be_vectorized): Likewise.
+ (vect_init_vector_1): Likewise.
+ (vect_get_vec_def_for_operand): Likewise.
+ (vect_finish_stmt_generation): Likewise.
+ (vectorizable_load): Likewise.
+ (vect_analyze_stmt): Likewise.
+ (vect_is_simple_use): Likewise.
+
2015-01-29 Jeff Law <law@redhat.com>
* combine.c (try_combine): Fix typo in comment.
+2015-01-30 Richard Biener <rguenther@suse.de>
+
+ PR tree-optimization/64829
+ * gcc.dg/vect/pr64829.c: New testcase.
+
2015-01-29 Marek Polacek <polacek@redhat.com>
PR c/64709
--- /dev/null
+/* { dg-do compile } */
+
+typedef unsigned char Uint8;
+typedef int Sint32;
+typedef unsigned int Uint32;
+
+typedef union RMColorDataRef
+{
+ Uint8* data8;
+} RMColorDataRef;
+
+typedef struct RMColorData
+{
+ Uint32 dataCount;
+ RMColorDataRef dataRef;
+} RMColorData;
+
+typedef struct RMColorTable
+{
+ Uint8 dataCompsOut;
+ RMColorDataRef dataRef;
+} RMColorTable;
+
+int fail ( const RMColorData * pInColor,
+ RMColorData * pOutColor,
+ const RMColorTable * pColorTable )
+{
+ Uint32 comp;
+ Uint8 nCompOut;
+
+ Sint32 result;
+
+ Uint32 interpFrac1, interpFrac2, interpFrac3;
+ Sint32 val0, val1, val2, val3;
+
+ Uint8 * pOut;
+
+ const Uint8 * pClutData;
+ const Uint8 * pCornerPoint0;
+
+ Uint8 lastOut[((8) > (4) ? (8) : (4))];
+
+ pOut = pOutColor->dataRef.data8;
+ pClutData = pColorTable->dataRef.data8;
+
+ nCompOut = pColorTable->dataCompsOut;
+
+ pCornerPoint0 = pClutData;
+
+ for (comp = 0; comp < nCompOut; comp++)
+ {
+ val0 = *pCornerPoint0++;
+
+ result = val0 << 4;
+
+ result += (val1 - val0) * interpFrac1;
+ result += (val2 - val1) * interpFrac2;
+ result += (val3 - val2) * interpFrac3;
+
+ *pOut++ = lastOut[comp] = (Uint8)(result >> 4);
+ }
+
+ return (0);
+}
+
+/* { dg-final { cleanup-tree-dump "vect" } } */
HALF_TYPE, and there is an intermediate type (2 times smaller than TYPE)
that satisfies the above restrictions, we can perform a widening operation
from the intermediate type to TYPE and replace a_T = (TYPE) a_t;
- with a_it = (interm_type) a_t; */
+   with a_it = (interm_type) a_t;  Store such an operation in *WSTMT.  */
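A minimal source-level illustration of the case this helper handles (the
function name, the types and the constant 300 below are assumptions for the
example, not taken from the patch): 300 does not fit in unsigned char, but it
does fit in the intermediate type unsigned short, so the multiplication can
still be recognized as a widening multiply from unsigned short to int.

    void
    scale (int *out, const unsigned char *in, int n)
    {
      int i;
      for (i = 0; i < n; i++)
        /* in[i] plays the role of a_t; the helper hands back the
           intermediate conversion a_it = (unsigned short) in[i]
           to the caller via *WSTMT.  */
        out[i] = in[i] * 300;
    }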
static bool
vect_handle_widen_op_by_const (gimple stmt, enum tree_code code,
tree const_oprnd, tree *oprnd,
- vec<gimple> *stmts, tree type,
+ gimple *wstmt, tree type,
tree *half_type, gimple def_stmt)
{
tree new_type, new_oprnd;
- gimple new_stmt;
if (code != MULT_EXPR && code != LSHIFT_EXPR)
return false;
&& compare_tree_int (const_oprnd, TYPE_PRECISION (new_type)) == 1))
return false;
- /* Use NEW_TYPE for widening operation. */
- if (STMT_VINFO_RELATED_STMT (vinfo_for_stmt (def_stmt)))
- {
- new_stmt = STMT_VINFO_RELATED_STMT (vinfo_for_stmt (def_stmt));
- /* Check if the already created pattern stmt is what we need. */
- if (!is_gimple_assign (new_stmt)
- || !CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (new_stmt))
- || TREE_TYPE (gimple_assign_lhs (new_stmt)) != new_type)
- return false;
-
- stmts->safe_push (def_stmt);
- *oprnd = gimple_assign_lhs (new_stmt);
- }
- else
- {
- /* Create a_T = (NEW_TYPE) a_t; */
- *oprnd = gimple_assign_rhs1 (def_stmt);
- new_oprnd = make_ssa_name (new_type);
- new_stmt = gimple_build_assign (new_oprnd, NOP_EXPR, *oprnd);
- STMT_VINFO_RELATED_STMT (vinfo_for_stmt (def_stmt)) = new_stmt;
- stmts->safe_push (def_stmt);
- *oprnd = new_oprnd;
- }
+ /* Use NEW_TYPE for widening operation and create a_T = (NEW_TYPE) a_t; */
+ *oprnd = gimple_assign_rhs1 (def_stmt);
+ new_oprnd = make_ssa_name (new_type);
+ *wstmt = gimple_build_assign (new_oprnd, NOP_EXPR, *oprnd);
+ *oprnd = new_oprnd;
*half_type = new_type;
return true;
if (TREE_CODE (oprnd1) == INTEGER_CST
&& TREE_CODE (half_type0) == INTEGER_TYPE
&& vect_handle_widen_op_by_const (last_stmt, MULT_EXPR, oprnd1,
- &oprnd0, stmts, type,
+ &oprnd0, &new_stmt, type,
&half_type0, def_stmt0))
{
half_type1 = half_type0;
the smaller type into the larger type. */
if (TYPE_PRECISION (half_type0) != TYPE_PRECISION (half_type1))
{
+ /* If we already used up the single-stmt slot give up. */
+ if (new_stmt)
+ return NULL;
+
tree* oprnd = NULL;
gimple def_stmt = NULL;
/* Check operand 0: it has to be defined by a type promotion. */
if (!type_conversion_p (oprnd0, last_stmt, false, &half_type0, &def_stmt0,
- &promotion)
+ &promotion)
|| !promotion)
return NULL;
}
/* Check if this a widening operation. */
+ gimple wstmt = NULL;
if (!vect_handle_widen_op_by_const (last_stmt, LSHIFT_EXPR, oprnd1,
- &oprnd0, stmts,
+ &oprnd0, &wstmt,
type, &half_type0, def_stmt0))
return NULL;
var = vect_recog_temp_ssa_var (type, NULL);
pattern_stmt =
gimple_build_assign (var, WIDEN_LSHIFT_EXPR, oprnd0, oprnd1);
+ if (wstmt)
+ {
+ stmt_vec_info stmt_vinfo = vinfo_for_stmt (last_stmt);
+ loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_vinfo);
+ bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_vinfo);
+ new_pattern_def_seq (stmt_vinfo, wstmt);
+ stmt_vec_info new_stmt_info
+ = new_stmt_vec_info (wstmt, loop_vinfo, bb_vinfo);
+ set_vinfo_for_stmt (wstmt, new_stmt_info);
+ STMT_VINFO_VECTYPE (new_stmt_info) = vectype;
+ }
if (dump_enabled_p ())
dump_gimple_stmt_loc (MSG_NOTE, vect_location, TDF_SLIM, pattern_stmt, 0);
dump_printf_loc (MSG_NOTE, vect_location,
"pattern recognized: ");
dump_gimple_stmt (MSG_NOTE, TDF_SLIM, pattern_stmt, 0);
- dump_printf (MSG_NOTE, "\n");
}
/* Mark the stmts that are involved in the pattern. */
dump_printf_loc (MSG_NOTE, vect_location,
"additional pattern stmt: ");
dump_gimple_stmt (MSG_NOTE, TDF_SLIM, pattern_stmt, 0);
- dump_printf (MSG_NOTE, "\n");
}
vect_mark_pattern_stmts (stmt, pattern_stmt, NULL_TREE);
{
dump_printf_loc (MSG_NOTE, vect_location, "init: phi relevant? ");
dump_gimple_stmt (MSG_NOTE, TDF_SLIM, phi, 0);
- dump_printf (MSG_NOTE, "\n");
}
if (vect_stmt_relevant_p (phi, loop_vinfo, &relevant, &live_p))
{
dump_printf_loc (MSG_NOTE, vect_location, "init: stmt relevant? ");
dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
- dump_printf (MSG_NOTE, "\n");
}
if (vect_stmt_relevant_p (stmt, loop_vinfo, &relevant, &live_p))
{
dump_printf_loc (MSG_NOTE, vect_location, "worklist: examine stmt: ");
dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
- dump_printf (MSG_NOTE, "\n");
}
/* Examine the USEs of STMT. For each USE, mark the stmt that defines it
live_p, relevant, &worklist, false)
|| !process_use (stmt, TREE_OPERAND (op, 1), loop_vinfo,
live_p, relevant, &worklist, false))
- return false;
+ return false;
i = 2;
}
for (; i < gimple_num_ops (stmt); i++)
gcc_assert (decl);
if (!process_use (stmt, off, loop_vinfo, live_p, relevant,
&worklist, true))
- return false;
+ return false;
}
} /* while worklist */
dump_printf_loc (MSG_NOTE, vect_location,
"created new init_stmt: ");
dump_gimple_stmt (MSG_NOTE, TDF_SLIM, new_stmt, 0);
- dump_printf (MSG_NOTE, "\n");
}
}
else
dump_printf_loc (MSG_NOTE, vect_location, " def_stmt = ");
dump_gimple_stmt (MSG_NOTE, TDF_SLIM, def_stmt, 0);
- dump_printf (MSG_NOTE, "\n");
}
}
{
dump_printf_loc (MSG_NOTE, vect_location, "add new stmt: ");
dump_gimple_stmt (MSG_NOTE, TDF_SLIM, vec_stmt, 0);
- dump_printf (MSG_NOTE, "\n");
}
gimple_set_location (vec_stmt, gimple_location (stmt));
"hoisting out of the vectorized "
"loop: ");
dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
- dump_printf (MSG_NOTE, "\n");
}
tree tem = copy_ssa_name (scalar_dest);
gsi_insert_on_edge_immediate
{
dump_printf_loc (MSG_NOTE, vect_location, "==> examining statement: ");
dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
- dump_printf (MSG_NOTE, "\n");
}
if (gimple_has_volatile_ops (stmt))
dump_printf_loc (MSG_NOTE, vect_location,
"==> examining pattern statement: ");
dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
- dump_printf (MSG_NOTE, "\n");
}
}
else
dump_printf_loc (MSG_NOTE, vect_location,
"==> examining pattern statement: ");
dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
- dump_printf (MSG_NOTE, "\n");
}
if (!vect_analyze_stmt (pattern_stmt, need_to_vectorize, node))
dump_printf_loc (MSG_NOTE, vect_location,
"==> examining pattern def statement: ");
dump_gimple_stmt (MSG_NOTE, TDF_SLIM, pattern_def_stmt, 0);
- dump_printf (MSG_NOTE, "\n");
}
if (!vect_analyze_stmt (pattern_def_stmt,
"not vectorized: relevant stmt not ");
dump_printf (MSG_MISSED_OPTIMIZATION, "supported: ");
dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
- dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
}
return false;
"not vectorized: live stmt not ");
dump_printf (MSG_MISSED_OPTIMIZATION, "supported: ");
dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
- dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
}
return false;
{
dump_printf_loc (MSG_NOTE, vect_location, "def_stmt: ");
dump_gimple_stmt (MSG_NOTE, TDF_SLIM, *def_stmt, 0);
- dump_printf (MSG_NOTE, "\n");
}
/* Empty stmt is expected only in case of a function argument.