--- a/gcc/ChangeLog
+++ b/gcc/ChangeLog
+2019-10-15 Richard Biener <rguenther@suse.de>
+
+ PR tree-optimization/92094
+ * tree-vect-loop.c (vectorizable_reduction): For nested cycles
+ do not adjust the reduction definition def type.
+ * tree-vect-stmts.c (vect_transform_stmt): Verify the scalar stmt
+ defines the latch argument of the PHI.
+
2019-10-15 Hongyu Wang <hongyu.wang@intel.com>

 PR target/92035
--- a/gcc/testsuite/ChangeLog
+++ b/gcc/testsuite/ChangeLog
+2019-10-15 Richard Biener <rguenther@suse.de>
+
+ PR tree-optimization/92094
+ * gfortran.dg/pr92094.f90: New testcase.
+
2019-10-15 Hongyu Wang <hongyu.wang@intel.com>

 PR target/92035
--- /dev/null
+++ b/gcc/testsuite/gfortran.dg/pr92094.f90
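+! PR tree-optimization/92094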
+! { dg-do compile }
+! { dg-options "-O3" }
+ subroutine hesfcn(n, x, h, ldh)
+ integer n,ldh
+ double precision x(n), h(ldh)
+
+ integer i,j,k,kj
+ double precision th,u1,u2,v2
+
+ kj = 0
+ do 770 j = 1, n
+ kj = kj - j
+ do 760 k = 1, j
+ kj = kj + 1
+ v2 = 2 * x(k) - 1
+ u1 = 0
+ u2 = 2
+ do 750 i = 1, n
+ h(kj) = h(kj) + u2
+ th = 4 * v2 + u2 - u1
+ u1 = u2
+ u2 = th
+ th = v2 - 1
+ 750 continue
+ 760 continue
+ 770 continue
+
+ end
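Note on the testcase: the inner "do 750" loop is what exercises the bug.
h(kj) accumulates a reduction while the pair (u1, u2) forms a
second-order recurrence, i.e. a nested cycle once the j/k loops are
vectorized at -O3.  A minimal C analog of that loop shape, purely for
illustration (the function name and the standalone framing are not part
of the patch):

    /* Mirrors the "do 750" loop above: h is the reduction, (u1, u2)
       is the nested cycle whose def type the patch stops adjusting.  */
    double
    analog (double v2, int n)
    {
      double h = 0., u1 = 0., u2 = 2.;
      for (int i = 0; i < n; i++)
        {
          h += u2;                       /* reduction */
          double th = 4 * v2 + u2 - u1;  /* recurrence step */
          u1 = u2;
          u2 = th;                       /* (u1, u2) carried to next iteration */
        }
      return h;
    }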
--- a/gcc/tree-vect-loop.c
+++ b/gcc/tree-vect-loop.c
if (STMT_VINFO_DEF_TYPE (stmt_info) == vect_nested_cycle)
{
if (is_a <gphi *> (stmt_info->stmt))
- {
- /* Analysis for double-reduction is done on the outer
- loop PHI, nested cycles have no further restrictions. */
- STMT_VINFO_TYPE (stmt_info) = cycle_phi_info_type;
- /* For nested cycles we want to let regular vectorizable_*
- routines handle code-generation. */
- if (STMT_VINFO_DEF_TYPE (reduc_info) != vect_double_reduction_def)
- {
- stmt_info = STMT_VINFO_REDUC_DEF (stmt_info);
- STMT_VINFO_DEF_TYPE (stmt_info) = vect_internal_def;
- STMT_VINFO_DEF_TYPE (vect_stmt_to_vectorize (stmt_info))
- = vect_internal_def;
- }
- }
+ /* Analysis for double-reduction is done on the outer
+ loop PHI; nested cycles have no further restrictions. */
+ STMT_VINFO_TYPE (stmt_info) = cycle_phi_info_type;
else
STMT_VINFO_TYPE (stmt_info) = reduc_vec_info_type;
return true;
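Background for the hunk above: the removed block demoted the definition
stmt of a nested cycle to vect_internal_def so that the regular
vectorizable_* routines would handle code generation, and the ChangeLog
entry drops exactly that adjustment.  A hedged sketch of the two loop
shapes the retained comment distinguishes (hypothetical function, not
from the patch): in a double reduction the accumulation spans both
loops, while a plain nested cycle is only an inner-loop recurrence.

    /* Illustration only, assuming outer-loop vectorization of J.  */
    double
    shapes (int n, int m)
    {
      double sum = 0.;            /* double reduction: spans both loops */
      for (int j = 0; j < n; j++)
        {
          double a = 0., b = 2.;  /* (a, b): nested cycle, inner loop only */
          for (int i = 0; i < m; i++)
            {
              sum += b;
              double t = b - a;
              a = b;
              b = t;
            }
        }
      return sum;
    }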
--- a/gcc/tree-vect-stmts.c
+++ b/gcc/tree-vect-stmts.c
&& STMT_VINFO_REDUC_TYPE (reduc_info) != EXTRACT_LAST_REDUCTION)
{
gphi *phi;
+ edge e;
if (!slp_node
&& (phi = dyn_cast <gphi *>
(STMT_VINFO_REDUC_DEF (orig_stmt_info)->stmt))
&& dominated_by_p (CDI_DOMINATORS,
- gimple_bb (orig_stmt_info->stmt), gimple_bb (phi)))
+ gimple_bb (orig_stmt_info->stmt), gimple_bb (phi))
+ && (e = loop_latch_edge (gimple_bb (phi)->loop_father))
+ && (PHI_ARG_DEF_FROM_EDGE (phi, e)
+ == gimple_get_lhs (orig_stmt_info->stmt)))
{
- edge e = loop_latch_edge (gimple_bb (phi)->loop_father);
stmt_vec_info phi_info
= STMT_VINFO_VEC_STMT (STMT_VINFO_REDUC_DEF (orig_stmt_info));
stmt_vec_info vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);
{
slp_tree phi_node = slp_node_instance->reduc_phis;
gphi *phi = as_a <gphi *> (SLP_TREE_SCALAR_STMTS (phi_node)[0]->stmt);
- edge e = loop_latch_edge (gimple_bb (phi)->loop_father);
+ e = loop_latch_edge (gimple_bb (phi)->loop_father);
gcc_assert (SLP_TREE_VEC_STMTS (phi_node).length ()
== SLP_TREE_VEC_STMTS (slp_node).length ());
for (unsigned i = 0; i < SLP_TREE_VEC_STMTS (phi_node).length (); ++i)
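The new guard in vect_transform_stmt only rewires a PHI latch argument
when the scalar stmt being transformed really defines it, matching the
ChangeLog entry ("verify the scalar stmt defines the latch argument of
the PHI").  Isolated as a predicate for illustration; the wrapper name
is hypothetical, while the macros and functions are the GCC internals
used in the hunk:

    /* True iff SCALAR_STMT computes the value PHI carries to the next
       iteration over the loop's latch edge.  */
    static bool
    scalar_stmt_defines_latch_arg_p (gphi *phi, gimple *scalar_stmt)
    {
      edge e = loop_latch_edge (gimple_bb (phi)->loop_father);
      return (e != NULL
              && PHI_ARG_DEF_FROM_EDGE (phi, e)
                 == gimple_get_lhs (scalar_stmt));
    }

Without the check, the code could update the latch value of a PHI the
stmt does not actually feed, which is presumably what went wrong for
the nested cycle in pr92094.f90.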