From: Richard Sandiford
Date: Wed, 3 Jan 2018 21:46:45 +0000 (+0000)
Subject: Move code that stubs out IFN_MASK_LOADs
X-Git-Url: https://git.libre-soc.org/?a=commitdiff_plain;h=e251d3ec013bc46d58470988fc45566051a9030b;p=gcc.git

Move code that stubs out IFN_MASK_LOADs

vectorizable_mask_load_store replaces scalar IFN_MASK_LOAD calls with
dummy assignments, so that they never survive vectorisation.  This patch
moves the code to vect_transform_loop instead, so that we only change
the scalar statements once all of them have been vectorised.

This makes it easier to handle other types of functions that need
stubbing out, and also makes it easier to handle groups and patterns.

2018-01-03  Richard Sandiford

gcc/
	* tree-vect-loop.c (vect_transform_loop): Stub out scalar
	IFN_MASK_LOAD calls here rather than...
	* tree-vect-stmts.c (vectorizable_mask_load_store): ...here.

From-SVN: r256210
---
diff --git a/gcc/ChangeLog b/gcc/ChangeLog
index 5dbf3ba5524..309c00942bf 100644
--- a/gcc/ChangeLog
+++ b/gcc/ChangeLog
@@ -1,3 +1,9 @@
+2018-01-03  Richard Sandiford
+
+	* tree-vect-loop.c (vect_transform_loop): Stub out scalar
+	IFN_MASK_LOAD calls here rather than...
+	* tree-vect-stmts.c (vectorizable_mask_load_store): ...here.
+
 2018-01-03  Richard Sandiford
 	    Alan Hayward
 	    David Sherwood
diff --git a/gcc/tree-vect-loop.c b/gcc/tree-vect-loop.c
index 2fd11df6c09..c2501a8407c 100644
--- a/gcc/tree-vect-loop.c
+++ b/gcc/tree-vect-loop.c
@@ -7810,6 +7810,25 @@ vect_transform_loop (loop_vec_info loop_vinfo)
 	      gsi_next (&si);
 	    }
 	}			/* stmts in BB */
+
+      /* Stub out scalar statements that must not survive vectorization.
+	 Doing this here helps with grouped statements, or statements that
+	 are involved in patterns.  */
+      for (gimple_stmt_iterator gsi = gsi_start_bb (bb);
+	   !gsi_end_p (gsi); gsi_next (&gsi))
+	{
+	  gcall *call = dyn_cast <gcall *> (gsi_stmt (gsi));
+	  if (call && gimple_call_internal_p (call, IFN_MASK_LOAD))
+	    {
+	      tree lhs = gimple_get_lhs (call);
+	      if (!VECTOR_TYPE_P (TREE_TYPE (lhs)))
+		{
+		  tree zero = build_zero_cst (TREE_TYPE (lhs));
+		  gimple *new_stmt = gimple_build_assign (lhs, zero);
+		  gsi_replace (&gsi, new_stmt, true);
+		}
+	    }
+	}
     }				/* BBs in loop */
 
   /* The vectorization factor is always > 1, so if we use an IV increment of 1.
diff --git a/gcc/tree-vect-stmts.c b/gcc/tree-vect-stmts.c
index 27a8f7bb7ae..96c6605d959 100644
--- a/gcc/tree-vect-stmts.c
+++ b/gcc/tree-vect-stmts.c
@@ -2347,20 +2347,6 @@ vectorizable_mask_load_store (gimple *stmt, gimple_stmt_iterator *gsi,
 	  STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
 	  prev_stmt_info = vinfo_for_stmt (new_stmt);
 	}
-
-      /* Ensure that even with -fno-tree-dce the scalar MASK_LOAD is removed
-	 from the IL.  */
-      if (STMT_VINFO_RELATED_STMT (stmt_info))
-	{
-	  stmt = STMT_VINFO_RELATED_STMT (stmt_info);
-	  stmt_info = vinfo_for_stmt (stmt);
-	}
-      tree lhs = gimple_call_lhs (stmt);
-      new_stmt = gimple_build_assign (lhs, build_zero_cst (TREE_TYPE (lhs)));
-      set_vinfo_for_stmt (new_stmt, stmt_info);
-      set_vinfo_for_stmt (stmt, NULL);
-      STMT_VINFO_STMT (stmt_info) = new_stmt;
-      gsi_replace (gsi, new_stmt, true);
       return true;
     }
   else if (vls_type != VLS_LOAD)
@@ -2477,23 +2463,6 @@ vectorizable_mask_load_store (gimple *stmt, gimple_stmt_iterator *gsi,
 	    }
 	}
 
-  if (vls_type == VLS_LOAD)
-    {
-      /* Ensure that even with -fno-tree-dce the scalar MASK_LOAD is removed
-	 from the IL.  */
-      if (STMT_VINFO_RELATED_STMT (stmt_info))
-	{
-	  stmt = STMT_VINFO_RELATED_STMT (stmt_info);
-	  stmt_info = vinfo_for_stmt (stmt);
-	}
-      tree lhs = gimple_call_lhs (stmt);
-      new_stmt = gimple_build_assign (lhs, build_zero_cst (TREE_TYPE (lhs)));
-      set_vinfo_for_stmt (new_stmt, stmt_info);
-      set_vinfo_for_stmt (stmt, NULL);
-      STMT_VINFO_STMT (stmt_info) = new_stmt;
-      gsi_replace (gsi, new_stmt, true);
-    }
-
   return true;
 }
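
For a concrete picture of what is being stubbed out, here is a minimal,
self-contained sketch of the kind of scalar loop that gives rise to
IFN_MASK_LOAD calls.  The function name f and the suggested flags are
illustrative assumptions, not part of the patch.  If-conversion turns the
guarded load of b[i] into a scalar IFN_MASK_LOAD call; once the vectoriser
has emitted the corresponding vector masked load, the leftover scalar call
must not survive to expansion, which is why it is overwritten with a dummy
zero assignment rather than left for DCE (see the loop added to
vect_transform_loop above).

/* Illustrative example only; compile with something like -O3 -mavx2,
   where the target supports masked vector loads.  The load of b[i]
   executes only when c[i] is nonzero, so if-conversion rewrites it as
   an IFN_MASK_LOAD and the loop vectoriser then replaces it with a
   vector masked load.  */
void
f (int *restrict a, int *restrict b, int *restrict c, int n)
{
  for (int i = 0; i < n; ++i)
    if (c[i])
      a[i] = b[i];
}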