{
if (!optimize
|| optimize_debug
+ || !flag_tree_loop_optimize
|| (!flag_tree_loop_vectorize
&& (global_options_set.x_flag_tree_loop_vectorize
|| global_options_set.x_flag_tree_vectorize)))
/* Don't add any barrier for #pragma omp simd or
#pragma omp distribute. */
if (gimple_code (ctx->stmt) != GIMPLE_OMP_FOR
- || gimple_omp_for_kind (ctx->stmt) & GF_OMP_FOR_KIND_FOR)
+ || gimple_omp_for_kind (ctx->stmt) == GF_OMP_FOR_KIND_FOR)
gimple_seq_add_stmt (ilist, build_omp_barrier (NULL_TREE));
}
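
The '==' change above fixes a kind test that could never fire: assuming
GF_OMP_FOR_KIND_FOR is the zero-valued enumerator of the kind mask, as in
gimple.h of this era, the old bitwise AND always yielded 0, so plain
worksharing-for regions silently lost the barrier here. A minimal sketch of
the pitfall, with hypothetical enumerator values standing in for the real
GF_OMP_FOR_KIND_* constants:

  enum for_kind            /* hypothetical values, for illustration only */
  {
    KIND_FOR = 0,          /* zero-valued, so it is not a testable bit */
    KIND_SIMD = 1,
    KIND_DISTRIBUTE = 2
  };

  static int
  needs_barrier (enum for_kind kind)
  {
    /* Buggy: (kind & KIND_FOR) is always 0, whatever kind holds.  */
    /* return kind & KIND_FOR; */
    /* Fixed: a zero-valued kind must be tested by equality.  */
    return kind == KIND_FOR;
  }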
if ((flag_tree_loop_vectorize
|| (!global_options_set.x_flag_tree_loop_vectorize
&& !global_options_set.x_flag_tree_vectorize))
+ && flag_tree_loop_optimize
&& loop->safelen > 1)
{
loop->force_vect = true;
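
For context, loop->safelen is populated from the simd construct's safelen
clause (or INT_MAX when the clause is absent), so safelen > 1 means the user
has promised there is no backward dependence closer than that distance and
forcing the vectorizer on for this loop is safe. A small usage example, with
a hypothetical function but standard OpenMP 4.0 syntax:

  void
  saxpy (float *restrict a, const float *restrict b, int n)
  {
    /* loop->safelen becomes 8 here; with the option guards above
       satisfied, loop->force_vect is set for this loop.  */
  #pragma omp simd safelen(8)
    for (int i = 0; i < n; i++)
      a[i] += b[i];
  }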
if ((ctx || task_shared_vars)
&& walk_gimple_op (stmt, lower_omp_regimplify_p,
ctx ? NULL : &wi))
- gimple_regimplify_operands (stmt, gsi_p);
+ {
+ /* Just remove clobbers; this should happen only if we have
+ "privatized" local addressable variables in SIMD regions.
+ The clobber isn't needed in that case, and gimplifying the address
+ of the ARRAY_REF into a pointer and creating a MEM_REF based
+ clobber would create worse code than we get with the clobber
+ dropped. */
+ if (gimple_clobber_p (stmt))
+ {
+ gsi_replace (gsi_p, gimple_build_nop (), true);
+ break;
+ }
+ gimple_regimplify_operands (stmt, gsi_p);
+ }
break;
}
}
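
The clobber case above guards against code along the lines of the following
sketch (a hypothetical reproducer, on the assumption that addressable locals
declared inside a simd body are privatized into per-lane elements of an
"omp simd array"): the end-of-scope clobber that gimplification emits for
tmp would have to be regimplified against an ARRAY_REF of that array, and
dropping it generates better code than a rebuilt MEM_REF based clobber.

  void
  bump (int *p, int n)
  {
  #pragma omp simd
    for (int i = 0; i < n; i++)
      {
        int tmp[2];        /* addressable local, privatized per SIMD lane */
        tmp[0] = p[i];
        p[i] = tmp[0] + 1;
      }                    /* gimplification emits tmp = {CLOBBER}; here */
  }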
gimple_stmt_iterator gsi;
for (gsi = gsi_start (*body); !gsi_end_p (gsi); gsi_next (&gsi))
lower_omp_1 (&gsi, ctx);
- /* Inside target region we haven't called fold_stmt during gimplification,
- because it can break code by adding decl references that weren't in the
- source. Call fold_stmt now. */
+ /* During gimplification, we have not always invoked fold_stmt
+ (gimplify.c:maybe_fold_stmt); call it now. */
if (target_nesting_level)
for (gsi = gsi_start (*body); !gsi_end_p (gsi); gsi_next (&gsi))
fold_stmt (&gsi);
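
The rewritten comment points at the other half of this contract in
gimplify.c: while gimplifying inside a target region, folding is skipped so
that fold_stmt cannot introduce decl references that were not in the source
(and hence have no mapping on the offload device), and lower_omp performs
the deferred folds here instead. A sketch of what that helper checks, from
memory rather than verbatim (the real check may also cover target data
regions):

  /* gimplify.c: avoid folding inside #pragma omp target regions;
     lower_omp runs the deferred fold_stmt calls afterwards.  */
  static bool
  maybe_fold_stmt (gimple_stmt_iterator *gsi)
  {
    struct gimplify_omp_ctx *ctx;
    for (ctx = gimplify_omp_ctxp; ctx; ctx = ctx->outer_context)
      if (ctx->region_type == ORT_TARGET)
        return false;
    return fold_stmt (gsi);
  }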