From: Richard Biener
Date: Tue, 8 Nov 2016 10:23:57 +0000 (+0000)
Subject: tree-vect-stmts.c (get_group_load_store_type): If the access is aligned do not trigge...
X-Git-Url: https://git.libre-soc.org/?a=commitdiff_plain;h=f9ef2c765ad3fce93ddacdabede6b67efdc13c0d;p=gcc.git

tree-vect-stmts.c (get_group_load_store_type): If the access is aligned do not trigger peeling for gaps.

2016-11-08  Richard Biener

	* tree-vect-stmts.c (get_group_load_store_type): If the
	access is aligned do not trigger peeling for gaps.
	* tree-vect-data-refs.c (vect_compute_data_ref_alignment): Do not
	force alignment of vars with DECL_USER_ALIGN.

	* gcc.dg/vect/vect-nb-iter-ub-2.c: Adjust.

From-SVN: r241959
---

diff --git a/gcc/ChangeLog b/gcc/ChangeLog
index 4f5c464cec4..694c8e4cfa4 100644
--- a/gcc/ChangeLog
+++ b/gcc/ChangeLog
@@ -1,3 +1,10 @@
+2016-11-08  Richard Biener
+
+	* tree-vect-stmts.c (get_group_load_store_type): If the
+	access is aligned do not trigger peeling for gaps.
+	* tree-vect-data-refs.c (vect_compute_data_ref_alignment): Do not
+	force alignment of vars with DECL_USER_ALIGN.
+
 2016-11-08  James Greenhalgh
 
 	* config/aarch64/t-aarch64 (aarch64-c.o): Depend on TARGET_H.
diff --git a/gcc/testsuite/ChangeLog b/gcc/testsuite/ChangeLog
index 060b1a7d291..36b0d33776f 100644
--- a/gcc/testsuite/ChangeLog
+++ b/gcc/testsuite/ChangeLog
@@ -1,3 +1,7 @@
+2016-11-08  Richard Biener
+
+	* gcc.dg/vect/vect-nb-iter-ub-2.c: Adjust.
+
 2016-11-08  Tamar Christina
 
 	PR testsuite/78136
diff --git a/gcc/testsuite/gcc.dg/vect/vect-nb-iter-ub-2.c b/gcc/testsuite/gcc.dg/vect/vect-nb-iter-ub-2.c
index bc07b4bea0f..4e13702621f 100644
--- a/gcc/testsuite/gcc.dg/vect/vect-nb-iter-ub-2.c
+++ b/gcc/testsuite/gcc.dg/vect/vect-nb-iter-ub-2.c
@@ -3,7 +3,7 @@
 #include "tree-vect.h"
 
 int ii[32];
-char cc[66] =
+char cc[66] __attribute__((aligned(1))) =
   { 0, 0, 1, 0, 2, 0, 3, 0, 4, 0, 5, 0, 6, 0, 7, 0, 8, 0, 9, 0,
     10, 0, 11, 0, 12, 0, 13, 0, 14, 0, 15, 0, 16, 0, 17, 0, 18, 0, 19, 0,
     20, 0, 21, 0, 22, 0, 23, 0, 24, 0, 25, 0, 26, 0, 27, 0, 28, 0, 29, 0,
diff --git a/gcc/tree-vect-data-refs.c b/gcc/tree-vect-data-refs.c
index b03cb1ec637..f014d688036 100644
--- a/gcc/tree-vect-data-refs.c
+++ b/gcc/tree-vect-data-refs.c
@@ -831,6 +831,19 @@ vect_compute_data_ref_alignment (struct data_reference *dr)
       return true;
     }
 
+  if (DECL_USER_ALIGN (base))
+    {
+      if (dump_enabled_p ())
+	{
+	  dump_printf_loc (MSG_NOTE, vect_location,
+			   "not forcing alignment of user-aligned "
+			   "variable: ");
+	  dump_generic_expr (MSG_NOTE, TDF_SLIM, base);
+	  dump_printf (MSG_NOTE, "\n");
+	}
+      return true;
+    }
+
   /* Force the alignment of the decl.
      NOTE: This is the only change to the code we make during
      the analysis phase, before deciding to vectorize the loop.  */
diff --git a/gcc/tree-vect-stmts.c b/gcc/tree-vect-stmts.c
index 15aec2197b3..c29e73df946 100644
--- a/gcc/tree-vect-stmts.c
+++ b/gcc/tree-vect-stmts.c
@@ -1770,6 +1770,11 @@ get_group_load_store_type (gimple *stmt, tree vectype, bool slp,
 			     " non-consecutive accesses\n");
 	  return false;
 	}
+      /* If the access is aligned an overrun is fine.  */
+      if (overrun_p
+	  && aligned_access_p
+	       (STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt))))
+	overrun_p = false;
       if (overrun_p && !can_overrun_p)
 	{
 	  if (dump_enabled_p ())
@@ -1789,6 +1794,10 @@ get_group_load_store_type (gimple *stmt, tree vectype, bool slp,
       /* If there is a gap at the end of the group then these optimizations
 	 would access excess elements in the last iteration.  */
       bool would_overrun_p = (gap != 0);
+      /* If the access is aligned an overrun is fine.  */
+      if (would_overrun_p
+	  && aligned_access_p (STMT_VINFO_DATA_REF (stmt_info)))
+	would_overrun_p = false;
       if (!STMT_VINFO_STRIDED_P (stmt_info)
 	  && (can_overrun_p || !would_overrun_p)
 	  && compare_step_with_zero (stmt) > 0)
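As a rough illustration of the situation the patch targets (this sketch is not part of the commit; the array, function and macro names are made up), consider a grouped load with a gap: only the even elements of "in" are read, so the vectorizer would normally trigger peeling for gaps to avoid reading past the last group in the final iteration. With this change that peeling is skipped when the access is known to be aligned, and the tree-vect-data-refs.c hunk means an alignment the user requested explicitly (DECL_USER_ALIGN) is left alone instead of being raised by the vectorizer.

/* Hypothetical reduced example, not taken from the GCC testsuite.  */
#define N 16

/* The aligned attribute sets DECL_USER_ALIGN, so after this patch the
   vectorizer no longer forces a different alignment on "in".  */
char in[2 * N] __attribute__((aligned(16)));
int out[N];

void
foo (void)
{
  /* Load group of size 2 with a gap: in[2*i + 1] is never used.  */
  for (int i = 0; i < N; ++i)
    out[i] = in[2 * i];
}

The testsuite adjustment complements this: giving cc an explicit aligned(1) sets DECL_USER_ALIGN, so its alignment is no longer raised by the vectorizer, the access is not treated as aligned, and the test presumably keeps exercising the original peeling-for-gaps path.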