From: Richard Biener <rguenther@suse.de> Date: Mon, 6 Mar 2017 13:58:01 +0000 (+0000) Subject: re PR tree-optimization/79824 (Failure to peel for gaps leads to read beyond mapped... X-Git-Url: https://git.libre-soc.org/?a=commitdiff_plain;h=522fcdd739e5cc24bd8392f5c00dd5b439804c36;p=gcc.git re PR tree-optimization/79824 (Failure to peel for gaps leads to read beyond mapped memory) 2017-03-06 Richard Biener <rguenther@suse.de> PR tree-optimization/79824 * tree-vect-stmts.c (get_group_load_store_type): Fix alignment check disabling peeling for gaps. * gcc.dg/vect/pr79824-1.c: New testcase. * gcc.dg/vect/pr79824-2.c: Likewise. From-SVN: r245922 --- diff --git a/gcc/ChangeLog b/gcc/ChangeLog index be8b6d9b673..7c732a1dc77 100644 --- a/gcc/ChangeLog +++ b/gcc/ChangeLog @@ -1,3 +1,9 @@ +2017-03-06 Richard Biener <rguenther@suse.de> + + PR tree-optimization/79824 + * tree-vect-stmts.c (get_group_load_store_type): Fix alignment + check disabling peeling for gaps. + 2017-03-06 Toma Tabacu <toma.tabacu@imgtec.com> * doc/sourcebuild.texi (Effective-Target Keywords, Environment diff --git a/gcc/testsuite/ChangeLog b/gcc/testsuite/ChangeLog index f90154a6fa6..51432932bfe 100644 --- a/gcc/testsuite/ChangeLog +++ b/gcc/testsuite/ChangeLog @@ -1,3 +1,9 @@ +2017-03-06 Richard Biener <rguenther@suse.de> + + PR tree-optimization/79824 + * gcc.dg/vect/pr79824-1.c: New testcase. + * gcc.dg/vect/pr79824-2.c: Likewise. 
+ 2017-03-06 Toma Tabacu <toma.tabacu@imgtec.com> * gcc.dg/lto/pr60449_0.c: Add dg-require-effective-target for diff --git a/gcc/testsuite/gcc.dg/vect/pr79824-1.c b/gcc/testsuite/gcc.dg/vect/pr79824-1.c new file mode 100644 index 00000000000..e6b887b9d31 --- /dev/null +++ b/gcc/testsuite/gcc.dg/vect/pr79824-1.c @@ -0,0 +1,46 @@ +/* { dg-require-effective-target mmap } */ + +#include <sys/mman.h> +#include <stdio.h> +#include "tree-vect.h" + +#define COUNT 320 +#define MMAP_SIZE 0x10000 +#define ADDRESS 0x1122000000 +#define TYPE double + +#ifndef MAP_ANONYMOUS +#define MAP_ANONYMOUS MAP_ANON +#endif + +void __attribute__((noinline)) +foo (TYPE *__restrict a, TYPE *__restrict b) +{ + int n; + + b = __builtin_assume_aligned (b, sizeof (TYPE) * 2); + a = __builtin_assume_aligned (a, sizeof (TYPE) * 2); + for (n = 0; n < COUNT; n++) + a[n] = b[n * 4]; +} + +int +main (void) +{ + void *x; + size_t b_offset; + + check_vect (); + + x = mmap ((void *) ADDRESS, MMAP_SIZE, PROT_READ | PROT_WRITE, + MAP_PRIVATE | MAP_ANONYMOUS, -1, 0); + if (x == MAP_FAILED) + { + perror ("mmap"); + return 1; + } + + b_offset = MMAP_SIZE - (4 * COUNT - 2) * sizeof (TYPE); + foo ((TYPE *) x, (TYPE *) ((char *) x + b_offset)); + return 0; +} diff --git a/gcc/testsuite/gcc.dg/vect/pr79824-2.c b/gcc/testsuite/gcc.dg/vect/pr79824-2.c new file mode 100644 index 00000000000..629b97e21b1 --- /dev/null +++ b/gcc/testsuite/gcc.dg/vect/pr79824-2.c @@ -0,0 +1,48 @@ +/* { dg-require-effective-target mmap } */ + +#include <sys/mman.h> +#include <stdio.h> +#include "tree-vect.h" + +#define COUNT 320 +#define MMAP_SIZE 0x10000 +#define ADDRESS 0x1122000000 +#define TYPE double + +#ifndef MAP_ANONYMOUS +#define MAP_ANONYMOUS MAP_ANON +#endif + +void __attribute__((noinline)) +foo (TYPE *__restrict a, TYPE *__restrict b) +{ + int n; + + b = __builtin_assume_aligned (b, sizeof (TYPE) * 2); + a = __builtin_assume_aligned (a, sizeof (TYPE) * 2); + for (n = 0; n < COUNT; n++) + { + a[n] = b[n * 4] + b[n * 4 + 1]; + } +} + +int +main (void) +{ + void *x; + size_t b_offset; + + 
check_vect (); + + x = mmap ((void *) ADDRESS, MMAP_SIZE, PROT_READ | PROT_WRITE, + MAP_PRIVATE | MAP_ANONYMOUS, -1, 0); + if (x == MAP_FAILED) + { + perror ("mmap"); + return 1; + } + + b_offset = MMAP_SIZE - (4 * COUNT - 2) * sizeof (TYPE); + foo ((TYPE *) x, (TYPE *) ((char *) x + b_offset)); + return 0; +} diff --git a/gcc/tree-vect-stmts.c b/gcc/tree-vect-stmts.c index c87f72c8af8..9f28321280a 100644 --- a/gcc/tree-vect-stmts.c +++ b/gcc/tree-vect-stmts.c @@ -1731,7 +1731,7 @@ get_group_load_store_type (gimple *stmt, tree vectype, bool slp, bool single_element_p = (stmt == first_stmt && !GROUP_NEXT_ELEMENT (stmt_info)); unsigned HOST_WIDE_INT gap = GROUP_GAP (vinfo_for_stmt (first_stmt)); - int nunits = TYPE_VECTOR_SUBPARTS (vectype); + unsigned nunits = TYPE_VECTOR_SUBPARTS (vectype); /* True if the vectorized statements would access beyond the last statement in the group. */ @@ -1794,9 +1794,13 @@ get_group_load_store_type (gimple *stmt, tree vectype, bool slp, /* If there is a gap at the end of the group then these optimizations would access excess elements in the last iteration. */ bool would_overrun_p = (gap != 0); - /* If the access is aligned an overrun is fine. */ + /* If the access is aligned an overrun is fine, but only if the + overrun is not inside an unused vector (if the gap is as large + or larger than a vector). */ if (would_overrun_p - && aligned_access_p (STMT_VINFO_DATA_REF (stmt_info))) + && gap < nunits + && aligned_access_p + (STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt)))) would_overrun_p = false; if (!STMT_VINFO_STRIDED_P (stmt_info) && (can_overrun_p || !would_overrun_p)