+2015-06-01 Yuri Rumyantsev <ysrumyan@gmail.com>
+
+ * tree-vect-data-refs.c (vect_analyze_data_ref_access): Allow
+ consecutive accesses within outer-loop with force_vectorize
+ for references with zero step in inner-loop.
+
2015-06-01 Vidya Praveen <vidyapraveen@arm.com>
 * Makefile.in: Pick up gcov-dump dependencies from gcc/ directory.
+2015-06-01 Yuri Rumyantsev <ysrumyan@gmail.com>
+
+ * gcc.dg/vect/vect-outer-simd-1.c: New test.
+
2015-06-01 Matthew Wahab <matthew.wahab@arm.com>
PR target/65697
--- /dev/null
+/* { dg-require-effective-target vect_simd_clones } */
+/* { dg-additional-options "-fopenmp-simd -ffast-math" } */
+#include <stdlib.h>
+#include "tree-vect.h"
+#define N 64
+
+float *px, *py;
+float *tx, *ty;
+float *x1, *z1, *t1, *t2;
+
+/* Inner loop kernel.  *vx and *vy are loop-invariant (zero-step)
+   references with respect to this j-loop; when bar is inlined into
+   the simd outer loop in foo1, these become exactly the zero-step
+   inner-loop accesses the vectorizer change permits for outer-loop
+   vectorization under force_vectorize.  */
+static void inline bar(const float cx, float cy,
+ float *vx, float *vy)
+{
+ int j;
+ for (j = 0; j < N; ++j)
+ {
+ const float dx = cx - px[j];
+ const float dy = cy - py[j];
+ *vx -= dx * tx[j];
+ *vy -= dy * ty[j];
+ }
+}
+
+/* Vector variant: the omp simd pragma marks the outer loop
+   force_vectorize, which the patched vectorizer accepts even though
+   the inlined bar contains zero-step inner-loop references
+   (x1+i / z1+i).  noinline/noclone keep the loop intact for the
+   "OUTER LOOP VECTORIZED" dump scan.  */
+__attribute__((noinline, noclone)) void foo1 ()
+{
+ int i;
+#pragma omp simd
+ for (i=0; i<N; i++)
+ bar(px[i], py[i], x1+i, z1+i);
+}
+
+/* Scalar reference variant: the volatile induction variable keeps
+   this loop from being vectorized, so its results are the expected
+   values that main compares against foo1's vectorized results.  */
+__attribute__((noinline, noclone)) void foo2 ()
+{
+ volatile int i;
+ for (i=0; i<N; i++)
+ bar(px[i], py[i], x1+i, z1+i);
+}
+
+
+/* Run the vector and scalar variants on identical input and abort on
+   any mismatch between their results.  */
+int main()
+{
+ /* One backing allocation carved into the eight global N-element
+    arrays.  NOTE(review): malloc result is not checked — acceptable
+    for a testsuite case, would abort via check below on most hosts.  */
+ float *X = (float*)malloc(N * 8 * sizeof (float));
+ int i;
+ check_vect ();
+ px = &X[0];
+ py = &X[N * 1];
+ tx = &X[N * 2];
+ ty = &X[N * 3];
+ x1 = &X[N * 4];
+ z1 = &X[N * 5];
+ t1 = &X[N * 6];
+ t2 = &X[N * 7];
+
+ /* Deterministic input data; x1/z1 are the accumulators.  */
+ for (i=0; i<N; i++)
+ {
+ px[i] = (float) (i+2);
+ tx[i] = (float) (i+1);
+ py[i] = (float) (i+4);
+ ty[i] = (float) (i+3);
+ x1[i] = z1[i] = 1.0f;
+ }
+ foo1 (); /* vector variant. */
+ /* Stash the vector results in t1/t2 and reset the accumulators so
+    the scalar variant starts from the same state.  */
+ for (i=0; i<N;i++)
+ {
+ t1[i] = x1[i]; x1[i] = 1.0f;
+ t2[i] = z1[i]; z1[i] = 1.0f;
+ }
+ foo2 (); /* scalar variant. */
+ /* Exact equality is valid here despite float arithmetic: both
+    variants perform the same operations in the same order per
+    element (-ffast-math notwithstanding, the dg test relies on
+    matching results).  */
+ for (i=0; i<N; i++)
+ if (x1[i] != t1[i] || z1[i] != t2[i])
+ abort ();
+ return 0;
+}
+/* { dg-final { scan-tree-dump "OUTER LOOP VECTORIZED" "vect" } } */
+/* { dg-final { cleanup-tree-dump "vect" } } */
return false;
}
- /* Allow invariant loads in not nested loops. */
+ /* Allow loads with zero step in inner-loop vectorization. */
if (loop_vinfo && integer_zerop (step))
{
GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt)) = NULL;
- if (nested_in_vect_loop_p (loop, stmt))
+ if (!nested_in_vect_loop_p (loop, stmt))
+ return DR_IS_READ (dr);
+ /* Allow references with zero step for outer loops marked
+ with pragma omp simd only - it guarantees absence of
+ loop-carried dependencies between inner loop iterations. */
+ if (!loop->force_vectorize)
{
if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location,
"zero step in inner loop of nest\n");
return false;
}
- return DR_IS_READ (dr);
}
if (loop && nested_in_vect_loop_p (loop, stmt))