+2015-11-26 Jakub Jelinek <jakub@redhat.com>
+
+ PR tree-optimization/68128
+ * tree.h (OMP_CLAUSE_SHARED_READONLY): Define.
+ * gimplify.c: Include gimple-walk.h.
+ (enum gimplify_omp_var_data): Add GOVD_WRITTEN.
+ (omp_notice_variable): Set flags to n->value if n already
+ exists in target region, but we need to jump to do_outer.
+ (omp_shared_to_firstprivate_optimizable_decl_p,
+ omp_mark_stores, omp_find_stores_op, omp_find_stores_stmt): New
+ functions.
+ (gimplify_adjust_omp_clauses_1): Set OMP_CLAUSE_SHARED_READONLY
+ on OMP_CLAUSE_SHARED if it is a scalar non-addressable that is
+ not modified in the body. Call omp_mark_stores for outer
+ contexts on OMP_CLAUSE_SHARED clauses if they could be written
+ in the body or on OMP_CLAUSE_LASTPRIVATE.
+ (gimplify_adjust_omp_clauses): Add body argument, call
+ omp_find_stores_{stmt,op} on the body through walk_gimple_seq.
+ Set OMP_CLAUSE_SHARED_READONLY
+ on OMP_CLAUSE_SHARED if it is a scalar non-addressable that is
+ not modified in the body. Call omp_mark_stores for outer
+ contexts on OMP_CLAUSE_SHARED clauses if they could be written
+ in the body or on OMP_CLAUSE_LASTPRIVATE or on OMP_CLAUSE_LINEAR
+ without OMP_CLAUSE_LINEAR_NO_COPYOUT or on OMP_CLAUSE_REDUCTION.
+ (gimplify_oacc_cache, gimplify_omp_parallel, gimplify_omp_task,
+ gimplify_omp_for, gimplify_omp_workshare, gimplify_omp_target_update,
+ gimplify_expr): Adjust gimplify_adjust_omp_clauses callers.
+ * tree-nested.c (convert_nonlocal_omp_clauses,
+ convert_local_omp_clauses): Clear OMP_CLAUSE_SHARED_READONLY on
+ non-local vars or local vars referenced from nested routines.
+ * omp-low.c (scan_sharing_clauses): For OMP_CLAUSE_SHARED_READONLY
+ attempt to optimize it into OMP_CLAUSE_FIRSTPRIVATE. Even for
+ TREE_READONLY, don't call use_pointer_for_field with non-NULL
+ second argument until we are sure we are keeping OMP_CLAUSE_SHARED.
+
2015-11-26 Paolo Bonzini <bonzini@gnu.org>
* doc/implement-c.texi (Integers Implementation): Make GCC's promises
#include "cilk.h"
#include "gomp-constants.h"
#include "tree-dump.h"
-
+#include "gimple-walk.h"
#include "langhooks-def.h" /* FIXME: for lhd_set_decl_assembler_name */
#include "builtins.h"
/* Flag for GOVD_MAP, if it is always, to or always, tofrom mapping. */
GOVD_MAP_ALWAYS_TO = 65536,
+ /* Flag for shared vars that are or might be stored to in the region. */
+ GOVD_WRITTEN = 131072,
+
GOVD_DATA_SHARE_CLASS = (GOVD_SHARED | GOVD_PRIVATE | GOVD_FIRSTPRIVATE
| GOVD_LASTPRIVATE | GOVD_REDUCTION | GOVD_LINEAR
| GOVD_LOCAL)
/* If nothing changed, there's nothing left to do. */
if ((n->value & flags) == flags)
return ret;
- n->value |= flags;
+ flags |= n->value;
+ n->value = flags;
}
goto do_outer;
}
delete struct_map_to_clause;
}
+/* Return true if DECL is a candidate for shared to firstprivate
+ optimization. We only consider non-addressable scalars, not
+ too big, and not references. */
+
+static bool
+omp_shared_to_firstprivate_optimizable_decl_p (tree decl)
+{
+ if (TREE_ADDRESSABLE (decl))
+ return false;
+ tree type = TREE_TYPE (decl);
+ /* Only scalar (gimple register) types qualify; reference types and
+ addressable types are rejected explicitly. */
+ if (!is_gimple_reg_type (type)
+ || TREE_CODE (type) == REFERENCE_TYPE
+ || TREE_ADDRESSABLE (type))
+ return false;
+ /* Don't optimize too large decls, as each thread/task will have
+ its own. */
+ HOST_WIDE_INT len = int_size_in_bytes (type);
+ if (len == -1 || len > 4 * POINTER_SIZE / BITS_PER_UNIT)
+ return false;
+ /* Decls the frontend privatizes by reference don't qualify either. */
+ if (lang_hooks.decls.omp_privatize_by_reference (decl))
+ return false;
+ return true;
+}
+
+/* Helper function of omp_find_stores_op and gimplify_adjust_omp_clauses*.
+ Walk outward from CTX and mark DECL (which should satisfy
+ omp_shared_to_firstprivate_optimizable_decl_p) as GOVD_WRITTEN in the
+ innermost context that has it GOVD_SHARED. Any other data-sharing of
+ DECL stops the walk, as it screens DECL off from outer contexts. */
+
+static void
+omp_mark_stores (struct gimplify_omp_ctx *ctx, tree decl)
+{
+ for (; ctx; ctx = ctx->outer_context)
+ {
+ splay_tree_node n = splay_tree_lookup (ctx->variables,
+ (splay_tree_key) decl);
+ if (n == NULL)
+ continue;
+ else if (n->value & GOVD_SHARED)
+ {
+ n->value |= GOVD_WRITTEN;
+ return;
+ }
+ else if (n->value & GOVD_DATA_SHARE_CLASS)
+ return;
+ }
+}
+
+/* Helper callback for walk_gimple_seq to discover possible stores
+ to omp_shared_to_firstprivate_optimizable_decl_p decls and set
+ GOVD_WRITTEN if they are GOVD_SHARED in some outer context
+ for those. */
+
+static tree
+omp_find_stores_op (tree *tp, int *walk_subtrees, void *data)
+{
+ struct walk_stmt_info *wi = (struct walk_stmt_info *) data;
+
+ *walk_subtrees = 0;
+ /* Only stores matter, i.e. operands walked as a LHS. */
+ if (!wi->is_lhs)
+ return NULL_TREE;
+
+ /* Strip handled components and ADDR_EXPR-based (TARGET_)MEM_REFs
+ to get at the base decl being stored to. */
+ tree op = *tp;
+ do
+ {
+ if (handled_component_p (op))
+ op = TREE_OPERAND (op, 0);
+ else if ((TREE_CODE (op) == MEM_REF || TREE_CODE (op) == TARGET_MEM_REF)
+ && TREE_CODE (TREE_OPERAND (op, 0)) == ADDR_EXPR)
+ op = TREE_OPERAND (TREE_OPERAND (op, 0), 0);
+ else
+ break;
+ }
+ while (1);
+ if (!DECL_P (op) || !omp_shared_to_firstprivate_optimizable_decl_p (op))
+ return NULL_TREE;
+
+ omp_mark_stores (gimplify_omp_ctxp, op);
+ return NULL_TREE;
+}
+
+/* Statement callback for walk_gimple_seq used together with
+ omp_find_stores_op: limit which statements' operands the operand
+ callback gets to see. Nested OpenMP constructs whose bodies
+ gimplify_adjust_omp_clauses has already handled are not walked into,
+ except for GIMPLE_OMP_FOR pre-bodies, which are walked here. */
+
+static tree
+omp_find_stores_stmt (gimple_stmt_iterator *gsi_p,
+ bool *handled_ops_p,
+ struct walk_stmt_info *wi)
+{
+ gimple *stmt = gsi_stmt (*gsi_p);
+ switch (gimple_code (stmt))
+ {
+ /* Don't recurse on OpenMP constructs for which
+ gimplify_adjust_omp_clauses already handled the bodies,
+ except handle gimple_omp_for_pre_body. */
+ case GIMPLE_OMP_FOR:
+ *handled_ops_p = true;
+ if (gimple_omp_for_pre_body (stmt))
+ walk_gimple_seq (gimple_omp_for_pre_body (stmt),
+ omp_find_stores_stmt, omp_find_stores_op, wi);
+ break;
+ case GIMPLE_OMP_PARALLEL:
+ case GIMPLE_OMP_TASK:
+ case GIMPLE_OMP_SECTIONS:
+ case GIMPLE_OMP_SINGLE:
+ case GIMPLE_OMP_TARGET:
+ case GIMPLE_OMP_TEAMS:
+ case GIMPLE_OMP_CRITICAL:
+ *handled_ops_p = true;
+ break;
+ default:
+ break;
+ }
+ return NULL_TREE;
+}
+
struct gimplify_adjust_omp_clauses_data
{
tree *list_p;
else
gcc_unreachable ();
+ if (((flags & GOVD_LASTPRIVATE)
+ || (code == OMP_CLAUSE_SHARED && (flags & GOVD_WRITTEN)))
+ && omp_shared_to_firstprivate_optimizable_decl_p (decl))
+ omp_mark_stores (gimplify_omp_ctxp->outer_context, decl);
+
clause = build_omp_clause (input_location, code);
OMP_CLAUSE_DECL (clause) = decl;
OMP_CLAUSE_CHAIN (clause) = *list_p;
OMP_CLAUSE_PRIVATE_DEBUG (clause) = 1;
else if (code == OMP_CLAUSE_PRIVATE && (flags & GOVD_PRIVATE_OUTER_REF))
OMP_CLAUSE_PRIVATE_OUTER_REF (clause) = 1;
+ else if (code == OMP_CLAUSE_SHARED
+ && (flags & GOVD_WRITTEN) == 0
+ && omp_shared_to_firstprivate_optimizable_decl_p (decl))
+ OMP_CLAUSE_SHARED_READONLY (clause) = 1;
else if (code == OMP_CLAUSE_MAP && (flags & GOVD_MAP_0LEN_ARRAY) != 0)
{
tree nc = build_omp_clause (input_location, OMP_CLAUSE_MAP);
}
static void
-gimplify_adjust_omp_clauses (gimple_seq *pre_p, tree *list_p,
+gimplify_adjust_omp_clauses (gimple_seq *pre_p, gimple_seq body, tree *list_p,
enum tree_code code)
{
struct gimplify_omp_ctx *ctx = gimplify_omp_ctxp;
tree c, decl;
+ if (body)
+ {
+ struct gimplify_omp_ctx *octx;
+ for (octx = ctx; octx; octx = octx->outer_context)
+ if ((octx->region_type & (ORT_PARALLEL | ORT_TASK | ORT_TEAMS)) != 0)
+ break;
+ if (octx)
+ {
+ struct walk_stmt_info wi;
+ memset (&wi, 0, sizeof (wi));
+ walk_gimple_seq (body, omp_find_stores_stmt,
+ omp_find_stores_op, &wi);
+ }
+ }
while ((c = *list_p) != NULL)
{
splay_tree_node n;
OMP_CLAUSE_SET_CODE (c, OMP_CLAUSE_PRIVATE);
OMP_CLAUSE_PRIVATE_DEBUG (c) = 1;
}
+ /* Shared scalars never written in the body can be optimized into
+ firstprivate later (see OMP_CLAUSE_SHARED_READONLY handling in
+ omp-low.c); otherwise propagate the store to outer contexts. */
+ if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_SHARED
+ && (n->value & GOVD_WRITTEN) == 0
+ && DECL_P (decl)
+ && omp_shared_to_firstprivate_optimizable_decl_p (decl))
+ OMP_CLAUSE_SHARED_READONLY (c) = 1;
+ else if (DECL_P (decl)
+ && ((OMP_CLAUSE_CODE (c) == OMP_CLAUSE_SHARED
+ /* GOVD_WRITTEN is a bit mask, so compare the masked
+ value against zero, not against 1. */
+ && (n->value & GOVD_WRITTEN) != 0)
+ || (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LINEAR
+ && !OMP_CLAUSE_LINEAR_NO_COPYOUT (c)))
+ && omp_shared_to_firstprivate_optimizable_decl_p (decl))
+ omp_mark_stores (gimplify_omp_ctxp->outer_context, decl);
}
break;
"%<lastprivate%> clauses on %<distribute%> "
"construct");
}
+ if (!remove
+ && OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LASTPRIVATE
+ && DECL_P (decl)
+ && omp_shared_to_firstprivate_optimizable_decl_p (decl))
+ omp_mark_stores (gimplify_omp_ctxp->outer_context, decl);
break;
case OMP_CLAUSE_ALIGNED:
break;
case OMP_CLAUSE_REDUCTION:
+ decl = OMP_CLAUSE_DECL (c);
+ if (DECL_P (decl)
+ && omp_shared_to_firstprivate_optimizable_decl_p (decl))
+ omp_mark_stores (gimplify_omp_ctxp->outer_context, decl);
+ break;
case OMP_CLAUSE_COPYIN:
case OMP_CLAUSE_COPYPRIVATE:
case OMP_CLAUSE_IF:
gimplify_scan_omp_clauses (&OACC_CACHE_CLAUSES (expr), pre_p, ORT_ACC,
OACC_CACHE);
- gimplify_adjust_omp_clauses (pre_p, &OACC_CACHE_CLAUSES (expr), OACC_CACHE);
+ gimplify_adjust_omp_clauses (pre_p, NULL, &OACC_CACHE_CLAUSES (expr),
+ OACC_CACHE);
/* TODO: Do something sensible with this information. */
else
pop_gimplify_context (NULL);
- gimplify_adjust_omp_clauses (pre_p, &OMP_PARALLEL_CLAUSES (expr),
+ gimplify_adjust_omp_clauses (pre_p, body, &OMP_PARALLEL_CLAUSES (expr),
OMP_PARALLEL);
g = gimple_build_omp_parallel (body,
else
pop_gimplify_context (NULL);
- gimplify_adjust_omp_clauses (pre_p, &OMP_TASK_CLAUSES (expr), OMP_TASK);
+ gimplify_adjust_omp_clauses (pre_p, body, &OMP_TASK_CLAUSES (expr),
+ OMP_TASK);
g = gimple_build_omp_task (body,
OMP_TASK_CLAUSES (expr),
TREE_OPERAND (TREE_OPERAND (t, 1), 0) = var;
}
- gimplify_adjust_omp_clauses (pre_p, &OMP_FOR_CLAUSES (orig_for_stmt),
+ gimplify_adjust_omp_clauses (pre_p, for_body,
+ &OMP_FOR_CLAUSES (orig_for_stmt),
TREE_CODE (orig_for_stmt));
int kind;
}
else
gimplify_and_add (OMP_BODY (expr), &body);
- gimplify_adjust_omp_clauses (pre_p, &OMP_CLAUSES (expr), TREE_CODE (expr));
+ gimplify_adjust_omp_clauses (pre_p, body, &OMP_CLAUSES (expr),
+ TREE_CODE (expr));
switch (TREE_CODE (expr))
{
}
gimplify_scan_omp_clauses (&OMP_STANDALONE_CLAUSES (expr), pre_p,
ort, TREE_CODE (expr));
- gimplify_adjust_omp_clauses (pre_p, &OMP_STANDALONE_CLAUSES (expr),
+ gimplify_adjust_omp_clauses (pre_p, NULL, &OMP_STANDALONE_CLAUSES (expr),
TREE_CODE (expr));
stmt = gimple_build_omp_target (NULL, kind, OMP_STANDALONE_CLAUSES (expr));
case OMP_CRITICAL:
gimplify_scan_omp_clauses (&OMP_CRITICAL_CLAUSES (*expr_p),
pre_p, ORT_WORKSHARE, OMP_CRITICAL);
- gimplify_adjust_omp_clauses (pre_p,
+ gimplify_adjust_omp_clauses (pre_p, body,
&OMP_CRITICAL_CLAUSES (*expr_p),
OMP_CRITICAL);
g = gimple_build_omp_critical (body,
the receiver side will use them directly. */
if (is_global_var (maybe_lookup_decl_in_outer_ctx (decl, ctx)))
break;
- by_ref = use_pointer_for_field (decl, ctx);
if (OMP_CLAUSE_SHARED_FIRSTPRIVATE (c))
- break;
- if (! TREE_READONLY (decl)
+ {
+ use_pointer_for_field (decl, ctx);
+ break;
+ }
+ by_ref = use_pointer_for_field (decl, NULL);
+ if ((! TREE_READONLY (decl) && !OMP_CLAUSE_SHARED_READONLY (c))
|| TREE_ADDRESSABLE (decl)
|| by_ref
|| is_reference (decl))
{
+ by_ref = use_pointer_for_field (decl, ctx);
install_var_field (decl, by_ref, 3, ctx);
install_var_local (decl, ctx);
break;
+2015-11-26 Jakub Jelinek <jakub@redhat.com>
+
+ PR tree-optimization/68128
+ * gcc.dg/gomp/pr68128-1.c: New test.
+ * gcc.dg/gomp/pr68128-2.c: New test.
+
2015-11-26 Kyrylo Tkachov <kyrylo.tkachov@arm.com>
PR rtl-optimization/67226
--- /dev/null
+/* PR tree-optimization/68128 */
+/* { dg-do compile } */
+/* { dg-options "-Ofast -fopenmp -fdump-tree-vect-details" } */
+/* { dg-additional-options "-mavx" { target i?86-*-* x86_64-*-* } } */
+
+/* Make sure the following loop is vectorized even when not using
+ firstprivate variables for scalar vars that are not modified
+ in the parallel region. */
+
+void
+foo (float *u, float v, float w, float x, float y, float z, float t)
+{
+ int i, j, k, l;
+ float a, *b, c, s, e;
+#pragma omp parallel for private (i, j, k, l, a, b, c, s, e)
+ for (j = 0; j < 1024; j++)
+ {
+ k = j * 64;
+ l = j * 64 + 63;
+ a = v + j * w;
+ b = u + j * 64;
+ for (i = k; i <= l; i++, b++, a += w)
+ {
+ c = a * a + y;
+ s = (1.f - c * x) * (1.f - c * x);
+ e = t * (1 / __builtin_sqrtf (c)) * s;
+ *b += (c < z ? e : 0);
+ }
+ }
+}
+
+/* { dg-final { scan-tree-dump "note: vectorized 1 loops in function" "vect" { target i?86-*-* x86_64-*-* } } } */
--- /dev/null
+/* PR tree-optimization/68128 */
+/* { dg-do compile } */
+/* { dg-options "-O2 -fopenmp -fdump-tree-omplower" } */
+
+extern int omp_get_thread_num (void);
+extern int omp_get_ancestor_thread_num (int);
+
+void b1 (int, int);
+
+int
+f1 (void)
+{
+ int a1 = 1;
+ unsigned char a2 = 2;
+ unsigned long a3 = 3;
+ long long a4 = 4;
+ short a5 = 5;
+ char a6 = 6;
+ #pragma omp parallel shared (a1, a2, a3)
+ {
+ if (omp_get_thread_num () == 0)
+ {
+ a1 = a2;
+ a4 = a5;
+ }
+ b1 (a2, a6);
+ #pragma omp barrier
+ if (omp_get_thread_num () == 1)
+ {
+ a1 += a3;
+ a4 += a6;
+ }
+ }
+ return a1 + a2 + a3 + a4 + a5 + a6;
+}
+
+/* { dg-final { scan-tree-dump "shared\\(a1\\)" "omplower" } } */
+/* { dg-final { scan-tree-dump "firstprivate\\(a2\\)" "omplower" } } */
+/* { dg-final { scan-tree-dump "firstprivate\\(a3\\)" "omplower" } } */
+/* { dg-final { scan-tree-dump "shared\\(a4\\)" "omplower" } } */
+/* { dg-final { scan-tree-dump "firstprivate\\(a5\\)" "omplower" } } */
+/* { dg-final { scan-tree-dump "firstprivate\\(a6\\)" "omplower" } } */
+
+struct S { int a, b; };
+
+void b2 (int *, int *, int, int, struct S, struct S);
+
+void
+f2 (void)
+{
+ struct S a7 = { 7, 7 }, a8 = { 8, 8 };
+ int a9 = 9, a10 = 10;
+ short a11[2] = { 11, 11 };
+ char a12[1] = { 12 };
+ #pragma omp parallel shared (a7, a9, a11)
+ {
+ b2 (&a9, &a10, a11[1], a12[0], a7, a8);
+ }
+}
+
+/* { dg-final { scan-tree-dump "shared\\(a7\\)" "omplower" } } */
+/* { dg-final { scan-tree-dump "shared\\(a8\\)" "omplower" } } */
+/* { dg-final { scan-tree-dump "shared\\(a9\\)" "omplower" } } */
+/* { dg-final { scan-tree-dump "shared\\(a10\\)" "omplower" } } */
+/* { dg-final { scan-tree-dump "shared\\(a11\\)" "omplower" } } */
+/* { dg-final { scan-tree-dump "shared\\(a12\\)" "omplower" } } */
+
+void b3 (_Complex float, _Complex float);
+
+_Complex float
+f3 (void)
+{
+ _Complex float a13 = 13.0f, a14 = 14.0f, a15 = 15.0f, a16 = 16.0f;
+ #pragma omp parallel shared (a13, a15)
+ {
+ #pragma omp parallel shared (a14)
+ {
+ if (omp_get_thread_num () == 0 && omp_get_ancestor_thread_num (1) == 1)
+ __imag__ a13 = __real__ a15;
+ else if (omp_get_thread_num () == 1 && omp_get_ancestor_thread_num (1) == 0)
+ __real__ a14 = __imag__ a16;
+ b3 (a15, a16);
+ }
+ }
+ return a13 + a14 + a15 + a16;
+}
+
+/* { dg-final { scan-tree-dump-times "shared\\(a13\\)" 2 "omplower" } } */
+/* { dg-final { scan-tree-dump-times "shared\\(a14\\)" 2 "omplower" } } */
+/* { dg-final { scan-tree-dump-times "firstprivate\\(a15\\)" 2 "omplower" } } */
+/* { dg-final { scan-tree-dump-times "firstprivate\\(a16\\)" 2 "omplower" } } */
+
+int
+f4 (void)
+{
+ int a17 = 17, a18 = 18, a19 = 19, a20 = 20, a21 = 21, a22 = 22, a23 = 23, a24 = 0, a25 = 0, a26 = 0;
+ int i;
+ #pragma omp task shared (a17)
+ b1 (a17, a18);
+ b1 (a17, a18);
+ #pragma omp taskwait
+ #pragma omp parallel shared (a19)
+ {
+ #pragma omp task shared (a19)
+ {
+ a19 = 1;
+ }
+ #pragma omp task shared (a20)
+ a20 = a21;
+ #pragma omp for firstprivate (a25) lastprivate (a22) linear (a23:2) reduction (+:a24) private (a26)
+ for (i = 0; i < 10; i++)
+ {
+ a26 = i;
+ a22 = a26 + 7;
+ a23 += 2;
+ a24 += i;
+ a25++;
+ }
+ }
+ return a22 + a23 + a24 + a25 + a26;
+}
+
+/* { dg-final { scan-tree-dump "firstprivate\\(a17\\)" "omplower" } } */
+/* { dg-final { scan-tree-dump "firstprivate\\(a18\\)" "omplower" } } */
+/* { dg-final { scan-tree-dump-times "shared\\(a19\\)" 2 "omplower" } } */
+/* { dg-final { scan-tree-dump-times "shared\\(a20\\)" 2 "omplower" } } */
+/* { dg-final { scan-tree-dump-times "firstprivate\\(a21\\)" 2 "omplower" } } */
+/* { dg-final { scan-tree-dump-times "lastprivate\\(a22\\)" 1 "omplower" } } */
+/* { dg-final { scan-tree-dump-times "shared\\(a22\\)" 1 "omplower" } } */
+/* { dg-final { scan-tree-dump-times "linear\\(a23:2\\)" 1 "omplower" } } */
+/* { dg-final { scan-tree-dump-times "shared\\(a23\\)" 1 "omplower" } } */
+/* { dg-final { scan-tree-dump-times "reduction\\(.:a24\\)" 1 "omplower" } } */
+/* { dg-final { scan-tree-dump-times "shared\\(a24\\)" 1 "omplower" } } */
+/* { dg-final { scan-tree-dump-times "firstprivate\\(a25\\)" 2 "omplower" } } */
+/* { dg-final { scan-tree-dump-times "private\\(a26\\)" 1 "omplower" } } */
+
+void
+f5 (void)
+{
+ int a27 = 27, a28 = 28, a29 = 29, a30 = 30;
+ #pragma omp target data map (tofrom: a27, a28)
+ {
+ #pragma omp target map (tofrom: a27)
+ a27++;
+ #pragma omp parallel shared (a27, a28)
+ {
+ #pragma omp critical
+ {
+ /* This might modify a27 for non-shared memory offloading. */
+ #pragma omp target update to (a27)
+ #pragma omp target map (always, from: a28) private (a30)
+ {
+ a28++;
+ a29++;
+ a30 = a29;
+ }
+ }
+ #pragma omp barrier
+ b1 (a27, a28);
+ }
+ }
+}
+
+/* { dg-final { scan-tree-dump "shared\\(a27\\)" "omplower" } } */
+/* { dg-final { scan-tree-dump "shared\\(a28\\)" "omplower" } } */
+/* { dg-final { scan-tree-dump-times "firstprivate\\(a29\\)" 2 "omplower" } } */
+/* { dg-final { scan-tree-dump-times "private\\(a30\\)" 1 "omplower" } } */
+
+int
+f6 (void)
+{
+ int a31 = 31, a32 = 32, a33 = 33, a34 = 34;
+ #pragma omp parallel
+ {
+ #pragma omp sections
+ {
+ #pragma omp section
+ {
+ a31 = 9;
+ }
+ #pragma omp section
+ {
+ int i = 10;
+ __builtin_memcpy (&a32, &i, sizeof (int));
+ }
+ }
+ #pragma omp single
+ a33 = 11;
+ #pragma omp atomic
+ a34++;
+ }
+ return a31 + a32 + a33 + a34;
+}
+
+/* { dg-final { scan-tree-dump "shared\\(a31\\)" "omplower" } } */
+/* { dg-final { scan-tree-dump "shared\\(a32\\)" "omplower" } } */
+/* { dg-final { scan-tree-dump "shared\\(a33\\)" "omplower" } } */
+/* { dg-final { scan-tree-dump "shared\\(a34\\)" "omplower" } } */
break;
if (decl_function_context (decl) != info->context)
{
+ if (OMP_CLAUSE_CODE (clause) == OMP_CLAUSE_SHARED)
+ OMP_CLAUSE_SHARED_READONLY (clause) = 0;
bitmap_set_bit (new_suppress, DECL_UID (decl));
OMP_CLAUSE_DECL (clause) = get_nonlocal_debug_decl (info, decl);
if (OMP_CLAUSE_CODE (clause) != OMP_CLAUSE_PRIVATE)
tree field = lookup_field_for_decl (info, decl, NO_INSERT);
if (field)
{
+ if (OMP_CLAUSE_CODE (clause) == OMP_CLAUSE_SHARED)
+ OMP_CLAUSE_SHARED_READONLY (clause) = 0;
bitmap_set_bit (new_suppress, DECL_UID (decl));
OMP_CLAUSE_DECL (clause)
= get_local_debug_decl (info, decl, field);
#define OMP_CLAUSE_SHARED_FIRSTPRIVATE(NODE) \
(OMP_CLAUSE_SUBCODE_CHECK (NODE, OMP_CLAUSE_SHARED)->base.public_flag)
+/* True on a SHARED clause if a scalar is not modified in the body and
+ thus could be optimized as firstprivate. */
+#define OMP_CLAUSE_SHARED_READONLY(NODE) \
+ TREE_PRIVATE (OMP_CLAUSE_SUBCODE_CHECK (NODE, OMP_CLAUSE_SHARED))
+
#define OMP_CLAUSE_IF_MODIFIER(NODE) \
(OMP_CLAUSE_SUBCODE_CHECK (NODE, OMP_CLAUSE_IF)->omp_clause.subcode.if_modifier)