#include "attribs.h"
#include "gimplify.h"
#include "langhooks.h"
+#include "bitmap.h"
/* Complete a #pragma oacc wait construct. LOC is the location of
tree next, c;
enum c_omp_clause_split s;
int i;
+ bool has_dup_allocate = false;
for (i = 0; i < C_OMP_CLAUSE_SPLIT_COUNT; i++)
cclauses[i] = NULL;
else
s = C_OMP_CLAUSE_SPLIT_FOR;
break;
+ /* The allocate clause is allowed on target, teams, distribute,
+ parallel, for, sections and taskloop; distribute it to all the
+ constituent constructs that are present. */
+ case OMP_CLAUSE_ALLOCATE:
+ s = C_OMP_CLAUSE_SPLIT_COUNT;
+ for (i = 0; i < C_OMP_CLAUSE_SPLIT_COUNT; i++)
+ {
+ switch (i)
+ {
+ case C_OMP_CLAUSE_SPLIT_TARGET:
+ if ((mask & (OMP_CLAUSE_MASK_1
+ << PRAGMA_OMP_CLAUSE_MAP)) == 0)
+ continue;
+ break;
+ case C_OMP_CLAUSE_SPLIT_TEAMS:
+ if ((mask & (OMP_CLAUSE_MASK_1
+ << PRAGMA_OMP_CLAUSE_NUM_TEAMS)) == 0)
+ continue;
+ break;
+ case C_OMP_CLAUSE_SPLIT_DISTRIBUTE:
+ if ((mask & (OMP_CLAUSE_MASK_1
+ << PRAGMA_OMP_CLAUSE_DIST_SCHEDULE)) == 0)
+ continue;
+ break;
+ case C_OMP_CLAUSE_SPLIT_PARALLEL:
+ if ((mask & (OMP_CLAUSE_MASK_1
+ << PRAGMA_OMP_CLAUSE_NUM_THREADS)) == 0)
+ continue;
+ break;
+ case C_OMP_CLAUSE_SPLIT_FOR:
+ STATIC_ASSERT (C_OMP_CLAUSE_SPLIT_SECTIONS
+ == C_OMP_CLAUSE_SPLIT_FOR
+ && (C_OMP_CLAUSE_SPLIT_TASKLOOP
+ == C_OMP_CLAUSE_SPLIT_FOR)
+ && (C_OMP_CLAUSE_SPLIT_LOOP
+ == C_OMP_CLAUSE_SPLIT_FOR));
+ if (code == OMP_SECTIONS)
+ break;
+ if ((mask & (OMP_CLAUSE_MASK_1
+ << PRAGMA_OMP_CLAUSE_SCHEDULE)) != 0)
+ break;
+ if ((mask & (OMP_CLAUSE_MASK_1
+ << PRAGMA_OMP_CLAUSE_NOGROUP)) != 0)
+ break;
+ continue;
+ case C_OMP_CLAUSE_SPLIT_SIMD:
+ continue;
+ default:
+ gcc_unreachable ();
+ }
+ if (s != C_OMP_CLAUSE_SPLIT_COUNT)
+ {
+ c = build_omp_clause (OMP_CLAUSE_LOCATION (clauses),
+ OMP_CLAUSE_ALLOCATE);
+ OMP_CLAUSE_DECL (c)
+ = OMP_CLAUSE_DECL (clauses);
+ OMP_CLAUSE_ALLOCATE_ALLOCATOR (c)
+ = OMP_CLAUSE_ALLOCATE_ALLOCATOR (clauses);
+ OMP_CLAUSE_CHAIN (c) = cclauses[s];
+ cclauses[s] = c;
+ has_dup_allocate = true;
+ }
+ s = (enum c_omp_clause_split) i;
+ }
+ gcc_assert (s != C_OMP_CLAUSE_SPLIT_COUNT);
+ break;
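+ /* An illustrative sketch: on #pragma omp target teams distribute
+ parallel for, the loop above chains a duplicate of the clause onto
+ the target, teams, distribute and parallel splits, and the original
+ clause reaches the for split through the cclauses[s] store after
+ the switch. */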
default:
gcc_unreachable ();
}
cclauses[s] = clauses;
}
+ if (has_dup_allocate)
+ {
+ bool need_prune = false;
+ bitmap_obstack_initialize (NULL);
+ for (i = 0; i < C_OMP_CLAUSE_SPLIT_SIMD - (code == OMP_LOOP); i++)
+ if (cclauses[i])
+ {
+ bitmap_head allocate_head;
+ bitmap_initialize (&allocate_head, &bitmap_default_obstack);
+ for (c = cclauses[i]; c; c = OMP_CLAUSE_CHAIN (c))
+ if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_ALLOCATE
+ && DECL_P (OMP_CLAUSE_DECL (c)))
+ bitmap_set_bit (&allocate_head,
+ DECL_UID (OMP_CLAUSE_DECL (c)));
+ for (c = cclauses[i]; c; c = OMP_CLAUSE_CHAIN (c))
+ switch (OMP_CLAUSE_CODE (c))
+ {
+ case OMP_CLAUSE_PRIVATE:
+ case OMP_CLAUSE_FIRSTPRIVATE:
+ case OMP_CLAUSE_LASTPRIVATE:
+ case OMP_CLAUSE_LINEAR:
+ case OMP_CLAUSE_REDUCTION:
+ case OMP_CLAUSE_IN_REDUCTION:
+ case OMP_CLAUSE_TASK_REDUCTION:
+ if (DECL_P (OMP_CLAUSE_DECL (c)))
+ bitmap_clear_bit (&allocate_head,
+ DECL_UID (OMP_CLAUSE_DECL (c)));
+ break;
+ default:
+ break;
+ }
+ for (c = cclauses[i]; c; c = OMP_CLAUSE_CHAIN (c))
+ if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_ALLOCATE
+ && DECL_P (OMP_CLAUSE_DECL (c))
+ && bitmap_bit_p (&allocate_head,
+ DECL_UID (OMP_CLAUSE_DECL (c))))
+ {
+ /* Mark allocate clauses which don't have a corresponding
+ explicit data-sharing clause. */
+ OMP_CLAUSE_ALLOCATE_COMBINED (c) = 1;
+ need_prune = true;
+ }
+ }
+ bitmap_obstack_release (NULL);
+ if (need_prune)
+ {
+ /* At least one allocate clause has been marked. Walk all the
+ duplicated allocate clauses in sync. If a clause is marked on
+ all the constituent constructs, diagnose it as invalid and
+ remove every copy. Otherwise, remove the marked copies on
+ constructs inner to the innermost construct where the clause
+ is unmarked. Keep the outer marked ones, because some clause
+ duplication is done only during gimplification. */
+ tree *p[C_OMP_CLAUSE_SPLIT_COUNT];
+ for (i = 0; i < C_OMP_CLAUSE_SPLIT_COUNT; i++)
+ if (cclauses[i] == NULL_TREE
+ || i == C_OMP_CLAUSE_SPLIT_SIMD
+ || (i == C_OMP_CLAUSE_SPLIT_LOOP && code == OMP_LOOP))
+ p[i] = NULL;
+ else
+ p[i] = &cclauses[i];
+ do
+ {
+ int j = -1;
+ tree seen = NULL_TREE;
+ for (i = C_OMP_CLAUSE_SPLIT_COUNT - 1; i >= 0; i--)
+ if (p[i])
+ {
+ while (*p[i]
+ && OMP_CLAUSE_CODE (*p[i]) != OMP_CLAUSE_ALLOCATE)
+ p[i] = &OMP_CLAUSE_CHAIN (*p[i]);
+ if (*p[i] == NULL_TREE)
+ {
+ i = C_OMP_CLAUSE_SPLIT_COUNT;
+ break;
+ }
+ if (!OMP_CLAUSE_ALLOCATE_COMBINED (*p[i]) && j == -1)
+ j = i;
+ seen = *p[i];
+ }
+ if (i == C_OMP_CLAUSE_SPLIT_COUNT)
+ break;
+ if (j == -1)
+ error_at (OMP_CLAUSE_LOCATION (seen),
+ "%qD specified in %<allocate%> clause but not in "
+ "an explicit privatization clause",
+ OMP_CLAUSE_DECL (seen));
+ for (i = 0; i < C_OMP_CLAUSE_SPLIT_COUNT; i++)
+ if (p[i])
+ {
+ if (i > j)
+ /* Remove. */
+ *p[i] = OMP_CLAUSE_CHAIN (*p[i]);
+ else
+ /* Keep. */
+ p[i] = &OMP_CLAUSE_CHAIN (*p[i]);
+ }
+ }
+ while (1);
+ }
+ }
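+ /* An illustrative sketch of the pruning above: for
+ #pragma omp parallel for firstprivate (x) allocate (x)
+ the allocate clause is duplicated onto both splits; assuming the
+ firstprivate is split to the parallel construct, the copy on the
+ for split lacks a matching privatization clause, is marked
+ OMP_CLAUSE_ALLOCATE_COMBINED and is removed, while the copy on the
+ parallel split is kept. Only when no split has a matching clause
+ is the error diagnosed. */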
+
if (!flag_checking)
return;
PRAGMA_OMP_CLAUSE_NONE = 0,
PRAGMA_OMP_CLAUSE_ALIGNED,
+ PRAGMA_OMP_CLAUSE_ALLOCATE,
PRAGMA_OMP_CLAUSE_BIND,
PRAGMA_OMP_CLAUSE_COLLAPSE,
PRAGMA_OMP_CLAUSE_COPYIN,
case 'a':
if (!strcmp ("aligned", p))
result = PRAGMA_OMP_CLAUSE_ALIGNED;
+ else if (!strcmp ("allocate", p))
+ result = PRAGMA_OMP_CLAUSE_ALLOCATE;
else if (!strcmp ("async", p))
result = PRAGMA_OACC_CLAUSE_ASYNC;
else if (!strcmp ("attach", p))
return nl;
}
+/* OpenMP 5.0:
+ allocate ( variable-list )
+ allocate ( expression : variable-list ) */
+
+static tree
+c_parser_omp_clause_allocate (c_parser *parser, tree list)
+{
+ location_t clause_loc = c_parser_peek_token (parser)->location;
+ tree nl, c;
+ tree allocator = NULL_TREE;
+
+ matching_parens parens;
+ if (!parens.require_open (parser))
+ return list;
+
+ if ((c_parser_next_token_is_not (parser, CPP_NAME)
+ && c_parser_next_token_is_not (parser, CPP_KEYWORD))
+ || (c_parser_peek_2nd_token (parser)->type != CPP_COMMA
+ && c_parser_peek_2nd_token (parser)->type != CPP_CLOSE_PAREN))
+ {
+ location_t expr_loc = c_parser_peek_token (parser)->location;
+ c_expr expr = c_parser_expr_no_commas (parser, NULL);
+ expr = convert_lvalue_to_rvalue (expr_loc, expr, false, true);
+ allocator = expr.value;
+ allocator = c_fully_fold (allocator, false, NULL);
+ tree orig_type
+ = expr.original_type ? expr.original_type : TREE_TYPE (allocator);
+ orig_type = TYPE_MAIN_VARIANT (orig_type);
+ if (!INTEGRAL_TYPE_P (TREE_TYPE (allocator))
+ || TREE_CODE (orig_type) != ENUMERAL_TYPE
+ || TYPE_NAME (orig_type) != get_identifier ("omp_allocator_handle_t"))
+ {
+ error_at (clause_loc, "%<allocate%> clause allocator expression "
+ "has type %qT rather than "
+ "%<omp_allocator_handle_t%>",
+ TREE_TYPE (allocator));
+ allocator = NULL_TREE;
+ }
+ if (!c_parser_require (parser, CPP_COLON, "expected %<:%>"))
+ {
+ parens.skip_until_found_close (parser);
+ return list;
+ }
+ }
+
+ nl = c_parser_omp_variable_list (parser, clause_loc,
+ OMP_CLAUSE_ALLOCATE, list);
+
+ if (allocator)
+ for (c = nl; c != list; c = OMP_CLAUSE_CHAIN (c))
+ OMP_CLAUSE_ALLOCATE_ALLOCATOR (c) = allocator;
+
+ parens.skip_until_found_close (parser);
+ return nl;
+}
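+
+/* A sketch of what the parser above accepts, assuming the
+ omp_allocator_handle_t enum used in the testsuite:
+ allocate (x, y) leaves OMP_CLAUSE_ALLOCATE_ALLOCATOR as NULL_TREE,
+ while allocate (omp_default_mem_alloc : x) stores the folded
+ allocator expression on each clause built for the variable list. */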
+
/* OpenMP 4.0:
linear ( variable-list )
linear ( variable-list : expression )
clauses = c_parser_omp_clause_aligned (parser, clauses);
c_name = "aligned";
break;
+ case PRAGMA_OMP_CLAUSE_ALLOCATE:
+ clauses = c_parser_omp_clause_allocate (parser, clauses);
+ c_name = "allocate";
+ break;
case PRAGMA_OMP_CLAUSE_LINEAR:
clauses = c_parser_omp_clause_linear (parser, clauses);
c_name = "linear";
| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_SCHEDULE) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_COLLAPSE) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NOWAIT) \
+ | (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_ALLOCATE) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_ORDER))
static tree
| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_FIRSTPRIVATE) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_LASTPRIVATE) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_REDUCTION) \
+ | (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_ALLOCATE) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NOWAIT))
static tree
| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_COPYIN) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_REDUCTION) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NUM_THREADS) \
+ | (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_ALLOCATE) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_PROC_BIND))
static tree
( (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_PRIVATE) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_FIRSTPRIVATE) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_COPYPRIVATE) \
+ | (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_ALLOCATE) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NOWAIT))
static tree
| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_MERGEABLE) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_DEPEND) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_PRIORITY) \
+ | (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_ALLOCATE) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_IN_REDUCTION))
static tree
*/
#define OMP_TASKGROUP_CLAUSE_MASK \
- ( (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_TASK_REDUCTION))
+ ( (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_ALLOCATE) \
+ | (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_TASK_REDUCTION))
static tree
c_parser_omp_taskgroup (location_t loc, c_parser *parser, bool *if_p)
| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_FIRSTPRIVATE) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_LASTPRIVATE) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_DIST_SCHEDULE)\
+ | (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_ALLOCATE) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_COLLAPSE))
static tree
| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_REDUCTION) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NUM_TEAMS) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_THREAD_LIMIT) \
+ | (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_ALLOCATE) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_DEFAULT))
static tree
| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NOWAIT) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_PRIVATE) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_FIRSTPRIVATE) \
+ | (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_ALLOCATE) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_DEFAULTMAP) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_IS_DEVICE_PTR))
| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_MERGEABLE) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NOGROUP) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_PRIORITY) \
+ | (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_ALLOCATE) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_REDUCTION) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_IN_REDUCTION))
/* 1 if normal/task reduction has been seen, -1 if inscan reduction
has been seen, -2 if mixed inscan/normal reduction diagnosed. */
int reduction_seen = 0;
+ bool allocate_seen = false;
bitmap_obstack_initialize (NULL);
bitmap_initialize (&generic_head, &bitmap_default_obstack);
bitmap_set_bit (&oacc_reduction_head, DECL_UID (t));
break;
+ case OMP_CLAUSE_ALLOCATE:
+ t = OMP_CLAUSE_DECL (c);
+ if (!VAR_P (t) && TREE_CODE (t) != PARM_DECL)
+ {
+ error_at (OMP_CLAUSE_LOCATION (c),
+ "%qE is not a variable in %<allocate%> clause", t);
+ remove = true;
+ }
+ else if (bitmap_bit_p (&aligned_head, DECL_UID (t)))
+ {
+ warning_at (OMP_CLAUSE_LOCATION (c), 0,
+ "%qE appears more than once in %<allocate%> clauses",
+ t);
+ remove = true;
+ }
+ else
+ {
+ bitmap_set_bit (&aligned_head, DECL_UID (t));
+ if (!OMP_CLAUSE_ALLOCATE_COMBINED (c))
+ allocate_seen = true;
+ }
+ break;
+
case OMP_CLAUSE_DEPEND:
t = OMP_CLAUSE_DECL (c);
if (t == NULL_TREE)
reduction_seen = -2;
}
- if (linear_variable_step_check || reduction_seen == -2)
+ if (linear_variable_step_check || reduction_seen == -2 || allocate_seen)
for (pc = &clauses, c = clauses; c ; c = *pc)
{
bool remove = false;
+ if (allocate_seen)
+ switch (OMP_CLAUSE_CODE (c))
+ {
+ case OMP_CLAUSE_PRIVATE:
+ case OMP_CLAUSE_FIRSTPRIVATE:
+ case OMP_CLAUSE_LASTPRIVATE:
+ case OMP_CLAUSE_LINEAR:
+ case OMP_CLAUSE_REDUCTION:
+ case OMP_CLAUSE_IN_REDUCTION:
+ case OMP_CLAUSE_TASK_REDUCTION:
+ if (DECL_P (OMP_CLAUSE_DECL (c)))
+ bitmap_clear_bit (&aligned_head,
+ DECL_UID (OMP_CLAUSE_DECL (c)));
+ break;
+ default:
+ break;
+ }
if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LINEAR
&& OMP_CLAUSE_LINEAR_VARIABLE_STRIDE (c)
&& !bitmap_bit_p (&map_head,
pc = &OMP_CLAUSE_CHAIN (c);
}
+ if (allocate_seen)
+ for (pc = &clauses, c = clauses; c ; c = *pc)
+ {
+ bool remove = false;
+ if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_ALLOCATE
+ && !OMP_CLAUSE_ALLOCATE_COMBINED (c)
+ && bitmap_bit_p (&aligned_head, DECL_UID (OMP_CLAUSE_DECL (c))))
+ {
+ error_at (OMP_CLAUSE_LOCATION (c),
+ "%qD specified in %<allocate%> clause but not in "
+ "an explicit privatization clause", OMP_CLAUSE_DECL (c));
+ remove = true;
+ }
+ if (remove)
+ *pc = OMP_CLAUSE_CHAIN (c);
+ else
+ pc = &OMP_CLAUSE_CHAIN (c);
+ }
+
if (nogroup_seen && reduction_seen)
{
error_at (OMP_CLAUSE_LOCATION (*nogroup_seen),
case 'a':
if (!strcmp ("aligned", p))
result = PRAGMA_OMP_CLAUSE_ALIGNED;
+ else if (!strcmp ("allocate", p))
+ result = PRAGMA_OMP_CLAUSE_ALLOCATE;
else if (!strcmp ("async", p))
result = PRAGMA_OACC_CLAUSE_ASYNC;
else if (!strcmp ("attach", p))
return nlist;
}
+/* OpenMP 5.0:
+ allocate ( variable-list )
+ allocate ( expression : variable-list ) */
+
+static tree
+cp_parser_omp_clause_allocate (cp_parser *parser, tree list)
+{
+ tree nlist, c, allocator = NULL_TREE;
+ bool colon;
+
+ matching_parens parens;
+ if (!parens.require_open (parser))
+ return list;
+
+ cp_parser_parse_tentatively (parser);
+ bool saved_colon_corrects_to_scope_p = parser->colon_corrects_to_scope_p;
+ parser->colon_corrects_to_scope_p = false;
+ allocator = cp_parser_assignment_expression (parser);
+ parser->colon_corrects_to_scope_p = saved_colon_corrects_to_scope_p;
+ if (cp_lexer_next_token_is (parser->lexer, CPP_COLON))
+ {
+ cp_parser_parse_definitely (parser);
+ cp_lexer_consume_token (parser->lexer);
+ if (allocator == error_mark_node)
+ allocator = NULL_TREE;
+ }
+ else
+ {
+ cp_parser_abort_tentative_parse (parser);
+ allocator = NULL_TREE;
+ }
+
+ nlist = cp_parser_omp_var_list_no_open (parser, OMP_CLAUSE_ALLOCATE, list,
+ &colon);
+
+ for (c = nlist; c != list; c = OMP_CLAUSE_CHAIN (c))
+ OMP_CLAUSE_ALLOCATE_ALLOCATOR (c) = allocator;
+
+ return nlist;
+}
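+
+/* Unlike the C parser above, this disambiguates the two forms through
+ tentative parsing: an assignment-expression is tried first and is
+ committed to as the allocator only if a colon follows. E.g. in
+ allocate (h : x) the colon confirms h as the allocator, while for
+ allocate (x) the tentative parse is aborted and x is parsed as part
+ of the variable list instead. */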
+
/* OpenMP 2.5:
lastprivate ( variable-list )
clauses = cp_parser_omp_clause_aligned (parser, clauses);
c_name = "aligned";
break;
+ case PRAGMA_OMP_CLAUSE_ALLOCATE:
+ clauses = cp_parser_omp_clause_allocate (parser, clauses);
+ c_name = "allocate";
+ break;
case PRAGMA_OMP_CLAUSE_LINEAR:
{
bool declare_simd = false;
| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_SCHEDULE) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NOWAIT) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_COLLAPSE) \
+ | (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_ALLOCATE) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_ORDER))
static tree
| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_FIRSTPRIVATE) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_LASTPRIVATE) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_REDUCTION) \
+ | (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_ALLOCATE) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NOWAIT))
static tree
| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_COPYIN) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_REDUCTION) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NUM_THREADS) \
+ | (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_ALLOCATE) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_PROC_BIND))
static tree
( (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_PRIVATE) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_FIRSTPRIVATE) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_COPYPRIVATE) \
+ | (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_ALLOCATE) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NOWAIT))
static tree
| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_MERGEABLE) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_DEPEND) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_PRIORITY) \
+ | (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_ALLOCATE) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_IN_REDUCTION))
static tree
# pragma omp taskgroup taskgroup-clause[optseq] new-line */
#define OMP_TASKGROUP_CLAUSE_MASK \
- ( (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_TASK_REDUCTION))
+ ( (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_ALLOCATE) \
+ | (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_TASK_REDUCTION))
static tree
cp_parser_omp_taskgroup (cp_parser *parser, cp_token *pragma_tok, bool *if_p)
| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_FIRSTPRIVATE) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_LASTPRIVATE) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_DIST_SCHEDULE)\
+ | (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_ALLOCATE) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_COLLAPSE))
static tree
| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_REDUCTION) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NUM_TEAMS) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_THREAD_LIMIT) \
+ | (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_ALLOCATE) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_DEFAULT))
static tree
| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_PRIVATE) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_FIRSTPRIVATE) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_DEFAULTMAP) \
+ | (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_ALLOCATE) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_IS_DEVICE_PTR))
static bool
| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_MERGEABLE) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NOGROUP) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_PRIORITY) \
+ | (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_ALLOCATE) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_REDUCTION) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_IN_REDUCTION))
break;
case OMP_CLAUSE_GANG:
case OMP_CLAUSE_ALIGNED:
+ case OMP_CLAUSE_ALLOCATE:
OMP_CLAUSE_DECL (nc)
= tsubst_omp_clause_decl (OMP_CLAUSE_DECL (oc), args, complain,
in_decl, NULL);
/* 1 if normal/task reduction has been seen, -1 if inscan reduction
has been seen, -2 if mixed inscan/normal reduction diagnosed. */
int reduction_seen = 0;
+ bool allocate_seen = false;
bitmap_obstack_initialize (NULL);
bitmap_initialize (&generic_head, &bitmap_default_obstack);
bitmap_set_bit (&oacc_reduction_head, DECL_UID (t));
break;
+ case OMP_CLAUSE_ALLOCATE:
+ t = OMP_CLAUSE_DECL (c);
+ if (t == current_class_ptr)
+ {
+ error_at (OMP_CLAUSE_LOCATION (c),
+ "%<this%> not allowed in %<allocate%> clause");
+ remove = true;
+ break;
+ }
+ if (!VAR_P (t) && TREE_CODE (t) != PARM_DECL)
+ {
+ if (processing_template_decl && TREE_CODE (t) != OVERLOAD)
+ break;
+ if (DECL_P (t))
+ error_at (OMP_CLAUSE_LOCATION (c),
+ "%qD is not a variable in %<allocate%> clause", t);
+ else
+ error_at (OMP_CLAUSE_LOCATION (c),
+ "%qE is not a variable in %<allocate%> clause", t);
+ remove = true;
+ }
+ else if (bitmap_bit_p (&aligned_head, DECL_UID (t)))
+ {
+ warning_at (OMP_CLAUSE_LOCATION (c), 0,
+ "%qD appears more than once in %<allocate%> clauses",
+ t);
+ remove = true;
+ }
+ else
+ {
+ bitmap_set_bit (&aligned_head, DECL_UID (t));
+ allocate_seen = true;
+ }
+ t = OMP_CLAUSE_ALLOCATE_ALLOCATOR (c);
+ if (error_operand_p (t))
+ {
+ remove = true;
+ break;
+ }
+ if (t == NULL_TREE)
+ break;
+ tree allocatort;
+ allocatort = TYPE_MAIN_VARIANT (TREE_TYPE (t));
+ if (!type_dependent_expression_p (t)
+ && (TREE_CODE (allocatort) != ENUMERAL_TYPE
+ || TYPE_NAME (allocatort) == NULL_TREE
+ || TREE_CODE (TYPE_NAME (allocatort)) != TYPE_DECL
+ || (DECL_NAME (TYPE_NAME (allocatort))
+ != get_identifier ("omp_allocator_handle_t"))
+ || (TYPE_CONTEXT (allocatort)
+ != DECL_CONTEXT (global_namespace))))
+ {
+ error_at (OMP_CLAUSE_LOCATION (c),
+ "%<allocate%> clause allocator expression has "
+ "type %qT rather than %<omp_allocator_handle_t%>",
+ TREE_TYPE (t));
+ remove = true;
+ }
+ else
+ {
+ t = mark_rvalue_use (t);
+ if (!processing_template_decl)
+ t = maybe_constant_value (t);
+ OMP_CLAUSE_ALLOCATE_ALLOCATOR (c) = t;
+ }
+ break;
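+ /* E.g. allocate (0 : x) is diagnosed here with the type error
+ above, as exercised by the new error tests: the allocator must
+ have the omp_allocator_handle_t enumeration type declared at
+ namespace scope. */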
+
case OMP_CLAUSE_DEPEND:
t = OMP_CLAUSE_DECL (c);
if (t == NULL_TREE)
{
const char *share_name = NULL;
+ if (allocate_seen
+ && OMP_CLAUSE_CODE (c) != OMP_CLAUSE_SHARED
+ && DECL_P (t))
+ bitmap_clear_bit (&aligned_head,
+ DECL_UID (OMP_CLAUSE_DECL (c)));
+
if (VAR_P (t) && CP_DECL_THREAD_LOCAL_P (t))
share_name = "threadprivate";
else switch (cxx_omp_predetermined_sharing_1 (t))
pc = &OMP_CLAUSE_CHAIN (c);
}
+ if (allocate_seen)
+ for (pc = &clauses, c = clauses; c ; c = *pc)
+ {
+ bool remove = false;
+ if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_ALLOCATE
+ && !OMP_CLAUSE_ALLOCATE_COMBINED (c)
+ && bitmap_bit_p (&aligned_head, DECL_UID (OMP_CLAUSE_DECL (c))))
+ {
+ error_at (OMP_CLAUSE_LOCATION (c),
+ "%qD specified in %<allocate%> clause but not in "
+ "an explicit privatization clause", OMP_CLAUSE_DECL (c));
+ remove = true;
+ }
+ if (remove)
+ *pc = OMP_CLAUSE_CHAIN (c);
+ else
+ pc = &OMP_CLAUSE_CHAIN (c);
+ }
+
bitmap_obstack_release (NULL);
return clauses;
}
omp_add_variable (ctx, decl, GOVD_NONTEMPORAL);
break;
+ case OMP_CLAUSE_ALLOCATE:
+ decl = OMP_CLAUSE_DECL (c);
+ if (error_operand_p (decl))
+ {
+ remove = true;
+ break;
+ }
+ if (gimplify_expr (&OMP_CLAUSE_ALLOCATE_ALLOCATOR (c), pre_p, NULL,
+ is_gimple_val, fb_rvalue) == GS_ERROR)
+ {
+ remove = true;
+ break;
+ }
+ break;
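+ /* E.g. for allocate (*&h : x) from the testsuite, this gimplifies
+ the allocator operand into a gimple value, so the expression is
+ evaluated once in the statements preceding the construct. */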
+
case OMP_CLAUSE_DEFAULT:
ctx->default_kind = OMP_CLAUSE_DEFAULT_KIND (c);
break;
case OMP_CLAUSE_FINALIZE:
case OMP_CLAUSE_INCLUSIVE:
case OMP_CLAUSE_EXCLUSIVE:
+ case OMP_CLAUSE_ALLOCATE:
break;
default:
*gforo_clauses_ptr = c;
gforo_clauses_ptr = &OMP_CLAUSE_CHAIN (c);
break;
- /* Taskloop clause we duplicate on both taskloops. */
+ /* Collapse clause we duplicate on both taskloops. */
case OMP_CLAUSE_COLLAPSE:
*gfor_clauses_ptr = c;
gfor_clauses_ptr = &OMP_CLAUSE_CHAIN (c);
gtask_clauses_ptr
= &OMP_CLAUSE_CHAIN (*gtask_clauses_ptr);
break;
+ /* Allocate clause we duplicate on task and inner taskloop. */
+ case OMP_CLAUSE_ALLOCATE:
+ *gfor_clauses_ptr = c;
+ gfor_clauses_ptr = &OMP_CLAUSE_CHAIN (c);
+ *gtask_clauses_ptr = copy_node (c);
+ gtask_clauses_ptr = &OMP_CLAUSE_CHAIN (*gtask_clauses_ptr);
+ break;
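+ /* E.g. #pragma omp taskloop firstprivate (x) allocate (x) is split
+ into an outer task and the inner taskloop, and the clause must be
+ visible on both halves, hence the copy_node for the task half. */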
default:
gcc_unreachable ();
}
case OMP_CLAUSE_IF_PRESENT:
case OMP_CLAUSE_FINALIZE:
case OMP_CLAUSE_TASK_REDUCTION:
+ case OMP_CLAUSE_ALLOCATE:
break;
case OMP_CLAUSE_ALIGNED:
case OMP_CLAUSE_SIMDLEN:
case OMP_CLAUSE_ALIGNED:
case OMP_CLAUSE_DEPEND:
+ case OMP_CLAUSE_ALLOCATE:
case OMP_CLAUSE__LOOPTEMP_:
case OMP_CLAUSE__REDUCTEMP_:
case OMP_CLAUSE_TO:
--- /dev/null
+typedef enum omp_allocator_handle_t
+#if __cplusplus >= 201103L
+: __UINTPTR_TYPE__
+#endif
+{
+ omp_null_allocator = 0,
+ omp_default_mem_alloc = 1,
+ omp_large_cap_mem_alloc = 2,
+ omp_const_mem_alloc = 3,
+ omp_high_bw_mem_alloc = 4,
+ omp_low_lat_mem_alloc = 5,
+ omp_cgroup_mem_alloc = 6,
+ omp_pteam_mem_alloc = 7,
+ omp_thread_mem_alloc = 8,
+ __omp_allocator_handle_t_max__ = __UINTPTR_MAX__
+} omp_allocator_handle_t;
+
+int bar (int, int *, int);
+omp_allocator_handle_t baz (void);
+
+void
+foo (int x, int z)
+{
+ int y[16] = { 0 }, r = 0, i;
+ omp_allocator_handle_t h = baz ();
+ #pragma omp parallel allocate (x) allocate (omp_default_mem_alloc : y) \
+ allocate ((omp_allocator_handle_t) omp_default_mem_alloc:z) firstprivate (x, y, z)
+ bar (x, y, z);
+ #pragma omp task private (x) firstprivate (z) allocate (omp_low_lat_mem_alloc:x,z)
+ bar (0, &x, z);
+ #pragma omp taskwait
+ #pragma omp target teams distribute parallel for private (x) firstprivate (y) \
+ allocate ((omp_allocator_handle_t)(omp_default_mem_alloc + 0):z) \
+ allocate (omp_default_mem_alloc: x, y) allocate (omp_low_lat_mem_alloc: r) \
+ lastprivate (z) reduction(+:r)
+ for (i = 0; i < 64; i++)
+ {
+ z = bar (0, &x, 0);
+ r += bar (1, y, 0);
+ }
+ #pragma omp single private (x) allocate (h:x)
+ ;
+ #pragma omp single allocate (*&h : x) private (x)
+ ;
+ #pragma omp parallel shared (r, x, z)
+ #pragma omp single firstprivate (r) allocate (x, r, z) private (x, z)
+ ;
+ #pragma omp for allocate (x) private (x)
+ for (i = 0; i < 64; i++)
+ x = 1;
+ #pragma omp sections private (x) allocate (omp_low_lat_mem_alloc: x)
+ {
+ x = 1;
+ #pragma omp section
+ x = 2;
+ #pragma omp section
+ x = 3;
+ }
+ #pragma omp taskgroup task_reduction(+:r) allocate (omp_default_mem_alloc : r)
+ #pragma omp task in_reduction(+:r) allocate (omp_default_mem_alloc : r)
+ r += bar (r, &r, 0);
+ #pragma omp teams private (x) firstprivate (y) allocate (h : x, y)
+ bar (x, y, 0);
+ #pragma omp taskloop lastprivate (x) reduction (+:r) allocate (h : x, r)
+ for (i = 0; i < 16; i++)
+ {
+ r += bar (0, &r, 0);
+ x = i;
+ }
+ #pragma omp taskgroup task_reduction(+:r) allocate (omp_default_mem_alloc : r)
+ #pragma omp taskloop firstprivate (x) in_reduction (+:r) \
+ allocate (omp_default_mem_alloc : x, r)
+ for (i = 0; i < 16; i++)
+ r += bar (x, &r, 0);
+ #pragma omp taskwait
+}
--- /dev/null
+typedef enum omp_allocator_handle_t
+#if __cplusplus >= 201103L
+: __UINTPTR_TYPE__
+#endif
+{
+ omp_null_allocator = 0,
+ omp_default_mem_alloc = 1,
+ omp_large_cap_mem_alloc = 2,
+ omp_const_mem_alloc = 3,
+ omp_high_bw_mem_alloc = 4,
+ omp_low_lat_mem_alloc = 5,
+ omp_cgroup_mem_alloc = 6,
+ omp_pteam_mem_alloc = 7,
+ omp_thread_mem_alloc = 8,
+ __omp_allocator_handle_t_max__ = __UINTPTR_MAX__
+} omp_allocator_handle_t;
+
+int bar (int, int *, int);
+omp_allocator_handle_t baz (void);
+
+void
+foo (int x, int z)
+{
+ int i;
+ #pragma omp task allocate (x) /* { dg-error "'x' specified in 'allocate' clause but not in an explicit privatization clause" } */
+ bar (x, &x, 0);
+ #pragma omp taskwait
+ #pragma omp parallel allocate (x) /* { dg-error "'x' specified in 'allocate' clause but not in an explicit privatization clause" } */
+ bar (x, &x, 0);
+ #pragma omp parallel for simd private (x) allocate (x) /* { dg-error "'x' specified in 'allocate' clause but not in an explicit privatization clause" } */
+ for (i = 0; i < 16; i++)
+ x = i;
+ #pragma omp parallel allocate (foo) /* { dg-error "'\[^\n\r]*foo\[^\n\r]*' is not a variable in 'allocate' clause" } */
+ ;
+ #pragma omp parallel allocate (x) shared (x) /* { dg-error "'x' specified in 'allocate' clause but not in an explicit privatization clause" } */
+ bar (x, &x, 0);
+ #pragma omp parallel private (x) allocate (x) allocate (x) /* { dg-warning "'x' appears more than once in 'allocate' clauses" } */
+ bar (x, &x, 0);
+ #pragma omp parallel private (x) allocate (x, x) /* { dg-warning "'x' appears more than once in 'allocate' clauses" } */
+ bar (x, &x, 0);
+ #pragma omp parallel private (x) allocate (0.0 : x) /* { dg-error "'allocate' clause allocator expression has type 'double' rather than 'omp_allocator_handle_t'" } */
+ bar (x, &x, 0);
+ #pragma omp parallel private (x) allocate (0 : x) /* { dg-error "'allocate' clause allocator expression has type 'int' rather than 'omp_allocator_handle_t'" } */
+ bar (x, &x, 0);
+}
/* { dg-do compile } */
/* { dg-additional-options "-std=c99" { target c } } */
+typedef enum omp_allocator_handle_t
+#if __cplusplus >= 201103L
+: __UINTPTR_TYPE__
+#endif
+{
+ omp_null_allocator = 0,
+ omp_default_mem_alloc = 1,
+ omp_large_cap_mem_alloc = 2,
+ omp_const_mem_alloc = 3,
+ omp_high_bw_mem_alloc = 4,
+ omp_low_lat_mem_alloc = 5,
+ omp_cgroup_mem_alloc = 6,
+ omp_pteam_mem_alloc = 7,
+ omp_thread_mem_alloc = 8,
+ __omp_allocator_handle_t_max__ = __UINTPTR_MAX__
+} omp_allocator_handle_t;
+
int t;
#pragma omp threadprivate (t)
#pragma omp distribute parallel for \
private (p) firstprivate (f) collapse(1) dist_schedule(static, 16) \
if (parallel: i2) default(shared) shared(s) reduction(+:r) num_threads (nth) proc_bind(spread) \
- lastprivate (l) schedule(static, 4) order(concurrent)
+ lastprivate (l) schedule(static, 4) order(concurrent) allocate (omp_default_mem_alloc:f)
for (int i = 0; i < 64; i++)
ll++;
#pragma omp distribute parallel for simd \
private (p) firstprivate (f) collapse(1) dist_schedule(static, 16) \
if (parallel: i2) if(simd: i1) default(shared) shared(s) reduction(+:r) num_threads (nth) proc_bind(spread) \
lastprivate (l) schedule(static, 4) nontemporal(ntm) \
- safelen(8) simdlen(4) aligned(q: 32) order(concurrent)
+ safelen(8) simdlen(4) aligned(q: 32) order(concurrent) allocate (omp_default_mem_alloc:f)
for (int i = 0; i < 64; i++)
ll++;
#pragma omp distribute simd \
private (p) firstprivate (f) collapse(1) dist_schedule(static, 16) \
safelen(8) simdlen(4) aligned(q: 32) reduction(+:r) if(i1) nontemporal(ntm) \
- order(concurrent)
+ order(concurrent) allocate (omp_default_mem_alloc:f)
for (int i = 0; i < 64; i++)
ll++;
}
#pragma omp distribute parallel for \
private (p) firstprivate (f) collapse(1) dist_schedule(static, 16) \
if (parallel: i2) default(shared) shared(s) reduction(+:r) num_threads (nth) proc_bind(spread) \
- lastprivate (l) schedule(static, 4) copyin(t) order(concurrent)
+ lastprivate (l) schedule(static, 4) copyin(t) order(concurrent) allocate (p)
for (int i = 0; i < 64; i++)
ll++;
#pragma omp distribute parallel for simd \
private (p) firstprivate (f) collapse(1) dist_schedule(static, 16) \
if (parallel: i2) if(simd: i1) default(shared) shared(s) reduction(+:r) num_threads (nth) proc_bind(spread) \
lastprivate (l) schedule(static, 4) nontemporal(ntm) \
- safelen(8) simdlen(4) aligned(q: 32) copyin(t) order(concurrent)
+ safelen(8) simdlen(4) aligned(q: 32) copyin(t) order(concurrent) allocate (f)
for (int i = 0; i < 64; i++)
ll++;
#pragma omp distribute simd \
private (p) firstprivate (f) collapse(1) dist_schedule(static, 16) \
safelen(8) simdlen(4) aligned(q: 32) reduction(+:r) if(i1) nontemporal(ntm) \
- order(concurrent)
+ order(concurrent) allocate (f)
for (int i = 0; i < 64; i++)
ll++;
#pragma omp loop bind(parallel) order(concurrent) \
{
#pragma omp for simd \
private (p) firstprivate (f) lastprivate (l) linear (ll:1) reduction(+:r) schedule(static, 4) collapse(1) nowait \
- safelen(8) simdlen(4) aligned(q: 32) nontemporal(ntm) if(i1) order(concurrent)
+ safelen(8) simdlen(4) aligned(q: 32) nontemporal(ntm) if(i1) order(concurrent) allocate (f)
for (int i = 0; i < 64; i++)
ll++;
#pragma omp parallel for \
private (p) firstprivate (f) if (parallel: i2) default(shared) shared(s) copyin(t) reduction(+:r) num_threads (nth) proc_bind(spread) \
- lastprivate (l) linear (ll:1) ordered schedule(static, 4) collapse(1)
+ lastprivate (l) linear (ll:1) ordered schedule(static, 4) collapse(1) allocate (f)
for (int i = 0; i < 64; i++)
ll++;
#pragma omp parallel for \
private (p) firstprivate (f) if (parallel: i2) default(shared) shared(s) copyin(t) reduction(+:r) num_threads (nth) proc_bind(spread) \
- lastprivate (l) linear (ll:1) schedule(static, 4) collapse(1) order(concurrent)
+ lastprivate (l) linear (ll:1) schedule(static, 4) collapse(1) order(concurrent) allocate (f)
for (int i = 0; i < 64; i++)
ll++;
#pragma omp parallel for simd \
private (p) firstprivate (f) if (i2) default(shared) shared(s) copyin(t) reduction(+:r) num_threads (nth) proc_bind(spread) \
lastprivate (l) linear (ll:1) schedule(static, 4) collapse(1) \
- safelen(8) simdlen(4) aligned(q: 32) nontemporal(ntm) order(concurrent)
+ safelen(8) simdlen(4) aligned(q: 32) nontemporal(ntm) order(concurrent) allocate (f)
for (int i = 0; i < 64; i++)
ll++;
#pragma omp parallel sections \
private (p) firstprivate (f) if (parallel: i2) default(shared) shared(s) copyin(t) reduction(+:r) num_threads (nth) proc_bind(spread) \
- lastprivate (l)
+ lastprivate (l) allocate (f)
{
#pragma omp section
{}
#pragma omp target parallel \
device(d) map (tofrom: m) if (target: i1) private (p) firstprivate (f) defaultmap(tofrom: scalar) is_device_ptr (idp) \
if (parallel: i2) default(shared) shared(s) reduction(+:r) num_threads (nth) proc_bind(spread) \
- nowait depend(inout: dd[0])
+ nowait depend(inout: dd[0]) allocate (omp_default_mem_alloc:f)
;
#pragma omp target parallel for \
device(d) map (tofrom: m) if (target: i1) private (p) firstprivate (f) defaultmap(tofrom: scalar) is_device_ptr (idp) \
if (parallel: i2) default(shared) shared(s) reduction(+:r) num_threads (nth) proc_bind(spread) \
- lastprivate (l) linear (ll:1) ordered schedule(static, 4) collapse(1) nowait depend(inout: dd[0])
+ lastprivate (l) linear (ll:1) ordered schedule(static, 4) collapse(1) nowait depend(inout: dd[0]) \
+ allocate (omp_default_mem_alloc:f)
for (int i = 0; i < 64; i++)
ll++;
#pragma omp target parallel for \
device(d) map (tofrom: m) if (target: i1) private (p) firstprivate (f) defaultmap(tofrom: scalar) is_device_ptr (idp) \
if (parallel: i2) default(shared) shared(s) reduction(+:r) num_threads (nth) proc_bind(spread) \
- lastprivate (l) linear (ll:1) schedule(static, 4) collapse(1) nowait depend(inout: dd[0]) order(concurrent)
+ lastprivate (l) linear (ll:1) schedule(static, 4) collapse(1) nowait depend(inout: dd[0]) order(concurrent) \
+ allocate (omp_default_mem_alloc:f)
for (int i = 0; i < 64; i++)
ll++;
#pragma omp target parallel for simd \
device(d) map (tofrom: m) if (target: i1) private (p) firstprivate (f) defaultmap(tofrom: scalar) is_device_ptr (idp) \
if (parallel: i2) default(shared) shared(s) reduction(+:r) num_threads (nth) proc_bind(spread) \
lastprivate (l) linear (ll:1) schedule(static, 4) collapse(1) \
- safelen(8) simdlen(4) aligned(q: 32) nowait depend(inout: dd[0]) nontemporal(ntm) if (simd: i3) order(concurrent)
+ safelen(8) simdlen(4) aligned(q: 32) nowait depend(inout: dd[0]) nontemporal(ntm) if (simd: i3) order(concurrent) \
+ allocate (omp_default_mem_alloc:f)
for (int i = 0; i < 64; i++)
ll++;
#pragma omp target teams \
device(d) map (tofrom: m) if (target: i1) private (p) firstprivate (f) defaultmap(tofrom: scalar) is_device_ptr (idp) \
- shared(s) default(shared) reduction(+:r) num_teams(nte) thread_limit(tl) nowait depend(inout: dd[0])
+ shared(s) default(shared) reduction(+:r) num_teams(nte) thread_limit(tl) nowait depend(inout: dd[0]) \
+ allocate (omp_default_mem_alloc:f)
;
#pragma omp target teams distribute \
device(d) map (tofrom: m) if (target: i1) private (p) firstprivate (f) defaultmap(tofrom: scalar) is_device_ptr (idp) \
shared(s) default(shared) reduction(+:r) num_teams(nte) thread_limit(tl) \
- collapse(1) dist_schedule(static, 16) nowait depend(inout: dd[0])
+ collapse(1) dist_schedule(static, 16) nowait depend(inout: dd[0]) allocate (omp_default_mem_alloc:f)
for (int i = 0; i < 64; i++)
;
#pragma omp target teams distribute parallel for \
shared(s) default(shared) reduction(+:r) num_teams(nte) thread_limit(tl) \
collapse(1) dist_schedule(static, 16) \
if (parallel: i2) num_threads (nth) proc_bind(spread) \
- lastprivate (l) schedule(static, 4) nowait depend(inout: dd[0]) order(concurrent)
+ lastprivate (l) schedule(static, 4) nowait depend(inout: dd[0]) order(concurrent) \
+ allocate (omp_default_mem_alloc:f)
for (int i = 0; i < 64; i++)
ll++;
#pragma omp target teams distribute parallel for simd \
collapse(1) dist_schedule(static, 16) \
if (parallel: i2) num_threads (nth) proc_bind(spread) \
lastprivate (l) schedule(static, 4) order(concurrent) \
- safelen(8) simdlen(4) aligned(q: 32) nowait depend(inout: dd[0]) nontemporal(ntm) if (simd: i3)
+ safelen(8) simdlen(4) aligned(q: 32) nowait depend(inout: dd[0]) nontemporal(ntm) if (simd: i3) \
+ allocate (omp_default_mem_alloc:f)
for (int i = 0; i < 64; i++)
ll++;
#pragma omp target teams distribute simd \
device(d) map (tofrom: m) if (i1) private (p) firstprivate (f) defaultmap(tofrom: scalar) is_device_ptr (idp) \
shared(s) default(shared) reduction(+:r) num_teams(nte) thread_limit(tl) \
collapse(1) dist_schedule(static, 16) order(concurrent) \
- safelen(8) simdlen(4) aligned(q: 32) nowait depend(inout: dd[0]) nontemporal(ntm)
+ safelen(8) simdlen(4) aligned(q: 32) nowait depend(inout: dd[0]) nontemporal(ntm) \
+ allocate (omp_default_mem_alloc:f)
for (int i = 0; i < 64; i++)
ll++;
#pragma omp target simd \
device(d) map (tofrom: m) if (target: i1) private (p) firstprivate (f) defaultmap(tofrom: scalar) is_device_ptr (idp) \
safelen(8) simdlen(4) lastprivate (l) linear(ll: 1) aligned(q: 32) reduction(+:r) \
- nowait depend(inout: dd[0]) nontemporal(ntm) if(simd:i3) order(concurrent)
+ nowait depend(inout: dd[0]) nontemporal(ntm) if(simd:i3) order(concurrent) \
+ allocate (omp_default_mem_alloc:f)
for (int i = 0; i < 64; i++)
ll++;
- #pragma omp taskgroup task_reduction(+:r2)
+ #pragma omp taskgroup task_reduction(+:r2) allocate (r2)
#pragma omp taskloop simd \
private (p) firstprivate (f) lastprivate (l) shared (s) default(shared) grainsize (g) collapse(1) untied if(taskloop: i1) if(simd: i2) final(fi) mergeable priority (pp) \
safelen(8) simdlen(4) linear(ll: 1) aligned(q: 32) reduction(default, +:r) in_reduction(+:r2) nontemporal(ntm) \
- order(concurrent)
+ order(concurrent) allocate (f)
for (int i = 0; i < 64; i++)
ll++;
- #pragma omp taskgroup task_reduction(+:r)
+ #pragma omp taskgroup task_reduction(+:r) allocate (r)
#pragma omp taskloop simd \
private (p) firstprivate (f) lastprivate (l) shared (s) default(shared) grainsize (g) collapse(1) untied if(i1) final(fi) mergeable nogroup priority (pp) \
safelen(8) simdlen(4) linear(ll: 1) aligned(q: 32) in_reduction(+:r) nontemporal(ntm) \
- order(concurrent)
+ order(concurrent) allocate (f)
for (int i = 0; i < 64; i++)
ll++;
#pragma omp taskwait
#pragma omp taskloop simd \
private (p) firstprivate (f) lastprivate (l) shared (s) default(shared) num_tasks (nta) collapse(1) if(taskloop: i1) final(fi) priority (pp) \
safelen(8) simdlen(4) linear(ll: 1) aligned(q: 32) reduction(+:r) if (simd: i3) nontemporal(ntm) \
- order(concurrent)
+ order(concurrent) allocate (f)
for (int i = 0; i < 64; i++)
ll++;
#pragma omp target nowait depend(inout: dd[0])
#pragma omp teams distribute \
private(p) firstprivate (f) shared(s) default(shared) reduction(+:r) num_teams(nte) thread_limit(tl) \
- collapse(1) dist_schedule(static, 16)
+ collapse(1) dist_schedule(static, 16) allocate (omp_default_mem_alloc: f)
for (int i = 0; i < 64; i++)
;
#pragma omp target
private(p) firstprivate (f) shared(s) default(shared) reduction(+:r) num_teams(nte) thread_limit(tl) \
collapse(1) dist_schedule(static, 16) \
if (parallel: i2) num_threads (nth) proc_bind(spread) \
- lastprivate (l) schedule(static, 4) order(concurrent)
+ lastprivate (l) schedule(static, 4) order(concurrent) allocate (omp_default_mem_alloc: f)
for (int i = 0; i < 64; i++)
ll++;
#pragma omp target
collapse(1) dist_schedule(static, 16) \
if (parallel: i2) num_threads (nth) proc_bind(spread) \
lastprivate (l) schedule(static, 4) order(concurrent) \
- safelen(8) simdlen(4) aligned(q: 32) if (simd: i3) nontemporal(ntm)
+ safelen(8) simdlen(4) aligned(q: 32) if (simd: i3) nontemporal(ntm) \
+ allocate (omp_default_mem_alloc: f)
for (int i = 0; i < 64; i++)
ll++;
#pragma omp target
#pragma omp teams distribute simd \
private(p) firstprivate (f) shared(s) default(shared) reduction(+:r) num_teams(nte) thread_limit(tl) \
collapse(1) dist_schedule(static, 16) order(concurrent) \
- safelen(8) simdlen(4) aligned(q: 32) if(i3) nontemporal(ntm)
+ safelen(8) simdlen(4) aligned(q: 32) if(i3) nontemporal(ntm) \
+ allocate (omp_default_mem_alloc: f)
for (int i = 0; i < 64; i++)
ll++;
#pragma omp teams distribute parallel for \
private(p) firstprivate (f) shared(s) default(shared) reduction(+:r) num_teams(nte) thread_limit(tl) \
collapse(1) dist_schedule(static, 16) order(concurrent) \
if (parallel: i2) num_threads (nth) proc_bind(spread) \
- lastprivate (l) schedule(static, 4) copyin(t)
+ lastprivate (l) schedule(static, 4) copyin(t) allocate (f)
for (int i = 0; i < 64; i++)
ll++;
#pragma omp teams distribute parallel for simd \
collapse(1) dist_schedule(static, 16) \
if (parallel: i2) num_threads (nth) proc_bind(spread) \
lastprivate (l) schedule(static, 4) order(concurrent) \
- safelen(8) simdlen(4) aligned(q: 32) if (simd: i3) nontemporal(ntm) copyin(t)
+ safelen(8) simdlen(4) aligned(q: 32) if (simd: i3) nontemporal(ntm) copyin(t) \
+ allocate (f)
for (int i = 0; i < 64; i++)
ll++;
#pragma omp teams distribute simd \
private(p) firstprivate (f) shared(s) default(shared) reduction(+:r) num_teams(nte) thread_limit(tl) \
collapse(1) dist_schedule(static, 16) order(concurrent) \
- safelen(8) simdlen(4) aligned(q: 32) if(i3) nontemporal(ntm)
+ safelen(8) simdlen(4) aligned(q: 32) if(i3) nontemporal(ntm) allocate(f)
for (int i = 0; i < 64; i++)
ll++;
#pragma omp parallel master \
private (p) firstprivate (f) if (parallel: i2) default(shared) shared(s) reduction(+:r) \
- num_threads (nth) proc_bind(spread) copyin(t)
+ num_threads (nth) proc_bind(spread) copyin(t) allocate (f)
;
- #pragma omp taskgroup task_reduction (+:r2)
+ #pragma omp taskgroup task_reduction (+:r2) allocate (r2)
#pragma omp master taskloop \
private (p) firstprivate (f) lastprivate (l) shared (s) default(shared) grainsize (g) collapse(1) untied if(taskloop: i1) final(fi) mergeable priority (pp) \
- reduction(default, +:r) in_reduction(+:r2)
+ reduction(default, +:r) in_reduction(+:r2) allocate (f)
for (int i = 0; i < 64; i++)
ll++;
- #pragma omp taskgroup task_reduction (+:r2)
+ #pragma omp taskgroup task_reduction (+:r2) allocate (r2)
#pragma omp master taskloop simd \
private (p) firstprivate (f) lastprivate (l) shared (s) default(shared) grainsize (g) collapse(1) untied if(taskloop: i1) if(simd: i2) final(fi) mergeable priority (pp) \
safelen(8) simdlen(4) linear(ll: 1) aligned(q: 32) reduction(default, +:r) in_reduction(+:r2) nontemporal(ntm) \
- order(concurrent)
+ order(concurrent) allocate (f)
for (int i = 0; i < 64; i++)
ll++;
#pragma omp parallel master taskloop \
private (p) firstprivate (f) lastprivate (l) shared (s) default(shared) grainsize (g) collapse(1) untied if(taskloop: i1) final(fi) mergeable priority (pp) \
- reduction(default, +:r) if (parallel: i2) num_threads (nth) proc_bind(spread) copyin(t)
+ reduction(default, +:r) if (parallel: i2) num_threads (nth) proc_bind(spread) copyin(t) allocate (f)
for (int i = 0; i < 64; i++)
ll++;
#pragma omp parallel master taskloop simd \
private (p) firstprivate (f) lastprivate (l) shared (s) default(shared) grainsize (g) collapse(1) untied if(taskloop: i1) if(simd: i2) final(fi) mergeable priority (pp) \
safelen(8) simdlen(4) linear(ll: 1) aligned(q: 32) reduction(default, +:r) nontemporal(ntm) if (parallel: i2) num_threads (nth) proc_bind(spread) copyin(t) \
- order(concurrent)
+ order(concurrent) allocate (f)
for (int i = 0; i < 64; i++)
ll++;
- #pragma omp taskgroup task_reduction (+:r2)
+ #pragma omp taskgroup task_reduction (+:r2) allocate (r2)
#pragma omp master taskloop \
private (p) firstprivate (f) lastprivate (l) shared (s) default(shared) num_tasks (nta) collapse(1) untied if(i1) final(fi) mergeable priority (pp) \
reduction(default, +:r) in_reduction(+:r2)
for (int i = 0; i < 64; i++)
ll++;
- #pragma omp taskgroup task_reduction (+:r2)
+ #pragma omp taskgroup task_reduction (+:r2) allocate (r2)
#pragma omp master taskloop simd \
private (p) firstprivate (f) lastprivate (l) shared (s) default(shared) num_tasks (nta) collapse(1) untied if(i1) final(fi) mergeable priority (pp) \
safelen(8) simdlen(4) linear(ll: 1) aligned(q: 32) reduction(default, +:r) in_reduction(+:r2) nontemporal(ntm) \
- order(concurrent)
+ order(concurrent) allocate (f)
for (int i = 0; i < 64; i++)
ll++;
#pragma omp parallel master taskloop \
private (p) firstprivate (f) lastprivate (l) shared (s) default(shared) num_tasks (nta) collapse(1) untied if(i1) final(fi) mergeable priority (pp) \
- reduction(default, +:r) num_threads (nth) proc_bind(spread) copyin(t)
+ reduction(default, +:r) num_threads (nth) proc_bind(spread) copyin(t) allocate (f)
for (int i = 0; i < 64; i++)
ll++;
#pragma omp parallel master taskloop simd \
private (p) firstprivate (f) lastprivate (l) shared (s) default(shared) num_tasks (nta) collapse(1) untied if(i1) final(fi) mergeable priority (pp) \
safelen(8) simdlen(4) linear(ll: 1) aligned(q: 32) reduction(default, +:r) nontemporal(ntm) num_threads (nth) proc_bind(spread) copyin(t) \
- order(concurrent)
+ order(concurrent) allocate (f)
for (int i = 0; i < 64; i++)
ll++;
#pragma omp loop bind(thread) order(concurrent) \
ll++;
#pragma omp parallel loop \
private (p) firstprivate (f) if (parallel: i2) default(shared) shared(s) copyin(t) reduction(+:r) num_threads (nth) proc_bind(spread) \
- lastprivate (l) collapse(1) bind(parallel) order(concurrent)
+ lastprivate (l) collapse(1) bind(parallel) order(concurrent) allocate (f)
for (l = 0; l < 64; l++)
ll++;
#pragma omp parallel loop \
private (p) firstprivate (f) if (parallel: i2) default(shared) shared(s) copyin(t) reduction(+:r) num_threads (nth) proc_bind(spread) \
- lastprivate (l) collapse(1)
+ lastprivate (l) collapse(1) allocate (f)
for (l = 0; l < 64; l++)
ll++;
#pragma omp teams loop \
private(p) firstprivate (f) shared(s) default(shared) reduction(+:r) num_teams(nte) thread_limit(tl) \
- collapse(1) lastprivate (l) bind(teams)
+ collapse(1) lastprivate (l) bind(teams) allocate (f)
for (l = 0; l < 64; ++l)
;
#pragma omp teams loop \
private(p) firstprivate (f) shared(s) default(shared) reduction(+:r) num_teams(nte) thread_limit(tl) \
- collapse(1) lastprivate (l) order(concurrent)
+ collapse(1) lastprivate (l) order(concurrent) allocate (f)
for (l = 0; l < 64; ++l)
;
#pragma omp target parallel loop \
device(d) map (tofrom: m) if (target: i1) private (p) firstprivate (f) defaultmap(tofrom: scalar) is_device_ptr (idp) \
if (parallel: i2) default(shared) shared(s) reduction(+:r) num_threads (nth) proc_bind(spread) \
- nowait depend(inout: dd[0]) lastprivate (l) bind(parallel) order(concurrent) collapse(1)
+ nowait depend(inout: dd[0]) lastprivate (l) bind(parallel) order(concurrent) collapse(1) \
+ allocate (omp_default_mem_alloc: f)
for (l = 0; l < 64; ++l)
;
#pragma omp target parallel loop \
device(d) map (tofrom: m) if (target: i1) private (p) firstprivate (f) defaultmap(tofrom: scalar) is_device_ptr (idp) \
if (parallel: i2) default(shared) shared(s) reduction(+:r) num_threads (nth) proc_bind(spread) \
- nowait depend(inout: dd[0]) lastprivate (l) order(concurrent) collapse(1)
+ nowait depend(inout: dd[0]) lastprivate (l) order(concurrent) collapse(1) \
+ allocate (omp_default_mem_alloc: f)
for (l = 0; l < 64; ++l)
;
#pragma omp target teams loop \
device(d) map (tofrom: m) if (target: i1) private (p) firstprivate (f) defaultmap(tofrom: scalar) is_device_ptr (idp) \
shared(s) default(shared) reduction(+:r) num_teams(nte) thread_limit(tl) nowait depend(inout: dd[0]) \
- lastprivate (l) bind(teams) collapse(1)
+ lastprivate (l) bind(teams) collapse(1) \
+ allocate (omp_default_mem_alloc: f)
for (l = 0; l < 64; ++l)
;
#pragma omp target teams loop \
device(d) map (tofrom: m) if (target: i1) private (p) firstprivate (f) defaultmap(tofrom: scalar) is_device_ptr (idp) \
shared(s) default(shared) reduction(+:r) num_teams(nte) thread_limit(tl) nowait depend(inout: dd[0]) \
- lastprivate (l) order(concurrent) collapse(1)
+ lastprivate (l) order(concurrent) collapse(1) \
+ allocate (omp_default_mem_alloc: f)
for (l = 0; l < 64; ++l)
;
}
/* OpenMP clause: aligned (variable-list[:alignment]). */
OMP_CLAUSE_ALIGNED,
+ /* OpenMP clause: allocate ([allocator:]variable-list). */
+ OMP_CLAUSE_ALLOCATE,
+
/* OpenMP clause: depend ({in,out,inout}:variable-list). */
OMP_CLAUSE_DEPEND,
}
/* FALLTHRU */
case OMP_CLAUSE_NONTEMPORAL:
+ do_decl_clause_no_supp:
/* Like do_decl_clause, but don't add any suppression. */
decl = OMP_CLAUSE_DECL (clause);
if (VAR_P (decl)
}
break;
+ case OMP_CLAUSE_ALLOCATE:
+ if (OMP_CLAUSE_ALLOCATE_ALLOCATOR (clause))
+ {
+ wi->val_only = true;
+ wi->is_lhs = false;
+ convert_nonlocal_reference_op
+ (&OMP_CLAUSE_ALLOCATE_ALLOCATOR (clause), &dummy, wi);
+ }
+ goto do_decl_clause_no_supp;
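+ /* A sketch of the motivation: with nested functions both the
+ allocator expression and the list item may refer to a decl of the
+ enclosing function; the walk above rewrites the allocator operand
+ and the shared do_decl_clause_no_supp code rewrites the clause
+ decl itself. */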
+
case OMP_CLAUSE_NOWAIT:
case OMP_CLAUSE_ORDERED:
case OMP_CLAUSE_DEFAULT:
}
/* FALLTHRU */
case OMP_CLAUSE_NONTEMPORAL:
+ do_decl_clause_no_supp:
/* Like do_decl_clause, but don't add any suppression. */
decl = OMP_CLAUSE_DECL (clause);
if (VAR_P (decl)
}
break;
+ case OMP_CLAUSE_ALLOCATE:
+ if (OMP_CLAUSE_ALLOCATE_ALLOCATOR (clause))
+ {
+ wi->val_only = true;
+ wi->is_lhs = false;
+ convert_local_reference_op
+ (&OMP_CLAUSE_ALLOCATE_ALLOCATOR (clause), &dummy, wi);
+ }
+ goto do_decl_clause_no_supp;
+
case OMP_CLAUSE_NOWAIT:
case OMP_CLAUSE_ORDERED:
case OMP_CLAUSE_DEFAULT:
pp_right_paren (pp);
break;
+ case OMP_CLAUSE_ALLOCATE:
+ pp_string (pp, "allocate(");
+ if (OMP_CLAUSE_ALLOCATE_ALLOCATOR (clause))
+ {
+ dump_generic_node (pp, OMP_CLAUSE_ALLOCATE_ALLOCATOR (clause),
+ spc, flags, false);
+ pp_colon (pp);
+ }
+ dump_generic_node (pp, OMP_CLAUSE_DECL (clause),
+ spc, flags, false);
+ pp_right_paren (pp);
+ break;
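+ /* Tree dumps thus print the clause as allocate(allocator:decl)
+ when an allocator expression is recorded, and as allocate(decl)
+ otherwise. */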
+
case OMP_CLAUSE_DEPEND:
pp_string (pp, "depend(");
switch (OMP_CLAUSE_DEPEND_KIND (clause))
1, /* OMP_CLAUSE_COPYPRIVATE */
3, /* OMP_CLAUSE_LINEAR */
2, /* OMP_CLAUSE_ALIGNED */
+ 2, /* OMP_CLAUSE_ALLOCATE */
1, /* OMP_CLAUSE_DEPEND */
1, /* OMP_CLAUSE_NONTEMPORAL */
1, /* OMP_CLAUSE_UNIFORM */
"copyprivate",
"linear",
"aligned",
+ "allocate",
"depend",
"nontemporal",
"uniform",
WALK_SUBTREE_TAIL (OMP_CLAUSE_CHAIN (*tp));
case OMP_CLAUSE_ALIGNED:
+ case OMP_CLAUSE_ALLOCATE:
case OMP_CLAUSE_FROM:
case OMP_CLAUSE_TO:
case OMP_CLAUSE_MAP:
#define OMP_CLAUSE_ALIGNED_ALIGNMENT(NODE) \
OMP_CLAUSE_OPERAND (OMP_CLAUSE_SUBCODE_CHECK (NODE, OMP_CLAUSE_ALIGNED), 1)
+#define OMP_CLAUSE_ALLOCATE_ALLOCATOR(NODE) \
+ OMP_CLAUSE_OPERAND (OMP_CLAUSE_SUBCODE_CHECK (NODE, OMP_CLAUSE_ALLOCATE), 1)
+
+/* True if an ALLOCATE clause was present on a combined or composite
+ construct and the code for splitting the clauses has already checked
+ whether the listed variable has explicit privatization on the
+ construct. */
+#define OMP_CLAUSE_ALLOCATE_COMBINED(NODE) \
+ (OMP_CLAUSE_SUBCODE_CHECK (NODE, OMP_CLAUSE_ALLOCATE)->base.public_flag)
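+
+/* For instance, c_omp_split_clauses sets this flag on duplicated
+ allocate clauses for which it found no matching privatization clause
+ on that split, and the front ends then skip the "not in an explicit
+ privatization clause" diagnostic for flagged clauses. */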
+
#define OMP_CLAUSE_NUM_TEAMS_EXPR(NODE) \
OMP_CLAUSE_OPERAND (OMP_CLAUSE_SUBCODE_CHECK (NODE, OMP_CLAUSE_NUM_TEAMS), 0)