BT_BOOL, BT_UINT, BT_PTR_ULONGLONG, BT_LONG, BT_ULONGLONG,
BT_PTR_ULONGLONG, BT_PTR_ULONGLONG, BT_PTR, BT_PTR)
-DEF_FUNCTION_TYPE_9 (BT_FN_VOID_OMPFN_PTR_OMPCPYFN_LONG_LONG_BOOL_UINT_PTR_INT,
- BT_VOID, BT_PTR_FN_VOID_PTR, BT_PTR,
- BT_PTR_FN_VOID_PTR_PTR, BT_LONG, BT_LONG,
- BT_BOOL, BT_UINT, BT_PTR, BT_INT)
DEF_FUNCTION_TYPE_9 (BT_FN_VOID_INT_OMPFN_SIZE_PTR_PTR_PTR_UINT_PTR_PTR,
BT_VOID, BT_INT, BT_PTR_FN_VOID_PTR, BT_SIZE, BT_PTR,
BT_PTR, BT_PTR, BT_UINT, BT_PTR, BT_PTR)
BT_BOOL, BT_LONG, BT_LONG, BT_LONG, BT_LONG, BT_LONG,
BT_PTR_LONG, BT_PTR_LONG, BT_PTR, BT_PTR)
+DEF_FUNCTION_TYPE_10 (BT_FN_VOID_OMPFN_PTR_OMPCPYFN_LONG_LONG_BOOL_UINT_PTR_INT_PTR,
+ BT_VOID, BT_PTR_FN_VOID_PTR, BT_PTR,
+ BT_PTR_FN_VOID_PTR_PTR, BT_LONG, BT_LONG,
+ BT_BOOL, BT_UINT, BT_PTR, BT_INT, BT_PTR)
DEF_FUNCTION_TYPE_10 (BT_FN_BOOL_BOOL_ULL_ULL_ULL_LONG_ULL_ULLPTR_ULLPTR_PTR_PTR,
BT_BOOL, BT_BOOL, BT_ULONGLONG, BT_ULONGLONG,
BT_ULONGLONG, BT_LONG, BT_ULONGLONG, BT_PTR_ULONGLONG,
PRAGMA_OMP_CLAUSE_DEFAULT,
PRAGMA_OMP_CLAUSE_DEFAULTMAP,
PRAGMA_OMP_CLAUSE_DEPEND,
+ PRAGMA_OMP_CLAUSE_DETACH,
PRAGMA_OMP_CLAUSE_DEVICE,
PRAGMA_OMP_CLAUSE_DEVICE_TYPE,
PRAGMA_OMP_CLAUSE_DIST_SCHEDULE,
PRAGMA_OACC_CLAUSE_COPYOUT,
PRAGMA_OACC_CLAUSE_CREATE,
PRAGMA_OACC_CLAUSE_DELETE,
- PRAGMA_OACC_CLAUSE_DETACH,
PRAGMA_OACC_CLAUSE_DEVICEPTR,
PRAGMA_OACC_CLAUSE_DEVICE_RESIDENT,
PRAGMA_OACC_CLAUSE_FINALIZE,
PRAGMA_OACC_CLAUSE_COPYIN = PRAGMA_OMP_CLAUSE_COPYIN,
PRAGMA_OACC_CLAUSE_DEVICE = PRAGMA_OMP_CLAUSE_DEVICE,
PRAGMA_OACC_CLAUSE_DEFAULT = PRAGMA_OMP_CLAUSE_DEFAULT,
+ PRAGMA_OACC_CLAUSE_DETACH = PRAGMA_OMP_CLAUSE_DETACH,
PRAGMA_OACC_CLAUSE_FIRSTPRIVATE = PRAGMA_OMP_CLAUSE_FIRSTPRIVATE,
PRAGMA_OACC_CLAUSE_IF = PRAGMA_OMP_CLAUSE_IF,
PRAGMA_OACC_CLAUSE_PRIVATE = PRAGMA_OMP_CLAUSE_PRIVATE,
return list;
}
+/* OpenMP 5.0:
+ detach ( event-handle ) */
+
+static tree
+c_parser_omp_clause_detach (c_parser *parser, tree list)
+{
+ matching_parens parens;
+ location_t clause_loc = c_parser_peek_token (parser)->location;
+
+ if (!parens.require_open (parser))
+ return list;
+
+ if (c_parser_next_token_is_not (parser, CPP_NAME)
+ || c_parser_peek_token (parser)->id_kind != C_ID_ID)
+ {
+ c_parser_error (parser, "expected identifier");
+ parens.skip_until_found_close (parser);
+ return list;
+ }
+
+ tree t = lookup_name (c_parser_peek_token (parser)->value);
+ if (t == NULL_TREE)
+ {
+ undeclared_variable (c_parser_peek_token (parser)->location,
+ c_parser_peek_token (parser)->value);
+ parens.skip_until_found_close (parser);
+ return list;
+ }
+ c_parser_consume_token (parser);
+
+ tree type = TYPE_MAIN_VARIANT (TREE_TYPE (t));
+ if (!INTEGRAL_TYPE_P (type)
+ || TREE_CODE (type) != ENUMERAL_TYPE
+ || TYPE_NAME (type) != get_identifier ("omp_event_handle_t"))
+ {
+ error_at (clause_loc, "%<detach%> clause event handle "
+ "has type %qT rather than "
+ "%<omp_event_handle_t%>",
+ type);
+ parens.skip_until_found_close (parser);
+ return list;
+ }
+
+ tree u = build_omp_clause (clause_loc, OMP_CLAUSE_DETACH);
+ OMP_CLAUSE_DECL (u) = t;
+ OMP_CLAUSE_CHAIN (u) = list;
+ parens.skip_until_found_close (parser);
+ return u;
+}
+
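For orientation, a minimal sketch of the source form this routine accepts (assumes a compiler with this patch and -fopenmp; omp_event_handle_t comes from <omp.h>):

#include <omp.h>

void
f (void)
{
  omp_event_handle_t ev;

  /* The argument must name a variable of type omp_event_handle_t;
     anything else is rejected with the diagnostic above.  */
  #pragma omp task detach (ev)
    omp_fulfill_event (ev);   /* here the task fulfills its own event */
}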
/* Parse all OpenACC clauses. The set clauses allowed by the directive
is a bitmask in MASK. Return the list of clauses found. */
clauses = c_parser_omp_clause_default (parser, clauses, false);
c_name = "default";
break;
+ case PRAGMA_OMP_CLAUSE_DETACH:
+ clauses = c_parser_omp_clause_detach (parser, clauses);
+ c_name = "detach";
+ break;
case PRAGMA_OMP_CLAUSE_FIRSTPRIVATE:
clauses = c_parser_omp_clause_firstprivate (parser, clauses);
c_name = "firstprivate";
| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_DEPEND) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_PRIORITY) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_ALLOCATE) \
- | (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_IN_REDUCTION))
+ | (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_IN_REDUCTION) \
+ | (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_DETACH))
static tree
c_parser_omp_task (location_t loc, c_parser *parser, bool *if_p)
tree simdlen = NULL_TREE, safelen = NULL_TREE;
bool branch_seen = false;
bool copyprivate_seen = false;
+ bool mergeable_seen = false;
+ tree *detach_seen = NULL;
bool linear_variable_step_check = false;
tree *nowait_clause = NULL;
tree ordered_clause = NULL_TREE;
pc = &OMP_CLAUSE_CHAIN (c);
continue;
+ case OMP_CLAUSE_DETACH:
+ t = OMP_CLAUSE_DECL (c);
+ if (detach_seen)
+ {
+ error_at (OMP_CLAUSE_LOCATION (c),
+ "too many %qs clauses on a task construct",
+ "detach");
+ remove = true;
+ break;
+ }
+ detach_seen = pc;
+ pc = &OMP_CLAUSE_CHAIN (c);
+ c_mark_addressable (t);
+ continue;
+
case OMP_CLAUSE_IF:
case OMP_CLAUSE_NUM_THREADS:
case OMP_CLAUSE_NUM_TEAMS:
case OMP_CLAUSE_UNTIED:
case OMP_CLAUSE_COLLAPSE:
case OMP_CLAUSE_FINAL:
- case OMP_CLAUSE_MERGEABLE:
case OMP_CLAUSE_DEVICE:
case OMP_CLAUSE_DIST_SCHEDULE:
case OMP_CLAUSE_PARALLEL:
pc = &OMP_CLAUSE_CHAIN (c);
continue;
+ case OMP_CLAUSE_MERGEABLE:
+ mergeable_seen = true;
+ pc = &OMP_CLAUSE_CHAIN (c);
+ continue;
+
case OMP_CLAUSE_NOGROUP:
nogroup_seen = pc;
pc = &OMP_CLAUSE_CHAIN (c);
*nogroup_seen = OMP_CLAUSE_CHAIN (*nogroup_seen);
}
+ if (detach_seen)
+ {
+ if (mergeable_seen)
+ {
+ error_at (OMP_CLAUSE_LOCATION (*detach_seen),
+ "%<detach%> clause must not be used together with "
+ "%<mergeable%> clause");
+ *detach_seen = OMP_CLAUSE_CHAIN (*detach_seen);
+ }
+ else
+ {
+ tree detach_decl = OMP_CLAUSE_DECL (*detach_seen);
+
+ for (pc = &clauses, c = clauses; c ; c = *pc)
+ {
+ bool remove = false;
+ if ((OMP_CLAUSE_CODE (c) == OMP_CLAUSE_SHARED
+ || OMP_CLAUSE_CODE (c) == OMP_CLAUSE_PRIVATE
+ || OMP_CLAUSE_CODE (c) == OMP_CLAUSE_FIRSTPRIVATE
+ || OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LASTPRIVATE)
+ && OMP_CLAUSE_DECL (c) == detach_decl)
+ {
+ error_at (OMP_CLAUSE_LOCATION (c),
+ "the event handle of a %<detach%> clause "
+ "should not be in a data-sharing clause");
+ remove = true;
+ }
+ if (remove)
+ *pc = OMP_CLAUSE_CHAIN (c);
+ else
+ pc = &OMP_CLAUSE_CHAIN (c);
+ }
+ }
+ }
+
bitmap_obstack_release (NULL);
return clauses;
}
return list;
}
+/* OpenMP 5.0:
+ detach ( event-handle ) */
+
+static tree
+cp_parser_omp_clause_detach (cp_parser *parser, tree list)
+{
+ matching_parens parens;
+
+ if (!parens.require_open (parser))
+ return list;
+
+ cp_token *token;
+ tree name, decl;
+
+ token = cp_lexer_peek_token (parser->lexer);
+ name = cp_parser_id_expression (parser, /*template_p=*/false,
+ /*check_dependency_p=*/true,
+ /*template_p=*/NULL,
+ /*declarator_p=*/false,
+ /*optional_p=*/false);
+ if (name == error_mark_node)
+ decl = error_mark_node;
+ else
+ {
+ if (identifier_p (name))
+ decl = cp_parser_lookup_name_simple (parser, name, token->location);
+ else
+ decl = name;
+ if (decl == error_mark_node)
+ cp_parser_name_lookup_error (parser, name, decl, NLE_NULL,
+ token->location);
+ }
+
+ if (decl == error_mark_node
+ || !parens.require_close (parser))
+ cp_parser_skip_to_closing_parenthesis (parser, /*recovering=*/true,
+ /*or_comma=*/false,
+ /*consume_paren=*/true);
+
+ tree u = build_omp_clause (token->location, OMP_CLAUSE_DETACH);
+ OMP_CLAUSE_DECL (u) = decl;
+ OMP_CLAUSE_CHAIN (u) = list;
+
+ return u;
+}
+
/* OpenMP 5.0:
iterators ( iterators-definition )
token->location);
c_name = "depend";
break;
+ case PRAGMA_OMP_CLAUSE_DETACH:
+ clauses = cp_parser_omp_clause_detach (parser, clauses);
+ c_name = "detach";
+ break;
case PRAGMA_OMP_CLAUSE_MAP:
clauses = cp_parser_omp_clause_map (parser, clauses);
c_name = "map";
| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_DEPEND) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_PRIORITY) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_ALLOCATE) \
- | (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_IN_REDUCTION))
+ | (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_IN_REDUCTION) \
+ | (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_DETACH))
static tree
cp_parser_omp_task (cp_parser *parser, cp_token *pragma_tok, bool *if_p)
case OMP_CLAUSE_VECTOR:
case OMP_CLAUSE_ASYNC:
case OMP_CLAUSE_WAIT:
+ case OMP_CLAUSE_DETACH:
OMP_CLAUSE_OPERAND (nc, 0)
= tsubst_expr (OMP_CLAUSE_OPERAND (oc, 0), args, complain,
in_decl, /*integral_constant_expression_p=*/false);
has been seen, -2 if mixed inscan/normal reduction diagnosed. */
int reduction_seen = 0;
bool allocate_seen = false;
+ tree detach_seen = NULL_TREE;
+ bool mergeable_seen = false;
bitmap_obstack_initialize (NULL);
bitmap_initialize (&generic_head, &bitmap_default_obstack);
}
}
break;
+ case OMP_CLAUSE_DETACH:
+ t = OMP_CLAUSE_DECL (c);
+ if (detach_seen)
+ {
+ error_at (OMP_CLAUSE_LOCATION (c),
+ "too many %qs clauses on a task construct",
+ "detach");
+ remove = true;
+ break;
+ }
+ else
+ {
+ tree type = TYPE_MAIN_VARIANT (TREE_TYPE (t));
+ if (!type_dependent_expression_p (t)
+ && (!INTEGRAL_TYPE_P (type)
+ || TREE_CODE (type) != ENUMERAL_TYPE
+ || (DECL_NAME (TYPE_NAME (type))
+ != get_identifier ("omp_event_handle_t"))))
+ {
+ error_at (OMP_CLAUSE_LOCATION (c),
+ "%<detach%> clause event handle "
+ "has type %qT rather than "
+ "%<omp_event_handle_t%>",
+ type);
+ remove = true;
+ }
+ detach_seen = c;
+ cxx_mark_addressable (t);
+ }
+ break;
case OMP_CLAUSE_MAP:
case OMP_CLAUSE_TO:
case OMP_CLAUSE_DEFAULT:
case OMP_CLAUSE_UNTIED:
case OMP_CLAUSE_COLLAPSE:
- case OMP_CLAUSE_MERGEABLE:
case OMP_CLAUSE_PARALLEL:
case OMP_CLAUSE_FOR:
case OMP_CLAUSE_SECTIONS:
case OMP_CLAUSE_FINALIZE:
break;
+ case OMP_CLAUSE_MERGEABLE:
+ mergeable_seen = true;
+ break;
+
case OMP_CLAUSE_TILE:
for (tree list = OMP_CLAUSE_TILE_LIST (c); !remove && list;
list = TREE_CHAIN (list))
}
pc = &OMP_CLAUSE_CHAIN (c);
continue;
+ case OMP_CLAUSE_DETACH:
+ if (mergeable_seen)
+ {
+ error_at (OMP_CLAUSE_LOCATION (c),
+ "%<detach%> clause must not be used together with "
+ "%<mergeable%> clause");
+ *pc = OMP_CLAUSE_CHAIN (c);
+ continue;
+ }
+ pc = &OMP_CLAUSE_CHAIN (c);
+ continue;
case OMP_CLAUSE_NOWAIT:
if (copyprivate_seen)
{
}
}
+ if (detach_seen
+ && (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_SHARED
+ || OMP_CLAUSE_CODE (c) == OMP_CLAUSE_PRIVATE
+ || OMP_CLAUSE_CODE (c) == OMP_CLAUSE_FIRSTPRIVATE
+ || OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LASTPRIVATE)
+ && OMP_CLAUSE_DECL (c) == OMP_CLAUSE_DECL (detach_seen))
+ {
+ error_at (OMP_CLAUSE_LOCATION (c),
+ "the event handle of a %<detach%> clause "
+ "should not be in a data-sharing clause");
+ remove = true;
+ }
+
/* We're interested in the base element, not arrays. */
inner_type = type = TREE_TYPE (t);
if ((need_complete_type
show_expr (omp_clauses->priority);
fputc (')', dumpfile);
}
+ if (omp_clauses->detach)
+ {
+ fputs (" DETACH(", dumpfile);
+ show_expr (omp_clauses->detach);
+ fputc (')', dumpfile);
+ }
for (i = 0; i < OMP_IF_LAST; i++)
if (omp_clauses->if_exprs[i])
{
WALK_SUBEXPR (co->ext.omp_clauses->hint);
WALK_SUBEXPR (co->ext.omp_clauses->num_tasks);
WALK_SUBEXPR (co->ext.omp_clauses->priority);
+ WALK_SUBEXPR (co->ext.omp_clauses->detach);
for (idx = 0; idx < OMP_IF_LAST; idx++)
WALK_SUBEXPR (co->ext.omp_clauses->if_exprs[idx]);
for (idx = 0;
struct gfc_expr *hint;
struct gfc_expr *num_tasks;
struct gfc_expr *priority;
+ struct gfc_expr *detach;
struct gfc_expr *if_exprs[OMP_IF_LAST];
enum gfc_omp_sched_kind dist_sched_kind;
struct gfc_expr *dist_chunk_size;
extern int gfc_default_logical_kind;
extern int gfc_default_complex_kind;
extern int gfc_c_int_kind;
+extern int gfc_c_intptr_kind;
extern int gfc_atomic_int_kind;
extern int gfc_atomic_logical_kind;
extern int gfc_intio_kind;
gfc_free_expr (c->hint);
gfc_free_expr (c->num_tasks);
gfc_free_expr (c->priority);
+ gfc_free_expr (c->detach);
for (i = 0; i < OMP_IF_LAST; i++)
gfc_free_expr (c->if_exprs[i]);
gfc_free_expr (c->async_expr);
return MATCH_ERROR;
}
+/* Match detach(event-handle). */
+
+static match
+gfc_match_omp_detach (gfc_expr **expr)
+{
+ locus old_loc = gfc_current_locus;
+
+ if (gfc_match ("detach ( ") != MATCH_YES)
+ goto syntax_error;
+
+ if (gfc_match_variable (expr, 0) != MATCH_YES)
+ goto syntax_error;
+
+ if ((*expr)->ts.type != BT_INTEGER || (*expr)->ts.kind != gfc_c_intptr_kind)
+ {
+ gfc_error ("%qs at %L should be of type "
+ "integer(kind=omp_event_handle_kind)",
+ (*expr)->symtree->n.sym->name, &(*expr)->where);
+ return MATCH_ERROR;
+ }
+
+ if (gfc_match_char (')') != MATCH_YES)
+ goto syntax_error;
+
+ return MATCH_YES;
+
+syntax_error:
+ gfc_error ("Syntax error in OpenMP detach clause at %C");
+ gfc_current_locus = old_loc;
+ return MATCH_ERROR;
+}
+
/* Match depend(sink : ...) construct a namelist from it. */
static match
OMP_CLAUSE_ATOMIC, /* OpenMP 5.0. */
OMP_CLAUSE_CAPTURE, /* OpenMP 5.0. */
OMP_CLAUSE_MEMORDER, /* OpenMP 5.0. */
+ OMP_CLAUSE_DETACH, /* OpenMP 5.0. */
OMP_CLAUSE_NOWAIT,
/* This must come last. */
OMP_MASK1_LAST
OMP_CLAUSE_IF_PRESENT,
OMP_CLAUSE_FINALIZE,
OMP_CLAUSE_ATTACH,
- OMP_CLAUSE_DETACH,
/* This must come last. */
OMP_MASK2_LAST
};
gfc_current_locus = old_loc;
}
if ((mask & OMP_CLAUSE_DETACH)
+ && !openacc
+ && !c->detach
+ && gfc_match_omp_detach (&c->detach) == MATCH_YES)
+ continue;
+ if ((mask & OMP_CLAUSE_DETACH)
+ && openacc
&& gfc_match ("detach ( ") == MATCH_YES
&& gfc_match_omp_map_clause (&c->lists[OMP_LIST_MAP],
OMP_MAP_DETACH, false,
(omp_mask (OMP_CLAUSE_PRIVATE) | OMP_CLAUSE_FIRSTPRIVATE \
| OMP_CLAUSE_SHARED | OMP_CLAUSE_IF | OMP_CLAUSE_DEFAULT \
| OMP_CLAUSE_UNTIED | OMP_CLAUSE_FINAL | OMP_CLAUSE_MERGEABLE \
- | OMP_CLAUSE_DEPEND | OMP_CLAUSE_PRIORITY | OMP_CLAUSE_IN_REDUCTION)
+ | OMP_CLAUSE_DEPEND | OMP_CLAUSE_PRIORITY | OMP_CLAUSE_IN_REDUCTION \
+ | OMP_CLAUSE_DETACH)
#define OMP_TASKLOOP_CLAUSES \
(omp_mask (OMP_CLAUSE_PRIVATE) | OMP_CLAUSE_FIRSTPRIVATE \
| OMP_CLAUSE_LASTPRIVATE | OMP_CLAUSE_SHARED | OMP_CLAUSE_IF \
if (n->sym->attr.associate_var)
gfc_error ("ASSOCIATE name %qs in SHARED clause at %L",
n->sym->name, &n->where);
+ if (omp_clauses->detach
+ && n->sym == omp_clauses->detach->symtree->n.sym)
+ gfc_error ("DETACH event handle %qs in SHARED clause at %L",
+ n->sym->name, &n->where);
}
break;
case OMP_LIST_ALIGNED:
default:
break;
}
-
+ if (omp_clauses->detach
+ && (list == OMP_LIST_PRIVATE
+ || list == OMP_LIST_FIRSTPRIVATE
+ || list == OMP_LIST_LASTPRIVATE)
+ && n->sym == omp_clauses->detach->symtree->n.sym)
+ gfc_error ("DETACH event handle %qs in %s clause at %L",
+ n->sym->name, name, &n->where);
switch (list)
{
case OMP_LIST_REDUCTION_INSCAN:
gfc_error ("%s must contain at least one MAP clause at %L",
p, &code->loc);
}
+ if (!openacc && omp_clauses->mergeable && omp_clauses->detach)
+ gfc_error ("%<DETACH%> clause at %L must not be used together with "
+ "%<MERGEABLE%> clause", &omp_clauses->detach->where);
}
omp_clauses = gfc_trans_add_clause (c, omp_clauses);
}
+ if (clauses->detach)
+ {
+ tree detach;
+
+ gfc_init_se (&se, NULL);
+ gfc_conv_expr (&se, clauses->detach);
+ gfc_add_block_to_block (block, &se.pre);
+ detach = se.expr;
+ gfc_add_block_to_block (block, &se.post);
+
+ c = build_omp_clause (gfc_get_location (&where), OMP_CLAUSE_DETACH);
+ TREE_ADDRESSABLE (detach) = 1;
+ OMP_CLAUSE_DECL (c) = detach;
+ omp_clauses = gfc_trans_add_clause (c, omp_clauses);
+ }
+
if (clauses->hint)
{
tree hint;
int gfc_default_logical_kind;
int gfc_default_complex_kind;
int gfc_c_int_kind;
+int gfc_c_intptr_kind;
int gfc_atomic_int_kind;
int gfc_atomic_logical_kind;
/* Choose atomic kinds to match C's int. */
gfc_atomic_int_kind = gfc_c_int_kind;
gfc_atomic_logical_kind = gfc_c_int_kind;
+
+ gfc_c_intptr_kind = POINTER_SIZE / 8;
}
BT_BOOL, BT_UINT, BT_PTR_ULONGLONG, BT_LONG, BT_ULONGLONG,
BT_PTR_ULONGLONG, BT_PTR_ULONGLONG, BT_PTR, BT_PTR)
-DEF_FUNCTION_TYPE_9 (BT_FN_VOID_OMPFN_PTR_OMPCPYFN_LONG_LONG_BOOL_UINT_PTR_INT,
- BT_VOID, BT_PTR_FN_VOID_PTR, BT_PTR,
- BT_PTR_FN_VOID_PTR_PTR, BT_LONG, BT_LONG,
- BT_BOOL, BT_UINT, BT_PTR, BT_INT)
DEF_FUNCTION_TYPE_9 (BT_FN_VOID_INT_OMPFN_SIZE_PTR_PTR_PTR_UINT_PTR_PTR,
BT_VOID, BT_INT, BT_PTR_FN_VOID_PTR, BT_SIZE, BT_PTR,
BT_PTR, BT_PTR, BT_UINT, BT_PTR, BT_PTR)
BT_BOOL, BT_LONG, BT_LONG, BT_LONG, BT_LONG, BT_LONG,
BT_PTR_LONG, BT_PTR_LONG, BT_PTR, BT_PTR)
+DEF_FUNCTION_TYPE_10 (BT_FN_VOID_OMPFN_PTR_OMPCPYFN_LONG_LONG_BOOL_UINT_PTR_INT_PTR,
+ BT_VOID, BT_PTR_FN_VOID_PTR, BT_PTR,
+ BT_PTR_FN_VOID_PTR_PTR, BT_LONG, BT_LONG,
+ BT_BOOL, BT_UINT, BT_PTR, BT_INT, BT_PTR)
DEF_FUNCTION_TYPE_10 (BT_FN_BOOL_BOOL_ULL_ULL_ULL_LONG_ULL_ULLPTR_ULLPTR_PTR_PTR,
BT_BOOL, BT_BOOL, BT_ULONGLONG, BT_ULONGLONG,
BT_ULONGLONG, BT_LONG, BT_ULONGLONG, BT_PTR_ULONGLONG,
enum omp_clause_default_kind kind;
kind = lang_hooks.decls.omp_predetermined_sharing (decl);
+ if (ctx->region_type & ORT_TASK)
+ {
+ tree detach_clause = omp_find_clause (ctx->clauses, OMP_CLAUSE_DETACH);
+
+ /* The event-handle specified by a detach clause should always be
+ firstprivate, regardless of the current default. */
+ if (detach_clause && OMP_CLAUSE_DECL (detach_clause) == decl)
+ kind = OMP_CLAUSE_DEFAULT_FIRSTPRIVATE;
+ }
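In effect the event handle never needs an explicit data-sharing clause, even under default(none); a minimal sketch (again assuming -fopenmp and the omp_event_handle_t typedef from <omp.h>):

#include <omp.h>

void
spawn (void)
{
  omp_event_handle_t ev;

  /* ev is predetermined firstprivate because of the detach clause,
     so default(none) does not require it to be listed.  */
  #pragma omp task default (none) detach (ev)
    omp_fulfill_event (ev);
}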
if (kind != OMP_CLAUSE_DEFAULT_UNSPECIFIED)
default_kind = kind;
else if (VAR_P (decl) && TREE_STATIC (decl) && DECL_IN_CONSTANT_POOL (decl))
}
break;
+ case OMP_CLAUSE_DETACH:
+ flags = GOVD_FIRSTPRIVATE | GOVD_SEEN;
+ goto do_add;
+
case OMP_CLAUSE_IF:
if (OMP_CLAUSE_IF_MODIFIER (c) != ERROR_MARK
&& OMP_CLAUSE_IF_MODIFIER (c) != code)
case OMP_CLAUSE_DEFAULTMAP:
case OMP_CLAUSE_ORDER:
case OMP_CLAUSE_BIND:
+ case OMP_CLAUSE_DETACH:
case OMP_CLAUSE_USE_DEVICE_PTR:
case OMP_CLAUSE_USE_DEVICE_ADDR:
case OMP_CLAUSE_IS_DEVICE_PTR:
"GOMP_parallel_reductions",
BT_FN_UINT_OMPFN_PTR_UINT_UINT, ATTR_NOTHROW_LIST)
DEF_GOMP_BUILTIN (BUILT_IN_GOMP_TASK, "GOMP_task",
- BT_FN_VOID_OMPFN_PTR_OMPCPYFN_LONG_LONG_BOOL_UINT_PTR_INT,
+ BT_FN_VOID_OMPFN_PTR_OMPCPYFN_LONG_LONG_BOOL_UINT_PTR_INT_PTR,
ATTR_NOTHROW_LIST)
DEF_GOMP_BUILTIN (BUILT_IN_GOMP_TASKLOOP, "GOMP_taskloop",
BT_FN_VOID_OMPFN_PTR_OMPCPYFN_LONG_LONG_UINT_LONG_INT_LONG_LONG_LONG,
tree depend = omp_find_clause (clauses, OMP_CLAUSE_DEPEND);
tree finalc = omp_find_clause (clauses, OMP_CLAUSE_FINAL);
tree priority = omp_find_clause (clauses, OMP_CLAUSE_PRIORITY);
+ tree detach = omp_find_clause (clauses, OMP_CLAUSE_DETACH);
unsigned int iflags
= (untied ? GOMP_TASK_FLAG_UNTIED : 0)
if (omp_find_clause (clauses, OMP_CLAUSE_REDUCTION))
iflags |= GOMP_TASK_FLAG_REDUCTION;
}
- else if (priority)
- iflags |= GOMP_TASK_FLAG_PRIORITY;
+ else
+ {
+ if (priority)
+ iflags |= GOMP_TASK_FLAG_PRIORITY;
+ if (detach)
+ iflags |= GOMP_TASK_FLAG_DETACH;
+ }
tree flags = build_int_cst (unsigned_type_node, iflags);
priority = integer_zero_node;
gsi = gsi_last_nondebug_bb (bb);
+
+ detach = (detach
+ ? build_fold_addr_expr (OMP_CLAUSE_DECL (detach))
+ : null_pointer_node);
+
tree t = gimple_omp_task_data_arg (entry_stmt);
if (t == NULL)
t2 = null_pointer_node;
num_tasks, priority, startvar, endvar, step);
else
t = build_call_expr (builtin_decl_explicit (BUILT_IN_GOMP_TASK),
- 9, t1, t2, t3,
+ 10, t1, t2, t3,
gimple_omp_task_arg_size (entry_stmt),
gimple_omp_task_arg_align (entry_stmt), cond, flags,
- depend, priority);
+ depend, priority, detach);
force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
false, GSI_CONTINUE_LINKING);
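For illustration, the call emitted here now has roughly the following shape:

/* Schematic form of the expanded call for "#pragma omp task detach (ev)"
   (identifiers are illustrative, not taken from an actual dump):

     GOMP_task (fn, data, cpyfn, arg_size, arg_align, if_cond,
                flags | GOMP_TASK_FLAG_DETACH, depend, priority, &ev);

   Without a detach clause the trailing argument is a null pointer,
   matching the 10-argument builtin type introduced above.  */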
case OMP_CLAUSE_NUM_GANGS:
case OMP_CLAUSE_NUM_WORKERS:
case OMP_CLAUSE_VECTOR_LENGTH:
+ case OMP_CLAUSE_DETACH:
if (ctx->outer)
scan_omp_op (&OMP_CLAUSE_OPERAND (c, 0), ctx->outer);
break;
case OMP_CLAUSE_SIMDLEN:
case OMP_CLAUSE_ALIGNED:
case OMP_CLAUSE_DEPEND:
+ case OMP_CLAUSE_DETACH:
case OMP_CLAUSE_ALLOCATE:
case OMP_CLAUSE__LOOPTEMP_:
case OMP_CLAUSE__REDUCTEMP_:
{
location_t loc = gimple_location (ctx->stmt);
tree *p, vla_fields = NULL_TREE, *q = &vla_fields;
+ tree detach_clause
+ = omp_find_clause (gimple_omp_task_clauses (ctx->stmt),
+ OMP_CLAUSE_DETACH);
/* Move VLA fields to the end. */
p = &TYPE_FIELDS (ctx->record_type);
while (*p)
TYPE_FIELDS (ctx->srecord_type) = f1;
}
}
+ if (detach_clause)
+ {
+ tree c, field;
+
+ /* Look for a firstprivate clause with the detach event handle. */
+ for (c = gimple_omp_taskreg_clauses (ctx->stmt);
+ c; c = OMP_CLAUSE_CHAIN (c))
+ {
+ if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_FIRSTPRIVATE)
+ continue;
+ if (maybe_lookup_decl_in_outer_ctx (OMP_CLAUSE_DECL (c), ctx)
+ == OMP_CLAUSE_DECL (detach_clause))
+ break;
+ }
+
+ gcc_assert (c);
+ field = lookup_field (OMP_CLAUSE_DECL (c), ctx);
+
+ /* Move field corresponding to the detach clause first.
+ This is filled by GOMP_task and needs to be in a
+ specific position. */
+ p = &TYPE_FIELDS (ctx->record_type);
+ while (*p)
+ if (*p == field)
+ *p = DECL_CHAIN (*p);
+ else
+ p = &DECL_CHAIN (*p);
+ DECL_CHAIN (field) = TYPE_FIELDS (ctx->record_type);
+ TYPE_FIELDS (ctx->record_type) = field;
+ if (ctx->srecord_type)
+ {
+ field = lookup_sfield (OMP_CLAUSE_DECL (c), ctx);
+ p = &TYPE_FIELDS (ctx->srecord_type);
+ while (*p)
+ if (*p == field)
+ *p = DECL_CHAIN (*p);
+ else
+ p = &DECL_CHAIN (*p);
+ DECL_CHAIN (field) = TYPE_FIELDS (ctx->srecord_type);
+ TYPE_FIELDS (ctx->srecord_type) = field;
+ }
+ }
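The reordering matters because GOMP_task (see libgomp/task.c further down) writes the event handle into the first pointer-sized slot of the task's argument block; a rough sketch of the resulting record layout (field names are made up):

/* Sketch of the layout after the reordering:

     struct .omp_data_s
     {
       omp_event_handle_t ev;     detach handle, moved to the front;
                                  GOMP_task stores the event address here
       ...                        remaining captured fields, VLAs last
     };  */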
layout_type (ctx->record_type);
fixup_child_record_type (ctx);
if (ctx->srecord_type)
--- /dev/null
+/* { dg-do compile } */
+/* { dg-options "-fopenmp" } */
+
+typedef enum omp_event_handle_t
+{
+ __omp_event_handle_t_max__ = __UINTPTR_MAX__
+} omp_event_handle_t;
+
+extern void omp_fulfill_event (omp_event_handle_t);
+
+void f (omp_event_handle_t x, omp_event_handle_t y, int z)
+{
+ #pragma omp task detach (x) detach (y) /* { dg-error "too many 'detach' clauses on a task construct" } */
+ ;
+
+ #pragma omp task mergeable detach (x) /* { dg-error "'detach' clause must not be used together with 'mergeable' clause" } */
+ ;
+
+ #pragma omp task detach (x) mergeable /* { dg-error "'detach' clause must not be used together with 'mergeable' clause" } */
+ ;
+
+ #pragma omp task detach (z) /* { dg-error "'detach' clause event handle has type 'int' rather than 'omp_event_handle_t'" } */
+ ;
+
+ #pragma omp parallel master default (none) /* { dg-message "enclosing 'parallel'" } */
+ #pragma omp task detach (x) /* { dg-error "'x' not specified in enclosing 'parallel'" } */
+ ;
+
+ #pragma omp task detach (x) default (none) /* This should work. */
+ omp_fulfill_event (x);
+
+ #pragma omp task detach (x) firstprivate (x) /* { dg-error "the event handle of a 'detach' clause should not be in a data-sharing clause" } */
+ ;
+
+ #pragma omp task detach (x) shared (x) /* { dg-error "the event handle of a 'detach' clause should not be in a data-sharing clause" } */
+ ;
+}
--- /dev/null
+// { dg-do compile }
+// { dg-options "-fopenmp" }
+
+typedef enum omp_event_handle_t
+{
+ __omp_event_handle_t_max__ = __UINTPTR_MAX__
+} omp_event_handle_t;
+
+template <typename T>
+void foo ()
+{
+ T t;
+ #pragma omp task detach (t)
+ ;
+}
+
+template <typename T>
+void bar ()
+{
+ T t;
+ #pragma omp task detach (t) // { dg-error "'detach' clause event handle has type 'int' rather than 'omp_event_handle_t'" }
+ ;
+}
+
+void f()
+{
+ foo <omp_event_handle_t> ();
+ bar <int> (); // { dg-message "required from here" }
+}
--- /dev/null
+/* { dg-do compile } */
+/* { dg-options "-fopenmp" } */
+
+typedef enum omp_event_handle_t
+{
+ __omp_event_handle_t_max__ = __UINTPTR_MAX__
+} omp_event_handle_t;
+
+extern void omp_fulfill_event (omp_event_handle_t);
+
+void f (omp_event_handle_t x)
+{
+ void g (void)
+ {
+ #pragma omp task detach (x)
+ omp_fulfill_event (x);
+ }
+
+ g ();
+}
--- /dev/null
+! { dg-do compile }
+! { dg-options "-fopenmp" }
+
+program task_detach_1
+ use iso_c_binding, only: c_intptr_t
+ implicit none
+
+ integer, parameter :: omp_event_handle_kind = c_intptr_t
+ integer (kind=omp_event_handle_kind) :: x, y
+ integer :: z
+
+ !$omp task detach(x) detach(y) ! { dg-error "Failed to match clause at \\\(1\\\)" }
+ !$omp end task ! { dg-error "Unexpected !\\\$OMP END TASK statement at \\\(1\\\)" }
+
+ !$omp task mergeable detach(x) ! { dg-error "'DETACH' clause at \\\(1\\\) must not be used together with 'MERGEABLE' clause" }
+ !$omp end task
+
+ !$omp task detach(x) mergeable ! { dg-error "'DETACH' clause at \\\(1\\\) must not be used together with 'MERGEABLE' clause" }
+ !$omp end task
+
+ !$omp task detach(z) ! { dg-error "'z' at \\\(1\\\) should be of type integer\\\(kind=omp_event_handle_kind\\\)" }
+ !$omp end task ! { dg-error "Unexpected !\\\$OMP END TASK statement at \\\(1\\\)" }
+
+ !$omp task detach (x) firstprivate (x) ! { dg-error "DETACH event handle 'x' in FIRSTPRIVATE clause at \\\(1\\\)" }
+ !$omp end task
+
+ !$omp task detach (x) shared (x) ! { dg-error "DETACH event handle 'x' in SHARED clause at \\\(1\\\)" }
+ !$omp end task
+end program
/* OpenMP clause: link (variable-list). */
OMP_CLAUSE_LINK,
- /* OpenMP clause: from (variable-list). */
- OMP_CLAUSE_FROM,
-
- /* OpenMP clause: to (variable-list). */
- OMP_CLAUSE_TO,
-
- /* OpenACC clauses: {copy, copyin, copyout, create, delete, deviceptr,
- device, host (self), present, present_or_copy (pcopy), present_or_copyin
- (pcopyin), present_or_copyout (pcopyout), present_or_create (pcreate)}
- (variable-list).
-
- OpenMP clause: map ({alloc:,to:,from:,tofrom:,}variable-list). */
- OMP_CLAUSE_MAP,
+ /* OpenMP clause: detach (event-handle). */
+ OMP_CLAUSE_DETACH,
/* OpenACC clause: use_device (variable-list).
OpenMP clause: use_device_ptr (ptr-list). */
/* OpenMP clause: exclusive (variable-list). */
OMP_CLAUSE_EXCLUSIVE,
+ /* OpenMP clause: from (variable-list). */
+ OMP_CLAUSE_FROM,
+
+ /* OpenMP clause: to (variable-list). */
+ OMP_CLAUSE_TO,
+
+ /* OpenACC clauses: {copy, copyin, copyout, create, delete, deviceptr,
+ device, host (self), present, present_or_copy (pcopy), present_or_copyin
+ (pcopyin), present_or_copyout (pcopyout), present_or_create (pcreate)}
+ (variable-list).
+
+ OpenMP clause: map ({alloc:,to:,from:,tofrom:,}variable-list). */
+ OMP_CLAUSE_MAP,
+
/* Internal structure to hold OpenACC cache directive's variable-list.
#pragma acc cache (variable-list). */
OMP_CLAUSE__CACHE_,
case OMP_CLAUSE_USE_DEVICE_PTR:
case OMP_CLAUSE_USE_DEVICE_ADDR:
case OMP_CLAUSE_IS_DEVICE_PTR:
+ case OMP_CLAUSE_DETACH:
do_decl_clause:
if (pdecl == NULL)
pdecl = &OMP_CLAUSE_DECL (clause);
case OMP_CLAUSE_USE_DEVICE_PTR:
case OMP_CLAUSE_USE_DEVICE_ADDR:
case OMP_CLAUSE_IS_DEVICE_PTR:
+ case OMP_CLAUSE_DETACH:
do_decl_clause:
if (pdecl == NULL)
pdecl = &OMP_CLAUSE_DECL (clause);
case OMP_CLAUSE_FINALIZE:
pp_string (pp, "finalize");
break;
+ case OMP_CLAUSE_DETACH:
+ pp_string (pp, "detach(");
+ dump_generic_node (pp, OMP_CLAUSE_DECL (clause), spc, flags,
+ false);
+ pp_right_paren (pp);
+ break;
default:
gcc_unreachable ();
1, /* OMP_CLAUSE_UNIFORM */
1, /* OMP_CLAUSE_TO_DECLARE */
1, /* OMP_CLAUSE_LINK */
- 2, /* OMP_CLAUSE_FROM */
- 2, /* OMP_CLAUSE_TO */
- 2, /* OMP_CLAUSE_MAP */
+ 1, /* OMP_CLAUSE_DETACH */
1, /* OMP_CLAUSE_USE_DEVICE_PTR */
1, /* OMP_CLAUSE_USE_DEVICE_ADDR */
1, /* OMP_CLAUSE_IS_DEVICE_PTR */
1, /* OMP_CLAUSE_INCLUSIVE */
1, /* OMP_CLAUSE_EXCLUSIVE */
+ 2, /* OMP_CLAUSE_FROM */
+ 2, /* OMP_CLAUSE_TO */
+ 2, /* OMP_CLAUSE_MAP */
2, /* OMP_CLAUSE__CACHE_ */
2, /* OMP_CLAUSE_GANG */
1, /* OMP_CLAUSE_ASYNC */
"uniform",
"to",
"link",
- "from",
- "to",
- "map",
+ "detach",
"use_device_ptr",
"use_device_addr",
"is_device_ptr",
"inclusive",
"exclusive",
+ "from",
+ "to",
+ "map",
"_cache_",
"gang",
"async",
case OMP_CLAUSE_HINT:
case OMP_CLAUSE_TO_DECLARE:
case OMP_CLAUSE_LINK:
+ case OMP_CLAUSE_DETACH:
case OMP_CLAUSE_USE_DEVICE_PTR:
case OMP_CLAUSE_USE_DEVICE_ADDR:
case OMP_CLAUSE_IS_DEVICE_PTR:
#define GOMP_TASK_FLAG_IF (1 << 10)
#define GOMP_TASK_FLAG_NOGROUP (1 << 11)
#define GOMP_TASK_FLAG_REDUCTION (1 << 12)
+#define GOMP_TASK_FLAG_DETACH (1 << 13)
/* GOMP_target{_ext,update_ext,enter_exit_data} flags argument. */
#define GOMP_TARGET_FLAG_NOWAIT (1 << 0)
return omp_get_max_task_priority ();
}
+void
+omp_fulfill_event_ (intptr_t event)
+{
+ omp_fulfill_event ((omp_event_handle_t) event);
+}
+
void
omp_set_affinity_format_ (const char *format, size_t format_len)
{
entries and the gomp_task in which they reside. */
struct priority_node pnode[3];
+ bool detach;
+ gomp_sem_t completion_sem;
+
struct gomp_task_icv icv;
void (*fn) (void *);
void *fn_data;
int work_share_cancelled;
int team_cancelled;
+ /* Tasks waiting for their completion event to be fulfilled. */
+ struct priority_queue task_detach_queue;
+ unsigned int task_detach_count;
+
/* This array contains structures for implicit tasks. */
struct gomp_task implicit_task[];
};
omp_free;
omp_get_supported_active_levels;
omp_get_supported_active_levels_;
+ omp_fulfill_event;
+ omp_fulfill_event_;
} OMP_5.0;
GOMP_1.0 {
/* task.c */
extern void GOMP_task (void (*) (void *), void *, void (*) (void *, void *),
- long, long, bool, unsigned, void **, int);
+ long, long, bool, unsigned, void **, int, void *);
extern void GOMP_taskloop (void (*) (void *), void *,
void (*) (void *, void *), long, long, unsigned,
unsigned long, int, long, long, long);
omp_uintptr_t value;
} omp_alloctrait_t;
+typedef enum omp_event_handle_t __GOMP_UINTPTR_T_ENUM
+{
+ __omp_event_handle_t_max__ = __UINTPTR_MAX__
+} omp_event_handle_t;
+
#ifdef __cplusplus
extern "C" {
# define __GOMP_NOTHROW throw ()
extern int omp_get_initial_device (void) __GOMP_NOTHROW;
extern int omp_get_max_task_priority (void) __GOMP_NOTHROW;
+extern void omp_fulfill_event (omp_event_handle_t) __GOMP_NOTHROW;
+
extern void *omp_target_alloc (__SIZE_TYPE__, int) __GOMP_NOTHROW;
extern void omp_target_free (void *, int) __GOMP_NOTHROW;
extern int omp_target_is_present (const void *, int) __GOMP_NOTHROW;
integer, parameter :: omp_alloctrait_val_kind = c_intptr_t
integer, parameter :: omp_memspace_handle_kind = c_intptr_t
integer, parameter :: omp_depend_kind = @OMP_DEPEND_KIND@
+ integer, parameter :: omp_event_handle_kind = c_intptr_t
integer (omp_sched_kind), parameter :: omp_sched_static = 1
integer (omp_sched_kind), parameter :: omp_sched_dynamic = 2
integer (omp_sched_kind), parameter :: omp_sched_guided = 3
end function omp_get_max_task_priority
end interface
+ interface
+ subroutine omp_fulfill_event (event)
+ use omp_lib_kinds
+ integer (kind=omp_event_handle_kind), &
+ value, intent(in) :: event
+ end subroutine omp_fulfill_event
+ end interface
+
interface
subroutine omp_set_affinity_format (format)
character(len=*), intent(in) :: format
integer omp_allocator_handle_kind, omp_alloctrait_key_kind
integer omp_alloctrait_val_kind, omp_memspace_handle_kind
+ integer omp_event_handle_kind
parameter (omp_allocator_handle_kind = @INTPTR_T_KIND@)
parameter (omp_alloctrait_key_kind = 4)
parameter (omp_alloctrait_val_kind = @INTPTR_T_KIND@)
parameter (omp_memspace_handle_kind = @INTPTR_T_KIND@)
+ parameter (omp_event_handle_kind = @INTPTR_T_KIND@)
integer (omp_alloctrait_key_kind) omp_atk_sync_hint
integer (omp_alloctrait_key_kind) omp_atk_alignment
integer (omp_alloctrait_key_kind) omp_atk_access
external omp_get_max_task_priority
integer(4) omp_get_max_task_priority
+ external omp_fulfill_event
+
external omp_set_affinity_format, omp_get_affinity_format
external omp_display_affinity, omp_capture_affinity
integer(4) omp_get_affinity_format
}
#endif /* _LIBGOMP_CHECKING_ */
+/* Tree version of priority_queue_find. */
+
+static struct gomp_task *
+priority_tree_find (enum priority_queue_type type,
+ prio_splay_tree_node node,
+ priority_queue_predicate pred)
+{
+ again:
+ if (!node)
+ return NULL;
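+  /* Higher priorities are kept in the right subtree, so search it first;
+     this returns the highest-priority task satisfying PRED.  */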
+ struct gomp_task *task = priority_tree_find (type, node->right, pred);
+ if (task)
+ return task;
+ task = priority_node_to_task (type, node->key.l.tasks);
+ if (pred (task))
+ return task;
+ node = node->left;
+ goto again;
+}
+
+/* List version of priority_queue_find. */
+
+static struct gomp_task *
+priority_list_find (enum priority_queue_type type,
+ struct priority_list *list,
+ priority_queue_predicate pred)
+{
+ struct priority_node *node = list->tasks;
+ if (!node)
+ return NULL;
+
+ do
+ {
+ struct gomp_task *task = priority_node_to_task (type, node);
+ if (pred (task))
+ return task;
+ node = node->next;
+ }
+ while (node != list->tasks);
+
+ return NULL;
+}
+
+/* Return the highest priority task in the priority queue HEAD that
+ satisfies the predicate PRED. HEAD contains tasks of type TYPE. */
+
+struct gomp_task *
+priority_queue_find (enum priority_queue_type type,
+ struct priority_queue *head,
+ priority_queue_predicate pred)
+{
+ if (priority_queue_multi_p (head))
+ return priority_tree_find (type, head->t.root, pred);
+ else
+ return priority_list_find (type, &head->l, pred);
+}
+
/* Remove NODE from priority queue HEAD, wherever it may be inside the
tree. HEAD contains tasks of type TYPE. */
PQ_IGNORED = 999
};
+typedef bool (*priority_queue_predicate) (struct gomp_task *);
+
/* Priority queue implementation prototypes. */
extern bool priority_queue_task_in_queue_p (enum priority_queue_type,
struct priority_queue *);
extern void priority_queue_verify (enum priority_queue_type,
struct priority_queue *, bool);
+extern struct gomp_task *priority_queue_find (enum priority_queue_type,
+ struct priority_queue *,
+ priority_queue_predicate);
extern void priority_tree_remove (enum priority_queue_type,
struct priority_queue *,
struct priority_node *);
task->dependers = NULL;
task->depend_hash = NULL;
task->depend_count = 0;
+ task->detach = false;
}
/* Clean up a task, after completing it. */
}
}
+static bool
+task_fulfilled_p (struct gomp_task *task)
+{
+ return __atomic_load_n (&task->completion_sem, __ATOMIC_RELAXED);
+}
+
/* Called when encountering an explicit task directive. If IF_CLAUSE is
false, then we must not delay in executing the task. If UNTIED is true,
then the task may be executed by any member of the team.
void
GOMP_task (void (*fn) (void *), void *data, void (*cpyfn) (void *, void *),
long arg_size, long arg_align, bool if_clause, unsigned flags,
- void **depend, int priority)
+ void **depend, int priority, void *detach)
{
struct gomp_thread *thr = gomp_thread ();
struct gomp_team *team = thr->ts.team;
else if (priority > gomp_max_task_priority_var)
priority = gomp_max_task_priority_var;
+ if ((flags & GOMP_TASK_FLAG_DETACH) == 0)
+ detach = NULL;
+
if (!if_clause || team == NULL
|| (thr->task && thr->task->final_task)
|| team->task_count > 64 * team->nthreads)
task.final_task = (thr->task && thr->task->final_task)
|| (flags & GOMP_TASK_FLAG_FINAL);
task.priority = priority;
+
+ if (detach)
+ {
+ task.detach = true;
+ gomp_sem_init (&task.completion_sem, 0);
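+      /* Publish the event handle: store the address of the completion
+	 semaphore both through DETACH (the user's event variable) and at
+	 the start of the argument block, where the handle's firstprivate
+	 copy lives (see the field reordering in omp-low.c above).  */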
+ *(void **) detach = &task.completion_sem;
+ if (data)
+ *(void **) data = &task.completion_sem;
+
+ gomp_debug (0, "New event: %p\n", &task.completion_sem);
+ }
+
if (thr->task)
{
task.in_tied_task = thr->task->in_tied_task;
}
else
fn (data);
+
+ if (detach && !task_fulfilled_p (&task))
+ gomp_sem_wait (&task.completion_sem);
+
/* Access to "children" is normally done inside a task_lock
mutex region, but the only way this particular task.children
can be set is if this thread's task work function (fn)
task->kind = GOMP_TASK_UNDEFERRED;
task->in_tied_task = parent->in_tied_task;
task->taskgroup = taskgroup;
+ if (detach)
+ {
+ task->detach = true;
+ gomp_sem_init (&task->completion_sem, 0);
+ *(void **) detach = &task->completion_sem;
+ if (data)
+ *(void **) data = &task->completion_sem;
+
+ gomp_debug (0, "New event: %p\n", &task->completion_sem);
+ }
thr->task = task;
if (cpyfn)
{
while (1)
{
bool cancelled = false;
+
+ /* Look for a queued detached task with a fulfilled completion event
+ that is ready to finish. */
+ child_task = priority_queue_find (PQ_TEAM, &team->task_detach_queue,
+ task_fulfilled_p);
+ if (child_task)
+ {
+ priority_queue_remove (PQ_TEAM, &team->task_detach_queue,
+ child_task, MEMMODEL_RELAXED);
+ --team->task_detach_count;
+ gomp_debug (0, "thread %d: found task with fulfilled event %p\n",
+ thr->ts.team_id, &child_task->completion_sem);
+
+ if (to_free)
+ {
+ gomp_finish_task (to_free);
+ free (to_free);
+ to_free = NULL;
+ }
+ goto finish_cancelled;
+ }
+
if (!priority_queue_empty_p (&team->task_queue, MEMMODEL_RELAXED))
{
bool ignored;
gomp_mutex_lock (&team->task_lock);
if (child_task)
{
- finish_cancelled:;
- size_t new_tasks
- = gomp_task_run_post_handle_depend (child_task, team);
- gomp_task_run_post_remove_parent (child_task);
- gomp_clear_parent (&child_task->children_queue);
- gomp_task_run_post_remove_taskgroup (child_task);
- to_free = child_task;
- child_task = NULL;
- if (!cancelled)
- team->task_running_count--;
- if (new_tasks > 1)
+ if (child_task->detach && !task_fulfilled_p (child_task))
{
- do_wake = team->nthreads - team->task_running_count;
- if (do_wake > new_tasks)
- do_wake = new_tasks;
+ priority_queue_insert (PQ_TEAM, &team->task_detach_queue,
+ child_task, child_task->priority,
+ PRIORITY_INSERT_END,
+ false, false);
+ ++team->task_detach_count;
+ gomp_debug (0, "thread %d: queueing task with event %p\n",
+ thr->ts.team_id, &child_task->completion_sem);
+ child_task = NULL;
}
- if (--team->task_count == 0
- && gomp_team_barrier_waiting_for_tasks (&team->barrier))
+ else
{
- gomp_team_barrier_done (&team->barrier, state);
- gomp_mutex_unlock (&team->task_lock);
- gomp_team_barrier_wake (&team->barrier, 0);
- gomp_mutex_lock (&team->task_lock);
+ finish_cancelled:;
+ size_t new_tasks
+ = gomp_task_run_post_handle_depend (child_task, team);
+ gomp_task_run_post_remove_parent (child_task);
+ gomp_clear_parent (&child_task->children_queue);
+ gomp_task_run_post_remove_taskgroup (child_task);
+ to_free = child_task;
+ child_task = NULL;
+ if (!cancelled)
+ team->task_running_count--;
+ if (new_tasks > 1)
+ {
+ do_wake = team->nthreads - team->task_running_count;
+ if (do_wake > new_tasks)
+ do_wake = new_tasks;
+ }
+ if (--team->task_count == 0
+ && gomp_team_barrier_waiting_for_tasks (&team->barrier))
+ {
+ gomp_team_barrier_done (&team->barrier, state);
+ gomp_mutex_unlock (&team->task_lock);
+ gomp_team_barrier_wake (&team->barrier, 0);
+ gomp_mutex_lock (&team->task_lock);
+ }
}
}
}
}
ialias (omp_in_final)
+
+void
+omp_fulfill_event (omp_event_handle_t event)
+{
+ gomp_sem_t *sem = (gomp_sem_t *) event;
+ struct gomp_thread *thr = gomp_thread ();
+ struct gomp_team *team = thr ? thr->ts.team : NULL;
+
+ if (__atomic_load_n (sem, __ATOMIC_RELAXED))
+ gomp_fatal ("omp_fulfill_event: %p event already fulfilled!\n", sem);
+
+ gomp_debug (0, "omp_fulfill_event: %p\n", sem);
+ gomp_sem_post (sem);
+ if (team)
+ gomp_team_barrier_wake (&team->barrier, 1);
+}
+
+ialias (omp_fulfill_event)
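The intended use is completing a detached task from outside its body, for instance from the callback of an asynchronous API.  A self-contained sketch (async_start is a stand-in defined locally, not a real API; compile with -fopenmp):

#include <stdint.h>
#include <omp.h>

/* Stand-in for an asynchronous interface; here the completion callback
   runs immediately, but it could equally be called later from another
   thread.  */
static void
async_start (void (*done) (void *), void *arg)
{
  done (arg);
}

static void
done_cb (void *arg)
{
  omp_fulfill_event ((omp_event_handle_t) (uintptr_t) arg);
}

int
main (void)
{
  omp_event_handle_t ev;

  #pragma omp parallel
  #pragma omp single
  {
    #pragma omp task detach (ev)
      async_start (done_cb, (void *) (uintptr_t) ev);
    /* The implicit barrier does not treat the task as complete until
       done_cb has called omp_fulfill_event.  */
  }
  return 0;
}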
team->work_share_cancelled = 0;
team->team_cancelled = 0;
+ priority_queue_init (&team->task_detach_queue);
+ team->task_detach_count = 0;
+
return team;
}
gomp_barrier_destroy (&team->barrier);
gomp_mutex_destroy (&team->task_lock);
priority_queue_free (&team->task_queue);
+ priority_queue_free (&team->task_detach_queue);
team_free (team);
}
--- /dev/null
+/* { dg-do run } */
+
+#include <omp.h>
+#include <assert.h>
+
+/* Test chaining of detached tasks, with each task fulfilling the
+ completion event of the previous one. */
+
+int main (void)
+{
+ omp_event_handle_t detach_event1, detach_event2;
+ int x = 0, y = 0, z = 0;
+
+ #pragma omp parallel
+ #pragma omp single
+ {
+ #pragma omp task detach(detach_event1)
+ x++;
+
+ #pragma omp task detach(detach_event2)
+ {
+ y++;
+ omp_fulfill_event (detach_event1);
+ }
+
+ #pragma omp task
+ {
+ z++;
+ omp_fulfill_event (detach_event2);
+ }
+ }
+
+ assert (x == 1);
+ assert (y == 1);
+ assert (z == 1);
+}
--- /dev/null
+/* { dg-do run } */
+
+#include <omp.h>
+#include <assert.h>
+
+/* Test handling of detach clause with only a single thread. The runtime
+ should not block when a task with an unfulfilled event finishes
+ running. */
+
+int main (void)
+{
+ omp_event_handle_t detach_event1, detach_event2;
+ int x = 0, y = 0, z = 0;
+
+ #pragma omp parallel num_threads(1)
+ #pragma omp single
+ {
+ #pragma omp task detach(detach_event1)
+ x++;
+
+ #pragma omp task detach(detach_event2)
+ {
+ y++;
+ omp_fulfill_event (detach_event1);
+ }
+
+ #pragma omp task
+ {
+ z++;
+ omp_fulfill_event (detach_event2);
+ }
+ }
+
+ assert (x == 1);
+ assert (y == 1);
+ assert (z == 1);
+}
--- /dev/null
+/* { dg-do run } */
+
+#include <omp.h>
+#include <assert.h>
+
+/* Test the task detach clause used together with dependencies. */
+
+int main (void)
+{
+ omp_event_handle_t detach_event;
+ int x = 0, y = 0, z = 0;
+ int dep;
+
+ #pragma omp parallel
+ #pragma omp single
+ {
+ #pragma omp task depend(out:dep) detach(detach_event)
+ x++;
+
+ #pragma omp task
+ {
+ y++;
+ omp_fulfill_event(detach_event);
+ }
+
+ #pragma omp task depend(in:dep)
+ z++;
+ }
+
+ assert (x == 1);
+ assert (y == 1);
+ assert (z == 1);
+}
--- /dev/null
+/* { dg-do run } */
+
+#include <omp.h>
+#include <assert.h>
+
+/* Test detach clause, where a task fulfills its own completion event. */
+
+int main (void)
+{
+ omp_event_handle_t detach_event;
+ int x = 0;
+
+ detach_event = (omp_event_handle_t) 0x123456789abcdef0;
+
+ #pragma omp parallel
+ #pragma omp single
+ #pragma omp task detach(detach_event)
+ {
+ x++;
+ omp_fulfill_event(detach_event);
+ }
+
+ assert (x == 1);
+}
--- /dev/null
+/* { dg-do run } */
+
+#include <omp.h>
+#include <assert.h>
+
+/* Test tasks with detach clause.  Each thread spawns off a chain of tasks
+   that can then be executed by any available thread. */
+
+int main (void)
+{
+ int x = 0, y = 0, z = 0;
+ int thread_count;
+ omp_event_handle_t detach_event1, detach_event2;
+
+ #pragma omp parallel firstprivate(detach_event1, detach_event2)
+ {
+ #pragma omp single
+ thread_count = omp_get_num_threads();
+
+ #pragma omp task detach(detach_event1) untied
+ #pragma omp atomic update
+ x++;
+
+ #pragma omp task detach(detach_event2) untied
+ {
+ #pragma omp atomic update
+ y++;
+ omp_fulfill_event (detach_event1);
+ }
+
+ #pragma omp task untied
+ {
+ #pragma omp atomic update
+ z++;
+ omp_fulfill_event (detach_event2);
+ }
+ }
+
+ assert (x == thread_count);
+ assert (y == thread_count);
+ assert (z == thread_count);
+}
--- /dev/null
+/* { dg-do run } */
+
+#include <omp.h>
+#include <assert.h>
+
+/* Test tasks with detach clause on an offload device. Each device
+   thread spawns off a chain of tasks that can then be executed by
+ any available thread. */
+
+int main (void)
+{
+ int x = 0, y = 0, z = 0;
+ int thread_count;
+ omp_event_handle_t detach_event1, detach_event2;
+
+ #pragma omp target map(tofrom: x, y, z) map(from: thread_count)
+ #pragma omp parallel firstprivate(detach_event1, detach_event2)
+ {
+ #pragma omp single
+ thread_count = omp_get_num_threads();
+
+ #pragma omp task detach(detach_event1) untied
+ #pragma omp atomic update
+ x++;
+
+ #pragma omp task detach(detach_event2) untied
+ {
+ #pragma omp atomic update
+ y++;
+ omp_fulfill_event (detach_event1);
+ }
+
+ #pragma omp task untied
+ {
+ #pragma omp atomic update
+ z++;
+ omp_fulfill_event (detach_event2);
+ }
+
+ #pragma omp taskwait
+ }
+
+ assert (x == thread_count);
+ assert (y == thread_count);
+ assert (z == thread_count);
+}
--- /dev/null
+! { dg-do run }
+
+! Test chaining of detached tasks, with each task fulfilling the
+! completion event of the previous one.
+
+program task_detach_1
+ use omp_lib
+
+ integer (kind=omp_event_handle_kind) :: detach_event1, detach_event2
+ integer :: x = 0, y = 0, z = 0
+
+ !$omp parallel
+ !$omp single
+ !$omp task detach(detach_event1)
+ x = x + 1
+ !$omp end task
+
+ !$omp task detach(detach_event2)
+ y = y + 1
+ call omp_fulfill_event (detach_event1)
+ !$omp end task
+
+ !$omp task
+ z = z + 1
+ call omp_fulfill_event (detach_event2)
+ !$omp end task
+ !$omp end single
+ !$omp end parallel
+
+ if (x /= 1) stop 1
+ if (y /= 1) stop 2
+ if (z /= 1) stop 3
+end program
--- /dev/null
+! { dg-do run }
+
+! Test handling of detach clause with only a single thread. The runtime
+! should not block when a task with an unfulfilled event finishes
+! running.
+
+program task_detach_2
+ use omp_lib
+
+ integer (kind=omp_event_handle_kind) :: detach_event1, detach_event2
+ integer :: x = 0, y = 0, z = 0
+
+ !$omp parallel num_threads(1)
+ !$omp single
+ !$omp task detach(detach_event1)
+ x = x + 1
+ !$omp end task
+
+ !$omp task detach(detach_event2)
+ y = y + 1
+ call omp_fulfill_event (detach_event1)
+ !$omp end task
+
+ !$omp task
+ z = z + 1
+ call omp_fulfill_event (detach_event2)
+ !$omp end task
+ !$omp end single
+ !$omp end parallel
+
+ if (x /= 1) stop 1
+ if (y /= 1) stop 2
+ if (z /= 1) stop 3
+end program
--- /dev/null
+! { dg-do run }
+
+! Test the task detach clause used together with dependencies.
+
+program task_detach_3
+
+ use omp_lib
+
+ integer (kind=omp_event_handle_kind) :: detach_event
+ integer :: x = 0, y = 0, z = 0
+ integer :: dep
+
+ !$omp parallel
+ !$omp single
+ !$omp task depend(out:dep) detach(detach_event)
+ x = x + 1
+ !$omp end task
+
+ !$omp task
+ y = y + 1
+ call omp_fulfill_event(detach_event)
+ !$omp end task
+
+ !$omp task depend(in:dep)
+ z = z + 1
+ !$omp end task
+ !$omp end single
+ !$omp end parallel
+
+ if (x /= 1) stop 1
+ if (y /= 1) stop 2
+ if (z /= 1) stop 3
+end program
--- /dev/null
+! { dg-do run }
+
+! Test detach clause, where a task fulfills its own completion event.
+
+program task_detach_4
+
+ use omp_lib
+
+ integer (kind=omp_event_handle_kind) :: detach_event
+ integer :: x = 0
+
+ !$omp parallel
+ !$omp single
+ !$omp task detach(detach_event)
+ x = x + 1
+ call omp_fulfill_event(detach_event)
+ !$omp end task
+ !$omp end single
+ !$omp end parallel
+
+ if (x /= 1) stop 1
+end program
--- /dev/null
+! { dg-do run }
+
+! Test tasks with detach clause.  Each thread spawns off a chain of tasks
+! that can then be executed by any available thread.
+
+program task_detach_5
+ use omp_lib
+
+ integer (kind=omp_event_handle_kind) :: detach_event1, detach_event2
+ integer :: x = 0, y = 0, z = 0
+ integer :: thread_count
+
+ !$omp parallel firstprivate(detach_event1, detach_event2)
+ !$omp single
+ thread_count = omp_get_num_threads()
+ !$omp end single
+
+ !$omp task detach(detach_event1) untied
+ !$omp atomic update
+ x = x + 1
+ !$omp end task
+
+ !$omp task detach(detach_event2) untied
+ !$omp atomic update
+ y = y + 1
+ call omp_fulfill_event (detach_event1);
+ !$omp end task
+
+ !$omp task untied
+ !$omp atomic update
+ z = z + 1
+ call omp_fulfill_event (detach_event2);
+ !$omp end task
+ !$omp end parallel
+
+ if (x /= thread_count) stop 1
+ if (y /= thread_count) stop 2
+ if (z /= thread_count) stop 3
+end program
--- /dev/null
+! { dg-do run }
+
+! Test tasks with detach clause on an offload device. Each device
+! thread spawns off a chain of tasks that can then be executed by
+! any available thread.
+
+program task_detach_6
+ use omp_lib
+
+ integer (kind=omp_event_handle_kind) :: detach_event1, detach_event2
+ integer :: x = 0, y = 0, z = 0
+ integer :: thread_count
+
+ !$omp target map(tofrom: x, y, z) map(from: thread_count)
+ !$omp parallel firstprivate(detach_event1, detach_event2)
+ !$omp single
+ thread_count = omp_get_num_threads()
+ !$omp end single
+
+ !$omp task detach(detach_event1) untied
+ !$omp atomic update
+ x = x + 1
+ !$omp end task
+
+ !$omp task detach(detach_event2) untied
+ !$omp atomic update
+ y = y + 1
+ call omp_fulfill_event (detach_event1);
+ !$omp end task
+
+ !$omp task untied
+ !$omp atomic update
+ z = z + 1
+ call omp_fulfill_event (detach_event2);
+ !$omp end task
+
+ !$omp taskwait
+ !$omp end parallel
+ !$omp end target
+
+ if (x /= thread_count) stop 1
+ if (y /= thread_count) stop 2
+ if (z /= thread_count) stop 3
+end program