2 // Copyright 2012 Francisco Jerez
4 // Permission is hereby granted, free of charge, to any person obtaining a
5 // copy of this software and associated documentation files (the "Software"),
6 // to deal in the Software without restriction, including without limitation
7 // the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 // and/or sell copies of the Software, and to permit persons to whom the
9 // Software is furnished to do so, subject to the following conditions:
11 // The above copyright notice and this permission notice shall be included in
12 // all copies or substantial portions of the Software.
14 // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 // THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 // OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 // ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 // OTHER DEALINGS IN THE SOFTWARE.
23 #include "api/util.hpp"
24 #include "core/event.hpp"
26 using namespace clover
;
// clCreateUserEvent: create a user-controlled event in the given
// context; success/failure is reported through the optional
// *r_errcode out-parameter.
// NOTE(review): this extraction is hard-wrapped mid-statement and is
// missing interleaved upstream lines (e.g. the "} catch (error &e) {"
// between the return and the trailing ret_error) — confirm against
// the original file before editing.
29 clCreateUserEvent(cl_context d_ctx
, cl_int
*r_errcode
) try {
// Validate and dereference the context handle; obj() presumably
// throws on an invalid handle, which the function-try-block converts
// to an error code below.
30 auto &ctx
= obj(d_ctx
);
// Report CL_SUCCESS to the caller before returning the new event.
32 ret_error(r_errcode
, CL_SUCCESS
);
// New soft_event with an empty dependency list and a false flag —
// presumably "not yet triggered" so the user signals it later via
// clSetUserEventStatus(); verify against soft_event's constructor.
33 return desc(new soft_event(ctx
, {}, false));
// Error path (its catch header is not visible in this extraction):
// forward the caught error code through r_errcode.
36 ret_error(r_errcode
, e
);
// clSetUserEventStatus: set the execution status of a user event.
// NOTE(review): the guard that precedes the CL_INVALID_VALUE return
// (presumably a check on the new status value) is missing from this
// extraction — confirm against the upstream file.
41 clSetUserEventStatus(cl_event d_ev
, cl_int status
) try {
// obj<soft_event>() both validates the handle and checks that the
// event really is a user (soft) event; a hard event would fail here.
42 auto &sev
= obj
<soft_event
>(d_ev
);
45 return CL_INVALID_VALUE
;
// A status <= 0 means the event was already completed or set to an
// error state; per the CL spec the status may only be set once.
47 if (sev
.status() <= 0)
48 return CL_INVALID_OPERATION
;
// clWaitForEvents: block until all events in the list reach
// CL_COMPLETE (or an error status).
// NOTE(review): several interleaved lines are missing from this
// extraction (e.g. the status check guarding the
// CL_EXEC_STATUS_ERROR_... throw, and the wait/return tail after the
// soft event is created) — confirm against the upstream file.
62 clWaitForEvents(cl_uint num_evs
, const cl_event
*d_evs
) try {
// Validate and dereference the whole wait list in one go.
63 auto evs
= objs(d_evs
, num_evs
);
// All events in the list must belong to the same context.
65 for (auto &ev
: evs
) {
66 if (ev
.context() != evs
.front().context())
67 throw error(CL_INVALID_CONTEXT
);
// Thrown when an event in the list has a negative (error) status;
// the guarding condition is not visible in this extraction.
70 throw error(CL_EXEC_STATUS_ERROR_FOR_EVENTS_IN_WAIT_LIST
);
73 // Create a temporary soft event that depends on all the events in
// The temporary soft event aggregates the whole list as its
// dependency set so a single wait covers all of them.
75 auto sev
= create
<soft_event
>(evs
.front().context(), evs
, true);
// clGetEventInfo: query a scalar property of an event into the
// caller-supplied buffer.
// NOTE(review): the "switch (param) {" header, the event-handle
// lookup that defines `ev`, and the per-case "break;" lines are
// missing from this extraction — confirm against the upstream file.
87 clGetEventInfo(cl_event d_ev
, cl_event_info param
,
// property_buffer wraps the caller's (buf, size, returned-size)
// triple and handles the size bookkeeping for each as_scalar store.
88 size_t size
, void *r_buf
, size_t *r_size
) try {
89 property_buffer buf
{ r_buf
, size
, r_size
};
// Queue the event was enqueued on (as an API handle).
93 case CL_EVENT_COMMAND_QUEUE
:
94 buf
.as_scalar
<cl_command_queue
>() = desc(ev
.queue());
// Owning context (as an API handle).
97 case CL_EVENT_CONTEXT
:
98 buf
.as_scalar
<cl_context
>() = desc(ev
.context());
// Command type that produced this event (CL_COMMAND_*).
101 case CL_EVENT_COMMAND_TYPE
:
102 buf
.as_scalar
<cl_command_type
>() = ev
.command();
// Current execution status (CL_QUEUED..CL_COMPLETE or an error).
105 case CL_EVENT_COMMAND_EXECUTION_STATUS
:
106 buf
.as_scalar
<cl_int
>() = ev
.status();
// API-visible reference count.
109 case CL_EVENT_REFERENCE_COUNT
:
110 buf
.as_scalar
<cl_uint
>() = ev
.ref_count();
// Unknown param value: default case of the (not visible) switch.
114 throw error(CL_INVALID_VALUE
);
// clSetEventCallback: register pfn_notify to be invoked when the
// event reaches the given execution status.
// NOTE(review): the lambda wrapper that carries the pfn_notify call
// (its opening "[=](event &) {" and closing "});") is missing from
// this extraction — confirm against the upstream file.
124 clSetEventCallback(cl_event d_ev
, cl_int type
,
125 void (CL_CALLBACK
*pfn_notify
)(cl_event
, cl_int
, void *),
126 void *user_data
) try {
127 auto &ev
= obj(d_ev
);
// Only CL_COMPLETE callbacks are supported here; a null callback is
// likewise rejected.
129 if (!pfn_notify
|| type
!= CL_COMPLETE
)
130 throw error(CL_INVALID_VALUE
);
132 // Create a temporary soft event that depends on ev, with
133 // pfn_notify as completion action.
// The soft event's completion action (inside the not-visible lambda)
// invokes the user callback with the event handle, its status and
// the opaque user_data pointer.
134 create
<soft_event
>(ev
.context(), ref_vector
<event
> { ev
}, true,
137 pfn_notify(desc(ev
), ev
.status(), user_data
);
// clRetainEvent: increment the event's API reference count.
// NOTE(review): the body (presumably obj(d_ev).retain() and the
// return) is missing from this extraction — confirm upstream.
147 clRetainEvent(cl_event d_ev
) try {
// clReleaseEvent: decrement the event's reference count and destroy
// it when the count drops to zero (release() returning true).
// NOTE(review): the deletion/return statements under the if are
// missing from this extraction — confirm upstream.
156 clReleaseEvent(cl_event d_ev
) try {
157 if (obj(d_ev
).release())
// clEnqueueMarker (CL 1.1): enqueue a marker command and return its
// event through *rd_ev.
// NOTE(review): the queue-handle lookup defining `q` and the guard
// before the CL_INVALID_VALUE throw (presumably "if (!rd_ev)") are
// missing from this extraction — confirm upstream.
167 clEnqueueMarker(cl_command_queue d_q
, cl_event
*rd_ev
) try {
171 throw error(CL_INVALID_VALUE
);
// A marker hard event with an empty wait list: hard events are
// implicitly ordered after previously queued commands.
173 *rd_ev
= desc(new hard_event(q
, CL_COMMAND_MARKER
, {}));
// clEnqueueMarkerWithWaitList (CL 1.2): enqueue a marker that waits
// on the given dependency list; optionally returns its event.
// NOTE(review): the queue-handle lookup defining `q` and the loop's
// closing brace are missing from this extraction — confirm upstream.
182 clEnqueueMarkerWithWaitList(cl_command_queue d_q
, cl_uint num_deps
,
183 const cl_event
*d_deps
, cl_event
*rd_ev
) try {
// wait_list_tag lets objs() apply the wait-list-specific validation
// rules (e.g. a null list with zero entries is acceptable).
185 auto deps
= objs
<wait_list_tag
>(d_deps
, num_deps
);
// Every dependency must live in the same context as the queue.
187 for (auto &ev
: deps
) {
188 if (ev
.context() != q
.context())
189 throw error(CL_INVALID_CONTEXT
);
192 // Create a hard event that depends on the events in the wait list:
193 // previous commands in the same queue are implicitly serialized
194 // with respect to it -- hard events always are.
195 auto hev
= create
<hard_event
>(q
, CL_COMMAND_MARKER
, deps
);
// Hand the event back only if the caller asked for it.
197 ret_object(rd_ev
, hev
);
// clEnqueueBarrier (CL 1.1): intentionally a no-op beyond handle
// validation — the comment below records why (strict in-order
// execution of the queue already provides barrier semantics).
205 clEnqueueBarrier(cl_command_queue d_q
) try {
208 // No need to do anything, q preserves data ordering strictly.
// clEnqueueBarrierWithWaitList (CL 1.2): like the marker variant
// above but enqueues a CL_COMMAND_BARRIER, so later commands in the
// queue are ordered after the listed dependencies.
// NOTE(review): the queue-handle lookup defining `q` and the loop's
// closing brace are missing from this extraction — confirm upstream.
217 clEnqueueBarrierWithWaitList(cl_command_queue d_q
, cl_uint num_deps
,
218 const cl_event
*d_deps
, cl_event
*rd_ev
) try {
// Validate the wait list under the wait-list rules.
220 auto deps
= objs
<wait_list_tag
>(d_deps
, num_deps
);
// Every dependency must live in the same context as the queue.
222 for (auto &ev
: deps
) {
223 if (ev
.context() != q
.context())
224 throw error(CL_INVALID_CONTEXT
);
227 // Create a hard event that depends on the events in the wait list:
228 // subsequent commands in the same queue will be implicitly
229 // serialized with respect to it -- hard events always are.
230 auto hev
= create
<hard_event
>(q
, CL_COMMAND_BARRIER
, deps
);
// Hand the event back only if the caller asked for it.
232 ret_object(rd_ev
, hev
);
// clEnqueueWaitForEvents (CL 1.1): validate the (mandatory) wait
// list, then delegate to the CL 1.2 barrier-with-wait-list entry
// point, discarding the returned event (NULL out-parameter).
240 clEnqueueWaitForEvents(cl_command_queue d_q
, cl_uint num_evs
,
241 const cl_event
*d_evs
) try {
242 // The wait list is mandatory for clEnqueueWaitForEvents().
// objs() is called purely for validation here; the result is
// discarded and the delegate re-validates.
243 objs(d_evs
, num_evs
);
245 return clEnqueueBarrierWithWaitList(d_q
, num_evs
, d_evs
, NULL
);
// clGetEventProfilingInfo: query a profiling timestamp of a
// completed hard (command) event.
// NOTE(review): the "switch (param) {" header and per-case "break;"
// lines are missing from this extraction — confirm upstream.
252 clGetEventProfilingInfo(cl_event d_ev
, cl_profiling_info param
,
253 size_t size
, void *r_buf
, size_t *r_size
) try {
254 property_buffer buf
{ r_buf
, size
, r_size
};
// Only hard events carry profiling data: a failed downcast (user
// event) raises std::bad_cast, handled below.
255 hard_event
&hev
= dynamic_cast<hard_event
&>(obj(d_ev
));
// Timestamps are only defined once the command has completed.
257 if (hev
.status() != CL_COMPLETE
)
258 throw error(CL_PROFILING_INFO_NOT_AVAILABLE
);
// Time the command was queued by the host.
261 case CL_PROFILING_COMMAND_QUEUED
:
262 buf
.as_scalar
<cl_ulong
>() = hev
.time_queued();
// Time the command was submitted to the device.
265 case CL_PROFILING_COMMAND_SUBMIT
:
266 buf
.as_scalar
<cl_ulong
>() = hev
.time_submit();
// Time device execution started.
269 case CL_PROFILING_COMMAND_START
:
270 buf
.as_scalar
<cl_ulong
>() = hev
.time_start();
// Time device execution finished.
273 case CL_PROFILING_COMMAND_END
:
274 buf
.as_scalar
<cl_ulong
>() = hev
.time_end();
// Unknown param value: default case of the (not visible) switch.
278 throw error(CL_INVALID_VALUE
);
// dynamic_cast failure above: the event is a user event with no
// profiling data.
283 } catch (std::bad_cast
&e
) {
284 return CL_PROFILING_INFO_NOT_AVAILABLE
;
// A lazy<cl_ulong> timestamp that was never filled in by the
// driver also maps to "not available".
286 } catch (lazy
<cl_ulong
>::undefined_error
&e
) {
287 return CL_PROFILING_INFO_NOT_AVAILABLE
;
294 clFinish(cl_command_queue d_q
) try {
297 // Create a temporary hard event -- it implicitly depends on all
298 // the previously queued hard events.
299 auto hev
= create
<hard_event
>(q
, 0, ref_vector
<event
> {});