using namespace clover;
PUBLIC cl_event
-clCreateUserEvent(cl_context d_ctx, cl_int *errcode_ret) try {
+clCreateUserEvent(cl_context d_ctx, cl_int *r_errcode) try {
auto &ctx = obj(d_ctx);
- ret_error(errcode_ret, CL_SUCCESS);
- return new soft_event(ctx, {}, false);
+ ret_error(r_errcode, CL_SUCCESS);
+ return desc(new soft_event(ctx, {}, false));
-} catch(error &e) {
- ret_error(errcode_ret, e);
+} catch (error &e) {
+ ret_error(r_errcode, e);
return NULL;
}
PUBLIC cl_int
-clSetUserEventStatus(cl_event ev, cl_int status) {
- if (!dynamic_cast<soft_event *>(ev))
- return CL_INVALID_EVENT;
+clSetUserEventStatus(cl_event d_ev, cl_int status) try {
+ auto &sev = obj<soft_event>(d_ev);
if (status > 0)
return CL_INVALID_VALUE;
- if (ev->status() <= 0)
+ if (sev.status() <= 0)
return CL_INVALID_OPERATION;
if (status)
- ev->abort(status);
+ sev.abort(status);
else
- ev->trigger();
+ sev.trigger();
return CL_SUCCESS;
+
+} catch (error &e) {
+ return e.get();
}
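The two checks above implement the CL 1.1 rules for user events: the new execution status must be CL_COMPLETE or a negative error code, and it may be set only once. A host-side sketch of the resulting behaviour (assumes `uev` is a user event created with clCreateUserEvent):

   clSetUserEventStatus(uev, 5);            /* CL_INVALID_VALUE: status must be <= 0 */
   clSetUserEventStatus(uev, CL_COMPLETE);  /* CL_SUCCESS: first and only update */
   clSetUserEventStatus(uev, -1);           /* CL_INVALID_OPERATION: already set */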
PUBLIC cl_int
-clWaitForEvents(cl_uint num_evs, const cl_event *evs) try {
- if (!num_evs || !evs)
- throw error(CL_INVALID_VALUE);
-
- std::for_each(evs, evs + num_evs, [&](const cl_event ev) {
- if (!ev)
- throw error(CL_INVALID_EVENT);
+clWaitForEvents(cl_uint num_evs, const cl_event *d_evs) try {
+ auto evs = objs(d_evs, num_evs);
- if (&ev->ctx != &evs[0]->ctx)
- throw error(CL_INVALID_CONTEXT);
+ for (auto &ev : evs) {
+ if (&ev.ctx != &evs.front().ctx)
+ throw error(CL_INVALID_CONTEXT);
- if (ev->status() < 0)
- throw error(CL_EXEC_STATUS_ERROR_FOR_EVENTS_IN_WAIT_LIST);
- });
+ if (ev.status() < 0)
+ throw error(CL_EXEC_STATUS_ERROR_FOR_EVENTS_IN_WAIT_LIST);
+ }
// Create a temporary soft event that depends on all the events in
// the wait list
- ref_ptr<soft_event> sev = transfer(
- new soft_event(evs[0]->ctx, { evs, evs + num_evs }, true));
+ ref_ptr<soft_event> sev =
+ transfer(new soft_event(evs.front().ctx, evs, true));
// ...and wait on it.
sev->wait();
return CL_SUCCESS;
-} catch(error &e) {
+} catch (error &e) {
return e.get();
}
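Together with clCreateUserEvent and clSetUserEventStatus above, this completes the user-event path: a soft_event with no dependencies stays unsignalled until the application sets its status, and clWaitForEvents blocks on a throwaway soft_event chained to every event in the wait list. A minimal host-side sketch using only standard OpenCL 1.1 calls (assumes a valid cl_context `ctx`; error checking omitted):

   cl_int err;
   cl_event uev = clCreateUserEvent(ctx, &err);

   /* ... enqueue commands that list `uev` in their event wait lists ... */

   /* Marking the user event complete triggers the underlying soft_event,
    * which unblocks everything chained on it. */
   clSetUserEventStatus(uev, CL_COMPLETE);

   /* Blocks by waiting on a temporary soft_event that depends on `uev`. */
   clWaitForEvents(1, &uev);

   clReleaseEvent(uev);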
PUBLIC cl_int
-clGetEventInfo(cl_event ev, cl_event_info param,
+clGetEventInfo(cl_event d_ev, cl_event_info param,
size_t size, void *r_buf, size_t *r_size) try {
property_buffer buf { r_buf, size, r_size };
-
- if (!ev)
- return CL_INVALID_EVENT;
+ auto &ev = obj(d_ev);
switch (param) {
case CL_EVENT_COMMAND_QUEUE:
- buf.as_scalar<cl_command_queue>() = ev->queue();
+ buf.as_scalar<cl_command_queue>() = ev.queue();
break;
case CL_EVENT_CONTEXT:
- buf.as_scalar<cl_context>() = &ev->ctx;
+ buf.as_scalar<cl_context>() = desc(ev.ctx);
break;
case CL_EVENT_COMMAND_TYPE:
- buf.as_scalar<cl_command_type>() = ev->command();
+ buf.as_scalar<cl_command_type>() = ev.command();
break;
case CL_EVENT_COMMAND_EXECUTION_STATUS:
- buf.as_scalar<cl_int>() = ev->status();
+ buf.as_scalar<cl_int>() = ev.status();
break;
case CL_EVENT_REFERENCE_COUNT:
- buf.as_scalar<cl_uint>() = ev->ref_count();
+ buf.as_scalar<cl_uint>() = ev.ref_count();
break;
default:
throw error(CL_INVALID_VALUE);
}
PUBLIC cl_int
-clSetEventCallback(cl_event ev, cl_int type,
- void (CL_CALLBACK *pfn_event_notify)(cl_event, cl_int,
- void *),
+clSetEventCallback(cl_event d_ev, cl_int type,
+ void (CL_CALLBACK *pfn_notify)(cl_event, cl_int, void *),
void *user_data) try {
- if (!ev)
- throw error(CL_INVALID_EVENT);
+ auto &ev = obj(d_ev);
- if (!pfn_event_notify || type != CL_COMPLETE)
+ if (!pfn_notify || type != CL_COMPLETE)
throw error(CL_INVALID_VALUE);
// Create a temporary soft event that depends on ev, with
- // pfn_event_notify as completion action.
+ // pfn_notify as completion action.
ref_ptr<soft_event> sev = transfer(
- new soft_event(ev->ctx, { ev }, true,
- [=](event &) {
- ev->wait();
- pfn_event_notify(ev, ev->status(), user_data);
+ new soft_event(ev.ctx, { ev }, true,
+ [=, &ev](event &) {
+ ev.wait();
+ pfn_notify(desc(ev), ev.status(), user_data);
}));
return CL_SUCCESS;
-} catch(error &e) {
+} catch (error &e) {
return e.get();
}
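clSetEventCallback is implemented by chaining a throwaway soft_event onto `ev` and invoking the user callback as its completion action. From the application side this looks like the following sketch (assumes a valid cl_event `ev` returned by some enqueue call; only CL_COMPLETE callbacks are accepted here):

   void CL_CALLBACK
   on_complete(cl_event ev, cl_int status, void *user_data) {
      /* Runs once `ev` reaches CL_COMPLETE, or receives a negative
       * status if the associated command failed. */
   }

   /* ... */
   clSetEventCallback(ev, CL_COMPLETE, on_complete, NULL);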
PUBLIC cl_int
-clRetainEvent(cl_event ev) {
- if (!ev)
- return CL_INVALID_EVENT;
-
- ev->retain();
+clRetainEvent(cl_event d_ev) try {
+ obj(d_ev).retain();
return CL_SUCCESS;
+
+} catch (error &e) {
+ return e.get();
}
PUBLIC cl_int
-clReleaseEvent(cl_event ev) {
- if (!ev)
- return CL_INVALID_EVENT;
-
- if (ev->release())
- delete ev;
+clReleaseEvent(cl_event d_ev) try {
+ if (obj(d_ev).release())
+ delete pobj(d_ev);
return CL_SUCCESS;
+
+} catch (error &e) {
+ return e.get();
}
PUBLIC cl_int
-clEnqueueMarker(cl_command_queue q, cl_event *ev) try {
- if (!q)
+clEnqueueMarker(cl_command_queue d_q, cl_event *rd_ev) try {
+ if (!d_q)
throw error(CL_INVALID_COMMAND_QUEUE);
- if (!ev)
+ if (!rd_ev)
throw error(CL_INVALID_VALUE);
- *ev = new hard_event(*q, CL_COMMAND_MARKER, {});
+ *rd_ev = desc(new hard_event(*d_q, CL_COMMAND_MARKER, {}));
return CL_SUCCESS;
-} catch(error &e) {
+} catch (error &e) {
return e.get();
}
PUBLIC cl_int
-clEnqueueBarrier(cl_command_queue q) {
- if (!q)
+clEnqueueBarrier(cl_command_queue d_q) {
+ if (!d_q)
return CL_INVALID_COMMAND_QUEUE;
// No need to do anything, q preserves data ordering strictly.
+
return CL_SUCCESS;
}
PUBLIC cl_int
-clEnqueueWaitForEvents(cl_command_queue q, cl_uint num_evs,
- const cl_event *evs) try {
- if (!q)
+clEnqueueWaitForEvents(cl_command_queue d_q, cl_uint num_evs,
+ const cl_event *d_evs) try {
+ if (!d_q)
throw error(CL_INVALID_COMMAND_QUEUE);
- if (!num_evs || !evs)
- throw error(CL_INVALID_VALUE);
+ auto &q = *d_q;
+ auto evs = objs(d_evs, num_evs);
- std::for_each(evs, evs + num_evs, [&](const cl_event ev) {
- if (!ev)
- throw error(CL_INVALID_EVENT);
-
- if (&ev->ctx != &q->ctx)
+ for (auto &ev : evs) {
+ if (&ev.ctx != &q.ctx)
throw error(CL_INVALID_CONTEXT);
- });
+ }
// Create a hard event that depends on the events in the wait list:
// subsequent commands in the same queue will be implicitly
// serialized with respect to it -- hard events always are.
- ref_ptr<hard_event> hev = transfer(
- new hard_event(*q, 0, { evs, evs + num_evs }));
+ ref_ptr<hard_event> hev = transfer(new hard_event(q, 0, evs));
return CL_SUCCESS;
-} catch(error &e) {
+} catch (error &e) {
return e.get();
}
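Because the queue executes hard events strictly in order (the same property that makes clEnqueueBarrier above a no-op), an otherwise empty hard_event carrying the wait list as its dependencies is all the serialization required. The host-side effect, as a sketch (assumes a valid `queue`, an event `uev`, a kernel `kern` and a global size `gsz`):

   /* Every command enqueued on `queue` after this point effectively waits
    * for `uev`: the internal hard_event created here depends on it, and
    * later hard events are ordered after it by the queue. */
   clEnqueueWaitForEvents(queue, 1, &uev);
   clEnqueueNDRangeKernel(queue, kern, 1, NULL, &gsz, NULL, 0, NULL, NULL);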
PUBLIC cl_int
-clGetEventProfilingInfo(cl_event ev, cl_profiling_info param,
+clGetEventProfilingInfo(cl_event d_ev, cl_profiling_info param,
size_t size, void *r_buf, size_t *r_size) try {
property_buffer buf { r_buf, size, r_size };
- hard_event *hev = dynamic_cast<hard_event *>(ev);
-
- if (!ev)
- return CL_INVALID_EVENT;
+ hard_event &hev = dynamic_cast<hard_event &>(obj(d_ev));
- if (!hev || hev->status() != CL_COMPLETE)
- return CL_PROFILING_INFO_NOT_AVAILABLE;
+ if (hev.status() != CL_COMPLETE)
+ throw error(CL_PROFILING_INFO_NOT_AVAILABLE);
switch (param) {
case CL_PROFILING_COMMAND_QUEUED:
- buf.as_scalar<cl_ulong>() = hev->time_queued();
+ buf.as_scalar<cl_ulong>() = hev.time_queued();
break;
case CL_PROFILING_COMMAND_SUBMIT:
- buf.as_scalar<cl_ulong>() = hev->time_submit();
+ buf.as_scalar<cl_ulong>() = hev.time_submit();
break;
case CL_PROFILING_COMMAND_START:
- buf.as_scalar<cl_ulong>() = hev->time_start();
+ buf.as_scalar<cl_ulong>() = hev.time_start();
break;
case CL_PROFILING_COMMAND_END:
- buf.as_scalar<cl_ulong>() = hev->time_end();
+ buf.as_scalar<cl_ulong>() = hev.time_end();
break;
default:
throw error(CL_INVALID_VALUE);
}
return CL_SUCCESS;
+} catch (std::bad_cast &e) {
+ return CL_PROFILING_INFO_NOT_AVAILABLE;
+
} catch (lazy<cl_ulong>::undefined_error &e) {
return CL_PROFILING_INFO_NOT_AVAILABLE;
}
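Profiling data is only defined for hard events (hence the std::bad_cast handler), only once the command has completed, and only if the queue recorded timestamps, i.e. it was created with CL_QUEUE_PROFILING_ENABLE; otherwise the lazy<cl_ulong> values stay undefined and CL_PROFILING_INFO_NOT_AVAILABLE is returned. Typical host-side usage, as a sketch (assumes a completed cl_event `ev` from such a queue):

   cl_ulong t_start, t_end;
   clGetEventProfilingInfo(ev, CL_PROFILING_COMMAND_START,
                           sizeof(t_start), &t_start, NULL);
   clGetEventProfilingInfo(ev, CL_PROFILING_COMMAND_END,
                           sizeof(t_end), &t_end, NULL);
   /* Elapsed device time in nanoseconds. */
   cl_ulong elapsed_ns = t_end - t_start;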
PUBLIC cl_int
-clFinish(cl_command_queue q) try {
- if (!q)
+clFinish(cl_command_queue d_q) try {
+ if (!d_q)
throw error(CL_INVALID_COMMAND_QUEUE);
// Create a temporary hard event -- it implicitly depends on all
// the previously queued hard events.
- ref_ptr<hard_event> hev = transfer(new hard_event(*q, 0, { }));
+ ref_ptr<hard_event> hev = transfer(new hard_event(*d_q, 0, { }));
// And wait on it.
hev->wait();
return CL_SUCCESS;
-} catch(error &e) {
+} catch (error &e) {
return e.get();
}
if (&kern->prog.ctx != &q->ctx ||
any_of([&](const cl_event ev) {
- return &ev->ctx != &q->ctx;
+ return &obj(ev).ctx != &q->ctx;
}, range(deps, num_deps)))
throw error(CL_INVALID_CONTEXT);
clEnqueueNDRangeKernel(cl_command_queue q, cl_kernel kern,
cl_uint dims, const size_t *pgrid_offset,
const size_t *pgrid_size, const size_t *pblock_size,
- cl_uint num_deps, const cl_event *deps,
+ cl_uint num_deps, const cl_event *d_deps,
cl_event *ev) try {
+ auto deps = objs<wait_list_tag>(d_deps, num_deps);
auto grid_offset = opt_vector(pgrid_offset, dims, 0);
auto grid_size = opt_vector(pgrid_size, dims, 1);
auto block_size = opt_vector(pblock_size, dims, 1);
kernel_validate(q, kern, dims, pgrid_offset, pgrid_size, pblock_size,
- num_deps, deps, ev);
+ num_deps, d_deps, ev);
hard_event *hev = new hard_event(
- *q, CL_COMMAND_NDRANGE_KERNEL, { deps, deps + num_deps },
+ *q, CL_COMMAND_NDRANGE_KERNEL, deps,
kernel_op(q, kern, grid_offset, grid_size, block_size));
ret_object(ev, hev);
PUBLIC cl_int
clEnqueueTask(cl_command_queue q, cl_kernel kern,
- cl_uint num_deps, const cl_event *deps,
+ cl_uint num_deps, const cl_event *d_deps,
cl_event *ev) try {
+ auto deps = objs<wait_list_tag>(d_deps, num_deps);
const std::vector<size_t> grid_offset = { 0 };
const std::vector<size_t> grid_size = { 1 };
const std::vector<size_t> block_size = { 1 };
kernel_validate(q, kern, 1, grid_offset.data(), grid_size.data(),
- block_size.data(), num_deps, deps, ev);
+ block_size.data(), num_deps, d_deps, ev);
hard_event *hev = new hard_event(
- *q, CL_COMMAND_TASK, { deps, deps + num_deps },
+ *q, CL_COMMAND_TASK, deps,
kernel_op(q, kern, grid_offset, grid_size, block_size));
ret_object(ev, hev);
throw error(CL_INVALID_EVENT_WAIT_LIST);
if (any_of([&](const cl_event ev) {
- return &ev->ctx != &q->ctx;
+ return &obj(ev).ctx != &q->ctx;
}, range(deps, num_deps)))
throw error(CL_INVALID_CONTEXT);
}
PUBLIC cl_int
clEnqueueReadBuffer(cl_command_queue q, cl_mem obj, cl_bool blocking,
size_t offset, size_t size, void *ptr,
- cl_uint num_deps, const cl_event *deps,
+ cl_uint num_deps, const cl_event *d_deps,
cl_event *ev) try {
- validate_base(q, num_deps, deps);
+ auto deps = objs<wait_list_tag>(d_deps, num_deps);
+
+ validate_base(q, num_deps, d_deps);
validate_obj(q, obj);
if (!ptr || offset > obj->size() || offset + size > obj->size())
throw error(CL_INVALID_VALUE);
hard_event *hev = new hard_event(
- *q, CL_COMMAND_READ_BUFFER, { deps, deps + num_deps },
+ *q, CL_COMMAND_READ_BUFFER, deps,
soft_copy_op(q,
ptr, {{ 0 }}, {{ 1 }},
obj, {{ offset }}, {{ 1 }},
PUBLIC cl_int
clEnqueueWriteBuffer(cl_command_queue q, cl_mem obj, cl_bool blocking,
size_t offset, size_t size, const void *ptr,
- cl_uint num_deps, const cl_event *deps,
+ cl_uint num_deps, const cl_event *d_deps,
cl_event *ev) try {
- validate_base(q, num_deps, deps);
+ auto deps = objs<wait_list_tag>(d_deps, num_deps);
+
+ validate_base(q, num_deps, d_deps);
validate_obj(q, obj);
if (!ptr || offset > obj->size() || offset + size > obj->size())
throw error(CL_INVALID_VALUE);
hard_event *hev = new hard_event(
- *q, CL_COMMAND_WRITE_BUFFER, { deps, deps + num_deps },
+ *q, CL_COMMAND_WRITE_BUFFER, deps,
soft_copy_op(q,
obj, {{ offset }}, {{ 1 }},
ptr, {{ 0 }}, {{ 1 }},
size_t obj_row_pitch, size_t obj_slice_pitch,
size_t host_row_pitch, size_t host_slice_pitch,
void *ptr,
- cl_uint num_deps, const cl_event *deps,
+ cl_uint num_deps, const cl_event *d_deps,
cl_event *ev) try {
- validate_base(q, num_deps, deps);
+ auto deps = objs<wait_list_tag>(d_deps, num_deps);
+
+ validate_base(q, num_deps, d_deps);
validate_obj(q, obj);
if (!ptr)
throw error(CL_INVALID_VALUE);
hard_event *hev = new hard_event(
- *q, CL_COMMAND_READ_BUFFER_RECT, { deps, deps + num_deps },
+ *q, CL_COMMAND_READ_BUFFER_RECT, deps,
soft_copy_op(q,
ptr, vector(host_origin),
{{ 1, host_row_pitch, host_slice_pitch }},
size_t obj_row_pitch, size_t obj_slice_pitch,
size_t host_row_pitch, size_t host_slice_pitch,
const void *ptr,
- cl_uint num_deps, const cl_event *deps,
+ cl_uint num_deps, const cl_event *d_deps,
cl_event *ev) try {
- validate_base(q, num_deps, deps);
+ auto deps = objs<wait_list_tag>(d_deps, num_deps);
+
+ validate_base(q, num_deps, d_deps);
validate_obj(q, obj);
if (!ptr)
throw error(CL_INVALID_VALUE);
hard_event *hev = new hard_event(
- *q, CL_COMMAND_WRITE_BUFFER_RECT, { deps, deps + num_deps },
+ *q, CL_COMMAND_WRITE_BUFFER_RECT, deps,
soft_copy_op(q,
obj, vector(obj_origin),
{{ 1, obj_row_pitch, obj_slice_pitch }},
PUBLIC cl_int
clEnqueueCopyBuffer(cl_command_queue q, cl_mem src_obj, cl_mem dst_obj,
size_t src_offset, size_t dst_offset, size_t size,
- cl_uint num_deps, const cl_event *deps,
+ cl_uint num_deps, const cl_event *d_deps,
cl_event *ev) try {
- validate_base(q, num_deps, deps);
+ auto deps = objs<wait_list_tag>(d_deps, num_deps);
+
+ validate_base(q, num_deps, d_deps);
validate_obj(q, src_obj);
validate_obj(q, dst_obj);
hard_event *hev = new hard_event(
- *q, CL_COMMAND_COPY_BUFFER, { deps, deps + num_deps },
+ *q, CL_COMMAND_COPY_BUFFER, deps,
hard_copy_op(q, dst_obj, {{ dst_offset }},
src_obj, {{ src_offset }},
{{ size, 1, 1 }}));
const size_t *region,
size_t src_row_pitch, size_t src_slice_pitch,
size_t dst_row_pitch, size_t dst_slice_pitch,
- cl_uint num_deps, const cl_event *deps,
+ cl_uint num_deps, const cl_event *d_deps,
cl_event *ev) try {
- validate_base(q, num_deps, deps);
+ auto deps = objs<wait_list_tag>(d_deps, num_deps);
+
+ validate_base(q, num_deps, d_deps);
validate_obj(q, src_obj);
validate_obj(q, dst_obj);
hard_event *hev = new hard_event(
- *q, CL_COMMAND_COPY_BUFFER_RECT, { deps, deps + num_deps },
+ *q, CL_COMMAND_COPY_BUFFER_RECT, deps,
soft_copy_op(q,
dst_obj, vector(dst_origin),
{{ 1, dst_row_pitch, dst_slice_pitch }},
clEnqueueReadImage(cl_command_queue q, cl_mem obj, cl_bool blocking,
const size_t *origin, const size_t *region,
size_t row_pitch, size_t slice_pitch, void *ptr,
- cl_uint num_deps, const cl_event *deps,
+ cl_uint num_deps, const cl_event *d_deps,
cl_event *ev) try {
+ auto deps = objs<wait_list_tag>(d_deps, num_deps);
image *img = dynamic_cast<image *>(obj);
- validate_base(q, num_deps, deps);
+ validate_base(q, num_deps, d_deps);
validate_obj(q, img);
if (!ptr)
throw error(CL_INVALID_VALUE);
hard_event *hev = new hard_event(
- *q, CL_COMMAND_READ_IMAGE, { deps, deps + num_deps },
+ *q, CL_COMMAND_READ_IMAGE, deps,
soft_copy_op(q,
ptr, {},
{{ 1, row_pitch, slice_pitch }},
clEnqueueWriteImage(cl_command_queue q, cl_mem obj, cl_bool blocking,
const size_t *origin, const size_t *region,
size_t row_pitch, size_t slice_pitch, const void *ptr,
- cl_uint num_deps, const cl_event *deps,
+ cl_uint num_deps, const cl_event *d_deps,
cl_event *ev) try {
+ auto deps = objs<wait_list_tag>(d_deps, num_deps);
image *img = dynamic_cast<image *>(obj);
- validate_base(q, num_deps, deps);
+ validate_base(q, num_deps, d_deps);
validate_obj(q, img);
if (!ptr)
throw error(CL_INVALID_VALUE);
hard_event *hev = new hard_event(
- *q, CL_COMMAND_WRITE_IMAGE, { deps, deps + num_deps },
+ *q, CL_COMMAND_WRITE_IMAGE, deps,
soft_copy_op(q,
obj, vector(origin),
{{ 1, img->row_pitch(), img->slice_pitch() }},
clEnqueueCopyImage(cl_command_queue q, cl_mem src_obj, cl_mem dst_obj,
const size_t *src_origin, const size_t *dst_origin,
const size_t *region,
- cl_uint num_deps, const cl_event *deps,
+ cl_uint num_deps, const cl_event *d_deps,
cl_event *ev) try {
+ auto deps = objs<wait_list_tag>(d_deps, num_deps);
image *src_img = dynamic_cast<image *>(src_obj);
image *dst_img = dynamic_cast<image *>(dst_obj);
- validate_base(q, num_deps, deps);
+ validate_base(q, num_deps, d_deps);
validate_obj(q, src_img);
validate_obj(q, dst_img);
hard_event *hev = new hard_event(
- *q, CL_COMMAND_COPY_IMAGE, { deps, deps + num_deps },
+ *q, CL_COMMAND_COPY_IMAGE, deps,
hard_copy_op(q,
dst_obj, vector(dst_origin),
src_obj, vector(src_origin),
clEnqueueCopyImageToBuffer(cl_command_queue q, cl_mem src_obj, cl_mem dst_obj,
const size_t *src_origin, const size_t *region,
size_t dst_offset,
- cl_uint num_deps, const cl_event *deps,
+ cl_uint num_deps, const cl_event *d_deps,
cl_event *ev) try {
+ auto deps = objs<wait_list_tag>(d_deps, num_deps);
image *src_img = dynamic_cast<image *>(src_obj);
- validate_base(q, num_deps, deps);
+ validate_base(q, num_deps, d_deps);
validate_obj(q, src_img);
validate_obj(q, dst_obj);
hard_event *hev = new hard_event(
- *q, CL_COMMAND_COPY_IMAGE_TO_BUFFER, { deps, deps + num_deps },
+ *q, CL_COMMAND_COPY_IMAGE_TO_BUFFER, deps,
soft_copy_op(q,
dst_obj, {{ dst_offset }},
{{ 0, 0, 0 }},
clEnqueueCopyBufferToImage(cl_command_queue q, cl_mem src_obj, cl_mem dst_obj,
size_t src_offset,
const size_t *dst_origin, const size_t *region,
- cl_uint num_deps, const cl_event *deps,
+ cl_uint num_deps, const cl_event *d_deps,
cl_event *ev) try {
+ auto deps = objs<wait_list_tag>(d_deps, num_deps);
image *dst_img = dynamic_cast<image *>(dst_obj);
- validate_base(q, num_deps, deps);
+ validate_base(q, num_deps, d_deps);
validate_obj(q, src_obj);
validate_obj(q, dst_img);
hard_event *hev = new hard_event(
- *q, CL_COMMAND_COPY_BUFFER_TO_IMAGE, { deps, deps + num_deps },
+ *q, CL_COMMAND_COPY_BUFFER_TO_IMAGE, deps,
soft_copy_op(q,
dst_obj, vector(dst_origin),
{{ 1, dst_img->row_pitch(), dst_img->slice_pitch() }},
PUBLIC void *
clEnqueueMapBuffer(cl_command_queue q, cl_mem obj, cl_bool blocking,
cl_map_flags flags, size_t offset, size_t size,
- cl_uint num_deps, const cl_event *deps,
+ cl_uint num_deps, const cl_event *d_deps,
cl_event *ev, cl_int *errcode_ret) try {
- validate_base(q, num_deps, deps);
+ auto deps = objs<wait_list_tag>(d_deps, num_deps);
+ validate_base(q, num_deps, d_deps);
validate_obj(q, obj);
if (offset > obj->size() || offset + size > obj->size())
*q, flags, blocking, {{ offset }}, {{ size }});
ret_object(ev, new hard_event(*q, CL_COMMAND_MAP_BUFFER,
- { deps, deps + num_deps }));
+ deps));
ret_error(errcode_ret, CL_SUCCESS);
return map;
cl_map_flags flags,
const size_t *origin, const size_t *region,
size_t *row_pitch, size_t *slice_pitch,
- cl_uint num_deps, const cl_event *deps,
+ cl_uint num_deps, const cl_event *d_deps,
cl_event *ev, cl_int *errcode_ret) try {
+ auto deps = objs<wait_list_tag>(d_deps, num_deps);
image *img = dynamic_cast<image *>(obj);
- validate_base(q, num_deps, deps);
+ validate_base(q, num_deps, d_deps);
validate_obj(q, img);
void *map = obj->resource(q).add_map(
*q, flags, blocking, vector(origin), vector(region));
ret_object(ev, new hard_event(*q, CL_COMMAND_MAP_IMAGE,
- { deps, deps + num_deps }));
+ deps));
ret_error(errcode_ret, CL_SUCCESS);
return map;
PUBLIC cl_int
clEnqueueUnmapMemObject(cl_command_queue q, cl_mem obj, void *ptr,
- cl_uint num_deps, const cl_event *deps,
+ cl_uint num_deps, const cl_event *d_deps,
cl_event *ev) try {
- validate_base(q, num_deps, deps);
+ auto deps = objs<wait_list_tag>(d_deps, num_deps);
+
+ validate_base(q, num_deps, d_deps);
validate_obj(q, obj);
hard_event *hev = new hard_event(
- *q, CL_COMMAND_UNMAP_MEM_OBJECT, { deps, deps + num_deps },
+ *q, CL_COMMAND_UNMAP_MEM_OBJECT, deps,
[=](event &) {
obj->resource(q).del_map(ptr);
});
typedef struct _cl_command_queue command_queue;
class context;
class device;
- typedef struct _cl_event event;
+ class event;
class hard_event;
class soft_event;
typedef struct _cl_kernel kernel;
//
#include "core/event.hpp"
-#include "util/algorithm.hpp"
#include "pipe/p_screen.h"
using namespace clover;
-_cl_event::_cl_event(clover::context &ctx,
- std::vector<clover::event *> deps,
- action action_ok, action action_fail) :
+event::event(context &ctx, const ref_vector<event> &deps,
+ action action_ok, action action_fail) :
ctx(ctx), _status(0), wait_count(1),
action_ok(action_ok), action_fail(action_fail) {
- for (auto ev : deps)
- ev->chain(this);
+ for (auto &ev : deps)
+ ev.chain(this);
}
-_cl_event::~_cl_event() {
+event::~event() {
}
void
-_cl_event::trigger() {
+event::trigger() {
if (!--wait_count) {
action_ok(*this);
}
void
-_cl_event::abort(cl_int status) {
+event::abort(cl_int status) {
_status = status;
action_fail(*this);
}
bool
-_cl_event::signalled() const {
+event::signalled() const {
return !wait_count;
}
void
-_cl_event::chain(clover::event *ev) {
+event::chain(event *ev) {
if (wait_count) {
ev->wait_count++;
_chain.push_back(ev);
ev->deps.push_back(this);
}
-hard_event::hard_event(clover::command_queue &q, cl_command_type command,
- std::vector<clover::event *> deps, action action) :
- _cl_event(q.ctx, deps, profile(q, action), [](event &ev){}),
+hard_event::hard_event(command_queue &q, cl_command_type command,
+ const ref_vector<event> &deps, action action) :
+ event(q.ctx, deps, profile(q, action), [](event &ev){}),
_queue(q), _command(command), _fence(NULL) {
if (q.profiling_enabled())
_time_queued = timestamp::current(q);
return CL_COMPLETE;
}
-cl_command_queue
+command_queue *
hard_event::queue() const {
return &_queue;
}
}
}
-soft_event::soft_event(clover::context &ctx,
- std::vector<clover::event *> deps,
+soft_event::soft_event(context &ctx, const ref_vector<event> &deps,
bool _trigger, action action) :
- _cl_event(ctx, deps, action, action) {
+ event(ctx, deps, action, action) {
if (_trigger)
trigger();
}
return CL_COMPLETE;
}
-cl_command_queue
+command_queue *
soft_event::queue() const {
return NULL;
}
#include "util/lazy.hpp"
namespace clover {
- typedef struct _cl_event event;
-}
+ ///
+ /// Class that represents a task that might be executed
+ /// asynchronously at some point in the future.
+ ///
+ /// An event consists of a list of dependencies, a boolean
+ /// signalled() flag, and an associated task. An event is
+ /// considered signalled as soon as all its dependencies (if any)
+ /// are signalled as well, and the trigger() method is called; at
+ /// that point the associated task will be started through the
+ /// specified \a action_ok. If the abort() method is called
+ /// instead, the specified \a action_fail is executed and the
+ /// associated task will never be started. Dependent events will
+ /// be aborted recursively.
+ ///
+ /// The execution status of the associated task can be queried
+ /// using the status() method, and completion can be waited for
+ /// using the wait() method.
+ ///
+ class event : public ref_counter, public _cl_event {
+ public:
+ typedef std::function<void (event &)> action;
-///
-/// Class that represents a task that might be executed asynchronously
-/// at some point in the future.
-///
-/// An event consists of a list of dependencies, a boolean signalled()
-/// flag, and an associated task. An event is considered signalled as
-/// soon as all its dependencies (if any) are signalled as well, and
-/// the trigger() method is called; at that point the associated task
-/// will be started through the specified \a action_ok. If the
-/// abort() method is called instead, the specified \a action_fail is
-/// executed and the associated task will never be started. Dependent
-/// events will be aborted recursively.
-///
-/// The execution status of the associated task can be queried using
-/// the status() method, and it can be waited for completion using the
-/// wait() method.
-///
-struct _cl_event : public clover::ref_counter {
-public:
- typedef std::function<void (clover::event &)> action;
-
- _cl_event(clover::context &ctx, std::vector<clover::event *> deps,
- action action_ok, action action_fail);
- virtual ~_cl_event();
-
- void trigger();
- void abort(cl_int status);
- bool signalled() const;
-
- virtual cl_int status() const = 0;
- virtual cl_command_queue queue() const = 0;
- virtual cl_command_type command() const = 0;
- virtual void wait() const = 0;
-
- clover::context &ctx;
-
-protected:
- void chain(clover::event *ev);
-
- cl_int _status;
- std::vector<clover::ref_ptr<clover::event>> deps;
-
-private:
- unsigned wait_count;
- action action_ok;
- action action_fail;
- std::vector<clover::ref_ptr<clover::event>> _chain;
-};
+ event(context &ctx, const ref_vector<event> &deps,
+ action action_ok, action action_fail);
+ virtual ~event();
+
+ void trigger();
+ void abort(cl_int status);
+ bool signalled() const;
+
+ virtual cl_int status() const = 0;
+ virtual command_queue *queue() const = 0;
+ virtual cl_command_type command() const = 0;
+ virtual void wait() const = 0;
+
+ context &ctx;
+
+ protected:
+ void chain(event *ev);
+
+ cl_int _status;
+ std::vector<ref_ptr<event>> deps;
+
+ private:
+ unsigned wait_count;
+ action action_ok;
+ action action_fail;
+ std::vector<ref_ptr<event>> _chain;
+ };
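The API entry points earlier in this patch are thin wrappers over this interface; the recurring pattern is a reference-counted soft_event whose action fires when the event becomes signalled. A sketch of that pattern, lifted from the call sites above rather than adding any new API (assumes a clover::context `ctx` in scope):

   ref_ptr<soft_event> sev = transfer(
      new soft_event(ctx, {}, false,     // no dependencies, not yet triggered
                     [](event &e) {
                        // completion action: runs once the event is signalled
                     }));

   sev->trigger();                       // status() now reports CL_COMPLETE
   // sev->abort(CL_OUT_OF_RESOURCES);   // ...or fail it with a negative status instead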
-namespace clover {
///
/// Class that represents a task executed by a command queue.
///
///
class hard_event : public event {
public:
- hard_event(clover::command_queue &q, cl_command_type command,
- std::vector<clover::event *> deps,
+ hard_event(command_queue &q, cl_command_type command,
+ const ref_vector<event> &deps,
action action = [](event &){});
~hard_event();
virtual cl_int status() const;
- virtual cl_command_queue queue() const;
+ virtual command_queue *queue() const;
virtual cl_command_type command() const;
virtual void wait() const;
const lazy<cl_ulong> &time_start() const;
const lazy<cl_ulong> &time_end() const;
- friend class ::_cl_command_queue;
+ friend struct ::_cl_command_queue;
private:
virtual void fence(pipe_fence_handle *fence);
action profile(command_queue &q, const action &action) const;
- clover::command_queue &_queue;
+ command_queue &_queue;
cl_command_type _command;
pipe_fence_handle *_fence;
lazy<cl_ulong> _time_queued, _time_submit, _time_start, _time_end;
///
class soft_event : public event {
public:
- soft_event(clover::context &ctx, std::vector<clover::event *> deps,
+ soft_event(context &ctx, const ref_vector<event> &deps,
bool trigger, action action = [](event &){});
virtual cl_int status() const;
- virtual cl_command_queue queue() const;
+ virtual command_queue *queue() const;
virtual cl_command_type command() const;
virtual void wait() const;
};
struct _cl_device_id :
public clover::descriptor<clover::device, _cl_device_id> {};
+struct _cl_event :
+ public clover::descriptor<clover::event, _cl_event> {};
+
struct _cl_platform_id :
public clover::descriptor<clover::platform, _cl_platform_id> {};
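For reference, the obj()/objs()/desc()/pobj() helpers used throughout the API hunks above get their types from these descriptor bases: each _cl_* handle is an empty struct that the corresponding clover class inherits from, so converting between a C handle and a C++ reference is just a checked cast. A deliberately simplified illustration of the idiom with hypothetical names (clover's real helpers additionally throw the matching CL_INVALID_* clover::error on a bad handle, and objs() maps a whole wait list at once):

   #include <stdexcept>

   struct api_handle {                      // stand-in for e.g. _cl_event
      virtual ~api_handle() {}
   };

   class core_object : public api_handle {  // stand-in for clover::event
   public:
      int status;
   };

   // Roughly what desc() does: C++ object -> opaque API handle.
   inline api_handle *
   to_handle(core_object &o) {
      return &o;
   }

   // Roughly what obj() does: API handle -> validated C++ reference
   // (the real helper reports failure as a CL error code instead).
   inline core_object &
   from_handle(api_handle *d) {
      core_object *o = dynamic_cast<core_object *>(d);
      if (!o)
         throw std::invalid_argument("bad handle");
      return *o;
   }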