auto evs = objs(d_evs, num_evs);
for (auto &ev : evs) {
- if (ev.ctx != evs.front().ctx)
+ if (ev.context() != evs.front().context())
throw error(CL_INVALID_CONTEXT);
if (ev.status() < 0)
// Create a temporary soft event that depends on all the events in
// the wait list
intrusive_ptr<soft_event> sev =
- transfer(new soft_event(evs.front().ctx, evs, true));
+ transfer(new soft_event(evs.front().context(), evs, true));
// ...and wait on it.
sev->wait();
break;
case CL_EVENT_CONTEXT:
- buf.as_scalar<cl_context>() = desc(ev.ctx);
+ buf.as_scalar<cl_context>() = desc(ev.context());
break;
case CL_EVENT_COMMAND_TYPE:
// Create a temporary soft event that depends on ev, with
// pfn_notify as completion action.
intrusive_ptr<soft_event> sev = transfer(
- new soft_event(ev.ctx, { ev }, true,
+ new soft_event(ev.context(), { ev }, true,
[=, &ev](event &) {
ev.wait();
pfn_notify(desc(ev), ev.status(), user_data);
auto evs = objs(d_evs, num_evs);
for (auto &ev : evs) {
- if (ev.ctx != q.ctx)
- throw error(CL_INVALID_CONTEXT);
+ if (ev.context() != q.context())
+ throw error(CL_INVALID_CONTEXT);
}
// Create a hard event that depends on the events in the wait list:
break;
case CL_KERNEL_CONTEXT:
- buf.as_scalar<cl_context>() = desc(kern.prog.ctx);
+ buf.as_scalar<cl_context>() = desc(kern.program().context());
break;
case CL_KERNEL_PROGRAM:
- buf.as_scalar<cl_program>() = desc(kern.prog);
+ buf.as_scalar<cl_program>() = desc(kern.program());
break;
default:
size_t size, void *r_buf, size_t *r_size) try {
property_buffer buf { r_buf, size, r_size };
auto &kern = obj(d_kern);
- auto &dev = (d_dev ? *pobj(d_dev) : unique(kern.prog.devices()));
+ auto &dev = (d_dev ? *pobj(d_dev) : unique(kern.program().devices()));
- if (!count(dev, kern.prog.devices()))
+ if (!count(dev, kern.program().devices()))
throw error(CL_INVALID_DEVICE);
switch (param) {
void
validate_common(const command_queue &q, kernel &kern,
const ref_vector<event> &deps) {
- if (kern.prog.ctx != q.ctx ||
+ if (kern.program().context() != q.context() ||
any_of([&](const event &ev) {
- return ev.ctx != q.ctx;
+ return ev.context() != q.context();
}, deps))
throw error(CL_INVALID_CONTEXT);
}, kern.args()))
throw error(CL_INVALID_KERNEL_ARGS);
- if (!count(q.dev, kern.prog.devices()))
+ if (!count(q.device(), kern.program().devices()))
throw error(CL_INVALID_PROGRAM_EXECUTABLE);
}
const size_t *d_grid_size) {
auto grid_size = range(d_grid_size, dims);
- if (dims < 1 || dims > q.dev.max_block_size().size())
+ if (dims < 1 || dims > q.device().max_block_size().size())
throw error(CL_INVALID_WORK_DIMENSION);
if (!d_grid_size || any_of(is_zero(), grid_size))
auto block_size = range(d_block_size, dims);
if (any_of(is_zero(), block_size) ||
- any_of(greater(), block_size, q.dev.max_block_size()))
+ any_of(greater(), block_size, q.device().max_block_size()))
throw error(CL_INVALID_WORK_ITEM_SIZE);
if (any_of(modulus(), grid_size, block_size))
throw error(CL_INVALID_WORK_GROUP_SIZE);
if (fold(multiplies(), 1u, block_size) >
- q.dev.max_threads_per_block())
+ q.device().max_threads_per_block())
throw error(CL_INVALID_WORK_GROUP_SIZE);
return block_size;
break;
case CL_MEM_CONTEXT:
- buf.as_scalar<cl_context>() = desc(mem.ctx);
+ buf.as_scalar<cl_context>() = desc(mem.context());
break;
case CL_MEM_ASSOCIATED_MEMOBJECT: {
sub_buffer *sub = dynamic_cast<sub_buffer *>(&mem);
- buf.as_scalar<cl_mem>() = (sub ? desc(sub->parent) : NULL);
+ buf.as_scalar<cl_mem>() = (sub ? desc(sub->parent()) : NULL);
break;
}
case CL_MEM_OFFSET: {
void *user_data) try {
auto &prog = obj(d_prog);
auto devs = (d_devs ? objs(d_devs, num_devs) :
- ref_vector<device>(prog.ctx.devs()));
+ ref_vector<device>(prog.context().devs()));
auto opts = (p_opts ? p_opts : "");
if (bool(num_devs) != bool(d_devs) ||
throw error(CL_INVALID_VALUE);
if (any_of([&](const device &dev) {
- return !count(dev, prog.ctx.devs());
+ return !count(dev, prog.context().devs());
}, devs))
throw error(CL_INVALID_DEVICE);
break;
case CL_PROGRAM_CONTEXT:
- buf.as_scalar<cl_context>() = desc(prog.ctx);
+ buf.as_scalar<cl_context>() = desc(prog.context());
break;
case CL_PROGRAM_NUM_DEVICES:
- buf.as_scalar<cl_uint>() = prog.devices().size() ?
- prog.devices().size() :
- prog.ctx.devs().size();
+ buf.as_scalar<cl_uint>() = (prog.devices().size() ?
+ prog.devices().size() :
+ prog.context().devs().size());
break;
case CL_PROGRAM_DEVICES:
- buf.as_vector<cl_device_id>() = prog.devices().size() ?
- descs(prog.devices()) :
- descs(prog.ctx.devs());
+ buf.as_vector<cl_device_id>() = (prog.devices().size() ?
+ descs(prog.devices()) :
+ descs(prog.context().devs()));
break;
case CL_PROGRAM_SOURCE:
auto &prog = obj(d_prog);
auto &dev = obj(d_dev);
- if (!count(dev, prog.ctx.devs()))
+ if (!count(dev, prog.context().devs()))
return CL_INVALID_DEVICE;
switch (param) {
switch (param) {
case CL_QUEUE_CONTEXT:
- buf.as_scalar<cl_context>() = desc(q.ctx);
+ buf.as_scalar<cl_context>() = desc(q.context());
break;
case CL_QUEUE_DEVICE:
- buf.as_scalar<cl_device_id>() = desc(q.dev);
+ buf.as_scalar<cl_device_id>() = desc(q.device());
break;
case CL_QUEUE_REFERENCE_COUNT:
break;
case CL_SAMPLER_CONTEXT:
- buf.as_scalar<cl_context>() = desc(s.ctx);
+ buf.as_scalar<cl_context>() = desc(s.context());
break;
case CL_SAMPLER_NORMALIZED_COORDS:
#include "api/util.hpp"
#include "core/event.hpp"
-#include "core/resource.hpp"
+#include "core/memory.hpp"
using namespace clover;
validate_common(command_queue &q,
const ref_vector<event> &deps) {
if (any_of([&](const event &ev) {
- return &ev.ctx != &q.ctx;
+ return ev.context() != q.context();
}, deps))
throw error(CL_INVALID_CONTEXT);
}
void
validate_object(command_queue &q, buffer &mem, const vector_t &origin,
const vector_t &pitch, const vector_t ®ion) {
- if (mem.ctx != q.ctx)
+ if (mem.context() != q.context())
throw error(CL_INVALID_CONTEXT);
// The region must fit within the specified pitch,
const vector_t &orig, const vector_t ®ion) {
vector_t size = { img.width(), img.height(), img.depth() };
- if (img.ctx != q.ctx)
+ if (img.context() != q.context())
throw error(CL_INVALID_CONTEXT);
if (any_of(greater(), orig + region, size))
context::context(const property_list &props,
const ref_vector<device> &devs) :
- _props(props), _devs(map(addresses(), devs)) {
+ _props(props), _devs(devs) {
}
bool
context::device_range
context::devs() const {
- return map(derefs(), _devs);
+ return map(evals(), _devs);
}
namespace clover {
class context : public ref_counter, public _cl_context {
private:
- typedef adaptor_range<derefs, const std::vector<device *> &> device_range;
+ typedef adaptor_range<
+ evals, const std::vector<intrusive_ref<device>> &
+ > device_range;
typedef clover::property_list<cl_context_properties> property_list;
public:
private:
property_list _props;
- const std::vector<clover::device *> _devs;
+ const std::vector<intrusive_ref<device>> _devs;
};
}
//
#include "core/device.hpp"
+#include "core/platform.hpp"
#include "pipe/p_screen.h"
#include "pipe/p_state.h"
using namespace clover;
-event::event(context &ctx, const ref_vector<event> &deps,
+event::event(clover::context &ctx, const ref_vector<event> &deps,
action action_ok, action action_fail) :
- ctx(ctx), _status(0), wait_count(1),
+ context(ctx), _status(0), wait_count(1),
action_ok(action_ok), action_fail(action_fail) {
for (auto &ev : deps)
- ev.chain(this);
+ ev.chain(*this);
}
event::~event() {
action_ok(*this);
while (!_chain.empty()) {
- _chain.back()->trigger();
+ _chain.back()().trigger();
_chain.pop_back();
}
}
action_fail(*this);
while (!_chain.empty()) {
- _chain.back()->abort(status);
+ _chain.back()().abort(status);
_chain.pop_back();
}
}
}
void
-event::chain(event *ev) {
+event::chain(event &ev) {
if (wait_count) {
- ev->wait_count++;
+ ev.wait_count++;
_chain.push_back(ev);
}
- ev->deps.push_back(this);
+ ev.deps.push_back(*this);
}
hard_event::hard_event(command_queue &q, cl_command_type command,
const ref_vector<event> &deps, action action) :
- event(q.ctx, deps, profile(q, action), [](event &ev){}),
+ event(q.context(), deps, profile(q, action), [](event &ev){}),
_queue(q), _command(command), _fence(NULL) {
if (q.profiling_enabled())
_time_queued = timestamp::current(q);
- q.sequence(this);
+ q.sequence(*this);
trigger();
}
hard_event::~hard_event() {
- pipe_screen *screen = queue()->dev.pipe;
+ pipe_screen *screen = queue()->device().pipe;
screen->fence_reference(screen, &_fence, NULL);
}
cl_int
hard_event::status() const {
- pipe_screen *screen = queue()->dev.pipe;
+ pipe_screen *screen = queue()->device().pipe;
if (_status < 0)
return _status;
command_queue *
hard_event::queue() const {
- return &_queue;
+ return &_queue();
}
cl_command_type
void
hard_event::wait() const {
- pipe_screen *screen = queue()->dev.pipe;
+ pipe_screen *screen = queue()->device().pipe;
if (status() == CL_QUEUED)
queue()->flush();
void
hard_event::fence(pipe_fence_handle *fence) {
- pipe_screen *screen = queue()->dev.pipe;
+ pipe_screen *screen = queue()->device().pipe;
screen->fence_reference(screen, &_fence, fence);
}
}
}
-soft_event::soft_event(context &ctx, const ref_vector<event> &deps,
+soft_event::soft_event(clover::context &ctx, const ref_vector<event> &deps,
bool _trigger, action action) :
event(ctx, deps, action, action) {
if (_trigger)
return _status;
else if (!signalled() ||
- any_of([](const intrusive_ptr<event> &ev) {
- return ev->status() != CL_COMPLETE;
+ any_of([](const event &ev) {
+ return ev.status() != CL_COMPLETE;
}, deps))
return CL_SUBMITTED;
void
soft_event::wait() const {
- for (auto ev : deps)
- ev->wait();
+ for (event &ev : deps)
+ ev.wait();
if (status() != CL_COMPLETE)
throw error(CL_EXEC_STATUS_ERROR_FOR_EVENTS_IN_WAIT_LIST);
public:
typedef std::function<void (event &)> action;
- event(context &ctx, const ref_vector<event> &deps,
+ event(clover::context &ctx, const ref_vector<event> &deps,
action action_ok, action action_fail);
virtual ~event();
virtual cl_command_type command() const = 0;
virtual void wait() const = 0;
- context &ctx;
+ const intrusive_ref<clover::context> context;
protected:
- void chain(event *ev);
+ void chain(event &ev);
cl_int _status;
- std::vector<intrusive_ptr<event>> deps;
+ std::vector<intrusive_ref<event>> deps;
private:
unsigned wait_count;
action action_ok;
action action_fail;
- std::vector<intrusive_ptr<event>> _chain;
+ std::vector<intrusive_ref<event>> _chain;
};
///
virtual void fence(pipe_fence_handle *fence);
action profile(command_queue &q, const action &action) const;
- command_queue &_queue;
+ const intrusive_ref<command_queue> _queue;
cl_command_type _command;
pipe_fence_handle *_fence;
lazy<cl_ulong> _time_queued, _time_submit, _time_start, _time_end;
///
class soft_event : public event {
public:
- soft_event(context &ctx, const ref_vector<event> &deps,
+ soft_event(clover::context &ctx, const ref_vector<event> &deps,
bool trigger, action action = [](event &){});
virtual cl_int status() const;
using namespace clover;
-kernel::kernel(program &prog, const std::string &name,
+kernel::kernel(clover::program &prog, const std::string &name,
const std::vector<module::argument> &margs) :
- prog(prog), _name(name), exec(*this) {
+ program(prog), _name(name), exec(*this) {
for (auto &marg : margs) {
if (marg.type == module::argument::scalar)
_args.emplace_back(new scalar_argument(marg.size));
static inline std::vector<uint>
pad_vector(command_queue &q, const V &v, uint x) {
std::vector<uint> w { v.begin(), v.end() };
- w.resize(q.dev.max_block_size().size(), x);
+ w.resize(q.device().max_block_size().size(), x);
return w;
}
const std::vector<size_t> &grid_offset,
const std::vector<size_t> &grid_size,
const std::vector<size_t> &block_size) {
- const auto m = prog.binary(q.dev);
+ const auto m = program().binary(q.device());
const auto reduced_grid_size =
map(divides(), grid_size, block_size);
void *st = exec.bind(&q);
kernel::optimal_block_size(const command_queue &q,
const std::vector<size_t> &grid_size) const {
return factor::find_grid_optimal_factor<size_t>(
- q.dev.max_threads_per_block(), q.dev.max_block_size(),
+ q.device().max_threads_per_block(), q.device().max_block_size(),
grid_size);
}
const module &
kernel::module(const command_queue &q) const {
- return prog.binary(q.dev);
+ return program().binary(q.device());
}
kernel::exec_context::exec_context(kernel &kern) :
}
void *
-kernel::exec_context::bind(command_queue *_q) {
+kernel::exec_context::bind(intrusive_ptr<command_queue> _q) {
std::swap(q, _q);
// Bind kernel arguments.
- auto &m = kern.prog.binary(q->dev);
+ auto &m = kern.program().binary(q->device());
auto margs = find(name_equals(kern.name()), m.syms).args;
auto msec = find(type_equals(module::section::text), m.secs);
auto w = v;
extend(w, marg.ext_type, marg.target_size);
- byteswap(w, ctx.q->dev.endianness());
+ byteswap(w, ctx.q->device().endianness());
align(ctx.input, marg.target_align);
insert(ctx.input, w);
}
auto v = bytes(ctx.mem_local);
extend(v, module::argument::zero_ext, marg.target_size);
- byteswap(v, ctx.q->dev.endianness());
+ byteswap(v, ctx.q->device().endianness());
align(ctx.input, marg.target_align);
insert(ctx.input, v);
auto v = bytes(ctx.resources.size() << 24);
extend(v, module::argument::zero_ext, marg.target_size);
- byteswap(v, ctx.q->dev.endianness());
+ byteswap(v, ctx.q->device().endianness());
insert(ctx.input, v);
st = buf->resource(*ctx.q).bind_surface(*ctx.q, false);
auto v = bytes(ctx.sviews.size());
extend(v, module::argument::zero_ext, marg.target_size);
- byteswap(v, ctx.q->dev.endianness());
+ byteswap(v, ctx.q->device().endianness());
align(ctx.input, marg.target_align);
insert(ctx.input, v);
auto v = bytes(ctx.resources.size());
extend(v, module::argument::zero_ext, marg.target_size);
- byteswap(v, ctx.q->dev.endianness());
+ byteswap(v, ctx.q->device().endianness());
align(ctx.input, marg.target_align);
insert(ctx.input, v);
exec_context &
operator=(const exec_context &) = delete;
- void *bind(command_queue *q);
+ void *bind(intrusive_ptr<command_queue> _q);
void unbind();
kernel &kern;
- command_queue *q;
+ intrusive_ptr<command_queue> q;
std::vector<uint8_t> input;
std::vector<void *> samplers;
> const_argument_range;
public:
- kernel(program &prog, const std::string &name,
+ kernel(clover::program &prog, const std::string &name,
const std::vector<clover::module::argument> &margs);
kernel(const kernel &kern) = delete;
argument_range args();
const_argument_range args() const;
- program &prog;
+ const intrusive_ref<clover::program> program;
private:
const clover::module &module(const command_queue &q) const;
using namespace clover;
-memory_obj::memory_obj(context &ctx, cl_mem_flags flags,
+memory_obj::memory_obj(clover::context &ctx, cl_mem_flags flags,
size_t size, void *host_ptr) :
- ctx(ctx), _flags(flags),
+ context(ctx), _flags(flags),
_size(size), _host_ptr(host_ptr),
_destroy_notify([]{}) {
if (flags & (CL_MEM_COPY_HOST_PTR | CL_MEM_USE_HOST_PTR))
return _host_ptr;
}
-buffer::buffer(context &ctx, cl_mem_flags flags,
+buffer::buffer(clover::context &ctx, cl_mem_flags flags,
size_t size, void *host_ptr) :
memory_obj(ctx, flags, size, host_ptr) {
}
return CL_MEM_OBJECT_BUFFER;
}
-root_buffer::root_buffer(context &ctx, cl_mem_flags flags,
+root_buffer::root_buffer(clover::context &ctx, cl_mem_flags flags,
size_t size, void *host_ptr) :
buffer(ctx, flags, size, host_ptr) {
}
resource &
root_buffer::resource(command_queue &q) {
// Create a new resource if there's none for this device yet.
- if (!resources.count(&q.dev)) {
+ if (!resources.count(&q.device())) {
auto r = (!resources.empty() ?
- new root_resource(q.dev, *this, *resources.begin()->second) :
- new root_resource(q.dev, *this, q, data));
+ new root_resource(q.device(), *this,
+ *resources.begin()->second) :
+ new root_resource(q.device(), *this, q, data));
- resources.insert(std::make_pair(&q.dev,
+ resources.insert(std::make_pair(&q.device(),
std::unique_ptr<root_resource>(r)));
data.clear();
}
- return *resources.find(&q.dev)->second;
+ return *resources.find(&q.device())->second;
}
sub_buffer::sub_buffer(root_buffer &parent, cl_mem_flags flags,
size_t offset, size_t size) :
- buffer(parent.ctx, flags, size,
+ buffer(parent.context(), flags, size,
(char *)parent.host_ptr() + offset),
parent(parent), _offset(offset) {
}
resource &
sub_buffer::resource(command_queue &q) {
// Create a new resource if there's none for this device yet.
- if (!resources.count(&q.dev)) {
- auto r = new sub_resource(parent.resource(q), {{ offset() }});
+ if (!resources.count(&q.device())) {
+ auto r = new sub_resource(parent().resource(q), {{ offset() }});
- resources.insert(std::make_pair(&q.dev,
+ resources.insert(std::make_pair(&q.device(),
std::unique_ptr<sub_resource>(r)));
}
- return *resources.find(&q.dev)->second;
+ return *resources.find(&q.device())->second;
}
size_t
return _offset;
}
-image::image(context &ctx, cl_mem_flags flags,
+image::image(clover::context &ctx, cl_mem_flags flags,
const cl_image_format *format,
size_t width, size_t height, size_t depth,
size_t row_pitch, size_t slice_pitch, size_t size,
resource &
image::resource(command_queue &q) {
// Create a new resource if there's none for this device yet.
- if (!resources.count(&q.dev)) {
+ if (!resources.count(&q.device())) {
auto r = (!resources.empty() ?
- new root_resource(q.dev, *this, *resources.begin()->second) :
- new root_resource(q.dev, *this, q, data));
+ new root_resource(q.device(), *this,
+ *resources.begin()->second) :
+ new root_resource(q.device(), *this, q, data));
- resources.insert(std::make_pair(&q.dev,
+ resources.insert(std::make_pair(&q.device(),
std::unique_ptr<root_resource>(r)));
data.clear();
}
- return *resources.find(&q.dev)->second;
+ return *resources.find(&q.device())->second;
}
cl_image_format
return _slice_pitch;
}
-image2d::image2d(context &ctx, cl_mem_flags flags,
+image2d::image2d(clover::context &ctx, cl_mem_flags flags,
const cl_image_format *format, size_t width,
size_t height, size_t row_pitch,
void *host_ptr) :
return CL_MEM_OBJECT_IMAGE2D;
}
-image3d::image3d(context &ctx, cl_mem_flags flags,
+image3d::image3d(clover::context &ctx, cl_mem_flags flags,
const cl_image_format *format,
size_t width, size_t height, size_t depth,
size_t row_pitch, size_t slice_pitch,
#include "core/object.hpp"
#include "core/queue.hpp"
+#include "core/resource.hpp"
namespace clover {
- class resource;
- class sub_resource;
-
class memory_obj : public ref_counter, public _cl_mem {
protected:
- memory_obj(context &ctx, cl_mem_flags flags,
+ memory_obj(clover::context &ctx, cl_mem_flags flags,
size_t size, void *host_ptr);
memory_obj(const memory_obj &obj) = delete;
size_t size() const;
void *host_ptr() const;
- context &ctx;
+ const intrusive_ref<clover::context> context;
private:
cl_mem_flags _flags;
class buffer : public memory_obj {
protected:
- buffer(context &ctx, cl_mem_flags flags,
+ buffer(clover::context &ctx, cl_mem_flags flags,
size_t size, void *host_ptr);
public:
class root_buffer : public buffer {
public:
- root_buffer(context &ctx, cl_mem_flags flags,
+ root_buffer(clover::context &ctx, cl_mem_flags flags,
size_t size, void *host_ptr);
virtual clover::resource &resource(command_queue &q);
virtual clover::resource &resource(command_queue &q);
size_t offset() const;
- root_buffer &parent;
+ const intrusive_ref<root_buffer> parent;
private:
size_t _offset;
class image : public memory_obj {
protected:
- image(context &ctx, cl_mem_flags flags,
+ image(clover::context &ctx, cl_mem_flags flags,
const cl_image_format *format,
size_t width, size_t height, size_t depth,
size_t row_pitch, size_t slice_pitch, size_t size,
class image2d : public image {
public:
- image2d(context &ctx, cl_mem_flags flags,
+ image2d(clover::context &ctx, cl_mem_flags flags,
const cl_image_format *format, size_t width,
size_t height, size_t row_pitch,
void *host_ptr);
class image3d : public image {
public:
- image3d(context &ctx, cl_mem_flags flags,
+ image3d(clover::context &ctx, cl_mem_flags flags,
const cl_image_format *format,
size_t width, size_t height, size_t depth,
size_t row_pitch, size_t slice_pitch,
using namespace clover;
-platform::platform() : adaptor_range(derefs(), devs) {
+platform::platform() : adaptor_range(evals(), devs) {
int n = pipe_loader_probe(NULL, 0);
std::vector<pipe_loader_device *> ldevs(n);
for (pipe_loader_device *ldev : ldevs) {
try {
- devs.push_back(transfer(new device(*this, ldev)));
+ devs.push_back(transfer(*new device(*this, ldev)));
} catch (error &) {
pipe_loader_release(&ldev, 1);
}
namespace clover {
class platform : public _cl_platform_id,
public adaptor_range<
- derefs, std::vector<intrusive_ptr<device>> &> {
+ evals, std::vector<intrusive_ref<device>> &> {
public:
platform();
operator=(const platform &platform) = delete;
protected:
- std::vector<intrusive_ptr<device>> devs;
+ std::vector<intrusive_ref<device>> devs;
};
}
using namespace clover;
-program::program(context &ctx, const std::string &source) :
- has_source(true), ctx(ctx), _source(source) {
+program::program(clover::context &ctx, const std::string &source) :
+ has_source(true), context(ctx), _source(source) {
}
-program::program(context &ctx,
+program::program(clover::context &ctx,
const ref_vector<device> &devs,
const std::vector<module> &binaries) :
- has_source(false), ctx(ctx) {
+ has_source(false), context(ctx),
+ _devices(devs) {
for_each([&](device &dev, const module &bin) {
_binaries.insert({ &dev, bin });
},
void
program::build(const ref_vector<device> &devs, const char *opts) {
if (has_source) {
+ _devices = devs;
+
for (auto &dev : devs) {
_binaries.erase(&dev);
_logs.erase(&dev);
program::device_range
program::devices() const {
- return map(derefs(), map(keys(), _binaries));
+ return map(evals(), _devices);
}
const module &
program::binary(const device &dev) const {
- return _binaries.find(const_cast<device *>(&dev))->second;
+ return _binaries.find(&dev)->second;
}
cl_build_status
program::build_status(const device &dev) const {
- if (_binaries.count(const_cast<device *>(&dev)))
+ if (_binaries.count(&dev))
return CL_BUILD_SUCCESS;
else
return CL_BUILD_NONE;
class program : public ref_counter, public _cl_program {
private:
typedef adaptor_range<
- derefs, adaptor_range<
- keys, const std::map<device *, module> &>> device_range;
+ evals, const std::vector<intrusive_ref<device>> &> device_range;
public:
- program(context &ctx,
+ program(clover::context &ctx,
const std::string &source);
- program(context &ctx,
+ program(clover::context &ctx,
const ref_vector<device> &devs,
const std::vector<module> &binaries);
const compat::vector<module::symbol> &symbols() const;
- context &ctx;
+ const intrusive_ref<clover::context> context;
private:
- std::map<device *, module> _binaries;
+ std::vector<intrusive_ref<device>> _devices;
+ std::map<const device *, module> _binaries;
std::map<const device *, std::string> _logs;
std::map<const device *, std::string> _opts;
std::string _source;
using namespace clover;
-command_queue::command_queue(context &ctx, device &dev,
+command_queue::command_queue(clover::context &ctx, clover::device &dev,
cl_command_queue_properties props) :
- ctx(ctx), dev(dev), _props(props) {
+ context(ctx), device(dev), _props(props) {
pipe = dev.pipe->context_create(dev.pipe, NULL);
if (!pipe)
throw error(CL_INVALID_DEVICE);
void
command_queue::flush() {
- pipe_screen *screen = dev.pipe;
+ pipe_screen *screen = device().pipe;
pipe_fence_handle *fence = NULL;
if (!queued_events.empty()) {
pipe->flush(pipe, &fence, 0);
while (!queued_events.empty() &&
- queued_events.front()->signalled()) {
- queued_events.front()->fence(fence);
+ queued_events.front()().signalled()) {
+ queued_events.front()().fence(fence);
queued_events.pop_front();
}
}
void
-command_queue::sequence(hard_event *ev) {
+command_queue::sequence(hard_event &ev) {
if (!queued_events.empty())
- queued_events.back()->chain(ev);
+ queued_events.back()().chain(ev);
queued_events.push_back(ev);
}
class command_queue : public ref_counter, public _cl_command_queue {
public:
- command_queue(context &ctx, device &dev,
+ command_queue(clover::context &ctx, clover::device &dev,
cl_command_queue_properties props);
~command_queue();
cl_command_queue_properties props() const;
bool profiling_enabled() const;
- context &ctx;
- device &dev;
+ const intrusive_ref<clover::context> context;
+ const intrusive_ref<clover::device> device;
friend class resource;
friend class root_resource;
private:
/// Serialize a hardware event with respect to the previous ones,
/// and push it to the pending list.
- void sequence(hard_event *ev);
+ void sequence(hard_event &ev);
cl_command_queue_properties _props;
pipe_context *pipe;
- std::deque<intrusive_ptr<hard_event>> queued_events;
+ std::deque<intrusive_ref<hard_event>> queued_events;
};
}
//
#include "core/resource.hpp"
+#include "core/memory.hpp"
#include "pipe/p_screen.h"
#include "util/u_sampler.h"
#include "util/u_format.h"
};
}
-resource::resource(device &dev, memory_obj &obj) :
- dev(dev), obj(obj), pipe(NULL), offset() {
+resource::resource(clover::device &dev, memory_obj &obj) :
+ device(dev), obj(obj), pipe(NULL), offset() {
}
resource::~resource() {
q.pipe->surface_destroy(q.pipe, st);
}
-root_resource::root_resource(device &dev, memory_obj &obj,
+root_resource::root_resource(clover::device &dev, memory_obj &obj,
command_queue &q, const std::string &data) :
resource(dev, obj) {
pipe_resource info {};
}
}
-root_resource::root_resource(device &dev, memory_obj &obj,
+root_resource::root_resource(clover::device &dev, memory_obj &obj,
root_resource &r) :
resource(dev, obj) {
assert(0); // XXX -- resource shared among dev and r.dev
}
root_resource::~root_resource() {
- dev.pipe->resource_destroy(dev.pipe, pipe);
+ device().pipe->resource_destroy(device().pipe, pipe);
}
sub_resource::sub_resource(resource &r, const vector &offset) :
- resource(r.dev, r.obj) {
+ resource(r.device(), r.obj) {
this->pipe = r.pipe;
this->offset = r.offset + offset;
}
#include <list>
-#include "core/memory.hpp"
+#include "core/queue.hpp"
#include "util/algebra.hpp"
#include "pipe/p_state.h"
namespace clover {
+ class memory_obj;
class mapping;
///
void del_map(void *p);
unsigned map_count() const;
- device &dev;
+ const intrusive_ref<clover::device> device;
memory_obj &obj;
friend class sub_resource;
friend class kernel;
protected:
- resource(device &dev, memory_obj &obj);
+ resource(clover::device &dev, memory_obj &obj);
pipe_sampler_view *bind_sampler_view(command_queue &q);
void unbind_sampler_view(command_queue &q,
///
class root_resource : public resource {
public:
- root_resource(device &dev, memory_obj &obj,
+ root_resource(clover::device &dev, memory_obj &obj,
command_queue &q, const std::string &data);
- root_resource(device &dev, memory_obj &obj, root_resource &r);
+ root_resource(clover::device &dev, memory_obj &obj, root_resource &r);
virtual ~root_resource();
};
using namespace clover;
-sampler::sampler(context &ctx, bool norm_mode,
+sampler::sampler(clover::context &ctx, bool norm_mode,
cl_addressing_mode addr_mode,
cl_filter_mode filter_mode) :
- ctx(ctx), _norm_mode(norm_mode),
+ context(ctx), _norm_mode(norm_mode),
_addr_mode(addr_mode), _filter_mode(filter_mode) {
}
namespace clover {
class sampler : public ref_counter, public _cl_sampler {
public:
- sampler(context &ctx, bool norm_mode,
+ sampler(clover::context &ctx, bool norm_mode,
cl_addressing_mode addr_mode,
cl_filter_mode filter_mode);
cl_addressing_mode addr_mode();
cl_filter_mode filter_mode();
- context &ctx;
+ const intrusive_ref<clover::context> context;
friend class kernel;
timestamp::query::~query() {
if (_query)
- q.pipe->destroy_query(q.pipe, _query);
+ q().pipe->destroy_query(q().pipe, _query);
}
cl_ulong
timestamp::query::operator()() const {
pipe_query_result result;
- if (!q.pipe->get_query_result(q.pipe, _query, false, &result))
+ if (!q().pipe->get_query_result(q().pipe, _query, false, &result))
throw error(CL_PROFILING_INFO_NOT_AVAILABLE);
return result.u64;
cl_ulong operator()() const;
private:
- command_queue &q;
+ const intrusive_ref<command_queue> q;
pipe_query *_query;
};
}
};
+ /// Function object that invokes its argument's call operator and
+ /// forwards the result.  Used as a range adaptor (e.g.
+ /// map(evals(), _devs)) to turn a sequence of intrusive_ref<T> —
+ /// whose operator() yields the referenced T & — into a sequence of
+ /// plain references, replacing the pointer-based derefs adaptor.
+ struct evals {
+    template<typename T>
+    auto
+    operator()(T &&x) const -> decltype(x()) {
+       return x();
+    }
+ };
+
struct derefs {
template<typename T>
auto