//
// Copyright 2012 Francisco Jerez
//
// Permission is hereby granted, free of charge, to any person obtaining a
// copy of this software and associated documentation files (the "Software"),
// to deal in the Software without restriction, including without limitation
// the rights to use, copy, modify, merge, publish, distribute, sublicense,
// and/or sell copies of the Software, and to permit persons to whom the
// Software is furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
// THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
// OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
// ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
// OTHER DEALINGS IN THE SOFTWARE.
//
#include "api/util.hpp"
#include "core/event.hpp"
#include "core/memory.hpp"

#include <cstring>
#include <functional>
29 using namespace clover
;
32 typedef resource::vector vector_t
;
35 vector(const size_t *p
) {
40 pitch(const vector_t
®ion
, vector_t pitch
) {
41 for (auto x
: zip(tail(pitch
),
42 map(multiplies(), region
, pitch
))) {
43 // The spec defines a value of zero as the natural pitch,
44 // i.e. the unaligned size of the previous dimension.
45 if (std::get
<0>(x
) == 0)
46 std::get
<0>(x
) = std::get
<1>(x
);
53 /// Common argument checking shared by memory transfer commands.
56 validate_common(command_queue
&q
,
57 const ref_vector
<event
> &deps
) {
58 if (any_of([&](const event
&ev
) {
59 return ev
.context() != q
.context();
61 throw error(CL_INVALID_CONTEXT
);
65 /// Common error checking for a buffer object argument.
68 validate_object(command_queue
&q
, buffer
&mem
, const vector_t
&origin
,
69 const vector_t
&pitch
, const vector_t
®ion
) {
70 if (mem
.context() != q
.context())
71 throw error(CL_INVALID_CONTEXT
);
73 // The region must fit within the specified pitch,
74 if (any_of(greater(), map(multiplies(), pitch
, region
), tail(pitch
)))
75 throw error(CL_INVALID_VALUE
);
77 // ...and within the specified object.
78 if (dot(pitch
, origin
) + pitch
[2] * region
[2] > mem
.size())
79 throw error(CL_INVALID_VALUE
);
81 if (any_of(is_zero(), region
))
82 throw error(CL_INVALID_VALUE
);
86 /// Common error checking for an image argument.
89 validate_object(command_queue
&q
, image
&img
,
90 const vector_t
&orig
, const vector_t
®ion
) {
91 vector_t size
= { img
.width(), img
.height(), img
.depth() };
93 if (img
.context() != q
.context())
94 throw error(CL_INVALID_CONTEXT
);
96 if (any_of(greater(), orig
+ region
, size
))
97 throw error(CL_INVALID_VALUE
);
99 if (any_of(is_zero(), region
))
100 throw error(CL_INVALID_VALUE
);
104 /// Common error checking for a host pointer argument.
107 validate_object(command_queue
&q
, const void *ptr
, const vector_t
&orig
,
108 const vector_t
&pitch
, const vector_t
®ion
) {
110 throw error(CL_INVALID_VALUE
);
112 // The region must fit within the specified pitch.
113 if (any_of(greater(), map(multiplies(), pitch
, region
), tail(pitch
)))
114 throw error(CL_INVALID_VALUE
);
118 /// Common argument checking for a copy between two buffer objects.
121 validate_copy(command_queue
&q
, buffer
&dst_mem
,
122 const vector_t
&dst_orig
, const vector_t
&dst_pitch
,
124 const vector_t
&src_orig
, const vector_t
&src_pitch
,
125 const vector_t
®ion
) {
126 if (dst_mem
== src_mem
) {
127 auto dst_offset
= dot(dst_pitch
, dst_orig
);
128 auto src_offset
= dot(src_pitch
, src_orig
);
130 if (interval_overlaps()(
131 dst_offset
, dst_offset
+ dst_pitch
[2] * region
[2],
132 src_offset
, src_offset
+ src_pitch
[2] * region
[2]))
133 throw error(CL_MEM_COPY_OVERLAP
);
138 /// Common argument checking for a copy between two image objects.
141 validate_copy(command_queue
&q
,
142 image
&dst_img
, const vector_t
&dst_orig
,
143 image
&src_img
, const vector_t
&src_orig
,
144 const vector_t
®ion
) {
145 if (dst_img
.format() != src_img
.format())
146 throw error(CL_IMAGE_FORMAT_MISMATCH
);
148 if (dst_img
== src_img
) {
149 if (all_of(interval_overlaps(),
150 dst_orig
, dst_orig
+ region
,
151 src_orig
, src_orig
+ region
))
152 throw error(CL_MEM_COPY_OVERLAP
);
157 /// Class that encapsulates the task of mapping an object of type
158 /// \a T. The return value of get() should be implicitly
159 /// convertible to \a void *.
164 get(command_queue
&q
, T obj
, cl_map_flags flags
,
165 size_t offset
, size_t size
) {
166 return { q
, obj
->resource(q
), flags
, true,
167 {{ offset
}}, {{ size
, 1, 1 }} };
172 struct _map
<void *> {
174 get(command_queue
&q
, void *obj
, cl_map_flags flags
,
175 size_t offset
, size_t size
) {
176 return (char *)obj
+ offset
;
181 struct _map
<const void *> {
183 get(command_queue
&q
, const void *obj
, cl_map_flags flags
,
184 size_t offset
, size_t size
) {
185 return (const char *)obj
+ offset
;
190 /// Software copy from \a src_obj to \a dst_obj. They can be
191 /// either pointers or memory objects.
193 template<typename T
, typename S
>
194 std::function
<void (event
&)>
195 soft_copy_op(command_queue
&q
,
196 T dst_obj
, const vector_t
&dst_orig
, const vector_t
&dst_pitch
,
197 S src_obj
, const vector_t
&src_orig
, const vector_t
&src_pitch
,
198 const vector_t
®ion
) {
199 return [=, &q
](event
&) {
200 auto dst
= _map
<T
>::get(q
, dst_obj
, CL_MAP_WRITE
,
201 dot(dst_pitch
, dst_orig
),
202 dst_pitch
[2] * region
[2]);
203 auto src
= _map
<S
>::get(q
, src_obj
, CL_MAP_READ
,
204 dot(src_pitch
, src_orig
),
205 src_pitch
[2] * region
[2]);
208 for (v
[2] = 0; v
[2] < region
[2]; ++v
[2]) {
209 for (v
[1] = 0; v
[1] < region
[1]; ++v
[1]) {
211 static_cast<char *>(dst
) + dot(dst_pitch
, v
),
212 static_cast<const char *>(src
) + dot(src_pitch
, v
),
213 src_pitch
[0] * region
[0]);
220 /// Hardware copy from \a src_obj to \a dst_obj.
222 template<typename T
, typename S
>
223 std::function
<void (event
&)>
224 hard_copy_op(command_queue
&q
, T dst_obj
, const vector_t
&dst_orig
,
225 S src_obj
, const vector_t
&src_orig
, const vector_t
®ion
) {
226 return [=, &q
](event
&) {
227 dst_obj
->resource(q
).copy(q
, dst_orig
, region
,
228 src_obj
->resource(q
), src_orig
);
234 clEnqueueReadBuffer(cl_command_queue d_q
, cl_mem d_mem
, cl_bool blocking
,
235 size_t offset
, size_t size
, void *ptr
,
236 cl_uint num_deps
, const cl_event
*d_deps
,
237 cl_event
*rd_ev
) try {
239 auto &mem
= obj
<buffer
>(d_mem
);
240 auto deps
= objs
<wait_list_tag
>(d_deps
, num_deps
);
241 vector_t region
= { size
, 1, 1 };
242 vector_t obj_origin
= { offset
};
243 auto obj_pitch
= pitch(region
, {{ 1 }});
245 validate_common(q
, deps
);
246 validate_object(q
, ptr
, {}, obj_pitch
, region
);
247 validate_object(q
, mem
, obj_origin
, obj_pitch
, region
);
249 hard_event
*hev
= new hard_event(
250 q
, CL_COMMAND_READ_BUFFER
, deps
,
251 soft_copy_op(q
, ptr
, {}, obj_pitch
,
252 &mem
, obj_origin
, obj_pitch
,
255 ret_object(rd_ev
, hev
);
263 clEnqueueWriteBuffer(cl_command_queue d_q
, cl_mem d_mem
, cl_bool blocking
,
264 size_t offset
, size_t size
, const void *ptr
,
265 cl_uint num_deps
, const cl_event
*d_deps
,
266 cl_event
*rd_ev
) try {
268 auto &mem
= obj
<buffer
>(d_mem
);
269 auto deps
= objs
<wait_list_tag
>(d_deps
, num_deps
);
270 vector_t region
= { size
, 1, 1 };
271 vector_t obj_origin
= { offset
};
272 auto obj_pitch
= pitch(region
, {{ 1 }});
274 validate_common(q
, deps
);
275 validate_object(q
, mem
, obj_origin
, obj_pitch
, region
);
276 validate_object(q
, ptr
, {}, obj_pitch
, region
);
278 hard_event
*hev
= new hard_event(
279 q
, CL_COMMAND_WRITE_BUFFER
, deps
,
280 soft_copy_op(q
, &mem
, obj_origin
, obj_pitch
,
284 ret_object(rd_ev
, hev
);
292 clEnqueueReadBufferRect(cl_command_queue d_q
, cl_mem d_mem
, cl_bool blocking
,
293 const size_t *p_obj_origin
,
294 const size_t *p_host_origin
,
295 const size_t *p_region
,
296 size_t obj_row_pitch
, size_t obj_slice_pitch
,
297 size_t host_row_pitch
, size_t host_slice_pitch
,
299 cl_uint num_deps
, const cl_event
*d_deps
,
300 cl_event
*rd_ev
) try {
302 auto &mem
= obj
<buffer
>(d_mem
);
303 auto deps
= objs
<wait_list_tag
>(d_deps
, num_deps
);
304 auto region
= vector(p_region
);
305 auto obj_origin
= vector(p_obj_origin
);
306 auto obj_pitch
= pitch(region
, {{ 1, obj_row_pitch
, obj_slice_pitch
}});
307 auto host_origin
= vector(p_host_origin
);
308 auto host_pitch
= pitch(region
, {{ 1, host_row_pitch
, host_slice_pitch
}});
310 validate_common(q
, deps
);
311 validate_object(q
, ptr
, host_origin
, host_pitch
, region
);
312 validate_object(q
, mem
, obj_origin
, obj_pitch
, region
);
314 hard_event
*hev
= new hard_event(
315 q
, CL_COMMAND_READ_BUFFER_RECT
, deps
,
316 soft_copy_op(q
, ptr
, host_origin
, host_pitch
,
317 &mem
, obj_origin
, obj_pitch
,
320 ret_object(rd_ev
, hev
);
328 clEnqueueWriteBufferRect(cl_command_queue d_q
, cl_mem d_mem
, cl_bool blocking
,
329 const size_t *p_obj_origin
,
330 const size_t *p_host_origin
,
331 const size_t *p_region
,
332 size_t obj_row_pitch
, size_t obj_slice_pitch
,
333 size_t host_row_pitch
, size_t host_slice_pitch
,
335 cl_uint num_deps
, const cl_event
*d_deps
,
336 cl_event
*rd_ev
) try {
338 auto &mem
= obj
<buffer
>(d_mem
);
339 auto deps
= objs
<wait_list_tag
>(d_deps
, num_deps
);
340 auto region
= vector(p_region
);
341 auto obj_origin
= vector(p_obj_origin
);
342 auto obj_pitch
= pitch(region
, {{ 1, obj_row_pitch
, obj_slice_pitch
}});
343 auto host_origin
= vector(p_host_origin
);
344 auto host_pitch
= pitch(region
, {{ 1, host_row_pitch
, host_slice_pitch
}});
346 validate_common(q
, deps
);
347 validate_object(q
, mem
, obj_origin
, obj_pitch
, region
);
348 validate_object(q
, ptr
, host_origin
, host_pitch
, region
);
350 hard_event
*hev
= new hard_event(
351 q
, CL_COMMAND_WRITE_BUFFER_RECT
, deps
,
352 soft_copy_op(q
, &mem
, obj_origin
, obj_pitch
,
353 ptr
, host_origin
, host_pitch
,
356 ret_object(rd_ev
, hev
);
364 clEnqueueCopyBuffer(cl_command_queue d_q
, cl_mem d_src_mem
, cl_mem d_dst_mem
,
365 size_t src_offset
, size_t dst_offset
, size_t size
,
366 cl_uint num_deps
, const cl_event
*d_deps
,
367 cl_event
*rd_ev
) try {
369 auto &src_mem
= obj
<buffer
>(d_src_mem
);
370 auto &dst_mem
= obj
<buffer
>(d_dst_mem
);
371 auto deps
= objs
<wait_list_tag
>(d_deps
, num_deps
);
372 vector_t region
= { size
, 1, 1 };
373 vector_t dst_origin
= { dst_offset
};
374 auto dst_pitch
= pitch(region
, {{ 1 }});
375 vector_t src_origin
= { src_offset
};
376 auto src_pitch
= pitch(region
, {{ 1 }});
378 validate_common(q
, deps
);
379 validate_object(q
, dst_mem
, dst_origin
, dst_pitch
, region
);
380 validate_object(q
, src_mem
, src_origin
, src_pitch
, region
);
381 validate_copy(q
, dst_mem
, dst_origin
, dst_pitch
,
382 src_mem
, src_origin
, src_pitch
, region
);
384 hard_event
*hev
= new hard_event(
385 q
, CL_COMMAND_COPY_BUFFER
, deps
,
386 hard_copy_op(q
, &dst_mem
, dst_origin
,
387 &src_mem
, src_origin
, region
));
389 ret_object(rd_ev
, hev
);
397 clEnqueueCopyBufferRect(cl_command_queue d_q
, cl_mem d_src_mem
,
399 const size_t *p_src_origin
, const size_t *p_dst_origin
,
400 const size_t *p_region
,
401 size_t src_row_pitch
, size_t src_slice_pitch
,
402 size_t dst_row_pitch
, size_t dst_slice_pitch
,
403 cl_uint num_deps
, const cl_event
*d_deps
,
404 cl_event
*rd_ev
) try {
406 auto &src_mem
= obj
<buffer
>(d_src_mem
);
407 auto &dst_mem
= obj
<buffer
>(d_dst_mem
);
408 auto deps
= objs
<wait_list_tag
>(d_deps
, num_deps
);
409 auto region
= vector(p_region
);
410 auto dst_origin
= vector(p_dst_origin
);
411 auto dst_pitch
= pitch(region
, {{ 1, dst_row_pitch
, dst_slice_pitch
}});
412 auto src_origin
= vector(p_src_origin
);
413 auto src_pitch
= pitch(region
, {{ 1, src_row_pitch
, src_slice_pitch
}});
415 validate_common(q
, deps
);
416 validate_object(q
, dst_mem
, dst_origin
, dst_pitch
, region
);
417 validate_object(q
, src_mem
, src_origin
, src_pitch
, region
);
418 validate_copy(q
, dst_mem
, dst_origin
, dst_pitch
,
419 src_mem
, src_origin
, src_pitch
, region
);
421 hard_event
*hev
= new hard_event(
422 q
, CL_COMMAND_COPY_BUFFER_RECT
, deps
,
423 soft_copy_op(q
, &dst_mem
, dst_origin
, dst_pitch
,
424 &src_mem
, src_origin
, src_pitch
,
427 ret_object(rd_ev
, hev
);
435 clEnqueueReadImage(cl_command_queue d_q
, cl_mem d_mem
, cl_bool blocking
,
436 const size_t *p_origin
, const size_t *p_region
,
437 size_t row_pitch
, size_t slice_pitch
, void *ptr
,
438 cl_uint num_deps
, const cl_event
*d_deps
,
439 cl_event
*rd_ev
) try {
441 auto &img
= obj
<image
>(d_mem
);
442 auto deps
= objs
<wait_list_tag
>(d_deps
, num_deps
);
443 auto region
= vector(p_region
);
444 auto dst_pitch
= pitch(region
, {{ img
.pixel_size(),
445 row_pitch
, slice_pitch
}});
446 auto src_origin
= vector(p_origin
);
447 auto src_pitch
= pitch(region
, {{ img
.pixel_size(),
448 img
.row_pitch(), img
.slice_pitch() }});
450 validate_common(q
, deps
);
451 validate_object(q
, ptr
, {}, dst_pitch
, region
);
452 validate_object(q
, img
, src_origin
, region
);
454 hard_event
*hev
= new hard_event(
455 q
, CL_COMMAND_READ_IMAGE
, deps
,
456 soft_copy_op(q
, ptr
, {}, dst_pitch
,
457 &img
, src_origin
, src_pitch
,
460 ret_object(rd_ev
, hev
);
468 clEnqueueWriteImage(cl_command_queue d_q
, cl_mem d_mem
, cl_bool blocking
,
469 const size_t *p_origin
, const size_t *p_region
,
470 size_t row_pitch
, size_t slice_pitch
, const void *ptr
,
471 cl_uint num_deps
, const cl_event
*d_deps
,
472 cl_event
*rd_ev
) try {
474 auto &img
= obj
<image
>(d_mem
);
475 auto deps
= objs
<wait_list_tag
>(d_deps
, num_deps
);
476 auto region
= vector(p_region
);
477 auto dst_origin
= vector(p_origin
);
478 auto dst_pitch
= pitch(region
, {{ img
.pixel_size(),
479 img
.row_pitch(), img
.slice_pitch() }});
480 auto src_pitch
= pitch(region
, {{ img
.pixel_size(),
481 row_pitch
, slice_pitch
}});
483 validate_common(q
, deps
);
484 validate_object(q
, img
, dst_origin
, region
);
485 validate_object(q
, ptr
, {}, src_pitch
, region
);
487 hard_event
*hev
= new hard_event(
488 q
, CL_COMMAND_WRITE_IMAGE
, deps
,
489 soft_copy_op(q
, &img
, dst_origin
, dst_pitch
,
493 ret_object(rd_ev
, hev
);
501 clEnqueueCopyImage(cl_command_queue d_q
, cl_mem d_src_mem
, cl_mem d_dst_mem
,
502 const size_t *p_src_origin
, const size_t *p_dst_origin
,
503 const size_t *p_region
,
504 cl_uint num_deps
, const cl_event
*d_deps
,
505 cl_event
*rd_ev
) try {
507 auto &src_img
= obj
<image
>(d_src_mem
);
508 auto &dst_img
= obj
<image
>(d_dst_mem
);
509 auto deps
= objs
<wait_list_tag
>(d_deps
, num_deps
);
510 auto region
= vector(p_region
);
511 auto dst_origin
= vector(p_dst_origin
);
512 auto src_origin
= vector(p_src_origin
);
514 validate_common(q
, deps
);
515 validate_object(q
, dst_img
, dst_origin
, region
);
516 validate_object(q
, src_img
, src_origin
, region
);
517 validate_copy(q
, dst_img
, dst_origin
, src_img
, src_origin
, region
);
519 hard_event
*hev
= new hard_event(
520 q
, CL_COMMAND_COPY_IMAGE
, deps
,
521 hard_copy_op(q
, &dst_img
, dst_origin
,
522 &src_img
, src_origin
,
525 ret_object(rd_ev
, hev
);
533 clEnqueueCopyImageToBuffer(cl_command_queue d_q
,
534 cl_mem d_src_mem
, cl_mem d_dst_mem
,
535 const size_t *p_src_origin
, const size_t *p_region
,
537 cl_uint num_deps
, const cl_event
*d_deps
,
538 cl_event
*rd_ev
) try {
540 auto &src_img
= obj
<image
>(d_src_mem
);
541 auto &dst_mem
= obj
<buffer
>(d_dst_mem
);
542 auto deps
= objs
<wait_list_tag
>(d_deps
, num_deps
);
543 auto region
= vector(p_region
);
544 vector_t dst_origin
= { dst_offset
};
545 auto dst_pitch
= pitch(region
, {{ src_img
.pixel_size() }});
546 auto src_origin
= vector(p_src_origin
);
547 auto src_pitch
= pitch(region
, {{ src_img
.pixel_size(),
549 src_img
.slice_pitch() }});
551 validate_common(q
, deps
);
552 validate_object(q
, dst_mem
, dst_origin
, dst_pitch
, region
);
553 validate_object(q
, src_img
, src_origin
, region
);
555 hard_event
*hev
= new hard_event(
556 q
, CL_COMMAND_COPY_IMAGE_TO_BUFFER
, deps
,
557 soft_copy_op(q
, &dst_mem
, dst_origin
, dst_pitch
,
558 &src_img
, src_origin
, src_pitch
,
561 ret_object(rd_ev
, hev
);
569 clEnqueueCopyBufferToImage(cl_command_queue d_q
,
570 cl_mem d_src_mem
, cl_mem d_dst_mem
,
572 const size_t *p_dst_origin
, const size_t *p_region
,
573 cl_uint num_deps
, const cl_event
*d_deps
,
574 cl_event
*rd_ev
) try {
576 auto &src_mem
= obj
<buffer
>(d_src_mem
);
577 auto &dst_img
= obj
<image
>(d_dst_mem
);
578 auto deps
= objs
<wait_list_tag
>(d_deps
, num_deps
);
579 auto region
= vector(p_region
);
580 auto dst_origin
= vector(p_dst_origin
);
581 auto dst_pitch
= pitch(region
, {{ dst_img
.pixel_size(),
583 dst_img
.slice_pitch() }});
584 vector_t src_origin
= { src_offset
};
585 auto src_pitch
= pitch(region
, {{ dst_img
.pixel_size() }});
587 validate_common(q
, deps
);
588 validate_object(q
, dst_img
, dst_origin
, region
);
589 validate_object(q
, src_mem
, src_origin
, src_pitch
, region
);
591 hard_event
*hev
= new hard_event(
592 q
, CL_COMMAND_COPY_BUFFER_TO_IMAGE
, deps
,
593 soft_copy_op(q
, &dst_img
, dst_origin
, dst_pitch
,
594 &src_mem
, src_origin
, src_pitch
,
597 ret_object(rd_ev
, hev
);
605 clEnqueueMapBuffer(cl_command_queue d_q
, cl_mem d_mem
, cl_bool blocking
,
606 cl_map_flags flags
, size_t offset
, size_t size
,
607 cl_uint num_deps
, const cl_event
*d_deps
,
608 cl_event
*rd_ev
, cl_int
*r_errcode
) try {
610 auto &mem
= obj
<buffer
>(d_mem
);
611 auto deps
= objs
<wait_list_tag
>(d_deps
, num_deps
);
612 vector_t region
= { size
, 1, 1 };
613 vector_t obj_origin
= { offset
};
614 auto obj_pitch
= pitch(region
, {{ 1 }});
616 validate_common(q
, deps
);
617 validate_object(q
, mem
, obj_origin
, obj_pitch
, region
);
619 void *map
= mem
.resource(q
).add_map(q
, flags
, blocking
, obj_origin
, region
);
621 ret_object(rd_ev
, new hard_event(q
, CL_COMMAND_MAP_BUFFER
, deps
));
622 ret_error(r_errcode
, CL_SUCCESS
);
626 ret_error(r_errcode
, e
);
631 clEnqueueMapImage(cl_command_queue d_q
, cl_mem d_mem
, cl_bool blocking
,
633 const size_t *p_origin
, const size_t *p_region
,
634 size_t *row_pitch
, size_t *slice_pitch
,
635 cl_uint num_deps
, const cl_event
*d_deps
,
636 cl_event
*rd_ev
, cl_int
*r_errcode
) try {
638 auto &img
= obj
<image
>(d_mem
);
639 auto deps
= objs
<wait_list_tag
>(d_deps
, num_deps
);
640 auto region
= vector(p_region
);
641 auto origin
= vector(p_origin
);
643 validate_common(q
, deps
);
644 validate_object(q
, img
, origin
, region
);
646 void *map
= img
.resource(q
).add_map(q
, flags
, blocking
, origin
, region
);
648 ret_object(rd_ev
, new hard_event(q
, CL_COMMAND_MAP_IMAGE
, deps
));
649 ret_error(r_errcode
, CL_SUCCESS
);
653 ret_error(r_errcode
, e
);
658 clEnqueueUnmapMemObject(cl_command_queue d_q
, cl_mem d_mem
, void *ptr
,
659 cl_uint num_deps
, const cl_event
*d_deps
,
660 cl_event
*rd_ev
) try {
662 auto &mem
= obj(d_mem
);
663 auto deps
= objs
<wait_list_tag
>(d_deps
, num_deps
);
665 validate_common(q
, deps
);
667 hard_event
*hev
= new hard_event(
668 q
, CL_COMMAND_UNMAP_MEM_OBJECT
, deps
,
669 [=, &q
, &mem
](event
&) {
670 mem
.resource(q
).del_map(ptr
);
673 ret_object(rd_ev
, hev
);