clover: Migrate a bunch of pointers and references in the object tree to smart refere...
[mesa.git] / src / gallium / state_trackers / clover / api / transfer.cpp
1 //
2 // Copyright 2012 Francisco Jerez
3 //
4 // Permission is hereby granted, free of charge, to any person obtaining a
5 // copy of this software and associated documentation files (the "Software"),
6 // to deal in the Software without restriction, including without limitation
7 // the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 // and/or sell copies of the Software, and to permit persons to whom the
9 // Software is furnished to do so, subject to the following conditions:
10 //
11 // The above copyright notice and this permission notice shall be included in
12 // all copies or substantial portions of the Software.
13 //
14 // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 // THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 // OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 // ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 // OTHER DEALINGS IN THE SOFTWARE.
21 //
22
23 #include <cstring>
24
25 #include "api/util.hpp"
26 #include "core/event.hpp"
27 #include "core/memory.hpp"
28
29 using namespace clover;
30
31 namespace {
32 typedef resource::vector vector_t;
33
34 vector_t
35 vector(const size_t *p) {
36 return range(p, 3);
37 }
38
   ///
   /// Return \a pitch with any zero component replaced by its
   /// "natural" value: component i+1 becomes region[i] * pitch[i],
   /// i.e. the unaligned size of the previous dimension.  pitch[0]
   /// (the element size) is never modified.
   ///
   vector_t
   pitch(const vector_t &region, vector_t pitch) {
      // Iterate pairs (pitch[i+1], region[i] * pitch[i]) for i = 0, 1.
      // NOTE(review): this presumably relies on map() being a lazy
      // adaptor, so a pitch[1] fixed up on the first iteration feeds
      // the natural value computed for pitch[2] — confirm in the
      // range utilities.
      for (auto x : zip(tail(pitch),
                        map(multiplies(), region, pitch))) {
         // The spec defines a value of zero as the natural pitch,
         // i.e. the unaligned size of the previous dimension.
         if (std::get<0>(x) == 0)
            std::get<0>(x) = std::get<1>(x);
      }

      return pitch;
   }
51
52 ///
53 /// Common argument checking shared by memory transfer commands.
54 ///
55 void
56 validate_common(command_queue &q,
57 const ref_vector<event> &deps) {
58 if (any_of([&](const event &ev) {
59 return ev.context() != q.context();
60 }, deps))
61 throw error(CL_INVALID_CONTEXT);
62 }
63
   ///
   /// Common error checking for a buffer object argument.
   ///
   /// \a origin and \a region are element counts per dimension, with
   /// pitch[0] giving the element size in bytes; pitch[1]/pitch[2]
   /// are the row/slice strides.
   ///
   void
   validate_object(command_queue &q, buffer &mem, const vector_t &origin,
                   const vector_t &pitch, const vector_t &region) {
      // The buffer must belong to the same context as the queue.
      if (mem.context() != q.context())
         throw error(CL_INVALID_CONTEXT);

      // The region must fit within the specified pitch,
      // (elementwise: pitch[i] * region[i] must not exceed pitch[i+1]).
      if (any_of(greater(), map(multiplies(), pitch, region), tail(pitch)))
         throw error(CL_INVALID_VALUE);

      // ...and within the specified object.
      if (dot(pitch, origin) + pitch[2] * region[2] > mem.size())
         throw error(CL_INVALID_VALUE);

      // Empty regions are invalid per the CL spec.
      if (any_of(is_zero(), region))
         throw error(CL_INVALID_VALUE);
   }
84
   ///
   /// Common error checking for an image argument.
   ///
   /// \a orig and \a region are in pixels.
   ///
   void
   validate_object(command_queue &q, image &img,
                   const vector_t &orig, const vector_t &region) {
      // Full extent of the image, for bounds checking below.
      vector_t size = { img.width(), img.height(), img.depth() };

      // The image must belong to the same context as the queue.
      if (img.context() != q.context())
         throw error(CL_INVALID_CONTEXT);

      // The requested box must lie entirely inside the image...
      if (any_of(greater(), orig + region, size))
         throw error(CL_INVALID_VALUE);

      // ...and must be non-empty in every dimension.
      if (any_of(is_zero(), region))
         throw error(CL_INVALID_VALUE);
   }
102
   ///
   /// Common error checking for a host pointer argument.
   ///
   /// \a orig is accepted for symmetry with the other overloads but
   /// is not range-checked here — host memory has no known extent,
   /// only the pitch consistency can be verified.
   ///
   void
   validate_object(command_queue &q, const void *ptr, const vector_t &orig,
                   const vector_t &pitch, const vector_t &region) {
      if (!ptr)
         throw error(CL_INVALID_VALUE);

      // The region must fit within the specified pitch.
      if (any_of(greater(), map(multiplies(), pitch, region), tail(pitch)))
         throw error(CL_INVALID_VALUE);
   }
116
117 ///
118 /// Common argument checking for a copy between two buffer objects.
119 ///
120 void
121 validate_copy(command_queue &q, buffer &dst_mem,
122 const vector_t &dst_orig, const vector_t &dst_pitch,
123 buffer &src_mem,
124 const vector_t &src_orig, const vector_t &src_pitch,
125 const vector_t &region) {
126 if (dst_mem == src_mem) {
127 auto dst_offset = dot(dst_pitch, dst_orig);
128 auto src_offset = dot(src_pitch, src_orig);
129
130 if (interval_overlaps()(
131 dst_offset, dst_offset + dst_pitch[2] * region[2],
132 src_offset, src_offset + src_pitch[2] * region[2]))
133 throw error(CL_MEM_COPY_OVERLAP);
134 }
135 }
136
137 ///
138 /// Common argument checking for a copy between two image objects.
139 ///
140 void
141 validate_copy(command_queue &q,
142 image &dst_img, const vector_t &dst_orig,
143 image &src_img, const vector_t &src_orig,
144 const vector_t &region) {
145 if (dst_img.format() != src_img.format())
146 throw error(CL_IMAGE_FORMAT_MISMATCH);
147
148 if (dst_img == src_img) {
149 if (all_of(interval_overlaps(),
150 dst_orig, dst_orig + region,
151 src_orig, src_orig + region))
152 throw error(CL_MEM_COPY_OVERLAP);
153 }
154 }
155
   ///
   /// Class that encapsulates the task of mapping an object of type
   /// \a T. The return value of get() should be implicitly
   /// convertible to \a void *.
   ///
   /// The generic version handles memory objects: it maps \a size
   /// bytes starting at \a offset of the object's resource on queue
   /// \a q with access \a flags.
   ///
   template<typename T>
   struct _map {
      static mapping
      get(command_queue &q, T obj, cl_map_flags flags,
          size_t offset, size_t size) {
         // One-dimensional byte range: origin {offset}, region
         // {size, 1, 1}.  NOTE(review): the `true` argument
         // presumably requests a blocking map — confirm against the
         // mapping constructor in core/resource.
         return { q, obj->resource(q), flags, true,
                  {{ offset }}, {{ size, 1, 1 }} };
      }
   };
170
171 template<>
172 struct _map<void *> {
173 static void *
174 get(command_queue &q, void *obj, cl_map_flags flags,
175 size_t offset, size_t size) {
176 return (char *)obj + offset;
177 }
178 };
179
180 template<>
181 struct _map<const void *> {
182 static const void *
183 get(command_queue &q, const void *obj, cl_map_flags flags,
184 size_t offset, size_t size) {
185 return (const char *)obj + offset;
186 }
187 };
188
   ///
   /// Software copy from \a src_obj to \a dst_obj. They can be
   /// either pointers or memory objects.
   ///
   /// Returns an action suitable for a hard_event: when executed it
   /// maps both sides through the _map helpers above and copies the
   /// region one row at a time with memcpy.
   ///
   template<typename T, typename S>
   std::function<void (event &)>
   soft_copy_op(command_queue &q,
                T dst_obj, const vector_t &dst_orig, const vector_t &dst_pitch,
                S src_obj, const vector_t &src_orig, const vector_t &src_pitch,
                const vector_t &region) {
      // [=] copies the object handles and geometry vectors into the
      // closure; only the queue is captured by reference.
      return [=, &q](event &) {
         // Map the full extent of each side: linear start offset is
         // dot(pitch, orig), size covers region[2] slices of pitch[2]
         // bytes each.
         auto dst = _map<T>::get(q, dst_obj, CL_MAP_WRITE,
                                 dot(dst_pitch, dst_orig),
                                 dst_pitch[2] * region[2]);
         auto src = _map<S>::get(q, src_obj, CL_MAP_READ,
                                 dot(src_pitch, src_orig),
                                 src_pitch[2] * region[2]);
         vector_t v = {};

         // Row-by-row copy.  The row size is src_pitch[0] * region[0]
         // bytes (callers always pass matching element sizes for both
         // sides, so using the source element size is safe).
         for (v[2] = 0; v[2] < region[2]; ++v[2]) {
            for (v[1] = 0; v[1] < region[1]; ++v[1]) {
               std::memcpy(
                  static_cast<char *>(dst) + dot(dst_pitch, v),
                  static_cast<const char *>(src) + dot(src_pitch, v),
                  src_pitch[0] * region[0]);
            }
         }
      };
   }
218
   ///
   /// Hardware copy from \a src_obj to \a dst_obj.
   ///
   /// Returns an action that performs a device-side copy between the
   /// two objects' resources on queue \a q — no host mapping is
   /// involved, unlike soft_copy_op.
   ///
   template<typename T, typename S>
   std::function<void (event &)>
   hard_copy_op(command_queue &q, T dst_obj, const vector_t &dst_orig,
                S src_obj, const vector_t &src_orig, const vector_t &region) {
      // [=] copies the handles and vectors; the queue is captured by
      // reference.
      return [=, &q](event &) {
         dst_obj->resource(q).copy(q, dst_orig, region,
                                   src_obj->resource(q), src_orig);
      };
   }
231 }
232
CLOVER_API cl_int
clEnqueueReadBuffer(cl_command_queue d_q, cl_mem d_mem, cl_bool blocking,
                    size_t offset, size_t size, void *ptr,
                    cl_uint num_deps, const cl_event *d_deps,
                    cl_event *rd_ev) try {
   auto &q = obj(d_q);
   auto &mem = obj<buffer>(d_mem);
   auto deps = objs<wait_list_tag>(d_deps, num_deps);
   // Express the linear read as a degenerate 3-D transfer so the
   // rectangular helpers can be reused.
   vector_t region = { size, 1, 1 };
   vector_t obj_origin = { offset };
   auto obj_pitch = pitch(region, {{ 1 }});

   validate_common(q, deps);
   validate_object(q, ptr, {}, obj_pitch, region);
   validate_object(q, mem, obj_origin, obj_pitch, region);

   // NOTE(review): `blocking` is never consulted in this function,
   // yet the CL spec requires the call not to return until the read
   // has completed when blocking == CL_TRUE.  Confirm the blocking
   // semantics are provided elsewhere (e.g. by the map performed in
   // soft_copy_op).
   hard_event *hev = new hard_event(
      q, CL_COMMAND_READ_BUFFER, deps,
      soft_copy_op(q, ptr, {}, obj_pitch,
                   &mem, obj_origin, obj_pitch,
                   region));

   ret_object(rd_ev, hev);
   return CL_SUCCESS;

} catch (error &e) {
   return e.get();
}
261
262 CLOVER_API cl_int
263 clEnqueueWriteBuffer(cl_command_queue d_q, cl_mem d_mem, cl_bool blocking,
264 size_t offset, size_t size, const void *ptr,
265 cl_uint num_deps, const cl_event *d_deps,
266 cl_event *rd_ev) try {
267 auto &q = obj(d_q);
268 auto &mem = obj<buffer>(d_mem);
269 auto deps = objs<wait_list_tag>(d_deps, num_deps);
270 vector_t region = { size, 1, 1 };
271 vector_t obj_origin = { offset };
272 auto obj_pitch = pitch(region, {{ 1 }});
273
274 validate_common(q, deps);
275 validate_object(q, mem, obj_origin, obj_pitch, region);
276 validate_object(q, ptr, {}, obj_pitch, region);
277
278 hard_event *hev = new hard_event(
279 q, CL_COMMAND_WRITE_BUFFER, deps,
280 soft_copy_op(q, &mem, obj_origin, obj_pitch,
281 ptr, {}, obj_pitch,
282 region));
283
284 ret_object(rd_ev, hev);
285 return CL_SUCCESS;
286
287 } catch (error &e) {
288 return e.get();
289 }
290
291 CLOVER_API cl_int
292 clEnqueueReadBufferRect(cl_command_queue d_q, cl_mem d_mem, cl_bool blocking,
293 const size_t *p_obj_origin,
294 const size_t *p_host_origin,
295 const size_t *p_region,
296 size_t obj_row_pitch, size_t obj_slice_pitch,
297 size_t host_row_pitch, size_t host_slice_pitch,
298 void *ptr,
299 cl_uint num_deps, const cl_event *d_deps,
300 cl_event *rd_ev) try {
301 auto &q = obj(d_q);
302 auto &mem = obj<buffer>(d_mem);
303 auto deps = objs<wait_list_tag>(d_deps, num_deps);
304 auto region = vector(p_region);
305 auto obj_origin = vector(p_obj_origin);
306 auto obj_pitch = pitch(region, {{ 1, obj_row_pitch, obj_slice_pitch }});
307 auto host_origin = vector(p_host_origin);
308 auto host_pitch = pitch(region, {{ 1, host_row_pitch, host_slice_pitch }});
309
310 validate_common(q, deps);
311 validate_object(q, ptr, host_origin, host_pitch, region);
312 validate_object(q, mem, obj_origin, obj_pitch, region);
313
314 hard_event *hev = new hard_event(
315 q, CL_COMMAND_READ_BUFFER_RECT, deps,
316 soft_copy_op(q, ptr, host_origin, host_pitch,
317 &mem, obj_origin, obj_pitch,
318 region));
319
320 ret_object(rd_ev, hev);
321 return CL_SUCCESS;
322
323 } catch (error &e) {
324 return e.get();
325 }
326
CLOVER_API cl_int
clEnqueueWriteBufferRect(cl_command_queue d_q, cl_mem d_mem, cl_bool blocking,
                         const size_t *p_obj_origin,
                         const size_t *p_host_origin,
                         const size_t *p_region,
                         size_t obj_row_pitch, size_t obj_slice_pitch,
                         size_t host_row_pitch, size_t host_slice_pitch,
                         const void *ptr,
                         cl_uint num_deps, const cl_event *d_deps,
                         cl_event *rd_ev) try {
   auto &q = obj(d_q);
   auto &mem = obj<buffer>(d_mem);
   auto deps = objs<wait_list_tag>(d_deps, num_deps);
   // Transfer geometry; pitch() substitutes natural values for zero
   // row/slice pitches as the spec requires.
   auto region = vector(p_region);
   auto obj_origin = vector(p_obj_origin);
   auto obj_pitch = pitch(region, {{ 1, obj_row_pitch, obj_slice_pitch }});
   auto host_origin = vector(p_host_origin);
   auto host_pitch = pitch(region, {{ 1, host_row_pitch, host_slice_pitch }});

   validate_common(q, deps);
   validate_object(q, mem, obj_origin, obj_pitch, region);
   validate_object(q, ptr, host_origin, host_pitch, region);

   // NOTE(review): `blocking` is not consulted here; confirm blocking
   // semantics are provided by the mapping inside soft_copy_op.
   hard_event *hev = new hard_event(
      q, CL_COMMAND_WRITE_BUFFER_RECT, deps,
      soft_copy_op(q, &mem, obj_origin, obj_pitch,
                   ptr, host_origin, host_pitch,
                   region));

   ret_object(rd_ev, hev);
   return CL_SUCCESS;

} catch (error &e) {
   return e.get();
}
362
363 CLOVER_API cl_int
364 clEnqueueCopyBuffer(cl_command_queue d_q, cl_mem d_src_mem, cl_mem d_dst_mem,
365 size_t src_offset, size_t dst_offset, size_t size,
366 cl_uint num_deps, const cl_event *d_deps,
367 cl_event *rd_ev) try {
368 auto &q = obj(d_q);
369 auto &src_mem = obj<buffer>(d_src_mem);
370 auto &dst_mem = obj<buffer>(d_dst_mem);
371 auto deps = objs<wait_list_tag>(d_deps, num_deps);
372 vector_t region = { size, 1, 1 };
373 vector_t dst_origin = { dst_offset };
374 auto dst_pitch = pitch(region, {{ 1 }});
375 vector_t src_origin = { src_offset };
376 auto src_pitch = pitch(region, {{ 1 }});
377
378 validate_common(q, deps);
379 validate_object(q, dst_mem, dst_origin, dst_pitch, region);
380 validate_object(q, src_mem, src_origin, src_pitch, region);
381 validate_copy(q, dst_mem, dst_origin, dst_pitch,
382 src_mem, src_origin, src_pitch, region);
383
384 hard_event *hev = new hard_event(
385 q, CL_COMMAND_COPY_BUFFER, deps,
386 hard_copy_op(q, &dst_mem, dst_origin,
387 &src_mem, src_origin, region));
388
389 ret_object(rd_ev, hev);
390 return CL_SUCCESS;
391
392 } catch (error &e) {
393 return e.get();
394 }
395
CLOVER_API cl_int
clEnqueueCopyBufferRect(cl_command_queue d_q, cl_mem d_src_mem,
                        cl_mem d_dst_mem,
                        const size_t *p_src_origin, const size_t *p_dst_origin,
                        const size_t *p_region,
                        size_t src_row_pitch, size_t src_slice_pitch,
                        size_t dst_row_pitch, size_t dst_slice_pitch,
                        cl_uint num_deps, const cl_event *d_deps,
                        cl_event *rd_ev) try {
   auto &q = obj(d_q);
   auto &src_mem = obj<buffer>(d_src_mem);
   auto &dst_mem = obj<buffer>(d_dst_mem);
   auto deps = objs<wait_list_tag>(d_deps, num_deps);
   // Transfer geometry; pitch() fills in natural values for zero
   // row/slice pitches.
   auto region = vector(p_region);
   auto dst_origin = vector(p_dst_origin);
   auto dst_pitch = pitch(region, {{ 1, dst_row_pitch, dst_slice_pitch }});
   auto src_origin = vector(p_src_origin);
   auto src_pitch = pitch(region, {{ 1, src_row_pitch, src_slice_pitch }});

   validate_common(q, deps);
   validate_object(q, dst_mem, dst_origin, dst_pitch, region);
   validate_object(q, src_mem, src_origin, src_pitch, region);
   validate_copy(q, dst_mem, dst_origin, dst_pitch,
                 src_mem, src_origin, src_pitch, region);

   // Unlike clEnqueueCopyBuffer this uses a mapped software copy
   // rather than hard_copy_op — presumably because the device copy
   // path doesn't handle arbitrary row/slice pitches; confirm before
   // changing.
   hard_event *hev = new hard_event(
      q, CL_COMMAND_COPY_BUFFER_RECT, deps,
      soft_copy_op(q, &dst_mem, dst_origin, dst_pitch,
                   &src_mem, src_origin, src_pitch,
                   region));

   ret_object(rd_ev, hev);
   return CL_SUCCESS;

} catch (error &e) {
   return e.get();
}
433
434 CLOVER_API cl_int
435 clEnqueueReadImage(cl_command_queue d_q, cl_mem d_mem, cl_bool blocking,
436 const size_t *p_origin, const size_t *p_region,
437 size_t row_pitch, size_t slice_pitch, void *ptr,
438 cl_uint num_deps, const cl_event *d_deps,
439 cl_event *rd_ev) try {
440 auto &q = obj(d_q);
441 auto &img = obj<image>(d_mem);
442 auto deps = objs<wait_list_tag>(d_deps, num_deps);
443 auto region = vector(p_region);
444 auto dst_pitch = pitch(region, {{ img.pixel_size(),
445 row_pitch, slice_pitch }});
446 auto src_origin = vector(p_origin);
447 auto src_pitch = pitch(region, {{ img.pixel_size(),
448 img.row_pitch(), img.slice_pitch() }});
449
450 validate_common(q, deps);
451 validate_object(q, ptr, {}, dst_pitch, region);
452 validate_object(q, img, src_origin, region);
453
454 hard_event *hev = new hard_event(
455 q, CL_COMMAND_READ_IMAGE, deps,
456 soft_copy_op(q, ptr, {}, dst_pitch,
457 &img, src_origin, src_pitch,
458 region));
459
460 ret_object(rd_ev, hev);
461 return CL_SUCCESS;
462
463 } catch (error &e) {
464 return e.get();
465 }
466
CLOVER_API cl_int
clEnqueueWriteImage(cl_command_queue d_q, cl_mem d_mem, cl_bool blocking,
                    const size_t *p_origin, const size_t *p_region,
                    size_t row_pitch, size_t slice_pitch, const void *ptr,
                    cl_uint num_deps, const cl_event *d_deps,
                    cl_event *rd_ev) try {
   auto &q = obj(d_q);
   auto &img = obj<image>(d_mem);
   auto deps = objs<wait_list_tag>(d_deps, num_deps);
   auto region = vector(p_region);
   // Destination layout is taken from the image object itself...
   auto dst_origin = vector(p_origin);
   auto dst_pitch = pitch(region, {{ img.pixel_size(),
                                     img.row_pitch(), img.slice_pitch() }});
   // ...source layout from the caller-supplied host pitches, with
   // zeros replaced by the natural (tightly packed) values.
   auto src_pitch = pitch(region, {{ img.pixel_size(),
                                     row_pitch, slice_pitch }});

   validate_common(q, deps);
   validate_object(q, img, dst_origin, region);
   validate_object(q, ptr, {}, src_pitch, region);

   // NOTE(review): `blocking` is not consulted here; confirm blocking
   // semantics are provided by the mapping inside soft_copy_op.
   hard_event *hev = new hard_event(
      q, CL_COMMAND_WRITE_IMAGE, deps,
      soft_copy_op(q, &img, dst_origin, dst_pitch,
                   ptr, {}, src_pitch,
                   region));

   ret_object(rd_ev, hev);
   return CL_SUCCESS;

} catch (error &e) {
   return e.get();
}
499
500 CLOVER_API cl_int
501 clEnqueueCopyImage(cl_command_queue d_q, cl_mem d_src_mem, cl_mem d_dst_mem,
502 const size_t *p_src_origin, const size_t *p_dst_origin,
503 const size_t *p_region,
504 cl_uint num_deps, const cl_event *d_deps,
505 cl_event *rd_ev) try {
506 auto &q = obj(d_q);
507 auto &src_img = obj<image>(d_src_mem);
508 auto &dst_img = obj<image>(d_dst_mem);
509 auto deps = objs<wait_list_tag>(d_deps, num_deps);
510 auto region = vector(p_region);
511 auto dst_origin = vector(p_dst_origin);
512 auto src_origin = vector(p_src_origin);
513
514 validate_common(q, deps);
515 validate_object(q, dst_img, dst_origin, region);
516 validate_object(q, src_img, src_origin, region);
517 validate_copy(q, dst_img, dst_origin, src_img, src_origin, region);
518
519 hard_event *hev = new hard_event(
520 q, CL_COMMAND_COPY_IMAGE, deps,
521 hard_copy_op(q, &dst_img, dst_origin,
522 &src_img, src_origin,
523 region));
524
525 ret_object(rd_ev, hev);
526 return CL_SUCCESS;
527
528 } catch (error &e) {
529 return e.get();
530 }
531
CLOVER_API cl_int
clEnqueueCopyImageToBuffer(cl_command_queue d_q,
                           cl_mem d_src_mem, cl_mem d_dst_mem,
                           const size_t *p_src_origin, const size_t *p_region,
                           size_t dst_offset,
                           cl_uint num_deps, const cl_event *d_deps,
                           cl_event *rd_ev) try {
   auto &q = obj(d_q);
   auto &src_img = obj<image>(d_src_mem);
   auto &dst_mem = obj<buffer>(d_dst_mem);
   auto deps = objs<wait_list_tag>(d_deps, num_deps);
   auto region = vector(p_region);
   // The destination buffer is treated as tightly packed pixels of
   // the source image's element size...
   vector_t dst_origin = { dst_offset };
   auto dst_pitch = pitch(region, {{ src_img.pixel_size() }});
   // ...while the source layout comes from the image itself.
   auto src_origin = vector(p_src_origin);
   auto src_pitch = pitch(region, {{ src_img.pixel_size(),
                                     src_img.row_pitch(),
                                     src_img.slice_pitch() }});

   validate_common(q, deps);
   validate_object(q, dst_mem, dst_origin, dst_pitch, region);
   validate_object(q, src_img, src_origin, region);

   // Mixed image/buffer copies go through the mapped software path.
   hard_event *hev = new hard_event(
      q, CL_COMMAND_COPY_IMAGE_TO_BUFFER, deps,
      soft_copy_op(q, &dst_mem, dst_origin, dst_pitch,
                   &src_img, src_origin, src_pitch,
                   region));

   ret_object(rd_ev, hev);
   return CL_SUCCESS;

} catch (error &e) {
   return e.get();
}
567
568 CLOVER_API cl_int
569 clEnqueueCopyBufferToImage(cl_command_queue d_q,
570 cl_mem d_src_mem, cl_mem d_dst_mem,
571 size_t src_offset,
572 const size_t *p_dst_origin, const size_t *p_region,
573 cl_uint num_deps, const cl_event *d_deps,
574 cl_event *rd_ev) try {
575 auto &q = obj(d_q);
576 auto &src_mem = obj<buffer>(d_src_mem);
577 auto &dst_img = obj<image>(d_dst_mem);
578 auto deps = objs<wait_list_tag>(d_deps, num_deps);
579 auto region = vector(p_region);
580 auto dst_origin = vector(p_dst_origin);
581 auto dst_pitch = pitch(region, {{ dst_img.pixel_size(),
582 dst_img.row_pitch(),
583 dst_img.slice_pitch() }});
584 vector_t src_origin = { src_offset };
585 auto src_pitch = pitch(region, {{ dst_img.pixel_size() }});
586
587 validate_common(q, deps);
588 validate_object(q, dst_img, dst_origin, region);
589 validate_object(q, src_mem, src_origin, src_pitch, region);
590
591 hard_event *hev = new hard_event(
592 q, CL_COMMAND_COPY_BUFFER_TO_IMAGE, deps,
593 soft_copy_op(q, &dst_img, dst_origin, dst_pitch,
594 &src_mem, src_origin, src_pitch,
595 region));
596
597 ret_object(rd_ev, hev);
598 return CL_SUCCESS;
599
600 } catch (error &e) {
601 return e.get();
602 }
603
604 CLOVER_API void *
605 clEnqueueMapBuffer(cl_command_queue d_q, cl_mem d_mem, cl_bool blocking,
606 cl_map_flags flags, size_t offset, size_t size,
607 cl_uint num_deps, const cl_event *d_deps,
608 cl_event *rd_ev, cl_int *r_errcode) try {
609 auto &q = obj(d_q);
610 auto &mem = obj<buffer>(d_mem);
611 auto deps = objs<wait_list_tag>(d_deps, num_deps);
612 vector_t region = { size, 1, 1 };
613 vector_t obj_origin = { offset };
614 auto obj_pitch = pitch(region, {{ 1 }});
615
616 validate_common(q, deps);
617 validate_object(q, mem, obj_origin, obj_pitch, region);
618
619 void *map = mem.resource(q).add_map(q, flags, blocking, obj_origin, region);
620
621 ret_object(rd_ev, new hard_event(q, CL_COMMAND_MAP_BUFFER, deps));
622 ret_error(r_errcode, CL_SUCCESS);
623 return map;
624
625 } catch (error &e) {
626 ret_error(r_errcode, e);
627 return NULL;
628 }
629
CLOVER_API void *
clEnqueueMapImage(cl_command_queue d_q, cl_mem d_mem, cl_bool blocking,
                  cl_map_flags flags,
                  const size_t *p_origin, const size_t *p_region,
                  size_t *row_pitch, size_t *slice_pitch,
                  cl_uint num_deps, const cl_event *d_deps,
                  cl_event *rd_ev, cl_int *r_errcode) try {
   auto &q = obj(d_q);
   auto &img = obj<image>(d_mem);
   auto deps = objs<wait_list_tag>(d_deps, num_deps);
   auto region = vector(p_region);
   auto origin = vector(p_origin);

   validate_common(q, deps);
   validate_object(q, img, origin, region);

   // The resource implements the mapping itself; the `blocking` flag
   // is forwarded to it.
   void *map = img.resource(q).add_map(q, flags, blocking, origin, region);

   // NOTE(review): the row_pitch/slice_pitch output parameters are
   // never written, but the CL spec requires image_row_pitch to be
   // returned (and image_slice_pitch for 3D images) — likely a bug,
   // confirm against the mapping's actual layout.
   ret_object(rd_ev, new hard_event(q, CL_COMMAND_MAP_IMAGE, deps));
   ret_error(r_errcode, CL_SUCCESS);
   return map;

} catch (error &e) {
   ret_error(r_errcode, e);
   return NULL;
}
656
CLOVER_API cl_int
clEnqueueUnmapMemObject(cl_command_queue d_q, cl_mem d_mem, void *ptr,
                        cl_uint num_deps, const cl_event *d_deps,
                        cl_event *rd_ev) try {
   auto &q = obj(d_q);
   auto &mem = obj(d_mem);
   auto deps = objs<wait_list_tag>(d_deps, num_deps);

   validate_common(q, deps);

   // Defer the unmap until the event actually executes.
   // NOTE(review): `q` and `mem` are captured by reference and
   // nothing visible here retains the memory object until the event
   // runs — confirm the event machinery keeps it alive if the
   // application releases it early.
   hard_event *hev = new hard_event(
      q, CL_COMMAND_UNMAP_MEM_OBJECT, deps,
      [=, &q, &mem](event &) {
         mem.resource(q).del_map(ptr);
      });

   ret_object(rd_ev, hev);
   return CL_SUCCESS;

} catch (error &e) {
   return e.get();
}