1 /*******************************************************************************
2 * Copyright (c) 2008-2016 The Khronos Group Inc.
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and/or associated documentation files (the
6 * "Materials"), to deal in the Materials without restriction, including
7 * without limitation the rights to use, copy, modify, merge, publish,
8 * distribute, sublicense, and/or sell copies of the Materials, and to
9 * permit persons to whom the Materials are furnished to do so, subject to
10 * the following conditions:
12 * The above copyright notice and this permission notice shall be included
13 * in all copies or substantial portions of the Materials.
15 * MODIFICATIONS TO THIS FILE MAY MEAN IT NO LONGER ACCURATELY REFLECTS
16 * KHRONOS STANDARDS. THE UNMODIFIED, NORMATIVE VERSIONS OF KHRONOS
17 * SPECIFICATIONS AND HEADER INFORMATION ARE LOCATED AT
18 * https://www.khronos.org/registry/
20 * THE MATERIALS ARE PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
21 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
22 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
23 * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
24 * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
25 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
26 * MATERIALS OR THE USE OR OTHER DEALINGS IN THE MATERIALS.
27 ******************************************************************************/
31 * \brief C++ bindings for OpenCL 1.0 (rev 48), OpenCL 1.1 (rev 33),
32 * OpenCL 1.2 (rev 15) and OpenCL 2.0 (rev 29)
33 * \author Lee Howes and Bruce Merry
35 * Derived from the OpenCL 1.x C++ bindings written by
36 * Benedict R. Gaster, Laurent Morichetti and Lee Howes
37 * With additions and fixes from:
38 * Brian Cole, March 3rd 2010 and April 2012
39 * Matt Gruenke, April 2012.
40 * Bruce Merry, February 2013.
41 * Tom Deakin and Simon McIntosh-Smith, July 2013
47 * Optional extension support
49 * cl_ext_device_fission
50 * #define CL_HPP_USE_CL_DEVICE_FISSION
51 * cl_khr_d3d10_sharing
52 * #define CL_HPP_USE_DX_INTEROP
54 * #define CL_HPP_USE_CL_SUB_GROUPS_KHR
55 * cl_khr_image2d_from_buffer
56 * #define CL_HPP_USE_CL_IMAGE2D_FROM_BUFFER_KHR
58 * Doxygen documentation for this header is available here:
60 * http://khronosgroup.github.io/OpenCL-CLHPP/
62 * The latest version of this header can be found on the GitHub releases page:
64 * https://github.com/KhronosGroup/OpenCL-CLHPP/releases
66 * Bugs and patches can be submitted to the GitHub repository:
68 * https://github.com/KhronosGroup/OpenCL-CLHPP
72 * \section intro Introduction
73 * For many large applications C++ is the language of choice and so it seems
74 * reasonable to define C++ bindings for OpenCL.
76 * The interface is contained within a single C++ header file \em cl2.hpp and all
77 * definitions are contained within the namespace \em cl. There is no additional
78 * requirement to include \em cl.h and to use either the C++ or original C
79 * bindings; it is enough to simply include \em cl2.hpp.
81 * The bindings themselves are lightweight and correspond closely to the
82 * underlying C API. Using the C++ bindings introduces no additional execution
85 * There are numerous compatibility, portability and memory management
86 * fixes in the new header as well as additional OpenCL 2.0 features.
87 * As a result the header is not directly backward compatible and for this
88 * reason we release it as cl2.hpp rather than a new version of cl.hpp.
91 * \section compatibility Compatibility
92 * Due to the evolution of the underlying OpenCL API the 2.0 C++ bindings
93 * include an updated approach to defining supported feature versions
94 * and the range of valid underlying OpenCL runtime versions supported.
96 * The combination of preprocessor macros CL_HPP_TARGET_OPENCL_VERSION and
97 * CL_HPP_MINIMUM_OPENCL_VERSION control this range. These are three digit
98 * decimal values representing OpenCL runtime versions. The default for
99 * the target is 200, representing OpenCL 2.0 and the minimum is also
100 * defined as 200. These settings would use 2.0 API calls only.
101 * If backward compatibility with a 1.2 runtime is required, the minimum
102 * version may be set to 120.
104 * Note that this is a compile-time setting, and so affects linking against
105 * a particular SDK version rather than the versioning of the loaded runtime.
107 * The earlier versions of the header included basic vector and string
108 * classes based loosely on STL versions. These were difficult to
109 * maintain and very rarely used. For the 2.0 header we now assume
110 * the presence of the standard library unless requested otherwise.
111 * We use std::array, std::vector, std::shared_ptr and std::string
112 * throughout to safely manage memory and reduce the chance of a
113 * recurrence of earlier memory management bugs.
115 * These classes are used through typedefs in the cl namespace:
116 * cl::array, cl::vector, cl::pointer and cl::string.
117 * In addition cl::allocate_pointer forwards to std::allocate_shared
119 * In all cases these standard library classes can be replaced with
120 * custom interface-compatible versions using the CL_HPP_NO_STD_ARRAY,
121 * CL_HPP_NO_STD_VECTOR, CL_HPP_NO_STD_UNIQUE_PTR and
122 * CL_HPP_NO_STD_STRING macros.
124 * The OpenCL 1.x versions of the C++ bindings included a size_t wrapper
125 * class to interface with kernel enqueue. This caused unpleasant interactions
126 * with the standard size_t declaration and led to namespacing bugs.
127 * In the 2.0 version we have replaced this with a std::array-based interface.
128 * However, the old behaviour can be regained for backward compatibility
129 * using the CL_HPP_ENABLE_SIZE_T_COMPATIBILITY macro.
131 * Finally, the program construction interface used a clumsy vector-of-pairs
132 * design in the earlier versions. We have replaced that with a cleaner
133 * vector-of-vectors and vector-of-strings design. However, for backward
134 * compatibility old behaviour can be regained with the
135 * CL_HPP_ENABLE_PROGRAM_CONSTRUCTION_FROM_ARRAY_COMPATIBILITY macro.
137 * In OpenCL 2.0 OpenCL C is not entirely backward compatible with
138 * earlier versions. As a result a flag must be passed to the OpenCL C
139 * compiler to request OpenCL 2.0 compilation of kernels with 1.2 as
140 * the default in the absence of the flag.
141 * In some cases the C++ bindings automatically compile code for ease.
142 * For those cases the compilation defaults to OpenCL C 2.0.
143 * If this is not wanted, the CL_HPP_CL_1_2_DEFAULT_BUILD macro may
144 * be specified to assume 1.2 compilation.
145 * If more fine-grained decisions on a per-kernel basis are required
146 * then explicit build operations that take the flag should be used.
149 * \section parameterization Parameters
150 * This header may be parameterized by a set of preprocessor macros.
152 * - CL_HPP_TARGET_OPENCL_VERSION
154 * Defines the target OpenCL runtime version to build the header
155 * against. Defaults to 200, representing OpenCL 2.0.
157 * - CL_HPP_NO_STD_STRING
159 * Do not use the standard library string class. cl::string is not
160 * defined and may be defined by the user before cl2.hpp is
163 * - CL_HPP_NO_STD_VECTOR
165 * Do not use the standard library vector class. cl::vector is not
166 * defined and may be defined by the user before cl2.hpp is
169 * - CL_HPP_NO_STD_ARRAY
171 * Do not use the standard library array class. cl::array is not
172 * defined and may be defined by the user before cl2.hpp is
175 * - CL_HPP_NO_STD_UNIQUE_PTR
177 * Do not use the standard library unique_ptr class. cl::pointer and
178 * the cl::allocate_pointer functions are not defined and may be
179 * defined by the user before cl2.hpp is included.
181 * - CL_HPP_ENABLE_DEVICE_FISSION
183 * Enables device fission for OpenCL 1.2 platforms.
185 * - CL_HPP_ENABLE_EXCEPTIONS
187 * Enable exceptions for use in the C++ bindings header. This is the
188 * preferred error handling mechanism but is not required.
190 * - CL_HPP_ENABLE_SIZE_T_COMPATIBILITY
192 * Backward compatibility option to support cl.hpp-style size_t
193 * class. Replaces the updated std::array derived version and
194 * removal of size_t from the namespace. Note that in this case the
195 * new size_t class is placed in the cl::compatibility namespace and
196 * thus requires an additional using declaration for direct backward
199 * - CL_HPP_ENABLE_PROGRAM_CONSTRUCTION_FROM_ARRAY_COMPATIBILITY
201 * Enable older vector of pairs interface for construction of
204 * - CL_HPP_CL_1_2_DEFAULT_BUILD
206 * Default to OpenCL C 1.2 compilation rather than OpenCL C 2.0
207 * applies to use of cl::Program construction and other program
211 * \section example Example
213 * The following example shows a general use case for the C++
214 * bindings, including support for the optional exception feature and
215 * also the supplied vector and string classes, see following sections for
216 * descriptions of these features.
219 #define CL_HPP_ENABLE_EXCEPTIONS
220 #define CL_HPP_TARGET_OPENCL_VERSION 200
222 #include <CL/cl2.hpp>
228 const int numElements = 32;
232 // Filter for a 2.0 platform and set it as the default
233 std::vector<cl::Platform> platforms;
234 cl::Platform::get(&platforms);
236 for (auto &p : platforms) {
237 std::string platver = p.getInfo<CL_PLATFORM_VERSION>();
238 if (platver.find("OpenCL 2.") != std::string::npos) {
243 std::cout << "No OpenCL 2.0 platform found.";
247 cl::Platform newP = cl::Platform::setDefault(plat);
249 std::cout << "Error setting default platform.";
253 // Use C++11 raw string literals for kernel source code
254 std::string kernel1{R"CLC(
256 kernel void updateGlobal()
261 std::string kernel2{R"CLC(
262 typedef struct { global int *bar; } Foo;
263 kernel void vectorAdd(global const Foo* aNum, global const int *inputA, global const int *inputB,
264 global int *output, int val, write_only pipe int outPipe, queue_t childQueue)
266 output[get_global_id(0)] = inputA[get_global_id(0)] + inputB[get_global_id(0)] + val + *(aNum->bar);
267 write_pipe(outPipe, &val);
268 queue_t default_queue = get_default_queue();
269 ndrange_t ndrange = ndrange_1D(get_global_size(0)/2, get_global_size(0)/2);
271 // Have a child kernel write into third quarter of output
272 enqueue_kernel(default_queue, CLK_ENQUEUE_FLAGS_WAIT_KERNEL, ndrange,
274 output[get_global_size(0)*2 + get_global_id(0)] =
275 inputA[get_global_size(0)*2 + get_global_id(0)] + inputB[get_global_size(0)*2 + get_global_id(0)] + globalA;
278 // Have a child kernel write into last quarter of output
279 enqueue_kernel(childQueue, CLK_ENQUEUE_FLAGS_WAIT_KERNEL, ndrange,
281 output[get_global_size(0)*3 + get_global_id(0)] =
282 inputA[get_global_size(0)*3 + get_global_id(0)] + inputB[get_global_size(0)*3 + get_global_id(0)] + globalA + 2;
287 // New simpler string interface style
288 std::vector<std::string> programStrings {kernel1, kernel2};
290 cl::Program vectorAddProgram(programStrings);
292 vectorAddProgram.build("-cl-std=CL2.0");
295 // Print build info for all devices
296 cl_int buildErr = CL_SUCCESS;
297 auto buildInfo = vectorAddProgram.getBuildInfo<CL_PROGRAM_BUILD_LOG>(&buildErr);
298 for (auto &pair : buildInfo) {
299 std::cerr << pair.second << std::endl << std::endl;
305 typedef struct { int *bar; } Foo;
307 // Get and run kernel that initializes the program-scope global
308 // A test for kernels that take no arguments
309 auto program2Kernel =
310 cl::KernelFunctor<>(vectorAddProgram, "updateGlobal");
318 auto anSVMInt = cl::allocate_svm<int, cl::SVMTraitCoarse<>>();
320 cl::SVMAllocator<Foo, cl::SVMTraitCoarse<cl::SVMTraitReadOnly<>>> svmAllocReadOnly;
321 auto fooPointer = cl::allocate_pointer<Foo>(svmAllocReadOnly);
322 fooPointer->bar = anSVMInt.get();
323 cl::SVMAllocator<int, cl::SVMTraitCoarse<>> svmAlloc;
324 std::vector<int, cl::SVMAllocator<int, cl::SVMTraitCoarse<>>> inputA(numElements, 1, svmAlloc);
325 cl::coarse_svm_vector<int> inputB(numElements, 2, svmAlloc);
330 // Traditional cl_mem allocations
331 std::vector<int> output(numElements, 0xdeadbeef);
332 cl::Buffer outputBuffer(begin(output), end(output), false);
333 cl::Pipe aPipe(sizeof(cl_int), numElements / 2);
335 // Default command queue, also passed in as a parameter
336 cl::DeviceCommandQueue defaultDeviceQueue = cl::DeviceCommandQueue::makeDefault(
337 cl::Context::getDefault(), cl::Device::getDefault());
339 auto vectorAddKernel =
341 decltype(fooPointer)&,
343 cl::coarse_svm_vector<int>&,
347 cl::DeviceCommandQueue
348 >(vectorAddProgram, "vectorAdd");
350 // Ensure that the additional SVM pointer is available to the kernel
351 // This one was not passed as a parameter
352 vectorAddKernel.setSVMPointers(anSVMInt);
354 // Hand control of coarse allocations to runtime
355 cl::enqueueUnmapSVM(anSVMInt);
356 cl::enqueueUnmapSVM(fooPointer);
357 cl::unmapSVM(inputB);
358 cl::unmapSVM(output2);
363 cl::NDRange(numElements/2),
364 cl::NDRange(numElements/2)),
375 cl::copy(outputBuffer, begin(output), end(output));
376 // Grab the SVM output vector using a map
379 cl::Device d = cl::Device::getDefault();
381 std::cout << "Output:\n";
382 for (int i = 1; i < numElements; ++i) {
383 std::cout << "\t" << output[i] << "\n";
396 /* Handle deprecated preprocessor definitions. In each case, we only check for
397 * the old name if the new name is not defined, so that user code can define
398 * both and hence work with either version of the bindings.
400 #if !defined(CL_HPP_USE_DX_INTEROP) && defined(USE_DX_INTEROP)
401 # pragma message("cl2.hpp: USE_DX_INTEROP is deprecated. Define CL_HPP_USE_DX_INTEROP instead")
402 # define CL_HPP_USE_DX_INTEROP
404 #if !defined(CL_HPP_USE_CL_DEVICE_FISSION) && defined(USE_CL_DEVICE_FISSION)
405 # pragma message("cl2.hpp: USE_CL_DEVICE_FISSION is deprecated. Define CL_HPP_USE_CL_DEVICE_FISSION instead")
406 # define CL_HPP_USE_CL_DEVICE_FISSION
408 #if !defined(CL_HPP_ENABLE_EXCEPTIONS) && defined(__CL_ENABLE_EXCEPTIONS)
409 # pragma message("cl2.hpp: __CL_ENABLE_EXCEPTIONS is deprecated. Define CL_HPP_ENABLE_EXCEPTIONS instead")
410 # define CL_HPP_ENABLE_EXCEPTIONS
412 #if !defined(CL_HPP_NO_STD_VECTOR) && defined(__NO_STD_VECTOR)
413 # pragma message("cl2.hpp: __NO_STD_VECTOR is deprecated. Define CL_HPP_NO_STD_VECTOR instead")
414 # define CL_HPP_NO_STD_VECTOR
416 #if !defined(CL_HPP_NO_STD_STRING) && defined(__NO_STD_STRING)
417 # pragma message("cl2.hpp: __NO_STD_STRING is deprecated. Define CL_HPP_NO_STD_STRING instead")
418 # define CL_HPP_NO_STD_STRING
420 #if defined(VECTOR_CLASS)
421 # pragma message("cl2.hpp: VECTOR_CLASS is deprecated. Alias cl::vector instead")
423 #if defined(STRING_CLASS)
424 # pragma message("cl2.hpp: STRING_CLASS is deprecated. Alias cl::string instead.")
426 #if !defined(CL_HPP_USER_OVERRIDE_ERROR_STRINGS) && defined(__CL_USER_OVERRIDE_ERROR_STRINGS)
427 # pragma message("cl2.hpp: __CL_USER_OVERRIDE_ERROR_STRINGS is deprecated. Define CL_HPP_USER_OVERRIDE_ERROR_STRINGS instead")
428 # define CL_HPP_USER_OVERRIDE_ERROR_STRINGS
431 /* Warn about features that are no longer supported
433 #if defined(__USE_DEV_VECTOR)
434 # pragma message("cl2.hpp: __USE_DEV_VECTOR is no longer supported. Expect compilation errors")
436 #if defined(__USE_DEV_STRING)
437 # pragma message("cl2.hpp: __USE_DEV_STRING is no longer supported. Expect compilation errors")
440 /* Detect which version to target */
441 #if !defined(CL_HPP_TARGET_OPENCL_VERSION)
442 # pragma message("cl2.hpp: CL_HPP_TARGET_OPENCL_VERSION is not defined. It will default to 200 (OpenCL 2.0)")
443 # define CL_HPP_TARGET_OPENCL_VERSION 200
445 #if CL_HPP_TARGET_OPENCL_VERSION != 100 && CL_HPP_TARGET_OPENCL_VERSION != 110 && CL_HPP_TARGET_OPENCL_VERSION != 120 && CL_HPP_TARGET_OPENCL_VERSION != 200
446 # pragma message("cl2.hpp: CL_HPP_TARGET_OPENCL_VERSION is not a valid value (100, 110, 120 or 200). It will be set to 200")
447 # undef CL_HPP_TARGET_OPENCL_VERSION
448 # define CL_HPP_TARGET_OPENCL_VERSION 200
451 /* Forward target OpenCL version to C headers if necessary */
452 #if defined(CL_TARGET_OPENCL_VERSION)
453 /* Warn if prior definition of CL_TARGET_OPENCL_VERSION is lower than
454 * requested C++ bindings version */
455 #if CL_TARGET_OPENCL_VERSION < CL_HPP_TARGET_OPENCL_VERSION
456 # pragma message("CL_TARGET_OPENCL_VERSION is already defined as is lower than CL_HPP_TARGET_OPENCL_VERSION")
459 # define CL_TARGET_OPENCL_VERSION CL_HPP_TARGET_OPENCL_VERSION
462 #if !defined(CL_HPP_MINIMUM_OPENCL_VERSION)
463 # define CL_HPP_MINIMUM_OPENCL_VERSION 200
465 #if CL_HPP_MINIMUM_OPENCL_VERSION != 100 && CL_HPP_MINIMUM_OPENCL_VERSION != 110 && CL_HPP_MINIMUM_OPENCL_VERSION != 120 && CL_HPP_MINIMUM_OPENCL_VERSION != 200
466 # pragma message("cl2.hpp: CL_HPP_MINIMUM_OPENCL_VERSION is not a valid value (100, 110, 120 or 200). It will be set to 100")
467 # undef CL_HPP_MINIMUM_OPENCL_VERSION
468 # define CL_HPP_MINIMUM_OPENCL_VERSION 100
470 #if CL_HPP_MINIMUM_OPENCL_VERSION > CL_HPP_TARGET_OPENCL_VERSION
471 # error "CL_HPP_MINIMUM_OPENCL_VERSION must not be greater than CL_HPP_TARGET_OPENCL_VERSION"
474 #if CL_HPP_MINIMUM_OPENCL_VERSION <= 100 && !defined(CL_USE_DEPRECATED_OPENCL_1_0_APIS)
475 # define CL_USE_DEPRECATED_OPENCL_1_0_APIS
477 #if CL_HPP_MINIMUM_OPENCL_VERSION <= 110 && !defined(CL_USE_DEPRECATED_OPENCL_1_1_APIS)
478 # define CL_USE_DEPRECATED_OPENCL_1_1_APIS
480 #if CL_HPP_MINIMUM_OPENCL_VERSION <= 120 && !defined(CL_USE_DEPRECATED_OPENCL_1_2_APIS)
481 # define CL_USE_DEPRECATED_OPENCL_1_2_APIS
483 #if CL_HPP_MINIMUM_OPENCL_VERSION <= 200 && !defined(CL_USE_DEPRECATED_OPENCL_2_0_APIS)
484 # define CL_USE_DEPRECATED_OPENCL_2_0_APIS
491 #if defined(CL_HPP_USE_DX_INTEROP)
492 #include <CL/cl_d3d10.h>
493 #include <CL/cl_dx9_media_sharing.h>
497 #if defined(_MSC_VER)
501 // Check for a valid C++ version
503 // Need to do both tests here because for some reason __cplusplus is not
504 // updated in visual studio
505 #if (!defined(_MSC_VER) && __cplusplus < 201103L) || (defined(_MSC_VER) && _MSC_VER < 1700)
506 #error Visual studio 2013 or another C++11-supporting compiler required
510 #if defined(CL_HPP_USE_CL_DEVICE_FISSION) || defined(CL_HPP_USE_CL_SUB_GROUPS_KHR)
511 #include <CL/cl_ext.h>
514 #if defined(__APPLE__) || defined(__MACOSX)
515 #include <OpenCL/opencl.h>
517 #include <CL/opencl.h>
520 #if (__cplusplus >= 201103L)
521 #define CL_HPP_NOEXCEPT_ noexcept
523 #define CL_HPP_NOEXCEPT_
526 #if defined(_MSC_VER)
527 # define CL_HPP_DEFINE_STATIC_MEMBER_ __declspec(selectany)
529 # define CL_HPP_DEFINE_STATIC_MEMBER_ __attribute__((weak))
532 // Define deprecated prefixes and suffixes to ensure compilation
533 // in case they are not pre-defined
534 #if !defined(CL_EXT_PREFIX__VERSION_1_1_DEPRECATED)
535 #define CL_EXT_PREFIX__VERSION_1_1_DEPRECATED
536 #endif // #if !defined(CL_EXT_PREFIX__VERSION_1_1_DEPRECATED)
537 #if !defined(CL_EXT_SUFFIX__VERSION_1_1_DEPRECATED)
538 #define CL_EXT_SUFFIX__VERSION_1_1_DEPRECATED
539 #endif // #if !defined(CL_EXT_PREFIX__VERSION_1_1_DEPRECATED)
541 #if !defined(CL_EXT_PREFIX__VERSION_1_2_DEPRECATED)
542 #define CL_EXT_PREFIX__VERSION_1_2_DEPRECATED
543 #endif // #if !defined(CL_EXT_PREFIX__VERSION_1_2_DEPRECATED)
544 #if !defined(CL_EXT_SUFFIX__VERSION_1_2_DEPRECATED)
545 #define CL_EXT_SUFFIX__VERSION_1_2_DEPRECATED
546 #endif // #if !defined(CL_EXT_PREFIX__VERSION_1_2_DEPRECATED)
548 #if !defined(CL_CALLBACK)
557 #include <functional>
560 // Define a size_type to represent a correctly resolved size_t
561 #if defined(CL_HPP_ENABLE_SIZE_T_COMPATIBILITY)
563 using size_type = ::size_t;
565 #else // #if defined(CL_HPP_ENABLE_SIZE_T_COMPATIBILITY)
567 using size_type = size_t;
569 #endif // #if defined(CL_HPP_ENABLE_SIZE_T_COMPATIBILITY)
572 #if defined(CL_HPP_ENABLE_EXCEPTIONS)
574 #endif // #if defined(CL_HPP_ENABLE_EXCEPTIONS)
576 #if !defined(CL_HPP_NO_STD_VECTOR)
579 template < class T, class Alloc = std::allocator<T> >
580 using vector = std::vector<T, Alloc>;
582 #endif // #if !defined(CL_HPP_NO_STD_VECTOR)
584 #if !defined(CL_HPP_NO_STD_STRING)
587 using string = std::string;
589 #endif // #if !defined(CL_HPP_NO_STD_STRING)
591 #if CL_HPP_TARGET_OPENCL_VERSION >= 200
593 #if !defined(CL_HPP_NO_STD_UNIQUE_PTR)
596 // Replace unique_ptr and allocate_pointer for internal use
597 // to allow user to replace them
598 template<class T, class D>
599 using pointer = std::unique_ptr<T, D>;
602 #endif // #if CL_HPP_TARGET_OPENCL_VERSION >= 200
603 #if !defined(CL_HPP_NO_STD_ARRAY)
606 template < class T, size_type N >
607 using array = std::array<T, N>;
609 #endif // #if !defined(CL_HPP_NO_STD_ARRAY)
611 // Define size_type appropriately to allow backward-compatibility
612 // use of the old size_t interface class
613 #if defined(CL_HPP_ENABLE_SIZE_T_COMPATIBILITY)
615 namespace compatibility {
616 /*! \brief class used to interface between C++ and
617 * OpenCL C calls that require arrays of size_t values, whose
618 * size is known statically.
627 //! \brief Initialize size_t to all 0s
630 for (int i = 0; i < N; ++i) {
635 size_t(const array<size_type, N> &rhs)
637 for (int i = 0; i < N; ++i) {
642 size_type& operator[](int index)
647 const size_type& operator[](int index) const
652 //! \brief Conversion operator to T*.
653 operator size_type* () { return data_; }
655 //! \brief Conversion operator to const T*.
656 operator const size_type* () const { return data_; }
658 operator array<size_type, N>() const
660 array<size_type, N> ret;
662 for (int i = 0; i < N; ++i) {
668 } // namespace compatibility
671 using size_t = compatibility::size_t<N>;
673 #endif // #if defined(CL_HPP_ENABLE_SIZE_T_COMPATIBILITY)
675 // Helper alias to avoid confusing the macros
678 using size_t_array = array<size_type, 3>;
679 } // namespace detail
685 * \brief The OpenCL C++ bindings are defined within this namespace.
691 #define CL_HPP_INIT_CL_EXT_FCN_PTR_(name) \
693 pfn_##name = (PFN_##name) \
694 clGetExtensionFunctionAddress(#name); \
699 #define CL_HPP_INIT_CL_EXT_FCN_PTR_PLATFORM_(platform, name) \
701 pfn_##name = (PFN_##name) \
702 clGetExtensionFunctionAddressForPlatform(platform, #name); \
711 class DeviceCommandQueue;
716 #if defined(CL_HPP_ENABLE_EXCEPTIONS)
717 /*! \brief Exception class
719 * This may be thrown by API functions when CL_HPP_ENABLE_EXCEPTIONS is defined.
721 class Error : public std::exception
725 const char * errStr_;
727 /*! \brief Create a new CL error exception for a given error code
728 * and corresponding message.
730 * \param err error code value.
732 * \param errStr a descriptive string that must remain in scope until
733 * handling of the exception has concluded. If set, it
734 * will be returned by what().
736 Error(cl_int err, const char * errStr = NULL) : err_(err), errStr_(errStr)
741 /*! \brief Get error string associated with exception
743 * \return A memory pointer to the error message string.
745 virtual const char * what() const throw ()
747 if (errStr_ == NULL) {
755 /*! \brief Get error code associated with exception
757 * \return The error code.
759 cl_int err(void) const { return err_; }
761 #define CL_HPP_ERR_STR_(x) #x
763 #define CL_HPP_ERR_STR_(x) NULL
764 #endif // CL_HPP_ENABLE_EXCEPTIONS
769 #if defined(CL_HPP_ENABLE_EXCEPTIONS)
770 static inline cl_int errHandler (
772 const char * errStr = NULL)
774 if (err != CL_SUCCESS) {
775 throw Error(err, errStr);
780 static inline cl_int errHandler (cl_int err, const char * errStr = NULL)
782 (void) errStr; // suppress unused variable warning
785 #endif // CL_HPP_ENABLE_EXCEPTIONS
790 //! \cond DOXYGEN_DETAIL
791 #if !defined(CL_HPP_USER_OVERRIDE_ERROR_STRINGS)
792 #define __GET_DEVICE_INFO_ERR CL_HPP_ERR_STR_(clGetDeviceInfo)
793 #define __GET_PLATFORM_INFO_ERR CL_HPP_ERR_STR_(clGetPlatformInfo)
794 #define __GET_DEVICE_IDS_ERR CL_HPP_ERR_STR_(clGetDeviceIDs)
795 #define __GET_PLATFORM_IDS_ERR CL_HPP_ERR_STR_(clGetPlatformIDs)
796 #define __GET_CONTEXT_INFO_ERR CL_HPP_ERR_STR_(clGetContextInfo)
797 #define __GET_EVENT_INFO_ERR CL_HPP_ERR_STR_(clGetEventInfo)
798 #define __GET_EVENT_PROFILE_INFO_ERR CL_HPP_ERR_STR_(clGetEventProfileInfo)
799 #define __GET_MEM_OBJECT_INFO_ERR CL_HPP_ERR_STR_(clGetMemObjectInfo)
800 #define __GET_IMAGE_INFO_ERR CL_HPP_ERR_STR_(clGetImageInfo)
801 #define __GET_SAMPLER_INFO_ERR CL_HPP_ERR_STR_(clGetSamplerInfo)
802 #define __GET_KERNEL_INFO_ERR CL_HPP_ERR_STR_(clGetKernelInfo)
803 #if CL_HPP_TARGET_OPENCL_VERSION >= 120
804 #define __GET_KERNEL_ARG_INFO_ERR CL_HPP_ERR_STR_(clGetKernelArgInfo)
805 #endif // CL_HPP_TARGET_OPENCL_VERSION >= 120
806 #define __GET_KERNEL_WORK_GROUP_INFO_ERR CL_HPP_ERR_STR_(clGetKernelWorkGroupInfo)
807 #define __GET_PROGRAM_INFO_ERR CL_HPP_ERR_STR_(clGetProgramInfo)
808 #define __GET_PROGRAM_BUILD_INFO_ERR CL_HPP_ERR_STR_(clGetProgramBuildInfo)
809 #define __GET_COMMAND_QUEUE_INFO_ERR CL_HPP_ERR_STR_(clGetCommandQueueInfo)
811 #define __CREATE_CONTEXT_ERR CL_HPP_ERR_STR_(clCreateContext)
812 #define __CREATE_CONTEXT_FROM_TYPE_ERR CL_HPP_ERR_STR_(clCreateContextFromType)
813 #define __GET_SUPPORTED_IMAGE_FORMATS_ERR CL_HPP_ERR_STR_(clGetSupportedImageFormats)
815 #define __CREATE_BUFFER_ERR CL_HPP_ERR_STR_(clCreateBuffer)
816 #define __COPY_ERR CL_HPP_ERR_STR_(cl::copy)
817 #define __CREATE_SUBBUFFER_ERR CL_HPP_ERR_STR_(clCreateSubBuffer)
818 #define __CREATE_GL_BUFFER_ERR CL_HPP_ERR_STR_(clCreateFromGLBuffer)
819 #define __CREATE_GL_RENDER_BUFFER_ERR CL_HPP_ERR_STR_(clCreateFromGLBuffer)
820 #define __GET_GL_OBJECT_INFO_ERR CL_HPP_ERR_STR_(clGetGLObjectInfo)
821 #if CL_HPP_TARGET_OPENCL_VERSION >= 120
822 #define __CREATE_IMAGE_ERR CL_HPP_ERR_STR_(clCreateImage)
823 #define __CREATE_GL_TEXTURE_ERR CL_HPP_ERR_STR_(clCreateFromGLTexture)
824 #define __IMAGE_DIMENSION_ERR CL_HPP_ERR_STR_(Incorrect image dimensions)
825 #endif // CL_HPP_TARGET_OPENCL_VERSION >= 120
826 #define __SET_MEM_OBJECT_DESTRUCTOR_CALLBACK_ERR CL_HPP_ERR_STR_(clSetMemObjectDestructorCallback)
828 #define __CREATE_USER_EVENT_ERR CL_HPP_ERR_STR_(clCreateUserEvent)
829 #define __SET_USER_EVENT_STATUS_ERR CL_HPP_ERR_STR_(clSetUserEventStatus)
830 #define __SET_EVENT_CALLBACK_ERR CL_HPP_ERR_STR_(clSetEventCallback)
831 #define __WAIT_FOR_EVENTS_ERR CL_HPP_ERR_STR_(clWaitForEvents)
833 #define __CREATE_KERNEL_ERR CL_HPP_ERR_STR_(clCreateKernel)
834 #define __SET_KERNEL_ARGS_ERR CL_HPP_ERR_STR_(clSetKernelArg)
835 #define __CREATE_PROGRAM_WITH_SOURCE_ERR CL_HPP_ERR_STR_(clCreateProgramWithSource)
836 #define __CREATE_PROGRAM_WITH_BINARY_ERR CL_HPP_ERR_STR_(clCreateProgramWithBinary)
837 #if CL_HPP_TARGET_OPENCL_VERSION >= 120
838 #define __CREATE_PROGRAM_WITH_BUILT_IN_KERNELS_ERR CL_HPP_ERR_STR_(clCreateProgramWithBuiltInKernels)
839 #endif // CL_HPP_TARGET_OPENCL_VERSION >= 120
840 #define __BUILD_PROGRAM_ERR CL_HPP_ERR_STR_(clBuildProgram)
841 #if CL_HPP_TARGET_OPENCL_VERSION >= 120
842 #define __COMPILE_PROGRAM_ERR CL_HPP_ERR_STR_(clCompileProgram)
843 #define __LINK_PROGRAM_ERR CL_HPP_ERR_STR_(clLinkProgram)
844 #endif // CL_HPP_TARGET_OPENCL_VERSION >= 120
845 #define __CREATE_KERNELS_IN_PROGRAM_ERR CL_HPP_ERR_STR_(clCreateKernelsInProgram)
847 #if CL_HPP_TARGET_OPENCL_VERSION >= 200
848 #define __CREATE_COMMAND_QUEUE_WITH_PROPERTIES_ERR CL_HPP_ERR_STR_(clCreateCommandQueueWithProperties)
849 #define __CREATE_SAMPLER_WITH_PROPERTIES_ERR CL_HPP_ERR_STR_(clCreateSamplerWithProperties)
850 #endif // CL_HPP_TARGET_OPENCL_VERSION >= 200
851 #define __SET_COMMAND_QUEUE_PROPERTY_ERR CL_HPP_ERR_STR_(clSetCommandQueueProperty)
852 #define __ENQUEUE_READ_BUFFER_ERR CL_HPP_ERR_STR_(clEnqueueReadBuffer)
853 #define __ENQUEUE_READ_BUFFER_RECT_ERR CL_HPP_ERR_STR_(clEnqueueReadBufferRect)
854 #define __ENQUEUE_WRITE_BUFFER_ERR CL_HPP_ERR_STR_(clEnqueueWriteBuffer)
855 #define __ENQUEUE_WRITE_BUFFER_RECT_ERR CL_HPP_ERR_STR_(clEnqueueWriteBufferRect)
856 #define __ENQEUE_COPY_BUFFER_ERR CL_HPP_ERR_STR_(clEnqueueCopyBuffer)
857 #define __ENQEUE_COPY_BUFFER_RECT_ERR CL_HPP_ERR_STR_(clEnqueueCopyBufferRect)
858 #define __ENQUEUE_FILL_BUFFER_ERR CL_HPP_ERR_STR_(clEnqueueFillBuffer)
859 #define __ENQUEUE_READ_IMAGE_ERR CL_HPP_ERR_STR_(clEnqueueReadImage)
860 #define __ENQUEUE_WRITE_IMAGE_ERR CL_HPP_ERR_STR_(clEnqueueWriteImage)
861 #define __ENQUEUE_COPY_IMAGE_ERR CL_HPP_ERR_STR_(clEnqueueCopyImage)
862 #define __ENQUEUE_FILL_IMAGE_ERR CL_HPP_ERR_STR_(clEnqueueFillImage)
863 #define __ENQUEUE_COPY_IMAGE_TO_BUFFER_ERR CL_HPP_ERR_STR_(clEnqueueCopyImageToBuffer)
864 #define __ENQUEUE_COPY_BUFFER_TO_IMAGE_ERR CL_HPP_ERR_STR_(clEnqueueCopyBufferToImage)
865 #define __ENQUEUE_MAP_BUFFER_ERR CL_HPP_ERR_STR_(clEnqueueMapBuffer)
866 #define __ENQUEUE_MAP_IMAGE_ERR CL_HPP_ERR_STR_(clEnqueueMapImage)
867 #define __ENQUEUE_UNMAP_MEM_OBJECT_ERR CL_HPP_ERR_STR_(clEnqueueUnMapMemObject)
868 #define __ENQUEUE_NDRANGE_KERNEL_ERR CL_HPP_ERR_STR_(clEnqueueNDRangeKernel)
869 #define __ENQUEUE_NATIVE_KERNEL CL_HPP_ERR_STR_(clEnqueueNativeKernel)
870 #if CL_HPP_TARGET_OPENCL_VERSION >= 120
871 #define __ENQUEUE_MIGRATE_MEM_OBJECTS_ERR CL_HPP_ERR_STR_(clEnqueueMigrateMemObjects)
872 #endif // CL_HPP_TARGET_OPENCL_VERSION >= 120
874 #define __ENQUEUE_ACQUIRE_GL_ERR CL_HPP_ERR_STR_(clEnqueueAcquireGLObjects)
875 #define __ENQUEUE_RELEASE_GL_ERR CL_HPP_ERR_STR_(clEnqueueReleaseGLObjects)
877 #define __CREATE_PIPE_ERR CL_HPP_ERR_STR_(clCreatePipe)
878 #define __GET_PIPE_INFO_ERR CL_HPP_ERR_STR_(clGetPipeInfo)
881 #define __RETAIN_ERR CL_HPP_ERR_STR_(Retain Object)
882 #define __RELEASE_ERR CL_HPP_ERR_STR_(Release Object)
883 #define __FLUSH_ERR CL_HPP_ERR_STR_(clFlush)
884 #define __FINISH_ERR CL_HPP_ERR_STR_(clFinish)
885 #define __VECTOR_CAPACITY_ERR CL_HPP_ERR_STR_(Vector capacity error)
888 * CL 1.2 version that uses device fission.
890 #if CL_HPP_TARGET_OPENCL_VERSION >= 120
891 #define __CREATE_SUB_DEVICES_ERR CL_HPP_ERR_STR_(clCreateSubDevices)
893 #define __CREATE_SUB_DEVICES_ERR CL_HPP_ERR_STR_(clCreateSubDevicesEXT)
894 #endif // CL_HPP_TARGET_OPENCL_VERSION >= 120
897 * Deprecated APIs for 1.2
899 #if defined(CL_USE_DEPRECATED_OPENCL_1_1_APIS)
900 #define __ENQUEUE_MARKER_ERR CL_HPP_ERR_STR_(clEnqueueMarker)
901 #define __ENQUEUE_WAIT_FOR_EVENTS_ERR CL_HPP_ERR_STR_(clEnqueueWaitForEvents)
902 #define __ENQUEUE_BARRIER_ERR CL_HPP_ERR_STR_(clEnqueueBarrier)
903 #define __UNLOAD_COMPILER_ERR CL_HPP_ERR_STR_(clUnloadCompiler)
904 #define __CREATE_GL_TEXTURE_2D_ERR CL_HPP_ERR_STR_(clCreateFromGLTexture2D)
905 #define __CREATE_GL_TEXTURE_3D_ERR CL_HPP_ERR_STR_(clCreateFromGLTexture3D)
906 #define __CREATE_IMAGE2D_ERR CL_HPP_ERR_STR_(clCreateImage2D)
907 #define __CREATE_IMAGE3D_ERR CL_HPP_ERR_STR_(clCreateImage3D)
908 #endif // #if defined(CL_USE_DEPRECATED_OPENCL_1_1_APIS)
911 * Deprecated APIs for 2.0
913 #if defined(CL_USE_DEPRECATED_OPENCL_1_2_APIS)
914 #define __CREATE_COMMAND_QUEUE_ERR CL_HPP_ERR_STR_(clCreateCommandQueue)
915 #define __ENQUEUE_TASK_ERR CL_HPP_ERR_STR_(clEnqueueTask)
916 #define __CREATE_SAMPLER_ERR CL_HPP_ERR_STR_(clCreateSampler)
917 #endif // #if defined(CL_USE_DEPRECATED_OPENCL_1_1_APIS)
920 * CL 1.2 marker and barrier commands
922 #if CL_HPP_TARGET_OPENCL_VERSION >= 120
923 #define __ENQUEUE_MARKER_WAIT_LIST_ERR CL_HPP_ERR_STR_(clEnqueueMarkerWithWaitList)
924 #define __ENQUEUE_BARRIER_WAIT_LIST_ERR CL_HPP_ERR_STR_(clEnqueueBarrierWithWaitList)
925 #endif // CL_HPP_TARGET_OPENCL_VERSION >= 120
927 #endif // CL_HPP_USER_OVERRIDE_ERROR_STRINGS
933 // Generic getInfoHelper. The final parameter is used to guide overload
934 // resolution: the actual parameter passed is an int, which makes this
935 // a worse conversion sequence than a specialization that declares the
936 // parameter as an int.
937 template<typename Functor, typename T>
938 inline cl_int getInfoHelper(Functor f, cl_uint name, T* param, long)
940 return f(name, sizeof(T), param, NULL);
943 // Specialized for getInfo<CL_PROGRAM_BINARIES>
944 // Assumes that the output vector was correctly resized on the way in
945 template <typename Func>
946 inline cl_int getInfoHelper(Func f, cl_uint name, vector<vector<unsigned char>>* param, int)
948 if (name != CL_PROGRAM_BINARIES) {
949 return CL_INVALID_VALUE;
952 // Create array of pointers, calculate total size and pass pointer array in
953 size_type numBinaries = param->size();
954 vector<unsigned char*> binariesPointers(numBinaries);
956 for (size_type i = 0; i < numBinaries; ++i)
958 binariesPointers[i] = (*param)[i].data();
961 cl_int err = f(name, numBinaries * sizeof(unsigned char*), binariesPointers.data(), NULL);
963 if (err != CL_SUCCESS) {
972 // Specialized getInfoHelper for vector params
973 template <typename Func, typename T>
974 inline cl_int getInfoHelper(Func f, cl_uint name, vector<T>* param, long)
977 cl_int err = f(name, 0, NULL, &required);
978 if (err != CL_SUCCESS) {
981 const size_type elements = required / sizeof(T);
983 // Temporary to avoid changing param on an error
984 vector<T> localData(elements);
985 err = f(name, required, localData.data(), NULL);
986 if (err != CL_SUCCESS) {
990 *param = std::move(localData);
996 /* Specialization for reference-counted types. This depends on the
997 * existence of Wrapper<T>::cl_type, and none of the other types having the
998 * cl_type member. Note that simplify specifying the parameter as Wrapper<T>
999 * does not work, because when using a derived type (e.g. Context) the generic
1000 * template will provide a better match.
1002 template <typename Func, typename T>
1003 inline cl_int getInfoHelper(
1004 Func f, cl_uint name, vector<T>* param, int, typename T::cl_type = 0)
1007 cl_int err = f(name, 0, NULL, &required);
1008 if (err != CL_SUCCESS) {
1012 const size_type elements = required / sizeof(typename T::cl_type);
1014 vector<typename T::cl_type> value(elements);
1015 err = f(name, required, value.data(), NULL);
1016 if (err != CL_SUCCESS) {
1021 // Assign to convert CL type to T for each element
1022 param->resize(elements);
1024 // Assign to param, constructing with retain behaviour
1025 // to correctly capture each underlying CL object
1026 for (size_type i = 0; i < elements; i++) {
1027 (*param)[i] = T(value[i], true);
1033 // Specialized GetInfoHelper for string params
1034 template <typename Func>
1035 inline cl_int getInfoHelper(Func f, cl_uint name, string* param, long)
1038 cl_int err = f(name, 0, NULL, &required);
1039 if (err != CL_SUCCESS) {
1043 // std::string has a constant data member
1044 // a char vector does not
1046 vector<char> value(required);
1047 err = f(name, required, value.data(), NULL);
1048 if (err != CL_SUCCESS) {
1052 param->assign(begin(value), prev(end(value)));
1061 // Specialized GetInfoHelper for clsize_t params
1062 template <typename Func, size_type N>
1063 inline cl_int getInfoHelper(Func f, cl_uint name, array<size_type, N>* param, long)
1066 cl_int err = f(name, 0, NULL, &required);
1067 if (err != CL_SUCCESS) {
1071 size_type elements = required / sizeof(size_type);
1072 vector<size_type> value(elements, 0);
1074 err = f(name, required, value.data(), NULL);
1075 if (err != CL_SUCCESS) {
1079 // Bound the copy with N to prevent overruns
1080 // if passed N > than the amount copied
1084 for (size_type i = 0; i < elements; ++i) {
1085 (*param)[i] = value[i];
1091 template<typename T> struct ReferenceHandler;
1093 /* Specialization for reference-counted types. This depends on the
1094 * existence of Wrapper<T>::cl_type, and none of the other types having the
1095 * cl_type member. Note that simplify specifying the parameter as Wrapper<T>
1096 * does not work, because when using a derived type (e.g. Context) the generic
1097 * template will provide a better match.
1099 template<typename Func, typename T>
1100 inline cl_int getInfoHelper(Func f, cl_uint name, T* param, int, typename T::cl_type = 0)
1102 typename T::cl_type value;
1103 cl_int err = f(name, sizeof(value), &value, NULL);
1104 if (err != CL_SUCCESS) {
1110 err = param->retain();
1111 if (err != CL_SUCCESS) {
// X-macro listing every OpenCL 1.0 (info-enum, query-token, result-type)
// triple; expanded below with CL_HPP_DECLARE_PARAM_TRAITS_ to generate
// the param_traits specializations used by the typed getInfo<> calls.
#define CL_HPP_PARAM_NAME_INFO_1_0_(F) \
    F(cl_platform_info, CL_PLATFORM_PROFILE, string) \
    F(cl_platform_info, CL_PLATFORM_VERSION, string) \
    F(cl_platform_info, CL_PLATFORM_NAME, string) \
    F(cl_platform_info, CL_PLATFORM_VENDOR, string) \
    F(cl_platform_info, CL_PLATFORM_EXTENSIONS, string) \
    \
    F(cl_device_info, CL_DEVICE_TYPE, cl_device_type) \
    F(cl_device_info, CL_DEVICE_VENDOR_ID, cl_uint) \
    F(cl_device_info, CL_DEVICE_MAX_COMPUTE_UNITS, cl_uint) \
    F(cl_device_info, CL_DEVICE_MAX_WORK_ITEM_DIMENSIONS, cl_uint) \
    F(cl_device_info, CL_DEVICE_MAX_WORK_GROUP_SIZE, size_type) \
    F(cl_device_info, CL_DEVICE_MAX_WORK_ITEM_SIZES, cl::vector<size_type>) \
    F(cl_device_info, CL_DEVICE_PREFERRED_VECTOR_WIDTH_CHAR, cl_uint) \
    F(cl_device_info, CL_DEVICE_PREFERRED_VECTOR_WIDTH_SHORT, cl_uint) \
    F(cl_device_info, CL_DEVICE_PREFERRED_VECTOR_WIDTH_INT, cl_uint) \
    F(cl_device_info, CL_DEVICE_PREFERRED_VECTOR_WIDTH_LONG, cl_uint) \
    F(cl_device_info, CL_DEVICE_PREFERRED_VECTOR_WIDTH_FLOAT, cl_uint) \
    F(cl_device_info, CL_DEVICE_PREFERRED_VECTOR_WIDTH_DOUBLE, cl_uint) \
    F(cl_device_info, CL_DEVICE_MAX_CLOCK_FREQUENCY, cl_uint) \
    F(cl_device_info, CL_DEVICE_ADDRESS_BITS, cl_uint) \
    F(cl_device_info, CL_DEVICE_MAX_READ_IMAGE_ARGS, cl_uint) \
    F(cl_device_info, CL_DEVICE_MAX_WRITE_IMAGE_ARGS, cl_uint) \
    F(cl_device_info, CL_DEVICE_MAX_MEM_ALLOC_SIZE, cl_ulong) \
    F(cl_device_info, CL_DEVICE_IMAGE2D_MAX_WIDTH, size_type) \
    F(cl_device_info, CL_DEVICE_IMAGE2D_MAX_HEIGHT, size_type) \
    F(cl_device_info, CL_DEVICE_IMAGE3D_MAX_WIDTH, size_type) \
    F(cl_device_info, CL_DEVICE_IMAGE3D_MAX_HEIGHT, size_type) \
    F(cl_device_info, CL_DEVICE_IMAGE3D_MAX_DEPTH, size_type) \
    F(cl_device_info, CL_DEVICE_IMAGE_SUPPORT, cl_bool) \
    F(cl_device_info, CL_DEVICE_MAX_PARAMETER_SIZE, size_type) \
    F(cl_device_info, CL_DEVICE_MAX_SAMPLERS, cl_uint) \
    F(cl_device_info, CL_DEVICE_MEM_BASE_ADDR_ALIGN, cl_uint) \
    F(cl_device_info, CL_DEVICE_MIN_DATA_TYPE_ALIGN_SIZE, cl_uint) \
    F(cl_device_info, CL_DEVICE_SINGLE_FP_CONFIG, cl_device_fp_config) \
    F(cl_device_info, CL_DEVICE_DOUBLE_FP_CONFIG, cl_device_fp_config) \
    F(cl_device_info, CL_DEVICE_HALF_FP_CONFIG, cl_device_fp_config) \
    F(cl_device_info, CL_DEVICE_GLOBAL_MEM_CACHE_TYPE, cl_device_mem_cache_type) \
    F(cl_device_info, CL_DEVICE_GLOBAL_MEM_CACHELINE_SIZE, cl_uint) \
    F(cl_device_info, CL_DEVICE_GLOBAL_MEM_CACHE_SIZE, cl_ulong) \
    F(cl_device_info, CL_DEVICE_GLOBAL_MEM_SIZE, cl_ulong) \
    F(cl_device_info, CL_DEVICE_MAX_CONSTANT_BUFFER_SIZE, cl_ulong) \
    F(cl_device_info, CL_DEVICE_MAX_CONSTANT_ARGS, cl_uint) \
    F(cl_device_info, CL_DEVICE_LOCAL_MEM_TYPE, cl_device_local_mem_type) \
    F(cl_device_info, CL_DEVICE_LOCAL_MEM_SIZE, cl_ulong) \
    F(cl_device_info, CL_DEVICE_ERROR_CORRECTION_SUPPORT, cl_bool) \
    F(cl_device_info, CL_DEVICE_PROFILING_TIMER_RESOLUTION, size_type) \
    F(cl_device_info, CL_DEVICE_ENDIAN_LITTLE, cl_bool) \
    F(cl_device_info, CL_DEVICE_AVAILABLE, cl_bool) \
    F(cl_device_info, CL_DEVICE_COMPILER_AVAILABLE, cl_bool) \
    F(cl_device_info, CL_DEVICE_EXECUTION_CAPABILITIES, cl_device_exec_capabilities) \
    F(cl_device_info, CL_DEVICE_PLATFORM, cl_platform_id) \
    F(cl_device_info, CL_DEVICE_NAME, string) \
    F(cl_device_info, CL_DEVICE_VENDOR, string) \
    F(cl_device_info, CL_DRIVER_VERSION, string) \
    F(cl_device_info, CL_DEVICE_PROFILE, string) \
    F(cl_device_info, CL_DEVICE_VERSION, string) \
    F(cl_device_info, CL_DEVICE_EXTENSIONS, string) \
    \
    F(cl_context_info, CL_CONTEXT_REFERENCE_COUNT, cl_uint) \
    F(cl_context_info, CL_CONTEXT_DEVICES, cl::vector<Device>) \
    F(cl_context_info, CL_CONTEXT_PROPERTIES, cl::vector<cl_context_properties>) \
    \
    F(cl_event_info, CL_EVENT_COMMAND_QUEUE, cl::CommandQueue) \
    F(cl_event_info, CL_EVENT_COMMAND_TYPE, cl_command_type) \
    F(cl_event_info, CL_EVENT_REFERENCE_COUNT, cl_uint) \
    F(cl_event_info, CL_EVENT_COMMAND_EXECUTION_STATUS, cl_int) \
    \
    F(cl_profiling_info, CL_PROFILING_COMMAND_QUEUED, cl_ulong) \
    F(cl_profiling_info, CL_PROFILING_COMMAND_SUBMIT, cl_ulong) \
    F(cl_profiling_info, CL_PROFILING_COMMAND_START, cl_ulong) \
    F(cl_profiling_info, CL_PROFILING_COMMAND_END, cl_ulong) \
    \
    F(cl_mem_info, CL_MEM_TYPE, cl_mem_object_type) \
    F(cl_mem_info, CL_MEM_FLAGS, cl_mem_flags) \
    F(cl_mem_info, CL_MEM_SIZE, size_type) \
    F(cl_mem_info, CL_MEM_HOST_PTR, void*) \
    F(cl_mem_info, CL_MEM_MAP_COUNT, cl_uint) \
    F(cl_mem_info, CL_MEM_REFERENCE_COUNT, cl_uint) \
    F(cl_mem_info, CL_MEM_CONTEXT, cl::Context) \
    \
    F(cl_image_info, CL_IMAGE_FORMAT, cl_image_format) \
    F(cl_image_info, CL_IMAGE_ELEMENT_SIZE, size_type) \
    F(cl_image_info, CL_IMAGE_ROW_PITCH, size_type) \
    F(cl_image_info, CL_IMAGE_SLICE_PITCH, size_type) \
    F(cl_image_info, CL_IMAGE_WIDTH, size_type) \
    F(cl_image_info, CL_IMAGE_HEIGHT, size_type) \
    F(cl_image_info, CL_IMAGE_DEPTH, size_type) \
    \
    F(cl_sampler_info, CL_SAMPLER_REFERENCE_COUNT, cl_uint) \
    F(cl_sampler_info, CL_SAMPLER_CONTEXT, cl::Context) \
    F(cl_sampler_info, CL_SAMPLER_NORMALIZED_COORDS, cl_bool) \
    F(cl_sampler_info, CL_SAMPLER_ADDRESSING_MODE, cl_addressing_mode) \
    F(cl_sampler_info, CL_SAMPLER_FILTER_MODE, cl_filter_mode) \
    \
    F(cl_program_info, CL_PROGRAM_REFERENCE_COUNT, cl_uint) \
    F(cl_program_info, CL_PROGRAM_CONTEXT, cl::Context) \
    F(cl_program_info, CL_PROGRAM_NUM_DEVICES, cl_uint) \
    F(cl_program_info, CL_PROGRAM_DEVICES, cl::vector<Device>) \
    F(cl_program_info, CL_PROGRAM_SOURCE, string) \
    F(cl_program_info, CL_PROGRAM_BINARY_SIZES, cl::vector<size_type>) \
    F(cl_program_info, CL_PROGRAM_BINARIES, cl::vector<cl::vector<unsigned char>>) \
    \
    F(cl_program_build_info, CL_PROGRAM_BUILD_STATUS, cl_build_status) \
    F(cl_program_build_info, CL_PROGRAM_BUILD_OPTIONS, string) \
    F(cl_program_build_info, CL_PROGRAM_BUILD_LOG, string) \
    \
    F(cl_kernel_info, CL_KERNEL_FUNCTION_NAME, string) \
    F(cl_kernel_info, CL_KERNEL_NUM_ARGS, cl_uint) \
    F(cl_kernel_info, CL_KERNEL_REFERENCE_COUNT, cl_uint) \
    F(cl_kernel_info, CL_KERNEL_CONTEXT, cl::Context) \
    F(cl_kernel_info, CL_KERNEL_PROGRAM, cl::Program) \
    \
    F(cl_kernel_work_group_info, CL_KERNEL_WORK_GROUP_SIZE, size_type) \
    F(cl_kernel_work_group_info, CL_KERNEL_COMPILE_WORK_GROUP_SIZE, cl::detail::size_t_array) \
    F(cl_kernel_work_group_info, CL_KERNEL_LOCAL_MEM_SIZE, cl_ulong) \
    \
    F(cl_command_queue_info, CL_QUEUE_CONTEXT, cl::Context) \
    F(cl_command_queue_info, CL_QUEUE_DEVICE, cl::Device) \
    F(cl_command_queue_info, CL_QUEUE_REFERENCE_COUNT, cl_uint) \
    F(cl_command_queue_info, CL_QUEUE_PROPERTIES, cl_command_queue_properties)
// X-macro for queries added in OpenCL 1.1.
#define CL_HPP_PARAM_NAME_INFO_1_1_(F) \
    F(cl_context_info, CL_CONTEXT_NUM_DEVICES, cl_uint) \
    F(cl_device_info, CL_DEVICE_PREFERRED_VECTOR_WIDTH_HALF, cl_uint) \
    F(cl_device_info, CL_DEVICE_NATIVE_VECTOR_WIDTH_CHAR, cl_uint) \
    F(cl_device_info, CL_DEVICE_NATIVE_VECTOR_WIDTH_SHORT, cl_uint) \
    F(cl_device_info, CL_DEVICE_NATIVE_VECTOR_WIDTH_INT, cl_uint) \
    F(cl_device_info, CL_DEVICE_NATIVE_VECTOR_WIDTH_LONG, cl_uint) \
    F(cl_device_info, CL_DEVICE_NATIVE_VECTOR_WIDTH_FLOAT, cl_uint) \
    F(cl_device_info, CL_DEVICE_NATIVE_VECTOR_WIDTH_DOUBLE, cl_uint) \
    F(cl_device_info, CL_DEVICE_NATIVE_VECTOR_WIDTH_HALF, cl_uint) \
    F(cl_device_info, CL_DEVICE_OPENCL_C_VERSION, string) \
    \
    F(cl_mem_info, CL_MEM_ASSOCIATED_MEMOBJECT, cl::Memory) \
    F(cl_mem_info, CL_MEM_OFFSET, size_type) \
    \
    F(cl_kernel_work_group_info, CL_KERNEL_PREFERRED_WORK_GROUP_SIZE_MULTIPLE, size_type) \
    F(cl_kernel_work_group_info, CL_KERNEL_PRIVATE_MEM_SIZE, cl_ulong) \
    \
    F(cl_event_info, CL_EVENT_CONTEXT, cl::Context)
// X-macro for queries added in OpenCL 1.2.
#define CL_HPP_PARAM_NAME_INFO_1_2_(F) \
    F(cl_program_info, CL_PROGRAM_NUM_KERNELS, size_type) \
    F(cl_program_info, CL_PROGRAM_KERNEL_NAMES, string) \
    \
    F(cl_program_build_info, CL_PROGRAM_BINARY_TYPE, cl_program_binary_type) \
    \
    F(cl_kernel_info, CL_KERNEL_ATTRIBUTES, string) \
    \
    F(cl_kernel_arg_info, CL_KERNEL_ARG_ADDRESS_QUALIFIER, cl_kernel_arg_address_qualifier) \
    F(cl_kernel_arg_info, CL_KERNEL_ARG_ACCESS_QUALIFIER, cl_kernel_arg_access_qualifier) \
    F(cl_kernel_arg_info, CL_KERNEL_ARG_TYPE_NAME, string) \
    F(cl_kernel_arg_info, CL_KERNEL_ARG_NAME, string) \
    F(cl_kernel_arg_info, CL_KERNEL_ARG_TYPE_QUALIFIER, cl_kernel_arg_type_qualifier) \
    \
    F(cl_device_info, CL_DEVICE_PARENT_DEVICE, cl::Device) \
    F(cl_device_info, CL_DEVICE_PARTITION_PROPERTIES, cl::vector<cl_device_partition_property>) \
    F(cl_device_info, CL_DEVICE_PARTITION_TYPE, cl::vector<cl_device_partition_property>) \
    F(cl_device_info, CL_DEVICE_REFERENCE_COUNT, cl_uint) \
    F(cl_device_info, CL_DEVICE_PREFERRED_INTEROP_USER_SYNC, size_type) \
    F(cl_device_info, CL_DEVICE_PARTITION_AFFINITY_DOMAIN, cl_device_affinity_domain) \
    F(cl_device_info, CL_DEVICE_BUILT_IN_KERNELS, string) \
    \
    F(cl_image_info, CL_IMAGE_ARRAY_SIZE, size_type) \
    F(cl_image_info, CL_IMAGE_NUM_MIP_LEVELS, cl_uint) \
    F(cl_image_info, CL_IMAGE_NUM_SAMPLES, cl_uint)
// X-macro for queries added in OpenCL 2.0.
#define CL_HPP_PARAM_NAME_INFO_2_0_(F) \
    F(cl_device_info, CL_DEVICE_QUEUE_ON_HOST_PROPERTIES, cl_command_queue_properties) \
    F(cl_device_info, CL_DEVICE_QUEUE_ON_DEVICE_PROPERTIES, cl_command_queue_properties) \
    F(cl_device_info, CL_DEVICE_QUEUE_ON_DEVICE_PREFERRED_SIZE, cl_uint) \
    F(cl_device_info, CL_DEVICE_QUEUE_ON_DEVICE_MAX_SIZE, cl_uint) \
    F(cl_device_info, CL_DEVICE_MAX_ON_DEVICE_QUEUES, cl_uint) \
    F(cl_device_info, CL_DEVICE_MAX_ON_DEVICE_EVENTS, cl_uint) \
    F(cl_device_info, CL_DEVICE_MAX_PIPE_ARGS, cl_uint) \
    F(cl_device_info, CL_DEVICE_PIPE_MAX_ACTIVE_RESERVATIONS, cl_uint) \
    F(cl_device_info, CL_DEVICE_PIPE_MAX_PACKET_SIZE, cl_uint) \
    F(cl_device_info, CL_DEVICE_SVM_CAPABILITIES, cl_device_svm_capabilities) \
    F(cl_device_info, CL_DEVICE_PREFERRED_PLATFORM_ATOMIC_ALIGNMENT, cl_uint) \
    F(cl_device_info, CL_DEVICE_PREFERRED_GLOBAL_ATOMIC_ALIGNMENT, cl_uint) \
    F(cl_device_info, CL_DEVICE_PREFERRED_LOCAL_ATOMIC_ALIGNMENT, cl_uint) \
    F(cl_command_queue_info, CL_QUEUE_SIZE, cl_uint) \
    F(cl_mem_info, CL_MEM_USES_SVM_POINTER, cl_bool) \
    F(cl_program_build_info, CL_PROGRAM_BUILD_GLOBAL_VARIABLE_TOTAL_SIZE, size_type) \
    F(cl_pipe_info, CL_PIPE_PACKET_SIZE, cl_uint) \
    F(cl_pipe_info, CL_PIPE_MAX_PACKETS, cl_uint)
// X-macro for the cl_ext_device_fission extension queries.
#define CL_HPP_PARAM_NAME_DEVICE_FISSION_(F) \
    F(cl_device_info, CL_DEVICE_PARENT_DEVICE_EXT, cl_device_id) \
    F(cl_device_info, CL_DEVICE_PARTITION_TYPES_EXT, cl::vector<cl_device_partition_property_ext>) \
    F(cl_device_info, CL_DEVICE_AFFINITY_DOMAINS_EXT, cl::vector<cl_device_partition_property_ext>) \
    F(cl_device_info, CL_DEVICE_REFERENCE_COUNT_EXT, cl_uint) \
    F(cl_device_info, CL_DEVICE_PARTITION_STYLE_EXT, cl::vector<cl_device_partition_property_ext>)
1314 template <typename enum_type, cl_int Name>
1315 struct param_traits {};
1317 #define CL_HPP_DECLARE_PARAM_TRAITS_(token, param_name, T) \
1320 struct param_traits<detail:: token,param_name> \
1322 enum { value = param_name }; \
1323 typedef T param_type; \
1326 CL_HPP_PARAM_NAME_INFO_1_0_(CL_HPP_DECLARE_PARAM_TRAITS_)
1327 #if CL_HPP_TARGET_OPENCL_VERSION >= 110
1328 CL_HPP_PARAM_NAME_INFO_1_1_(CL_HPP_DECLARE_PARAM_TRAITS_)
1329 #endif // CL_HPP_TARGET_OPENCL_VERSION >= 110
1330 #if CL_HPP_TARGET_OPENCL_VERSION >= 120
1331 CL_HPP_PARAM_NAME_INFO_1_2_(CL_HPP_DECLARE_PARAM_TRAITS_)
1332 #endif // CL_HPP_TARGET_OPENCL_VERSION >= 110
1333 #if CL_HPP_TARGET_OPENCL_VERSION >= 200
1334 CL_HPP_PARAM_NAME_INFO_2_0_(CL_HPP_DECLARE_PARAM_TRAITS_)
1335 #endif // CL_HPP_TARGET_OPENCL_VERSION >= 110
1338 // Flags deprecated in OpenCL 2.0
1339 #define CL_HPP_PARAM_NAME_INFO_1_0_DEPRECATED_IN_2_0_(F) \
1340 F(cl_device_info, CL_DEVICE_QUEUE_PROPERTIES, cl_command_queue_properties)
1342 #define CL_HPP_PARAM_NAME_INFO_1_1_DEPRECATED_IN_2_0_(F) \
1343 F(cl_device_info, CL_DEVICE_HOST_UNIFIED_MEMORY, cl_bool)
1345 #define CL_HPP_PARAM_NAME_INFO_1_2_DEPRECATED_IN_2_0_(F) \
1346 F(cl_image_info, CL_IMAGE_BUFFER, cl::Buffer)
1348 // Include deprecated query flags based on versions
1349 // Only include deprecated 1.0 flags if 2.0 not active as there is an enum clash
1350 #if CL_HPP_TARGET_OPENCL_VERSION > 100 && CL_HPP_MINIMUM_OPENCL_VERSION < 200 && CL_HPP_TARGET_OPENCL_VERSION < 200
1351 CL_HPP_PARAM_NAME_INFO_1_0_DEPRECATED_IN_2_0_(CL_HPP_DECLARE_PARAM_TRAITS_)
1352 #endif // CL_HPP_MINIMUM_OPENCL_VERSION < 110
1353 #if CL_HPP_TARGET_OPENCL_VERSION > 110 && CL_HPP_MINIMUM_OPENCL_VERSION < 200
1354 CL_HPP_PARAM_NAME_INFO_1_1_DEPRECATED_IN_2_0_(CL_HPP_DECLARE_PARAM_TRAITS_)
1355 #endif // CL_HPP_MINIMUM_OPENCL_VERSION < 120
1356 #if CL_HPP_TARGET_OPENCL_VERSION > 120 && CL_HPP_MINIMUM_OPENCL_VERSION < 200
1357 CL_HPP_PARAM_NAME_INFO_1_2_DEPRECATED_IN_2_0_(CL_HPP_DECLARE_PARAM_TRAITS_)
1358 #endif // CL_HPP_MINIMUM_OPENCL_VERSION < 200
1360 #if defined(CL_HPP_USE_CL_DEVICE_FISSION)
1361 CL_HPP_PARAM_NAME_DEVICE_FISSION_(CL_HPP_DECLARE_PARAM_TRAITS_);
1362 #endif // CL_HPP_USE_CL_DEVICE_FISSION
// Vendor/extension queries: declare traits only when the corresponding
// token is defined by the included CL headers.
#ifdef CL_PLATFORM_ICD_SUFFIX_KHR
CL_HPP_DECLARE_PARAM_TRAITS_(cl_platform_info, CL_PLATFORM_ICD_SUFFIX_KHR, string)
#endif

#ifdef CL_DEVICE_PROFILING_TIMER_OFFSET_AMD
CL_HPP_DECLARE_PARAM_TRAITS_(cl_device_info, CL_DEVICE_PROFILING_TIMER_OFFSET_AMD, cl_ulong)
#endif

#ifdef CL_DEVICE_GLOBAL_FREE_MEMORY_AMD
CL_HPP_DECLARE_PARAM_TRAITS_(cl_device_info, CL_DEVICE_GLOBAL_FREE_MEMORY_AMD, vector<size_type>)
#endif
#ifdef CL_DEVICE_SIMD_PER_COMPUTE_UNIT_AMD
CL_HPP_DECLARE_PARAM_TRAITS_(cl_device_info, CL_DEVICE_SIMD_PER_COMPUTE_UNIT_AMD, cl_uint)
#endif
#ifdef CL_DEVICE_SIMD_WIDTH_AMD
CL_HPP_DECLARE_PARAM_TRAITS_(cl_device_info, CL_DEVICE_SIMD_WIDTH_AMD, cl_uint)
#endif
#ifdef CL_DEVICE_SIMD_INSTRUCTION_WIDTH_AMD
CL_HPP_DECLARE_PARAM_TRAITS_(cl_device_info, CL_DEVICE_SIMD_INSTRUCTION_WIDTH_AMD, cl_uint)
#endif
#ifdef CL_DEVICE_WAVEFRONT_WIDTH_AMD
CL_HPP_DECLARE_PARAM_TRAITS_(cl_device_info, CL_DEVICE_WAVEFRONT_WIDTH_AMD, cl_uint)
#endif
#ifdef CL_DEVICE_GLOBAL_MEM_CHANNELS_AMD
CL_HPP_DECLARE_PARAM_TRAITS_(cl_device_info, CL_DEVICE_GLOBAL_MEM_CHANNELS_AMD, cl_uint)
#endif
#ifdef CL_DEVICE_GLOBAL_MEM_CHANNEL_BANKS_AMD
CL_HPP_DECLARE_PARAM_TRAITS_(cl_device_info, CL_DEVICE_GLOBAL_MEM_CHANNEL_BANKS_AMD, cl_uint)
#endif
#ifdef CL_DEVICE_GLOBAL_MEM_CHANNEL_BANK_WIDTH_AMD
CL_HPP_DECLARE_PARAM_TRAITS_(cl_device_info, CL_DEVICE_GLOBAL_MEM_CHANNEL_BANK_WIDTH_AMD, cl_uint)
#endif
#ifdef CL_DEVICE_LOCAL_MEM_SIZE_PER_COMPUTE_UNIT_AMD
CL_HPP_DECLARE_PARAM_TRAITS_(cl_device_info, CL_DEVICE_LOCAL_MEM_SIZE_PER_COMPUTE_UNIT_AMD, cl_uint)
#endif
#ifdef CL_DEVICE_LOCAL_MEM_BANKS_AMD
CL_HPP_DECLARE_PARAM_TRAITS_(cl_device_info, CL_DEVICE_LOCAL_MEM_BANKS_AMD, cl_uint)
#endif

#ifdef CL_DEVICE_COMPUTE_CAPABILITY_MAJOR_NV
CL_HPP_DECLARE_PARAM_TRAITS_(cl_device_info, CL_DEVICE_COMPUTE_CAPABILITY_MAJOR_NV, cl_uint)
#endif
#ifdef CL_DEVICE_COMPUTE_CAPABILITY_MINOR_NV
CL_HPP_DECLARE_PARAM_TRAITS_(cl_device_info, CL_DEVICE_COMPUTE_CAPABILITY_MINOR_NV, cl_uint)
#endif
#ifdef CL_DEVICE_REGISTERS_PER_BLOCK_NV
CL_HPP_DECLARE_PARAM_TRAITS_(cl_device_info, CL_DEVICE_REGISTERS_PER_BLOCK_NV, cl_uint)
#endif
#ifdef CL_DEVICE_WARP_SIZE_NV
CL_HPP_DECLARE_PARAM_TRAITS_(cl_device_info, CL_DEVICE_WARP_SIZE_NV, cl_uint)
#endif
#ifdef CL_DEVICE_GPU_OVERLAP_NV
CL_HPP_DECLARE_PARAM_TRAITS_(cl_device_info, CL_DEVICE_GPU_OVERLAP_NV, cl_bool)
#endif
#ifdef CL_DEVICE_KERNEL_EXEC_TIMEOUT_NV
CL_HPP_DECLARE_PARAM_TRAITS_(cl_device_info, CL_DEVICE_KERNEL_EXEC_TIMEOUT_NV, cl_bool)
#endif
#ifdef CL_DEVICE_INTEGRATED_MEMORY_NV
CL_HPP_DECLARE_PARAM_TRAITS_(cl_device_info, CL_DEVICE_INTEGRATED_MEMORY_NV, cl_bool)
#endif
1425 // Convenience functions
1427 template <typename Func, typename T>
1429 getInfo(Func f, cl_uint name, T* param)
1431 return getInfoHelper(f, name, param, 0);
1434 template <typename Func, typename Arg0>
1435 struct GetInfoFunctor0
1437 Func f_; const Arg0& arg0_;
1439 cl_uint param, size_type size, void* value, size_type* size_ret)
1440 { return f_(arg0_, param, size, value, size_ret); }
1443 template <typename Func, typename Arg0, typename Arg1>
1444 struct GetInfoFunctor1
1446 Func f_; const Arg0& arg0_; const Arg1& arg1_;
1448 cl_uint param, size_type size, void* value, size_type* size_ret)
1449 { return f_(arg0_, arg1_, param, size, value, size_ret); }
1452 template <typename Func, typename Arg0, typename T>
1454 getInfo(Func f, const Arg0& arg0, cl_uint name, T* param)
1456 GetInfoFunctor0<Func, Arg0> f0 = { f, arg0 };
1457 return getInfoHelper(f0, name, param, 0);
1460 template <typename Func, typename Arg0, typename Arg1, typename T>
1462 getInfo(Func f, const Arg0& arg0, const Arg1& arg1, cl_uint name, T* param)
1464 GetInfoFunctor1<Func, Arg0, Arg1> f0 = { f, arg0, arg1 };
1465 return getInfoHelper(f0, name, param, 0);
// Primary template: empty; every supported handle type provides a
// specialization with static retain()/release().
template<typename T>
struct ReferenceHandler
{ };
1473 #if CL_HPP_TARGET_OPENCL_VERSION >= 120
1475 * OpenCL 1.2 devices do have retain/release.
1478 struct ReferenceHandler<cl_device_id>
1481 * Retain the device.
1482 * \param device A valid device created using createSubDevices
1484 * CL_SUCCESS if the function executed successfully.
1485 * CL_INVALID_DEVICE if device was not a valid subdevice
1486 * CL_OUT_OF_RESOURCES
1487 * CL_OUT_OF_HOST_MEMORY
1489 static cl_int retain(cl_device_id device)
1490 { return ::clRetainDevice(device); }
1492 * Retain the device.
1493 * \param device A valid device created using createSubDevices
1495 * CL_SUCCESS if the function executed successfully.
1496 * CL_INVALID_DEVICE if device was not a valid subdevice
1497 * CL_OUT_OF_RESOURCES
1498 * CL_OUT_OF_HOST_MEMORY
1500 static cl_int release(cl_device_id device)
1501 { return ::clReleaseDevice(device); }
1503 #else // CL_HPP_TARGET_OPENCL_VERSION >= 120
1505 * OpenCL 1.1 devices do not have retain/release.
1508 struct ReferenceHandler<cl_device_id>
1510 // cl_device_id does not have retain().
1511 static cl_int retain(cl_device_id)
1512 { return CL_SUCCESS; }
1513 // cl_device_id does not have release().
1514 static cl_int release(cl_device_id)
1515 { return CL_SUCCESS; }
1517 #endif // ! (CL_HPP_TARGET_OPENCL_VERSION >= 120)
1520 struct ReferenceHandler<cl_platform_id>
1522 // cl_platform_id does not have retain().
1523 static cl_int retain(cl_platform_id)
1524 { return CL_SUCCESS; }
1525 // cl_platform_id does not have release().
1526 static cl_int release(cl_platform_id)
1527 { return CL_SUCCESS; }
1531 struct ReferenceHandler<cl_context>
1533 static cl_int retain(cl_context context)
1534 { return ::clRetainContext(context); }
1535 static cl_int release(cl_context context)
1536 { return ::clReleaseContext(context); }
1540 struct ReferenceHandler<cl_command_queue>
1542 static cl_int retain(cl_command_queue queue)
1543 { return ::clRetainCommandQueue(queue); }
1544 static cl_int release(cl_command_queue queue)
1545 { return ::clReleaseCommandQueue(queue); }
1549 struct ReferenceHandler<cl_mem>
1551 static cl_int retain(cl_mem memory)
1552 { return ::clRetainMemObject(memory); }
1553 static cl_int release(cl_mem memory)
1554 { return ::clReleaseMemObject(memory); }
1558 struct ReferenceHandler<cl_sampler>
1560 static cl_int retain(cl_sampler sampler)
1561 { return ::clRetainSampler(sampler); }
1562 static cl_int release(cl_sampler sampler)
1563 { return ::clReleaseSampler(sampler); }
1567 struct ReferenceHandler<cl_program>
1569 static cl_int retain(cl_program program)
1570 { return ::clRetainProgram(program); }
1571 static cl_int release(cl_program program)
1572 { return ::clReleaseProgram(program); }
1576 struct ReferenceHandler<cl_kernel>
1578 static cl_int retain(cl_kernel kernel)
1579 { return ::clRetainKernel(kernel); }
1580 static cl_int release(cl_kernel kernel)
1581 { return ::clReleaseKernel(kernel); }
1585 struct ReferenceHandler<cl_event>
1587 static cl_int retain(cl_event event)
1588 { return ::clRetainEvent(event); }
1589 static cl_int release(cl_event event)
1590 { return ::clReleaseEvent(event); }
#if CL_HPP_TARGET_OPENCL_VERSION >= 120 && CL_HPP_MINIMUM_OPENCL_VERSION < 120
// Extracts version number with major in the upper 16 bits, minor in the lower 16
static cl_uint getVersion(const vector<char> &versionInfo)
{
    int highVersion = 0;
    int lowVersion = 0;
    // Index 7 skips the mandatory "OpenCL " prefix of the version string.
    int index = 7;
    while(versionInfo[index] != '.' ) {
        highVersion *= 10;
        highVersion += versionInfo[index]-'0';
        ++index;
    }
    ++index;
    while(versionInfo[index] != ' ' && versionInfo[index] != '\0') {
        lowVersion *= 10;
        lowVersion += versionInfo[index]-'0';
        ++index;
    }
    return (highVersion << 16) | lowVersion;
}

// Queries CL_PLATFORM_VERSION and packs it via getVersion().
static cl_uint getPlatformVersion(cl_platform_id platform)
{
    size_type size = 0;
    clGetPlatformInfo(platform, CL_PLATFORM_VERSION, 0, NULL, &size);

    vector<char> versionInfo(size);
    clGetPlatformInfo(platform, CL_PLATFORM_VERSION, size, versionInfo.data(), &size);
    return getVersion(versionInfo);
}

// Version of the platform that owns the given device.
static cl_uint getDevicePlatformVersion(cl_device_id device)
{
    cl_platform_id platform;
    clGetDeviceInfo(device, CL_DEVICE_PLATFORM, sizeof(platform), &platform, NULL);
    return getPlatformVersion(platform);
}

// Version of the platform that owns the given context.
static cl_uint getContextPlatformVersion(cl_context context)
{
    // The platform cannot be queried directly, so we first have to grab a
    // device and obtain its platform
    size_type size = 0;
    clGetContextInfo(context, CL_CONTEXT_DEVICES, 0, NULL, &size);
    if (size == 0)
        return 0;
    vector<cl_device_id> devices(size/sizeof(cl_device_id));
    clGetContextInfo(context, CL_CONTEXT_DEVICES, size, devices.data(), NULL);
    return getDevicePlatformVersion(devices[0]);
}
#endif // CL_HPP_TARGET_OPENCL_VERSION >= 120 && CL_HPP_MINIMUM_OPENCL_VERSION < 120
1646 template <typename T>
1656 Wrapper() : object_(NULL) { }
1658 Wrapper(const cl_type &obj, bool retainObject) : object_(obj)
1661 detail::errHandler(retain(), __RETAIN_ERR);
1667 if (object_ != NULL) { release(); }
1670 Wrapper(const Wrapper<cl_type>& rhs)
1672 object_ = rhs.object_;
1673 detail::errHandler(retain(), __RETAIN_ERR);
1676 Wrapper(Wrapper<cl_type>&& rhs) CL_HPP_NOEXCEPT_
1678 object_ = rhs.object_;
1682 Wrapper<cl_type>& operator = (const Wrapper<cl_type>& rhs)
1685 detail::errHandler(release(), __RELEASE_ERR);
1686 object_ = rhs.object_;
1687 detail::errHandler(retain(), __RETAIN_ERR);
1692 Wrapper<cl_type>& operator = (Wrapper<cl_type>&& rhs)
1695 detail::errHandler(release(), __RELEASE_ERR);
1696 object_ = rhs.object_;
1702 Wrapper<cl_type>& operator = (const cl_type &rhs)
1704 detail::errHandler(release(), __RELEASE_ERR);
1709 const cl_type& operator ()() const { return object_; }
1711 cl_type& operator ()() { return object_; }
1713 const cl_type get() const { return object_; }
1715 cl_type get() { return object_; }
1719 template<typename Func, typename U>
1720 friend inline cl_int getInfoHelper(Func, cl_uint, U*, int, typename U::cl_type);
1722 cl_int retain() const
1724 if (object_ != nullptr) {
1725 return ReferenceHandler<cl_type>::retain(object_);
1732 cl_int release() const
1734 if (object_ != nullptr) {
1735 return ReferenceHandler<cl_type>::release(object_);
1744 class Wrapper<cl_device_id>
1747 typedef cl_device_id cl_type;
1751 bool referenceCountable_;
1753 static bool isReferenceCountable(cl_device_id device)
1755 bool retVal = false;
1756 #if CL_HPP_TARGET_OPENCL_VERSION >= 120
1757 #if CL_HPP_MINIMUM_OPENCL_VERSION < 120
1758 if (device != NULL) {
1759 int version = getDevicePlatformVersion(device);
1760 if(version > ((1 << 16) + 1)) {
1764 #else // CL_HPP_MINIMUM_OPENCL_VERSION < 120
1766 #endif // CL_HPP_MINIMUM_OPENCL_VERSION < 120
1767 #endif // CL_HPP_TARGET_OPENCL_VERSION >= 120
1772 Wrapper() : object_(NULL), referenceCountable_(false)
1776 Wrapper(const cl_type &obj, bool retainObject) :
1778 referenceCountable_(false)
1780 referenceCountable_ = isReferenceCountable(obj);
1783 detail::errHandler(retain(), __RETAIN_ERR);
1792 Wrapper(const Wrapper<cl_type>& rhs)
1794 object_ = rhs.object_;
1795 referenceCountable_ = isReferenceCountable(object_);
1796 detail::errHandler(retain(), __RETAIN_ERR);
1799 Wrapper(Wrapper<cl_type>&& rhs) CL_HPP_NOEXCEPT_
1801 object_ = rhs.object_;
1802 referenceCountable_ = rhs.referenceCountable_;
1804 rhs.referenceCountable_ = false;
1807 Wrapper<cl_type>& operator = (const Wrapper<cl_type>& rhs)
1810 detail::errHandler(release(), __RELEASE_ERR);
1811 object_ = rhs.object_;
1812 referenceCountable_ = rhs.referenceCountable_;
1813 detail::errHandler(retain(), __RETAIN_ERR);
1818 Wrapper<cl_type>& operator = (Wrapper<cl_type>&& rhs)
1821 detail::errHandler(release(), __RELEASE_ERR);
1822 object_ = rhs.object_;
1823 referenceCountable_ = rhs.referenceCountable_;
1825 rhs.referenceCountable_ = false;
1830 Wrapper<cl_type>& operator = (const cl_type &rhs)
1832 detail::errHandler(release(), __RELEASE_ERR);
1834 referenceCountable_ = isReferenceCountable(object_);
1838 const cl_type& operator ()() const { return object_; }
1840 cl_type& operator ()() { return object_; }
1842 cl_type get() const { return object_; }
1845 template<typename Func, typename U>
1846 friend inline cl_int getInfoHelper(Func, cl_uint, U*, int, typename U::cl_type);
1848 template<typename Func, typename U>
1849 friend inline cl_int getInfoHelper(Func, cl_uint, vector<U>*, int, typename U::cl_type);
1851 cl_int retain() const
1853 if( object_ != nullptr && referenceCountable_ ) {
1854 return ReferenceHandler<cl_type>::retain(object_);
1861 cl_int release() const
1863 if (object_ != nullptr && referenceCountable_) {
1864 return ReferenceHandler<cl_type>::release(object_);
1872 template <typename T>
1873 inline bool operator==(const Wrapper<T> &lhs, const Wrapper<T> &rhs)
1875 return lhs() == rhs();
1878 template <typename T>
1879 inline bool operator!=(const Wrapper<T> &lhs, const Wrapper<T> &rhs)
1881 return !operator==(lhs, rhs);
1884 } // namespace detail
1888 using BuildLogType = vector<std::pair<cl::Device, typename detail::param_traits<detail::cl_program_build_info, CL_PROGRAM_BUILD_LOG>::param_type>>;
1889 #if defined(CL_HPP_ENABLE_EXCEPTIONS)
1891 * Exception class for build errors to carry build info
1893 class BuildError : public Error
1896 BuildLogType buildLogs;
1898 BuildError(cl_int err, const char * errStr, const BuildLogType &vec) : Error(err, errStr), buildLogs(vec)
1902 BuildLogType getBuildLog() const
1908 static inline cl_int buildErrHandler(
1910 const char * errStr,
1911 const BuildLogType &buildLogs)
1913 if (err != CL_SUCCESS) {
1914 throw BuildError(err, errStr, buildLogs);
1918 } // namespace detail
1922 static inline cl_int buildErrHandler(
1924 const char * errStr,
1925 const BuildLogType &buildLogs)
1927 (void)buildLogs; // suppress unused variable warning
1931 } // namespace detail
1932 #endif // #if defined(CL_HPP_ENABLE_EXCEPTIONS)
1935 /*! \stuct ImageFormat
1936 * \brief Adds constructors and member functions for cl_image_format.
1938 * \see cl_image_format
1940 struct ImageFormat : public cl_image_format
1942 //! \brief Default constructor - performs no initialization.
1945 //! \brief Initializing constructor.
1946 ImageFormat(cl_channel_order order, cl_channel_type type)
1948 image_channel_order = order;
1949 image_channel_data_type = type;
1952 //! \brief Assignment operator.
1953 ImageFormat& operator = (const ImageFormat& rhs)
1956 this->image_channel_data_type = rhs.image_channel_data_type;
1957 this->image_channel_order = rhs.image_channel_order;
1963 /*! \brief Class interface for cl_device_id.
1965 * \note Copies of these objects are inexpensive, since they don't 'own'
1966 * any underlying resources or data structures.
// NOTE(review): this numbered listing has gaps (brace/blank/access-specifier
// lines appear to have been dropped, including the class's closing `};`) —
// verify against the canonical cl2.hpp before treating it as compilable.
1970 class Device : public detail::Wrapper<cl_device_id>
// Process-wide default-device state; initialized at most once via
// std::call_once in getDefault()/setDefault() below.
1973 static std::once_flag default_initialized_;
1974 static Device default_;
1975 static cl_int default_error_;
1977 /*! \brief Create the default context.
1979 * This sets @c default_ and @c default_error_. It does not throw
1982 static void makeDefault();
1984 /*! \brief Create the default platform from a provided platform.
1986 * This sets @c default_. It does not throw
1989 static void makeDefaultProvided(const Device &p) {
1994 #ifdef CL_HPP_UNIT_TEST_ENABLE
1995 /*! \brief Reset the default.
1997 * This sets @c default_ to an empty value to support cleanup in
1998 * the unit test framework.
1999 * This function is not thread safe.
2001 static void unitTestClearDefault() {
2002 default_ = Device();
2004 #endif // #ifdef CL_HPP_UNIT_TEST_ENABLE
2006 //! \brief Default constructor - initializes to NULL.
2007 Device() : detail::Wrapper<cl_type>() { }
2009 /*! \brief Constructor from cl_device_id.
2011 * This simply copies the device ID value, which is an inexpensive operation.
2013 explicit Device(const cl_device_id &device, bool retainObject = false) :
2014 detail::Wrapper<cl_type>(device, retainObject) { }
2016 /*! \brief Returns the first device on the default context.
2018 * \see Context::getDefault()
2020 static Device getDefault(
2021 cl_int *errResult = NULL)
// First caller triggers makeDefault(); later callers reuse the result.
2023 std::call_once(default_initialized_, makeDefault);
2024 detail::errHandler(default_error_);
2025 if (errResult != NULL) {
2026 *errResult = default_error_;
2032 * Modify the default device to be used by
2033 * subsequent operations.
2034 * Will only set the default if no default was previously created.
2035 * @return updated default device.
2036 * Should be compared to the passed value to ensure that it was updated.
2038 static Device setDefault(const Device &default_device)
2040 std::call_once(default_initialized_, makeDefaultProvided, std::cref(default_device));
2041 detail::errHandler(default_error_);
2045 /*! \brief Assignment operator from cl_device_id.
2047 * This simply copies the device ID value, which is an inexpensive operation.
2049 Device& operator = (const cl_device_id& rhs)
2051 detail::Wrapper<cl_type>::operator=(rhs);
2055 /*! \brief Copy constructor to forward copy to the superclass correctly.
2056 * Required for MSVC.
2058 Device(const Device& dev) : detail::Wrapper<cl_type>(dev) {}
2060 /*! \brief Copy assignment to forward copy to the superclass correctly.
2061 * Required for MSVC.
2063 Device& operator = (const Device &dev)
2065 detail::Wrapper<cl_type>::operator=(dev);
2069 /*! \brief Move constructor to forward move to the superclass correctly.
2070 * Required for MSVC.
2072 Device(Device&& dev) CL_HPP_NOEXCEPT_ : detail::Wrapper<cl_type>(std::move(dev)) {}
2074 /*! \brief Move assignment to forward move to the superclass correctly.
2075 * Required for MSVC.
2077 Device& operator = (Device &&dev)
2079 detail::Wrapper<cl_type>::operator=(std::move(dev));
2083 //! \brief Wrapper for clGetDeviceInfo().
2084 template <typename T>
2085 cl_int getInfo(cl_device_info name, T* param) const
2087 return detail::errHandler(
2088 detail::getInfo(&::clGetDeviceInfo, object_, name, param),
2089 __GET_DEVICE_INFO_ERR);
2092 //! \brief Wrapper for clGetDeviceInfo() that returns by value.
2093 template <cl_int name> typename
2094 detail::param_traits<detail::cl_device_info, name>::param_type
2095 getInfo(cl_int* err = NULL) const
2097 typename detail::param_traits<
2098 detail::cl_device_info, name>::param_type param;
// NOTE(review): '¶m' below is mojibake for '&param' ('&para' was decoded
// as the pilcrow sign during extraction); restore the original bytes.
2099 cl_int result = getInfo(name, ¶m);
2109 #if CL_HPP_TARGET_OPENCL_VERSION >= 120
2110 //! \brief Wrapper for clCreateSubDevices().
2111 cl_int createSubDevices(
2112 const cl_device_partition_property * properties,
2113 vector<Device>* devices)
// Two-phase query: first ask how many sub-devices, then fetch that many IDs.
2116 cl_int err = clCreateSubDevices(object_, properties, 0, NULL, &n);
2117 if (err != CL_SUCCESS) {
2118 return detail::errHandler(err, __CREATE_SUB_DEVICES_ERR);
2121 vector<cl_device_id> ids(n);
2122 err = clCreateSubDevices(object_, properties, n, ids.data(), NULL);
2123 if (err != CL_SUCCESS) {
2124 return detail::errHandler(err, __CREATE_SUB_DEVICES_ERR);
2127 // Cannot trivially assign because we need to capture intermediates
2128 // with safe construction
2130 devices->resize(ids.size());
2132 // Assign to param, constructing with retain behaviour
2133 // to correctly capture each underlying CL object
2134 for (size_type i = 0; i < ids.size(); i++) {
2135 // We do not need to retain because this device is being created
2137 (*devices)[i] = Device(ids[i], false);
2143 #elif defined(CL_HPP_USE_CL_DEVICE_FISSION)
2146 * CL 1.1 version that uses device fission extension.
2148 cl_int createSubDevices(
2149 const cl_device_partition_property_ext * properties,
2150 vector<Device>* devices)
// The extension entry point is resolved at runtime through its typedef'd
// function-pointer type, since it is not part of the core CL 1.1 API.
2152 typedef CL_API_ENTRY cl_int
2153 ( CL_API_CALL * PFN_clCreateSubDevicesEXT)(
2154 cl_device_id /*in_device*/,
2155 const cl_device_partition_property_ext * /* properties */,
2156 cl_uint /*num_entries*/,
2157 cl_device_id * /*out_devices*/,
2158 cl_uint * /*num_devices*/ ) CL_EXT_SUFFIX__VERSION_1_1;
2160 static PFN_clCreateSubDevicesEXT pfn_clCreateSubDevicesEXT = NULL;
2161 CL_HPP_INIT_CL_EXT_FCN_PTR_(clCreateSubDevicesEXT);
// Two-phase query, as in the core-API variant above.
2164 cl_int err = pfn_clCreateSubDevicesEXT(object_, properties, 0, NULL, &n);
2165 if (err != CL_SUCCESS) {
2166 return detail::errHandler(err, __CREATE_SUB_DEVICES_ERR);
2169 vector<cl_device_id> ids(n);
2170 err = pfn_clCreateSubDevicesEXT(object_, properties, n, ids.data(), NULL);
2171 if (err != CL_SUCCESS) {
2172 return detail::errHandler(err, __CREATE_SUB_DEVICES_ERR);
2174 // Cannot trivially assign because we need to capture intermediates
2175 // with safe construction
2177 devices->resize(ids.size());
2179 // Assign to param, constructing with retain behaviour
2180 // to correctly capture each underlying CL object
2181 for (size_type i = 0; i < ids.size(); i++) {
2182 // We do not need to retain because this device is being created
2184 (*devices)[i] = Device(ids[i], false);
2189 #endif // defined(CL_HPP_USE_CL_DEVICE_FISSION)
// Out-of-class definitions for Device's default-instance state (the macro
// expands to the appropriate storage specifier for header-only use).
2192 CL_HPP_DEFINE_STATIC_MEMBER_ std::once_flag Device::default_initialized_;
2193 CL_HPP_DEFINE_STATIC_MEMBER_ Device Device::default_;
2194 CL_HPP_DEFINE_STATIC_MEMBER_ cl_int Device::default_error_ = CL_SUCCESS;
2196 /*! \brief Class interface for cl_platform_id.
2198 * \note Copies of these objects are inexpensive, since they don't 'own'
2199 * any underlying resources or data structures.
2201 * \see cl_platform_id
// NOTE(review): this numbered listing has gaps (brace/blank/access-specifier
// lines appear to have been dropped) — verify against the canonical cl2.hpp
// before treating this excerpt as compilable.
2203 class Platform : public detail::Wrapper<cl_platform_id>
// Process-wide default-platform state; initialized at most once via
// std::call_once in getDefault()/setDefault() below.
2206 static std::once_flag default_initialized_;
2207 static Platform default_;
2208 static cl_int default_error_;
2210 /*! \brief Create the default context.
2212 * This sets @c default_ and @c default_error_. It does not throw
2215 static void makeDefault() {
2216 /* Throwing an exception from a call_once invocation does not do
2217 * what we wish, so we catch it and save the error.
2219 #if defined(CL_HPP_ENABLE_EXCEPTIONS)
2223 // If default wasn't passed, generate one
2227 cl_int err = ::clGetPlatformIDs(0, NULL, &n);
2228 if (err != CL_SUCCESS) {
2229 default_error_ = err;
// Zero platforms available counts as an error too.
2233 default_error_ = CL_INVALID_PLATFORM;
2237 vector<cl_platform_id> ids(n);
2238 err = ::clGetPlatformIDs(n, ids.data(), NULL);
2239 if (err != CL_SUCCESS) {
2240 default_error_ = err;
// First reported platform becomes the process default.
2244 default_ = Platform(ids[0]);
2246 #if defined(CL_HPP_ENABLE_EXCEPTIONS)
2247 catch (cl::Error &e) {
2248 default_error_ = e.err();
2253 /*! \brief Create the default platform from a provided platform.
2255 * This sets @c default_. It does not throw
2258 static void makeDefaultProvided(const Platform &p) {
2263 #ifdef CL_HPP_UNIT_TEST_ENABLE
2264 /*! \brief Reset the default.
2266 * This sets @c default_ to an empty value to support cleanup in
2267 * the unit test framework.
2268 * This function is not thread safe.
2270 static void unitTestClearDefault() {
2271 default_ = Platform();
2273 #endif // #ifdef CL_HPP_UNIT_TEST_ENABLE
2275 //! \brief Default constructor - initializes to NULL.
2276 Platform() : detail::Wrapper<cl_type>() { }
2278 /*! \brief Constructor from cl_platform_id.
2280 * \param retainObject will cause the constructor to retain its cl object.
2281 * Defaults to false to maintain compatibility with
2283 * This simply copies the platform ID value, which is an inexpensive operation.
2285 explicit Platform(const cl_platform_id &platform, bool retainObject = false) :
2286 detail::Wrapper<cl_type>(platform, retainObject) { }
2288 /*! \brief Assignment operator from cl_platform_id.
2290 * This simply copies the platform ID value, which is an inexpensive operation.
2292 Platform& operator = (const cl_platform_id& rhs)
2294 detail::Wrapper<cl_type>::operator=(rhs);
// Returns the process-wide default platform, creating it on first call.
2298 static Platform getDefault(
2299 cl_int *errResult = NULL)
2301 std::call_once(default_initialized_, makeDefault);
2302 detail::errHandler(default_error_);
2303 if (errResult != NULL) {
2304 *errResult = default_error_;
2310 * Modify the default platform to be used by
2311 * subsequent operations.
2312 * Will only set the default if no default was previously created.
2313 * @return updated default platform.
2314 * Should be compared to the passed value to ensure that it was updated.
2316 static Platform setDefault(const Platform &default_platform)
2318 std::call_once(default_initialized_, makeDefaultProvided, std::cref(default_platform));
2319 detail::errHandler(default_error_);
2323 //! \brief Wrapper for clGetPlatformInfo().
2324 cl_int getInfo(cl_platform_info name, string* param) const
2326 return detail::errHandler(
2327 detail::getInfo(&::clGetPlatformInfo, object_, name, param),
2328 __GET_PLATFORM_INFO_ERR);
2331 //! \brief Wrapper for clGetPlatformInfo() that returns by value.
2332 template <cl_int name> typename
2333 detail::param_traits<detail::cl_platform_info, name>::param_type
2334 getInfo(cl_int* err = NULL) const
2336 typename detail::param_traits<
2337 detail::cl_platform_info, name>::param_type param;
// NOTE(review): '¶m' below is mojibake for '&param' ('&para' was decoded
// as the pilcrow sign during extraction); restore the original bytes.
2338 cl_int result = getInfo(name, ¶m);
2345 /*! \brief Gets a list of devices for this platform.
2347 * Wraps clGetDeviceIDs().
2350 cl_device_type type,
2351 vector<Device>* devices) const
2354 if( devices == NULL ) {
2355 return detail::errHandler(CL_INVALID_ARG_VALUE, __GET_DEVICE_IDS_ERR);
// Two-phase query: first ask for the device count, then fetch the IDs.
2357 cl_int err = ::clGetDeviceIDs(object_, type, 0, NULL, &n);
2358 if (err != CL_SUCCESS) {
2359 return detail::errHandler(err, __GET_DEVICE_IDS_ERR);
2362 vector<cl_device_id> ids(n);
2363 err = ::clGetDeviceIDs(object_, type, n, ids.data(), NULL);
2364 if (err != CL_SUCCESS) {
2365 return detail::errHandler(err, __GET_DEVICE_IDS_ERR);
2368 // Cannot trivially assign because we need to capture intermediates
2369 // with safe construction
2370 // We must retain things we obtain from the API to avoid releasing
2371 // API-owned objects.
2373 devices->resize(ids.size());
2375 // Assign to param, constructing with retain behaviour
2376 // to correctly capture each underlying CL object
2377 for (size_type i = 0; i < ids.size(); i++) {
2378 (*devices)[i] = Device(ids[i], true);
2384 #if defined(CL_HPP_USE_DX_INTEROP)
2385 /*! \brief Get the list of available D3D10 devices.
2387 * \param d3d_device_source.
2389 * \param d3d_object.
2391 * \param d3d_device_set.
2393 * \param devices returns a vector of OpenCL D3D10 devices found. The cl::Device
2394 * values returned in devices can be used to identify a specific OpenCL
2395 * device. If \a devices argument is NULL, this argument is ignored.
2397 * \return One of the following values:
2398 * - CL_SUCCESS if the function is executed successfully.
2400 * The application can query specific capabilities of the OpenCL device(s)
2401 * returned by cl::getDevices. This can be used by the application to
2402 * determine which device(s) to use.
2404 * \note In the case that exceptions are enabled and a return value
2405 * other than CL_SUCCESS is generated, then cl::Error exception is
2409 cl_d3d10_device_source_khr d3d_device_source,
2411 cl_d3d10_device_set_khr d3d_device_set,
2412 vector<Device>* devices) const
// The KHR interop entry point must be fetched per-platform at runtime.
2414 typedef CL_API_ENTRY cl_int (CL_API_CALL *PFN_clGetDeviceIDsFromD3D10KHR)(
2415 cl_platform_id platform,
2416 cl_d3d10_device_source_khr d3d_device_source,
2418 cl_d3d10_device_set_khr d3d_device_set,
2419 cl_uint num_entries,
2420 cl_device_id * devices,
2421 cl_uint* num_devices);
2423 if( devices == NULL ) {
2424 return detail::errHandler(CL_INVALID_ARG_VALUE, __GET_DEVICE_IDS_ERR);
2427 static PFN_clGetDeviceIDsFromD3D10KHR pfn_clGetDeviceIDsFromD3D10KHR = NULL;
2428 CL_HPP_INIT_CL_EXT_FCN_PTR_PLATFORM_(object_, clGetDeviceIDsFromD3D10KHR);
// Two-phase query via the extension function pointer.
2431 cl_int err = pfn_clGetDeviceIDsFromD3D10KHR(
2439 if (err != CL_SUCCESS) {
2440 return detail::errHandler(err, __GET_DEVICE_IDS_ERR);
2443 vector<cl_device_id> ids(n);
2444 err = pfn_clGetDeviceIDsFromD3D10KHR(
2452 if (err != CL_SUCCESS) {
2453 return detail::errHandler(err, __GET_DEVICE_IDS_ERR);
2456 // Cannot trivially assign because we need to capture intermediates
2457 // with safe construction
2458 // We must retain things we obtain from the API to avoid releasing
2459 // API-owned objects.
2461 devices->resize(ids.size());
2463 // Assign to param, constructing with retain behaviour
2464 // to correctly capture each underlying CL object
2465 for (size_type i = 0; i < ids.size(); i++) {
2466 (*devices)[i] = Device(ids[i], true);
2473 /*! \brief Gets a list of available platforms.
2475 * Wraps clGetPlatformIDs().
2478 vector<Platform>* platforms)
2482 if( platforms == NULL ) {
2483 return detail::errHandler(CL_INVALID_ARG_VALUE, __GET_PLATFORM_IDS_ERR);
// Two-phase query: platform count first, then the IDs themselves.
2486 cl_int err = ::clGetPlatformIDs(0, NULL, &n);
2487 if (err != CL_SUCCESS) {
2488 return detail::errHandler(err, __GET_PLATFORM_IDS_ERR);
2491 vector<cl_platform_id> ids(n);
2492 err = ::clGetPlatformIDs(n, ids.data(), NULL);
2493 if (err != CL_SUCCESS) {
2494 return detail::errHandler(err, __GET_PLATFORM_IDS_ERR);
2498 platforms->resize(ids.size());
2500 // Platforms don't reference count
2501 for (size_type i = 0; i < ids.size(); i++) {
2502 (*platforms)[i] = Platform(ids[i]);
2508 /*! \brief Gets the first available platform.
2510 * Wraps clGetPlatformIDs(), returning the first result.
2513 Platform * platform)
2516 Platform default_platform = Platform::getDefault(&err);
2518 *platform = default_platform;
2523 /*! \brief Gets the first available platform, returning it by value.
2525 * \return Returns a valid platform if one is available.
2526 * If no platform is available will return a null platform.
2527 * Throws an exception if no platforms are available
2528 * or an error condition occurs.
2529 * Wraps clGetPlatformIDs(), returning the first result.
2531 static Platform get(
2532 cl_int * errResult = NULL)
2535 Platform default_platform = Platform::getDefault(&err);
2539 return default_platform;
2542 #if CL_HPP_TARGET_OPENCL_VERSION >= 120
2543 //! \brief Wrapper for clUnloadCompiler().
2547 return ::clUnloadPlatformCompiler(object_);
2549 #endif // CL_HPP_TARGET_OPENCL_VERSION >= 120
2550 }; // class Platform
// Out-of-class definitions for Platform's default-instance state (the macro
// expands to the appropriate storage specifier for header-only use).
2552 CL_HPP_DEFINE_STATIC_MEMBER_ std::once_flag Platform::default_initialized_;
2553 CL_HPP_DEFINE_STATIC_MEMBER_ Platform Platform::default_;
2554 CL_HPP_DEFINE_STATIC_MEMBER_ cl_int Platform::default_error_ = CL_SUCCESS;
2558 * Deprecated APIs for 1.2
2560 #if defined(CL_USE_DEPRECATED_OPENCL_1_1_APIS)
2562 * Unload the OpenCL compiler.
2563 * \note Deprecated for OpenCL 1.2. Use Platform::unloadCompiler instead.
2565 inline CL_EXT_PREFIX__VERSION_1_1_DEPRECATED cl_int
2566 UnloadCompiler() CL_EXT_SUFFIX__VERSION_1_1_DEPRECATED;
// Thin pass-through to the (deprecated) global clUnloadCompiler().
2570 return ::clUnloadCompiler();
2572 #endif // #if defined(CL_USE_DEPRECATED_OPENCL_1_1_APIS)
2574 /*! \brief Class interface for cl_context.
2576 * \note Copies of these objects are shallow, meaning that the copy will refer
2577 * to the same underlying cl_context as the original. For details, see
2578 * clRetainContext() and clReleaseContext().
// NOTE(review): this numbered listing has gaps (the `class Context` line,
// braces, blank lines and the closing `};` appear to have been dropped) —
// verify against the canonical cl2.hpp before treating it as compilable.
2583 : public detail::Wrapper<cl_context>
// Process-wide default-context state; initialized at most once via
// std::call_once in getDefault()/setDefault() below.
2586 static std::once_flag default_initialized_;
2587 static Context default_;
2588 static cl_int default_error_;
2590 /*! \brief Create the default context from the default device type in the default platform.
2592 * This sets @c default_ and @c default_error_. It does not throw
2595 static void makeDefault() {
2596 /* Throwing an exception from a call_once invocation does not do
2597 * what we wish, so we catch it and save the error.
2599 #if defined(CL_HPP_ENABLE_EXCEPTIONS)
// On non-Apple targets the context must name its platform explicitly.
2603 #if !defined(__APPLE__) && !defined(__MACOS)
2604 const Platform &p = Platform::getDefault();
2605 cl_platform_id defaultPlatform = p();
2606 cl_context_properties properties[3] = {
2607 CL_CONTEXT_PLATFORM, (cl_context_properties)defaultPlatform, 0
2609 #else // #if !defined(__APPLE__) && !defined(__MACOS)
2610 cl_context_properties *properties = nullptr;
2611 #endif // #if !defined(__APPLE__) && !defined(__MACOS)
2614 CL_DEVICE_TYPE_DEFAULT,
2620 #if defined(CL_HPP_ENABLE_EXCEPTIONS)
2621 catch (cl::Error &e) {
2622 default_error_ = e.err();
2628 /*! \brief Create the default context from a provided Context.
2630 * This sets @c default_. It does not throw
2633 static void makeDefaultProvided(const Context &c) {
2638 #ifdef CL_HPP_UNIT_TEST_ENABLE
2639 /*! \brief Reset the default.
2641 * This sets @c default_ to an empty value to support cleanup in
2642 * the unit test framework.
2643 * This function is not thread safe.
2645 static void unitTestClearDefault() {
2646 default_ = Context();
2648 #endif // #ifdef CL_HPP_UNIT_TEST_ENABLE
2650 /*! \brief Constructs a context including a list of specified devices.
2652 * Wraps clCreateContext().
2655 const vector<Device>& devices,
2656 cl_context_properties* properties = NULL,
2657 void (CL_CALLBACK * notifyFptr)(
// Flatten the Device wrappers into the raw cl_device_id array the C API needs.
2667 size_type numDevices = devices.size();
2668 vector<cl_device_id> deviceIDs(numDevices);
2670 for( size_type deviceIndex = 0; deviceIndex < numDevices; ++deviceIndex ) {
2671 deviceIDs[deviceIndex] = (devices[deviceIndex])();
2674 object_ = ::clCreateContext(
2675 properties, (cl_uint) numDevices,
2677 notifyFptr, data, &error);
2679 detail::errHandler(error, __CREATE_CONTEXT_ERR);
// Single-device convenience overload of the constructor above.
2686 const Device& device,
2687 cl_context_properties* properties = NULL,
2688 void (CL_CALLBACK * notifyFptr)(
2698 cl_device_id deviceID = device();
2700 object_ = ::clCreateContext(
2703 notifyFptr, data, &error);
2705 detail::errHandler(error, __CREATE_CONTEXT_ERR);
2711 /*! \brief Constructs a context including all or a subset of devices of a specified type.
2713 * Wraps clCreateContextFromType().
2716 cl_device_type type,
2717 cl_context_properties* properties = NULL,
2718 void (CL_CALLBACK * notifyFptr)(
2728 #if !defined(__APPLE__) && !defined(__MACOS)
2729 cl_context_properties prop[4] = {CL_CONTEXT_PLATFORM, 0, 0, 0 };
2731 if (properties == NULL) {
2732 // Get a valid platform ID as we cannot send in a blank one
2733 vector<Platform> platforms;
2734 error = Platform::get(&platforms);
2735 if (error != CL_SUCCESS) {
2736 detail::errHandler(error, __CREATE_CONTEXT_FROM_TYPE_ERR);
2743 // Check the platforms we found for a device of our specified type
2744 cl_context_properties platform_id = 0;
2745 for (unsigned int i = 0; i < platforms.size(); i++) {
2747 vector<Device> devices;
2749 #if defined(CL_HPP_ENABLE_EXCEPTIONS)
2753 error = platforms[i].getDevices(type, &devices);
2755 #if defined(CL_HPP_ENABLE_EXCEPTIONS)
2756 } catch (cl::Error& e) {
2759 // Catch if exceptions are enabled as we don't want to exit if first platform has no devices of type
2760 // We do error checking next anyway, and can throw there if needed
2763 // Only squash CL_SUCCESS and CL_DEVICE_NOT_FOUND
2764 if (error != CL_SUCCESS && error != CL_DEVICE_NOT_FOUND) {
2765 detail::errHandler(error, __CREATE_CONTEXT_FROM_TYPE_ERR);
// First platform that reports at least one matching device wins.
2771 if (devices.size() > 0) {
2772 platform_id = (cl_context_properties)platforms[i]();
2777 if (platform_id == 0) {
2778 detail::errHandler(CL_DEVICE_NOT_FOUND, __CREATE_CONTEXT_FROM_TYPE_ERR);
2780 *err = CL_DEVICE_NOT_FOUND;
2785 prop[1] = platform_id;
2786 properties = &prop[0];
2789 object_ = ::clCreateContextFromType(
2790 properties, type, notifyFptr, data, &error);
2792 detail::errHandler(error, __CREATE_CONTEXT_FROM_TYPE_ERR);
2798 /*! \brief Copy constructor to forward copy to the superclass correctly.
2799 * Required for MSVC.
2801 Context(const Context& ctx) : detail::Wrapper<cl_type>(ctx) {}
2803 /*! \brief Copy assignment to forward copy to the superclass correctly.
2804 * Required for MSVC.
2806 Context& operator = (const Context &ctx)
2808 detail::Wrapper<cl_type>::operator=(ctx);
2812 /*! \brief Move constructor to forward move to the superclass correctly.
2813 * Required for MSVC.
2815 Context(Context&& ctx) CL_HPP_NOEXCEPT_ : detail::Wrapper<cl_type>(std::move(ctx)) {}
2817 /*! \brief Move assignment to forward move to the superclass correctly.
2818 * Required for MSVC.
2820 Context& operator = (Context &&ctx)
2822 detail::Wrapper<cl_type>::operator=(std::move(ctx));
2827 /*! \brief Returns a singleton context including all devices of CL_DEVICE_TYPE_DEFAULT.
2829 * \note All calls to this function return the same cl_context as the first.
2831 static Context getDefault(cl_int * err = NULL)
2833 std::call_once(default_initialized_, makeDefault);
2834 detail::errHandler(default_error_);
2836 *err = default_error_;
2842 * Modify the default context to be used by
2843 * subsequent operations.
2844 * Will only set the default if no default was previously created.
2845 * @return updated default context.
2846 * Should be compared to the passed value to ensure that it was updated.
2848 static Context setDefault(const Context &default_context)
2850 std::call_once(default_initialized_, makeDefaultProvided, std::cref(default_context));
2851 detail::errHandler(default_error_);
2855 //! \brief Default constructor - initializes to NULL.
2856 Context() : detail::Wrapper<cl_type>() { }
2858 /*! \brief Constructor from cl_context - takes ownership.
2860 * This effectively transfers ownership of a refcount on the cl_context
2861 * into the new Context object.
2863 explicit Context(const cl_context& context, bool retainObject = false) :
2864 detail::Wrapper<cl_type>(context, retainObject) { }
2866 /*! \brief Assignment operator from cl_context - takes ownership.
2868 * This effectively transfers ownership of a refcount on the rhs and calls
2869 * clReleaseContext() on the value previously held by this instance.
2871 Context& operator = (const cl_context& rhs)
2873 detail::Wrapper<cl_type>::operator=(rhs);
2877 //! \brief Wrapper for clGetContextInfo().
2878 template <typename T>
2879 cl_int getInfo(cl_context_info name, T* param) const
2881 return detail::errHandler(
2882 detail::getInfo(&::clGetContextInfo, object_, name, param),
2883 __GET_CONTEXT_INFO_ERR);
2886 //! \brief Wrapper for clGetContextInfo() that returns by value.
2887 template <cl_int name> typename
2888 detail::param_traits<detail::cl_context_info, name>::param_type
2889 getInfo(cl_int* err = NULL) const
2891 typename detail::param_traits<
2892 detail::cl_context_info, name>::param_type param;
// NOTE(review): '¶m' below is mojibake for '&param' ('&para' was decoded
// as the pilcrow sign during extraction); restore the original bytes.
2893 cl_int result = getInfo(name, ¶m);
2900 /*! \brief Gets a list of supported image formats.
2902 * Wraps clGetSupportedImageFormats().
2904 cl_int getSupportedImageFormats(
2906 cl_mem_object_type type,
2907 vector<ImageFormat>* formats) const
// Two-phase query: count the supported formats, then fetch them.
2915 cl_int err = ::clGetSupportedImageFormats(
2922 if (err != CL_SUCCESS) {
2923 return detail::errHandler(err, __GET_SUPPORTED_IMAGE_FORMATS_ERR);
2926 if (numEntries > 0) {
2927 vector<ImageFormat> value(numEntries);
2928 err = ::clGetSupportedImageFormats(
2933 (cl_image_format*)value.data(),
2935 if (err != CL_SUCCESS) {
2936 return detail::errHandler(err, __GET_SUPPORTED_IMAGE_FORMATS_ERR);
2939 formats->assign(begin(value), end(value));
2942 // If no values are being returned, ensure an empty vector comes back
// Defined after Context because it derives the default device from the
// default context's device list (CL_CONTEXT_DEVICES[0]).
2950 inline void Device::makeDefault()
2952 /* Throwing an exception from a call_once invocation does not do
2953 * what we wish, so we catch it and save the error.
2955 #if defined(CL_HPP_ENABLE_EXCEPTIONS)
2961 Context context = Context::getDefault(&error);
2962 detail::errHandler(error, __CREATE_CONTEXT_ERR);
2964 if (error != CL_SUCCESS) {
2965 default_error_ = error;
// First device of the default context becomes the default device.
2968 default_ = context.getInfo<CL_CONTEXT_DEVICES>()[0];
2969 default_error_ = CL_SUCCESS;
2972 #if defined(CL_HPP_ENABLE_EXCEPTIONS)
2973 catch (cl::Error &e) {
2974 default_error_ = e.err();
// Out-of-class definitions for Context's default-instance state (the macro
// expands to the appropriate storage specifier for header-only use).
2979 CL_HPP_DEFINE_STATIC_MEMBER_ std::once_flag Context::default_initialized_;
2980 CL_HPP_DEFINE_STATIC_MEMBER_ Context Context::default_;
2981 CL_HPP_DEFINE_STATIC_MEMBER_ cl_int Context::default_error_ = CL_SUCCESS;
2983 /*! \brief Class interface for cl_event.
2985 * \note Copies of these objects are shallow, meaning that the copy will refer
2986 * to the same underlying cl_event as the original. For details, see
2987 * clRetainEvent() and clReleaseEvent().
// NOTE(review): this numbered listing has gaps (brace/blank lines and the
// class's closing `};` appear to have been dropped) — verify against the
// canonical cl2.hpp before treating this excerpt as compilable.
2991 class Event : public detail::Wrapper<cl_event>
2994 //! \brief Default constructor - initializes to NULL.
2995 Event() : detail::Wrapper<cl_type>() { }
2997 /*! \brief Constructor from cl_event - takes ownership.
2999 * \param retainObject will cause the constructor to retain its cl object.
3000 * Defaults to false to maintain compatibility with
3002 * This effectively transfers ownership of a refcount on the cl_event
3003 * into the new Event object.
3005 explicit Event(const cl_event& event, bool retainObject = false) :
3006 detail::Wrapper<cl_type>(event, retainObject) { }
3008 /*! \brief Assignment operator from cl_event - takes ownership.
3010 * This effectively transfers ownership of a refcount on the rhs and calls
3011 * clReleaseEvent() on the value previously held by this instance.
3013 Event& operator = (const cl_event& rhs)
3015 detail::Wrapper<cl_type>::operator=(rhs);
3019 //! \brief Wrapper for clGetEventInfo().
3020 template <typename T>
3021 cl_int getInfo(cl_event_info name, T* param) const
3023 return detail::errHandler(
3024 detail::getInfo(&::clGetEventInfo, object_, name, param),
3025 __GET_EVENT_INFO_ERR);
3028 //! \brief Wrapper for clGetEventInfo() that returns by value.
3029 template <cl_int name> typename
3030 detail::param_traits<detail::cl_event_info, name>::param_type
3031 getInfo(cl_int* err = NULL) const
3033 typename detail::param_traits<
3034 detail::cl_event_info, name>::param_type param;
// NOTE(review): '¶m' below is mojibake for '&param' ('&para' was decoded
// as the pilcrow sign during extraction); restore the original bytes.
3035 cl_int result = getInfo(name, ¶m);
3042 //! \brief Wrapper for clGetEventProfilingInfo().
3043 template <typename T>
3044 cl_int getProfilingInfo(cl_profiling_info name, T* param) const
3046 return detail::errHandler(detail::getInfo(
3047 &::clGetEventProfilingInfo, object_, name, param),
3048 __GET_EVENT_PROFILE_INFO_ERR);
3051 //! \brief Wrapper for clGetEventProfilingInfo() that returns by value.
3052 template <cl_int name> typename
3053 detail::param_traits<detail::cl_profiling_info, name>::param_type
3054 getProfilingInfo(cl_int* err = NULL) const
3056 typename detail::param_traits<
3057 detail::cl_profiling_info, name>::param_type param;
// NOTE(review): '¶m' below is the same '&param' mojibake as above.
3058 cl_int result = getProfilingInfo(name, ¶m);
3065 /*! \brief Blocks the calling thread until this event completes.
3067 * Wraps clWaitForEvents().
3071 return detail::errHandler(
3072 ::clWaitForEvents(1, &object_),
3073 __WAIT_FOR_EVENTS_ERR);
3076 #if CL_HPP_TARGET_OPENCL_VERSION >= 110
3077 /*! \brief Registers a user callback function for a specific command execution status.
3079 * Wraps clSetEventCallback().
3083 void (CL_CALLBACK * pfn_notify)(cl_event, cl_int, void *),
3084 void * user_data = NULL)
3086 return detail::errHandler(
3087 ::clSetEventCallback(
3092 __SET_EVENT_CALLBACK_ERR);
3094 #endif // CL_HPP_TARGET_OPENCL_VERSION >= 110
3096 /*! \brief Blocks the calling thread until every event specified is complete.
3098 * Wraps clWaitForEvents().
3101 waitForEvents(const vector<Event>& events)
3103 return detail::errHandler(
// Passing NULL for an empty list avoids dereferencing front() on an
// empty vector.
3105 (cl_uint) events.size(), (events.size() > 0) ? (cl_event*)&events.front() : NULL),
3106 __WAIT_FOR_EVENTS_ERR);
3110 #if CL_HPP_TARGET_OPENCL_VERSION >= 110
3111 /*! \brief Class interface for user events (a subset of cl_event's).
3113 * See Event for details about copy semantics, etc.
3115 class UserEvent : public Event
3118 /*! \brief Constructs a user event on a given context.
3120 * Wraps clCreateUserEvent().
3123 const Context& context,
3124 cl_int * err = NULL)
3127 object_ = ::clCreateUserEvent(
3131 detail::errHandler(error, __CREATE_USER_EVENT_ERR);
3137 //! \brief Default constructor - initializes to NULL.
3138 UserEvent() : Event() { }
3140 /*! \brief Sets the execution status of a user event object.
3142 * Wraps clSetUserEventStatus().
3144 cl_int setStatus(cl_int status)
3146 return detail::errHandler(
3147 ::clSetUserEventStatus(object_,status),
3148 __SET_USER_EVENT_STATUS_ERR);
3151 #endif // CL_HPP_TARGET_OPENCL_VERSION >= 110
3153 /*! \brief Blocks the calling thread until every event specified is complete.
3155 * Wraps clWaitForEvents().
// Free-function counterpart of Event::waitForEvents().
3157 inline static cl_int
3158 WaitForEvents(const vector<Event>& events)
3160 return detail::errHandler(
// Passing NULL for an empty list avoids dereferencing front() on an
// empty vector.
3162 (cl_uint) events.size(), (events.size() > 0) ? (cl_event*)&events.front() : NULL),
3163 __WAIT_FOR_EVENTS_ERR);
3166 /*! \brief Class interface for cl_mem.
3168 * \note Copies of these objects are shallow, meaning that the copy will refer
3169 * to the same underlying cl_mem as the original. For details, see
3170 * clRetainMemObject() and clReleaseMemObject().
// NOTE(review): this numbered listing has gaps (brace/blank lines and the
// class's closing `};` appear to have been dropped) — verify against the
// canonical cl2.hpp before treating this excerpt as compilable.
3174 class Memory : public detail::Wrapper<cl_mem>
3177 //! \brief Default constructor - initializes to NULL.
3178 Memory() : detail::Wrapper<cl_type>() { }
3180 /*! \brief Constructor from cl_mem - takes ownership.
3182 * Optionally transfer ownership of a refcount on the cl_mem
3183 * into the new Memory object.
3185 * \param retainObject will cause the constructor to retain its cl object.
3186 * Defaults to false to maintain compatibility with
3189 * See Memory for further details.
3191 explicit Memory(const cl_mem& memory, bool retainObject) :
3192 detail::Wrapper<cl_type>(memory, retainObject) { }
3194 /*! \brief Assignment operator from cl_mem - takes ownership.
3196 * This effectively transfers ownership of a refcount on the rhs and calls
3197 * clReleaseMemObject() on the value previously held by this instance.
3199 Memory& operator = (const cl_mem& rhs)
3201 detail::Wrapper<cl_type>::operator=(rhs);
3205 /*! \brief Copy constructor to forward copy to the superclass correctly.
3206 * Required for MSVC.
3208 Memory(const Memory& mem) : detail::Wrapper<cl_type>(mem) {}
3210 /*! \brief Copy assignment to forward copy to the superclass correctly.
3211 * Required for MSVC.
3213 Memory& operator = (const Memory &mem)
3215 detail::Wrapper<cl_type>::operator=(mem);
3219 /*! \brief Move constructor to forward move to the superclass correctly.
3220 * Required for MSVC.
3222 Memory(Memory&& mem) CL_HPP_NOEXCEPT_ : detail::Wrapper<cl_type>(std::move(mem)) {}
3224 /*! \brief Move assignment to forward move to the superclass correctly.
3225 * Required for MSVC.
3227 Memory& operator = (Memory &&mem)
3229 detail::Wrapper<cl_type>::operator=(std::move(mem));
3234 //! \brief Wrapper for clGetMemObjectInfo().
3235 template <typename T>
3236 cl_int getInfo(cl_mem_info name, T* param) const
3238 return detail::errHandler(
3239 detail::getInfo(&::clGetMemObjectInfo, object_, name, param),
3240 __GET_MEM_OBJECT_INFO_ERR);
3243 //! \brief Wrapper for clGetMemObjectInfo() that returns by value.
3244 template <cl_int name> typename
3245 detail::param_traits<detail::cl_mem_info, name>::param_type
3246 getInfo(cl_int* err = NULL) const
3248 typename detail::param_traits<
3249 detail::cl_mem_info, name>::param_type param;
// NOTE(review): '¶m' below is mojibake for '&param' ('&para' was decoded
// as the pilcrow sign during extraction); restore the original bytes.
3250 cl_int result = getInfo(name, ¶m);
3257 #if CL_HPP_TARGET_OPENCL_VERSION >= 110
3258 /*! \brief Registers a callback function to be called when the memory object
3259 * is no longer needed.
3261 * Wraps clSetMemObjectDestructorCallback().
3263 * Repeated calls to this function, for a given cl_mem value, will append
3264 * to the list of functions called (in reverse order) when memory object's
3265 * resources are freed and the memory object is deleted.
3268 * The registered callbacks are associated with the underlying cl_mem
3269 * value - not the Memory class instance.
3271 cl_int setDestructorCallback(
3272 void (CL_CALLBACK * pfn_notify)(cl_mem, void *),
3273 void * user_data = NULL)
3275 return detail::errHandler(
3276 ::clSetMemObjectDestructorCallback(
3280 __SET_MEM_OBJECT_DESTRUCTOR_CALLBACK_ERR);
3282 #endif // CL_HPP_TARGET_OPENCL_VERSION >= 110
3286 // Pre-declare copy functions
3288 template< typename IteratorType >
3289 cl_int copy( IteratorType startIterator, IteratorType endIterator, cl::Buffer &buffer );
3290 template< typename IteratorType >
3291 cl_int copy( const cl::Buffer &buffer, IteratorType startIterator, IteratorType endIterator );
3292 template< typename IteratorType >
3293 cl_int copy( const CommandQueue &queue, IteratorType startIterator, IteratorType endIterator, cl::Buffer &buffer );
3294 template< typename IteratorType >
3295 cl_int copy( const CommandQueue &queue, const cl::Buffer &buffer, IteratorType startIterator, IteratorType endIterator );
3298 #if CL_HPP_TARGET_OPENCL_VERSION >= 200
3304 static cl_svm_mem_flags getSVMMemFlags()
3309 } // namespace detail
3311 template<class Trait = detail::SVMTraitNull>
3312 class SVMTraitReadWrite
3315 static cl_svm_mem_flags getSVMMemFlags()
3317 return CL_MEM_READ_WRITE |
3318 Trait::getSVMMemFlags();
3322 template<class Trait = detail::SVMTraitNull>
3323 class SVMTraitReadOnly
3326 static cl_svm_mem_flags getSVMMemFlags()
3328 return CL_MEM_READ_ONLY |
3329 Trait::getSVMMemFlags();
3333 template<class Trait = detail::SVMTraitNull>
3334 class SVMTraitWriteOnly
3337 static cl_svm_mem_flags getSVMMemFlags()
3339 return CL_MEM_WRITE_ONLY |
3340 Trait::getSVMMemFlags();
3344 template<class Trait = SVMTraitReadWrite<>>
3345 class SVMTraitCoarse
3348 static cl_svm_mem_flags getSVMMemFlags()
3350 return Trait::getSVMMemFlags();
3354 template<class Trait = SVMTraitReadWrite<>>
3358 static cl_svm_mem_flags getSVMMemFlags()
3360 return CL_MEM_SVM_FINE_GRAIN_BUFFER |
3361 Trait::getSVMMemFlags();
3365 template<class Trait = SVMTraitReadWrite<>>
3366 class SVMTraitAtomic
3369 static cl_svm_mem_flags getSVMMemFlags()
3372 CL_MEM_SVM_FINE_GRAIN_BUFFER |
3373 CL_MEM_SVM_ATOMICS |
3374 Trait::getSVMMemFlags();
3378 // Pre-declare SVM map function
3379 template<typename T>
3380 inline cl_int enqueueMapSVM(
3385 const vector<Event>* events = NULL,
3386 Event* event = NULL);
3389 * STL-like allocator class for managing SVM objects provided for convenience.
3391 * Note that while this behaves like an allocator for the purposes of constructing vectors and similar objects,
3392 * care must be taken when using with smart pointers.
3393 * The allocator should not be used to construct a unique_ptr if we are using coarse-grained SVM mode because
3394 * the coarse-grained management behaviour would behave incorrectly with respect to reference counting.
3396 * Instead the allocator embeds a Deleter which may be used with unique_ptr and is used
3397 * with the allocate_shared and allocate_ptr supplied operations.
3399 template<typename T, class SVMTrait>
3400 class SVMAllocator {
3405 typedef T value_type;
3406 typedef value_type* pointer;
3407 typedef const value_type* const_pointer;
3408 typedef value_type& reference;
3409 typedef const value_type& const_reference;
3410 typedef std::size_t size_type;
3411 typedef std::ptrdiff_t difference_type;
3413 template<typename U>
3416 typedef SVMAllocator<U, SVMTrait> other;
3419 template<typename U, typename V>
3420 friend class SVMAllocator;
3423 context_(Context::getDefault())
3427 explicit SVMAllocator(cl::Context context) :
3433 SVMAllocator(const SVMAllocator &other) :
3434 context_(other.context_)
3438 template<typename U>
3439 SVMAllocator(const SVMAllocator<U, SVMTrait> &other) :
3440 context_(other.context_)
3448 pointer address(reference r) CL_HPP_NOEXCEPT_
3450 return std::addressof(r);
3453 const_pointer address(const_reference r) CL_HPP_NOEXCEPT_
3455 return std::addressof(r);
3459 * Allocate an SVM pointer.
3461 * If the allocator is coarse-grained, this will take ownership to allow
3462 * containers to correctly construct data in place.
3466 typename cl::SVMAllocator<void, SVMTrait>::const_pointer = 0)
3468 // Allocate memory with default alignment matching the size of the type
3472 SVMTrait::getSVMMemFlags(),
3475 pointer retValue = reinterpret_cast<pointer>(
3477 #if defined(CL_HPP_ENABLE_EXCEPTIONS)
3479 std::bad_alloc excep;
3482 #endif // #if defined(CL_HPP_ENABLE_EXCEPTIONS)
3484 // If allocation was coarse-grained then map it
3485 if (!(SVMTrait::getSVMMemFlags() & CL_MEM_SVM_FINE_GRAIN_BUFFER)) {
3486 cl_int err = enqueueMapSVM(retValue, CL_TRUE, CL_MAP_READ | CL_MAP_WRITE, size*sizeof(T));
3487 if (err != CL_SUCCESS) {
3488 std::bad_alloc excep;
3493 // If exceptions disabled, return null pointer from allocator
3497 void deallocate(pointer p, size_type)
3499 clSVMFree(context_(), p);
3503 * Return the maximum possible allocation size.
3504 * This is the minimum of the maximum sizes of all devices in the context.
3506 size_type max_size() const CL_HPP_NOEXCEPT_
3508 size_type maxSize = std::numeric_limits<size_type>::max() / sizeof(T);
3510 for (const Device &d : context_.getInfo<CL_CONTEXT_DEVICES>()) {
3513 static_cast<size_type>(d.getInfo<CL_DEVICE_MAX_MEM_ALLOC_SIZE>()));
3519 template< class U, class... Args >
3520 void construct(U* p, Args&&... args)
3532 * Returns true if the contexts match.
3534 inline bool operator==(SVMAllocator const& rhs)
3536 return (context_==rhs.context_);
3539 inline bool operator!=(SVMAllocator const& a)
3541 return !operator==(a);
3543 }; // class SVMAllocator return cl::pointer<T>(tmp, detail::Deleter<T, Alloc>{alloc, copies});
3546 template<class SVMTrait>
3547 class SVMAllocator<void, SVMTrait> {
3549 typedef void value_type;
3550 typedef value_type* pointer;
3551 typedef const value_type* const_pointer;
3553 template<typename U>
3556 typedef SVMAllocator<U, SVMTrait> other;
3559 template<typename U, typename V>
3560 friend class SVMAllocator;
3563 #if !defined(CL_HPP_NO_STD_UNIQUE_PTR)
3566 template<class Alloc>
3573 typedef typename std::allocator_traits<Alloc>::pointer pointer;
3575 Deleter(const Alloc &alloc, size_type copies) : alloc_{ alloc }, copies_{ copies }
3579 void operator()(pointer ptr) const {
3580 Alloc tmpAlloc{ alloc_ };
3581 std::allocator_traits<Alloc>::destroy(tmpAlloc, std::addressof(*ptr));
3582 std::allocator_traits<Alloc>::deallocate(tmpAlloc, ptr, copies_);
3585 } // namespace detail
3588 * Allocation operation compatible with std::allocate_ptr.
3589 * Creates a unique_ptr<T> by default.
3590 * This requirement is to ensure that the control block is not
3591 * allocated in memory inaccessible to the host.
3593 template <class T, class Alloc, class... Args>
3594 cl::pointer<T, detail::Deleter<Alloc>> allocate_pointer(const Alloc &alloc_, Args&&... args)
3596 Alloc alloc(alloc_);
3597 static const size_type copies = 1;
3599 // Ensure that creation of the management block and the
3600 // object are dealt with separately such that we only provide a deleter
3602 T* tmp = std::allocator_traits<Alloc>::allocate(alloc, copies);
3604 std::bad_alloc excep;
3608 std::allocator_traits<Alloc>::construct(
3610 std::addressof(*tmp),
3611 std::forward<Args>(args)...);
3613 return cl::pointer<T, detail::Deleter<Alloc>>(tmp, detail::Deleter<Alloc>{alloc, copies});
3615 catch (std::bad_alloc b)
3617 std::allocator_traits<Alloc>::deallocate(alloc, tmp, copies);
3622 template< class T, class SVMTrait, class... Args >
3623 cl::pointer<T, detail::Deleter<SVMAllocator<T, SVMTrait>>> allocate_svm(Args... args)
3625 SVMAllocator<T, SVMTrait> alloc;
3626 return cl::allocate_pointer<T>(alloc, args...);
3629 template< class T, class SVMTrait, class... Args >
3630 cl::pointer<T, detail::Deleter<SVMAllocator<T, SVMTrait>>> allocate_svm(const cl::Context &c, Args... args)
3632 SVMAllocator<T, SVMTrait> alloc(c);
3633 return cl::allocate_pointer<T>(alloc, args...);
3635 #endif // #if !defined(CL_HPP_NO_STD_UNIQUE_PTR)
3637 /*! \brief Vector alias to simplify contruction of coarse-grained SVM containers.
3640 template < class T >
3641 using coarse_svm_vector = vector<T, cl::SVMAllocator<int, cl::SVMTraitCoarse<>>>;
3643 /*! \brief Vector alias to simplify contruction of fine-grained SVM containers.
3646 template < class T >
3647 using fine_svm_vector = vector<T, cl::SVMAllocator<int, cl::SVMTraitFine<>>>;
3649 /*! \brief Vector alias to simplify contruction of fine-grained SVM containers that support platform atomics.
3652 template < class T >
3653 using atomic_svm_vector = vector<T, cl::SVMAllocator<int, cl::SVMTraitAtomic<>>>;
3655 #endif // #if CL_HPP_TARGET_OPENCL_VERSION >= 200
3658 /*! \brief Class interface for Buffer Memory Objects.
3660 * See Memory for details about copy semantics, etc.
3664 class Buffer : public Memory
3668 /*! \brief Constructs a Buffer in a specified context.
3670 * Wraps clCreateBuffer().
3672 * \param host_ptr Storage to be used if the CL_MEM_USE_HOST_PTR flag was
3673 * specified. Note alignment & exclusivity requirements.
3676 const Context& context,
3679 void* host_ptr = NULL,
3683 object_ = ::clCreateBuffer(context(), flags, size, host_ptr, &error);
3685 detail::errHandler(error, __CREATE_BUFFER_ERR);
3691 /*! \brief Constructs a Buffer in the default context.
3693 * Wraps clCreateBuffer().
3695 * \param host_ptr Storage to be used if the CL_MEM_USE_HOST_PTR flag was
3696 * specified. Note alignment & exclusivity requirements.
3698 * \see Context::getDefault()
3703 void* host_ptr = NULL,
3708 Context context = Context::getDefault(err);
3710 object_ = ::clCreateBuffer(context(), flags, size, host_ptr, &error);
3712 detail::errHandler(error, __CREATE_BUFFER_ERR);
3719 * \brief Construct a Buffer from a host container via iterators.
3720 * IteratorType must be random access.
3721 * If useHostPtr is specified iterators must represent contiguous data.
3723 template< typename IteratorType >
3725 IteratorType startIterator,
3726 IteratorType endIterator,
3728 bool useHostPtr = false,
3731 typedef typename std::iterator_traits<IteratorType>::value_type DataType;
3734 cl_mem_flags flags = 0;
3736 flags |= CL_MEM_READ_ONLY;
3739 flags |= CL_MEM_READ_WRITE;
3742 flags |= CL_MEM_USE_HOST_PTR;
3745 size_type size = sizeof(DataType)*(endIterator - startIterator);
3747 Context context = Context::getDefault(err);
3750 object_ = ::clCreateBuffer(context(), flags, size, static_cast<DataType*>(&*startIterator), &error);
3752 object_ = ::clCreateBuffer(context(), flags, size, 0, &error);
3755 detail::errHandler(error, __CREATE_BUFFER_ERR);
3761 error = cl::copy(startIterator, endIterator, *this);
3762 detail::errHandler(error, __CREATE_BUFFER_ERR);
3770 * \brief Construct a Buffer from a host container via iterators using a specified context.
3771 * IteratorType must be random access.
3772 * If useHostPtr is specified iterators must represent contiguous data.
3774 template< typename IteratorType >
3775 Buffer(const Context &context, IteratorType startIterator, IteratorType endIterator,
3776 bool readOnly, bool useHostPtr = false, cl_int* err = NULL);
3779 * \brief Construct a Buffer from a host container via iterators using a specified queue.
3780 * If useHostPtr is specified iterators must be random access.
3782 template< typename IteratorType >
3783 Buffer(const CommandQueue &queue, IteratorType startIterator, IteratorType endIterator,
3784 bool readOnly, bool useHostPtr = false, cl_int* err = NULL);
3786 //! \brief Default constructor - initializes to NULL.
3787 Buffer() : Memory() { }
3789 /*! \brief Constructor from cl_mem - takes ownership.
3791 * \param retainObject will cause the constructor to retain its cl object.
3792 * Defaults to false to maintain compatibility with earlier versions.
3794 * See Memory for further details.
3796 explicit Buffer(const cl_mem& buffer, bool retainObject = false) :
3797 Memory(buffer, retainObject) { }
3799 /*! \brief Assignment from cl_mem - performs shallow copy.
3801 * See Memory for further details.
3803 Buffer& operator = (const cl_mem& rhs)
3805 Memory::operator=(rhs);
3809 /*! \brief Copy constructor to forward copy to the superclass correctly.
3810 * Required for MSVC.
3812 Buffer(const Buffer& buf) : Memory(buf) {}
3814 /*! \brief Copy assignment to forward copy to the superclass correctly.
3815 * Required for MSVC.
3817 Buffer& operator = (const Buffer &buf)
3819 Memory::operator=(buf);
3823 /*! \brief Move constructor to forward move to the superclass correctly.
3824 * Required for MSVC.
3826 Buffer(Buffer&& buf) CL_HPP_NOEXCEPT_ : Memory(std::move(buf)) {}
3828 /*! \brief Move assignment to forward move to the superclass correctly.
3829 * Required for MSVC.
3831 Buffer& operator = (Buffer &&buf)
3833 Memory::operator=(std::move(buf));
3837 #if CL_HPP_TARGET_OPENCL_VERSION >= 110
3838 /*! \brief Creates a new buffer object from this.
3840 * Wraps clCreateSubBuffer().
3842 Buffer createSubBuffer(
3844 cl_buffer_create_type buffer_create_type,
3845 const void * buffer_create_info,
3846 cl_int * err = NULL)
3850 result.object_ = ::clCreateSubBuffer(
3857 detail::errHandler(error, __CREATE_SUBBUFFER_ERR);
3864 #endif // CL_HPP_TARGET_OPENCL_VERSION >= 110
#if defined (CL_HPP_USE_DX_INTEROP)
/*! \brief Class interface for creating OpenCL buffers from ID3D10Buffer's.
 *
 *  This is provided to facilitate interoperability with Direct3D.
 *
 *  See Memory for details about copy semantics, etc.
 *
 *  \see Memory
 */
class BufferD3D10 : public Buffer
{
public:

    /*! \brief Constructs a BufferD3D10, in a specified context, from a
     *         given ID3D10Buffer.
     *
     *  Wraps clCreateFromD3D10BufferKHR().
     */
    BufferD3D10(
        const Context& context,
        cl_mem_flags flags,
        ID3D10Buffer* bufobj,
        cl_int * err = NULL) : pfn_clCreateFromD3D10BufferKHR(nullptr)
    {
        // NOTE(review): the mem-initializer above references a
        // pfn_clCreateFromD3D10BufferKHR member that is not visible in this
        // chunk; the local below shadows it. Verify against the full header.
        typedef CL_API_ENTRY cl_mem (CL_API_CALL *PFN_clCreateFromD3D10BufferKHR)(
            cl_context context, cl_mem_flags flags, ID3D10Buffer* buffer,
            cl_int* errcode_ret);
        PFN_clCreateFromD3D10BufferKHR pfn_clCreateFromD3D10BufferKHR;
#if CL_HPP_TARGET_OPENCL_VERSION >= 120
        vector<cl_context_properties> props = context.getInfo<CL_CONTEXT_PROPERTIES>();
        // Fixed: was the non-existent type "cl_platform" initialized to -1.
        cl_platform_id platform = nullptr;
        for( size_type i = 0; i < props.size(); ++i ) {
            if( props[i] == CL_CONTEXT_PLATFORM ) {
                platform = reinterpret_cast<cl_platform_id>(props[i+1]);
            }
        }
        CL_HPP_INIT_CL_EXT_FCN_PTR_PLATFORM_(platform, clCreateFromD3D10BufferKHR);
#elif CL_HPP_TARGET_OPENCL_VERSION >= 110
        CL_HPP_INIT_CL_EXT_FCN_PTR_(clCreateFromD3D10BufferKHR);
#endif

        cl_int error;
        object_ = pfn_clCreateFromD3D10BufferKHR(
            context(),
            flags,
            bufobj,
            &error);

        detail::errHandler(error, __CREATE_GL_BUFFER_ERR);
        if (err != NULL) {
            *err = error;
        }
    }

    //! \brief Default constructor - initializes to NULL.
    BufferD3D10() : Buffer() { }

    /*! \brief Constructor from cl_mem - takes ownership.
     *
     * \param retainObject will cause the constructor to retain its cl object.
     *                     Defaults to false to maintain compatibility with
     *                     earlier versions.
     *
     *  See Memory for further details.
     */
    explicit BufferD3D10(const cl_mem& buffer, bool retainObject = false) :
        Buffer(buffer, retainObject) { }

    /*! \brief Assignment from cl_mem - performs shallow copy.
     *
     *  See Memory for further details.
     */
    BufferD3D10& operator = (const cl_mem& rhs)
    {
        Buffer::operator=(rhs);
        return *this;
    }

    /*! \brief Copy constructor to forward copy to the superclass correctly.
     *  Required for MSVC.
     */
    BufferD3D10(const BufferD3D10& buf) :
        Buffer(buf) {}

    /*! \brief Copy assignment to forward copy to the superclass correctly.
     *  Required for MSVC.
     */
    BufferD3D10& operator = (const BufferD3D10 &buf)
    {
        Buffer::operator=(buf);
        return *this;
    }

    /*! \brief Move constructor to forward move to the superclass correctly.
     *  Required for MSVC.
     */
    BufferD3D10(BufferD3D10&& buf) CL_HPP_NOEXCEPT_ : Buffer(std::move(buf)) {}

    /*! \brief Move assignment to forward move to the superclass correctly.
     *  Required for MSVC.
     */
    BufferD3D10& operator = (BufferD3D10 &&buf)
    {
        Buffer::operator=(std::move(buf));
        return *this;
    }
};
#endif // CL_HPP_USE_DX_INTEROP
3976 /*! \brief Class interface for GL Buffer Memory Objects.
3978 * This is provided to facilitate interoperability with OpenGL.
3980 * See Memory for details about copy semantics, etc.
3984 class BufferGL : public Buffer
3987 /*! \brief Constructs a BufferGL in a specified context, from a given
3990 * Wraps clCreateFromGLBuffer().
3993 const Context& context,
3996 cl_int * err = NULL)
3999 object_ = ::clCreateFromGLBuffer(
4005 detail::errHandler(error, __CREATE_GL_BUFFER_ERR);
4011 //! \brief Default constructor - initializes to NULL.
4012 BufferGL() : Buffer() { }
4014 /*! \brief Constructor from cl_mem - takes ownership.
4016 * \param retainObject will cause the constructor to retain its cl object.
4017 * Defaults to false to maintain compatibility with
4019 * See Memory for further details.
4021 explicit BufferGL(const cl_mem& buffer, bool retainObject = false) :
4022 Buffer(buffer, retainObject) { }
4024 /*! \brief Assignment from cl_mem - performs shallow copy.
4026 * See Memory for further details.
4028 BufferGL& operator = (const cl_mem& rhs)
4030 Buffer::operator=(rhs);
4034 /*! \brief Copy constructor to forward copy to the superclass correctly.
4035 * Required for MSVC.
4037 BufferGL(const BufferGL& buf) : Buffer(buf) {}
4039 /*! \brief Copy assignment to forward copy to the superclass correctly.
4040 * Required for MSVC.
4042 BufferGL& operator = (const BufferGL &buf)
4044 Buffer::operator=(buf);
4048 /*! \brief Move constructor to forward move to the superclass correctly.
4049 * Required for MSVC.
4051 BufferGL(BufferGL&& buf) CL_HPP_NOEXCEPT_ : Buffer(std::move(buf)) {}
4053 /*! \brief Move assignment to forward move to the superclass correctly.
4054 * Required for MSVC.
4056 BufferGL& operator = (BufferGL &&buf)
4058 Buffer::operator=(std::move(buf));
4062 //! \brief Wrapper for clGetGLObjectInfo().
4063 cl_int getObjectInfo(
4064 cl_gl_object_type *type,
4065 cl_GLuint * gl_object_name)
4067 return detail::errHandler(
4068 ::clGetGLObjectInfo(object_,type,gl_object_name),
4069 __GET_GL_OBJECT_INFO_ERR);
4073 /*! \brief Class interface for GL Render Buffer Memory Objects.
4075 * This is provided to facilitate interoperability with OpenGL.
4077 * See Memory for details about copy semantics, etc.
4081 class BufferRenderGL : public Buffer
4084 /*! \brief Constructs a BufferRenderGL in a specified context, from a given
4087 * Wraps clCreateFromGLRenderbuffer().
4090 const Context& context,
4093 cl_int * err = NULL)
4096 object_ = ::clCreateFromGLRenderbuffer(
4102 detail::errHandler(error, __CREATE_GL_RENDER_BUFFER_ERR);
4108 //! \brief Default constructor - initializes to NULL.
4109 BufferRenderGL() : Buffer() { }
4111 /*! \brief Constructor from cl_mem - takes ownership.
4113 * \param retainObject will cause the constructor to retain its cl object.
4114 * Defaults to false to maintain compatibility with
4116 * See Memory for further details.
4118 explicit BufferRenderGL(const cl_mem& buffer, bool retainObject = false) :
4119 Buffer(buffer, retainObject) { }
4121 /*! \brief Assignment from cl_mem - performs shallow copy.
4123 * See Memory for further details.
4125 BufferRenderGL& operator = (const cl_mem& rhs)
4127 Buffer::operator=(rhs);
4131 /*! \brief Copy constructor to forward copy to the superclass correctly.
4132 * Required for MSVC.
4134 BufferRenderGL(const BufferRenderGL& buf) : Buffer(buf) {}
4136 /*! \brief Copy assignment to forward copy to the superclass correctly.
4137 * Required for MSVC.
4139 BufferRenderGL& operator = (const BufferRenderGL &buf)
4141 Buffer::operator=(buf);
4145 /*! \brief Move constructor to forward move to the superclass correctly.
4146 * Required for MSVC.
4148 BufferRenderGL(BufferRenderGL&& buf) CL_HPP_NOEXCEPT_ : Buffer(std::move(buf)) {}
4150 /*! \brief Move assignment to forward move to the superclass correctly.
4151 * Required for MSVC.
4153 BufferRenderGL& operator = (BufferRenderGL &&buf)
4155 Buffer::operator=(std::move(buf));
4159 //! \brief Wrapper for clGetGLObjectInfo().
4160 cl_int getObjectInfo(
4161 cl_gl_object_type *type,
4162 cl_GLuint * gl_object_name)
4164 return detail::errHandler(
4165 ::clGetGLObjectInfo(object_,type,gl_object_name),
4166 __GET_GL_OBJECT_INFO_ERR);
4170 /*! \brief C++ base class for Image Memory objects.
4172 * See Memory for details about copy semantics, etc.
4176 class Image : public Memory
4179 //! \brief Default constructor - initializes to NULL.
4180 Image() : Memory() { }
4182 /*! \brief Constructor from cl_mem - takes ownership.
4184 * \param retainObject will cause the constructor to retain its cl object.
4185 * Defaults to false to maintain compatibility with
4187 * See Memory for further details.
4189 explicit Image(const cl_mem& image, bool retainObject = false) :
4190 Memory(image, retainObject) { }
4192 /*! \brief Assignment from cl_mem - performs shallow copy.
4194 * See Memory for further details.
4196 Image& operator = (const cl_mem& rhs)
4198 Memory::operator=(rhs);
4202 /*! \brief Copy constructor to forward copy to the superclass correctly.
4203 * Required for MSVC.
4205 Image(const Image& img) : Memory(img) {}
4207 /*! \brief Copy assignment to forward copy to the superclass correctly.
4208 * Required for MSVC.
4210 Image& operator = (const Image &img)
4212 Memory::operator=(img);
4216 /*! \brief Move constructor to forward move to the superclass correctly.
4217 * Required for MSVC.
4219 Image(Image&& img) CL_HPP_NOEXCEPT_ : Memory(std::move(img)) {}
4221 /*! \brief Move assignment to forward move to the superclass correctly.
4222 * Required for MSVC.
4224 Image& operator = (Image &&img)
4226 Memory::operator=(std::move(img));
4232 //! \brief Wrapper for clGetImageInfo().
4233 template <typename T>
4234 cl_int getImageInfo(cl_image_info name, T* param) const
4236 return detail::errHandler(
4237 detail::getInfo(&::clGetImageInfo, object_, name, param),
4238 __GET_IMAGE_INFO_ERR);
4241 //! \brief Wrapper for clGetImageInfo() that returns by value.
4242 template <cl_int name> typename
4243 detail::param_traits<detail::cl_image_info, name>::param_type
4244 getImageInfo(cl_int* err = NULL) const
4246 typename detail::param_traits<
4247 detail::cl_image_info, name>::param_type param;
4248 cl_int result = getImageInfo(name, ¶m);
4256 #if CL_HPP_TARGET_OPENCL_VERSION >= 120
4257 /*! \brief Class interface for 1D Image Memory objects.
4259 * See Memory for details about copy semantics, etc.
4263 class Image1D : public Image
4266 /*! \brief Constructs a 1D Image in a specified context.
4268 * Wraps clCreateImage().
4271 const Context& context,
4275 void* host_ptr = NULL,
4279 cl_image_desc desc =
4281 CL_MEM_OBJECT_IMAGE1D,
4283 0, 0, 0, 0, 0, 0, 0, 0
4285 object_ = ::clCreateImage(
4293 detail::errHandler(error, __CREATE_IMAGE_ERR);
4299 //! \brief Default constructor - initializes to NULL.
4302 /*! \brief Constructor from cl_mem - takes ownership.
4304 * \param retainObject will cause the constructor to retain its cl object.
4305 * Defaults to false to maintain compatibility with
4307 * See Memory for further details.
4309 explicit Image1D(const cl_mem& image1D, bool retainObject = false) :
4310 Image(image1D, retainObject) { }
4312 /*! \brief Assignment from cl_mem - performs shallow copy.
4314 * See Memory for further details.
4316 Image1D& operator = (const cl_mem& rhs)
4318 Image::operator=(rhs);
4322 /*! \brief Copy constructor to forward copy to the superclass correctly.
4323 * Required for MSVC.
4325 Image1D(const Image1D& img) : Image(img) {}
4327 /*! \brief Copy assignment to forward copy to the superclass correctly.
4328 * Required for MSVC.
4330 Image1D& operator = (const Image1D &img)
4332 Image::operator=(img);
4336 /*! \brief Move constructor to forward move to the superclass correctly.
4337 * Required for MSVC.
4339 Image1D(Image1D&& img) CL_HPP_NOEXCEPT_ : Image(std::move(img)) {}
4341 /*! \brief Move assignment to forward move to the superclass correctly.
4342 * Required for MSVC.
4344 Image1D& operator = (Image1D &&img)
4346 Image::operator=(std::move(img));
4352 /*! \class Image1DBuffer
4353 * \brief Image interface for 1D buffer images.
4355 class Image1DBuffer : public Image
4359 const Context& context,
4363 const Buffer &buffer,
4367 cl_image_desc desc =
4369 CL_MEM_OBJECT_IMAGE1D_BUFFER,
4371 0, 0, 0, 0, 0, 0, 0,
4374 object_ = ::clCreateImage(
4382 detail::errHandler(error, __CREATE_IMAGE_ERR);
4390 /*! \brief Constructor from cl_mem - takes ownership.
4392 * \param retainObject will cause the constructor to retain its cl object.
4393 * Defaults to false to maintain compatibility with
4395 * See Memory for further details.
4397 explicit Image1DBuffer(const cl_mem& image1D, bool retainObject = false) :
4398 Image(image1D, retainObject) { }
4400 Image1DBuffer& operator = (const cl_mem& rhs)
4402 Image::operator=(rhs);
4406 /*! \brief Copy constructor to forward copy to the superclass correctly.
4407 * Required for MSVC.
4409 Image1DBuffer(const Image1DBuffer& img) : Image(img) {}
4411 /*! \brief Copy assignment to forward copy to the superclass correctly.
4412 * Required for MSVC.
4414 Image1DBuffer& operator = (const Image1DBuffer &img)
4416 Image::operator=(img);
4420 /*! \brief Move constructor to forward move to the superclass correctly.
4421 * Required for MSVC.
4423 Image1DBuffer(Image1DBuffer&& img) CL_HPP_NOEXCEPT_ : Image(std::move(img)) {}
4425 /*! \brief Move assignment to forward move to the superclass correctly.
4426 * Required for MSVC.
4428 Image1DBuffer& operator = (Image1DBuffer &&img)
4430 Image::operator=(std::move(img));
4436 /*! \class Image1DArray
4437 * \brief Image interface for arrays of 1D images.
4439 class Image1DArray : public Image
4443 const Context& context,
4446 size_type arraySize,
4449 void* host_ptr = NULL,
4453 cl_image_desc desc =
4455 CL_MEM_OBJECT_IMAGE1D_ARRAY,
4457 0, 0, // height, depth (unused)
4462 object_ = ::clCreateImage(
4470 detail::errHandler(error, __CREATE_IMAGE_ERR);
4478 /*! \brief Constructor from cl_mem - takes ownership.
4480 * \param retainObject will cause the constructor to retain its cl object.
4481 * Defaults to false to maintain compatibility with
4483 * See Memory for further details.
4485 explicit Image1DArray(const cl_mem& imageArray, bool retainObject = false) :
4486 Image(imageArray, retainObject) { }
4489 Image1DArray& operator = (const cl_mem& rhs)
4491 Image::operator=(rhs);
4495 /*! \brief Copy constructor to forward copy to the superclass correctly.
4496 * Required for MSVC.
4498 Image1DArray(const Image1DArray& img) : Image(img) {}
4500 /*! \brief Copy assignment to forward copy to the superclass correctly.
4501 * Required for MSVC.
4503 Image1DArray& operator = (const Image1DArray &img)
4505 Image::operator=(img);
4509 /*! \brief Move constructor to forward move to the superclass correctly.
4510 * Required for MSVC.
4512 Image1DArray(Image1DArray&& img) CL_HPP_NOEXCEPT_ : Image(std::move(img)) {}
4514 /*! \brief Move assignment to forward move to the superclass correctly.
4515 * Required for MSVC.
4517 Image1DArray& operator = (Image1DArray &&img)
4519 Image::operator=(std::move(img));
4524 #endif // #if CL_HPP_TARGET_OPENCL_VERSION >= 120
4527 /*! \brief Class interface for 2D Image Memory objects.
4529 * See Memory for details about copy semantics, etc.
4533 class Image2D : public Image
4536 /*! \brief Constructs a 2D Image in a specified context.
4538 * Wraps clCreateImage().
4541 const Context& context,
4546 size_type row_pitch = 0,
4547 void* host_ptr = NULL,
4551 bool useCreateImage;
4553 #if CL_HPP_TARGET_OPENCL_VERSION >= 120 && CL_HPP_MINIMUM_OPENCL_VERSION < 120
4554 // Run-time decision based on the actual platform
4556 cl_uint version = detail::getContextPlatformVersion(context());
4557 useCreateImage = (version >= 0x10002); // OpenCL 1.2 or above
4559 #elif CL_HPP_TARGET_OPENCL_VERSION >= 120
4560 useCreateImage = true;
4562 useCreateImage = false;
4565 #if CL_HPP_TARGET_OPENCL_VERSION >= 120
4568 cl_image_desc desc =
4570 CL_MEM_OBJECT_IMAGE2D,
4573 0, 0, // depth, array size (unused)
4577 object_ = ::clCreateImage(
4585 detail::errHandler(error, __CREATE_IMAGE_ERR);
4590 #endif // CL_HPP_TARGET_OPENCL_VERSION >= 120
4591 #if CL_HPP_MINIMUM_OPENCL_VERSION < 120
4592 if (!useCreateImage)
4594 object_ = ::clCreateImage2D(
4595 context(), flags,&format, width, height, row_pitch, host_ptr, &error);
4597 detail::errHandler(error, __CREATE_IMAGE2D_ERR);
4602 #endif // CL_HPP_MINIMUM_OPENCL_VERSION < 120
4605 #if CL_HPP_TARGET_OPENCL_VERSION >= 200 || defined(CL_HPP_USE_CL_IMAGE2D_FROM_BUFFER_KHR)
4606 /*! \brief Constructs a 2D Image from a buffer.
4607 * \note This will share storage with the underlying buffer.
4609 * Wraps clCreateImage().
4612 const Context& context,
4614 const Buffer &sourceBuffer,
4617 size_type row_pitch = 0,
4618 cl_int* err = nullptr)
4622 cl_image_desc desc =
4624 CL_MEM_OBJECT_IMAGE2D,
4627 0, 0, // depth, array size (unused)
4630 // Use buffer as input to image
4633 object_ = ::clCreateImage(
4635 0, // flags inherited from buffer
4641 detail::errHandler(error, __CREATE_IMAGE_ERR);
4642 if (err != nullptr) {
4646 #endif //#if CL_HPP_TARGET_OPENCL_VERSION >= 200 || defined(CL_HPP_USE_CL_IMAGE2D_FROM_BUFFER_KHR)
4648 #if CL_HPP_TARGET_OPENCL_VERSION >= 200
4649 /*! \brief Constructs a 2D Image from an image.
4650 * \note This will share storage with the underlying image but may
4651 * reinterpret the channel order and type.
4653 * The image will be created matching with a descriptor matching the source.
4655 * \param order is the channel order to reinterpret the image data as.
4656 * The channel order may differ as described in the OpenCL
4657 * 2.0 API specification.
4659 * Wraps clCreateImage().
4662 const Context& context,
4663 cl_channel_order order,
4664 const Image &sourceImage,
4665 cl_int* err = nullptr)
4669 // Descriptor fields have to match source image
4670 size_type sourceWidth =
4671 sourceImage.getImageInfo<CL_IMAGE_WIDTH>();
4672 size_type sourceHeight =
4673 sourceImage.getImageInfo<CL_IMAGE_HEIGHT>();
4674 size_type sourceRowPitch =
4675 sourceImage.getImageInfo<CL_IMAGE_ROW_PITCH>();
4676 cl_uint sourceNumMIPLevels =
4677 sourceImage.getImageInfo<CL_IMAGE_NUM_MIP_LEVELS>();
4678 cl_uint sourceNumSamples =
4679 sourceImage.getImageInfo<CL_IMAGE_NUM_SAMPLES>();
4680 cl_image_format sourceFormat =
4681 sourceImage.getImageInfo<CL_IMAGE_FORMAT>();
4683 // Update only the channel order.
4684 // Channel format inherited from source.
4685 sourceFormat.image_channel_order = order;
4686 cl_image_desc desc =
4688 CL_MEM_OBJECT_IMAGE2D,
4691 0, 0, // depth (unused), array size (unused)
4693 0, // slice pitch (unused)
4696 // Use buffer as input to image
4699 object_ = ::clCreateImage(
4701 0, // flags should be inherited from mem_object
4707 detail::errHandler(error, __CREATE_IMAGE_ERR);
4708 if (err != nullptr) {
4712 #endif //#if CL_HPP_TARGET_OPENCL_VERSION >= 200
4714 //! \brief Default constructor - initializes to NULL.
4717 /*! \brief Constructor from cl_mem - takes ownership.
4719 * \param retainObject will cause the constructor to retain its cl object.
4720 * Defaults to false to maintain compatibility with
4722 * See Memory for further details.
4724 explicit Image2D(const cl_mem& image2D, bool retainObject = false) :
4725 Image(image2D, retainObject) { }
4727 /*! \brief Assignment from cl_mem - performs shallow copy.
4729 * See Memory for further details.
4731 Image2D& operator = (const cl_mem& rhs)
4733 Image::operator=(rhs);
4737 /*! \brief Copy constructor to forward copy to the superclass correctly.
4738 * Required for MSVC.
4740 Image2D(const Image2D& img) : Image(img) {}
4742 /*! \brief Copy assignment to forward copy to the superclass correctly.
4743 * Required for MSVC.
4745 Image2D& operator = (const Image2D &img)
4747 Image::operator=(img);
4751 /*! \brief Move constructor to forward move to the superclass correctly.
4752 * Required for MSVC.
4754 Image2D(Image2D&& img) CL_HPP_NOEXCEPT_ : Image(std::move(img)) {}
4756 /*! \brief Move assignment to forward move to the superclass correctly.
4757 * Required for MSVC.
4759 Image2D& operator = (Image2D &&img)
4761 Image::operator=(std::move(img));
// NOTE(review): deprecated OpenCL 1.1 GL-interop wrapper; construction goes
// through clCreateFromGLTexture2D(). Interior lines (ctor parameters, braces,
// "return *this;") are elided in this extract — keep byte-for-byte in sync
// with upstream cl2.hpp.
4768 #if defined(CL_USE_DEPRECATED_OPENCL_1_1_APIS)
4769 /*! \brief Class interface for GL 2D Image Memory objects.
4771 * This is provided to facilitate interoperability with OpenGL.
4773 * See Memory for details about copy semantics, etc.
4776 * \note Deprecated for OpenCL 1.2. Please use ImageGL instead.
4778 class CL_EXT_PREFIX__VERSION_1_1_DEPRECATED Image2DGL : public Image2D
4781 /*! \brief Constructs an Image2DGL in a specified context, from a given
4784 * Wraps clCreateFromGLTexture2D().
4787 const Context& context,
4792 cl_int * err = NULL)
4795 object_ = ::clCreateFromGLTexture2D(
4803 detail::errHandler(error, __CREATE_GL_TEXTURE_2D_ERR);
4810 //! \brief Default constructor - initializes to NULL.
4811 Image2DGL() : Image2D() { }
4813 /*! \brief Constructor from cl_mem - takes ownership.
4815 * \param retainObject will cause the constructor to retain its cl object.
4816 * Defaults to false to maintain compatibility with
4818 * See Memory for further details.
4820 explicit Image2DGL(const cl_mem& image, bool retainObject = false) :
4821 Image2D(image, retainObject) { }
4823 /*! \brief Assignment from cl_mem - performs shallow copy.
4825 * See Memory for further details.
4827 Image2DGL& operator = (const cl_mem& rhs)
4829 Image2D::operator=(rhs);
4833 /*! \brief Copy constructor to forward copy to the superclass correctly.
4834 * Required for MSVC.
4836 Image2DGL(const Image2DGL& img) : Image2D(img) {}
4838 /*! \brief Copy assignment to forward copy to the superclass correctly.
4839 * Required for MSVC.
4841 Image2DGL& operator = (const Image2DGL &img)
4843 Image2D::operator=(img);
4847 /*! \brief Move constructor to forward move to the superclass correctly.
4848 * Required for MSVC.
4850 Image2DGL(Image2DGL&& img) CL_HPP_NOEXCEPT_ : Image2D(std::move(img)) {}
4852 /*! \brief Move assignment to forward move to the superclass correctly.
4853 * Required for MSVC.
4855 Image2DGL& operator = (Image2DGL &&img)
4857 Image2D::operator=(std::move(img));
4861 } CL_EXT_SUFFIX__VERSION_1_1_DEPRECATED;
4862 #endif // CL_USE_DEPRECATED_OPENCL_1_1_APIS
// NOTE(review): OpenCL 1.2+ 2D image array, created via clCreateImage() with
// a CL_MEM_OBJECT_IMAGE2D_ARRAY descriptor. The descriptor initializer and
// the clCreateImage argument list are partially elided in this extract —
// verify against upstream cl2.hpp.
4864 #if CL_HPP_TARGET_OPENCL_VERSION >= 120
4865 /*! \class Image2DArray
4866 * \brief Image interface for arrays of 2D images.
4868 class Image2DArray : public Image
4872 const Context& context,
4875 size_type arraySize,
4879 size_type slicePitch,
4880 void* host_ptr = NULL,
4884 cl_image_desc desc =
4886 CL_MEM_OBJECT_IMAGE2D_ARRAY,
4889 0, // depth (unused)
4895 object_ = ::clCreateImage(
4903 detail::errHandler(error, __CREATE_IMAGE_ERR);
4911 /*! \brief Constructor from cl_mem - takes ownership.
4913 * \param retainObject will cause the constructor to retain its cl object.
4914 * Defaults to false to maintain compatibility with
4916 * See Memory for further details.
4918 explicit Image2DArray(const cl_mem& imageArray, bool retainObject = false) : Image(imageArray, retainObject) { }
4920 Image2DArray& operator = (const cl_mem& rhs)
4922 Image::operator=(rhs);
4926 /*! \brief Copy constructor to forward copy to the superclass correctly.
4927 * Required for MSVC.
4929 Image2DArray(const Image2DArray& img) : Image(img) {}
4931 /*! \brief Copy assignment to forward copy to the superclass correctly.
4932 * Required for MSVC.
4934 Image2DArray& operator = (const Image2DArray &img)
4936 Image::operator=(img);
4940 /*! \brief Move constructor to forward move to the superclass correctly.
4941 * Required for MSVC.
4943 Image2DArray(Image2DArray&& img) CL_HPP_NOEXCEPT_ : Image(std::move(img)) {}
4945 /*! \brief Move assignment to forward move to the superclass correctly.
4946 * Required for MSVC.
4948 Image2DArray& operator = (Image2DArray &&img)
4950 Image::operator=(std::move(img));
4954 #endif // #if CL_HPP_TARGET_OPENCL_VERSION >= 120
// NOTE(review): 3D image wrapper. The main constructor selects at
// compile/run time between the OpenCL 1.2 clCreateImage() path and the
// legacy clCreateImage3D() path, based on the platform version reported by
// detail::getContextPlatformVersion(). Interior lines are elided in this
// extract — confirm against upstream cl2.hpp.
4956 /*! \brief Class interface for 3D Image Memory objects.
4958 * See Memory for details about copy semantics, etc.
4962 class Image3D : public Image
4965 /*! \brief Constructs a 3D Image in a specified context.
4967 * Wraps clCreateImage().
4970 const Context& context,
4976 size_type row_pitch = 0,
4977 size_type slice_pitch = 0,
4978 void* host_ptr = NULL,
4982 bool useCreateImage;
4984 #if CL_HPP_TARGET_OPENCL_VERSION >= 120 && CL_HPP_MINIMUM_OPENCL_VERSION < 120
4985 // Run-time decision based on the actual platform
4987 cl_uint version = detail::getContextPlatformVersion(context());
4988 useCreateImage = (version >= 0x10002); // OpenCL 1.2 or above
4990 #elif CL_HPP_TARGET_OPENCL_VERSION >= 120
4991 useCreateImage = true;
4993 useCreateImage = false;
4996 #if CL_HPP_TARGET_OPENCL_VERSION >= 120
4999 cl_image_desc desc =
5001 CL_MEM_OBJECT_IMAGE3D,
5005 0, // array size (unused)
5010 object_ = ::clCreateImage(
5018 detail::errHandler(error, __CREATE_IMAGE_ERR);
5023 #endif // CL_HPP_TARGET_OPENCL_VERSION >= 120
5024 #if CL_HPP_MINIMUM_OPENCL_VERSION < 120
5025 if (!useCreateImage)
5027 object_ = ::clCreateImage3D(
5028 context(), flags, &format, width, height, depth, row_pitch,
5029 slice_pitch, host_ptr, &error);
5031 detail::errHandler(error, __CREATE_IMAGE3D_ERR);
5036 #endif // CL_HPP_MINIMUM_OPENCL_VERSION < 120
5039 //! \brief Default constructor - initializes to NULL.
5040 Image3D() : Image() { }
5042 /*! \brief Constructor from cl_mem - takes ownership.
5044 * \param retainObject will cause the constructor to retain its cl object.
5045 * Defaults to false to maintain compatibility with
5047 * See Memory for further details.
5049 explicit Image3D(const cl_mem& image3D, bool retainObject = false) :
5050 Image(image3D, retainObject) { }
5052 /*! \brief Assignment from cl_mem - performs shallow copy.
5054 * See Memory for further details.
5056 Image3D& operator = (const cl_mem& rhs)
5058 Image::operator=(rhs);
5062 /*! \brief Copy constructor to forward copy to the superclass correctly.
5063 * Required for MSVC.
5065 Image3D(const Image3D& img) : Image(img) {}
5067 /*! \brief Copy assignment to forward copy to the superclass correctly.
5068 * Required for MSVC.
5070 Image3D& operator = (const Image3D &img)
5072 Image::operator=(img);
5076 /*! \brief Move constructor to forward move to the superclass correctly.
5077 * Required for MSVC.
5079 Image3D(Image3D&& img) CL_HPP_NOEXCEPT_ : Image(std::move(img)) {}
5081 /*! \brief Move assignment to forward move to the superclass correctly.
5082 * Required for MSVC.
5084 Image3D& operator = (Image3D &&img)
5086 Image::operator=(std::move(img));
// NOTE(review): deprecated OpenCL 1.1 GL-interop 3D image; construction goes
// through clCreateFromGLTexture3D(). Interior lines elided in this extract —
// confirm against upstream cl2.hpp.
5091 #if defined(CL_USE_DEPRECATED_OPENCL_1_1_APIS)
5092 /*! \brief Class interface for GL 3D Image Memory objects.
5094 * This is provided to facilitate interoperability with OpenGL.
5096 * See Memory for details about copy semantics, etc.
5100 class Image3DGL : public Image3D
5103 /*! \brief Constructs an Image3DGL in a specified context, from a given
5106 * Wraps clCreateFromGLTexture3D().
5109 const Context& context,
5114 cl_int * err = NULL)
5117 object_ = ::clCreateFromGLTexture3D(
5125 detail::errHandler(error, __CREATE_GL_TEXTURE_3D_ERR);
5131 //! \brief Default constructor - initializes to NULL.
5132 Image3DGL() : Image3D() { }
5134 /*! \brief Constructor from cl_mem - takes ownership.
5136 * \param retainObject will cause the constructor to retain its cl object.
5137 * Defaults to false to maintain compatibility with
5139 * See Memory for further details.
5141 explicit Image3DGL(const cl_mem& image, bool retainObject = false) :
5142 Image3D(image, retainObject) { }
5144 /*! \brief Assignment from cl_mem - performs shallow copy.
5146 * See Memory for further details.
5148 Image3DGL& operator = (const cl_mem& rhs)
5150 Image3D::operator=(rhs);
5154 /*! \brief Copy constructor to forward copy to the superclass correctly.
5155 * Required for MSVC.
5157 Image3DGL(const Image3DGL& img) : Image3D(img) {}
5159 /*! \brief Copy assignment to forward copy to the superclass correctly.
5160 * Required for MSVC.
5162 Image3DGL& operator = (const Image3DGL &img)
5164 Image3D::operator=(img);
5168 /*! \brief Move constructor to forward move to the superclass correctly.
5169 * Required for MSVC.
5171 Image3DGL(Image3DGL&& img) CL_HPP_NOEXCEPT_ : Image3D(std::move(img)) {}
5173 /*! \brief Move assignment to forward move to the superclass correctly.
5174 * Required for MSVC.
5176 Image3DGL& operator = (Image3DGL &&img)
5178 Image3D::operator=(std::move(img));
5182 #endif // CL_USE_DEPRECATED_OPENCL_1_1_APIS
// NOTE(review): unified OpenCL 1.2+ GL-interop image (replaces the
// deprecated Image2DGL/Image3DGL); construction goes through
// clCreateFromGLTexture(). Interior lines elided in this extract.
5184 #if CL_HPP_TARGET_OPENCL_VERSION >= 120
5186 * \brief general image interface for GL interop.
5187 * We abstract the 2D and 3D GL images into a single instance here
5188 * that wraps all GL sourced images on the grounds that setup information
5189 * was performed by OpenCL anyway.
5191 class ImageGL : public Image
5195 const Context& context,
5200 cl_int * err = NULL)
5203 object_ = ::clCreateFromGLTexture(
5211 detail::errHandler(error, __CREATE_GL_TEXTURE_ERR);
5217 ImageGL() : Image() { }
5219 /*! \brief Constructor from cl_mem - takes ownership.
5221 * \param retainObject will cause the constructor to retain its cl object.
5222 * Defaults to false to maintain compatibility with
5224 * See Memory for further details.
5226 explicit ImageGL(const cl_mem& image, bool retainObject = false) :
5227 Image(image, retainObject) { }
5229 ImageGL& operator = (const cl_mem& rhs)
5231 Image::operator=(rhs);
5235 /*! \brief Copy constructor to forward copy to the superclass correctly.
5236 * Required for MSVC.
5238 ImageGL(const ImageGL& img) : Image(img) {}
5240 /*! \brief Copy assignment to forward copy to the superclass correctly.
5241 * Required for MSVC.
5243 ImageGL& operator = (const ImageGL &img)
5245 Image::operator=(img);
5249 /*! \brief Move constructor to forward move to the superclass correctly.
5250 * Required for MSVC.
5252 ImageGL(ImageGL&& img) CL_HPP_NOEXCEPT_ : Image(std::move(img)) {}
5254 /*! \brief Move assignment to forward move to the superclass correctly.
5255 * Required for MSVC.
5257 ImageGL& operator = (ImageGL &&img)
5259 Image::operator=(std::move(img));
5263 #endif // CL_HPP_TARGET_OPENCL_VERSION >= 120
// NOTE(review): OpenCL 2.0 pipe memory object. Both constructors force the
// flags to CL_MEM_READ_WRITE | CL_MEM_HOST_NO_ACCESS (the only combination
// valid for clCreatePipe); callers cannot override them. Interior lines are
// elided in this extract — confirm against upstream cl2.hpp.
5267 #if CL_HPP_TARGET_OPENCL_VERSION >= 200
5268 /*! \brief Class interface for Pipe Memory Objects.
5270 * See Memory for details about copy semantics, etc.
5274 class Pipe : public Memory
5278 /*! \brief Constructs a Pipe in a specified context.
5280 * Wraps clCreatePipe().
5281 * @param context Context in which to create the pipe.
5282 * @param flags Bitfield. Only CL_MEM_READ_WRITE and CL_MEM_HOST_NO_ACCESS are valid.
5283 * @param packet_size Size in bytes of a single packet of the pipe.
5284 * @param max_packets Number of packets that may be stored in the pipe.
5288 const Context& context,
5289 cl_uint packet_size,
5290 cl_uint max_packets,
5295 cl_mem_flags flags = CL_MEM_READ_WRITE | CL_MEM_HOST_NO_ACCESS;
5296 object_ = ::clCreatePipe(context(), flags, packet_size, max_packets, nullptr, &error);
5298 detail::errHandler(error, __CREATE_PIPE_ERR);
5304 /*! \brief Constructs a Pipe in the default context.
5306 * Wraps clCreatePipe().
5307 * @param flags Bitfield. Only CL_MEM_READ_WRITE and CL_MEM_HOST_NO_ACCESS are valid.
5308 * @param packet_size Size in bytes of a single packet of the pipe.
5309 * @param max_packets Number of packets that may be stored in the pipe.
5313 cl_uint packet_size,
5314 cl_uint max_packets,
5319 Context context = Context::getDefault(err);
5321 cl_mem_flags flags = CL_MEM_READ_WRITE | CL_MEM_HOST_NO_ACCESS;
5322 object_ = ::clCreatePipe(context(), flags, packet_size, max_packets, nullptr, &error);
5324 detail::errHandler(error, __CREATE_PIPE_ERR);
5330 //! \brief Default constructor - initializes to NULL.
5331 Pipe() : Memory() { }
5333 /*! \brief Constructor from cl_mem - takes ownership.
5335 * \param retainObject will cause the constructor to retain its cl object.
5336 * Defaults to false to maintain compatibility with earlier versions.
5338 * See Memory for further details.
5340 explicit Pipe(const cl_mem& pipe, bool retainObject = false) :
5341 Memory(pipe, retainObject) { }
5343 /*! \brief Assignment from cl_mem - performs shallow copy.
5345 * See Memory for further details.
5347 Pipe& operator = (const cl_mem& rhs)
5349 Memory::operator=(rhs);
5353 /*! \brief Copy constructor to forward copy to the superclass correctly.
5354 * Required for MSVC.
5356 Pipe(const Pipe& pipe) : Memory(pipe) {}
5358 /*! \brief Copy assignment to forward copy to the superclass correctly.
5359 * Required for MSVC.
5361 Pipe& operator = (const Pipe &pipe)
5363 Memory::operator=(pipe);
5367 /*! \brief Move constructor to forward move to the superclass correctly.
5368 * Required for MSVC.
5370 Pipe(Pipe&& pipe) CL_HPP_NOEXCEPT_ : Memory(std::move(pipe)) {}
5372 /*! \brief Move assignment to forward move to the superclass correctly.
5373 * Required for MSVC.
5375 Pipe& operator = (Pipe &&pipe)
5377 Memory::operator=(std::move(pipe));
// NOTE(review): despite the \brief lines below mentioning
// clGetMemObjectInfo(), these overloads wrap clGetPipeInfo().
5381 //! \brief Wrapper for clGetMemObjectInfo().
5382 template <typename T>
5383 cl_int getInfo(cl_pipe_info name, T* param) const
5385 return detail::errHandler(
5386 detail::getInfo(&::clGetPipeInfo, object_, name, param),
5387 __GET_PIPE_INFO_ERR);
5390 //! \brief Wrapper for clGetMemObjectInfo() that returns by value.
5391 template <cl_int name> typename
5392 detail::param_traits<detail::cl_pipe_info, name>::param_type
5393 getInfo(cl_int* err = NULL) const
5395 typename detail::param_traits<
5396 detail::cl_pipe_info, name>::param_type param;
// NOTE(review): "¶m" below appears to be a mis-encoding of "&param"
// (HTML entity &para; rendered as the pilcrow) — confirm against upstream.
5397 cl_int result = getInfo(name, ¶m);
5404 #endif // CL_HPP_TARGET_OPENCL_VERSION >= 200
// NOTE(review): shallow-copy wrapper over cl_sampler. The main constructor
// uses clCreateSamplerWithProperties() on OpenCL 2.0+ builds and falls back
// to the legacy clCreateSampler() otherwise. Interior lines are elided in
// this extract — confirm against upstream cl2.hpp.
5407 /*! \brief Class interface for cl_sampler.
5409 * \note Copies of these objects are shallow, meaning that the copy will refer
5410 * to the same underlying cl_sampler as the original. For details, see
5411 * clRetainSampler() and clReleaseSampler().
5415 class Sampler : public detail::Wrapper<cl_sampler>
5418 //! \brief Default constructor - initializes to NULL.
5421 /*! \brief Constructs a Sampler in a specified context.
5423 * Wraps clCreateSampler().
5426 const Context& context,
5427 cl_bool normalized_coords,
5428 cl_addressing_mode addressing_mode,
5429 cl_filter_mode filter_mode,
5434 #if CL_HPP_TARGET_OPENCL_VERSION >= 200
5435 cl_sampler_properties sampler_properties[] = {
5436 CL_SAMPLER_NORMALIZED_COORDS, normalized_coords,
5437 CL_SAMPLER_ADDRESSING_MODE, addressing_mode,
5438 CL_SAMPLER_FILTER_MODE, filter_mode,
5440 object_ = ::clCreateSamplerWithProperties(
5445 detail::errHandler(error, __CREATE_SAMPLER_WITH_PROPERTIES_ERR);
5450 object_ = ::clCreateSampler(
5457 detail::errHandler(error, __CREATE_SAMPLER_ERR);
5464 /*! \brief Constructor from cl_sampler - takes ownership.
5466 * \param retainObject will cause the constructor to retain its cl object.
5467 * Defaults to false to maintain compatibility with
5469 * This effectively transfers ownership of a refcount on the cl_sampler
5470 * into the new Sampler object.
5472 explicit Sampler(const cl_sampler& sampler, bool retainObject = false) :
5473 detail::Wrapper<cl_type>(sampler, retainObject) { }
5475 /*! \brief Assignment operator from cl_sampler - takes ownership.
5477 * This effectively transfers ownership of a refcount on the rhs and calls
5478 * clReleaseSampler() on the value previously held by this instance.
5480 Sampler& operator = (const cl_sampler& rhs)
5482 detail::Wrapper<cl_type>::operator=(rhs);
5486 /*! \brief Copy constructor to forward copy to the superclass correctly.
5487 * Required for MSVC.
5489 Sampler(const Sampler& sam) : detail::Wrapper<cl_type>(sam) {}
5491 /*! \brief Copy assignment to forward copy to the superclass correctly.
5492 * Required for MSVC.
5494 Sampler& operator = (const Sampler &sam)
5496 detail::Wrapper<cl_type>::operator=(sam);
5500 /*! \brief Move constructor to forward move to the superclass correctly.
5501 * Required for MSVC.
5503 Sampler(Sampler&& sam) CL_HPP_NOEXCEPT_ : detail::Wrapper<cl_type>(std::move(sam)) {}
5505 /*! \brief Move assignment to forward move to the superclass correctly.
5506 * Required for MSVC.
5508 Sampler& operator = (Sampler &&sam)
5510 detail::Wrapper<cl_type>::operator=(std::move(sam));
5514 //! \brief Wrapper for clGetSamplerInfo().
5515 template <typename T>
5516 cl_int getInfo(cl_sampler_info name, T* param) const
5518 return detail::errHandler(
5519 detail::getInfo(&::clGetSamplerInfo, object_, name, param),
5520 __GET_SAMPLER_INFO_ERR);
5523 //! \brief Wrapper for clGetSamplerInfo() that returns by value.
5524 template <cl_int name> typename
5525 detail::param_traits<detail::cl_sampler_info, name>::param_type
5526 getInfo(cl_int* err = NULL) const
5528 typename detail::param_traits<
5529 detail::cl_sampler_info, name>::param_type param;
// NOTE(review): "¶m" below looks like a mis-encoding of "&param" —
// confirm against upstream cl2.hpp.
5530 cl_int result = getInfo(name, ¶m);
// NOTE(review): NDRange stores up to three sizes plus a dimension count; a
// zero-dimension range is the NullRange sentinel. The "class NDRange" line
// and several constructor bodies are elided in this extract — confirm
// against upstream cl2.hpp.
5540 class DeviceCommandQueue;
5543 //! \brief Class interface for specifying NDRange values.
5547 size_type sizes_[3];
5548 cl_uint dimensions_;
5551 //! \brief Default constructor - resulting range has zero dimensions.
5560 //! \brief Constructs one-dimensional range.
5561 NDRange(size_type size0)
5569 //! \brief Constructs two-dimensional range.
5570 NDRange(size_type size0, size_type size1)
5578 //! \brief Constructs three-dimensional range.
5579 NDRange(size_type size0, size_type size1, size_type size2)
5587 /*! \brief Conversion operator to const size_type *.
5589 * \returns a pointer to the size of the first dimension.
5591 operator const size_type*() const {
5595 //! \brief Queries the number of dimensions in the range.
5596 size_type dimensions() const
5601 //! \brief Returns the size of the object in bytes based on the
5602 // runtime number of dimensions
5603 size_type size() const
5605 return dimensions_*sizeof(size_type);
5613 const size_type* get() const
5619 //! \brief A zero-dimensional range.
5620 static const NDRange NullRange;
// NOTE(review): KernelArgumentHandler is the compile-time dispatcher used by
// Kernel::setArg — non-Memory types are passed by value/size, Memory
// subclasses pass their underlying cl_mem handle, and LocalSpaceArg requests
// local memory (size only, NULL pointer). Struct bodies are partially elided
// in this extract — confirm against upstream cl2.hpp.
5622 //! \brief Local address wrapper for use with Kernel::setArg
5623 struct LocalSpaceArg
5630 template <typename T, class Enable = void>
5631 struct KernelArgumentHandler;
5633 // Enable for objects that are not subclasses of memory
5634 // Pointers, constants etc
5635 template <typename T>
5636 struct KernelArgumentHandler<T, typename std::enable_if<!std::is_base_of<cl::Memory, T>::value>::type>
5638 static size_type size(const T&) { return sizeof(T); }
5639 static const T* ptr(const T& value) { return &value; }
5642 // Enable for subclasses of memory where we want to get a reference to the cl_mem out
5643 // and pass that in for safety
5644 template <typename T>
5645 struct KernelArgumentHandler<T, typename std::enable_if<std::is_base_of<cl::Memory, T>::value>::type>
5647 static size_type size(const T&) { return sizeof(cl_mem); }
5648 static const cl_mem* ptr(const T& value) { return &(value()); }
5651 // Specialization for DeviceCommandQueue defined later
5654 struct KernelArgumentHandler<LocalSpaceArg, void>
5656 static size_type size(const LocalSpaceArg& value) { return value.size_; }
5657 static const void* ptr(const LocalSpaceArg&) { return NULL; }
5664 * \brief Helper function for generating LocalSpaceArg objects.
5666 inline LocalSpaceArg
5667 Local(size_type size)
5669 LocalSpaceArg ret = { size };
// NOTE(review): shallow-copy wrapper over cl_kernel. Provides typed info
// queries (kernel / arg / work-group / sub-group), setArg overloads
// (including SVM pointer overloads on OpenCL 2.0+), and variadic
// setSVMPointers helpers. Many interior lines (braces, "return *this;",
// err-propagation code) are elided in this extract — confirm against
// upstream cl2.hpp before building.
5673 /*! \brief Class interface for cl_kernel.
5675 * \note Copies of these objects are shallow, meaning that the copy will refer
5676 * to the same underlying cl_kernel as the original. For details, see
5677 * clRetainKernel() and clReleaseKernel().
5681 class Kernel : public detail::Wrapper<cl_kernel>
5684 inline Kernel(const Program& program, const char* name, cl_int* err = NULL);
5686 //! \brief Default constructor - initializes to NULL.
5689 /*! \brief Constructor from cl_kernel - takes ownership.
5691 * \param retainObject will cause the constructor to retain its cl object.
5692 * Defaults to false to maintain compatibility with
5694 * This effectively transfers ownership of a refcount on the cl_kernel
5695 * into the new Kernel object.
5697 explicit Kernel(const cl_kernel& kernel, bool retainObject = false) :
5698 detail::Wrapper<cl_type>(kernel, retainObject) { }
5700 /*! \brief Assignment operator from cl_kernel - takes ownership.
5702 * This effectively transfers ownership of a refcount on the rhs and calls
5703 * clReleaseKernel() on the value previously held by this instance.
5705 Kernel& operator = (const cl_kernel& rhs)
5707 detail::Wrapper<cl_type>::operator=(rhs);
5711 /*! \brief Copy constructor to forward copy to the superclass correctly.
5712 * Required for MSVC.
5714 Kernel(const Kernel& kernel) : detail::Wrapper<cl_type>(kernel) {}
5716 /*! \brief Copy assignment to forward copy to the superclass correctly.
5717 * Required for MSVC.
5719 Kernel& operator = (const Kernel &kernel)
5721 detail::Wrapper<cl_type>::operator=(kernel);
5725 /*! \brief Move constructor to forward move to the superclass correctly.
5726 * Required for MSVC.
5728 Kernel(Kernel&& kernel) CL_HPP_NOEXCEPT_ : detail::Wrapper<cl_type>(std::move(kernel)) {}
5730 /*! \brief Move assignment to forward move to the superclass correctly.
5731 * Required for MSVC.
5733 Kernel& operator = (Kernel &&kernel)
5735 detail::Wrapper<cl_type>::operator=(std::move(kernel));
5739 template <typename T>
5740 cl_int getInfo(cl_kernel_info name, T* param) const
5742 return detail::errHandler(
5743 detail::getInfo(&::clGetKernelInfo, object_, name, param),
5744 __GET_KERNEL_INFO_ERR);
5747 template <cl_int name> typename
5748 detail::param_traits<detail::cl_kernel_info, name>::param_type
5749 getInfo(cl_int* err = NULL) const
5751 typename detail::param_traits<
5752 detail::cl_kernel_info, name>::param_type param;
// NOTE(review): each "¶m" in this class appears to be a mis-encoding of
// "&param" (HTML entity &para;) — confirm against upstream cl2.hpp.
5753 cl_int result = getInfo(name, ¶m);
5760 #if CL_HPP_TARGET_OPENCL_VERSION >= 120
5761 template <typename T>
5762 cl_int getArgInfo(cl_uint argIndex, cl_kernel_arg_info name, T* param) const
5764 return detail::errHandler(
5765 detail::getInfo(&::clGetKernelArgInfo, object_, argIndex, name, param),
5766 __GET_KERNEL_ARG_INFO_ERR);
5769 template <cl_int name> typename
5770 detail::param_traits<detail::cl_kernel_arg_info, name>::param_type
5771 getArgInfo(cl_uint argIndex, cl_int* err = NULL) const
5773 typename detail::param_traits<
5774 detail::cl_kernel_arg_info, name>::param_type param;
5775 cl_int result = getArgInfo(argIndex, name, ¶m);
5781 #endif // CL_HPP_TARGET_OPENCL_VERSION >= 120
5783 template <typename T>
5784 cl_int getWorkGroupInfo(
5785 const Device& device, cl_kernel_work_group_info name, T* param) const
5787 return detail::errHandler(
5789 &::clGetKernelWorkGroupInfo, object_, device(), name, param),
5790 __GET_KERNEL_WORK_GROUP_INFO_ERR);
5793 template <cl_int name> typename
5794 detail::param_traits<detail::cl_kernel_work_group_info, name>::param_type
5795 getWorkGroupInfo(const Device& device, cl_int* err = NULL) const
5797 typename detail::param_traits<
5798 detail::cl_kernel_work_group_info, name>::param_type param;
5799 cl_int result = getWorkGroupInfo(device, name, ¶m);
5806 #if CL_HPP_TARGET_OPENCL_VERSION >= 200
5807 #if defined(CL_HPP_USE_CL_SUB_GROUPS_KHR)
5808 cl_int getSubGroupInfo(const cl::Device &dev, cl_kernel_sub_group_info name, const cl::NDRange &range, size_type* param) const
// The KHR extension entry point is resolved once and cached in a
// function-local static.
5810 typedef clGetKernelSubGroupInfoKHR_fn PFN_clGetKernelSubGroupInfoKHR;
5811 static PFN_clGetKernelSubGroupInfoKHR pfn_clGetKernelSubGroupInfoKHR = NULL;
5812 CL_HPP_INIT_CL_EXT_FCN_PTR_(clGetKernelSubGroupInfoKHR);
5814 return detail::errHandler(
5815 pfn_clGetKernelSubGroupInfoKHR(object_, dev(), name, range.size(), range.get(), sizeof(size_type), param, nullptr),
// NOTE(review): reuses __GET_KERNEL_ARG_INFO_ERR rather than a dedicated
// sub-group error string — presumably intentional reuse; verify upstream.
5816 __GET_KERNEL_ARG_INFO_ERR);
5819 template <cl_int name>
5820 size_type getSubGroupInfo(const cl::Device &dev, const cl::NDRange &range, cl_int* err = NULL) const
5823 cl_int result = getSubGroupInfo(dev, name, range, ¶m);
5829 #endif // #if defined(CL_HPP_USE_CL_SUB_GROUPS_KHR)
5830 #endif // #if CL_HPP_TARGET_OPENCL_VERSION >= 200
5832 #if CL_HPP_TARGET_OPENCL_VERSION >= 200
5833 /*! \brief setArg overload taking a shared_ptr type
5835 template<typename T, class D>
5836 cl_int setArg(cl_uint index, const cl::pointer<T, D> &argPtr)
5838 return detail::errHandler(
5839 ::clSetKernelArgSVMPointer(object_, index, argPtr.get()),
5840 __SET_KERNEL_ARGS_ERR);
5843 /*! \brief setArg overload taking a vector type.
5845 template<typename T, class Alloc>
5846 cl_int setArg(cl_uint index, const cl::vector<T, Alloc> &argPtr)
5848 return detail::errHandler(
5849 ::clSetKernelArgSVMPointer(object_, index, argPtr.data()),
5850 __SET_KERNEL_ARGS_ERR);
5853 /*! \brief setArg overload taking a pointer type
5855 template<typename T>
5856 typename std::enable_if<std::is_pointer<T>::value, cl_int>::type
5857 setArg(cl_uint index, const T argPtr)
5859 return detail::errHandler(
5860 ::clSetKernelArgSVMPointer(object_, index, argPtr),
5861 __SET_KERNEL_ARGS_ERR);
5863 #endif // #if CL_HPP_TARGET_OPENCL_VERSION >= 200
5865 /*! \brief setArg overload taking a POD type
5867 template <typename T>
5868 typename std::enable_if<!std::is_pointer<T>::value, cl_int>::type
5869 setArg(cl_uint index, const T &value)
5871 return detail::errHandler(
// Size and pointer are derived per-type by KernelArgumentHandler (POD,
// Memory subclass, or LocalSpaceArg).
5875 detail::KernelArgumentHandler<T>::size(value),
5876 detail::KernelArgumentHandler<T>::ptr(value)),
5877 __SET_KERNEL_ARGS_ERR);
5880 cl_int setArg(cl_uint index, size_type size, const void* argPtr)
5882 return detail::errHandler(
5883 ::clSetKernelArg(object_, index, size, argPtr),
5884 __SET_KERNEL_ARGS_ERR);
5887 #if CL_HPP_TARGET_OPENCL_VERSION >= 200
5889 * Specify a vector of SVM pointers that the kernel may access in
5890 * addition to its arguments.
5892 cl_int setSVMPointers(const vector<void*> &pointerList)
5894 return detail::errHandler(
5895 ::clSetKernelExecInfo(
5897 CL_KERNEL_EXEC_INFO_SVM_PTRS,
5898 sizeof(void*)*pointerList.size(),
5899 pointerList.data()));
5903 * Specify a std::array of SVM pointers that the kernel may access in
5904 * addition to its arguments.
5906 template<int ArrayLength>
5907 cl_int setSVMPointers(const std::array<void*, ArrayLength> &pointerList)
5909 return detail::errHandler(
5910 ::clSetKernelExecInfo(
5912 CL_KERNEL_EXEC_INFO_SVM_PTRS,
5913 sizeof(void*)*pointerList.size(),
5914 pointerList.data()));
5917 /*! \brief Enable fine-grained system SVM.
5919 * \note It is only possible to enable fine-grained system SVM if all devices
5920 * in the context associated with kernel support it.
5922 * \param svmEnabled True if fine-grained system SVM is requested. False otherwise.
5923 * \return CL_SUCCESS if the function was executed successfully. CL_INVALID_OPERATION
5924 * if no devices in the context support fine-grained system SVM.
5926 * \see clSetKernelExecInfo
5928 cl_int enableFineGrainedSystemSVM(bool svmEnabled)
5930 cl_bool svmEnabled_ = svmEnabled ? CL_TRUE : CL_FALSE;
5931 return detail::errHandler(
5932 ::clSetKernelExecInfo(
5934 CL_KERNEL_EXEC_INFO_SVM_FINE_GRAIN_SYSTEM,
// Recursive compile-time helpers that flatten a parameter pack of SVM
// smart pointers and raw pointers into the pointerList array.
5941 template<int index, int ArrayLength, class D, typename T0, typename T1, typename... Ts>
5942 void setSVMPointersHelper(std::array<void*, ArrayLength> &pointerList, const pointer<T0, D> &t0, const pointer<T1, D> &t1, Ts & ... ts)
5944 pointerList[index] = static_cast<void*>(t0.get());
5945 setSVMPointersHelper<index + 1, ArrayLength>(pointerList, t1, ts...);
5948 template<int index, int ArrayLength, typename T0, typename T1, typename... Ts>
5949 typename std::enable_if<std::is_pointer<T0>::value, void>::type
5950 setSVMPointersHelper(std::array<void*, ArrayLength> &pointerList, T0 t0, T1 t1, Ts... ts)
5952 pointerList[index] = static_cast<void*>(t0);
5953 setSVMPointersHelper<index + 1, ArrayLength>(pointerList, t1, ts...);
5956 template<int index, int ArrayLength, typename T0, class D>
5957 void setSVMPointersHelper(std::array<void*, ArrayLength> &pointerList, const pointer<T0, D> &t0)
5959 pointerList[index] = static_cast<void*>(t0.get());
5963 template<int index, int ArrayLength, typename T0>
5964 typename std::enable_if<std::is_pointer<T0>::value, void>::type
5965 setSVMPointersHelper(std::array<void*, ArrayLength> &pointerList, T0 t0)
5967 pointerList[index] = static_cast<void*>(t0);
5970 template<typename T0, typename... Ts>
5971 cl_int setSVMPointers(const T0 &t0, Ts & ... ts)
5973 std::array<void*, 1 + sizeof...(Ts)> pointerList;
5975 setSVMPointersHelper<0, 1 + sizeof...(Ts)>(pointerList, t0, ts...);
5976 return detail::errHandler(
5977 ::clSetKernelExecInfo(
5979 CL_KERNEL_EXEC_INFO_SVM_PTRS,
5980 sizeof(void*)*(1 + sizeof...(Ts)),
5981 pointerList.data()));
5983 #endif // #if CL_HPP_TARGET_OPENCL_VERSION >= 200
5987 * \brief Program interface that implements cl_program.
5989 class Program : public detail::Wrapper<cl_program>
5992 #if !defined(CL_HPP_ENABLE_PROGRAM_CONSTRUCTION_FROM_ARRAY_COMPATIBILITY)
5993 typedef vector<vector<unsigned char>> Binaries;
5994 typedef vector<string> Sources;
5995 #else // #if !defined(CL_HPP_ENABLE_PROGRAM_CONSTRUCTION_FROM_ARRAY_COMPATIBILITY)
5996 typedef vector<std::pair<const void*, size_type> > Binaries;
5997 typedef vector<std::pair<const char*, size_type> > Sources;
5998 #endif // #if !defined(CL_HPP_ENABLE_PROGRAM_CONSTRUCTION_FROM_ARRAY_COMPATIBILITY)
6001 const string& source,
6007 const char * strings = source.c_str();
6008 const size_type length = source.size();
6010 Context context = Context::getDefault(err);
6012 object_ = ::clCreateProgramWithSource(
6013 context(), (cl_uint)1, &strings, &length, &error);
6015 detail::errHandler(error, __CREATE_PROGRAM_WITH_SOURCE_ERR);
6017 if (error == CL_SUCCESS && build) {
6019 error = ::clBuildProgram(
6023 #if !defined(CL_HPP_CL_1_2_DEFAULT_BUILD)
6027 #endif // #if !defined(CL_HPP_CL_1_2_DEFAULT_BUILD)
6031 detail::buildErrHandler(error, __BUILD_PROGRAM_ERR, getBuildInfo<CL_PROGRAM_BUILD_LOG>());
6040 const Context& context,
6041 const string& source,
6047 const char * strings = source.c_str();
6048 const size_type length = source.size();
6050 object_ = ::clCreateProgramWithSource(
6051 context(), (cl_uint)1, &strings, &length, &error);
6053 detail::errHandler(error, __CREATE_PROGRAM_WITH_SOURCE_ERR);
6055 if (error == CL_SUCCESS && build) {
6056 error = ::clBuildProgram(
6060 #if !defined(CL_HPP_CL_1_2_DEFAULT_BUILD)
6064 #endif // #if !defined(CL_HPP_CL_1_2_DEFAULT_BUILD)
6068 detail::buildErrHandler(error, __BUILD_PROGRAM_ERR, getBuildInfo<CL_PROGRAM_BUILD_LOG>());
6077 * Create a program from a vector of source strings and the default context.
6078 * Does not compile or link the program.
6081 const Sources& sources,
6085 Context context = Context::getDefault(err);
6087 const size_type n = (size_type)sources.size();
6089 vector<size_type> lengths(n);
6090 vector<const char*> strings(n);
6092 for (size_type i = 0; i < n; ++i) {
6093 #if !defined(CL_HPP_ENABLE_PROGRAM_CONSTRUCTION_FROM_ARRAY_COMPATIBILITY)
6094 strings[i] = sources[(int)i].data();
6095 lengths[i] = sources[(int)i].length();
6096 #else // #if !defined(CL_HPP_ENABLE_PROGRAM_CONSTRUCTION_FROM_ARRAY_COMPATIBILITY)
6097 strings[i] = sources[(int)i].first;
6098 lengths[i] = sources[(int)i].second;
6099 #endif // #if !defined(CL_HPP_ENABLE_PROGRAM_CONSTRUCTION_FROM_ARRAY_COMPATIBILITY)
6102 object_ = ::clCreateProgramWithSource(
6103 context(), (cl_uint)n, strings.data(), lengths.data(), &error);
6105 detail::errHandler(error, __CREATE_PROGRAM_WITH_SOURCE_ERR);
6112 * Create a program from a vector of source strings and a provided context.
6113 * Does not compile or link the program.
6116 const Context& context,
6117 const Sources& sources,
6122 const size_type n = (size_type)sources.size();
6124 vector<size_type> lengths(n);
6125 vector<const char*> strings(n);
6127 for (size_type i = 0; i < n; ++i) {
6128 #if !defined(CL_HPP_ENABLE_PROGRAM_CONSTRUCTION_FROM_ARRAY_COMPATIBILITY)
6129 strings[i] = sources[(int)i].data();
6130 lengths[i] = sources[(int)i].length();
6131 #else // #if !defined(CL_HPP_ENABLE_PROGRAM_CONSTRUCTION_FROM_ARRAY_COMPATIBILITY)
6132 strings[i] = sources[(int)i].first;
6133 lengths[i] = sources[(int)i].second;
6134 #endif // #if !defined(CL_HPP_ENABLE_PROGRAM_CONSTRUCTION_FROM_ARRAY_COMPATIBILITY)
6137 object_ = ::clCreateProgramWithSource(
6138 context(), (cl_uint)n, strings.data(), lengths.data(), &error);
6140 detail::errHandler(error, __CREATE_PROGRAM_WITH_SOURCE_ERR);
6147 * Construct a program object from a list of devices and a per-device list of binaries.
6148 * \param context A valid OpenCL context in which to construct the program.
6149 * \param devices A vector of OpenCL device objects for which the program will be created.
6150 * \param binaries A vector of pairs of a pointer to a binary object and its length.
6151 * \param binaryStatus An optional vector that on completion will be resized to
6152 * match the size of binaries and filled with values to specify if each binary
6153 * was successfully loaded.
6154 * Set to CL_SUCCESS if the binary was successfully loaded.
6155 * Set to CL_INVALID_VALUE if the length is 0 or the binary pointer is NULL.
6156 * Set to CL_INVALID_BINARY if the binary provided is not valid for the matching device.
6157 * \param err if non-NULL will be set to CL_SUCCESS on successful operation or one of the following errors:
6158 * CL_INVALID_CONTEXT if context is not a valid context.
6159 * CL_INVALID_VALUE if the length of devices is zero; or if the length of binaries does not match the length of devices;
6160 * or if any entry in binaries is NULL or has length 0.
6161 * CL_INVALID_DEVICE if OpenCL devices listed in devices are not in the list of devices associated with context.
6162 * CL_INVALID_BINARY if an invalid program binary was encountered for any device. binaryStatus will return specific status for each device.
6163 * CL_OUT_OF_HOST_MEMORY if there is a failure to allocate resources required by the OpenCL implementation on the host.
6166 const Context& context,
6167 const vector<Device>& devices,
6168 const Binaries& binaries,
6169 vector<cl_int>* binaryStatus = NULL,
6174 const size_type numDevices = devices.size();
6176 // Catch size mismatch early and return
6177 if(binaries.size() != numDevices) {
6178 error = CL_INVALID_VALUE;
6179 detail::errHandler(error, __CREATE_PROGRAM_WITH_BINARY_ERR);
// Flatten the binaries into parallel image/length arrays for the C API.
6187 vector<size_type> lengths(numDevices);
6188 vector<const unsigned char*> images(numDevices);
6189 #if !defined(CL_HPP_ENABLE_PROGRAM_CONSTRUCTION_FROM_ARRAY_COMPATIBILITY)
6190 for (size_type i = 0; i < numDevices; ++i) {
// NOTE(review): the stray (int)i cast on the size() call is inconsistent
// with the uncast data() call above — harmless for realistic device
// counts, but worth normalizing upstream.
6191 images[i] = binaries[i].data();
6192 lengths[i] = binaries[(int)i].size();
6194 #else // #if !defined(CL_HPP_ENABLE_PROGRAM_CONSTRUCTION_FROM_ARRAY_COMPATIBILITY)
6195 for (size_type i = 0; i < numDevices; ++i) {
6196 images[i] = (const unsigned char*)binaries[i].first;
6197 lengths[i] = binaries[(int)i].second;
6199 #endif // #if !defined(CL_HPP_ENABLE_PROGRAM_CONSTRUCTION_FROM_ARRAY_COMPATIBILITY)
// Unwrap the C++ Device objects into raw cl_device_id handles.
6201 vector<cl_device_id> deviceIDs(numDevices);
6202 for( size_type deviceIndex = 0; deviceIndex < numDevices; ++deviceIndex ) {
6203 deviceIDs[deviceIndex] = (devices[deviceIndex])();
// Make room for one status entry per device before handing the buffer down.
6207 binaryStatus->resize(numDevices);
6210 object_ = ::clCreateProgramWithBinary(
6211 context(), (cl_uint) devices.size(),
6213 lengths.data(), images.data(), (binaryStatus != NULL && numDevices > 0)
6214 ? &binaryStatus->front()
6217 detail::errHandler(error, __CREATE_PROGRAM_WITH_BINARY_ERR);
6224 #if CL_HPP_TARGET_OPENCL_VERSION >= 120
6226 * Create program using builtin kernels.
6227 * \param kernelNames Semi-colon separated list of builtin kernel names
6230 const Context& context,
6231 const vector<Device>& devices,
6232 const string& kernelNames,
// Unwrap the C++ Device objects into the raw cl_device_id array the C API needs.
6238 size_type numDevices = devices.size();
6239 vector<cl_device_id> deviceIDs(numDevices);
6240 for( size_type deviceIndex = 0; deviceIndex < numDevices; ++deviceIndex ) {
6241 deviceIDs[deviceIndex] = (devices[deviceIndex])();
6244 object_ = ::clCreateProgramWithBuiltInKernels(
6246 (cl_uint) devices.size(),
6248 kernelNames.c_str(),
6251 detail::errHandler(error, __CREATE_PROGRAM_WITH_BUILT_IN_KERNELS_ERR);
6256 #endif // CL_HPP_TARGET_OPENCL_VERSION >= 120
6261 /*! \brief Constructor from cl_program - takes ownership.
6263 * \param retainObject will cause the constructor to retain its cl object.
6264 * Defaults to false to maintain compatibility with
6267 explicit Program(const cl_program& program, bool retainObject = false) :
6268 detail::Wrapper<cl_type>(program, retainObject) { }
// Assignment from a raw handle; release/retain semantics live in Wrapper.
6270 Program& operator = (const cl_program& rhs)
6272 detail::Wrapper<cl_type>::operator=(rhs);
6276 /*! \brief Copy constructor to forward copy to the superclass correctly.
6277 * Required for MSVC.
6279 Program(const Program& program) : detail::Wrapper<cl_type>(program) {}
6281 /*! \brief Copy assignment to forward copy to the superclass correctly.
6282 * Required for MSVC.
6284 Program& operator = (const Program &program)
6286 detail::Wrapper<cl_type>::operator=(program);
6290 /*! \brief Move constructor to forward move to the superclass correctly.
6291 * Required for MSVC.
6293 Program(Program&& program) CL_HPP_NOEXCEPT_ : detail::Wrapper<cl_type>(std::move(program)) {}
6295 /*! \brief Move assignment to forward move to the superclass correctly.
6296 * Required for MSVC.
6298 Program& operator = (Program &&program)
6300 detail::Wrapper<cl_type>::operator=(std::move(program));
// Builds (compiles and links) the program for the listed devices with the
// given options; notifyFptr, if supplied, is called when the build completes.
6305 const vector<Device>& devices,
6306 const char* options = NULL,
6307 void (CL_CALLBACK * notifyFptr)(cl_program, void *) = NULL,
6308 void* data = NULL) const
// Unwrap the C++ Device objects into raw handles for clBuildProgram.
6310 size_type numDevices = devices.size();
6311 vector<cl_device_id> deviceIDs(numDevices);
6313 for( size_type deviceIndex = 0; deviceIndex < numDevices; ++deviceIndex ) {
6314 deviceIDs[deviceIndex] = (devices[deviceIndex])();
6317 cl_int buildError = ::clBuildProgram(
// Attach the build log to the reported error for easier diagnosis.
6326 return detail::buildErrHandler(buildError, __BUILD_PROGRAM_ERR, getBuildInfo<CL_PROGRAM_BUILD_LOG>());
// Builds the program for all devices associated with its context.
6330 const char* options = NULL,
6331 void (CL_CALLBACK * notifyFptr)(cl_program, void *) = NULL,
6332 void* data = NULL) const
6334 cl_int buildError = ::clBuildProgram(
// Attach the build log to the reported error for easier diagnosis.
6343 return detail::buildErrHandler(buildError, __BUILD_PROGRAM_ERR, getBuildInfo<CL_PROGRAM_BUILD_LOG>());
6346 #if CL_HPP_TARGET_OPENCL_VERSION >= 120
// Compiles (without linking) the program's source; OpenCL 1.2+ only.
6348 const char* options = NULL,
6349 void (CL_CALLBACK * notifyFptr)(cl_program, void *) = NULL,
6350 void* data = NULL) const
6352 cl_int error = ::clCompileProgram(
// Attach the build log to the reported error for easier diagnosis.
6362 return detail::buildErrHandler(error, __COMPILE_PROGRAM_ERR, getBuildInfo<CL_PROGRAM_BUILD_LOG>());
6364 #endif // CL_HPP_TARGET_OPENCL_VERSION >= 120
// Generic program query: forwards to clGetProgramInfo via the detail helper,
// which handles buffer sizing/conversion for the requested parameter type.
6366 template <typename T>
6367 cl_int getInfo(cl_program_info name, T* param) const
6369 return detail::errHandler(
6370 detail::getInfo(&::clGetProgramInfo, object_, name, param),
6371 __GET_PROGRAM_INFO_ERR);
6374 template <cl_int name> typename
6375 detail::param_traits<detail::cl_program_info, name>::param_type
6376 getInfo(cl_int* err = NULL) const
6378 typename detail::param_traits<
6379 detail::cl_program_info, name>::param_type param;
6380 cl_int result = getInfo(name, ¶m);
// Per-device build query: forwards to clGetProgramBuildInfo via the detail
// helper for the requested parameter type.
6387 template <typename T>
6388 cl_int getBuildInfo(
6389 const Device& device, cl_program_build_info name, T* param) const
6391 return detail::errHandler(
6393 &::clGetProgramBuildInfo, object_, device(), name, param),
6394 __GET_PROGRAM_BUILD_INFO_ERR);
6397 template <cl_int name> typename
6398 detail::param_traits<detail::cl_program_build_info, name>::param_type
6399 getBuildInfo(const Device& device, cl_int* err = NULL) const
6401 typename detail::param_traits<
6402 detail::cl_program_build_info, name>::param_type param;
6403 cl_int result = getBuildInfo(device, name, ¶m);
6411 * Build info function that returns a vector of device/info pairs for the specified
6412 * info type and for all devices in the program.
6413 * On an error reading the info for any device, an empty vector of info will be returned.
6415 template <cl_int name>
6416 vector<std::pair<cl::Device, typename detail::param_traits<detail::cl_program_build_info, name>::param_type>>
6417 getBuildInfo(cl_int *err = NULL) const
6419 cl_int result = CL_SUCCESS;
6421 auto devs = getInfo<CL_PROGRAM_DEVICES>(&result);
6422 vector<std::pair<cl::Device, typename detail::param_traits<detail::cl_program_build_info, name>::param_type>>
6425 // If there was an initial error from getInfo return the error
6426 if (result != CL_SUCCESS) {
6433 for (const cl::Device &d : devs) {
6434 typename detail::param_traits<
6435 detail::cl_program_build_info, name>::param_type param;
6436 result = getBuildInfo(d, name, ¶m);
6438 std::pair<cl::Device, typename detail::param_traits<detail::cl_program_build_info, name>::param_type>
6440 if (result != CL_SUCCESS) {
6441 // On error, leave the loop and return the error code
6448 if (result != CL_SUCCESS) {
// Creates a Kernel wrapper for every kernel in this (built) program,
// using the standard two-call pattern: query the count, then fetch handles.
6454 cl_int createKernels(vector<Kernel>* kernels)
6457 cl_int err = ::clCreateKernelsInProgram(object_, 0, NULL, &numKernels);
6458 if (err != CL_SUCCESS) {
6459 return detail::errHandler(err, __CREATE_KERNELS_IN_PROGRAM_ERR);
6462 vector<cl_kernel> value(numKernels);
6464 err = ::clCreateKernelsInProgram(
6465 object_, numKernels, value.data(), NULL);
6466 if (err != CL_SUCCESS) {
6467 return detail::errHandler(err, __CREATE_KERNELS_IN_PROGRAM_ERR);
6471 kernels->resize(value.size());
6473 // Assign to param, constructing with retain behaviour
6474 // to correctly capture each underlying CL object
6475 for (size_type i = 0; i < value.size(); i++) {
6476 // We do not need to retain because this kernel is being created
6478 (*kernels)[i] = Kernel(value[i], false);
6485 #if CL_HPP_TARGET_OPENCL_VERSION >= 120
// Links two already-compiled programs into a new executable Program.
6486 inline Program linkProgram(
6489 const char* options = NULL,
6490 void (CL_CALLBACK * notifyFptr)(cl_program, void *) = NULL,
6494 cl_int error_local = CL_SUCCESS;
6496 cl_program programs[2] = { input1(), input2() };
// Both inputs must share a context; it is taken from the first program.
6498 Context ctx = input1.getInfo<CL_PROGRAM_CONTEXT>(&error_local);
6499 if(error_local!=CL_SUCCESS) {
6500 detail::errHandler(error_local, __LINK_PROGRAM_ERR);
6503 cl_program prog = ::clLinkProgram(
// NOTE(review): a link failure is reported with __COMPILE_PROGRAM_ERR
// rather than __LINK_PROGRAM_ERR — looks like a copy/paste slip; confirm
// against upstream before changing, as callers may match on the message.
6514 detail::errHandler(error_local,__COMPILE_PROGRAM_ERR);
// Wrap without retaining: clLinkProgram returned a fresh reference.
6519 return Program(prog);
// Links an arbitrary number of compiled programs into a new executable Program.
6522 inline Program linkProgram(
6523 vector<Program> inputPrograms,
6524 const char* options = NULL,
6525 void (CL_CALLBACK * notifyFptr)(cl_program, void *) = NULL,
6529 cl_int error_local = CL_SUCCESS;
// Unwrap the C++ Program objects into raw cl_program handles.
6531 vector<cl_program> programs(inputPrograms.size());
6533 for (unsigned int i = 0; i < inputPrograms.size(); i++) {
6534 programs[i] = inputPrograms[i]();
// All inputs must share a context; it is taken from the first program.
6538 if(inputPrograms.size() > 0) {
6539 ctx = inputPrograms[0].getInfo<CL_PROGRAM_CONTEXT>(&error_local);
6540 if(error_local!=CL_SUCCESS) {
6541 detail::errHandler(error_local, __LINK_PROGRAM_ERR);
6544 cl_program prog = ::clLinkProgram(
6549 (cl_uint)inputPrograms.size(),
// NOTE(review): as in the two-input overload, the link failure is reported
// with __COMPILE_PROGRAM_ERR rather than __LINK_PROGRAM_ERR.
6555 detail::errHandler(error_local,__COMPILE_PROGRAM_ERR);
// Wrap without retaining: clLinkProgram returned a fresh reference.
6560 return Program(prog, false);
6562 #endif // CL_HPP_TARGET_OPENCL_VERSION >= 120
6564 // Template specialization for CL_PROGRAM_BINARIES
// Pre-sizes each per-device binary buffer (using CL_PROGRAM_BINARY_SIZES)
// before delegating to the generic getInfo helper.
6566 inline cl_int cl::Program::getInfo(cl_program_info name, vector<vector<unsigned char>>* param) const
// This overload is only meaningful for the binaries query.
6568 if (name != CL_PROGRAM_BINARIES) {
6569 return CL_INVALID_VALUE;
6572 // Resize the parameter array appropriately for each allocation
6573 // and pass down to the helper
6575 vector<size_type> sizes = getInfo<CL_PROGRAM_BINARY_SIZES>();
6576 size_type numBinaries = sizes.size();
6578 // Resize the parameter array and constituent arrays
6579 param->resize(numBinaries);
6580 for (size_type i = 0; i < numBinaries; ++i) {
6581 (*param)[i].resize(sizes[i]);
6584 return detail::errHandler(
6585 detail::getInfo(&::clGetProgramInfo, object_, name, param),
6586 __GET_PROGRAM_INFO_ERR);
// Convenience wrapper returning the per-device program binaries by value.
6593 inline vector<vector<unsigned char>> cl::Program::getInfo<CL_PROGRAM_BINARIES>(cl_int* err) const
6595 vector<vector<unsigned char>> binariesVectors;
6597 cl_int result = getInfo(CL_PROGRAM_BINARIES, &binariesVectors);
6601 return binariesVectors;
// Out-of-line Kernel constructor: creates the kernel named 'name' from a
// built program; the error is routed through errHandler (and *err if given).
6604 inline Kernel::Kernel(const Program& program, const char* name, cl_int* err)
6608 object_ = ::clCreateKernel(program(), name, &error);
6609 detail::errHandler(error, __CREATE_KERNEL_ERR);
// Type-safe command-queue property flags; combine with the operator| below.
6617 enum class QueueProperties : cl_command_queue_properties
6620 Profiling = CL_QUEUE_PROFILING_ENABLE,
6621 OutOfOrder = CL_QUEUE_OUT_OF_ORDER_EXEC_MODE_ENABLE,
6624 inline QueueProperties operator|(QueueProperties lhs, QueueProperties rhs)
6626 return static_cast<QueueProperties>(static_cast<cl_command_queue_properties>(lhs) | static_cast<cl_command_queue_properties>(rhs));
6629 /*! \class CommandQueue
6630 * \brief CommandQueue interface for cl_command_queue.
6632 class CommandQueue : public detail::Wrapper<cl_command_queue>
// Guards one-time creation of the process-wide default queue (see getDefault);
// default_error_ records the outcome of that creation attempt.
6635 static std::once_flag default_initialized_;
6636 static CommandQueue default_;
6637 static cl_int default_error_;
6639 /*! \brief Create the default command queue returned by @ref getDefault.
6641 * It sets default_error_ to indicate success or failure. It does not throw
6644 static void makeDefault()
6646 /* We don't want to throw an error from this function, so we have to
6647 * catch and set the error flag.
6649 #if defined(CL_HPP_ENABLE_EXCEPTIONS)
6654 Context context = Context::getDefault(&error);
6656 if (error != CL_SUCCESS) {
6657 default_error_ = error;
// Build the default queue on the default device of the default context,
// with no extra queue properties.
6660 Device device = Device::getDefault();
6661 default_ = CommandQueue(context, device, 0, &default_error_);
6664 #if defined(CL_HPP_ENABLE_EXCEPTIONS)
// With exceptions enabled, convert a thrown cl::Error into the stored code.
6665 catch (cl::Error &e) {
6666 default_error_ = e.err();
6671 /*! \brief Create the default command queue.
6673 * This sets @c default_. It does not throw
// Installs the caller-provided queue as the process-wide default.
6676 static void makeDefaultProvided(const CommandQueue &c) {
6681 #ifdef CL_HPP_UNIT_TEST_ENABLE
6682 /*! \brief Reset the default.
6684 * This sets @c default_ to an empty value to support cleanup in
6685 * the unit test framework.
6686 * This function is not thread safe.
6688 static void unitTestClearDefault() {
6689 default_ = CommandQueue();
6691 #endif // #ifdef CL_HPP_UNIT_TEST_ENABLE
6695 * \brief Constructs a CommandQueue based on passed properties.
6696 * Will return an CL_INVALID_QUEUE_PROPERTIES error if CL_QUEUE_ON_DEVICE is specified.
6699 cl_command_queue_properties properties,
// Uses the default context and the first device in that context.
6704 Context context = Context::getDefault(&error);
6705 detail::errHandler(error, __CREATE_CONTEXT_ERR);
6707 if (error != CL_SUCCESS) {
6713 Device device = context.getInfo<CL_CONTEXT_DEVICES>()[0];
// Selects between clCreateCommandQueueWithProperties (OpenCL 2.0+) and the
// deprecated clCreateCommandQueue, at run time when the build straddles 2.0.
6714 bool useWithProperties;
6716 #if CL_HPP_TARGET_OPENCL_VERSION >= 200 && CL_HPP_MINIMUM_OPENCL_VERSION < 200
6717 // Run-time decision based on the actual platform
6719 cl_uint version = detail::getContextPlatformVersion(context());
6720 useWithProperties = (version >= 0x20000); // OpenCL 2.0 or above
6722 #elif CL_HPP_TARGET_OPENCL_VERSION >= 200
6723 useWithProperties = true;
6725 useWithProperties = false;
6728 #if CL_HPP_TARGET_OPENCL_VERSION >= 200
6729 if (useWithProperties) {
6730 cl_queue_properties queue_properties[] = {
6731 CL_QUEUE_PROPERTIES, properties, 0 };
// Device-side queues are not supported by this constructor; reject here.
6732 if ((properties & CL_QUEUE_ON_DEVICE) == 0) {
6733 object_ = ::clCreateCommandQueueWithProperties(
6734 context(), device(), queue_properties, &error);
6737 error = CL_INVALID_QUEUE_PROPERTIES;
6740 detail::errHandler(error, __CREATE_COMMAND_QUEUE_WITH_PROPERTIES_ERR);
6745 #endif // CL_HPP_TARGET_OPENCL_VERSION >= 200
6746 #if CL_HPP_MINIMUM_OPENCL_VERSION < 200
6747 if (!useWithProperties) {
6748 object_ = ::clCreateCommandQueue(
6749 context(), device(), properties, &error);
6751 detail::errHandler(error, __CREATE_COMMAND_QUEUE_ERR);
6756 #endif // CL_HPP_MINIMUM_OPENCL_VERSION < 200
6761 * \brief Constructs a CommandQueue based on passed properties.
6762 * Will return an CL_INVALID_QUEUE_PROPERTIES error if CL_QUEUE_ON_DEVICE is specified.
6765 QueueProperties properties,
// Uses the default context and the first device in that context.
6770 Context context = Context::getDefault(&error);
6771 detail::errHandler(error, __CREATE_CONTEXT_ERR);
6773 if (error != CL_SUCCESS) {
6779 Device device = context.getInfo<CL_CONTEXT_DEVICES>()[0];
// Selects the 2.0 "WithProperties" entry point vs the deprecated one;
// decided at run time when the build straddles OpenCL 2.0.
6780 bool useWithProperties;
6782 #if CL_HPP_TARGET_OPENCL_VERSION >= 200 && CL_HPP_MINIMUM_OPENCL_VERSION < 200
6783 // Run-time decision based on the actual platform
6785 cl_uint version = detail::getContextPlatformVersion(context());
6786 useWithProperties = (version >= 0x20000); // OpenCL 2.0 or above
6788 #elif CL_HPP_TARGET_OPENCL_VERSION >= 200
6789 useWithProperties = true;
6791 useWithProperties = false;
6794 #if CL_HPP_TARGET_OPENCL_VERSION >= 200
6795 if (useWithProperties) {
6796 cl_queue_properties queue_properties[] = {
6797 CL_QUEUE_PROPERTIES, static_cast<cl_queue_properties>(properties), 0 };
6799 object_ = ::clCreateCommandQueueWithProperties(
6800 context(), device(), queue_properties, &error);
6802 detail::errHandler(error, __CREATE_COMMAND_QUEUE_WITH_PROPERTIES_ERR);
6807 #endif // CL_HPP_TARGET_OPENCL_VERSION >= 200
6808 #if CL_HPP_MINIMUM_OPENCL_VERSION < 200
6809 if (!useWithProperties) {
6810 object_ = ::clCreateCommandQueue(
6811 context(), device(), static_cast<cl_command_queue_properties>(properties), &error);
6813 detail::errHandler(error, __CREATE_COMMAND_QUEUE_ERR);
6818 #endif // CL_HPP_MINIMUM_OPENCL_VERSION < 200
6824 * \brief Constructs a CommandQueue for an implementation defined device in the given context
6825 * Will return an CL_INVALID_QUEUE_PROPERTIES error if CL_QUEUE_ON_DEVICE is specified.
6827 explicit CommandQueue(
6828 const Context& context,
6829 cl_command_queue_properties properties = 0,
6833 bool useWithProperties;
// The queue is created on the context's first device.
6834 vector<cl::Device> devices;
6835 error = context.getInfo(CL_CONTEXT_DEVICES, &devices);
6837 detail::errHandler(error, __CREATE_CONTEXT_ERR);
6839 if (error != CL_SUCCESS)
// Selects the 2.0 "WithProperties" entry point vs the deprecated one;
// decided at run time when the build straddles OpenCL 2.0.
6847 #if CL_HPP_TARGET_OPENCL_VERSION >= 200 && CL_HPP_MINIMUM_OPENCL_VERSION < 200
6848 // Run-time decision based on the actual platform
6850 cl_uint version = detail::getContextPlatformVersion(context());
6851 useWithProperties = (version >= 0x20000); // OpenCL 2.0 or above
6853 #elif CL_HPP_TARGET_OPENCL_VERSION >= 200
6854 useWithProperties = true;
6856 useWithProperties = false;
6859 #if CL_HPP_TARGET_OPENCL_VERSION >= 200
6860 if (useWithProperties) {
6861 cl_queue_properties queue_properties[] = {
6862 CL_QUEUE_PROPERTIES, properties, 0 };
// Device-side queues are not supported by this constructor; reject here.
6863 if ((properties & CL_QUEUE_ON_DEVICE) == 0) {
6864 object_ = ::clCreateCommandQueueWithProperties(
6865 context(), devices[0](), queue_properties, &error);
6868 error = CL_INVALID_QUEUE_PROPERTIES;
6871 detail::errHandler(error, __CREATE_COMMAND_QUEUE_WITH_PROPERTIES_ERR);
6876 #endif // CL_HPP_TARGET_OPENCL_VERSION >= 200
6877 #if CL_HPP_MINIMUM_OPENCL_VERSION < 200
6878 if (!useWithProperties) {
6879 object_ = ::clCreateCommandQueue(
6880 context(), devices[0](), properties, &error);
6882 detail::errHandler(error, __CREATE_COMMAND_QUEUE_ERR);
6887 #endif // CL_HPP_MINIMUM_OPENCL_VERSION < 200
6891 * \brief Constructs a CommandQueue for an implementation defined device in the given context
6892 * Will return an CL_INVALID_QUEUE_PROPERTIES error if CL_QUEUE_ON_DEVICE is specified.
6894 explicit CommandQueue(
6895 const Context& context,
6896 QueueProperties properties,
6900 bool useWithProperties;
// The queue is created on the context's first device.
6901 vector<cl::Device> devices;
6902 error = context.getInfo(CL_CONTEXT_DEVICES, &devices);
6904 detail::errHandler(error, __CREATE_CONTEXT_ERR);
6906 if (error != CL_SUCCESS)
// Selects the 2.0 "WithProperties" entry point vs the deprecated one;
// decided at run time when the build straddles OpenCL 2.0.
6914 #if CL_HPP_TARGET_OPENCL_VERSION >= 200 && CL_HPP_MINIMUM_OPENCL_VERSION < 200
6915 // Run-time decision based on the actual platform
6917 cl_uint version = detail::getContextPlatformVersion(context());
6918 useWithProperties = (version >= 0x20000); // OpenCL 2.0 or above
6920 #elif CL_HPP_TARGET_OPENCL_VERSION >= 200
6921 useWithProperties = true;
6923 useWithProperties = false;
6926 #if CL_HPP_TARGET_OPENCL_VERSION >= 200
6927 if (useWithProperties) {
6928 cl_queue_properties queue_properties[] = {
6929 CL_QUEUE_PROPERTIES, static_cast<cl_queue_properties>(properties), 0 };
6930 object_ = ::clCreateCommandQueueWithProperties(
6931 context(), devices[0](), queue_properties, &error);
6933 detail::errHandler(error, __CREATE_COMMAND_QUEUE_WITH_PROPERTIES_ERR);
6938 #endif // CL_HPP_TARGET_OPENCL_VERSION >= 200
6939 #if CL_HPP_MINIMUM_OPENCL_VERSION < 200
6940 if (!useWithProperties) {
6941 object_ = ::clCreateCommandQueue(
6942 context(), devices[0](), static_cast<cl_command_queue_properties>(properties), &error);
6944 detail::errHandler(error, __CREATE_COMMAND_QUEUE_ERR);
6949 #endif // CL_HPP_MINIMUM_OPENCL_VERSION < 200
6953 * \brief Constructs a CommandQueue for a passed device and context
6954 * Will return an CL_INVALID_QUEUE_PROPERTIES error if CL_QUEUE_ON_DEVICE is specified.
6957 const Context& context,
6958 const Device& device,
6959 cl_command_queue_properties properties = 0,
// Selects the 2.0 "WithProperties" entry point vs the deprecated one;
// decided at run time when the build straddles OpenCL 2.0.
6963 bool useWithProperties;
6965 #if CL_HPP_TARGET_OPENCL_VERSION >= 200 && CL_HPP_MINIMUM_OPENCL_VERSION < 200
6966 // Run-time decision based on the actual platform
6968 cl_uint version = detail::getContextPlatformVersion(context());
6969 useWithProperties = (version >= 0x20000); // OpenCL 2.0 or above
6971 #elif CL_HPP_TARGET_OPENCL_VERSION >= 200
6972 useWithProperties = true;
6974 useWithProperties = false;
6977 #if CL_HPP_TARGET_OPENCL_VERSION >= 200
6978 if (useWithProperties) {
6979 cl_queue_properties queue_properties[] = {
6980 CL_QUEUE_PROPERTIES, properties, 0 };
6981 object_ = ::clCreateCommandQueueWithProperties(
6982 context(), device(), queue_properties, &error);
6984 detail::errHandler(error, __CREATE_COMMAND_QUEUE_WITH_PROPERTIES_ERR);
6989 #endif // CL_HPP_TARGET_OPENCL_VERSION >= 200
6990 #if CL_HPP_MINIMUM_OPENCL_VERSION < 200
6991 if (!useWithProperties) {
6992 object_ = ::clCreateCommandQueue(
6993 context(), device(), properties, &error);
6995 detail::errHandler(error, __CREATE_COMMAND_QUEUE_ERR);
7000 #endif // CL_HPP_MINIMUM_OPENCL_VERSION < 200
7004 * \brief Constructs a CommandQueue for a passed device and context
7005 * Will return an CL_INVALID_QUEUE_PROPERTIES error if CL_QUEUE_ON_DEVICE is specified.
7008 const Context& context,
7009 const Device& device,
7010 QueueProperties properties,
// Selects the 2.0 "WithProperties" entry point vs the deprecated one;
// decided at run time when the build straddles OpenCL 2.0.
7014 bool useWithProperties;
7016 #if CL_HPP_TARGET_OPENCL_VERSION >= 200 && CL_HPP_MINIMUM_OPENCL_VERSION < 200
7017 // Run-time decision based on the actual platform
7019 cl_uint version = detail::getContextPlatformVersion(context());
7020 useWithProperties = (version >= 0x20000); // OpenCL 2.0 or above
7022 #elif CL_HPP_TARGET_OPENCL_VERSION >= 200
7023 useWithProperties = true;
7025 useWithProperties = false;
7028 #if CL_HPP_TARGET_OPENCL_VERSION >= 200
7029 if (useWithProperties) {
7030 cl_queue_properties queue_properties[] = {
7031 CL_QUEUE_PROPERTIES, static_cast<cl_queue_properties>(properties), 0 };
7032 object_ = ::clCreateCommandQueueWithProperties(
7033 context(), device(), queue_properties, &error);
7035 detail::errHandler(error, __CREATE_COMMAND_QUEUE_WITH_PROPERTIES_ERR);
7040 #endif // CL_HPP_TARGET_OPENCL_VERSION >= 200
7041 #if CL_HPP_MINIMUM_OPENCL_VERSION < 200
7042 if (!useWithProperties) {
7043 object_ = ::clCreateCommandQueue(
7044 context(), device(), static_cast<cl_command_queue_properties>(properties), &error);
7046 detail::errHandler(error, __CREATE_COMMAND_QUEUE_ERR);
7051 #endif // CL_HPP_MINIMUM_OPENCL_VERSION < 200
// Returns the lazily-created process-wide default queue (thread-safe via
// std::call_once); the stored creation status is re-reported on every call.
7054 static CommandQueue getDefault(cl_int * err = NULL)
7056 std::call_once(default_initialized_, makeDefault);
7057 #if CL_HPP_TARGET_OPENCL_VERSION >= 200
7058 detail::errHandler(default_error_, __CREATE_COMMAND_QUEUE_WITH_PROPERTIES_ERR);
7059 #else // CL_HPP_TARGET_OPENCL_VERSION >= 200
7060 detail::errHandler(default_error_, __CREATE_COMMAND_QUEUE_ERR);
7061 #endif // CL_HPP_TARGET_OPENCL_VERSION >= 200
7063 *err = default_error_;
7069 * Modify the default command queue to be used by
7070 * subsequent operations.
7071 * Will only set the default if no default was previously created.
7072 * @return updated default command queue.
7073 * Should be compared to the passed value to ensure that it was updated.
7075 static CommandQueue setDefault(const CommandQueue &default_queue)
// call_once guarantees the provided queue only wins if no default exists yet.
7077 std::call_once(default_initialized_, makeDefaultProvided, std::cref(default_queue));
7078 detail::errHandler(default_error_);
7085 /*! \brief Constructor from cl_command_queue - takes ownership.
7087 * \param retainObject will cause the constructor to retain its cl object.
7088 * Defaults to false to maintain compatibility with
7091 explicit CommandQueue(const cl_command_queue& commandQueue, bool retainObject = false) :
7092 detail::Wrapper<cl_type>(commandQueue, retainObject) { }
// Assignment from a raw handle; release/retain semantics live in Wrapper.
7094 CommandQueue& operator = (const cl_command_queue& rhs)
7096 detail::Wrapper<cl_type>::operator=(rhs);
7100 /*! \brief Copy constructor to forward copy to the superclass correctly.
7101 * Required for MSVC.
7103 CommandQueue(const CommandQueue& queue) : detail::Wrapper<cl_type>(queue) {}
7105 /*! \brief Copy assignment to forward copy to the superclass correctly.
7106 * Required for MSVC.
7108 CommandQueue& operator = (const CommandQueue &queue)
7110 detail::Wrapper<cl_type>::operator=(queue);
7114 /*! \brief Move constructor to forward move to the superclass correctly.
7115 * Required for MSVC.
7117 CommandQueue(CommandQueue&& queue) CL_HPP_NOEXCEPT_ : detail::Wrapper<cl_type>(std::move(queue)) {}
7119 /*! \brief Move assignment to forward move to the superclass correctly.
7120 * Required for MSVC.
7122 CommandQueue& operator = (CommandQueue &&queue)
7124 detail::Wrapper<cl_type>::operator=(std::move(queue));
// Generic queue query: forwards to clGetCommandQueueInfo via the detail
// helper, which handles buffer sizing for the requested parameter type.
7128 template <typename T>
7129 cl_int getInfo(cl_command_queue_info name, T* param) const
7131 return detail::errHandler(
7133 &::clGetCommandQueueInfo, object_, name, param),
7134 __GET_COMMAND_QUEUE_INFO_ERR);
7137 template <cl_int name> typename
7138 detail::param_traits<detail::cl_command_queue_info, name>::param_type
7139 getInfo(cl_int* err = NULL) const
7141 typename detail::param_traits<
7142 detail::cl_command_queue_info, name>::param_type param;
7143 cl_int result = getInfo(name, ¶m);
// Enqueues a (possibly blocking) read of 'size' bytes from 'buffer' at
// 'offset' into host memory, honouring the optional event wait list.
7150 cl_int enqueueReadBuffer(
7151 const Buffer& buffer,
7156 const vector<Event>* events = NULL,
7157 Event* event = NULL) const
7160 cl_int err = detail::errHandler(
7161 ::clEnqueueReadBuffer(
7162 object_, buffer(), blocking, offset, size,
// Translate the optional wait list / out-event to the C API convention.
7164 (events != NULL) ? (cl_uint) events->size() : 0,
7165 (events != NULL && events->size() > 0) ? (cl_event*) &events->front() : NULL,
7166 (event != NULL) ? &tmp : NULL),
7167 __ENQUEUE_READ_BUFFER_ERR);
// Only publish the returned event handle on success.
7169 if (event != NULL && err == CL_SUCCESS)
// Enqueues a (possibly blocking) write of 'size' bytes from host memory
// into 'buffer' at 'offset', honouring the optional event wait list.
7175 cl_int enqueueWriteBuffer(
7176 const Buffer& buffer,
7181 const vector<Event>* events = NULL,
7182 Event* event = NULL) const
7185 cl_int err = detail::errHandler(
7186 ::clEnqueueWriteBuffer(
7187 object_, buffer(), blocking, offset, size,
7189 (events != NULL) ? (cl_uint) events->size() : 0,
7190 (events != NULL && events->size() > 0) ? (cl_event*) &events->front() : NULL,
7191 (event != NULL) ? &tmp : NULL),
7192 __ENQUEUE_WRITE_BUFFER_ERR);
// Only publish the returned event handle on success.
7194 if (event != NULL && err == CL_SUCCESS)
// Enqueues a device-side copy of 'size' bytes between two buffers.
7200 cl_int enqueueCopyBuffer(
7203 size_type src_offset,
7204 size_type dst_offset,
7206 const vector<Event>* events = NULL,
7207 Event* event = NULL) const
7210 cl_int err = detail::errHandler(
7211 ::clEnqueueCopyBuffer(
7212 object_, src(), dst(), src_offset, dst_offset, size,
7213 (events != NULL) ? (cl_uint) events->size() : 0,
7214 (events != NULL && events->size() > 0) ? (cl_event*) &events->front() : NULL,
7215 (event != NULL) ? &tmp : NULL),
// NOTE(review): __ENQEUE_COPY_BUFFER_ERR ("ENQEUE") matches the macro as
// (mis)spelled elsewhere in this header — do not "fix" the name here alone.
7216 __ENQEUE_COPY_BUFFER_ERR);
7218 if (event != NULL && err == CL_SUCCESS)
7223 #if CL_HPP_TARGET_OPENCL_VERSION >= 110
// Enqueues a rectangular (2D/3D strided) read from a buffer into host memory.
7224 cl_int enqueueReadBufferRect(
7225 const Buffer& buffer,
7227 const array<size_type, 3>& buffer_offset,
7228 const array<size_type, 3>& host_offset,
7229 const array<size_type, 3>& region,
7230 size_type buffer_row_pitch,
7231 size_type buffer_slice_pitch,
7232 size_type host_row_pitch,
7233 size_type host_slice_pitch,
7235 const vector<Event>* events = NULL,
7236 Event* event = NULL) const
7239 cl_int err = detail::errHandler(
7240 ::clEnqueueReadBufferRect(
7244 buffer_offset.data(),
7252 (events != NULL) ? (cl_uint) events->size() : 0,
7253 (events != NULL && events->size() > 0) ? (cl_event*) &events->front() : NULL,
7254 (event != NULL) ? &tmp : NULL),
7255 __ENQUEUE_READ_BUFFER_RECT_ERR);
// Only publish the returned event handle on success.
7257 if (event != NULL && err == CL_SUCCESS)
// Enqueues a rectangular (2D/3D strided) write from host memory into a buffer.
7263 cl_int enqueueWriteBufferRect(
7264 const Buffer& buffer,
7266 const array<size_type, 3>& buffer_offset,
7267 const array<size_type, 3>& host_offset,
7268 const array<size_type, 3>& region,
7269 size_type buffer_row_pitch,
7270 size_type buffer_slice_pitch,
7271 size_type host_row_pitch,
7272 size_type host_slice_pitch,
7274 const vector<Event>* events = NULL,
7275 Event* event = NULL) const
7278 cl_int err = detail::errHandler(
7279 ::clEnqueueWriteBufferRect(
7283 buffer_offset.data(),
7291 (events != NULL) ? (cl_uint) events->size() : 0,
7292 (events != NULL && events->size() > 0) ? (cl_event*) &events->front() : NULL,
7293 (event != NULL) ? &tmp : NULL),
7294 __ENQUEUE_WRITE_BUFFER_RECT_ERR);
// Only publish the returned event handle on success.
7296 if (event != NULL && err == CL_SUCCESS)
// Enqueues a rectangular (2D/3D strided) device-side copy between buffers.
7302 cl_int enqueueCopyBufferRect(
7305 const array<size_type, 3>& src_origin,
7306 const array<size_type, 3>& dst_origin,
7307 const array<size_type, 3>& region,
7308 size_type src_row_pitch,
7309 size_type src_slice_pitch,
7310 size_type dst_row_pitch,
7311 size_type dst_slice_pitch,
7312 const vector<Event>* events = NULL,
7313 Event* event = NULL) const
7316 cl_int err = detail::errHandler(
7317 ::clEnqueueCopyBufferRect(
7328 (events != NULL) ? (cl_uint) events->size() : 0,
7329 (events != NULL && events->size() > 0) ? (cl_event*) &events->front() : NULL,
7330 (event != NULL) ? &tmp : NULL),
// NOTE(review): __ENQEUE_COPY_BUFFER_RECT_ERR matches the macro as
// (mis)spelled elsewhere in this header — do not "fix" the name here alone.
7331 __ENQEUE_COPY_BUFFER_RECT_ERR);
7333 if (event != NULL && err == CL_SUCCESS)
7339 #if CL_HPP_TARGET_OPENCL_VERSION >= 120
7341 * Enqueue a command to fill a buffer object with a pattern
7342 * of a given size. The pattern is specified as a vector type.
7343 * \tparam PatternType The datatype of the pattern field.
7344 * The pattern type must be an accepted OpenCL data type.
7345 * \tparam offset Is the offset in bytes into the buffer at
7346 * which to start filling. This must be a multiple of
7348 * \tparam size Is the size in bytes of the region to fill.
7349 * This must be a multiple of the pattern size.
7351 template<typename PatternType>
7352 cl_int enqueueFillBuffer(
7353 const Buffer& buffer,
7354 PatternType pattern,
7357 const vector<Event>* events = NULL,
7358 Event* event = NULL) const
7361 cl_int err = detail::errHandler(
7362 ::clEnqueueFillBuffer(
7365 static_cast<void*>(&pattern),
7366 sizeof(PatternType),
7369 (events != NULL) ? (cl_uint) events->size() : 0,
7370 (events != NULL && events->size() > 0) ? (cl_event*) &events->front() : NULL,
7371 (event != NULL) ? &tmp : NULL),
7372 __ENQUEUE_FILL_BUFFER_ERR);
7374 if (event != NULL && err == CL_SUCCESS)
7379 #endif // CL_HPP_TARGET_OPENCL_VERSION >= 120
// Enqueues a (possibly blocking) read of an image region into host memory.
7381 cl_int enqueueReadImage(
7384 const array<size_type, 3>& origin,
7385 const array<size_type, 3>& region,
7386 size_type row_pitch,
7387 size_type slice_pitch,
7389 const vector<Event>* events = NULL,
7390 Event* event = NULL) const
7393 cl_int err = detail::errHandler(
7394 ::clEnqueueReadImage(
7403 (events != NULL) ? (cl_uint) events->size() : 0,
7404 (events != NULL && events->size() > 0) ? (cl_event*) &events->front() : NULL,
7405 (event != NULL) ? &tmp : NULL),
7406 __ENQUEUE_READ_IMAGE_ERR);
// Only publish the returned event handle on success.
7408 if (event != NULL && err == CL_SUCCESS)
// Enqueues a (possibly blocking) write of host memory into an image region.
7414 cl_int enqueueWriteImage(
7417 const array<size_type, 3>& origin,
7418 const array<size_type, 3>& region,
7419 size_type row_pitch,
7420 size_type slice_pitch,
7422 const vector<Event>* events = NULL,
7423 Event* event = NULL) const
7426 cl_int err = detail::errHandler(
7427 ::clEnqueueWriteImage(
7436 (events != NULL) ? (cl_uint) events->size() : 0,
7437 (events != NULL && events->size() > 0) ? (cl_event*) &events->front() : NULL,
7438 (event != NULL) ? &tmp : NULL),
7439 __ENQUEUE_WRITE_IMAGE_ERR);
// Only publish the returned event handle on success.
7441 if (event != NULL && err == CL_SUCCESS)
// Enqueues a device-side copy of a region between two images.
7447 cl_int enqueueCopyImage(
7450 const array<size_type, 3>& src_origin,
7451 const array<size_type, 3>& dst_origin,
7452 const array<size_type, 3>& region,
7453 const vector<Event>* events = NULL,
7454 Event* event = NULL) const
7457 cl_int err = detail::errHandler(
7458 ::clEnqueueCopyImage(
7465 (events != NULL) ? (cl_uint) events->size() : 0,
7466 (events != NULL && events->size() > 0) ? (cl_event*) &events->front() : NULL,
7467 (event != NULL) ? &tmp : NULL),
7468 __ENQUEUE_COPY_IMAGE_ERR);
// Only publish the returned event handle on success.
7470 if (event != NULL && err == CL_SUCCESS)
7476 #if CL_HPP_TARGET_OPENCL_VERSION >= 120
7478 * Enqueue a command to fill an image object with a specified color.
7479 * \param fillColor is the color to use to fill the image.
7480 * This is a four component RGBA floating-point color value if
7481 * the image channel data type is not an unnormalized signed or
7482 * unsigned data type.
7484 cl_int enqueueFillImage(
7486 cl_float4 fillColor,
7487 const array<size_type, 3>& origin,
7488 const array<size_type, 3>& region,
7489 const vector<Event>* events = NULL,
7490 Event* event = NULL) const
7493 cl_int err = detail::errHandler(
7494 ::clEnqueueFillImage(
// The color is passed by address; the runtime copies it before returning.
7497 static_cast<void*>(&fillColor),
7500 (events != NULL) ? (cl_uint) events->size() : 0,
7501 (events != NULL && events->size() > 0) ? (cl_event*) &events->front() : NULL,
7502 (event != NULL) ? &tmp : NULL),
7503 __ENQUEUE_FILL_IMAGE_ERR);
// Only publish the returned event handle on success.
7505 if (event != NULL && err == CL_SUCCESS)
7512 * Enqueue a command to fill an image object with a specified color.
7513 * \param fillColor is the color to use to fill the image.
7514 * This is a four component RGBA signed integer color value if
7515 * the image channel data type is an unnormalized signed integer
7518 cl_int enqueueFillImage(
7521 const array<size_type, 3>& origin,
7522 const array<size_type, 3>& region,
7523 const vector<Event>* events = NULL,
7524 Event* event = NULL) const
7527 cl_int err = detail::errHandler(
7528 ::clEnqueueFillImage(
7531 static_cast<void*>(&fillColor),
7534 (events != NULL) ? (cl_uint) events->size() : 0,
7535 (events != NULL && events->size() > 0) ? (cl_event*) &events->front() : NULL,
7536 (event != NULL) ? &tmp : NULL),
7537 __ENQUEUE_FILL_IMAGE_ERR);
7539 if (event != NULL && err == CL_SUCCESS)
7546 * Enqueue a command to fill an image object with a specified color.
7547 * \param fillColor is the color to use to fill the image.
7548 * This is a four component RGBA unsigned integer color value if
7549 * the image channel data type is an unnormalized unsigned integer
7552 cl_int enqueueFillImage(
7555 const array<size_type, 3>& origin,
7556 const array<size_type, 3>& region,
7557 const vector<Event>* events = NULL,
7558 Event* event = NULL) const
7561 cl_int err = detail::errHandler(
7562 ::clEnqueueFillImage(
7565 static_cast<void*>(&fillColor),
7568 (events != NULL) ? (cl_uint) events->size() : 0,
7569 (events != NULL && events->size() > 0) ? (cl_event*) &events->front() : NULL,
7570 (event != NULL) ? &tmp : NULL),
7571 __ENQUEUE_FILL_IMAGE_ERR);
7573 if (event != NULL && err == CL_SUCCESS)
7578 #endif // CL_HPP_TARGET_OPENCL_VERSION >= 120
7580 cl_int enqueueCopyImageToBuffer(
7583 const array<size_type, 3>& src_origin,
7584 const array<size_type, 3>& region,
7585 size_type dst_offset,
7586 const vector<Event>* events = NULL,
7587 Event* event = NULL) const
7590 cl_int err = detail::errHandler(
7591 ::clEnqueueCopyImageToBuffer(
7598 (events != NULL) ? (cl_uint) events->size() : 0,
7599 (events != NULL && events->size() > 0) ? (cl_event*) &events->front() : NULL,
7600 (event != NULL) ? &tmp : NULL),
7601 __ENQUEUE_COPY_IMAGE_TO_BUFFER_ERR);
7603 if (event != NULL && err == CL_SUCCESS)
7609 cl_int enqueueCopyBufferToImage(
7612 size_type src_offset,
7613 const array<size_type, 3>& dst_origin,
7614 const array<size_type, 3>& region,
7615 const vector<Event>* events = NULL,
7616 Event* event = NULL) const
7619 cl_int err = detail::errHandler(
7620 ::clEnqueueCopyBufferToImage(
7627 (events != NULL) ? (cl_uint) events->size() : 0,
7628 (events != NULL && events->size() > 0) ? (cl_event*) &events->front() : NULL,
7629 (event != NULL) ? &tmp : NULL),
7630 __ENQUEUE_COPY_BUFFER_TO_IMAGE_ERR);
7632 if (event != NULL && err == CL_SUCCESS)
// Map a buffer into host address space; returns the mapped host pointer.
// On failure the error code is reported through the optional `err` out-param
// (fragments for `Event tmp;`, the error capture, and the return are missing
// from this gutted listing).
7638 void* enqueueMapBuffer(
7639     const Buffer& buffer,
7644     const vector<Event>* events = NULL,
7645     Event* event = NULL,
7646     cl_int* err = NULL) const
7650     void * result = ::clEnqueueMapBuffer(
7651         object_, buffer(), blocking, flags, offset, size,
7652         (events != NULL) ? (cl_uint) events->size() : 0,
7653         (events != NULL && events->size() > 0) ? (cl_event*) &events->front() : NULL,
7654         (event != NULL) ? &tmp : NULL,
7657     detail::errHandler(error, __ENQUEUE_MAP_BUFFER_ERR);
7661     if (event != NULL && error == CL_SUCCESS)

// Map an image region into host address space; row/slice pitches of the
// mapped region are written through row_pitch / slice_pitch.
7667 void* enqueueMapImage(
7668     const Image& buffer,
7671     const array<size_type, 3>& origin,
7672     const array<size_type, 3>& region,
7673     size_type * row_pitch,
7674     size_type * slice_pitch,
7675     const vector<Event>* events = NULL,
7676     Event* event = NULL,
7677     cl_int* err = NULL) const
7681     void * result = ::clEnqueueMapImage(
7682         object_, buffer(), blocking, flags,
7685         row_pitch, slice_pitch,
7686         (events != NULL) ? (cl_uint) events->size() : 0,
7687         (events != NULL && events->size() > 0) ? (cl_event*) &events->front() : NULL,
7688         (event != NULL) ? &tmp : NULL,
7691     detail::errHandler(error, __ENQUEUE_MAP_IMAGE_ERR);
7695     if (event != NULL && error == CL_SUCCESS)
7700 #if CL_HPP_TARGET_OPENCL_VERSION >= 200
7702      * Enqueues a command that will allow the host to update a region of a coarse-grained SVM buffer.
7703      * This variant takes a raw SVM pointer.
7705     template<typename T>
// Raw-pointer overload: `ptr` and `size` describe the SVM region to map
// (their parameter lines are missing from this gutted listing).
7706     cl_int enqueueMapSVM(
7711         const vector<Event>* events = NULL,
7712         Event* event = NULL) const
7715         cl_int err = detail::errHandler(::clEnqueueSVMMap(
7716             object_, blocking, flags, static_cast<void*>(ptr), size,
7717             (events != NULL) ? (cl_uint)events->size() : 0,
7718             (events != NULL && events->size() > 0) ? (cl_event*)&events->front() : NULL,
7719             (event != NULL) ? &tmp : NULL),
7720             __ENQUEUE_MAP_BUFFER_ERR);
7722         if (event != NULL && err == CL_SUCCESS)

7730      * Enqueues a command that will allow the host to update a region of a coarse-grained SVM buffer.
7731      * This variant takes a cl::pointer instance.
7733     template<typename T, class D>
// cl::pointer overload — note `ptr` is taken by reference; the underlying raw
// pointer is obtained via ptr.get().
7734     cl_int enqueueMapSVM(
7735         cl::pointer<T, D> &ptr,
7739         const vector<Event>* events = NULL,
7740         Event* event = NULL) const
7743         cl_int err = detail::errHandler(::clEnqueueSVMMap(
7744             object_, blocking, flags, static_cast<void*>(ptr.get()), size,
7745             (events != NULL) ? (cl_uint)events->size() : 0,
7746             (events != NULL && events->size() > 0) ? (cl_event*)&events->front() : NULL,
7747             (event != NULL) ? &tmp : NULL),
7748             __ENQUEUE_MAP_BUFFER_ERR);
7750         if (event != NULL && err == CL_SUCCESS)

7757      * Enqueues a command that will allow the host to update a region of a coarse-grained SVM buffer.
7758      * This variant takes a cl::vector instance.
7760     template<typename T, class Alloc>
// cl::vector overload — maps the container's entire backing store
// (container.data(), container.size()); taken by reference so the caller's
// storage, not a copy, is mapped.
7761     cl_int enqueueMapSVM(
7762         cl::vector<T, Alloc> &container,
7765         const vector<Event>* events = NULL,
7766         Event* event = NULL) const
7769         cl_int err = detail::errHandler(::clEnqueueSVMMap(
7770             object_, blocking, flags, static_cast<void*>(container.data()), container.size(),
7771             (events != NULL) ? (cl_uint)events->size() : 0,
7772             (events != NULL && events->size() > 0) ? (cl_event*)&events->front() : NULL,
7773             (event != NULL) ? &tmp : NULL),
7774             __ENQUEUE_MAP_BUFFER_ERR);
7776         if (event != NULL && err == CL_SUCCESS)
7781 #endif // #if CL_HPP_TARGET_OPENCL_VERSION >= 200
// Unmap a previously mapped buffer/image region (wraps ::clEnqueueUnmapMemObject).
// The `mapped_ptr` parameter line is missing from this gutted listing.
7783 cl_int enqueueUnmapMemObject(
7784     const Memory& memory,
7786     const vector<Event>* events = NULL,
7787     Event* event = NULL) const
7790     cl_int err = detail::errHandler(
7791         ::clEnqueueUnmapMemObject(
7792             object_, memory(), mapped_ptr,
7793             (events != NULL) ? (cl_uint) events->size() : 0,
7794             (events != NULL && events->size() > 0) ? (cl_event*) &events->front() : NULL,
7795             (event != NULL) ? &tmp : NULL),
7796         __ENQUEUE_UNMAP_MEM_OBJECT_ERR);
7798     if (event != NULL && err == CL_SUCCESS)

7805 #if CL_HPP_TARGET_OPENCL_VERSION >= 200
7807      * Enqueues a command that will release a coarse-grained SVM buffer back to the OpenCL runtime.
7808      * This variant takes a raw SVM pointer.
7810     template<typename T>
// Raw-pointer SVM unmap (wraps ::clEnqueueSVMUnmap).
7811     cl_int enqueueUnmapSVM(
7813         const vector<Event>* events = NULL,
7814         Event* event = NULL) const
7817         cl_int err = detail::errHandler(
7818             ::clEnqueueSVMUnmap(
7819             object_, static_cast<void*>(ptr),
7820             (events != NULL) ? (cl_uint)events->size() : 0,
7821             (events != NULL && events->size() > 0) ? (cl_event*)&events->front() : NULL,
7822             (event != NULL) ? &tmp : NULL),
7823             __ENQUEUE_UNMAP_MEM_OBJECT_ERR);
7825         if (event != NULL && err == CL_SUCCESS)

7832      * Enqueues a command that will release a coarse-grained SVM buffer back to the OpenCL runtime.
7833      * This variant takes a cl::pointer instance.
7835     template<typename T, class D>
// cl::pointer SVM unmap; by-reference, unmaps ptr.get().
7836     cl_int enqueueUnmapSVM(
7837         cl::pointer<T, D> &ptr,
7838         const vector<Event>* events = NULL,
7839         Event* event = NULL) const
7842         cl_int err = detail::errHandler(
7843             ::clEnqueueSVMUnmap(
7844             object_, static_cast<void*>(ptr.get()),
7845             (events != NULL) ? (cl_uint)events->size() : 0,
7846             (events != NULL && events->size() > 0) ? (cl_event*)&events->front() : NULL,
7847             (event != NULL) ? &tmp : NULL),
7848             __ENQUEUE_UNMAP_MEM_OBJECT_ERR);
7850         if (event != NULL && err == CL_SUCCESS)

7857      * Enqueues a command that will release a coarse-grained SVM buffer back to the OpenCL runtime.
7858      * This variant takes a cl::vector instance.
7860     template<typename T, class Alloc>
// cl::vector SVM unmap; by-reference, unmaps container.data().
7861     cl_int enqueueUnmapSVM(
7862         cl::vector<T, Alloc> &container,
7863         const vector<Event>* events = NULL,
7864         Event* event = NULL) const
7867         cl_int err = detail::errHandler(
7868             ::clEnqueueSVMUnmap(
7869             object_, static_cast<void*>(container.data()),
7870             (events != NULL) ? (cl_uint)events->size() : 0,
7871             (events != NULL && events->size() > 0) ? (cl_event*)&events->front() : NULL,
7872             (event != NULL) ? &tmp : NULL),
7873             __ENQUEUE_UNMAP_MEM_OBJECT_ERR);
7875         if (event != NULL && err == CL_SUCCESS)
7880 #endif // #if CL_HPP_TARGET_OPENCL_VERSION >= 200
7882 #if CL_HPP_TARGET_OPENCL_VERSION >= 120
7884      * Enqueues a marker command which waits for either a list of events to complete,
7885      * or all previously enqueued commands to complete.
7887      * Enqueues a marker command which waits for either a list of events to complete,
7888      * or if the list is empty it waits for all commands previously enqueued in command_queue
7889      * to complete before it completes. This command returns an event which can be waited on,
7890      * i.e. this event can be waited on to insure that all events either in the event_wait_list
7891      * or all previously enqueued commands, queued before this command to command_queue,
7894     cl_int enqueueMarkerWithWaitList(
7895         const vector<Event> *events = 0,
7896         Event *event = 0) const
7899         cl_int err = detail::errHandler(
7900             ::clEnqueueMarkerWithWaitList(
7902                 (events != NULL) ? (cl_uint) events->size() : 0,
7903                 (events != NULL && events->size() > 0) ? (cl_event*) &events->front() : NULL,
7904                 (event != NULL) ? &tmp : NULL),
7905             __ENQUEUE_MARKER_WAIT_LIST_ERR);
7907         if (event != NULL && err == CL_SUCCESS)

7914      * A synchronization point that enqueues a barrier operation.
7916      * Enqueues a barrier command which waits for either a list of events to complete,
7917      * or if the list is empty it waits for all commands previously enqueued in command_queue
7918      * to complete before it completes. This command blocks command execution, that is, any
7919      * following commands enqueued after it do not execute until it completes. This command
7920      * returns an event which can be waited on, i.e. this event can be waited on to insure that
7921      * all events either in the event_wait_list or all previously enqueued commands, queued
7922      * before this command to command_queue, have completed.
7924     cl_int enqueueBarrierWithWaitList(
7925         const vector<Event> *events = 0,
7926         Event *event = 0) const
7929         cl_int err = detail::errHandler(
7930             ::clEnqueueBarrierWithWaitList(
7932                 (events != NULL) ? (cl_uint) events->size() : 0,
7933                 (events != NULL && events->size() > 0) ? (cl_event*) &events->front() : NULL,
7934                 (event != NULL) ? &tmp : NULL),
7935             __ENQUEUE_BARRIER_WAIT_LIST_ERR);
7937         if (event != NULL && err == CL_SUCCESS)

7944      * Enqueues a command to indicate with which device a set of memory objects
7945      * should be associated.
7947     cl_int enqueueMigrateMemObjects(
7948         const vector<Memory> &memObjects,
7949         cl_mem_migration_flags flags,
7950         const vector<Event>* events = NULL,
// Unwrap the C++ Memory wrappers into a contiguous cl_mem array for the C API.
7956         vector<cl_mem> localMemObjects(memObjects.size());
7958         for( int i = 0; i < (int)memObjects.size(); ++i ) {
7959             localMemObjects[i] = memObjects[i]();
7963         cl_int err = detail::errHandler(
7964             ::clEnqueueMigrateMemObjects(
7966                 (cl_uint)memObjects.size(),
7967                 localMemObjects.data(),
7969                 (events != NULL) ? (cl_uint) events->size() : 0,
7970                 (events != NULL && events->size() > 0) ? (cl_event*) &events->front() : NULL,
7971                 (event != NULL) ? &tmp : NULL),
// NOTE(review): reuses the UNMAP error string — looks copy-pasted; a
// migrate-specific error macro would be clearer. Confirm against upstream
// before changing, as the macro may not exist in this header version.
7972             __ENQUEUE_UNMAP_MEM_OBJECT_ERR);
7974         if (event != NULL && err == CL_SUCCESS)
7979 #endif // CL_HPP_TARGET_OPENCL_VERSION >= 120
// Launch a kernel over an N-dimensional range (wraps ::clEnqueueNDRangeKernel).
// NullRange offset/local translate to NULL pointers, letting the runtime choose.
7981 cl_int enqueueNDRangeKernel(
7982     const Kernel& kernel,
7983     const NDRange& offset,
7984     const NDRange& global,
7985     const NDRange& local = NullRange,
7986     const vector<Event>* events = NULL,
7987     Event* event = NULL) const
7990     cl_int err = detail::errHandler(
7991         ::clEnqueueNDRangeKernel(
7992             object_, kernel(), (cl_uint) global.dimensions(),
7993             offset.dimensions() != 0 ? (const size_type*) offset : NULL,
7994             (const size_type*) global,
7995             local.dimensions() != 0 ? (const size_type*) local : NULL,
7996             (events != NULL) ? (cl_uint) events->size() : 0,
7997             (events != NULL && events->size() > 0) ? (cl_event*) &events->front() : NULL,
7998             (event != NULL) ? &tmp : NULL),
7999         __ENQUEUE_NDRANGE_KERNEL_ERR);
8001     if (event != NULL && err == CL_SUCCESS)

8007 #if defined(CL_USE_DEPRECATED_OPENCL_1_2_APIS)
// Deprecated single work-item launch (clEnqueueTask was removed in OpenCL 2.0).
8008     CL_EXT_PREFIX__VERSION_1_2_DEPRECATED cl_int enqueueTask(
8009         const Kernel& kernel,
8010         const vector<Event>* events = NULL,
8011         Event* event = NULL) const CL_EXT_SUFFIX__VERSION_1_2_DEPRECATED
8014         cl_int err = detail::errHandler(
8017             (events != NULL) ? (cl_uint) events->size() : 0,
8018             (events != NULL && events->size() > 0) ? (cl_event*) &events->front() : NULL,
8019             (event != NULL) ? &tmp : NULL),
8020             __ENQUEUE_TASK_ERR);
8022         if (event != NULL && err == CL_SUCCESS)
8027 #endif // #if defined(CL_USE_DEPRECATED_OPENCL_1_2_APIS)

// Run a host-callable native function with cl_mem arguments patched into `args`
// (wraps ::clEnqueueNativeKernel).
8029 cl_int enqueueNativeKernel(
8030     void (CL_CALLBACK *userFptr)(void *),
8031     std::pair<void*, size_type> args,
8032     const vector<Memory>* mem_objects = NULL,
8033     const vector<const void*>* mem_locs = NULL,
8034     const vector<Event>* events = NULL,
8035     Event* event = NULL) const
// Unwrap Memory wrappers into raw cl_mem handles for the C API.
8037     size_type elements = 0;
8038     if (mem_objects != NULL) {
8039         elements = mem_objects->size();
8041     vector<cl_mem> mems(elements);
8042     for (unsigned int i = 0; i < elements; i++) {
8043         mems[i] = ((*mem_objects)[i])();
8047     cl_int err = detail::errHandler(
8048         ::clEnqueueNativeKernel(
8049             object_, userFptr, args.first, args.second,
8050             (mem_objects != NULL) ? (cl_uint) mem_objects->size() : 0,
8052             (mem_locs != NULL && mem_locs->size() > 0) ? (const void **) &mem_locs->front() : NULL,
8053             (events != NULL) ? (cl_uint) events->size() : 0,
8054             (events != NULL && events->size() > 0) ? (cl_event*) &events->front() : NULL,
8055             (event != NULL) ? &tmp : NULL),
8056         __ENQUEUE_NATIVE_KERNEL);
8058     if (event != NULL && err == CL_SUCCESS)
8065  * Deprecated APIs for 1.2
8067 #if defined(CL_USE_DEPRECATED_OPENCL_1_1_APIS)
8068     CL_EXT_PREFIX__VERSION_1_1_DEPRECATED
// Deprecated OpenCL 1.1 marker (superseded by enqueueMarkerWithWaitList).
8069     cl_int enqueueMarker(Event* event = NULL) const CL_EXT_SUFFIX__VERSION_1_1_DEPRECATED
8072         cl_int err = detail::errHandler(
8075             (event != NULL) ? &tmp : NULL),
8076             __ENQUEUE_MARKER_ERR);
8078         if (event != NULL && err == CL_SUCCESS)

8084     CL_EXT_PREFIX__VERSION_1_1_DEPRECATED
// Deprecated OpenCL 1.1 wait (superseded by enqueueBarrierWithWaitList).
8085     cl_int enqueueWaitForEvents(const vector<Event>& events) const CL_EXT_SUFFIX__VERSION_1_1_DEPRECATED
8087         return detail::errHandler(
8088             ::clEnqueueWaitForEvents(
8090                 (cl_uint) events.size(),
8091                 events.size() > 0 ? (const cl_event*) &events.front() : NULL),
8092             __ENQUEUE_WAIT_FOR_EVENTS_ERR);
8094 #endif // defined(CL_USE_DEPRECATED_OPENCL_1_1_APIS)

// Acquire OpenGL-shared memory objects for OpenCL use
// (wraps ::clEnqueueAcquireGLObjects).
8096 cl_int enqueueAcquireGLObjects(
8097      const vector<Memory>* mem_objects = NULL,
8098      const vector<Event>* events = NULL,
8099      Event* event = NULL) const
8102      cl_int err = detail::errHandler(
8103          ::clEnqueueAcquireGLObjects(
8105          (mem_objects != NULL) ? (cl_uint) mem_objects->size() : 0,
8106          (mem_objects != NULL && mem_objects->size() > 0) ? (const cl_mem *) &mem_objects->front(): NULL,
8107          (events != NULL) ? (cl_uint) events->size() : 0,
8108          (events != NULL && events->size() > 0) ? (cl_event*) &events->front() : NULL,
8109          (event != NULL) ? &tmp : NULL),
8110          __ENQUEUE_ACQUIRE_GL_ERR);
8112      if (event != NULL && err == CL_SUCCESS)

// Release OpenGL-shared memory objects back to GL
// (wraps ::clEnqueueReleaseGLObjects).
8118 cl_int enqueueReleaseGLObjects(
8119      const vector<Memory>* mem_objects = NULL,
8120      const vector<Event>* events = NULL,
8121      Event* event = NULL) const
8124      cl_int err = detail::errHandler(
8125          ::clEnqueueReleaseGLObjects(
8127          (mem_objects != NULL) ? (cl_uint) mem_objects->size() : 0,
8128          (mem_objects != NULL && mem_objects->size() > 0) ? (const cl_mem *) &mem_objects->front(): NULL,
8129          (events != NULL) ? (cl_uint) events->size() : 0,
8130          (events != NULL && events->size() > 0) ? (cl_event*) &events->front() : NULL,
8131          (event != NULL) ? &tmp : NULL),
8132          __ENQUEUE_RELEASE_GL_ERR);
8134      if (event != NULL && err == CL_SUCCESS)
8140 #if defined (CL_HPP_USE_DX_INTEROP)
// Function-pointer types for the cl_khr_d3d10_sharing extension entry points;
// these are resolved at runtime via the CL_HPP_INIT_CL_EXT_FCN_PTR_* macros
// because the symbols are not exported by the core OpenCL library.
8141 typedef CL_API_ENTRY cl_int (CL_API_CALL *PFN_clEnqueueAcquireD3D10ObjectsKHR)(
8142     cl_command_queue command_queue, cl_uint num_objects,
8143     const cl_mem* mem_objects, cl_uint num_events_in_wait_list,
8144     const cl_event* event_wait_list, cl_event* event);
8145 typedef CL_API_ENTRY cl_int (CL_API_CALL *PFN_clEnqueueReleaseD3D10ObjectsKHR)(
8146     cl_command_queue command_queue, cl_uint num_objects,
8147     const cl_mem* mem_objects, cl_uint num_events_in_wait_list,
8148     const cl_event* event_wait_list, cl_event* event);
// Acquire Direct3D 10-shared memory objects for OpenCL use
// (cl_khr_d3d10_sharing; dispatches through a lazily-resolved extension
// function pointer). Unchanged fragments of this gutted listing are kept
// verbatim; only the empty-wait-list bug below is fixed.
8150 cl_int enqueueAcquireD3D10Objects(
8151      const vector<Memory>* mem_objects = NULL,
8152      const vector<Event>* events = NULL,
8153      Event* event = NULL) const
8155     static PFN_clEnqueueAcquireD3D10ObjectsKHR pfn_clEnqueueAcquireD3D10ObjectsKHR = NULL;
8156 #if CL_HPP_TARGET_OPENCL_VERSION >= 120
8157     cl_context context = getInfo<CL_QUEUE_CONTEXT>();
8158     cl::Device device(getInfo<CL_QUEUE_DEVICE>());
8159     cl_platform_id platform = device.getInfo<CL_DEVICE_PLATFORM>();
8160     CL_HPP_INIT_CL_EXT_FCN_PTR_PLATFORM_(platform, clEnqueueAcquireD3D10ObjectsKHR);
8162 #if CL_HPP_TARGET_OPENCL_VERSION >= 110
8163     CL_HPP_INIT_CL_EXT_FCN_PTR_(clEnqueueAcquireD3D10ObjectsKHR);
8167      cl_int err = detail::errHandler(
8168          pfn_clEnqueueAcquireD3D10ObjectsKHR(
8170          (mem_objects != NULL) ? (cl_uint) mem_objects->size() : 0,
8171          (mem_objects != NULL && mem_objects->size() > 0) ? (const cl_mem *) &mem_objects->front(): NULL,
8172          (events != NULL) ? (cl_uint) events->size() : 0,
// FIX: guard on events->size() before taking front() — calling front() on an
// empty vector is undefined behavior. Every sibling enqueue* call site in this
// file uses the same `!= NULL && size() > 0` guard.
8173          (events != NULL && events->size() > 0) ? (cl_event*) &events->front() : NULL,
8174          (event != NULL) ? &tmp : NULL),
8175          __ENQUEUE_ACQUIRE_GL_ERR);
8177      if (event != NULL && err == CL_SUCCESS)
// Release Direct3D 10-shared memory objects back to D3D
// (cl_khr_d3d10_sharing; extension function pointer resolved lazily).
8183 cl_int enqueueReleaseD3D10Objects(
8184      const vector<Memory>* mem_objects = NULL,
8185      const vector<Event>* events = NULL,
8186      Event* event = NULL) const
8188     static PFN_clEnqueueReleaseD3D10ObjectsKHR pfn_clEnqueueReleaseD3D10ObjectsKHR = NULL;
8189 #if CL_HPP_TARGET_OPENCL_VERSION >= 120
8190     cl_context context = getInfo<CL_QUEUE_CONTEXT>();
8191     cl::Device device(getInfo<CL_QUEUE_DEVICE>());
8192     cl_platform_id platform = device.getInfo<CL_DEVICE_PLATFORM>();
8193     CL_HPP_INIT_CL_EXT_FCN_PTR_PLATFORM_(platform, clEnqueueReleaseD3D10ObjectsKHR);
8194 #endif // CL_HPP_TARGET_OPENCL_VERSION >= 120
8195 #if CL_HPP_TARGET_OPENCL_VERSION >= 110
8196     CL_HPP_INIT_CL_EXT_FCN_PTR_(clEnqueueReleaseD3D10ObjectsKHR);
8197 #endif // CL_HPP_TARGET_OPENCL_VERSION >= 110
8200     cl_int err = detail::errHandler(
8201         pfn_clEnqueueReleaseD3D10ObjectsKHR(
8203         (mem_objects != NULL) ? (cl_uint) mem_objects->size() : 0,
8204         (mem_objects != NULL && mem_objects->size() > 0) ? (const cl_mem *) &mem_objects->front(): NULL,
8205         (events != NULL) ? (cl_uint) events->size() : 0,
8206         (events != NULL && events->size() > 0) ? (cl_event*) &events->front() : NULL,
8207         (event != NULL) ? &tmp : NULL),
8208         __ENQUEUE_RELEASE_GL_ERR);
8210     if (event != NULL && err == CL_SUCCESS)

8218  * Deprecated APIs for 1.2
8220 #if defined(CL_USE_DEPRECATED_OPENCL_1_1_APIS)
8221     CL_EXT_PREFIX__VERSION_1_1_DEPRECATED
// Deprecated OpenCL 1.1 barrier (superseded by enqueueBarrierWithWaitList).
8222     cl_int enqueueBarrier() const CL_EXT_SUFFIX__VERSION_1_1_DEPRECATED
8224         return detail::errHandler(
8225             ::clEnqueueBarrier(object_),
8226             __ENQUEUE_BARRIER_ERR);
8228 #endif // CL_USE_DEPRECATED_OPENCL_1_1_APIS

// Issue all queued commands to the device without waiting for completion.
8230 cl_int flush() const
8232     return detail::errHandler(::clFlush(object_), __FLUSH_ERR);

// Block until all queued commands have completed.
8235 cl_int finish() const
8237     return detail::errHandler(::clFinish(object_), __FINISH_ERR);

// Out-of-line definitions for CommandQueue's default-queue statics
// (thread-safe one-time init via default_initialized_).
8241 CL_HPP_DEFINE_STATIC_MEMBER_ std::once_flag CommandQueue::default_initialized_;
8242 CL_HPP_DEFINE_STATIC_MEMBER_ CommandQueue CommandQueue::default_;
8243 CL_HPP_DEFINE_STATIC_MEMBER_ cl_int CommandQueue::default_error_ = CL_SUCCESS;
8246 #if CL_HPP_TARGET_OPENCL_VERSION >= 200
// Strongly-typed flags for device-side queue creation; combined with the
// bitwise-or overload below. (Other enumerators, e.g. None, are missing from
// this gutted listing.)
8247 enum class DeviceQueueProperties : cl_command_queue_properties
8250     Profiling = CL_QUEUE_PROFILING_ENABLE,

// Bitwise-or for the flag enum: widen to the underlying integer type,
// combine, and cast back.
8253 inline DeviceQueueProperties operator|(DeviceQueueProperties lhs, DeviceQueueProperties rhs)
8255     return static_cast<DeviceQueueProperties>(static_cast<cl_command_queue_properties>(lhs) | static_cast<cl_command_queue_properties>(rhs));
8258 /*! \class DeviceCommandQueue
8259  * \brief DeviceCommandQueue interface for device cl_command_queues.
// Wraps an on-device (OpenCL 2.0) command queue created with
// CL_QUEUE_ON_DEVICE. Unchanged fragments of this gutted listing are kept
// verbatim; only the mojibake in getInfo (see below) is repaired.
8261 class DeviceCommandQueue : public detail::Wrapper<cl_command_queue>
8266      * Trivial empty constructor to create a null queue.
8268     DeviceCommandQueue() { }

8271      * Default construct device command queue on default context and device
8273     DeviceCommandQueue(DeviceQueueProperties properties, cl_int* err = NULL)
8276         cl::Context context = cl::Context::getDefault();
8277         cl::Device device = cl::Device::getDefault();
// Device queues must always be out-of-order and on-device; user properties
// are OR-ed on top.
8279         cl_command_queue_properties mergedProperties =
8280             CL_QUEUE_OUT_OF_ORDER_EXEC_MODE_ENABLE | CL_QUEUE_ON_DEVICE | static_cast<cl_command_queue_properties>(properties);
8282         cl_queue_properties queue_properties[] = {
8283             CL_QUEUE_PROPERTIES, mergedProperties, 0 };
8284         object_ = ::clCreateCommandQueueWithProperties(
8285             context(), device(), queue_properties, &error);
8287         detail::errHandler(error, __CREATE_COMMAND_QUEUE_WITH_PROPERTIES_ERR);

8294      * Create a device command queue for a specified device in the passed context.
8297         const Context& context,
8298         const Device& device,
8299         DeviceQueueProperties properties = DeviceQueueProperties::None,
8304         cl_command_queue_properties mergedProperties =
8305             CL_QUEUE_OUT_OF_ORDER_EXEC_MODE_ENABLE | CL_QUEUE_ON_DEVICE | static_cast<cl_command_queue_properties>(properties);
8306         cl_queue_properties queue_properties[] = {
8307             CL_QUEUE_PROPERTIES, mergedProperties, 0 };
8308         object_ = ::clCreateCommandQueueWithProperties(
8309             context(), device(), queue_properties, &error);
8311         detail::errHandler(error, __CREATE_COMMAND_QUEUE_WITH_PROPERTIES_ERR);

8318      * Create a device command queue for a specified device in the passed context.
// This overload additionally requests an explicit queue size (CL_QUEUE_SIZE).
8321         const Context& context,
8322         const Device& device,
8324         DeviceQueueProperties properties = DeviceQueueProperties::None,
8329         cl_command_queue_properties mergedProperties =
8330             CL_QUEUE_OUT_OF_ORDER_EXEC_MODE_ENABLE | CL_QUEUE_ON_DEVICE | static_cast<cl_command_queue_properties>(properties);
8331         cl_queue_properties queue_properties[] = {
8332             CL_QUEUE_PROPERTIES, mergedProperties,
8333             CL_QUEUE_SIZE, queueSize,
8335         object_ = ::clCreateCommandQueueWithProperties(
8336             context(), device(), queue_properties, &error);
8338         detail::errHandler(error, __CREATE_COMMAND_QUEUE_WITH_PROPERTIES_ERR);

8344     /*! \brief Constructor from cl_command_queue - takes ownership.
8346      *  \param retainObject will cause the constructor to retain its cl object.
8347      *  Defaults to false to maintain compatibility with
8350     explicit DeviceCommandQueue(const cl_command_queue& commandQueue, bool retainObject = false) :
8351         detail::Wrapper<cl_type>(commandQueue, retainObject) { }

8353     DeviceCommandQueue& operator = (const cl_command_queue& rhs)
8355         detail::Wrapper<cl_type>::operator=(rhs);

8359     /*! \brief Copy constructor to forward copy to the superclass correctly.
8360      * Required for MSVC.
8362     DeviceCommandQueue(const DeviceCommandQueue& queue) : detail::Wrapper<cl_type>(queue) {}

8364     /*! \brief Copy assignment to forward copy to the superclass correctly.
8365      * Required for MSVC.
8367     DeviceCommandQueue& operator = (const DeviceCommandQueue &queue)
8369         detail::Wrapper<cl_type>::operator=(queue);

8373     /*! \brief Move constructor to forward move to the superclass correctly.
8374      * Required for MSVC.
8376     DeviceCommandQueue(DeviceCommandQueue&& queue) CL_HPP_NOEXCEPT_ : detail::Wrapper<cl_type>(std::move(queue)) {}

8378     /*! \brief Move assignment to forward move to the superclass correctly.
8379      * Required for MSVC.
8381     DeviceCommandQueue& operator = (DeviceCommandQueue &&queue)
8383         detail::Wrapper<cl_type>::operator=(std::move(queue));

// Query a single cl_command_queue_info property into *param.
8387     template <typename T>
8388     cl_int getInfo(cl_command_queue_info name, T* param) const
8390         return detail::errHandler(
8392                 &::clGetCommandQueueInfo, object_, name, param),
8393             __GET_COMMAND_QUEUE_INFO_ERR);

// Typed convenience wrapper: deduces the result type from param_traits.
8396     template <cl_int name> typename
8397     detail::param_traits<detail::cl_command_queue_info, name>::param_type
8398     getInfo(cl_int* err = NULL) const
8400         typename detail::param_traits<
8401             detail::cl_command_queue_info, name>::param_type param;
// FIX: the listing contained mojibake "¶m" here — an HTML-entity garbling
// ("&para;") of "&param". Restored to pass the address of the local result.
8402         cl_int result = getInfo(name, &param);

8410      * Create a new default device command queue for the default device,
8411      * in the default context and of the default size.
8412      * If there is already a default queue for the specified device this
8413      * function will return the pre-existing queue.
8415     static DeviceCommandQueue makeDefault(
8416         cl_int *err = nullptr)
8419         cl::Context context = cl::Context::getDefault();
8420         cl::Device device = cl::Device::getDefault();
8422         cl_command_queue_properties properties =
8423             CL_QUEUE_OUT_OF_ORDER_EXEC_MODE_ENABLE | CL_QUEUE_ON_DEVICE | CL_QUEUE_ON_DEVICE_DEFAULT;
8424         cl_queue_properties queue_properties[] = {
8425             CL_QUEUE_PROPERTIES, properties,
8427         DeviceCommandQueue deviceQueue(
8428             ::clCreateCommandQueueWithProperties(
8429                 context(), device(), queue_properties, &error));
8431         detail::errHandler(error, __CREATE_COMMAND_QUEUE_WITH_PROPERTIES_ERR);

8440      * Create a new default device command queue for the specified device
8441      * and of the default size.
8442      * If there is already a default queue for the specified device this
8443      * function will return the pre-existing queue.
8445     static DeviceCommandQueue makeDefault(
8446         const Context &context, const Device &device, cl_int *err = nullptr)
8450         cl_command_queue_properties properties =
8451             CL_QUEUE_OUT_OF_ORDER_EXEC_MODE_ENABLE | CL_QUEUE_ON_DEVICE | CL_QUEUE_ON_DEVICE_DEFAULT;
8452         cl_queue_properties queue_properties[] = {
8453             CL_QUEUE_PROPERTIES, properties,
8455         DeviceCommandQueue deviceQueue(
8456             ::clCreateCommandQueueWithProperties(
8457                 context(), device(), queue_properties, &error));
8459         detail::errHandler(error, __CREATE_COMMAND_QUEUE_WITH_PROPERTIES_ERR);

8468      * Create a new default device command queue for the specified device
8469      * and of the requested size in bytes.
8470      * If there is already a default queue for the specified device this
8471      * function will return the pre-existing queue.
8473     static DeviceCommandQueue makeDefault(
8474         const Context &context, const Device &device, cl_uint queueSize, cl_int *err = nullptr)
8478         cl_command_queue_properties properties =
8479             CL_QUEUE_OUT_OF_ORDER_EXEC_MODE_ENABLE | CL_QUEUE_ON_DEVICE | CL_QUEUE_ON_DEVICE_DEFAULT;
8480         cl_queue_properties queue_properties[] = {
8481             CL_QUEUE_PROPERTIES, properties,
8482             CL_QUEUE_SIZE, queueSize,
8484         DeviceCommandQueue deviceQueue(
8485             ::clCreateCommandQueueWithProperties(
8486                 context(), device(), queue_properties, &error));
8488         detail::errHandler(error, __CREATE_COMMAND_QUEUE_WITH_PROPERTIES_ERR);

8495 }; // DeviceCommandQueue
8499 // Specialization for device command queue
// Lets Kernel::setArg pass a DeviceCommandQueue: its kernel-argument payload
// is the raw cl_command_queue handle.
8501 struct KernelArgumentHandler<cl::DeviceCommandQueue, void>
8503     static size_type size(const cl::DeviceCommandQueue&) { return sizeof(cl_command_queue); }
8504     static const cl_command_queue* ptr(const cl::DeviceCommandQueue& value) { return &(value()); }
8506 } // namespace detail
8508 #endif // #if CL_HPP_TARGET_OPENCL_VERSION >= 200
// Buffer constructor from a host iterator range: allocates a buffer sized to
// the range and either uses the host memory directly (CL_MEM_USE_HOST_PTR) or
// copies the data in via cl::copy on a temporary queue. (Flag-selection
// conditionals and several lines are missing from this gutted listing.)
8511 template< typename IteratorType >
8513     const Context &context,
8514     IteratorType startIterator,
8515     IteratorType endIterator,
8520     typedef typename std::iterator_traits<IteratorType>::value_type DataType;

8523     cl_mem_flags flags = 0;
8525         flags |= CL_MEM_READ_ONLY;
8528         flags |= CL_MEM_READ_WRITE;
8531         flags |= CL_MEM_USE_HOST_PTR;

8534     size_type size = sizeof(DataType)*(endIterator - startIterator);

// useHostPtr branch: hand the iterator's storage to the runtime;
// otherwise allocate device-side and copy below.
8537         object_ = ::clCreateBuffer(context(), flags, size, static_cast<DataType*>(&*startIterator), &error);
8539         object_ = ::clCreateBuffer(context(), flags, size, 0, &error);

8542     detail::errHandler(error, __CREATE_BUFFER_ERR);

8548         CommandQueue queue(context, 0, &error);
8549         detail::errHandler(error, __CREATE_BUFFER_ERR);

8554         error = cl::copy(queue, startIterator, endIterator, *this);
8555         detail::errHandler(error, __CREATE_BUFFER_ERR);

// Same as above, but takes an existing queue and derives the context from it
// (CL_QUEUE_CONTEXT query), avoiding creation of a temporary queue.
8562 template< typename IteratorType >
8564     const CommandQueue &queue,
8565     IteratorType startIterator,
8566     IteratorType endIterator,
8571     typedef typename std::iterator_traits<IteratorType>::value_type DataType;

8574     cl_mem_flags flags = 0;
8576         flags |= CL_MEM_READ_ONLY;
8579         flags |= CL_MEM_READ_WRITE;
8582         flags |= CL_MEM_USE_HOST_PTR;

8585     size_type size = sizeof(DataType)*(endIterator - startIterator);

8587     Context context = queue.getInfo<CL_QUEUE_CONTEXT>();

8590         object_ = ::clCreateBuffer(context(), flags, size, static_cast<DataType*>(&*startIterator), &error);
8593         object_ = ::clCreateBuffer(context(), flags, size, 0, &error);

8596     detail::errHandler(error, __CREATE_BUFFER_ERR);

8602         error = cl::copy(queue, startIterator, endIterator, *this);
8603         detail::errHandler(error, __CREATE_BUFFER_ERR);
// Free-function convenience wrappers: each fetches the default CommandQueue
// and forwards to the corresponding member function, propagating the
// getDefault() error if queue creation failed.

8610 inline cl_int enqueueReadBuffer(
8611     const Buffer& buffer,
8616     const vector<Event>* events = NULL,
8617     Event* event = NULL)
8620     CommandQueue queue = CommandQueue::getDefault(&error);

8622     if (error != CL_SUCCESS) {

8626     return queue.enqueueReadBuffer(buffer, blocking, offset, size, ptr, events, event);

8629 inline cl_int enqueueWriteBuffer(
8630         const Buffer& buffer,
8635         const vector<Event>* events = NULL,
8636         Event* event = NULL)
8639     CommandQueue queue = CommandQueue::getDefault(&error);

8641     if (error != CL_SUCCESS) {

8645     return queue.enqueueWriteBuffer(buffer, blocking, offset, size, ptr, events, event);

// Free-function map: calls the C API directly on the default queue and
// returns the mapped host pointer.
8648 inline void* enqueueMapBuffer(
8649         const Buffer& buffer,
8654         const vector<Event>* events = NULL,
8655         Event* event = NULL,
8659     CommandQueue queue = CommandQueue::getDefault(&error);
8660     detail::errHandler(error, __ENQUEUE_MAP_BUFFER_ERR);

8665     void * result = ::clEnqueueMapBuffer(
8666             queue(), buffer(), blocking, flags, offset, size,
8667             (events != NULL) ? (cl_uint) events->size() : 0,
8668             (events != NULL && events->size() > 0) ? (cl_event*) &events->front() : NULL,

8672     detail::errHandler(error, __ENQUEUE_MAP_BUFFER_ERR);

8680 #if CL_HPP_TARGET_OPENCL_VERSION >= 200
8682  * Enqueues to the default queue a command that will allow the host to
8683  * update a region of a coarse-grained SVM buffer.
8684  * This variant takes a raw SVM pointer.
8686 template<typename T>
8687 inline cl_int enqueueMapSVM(
8692     const vector<Event>* events,
8696     CommandQueue queue = CommandQueue::getDefault(&error);
8697     if (error != CL_SUCCESS) {
8698         return detail::errHandler(error, __ENQUEUE_MAP_BUFFER_ERR);

8701     return queue.enqueueMapSVM(
8702         ptr, blocking, flags, size, events, event);
8710 template<typename T, class D>
// Enqueues to the default queue a command that will allow the host to update
// a region of a coarse-grained SVM buffer. This variant takes a cl::pointer
// instance.
// FIX: take `ptr` by reference, matching the CommandQueue::enqueueMapSVM
// member overload it forwards to (which takes `cl::pointer<T, D> &ptr`).
// By value this copied — or, for a unique_ptr-backed cl::pointer, failed to
// compile for lvalue arguments.
8711 inline cl_int enqueueMapSVM(
8712     cl::pointer<T, D> &ptr,
8716     const vector<Event>* events = NULL,
8717     Event* event = NULL)
8720     CommandQueue queue = CommandQueue::getDefault(&error);
8721     if (error != CL_SUCCESS) {
8722         return detail::errHandler(error, __ENQUEUE_MAP_BUFFER_ERR);

8725     return queue.enqueueMapSVM(
8726         ptr, blocking, flags, size, events, event);
8730 * Enqueues to the default queue a command that will allow the host to
8731 * update a region of a coarse-grained SVM buffer.
8732 * This variant takes a cl::vector instance.
8734 template<typename T, class Alloc>
8735 inline cl_int enqueueMapSVM(
8736 cl::vector<T, Alloc> container,
8739 const vector<Event>* events = NULL,
8740 Event* event = NULL)
8743 CommandQueue queue = CommandQueue::getDefault(&error);
8744 if (error != CL_SUCCESS) {
8745 return detail::errHandler(error, __ENQUEUE_MAP_BUFFER_ERR);
8748 return queue.enqueueMapSVM(
8749 container, blocking, flags, events, event);
8752 #endif // #if CL_HPP_TARGET_OPENCL_VERSION >= 200
8754 inline cl_int enqueueUnmapMemObject(
8755 const Memory& memory,
8757 const vector<Event>* events = NULL,
8758 Event* event = NULL)
8761 CommandQueue queue = CommandQueue::getDefault(&error);
8762 detail::errHandler(error, __ENQUEUE_MAP_BUFFER_ERR);
8763 if (error != CL_SUCCESS) {
8768 cl_int err = detail::errHandler(
8769 ::clEnqueueUnmapMemObject(
8770 queue(), memory(), mapped_ptr,
8771 (events != NULL) ? (cl_uint)events->size() : 0,
8772 (events != NULL && events->size() > 0) ? (cl_event*)&events->front() : NULL,
8773 (event != NULL) ? &tmp : NULL),
8774 __ENQUEUE_UNMAP_MEM_OBJECT_ERR);
8776 if (event != NULL && err == CL_SUCCESS)
8782 #if CL_HPP_TARGET_OPENCL_VERSION >= 200
8784 * Enqueues to the default queue a command that will release a coarse-grained
8785 * SVM buffer back to the OpenCL runtime.
8786 * This variant takes a raw SVM pointer.
8788 template<typename T>
8789 inline cl_int enqueueUnmapSVM(
8791 const vector<Event>* events = NULL,
8792 Event* event = NULL)
8795 CommandQueue queue = CommandQueue::getDefault(&error);
8796 if (error != CL_SUCCESS) {
8797 return detail::errHandler(error, __ENQUEUE_UNMAP_MEM_OBJECT_ERR);
8800 return detail::errHandler(queue.enqueueUnmapSVM(ptr, events, event),
8801 __ENQUEUE_UNMAP_MEM_OBJECT_ERR);
8806 * Enqueues to the default queue a command that will release a coarse-grained
8807 * SVM buffer back to the OpenCL runtime.
8808 * This variant takes a cl::pointer instance.
8810 template<typename T, class D>
8811 inline cl_int enqueueUnmapSVM(
8812 cl::pointer<T, D> &ptr,
8813 const vector<Event>* events = NULL,
8814 Event* event = NULL)
8817 CommandQueue queue = CommandQueue::getDefault(&error);
8818 if (error != CL_SUCCESS) {
8819 return detail::errHandler(error, __ENQUEUE_UNMAP_MEM_OBJECT_ERR);
8822 return detail::errHandler(queue.enqueueUnmapSVM(ptr, events, event),
8823 __ENQUEUE_UNMAP_MEM_OBJECT_ERR);
8827 * Enqueues to the default queue a command that will release a coarse-grained
8828 * SVM buffer back to the OpenCL runtime.
8829 * This variant takes a cl::vector instance.
8831 template<typename T, class Alloc>
8832 inline cl_int enqueueUnmapSVM(
8833 cl::vector<T, Alloc> &container,
8834 const vector<Event>* events = NULL,
8835 Event* event = NULL)
8838 CommandQueue queue = CommandQueue::getDefault(&error);
8839 if (error != CL_SUCCESS) {
8840 return detail::errHandler(error, __ENQUEUE_UNMAP_MEM_OBJECT_ERR);
8843 return detail::errHandler(queue.enqueueUnmapSVM(container, events, event),
8844 __ENQUEUE_UNMAP_MEM_OBJECT_ERR);
8847 #endif // #if CL_HPP_TARGET_OPENCL_VERSION >= 200
8849 inline cl_int enqueueCopyBuffer(
8852 size_type src_offset,
8853 size_type dst_offset,
8855 const vector<Event>* events = NULL,
8856 Event* event = NULL)
8859 CommandQueue queue = CommandQueue::getDefault(&error);
8861 if (error != CL_SUCCESS) {
8865 return queue.enqueueCopyBuffer(src, dst, src_offset, dst_offset, size, events, event);
8869 * Blocking copy operation between iterators and a buffer.
8871 * Uses default command queue.
8873 template< typename IteratorType >
8874 inline cl_int copy( IteratorType startIterator, IteratorType endIterator, cl::Buffer &buffer )
8877 CommandQueue queue = CommandQueue::getDefault(&error);
8878 if (error != CL_SUCCESS)
8881 return cl::copy(queue, startIterator, endIterator, buffer);
8885 * Blocking copy operation between iterators and a buffer.
8887 * Uses default command queue.
8889 template< typename IteratorType >
8890 inline cl_int copy( const cl::Buffer &buffer, IteratorType startIterator, IteratorType endIterator )
8893 CommandQueue queue = CommandQueue::getDefault(&error);
8894 if (error != CL_SUCCESS)
8897 return cl::copy(queue, buffer, startIterator, endIterator);
8901 * Blocking copy operation between iterators and a buffer.
8903 * Uses specified queue.
8905 template< typename IteratorType >
8906 inline cl_int copy( const CommandQueue &queue, IteratorType startIterator, IteratorType endIterator, cl::Buffer &buffer )
8908 typedef typename std::iterator_traits<IteratorType>::value_type DataType;
8911 size_type length = endIterator-startIterator;
8912 size_type byteLength = length*sizeof(DataType);
8915 static_cast<DataType*>(queue.enqueueMapBuffer(buffer, CL_TRUE, CL_MAP_WRITE, 0, byteLength, 0, 0, &error));
8916 // if exceptions enabled, enqueueMapBuffer will throw
8917 if( error != CL_SUCCESS ) {
8920 #if defined(_MSC_VER)
8924 stdext::checked_array_iterator<DataType*>(
8927 std::copy(startIterator, endIterator, pointer);
8930 error = queue.enqueueUnmapMemObject(buffer, pointer, 0, &endEvent);
8931 // if exceptions enabled, enqueueUnmapMemObject will throw
8932 if( error != CL_SUCCESS ) {
8940 * Blocking copy operation between iterators and a buffer.
8942 * Uses specified queue.
8944 template< typename IteratorType >
8945 inline cl_int copy( const CommandQueue &queue, const cl::Buffer &buffer, IteratorType startIterator, IteratorType endIterator )
8947 typedef typename std::iterator_traits<IteratorType>::value_type DataType;
8950 size_type length = endIterator-startIterator;
8951 size_type byteLength = length*sizeof(DataType);
8954 static_cast<DataType*>(queue.enqueueMapBuffer(buffer, CL_TRUE, CL_MAP_READ, 0, byteLength, 0, 0, &error));
8955 // if exceptions enabled, enqueueMapBuffer will throw
8956 if( error != CL_SUCCESS ) {
8959 std::copy(pointer, pointer + length, startIterator);
8961 error = queue.enqueueUnmapMemObject(buffer, pointer, 0, &endEvent);
8962 // if exceptions enabled, enqueueUnmapMemObject will throw
8963 if( error != CL_SUCCESS ) {
8971 #if CL_HPP_TARGET_OPENCL_VERSION >= 200
8973 * Blocking SVM map operation - performs a blocking map underneath.
8975 template<typename T, class Alloc>
8976 inline cl_int mapSVM(cl::vector<T, Alloc> &container)
8978 return enqueueMapSVM(container, CL_TRUE, CL_MAP_READ | CL_MAP_WRITE);
8982 * Blocking SVM map operation - performs a blocking map underneath.
8984 template<typename T, class Alloc>
8985 inline cl_int unmapSVM(cl::vector<T, Alloc> &container)
8987 return enqueueUnmapSVM(container);
8990 #endif // #if CL_HPP_TARGET_OPENCL_VERSION >= 200
8992 #if CL_HPP_TARGET_OPENCL_VERSION >= 110
8993 inline cl_int enqueueReadBufferRect(
8994 const Buffer& buffer,
8996 const array<size_type, 3>& buffer_offset,
8997 const array<size_type, 3>& host_offset,
8998 const array<size_type, 3>& region,
8999 size_type buffer_row_pitch,
9000 size_type buffer_slice_pitch,
9001 size_type host_row_pitch,
9002 size_type host_slice_pitch,
9004 const vector<Event>* events = NULL,
9005 Event* event = NULL)
9008 CommandQueue queue = CommandQueue::getDefault(&error);
9010 if (error != CL_SUCCESS) {
9014 return queue.enqueueReadBufferRect(
9029 inline cl_int enqueueWriteBufferRect(
9030 const Buffer& buffer,
9032 const array<size_type, 3>& buffer_offset,
9033 const array<size_type, 3>& host_offset,
9034 const array<size_type, 3>& region,
9035 size_type buffer_row_pitch,
9036 size_type buffer_slice_pitch,
9037 size_type host_row_pitch,
9038 size_type host_slice_pitch,
9040 const vector<Event>* events = NULL,
9041 Event* event = NULL)
9044 CommandQueue queue = CommandQueue::getDefault(&error);
9046 if (error != CL_SUCCESS) {
9050 return queue.enqueueWriteBufferRect(
9065 inline cl_int enqueueCopyBufferRect(
9068 const array<size_type, 3>& src_origin,
9069 const array<size_type, 3>& dst_origin,
9070 const array<size_type, 3>& region,
9071 size_type src_row_pitch,
9072 size_type src_slice_pitch,
9073 size_type dst_row_pitch,
9074 size_type dst_slice_pitch,
9075 const vector<Event>* events = NULL,
9076 Event* event = NULL)
9079 CommandQueue queue = CommandQueue::getDefault(&error);
9081 if (error != CL_SUCCESS) {
9085 return queue.enqueueCopyBufferRect(
9098 #endif // CL_HPP_TARGET_OPENCL_VERSION >= 110
9100 inline cl_int enqueueReadImage(
9103 const array<size_type, 3>& origin,
9104 const array<size_type, 3>& region,
9105 size_type row_pitch,
9106 size_type slice_pitch,
9108 const vector<Event>* events = NULL,
9109 Event* event = NULL)
9112 CommandQueue queue = CommandQueue::getDefault(&error);
9114 if (error != CL_SUCCESS) {
9118 return queue.enqueueReadImage(
9130 inline cl_int enqueueWriteImage(
9133 const array<size_type, 3>& origin,
9134 const array<size_type, 3>& region,
9135 size_type row_pitch,
9136 size_type slice_pitch,
9138 const vector<Event>* events = NULL,
9139 Event* event = NULL)
9142 CommandQueue queue = CommandQueue::getDefault(&error);
9144 if (error != CL_SUCCESS) {
9148 return queue.enqueueWriteImage(
9160 inline cl_int enqueueCopyImage(
9163 const array<size_type, 3>& src_origin,
9164 const array<size_type, 3>& dst_origin,
9165 const array<size_type, 3>& region,
9166 const vector<Event>* events = NULL,
9167 Event* event = NULL)
9170 CommandQueue queue = CommandQueue::getDefault(&error);
9172 if (error != CL_SUCCESS) {
9176 return queue.enqueueCopyImage(
9186 inline cl_int enqueueCopyImageToBuffer(
9189 const array<size_type, 3>& src_origin,
9190 const array<size_type, 3>& region,
9191 size_type dst_offset,
9192 const vector<Event>* events = NULL,
9193 Event* event = NULL)
9196 CommandQueue queue = CommandQueue::getDefault(&error);
9198 if (error != CL_SUCCESS) {
9202 return queue.enqueueCopyImageToBuffer(
9212 inline cl_int enqueueCopyBufferToImage(
9215 size_type src_offset,
9216 const array<size_type, 3>& dst_origin,
9217 const array<size_type, 3>& region,
9218 const vector<Event>* events = NULL,
9219 Event* event = NULL)
9222 CommandQueue queue = CommandQueue::getDefault(&error);
9224 if (error != CL_SUCCESS) {
9228 return queue.enqueueCopyBufferToImage(
9239 inline cl_int flush(void)
9242 CommandQueue queue = CommandQueue::getDefault(&error);
9244 if (error != CL_SUCCESS) {
9248 return queue.flush();
9251 inline cl_int finish(void)
9254 CommandQueue queue = CommandQueue::getDefault(&error);
9256 if (error != CL_SUCCESS) {
9261 return queue.finish();
9267 CommandQueue queue_;
9268 const NDRange offset_;
9269 const NDRange global_;
9270 const NDRange local_;
9271 vector<Event> events_;
9273 template<typename... Ts>
9274 friend class KernelFunctor;
9277 EnqueueArgs(NDRange global) :
9278 queue_(CommandQueue::getDefault()),
9286 EnqueueArgs(NDRange global, NDRange local) :
9287 queue_(CommandQueue::getDefault()),
9295 EnqueueArgs(NDRange offset, NDRange global, NDRange local) :
9296 queue_(CommandQueue::getDefault()),
9304 EnqueueArgs(Event e, NDRange global) :
9305 queue_(CommandQueue::getDefault()),
9310 events_.push_back(e);
9313 EnqueueArgs(Event e, NDRange global, NDRange local) :
9314 queue_(CommandQueue::getDefault()),
9319 events_.push_back(e);
9322 EnqueueArgs(Event e, NDRange offset, NDRange global, NDRange local) :
9323 queue_(CommandQueue::getDefault()),
9328 events_.push_back(e);
9331 EnqueueArgs(const vector<Event> &events, NDRange global) :
9332 queue_(CommandQueue::getDefault()),
9341 EnqueueArgs(const vector<Event> &events, NDRange global, NDRange local) :
9342 queue_(CommandQueue::getDefault()),
9351 EnqueueArgs(const vector<Event> &events, NDRange offset, NDRange global, NDRange local) :
9352 queue_(CommandQueue::getDefault()),
9361 EnqueueArgs(CommandQueue &queue, NDRange global) :
9370 EnqueueArgs(CommandQueue &queue, NDRange global, NDRange local) :
9379 EnqueueArgs(CommandQueue &queue, NDRange offset, NDRange global, NDRange local) :
9388 EnqueueArgs(CommandQueue &queue, Event e, NDRange global) :
9394 events_.push_back(e);
9397 EnqueueArgs(CommandQueue &queue, Event e, NDRange global, NDRange local) :
9403 events_.push_back(e);
9406 EnqueueArgs(CommandQueue &queue, Event e, NDRange offset, NDRange global, NDRange local) :
9412 events_.push_back(e);
9415 EnqueueArgs(CommandQueue &queue, const vector<Event> &events, NDRange global) :
9425 EnqueueArgs(CommandQueue &queue, const vector<Event> &events, NDRange global, NDRange local) :
9435 EnqueueArgs(CommandQueue &queue, const vector<Event> &events, NDRange offset, NDRange global, NDRange local) :
9447 //----------------------------------------------------------------------------------------------
9451 * Type safe kernel functor.
9454 template<typename... Ts>
9460 template<int index, typename T0, typename... T1s>
9461 void setArgs(T0&& t0, T1s&&... t1s)
9463 kernel_.setArg(index, t0);
9464 setArgs<index + 1, T1s...>(std::forward<T1s>(t1s)...);
9467 template<int index, typename T0>
9468 void setArgs(T0&& t0)
9470 kernel_.setArg(index, t0);
9480 KernelFunctor(Kernel kernel) : kernel_(kernel)
9484 const Program& program,
9486 cl_int * err = NULL) :
9487 kernel_(program, name.c_str(), err)
9490 //! \brief Return type of the functor
9491 typedef Event result_type;
9495 * @param args Launch parameters of the kernel.
9496 * @param t0... List of kernel arguments based on the template type of the functor.
9499 const EnqueueArgs& args,
9503 setArgs<0>(std::forward<Ts>(ts)...);
9505 args.queue_.enqueueNDRangeKernel(
9517 * Enqueue kernel with support for error code.
9518 * @param args Launch parameters of the kernel.
9519 * @param t0... List of kernel arguments based on the template type of the functor.
9520 * @param error Out parameter returning the error code from the execution.
9523 const EnqueueArgs& args,
9528 setArgs<0>(std::forward<Ts>(ts)...);
9530 error = args.queue_.enqueueNDRangeKernel(
9541 #if CL_HPP_TARGET_OPENCL_VERSION >= 200
9542 cl_int setSVMPointers(const vector<void*> &pointerList)
9544 return kernel_.setSVMPointers(pointerList);
9547 template<typename T0, typename... T1s>
9548 cl_int setSVMPointers(const T0 &t0, T1s &... ts)
9550 return kernel_.setSVMPointers(t0, ts...);
9552 #endif // #if CL_HPP_TARGET_OPENCL_VERSION >= 200
9560 namespace compatibility {
9562 * Backward compatibility class to ensure that cl.hpp code works with cl2.hpp.
9563 * Please use KernelFunctor directly.
9565 template<typename... Ts>
9568 typedef KernelFunctor<Ts...> FunctorType;
9570 FunctorType functor_;
9573 const Program& program,
9575 cl_int * err = NULL) :
9576 functor_(FunctorType(program, name, err))
9580 const Kernel kernel) :
9581 functor_(FunctorType(kernel))
9584 //! \brief Return type of the functor
9585 typedef Event result_type;
9587 //! \brief Function signature of kernel functor with no event dependency.
9588 typedef Event type_(
9593 const EnqueueArgs& enqueueArgs,
9597 enqueueArgs, args...);
9600 } // namespace compatibility
9603 //----------------------------------------------------------------------------------------------------------------------
9605 #undef CL_HPP_ERR_STR_
9606 #if !defined(CL_HPP_USER_OVERRIDE_ERROR_STRINGS)
9607 #undef __GET_DEVICE_INFO_ERR
9608 #undef __GET_PLATFORM_INFO_ERR
9609 #undef __GET_DEVICE_IDS_ERR
9610 #undef __GET_CONTEXT_INFO_ERR
9611 #undef __GET_EVENT_INFO_ERR
9612 #undef __GET_EVENT_PROFILE_INFO_ERR
9613 #undef __GET_MEM_OBJECT_INFO_ERR
9614 #undef __GET_IMAGE_INFO_ERR
9615 #undef __GET_SAMPLER_INFO_ERR
9616 #undef __GET_KERNEL_INFO_ERR
9617 #undef __GET_KERNEL_ARG_INFO_ERR
9618 #undef __GET_KERNEL_WORK_GROUP_INFO_ERR
9619 #undef __GET_PROGRAM_INFO_ERR
9620 #undef __GET_PROGRAM_BUILD_INFO_ERR
9621 #undef __GET_COMMAND_QUEUE_INFO_ERR
9623 #undef __CREATE_CONTEXT_ERR
9624 #undef __CREATE_CONTEXT_FROM_TYPE_ERR
9625 #undef __GET_SUPPORTED_IMAGE_FORMATS_ERR
9627 #undef __CREATE_BUFFER_ERR
9628 #undef __CREATE_SUBBUFFER_ERR
9629 #undef __CREATE_IMAGE2D_ERR
9630 #undef __CREATE_IMAGE3D_ERR
9631 #undef __CREATE_SAMPLER_ERR
9632 #undef __SET_MEM_OBJECT_DESTRUCTOR_CALLBACK_ERR
9634 #undef __CREATE_USER_EVENT_ERR
9635 #undef __SET_USER_EVENT_STATUS_ERR
9636 #undef __SET_EVENT_CALLBACK_ERR
9637 #undef __SET_PRINTF_CALLBACK_ERR
9639 #undef __WAIT_FOR_EVENTS_ERR
9641 #undef __CREATE_KERNEL_ERR
9642 #undef __SET_KERNEL_ARGS_ERR
9643 #undef __CREATE_PROGRAM_WITH_SOURCE_ERR
9644 #undef __CREATE_PROGRAM_WITH_BINARY_ERR
9645 #undef __CREATE_PROGRAM_WITH_BUILT_IN_KERNELS_ERR
9646 #undef __BUILD_PROGRAM_ERR
9647 #undef __CREATE_KERNELS_IN_PROGRAM_ERR
9649 #undef __CREATE_COMMAND_QUEUE_ERR
9650 #undef __SET_COMMAND_QUEUE_PROPERTY_ERR
9651 #undef __ENQUEUE_READ_BUFFER_ERR
9652 #undef __ENQUEUE_WRITE_BUFFER_ERR
9653 #undef __ENQUEUE_READ_BUFFER_RECT_ERR
9654 #undef __ENQUEUE_WRITE_BUFFER_RECT_ERR
9655 #undef __ENQEUE_COPY_BUFFER_ERR
9656 #undef __ENQEUE_COPY_BUFFER_RECT_ERR
9657 #undef __ENQUEUE_READ_IMAGE_ERR
9658 #undef __ENQUEUE_WRITE_IMAGE_ERR
9659 #undef __ENQUEUE_COPY_IMAGE_ERR
9660 #undef __ENQUEUE_COPY_IMAGE_TO_BUFFER_ERR
9661 #undef __ENQUEUE_COPY_BUFFER_TO_IMAGE_ERR
9662 #undef __ENQUEUE_MAP_BUFFER_ERR
9663 #undef __ENQUEUE_MAP_IMAGE_ERR
9664 #undef __ENQUEUE_UNMAP_MEM_OBJECT_ERR
9665 #undef __ENQUEUE_NDRANGE_KERNEL_ERR
9666 #undef __ENQUEUE_TASK_ERR
9667 #undef __ENQUEUE_NATIVE_KERNEL
9669 #undef __UNLOAD_COMPILER_ERR
9670 #undef __CREATE_SUB_DEVICES_ERR
9672 #undef __CREATE_PIPE_ERR
9673 #undef __GET_PIPE_INFO_ERR
9675 #endif //CL_HPP_USER_OVERRIDE_ERROR_STRINGS
9678 #undef CL_HPP_INIT_CL_EXT_FCN_PTR_
9679 #undef CL_HPP_INIT_CL_EXT_FCN_PTR_PLATFORM_
9681 #if defined(CL_HPP_USE_CL_DEVICE_FISSION)
9682 #undef CL_HPP_PARAM_NAME_DEVICE_FISSION_
9683 #endif // CL_HPP_USE_CL_DEVICE_FISSION
9685 #undef CL_HPP_NOEXCEPT_
9686 #undef CL_HPP_DEFINE_STATIC_MEMBER_