/*
 * Copyright © 2016 Red Hat.
 * Copyright © 2016 Bas Nieuwenhuizen
 *
 * based in part on anv driver which is:
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
#include "tu_private.h"

#include <fcntl.h>
#include <stdbool.h>
#include <string.h>
#include <sys/mman.h>
#include <sys/sysinfo.h>
#include <unistd.h>
#include <xf86drm.h>

#include "util/debug.h"
#include "util/disk_cache.h"
#include "util/strtod.h"
#include "vk_format.h"
#include "vk_util.h"

#include "drm/msm_drm.h" /* for MSM_BO_WC */
static int
tu_device_get_cache_uuid(uint16_t family, void *uuid)
{
   uint32_t mesa_timestamp;
   uint16_t f = family;

   memset(uuid, 0, VK_UUID_SIZE);
   if (!disk_cache_get_function_timestamp(tu_device_get_cache_uuid,
                                          &mesa_timestamp))
      return -1;

   memcpy(uuid, &mesa_timestamp, 4);
   memcpy((char *)uuid + 4, &f, 2);
   snprintf((char *)uuid + 6, VK_UUID_SIZE - 10, "tu");
   return 0;
}

static void
tu_get_driver_uuid(void *uuid)
{
   memset(uuid, 0, VK_UUID_SIZE);
}

static void
tu_get_device_uuid(void *uuid)
{
   memset(uuid, 0, VK_UUID_SIZE);
}
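/* The pipeline-cache UUID produced above is laid out as:
 *
 *    bytes [0, 4)  mesa build timestamp
 *    bytes [4, 6)  GPU family (e.g. 630 for an Adreno A630)
 *    bytes [6, ..) the literal string "tu"
 *
 * so on-disk shader caches are invalidated by a driver rebuild and are never
 * shared across GPU families. The driver and device UUIDs are still
 * all-zeroes at this stage of bring-up.
 */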
VkResult
tu_bo_init_new(struct tu_device *dev, struct tu_bo *bo, uint64_t size)
{
   /* TODO: Choose better flags. As of 2018-11-12, freedreno/drm/msm_bo.c
    * always sets `flags = MSM_BO_WC`, and we copy that behavior here.
    */
   uint32_t gem_handle = tu_gem_new(dev, size, MSM_BO_WC);
   if (!gem_handle)
      goto fail_new;

   /* Calling DRM_MSM_GEM_INFO forces the kernel to allocate backing pages. We
    * want immediate backing pages because vkAllocateMemory and friends must
    * not lazily fail.
    *
    * TODO(chadv): Must we really call DRM_MSM_GEM_INFO to acquire backing
    * pages? I infer so from reading comments in msm_bo.c:bo_allocate(), but
    * maybe I misunderstand.
    */

   /* TODO: Do we need 'offset' if we have 'iova'? */
   uint64_t offset = tu_gem_info_offset(dev, gem_handle);
   if (!offset)
      goto fail_info;

   uint64_t iova = tu_gem_info_iova(dev, gem_handle);
   if (!iova)
      goto fail_info;

   *bo = (struct tu_bo) {
      .gem_handle = gem_handle,
      .size = size,
      .offset = offset,
      .iova = iova,
   };

   return VK_SUCCESS;

fail_info:
   tu_gem_close(dev, gem_handle);
fail_new:
   return vk_error(dev->instance, VK_ERROR_OUT_OF_DEVICE_MEMORY);
}
VkResult
tu_bo_map(struct tu_device *dev, struct tu_bo *bo)
{
   if (bo->map)
      return VK_SUCCESS;

   /* TODO: Should we use the wrapper os_mmap() like Freedreno does? */
   void *map = mmap(0, bo->size, PROT_READ | PROT_WRITE, MAP_SHARED,
                    dev->physical_device->local_fd, bo->offset);
   if (map == MAP_FAILED)
      return vk_error(dev->instance, VK_ERROR_MEMORY_MAP_FAILED);

   bo->map = map;
   return VK_SUCCESS;
}
void
tu_bo_finish(struct tu_device *dev, struct tu_bo *bo)
{
   assert(bo->gem_handle);

   if (bo->map)
      munmap(bo->map, bo->size);

   tu_gem_close(dev, bo->gem_handle);
}
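/* Illustrative sketch (not part of the driver): the expected lifecycle of a
 * tu_bo, as driven by tu_alloc_memory() and tu_MapMemory() further down:
 *
 *    struct tu_bo bo;
 *    if (tu_bo_init_new(dev, &bo, size) == VK_SUCCESS) {
 *       if (tu_bo_map(dev, &bo) == VK_SUCCESS)
 *          memcpy(bo.map, data, size); // CPU writes through the WC mapping
 *       tu_bo_finish(dev, &bo);        // unmaps and closes the GEM handle
 *    }
 */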
static VkResult
tu_physical_device_init(struct tu_physical_device *device,
                        struct tu_instance *instance,
                        drmDevicePtr drm_device)
{
   const char *path = drm_device->nodes[DRM_NODE_RENDER];
   VkResult result = VK_SUCCESS;
   drmVersionPtr version;
   int fd;
   int master_fd = -1;
   struct fd_pipe *tmp_pipe = NULL;
   uint64_t val;

   fd = open(path, O_RDWR | O_CLOEXEC);
   if (fd < 0) {
      return vk_errorf(instance, VK_ERROR_INCOMPATIBLE_DRIVER,
                       "failed to open device %s", path);
   }

   /* Version 1.3 added MSM_INFO_IOVA. */
   const int min_version_major = 1;
   const int min_version_minor = 3;

   version = drmGetVersion(fd);
   if (!version) {
      close(fd);
      return vk_errorf(instance, VK_ERROR_INCOMPATIBLE_DRIVER,
                       "failed to query kernel driver version for device %s",
                       path);
   }

   if (strcmp(version->name, "msm")) {
      drmFreeVersion(version);
      close(fd);
      return vk_errorf(instance, VK_ERROR_INCOMPATIBLE_DRIVER,
                       "device %s does not use the msm kernel driver", path);
   }

   if (version->version_major != 1 || version->version_minor < 3) {
      result = vk_errorf(instance, VK_ERROR_INCOMPATIBLE_DRIVER,
                         "kernel driver for device %s has version %d.%d, "
                         "but Vulkan requires version >= %d.%d",
                         path, version->version_major, version->version_minor,
                         min_version_major, min_version_minor);
      drmFreeVersion(version);
      close(fd);
      return result;
   }

   drmFreeVersion(version);

   if (instance->debug_flags & TU_DEBUG_STARTUP)
      tu_logi("Found compatible device '%s'.", path);

   device->_loader_data.loaderMagic = ICD_LOADER_MAGIC;
   device->instance = instance;
   assert(strlen(path) < ARRAY_SIZE(device->path));
   strncpy(device->path, path, ARRAY_SIZE(device->path));

   if (instance->enabled_extensions.KHR_display) {
      master_fd = open(drm_device->nodes[DRM_NODE_PRIMARY], O_RDWR | O_CLOEXEC);
      if (master_fd >= 0) {
         /* TODO: free master_fd if accel is not working? */
      }
   }

   device->master_fd = master_fd;
   device->local_fd = fd;

   device->drm_device = fd_device_new_dup(fd);
   if (!device->drm_device) {
      result = vk_errorf(
         instance, VK_ERROR_INITIALIZATION_FAILED, "could not create the libdrm device");
      goto fail;
   }

   tmp_pipe = fd_pipe_new(device->drm_device, FD_PIPE_3D);
   if (!tmp_pipe) {
      result = vk_errorf(
         instance, VK_ERROR_INITIALIZATION_FAILED, "could not open the 3D pipe");
      goto fail;
   }

   if (fd_pipe_get_param(tmp_pipe, FD_GPU_ID, &val)) {
      result = vk_errorf(
         instance, VK_ERROR_INITIALIZATION_FAILED, "could not get GPU ID");
      goto fail;
   }
   device->gpu_id = val;

   if (fd_pipe_get_param(tmp_pipe, FD_GMEM_SIZE, &val)) {
      result = vk_errorf(
         instance, VK_ERROR_INITIALIZATION_FAILED, "could not get GMEM size");
      goto fail;
   }
   device->gmem_size = val;

   fd_pipe_del(tmp_pipe);
   tmp_pipe = NULL;

   memset(device->name, 0, sizeof(device->name));
   sprintf(device->name, "FD%d", device->gpu_id);

   switch (device->gpu_id) {
   case 630: /* turnip is being brought up on the Adreno A630 */
      break;
   default:
      result = vk_errorf(instance, VK_ERROR_INITIALIZATION_FAILED,
                         "device %s is unsupported", device->name);
      goto fail;
   }

   if (tu_device_get_cache_uuid(device->gpu_id, device->cache_uuid)) {
      result = vk_errorf(
         instance, VK_ERROR_INITIALIZATION_FAILED, "cannot generate UUID");
      goto fail;
   }

   /* The gpu id is already embedded in the uuid so we just pass "tu"
    * when creating the cache.
    */
   char buf[VK_UUID_SIZE * 2 + 1];
   disk_cache_format_hex_id(buf, device->cache_uuid, VK_UUID_SIZE * 2);
   device->disk_cache = disk_cache_create(device->name, buf, 0);

   fprintf(stderr,
           "WARNING: tu is not a conformant vulkan implementation, "
           "testing use only.\n");

   tu_get_driver_uuid(&device->driver_uuid);
   tu_get_device_uuid(&device->device_uuid);

   tu_fill_device_extension_table(device, &device->supported_extensions);

   if (result != VK_SUCCESS) {
      vk_error(instance, result);
      goto fail;
   }

   return VK_SUCCESS;

fail:
   if (tmp_pipe)
      fd_pipe_del(tmp_pipe);
   if (device->drm_device)
      fd_device_del(device->drm_device);
   close(fd);
   if (master_fd != -1)
      close(master_fd);
   return result;
}
static void
tu_physical_device_finish(struct tu_physical_device *device)
{
   disk_cache_destroy(device->disk_cache);

   close(device->local_fd);
   if (device->master_fd != -1)
      close(device->master_fd);
}
static void *
default_alloc_func(void *pUserData,
                   size_t size,
                   size_t align,
                   VkSystemAllocationScope allocationScope)
{
   return malloc(size);
}

static void *
default_realloc_func(void *pUserData,
                     void *pOriginal,
                     size_t size,
                     size_t align,
                     VkSystemAllocationScope allocationScope)
{
   return realloc(pOriginal, size);
}

static void
default_free_func(void *pUserData, void *pMemory)
{
   free(pMemory);
}

static const VkAllocationCallbacks default_alloc = {
   .pUserData = NULL,
   .pfnAllocation = default_alloc_func,
   .pfnReallocation = default_realloc_func,
   .pfnFree = default_free_func,
};
static const struct debug_control tu_debug_options[] = {
   { "startup", TU_DEBUG_STARTUP },
   { NULL, 0 }
};

const char *
tu_get_debug_option_name(int id)
{
   assert(id < ARRAY_SIZE(tu_debug_options) - 1);
   return tu_debug_options[id].string;
}
static int
tu_get_instance_extension_index(const char *name)
{
   for (unsigned i = 0; i < TU_INSTANCE_EXTENSION_COUNT; ++i) {
      if (strcmp(name, tu_instance_extensions[i].extensionName) == 0)
         return i;
   }
   return -1;
}
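/* Debug behavior is driven by the TU_DEBUG environment variable; e.g.
 * running an application with
 *
 *    TU_DEBUG=startup ./app
 *
 * sets TU_DEBUG_STARTUP and makes instance creation and device probing
 * chatty via tu_logi(). "startup" is the only flag wired up so far.
 */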
VkResult
tu_CreateInstance(const VkInstanceCreateInfo *pCreateInfo,
                  const VkAllocationCallbacks *pAllocator,
                  VkInstance *pInstance)
{
   struct tu_instance *instance;
   VkResult result;

   assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_INSTANCE_CREATE_INFO);

   uint32_t client_version;
   if (pCreateInfo->pApplicationInfo &&
       pCreateInfo->pApplicationInfo->apiVersion != 0) {
      client_version = pCreateInfo->pApplicationInfo->apiVersion;
   } else {
      tu_EnumerateInstanceVersion(&client_version);
   }

   instance = vk_zalloc2(&default_alloc, pAllocator, sizeof(*instance), 8,
                         VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
   if (!instance)
      return vk_error(NULL, VK_ERROR_OUT_OF_HOST_MEMORY);

   instance->_loader_data.loaderMagic = ICD_LOADER_MAGIC;

   if (pAllocator)
      instance->alloc = *pAllocator;
   else
      instance->alloc = default_alloc;

   instance->api_version = client_version;
   instance->physical_device_count = -1;

   instance->debug_flags =
      parse_debug_string(getenv("TU_DEBUG"), tu_debug_options);

   if (instance->debug_flags & TU_DEBUG_STARTUP)
      tu_logi("Created an instance");

   for (uint32_t i = 0; i < pCreateInfo->enabledExtensionCount; i++) {
      const char *ext_name = pCreateInfo->ppEnabledExtensionNames[i];
      int index = tu_get_instance_extension_index(ext_name);

      if (index < 0 || !tu_supported_instance_extensions.extensions[index]) {
         vk_free2(&default_alloc, pAllocator, instance);
         return vk_error(instance, VK_ERROR_EXTENSION_NOT_PRESENT);
      }

      instance->enabled_extensions.extensions[index] = true;
   }

   result = vk_debug_report_instance_init(&instance->debug_report_callbacks);
   if (result != VK_SUCCESS) {
      vk_free2(&default_alloc, pAllocator, instance);
      return vk_error(instance, result);
   }

   VG(VALGRIND_CREATE_MEMPOOL(instance, 0, false));

   *pInstance = tu_instance_to_handle(instance);

   return VK_SUCCESS;
}
void
tu_DestroyInstance(VkInstance _instance,
                   const VkAllocationCallbacks *pAllocator)
{
   TU_FROM_HANDLE(tu_instance, instance, _instance);

   if (!instance)
      return;

   for (int i = 0; i < instance->physical_device_count; ++i) {
      tu_physical_device_finish(instance->physical_devices + i);
   }

   VG(VALGRIND_DESTROY_MEMPOOL(instance));

   vk_debug_report_instance_destroy(&instance->debug_report_callbacks);

   vk_free(&instance->alloc, instance);
}
static VkResult
tu_enumerate_devices(struct tu_instance *instance)
{
   /* TODO: Check for more devices? */
   drmDevicePtr devices[8];
   VkResult result = VK_ERROR_INCOMPATIBLE_DRIVER;
   int max_devices;

   instance->physical_device_count = 0;

   max_devices = drmGetDevices2(0, devices, ARRAY_SIZE(devices));

   if (instance->debug_flags & TU_DEBUG_STARTUP)
      tu_logi("Found %d drm nodes", max_devices);

   if (max_devices < 1)
      return vk_error(instance, VK_ERROR_INCOMPATIBLE_DRIVER);

   for (unsigned i = 0; i < (unsigned)max_devices; i++) {
      if (devices[i]->available_nodes & 1 << DRM_NODE_RENDER &&
          devices[i]->bustype == DRM_BUS_PLATFORM) {

         result = tu_physical_device_init(instance->physical_devices +
                                             instance->physical_device_count,
                                          instance, devices[i]);
         if (result == VK_SUCCESS)
            ++instance->physical_device_count;
         else if (result != VK_ERROR_INCOMPATIBLE_DRIVER)
            break;
      }
   }
   drmFreeDevices(devices, max_devices);

   return result;
}
VkResult
tu_EnumeratePhysicalDevices(VkInstance _instance,
                            uint32_t *pPhysicalDeviceCount,
                            VkPhysicalDevice *pPhysicalDevices)
{
   TU_FROM_HANDLE(tu_instance, instance, _instance);
   VK_OUTARRAY_MAKE(out, pPhysicalDevices, pPhysicalDeviceCount);

   VkResult result;

   if (instance->physical_device_count < 0) {
      result = tu_enumerate_devices(instance);
      if (result != VK_SUCCESS && result != VK_ERROR_INCOMPATIBLE_DRIVER)
         return result;
   }

   for (uint32_t i = 0; i < instance->physical_device_count; ++i) {
      vk_outarray_append(&out, p)
      {
         *p = tu_physical_device_to_handle(instance->physical_devices + i);
      }
   }

   return vk_outarray_status(&out);
}
VkResult
tu_EnumeratePhysicalDeviceGroups(
   VkInstance _instance,
   uint32_t *pPhysicalDeviceGroupCount,
   VkPhysicalDeviceGroupProperties *pPhysicalDeviceGroupProperties)
{
   TU_FROM_HANDLE(tu_instance, instance, _instance);
   VK_OUTARRAY_MAKE(out, pPhysicalDeviceGroupProperties,
                    pPhysicalDeviceGroupCount);

   VkResult result;

   if (instance->physical_device_count < 0) {
      result = tu_enumerate_devices(instance);
      if (result != VK_SUCCESS && result != VK_ERROR_INCOMPATIBLE_DRIVER)
         return result;
   }

   for (uint32_t i = 0; i < instance->physical_device_count; ++i) {
      vk_outarray_append(&out, p)
      {
         p->physicalDeviceCount = 1;
         p->physicalDevices[0] =
            tu_physical_device_to_handle(instance->physical_devices + i);
         p->subsetAllocation = false;
      }
   }

   return vk_outarray_status(&out);
}
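/* Both enumeration entrypoints follow the standard Vulkan two-call idiom via
 * vk_outarray: the app calls once with a NULL array pointer to learn the
 * count, then again with storage for that many elements.
 * vk_outarray_status() returns VK_INCOMPLETE when the caller-provided array
 * turned out to be too small.
 */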
void
tu_GetPhysicalDeviceFeatures(VkPhysicalDevice physicalDevice,
                             VkPhysicalDeviceFeatures *pFeatures)
{
   memset(pFeatures, 0, sizeof(*pFeatures));

   *pFeatures = (VkPhysicalDeviceFeatures){
      .robustBufferAccess = false,
      .fullDrawIndexUint32 = false,
      .imageCubeArray = false,
      .independentBlend = false,
      .geometryShader = false,
      .tessellationShader = false,
      .sampleRateShading = false,
      .dualSrcBlend = false,
      .logicOp = false,
      .multiDrawIndirect = false,
      .drawIndirectFirstInstance = false,
      .depthClamp = false,
      .depthBiasClamp = false,
      .fillModeNonSolid = false,
      .depthBounds = false,
      .wideLines = false,
      .largePoints = false,
      .alphaToOne = false,
      .multiViewport = false,
      .samplerAnisotropy = false,
      .textureCompressionETC2 = false,
      .textureCompressionASTC_LDR = false,
      .textureCompressionBC = false,
      .occlusionQueryPrecise = false,
      .pipelineStatisticsQuery = false,
      .vertexPipelineStoresAndAtomics = false,
      .fragmentStoresAndAtomics = false,
      .shaderTessellationAndGeometryPointSize = false,
      .shaderImageGatherExtended = false,
      .shaderStorageImageExtendedFormats = false,
      .shaderStorageImageMultisample = false,
      .shaderUniformBufferArrayDynamicIndexing = false,
      .shaderSampledImageArrayDynamicIndexing = false,
      .shaderStorageBufferArrayDynamicIndexing = false,
      .shaderStorageImageArrayDynamicIndexing = false,
      .shaderStorageImageReadWithoutFormat = false,
      .shaderStorageImageWriteWithoutFormat = false,
      .shaderClipDistance = false,
      .shaderCullDistance = false,
      .shaderFloat64 = false,
      .shaderInt64 = false,
      .shaderInt16 = false,
      .sparseBinding = false,
      .variableMultisampleRate = false,
      .inheritedQueries = false,
   };
}
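/* Every core feature above is reported as false for now; individual bits are
 * expected to flip to true as turnip's compiler and state setup grow the
 * corresponding support.
 */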
void
tu_GetPhysicalDeviceFeatures2(VkPhysicalDevice physicalDevice,
                              VkPhysicalDeviceFeatures2KHR *pFeatures)
{
   vk_foreach_struct(ext, pFeatures->pNext)
   {
      switch (ext->sType) {
      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VARIABLE_POINTER_FEATURES_KHR: {
         VkPhysicalDeviceVariablePointerFeaturesKHR *features = (void *)ext;
         features->variablePointersStorageBuffer = false;
         features->variablePointers = false;
         break;
      }
      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MULTIVIEW_FEATURES_KHR: {
         VkPhysicalDeviceMultiviewFeaturesKHR *features =
            (VkPhysicalDeviceMultiviewFeaturesKHR *)ext;
         features->multiview = false;
         features->multiviewGeometryShader = false;
         features->multiviewTessellationShader = false;
         break;
      }
      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_DRAW_PARAMETER_FEATURES: {
         VkPhysicalDeviceShaderDrawParameterFeatures *features =
            (VkPhysicalDeviceShaderDrawParameterFeatures *)ext;
         features->shaderDrawParameters = false;
         break;
      }
      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROTECTED_MEMORY_FEATURES: {
         VkPhysicalDeviceProtectedMemoryFeatures *features =
            (VkPhysicalDeviceProtectedMemoryFeatures *)ext;
         features->protectedMemory = false;
         break;
      }
      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_16BIT_STORAGE_FEATURES: {
         VkPhysicalDevice16BitStorageFeatures *features =
            (VkPhysicalDevice16BitStorageFeatures *)ext;
         features->storageBuffer16BitAccess = false;
         features->uniformAndStorageBuffer16BitAccess = false;
         features->storagePushConstant16 = false;
         features->storageInputOutput16 = false;
         break;
      }
      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SAMPLER_YCBCR_CONVERSION_FEATURES: {
         VkPhysicalDeviceSamplerYcbcrConversionFeatures *features =
            (VkPhysicalDeviceSamplerYcbcrConversionFeatures *)ext;
         features->samplerYcbcrConversion = false;
         break;
      }
      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DESCRIPTOR_INDEXING_FEATURES_EXT: {
         VkPhysicalDeviceDescriptorIndexingFeaturesEXT *features =
            (VkPhysicalDeviceDescriptorIndexingFeaturesEXT *)ext;
         features->shaderInputAttachmentArrayDynamicIndexing = false;
         features->shaderUniformTexelBufferArrayDynamicIndexing = false;
         features->shaderStorageTexelBufferArrayDynamicIndexing = false;
         features->shaderUniformBufferArrayNonUniformIndexing = false;
         features->shaderSampledImageArrayNonUniformIndexing = false;
         features->shaderStorageBufferArrayNonUniformIndexing = false;
         features->shaderStorageImageArrayNonUniformIndexing = false;
         features->shaderInputAttachmentArrayNonUniformIndexing = false;
         features->shaderUniformTexelBufferArrayNonUniformIndexing = false;
         features->shaderStorageTexelBufferArrayNonUniformIndexing = false;
         features->descriptorBindingUniformBufferUpdateAfterBind = false;
         features->descriptorBindingSampledImageUpdateAfterBind = false;
         features->descriptorBindingStorageImageUpdateAfterBind = false;
         features->descriptorBindingStorageBufferUpdateAfterBind = false;
         features->descriptorBindingUniformTexelBufferUpdateAfterBind = false;
         features->descriptorBindingStorageTexelBufferUpdateAfterBind = false;
         features->descriptorBindingUpdateUnusedWhilePending = false;
         features->descriptorBindingPartiallyBound = false;
         features->descriptorBindingVariableDescriptorCount = false;
         features->runtimeDescriptorArray = false;
         break;
      }
      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_CONDITIONAL_RENDERING_FEATURES_EXT: {
         VkPhysicalDeviceConditionalRenderingFeaturesEXT *features =
            (VkPhysicalDeviceConditionalRenderingFeaturesEXT *)ext;
         features->conditionalRendering = false;
         features->inheritedConditionalRendering = false;
         break;
      }
      default:
         break;
      }
   }
   return tu_GetPhysicalDeviceFeatures(physicalDevice, &pFeatures->features);
}
void
tu_GetPhysicalDeviceProperties(VkPhysicalDevice physicalDevice,
                               VkPhysicalDeviceProperties *pProperties)
{
   TU_FROM_HANDLE(tu_physical_device, pdevice, physicalDevice);
   VkSampleCountFlags sample_counts = 0xf;

   /* Make sure that the entire descriptor set is addressable with a signed
    * 32-bit int. So the sum of all limits scaled by descriptor size has to
    * be at most 2 GiB. The combined image & sampler object counts as one of
    * both. This limit is for the pipeline layout, not for the set layout,
    * but there is no set limit, so we just set a pipeline limit. I don't
    * think any app is going to hit this soon. */
   size_t max_descriptor_set_size =
      ((1ull << 31) - 16 * MAX_DYNAMIC_BUFFERS) /
      (32 /* uniform buffer, 32 due to potential space wasted on alignment */ +
       32 /* storage buffer, 32 due to potential space wasted on alignment */ +
       32 /* sampler, largest when combined with image */ +
       64 /* sampled image */ + 64 /* storage image */);
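
   /* Worked through: the per-descriptor cost sums to 32 + 32 + 32 + 64 + 64
    * = 224 bytes, so max_descriptor_set_size comes out a little under
    * 2^31 / 224, i.e. roughly 9.5 million descriptors per stage (reduced
    * slightly by the 16 * MAX_DYNAMIC_BUFFERS reservation).
    */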
   VkPhysicalDeviceLimits limits = {
      .maxImageDimension1D = (1 << 14),
      .maxImageDimension2D = (1 << 14),
      .maxImageDimension3D = (1 << 11),
      .maxImageDimensionCube = (1 << 14),
      .maxImageArrayLayers = (1 << 11),
      .maxTexelBufferElements = 128 * 1024 * 1024,
      .maxUniformBufferRange = UINT32_MAX,
      .maxStorageBufferRange = UINT32_MAX,
      .maxPushConstantsSize = MAX_PUSH_CONSTANTS_SIZE,
      .maxMemoryAllocationCount = UINT32_MAX,
      .maxSamplerAllocationCount = 64 * 1024,
      .bufferImageGranularity = 64, /* A cache line */
      .sparseAddressSpaceSize = 0xffffffffu, /* buffer max size */
      .maxBoundDescriptorSets = MAX_SETS,
      .maxPerStageDescriptorSamplers = max_descriptor_set_size,
      .maxPerStageDescriptorUniformBuffers = max_descriptor_set_size,
      .maxPerStageDescriptorStorageBuffers = max_descriptor_set_size,
      .maxPerStageDescriptorSampledImages = max_descriptor_set_size,
      .maxPerStageDescriptorStorageImages = max_descriptor_set_size,
      .maxPerStageDescriptorInputAttachments = max_descriptor_set_size,
      .maxPerStageResources = max_descriptor_set_size,
      .maxDescriptorSetSamplers = max_descriptor_set_size,
      .maxDescriptorSetUniformBuffers = max_descriptor_set_size,
      .maxDescriptorSetUniformBuffersDynamic = MAX_DYNAMIC_UNIFORM_BUFFERS,
      .maxDescriptorSetStorageBuffers = max_descriptor_set_size,
      .maxDescriptorSetStorageBuffersDynamic = MAX_DYNAMIC_STORAGE_BUFFERS,
      .maxDescriptorSetSampledImages = max_descriptor_set_size,
      .maxDescriptorSetStorageImages = max_descriptor_set_size,
      .maxDescriptorSetInputAttachments = max_descriptor_set_size,
      .maxVertexInputAttributes = 32,
      .maxVertexInputBindings = 32,
      .maxVertexInputAttributeOffset = 2047,
      .maxVertexInputBindingStride = 2048,
      .maxVertexOutputComponents = 128,
      .maxTessellationGenerationLevel = 64,
      .maxTessellationPatchSize = 32,
      .maxTessellationControlPerVertexInputComponents = 128,
      .maxTessellationControlPerVertexOutputComponents = 128,
      .maxTessellationControlPerPatchOutputComponents = 120,
      .maxTessellationControlTotalOutputComponents = 4096,
      .maxTessellationEvaluationInputComponents = 128,
      .maxTessellationEvaluationOutputComponents = 128,
      .maxGeometryShaderInvocations = 127,
      .maxGeometryInputComponents = 64,
      .maxGeometryOutputComponents = 128,
      .maxGeometryOutputVertices = 256,
      .maxGeometryTotalOutputComponents = 1024,
      .maxFragmentInputComponents = 128,
      .maxFragmentOutputAttachments = 8,
      .maxFragmentDualSrcAttachments = 1,
      .maxFragmentCombinedOutputResources = 8,
      .maxComputeSharedMemorySize = 32768,
      .maxComputeWorkGroupCount = { 65535, 65535, 65535 },
      .maxComputeWorkGroupInvocations = 2048,
      .maxComputeWorkGroupSize = { 2048, 2048, 2048 },
      .subPixelPrecisionBits = 4 /* FIXME */,
      .subTexelPrecisionBits = 4 /* FIXME */,
      .mipmapPrecisionBits = 4 /* FIXME */,
      .maxDrawIndexedIndexValue = UINT32_MAX,
      .maxDrawIndirectCount = UINT32_MAX,
      .maxSamplerLodBias = 16,
      .maxSamplerAnisotropy = 16,
      .maxViewports = MAX_VIEWPORTS,
      .maxViewportDimensions = { (1 << 14), (1 << 14) },
      .viewportBoundsRange = { INT16_MIN, INT16_MAX },
      .viewportSubPixelBits = 8,
      .minMemoryMapAlignment = 4096, /* A page */
      .minTexelBufferOffsetAlignment = 1,
      .minUniformBufferOffsetAlignment = 4,
      .minStorageBufferOffsetAlignment = 4,
      .minTexelOffset = -32,
      .maxTexelOffset = 31,
      .minTexelGatherOffset = -32,
      .maxTexelGatherOffset = 31,
      .minInterpolationOffset = -2,
      .maxInterpolationOffset = 2,
      .subPixelInterpolationOffsetBits = 8,
      .maxFramebufferWidth = (1 << 14),
      .maxFramebufferHeight = (1 << 14),
      .maxFramebufferLayers = (1 << 10),
      .framebufferColorSampleCounts = sample_counts,
      .framebufferDepthSampleCounts = sample_counts,
      .framebufferStencilSampleCounts = sample_counts,
      .framebufferNoAttachmentsSampleCounts = sample_counts,
      .maxColorAttachments = MAX_RTS,
      .sampledImageColorSampleCounts = sample_counts,
      .sampledImageIntegerSampleCounts = VK_SAMPLE_COUNT_1_BIT,
      .sampledImageDepthSampleCounts = sample_counts,
      .sampledImageStencilSampleCounts = sample_counts,
      .storageImageSampleCounts = VK_SAMPLE_COUNT_1_BIT,
      .maxSampleMaskWords = 1,
      .timestampComputeAndGraphics = true,
      .timestampPeriod = 1,
      .maxClipDistances = 8,
      .maxCullDistances = 8,
      .maxCombinedClipAndCullDistances = 8,
      .discreteQueuePriorities = 1,
      .pointSizeRange = { 0.125, 255.875 },
      .lineWidthRange = { 0.0, 7.9921875 },
      .pointSizeGranularity = (1.0 / 8.0),
      .lineWidthGranularity = (1.0 / 128.0),
      .strictLines = false, /* FINISHME */
      .standardSampleLocations = true,
      .optimalBufferCopyOffsetAlignment = 128,
      .optimalBufferCopyRowPitchAlignment = 128,
      .nonCoherentAtomSize = 64,
   };

   *pProperties = (VkPhysicalDeviceProperties){
      .apiVersion = tu_physical_device_api_version(pdevice),
      .driverVersion = vk_get_driver_version(),
      .vendorID = 0, /* TODO */
      .deviceID = 0,
      .deviceType = VK_PHYSICAL_DEVICE_TYPE_INTEGRATED_GPU,
      .limits = limits,
      .sparseProperties = { 0 },
   };

   strcpy(pProperties->deviceName, pdevice->name);
   memcpy(pProperties->pipelineCacheUUID, pdevice->cache_uuid, VK_UUID_SIZE);
}
void
tu_GetPhysicalDeviceProperties2(VkPhysicalDevice physicalDevice,
                                VkPhysicalDeviceProperties2KHR *pProperties)
{
   TU_FROM_HANDLE(tu_physical_device, pdevice, physicalDevice);
   tu_GetPhysicalDeviceProperties(physicalDevice, &pProperties->properties);

   vk_foreach_struct(ext, pProperties->pNext)
   {
      switch (ext->sType) {
      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PUSH_DESCRIPTOR_PROPERTIES_KHR: {
         VkPhysicalDevicePushDescriptorPropertiesKHR *properties =
            (VkPhysicalDevicePushDescriptorPropertiesKHR *)ext;
         properties->maxPushDescriptors = MAX_PUSH_DESCRIPTORS;
         break;
      }
      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_ID_PROPERTIES_KHR: {
         VkPhysicalDeviceIDPropertiesKHR *properties =
            (VkPhysicalDeviceIDPropertiesKHR *)ext;
         memcpy(properties->driverUUID, pdevice->driver_uuid, VK_UUID_SIZE);
         memcpy(properties->deviceUUID, pdevice->device_uuid, VK_UUID_SIZE);
         properties->deviceLUIDValid = false;
         break;
      }
      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MULTIVIEW_PROPERTIES_KHR: {
         VkPhysicalDeviceMultiviewPropertiesKHR *properties =
            (VkPhysicalDeviceMultiviewPropertiesKHR *)ext;
         properties->maxMultiviewViewCount = MAX_VIEWS;
         properties->maxMultiviewInstanceIndex = INT_MAX;
         break;
      }
      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_POINT_CLIPPING_PROPERTIES_KHR: {
         VkPhysicalDevicePointClippingPropertiesKHR *properties =
            (VkPhysicalDevicePointClippingPropertiesKHR *)ext;
         properties->pointClippingBehavior =
            VK_POINT_CLIPPING_BEHAVIOR_ALL_CLIP_PLANES_KHR;
         break;
      }
      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MAINTENANCE_3_PROPERTIES: {
         VkPhysicalDeviceMaintenance3Properties *properties =
            (VkPhysicalDeviceMaintenance3Properties *)ext;
         /* Make sure everything is addressable by a signed 32-bit int, and
          * our largest descriptors are 96 bytes. */
         properties->maxPerSetDescriptors = (1ull << 31) / 96;
         /* Our buffer size fields allow only this much */
         properties->maxMemoryAllocationSize = 0xFFFFFFFFull;
         break;
      }
      default:
         break;
      }
   }
}
static const VkQueueFamilyProperties tu_queue_family_properties = {
   .queueFlags =
      VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT | VK_QUEUE_TRANSFER_BIT,
   .queueCount = 1,
   .timestampValidBits = 64,
   .minImageTransferGranularity = (VkExtent3D) { 1, 1, 1 },
};
void
tu_GetPhysicalDeviceQueueFamilyProperties(
   VkPhysicalDevice physicalDevice,
   uint32_t *pQueueFamilyPropertyCount,
   VkQueueFamilyProperties *pQueueFamilyProperties)
{
   VK_OUTARRAY_MAKE(out, pQueueFamilyProperties, pQueueFamilyPropertyCount);

   vk_outarray_append(&out, p) { *p = tu_queue_family_properties; }
}
void
tu_GetPhysicalDeviceQueueFamilyProperties2(
   VkPhysicalDevice physicalDevice,
   uint32_t *pQueueFamilyPropertyCount,
   VkQueueFamilyProperties2KHR *pQueueFamilyProperties)
{
   VK_OUTARRAY_MAKE(out, pQueueFamilyProperties, pQueueFamilyPropertyCount);

   vk_outarray_append(&out, p)
   {
      p->queueFamilyProperties = tu_queue_family_properties;
   }
}
static uint64_t
tu_get_system_heap_size(void)
{
   struct sysinfo info;
   sysinfo(&info);

   uint64_t total_ram = (uint64_t)info.totalram * (uint64_t)info.mem_unit;

   /* We don't want to burn too much ram with the GPU. If the user has 4GiB
    * or less, we use at most half. If they have more than 4GiB, we use 3/4.
    */
   uint64_t available_ram;
   if (total_ram <= 4ull * 1024ull * 1024ull * 1024ull)
      available_ram = total_ram / 2;
   else
      available_ram = total_ram * 3 / 4;

   return available_ram;
}
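/* For example, a board with 8 GiB of RAM advertises a 6 GiB heap (3/4 of
 * total), while a 4 GiB board advertises a 2 GiB heap (1/2 of total).
 */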
void
tu_GetPhysicalDeviceMemoryProperties(
   VkPhysicalDevice physicalDevice,
   VkPhysicalDeviceMemoryProperties *pMemoryProperties)
{
   pMemoryProperties->memoryHeapCount = 1;
   pMemoryProperties->memoryHeaps[0].size = tu_get_system_heap_size();
   pMemoryProperties->memoryHeaps[0].flags = VK_MEMORY_HEAP_DEVICE_LOCAL_BIT;

   pMemoryProperties->memoryTypeCount = 1;
   pMemoryProperties->memoryTypes[0].propertyFlags =
      VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT |
      VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT |
      VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
   pMemoryProperties->memoryTypes[0].heapIndex = 0;
}
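/* A single DEVICE_LOCAL | HOST_VISIBLE | HOST_COHERENT memory type matches
 * the unified-memory Adreno hardware: there is no discrete VRAM, so every
 * allocation is both GPU-local and CPU-mappable.
 */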
void
tu_GetPhysicalDeviceMemoryProperties2(
   VkPhysicalDevice physicalDevice,
   VkPhysicalDeviceMemoryProperties2KHR *pMemoryProperties)
{
   return tu_GetPhysicalDeviceMemoryProperties(
      physicalDevice, &pMemoryProperties->memoryProperties);
}
static VkResult
tu_queue_init(struct tu_device *device,
              struct tu_queue *queue,
              uint32_t queue_family_index,
              int idx,
              VkDeviceQueueCreateFlags flags)
{
   queue->_loader_data.loaderMagic = ICD_LOADER_MAGIC;
   queue->device = device;
   queue->queue_family_index = queue_family_index;
   queue->queue_idx = idx;
   queue->flags = flags;

   return VK_SUCCESS;
}
static void
tu_queue_finish(struct tu_queue *queue)
{
}

static int
tu_get_device_extension_index(const char *name)
{
   for (unsigned i = 0; i < TU_DEVICE_EXTENSION_COUNT; ++i) {
      if (strcmp(name, tu_device_extensions[i].extensionName) == 0)
         return i;
   }
   return -1;
}
VkResult
tu_CreateDevice(VkPhysicalDevice physicalDevice,
                const VkDeviceCreateInfo *pCreateInfo,
                const VkAllocationCallbacks *pAllocator,
                VkDevice *pDevice)
{
   TU_FROM_HANDLE(tu_physical_device, physical_device, physicalDevice);
   VkResult result;
   struct tu_device *device;

   /* Check enabled features */
   if (pCreateInfo->pEnabledFeatures) {
      VkPhysicalDeviceFeatures supported_features;
      tu_GetPhysicalDeviceFeatures(physicalDevice, &supported_features);
      VkBool32 *supported_feature = (VkBool32 *)&supported_features;
      VkBool32 *enabled_feature = (VkBool32 *)pCreateInfo->pEnabledFeatures;
      unsigned num_features =
         sizeof(VkPhysicalDeviceFeatures) / sizeof(VkBool32);
      for (uint32_t i = 0; i < num_features; i++) {
         if (enabled_feature[i] && !supported_feature[i])
            return vk_error(physical_device->instance,
                            VK_ERROR_FEATURE_NOT_PRESENT);
      }
   }

   device = vk_zalloc2(&physical_device->instance->alloc, pAllocator,
                       sizeof(*device), 8, VK_SYSTEM_ALLOCATION_SCOPE_DEVICE);
   if (!device)
      return vk_error(physical_device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);

   device->_loader_data.loaderMagic = ICD_LOADER_MAGIC;
   device->instance = physical_device->instance;
   device->physical_device = physical_device;

   if (pAllocator)
      device->alloc = *pAllocator;
   else
      device->alloc = physical_device->instance->alloc;

   for (uint32_t i = 0; i < pCreateInfo->enabledExtensionCount; i++) {
      const char *ext_name = pCreateInfo->ppEnabledExtensionNames[i];
      int index = tu_get_device_extension_index(ext_name);
      if (index < 0 ||
          !physical_device->supported_extensions.extensions[index]) {
         vk_free(&device->alloc, device);
         return vk_error(physical_device->instance,
                         VK_ERROR_EXTENSION_NOT_PRESENT);
      }

      device->enabled_extensions.extensions[index] = true;
   }

   for (unsigned i = 0; i < pCreateInfo->queueCreateInfoCount; i++) {
      const VkDeviceQueueCreateInfo *queue_create =
         &pCreateInfo->pQueueCreateInfos[i];
      uint32_t qfi = queue_create->queueFamilyIndex;
      device->queues[qfi] =
         vk_alloc(&device->alloc,
                  queue_create->queueCount * sizeof(struct tu_queue), 8,
                  VK_SYSTEM_ALLOCATION_SCOPE_DEVICE);
      if (!device->queues[qfi]) {
         result = VK_ERROR_OUT_OF_HOST_MEMORY;
         goto fail;
      }

      memset(device->queues[qfi], 0,
             queue_create->queueCount * sizeof(struct tu_queue));

      device->queue_count[qfi] = queue_create->queueCount;

      for (unsigned q = 0; q < queue_create->queueCount; q++) {
         result = tu_queue_init(
            device, &device->queues[qfi][q], qfi, q, queue_create->flags);
         if (result != VK_SUCCESS)
            goto fail;
      }
   }

   VkPipelineCacheCreateInfo ci;
   ci.sType = VK_STRUCTURE_TYPE_PIPELINE_CACHE_CREATE_INFO;
   ci.pNext = NULL;
   ci.flags = 0;
   ci.pInitialData = NULL;
   ci.initialDataSize = 0;

   VkPipelineCache pc;
   result =
      tu_CreatePipelineCache(tu_device_to_handle(device), &ci, NULL, &pc);
   if (result != VK_SUCCESS)
      goto fail;

   device->mem_cache = tu_pipeline_cache_from_handle(pc);

   *pDevice = tu_device_to_handle(device);
   return VK_SUCCESS;

fail:
   for (unsigned i = 0; i < TU_MAX_QUEUE_FAMILIES; i++) {
      for (unsigned q = 0; q < device->queue_count[i]; q++)
         tu_queue_finish(&device->queues[i][q]);
      if (device->queue_count[i])
         vk_free(&device->alloc, device->queues[i]);
   }

   vk_free(&device->alloc, device);
   return result;
}
void
tu_DestroyDevice(VkDevice _device, const VkAllocationCallbacks *pAllocator)
{
   TU_FROM_HANDLE(tu_device, device, _device);

   if (!device)
      return;

   for (unsigned i = 0; i < TU_MAX_QUEUE_FAMILIES; i++) {
      for (unsigned q = 0; q < device->queue_count[i]; q++)
         tu_queue_finish(&device->queues[i][q]);
      if (device->queue_count[i])
         vk_free(&device->alloc, device->queues[i]);
   }

   VkPipelineCache pc = tu_pipeline_cache_to_handle(device->mem_cache);
   tu_DestroyPipelineCache(tu_device_to_handle(device), pc, NULL);

   vk_free(&device->alloc, device);
}
VkResult
tu_EnumerateInstanceLayerProperties(uint32_t *pPropertyCount,
                                    VkLayerProperties *pProperties)
{
   *pPropertyCount = 0;
   return VK_SUCCESS;
}

VkResult
tu_EnumerateDeviceLayerProperties(VkPhysicalDevice physicalDevice,
                                  uint32_t *pPropertyCount,
                                  VkLayerProperties *pProperties)
{
   *pPropertyCount = 0;
   return VK_SUCCESS;
}
void
tu_GetDeviceQueue2(VkDevice _device,
                   const VkDeviceQueueInfo2 *pQueueInfo,
                   VkQueue *pQueue)
{
   TU_FROM_HANDLE(tu_device, device, _device);
   struct tu_queue *queue;

   queue =
      &device->queues[pQueueInfo->queueFamilyIndex][pQueueInfo->queueIndex];
   if (pQueueInfo->flags != queue->flags) {
      /* From the Vulkan 1.1.70 spec:
       *
       * "The queue returned by vkGetDeviceQueue2 must have the same
       * flags value from this structure as that used at device
       * creation time in a VkDeviceQueueCreateInfo instance. If no
       * matching flags were specified at device creation time then
       * pQueue will return VK_NULL_HANDLE."
       */
      *pQueue = VK_NULL_HANDLE;
      return;
   }

   *pQueue = tu_queue_to_handle(queue);
}
void
tu_GetDeviceQueue(VkDevice _device,
                  uint32_t queueFamilyIndex,
                  uint32_t queueIndex,
                  VkQueue *pQueue)
{
   const VkDeviceQueueInfo2 info =
      (VkDeviceQueueInfo2){ .sType = VK_STRUCTURE_TYPE_DEVICE_QUEUE_INFO_2,
                            .queueFamilyIndex = queueFamilyIndex,
                            .queueIndex = queueIndex };

   tu_GetDeviceQueue2(_device, &info, pQueue);
}
VkResult
tu_QueueSubmit(VkQueue _queue,
               uint32_t submitCount,
               const VkSubmitInfo *pSubmits,
               VkFence _fence)
{
   /* TODO: nothing is submitted to the kernel yet. */
   return VK_SUCCESS;
}

VkResult
tu_QueueWaitIdle(VkQueue _queue)
{
   return VK_SUCCESS;
}
VkResult
tu_DeviceWaitIdle(VkDevice _device)
{
   TU_FROM_HANDLE(tu_device, device, _device);

   for (unsigned i = 0; i < TU_MAX_QUEUE_FAMILIES; i++) {
      for (unsigned q = 0; q < device->queue_count[i]; q++) {
         tu_QueueWaitIdle(tu_queue_to_handle(&device->queues[i][q]));
      }
   }
   return VK_SUCCESS;
}
VkResult
tu_EnumerateInstanceExtensionProperties(const char *pLayerName,
                                        uint32_t *pPropertyCount,
                                        VkExtensionProperties *pProperties)
{
   VK_OUTARRAY_MAKE(out, pProperties, pPropertyCount);

   /* We support no layers */
   if (pLayerName)
      return vk_error(NULL, VK_ERROR_LAYER_NOT_PRESENT);

   for (int i = 0; i < TU_INSTANCE_EXTENSION_COUNT; i++) {
      if (tu_supported_instance_extensions.extensions[i]) {
         vk_outarray_append(&out, prop) { *prop = tu_instance_extensions[i]; }
      }
   }

   return vk_outarray_status(&out);
}
VkResult
tu_EnumerateDeviceExtensionProperties(VkPhysicalDevice physicalDevice,
                                      const char *pLayerName,
                                      uint32_t *pPropertyCount,
                                      VkExtensionProperties *pProperties)
{
   TU_FROM_HANDLE(tu_physical_device, device, physicalDevice);
   VK_OUTARRAY_MAKE(out, pProperties, pPropertyCount);

   /* We support no layers */
   if (pLayerName)
      return vk_error(NULL, VK_ERROR_LAYER_NOT_PRESENT);

   for (int i = 0; i < TU_DEVICE_EXTENSION_COUNT; i++) {
      if (device->supported_extensions.extensions[i]) {
         vk_outarray_append(&out, prop) { *prop = tu_device_extensions[i]; }
      }
   }

   return vk_outarray_status(&out);
}
PFN_vkVoidFunction
tu_GetInstanceProcAddr(VkInstance _instance, const char *pName)
{
   TU_FROM_HANDLE(tu_instance, instance, _instance);

   return tu_lookup_entrypoint_checked(pName,
                                       instance ? instance->api_version : 0,
                                       instance ? &instance->enabled_extensions
                                                : NULL,
                                       NULL);
}
/* The loader wants us to expose a second GetInstanceProcAddr function
 * to work around certain LD_PRELOAD issues seen in apps.
 */
PUBLIC
VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL
vk_icdGetInstanceProcAddr(VkInstance instance, const char *pName);

PUBLIC
VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL
vk_icdGetInstanceProcAddr(VkInstance instance, const char *pName)
{
   return tu_GetInstanceProcAddr(instance, pName);
}
PFN_vkVoidFunction
tu_GetDeviceProcAddr(VkDevice _device, const char *pName)
{
   TU_FROM_HANDLE(tu_device, device, _device);

   return tu_lookup_entrypoint_checked(pName,
                                       device->instance->api_version,
                                       &device->instance->enabled_extensions,
                                       &device->enabled_extensions);
}
static VkResult
tu_alloc_memory(struct tu_device *device,
                const VkMemoryAllocateInfo *pAllocateInfo,
                const VkAllocationCallbacks *pAllocator,
                VkDeviceMemory *pMem)
{
   struct tu_device_memory *mem;
   VkResult result;

   assert(pAllocateInfo->sType == VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO);

   if (pAllocateInfo->allocationSize == 0) {
      /* Apparently, this is allowed */
      *pMem = VK_NULL_HANDLE;
      return VK_SUCCESS;
   }

   mem = vk_alloc2(&device->alloc, pAllocator, sizeof(*mem), 8,
                   VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (mem == NULL)
      return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);

   result = tu_bo_init_new(device, &mem->bo, pAllocateInfo->allocationSize);
   if (result != VK_SUCCESS) {
      vk_free2(&device->alloc, pAllocator, mem);
      return result;
   }

   mem->size = pAllocateInfo->allocationSize;
   mem->type_index = pAllocateInfo->memoryTypeIndex;

   mem->map = NULL;
   mem->user_ptr = NULL;

   *pMem = tu_device_memory_to_handle(mem);

   return VK_SUCCESS;
}
VkResult
tu_AllocateMemory(VkDevice _device,
                  const VkMemoryAllocateInfo *pAllocateInfo,
                  const VkAllocationCallbacks *pAllocator,
                  VkDeviceMemory *pMem)
{
   TU_FROM_HANDLE(tu_device, device, _device);
   return tu_alloc_memory(device, pAllocateInfo, pAllocator, pMem);
}
void
tu_FreeMemory(VkDevice _device,
              VkDeviceMemory _mem,
              const VkAllocationCallbacks *pAllocator)
{
   TU_FROM_HANDLE(tu_device, device, _device);
   TU_FROM_HANDLE(tu_device_memory, mem, _mem);

   if (mem == NULL)
      return;

   tu_bo_finish(device, &mem->bo);
   vk_free2(&device->alloc, pAllocator, mem);
}
VkResult
tu_MapMemory(VkDevice _device,
             VkDeviceMemory _memory,
             VkDeviceSize offset,
             VkDeviceSize size,
             VkMemoryMapFlags flags,
             void **ppData)
{
   TU_FROM_HANDLE(tu_device, device, _device);
   TU_FROM_HANDLE(tu_device_memory, mem, _memory);
   VkResult result;

   if (mem == NULL) {
      *ppData = NULL;
      return VK_SUCCESS;
   }

   if (mem->user_ptr) {
      *ppData = mem->user_ptr;
   } else if (!mem->map) {
      result = tu_bo_map(device, &mem->bo);
      if (result != VK_SUCCESS)
         return result;
      *ppData = mem->map = mem->bo.map;
   } else
      *ppData = mem->map;

   if (*ppData) {
      *ppData += offset;
      return VK_SUCCESS;
   }

   return vk_error(device->instance, VK_ERROR_MEMORY_MAP_FAILED);
}
void
tu_UnmapMemory(VkDevice _device, VkDeviceMemory _memory)
{
   /* I do not see any unmapping done by the freedreno Gallium driver. */
}

VkResult
tu_FlushMappedMemoryRanges(VkDevice _device,
                           uint32_t memoryRangeCount,
                           const VkMappedMemoryRange *pMemoryRanges)
{
   return VK_SUCCESS;
}

VkResult
tu_InvalidateMappedMemoryRanges(VkDevice _device,
                                uint32_t memoryRangeCount,
                                const VkMappedMemoryRange *pMemoryRanges)
{
   return VK_SUCCESS;
}
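/* Flush/invalidate can be no-ops here: the only memory type we expose is
 * HOST_COHERENT, so the spec requires no explicit domain transfer for host
 * access to mapped memory.
 */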
void
tu_GetBufferMemoryRequirements(VkDevice _device,
                               VkBuffer _buffer,
                               VkMemoryRequirements *pMemoryRequirements)
{
   TU_FROM_HANDLE(tu_buffer, buffer, _buffer);

   pMemoryRequirements->memoryTypeBits = 1;
   pMemoryRequirements->alignment = 16;
   pMemoryRequirements->size =
      align64(buffer->size, pMemoryRequirements->alignment);
}

void
tu_GetBufferMemoryRequirements2(
   VkDevice device,
   const VkBufferMemoryRequirementsInfo2KHR *pInfo,
   VkMemoryRequirements2KHR *pMemoryRequirements)
{
   tu_GetBufferMemoryRequirements(
      device, pInfo->buffer, &pMemoryRequirements->memoryRequirements);
}
void
tu_GetImageMemoryRequirements(VkDevice _device,
                              VkImage _image,
                              VkMemoryRequirements *pMemoryRequirements)
{
   TU_FROM_HANDLE(tu_image, image, _image);

   pMemoryRequirements->memoryTypeBits = 1;
   pMemoryRequirements->size = image->size;
   pMemoryRequirements->alignment = image->alignment;
}

void
tu_GetImageMemoryRequirements2(VkDevice device,
                               const VkImageMemoryRequirementsInfo2KHR *pInfo,
                               VkMemoryRequirements2KHR *pMemoryRequirements)
{
   tu_GetImageMemoryRequirements(
      device, pInfo->image, &pMemoryRequirements->memoryRequirements);
}
void
tu_GetImageSparseMemoryRequirements(
   VkDevice device,
   VkImage image,
   uint32_t *pSparseMemoryRequirementCount,
   VkSparseImageMemoryRequirements *pSparseMemoryRequirements)
{
   /* Sparse images are not supported; report zero requirements. */
   *pSparseMemoryRequirementCount = 0;
}

void
tu_GetImageSparseMemoryRequirements2(
   VkDevice device,
   const VkImageSparseMemoryRequirementsInfo2KHR *pInfo,
   uint32_t *pSparseMemoryRequirementCount,
   VkSparseImageMemoryRequirements2KHR *pSparseMemoryRequirements)
{
   /* Sparse images are not supported; report zero requirements. */
   *pSparseMemoryRequirementCount = 0;
}

void
tu_GetDeviceMemoryCommitment(VkDevice device,
                             VkDeviceMemory memory,
                             VkDeviceSize *pCommittedMemoryInBytes)
{
   *pCommittedMemoryInBytes = 0;
}
VkResult
tu_BindBufferMemory2(VkDevice device,
                     uint32_t bindInfoCount,
                     const VkBindBufferMemoryInfoKHR *pBindInfos)
{
   return VK_SUCCESS;
}

VkResult
tu_BindBufferMemory(VkDevice device,
                    VkBuffer buffer,
                    VkDeviceMemory memory,
                    VkDeviceSize memoryOffset)
{
   const VkBindBufferMemoryInfoKHR info = {
      .sType = VK_STRUCTURE_TYPE_BIND_BUFFER_MEMORY_INFO_KHR,
      .buffer = buffer,
      .memory = memory,
      .memoryOffset = memoryOffset,
   };

   return tu_BindBufferMemory2(device, 1, &info);
}
VkResult
tu_BindImageMemory2(VkDevice device,
                    uint32_t bindInfoCount,
                    const VkBindImageMemoryInfoKHR *pBindInfos)
{
   return VK_SUCCESS;
}

VkResult
tu_BindImageMemory(VkDevice device,
                   VkImage image,
                   VkDeviceMemory memory,
                   VkDeviceSize memoryOffset)
{
   const VkBindImageMemoryInfoKHR info = {
      .sType = VK_STRUCTURE_TYPE_BIND_IMAGE_MEMORY_INFO_KHR,
      .image = image,
      .memory = memory,
      .memoryOffset = memoryOffset,
   };

   return tu_BindImageMemory2(device, 1, &info);
}

VkResult
tu_QueueBindSparse(VkQueue _queue,
                   uint32_t bindInfoCount,
                   const VkBindSparseInfo *pBindInfo,
                   VkFence _fence)
{
   return VK_SUCCESS;
}
VkResult
tu_CreateFence(VkDevice _device,
               const VkFenceCreateInfo *pCreateInfo,
               const VkAllocationCallbacks *pAllocator,
               VkFence *pFence)
{
   TU_FROM_HANDLE(tu_device, device, _device);

   struct tu_fence *fence =
      vk_alloc2(&device->alloc, pAllocator, sizeof(*fence), 8,
                VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (!fence)
      return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);

   *pFence = tu_fence_to_handle(fence);

   return VK_SUCCESS;
}

void
tu_DestroyFence(VkDevice _device,
                VkFence _fence,
                const VkAllocationCallbacks *pAllocator)
{
   TU_FROM_HANDLE(tu_device, device, _device);
   TU_FROM_HANDLE(tu_fence, fence, _fence);

   if (!fence)
      return;

   vk_free2(&device->alloc, pAllocator, fence);
}
VkResult
tu_WaitForFences(VkDevice _device,
                 uint32_t fenceCount,
                 const VkFence *pFences,
                 VkBool32 waitAll,
                 uint64_t timeout)
{
   return VK_SUCCESS;
}

VkResult
tu_ResetFences(VkDevice _device, uint32_t fenceCount, const VkFence *pFences)
{
   return VK_SUCCESS;
}

VkResult
tu_GetFenceStatus(VkDevice _device, VkFence _fence)
{
   return VK_SUCCESS;
}
// Queue semaphore functions

VkResult
tu_CreateSemaphore(VkDevice _device,
                   const VkSemaphoreCreateInfo *pCreateInfo,
                   const VkAllocationCallbacks *pAllocator,
                   VkSemaphore *pSemaphore)
{
   TU_FROM_HANDLE(tu_device, device, _device);

   struct tu_semaphore *sem =
      vk_alloc2(&device->alloc, pAllocator, sizeof(*sem), 8,
                VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (!sem)
      return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);

   *pSemaphore = tu_semaphore_to_handle(sem);
   return VK_SUCCESS;
}

void
tu_DestroySemaphore(VkDevice _device,
                    VkSemaphore _semaphore,
                    const VkAllocationCallbacks *pAllocator)
{
   TU_FROM_HANDLE(tu_device, device, _device);
   TU_FROM_HANDLE(tu_semaphore, sem, _semaphore);

   if (!_semaphore)
      return;

   vk_free2(&device->alloc, pAllocator, sem);
}
VkResult
tu_CreateEvent(VkDevice _device,
               const VkEventCreateInfo *pCreateInfo,
               const VkAllocationCallbacks *pAllocator,
               VkEvent *pEvent)
{
   TU_FROM_HANDLE(tu_device, device, _device);
   struct tu_event *event =
      vk_alloc2(&device->alloc, pAllocator, sizeof(*event), 8,
                VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);

   if (!event)
      return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);

   *pEvent = tu_event_to_handle(event);

   return VK_SUCCESS;
}

void
tu_DestroyEvent(VkDevice _device,
                VkEvent _event,
                const VkAllocationCallbacks *pAllocator)
{
   TU_FROM_HANDLE(tu_device, device, _device);
   TU_FROM_HANDLE(tu_event, event, _event);

   if (!event)
      return;
   vk_free2(&device->alloc, pAllocator, event);
}

VkResult
tu_GetEventStatus(VkDevice _device, VkEvent _event)
{
   TU_FROM_HANDLE(tu_event, event, _event);

   if (*event->map == 1)
      return VK_EVENT_SET;
   return VK_EVENT_RESET;
}

VkResult
tu_SetEvent(VkDevice _device, VkEvent _event)
{
   TU_FROM_HANDLE(tu_event, event, _event);
   *event->map = 1;

   return VK_SUCCESS;
}

VkResult
tu_ResetEvent(VkDevice _device, VkEvent _event)
{
   TU_FROM_HANDLE(tu_event, event, _event);
   *event->map = 0;

   return VK_SUCCESS;
}
VkResult
tu_CreateBuffer(VkDevice _device,
                const VkBufferCreateInfo *pCreateInfo,
                const VkAllocationCallbacks *pAllocator,
                VkBuffer *pBuffer)
{
   TU_FROM_HANDLE(tu_device, device, _device);
   struct tu_buffer *buffer;

   assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO);

   buffer = vk_alloc2(&device->alloc, pAllocator, sizeof(*buffer), 8,
                      VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (buffer == NULL)
      return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);

   buffer->size = pCreateInfo->size;
   buffer->usage = pCreateInfo->usage;
   buffer->flags = pCreateInfo->flags;

   *pBuffer = tu_buffer_to_handle(buffer);

   return VK_SUCCESS;
}
void
tu_DestroyBuffer(VkDevice _device,
                 VkBuffer _buffer,
                 const VkAllocationCallbacks *pAllocator)
{
   TU_FROM_HANDLE(tu_device, device, _device);
   TU_FROM_HANDLE(tu_buffer, buffer, _buffer);

   if (!buffer)
      return;

   vk_free2(&device->alloc, pAllocator, buffer);
}

static uint32_t
tu_surface_max_layer_count(struct tu_image_view *iview)
{
   return iview->type == VK_IMAGE_VIEW_TYPE_3D
             ? iview->extent.depth
             : (iview->base_layer + iview->layer_count);
}
VkResult
tu_CreateFramebuffer(VkDevice _device,
                     const VkFramebufferCreateInfo *pCreateInfo,
                     const VkAllocationCallbacks *pAllocator,
                     VkFramebuffer *pFramebuffer)
{
   TU_FROM_HANDLE(tu_device, device, _device);
   struct tu_framebuffer *framebuffer;

   assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO);

   size_t size =
      sizeof(*framebuffer) +
      sizeof(struct tu_attachment_info) * pCreateInfo->attachmentCount;
   framebuffer = vk_alloc2(
      &device->alloc, pAllocator, size, 8, VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (framebuffer == NULL)
      return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);

   framebuffer->attachment_count = pCreateInfo->attachmentCount;
   framebuffer->width = pCreateInfo->width;
   framebuffer->height = pCreateInfo->height;
   framebuffer->layers = pCreateInfo->layers;
   for (uint32_t i = 0; i < pCreateInfo->attachmentCount; i++) {
      VkImageView _iview = pCreateInfo->pAttachments[i];
      struct tu_image_view *iview = tu_image_view_from_handle(_iview);
      framebuffer->attachments[i].attachment = iview;

      /* Clamp the framebuffer dimensions to the smallest attachment. */
      framebuffer->width = MIN2(framebuffer->width, iview->extent.width);
      framebuffer->height = MIN2(framebuffer->height, iview->extent.height);
      framebuffer->layers =
         MIN2(framebuffer->layers, tu_surface_max_layer_count(iview));
   }

   *pFramebuffer = tu_framebuffer_to_handle(framebuffer);
   return VK_SUCCESS;
}
void
tu_DestroyFramebuffer(VkDevice _device,
                      VkFramebuffer _fb,
                      const VkAllocationCallbacks *pAllocator)
{
   TU_FROM_HANDLE(tu_device, device, _device);
   TU_FROM_HANDLE(tu_framebuffer, fb, _fb);

   if (!fb)
      return;
   vk_free2(&device->alloc, pAllocator, fb);
}
static void
tu_init_sampler(struct tu_device *device,
                struct tu_sampler *sampler,
                const VkSamplerCreateInfo *pCreateInfo)
{
   /* TODO: fill the hardware sampler state from pCreateInfo. */
}

VkResult
tu_CreateSampler(VkDevice _device,
                 const VkSamplerCreateInfo *pCreateInfo,
                 const VkAllocationCallbacks *pAllocator,
                 VkSampler *pSampler)
{
   TU_FROM_HANDLE(tu_device, device, _device);
   struct tu_sampler *sampler;

   assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_SAMPLER_CREATE_INFO);

   sampler = vk_alloc2(&device->alloc, pAllocator, sizeof(*sampler), 8,
                       VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (!sampler)
      return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);

   tu_init_sampler(device, sampler, pCreateInfo);
   *pSampler = tu_sampler_to_handle(sampler);

   return VK_SUCCESS;
}
void
tu_DestroySampler(VkDevice _device,
                  VkSampler _sampler,
                  const VkAllocationCallbacks *pAllocator)
{
   TU_FROM_HANDLE(tu_device, device, _device);
   TU_FROM_HANDLE(tu_sampler, sampler, _sampler);

   if (!sampler)
      return;
   vk_free2(&device->alloc, pAllocator, sampler);
}
/* vk_icd.h does not declare this function, so we declare it here to
 * suppress Wmissing-prototypes.
 */
PUBLIC VKAPI_ATTR VkResult VKAPI_CALL
vk_icdNegotiateLoaderICDInterfaceVersion(uint32_t *pSupportedVersion);

PUBLIC VKAPI_ATTR VkResult VKAPI_CALL
vk_icdNegotiateLoaderICDInterfaceVersion(uint32_t *pSupportedVersion)
{
   /* For the full details on loader interface versioning, see
    * <https://github.com/KhronosGroup/Vulkan-LoaderAndValidationLayers/blob/master/loader/LoaderAndLayerInterface.md>.
    * What follows is a condensed summary, to help you navigate the large and
    * confusing official doc.
    *
    * - Loader interface v0 is incompatible with later versions. We don't
    *   support it.
    *
    * - In loader interface v1:
    *     - The first ICD entrypoint called by the loader is
    *       vk_icdGetInstanceProcAddr(). The ICD must statically expose this
    *       entrypoint.
    *     - The ICD must statically expose no other Vulkan symbol unless it
    *       is linked with -Bsymbolic.
    *     - Each dispatchable Vulkan handle created by the ICD must be
    *       a pointer to a struct whose first member is VK_LOADER_DATA. The
    *       ICD must initialize VK_LOADER_DATA.loadMagic to ICD_LOADER_MAGIC.
    *     - The loader implements vkCreate{PLATFORM}SurfaceKHR() and
    *       vkDestroySurfaceKHR(). The ICD must be capable of working with
    *       such loader-managed surfaces.
    *
    * - Loader interface v2 differs from v1 in:
    *     - The first ICD entrypoint called by the loader is
    *       vk_icdNegotiateLoaderICDInterfaceVersion(). The ICD must
    *       statically expose this entrypoint.
    *
    * - Loader interface v3 differs from v2 in:
    *     - The ICD must implement vkCreate{PLATFORM}SurfaceKHR(),
    *       vkDestroySurfaceKHR(), and other API which uses VkSurfaceKHR,
    *       because the loader no longer does so.
    */
   *pSupportedVersion = MIN2(*pSupportedVersion, 3u);
   return VK_SUCCESS;
}
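/* Negotiation is a simple min(): the loader writes the highest interface
 * version it speaks into *pSupportedVersion, and we clamp it to 3, the
 * highest version this ICD implements. A loader that only speaks v1 or v2
 * gets its own value back unchanged.
 */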
void
tu_GetPhysicalDeviceExternalSemaphoreProperties(
   VkPhysicalDevice physicalDevice,
   const VkPhysicalDeviceExternalSemaphoreInfoKHR *pExternalSemaphoreInfo,
   VkExternalSemaphorePropertiesKHR *pExternalSemaphoreProperties)
{
   pExternalSemaphoreProperties->exportFromImportedHandleTypes = 0;
   pExternalSemaphoreProperties->compatibleHandleTypes = 0;
   pExternalSemaphoreProperties->externalSemaphoreFeatures = 0;
}

void
tu_GetPhysicalDeviceExternalFenceProperties(
   VkPhysicalDevice physicalDevice,
   const VkPhysicalDeviceExternalFenceInfoKHR *pExternalFenceInfo,
   VkExternalFencePropertiesKHR *pExternalFenceProperties)
{
   pExternalFenceProperties->exportFromImportedHandleTypes = 0;
   pExternalFenceProperties->compatibleHandleTypes = 0;
   pExternalFenceProperties->externalFenceFeatures = 0;
}
VkResult
tu_CreateDebugReportCallbackEXT(
   VkInstance _instance,
   const VkDebugReportCallbackCreateInfoEXT *pCreateInfo,
   const VkAllocationCallbacks *pAllocator,
   VkDebugReportCallbackEXT *pCallback)
{
   TU_FROM_HANDLE(tu_instance, instance, _instance);
   return vk_create_debug_report_callback(&instance->debug_report_callbacks,
                                          pCreateInfo, pAllocator,
                                          &instance->alloc, pCallback);
}

void
tu_DestroyDebugReportCallbackEXT(VkInstance _instance,
                                 VkDebugReportCallbackEXT _callback,
                                 const VkAllocationCallbacks *pAllocator)
{
   TU_FROM_HANDLE(tu_instance, instance, _instance);
   vk_destroy_debug_report_callback(&instance->debug_report_callbacks,
                                    _callback, pAllocator, &instance->alloc);
}
void
tu_DebugReportMessageEXT(VkInstance _instance,
                         VkDebugReportFlagsEXT flags,
                         VkDebugReportObjectTypeEXT objectType,
                         uint64_t object,
                         size_t location,
                         int32_t messageCode,
                         const char *pLayerPrefix,
                         const char *pMessage)
{
   TU_FROM_HANDLE(tu_instance, instance, _instance);
   vk_debug_report(&instance->debug_report_callbacks, flags, objectType,
                   object, location, messageCode, pLayerPrefix, pMessage);
}
void
tu_GetDeviceGroupPeerMemoryFeatures(
   VkDevice device,
   uint32_t heapIndex,
   uint32_t localDeviceIndex,
   uint32_t remoteDeviceIndex,
   VkPeerMemoryFeatureFlags *pPeerMemoryFeatures)
{
   assert(localDeviceIndex == remoteDeviceIndex);

   *pPeerMemoryFeatures = VK_PEER_MEMORY_FEATURE_COPY_SRC_BIT |
                          VK_PEER_MEMORY_FEATURE_COPY_DST_BIT |
                          VK_PEER_MEMORY_FEATURE_GENERIC_SRC_BIT |
                          VK_PEER_MEMORY_FEATURE_GENERIC_DST_BIT;
}
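/* Device groups only ever contain the one physical device here (see
 * tu_EnumeratePhysicalDeviceGroups above), so the local and remote device
 * are always the same and every peer-memory operation is trivially
 * supported.
 */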