 * Copyright © 2016 Red Hat.
 * Copyright © 2016 Bas Nieuwenhuizen
 *
 * based in part on anv driver which is:
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
#include "tu_private.h"

#include "util/debug.h"
#include "util/disk_cache.h"
#include "util/strtod.h"
#include "vk_format.h"

#include <sys/sysinfo.h>
tu_device_get_cache_uuid(uint16_t family, void *uuid)
   uint32_t mesa_timestamp;
   memset(uuid, 0, VK_UUID_SIZE);
   if (!disk_cache_get_function_timestamp(tu_device_get_cache_uuid,

   memcpy(uuid, &mesa_timestamp, 4);
   memcpy((char *)uuid + 4, &f, 2);
   snprintf((char *)uuid + 6, VK_UUID_SIZE - 10, "tu");
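   /* For illustration (assuming the elided lines follow the usual pattern of
    * copying the 16-bit family argument into the local `f`), the resulting
    * pipeline-cache UUID is laid out as:
    *
    *   bytes 0..3  build timestamp of this function's object file
    *   bytes 4..5  GPU family id
    *   bytes 6..   the literal string "tu", zero-padded
    */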
tu_get_driver_uuid(void *uuid)
   memset(uuid, 0, VK_UUID_SIZE);

tu_get_device_uuid(void *uuid)
tu_bo_init_new(struct tu_device *dev, struct tu_bo *bo, uint64_t size)
   /* TODO: Choose better flags. As of 2018-11-12, freedreno/drm/msm_bo.c
    * always sets `flags = MSM_BO_WC`, and we copy that behavior here.
    */
   uint32_t gem_handle = tu_gem_new(dev, size, MSM_BO_WC);

   /* Calling DRM_MSM_GEM_INFO forces the kernel to allocate backing pages. We
    * want immediate backing pages because vkAllocateMemory and friends must
    *
    * TODO(chadv): Must we really call DRM_MSM_GEM_INFO to acquire backing
    * pages? I infer so from reading comments in msm_bo.c:bo_allocate(), but
    * maybe I misunderstand.
    */
   /* TODO: Do we need 'offset' if we have 'iova'? */
   uint64_t offset = tu_gem_info_offset(dev, bo->gem_handle);

   uint64_t iova = tu_gem_info_iova(dev, bo->gem_handle);

   *bo = (struct tu_bo) {
      .gem_handle = gem_handle,

   tu_gem_close(dev, bo->gem_handle);
   return vk_error(dev->instance, VK_ERROR_OUT_OF_DEVICE_MEMORY);
tu_bo_map(struct tu_device *dev, struct tu_bo *bo)
   /* TODO: Should we use the wrapper os_mmap() like Freedreno does? */
   void *map = mmap(0, bo->size, PROT_READ | PROT_WRITE, MAP_SHARED,
                    dev->physical_device->local_fd, bo->offset);
   if (map == MAP_FAILED)
      return vk_error(dev->instance, VK_ERROR_MEMORY_MAP_FAILED);
tu_bo_finish(struct tu_device *dev, struct tu_bo *bo)
   assert(bo->gem_handle);

   munmap(bo->map, bo->size);

   tu_gem_close(dev, bo->gem_handle);
tu_physical_device_init(struct tu_physical_device *device,
                        struct tu_instance *instance,
                        drmDevicePtr drm_device)
   const char *path = drm_device->nodes[DRM_NODE_RENDER];
   VkResult result = VK_SUCCESS;
   drmVersionPtr version;
   struct fd_pipe *tmp_pipe = NULL;

   fd = open(path, O_RDWR | O_CLOEXEC);
      return vk_errorf(instance, VK_ERROR_INCOMPATIBLE_DRIVER,
                       "failed to open device %s", path);
   /* Version 1.3 added MSM_INFO_IOVA. */
   const int min_version_major = 1;
   const int min_version_minor = 3;

   version = drmGetVersion(fd);
      return vk_errorf(instance, VK_ERROR_INCOMPATIBLE_DRIVER,
                       "failed to query kernel driver version for device %s",

   if (strcmp(version->name, "msm")) {
      drmFreeVersion(version);
      return vk_errorf(instance, VK_ERROR_INCOMPATIBLE_DRIVER,
                       "device %s does not use the msm kernel driver", path);

   if (version->version_major != 1 || version->version_minor < 3) {
      result = vk_errorf(instance, VK_ERROR_INCOMPATIBLE_DRIVER,
                         "kernel driver for device %s has version %d.%d, "
                         "but Vulkan requires version >= %d.%d",
                         version->version_major, version->version_minor,
                         min_version_major, min_version_minor);
      drmFreeVersion(version);

   drmFreeVersion(version);
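   /* A quick sanity check of the rule above: a kernel reporting 1.2 is
    * rejected (minor version too old), 1.3 or 1.5 is accepted, and 2.0 is
    * rejected because the major version must match exactly.
    */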
   if (instance->debug_flags & TU_DEBUG_STARTUP)
      tu_logi("Found compatible device '%s'.", path);

   device->_loader_data.loaderMagic = ICD_LOADER_MAGIC;
   device->instance = instance;
   assert(strlen(path) < ARRAY_SIZE(device->path));
   strncpy(device->path, path, ARRAY_SIZE(device->path));

   if (instance->enabled_extensions.KHR_display) {
      master_fd = open(drm_device->nodes[DRM_NODE_PRIMARY], O_RDWR | O_CLOEXEC);
      if (master_fd >= 0) {
         /* TODO: free master_fd if accel is not working? */
   device->master_fd = master_fd;
   device->local_fd = fd;

   device->drm_device = fd_device_new_dup(fd);
   if (!device->drm_device) {
         instance, VK_ERROR_INITIALIZATION_FAILED, "could not create the libdrm device");

   tmp_pipe = fd_pipe_new(device->drm_device, FD_PIPE_3D);
         instance, VK_ERROR_INITIALIZATION_FAILED, "could not open the 3D pipe");

   if (fd_pipe_get_param(tmp_pipe, FD_GPU_ID, &val)) {
         instance, VK_ERROR_INITIALIZATION_FAILED, "could not get GPU ID");
   device->gpu_id = val;

   if (fd_pipe_get_param(tmp_pipe, FD_GMEM_SIZE, &val)) {
         instance, VK_ERROR_INITIALIZATION_FAILED, "could not get GMEM size");
   device->gmem_size = val;

   fd_pipe_del(tmp_pipe);
   memset(device->name, 0, sizeof(device->name));
   sprintf(device->name, "FD%d", device->gpu_id);

   switch (device->gpu_id) {
      result = vk_errorf(instance, VK_ERROR_INITIALIZATION_FAILED,
                         "device %s is unsupported", device->name);

   if (tu_device_get_cache_uuid(device->gpu_id, device->cache_uuid)) {
         instance, VK_ERROR_INITIALIZATION_FAILED, "cannot generate UUID");

   /* The gpu id is already embedded in the uuid so we just pass "tu"
    * when creating the cache.
    */
   char buf[VK_UUID_SIZE * 2 + 1];
   disk_cache_format_hex_id(buf, device->cache_uuid, VK_UUID_SIZE * 2);
   device->disk_cache = disk_cache_create(device->name, buf, 0);

           "WARNING: tu is not a conformant vulkan implementation, "
           "testing use only.\n");
   tu_get_driver_uuid(&device->driver_uuid);
   tu_get_device_uuid(&device->device_uuid);
   tu_fill_device_extension_table(device, &device->supported_extensions);

   if (result != VK_SUCCESS) {
      vk_error(instance, result);

   fd_pipe_del(tmp_pipe);
   if (device->drm_device)
      fd_device_del(device->drm_device);

tu_physical_device_finish(struct tu_physical_device *device)
   disk_cache_destroy(device->disk_cache);
   close(device->local_fd);
   if (device->master_fd != -1)
      close(device->master_fd);
default_alloc_func(void *pUserData,
                   VkSystemAllocationScope allocationScope)

default_realloc_func(void *pUserData,
                     VkSystemAllocationScope allocationScope)
   return realloc(pOriginal, size);

default_free_func(void *pUserData, void *pMemory)

static const VkAllocationCallbacks default_alloc = {
   .pfnAllocation = default_alloc_func,
   .pfnReallocation = default_realloc_func,
   .pfnFree = default_free_func,

static const struct debug_control tu_debug_options[] = { { "startup",

tu_get_debug_option_name(int id)
   assert(id < ARRAY_SIZE(tu_debug_options) - 1);
   return tu_debug_options[id].string;
tu_get_instance_extension_index(const char *name)
   for (unsigned i = 0; i < TU_INSTANCE_EXTENSION_COUNT; ++i) {
      if (strcmp(name, tu_instance_extensions[i].extensionName) == 0)
tu_CreateInstance(const VkInstanceCreateInfo *pCreateInfo,
                  const VkAllocationCallbacks *pAllocator,
                  VkInstance *pInstance)
   struct tu_instance *instance;

   assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_INSTANCE_CREATE_INFO);

   uint32_t client_version;
   if (pCreateInfo->pApplicationInfo &&
       pCreateInfo->pApplicationInfo->apiVersion != 0) {
      client_version = pCreateInfo->pApplicationInfo->apiVersion;
      tu_EnumerateInstanceVersion(&client_version);

   instance = vk_zalloc2(&default_alloc,
                         VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
      return vk_error(NULL, VK_ERROR_OUT_OF_HOST_MEMORY);

   instance->_loader_data.loaderMagic = ICD_LOADER_MAGIC;

      instance->alloc = *pAllocator;
      instance->alloc = default_alloc;

   instance->api_version = client_version;
   instance->physical_device_count = -1;

   instance->debug_flags =
      parse_debug_string(getenv("TU_DEBUG"), tu_debug_options);

   if (instance->debug_flags & TU_DEBUG_STARTUP)
      tu_logi("Created an instance");

   for (uint32_t i = 0; i < pCreateInfo->enabledExtensionCount; i++) {
      const char *ext_name = pCreateInfo->ppEnabledExtensionNames[i];
      int index = tu_get_instance_extension_index(ext_name);

      if (index < 0 || !tu_supported_instance_extensions.extensions[index]) {
         vk_free2(&default_alloc, pAllocator, instance);
         return vk_error(instance, VK_ERROR_EXTENSION_NOT_PRESENT);

      instance->enabled_extensions.extensions[index] = true;

   result = vk_debug_report_instance_init(&instance->debug_report_callbacks);
   if (result != VK_SUCCESS) {
      vk_free2(&default_alloc, pAllocator, instance);
      return vk_error(instance, result);

   VG(VALGRIND_CREATE_MEMPOOL(instance, 0, false));

   *pInstance = tu_instance_to_handle(instance);
tu_DestroyInstance(VkInstance _instance,
                   const VkAllocationCallbacks *pAllocator)
   TU_FROM_HANDLE(tu_instance, instance, _instance);

   for (int i = 0; i < instance->physical_device_count; ++i) {
      tu_physical_device_finish(instance->physical_devices + i);

   VG(VALGRIND_DESTROY_MEMPOOL(instance));

   vk_debug_report_instance_destroy(&instance->debug_report_callbacks);

   vk_free(&instance->alloc, instance);
tu_enumerate_devices(struct tu_instance *instance)
   /* TODO: Check for more devices? */
   drmDevicePtr devices[8];
   VkResult result = VK_ERROR_INCOMPATIBLE_DRIVER;

   instance->physical_device_count = 0;

   max_devices = drmGetDevices2(0, devices, ARRAY_SIZE(devices));

   if (instance->debug_flags & TU_DEBUG_STARTUP)
      tu_logi("Found %d drm nodes", max_devices);

      return vk_error(instance, VK_ERROR_INCOMPATIBLE_DRIVER);

   for (unsigned i = 0; i < (unsigned)max_devices; i++) {
      if (devices[i]->available_nodes & 1 << DRM_NODE_RENDER &&
          devices[i]->bustype == DRM_BUS_PLATFORM) {
         result = tu_physical_device_init(instance->physical_devices +
                                          instance->physical_device_count,

         if (result == VK_SUCCESS)
            ++instance->physical_device_count;
         else if (result != VK_ERROR_INCOMPATIBLE_DRIVER)

   drmFreeDevices(devices, max_devices);
tu_EnumeratePhysicalDevices(VkInstance _instance,
                            uint32_t *pPhysicalDeviceCount,
                            VkPhysicalDevice *pPhysicalDevices)
   TU_FROM_HANDLE(tu_instance, instance, _instance);
   VK_OUTARRAY_MAKE(out, pPhysicalDevices, pPhysicalDeviceCount);

   if (instance->physical_device_count < 0) {
      result = tu_enumerate_devices(instance);
      if (result != VK_SUCCESS && result != VK_ERROR_INCOMPATIBLE_DRIVER)

   for (uint32_t i = 0; i < instance->physical_device_count; ++i) {
      vk_outarray_append(&out, p) {
         *p = tu_physical_device_to_handle(instance->physical_devices + i);

   return vk_outarray_status(&out);
tu_EnumeratePhysicalDeviceGroups(
   VkInstance _instance,
   uint32_t *pPhysicalDeviceGroupCount,
   VkPhysicalDeviceGroupProperties *pPhysicalDeviceGroupProperties)
   TU_FROM_HANDLE(tu_instance, instance, _instance);
   VK_OUTARRAY_MAKE(out, pPhysicalDeviceGroupProperties, pPhysicalDeviceGroupCount);

   if (instance->physical_device_count < 0) {
      result = tu_enumerate_devices(instance);
      if (result != VK_SUCCESS && result != VK_ERROR_INCOMPATIBLE_DRIVER)

   for (uint32_t i = 0; i < instance->physical_device_count; ++i) {
      vk_outarray_append(&out, p) {
         p->physicalDeviceCount = 1;
         p->physicalDevices[0] =
            tu_physical_device_to_handle(instance->physical_devices + i);
         p->subsetAllocation = false;

   return vk_outarray_status(&out);
tu_GetPhysicalDeviceFeatures(VkPhysicalDevice physicalDevice,
                             VkPhysicalDeviceFeatures *pFeatures)
   memset(pFeatures, 0, sizeof(*pFeatures));

   *pFeatures = (VkPhysicalDeviceFeatures){
      .robustBufferAccess = false,
      .fullDrawIndexUint32 = false,
      .imageCubeArray = false,
      .independentBlend = false,
      .geometryShader = false,
      .tessellationShader = false,
      .sampleRateShading = false,
      .dualSrcBlend = false,
      .multiDrawIndirect = false,
      .drawIndirectFirstInstance = false,
      .depthBiasClamp = false,
      .fillModeNonSolid = false,
      .depthBounds = false,
      .largePoints = false,
      .multiViewport = false,
      .samplerAnisotropy = false,
      .textureCompressionETC2 = false,
      .textureCompressionASTC_LDR = false,
      .textureCompressionBC = false,
      .occlusionQueryPrecise = false,
      .pipelineStatisticsQuery = false,
      .vertexPipelineStoresAndAtomics = false,
      .fragmentStoresAndAtomics = false,
      .shaderTessellationAndGeometryPointSize = false,
      .shaderImageGatherExtended = false,
      .shaderStorageImageExtendedFormats = false,
      .shaderStorageImageMultisample = false,
      .shaderUniformBufferArrayDynamicIndexing = false,
      .shaderSampledImageArrayDynamicIndexing = false,
      .shaderStorageBufferArrayDynamicIndexing = false,
      .shaderStorageImageArrayDynamicIndexing = false,
      .shaderStorageImageReadWithoutFormat = false,
      .shaderStorageImageWriteWithoutFormat = false,
      .shaderClipDistance = false,
      .shaderCullDistance = false,
      .shaderFloat64 = false,
      .shaderInt64 = false,
      .shaderInt16 = false,
      .sparseBinding = false,
      .variableMultisampleRate = false,
      .inheritedQueries = false,
tu_GetPhysicalDeviceFeatures2(VkPhysicalDevice physicalDevice,
                              VkPhysicalDeviceFeatures2KHR *pFeatures)
   vk_foreach_struct(ext, pFeatures->pNext)
      switch (ext->sType) {
      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VARIABLE_POINTER_FEATURES_KHR: {
         VkPhysicalDeviceVariablePointerFeaturesKHR *features = (void *)ext;
         features->variablePointersStorageBuffer = false;
         features->variablePointers = false;
      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MULTIVIEW_FEATURES_KHR: {
         VkPhysicalDeviceMultiviewFeaturesKHR *features =
            (VkPhysicalDeviceMultiviewFeaturesKHR *)ext;
         features->multiview = false;
         features->multiviewGeometryShader = false;
         features->multiviewTessellationShader = false;
      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_DRAW_PARAMETER_FEATURES: {
         VkPhysicalDeviceShaderDrawParameterFeatures *features =
            (VkPhysicalDeviceShaderDrawParameterFeatures *)ext;
         features->shaderDrawParameters = false;
      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROTECTED_MEMORY_FEATURES: {
         VkPhysicalDeviceProtectedMemoryFeatures *features =
            (VkPhysicalDeviceProtectedMemoryFeatures *)ext;
         features->protectedMemory = false;
      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_16BIT_STORAGE_FEATURES: {
         VkPhysicalDevice16BitStorageFeatures *features =
            (VkPhysicalDevice16BitStorageFeatures *)ext;
         features->storageBuffer16BitAccess = false;
         features->uniformAndStorageBuffer16BitAccess = false;
         features->storagePushConstant16 = false;
         features->storageInputOutput16 = false;
      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SAMPLER_YCBCR_CONVERSION_FEATURES: {
         VkPhysicalDeviceSamplerYcbcrConversionFeatures *features =
            (VkPhysicalDeviceSamplerYcbcrConversionFeatures *)ext;
         features->samplerYcbcrConversion = false;
      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DESCRIPTOR_INDEXING_FEATURES_EXT: {
         VkPhysicalDeviceDescriptorIndexingFeaturesEXT *features =
            (VkPhysicalDeviceDescriptorIndexingFeaturesEXT *)ext;
         features->shaderInputAttachmentArrayDynamicIndexing = false;
         features->shaderUniformTexelBufferArrayDynamicIndexing = false;
         features->shaderStorageTexelBufferArrayDynamicIndexing = false;
         features->shaderUniformBufferArrayNonUniformIndexing = false;
         features->shaderSampledImageArrayNonUniformIndexing = false;
         features->shaderStorageBufferArrayNonUniformIndexing = false;
         features->shaderStorageImageArrayNonUniformIndexing = false;
         features->shaderInputAttachmentArrayNonUniformIndexing = false;
         features->shaderUniformTexelBufferArrayNonUniformIndexing = false;
         features->shaderStorageTexelBufferArrayNonUniformIndexing = false;
         features->descriptorBindingUniformBufferUpdateAfterBind = false;
         features->descriptorBindingSampledImageUpdateAfterBind = false;
         features->descriptorBindingStorageImageUpdateAfterBind = false;
         features->descriptorBindingStorageBufferUpdateAfterBind = false;
         features->descriptorBindingUniformTexelBufferUpdateAfterBind = false;
         features->descriptorBindingStorageTexelBufferUpdateAfterBind = false;
         features->descriptorBindingUpdateUnusedWhilePending = false;
         features->descriptorBindingPartiallyBound = false;
         features->descriptorBindingVariableDescriptorCount = false;
         features->runtimeDescriptorArray = false;
      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_CONDITIONAL_RENDERING_FEATURES_EXT: {
         VkPhysicalDeviceConditionalRenderingFeaturesEXT *features =
            (VkPhysicalDeviceConditionalRenderingFeaturesEXT *)ext;
         features->conditionalRendering = false;
         features->inheritedConditionalRendering = false;

   return tu_GetPhysicalDeviceFeatures(physicalDevice, &pFeatures->features);
tu_GetPhysicalDeviceProperties(VkPhysicalDevice physicalDevice,
                               VkPhysicalDeviceProperties *pProperties)
   TU_FROM_HANDLE(tu_physical_device, pdevice, physicalDevice);
   VkSampleCountFlags sample_counts = 0xf;

   /* Make sure that the entire descriptor set is addressable with a signed
    * 32-bit int, i.e. the sum of all limits scaled by descriptor size has to
    * be at most 2 GiB. The combined image & sampler object counts as one of
    * both. This limit is for the pipeline layout, not for the set layout, but
    * there is no set limit, so we just set a pipeline limit. I don't think
    * any app is going to hit this soon. */
   size_t max_descriptor_set_size =
      ((1ull << 31) - 16 * MAX_DYNAMIC_BUFFERS) /
      (32 /* uniform buffer, 32 due to potential space wasted on alignment */ +
       32 /* storage buffer, 32 due to potential space wasted on alignment */ +
       32 /* sampler, largest when combined with image */ +
       64 /* sampled image */ + 64 /* storage image */);
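   /* Rough magnitude check (ignoring the small dynamic-buffer correction):
    * the per-descriptor worst case above sums to 32 + 32 + 32 + 64 + 64 =
    * 224 bytes, so the limit works out to roughly 2^31 / 224, i.e. on the
    * order of 9.5 million descriptors per stage.
    */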
   VkPhysicalDeviceLimits limits = {
      .maxImageDimension1D = (1 << 14),
      .maxImageDimension2D = (1 << 14),
      .maxImageDimension3D = (1 << 11),
      .maxImageDimensionCube = (1 << 14),
      .maxImageArrayLayers = (1 << 11),
      .maxTexelBufferElements = 128 * 1024 * 1024,
      .maxUniformBufferRange = UINT32_MAX,
      .maxStorageBufferRange = UINT32_MAX,
      .maxPushConstantsSize = MAX_PUSH_CONSTANTS_SIZE,
      .maxMemoryAllocationCount = UINT32_MAX,
      .maxSamplerAllocationCount = 64 * 1024,
      .bufferImageGranularity = 64, /* A cache line */
      .sparseAddressSpaceSize = 0xffffffffu, /* buffer max size */
      .maxBoundDescriptorSets = MAX_SETS,
      .maxPerStageDescriptorSamplers = max_descriptor_set_size,
      .maxPerStageDescriptorUniformBuffers = max_descriptor_set_size,
      .maxPerStageDescriptorStorageBuffers = max_descriptor_set_size,
      .maxPerStageDescriptorSampledImages = max_descriptor_set_size,
      .maxPerStageDescriptorStorageImages = max_descriptor_set_size,
      .maxPerStageDescriptorInputAttachments = max_descriptor_set_size,
      .maxPerStageResources = max_descriptor_set_size,
      .maxDescriptorSetSamplers = max_descriptor_set_size,
      .maxDescriptorSetUniformBuffers = max_descriptor_set_size,
      .maxDescriptorSetUniformBuffersDynamic = MAX_DYNAMIC_UNIFORM_BUFFERS,
      .maxDescriptorSetStorageBuffers = max_descriptor_set_size,
      .maxDescriptorSetStorageBuffersDynamic = MAX_DYNAMIC_STORAGE_BUFFERS,
      .maxDescriptorSetSampledImages = max_descriptor_set_size,
      .maxDescriptorSetStorageImages = max_descriptor_set_size,
      .maxDescriptorSetInputAttachments = max_descriptor_set_size,
      .maxVertexInputAttributes = 32,
      .maxVertexInputBindings = 32,
      .maxVertexInputAttributeOffset = 2047,
      .maxVertexInputBindingStride = 2048,
      .maxVertexOutputComponents = 128,
      .maxTessellationGenerationLevel = 64,
      .maxTessellationPatchSize = 32,
      .maxTessellationControlPerVertexInputComponents = 128,
      .maxTessellationControlPerVertexOutputComponents = 128,
      .maxTessellationControlPerPatchOutputComponents = 120,
      .maxTessellationControlTotalOutputComponents = 4096,
      .maxTessellationEvaluationInputComponents = 128,
      .maxTessellationEvaluationOutputComponents = 128,
      .maxGeometryShaderInvocations = 127,
      .maxGeometryInputComponents = 64,
      .maxGeometryOutputComponents = 128,
      .maxGeometryOutputVertices = 256,
      .maxGeometryTotalOutputComponents = 1024,
      .maxFragmentInputComponents = 128,
      .maxFragmentOutputAttachments = 8,
      .maxFragmentDualSrcAttachments = 1,
      .maxFragmentCombinedOutputResources = 8,
      .maxComputeSharedMemorySize = 32768,
      .maxComputeWorkGroupCount = { 65535, 65535, 65535 },
      .maxComputeWorkGroupInvocations = 2048,
      .maxComputeWorkGroupSize = { 2048, 2048, 2048 },
      .subPixelPrecisionBits = 4 /* FIXME */,
      .subTexelPrecisionBits = 4 /* FIXME */,
      .mipmapPrecisionBits = 4 /* FIXME */,
      .maxDrawIndexedIndexValue = UINT32_MAX,
      .maxDrawIndirectCount = UINT32_MAX,
      .maxSamplerLodBias = 16,
      .maxSamplerAnisotropy = 16,
      .maxViewports = MAX_VIEWPORTS,
      .maxViewportDimensions = { (1 << 14), (1 << 14) },
      .viewportBoundsRange = { INT16_MIN, INT16_MAX },
      .viewportSubPixelBits = 8,
      .minMemoryMapAlignment = 4096, /* A page */
      .minTexelBufferOffsetAlignment = 1,
      .minUniformBufferOffsetAlignment = 4,
      .minStorageBufferOffsetAlignment = 4,
      .minTexelOffset = -32,
      .maxTexelOffset = 31,
      .minTexelGatherOffset = -32,
      .maxTexelGatherOffset = 31,
      .minInterpolationOffset = -2,
      .maxInterpolationOffset = 2,
      .subPixelInterpolationOffsetBits = 8,
      .maxFramebufferWidth = (1 << 14),
      .maxFramebufferHeight = (1 << 14),
      .maxFramebufferLayers = (1 << 10),
      .framebufferColorSampleCounts = sample_counts,
      .framebufferDepthSampleCounts = sample_counts,
      .framebufferStencilSampleCounts = sample_counts,
      .framebufferNoAttachmentsSampleCounts = sample_counts,
      .maxColorAttachments = MAX_RTS,
      .sampledImageColorSampleCounts = sample_counts,
      .sampledImageIntegerSampleCounts = VK_SAMPLE_COUNT_1_BIT,
      .sampledImageDepthSampleCounts = sample_counts,
      .sampledImageStencilSampleCounts = sample_counts,
      .storageImageSampleCounts = VK_SAMPLE_COUNT_1_BIT,
      .maxSampleMaskWords = 1,
      .timestampComputeAndGraphics = true,
      .timestampPeriod = 1,
      .maxClipDistances = 8,
      .maxCullDistances = 8,
      .maxCombinedClipAndCullDistances = 8,
      .discreteQueuePriorities = 1,
      .pointSizeRange = { 0.125, 255.875 },
      .lineWidthRange = { 0.0, 7.9921875 },
      .pointSizeGranularity = (1.0 / 8.0),
      .lineWidthGranularity = (1.0 / 128.0),
      .strictLines = false, /* FINISHME */
      .standardSampleLocations = true,
      .optimalBufferCopyOffsetAlignment = 128,
      .optimalBufferCopyRowPitchAlignment = 128,
      .nonCoherentAtomSize = 64,

   *pProperties = (VkPhysicalDeviceProperties){
      .apiVersion = tu_physical_device_api_version(pdevice),
      .driverVersion = vk_get_driver_version(),
      .vendorID = 0, /* TODO */
      .deviceType = VK_PHYSICAL_DEVICE_TYPE_INTEGRATED_GPU,
      .sparseProperties = { 0 },

   strcpy(pProperties->deviceName, pdevice->name);
   memcpy(pProperties->pipelineCacheUUID, pdevice->cache_uuid, VK_UUID_SIZE);
tu_GetPhysicalDeviceProperties2(VkPhysicalDevice physicalDevice,
                                VkPhysicalDeviceProperties2KHR *pProperties)
   TU_FROM_HANDLE(tu_physical_device, pdevice, physicalDevice);
   tu_GetPhysicalDeviceProperties(physicalDevice, &pProperties->properties);

   vk_foreach_struct(ext, pProperties->pNext)
      switch (ext->sType) {
      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PUSH_DESCRIPTOR_PROPERTIES_KHR: {
         VkPhysicalDevicePushDescriptorPropertiesKHR *properties =
            (VkPhysicalDevicePushDescriptorPropertiesKHR *)ext;
         properties->maxPushDescriptors = MAX_PUSH_DESCRIPTORS;
      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_ID_PROPERTIES_KHR: {
         VkPhysicalDeviceIDPropertiesKHR *properties =
            (VkPhysicalDeviceIDPropertiesKHR *)ext;
         memcpy(properties->driverUUID, pdevice->driver_uuid, VK_UUID_SIZE);
         memcpy(properties->deviceUUID, pdevice->device_uuid, VK_UUID_SIZE);
         properties->deviceLUIDValid = false;
      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MULTIVIEW_PROPERTIES_KHR: {
         VkPhysicalDeviceMultiviewPropertiesKHR *properties =
            (VkPhysicalDeviceMultiviewPropertiesKHR *)ext;
         properties->maxMultiviewViewCount = MAX_VIEWS;
         properties->maxMultiviewInstanceIndex = INT_MAX;
      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_POINT_CLIPPING_PROPERTIES_KHR: {
         VkPhysicalDevicePointClippingPropertiesKHR *properties =
            (VkPhysicalDevicePointClippingPropertiesKHR *)ext;
         properties->pointClippingBehavior =
            VK_POINT_CLIPPING_BEHAVIOR_ALL_CLIP_PLANES_KHR;
      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MAINTENANCE_3_PROPERTIES: {
         VkPhysicalDeviceMaintenance3Properties *properties =
            (VkPhysicalDeviceMaintenance3Properties *)ext;
         /* Make sure everything is addressable by a signed 32-bit int, and
          * our largest descriptors are 96 bytes. */
         properties->maxPerSetDescriptors = (1ull << 31) / 96;
         /* Our buffer size fields allow only this much */
         properties->maxMemoryAllocationSize = 0xFFFFFFFFull;
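         /* For reference, (1ull << 31) / 96 evaluates to 22369621, so roughly
          * 22 million descriptors per set, and the 0xFFFFFFFF cap corresponds
          * to an allocation of just under 4 GiB.
          */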
static const VkQueueFamilyProperties
   tu_queue_family_properties = {
      .queueFlags = VK_QUEUE_GRAPHICS_BIT |
                    VK_QUEUE_COMPUTE_BIT |
                    VK_QUEUE_TRANSFER_BIT,
      .timestampValidBits = 64,
      .minImageTransferGranularity = (VkExtent3D) { 1, 1, 1 },

tu_GetPhysicalDeviceQueueFamilyProperties(
   VkPhysicalDevice physicalDevice,
   uint32_t *pQueueFamilyPropertyCount,
   VkQueueFamilyProperties *pQueueFamilyProperties)
   VK_OUTARRAY_MAKE(out, pQueueFamilyProperties, pQueueFamilyPropertyCount);

   vk_outarray_append(&out, p) {
      *p = tu_queue_family_properties;

tu_GetPhysicalDeviceQueueFamilyProperties2(
   VkPhysicalDevice physicalDevice,
   uint32_t *pQueueFamilyPropertyCount,
   VkQueueFamilyProperties2KHR *pQueueFamilyProperties)
   VK_OUTARRAY_MAKE(out, pQueueFamilyProperties, pQueueFamilyPropertyCount);

   vk_outarray_append(&out, p) {
      p->queueFamilyProperties = tu_queue_family_properties;
tu_get_system_heap_size()
   uint64_t total_ram = (uint64_t)info.totalram * (uint64_t)info.mem_unit;

   /* We don't want to burn too much ram with the GPU. If the user has 4GiB
    * or less, we use at most half. If they have more than 4GiB, we use 3/4.
    */
   uint64_t available_ram;
   if (total_ram <= 4ull * 1024ull * 1024ull * 1024ull)
      available_ram = total_ram / 2;
   else
      available_ram = total_ram * 3 / 4;

   return available_ram;
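   /* Sanity check of the policy above: with exactly 4 GiB of system RAM the
    * heap is reported as 2 GiB, and with 16 GiB of RAM it is reported as
    * 12 GiB.
    */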
tu_GetPhysicalDeviceMemoryProperties(
   VkPhysicalDevice physicalDevice,
   VkPhysicalDeviceMemoryProperties *pMemoryProperties)
   pMemoryProperties->memoryHeapCount = 1;
   pMemoryProperties->memoryHeaps[0].size = tu_get_system_heap_size();
   pMemoryProperties->memoryHeaps[0].flags = VK_MEMORY_HEAP_DEVICE_LOCAL_BIT;

   pMemoryProperties->memoryTypeCount = 1;
   pMemoryProperties->memoryTypes[0].propertyFlags = VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT |
                                                     VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT |
                                                     VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
   pMemoryProperties->memoryTypes[0].heapIndex = 0;
tu_GetPhysicalDeviceMemoryProperties2(
   VkPhysicalDevice physicalDevice,
   VkPhysicalDeviceMemoryProperties2KHR *pMemoryProperties)
   return tu_GetPhysicalDeviceMemoryProperties(
      physicalDevice, &pMemoryProperties->memoryProperties);
tu_queue_init(struct tu_device *device,
              struct tu_queue *queue,
              uint32_t queue_family_index,
              VkDeviceQueueCreateFlags flags)
   queue->_loader_data.loaderMagic = ICD_LOADER_MAGIC;
   queue->device = device;
   queue->queue_family_index = queue_family_index;
   queue->queue_idx = idx;
   queue->flags = flags;

tu_queue_finish(struct tu_queue *queue)
tu_get_device_extension_index(const char *name)
   for (unsigned i = 0; i < TU_DEVICE_EXTENSION_COUNT; ++i) {
      if (strcmp(name, tu_device_extensions[i].extensionName) == 0)
tu_CreateDevice(VkPhysicalDevice physicalDevice,
                const VkDeviceCreateInfo *pCreateInfo,
                const VkAllocationCallbacks *pAllocator,
   TU_FROM_HANDLE(tu_physical_device, physical_device, physicalDevice);
   struct tu_device *device;

   /* Check enabled features */
   if (pCreateInfo->pEnabledFeatures) {
      VkPhysicalDeviceFeatures supported_features;
      tu_GetPhysicalDeviceFeatures(physicalDevice, &supported_features);
      VkBool32 *supported_feature = (VkBool32 *)&supported_features;
      VkBool32 *enabled_feature = (VkBool32 *)pCreateInfo->pEnabledFeatures;
      unsigned num_features =
         sizeof(VkPhysicalDeviceFeatures) / sizeof(VkBool32);
      for (uint32_t i = 0; i < num_features; i++) {
         if (enabled_feature[i] && !supported_feature[i])
            return vk_error(physical_device->instance,
                            VK_ERROR_FEATURE_NOT_PRESENT);

   device = vk_zalloc2(&physical_device->instance->alloc,
                       VK_SYSTEM_ALLOCATION_SCOPE_DEVICE);
      return vk_error(physical_device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);

   device->_loader_data.loaderMagic = ICD_LOADER_MAGIC;
   device->instance = physical_device->instance;
   device->physical_device = physical_device;

      device->alloc = *pAllocator;
      device->alloc = physical_device->instance->alloc;

   for (uint32_t i = 0; i < pCreateInfo->enabledExtensionCount; i++) {
      const char *ext_name = pCreateInfo->ppEnabledExtensionNames[i];
      int index = tu_get_device_extension_index(ext_name);
          !physical_device->supported_extensions.extensions[index]) {
         vk_free(&device->alloc, device);
         return vk_error(physical_device->instance,
                         VK_ERROR_EXTENSION_NOT_PRESENT);

      device->enabled_extensions.extensions[index] = true;

   for (unsigned i = 0; i < pCreateInfo->queueCreateInfoCount; i++) {
      const VkDeviceQueueCreateInfo *queue_create =
         &pCreateInfo->pQueueCreateInfos[i];
      uint32_t qfi = queue_create->queueFamilyIndex;
      device->queues[qfi] =
         vk_alloc(&device->alloc,
                  queue_create->queueCount * sizeof(struct tu_queue),
                  VK_SYSTEM_ALLOCATION_SCOPE_DEVICE);
      if (!device->queues[qfi]) {
         result = VK_ERROR_OUT_OF_HOST_MEMORY;

      memset(device->queues[qfi],
             queue_create->queueCount * sizeof(struct tu_queue));

      device->queue_count[qfi] = queue_create->queueCount;

      for (unsigned q = 0; q < queue_create->queueCount; q++) {
         result = tu_queue_init(
            device, &device->queues[qfi][q], qfi, q, queue_create->flags);
         if (result != VK_SUCCESS)

   VkPipelineCacheCreateInfo ci;
   ci.sType = VK_STRUCTURE_TYPE_PIPELINE_CACHE_CREATE_INFO;
   ci.pInitialData = NULL;
   ci.initialDataSize = 0;

   tu_CreatePipelineCache(tu_device_to_handle(device), &ci, NULL, &pc);
   if (result != VK_SUCCESS)

   device->mem_cache = tu_pipeline_cache_from_handle(pc);

   *pDevice = tu_device_to_handle(device);

   for (unsigned i = 0; i < TU_MAX_QUEUE_FAMILIES; i++) {
      for (unsigned q = 0; q < device->queue_count[i]; q++)
         tu_queue_finish(&device->queues[i][q]);
      if (device->queue_count[i])
         vk_free(&device->alloc, device->queues[i]);

   vk_free(&device->alloc, device);
tu_DestroyDevice(VkDevice _device, const VkAllocationCallbacks *pAllocator)
   TU_FROM_HANDLE(tu_device, device, _device);

   for (unsigned i = 0; i < TU_MAX_QUEUE_FAMILIES; i++) {
      for (unsigned q = 0; q < device->queue_count[i]; q++)
         tu_queue_finish(&device->queues[i][q]);
      if (device->queue_count[i])
         vk_free(&device->alloc, device->queues[i]);

   VkPipelineCache pc = tu_pipeline_cache_to_handle(device->mem_cache);
   tu_DestroyPipelineCache(tu_device_to_handle(device), pc, NULL);

   vk_free(&device->alloc, device);
tu_EnumerateInstanceLayerProperties(uint32_t *pPropertyCount,
                                    VkLayerProperties *pProperties)
   *pPropertyCount = 0;

tu_EnumerateDeviceLayerProperties(VkPhysicalDevice physicalDevice,
                                  uint32_t *pPropertyCount,
                                  VkLayerProperties *pProperties)
   *pPropertyCount = 0;
tu_GetDeviceQueue2(VkDevice _device,
                   const VkDeviceQueueInfo2 *pQueueInfo,
   TU_FROM_HANDLE(tu_device, device, _device);
   struct tu_queue *queue;

      &device->queues[pQueueInfo->queueFamilyIndex][pQueueInfo->queueIndex];

   if (pQueueInfo->flags != queue->flags) {
      /* From the Vulkan 1.1.70 spec:
       *
       * "The queue returned by vkGetDeviceQueue2 must have the same
       *  flags value from this structure as that used at device
       *  creation time in a VkDeviceQueueCreateInfo instance. If no
       *  matching flags were specified at device creation time then
       *  pQueue will return VK_NULL_HANDLE."
       */
      *pQueue = VK_NULL_HANDLE;
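      /* So, for example, a queue that was created with flags == 0 cannot be
       * retrieved with VK_DEVICE_QUEUE_CREATE_PROTECTED_BIT set in
       * pQueueInfo->flags; the mismatch makes this path return VK_NULL_HANDLE
       * instead of a queue handle.
       */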
   *pQueue = tu_queue_to_handle(queue);

tu_GetDeviceQueue(VkDevice _device,
                  uint32_t queueFamilyIndex,
                  uint32_t queueIndex,
   const VkDeviceQueueInfo2 info =
      (VkDeviceQueueInfo2){ .sType = VK_STRUCTURE_TYPE_DEVICE_QUEUE_INFO_2,
                            .queueFamilyIndex = queueFamilyIndex,
                            .queueIndex = queueIndex };

   tu_GetDeviceQueue2(_device, &info, pQueue);
tu_QueueSubmit(VkQueue _queue,
               uint32_t submitCount,
               const VkSubmitInfo *pSubmits,

tu_QueueWaitIdle(VkQueue _queue)

tu_DeviceWaitIdle(VkDevice _device)
   TU_FROM_HANDLE(tu_device, device, _device);

   for (unsigned i = 0; i < TU_MAX_QUEUE_FAMILIES; i++) {
      for (unsigned q = 0; q < device->queue_count[i]; q++) {
         tu_QueueWaitIdle(tu_queue_to_handle(&device->queues[i][q]));
tu_EnumerateInstanceExtensionProperties(const char *pLayerName,
                                        uint32_t *pPropertyCount,
                                        VkExtensionProperties *pProperties)
   VK_OUTARRAY_MAKE(out, pProperties, pPropertyCount);

   /* We support no layers */
      return vk_error(NULL, VK_ERROR_LAYER_NOT_PRESENT);

   for (int i = 0; i < TU_INSTANCE_EXTENSION_COUNT; i++) {
      if (tu_supported_instance_extensions.extensions[i]) {
         vk_outarray_append(&out, prop) { *prop = tu_instance_extensions[i]; }

   return vk_outarray_status(&out);
tu_EnumerateDeviceExtensionProperties(VkPhysicalDevice physicalDevice,
                                      const char *pLayerName,
                                      uint32_t *pPropertyCount,
                                      VkExtensionProperties *pProperties)
   /* We support no layers */
   TU_FROM_HANDLE(tu_physical_device, device, physicalDevice);
   VK_OUTARRAY_MAKE(out, pProperties, pPropertyCount);

   /* We support no layers */
      return vk_error(NULL, VK_ERROR_LAYER_NOT_PRESENT);

   for (int i = 0; i < TU_DEVICE_EXTENSION_COUNT; i++) {
      if (device->supported_extensions.extensions[i]) {
         vk_outarray_append(&out, prop) { *prop = tu_device_extensions[i]; }

   return vk_outarray_status(&out);
tu_GetInstanceProcAddr(VkInstance _instance, const char *pName)
   TU_FROM_HANDLE(tu_instance, instance, _instance);

   return tu_lookup_entrypoint_checked(pName,
                                       instance ? instance->api_version : 0,
                                       instance ? &instance->enabled_extensions

/* The loader wants us to expose a second GetInstanceProcAddr function
 * to work around certain LD_PRELOAD issues seen in apps.
 */
VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL
vk_icdGetInstanceProcAddr(VkInstance instance, const char *pName);

VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL
vk_icdGetInstanceProcAddr(VkInstance instance, const char *pName)
   return tu_GetInstanceProcAddr(instance, pName);

tu_GetDeviceProcAddr(VkDevice _device, const char *pName)
   TU_FROM_HANDLE(tu_device, device, _device);

   return tu_lookup_entrypoint_checked(pName,
                                       device->instance->api_version,
                                       &device->instance->enabled_extensions,
                                       &device->enabled_extensions);
tu_alloc_memory(struct tu_device *device,
                const VkMemoryAllocateInfo *pAllocateInfo,
                const VkAllocationCallbacks *pAllocator,
                VkDeviceMemory *pMem)
   struct tu_device_memory *mem;

   assert(pAllocateInfo->sType == VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO);

   if (pAllocateInfo->allocationSize == 0) {
      /* Apparently, this is allowed */
      *pMem = VK_NULL_HANDLE;

   mem = vk_alloc2(&device->alloc,
                   VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
      return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);

   result = tu_bo_init_new(device, &mem->bo, pAllocateInfo->allocationSize);
      vk_free2(&device->alloc, pAllocator, mem);

   mem->size = pAllocateInfo->allocationSize;
   mem->type_index = pAllocateInfo->memoryTypeIndex;

   mem->user_ptr = NULL;

   *pMem = tu_device_memory_to_handle(mem);
tu_AllocateMemory(VkDevice _device,
                  const VkMemoryAllocateInfo *pAllocateInfo,
                  const VkAllocationCallbacks *pAllocator,
                  VkDeviceMemory *pMem)
   TU_FROM_HANDLE(tu_device, device, _device);
   return tu_alloc_memory(device, pAllocateInfo, pAllocator, pMem);

tu_FreeMemory(VkDevice _device,
              VkDeviceMemory _mem,
              const VkAllocationCallbacks *pAllocator)
   TU_FROM_HANDLE(tu_device, device, _device);
   TU_FROM_HANDLE(tu_device_memory, mem, _mem);

   tu_bo_finish(device, &mem->bo);
   vk_free2(&device->alloc, pAllocator, mem);
tu_MapMemory(VkDevice _device,
             VkDeviceMemory _memory,
             VkDeviceSize offset,
             VkMemoryMapFlags flags,
   TU_FROM_HANDLE(tu_device, device, _device);
   TU_FROM_HANDLE(tu_device_memory, mem, _memory);

   if (mem->user_ptr) {
      *ppData = mem->user_ptr;
   } else if (!mem->map) {
      result = tu_bo_map(device, &mem->bo);
      if (result != VK_SUCCESS)
      mem->map = mem->bo.map;

   return vk_error(device->instance, VK_ERROR_MEMORY_MAP_FAILED);

tu_UnmapMemory(VkDevice _device, VkDeviceMemory _memory)
   /* I do not see any unmapping done by the freedreno Gallium driver. */
tu_FlushMappedMemoryRanges(VkDevice _device,
                           uint32_t memoryRangeCount,
                           const VkMappedMemoryRange *pMemoryRanges)

tu_InvalidateMappedMemoryRanges(VkDevice _device,
                                uint32_t memoryRangeCount,
                                const VkMappedMemoryRange *pMemoryRanges)
tu_GetBufferMemoryRequirements(VkDevice _device,
                               VkMemoryRequirements *pMemoryRequirements)
   TU_FROM_HANDLE(tu_buffer, buffer, _buffer);

   pMemoryRequirements->memoryTypeBits = 1;
   pMemoryRequirements->alignment = 16;
   pMemoryRequirements->size =
      align64(buffer->size, pMemoryRequirements->alignment);

tu_GetBufferMemoryRequirements2(
   const VkBufferMemoryRequirementsInfo2KHR *pInfo,
   VkMemoryRequirements2KHR *pMemoryRequirements)
   tu_GetBufferMemoryRequirements(
      device, pInfo->buffer, &pMemoryRequirements->memoryRequirements);

tu_GetImageMemoryRequirements(VkDevice _device,
                              VkMemoryRequirements *pMemoryRequirements)
   TU_FROM_HANDLE(tu_image, image, _image);

   pMemoryRequirements->memoryTypeBits = 1;
   pMemoryRequirements->size = image->size;
   pMemoryRequirements->alignment = image->alignment;

tu_GetImageMemoryRequirements2(VkDevice device,
                               const VkImageMemoryRequirementsInfo2KHR *pInfo,
                               VkMemoryRequirements2KHR *pMemoryRequirements)
   tu_GetImageMemoryRequirements(
      device, pInfo->image, &pMemoryRequirements->memoryRequirements);
tu_GetImageSparseMemoryRequirements(
   uint32_t *pSparseMemoryRequirementCount,
   VkSparseImageMemoryRequirements *pSparseMemoryRequirements)

tu_GetImageSparseMemoryRequirements2(
   const VkImageSparseMemoryRequirementsInfo2KHR *pInfo,
   uint32_t *pSparseMemoryRequirementCount,
   VkSparseImageMemoryRequirements2KHR *pSparseMemoryRequirements)

tu_GetDeviceMemoryCommitment(VkDevice device,
                             VkDeviceMemory memory,
                             VkDeviceSize *pCommittedMemoryInBytes)
   *pCommittedMemoryInBytes = 0;
tu_BindBufferMemory2(VkDevice device,
                     uint32_t bindInfoCount,
                     const VkBindBufferMemoryInfoKHR *pBindInfos)

tu_BindBufferMemory(VkDevice device,
                    VkDeviceMemory memory,
                    VkDeviceSize memoryOffset)
   const VkBindBufferMemoryInfoKHR info = {
      .sType = VK_STRUCTURE_TYPE_BIND_BUFFER_MEMORY_INFO_KHR,
      .memoryOffset = memoryOffset

   return tu_BindBufferMemory2(device, 1, &info);
tu_BindImageMemory2(VkDevice device,
                    uint32_t bindInfoCount,
                    const VkBindImageMemoryInfoKHR *pBindInfos)

tu_BindImageMemory(VkDevice device,
                   VkDeviceMemory memory,
                   VkDeviceSize memoryOffset)
   const VkBindImageMemoryInfoKHR info = {
      .sType = VK_STRUCTURE_TYPE_BIND_IMAGE_MEMORY_INFO_KHR,
      .memoryOffset = memoryOffset

   return tu_BindImageMemory2(device, 1, &info);
tu_QueueBindSparse(VkQueue _queue,
                   uint32_t bindInfoCount,
                   const VkBindSparseInfo *pBindInfo,

tu_CreateFence(VkDevice _device,
               const VkFenceCreateInfo *pCreateInfo,
               const VkAllocationCallbacks *pAllocator,
   TU_FROM_HANDLE(tu_device, device, _device);

   struct tu_fence *fence = vk_alloc2(&device->alloc,
                                      VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);

      return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);

   *pFence = tu_fence_to_handle(fence);
tu_DestroyFence(VkDevice _device,
                const VkAllocationCallbacks *pAllocator)
   TU_FROM_HANDLE(tu_device, device, _device);
   TU_FROM_HANDLE(tu_fence, fence, _fence);

   vk_free2(&device->alloc, pAllocator, fence);

tu_WaitForFences(VkDevice _device,
                 uint32_t fenceCount,
                 const VkFence *pFences,

tu_ResetFences(VkDevice _device, uint32_t fenceCount, const VkFence *pFences)

tu_GetFenceStatus(VkDevice _device, VkFence _fence)
// Queue semaphore functions

tu_CreateSemaphore(VkDevice _device,
                   const VkSemaphoreCreateInfo *pCreateInfo,
                   const VkAllocationCallbacks *pAllocator,
                   VkSemaphore *pSemaphore)
   TU_FROM_HANDLE(tu_device, device, _device);

   struct tu_semaphore *sem = vk_alloc2(&device->alloc,
                                        VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
      return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);

   *pSemaphore = tu_semaphore_to_handle(sem);

tu_DestroySemaphore(VkDevice _device,
                    VkSemaphore _semaphore,
                    const VkAllocationCallbacks *pAllocator)
   TU_FROM_HANDLE(tu_device, device, _device);
   TU_FROM_HANDLE(tu_semaphore, sem, _semaphore);

   vk_free2(&device->alloc, pAllocator, sem);
tu_CreateEvent(VkDevice _device,
               const VkEventCreateInfo *pCreateInfo,
               const VkAllocationCallbacks *pAllocator,
   TU_FROM_HANDLE(tu_device, device, _device);
   struct tu_event *event = vk_alloc2(&device->alloc,
                                      VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);

      return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);

   *pEvent = tu_event_to_handle(event);

tu_DestroyEvent(VkDevice _device,
                const VkAllocationCallbacks *pAllocator)
   TU_FROM_HANDLE(tu_device, device, _device);
   TU_FROM_HANDLE(tu_event, event, _event);

   vk_free2(&device->alloc, pAllocator, event);

tu_GetEventStatus(VkDevice _device, VkEvent _event)
   TU_FROM_HANDLE(tu_event, event, _event);

   if (*event->map == 1)
      return VK_EVENT_SET;
   return VK_EVENT_RESET;

tu_SetEvent(VkDevice _device, VkEvent _event)
   TU_FROM_HANDLE(tu_event, event, _event);

tu_ResetEvent(VkDevice _device, VkEvent _event)
   TU_FROM_HANDLE(tu_event, event, _event);
tu_CreateBuffer(VkDevice _device,
                const VkBufferCreateInfo *pCreateInfo,
                const VkAllocationCallbacks *pAllocator,
   TU_FROM_HANDLE(tu_device, device, _device);
   struct tu_buffer *buffer;

   assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO);

   buffer = vk_alloc2(&device->alloc,
                      VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
      return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);

   buffer->size = pCreateInfo->size;
   buffer->usage = pCreateInfo->usage;
   buffer->flags = pCreateInfo->flags;

   *pBuffer = tu_buffer_to_handle(buffer);

tu_DestroyBuffer(VkDevice _device,
                 const VkAllocationCallbacks *pAllocator)
   TU_FROM_HANDLE(tu_device, device, _device);
   TU_FROM_HANDLE(tu_buffer, buffer, _buffer);

   vk_free2(&device->alloc, pAllocator, buffer);
tu_surface_max_layer_count(struct tu_image_view *iview)
   return iview->type == VK_IMAGE_VIEW_TYPE_3D
             ? iview->extent.depth
             : (iview->base_layer + iview->layer_count);
tu_CreateFramebuffer(VkDevice _device,
                     const VkFramebufferCreateInfo *pCreateInfo,
                     const VkAllocationCallbacks *pAllocator,
                     VkFramebuffer *pFramebuffer)
   TU_FROM_HANDLE(tu_device, device, _device);
   struct tu_framebuffer *framebuffer;

   assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO);

      sizeof(*framebuffer) +
      sizeof(struct tu_attachment_info) * pCreateInfo->attachmentCount;
   framebuffer = vk_alloc2(
      &device->alloc, pAllocator, size, 8, VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (framebuffer == NULL)
      return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);

   framebuffer->attachment_count = pCreateInfo->attachmentCount;
   framebuffer->width = pCreateInfo->width;
   framebuffer->height = pCreateInfo->height;
   framebuffer->layers = pCreateInfo->layers;
   for (uint32_t i = 0; i < pCreateInfo->attachmentCount; i++) {
      VkImageView _iview = pCreateInfo->pAttachments[i];
      struct tu_image_view *iview = tu_image_view_from_handle(_iview);
      framebuffer->attachments[i].attachment = iview;

      framebuffer->width = MIN2(framebuffer->width, iview->extent.width);
      framebuffer->height = MIN2(framebuffer->height, iview->extent.height);
      framebuffer->layers =
         MIN2(framebuffer->layers, tu_surface_max_layer_count(iview));

   *pFramebuffer = tu_framebuffer_to_handle(framebuffer);

tu_DestroyFramebuffer(VkDevice _device,
                      const VkAllocationCallbacks *pAllocator)
   TU_FROM_HANDLE(tu_device, device, _device);
   TU_FROM_HANDLE(tu_framebuffer, fb, _fb);

   vk_free2(&device->alloc, pAllocator, fb);
tu_init_sampler(struct tu_device *device,
                struct tu_sampler *sampler,
                const VkSamplerCreateInfo *pCreateInfo)

tu_CreateSampler(VkDevice _device,
                 const VkSamplerCreateInfo *pCreateInfo,
                 const VkAllocationCallbacks *pAllocator,
                 VkSampler *pSampler)
   TU_FROM_HANDLE(tu_device, device, _device);
   struct tu_sampler *sampler;

   assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_SAMPLER_CREATE_INFO);

   sampler = vk_alloc2(&device->alloc,
                       VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
      return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);

   tu_init_sampler(device, sampler, pCreateInfo);
   *pSampler = tu_sampler_to_handle(sampler);

tu_DestroySampler(VkDevice _device,
                  const VkAllocationCallbacks *pAllocator)
   TU_FROM_HANDLE(tu_device, device, _device);
   TU_FROM_HANDLE(tu_sampler, sampler, _sampler);

   vk_free2(&device->alloc, pAllocator, sampler);
/* vk_icd.h does not declare this function, so we declare it here to
 * suppress Wmissing-prototypes.
 */
PUBLIC VKAPI_ATTR VkResult VKAPI_CALL
vk_icdNegotiateLoaderICDInterfaceVersion(uint32_t *pSupportedVersion);

PUBLIC VKAPI_ATTR VkResult VKAPI_CALL
vk_icdNegotiateLoaderICDInterfaceVersion(uint32_t *pSupportedVersion)
   /* For the full details on loader interface versioning, see
    * <https://github.com/KhronosGroup/Vulkan-LoaderAndValidationLayers/blob/master/loader/LoaderAndLayerInterface.md>.
    * What follows is a condensed summary, to help you navigate the large and
    * confusing official doc.
    *
    * - Loader interface v0 is incompatible with later versions. We don't
    *
    * - In loader interface v1:
    *    - The first ICD entrypoint called by the loader is
    *      vk_icdGetInstanceProcAddr(). The ICD must statically expose this
    *    - The ICD must statically expose no other Vulkan symbol unless it is
    *      linked with -Bsymbolic.
    *    - Each dispatchable Vulkan handle created by the ICD must be
    *      a pointer to a struct whose first member is VK_LOADER_DATA. The
    *      ICD must initialize VK_LOADER_DATA.loaderMagic to ICD_LOADER_MAGIC.
    *    - The loader implements vkCreate{PLATFORM}SurfaceKHR() and
    *      vkDestroySurfaceKHR(). The ICD must be capable of working with
    *      such loader-managed surfaces.
    *
    * - Loader interface v2 differs from v1 in:
    *    - The first ICD entrypoint called by the loader is
    *      vk_icdNegotiateLoaderICDInterfaceVersion(). The ICD must
    *      statically expose this entrypoint.
    *
    * - Loader interface v3 differs from v2 in:
    *    - The ICD must implement vkCreate{PLATFORM}SurfaceKHR(),
    *      vkDestroySurfaceKHR(), and other API which uses VkSurfaceKHR,
    *      because the loader no longer does so.
    */
   *pSupportedVersion = MIN2(*pSupportedVersion, 3u);
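   /* E.g. a loader that offers interface version 4 is negotiated down to 3
    * here, while a loader that only offers version 2 keeps its value of 2.
    */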
tu_GetPhysicalDeviceExternalSemaphoreProperties(
   VkPhysicalDevice physicalDevice,
   const VkPhysicalDeviceExternalSemaphoreInfoKHR *pExternalSemaphoreInfo,
   VkExternalSemaphorePropertiesKHR *pExternalSemaphoreProperties)
   pExternalSemaphoreProperties->exportFromImportedHandleTypes = 0;
   pExternalSemaphoreProperties->compatibleHandleTypes = 0;
   pExternalSemaphoreProperties->externalSemaphoreFeatures = 0;

tu_GetPhysicalDeviceExternalFenceProperties(
   VkPhysicalDevice physicalDevice,
   const VkPhysicalDeviceExternalFenceInfoKHR *pExternalFenceInfo,
   VkExternalFencePropertiesKHR *pExternalFenceProperties)
   pExternalFenceProperties->exportFromImportedHandleTypes = 0;
   pExternalFenceProperties->compatibleHandleTypes = 0;
   pExternalFenceProperties->externalFenceFeatures = 0;
tu_CreateDebugReportCallbackEXT(
   VkInstance _instance,
   const VkDebugReportCallbackCreateInfoEXT *pCreateInfo,
   const VkAllocationCallbacks *pAllocator,
   VkDebugReportCallbackEXT *pCallback)
   TU_FROM_HANDLE(tu_instance, instance, _instance);
   return vk_create_debug_report_callback(&instance->debug_report_callbacks,

tu_DestroyDebugReportCallbackEXT(VkInstance _instance,
                                 VkDebugReportCallbackEXT _callback,
                                 const VkAllocationCallbacks *pAllocator)
   TU_FROM_HANDLE(tu_instance, instance, _instance);
   vk_destroy_debug_report_callback(&instance->debug_report_callbacks,

tu_DebugReportMessageEXT(VkInstance _instance,
                         VkDebugReportFlagsEXT flags,
                         VkDebugReportObjectTypeEXT objectType,
                         int32_t messageCode,
                         const char *pLayerPrefix,
                         const char *pMessage)
   TU_FROM_HANDLE(tu_instance, instance, _instance);
   vk_debug_report(&instance->debug_report_callbacks,
tu_GetDeviceGroupPeerMemoryFeatures(
   uint32_t localDeviceIndex,
   uint32_t remoteDeviceIndex,
   VkPeerMemoryFeatureFlags *pPeerMemoryFeatures)
   assert(localDeviceIndex == remoteDeviceIndex);

   *pPeerMemoryFeatures = VK_PEER_MEMORY_FEATURE_COPY_SRC_BIT |
                          VK_PEER_MEMORY_FEATURE_COPY_DST_BIT |
                          VK_PEER_MEMORY_FEATURE_GENERIC_SRC_BIT |
                          VK_PEER_MEMORY_FEATURE_GENERIC_DST_BIT;