/*
 * Copyright © 2016 Red Hat.
 * Copyright © 2016 Bas Nieuwenhuizen
 *
 * based in part on anv driver which is:
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
#include "tu_private.h"

#include "util/debug.h"
#include "util/disk_cache.h"
#include "util/strtod.h"
#include "vk_format.h"

#include <assert.h>
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/mman.h>
#include <sys/sysinfo.h>
#include <unistd.h>
#include <xf86drm.h>
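/* A note on the pipeline-cache UUID built by tu_device_get_cache_uuid()
 * below (inferred from its memset/memcpy/snprintf calls, not an ABI
 * guarantee):
 *   bytes 0-3   Mesa build timestamp
 *   bytes 4-5   GPU family (the gpu_id, e.g. 630)
 *   bytes 6+    the literal string "tu", with the remainder left zeroed
 */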
static int
tu_device_get_cache_uuid(uint16_t family, void *uuid)
{
   uint32_t mesa_timestamp;
   uint16_t f = family;

   memset(uuid, 0, VK_UUID_SIZE);
   if (!disk_cache_get_function_timestamp(tu_device_get_cache_uuid,
                                          &mesa_timestamp))
      return -1;

   memcpy(uuid, &mesa_timestamp, 4);
   memcpy((char *)uuid + 4, &f, 2);
   snprintf((char *)uuid + 6, VK_UUID_SIZE - 10, "tu");
   return 0;
}
static void
tu_get_driver_uuid(void *uuid)
{
   memset(uuid, 0, VK_UUID_SIZE);
}

static void
tu_get_device_uuid(void *uuid)
{
   memset(uuid, 0, VK_UUID_SIZE);
}
static VkResult
tu_bo_init_new(struct tu_device *dev, struct tu_bo *bo, uint64_t size)
{
   /* TODO: Choose better flags. As of 2018-11-12, freedreno/drm/msm_bo.c
    * always sets `flags = MSM_BO_WC`, and we copy that behavior here.
    */
   uint32_t gem_handle = tu_gem_new(dev, size, MSM_BO_WC);
   if (!gem_handle)
      return vk_error(dev->instance, VK_ERROR_OUT_OF_DEVICE_MEMORY);

   /* Calling DRM_MSM_GEM_INFO forces the kernel to allocate backing pages. We
    * want immediate backing pages because vkAllocateMemory and friends must
    * not lazily fail.
    *
    * TODO(chadv): Must we really call DRM_MSM_GEM_INFO to acquire backing
    * pages? I infer so from reading comments in msm_bo.c:bo_allocate(), but
    * maybe I misunderstand.
    */

   /* TODO: Do we need 'offset' if we have 'iova'? */
   uint64_t offset = tu_gem_info_offset(dev, gem_handle);
   if (!offset)
      goto fail;

   uint64_t iova = tu_gem_info_iova(dev, gem_handle);
   if (!iova)
      goto fail;

   *bo = (struct tu_bo) {
      .gem_handle = gem_handle,
      .size = size,
      .offset = offset,
      .iova = iova,
   };

   return VK_SUCCESS;

fail:
   tu_gem_close(dev, gem_handle);
   return vk_error(dev->instance, VK_ERROR_OUT_OF_DEVICE_MEMORY);
}
static VkResult
tu_bo_map(struct tu_device *dev, struct tu_bo *bo)
{
   /* TODO: Should we use the wrapper os_mmap() like Freedreno does? */
   void *map = mmap(0, bo->size, PROT_READ | PROT_WRITE, MAP_SHARED,
                    dev->physical_device->local_fd, bo->offset);
   if (map == MAP_FAILED)
      return vk_error(dev->instance, VK_ERROR_MEMORY_MAP_FAILED);

   bo->map = map;
   return VK_SUCCESS;
}
static void
tu_bo_finish(struct tu_device *dev, struct tu_bo *bo)
{
   assert(bo->gem_handle);

   if (bo->map)
      munmap(bo->map, bo->size);

   tu_gem_close(dev, bo->gem_handle);
}
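/* Minimal sketch of how the three BO helpers above are meant to be used
 * together (error handling trimmed; `dev` is assumed to be an already
 * initialized tu_device):
 *
 *    struct tu_bo bo;
 *    if (tu_bo_init_new(dev, &bo, 4096) == VK_SUCCESS &&
 *        tu_bo_map(dev, &bo) == VK_SUCCESS) {
 *       memset(bo.map, 0, 4096);   // CPU-visible, write-combined pages
 *       tu_bo_finish(dev, &bo);
 *    }
 */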
static VkResult
tu_physical_device_init(struct tu_physical_device *device,
                        struct tu_instance *instance,
                        drmDevicePtr drm_device)
{
   const char *path = drm_device->nodes[DRM_NODE_RENDER];
   VkResult result = VK_SUCCESS;
   drmVersionPtr version;
   int fd;
   int master_fd = -1;
   struct fd_pipe *tmp_pipe = NULL;
   uint64_t val;

   fd = open(path, O_RDWR | O_CLOEXEC);
   if (fd < 0) {
      return vk_errorf(instance, VK_ERROR_INCOMPATIBLE_DRIVER,
                       "failed to open device %s", path);
   }

   /* Version 1.3 added MSM_INFO_IOVA. */
   const int min_version_major = 1;
   const int min_version_minor = 3;

   version = drmGetVersion(fd);
   if (!version) {
      close(fd);
      return vk_errorf(instance, VK_ERROR_INCOMPATIBLE_DRIVER,
                       "failed to query kernel driver version for device %s",
                       path);
   }

   if (strcmp(version->name, "msm")) {
      drmFreeVersion(version);
      close(fd);
      return vk_errorf(instance, VK_ERROR_INCOMPATIBLE_DRIVER,
                       "device %s does not use the msm kernel driver", path);
   }

   if (version->version_major != min_version_major ||
       version->version_minor < min_version_minor) {
      result = vk_errorf(instance, VK_ERROR_INCOMPATIBLE_DRIVER,
                         "kernel driver for device %s has version %d.%d, "
                         "but Vulkan requires version >= %d.%d",
                         path, version->version_major, version->version_minor,
                         min_version_major, min_version_minor);
      drmFreeVersion(version);
      close(fd);
      return result;
   }

   drmFreeVersion(version);

   if (instance->debug_flags & TU_DEBUG_STARTUP)
      tu_logi("Found compatible device '%s'.", path);

   device->_loader_data.loaderMagic = ICD_LOADER_MAGIC;
   device->instance = instance;
   assert(strlen(path) < ARRAY_SIZE(device->path));
   strncpy(device->path, path, ARRAY_SIZE(device->path));

   if (instance->enabled_extensions.KHR_display) {
      master_fd =
         open(drm_device->nodes[DRM_NODE_PRIMARY], O_RDWR | O_CLOEXEC);
      if (master_fd >= 0) {
         /* TODO: free master_fd if accel is not working? */
      }
   }

   device->master_fd = master_fd;
   device->local_fd = fd;

   device->drm_device = fd_device_new_dup(fd);
   if (!device->drm_device) {
      result = vk_errorf(instance, VK_ERROR_INITIALIZATION_FAILED,
                         "could not create the libdrm device");
      goto fail;
   }

   tmp_pipe = fd_pipe_new(device->drm_device, FD_PIPE_3D);
   if (!tmp_pipe) {
      result = vk_errorf(instance, VK_ERROR_INITIALIZATION_FAILED,
                         "could not open the 3D pipe");
      goto fail;
   }

   if (fd_pipe_get_param(tmp_pipe, FD_GPU_ID, &val)) {
      result = vk_errorf(instance, VK_ERROR_INITIALIZATION_FAILED,
                         "could not get GPU ID");
      goto fail;
   }
   device->gpu_id = val;

   if (fd_pipe_get_param(tmp_pipe, FD_GMEM_SIZE, &val)) {
      result = vk_errorf(instance, VK_ERROR_INITIALIZATION_FAILED,
                         "could not get GMEM size");
      goto fail;
   }
   device->gmem_size = val;

   fd_pipe_del(tmp_pipe);
   tmp_pipe = NULL;

   memset(device->name, 0, sizeof(device->name));
   sprintf(device->name, "FD%d", device->gpu_id);

   switch (device->gpu_id) {
   case 530:
   case 630:
      break;
   default:
      result = vk_errorf(instance, VK_ERROR_INITIALIZATION_FAILED,
                         "device %s is unsupported", device->name);
      goto fail;
   }

   if (tu_device_get_cache_uuid(device->gpu_id, device->cache_uuid)) {
      result = vk_errorf(instance, VK_ERROR_INITIALIZATION_FAILED,
                         "cannot generate UUID");
      goto fail;
   }

   /* The gpu id is already embedded in the uuid so we just pass "tu"
    * when creating the cache.
    */
   char buf[VK_UUID_SIZE * 2 + 1];
   disk_cache_format_hex_id(buf, device->cache_uuid, VK_UUID_SIZE * 2);
   device->disk_cache = disk_cache_create(device->name, buf, 0);

   fprintf(stderr,
           "WARNING: tu is not a conformant vulkan implementation, "
           "testing use only.\n");

   tu_get_driver_uuid(&device->driver_uuid);
   tu_get_device_uuid(&device->device_uuid);

   tu_fill_device_extension_table(device, &device->supported_extensions);

   if (result != VK_SUCCESS) {
      vk_error(instance, result);
      goto fail;
   }

   return VK_SUCCESS;

fail:
   if (tmp_pipe)
      fd_pipe_del(tmp_pipe);
   if (device->drm_device)
      fd_device_del(device->drm_device);
   close(fd);
   if (master_fd != -1)
      close(master_fd);
   return result;
}
static void
tu_physical_device_finish(struct tu_physical_device *device)
{
   disk_cache_destroy(device->disk_cache);
   close(device->local_fd);
   if (device->master_fd != -1)
      close(device->master_fd);
}
static void *
default_alloc_func(void *pUserData,
                   size_t size,
                   size_t align,
                   VkSystemAllocationScope allocationScope)
{
   return malloc(size);
}

static void *
default_realloc_func(void *pUserData,
                     void *pOriginal,
                     size_t size,
                     size_t align,
                     VkSystemAllocationScope allocationScope)
{
   return realloc(pOriginal, size);
}

static void
default_free_func(void *pUserData, void *pMemory)
{
   free(pMemory);
}

static const VkAllocationCallbacks default_alloc = {
   .pUserData = NULL,
   .pfnAllocation = default_alloc_func,
   .pfnReallocation = default_realloc_func,
   .pfnFree = default_free_func,
};
static const struct debug_control tu_debug_options[] = {
   { "startup", TU_DEBUG_STARTUP },
   { NULL, 0 }
};

const char *
tu_get_debug_option_name(int id)
{
   assert(id < ARRAY_SIZE(tu_debug_options) - 1);
   return tu_debug_options[id].string;
}
static int
tu_get_instance_extension_index(const char *name)
{
   for (unsigned i = 0; i < TU_INSTANCE_EXTENSION_COUNT; ++i) {
      if (strcmp(name, tu_instance_extensions[i].extensionName) == 0)
         return i;
   }
   return -1;
}
VkResult
tu_CreateInstance(const VkInstanceCreateInfo *pCreateInfo,
                  const VkAllocationCallbacks *pAllocator,
                  VkInstance *pInstance)
{
   struct tu_instance *instance;
   VkResult result;

   assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_INSTANCE_CREATE_INFO);

   uint32_t client_version;
   if (pCreateInfo->pApplicationInfo &&
       pCreateInfo->pApplicationInfo->apiVersion != 0) {
      client_version = pCreateInfo->pApplicationInfo->apiVersion;
   } else {
      tu_EnumerateInstanceVersion(&client_version);
   }

   instance = vk_zalloc2(&default_alloc, pAllocator, sizeof(*instance), 8,
                         VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
   if (!instance)
      return vk_error(NULL, VK_ERROR_OUT_OF_HOST_MEMORY);

   instance->_loader_data.loaderMagic = ICD_LOADER_MAGIC;

   if (pAllocator)
      instance->alloc = *pAllocator;
   else
      instance->alloc = default_alloc;

   instance->api_version = client_version;
   instance->physical_device_count = -1;

   instance->debug_flags =
      parse_debug_string(getenv("TU_DEBUG"), tu_debug_options);

   if (instance->debug_flags & TU_DEBUG_STARTUP)
      tu_logi("Created an instance");

   for (uint32_t i = 0; i < pCreateInfo->enabledExtensionCount; i++) {
      const char *ext_name = pCreateInfo->ppEnabledExtensionNames[i];
      int index = tu_get_instance_extension_index(ext_name);

      if (index < 0 || !tu_supported_instance_extensions.extensions[index]) {
         vk_free2(&default_alloc, pAllocator, instance);
         return vk_error(instance, VK_ERROR_EXTENSION_NOT_PRESENT);
      }

      instance->enabled_extensions.extensions[index] = true;
   }

   result = vk_debug_report_instance_init(&instance->debug_report_callbacks);
   if (result != VK_SUCCESS) {
      vk_free2(&default_alloc, pAllocator, instance);
      return vk_error(instance, result);
   }

   VG(VALGRIND_CREATE_MEMPOOL(instance, 0, false));

   *pInstance = tu_instance_to_handle(instance);

   return VK_SUCCESS;
}
void
tu_DestroyInstance(VkInstance _instance,
                   const VkAllocationCallbacks *pAllocator)
{
   TU_FROM_HANDLE(tu_instance, instance, _instance);

   if (!instance)
      return;

   for (int i = 0; i < instance->physical_device_count; ++i) {
      tu_physical_device_finish(instance->physical_devices + i);
   }

   VG(VALGRIND_DESTROY_MEMPOOL(instance));

   vk_debug_report_instance_destroy(&instance->debug_report_callbacks);

   vk_free(&instance->alloc, instance);
}
static VkResult
tu_enumerate_devices(struct tu_instance *instance)
{
   /* TODO: Check for more devices? */
   drmDevicePtr devices[8];
   VkResult result = VK_ERROR_INCOMPATIBLE_DRIVER;
   int max_devices;

   instance->physical_device_count = 0;

   max_devices = drmGetDevices2(0, devices, ARRAY_SIZE(devices));

   if (instance->debug_flags & TU_DEBUG_STARTUP)
      tu_logi("Found %d drm nodes", max_devices);

   if (max_devices < 1)
      return vk_error(instance, VK_ERROR_INCOMPATIBLE_DRIVER);

   for (unsigned i = 0; i < (unsigned)max_devices; i++) {
      if (devices[i]->available_nodes & 1 << DRM_NODE_RENDER &&
          devices[i]->bustype == DRM_BUS_PLATFORM) {

         result = tu_physical_device_init(instance->physical_devices +
                                             instance->physical_device_count,
                                          instance, devices[i]);
         if (result == VK_SUCCESS)
            ++instance->physical_device_count;
         else if (result != VK_ERROR_INCOMPATIBLE_DRIVER)
            break;
      }
   }
   drmFreeDevices(devices, max_devices);

   return result;
}
VkResult
tu_EnumeratePhysicalDevices(VkInstance _instance,
                            uint32_t *pPhysicalDeviceCount,
                            VkPhysicalDevice *pPhysicalDevices)
{
   TU_FROM_HANDLE(tu_instance, instance, _instance);
   VK_OUTARRAY_MAKE(out, pPhysicalDevices, pPhysicalDeviceCount);

   VkResult result;

   if (instance->physical_device_count < 0) {
      result = tu_enumerate_devices(instance);
      if (result != VK_SUCCESS && result != VK_ERROR_INCOMPATIBLE_DRIVER)
         return result;
   }

   for (uint32_t i = 0; i < instance->physical_device_count; ++i) {
      vk_outarray_append(&out, p) {
         *p = tu_physical_device_to_handle(instance->physical_devices + i);
      }
   }

   return vk_outarray_status(&out);
}
VkResult
tu_EnumeratePhysicalDeviceGroups(
   VkInstance _instance,
   uint32_t *pPhysicalDeviceGroupCount,
   VkPhysicalDeviceGroupProperties *pPhysicalDeviceGroupProperties)
{
   TU_FROM_HANDLE(tu_instance, instance, _instance);
   VK_OUTARRAY_MAKE(out, pPhysicalDeviceGroupProperties,
                    pPhysicalDeviceGroupCount);

   VkResult result;

   if (instance->physical_device_count < 0) {
      result = tu_enumerate_devices(instance);
      if (result != VK_SUCCESS && result != VK_ERROR_INCOMPATIBLE_DRIVER)
         return result;
   }

   for (uint32_t i = 0; i < instance->physical_device_count; ++i) {
      vk_outarray_append(&out, p) {
         p->physicalDeviceCount = 1;
         p->physicalDevices[0] =
            tu_physical_device_to_handle(instance->physical_devices + i);
         p->subsetAllocation = false;
      }
   }

   return vk_outarray_status(&out);
}
void
tu_GetPhysicalDeviceFeatures(VkPhysicalDevice physicalDevice,
                             VkPhysicalDeviceFeatures *pFeatures)
{
   memset(pFeatures, 0, sizeof(*pFeatures));

   *pFeatures = (VkPhysicalDeviceFeatures){
      .robustBufferAccess = false,
      .fullDrawIndexUint32 = false,
      .imageCubeArray = false,
      .independentBlend = false,
      .geometryShader = false,
      .tessellationShader = false,
      .sampleRateShading = false,
      .dualSrcBlend = false,
      .multiDrawIndirect = false,
      .drawIndirectFirstInstance = false,
      .depthBiasClamp = false,
      .fillModeNonSolid = false,
      .depthBounds = false,
      .largePoints = false,
      .multiViewport = false,
      .samplerAnisotropy = false,
      .textureCompressionETC2 = false,
      .textureCompressionASTC_LDR = false,
      .textureCompressionBC = false,
      .occlusionQueryPrecise = false,
      .pipelineStatisticsQuery = false,
      .vertexPipelineStoresAndAtomics = false,
      .fragmentStoresAndAtomics = false,
      .shaderTessellationAndGeometryPointSize = false,
      .shaderImageGatherExtended = false,
      .shaderStorageImageExtendedFormats = false,
      .shaderStorageImageMultisample = false,
      .shaderUniformBufferArrayDynamicIndexing = false,
      .shaderSampledImageArrayDynamicIndexing = false,
      .shaderStorageBufferArrayDynamicIndexing = false,
      .shaderStorageImageArrayDynamicIndexing = false,
      .shaderStorageImageReadWithoutFormat = false,
      .shaderStorageImageWriteWithoutFormat = false,
      .shaderClipDistance = false,
      .shaderCullDistance = false,
      .shaderFloat64 = false,
      .shaderInt64 = false,
      .shaderInt16 = false,
      .sparseBinding = false,
      .variableMultisampleRate = false,
      .inheritedQueries = false,
   };
}
void
tu_GetPhysicalDeviceFeatures2(VkPhysicalDevice physicalDevice,
                              VkPhysicalDeviceFeatures2KHR *pFeatures)
{
   vk_foreach_struct(ext, pFeatures->pNext)
   {
      switch (ext->sType) {
      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VARIABLE_POINTER_FEATURES_KHR: {
         VkPhysicalDeviceVariablePointerFeaturesKHR *features = (void *)ext;
         features->variablePointersStorageBuffer = false;
         features->variablePointers = false;
         break;
      }
      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MULTIVIEW_FEATURES_KHR: {
         VkPhysicalDeviceMultiviewFeaturesKHR *features =
            (VkPhysicalDeviceMultiviewFeaturesKHR *)ext;
         features->multiview = false;
         features->multiviewGeometryShader = false;
         features->multiviewTessellationShader = false;
         break;
      }
      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_DRAW_PARAMETER_FEATURES: {
         VkPhysicalDeviceShaderDrawParameterFeatures *features =
            (VkPhysicalDeviceShaderDrawParameterFeatures *)ext;
         features->shaderDrawParameters = false;
         break;
      }
      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROTECTED_MEMORY_FEATURES: {
         VkPhysicalDeviceProtectedMemoryFeatures *features =
            (VkPhysicalDeviceProtectedMemoryFeatures *)ext;
         features->protectedMemory = false;
         break;
      }
      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_16BIT_STORAGE_FEATURES: {
         VkPhysicalDevice16BitStorageFeatures *features =
            (VkPhysicalDevice16BitStorageFeatures *)ext;
         features->storageBuffer16BitAccess = false;
         features->uniformAndStorageBuffer16BitAccess = false;
         features->storagePushConstant16 = false;
         features->storageInputOutput16 = false;
         break;
      }
      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SAMPLER_YCBCR_CONVERSION_FEATURES: {
         VkPhysicalDeviceSamplerYcbcrConversionFeatures *features =
            (VkPhysicalDeviceSamplerYcbcrConversionFeatures *)ext;
         features->samplerYcbcrConversion = false;
         break;
      }
      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DESCRIPTOR_INDEXING_FEATURES_EXT: {
         VkPhysicalDeviceDescriptorIndexingFeaturesEXT *features =
            (VkPhysicalDeviceDescriptorIndexingFeaturesEXT *)ext;
         features->shaderInputAttachmentArrayDynamicIndexing = false;
         features->shaderUniformTexelBufferArrayDynamicIndexing = false;
         features->shaderStorageTexelBufferArrayDynamicIndexing = false;
         features->shaderUniformBufferArrayNonUniformIndexing = false;
         features->shaderSampledImageArrayNonUniformIndexing = false;
         features->shaderStorageBufferArrayNonUniformIndexing = false;
         features->shaderStorageImageArrayNonUniformIndexing = false;
         features->shaderInputAttachmentArrayNonUniformIndexing = false;
         features->shaderUniformTexelBufferArrayNonUniformIndexing = false;
         features->shaderStorageTexelBufferArrayNonUniformIndexing = false;
         features->descriptorBindingUniformBufferUpdateAfterBind = false;
         features->descriptorBindingSampledImageUpdateAfterBind = false;
         features->descriptorBindingStorageImageUpdateAfterBind = false;
         features->descriptorBindingStorageBufferUpdateAfterBind = false;
         features->descriptorBindingUniformTexelBufferUpdateAfterBind = false;
         features->descriptorBindingStorageTexelBufferUpdateAfterBind = false;
         features->descriptorBindingUpdateUnusedWhilePending = false;
         features->descriptorBindingPartiallyBound = false;
         features->descriptorBindingVariableDescriptorCount = false;
         features->runtimeDescriptorArray = false;
         break;
      }
      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_CONDITIONAL_RENDERING_FEATURES_EXT: {
         VkPhysicalDeviceConditionalRenderingFeaturesEXT *features =
            (VkPhysicalDeviceConditionalRenderingFeaturesEXT *)ext;
         features->conditionalRendering = false;
         features->inheritedConditionalRendering = false;
         break;
      }
      default:
         break;
      }
   }

   tu_GetPhysicalDeviceFeatures(physicalDevice, &pFeatures->features);
}
void
tu_GetPhysicalDeviceProperties(VkPhysicalDevice physicalDevice,
                               VkPhysicalDeviceProperties *pProperties)
{
   TU_FROM_HANDLE(tu_physical_device, pdevice, physicalDevice);
   VkSampleCountFlags sample_counts = 0xf;

   /* Make sure that the entire descriptor set is addressable with a signed
    * 32-bit int. So the sum of all limits scaled by descriptor size has to
    * be at most 2 GiB. The combined image & sampler object counts as one of
    * both. This limit is for the pipeline layout, not for the set layout,
    * but there is no set limit, so we just set a pipeline limit. I don't
    * think any app is going to hit this soon. */
   size_t max_descriptor_set_size =
      ((1ull << 31) - 16 * MAX_DYNAMIC_BUFFERS) /
      (32 /* uniform buffer, 32 due to potential space wasted on alignment */ +
       32 /* storage buffer, 32 due to potential space wasted on alignment */ +
       32 /* sampler, largest when combined with image */ +
       64 /* sampled image */ + 64 /* storage image */);
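   /* Rough arithmetic behind the value above (a sketch, not a queried
    * hardware limit): each descriptor is charged 32 + 32 + 32 + 64 + 64 =
    * 224 bytes, so max_descriptor_set_size works out to roughly
    * (2^31 - 16 * MAX_DYNAMIC_BUFFERS) / 224, i.e. a little under 9.6
    * million descriptors per stage.
    */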
   VkPhysicalDeviceLimits limits = {
      .maxImageDimension1D = (1 << 14),
      .maxImageDimension2D = (1 << 14),
      .maxImageDimension3D = (1 << 11),
      .maxImageDimensionCube = (1 << 14),
      .maxImageArrayLayers = (1 << 11),
      .maxTexelBufferElements = 128 * 1024 * 1024,
      .maxUniformBufferRange = UINT32_MAX,
      .maxStorageBufferRange = UINT32_MAX,
      .maxPushConstantsSize = MAX_PUSH_CONSTANTS_SIZE,
      .maxMemoryAllocationCount = UINT32_MAX,
      .maxSamplerAllocationCount = 64 * 1024,
      .bufferImageGranularity = 64,          /* A cache line */
      .sparseAddressSpaceSize = 0xffffffffu, /* buffer max size */
      .maxBoundDescriptorSets = MAX_SETS,
      .maxPerStageDescriptorSamplers = max_descriptor_set_size,
      .maxPerStageDescriptorUniformBuffers = max_descriptor_set_size,
      .maxPerStageDescriptorStorageBuffers = max_descriptor_set_size,
      .maxPerStageDescriptorSampledImages = max_descriptor_set_size,
      .maxPerStageDescriptorStorageImages = max_descriptor_set_size,
      .maxPerStageDescriptorInputAttachments = max_descriptor_set_size,
      .maxPerStageResources = max_descriptor_set_size,
      .maxDescriptorSetSamplers = max_descriptor_set_size,
      .maxDescriptorSetUniformBuffers = max_descriptor_set_size,
      .maxDescriptorSetUniformBuffersDynamic = MAX_DYNAMIC_UNIFORM_BUFFERS,
      .maxDescriptorSetStorageBuffers = max_descriptor_set_size,
      .maxDescriptorSetStorageBuffersDynamic = MAX_DYNAMIC_STORAGE_BUFFERS,
      .maxDescriptorSetSampledImages = max_descriptor_set_size,
      .maxDescriptorSetStorageImages = max_descriptor_set_size,
      .maxDescriptorSetInputAttachments = max_descriptor_set_size,
      .maxVertexInputAttributes = 32,
      .maxVertexInputBindings = 32,
      .maxVertexInputAttributeOffset = 2047,
      .maxVertexInputBindingStride = 2048,
      .maxVertexOutputComponents = 128,
      .maxTessellationGenerationLevel = 64,
      .maxTessellationPatchSize = 32,
      .maxTessellationControlPerVertexInputComponents = 128,
      .maxTessellationControlPerVertexOutputComponents = 128,
      .maxTessellationControlPerPatchOutputComponents = 120,
      .maxTessellationControlTotalOutputComponents = 4096,
      .maxTessellationEvaluationInputComponents = 128,
      .maxTessellationEvaluationOutputComponents = 128,
      .maxGeometryShaderInvocations = 127,
      .maxGeometryInputComponents = 64,
      .maxGeometryOutputComponents = 128,
      .maxGeometryOutputVertices = 256,
      .maxGeometryTotalOutputComponents = 1024,
      .maxFragmentInputComponents = 128,
      .maxFragmentOutputAttachments = 8,
      .maxFragmentDualSrcAttachments = 1,
      .maxFragmentCombinedOutputResources = 8,
      .maxComputeSharedMemorySize = 32768,
      .maxComputeWorkGroupCount = { 65535, 65535, 65535 },
      .maxComputeWorkGroupInvocations = 2048,
      .maxComputeWorkGroupSize = { 2048, 2048, 2048 },
      .subPixelPrecisionBits = 4 /* FIXME */,
      .subTexelPrecisionBits = 4 /* FIXME */,
      .mipmapPrecisionBits = 4 /* FIXME */,
      .maxDrawIndexedIndexValue = UINT32_MAX,
      .maxDrawIndirectCount = UINT32_MAX,
      .maxSamplerLodBias = 16,
      .maxSamplerAnisotropy = 16,
      .maxViewports = MAX_VIEWPORTS,
      .maxViewportDimensions = { (1 << 14), (1 << 14) },
      .viewportBoundsRange = { INT16_MIN, INT16_MAX },
      .viewportSubPixelBits = 8,
      .minMemoryMapAlignment = 4096, /* A page */
      .minTexelBufferOffsetAlignment = 1,
      .minUniformBufferOffsetAlignment = 4,
      .minStorageBufferOffsetAlignment = 4,
      .minTexelOffset = -32,
      .maxTexelOffset = 31,
      .minTexelGatherOffset = -32,
      .maxTexelGatherOffset = 31,
      .minInterpolationOffset = -2,
      .maxInterpolationOffset = 2,
      .subPixelInterpolationOffsetBits = 8,
      .maxFramebufferWidth = (1 << 14),
      .maxFramebufferHeight = (1 << 14),
      .maxFramebufferLayers = (1 << 10),
      .framebufferColorSampleCounts = sample_counts,
      .framebufferDepthSampleCounts = sample_counts,
      .framebufferStencilSampleCounts = sample_counts,
      .framebufferNoAttachmentsSampleCounts = sample_counts,
      .maxColorAttachments = MAX_RTS,
      .sampledImageColorSampleCounts = sample_counts,
      .sampledImageIntegerSampleCounts = VK_SAMPLE_COUNT_1_BIT,
      .sampledImageDepthSampleCounts = sample_counts,
      .sampledImageStencilSampleCounts = sample_counts,
      .storageImageSampleCounts = VK_SAMPLE_COUNT_1_BIT,
      .maxSampleMaskWords = 1,
      .timestampComputeAndGraphics = true,
      .timestampPeriod = 1,
      .maxClipDistances = 8,
      .maxCullDistances = 8,
      .maxCombinedClipAndCullDistances = 8,
      .discreteQueuePriorities = 1,
      .pointSizeRange = { 0.125, 255.875 },
      .lineWidthRange = { 0.0, 7.9921875 },
      .pointSizeGranularity = (1.0 / 8.0),
      .lineWidthGranularity = (1.0 / 128.0),
      .strictLines = false, /* FINISHME */
      .standardSampleLocations = true,
      .optimalBufferCopyOffsetAlignment = 128,
      .optimalBufferCopyRowPitchAlignment = 128,
      .nonCoherentAtomSize = 64,
   };

   *pProperties = (VkPhysicalDeviceProperties){
      .apiVersion = tu_physical_device_api_version(pdevice),
      .driverVersion = vk_get_driver_version(),
      .vendorID = 0, /* TODO */
      .deviceID = 0,
      .deviceType = VK_PHYSICAL_DEVICE_TYPE_INTEGRATED_GPU,
      .limits = limits,
      .sparseProperties = { 0 },
   };

   strcpy(pProperties->deviceName, pdevice->name);
   memcpy(pProperties->pipelineCacheUUID, pdevice->cache_uuid, VK_UUID_SIZE);
}
void
tu_GetPhysicalDeviceProperties2(VkPhysicalDevice physicalDevice,
                                VkPhysicalDeviceProperties2KHR *pProperties)
{
   TU_FROM_HANDLE(tu_physical_device, pdevice, physicalDevice);
   tu_GetPhysicalDeviceProperties(physicalDevice, &pProperties->properties);

   vk_foreach_struct(ext, pProperties->pNext)
   {
      switch (ext->sType) {
      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PUSH_DESCRIPTOR_PROPERTIES_KHR: {
         VkPhysicalDevicePushDescriptorPropertiesKHR *properties =
            (VkPhysicalDevicePushDescriptorPropertiesKHR *)ext;
         properties->maxPushDescriptors = MAX_PUSH_DESCRIPTORS;
         break;
      }
      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_ID_PROPERTIES_KHR: {
         VkPhysicalDeviceIDPropertiesKHR *properties =
            (VkPhysicalDeviceIDPropertiesKHR *)ext;
         memcpy(properties->driverUUID, pdevice->driver_uuid, VK_UUID_SIZE);
         memcpy(properties->deviceUUID, pdevice->device_uuid, VK_UUID_SIZE);
         properties->deviceLUIDValid = false;
         break;
      }
      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MULTIVIEW_PROPERTIES_KHR: {
         VkPhysicalDeviceMultiviewPropertiesKHR *properties =
            (VkPhysicalDeviceMultiviewPropertiesKHR *)ext;
         properties->maxMultiviewViewCount = MAX_VIEWS;
         properties->maxMultiviewInstanceIndex = INT_MAX;
         break;
      }
      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_POINT_CLIPPING_PROPERTIES_KHR: {
         VkPhysicalDevicePointClippingPropertiesKHR *properties =
            (VkPhysicalDevicePointClippingPropertiesKHR *)ext;
         properties->pointClippingBehavior =
            VK_POINT_CLIPPING_BEHAVIOR_ALL_CLIP_PLANES_KHR;
         break;
      }
      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MAINTENANCE_3_PROPERTIES: {
         VkPhysicalDeviceMaintenance3Properties *properties =
            (VkPhysicalDeviceMaintenance3Properties *)ext;
         /* Make sure everything is addressable by a signed 32-bit int, and
          * our largest descriptors are 96 bytes. */
         properties->maxPerSetDescriptors = (1ull << 31) / 96;
         /* Our buffer size fields allow only this much */
         properties->maxMemoryAllocationSize = 0xFFFFFFFFull;
         break;
      }
      default:
         break;
      }
   }
}
static const VkQueueFamilyProperties tu_queue_family_properties = {
   .queueFlags =
      VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT | VK_QUEUE_TRANSFER_BIT,
   .queueCount = 1,
   .timestampValidBits = 64,
   .minImageTransferGranularity = (VkExtent3D) { 1, 1, 1 },
};

void
tu_GetPhysicalDeviceQueueFamilyProperties(
   VkPhysicalDevice physicalDevice,
   uint32_t *pQueueFamilyPropertyCount,
   VkQueueFamilyProperties *pQueueFamilyProperties)
{
   VK_OUTARRAY_MAKE(out, pQueueFamilyProperties, pQueueFamilyPropertyCount);

   vk_outarray_append(&out, p) {
      *p = tu_queue_family_properties;
   }
}

void
tu_GetPhysicalDeviceQueueFamilyProperties2(
   VkPhysicalDevice physicalDevice,
   uint32_t *pQueueFamilyPropertyCount,
   VkQueueFamilyProperties2KHR *pQueueFamilyProperties)
{
   VK_OUTARRAY_MAKE(out, pQueueFamilyProperties, pQueueFamilyPropertyCount);

   vk_outarray_append(&out, p) {
      p->queueFamilyProperties = tu_queue_family_properties;
   }
}
static uint64_t
tu_get_system_heap_size()
{
   struct sysinfo info;
   sysinfo(&info);

   uint64_t total_ram = (uint64_t)info.totalram * (uint64_t)info.mem_unit;

   /* We don't want to burn too much ram with the GPU. If the user has 4GiB
    * or less, we use at most half. If they have more than 4GiB, we use 3/4.
    */
   uint64_t available_ram;
   if (total_ram <= 4ull * 1024ull * 1024ull * 1024ull)
      available_ram = total_ram / 2;
   else
      available_ram = total_ram * 3 / 4;

   return available_ram;
}
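/* Worked example of the sizing rule above: a board with 4 GiB of RAM
 * advertises a 2 GiB heap, while an 8 GiB board advertises a 6 GiB heap.
 */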
void
tu_GetPhysicalDeviceMemoryProperties(
   VkPhysicalDevice physicalDevice,
   VkPhysicalDeviceMemoryProperties *pMemoryProperties)
{
   pMemoryProperties->memoryHeapCount = 1;
   pMemoryProperties->memoryHeaps[0].size = tu_get_system_heap_size();
   pMemoryProperties->memoryHeaps[0].flags = VK_MEMORY_HEAP_DEVICE_LOCAL_BIT;

   pMemoryProperties->memoryTypeCount = 1;
   pMemoryProperties->memoryTypes[0].propertyFlags =
      VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT |
      VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT |
      VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
   pMemoryProperties->memoryTypes[0].heapIndex = 0;
}

void
tu_GetPhysicalDeviceMemoryProperties2(
   VkPhysicalDevice physicalDevice,
   VkPhysicalDeviceMemoryProperties2KHR *pMemoryProperties)
{
   tu_GetPhysicalDeviceMemoryProperties(
      physicalDevice, &pMemoryProperties->memoryProperties);
}
static VkResult
tu_queue_init(struct tu_device *device,
              struct tu_queue *queue,
              uint32_t queue_family_index,
              int idx,
              VkDeviceQueueCreateFlags flags)
{
   queue->_loader_data.loaderMagic = ICD_LOADER_MAGIC;
   queue->device = device;
   queue->queue_family_index = queue_family_index;
   queue->queue_idx = idx;
   queue->flags = flags;

   return VK_SUCCESS;
}

static void
tu_queue_finish(struct tu_queue *queue)
{
}
static int
tu_get_device_extension_index(const char *name)
{
   for (unsigned i = 0; i < TU_DEVICE_EXTENSION_COUNT; ++i) {
      if (strcmp(name, tu_device_extensions[i].extensionName) == 0)
         return i;
   }
   return -1;
}
VkResult
tu_CreateDevice(VkPhysicalDevice physicalDevice,
                const VkDeviceCreateInfo *pCreateInfo,
                const VkAllocationCallbacks *pAllocator,
                VkDevice *pDevice)
{
   TU_FROM_HANDLE(tu_physical_device, physical_device, physicalDevice);
   VkResult result;
   struct tu_device *device;

   /* Check enabled features */
   if (pCreateInfo->pEnabledFeatures) {
      VkPhysicalDeviceFeatures supported_features;
      tu_GetPhysicalDeviceFeatures(physicalDevice, &supported_features);
      VkBool32 *supported_feature = (VkBool32 *)&supported_features;
      VkBool32 *enabled_feature = (VkBool32 *)pCreateInfo->pEnabledFeatures;
      unsigned num_features =
         sizeof(VkPhysicalDeviceFeatures) / sizeof(VkBool32);
      for (uint32_t i = 0; i < num_features; i++) {
         if (enabled_feature[i] && !supported_feature[i])
            return vk_error(physical_device->instance,
                            VK_ERROR_FEATURE_NOT_PRESENT);
      }
   }

   device = vk_zalloc2(&physical_device->instance->alloc, pAllocator,
                       sizeof(*device), 8, VK_SYSTEM_ALLOCATION_SCOPE_DEVICE);
   if (!device)
      return vk_error(physical_device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);

   device->_loader_data.loaderMagic = ICD_LOADER_MAGIC;
   device->instance = physical_device->instance;
   device->physical_device = physical_device;

   if (pAllocator)
      device->alloc = *pAllocator;
   else
      device->alloc = physical_device->instance->alloc;

   for (uint32_t i = 0; i < pCreateInfo->enabledExtensionCount; i++) {
      const char *ext_name = pCreateInfo->ppEnabledExtensionNames[i];
      int index = tu_get_device_extension_index(ext_name);
      if (index < 0 ||
          !physical_device->supported_extensions.extensions[index]) {
         vk_free(&device->alloc, device);
         return vk_error(physical_device->instance,
                         VK_ERROR_EXTENSION_NOT_PRESENT);
      }

      device->enabled_extensions.extensions[index] = true;
   }

   for (unsigned i = 0; i < pCreateInfo->queueCreateInfoCount; i++) {
      const VkDeviceQueueCreateInfo *queue_create =
         &pCreateInfo->pQueueCreateInfos[i];
      uint32_t qfi = queue_create->queueFamilyIndex;
      device->queues[qfi] =
         vk_alloc(&device->alloc,
                  queue_create->queueCount * sizeof(struct tu_queue), 8,
                  VK_SYSTEM_ALLOCATION_SCOPE_DEVICE);
      if (!device->queues[qfi]) {
         result = VK_ERROR_OUT_OF_HOST_MEMORY;
         goto fail;
      }

      memset(device->queues[qfi], 0,
             queue_create->queueCount * sizeof(struct tu_queue));

      device->queue_count[qfi] = queue_create->queueCount;

      for (unsigned q = 0; q < queue_create->queueCount; q++) {
         result = tu_queue_init(
            device, &device->queues[qfi][q], qfi, q, queue_create->flags);
         if (result != VK_SUCCESS)
            goto fail;
      }
   }

   VkPipelineCacheCreateInfo ci;
   ci.sType = VK_STRUCTURE_TYPE_PIPELINE_CACHE_CREATE_INFO;
   ci.pNext = NULL;
   ci.flags = 0;
   ci.pInitialData = NULL;
   ci.initialDataSize = 0;

   VkPipelineCache pc;
   result =
      tu_CreatePipelineCache(tu_device_to_handle(device), &ci, NULL, &pc);
   if (result != VK_SUCCESS)
      goto fail;

   device->mem_cache = tu_pipeline_cache_from_handle(pc);

   *pDevice = tu_device_to_handle(device);
   return VK_SUCCESS;

fail:
   for (unsigned i = 0; i < TU_MAX_QUEUE_FAMILIES; i++) {
      for (unsigned q = 0; q < device->queue_count[i]; q++)
         tu_queue_finish(&device->queues[i][q]);
      if (device->queue_count[i])
         vk_free(&device->alloc, device->queues[i]);
   }

   vk_free(&device->alloc, device);
   return result;
}
void
tu_DestroyDevice(VkDevice _device, const VkAllocationCallbacks *pAllocator)
{
   TU_FROM_HANDLE(tu_device, device, _device);

   if (!device)
      return;

   for (unsigned i = 0; i < TU_MAX_QUEUE_FAMILIES; i++) {
      for (unsigned q = 0; q < device->queue_count[i]; q++)
         tu_queue_finish(&device->queues[i][q]);
      if (device->queue_count[i])
         vk_free(&device->alloc, device->queues[i]);
   }

   VkPipelineCache pc = tu_pipeline_cache_to_handle(device->mem_cache);
   tu_DestroyPipelineCache(tu_device_to_handle(device), pc, NULL);

   vk_free(&device->alloc, device);
}
VkResult
tu_EnumerateInstanceLayerProperties(uint32_t *pPropertyCount,
                                    VkLayerProperties *pProperties)
{
   *pPropertyCount = 0;

   return VK_SUCCESS;
}

VkResult
tu_EnumerateDeviceLayerProperties(VkPhysicalDevice physicalDevice,
                                  uint32_t *pPropertyCount,
                                  VkLayerProperties *pProperties)
{
   *pPropertyCount = 0;

   return VK_SUCCESS;
}
void
tu_GetDeviceQueue2(VkDevice _device,
                   const VkDeviceQueueInfo2 *pQueueInfo,
                   VkQueue *pQueue)
{
   TU_FROM_HANDLE(tu_device, device, _device);
   struct tu_queue *queue;

   queue =
      &device->queues[pQueueInfo->queueFamilyIndex][pQueueInfo->queueIndex];
   if (pQueueInfo->flags != queue->flags) {
      /* From the Vulkan 1.1.70 spec:
       *
       * "The queue returned by vkGetDeviceQueue2 must have the same
       * flags value from this structure as that used at device
       * creation time in a VkDeviceQueueCreateInfo instance. If no
       * matching flags were specified at device creation time then
       * pQueue will return VK_NULL_HANDLE."
       */
      *pQueue = VK_NULL_HANDLE;
      return;
   }

   *pQueue = tu_queue_to_handle(queue);
}

void
tu_GetDeviceQueue(VkDevice _device,
                  uint32_t queueFamilyIndex,
                  uint32_t queueIndex,
                  VkQueue *pQueue)
{
   const VkDeviceQueueInfo2 info =
      (VkDeviceQueueInfo2){ .sType = VK_STRUCTURE_TYPE_DEVICE_QUEUE_INFO_2,
                            .queueFamilyIndex = queueFamilyIndex,
                            .queueIndex = queueIndex };

   tu_GetDeviceQueue2(_device, &info, pQueue);
}
VkResult
tu_QueueSubmit(VkQueue _queue,
               uint32_t submitCount,
               const VkSubmitInfo *pSubmits,
               VkFence _fence)
{
   return VK_SUCCESS;
}

VkResult
tu_QueueWaitIdle(VkQueue _queue)
{
   return VK_SUCCESS;
}

VkResult
tu_DeviceWaitIdle(VkDevice _device)
{
   TU_FROM_HANDLE(tu_device, device, _device);

   for (unsigned i = 0; i < TU_MAX_QUEUE_FAMILIES; i++) {
      for (unsigned q = 0; q < device->queue_count[i]; q++) {
         tu_QueueWaitIdle(tu_queue_to_handle(&device->queues[i][q]));
      }
   }
   return VK_SUCCESS;
}
VkResult
tu_EnumerateInstanceExtensionProperties(const char *pLayerName,
                                        uint32_t *pPropertyCount,
                                        VkExtensionProperties *pProperties)
{
   VK_OUTARRAY_MAKE(out, pProperties, pPropertyCount);

   /* We support no layers. */
   if (pLayerName)
      return vk_error(NULL, VK_ERROR_LAYER_NOT_PRESENT);

   for (int i = 0; i < TU_INSTANCE_EXTENSION_COUNT; i++) {
      if (tu_supported_instance_extensions.extensions[i]) {
         vk_outarray_append(&out, prop) { *prop = tu_instance_extensions[i]; }
      }
   }

   return vk_outarray_status(&out);
}
VkResult
tu_EnumerateDeviceExtensionProperties(VkPhysicalDevice physicalDevice,
                                      const char *pLayerName,
                                      uint32_t *pPropertyCount,
                                      VkExtensionProperties *pProperties)
{
   TU_FROM_HANDLE(tu_physical_device, device, physicalDevice);
   VK_OUTARRAY_MAKE(out, pProperties, pPropertyCount);

   /* We support no layers. */
   if (pLayerName)
      return vk_error(NULL, VK_ERROR_LAYER_NOT_PRESENT);

   for (int i = 0; i < TU_DEVICE_EXTENSION_COUNT; i++) {
      if (device->supported_extensions.extensions[i]) {
         vk_outarray_append(&out, prop) { *prop = tu_device_extensions[i]; }
      }
   }

   return vk_outarray_status(&out);
}
PFN_vkVoidFunction
tu_GetInstanceProcAddr(VkInstance _instance, const char *pName)
{
   TU_FROM_HANDLE(tu_instance, instance, _instance);

   return tu_lookup_entrypoint_checked(pName,
                                       instance ? instance->api_version : 0,
                                       instance ? &instance->enabled_extensions
                                                : NULL,
                                       NULL);
}

/* The loader wants us to expose a second GetInstanceProcAddr function
 * to work around certain LD_PRELOAD issues seen in apps.
 */
VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL
vk_icdGetInstanceProcAddr(VkInstance instance, const char *pName);

VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL
vk_icdGetInstanceProcAddr(VkInstance instance, const char *pName)
{
   return tu_GetInstanceProcAddr(instance, pName);
}

PFN_vkVoidFunction
tu_GetDeviceProcAddr(VkDevice _device, const char *pName)
{
   TU_FROM_HANDLE(tu_device, device, _device);

   return tu_lookup_entrypoint_checked(pName,
                                       device->instance->api_version,
                                       &device->instance->enabled_extensions,
                                       &device->enabled_extensions);
}
static VkResult
tu_alloc_memory(struct tu_device *device,
                const VkMemoryAllocateInfo *pAllocateInfo,
                const VkAllocationCallbacks *pAllocator,
                VkDeviceMemory *pMem)
{
   struct tu_device_memory *mem;
   VkResult result;

   assert(pAllocateInfo->sType == VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO);

   if (pAllocateInfo->allocationSize == 0) {
      /* Apparently, this is allowed */
      *pMem = VK_NULL_HANDLE;
      return VK_SUCCESS;
   }

   mem = vk_alloc2(&device->alloc, pAllocator, sizeof(*mem), 8,
                   VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (mem == NULL)
      return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);

   result = tu_bo_init_new(device, &mem->bo, pAllocateInfo->allocationSize);
   if (result != VK_SUCCESS) {
      vk_free2(&device->alloc, pAllocator, mem);
      return result;
   }

   mem->size = pAllocateInfo->allocationSize;
   mem->type_index = pAllocateInfo->memoryTypeIndex;

   mem->map = NULL;
   mem->user_ptr = NULL;

   *pMem = tu_device_memory_to_handle(mem);

   return VK_SUCCESS;
}

VkResult
tu_AllocateMemory(VkDevice _device,
                  const VkMemoryAllocateInfo *pAllocateInfo,
                  const VkAllocationCallbacks *pAllocator,
                  VkDeviceMemory *pMem)
{
   TU_FROM_HANDLE(tu_device, device, _device);
   return tu_alloc_memory(device, pAllocateInfo, pAllocator, pMem);
}

void
tu_FreeMemory(VkDevice _device,
              VkDeviceMemory _mem,
              const VkAllocationCallbacks *pAllocator)
{
   TU_FROM_HANDLE(tu_device, device, _device);
   TU_FROM_HANDLE(tu_device_memory, mem, _mem);

   if (mem == NULL)
      return;

   tu_bo_finish(device, &mem->bo);
   vk_free2(&device->alloc, pAllocator, mem);
}
VkResult
tu_MapMemory(VkDevice _device,
             VkDeviceMemory _memory,
             VkDeviceSize offset,
             VkDeviceSize size,
             VkMemoryMapFlags flags,
             void **ppData)
{
   TU_FROM_HANDLE(tu_device, device, _device);
   TU_FROM_HANDLE(tu_device_memory, mem, _memory);
   VkResult result;

   if (mem == NULL) {
      *ppData = NULL;
      return VK_SUCCESS;
   }

   if (mem->user_ptr) {
      *ppData = mem->user_ptr;
   } else if (!mem->map) {
      result = tu_bo_map(device, &mem->bo);
      if (result != VK_SUCCESS)
         return result;
      *ppData = mem->map = mem->bo.map;
   } else {
      *ppData = mem->map;
   }

   if (*ppData) {
      *ppData = (char *)*ppData + offset;
      return VK_SUCCESS;
   }

   return vk_error(device->instance, VK_ERROR_MEMORY_MAP_FAILED);
}

void
tu_UnmapMemory(VkDevice _device, VkDeviceMemory _memory)
{
   /* I do not see any unmapping done by the freedreno Gallium driver. */
}

VkResult
tu_FlushMappedMemoryRanges(VkDevice _device,
                           uint32_t memoryRangeCount,
                           const VkMappedMemoryRange *pMemoryRanges)
{
   return VK_SUCCESS;
}

VkResult
tu_InvalidateMappedMemoryRanges(VkDevice _device,
                                uint32_t memoryRangeCount,
                                const VkMappedMemoryRange *pMemoryRanges)
{
   return VK_SUCCESS;
}
void
tu_GetBufferMemoryRequirements(VkDevice _device,
                               VkBuffer _buffer,
                               VkMemoryRequirements *pMemoryRequirements)
{
   TU_FROM_HANDLE(tu_buffer, buffer, _buffer);

   pMemoryRequirements->memoryTypeBits = 1;
   pMemoryRequirements->alignment = 16;
   pMemoryRequirements->size =
      align64(buffer->size, pMemoryRequirements->alignment);
}

void
tu_GetBufferMemoryRequirements2(
   VkDevice device,
   const VkBufferMemoryRequirementsInfo2KHR *pInfo,
   VkMemoryRequirements2KHR *pMemoryRequirements)
{
   tu_GetBufferMemoryRequirements(
      device, pInfo->buffer, &pMemoryRequirements->memoryRequirements);
}

void
tu_GetImageMemoryRequirements(VkDevice _device,
                              VkImage _image,
                              VkMemoryRequirements *pMemoryRequirements)
{
   TU_FROM_HANDLE(tu_image, image, _image);

   pMemoryRequirements->memoryTypeBits = 1;
   pMemoryRequirements->size = image->size;
   pMemoryRequirements->alignment = image->alignment;
}

void
tu_GetImageMemoryRequirements2(VkDevice device,
                               const VkImageMemoryRequirementsInfo2KHR *pInfo,
                               VkMemoryRequirements2KHR *pMemoryRequirements)
{
   tu_GetImageMemoryRequirements(
      device, pInfo->image, &pMemoryRequirements->memoryRequirements);
}

void
tu_GetImageSparseMemoryRequirements(
   VkDevice device,
   VkImage image,
   uint32_t *pSparseMemoryRequirementCount,
   VkSparseImageMemoryRequirements *pSparseMemoryRequirements)
{
   *pSparseMemoryRequirementCount = 0;
}

void
tu_GetImageSparseMemoryRequirements2(
   VkDevice device,
   const VkImageSparseMemoryRequirementsInfo2KHR *pInfo,
   uint32_t *pSparseMemoryRequirementCount,
   VkSparseImageMemoryRequirements2KHR *pSparseMemoryRequirements)
{
   *pSparseMemoryRequirementCount = 0;
}

void
tu_GetDeviceMemoryCommitment(VkDevice device,
                             VkDeviceMemory memory,
                             VkDeviceSize *pCommittedMemoryInBytes)
{
   *pCommittedMemoryInBytes = 0;
}
VkResult
tu_BindBufferMemory2(VkDevice device,
                     uint32_t bindInfoCount,
                     const VkBindBufferMemoryInfoKHR *pBindInfos)
{
   return VK_SUCCESS;
}

VkResult
tu_BindBufferMemory(VkDevice device,
                    VkBuffer buffer,
                    VkDeviceMemory memory,
                    VkDeviceSize memoryOffset)
{
   const VkBindBufferMemoryInfoKHR info = {
      .sType = VK_STRUCTURE_TYPE_BIND_BUFFER_MEMORY_INFO_KHR,
      .buffer = buffer,
      .memory = memory,
      .memoryOffset = memoryOffset
   };

   return tu_BindBufferMemory2(device, 1, &info);
}

VkResult
tu_BindImageMemory2(VkDevice device,
                    uint32_t bindInfoCount,
                    const VkBindImageMemoryInfoKHR *pBindInfos)
{
   return VK_SUCCESS;
}

VkResult
tu_BindImageMemory(VkDevice device,
                   VkImage image,
                   VkDeviceMemory memory,
                   VkDeviceSize memoryOffset)
{
   const VkBindImageMemoryInfoKHR info = {
      .sType = VK_STRUCTURE_TYPE_BIND_IMAGE_MEMORY_INFO_KHR,
      .image = image,
      .memory = memory,
      .memoryOffset = memoryOffset
   };

   return tu_BindImageMemory2(device, 1, &info);
}
VkResult
tu_QueueBindSparse(VkQueue _queue,
                   uint32_t bindInfoCount,
                   const VkBindSparseInfo *pBindInfo,
                   VkFence _fence)
{
   return VK_SUCCESS;
}
VkResult
tu_CreateFence(VkDevice _device,
               const VkFenceCreateInfo *pCreateInfo,
               const VkAllocationCallbacks *pAllocator,
               VkFence *pFence)
{
   TU_FROM_HANDLE(tu_device, device, _device);

   struct tu_fence *fence =
      vk_alloc2(&device->alloc, pAllocator, sizeof(*fence), 8,
                VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);

   if (!fence)
      return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);

   *pFence = tu_fence_to_handle(fence);

   return VK_SUCCESS;
}

void
tu_DestroyFence(VkDevice _device,
                VkFence _fence,
                const VkAllocationCallbacks *pAllocator)
{
   TU_FROM_HANDLE(tu_device, device, _device);
   TU_FROM_HANDLE(tu_fence, fence, _fence);

   if (!fence)
      return;

   vk_free2(&device->alloc, pAllocator, fence);
}

VkResult
tu_WaitForFences(VkDevice _device,
                 uint32_t fenceCount,
                 const VkFence *pFences,
                 VkBool32 waitAll,
                 uint64_t timeout)
{
   return VK_SUCCESS;
}

VkResult
tu_ResetFences(VkDevice _device, uint32_t fenceCount, const VkFence *pFences)
{
   return VK_SUCCESS;
}

VkResult
tu_GetFenceStatus(VkDevice _device, VkFence _fence)
{
   return VK_SUCCESS;
}
// Queue semaphore functions

VkResult
tu_CreateSemaphore(VkDevice _device,
                   const VkSemaphoreCreateInfo *pCreateInfo,
                   const VkAllocationCallbacks *pAllocator,
                   VkSemaphore *pSemaphore)
{
   TU_FROM_HANDLE(tu_device, device, _device);

   struct tu_semaphore *sem =
      vk_alloc2(&device->alloc, pAllocator, sizeof(*sem), 8,
                VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (!sem)
      return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);

   *pSemaphore = tu_semaphore_to_handle(sem);
   return VK_SUCCESS;
}

void
tu_DestroySemaphore(VkDevice _device,
                    VkSemaphore _semaphore,
                    const VkAllocationCallbacks *pAllocator)
{
   TU_FROM_HANDLE(tu_device, device, _device);
   TU_FROM_HANDLE(tu_semaphore, sem, _semaphore);

   if (!_semaphore)
      return;

   vk_free2(&device->alloc, pAllocator, sem);
}
VkResult
tu_CreateEvent(VkDevice _device,
               const VkEventCreateInfo *pCreateInfo,
               const VkAllocationCallbacks *pAllocator,
               VkEvent *pEvent)
{
   TU_FROM_HANDLE(tu_device, device, _device);
   struct tu_event *event =
      vk_alloc2(&device->alloc, pAllocator, sizeof(*event), 8,
                VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);

   if (!event)
      return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);

   *pEvent = tu_event_to_handle(event);

   return VK_SUCCESS;
}

void
tu_DestroyEvent(VkDevice _device,
                VkEvent _event,
                const VkAllocationCallbacks *pAllocator)
{
   TU_FROM_HANDLE(tu_device, device, _device);
   TU_FROM_HANDLE(tu_event, event, _event);

   if (!event)
      return;

   vk_free2(&device->alloc, pAllocator, event);
}

VkResult
tu_GetEventStatus(VkDevice _device, VkEvent _event)
{
   TU_FROM_HANDLE(tu_event, event, _event);

   if (*event->map == 1)
      return VK_EVENT_SET;
   return VK_EVENT_RESET;
}

VkResult
tu_SetEvent(VkDevice _device, VkEvent _event)
{
   TU_FROM_HANDLE(tu_event, event, _event);
   *event->map = 1;

   return VK_SUCCESS;
}

VkResult
tu_ResetEvent(VkDevice _device, VkEvent _event)
{
   TU_FROM_HANDLE(tu_event, event, _event);
   *event->map = 0;

   return VK_SUCCESS;
}
VkResult
tu_CreateBuffer(VkDevice _device,
                const VkBufferCreateInfo *pCreateInfo,
                const VkAllocationCallbacks *pAllocator,
                VkBuffer *pBuffer)
{
   TU_FROM_HANDLE(tu_device, device, _device);
   struct tu_buffer *buffer;

   assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO);

   buffer = vk_alloc2(&device->alloc, pAllocator, sizeof(*buffer), 8,
                      VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (buffer == NULL)
      return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);

   buffer->size = pCreateInfo->size;
   buffer->usage = pCreateInfo->usage;
   buffer->flags = pCreateInfo->flags;

   *pBuffer = tu_buffer_to_handle(buffer);

   return VK_SUCCESS;
}

void
tu_DestroyBuffer(VkDevice _device,
                 VkBuffer _buffer,
                 const VkAllocationCallbacks *pAllocator)
{
   TU_FROM_HANDLE(tu_device, device, _device);
   TU_FROM_HANDLE(tu_buffer, buffer, _buffer);

   if (!buffer)
      return;

   vk_free2(&device->alloc, pAllocator, buffer);
}
static uint32_t
tu_surface_max_layer_count(struct tu_image_view *iview)
{
   return iview->type == VK_IMAGE_VIEW_TYPE_3D
             ? iview->extent.depth
             : (iview->base_layer + iview->layer_count);
}

VkResult
tu_CreateFramebuffer(VkDevice _device,
                     const VkFramebufferCreateInfo *pCreateInfo,
                     const VkAllocationCallbacks *pAllocator,
                     VkFramebuffer *pFramebuffer)
{
   TU_FROM_HANDLE(tu_device, device, _device);
   struct tu_framebuffer *framebuffer;

   assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO);

   size_t size =
      sizeof(*framebuffer) +
      sizeof(struct tu_attachment_info) * pCreateInfo->attachmentCount;
   framebuffer = vk_alloc2(
      &device->alloc, pAllocator, size, 8, VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (framebuffer == NULL)
      return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);

   framebuffer->attachment_count = pCreateInfo->attachmentCount;
   framebuffer->width = pCreateInfo->width;
   framebuffer->height = pCreateInfo->height;
   framebuffer->layers = pCreateInfo->layers;
   for (uint32_t i = 0; i < pCreateInfo->attachmentCount; i++) {
      VkImageView _iview = pCreateInfo->pAttachments[i];
      struct tu_image_view *iview = tu_image_view_from_handle(_iview);
      framebuffer->attachments[i].attachment = iview;

      framebuffer->width = MIN2(framebuffer->width, iview->extent.width);
      framebuffer->height = MIN2(framebuffer->height, iview->extent.height);
      framebuffer->layers =
         MIN2(framebuffer->layers, tu_surface_max_layer_count(iview));
   }

   *pFramebuffer = tu_framebuffer_to_handle(framebuffer);
   return VK_SUCCESS;
}

void
tu_DestroyFramebuffer(VkDevice _device,
                      VkFramebuffer _fb,
                      const VkAllocationCallbacks *pAllocator)
{
   TU_FROM_HANDLE(tu_device, device, _device);
   TU_FROM_HANDLE(tu_framebuffer, fb, _fb);

   if (!fb)
      return;

   vk_free2(&device->alloc, pAllocator, fb);
}
static void
tu_init_sampler(struct tu_device *device,
                struct tu_sampler *sampler,
                const VkSamplerCreateInfo *pCreateInfo)
{
}

VkResult
tu_CreateSampler(VkDevice _device,
                 const VkSamplerCreateInfo *pCreateInfo,
                 const VkAllocationCallbacks *pAllocator,
                 VkSampler *pSampler)
{
   TU_FROM_HANDLE(tu_device, device, _device);
   struct tu_sampler *sampler;

   assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_SAMPLER_CREATE_INFO);

   sampler = vk_alloc2(&device->alloc, pAllocator, sizeof(*sampler), 8,
                       VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (!sampler)
      return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);

   tu_init_sampler(device, sampler, pCreateInfo);
   *pSampler = tu_sampler_to_handle(sampler);

   return VK_SUCCESS;
}

void
tu_DestroySampler(VkDevice _device,
                  VkSampler _sampler,
                  const VkAllocationCallbacks *pAllocator)
{
   TU_FROM_HANDLE(tu_device, device, _device);
   TU_FROM_HANDLE(tu_sampler, sampler, _sampler);

   if (!sampler)
      return;

   vk_free2(&device->alloc, pAllocator, sampler);
}
/* vk_icd.h does not declare this function, so we declare it here to
 * suppress Wmissing-prototypes.
 */
PUBLIC VKAPI_ATTR VkResult VKAPI_CALL
vk_icdNegotiateLoaderICDInterfaceVersion(uint32_t *pSupportedVersion);

PUBLIC VKAPI_ATTR VkResult VKAPI_CALL
vk_icdNegotiateLoaderICDInterfaceVersion(uint32_t *pSupportedVersion)
{
   /* For the full details on loader interface versioning, see
    * <https://github.com/KhronosGroup/Vulkan-LoaderAndValidationLayers/blob/master/loader/LoaderAndLayerInterface.md>.
    * What follows is a condensed summary, to help you navigate the large and
    * confusing official doc.
    *
    *   - Loader interface v0 is incompatible with later versions. We don't
    *     support it.
    *
    *   - In loader interface v1:
    *       - The first ICD entrypoint called by the loader is
    *         vk_icdGetInstanceProcAddr(). The ICD must statically expose this
    *         entrypoint.
    *       - The ICD must statically expose no other Vulkan symbol unless it
    *         is linked with -Bsymbolic.
    *       - Each dispatchable Vulkan handle created by the ICD must be
    *         a pointer to a struct whose first member is VK_LOADER_DATA. The
    *         ICD must initialize VK_LOADER_DATA.loadMagic to ICD_LOADER_MAGIC.
    *       - The loader implements vkCreate{PLATFORM}SurfaceKHR() and
    *         vkDestroySurfaceKHR(). The ICD must be capable of working with
    *         such loader-managed surfaces.
    *
    *   - Loader interface v2 differs from v1 in:
    *       - The first ICD entrypoint called by the loader is
    *         vk_icdNegotiateLoaderICDInterfaceVersion(). The ICD must
    *         statically expose this entrypoint.
    *
    *   - Loader interface v3 differs from v2 in:
    *       - The ICD must implement vkCreate{PLATFORM}SurfaceKHR(),
    *         vkDestroySurfaceKHR(), and other API which uses VKSurfaceKHR,
    *         because the loader no longer does so.
    */
   *pSupportedVersion = MIN2(*pSupportedVersion, 3u);
   return VK_SUCCESS;
}
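/* Illustrative outcome of the clamp above: a loader offering interface
 * version 5 is answered with 3, while a loader offering 2 keeps 2 and the
 * driver is then treated as a v2 ICD.
 */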
void
tu_GetPhysicalDeviceExternalSemaphoreProperties(
   VkPhysicalDevice physicalDevice,
   const VkPhysicalDeviceExternalSemaphoreInfoKHR *pExternalSemaphoreInfo,
   VkExternalSemaphorePropertiesKHR *pExternalSemaphoreProperties)
{
   pExternalSemaphoreProperties->exportFromImportedHandleTypes = 0;
   pExternalSemaphoreProperties->compatibleHandleTypes = 0;
   pExternalSemaphoreProperties->externalSemaphoreFeatures = 0;
}

void
tu_GetPhysicalDeviceExternalFenceProperties(
   VkPhysicalDevice physicalDevice,
   const VkPhysicalDeviceExternalFenceInfoKHR *pExternalFenceInfo,
   VkExternalFencePropertiesKHR *pExternalFenceProperties)
{
   pExternalFenceProperties->exportFromImportedHandleTypes = 0;
   pExternalFenceProperties->compatibleHandleTypes = 0;
   pExternalFenceProperties->externalFenceFeatures = 0;
}
VkResult
tu_CreateDebugReportCallbackEXT(
   VkInstance _instance,
   const VkDebugReportCallbackCreateInfoEXT *pCreateInfo,
   const VkAllocationCallbacks *pAllocator,
   VkDebugReportCallbackEXT *pCallback)
{
   TU_FROM_HANDLE(tu_instance, instance, _instance);
   return vk_create_debug_report_callback(&instance->debug_report_callbacks,
                                          pCreateInfo, pAllocator,
                                          &instance->alloc, pCallback);
}

void
tu_DestroyDebugReportCallbackEXT(VkInstance _instance,
                                 VkDebugReportCallbackEXT _callback,
                                 const VkAllocationCallbacks *pAllocator)
{
   TU_FROM_HANDLE(tu_instance, instance, _instance);
   vk_destroy_debug_report_callback(&instance->debug_report_callbacks,
                                    _callback, pAllocator, &instance->alloc);
}

void
tu_DebugReportMessageEXT(VkInstance _instance,
                         VkDebugReportFlagsEXT flags,
                         VkDebugReportObjectTypeEXT objectType,
                         uint64_t object,
                         size_t location,
                         int32_t messageCode,
                         const char *pLayerPrefix,
                         const char *pMessage)
{
   TU_FROM_HANDLE(tu_instance, instance, _instance);
   vk_debug_report(&instance->debug_report_callbacks, flags, objectType,
                   object, location, messageCode, pLayerPrefix, pMessage);
}
void
tu_GetDeviceGroupPeerMemoryFeatures(
   VkDevice device,
   uint32_t heapIndex,
   uint32_t localDeviceIndex,
   uint32_t remoteDeviceIndex,
   VkPeerMemoryFeatureFlags *pPeerMemoryFeatures)
{
   assert(localDeviceIndex == remoteDeviceIndex);

   *pPeerMemoryFeatures = VK_PEER_MEMORY_FEATURE_COPY_SRC_BIT |
                          VK_PEER_MEMORY_FEATURE_COPY_DST_BIT |
                          VK_PEER_MEMORY_FEATURE_GENERIC_SRC_BIT |
                          VK_PEER_MEMORY_FEATURE_GENERIC_DST_BIT;
}