/*
 * Copyright © 2016 Red Hat.
3 * Copyright © 2016 Bas Nieuwenhuizen
5 * based in part on anv driver which is:
6 * Copyright © 2015 Intel Corporation
8 * Permission is hereby granted, free of charge, to any person obtaining a
9 * copy of this software and associated documentation files (the "Software"),
10 * to deal in the Software without restriction, including without limitation
11 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
12 * and/or sell copies of the Software, and to permit persons to whom the
13 * Software is furnished to do so, subject to the following conditions:
15 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
19 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
20 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
21 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
22 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
23 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
28 #include "tu_private.h"
29 #include "util/debug.h"
30 #include "util/disk_cache.h"
31 #include "util/strtod.h"
32 #include "vk_format.h"
37 #include <sys/sysinfo.h>
42 tu_device_get_cache_uuid(uint16_t family
, void *uuid
)
44 uint32_t mesa_timestamp
;
46 memset(uuid
, 0, VK_UUID_SIZE
);
47 if (!disk_cache_get_function_timestamp(tu_device_get_cache_uuid
,
51 memcpy(uuid
, &mesa_timestamp
, 4);
52 memcpy((char *)uuid
+ 4, &f
, 2);
53 snprintf((char *)uuid
+ 6, VK_UUID_SIZE
- 10, "tu");
58 tu_get_driver_uuid(void *uuid
)
60 memset(uuid
, 0, VK_UUID_SIZE
);
64 tu_get_device_uuid(void *uuid
)
70 tu_physical_device_init(struct tu_physical_device
*device
,
71 struct tu_instance
*instance
,
72 drmDevicePtr drm_device
)
74 const char *path
= drm_device
->nodes
[DRM_NODE_RENDER
];
76 drmVersionPtr version
;
79 struct fd_pipe
*tmp_pipe
= NULL
;
82 fd
= open(path
, O_RDWR
| O_CLOEXEC
);
84 if (instance
->debug_flags
& TU_DEBUG_STARTUP
)
85 tu_logi("Could not open device '%s'", path
);
87 return vk_error(instance
, VK_ERROR_INCOMPATIBLE_DRIVER
);
90 version
= drmGetVersion(fd
);
94 if (instance
->debug_flags
& TU_DEBUG_STARTUP
)
95 tu_logi("Could not get the kernel driver version for device '%s'",
98 return vk_errorf(instance
,
99 VK_ERROR_INCOMPATIBLE_DRIVER
,
100 "failed to get version %s: %m",
104 if (strcmp(version
->name
, "msm")) {
105 drmFreeVersion(version
);
110 if (instance
->debug_flags
& TU_DEBUG_STARTUP
)
111 tu_logi("Device '%s' is not using the msm kernel driver.", path
);
113 return VK_ERROR_INCOMPATIBLE_DRIVER
;
115 drmFreeVersion(version
);
117 if (instance
->debug_flags
& TU_DEBUG_STARTUP
)
118 tu_logi("Found compatible device '%s'.", path
);
120 device
->_loader_data
.loaderMagic
= ICD_LOADER_MAGIC
;
121 device
->instance
= instance
;
122 assert(strlen(path
) < ARRAY_SIZE(device
->path
));
123 strncpy(device
->path
, path
, ARRAY_SIZE(device
->path
));
125 if (instance
->enabled_extensions
.KHR_display
) {
126 master_fd
= open(drm_device
->nodes
[DRM_NODE_PRIMARY
], O_RDWR
| O_CLOEXEC
);
127 if (master_fd
>= 0) {
128 /* TODO: free master_fd is accel is not working? */
132 device
->master_fd
= master_fd
;
133 device
->local_fd
= fd
;
135 device
->drm_device
= fd_device_new_dup(fd
);
136 if (!device
->drm_device
) {
138 instance
, VK_ERROR_INITIALIZATION_FAILED
, "could not create the libdrm device");
142 tmp_pipe
= fd_pipe_new(device
->drm_device
, FD_PIPE_3D
);
145 instance
, VK_ERROR_INITIALIZATION_FAILED
, "could not open the 3D pipe");
149 if (fd_pipe_get_param(tmp_pipe
, FD_GPU_ID
, &val
)) {
151 instance
, VK_ERROR_INITIALIZATION_FAILED
, "could not get GPU ID");
154 device
->gpu_id
= val
;
156 if (fd_pipe_get_param(tmp_pipe
, FD_GMEM_SIZE
, &val
)) {
158 instance
, VK_ERROR_INITIALIZATION_FAILED
, "could not get GMEM size");
161 device
->gmem_size
= val
;
163 fd_pipe_del(tmp_pipe
);
166 memset(device
->name
, 0, sizeof(device
->name
));
167 sprintf(device
->name
, "FD%d", device
->gpu_id
);
169 switch(device
->gpu_id
) {
173 if (instance
->debug_flags
& TU_DEBUG_STARTUP
)
174 tu_logi("Device '%s' is not supported.", device
->name
);
176 instance
, VK_ERROR_INITIALIZATION_FAILED
, "unsupported device");
179 if (tu_device_get_cache_uuid(device
->gpu_id
, device
->cache_uuid
)) {
181 instance
, VK_ERROR_INITIALIZATION_FAILED
, "cannot generate UUID");
185 /* The gpu id is already embedded in the uuid so we just pass "tu"
186 * when creating the cache.
188 char buf
[VK_UUID_SIZE
* 2 + 1];
189 disk_cache_format_hex_id(buf
, device
->cache_uuid
, VK_UUID_SIZE
* 2);
190 device
->disk_cache
= disk_cache_create(device
->name
, buf
, 0);
193 "WARNING: tu is not a conformant vulkan implementation, "
194 "testing use only.\n");
196 tu_get_driver_uuid(&device
->device_uuid
);
197 tu_get_device_uuid(&device
->device_uuid
);
199 tu_fill_device_extension_table(device
, &device
->supported_extensions
);
201 if (result
!= VK_SUCCESS
) {
202 vk_error(instance
, result
);
210 fd_pipe_del(tmp_pipe
);
211 if (device
->drm_device
)
212 fd_device_del(device
->drm_device
);
220 tu_physical_device_finish(struct tu_physical_device
*device
)
222 disk_cache_destroy(device
->disk_cache
);
223 close(device
->local_fd
);
224 if (device
->master_fd
!= -1)
225 close(device
->master_fd
);
229 default_alloc_func(void *pUserData
,
232 VkSystemAllocationScope allocationScope
)
238 default_realloc_func(void *pUserData
,
242 VkSystemAllocationScope allocationScope
)
244 return realloc(pOriginal
, size
);
/* Fallback VkAllocationCallbacks pfnFree: plain free. */
static void
default_free_func(void *pUserData, void *pMemory)
{
   free(pMemory);
}
253 static const VkAllocationCallbacks default_alloc
= {
255 .pfnAllocation
= default_alloc_func
,
256 .pfnReallocation
= default_realloc_func
,
257 .pfnFree
= default_free_func
,
260 static const struct debug_control tu_debug_options
[] = { { "startup",
265 tu_get_debug_option_name(int id
)
267 assert(id
< ARRAY_SIZE(tu_debug_options
) - 1);
268 return tu_debug_options
[id
].string
;
272 tu_get_instance_extension_index(const char *name
)
274 for (unsigned i
= 0; i
< TU_INSTANCE_EXTENSION_COUNT
; ++i
) {
275 if (strcmp(name
, tu_instance_extensions
[i
].extensionName
) == 0)
282 tu_CreateInstance(const VkInstanceCreateInfo
*pCreateInfo
,
283 const VkAllocationCallbacks
*pAllocator
,
284 VkInstance
*pInstance
)
286 struct tu_instance
*instance
;
289 assert(pCreateInfo
->sType
== VK_STRUCTURE_TYPE_INSTANCE_CREATE_INFO
);
291 uint32_t client_version
;
292 if (pCreateInfo
->pApplicationInfo
&&
293 pCreateInfo
->pApplicationInfo
->apiVersion
!= 0) {
294 client_version
= pCreateInfo
->pApplicationInfo
->apiVersion
;
296 tu_EnumerateInstanceVersion(&client_version
);
299 instance
= vk_zalloc2(&default_alloc
,
303 VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE
);
305 return vk_error(NULL
, VK_ERROR_OUT_OF_HOST_MEMORY
);
307 instance
->_loader_data
.loaderMagic
= ICD_LOADER_MAGIC
;
310 instance
->alloc
= *pAllocator
;
312 instance
->alloc
= default_alloc
;
314 instance
->api_version
= client_version
;
315 instance
->physical_device_count
= -1;
317 instance
->debug_flags
=
318 parse_debug_string(getenv("TU_DEBUG"), tu_debug_options
);
320 if (instance
->debug_flags
& TU_DEBUG_STARTUP
)
321 tu_logi("Created an instance");
323 for (uint32_t i
= 0; i
< pCreateInfo
->enabledExtensionCount
; i
++) {
324 const char *ext_name
= pCreateInfo
->ppEnabledExtensionNames
[i
];
325 int index
= tu_get_instance_extension_index(ext_name
);
327 if (index
< 0 || !tu_supported_instance_extensions
.extensions
[index
]) {
328 vk_free2(&default_alloc
, pAllocator
, instance
);
329 return vk_error(instance
, VK_ERROR_EXTENSION_NOT_PRESENT
);
332 instance
->enabled_extensions
.extensions
[index
] = true;
335 result
= vk_debug_report_instance_init(&instance
->debug_report_callbacks
);
336 if (result
!= VK_SUCCESS
) {
337 vk_free2(&default_alloc
, pAllocator
, instance
);
338 return vk_error(instance
, result
);
343 VG(VALGRIND_CREATE_MEMPOOL(instance
, 0, false));
345 *pInstance
= tu_instance_to_handle(instance
);
351 tu_DestroyInstance(VkInstance _instance
,
352 const VkAllocationCallbacks
*pAllocator
)
354 TU_FROM_HANDLE(tu_instance
, instance
, _instance
);
359 for (int i
= 0; i
< instance
->physical_device_count
; ++i
) {
360 tu_physical_device_finish(instance
->physical_devices
+ i
);
363 VG(VALGRIND_DESTROY_MEMPOOL(instance
));
367 vk_debug_report_instance_destroy(&instance
->debug_report_callbacks
);
369 vk_free(&instance
->alloc
, instance
);
373 tu_enumerate_devices(struct tu_instance
*instance
)
375 /* TODO: Check for more devices ? */
376 drmDevicePtr devices
[8];
377 VkResult result
= VK_ERROR_INCOMPATIBLE_DRIVER
;
380 instance
->physical_device_count
= 0;
382 max_devices
= drmGetDevices2(0, devices
, ARRAY_SIZE(devices
));
384 if (instance
->debug_flags
& TU_DEBUG_STARTUP
)
385 tu_logi("Found %d drm nodes", max_devices
);
388 return vk_error(instance
, VK_ERROR_INCOMPATIBLE_DRIVER
);
390 for (unsigned i
= 0; i
< (unsigned)max_devices
; i
++) {
391 if (devices
[i
]->available_nodes
& 1 << DRM_NODE_RENDER
&&
392 devices
[i
]->bustype
== DRM_BUS_PLATFORM
) {
394 result
= tu_physical_device_init(instance
->physical_devices
+
395 instance
->physical_device_count
,
398 if (result
== VK_SUCCESS
)
399 ++instance
->physical_device_count
;
400 else if (result
!= VK_ERROR_INCOMPATIBLE_DRIVER
)
404 drmFreeDevices(devices
, max_devices
);
410 tu_EnumeratePhysicalDevices(VkInstance _instance
,
411 uint32_t *pPhysicalDeviceCount
,
412 VkPhysicalDevice
*pPhysicalDevices
)
414 TU_FROM_HANDLE(tu_instance
, instance
, _instance
);
417 if (instance
->physical_device_count
< 0) {
418 result
= tu_enumerate_devices(instance
);
419 if (result
!= VK_SUCCESS
&& result
!= VK_ERROR_INCOMPATIBLE_DRIVER
)
423 if (!pPhysicalDevices
) {
424 *pPhysicalDeviceCount
= instance
->physical_device_count
;
426 *pPhysicalDeviceCount
=
427 MIN2(*pPhysicalDeviceCount
, instance
->physical_device_count
);
428 for (unsigned i
= 0; i
< *pPhysicalDeviceCount
; ++i
)
429 pPhysicalDevices
[i
] =
430 tu_physical_device_to_handle(instance
->physical_devices
+ i
);
433 return *pPhysicalDeviceCount
< instance
->physical_device_count
439 tu_EnumeratePhysicalDeviceGroups(
440 VkInstance _instance
,
441 uint32_t *pPhysicalDeviceGroupCount
,
442 VkPhysicalDeviceGroupProperties
*pPhysicalDeviceGroupProperties
)
444 TU_FROM_HANDLE(tu_instance
, instance
, _instance
);
447 if (instance
->physical_device_count
< 0) {
448 result
= tu_enumerate_devices(instance
);
449 if (result
!= VK_SUCCESS
&& result
!= VK_ERROR_INCOMPATIBLE_DRIVER
)
453 if (!pPhysicalDeviceGroupProperties
) {
454 *pPhysicalDeviceGroupCount
= instance
->physical_device_count
;
456 *pPhysicalDeviceGroupCount
=
457 MIN2(*pPhysicalDeviceGroupCount
, instance
->physical_device_count
);
458 for (unsigned i
= 0; i
< *pPhysicalDeviceGroupCount
; ++i
) {
459 pPhysicalDeviceGroupProperties
[i
].physicalDeviceCount
= 1;
460 pPhysicalDeviceGroupProperties
[i
].physicalDevices
[0] =
461 tu_physical_device_to_handle(instance
->physical_devices
+ i
);
462 pPhysicalDeviceGroupProperties
[i
].subsetAllocation
= false;
465 return *pPhysicalDeviceGroupCount
< instance
->physical_device_count
471 tu_GetPhysicalDeviceFeatures(VkPhysicalDevice physicalDevice
,
472 VkPhysicalDeviceFeatures
*pFeatures
)
474 memset(pFeatures
, 0, sizeof(*pFeatures
));
476 *pFeatures
= (VkPhysicalDeviceFeatures
){
477 .robustBufferAccess
= false,
478 .fullDrawIndexUint32
= false,
479 .imageCubeArray
= false,
480 .independentBlend
= false,
481 .geometryShader
= false,
482 .tessellationShader
= false,
483 .sampleRateShading
= false,
484 .dualSrcBlend
= false,
486 .multiDrawIndirect
= false,
487 .drawIndirectFirstInstance
= false,
489 .depthBiasClamp
= false,
490 .fillModeNonSolid
= false,
491 .depthBounds
= false,
493 .largePoints
= false,
495 .multiViewport
= false,
496 .samplerAnisotropy
= false,
497 .textureCompressionETC2
= false,
498 .textureCompressionASTC_LDR
= false,
499 .textureCompressionBC
= false,
500 .occlusionQueryPrecise
= false,
501 .pipelineStatisticsQuery
= false,
502 .vertexPipelineStoresAndAtomics
= false,
503 .fragmentStoresAndAtomics
= false,
504 .shaderTessellationAndGeometryPointSize
= false,
505 .shaderImageGatherExtended
= false,
506 .shaderStorageImageExtendedFormats
= false,
507 .shaderStorageImageMultisample
= false,
508 .shaderUniformBufferArrayDynamicIndexing
= false,
509 .shaderSampledImageArrayDynamicIndexing
= false,
510 .shaderStorageBufferArrayDynamicIndexing
= false,
511 .shaderStorageImageArrayDynamicIndexing
= false,
512 .shaderStorageImageReadWithoutFormat
= false,
513 .shaderStorageImageWriteWithoutFormat
= false,
514 .shaderClipDistance
= false,
515 .shaderCullDistance
= false,
516 .shaderFloat64
= false,
517 .shaderInt64
= false,
518 .shaderInt16
= false,
519 .sparseBinding
= false,
520 .variableMultisampleRate
= false,
521 .inheritedQueries
= false,
526 tu_GetPhysicalDeviceFeatures2(VkPhysicalDevice physicalDevice
,
527 VkPhysicalDeviceFeatures2KHR
*pFeatures
)
529 vk_foreach_struct(ext
, pFeatures
->pNext
)
531 switch (ext
->sType
) {
532 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VARIABLE_POINTER_FEATURES_KHR
: {
533 VkPhysicalDeviceVariablePointerFeaturesKHR
*features
= (void *)ext
;
534 features
->variablePointersStorageBuffer
= false;
535 features
->variablePointers
= false;
538 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MULTIVIEW_FEATURES_KHR
: {
539 VkPhysicalDeviceMultiviewFeaturesKHR
*features
=
540 (VkPhysicalDeviceMultiviewFeaturesKHR
*)ext
;
541 features
->multiview
= false;
542 features
->multiviewGeometryShader
= false;
543 features
->multiviewTessellationShader
= false;
546 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_DRAW_PARAMETER_FEATURES
: {
547 VkPhysicalDeviceShaderDrawParameterFeatures
*features
=
548 (VkPhysicalDeviceShaderDrawParameterFeatures
*)ext
;
549 features
->shaderDrawParameters
= false;
552 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROTECTED_MEMORY_FEATURES
: {
553 VkPhysicalDeviceProtectedMemoryFeatures
*features
=
554 (VkPhysicalDeviceProtectedMemoryFeatures
*)ext
;
555 features
->protectedMemory
= false;
558 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_16BIT_STORAGE_FEATURES
: {
559 VkPhysicalDevice16BitStorageFeatures
*features
=
560 (VkPhysicalDevice16BitStorageFeatures
*)ext
;
561 features
->storageBuffer16BitAccess
= false;
562 features
->uniformAndStorageBuffer16BitAccess
= false;
563 features
->storagePushConstant16
= false;
564 features
->storageInputOutput16
= false;
567 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SAMPLER_YCBCR_CONVERSION_FEATURES
: {
568 VkPhysicalDeviceSamplerYcbcrConversionFeatures
*features
=
569 (VkPhysicalDeviceSamplerYcbcrConversionFeatures
*)ext
;
570 features
->samplerYcbcrConversion
= false;
573 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DESCRIPTOR_INDEXING_FEATURES_EXT
: {
574 VkPhysicalDeviceDescriptorIndexingFeaturesEXT
*features
=
575 (VkPhysicalDeviceDescriptorIndexingFeaturesEXT
*)ext
;
576 features
->shaderInputAttachmentArrayDynamicIndexing
= false;
577 features
->shaderUniformTexelBufferArrayDynamicIndexing
= false;
578 features
->shaderStorageTexelBufferArrayDynamicIndexing
= false;
579 features
->shaderUniformBufferArrayNonUniformIndexing
= false;
580 features
->shaderSampledImageArrayNonUniformIndexing
= false;
581 features
->shaderStorageBufferArrayNonUniformIndexing
= false;
582 features
->shaderStorageImageArrayNonUniformIndexing
= false;
583 features
->shaderInputAttachmentArrayNonUniformIndexing
= false;
584 features
->shaderUniformTexelBufferArrayNonUniformIndexing
= false;
585 features
->shaderStorageTexelBufferArrayNonUniformIndexing
= false;
586 features
->descriptorBindingUniformBufferUpdateAfterBind
= false;
587 features
->descriptorBindingSampledImageUpdateAfterBind
= false;
588 features
->descriptorBindingStorageImageUpdateAfterBind
= false;
589 features
->descriptorBindingStorageBufferUpdateAfterBind
= false;
590 features
->descriptorBindingUniformTexelBufferUpdateAfterBind
= false;
591 features
->descriptorBindingStorageTexelBufferUpdateAfterBind
= false;
592 features
->descriptorBindingUpdateUnusedWhilePending
= false;
593 features
->descriptorBindingPartiallyBound
= false;
594 features
->descriptorBindingVariableDescriptorCount
= false;
595 features
->runtimeDescriptorArray
= false;
598 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_CONDITIONAL_RENDERING_FEATURES_EXT
: {
599 VkPhysicalDeviceConditionalRenderingFeaturesEXT
*features
=
600 (VkPhysicalDeviceConditionalRenderingFeaturesEXT
*)ext
;
601 features
->conditionalRendering
= false;
602 features
->inheritedConditionalRendering
= false;
609 return tu_GetPhysicalDeviceFeatures(physicalDevice
, &pFeatures
->features
);
613 tu_GetPhysicalDeviceProperties(VkPhysicalDevice physicalDevice
,
614 VkPhysicalDeviceProperties
*pProperties
)
616 TU_FROM_HANDLE(tu_physical_device
, pdevice
, physicalDevice
);
617 VkSampleCountFlags sample_counts
= 0xf;
619 /* make sure that the entire descriptor set is addressable with a signed
620 * 32-bit int. So the sum of all limits scaled by descriptor size has to
621 * be at most 2 GiB. the combined image & samples object count as one of
622 * both. This limit is for the pipeline layout, not for the set layout, but
623 * there is no set limit, so we just set a pipeline limit. I don't think
624 * any app is going to hit this soon. */
625 size_t max_descriptor_set_size
=
626 ((1ull << 31) - 16 * MAX_DYNAMIC_BUFFERS
) /
627 (32 /* uniform buffer, 32 due to potential space wasted on alignment */ +
628 32 /* storage buffer, 32 due to potential space wasted on alignment */ +
629 32 /* sampler, largest when combined with image */ +
630 64 /* sampled image */ + 64 /* storage image */);
632 VkPhysicalDeviceLimits limits
= {
633 .maxImageDimension1D
= (1 << 14),
634 .maxImageDimension2D
= (1 << 14),
635 .maxImageDimension3D
= (1 << 11),
636 .maxImageDimensionCube
= (1 << 14),
637 .maxImageArrayLayers
= (1 << 11),
638 .maxTexelBufferElements
= 128 * 1024 * 1024,
639 .maxUniformBufferRange
= UINT32_MAX
,
640 .maxStorageBufferRange
= UINT32_MAX
,
641 .maxPushConstantsSize
= MAX_PUSH_CONSTANTS_SIZE
,
642 .maxMemoryAllocationCount
= UINT32_MAX
,
643 .maxSamplerAllocationCount
= 64 * 1024,
644 .bufferImageGranularity
= 64, /* A cache line */
645 .sparseAddressSpaceSize
= 0xffffffffu
, /* buffer max size */
646 .maxBoundDescriptorSets
= MAX_SETS
,
647 .maxPerStageDescriptorSamplers
= max_descriptor_set_size
,
648 .maxPerStageDescriptorUniformBuffers
= max_descriptor_set_size
,
649 .maxPerStageDescriptorStorageBuffers
= max_descriptor_set_size
,
650 .maxPerStageDescriptorSampledImages
= max_descriptor_set_size
,
651 .maxPerStageDescriptorStorageImages
= max_descriptor_set_size
,
652 .maxPerStageDescriptorInputAttachments
= max_descriptor_set_size
,
653 .maxPerStageResources
= max_descriptor_set_size
,
654 .maxDescriptorSetSamplers
= max_descriptor_set_size
,
655 .maxDescriptorSetUniformBuffers
= max_descriptor_set_size
,
656 .maxDescriptorSetUniformBuffersDynamic
= MAX_DYNAMIC_UNIFORM_BUFFERS
,
657 .maxDescriptorSetStorageBuffers
= max_descriptor_set_size
,
658 .maxDescriptorSetStorageBuffersDynamic
= MAX_DYNAMIC_STORAGE_BUFFERS
,
659 .maxDescriptorSetSampledImages
= max_descriptor_set_size
,
660 .maxDescriptorSetStorageImages
= max_descriptor_set_size
,
661 .maxDescriptorSetInputAttachments
= max_descriptor_set_size
,
662 .maxVertexInputAttributes
= 32,
663 .maxVertexInputBindings
= 32,
664 .maxVertexInputAttributeOffset
= 2047,
665 .maxVertexInputBindingStride
= 2048,
666 .maxVertexOutputComponents
= 128,
667 .maxTessellationGenerationLevel
= 64,
668 .maxTessellationPatchSize
= 32,
669 .maxTessellationControlPerVertexInputComponents
= 128,
670 .maxTessellationControlPerVertexOutputComponents
= 128,
671 .maxTessellationControlPerPatchOutputComponents
= 120,
672 .maxTessellationControlTotalOutputComponents
= 4096,
673 .maxTessellationEvaluationInputComponents
= 128,
674 .maxTessellationEvaluationOutputComponents
= 128,
675 .maxGeometryShaderInvocations
= 127,
676 .maxGeometryInputComponents
= 64,
677 .maxGeometryOutputComponents
= 128,
678 .maxGeometryOutputVertices
= 256,
679 .maxGeometryTotalOutputComponents
= 1024,
680 .maxFragmentInputComponents
= 128,
681 .maxFragmentOutputAttachments
= 8,
682 .maxFragmentDualSrcAttachments
= 1,
683 .maxFragmentCombinedOutputResources
= 8,
684 .maxComputeSharedMemorySize
= 32768,
685 .maxComputeWorkGroupCount
= { 65535, 65535, 65535 },
686 .maxComputeWorkGroupInvocations
= 2048,
687 .maxComputeWorkGroupSize
= { 2048, 2048, 2048 },
688 .subPixelPrecisionBits
= 4 /* FIXME */,
689 .subTexelPrecisionBits
= 4 /* FIXME */,
690 .mipmapPrecisionBits
= 4 /* FIXME */,
691 .maxDrawIndexedIndexValue
= UINT32_MAX
,
692 .maxDrawIndirectCount
= UINT32_MAX
,
693 .maxSamplerLodBias
= 16,
694 .maxSamplerAnisotropy
= 16,
695 .maxViewports
= MAX_VIEWPORTS
,
696 .maxViewportDimensions
= { (1 << 14), (1 << 14) },
697 .viewportBoundsRange
= { INT16_MIN
, INT16_MAX
},
698 .viewportSubPixelBits
= 8,
699 .minMemoryMapAlignment
= 4096, /* A page */
700 .minTexelBufferOffsetAlignment
= 1,
701 .minUniformBufferOffsetAlignment
= 4,
702 .minStorageBufferOffsetAlignment
= 4,
703 .minTexelOffset
= -32,
704 .maxTexelOffset
= 31,
705 .minTexelGatherOffset
= -32,
706 .maxTexelGatherOffset
= 31,
707 .minInterpolationOffset
= -2,
708 .maxInterpolationOffset
= 2,
709 .subPixelInterpolationOffsetBits
= 8,
710 .maxFramebufferWidth
= (1 << 14),
711 .maxFramebufferHeight
= (1 << 14),
712 .maxFramebufferLayers
= (1 << 10),
713 .framebufferColorSampleCounts
= sample_counts
,
714 .framebufferDepthSampleCounts
= sample_counts
,
715 .framebufferStencilSampleCounts
= sample_counts
,
716 .framebufferNoAttachmentsSampleCounts
= sample_counts
,
717 .maxColorAttachments
= MAX_RTS
,
718 .sampledImageColorSampleCounts
= sample_counts
,
719 .sampledImageIntegerSampleCounts
= VK_SAMPLE_COUNT_1_BIT
,
720 .sampledImageDepthSampleCounts
= sample_counts
,
721 .sampledImageStencilSampleCounts
= sample_counts
,
722 .storageImageSampleCounts
= VK_SAMPLE_COUNT_1_BIT
,
723 .maxSampleMaskWords
= 1,
724 .timestampComputeAndGraphics
= true,
725 .timestampPeriod
= 1,
726 .maxClipDistances
= 8,
727 .maxCullDistances
= 8,
728 .maxCombinedClipAndCullDistances
= 8,
729 .discreteQueuePriorities
= 1,
730 .pointSizeRange
= { 0.125, 255.875 },
731 .lineWidthRange
= { 0.0, 7.9921875 },
732 .pointSizeGranularity
= (1.0 / 8.0),
733 .lineWidthGranularity
= (1.0 / 128.0),
734 .strictLines
= false, /* FINISHME */
735 .standardSampleLocations
= true,
736 .optimalBufferCopyOffsetAlignment
= 128,
737 .optimalBufferCopyRowPitchAlignment
= 128,
738 .nonCoherentAtomSize
= 64,
741 *pProperties
= (VkPhysicalDeviceProperties
){
742 .apiVersion
= tu_physical_device_api_version(pdevice
),
743 .driverVersion
= vk_get_driver_version(),
744 .vendorID
= 0, /* TODO */
746 .deviceType
= VK_PHYSICAL_DEVICE_TYPE_INTEGRATED_GPU
,
748 .sparseProperties
= { 0 },
751 strcpy(pProperties
->deviceName
, pdevice
->name
);
752 memcpy(pProperties
->pipelineCacheUUID
, pdevice
->cache_uuid
, VK_UUID_SIZE
);
756 tu_GetPhysicalDeviceProperties2(VkPhysicalDevice physicalDevice
,
757 VkPhysicalDeviceProperties2KHR
*pProperties
)
759 TU_FROM_HANDLE(tu_physical_device
, pdevice
, physicalDevice
);
760 tu_GetPhysicalDeviceProperties(physicalDevice
, &pProperties
->properties
);
762 vk_foreach_struct(ext
, pProperties
->pNext
)
764 switch (ext
->sType
) {
765 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PUSH_DESCRIPTOR_PROPERTIES_KHR
: {
766 VkPhysicalDevicePushDescriptorPropertiesKHR
*properties
=
767 (VkPhysicalDevicePushDescriptorPropertiesKHR
*)ext
;
768 properties
->maxPushDescriptors
= MAX_PUSH_DESCRIPTORS
;
771 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_ID_PROPERTIES_KHR
: {
772 VkPhysicalDeviceIDPropertiesKHR
*properties
=
773 (VkPhysicalDeviceIDPropertiesKHR
*)ext
;
774 memcpy(properties
->driverUUID
, pdevice
->driver_uuid
, VK_UUID_SIZE
);
775 memcpy(properties
->deviceUUID
, pdevice
->device_uuid
, VK_UUID_SIZE
);
776 properties
->deviceLUIDValid
= false;
779 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MULTIVIEW_PROPERTIES_KHR
: {
780 VkPhysicalDeviceMultiviewPropertiesKHR
*properties
=
781 (VkPhysicalDeviceMultiviewPropertiesKHR
*)ext
;
782 properties
->maxMultiviewViewCount
= MAX_VIEWS
;
783 properties
->maxMultiviewInstanceIndex
= INT_MAX
;
786 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_POINT_CLIPPING_PROPERTIES_KHR
: {
787 VkPhysicalDevicePointClippingPropertiesKHR
*properties
=
788 (VkPhysicalDevicePointClippingPropertiesKHR
*)ext
;
789 properties
->pointClippingBehavior
=
790 VK_POINT_CLIPPING_BEHAVIOR_ALL_CLIP_PLANES_KHR
;
793 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MAINTENANCE_3_PROPERTIES
: {
794 VkPhysicalDeviceMaintenance3Properties
*properties
=
795 (VkPhysicalDeviceMaintenance3Properties
*)ext
;
796 /* Make sure everything is addressable by a signed 32-bit int, and
797 * our largest descriptors are 96 bytes. */
798 properties
->maxPerSetDescriptors
= (1ull << 31) / 96;
799 /* Our buffer size fields allow only this much */
800 properties
->maxMemoryAllocationSize
= 0xFFFFFFFFull
;
810 tu_get_physical_device_queue_family_properties(
811 struct tu_physical_device
*pdevice
,
813 VkQueueFamilyProperties
**pQueueFamilyProperties
)
815 int num_queue_families
= 1;
817 if (pQueueFamilyProperties
== NULL
) {
818 *pCount
= num_queue_families
;
827 *pQueueFamilyProperties
[idx
] = (VkQueueFamilyProperties
){
829 VK_QUEUE_GRAPHICS_BIT
| VK_QUEUE_COMPUTE_BIT
| VK_QUEUE_TRANSFER_BIT
,
831 .timestampValidBits
= 64,
832 .minImageTransferGranularity
= (VkExtent3D
){ 1, 1, 1 },
841 tu_GetPhysicalDeviceQueueFamilyProperties(
842 VkPhysicalDevice physicalDevice
,
844 VkQueueFamilyProperties
*pQueueFamilyProperties
)
846 TU_FROM_HANDLE(tu_physical_device
, pdevice
, physicalDevice
);
847 if (!pQueueFamilyProperties
) {
848 return tu_get_physical_device_queue_family_properties(
849 pdevice
, pCount
, NULL
);
852 VkQueueFamilyProperties
*properties
[] = {
853 pQueueFamilyProperties
+ 0,
855 tu_get_physical_device_queue_family_properties(pdevice
, pCount
, properties
);
856 assert(*pCount
<= 1);
860 tu_GetPhysicalDeviceQueueFamilyProperties2(
861 VkPhysicalDevice physicalDevice
,
863 VkQueueFamilyProperties2KHR
*pQueueFamilyProperties
)
865 TU_FROM_HANDLE(tu_physical_device
, pdevice
, physicalDevice
);
866 if (!pQueueFamilyProperties
) {
867 return tu_get_physical_device_queue_family_properties(
868 pdevice
, pCount
, NULL
);
871 VkQueueFamilyProperties
*properties
[] = {
872 &pQueueFamilyProperties
[0].queueFamilyProperties
,
874 tu_get_physical_device_queue_family_properties(pdevice
, pCount
, properties
);
875 assert(*pCount
<= 1);
/* Size the single advertised memory heap from system RAM.
 * Fix: `()` (no-prototype definition) changed to `(void)`. */
static uint64_t
tu_get_system_heap_size(void)
{
   struct sysinfo info;
   sysinfo(&info);

   uint64_t total_ram = (uint64_t)info.totalram * (uint64_t)info.mem_unit;

   /* We don't want to burn too much ram with the GPU. If the user has 4GiB
    * or less, we use at most half. If they have more than 4GiB, we use 3/4.
    */
   uint64_t available_ram;
   if (total_ram <= 4ull * 1024ull * 1024ull * 1024ull)
      available_ram = total_ram / 2;
   else
      available_ram = total_ram * 3 / 4;

   return available_ram;
}
899 tu_GetPhysicalDeviceMemoryProperties(
900 VkPhysicalDevice physicalDevice
,
901 VkPhysicalDeviceMemoryProperties
*pMemoryProperties
)
903 pMemoryProperties
->memoryHeapCount
= 1;
904 pMemoryProperties
->memoryHeaps
[0].size
= tu_get_system_heap_size();
905 pMemoryProperties
->memoryHeaps
[0].flags
= VK_MEMORY_HEAP_DEVICE_LOCAL_BIT
;
907 pMemoryProperties
->memoryTypeCount
= 1;
908 pMemoryProperties
->memoryTypes
[0].propertyFlags
= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT
|
909 VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT
|
910 VK_MEMORY_PROPERTY_HOST_COHERENT_BIT
;
911 pMemoryProperties
->memoryTypes
[0].heapIndex
= 0;
915 tu_GetPhysicalDeviceMemoryProperties2(
916 VkPhysicalDevice physicalDevice
,
917 VkPhysicalDeviceMemoryProperties2KHR
*pMemoryProperties
)
919 return tu_GetPhysicalDeviceMemoryProperties(
920 physicalDevice
, &pMemoryProperties
->memoryProperties
);
924 tu_queue_init(struct tu_device
*device
,
925 struct tu_queue
*queue
,
926 uint32_t queue_family_index
,
928 VkDeviceQueueCreateFlags flags
)
930 queue
->_loader_data
.loaderMagic
= ICD_LOADER_MAGIC
;
931 queue
->device
= device
;
932 queue
->queue_family_index
= queue_family_index
;
933 queue
->queue_idx
= idx
;
934 queue
->flags
= flags
;
/* Tear down a queue initialized by tu_queue_init.  NOTE(review): the body
 * was lost in extraction; tu_queue_init acquires no resources, so an empty
 * body is presumed — confirm against the original file. */
static void
tu_queue_finish(struct tu_queue *queue)
{
}
945 tu_get_device_extension_index(const char *name
)
947 for (unsigned i
= 0; i
< TU_DEVICE_EXTENSION_COUNT
; ++i
) {
948 if (strcmp(name
, tu_device_extensions
[i
].extensionName
) == 0)
955 tu_CreateDevice(VkPhysicalDevice physicalDevice
,
956 const VkDeviceCreateInfo
*pCreateInfo
,
957 const VkAllocationCallbacks
*pAllocator
,
960 TU_FROM_HANDLE(tu_physical_device
, physical_device
, physicalDevice
);
962 struct tu_device
*device
;
964 /* Check enabled features */
965 if (pCreateInfo
->pEnabledFeatures
) {
966 VkPhysicalDeviceFeatures supported_features
;
967 tu_GetPhysicalDeviceFeatures(physicalDevice
, &supported_features
);
968 VkBool32
*supported_feature
= (VkBool32
*)&supported_features
;
969 VkBool32
*enabled_feature
= (VkBool32
*)pCreateInfo
->pEnabledFeatures
;
970 unsigned num_features
=
971 sizeof(VkPhysicalDeviceFeatures
) / sizeof(VkBool32
);
972 for (uint32_t i
= 0; i
< num_features
; i
++) {
973 if (enabled_feature
[i
] && !supported_feature
[i
])
974 return vk_error(physical_device
->instance
,
975 VK_ERROR_FEATURE_NOT_PRESENT
);
979 device
= vk_zalloc2(&physical_device
->instance
->alloc
,
983 VK_SYSTEM_ALLOCATION_SCOPE_DEVICE
);
985 return vk_error(physical_device
->instance
, VK_ERROR_OUT_OF_HOST_MEMORY
);
987 device
->_loader_data
.loaderMagic
= ICD_LOADER_MAGIC
;
988 device
->instance
= physical_device
->instance
;
989 device
->physical_device
= physical_device
;
992 device
->alloc
= *pAllocator
;
994 device
->alloc
= physical_device
->instance
->alloc
;
996 for (uint32_t i
= 0; i
< pCreateInfo
->enabledExtensionCount
; i
++) {
997 const char *ext_name
= pCreateInfo
->ppEnabledExtensionNames
[i
];
998 int index
= tu_get_device_extension_index(ext_name
);
1000 !physical_device
->supported_extensions
.extensions
[index
]) {
1001 vk_free(&device
->alloc
, device
);
1002 return vk_error(physical_device
->instance
,
1003 VK_ERROR_EXTENSION_NOT_PRESENT
);
1006 device
->enabled_extensions
.extensions
[index
] = true;
1009 for (unsigned i
= 0; i
< pCreateInfo
->queueCreateInfoCount
; i
++) {
1010 const VkDeviceQueueCreateInfo
*queue_create
=
1011 &pCreateInfo
->pQueueCreateInfos
[i
];
1012 uint32_t qfi
= queue_create
->queueFamilyIndex
;
1013 device
->queues
[qfi
] =
1014 vk_alloc(&device
->alloc
,
1015 queue_create
->queueCount
* sizeof(struct tu_queue
),
1017 VK_SYSTEM_ALLOCATION_SCOPE_DEVICE
);
1018 if (!device
->queues
[qfi
]) {
1019 result
= VK_ERROR_OUT_OF_HOST_MEMORY
;
1023 memset(device
->queues
[qfi
],
1025 queue_create
->queueCount
* sizeof(struct tu_queue
));
1027 device
->queue_count
[qfi
] = queue_create
->queueCount
;
1029 for (unsigned q
= 0; q
< queue_create
->queueCount
; q
++) {
1030 result
= tu_queue_init(
1031 device
, &device
->queues
[qfi
][q
], qfi
, q
, queue_create
->flags
);
1032 if (result
!= VK_SUCCESS
)
1037 VkPipelineCacheCreateInfo ci
;
1038 ci
.sType
= VK_STRUCTURE_TYPE_PIPELINE_CACHE_CREATE_INFO
;
1041 ci
.pInitialData
= NULL
;
1042 ci
.initialDataSize
= 0;
1045 tu_CreatePipelineCache(tu_device_to_handle(device
), &ci
, NULL
, &pc
);
1046 if (result
!= VK_SUCCESS
)
1049 device
->mem_cache
= tu_pipeline_cache_from_handle(pc
);
1051 *pDevice
= tu_device_to_handle(device
);
1055 for (unsigned i
= 0; i
< TU_MAX_QUEUE_FAMILIES
; i
++) {
1056 for (unsigned q
= 0; q
< device
->queue_count
[i
]; q
++)
1057 tu_queue_finish(&device
->queues
[i
][q
]);
1058 if (device
->queue_count
[i
])
1059 vk_free(&device
->alloc
, device
->queues
[i
]);
1062 vk_free(&device
->alloc
, device
);
1067 tu_DestroyDevice(VkDevice _device
, const VkAllocationCallbacks
*pAllocator
)
1069 TU_FROM_HANDLE(tu_device
, device
, _device
);
1074 for (unsigned i
= 0; i
< TU_MAX_QUEUE_FAMILIES
; i
++) {
1075 for (unsigned q
= 0; q
< device
->queue_count
[i
]; q
++)
1076 tu_queue_finish(&device
->queues
[i
][q
]);
1077 if (device
->queue_count
[i
])
1078 vk_free(&device
->alloc
, device
->queues
[i
]);
1081 VkPipelineCache pc
= tu_pipeline_cache_to_handle(device
->mem_cache
);
1082 tu_DestroyPipelineCache(tu_device_to_handle(device
), pc
, NULL
);
1084 vk_free(&device
->alloc
, device
);
1088 tu_EnumerateInstanceLayerProperties(uint32_t *pPropertyCount
,
1089 VkLayerProperties
*pProperties
)
1091 *pPropertyCount
= 0;
1096 tu_EnumerateDeviceLayerProperties(VkPhysicalDevice physicalDevice
,
1097 uint32_t *pPropertyCount
,
1098 VkLayerProperties
*pProperties
)
1100 *pPropertyCount
= 0;
1105 tu_GetDeviceQueue2(VkDevice _device
,
1106 const VkDeviceQueueInfo2
*pQueueInfo
,
1109 TU_FROM_HANDLE(tu_device
, device
, _device
);
1110 struct tu_queue
*queue
;
1113 &device
->queues
[pQueueInfo
->queueFamilyIndex
][pQueueInfo
->queueIndex
];
1114 if (pQueueInfo
->flags
!= queue
->flags
) {
1115 /* From the Vulkan 1.1.70 spec:
1117 * "The queue returned by vkGetDeviceQueue2 must have the same
1118 * flags value from this structure as that used at device
1119 * creation time in a VkDeviceQueueCreateInfo instance. If no
1120 * matching flags were specified at device creation time then
1121 * pQueue will return VK_NULL_HANDLE."
1123 *pQueue
= VK_NULL_HANDLE
;
1127 *pQueue
= tu_queue_to_handle(queue
);
1131 tu_GetDeviceQueue(VkDevice _device
,
1132 uint32_t queueFamilyIndex
,
1133 uint32_t queueIndex
,
1136 const VkDeviceQueueInfo2 info
=
1137 (VkDeviceQueueInfo2
){.sType
= VK_STRUCTURE_TYPE_DEVICE_QUEUE_INFO_2
,
1138 .queueFamilyIndex
= queueFamilyIndex
,
1139 .queueIndex
= queueIndex
};
1141 tu_GetDeviceQueue2(_device
, &info
, pQueue
);
1145 tu_QueueSubmit(VkQueue _queue
,
1146 uint32_t submitCount
,
1147 const VkSubmitInfo
*pSubmits
,
1154 tu_QueueWaitIdle(VkQueue _queue
)
1160 tu_DeviceWaitIdle(VkDevice _device
)
1162 TU_FROM_HANDLE(tu_device
, device
, _device
);
1164 for (unsigned i
= 0; i
< TU_MAX_QUEUE_FAMILIES
; i
++) {
1165 for (unsigned q
= 0; q
< device
->queue_count
[i
]; q
++) {
1166 tu_QueueWaitIdle(tu_queue_to_handle(&device
->queues
[i
][q
]));
1173 tu_EnumerateInstanceExtensionProperties(const char *pLayerName
,
1174 uint32_t *pPropertyCount
,
1175 VkExtensionProperties
*pProperties
)
1177 VK_OUTARRAY_MAKE(out
, pProperties
, pPropertyCount
);
1179 for (int i
= 0; i
< TU_INSTANCE_EXTENSION_COUNT
; i
++) {
1180 if (tu_supported_instance_extensions
.extensions
[i
]) {
1181 vk_outarray_append(&out
, prop
) { *prop
= tu_instance_extensions
[i
]; }
1185 return vk_outarray_status(&out
);
1189 tu_EnumerateDeviceExtensionProperties(VkPhysicalDevice physicalDevice
,
1190 const char *pLayerName
,
1191 uint32_t *pPropertyCount
,
1192 VkExtensionProperties
*pProperties
)
1194 TU_FROM_HANDLE(tu_physical_device
, device
, physicalDevice
);
1195 VK_OUTARRAY_MAKE(out
, pProperties
, pPropertyCount
);
1197 for (int i
= 0; i
< TU_DEVICE_EXTENSION_COUNT
; i
++) {
1198 if (device
->supported_extensions
.extensions
[i
]) {
1199 vk_outarray_append(&out
, prop
) { *prop
= tu_device_extensions
[i
]; }
1203 return vk_outarray_status(&out
);
1207 tu_GetInstanceProcAddr(VkInstance _instance
, const char *pName
)
1209 TU_FROM_HANDLE(tu_instance
, instance
, _instance
);
1211 return tu_lookup_entrypoint_checked(pName
,
1212 instance
? instance
->api_version
: 0,
1213 instance
? &instance
->enabled_extensions
1218 /* The loader wants us to expose a second GetInstanceProcAddr function
1219 * to work around certain LD_PRELOAD issues seen in apps.
1222 VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL
1223 vk_icdGetInstanceProcAddr(VkInstance instance
, const char *pName
);
1226 VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL
1227 vk_icdGetInstanceProcAddr(VkInstance instance
, const char *pName
)
1229 return tu_GetInstanceProcAddr(instance
, pName
);
1233 tu_GetDeviceProcAddr(VkDevice _device
, const char *pName
)
1235 TU_FROM_HANDLE(tu_device
, device
, _device
);
1237 return tu_lookup_entrypoint_checked(pName
,
1238 device
->instance
->api_version
,
1239 &device
->instance
->enabled_extensions
,
1240 &device
->enabled_extensions
);
1244 tu_alloc_memory(struct tu_device
*device
,
1245 const VkMemoryAllocateInfo
*pAllocateInfo
,
1246 const VkAllocationCallbacks
*pAllocator
,
1247 VkDeviceMemory
*pMem
)
1249 struct tu_device_memory
*mem
;
1251 assert(pAllocateInfo
->sType
== VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO
);
1253 if (pAllocateInfo
->allocationSize
== 0) {
1254 /* Apparently, this is allowed */
1255 *pMem
= VK_NULL_HANDLE
;
1259 mem
= vk_alloc2(&device
->alloc
,
1263 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT
);
1265 return vk_error(device
->instance
, VK_ERROR_OUT_OF_HOST_MEMORY
);
1267 mem
->bo
= fd_bo_new(device
->physical_device
->drm_device
, pAllocateInfo
->allocationSize
,
1268 DRM_FREEDRENO_GEM_CACHE_WCOMBINE
|
1269 DRM_FREEDRENO_GEM_TYPE_KMEM
);
1271 vk_free2(&device
->alloc
, pAllocator
, mem
);
1272 return vk_error(device
->instance
, VK_ERROR_OUT_OF_DEVICE_MEMORY
);
1274 mem
->size
= pAllocateInfo
->allocationSize
;
1275 mem
->type_index
= pAllocateInfo
->memoryTypeIndex
;
1278 mem
->user_ptr
= NULL
;
1280 *pMem
= tu_device_memory_to_handle(mem
);
1286 tu_AllocateMemory(VkDevice _device
,
1287 const VkMemoryAllocateInfo
*pAllocateInfo
,
1288 const VkAllocationCallbacks
*pAllocator
,
1289 VkDeviceMemory
*pMem
)
1291 TU_FROM_HANDLE(tu_device
, device
, _device
);
1292 return tu_alloc_memory(device
, pAllocateInfo
, pAllocator
, pMem
);
1296 tu_FreeMemory(VkDevice _device
,
1297 VkDeviceMemory _mem
,
1298 const VkAllocationCallbacks
*pAllocator
)
1300 TU_FROM_HANDLE(tu_device
, device
, _device
);
1301 TU_FROM_HANDLE(tu_device_memory
, mem
, _mem
);
1309 vk_free2(&device
->alloc
, pAllocator
, mem
);
1313 tu_MapMemory(VkDevice _device
,
1314 VkDeviceMemory _memory
,
1315 VkDeviceSize offset
,
1317 VkMemoryMapFlags flags
,
1320 TU_FROM_HANDLE(tu_device
, device
, _device
);
1321 TU_FROM_HANDLE(tu_device_memory
, mem
, _memory
);
1328 if (mem
->user_ptr
) {
1329 *ppData
= mem
->user_ptr
;
1330 } else if (!mem
->map
){
1331 *ppData
= mem
->map
= fd_bo_map(mem
->bo
);
1340 return vk_error(device
->instance
, VK_ERROR_MEMORY_MAP_FAILED
);
1344 tu_UnmapMemory(VkDevice _device
, VkDeviceMemory _memory
)
1346 /* I do not see any unmapping done by the freedreno Gallium driver. */
1350 tu_FlushMappedMemoryRanges(VkDevice _device
,
1351 uint32_t memoryRangeCount
,
1352 const VkMappedMemoryRange
*pMemoryRanges
)
1358 tu_InvalidateMappedMemoryRanges(VkDevice _device
,
1359 uint32_t memoryRangeCount
,
1360 const VkMappedMemoryRange
*pMemoryRanges
)
1366 tu_GetBufferMemoryRequirements(VkDevice _device
,
1368 VkMemoryRequirements
*pMemoryRequirements
)
1370 TU_FROM_HANDLE(tu_buffer
, buffer
, _buffer
);
1372 pMemoryRequirements
->memoryTypeBits
= 1;
1373 pMemoryRequirements
->alignment
= 16;
1374 pMemoryRequirements
->size
=
1375 align64(buffer
->size
, pMemoryRequirements
->alignment
);
1379 tu_GetBufferMemoryRequirements2(
1381 const VkBufferMemoryRequirementsInfo2KHR
*pInfo
,
1382 VkMemoryRequirements2KHR
*pMemoryRequirements
)
1384 tu_GetBufferMemoryRequirements(
1385 device
, pInfo
->buffer
, &pMemoryRequirements
->memoryRequirements
);
1389 tu_GetImageMemoryRequirements(VkDevice _device
,
1391 VkMemoryRequirements
*pMemoryRequirements
)
1393 TU_FROM_HANDLE(tu_image
, image
, _image
);
1395 pMemoryRequirements
->memoryTypeBits
= 1;
1396 pMemoryRequirements
->size
= image
->size
;
1397 pMemoryRequirements
->alignment
= image
->alignment
;
1401 tu_GetImageMemoryRequirements2(VkDevice device
,
1402 const VkImageMemoryRequirementsInfo2KHR
*pInfo
,
1403 VkMemoryRequirements2KHR
*pMemoryRequirements
)
1405 tu_GetImageMemoryRequirements(
1406 device
, pInfo
->image
, &pMemoryRequirements
->memoryRequirements
);
1410 tu_GetImageSparseMemoryRequirements(
1413 uint32_t *pSparseMemoryRequirementCount
,
1414 VkSparseImageMemoryRequirements
*pSparseMemoryRequirements
)
1420 tu_GetImageSparseMemoryRequirements2(
1422 const VkImageSparseMemoryRequirementsInfo2KHR
*pInfo
,
1423 uint32_t *pSparseMemoryRequirementCount
,
1424 VkSparseImageMemoryRequirements2KHR
*pSparseMemoryRequirements
)
1430 tu_GetDeviceMemoryCommitment(VkDevice device
,
1431 VkDeviceMemory memory
,
1432 VkDeviceSize
*pCommittedMemoryInBytes
)
1434 *pCommittedMemoryInBytes
= 0;
1438 tu_BindBufferMemory2(VkDevice device
,
1439 uint32_t bindInfoCount
,
1440 const VkBindBufferMemoryInfoKHR
*pBindInfos
)
1446 tu_BindBufferMemory(VkDevice device
,
1448 VkDeviceMemory memory
,
1449 VkDeviceSize memoryOffset
)
1451 const VkBindBufferMemoryInfoKHR info
= {
1452 .sType
= VK_STRUCTURE_TYPE_BIND_BUFFER_MEMORY_INFO_KHR
,
1455 .memoryOffset
= memoryOffset
1458 return tu_BindBufferMemory2(device
, 1, &info
);
1462 tu_BindImageMemory2(VkDevice device
,
1463 uint32_t bindInfoCount
,
1464 const VkBindImageMemoryInfoKHR
*pBindInfos
)
1470 tu_BindImageMemory(VkDevice device
,
1472 VkDeviceMemory memory
,
1473 VkDeviceSize memoryOffset
)
1475 const VkBindImageMemoryInfoKHR info
= {
1476 .sType
= VK_STRUCTURE_TYPE_BIND_BUFFER_MEMORY_INFO_KHR
,
1479 .memoryOffset
= memoryOffset
1482 return tu_BindImageMemory2(device
, 1, &info
);
1486 tu_QueueBindSparse(VkQueue _queue
,
1487 uint32_t bindInfoCount
,
1488 const VkBindSparseInfo
*pBindInfo
,
1495 tu_CreateFence(VkDevice _device
,
1496 const VkFenceCreateInfo
*pCreateInfo
,
1497 const VkAllocationCallbacks
*pAllocator
,
1500 TU_FROM_HANDLE(tu_device
, device
, _device
);
1502 struct tu_fence
*fence
= vk_alloc2(&device
->alloc
,
1506 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT
);
1509 return vk_error(device
->instance
, VK_ERROR_OUT_OF_HOST_MEMORY
);
1511 *pFence
= tu_fence_to_handle(fence
);
1517 tu_DestroyFence(VkDevice _device
,
1519 const VkAllocationCallbacks
*pAllocator
)
1521 TU_FROM_HANDLE(tu_device
, device
, _device
);
1522 TU_FROM_HANDLE(tu_fence
, fence
, _fence
);
1527 vk_free2(&device
->alloc
, pAllocator
, fence
);
1531 tu_WaitForFences(VkDevice _device
,
1532 uint32_t fenceCount
,
1533 const VkFence
*pFences
,
1541 tu_ResetFences(VkDevice _device
, uint32_t fenceCount
, const VkFence
*pFences
)
1547 tu_GetFenceStatus(VkDevice _device
, VkFence _fence
)
1552 // Queue semaphore functions
1555 tu_CreateSemaphore(VkDevice _device
,
1556 const VkSemaphoreCreateInfo
*pCreateInfo
,
1557 const VkAllocationCallbacks
*pAllocator
,
1558 VkSemaphore
*pSemaphore
)
1560 TU_FROM_HANDLE(tu_device
, device
, _device
);
1562 struct tu_semaphore
*sem
= vk_alloc2(&device
->alloc
,
1566 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT
);
1568 return vk_error(device
->instance
, VK_ERROR_OUT_OF_HOST_MEMORY
);
1570 *pSemaphore
= tu_semaphore_to_handle(sem
);
1575 tu_DestroySemaphore(VkDevice _device
,
1576 VkSemaphore _semaphore
,
1577 const VkAllocationCallbacks
*pAllocator
)
1579 TU_FROM_HANDLE(tu_device
, device
, _device
);
1580 TU_FROM_HANDLE(tu_semaphore
, sem
, _semaphore
);
1584 vk_free2(&device
->alloc
, pAllocator
, sem
);
1588 tu_CreateEvent(VkDevice _device
,
1589 const VkEventCreateInfo
*pCreateInfo
,
1590 const VkAllocationCallbacks
*pAllocator
,
1593 TU_FROM_HANDLE(tu_device
, device
, _device
);
1594 struct tu_event
*event
= vk_alloc2(&device
->alloc
,
1598 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT
);
1601 return vk_error(device
->instance
, VK_ERROR_OUT_OF_HOST_MEMORY
);
1603 *pEvent
= tu_event_to_handle(event
);
1609 tu_DestroyEvent(VkDevice _device
,
1611 const VkAllocationCallbacks
*pAllocator
)
1613 TU_FROM_HANDLE(tu_device
, device
, _device
);
1614 TU_FROM_HANDLE(tu_event
, event
, _event
);
1618 vk_free2(&device
->alloc
, pAllocator
, event
);
1622 tu_GetEventStatus(VkDevice _device
, VkEvent _event
)
1624 TU_FROM_HANDLE(tu_event
, event
, _event
);
1626 if (*event
->map
== 1)
1627 return VK_EVENT_SET
;
1628 return VK_EVENT_RESET
;
1632 tu_SetEvent(VkDevice _device
, VkEvent _event
)
1634 TU_FROM_HANDLE(tu_event
, event
, _event
);
1641 tu_ResetEvent(VkDevice _device
, VkEvent _event
)
1643 TU_FROM_HANDLE(tu_event
, event
, _event
);
1650 tu_CreateBuffer(VkDevice _device
,
1651 const VkBufferCreateInfo
*pCreateInfo
,
1652 const VkAllocationCallbacks
*pAllocator
,
1655 TU_FROM_HANDLE(tu_device
, device
, _device
);
1656 struct tu_buffer
*buffer
;
1658 assert(pCreateInfo
->sType
== VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO
);
1660 buffer
= vk_alloc2(&device
->alloc
,
1664 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT
);
1666 return vk_error(device
->instance
, VK_ERROR_OUT_OF_HOST_MEMORY
);
1668 buffer
->size
= pCreateInfo
->size
;
1669 buffer
->usage
= pCreateInfo
->usage
;
1670 buffer
->flags
= pCreateInfo
->flags
;
1672 *pBuffer
= tu_buffer_to_handle(buffer
);
1678 tu_DestroyBuffer(VkDevice _device
,
1680 const VkAllocationCallbacks
*pAllocator
)
1682 TU_FROM_HANDLE(tu_device
, device
, _device
);
1683 TU_FROM_HANDLE(tu_buffer
, buffer
, _buffer
);
1688 vk_free2(&device
->alloc
, pAllocator
, buffer
);
1692 tu_surface_max_layer_count(struct tu_image_view
*iview
)
1694 return iview
->type
== VK_IMAGE_VIEW_TYPE_3D
1695 ? iview
->extent
.depth
1696 : (iview
->base_layer
+ iview
->layer_count
);
1700 tu_CreateFramebuffer(VkDevice _device
,
1701 const VkFramebufferCreateInfo
*pCreateInfo
,
1702 const VkAllocationCallbacks
*pAllocator
,
1703 VkFramebuffer
*pFramebuffer
)
1705 TU_FROM_HANDLE(tu_device
, device
, _device
);
1706 struct tu_framebuffer
*framebuffer
;
1708 assert(pCreateInfo
->sType
== VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO
);
1711 sizeof(*framebuffer
) +
1712 sizeof(struct tu_attachment_info
) * pCreateInfo
->attachmentCount
;
1713 framebuffer
= vk_alloc2(
1714 &device
->alloc
, pAllocator
, size
, 8, VK_SYSTEM_ALLOCATION_SCOPE_OBJECT
);
1715 if (framebuffer
== NULL
)
1716 return vk_error(device
->instance
, VK_ERROR_OUT_OF_HOST_MEMORY
);
1718 framebuffer
->attachment_count
= pCreateInfo
->attachmentCount
;
1719 framebuffer
->width
= pCreateInfo
->width
;
1720 framebuffer
->height
= pCreateInfo
->height
;
1721 framebuffer
->layers
= pCreateInfo
->layers
;
1722 for (uint32_t i
= 0; i
< pCreateInfo
->attachmentCount
; i
++) {
1723 VkImageView _iview
= pCreateInfo
->pAttachments
[i
];
1724 struct tu_image_view
*iview
= tu_image_view_from_handle(_iview
);
1725 framebuffer
->attachments
[i
].attachment
= iview
;
1727 framebuffer
->width
= MIN2(framebuffer
->width
, iview
->extent
.width
);
1728 framebuffer
->height
= MIN2(framebuffer
->height
, iview
->extent
.height
);
1729 framebuffer
->layers
=
1730 MIN2(framebuffer
->layers
, tu_surface_max_layer_count(iview
));
1733 *pFramebuffer
= tu_framebuffer_to_handle(framebuffer
);
1738 tu_DestroyFramebuffer(VkDevice _device
,
1740 const VkAllocationCallbacks
*pAllocator
)
1742 TU_FROM_HANDLE(tu_device
, device
, _device
);
1743 TU_FROM_HANDLE(tu_framebuffer
, fb
, _fb
);
1747 vk_free2(&device
->alloc
, pAllocator
, fb
);
1751 tu_init_sampler(struct tu_device
*device
,
1752 struct tu_sampler
*sampler
,
1753 const VkSamplerCreateInfo
*pCreateInfo
)
1758 tu_CreateSampler(VkDevice _device
,
1759 const VkSamplerCreateInfo
*pCreateInfo
,
1760 const VkAllocationCallbacks
*pAllocator
,
1761 VkSampler
*pSampler
)
1763 TU_FROM_HANDLE(tu_device
, device
, _device
);
1764 struct tu_sampler
*sampler
;
1766 assert(pCreateInfo
->sType
== VK_STRUCTURE_TYPE_SAMPLER_CREATE_INFO
);
1768 sampler
= vk_alloc2(&device
->alloc
,
1772 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT
);
1774 return vk_error(device
->instance
, VK_ERROR_OUT_OF_HOST_MEMORY
);
1776 tu_init_sampler(device
, sampler
, pCreateInfo
);
1777 *pSampler
= tu_sampler_to_handle(sampler
);
1783 tu_DestroySampler(VkDevice _device
,
1785 const VkAllocationCallbacks
*pAllocator
)
1787 TU_FROM_HANDLE(tu_device
, device
, _device
);
1788 TU_FROM_HANDLE(tu_sampler
, sampler
, _sampler
);
1792 vk_free2(&device
->alloc
, pAllocator
, sampler
);
1795 /* vk_icd.h does not declare this function, so we declare it here to
1796 * suppress Wmissing-prototypes.
1798 PUBLIC VKAPI_ATTR VkResult VKAPI_CALL
1799 vk_icdNegotiateLoaderICDInterfaceVersion(uint32_t *pSupportedVersion
);
1801 PUBLIC VKAPI_ATTR VkResult VKAPI_CALL
1802 vk_icdNegotiateLoaderICDInterfaceVersion(uint32_t *pSupportedVersion
)
1804 /* For the full details on loader interface versioning, see
1805 * <https://github.com/KhronosGroup/Vulkan-LoaderAndValidationLayers/blob/master/loader/LoaderAndLayerInterface.md>.
1806 * What follows is a condensed summary, to help you navigate the large and
1807 * confusing official doc.
1809 * - Loader interface v0 is incompatible with later versions. We don't
1812 * - In loader interface v1:
1813 * - The first ICD entrypoint called by the loader is
1814 * vk_icdGetInstanceProcAddr(). The ICD must statically expose this
1816 * - The ICD must statically expose no other Vulkan symbol unless it is
1817 * linked with -Bsymbolic.
1818 * - Each dispatchable Vulkan handle created by the ICD must be
1819 * a pointer to a struct whose first member is VK_LOADER_DATA. The
1820 * ICD must initialize VK_LOADER_DATA.loadMagic to ICD_LOADER_MAGIC.
1821 * - The loader implements vkCreate{PLATFORM}SurfaceKHR() and
1822 * vkDestroySurfaceKHR(). The ICD must be capable of working with
1823 * such loader-managed surfaces.
1825 * - Loader interface v2 differs from v1 in:
1826 * - The first ICD entrypoint called by the loader is
1827 * vk_icdNegotiateLoaderICDInterfaceVersion(). The ICD must
1828 * statically expose this entrypoint.
1830 * - Loader interface v3 differs from v2 in:
1831 * - The ICD must implement vkCreate{PLATFORM}SurfaceKHR(),
1832 * vkDestroySurfaceKHR(), and other API which uses VKSurfaceKHR,
1833 * because the loader no longer does so.
1835 *pSupportedVersion
= MIN2(*pSupportedVersion
, 3u);
1840 tu_GetPhysicalDeviceExternalSemaphoreProperties(
1841 VkPhysicalDevice physicalDevice
,
1842 const VkPhysicalDeviceExternalSemaphoreInfoKHR
*pExternalSemaphoreInfo
,
1843 VkExternalSemaphorePropertiesKHR
*pExternalSemaphoreProperties
)
1845 pExternalSemaphoreProperties
->exportFromImportedHandleTypes
= 0;
1846 pExternalSemaphoreProperties
->compatibleHandleTypes
= 0;
1847 pExternalSemaphoreProperties
->externalSemaphoreFeatures
= 0;
1851 tu_GetPhysicalDeviceExternalFenceProperties(
1852 VkPhysicalDevice physicalDevice
,
1853 const VkPhysicalDeviceExternalFenceInfoKHR
*pExternalFenceInfo
,
1854 VkExternalFencePropertiesKHR
*pExternalFenceProperties
)
1856 pExternalFenceProperties
->exportFromImportedHandleTypes
= 0;
1857 pExternalFenceProperties
->compatibleHandleTypes
= 0;
1858 pExternalFenceProperties
->externalFenceFeatures
= 0;
1862 tu_CreateDebugReportCallbackEXT(
1863 VkInstance _instance
,
1864 const VkDebugReportCallbackCreateInfoEXT
*pCreateInfo
,
1865 const VkAllocationCallbacks
*pAllocator
,
1866 VkDebugReportCallbackEXT
*pCallback
)
1868 TU_FROM_HANDLE(tu_instance
, instance
, _instance
);
1869 return vk_create_debug_report_callback(&instance
->debug_report_callbacks
,
1877 tu_DestroyDebugReportCallbackEXT(VkInstance _instance
,
1878 VkDebugReportCallbackEXT _callback
,
1879 const VkAllocationCallbacks
*pAllocator
)
1881 TU_FROM_HANDLE(tu_instance
, instance
, _instance
);
1882 vk_destroy_debug_report_callback(&instance
->debug_report_callbacks
,
1889 tu_DebugReportMessageEXT(VkInstance _instance
,
1890 VkDebugReportFlagsEXT flags
,
1891 VkDebugReportObjectTypeEXT objectType
,
1894 int32_t messageCode
,
1895 const char *pLayerPrefix
,
1896 const char *pMessage
)
1898 TU_FROM_HANDLE(tu_instance
, instance
, _instance
);
1899 vk_debug_report(&instance
->debug_report_callbacks
,
1910 tu_GetDeviceGroupPeerMemoryFeatures(
1913 uint32_t localDeviceIndex
,
1914 uint32_t remoteDeviceIndex
,
1915 VkPeerMemoryFeatureFlags
*pPeerMemoryFeatures
)
1917 assert(localDeviceIndex
== remoteDeviceIndex
);
1919 *pPeerMemoryFeatures
= VK_PEER_MEMORY_FEATURE_COPY_SRC_BIT
|
1920 VK_PEER_MEMORY_FEATURE_COPY_DST_BIT
|
1921 VK_PEER_MEMORY_FEATURE_GENERIC_SRC_BIT
|
1922 VK_PEER_MEMORY_FEATURE_GENERIC_DST_BIT
;