/*
 * Copyright © 2016 Red Hat.
 * Copyright © 2016 Bas Nieuwenhuizen
 *
 * based in part on anv driver which is:
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "tu_private.h"

#include "util/debug.h"
#include "util/disk_cache.h"
#include "util/strtod.h"
#include "vk_format.h"

#include <sys/sysinfo.h>
static int
tu_device_get_cache_uuid(uint16_t family, void *uuid)
{
   uint32_t mesa_timestamp;
   uint16_t f = family;

   memset(uuid, 0, VK_UUID_SIZE);
   if (!disk_cache_get_function_timestamp(tu_device_get_cache_uuid,
                                          &mesa_timestamp))
      return -1;

   memcpy(uuid, &mesa_timestamp, 4);
   memcpy((char *)uuid + 4, &f, 2);
   snprintf((char *)uuid + 6, VK_UUID_SIZE - 10, "tu");
   return 0;
}
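/* Resulting cache UUID layout: bytes 0-3 hold the Mesa build timestamp,
 * bytes 4-5 the GPU family, and the remaining bytes the literal string
 * "tu".
 */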
static void
tu_get_driver_uuid(void *uuid)
{
   memset(uuid, 0, VK_UUID_SIZE);
}

static void
tu_get_device_uuid(void *uuid)
{
   memset(uuid, 0, VK_UUID_SIZE);
}
static VkResult
tu_physical_device_init(struct tu_physical_device *device,
                        struct tu_instance *instance,
                        drmDevicePtr drm_device)
{
   const char *path = drm_device->nodes[DRM_NODE_RENDER];
   VkResult result = VK_SUCCESS;
   drmVersionPtr version;
   int fd;
   int master_fd = -1;
   struct fd_pipe *tmp_pipe = NULL;
   uint64_t val;

   fd = open(path, O_RDWR | O_CLOEXEC);
   if (fd < 0)
      return vk_errorf(instance, VK_ERROR_INCOMPATIBLE_DRIVER,
                       "failed to open device %s", path);

   /* Version 1.3 added MSM_INFO_IOVA. */
   const int min_version_major = 1;
   const int min_version_minor = 3;

   version = drmGetVersion(fd);
   if (!version) {
      close(fd);
      return vk_errorf(instance, VK_ERROR_INCOMPATIBLE_DRIVER,
                       "failed to query kernel driver version for device %s",
                       path);
   }

   if (strcmp(version->name, "msm")) {
      drmFreeVersion(version);
      close(fd);
      return vk_errorf(instance, VK_ERROR_INCOMPATIBLE_DRIVER,
                       "device %s does not use the msm kernel driver", path);
   }

   if (version->version_major != 1 || version->version_minor < 3) {
      result = vk_errorf(instance, VK_ERROR_INCOMPATIBLE_DRIVER,
                         "kernel driver for device %s has version %d.%d, "
                         "but Vulkan requires version >= %d.%d",
                         path,
                         version->version_major, version->version_minor,
                         min_version_major, min_version_minor);
      drmFreeVersion(version);
      close(fd);
      return result;
   }

   drmFreeVersion(version);

   if (instance->debug_flags & TU_DEBUG_STARTUP)
      tu_logi("Found compatible device '%s'.", path);

   device->_loader_data.loaderMagic = ICD_LOADER_MAGIC;
   device->instance = instance;
   assert(strlen(path) < ARRAY_SIZE(device->path));
   strncpy(device->path, path, ARRAY_SIZE(device->path));

   if (instance->enabled_extensions.KHR_display) {
      master_fd =
         open(drm_device->nodes[DRM_NODE_PRIMARY], O_RDWR | O_CLOEXEC);
      if (master_fd >= 0) {
         /* TODO: free master_fd if accel is not working? */
      }
   }

   device->master_fd = master_fd;
   device->local_fd = fd;

   device->drm_device = fd_device_new_dup(fd);
   if (!device->drm_device) {
      result = vk_errorf(
         instance, VK_ERROR_INITIALIZATION_FAILED, "could not create the libdrm device");
      goto fail;
   }

   tmp_pipe = fd_pipe_new(device->drm_device, FD_PIPE_3D);
   if (!tmp_pipe) {
      result = vk_errorf(
         instance, VK_ERROR_INITIALIZATION_FAILED, "could not open the 3D pipe");
      goto fail;
   }

   if (fd_pipe_get_param(tmp_pipe, FD_GPU_ID, &val)) {
      result = vk_errorf(
         instance, VK_ERROR_INITIALIZATION_FAILED, "could not get GPU ID");
      goto fail;
   }
   device->gpu_id = val;

   if (fd_pipe_get_param(tmp_pipe, FD_GMEM_SIZE, &val)) {
      result = vk_errorf(
         instance, VK_ERROR_INITIALIZATION_FAILED, "could not get GMEM size");
      goto fail;
   }
   device->gmem_size = val;

   fd_pipe_del(tmp_pipe);
   tmp_pipe = NULL;

   memset(device->name, 0, sizeof(device->name));
   sprintf(device->name, "FD%d", device->gpu_id);

   switch (device->gpu_id) {
   /* ... */
   default:
      result = vk_errorf(instance, VK_ERROR_INITIALIZATION_FAILED,
                         "device %s is unsupported", device->name);
      goto fail;
   }

   if (tu_device_get_cache_uuid(device->gpu_id, device->cache_uuid)) {
      result = vk_errorf(
         instance, VK_ERROR_INITIALIZATION_FAILED, "cannot generate UUID");
      goto fail;
   }

   /* The gpu id is already embedded in the uuid so we just pass "tu"
    * when creating the cache.
    */
   char buf[VK_UUID_SIZE * 2 + 1];
   disk_cache_format_hex_id(buf, device->cache_uuid, VK_UUID_SIZE * 2);
   device->disk_cache = disk_cache_create(device->name, buf, 0);

   fprintf(stderr,
           "WARNING: tu is not a conformant vulkan implementation, "
           "testing use only.\n");

   tu_get_driver_uuid(&device->driver_uuid);
   tu_get_device_uuid(&device->device_uuid);

   tu_fill_device_extension_table(device, &device->supported_extensions);

   if (result != VK_SUCCESS) {
      vk_error(instance, result);
      goto fail;
   }

   return VK_SUCCESS;

fail:
   if (tmp_pipe)
      fd_pipe_del(tmp_pipe);
   if (device->drm_device)
      fd_device_del(device->drm_device);
   close(fd);
   if (master_fd != -1)
      close(master_fd);
   return result;
}
static void
tu_physical_device_finish(struct tu_physical_device *device)
{
   disk_cache_destroy(device->disk_cache);
   close(device->local_fd);
   if (device->master_fd != -1)
      close(device->master_fd);
}
static void *
default_alloc_func(void *pUserData,
                   size_t size,
                   size_t align,
                   VkSystemAllocationScope allocationScope)
{
   return malloc(size);
}

static void *
default_realloc_func(void *pUserData,
                     void *pOriginal,
                     size_t size,
                     size_t align,
                     VkSystemAllocationScope allocationScope)
{
   return realloc(pOriginal, size);
}

static void
default_free_func(void *pUserData, void *pMemory)
{
   free(pMemory);
}

static const VkAllocationCallbacks default_alloc = {
   .pUserData = NULL,
   .pfnAllocation = default_alloc_func,
   .pfnReallocation = default_realloc_func,
   .pfnFree = default_free_func,
};
static const struct debug_control tu_debug_options[] = {
   { "startup", TU_DEBUG_STARTUP },
   { NULL, 0 }
};

const char *
tu_get_debug_option_name(int id)
{
   assert(id < ARRAY_SIZE(tu_debug_options) - 1);
   return tu_debug_options[id].string;
}

static int
tu_get_instance_extension_index(const char *name)
{
   for (unsigned i = 0; i < TU_INSTANCE_EXTENSION_COUNT; ++i) {
      if (strcmp(name, tu_instance_extensions[i].extensionName) == 0)
         return i;
   }
   return -1;
}
VkResult
tu_CreateInstance(const VkInstanceCreateInfo *pCreateInfo,
                  const VkAllocationCallbacks *pAllocator,
                  VkInstance *pInstance)
{
   struct tu_instance *instance;
   VkResult result;

   assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_INSTANCE_CREATE_INFO);

   uint32_t client_version;
   if (pCreateInfo->pApplicationInfo &&
       pCreateInfo->pApplicationInfo->apiVersion != 0) {
      client_version = pCreateInfo->pApplicationInfo->apiVersion;
   } else {
      tu_EnumerateInstanceVersion(&client_version);
   }

   instance = vk_zalloc2(&default_alloc,
                         pAllocator,
                         sizeof(*instance),
                         8,
                         VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
   if (!instance)
      return vk_error(NULL, VK_ERROR_OUT_OF_HOST_MEMORY);

   instance->_loader_data.loaderMagic = ICD_LOADER_MAGIC;

   if (pAllocator)
      instance->alloc = *pAllocator;
   else
      instance->alloc = default_alloc;

   instance->api_version = client_version;
   instance->physical_device_count = -1;

   instance->debug_flags =
      parse_debug_string(getenv("TU_DEBUG"), tu_debug_options);

   if (instance->debug_flags & TU_DEBUG_STARTUP)
      tu_logi("Created an instance");

   for (uint32_t i = 0; i < pCreateInfo->enabledExtensionCount; i++) {
      const char *ext_name = pCreateInfo->ppEnabledExtensionNames[i];
      int index = tu_get_instance_extension_index(ext_name);

      if (index < 0 || !tu_supported_instance_extensions.extensions[index]) {
         vk_free2(&default_alloc, pAllocator, instance);
         return vk_error(instance, VK_ERROR_EXTENSION_NOT_PRESENT);
      }

      instance->enabled_extensions.extensions[index] = true;
   }

   result = vk_debug_report_instance_init(&instance->debug_report_callbacks);
   if (result != VK_SUCCESS) {
      vk_free2(&default_alloc, pAllocator, instance);
      return vk_error(instance, result);
   }

   VG(VALGRIND_CREATE_MEMPOOL(instance, 0, false));

   *pInstance = tu_instance_to_handle(instance);

   return VK_SUCCESS;
}
void
tu_DestroyInstance(VkInstance _instance,
                   const VkAllocationCallbacks *pAllocator)
{
   TU_FROM_HANDLE(tu_instance, instance, _instance);

   if (!instance)
      return;

   for (int i = 0; i < instance->physical_device_count; ++i) {
      tu_physical_device_finish(instance->physical_devices + i);
   }

   VG(VALGRIND_DESTROY_MEMPOOL(instance));

   vk_debug_report_instance_destroy(&instance->debug_report_callbacks);

   vk_free(&instance->alloc, instance);
}
static VkResult
tu_enumerate_devices(struct tu_instance *instance)
{
   /* TODO: Check for more devices? */
   drmDevicePtr devices[8];
   VkResult result = VK_ERROR_INCOMPATIBLE_DRIVER;
   int max_devices;

   instance->physical_device_count = 0;

   max_devices = drmGetDevices2(0, devices, ARRAY_SIZE(devices));

   if (instance->debug_flags & TU_DEBUG_STARTUP)
      tu_logi("Found %d drm nodes", max_devices);

   if (max_devices < 1)
      return vk_error(instance, VK_ERROR_INCOMPATIBLE_DRIVER);

   for (unsigned i = 0; i < (unsigned)max_devices; i++) {
      if (devices[i]->available_nodes & 1 << DRM_NODE_RENDER &&
          devices[i]->bustype == DRM_BUS_PLATFORM) {

         result = tu_physical_device_init(instance->physical_devices +
                                             instance->physical_device_count,
                                          instance,
                                          devices[i]);
         if (result == VK_SUCCESS)
            ++instance->physical_device_count;
         else if (result != VK_ERROR_INCOMPATIBLE_DRIVER)
            break;
      }
   }
   drmFreeDevices(devices, max_devices);

   return result;
}
VkResult
tu_EnumeratePhysicalDevices(VkInstance _instance,
                            uint32_t *pPhysicalDeviceCount,
                            VkPhysicalDevice *pPhysicalDevices)
{
   TU_FROM_HANDLE(tu_instance, instance, _instance);
   VK_OUTARRAY_MAKE(out, pPhysicalDevices, pPhysicalDeviceCount);

   VkResult result;

   if (instance->physical_device_count < 0) {
      result = tu_enumerate_devices(instance);
      if (result != VK_SUCCESS && result != VK_ERROR_INCOMPATIBLE_DRIVER)
         return result;
   }

   for (uint32_t i = 0; i < instance->physical_device_count; ++i) {
      vk_outarray_append(&out, p) {
         *p = tu_physical_device_to_handle(instance->physical_devices + i);
      }
   }

   return vk_outarray_status(&out);
}
VkResult
tu_EnumeratePhysicalDeviceGroups(
   VkInstance _instance,
   uint32_t *pPhysicalDeviceGroupCount,
   VkPhysicalDeviceGroupProperties *pPhysicalDeviceGroupProperties)
{
   TU_FROM_HANDLE(tu_instance, instance, _instance);
   VK_OUTARRAY_MAKE(out, pPhysicalDeviceGroupProperties,
                    pPhysicalDeviceGroupCount);

   VkResult result;

   if (instance->physical_device_count < 0) {
      result = tu_enumerate_devices(instance);
      if (result != VK_SUCCESS && result != VK_ERROR_INCOMPATIBLE_DRIVER)
         return result;
   }

   for (uint32_t i = 0; i < instance->physical_device_count; ++i) {
      vk_outarray_append(&out, p) {
         p->physicalDeviceCount = 1;
         p->physicalDevices[0] =
            tu_physical_device_to_handle(instance->physical_devices + i);
         p->subsetAllocation = false;
      }
   }

   return vk_outarray_status(&out);
}
468 tu_GetPhysicalDeviceFeatures(VkPhysicalDevice physicalDevice
,
469 VkPhysicalDeviceFeatures
*pFeatures
)
471 memset(pFeatures
, 0, sizeof(*pFeatures
));
473 *pFeatures
= (VkPhysicalDeviceFeatures
){
474 .robustBufferAccess
= false,
475 .fullDrawIndexUint32
= false,
476 .imageCubeArray
= false,
477 .independentBlend
= false,
478 .geometryShader
= false,
479 .tessellationShader
= false,
480 .sampleRateShading
= false,
481 .dualSrcBlend
= false,
483 .multiDrawIndirect
= false,
484 .drawIndirectFirstInstance
= false,
486 .depthBiasClamp
= false,
487 .fillModeNonSolid
= false,
488 .depthBounds
= false,
490 .largePoints
= false,
492 .multiViewport
= false,
493 .samplerAnisotropy
= false,
494 .textureCompressionETC2
= false,
495 .textureCompressionASTC_LDR
= false,
496 .textureCompressionBC
= false,
497 .occlusionQueryPrecise
= false,
498 .pipelineStatisticsQuery
= false,
499 .vertexPipelineStoresAndAtomics
= false,
500 .fragmentStoresAndAtomics
= false,
501 .shaderTessellationAndGeometryPointSize
= false,
502 .shaderImageGatherExtended
= false,
503 .shaderStorageImageExtendedFormats
= false,
504 .shaderStorageImageMultisample
= false,
505 .shaderUniformBufferArrayDynamicIndexing
= false,
506 .shaderSampledImageArrayDynamicIndexing
= false,
507 .shaderStorageBufferArrayDynamicIndexing
= false,
508 .shaderStorageImageArrayDynamicIndexing
= false,
509 .shaderStorageImageReadWithoutFormat
= false,
510 .shaderStorageImageWriteWithoutFormat
= false,
511 .shaderClipDistance
= false,
512 .shaderCullDistance
= false,
513 .shaderFloat64
= false,
514 .shaderInt64
= false,
515 .shaderInt16
= false,
516 .sparseBinding
= false,
517 .variableMultisampleRate
= false,
518 .inheritedQueries
= false,
523 tu_GetPhysicalDeviceFeatures2(VkPhysicalDevice physicalDevice
,
524 VkPhysicalDeviceFeatures2KHR
*pFeatures
)
526 vk_foreach_struct(ext
, pFeatures
->pNext
)
528 switch (ext
->sType
) {
529 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VARIABLE_POINTER_FEATURES_KHR
: {
530 VkPhysicalDeviceVariablePointerFeaturesKHR
*features
= (void *)ext
;
531 features
->variablePointersStorageBuffer
= false;
532 features
->variablePointers
= false;
535 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MULTIVIEW_FEATURES_KHR
: {
536 VkPhysicalDeviceMultiviewFeaturesKHR
*features
=
537 (VkPhysicalDeviceMultiviewFeaturesKHR
*)ext
;
538 features
->multiview
= false;
539 features
->multiviewGeometryShader
= false;
540 features
->multiviewTessellationShader
= false;
543 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_DRAW_PARAMETER_FEATURES
: {
544 VkPhysicalDeviceShaderDrawParameterFeatures
*features
=
545 (VkPhysicalDeviceShaderDrawParameterFeatures
*)ext
;
546 features
->shaderDrawParameters
= false;
549 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROTECTED_MEMORY_FEATURES
: {
550 VkPhysicalDeviceProtectedMemoryFeatures
*features
=
551 (VkPhysicalDeviceProtectedMemoryFeatures
*)ext
;
552 features
->protectedMemory
= false;
555 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_16BIT_STORAGE_FEATURES
: {
556 VkPhysicalDevice16BitStorageFeatures
*features
=
557 (VkPhysicalDevice16BitStorageFeatures
*)ext
;
558 features
->storageBuffer16BitAccess
= false;
559 features
->uniformAndStorageBuffer16BitAccess
= false;
560 features
->storagePushConstant16
= false;
561 features
->storageInputOutput16
= false;
564 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SAMPLER_YCBCR_CONVERSION_FEATURES
: {
565 VkPhysicalDeviceSamplerYcbcrConversionFeatures
*features
=
566 (VkPhysicalDeviceSamplerYcbcrConversionFeatures
*)ext
;
567 features
->samplerYcbcrConversion
= false;
570 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DESCRIPTOR_INDEXING_FEATURES_EXT
: {
571 VkPhysicalDeviceDescriptorIndexingFeaturesEXT
*features
=
572 (VkPhysicalDeviceDescriptorIndexingFeaturesEXT
*)ext
;
573 features
->shaderInputAttachmentArrayDynamicIndexing
= false;
574 features
->shaderUniformTexelBufferArrayDynamicIndexing
= false;
575 features
->shaderStorageTexelBufferArrayDynamicIndexing
= false;
576 features
->shaderUniformBufferArrayNonUniformIndexing
= false;
577 features
->shaderSampledImageArrayNonUniformIndexing
= false;
578 features
->shaderStorageBufferArrayNonUniformIndexing
= false;
579 features
->shaderStorageImageArrayNonUniformIndexing
= false;
580 features
->shaderInputAttachmentArrayNonUniformIndexing
= false;
581 features
->shaderUniformTexelBufferArrayNonUniformIndexing
= false;
582 features
->shaderStorageTexelBufferArrayNonUniformIndexing
= false;
583 features
->descriptorBindingUniformBufferUpdateAfterBind
= false;
584 features
->descriptorBindingSampledImageUpdateAfterBind
= false;
585 features
->descriptorBindingStorageImageUpdateAfterBind
= false;
586 features
->descriptorBindingStorageBufferUpdateAfterBind
= false;
587 features
->descriptorBindingUniformTexelBufferUpdateAfterBind
= false;
588 features
->descriptorBindingStorageTexelBufferUpdateAfterBind
= false;
589 features
->descriptorBindingUpdateUnusedWhilePending
= false;
590 features
->descriptorBindingPartiallyBound
= false;
591 features
->descriptorBindingVariableDescriptorCount
= false;
592 features
->runtimeDescriptorArray
= false;
595 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_CONDITIONAL_RENDERING_FEATURES_EXT
: {
596 VkPhysicalDeviceConditionalRenderingFeaturesEXT
*features
=
597 (VkPhysicalDeviceConditionalRenderingFeaturesEXT
*)ext
;
598 features
->conditionalRendering
= false;
599 features
->inheritedConditionalRendering
= false;
606 return tu_GetPhysicalDeviceFeatures(physicalDevice
, &pFeatures
->features
);
void
tu_GetPhysicalDeviceProperties(VkPhysicalDevice physicalDevice,
                               VkPhysicalDeviceProperties *pProperties)
{
   TU_FROM_HANDLE(tu_physical_device, pdevice, physicalDevice);
   VkSampleCountFlags sample_counts = 0xf;

   /* Make sure that the entire descriptor set is addressable with a signed
    * 32-bit int. So the sum of all limits scaled by descriptor size has to
    * be at most 2 GiB. A combined image/sampler descriptor counts against
    * both the sampler and the sampled-image limits. This limit is for the
    * pipeline layout, not for the set layout, but there is no set limit,
    * so we just set a pipeline limit. I don't think any app is going to
    * hit this soon. */
   size_t max_descriptor_set_size =
      ((1ull << 31) - 16 * MAX_DYNAMIC_BUFFERS) /
      (32 /* uniform buffer, 32 due to potential space wasted on alignment */ +
       32 /* storage buffer, 32 due to potential space wasted on alignment */ +
       32 /* sampler, largest when combined with image */ +
       64 /* sampled image */ + 64 /* storage image */);
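   /* With the per-descriptor sizes above the denominator is 224 bytes, so
    * (assuming MAX_DYNAMIC_BUFFERS stays small) this works out to roughly
    * 2 GiB / 224 B, i.e. about 9.5 million descriptors per stage.
    */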
629 VkPhysicalDeviceLimits limits
= {
630 .maxImageDimension1D
= (1 << 14),
631 .maxImageDimension2D
= (1 << 14),
632 .maxImageDimension3D
= (1 << 11),
633 .maxImageDimensionCube
= (1 << 14),
634 .maxImageArrayLayers
= (1 << 11),
635 .maxTexelBufferElements
= 128 * 1024 * 1024,
636 .maxUniformBufferRange
= UINT32_MAX
,
637 .maxStorageBufferRange
= UINT32_MAX
,
638 .maxPushConstantsSize
= MAX_PUSH_CONSTANTS_SIZE
,
639 .maxMemoryAllocationCount
= UINT32_MAX
,
640 .maxSamplerAllocationCount
= 64 * 1024,
641 .bufferImageGranularity
= 64, /* A cache line */
642 .sparseAddressSpaceSize
= 0xffffffffu
, /* buffer max size */
643 .maxBoundDescriptorSets
= MAX_SETS
,
644 .maxPerStageDescriptorSamplers
= max_descriptor_set_size
,
645 .maxPerStageDescriptorUniformBuffers
= max_descriptor_set_size
,
646 .maxPerStageDescriptorStorageBuffers
= max_descriptor_set_size
,
647 .maxPerStageDescriptorSampledImages
= max_descriptor_set_size
,
648 .maxPerStageDescriptorStorageImages
= max_descriptor_set_size
,
649 .maxPerStageDescriptorInputAttachments
= max_descriptor_set_size
,
650 .maxPerStageResources
= max_descriptor_set_size
,
651 .maxDescriptorSetSamplers
= max_descriptor_set_size
,
652 .maxDescriptorSetUniformBuffers
= max_descriptor_set_size
,
653 .maxDescriptorSetUniformBuffersDynamic
= MAX_DYNAMIC_UNIFORM_BUFFERS
,
654 .maxDescriptorSetStorageBuffers
= max_descriptor_set_size
,
655 .maxDescriptorSetStorageBuffersDynamic
= MAX_DYNAMIC_STORAGE_BUFFERS
,
656 .maxDescriptorSetSampledImages
= max_descriptor_set_size
,
657 .maxDescriptorSetStorageImages
= max_descriptor_set_size
,
658 .maxDescriptorSetInputAttachments
= max_descriptor_set_size
,
659 .maxVertexInputAttributes
= 32,
660 .maxVertexInputBindings
= 32,
661 .maxVertexInputAttributeOffset
= 2047,
662 .maxVertexInputBindingStride
= 2048,
663 .maxVertexOutputComponents
= 128,
664 .maxTessellationGenerationLevel
= 64,
665 .maxTessellationPatchSize
= 32,
666 .maxTessellationControlPerVertexInputComponents
= 128,
667 .maxTessellationControlPerVertexOutputComponents
= 128,
668 .maxTessellationControlPerPatchOutputComponents
= 120,
669 .maxTessellationControlTotalOutputComponents
= 4096,
670 .maxTessellationEvaluationInputComponents
= 128,
671 .maxTessellationEvaluationOutputComponents
= 128,
672 .maxGeometryShaderInvocations
= 127,
673 .maxGeometryInputComponents
= 64,
674 .maxGeometryOutputComponents
= 128,
675 .maxGeometryOutputVertices
= 256,
676 .maxGeometryTotalOutputComponents
= 1024,
677 .maxFragmentInputComponents
= 128,
678 .maxFragmentOutputAttachments
= 8,
679 .maxFragmentDualSrcAttachments
= 1,
680 .maxFragmentCombinedOutputResources
= 8,
681 .maxComputeSharedMemorySize
= 32768,
682 .maxComputeWorkGroupCount
= { 65535, 65535, 65535 },
683 .maxComputeWorkGroupInvocations
= 2048,
684 .maxComputeWorkGroupSize
= { 2048, 2048, 2048 },
685 .subPixelPrecisionBits
= 4 /* FIXME */,
686 .subTexelPrecisionBits
= 4 /* FIXME */,
687 .mipmapPrecisionBits
= 4 /* FIXME */,
688 .maxDrawIndexedIndexValue
= UINT32_MAX
,
689 .maxDrawIndirectCount
= UINT32_MAX
,
690 .maxSamplerLodBias
= 16,
691 .maxSamplerAnisotropy
= 16,
692 .maxViewports
= MAX_VIEWPORTS
,
693 .maxViewportDimensions
= { (1 << 14), (1 << 14) },
694 .viewportBoundsRange
= { INT16_MIN
, INT16_MAX
},
695 .viewportSubPixelBits
= 8,
696 .minMemoryMapAlignment
= 4096, /* A page */
697 .minTexelBufferOffsetAlignment
= 1,
698 .minUniformBufferOffsetAlignment
= 4,
699 .minStorageBufferOffsetAlignment
= 4,
700 .minTexelOffset
= -32,
701 .maxTexelOffset
= 31,
702 .minTexelGatherOffset
= -32,
703 .maxTexelGatherOffset
= 31,
704 .minInterpolationOffset
= -2,
705 .maxInterpolationOffset
= 2,
706 .subPixelInterpolationOffsetBits
= 8,
707 .maxFramebufferWidth
= (1 << 14),
708 .maxFramebufferHeight
= (1 << 14),
709 .maxFramebufferLayers
= (1 << 10),
710 .framebufferColorSampleCounts
= sample_counts
,
711 .framebufferDepthSampleCounts
= sample_counts
,
712 .framebufferStencilSampleCounts
= sample_counts
,
713 .framebufferNoAttachmentsSampleCounts
= sample_counts
,
714 .maxColorAttachments
= MAX_RTS
,
715 .sampledImageColorSampleCounts
= sample_counts
,
716 .sampledImageIntegerSampleCounts
= VK_SAMPLE_COUNT_1_BIT
,
717 .sampledImageDepthSampleCounts
= sample_counts
,
718 .sampledImageStencilSampleCounts
= sample_counts
,
719 .storageImageSampleCounts
= VK_SAMPLE_COUNT_1_BIT
,
720 .maxSampleMaskWords
= 1,
721 .timestampComputeAndGraphics
= true,
722 .timestampPeriod
= 1,
723 .maxClipDistances
= 8,
724 .maxCullDistances
= 8,
725 .maxCombinedClipAndCullDistances
= 8,
726 .discreteQueuePriorities
= 1,
727 .pointSizeRange
= { 0.125, 255.875 },
728 .lineWidthRange
= { 0.0, 7.9921875 },
729 .pointSizeGranularity
= (1.0 / 8.0),
730 .lineWidthGranularity
= (1.0 / 128.0),
731 .strictLines
= false, /* FINISHME */
732 .standardSampleLocations
= true,
733 .optimalBufferCopyOffsetAlignment
= 128,
734 .optimalBufferCopyRowPitchAlignment
= 128,
735 .nonCoherentAtomSize
= 64,
   };

   *pProperties = (VkPhysicalDeviceProperties){
      .apiVersion = tu_physical_device_api_version(pdevice),
      .driverVersion = vk_get_driver_version(),
      .vendorID = 0, /* TODO */
      .deviceID = 0,
      .deviceType = VK_PHYSICAL_DEVICE_TYPE_INTEGRATED_GPU,
      .limits = limits,
      .sparseProperties = { 0 },
   };

   strcpy(pProperties->deviceName, pdevice->name);
   memcpy(pProperties->pipelineCacheUUID, pdevice->cache_uuid, VK_UUID_SIZE);
}
753 tu_GetPhysicalDeviceProperties2(VkPhysicalDevice physicalDevice
,
754 VkPhysicalDeviceProperties2KHR
*pProperties
)
756 TU_FROM_HANDLE(tu_physical_device
, pdevice
, physicalDevice
);
757 tu_GetPhysicalDeviceProperties(physicalDevice
, &pProperties
->properties
);
759 vk_foreach_struct(ext
, pProperties
->pNext
)
761 switch (ext
->sType
) {
762 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PUSH_DESCRIPTOR_PROPERTIES_KHR
: {
763 VkPhysicalDevicePushDescriptorPropertiesKHR
*properties
=
764 (VkPhysicalDevicePushDescriptorPropertiesKHR
*)ext
;
765 properties
->maxPushDescriptors
= MAX_PUSH_DESCRIPTORS
;
768 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_ID_PROPERTIES_KHR
: {
769 VkPhysicalDeviceIDPropertiesKHR
*properties
=
770 (VkPhysicalDeviceIDPropertiesKHR
*)ext
;
771 memcpy(properties
->driverUUID
, pdevice
->driver_uuid
, VK_UUID_SIZE
);
772 memcpy(properties
->deviceUUID
, pdevice
->device_uuid
, VK_UUID_SIZE
);
773 properties
->deviceLUIDValid
= false;
776 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MULTIVIEW_PROPERTIES_KHR
: {
777 VkPhysicalDeviceMultiviewPropertiesKHR
*properties
=
778 (VkPhysicalDeviceMultiviewPropertiesKHR
*)ext
;
779 properties
->maxMultiviewViewCount
= MAX_VIEWS
;
780 properties
->maxMultiviewInstanceIndex
= INT_MAX
;
783 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_POINT_CLIPPING_PROPERTIES_KHR
: {
784 VkPhysicalDevicePointClippingPropertiesKHR
*properties
=
785 (VkPhysicalDevicePointClippingPropertiesKHR
*)ext
;
786 properties
->pointClippingBehavior
=
787 VK_POINT_CLIPPING_BEHAVIOR_ALL_CLIP_PLANES_KHR
;
790 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MAINTENANCE_3_PROPERTIES
: {
791 VkPhysicalDeviceMaintenance3Properties
*properties
=
792 (VkPhysicalDeviceMaintenance3Properties
*)ext
;
793 /* Make sure everything is addressable by a signed 32-bit int, and
794 * our largest descriptors are 96 bytes. */
795 properties
->maxPerSetDescriptors
= (1ull << 31) / 96;
796 /* Our buffer size fields allow only this much */
797 properties
->maxMemoryAllocationSize
= 0xFFFFFFFFull
;
static const VkQueueFamilyProperties tu_queue_family_properties = {
   .queueFlags =
      VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT | VK_QUEUE_TRANSFER_BIT,
   .queueCount = 1,
   .timestampValidBits = 64,
   .minImageTransferGranularity = (VkExtent3D) { 1, 1, 1 },
};

void
tu_GetPhysicalDeviceQueueFamilyProperties(
   VkPhysicalDevice physicalDevice,
   uint32_t *pQueueFamilyPropertyCount,
   VkQueueFamilyProperties *pQueueFamilyProperties)
{
   VK_OUTARRAY_MAKE(out, pQueueFamilyProperties, pQueueFamilyPropertyCount);

   vk_outarray_append(&out, p) {
      *p = tu_queue_family_properties;
   }
}

void
tu_GetPhysicalDeviceQueueFamilyProperties2(
   VkPhysicalDevice physicalDevice,
   uint32_t *pQueueFamilyPropertyCount,
   VkQueueFamilyProperties2KHR *pQueueFamilyProperties)
{
   VK_OUTARRAY_MAKE(out, pQueueFamilyProperties, pQueueFamilyPropertyCount);

   vk_outarray_append(&out, p) {
      p->queueFamilyProperties = tu_queue_family_properties;
   }
}
static uint64_t
tu_get_system_heap_size()
{
   struct sysinfo info;
   sysinfo(&info);

   uint64_t total_ram = (uint64_t)info.totalram * (uint64_t)info.mem_unit;

   /* We don't want to burn too much ram with the GPU. If the user has 4GiB
    * or less, we use at most half. If they have more than 4GiB, we use 3/4.
    */
   uint64_t available_ram;
   if (total_ram <= 4ull * 1024ull * 1024ull * 1024ull)
      available_ram = total_ram / 2;
   else
      available_ram = total_ram * 3 / 4;

   return available_ram;
}
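/* For example: a system with 4 GiB of RAM advertises a 2 GiB heap, while an
 * 8 GiB system advertises 6 GiB.
 */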
void
tu_GetPhysicalDeviceMemoryProperties(
   VkPhysicalDevice physicalDevice,
   VkPhysicalDeviceMemoryProperties *pMemoryProperties)
{
   pMemoryProperties->memoryHeapCount = 1;
   pMemoryProperties->memoryHeaps[0].size = tu_get_system_heap_size();
   pMemoryProperties->memoryHeaps[0].flags = VK_MEMORY_HEAP_DEVICE_LOCAL_BIT;

   pMemoryProperties->memoryTypeCount = 1;
   pMemoryProperties->memoryTypes[0].propertyFlags =
      VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT |
      VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT |
      VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
   pMemoryProperties->memoryTypes[0].heapIndex = 0;
}

void
tu_GetPhysicalDeviceMemoryProperties2(
   VkPhysicalDevice physicalDevice,
   VkPhysicalDeviceMemoryProperties2KHR *pMemoryProperties)
{
   return tu_GetPhysicalDeviceMemoryProperties(
      physicalDevice, &pMemoryProperties->memoryProperties);
}
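/* Because the only memory type is host-visible, coherent and device-local,
 * the *MemoryRequirements callbacks further down can simply report
 * memoryTypeBits = 1 for every buffer and image.
 */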
static VkResult
tu_queue_init(struct tu_device *device,
              struct tu_queue *queue,
              uint32_t queue_family_index,
              int idx,
              VkDeviceQueueCreateFlags flags)
{
   queue->_loader_data.loaderMagic = ICD_LOADER_MAGIC;
   queue->device = device;
   queue->queue_family_index = queue_family_index;
   queue->queue_idx = idx;
   queue->flags = flags;

   return VK_SUCCESS;
}

static void
tu_queue_finish(struct tu_queue *queue)
{
}

static int
tu_get_device_extension_index(const char *name)
{
   for (unsigned i = 0; i < TU_DEVICE_EXTENSION_COUNT; ++i) {
      if (strcmp(name, tu_device_extensions[i].extensionName) == 0)
         return i;
   }
   return -1;
}
919 tu_CreateDevice(VkPhysicalDevice physicalDevice
,
920 const VkDeviceCreateInfo
*pCreateInfo
,
921 const VkAllocationCallbacks
*pAllocator
,
924 TU_FROM_HANDLE(tu_physical_device
, physical_device
, physicalDevice
);
926 struct tu_device
*device
;
928 /* Check enabled features */
929 if (pCreateInfo
->pEnabledFeatures
) {
930 VkPhysicalDeviceFeatures supported_features
;
931 tu_GetPhysicalDeviceFeatures(physicalDevice
, &supported_features
);
932 VkBool32
*supported_feature
= (VkBool32
*)&supported_features
;
933 VkBool32
*enabled_feature
= (VkBool32
*)pCreateInfo
->pEnabledFeatures
;
934 unsigned num_features
=
935 sizeof(VkPhysicalDeviceFeatures
) / sizeof(VkBool32
);
936 for (uint32_t i
= 0; i
< num_features
; i
++) {
937 if (enabled_feature
[i
] && !supported_feature
[i
])
938 return vk_error(physical_device
->instance
,
939 VK_ERROR_FEATURE_NOT_PRESENT
);
943 device
= vk_zalloc2(&physical_device
->instance
->alloc
,
947 VK_SYSTEM_ALLOCATION_SCOPE_DEVICE
);
949 return vk_error(physical_device
->instance
, VK_ERROR_OUT_OF_HOST_MEMORY
);
951 device
->_loader_data
.loaderMagic
= ICD_LOADER_MAGIC
;
952 device
->instance
= physical_device
->instance
;
953 device
->physical_device
= physical_device
;
956 device
->alloc
= *pAllocator
;
958 device
->alloc
= physical_device
->instance
->alloc
;
960 for (uint32_t i
= 0; i
< pCreateInfo
->enabledExtensionCount
; i
++) {
961 const char *ext_name
= pCreateInfo
->ppEnabledExtensionNames
[i
];
962 int index
= tu_get_device_extension_index(ext_name
);
964 !physical_device
->supported_extensions
.extensions
[index
]) {
965 vk_free(&device
->alloc
, device
);
966 return vk_error(physical_device
->instance
,
967 VK_ERROR_EXTENSION_NOT_PRESENT
);
970 device
->enabled_extensions
.extensions
[index
] = true;
973 for (unsigned i
= 0; i
< pCreateInfo
->queueCreateInfoCount
; i
++) {
974 const VkDeviceQueueCreateInfo
*queue_create
=
975 &pCreateInfo
->pQueueCreateInfos
[i
];
976 uint32_t qfi
= queue_create
->queueFamilyIndex
;
977 device
->queues
[qfi
] =
978 vk_alloc(&device
->alloc
,
979 queue_create
->queueCount
* sizeof(struct tu_queue
),
981 VK_SYSTEM_ALLOCATION_SCOPE_DEVICE
);
982 if (!device
->queues
[qfi
]) {
983 result
= VK_ERROR_OUT_OF_HOST_MEMORY
;
987 memset(device
->queues
[qfi
],
989 queue_create
->queueCount
* sizeof(struct tu_queue
));
991 device
->queue_count
[qfi
] = queue_create
->queueCount
;
993 for (unsigned q
= 0; q
< queue_create
->queueCount
; q
++) {
994 result
= tu_queue_init(
995 device
, &device
->queues
[qfi
][q
], qfi
, q
, queue_create
->flags
);
996 if (result
!= VK_SUCCESS
)
1001 VkPipelineCacheCreateInfo ci
;
1002 ci
.sType
= VK_STRUCTURE_TYPE_PIPELINE_CACHE_CREATE_INFO
;
1005 ci
.pInitialData
= NULL
;
1006 ci
.initialDataSize
= 0;
1009 tu_CreatePipelineCache(tu_device_to_handle(device
), &ci
, NULL
, &pc
);
1010 if (result
!= VK_SUCCESS
)
1013 device
->mem_cache
= tu_pipeline_cache_from_handle(pc
);
1015 *pDevice
= tu_device_to_handle(device
);
1019 for (unsigned i
= 0; i
< TU_MAX_QUEUE_FAMILIES
; i
++) {
1020 for (unsigned q
= 0; q
< device
->queue_count
[i
]; q
++)
1021 tu_queue_finish(&device
->queues
[i
][q
]);
1022 if (device
->queue_count
[i
])
1023 vk_free(&device
->alloc
, device
->queues
[i
]);
1026 vk_free(&device
->alloc
, device
);
1031 tu_DestroyDevice(VkDevice _device
, const VkAllocationCallbacks
*pAllocator
)
1033 TU_FROM_HANDLE(tu_device
, device
, _device
);
1038 for (unsigned i
= 0; i
< TU_MAX_QUEUE_FAMILIES
; i
++) {
1039 for (unsigned q
= 0; q
< device
->queue_count
[i
]; q
++)
1040 tu_queue_finish(&device
->queues
[i
][q
]);
1041 if (device
->queue_count
[i
])
1042 vk_free(&device
->alloc
, device
->queues
[i
]);
1045 VkPipelineCache pc
= tu_pipeline_cache_to_handle(device
->mem_cache
);
1046 tu_DestroyPipelineCache(tu_device_to_handle(device
), pc
, NULL
);
1048 vk_free(&device
->alloc
, device
);
VkResult
tu_EnumerateInstanceLayerProperties(uint32_t *pPropertyCount,
                                    VkLayerProperties *pProperties)
{
   *pPropertyCount = 0;
   return VK_SUCCESS;
}

VkResult
tu_EnumerateDeviceLayerProperties(VkPhysicalDevice physicalDevice,
                                  uint32_t *pPropertyCount,
                                  VkLayerProperties *pProperties)
{
   *pPropertyCount = 0;
   return VK_SUCCESS;
}
void
tu_GetDeviceQueue2(VkDevice _device,
                   const VkDeviceQueueInfo2 *pQueueInfo,
                   VkQueue *pQueue)
{
   TU_FROM_HANDLE(tu_device, device, _device);
   struct tu_queue *queue;

   queue =
      &device->queues[pQueueInfo->queueFamilyIndex][pQueueInfo->queueIndex];
   if (pQueueInfo->flags != queue->flags) {
      /* From the Vulkan 1.1.70 spec:
       *
       * "The queue returned by vkGetDeviceQueue2 must have the same
       * flags value from this structure as that used at device
       * creation time in a VkDeviceQueueCreateInfo instance. If no
       * matching flags were specified at device creation time then
       * pQueue will return VK_NULL_HANDLE."
       */
      *pQueue = VK_NULL_HANDLE;
      return;
   }

   *pQueue = tu_queue_to_handle(queue);
}

void
tu_GetDeviceQueue(VkDevice _device,
                  uint32_t queueFamilyIndex,
                  uint32_t queueIndex,
                  VkQueue *pQueue)
{
   const VkDeviceQueueInfo2 info =
      (VkDeviceQueueInfo2){ .sType = VK_STRUCTURE_TYPE_DEVICE_QUEUE_INFO_2,
                            .queueFamilyIndex = queueFamilyIndex,
                            .queueIndex = queueIndex };

   tu_GetDeviceQueue2(_device, &info, pQueue);
}
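/* Note: the VkDeviceQueueInfo2 above leaves .flags zero-initialized, so
 * tu_GetDeviceQueue behaves like vkGetDeviceQueue2 called with flags == 0.
 */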
VkResult
tu_QueueSubmit(VkQueue _queue,
               uint32_t submitCount,
               const VkSubmitInfo *pSubmits,
               VkFence _fence)
{
   return VK_SUCCESS;
}

VkResult
tu_QueueWaitIdle(VkQueue _queue)
{
   return VK_SUCCESS;
}

VkResult
tu_DeviceWaitIdle(VkDevice _device)
{
   TU_FROM_HANDLE(tu_device, device, _device);

   for (unsigned i = 0; i < TU_MAX_QUEUE_FAMILIES; i++) {
      for (unsigned q = 0; q < device->queue_count[i]; q++) {
         tu_QueueWaitIdle(tu_queue_to_handle(&device->queues[i][q]));
      }
   }
   return VK_SUCCESS;
}
VkResult
tu_EnumerateInstanceExtensionProperties(const char *pLayerName,
                                        uint32_t *pPropertyCount,
                                        VkExtensionProperties *pProperties)
{
   VK_OUTARRAY_MAKE(out, pProperties, pPropertyCount);

   /* We support no layers */
   if (pLayerName)
      return vk_error(NULL, VK_ERROR_LAYER_NOT_PRESENT);

   for (int i = 0; i < TU_INSTANCE_EXTENSION_COUNT; i++) {
      if (tu_supported_instance_extensions.extensions[i]) {
         vk_outarray_append(&out, prop) { *prop = tu_instance_extensions[i]; }
      }
   }

   return vk_outarray_status(&out);
}
VkResult
tu_EnumerateDeviceExtensionProperties(VkPhysicalDevice physicalDevice,
                                      const char *pLayerName,
                                      uint32_t *pPropertyCount,
                                      VkExtensionProperties *pProperties)
{
   TU_FROM_HANDLE(tu_physical_device, device, physicalDevice);
   VK_OUTARRAY_MAKE(out, pProperties, pPropertyCount);

   /* We support no layers */
   if (pLayerName)
      return vk_error(NULL, VK_ERROR_LAYER_NOT_PRESENT);

   for (int i = 0; i < TU_DEVICE_EXTENSION_COUNT; i++) {
      if (device->supported_extensions.extensions[i]) {
         vk_outarray_append(&out, prop) { *prop = tu_device_extensions[i]; }
      }
   }

   return vk_outarray_status(&out);
}
1180 tu_GetInstanceProcAddr(VkInstance _instance
, const char *pName
)
1182 TU_FROM_HANDLE(tu_instance
, instance
, _instance
);
1184 return tu_lookup_entrypoint_checked(pName
,
1185 instance
? instance
->api_version
: 0,
1186 instance
? &instance
->enabled_extensions
1191 /* The loader wants us to expose a second GetInstanceProcAddr function
1192 * to work around certain LD_PRELOAD issues seen in apps.
1195 VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL
1196 vk_icdGetInstanceProcAddr(VkInstance instance
, const char *pName
);
1199 VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL
1200 vk_icdGetInstanceProcAddr(VkInstance instance
, const char *pName
)
1202 return tu_GetInstanceProcAddr(instance
, pName
);
1206 tu_GetDeviceProcAddr(VkDevice _device
, const char *pName
)
1208 TU_FROM_HANDLE(tu_device
, device
, _device
);
1210 return tu_lookup_entrypoint_checked(pName
,
1211 device
->instance
->api_version
,
1212 &device
->instance
->enabled_extensions
,
1213 &device
->enabled_extensions
);
static VkResult
tu_alloc_memory(struct tu_device *device,
                const VkMemoryAllocateInfo *pAllocateInfo,
                const VkAllocationCallbacks *pAllocator,
                VkDeviceMemory *pMem)
{
   struct tu_device_memory *mem;

   assert(pAllocateInfo->sType == VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO);

   if (pAllocateInfo->allocationSize == 0) {
      /* Apparently, this is allowed */
      *pMem = VK_NULL_HANDLE;
      return VK_SUCCESS;
   }

   mem = vk_alloc2(&device->alloc,
                   pAllocator,
                   sizeof(*mem),
                   8,
                   VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (mem == NULL)
      return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);

   mem->bo = fd_bo_new(device->physical_device->drm_device,
                       pAllocateInfo->allocationSize,
                       DRM_FREEDRENO_GEM_CACHE_WCOMBINE |
                          DRM_FREEDRENO_GEM_TYPE_KMEM);
   if (!mem->bo) {
      vk_free2(&device->alloc, pAllocator, mem);
      return vk_error(device->instance, VK_ERROR_OUT_OF_DEVICE_MEMORY);
   }

   mem->size = pAllocateInfo->allocationSize;
   mem->type_index = pAllocateInfo->memoryTypeIndex;

   mem->map = NULL;
   mem->user_ptr = NULL;

   *pMem = tu_device_memory_to_handle(mem);

   return VK_SUCCESS;
}

VkResult
tu_AllocateMemory(VkDevice _device,
                  const VkMemoryAllocateInfo *pAllocateInfo,
                  const VkAllocationCallbacks *pAllocator,
                  VkDeviceMemory *pMem)
{
   TU_FROM_HANDLE(tu_device, device, _device);
   return tu_alloc_memory(device, pAllocateInfo, pAllocator, pMem);
}
void
tu_FreeMemory(VkDevice _device,
              VkDeviceMemory _mem,
              const VkAllocationCallbacks *pAllocator)
{
   TU_FROM_HANDLE(tu_device, device, _device);
   TU_FROM_HANDLE(tu_device_memory, mem, _mem);

   if (mem == NULL)
      return;

   fd_bo_del(mem->bo);

   vk_free2(&device->alloc, pAllocator, mem);
}
VkResult
tu_MapMemory(VkDevice _device,
             VkDeviceMemory _memory,
             VkDeviceSize offset,
             VkDeviceSize size,
             VkMemoryMapFlags flags,
             void **ppData)
{
   TU_FROM_HANDLE(tu_device, device, _device);
   TU_FROM_HANDLE(tu_device_memory, mem, _memory);

   if (mem == NULL) {
      *ppData = NULL;
      return VK_SUCCESS;
   }

   if (mem->user_ptr) {
      *ppData = mem->user_ptr;
   } else if (!mem->map) {
      *ppData = mem->map = fd_bo_map(mem->bo);
   } else {
      *ppData = mem->map;
   }

   if (*ppData) {
      *ppData = (char *)*ppData + offset;
      return VK_SUCCESS;
   }

   return vk_error(device->instance, VK_ERROR_MEMORY_MAP_FAILED);
}

void
tu_UnmapMemory(VkDevice _device, VkDeviceMemory _memory)
{
   /* I do not see any unmapping done by the freedreno Gallium driver. */
}
VkResult
tu_FlushMappedMemoryRanges(VkDevice _device,
                           uint32_t memoryRangeCount,
                           const VkMappedMemoryRange *pMemoryRanges)
{
   return VK_SUCCESS;
}

VkResult
tu_InvalidateMappedMemoryRanges(VkDevice _device,
                                uint32_t memoryRangeCount,
                                const VkMappedMemoryRange *pMemoryRanges)
{
   return VK_SUCCESS;
}
void
tu_GetBufferMemoryRequirements(VkDevice _device,
                               VkBuffer _buffer,
                               VkMemoryRequirements *pMemoryRequirements)
{
   TU_FROM_HANDLE(tu_buffer, buffer, _buffer);

   pMemoryRequirements->memoryTypeBits = 1;
   pMemoryRequirements->alignment = 16;
   pMemoryRequirements->size =
      align64(buffer->size, pMemoryRequirements->alignment);
}

void
tu_GetBufferMemoryRequirements2(
   VkDevice device,
   const VkBufferMemoryRequirementsInfo2KHR *pInfo,
   VkMemoryRequirements2KHR *pMemoryRequirements)
{
   tu_GetBufferMemoryRequirements(
      device, pInfo->buffer, &pMemoryRequirements->memoryRequirements);
}

void
tu_GetImageMemoryRequirements(VkDevice _device,
                              VkImage _image,
                              VkMemoryRequirements *pMemoryRequirements)
{
   TU_FROM_HANDLE(tu_image, image, _image);

   pMemoryRequirements->memoryTypeBits = 1;
   pMemoryRequirements->size = image->size;
   pMemoryRequirements->alignment = image->alignment;
}

void
tu_GetImageMemoryRequirements2(VkDevice device,
                               const VkImageMemoryRequirementsInfo2KHR *pInfo,
                               VkMemoryRequirements2KHR *pMemoryRequirements)
{
   tu_GetImageMemoryRequirements(
      device, pInfo->image, &pMemoryRequirements->memoryRequirements);
}
1383 tu_GetImageSparseMemoryRequirements(
1386 uint32_t *pSparseMemoryRequirementCount
,
1387 VkSparseImageMemoryRequirements
*pSparseMemoryRequirements
)
1393 tu_GetImageSparseMemoryRequirements2(
1395 const VkImageSparseMemoryRequirementsInfo2KHR
*pInfo
,
1396 uint32_t *pSparseMemoryRequirementCount
,
1397 VkSparseImageMemoryRequirements2KHR
*pSparseMemoryRequirements
)
1403 tu_GetDeviceMemoryCommitment(VkDevice device
,
1404 VkDeviceMemory memory
,
1405 VkDeviceSize
*pCommittedMemoryInBytes
)
1407 *pCommittedMemoryInBytes
= 0;
1411 tu_BindBufferMemory2(VkDevice device
,
1412 uint32_t bindInfoCount
,
1413 const VkBindBufferMemoryInfoKHR
*pBindInfos
)
1419 tu_BindBufferMemory(VkDevice device
,
1421 VkDeviceMemory memory
,
1422 VkDeviceSize memoryOffset
)
1424 const VkBindBufferMemoryInfoKHR info
= {
1425 .sType
= VK_STRUCTURE_TYPE_BIND_BUFFER_MEMORY_INFO_KHR
,
1428 .memoryOffset
= memoryOffset
1431 return tu_BindBufferMemory2(device
, 1, &info
);
VkResult
tu_BindImageMemory2(VkDevice device,
                    uint32_t bindInfoCount,
                    const VkBindImageMemoryInfoKHR *pBindInfos)
{
   return VK_SUCCESS;
}

VkResult
tu_BindImageMemory(VkDevice device,
                   VkImage image,
                   VkDeviceMemory memory,
                   VkDeviceSize memoryOffset)
{
   const VkBindImageMemoryInfoKHR info = {
      .sType = VK_STRUCTURE_TYPE_BIND_IMAGE_MEMORY_INFO_KHR,
      .image = image,
      .memory = memory,
      .memoryOffset = memoryOffset
   };

   return tu_BindImageMemory2(device, 1, &info);
}
1459 tu_QueueBindSparse(VkQueue _queue
,
1460 uint32_t bindInfoCount
,
1461 const VkBindSparseInfo
*pBindInfo
,
1468 tu_CreateFence(VkDevice _device
,
1469 const VkFenceCreateInfo
*pCreateInfo
,
1470 const VkAllocationCallbacks
*pAllocator
,
1473 TU_FROM_HANDLE(tu_device
, device
, _device
);
1475 struct tu_fence
*fence
= vk_alloc2(&device
->alloc
,
1479 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT
);
1482 return vk_error(device
->instance
, VK_ERROR_OUT_OF_HOST_MEMORY
);
1484 *pFence
= tu_fence_to_handle(fence
);
1490 tu_DestroyFence(VkDevice _device
,
1492 const VkAllocationCallbacks
*pAllocator
)
1494 TU_FROM_HANDLE(tu_device
, device
, _device
);
1495 TU_FROM_HANDLE(tu_fence
, fence
, _fence
);
1500 vk_free2(&device
->alloc
, pAllocator
, fence
);
1504 tu_WaitForFences(VkDevice _device
,
1505 uint32_t fenceCount
,
1506 const VkFence
*pFences
,
1514 tu_ResetFences(VkDevice _device
, uint32_t fenceCount
, const VkFence
*pFences
)
1520 tu_GetFenceStatus(VkDevice _device
, VkFence _fence
)
1525 // Queue semaphore functions
1528 tu_CreateSemaphore(VkDevice _device
,
1529 const VkSemaphoreCreateInfo
*pCreateInfo
,
1530 const VkAllocationCallbacks
*pAllocator
,
1531 VkSemaphore
*pSemaphore
)
1533 TU_FROM_HANDLE(tu_device
, device
, _device
);
1535 struct tu_semaphore
*sem
= vk_alloc2(&device
->alloc
,
1539 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT
);
1541 return vk_error(device
->instance
, VK_ERROR_OUT_OF_HOST_MEMORY
);
1543 *pSemaphore
= tu_semaphore_to_handle(sem
);
1548 tu_DestroySemaphore(VkDevice _device
,
1549 VkSemaphore _semaphore
,
1550 const VkAllocationCallbacks
*pAllocator
)
1552 TU_FROM_HANDLE(tu_device
, device
, _device
);
1553 TU_FROM_HANDLE(tu_semaphore
, sem
, _semaphore
);
1557 vk_free2(&device
->alloc
, pAllocator
, sem
);
1561 tu_CreateEvent(VkDevice _device
,
1562 const VkEventCreateInfo
*pCreateInfo
,
1563 const VkAllocationCallbacks
*pAllocator
,
1566 TU_FROM_HANDLE(tu_device
, device
, _device
);
1567 struct tu_event
*event
= vk_alloc2(&device
->alloc
,
1571 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT
);
1574 return vk_error(device
->instance
, VK_ERROR_OUT_OF_HOST_MEMORY
);
1576 *pEvent
= tu_event_to_handle(event
);
1582 tu_DestroyEvent(VkDevice _device
,
1584 const VkAllocationCallbacks
*pAllocator
)
1586 TU_FROM_HANDLE(tu_device
, device
, _device
);
1587 TU_FROM_HANDLE(tu_event
, event
, _event
);
1591 vk_free2(&device
->alloc
, pAllocator
, event
);
VkResult
tu_GetEventStatus(VkDevice _device, VkEvent _event)
{
   TU_FROM_HANDLE(tu_event, event, _event);

   if (*event->map == 1)
      return VK_EVENT_SET;
   return VK_EVENT_RESET;
}

VkResult
tu_SetEvent(VkDevice _device, VkEvent _event)
{
   TU_FROM_HANDLE(tu_event, event, _event);
   *event->map = 1;

   return VK_SUCCESS;
}

VkResult
tu_ResetEvent(VkDevice _device, VkEvent _event)
{
   TU_FROM_HANDLE(tu_event, event, _event);
   *event->map = 0;

   return VK_SUCCESS;
}
1623 tu_CreateBuffer(VkDevice _device
,
1624 const VkBufferCreateInfo
*pCreateInfo
,
1625 const VkAllocationCallbacks
*pAllocator
,
1628 TU_FROM_HANDLE(tu_device
, device
, _device
);
1629 struct tu_buffer
*buffer
;
1631 assert(pCreateInfo
->sType
== VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO
);
1633 buffer
= vk_alloc2(&device
->alloc
,
1637 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT
);
1639 return vk_error(device
->instance
, VK_ERROR_OUT_OF_HOST_MEMORY
);
1641 buffer
->size
= pCreateInfo
->size
;
1642 buffer
->usage
= pCreateInfo
->usage
;
1643 buffer
->flags
= pCreateInfo
->flags
;
1645 *pBuffer
= tu_buffer_to_handle(buffer
);
1651 tu_DestroyBuffer(VkDevice _device
,
1653 const VkAllocationCallbacks
*pAllocator
)
1655 TU_FROM_HANDLE(tu_device
, device
, _device
);
1656 TU_FROM_HANDLE(tu_buffer
, buffer
, _buffer
);
1661 vk_free2(&device
->alloc
, pAllocator
, buffer
);
static uint32_t
tu_surface_max_layer_count(struct tu_image_view *iview)
{
   return iview->type == VK_IMAGE_VIEW_TYPE_3D
             ? iview->extent.depth
             : (iview->base_layer + iview->layer_count);
}
1673 tu_CreateFramebuffer(VkDevice _device
,
1674 const VkFramebufferCreateInfo
*pCreateInfo
,
1675 const VkAllocationCallbacks
*pAllocator
,
1676 VkFramebuffer
*pFramebuffer
)
1678 TU_FROM_HANDLE(tu_device
, device
, _device
);
1679 struct tu_framebuffer
*framebuffer
;
1681 assert(pCreateInfo
->sType
== VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO
);
1684 sizeof(*framebuffer
) +
1685 sizeof(struct tu_attachment_info
) * pCreateInfo
->attachmentCount
;
1686 framebuffer
= vk_alloc2(
1687 &device
->alloc
, pAllocator
, size
, 8, VK_SYSTEM_ALLOCATION_SCOPE_OBJECT
);
1688 if (framebuffer
== NULL
)
1689 return vk_error(device
->instance
, VK_ERROR_OUT_OF_HOST_MEMORY
);
1691 framebuffer
->attachment_count
= pCreateInfo
->attachmentCount
;
1692 framebuffer
->width
= pCreateInfo
->width
;
1693 framebuffer
->height
= pCreateInfo
->height
;
1694 framebuffer
->layers
= pCreateInfo
->layers
;
1695 for (uint32_t i
= 0; i
< pCreateInfo
->attachmentCount
; i
++) {
1696 VkImageView _iview
= pCreateInfo
->pAttachments
[i
];
1697 struct tu_image_view
*iview
= tu_image_view_from_handle(_iview
);
1698 framebuffer
->attachments
[i
].attachment
= iview
;
1700 framebuffer
->width
= MIN2(framebuffer
->width
, iview
->extent
.width
);
1701 framebuffer
->height
= MIN2(framebuffer
->height
, iview
->extent
.height
);
1702 framebuffer
->layers
=
1703 MIN2(framebuffer
->layers
, tu_surface_max_layer_count(iview
));
1706 *pFramebuffer
= tu_framebuffer_to_handle(framebuffer
);
1711 tu_DestroyFramebuffer(VkDevice _device
,
1713 const VkAllocationCallbacks
*pAllocator
)
1715 TU_FROM_HANDLE(tu_device
, device
, _device
);
1716 TU_FROM_HANDLE(tu_framebuffer
, fb
, _fb
);
1720 vk_free2(&device
->alloc
, pAllocator
, fb
);
1724 tu_init_sampler(struct tu_device
*device
,
1725 struct tu_sampler
*sampler
,
1726 const VkSamplerCreateInfo
*pCreateInfo
)
1731 tu_CreateSampler(VkDevice _device
,
1732 const VkSamplerCreateInfo
*pCreateInfo
,
1733 const VkAllocationCallbacks
*pAllocator
,
1734 VkSampler
*pSampler
)
1736 TU_FROM_HANDLE(tu_device
, device
, _device
);
1737 struct tu_sampler
*sampler
;
1739 assert(pCreateInfo
->sType
== VK_STRUCTURE_TYPE_SAMPLER_CREATE_INFO
);
1741 sampler
= vk_alloc2(&device
->alloc
,
1745 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT
);
1747 return vk_error(device
->instance
, VK_ERROR_OUT_OF_HOST_MEMORY
);
1749 tu_init_sampler(device
, sampler
, pCreateInfo
);
1750 *pSampler
= tu_sampler_to_handle(sampler
);
1756 tu_DestroySampler(VkDevice _device
,
1758 const VkAllocationCallbacks
*pAllocator
)
1760 TU_FROM_HANDLE(tu_device
, device
, _device
);
1761 TU_FROM_HANDLE(tu_sampler
, sampler
, _sampler
);
1765 vk_free2(&device
->alloc
, pAllocator
, sampler
);
/* vk_icd.h does not declare this function, so we declare it here to
 * suppress Wmissing-prototypes.
 */
PUBLIC VKAPI_ATTR VkResult VKAPI_CALL
vk_icdNegotiateLoaderICDInterfaceVersion(uint32_t *pSupportedVersion);

PUBLIC VKAPI_ATTR VkResult VKAPI_CALL
vk_icdNegotiateLoaderICDInterfaceVersion(uint32_t *pSupportedVersion)
{
   /* For the full details on loader interface versioning, see
    * <https://github.com/KhronosGroup/Vulkan-LoaderAndValidationLayers/blob/master/loader/LoaderAndLayerInterface.md>.
    * What follows is a condensed summary, to help you navigate the large and
    * confusing official doc.
    *
    * - Loader interface v0 is incompatible with later versions. We don't
    *   support it.
    *
    * - In loader interface v1:
    *   - The first ICD entrypoint called by the loader is
    *     vk_icdGetInstanceProcAddr(). The ICD must statically expose this
    *     entrypoint.
    *   - The ICD must statically expose no other Vulkan symbol unless it is
    *     linked with -Bsymbolic.
    *   - Each dispatchable Vulkan handle created by the ICD must be
    *     a pointer to a struct whose first member is VK_LOADER_DATA. The
    *     ICD must initialize VK_LOADER_DATA.loadMagic to ICD_LOADER_MAGIC.
    *   - The loader implements vkCreate{PLATFORM}SurfaceKHR() and
    *     vkDestroySurfaceKHR(). The ICD must be capable of working with
    *     such loader-managed surfaces.
    *
    * - Loader interface v2 differs from v1 in:
    *   - The first ICD entrypoint called by the loader is
    *     vk_icdNegotiateLoaderICDInterfaceVersion(). The ICD must
    *     statically expose this entrypoint.
    *
    * - Loader interface v3 differs from v2 in:
    *   - The ICD must implement vkCreate{PLATFORM}SurfaceKHR(),
    *     vkDestroySurfaceKHR(), and other API which uses VKSurfaceKHR,
    *     because the loader no longer does so.
    */
   *pSupportedVersion = MIN2(*pSupportedVersion, 3u);
   return VK_SUCCESS;
}
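/* For example, a loader offering interface version 4 or higher is answered
 * with 3 here, while a loader that only offers version 2 keeps 2.
 */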
void
tu_GetPhysicalDeviceExternalSemaphoreProperties(
   VkPhysicalDevice physicalDevice,
   const VkPhysicalDeviceExternalSemaphoreInfoKHR *pExternalSemaphoreInfo,
   VkExternalSemaphorePropertiesKHR *pExternalSemaphoreProperties)
{
   pExternalSemaphoreProperties->exportFromImportedHandleTypes = 0;
   pExternalSemaphoreProperties->compatibleHandleTypes = 0;
   pExternalSemaphoreProperties->externalSemaphoreFeatures = 0;
}

void
tu_GetPhysicalDeviceExternalFenceProperties(
   VkPhysicalDevice physicalDevice,
   const VkPhysicalDeviceExternalFenceInfoKHR *pExternalFenceInfo,
   VkExternalFencePropertiesKHR *pExternalFenceProperties)
{
   pExternalFenceProperties->exportFromImportedHandleTypes = 0;
   pExternalFenceProperties->compatibleHandleTypes = 0;
   pExternalFenceProperties->externalFenceFeatures = 0;
}
1835 tu_CreateDebugReportCallbackEXT(
1836 VkInstance _instance
,
1837 const VkDebugReportCallbackCreateInfoEXT
*pCreateInfo
,
1838 const VkAllocationCallbacks
*pAllocator
,
1839 VkDebugReportCallbackEXT
*pCallback
)
1841 TU_FROM_HANDLE(tu_instance
, instance
, _instance
);
1842 return vk_create_debug_report_callback(&instance
->debug_report_callbacks
,
1850 tu_DestroyDebugReportCallbackEXT(VkInstance _instance
,
1851 VkDebugReportCallbackEXT _callback
,
1852 const VkAllocationCallbacks
*pAllocator
)
1854 TU_FROM_HANDLE(tu_instance
, instance
, _instance
);
1855 vk_destroy_debug_report_callback(&instance
->debug_report_callbacks
,
1862 tu_DebugReportMessageEXT(VkInstance _instance
,
1863 VkDebugReportFlagsEXT flags
,
1864 VkDebugReportObjectTypeEXT objectType
,
1867 int32_t messageCode
,
1868 const char *pLayerPrefix
,
1869 const char *pMessage
)
1871 TU_FROM_HANDLE(tu_instance
, instance
, _instance
);
1872 vk_debug_report(&instance
->debug_report_callbacks
,
void
tu_GetDeviceGroupPeerMemoryFeatures(
   VkDevice device,
   uint32_t heapIndex,
   uint32_t localDeviceIndex,
   uint32_t remoteDeviceIndex,
   VkPeerMemoryFeatureFlags *pPeerMemoryFeatures)
{
   assert(localDeviceIndex == remoteDeviceIndex);

   *pPeerMemoryFeatures = VK_PEER_MEMORY_FEATURE_COPY_SRC_BIT |
                          VK_PEER_MEMORY_FEATURE_COPY_DST_BIT |
                          VK_PEER_MEMORY_FEATURE_GENERIC_SRC_BIT |
                          VK_PEER_MEMORY_FEATURE_GENERIC_DST_BIT;
}