/*
 * Copyright © 2016 Red Hat.
 * Copyright © 2016 Bas Nieuwenhuizen
 *
 * based in part on anv driver which is:
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

28 #include "tu_private.h"
29 #include "util/debug.h"
30 #include "util/disk_cache.h"
31 #include "util/strtod.h"
32 #include "vk_format.h"
37 #include <sys/sysinfo.h>
static int
tu_device_get_cache_uuid(uint16_t family, void *uuid)
{
   uint32_t mesa_timestamp;
   uint16_t f = family;

   memset(uuid, 0, VK_UUID_SIZE);
   if (!disk_cache_get_function_timestamp(tu_device_get_cache_uuid,
                                          &mesa_timestamp))
      return -1;

   memcpy(uuid, &mesa_timestamp, 4);
   memcpy((char *)uuid + 4, &f, 2);
   snprintf((char *)uuid + 6, VK_UUID_SIZE - 10, "tu");
   return 0;
}

static void
tu_get_driver_uuid(void *uuid)
{
   memset(uuid, 0, VK_UUID_SIZE);
}

static void
tu_get_device_uuid(void *uuid)
{
   memset(uuid, 0, VK_UUID_SIZE);
}

static VkResult
tu_physical_device_init(struct tu_physical_device *device,
                        struct tu_instance *instance,
                        drmDevicePtr drm_device)
{
   const char *path = drm_device->nodes[DRM_NODE_RENDER];
   VkResult result = VK_SUCCESS;
   drmVersionPtr version;
   int fd;
   int master_fd = -1;
   struct fd_pipe *tmp_pipe = NULL;
   uint64_t val;

   fd = open(path, O_RDWR | O_CLOEXEC);
   if (fd < 0) {
      if (instance->debug_flags & TU_DEBUG_STARTUP)
         tu_logi("Could not open device '%s'", path);

      return vk_error(instance, VK_ERROR_INCOMPATIBLE_DRIVER);
   }

   version = drmGetVersion(fd);
   if (!version) {
      close(fd);

      if (instance->debug_flags & TU_DEBUG_STARTUP)
         tu_logi("Could not get the kernel driver version for device '%s'",
                 path);

      return vk_errorf(instance,
                       VK_ERROR_INCOMPATIBLE_DRIVER,
                       "failed to get version %s: %m",
                       path);
   }

   if (strcmp(version->name, "msm")) {
      drmFreeVersion(version);
      close(fd);

      if (instance->debug_flags & TU_DEBUG_STARTUP)
         tu_logi("Device '%s' is not using the msm kernel driver.", path);

      return VK_ERROR_INCOMPATIBLE_DRIVER;
   }
   drmFreeVersion(version);

   if (instance->debug_flags & TU_DEBUG_STARTUP)
      tu_logi("Found compatible device '%s'.", path);

   device->_loader_data.loaderMagic = ICD_LOADER_MAGIC;
   device->instance = instance;
   assert(strlen(path) < ARRAY_SIZE(device->path));
   strncpy(device->path, path, ARRAY_SIZE(device->path));

   if (instance->enabled_extensions.KHR_display) {
      master_fd =
         open(drm_device->nodes[DRM_NODE_PRIMARY], O_RDWR | O_CLOEXEC);
      if (master_fd >= 0) {
         /* TODO: free master_fd if accel is not working? */
      }
   }

   device->master_fd = master_fd;
   device->local_fd = fd;

   device->drm_device = fd_device_new_dup(fd);
   if (!device->drm_device) {
      result = vk_errorf(
         instance, VK_ERROR_INITIALIZATION_FAILED, "could not create the libdrm device");
      goto fail;
   }

   tmp_pipe = fd_pipe_new(device->drm_device, FD_PIPE_3D);
   if (!tmp_pipe) {
      result = vk_errorf(
         instance, VK_ERROR_INITIALIZATION_FAILED, "could not open the 3D pipe");
      goto fail;
   }

   if (fd_pipe_get_param(tmp_pipe, FD_GPU_ID, &val)) {
      result = vk_errorf(
         instance, VK_ERROR_INITIALIZATION_FAILED, "could not get GPU ID");
      goto fail;
   }
   device->gpu_id = val;

   if (fd_pipe_get_param(tmp_pipe, FD_GMEM_SIZE, &val)) {
      result = vk_errorf(
         instance, VK_ERROR_INITIALIZATION_FAILED, "could not get GMEM size");
      goto fail;
   }
   device->gmem_size = val;

   fd_pipe_del(tmp_pipe);
   tmp_pipe = NULL;

   memset(device->name, 0, sizeof(device->name));
   sprintf(device->name, "FD%d", device->gpu_id);

   switch (device->gpu_id) {
   case 630:
      break;
   default:
      if (instance->debug_flags & TU_DEBUG_STARTUP)
         tu_logi("Device '%s' is not supported.", device->name);
      result = vk_errorf(
         instance, VK_ERROR_INITIALIZATION_FAILED, "unsupported device");
      goto fail;
   }

   if (tu_device_get_cache_uuid(device->gpu_id, device->cache_uuid)) {
      result = vk_errorf(
         instance, VK_ERROR_INITIALIZATION_FAILED, "cannot generate UUID");
      goto fail;
   }

   /* The gpu id is already embedded in the uuid so we just pass "tu"
    * when creating the cache.
    */
   char buf[VK_UUID_SIZE * 2 + 1];
   disk_cache_format_hex_id(buf, device->cache_uuid, VK_UUID_SIZE * 2);
   device->disk_cache = disk_cache_create(device->name, buf, 0);

   fprintf(stderr,
           "WARNING: tu is not a conformant vulkan implementation, "
           "testing use only.\n");

   tu_get_driver_uuid(&device->driver_uuid);
   tu_get_device_uuid(&device->device_uuid);

   tu_fill_device_extension_table(device, &device->supported_extensions);

   if (result != VK_SUCCESS) {
      vk_error(instance, result);
      goto fail;
   }

   return VK_SUCCESS;

fail:
   if (tmp_pipe)
      fd_pipe_del(tmp_pipe);
   if (device->drm_device)
      fd_device_del(device->drm_device);
   close(fd);
   if (master_fd != -1)
      close(master_fd);
   return result;
}

static void
tu_physical_device_finish(struct tu_physical_device *device)
{
   disk_cache_destroy(device->disk_cache);

   close(device->local_fd);
   if (device->master_fd != -1)
      close(device->master_fd);
}

static void *
default_alloc_func(void *pUserData,
                   size_t size,
                   size_t align,
                   VkSystemAllocationScope allocationScope)
{
   return malloc(size);
}

static void *
default_realloc_func(void *pUserData,
                     void *pOriginal,
                     size_t size,
                     size_t align,
                     VkSystemAllocationScope allocationScope)
{
   return realloc(pOriginal, size);
}

static void
default_free_func(void *pUserData, void *pMemory)
{
   free(pMemory);
}

static const VkAllocationCallbacks default_alloc = {
   .pUserData = NULL,
   .pfnAllocation = default_alloc_func,
   .pfnReallocation = default_realloc_func,
   .pfnFree = default_free_func,
};

static const struct debug_control tu_debug_options[] = { { "startup",
                                                           TU_DEBUG_STARTUP },
                                                         { NULL, 0 } };

const char *
tu_get_debug_option_name(int id)
{
   assert(id < ARRAY_SIZE(tu_debug_options) - 1);
   return tu_debug_options[id].string;
}

static int
tu_get_instance_extension_index(const char *name)
{
   for (unsigned i = 0; i < TU_INSTANCE_EXTENSION_COUNT; ++i) {
      if (strcmp(name, tu_instance_extensions[i].extensionName) == 0)
         return i;
   }
   return -1;
}

VkResult
tu_CreateInstance(const VkInstanceCreateInfo *pCreateInfo,
                  const VkAllocationCallbacks *pAllocator,
                  VkInstance *pInstance)
{
   struct tu_instance *instance;
   VkResult result;

   assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_INSTANCE_CREATE_INFO);

   uint32_t client_version;
   if (pCreateInfo->pApplicationInfo &&
       pCreateInfo->pApplicationInfo->apiVersion != 0) {
      client_version = pCreateInfo->pApplicationInfo->apiVersion;
   } else {
      tu_EnumerateInstanceVersion(&client_version);
   }

   instance = vk_zalloc2(&default_alloc,
                         pAllocator,
                         sizeof(*instance),
                         8,
                         VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
   if (!instance)
      return vk_error(NULL, VK_ERROR_OUT_OF_HOST_MEMORY);

   instance->_loader_data.loaderMagic = ICD_LOADER_MAGIC;

   if (pAllocator)
      instance->alloc = *pAllocator;
   else
      instance->alloc = default_alloc;

   instance->api_version = client_version;
   instance->physical_device_count = -1;

   instance->debug_flags =
      parse_debug_string(getenv("TU_DEBUG"), tu_debug_options);

   if (instance->debug_flags & TU_DEBUG_STARTUP)
      tu_logi("Created an instance");

   for (uint32_t i = 0; i < pCreateInfo->enabledExtensionCount; i++) {
      const char *ext_name = pCreateInfo->ppEnabledExtensionNames[i];
      int index = tu_get_instance_extension_index(ext_name);

      if (index < 0 || !tu_supported_instance_extensions.extensions[index]) {
         vk_free2(&default_alloc, pAllocator, instance);
         return vk_error(instance, VK_ERROR_EXTENSION_NOT_PRESENT);
      }

      instance->enabled_extensions.extensions[index] = true;
   }

   result = vk_debug_report_instance_init(&instance->debug_report_callbacks);
   if (result != VK_SUCCESS) {
      vk_free2(&default_alloc, pAllocator, instance);
      return vk_error(instance, result);
   }

   VG(VALGRIND_CREATE_MEMPOOL(instance, 0, false));

   *pInstance = tu_instance_to_handle(instance);

   return VK_SUCCESS;
}

void
tu_DestroyInstance(VkInstance _instance,
                   const VkAllocationCallbacks *pAllocator)
{
   TU_FROM_HANDLE(tu_instance, instance, _instance);

   if (!instance)
      return;

   for (int i = 0; i < instance->physical_device_count; ++i) {
      tu_physical_device_finish(instance->physical_devices + i);
   }

   VG(VALGRIND_DESTROY_MEMPOOL(instance));

   vk_debug_report_instance_destroy(&instance->debug_report_callbacks);

   vk_free(&instance->alloc, instance);
}

static VkResult
tu_enumerate_devices(struct tu_instance *instance)
{
   /* TODO: Check for more devices ? */
   drmDevicePtr devices[8];
   VkResult result = VK_ERROR_INCOMPATIBLE_DRIVER;
   int max_devices;

   instance->physical_device_count = 0;

   max_devices = drmGetDevices2(0, devices, ARRAY_SIZE(devices));

   if (instance->debug_flags & TU_DEBUG_STARTUP)
      tu_logi("Found %d drm nodes", max_devices);

   if (max_devices < 1)
      return vk_error(instance, VK_ERROR_INCOMPATIBLE_DRIVER);

   for (unsigned i = 0; i < (unsigned)max_devices; i++) {
      if (devices[i]->available_nodes & 1 << DRM_NODE_RENDER &&
          devices[i]->bustype == DRM_BUS_PLATFORM) {

         result = tu_physical_device_init(instance->physical_devices +
                                             instance->physical_device_count,
                                          instance,
                                          devices[i]);
         if (result == VK_SUCCESS)
            ++instance->physical_device_count;
         else if (result != VK_ERROR_INCOMPATIBLE_DRIVER)
            break;
      }
   }
   drmFreeDevices(devices, max_devices);

   return result;
}

VkResult
tu_EnumeratePhysicalDevices(VkInstance _instance,
                            uint32_t *pPhysicalDeviceCount,
                            VkPhysicalDevice *pPhysicalDevices)
{
   TU_FROM_HANDLE(tu_instance, instance, _instance);
   VK_OUTARRAY_MAKE(out, pPhysicalDevices, pPhysicalDeviceCount);

   VkResult result;

   if (instance->physical_device_count < 0) {
      result = tu_enumerate_devices(instance);
      if (result != VK_SUCCESS && result != VK_ERROR_INCOMPATIBLE_DRIVER)
         return result;
   }

   for (uint32_t i = 0; i < instance->physical_device_count; ++i) {
      vk_outarray_append(&out, p) {
         *p = tu_physical_device_to_handle(instance->physical_devices + i);
      }
   }

   return vk_outarray_status(&out);
}

VkResult
tu_EnumeratePhysicalDeviceGroups(
   VkInstance _instance,
   uint32_t *pPhysicalDeviceGroupCount,
   VkPhysicalDeviceGroupProperties *pPhysicalDeviceGroupProperties)
{
   TU_FROM_HANDLE(tu_instance, instance, _instance);
   VK_OUTARRAY_MAKE(
      out, pPhysicalDeviceGroupProperties, pPhysicalDeviceGroupCount);

   VkResult result;

   if (instance->physical_device_count < 0) {
      result = tu_enumerate_devices(instance);
      if (result != VK_SUCCESS && result != VK_ERROR_INCOMPATIBLE_DRIVER)
         return result;
   }

   for (uint32_t i = 0; i < instance->physical_device_count; ++i) {
      vk_outarray_append(&out, p) {
         p->physicalDeviceCount = 1;
         p->physicalDevices[0] =
            tu_physical_device_to_handle(instance->physical_devices + i);
         p->subsetAllocation = false;
      }
   }

   return vk_outarray_status(&out);
}

void
tu_GetPhysicalDeviceFeatures(VkPhysicalDevice physicalDevice,
                             VkPhysicalDeviceFeatures *pFeatures)
{
   memset(pFeatures, 0, sizeof(*pFeatures));

   *pFeatures = (VkPhysicalDeviceFeatures){
      .robustBufferAccess = false,
      .fullDrawIndexUint32 = false,
      .imageCubeArray = false,
      .independentBlend = false,
      .geometryShader = false,
      .tessellationShader = false,
      .sampleRateShading = false,
      .dualSrcBlend = false,
      .logicOp = false,
      .multiDrawIndirect = false,
      .drawIndirectFirstInstance = false,
      .depthClamp = false,
      .depthBiasClamp = false,
      .fillModeNonSolid = false,
      .depthBounds = false,
      .wideLines = false,
      .largePoints = false,
      .alphaToOne = false,
      .multiViewport = false,
      .samplerAnisotropy = false,
      .textureCompressionETC2 = false,
      .textureCompressionASTC_LDR = false,
      .textureCompressionBC = false,
      .occlusionQueryPrecise = false,
      .pipelineStatisticsQuery = false,
      .vertexPipelineStoresAndAtomics = false,
      .fragmentStoresAndAtomics = false,
      .shaderTessellationAndGeometryPointSize = false,
      .shaderImageGatherExtended = false,
      .shaderStorageImageExtendedFormats = false,
      .shaderStorageImageMultisample = false,
      .shaderUniformBufferArrayDynamicIndexing = false,
      .shaderSampledImageArrayDynamicIndexing = false,
      .shaderStorageBufferArrayDynamicIndexing = false,
      .shaderStorageImageArrayDynamicIndexing = false,
      .shaderStorageImageReadWithoutFormat = false,
      .shaderStorageImageWriteWithoutFormat = false,
      .shaderClipDistance = false,
      .shaderCullDistance = false,
      .shaderFloat64 = false,
      .shaderInt64 = false,
      .shaderInt16 = false,
      .sparseBinding = false,
      .variableMultisampleRate = false,
      .inheritedQueries = false,
   };
}

void
tu_GetPhysicalDeviceFeatures2(VkPhysicalDevice physicalDevice,
                              VkPhysicalDeviceFeatures2KHR *pFeatures)
{
   vk_foreach_struct(ext, pFeatures->pNext)
   {
      switch (ext->sType) {
      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VARIABLE_POINTER_FEATURES_KHR: {
         VkPhysicalDeviceVariablePointerFeaturesKHR *features = (void *)ext;
         features->variablePointersStorageBuffer = false;
         features->variablePointers = false;
         break;
      }
      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MULTIVIEW_FEATURES_KHR: {
         VkPhysicalDeviceMultiviewFeaturesKHR *features =
            (VkPhysicalDeviceMultiviewFeaturesKHR *)ext;
         features->multiview = false;
         features->multiviewGeometryShader = false;
         features->multiviewTessellationShader = false;
         break;
      }
      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_DRAW_PARAMETER_FEATURES: {
         VkPhysicalDeviceShaderDrawParameterFeatures *features =
            (VkPhysicalDeviceShaderDrawParameterFeatures *)ext;
         features->shaderDrawParameters = false;
         break;
      }
      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROTECTED_MEMORY_FEATURES: {
         VkPhysicalDeviceProtectedMemoryFeatures *features =
            (VkPhysicalDeviceProtectedMemoryFeatures *)ext;
         features->protectedMemory = false;
         break;
      }
      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_16BIT_STORAGE_FEATURES: {
         VkPhysicalDevice16BitStorageFeatures *features =
            (VkPhysicalDevice16BitStorageFeatures *)ext;
         features->storageBuffer16BitAccess = false;
         features->uniformAndStorageBuffer16BitAccess = false;
         features->storagePushConstant16 = false;
         features->storageInputOutput16 = false;
         break;
      }
      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SAMPLER_YCBCR_CONVERSION_FEATURES: {
         VkPhysicalDeviceSamplerYcbcrConversionFeatures *features =
            (VkPhysicalDeviceSamplerYcbcrConversionFeatures *)ext;
         features->samplerYcbcrConversion = false;
         break;
      }
      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DESCRIPTOR_INDEXING_FEATURES_EXT: {
         VkPhysicalDeviceDescriptorIndexingFeaturesEXT *features =
            (VkPhysicalDeviceDescriptorIndexingFeaturesEXT *)ext;
         features->shaderInputAttachmentArrayDynamicIndexing = false;
         features->shaderUniformTexelBufferArrayDynamicIndexing = false;
         features->shaderStorageTexelBufferArrayDynamicIndexing = false;
         features->shaderUniformBufferArrayNonUniformIndexing = false;
         features->shaderSampledImageArrayNonUniformIndexing = false;
         features->shaderStorageBufferArrayNonUniformIndexing = false;
         features->shaderStorageImageArrayNonUniformIndexing = false;
         features->shaderInputAttachmentArrayNonUniformIndexing = false;
         features->shaderUniformTexelBufferArrayNonUniformIndexing = false;
         features->shaderStorageTexelBufferArrayNonUniformIndexing = false;
         features->descriptorBindingUniformBufferUpdateAfterBind = false;
         features->descriptorBindingSampledImageUpdateAfterBind = false;
         features->descriptorBindingStorageImageUpdateAfterBind = false;
         features->descriptorBindingStorageBufferUpdateAfterBind = false;
         features->descriptorBindingUniformTexelBufferUpdateAfterBind = false;
         features->descriptorBindingStorageTexelBufferUpdateAfterBind = false;
         features->descriptorBindingUpdateUnusedWhilePending = false;
         features->descriptorBindingPartiallyBound = false;
         features->descriptorBindingVariableDescriptorCount = false;
         features->runtimeDescriptorArray = false;
         break;
      }
      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_CONDITIONAL_RENDERING_FEATURES_EXT: {
         VkPhysicalDeviceConditionalRenderingFeaturesEXT *features =
            (VkPhysicalDeviceConditionalRenderingFeaturesEXT *)ext;
         features->conditionalRendering = false;
         features->inheritedConditionalRendering = false;
         break;
      }
      default:
         break;
      }
   }
   return tu_GetPhysicalDeviceFeatures(physicalDevice, &pFeatures->features);
}

void
tu_GetPhysicalDeviceProperties(VkPhysicalDevice physicalDevice,
                               VkPhysicalDeviceProperties *pProperties)
{
   TU_FROM_HANDLE(tu_physical_device, pdevice, physicalDevice);
   VkSampleCountFlags sample_counts = 0xf;

   /* make sure that the entire descriptor set is addressable with a signed
    * 32-bit int. So the sum of all limits scaled by descriptor size has to
    * be at most 2 GiB. the combined image & samples object count as one of
    * both. This limit is for the pipeline layout, not for the set layout, but
    * there is no set limit, so we just set a pipeline limit. I don't think
    * any app is going to hit this soon. */
   size_t max_descriptor_set_size =
      ((1ull << 31) - 16 * MAX_DYNAMIC_BUFFERS) /
      (32 /* uniform buffer, 32 due to potential space wasted on alignment */ +
       32 /* storage buffer, 32 due to potential space wasted on alignment */ +
       32 /* sampler, largest when combined with image */ +
       64 /* sampled image */ + 64 /* storage image */);
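   /* Worked out: the per-descriptor weights above sum to
    * 32 + 32 + 32 + 64 + 64 = 224 bytes, so this limit comes out to roughly
    * 2 GiB / 224 ≈ 9.5 million descriptors per stage, minus the small
    * dynamic-buffer reservation.
    */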

   VkPhysicalDeviceLimits limits = {
      .maxImageDimension1D = (1 << 14),
      .maxImageDimension2D = (1 << 14),
      .maxImageDimension3D = (1 << 11),
      .maxImageDimensionCube = (1 << 14),
      .maxImageArrayLayers = (1 << 11),
      .maxTexelBufferElements = 128 * 1024 * 1024,
      .maxUniformBufferRange = UINT32_MAX,
      .maxStorageBufferRange = UINT32_MAX,
      .maxPushConstantsSize = MAX_PUSH_CONSTANTS_SIZE,
      .maxMemoryAllocationCount = UINT32_MAX,
      .maxSamplerAllocationCount = 64 * 1024,
      .bufferImageGranularity = 64, /* A cache line */
      .sparseAddressSpaceSize = 0xffffffffu, /* buffer max size */
      .maxBoundDescriptorSets = MAX_SETS,
      .maxPerStageDescriptorSamplers = max_descriptor_set_size,
      .maxPerStageDescriptorUniformBuffers = max_descriptor_set_size,
      .maxPerStageDescriptorStorageBuffers = max_descriptor_set_size,
      .maxPerStageDescriptorSampledImages = max_descriptor_set_size,
      .maxPerStageDescriptorStorageImages = max_descriptor_set_size,
      .maxPerStageDescriptorInputAttachments = max_descriptor_set_size,
      .maxPerStageResources = max_descriptor_set_size,
      .maxDescriptorSetSamplers = max_descriptor_set_size,
      .maxDescriptorSetUniformBuffers = max_descriptor_set_size,
      .maxDescriptorSetUniformBuffersDynamic = MAX_DYNAMIC_UNIFORM_BUFFERS,
      .maxDescriptorSetStorageBuffers = max_descriptor_set_size,
      .maxDescriptorSetStorageBuffersDynamic = MAX_DYNAMIC_STORAGE_BUFFERS,
      .maxDescriptorSetSampledImages = max_descriptor_set_size,
      .maxDescriptorSetStorageImages = max_descriptor_set_size,
      .maxDescriptorSetInputAttachments = max_descriptor_set_size,
      .maxVertexInputAttributes = 32,
      .maxVertexInputBindings = 32,
      .maxVertexInputAttributeOffset = 2047,
      .maxVertexInputBindingStride = 2048,
      .maxVertexOutputComponents = 128,
      .maxTessellationGenerationLevel = 64,
      .maxTessellationPatchSize = 32,
      .maxTessellationControlPerVertexInputComponents = 128,
      .maxTessellationControlPerVertexOutputComponents = 128,
      .maxTessellationControlPerPatchOutputComponents = 120,
      .maxTessellationControlTotalOutputComponents = 4096,
      .maxTessellationEvaluationInputComponents = 128,
      .maxTessellationEvaluationOutputComponents = 128,
      .maxGeometryShaderInvocations = 127,
      .maxGeometryInputComponents = 64,
      .maxGeometryOutputComponents = 128,
      .maxGeometryOutputVertices = 256,
      .maxGeometryTotalOutputComponents = 1024,
      .maxFragmentInputComponents = 128,
      .maxFragmentOutputAttachments = 8,
      .maxFragmentDualSrcAttachments = 1,
      .maxFragmentCombinedOutputResources = 8,
      .maxComputeSharedMemorySize = 32768,
      .maxComputeWorkGroupCount = { 65535, 65535, 65535 },
      .maxComputeWorkGroupInvocations = 2048,
      .maxComputeWorkGroupSize = { 2048, 2048, 2048 },
      .subPixelPrecisionBits = 4 /* FIXME */,
      .subTexelPrecisionBits = 4 /* FIXME */,
      .mipmapPrecisionBits = 4 /* FIXME */,
      .maxDrawIndexedIndexValue = UINT32_MAX,
      .maxDrawIndirectCount = UINT32_MAX,
      .maxSamplerLodBias = 16,
      .maxSamplerAnisotropy = 16,
      .maxViewports = MAX_VIEWPORTS,
      .maxViewportDimensions = { (1 << 14), (1 << 14) },
      .viewportBoundsRange = { INT16_MIN, INT16_MAX },
      .viewportSubPixelBits = 8,
      .minMemoryMapAlignment = 4096, /* A page */
      .minTexelBufferOffsetAlignment = 1,
      .minUniformBufferOffsetAlignment = 4,
      .minStorageBufferOffsetAlignment = 4,
      .minTexelOffset = -32,
      .maxTexelOffset = 31,
      .minTexelGatherOffset = -32,
      .maxTexelGatherOffset = 31,
      .minInterpolationOffset = -2,
      .maxInterpolationOffset = 2,
      .subPixelInterpolationOffsetBits = 8,
      .maxFramebufferWidth = (1 << 14),
      .maxFramebufferHeight = (1 << 14),
      .maxFramebufferLayers = (1 << 10),
      .framebufferColorSampleCounts = sample_counts,
      .framebufferDepthSampleCounts = sample_counts,
      .framebufferStencilSampleCounts = sample_counts,
      .framebufferNoAttachmentsSampleCounts = sample_counts,
      .maxColorAttachments = MAX_RTS,
      .sampledImageColorSampleCounts = sample_counts,
      .sampledImageIntegerSampleCounts = VK_SAMPLE_COUNT_1_BIT,
      .sampledImageDepthSampleCounts = sample_counts,
      .sampledImageStencilSampleCounts = sample_counts,
      .storageImageSampleCounts = VK_SAMPLE_COUNT_1_BIT,
      .maxSampleMaskWords = 1,
      .timestampComputeAndGraphics = true,
      .timestampPeriod = 1,
      .maxClipDistances = 8,
      .maxCullDistances = 8,
      .maxCombinedClipAndCullDistances = 8,
      .discreteQueuePriorities = 1,
      .pointSizeRange = { 0.125, 255.875 },
      .lineWidthRange = { 0.0, 7.9921875 },
      .pointSizeGranularity = (1.0 / 8.0),
      .lineWidthGranularity = (1.0 / 128.0),
      .strictLines = false, /* FINISHME */
      .standardSampleLocations = true,
      .optimalBufferCopyOffsetAlignment = 128,
      .optimalBufferCopyRowPitchAlignment = 128,
      .nonCoherentAtomSize = 64,
   };

   *pProperties = (VkPhysicalDeviceProperties){
      .apiVersion = tu_physical_device_api_version(pdevice),
      .driverVersion = vk_get_driver_version(),
      .vendorID = 0, /* TODO */
      .deviceID = 0,
      .deviceType = VK_PHYSICAL_DEVICE_TYPE_INTEGRATED_GPU,
      .limits = limits,
      .sparseProperties = { 0 },
   };

   strcpy(pProperties->deviceName, pdevice->name);
   memcpy(pProperties->pipelineCacheUUID, pdevice->cache_uuid, VK_UUID_SIZE);
}

void
tu_GetPhysicalDeviceProperties2(VkPhysicalDevice physicalDevice,
                                VkPhysicalDeviceProperties2KHR *pProperties)
{
   TU_FROM_HANDLE(tu_physical_device, pdevice, physicalDevice);
   tu_GetPhysicalDeviceProperties(physicalDevice, &pProperties->properties);

   vk_foreach_struct(ext, pProperties->pNext)
   {
      switch (ext->sType) {
      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PUSH_DESCRIPTOR_PROPERTIES_KHR: {
         VkPhysicalDevicePushDescriptorPropertiesKHR *properties =
            (VkPhysicalDevicePushDescriptorPropertiesKHR *)ext;
         properties->maxPushDescriptors = MAX_PUSH_DESCRIPTORS;
         break;
      }
      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_ID_PROPERTIES_KHR: {
         VkPhysicalDeviceIDPropertiesKHR *properties =
            (VkPhysicalDeviceIDPropertiesKHR *)ext;
         memcpy(properties->driverUUID, pdevice->driver_uuid, VK_UUID_SIZE);
         memcpy(properties->deviceUUID, pdevice->device_uuid, VK_UUID_SIZE);
         properties->deviceLUIDValid = false;
         break;
      }
      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MULTIVIEW_PROPERTIES_KHR: {
         VkPhysicalDeviceMultiviewPropertiesKHR *properties =
            (VkPhysicalDeviceMultiviewPropertiesKHR *)ext;
         properties->maxMultiviewViewCount = MAX_VIEWS;
         properties->maxMultiviewInstanceIndex = INT_MAX;
         break;
      }
      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_POINT_CLIPPING_PROPERTIES_KHR: {
         VkPhysicalDevicePointClippingPropertiesKHR *properties =
            (VkPhysicalDevicePointClippingPropertiesKHR *)ext;
         properties->pointClippingBehavior =
            VK_POINT_CLIPPING_BEHAVIOR_ALL_CLIP_PLANES_KHR;
         break;
      }
      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MAINTENANCE_3_PROPERTIES: {
         VkPhysicalDeviceMaintenance3Properties *properties =
            (VkPhysicalDeviceMaintenance3Properties *)ext;
         /* Make sure everything is addressable by a signed 32-bit int, and
          * our largest descriptors are 96 bytes. */
         properties->maxPerSetDescriptors = (1ull << 31) / 96;
         /* Our buffer size fields allow only this much */
         properties->maxMemoryAllocationSize = 0xFFFFFFFFull;
         break;
      }
      default:
         break;
      }
   }
}

static const VkQueueFamilyProperties tu_queue_family_properties = {
   .queueFlags = VK_QUEUE_GRAPHICS_BIT |
                 VK_QUEUE_COMPUTE_BIT |
                 VK_QUEUE_TRANSFER_BIT,
   .queueCount = 1,
   .timestampValidBits = 64,
   .minImageTransferGranularity = (VkExtent3D) { 1, 1, 1 },
};

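/* A single queue family combining graphics, compute and transfer is
 * advertised below, so clients always see queueFamilyIndex 0.
 */
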
void
tu_GetPhysicalDeviceQueueFamilyProperties(
   VkPhysicalDevice physicalDevice,
   uint32_t *pQueueFamilyPropertyCount,
   VkQueueFamilyProperties *pQueueFamilyProperties)
{
   VK_OUTARRAY_MAKE(out, pQueueFamilyProperties, pQueueFamilyPropertyCount);

   vk_outarray_append(&out, p) {
      *p = tu_queue_family_properties;
   }
}

void
tu_GetPhysicalDeviceQueueFamilyProperties2(
   VkPhysicalDevice physicalDevice,
   uint32_t *pQueueFamilyPropertyCount,
   VkQueueFamilyProperties2KHR *pQueueFamilyProperties)
{
   VK_OUTARRAY_MAKE(out, pQueueFamilyProperties, pQueueFamilyPropertyCount);

   vk_outarray_append(&out, p) {
      p->queueFamilyProperties = tu_queue_family_properties;
   }
}

static uint64_t
tu_get_system_heap_size()
{
   struct sysinfo info;
   sysinfo(&info);

   uint64_t total_ram = (uint64_t)info.totalram * (uint64_t)info.mem_unit;

   /* We don't want to burn too much ram with the GPU. If the user has 4GiB
    * or less, we use at most half. If they have more than 4GiB, we use 3/4.
    */
   uint64_t available_ram;
   if (total_ram <= 4ull * 1024ull * 1024ull * 1024ull)
      available_ram = total_ram / 2;
   else
      available_ram = total_ram * 3 / 4;

   return available_ram;
}
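
/* Examples: 4 GiB of system RAM advertises a 2 GiB heap (the "half" case),
 * while 8 GiB advertises a 6 GiB heap (8 * 3 / 4).
 */
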
void
tu_GetPhysicalDeviceMemoryProperties(
   VkPhysicalDevice physicalDevice,
   VkPhysicalDeviceMemoryProperties *pMemoryProperties)
{
   pMemoryProperties->memoryHeapCount = 1;
   pMemoryProperties->memoryHeaps[0].size = tu_get_system_heap_size();
   pMemoryProperties->memoryHeaps[0].flags = VK_MEMORY_HEAP_DEVICE_LOCAL_BIT;

   pMemoryProperties->memoryTypeCount = 1;
   pMemoryProperties->memoryTypes[0].propertyFlags =
      VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT |
      VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT |
      VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
   pMemoryProperties->memoryTypes[0].heapIndex = 0;
}

void
tu_GetPhysicalDeviceMemoryProperties2(
   VkPhysicalDevice physicalDevice,
   VkPhysicalDeviceMemoryProperties2KHR *pMemoryProperties)
{
   return tu_GetPhysicalDeviceMemoryProperties(
      physicalDevice, &pMemoryProperties->memoryProperties);
}

static VkResult
tu_queue_init(struct tu_device *device,
              struct tu_queue *queue,
              uint32_t queue_family_index,
              int idx,
              VkDeviceQueueCreateFlags flags)
{
   queue->_loader_data.loaderMagic = ICD_LOADER_MAGIC;
   queue->device = device;
   queue->queue_family_index = queue_family_index;
   queue->queue_idx = idx;
   queue->flags = flags;

   return VK_SUCCESS;
}

static void
tu_queue_finish(struct tu_queue *queue)
{
}

static int
tu_get_device_extension_index(const char *name)
{
   for (unsigned i = 0; i < TU_DEVICE_EXTENSION_COUNT; ++i) {
      if (strcmp(name, tu_device_extensions[i].extensionName) == 0)
         return i;
   }
   return -1;
}

VkResult
tu_CreateDevice(VkPhysicalDevice physicalDevice,
                const VkDeviceCreateInfo *pCreateInfo,
                const VkAllocationCallbacks *pAllocator,
                VkDevice *pDevice)
{
   TU_FROM_HANDLE(tu_physical_device, physical_device, physicalDevice);
   VkResult result;
   struct tu_device *device;

   /* Check enabled features */
   if (pCreateInfo->pEnabledFeatures) {
      VkPhysicalDeviceFeatures supported_features;
      tu_GetPhysicalDeviceFeatures(physicalDevice, &supported_features);
      VkBool32 *supported_feature = (VkBool32 *)&supported_features;
      VkBool32 *enabled_feature = (VkBool32 *)pCreateInfo->pEnabledFeatures;
      unsigned num_features =
         sizeof(VkPhysicalDeviceFeatures) / sizeof(VkBool32);
      for (uint32_t i = 0; i < num_features; i++) {
         if (enabled_feature[i] && !supported_feature[i])
            return vk_error(physical_device->instance,
                            VK_ERROR_FEATURE_NOT_PRESENT);
      }
   }

   device = vk_zalloc2(&physical_device->instance->alloc,
                       pAllocator,
                       sizeof(*device),
                       8,
                       VK_SYSTEM_ALLOCATION_SCOPE_DEVICE);
   if (!device)
      return vk_error(physical_device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);

   device->_loader_data.loaderMagic = ICD_LOADER_MAGIC;
   device->instance = physical_device->instance;
   device->physical_device = physical_device;

   if (pAllocator)
      device->alloc = *pAllocator;
   else
      device->alloc = physical_device->instance->alloc;

   for (uint32_t i = 0; i < pCreateInfo->enabledExtensionCount; i++) {
      const char *ext_name = pCreateInfo->ppEnabledExtensionNames[i];
      int index = tu_get_device_extension_index(ext_name);
      if (index < 0 ||
          !physical_device->supported_extensions.extensions[index]) {
         vk_free(&device->alloc, device);
         return vk_error(physical_device->instance,
                         VK_ERROR_EXTENSION_NOT_PRESENT);
      }

      device->enabled_extensions.extensions[index] = true;
   }

   for (unsigned i = 0; i < pCreateInfo->queueCreateInfoCount; i++) {
      const VkDeviceQueueCreateInfo *queue_create =
         &pCreateInfo->pQueueCreateInfos[i];
      uint32_t qfi = queue_create->queueFamilyIndex;
      device->queues[qfi] =
         vk_alloc(&device->alloc,
                  queue_create->queueCount * sizeof(struct tu_queue),
                  8,
                  VK_SYSTEM_ALLOCATION_SCOPE_DEVICE);
      if (!device->queues[qfi]) {
         result = VK_ERROR_OUT_OF_HOST_MEMORY;
         goto fail;
      }

      memset(device->queues[qfi],
             0,
             queue_create->queueCount * sizeof(struct tu_queue));

      device->queue_count[qfi] = queue_create->queueCount;

      for (unsigned q = 0; q < queue_create->queueCount; q++) {
         result = tu_queue_init(
            device, &device->queues[qfi][q], qfi, q, queue_create->flags);
         if (result != VK_SUCCESS)
            goto fail;
      }
   }

   VkPipelineCacheCreateInfo ci;
   ci.sType = VK_STRUCTURE_TYPE_PIPELINE_CACHE_CREATE_INFO;
   ci.pNext = NULL;
   ci.flags = 0;
   ci.pInitialData = NULL;
   ci.initialDataSize = 0;
   VkPipelineCache pc;
   result =
      tu_CreatePipelineCache(tu_device_to_handle(device), &ci, NULL, &pc);
   if (result != VK_SUCCESS)
      goto fail;

   device->mem_cache = tu_pipeline_cache_from_handle(pc);

   *pDevice = tu_device_to_handle(device);
   return VK_SUCCESS;

fail:
   for (unsigned i = 0; i < TU_MAX_QUEUE_FAMILIES; i++) {
      for (unsigned q = 0; q < device->queue_count[i]; q++)
         tu_queue_finish(&device->queues[i][q]);
      if (device->queue_count[i])
         vk_free(&device->alloc, device->queues[i]);
   }

   vk_free(&device->alloc, device);
   return result;
}

void
tu_DestroyDevice(VkDevice _device, const VkAllocationCallbacks *pAllocator)
{
   TU_FROM_HANDLE(tu_device, device, _device);

   if (!device)
      return;

   for (unsigned i = 0; i < TU_MAX_QUEUE_FAMILIES; i++) {
      for (unsigned q = 0; q < device->queue_count[i]; q++)
         tu_queue_finish(&device->queues[i][q]);
      if (device->queue_count[i])
         vk_free(&device->alloc, device->queues[i]);
   }

   VkPipelineCache pc = tu_pipeline_cache_to_handle(device->mem_cache);
   tu_DestroyPipelineCache(tu_device_to_handle(device), pc, NULL);

   vk_free(&device->alloc, device);
}

VkResult
tu_EnumerateInstanceLayerProperties(uint32_t *pPropertyCount,
                                    VkLayerProperties *pProperties)
{
   *pPropertyCount = 0;

   return VK_SUCCESS;
}

VkResult
tu_EnumerateDeviceLayerProperties(VkPhysicalDevice physicalDevice,
                                  uint32_t *pPropertyCount,
                                  VkLayerProperties *pProperties)
{
   *pPropertyCount = 0;

   return VK_SUCCESS;
}

void
tu_GetDeviceQueue2(VkDevice _device,
                   const VkDeviceQueueInfo2 *pQueueInfo,
                   VkQueue *pQueue)
{
   TU_FROM_HANDLE(tu_device, device, _device);
   struct tu_queue *queue;

   queue =
      &device->queues[pQueueInfo->queueFamilyIndex][pQueueInfo->queueIndex];
   if (pQueueInfo->flags != queue->flags) {
      /* From the Vulkan 1.1.70 spec:
       *
       * "The queue returned by vkGetDeviceQueue2 must have the same
       * flags value from this structure as that used at device
       * creation time in a VkDeviceQueueCreateInfo instance. If no
       * matching flags were specified at device creation time then
       * pQueue will return VK_NULL_HANDLE."
       */
      *pQueue = VK_NULL_HANDLE;
      return;
   }

   *pQueue = tu_queue_to_handle(queue);
}

void
tu_GetDeviceQueue(VkDevice _device,
                  uint32_t queueFamilyIndex,
                  uint32_t queueIndex,
                  VkQueue *pQueue)
{
   const VkDeviceQueueInfo2 info =
      (VkDeviceQueueInfo2){ .sType = VK_STRUCTURE_TYPE_DEVICE_QUEUE_INFO_2,
                            .queueFamilyIndex = queueFamilyIndex,
                            .queueIndex = queueIndex };

   tu_GetDeviceQueue2(_device, &info, pQueue);
}

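/* A minimal client-side sketch (hypothetical usage, not part of the driver):
 *
 *    VkQueue queue;
 *    vkGetDeviceQueue(dev, 0, 0, &queue);
 *
 * dispatches here. The designated initializer above leaves info.flags zero,
 * so only queues created with zero flags can match in tu_GetDeviceQueue2().
 */
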
VkResult
tu_QueueSubmit(VkQueue _queue,
               uint32_t submitCount,
               const VkSubmitInfo *pSubmits,
               VkFence _fence)
{
   return VK_SUCCESS;
}

VkResult
tu_QueueWaitIdle(VkQueue _queue)
{
   return VK_SUCCESS;
}

VkResult
tu_DeviceWaitIdle(VkDevice _device)
{
   TU_FROM_HANDLE(tu_device, device, _device);

   for (unsigned i = 0; i < TU_MAX_QUEUE_FAMILIES; i++) {
      for (unsigned q = 0; q < device->queue_count[i]; q++) {
         tu_QueueWaitIdle(tu_queue_to_handle(&device->queues[i][q]));
      }
   }
   return VK_SUCCESS;
}

VkResult
tu_EnumerateInstanceExtensionProperties(const char *pLayerName,
                                        uint32_t *pPropertyCount,
                                        VkExtensionProperties *pProperties)
{
   VK_OUTARRAY_MAKE(out, pProperties, pPropertyCount);

   /* We support no layers */
   if (pLayerName)
      return vk_error(NULL, VK_ERROR_LAYER_NOT_PRESENT);

   for (int i = 0; i < TU_INSTANCE_EXTENSION_COUNT; i++) {
      if (tu_supported_instance_extensions.extensions[i]) {
         vk_outarray_append(&out, prop) { *prop = tu_instance_extensions[i]; }
      }
   }

   return vk_outarray_status(&out);
}

VkResult
tu_EnumerateDeviceExtensionProperties(VkPhysicalDevice physicalDevice,
                                      const char *pLayerName,
                                      uint32_t *pPropertyCount,
                                      VkExtensionProperties *pProperties)
{
   TU_FROM_HANDLE(tu_physical_device, device, physicalDevice);
   VK_OUTARRAY_MAKE(out, pProperties, pPropertyCount);

   /* We support no layers */
   if (pLayerName)
      return vk_error(NULL, VK_ERROR_LAYER_NOT_PRESENT);

   for (int i = 0; i < TU_DEVICE_EXTENSION_COUNT; i++) {
      if (device->supported_extensions.extensions[i]) {
         vk_outarray_append(&out, prop) { *prop = tu_device_extensions[i]; }
      }
   }

   return vk_outarray_status(&out);
}

PFN_vkVoidFunction
tu_GetInstanceProcAddr(VkInstance _instance, const char *pName)
{
   TU_FROM_HANDLE(tu_instance, instance, _instance);

   return tu_lookup_entrypoint_checked(pName,
                                       instance ? instance->api_version : 0,
                                       instance ? &instance->enabled_extensions
                                                : NULL,
                                       NULL);
}

/* The loader wants us to expose a second GetInstanceProcAddr function
 * to work around certain LD_PRELOAD issues seen in apps.
 */
PUBLIC
VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL
vk_icdGetInstanceProcAddr(VkInstance instance, const char *pName);

PUBLIC
VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL
vk_icdGetInstanceProcAddr(VkInstance instance, const char *pName)
{
   return tu_GetInstanceProcAddr(instance, pName);
}

PFN_vkVoidFunction
tu_GetDeviceProcAddr(VkDevice _device, const char *pName)
{
   TU_FROM_HANDLE(tu_device, device, _device);

   return tu_lookup_entrypoint_checked(pName,
                                       device->instance->api_version,
                                       &device->instance->enabled_extensions,
                                       &device->enabled_extensions);
}

static VkResult
tu_alloc_memory(struct tu_device *device,
                const VkMemoryAllocateInfo *pAllocateInfo,
                const VkAllocationCallbacks *pAllocator,
                VkDeviceMemory *pMem)
{
   struct tu_device_memory *mem;

   assert(pAllocateInfo->sType == VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO);

   if (pAllocateInfo->allocationSize == 0) {
      /* Apparently, this is allowed */
      *pMem = VK_NULL_HANDLE;
      return VK_SUCCESS;
   }

   mem = vk_alloc2(&device->alloc,
                   pAllocator,
                   sizeof(*mem),
                   8,
                   VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (mem == NULL)
      return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);

   mem->bo = fd_bo_new(device->physical_device->drm_device,
                       pAllocateInfo->allocationSize,
                       DRM_FREEDRENO_GEM_CACHE_WCOMBINE |
                          DRM_FREEDRENO_GEM_TYPE_KMEM);
   if (!mem->bo) {
      vk_free2(&device->alloc, pAllocator, mem);
      return vk_error(device->instance, VK_ERROR_OUT_OF_DEVICE_MEMORY);
   }
   mem->size = pAllocateInfo->allocationSize;
   mem->type_index = pAllocateInfo->memoryTypeIndex;

   mem->map = NULL;
   mem->user_ptr = NULL;

   *pMem = tu_device_memory_to_handle(mem);

   return VK_SUCCESS;
}

VkResult
tu_AllocateMemory(VkDevice _device,
                  const VkMemoryAllocateInfo *pAllocateInfo,
                  const VkAllocationCallbacks *pAllocator,
                  VkDeviceMemory *pMem)
{
   TU_FROM_HANDLE(tu_device, device, _device);
   return tu_alloc_memory(device, pAllocateInfo, pAllocator, pMem);
}

void
tu_FreeMemory(VkDevice _device,
              VkDeviceMemory _mem,
              const VkAllocationCallbacks *pAllocator)
{
   TU_FROM_HANDLE(tu_device, device, _device);
   TU_FROM_HANDLE(tu_device_memory, mem, _mem);

   if (mem == NULL)
      return;

   fd_bo_del(mem->bo);

   vk_free2(&device->alloc, pAllocator, mem);
}

VkResult
tu_MapMemory(VkDevice _device,
             VkDeviceMemory _memory,
             VkDeviceSize offset,
             VkDeviceSize size,
             VkMemoryMapFlags flags,
             void **ppData)
{
   TU_FROM_HANDLE(tu_device, device, _device);
   TU_FROM_HANDLE(tu_device_memory, mem, _memory);

   if (mem == NULL) {
      *ppData = NULL;
      return VK_SUCCESS;
   }

   if (mem->user_ptr) {
      *ppData = mem->user_ptr;
   } else if (!mem->map) {
      *ppData = mem->map = fd_bo_map(mem->bo);
   } else {
      *ppData = mem->map;
   }

   if (*ppData) {
      *ppData = (char *)*ppData + offset;
      return VK_SUCCESS;
   }

   return vk_error(device->instance, VK_ERROR_MEMORY_MAP_FAILED);
}

void
tu_UnmapMemory(VkDevice _device, VkDeviceMemory _memory)
{
   /* I do not see any unmapping done by the freedreno Gallium driver. */
}

VkResult
tu_FlushMappedMemoryRanges(VkDevice _device,
                           uint32_t memoryRangeCount,
                           const VkMappedMemoryRange *pMemoryRanges)
{
   return VK_SUCCESS;
}

VkResult
tu_InvalidateMappedMemoryRanges(VkDevice _device,
                                uint32_t memoryRangeCount,
                                const VkMappedMemoryRange *pMemoryRanges)
{
   return VK_SUCCESS;
}

void
tu_GetBufferMemoryRequirements(VkDevice _device,
                               VkBuffer _buffer,
                               VkMemoryRequirements *pMemoryRequirements)
{
   TU_FROM_HANDLE(tu_buffer, buffer, _buffer);

   pMemoryRequirements->memoryTypeBits = 1;
   pMemoryRequirements->alignment = 16;
   pMemoryRequirements->size =
      align64(buffer->size, pMemoryRequirements->alignment);
}

void
tu_GetBufferMemoryRequirements2(
   VkDevice device,
   const VkBufferMemoryRequirementsInfo2KHR *pInfo,
   VkMemoryRequirements2KHR *pMemoryRequirements)
{
   tu_GetBufferMemoryRequirements(
      device, pInfo->buffer, &pMemoryRequirements->memoryRequirements);
}

void
tu_GetImageMemoryRequirements(VkDevice _device,
                              VkImage _image,
                              VkMemoryRequirements *pMemoryRequirements)
{
   TU_FROM_HANDLE(tu_image, image, _image);

   pMemoryRequirements->memoryTypeBits = 1;

   pMemoryRequirements->size = image->size;
   pMemoryRequirements->alignment = image->alignment;
}

void
tu_GetImageMemoryRequirements2(VkDevice device,
                               const VkImageMemoryRequirementsInfo2KHR *pInfo,
                               VkMemoryRequirements2KHR *pMemoryRequirements)
{
   tu_GetImageMemoryRequirements(
      device, pInfo->image, &pMemoryRequirements->memoryRequirements);
}

void
tu_GetImageSparseMemoryRequirements(
   VkDevice device,
   VkImage image,
   uint32_t *pSparseMemoryRequirementCount,
   VkSparseImageMemoryRequirements *pSparseMemoryRequirements)
{
   *pSparseMemoryRequirementCount = 0;
}

void
tu_GetImageSparseMemoryRequirements2(
   VkDevice device,
   const VkImageSparseMemoryRequirementsInfo2KHR *pInfo,
   uint32_t *pSparseMemoryRequirementCount,
   VkSparseImageMemoryRequirements2KHR *pSparseMemoryRequirements)
{
   *pSparseMemoryRequirementCount = 0;
}

void
tu_GetDeviceMemoryCommitment(VkDevice device,
                             VkDeviceMemory memory,
                             VkDeviceSize *pCommittedMemoryInBytes)
{
   *pCommittedMemoryInBytes = 0;
}

VkResult
tu_BindBufferMemory2(VkDevice device,
                     uint32_t bindInfoCount,
                     const VkBindBufferMemoryInfoKHR *pBindInfos)
{
   return VK_SUCCESS;
}

VkResult
tu_BindBufferMemory(VkDevice device,
                    VkBuffer buffer,
                    VkDeviceMemory memory,
                    VkDeviceSize memoryOffset)
{
   const VkBindBufferMemoryInfoKHR info = {
      .sType = VK_STRUCTURE_TYPE_BIND_BUFFER_MEMORY_INFO_KHR,
      .buffer = buffer,
      .memory = memory,
      .memoryOffset = memoryOffset
   };

   return tu_BindBufferMemory2(device, 1, &info);
}

VkResult
tu_BindImageMemory2(VkDevice device,
                    uint32_t bindInfoCount,
                    const VkBindImageMemoryInfoKHR *pBindInfos)
{
   return VK_SUCCESS;
}

VkResult
tu_BindImageMemory(VkDevice device,
                   VkImage image,
                   VkDeviceMemory memory,
                   VkDeviceSize memoryOffset)
{
   const VkBindImageMemoryInfoKHR info = {
      .sType = VK_STRUCTURE_TYPE_BIND_IMAGE_MEMORY_INFO_KHR,
      .image = image,
      .memory = memory,
      .memoryOffset = memoryOffset
   };

   return tu_BindImageMemory2(device, 1, &info);
}

VkResult
tu_QueueBindSparse(VkQueue _queue,
                   uint32_t bindInfoCount,
                   const VkBindSparseInfo *pBindInfo,
                   VkFence _fence)
{
   return VK_SUCCESS;
}

VkResult
tu_CreateFence(VkDevice _device,
               const VkFenceCreateInfo *pCreateInfo,
               const VkAllocationCallbacks *pAllocator,
               VkFence *pFence)
{
   TU_FROM_HANDLE(tu_device, device, _device);

   struct tu_fence *fence = vk_alloc2(&device->alloc,
                                      pAllocator,
                                      sizeof(*fence),
                                      8,
                                      VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);

   if (!fence)
      return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);

   *pFence = tu_fence_to_handle(fence);

   return VK_SUCCESS;
}

void
tu_DestroyFence(VkDevice _device,
                VkFence _fence,
                const VkAllocationCallbacks *pAllocator)
{
   TU_FROM_HANDLE(tu_device, device, _device);
   TU_FROM_HANDLE(tu_fence, fence, _fence);

   if (!fence)
      return;

   vk_free2(&device->alloc, pAllocator, fence);
}

VkResult
tu_WaitForFences(VkDevice _device,
                 uint32_t fenceCount,
                 const VkFence *pFences,
                 VkBool32 waitAll,
                 uint64_t timeout)
{
   return VK_SUCCESS;
}

VkResult
tu_ResetFences(VkDevice _device, uint32_t fenceCount, const VkFence *pFences)
{
   return VK_SUCCESS;
}

VkResult
tu_GetFenceStatus(VkDevice _device, VkFence _fence)
{
   return VK_SUCCESS;
}

// Queue semaphore functions

VkResult
tu_CreateSemaphore(VkDevice _device,
                   const VkSemaphoreCreateInfo *pCreateInfo,
                   const VkAllocationCallbacks *pAllocator,
                   VkSemaphore *pSemaphore)
{
   TU_FROM_HANDLE(tu_device, device, _device);

   struct tu_semaphore *sem = vk_alloc2(&device->alloc,
                                        pAllocator,
                                        sizeof(*sem),
                                        8,
                                        VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (!sem)
      return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);

   *pSemaphore = tu_semaphore_to_handle(sem);

   return VK_SUCCESS;
}

void
tu_DestroySemaphore(VkDevice _device,
                    VkSemaphore _semaphore,
                    const VkAllocationCallbacks *pAllocator)
{
   TU_FROM_HANDLE(tu_device, device, _device);
   TU_FROM_HANDLE(tu_semaphore, sem, _semaphore);

   if (!sem)
      return;

   vk_free2(&device->alloc, pAllocator, sem);
}

VkResult
tu_CreateEvent(VkDevice _device,
               const VkEventCreateInfo *pCreateInfo,
               const VkAllocationCallbacks *pAllocator,
               VkEvent *pEvent)
{
   TU_FROM_HANDLE(tu_device, device, _device);
   struct tu_event *event = vk_alloc2(&device->alloc,
                                      pAllocator,
                                      sizeof(*event),
                                      8,
                                      VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);

   if (!event)
      return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);

   *pEvent = tu_event_to_handle(event);

   return VK_SUCCESS;
}

void
tu_DestroyEvent(VkDevice _device,
                VkEvent _event,
                const VkAllocationCallbacks *pAllocator)
{
   TU_FROM_HANDLE(tu_device, device, _device);
   TU_FROM_HANDLE(tu_event, event, _event);

   if (!event)
      return;

   vk_free2(&device->alloc, pAllocator, event);
}

VkResult
tu_GetEventStatus(VkDevice _device, VkEvent _event)
{
   TU_FROM_HANDLE(tu_event, event, _event);

   if (*event->map == 1)
      return VK_EVENT_SET;
   return VK_EVENT_RESET;
}

VkResult
tu_SetEvent(VkDevice _device, VkEvent _event)
{
   TU_FROM_HANDLE(tu_event, event, _event);
   *event->map = 1;

   return VK_SUCCESS;
}

VkResult
tu_ResetEvent(VkDevice _device, VkEvent _event)
{
   TU_FROM_HANDLE(tu_event, event, _event);
   *event->map = 0;

   return VK_SUCCESS;
}

VkResult
tu_CreateBuffer(VkDevice _device,
                const VkBufferCreateInfo *pCreateInfo,
                const VkAllocationCallbacks *pAllocator,
                VkBuffer *pBuffer)
{
   TU_FROM_HANDLE(tu_device, device, _device);
   struct tu_buffer *buffer;

   assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO);

   buffer = vk_alloc2(&device->alloc,
                      pAllocator,
                      sizeof(*buffer),
                      8,
                      VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (buffer == NULL)
      return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);

   buffer->size = pCreateInfo->size;
   buffer->usage = pCreateInfo->usage;
   buffer->flags = pCreateInfo->flags;

   *pBuffer = tu_buffer_to_handle(buffer);

   return VK_SUCCESS;
}

void
tu_DestroyBuffer(VkDevice _device,
                 VkBuffer _buffer,
                 const VkAllocationCallbacks *pAllocator)
{
   TU_FROM_HANDLE(tu_device, device, _device);
   TU_FROM_HANDLE(tu_buffer, buffer, _buffer);

   if (!buffer)
      return;

   vk_free2(&device->alloc, pAllocator, buffer);
}

static uint32_t
tu_surface_max_layer_count(struct tu_image_view *iview)
{
   return iview->type == VK_IMAGE_VIEW_TYPE_3D
             ? iview->extent.depth
             : (iview->base_layer + iview->layer_count);
}

VkResult
tu_CreateFramebuffer(VkDevice _device,
                     const VkFramebufferCreateInfo *pCreateInfo,
                     const VkAllocationCallbacks *pAllocator,
                     VkFramebuffer *pFramebuffer)
{
   TU_FROM_HANDLE(tu_device, device, _device);
   struct tu_framebuffer *framebuffer;

   assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO);

   size_t size =
      sizeof(*framebuffer) +
      sizeof(struct tu_attachment_info) * pCreateInfo->attachmentCount;
   framebuffer = vk_alloc2(
      &device->alloc, pAllocator, size, 8, VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (framebuffer == NULL)
      return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);

   framebuffer->attachment_count = pCreateInfo->attachmentCount;
   framebuffer->width = pCreateInfo->width;
   framebuffer->height = pCreateInfo->height;
   framebuffer->layers = pCreateInfo->layers;
   for (uint32_t i = 0; i < pCreateInfo->attachmentCount; i++) {
      VkImageView _iview = pCreateInfo->pAttachments[i];
      struct tu_image_view *iview = tu_image_view_from_handle(_iview);
      framebuffer->attachments[i].attachment = iview;

      framebuffer->width = MIN2(framebuffer->width, iview->extent.width);
      framebuffer->height = MIN2(framebuffer->height, iview->extent.height);
      framebuffer->layers =
         MIN2(framebuffer->layers, tu_surface_max_layer_count(iview));
   }

   *pFramebuffer = tu_framebuffer_to_handle(framebuffer);
   return VK_SUCCESS;
}

void
tu_DestroyFramebuffer(VkDevice _device,
                      VkFramebuffer _fb,
                      const VkAllocationCallbacks *pAllocator)
{
   TU_FROM_HANDLE(tu_device, device, _device);
   TU_FROM_HANDLE(tu_framebuffer, fb, _fb);

   if (!fb)
      return;

   vk_free2(&device->alloc, pAllocator, fb);
}

static void
tu_init_sampler(struct tu_device *device,
                struct tu_sampler *sampler,
                const VkSamplerCreateInfo *pCreateInfo)
{
}

VkResult
tu_CreateSampler(VkDevice _device,
                 const VkSamplerCreateInfo *pCreateInfo,
                 const VkAllocationCallbacks *pAllocator,
                 VkSampler *pSampler)
{
   TU_FROM_HANDLE(tu_device, device, _device);
   struct tu_sampler *sampler;

   assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_SAMPLER_CREATE_INFO);

   sampler = vk_alloc2(&device->alloc,
                       pAllocator,
                       sizeof(*sampler),
                       8,
                       VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (!sampler)
      return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);

   tu_init_sampler(device, sampler, pCreateInfo);
   *pSampler = tu_sampler_to_handle(sampler);

   return VK_SUCCESS;
}

void
tu_DestroySampler(VkDevice _device,
                  VkSampler _sampler,
                  const VkAllocationCallbacks *pAllocator)
{
   TU_FROM_HANDLE(tu_device, device, _device);
   TU_FROM_HANDLE(tu_sampler, sampler, _sampler);

   if (!sampler)
      return;

   vk_free2(&device->alloc, pAllocator, sampler);
}

/* vk_icd.h does not declare this function, so we declare it here to
 * suppress Wmissing-prototypes.
 */
PUBLIC VKAPI_ATTR VkResult VKAPI_CALL
vk_icdNegotiateLoaderICDInterfaceVersion(uint32_t *pSupportedVersion);

PUBLIC VKAPI_ATTR VkResult VKAPI_CALL
vk_icdNegotiateLoaderICDInterfaceVersion(uint32_t *pSupportedVersion)
{
   /* For the full details on loader interface versioning, see
    * <https://github.com/KhronosGroup/Vulkan-LoaderAndValidationLayers/blob/master/loader/LoaderAndLayerInterface.md>.
    * What follows is a condensed summary, to help you navigate the large and
    * confusing official doc.
    *
    * - Loader interface v0 is incompatible with later versions. We don't
    *   support it.
    *
    * - In loader interface v1:
    *   - The first ICD entrypoint called by the loader is
    *     vk_icdGetInstanceProcAddr(). The ICD must statically expose this
    *     entrypoint.
    *   - The ICD must statically expose no other Vulkan symbol unless it is
    *     linked with -Bsymbolic.
    *   - Each dispatchable Vulkan handle created by the ICD must be
    *     a pointer to a struct whose first member is VK_LOADER_DATA. The
    *     ICD must initialize VK_LOADER_DATA.loadMagic to ICD_LOADER_MAGIC.
    *   - The loader implements vkCreate{PLATFORM}SurfaceKHR() and
    *     vkDestroySurfaceKHR(). The ICD must be capable of working with
    *     such loader-managed surfaces.
    *
    * - Loader interface v2 differs from v1 in:
    *   - The first ICD entrypoint called by the loader is
    *     vk_icdNegotiateLoaderICDInterfaceVersion(). The ICD must
    *     statically expose this entrypoint.
    *
    * - Loader interface v3 differs from v2 in:
    *   - The ICD must implement vkCreate{PLATFORM}SurfaceKHR(),
    *     vkDestroySurfaceKHR(), and other API which uses VKSurfaceKHR,
    *     because the loader no longer does so.
    */
   *pSupportedVersion = MIN2(*pSupportedVersion, 3u);
   return VK_SUCCESS;
}

void
tu_GetPhysicalDeviceExternalSemaphoreProperties(
   VkPhysicalDevice physicalDevice,
   const VkPhysicalDeviceExternalSemaphoreInfoKHR *pExternalSemaphoreInfo,
   VkExternalSemaphorePropertiesKHR *pExternalSemaphoreProperties)
{
   pExternalSemaphoreProperties->exportFromImportedHandleTypes = 0;
   pExternalSemaphoreProperties->compatibleHandleTypes = 0;
   pExternalSemaphoreProperties->externalSemaphoreFeatures = 0;
}

void
tu_GetPhysicalDeviceExternalFenceProperties(
   VkPhysicalDevice physicalDevice,
   const VkPhysicalDeviceExternalFenceInfoKHR *pExternalFenceInfo,
   VkExternalFencePropertiesKHR *pExternalFenceProperties)
{
   pExternalFenceProperties->exportFromImportedHandleTypes = 0;
   pExternalFenceProperties->compatibleHandleTypes = 0;
   pExternalFenceProperties->externalFenceFeatures = 0;
}

VkResult
tu_CreateDebugReportCallbackEXT(
   VkInstance _instance,
   const VkDebugReportCallbackCreateInfoEXT *pCreateInfo,
   const VkAllocationCallbacks *pAllocator,
   VkDebugReportCallbackEXT *pCallback)
{
   TU_FROM_HANDLE(tu_instance, instance, _instance);
   return vk_create_debug_report_callback(&instance->debug_report_callbacks,
                                          pCreateInfo,
                                          pAllocator,
                                          &instance->alloc,
                                          pCallback);
}

void
tu_DestroyDebugReportCallbackEXT(VkInstance _instance,
                                 VkDebugReportCallbackEXT _callback,
                                 const VkAllocationCallbacks *pAllocator)
{
   TU_FROM_HANDLE(tu_instance, instance, _instance);
   vk_destroy_debug_report_callback(&instance->debug_report_callbacks,
                                    _callback,
                                    pAllocator,
                                    &instance->alloc);
}

void
tu_DebugReportMessageEXT(VkInstance _instance,
                         VkDebugReportFlagsEXT flags,
                         VkDebugReportObjectTypeEXT objectType,
                         uint64_t object,
                         size_t location,
                         int32_t messageCode,
                         const char *pLayerPrefix,
                         const char *pMessage)
{
   TU_FROM_HANDLE(tu_instance, instance, _instance);
   vk_debug_report(&instance->debug_report_callbacks,
                   flags,
                   objectType,
                   object,
                   location,
                   messageCode,
                   pLayerPrefix,
                   pMessage);
}

void
tu_GetDeviceGroupPeerMemoryFeatures(
   VkDevice device,
   uint32_t heapIndex,
   uint32_t localDeviceIndex,
   uint32_t remoteDeviceIndex,
   VkPeerMemoryFeatureFlags *pPeerMemoryFeatures)
{
   assert(localDeviceIndex == remoteDeviceIndex);

   *pPeerMemoryFeatures = VK_PEER_MEMORY_FEATURE_COPY_SRC_BIT |
                          VK_PEER_MEMORY_FEATURE_COPY_DST_BIT |
                          VK_PEER_MEMORY_FEATURE_GENERIC_SRC_BIT |
                          VK_PEER_MEMORY_FEATURE_GENERIC_DST_BIT;
}