/*
 * Copyright © 2016 Red Hat.
 * Copyright © 2016 Bas Nieuwenhuizen
 *
 * based in part on anv driver which is:
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */
#include "tu_private.h"

#include <fcntl.h>
#include <string.h>
#include <sys/mman.h>
#include <sys/sysinfo.h>
#include <xf86drm.h>

#include "compiler/glsl_types.h"
#include "util/debug.h"
#include "util/disk_cache.h"
#include "vk_format.h"

#include "drm-uapi/msm_drm.h"
static int
tu_device_get_cache_uuid(uint16_t family, void *uuid)
{
   uint32_t mesa_timestamp;
   uint16_t f = family;
   memset(uuid, 0, VK_UUID_SIZE);
   if (!disk_cache_get_function_timestamp(tu_device_get_cache_uuid,
                                          &mesa_timestamp))
      return -1;

   memcpy(uuid, &mesa_timestamp, 4);
   memcpy((char *) uuid + 4, &f, 2);
   snprintf((char *) uuid + 6, VK_UUID_SIZE - 10, "tu");
   return 0;
}
static void
tu_get_driver_uuid(void *uuid)
{
   memset(uuid, 0, VK_UUID_SIZE);
   snprintf(uuid, VK_UUID_SIZE, "freedreno");
}

static void
tu_get_device_uuid(void *uuid)
{
   memset(uuid, 0, VK_UUID_SIZE);
}
static VkResult
tu_bo_init(struct tu_device *dev,
           struct tu_bo *bo,
           uint32_t gem_handle,
           uint64_t size)
{
   uint64_t iova = tu_gem_info_iova(dev, gem_handle);
   if (!iova)
      return VK_ERROR_OUT_OF_DEVICE_MEMORY;

   *bo = (struct tu_bo) {
      .gem_handle = gem_handle,
      .size = size,
      .iova = iova,
   };
VkResult
tu_bo_init_new(struct tu_device *dev, struct tu_bo *bo, uint64_t size)
{
   /* TODO: Choose better flags. As of 2018-11-12, freedreno/drm/msm_bo.c
    * always sets `flags = MSM_BO_WC`, and we copy that behavior here.
    */
   uint32_t gem_handle = tu_gem_new(dev, size, MSM_BO_WC);
   if (!gem_handle)
      return vk_error(dev->instance, VK_ERROR_OUT_OF_DEVICE_MEMORY);

   VkResult result = tu_bo_init(dev, bo, gem_handle, size);
   if (result != VK_SUCCESS) {
      tu_gem_close(dev, gem_handle);
      return vk_error(dev->instance, result);
   }
VkResult
tu_bo_init_dmabuf(struct tu_device *dev,
                  struct tu_bo *bo,
                  uint64_t size,
                  int fd)
{
   uint32_t gem_handle = tu_gem_import_dmabuf(dev, fd, size);
   if (!gem_handle)
      return vk_error(dev->instance, VK_ERROR_INVALID_EXTERNAL_HANDLE);

   VkResult result = tu_bo_init(dev, bo, gem_handle, size);
   if (result != VK_SUCCESS) {
      tu_gem_close(dev, gem_handle);
      return vk_error(dev->instance, result);
   }
int
tu_bo_export_dmabuf(struct tu_device *dev, struct tu_bo *bo)
{
   return tu_gem_export_dmabuf(dev, bo->gem_handle);
}
VkResult
tu_bo_map(struct tu_device *dev, struct tu_bo *bo)
{
   uint64_t offset = tu_gem_info_offset(dev, bo->gem_handle);
   if (!offset)
      return vk_error(dev->instance, VK_ERROR_OUT_OF_DEVICE_MEMORY);

   /* TODO: Should we use the wrapper os_mmap() like Freedreno does? */
   void *map = mmap(0, bo->size, PROT_READ | PROT_WRITE, MAP_SHARED,
                    dev->physical_device->local_fd, offset);
   if (map == MAP_FAILED)
      return vk_error(dev->instance, VK_ERROR_MEMORY_MAP_FAILED);

   bo->map = map;
   return VK_SUCCESS;
}
void
tu_bo_finish(struct tu_device *dev, struct tu_bo *bo)
{
   assert(bo->gem_handle);

   if (bo->map)
      munmap(bo->map, bo->size);

   tu_gem_close(dev, bo->gem_handle);
}
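
/* Illustrative sketch only (not part of the driver): how the BO helpers above
 * are meant to compose for a small CPU-visible buffer. The function name is
 * hypothetical and error handling is reduced to early returns.
 */
#if 0
static VkResult
tu_bo_example_usage(struct tu_device *dev)
{
   struct tu_bo bo;

   VkResult result = tu_bo_init_new(dev, &bo, 4096);   /* allocate + get iova */
   if (result != VK_SUCCESS)
      return result;

   result = tu_bo_map(dev, &bo);                        /* mmap through the render node */
   if (result != VK_SUCCESS) {
      tu_bo_finish(dev, &bo);
      return result;
   }

   memset(bo.map, 0, bo.size);                          /* CPU write through the WC mapping */

   tu_bo_finish(dev, &bo);                              /* munmap + close the GEM handle */
   return VK_SUCCESS;
}
#endif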
171 tu_physical_device_init(struct tu_physical_device
*device
,
172 struct tu_instance
*instance
,
173 drmDevicePtr drm_device
)
175 const char *path
= drm_device
->nodes
[DRM_NODE_RENDER
];
176 VkResult result
= VK_SUCCESS
;
177 drmVersionPtr version
;
181 fd
= open(path
, O_RDWR
| O_CLOEXEC
);
183 return vk_errorf(instance
, VK_ERROR_INCOMPATIBLE_DRIVER
,
184 "failed to open device %s", path
);
187 /* Version 1.3 added MSM_INFO_IOVA. */
188 const int min_version_major
= 1;
189 const int min_version_minor
= 3;
191 version
= drmGetVersion(fd
);
194 return vk_errorf(instance
, VK_ERROR_INCOMPATIBLE_DRIVER
,
195 "failed to query kernel driver version for device %s",
199 if (strcmp(version
->name
, "msm")) {
200 drmFreeVersion(version
);
202 return vk_errorf(instance
, VK_ERROR_INCOMPATIBLE_DRIVER
,
203 "device %s does not use the msm kernel driver", path
);
206 if (version
->version_major
!= min_version_major
||
207 version
->version_minor
< min_version_minor
) {
208 result
= vk_errorf(instance
, VK_ERROR_INCOMPATIBLE_DRIVER
,
209 "kernel driver for device %s has version %d.%d, "
210 "but Vulkan requires version >= %d.%d",
211 path
, version
->version_major
, version
->version_minor
,
212 min_version_major
, min_version_minor
);
213 drmFreeVersion(version
);
218 drmFreeVersion(version
);
220 if (instance
->debug_flags
& TU_DEBUG_STARTUP
)
221 tu_logi("Found compatible device '%s'.", path
);
223 device
->_loader_data
.loaderMagic
= ICD_LOADER_MAGIC
;
224 device
->instance
= instance
;
225 assert(strlen(path
) < ARRAY_SIZE(device
->path
));
226 strncpy(device
->path
, path
, ARRAY_SIZE(device
->path
));
228 if (instance
->enabled_extensions
.KHR_display
) {
230 open(drm_device
->nodes
[DRM_NODE_PRIMARY
], O_RDWR
| O_CLOEXEC
);
231 if (master_fd
>= 0) {
         /* TODO: free master_fd if accel is not working? */
236 device
->master_fd
= master_fd
;
237 device
->local_fd
= fd
;
239 if (tu_drm_get_gpu_id(device
, &device
->gpu_id
)) {
240 if (instance
->debug_flags
& TU_DEBUG_STARTUP
)
241 tu_logi("Could not query the GPU ID");
242 result
= vk_errorf(instance
, VK_ERROR_INITIALIZATION_FAILED
,
243 "could not get GPU ID");
247 if (tu_drm_get_gmem_size(device
, &device
->gmem_size
)) {
248 if (instance
->debug_flags
& TU_DEBUG_STARTUP
)
249 tu_logi("Could not query the GMEM size");
250 result
= vk_errorf(instance
, VK_ERROR_INITIALIZATION_FAILED
,
251 "could not get GMEM size");
255 if (tu_drm_get_gmem_base(device
, &device
->gmem_base
)) {
256 if (instance
->debug_flags
& TU_DEBUG_STARTUP
)
         tu_logi("Could not query the GMEM base");
258 result
= vk_errorf(instance
, VK_ERROR_INITIALIZATION_FAILED
,
259 "could not get GMEM size");
263 memset(device
->name
, 0, sizeof(device
->name
));
264 sprintf(device
->name
, "FD%d", device
->gpu_id
);
266 switch (device
->gpu_id
) {
268 device
->ccu_offset_gmem
= 0x7c000; /* 0x7e000 in some cases? */
269 device
->ccu_offset_bypass
= 0x10000;
270 device
->tile_align_w
= 64;
271 device
->magic
.PC_UNKNOWN_9805
= 0x0;
272 device
->magic
.SP_UNKNOWN_A0F8
= 0x0;
276 device
->ccu_offset_gmem
= 0xf8000;
277 device
->ccu_offset_bypass
= 0x20000;
278 device
->tile_align_w
= 64;
279 device
->magic
.PC_UNKNOWN_9805
= 0x1;
280 device
->magic
.SP_UNKNOWN_A0F8
= 0x1;
283 device
->ccu_offset_gmem
= 0x114000;
284 device
->ccu_offset_bypass
= 0x30000;
285 device
->tile_align_w
= 96;
286 device
->magic
.PC_UNKNOWN_9805
= 0x2;
287 device
->magic
.SP_UNKNOWN_A0F8
= 0x2;
290 result
= vk_errorf(instance
, VK_ERROR_INITIALIZATION_FAILED
,
291 "device %s is unsupported", device
->name
);
294 if (tu_device_get_cache_uuid(device
->gpu_id
, device
->cache_uuid
)) {
295 result
= vk_errorf(instance
, VK_ERROR_INITIALIZATION_FAILED
,
296 "cannot generate UUID");
300 /* The gpu id is already embedded in the uuid so we just pass "tu"
301 * when creating the cache.
303 char buf
[VK_UUID_SIZE
* 2 + 1];
304 disk_cache_format_hex_id(buf
, device
->cache_uuid
, VK_UUID_SIZE
* 2);
305 device
->disk_cache
= disk_cache_create(device
->name
, buf
, 0);
307 fprintf(stderr
, "WARNING: tu is not a conformant vulkan implementation, "
308 "testing use only.\n");
   tu_get_driver_uuid(&device->driver_uuid);
   tu_get_device_uuid(&device->device_uuid);
313 tu_fill_device_extension_table(device
, &device
->supported_extensions
);
315 if (result
!= VK_SUCCESS
) {
316 vk_error(instance
, result
);
320 result
= tu_wsi_init(device
);
321 if (result
!= VK_SUCCESS
) {
322 vk_error(instance
, result
);
336 tu_physical_device_finish(struct tu_physical_device
*device
)
338 tu_wsi_finish(device
);
340 disk_cache_destroy(device
->disk_cache
);
341 close(device
->local_fd
);
342 if (device
->master_fd
!= -1)
343 close(device
->master_fd
);
346 static VKAPI_ATTR
void *
347 default_alloc_func(void *pUserData
,
350 VkSystemAllocationScope allocationScope
)
355 static VKAPI_ATTR
void *
356 default_realloc_func(void *pUserData
,
360 VkSystemAllocationScope allocationScope
)
362 return realloc(pOriginal
, size
);
365 static VKAPI_ATTR
void
366 default_free_func(void *pUserData
, void *pMemory
)
371 static const VkAllocationCallbacks default_alloc
= {
373 .pfnAllocation
= default_alloc_func
,
374 .pfnReallocation
= default_realloc_func
,
375 .pfnFree
= default_free_func
,
static const struct debug_control tu_debug_options[] = {
   { "startup", TU_DEBUG_STARTUP },
   { "nir", TU_DEBUG_NIR },
   { "ir3", TU_DEBUG_IR3 },
   { "nobin", TU_DEBUG_NOBIN },
   { "sysmem", TU_DEBUG_SYSMEM },
   { "forcebin", TU_DEBUG_FORCEBIN },
   { NULL, 0 }
};

const char *
tu_get_debug_option_name(int id)
{
   assert(id < ARRAY_SIZE(tu_debug_options) - 1);
   return tu_debug_options[id].string;
}
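
/* For example, running an application with TU_DEBUG=startup,nobin makes
 * parse_debug_string() in tu_CreateInstance() set
 * TU_DEBUG_STARTUP | TU_DEBUG_NOBIN in instance->debug_flags.
 */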
396 tu_get_instance_extension_index(const char *name
)
398 for (unsigned i
= 0; i
< TU_INSTANCE_EXTENSION_COUNT
; ++i
) {
399 if (strcmp(name
, tu_instance_extensions
[i
].extensionName
) == 0)
406 tu_CreateInstance(const VkInstanceCreateInfo
*pCreateInfo
,
407 const VkAllocationCallbacks
*pAllocator
,
408 VkInstance
*pInstance
)
410 struct tu_instance
*instance
;
413 assert(pCreateInfo
->sType
== VK_STRUCTURE_TYPE_INSTANCE_CREATE_INFO
);
415 uint32_t client_version
;
416 if (pCreateInfo
->pApplicationInfo
&&
417 pCreateInfo
->pApplicationInfo
->apiVersion
!= 0) {
418 client_version
= pCreateInfo
->pApplicationInfo
->apiVersion
;
420 tu_EnumerateInstanceVersion(&client_version
);
423 instance
= vk_zalloc2(&default_alloc
, pAllocator
, sizeof(*instance
), 8,
424 VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE
);
426 return vk_error(NULL
, VK_ERROR_OUT_OF_HOST_MEMORY
);
428 instance
->_loader_data
.loaderMagic
= ICD_LOADER_MAGIC
;
431 instance
->alloc
= *pAllocator
;
433 instance
->alloc
= default_alloc
;
435 instance
->api_version
= client_version
;
436 instance
->physical_device_count
= -1;
438 instance
->debug_flags
=
439 parse_debug_string(getenv("TU_DEBUG"), tu_debug_options
);
441 if (instance
->debug_flags
& TU_DEBUG_STARTUP
)
442 tu_logi("Created an instance");
444 for (uint32_t i
= 0; i
< pCreateInfo
->enabledExtensionCount
; i
++) {
445 const char *ext_name
= pCreateInfo
->ppEnabledExtensionNames
[i
];
446 int index
= tu_get_instance_extension_index(ext_name
);
448 if (index
< 0 || !tu_supported_instance_extensions
.extensions
[index
]) {
449 vk_free2(&default_alloc
, pAllocator
, instance
);
450 return vk_error(instance
, VK_ERROR_EXTENSION_NOT_PRESENT
);
453 instance
->enabled_extensions
.extensions
[index
] = true;
456 result
= vk_debug_report_instance_init(&instance
->debug_report_callbacks
);
457 if (result
!= VK_SUCCESS
) {
458 vk_free2(&default_alloc
, pAllocator
, instance
);
459 return vk_error(instance
, result
);
462 glsl_type_singleton_init_or_ref();
464 VG(VALGRIND_CREATE_MEMPOOL(instance
, 0, false));
466 *pInstance
= tu_instance_to_handle(instance
);
472 tu_DestroyInstance(VkInstance _instance
,
473 const VkAllocationCallbacks
*pAllocator
)
475 TU_FROM_HANDLE(tu_instance
, instance
, _instance
);
480 for (int i
= 0; i
< instance
->physical_device_count
; ++i
) {
481 tu_physical_device_finish(instance
->physical_devices
+ i
);
484 VG(VALGRIND_DESTROY_MEMPOOL(instance
));
486 glsl_type_singleton_decref();
488 vk_debug_report_instance_destroy(&instance
->debug_report_callbacks
);
490 vk_free(&instance
->alloc
, instance
);
494 tu_enumerate_devices(struct tu_instance
*instance
)
496 /* TODO: Check for more devices ? */
497 drmDevicePtr devices
[8];
498 VkResult result
= VK_ERROR_INCOMPATIBLE_DRIVER
;
501 instance
->physical_device_count
= 0;
503 max_devices
= drmGetDevices2(0, devices
, ARRAY_SIZE(devices
));
505 if (instance
->debug_flags
& TU_DEBUG_STARTUP
)
506 tu_logi("Found %d drm nodes", max_devices
);
509 return vk_error(instance
, VK_ERROR_INCOMPATIBLE_DRIVER
);
511 for (unsigned i
= 0; i
< (unsigned) max_devices
; i
++) {
512 if (devices
[i
]->available_nodes
& 1 << DRM_NODE_RENDER
&&
513 devices
[i
]->bustype
== DRM_BUS_PLATFORM
) {
515 result
= tu_physical_device_init(
516 instance
->physical_devices
+ instance
->physical_device_count
,
517 instance
, devices
[i
]);
518 if (result
== VK_SUCCESS
)
519 ++instance
->physical_device_count
;
520 else if (result
!= VK_ERROR_INCOMPATIBLE_DRIVER
)
524 drmFreeDevices(devices
, max_devices
);
530 tu_EnumeratePhysicalDevices(VkInstance _instance
,
531 uint32_t *pPhysicalDeviceCount
,
532 VkPhysicalDevice
*pPhysicalDevices
)
534 TU_FROM_HANDLE(tu_instance
, instance
, _instance
);
535 VK_OUTARRAY_MAKE(out
, pPhysicalDevices
, pPhysicalDeviceCount
);
539 if (instance
->physical_device_count
< 0) {
540 result
= tu_enumerate_devices(instance
);
541 if (result
!= VK_SUCCESS
&& result
!= VK_ERROR_INCOMPATIBLE_DRIVER
)
545 for (uint32_t i
= 0; i
< instance
->physical_device_count
; ++i
) {
546 vk_outarray_append(&out
, p
)
548 *p
= tu_physical_device_to_handle(instance
->physical_devices
+ i
);
552 return vk_outarray_status(&out
);
556 tu_EnumeratePhysicalDeviceGroups(
557 VkInstance _instance
,
558 uint32_t *pPhysicalDeviceGroupCount
,
559 VkPhysicalDeviceGroupProperties
*pPhysicalDeviceGroupProperties
)
561 TU_FROM_HANDLE(tu_instance
, instance
, _instance
);
562 VK_OUTARRAY_MAKE(out
, pPhysicalDeviceGroupProperties
,
563 pPhysicalDeviceGroupCount
);
566 if (instance
->physical_device_count
< 0) {
567 result
= tu_enumerate_devices(instance
);
568 if (result
!= VK_SUCCESS
&& result
!= VK_ERROR_INCOMPATIBLE_DRIVER
)
572 for (uint32_t i
= 0; i
< instance
->physical_device_count
; ++i
) {
573 vk_outarray_append(&out
, p
)
575 p
->physicalDeviceCount
= 1;
576 p
->physicalDevices
[0] =
577 tu_physical_device_to_handle(instance
->physical_devices
+ i
);
578 p
->subsetAllocation
= false;
582 return vk_outarray_status(&out
);
586 tu_GetPhysicalDeviceFeatures(VkPhysicalDevice physicalDevice
,
587 VkPhysicalDeviceFeatures
*pFeatures
)
589 memset(pFeatures
, 0, sizeof(*pFeatures
));
591 *pFeatures
= (VkPhysicalDeviceFeatures
) {
592 .robustBufferAccess
= false,
593 .fullDrawIndexUint32
= true,
594 .imageCubeArray
= true,
595 .independentBlend
= true,
596 .geometryShader
= true,
597 .tessellationShader
= false,
598 .sampleRateShading
= true,
599 .dualSrcBlend
= true,
601 .multiDrawIndirect
= false,
602 .drawIndirectFirstInstance
= false,
604 .depthBiasClamp
= false,
605 .fillModeNonSolid
= false,
606 .depthBounds
= false,
608 .largePoints
= false,
610 .multiViewport
= false,
611 .samplerAnisotropy
= true,
612 .textureCompressionETC2
= true,
613 .textureCompressionASTC_LDR
= true,
614 .textureCompressionBC
= true,
615 .occlusionQueryPrecise
= true,
616 .pipelineStatisticsQuery
= false,
617 .vertexPipelineStoresAndAtomics
= false,
618 .fragmentStoresAndAtomics
= false,
619 .shaderTessellationAndGeometryPointSize
= false,
620 .shaderImageGatherExtended
= false,
621 .shaderStorageImageExtendedFormats
= false,
622 .shaderStorageImageMultisample
= false,
623 .shaderUniformBufferArrayDynamicIndexing
= false,
624 .shaderSampledImageArrayDynamicIndexing
= false,
625 .shaderStorageBufferArrayDynamicIndexing
= false,
626 .shaderStorageImageArrayDynamicIndexing
= false,
627 .shaderStorageImageReadWithoutFormat
= false,
628 .shaderStorageImageWriteWithoutFormat
= false,
629 .shaderClipDistance
= false,
630 .shaderCullDistance
= false,
631 .shaderFloat64
= false,
632 .shaderInt64
= false,
633 .shaderInt16
= false,
634 .sparseBinding
= false,
635 .variableMultisampleRate
= false,
636 .inheritedQueries
= false,
641 tu_GetPhysicalDeviceFeatures2(VkPhysicalDevice physicalDevice
,
642 VkPhysicalDeviceFeatures2
*pFeatures
)
644 vk_foreach_struct(ext
, pFeatures
->pNext
)
646 switch (ext
->sType
) {
647 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VARIABLE_POINTERS_FEATURES
: {
648 VkPhysicalDeviceVariablePointersFeatures
*features
= (void *) ext
;
649 features
->variablePointersStorageBuffer
= false;
650 features
->variablePointers
= false;
653 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MULTIVIEW_FEATURES
: {
654 VkPhysicalDeviceMultiviewFeatures
*features
=
655 (VkPhysicalDeviceMultiviewFeatures
*) ext
;
656 features
->multiview
= false;
657 features
->multiviewGeometryShader
= false;
658 features
->multiviewTessellationShader
= false;
661 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_DRAW_PARAMETERS_FEATURES
: {
662 VkPhysicalDeviceShaderDrawParametersFeatures
*features
=
663 (VkPhysicalDeviceShaderDrawParametersFeatures
*) ext
;
664 features
->shaderDrawParameters
= false;
667 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROTECTED_MEMORY_FEATURES
: {
668 VkPhysicalDeviceProtectedMemoryFeatures
*features
=
669 (VkPhysicalDeviceProtectedMemoryFeatures
*) ext
;
670 features
->protectedMemory
= false;
673 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_16BIT_STORAGE_FEATURES
: {
674 VkPhysicalDevice16BitStorageFeatures
*features
=
675 (VkPhysicalDevice16BitStorageFeatures
*) ext
;
676 features
->storageBuffer16BitAccess
= false;
677 features
->uniformAndStorageBuffer16BitAccess
= false;
678 features
->storagePushConstant16
= false;
679 features
->storageInputOutput16
= false;
682 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SAMPLER_YCBCR_CONVERSION_FEATURES
: {
683 VkPhysicalDeviceSamplerYcbcrConversionFeatures
*features
=
684 (VkPhysicalDeviceSamplerYcbcrConversionFeatures
*) ext
;
685 features
->samplerYcbcrConversion
= false;
688 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DESCRIPTOR_INDEXING_FEATURES_EXT
: {
689 VkPhysicalDeviceDescriptorIndexingFeaturesEXT
*features
=
690 (VkPhysicalDeviceDescriptorIndexingFeaturesEXT
*) ext
;
691 features
->shaderInputAttachmentArrayDynamicIndexing
= false;
692 features
->shaderUniformTexelBufferArrayDynamicIndexing
= false;
693 features
->shaderStorageTexelBufferArrayDynamicIndexing
= false;
694 features
->shaderUniformBufferArrayNonUniformIndexing
= false;
695 features
->shaderSampledImageArrayNonUniformIndexing
= false;
696 features
->shaderStorageBufferArrayNonUniformIndexing
= false;
697 features
->shaderStorageImageArrayNonUniformIndexing
= false;
698 features
->shaderInputAttachmentArrayNonUniformIndexing
= false;
699 features
->shaderUniformTexelBufferArrayNonUniformIndexing
= false;
700 features
->shaderStorageTexelBufferArrayNonUniformIndexing
= false;
701 features
->descriptorBindingUniformBufferUpdateAfterBind
= false;
702 features
->descriptorBindingSampledImageUpdateAfterBind
= false;
703 features
->descriptorBindingStorageImageUpdateAfterBind
= false;
704 features
->descriptorBindingStorageBufferUpdateAfterBind
= false;
705 features
->descriptorBindingUniformTexelBufferUpdateAfterBind
= false;
706 features
->descriptorBindingStorageTexelBufferUpdateAfterBind
= false;
707 features
->descriptorBindingUpdateUnusedWhilePending
= false;
708 features
->descriptorBindingPartiallyBound
= false;
709 features
->descriptorBindingVariableDescriptorCount
= false;
710 features
->runtimeDescriptorArray
= false;
713 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_CONDITIONAL_RENDERING_FEATURES_EXT
: {
714 VkPhysicalDeviceConditionalRenderingFeaturesEXT
*features
=
715 (VkPhysicalDeviceConditionalRenderingFeaturesEXT
*) ext
;
716 features
->conditionalRendering
= false;
717 features
->inheritedConditionalRendering
= false;
720 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TRANSFORM_FEEDBACK_FEATURES_EXT
: {
721 VkPhysicalDeviceTransformFeedbackFeaturesEXT
*features
=
722 (VkPhysicalDeviceTransformFeedbackFeaturesEXT
*) ext
;
723 features
->transformFeedback
= true;
724 features
->geometryStreams
= false;
731 return tu_GetPhysicalDeviceFeatures(physicalDevice
, &pFeatures
->features
);
735 tu_GetPhysicalDeviceProperties(VkPhysicalDevice physicalDevice
,
736 VkPhysicalDeviceProperties
*pProperties
)
738 TU_FROM_HANDLE(tu_physical_device
, pdevice
, physicalDevice
);
739 VkSampleCountFlags sample_counts
=
740 VK_SAMPLE_COUNT_1_BIT
| VK_SAMPLE_COUNT_2_BIT
| VK_SAMPLE_COUNT_4_BIT
;
   /* I have no idea what the maximum size is, but the hardware supports very
    * large numbers of descriptors (at least 2^16). This limit is based on
    * CP_LOAD_STATE6, which has a 28-bit field for the DWORD offset, so that
    * we don't have to think about what to do if that overflows, but really
    * nothing is likely to get close to this.
    */
   const size_t max_descriptor_set_size = (1 << 28) / A6XX_TEX_CONST_DWORDS;
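   /* For a sense of scale (assuming A6XX_TEX_CONST_DWORDS is 16): the limit
    * above works out to (1 << 28) / 16 = 16,777,216 descriptors, orders of
    * magnitude beyond what applications actually bind.
    */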
750 VkPhysicalDeviceLimits limits
= {
751 .maxImageDimension1D
= (1 << 14),
752 .maxImageDimension2D
= (1 << 14),
753 .maxImageDimension3D
= (1 << 11),
754 .maxImageDimensionCube
= (1 << 14),
755 .maxImageArrayLayers
= (1 << 11),
756 .maxTexelBufferElements
= 128 * 1024 * 1024,
757 .maxUniformBufferRange
= MAX_UNIFORM_BUFFER_RANGE
,
758 .maxStorageBufferRange
= MAX_STORAGE_BUFFER_RANGE
,
759 .maxPushConstantsSize
= MAX_PUSH_CONSTANTS_SIZE
,
760 .maxMemoryAllocationCount
= UINT32_MAX
,
761 .maxSamplerAllocationCount
= 64 * 1024,
762 .bufferImageGranularity
= 64, /* A cache line */
763 .sparseAddressSpaceSize
= 0xffffffffu
, /* buffer max size */
764 .maxBoundDescriptorSets
= MAX_SETS
,
765 .maxPerStageDescriptorSamplers
= max_descriptor_set_size
,
766 .maxPerStageDescriptorUniformBuffers
= max_descriptor_set_size
,
767 .maxPerStageDescriptorStorageBuffers
= max_descriptor_set_size
,
768 .maxPerStageDescriptorSampledImages
= max_descriptor_set_size
,
769 .maxPerStageDescriptorStorageImages
= max_descriptor_set_size
,
770 .maxPerStageDescriptorInputAttachments
= MAX_RTS
,
771 .maxPerStageResources
= max_descriptor_set_size
,
772 .maxDescriptorSetSamplers
= max_descriptor_set_size
,
773 .maxDescriptorSetUniformBuffers
= max_descriptor_set_size
,
774 .maxDescriptorSetUniformBuffersDynamic
= MAX_DYNAMIC_UNIFORM_BUFFERS
,
775 .maxDescriptorSetStorageBuffers
= max_descriptor_set_size
,
776 .maxDescriptorSetStorageBuffersDynamic
= MAX_DYNAMIC_STORAGE_BUFFERS
,
777 .maxDescriptorSetSampledImages
= max_descriptor_set_size
,
778 .maxDescriptorSetStorageImages
= max_descriptor_set_size
,
779 .maxDescriptorSetInputAttachments
= MAX_RTS
,
780 .maxVertexInputAttributes
= 32,
781 .maxVertexInputBindings
= 32,
782 .maxVertexInputAttributeOffset
= 4095,
783 .maxVertexInputBindingStride
= 2048,
784 .maxVertexOutputComponents
= 128,
785 .maxTessellationGenerationLevel
= 64,
786 .maxTessellationPatchSize
= 32,
787 .maxTessellationControlPerVertexInputComponents
= 128,
788 .maxTessellationControlPerVertexOutputComponents
= 128,
789 .maxTessellationControlPerPatchOutputComponents
= 120,
790 .maxTessellationControlTotalOutputComponents
= 4096,
791 .maxTessellationEvaluationInputComponents
= 128,
792 .maxTessellationEvaluationOutputComponents
= 128,
793 .maxGeometryShaderInvocations
= 32,
794 .maxGeometryInputComponents
= 64,
795 .maxGeometryOutputComponents
= 128,
796 .maxGeometryOutputVertices
= 256,
797 .maxGeometryTotalOutputComponents
= 1024,
798 .maxFragmentInputComponents
= 124,
799 .maxFragmentOutputAttachments
= 8,
800 .maxFragmentDualSrcAttachments
= 1,
801 .maxFragmentCombinedOutputResources
= 8,
802 .maxComputeSharedMemorySize
= 32768,
803 .maxComputeWorkGroupCount
= { 65535, 65535, 65535 },
804 .maxComputeWorkGroupInvocations
= 2048,
805 .maxComputeWorkGroupSize
= { 2048, 2048, 2048 },
806 .subPixelPrecisionBits
= 8,
807 .subTexelPrecisionBits
= 4 /* FIXME */,
808 .mipmapPrecisionBits
= 4 /* FIXME */,
809 .maxDrawIndexedIndexValue
= UINT32_MAX
,
810 .maxDrawIndirectCount
= UINT32_MAX
,
811 .maxSamplerLodBias
= 16,
812 .maxSamplerAnisotropy
= 16,
813 .maxViewports
= MAX_VIEWPORTS
,
814 .maxViewportDimensions
= { (1 << 14), (1 << 14) },
815 .viewportBoundsRange
= { INT16_MIN
, INT16_MAX
},
816 .viewportSubPixelBits
= 8,
817 .minMemoryMapAlignment
= 4096, /* A page */
818 .minTexelBufferOffsetAlignment
= 64,
819 .minUniformBufferOffsetAlignment
= 64,
820 .minStorageBufferOffsetAlignment
= 64,
821 .minTexelOffset
= -32,
822 .maxTexelOffset
= 31,
823 .minTexelGatherOffset
= -32,
824 .maxTexelGatherOffset
= 31,
825 .minInterpolationOffset
= -2,
826 .maxInterpolationOffset
= 2,
827 .subPixelInterpolationOffsetBits
= 8,
828 .maxFramebufferWidth
= (1 << 14),
829 .maxFramebufferHeight
= (1 << 14),
830 .maxFramebufferLayers
= (1 << 10),
831 .framebufferColorSampleCounts
= sample_counts
,
832 .framebufferDepthSampleCounts
= sample_counts
,
833 .framebufferStencilSampleCounts
= sample_counts
,
834 .framebufferNoAttachmentsSampleCounts
= sample_counts
,
835 .maxColorAttachments
= MAX_RTS
,
836 .sampledImageColorSampleCounts
= sample_counts
,
837 .sampledImageIntegerSampleCounts
= VK_SAMPLE_COUNT_1_BIT
,
838 .sampledImageDepthSampleCounts
= sample_counts
,
839 .sampledImageStencilSampleCounts
= sample_counts
,
840 .storageImageSampleCounts
= VK_SAMPLE_COUNT_1_BIT
,
841 .maxSampleMaskWords
= 1,
842 .timestampComputeAndGraphics
= true,
843 .timestampPeriod
= 1000000000.0 / 19200000.0, /* CP_ALWAYS_ON_COUNTER is fixed 19.2MHz */
844 .maxClipDistances
= 8,
845 .maxCullDistances
= 8,
846 .maxCombinedClipAndCullDistances
= 8,
847 .discreteQueuePriorities
= 1,
848 .pointSizeRange
= { 0.125, 255.875 },
849 .lineWidthRange
= { 0.0, 7.9921875 },
850 .pointSizeGranularity
= (1.0 / 8.0),
851 .lineWidthGranularity
= (1.0 / 128.0),
852 .strictLines
= false, /* FINISHME */
853 .standardSampleLocations
= true,
854 .optimalBufferCopyOffsetAlignment
= 128,
855 .optimalBufferCopyRowPitchAlignment
= 128,
856 .nonCoherentAtomSize
= 64,
859 *pProperties
= (VkPhysicalDeviceProperties
) {
860 .apiVersion
= tu_physical_device_api_version(pdevice
),
861 .driverVersion
= vk_get_driver_version(),
862 .vendorID
= 0, /* TODO */
864 .deviceType
= VK_PHYSICAL_DEVICE_TYPE_INTEGRATED_GPU
,
866 .sparseProperties
= { 0 },
869 strcpy(pProperties
->deviceName
, pdevice
->name
);
870 memcpy(pProperties
->pipelineCacheUUID
, pdevice
->cache_uuid
, VK_UUID_SIZE
);
874 tu_GetPhysicalDeviceProperties2(VkPhysicalDevice physicalDevice
,
875 VkPhysicalDeviceProperties2
*pProperties
)
877 TU_FROM_HANDLE(tu_physical_device
, pdevice
, physicalDevice
);
878 tu_GetPhysicalDeviceProperties(physicalDevice
, &pProperties
->properties
);
880 vk_foreach_struct(ext
, pProperties
->pNext
)
882 switch (ext
->sType
) {
883 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PUSH_DESCRIPTOR_PROPERTIES_KHR
: {
884 VkPhysicalDevicePushDescriptorPropertiesKHR
*properties
=
885 (VkPhysicalDevicePushDescriptorPropertiesKHR
*) ext
;
886 properties
->maxPushDescriptors
= MAX_PUSH_DESCRIPTORS
;
889 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_ID_PROPERTIES
: {
890 VkPhysicalDeviceIDProperties
*properties
=
891 (VkPhysicalDeviceIDProperties
*) ext
;
892 memcpy(properties
->driverUUID
, pdevice
->driver_uuid
, VK_UUID_SIZE
);
893 memcpy(properties
->deviceUUID
, pdevice
->device_uuid
, VK_UUID_SIZE
);
894 properties
->deviceLUIDValid
= false;
897 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MULTIVIEW_PROPERTIES
: {
898 VkPhysicalDeviceMultiviewProperties
*properties
=
899 (VkPhysicalDeviceMultiviewProperties
*) ext
;
900 properties
->maxMultiviewViewCount
= MAX_VIEWS
;
901 properties
->maxMultiviewInstanceIndex
= INT_MAX
;
904 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_POINT_CLIPPING_PROPERTIES
: {
905 VkPhysicalDevicePointClippingProperties
*properties
=
906 (VkPhysicalDevicePointClippingProperties
*) ext
;
907 properties
->pointClippingBehavior
=
908 VK_POINT_CLIPPING_BEHAVIOR_ALL_CLIP_PLANES
;
911 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MAINTENANCE_3_PROPERTIES
: {
912 VkPhysicalDeviceMaintenance3Properties
*properties
=
913 (VkPhysicalDeviceMaintenance3Properties
*) ext
;
914 /* Make sure everything is addressable by a signed 32-bit int, and
915 * our largest descriptors are 96 bytes. */
916 properties
->maxPerSetDescriptors
= (1ull << 31) / 96;
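      /* (1ull << 31) / 96 = 22,369,621 descriptors, i.e. the largest set that
       * still fits in 2 GiB when every descriptor is 96 bytes.
       */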
917 /* Our buffer size fields allow only this much */
918 properties
->maxMemoryAllocationSize
= 0xFFFFFFFFull
;
921 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TRANSFORM_FEEDBACK_PROPERTIES_EXT
: {
922 VkPhysicalDeviceTransformFeedbackPropertiesEXT
*properties
=
923 (VkPhysicalDeviceTransformFeedbackPropertiesEXT
*)ext
;
925 properties
->maxTransformFeedbackStreams
= IR3_MAX_SO_STREAMS
;
926 properties
->maxTransformFeedbackBuffers
= IR3_MAX_SO_BUFFERS
;
927 properties
->maxTransformFeedbackBufferSize
= UINT32_MAX
;
928 properties
->maxTransformFeedbackStreamDataSize
= 512;
929 properties
->maxTransformFeedbackBufferDataSize
= 512;
930 properties
->maxTransformFeedbackBufferDataStride
= 512;
931 properties
->transformFeedbackQueries
= true;
932 properties
->transformFeedbackStreamsLinesTriangles
= false;
933 properties
->transformFeedbackRasterizationStreamSelect
= false;
934 properties
->transformFeedbackDraw
= true;
937 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SAMPLE_LOCATIONS_PROPERTIES_EXT
: {
938 VkPhysicalDeviceSampleLocationsPropertiesEXT
*properties
=
939 (VkPhysicalDeviceSampleLocationsPropertiesEXT
*)ext
;
940 properties
->sampleLocationSampleCounts
= 0;
941 if (pdevice
->supported_extensions
.EXT_sample_locations
) {
942 properties
->sampleLocationSampleCounts
=
943 VK_SAMPLE_COUNT_1_BIT
| VK_SAMPLE_COUNT_2_BIT
| VK_SAMPLE_COUNT_4_BIT
;
945 properties
->maxSampleLocationGridSize
= (VkExtent2D
) { 1 , 1 };
946 properties
->sampleLocationCoordinateRange
[0] = 0.0f
;
947 properties
->sampleLocationCoordinateRange
[1] = 0.9375f
;
948 properties
->sampleLocationSubPixelBits
= 4;
949 properties
->variableSampleLocations
= true;
952 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SAMPLER_FILTER_MINMAX_PROPERTIES
: {
953 VkPhysicalDeviceSamplerFilterMinmaxProperties
*properties
=
954 (VkPhysicalDeviceSamplerFilterMinmaxProperties
*)ext
;
955 properties
->filterMinmaxImageComponentMapping
= true;
956 properties
->filterMinmaxSingleComponentFormats
= true;
966 static const VkQueueFamilyProperties tu_queue_family_properties
= {
968 VK_QUEUE_GRAPHICS_BIT
| VK_QUEUE_COMPUTE_BIT
| VK_QUEUE_TRANSFER_BIT
,
970 .timestampValidBits
= 48,
971 .minImageTransferGranularity
= { 1, 1, 1 },
975 tu_GetPhysicalDeviceQueueFamilyProperties(
976 VkPhysicalDevice physicalDevice
,
977 uint32_t *pQueueFamilyPropertyCount
,
978 VkQueueFamilyProperties
*pQueueFamilyProperties
)
980 VK_OUTARRAY_MAKE(out
, pQueueFamilyProperties
, pQueueFamilyPropertyCount
);
982 vk_outarray_append(&out
, p
) { *p
= tu_queue_family_properties
; }
986 tu_GetPhysicalDeviceQueueFamilyProperties2(
987 VkPhysicalDevice physicalDevice
,
988 uint32_t *pQueueFamilyPropertyCount
,
989 VkQueueFamilyProperties2
*pQueueFamilyProperties
)
991 VK_OUTARRAY_MAKE(out
, pQueueFamilyProperties
, pQueueFamilyPropertyCount
);
993 vk_outarray_append(&out
, p
)
995 p
->queueFamilyProperties
= tu_queue_family_properties
;
static uint64_t
tu_get_system_heap_size()
{
   struct sysinfo info;
   sysinfo(&info);

   uint64_t total_ram = (uint64_t) info.totalram * (uint64_t) info.mem_unit;

   /* We don't want to burn too much ram with the GPU. If the user has 4GiB
    * or less, we use at most half. If they have more than 4GiB, we use 3/4.
    */
   uint64_t available_ram;
   if (total_ram <= 4ull * 1024ull * 1024ull * 1024ull)
      available_ram = total_ram / 2;
   else
      available_ram = total_ram * 3 / 4;

   return available_ram;
}
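
/* Worked example of the heuristic above: a system with 3 GiB of RAM
 * advertises a 1.5 GiB heap, while an 8 GiB system advertises 6 GiB.
 */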
void
tu_GetPhysicalDeviceMemoryProperties(
   VkPhysicalDevice physicalDevice,
   VkPhysicalDeviceMemoryProperties *pMemoryProperties)
{
   pMemoryProperties->memoryHeapCount = 1;
   pMemoryProperties->memoryHeaps[0].size = tu_get_system_heap_size();
   pMemoryProperties->memoryHeaps[0].flags = VK_MEMORY_HEAP_DEVICE_LOCAL_BIT;

   pMemoryProperties->memoryTypeCount = 1;
   pMemoryProperties->memoryTypes[0].propertyFlags =
      VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT |
      VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT |
      VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
   pMemoryProperties->memoryTypes[0].heapIndex = 0;
}
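
/* Note: because the single memory type advertised above is HOST_COHERENT,
 * tu_FlushMappedMemoryRanges()/tu_InvalidateMappedMemoryRanges() further down
 * can be no-ops.
 */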
1037 tu_GetPhysicalDeviceMemoryProperties2(
1038 VkPhysicalDevice physicalDevice
,
1039 VkPhysicalDeviceMemoryProperties2
*pMemoryProperties
)
1041 return tu_GetPhysicalDeviceMemoryProperties(
1042 physicalDevice
, &pMemoryProperties
->memoryProperties
);
1046 tu_queue_init(struct tu_device
*device
,
1047 struct tu_queue
*queue
,
1048 uint32_t queue_family_index
,
1050 VkDeviceQueueCreateFlags flags
)
1052 queue
->_loader_data
.loaderMagic
= ICD_LOADER_MAGIC
;
1053 queue
->device
= device
;
1054 queue
->queue_family_index
= queue_family_index
;
1055 queue
->queue_idx
= idx
;
1056 queue
->flags
= flags
;
1058 int ret
= tu_drm_submitqueue_new(device
, 0, &queue
->msm_queue_id
);
1060 return VK_ERROR_INITIALIZATION_FAILED
;
1062 tu_fence_init(&queue
->submit_fence
, false);
1068 tu_queue_finish(struct tu_queue
*queue
)
1070 tu_fence_finish(&queue
->submit_fence
);
1071 tu_drm_submitqueue_close(queue
->device
, queue
->msm_queue_id
);
1075 tu_get_device_extension_index(const char *name
)
1077 for (unsigned i
= 0; i
< TU_DEVICE_EXTENSION_COUNT
; ++i
) {
1078 if (strcmp(name
, tu_device_extensions
[i
].extensionName
) == 0)
1084 struct PACKED bcolor_entry
{
1096 uint32_t z24
; /* also s8? */
1097 uint16_t srgb
[4]; /* appears to duplicate fp16[], but clamped, used for srgb */
1099 } border_color
[] = {
1100 [VK_BORDER_COLOR_FLOAT_TRANSPARENT_BLACK
] = {},
1101 [VK_BORDER_COLOR_INT_TRANSPARENT_BLACK
] = {},
1102 [VK_BORDER_COLOR_FLOAT_OPAQUE_BLACK
] = {
1103 .fp32
[3] = 0x3f800000,
1111 .rgb10a2
= 0xc0000000,
1114 [VK_BORDER_COLOR_INT_OPAQUE_BLACK
] = {
1118 [VK_BORDER_COLOR_FLOAT_OPAQUE_WHITE
] = {
1119 .fp32
[0 ... 3] = 0x3f800000,
1120 .ui16
[0 ... 3] = 0xffff,
1121 .si16
[0 ... 3] = 0x7fff,
1122 .fp16
[0 ... 3] = 0x3c00,
1126 .ui8
[0 ... 3] = 0xff,
1127 .si8
[0 ... 3] = 0x7f,
1128 .rgb10a2
= 0xffffffff,
1130 .srgb
[0 ... 3] = 0x3c00,
1132 [VK_BORDER_COLOR_INT_OPAQUE_WHITE
] = {
1140 tu_CreateDevice(VkPhysicalDevice physicalDevice
,
1141 const VkDeviceCreateInfo
*pCreateInfo
,
1142 const VkAllocationCallbacks
*pAllocator
,
1145 TU_FROM_HANDLE(tu_physical_device
, physical_device
, physicalDevice
);
1147 struct tu_device
*device
;
1149 /* Check enabled features */
1150 if (pCreateInfo
->pEnabledFeatures
) {
1151 VkPhysicalDeviceFeatures supported_features
;
1152 tu_GetPhysicalDeviceFeatures(physicalDevice
, &supported_features
);
1153 VkBool32
*supported_feature
= (VkBool32
*) &supported_features
;
1154 VkBool32
*enabled_feature
= (VkBool32
*) pCreateInfo
->pEnabledFeatures
;
1155 unsigned num_features
=
1156 sizeof(VkPhysicalDeviceFeatures
) / sizeof(VkBool32
);
1157 for (uint32_t i
= 0; i
< num_features
; i
++) {
1158 if (enabled_feature
[i
] && !supported_feature
[i
])
1159 return vk_error(physical_device
->instance
,
1160 VK_ERROR_FEATURE_NOT_PRESENT
);
1164 device
= vk_zalloc2(&physical_device
->instance
->alloc
, pAllocator
,
1165 sizeof(*device
), 8, VK_SYSTEM_ALLOCATION_SCOPE_DEVICE
);
1167 return vk_error(physical_device
->instance
, VK_ERROR_OUT_OF_HOST_MEMORY
);
1169 device
->_loader_data
.loaderMagic
= ICD_LOADER_MAGIC
;
1170 device
->instance
= physical_device
->instance
;
1171 device
->physical_device
= physical_device
;
1174 device
->alloc
= *pAllocator
;
1176 device
->alloc
= physical_device
->instance
->alloc
;
1178 for (uint32_t i
= 0; i
< pCreateInfo
->enabledExtensionCount
; i
++) {
1179 const char *ext_name
= pCreateInfo
->ppEnabledExtensionNames
[i
];
1180 int index
= tu_get_device_extension_index(ext_name
);
1182 !physical_device
->supported_extensions
.extensions
[index
]) {
1183 vk_free(&device
->alloc
, device
);
1184 return vk_error(physical_device
->instance
,
1185 VK_ERROR_EXTENSION_NOT_PRESENT
);
1188 device
->enabled_extensions
.extensions
[index
] = true;
1191 for (unsigned i
= 0; i
< pCreateInfo
->queueCreateInfoCount
; i
++) {
1192 const VkDeviceQueueCreateInfo
*queue_create
=
1193 &pCreateInfo
->pQueueCreateInfos
[i
];
1194 uint32_t qfi
= queue_create
->queueFamilyIndex
;
1195 device
->queues
[qfi
] = vk_alloc(
1196 &device
->alloc
, queue_create
->queueCount
* sizeof(struct tu_queue
),
1197 8, VK_SYSTEM_ALLOCATION_SCOPE_DEVICE
);
1198 if (!device
->queues
[qfi
]) {
1199 result
= VK_ERROR_OUT_OF_HOST_MEMORY
;
1203 memset(device
->queues
[qfi
], 0,
1204 queue_create
->queueCount
* sizeof(struct tu_queue
));
1206 device
->queue_count
[qfi
] = queue_create
->queueCount
;
1208 for (unsigned q
= 0; q
< queue_create
->queueCount
; q
++) {
1209 result
= tu_queue_init(device
, &device
->queues
[qfi
][q
], qfi
, q
,
1210 queue_create
->flags
);
1211 if (result
!= VK_SUCCESS
)
1216 device
->compiler
= ir3_compiler_create(NULL
, physical_device
->gpu_id
);
1217 if (!device
->compiler
)
#define VSC_DRAW_STRM_SIZE(pitch) ((pitch) * 32 + 0x100) /* extra size to store VSC_SIZE */
#define VSC_PRIM_STRM_SIZE(pitch) ((pitch) * 32)

   device->vsc_draw_strm_pitch = 0x440 * 4;
   device->vsc_prim_strm_pitch = 0x1040 * 4;
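   /* Illustrative arithmetic for the pitches above: the draw stream BO is
    * VSC_DRAW_STRM_SIZE(0x440 * 4) = 0x1100 * 32 + 0x100 = 0x22100 bytes
    * (~136 KiB), and the primitive stream BO is VSC_PRIM_STRM_SIZE(0x1040 * 4)
    * = 0x4100 * 32 = 0x82000 bytes (~520 KiB).
    */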
1226 result
= tu_bo_init_new(device
, &device
->vsc_draw_strm
, VSC_DRAW_STRM_SIZE(device
->vsc_draw_strm_pitch
));
1227 if (result
!= VK_SUCCESS
)
1230 result
= tu_bo_init_new(device
, &device
->vsc_prim_strm
, VSC_PRIM_STRM_SIZE(device
->vsc_prim_strm_pitch
));
1231 if (result
!= VK_SUCCESS
)
1232 goto fail_vsc_data2
;
1234 STATIC_ASSERT(sizeof(struct bcolor_entry
) == 128);
1235 result
= tu_bo_init_new(device
, &device
->border_color
, sizeof(border_color
));
1236 if (result
!= VK_SUCCESS
)
1237 goto fail_border_color
;
1239 result
= tu_bo_map(device
, &device
->border_color
);
1240 if (result
!= VK_SUCCESS
)
1241 goto fail_border_color_map
;
1243 memcpy(device
->border_color
.map
, border_color
, sizeof(border_color
));
1245 VkPipelineCacheCreateInfo ci
;
1246 ci
.sType
= VK_STRUCTURE_TYPE_PIPELINE_CACHE_CREATE_INFO
;
1249 ci
.pInitialData
= NULL
;
1250 ci
.initialDataSize
= 0;
1253 tu_CreatePipelineCache(tu_device_to_handle(device
), &ci
, NULL
, &pc
);
1254 if (result
!= VK_SUCCESS
)
1255 goto fail_pipeline_cache
;
1257 device
->mem_cache
= tu_pipeline_cache_from_handle(pc
);
1259 *pDevice
= tu_device_to_handle(device
);
1262 fail_pipeline_cache
:
1263 fail_border_color_map
:
1264 tu_bo_finish(device
, &device
->border_color
);
1267 tu_bo_finish(device
, &device
->vsc_prim_strm
);
1270 tu_bo_finish(device
, &device
->vsc_draw_strm
);
1273 ralloc_free(device
->compiler
);
1276 for (unsigned i
= 0; i
< TU_MAX_QUEUE_FAMILIES
; i
++) {
1277 for (unsigned q
= 0; q
< device
->queue_count
[i
]; q
++)
1278 tu_queue_finish(&device
->queues
[i
][q
]);
1279 if (device
->queue_count
[i
])
1280 vk_free(&device
->alloc
, device
->queues
[i
]);
1283 vk_free(&device
->alloc
, device
);
1288 tu_DestroyDevice(VkDevice _device
, const VkAllocationCallbacks
*pAllocator
)
1290 TU_FROM_HANDLE(tu_device
, device
, _device
);
1295 tu_bo_finish(device
, &device
->vsc_draw_strm
);
1296 tu_bo_finish(device
, &device
->vsc_prim_strm
);
1298 for (unsigned i
= 0; i
< TU_MAX_QUEUE_FAMILIES
; i
++) {
1299 for (unsigned q
= 0; q
< device
->queue_count
[i
]; q
++)
1300 tu_queue_finish(&device
->queues
[i
][q
]);
1301 if (device
->queue_count
[i
])
1302 vk_free(&device
->alloc
, device
->queues
[i
]);
1305 /* the compiler does not use pAllocator */
1306 ralloc_free(device
->compiler
);
1308 VkPipelineCache pc
= tu_pipeline_cache_to_handle(device
->mem_cache
);
1309 tu_DestroyPipelineCache(tu_device_to_handle(device
), pc
, NULL
);
1311 vk_free(&device
->alloc
, device
);
1315 tu_EnumerateInstanceLayerProperties(uint32_t *pPropertyCount
,
1316 VkLayerProperties
*pProperties
)
1318 *pPropertyCount
= 0;
1323 tu_EnumerateDeviceLayerProperties(VkPhysicalDevice physicalDevice
,
1324 uint32_t *pPropertyCount
,
1325 VkLayerProperties
*pProperties
)
1327 *pPropertyCount
= 0;
1332 tu_GetDeviceQueue2(VkDevice _device
,
1333 const VkDeviceQueueInfo2
*pQueueInfo
,
1336 TU_FROM_HANDLE(tu_device
, device
, _device
);
1337 struct tu_queue
*queue
;
1340 &device
->queues
[pQueueInfo
->queueFamilyIndex
][pQueueInfo
->queueIndex
];
1341 if (pQueueInfo
->flags
!= queue
->flags
) {
      /* From the Vulkan 1.1.70 spec:
       *
       * "The queue returned by vkGetDeviceQueue2 must have the same
       * flags value from this structure as that used at device
       * creation time in a VkDeviceQueueCreateInfo instance. If no
       * matching flags were specified at device creation time then
       * pQueue will return VK_NULL_HANDLE."
       */
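      /* For example, a queue created with VK_DEVICE_QUEUE_CREATE_PROTECTED_BIT
       * can only be retrieved by passing that same flag here, and a queue
       * created with flags == 0 only by passing flags == 0.
       */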
1350 *pQueue
= VK_NULL_HANDLE
;
1354 *pQueue
= tu_queue_to_handle(queue
);
1358 tu_GetDeviceQueue(VkDevice _device
,
1359 uint32_t queueFamilyIndex
,
1360 uint32_t queueIndex
,
1363 const VkDeviceQueueInfo2 info
=
1364 (VkDeviceQueueInfo2
) { .sType
= VK_STRUCTURE_TYPE_DEVICE_QUEUE_INFO_2
,
1365 .queueFamilyIndex
= queueFamilyIndex
,
1366 .queueIndex
= queueIndex
};
1368 tu_GetDeviceQueue2(_device
, &info
, pQueue
);
1372 tu_QueueSubmit(VkQueue _queue
,
1373 uint32_t submitCount
,
1374 const VkSubmitInfo
*pSubmits
,
1377 TU_FROM_HANDLE(tu_queue
, queue
, _queue
);
1379 for (uint32_t i
= 0; i
< submitCount
; ++i
) {
1380 const VkSubmitInfo
*submit
= pSubmits
+ i
;
1381 const bool last_submit
= (i
== submitCount
- 1);
1382 struct tu_bo_list bo_list
;
1383 tu_bo_list_init(&bo_list
);
1385 uint32_t entry_count
= 0;
1386 for (uint32_t j
= 0; j
< submit
->commandBufferCount
; ++j
) {
1387 TU_FROM_HANDLE(tu_cmd_buffer
, cmdbuf
, submit
->pCommandBuffers
[j
]);
1388 entry_count
+= cmdbuf
->cs
.entry_count
;
1391 struct drm_msm_gem_submit_cmd cmds
[entry_count
];
1392 uint32_t entry_idx
= 0;
1393 for (uint32_t j
= 0; j
< submit
->commandBufferCount
; ++j
) {
1394 TU_FROM_HANDLE(tu_cmd_buffer
, cmdbuf
, submit
->pCommandBuffers
[j
]);
1395 struct tu_cs
*cs
= &cmdbuf
->cs
;
1396 for (unsigned i
= 0; i
< cs
->entry_count
; ++i
, ++entry_idx
) {
1397 cmds
[entry_idx
].type
= MSM_SUBMIT_CMD_BUF
;
1398 cmds
[entry_idx
].submit_idx
=
1399 tu_bo_list_add(&bo_list
, cs
->entries
[i
].bo
,
1400 MSM_SUBMIT_BO_READ
| MSM_SUBMIT_BO_DUMP
);
1401 cmds
[entry_idx
].submit_offset
= cs
->entries
[i
].offset
;
1402 cmds
[entry_idx
].size
= cs
->entries
[i
].size
;
1403 cmds
[entry_idx
].pad
= 0;
1404 cmds
[entry_idx
].nr_relocs
= 0;
1405 cmds
[entry_idx
].relocs
= 0;
1408 tu_bo_list_merge(&bo_list
, &cmdbuf
->bo_list
);
1411 uint32_t flags
= MSM_PIPE_3D0
;
1413 flags
|= MSM_SUBMIT_FENCE_FD_OUT
;
1416 struct drm_msm_gem_submit req
= {
1418 .queueid
= queue
->msm_queue_id
,
1419 .bos
= (uint64_t)(uintptr_t) bo_list
.bo_infos
,
1420 .nr_bos
= bo_list
.count
,
1421 .cmds
= (uint64_t)(uintptr_t)cmds
,
1422 .nr_cmds
= entry_count
,
1425 int ret
= drmCommandWriteRead(queue
->device
->physical_device
->local_fd
,
1429 fprintf(stderr
, "submit failed: %s\n", strerror(errno
));
1433 tu_bo_list_destroy(&bo_list
);
1436 /* no need to merge fences as queue execution is serialized */
1437 tu_fence_update_fd(&queue
->submit_fence
, req
.fence_fd
);
1441 if (_fence
!= VK_NULL_HANDLE
) {
1442 TU_FROM_HANDLE(tu_fence
, fence
, _fence
);
1443 tu_fence_copy(fence
, &queue
->submit_fence
);
1450 tu_QueueWaitIdle(VkQueue _queue
)
1452 TU_FROM_HANDLE(tu_queue
, queue
, _queue
);
1454 tu_fence_wait_idle(&queue
->submit_fence
);
1460 tu_DeviceWaitIdle(VkDevice _device
)
1462 TU_FROM_HANDLE(tu_device
, device
, _device
);
1464 for (unsigned i
= 0; i
< TU_MAX_QUEUE_FAMILIES
; i
++) {
1465 for (unsigned q
= 0; q
< device
->queue_count
[i
]; q
++) {
1466 tu_QueueWaitIdle(tu_queue_to_handle(&device
->queues
[i
][q
]));
1473 tu_ImportSemaphoreFdKHR(VkDevice _device
,
1474 const VkImportSemaphoreFdInfoKHR
*pImportSemaphoreFdInfo
)
1482 tu_GetSemaphoreFdKHR(VkDevice _device
,
1483 const VkSemaphoreGetFdInfoKHR
*pGetFdInfo
,
1492 tu_ImportFenceFdKHR(VkDevice _device
,
1493 const VkImportFenceFdInfoKHR
*pImportFenceFdInfo
)
1501 tu_GetFenceFdKHR(VkDevice _device
,
1502 const VkFenceGetFdInfoKHR
*pGetFdInfo
,
1511 tu_EnumerateInstanceExtensionProperties(const char *pLayerName
,
1512 uint32_t *pPropertyCount
,
1513 VkExtensionProperties
*pProperties
)
1515 VK_OUTARRAY_MAKE(out
, pProperties
, pPropertyCount
);
   /* We support no layers */
1519 return vk_error(NULL
, VK_ERROR_LAYER_NOT_PRESENT
);
1521 for (int i
= 0; i
< TU_INSTANCE_EXTENSION_COUNT
; i
++) {
1522 if (tu_supported_instance_extensions
.extensions
[i
]) {
1523 vk_outarray_append(&out
, prop
) { *prop
= tu_instance_extensions
[i
]; }
1527 return vk_outarray_status(&out
);
1531 tu_EnumerateDeviceExtensionProperties(VkPhysicalDevice physicalDevice
,
1532 const char *pLayerName
,
1533 uint32_t *pPropertyCount
,
1534 VkExtensionProperties
*pProperties
)
   /* We support no layers */
1537 TU_FROM_HANDLE(tu_physical_device
, device
, physicalDevice
);
1538 VK_OUTARRAY_MAKE(out
, pProperties
, pPropertyCount
);
   /* We support no layers */
1542 return vk_error(NULL
, VK_ERROR_LAYER_NOT_PRESENT
);
1544 for (int i
= 0; i
< TU_DEVICE_EXTENSION_COUNT
; i
++) {
1545 if (device
->supported_extensions
.extensions
[i
]) {
1546 vk_outarray_append(&out
, prop
) { *prop
= tu_device_extensions
[i
]; }
1550 return vk_outarray_status(&out
);
1554 tu_GetInstanceProcAddr(VkInstance _instance
, const char *pName
)
1556 TU_FROM_HANDLE(tu_instance
, instance
, _instance
);
1558 return tu_lookup_entrypoint_checked(
1559 pName
, instance
? instance
->api_version
: 0,
1560 instance
? &instance
->enabled_extensions
: NULL
, NULL
);
1563 /* The loader wants us to expose a second GetInstanceProcAddr function
1564 * to work around certain LD_PRELOAD issues seen in apps.
1567 VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL
1568 vk_icdGetInstanceProcAddr(VkInstance instance
, const char *pName
);
1571 VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL
1572 vk_icdGetInstanceProcAddr(VkInstance instance
, const char *pName
)
1574 return tu_GetInstanceProcAddr(instance
, pName
);
1578 tu_GetDeviceProcAddr(VkDevice _device
, const char *pName
)
1580 TU_FROM_HANDLE(tu_device
, device
, _device
);
1582 return tu_lookup_entrypoint_checked(pName
, device
->instance
->api_version
,
1583 &device
->instance
->enabled_extensions
,
1584 &device
->enabled_extensions
);
1588 tu_alloc_memory(struct tu_device
*device
,
1589 const VkMemoryAllocateInfo
*pAllocateInfo
,
1590 const VkAllocationCallbacks
*pAllocator
,
1591 VkDeviceMemory
*pMem
)
1593 struct tu_device_memory
*mem
;
1596 assert(pAllocateInfo
->sType
== VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO
);
1598 if (pAllocateInfo
->allocationSize
== 0) {
1599 /* Apparently, this is allowed */
1600 *pMem
= VK_NULL_HANDLE
;
1604 mem
= vk_alloc2(&device
->alloc
, pAllocator
, sizeof(*mem
), 8,
1605 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT
);
1607 return vk_error(device
->instance
, VK_ERROR_OUT_OF_HOST_MEMORY
);
1609 const VkImportMemoryFdInfoKHR
*fd_info
=
1610 vk_find_struct_const(pAllocateInfo
->pNext
, IMPORT_MEMORY_FD_INFO_KHR
);
1611 if (fd_info
&& !fd_info
->handleType
)
1615 assert(fd_info
->handleType
==
1616 VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT
||
1617 fd_info
->handleType
==
1618 VK_EXTERNAL_MEMORY_HANDLE_TYPE_DMA_BUF_BIT_EXT
);
1621 * TODO Importing the same fd twice gives us the same handle without
1622 * reference counting. We need to maintain a per-instance handle-to-bo
1623 * table and add reference count to tu_bo.
1625 result
= tu_bo_init_dmabuf(device
, &mem
->bo
,
1626 pAllocateInfo
->allocationSize
, fd_info
->fd
);
1627 if (result
== VK_SUCCESS
) {
1628 /* take ownership and close the fd */
1633 tu_bo_init_new(device
, &mem
->bo
, pAllocateInfo
->allocationSize
);
1636 if (result
!= VK_SUCCESS
) {
1637 vk_free2(&device
->alloc
, pAllocator
, mem
);
1641 mem
->size
= pAllocateInfo
->allocationSize
;
1642 mem
->type_index
= pAllocateInfo
->memoryTypeIndex
;
1645 mem
->user_ptr
= NULL
;
1647 *pMem
= tu_device_memory_to_handle(mem
);
1653 tu_AllocateMemory(VkDevice _device
,
1654 const VkMemoryAllocateInfo
*pAllocateInfo
,
1655 const VkAllocationCallbacks
*pAllocator
,
1656 VkDeviceMemory
*pMem
)
1658 TU_FROM_HANDLE(tu_device
, device
, _device
);
1659 return tu_alloc_memory(device
, pAllocateInfo
, pAllocator
, pMem
);
1663 tu_FreeMemory(VkDevice _device
,
1664 VkDeviceMemory _mem
,
1665 const VkAllocationCallbacks
*pAllocator
)
1667 TU_FROM_HANDLE(tu_device
, device
, _device
);
1668 TU_FROM_HANDLE(tu_device_memory
, mem
, _mem
);
1673 tu_bo_finish(device
, &mem
->bo
);
1674 vk_free2(&device
->alloc
, pAllocator
, mem
);
1678 tu_MapMemory(VkDevice _device
,
1679 VkDeviceMemory _memory
,
1680 VkDeviceSize offset
,
1682 VkMemoryMapFlags flags
,
1685 TU_FROM_HANDLE(tu_device
, device
, _device
);
1686 TU_FROM_HANDLE(tu_device_memory
, mem
, _memory
);
1694 if (mem
->user_ptr
) {
1695 *ppData
= mem
->user_ptr
;
1696 } else if (!mem
->map
) {
1697 result
= tu_bo_map(device
, &mem
->bo
);
1698 if (result
!= VK_SUCCESS
)
1700 *ppData
= mem
->map
= mem
->bo
.map
;
1709 return vk_error(device
->instance
, VK_ERROR_MEMORY_MAP_FAILED
);
1713 tu_UnmapMemory(VkDevice _device
, VkDeviceMemory _memory
)
1715 /* I do not see any unmapping done by the freedreno Gallium driver. */
1719 tu_FlushMappedMemoryRanges(VkDevice _device
,
1720 uint32_t memoryRangeCount
,
1721 const VkMappedMemoryRange
*pMemoryRanges
)
1727 tu_InvalidateMappedMemoryRanges(VkDevice _device
,
1728 uint32_t memoryRangeCount
,
1729 const VkMappedMemoryRange
*pMemoryRanges
)
void
tu_GetBufferMemoryRequirements(VkDevice _device,
                               VkBuffer _buffer,
                               VkMemoryRequirements *pMemoryRequirements)
{
   TU_FROM_HANDLE(tu_buffer, buffer, _buffer);

   pMemoryRequirements->memoryTypeBits = 1;
   pMemoryRequirements->alignment = 64;
   pMemoryRequirements->size =
      align64(buffer->size, pMemoryRequirements->alignment);
}
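
/* For example, a VkBuffer created with size == 100 reports alignment 64 and
 * size == align64(100, 64) == 128.
 */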
1748 tu_GetBufferMemoryRequirements2(
1750 const VkBufferMemoryRequirementsInfo2
*pInfo
,
1751 VkMemoryRequirements2
*pMemoryRequirements
)
1753 tu_GetBufferMemoryRequirements(device
, pInfo
->buffer
,
1754 &pMemoryRequirements
->memoryRequirements
);
1758 tu_GetImageMemoryRequirements(VkDevice _device
,
1760 VkMemoryRequirements
*pMemoryRequirements
)
1762 TU_FROM_HANDLE(tu_image
, image
, _image
);
1764 pMemoryRequirements
->memoryTypeBits
= 1;
1765 pMemoryRequirements
->size
= image
->layout
.size
;
1766 pMemoryRequirements
->alignment
= image
->layout
.base_align
;
1770 tu_GetImageMemoryRequirements2(VkDevice device
,
1771 const VkImageMemoryRequirementsInfo2
*pInfo
,
1772 VkMemoryRequirements2
*pMemoryRequirements
)
1774 tu_GetImageMemoryRequirements(device
, pInfo
->image
,
1775 &pMemoryRequirements
->memoryRequirements
);
1779 tu_GetImageSparseMemoryRequirements(
1782 uint32_t *pSparseMemoryRequirementCount
,
1783 VkSparseImageMemoryRequirements
*pSparseMemoryRequirements
)
1789 tu_GetImageSparseMemoryRequirements2(
1791 const VkImageSparseMemoryRequirementsInfo2
*pInfo
,
1792 uint32_t *pSparseMemoryRequirementCount
,
1793 VkSparseImageMemoryRequirements2
*pSparseMemoryRequirements
)
1799 tu_GetDeviceMemoryCommitment(VkDevice device
,
1800 VkDeviceMemory memory
,
1801 VkDeviceSize
*pCommittedMemoryInBytes
)
1803 *pCommittedMemoryInBytes
= 0;
1807 tu_BindBufferMemory2(VkDevice device
,
1808 uint32_t bindInfoCount
,
1809 const VkBindBufferMemoryInfo
*pBindInfos
)
1811 for (uint32_t i
= 0; i
< bindInfoCount
; ++i
) {
1812 TU_FROM_HANDLE(tu_device_memory
, mem
, pBindInfos
[i
].memory
);
1813 TU_FROM_HANDLE(tu_buffer
, buffer
, pBindInfos
[i
].buffer
);
1816 buffer
->bo
= &mem
->bo
;
1817 buffer
->bo_offset
= pBindInfos
[i
].memoryOffset
;
1826 tu_BindBufferMemory(VkDevice device
,
1828 VkDeviceMemory memory
,
1829 VkDeviceSize memoryOffset
)
1831 const VkBindBufferMemoryInfo info
= {
1832 .sType
= VK_STRUCTURE_TYPE_BIND_BUFFER_MEMORY_INFO
,
1835 .memoryOffset
= memoryOffset
1838 return tu_BindBufferMemory2(device
, 1, &info
);
1842 tu_BindImageMemory2(VkDevice device
,
1843 uint32_t bindInfoCount
,
1844 const VkBindImageMemoryInfo
*pBindInfos
)
1846 for (uint32_t i
= 0; i
< bindInfoCount
; ++i
) {
1847 TU_FROM_HANDLE(tu_image
, image
, pBindInfos
[i
].image
);
1848 TU_FROM_HANDLE(tu_device_memory
, mem
, pBindInfos
[i
].memory
);
1851 image
->bo
= &mem
->bo
;
1852 image
->bo_offset
= pBindInfos
[i
].memoryOffset
;
1855 image
->bo_offset
= 0;
1863 tu_BindImageMemory(VkDevice device
,
1865 VkDeviceMemory memory
,
1866 VkDeviceSize memoryOffset
)
1868 const VkBindImageMemoryInfo info
= {
1869 .sType
= VK_STRUCTURE_TYPE_BIND_BUFFER_MEMORY_INFO
,
1872 .memoryOffset
= memoryOffset
1875 return tu_BindImageMemory2(device
, 1, &info
);
1879 tu_QueueBindSparse(VkQueue _queue
,
1880 uint32_t bindInfoCount
,
1881 const VkBindSparseInfo
*pBindInfo
,
1887 // Queue semaphore functions
1890 tu_CreateSemaphore(VkDevice _device
,
1891 const VkSemaphoreCreateInfo
*pCreateInfo
,
1892 const VkAllocationCallbacks
*pAllocator
,
1893 VkSemaphore
*pSemaphore
)
1895 TU_FROM_HANDLE(tu_device
, device
, _device
);
1897 struct tu_semaphore
*sem
=
1898 vk_alloc2(&device
->alloc
, pAllocator
, sizeof(*sem
), 8,
1899 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT
);
1901 return vk_error(device
->instance
, VK_ERROR_OUT_OF_HOST_MEMORY
);
1903 *pSemaphore
= tu_semaphore_to_handle(sem
);
1908 tu_DestroySemaphore(VkDevice _device
,
1909 VkSemaphore _semaphore
,
1910 const VkAllocationCallbacks
*pAllocator
)
1912 TU_FROM_HANDLE(tu_device
, device
, _device
);
1913 TU_FROM_HANDLE(tu_semaphore
, sem
, _semaphore
);
1917 vk_free2(&device
->alloc
, pAllocator
, sem
);
1921 tu_CreateEvent(VkDevice _device
,
1922 const VkEventCreateInfo
*pCreateInfo
,
1923 const VkAllocationCallbacks
*pAllocator
,
1926 TU_FROM_HANDLE(tu_device
, device
, _device
);
1927 struct tu_event
*event
=
1928 vk_alloc2(&device
->alloc
, pAllocator
, sizeof(*event
), 8,
1929 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT
);
1932 return vk_error(device
->instance
, VK_ERROR_OUT_OF_HOST_MEMORY
);
1934 VkResult result
= tu_bo_init_new(device
, &event
->bo
, 0x1000);
1935 if (result
!= VK_SUCCESS
)
1938 result
= tu_bo_map(device
, &event
->bo
);
1939 if (result
!= VK_SUCCESS
)
1942 *pEvent
= tu_event_to_handle(event
);
1947 tu_bo_finish(device
, &event
->bo
);
1949 vk_free2(&device
->alloc
, pAllocator
, event
);
1950 return vk_error(device
->instance
, VK_ERROR_OUT_OF_HOST_MEMORY
);
1954 tu_DestroyEvent(VkDevice _device
,
1956 const VkAllocationCallbacks
*pAllocator
)
1958 TU_FROM_HANDLE(tu_device
, device
, _device
);
1959 TU_FROM_HANDLE(tu_event
, event
, _event
);
1964 tu_bo_finish(device
, &event
->bo
);
1965 vk_free2(&device
->alloc
, pAllocator
, event
);
1969 tu_GetEventStatus(VkDevice _device
, VkEvent _event
)
1971 TU_FROM_HANDLE(tu_event
, event
, _event
);
1973 if (*(uint64_t*) event
->bo
.map
== 1)
1974 return VK_EVENT_SET
;
1975 return VK_EVENT_RESET
;
1979 tu_SetEvent(VkDevice _device
, VkEvent _event
)
1981 TU_FROM_HANDLE(tu_event
, event
, _event
);
1982 *(uint64_t*) event
->bo
.map
= 1;
1988 tu_ResetEvent(VkDevice _device
, VkEvent _event
)
1990 TU_FROM_HANDLE(tu_event
, event
, _event
);
1991 *(uint64_t*) event
->bo
.map
= 0;
1997 tu_CreateBuffer(VkDevice _device
,
1998 const VkBufferCreateInfo
*pCreateInfo
,
1999 const VkAllocationCallbacks
*pAllocator
,
2002 TU_FROM_HANDLE(tu_device
, device
, _device
);
2003 struct tu_buffer
*buffer
;
2005 assert(pCreateInfo
->sType
== VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO
);
2007 buffer
= vk_alloc2(&device
->alloc
, pAllocator
, sizeof(*buffer
), 8,
2008 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT
);
2010 return vk_error(device
->instance
, VK_ERROR_OUT_OF_HOST_MEMORY
);
2012 buffer
->size
= pCreateInfo
->size
;
2013 buffer
->usage
= pCreateInfo
->usage
;
2014 buffer
->flags
= pCreateInfo
->flags
;
2016 *pBuffer
= tu_buffer_to_handle(buffer
);
2022 tu_DestroyBuffer(VkDevice _device
,
2024 const VkAllocationCallbacks
*pAllocator
)
2026 TU_FROM_HANDLE(tu_device
, device
, _device
);
2027 TU_FROM_HANDLE(tu_buffer
, buffer
, _buffer
);
2032 vk_free2(&device
->alloc
, pAllocator
, buffer
);
VkResult
tu_CreateFramebuffer(VkDevice _device,
                     const VkFramebufferCreateInfo *pCreateInfo,
                     const VkAllocationCallbacks *pAllocator,
                     VkFramebuffer *pFramebuffer)
{
   TU_FROM_HANDLE(tu_device, device, _device);
   struct tu_framebuffer *framebuffer;

   assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO);

   size_t size = sizeof(*framebuffer) + sizeof(struct tu_attachment_info) *
                                           pCreateInfo->attachmentCount;
   framebuffer = vk_alloc2(&device->alloc, pAllocator, size, 8,
                           VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (framebuffer == NULL)
      return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);

   framebuffer->attachment_count = pCreateInfo->attachmentCount;
   framebuffer->width = pCreateInfo->width;
   framebuffer->height = pCreateInfo->height;
   framebuffer->layers = pCreateInfo->layers;
   for (uint32_t i = 0; i < pCreateInfo->attachmentCount; i++) {
      VkImageView _iview = pCreateInfo->pAttachments[i];
      struct tu_image_view *iview = tu_image_view_from_handle(_iview);
      framebuffer->attachments[i].attachment = iview;
   }

   *pFramebuffer = tu_framebuffer_to_handle(framebuffer);
   return VK_SUCCESS;
}
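/* The attachments[] array lives in the same allocation as the framebuffer
 * itself: `size` above covers the struct plus attachmentCount trailing
 * struct tu_attachment_info entries, so a single vk_alloc2()/vk_free2()
 * pair manages both.
 */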
void
tu_DestroyFramebuffer(VkDevice _device,
                      VkFramebuffer _fb,
                      const VkAllocationCallbacks *pAllocator)
{
   TU_FROM_HANDLE(tu_device, device, _device);
   TU_FROM_HANDLE(tu_framebuffer, fb, _fb);

   if (!fb)
      return;

   vk_free2(&device->alloc, pAllocator, fb);
}
static enum a6xx_tex_clamp
tu6_tex_wrap(VkSamplerAddressMode address_mode)
{
   switch (address_mode) {
   case VK_SAMPLER_ADDRESS_MODE_REPEAT:
      return A6XX_TEX_REPEAT;
   case VK_SAMPLER_ADDRESS_MODE_MIRRORED_REPEAT:
      return A6XX_TEX_MIRROR_REPEAT;
   case VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE:
      return A6XX_TEX_CLAMP_TO_EDGE;
   case VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_BORDER:
      return A6XX_TEX_CLAMP_TO_BORDER;
   case VK_SAMPLER_ADDRESS_MODE_MIRROR_CLAMP_TO_EDGE:
      /* only works for PoT.. need to emulate otherwise! */
      return A6XX_TEX_MIRROR_CLAMP;
   default:
      unreachable("illegal tex wrap mode");
   }
}
static enum a6xx_tex_filter
tu6_tex_filter(VkFilter filter, unsigned aniso)
{
   switch (filter) {
   case VK_FILTER_NEAREST:
      return A6XX_TEX_NEAREST;
   case VK_FILTER_LINEAR:
      return aniso ? A6XX_TEX_ANISO : A6XX_TEX_LINEAR;
   case VK_FILTER_CUBIC_EXT:
      return A6XX_TEX_CUBIC;
   default:
      unreachable("illegal texture filter");
   }
}
static inline enum adreno_compare_func
tu6_compare_func(VkCompareOp op)
{
   return (enum adreno_compare_func) op;
}
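/* The direct cast relies on VkCompareOp and the hardware's
 * adreno_compare_func enumerating the eight comparison functions in the
 * same order (NEVER = 0 through ALWAYS = 7), so no lookup table is needed.
 */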
static void
tu_init_sampler(struct tu_device *device,
                struct tu_sampler *sampler,
                const VkSamplerCreateInfo *pCreateInfo)
{
   const struct VkSamplerReductionModeCreateInfo *reduction =
      vk_find_struct_const(pCreateInfo->pNext, SAMPLER_REDUCTION_MODE_CREATE_INFO);

   unsigned aniso = pCreateInfo->anisotropyEnable ?
      util_last_bit(MIN2((uint32_t)pCreateInfo->maxAnisotropy >> 1, 8)) : 0;
   bool miplinear = (pCreateInfo->mipmapMode == VK_SAMPLER_MIPMAP_MODE_LINEAR);

   sampler->descriptor[0] =
      COND(miplinear, A6XX_TEX_SAMP_0_MIPFILTER_LINEAR_NEAR) |
      A6XX_TEX_SAMP_0_XY_MAG(tu6_tex_filter(pCreateInfo->magFilter, aniso)) |
      A6XX_TEX_SAMP_0_XY_MIN(tu6_tex_filter(pCreateInfo->minFilter, aniso)) |
      A6XX_TEX_SAMP_0_ANISO(aniso) |
      A6XX_TEX_SAMP_0_WRAP_S(tu6_tex_wrap(pCreateInfo->addressModeU)) |
      A6XX_TEX_SAMP_0_WRAP_T(tu6_tex_wrap(pCreateInfo->addressModeV)) |
      A6XX_TEX_SAMP_0_WRAP_R(tu6_tex_wrap(pCreateInfo->addressModeW)) |
      A6XX_TEX_SAMP_0_LOD_BIAS(pCreateInfo->mipLodBias);
   sampler->descriptor[1] =
      /* COND(!cso->seamless_cube_map, A6XX_TEX_SAMP_1_CUBEMAPSEAMLESSFILTOFF) | */
      COND(pCreateInfo->unnormalizedCoordinates, A6XX_TEX_SAMP_1_UNNORM_COORDS) |
      A6XX_TEX_SAMP_1_MIN_LOD(pCreateInfo->minLod) |
      A6XX_TEX_SAMP_1_MAX_LOD(pCreateInfo->maxLod) |
      COND(pCreateInfo->compareEnable,
           A6XX_TEX_SAMP_1_COMPARE_FUNC(tu6_compare_func(pCreateInfo->compareOp)));
   /* This is an offset into the border_color BO, which we fill with all the
    * possible Vulkan border colors in the correct order, so we can just use
    * the Vulkan enum with no translation necessary.
    */
   sampler->descriptor[2] =
      A6XX_TEX_SAMP_2_BCOLOR_OFFSET((unsigned) pCreateInfo->borderColor *
                                    sizeof(struct bcolor_entry));
   sampler->descriptor[3] = 0;

   if (reduction) {
      /* note: vulkan enum matches hw */
      sampler->descriptor[2] |=
         A6XX_TEX_SAMP_2_REDUCTION_MODE(reduction->reductionMode);
   }

   /* TODO:
    * A6XX_TEX_SAMP_1_MIPFILTER_LINEAR_FAR disables mipmapping, but vk has no NONE mipfilter?
    */
}
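/* Illustrative only (not part of the driver): a hypothetical application-side
 * sampler that exercises the anisotropic and compare paths handled above.
 * With maxAnisotropy = 16.0 the expression for `aniso` evaluates to
 * util_last_bit(MIN2(16 >> 1, 8)) = util_last_bit(8) = 4, which is the value
 * packed by A6XX_TEX_SAMP_0_ANISO().
 *
 *    VkSamplerCreateInfo info = {
 *       .sType = VK_STRUCTURE_TYPE_SAMPLER_CREATE_INFO,
 *       .magFilter = VK_FILTER_LINEAR,
 *       .minFilter = VK_FILTER_LINEAR,
 *       .mipmapMode = VK_SAMPLER_MIPMAP_MODE_LINEAR,
 *       .addressModeU = VK_SAMPLER_ADDRESS_MODE_REPEAT,
 *       .addressModeV = VK_SAMPLER_ADDRESS_MODE_REPEAT,
 *       .addressModeW = VK_SAMPLER_ADDRESS_MODE_REPEAT,
 *       .anisotropyEnable = VK_TRUE,
 *       .maxAnisotropy = 16.0f,
 *       .compareEnable = VK_TRUE,
 *       .compareOp = VK_COMPARE_OP_LESS_OR_EQUAL,
 *       .minLod = 0.0f,
 *       .maxLod = VK_LOD_CLAMP_NONE,
 *    };
 */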
VkResult
tu_CreateSampler(VkDevice _device,
                 const VkSamplerCreateInfo *pCreateInfo,
                 const VkAllocationCallbacks *pAllocator,
                 VkSampler *pSampler)
{
   TU_FROM_HANDLE(tu_device, device, _device);
   struct tu_sampler *sampler;

   assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_SAMPLER_CREATE_INFO);

   sampler = vk_alloc2(&device->alloc, pAllocator, sizeof(*sampler), 8,
                       VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (!sampler)
      return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);

   tu_init_sampler(device, sampler, pCreateInfo);
   *pSampler = tu_sampler_to_handle(sampler);

   return VK_SUCCESS;
}
void
tu_DestroySampler(VkDevice _device,
                  VkSampler _sampler,
                  const VkAllocationCallbacks *pAllocator)
{
   TU_FROM_HANDLE(tu_device, device, _device);
   TU_FROM_HANDLE(tu_sampler, sampler, _sampler);

   if (!sampler)
      return;

   vk_free2(&device->alloc, pAllocator, sampler);
}
/* vk_icd.h does not declare this function, so we declare it here to
 * suppress Wmissing-prototypes.
 */
PUBLIC VKAPI_ATTR VkResult VKAPI_CALL
vk_icdNegotiateLoaderICDInterfaceVersion(uint32_t *pSupportedVersion);

PUBLIC VKAPI_ATTR VkResult VKAPI_CALL
vk_icdNegotiateLoaderICDInterfaceVersion(uint32_t *pSupportedVersion)
{
   /* For the full details on loader interface versioning, see
    * <https://github.com/KhronosGroup/Vulkan-LoaderAndValidationLayers/blob/master/loader/LoaderAndLayerInterface.md>.
    * What follows is a condensed summary, to help you navigate the large and
    * confusing official doc.
    *
    *    - Loader interface v0 is incompatible with later versions. We don't
    *      support it.
    *
    *    - In loader interface v1:
    *        - The first ICD entrypoint called by the loader is
    *          vk_icdGetInstanceProcAddr(). The ICD must statically expose this
    *          entrypoint.
    *        - The ICD must statically expose no other Vulkan symbol unless it
    *          is linked with -Bsymbolic.
    *        - Each dispatchable Vulkan handle created by the ICD must be
    *          a pointer to a struct whose first member is VK_LOADER_DATA. The
    *          ICD must initialize VK_LOADER_DATA.loadMagic to
    *          ICD_LOADER_MAGIC.
    *        - The loader implements vkCreate{PLATFORM}SurfaceKHR() and
    *          vkDestroySurfaceKHR(). The ICD must be capable of working with
    *          such loader-managed surfaces.
    *
    *    - Loader interface v2 differs from v1 in:
    *        - The first ICD entrypoint called by the loader is
    *          vk_icdNegotiateLoaderICDInterfaceVersion(). The ICD must
    *          statically expose this entrypoint.
    *
    *    - Loader interface v3 differs from v2 in:
    *        - The ICD must implement vkCreate{PLATFORM}SurfaceKHR(),
    *          vkDestroySurfaceKHR(), and other API which uses VKSurfaceKHR,
    *          because the loader no longer does so.
    */
   *pSupportedVersion = MIN2(*pSupportedVersion, 3u);
   return VK_SUCCESS;
}
VkResult
tu_GetMemoryFdKHR(VkDevice _device,
                  const VkMemoryGetFdInfoKHR *pGetFdInfo,
                  int *pFd)
{
   TU_FROM_HANDLE(tu_device, device, _device);
   TU_FROM_HANDLE(tu_device_memory, memory, pGetFdInfo->memory);

   assert(pGetFdInfo->sType == VK_STRUCTURE_TYPE_MEMORY_GET_FD_INFO_KHR);

   /* At the moment, we support only the below handle types. */
   assert(pGetFdInfo->handleType ==
             VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT ||
          pGetFdInfo->handleType ==
             VK_EXTERNAL_MEMORY_HANDLE_TYPE_DMA_BUF_BIT_EXT);

   int prime_fd = tu_bo_export_dmabuf(device, &memory->bo);
   if (prime_fd < 0)
      return vk_error(device->instance, VK_ERROR_OUT_OF_DEVICE_MEMORY);

   *pFd = prime_fd;
   return VK_SUCCESS;
}
VkResult
tu_GetMemoryFdPropertiesKHR(VkDevice _device,
                            VkExternalMemoryHandleTypeFlagBits handleType,
                            int fd,
                            VkMemoryFdPropertiesKHR *pMemoryFdProperties)
{
   assert(handleType == VK_EXTERNAL_MEMORY_HANDLE_TYPE_DMA_BUF_BIT_EXT);
   pMemoryFdProperties->memoryTypeBits = 1;
   return VK_SUCCESS;
}
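/* memoryTypeBits is a bitmask of memory type indices through which the
 * imported dma-buf may be bound; setting it to 1 selects only memory type 0,
 * the single memory type this driver currently exposes.
 */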
void
tu_GetPhysicalDeviceExternalSemaphoreProperties(
   VkPhysicalDevice physicalDevice,
   const VkPhysicalDeviceExternalSemaphoreInfo *pExternalSemaphoreInfo,
   VkExternalSemaphoreProperties *pExternalSemaphoreProperties)
{
   pExternalSemaphoreProperties->exportFromImportedHandleTypes = 0;
   pExternalSemaphoreProperties->compatibleHandleTypes = 0;
   pExternalSemaphoreProperties->externalSemaphoreFeatures = 0;
}
void
tu_GetPhysicalDeviceExternalFenceProperties(
   VkPhysicalDevice physicalDevice,
   const VkPhysicalDeviceExternalFenceInfo *pExternalFenceInfo,
   VkExternalFenceProperties *pExternalFenceProperties)
{
   pExternalFenceProperties->exportFromImportedHandleTypes = 0;
   pExternalFenceProperties->compatibleHandleTypes = 0;
   pExternalFenceProperties->externalFenceFeatures = 0;
}
VkResult
tu_CreateDebugReportCallbackEXT(
   VkInstance _instance,
   const VkDebugReportCallbackCreateInfoEXT *pCreateInfo,
   const VkAllocationCallbacks *pAllocator,
   VkDebugReportCallbackEXT *pCallback)
{
   TU_FROM_HANDLE(tu_instance, instance, _instance);
   return vk_create_debug_report_callback(&instance->debug_report_callbacks,
                                          pCreateInfo, pAllocator,
                                          &instance->alloc, pCallback);
}
void
tu_DestroyDebugReportCallbackEXT(VkInstance _instance,
                                 VkDebugReportCallbackEXT _callback,
                                 const VkAllocationCallbacks *pAllocator)
{
   TU_FROM_HANDLE(tu_instance, instance, _instance);
   vk_destroy_debug_report_callback(&instance->debug_report_callbacks,
                                    _callback, pAllocator, &instance->alloc);
}
void
tu_DebugReportMessageEXT(VkInstance _instance,
                         VkDebugReportFlagsEXT flags,
                         VkDebugReportObjectTypeEXT objectType,
                         uint64_t object,
                         size_t location,
                         int32_t messageCode,
                         const char *pLayerPrefix,
                         const char *pMessage)
{
   TU_FROM_HANDLE(tu_instance, instance, _instance);
   vk_debug_report(&instance->debug_report_callbacks, flags, objectType,
                   object, location, messageCode, pLayerPrefix, pMessage);
}
void
tu_GetDeviceGroupPeerMemoryFeatures(
   VkDevice device,
   uint32_t heapIndex,
   uint32_t localDeviceIndex,
   uint32_t remoteDeviceIndex,
   VkPeerMemoryFeatureFlags *pPeerMemoryFeatures)
{
   assert(localDeviceIndex == remoteDeviceIndex);

   *pPeerMemoryFeatures = VK_PEER_MEMORY_FEATURE_COPY_SRC_BIT |
                          VK_PEER_MEMORY_FEATURE_COPY_DST_BIT |
                          VK_PEER_MEMORY_FEATURE_GENERIC_SRC_BIT |
                          VK_PEER_MEMORY_FEATURE_GENERIC_DST_BIT;
}
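/* Device groups on this driver only ever contain a single physical device,
 * so the local and remote indices must match and "peer" memory access is
 * just ordinary access; every peer-memory feature can therefore be
 * advertised.
 */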
void tu_GetPhysicalDeviceMultisamplePropertiesEXT(
   VkPhysicalDevice physicalDevice,
   VkSampleCountFlagBits samples,
   VkMultisamplePropertiesEXT *pMultisampleProperties)
{
   TU_FROM_HANDLE(tu_physical_device, pdevice, physicalDevice);

   if (samples <= VK_SAMPLE_COUNT_4_BIT &&
       pdevice->supported_extensions.EXT_sample_locations)
      pMultisampleProperties->maxSampleLocationGridSize = (VkExtent2D){ 1, 1 };
   else
      pMultisampleProperties->maxSampleLocationGridSize = (VkExtent2D){ 0, 0 };
}