/*
 * Copyright © 2016 Red Hat.
 * Copyright © 2016 Bas Nieuwenhuizen
 *
 * based in part on anv driver which is:
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */
#include "tu_private.h"

#include <sys/sysinfo.h>

#include "compiler/glsl_types.h"
#include "util/debug.h"
#include "util/disk_cache.h"
#include "util/u_atomic.h"
#include "vk_format.h"

#include "drm-uapi/msm_drm.h"

/* for fd_get_driver/device_uuid() */
#include "freedreno/common/freedreno_uuid.h"
static void
tu_semaphore_remove_temp(struct tu_device *device,
                         struct tu_semaphore *sem);
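
/* Build the pipeline cache UUID from the Mesa build timestamp plus the GPU
 * family, so on-disk caches are invalidated whenever either the driver build
 * or the target GPU changes.
 */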
static int
tu_device_get_cache_uuid(uint16_t family, void *uuid)
{
   uint32_t mesa_timestamp;
   uint16_t f = family;
   memset(uuid, 0, VK_UUID_SIZE);
   if (!disk_cache_get_function_timestamp(tu_device_get_cache_uuid,
                                          &mesa_timestamp))
      return -1;

   memcpy(uuid, &mesa_timestamp, 4);
   memcpy((char *) uuid + 4, &f, 2);
   snprintf((char *) uuid + 6, VK_UUID_SIZE - 10, "tu");
   return 0;
}
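
/* Buffer object (BO) helpers: allocate, import/export, map and release msm
 * GEM buffer objects through the tu_gem_* wrappers.
 */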
static VkResult
tu_bo_init(struct tu_device *dev,
           struct tu_bo *bo,
           uint32_t gem_handle,
           uint64_t size)
{
   uint64_t iova = tu_gem_info_iova(dev, gem_handle);
   if (!iova)
      return VK_ERROR_OUT_OF_DEVICE_MEMORY;

   *bo = (struct tu_bo) {
      .gem_handle = gem_handle,
      .size = size,
      .iova = iova,
   };

   return VK_SUCCESS;
}
VkResult
tu_bo_init_new(struct tu_device *dev, struct tu_bo *bo, uint64_t size)
{
   /* TODO: Choose better flags. As of 2018-11-12, freedreno/drm/msm_bo.c
    * always sets `flags = MSM_BO_WC`, and we copy that behavior here.
    */
   uint32_t gem_handle = tu_gem_new(dev, size, MSM_BO_WC);
   if (!gem_handle)
      return vk_error(dev->instance, VK_ERROR_OUT_OF_DEVICE_MEMORY);

   VkResult result = tu_bo_init(dev, bo, gem_handle, size);
   if (result != VK_SUCCESS) {
      tu_gem_close(dev, gem_handle);
      return vk_error(dev->instance, result);
   }

   return VK_SUCCESS;
}
VkResult
tu_bo_init_dmabuf(struct tu_device *dev,
                  struct tu_bo *bo,
                  uint64_t size,
                  int fd)
{
   uint32_t gem_handle = tu_gem_import_dmabuf(dev, fd, size);
   if (!gem_handle)
      return vk_error(dev->instance, VK_ERROR_INVALID_EXTERNAL_HANDLE);

   VkResult result = tu_bo_init(dev, bo, gem_handle, size);
   if (result != VK_SUCCESS) {
      tu_gem_close(dev, gem_handle);
      return vk_error(dev->instance, result);
   }

   return VK_SUCCESS;
}
int
tu_bo_export_dmabuf(struct tu_device *dev, struct tu_bo *bo)
{
   return tu_gem_export_dmabuf(dev, bo->gem_handle);
}
VkResult
tu_bo_map(struct tu_device *dev, struct tu_bo *bo)
{
   if (bo->map)
      return VK_SUCCESS;

   uint64_t offset = tu_gem_info_offset(dev, bo->gem_handle);
   if (!offset)
      return vk_error(dev->instance, VK_ERROR_OUT_OF_DEVICE_MEMORY);

   /* TODO: Should we use the wrapper os_mmap() like Freedreno does? */
   void *map = mmap(0, bo->size, PROT_READ | PROT_WRITE, MAP_SHARED,
                    dev->physical_device->local_fd, offset);
   if (map == MAP_FAILED)
      return vk_error(dev->instance, VK_ERROR_MEMORY_MAP_FAILED);

   bo->map = map;
   return VK_SUCCESS;
}
void
tu_bo_finish(struct tu_device *dev, struct tu_bo *bo)
{
   assert(bo->gem_handle);

   if (bo->map)
      munmap(bo->map, bo->size);

   tu_gem_close(dev, bo->gem_handle);
}
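
/* Open the DRM render node, verify that it is driven by the msm kernel
 * driver with a new enough UABI (1.3 added MSM_INFO_IOVA), then query the
 * GPU id, GMEM size and GMEM base that the rest of the driver keys off of.
 */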
static VkResult
tu_physical_device_init(struct tu_physical_device *device,
                        struct tu_instance *instance,
                        drmDevicePtr drm_device)
{
   const char *path = drm_device->nodes[DRM_NODE_RENDER];
   VkResult result = VK_SUCCESS;
   drmVersionPtr version;
   int fd;
   int master_fd = -1;

   fd = open(path, O_RDWR | O_CLOEXEC);
   if (fd < 0) {
      return vk_errorf(instance, VK_ERROR_INCOMPATIBLE_DRIVER,
                       "failed to open device %s", path);
   }

   /* Version 1.3 added MSM_INFO_IOVA. */
   const int min_version_major = 1;
   const int min_version_minor = 3;

   version = drmGetVersion(fd);
   if (!version) {
      close(fd);
      return vk_errorf(instance, VK_ERROR_INCOMPATIBLE_DRIVER,
                       "failed to query kernel driver version for device %s",
                       path);
   }

   if (strcmp(version->name, "msm")) {
      drmFreeVersion(version);
      close(fd);
      return vk_errorf(instance, VK_ERROR_INCOMPATIBLE_DRIVER,
                       "device %s does not use the msm kernel driver", path);
   }

   if (version->version_major != min_version_major ||
       version->version_minor < min_version_minor) {
      result = vk_errorf(instance, VK_ERROR_INCOMPATIBLE_DRIVER,
                         "kernel driver for device %s has version %d.%d, "
                         "but Vulkan requires version >= %d.%d",
                         path, version->version_major, version->version_minor,
                         min_version_major, min_version_minor);
      drmFreeVersion(version);
      close(fd);
      return result;
   }

   device->msm_major_version = version->version_major;
   device->msm_minor_version = version->version_minor;

   drmFreeVersion(version);

   if (instance->debug_flags & TU_DEBUG_STARTUP)
      tu_logi("Found compatible device '%s'.", path);

   device->_loader_data.loaderMagic = ICD_LOADER_MAGIC;
   device->instance = instance;
   assert(strlen(path) < ARRAY_SIZE(device->path));
   strncpy(device->path, path, ARRAY_SIZE(device->path));

   if (instance->enabled_extensions.KHR_display) {
      master_fd =
         open(drm_device->nodes[DRM_NODE_PRIMARY], O_RDWR | O_CLOEXEC);
      if (master_fd >= 0) {
         /* TODO: free master_fd if accel is not working? */
      }
   }

   device->master_fd = master_fd;
   device->local_fd = fd;

   if (tu_drm_get_gpu_id(device, &device->gpu_id)) {
      if (instance->debug_flags & TU_DEBUG_STARTUP)
         tu_logi("Could not query the GPU ID");
      result = vk_errorf(instance, VK_ERROR_INITIALIZATION_FAILED,
                         "could not get GPU ID");
      goto fail;
   }

   if (tu_drm_get_gmem_size(device, &device->gmem_size)) {
      if (instance->debug_flags & TU_DEBUG_STARTUP)
         tu_logi("Could not query the GMEM size");
      result = vk_errorf(instance, VK_ERROR_INITIALIZATION_FAILED,
                         "could not get GMEM size");
      goto fail;
   }

   if (tu_drm_get_gmem_base(device, &device->gmem_base)) {
      if (instance->debug_flags & TU_DEBUG_STARTUP)
         tu_logi("Could not query the GMEM base");
      result = vk_errorf(instance, VK_ERROR_INITIALIZATION_FAILED,
                         "could not get GMEM base");
      goto fail;
   }

   memset(device->name, 0, sizeof(device->name));
   sprintf(device->name, "FD%d", device->gpu_id);

   switch (device->gpu_id) {
   case 618:
      device->ccu_offset_gmem = 0x7c000; /* 0x7e000 in some cases? */
      device->ccu_offset_bypass = 0x10000;
      device->tile_align_w = 64;
      device->magic.PC_UNKNOWN_9805 = 0x0;
      device->magic.SP_UNKNOWN_A0F8 = 0x0;
      break;
   case 630:
   case 640:
      device->ccu_offset_gmem = 0xf8000;
      device->ccu_offset_bypass = 0x20000;
      device->tile_align_w = 64;
      device->magic.PC_UNKNOWN_9805 = 0x1;
      device->magic.SP_UNKNOWN_A0F8 = 0x1;
      break;
   case 650:
      device->ccu_offset_gmem = 0x114000;
      device->ccu_offset_bypass = 0x30000;
      device->tile_align_w = 96;
      device->magic.PC_UNKNOWN_9805 = 0x2;
      device->magic.SP_UNKNOWN_A0F8 = 0x2;
      break;
   default:
      result = vk_errorf(instance, VK_ERROR_INITIALIZATION_FAILED,
                         "device %s is unsupported", device->name);
      goto fail;
   }

   if (tu_device_get_cache_uuid(device->gpu_id, device->cache_uuid)) {
      result = vk_errorf(instance, VK_ERROR_INITIALIZATION_FAILED,
                         "cannot generate UUID");
      goto fail;
   }

   /* The gpu id is already embedded in the uuid so we just pass "tu"
    * when creating the cache.
    */
   char buf[VK_UUID_SIZE * 2 + 1];
   disk_cache_format_hex_id(buf, device->cache_uuid, VK_UUID_SIZE * 2);
   device->disk_cache = disk_cache_create(device->name, buf, 0);

   fprintf(stderr, "WARNING: tu is not a conformant vulkan implementation, "
                   "testing use only.\n");

   fd_get_driver_uuid(device->driver_uuid);
   fd_get_device_uuid(device->device_uuid, device->gpu_id);

   tu_physical_device_get_supported_extensions(device, &device->supported_extensions);

   if (result != VK_SUCCESS) {
      vk_error(instance, result);
      goto fail;
   }

   result = tu_wsi_init(device);
   if (result != VK_SUCCESS) {
      vk_error(instance, result);
      goto fail;
   }

   return VK_SUCCESS;

fail:
   close(fd);
   if (master_fd != -1)
      close(master_fd);
   return result;
}
static void
tu_physical_device_finish(struct tu_physical_device *device)
{
   tu_wsi_finish(device);

   disk_cache_destroy(device->disk_cache);
   close(device->local_fd);
   if (device->master_fd != -1)
      close(device->master_fd);
}
static VKAPI_ATTR void *
default_alloc_func(void *pUserData,
                   size_t size,
                   size_t align,
                   VkSystemAllocationScope allocationScope)
{
   return malloc(size);
}

static VKAPI_ATTR void *
default_realloc_func(void *pUserData,
                     void *pOriginal,
                     size_t size,
                     size_t align,
                     VkSystemAllocationScope allocationScope)
{
   return realloc(pOriginal, size);
}

static VKAPI_ATTR void
default_free_func(void *pUserData, void *pMemory)
{
   free(pMemory);
}

static const VkAllocationCallbacks default_alloc = {
   .pUserData = NULL,
   .pfnAllocation = default_alloc_func,
   .pfnReallocation = default_realloc_func,
   .pfnFree = default_free_func,
};
static const struct debug_control tu_debug_options[] = {
   { "startup", TU_DEBUG_STARTUP },
   { "nir", TU_DEBUG_NIR },
   { "ir3", TU_DEBUG_IR3 },
   { "nobin", TU_DEBUG_NOBIN },
   { "sysmem", TU_DEBUG_SYSMEM },
   { "forcebin", TU_DEBUG_FORCEBIN },
   { "noubwc", TU_DEBUG_NOUBWC },
   { NULL, 0 }
};
const char *
tu_get_debug_option_name(int id)
{
   assert(id < ARRAY_SIZE(tu_debug_options) - 1);
   return tu_debug_options[id].string;
}
static int
tu_get_instance_extension_index(const char *name)
{
   for (unsigned i = 0; i < TU_INSTANCE_EXTENSION_COUNT; ++i) {
      if (strcmp(name, tu_instance_extensions[i].extensionName) == 0)
         return i;
   }
   return -1;
}
VkResult
tu_CreateInstance(const VkInstanceCreateInfo *pCreateInfo,
                  const VkAllocationCallbacks *pAllocator,
                  VkInstance *pInstance)
{
   struct tu_instance *instance;
   VkResult result;

   assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_INSTANCE_CREATE_INFO);

   uint32_t client_version;
   if (pCreateInfo->pApplicationInfo &&
       pCreateInfo->pApplicationInfo->apiVersion != 0) {
      client_version = pCreateInfo->pApplicationInfo->apiVersion;
   } else {
      tu_EnumerateInstanceVersion(&client_version);
   }

   instance = vk_zalloc2(&default_alloc, pAllocator, sizeof(*instance), 8,
                         VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
   if (!instance)
      return vk_error(NULL, VK_ERROR_OUT_OF_HOST_MEMORY);

   instance->_loader_data.loaderMagic = ICD_LOADER_MAGIC;

   if (pAllocator)
      instance->alloc = *pAllocator;
   else
      instance->alloc = default_alloc;

   instance->api_version = client_version;
   instance->physical_device_count = -1;

   instance->debug_flags =
      parse_debug_string(getenv("TU_DEBUG"), tu_debug_options);

   if (instance->debug_flags & TU_DEBUG_STARTUP)
      tu_logi("Created an instance");

   for (uint32_t i = 0; i < pCreateInfo->enabledExtensionCount; i++) {
      const char *ext_name = pCreateInfo->ppEnabledExtensionNames[i];
      int index = tu_get_instance_extension_index(ext_name);

      if (index < 0 || !tu_instance_extensions_supported.extensions[index]) {
         vk_free2(&default_alloc, pAllocator, instance);
         return vk_error(instance, VK_ERROR_EXTENSION_NOT_PRESENT);
      }

      instance->enabled_extensions.extensions[index] = true;
   }

   result = vk_debug_report_instance_init(&instance->debug_report_callbacks);
   if (result != VK_SUCCESS) {
      vk_free2(&default_alloc, pAllocator, instance);
      return vk_error(instance, result);
   }

   glsl_type_singleton_init_or_ref();

   VG(VALGRIND_CREATE_MEMPOOL(instance, 0, false));

   *pInstance = tu_instance_to_handle(instance);

   return VK_SUCCESS;
}
void
tu_DestroyInstance(VkInstance _instance,
                   const VkAllocationCallbacks *pAllocator)
{
   TU_FROM_HANDLE(tu_instance, instance, _instance);

   if (!instance)
      return;

   for (int i = 0; i < instance->physical_device_count; ++i) {
      tu_physical_device_finish(instance->physical_devices + i);
   }

   VG(VALGRIND_DESTROY_MEMPOOL(instance));

   glsl_type_singleton_decref();

   vk_debug_report_instance_destroy(&instance->debug_report_callbacks);

   vk_free(&instance->alloc, instance);
}
static VkResult
tu_enumerate_devices(struct tu_instance *instance)
{
   /* TODO: Check for more devices? */
   drmDevicePtr devices[8];
   VkResult result = VK_ERROR_INCOMPATIBLE_DRIVER;
   int max_devices;

   instance->physical_device_count = 0;

   max_devices = drmGetDevices2(0, devices, ARRAY_SIZE(devices));

   if (instance->debug_flags & TU_DEBUG_STARTUP) {
      if (max_devices < 0)
         tu_logi("drmGetDevices2 returned error: %s\n", strerror(max_devices));
      else
         tu_logi("Found %d drm nodes", max_devices);
   }

   if (max_devices < 1)
      return vk_error(instance, VK_ERROR_INCOMPATIBLE_DRIVER);

   for (unsigned i = 0; i < (unsigned) max_devices; i++) {
      if (devices[i]->available_nodes & 1 << DRM_NODE_RENDER &&
          devices[i]->bustype == DRM_BUS_PLATFORM) {

         result = tu_physical_device_init(
            instance->physical_devices + instance->physical_device_count,
            instance, devices[i]);
         if (result == VK_SUCCESS)
            ++instance->physical_device_count;
         else if (result != VK_ERROR_INCOMPATIBLE_DRIVER)
            break;
      }
   }
   drmFreeDevices(devices, max_devices);

   return result;
}
VkResult
tu_EnumeratePhysicalDevices(VkInstance _instance,
                            uint32_t *pPhysicalDeviceCount,
                            VkPhysicalDevice *pPhysicalDevices)
{
   TU_FROM_HANDLE(tu_instance, instance, _instance);
   VK_OUTARRAY_MAKE(out, pPhysicalDevices, pPhysicalDeviceCount);

   VkResult result;

   if (instance->physical_device_count < 0) {
      result = tu_enumerate_devices(instance);
      if (result != VK_SUCCESS && result != VK_ERROR_INCOMPATIBLE_DRIVER)
         return result;
   }

   for (uint32_t i = 0; i < instance->physical_device_count; ++i) {
      vk_outarray_append(&out, p)
      {
         *p = tu_physical_device_to_handle(instance->physical_devices + i);
      }
   }

   return vk_outarray_status(&out);
}
VkResult
tu_EnumeratePhysicalDeviceGroups(
   VkInstance _instance,
   uint32_t *pPhysicalDeviceGroupCount,
   VkPhysicalDeviceGroupProperties *pPhysicalDeviceGroupProperties)
{
   TU_FROM_HANDLE(tu_instance, instance, _instance);
   VK_OUTARRAY_MAKE(out, pPhysicalDeviceGroupProperties,
                    pPhysicalDeviceGroupCount);
   VkResult result;

   if (instance->physical_device_count < 0) {
      result = tu_enumerate_devices(instance);
      if (result != VK_SUCCESS && result != VK_ERROR_INCOMPATIBLE_DRIVER)
         return result;
   }

   for (uint32_t i = 0; i < instance->physical_device_count; ++i) {
      vk_outarray_append(&out, p)
      {
         p->physicalDeviceCount = 1;
         p->physicalDevices[0] =
            tu_physical_device_to_handle(instance->physical_devices + i);
         p->subsetAllocation = false;
      }
   }

   return vk_outarray_status(&out);
}
void
tu_GetPhysicalDeviceFeatures(VkPhysicalDevice physicalDevice,
                             VkPhysicalDeviceFeatures *pFeatures)
{
   memset(pFeatures, 0, sizeof(*pFeatures));

   *pFeatures = (VkPhysicalDeviceFeatures) {
      .robustBufferAccess = true,
      .fullDrawIndexUint32 = true,
      .imageCubeArray = true,
      .independentBlend = true,
      .geometryShader = true,
      .tessellationShader = true,
      .sampleRateShading = true,
      .dualSrcBlend = true,
      .multiDrawIndirect = true,
      .drawIndirectFirstInstance = true,
      .depthBiasClamp = true,
      .fillModeNonSolid = true,
      .multiViewport = false,
      .samplerAnisotropy = true,
      .textureCompressionETC2 = true,
      .textureCompressionASTC_LDR = true,
      .textureCompressionBC = true,
      .occlusionQueryPrecise = true,
      .pipelineStatisticsQuery = false,
      .vertexPipelineStoresAndAtomics = false,
      .fragmentStoresAndAtomics = false,
      .shaderTessellationAndGeometryPointSize = false,
      .shaderImageGatherExtended = false,
      .shaderStorageImageExtendedFormats = false,
      .shaderStorageImageMultisample = false,
      .shaderUniformBufferArrayDynamicIndexing = false,
      .shaderSampledImageArrayDynamicIndexing = false,
      .shaderStorageBufferArrayDynamicIndexing = false,
      .shaderStorageImageArrayDynamicIndexing = false,
      .shaderStorageImageReadWithoutFormat = false,
      .shaderStorageImageWriteWithoutFormat = false,
      .shaderClipDistance = false,
      .shaderCullDistance = false,
      .shaderFloat64 = false,
      .shaderInt64 = false,
      .shaderInt16 = false,
      .sparseBinding = false,
      .variableMultisampleRate = false,
      .inheritedQueries = false,
   };
}
void
tu_GetPhysicalDeviceFeatures2(VkPhysicalDevice physicalDevice,
                              VkPhysicalDeviceFeatures2 *pFeatures)
{
   vk_foreach_struct(ext, pFeatures->pNext)
   {
      switch (ext->sType) {
      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VULKAN_1_1_FEATURES: {
         VkPhysicalDeviceVulkan11Features *features = (void *) ext;
         features->storageBuffer16BitAccess = false;
         features->uniformAndStorageBuffer16BitAccess = false;
         features->storagePushConstant16 = false;
         features->storageInputOutput16 = false;
         features->multiview = false;
         features->multiviewGeometryShader = false;
         features->multiviewTessellationShader = false;
         features->variablePointersStorageBuffer = false;
         features->variablePointers = false;
         features->protectedMemory = false;
         features->samplerYcbcrConversion = true;
         features->shaderDrawParameters = true;
         break;
      }
      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VARIABLE_POINTERS_FEATURES: {
         VkPhysicalDeviceVariablePointersFeatures *features = (void *) ext;
         features->variablePointersStorageBuffer = false;
         features->variablePointers = false;
         break;
      }
      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MULTIVIEW_FEATURES: {
         VkPhysicalDeviceMultiviewFeatures *features =
            (VkPhysicalDeviceMultiviewFeatures *) ext;
         features->multiview = false;
         features->multiviewGeometryShader = false;
         features->multiviewTessellationShader = false;
         break;
      }
      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_DRAW_PARAMETERS_FEATURES: {
         VkPhysicalDeviceShaderDrawParametersFeatures *features =
            (VkPhysicalDeviceShaderDrawParametersFeatures *) ext;
         features->shaderDrawParameters = true;
         break;
      }
      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROTECTED_MEMORY_FEATURES: {
         VkPhysicalDeviceProtectedMemoryFeatures *features =
            (VkPhysicalDeviceProtectedMemoryFeatures *) ext;
         features->protectedMemory = false;
         break;
      }
      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_16BIT_STORAGE_FEATURES: {
         VkPhysicalDevice16BitStorageFeatures *features =
            (VkPhysicalDevice16BitStorageFeatures *) ext;
         features->storageBuffer16BitAccess = false;
         features->uniformAndStorageBuffer16BitAccess = false;
         features->storagePushConstant16 = false;
         features->storageInputOutput16 = false;
         break;
      }
      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SAMPLER_YCBCR_CONVERSION_FEATURES: {
         VkPhysicalDeviceSamplerYcbcrConversionFeatures *features =
            (VkPhysicalDeviceSamplerYcbcrConversionFeatures *) ext;
         features->samplerYcbcrConversion = true;
         break;
      }
      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DESCRIPTOR_INDEXING_FEATURES_EXT: {
         VkPhysicalDeviceDescriptorIndexingFeaturesEXT *features =
            (VkPhysicalDeviceDescriptorIndexingFeaturesEXT *) ext;
         features->shaderInputAttachmentArrayDynamicIndexing = false;
         features->shaderUniformTexelBufferArrayDynamicIndexing = false;
         features->shaderStorageTexelBufferArrayDynamicIndexing = false;
         features->shaderUniformBufferArrayNonUniformIndexing = false;
         features->shaderSampledImageArrayNonUniformIndexing = false;
         features->shaderStorageBufferArrayNonUniformIndexing = false;
         features->shaderStorageImageArrayNonUniformIndexing = false;
         features->shaderInputAttachmentArrayNonUniformIndexing = false;
         features->shaderUniformTexelBufferArrayNonUniformIndexing = false;
         features->shaderStorageTexelBufferArrayNonUniformIndexing = false;
         features->descriptorBindingUniformBufferUpdateAfterBind = false;
         features->descriptorBindingSampledImageUpdateAfterBind = false;
         features->descriptorBindingStorageImageUpdateAfterBind = false;
         features->descriptorBindingStorageBufferUpdateAfterBind = false;
         features->descriptorBindingUniformTexelBufferUpdateAfterBind = false;
         features->descriptorBindingStorageTexelBufferUpdateAfterBind = false;
         features->descriptorBindingUpdateUnusedWhilePending = false;
         features->descriptorBindingPartiallyBound = false;
         features->descriptorBindingVariableDescriptorCount = false;
         features->runtimeDescriptorArray = false;
         break;
      }
      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_CONDITIONAL_RENDERING_FEATURES_EXT: {
         VkPhysicalDeviceConditionalRenderingFeaturesEXT *features =
            (VkPhysicalDeviceConditionalRenderingFeaturesEXT *) ext;
         features->conditionalRendering = false;
         features->inheritedConditionalRendering = false;
         break;
      }
      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TRANSFORM_FEEDBACK_FEATURES_EXT: {
         VkPhysicalDeviceTransformFeedbackFeaturesEXT *features =
            (VkPhysicalDeviceTransformFeedbackFeaturesEXT *) ext;
         features->transformFeedback = true;
         features->geometryStreams = false;
         break;
      }
      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_INDEX_TYPE_UINT8_FEATURES_EXT: {
         VkPhysicalDeviceIndexTypeUint8FeaturesEXT *features =
            (VkPhysicalDeviceIndexTypeUint8FeaturesEXT *)ext;
         features->indexTypeUint8 = true;
         break;
      }
      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VERTEX_ATTRIBUTE_DIVISOR_FEATURES_EXT: {
         VkPhysicalDeviceVertexAttributeDivisorFeaturesEXT *features =
            (VkPhysicalDeviceVertexAttributeDivisorFeaturesEXT *)ext;
         features->vertexAttributeInstanceRateDivisor = true;
         features->vertexAttributeInstanceRateZeroDivisor = true;
         break;
      }
      default:
         break;
      }
   }
   return tu_GetPhysicalDeviceFeatures(physicalDevice, &pFeatures->features);
}
void
tu_GetPhysicalDeviceProperties(VkPhysicalDevice physicalDevice,
                               VkPhysicalDeviceProperties *pProperties)
{
   TU_FROM_HANDLE(tu_physical_device, pdevice, physicalDevice);
   VkSampleCountFlags sample_counts =
      VK_SAMPLE_COUNT_1_BIT | VK_SAMPLE_COUNT_2_BIT | VK_SAMPLE_COUNT_4_BIT;

   /* I have no idea what the maximum size is, but the hardware supports very
    * large numbers of descriptors (at least 2^16). This limit is based on
    * CP_LOAD_STATE6, which has a 28-bit field for the DWORD offset, so that
    * we don't have to think about what to do if that overflows, but really
    * nothing is likely to get close to this.
    */
   const size_t max_descriptor_set_size = (1 << 28) / A6XX_TEX_CONST_DWORDS;

   VkPhysicalDeviceLimits limits = {
      .maxImageDimension1D = (1 << 14),
      .maxImageDimension2D = (1 << 14),
      .maxImageDimension3D = (1 << 11),
      .maxImageDimensionCube = (1 << 14),
      .maxImageArrayLayers = (1 << 11),
      .maxTexelBufferElements = 128 * 1024 * 1024,
      .maxUniformBufferRange = MAX_UNIFORM_BUFFER_RANGE,
      .maxStorageBufferRange = MAX_STORAGE_BUFFER_RANGE,
      .maxPushConstantsSize = MAX_PUSH_CONSTANTS_SIZE,
      .maxMemoryAllocationCount = UINT32_MAX,
      .maxSamplerAllocationCount = 64 * 1024,
      .bufferImageGranularity = 64, /* A cache line */
      .sparseAddressSpaceSize = 0xffffffffu, /* buffer max size */
      .maxBoundDescriptorSets = MAX_SETS,
      .maxPerStageDescriptorSamplers = max_descriptor_set_size,
      .maxPerStageDescriptorUniformBuffers = max_descriptor_set_size,
      .maxPerStageDescriptorStorageBuffers = max_descriptor_set_size,
      .maxPerStageDescriptorSampledImages = max_descriptor_set_size,
      .maxPerStageDescriptorStorageImages = max_descriptor_set_size,
      .maxPerStageDescriptorInputAttachments = MAX_RTS,
      .maxPerStageResources = max_descriptor_set_size,
      .maxDescriptorSetSamplers = max_descriptor_set_size,
      .maxDescriptorSetUniformBuffers = max_descriptor_set_size,
      .maxDescriptorSetUniformBuffersDynamic = MAX_DYNAMIC_UNIFORM_BUFFERS,
      .maxDescriptorSetStorageBuffers = max_descriptor_set_size,
      .maxDescriptorSetStorageBuffersDynamic = MAX_DYNAMIC_STORAGE_BUFFERS,
      .maxDescriptorSetSampledImages = max_descriptor_set_size,
      .maxDescriptorSetStorageImages = max_descriptor_set_size,
      .maxDescriptorSetInputAttachments = MAX_RTS,
      .maxVertexInputAttributes = 32,
      .maxVertexInputBindings = 32,
      .maxVertexInputAttributeOffset = 4095,
      .maxVertexInputBindingStride = 2048,
      .maxVertexOutputComponents = 128,
      .maxTessellationGenerationLevel = 64,
      .maxTessellationPatchSize = 32,
      .maxTessellationControlPerVertexInputComponents = 128,
      .maxTessellationControlPerVertexOutputComponents = 128,
      .maxTessellationControlPerPatchOutputComponents = 120,
      .maxTessellationControlTotalOutputComponents = 4096,
      .maxTessellationEvaluationInputComponents = 128,
      .maxTessellationEvaluationOutputComponents = 128,
      .maxGeometryShaderInvocations = 32,
      .maxGeometryInputComponents = 64,
      .maxGeometryOutputComponents = 128,
      .maxGeometryOutputVertices = 256,
      .maxGeometryTotalOutputComponents = 1024,
      .maxFragmentInputComponents = 124,
      .maxFragmentOutputAttachments = 8,
      .maxFragmentDualSrcAttachments = 1,
      .maxFragmentCombinedOutputResources = 8,
      .maxComputeSharedMemorySize = 32768,
      .maxComputeWorkGroupCount = { 65535, 65535, 65535 },
      .maxComputeWorkGroupInvocations = 2048,
      .maxComputeWorkGroupSize = { 2048, 2048, 2048 },
      .subPixelPrecisionBits = 8,
      .subTexelPrecisionBits = 8,
      .mipmapPrecisionBits = 8,
      .maxDrawIndexedIndexValue = UINT32_MAX,
      .maxDrawIndirectCount = UINT32_MAX,
      .maxSamplerLodBias = 4095.0 / 256.0, /* [-16, 15.99609375] */
      .maxSamplerAnisotropy = 16,
      .maxViewports = MAX_VIEWPORTS,
      .maxViewportDimensions = { (1 << 14), (1 << 14) },
      .viewportBoundsRange = { INT16_MIN, INT16_MAX },
      .viewportSubPixelBits = 8,
      .minMemoryMapAlignment = 4096, /* A page */
      .minTexelBufferOffsetAlignment = 64,
      .minUniformBufferOffsetAlignment = 64,
      .minStorageBufferOffsetAlignment = 64,
      .minTexelOffset = -16,
      .maxTexelOffset = 15,
      .minTexelGatherOffset = -32,
      .maxTexelGatherOffset = 31,
      .minInterpolationOffset = -0.5,
      .maxInterpolationOffset = 0.4375,
      .subPixelInterpolationOffsetBits = 4,
      .maxFramebufferWidth = (1 << 14),
      .maxFramebufferHeight = (1 << 14),
      .maxFramebufferLayers = (1 << 10),
      .framebufferColorSampleCounts = sample_counts,
      .framebufferDepthSampleCounts = sample_counts,
      .framebufferStencilSampleCounts = sample_counts,
      .framebufferNoAttachmentsSampleCounts = sample_counts,
      .maxColorAttachments = MAX_RTS,
      .sampledImageColorSampleCounts = sample_counts,
      .sampledImageIntegerSampleCounts = VK_SAMPLE_COUNT_1_BIT,
      .sampledImageDepthSampleCounts = sample_counts,
      .sampledImageStencilSampleCounts = sample_counts,
      .storageImageSampleCounts = VK_SAMPLE_COUNT_1_BIT,
      .maxSampleMaskWords = 1,
      .timestampComputeAndGraphics = true,
      .timestampPeriod = 1000000000.0 / 19200000.0, /* CP_ALWAYS_ON_COUNTER is fixed 19.2MHz */
      .maxClipDistances = 8,
      .maxCullDistances = 8,
      .maxCombinedClipAndCullDistances = 8,
      .discreteQueuePriorities = 1,
      .pointSizeRange = { 1, 4092 },
      .lineWidthRange = { 0.0, 7.9921875 },
      .pointSizeGranularity = 0.0625,
      .lineWidthGranularity = (1.0 / 128.0),
      .strictLines = false, /* FINISHME */
      .standardSampleLocations = true,
      .optimalBufferCopyOffsetAlignment = 128,
      .optimalBufferCopyRowPitchAlignment = 128,
      .nonCoherentAtomSize = 64,
   };

   *pProperties = (VkPhysicalDeviceProperties) {
      .apiVersion = tu_physical_device_api_version(pdevice),
      .driverVersion = vk_get_driver_version(),
      .vendorID = 0, /* TODO */
      .deviceID = 0,
      .deviceType = VK_PHYSICAL_DEVICE_TYPE_INTEGRATED_GPU,
      .limits = limits,
      .sparseProperties = { 0 },
   };

   strcpy(pProperties->deviceName, pdevice->name);
   memcpy(pProperties->pipelineCacheUUID, pdevice->cache_uuid, VK_UUID_SIZE);
}
void
tu_GetPhysicalDeviceProperties2(VkPhysicalDevice physicalDevice,
                                VkPhysicalDeviceProperties2 *pProperties)
{
   TU_FROM_HANDLE(tu_physical_device, pdevice, physicalDevice);
   tu_GetPhysicalDeviceProperties(physicalDevice, &pProperties->properties);

   vk_foreach_struct(ext, pProperties->pNext)
   {
      switch (ext->sType) {
      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PUSH_DESCRIPTOR_PROPERTIES_KHR: {
         VkPhysicalDevicePushDescriptorPropertiesKHR *properties =
            (VkPhysicalDevicePushDescriptorPropertiesKHR *) ext;
         properties->maxPushDescriptors = MAX_PUSH_DESCRIPTORS;
         break;
      }
      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_ID_PROPERTIES: {
         VkPhysicalDeviceIDProperties *properties =
            (VkPhysicalDeviceIDProperties *) ext;
         memcpy(properties->driverUUID, pdevice->driver_uuid, VK_UUID_SIZE);
         memcpy(properties->deviceUUID, pdevice->device_uuid, VK_UUID_SIZE);
         properties->deviceLUIDValid = false;
         break;
      }
      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MULTIVIEW_PROPERTIES: {
         VkPhysicalDeviceMultiviewProperties *properties =
            (VkPhysicalDeviceMultiviewProperties *) ext;
         properties->maxMultiviewViewCount = MAX_VIEWS;
         properties->maxMultiviewInstanceIndex = INT_MAX;
         break;
      }
      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_POINT_CLIPPING_PROPERTIES: {
         VkPhysicalDevicePointClippingProperties *properties =
            (VkPhysicalDevicePointClippingProperties *) ext;
         properties->pointClippingBehavior =
            VK_POINT_CLIPPING_BEHAVIOR_ALL_CLIP_PLANES;
         break;
      }
      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MAINTENANCE_3_PROPERTIES: {
         VkPhysicalDeviceMaintenance3Properties *properties =
            (VkPhysicalDeviceMaintenance3Properties *) ext;
         /* Make sure everything is addressable by a signed 32-bit int, and
          * our largest descriptors are 96 bytes. */
         properties->maxPerSetDescriptors = (1ull << 31) / 96;
         /* Our buffer size fields allow only this much */
         properties->maxMemoryAllocationSize = 0xFFFFFFFFull;
         break;
      }
      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TRANSFORM_FEEDBACK_PROPERTIES_EXT: {
         VkPhysicalDeviceTransformFeedbackPropertiesEXT *properties =
            (VkPhysicalDeviceTransformFeedbackPropertiesEXT *)ext;

         properties->maxTransformFeedbackStreams = IR3_MAX_SO_STREAMS;
         properties->maxTransformFeedbackBuffers = IR3_MAX_SO_BUFFERS;
         properties->maxTransformFeedbackBufferSize = UINT32_MAX;
         properties->maxTransformFeedbackStreamDataSize = 512;
         properties->maxTransformFeedbackBufferDataSize = 512;
         properties->maxTransformFeedbackBufferDataStride = 512;
         properties->transformFeedbackQueries = true;
         properties->transformFeedbackStreamsLinesTriangles = false;
         properties->transformFeedbackRasterizationStreamSelect = false;
         properties->transformFeedbackDraw = true;
         break;
      }
      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SAMPLE_LOCATIONS_PROPERTIES_EXT: {
         VkPhysicalDeviceSampleLocationsPropertiesEXT *properties =
            (VkPhysicalDeviceSampleLocationsPropertiesEXT *)ext;
         properties->sampleLocationSampleCounts = 0;
         if (pdevice->supported_extensions.EXT_sample_locations) {
            properties->sampleLocationSampleCounts =
               VK_SAMPLE_COUNT_1_BIT | VK_SAMPLE_COUNT_2_BIT | VK_SAMPLE_COUNT_4_BIT;
         }
         properties->maxSampleLocationGridSize = (VkExtent2D) { 1 , 1 };
         properties->sampleLocationCoordinateRange[0] = 0.0f;
         properties->sampleLocationCoordinateRange[1] = 0.9375f;
         properties->sampleLocationSubPixelBits = 4;
         properties->variableSampleLocations = true;
         break;
      }
      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SAMPLER_FILTER_MINMAX_PROPERTIES: {
         VkPhysicalDeviceSamplerFilterMinmaxProperties *properties =
            (VkPhysicalDeviceSamplerFilterMinmaxProperties *)ext;
         properties->filterMinmaxImageComponentMapping = true;
         properties->filterMinmaxSingleComponentFormats = true;
         break;
      }
      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SUBGROUP_PROPERTIES: {
         VkPhysicalDeviceSubgroupProperties *properties =
            (VkPhysicalDeviceSubgroupProperties *)ext;
         properties->subgroupSize = 64;
         properties->supportedStages = VK_SHADER_STAGE_COMPUTE_BIT;
         properties->supportedOperations = VK_SUBGROUP_FEATURE_BASIC_BIT |
                                           VK_SUBGROUP_FEATURE_VOTE_BIT;
         properties->quadOperationsInAllStages = false;
         break;
      }
      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VERTEX_ATTRIBUTE_DIVISOR_PROPERTIES_EXT: {
         VkPhysicalDeviceVertexAttributeDivisorPropertiesEXT *props =
            (VkPhysicalDeviceVertexAttributeDivisorPropertiesEXT *)ext;
         props->maxVertexAttribDivisor = UINT32_MAX;
         break;
      }
      default:
         break;
      }
   }
}
static const VkQueueFamilyProperties tu_queue_family_properties = {
   .queueFlags =
      VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT | VK_QUEUE_TRANSFER_BIT,
   .queueCount = 1,
   .timestampValidBits = 48,
   .minImageTransferGranularity = { 1, 1, 1 },
};
void
tu_GetPhysicalDeviceQueueFamilyProperties(
   VkPhysicalDevice physicalDevice,
   uint32_t *pQueueFamilyPropertyCount,
   VkQueueFamilyProperties *pQueueFamilyProperties)
{
   VK_OUTARRAY_MAKE(out, pQueueFamilyProperties, pQueueFamilyPropertyCount);

   vk_outarray_append(&out, p) { *p = tu_queue_family_properties; }
}
void
tu_GetPhysicalDeviceQueueFamilyProperties2(
   VkPhysicalDevice physicalDevice,
   uint32_t *pQueueFamilyPropertyCount,
   VkQueueFamilyProperties2 *pQueueFamilyProperties)
{
   VK_OUTARRAY_MAKE(out, pQueueFamilyProperties, pQueueFamilyPropertyCount);

   vk_outarray_append(&out, p)
   {
      p->queueFamilyProperties = tu_queue_family_properties;
   }
}
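
/* The GPU has no dedicated VRAM, so the single "device local" heap reported
 * to the application is carved out of system RAM; its size is derived from
 * the total RAM reported by sysinfo() below.
 */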
static uint64_t
tu_get_system_heap_size()
{
   struct sysinfo info;
   sysinfo(&info);

   uint64_t total_ram = (uint64_t) info.totalram * (uint64_t) info.mem_unit;

   /* We don't want to burn too much ram with the GPU. If the user has 4GiB
    * or less, we use at most half. If they have more than 4GiB, we use 3/4.
    */
   uint64_t available_ram;
   if (total_ram <= 4ull * 1024ull * 1024ull * 1024ull)
      available_ram = total_ram / 2;
   else
      available_ram = total_ram * 3 / 4;

   return available_ram;
}
void
tu_GetPhysicalDeviceMemoryProperties(
   VkPhysicalDevice physicalDevice,
   VkPhysicalDeviceMemoryProperties *pMemoryProperties)
{
   pMemoryProperties->memoryHeapCount = 1;
   pMemoryProperties->memoryHeaps[0].size = tu_get_system_heap_size();
   pMemoryProperties->memoryHeaps[0].flags = VK_MEMORY_HEAP_DEVICE_LOCAL_BIT;

   pMemoryProperties->memoryTypeCount = 1;
   pMemoryProperties->memoryTypes[0].propertyFlags =
      VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT |
      VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT |
      VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
   pMemoryProperties->memoryTypes[0].heapIndex = 0;
}
void
tu_GetPhysicalDeviceMemoryProperties2(
   VkPhysicalDevice physicalDevice,
   VkPhysicalDeviceMemoryProperties2 *pMemoryProperties)
{
   return tu_GetPhysicalDeviceMemoryProperties(
      physicalDevice, &pMemoryProperties->memoryProperties);
}
static VkResult
tu_queue_init(struct tu_device *device,
              struct tu_queue *queue,
              uint32_t queue_family_index,
              int idx,
              VkDeviceQueueCreateFlags flags)
{
   queue->_loader_data.loaderMagic = ICD_LOADER_MAGIC;
   queue->device = device;
   queue->queue_family_index = queue_family_index;
   queue->queue_idx = idx;
   queue->flags = flags;

   int ret = tu_drm_submitqueue_new(device, 0, &queue->msm_queue_id);
   if (ret)
      return VK_ERROR_INITIALIZATION_FAILED;

   tu_fence_init(&queue->submit_fence, false);

   return VK_SUCCESS;
}
static void
tu_queue_finish(struct tu_queue *queue)
{
   tu_fence_finish(&queue->submit_fence);
   tu_drm_submitqueue_close(queue->device, queue->msm_queue_id);
}
static int
tu_get_device_extension_index(const char *name)
{
   for (unsigned i = 0; i < TU_DEVICE_EXTENSION_COUNT; ++i) {
      if (strcmp(name, tu_device_extensions[i].extensionName) == 0)
         return i;
   }
   return -1;
}
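
/* Hardware layout for sampler border colors: one entry per VkBorderColor,
 * with the same value replicated in every format the hardware may read it
 * back in (fp32/fp16, integer, sRGB, ...). The table is copied into the
 * global BO when the device is created.
 */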
struct PACKED bcolor_entry {
   uint32_t fp32[4];
   uint16_t ui16[4];
   int16_t si16[4];
   uint16_t fp16[4];
   uint8_t ui8[4];
   int8_t si8[4];
   uint32_t rgb10a2;
   uint32_t z24; /* also s8? */
   uint16_t srgb[4];      /* appears to duplicate fp16[], but clamped, used for srgb */
} border_color[] = {
   [VK_BORDER_COLOR_FLOAT_TRANSPARENT_BLACK] = {},
   [VK_BORDER_COLOR_INT_TRANSPARENT_BLACK] = {},
   [VK_BORDER_COLOR_FLOAT_OPAQUE_BLACK] = {
      .fp32[3] = 0x3f800000,
      .rgb10a2 = 0xc0000000,
      /* ... */
   },
   [VK_BORDER_COLOR_INT_OPAQUE_BLACK] = {
      /* ... */
   },
   [VK_BORDER_COLOR_FLOAT_OPAQUE_WHITE] = {
      .fp32[0 ... 3] = 0x3f800000,
      .ui16[0 ... 3] = 0xffff,
      .si16[0 ... 3] = 0x7fff,
      .fp16[0 ... 3] = 0x3c00,
      .ui8[0 ... 3] = 0xff,
      .si8[0 ... 3] = 0x7f,
      .rgb10a2 = 0xffffffff,
      .srgb[0 ... 3] = 0x3c00,
      /* ... */
   },
   [VK_BORDER_COLOR_INT_OPAQUE_WHITE] = {
      /* ... */
   },
};
VkResult
tu_CreateDevice(VkPhysicalDevice physicalDevice,
                const VkDeviceCreateInfo *pCreateInfo,
                const VkAllocationCallbacks *pAllocator,
                VkDevice *pDevice)
{
   TU_FROM_HANDLE(tu_physical_device, physical_device, physicalDevice);
   VkResult result;
   struct tu_device *device;

   /* Check enabled features */
   if (pCreateInfo->pEnabledFeatures) {
      VkPhysicalDeviceFeatures supported_features;
      tu_GetPhysicalDeviceFeatures(physicalDevice, &supported_features);
      VkBool32 *supported_feature = (VkBool32 *) &supported_features;
      VkBool32 *enabled_feature = (VkBool32 *) pCreateInfo->pEnabledFeatures;
      unsigned num_features =
         sizeof(VkPhysicalDeviceFeatures) / sizeof(VkBool32);
      for (uint32_t i = 0; i < num_features; i++) {
         if (enabled_feature[i] && !supported_feature[i])
            return vk_error(physical_device->instance,
                            VK_ERROR_FEATURE_NOT_PRESENT);
      }
   }

   device = vk_zalloc2(&physical_device->instance->alloc, pAllocator,
                       sizeof(*device), 8, VK_SYSTEM_ALLOCATION_SCOPE_DEVICE);
   if (!device)
      return vk_error(physical_device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);

   device->_loader_data.loaderMagic = ICD_LOADER_MAGIC;
   device->instance = physical_device->instance;
   device->physical_device = physical_device;
   device->_lost = false;

   if (pAllocator)
      device->alloc = *pAllocator;
   else
      device->alloc = physical_device->instance->alloc;

   for (uint32_t i = 0; i < pCreateInfo->enabledExtensionCount; i++) {
      const char *ext_name = pCreateInfo->ppEnabledExtensionNames[i];
      int index = tu_get_device_extension_index(ext_name);
      if (index < 0 ||
          !physical_device->supported_extensions.extensions[index]) {
         vk_free(&device->alloc, device);
         return vk_error(physical_device->instance,
                         VK_ERROR_EXTENSION_NOT_PRESENT);
      }

      device->enabled_extensions.extensions[index] = true;
   }

   for (unsigned i = 0; i < pCreateInfo->queueCreateInfoCount; i++) {
      const VkDeviceQueueCreateInfo *queue_create =
         &pCreateInfo->pQueueCreateInfos[i];
      uint32_t qfi = queue_create->queueFamilyIndex;
      device->queues[qfi] = vk_alloc(
         &device->alloc, queue_create->queueCount * sizeof(struct tu_queue),
         8, VK_SYSTEM_ALLOCATION_SCOPE_DEVICE);
      if (!device->queues[qfi]) {
         result = VK_ERROR_OUT_OF_HOST_MEMORY;
         goto fail_queues;
      }

      memset(device->queues[qfi], 0,
             queue_create->queueCount * sizeof(struct tu_queue));

      device->queue_count[qfi] = queue_create->queueCount;

      for (unsigned q = 0; q < queue_create->queueCount; q++) {
         result = tu_queue_init(device, &device->queues[qfi][q], qfi, q,
                                queue_create->flags);
         if (result != VK_SUCCESS)
            goto fail_queues;
      }
   }

   device->compiler = ir3_compiler_create(NULL, physical_device->gpu_id);
   if (!device->compiler)
      goto fail_queues;

   /* initial sizes, these will increase if there is overflow */
   device->vsc_draw_strm_pitch = 0x1000 + VSC_PAD;
   device->vsc_prim_strm_pitch = 0x4000 + VSC_PAD;

   STATIC_ASSERT(sizeof(border_color) == sizeof(((struct tu6_global *) 0)->border_color));
   result = tu_bo_init_new(device, &device->global_bo, sizeof(struct tu6_global));
   if (result != VK_SUCCESS)
      goto fail_global_bo;

   result = tu_bo_map(device, &device->global_bo);
   if (result != VK_SUCCESS)
      goto fail_global_bo_map;

   memcpy(device->global_bo.map + gb_offset(border_color), border_color, sizeof(border_color));

   VkPipelineCacheCreateInfo ci;
   ci.sType = VK_STRUCTURE_TYPE_PIPELINE_CACHE_CREATE_INFO;
   ci.pNext = NULL;
   ci.flags = 0;
   ci.pInitialData = NULL;
   ci.initialDataSize = 0;

   VkPipelineCache pc;
   result =
      tu_CreatePipelineCache(tu_device_to_handle(device), &ci, NULL, &pc);
   if (result != VK_SUCCESS)
      goto fail_pipeline_cache;

   device->mem_cache = tu_pipeline_cache_from_handle(pc);

   for (unsigned i = 0; i < ARRAY_SIZE(device->scratch_bos); i++)
      mtx_init(&device->scratch_bos[i].construct_mtx, mtx_plain);

   mtx_init(&device->vsc_pitch_mtx, mtx_plain);

   *pDevice = tu_device_to_handle(device);

   return VK_SUCCESS;

fail_pipeline_cache:
fail_global_bo_map:
   tu_bo_finish(device, &device->global_bo);

fail_global_bo:
   ralloc_free(device->compiler);

fail_queues:
   for (unsigned i = 0; i < TU_MAX_QUEUE_FAMILIES; i++) {
      for (unsigned q = 0; q < device->queue_count[i]; q++)
         tu_queue_finish(&device->queues[i][q]);
      if (device->queue_count[i])
         vk_free(&device->alloc, device->queues[i]);
   }

   vk_free(&device->alloc, device);
   return result;
}
void
tu_DestroyDevice(VkDevice _device, const VkAllocationCallbacks *pAllocator)
{
   TU_FROM_HANDLE(tu_device, device, _device);

   if (!device)
      return;

   for (unsigned i = 0; i < TU_MAX_QUEUE_FAMILIES; i++) {
      for (unsigned q = 0; q < device->queue_count[i]; q++)
         tu_queue_finish(&device->queues[i][q]);
      if (device->queue_count[i])
         vk_free(&device->alloc, device->queues[i]);
   }

   for (unsigned i = 0; i < ARRAY_SIZE(device->scratch_bos); i++) {
      if (device->scratch_bos[i].initialized)
         tu_bo_finish(device, &device->scratch_bos[i].bo);
   }

   ir3_compiler_destroy(device->compiler);

   VkPipelineCache pc = tu_pipeline_cache_to_handle(device->mem_cache);
   tu_DestroyPipelineCache(tu_device_to_handle(device), pc, NULL);

   vk_free(&device->alloc, device);
}
VkResult
_tu_device_set_lost(struct tu_device *device,
                    const char *file, int line,
                    const char *msg, ...)
{
   /* Set the flag indicating that waits should return in finite time even
    * after device loss.
    */
   p_atomic_inc(&device->_lost);

   /* TODO: Report the log message through VkDebugReportCallbackEXT instead */
   fprintf(stderr, "%s:%d: ", file, line);
   va_list ap;
   va_start(ap, msg);
   vfprintf(stderr, msg, ap);
   va_end(ap);

   if (env_var_as_boolean("TU_ABORT_ON_DEVICE_LOSS", false))
      abort();

   return VK_ERROR_DEVICE_LOST;
}
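
/* Scratch BOs are shared, lazily-allocated buffers sized in power-of-two
 * buckets; the fast path is a lock-free atomic read, and allocation takes a
 * per-bucket mutex so two threads cannot create the same BO twice.
 */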
VkResult
tu_get_scratch_bo(struct tu_device *dev, uint64_t size, struct tu_bo **bo)
{
   unsigned size_log2 = MAX2(util_logbase2_ceil64(size), MIN_SCRATCH_BO_SIZE_LOG2);
   unsigned index = size_log2 - MIN_SCRATCH_BO_SIZE_LOG2;
   assert(index < ARRAY_SIZE(dev->scratch_bos));

   for (unsigned i = index; i < ARRAY_SIZE(dev->scratch_bos); i++) {
      if (p_atomic_read(&dev->scratch_bos[i].initialized)) {
         /* Fast path: just return the already-allocated BO. */
         *bo = &dev->scratch_bos[i].bo;
         return VK_SUCCESS;
      }
   }

   /* Slow path: actually allocate the BO. We take a lock because the process
    * of allocating it is slow, and we don't want to block the CPU while it
    * finishes.
    */
   mtx_lock(&dev->scratch_bos[index].construct_mtx);

   /* Another thread may have allocated it already while we were waiting on
    * the lock. We need to check this in order to avoid double-allocating.
    */
   if (dev->scratch_bos[index].initialized) {
      mtx_unlock(&dev->scratch_bos[index].construct_mtx);
      *bo = &dev->scratch_bos[index].bo;
      return VK_SUCCESS;
   }

   unsigned bo_size = 1ull << size_log2;
   VkResult result = tu_bo_init_new(dev, &dev->scratch_bos[index].bo, bo_size);
   if (result != VK_SUCCESS) {
      mtx_unlock(&dev->scratch_bos[index].construct_mtx);
      return result;
   }

   p_atomic_set(&dev->scratch_bos[index].initialized, true);

   mtx_unlock(&dev->scratch_bos[index].construct_mtx);

   *bo = &dev->scratch_bos[index].bo;
   return VK_SUCCESS;
}
VkResult
tu_EnumerateInstanceLayerProperties(uint32_t *pPropertyCount,
                                    VkLayerProperties *pProperties)
{
   *pPropertyCount = 0;
   return VK_SUCCESS;
}

VkResult
tu_EnumerateDeviceLayerProperties(VkPhysicalDevice physicalDevice,
                                  uint32_t *pPropertyCount,
                                  VkLayerProperties *pProperties)
{
   *pPropertyCount = 0;
   return VK_SUCCESS;
}
void
tu_GetDeviceQueue2(VkDevice _device,
                   const VkDeviceQueueInfo2 *pQueueInfo,
                   VkQueue *pQueue)
{
   TU_FROM_HANDLE(tu_device, device, _device);
   struct tu_queue *queue;

   queue =
      &device->queues[pQueueInfo->queueFamilyIndex][pQueueInfo->queueIndex];
   if (pQueueInfo->flags != queue->flags) {
      /* From the Vulkan 1.1.70 spec:
       *
       * "The queue returned by vkGetDeviceQueue2 must have the same
       * flags value from this structure as that used at device
       * creation time in a VkDeviceQueueCreateInfo instance. If no
       * matching flags were specified at device creation time then
       * pQueue will return VK_NULL_HANDLE."
       */
      *pQueue = VK_NULL_HANDLE;
      return;
   }

   *pQueue = tu_queue_to_handle(queue);
}

void
tu_GetDeviceQueue(VkDevice _device,
                  uint32_t queueFamilyIndex,
                  uint32_t queueIndex,
                  VkQueue *pQueue)
{
   const VkDeviceQueueInfo2 info =
      (VkDeviceQueueInfo2) { .sType = VK_STRUCTURE_TYPE_DEVICE_QUEUE_INFO_2,
                             .queueFamilyIndex = queueFamilyIndex,
                             .queueIndex = queueIndex };

   tu_GetDeviceQueue2(_device, &info, pQueue);
}
static VkResult
tu_get_semaphore_syncobjs(const VkSemaphore *sems,
                          uint32_t sem_count,
                          bool wait,
                          struct drm_msm_gem_submit_syncobj **out,
                          uint32_t *out_count)
{
   uint32_t syncobj_count = 0;
   struct drm_msm_gem_submit_syncobj *syncobjs;

   for (uint32_t i = 0; i < sem_count; ++i) {
      TU_FROM_HANDLE(tu_semaphore, sem, sems[i]);

      struct tu_semaphore_part *part =
         sem->temporary.kind != TU_SEMAPHORE_NONE ?
            &sem->temporary : &sem->permanent;

      if (part->kind == TU_SEMAPHORE_SYNCOBJ)
         ++syncobj_count;
   }

   *out_count = syncobj_count;
   *out = NULL;
   if (!syncobj_count)
      return VK_SUCCESS;

   *out = syncobjs = calloc(syncobj_count, sizeof (*syncobjs));
   if (!syncobjs)
      return VK_ERROR_OUT_OF_HOST_MEMORY;

   for (uint32_t i = 0, j = 0; i < sem_count; ++i) {
      TU_FROM_HANDLE(tu_semaphore, sem, sems[i]);

      struct tu_semaphore_part *part =
         sem->temporary.kind != TU_SEMAPHORE_NONE ?
            &sem->temporary : &sem->permanent;

      if (part->kind == TU_SEMAPHORE_SYNCOBJ) {
         syncobjs[j].handle = part->syncobj;
         syncobjs[j].flags = wait ? MSM_SUBMIT_SYNCOBJ_RESET : 0;
         ++j;
      }
   }

   return VK_SUCCESS;
}
static void
tu_semaphores_remove_temp(struct tu_device *device,
                          const VkSemaphore *sems,
                          uint32_t sem_count)
{
   for (uint32_t i = 0; i < sem_count; ++i) {
      TU_FROM_HANDLE(tu_semaphore, sem, sems[i]);
      tu_semaphore_remove_temp(device, sem);
   }
}
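
/* Each vkQueueSubmit batch is flattened into one MSM_GEM_SUBMIT ioctl: every
 * command stream entry becomes a submit command referencing the shared BO
 * list, and wait/signal semaphores are passed as in/out syncobjs.
 */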
VkResult
tu_QueueSubmit(VkQueue _queue,
               uint32_t submitCount,
               const VkSubmitInfo *pSubmits,
               VkFence _fence)
{
   TU_FROM_HANDLE(tu_queue, queue, _queue);
   VkResult result;

   for (uint32_t i = 0; i < submitCount; ++i) {
      const VkSubmitInfo *submit = pSubmits + i;
      const bool last_submit = (i == submitCount - 1);
      struct drm_msm_gem_submit_syncobj *in_syncobjs = NULL, *out_syncobjs = NULL;
      uint32_t nr_in_syncobjs, nr_out_syncobjs;
      struct tu_bo_list bo_list;
      tu_bo_list_init(&bo_list);

      result = tu_get_semaphore_syncobjs(pSubmits[i].pWaitSemaphores,
                                         pSubmits[i].waitSemaphoreCount,
                                         false, &in_syncobjs, &nr_in_syncobjs);
      if (result != VK_SUCCESS) {
         return tu_device_set_lost(queue->device,
                                   "failed to allocate space for semaphore submission\n");
      }

      result = tu_get_semaphore_syncobjs(pSubmits[i].pSignalSemaphores,
                                         pSubmits[i].signalSemaphoreCount,
                                         false, &out_syncobjs, &nr_out_syncobjs);
      if (result != VK_SUCCESS) {
         return tu_device_set_lost(queue->device,
                                   "failed to allocate space for semaphore submission\n");
      }

      uint32_t entry_count = 0;
      for (uint32_t j = 0; j < submit->commandBufferCount; ++j) {
         TU_FROM_HANDLE(tu_cmd_buffer, cmdbuf, submit->pCommandBuffers[j]);
         entry_count += cmdbuf->cs.entry_count;
      }

      struct drm_msm_gem_submit_cmd cmds[entry_count];
      uint32_t entry_idx = 0;
      for (uint32_t j = 0; j < submit->commandBufferCount; ++j) {
         TU_FROM_HANDLE(tu_cmd_buffer, cmdbuf, submit->pCommandBuffers[j]);
         struct tu_cs *cs = &cmdbuf->cs;
         for (unsigned i = 0; i < cs->entry_count; ++i, ++entry_idx) {
            cmds[entry_idx].type = MSM_SUBMIT_CMD_BUF;
            cmds[entry_idx].submit_idx =
               tu_bo_list_add(&bo_list, cs->entries[i].bo,
                              MSM_SUBMIT_BO_READ | MSM_SUBMIT_BO_DUMP);
            cmds[entry_idx].submit_offset = cs->entries[i].offset;
            cmds[entry_idx].size = cs->entries[i].size;
            cmds[entry_idx].pad = 0;
            cmds[entry_idx].nr_relocs = 0;
            cmds[entry_idx].relocs = 0;
         }

         tu_bo_list_merge(&bo_list, &cmdbuf->bo_list);
      }

      uint32_t flags = MSM_PIPE_3D0;
      if (nr_in_syncobjs) {
         flags |= MSM_SUBMIT_SYNCOBJ_IN;
      }
      if (nr_out_syncobjs) {
         flags |= MSM_SUBMIT_SYNCOBJ_OUT;
      }
      if (last_submit) {
         flags |= MSM_SUBMIT_FENCE_FD_OUT;
      }

      struct drm_msm_gem_submit req = {
         .flags = flags,
         .queueid = queue->msm_queue_id,
         .bos = (uint64_t)(uintptr_t) bo_list.bo_infos,
         .nr_bos = bo_list.count,
         .cmds = (uint64_t)(uintptr_t)cmds,
         .nr_cmds = entry_count,
         .in_syncobjs = (uint64_t)(uintptr_t)in_syncobjs,
         .out_syncobjs = (uint64_t)(uintptr_t)out_syncobjs,
         .nr_in_syncobjs = nr_in_syncobjs,
         .nr_out_syncobjs = nr_out_syncobjs,
         .syncobj_stride = sizeof(struct drm_msm_gem_submit_syncobj),
      };

      int ret = drmCommandWriteRead(queue->device->physical_device->local_fd,
                                    DRM_MSM_GEM_SUBMIT,
                                    &req, sizeof(req));
      if (ret) {
         return tu_device_set_lost(queue->device, "submit failed: %s\n",
                                   strerror(errno));
      }

      tu_bo_list_destroy(&bo_list);

      tu_semaphores_remove_temp(queue->device, pSubmits[i].pWaitSemaphores,
                                pSubmits[i].waitSemaphoreCount);
      if (last_submit) {
         /* no need to merge fences as queue execution is serialized */
         tu_fence_update_fd(&queue->submit_fence, req.fence_fd);
      } else if (last_submit) {
         close(req.fence_fd);
      }
   }

   if (_fence != VK_NULL_HANDLE) {
      TU_FROM_HANDLE(tu_fence, fence, _fence);
      tu_fence_copy(fence, &queue->submit_fence);
   }

   return VK_SUCCESS;
}
VkResult
tu_QueueWaitIdle(VkQueue _queue)
{
   TU_FROM_HANDLE(tu_queue, queue, _queue);

   if (tu_device_is_lost(queue->device))
      return VK_ERROR_DEVICE_LOST;

   tu_fence_wait_idle(&queue->submit_fence);

   return VK_SUCCESS;
}
VkResult
tu_DeviceWaitIdle(VkDevice _device)
{
   TU_FROM_HANDLE(tu_device, device, _device);

   if (tu_device_is_lost(device))
      return VK_ERROR_DEVICE_LOST;

   for (unsigned i = 0; i < TU_MAX_QUEUE_FAMILIES; i++) {
      for (unsigned q = 0; q < device->queue_count[i]; q++) {
         tu_QueueWaitIdle(tu_queue_to_handle(&device->queues[i][q]));
      }
   }
   return VK_SUCCESS;
}
VkResult
tu_EnumerateInstanceExtensionProperties(const char *pLayerName,
                                        uint32_t *pPropertyCount,
                                        VkExtensionProperties *pProperties)
{
   VK_OUTARRAY_MAKE(out, pProperties, pPropertyCount);

   /* We support no layers */
   if (pLayerName)
      return vk_error(NULL, VK_ERROR_LAYER_NOT_PRESENT);

   for (int i = 0; i < TU_INSTANCE_EXTENSION_COUNT; i++) {
      if (tu_instance_extensions_supported.extensions[i]) {
         vk_outarray_append(&out, prop) { *prop = tu_instance_extensions[i]; }
      }
   }

   return vk_outarray_status(&out);
}
VkResult
tu_EnumerateDeviceExtensionProperties(VkPhysicalDevice physicalDevice,
                                      const char *pLayerName,
                                      uint32_t *pPropertyCount,
                                      VkExtensionProperties *pProperties)
{
   /* We support no layers */
   TU_FROM_HANDLE(tu_physical_device, device, physicalDevice);
   VK_OUTARRAY_MAKE(out, pProperties, pPropertyCount);

   /* We support no layers */
   if (pLayerName)
      return vk_error(NULL, VK_ERROR_LAYER_NOT_PRESENT);

   for (int i = 0; i < TU_DEVICE_EXTENSION_COUNT; i++) {
      if (device->supported_extensions.extensions[i]) {
         vk_outarray_append(&out, prop) { *prop = tu_device_extensions[i]; }
      }
   }

   return vk_outarray_status(&out);
}
PFN_vkVoidFunction
tu_GetInstanceProcAddr(VkInstance _instance, const char *pName)
{
   TU_FROM_HANDLE(tu_instance, instance, _instance);

   return tu_lookup_entrypoint_checked(
      pName, instance ? instance->api_version : 0,
      instance ? &instance->enabled_extensions : NULL, NULL);
}
/* The loader wants us to expose a second GetInstanceProcAddr function
 * to work around certain LD_PRELOAD issues seen in apps.
 */
VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL
vk_icdGetInstanceProcAddr(VkInstance instance, const char *pName);

VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL
vk_icdGetInstanceProcAddr(VkInstance instance, const char *pName)
{
   return tu_GetInstanceProcAddr(instance, pName);
}
PFN_vkVoidFunction
tu_GetDeviceProcAddr(VkDevice _device, const char *pName)
{
   TU_FROM_HANDLE(tu_device, device, _device);

   return tu_lookup_entrypoint_checked(pName, device->instance->api_version,
                                       &device->instance->enabled_extensions,
                                       &device->enabled_extensions);
}
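
/* Device memory maps 1:1 onto a GEM BO: either a fresh allocation or, when a
 * VkImportMemoryFdInfoKHR is chained in, a dma-buf imported from another
 * process or driver.
 */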
static VkResult
tu_alloc_memory(struct tu_device *device,
                const VkMemoryAllocateInfo *pAllocateInfo,
                const VkAllocationCallbacks *pAllocator,
                VkDeviceMemory *pMem)
{
   struct tu_device_memory *mem;
   VkResult result;

   assert(pAllocateInfo->sType == VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO);

   if (pAllocateInfo->allocationSize == 0) {
      /* Apparently, this is allowed */
      *pMem = VK_NULL_HANDLE;
      return VK_SUCCESS;
   }

   mem = vk_alloc2(&device->alloc, pAllocator, sizeof(*mem), 8,
                   VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (mem == NULL)
      return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);

   const VkImportMemoryFdInfoKHR *fd_info =
      vk_find_struct_const(pAllocateInfo->pNext, IMPORT_MEMORY_FD_INFO_KHR);
   if (fd_info && !fd_info->handleType)
      fd_info = NULL;

   if (fd_info) {
      assert(fd_info->handleType ==
                VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT ||
             fd_info->handleType ==
                VK_EXTERNAL_MEMORY_HANDLE_TYPE_DMA_BUF_BIT_EXT);

      /*
       * TODO Importing the same fd twice gives us the same handle without
       * reference counting. We need to maintain a per-instance handle-to-bo
       * table and add reference count to tu_bo.
       */
      result = tu_bo_init_dmabuf(device, &mem->bo,
                                 pAllocateInfo->allocationSize, fd_info->fd);
      if (result == VK_SUCCESS) {
         /* take ownership and close the fd */
         close(fd_info->fd);
      }
   } else {
      result =
         tu_bo_init_new(device, &mem->bo, pAllocateInfo->allocationSize);
   }

   if (result != VK_SUCCESS) {
      vk_free2(&device->alloc, pAllocator, mem);
      return result;
   }

   mem->size = pAllocateInfo->allocationSize;
   mem->type_index = pAllocateInfo->memoryTypeIndex;

   mem->map = NULL;
   mem->user_ptr = NULL;

   *pMem = tu_device_memory_to_handle(mem);

   return VK_SUCCESS;
}
VkResult
tu_AllocateMemory(VkDevice _device,
                  const VkMemoryAllocateInfo *pAllocateInfo,
                  const VkAllocationCallbacks *pAllocator,
                  VkDeviceMemory *pMem)
{
   TU_FROM_HANDLE(tu_device, device, _device);
   return tu_alloc_memory(device, pAllocateInfo, pAllocator, pMem);
}
void
tu_FreeMemory(VkDevice _device,
              VkDeviceMemory _mem,
              const VkAllocationCallbacks *pAllocator)
{
   TU_FROM_HANDLE(tu_device, device, _device);
   TU_FROM_HANDLE(tu_device_memory, mem, _mem);

   if (mem == NULL)
      return;

   tu_bo_finish(device, &mem->bo);
   vk_free2(&device->alloc, pAllocator, mem);
}
VkResult
tu_MapMemory(VkDevice _device,
             VkDeviceMemory _memory,
             VkDeviceSize offset,
             VkDeviceSize size,
             VkMemoryMapFlags flags,
             void **ppData)
{
   TU_FROM_HANDLE(tu_device, device, _device);
   TU_FROM_HANDLE(tu_device_memory, mem, _memory);
   VkResult result;

   if (mem == NULL) {
      *ppData = NULL;
      return VK_SUCCESS;
   }

   if (mem->user_ptr) {
      *ppData = mem->user_ptr;
   } else if (!mem->map) {
      result = tu_bo_map(device, &mem->bo);
      if (result != VK_SUCCESS)
         return result;
      *ppData = mem->map = mem->bo.map;
   } else
      *ppData = mem->map;

   if (*ppData) {
      *ppData += offset;
      return VK_SUCCESS;
   }

   return vk_error(device->instance, VK_ERROR_MEMORY_MAP_FAILED);
}
void
tu_UnmapMemory(VkDevice _device, VkDeviceMemory _memory)
{
   /* I do not see any unmapping done by the freedreno Gallium driver. */
}
VkResult
tu_FlushMappedMemoryRanges(VkDevice _device,
                           uint32_t memoryRangeCount,
                           const VkMappedMemoryRange *pMemoryRanges)
{
   return VK_SUCCESS;
}

VkResult
tu_InvalidateMappedMemoryRanges(VkDevice _device,
                                uint32_t memoryRangeCount,
                                const VkMappedMemoryRange *pMemoryRanges)
{
   return VK_SUCCESS;
}
void
tu_GetBufferMemoryRequirements(VkDevice _device,
                               VkBuffer _buffer,
                               VkMemoryRequirements *pMemoryRequirements)
{
   TU_FROM_HANDLE(tu_buffer, buffer, _buffer);

   pMemoryRequirements->memoryTypeBits = 1;
   pMemoryRequirements->alignment = 64;
   pMemoryRequirements->size =
      align64(buffer->size, pMemoryRequirements->alignment);
}
void
tu_GetBufferMemoryRequirements2(
   VkDevice device,
   const VkBufferMemoryRequirementsInfo2 *pInfo,
   VkMemoryRequirements2 *pMemoryRequirements)
{
   tu_GetBufferMemoryRequirements(device, pInfo->buffer,
                                  &pMemoryRequirements->memoryRequirements);
}
void
tu_GetImageMemoryRequirements(VkDevice _device,
                              VkImage _image,
                              VkMemoryRequirements *pMemoryRequirements)
{
   TU_FROM_HANDLE(tu_image, image, _image);

   pMemoryRequirements->memoryTypeBits = 1;
   pMemoryRequirements->size = image->layout.size;
   pMemoryRequirements->alignment = image->layout.base_align;
}
void
tu_GetImageMemoryRequirements2(VkDevice device,
                               const VkImageMemoryRequirementsInfo2 *pInfo,
                               VkMemoryRequirements2 *pMemoryRequirements)
{
   tu_GetImageMemoryRequirements(device, pInfo->image,
                                 &pMemoryRequirements->memoryRequirements);
}
void
tu_GetImageSparseMemoryRequirements(
   VkDevice device,
   VkImage image,
   uint32_t *pSparseMemoryRequirementCount,
   VkSparseImageMemoryRequirements *pSparseMemoryRequirements)
{
   tu_stub();
}

void
tu_GetImageSparseMemoryRequirements2(
   VkDevice device,
   const VkImageSparseMemoryRequirementsInfo2 *pInfo,
   uint32_t *pSparseMemoryRequirementCount,
   VkSparseImageMemoryRequirements2 *pSparseMemoryRequirements)
{
   tu_stub();
}
void
tu_GetDeviceMemoryCommitment(VkDevice device,
                             VkDeviceMemory memory,
                             VkDeviceSize *pCommittedMemoryInBytes)
{
   *pCommittedMemoryInBytes = 0;
}

VkResult
tu_BindBufferMemory2(VkDevice device,
                     uint32_t bindInfoCount,
                     const VkBindBufferMemoryInfo *pBindInfos)
{
   for (uint32_t i = 0; i < bindInfoCount; ++i) {
      TU_FROM_HANDLE(tu_device_memory, mem, pBindInfos[i].memory);
      TU_FROM_HANDLE(tu_buffer, buffer, pBindInfos[i].buffer);

      if (mem) {
         buffer->bo = &mem->bo;
         buffer->bo_offset = pBindInfos[i].memoryOffset;
      } else {
         buffer->bo = NULL;
      }
   }
   return VK_SUCCESS;
}

VkResult
tu_BindBufferMemory(VkDevice device,
                    VkBuffer buffer,
                    VkDeviceMemory memory,
                    VkDeviceSize memoryOffset)
{
   const VkBindBufferMemoryInfo info = {
      .sType = VK_STRUCTURE_TYPE_BIND_BUFFER_MEMORY_INFO,
      .buffer = buffer,
      .memory = memory,
      .memoryOffset = memoryOffset
   };

   return tu_BindBufferMemory2(device, 1, &info);
}

VkResult
tu_BindImageMemory2(VkDevice device,
                    uint32_t bindInfoCount,
                    const VkBindImageMemoryInfo *pBindInfos)
{
   for (uint32_t i = 0; i < bindInfoCount; ++i) {
      TU_FROM_HANDLE(tu_image, image, pBindInfos[i].image);
      TU_FROM_HANDLE(tu_device_memory, mem, pBindInfos[i].memory);

      if (mem) {
         image->bo = &mem->bo;
         image->bo_offset = pBindInfos[i].memoryOffset;
      } else {
         image->bo = NULL;
         image->bo_offset = 0;
      }
   }

   return VK_SUCCESS;
}

VkResult
tu_BindImageMemory(VkDevice device,
                   VkImage image,
                   VkDeviceMemory memory,
                   VkDeviceSize memoryOffset)
{
   const VkBindImageMemoryInfo info = {
      .sType = VK_STRUCTURE_TYPE_BIND_IMAGE_MEMORY_INFO,
      .image = image,
      .memory = memory,
      .memoryOffset = memoryOffset
   };

   return tu_BindImageMemory2(device, 1, &info);
}

VkResult
tu_QueueBindSparse(VkQueue _queue,
                   uint32_t bindInfoCount,
                   const VkBindSparseInfo *pBindInfo,
                   VkFence fence)
{
   return VK_SUCCESS;
}

// Queue semaphore functions

static void
tu_semaphore_part_destroy(struct tu_device *device,
                          struct tu_semaphore_part *part)
{
   switch(part->kind) {
   case TU_SEMAPHORE_NONE:
      break;
   case TU_SEMAPHORE_SYNCOBJ:
      drmSyncobjDestroy(device->physical_device->local_fd, part->syncobj);
      break;
   }
   part->kind = TU_SEMAPHORE_NONE;
}

static void
tu_semaphore_remove_temp(struct tu_device *device,
                         struct tu_semaphore *sem)
{
   if (sem->temporary.kind != TU_SEMAPHORE_NONE) {
      tu_semaphore_part_destroy(device, &sem->temporary);
   }
}

VkResult
tu_CreateSemaphore(VkDevice _device,
                   const VkSemaphoreCreateInfo *pCreateInfo,
                   const VkAllocationCallbacks *pAllocator,
                   VkSemaphore *pSemaphore)
{
   TU_FROM_HANDLE(tu_device, device, _device);

   struct tu_semaphore *sem =
      vk_alloc2(&device->alloc, pAllocator, sizeof(*sem), 8,
                VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (!sem)
      return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);

   const VkExportSemaphoreCreateInfo *export =
      vk_find_struct_const(pCreateInfo->pNext, EXPORT_SEMAPHORE_CREATE_INFO);
   VkExternalSemaphoreHandleTypeFlags handleTypes =
      export ? export->handleTypes : 0;

   sem->permanent.kind = TU_SEMAPHORE_NONE;
   sem->temporary.kind = TU_SEMAPHORE_NONE;

   if (handleTypes) {
      if (drmSyncobjCreate(device->physical_device->local_fd, 0, &sem->permanent.syncobj) < 0) {
         vk_free2(&device->alloc, pAllocator, sem);
         return VK_ERROR_OUT_OF_HOST_MEMORY;
      }
      sem->permanent.kind = TU_SEMAPHORE_SYNCOBJ;
   }

   *pSemaphore = tu_semaphore_to_handle(sem);
   return VK_SUCCESS;
}

void
tu_DestroySemaphore(VkDevice _device,
                    VkSemaphore _semaphore,
                    const VkAllocationCallbacks *pAllocator)
{
   TU_FROM_HANDLE(tu_device, device, _device);
   TU_FROM_HANDLE(tu_semaphore, sem, _semaphore);

   if (!_semaphore)
      return;

   tu_semaphore_part_destroy(device, &sem->permanent);
   tu_semaphore_part_destroy(device, &sem->temporary);

   vk_free2(&device->alloc, pAllocator, sem);
}

VkResult
tu_CreateEvent(VkDevice _device,
               const VkEventCreateInfo *pCreateInfo,
               const VkAllocationCallbacks *pAllocator,
               VkEvent *pEvent)
{
   TU_FROM_HANDLE(tu_device, device, _device);
   struct tu_event *event =
      vk_alloc2(&device->alloc, pAllocator, sizeof(*event), 8,
                VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);

   if (!event)
      return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);

   VkResult result = tu_bo_init_new(device, &event->bo, 0x1000);
   if (result != VK_SUCCESS)
      goto fail_alloc;

   result = tu_bo_map(device, &event->bo);
   if (result != VK_SUCCESS)
      goto fail_map;

   *pEvent = tu_event_to_handle(event);

   return VK_SUCCESS;

fail_map:
   tu_bo_finish(device, &event->bo);
fail_alloc:
   vk_free2(&device->alloc, pAllocator, event);
   return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
}

void
tu_DestroyEvent(VkDevice _device,
                VkEvent _event,
                const VkAllocationCallbacks *pAllocator)
{
   TU_FROM_HANDLE(tu_device, device, _device);
   TU_FROM_HANDLE(tu_event, event, _event);

   if (!event)
      return;

   tu_bo_finish(device, &event->bo);
   vk_free2(&device->alloc, pAllocator, event);
}

VkResult
tu_GetEventStatus(VkDevice _device, VkEvent _event)
{
   TU_FROM_HANDLE(tu_event, event, _event);

   if (*(uint64_t*) event->bo.map == 1)
      return VK_EVENT_SET;
   return VK_EVENT_RESET;
}

VkResult
tu_SetEvent(VkDevice _device, VkEvent _event)
{
   TU_FROM_HANDLE(tu_event, event, _event);
   *(uint64_t*) event->bo.map = 1;

   return VK_SUCCESS;
}

VkResult
tu_ResetEvent(VkDevice _device, VkEvent _event)
{
   TU_FROM_HANDLE(tu_event, event, _event);
   *(uint64_t*) event->bo.map = 0;

   return VK_SUCCESS;
}

VkResult
tu_CreateBuffer(VkDevice _device,
                const VkBufferCreateInfo *pCreateInfo,
                const VkAllocationCallbacks *pAllocator,
                VkBuffer *pBuffer)
{
   TU_FROM_HANDLE(tu_device, device, _device);
   struct tu_buffer *buffer;

   assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO);

   buffer = vk_alloc2(&device->alloc, pAllocator, sizeof(*buffer), 8,
                      VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (buffer == NULL)
      return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);

   buffer->size = pCreateInfo->size;
   buffer->usage = pCreateInfo->usage;
   buffer->flags = pCreateInfo->flags;

   *pBuffer = tu_buffer_to_handle(buffer);

   return VK_SUCCESS;
}

void
tu_DestroyBuffer(VkDevice _device,
                 VkBuffer _buffer,
                 const VkAllocationCallbacks *pAllocator)
{
   TU_FROM_HANDLE(tu_device, device, _device);
   TU_FROM_HANDLE(tu_buffer, buffer, _buffer);

   if (!buffer)
      return;

   vk_free2(&device->alloc, pAllocator, buffer);
}

VkResult
tu_CreateFramebuffer(VkDevice _device,
                     const VkFramebufferCreateInfo *pCreateInfo,
                     const VkAllocationCallbacks *pAllocator,
                     VkFramebuffer *pFramebuffer)
{
   TU_FROM_HANDLE(tu_device, device, _device);
   TU_FROM_HANDLE(tu_render_pass, pass, pCreateInfo->renderPass);
   struct tu_framebuffer *framebuffer;

   assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO);

   size_t size = sizeof(*framebuffer) + sizeof(struct tu_attachment_info) *
                 pCreateInfo->attachmentCount;
   framebuffer = vk_alloc2(&device->alloc, pAllocator, size, 8,
                           VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (framebuffer == NULL)
      return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);

   framebuffer->attachment_count = pCreateInfo->attachmentCount;
   framebuffer->width = pCreateInfo->width;
   framebuffer->height = pCreateInfo->height;
   framebuffer->layers = pCreateInfo->layers;
   for (uint32_t i = 0; i < pCreateInfo->attachmentCount; i++) {
      VkImageView _iview = pCreateInfo->pAttachments[i];
      struct tu_image_view *iview = tu_image_view_from_handle(_iview);
      framebuffer->attachments[i].attachment = iview;
   }

   tu_framebuffer_tiling_config(framebuffer, device, pass);

   *pFramebuffer = tu_framebuffer_to_handle(framebuffer);
   return VK_SUCCESS;
}

void
tu_DestroyFramebuffer(VkDevice _device,
                      VkFramebuffer _fb,
                      const VkAllocationCallbacks *pAllocator)
{
   TU_FROM_HANDLE(tu_device, device, _device);
   TU_FROM_HANDLE(tu_framebuffer, fb, _fb);

   if (!fb)
      return;

   vk_free2(&device->alloc, pAllocator, fb);
}

static void
tu_init_sampler(struct tu_device *device,
                struct tu_sampler *sampler,
                const VkSamplerCreateInfo *pCreateInfo)
{
   const struct VkSamplerReductionModeCreateInfo *reduction =
      vk_find_struct_const(pCreateInfo->pNext, SAMPLER_REDUCTION_MODE_CREATE_INFO);
   const struct VkSamplerYcbcrConversionInfo *ycbcr_conversion =
      vk_find_struct_const(pCreateInfo->pNext, SAMPLER_YCBCR_CONVERSION_INFO);

   unsigned aniso = pCreateInfo->anisotropyEnable ?
      util_last_bit(MIN2((uint32_t)pCreateInfo->maxAnisotropy >> 1, 8)) : 0;
   bool miplinear = (pCreateInfo->mipmapMode == VK_SAMPLER_MIPMAP_MODE_LINEAR);
   float min_lod = CLAMP(pCreateInfo->minLod, 0.0f, 4095.0f / 256.0f);
   float max_lod = CLAMP(pCreateInfo->maxLod, 0.0f, 4095.0f / 256.0f);

   sampler->descriptor[0] =
      COND(miplinear, A6XX_TEX_SAMP_0_MIPFILTER_LINEAR_NEAR) |
      A6XX_TEX_SAMP_0_XY_MAG(tu6_tex_filter(pCreateInfo->magFilter, aniso)) |
      A6XX_TEX_SAMP_0_XY_MIN(tu6_tex_filter(pCreateInfo->minFilter, aniso)) |
      A6XX_TEX_SAMP_0_ANISO(aniso) |
      A6XX_TEX_SAMP_0_WRAP_S(tu6_tex_wrap(pCreateInfo->addressModeU)) |
      A6XX_TEX_SAMP_0_WRAP_T(tu6_tex_wrap(pCreateInfo->addressModeV)) |
      A6XX_TEX_SAMP_0_WRAP_R(tu6_tex_wrap(pCreateInfo->addressModeW)) |
      A6XX_TEX_SAMP_0_LOD_BIAS(pCreateInfo->mipLodBias);
   sampler->descriptor[1] =
      /* COND(!cso->seamless_cube_map, A6XX_TEX_SAMP_1_CUBEMAPSEAMLESSFILTOFF) | */
      COND(pCreateInfo->unnormalizedCoordinates, A6XX_TEX_SAMP_1_UNNORM_COORDS) |
      A6XX_TEX_SAMP_1_MIN_LOD(min_lod) |
      A6XX_TEX_SAMP_1_MAX_LOD(max_lod) |
      COND(pCreateInfo->compareEnable,
           A6XX_TEX_SAMP_1_COMPARE_FUNC(tu6_compare_func(pCreateInfo->compareOp)));
   /* This is an offset into the border_color BO, which we fill with all the
    * possible Vulkan border colors in the correct order, so we can just use
    * the Vulkan enum with no translation necessary.
    */
   sampler->descriptor[2] =
      A6XX_TEX_SAMP_2_BCOLOR_OFFSET((unsigned) pCreateInfo->borderColor *
                                    sizeof(struct bcolor_entry));
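   /* Worked example (values from the Vulkan enum, no extra driver logic):
    * VK_BORDER_COLOR_FLOAT_OPAQUE_WHITE is 4, so the descriptor points
    * 4 * sizeof(struct bcolor_entry) bytes into the border_color BO, i.e.
    * at the fifth pre-filled entry.
    */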
   sampler->descriptor[3] = 0;

   if (reduction) {
      sampler->descriptor[2] |= A6XX_TEX_SAMP_2_REDUCTION_MODE(
         tu6_reduction_mode(reduction->reductionMode));
   }

   sampler->ycbcr_sampler = ycbcr_conversion ?
      tu_sampler_ycbcr_conversion_from_handle(ycbcr_conversion->conversion) : NULL;

   if (sampler->ycbcr_sampler &&
       sampler->ycbcr_sampler->chroma_filter == VK_FILTER_LINEAR) {
      sampler->descriptor[2] |= A6XX_TEX_SAMP_2_CHROMA_LINEAR;
   }

   /* TODO:
    * A6XX_TEX_SAMP_1_MIPFILTER_LINEAR_FAR disables mipmapping, but vk has no NONE mipfilter?
    */
}

VkResult
tu_CreateSampler(VkDevice _device,
                 const VkSamplerCreateInfo *pCreateInfo,
                 const VkAllocationCallbacks *pAllocator,
                 VkSampler *pSampler)
{
   TU_FROM_HANDLE(tu_device, device, _device);
   struct tu_sampler *sampler;

   assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_SAMPLER_CREATE_INFO);

   sampler = vk_alloc2(&device->alloc, pAllocator, sizeof(*sampler), 8,
                       VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (!sampler)
      return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);

   tu_init_sampler(device, sampler, pCreateInfo);
   *pSampler = tu_sampler_to_handle(sampler);

   return VK_SUCCESS;
}

void
tu_DestroySampler(VkDevice _device,
                  VkSampler _sampler,
                  const VkAllocationCallbacks *pAllocator)
{
   TU_FROM_HANDLE(tu_device, device, _device);
   TU_FROM_HANDLE(tu_sampler, sampler, _sampler);

   if (!sampler)
      return;

   vk_free2(&device->alloc, pAllocator, sampler);
}

/* vk_icd.h does not declare this function, so we declare it here to
 * suppress Wmissing-prototypes.
 */
PUBLIC VKAPI_ATTR VkResult VKAPI_CALL
vk_icdNegotiateLoaderICDInterfaceVersion(uint32_t *pSupportedVersion);

PUBLIC VKAPI_ATTR VkResult VKAPI_CALL
vk_icdNegotiateLoaderICDInterfaceVersion(uint32_t *pSupportedVersion)
{
   /* For the full details on loader interface versioning, see
    * <https://github.com/KhronosGroup/Vulkan-LoaderAndValidationLayers/blob/master/loader/LoaderAndLayerInterface.md>.
    * What follows is a condensed summary, to help you navigate the large and
    * confusing official doc.
    *
    *   - Loader interface v0 is incompatible with later versions. We don't
    *     support it.
    *
    *   - In loader interface v1:
    *       - The first ICD entrypoint called by the loader is
    *         vk_icdGetInstanceProcAddr(). The ICD must statically expose this
    *         entrypoint.
    *       - The ICD must statically expose no other Vulkan symbol unless it
    *         is linked with -Bsymbolic.
    *       - Each dispatchable Vulkan handle created by the ICD must be
    *         a pointer to a struct whose first member is VK_LOADER_DATA. The
    *         ICD must initialize VK_LOADER_DATA.loadMagic to
    *         ICD_LOADER_MAGIC.
    *       - The loader implements vkCreate{PLATFORM}SurfaceKHR() and
    *         vkDestroySurfaceKHR(). The ICD must be capable of working with
    *         such loader-managed surfaces.
    *
    *    - Loader interface v2 differs from v1 in:
    *       - The first ICD entrypoint called by the loader is
    *         vk_icdNegotiateLoaderICDInterfaceVersion(). The ICD must
    *         statically expose this entrypoint.
    *
    *    - Loader interface v3 differs from v2 in:
    *        - The ICD must implement vkCreate{PLATFORM}SurfaceKHR(),
    *          vkDestroySurfaceKHR(), and other API which uses VKSurfaceKHR,
    *          because the loader no longer does so.
    */
   *pSupportedVersion = MIN2(*pSupportedVersion, 3u);
   return VK_SUCCESS;
}

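/* Worked example of the negotiation above (commentary only): a loader that
 * supports interface version 5 calls this with *pSupportedVersion == 5 and
 * reads back 3; a loader that only supports version 2 reads back 2, and the
 * driver must then behave as a v2 ICD.
 */
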
VkResult
tu_GetMemoryFdKHR(VkDevice _device,
                  const VkMemoryGetFdInfoKHR *pGetFdInfo,
                  int *pFd)
{
   TU_FROM_HANDLE(tu_device, device, _device);
   TU_FROM_HANDLE(tu_device_memory, memory, pGetFdInfo->memory);

   assert(pGetFdInfo->sType == VK_STRUCTURE_TYPE_MEMORY_GET_FD_INFO_KHR);

   /* At the moment, we support only the below handle types. */
   assert(pGetFdInfo->handleType ==
             VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT ||
          pGetFdInfo->handleType ==
             VK_EXTERNAL_MEMORY_HANDLE_TYPE_DMA_BUF_BIT_EXT);

   int prime_fd = tu_bo_export_dmabuf(device, &memory->bo);
   if (prime_fd < 0)
      return vk_error(device->instance, VK_ERROR_OUT_OF_DEVICE_MEMORY);

   *pFd = prime_fd;
   return VK_SUCCESS;
}

VkResult
tu_GetMemoryFdPropertiesKHR(VkDevice _device,
                            VkExternalMemoryHandleTypeFlagBits handleType,
                            int fd,
                            VkMemoryFdPropertiesKHR *pMemoryFdProperties)
{
   assert(handleType == VK_EXTERNAL_MEMORY_HANDLE_TYPE_DMA_BUF_BIT_EXT);
   pMemoryFdProperties->memoryTypeBits = 1;
   return VK_SUCCESS;
}

VkResult
tu_ImportFenceFdKHR(VkDevice _device,
                    const VkImportFenceFdInfoKHR *pImportFenceFdInfo)
{
   tu_stub();

   return VK_SUCCESS;
}

VkResult
tu_GetFenceFdKHR(VkDevice _device,
                 const VkFenceGetFdInfoKHR *pGetFdInfo,
                 int *pFd)
{
   tu_stub();

   return VK_SUCCESS;
}

VkResult
tu_ImportSemaphoreFdKHR(VkDevice _device,
                        const VkImportSemaphoreFdInfoKHR *pImportSemaphoreFdInfo)
{
   TU_FROM_HANDLE(tu_device, device, _device);
   TU_FROM_HANDLE(tu_semaphore, sem, pImportSemaphoreFdInfo->semaphore);
   int ret;
   struct tu_semaphore_part *dst = NULL;

   if (pImportSemaphoreFdInfo->flags & VK_SEMAPHORE_IMPORT_TEMPORARY_BIT) {
      dst = &sem->temporary;
   } else {
      dst = &sem->permanent;
   }

   uint32_t syncobj = dst->kind == TU_SEMAPHORE_SYNCOBJ ? dst->syncobj : 0;

   switch(pImportSemaphoreFdInfo->handleType) {
   case VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT: {
      uint32_t old_syncobj = syncobj;
      ret = drmSyncobjFDToHandle(device->physical_device->local_fd, pImportSemaphoreFdInfo->fd, &syncobj);
      if (!ret) {
         close(pImportSemaphoreFdInfo->fd);
         if (old_syncobj)
            drmSyncobjDestroy(device->physical_device->local_fd, old_syncobj);
      }
      break;
   }
   case VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT: {
      if (!syncobj) {
         ret = drmSyncobjCreate(device->physical_device->local_fd, 0, &syncobj);
         if (ret)
            break;
      }
      if (pImportSemaphoreFdInfo->fd == -1) {
         ret = drmSyncobjSignal(device->physical_device->local_fd, &syncobj, 1);
      } else {
         ret = drmSyncobjImportSyncFile(device->physical_device->local_fd, syncobj, pImportSemaphoreFdInfo->fd);
      }
      if (!ret)
         close(pImportSemaphoreFdInfo->fd);
      break;
   }
   default:
      unreachable("Unhandled semaphore handle type");
   }

   if (ret) {
      return VK_ERROR_INVALID_EXTERNAL_HANDLE;
   }

   dst->syncobj = syncobj;
   dst->kind = TU_SEMAPHORE_SYNCOBJ;

   return VK_SUCCESS;
}

VkResult
tu_GetSemaphoreFdKHR(VkDevice _device,
                     const VkSemaphoreGetFdInfoKHR *pGetFdInfo,
                     int *pFd)
{
   TU_FROM_HANDLE(tu_device, device, _device);
   TU_FROM_HANDLE(tu_semaphore, sem, pGetFdInfo->semaphore);
   int ret;
   uint32_t syncobj_handle;

   if (sem->temporary.kind != TU_SEMAPHORE_NONE) {
      assert(sem->temporary.kind == TU_SEMAPHORE_SYNCOBJ);
      syncobj_handle = sem->temporary.syncobj;
   } else {
      assert(sem->permanent.kind == TU_SEMAPHORE_SYNCOBJ);
      syncobj_handle = sem->permanent.syncobj;
   }

   switch(pGetFdInfo->handleType) {
   case VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT:
      ret = drmSyncobjHandleToFD(device->physical_device->local_fd, syncobj_handle, pFd);
      break;
   case VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT:
      ret = drmSyncobjExportSyncFile(device->physical_device->local_fd, syncobj_handle, pFd);
      if (!ret) {
         if (sem->temporary.kind != TU_SEMAPHORE_NONE) {
            tu_semaphore_part_destroy(device, &sem->temporary);
         } else {
            drmSyncobjReset(device->physical_device->local_fd, &syncobj_handle, 1);
         }
      }
      break;
   default:
      unreachable("Unhandled semaphore handle type");
   }

   if (ret)
      return vk_error(device->instance, VK_ERROR_INVALID_EXTERNAL_HANDLE);
   return VK_SUCCESS;
}

static bool tu_has_syncobj(struct tu_physical_device *pdev)
{
   uint64_t value;
   if (drmGetCap(pdev->local_fd, DRM_CAP_SYNCOBJ, &value))
      return false;
   return value && pdev->msm_major_version == 1 && pdev->msm_minor_version >= 6;
}

void
tu_GetPhysicalDeviceExternalSemaphoreProperties(
   VkPhysicalDevice physicalDevice,
   const VkPhysicalDeviceExternalSemaphoreInfo *pExternalSemaphoreInfo,
   VkExternalSemaphoreProperties *pExternalSemaphoreProperties)
{
   TU_FROM_HANDLE(tu_physical_device, pdev, physicalDevice);

   if (tu_has_syncobj(pdev) &&
       (pExternalSemaphoreInfo->handleType == VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT ||
        pExternalSemaphoreInfo->handleType == VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT)) {
      pExternalSemaphoreProperties->exportFromImportedHandleTypes = VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT | VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT;
      pExternalSemaphoreProperties->compatibleHandleTypes = VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT | VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT;
      pExternalSemaphoreProperties->externalSemaphoreFeatures = VK_EXTERNAL_SEMAPHORE_FEATURE_EXPORTABLE_BIT |
         VK_EXTERNAL_SEMAPHORE_FEATURE_IMPORTABLE_BIT;
   } else {
      pExternalSemaphoreProperties->exportFromImportedHandleTypes = 0;
      pExternalSemaphoreProperties->compatibleHandleTypes = 0;
      pExternalSemaphoreProperties->externalSemaphoreFeatures = 0;
   }
}

void
tu_GetPhysicalDeviceExternalFenceProperties(
   VkPhysicalDevice physicalDevice,
   const VkPhysicalDeviceExternalFenceInfo *pExternalFenceInfo,
   VkExternalFenceProperties *pExternalFenceProperties)
{
   pExternalFenceProperties->exportFromImportedHandleTypes = 0;
   pExternalFenceProperties->compatibleHandleTypes = 0;
   pExternalFenceProperties->externalFenceFeatures = 0;
}

VkResult
tu_CreateDebugReportCallbackEXT(
   VkInstance _instance,
   const VkDebugReportCallbackCreateInfoEXT *pCreateInfo,
   const VkAllocationCallbacks *pAllocator,
   VkDebugReportCallbackEXT *pCallback)
{
   TU_FROM_HANDLE(tu_instance, instance, _instance);
   return vk_create_debug_report_callback(&instance->debug_report_callbacks,
                                          pCreateInfo, pAllocator,
                                          &instance->alloc, pCallback);
}

void
tu_DestroyDebugReportCallbackEXT(VkInstance _instance,
                                 VkDebugReportCallbackEXT _callback,
                                 const VkAllocationCallbacks *pAllocator)
{
   TU_FROM_HANDLE(tu_instance, instance, _instance);
   vk_destroy_debug_report_callback(&instance->debug_report_callbacks,
                                    _callback, pAllocator, &instance->alloc);
}

void
tu_DebugReportMessageEXT(VkInstance _instance,
                         VkDebugReportFlagsEXT flags,
                         VkDebugReportObjectTypeEXT objectType,
                         uint64_t object,
                         size_t location,
                         int32_t messageCode,
                         const char *pLayerPrefix,
                         const char *pMessage)
{
   TU_FROM_HANDLE(tu_instance, instance, _instance);
   vk_debug_report(&instance->debug_report_callbacks, flags, objectType,
                   object, location, messageCode, pLayerPrefix, pMessage);
}

void
tu_GetDeviceGroupPeerMemoryFeatures(
   VkDevice device,
   uint32_t heapIndex,
   uint32_t localDeviceIndex,
   uint32_t remoteDeviceIndex,
   VkPeerMemoryFeatureFlags *pPeerMemoryFeatures)
{
   assert(localDeviceIndex == remoteDeviceIndex);

   *pPeerMemoryFeatures = VK_PEER_MEMORY_FEATURE_COPY_SRC_BIT |
                          VK_PEER_MEMORY_FEATURE_COPY_DST_BIT |
                          VK_PEER_MEMORY_FEATURE_GENERIC_SRC_BIT |
                          VK_PEER_MEMORY_FEATURE_GENERIC_DST_BIT;
}

void tu_GetPhysicalDeviceMultisamplePropertiesEXT(
   VkPhysicalDevice physicalDevice,
   VkSampleCountFlagBits samples,
   VkMultisamplePropertiesEXT* pMultisampleProperties)
{
   TU_FROM_HANDLE(tu_physical_device, pdevice, physicalDevice);

   if (samples <= VK_SAMPLE_COUNT_4_BIT && pdevice->supported_extensions.EXT_sample_locations)
      pMultisampleProperties->maxSampleLocationGridSize = (VkExtent2D){ 1, 1 };
   else
      pMultisampleProperties->maxSampleLocationGridSize = (VkExtent2D){ 0, 0 };
}