turnip: workaround for a630 d24_unorm_s8_uint fails
[mesa.git] / src / freedreno / vulkan / tu_device.c
1 /*
2 * Copyright © 2016 Red Hat.
3 * Copyright © 2016 Bas Nieuwenhuizen
4 *
5 * based in part on anv driver which is:
6 * Copyright © 2015 Intel Corporation
7 *
8 * Permission is hereby granted, free of charge, to any person obtaining a
9 * copy of this software and associated documentation files (the "Software"),
10 * to deal in the Software without restriction, including without limitation
11 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
12 * and/or sell copies of the Software, and to permit persons to whom the
13 * Software is furnished to do so, subject to the following conditions:
14 *
15 * The above copyright notice and this permission notice (including the next
16 * paragraph) shall be included in all copies or substantial portions of the
17 * Software.
18 *
19 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
20 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
21 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
22 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
23 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
24 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
25 * DEALINGS IN THE SOFTWARE.
26 */
27
28 #include "tu_private.h"
29
30 #include <fcntl.h>
31 #include <libsync.h>
32 #include <stdbool.h>
33 #include <string.h>
34 #include <sys/mman.h>
35 #include <sys/sysinfo.h>
36 #include <unistd.h>
37 #include <xf86drm.h>
38
39 #include "compiler/glsl_types.h"
40 #include "util/debug.h"
41 #include "util/disk_cache.h"
42 #include "util/u_atomic.h"
43 #include "vk_format.h"
44 #include "vk_util.h"
45
46 #include "drm-uapi/msm_drm.h"
47
48 /* for fd_get_driver/device_uuid() */
49 #include "freedreno/common/freedreno_uuid.h"
50
51 static void
52 tu_semaphore_remove_temp(struct tu_device *device,
53 struct tu_semaphore *sem);
54
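/* Build the 16-byte pipeline-cache UUID: bytes 0-3 hold the Mesa build
 * timestamp, bytes 4-5 the GPU family (the truncated gpu_id), and the
 * literal "tu" lands at byte 6; the rest stays zeroed. */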
55 static int
56 tu_device_get_cache_uuid(uint16_t family, void *uuid)
57 {
58 uint32_t mesa_timestamp;
59 uint16_t f = family;
60 memset(uuid, 0, VK_UUID_SIZE);
61 if (!disk_cache_get_function_timestamp(tu_device_get_cache_uuid,
62 &mesa_timestamp))
63 return -1;
64
65 memcpy(uuid, &mesa_timestamp, 4);
66 memcpy((char *) uuid + 4, &f, 2);
67 snprintf((char *) uuid + 6, VK_UUID_SIZE - 10, "tu");
68 return 0;
69 }
70
71 static VkResult
72 tu_bo_init(struct tu_device *dev,
73 struct tu_bo *bo,
74 uint32_t gem_handle,
75 uint64_t size)
76 {
77 uint64_t iova = tu_gem_info_iova(dev, gem_handle);
78 if (!iova)
79 return VK_ERROR_OUT_OF_DEVICE_MEMORY;
80
81 *bo = (struct tu_bo) {
82 .gem_handle = gem_handle,
83 .size = size,
84 .iova = iova,
85 };
86
87 return VK_SUCCESS;
88 }
89
90 VkResult
91 tu_bo_init_new(struct tu_device *dev, struct tu_bo *bo, uint64_t size)
92 {
93 /* TODO: Choose better flags. As of 2018-11-12, freedreno/drm/msm_bo.c
94 * always sets `flags = MSM_BO_WC`, and we copy that behavior here.
95 */
96 uint32_t gem_handle = tu_gem_new(dev, size, MSM_BO_WC);
97 if (!gem_handle)
98 return vk_error(dev->instance, VK_ERROR_OUT_OF_DEVICE_MEMORY);
99
100 VkResult result = tu_bo_init(dev, bo, gem_handle, size);
101 if (result != VK_SUCCESS) {
102 tu_gem_close(dev, gem_handle);
103 return vk_error(dev->instance, result);
104 }
105
106 return VK_SUCCESS;
107 }
108
109 VkResult
110 tu_bo_init_dmabuf(struct tu_device *dev,
111 struct tu_bo *bo,
112 uint64_t size,
113 int fd)
114 {
115 uint32_t gem_handle = tu_gem_import_dmabuf(dev, fd, size);
116 if (!gem_handle)
117 return vk_error(dev->instance, VK_ERROR_INVALID_EXTERNAL_HANDLE);
118
119 VkResult result = tu_bo_init(dev, bo, gem_handle, size);
120 if (result != VK_SUCCESS) {
121 tu_gem_close(dev, gem_handle);
122 return vk_error(dev->instance, result);
123 }
124
125 return VK_SUCCESS;
126 }
127
128 int
129 tu_bo_export_dmabuf(struct tu_device *dev, struct tu_bo *bo)
130 {
131 return tu_gem_export_dmabuf(dev, bo->gem_handle);
132 }
133
134 VkResult
135 tu_bo_map(struct tu_device *dev, struct tu_bo *bo)
136 {
137 if (bo->map)
138 return VK_SUCCESS;
139
140 uint64_t offset = tu_gem_info_offset(dev, bo->gem_handle);
141 if (!offset)
142 return vk_error(dev->instance, VK_ERROR_OUT_OF_DEVICE_MEMORY);
143
144 /* TODO: Should we use the wrapper os_mmap() like Freedreno does? */
145 void *map = mmap(0, bo->size, PROT_READ | PROT_WRITE, MAP_SHARED,
146 dev->physical_device->local_fd, offset);
147 if (map == MAP_FAILED)
148 return vk_error(dev->instance, VK_ERROR_MEMORY_MAP_FAILED);
149
150 bo->map = map;
151 return VK_SUCCESS;
152 }
153
154 void
155 tu_bo_finish(struct tu_device *dev, struct tu_bo *bo)
156 {
157 assert(bo->gem_handle);
158
159 if (bo->map)
160 munmap(bo->map, bo->size);
161
162 tu_gem_close(dev, bo->gem_handle);
163 }
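/* Typical BO lifecycle inside the driver, as an illustrative sketch (not a
 * real call site; error handling elided):
 *
 *    struct tu_bo bo;
 *    if (tu_bo_init_new(dev, &bo, 4096) == VK_SUCCESS) {
 *       if (tu_bo_map(dev, &bo) == VK_SUCCESS)
 *          memset(bo.map, 0, bo.size);   // CPU writes through the WC mapping
 *       tu_bo_finish(dev, &bo);          // munmap + GEM close
 *    }
 */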
164
165 static VkResult
166 tu_physical_device_init(struct tu_physical_device *device,
167 struct tu_instance *instance,
168 drmDevicePtr drm_device)
169 {
170 const char *path = drm_device->nodes[DRM_NODE_RENDER];
171 VkResult result = VK_SUCCESS;
172 drmVersionPtr version;
173 int fd;
174 int master_fd = -1;
175
176 fd = open(path, O_RDWR | O_CLOEXEC);
177 if (fd < 0) {
178 return vk_errorf(instance, VK_ERROR_INCOMPATIBLE_DRIVER,
179 "failed to open device %s", path);
180 }
181
182 /* Version 1.3 added MSM_INFO_IOVA. */
183 const int min_version_major = 1;
184 const int min_version_minor = 3;
185
186 version = drmGetVersion(fd);
187 if (!version) {
188 close(fd);
189 return vk_errorf(instance, VK_ERROR_INCOMPATIBLE_DRIVER,
190 "failed to query kernel driver version for device %s",
191 path);
192 }
193
194 if (strcmp(version->name, "msm")) {
195 drmFreeVersion(version);
196 close(fd);
197 return vk_errorf(instance, VK_ERROR_INCOMPATIBLE_DRIVER,
198 "device %s does not use the msm kernel driver", path);
199 }
200
201 if (version->version_major != min_version_major ||
202 version->version_minor < min_version_minor) {
203 result = vk_errorf(instance, VK_ERROR_INCOMPATIBLE_DRIVER,
204 "kernel driver for device %s has version %d.%d, "
205 "but Vulkan requires version >= %d.%d",
206 path, version->version_major, version->version_minor,
207 min_version_major, min_version_minor);
208 drmFreeVersion(version);
209 close(fd);
210 return result;
211 }
212
213 device->msm_major_version = version->version_major;
214 device->msm_minor_version = version->version_minor;
215
216 drmFreeVersion(version);
217
218 if (instance->debug_flags & TU_DEBUG_STARTUP)
219 tu_logi("Found compatible device '%s'.", path);
220
221 vk_object_base_init(NULL, &device->base, VK_OBJECT_TYPE_PHYSICAL_DEVICE);
222 device->instance = instance;
223 assert(strlen(path) < ARRAY_SIZE(device->path));
224 strncpy(device->path, path, ARRAY_SIZE(device->path));
225
226 if (instance->enabled_extensions.KHR_display) {
227 master_fd =
228 open(drm_device->nodes[DRM_NODE_PRIMARY], O_RDWR | O_CLOEXEC);
229 if (master_fd >= 0) {
230 /* TODO: free master_fd if accel is not working? */
231 }
232 }
233
234 device->master_fd = master_fd;
235 device->local_fd = fd;
236
237 if (tu_drm_get_gpu_id(device, &device->gpu_id)) {
238 if (instance->debug_flags & TU_DEBUG_STARTUP)
239 tu_logi("Could not query the GPU ID");
240 result = vk_errorf(instance, VK_ERROR_INITIALIZATION_FAILED,
241 "could not get GPU ID");
242 goto fail;
243 }
244
245 if (tu_drm_get_gmem_size(device, &device->gmem_size)) {
246 if (instance->debug_flags & TU_DEBUG_STARTUP)
247 tu_logi("Could not query the GMEM size");
248 result = vk_errorf(instance, VK_ERROR_INITIALIZATION_FAILED,
249 "could not get GMEM size");
250 goto fail;
251 }
252
253 if (tu_drm_get_gmem_base(device, &device->gmem_base)) {
254 if (instance->debug_flags & TU_DEBUG_STARTUP)
255 tu_logi("Could not query the GMEM base");
256 result = vk_errorf(instance, VK_ERROR_INITIALIZATION_FAILED,
257 "could not get GMEM base");
258 goto fail;
259 }
260
261 memset(device->name, 0, sizeof(device->name));
262 sprintf(device->name, "FD%d", device->gpu_id);
263
264 device->limited_z24s8 = (device->gpu_id == 630);
265
266 switch (device->gpu_id) {
267 case 618:
268 device->ccu_offset_gmem = 0x7c000; /* 0x7e000 in some cases? */
269 device->ccu_offset_bypass = 0x10000;
270 device->tile_align_w = 32;
271 device->magic.PC_UNKNOWN_9805 = 0x0;
272 device->magic.SP_UNKNOWN_A0F8 = 0x0;
273 break;
274 case 630:
275 case 640:
276 device->ccu_offset_gmem = 0xf8000;
277 device->ccu_offset_bypass = 0x20000;
278 device->tile_align_w = 32;
279 device->magic.PC_UNKNOWN_9805 = 0x1;
280 device->magic.SP_UNKNOWN_A0F8 = 0x1;
281 break;
282 case 650:
283 device->ccu_offset_gmem = 0x114000;
284 device->ccu_offset_bypass = 0x30000;
285 device->tile_align_w = 96;
286 device->magic.PC_UNKNOWN_9805 = 0x2;
287 device->magic.SP_UNKNOWN_A0F8 = 0x2;
288 break;
289 default:
290 result = vk_errorf(instance, VK_ERROR_INITIALIZATION_FAILED,
291 "device %s is unsupported", device->name);
292 goto fail;
293 }
294 if (tu_device_get_cache_uuid(device->gpu_id, device->cache_uuid)) {
295 result = vk_errorf(instance, VK_ERROR_INITIALIZATION_FAILED,
296 "cannot generate UUID");
297 goto fail;
298 }
299
300 /* The gpu id is already embedded in the cache uuid, so the cache key
301 * is simply the device name plus that uuid.
302 */
303 char buf[VK_UUID_SIZE * 2 + 1];
304 disk_cache_format_hex_id(buf, device->cache_uuid, VK_UUID_SIZE * 2);
305 device->disk_cache = disk_cache_create(device->name, buf, 0);
306
307 fprintf(stderr, "WARNING: tu is not a conformant vulkan implementation, "
308 "testing use only.\n");
309
310 fd_get_driver_uuid(device->driver_uuid);
311 fd_get_device_uuid(device->device_uuid, device->gpu_id);
312
313 tu_physical_device_get_supported_extensions(device, &device->supported_extensions);
319
320 result = tu_wsi_init(device);
321 if (result != VK_SUCCESS) {
322 vk_error(instance, result);
323 goto fail;
324 }
325
326 return VK_SUCCESS;
327
328 fail:
329 close(fd);
330 if (master_fd != -1)
331 close(master_fd);
332 return result;
333 }
334
335 static void
336 tu_physical_device_finish(struct tu_physical_device *device)
337 {
338 tu_wsi_finish(device);
339
340 disk_cache_destroy(device->disk_cache);
341 close(device->local_fd);
342 if (device->master_fd != -1)
343 close(device->master_fd);
344
345 vk_object_base_finish(&device->base);
346 }
347
348 static VKAPI_ATTR void *
349 default_alloc_func(void *pUserData,
350 size_t size,
351 size_t align,
352 VkSystemAllocationScope allocationScope)
353 {
354 return malloc(size);
355 }
356
357 static VKAPI_ATTR void *
358 default_realloc_func(void *pUserData,
359 void *pOriginal,
360 size_t size,
361 size_t align,
362 VkSystemAllocationScope allocationScope)
363 {
364 return realloc(pOriginal, size);
365 }
366
367 static VKAPI_ATTR void
368 default_free_func(void *pUserData, void *pMemory)
369 {
370 free(pMemory);
371 }
372
373 static const VkAllocationCallbacks default_alloc = {
374 .pUserData = NULL,
375 .pfnAllocation = default_alloc_func,
376 .pfnReallocation = default_realloc_func,
377 .pfnFree = default_free_func,
378 };
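/* Note that these fallbacks ignore the requested alignment; every
 * allocation in this file asks for 8-byte alignment, which malloc already
 * guarantees, so that is safe here. */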
379
380 static const struct debug_control tu_debug_options[] = {
381 { "startup", TU_DEBUG_STARTUP },
382 { "nir", TU_DEBUG_NIR },
383 { "ir3", TU_DEBUG_IR3 },
384 { "nobin", TU_DEBUG_NOBIN },
385 { "sysmem", TU_DEBUG_SYSMEM },
386 { "forcebin", TU_DEBUG_FORCEBIN },
387 { "noubwc", TU_DEBUG_NOUBWC },
388 { NULL, 0 }
389 };
390
391 const char *
392 tu_get_debug_option_name(int id)
393 {
394 assert(id < ARRAY_SIZE(tu_debug_options) - 1);
395 return tu_debug_options[id].string;
396 }
397
398 static int
399 tu_get_instance_extension_index(const char *name)
400 {
401 for (unsigned i = 0; i < TU_INSTANCE_EXTENSION_COUNT; ++i) {
402 if (strcmp(name, tu_instance_extensions[i].extensionName) == 0)
403 return i;
404 }
405 return -1;
406 }
407
408 VkResult
409 tu_CreateInstance(const VkInstanceCreateInfo *pCreateInfo,
410 const VkAllocationCallbacks *pAllocator,
411 VkInstance *pInstance)
412 {
413 struct tu_instance *instance;
414 VkResult result;
415
416 assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_INSTANCE_CREATE_INFO);
417
418 uint32_t client_version;
419 if (pCreateInfo->pApplicationInfo &&
420 pCreateInfo->pApplicationInfo->apiVersion != 0) {
421 client_version = pCreateInfo->pApplicationInfo->apiVersion;
422 } else {
423 tu_EnumerateInstanceVersion(&client_version);
424 }
425
426 instance = vk_zalloc2(&default_alloc, pAllocator, sizeof(*instance), 8,
427 VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
428
429 if (!instance)
430 return vk_error(NULL, VK_ERROR_OUT_OF_HOST_MEMORY);
431
432 vk_object_base_init(NULL, &instance->base, VK_OBJECT_TYPE_INSTANCE);
433
434 if (pAllocator)
435 instance->alloc = *pAllocator;
436 else
437 instance->alloc = default_alloc;
438
439 instance->api_version = client_version;
440 instance->physical_device_count = -1;
441
442 instance->debug_flags =
443 parse_debug_string(getenv("TU_DEBUG"), tu_debug_options);
444
445 if (instance->debug_flags & TU_DEBUG_STARTUP)
446 tu_logi("Created an instance");
447
448 for (uint32_t i = 0; i < pCreateInfo->enabledExtensionCount; i++) {
449 const char *ext_name = pCreateInfo->ppEnabledExtensionNames[i];
450 int index = tu_get_instance_extension_index(ext_name);
451
452 if (index < 0 || !tu_instance_extensions_supported.extensions[index]) {
453 vk_object_base_finish(&instance->base);
454 vk_free2(&default_alloc, pAllocator, instance);
455 return vk_error(instance, VK_ERROR_EXTENSION_NOT_PRESENT);
456 }
457
458 instance->enabled_extensions.extensions[index] = true;
459 }
460
461 result = vk_debug_report_instance_init(&instance->debug_report_callbacks);
462 if (result != VK_SUCCESS) {
463 vk_object_base_finish(&instance->base);
464 vk_free2(&default_alloc, pAllocator, instance);
465 return vk_error(instance, result);
466 }
467
468 glsl_type_singleton_init_or_ref();
469
470 VG(VALGRIND_CREATE_MEMPOOL(instance, 0, false));
471
472 *pInstance = tu_instance_to_handle(instance);
473
474 return VK_SUCCESS;
475 }
476
477 void
478 tu_DestroyInstance(VkInstance _instance,
479 const VkAllocationCallbacks *pAllocator)
480 {
481 TU_FROM_HANDLE(tu_instance, instance, _instance);
482
483 if (!instance)
484 return;
485
486 for (int i = 0; i < instance->physical_device_count; ++i) {
487 tu_physical_device_finish(instance->physical_devices + i);
488 }
489
490 VG(VALGRIND_DESTROY_MEMPOOL(instance));
491
492 glsl_type_singleton_decref();
493
494 vk_debug_report_instance_destroy(&instance->debug_report_callbacks);
495
496 vk_object_base_finish(&instance->base);
497 vk_free(&instance->alloc, instance);
498 }
499
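/* Probe up to eight DRM devices and accept only render nodes on the
 * platform bus: Adreno GPUs show up as platform devices, not PCI. */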
500 static VkResult
501 tu_enumerate_devices(struct tu_instance *instance)
502 {
503 /* TODO: Check for more devices? */
504 drmDevicePtr devices[8];
505 VkResult result = VK_ERROR_INCOMPATIBLE_DRIVER;
506 int max_devices;
507
508 instance->physical_device_count = 0;
509
510 max_devices = drmGetDevices2(0, devices, ARRAY_SIZE(devices));
511
512 if (instance->debug_flags & TU_DEBUG_STARTUP) {
513 if (max_devices < 0)
514 tu_logi("drmGetDevices2 returned error: %s\n", strerror(max_devices));
515 else
516 tu_logi("Found %d drm nodes", max_devices);
517 }
518
519 if (max_devices < 1)
520 return vk_error(instance, VK_ERROR_INCOMPATIBLE_DRIVER);
521
522 for (unsigned i = 0; i < (unsigned) max_devices; i++) {
523 if (devices[i]->available_nodes & 1 << DRM_NODE_RENDER &&
524 devices[i]->bustype == DRM_BUS_PLATFORM) {
525
526 result = tu_physical_device_init(
527 instance->physical_devices + instance->physical_device_count,
528 instance, devices[i]);
529 if (result == VK_SUCCESS)
530 ++instance->physical_device_count;
531 else if (result != VK_ERROR_INCOMPATIBLE_DRIVER)
532 break;
533 }
534 }
535 drmFreeDevices(devices, max_devices);
536
537 return result;
538 }
539
540 VkResult
541 tu_EnumeratePhysicalDevices(VkInstance _instance,
542 uint32_t *pPhysicalDeviceCount,
543 VkPhysicalDevice *pPhysicalDevices)
544 {
545 TU_FROM_HANDLE(tu_instance, instance, _instance);
546 VK_OUTARRAY_MAKE(out, pPhysicalDevices, pPhysicalDeviceCount);
547
548 VkResult result;
549
550 if (instance->physical_device_count < 0) {
551 result = tu_enumerate_devices(instance);
552 if (result != VK_SUCCESS && result != VK_ERROR_INCOMPATIBLE_DRIVER)
553 return result;
554 }
555
556 for (uint32_t i = 0; i < instance->physical_device_count; ++i) {
557 vk_outarray_append(&out, p)
558 {
559 *p = tu_physical_device_to_handle(instance->physical_devices + i);
560 }
561 }
562
563 return vk_outarray_status(&out);
564 }
565
566 VkResult
567 tu_EnumeratePhysicalDeviceGroups(
568 VkInstance _instance,
569 uint32_t *pPhysicalDeviceGroupCount,
570 VkPhysicalDeviceGroupProperties *pPhysicalDeviceGroupProperties)
571 {
572 TU_FROM_HANDLE(tu_instance, instance, _instance);
573 VK_OUTARRAY_MAKE(out, pPhysicalDeviceGroupProperties,
574 pPhysicalDeviceGroupCount);
575 VkResult result;
576
577 if (instance->physical_device_count < 0) {
578 result = tu_enumerate_devices(instance);
579 if (result != VK_SUCCESS && result != VK_ERROR_INCOMPATIBLE_DRIVER)
580 return result;
581 }
582
583 for (uint32_t i = 0; i < instance->physical_device_count; ++i) {
584 vk_outarray_append(&out, p)
585 {
586 p->physicalDeviceCount = 1;
587 p->physicalDevices[0] =
588 tu_physical_device_to_handle(instance->physical_devices + i);
589 p->subsetAllocation = false;
590 }
591 }
592
593 return vk_outarray_status(&out);
594 }
595
596 void
597 tu_GetPhysicalDeviceFeatures(VkPhysicalDevice physicalDevice,
598 VkPhysicalDeviceFeatures *pFeatures)
599 {
600 memset(pFeatures, 0, sizeof(*pFeatures));
601
602 *pFeatures = (VkPhysicalDeviceFeatures) {
603 .robustBufferAccess = true,
604 .fullDrawIndexUint32 = true,
605 .imageCubeArray = true,
606 .independentBlend = true,
607 .geometryShader = true,
608 .tessellationShader = true,
609 .sampleRateShading = true,
610 .dualSrcBlend = true,
611 .logicOp = true,
612 .multiDrawIndirect = true,
613 .drawIndirectFirstInstance = true,
614 .depthClamp = true,
615 .depthBiasClamp = true,
616 .fillModeNonSolid = true,
617 .depthBounds = true,
618 .wideLines = false,
619 .largePoints = true,
620 .alphaToOne = true,
621 .multiViewport = false,
622 .samplerAnisotropy = true,
623 .textureCompressionETC2 = true,
624 .textureCompressionASTC_LDR = true,
625 .textureCompressionBC = true,
626 .occlusionQueryPrecise = true,
627 .pipelineStatisticsQuery = false,
628 .vertexPipelineStoresAndAtomics = true,
629 .fragmentStoresAndAtomics = true,
630 .shaderTessellationAndGeometryPointSize = false,
631 .shaderImageGatherExtended = false,
632 .shaderStorageImageExtendedFormats = false,
633 .shaderStorageImageMultisample = false,
634 .shaderUniformBufferArrayDynamicIndexing = true,
635 .shaderSampledImageArrayDynamicIndexing = true,
636 .shaderStorageBufferArrayDynamicIndexing = true,
637 .shaderStorageImageArrayDynamicIndexing = true,
638 .shaderStorageImageReadWithoutFormat = false,
639 .shaderStorageImageWriteWithoutFormat = false,
640 .shaderClipDistance = false,
641 .shaderCullDistance = false,
642 .shaderFloat64 = false,
643 .shaderInt64 = false,
644 .shaderInt16 = false,
645 .sparseBinding = false,
646 .variableMultisampleRate = false,
647 .inheritedQueries = false,
648 };
649 }
650
651 void
652 tu_GetPhysicalDeviceFeatures2(VkPhysicalDevice physicalDevice,
653 VkPhysicalDeviceFeatures2 *pFeatures)
654 {
655 vk_foreach_struct(ext, pFeatures->pNext)
656 {
657 switch (ext->sType) {
658 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VULKAN_1_1_FEATURES: {
659 VkPhysicalDeviceVulkan11Features *features = (void *) ext;
660 features->storageBuffer16BitAccess = false;
661 features->uniformAndStorageBuffer16BitAccess = false;
662 features->storagePushConstant16 = false;
663 features->storageInputOutput16 = false;
664 features->multiview = false;
665 features->multiviewGeometryShader = false;
666 features->multiviewTessellationShader = false;
667 features->variablePointersStorageBuffer = true;
668 features->variablePointers = true;
669 features->protectedMemory = false;
670 features->samplerYcbcrConversion = true;
671 features->shaderDrawParameters = true;
672 break;
673 }
674 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VULKAN_1_2_FEATURES: {
675 VkPhysicalDeviceVulkan12Features *features = (void *) ext;
676 features->samplerMirrorClampToEdge = true;
677 features->drawIndirectCount = true;
678 features->storageBuffer8BitAccess = false;
679 features->uniformAndStorageBuffer8BitAccess = false;
680 features->storagePushConstant8 = false;
681 features->shaderBufferInt64Atomics = false;
682 features->shaderSharedInt64Atomics = false;
683 features->shaderFloat16 = false;
684 features->shaderInt8 = false;
685
686 features->descriptorIndexing = false;
687 features->shaderInputAttachmentArrayDynamicIndexing = false;
688 features->shaderUniformTexelBufferArrayDynamicIndexing = false;
689 features->shaderStorageTexelBufferArrayDynamicIndexing = false;
690 features->shaderUniformBufferArrayNonUniformIndexing = false;
691 features->shaderSampledImageArrayNonUniformIndexing = false;
692 features->shaderStorageBufferArrayNonUniformIndexing = false;
693 features->shaderStorageImageArrayNonUniformIndexing = false;
694 features->shaderInputAttachmentArrayNonUniformIndexing = false;
695 features->shaderUniformTexelBufferArrayNonUniformIndexing = false;
696 features->shaderStorageTexelBufferArrayNonUniformIndexing = false;
697 features->descriptorBindingUniformBufferUpdateAfterBind = false;
698 features->descriptorBindingSampledImageUpdateAfterBind = false;
699 features->descriptorBindingStorageImageUpdateAfterBind = false;
700 features->descriptorBindingStorageBufferUpdateAfterBind = false;
701 features->descriptorBindingUniformTexelBufferUpdateAfterBind = false;
702 features->descriptorBindingStorageTexelBufferUpdateAfterBind = false;
703 features->descriptorBindingUpdateUnusedWhilePending = false;
704 features->descriptorBindingPartiallyBound = false;
705 features->descriptorBindingVariableDescriptorCount = false;
706 features->runtimeDescriptorArray = false;
707
708 features->samplerFilterMinmax = true;
709 features->scalarBlockLayout = false;
710 features->imagelessFramebuffer = false;
711 features->uniformBufferStandardLayout = false;
712 features->shaderSubgroupExtendedTypes = false;
713 features->separateDepthStencilLayouts = false;
714 features->hostQueryReset = false;
715 features->timelineSemaphore = false;
716 features->bufferDeviceAddress = false;
717 features->bufferDeviceAddressCaptureReplay = false;
718 features->bufferDeviceAddressMultiDevice = false;
719 features->vulkanMemoryModel = false;
720 features->vulkanMemoryModelDeviceScope = false;
721 features->vulkanMemoryModelAvailabilityVisibilityChains = false;
722 features->shaderOutputViewportIndex = false;
723 features->shaderOutputLayer = false;
724 features->subgroupBroadcastDynamicId = false;
725 break;
726 }
727 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VARIABLE_POINTERS_FEATURES: {
728 VkPhysicalDeviceVariablePointersFeatures *features = (void *) ext;
729 features->variablePointersStorageBuffer = true;
730 features->variablePointers = true;
731 break;
732 }
733 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MULTIVIEW_FEATURES: {
734 VkPhysicalDeviceMultiviewFeatures *features =
735 (VkPhysicalDeviceMultiviewFeatures *) ext;
736 features->multiview = false;
737 features->multiviewGeometryShader = false;
738 features->multiviewTessellationShader = false;
739 break;
740 }
741 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_DRAW_PARAMETERS_FEATURES: {
742 VkPhysicalDeviceShaderDrawParametersFeatures *features =
743 (VkPhysicalDeviceShaderDrawParametersFeatures *) ext;
744 features->shaderDrawParameters = true;
745 break;
746 }
747 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROTECTED_MEMORY_FEATURES: {
748 VkPhysicalDeviceProtectedMemoryFeatures *features =
749 (VkPhysicalDeviceProtectedMemoryFeatures *) ext;
750 features->protectedMemory = false;
751 break;
752 }
753 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_16BIT_STORAGE_FEATURES: {
754 VkPhysicalDevice16BitStorageFeatures *features =
755 (VkPhysicalDevice16BitStorageFeatures *) ext;
756 features->storageBuffer16BitAccess = false;
757 features->uniformAndStorageBuffer16BitAccess = false;
758 features->storagePushConstant16 = false;
759 features->storageInputOutput16 = false;
760 break;
761 }
762 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SAMPLER_YCBCR_CONVERSION_FEATURES: {
763 VkPhysicalDeviceSamplerYcbcrConversionFeatures *features =
764 (VkPhysicalDeviceSamplerYcbcrConversionFeatures *) ext;
765 features->samplerYcbcrConversion = true;
766 break;
767 }
768 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DESCRIPTOR_INDEXING_FEATURES_EXT: {
769 VkPhysicalDeviceDescriptorIndexingFeaturesEXT *features =
770 (VkPhysicalDeviceDescriptorIndexingFeaturesEXT *) ext;
771 features->shaderInputAttachmentArrayDynamicIndexing = false;
772 features->shaderUniformTexelBufferArrayDynamicIndexing = false;
773 features->shaderStorageTexelBufferArrayDynamicIndexing = false;
774 features->shaderUniformBufferArrayNonUniformIndexing = false;
775 features->shaderSampledImageArrayNonUniformIndexing = false;
776 features->shaderStorageBufferArrayNonUniformIndexing = false;
777 features->shaderStorageImageArrayNonUniformIndexing = false;
778 features->shaderInputAttachmentArrayNonUniformIndexing = false;
779 features->shaderUniformTexelBufferArrayNonUniformIndexing = false;
780 features->shaderStorageTexelBufferArrayNonUniformIndexing = false;
781 features->descriptorBindingUniformBufferUpdateAfterBind = false;
782 features->descriptorBindingSampledImageUpdateAfterBind = false;
783 features->descriptorBindingStorageImageUpdateAfterBind = false;
784 features->descriptorBindingStorageBufferUpdateAfterBind = false;
785 features->descriptorBindingUniformTexelBufferUpdateAfterBind = false;
786 features->descriptorBindingStorageTexelBufferUpdateAfterBind = false;
787 features->descriptorBindingUpdateUnusedWhilePending = false;
788 features->descriptorBindingPartiallyBound = false;
789 features->descriptorBindingVariableDescriptorCount = false;
790 features->runtimeDescriptorArray = false;
791 break;
792 }
793 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_CONDITIONAL_RENDERING_FEATURES_EXT: {
794 VkPhysicalDeviceConditionalRenderingFeaturesEXT *features =
795 (VkPhysicalDeviceConditionalRenderingFeaturesEXT *) ext;
796 features->conditionalRendering = false;
797 features->inheritedConditionalRendering = false;
798 break;
799 }
800 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TRANSFORM_FEEDBACK_FEATURES_EXT: {
801 VkPhysicalDeviceTransformFeedbackFeaturesEXT *features =
802 (VkPhysicalDeviceTransformFeedbackFeaturesEXT *) ext;
803 features->transformFeedback = true;
804 features->geometryStreams = false;
805 break;
806 }
807 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_INDEX_TYPE_UINT8_FEATURES_EXT: {
808 VkPhysicalDeviceIndexTypeUint8FeaturesEXT *features =
809 (VkPhysicalDeviceIndexTypeUint8FeaturesEXT *)ext;
810 features->indexTypeUint8 = true;
811 break;
812 }
813 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VERTEX_ATTRIBUTE_DIVISOR_FEATURES_EXT: {
814 VkPhysicalDeviceVertexAttributeDivisorFeaturesEXT *features =
815 (VkPhysicalDeviceVertexAttributeDivisorFeaturesEXT *)ext;
816 features->vertexAttributeInstanceRateDivisor = true;
817 features->vertexAttributeInstanceRateZeroDivisor = true;
818 break;
819 }
820 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PRIVATE_DATA_FEATURES_EXT: {
821 VkPhysicalDevicePrivateDataFeaturesEXT *features =
822 (VkPhysicalDevicePrivateDataFeaturesEXT *)ext;
823 features->privateData = true;
824 break;
825 }
826 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DEPTH_CLIP_ENABLE_FEATURES_EXT: {
827 VkPhysicalDeviceDepthClipEnableFeaturesEXT *features =
828 (VkPhysicalDeviceDepthClipEnableFeaturesEXT *)ext;
829 features->depthClipEnable = true;
830 break;
831 }
832 default:
833 break;
834 }
835 }
836 tu_GetPhysicalDeviceFeatures(physicalDevice, &pFeatures->features);
837 }
838
839 void
840 tu_GetPhysicalDeviceProperties(VkPhysicalDevice physicalDevice,
841 VkPhysicalDeviceProperties *pProperties)
842 {
843 TU_FROM_HANDLE(tu_physical_device, pdevice, physicalDevice);
844 VkSampleCountFlags sample_counts =
845 VK_SAMPLE_COUNT_1_BIT | VK_SAMPLE_COUNT_2_BIT | VK_SAMPLE_COUNT_4_BIT;
846
847 /* I have no idea what the maximum size is, but the hardware supports very
848 * large numbers of descriptors (at least 2^16). This limit is based on
849 * CP_LOAD_STATE6, which has a 28-bit field for the DWORD offset, so that
850 * we don't have to think about what to do if that overflows, but really
851 * nothing is likely to get close to this.
852 */
853 const size_t max_descriptor_set_size = (1 << 28) / A6XX_TEX_CONST_DWORDS;
854
855 VkPhysicalDeviceLimits limits = {
856 .maxImageDimension1D = (1 << 14),
857 .maxImageDimension2D = (1 << 14),
858 .maxImageDimension3D = (1 << 11),
859 .maxImageDimensionCube = (1 << 14),
860 .maxImageArrayLayers = (1 << 11),
861 .maxTexelBufferElements = 128 * 1024 * 1024,
862 .maxUniformBufferRange = MAX_UNIFORM_BUFFER_RANGE,
863 .maxStorageBufferRange = MAX_STORAGE_BUFFER_RANGE,
864 .maxPushConstantsSize = MAX_PUSH_CONSTANTS_SIZE,
865 .maxMemoryAllocationCount = UINT32_MAX,
866 .maxSamplerAllocationCount = 64 * 1024,
867 .bufferImageGranularity = 64, /* A cache line */
868 .sparseAddressSpaceSize = 0xffffffffu, /* buffer max size */
869 .maxBoundDescriptorSets = MAX_SETS,
870 .maxPerStageDescriptorSamplers = max_descriptor_set_size,
871 .maxPerStageDescriptorUniformBuffers = max_descriptor_set_size,
872 .maxPerStageDescriptorStorageBuffers = max_descriptor_set_size,
873 .maxPerStageDescriptorSampledImages = max_descriptor_set_size,
874 .maxPerStageDescriptorStorageImages = max_descriptor_set_size,
875 .maxPerStageDescriptorInputAttachments = MAX_RTS,
876 .maxPerStageResources = max_descriptor_set_size,
877 .maxDescriptorSetSamplers = max_descriptor_set_size,
878 .maxDescriptorSetUniformBuffers = max_descriptor_set_size,
879 .maxDescriptorSetUniformBuffersDynamic = MAX_DYNAMIC_UNIFORM_BUFFERS,
880 .maxDescriptorSetStorageBuffers = max_descriptor_set_size,
881 .maxDescriptorSetStorageBuffersDynamic = MAX_DYNAMIC_STORAGE_BUFFERS,
882 .maxDescriptorSetSampledImages = max_descriptor_set_size,
883 .maxDescriptorSetStorageImages = max_descriptor_set_size,
884 .maxDescriptorSetInputAttachments = MAX_RTS,
885 .maxVertexInputAttributes = 32,
886 .maxVertexInputBindings = 32,
887 .maxVertexInputAttributeOffset = 4095,
888 .maxVertexInputBindingStride = 2048,
889 .maxVertexOutputComponents = 128,
890 .maxTessellationGenerationLevel = 64,
891 .maxTessellationPatchSize = 32,
892 .maxTessellationControlPerVertexInputComponents = 128,
893 .maxTessellationControlPerVertexOutputComponents = 128,
894 .maxTessellationControlPerPatchOutputComponents = 120,
895 .maxTessellationControlTotalOutputComponents = 4096,
896 .maxTessellationEvaluationInputComponents = 128,
897 .maxTessellationEvaluationOutputComponents = 128,
898 .maxGeometryShaderInvocations = 32,
899 .maxGeometryInputComponents = 64,
900 .maxGeometryOutputComponents = 128,
901 .maxGeometryOutputVertices = 256,
902 .maxGeometryTotalOutputComponents = 1024,
903 .maxFragmentInputComponents = 124,
904 .maxFragmentOutputAttachments = 8,
905 .maxFragmentDualSrcAttachments = 1,
906 .maxFragmentCombinedOutputResources = 8,
907 .maxComputeSharedMemorySize = 32768,
908 .maxComputeWorkGroupCount = { 65535, 65535, 65535 },
909 .maxComputeWorkGroupInvocations = 2048,
910 .maxComputeWorkGroupSize = { 2048, 2048, 2048 },
911 .subPixelPrecisionBits = 8,
912 .subTexelPrecisionBits = 8,
913 .mipmapPrecisionBits = 8,
914 .maxDrawIndexedIndexValue = UINT32_MAX,
915 .maxDrawIndirectCount = UINT32_MAX,
916 .maxSamplerLodBias = 4095.0 / 256.0, /* [-16, 15.99609375] */
917 .maxSamplerAnisotropy = 16,
918 .maxViewports = MAX_VIEWPORTS,
919 .maxViewportDimensions = { (1 << 14), (1 << 14) },
920 .viewportBoundsRange = { INT16_MIN, INT16_MAX },
921 .viewportSubPixelBits = 8,
922 .minMemoryMapAlignment = 4096, /* A page */
923 .minTexelBufferOffsetAlignment = 64,
924 .minUniformBufferOffsetAlignment = 64,
925 .minStorageBufferOffsetAlignment = 64,
926 .minTexelOffset = -16,
927 .maxTexelOffset = 15,
928 .minTexelGatherOffset = -32,
929 .maxTexelGatherOffset = 31,
930 .minInterpolationOffset = -0.5,
931 .maxInterpolationOffset = 0.4375,
932 .subPixelInterpolationOffsetBits = 4,
933 .maxFramebufferWidth = (1 << 14),
934 .maxFramebufferHeight = (1 << 14),
935 .maxFramebufferLayers = (1 << 10),
936 .framebufferColorSampleCounts = sample_counts,
937 .framebufferDepthSampleCounts = sample_counts,
938 .framebufferStencilSampleCounts = sample_counts,
939 .framebufferNoAttachmentsSampleCounts = sample_counts,
940 .maxColorAttachments = MAX_RTS,
941 .sampledImageColorSampleCounts = sample_counts,
942 .sampledImageIntegerSampleCounts = VK_SAMPLE_COUNT_1_BIT,
943 .sampledImageDepthSampleCounts = sample_counts,
944 .sampledImageStencilSampleCounts = sample_counts,
945 .storageImageSampleCounts = VK_SAMPLE_COUNT_1_BIT,
946 .maxSampleMaskWords = 1,
947 .timestampComputeAndGraphics = true,
948 .timestampPeriod = 1000000000.0 / 19200000.0, /* CP_ALWAYS_ON_COUNTER is fixed 19.2MHz */
949 .maxClipDistances = 8,
950 .maxCullDistances = 8,
951 .maxCombinedClipAndCullDistances = 8,
952 .discreteQueuePriorities = 1,
953 .pointSizeRange = { 1, 4092 },
954 .lineWidthRange = { 0.0, 7.9921875 },
955 .pointSizeGranularity = 0.0625,
956 .lineWidthGranularity = (1.0 / 128.0),
957 .strictLines = false, /* FINISHME */
958 .standardSampleLocations = true,
959 .optimalBufferCopyOffsetAlignment = 128,
960 .optimalBufferCopyRowPitchAlignment = 128,
961 .nonCoherentAtomSize = 64,
962 };
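/* timestampPeriod above: CP_ALWAYS_ON_COUNTER ticks at a fixed 19.2 MHz,
 * so one tick corresponds to 1e9 / 19.2e6 ~= 52.08 ns. */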
963
964 *pProperties = (VkPhysicalDeviceProperties) {
965 .apiVersion = tu_physical_device_api_version(pdevice),
966 .driverVersion = vk_get_driver_version(),
967 .vendorID = 0, /* TODO */
968 .deviceID = 0,
969 .deviceType = VK_PHYSICAL_DEVICE_TYPE_INTEGRATED_GPU,
970 .limits = limits,
971 .sparseProperties = { 0 },
972 };
973
974 strcpy(pProperties->deviceName, pdevice->name);
975 memcpy(pProperties->pipelineCacheUUID, pdevice->cache_uuid, VK_UUID_SIZE);
976 }
977
978 void
979 tu_GetPhysicalDeviceProperties2(VkPhysicalDevice physicalDevice,
980 VkPhysicalDeviceProperties2 *pProperties)
981 {
982 TU_FROM_HANDLE(tu_physical_device, pdevice, physicalDevice);
983 tu_GetPhysicalDeviceProperties(physicalDevice, &pProperties->properties);
984
985 vk_foreach_struct(ext, pProperties->pNext)
986 {
987 switch (ext->sType) {
988 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PUSH_DESCRIPTOR_PROPERTIES_KHR: {
989 VkPhysicalDevicePushDescriptorPropertiesKHR *properties =
990 (VkPhysicalDevicePushDescriptorPropertiesKHR *) ext;
991 properties->maxPushDescriptors = MAX_PUSH_DESCRIPTORS;
992 break;
993 }
994 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_ID_PROPERTIES: {
995 VkPhysicalDeviceIDProperties *properties =
996 (VkPhysicalDeviceIDProperties *) ext;
997 memcpy(properties->driverUUID, pdevice->driver_uuid, VK_UUID_SIZE);
998 memcpy(properties->deviceUUID, pdevice->device_uuid, VK_UUID_SIZE);
999 properties->deviceLUIDValid = false;
1000 break;
1001 }
1002 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MULTIVIEW_PROPERTIES: {
1003 VkPhysicalDeviceMultiviewProperties *properties =
1004 (VkPhysicalDeviceMultiviewProperties *) ext;
1005 properties->maxMultiviewViewCount = MAX_VIEWS;
1006 properties->maxMultiviewInstanceIndex = INT_MAX;
1007 break;
1008 }
1009 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_POINT_CLIPPING_PROPERTIES: {
1010 VkPhysicalDevicePointClippingProperties *properties =
1011 (VkPhysicalDevicePointClippingProperties *) ext;
1012 properties->pointClippingBehavior =
1013 VK_POINT_CLIPPING_BEHAVIOR_ALL_CLIP_PLANES;
1014 break;
1015 }
1016 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MAINTENANCE_3_PROPERTIES: {
1017 VkPhysicalDeviceMaintenance3Properties *properties =
1018 (VkPhysicalDeviceMaintenance3Properties *) ext;
1019 /* Make sure everything is addressable by a signed 32-bit int, and
1020 * our largest descriptors are 96 bytes. */
1021 properties->maxPerSetDescriptors = (1ull << 31) / 96;
1022 /* Our buffer size fields allow only this much */
1023 properties->maxMemoryAllocationSize = 0xFFFFFFFFull;
1024 break;
1025 }
1026 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TRANSFORM_FEEDBACK_PROPERTIES_EXT: {
1027 VkPhysicalDeviceTransformFeedbackPropertiesEXT *properties =
1028 (VkPhysicalDeviceTransformFeedbackPropertiesEXT *)ext;
1029
1030 properties->maxTransformFeedbackStreams = IR3_MAX_SO_STREAMS;
1031 properties->maxTransformFeedbackBuffers = IR3_MAX_SO_BUFFERS;
1032 properties->maxTransformFeedbackBufferSize = UINT32_MAX;
1033 properties->maxTransformFeedbackStreamDataSize = 512;
1034 properties->maxTransformFeedbackBufferDataSize = 512;
1035 properties->maxTransformFeedbackBufferDataStride = 512;
1036 properties->transformFeedbackQueries = true;
1037 properties->transformFeedbackStreamsLinesTriangles = false;
1038 properties->transformFeedbackRasterizationStreamSelect = false;
1039 properties->transformFeedbackDraw = true;
1040 break;
1041 }
1042 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SAMPLE_LOCATIONS_PROPERTIES_EXT: {
1043 VkPhysicalDeviceSampleLocationsPropertiesEXT *properties =
1044 (VkPhysicalDeviceSampleLocationsPropertiesEXT *)ext;
1045 properties->sampleLocationSampleCounts = 0;
1046 if (pdevice->supported_extensions.EXT_sample_locations) {
1047 properties->sampleLocationSampleCounts =
1048 VK_SAMPLE_COUNT_1_BIT | VK_SAMPLE_COUNT_2_BIT | VK_SAMPLE_COUNT_4_BIT;
1049 }
1050 properties->maxSampleLocationGridSize = (VkExtent2D) { 1 , 1 };
1051 properties->sampleLocationCoordinateRange[0] = 0.0f;
1052 properties->sampleLocationCoordinateRange[1] = 0.9375f;
1053 properties->sampleLocationSubPixelBits = 4;
1054 properties->variableSampleLocations = true;
1055 break;
1056 }
1057 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SAMPLER_FILTER_MINMAX_PROPERTIES: {
1058 VkPhysicalDeviceSamplerFilterMinmaxProperties *properties =
1059 (VkPhysicalDeviceSamplerFilterMinmaxProperties *)ext;
1060 properties->filterMinmaxImageComponentMapping = true;
1061 properties->filterMinmaxSingleComponentFormats = true;
1062 break;
1063 }
1064 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SUBGROUP_PROPERTIES: {
1065 VkPhysicalDeviceSubgroupProperties *properties =
1066 (VkPhysicalDeviceSubgroupProperties *)ext;
1067 properties->subgroupSize = 64;
1068 properties->supportedStages = VK_SHADER_STAGE_COMPUTE_BIT;
1069 properties->supportedOperations = VK_SUBGROUP_FEATURE_BASIC_BIT |
1070 VK_SUBGROUP_FEATURE_VOTE_BIT;
1071 properties->quadOperationsInAllStages = false;
1072 break;
1073 }
1074 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VERTEX_ATTRIBUTE_DIVISOR_PROPERTIES_EXT: {
1075 VkPhysicalDeviceVertexAttributeDivisorPropertiesEXT *props =
1076 (VkPhysicalDeviceVertexAttributeDivisorPropertiesEXT *)ext;
1077 props->maxVertexAttribDivisor = UINT32_MAX;
1078 break;
1079 }
1080 default:
1081 break;
1082 }
1083 }
1084 }
1085
1086 static const VkQueueFamilyProperties tu_queue_family_properties = {
1087 .queueFlags =
1088 VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT | VK_QUEUE_TRANSFER_BIT,
1089 .queueCount = 1,
1090 .timestampValidBits = 48,
1091 .minImageTransferGranularity = { 1, 1, 1 },
1092 };
1093
1094 void
1095 tu_GetPhysicalDeviceQueueFamilyProperties(
1096 VkPhysicalDevice physicalDevice,
1097 uint32_t *pQueueFamilyPropertyCount,
1098 VkQueueFamilyProperties *pQueueFamilyProperties)
1099 {
1100 VK_OUTARRAY_MAKE(out, pQueueFamilyProperties, pQueueFamilyPropertyCount);
1101
1102 vk_outarray_append(&out, p) { *p = tu_queue_family_properties; }
1103 }
1104
1105 void
1106 tu_GetPhysicalDeviceQueueFamilyProperties2(
1107 VkPhysicalDevice physicalDevice,
1108 uint32_t *pQueueFamilyPropertyCount,
1109 VkQueueFamilyProperties2 *pQueueFamilyProperties)
1110 {
1111 VK_OUTARRAY_MAKE(out, pQueueFamilyProperties, pQueueFamilyPropertyCount);
1112
1113 vk_outarray_append(&out, p)
1114 {
1115 p->queueFamilyProperties = tu_queue_family_properties;
1116 }
1117 }
1118
1119 static uint64_t
1120 tu_get_system_heap_size()
1121 {
1122 struct sysinfo info;
1123 sysinfo(&info);
1124
1125 uint64_t total_ram = (uint64_t) info.totalram * (uint64_t) info.mem_unit;
1126
1127 /* We don't want to burn too much ram with the GPU. If the user has 4GiB
1128 * or less, we use at most half. If they have more than 4GiB, we use 3/4.
1129 */
1130 uint64_t available_ram;
1131 if (total_ram <= 4ull * 1024ull * 1024ull * 1024ull)
1132 available_ram = total_ram / 2;
1133 else
1134 available_ram = total_ram * 3 / 4;
1135
1136 return available_ram;
1137 }
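/* For example, a board with 8 GiB of RAM reports a 6 GiB heap (3/4), while
 * a 4 GiB board reports 2 GiB (1/2). */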
1138
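/* A single heap and memory type: the GPU shares system RAM with the CPU,
 * so everything is advertised as device-local, host-visible and
 * host-coherent; there is no discrete VRAM to enumerate. */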
1139 void
1140 tu_GetPhysicalDeviceMemoryProperties(
1141 VkPhysicalDevice physicalDevice,
1142 VkPhysicalDeviceMemoryProperties *pMemoryProperties)
1143 {
1144 pMemoryProperties->memoryHeapCount = 1;
1145 pMemoryProperties->memoryHeaps[0].size = tu_get_system_heap_size();
1146 pMemoryProperties->memoryHeaps[0].flags = VK_MEMORY_HEAP_DEVICE_LOCAL_BIT;
1147
1148 pMemoryProperties->memoryTypeCount = 1;
1149 pMemoryProperties->memoryTypes[0].propertyFlags =
1150 VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT |
1151 VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT |
1152 VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
1153 pMemoryProperties->memoryTypes[0].heapIndex = 0;
1154 }
1155
1156 void
1157 tu_GetPhysicalDeviceMemoryProperties2(
1158 VkPhysicalDevice physicalDevice,
1159 VkPhysicalDeviceMemoryProperties2 *pMemoryProperties)
1160 {
1161 tu_GetPhysicalDeviceMemoryProperties(
1162 physicalDevice, &pMemoryProperties->memoryProperties);
1163 }
1164
1165 static VkResult
1166 tu_queue_init(struct tu_device *device,
1167 struct tu_queue *queue,
1168 uint32_t queue_family_index,
1169 int idx,
1170 VkDeviceQueueCreateFlags flags)
1171 {
1172 vk_object_base_init(&device->vk, &queue->base, VK_OBJECT_TYPE_QUEUE);
1173
1174 queue->device = device;
1175 queue->queue_family_index = queue_family_index;
1176 queue->queue_idx = idx;
1177 queue->flags = flags;
1178
1179 int ret = tu_drm_submitqueue_new(device, 0, &queue->msm_queue_id);
1180 if (ret)
1181 return VK_ERROR_INITIALIZATION_FAILED;
1182
1183 tu_fence_init(&queue->submit_fence, false);
1184
1185 return VK_SUCCESS;
1186 }
1187
1188 static void
1189 tu_queue_finish(struct tu_queue *queue)
1190 {
1191 tu_fence_finish(&queue->submit_fence);
1192 tu_drm_submitqueue_close(queue->device, queue->msm_queue_id);
1193 }
1194
1195 static int
1196 tu_get_device_extension_index(const char *name)
1197 {
1198 for (unsigned i = 0; i < TU_DEVICE_EXTENSION_COUNT; ++i) {
1199 if (strcmp(name, tu_device_extensions[i].extensionName) == 0)
1200 return i;
1201 }
1202 return -1;
1203 }
1204
1205 struct PACKED bcolor_entry {
1206 uint32_t fp32[4];
1207 uint16_t ui16[4];
1208 int16_t si16[4];
1209 uint16_t fp16[4];
1210 uint16_t rgb565;
1211 uint16_t rgb5a1;
1212 uint16_t rgba4;
1213 uint8_t __pad0[2];
1214 uint8_t ui8[4];
1215 int8_t si8[4];
1216 uint32_t rgb10a2;
1217 uint32_t z24; /* also s8? */
1218 uint16_t srgb[4]; /* appears to duplicate fp16[], but clamped, used for srgb */
1219 uint8_t __pad1[56];
1220 } border_color[] = {
1221 [VK_BORDER_COLOR_FLOAT_TRANSPARENT_BLACK] = {},
1222 [VK_BORDER_COLOR_INT_TRANSPARENT_BLACK] = {},
1223 [VK_BORDER_COLOR_FLOAT_OPAQUE_BLACK] = {
1224 .fp32[3] = 0x3f800000,
1225 .ui16[3] = 0xffff,
1226 .si16[3] = 0x7fff,
1227 .fp16[3] = 0x3c00,
1228 .rgb5a1 = 0x8000,
1229 .rgba4 = 0xf000,
1230 .ui8[3] = 0xff,
1231 .si8[3] = 0x7f,
1232 .rgb10a2 = 0xc0000000,
1233 .srgb[3] = 0x3c00,
1234 },
1235 [VK_BORDER_COLOR_INT_OPAQUE_BLACK] = {
1236 .fp32[3] = 1,
1237 .fp16[3] = 1,
1238 },
1239 [VK_BORDER_COLOR_FLOAT_OPAQUE_WHITE] = {
1240 .fp32[0 ... 3] = 0x3f800000,
1241 .ui16[0 ... 3] = 0xffff,
1242 .si16[0 ... 3] = 0x7fff,
1243 .fp16[0 ... 3] = 0x3c00,
1244 .rgb565 = 0xffff,
1245 .rgb5a1 = 0xffff,
1246 .rgba4 = 0xffff,
1247 .ui8[0 ... 3] = 0xff,
1248 .si8[0 ... 3] = 0x7f,
1249 .rgb10a2 = 0xffffffff,
1250 .z24 = 0xffffff,
1251 .srgb[0 ... 3] = 0x3c00,
1252 },
1253 [VK_BORDER_COLOR_INT_OPAQUE_WHITE] = {
1254 .fp32[0 ... 3] = 1,
1255 .fp16[0 ... 3] = 1,
1256 },
1257 };
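/* Each bcolor_entry pre-bakes one Vulkan border color in every format
 * class the sampler may hit (fp32/fp16, 8/16-bit (s)norm, packed
 * 565/5551/4444/1010102, Z24, sRGB), presumably so the hardware can fetch
 * the border value directly whatever the sampled format. Opaque white, for
 * instance, stores 0x3f800000 (1.0f) in fp32[] and 0x3c00 (1.0) in fp16[]. */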
1258
1259 VkResult
1260 tu_CreateDevice(VkPhysicalDevice physicalDevice,
1261 const VkDeviceCreateInfo *pCreateInfo,
1262 const VkAllocationCallbacks *pAllocator,
1263 VkDevice *pDevice)
1264 {
1265 TU_FROM_HANDLE(tu_physical_device, physical_device, physicalDevice);
1266 VkResult result;
1267 struct tu_device *device;
1268
1269 /* Check enabled features */
1270 if (pCreateInfo->pEnabledFeatures) {
1271 VkPhysicalDeviceFeatures supported_features;
1272 tu_GetPhysicalDeviceFeatures(physicalDevice, &supported_features);
1273 VkBool32 *supported_feature = (VkBool32 *) &supported_features;
1274 VkBool32 *enabled_feature = (VkBool32 *) pCreateInfo->pEnabledFeatures;
1275 unsigned num_features =
1276 sizeof(VkPhysicalDeviceFeatures) / sizeof(VkBool32);
1277 for (uint32_t i = 0; i < num_features; i++) {
1278 if (enabled_feature[i] && !supported_feature[i])
1279 return vk_error(physical_device->instance,
1280 VK_ERROR_FEATURE_NOT_PRESENT);
1281 }
1282 }
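/* The check above walks VkPhysicalDeviceFeatures as a flat array, relying
 * on the struct being nothing but consecutive VkBool32 members. */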
1283
1284 device = vk_zalloc2(&physical_device->instance->alloc, pAllocator,
1285 sizeof(*device), 8, VK_SYSTEM_ALLOCATION_SCOPE_DEVICE);
1286 if (!device)
1287 return vk_error(physical_device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
1288
1289 vk_device_init(&device->vk, pCreateInfo,
1290 &physical_device->instance->alloc, pAllocator);
1291
1292 device->instance = physical_device->instance;
1293 device->physical_device = physical_device;
1294 device->_lost = false;
1295
1296 for (uint32_t i = 0; i < pCreateInfo->enabledExtensionCount; i++) {
1297 const char *ext_name = pCreateInfo->ppEnabledExtensionNames[i];
1298 int index = tu_get_device_extension_index(ext_name);
1299 if (index < 0 ||
1300 !physical_device->supported_extensions.extensions[index]) {
1301 vk_free(&device->vk.alloc, device);
1302 return vk_error(physical_device->instance,
1303 VK_ERROR_EXTENSION_NOT_PRESENT);
1304 }
1305
1306 device->enabled_extensions.extensions[index] = true;
1307 }
1308
1309 for (unsigned i = 0; i < pCreateInfo->queueCreateInfoCount; i++) {
1310 const VkDeviceQueueCreateInfo *queue_create =
1311 &pCreateInfo->pQueueCreateInfos[i];
1312 uint32_t qfi = queue_create->queueFamilyIndex;
1313 device->queues[qfi] = vk_alloc(
1314 &device->vk.alloc, queue_create->queueCount * sizeof(struct tu_queue),
1315 8, VK_SYSTEM_ALLOCATION_SCOPE_DEVICE);
1316 if (!device->queues[qfi]) {
1317 result = VK_ERROR_OUT_OF_HOST_MEMORY;
1318 goto fail_queues;
1319 }
1320
1321 memset(device->queues[qfi], 0,
1322 queue_create->queueCount * sizeof(struct tu_queue));
1323
1324 device->queue_count[qfi] = queue_create->queueCount;
1325
1326 for (unsigned q = 0; q < queue_create->queueCount; q++) {
1327 result = tu_queue_init(device, &device->queues[qfi][q], qfi, q,
1328 queue_create->flags);
1329 if (result != VK_SUCCESS)
1330 goto fail_queues;
1331 }
1332 }
1333
1334 device->compiler = ir3_compiler_create(NULL, physical_device->gpu_id);
1335 if (!device->compiler) {
1336 result = VK_ERROR_INITIALIZATION_FAILED; goto fail_queues; }
1337
1338 /* initial sizes, these will increase if there is overflow */
1339 device->vsc_draw_strm_pitch = 0x1000 + VSC_PAD;
1340 device->vsc_prim_strm_pitch = 0x4000 + VSC_PAD;
1341
1342 STATIC_ASSERT(sizeof(border_color) == sizeof(((struct tu6_global*) 0)->border_color));
1343 result = tu_bo_init_new(device, &device->global_bo, sizeof(struct tu6_global));
1344 if (result != VK_SUCCESS)
1345 goto fail_global_bo;
1346
1347 result = tu_bo_map(device, &device->global_bo);
1348 if (result != VK_SUCCESS)
1349 goto fail_global_bo_map;
1350
1351 memcpy(device->global_bo.map + gb_offset(border_color), border_color, sizeof(border_color));
1352 tu_init_clear_blit_shaders(device->global_bo.map);
1353
1354 VkPipelineCacheCreateInfo ci;
1355 ci.sType = VK_STRUCTURE_TYPE_PIPELINE_CACHE_CREATE_INFO;
1356 ci.pNext = NULL;
1357 ci.flags = 0;
1358 ci.pInitialData = NULL;
1359 ci.initialDataSize = 0;
1360 VkPipelineCache pc;
1361 result =
1362 tu_CreatePipelineCache(tu_device_to_handle(device), &ci, NULL, &pc);
1363 if (result != VK_SUCCESS)
1364 goto fail_pipeline_cache;
1365
1366 device->mem_cache = tu_pipeline_cache_from_handle(pc);
1367
1368 for (unsigned i = 0; i < ARRAY_SIZE(device->scratch_bos); i++)
1369 mtx_init(&device->scratch_bos[i].construct_mtx, mtx_plain);
1370
1371 mtx_init(&device->vsc_pitch_mtx, mtx_plain);
1372
1373 *pDevice = tu_device_to_handle(device);
1374 return VK_SUCCESS;
1375
1376 fail_pipeline_cache:
1377 fail_global_bo_map:
1378 tu_bo_finish(device, &device->global_bo);
1379
1380 fail_global_bo:
1381 ralloc_free(device->compiler);
1382
1383 fail_queues:
1384 for (unsigned i = 0; i < TU_MAX_QUEUE_FAMILIES; i++) {
1385 for (unsigned q = 0; q < device->queue_count[i]; q++)
1386 tu_queue_finish(&device->queues[i][q]);
1387 if (device->queue_count[i])
1388 vk_object_free(&device->vk, NULL, device->queues[i]);
1389 }
1390
1391 vk_free(&device->vk.alloc, device);
1392 return result;
1393 }
1394
1395 void
1396 tu_DestroyDevice(VkDevice _device, const VkAllocationCallbacks *pAllocator)
1397 {
1398 TU_FROM_HANDLE(tu_device, device, _device);
1399
1400 if (!device)
1401 return;
1402
1403 for (unsigned i = 0; i < TU_MAX_QUEUE_FAMILIES; i++) {
1404 for (unsigned q = 0; q < device->queue_count[i]; q++)
1405 tu_queue_finish(&device->queues[i][q]);
1406 if (device->queue_count[i])
1407 vk_object_free(&device->vk, NULL, device->queues[i]);
1408 }
1409
1410 for (unsigned i = 0; i < ARRAY_SIZE(device->scratch_bos); i++) {
1411 if (device->scratch_bos[i].initialized)
1412 tu_bo_finish(device, &device->scratch_bos[i].bo);
1413 }
1414
1415 ir3_compiler_destroy(device->compiler);
1416
1417 VkPipelineCache pc = tu_pipeline_cache_to_handle(device->mem_cache);
1418 tu_DestroyPipelineCache(tu_device_to_handle(device), pc, NULL);
1419
1420 vk_free(&device->vk.alloc, device);
1421 }
1422
1423 VkResult
1424 _tu_device_set_lost(struct tu_device *device,
1425 const char *file, int line,
1426 const char *msg, ...)
1427 {
1428 /* Set the flag indicating that waits should return in finite time even
1429 * after device loss.
1430 */
1431 p_atomic_inc(&device->_lost);
1432
1433 /* TODO: Report the log message through VkDebugReportCallbackEXT instead */
1434 fprintf(stderr, "%s:%d: ", file, line);
1435 va_list ap;
1436 va_start(ap, msg);
1437 vfprintf(stderr, msg, ap);
1438 va_end(ap);
1439
1440 if (env_var_as_boolean("TU_ABORT_ON_DEVICE_LOSS", false))
1441 abort();
1442
1443 return VK_ERROR_DEVICE_LOST;
1444 }
1445
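/* Scratch BOs live in lazily-allocated power-of-two buckets that survive
 * until device destruction. For example (assuming MIN_SCRATCH_BO_SIZE_LOG2
 * is 12, i.e. a 4 KiB minimum), a 100000-byte request rounds up to
 * 2^17 = 128 KiB and lands in bucket 5; any later request that fits may be
 * served by that BO or by a larger already-initialized one. */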
1446 VkResult
1447 tu_get_scratch_bo(struct tu_device *dev, uint64_t size, struct tu_bo **bo)
1448 {
1449 unsigned size_log2 = MAX2(util_logbase2_ceil64(size), MIN_SCRATCH_BO_SIZE_LOG2);
1450 unsigned index = size_log2 - MIN_SCRATCH_BO_SIZE_LOG2;
1451 assert(index < ARRAY_SIZE(dev->scratch_bos));
1452
1453 for (unsigned i = index; i < ARRAY_SIZE(dev->scratch_bos); i++) {
1454 if (p_atomic_read(&dev->scratch_bos[i].initialized)) {
1455 /* Fast path: just return the already-allocated BO. */
1456 *bo = &dev->scratch_bos[i].bo;
1457 return VK_SUCCESS;
1458 }
1459 }
1460
1461 /* Slow path: actually allocate the BO. We take a mutex rather than
1462 * spinning on the initialized flag: allocation is slow, and waiting
1463 * threads should sleep instead of burning CPU until it finishes.
1464 */
1465 mtx_lock(&dev->scratch_bos[index].construct_mtx);
1466
1467 /* Another thread may have allocated it already while we were waiting on
1468 * the lock. We need to check this in order to avoid double-allocating.
1469 */
1470 if (dev->scratch_bos[index].initialized) {
1471 mtx_unlock(&dev->scratch_bos[index].construct_mtx);
1472 *bo = &dev->scratch_bos[index].bo;
1473 return VK_SUCCESS;
1474 }
1475
1476 unsigned bo_size = 1ull << size_log2;
1477 VkResult result = tu_bo_init_new(dev, &dev->scratch_bos[index].bo, bo_size);
1478 if (result != VK_SUCCESS) {
1479 mtx_unlock(&dev->scratch_bos[index].construct_mtx);
1480 return result;
1481 }
1482
1483 p_atomic_set(&dev->scratch_bos[index].initialized, true);
1484
1485 mtx_unlock(&dev->scratch_bos[index].construct_mtx);
1486
1487 *bo = &dev->scratch_bos[index].bo;
1488 return VK_SUCCESS;
1489 }
1490
1491 VkResult
1492 tu_EnumerateInstanceLayerProperties(uint32_t *pPropertyCount,
1493 VkLayerProperties *pProperties)
1494 {
1495 *pPropertyCount = 0;
1496 return VK_SUCCESS;
1497 }
1498
1499 VkResult
1500 tu_EnumerateDeviceLayerProperties(VkPhysicalDevice physicalDevice,
1501 uint32_t *pPropertyCount,
1502 VkLayerProperties *pProperties)
1503 {
1504 *pPropertyCount = 0;
1505 return VK_SUCCESS;
1506 }
1507
1508 void
1509 tu_GetDeviceQueue2(VkDevice _device,
1510 const VkDeviceQueueInfo2 *pQueueInfo,
1511 VkQueue *pQueue)
1512 {
1513 TU_FROM_HANDLE(tu_device, device, _device);
1514 struct tu_queue *queue;
1515
1516 queue =
1517 &device->queues[pQueueInfo->queueFamilyIndex][pQueueInfo->queueIndex];
1518 if (pQueueInfo->flags != queue->flags) {
1519 /* From the Vulkan 1.1.70 spec:
1520 *
1521 * "The queue returned by vkGetDeviceQueue2 must have the same
1522 * flags value from this structure as that used at device
1523 * creation time in a VkDeviceQueueCreateInfo instance. If no
1524 * matching flags were specified at device creation time then
1525 * pQueue will return VK_NULL_HANDLE."
1526 */
1527 *pQueue = VK_NULL_HANDLE;
1528 return;
1529 }
1530
1531 *pQueue = tu_queue_to_handle(queue);
1532 }
1533
1534 void
1535 tu_GetDeviceQueue(VkDevice _device,
1536 uint32_t queueFamilyIndex,
1537 uint32_t queueIndex,
1538 VkQueue *pQueue)
1539 {
1540 const VkDeviceQueueInfo2 info =
1541 (VkDeviceQueueInfo2) { .sType = VK_STRUCTURE_TYPE_DEVICE_QUEUE_INFO_2,
1542 .queueFamilyIndex = queueFamilyIndex,
1543 .queueIndex = queueIndex };
1544
1545 tu_GetDeviceQueue2(_device, &info, pQueue);
1546 }
1547
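/* Collect the drm syncobj handles backing a list of semaphores. A
 * semaphore's temporary payload, if present, takes precedence over the
 * permanent one (matching Vulkan's import semantics), and wait syncobjs
 * are flagged MSM_SUBMIT_SYNCOBJ_RESET so the kernel consumes them. */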
1548 static VkResult
1549 tu_get_semaphore_syncobjs(const VkSemaphore *sems,
1550 uint32_t sem_count,
1551 bool wait,
1552 struct drm_msm_gem_submit_syncobj **out,
1553 uint32_t *out_count)
1554 {
1555 uint32_t syncobj_count = 0;
1556 struct drm_msm_gem_submit_syncobj *syncobjs;
1557
1558 for (uint32_t i = 0; i < sem_count; ++i) {
1559 TU_FROM_HANDLE(tu_semaphore, sem, sems[i]);
1560
1561 struct tu_semaphore_part *part =
1562 sem->temporary.kind != TU_SEMAPHORE_NONE ?
1563 &sem->temporary : &sem->permanent;
1564
1565 if (part->kind == TU_SEMAPHORE_SYNCOBJ)
1566 ++syncobj_count;
1567 }
1568
1569 *out = NULL;
1570 *out_count = syncobj_count;
1571 if (!syncobj_count)
1572 return VK_SUCCESS;
1573
1574 *out = syncobjs = calloc(syncobj_count, sizeof (*syncobjs));
1575 if (!syncobjs)
1576 return VK_ERROR_OUT_OF_HOST_MEMORY;
1577
1578 for (uint32_t i = 0, j = 0; i < sem_count; ++i) {
1579 TU_FROM_HANDLE(tu_semaphore, sem, sems[i]);
1580
1581 struct tu_semaphore_part *part =
1582 sem->temporary.kind != TU_SEMAPHORE_NONE ?
1583 &sem->temporary : &sem->permanent;
1584
1585 if (part->kind == TU_SEMAPHORE_SYNCOBJ) {
1586 syncobjs[j].handle = part->syncobj;
1587 syncobjs[j].flags = wait ? MSM_SUBMIT_SYNCOBJ_RESET : 0;
1588 ++j;
1589 }
1590 }
1591
1592 return VK_SUCCESS;
1593 }
1594
1595
1596 static void
1597 tu_semaphores_remove_temp(struct tu_device *device,
1598 const VkSemaphore *sems,
1599 uint32_t sem_count)
1600 {
1601 for (uint32_t i = 0; i < sem_count; ++i) {
1602 TU_FROM_HANDLE(tu_semaphore, sem, sems[i]);
1603 tu_semaphore_remove_temp(device, sem);
1604 }
1605 }
1606
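/* Queue submission flattens every command buffer's IB entries into a
 * single drm_msm_gem_submit ioctl per VkSubmitInfo, with wait/signal
 * semaphores translated to drm syncobjs. Only the last VkSubmitInfo in the
 * batch requests an out-fence fd (MSM_SUBMIT_FENCE_FD_OUT), since queue
 * execution is serialized anyway. */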
1607 VkResult
1608 tu_QueueSubmit(VkQueue _queue,
1609 uint32_t submitCount,
1610 const VkSubmitInfo *pSubmits,
1611 VkFence _fence)
1612 {
1613 TU_FROM_HANDLE(tu_queue, queue, _queue);
1614 VkResult result;
1615
1616 for (uint32_t i = 0; i < submitCount; ++i) {
1617 const VkSubmitInfo *submit = pSubmits + i;
1618 const bool last_submit = (i == submitCount - 1);
1619 struct drm_msm_gem_submit_syncobj *in_syncobjs = NULL, *out_syncobjs = NULL;
1620 uint32_t nr_in_syncobjs, nr_out_syncobjs;
1621 struct tu_bo_list bo_list;
1622 tu_bo_list_init(&bo_list);
1623
1624 result = tu_get_semaphore_syncobjs(pSubmits[i].pWaitSemaphores,
1625 pSubmits[i].waitSemaphoreCount,
1626 false, &in_syncobjs, &nr_in_syncobjs);
1627 if (result != VK_SUCCESS) {
1628 return tu_device_set_lost(queue->device,
1629 "failed to allocate space for semaphore submission\n");
1630 }
1631
1632 result = tu_get_semaphore_syncobjs(pSubmits[i].pSignalSemaphores,
1633 pSubmits[i].signalSemaphoreCount,
1634 false, &out_syncobjs, &nr_out_syncobjs);
1635 if (result != VK_SUCCESS) {
1636 free(in_syncobjs);
1637 return tu_device_set_lost(queue->device,
1638 "failed to allocate space for semaphore submission\n");
1639 }
1640
1641 uint32_t entry_count = 0;
1642 for (uint32_t j = 0; j < submit->commandBufferCount; ++j) {
1643 TU_FROM_HANDLE(tu_cmd_buffer, cmdbuf, submit->pCommandBuffers[j]);
1644 entry_count += cmdbuf->cs.entry_count;
1645 }
1646
1647 struct drm_msm_gem_submit_cmd cmds[entry_count];
1648 uint32_t entry_idx = 0;
1649 for (uint32_t j = 0; j < submit->commandBufferCount; ++j) {
1650 TU_FROM_HANDLE(tu_cmd_buffer, cmdbuf, submit->pCommandBuffers[j]);
1651 struct tu_cs *cs = &cmdbuf->cs;
1652 for (unsigned i = 0; i < cs->entry_count; ++i, ++entry_idx) {
1653 cmds[entry_idx].type = MSM_SUBMIT_CMD_BUF;
1654 cmds[entry_idx].submit_idx =
1655 tu_bo_list_add(&bo_list, cs->entries[i].bo,
1656 MSM_SUBMIT_BO_READ | MSM_SUBMIT_BO_DUMP);
1657 cmds[entry_idx].submit_offset = cs->entries[i].offset;
1658 cmds[entry_idx].size = cs->entries[i].size;
1659 cmds[entry_idx].pad = 0;
1660 cmds[entry_idx].nr_relocs = 0;
1661 cmds[entry_idx].relocs = 0;
1662 }
1663
1664 tu_bo_list_merge(&bo_list, &cmdbuf->bo_list);
1665 }
1666
1667 uint32_t flags = MSM_PIPE_3D0;
1668 if (nr_in_syncobjs) {
1669 flags |= MSM_SUBMIT_SYNCOBJ_IN;
1670 }
1671 if (nr_out_syncobjs) {
1672 flags |= MSM_SUBMIT_SYNCOBJ_OUT;
1673 }
1674
1675 if (last_submit) {
1676 flags |= MSM_SUBMIT_FENCE_FD_OUT;
1677 }
1678
1679 struct drm_msm_gem_submit req = {
1680 .flags = flags,
1681 .queueid = queue->msm_queue_id,
1682 .bos = (uint64_t)(uintptr_t) bo_list.bo_infos,
1683 .nr_bos = bo_list.count,
1684 .cmds = (uint64_t)(uintptr_t)cmds,
1685 .nr_cmds = entry_count,
1686 .in_syncobjs = (uint64_t)(uintptr_t)in_syncobjs,
1687 .out_syncobjs = (uint64_t)(uintptr_t)out_syncobjs,
1688 .nr_in_syncobjs = nr_in_syncobjs,
1689 .nr_out_syncobjs = nr_out_syncobjs,
1690 .syncobj_stride = sizeof(struct drm_msm_gem_submit_syncobj),
1691 };
1692
1693 int ret = drmCommandWriteRead(queue->device->physical_device->local_fd,
1694 DRM_MSM_GEM_SUBMIT,
1695 &req, sizeof(req));
1696 if (ret) {
1697 free(in_syncobjs);
1698 free(out_syncobjs);
1699 return tu_device_set_lost(queue->device, "submit failed: %s\n",
1700 strerror(errno));
1701 }
1702
1703 tu_bo_list_destroy(&bo_list);
1704 free(in_syncobjs);
1705 free(out_syncobjs);
1706
1707 tu_semaphores_remove_temp(queue->device, pSubmits[i].pWaitSemaphores,
1708 pSubmits[i].waitSemaphoreCount);
1709 if (last_submit) {
1710 /* no need to merge fences as queue execution is serialized */
1711 tu_fence_update_fd(&queue->submit_fence, req.fence_fd);
1712       }
1715 }
1716
1717 if (_fence != VK_NULL_HANDLE) {
1718 TU_FROM_HANDLE(tu_fence, fence, _fence);
1719 tu_fence_copy(fence, &queue->submit_fence);
1720 }
1721
1722 return VK_SUCCESS;
1723 }
1724
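/* Illustrative only (not part of the driver): the application-side call
 * that lands in tu_QueueSubmit() above.  Handles such as `queue`,
 * `cmd_buf`, `wait_sem`, `signal_sem` and `fence` are assumptions:
 *
 *    VkPipelineStageFlags wait_stage =
 *       VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT;
 *    VkSubmitInfo info = {
 *       .sType = VK_STRUCTURE_TYPE_SUBMIT_INFO,
 *       .waitSemaphoreCount = 1,
 *       .pWaitSemaphores = &wait_sem,
 *       .pWaitDstStageMask = &wait_stage,
 *       .commandBufferCount = 1,
 *       .pCommandBuffers = &cmd_buf,
 *       .signalSemaphoreCount = 1,
 *       .pSignalSemaphores = &signal_sem,
 *    };
 *    vkQueueSubmit(queue, 1, &info, fence);
 */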
1725 VkResult
1726 tu_QueueWaitIdle(VkQueue _queue)
1727 {
1728 TU_FROM_HANDLE(tu_queue, queue, _queue);
1729
1730 if (tu_device_is_lost(queue->device))
1731 return VK_ERROR_DEVICE_LOST;
1732
1733 tu_fence_wait_idle(&queue->submit_fence);
1734
1735 return VK_SUCCESS;
1736 }
1737
1738 VkResult
1739 tu_DeviceWaitIdle(VkDevice _device)
1740 {
1741 TU_FROM_HANDLE(tu_device, device, _device);
1742
1743 if (tu_device_is_lost(device))
1744 return VK_ERROR_DEVICE_LOST;
1745
1746 for (unsigned i = 0; i < TU_MAX_QUEUE_FAMILIES; i++) {
1747 for (unsigned q = 0; q < device->queue_count[i]; q++) {
1748 tu_QueueWaitIdle(tu_queue_to_handle(&device->queues[i][q]));
1749 }
1750 }
1751 return VK_SUCCESS;
1752 }
1753
1754 VkResult
1755 tu_EnumerateInstanceExtensionProperties(const char *pLayerName,
1756 uint32_t *pPropertyCount,
1757 VkExtensionProperties *pProperties)
1758 {
1759 VK_OUTARRAY_MAKE(out, pProperties, pPropertyCount);
1760
1761    /* We support no layers */
1762 if (pLayerName)
1763 return vk_error(NULL, VK_ERROR_LAYER_NOT_PRESENT);
1764
1765 for (int i = 0; i < TU_INSTANCE_EXTENSION_COUNT; i++) {
1766 if (tu_instance_extensions_supported.extensions[i]) {
1767 vk_outarray_append(&out, prop) { *prop = tu_instance_extensions[i]; }
1768 }
1769 }
1770
1771 return vk_outarray_status(&out);
1772 }
1773
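/* The VK_OUTARRAY_* helpers implement the standard Vulkan two-call idiom:
 * with a NULL array only the count is written back.  A sketch of the
 * matching application side (illustrative only):
 *
 *    uint32_t count = 0;
 *    vkEnumerateInstanceExtensionProperties(NULL, &count, NULL);
 *    VkExtensionProperties *props = malloc(count * sizeof(*props));
 *    vkEnumerateInstanceExtensionProperties(NULL, &count, props);
 */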
1774 VkResult
1775 tu_EnumerateDeviceExtensionProperties(VkPhysicalDevice physicalDevice,
1776 const char *pLayerName,
1777 uint32_t *pPropertyCount,
1778 VkExtensionProperties *pProperties)
1779 {
1781 TU_FROM_HANDLE(tu_physical_device, device, physicalDevice);
1782 VK_OUTARRAY_MAKE(out, pProperties, pPropertyCount);
1783
1784    /* We support no layers */
1785 if (pLayerName)
1786 return vk_error(NULL, VK_ERROR_LAYER_NOT_PRESENT);
1787
1788 for (int i = 0; i < TU_DEVICE_EXTENSION_COUNT; i++) {
1789 if (device->supported_extensions.extensions[i]) {
1790 vk_outarray_append(&out, prop) { *prop = tu_device_extensions[i]; }
1791 }
1792 }
1793
1794 return vk_outarray_status(&out);
1795 }
1796
1797 PFN_vkVoidFunction
1798 tu_GetInstanceProcAddr(VkInstance _instance, const char *pName)
1799 {
1800 TU_FROM_HANDLE(tu_instance, instance, _instance);
1801
1802 return tu_lookup_entrypoint_checked(
1803 pName, instance ? instance->api_version : 0,
1804 instance ? &instance->enabled_extensions : NULL, NULL);
1805 }
1806
1807 /* The loader wants us to expose a second GetInstanceProcAddr function
1808 * to work around certain LD_PRELOAD issues seen in apps.
1809 */
1810 PUBLIC
1811 VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL
1812 vk_icdGetInstanceProcAddr(VkInstance instance, const char *pName);
1813
1814 PUBLIC
1815 VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL
1816 vk_icdGetInstanceProcAddr(VkInstance instance, const char *pName)
1817 {
1818 return tu_GetInstanceProcAddr(instance, pName);
1819 }
1820
1821 PFN_vkVoidFunction
1822 tu_GetDeviceProcAddr(VkDevice _device, const char *pName)
1823 {
1824 TU_FROM_HANDLE(tu_device, device, _device);
1825
1826 return tu_lookup_entrypoint_checked(pName, device->instance->api_version,
1827 &device->instance->enabled_extensions,
1828 &device->enabled_extensions);
1829 }
1830
1831 static VkResult
1832 tu_alloc_memory(struct tu_device *device,
1833 const VkMemoryAllocateInfo *pAllocateInfo,
1834 const VkAllocationCallbacks *pAllocator,
1835 VkDeviceMemory *pMem)
1836 {
1837 struct tu_device_memory *mem;
1838 VkResult result;
1839
1840 assert(pAllocateInfo->sType == VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO);
1841
1842 if (pAllocateInfo->allocationSize == 0) {
1843 /* Apparently, this is allowed */
1844 *pMem = VK_NULL_HANDLE;
1845 return VK_SUCCESS;
1846 }
1847
1848 mem = vk_object_alloc(&device->vk, pAllocator, sizeof(*mem),
1849 VK_OBJECT_TYPE_DEVICE_MEMORY);
1850 if (mem == NULL)
1851 return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
1852
1853 const VkImportMemoryFdInfoKHR *fd_info =
1854 vk_find_struct_const(pAllocateInfo->pNext, IMPORT_MEMORY_FD_INFO_KHR);
1855 if (fd_info && !fd_info->handleType)
1856 fd_info = NULL;
1857
1858 if (fd_info) {
1859 assert(fd_info->handleType ==
1860 VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT ||
1861 fd_info->handleType ==
1862 VK_EXTERNAL_MEMORY_HANDLE_TYPE_DMA_BUF_BIT_EXT);
1863
1864 /*
1865 * TODO Importing the same fd twice gives us the same handle without
1866 * reference counting. We need to maintain a per-instance handle-to-bo
1867 * table and add reference count to tu_bo.
1868 */
1869 result = tu_bo_init_dmabuf(device, &mem->bo,
1870 pAllocateInfo->allocationSize, fd_info->fd);
1871 if (result == VK_SUCCESS) {
1872 /* take ownership and close the fd */
1873 close(fd_info->fd);
1874 }
1875 } else {
1876 result =
1877 tu_bo_init_new(device, &mem->bo, pAllocateInfo->allocationSize);
1878 }
1879
1880 if (result != VK_SUCCESS) {
1881 vk_object_free(&device->vk, pAllocator, mem);
1882 return result;
1883 }
1884
1885 mem->size = pAllocateInfo->allocationSize;
1886 mem->type_index = pAllocateInfo->memoryTypeIndex;
1887
1888 mem->map = NULL;
1889 mem->user_ptr = NULL;
1890
1891 *pMem = tu_device_memory_to_handle(mem);
1892
1893 return VK_SUCCESS;
1894 }
1895
1896 VkResult
1897 tu_AllocateMemory(VkDevice _device,
1898 const VkMemoryAllocateInfo *pAllocateInfo,
1899 const VkAllocationCallbacks *pAllocator,
1900 VkDeviceMemory *pMem)
1901 {
1902 TU_FROM_HANDLE(tu_device, device, _device);
1903 return tu_alloc_memory(device, pAllocateInfo, pAllocator, pMem);
1904 }
1905
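/* A hedged, application-side sketch of the dma-buf import path handled in
 * tu_alloc_memory() above; `device`, `dmabuf_fd` and `size` are assumed to
 * exist (on success the driver takes ownership of the fd and closes it):
 *
 *    VkImportMemoryFdInfoKHR import = {
 *       .sType = VK_STRUCTURE_TYPE_IMPORT_MEMORY_FD_INFO_KHR,
 *       .handleType = VK_EXTERNAL_MEMORY_HANDLE_TYPE_DMA_BUF_BIT_EXT,
 *       .fd = dmabuf_fd,
 *    };
 *    VkMemoryAllocateInfo alloc = {
 *       .sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO,
 *       .pNext = &import,
 *       .allocationSize = size,
 *       .memoryTypeIndex = 0,   // the only memory type exposed here
 *    };
 *    VkDeviceMemory mem;
 *    VkResult r = vkAllocateMemory(device, &alloc, NULL, &mem);
 */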
1906 void
1907 tu_FreeMemory(VkDevice _device,
1908 VkDeviceMemory _mem,
1909 const VkAllocationCallbacks *pAllocator)
1910 {
1911 TU_FROM_HANDLE(tu_device, device, _device);
1912 TU_FROM_HANDLE(tu_device_memory, mem, _mem);
1913
1914 if (mem == NULL)
1915 return;
1916
1917 tu_bo_finish(device, &mem->bo);
1918 vk_object_free(&device->vk, pAllocator, mem);
1919 }
1920
1921 VkResult
1922 tu_MapMemory(VkDevice _device,
1923 VkDeviceMemory _memory,
1924 VkDeviceSize offset,
1925 VkDeviceSize size,
1926 VkMemoryMapFlags flags,
1927 void **ppData)
1928 {
1929 TU_FROM_HANDLE(tu_device, device, _device);
1930 TU_FROM_HANDLE(tu_device_memory, mem, _memory);
1931 VkResult result;
1932
1933 if (mem == NULL) {
1934 *ppData = NULL;
1935 return VK_SUCCESS;
1936 }
1937
1938 if (mem->user_ptr) {
1939 *ppData = mem->user_ptr;
1940 } else if (!mem->map) {
1941 result = tu_bo_map(device, &mem->bo);
1942 if (result != VK_SUCCESS)
1943 return result;
1944 *ppData = mem->map = mem->bo.map;
1945 } else
1946 *ppData = mem->map;
1947
1948 if (*ppData) {
1949 *ppData += offset;
1950 return VK_SUCCESS;
1951 }
1952
1953 return vk_error(device->instance, VK_ERROR_MEMORY_MAP_FAILED);
1954 }
1955
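/* Illustrative application-side use of the mapping path above; the CPU
 * pointer is cached in mem->map, so repeated maps are cheap.  `device`,
 * `mem`, `data` and `data_size` are assumptions:
 *
 *    void *ptr;
 *    if (vkMapMemory(device, mem, 0, VK_WHOLE_SIZE, 0, &ptr) == VK_SUCCESS) {
 *       memcpy(ptr, data, data_size);
 *       vkUnmapMemory(device, mem);   // a no-op in this driver, see below
 *    }
 */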
1956 void
1957 tu_UnmapMemory(VkDevice _device, VkDeviceMemory _memory)
1958 {
1959 /* I do not see any unmapping done by the freedreno Gallium driver. */
1960 }
1961
1962 VkResult
1963 tu_FlushMappedMemoryRanges(VkDevice _device,
1964 uint32_t memoryRangeCount,
1965 const VkMappedMemoryRange *pMemoryRanges)
1966 {
1967 return VK_SUCCESS;
1968 }
1969
1970 VkResult
1971 tu_InvalidateMappedMemoryRanges(VkDevice _device,
1972 uint32_t memoryRangeCount,
1973 const VkMappedMemoryRange *pMemoryRanges)
1974 {
1975 return VK_SUCCESS;
1976 }
1977
1978 void
1979 tu_GetBufferMemoryRequirements(VkDevice _device,
1980 VkBuffer _buffer,
1981 VkMemoryRequirements *pMemoryRequirements)
1982 {
1983 TU_FROM_HANDLE(tu_buffer, buffer, _buffer);
1984
1985 pMemoryRequirements->memoryTypeBits = 1;
1986 pMemoryRequirements->alignment = 64;
1987 pMemoryRequirements->size =
1988 align64(buffer->size, pMemoryRequirements->alignment);
1989 }
1990
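/* align64() rounds up to the next multiple of the alignment; e.g. a
 * 100-byte buffer reports a 128-byte size here, since
 * (100 + 63) & ~63 = 128.
 */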
1991 void
1992 tu_GetBufferMemoryRequirements2(
1993 VkDevice device,
1994 const VkBufferMemoryRequirementsInfo2 *pInfo,
1995 VkMemoryRequirements2 *pMemoryRequirements)
1996 {
1997 tu_GetBufferMemoryRequirements(device, pInfo->buffer,
1998 &pMemoryRequirements->memoryRequirements);
1999 }
2000
2001 void
2002 tu_GetImageMemoryRequirements(VkDevice _device,
2003 VkImage _image,
2004 VkMemoryRequirements *pMemoryRequirements)
2005 {
2006 TU_FROM_HANDLE(tu_image, image, _image);
2007
2008 pMemoryRequirements->memoryTypeBits = 1;
2009 pMemoryRequirements->size = image->total_size;
2010 pMemoryRequirements->alignment = image->layout[0].base_align;
2011 }
2012
2013 void
2014 tu_GetImageMemoryRequirements2(VkDevice device,
2015 const VkImageMemoryRequirementsInfo2 *pInfo,
2016 VkMemoryRequirements2 *pMemoryRequirements)
2017 {
2018 tu_GetImageMemoryRequirements(device, pInfo->image,
2019 &pMemoryRequirements->memoryRequirements);
2020 }
2021
2022 void
2023 tu_GetImageSparseMemoryRequirements(
2024 VkDevice device,
2025 VkImage image,
2026 uint32_t *pSparseMemoryRequirementCount,
2027 VkSparseImageMemoryRequirements *pSparseMemoryRequirements)
2028 {
2029 tu_stub();
2030 }
2031
2032 void
2033 tu_GetImageSparseMemoryRequirements2(
2034 VkDevice device,
2035 const VkImageSparseMemoryRequirementsInfo2 *pInfo,
2036 uint32_t *pSparseMemoryRequirementCount,
2037 VkSparseImageMemoryRequirements2 *pSparseMemoryRequirements)
2038 {
2039 tu_stub();
2040 }
2041
2042 void
2043 tu_GetDeviceMemoryCommitment(VkDevice device,
2044 VkDeviceMemory memory,
2045 VkDeviceSize *pCommittedMemoryInBytes)
2046 {
2047 *pCommittedMemoryInBytes = 0;
2048 }
2049
2050 VkResult
2051 tu_BindBufferMemory2(VkDevice device,
2052 uint32_t bindInfoCount,
2053 const VkBindBufferMemoryInfo *pBindInfos)
2054 {
2055 for (uint32_t i = 0; i < bindInfoCount; ++i) {
2056 TU_FROM_HANDLE(tu_device_memory, mem, pBindInfos[i].memory);
2057 TU_FROM_HANDLE(tu_buffer, buffer, pBindInfos[i].buffer);
2058
2059 if (mem) {
2060 buffer->bo = &mem->bo;
2061 buffer->bo_offset = pBindInfos[i].memoryOffset;
2062 } else {
2063 buffer->bo = NULL;
2064 }
2065 }
2066 return VK_SUCCESS;
2067 }
2068
2069 VkResult
2070 tu_BindBufferMemory(VkDevice device,
2071 VkBuffer buffer,
2072 VkDeviceMemory memory,
2073 VkDeviceSize memoryOffset)
2074 {
2075 const VkBindBufferMemoryInfo info = {
2076 .sType = VK_STRUCTURE_TYPE_BIND_BUFFER_MEMORY_INFO,
2077 .buffer = buffer,
2078 .memory = memory,
2079 .memoryOffset = memoryOffset
2080 };
2081
2082 return tu_BindBufferMemory2(device, 1, &info);
2083 }
2084
2085 VkResult
2086 tu_BindImageMemory2(VkDevice device,
2087 uint32_t bindInfoCount,
2088 const VkBindImageMemoryInfo *pBindInfos)
2089 {
2090 for (uint32_t i = 0; i < bindInfoCount; ++i) {
2091 TU_FROM_HANDLE(tu_image, image, pBindInfos[i].image);
2092 TU_FROM_HANDLE(tu_device_memory, mem, pBindInfos[i].memory);
2093
2094 if (mem) {
2095 image->bo = &mem->bo;
2096 image->bo_offset = pBindInfos[i].memoryOffset;
2097 } else {
2098 image->bo = NULL;
2099 image->bo_offset = 0;
2100 }
2101 }
2102
2103 return VK_SUCCESS;
2104 }
2105
2106 VkResult
2107 tu_BindImageMemory(VkDevice device,
2108 VkImage image,
2109 VkDeviceMemory memory,
2110 VkDeviceSize memoryOffset)
2111 {
2112 const VkBindImageMemoryInfo info = {
2113       .sType = VK_STRUCTURE_TYPE_BIND_IMAGE_MEMORY_INFO,
2114 .image = image,
2115 .memory = memory,
2116 .memoryOffset = memoryOffset
2117 };
2118
2119 return tu_BindImageMemory2(device, 1, &info);
2120 }
2121
2122 VkResult
2123 tu_QueueBindSparse(VkQueue _queue,
2124 uint32_t bindInfoCount,
2125 const VkBindSparseInfo *pBindInfo,
2126 VkFence _fence)
2127 {
2128 return VK_SUCCESS;
2129 }
2130
2131 // Queue semaphore functions
2132
2133
2134 static void
2135 tu_semaphore_part_destroy(struct tu_device *device,
2136 struct tu_semaphore_part *part)
2137 {
2138 switch(part->kind) {
2139 case TU_SEMAPHORE_NONE:
2140 break;
2141 case TU_SEMAPHORE_SYNCOBJ:
2142 drmSyncobjDestroy(device->physical_device->local_fd, part->syncobj);
2143 break;
2144 }
2145 part->kind = TU_SEMAPHORE_NONE;
2146 }
2147
2148 static void
2149 tu_semaphore_remove_temp(struct tu_device *device,
2150 struct tu_semaphore *sem)
2151 {
2152 if (sem->temporary.kind != TU_SEMAPHORE_NONE) {
2153 tu_semaphore_part_destroy(device, &sem->temporary);
2154 }
2155 }
2156
2157 VkResult
2158 tu_CreateSemaphore(VkDevice _device,
2159 const VkSemaphoreCreateInfo *pCreateInfo,
2160 const VkAllocationCallbacks *pAllocator,
2161 VkSemaphore *pSemaphore)
2162 {
2163 TU_FROM_HANDLE(tu_device, device, _device);
2164
2165 struct tu_semaphore *sem =
2166 vk_object_alloc(&device->vk, pAllocator, sizeof(*sem),
2167 VK_OBJECT_TYPE_SEMAPHORE);
2168 if (!sem)
2169 return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
2170
2171 const VkExportSemaphoreCreateInfo *export =
2172 vk_find_struct_const(pCreateInfo->pNext, EXPORT_SEMAPHORE_CREATE_INFO);
2173 VkExternalSemaphoreHandleTypeFlags handleTypes =
2174 export ? export->handleTypes : 0;
2175
2176 sem->permanent.kind = TU_SEMAPHORE_NONE;
2177 sem->temporary.kind = TU_SEMAPHORE_NONE;
2178
2179 if (handleTypes) {
2180 if (drmSyncobjCreate(device->physical_device->local_fd, 0, &sem->permanent.syncobj) < 0) {
2181          vk_object_free(&device->vk, pAllocator, sem);
2182 return VK_ERROR_OUT_OF_HOST_MEMORY;
2183 }
2184 sem->permanent.kind = TU_SEMAPHORE_SYNCOBJ;
2185 }
2186 *pSemaphore = tu_semaphore_to_handle(sem);
2187 return VK_SUCCESS;
2188 }
2189
2190 void
2191 tu_DestroySemaphore(VkDevice _device,
2192 VkSemaphore _semaphore,
2193 const VkAllocationCallbacks *pAllocator)
2194 {
2195 TU_FROM_HANDLE(tu_device, device, _device);
2196 TU_FROM_HANDLE(tu_semaphore, sem, _semaphore);
2197 if (!_semaphore)
2198 return;
2199
2200 tu_semaphore_part_destroy(device, &sem->permanent);
2201 tu_semaphore_part_destroy(device, &sem->temporary);
2202
2203 vk_object_free(&device->vk, pAllocator, sem);
2204 }
2205
2206 VkResult
2207 tu_CreateEvent(VkDevice _device,
2208 const VkEventCreateInfo *pCreateInfo,
2209 const VkAllocationCallbacks *pAllocator,
2210 VkEvent *pEvent)
2211 {
2212 TU_FROM_HANDLE(tu_device, device, _device);
2213
2214 struct tu_event *event =
2215 vk_object_alloc(&device->vk, pAllocator, sizeof(*event),
2216 VK_OBJECT_TYPE_EVENT);
2217 if (!event)
2218 return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
2219
2220 VkResult result = tu_bo_init_new(device, &event->bo, 0x1000);
2221 if (result != VK_SUCCESS)
2222 goto fail_alloc;
2223
2224 result = tu_bo_map(device, &event->bo);
2225 if (result != VK_SUCCESS)
2226 goto fail_map;
2227
2228 *pEvent = tu_event_to_handle(event);
2229
2230 return VK_SUCCESS;
2231
2232 fail_map:
2233 tu_bo_finish(device, &event->bo);
2234 fail_alloc:
2235 vk_object_free(&device->vk, pAllocator, event);
2236 return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
2237 }
2238
2239 void
2240 tu_DestroyEvent(VkDevice _device,
2241 VkEvent _event,
2242 const VkAllocationCallbacks *pAllocator)
2243 {
2244 TU_FROM_HANDLE(tu_device, device, _device);
2245 TU_FROM_HANDLE(tu_event, event, _event);
2246
2247 if (!event)
2248 return;
2249
2250 tu_bo_finish(device, &event->bo);
2251 vk_object_free(&device->vk, pAllocator, event);
2252 }
2253
2254 VkResult
2255 tu_GetEventStatus(VkDevice _device, VkEvent _event)
2256 {
2257 TU_FROM_HANDLE(tu_event, event, _event);
2258
2259 if (*(uint64_t*) event->bo.map == 1)
2260 return VK_EVENT_SET;
2261 return VK_EVENT_RESET;
2262 }
2263
2264 VkResult
2265 tu_SetEvent(VkDevice _device, VkEvent _event)
2266 {
2267 TU_FROM_HANDLE(tu_event, event, _event);
2268 *(uint64_t*) event->bo.map = 1;
2269
2270 return VK_SUCCESS;
2271 }
2272
2273 VkResult
2274 tu_ResetEvent(VkDevice _device, VkEvent _event)
2275 {
2276 TU_FROM_HANDLE(tu_event, event, _event);
2277 *(uint64_t*) event->bo.map = 0;
2278
2279 return VK_SUCCESS;
2280 }
2281
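/* The event is a single 64-bit word in its own 4 KiB BO, so the host-side
 * entry points above are plain memory accesses.  Illustrative use from the
 * application (handles assumed):
 *
 *    vkSetEvent(device, event);
 *    if (vkGetEventStatus(device, event) == VK_EVENT_SET)
 *       vkResetEvent(device, event);
 */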
2282 VkResult
2283 tu_CreateBuffer(VkDevice _device,
2284 const VkBufferCreateInfo *pCreateInfo,
2285 const VkAllocationCallbacks *pAllocator,
2286 VkBuffer *pBuffer)
2287 {
2288 TU_FROM_HANDLE(tu_device, device, _device);
2289 struct tu_buffer *buffer;
2290
2291 assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO);
2292
2293 buffer = vk_object_alloc(&device->vk, pAllocator, sizeof(*buffer),
2294 VK_OBJECT_TYPE_BUFFER);
2295 if (buffer == NULL)
2296 return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
2297
2298 buffer->size = pCreateInfo->size;
2299 buffer->usage = pCreateInfo->usage;
2300 buffer->flags = pCreateInfo->flags;
2301
2302 *pBuffer = tu_buffer_to_handle(buffer);
2303
2304 return VK_SUCCESS;
2305 }
2306
2307 void
2308 tu_DestroyBuffer(VkDevice _device,
2309 VkBuffer _buffer,
2310 const VkAllocationCallbacks *pAllocator)
2311 {
2312 TU_FROM_HANDLE(tu_device, device, _device);
2313 TU_FROM_HANDLE(tu_buffer, buffer, _buffer);
2314
2315 if (!buffer)
2316 return;
2317
2318 vk_object_free(&device->vk, pAllocator, buffer);
2319 }
2320
2321 VkResult
2322 tu_CreateFramebuffer(VkDevice _device,
2323 const VkFramebufferCreateInfo *pCreateInfo,
2324 const VkAllocationCallbacks *pAllocator,
2325 VkFramebuffer *pFramebuffer)
2326 {
2327 TU_FROM_HANDLE(tu_device, device, _device);
2328 TU_FROM_HANDLE(tu_render_pass, pass, pCreateInfo->renderPass);
2329 struct tu_framebuffer *framebuffer;
2330
2331 assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO);
2332
2333 size_t size = sizeof(*framebuffer) + sizeof(struct tu_attachment_info) *
2334 pCreateInfo->attachmentCount;
2335 framebuffer = vk_object_alloc(&device->vk, pAllocator, size,
2336 VK_OBJECT_TYPE_FRAMEBUFFER);
2337 if (framebuffer == NULL)
2338 return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
2339
2340 framebuffer->attachment_count = pCreateInfo->attachmentCount;
2341 framebuffer->width = pCreateInfo->width;
2342 framebuffer->height = pCreateInfo->height;
2343 framebuffer->layers = pCreateInfo->layers;
2344 for (uint32_t i = 0; i < pCreateInfo->attachmentCount; i++) {
2345 VkImageView _iview = pCreateInfo->pAttachments[i];
2346 struct tu_image_view *iview = tu_image_view_from_handle(_iview);
2347 framebuffer->attachments[i].attachment = iview;
2348 }
2349
2350 tu_framebuffer_tiling_config(framebuffer, device, pass);
2351
2352 *pFramebuffer = tu_framebuffer_to_handle(framebuffer);
2353 return VK_SUCCESS;
2354 }
2355
2356 void
2357 tu_DestroyFramebuffer(VkDevice _device,
2358 VkFramebuffer _fb,
2359 const VkAllocationCallbacks *pAllocator)
2360 {
2361 TU_FROM_HANDLE(tu_device, device, _device);
2362 TU_FROM_HANDLE(tu_framebuffer, fb, _fb);
2363
2364 if (!fb)
2365 return;
2366
2367 vk_object_free(&device->vk, pAllocator, fb);
2368 }
2369
2370 static void
2371 tu_init_sampler(struct tu_device *device,
2372 struct tu_sampler *sampler,
2373 const VkSamplerCreateInfo *pCreateInfo)
2374 {
2375 const struct VkSamplerReductionModeCreateInfo *reduction =
2376 vk_find_struct_const(pCreateInfo->pNext, SAMPLER_REDUCTION_MODE_CREATE_INFO);
2377 const struct VkSamplerYcbcrConversionInfo *ycbcr_conversion =
2378 vk_find_struct_const(pCreateInfo->pNext, SAMPLER_YCBCR_CONVERSION_INFO);
2379
2380 unsigned aniso = pCreateInfo->anisotropyEnable ?
2381 util_last_bit(MIN2((uint32_t)pCreateInfo->maxAnisotropy >> 1, 8)) : 0;
2382 bool miplinear = (pCreateInfo->mipmapMode == VK_SAMPLER_MIPMAP_MODE_LINEAR);
2383 float min_lod = CLAMP(pCreateInfo->minLod, 0.0f, 4095.0f / 256.0f);
2384 float max_lod = CLAMP(pCreateInfo->maxLod, 0.0f, 4095.0f / 256.0f);
2385
2386 sampler->descriptor[0] =
2387 COND(miplinear, A6XX_TEX_SAMP_0_MIPFILTER_LINEAR_NEAR) |
2388 A6XX_TEX_SAMP_0_XY_MAG(tu6_tex_filter(pCreateInfo->magFilter, aniso)) |
2389 A6XX_TEX_SAMP_0_XY_MIN(tu6_tex_filter(pCreateInfo->minFilter, aniso)) |
2390 A6XX_TEX_SAMP_0_ANISO(aniso) |
2391 A6XX_TEX_SAMP_0_WRAP_S(tu6_tex_wrap(pCreateInfo->addressModeU)) |
2392 A6XX_TEX_SAMP_0_WRAP_T(tu6_tex_wrap(pCreateInfo->addressModeV)) |
2393 A6XX_TEX_SAMP_0_WRAP_R(tu6_tex_wrap(pCreateInfo->addressModeW)) |
2394 A6XX_TEX_SAMP_0_LOD_BIAS(pCreateInfo->mipLodBias);
2395 sampler->descriptor[1] =
2396 /* COND(!cso->seamless_cube_map, A6XX_TEX_SAMP_1_CUBEMAPSEAMLESSFILTOFF) | */
2397 COND(pCreateInfo->unnormalizedCoordinates, A6XX_TEX_SAMP_1_UNNORM_COORDS) |
2398 A6XX_TEX_SAMP_1_MIN_LOD(min_lod) |
2399 A6XX_TEX_SAMP_1_MAX_LOD(max_lod) |
2400 COND(pCreateInfo->compareEnable,
2401 A6XX_TEX_SAMP_1_COMPARE_FUNC(tu6_compare_func(pCreateInfo->compareOp)));
2402 /* This is an offset into the border_color BO, which we fill with all the
2403 * possible Vulkan border colors in the correct order, so we can just use
2404 * the Vulkan enum with no translation necessary.
2405 */
2406 sampler->descriptor[2] =
2407 A6XX_TEX_SAMP_2_BCOLOR_OFFSET((unsigned) pCreateInfo->borderColor *
2408 sizeof(struct bcolor_entry));
2409 sampler->descriptor[3] = 0;
2410
2411 if (reduction) {
2412 sampler->descriptor[2] |= A6XX_TEX_SAMP_2_REDUCTION_MODE(
2413 tu6_reduction_mode(reduction->reductionMode));
2414 }
2415
2416 sampler->ycbcr_sampler = ycbcr_conversion ?
2417 tu_sampler_ycbcr_conversion_from_handle(ycbcr_conversion->conversion) : NULL;
2418
2419 if (sampler->ycbcr_sampler &&
2420 sampler->ycbcr_sampler->chroma_filter == VK_FILTER_LINEAR) {
2421 sampler->descriptor[2] |= A6XX_TEX_SAMP_2_CHROMA_LINEAR;
2422 }
2423
2424 /* TODO:
2425 * A6XX_TEX_SAMP_1_MIPFILTER_LINEAR_FAR disables mipmapping, but vk has no NONE mipfilter?
2426 */
2427 }
2428
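/* The 4095.0/256.0 clamp above suggests the hardware LOD fields are
 * unsigned fixed point with 8 fractional bits (presumably packed by the
 * A6XX_TEX_SAMP_1_MIN/MAX_LOD macros); a hedged sketch of that encoding:
 *
 *    static inline uint16_t
 *    tu_lod_to_u4_8(float lod)   // hypothetical helper, not driver API
 *    {
 *       return (uint16_t) (CLAMP(lod, 0.0f, 4095.0f / 256.0f) * 256.0f);
 *    }
 */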
2429 VkResult
2430 tu_CreateSampler(VkDevice _device,
2431 const VkSamplerCreateInfo *pCreateInfo,
2432 const VkAllocationCallbacks *pAllocator,
2433 VkSampler *pSampler)
2434 {
2435 TU_FROM_HANDLE(tu_device, device, _device);
2436 struct tu_sampler *sampler;
2437
2438 assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_SAMPLER_CREATE_INFO);
2439
2440 sampler = vk_object_alloc(&device->vk, pAllocator, sizeof(*sampler),
2441 VK_OBJECT_TYPE_SAMPLER);
2442 if (!sampler)
2443 return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
2444
2445 tu_init_sampler(device, sampler, pCreateInfo);
2446 *pSampler = tu_sampler_to_handle(sampler);
2447
2448 return VK_SUCCESS;
2449 }
2450
2451 void
2452 tu_DestroySampler(VkDevice _device,
2453 VkSampler _sampler,
2454 const VkAllocationCallbacks *pAllocator)
2455 {
2456 TU_FROM_HANDLE(tu_device, device, _device);
2457 TU_FROM_HANDLE(tu_sampler, sampler, _sampler);
2458
2459 if (!sampler)
2460 return;
2461
2462 vk_object_free(&device->vk, pAllocator, sampler);
2463 }
2464
2465 /* vk_icd.h does not declare this function, so we declare it here to
2466 * suppress Wmissing-prototypes.
2467 */
2468 PUBLIC VKAPI_ATTR VkResult VKAPI_CALL
2469 vk_icdNegotiateLoaderICDInterfaceVersion(uint32_t *pSupportedVersion);
2470
2471 PUBLIC VKAPI_ATTR VkResult VKAPI_CALL
2472 vk_icdNegotiateLoaderICDInterfaceVersion(uint32_t *pSupportedVersion)
2473 {
2474 /* For the full details on loader interface versioning, see
2475 * <https://github.com/KhronosGroup/Vulkan-LoaderAndValidationLayers/blob/master/loader/LoaderAndLayerInterface.md>.
2476 * What follows is a condensed summary, to help you navigate the large and
2477 * confusing official doc.
2478 *
2479 * - Loader interface v0 is incompatible with later versions. We don't
2480 * support it.
2481 *
2482 * - In loader interface v1:
2483 * - The first ICD entrypoint called by the loader is
2484 * vk_icdGetInstanceProcAddr(). The ICD must statically expose this
2485 * entrypoint.
2486 * - The ICD must statically expose no other Vulkan symbol unless it
2487 * is linked with -Bsymbolic.
2488 * - Each dispatchable Vulkan handle created by the ICD must be
2489 * a pointer to a struct whose first member is VK_LOADER_DATA. The
2490 * ICD must initialize VK_LOADER_DATA.loadMagic to
2491 * ICD_LOADER_MAGIC.
2492 * - The loader implements vkCreate{PLATFORM}SurfaceKHR() and
2493 * vkDestroySurfaceKHR(). The ICD must be capable of working with
2494 * such loader-managed surfaces.
2495 *
2496 * - Loader interface v2 differs from v1 in:
2497 * - The first ICD entrypoint called by the loader is
2498 * vk_icdNegotiateLoaderICDInterfaceVersion(). The ICD must
2499 * statically expose this entrypoint.
2500 *
2501 * - Loader interface v3 differs from v2 in:
2502 * - The ICD must implement vkCreate{PLATFORM}SurfaceKHR(),
2503     *    - The ICD must implement vkCreate{PLATFORM}SurfaceKHR(),
2504     *      vkDestroySurfaceKHR(), and other API which uses VkSurfaceKHR,
2504 * because the loader no longer does so.
2505 */
2506 *pSupportedVersion = MIN2(*pSupportedVersion, 3u);
2507 return VK_SUCCESS;
2508 }
2509
2510 VkResult
2511 tu_GetMemoryFdKHR(VkDevice _device,
2512 const VkMemoryGetFdInfoKHR *pGetFdInfo,
2513 int *pFd)
2514 {
2515 TU_FROM_HANDLE(tu_device, device, _device);
2516 TU_FROM_HANDLE(tu_device_memory, memory, pGetFdInfo->memory);
2517
2518 assert(pGetFdInfo->sType == VK_STRUCTURE_TYPE_MEMORY_GET_FD_INFO_KHR);
2519
2520 /* At the moment, we support only the below handle types. */
2521 assert(pGetFdInfo->handleType ==
2522 VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT ||
2523 pGetFdInfo->handleType ==
2524 VK_EXTERNAL_MEMORY_HANDLE_TYPE_DMA_BUF_BIT_EXT);
2525
2526 int prime_fd = tu_bo_export_dmabuf(device, &memory->bo);
2527 if (prime_fd < 0)
2528 return vk_error(device->instance, VK_ERROR_OUT_OF_DEVICE_MEMORY);
2529
2530 *pFd = prime_fd;
2531 return VK_SUCCESS;
2532 }
2533
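/* Illustrative application-side export matching the path above; `device`
 * and `mem` are assumed, and the returned fd is owned by the caller:
 *
 *    VkMemoryGetFdInfoKHR get_fd = {
 *       .sType = VK_STRUCTURE_TYPE_MEMORY_GET_FD_INFO_KHR,
 *       .memory = mem,
 *       .handleType = VK_EXTERNAL_MEMORY_HANDLE_TYPE_DMA_BUF_BIT_EXT,
 *    };
 *    int fd = -1;
 *    VkResult r = vkGetMemoryFdKHR(device, &get_fd, &fd);
 */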
2534 VkResult
2535 tu_GetMemoryFdPropertiesKHR(VkDevice _device,
2536 VkExternalMemoryHandleTypeFlagBits handleType,
2537 int fd,
2538 VkMemoryFdPropertiesKHR *pMemoryFdProperties)
2539 {
2540 assert(handleType == VK_EXTERNAL_MEMORY_HANDLE_TYPE_DMA_BUF_BIT_EXT);
2541 pMemoryFdProperties->memoryTypeBits = 1;
2542 return VK_SUCCESS;
2543 }
2544
2545 VkResult
2546 tu_ImportFenceFdKHR(VkDevice _device,
2547 const VkImportFenceFdInfoKHR *pImportFenceFdInfo)
2548 {
2549 tu_stub();
2550
2551 return VK_SUCCESS;
2552 }
2553
2554 VkResult
2555 tu_GetFenceFdKHR(VkDevice _device,
2556 const VkFenceGetFdInfoKHR *pGetFdInfo,
2557 int *pFd)
2558 {
2559 tu_stub();
2560
2561 return VK_SUCCESS;
2562 }
2563
2564 VkResult
2565 tu_ImportSemaphoreFdKHR(VkDevice _device,
2566 const VkImportSemaphoreFdInfoKHR *pImportSemaphoreFdInfo)
2567 {
2568 TU_FROM_HANDLE(tu_device, device, _device);
2569 TU_FROM_HANDLE(tu_semaphore, sem, pImportSemaphoreFdInfo->semaphore);
2570 int ret;
2571 struct tu_semaphore_part *dst = NULL;
2572
2573 if (pImportSemaphoreFdInfo->flags & VK_SEMAPHORE_IMPORT_TEMPORARY_BIT) {
2574 dst = &sem->temporary;
2575 } else {
2576 dst = &sem->permanent;
2577 }
2578
2579 uint32_t syncobj = dst->kind == TU_SEMAPHORE_SYNCOBJ ? dst->syncobj : 0;
2580
2581 switch(pImportSemaphoreFdInfo->handleType) {
2582 case VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT: {
2583 uint32_t old_syncobj = syncobj;
2584 ret = drmSyncobjFDToHandle(device->physical_device->local_fd, pImportSemaphoreFdInfo->fd, &syncobj);
2585 if (ret == 0) {
2586 close(pImportSemaphoreFdInfo->fd);
2587 if (old_syncobj)
2588 drmSyncobjDestroy(device->physical_device->local_fd, old_syncobj);
2589 }
2590 break;
2591 }
2592 case VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT: {
2593 if (!syncobj) {
2594 ret = drmSyncobjCreate(device->physical_device->local_fd, 0, &syncobj);
2595 if (ret)
2596 break;
2597 }
2598 if (pImportSemaphoreFdInfo->fd == -1) {
2599 ret = drmSyncobjSignal(device->physical_device->local_fd, &syncobj, 1);
2600 } else {
2601 ret = drmSyncobjImportSyncFile(device->physical_device->local_fd, syncobj, pImportSemaphoreFdInfo->fd);
2602 }
2603 if (!ret)
2604 close(pImportSemaphoreFdInfo->fd);
2605 break;
2606 }
2607 default:
2608 unreachable("Unhandled semaphore handle type");
2609 }
2610
2611 if (ret) {
2612 return VK_ERROR_INVALID_EXTERNAL_HANDLE;
2613 }
2614 dst->syncobj = syncobj;
2615 dst->kind = TU_SEMAPHORE_SYNCOBJ;
2616
2617 return VK_SUCCESS;
2618 }
2619
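/* A hedged application-side sketch of a temporary sync_fd import through
 * the path above; with VK_SEMAPHORE_IMPORT_TEMPORARY_BIT the payload lands
 * in sem->temporary and is dropped again after the next wait.  `device`,
 * `sem` and `sync_fd` are assumptions:
 *
 *    VkImportSemaphoreFdInfoKHR import = {
 *       .sType = VK_STRUCTURE_TYPE_IMPORT_SEMAPHORE_FD_INFO_KHR,
 *       .semaphore = sem,
 *       .flags = VK_SEMAPHORE_IMPORT_TEMPORARY_BIT,
 *       .handleType = VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT,
 *       .fd = sync_fd,   // -1 means "already signalled", handled above
 *    };
 *    VkResult r = vkImportSemaphoreFdKHR(device, &import);
 */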
2620 VkResult
2621 tu_GetSemaphoreFdKHR(VkDevice _device,
2622 const VkSemaphoreGetFdInfoKHR *pGetFdInfo,
2623 int *pFd)
2624 {
2625 TU_FROM_HANDLE(tu_device, device, _device);
2626 TU_FROM_HANDLE(tu_semaphore, sem, pGetFdInfo->semaphore);
2627 int ret;
2628 uint32_t syncobj_handle;
2629
2630 if (sem->temporary.kind != TU_SEMAPHORE_NONE) {
2631 assert(sem->temporary.kind == TU_SEMAPHORE_SYNCOBJ);
2632 syncobj_handle = sem->temporary.syncobj;
2633 } else {
2634 assert(sem->permanent.kind == TU_SEMAPHORE_SYNCOBJ);
2635 syncobj_handle = sem->permanent.syncobj;
2636 }
2637
2638 switch(pGetFdInfo->handleType) {
2639 case VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT:
2640 ret = drmSyncobjHandleToFD(device->physical_device->local_fd, syncobj_handle, pFd);
2641 break;
2642 case VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT:
2643 ret = drmSyncobjExportSyncFile(device->physical_device->local_fd, syncobj_handle, pFd);
2644 if (!ret) {
2645 if (sem->temporary.kind != TU_SEMAPHORE_NONE) {
2646 tu_semaphore_part_destroy(device, &sem->temporary);
2647 } else {
2648 drmSyncobjReset(device->physical_device->local_fd, &syncobj_handle, 1);
2649 }
2650 }
2651 break;
2652 default:
2653 unreachable("Unhandled semaphore handle type");
2654 }
2655
2656 if (ret)
2657 return vk_error(device->instance, VK_ERROR_INVALID_EXTERNAL_HANDLE);
2658 return VK_SUCCESS;
2659 }
2660
2661
2662 static bool tu_has_syncobj(struct tu_physical_device *pdev)
2663 {
2664 uint64_t value;
2665 if (drmGetCap(pdev->local_fd, DRM_CAP_SYNCOBJ, &value))
2666 return false;
2667 return value && pdev->msm_major_version == 1 && pdev->msm_minor_version >= 6;
2668 }
2669
2670 void
2671 tu_GetPhysicalDeviceExternalSemaphoreProperties(
2672 VkPhysicalDevice physicalDevice,
2673 const VkPhysicalDeviceExternalSemaphoreInfo *pExternalSemaphoreInfo,
2674 VkExternalSemaphoreProperties *pExternalSemaphoreProperties)
2675 {
2676 TU_FROM_HANDLE(tu_physical_device, pdev, physicalDevice);
2677
2678 if (tu_has_syncobj(pdev) &&
2679 (pExternalSemaphoreInfo->handleType == VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT ||
2680 pExternalSemaphoreInfo->handleType == VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT)) {
2681 pExternalSemaphoreProperties->exportFromImportedHandleTypes = VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT | VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT;
2682 pExternalSemaphoreProperties->compatibleHandleTypes = VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT | VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT;
2683 pExternalSemaphoreProperties->externalSemaphoreFeatures = VK_EXTERNAL_SEMAPHORE_FEATURE_EXPORTABLE_BIT |
2684 VK_EXTERNAL_SEMAPHORE_FEATURE_IMPORTABLE_BIT;
2685 } else {
2686 pExternalSemaphoreProperties->exportFromImportedHandleTypes = 0;
2687 pExternalSemaphoreProperties->compatibleHandleTypes = 0;
2688 pExternalSemaphoreProperties->externalSemaphoreFeatures = 0;
2689 }
2690 }
2691
2692 void
2693 tu_GetPhysicalDeviceExternalFenceProperties(
2694 VkPhysicalDevice physicalDevice,
2695 const VkPhysicalDeviceExternalFenceInfo *pExternalFenceInfo,
2696 VkExternalFenceProperties *pExternalFenceProperties)
2697 {
2698 pExternalFenceProperties->exportFromImportedHandleTypes = 0;
2699 pExternalFenceProperties->compatibleHandleTypes = 0;
2700 pExternalFenceProperties->externalFenceFeatures = 0;
2701 }
2702
2703 VkResult
2704 tu_CreateDebugReportCallbackEXT(
2705 VkInstance _instance,
2706 const VkDebugReportCallbackCreateInfoEXT *pCreateInfo,
2707 const VkAllocationCallbacks *pAllocator,
2708 VkDebugReportCallbackEXT *pCallback)
2709 {
2710 TU_FROM_HANDLE(tu_instance, instance, _instance);
2711 return vk_create_debug_report_callback(&instance->debug_report_callbacks,
2712 pCreateInfo, pAllocator,
2713 &instance->alloc, pCallback);
2714 }
2715
2716 void
2717 tu_DestroyDebugReportCallbackEXT(VkInstance _instance,
2718 VkDebugReportCallbackEXT _callback,
2719 const VkAllocationCallbacks *pAllocator)
2720 {
2721 TU_FROM_HANDLE(tu_instance, instance, _instance);
2722 vk_destroy_debug_report_callback(&instance->debug_report_callbacks,
2723 _callback, pAllocator, &instance->alloc);
2724 }
2725
2726 void
2727 tu_DebugReportMessageEXT(VkInstance _instance,
2728 VkDebugReportFlagsEXT flags,
2729 VkDebugReportObjectTypeEXT objectType,
2730 uint64_t object,
2731 size_t location,
2732 int32_t messageCode,
2733 const char *pLayerPrefix,
2734 const char *pMessage)
2735 {
2736 TU_FROM_HANDLE(tu_instance, instance, _instance);
2737 vk_debug_report(&instance->debug_report_callbacks, flags, objectType,
2738 object, location, messageCode, pLayerPrefix, pMessage);
2739 }
2740
2741 void
2742 tu_GetDeviceGroupPeerMemoryFeatures(
2743 VkDevice device,
2744 uint32_t heapIndex,
2745 uint32_t localDeviceIndex,
2746 uint32_t remoteDeviceIndex,
2747 VkPeerMemoryFeatureFlags *pPeerMemoryFeatures)
2748 {
2749 assert(localDeviceIndex == remoteDeviceIndex);
2750
2751 *pPeerMemoryFeatures = VK_PEER_MEMORY_FEATURE_COPY_SRC_BIT |
2752 VK_PEER_MEMORY_FEATURE_COPY_DST_BIT |
2753 VK_PEER_MEMORY_FEATURE_GENERIC_SRC_BIT |
2754 VK_PEER_MEMORY_FEATURE_GENERIC_DST_BIT;
2755 }
2756
2757 void tu_GetPhysicalDeviceMultisamplePropertiesEXT(
2758 VkPhysicalDevice physicalDevice,
2759 VkSampleCountFlagBits samples,
2760 VkMultisamplePropertiesEXT* pMultisampleProperties)
2761 {
2762 TU_FROM_HANDLE(tu_physical_device, pdevice, physicalDevice);
2763
2764 if (samples <= VK_SAMPLE_COUNT_4_BIT && pdevice->supported_extensions.EXT_sample_locations)
2765 pMultisampleProperties->maxSampleLocationGridSize = (VkExtent2D){ 1, 1 };
2766 else
2767 pMultisampleProperties->maxSampleLocationGridSize = (VkExtent2D){ 0, 0 };
2768 }
2769
2770
2771 VkResult
2772 tu_CreatePrivateDataSlotEXT(VkDevice _device,
2773 const VkPrivateDataSlotCreateInfoEXT* pCreateInfo,
2774 const VkAllocationCallbacks* pAllocator,
2775 VkPrivateDataSlotEXT* pPrivateDataSlot)
2776 {
2777 TU_FROM_HANDLE(tu_device, device, _device);
2778 return vk_private_data_slot_create(&device->vk,
2779 pCreateInfo,
2780 pAllocator,
2781 pPrivateDataSlot);
2782 }
2783
2784 void
2785 tu_DestroyPrivateDataSlotEXT(VkDevice _device,
2786 VkPrivateDataSlotEXT privateDataSlot,
2787 const VkAllocationCallbacks* pAllocator)
2788 {
2789 TU_FROM_HANDLE(tu_device, device, _device);
2790 vk_private_data_slot_destroy(&device->vk, privateDataSlot, pAllocator);
2791 }
2792
2793 VkResult
2794 tu_SetPrivateDataEXT(VkDevice _device,
2795 VkObjectType objectType,
2796 uint64_t objectHandle,
2797 VkPrivateDataSlotEXT privateDataSlot,
2798 uint64_t data)
2799 {
2800 TU_FROM_HANDLE(tu_device, device, _device);
2801 return vk_object_base_set_private_data(&device->vk,
2802 objectType,
2803 objectHandle,
2804 privateDataSlot,
2805 data);
2806 }
2807
2808 void
2809 tu_GetPrivateDataEXT(VkDevice _device,
2810 VkObjectType objectType,
2811 uint64_t objectHandle,
2812 VkPrivateDataSlotEXT privateDataSlot,
2813 uint64_t* pData)
2814 {
2815 TU_FROM_HANDLE(tu_device, device, _device);
2816 vk_object_base_get_private_data(&device->vk,
2817 objectType,
2818 objectHandle,
2819 privateDataSlot,
2820 pData);
2821 }