tu: Enable vertex & fragment stores & atomics
src/freedreno/vulkan/tu_device.c
/*
 * Copyright © 2016 Red Hat.
 * Copyright © 2016 Bas Nieuwenhuizen
 *
 * based in part on anv driver which is:
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

#include "tu_private.h"

#include <fcntl.h>
#include <libsync.h>
#include <stdbool.h>
#include <string.h>
#include <sys/mman.h>
#include <sys/sysinfo.h>
#include <unistd.h>
#include <xf86drm.h>

#include "compiler/glsl_types.h"
#include "util/debug.h"
#include "util/disk_cache.h"
#include "util/u_atomic.h"
#include "vk_format.h"
#include "vk_util.h"

#include "drm-uapi/msm_drm.h"

/* for fd_get_driver/device_uuid() */
#include "freedreno/common/freedreno_uuid.h"

static void
tu_semaphore_remove_temp(struct tu_device *device,
                         struct tu_semaphore *sem);

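/* The cache UUID is assembled from the Mesa build timestamp (4 bytes), the
 * GPU family id (2 bytes) and the literal string "tu"; the remaining bytes
 * stay zeroed by the memset below.
 */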
static int
tu_device_get_cache_uuid(uint16_t family, void *uuid)
{
   uint32_t mesa_timestamp;
   uint16_t f = family;
   memset(uuid, 0, VK_UUID_SIZE);
   if (!disk_cache_get_function_timestamp(tu_device_get_cache_uuid,
                                          &mesa_timestamp))
      return -1;

   memcpy(uuid, &mesa_timestamp, 4);
   memcpy((char *) uuid + 4, &f, 2);
   snprintf((char *) uuid + 6, VK_UUID_SIZE - 10, "tu");
   return 0;
}

static VkResult
tu_bo_init(struct tu_device *dev,
           struct tu_bo *bo,
           uint32_t gem_handle,
           uint64_t size)
{
   uint64_t iova = tu_gem_info_iova(dev, gem_handle);
   if (!iova)
      return VK_ERROR_OUT_OF_DEVICE_MEMORY;

   *bo = (struct tu_bo) {
      .gem_handle = gem_handle,
      .size = size,
      .iova = iova,
   };

   return VK_SUCCESS;
}

VkResult
tu_bo_init_new(struct tu_device *dev, struct tu_bo *bo, uint64_t size)
{
   /* TODO: Choose better flags. As of 2018-11-12, freedreno/drm/msm_bo.c
    * always sets `flags = MSM_BO_WC`, and we copy that behavior here.
    */
   uint32_t gem_handle = tu_gem_new(dev, size, MSM_BO_WC);
   if (!gem_handle)
      return vk_error(dev->instance, VK_ERROR_OUT_OF_DEVICE_MEMORY);

   VkResult result = tu_bo_init(dev, bo, gem_handle, size);
   if (result != VK_SUCCESS) {
      tu_gem_close(dev, gem_handle);
      return vk_error(dev->instance, result);
   }

   return VK_SUCCESS;
}

VkResult
tu_bo_init_dmabuf(struct tu_device *dev,
                  struct tu_bo *bo,
                  uint64_t size,
                  int fd)
{
   uint32_t gem_handle = tu_gem_import_dmabuf(dev, fd, size);
   if (!gem_handle)
      return vk_error(dev->instance, VK_ERROR_INVALID_EXTERNAL_HANDLE);

   VkResult result = tu_bo_init(dev, bo, gem_handle, size);
   if (result != VK_SUCCESS) {
      tu_gem_close(dev, gem_handle);
      return vk_error(dev->instance, result);
   }

   return VK_SUCCESS;
}

int
tu_bo_export_dmabuf(struct tu_device *dev, struct tu_bo *bo)
{
   return tu_gem_export_dmabuf(dev, bo->gem_handle);
}

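/* The msm kernel driver hands out a fake mmap offset per GEM handle
 * (queried here through tu_gem_info_offset); mapping that offset on the
 * device fd gives CPU access to the BO.
 */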
VkResult
tu_bo_map(struct tu_device *dev, struct tu_bo *bo)
{
   if (bo->map)
      return VK_SUCCESS;

   uint64_t offset = tu_gem_info_offset(dev, bo->gem_handle);
   if (!offset)
      return vk_error(dev->instance, VK_ERROR_OUT_OF_DEVICE_MEMORY);

   /* TODO: Should we use the wrapper os_mmap() like Freedreno does? */
   void *map = mmap(0, bo->size, PROT_READ | PROT_WRITE, MAP_SHARED,
                    dev->physical_device->local_fd, offset);
   if (map == MAP_FAILED)
      return vk_error(dev->instance, VK_ERROR_MEMORY_MAP_FAILED);

   bo->map = map;
   return VK_SUCCESS;
}

void
tu_bo_finish(struct tu_device *dev, struct tu_bo *bo)
{
   assert(bo->gem_handle);

   if (bo->map)
      munmap(bo->map, bo->size);

   tu_gem_close(dev, bo->gem_handle);
}

static VkResult
tu_physical_device_init(struct tu_physical_device *device,
                        struct tu_instance *instance,
                        drmDevicePtr drm_device)
{
   const char *path = drm_device->nodes[DRM_NODE_RENDER];
   VkResult result = VK_SUCCESS;
   drmVersionPtr version;
   int fd;
   int master_fd = -1;

   fd = open(path, O_RDWR | O_CLOEXEC);
   if (fd < 0) {
      return vk_errorf(instance, VK_ERROR_INCOMPATIBLE_DRIVER,
                       "failed to open device %s", path);
   }

   /* Version 1.3 added MSM_INFO_IOVA. */
   const int min_version_major = 1;
   const int min_version_minor = 3;

   version = drmGetVersion(fd);
   if (!version) {
      close(fd);
      return vk_errorf(instance, VK_ERROR_INCOMPATIBLE_DRIVER,
                       "failed to query kernel driver version for device %s",
                       path);
   }

   if (strcmp(version->name, "msm")) {
      drmFreeVersion(version);
      close(fd);
      return vk_errorf(instance, VK_ERROR_INCOMPATIBLE_DRIVER,
                       "device %s does not use the msm kernel driver", path);
   }

   if (version->version_major != min_version_major ||
       version->version_minor < min_version_minor) {
      result = vk_errorf(instance, VK_ERROR_INCOMPATIBLE_DRIVER,
                         "kernel driver for device %s has version %d.%d, "
                         "but Vulkan requires version >= %d.%d",
                         path, version->version_major, version->version_minor,
                         min_version_major, min_version_minor);
      drmFreeVersion(version);
      close(fd);
      return result;
   }

   device->msm_major_version = version->version_major;
   device->msm_minor_version = version->version_minor;

   drmFreeVersion(version);

   if (instance->debug_flags & TU_DEBUG_STARTUP)
      tu_logi("Found compatible device '%s'.", path);

   vk_object_base_init(NULL, &device->base, VK_OBJECT_TYPE_PHYSICAL_DEVICE);
   device->instance = instance;
   assert(strlen(path) < ARRAY_SIZE(device->path));
   strncpy(device->path, path, ARRAY_SIZE(device->path));

   if (instance->enabled_extensions.KHR_display) {
      master_fd =
         open(drm_device->nodes[DRM_NODE_PRIMARY], O_RDWR | O_CLOEXEC);
      if (master_fd >= 0) {
         /* TODO: free master_fd if accel is not working? */
      }
   }

   device->master_fd = master_fd;
   device->local_fd = fd;

   if (tu_drm_get_gpu_id(device, &device->gpu_id)) {
      if (instance->debug_flags & TU_DEBUG_STARTUP)
         tu_logi("Could not query the GPU ID");
      result = vk_errorf(instance, VK_ERROR_INITIALIZATION_FAILED,
                         "could not get GPU ID");
      goto fail;
   }

   if (tu_drm_get_gmem_size(device, &device->gmem_size)) {
      if (instance->debug_flags & TU_DEBUG_STARTUP)
         tu_logi("Could not query the GMEM size");
      result = vk_errorf(instance, VK_ERROR_INITIALIZATION_FAILED,
                         "could not get GMEM size");
      goto fail;
   }

   if (tu_drm_get_gmem_base(device, &device->gmem_base)) {
      if (instance->debug_flags & TU_DEBUG_STARTUP)
         tu_logi("Could not query the GMEM base");
      result = vk_errorf(instance, VK_ERROR_INITIALIZATION_FAILED,
                         "could not get GMEM base");
      goto fail;
   }

   memset(device->name, 0, sizeof(device->name));
   sprintf(device->name, "FD%d", device->gpu_id);

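   /* Per-GPU tuning values: the CCU offsets are the locations in GMEM used
    * by the color cache unit when rendering to GMEM (gmem) or directly to
    * memory (bypass), tile_align_w is the required tile-width alignment,
    * and the "magic" registers are undocumented values that appear to
    * match what the blob driver programs.
    */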
   switch (device->gpu_id) {
   case 618:
      device->ccu_offset_gmem = 0x7c000; /* 0x7e000 in some cases? */
      device->ccu_offset_bypass = 0x10000;
      device->tile_align_w = 32;
      device->magic.PC_UNKNOWN_9805 = 0x0;
      device->magic.SP_UNKNOWN_A0F8 = 0x0;
      break;
   case 630:
   case 640:
      device->ccu_offset_gmem = 0xf8000;
      device->ccu_offset_bypass = 0x20000;
      device->tile_align_w = 32;
      device->magic.PC_UNKNOWN_9805 = 0x1;
      device->magic.SP_UNKNOWN_A0F8 = 0x1;
      break;
   case 650:
      device->ccu_offset_gmem = 0x114000;
      device->ccu_offset_bypass = 0x30000;
      device->tile_align_w = 96;
      device->magic.PC_UNKNOWN_9805 = 0x2;
      device->magic.SP_UNKNOWN_A0F8 = 0x2;
      break;
   default:
      result = vk_errorf(instance, VK_ERROR_INITIALIZATION_FAILED,
                         "device %s is unsupported", device->name);
      goto fail;
   }
   if (tu_device_get_cache_uuid(device->gpu_id, device->cache_uuid)) {
      result = vk_errorf(instance, VK_ERROR_INITIALIZATION_FAILED,
                         "cannot generate UUID");
      goto fail;
   }

   /* The gpu id is already embedded in the cache uuid, so the device name
    * plus the hex-formatted uuid are enough to identify the cache.
    */
   char buf[VK_UUID_SIZE * 2 + 1];
   disk_cache_format_hex_id(buf, device->cache_uuid, VK_UUID_SIZE * 2);
   device->disk_cache = disk_cache_create(device->name, buf, 0);

   fprintf(stderr, "WARNING: tu is not a conformant vulkan implementation, "
                   "testing use only.\n");

   fd_get_driver_uuid(device->driver_uuid);
   fd_get_device_uuid(device->device_uuid, device->gpu_id);

   tu_physical_device_get_supported_extensions(device, &device->supported_extensions);

   result = tu_wsi_init(device);
   if (result != VK_SUCCESS) {
      vk_error(instance, result);
      goto fail;
   }

   return VK_SUCCESS;

fail:
   close(fd);
   if (master_fd != -1)
      close(master_fd);
   return result;
}

static void
tu_physical_device_finish(struct tu_physical_device *device)
{
   tu_wsi_finish(device);

   disk_cache_destroy(device->disk_cache);
   close(device->local_fd);
   if (device->master_fd != -1)
      close(device->master_fd);

   vk_object_base_finish(&device->base);
}

static VKAPI_ATTR void *
default_alloc_func(void *pUserData,
                   size_t size,
                   size_t align,
                   VkSystemAllocationScope allocationScope)
{
   return malloc(size);
}

static VKAPI_ATTR void *
default_realloc_func(void *pUserData,
                     void *pOriginal,
                     size_t size,
                     size_t align,
                     VkSystemAllocationScope allocationScope)
{
   return realloc(pOriginal, size);
}

static VKAPI_ATTR void
default_free_func(void *pUserData, void *pMemory)
{
   free(pMemory);
}

static const VkAllocationCallbacks default_alloc = {
   .pUserData = NULL,
   .pfnAllocation = default_alloc_func,
   .pfnReallocation = default_realloc_func,
   .pfnFree = default_free_func,
};

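/* Debug flags parsed from the comma-separated TU_DEBUG environment
 * variable in tu_CreateInstance, e.g. TU_DEBUG=startup,nir.
 */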
static const struct debug_control tu_debug_options[] = {
   { "startup", TU_DEBUG_STARTUP },
   { "nir", TU_DEBUG_NIR },
   { "ir3", TU_DEBUG_IR3 },
   { "nobin", TU_DEBUG_NOBIN },
   { "sysmem", TU_DEBUG_SYSMEM },
   { "forcebin", TU_DEBUG_FORCEBIN },
   { "noubwc", TU_DEBUG_NOUBWC },
   { NULL, 0 }
};

const char *
tu_get_debug_option_name(int id)
{
   assert(id < ARRAY_SIZE(tu_debug_options) - 1);
   return tu_debug_options[id].string;
}

static int
tu_get_instance_extension_index(const char *name)
{
   for (unsigned i = 0; i < TU_INSTANCE_EXTENSION_COUNT; ++i) {
      if (strcmp(name, tu_instance_extensions[i].extensionName) == 0)
         return i;
   }
   return -1;
}

VkResult
tu_CreateInstance(const VkInstanceCreateInfo *pCreateInfo,
                  const VkAllocationCallbacks *pAllocator,
                  VkInstance *pInstance)
{
   struct tu_instance *instance;
   VkResult result;

   assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_INSTANCE_CREATE_INFO);

   uint32_t client_version;
   if (pCreateInfo->pApplicationInfo &&
       pCreateInfo->pApplicationInfo->apiVersion != 0) {
      client_version = pCreateInfo->pApplicationInfo->apiVersion;
   } else {
      tu_EnumerateInstanceVersion(&client_version);
   }

   instance = vk_zalloc2(&default_alloc, pAllocator, sizeof(*instance), 8,
                         VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);

   if (!instance)
      return vk_error(NULL, VK_ERROR_OUT_OF_HOST_MEMORY);

   vk_object_base_init(NULL, &instance->base, VK_OBJECT_TYPE_INSTANCE);

   if (pAllocator)
      instance->alloc = *pAllocator;
   else
      instance->alloc = default_alloc;

   instance->api_version = client_version;
   instance->physical_device_count = -1;

   instance->debug_flags =
      parse_debug_string(getenv("TU_DEBUG"), tu_debug_options);

   if (instance->debug_flags & TU_DEBUG_STARTUP)
      tu_logi("Created an instance");

   for (uint32_t i = 0; i < pCreateInfo->enabledExtensionCount; i++) {
      const char *ext_name = pCreateInfo->ppEnabledExtensionNames[i];
      int index = tu_get_instance_extension_index(ext_name);

      if (index < 0 || !tu_instance_extensions_supported.extensions[index]) {
         vk_object_base_finish(&instance->base);
         vk_free2(&default_alloc, pAllocator, instance);
         return vk_error(instance, VK_ERROR_EXTENSION_NOT_PRESENT);
      }

      instance->enabled_extensions.extensions[index] = true;
   }

   result = vk_debug_report_instance_init(&instance->debug_report_callbacks);
   if (result != VK_SUCCESS) {
      vk_object_base_finish(&instance->base);
      vk_free2(&default_alloc, pAllocator, instance);
      return vk_error(instance, result);
   }

   glsl_type_singleton_init_or_ref();

   VG(VALGRIND_CREATE_MEMPOOL(instance, 0, false));

   *pInstance = tu_instance_to_handle(instance);

   return VK_SUCCESS;
}

void
tu_DestroyInstance(VkInstance _instance,
                   const VkAllocationCallbacks *pAllocator)
{
   TU_FROM_HANDLE(tu_instance, instance, _instance);

   if (!instance)
      return;

   for (int i = 0; i < instance->physical_device_count; ++i) {
      tu_physical_device_finish(instance->physical_devices + i);
   }

   VG(VALGRIND_DESTROY_MEMPOOL(instance));

   glsl_type_singleton_decref();

   vk_debug_report_instance_destroy(&instance->debug_report_callbacks);

   vk_object_base_finish(&instance->base);
   vk_free(&instance->alloc, instance);
}

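/* Scan up to eight DRM devices and accept platform-bus render nodes backed
 * by the msm kernel driver; each match becomes a Vulkan physical device.
 */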
static VkResult
tu_enumerate_devices(struct tu_instance *instance)
{
   /* TODO: Check for more devices ? */
   drmDevicePtr devices[8];
   VkResult result = VK_ERROR_INCOMPATIBLE_DRIVER;
   int max_devices;

   instance->physical_device_count = 0;

   max_devices = drmGetDevices2(0, devices, ARRAY_SIZE(devices));

   if (instance->debug_flags & TU_DEBUG_STARTUP) {
      if (max_devices < 0)
         tu_logi("drmGetDevices2 returned error: %s\n", strerror(-max_devices));
      else
         tu_logi("Found %d drm nodes", max_devices);
   }

   if (max_devices < 1)
      return vk_error(instance, VK_ERROR_INCOMPATIBLE_DRIVER);

   for (unsigned i = 0; i < (unsigned) max_devices; i++) {
      if (devices[i]->available_nodes & 1 << DRM_NODE_RENDER &&
          devices[i]->bustype == DRM_BUS_PLATFORM) {

         result = tu_physical_device_init(
            instance->physical_devices + instance->physical_device_count,
            instance, devices[i]);
         if (result == VK_SUCCESS)
            ++instance->physical_device_count;
         else if (result != VK_ERROR_INCOMPATIBLE_DRIVER)
            break;
      }
   }
   drmFreeDevices(devices, max_devices);

   return result;
}

VkResult
tu_EnumeratePhysicalDevices(VkInstance _instance,
                            uint32_t *pPhysicalDeviceCount,
                            VkPhysicalDevice *pPhysicalDevices)
{
   TU_FROM_HANDLE(tu_instance, instance, _instance);
   VK_OUTARRAY_MAKE(out, pPhysicalDevices, pPhysicalDeviceCount);

   VkResult result;

   if (instance->physical_device_count < 0) {
      result = tu_enumerate_devices(instance);
      if (result != VK_SUCCESS && result != VK_ERROR_INCOMPATIBLE_DRIVER)
         return result;
   }

   for (uint32_t i = 0; i < instance->physical_device_count; ++i) {
      vk_outarray_append(&out, p)
      {
         *p = tu_physical_device_to_handle(instance->physical_devices + i);
      }
   }

   return vk_outarray_status(&out);
}

VkResult
tu_EnumeratePhysicalDeviceGroups(
   VkInstance _instance,
   uint32_t *pPhysicalDeviceGroupCount,
   VkPhysicalDeviceGroupProperties *pPhysicalDeviceGroupProperties)
{
   TU_FROM_HANDLE(tu_instance, instance, _instance);
   VK_OUTARRAY_MAKE(out, pPhysicalDeviceGroupProperties,
                    pPhysicalDeviceGroupCount);
   VkResult result;

   if (instance->physical_device_count < 0) {
      result = tu_enumerate_devices(instance);
      if (result != VK_SUCCESS && result != VK_ERROR_INCOMPATIBLE_DRIVER)
         return result;
   }

   for (uint32_t i = 0; i < instance->physical_device_count; ++i) {
      vk_outarray_append(&out, p)
      {
         p->physicalDeviceCount = 1;
         p->physicalDevices[0] =
            tu_physical_device_to_handle(instance->physical_devices + i);
         p->subsetAllocation = false;
      }
   }

   return vk_outarray_status(&out);
}

void
tu_GetPhysicalDeviceFeatures(VkPhysicalDevice physicalDevice,
                             VkPhysicalDeviceFeatures *pFeatures)
{
   memset(pFeatures, 0, sizeof(*pFeatures));

   *pFeatures = (VkPhysicalDeviceFeatures) {
      .robustBufferAccess = true,
      .fullDrawIndexUint32 = true,
      .imageCubeArray = true,
      .independentBlend = true,
      .geometryShader = true,
      .tessellationShader = true,
      .sampleRateShading = true,
      .dualSrcBlend = true,
      .logicOp = true,
      .multiDrawIndirect = true,
      .drawIndirectFirstInstance = true,
      .depthClamp = true,
      .depthBiasClamp = true,
      .fillModeNonSolid = true,
      .depthBounds = true,
      .wideLines = false,
      .largePoints = true,
      .alphaToOne = true,
      .multiViewport = false,
      .samplerAnisotropy = true,
      .textureCompressionETC2 = true,
      .textureCompressionASTC_LDR = true,
      .textureCompressionBC = true,
      .occlusionQueryPrecise = true,
      .pipelineStatisticsQuery = false,
      .vertexPipelineStoresAndAtomics = true,
      .fragmentStoresAndAtomics = true,
      .shaderTessellationAndGeometryPointSize = false,
      .shaderImageGatherExtended = false,
      .shaderStorageImageExtendedFormats = false,
      .shaderStorageImageMultisample = false,
      .shaderUniformBufferArrayDynamicIndexing = false,
      .shaderSampledImageArrayDynamicIndexing = false,
      .shaderStorageBufferArrayDynamicIndexing = false,
      .shaderStorageImageArrayDynamicIndexing = false,
      .shaderStorageImageReadWithoutFormat = false,
      .shaderStorageImageWriteWithoutFormat = false,
      .shaderClipDistance = false,
      .shaderCullDistance = false,
      .shaderFloat64 = false,
      .shaderInt64 = false,
      .shaderInt16 = false,
      .sparseBinding = false,
      .variableMultisampleRate = false,
      .inheritedQueries = false,
   };
}

void
tu_GetPhysicalDeviceFeatures2(VkPhysicalDevice physicalDevice,
                              VkPhysicalDeviceFeatures2 *pFeatures)
{
   vk_foreach_struct(ext, pFeatures->pNext)
   {
      switch (ext->sType) {
      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VULKAN_1_1_FEATURES: {
         VkPhysicalDeviceVulkan11Features *features = (void *) ext;
         features->storageBuffer16BitAccess = false;
         features->uniformAndStorageBuffer16BitAccess = false;
         features->storagePushConstant16 = false;
         features->storageInputOutput16 = false;
         features->multiview = false;
         features->multiviewGeometryShader = false;
         features->multiviewTessellationShader = false;
         features->variablePointersStorageBuffer = true;
         features->variablePointers = true;
         features->protectedMemory = false;
         features->samplerYcbcrConversion = true;
         features->shaderDrawParameters = true;
         break;
      }
      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VULKAN_1_2_FEATURES: {
         VkPhysicalDeviceVulkan12Features *features = (void *) ext;
         features->samplerMirrorClampToEdge = true;
         features->drawIndirectCount = true;
         features->storageBuffer8BitAccess = false;
         features->uniformAndStorageBuffer8BitAccess = false;
         features->storagePushConstant8 = false;
         features->shaderBufferInt64Atomics = false;
         features->shaderSharedInt64Atomics = false;
         features->shaderFloat16 = false;
         features->shaderInt8 = false;

         features->descriptorIndexing = false;
         features->shaderInputAttachmentArrayDynamicIndexing = false;
         features->shaderUniformTexelBufferArrayDynamicIndexing = false;
         features->shaderStorageTexelBufferArrayDynamicIndexing = false;
         features->shaderUniformBufferArrayNonUniformIndexing = false;
         features->shaderSampledImageArrayNonUniformIndexing = false;
         features->shaderStorageBufferArrayNonUniformIndexing = false;
         features->shaderStorageImageArrayNonUniformIndexing = false;
         features->shaderInputAttachmentArrayNonUniformIndexing = false;
         features->shaderUniformTexelBufferArrayNonUniformIndexing = false;
         features->shaderStorageTexelBufferArrayNonUniformIndexing = false;
         features->descriptorBindingUniformBufferUpdateAfterBind = false;
         features->descriptorBindingSampledImageUpdateAfterBind = false;
         features->descriptorBindingStorageImageUpdateAfterBind = false;
         features->descriptorBindingStorageBufferUpdateAfterBind = false;
         features->descriptorBindingUniformTexelBufferUpdateAfterBind = false;
         features->descriptorBindingStorageTexelBufferUpdateAfterBind = false;
         features->descriptorBindingUpdateUnusedWhilePending = false;
         features->descriptorBindingPartiallyBound = false;
         features->descriptorBindingVariableDescriptorCount = false;
         features->runtimeDescriptorArray = false;

         features->samplerFilterMinmax = true;
         features->scalarBlockLayout = false;
         features->imagelessFramebuffer = false;
         features->uniformBufferStandardLayout = false;
         features->shaderSubgroupExtendedTypes = false;
         features->separateDepthStencilLayouts = false;
         features->hostQueryReset = false;
         features->timelineSemaphore = false;
         features->bufferDeviceAddress = false;
         features->bufferDeviceAddressCaptureReplay = false;
         features->bufferDeviceAddressMultiDevice = false;
         features->vulkanMemoryModel = false;
         features->vulkanMemoryModelDeviceScope = false;
         features->vulkanMemoryModelAvailabilityVisibilityChains = false;
         features->shaderOutputViewportIndex = false;
         features->shaderOutputLayer = false;
         features->subgroupBroadcastDynamicId = false;
         break;
      }
      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VARIABLE_POINTERS_FEATURES: {
         VkPhysicalDeviceVariablePointersFeatures *features = (void *) ext;
         features->variablePointersStorageBuffer = true;
         features->variablePointers = true;
         break;
      }
      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MULTIVIEW_FEATURES: {
         VkPhysicalDeviceMultiviewFeatures *features =
            (VkPhysicalDeviceMultiviewFeatures *) ext;
         features->multiview = false;
         features->multiviewGeometryShader = false;
         features->multiviewTessellationShader = false;
         break;
      }
      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_DRAW_PARAMETERS_FEATURES: {
         VkPhysicalDeviceShaderDrawParametersFeatures *features =
            (VkPhysicalDeviceShaderDrawParametersFeatures *) ext;
         features->shaderDrawParameters = true;
         break;
      }
      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROTECTED_MEMORY_FEATURES: {
         VkPhysicalDeviceProtectedMemoryFeatures *features =
            (VkPhysicalDeviceProtectedMemoryFeatures *) ext;
         features->protectedMemory = false;
         break;
      }
      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_16BIT_STORAGE_FEATURES: {
         VkPhysicalDevice16BitStorageFeatures *features =
            (VkPhysicalDevice16BitStorageFeatures *) ext;
         features->storageBuffer16BitAccess = false;
         features->uniformAndStorageBuffer16BitAccess = false;
         features->storagePushConstant16 = false;
         features->storageInputOutput16 = false;
         break;
      }
      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SAMPLER_YCBCR_CONVERSION_FEATURES: {
         VkPhysicalDeviceSamplerYcbcrConversionFeatures *features =
            (VkPhysicalDeviceSamplerYcbcrConversionFeatures *) ext;
         features->samplerYcbcrConversion = true;
         break;
      }
      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DESCRIPTOR_INDEXING_FEATURES_EXT: {
         VkPhysicalDeviceDescriptorIndexingFeaturesEXT *features =
            (VkPhysicalDeviceDescriptorIndexingFeaturesEXT *) ext;
         features->shaderInputAttachmentArrayDynamicIndexing = false;
         features->shaderUniformTexelBufferArrayDynamicIndexing = false;
         features->shaderStorageTexelBufferArrayDynamicIndexing = false;
         features->shaderUniformBufferArrayNonUniformIndexing = false;
         features->shaderSampledImageArrayNonUniformIndexing = false;
         features->shaderStorageBufferArrayNonUniformIndexing = false;
         features->shaderStorageImageArrayNonUniformIndexing = false;
         features->shaderInputAttachmentArrayNonUniformIndexing = false;
         features->shaderUniformTexelBufferArrayNonUniformIndexing = false;
         features->shaderStorageTexelBufferArrayNonUniformIndexing = false;
         features->descriptorBindingUniformBufferUpdateAfterBind = false;
         features->descriptorBindingSampledImageUpdateAfterBind = false;
         features->descriptorBindingStorageImageUpdateAfterBind = false;
         features->descriptorBindingStorageBufferUpdateAfterBind = false;
         features->descriptorBindingUniformTexelBufferUpdateAfterBind = false;
         features->descriptorBindingStorageTexelBufferUpdateAfterBind = false;
         features->descriptorBindingUpdateUnusedWhilePending = false;
         features->descriptorBindingPartiallyBound = false;
         features->descriptorBindingVariableDescriptorCount = false;
         features->runtimeDescriptorArray = false;
         break;
      }
      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_CONDITIONAL_RENDERING_FEATURES_EXT: {
         VkPhysicalDeviceConditionalRenderingFeaturesEXT *features =
            (VkPhysicalDeviceConditionalRenderingFeaturesEXT *) ext;
         features->conditionalRendering = false;
         features->inheritedConditionalRendering = false;
         break;
      }
      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TRANSFORM_FEEDBACK_FEATURES_EXT: {
         VkPhysicalDeviceTransformFeedbackFeaturesEXT *features =
            (VkPhysicalDeviceTransformFeedbackFeaturesEXT *) ext;
         features->transformFeedback = true;
         features->geometryStreams = false;
         break;
      }
      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_INDEX_TYPE_UINT8_FEATURES_EXT: {
         VkPhysicalDeviceIndexTypeUint8FeaturesEXT *features =
            (VkPhysicalDeviceIndexTypeUint8FeaturesEXT *)ext;
         features->indexTypeUint8 = true;
         break;
      }
      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VERTEX_ATTRIBUTE_DIVISOR_FEATURES_EXT: {
         VkPhysicalDeviceVertexAttributeDivisorFeaturesEXT *features =
            (VkPhysicalDeviceVertexAttributeDivisorFeaturesEXT *)ext;
         features->vertexAttributeInstanceRateDivisor = true;
         features->vertexAttributeInstanceRateZeroDivisor = true;
         break;
      }
      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PRIVATE_DATA_FEATURES_EXT: {
         VkPhysicalDevicePrivateDataFeaturesEXT *features =
            (VkPhysicalDevicePrivateDataFeaturesEXT *)ext;
         features->privateData = true;
         break;
      }
      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DEPTH_CLIP_ENABLE_FEATURES_EXT: {
         VkPhysicalDeviceDepthClipEnableFeaturesEXT *features =
            (VkPhysicalDeviceDepthClipEnableFeaturesEXT *)ext;
         features->depthClipEnable = true;
         break;
      }
      default:
         break;
      }
   }
   tu_GetPhysicalDeviceFeatures(physicalDevice, &pFeatures->features);
}

void
tu_GetPhysicalDeviceProperties(VkPhysicalDevice physicalDevice,
                               VkPhysicalDeviceProperties *pProperties)
{
   TU_FROM_HANDLE(tu_physical_device, pdevice, physicalDevice);
   VkSampleCountFlags sample_counts =
      VK_SAMPLE_COUNT_1_BIT | VK_SAMPLE_COUNT_2_BIT | VK_SAMPLE_COUNT_4_BIT;

   /* I have no idea what the maximum size is, but the hardware supports very
    * large numbers of descriptors (at least 2^16). This limit is based on
    * CP_LOAD_STATE6, which has a 28-bit field for the DWORD offset, so that
    * we don't have to think about what to do if that overflows, but really
    * nothing is likely to get close to this.
    */
   const size_t max_descriptor_set_size = (1 << 28) / A6XX_TEX_CONST_DWORDS;

   VkPhysicalDeviceLimits limits = {
      .maxImageDimension1D = (1 << 14),
      .maxImageDimension2D = (1 << 14),
      .maxImageDimension3D = (1 << 11),
      .maxImageDimensionCube = (1 << 14),
      .maxImageArrayLayers = (1 << 11),
      .maxTexelBufferElements = 128 * 1024 * 1024,
      .maxUniformBufferRange = MAX_UNIFORM_BUFFER_RANGE,
      .maxStorageBufferRange = MAX_STORAGE_BUFFER_RANGE,
      .maxPushConstantsSize = MAX_PUSH_CONSTANTS_SIZE,
      .maxMemoryAllocationCount = UINT32_MAX,
      .maxSamplerAllocationCount = 64 * 1024,
      .bufferImageGranularity = 64, /* A cache line */
      .sparseAddressSpaceSize = 0xffffffffu, /* buffer max size */
      .maxBoundDescriptorSets = MAX_SETS,
      .maxPerStageDescriptorSamplers = max_descriptor_set_size,
      .maxPerStageDescriptorUniformBuffers = max_descriptor_set_size,
      .maxPerStageDescriptorStorageBuffers = max_descriptor_set_size,
      .maxPerStageDescriptorSampledImages = max_descriptor_set_size,
      .maxPerStageDescriptorStorageImages = max_descriptor_set_size,
      .maxPerStageDescriptorInputAttachments = MAX_RTS,
      .maxPerStageResources = max_descriptor_set_size,
      .maxDescriptorSetSamplers = max_descriptor_set_size,
      .maxDescriptorSetUniformBuffers = max_descriptor_set_size,
      .maxDescriptorSetUniformBuffersDynamic = MAX_DYNAMIC_UNIFORM_BUFFERS,
      .maxDescriptorSetStorageBuffers = max_descriptor_set_size,
      .maxDescriptorSetStorageBuffersDynamic = MAX_DYNAMIC_STORAGE_BUFFERS,
      .maxDescriptorSetSampledImages = max_descriptor_set_size,
      .maxDescriptorSetStorageImages = max_descriptor_set_size,
      .maxDescriptorSetInputAttachments = MAX_RTS,
      .maxVertexInputAttributes = 32,
      .maxVertexInputBindings = 32,
      .maxVertexInputAttributeOffset = 4095,
      .maxVertexInputBindingStride = 2048,
      .maxVertexOutputComponents = 128,
      .maxTessellationGenerationLevel = 64,
      .maxTessellationPatchSize = 32,
      .maxTessellationControlPerVertexInputComponents = 128,
      .maxTessellationControlPerVertexOutputComponents = 128,
      .maxTessellationControlPerPatchOutputComponents = 120,
      .maxTessellationControlTotalOutputComponents = 4096,
      .maxTessellationEvaluationInputComponents = 128,
      .maxTessellationEvaluationOutputComponents = 128,
      .maxGeometryShaderInvocations = 32,
      .maxGeometryInputComponents = 64,
      .maxGeometryOutputComponents = 128,
      .maxGeometryOutputVertices = 256,
      .maxGeometryTotalOutputComponents = 1024,
      .maxFragmentInputComponents = 124,
      .maxFragmentOutputAttachments = 8,
      .maxFragmentDualSrcAttachments = 1,
      .maxFragmentCombinedOutputResources = 8,
      .maxComputeSharedMemorySize = 32768,
      .maxComputeWorkGroupCount = { 65535, 65535, 65535 },
      .maxComputeWorkGroupInvocations = 2048,
      .maxComputeWorkGroupSize = { 2048, 2048, 2048 },
      .subPixelPrecisionBits = 8,
      .subTexelPrecisionBits = 8,
      .mipmapPrecisionBits = 8,
      .maxDrawIndexedIndexValue = UINT32_MAX,
      .maxDrawIndirectCount = UINT32_MAX,
      .maxSamplerLodBias = 4095.0 / 256.0, /* [-16, 15.99609375] */
      .maxSamplerAnisotropy = 16,
      .maxViewports = MAX_VIEWPORTS,
      .maxViewportDimensions = { (1 << 14), (1 << 14) },
      .viewportBoundsRange = { INT16_MIN, INT16_MAX },
      .viewportSubPixelBits = 8,
      .minMemoryMapAlignment = 4096, /* A page */
      .minTexelBufferOffsetAlignment = 64,
      .minUniformBufferOffsetAlignment = 64,
      .minStorageBufferOffsetAlignment = 64,
      .minTexelOffset = -16,
      .maxTexelOffset = 15,
      .minTexelGatherOffset = -32,
      .maxTexelGatherOffset = 31,
      .minInterpolationOffset = -0.5,
      .maxInterpolationOffset = 0.4375,
      .subPixelInterpolationOffsetBits = 4,
      .maxFramebufferWidth = (1 << 14),
      .maxFramebufferHeight = (1 << 14),
      .maxFramebufferLayers = (1 << 10),
      .framebufferColorSampleCounts = sample_counts,
      .framebufferDepthSampleCounts = sample_counts,
      .framebufferStencilSampleCounts = sample_counts,
      .framebufferNoAttachmentsSampleCounts = sample_counts,
      .maxColorAttachments = MAX_RTS,
      .sampledImageColorSampleCounts = sample_counts,
      .sampledImageIntegerSampleCounts = VK_SAMPLE_COUNT_1_BIT,
      .sampledImageDepthSampleCounts = sample_counts,
      .sampledImageStencilSampleCounts = sample_counts,
      .storageImageSampleCounts = VK_SAMPLE_COUNT_1_BIT,
      .maxSampleMaskWords = 1,
      .timestampComputeAndGraphics = true,
      .timestampPeriod = 1000000000.0 / 19200000.0, /* CP_ALWAYS_ON_COUNTER is fixed 19.2MHz */
      .maxClipDistances = 8,
      .maxCullDistances = 8,
      .maxCombinedClipAndCullDistances = 8,
      .discreteQueuePriorities = 1,
      .pointSizeRange = { 1, 4092 },
      .lineWidthRange = { 0.0, 7.9921875 },
      .pointSizeGranularity = 0.0625,
      .lineWidthGranularity = (1.0 / 128.0),
      .strictLines = false, /* FINISHME */
      .standardSampleLocations = true,
      .optimalBufferCopyOffsetAlignment = 128,
      .optimalBufferCopyRowPitchAlignment = 128,
      .nonCoherentAtomSize = 64,
   };

   *pProperties = (VkPhysicalDeviceProperties) {
      .apiVersion = tu_physical_device_api_version(pdevice),
      .driverVersion = vk_get_driver_version(),
      .vendorID = 0, /* TODO */
      .deviceID = 0,
      .deviceType = VK_PHYSICAL_DEVICE_TYPE_INTEGRATED_GPU,
      .limits = limits,
      .sparseProperties = { 0 },
   };

   strcpy(pProperties->deviceName, pdevice->name);
   memcpy(pProperties->pipelineCacheUUID, pdevice->cache_uuid, VK_UUID_SIZE);
}

void
tu_GetPhysicalDeviceProperties2(VkPhysicalDevice physicalDevice,
                                VkPhysicalDeviceProperties2 *pProperties)
{
   TU_FROM_HANDLE(tu_physical_device, pdevice, physicalDevice);
   tu_GetPhysicalDeviceProperties(physicalDevice, &pProperties->properties);

   vk_foreach_struct(ext, pProperties->pNext)
   {
      switch (ext->sType) {
      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PUSH_DESCRIPTOR_PROPERTIES_KHR: {
         VkPhysicalDevicePushDescriptorPropertiesKHR *properties =
            (VkPhysicalDevicePushDescriptorPropertiesKHR *) ext;
         properties->maxPushDescriptors = MAX_PUSH_DESCRIPTORS;
         break;
      }
      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_ID_PROPERTIES: {
         VkPhysicalDeviceIDProperties *properties =
            (VkPhysicalDeviceIDProperties *) ext;
         memcpy(properties->driverUUID, pdevice->driver_uuid, VK_UUID_SIZE);
         memcpy(properties->deviceUUID, pdevice->device_uuid, VK_UUID_SIZE);
         properties->deviceLUIDValid = false;
         break;
      }
      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MULTIVIEW_PROPERTIES: {
         VkPhysicalDeviceMultiviewProperties *properties =
            (VkPhysicalDeviceMultiviewProperties *) ext;
         properties->maxMultiviewViewCount = MAX_VIEWS;
         properties->maxMultiviewInstanceIndex = INT_MAX;
         break;
      }
      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_POINT_CLIPPING_PROPERTIES: {
         VkPhysicalDevicePointClippingProperties *properties =
            (VkPhysicalDevicePointClippingProperties *) ext;
         properties->pointClippingBehavior =
            VK_POINT_CLIPPING_BEHAVIOR_ALL_CLIP_PLANES;
         break;
      }
      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MAINTENANCE_3_PROPERTIES: {
         VkPhysicalDeviceMaintenance3Properties *properties =
            (VkPhysicalDeviceMaintenance3Properties *) ext;
         /* Make sure everything is addressable by a signed 32-bit int, and
          * our largest descriptors are 96 bytes. */
         properties->maxPerSetDescriptors = (1ull << 31) / 96;
         /* Our buffer size fields allow only this much */
         properties->maxMemoryAllocationSize = 0xFFFFFFFFull;
         break;
      }
      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TRANSFORM_FEEDBACK_PROPERTIES_EXT: {
         VkPhysicalDeviceTransformFeedbackPropertiesEXT *properties =
            (VkPhysicalDeviceTransformFeedbackPropertiesEXT *)ext;

         properties->maxTransformFeedbackStreams = IR3_MAX_SO_STREAMS;
         properties->maxTransformFeedbackBuffers = IR3_MAX_SO_BUFFERS;
         properties->maxTransformFeedbackBufferSize = UINT32_MAX;
         properties->maxTransformFeedbackStreamDataSize = 512;
         properties->maxTransformFeedbackBufferDataSize = 512;
         properties->maxTransformFeedbackBufferDataStride = 512;
         properties->transformFeedbackQueries = true;
         properties->transformFeedbackStreamsLinesTriangles = false;
         properties->transformFeedbackRasterizationStreamSelect = false;
         properties->transformFeedbackDraw = true;
         break;
      }
      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SAMPLE_LOCATIONS_PROPERTIES_EXT: {
         VkPhysicalDeviceSampleLocationsPropertiesEXT *properties =
            (VkPhysicalDeviceSampleLocationsPropertiesEXT *)ext;
         properties->sampleLocationSampleCounts = 0;
         if (pdevice->supported_extensions.EXT_sample_locations) {
            properties->sampleLocationSampleCounts =
               VK_SAMPLE_COUNT_1_BIT | VK_SAMPLE_COUNT_2_BIT | VK_SAMPLE_COUNT_4_BIT;
         }
         properties->maxSampleLocationGridSize = (VkExtent2D) { 1 , 1 };
         properties->sampleLocationCoordinateRange[0] = 0.0f;
         properties->sampleLocationCoordinateRange[1] = 0.9375f;
         properties->sampleLocationSubPixelBits = 4;
         properties->variableSampleLocations = true;
         break;
      }
      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SAMPLER_FILTER_MINMAX_PROPERTIES: {
         VkPhysicalDeviceSamplerFilterMinmaxProperties *properties =
            (VkPhysicalDeviceSamplerFilterMinmaxProperties *)ext;
         properties->filterMinmaxImageComponentMapping = true;
         properties->filterMinmaxSingleComponentFormats = true;
         break;
      }
      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SUBGROUP_PROPERTIES: {
         VkPhysicalDeviceSubgroupProperties *properties =
            (VkPhysicalDeviceSubgroupProperties *)ext;
         properties->subgroupSize = 64;
         properties->supportedStages = VK_SHADER_STAGE_COMPUTE_BIT;
         properties->supportedOperations = VK_SUBGROUP_FEATURE_BASIC_BIT |
                                           VK_SUBGROUP_FEATURE_VOTE_BIT;
         properties->quadOperationsInAllStages = false;
         break;
      }
      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VERTEX_ATTRIBUTE_DIVISOR_PROPERTIES_EXT: {
         VkPhysicalDeviceVertexAttributeDivisorPropertiesEXT *props =
            (VkPhysicalDeviceVertexAttributeDivisorPropertiesEXT *)ext;
         props->maxVertexAttribDivisor = UINT32_MAX;
         break;
      }
      default:
         break;
      }
   }
}

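/* Turnip exposes a single queue family; graphics, compute and transfer all
 * execute on the same hardware queue.
 */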
static const VkQueueFamilyProperties tu_queue_family_properties = {
   .queueFlags =
      VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT | VK_QUEUE_TRANSFER_BIT,
   .queueCount = 1,
   .timestampValidBits = 48,
   .minImageTransferGranularity = { 1, 1, 1 },
};

void
tu_GetPhysicalDeviceQueueFamilyProperties(
   VkPhysicalDevice physicalDevice,
   uint32_t *pQueueFamilyPropertyCount,
   VkQueueFamilyProperties *pQueueFamilyProperties)
{
   VK_OUTARRAY_MAKE(out, pQueueFamilyProperties, pQueueFamilyPropertyCount);

   vk_outarray_append(&out, p) { *p = tu_queue_family_properties; }
}

void
tu_GetPhysicalDeviceQueueFamilyProperties2(
   VkPhysicalDevice physicalDevice,
   uint32_t *pQueueFamilyPropertyCount,
   VkQueueFamilyProperties2 *pQueueFamilyProperties)
{
   VK_OUTARRAY_MAKE(out, pQueueFamilyProperties, pQueueFamilyPropertyCount);

   vk_outarray_append(&out, p)
   {
      p->queueFamilyProperties = tu_queue_family_properties;
   }
}

static uint64_t
tu_get_system_heap_size()
{
   struct sysinfo info;
   sysinfo(&info);

   uint64_t total_ram = (uint64_t) info.totalram * (uint64_t) info.mem_unit;

   /* We don't want to burn too much ram with the GPU. If the user has 4GiB
    * or less, we use at most half. If they have more than 4GiB, we use 3/4.
    */
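   /* For example: 4 GiB of RAM yields a 2 GiB heap, 8 GiB yields 6 GiB. */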
   uint64_t available_ram;
   if (total_ram <= 4ull * 1024ull * 1024ull * 1024ull)
      available_ram = total_ram / 2;
   else
      available_ram = total_ram * 3 / 4;

   return available_ram;
}

void
tu_GetPhysicalDeviceMemoryProperties(
   VkPhysicalDevice physicalDevice,
   VkPhysicalDeviceMemoryProperties *pMemoryProperties)
{
   pMemoryProperties->memoryHeapCount = 1;
   pMemoryProperties->memoryHeaps[0].size = tu_get_system_heap_size();
   pMemoryProperties->memoryHeaps[0].flags = VK_MEMORY_HEAP_DEVICE_LOCAL_BIT;

   pMemoryProperties->memoryTypeCount = 1;
   pMemoryProperties->memoryTypes[0].propertyFlags =
      VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT |
      VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT |
      VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
   pMemoryProperties->memoryTypes[0].heapIndex = 0;
}

void
tu_GetPhysicalDeviceMemoryProperties2(
   VkPhysicalDevice physicalDevice,
   VkPhysicalDeviceMemoryProperties2 *pMemoryProperties)
{
   tu_GetPhysicalDeviceMemoryProperties(
      physicalDevice, &pMemoryProperties->memoryProperties);
}

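/* Each tu_queue owns an msm submitqueue; tu_drm_submitqueue_new presumably
 * wraps the kernel's submitqueue-creation ioctl, with the 0 here selecting
 * the default priority.
 */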
static VkResult
tu_queue_init(struct tu_device *device,
              struct tu_queue *queue,
              uint32_t queue_family_index,
              int idx,
              VkDeviceQueueCreateFlags flags)
{
   vk_object_base_init(&device->vk, &queue->base, VK_OBJECT_TYPE_QUEUE);

   queue->device = device;
   queue->queue_family_index = queue_family_index;
   queue->queue_idx = idx;
   queue->flags = flags;

   int ret = tu_drm_submitqueue_new(device, 0, &queue->msm_queue_id);
   if (ret)
      return VK_ERROR_INITIALIZATION_FAILED;

   tu_fence_init(&queue->submit_fence, false);

   return VK_SUCCESS;
}

static void
tu_queue_finish(struct tu_queue *queue)
{
   tu_fence_finish(&queue->submit_fence);
   tu_drm_submitqueue_close(queue->device, queue->msm_queue_id);
}

static int
tu_get_device_extension_index(const char *name)
{
   for (unsigned i = 0; i < TU_DEVICE_EXTENSION_COUNT; ++i) {
      if (strcmp(name, tu_device_extensions[i].extensionName) == 0)
         return i;
   }
   return -1;
}

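/* One 128-byte entry per VkBorderColor, in the layout the a6xx sampler
 * appears to expect; the table is copied into the global BO at device
 * creation (see the STATIC_ASSERT against tu6_global below).
 */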
struct PACKED bcolor_entry {
   uint32_t fp32[4];
   uint16_t ui16[4];
   int16_t si16[4];
   uint16_t fp16[4];
   uint16_t rgb565;
   uint16_t rgb5a1;
   uint16_t rgba4;
   uint8_t __pad0[2];
   uint8_t ui8[4];
   int8_t si8[4];
   uint32_t rgb10a2;
   uint32_t z24; /* also s8? */
   uint16_t srgb[4]; /* appears to duplicate fp16[], but clamped, used for srgb */
   uint8_t __pad1[56];
} border_color[] = {
   [VK_BORDER_COLOR_FLOAT_TRANSPARENT_BLACK] = {},
   [VK_BORDER_COLOR_INT_TRANSPARENT_BLACK] = {},
   [VK_BORDER_COLOR_FLOAT_OPAQUE_BLACK] = {
      .fp32[3] = 0x3f800000,
      .ui16[3] = 0xffff,
      .si16[3] = 0x7fff,
      .fp16[3] = 0x3c00,
      .rgb5a1 = 0x8000,
      .rgba4 = 0xf000,
      .ui8[3] = 0xff,
      .si8[3] = 0x7f,
      .rgb10a2 = 0xc0000000,
      .srgb[3] = 0x3c00,
   },
   [VK_BORDER_COLOR_INT_OPAQUE_BLACK] = {
      .fp32[3] = 1,
      .fp16[3] = 1,
   },
   [VK_BORDER_COLOR_FLOAT_OPAQUE_WHITE] = {
      .fp32[0 ... 3] = 0x3f800000,
      .ui16[0 ... 3] = 0xffff,
      .si16[0 ... 3] = 0x7fff,
      .fp16[0 ... 3] = 0x3c00,
      .rgb565 = 0xffff,
      .rgb5a1 = 0xffff,
      .rgba4 = 0xffff,
      .ui8[0 ... 3] = 0xff,
      .si8[0 ... 3] = 0x7f,
      .rgb10a2 = 0xffffffff,
      .z24 = 0xffffff,
      .srgb[0 ... 3] = 0x3c00,
   },
   [VK_BORDER_COLOR_INT_OPAQUE_WHITE] = {
      .fp32[0 ... 3] = 1,
      .fp16[0 ... 3] = 1,
   },
};

VkResult
tu_CreateDevice(VkPhysicalDevice physicalDevice,
                const VkDeviceCreateInfo *pCreateInfo,
                const VkAllocationCallbacks *pAllocator,
                VkDevice *pDevice)
{
   TU_FROM_HANDLE(tu_physical_device, physical_device, physicalDevice);
   VkResult result;
   struct tu_device *device;

   /* Check enabled features */
   if (pCreateInfo->pEnabledFeatures) {
      VkPhysicalDeviceFeatures supported_features;
      tu_GetPhysicalDeviceFeatures(physicalDevice, &supported_features);
      VkBool32 *supported_feature = (VkBool32 *) &supported_features;
      VkBool32 *enabled_feature = (VkBool32 *) pCreateInfo->pEnabledFeatures;
      unsigned num_features =
         sizeof(VkPhysicalDeviceFeatures) / sizeof(VkBool32);
      for (uint32_t i = 0; i < num_features; i++) {
         if (enabled_feature[i] && !supported_feature[i])
            return vk_error(physical_device->instance,
                            VK_ERROR_FEATURE_NOT_PRESENT);
      }
   }

   device = vk_zalloc2(&physical_device->instance->alloc, pAllocator,
                       sizeof(*device), 8, VK_SYSTEM_ALLOCATION_SCOPE_DEVICE);
   if (!device)
      return vk_error(physical_device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);

   vk_device_init(&device->vk, pCreateInfo,
                  &physical_device->instance->alloc, pAllocator);

   device->instance = physical_device->instance;
   device->physical_device = physical_device;
   device->_lost = false;

   for (uint32_t i = 0; i < pCreateInfo->enabledExtensionCount; i++) {
      const char *ext_name = pCreateInfo->ppEnabledExtensionNames[i];
      int index = tu_get_device_extension_index(ext_name);
      if (index < 0 ||
          !physical_device->supported_extensions.extensions[index]) {
         vk_free(&device->vk.alloc, device);
         return vk_error(physical_device->instance,
                         VK_ERROR_EXTENSION_NOT_PRESENT);
      }

      device->enabled_extensions.extensions[index] = true;
   }

   for (unsigned i = 0; i < pCreateInfo->queueCreateInfoCount; i++) {
      const VkDeviceQueueCreateInfo *queue_create =
         &pCreateInfo->pQueueCreateInfos[i];
      uint32_t qfi = queue_create->queueFamilyIndex;
      device->queues[qfi] = vk_alloc(
         &device->vk.alloc, queue_create->queueCount * sizeof(struct tu_queue),
         8, VK_SYSTEM_ALLOCATION_SCOPE_DEVICE);
      if (!device->queues[qfi]) {
         result = VK_ERROR_OUT_OF_HOST_MEMORY;
         goto fail_queues;
      }

      memset(device->queues[qfi], 0,
             queue_create->queueCount * sizeof(struct tu_queue));

      device->queue_count[qfi] = queue_create->queueCount;

      for (unsigned q = 0; q < queue_create->queueCount; q++) {
         result = tu_queue_init(device, &device->queues[qfi][q], qfi, q,
                                queue_create->flags);
         if (result != VK_SUCCESS)
            goto fail_queues;
      }
   }

   device->compiler = ir3_compiler_create(NULL, physical_device->gpu_id);
   if (!device->compiler) {
      result = VK_ERROR_INITIALIZATION_FAILED;
      goto fail_queues;
   }

   /* Initial sizes; these will increase if there is overflow. */
   device->vsc_draw_strm_pitch = 0x1000 + VSC_PAD;
   device->vsc_prim_strm_pitch = 0x4000 + VSC_PAD;

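   /* The global BO holds per-device data shared by all command buffers:
    * the border color table copied below and the internal clear/blit
    * shaders set up by tu_init_clear_blit_shaders().
    */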
   STATIC_ASSERT(sizeof(border_color) == sizeof(((struct tu6_global*) 0)->border_color));
   result = tu_bo_init_new(device, &device->global_bo, sizeof(struct tu6_global));
   if (result != VK_SUCCESS)
      goto fail_global_bo;

   result = tu_bo_map(device, &device->global_bo);
   if (result != VK_SUCCESS)
      goto fail_global_bo_map;

   memcpy(device->global_bo.map + gb_offset(border_color), border_color, sizeof(border_color));
   tu_init_clear_blit_shaders(device->global_bo.map);

   VkPipelineCacheCreateInfo ci;
   ci.sType = VK_STRUCTURE_TYPE_PIPELINE_CACHE_CREATE_INFO;
   ci.pNext = NULL;
   ci.flags = 0;
   ci.pInitialData = NULL;
   ci.initialDataSize = 0;
   VkPipelineCache pc;
   result =
      tu_CreatePipelineCache(tu_device_to_handle(device), &ci, NULL, &pc);
   if (result != VK_SUCCESS)
      goto fail_pipeline_cache;

   device->mem_cache = tu_pipeline_cache_from_handle(pc);

   for (unsigned i = 0; i < ARRAY_SIZE(device->scratch_bos); i++)
      mtx_init(&device->scratch_bos[i].construct_mtx, mtx_plain);

   mtx_init(&device->vsc_pitch_mtx, mtx_plain);

   *pDevice = tu_device_to_handle(device);
   return VK_SUCCESS;

fail_pipeline_cache:
fail_global_bo_map:
   tu_bo_finish(device, &device->global_bo);

fail_global_bo:
   ralloc_free(device->compiler);

fail_queues:
   for (unsigned i = 0; i < TU_MAX_QUEUE_FAMILIES; i++) {
      for (unsigned q = 0; q < device->queue_count[i]; q++)
         tu_queue_finish(&device->queues[i][q]);
      if (device->queue_count[i])
         vk_object_free(&device->vk, NULL, device->queues[i]);
   }

   vk_free(&device->vk.alloc, device);
   return result;
}

void
tu_DestroyDevice(VkDevice _device, const VkAllocationCallbacks *pAllocator)
{
   TU_FROM_HANDLE(tu_device, device, _device);

   if (!device)
      return;

   for (unsigned i = 0; i < TU_MAX_QUEUE_FAMILIES; i++) {
      for (unsigned q = 0; q < device->queue_count[i]; q++)
         tu_queue_finish(&device->queues[i][q]);
      if (device->queue_count[i])
         vk_object_free(&device->vk, NULL, device->queues[i]);
   }

   for (unsigned i = 0; i < ARRAY_SIZE(device->scratch_bos); i++) {
      if (device->scratch_bos[i].initialized)
         tu_bo_finish(device, &device->scratch_bos[i].bo);
   }

   ir3_compiler_destroy(device->compiler);

   VkPipelineCache pc = tu_pipeline_cache_to_handle(device->mem_cache);
   tu_DestroyPipelineCache(tu_device_to_handle(device), pc, NULL);

   vk_free(&device->vk.alloc, device);
}

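/* Marks the device lost; presumably invoked through a tu_device_set_lost()
 * macro that supplies __FILE__ and __LINE__ at the call site.
 */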
VkResult
_tu_device_set_lost(struct tu_device *device,
                    const char *file, int line,
                    const char *msg, ...)
{
   /* Set the flag indicating that waits should return in finite time even
    * after device loss.
    */
   p_atomic_inc(&device->_lost);

   /* TODO: Report the log message through VkDebugReportCallbackEXT instead */
   fprintf(stderr, "%s:%d: ", file, line);
   va_list ap;
   va_start(ap, msg);
   vfprintf(stderr, msg, ap);
   va_end(ap);

   if (env_var_as_boolean("TU_ABORT_ON_DEVICE_LOSS", false))
      abort();

   return VK_ERROR_DEVICE_LOST;
}

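/* Scratch BOs are kept in a small pool of power-of-two sizes. The common
 * case is a lock-free read of an already-initialized slot; the mutex is
 * only taken the first time a given size is requested.
 */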
VkResult
tu_get_scratch_bo(struct tu_device *dev, uint64_t size, struct tu_bo **bo)
{
   unsigned size_log2 = MAX2(util_logbase2_ceil64(size), MIN_SCRATCH_BO_SIZE_LOG2);
   unsigned index = size_log2 - MIN_SCRATCH_BO_SIZE_LOG2;
   assert(index < ARRAY_SIZE(dev->scratch_bos));

   for (unsigned i = index; i < ARRAY_SIZE(dev->scratch_bos); i++) {
      if (p_atomic_read(&dev->scratch_bos[i].initialized)) {
         /* Fast path: just return the already-allocated BO. */
         *bo = &dev->scratch_bos[i].bo;
         return VK_SUCCESS;
      }
   }

   /* Slow path: actually allocate the BO. We take a lock because allocation
    * is slow, and we don't want two threads racing to create the same BO.
    */
   mtx_lock(&dev->scratch_bos[index].construct_mtx);

   /* Another thread may have allocated it already while we were waiting on
    * the lock. We need to check this in order to avoid double-allocating.
    */
   if (dev->scratch_bos[index].initialized) {
      mtx_unlock(&dev->scratch_bos[index].construct_mtx);
      *bo = &dev->scratch_bos[index].bo;
      return VK_SUCCESS;
   }

   unsigned bo_size = 1ull << size_log2;
   VkResult result = tu_bo_init_new(dev, &dev->scratch_bos[index].bo, bo_size);
   if (result != VK_SUCCESS) {
      mtx_unlock(&dev->scratch_bos[index].construct_mtx);
      return result;
   }

   p_atomic_set(&dev->scratch_bos[index].initialized, true);

   mtx_unlock(&dev->scratch_bos[index].construct_mtx);

   *bo = &dev->scratch_bos[index].bo;
   return VK_SUCCESS;
}

VkResult
tu_EnumerateInstanceLayerProperties(uint32_t *pPropertyCount,
                                    VkLayerProperties *pProperties)
{
   *pPropertyCount = 0;
   return VK_SUCCESS;
}

VkResult
tu_EnumerateDeviceLayerProperties(VkPhysicalDevice physicalDevice,
                                  uint32_t *pPropertyCount,
                                  VkLayerProperties *pProperties)
{
   *pPropertyCount = 0;
   return VK_SUCCESS;
}

void
tu_GetDeviceQueue2(VkDevice _device,
                   const VkDeviceQueueInfo2 *pQueueInfo,
                   VkQueue *pQueue)
{
   TU_FROM_HANDLE(tu_device, device, _device);
   struct tu_queue *queue;

   queue =
      &device->queues[pQueueInfo->queueFamilyIndex][pQueueInfo->queueIndex];
   if (pQueueInfo->flags != queue->flags) {
      /* From the Vulkan 1.1.70 spec:
       *
       * "The queue returned by vkGetDeviceQueue2 must have the same
       * flags value from this structure as that used at device
       * creation time in a VkDeviceQueueCreateInfo instance. If no
       * matching flags were specified at device creation time then
       * pQueue will return VK_NULL_HANDLE."
       */
      *pQueue = VK_NULL_HANDLE;
      return;
   }

   *pQueue = tu_queue_to_handle(queue);
}

void
tu_GetDeviceQueue(VkDevice _device,
                  uint32_t queueFamilyIndex,
                  uint32_t queueIndex,
                  VkQueue *pQueue)
{
   const VkDeviceQueueInfo2 info =
      (VkDeviceQueueInfo2) { .sType = VK_STRUCTURE_TYPE_DEVICE_QUEUE_INFO_2,
                             .queueFamilyIndex = queueFamilyIndex,
                             .queueIndex = queueIndex };

   tu_GetDeviceQueue2(_device, &info, pQueue);
}

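/* Convert a semaphore array into the syncobj list the msm submit ioctl
 * expects, in two passes: first count the syncobj-backed semaphores, then
 * fill the array. A temporary (imported) payload takes precedence over the
 * permanent one, matching Vulkan semaphore import semantics.
 */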
static VkResult
tu_get_semaphore_syncobjs(const VkSemaphore *sems,
                          uint32_t sem_count,
                          bool wait,
                          struct drm_msm_gem_submit_syncobj **out,
                          uint32_t *out_count)
{
   uint32_t syncobj_count = 0;
   struct drm_msm_gem_submit_syncobj *syncobjs;

   for (uint32_t i = 0; i < sem_count; ++i) {
      TU_FROM_HANDLE(tu_semaphore, sem, sems[i]);

      struct tu_semaphore_part *part =
         sem->temporary.kind != TU_SEMAPHORE_NONE ?
            &sem->temporary : &sem->permanent;

      if (part->kind == TU_SEMAPHORE_SYNCOBJ)
         ++syncobj_count;
   }

   *out = NULL;
   *out_count = syncobj_count;
   if (!syncobj_count)
      return VK_SUCCESS;

   *out = syncobjs = calloc(syncobj_count, sizeof (*syncobjs));
   if (!syncobjs)
      return VK_ERROR_OUT_OF_HOST_MEMORY;

   for (uint32_t i = 0, j = 0; i < sem_count; ++i) {
      TU_FROM_HANDLE(tu_semaphore, sem, sems[i]);

      struct tu_semaphore_part *part =
         sem->temporary.kind != TU_SEMAPHORE_NONE ?
            &sem->temporary : &sem->permanent;

      if (part->kind == TU_SEMAPHORE_SYNCOBJ) {
         syncobjs[j].handle = part->syncobj;
         syncobjs[j].flags = wait ? MSM_SUBMIT_SYNCOBJ_RESET : 0;
         ++j;
      }
   }

   return VK_SUCCESS;
}


static void
tu_semaphores_remove_temp(struct tu_device *device,
                          const VkSemaphore *sems,
                          uint32_t sem_count)
{
   for (uint32_t i = 0; i < sem_count; ++i) {
      TU_FROM_HANDLE(tu_semaphore, sem, sems[i]);
      tu_semaphore_remove_temp(device, sem);
   }
}

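/* Each VkSubmitInfo is flattened into one DRM_MSM_GEM_SUBMIT ioctl: every
 * command buffer's IB entries become drm_msm_gem_submit_cmd records, the
 * referenced BOs are merged into a single list, and wait/signal semaphores
 * are passed as syncobjs. A fence fd is only requested for the last submit,
 * since queue execution is serialized.
 */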
1605 VkResult
1606 tu_QueueSubmit(VkQueue _queue,
1607 uint32_t submitCount,
1608 const VkSubmitInfo *pSubmits,
1609 VkFence _fence)
1610 {
1611 TU_FROM_HANDLE(tu_queue, queue, _queue);
1612 VkResult result;
1613
1614 for (uint32_t i = 0; i < submitCount; ++i) {
1615 const VkSubmitInfo *submit = pSubmits + i;
1616 const bool last_submit = (i == submitCount - 1);
1617 struct drm_msm_gem_submit_syncobj *in_syncobjs = NULL, *out_syncobjs = NULL;
1618 uint32_t nr_in_syncobjs, nr_out_syncobjs;
1619 struct tu_bo_list bo_list;
1620 tu_bo_list_init(&bo_list);
1621
1622 result = tu_get_semaphore_syncobjs(pSubmits[i].pWaitSemaphores,
1623 pSubmits[i].waitSemaphoreCount,
1624 false, &in_syncobjs, &nr_in_syncobjs);
1625 if (result != VK_SUCCESS) {
1626 return tu_device_set_lost(queue->device,
1627 "failed to allocate space for semaphore submission\n");
1628 }
1629
1630 result = tu_get_semaphore_syncobjs(pSubmits[i].pSignalSemaphores,
1631 pSubmits[i].signalSemaphoreCount,
1632 false, &out_syncobjs, &nr_out_syncobjs);
1633 if (result != VK_SUCCESS) {
1634 free(in_syncobjs);
1635 return tu_device_set_lost(queue->device,
1636 "failed to allocate space for semaphore submission\n");
1637 }
1638
1639 uint32_t entry_count = 0;
1640 for (uint32_t j = 0; j < submit->commandBufferCount; ++j) {
1641 TU_FROM_HANDLE(tu_cmd_buffer, cmdbuf, submit->pCommandBuffers[j]);
1642 entry_count += cmdbuf->cs.entry_count;
1643 }
1644
1645 struct drm_msm_gem_submit_cmd cmds[entry_count];
1646 uint32_t entry_idx = 0;
1647 for (uint32_t j = 0; j < submit->commandBufferCount; ++j) {
1648 TU_FROM_HANDLE(tu_cmd_buffer, cmdbuf, submit->pCommandBuffers[j]);
1649 struct tu_cs *cs = &cmdbuf->cs;
1650 for (unsigned k = 0; k < cs->entry_count; ++k, ++entry_idx) {
1651 cmds[entry_idx].type = MSM_SUBMIT_CMD_BUF;
1652 cmds[entry_idx].submit_idx =
1653 tu_bo_list_add(&bo_list, cs->entries[k].bo,
1654 MSM_SUBMIT_BO_READ | MSM_SUBMIT_BO_DUMP);
1655 cmds[entry_idx].submit_offset = cs->entries[k].offset;
1656 cmds[entry_idx].size = cs->entries[k].size;
1657 cmds[entry_idx].pad = 0;
1658 cmds[entry_idx].nr_relocs = 0;
1659 cmds[entry_idx].relocs = 0;
1660 }
1661
1662 tu_bo_list_merge(&bo_list, &cmdbuf->bo_list);
1663 }
1664
1665 uint32_t flags = MSM_PIPE_3D0;
1666 if (nr_in_syncobjs) {
1667 flags |= MSM_SUBMIT_SYNCOBJ_IN;
1668 }
1669 if (nr_out_syncobjs) {
1670 flags |= MSM_SUBMIT_SYNCOBJ_OUT;
1671 }
1672
1673 if (last_submit) {
1674 flags |= MSM_SUBMIT_FENCE_FD_OUT;
1675 }
1676
1677 struct drm_msm_gem_submit req = {
1678 .flags = flags,
1679 .queueid = queue->msm_queue_id,
1680 .bos = (uint64_t)(uintptr_t) bo_list.bo_infos,
1681 .nr_bos = bo_list.count,
1682 .cmds = (uint64_t)(uintptr_t)cmds,
1683 .nr_cmds = entry_count,
1684 .in_syncobjs = (uint64_t)(uintptr_t)in_syncobjs,
1685 .out_syncobjs = (uint64_t)(uintptr_t)out_syncobjs,
1686 .nr_in_syncobjs = nr_in_syncobjs,
1687 .nr_out_syncobjs = nr_out_syncobjs,
1688 .syncobj_stride = sizeof(struct drm_msm_gem_submit_syncobj),
1689 };
1690
1691 int ret = drmCommandWriteRead(queue->device->physical_device->local_fd,
1692 DRM_MSM_GEM_SUBMIT,
1693 &req, sizeof(req));
1694 if (ret) {
tu_bo_list_destroy(&bo_list);
1695 free(in_syncobjs);
1696 free(out_syncobjs);
1697 return tu_device_set_lost(queue->device, "submit failed: %s\n",
1698 strerror(errno));
1699 }
1700
1701 tu_bo_list_destroy(&bo_list);
1702 free(in_syncobjs);
1703 free(out_syncobjs);
1704
1705 tu_semaphores_remove_temp(queue->device, pSubmits[i].pWaitSemaphores,
1706 pSubmits[i].waitSemaphoreCount);
1707 if (last_submit) {
1708 /* no need to merge fences as queue execution is serialized */
1709 tu_fence_update_fd(&queue->submit_fence, req.fence_fd);
1710 }
1713 }
1714
1715 if (_fence != VK_NULL_HANDLE) {
1716 TU_FROM_HANDLE(tu_fence, fence, _fence);
1717 tu_fence_copy(fence, &queue->submit_fence);
1718 }
1719
1720 return VK_SUCCESS;
1721 }
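
/* Illustrative sketch (editorial, not part of the driver): a minimal
 * application-side submission that lands in tu_QueueSubmit above. The
 * handles (queue, cmd_buf, wait_sem, signal_sem, fence) are hypothetical.
 * Each VkSubmitInfo becomes one DRM_MSM_GEM_SUBMIT ioctl, and only the
 * last submit of the batch requests a fence fd via MSM_SUBMIT_FENCE_FD_OUT.
 *
 *    VkPipelineStageFlags wait_stage =
 *       VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT;
 *    VkSubmitInfo submit_info = {
 *       .sType = VK_STRUCTURE_TYPE_SUBMIT_INFO,
 *       .waitSemaphoreCount = 1,
 *       .pWaitSemaphores = &wait_sem,
 *       .pWaitDstStageMask = &wait_stage,
 *       .commandBufferCount = 1,
 *       .pCommandBuffers = &cmd_buf,
 *       .signalSemaphoreCount = 1,
 *       .pSignalSemaphores = &signal_sem,
 *    };
 *    vkQueueSubmit(queue, 1, &submit_info, fence);
 */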
1722
1723 VkResult
1724 tu_QueueWaitIdle(VkQueue _queue)
1725 {
1726 TU_FROM_HANDLE(tu_queue, queue, _queue);
1727
1728 if (tu_device_is_lost(queue->device))
1729 return VK_ERROR_DEVICE_LOST;
1730
1731 tu_fence_wait_idle(&queue->submit_fence);
1732
1733 return VK_SUCCESS;
1734 }
1735
1736 VkResult
1737 tu_DeviceWaitIdle(VkDevice _device)
1738 {
1739 TU_FROM_HANDLE(tu_device, device, _device);
1740
1741 if (tu_device_is_lost(device))
1742 return VK_ERROR_DEVICE_LOST;
1743
1744 for (unsigned i = 0; i < TU_MAX_QUEUE_FAMILIES; i++) {
1745 for (unsigned q = 0; q < device->queue_count[i]; q++) {
1746 tu_QueueWaitIdle(tu_queue_to_handle(&device->queues[i][q]));
1747 }
1748 }
1749 return VK_SUCCESS;
1750 }
1751
1752 VkResult
1753 tu_EnumerateInstanceExtensionProperties(const char *pLayerName,
1754 uint32_t *pPropertyCount,
1755 VkExtensionProperties *pProperties)
1756 {
1757 VK_OUTARRAY_MAKE(out, pProperties, pPropertyCount);
1758
1759 /* We support no layers */
1760 if (pLayerName)
1761 return vk_error(NULL, VK_ERROR_LAYER_NOT_PRESENT);
1762
1763 for (int i = 0; i < TU_INSTANCE_EXTENSION_COUNT; i++) {
1764 if (tu_instance_extensions_supported.extensions[i]) {
1765 vk_outarray_append(&out, prop) { *prop = tu_instance_extensions[i]; }
1766 }
1767 }
1768
1769 return vk_outarray_status(&out);
1770 }
1771
1772 VkResult
1773 tu_EnumerateDeviceExtensionProperties(VkPhysicalDevice physicalDevice,
1774 const char *pLayerName,
1775 uint32_t *pPropertyCount,
1776 VkExtensionProperties *pProperties)
1777 {
1779 TU_FROM_HANDLE(tu_physical_device, device, physicalDevice);
1780 VK_OUTARRAY_MAKE(out, pProperties, pPropertyCount);
1781
1782 /* We support no layers */
1783 if (pLayerName)
1784 return vk_error(NULL, VK_ERROR_LAYER_NOT_PRESENT);
1785
1786 for (int i = 0; i < TU_DEVICE_EXTENSION_COUNT; i++) {
1787 if (device->supported_extensions.extensions[i]) {
1788 vk_outarray_append(&out, prop) { *prop = tu_device_extensions[i]; }
1789 }
1790 }
1791
1792 return vk_outarray_status(&out);
1793 }
1794
1795 PFN_vkVoidFunction
1796 tu_GetInstanceProcAddr(VkInstance _instance, const char *pName)
1797 {
1798 TU_FROM_HANDLE(tu_instance, instance, _instance);
1799
1800 return tu_lookup_entrypoint_checked(
1801 pName, instance ? instance->api_version : 0,
1802 instance ? &instance->enabled_extensions : NULL, NULL);
1803 }
1804
1805 /* The loader wants us to expose a second GetInstanceProcAddr function
1806 * to work around certain LD_PRELOAD issues seen in apps.
1807 */
1808 PUBLIC
1809 VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL
1810 vk_icdGetInstanceProcAddr(VkInstance instance, const char *pName);
1811
1812 PUBLIC
1813 VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL
1814 vk_icdGetInstanceProcAddr(VkInstance instance, const char *pName)
1815 {
1816 return tu_GetInstanceProcAddr(instance, pName);
1817 }
1818
1819 PFN_vkVoidFunction
1820 tu_GetDeviceProcAddr(VkDevice _device, const char *pName)
1821 {
1822 TU_FROM_HANDLE(tu_device, device, _device);
1823
1824 return tu_lookup_entrypoint_checked(pName, device->instance->api_version,
1825 &device->instance->enabled_extensions,
1826 &device->enabled_extensions);
1827 }
1828
1829 static VkResult
1830 tu_alloc_memory(struct tu_device *device,
1831 const VkMemoryAllocateInfo *pAllocateInfo,
1832 const VkAllocationCallbacks *pAllocator,
1833 VkDeviceMemory *pMem)
1834 {
1835 struct tu_device_memory *mem;
1836 VkResult result;
1837
1838 assert(pAllocateInfo->sType == VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO);
1839
1840 if (pAllocateInfo->allocationSize == 0) {
1841 /* Apparently zero-size allocations happen in the wild; return a null handle instead of failing. */
1842 *pMem = VK_NULL_HANDLE;
1843 return VK_SUCCESS;
1844 }
1845
1846 mem = vk_object_alloc(&device->vk, pAllocator, sizeof(*mem),
1847 VK_OBJECT_TYPE_DEVICE_MEMORY);
1848 if (mem == NULL)
1849 return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
1850
1851 const VkImportMemoryFdInfoKHR *fd_info =
1852 vk_find_struct_const(pAllocateInfo->pNext, IMPORT_MEMORY_FD_INFO_KHR);
1853 if (fd_info && !fd_info->handleType)
1854 fd_info = NULL;
1855
1856 if (fd_info) {
1857 assert(fd_info->handleType ==
1858 VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT ||
1859 fd_info->handleType ==
1860 VK_EXTERNAL_MEMORY_HANDLE_TYPE_DMA_BUF_BIT_EXT);
1861
1862 /*
1863 * TODO Importing the same fd twice gives us the same handle without
1864 * reference counting. We need to maintain a per-instance handle-to-bo
1865 * table and add a reference count to tu_bo (see the sketch after this function).
1866 */
1867 result = tu_bo_init_dmabuf(device, &mem->bo,
1868 pAllocateInfo->allocationSize, fd_info->fd);
1869 if (result == VK_SUCCESS) {
1870 /* take ownership and close the fd */
1871 close(fd_info->fd);
1872 }
1873 } else {
1874 result =
1875 tu_bo_init_new(device, &mem->bo, pAllocateInfo->allocationSize);
1876 }
1877
1878 if (result != VK_SUCCESS) {
1879 vk_object_free(&device->vk, pAllocator, mem);
1880 return result;
1881 }
1882
1883 mem->size = pAllocateInfo->allocationSize;
1884 mem->type_index = pAllocateInfo->memoryTypeIndex;
1885
1886 mem->map = NULL;
1887 mem->user_ptr = NULL;
1888
1889 *pMem = tu_device_memory_to_handle(mem);
1890
1891 return VK_SUCCESS;
1892 }
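
/* Editorial sketch of the TODO in the dma-buf import path above: one way to
 * deduplicate imports is a per-instance table keyed by GEM handle plus a
 * reference count on tu_bo. instance->bo_table and tu_bo::refcount are
 * hypothetical fields, not part of the driver today:
 *
 *    struct hash_entry *entry =
 *       _mesa_hash_table_search(instance->bo_table,
 *                               (void *)(uintptr_t) gem_handle);
 *    if (entry) {
 *       struct tu_bo *bo = entry->data;
 *       p_atomic_inc(&bo->refcount);   // reuse instead of re-importing
 *       return bo;
 *    }
 */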
1893
1894 VkResult
1895 tu_AllocateMemory(VkDevice _device,
1896 const VkMemoryAllocateInfo *pAllocateInfo,
1897 const VkAllocationCallbacks *pAllocator,
1898 VkDeviceMemory *pMem)
1899 {
1900 TU_FROM_HANDLE(tu_device, device, _device);
1901 return tu_alloc_memory(device, pAllocateInfo, pAllocator, pMem);
1902 }
1903
1904 void
1905 tu_FreeMemory(VkDevice _device,
1906 VkDeviceMemory _mem,
1907 const VkAllocationCallbacks *pAllocator)
1908 {
1909 TU_FROM_HANDLE(tu_device, device, _device);
1910 TU_FROM_HANDLE(tu_device_memory, mem, _mem);
1911
1912 if (mem == NULL)
1913 return;
1914
1915 tu_bo_finish(device, &mem->bo);
1916 vk_object_free(&device->vk, pAllocator, mem);
1917 }
1918
1919 VkResult
1920 tu_MapMemory(VkDevice _device,
1921 VkDeviceMemory _memory,
1922 VkDeviceSize offset,
1923 VkDeviceSize size,
1924 VkMemoryMapFlags flags,
1925 void **ppData)
1926 {
1927 TU_FROM_HANDLE(tu_device, device, _device);
1928 TU_FROM_HANDLE(tu_device_memory, mem, _memory);
1929 VkResult result;
1930
1931 if (mem == NULL) {
1932 *ppData = NULL;
1933 return VK_SUCCESS;
1934 }
1935
1936 if (mem->user_ptr) {
1937 *ppData = mem->user_ptr;
1938 } else if (!mem->map) {
1939 result = tu_bo_map(device, &mem->bo);
1940 if (result != VK_SUCCESS)
1941 return result;
1942 *ppData = mem->map = mem->bo.map;
1943 } else
1944 *ppData = mem->map;
1945
1946 if (*ppData) {
1947 *ppData += offset;
1948 return VK_SUCCESS;
1949 }
1950
1951 return vk_error(device->instance, VK_ERROR_MEMORY_MAP_FAILED);
1952 }
1953
1954 void
1955 tu_UnmapMemory(VkDevice _device, VkDeviceMemory _memory)
1956 {
1957 /* The freedreno Gallium driver does no unmapping either; BO mappings simply persist for the BO's lifetime, so this is a no-op. */
1958 }
1959
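/* Flush/invalidate are no-ops. The plausible reasoning (an editorial
 * assumption, not stated in the code): allocations are mapped
 * write-combined (MSM_BO_WC), so CPU writes bypass the cache and there is
 * nothing to flush or invalidate.
 */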
1960 VkResult
1961 tu_FlushMappedMemoryRanges(VkDevice _device,
1962 uint32_t memoryRangeCount,
1963 const VkMappedMemoryRange *pMemoryRanges)
1964 {
1965 return VK_SUCCESS;
1966 }
1967
1968 VkResult
1969 tu_InvalidateMappedMemoryRanges(VkDevice _device,
1970 uint32_t memoryRangeCount,
1971 const VkMappedMemoryRange *pMemoryRanges)
1972 {
1973 return VK_SUCCESS;
1974 }
1975
1976 void
1977 tu_GetBufferMemoryRequirements(VkDevice _device,
1978 VkBuffer _buffer,
1979 VkMemoryRequirements *pMemoryRequirements)
1980 {
1981 TU_FROM_HANDLE(tu_buffer, buffer, _buffer);
1982
1983 pMemoryRequirements->memoryTypeBits = 1;
1984 pMemoryRequirements->alignment = 64;
1985 pMemoryRequirements->size =
1986 align64(buffer->size, pMemoryRequirements->alignment);
1987 }
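
/* Worked example (editorial): a VkBuffer created with size = 100 reports
 * alignment = 64 and size = align64(100, 64) = 128; memoryTypeBits = 1
 * means only memory type 0 may back it.
 */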
1988
1989 void
1990 tu_GetBufferMemoryRequirements2(
1991 VkDevice device,
1992 const VkBufferMemoryRequirementsInfo2 *pInfo,
1993 VkMemoryRequirements2 *pMemoryRequirements)
1994 {
1995 tu_GetBufferMemoryRequirements(device, pInfo->buffer,
1996 &pMemoryRequirements->memoryRequirements);
1997 }
1998
1999 void
2000 tu_GetImageMemoryRequirements(VkDevice _device,
2001 VkImage _image,
2002 VkMemoryRequirements *pMemoryRequirements)
2003 {
2004 TU_FROM_HANDLE(tu_image, image, _image);
2005
2006 pMemoryRequirements->memoryTypeBits = 1;
2007 pMemoryRequirements->size = image->total_size;
2008 pMemoryRequirements->alignment = image->layout[0].base_align;
2009 }
2010
2011 void
2012 tu_GetImageMemoryRequirements2(VkDevice device,
2013 const VkImageMemoryRequirementsInfo2 *pInfo,
2014 VkMemoryRequirements2 *pMemoryRequirements)
2015 {
2016 tu_GetImageMemoryRequirements(device, pInfo->image,
2017 &pMemoryRequirements->memoryRequirements);
2018 }
2019
2020 void
2021 tu_GetImageSparseMemoryRequirements(
2022 VkDevice device,
2023 VkImage image,
2024 uint32_t *pSparseMemoryRequirementCount,
2025 VkSparseImageMemoryRequirements *pSparseMemoryRequirements)
2026 {
2027 tu_stub();
2028 }
2029
2030 void
2031 tu_GetImageSparseMemoryRequirements2(
2032 VkDevice device,
2033 const VkImageSparseMemoryRequirementsInfo2 *pInfo,
2034 uint32_t *pSparseMemoryRequirementCount,
2035 VkSparseImageMemoryRequirements2 *pSparseMemoryRequirements)
2036 {
2037 tu_stub();
2038 }
2039
2040 void
2041 tu_GetDeviceMemoryCommitment(VkDevice device,
2042 VkDeviceMemory memory,
2043 VkDeviceSize *pCommittedMemoryInBytes)
2044 {
2045 *pCommittedMemoryInBytes = 0;
2046 }
2047
2048 VkResult
2049 tu_BindBufferMemory2(VkDevice device,
2050 uint32_t bindInfoCount,
2051 const VkBindBufferMemoryInfo *pBindInfos)
2052 {
2053 for (uint32_t i = 0; i < bindInfoCount; ++i) {
2054 TU_FROM_HANDLE(tu_device_memory, mem, pBindInfos[i].memory);
2055 TU_FROM_HANDLE(tu_buffer, buffer, pBindInfos[i].buffer);
2056
2057 if (mem) {
2058 buffer->bo = &mem->bo;
2059 buffer->bo_offset = pBindInfos[i].memoryOffset;
2060 } else {
2061 buffer->bo = NULL;
2062 }
2063 }
2064 return VK_SUCCESS;
2065 }
2066
2067 VkResult
2068 tu_BindBufferMemory(VkDevice device,
2069 VkBuffer buffer,
2070 VkDeviceMemory memory,
2071 VkDeviceSize memoryOffset)
2072 {
2073 const VkBindBufferMemoryInfo info = {
2074 .sType = VK_STRUCTURE_TYPE_BIND_BUFFER_MEMORY_INFO,
2075 .buffer = buffer,
2076 .memory = memory,
2077 .memoryOffset = memoryOffset
2078 };
2079
2080 return tu_BindBufferMemory2(device, 1, &info);
2081 }
2082
2083 VkResult
2084 tu_BindImageMemory2(VkDevice device,
2085 uint32_t bindInfoCount,
2086 const VkBindImageMemoryInfo *pBindInfos)
2087 {
2088 for (uint32_t i = 0; i < bindInfoCount; ++i) {
2089 TU_FROM_HANDLE(tu_image, image, pBindInfos[i].image);
2090 TU_FROM_HANDLE(tu_device_memory, mem, pBindInfos[i].memory);
2091
2092 if (mem) {
2093 image->bo = &mem->bo;
2094 image->bo_offset = pBindInfos[i].memoryOffset;
2095 } else {
2096 image->bo = NULL;
2097 image->bo_offset = 0;
2098 }
2099 }
2100
2101 return VK_SUCCESS;
2102 }
2103
2104 VkResult
2105 tu_BindImageMemory(VkDevice device,
2106 VkImage image,
2107 VkDeviceMemory memory,
2108 VkDeviceSize memoryOffset)
2109 {
2110 const VkBindImageMemoryInfo info = {
2111 .sType = VK_STRUCTURE_TYPE_BIND_IMAGE_MEMORY_INFO,
2112 .image = image,
2113 .memory = memory,
2114 .memoryOffset = memoryOffset
2115 };
2116
2117 return tu_BindImageMemory2(device, 1, &info);
2118 }
2119
2120 VkResult
2121 tu_QueueBindSparse(VkQueue _queue,
2122 uint32_t bindInfoCount,
2123 const VkBindSparseInfo *pBindInfo,
2124 VkFence _fence)
2125 {
2126 return VK_SUCCESS;
2127 }
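
/* Editorial note: sparse binding is not supported; the sparse-requirements
 * queries above are stubs and the feature bits are presumably not
 * advertised, so this entrypoint should never see real bind operations.
 */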
2128
2129 /* Queue semaphore functions */
2130
2131
2132 static void
2133 tu_semaphore_part_destroy(struct tu_device *device,
2134 struct tu_semaphore_part *part)
2135 {
2136 switch(part->kind) {
2137 case TU_SEMAPHORE_NONE:
2138 break;
2139 case TU_SEMAPHORE_SYNCOBJ:
2140 drmSyncobjDestroy(device->physical_device->local_fd, part->syncobj);
2141 break;
2142 }
2143 part->kind = TU_SEMAPHORE_NONE;
2144 }
2145
2146 static void
2147 tu_semaphore_remove_temp(struct tu_device *device,
2148 struct tu_semaphore *sem)
2149 {
2150 if (sem->temporary.kind != TU_SEMAPHORE_NONE) {
2151 tu_semaphore_part_destroy(device, &sem->temporary);
2152 }
2153 }
2154
2155 VkResult
2156 tu_CreateSemaphore(VkDevice _device,
2157 const VkSemaphoreCreateInfo *pCreateInfo,
2158 const VkAllocationCallbacks *pAllocator,
2159 VkSemaphore *pSemaphore)
2160 {
2161 TU_FROM_HANDLE(tu_device, device, _device);
2162
2163 struct tu_semaphore *sem =
2164 vk_object_alloc(&device->vk, pAllocator, sizeof(*sem),
2165 VK_OBJECT_TYPE_SEMAPHORE);
2166 if (!sem)
2167 return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
2168
2169 const VkExportSemaphoreCreateInfo *export =
2170 vk_find_struct_const(pCreateInfo->pNext, EXPORT_SEMAPHORE_CREATE_INFO);
2171 VkExternalSemaphoreHandleTypeFlags handleTypes =
2172 export ? export->handleTypes : 0;
2173
2174 sem->permanent.kind = TU_SEMAPHORE_NONE;
2175 sem->temporary.kind = TU_SEMAPHORE_NONE;
2176
2177 if (handleTypes) {
2178 if (drmSyncobjCreate(device->physical_device->local_fd, 0, &sem->permanent.syncobj) < 0) {
2179 vk_object_free(&device->vk, pAllocator, sem);
2180 return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
2181 }
2182 sem->permanent.kind = TU_SEMAPHORE_SYNCOBJ;
2183 }
2184 *pSemaphore = tu_semaphore_to_handle(sem);
2185 return VK_SUCCESS;
2186 }
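
/* Illustrative sketch (editorial; handles hypothetical): only a semaphore
 * created with VkExportSemaphoreCreateInfo in its pNext chain gets a DRM
 * syncobj backing; otherwise both payload parts stay TU_SEMAPHORE_NONE.
 *
 *    VkExportSemaphoreCreateInfo export_info = {
 *       .sType = VK_STRUCTURE_TYPE_EXPORT_SEMAPHORE_CREATE_INFO,
 *       .handleTypes = VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT,
 *    };
 *    VkSemaphoreCreateInfo create_info = {
 *       .sType = VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO,
 *       .pNext = &export_info,
 *    };
 *    vkCreateSemaphore(device, &create_info, NULL, &sem);
 */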
2187
2188 void
2189 tu_DestroySemaphore(VkDevice _device,
2190 VkSemaphore _semaphore,
2191 const VkAllocationCallbacks *pAllocator)
2192 {
2193 TU_FROM_HANDLE(tu_device, device, _device);
2194 TU_FROM_HANDLE(tu_semaphore, sem, _semaphore);
2195 if (!_semaphore)
2196 return;
2197
2198 tu_semaphore_part_destroy(device, &sem->permanent);
2199 tu_semaphore_part_destroy(device, &sem->temporary);
2200
2201 vk_object_free(&device->vk, pAllocator, sem);
2202 }
2203
2204 VkResult
2205 tu_CreateEvent(VkDevice _device,
2206 const VkEventCreateInfo *pCreateInfo,
2207 const VkAllocationCallbacks *pAllocator,
2208 VkEvent *pEvent)
2209 {
2210 TU_FROM_HANDLE(tu_device, device, _device);
2211
2212 struct tu_event *event =
2213 vk_object_alloc(&device->vk, pAllocator, sizeof(*event),
2214 VK_OBJECT_TYPE_EVENT);
2215 if (!event)
2216 return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
2217
2218 VkResult result = tu_bo_init_new(device, &event->bo, 0x1000);
2219 if (result != VK_SUCCESS)
2220 goto fail_alloc;
2221
2222 result = tu_bo_map(device, &event->bo);
2223 if (result != VK_SUCCESS)
2224 goto fail_map;
2225
2226 *pEvent = tu_event_to_handle(event);
2227
2228 return VK_SUCCESS;
2229
2230 fail_map:
2231 tu_bo_finish(device, &event->bo);
2232 fail_alloc:
2233 vk_object_free(&device->vk, pAllocator, event);
2234 return result;
2235 }
2236
2237 void
2238 tu_DestroyEvent(VkDevice _device,
2239 VkEvent _event,
2240 const VkAllocationCallbacks *pAllocator)
2241 {
2242 TU_FROM_HANDLE(tu_device, device, _device);
2243 TU_FROM_HANDLE(tu_event, event, _event);
2244
2245 if (!event)
2246 return;
2247
2248 tu_bo_finish(device, &event->bo);
2249 vk_object_free(&device->vk, pAllocator, event);
2250 }
2251
2252 VkResult
2253 tu_GetEventStatus(VkDevice _device, VkEvent _event)
2254 {
2255 TU_FROM_HANDLE(tu_event, event, _event);
2256
2257 if (*(uint64_t*) event->bo.map == 1)
2258 return VK_EVENT_SET;
2259 return VK_EVENT_RESET;
2260 }
2261
2262 VkResult
2263 tu_SetEvent(VkDevice _device, VkEvent _event)
2264 {
2265 TU_FROM_HANDLE(tu_event, event, _event);
2266 *(uint64_t*) event->bo.map = 1;
2267
2268 return VK_SUCCESS;
2269 }
2270
2271 VkResult
2272 tu_ResetEvent(VkDevice _device, VkEvent _event)
2273 {
2274 TU_FROM_HANDLE(tu_event, event, _event);
2275 *(uint64_t*) event->bo.map = 0;
2276
2277 return VK_SUCCESS;
2278 }
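
/* Host-side event round trip, for illustration (editorial; handles
 * hypothetical). An event is a 4 KiB BO whose first 64-bit word holds the
 * state, 1 = set and 0 = reset, exactly as read and written above.
 *
 *    vkSetEvent(device, event);     // writes 1
 *    assert(vkGetEventStatus(device, event) == VK_EVENT_SET);
 *    vkResetEvent(device, event);   // writes 0
 *    assert(vkGetEventStatus(device, event) == VK_EVENT_RESET);
 */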
2279
2280 VkResult
2281 tu_CreateBuffer(VkDevice _device,
2282 const VkBufferCreateInfo *pCreateInfo,
2283 const VkAllocationCallbacks *pAllocator,
2284 VkBuffer *pBuffer)
2285 {
2286 TU_FROM_HANDLE(tu_device, device, _device);
2287 struct tu_buffer *buffer;
2288
2289 assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO);
2290
2291 buffer = vk_object_alloc(&device->vk, pAllocator, sizeof(*buffer),
2292 VK_OBJECT_TYPE_BUFFER);
2293 if (buffer == NULL)
2294 return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
2295
2296 buffer->size = pCreateInfo->size;
2297 buffer->usage = pCreateInfo->usage;
2298 buffer->flags = pCreateInfo->flags;
2299
2300 *pBuffer = tu_buffer_to_handle(buffer);
2301
2302 return VK_SUCCESS;
2303 }
2304
2305 void
2306 tu_DestroyBuffer(VkDevice _device,
2307 VkBuffer _buffer,
2308 const VkAllocationCallbacks *pAllocator)
2309 {
2310 TU_FROM_HANDLE(tu_device, device, _device);
2311 TU_FROM_HANDLE(tu_buffer, buffer, _buffer);
2312
2313 if (!buffer)
2314 return;
2315
2316 vk_object_free(&device->vk, pAllocator, buffer);
2317 }
2318
2319 VkResult
2320 tu_CreateFramebuffer(VkDevice _device,
2321 const VkFramebufferCreateInfo *pCreateInfo,
2322 const VkAllocationCallbacks *pAllocator,
2323 VkFramebuffer *pFramebuffer)
2324 {
2325 TU_FROM_HANDLE(tu_device, device, _device);
2326 TU_FROM_HANDLE(tu_render_pass, pass, pCreateInfo->renderPass);
2327 struct tu_framebuffer *framebuffer;
2328
2329 assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO);
2330
2331 size_t size = sizeof(*framebuffer) + sizeof(struct tu_attachment_info) *
2332 pCreateInfo->attachmentCount;
2333 framebuffer = vk_object_alloc(&device->vk, pAllocator, size,
2334 VK_OBJECT_TYPE_FRAMEBUFFER);
2335 if (framebuffer == NULL)
2336 return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
2337
2338 framebuffer->attachment_count = pCreateInfo->attachmentCount;
2339 framebuffer->width = pCreateInfo->width;
2340 framebuffer->height = pCreateInfo->height;
2341 framebuffer->layers = pCreateInfo->layers;
2342 for (uint32_t i = 0; i < pCreateInfo->attachmentCount; i++) {
2343 VkImageView _iview = pCreateInfo->pAttachments[i];
2344 struct tu_image_view *iview = tu_image_view_from_handle(_iview);
2345 framebuffer->attachments[i].attachment = iview;
2346 }
2347
2348 tu_framebuffer_tiling_config(framebuffer, device, pass);
2349
2350 *pFramebuffer = tu_framebuffer_to_handle(framebuffer);
2351 return VK_SUCCESS;
2352 }
2353
2354 void
2355 tu_DestroyFramebuffer(VkDevice _device,
2356 VkFramebuffer _fb,
2357 const VkAllocationCallbacks *pAllocator)
2358 {
2359 TU_FROM_HANDLE(tu_device, device, _device);
2360 TU_FROM_HANDLE(tu_framebuffer, fb, _fb);
2361
2362 if (!fb)
2363 return;
2364
2365 vk_object_free(&device->vk, pAllocator, fb);
2366 }
2367
2368 static void
2369 tu_init_sampler(struct tu_device *device,
2370 struct tu_sampler *sampler,
2371 const VkSamplerCreateInfo *pCreateInfo)
2372 {
2373 const struct VkSamplerReductionModeCreateInfo *reduction =
2374 vk_find_struct_const(pCreateInfo->pNext, SAMPLER_REDUCTION_MODE_CREATE_INFO);
2375 const struct VkSamplerYcbcrConversionInfo *ycbcr_conversion =
2376 vk_find_struct_const(pCreateInfo->pNext, SAMPLER_YCBCR_CONVERSION_INFO);
2377
2378 unsigned aniso = pCreateInfo->anisotropyEnable ?
2379 util_last_bit(MIN2((uint32_t)pCreateInfo->maxAnisotropy >> 1, 8)) : 0;
2380 bool miplinear = (pCreateInfo->mipmapMode == VK_SAMPLER_MIPMAP_MODE_LINEAR);
2381 float min_lod = CLAMP(pCreateInfo->minLod, 0.0f, 4095.0f / 256.0f);
2382 float max_lod = CLAMP(pCreateInfo->maxLod, 0.0f, 4095.0f / 256.0f);
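   /* Editorial note: 4095.0/256.0 = 15.996, which looks like the top of an
    * unsigned 12.8 fixed-point range, presumably the hardware encoding of
    * MIN_LOD/MAX_LOD. E.g. minLod = 2.5 passes through; minLod = 100.0
    * clamps to 15.996.
    */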
2383
2384 sampler->descriptor[0] =
2385 COND(miplinear, A6XX_TEX_SAMP_0_MIPFILTER_LINEAR_NEAR) |
2386 A6XX_TEX_SAMP_0_XY_MAG(tu6_tex_filter(pCreateInfo->magFilter, aniso)) |
2387 A6XX_TEX_SAMP_0_XY_MIN(tu6_tex_filter(pCreateInfo->minFilter, aniso)) |
2388 A6XX_TEX_SAMP_0_ANISO(aniso) |
2389 A6XX_TEX_SAMP_0_WRAP_S(tu6_tex_wrap(pCreateInfo->addressModeU)) |
2390 A6XX_TEX_SAMP_0_WRAP_T(tu6_tex_wrap(pCreateInfo->addressModeV)) |
2391 A6XX_TEX_SAMP_0_WRAP_R(tu6_tex_wrap(pCreateInfo->addressModeW)) |
2392 A6XX_TEX_SAMP_0_LOD_BIAS(pCreateInfo->mipLodBias);
2393 sampler->descriptor[1] =
2394 /* COND(!cso->seamless_cube_map, A6XX_TEX_SAMP_1_CUBEMAPSEAMLESSFILTOFF) | */
2395 COND(pCreateInfo->unnormalizedCoordinates, A6XX_TEX_SAMP_1_UNNORM_COORDS) |
2396 A6XX_TEX_SAMP_1_MIN_LOD(min_lod) |
2397 A6XX_TEX_SAMP_1_MAX_LOD(max_lod) |
2398 COND(pCreateInfo->compareEnable,
2399 A6XX_TEX_SAMP_1_COMPARE_FUNC(tu6_compare_func(pCreateInfo->compareOp)));
2400 /* This is an offset into the border_color BO, which we fill with all the
2401 * possible Vulkan border colors in the correct order, so we can just use
2402 * the Vulkan enum with no translation necessary.
2403 */
2404 sampler->descriptor[2] =
2405 A6XX_TEX_SAMP_2_BCOLOR_OFFSET((unsigned) pCreateInfo->borderColor *
2406 sizeof(struct bcolor_entry));
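   /* Worked example (editorial): VK_BORDER_COLOR_FLOAT_OPAQUE_BLACK has
    * enum value 2, so its colors live at byte offset
    * 2 * sizeof(struct bcolor_entry) in the border_color BO.
    */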
2407 sampler->descriptor[3] = 0;
2408
2409 if (reduction) {
2410 sampler->descriptor[2] |= A6XX_TEX_SAMP_2_REDUCTION_MODE(
2411 tu6_reduction_mode(reduction->reductionMode));
2412 }
2413
2414 sampler->ycbcr_sampler = ycbcr_conversion ?
2415 tu_sampler_ycbcr_conversion_from_handle(ycbcr_conversion->conversion) : NULL;
2416
2417 if (sampler->ycbcr_sampler &&
2418 sampler->ycbcr_sampler->chroma_filter == VK_FILTER_LINEAR) {
2419 sampler->descriptor[2] |= A6XX_TEX_SAMP_2_CHROMA_LINEAR;
2420 }
2421
2422 /* TODO:
2423 * A6XX_TEX_SAMP_1_MIPFILTER_LINEAR_FAR disables mipmapping, but Vulkan has no NONE mipfilter?
2424 */
2425 }
2426
2427 VkResult
2428 tu_CreateSampler(VkDevice _device,
2429 const VkSamplerCreateInfo *pCreateInfo,
2430 const VkAllocationCallbacks *pAllocator,
2431 VkSampler *pSampler)
2432 {
2433 TU_FROM_HANDLE(tu_device, device, _device);
2434 struct tu_sampler *sampler;
2435
2436 assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_SAMPLER_CREATE_INFO);
2437
2438 sampler = vk_object_alloc(&device->vk, pAllocator, sizeof(*sampler),
2439 VK_OBJECT_TYPE_SAMPLER);
2440 if (!sampler)
2441 return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
2442
2443 tu_init_sampler(device, sampler, pCreateInfo);
2444 *pSampler = tu_sampler_to_handle(sampler);
2445
2446 return VK_SUCCESS;
2447 }
2448
2449 void
2450 tu_DestroySampler(VkDevice _device,
2451 VkSampler _sampler,
2452 const VkAllocationCallbacks *pAllocator)
2453 {
2454 TU_FROM_HANDLE(tu_device, device, _device);
2455 TU_FROM_HANDLE(tu_sampler, sampler, _sampler);
2456
2457 if (!sampler)
2458 return;
2459
2460 vk_object_free(&device->vk, pAllocator, sampler);
2461 }
2462
2463 /* vk_icd.h does not declare this function, so we declare it here to
2464 * suppress Wmissing-prototypes.
2465 */
2466 PUBLIC VKAPI_ATTR VkResult VKAPI_CALL
2467 vk_icdNegotiateLoaderICDInterfaceVersion(uint32_t *pSupportedVersion);
2468
2469 PUBLIC VKAPI_ATTR VkResult VKAPI_CALL
2470 vk_icdNegotiateLoaderICDInterfaceVersion(uint32_t *pSupportedVersion)
2471 {
2472 /* For the full details on loader interface versioning, see
2473 * <https://github.com/KhronosGroup/Vulkan-LoaderAndValidationLayers/blob/master/loader/LoaderAndLayerInterface.md>.
2474 * What follows is a condensed summary, to help you navigate the large and
2475 * confusing official doc.
2476 *
2477 * - Loader interface v0 is incompatible with later versions. We don't
2478 * support it.
2479 *
2480 * - In loader interface v1:
2481 * - The first ICD entrypoint called by the loader is
2482 * vk_icdGetInstanceProcAddr(). The ICD must statically expose this
2483 * entrypoint.
2484 * - The ICD must statically expose no other Vulkan symbol unless it
2485 * is linked with -Bsymbolic.
2486 * - Each dispatchable Vulkan handle created by the ICD must be
2487 * a pointer to a struct whose first member is VK_LOADER_DATA. The
2488 * ICD must initialize VK_LOADER_DATA.loadMagic to
2489 * ICD_LOADER_MAGIC.
2490 * - The loader implements vkCreate{PLATFORM}SurfaceKHR() and
2491 * vkDestroySurfaceKHR(). The ICD must be capable of working with
2492 * such loader-managed surfaces.
2493 *
2494 * - Loader interface v2 differs from v1 in:
2495 * - The first ICD entrypoint called by the loader is
2496 * vk_icdNegotiateLoaderICDInterfaceVersion(). The ICD must
2497 * statically expose this entrypoint.
2498 *
2499 * - Loader interface v3 differs from v2 in:
2500 * - The ICD must implement vkCreate{PLATFORM}SurfaceKHR(),
2501 * vkDestroySurfaceKHR(), and other API which uses VkSurfaceKHR,
2502 * because the loader no longer does so.
2503 */
2504 *pSupportedVersion = MIN2(*pSupportedVersion, 3u);
2505 return VK_SUCCESS;
2506 }
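
/* Worked example (editorial): a loader offering interface v5 passes
 * *pSupportedVersion = 5 and gets 3 back; an old loader passing 1 gets 1
 * back, since MIN2(1, 3) == 1 and v1 is still accepted.
 */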
2507
2508 VkResult
2509 tu_GetMemoryFdKHR(VkDevice _device,
2510 const VkMemoryGetFdInfoKHR *pGetFdInfo,
2511 int *pFd)
2512 {
2513 TU_FROM_HANDLE(tu_device, device, _device);
2514 TU_FROM_HANDLE(tu_device_memory, memory, pGetFdInfo->memory);
2515
2516 assert(pGetFdInfo->sType == VK_STRUCTURE_TYPE_MEMORY_GET_FD_INFO_KHR);
2517
2518 /* At the moment, we support only the handle types asserted below. */
2519 assert(pGetFdInfo->handleType ==
2520 VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT ||
2521 pGetFdInfo->handleType ==
2522 VK_EXTERNAL_MEMORY_HANDLE_TYPE_DMA_BUF_BIT_EXT);
2523
2524 int prime_fd = tu_bo_export_dmabuf(device, &memory->bo);
2525 if (prime_fd < 0)
2526 return vk_error(device->instance, VK_ERROR_OUT_OF_DEVICE_MEMORY);
2527
2528 *pFd = prime_fd;
2529 return VK_SUCCESS;
2530 }
2531
2532 VkResult
2533 tu_GetMemoryFdPropertiesKHR(VkDevice _device,
2534 VkExternalMemoryHandleTypeFlagBits handleType,
2535 int fd,
2536 VkMemoryFdPropertiesKHR *pMemoryFdProperties)
2537 {
2538 assert(handleType == VK_EXTERNAL_MEMORY_HANDLE_TYPE_DMA_BUF_BIT_EXT);
2539 pMemoryFdProperties->memoryTypeBits = 1;
2540 return VK_SUCCESS;
2541 }
2542
2543 VkResult
2544 tu_ImportFenceFdKHR(VkDevice _device,
2545 const VkImportFenceFdInfoKHR *pImportFenceFdInfo)
2546 {
2547 tu_stub();
2548
2549 return VK_SUCCESS;
2550 }
2551
2552 VkResult
2553 tu_GetFenceFdKHR(VkDevice _device,
2554 const VkFenceGetFdInfoKHR *pGetFdInfo,
2555 int *pFd)
2556 {
2557 tu_stub();
2558
2559 return VK_SUCCESS;
2560 }
2561
2562 VkResult
2563 tu_ImportSemaphoreFdKHR(VkDevice _device,
2564 const VkImportSemaphoreFdInfoKHR *pImportSemaphoreFdInfo)
2565 {
2566 TU_FROM_HANDLE(tu_device, device, _device);
2567 TU_FROM_HANDLE(tu_semaphore, sem, pImportSemaphoreFdInfo->semaphore);
2568 int ret;
2569 struct tu_semaphore_part *dst = NULL;
2570
2571 if (pImportSemaphoreFdInfo->flags & VK_SEMAPHORE_IMPORT_TEMPORARY_BIT) {
2572 dst = &sem->temporary;
2573 } else {
2574 dst = &sem->permanent;
2575 }
2576
2577 uint32_t syncobj = dst->kind == TU_SEMAPHORE_SYNCOBJ ? dst->syncobj : 0;
2578
2579 switch(pImportSemaphoreFdInfo->handleType) {
2580 case VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT: {
2581 uint32_t old_syncobj = syncobj;
2582 ret = drmSyncobjFDToHandle(device->physical_device->local_fd, pImportSemaphoreFdInfo->fd, &syncobj);
2583 if (ret == 0) {
2584 close(pImportSemaphoreFdInfo->fd);
2585 if (old_syncobj)
2586 drmSyncobjDestroy(device->physical_device->local_fd, old_syncobj);
2587 }
2588 break;
2589 }
2590 case VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT: {
2591 if (!syncobj) {
2592 ret = drmSyncobjCreate(device->physical_device->local_fd, 0, &syncobj);
2593 if (ret)
2594 break;
2595 }
2596 if (pImportSemaphoreFdInfo->fd == -1) {
2597 ret = drmSyncobjSignal(device->physical_device->local_fd, &syncobj, 1);
2598 } else {
2599 ret = drmSyncobjImportSyncFile(device->physical_device->local_fd, syncobj, pImportSemaphoreFdInfo->fd);
2600 }
2601 if (!ret && pImportSemaphoreFdInfo->fd != -1)
2602 close(pImportSemaphoreFdInfo->fd);
2603 break;
2604 }
2605 default:
2606 unreachable("Unhandled semaphore handle type");
2607 }
2608
2609 if (ret) {
2610 return VK_ERROR_INVALID_EXTERNAL_HANDLE;
2611 }
2612 dst->syncobj = syncobj;
2613 dst->kind = TU_SEMAPHORE_SYNCOBJ;
2614
2615 return VK_SUCCESS;
2616 }
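
/* Illustrative import flow (editorial; handles hypothetical). With
 * VK_SEMAPHORE_IMPORT_TEMPORARY_BIT the payload lands in sem->temporary and
 * is dropped after the next wait (see tu_semaphores_remove_temp); without
 * it, the permanent payload is replaced.
 *
 *    VkImportSemaphoreFdInfoKHR import_info = {
 *       .sType = VK_STRUCTURE_TYPE_IMPORT_SEMAPHORE_FD_INFO_KHR,
 *       .semaphore = sem,
 *       .flags = VK_SEMAPHORE_IMPORT_TEMPORARY_BIT,
 *       .handleType = VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT,
 *       .fd = sync_fd,   // ownership passes to the driver on success
 *    };
 *    vkImportSemaphoreFdKHR(device, &import_info);
 */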
2617
2618 VkResult
2619 tu_GetSemaphoreFdKHR(VkDevice _device,
2620 const VkSemaphoreGetFdInfoKHR *pGetFdInfo,
2621 int *pFd)
2622 {
2623 TU_FROM_HANDLE(tu_device, device, _device);
2624 TU_FROM_HANDLE(tu_semaphore, sem, pGetFdInfo->semaphore);
2625 int ret;
2626 uint32_t syncobj_handle;
2627
2628 if (sem->temporary.kind != TU_SEMAPHORE_NONE) {
2629 assert(sem->temporary.kind == TU_SEMAPHORE_SYNCOBJ);
2630 syncobj_handle = sem->temporary.syncobj;
2631 } else {
2632 assert(sem->permanent.kind == TU_SEMAPHORE_SYNCOBJ);
2633 syncobj_handle = sem->permanent.syncobj;
2634 }
2635
2636 switch(pGetFdInfo->handleType) {
2637 case VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT:
2638 ret = drmSyncobjHandleToFD(device->physical_device->local_fd, syncobj_handle, pFd);
2639 break;
2640 case VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT:
2641 ret = drmSyncobjExportSyncFile(device->physical_device->local_fd, syncobj_handle, pFd);
2642 if (!ret) {
2643 if (sem->temporary.kind != TU_SEMAPHORE_NONE) {
2644 tu_semaphore_part_destroy(device, &sem->temporary);
2645 } else {
2646 drmSyncobjReset(device->physical_device->local_fd, &syncobj_handle, 1);
2647 }
2648 }
2649 break;
2650 default:
2651 unreachable("Unhandled semaphore handle type");
2652 }
2653
2654 if (ret)
2655 return vk_error(device->instance, VK_ERROR_INVALID_EXTERNAL_HANDLE);
2656 return VK_SUCCESS;
2657 }
2658
2659 static bool
2660 tu_has_syncobj(struct tu_physical_device *pdev)
2661 {
2662 uint64_t value;
2663 if (drmGetCap(pdev->local_fd, DRM_CAP_SYNCOBJ, &value))
2664 return false;
2665 return value && pdev->msm_major_version == 1 && pdev->msm_minor_version >= 6;
2666 }
2667
2668 void
2669 tu_GetPhysicalDeviceExternalSemaphoreProperties(
2670 VkPhysicalDevice physicalDevice,
2671 const VkPhysicalDeviceExternalSemaphoreInfo *pExternalSemaphoreInfo,
2672 VkExternalSemaphoreProperties *pExternalSemaphoreProperties)
2673 {
2674 TU_FROM_HANDLE(tu_physical_device, pdev, physicalDevice);
2675
2676 if (tu_has_syncobj(pdev) &&
2677 (pExternalSemaphoreInfo->handleType == VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT ||
2678 pExternalSemaphoreInfo->handleType == VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT)) {
2679 pExternalSemaphoreProperties->exportFromImportedHandleTypes = VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT | VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT;
2680 pExternalSemaphoreProperties->compatibleHandleTypes = VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT | VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT;
2681 pExternalSemaphoreProperties->externalSemaphoreFeatures = VK_EXTERNAL_SEMAPHORE_FEATURE_EXPORTABLE_BIT |
2682 VK_EXTERNAL_SEMAPHORE_FEATURE_IMPORTABLE_BIT;
2683 } else {
2684 pExternalSemaphoreProperties->exportFromImportedHandleTypes = 0;
2685 pExternalSemaphoreProperties->compatibleHandleTypes = 0;
2686 pExternalSemaphoreProperties->externalSemaphoreFeatures = 0;
2687 }
2688 }
2689
2690 void
2691 tu_GetPhysicalDeviceExternalFenceProperties(
2692 VkPhysicalDevice physicalDevice,
2693 const VkPhysicalDeviceExternalFenceInfo *pExternalFenceInfo,
2694 VkExternalFenceProperties *pExternalFenceProperties)
2695 {
2696 pExternalFenceProperties->exportFromImportedHandleTypes = 0;
2697 pExternalFenceProperties->compatibleHandleTypes = 0;
2698 pExternalFenceProperties->externalFenceFeatures = 0;
2699 }
2700
2701 VkResult
2702 tu_CreateDebugReportCallbackEXT(
2703 VkInstance _instance,
2704 const VkDebugReportCallbackCreateInfoEXT *pCreateInfo,
2705 const VkAllocationCallbacks *pAllocator,
2706 VkDebugReportCallbackEXT *pCallback)
2707 {
2708 TU_FROM_HANDLE(tu_instance, instance, _instance);
2709 return vk_create_debug_report_callback(&instance->debug_report_callbacks,
2710 pCreateInfo, pAllocator,
2711 &instance->alloc, pCallback);
2712 }
2713
2714 void
2715 tu_DestroyDebugReportCallbackEXT(VkInstance _instance,
2716 VkDebugReportCallbackEXT _callback,
2717 const VkAllocationCallbacks *pAllocator)
2718 {
2719 TU_FROM_HANDLE(tu_instance, instance, _instance);
2720 vk_destroy_debug_report_callback(&instance->debug_report_callbacks,
2721 _callback, pAllocator, &instance->alloc);
2722 }
2723
2724 void
2725 tu_DebugReportMessageEXT(VkInstance _instance,
2726 VkDebugReportFlagsEXT flags,
2727 VkDebugReportObjectTypeEXT objectType,
2728 uint64_t object,
2729 size_t location,
2730 int32_t messageCode,
2731 const char *pLayerPrefix,
2732 const char *pMessage)
2733 {
2734 TU_FROM_HANDLE(tu_instance, instance, _instance);
2735 vk_debug_report(&instance->debug_report_callbacks, flags, objectType,
2736 object, location, messageCode, pLayerPrefix, pMessage);
2737 }
2738
2739 void
2740 tu_GetDeviceGroupPeerMemoryFeatures(
2741 VkDevice device,
2742 uint32_t heapIndex,
2743 uint32_t localDeviceIndex,
2744 uint32_t remoteDeviceIndex,
2745 VkPeerMemoryFeatureFlags *pPeerMemoryFeatures)
2746 {
2747 assert(localDeviceIndex == remoteDeviceIndex);
2748
2749 *pPeerMemoryFeatures = VK_PEER_MEMORY_FEATURE_COPY_SRC_BIT |
2750 VK_PEER_MEMORY_FEATURE_COPY_DST_BIT |
2751 VK_PEER_MEMORY_FEATURE_GENERIC_SRC_BIT |
2752 VK_PEER_MEMORY_FEATURE_GENERIC_DST_BIT;
2753 }
2754
2755 void tu_GetPhysicalDeviceMultisamplePropertiesEXT(
2756 VkPhysicalDevice physicalDevice,
2757 VkSampleCountFlagBits samples,
2758 VkMultisamplePropertiesEXT* pMultisampleProperties)
2759 {
2760 TU_FROM_HANDLE(tu_physical_device, pdevice, physicalDevice);
2761
2762 if (samples <= VK_SAMPLE_COUNT_4_BIT && pdevice->supported_extensions.EXT_sample_locations)
2763 pMultisampleProperties->maxSampleLocationGridSize = (VkExtent2D){ 1, 1 };
2764 else
2765 pMultisampleProperties->maxSampleLocationGridSize = (VkExtent2D){ 0, 0 };
2766 }
2767
2768
2769 VkResult
2770 tu_CreatePrivateDataSlotEXT(VkDevice _device,
2771 const VkPrivateDataSlotCreateInfoEXT* pCreateInfo,
2772 const VkAllocationCallbacks* pAllocator,
2773 VkPrivateDataSlotEXT* pPrivateDataSlot)
2774 {
2775 TU_FROM_HANDLE(tu_device, device, _device);
2776 return vk_private_data_slot_create(&device->vk,
2777 pCreateInfo,
2778 pAllocator,
2779 pPrivateDataSlot);
2780 }
2781
2782 void
2783 tu_DestroyPrivateDataSlotEXT(VkDevice _device,
2784 VkPrivateDataSlotEXT privateDataSlot,
2785 const VkAllocationCallbacks* pAllocator)
2786 {
2787 TU_FROM_HANDLE(tu_device, device, _device);
2788 vk_private_data_slot_destroy(&device->vk, privateDataSlot, pAllocator);
2789 }
2790
2791 VkResult
2792 tu_SetPrivateDataEXT(VkDevice _device,
2793 VkObjectType objectType,
2794 uint64_t objectHandle,
2795 VkPrivateDataSlotEXT privateDataSlot,
2796 uint64_t data)
2797 {
2798 TU_FROM_HANDLE(tu_device, device, _device);
2799 return vk_object_base_set_private_data(&device->vk,
2800 objectType,
2801 objectHandle,
2802 privateDataSlot,
2803 data);
2804 }
2805
2806 void
2807 tu_GetPrivateDataEXT(VkDevice _device,
2808 VkObjectType objectType,
2809 uint64_t objectHandle,
2810 VkPrivateDataSlotEXT privateDataSlot,
2811 uint64_t* pData)
2812 {
2813 TU_FROM_HANDLE(tu_device, device, _device);
2814 vk_object_base_get_private_data(&device->vk,
2815 objectType,
2816 objectHandle,
2817 privateDataSlot,
2818 pData);
2819 }