tu: Implement VK_EXT_conditional_rendering
[mesa.git] / src / freedreno / vulkan / tu_device.c
1 /*
2 * Copyright © 2016 Red Hat.
3 * Copyright © 2016 Bas Nieuwenhuizen
4 *
5 * based in part on anv driver which is:
6 * Copyright © 2015 Intel Corporation
7 *
8 * Permission is hereby granted, free of charge, to any person obtaining a
9 * copy of this software and associated documentation files (the "Software"),
10 * to deal in the Software without restriction, including without limitation
11 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
12 * and/or sell copies of the Software, and to permit persons to whom the
13 * Software is furnished to do so, subject to the following conditions:
14 *
15 * The above copyright notice and this permission notice (including the next
16 * paragraph) shall be included in all copies or substantial portions of the
17 * Software.
18 *
19 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
20 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
21 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
22 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
23 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
24 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
25 * DEALINGS IN THE SOFTWARE.
26 */
27
28 #include "tu_private.h"
29
30 #include <fcntl.h>
31 #include <libsync.h>
32 #include <stdbool.h>
33 #include <string.h>
34 #include <sys/mman.h>
35 #include <sys/sysinfo.h>
36 #include <unistd.h>
37 #include <xf86drm.h>
38
39 #include "compiler/glsl_types.h"
40 #include "util/debug.h"
41 #include "util/disk_cache.h"
42 #include "util/u_atomic.h"
43 #include "vk_format.h"
44 #include "vk_util.h"
45
46 #include "drm-uapi/msm_drm.h"
47
48 /* for fd_get_driver/device_uuid() */
49 #include "freedreno/common/freedreno_uuid.h"
50
51 static void
52 tu_semaphore_remove_temp(struct tu_device *device,
53 struct tu_semaphore *sem);
54
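/* Cache UUID layout, per the code below: bytes 0-3 hold the Mesa build
 * timestamp, bytes 4-5 the GPU family, and bytes 6+ the literal "tu".
 */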
55 static int
56 tu_device_get_cache_uuid(uint16_t family, void *uuid)
57 {
58 uint32_t mesa_timestamp;
59 uint16_t f = family;
60 memset(uuid, 0, VK_UUID_SIZE);
61 if (!disk_cache_get_function_timestamp(tu_device_get_cache_uuid,
62 &mesa_timestamp))
63 return -1;
64
65 memcpy(uuid, &mesa_timestamp, 4);
66 memcpy((char *) uuid + 4, &f, 2);
67 snprintf((char *) uuid + 6, VK_UUID_SIZE - 10, "tu");
68 return 0;
69 }
70
71 static VkResult
72 tu_bo_init(struct tu_device *dev,
73 struct tu_bo *bo,
74 uint32_t gem_handle,
75 uint64_t size)
76 {
77 uint64_t iova = tu_gem_info_iova(dev, gem_handle);
78 if (!iova)
79 return VK_ERROR_OUT_OF_DEVICE_MEMORY;
80
81 *bo = (struct tu_bo) {
82 .gem_handle = gem_handle,
83 .size = size,
84 .iova = iova,
85 };
86
87 return VK_SUCCESS;
88 }
89
90 VkResult
91 tu_bo_init_new(struct tu_device *dev, struct tu_bo *bo, uint64_t size)
92 {
93 /* TODO: Choose better flags. As of 2018-11-12, freedreno/drm/msm_bo.c
94 * always sets `flags = MSM_BO_WC`, and we copy that behavior here.
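 * (For reference, drm-uapi/msm_drm.h also defines MSM_BO_CACHED and
 * MSM_BO_GPU_READONLY, which might be worth considering for CPU-read-heavy
 * or immutable allocations.)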
95 */
96 uint32_t gem_handle = tu_gem_new(dev, size, MSM_BO_WC);
97 if (!gem_handle)
98 return vk_error(dev->instance, VK_ERROR_OUT_OF_DEVICE_MEMORY);
99
100 VkResult result = tu_bo_init(dev, bo, gem_handle, size);
101 if (result != VK_SUCCESS) {
102 tu_gem_close(dev, gem_handle);
103 return vk_error(dev->instance, result);
104 }
105
106 return VK_SUCCESS;
107 }
108
109 VkResult
110 tu_bo_init_dmabuf(struct tu_device *dev,
111 struct tu_bo *bo,
112 uint64_t size,
113 int fd)
114 {
115 uint32_t gem_handle = tu_gem_import_dmabuf(dev, fd, size);
116 if (!gem_handle)
117 return vk_error(dev->instance, VK_ERROR_INVALID_EXTERNAL_HANDLE);
118
119 VkResult result = tu_bo_init(dev, bo, gem_handle, size);
120 if (result != VK_SUCCESS) {
121 tu_gem_close(dev, gem_handle);
122 return vk_error(dev->instance, result);
123 }
124
125 return VK_SUCCESS;
126 }
127
128 int
129 tu_bo_export_dmabuf(struct tu_device *dev, struct tu_bo *bo)
130 {
131 return tu_gem_export_dmabuf(dev, bo->gem_handle);
132 }
133
134 VkResult
135 tu_bo_map(struct tu_device *dev, struct tu_bo *bo)
136 {
137 if (bo->map)
138 return VK_SUCCESS;
139
140 uint64_t offset = tu_gem_info_offset(dev, bo->gem_handle);
141 if (!offset)
142 return vk_error(dev->instance, VK_ERROR_OUT_OF_DEVICE_MEMORY);
143
144 /* TODO: Should we use the wrapper os_mmap() like Freedreno does? */
145 void *map = mmap(0, bo->size, PROT_READ | PROT_WRITE, MAP_SHARED,
146 dev->physical_device->local_fd, offset);
147 if (map == MAP_FAILED)
148 return vk_error(dev->instance, VK_ERROR_MEMORY_MAP_FAILED);
149
150 bo->map = map;
151 return VK_SUCCESS;
152 }
153
154 void
155 tu_bo_finish(struct tu_device *dev, struct tu_bo *bo)
156 {
157 assert(bo->gem_handle);
158
159 if (bo->map)
160 munmap(bo->map, bo->size);
161
162 tu_gem_close(dev, bo->gem_handle);
163 }
164
165 static VkResult
166 tu_physical_device_init(struct tu_physical_device *device,
167 struct tu_instance *instance,
168 drmDevicePtr drm_device)
169 {
170 const char *path = drm_device->nodes[DRM_NODE_RENDER];
171 VkResult result = VK_SUCCESS;
172 drmVersionPtr version;
173 int fd;
174 int master_fd = -1;
175
176 fd = open(path, O_RDWR | O_CLOEXEC);
177 if (fd < 0) {
178 return vk_errorf(instance, VK_ERROR_INCOMPATIBLE_DRIVER,
179 "failed to open device %s", path);
180 }
181
182 /* Version 1.3 added MSM_INFO_IOVA. */
183 const int min_version_major = 1;
184 const int min_version_minor = 3;
185
186 version = drmGetVersion(fd);
187 if (!version) {
188 close(fd);
189 return vk_errorf(instance, VK_ERROR_INCOMPATIBLE_DRIVER,
190 "failed to query kernel driver version for device %s",
191 path);
192 }
193
194 if (strcmp(version->name, "msm")) {
195 drmFreeVersion(version);
196 close(fd);
197 return vk_errorf(instance, VK_ERROR_INCOMPATIBLE_DRIVER,
198 "device %s does not use the msm kernel driver", path);
199 }
200
201 if (version->version_major != min_version_major ||
202 version->version_minor < min_version_minor) {
203 result = vk_errorf(instance, VK_ERROR_INCOMPATIBLE_DRIVER,
204 "kernel driver for device %s has version %d.%d, "
205 "but Vulkan requires version >= %d.%d",
206 path, version->version_major, version->version_minor,
207 min_version_major, min_version_minor);
208 drmFreeVersion(version);
209 close(fd);
210 return result;
211 }
212
213 device->msm_major_version = version->version_major;
214 device->msm_minor_version = version->version_minor;
215
216 drmFreeVersion(version);
217
218 if (instance->debug_flags & TU_DEBUG_STARTUP)
219 tu_logi("Found compatible device '%s'.", path);
220
221 vk_object_base_init(NULL, &device->base, VK_OBJECT_TYPE_PHYSICAL_DEVICE);
222 device->instance = instance;
223 assert(strlen(path) < ARRAY_SIZE(device->path));
224 strncpy(device->path, path, ARRAY_SIZE(device->path));
225
226 if (instance->enabled_extensions.KHR_display) {
227 master_fd =
228 open(drm_device->nodes[DRM_NODE_PRIMARY], O_RDWR | O_CLOEXEC);
229 if (master_fd >= 0) {
/* TODO: free master_fd if accel is not working? */
231 }
232 }
233
234 device->master_fd = master_fd;
235 device->local_fd = fd;
236
237 if (tu_drm_get_gpu_id(device, &device->gpu_id)) {
238 if (instance->debug_flags & TU_DEBUG_STARTUP)
239 tu_logi("Could not query the GPU ID");
240 result = vk_errorf(instance, VK_ERROR_INITIALIZATION_FAILED,
241 "could not get GPU ID");
242 goto fail;
243 }
244
245 if (tu_drm_get_gmem_size(device, &device->gmem_size)) {
246 if (instance->debug_flags & TU_DEBUG_STARTUP)
247 tu_logi("Could not query the GMEM size");
248 result = vk_errorf(instance, VK_ERROR_INITIALIZATION_FAILED,
249 "could not get GMEM size");
250 goto fail;
251 }
252
253 if (tu_drm_get_gmem_base(device, &device->gmem_base)) {
254 if (instance->debug_flags & TU_DEBUG_STARTUP)
tu_logi("Could not query the GMEM base");
result = vk_errorf(instance, VK_ERROR_INITIALIZATION_FAILED,
                   "could not get GMEM base");
258 goto fail;
259 }
260
261 memset(device->name, 0, sizeof(device->name));
262 sprintf(device->name, "FD%d", device->gpu_id);
263
264 device->limited_z24s8 = (device->gpu_id == 630);
265
266 switch (device->gpu_id) {
267 case 618:
268 device->ccu_offset_gmem = 0x7c000; /* 0x7e000 in some cases? */
269 device->ccu_offset_bypass = 0x10000;
270 device->tile_align_w = 32;
271 device->magic.PC_UNKNOWN_9805 = 0x0;
272 device->magic.SP_UNKNOWN_A0F8 = 0x0;
273 break;
274 case 630:
275 case 640:
276 device->ccu_offset_gmem = 0xf8000;
277 device->ccu_offset_bypass = 0x20000;
278 device->tile_align_w = 32;
279 device->magic.PC_UNKNOWN_9805 = 0x1;
280 device->magic.SP_UNKNOWN_A0F8 = 0x1;
281 break;
282 case 650:
283 device->ccu_offset_gmem = 0x114000;
284 device->ccu_offset_bypass = 0x30000;
285 device->tile_align_w = 96;
286 device->magic.PC_UNKNOWN_9805 = 0x2;
287 device->magic.SP_UNKNOWN_A0F8 = 0x2;
288 break;
289 default:
290 result = vk_errorf(instance, VK_ERROR_INITIALIZATION_FAILED,
291 "device %s is unsupported", device->name);
292 goto fail;
293 }
294 if (tu_device_get_cache_uuid(device->gpu_id, device->cache_uuid)) {
295 result = vk_errorf(instance, VK_ERROR_INITIALIZATION_FAILED,
296 "cannot generate UUID");
297 goto fail;
298 }
299
300 /* The gpu id is already embedded in the uuid so we just pass "tu"
301 * when creating the cache.
302 */
303 char buf[VK_UUID_SIZE * 2 + 1];
304 disk_cache_format_hex_id(buf, device->cache_uuid, VK_UUID_SIZE * 2);
305 device->disk_cache = disk_cache_create(device->name, buf, 0);
306
307 fprintf(stderr, "WARNING: tu is not a conformant vulkan implementation, "
308 "testing use only.\n");
309
310 fd_get_driver_uuid(device->driver_uuid);
311 fd_get_device_uuid(device->device_uuid, device->gpu_id);
312
313 tu_physical_device_get_supported_extensions(device, &device->supported_extensions);
314
319
320 result = tu_wsi_init(device);
321 if (result != VK_SUCCESS) {
322 vk_error(instance, result);
323 goto fail;
324 }
325
326 return VK_SUCCESS;
327
328 fail:
329 close(fd);
330 if (master_fd != -1)
331 close(master_fd);
332 return result;
333 }
334
335 static void
336 tu_physical_device_finish(struct tu_physical_device *device)
337 {
338 tu_wsi_finish(device);
339
340 disk_cache_destroy(device->disk_cache);
341 close(device->local_fd);
342 if (device->master_fd != -1)
343 close(device->master_fd);
344
345 vk_object_base_finish(&device->base);
346 }
347
348 static VKAPI_ATTR void *
349 default_alloc_func(void *pUserData,
350 size_t size,
351 size_t align,
352 VkSystemAllocationScope allocationScope)
353 {
354 return malloc(size);
355 }
356
357 static VKAPI_ATTR void *
358 default_realloc_func(void *pUserData,
359 void *pOriginal,
360 size_t size,
361 size_t align,
362 VkSystemAllocationScope allocationScope)
363 {
364 return realloc(pOriginal, size);
365 }
366
367 static VKAPI_ATTR void
368 default_free_func(void *pUserData, void *pMemory)
369 {
370 free(pMemory);
371 }
372
373 static const VkAllocationCallbacks default_alloc = {
374 .pUserData = NULL,
375 .pfnAllocation = default_alloc_func,
376 .pfnReallocation = default_realloc_func,
377 .pfnFree = default_free_func,
378 };
379
380 static const struct debug_control tu_debug_options[] = {
381 { "startup", TU_DEBUG_STARTUP },
382 { "nir", TU_DEBUG_NIR },
383 { "ir3", TU_DEBUG_IR3 },
384 { "nobin", TU_DEBUG_NOBIN },
385 { "sysmem", TU_DEBUG_SYSMEM },
386 { "forcebin", TU_DEBUG_FORCEBIN },
387 { "noubwc", TU_DEBUG_NOUBWC },
388 { NULL, 0 }
389 };
390
391 const char *
392 tu_get_debug_option_name(int id)
393 {
394 assert(id < ARRAY_SIZE(tu_debug_options) - 1);
395 return tu_debug_options[id].string;
396 }
397
398 static int
399 tu_get_instance_extension_index(const char *name)
400 {
401 for (unsigned i = 0; i < TU_INSTANCE_EXTENSION_COUNT; ++i) {
402 if (strcmp(name, tu_instance_extensions[i].extensionName) == 0)
403 return i;
404 }
405 return -1;
406 }
407
408 VkResult
409 tu_CreateInstance(const VkInstanceCreateInfo *pCreateInfo,
410 const VkAllocationCallbacks *pAllocator,
411 VkInstance *pInstance)
412 {
413 struct tu_instance *instance;
414 VkResult result;
415
416 assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_INSTANCE_CREATE_INFO);
417
418 uint32_t client_version;
419 if (pCreateInfo->pApplicationInfo &&
420 pCreateInfo->pApplicationInfo->apiVersion != 0) {
421 client_version = pCreateInfo->pApplicationInfo->apiVersion;
422 } else {
423 tu_EnumerateInstanceVersion(&client_version);
424 }
425
426 instance = vk_zalloc2(&default_alloc, pAllocator, sizeof(*instance), 8,
427 VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
428
429 if (!instance)
430 return vk_error(NULL, VK_ERROR_OUT_OF_HOST_MEMORY);
431
432 vk_object_base_init(NULL, &instance->base, VK_OBJECT_TYPE_INSTANCE);
433
434 if (pAllocator)
435 instance->alloc = *pAllocator;
436 else
437 instance->alloc = default_alloc;
438
439 instance->api_version = client_version;
440 instance->physical_device_count = -1;
441
442 instance->debug_flags =
443 parse_debug_string(getenv("TU_DEBUG"), tu_debug_options);
444
445 if (instance->debug_flags & TU_DEBUG_STARTUP)
446 tu_logi("Created an instance");
447
448 for (uint32_t i = 0; i < pCreateInfo->enabledExtensionCount; i++) {
449 const char *ext_name = pCreateInfo->ppEnabledExtensionNames[i];
450 int index = tu_get_instance_extension_index(ext_name);
451
452 if (index < 0 || !tu_instance_extensions_supported.extensions[index]) {
453 vk_object_base_finish(&instance->base);
454 vk_free2(&default_alloc, pAllocator, instance);
455 return vk_error(instance, VK_ERROR_EXTENSION_NOT_PRESENT);
456 }
457
458 instance->enabled_extensions.extensions[index] = true;
459 }
460
461 result = vk_debug_report_instance_init(&instance->debug_report_callbacks);
462 if (result != VK_SUCCESS) {
463 vk_object_base_finish(&instance->base);
464 vk_free2(&default_alloc, pAllocator, instance);
465 return vk_error(instance, result);
466 }
467
468 glsl_type_singleton_init_or_ref();
469
470 VG(VALGRIND_CREATE_MEMPOOL(instance, 0, false));
471
472 *pInstance = tu_instance_to_handle(instance);
473
474 return VK_SUCCESS;
475 }
476
477 void
478 tu_DestroyInstance(VkInstance _instance,
479 const VkAllocationCallbacks *pAllocator)
480 {
481 TU_FROM_HANDLE(tu_instance, instance, _instance);
482
483 if (!instance)
484 return;
485
486 for (int i = 0; i < instance->physical_device_count; ++i) {
487 tu_physical_device_finish(instance->physical_devices + i);
488 }
489
490 VG(VALGRIND_DESTROY_MEMPOOL(instance));
491
492 glsl_type_singleton_decref();
493
494 vk_debug_report_instance_destroy(&instance->debug_report_callbacks);
495
496 vk_object_base_finish(&instance->base);
497 vk_free(&instance->alloc, instance);
498 }
499
500 static VkResult
501 tu_enumerate_devices(struct tu_instance *instance)
502 {
/* TODO: Check for more devices? */
504 drmDevicePtr devices[8];
505 VkResult result = VK_ERROR_INCOMPATIBLE_DRIVER;
506 int max_devices;
507
508 instance->physical_device_count = 0;
509
510 max_devices = drmGetDevices2(0, devices, ARRAY_SIZE(devices));
511
512 if (instance->debug_flags & TU_DEBUG_STARTUP) {
513 if (max_devices < 0)
tu_logi("drmGetDevices2 returned error: %s\n", strerror(-max_devices));
515 else
516 tu_logi("Found %d drm nodes", max_devices);
517 }
518
519 if (max_devices < 1)
520 return vk_error(instance, VK_ERROR_INCOMPATIBLE_DRIVER);
521
522 for (unsigned i = 0; i < (unsigned) max_devices; i++) {
523 if (devices[i]->available_nodes & 1 << DRM_NODE_RENDER &&
524 devices[i]->bustype == DRM_BUS_PLATFORM) {
525
526 result = tu_physical_device_init(
527 instance->physical_devices + instance->physical_device_count,
528 instance, devices[i]);
529 if (result == VK_SUCCESS)
530 ++instance->physical_device_count;
531 else if (result != VK_ERROR_INCOMPATIBLE_DRIVER)
532 break;
533 }
534 }
535 drmFreeDevices(devices, max_devices);
536
537 return result;
538 }
539
540 VkResult
541 tu_EnumeratePhysicalDevices(VkInstance _instance,
542 uint32_t *pPhysicalDeviceCount,
543 VkPhysicalDevice *pPhysicalDevices)
544 {
545 TU_FROM_HANDLE(tu_instance, instance, _instance);
546 VK_OUTARRAY_MAKE(out, pPhysicalDevices, pPhysicalDeviceCount);
547
548 VkResult result;
549
550 if (instance->physical_device_count < 0) {
551 result = tu_enumerate_devices(instance);
552 if (result != VK_SUCCESS && result != VK_ERROR_INCOMPATIBLE_DRIVER)
553 return result;
554 }
555
556 for (uint32_t i = 0; i < instance->physical_device_count; ++i) {
557 vk_outarray_append(&out, p)
558 {
559 *p = tu_physical_device_to_handle(instance->physical_devices + i);
560 }
561 }
562
563 return vk_outarray_status(&out);
564 }
565
566 VkResult
567 tu_EnumeratePhysicalDeviceGroups(
568 VkInstance _instance,
569 uint32_t *pPhysicalDeviceGroupCount,
570 VkPhysicalDeviceGroupProperties *pPhysicalDeviceGroupProperties)
571 {
572 TU_FROM_HANDLE(tu_instance, instance, _instance);
573 VK_OUTARRAY_MAKE(out, pPhysicalDeviceGroupProperties,
574 pPhysicalDeviceGroupCount);
575 VkResult result;
576
577 if (instance->physical_device_count < 0) {
578 result = tu_enumerate_devices(instance);
579 if (result != VK_SUCCESS && result != VK_ERROR_INCOMPATIBLE_DRIVER)
580 return result;
581 }
582
583 for (uint32_t i = 0; i < instance->physical_device_count; ++i) {
584 vk_outarray_append(&out, p)
585 {
586 p->physicalDeviceCount = 1;
587 p->physicalDevices[0] =
588 tu_physical_device_to_handle(instance->physical_devices + i);
589 p->subsetAllocation = false;
590 }
591 }
592
593 return vk_outarray_status(&out);
594 }
595
596 void
597 tu_GetPhysicalDeviceFeatures(VkPhysicalDevice physicalDevice,
598 VkPhysicalDeviceFeatures *pFeatures)
599 {
600 memset(pFeatures, 0, sizeof(*pFeatures));
601
602 *pFeatures = (VkPhysicalDeviceFeatures) {
603 .robustBufferAccess = true,
604 .fullDrawIndexUint32 = true,
605 .imageCubeArray = true,
606 .independentBlend = true,
607 .geometryShader = true,
608 .tessellationShader = true,
609 .sampleRateShading = true,
610 .dualSrcBlend = true,
611 .logicOp = true,
612 .multiDrawIndirect = true,
613 .drawIndirectFirstInstance = true,
614 .depthClamp = true,
615 .depthBiasClamp = true,
616 .fillModeNonSolid = true,
617 .depthBounds = true,
618 .wideLines = false,
619 .largePoints = true,
620 .alphaToOne = true,
621 .multiViewport = false,
622 .samplerAnisotropy = true,
623 .textureCompressionETC2 = true,
624 .textureCompressionASTC_LDR = true,
625 .textureCompressionBC = true,
626 .occlusionQueryPrecise = true,
627 .pipelineStatisticsQuery = false,
628 .vertexPipelineStoresAndAtomics = true,
629 .fragmentStoresAndAtomics = true,
630 .shaderTessellationAndGeometryPointSize = false,
631 .shaderImageGatherExtended = false,
632 .shaderStorageImageExtendedFormats = false,
633 .shaderStorageImageMultisample = false,
634 .shaderUniformBufferArrayDynamicIndexing = true,
635 .shaderSampledImageArrayDynamicIndexing = true,
636 .shaderStorageBufferArrayDynamicIndexing = true,
637 .shaderStorageImageArrayDynamicIndexing = true,
638 .shaderStorageImageReadWithoutFormat = false,
639 .shaderStorageImageWriteWithoutFormat = false,
640 .shaderClipDistance = false,
641 .shaderCullDistance = false,
642 .shaderFloat64 = false,
643 .shaderInt64 = false,
644 .shaderInt16 = false,
645 .sparseBinding = false,
646 .variableMultisampleRate = false,
647 .inheritedQueries = false,
648 };
649 }
650
651 void
652 tu_GetPhysicalDeviceFeatures2(VkPhysicalDevice physicalDevice,
653 VkPhysicalDeviceFeatures2 *pFeatures)
654 {
655 vk_foreach_struct(ext, pFeatures->pNext)
656 {
657 switch (ext->sType) {
658 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VULKAN_1_1_FEATURES: {
659 VkPhysicalDeviceVulkan11Features *features = (void *) ext;
660 features->storageBuffer16BitAccess = false;
661 features->uniformAndStorageBuffer16BitAccess = false;
662 features->storagePushConstant16 = false;
663 features->storageInputOutput16 = false;
664 features->multiview = false;
665 features->multiviewGeometryShader = false;
666 features->multiviewTessellationShader = false;
667 features->variablePointersStorageBuffer = true;
668 features->variablePointers = true;
669 features->protectedMemory = false;
670 features->samplerYcbcrConversion = true;
671 features->shaderDrawParameters = true;
672 break;
673 }
674 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VULKAN_1_2_FEATURES: {
675 VkPhysicalDeviceVulkan12Features *features = (void *) ext;
676 features->samplerMirrorClampToEdge = true;
677 features->drawIndirectCount = true;
678 features->storageBuffer8BitAccess = false;
679 features->uniformAndStorageBuffer8BitAccess = false;
680 features->storagePushConstant8 = false;
681 features->shaderBufferInt64Atomics = false;
682 features->shaderSharedInt64Atomics = false;
683 features->shaderFloat16 = false;
684 features->shaderInt8 = false;
685
686 features->descriptorIndexing = false;
687 features->shaderInputAttachmentArrayDynamicIndexing = false;
688 features->shaderUniformTexelBufferArrayDynamicIndexing = false;
689 features->shaderStorageTexelBufferArrayDynamicIndexing = false;
690 features->shaderUniformBufferArrayNonUniformIndexing = false;
691 features->shaderSampledImageArrayNonUniformIndexing = false;
692 features->shaderStorageBufferArrayNonUniformIndexing = false;
693 features->shaderStorageImageArrayNonUniformIndexing = false;
694 features->shaderInputAttachmentArrayNonUniformIndexing = false;
695 features->shaderUniformTexelBufferArrayNonUniformIndexing = false;
696 features->shaderStorageTexelBufferArrayNonUniformIndexing = false;
697 features->descriptorBindingUniformBufferUpdateAfterBind = false;
698 features->descriptorBindingSampledImageUpdateAfterBind = false;
699 features->descriptorBindingStorageImageUpdateAfterBind = false;
700 features->descriptorBindingStorageBufferUpdateAfterBind = false;
701 features->descriptorBindingUniformTexelBufferUpdateAfterBind = false;
702 features->descriptorBindingStorageTexelBufferUpdateAfterBind = false;
703 features->descriptorBindingUpdateUnusedWhilePending = false;
704 features->descriptorBindingPartiallyBound = false;
705 features->descriptorBindingVariableDescriptorCount = false;
706 features->runtimeDescriptorArray = false;
707
708 features->samplerFilterMinmax = true;
709 features->scalarBlockLayout = false;
710 features->imagelessFramebuffer = false;
711 features->uniformBufferStandardLayout = false;
712 features->shaderSubgroupExtendedTypes = false;
713 features->separateDepthStencilLayouts = false;
714 features->hostQueryReset = false;
715 features->timelineSemaphore = false;
716 features->bufferDeviceAddress = false;
717 features->bufferDeviceAddressCaptureReplay = false;
718 features->bufferDeviceAddressMultiDevice = false;
719 features->vulkanMemoryModel = false;
720 features->vulkanMemoryModelDeviceScope = false;
721 features->vulkanMemoryModelAvailabilityVisibilityChains = false;
722 features->shaderOutputViewportIndex = false;
723 features->shaderOutputLayer = false;
724 features->subgroupBroadcastDynamicId = false;
725 break;
726 }
727 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VARIABLE_POINTERS_FEATURES: {
728 VkPhysicalDeviceVariablePointersFeatures *features = (void *) ext;
729 features->variablePointersStorageBuffer = true;
730 features->variablePointers = true;
731 break;
732 }
733 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MULTIVIEW_FEATURES: {
734 VkPhysicalDeviceMultiviewFeatures *features =
735 (VkPhysicalDeviceMultiviewFeatures *) ext;
736 features->multiview = false;
737 features->multiviewGeometryShader = false;
738 features->multiviewTessellationShader = false;
739 break;
740 }
741 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_DRAW_PARAMETERS_FEATURES: {
742 VkPhysicalDeviceShaderDrawParametersFeatures *features =
743 (VkPhysicalDeviceShaderDrawParametersFeatures *) ext;
744 features->shaderDrawParameters = true;
745 break;
746 }
747 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROTECTED_MEMORY_FEATURES: {
748 VkPhysicalDeviceProtectedMemoryFeatures *features =
749 (VkPhysicalDeviceProtectedMemoryFeatures *) ext;
750 features->protectedMemory = false;
751 break;
752 }
753 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_16BIT_STORAGE_FEATURES: {
754 VkPhysicalDevice16BitStorageFeatures *features =
755 (VkPhysicalDevice16BitStorageFeatures *) ext;
756 features->storageBuffer16BitAccess = false;
757 features->uniformAndStorageBuffer16BitAccess = false;
758 features->storagePushConstant16 = false;
759 features->storageInputOutput16 = false;
760 break;
761 }
762 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SAMPLER_YCBCR_CONVERSION_FEATURES: {
763 VkPhysicalDeviceSamplerYcbcrConversionFeatures *features =
764 (VkPhysicalDeviceSamplerYcbcrConversionFeatures *) ext;
765 features->samplerYcbcrConversion = true;
766 break;
767 }
768 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DESCRIPTOR_INDEXING_FEATURES_EXT: {
769 VkPhysicalDeviceDescriptorIndexingFeaturesEXT *features =
770 (VkPhysicalDeviceDescriptorIndexingFeaturesEXT *) ext;
771 features->shaderInputAttachmentArrayDynamicIndexing = false;
772 features->shaderUniformTexelBufferArrayDynamicIndexing = false;
773 features->shaderStorageTexelBufferArrayDynamicIndexing = false;
774 features->shaderUniformBufferArrayNonUniformIndexing = false;
775 features->shaderSampledImageArrayNonUniformIndexing = false;
776 features->shaderStorageBufferArrayNonUniformIndexing = false;
777 features->shaderStorageImageArrayNonUniformIndexing = false;
778 features->shaderInputAttachmentArrayNonUniformIndexing = false;
779 features->shaderUniformTexelBufferArrayNonUniformIndexing = false;
780 features->shaderStorageTexelBufferArrayNonUniformIndexing = false;
781 features->descriptorBindingUniformBufferUpdateAfterBind = false;
782 features->descriptorBindingSampledImageUpdateAfterBind = false;
783 features->descriptorBindingStorageImageUpdateAfterBind = false;
784 features->descriptorBindingStorageBufferUpdateAfterBind = false;
785 features->descriptorBindingUniformTexelBufferUpdateAfterBind = false;
786 features->descriptorBindingStorageTexelBufferUpdateAfterBind = false;
787 features->descriptorBindingUpdateUnusedWhilePending = false;
788 features->descriptorBindingPartiallyBound = false;
789 features->descriptorBindingVariableDescriptorCount = false;
790 features->runtimeDescriptorArray = false;
791 break;
792 }
793 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_CONDITIONAL_RENDERING_FEATURES_EXT: {
794 VkPhysicalDeviceConditionalRenderingFeaturesEXT *features =
795 (VkPhysicalDeviceConditionalRenderingFeaturesEXT *) ext;
796 features->conditionalRendering = true;
797 features->inheritedConditionalRendering = true;
798 break;
799 }
800 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TRANSFORM_FEEDBACK_FEATURES_EXT: {
801 VkPhysicalDeviceTransformFeedbackFeaturesEXT *features =
802 (VkPhysicalDeviceTransformFeedbackFeaturesEXT *) ext;
803 features->transformFeedback = true;
804 features->geometryStreams = false;
805 break;
806 }
807 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_INDEX_TYPE_UINT8_FEATURES_EXT: {
808 VkPhysicalDeviceIndexTypeUint8FeaturesEXT *features =
809 (VkPhysicalDeviceIndexTypeUint8FeaturesEXT *)ext;
810 features->indexTypeUint8 = true;
811 break;
812 }
813 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VERTEX_ATTRIBUTE_DIVISOR_FEATURES_EXT: {
814 VkPhysicalDeviceVertexAttributeDivisorFeaturesEXT *features =
815 (VkPhysicalDeviceVertexAttributeDivisorFeaturesEXT *)ext;
816 features->vertexAttributeInstanceRateDivisor = true;
817 features->vertexAttributeInstanceRateZeroDivisor = true;
818 break;
819 }
820 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PRIVATE_DATA_FEATURES_EXT: {
821 VkPhysicalDevicePrivateDataFeaturesEXT *features =
822 (VkPhysicalDevicePrivateDataFeaturesEXT *)ext;
823 features->privateData = true;
824 break;
825 }
826 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DEPTH_CLIP_ENABLE_FEATURES_EXT: {
827 VkPhysicalDeviceDepthClipEnableFeaturesEXT *features =
828 (VkPhysicalDeviceDepthClipEnableFeaturesEXT *)ext;
829 features->depthClipEnable = true;
830 break;
831 }
832 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_4444_FORMATS_FEATURES_EXT: {
833 VkPhysicalDevice4444FormatsFeaturesEXT *features = (void *)ext;
834 features->formatA4R4G4B4 = true;
835 features->formatA4B4G4R4 = true;
836 break;
837 }
838 default:
839 break;
840 }
841 }
tu_GetPhysicalDeviceFeatures(physicalDevice, &pFeatures->features);
843 }
844
845 void
846 tu_GetPhysicalDeviceProperties(VkPhysicalDevice physicalDevice,
847 VkPhysicalDeviceProperties *pProperties)
848 {
849 TU_FROM_HANDLE(tu_physical_device, pdevice, physicalDevice);
850 VkSampleCountFlags sample_counts =
851 VK_SAMPLE_COUNT_1_BIT | VK_SAMPLE_COUNT_2_BIT | VK_SAMPLE_COUNT_4_BIT;
852
853 /* I have no idea what the maximum size is, but the hardware supports very
854 * large numbers of descriptors (at least 2^16). This limit is based on
855 * CP_LOAD_STATE6, which has a 28-bit field for the DWORD offset, so that
856 * we don't have to think about what to do if that overflows, but really
857 * nothing is likely to get close to this.
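 * With A6XX_TEX_CONST_DWORDS == 16, this works out to 1 << 24 (about 16.8M)
 * descriptors.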
858 */
859 const size_t max_descriptor_set_size = (1 << 28) / A6XX_TEX_CONST_DWORDS;
860
861 VkPhysicalDeviceLimits limits = {
862 .maxImageDimension1D = (1 << 14),
863 .maxImageDimension2D = (1 << 14),
864 .maxImageDimension3D = (1 << 11),
865 .maxImageDimensionCube = (1 << 14),
866 .maxImageArrayLayers = (1 << 11),
867 .maxTexelBufferElements = 128 * 1024 * 1024,
868 .maxUniformBufferRange = MAX_UNIFORM_BUFFER_RANGE,
869 .maxStorageBufferRange = MAX_STORAGE_BUFFER_RANGE,
870 .maxPushConstantsSize = MAX_PUSH_CONSTANTS_SIZE,
871 .maxMemoryAllocationCount = UINT32_MAX,
872 .maxSamplerAllocationCount = 64 * 1024,
873 .bufferImageGranularity = 64, /* A cache line */
874 .sparseAddressSpaceSize = 0xffffffffu, /* buffer max size */
875 .maxBoundDescriptorSets = MAX_SETS,
876 .maxPerStageDescriptorSamplers = max_descriptor_set_size,
877 .maxPerStageDescriptorUniformBuffers = max_descriptor_set_size,
878 .maxPerStageDescriptorStorageBuffers = max_descriptor_set_size,
879 .maxPerStageDescriptorSampledImages = max_descriptor_set_size,
880 .maxPerStageDescriptorStorageImages = max_descriptor_set_size,
881 .maxPerStageDescriptorInputAttachments = MAX_RTS,
882 .maxPerStageResources = max_descriptor_set_size,
883 .maxDescriptorSetSamplers = max_descriptor_set_size,
884 .maxDescriptorSetUniformBuffers = max_descriptor_set_size,
885 .maxDescriptorSetUniformBuffersDynamic = MAX_DYNAMIC_UNIFORM_BUFFERS,
886 .maxDescriptorSetStorageBuffers = max_descriptor_set_size,
887 .maxDescriptorSetStorageBuffersDynamic = MAX_DYNAMIC_STORAGE_BUFFERS,
888 .maxDescriptorSetSampledImages = max_descriptor_set_size,
889 .maxDescriptorSetStorageImages = max_descriptor_set_size,
890 .maxDescriptorSetInputAttachments = MAX_RTS,
891 .maxVertexInputAttributes = 32,
892 .maxVertexInputBindings = 32,
893 .maxVertexInputAttributeOffset = 4095,
894 .maxVertexInputBindingStride = 2048,
895 .maxVertexOutputComponents = 128,
896 .maxTessellationGenerationLevel = 64,
897 .maxTessellationPatchSize = 32,
898 .maxTessellationControlPerVertexInputComponents = 128,
899 .maxTessellationControlPerVertexOutputComponents = 128,
900 .maxTessellationControlPerPatchOutputComponents = 120,
901 .maxTessellationControlTotalOutputComponents = 4096,
902 .maxTessellationEvaluationInputComponents = 128,
903 .maxTessellationEvaluationOutputComponents = 128,
904 .maxGeometryShaderInvocations = 32,
905 .maxGeometryInputComponents = 64,
906 .maxGeometryOutputComponents = 128,
907 .maxGeometryOutputVertices = 256,
908 .maxGeometryTotalOutputComponents = 1024,
909 .maxFragmentInputComponents = 124,
910 .maxFragmentOutputAttachments = 8,
911 .maxFragmentDualSrcAttachments = 1,
912 .maxFragmentCombinedOutputResources = 8,
913 .maxComputeSharedMemorySize = 32768,
914 .maxComputeWorkGroupCount = { 65535, 65535, 65535 },
915 .maxComputeWorkGroupInvocations = 2048,
916 .maxComputeWorkGroupSize = { 2048, 2048, 2048 },
917 .subPixelPrecisionBits = 8,
918 .subTexelPrecisionBits = 8,
919 .mipmapPrecisionBits = 8,
920 .maxDrawIndexedIndexValue = UINT32_MAX,
921 .maxDrawIndirectCount = UINT32_MAX,
922 .maxSamplerLodBias = 4095.0 / 256.0, /* [-16, 15.99609375] */
923 .maxSamplerAnisotropy = 16,
924 .maxViewports = MAX_VIEWPORTS,
925 .maxViewportDimensions = { (1 << 14), (1 << 14) },
926 .viewportBoundsRange = { INT16_MIN, INT16_MAX },
927 .viewportSubPixelBits = 8,
928 .minMemoryMapAlignment = 4096, /* A page */
929 .minTexelBufferOffsetAlignment = 64,
930 .minUniformBufferOffsetAlignment = 64,
931 .minStorageBufferOffsetAlignment = 64,
932 .minTexelOffset = -16,
933 .maxTexelOffset = 15,
934 .minTexelGatherOffset = -32,
935 .maxTexelGatherOffset = 31,
936 .minInterpolationOffset = -0.5,
937 .maxInterpolationOffset = 0.4375,
938 .subPixelInterpolationOffsetBits = 4,
939 .maxFramebufferWidth = (1 << 14),
940 .maxFramebufferHeight = (1 << 14),
941 .maxFramebufferLayers = (1 << 10),
942 .framebufferColorSampleCounts = sample_counts,
943 .framebufferDepthSampleCounts = sample_counts,
944 .framebufferStencilSampleCounts = sample_counts,
945 .framebufferNoAttachmentsSampleCounts = sample_counts,
946 .maxColorAttachments = MAX_RTS,
947 .sampledImageColorSampleCounts = sample_counts,
948 .sampledImageIntegerSampleCounts = VK_SAMPLE_COUNT_1_BIT,
949 .sampledImageDepthSampleCounts = sample_counts,
950 .sampledImageStencilSampleCounts = sample_counts,
951 .storageImageSampleCounts = VK_SAMPLE_COUNT_1_BIT,
952 .maxSampleMaskWords = 1,
953 .timestampComputeAndGraphics = true,
954 .timestampPeriod = 1000000000.0 / 19200000.0, /* CP_ALWAYS_ON_COUNTER is fixed 19.2MHz */
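/* i.e. one timestamp tick above is ~52.08 ns */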
955 .maxClipDistances = 8,
956 .maxCullDistances = 8,
957 .maxCombinedClipAndCullDistances = 8,
958 .discreteQueuePriorities = 1,
959 .pointSizeRange = { 1, 4092 },
960 .lineWidthRange = { 0.0, 7.9921875 },
961 .pointSizeGranularity = 0.0625,
962 .lineWidthGranularity = (1.0 / 128.0),
963 .strictLines = false, /* FINISHME */
964 .standardSampleLocations = true,
965 .optimalBufferCopyOffsetAlignment = 128,
966 .optimalBufferCopyRowPitchAlignment = 128,
967 .nonCoherentAtomSize = 64,
968 };
969
970 *pProperties = (VkPhysicalDeviceProperties) {
971 .apiVersion = tu_physical_device_api_version(pdevice),
972 .driverVersion = vk_get_driver_version(),
973 .vendorID = 0, /* TODO */
974 .deviceID = 0,
975 .deviceType = VK_PHYSICAL_DEVICE_TYPE_INTEGRATED_GPU,
976 .limits = limits,
977 .sparseProperties = { 0 },
978 };
979
980 strcpy(pProperties->deviceName, pdevice->name);
981 memcpy(pProperties->pipelineCacheUUID, pdevice->cache_uuid, VK_UUID_SIZE);
982 }
983
984 void
985 tu_GetPhysicalDeviceProperties2(VkPhysicalDevice physicalDevice,
986 VkPhysicalDeviceProperties2 *pProperties)
987 {
988 TU_FROM_HANDLE(tu_physical_device, pdevice, physicalDevice);
989 tu_GetPhysicalDeviceProperties(physicalDevice, &pProperties->properties);
990
991 vk_foreach_struct(ext, pProperties->pNext)
992 {
993 switch (ext->sType) {
994 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PUSH_DESCRIPTOR_PROPERTIES_KHR: {
995 VkPhysicalDevicePushDescriptorPropertiesKHR *properties =
996 (VkPhysicalDevicePushDescriptorPropertiesKHR *) ext;
997 properties->maxPushDescriptors = MAX_PUSH_DESCRIPTORS;
998 break;
999 }
1000 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_ID_PROPERTIES: {
1001 VkPhysicalDeviceIDProperties *properties =
1002 (VkPhysicalDeviceIDProperties *) ext;
1003 memcpy(properties->driverUUID, pdevice->driver_uuid, VK_UUID_SIZE);
1004 memcpy(properties->deviceUUID, pdevice->device_uuid, VK_UUID_SIZE);
1005 properties->deviceLUIDValid = false;
1006 break;
1007 }
1008 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MULTIVIEW_PROPERTIES: {
1009 VkPhysicalDeviceMultiviewProperties *properties =
1010 (VkPhysicalDeviceMultiviewProperties *) ext;
1011 properties->maxMultiviewViewCount = MAX_VIEWS;
1012 properties->maxMultiviewInstanceIndex = INT_MAX;
1013 break;
1014 }
1015 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_POINT_CLIPPING_PROPERTIES: {
1016 VkPhysicalDevicePointClippingProperties *properties =
1017 (VkPhysicalDevicePointClippingProperties *) ext;
1018 properties->pointClippingBehavior =
1019 VK_POINT_CLIPPING_BEHAVIOR_ALL_CLIP_PLANES;
1020 break;
1021 }
1022 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MAINTENANCE_3_PROPERTIES: {
1023 VkPhysicalDeviceMaintenance3Properties *properties =
1024 (VkPhysicalDeviceMaintenance3Properties *) ext;
1025 /* Make sure everything is addressable by a signed 32-bit int, and
1026 * our largest descriptors are 96 bytes. */
1027 properties->maxPerSetDescriptors = (1ull << 31) / 96;
1028 /* Our buffer size fields allow only this much */
1029 properties->maxMemoryAllocationSize = 0xFFFFFFFFull;
1030 break;
1031 }
1032 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TRANSFORM_FEEDBACK_PROPERTIES_EXT: {
1033 VkPhysicalDeviceTransformFeedbackPropertiesEXT *properties =
1034 (VkPhysicalDeviceTransformFeedbackPropertiesEXT *)ext;
1035
1036 properties->maxTransformFeedbackStreams = IR3_MAX_SO_STREAMS;
1037 properties->maxTransformFeedbackBuffers = IR3_MAX_SO_BUFFERS;
1038 properties->maxTransformFeedbackBufferSize = UINT32_MAX;
1039 properties->maxTransformFeedbackStreamDataSize = 512;
1040 properties->maxTransformFeedbackBufferDataSize = 512;
1041 properties->maxTransformFeedbackBufferDataStride = 512;
1042 properties->transformFeedbackQueries = true;
1043 properties->transformFeedbackStreamsLinesTriangles = false;
1044 properties->transformFeedbackRasterizationStreamSelect = false;
1045 properties->transformFeedbackDraw = true;
1046 break;
1047 }
1048 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SAMPLE_LOCATIONS_PROPERTIES_EXT: {
1049 VkPhysicalDeviceSampleLocationsPropertiesEXT *properties =
1050 (VkPhysicalDeviceSampleLocationsPropertiesEXT *)ext;
1051 properties->sampleLocationSampleCounts = 0;
1052 if (pdevice->supported_extensions.EXT_sample_locations) {
1053 properties->sampleLocationSampleCounts =
1054 VK_SAMPLE_COUNT_1_BIT | VK_SAMPLE_COUNT_2_BIT | VK_SAMPLE_COUNT_4_BIT;
1055 }
1056 properties->maxSampleLocationGridSize = (VkExtent2D) { 1 , 1 };
1057 properties->sampleLocationCoordinateRange[0] = 0.0f;
1058 properties->sampleLocationCoordinateRange[1] = 0.9375f;
1059 properties->sampleLocationSubPixelBits = 4;
1060 properties->variableSampleLocations = true;
1061 break;
1062 }
1063 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SAMPLER_FILTER_MINMAX_PROPERTIES: {
1064 VkPhysicalDeviceSamplerFilterMinmaxProperties *properties =
1065 (VkPhysicalDeviceSamplerFilterMinmaxProperties *)ext;
1066 properties->filterMinmaxImageComponentMapping = true;
1067 properties->filterMinmaxSingleComponentFormats = true;
1068 break;
1069 }
1070 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SUBGROUP_PROPERTIES: {
1071 VkPhysicalDeviceSubgroupProperties *properties =
1072 (VkPhysicalDeviceSubgroupProperties *)ext;
1073 properties->subgroupSize = 64;
1074 properties->supportedStages = VK_SHADER_STAGE_COMPUTE_BIT;
1075 properties->supportedOperations = VK_SUBGROUP_FEATURE_BASIC_BIT |
1076 VK_SUBGROUP_FEATURE_VOTE_BIT;
1077 properties->quadOperationsInAllStages = false;
1078 break;
1079 }
1080 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VERTEX_ATTRIBUTE_DIVISOR_PROPERTIES_EXT: {
1081 VkPhysicalDeviceVertexAttributeDivisorPropertiesEXT *props =
1082 (VkPhysicalDeviceVertexAttributeDivisorPropertiesEXT *)ext;
1083 props->maxVertexAttribDivisor = UINT32_MAX;
1084 break;
1085 }
1086 default:
1087 break;
1088 }
1089 }
1090 }
1091
1092 static const VkQueueFamilyProperties tu_queue_family_properties = {
1093 .queueFlags =
1094 VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT | VK_QUEUE_TRANSFER_BIT,
1095 .queueCount = 1,
1096 .timestampValidBits = 48,
1097 .minImageTransferGranularity = { 1, 1, 1 },
1098 };
1099
1100 void
1101 tu_GetPhysicalDeviceQueueFamilyProperties(
1102 VkPhysicalDevice physicalDevice,
1103 uint32_t *pQueueFamilyPropertyCount,
1104 VkQueueFamilyProperties *pQueueFamilyProperties)
1105 {
1106 VK_OUTARRAY_MAKE(out, pQueueFamilyProperties, pQueueFamilyPropertyCount);
1107
1108 vk_outarray_append(&out, p) { *p = tu_queue_family_properties; }
1109 }
1110
1111 void
1112 tu_GetPhysicalDeviceQueueFamilyProperties2(
1113 VkPhysicalDevice physicalDevice,
1114 uint32_t *pQueueFamilyPropertyCount,
1115 VkQueueFamilyProperties2 *pQueueFamilyProperties)
1116 {
1117 VK_OUTARRAY_MAKE(out, pQueueFamilyProperties, pQueueFamilyPropertyCount);
1118
1119 vk_outarray_append(&out, p)
1120 {
1121 p->queueFamilyProperties = tu_queue_family_properties;
1122 }
1123 }
1124
1125 static uint64_t
tu_get_system_heap_size(void)
1127 {
1128 struct sysinfo info;
1129 sysinfo(&info);
1130
1131 uint64_t total_ram = (uint64_t) info.totalram * (uint64_t) info.mem_unit;
1132
1133 /* We don't want to burn too much ram with the GPU. If the user has 4GiB
1134 * or less, we use at most half. If they have more than 4GiB, we use 3/4.
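 * For example, 8GiB of system RAM yields a 6GiB heap, while 4GiB yields 2GiB.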
1135 */
1136 uint64_t available_ram;
1137 if (total_ram <= 4ull * 1024ull * 1024ull * 1024ull)
1138 available_ram = total_ram / 2;
1139 else
1140 available_ram = total_ram * 3 / 4;
1141
1142 return available_ram;
1143 }
1144
1145 void
1146 tu_GetPhysicalDeviceMemoryProperties(
1147 VkPhysicalDevice physicalDevice,
1148 VkPhysicalDeviceMemoryProperties *pMemoryProperties)
1149 {
1150 pMemoryProperties->memoryHeapCount = 1;
1151 pMemoryProperties->memoryHeaps[0].size = tu_get_system_heap_size();
1152 pMemoryProperties->memoryHeaps[0].flags = VK_MEMORY_HEAP_DEVICE_LOCAL_BIT;
1153
1154 pMemoryProperties->memoryTypeCount = 1;
1155 pMemoryProperties->memoryTypes[0].propertyFlags =
1156 VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT |
1157 VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT |
1158 VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
1159 pMemoryProperties->memoryTypes[0].heapIndex = 0;
1160 }
1161
1162 void
1163 tu_GetPhysicalDeviceMemoryProperties2(
1164 VkPhysicalDevice physicalDevice,
1165 VkPhysicalDeviceMemoryProperties2 *pMemoryProperties)
1166 {
tu_GetPhysicalDeviceMemoryProperties(
   physicalDevice, &pMemoryProperties->memoryProperties);
1169 }
1170
1171 static VkResult
1172 tu_queue_init(struct tu_device *device,
1173 struct tu_queue *queue,
1174 uint32_t queue_family_index,
1175 int idx,
1176 VkDeviceQueueCreateFlags flags)
1177 {
1178 vk_object_base_init(&device->vk, &queue->base, VK_OBJECT_TYPE_QUEUE);
1179
1180 queue->device = device;
1181 queue->queue_family_index = queue_family_index;
1182 queue->queue_idx = idx;
1183 queue->flags = flags;
1184
1185 int ret = tu_drm_submitqueue_new(device, 0, &queue->msm_queue_id);
1186 if (ret)
1187 return VK_ERROR_INITIALIZATION_FAILED;
1188
1189 tu_fence_init(&queue->submit_fence, false);
1190
1191 return VK_SUCCESS;
1192 }
1193
1194 static void
1195 tu_queue_finish(struct tu_queue *queue)
1196 {
1197 tu_fence_finish(&queue->submit_fence);
1198 tu_drm_submitqueue_close(queue->device, queue->msm_queue_id);
1199 }
1200
1201 static int
1202 tu_get_device_extension_index(const char *name)
1203 {
1204 for (unsigned i = 0; i < TU_DEVICE_EXTENSION_COUNT; ++i) {
1205 if (strcmp(name, tu_device_extensions[i].extensionName) == 0)
1206 return i;
1207 }
1208 return -1;
1209 }
1210
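/* Border color table uploaded to the global BO. Each entry below adds up to
 * 128 bytes (including the trailing pad), which appears to be the per-entry
 * stride the a6xx sampler border-color block expects.
 */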
1211 struct PACKED bcolor_entry {
1212 uint32_t fp32[4];
1213 uint16_t ui16[4];
1214 int16_t si16[4];
1215 uint16_t fp16[4];
1216 uint16_t rgb565;
1217 uint16_t rgb5a1;
1218 uint16_t rgba4;
1219 uint8_t __pad0[2];
1220 uint8_t ui8[4];
1221 int8_t si8[4];
1222 uint32_t rgb10a2;
1223 uint32_t z24; /* also s8? */
1224 uint16_t srgb[4]; /* appears to duplicate fp16[], but clamped, used for srgb */
1225 uint8_t __pad1[56];
1226 } border_color[] = {
1227 [VK_BORDER_COLOR_FLOAT_TRANSPARENT_BLACK] = {},
1228 [VK_BORDER_COLOR_INT_TRANSPARENT_BLACK] = {},
1229 [VK_BORDER_COLOR_FLOAT_OPAQUE_BLACK] = {
1230 .fp32[3] = 0x3f800000,
1231 .ui16[3] = 0xffff,
1232 .si16[3] = 0x7fff,
1233 .fp16[3] = 0x3c00,
1234 .rgb5a1 = 0x8000,
1235 .rgba4 = 0xf000,
1236 .ui8[3] = 0xff,
1237 .si8[3] = 0x7f,
1238 .rgb10a2 = 0xc0000000,
1239 .srgb[3] = 0x3c00,
1240 },
1241 [VK_BORDER_COLOR_INT_OPAQUE_BLACK] = {
1242 .fp32[3] = 1,
1243 .fp16[3] = 1,
1244 },
1245 [VK_BORDER_COLOR_FLOAT_OPAQUE_WHITE] = {
1246 .fp32[0 ... 3] = 0x3f800000,
1247 .ui16[0 ... 3] = 0xffff,
1248 .si16[0 ... 3] = 0x7fff,
1249 .fp16[0 ... 3] = 0x3c00,
1250 .rgb565 = 0xffff,
1251 .rgb5a1 = 0xffff,
1252 .rgba4 = 0xffff,
1253 .ui8[0 ... 3] = 0xff,
1254 .si8[0 ... 3] = 0x7f,
1255 .rgb10a2 = 0xffffffff,
1256 .z24 = 0xffffff,
1257 .srgb[0 ... 3] = 0x3c00,
1258 },
1259 [VK_BORDER_COLOR_INT_OPAQUE_WHITE] = {
1260 .fp32[0 ... 3] = 1,
1261 .fp16[0 ... 3] = 1,
1262 },
1263 };
1264
1265 VkResult
1266 tu_CreateDevice(VkPhysicalDevice physicalDevice,
1267 const VkDeviceCreateInfo *pCreateInfo,
1268 const VkAllocationCallbacks *pAllocator,
1269 VkDevice *pDevice)
1270 {
1271 TU_FROM_HANDLE(tu_physical_device, physical_device, physicalDevice);
1272 VkResult result;
1273 struct tu_device *device;
1274
1275 /* Check enabled features */
1276 if (pCreateInfo->pEnabledFeatures) {
1277 VkPhysicalDeviceFeatures supported_features;
1278 tu_GetPhysicalDeviceFeatures(physicalDevice, &supported_features);
1279 VkBool32 *supported_feature = (VkBool32 *) &supported_features;
1280 VkBool32 *enabled_feature = (VkBool32 *) pCreateInfo->pEnabledFeatures;
1281 unsigned num_features =
1282 sizeof(VkPhysicalDeviceFeatures) / sizeof(VkBool32);
1283 for (uint32_t i = 0; i < num_features; i++) {
1284 if (enabled_feature[i] && !supported_feature[i])
1285 return vk_error(physical_device->instance,
1286 VK_ERROR_FEATURE_NOT_PRESENT);
1287 }
1288 }
1289
1290 device = vk_zalloc2(&physical_device->instance->alloc, pAllocator,
1291 sizeof(*device), 8, VK_SYSTEM_ALLOCATION_SCOPE_DEVICE);
1292 if (!device)
1293 return vk_error(physical_device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
1294
1295 vk_device_init(&device->vk, pCreateInfo,
1296 &physical_device->instance->alloc, pAllocator);
1297
1298 device->instance = physical_device->instance;
1299 device->physical_device = physical_device;
1300 device->_lost = false;
1301
1302 for (uint32_t i = 0; i < pCreateInfo->enabledExtensionCount; i++) {
1303 const char *ext_name = pCreateInfo->ppEnabledExtensionNames[i];
1304 int index = tu_get_device_extension_index(ext_name);
1305 if (index < 0 ||
1306 !physical_device->supported_extensions.extensions[index]) {
1307 vk_free(&device->vk.alloc, device);
1308 return vk_error(physical_device->instance,
1309 VK_ERROR_EXTENSION_NOT_PRESENT);
1310 }
1311
1312 device->enabled_extensions.extensions[index] = true;
1313 }
1314
1315 for (unsigned i = 0; i < pCreateInfo->queueCreateInfoCount; i++) {
1316 const VkDeviceQueueCreateInfo *queue_create =
1317 &pCreateInfo->pQueueCreateInfos[i];
1318 uint32_t qfi = queue_create->queueFamilyIndex;
1319 device->queues[qfi] = vk_alloc(
1320 &device->vk.alloc, queue_create->queueCount * sizeof(struct tu_queue),
1321 8, VK_SYSTEM_ALLOCATION_SCOPE_DEVICE);
1322 if (!device->queues[qfi]) {
1323 result = VK_ERROR_OUT_OF_HOST_MEMORY;
1324 goto fail_queues;
1325 }
1326
1327 memset(device->queues[qfi], 0,
1328 queue_create->queueCount * sizeof(struct tu_queue));
1329
1330 device->queue_count[qfi] = queue_create->queueCount;
1331
1332 for (unsigned q = 0; q < queue_create->queueCount; q++) {
1333 result = tu_queue_init(device, &device->queues[qfi][q], qfi, q,
1334 queue_create->flags);
1335 if (result != VK_SUCCESS)
1336 goto fail_queues;
1337 }
1338 }
1339
1340 device->compiler = ir3_compiler_create(NULL, physical_device->gpu_id);
if (!device->compiler) {
   result = VK_ERROR_INITIALIZATION_FAILED;
   goto fail_queues;
}
1343
1344 /* initial sizes, these will increase if there is overflow */
1345 device->vsc_draw_strm_pitch = 0x1000 + VSC_PAD;
1346 device->vsc_prim_strm_pitch = 0x4000 + VSC_PAD;
1347
1348 STATIC_ASSERT(sizeof(border_color) == sizeof(((struct tu6_global*) 0)->border_color));
1349 result = tu_bo_init_new(device, &device->global_bo, sizeof(struct tu6_global));
1350 if (result != VK_SUCCESS)
1351 goto fail_global_bo;
1352
1353 result = tu_bo_map(device, &device->global_bo);
1354 if (result != VK_SUCCESS)
1355 goto fail_global_bo_map;
1356
1357 struct tu6_global *global = device->global_bo.map;
1358 memcpy(global->border_color, border_color, sizeof(border_color));
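/* Scratch dword consumed by VK_EXT_conditional_rendering: presumably
 * tu_CmdBeginConditionalRenderingEXT copies/massages the application's
 * predicate value here before CP_DRAW_PRED_SET reads it.
 */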
1359 global->predicate = 0;
1360 tu_init_clear_blit_shaders(global);
1361
1362 VkPipelineCacheCreateInfo ci;
1363 ci.sType = VK_STRUCTURE_TYPE_PIPELINE_CACHE_CREATE_INFO;
1364 ci.pNext = NULL;
1365 ci.flags = 0;
1366 ci.pInitialData = NULL;
1367 ci.initialDataSize = 0;
1368 VkPipelineCache pc;
1369 result =
1370 tu_CreatePipelineCache(tu_device_to_handle(device), &ci, NULL, &pc);
1371 if (result != VK_SUCCESS)
1372 goto fail_pipeline_cache;
1373
1374 device->mem_cache = tu_pipeline_cache_from_handle(pc);
1375
1376 for (unsigned i = 0; i < ARRAY_SIZE(device->scratch_bos); i++)
1377 mtx_init(&device->scratch_bos[i].construct_mtx, mtx_plain);
1378
1379 mtx_init(&device->vsc_pitch_mtx, mtx_plain);
1380
1381 *pDevice = tu_device_to_handle(device);
1382 return VK_SUCCESS;
1383
1384 fail_pipeline_cache:
1385 fail_global_bo_map:
1386 tu_bo_finish(device, &device->global_bo);
1387
1388 fail_global_bo:
1389 ralloc_free(device->compiler);
1390
1391 fail_queues:
1392 for (unsigned i = 0; i < TU_MAX_QUEUE_FAMILIES; i++) {
1393 for (unsigned q = 0; q < device->queue_count[i]; q++)
1394 tu_queue_finish(&device->queues[i][q]);
1395 if (device->queue_count[i])
1396 vk_object_free(&device->vk, NULL, device->queues[i]);
1397 }
1398
1399 vk_free(&device->vk.alloc, device);
1400 return result;
1401 }
1402
1403 void
1404 tu_DestroyDevice(VkDevice _device, const VkAllocationCallbacks *pAllocator)
1405 {
1406 TU_FROM_HANDLE(tu_device, device, _device);
1407
1408 if (!device)
1409 return;
1410
1411 for (unsigned i = 0; i < TU_MAX_QUEUE_FAMILIES; i++) {
1412 for (unsigned q = 0; q < device->queue_count[i]; q++)
1413 tu_queue_finish(&device->queues[i][q]);
1414 if (device->queue_count[i])
1415 vk_object_free(&device->vk, NULL, device->queues[i]);
1416 }
1417
1418 for (unsigned i = 0; i < ARRAY_SIZE(device->scratch_bos); i++) {
1419 if (device->scratch_bos[i].initialized)
1420 tu_bo_finish(device, &device->scratch_bos[i].bo);
1421 }
1422
1423 ir3_compiler_destroy(device->compiler);
1424
1425 VkPipelineCache pc = tu_pipeline_cache_to_handle(device->mem_cache);
1426 tu_DestroyPipelineCache(tu_device_to_handle(device), pc, NULL);
1427
1428 vk_free(&device->vk.alloc, device);
1429 }
1430
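/* Reached via the tu_device_set_lost() macro, which presumably supplies
 * __FILE__ and __LINE__ (see the call sites in tu_QueueSubmit below).
 */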
1431 VkResult
1432 _tu_device_set_lost(struct tu_device *device,
1433 const char *file, int line,
1434 const char *msg, ...)
1435 {
1436 /* Set the flag indicating that waits should return in finite time even
1437 * after device loss.
1438 */
1439 p_atomic_inc(&device->_lost);
1440
1441 /* TODO: Report the log message through VkDebugReportCallbackEXT instead */
1442 fprintf(stderr, "%s:%d: ", file, line);
1443 va_list ap;
1444 va_start(ap, msg);
1445 vfprintf(stderr, msg, ap);
1446 va_end(ap);
1447
1448 if (env_var_as_boolean("TU_ABORT_ON_DEVICE_LOSS", false))
1449 abort();
1450
1451 return VK_ERROR_DEVICE_LOST;
1452 }
1453
1454 VkResult
1455 tu_get_scratch_bo(struct tu_device *dev, uint64_t size, struct tu_bo **bo)
1456 {
1457 unsigned size_log2 = MAX2(util_logbase2_ceil64(size), MIN_SCRATCH_BO_SIZE_LOG2);
1458 unsigned index = size_log2 - MIN_SCRATCH_BO_SIZE_LOG2;
1459 assert(index < ARRAY_SIZE(dev->scratch_bos));
1460
1461 for (unsigned i = index; i < ARRAY_SIZE(dev->scratch_bos); i++) {
1462 if (p_atomic_read(&dev->scratch_bos[i].initialized)) {
1463 /* Fast path: just return the already-allocated BO. */
1464 *bo = &dev->scratch_bos[i].bo;
1465 return VK_SUCCESS;
1466 }
1467 }
1468
1469 /* Slow path: actually allocate the BO. We take a lock because the process
1470 * of allocating it is slow, and we don't want to block the CPU while it
1471 * finishes.
1472 */
1473 mtx_lock(&dev->scratch_bos[index].construct_mtx);
1474
1475 /* Another thread may have allocated it already while we were waiting on
1476 * the lock. We need to check this in order to avoid double-allocating.
1477 */
1478 if (dev->scratch_bos[index].initialized) {
1479 mtx_unlock(&dev->scratch_bos[index].construct_mtx);
1480 *bo = &dev->scratch_bos[index].bo;
1481 return VK_SUCCESS;
1482 }
1483
1484 unsigned bo_size = 1ull << size_log2;
1485 VkResult result = tu_bo_init_new(dev, &dev->scratch_bos[index].bo, bo_size);
1486 if (result != VK_SUCCESS) {
1487 mtx_unlock(&dev->scratch_bos[index].construct_mtx);
1488 return result;
1489 }
1490
1491 p_atomic_set(&dev->scratch_bos[index].initialized, true);
1492
1493 mtx_unlock(&dev->scratch_bos[index].construct_mtx);
1494
1495 *bo = &dev->scratch_bos[index].bo;
1496 return VK_SUCCESS;
1497 }
1498
1499 VkResult
1500 tu_EnumerateInstanceLayerProperties(uint32_t *pPropertyCount,
1501 VkLayerProperties *pProperties)
1502 {
1503 *pPropertyCount = 0;
1504 return VK_SUCCESS;
1505 }
1506
1507 VkResult
1508 tu_EnumerateDeviceLayerProperties(VkPhysicalDevice physicalDevice,
1509 uint32_t *pPropertyCount,
1510 VkLayerProperties *pProperties)
1511 {
1512 *pPropertyCount = 0;
1513 return VK_SUCCESS;
1514 }
1515
1516 void
1517 tu_GetDeviceQueue2(VkDevice _device,
1518 const VkDeviceQueueInfo2 *pQueueInfo,
1519 VkQueue *pQueue)
1520 {
1521 TU_FROM_HANDLE(tu_device, device, _device);
1522 struct tu_queue *queue;
1523
1524 queue =
1525 &device->queues[pQueueInfo->queueFamilyIndex][pQueueInfo->queueIndex];
1526 if (pQueueInfo->flags != queue->flags) {
1527 /* From the Vulkan 1.1.70 spec:
1528 *
1529 * "The queue returned by vkGetDeviceQueue2 must have the same
1530 * flags value from this structure as that used at device
1531 * creation time in a VkDeviceQueueCreateInfo instance. If no
1532 * matching flags were specified at device creation time then
1533 * pQueue will return VK_NULL_HANDLE."
1534 */
1535 *pQueue = VK_NULL_HANDLE;
1536 return;
1537 }
1538
1539 *pQueue = tu_queue_to_handle(queue);
1540 }
1541
1542 void
1543 tu_GetDeviceQueue(VkDevice _device,
1544 uint32_t queueFamilyIndex,
1545 uint32_t queueIndex,
1546 VkQueue *pQueue)
1547 {
1548 const VkDeviceQueueInfo2 info =
1549 (VkDeviceQueueInfo2) { .sType = VK_STRUCTURE_TYPE_DEVICE_QUEUE_INFO_2,
1550 .queueFamilyIndex = queueFamilyIndex,
1551 .queueIndex = queueIndex };
1552
1553 tu_GetDeviceQueue2(_device, &info, pQueue);
1554 }
1555
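/* Gather the msm syncobj handles backing the given semaphores. A semaphore
 * with an active temporary payload (e.g. one imported with
 * vkImportSemaphoreFdKHR) takes precedence over the permanent one, and wait
 * syncobjs are flagged for reset so binary-semaphore semantics hold.
 */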
1556 static VkResult
1557 tu_get_semaphore_syncobjs(const VkSemaphore *sems,
1558 uint32_t sem_count,
1559 bool wait,
1560 struct drm_msm_gem_submit_syncobj **out,
1561 uint32_t *out_count)
1562 {
1563 uint32_t syncobj_count = 0;
1564 struct drm_msm_gem_submit_syncobj *syncobjs;
1565
1566 for (uint32_t i = 0; i < sem_count; ++i) {
1567 TU_FROM_HANDLE(tu_semaphore, sem, sems[i]);
1568
1569 struct tu_semaphore_part *part =
1570 sem->temporary.kind != TU_SEMAPHORE_NONE ?
1571 &sem->temporary : &sem->permanent;
1572
1573 if (part->kind == TU_SEMAPHORE_SYNCOBJ)
1574 ++syncobj_count;
1575 }
1576
1577 *out = NULL;
1578 *out_count = syncobj_count;
1579 if (!syncobj_count)
1580 return VK_SUCCESS;
1581
1582 *out = syncobjs = calloc(syncobj_count, sizeof (*syncobjs));
1583 if (!syncobjs)
1584 return VK_ERROR_OUT_OF_HOST_MEMORY;
1585
1586 for (uint32_t i = 0, j = 0; i < sem_count; ++i) {
1587 TU_FROM_HANDLE(tu_semaphore, sem, sems[i]);
1588
1589 struct tu_semaphore_part *part =
1590 sem->temporary.kind != TU_SEMAPHORE_NONE ?
1591 &sem->temporary : &sem->permanent;
1592
1593 if (part->kind == TU_SEMAPHORE_SYNCOBJ) {
1594 syncobjs[j].handle = part->syncobj;
1595 syncobjs[j].flags = wait ? MSM_SUBMIT_SYNCOBJ_RESET : 0;
1596 ++j;
1597 }
1598 }
1599
1600 return VK_SUCCESS;
1601 }
1602
1603
1604 static void
1605 tu_semaphores_remove_temp(struct tu_device *device,
1606 const VkSemaphore *sems,
1607 uint32_t sem_count)
1608 {
1609 for (uint32_t i = 0; i < sem_count; ++i) {
1610 TU_FROM_HANDLE(tu_semaphore, sem, sems[i]);
1611 tu_semaphore_remove_temp(device, sem);
1612 }
1613 }
1614
1615 VkResult
1616 tu_QueueSubmit(VkQueue _queue,
1617 uint32_t submitCount,
1618 const VkSubmitInfo *pSubmits,
1619 VkFence _fence)
1620 {
1621 TU_FROM_HANDLE(tu_queue, queue, _queue);
1622 VkResult result;
1623
1624 for (uint32_t i = 0; i < submitCount; ++i) {
1625 const VkSubmitInfo *submit = pSubmits + i;
1626 const bool last_submit = (i == submitCount - 1);
1627 struct drm_msm_gem_submit_syncobj *in_syncobjs = NULL, *out_syncobjs = NULL;
1628 uint32_t nr_in_syncobjs, nr_out_syncobjs;
1629 struct tu_bo_list bo_list;
1630 tu_bo_list_init(&bo_list);
1631
1632 result = tu_get_semaphore_syncobjs(pSubmits[i].pWaitSemaphores,
1633 pSubmits[i].waitSemaphoreCount,
true, &in_syncobjs, &nr_in_syncobjs);
1635 if (result != VK_SUCCESS) {
1636 return tu_device_set_lost(queue->device,
1637 "failed to allocate space for semaphore submission\n");
1638 }
1639
1640 result = tu_get_semaphore_syncobjs(pSubmits[i].pSignalSemaphores,
1641 pSubmits[i].signalSemaphoreCount,
1642 false, &out_syncobjs, &nr_out_syncobjs);
1643 if (result != VK_SUCCESS) {
1644 free(in_syncobjs);
1645 return tu_device_set_lost(queue->device,
1646 "failed to allocate space for semaphore submission\n");
1647 }
1648
1649 uint32_t entry_count = 0;
1650 for (uint32_t j = 0; j < submit->commandBufferCount; ++j) {
1651 TU_FROM_HANDLE(tu_cmd_buffer, cmdbuf, submit->pCommandBuffers[j]);
1652 entry_count += cmdbuf->cs.entry_count;
1653 }
1654
1655 struct drm_msm_gem_submit_cmd cmds[entry_count];
1656 uint32_t entry_idx = 0;
1657 for (uint32_t j = 0; j < submit->commandBufferCount; ++j) {
1658 TU_FROM_HANDLE(tu_cmd_buffer, cmdbuf, submit->pCommandBuffers[j]);
1659 struct tu_cs *cs = &cmdbuf->cs;
1660 for (unsigned i = 0; i < cs->entry_count; ++i, ++entry_idx) {
1661 cmds[entry_idx].type = MSM_SUBMIT_CMD_BUF;
1662 cmds[entry_idx].submit_idx =
1663 tu_bo_list_add(&bo_list, cs->entries[i].bo,
1664 MSM_SUBMIT_BO_READ | MSM_SUBMIT_BO_DUMP);
1665 cmds[entry_idx].submit_offset = cs->entries[i].offset;
1666 cmds[entry_idx].size = cs->entries[i].size;
1667 cmds[entry_idx].pad = 0;
1668 cmds[entry_idx].nr_relocs = 0;
1669 cmds[entry_idx].relocs = 0;
1670 }
1671
1672 tu_bo_list_merge(&bo_list, &cmdbuf->bo_list);
1673 }
1674
1675 uint32_t flags = MSM_PIPE_3D0;
1676 if (nr_in_syncobjs) {
1677 flags |= MSM_SUBMIT_SYNCOBJ_IN;
1678 }
1679 if (nr_out_syncobjs) {
1680 flags |= MSM_SUBMIT_SYNCOBJ_OUT;
1681 }
1682
1683 if (last_submit) {
1684 flags |= MSM_SUBMIT_FENCE_FD_OUT;
1685 }
1686
1687 struct drm_msm_gem_submit req = {
1688 .flags = flags,
1689 .queueid = queue->msm_queue_id,
1690 .bos = (uint64_t)(uintptr_t) bo_list.bo_infos,
1691 .nr_bos = bo_list.count,
1692 .cmds = (uint64_t)(uintptr_t)cmds,
1693 .nr_cmds = entry_count,
1694 .in_syncobjs = (uint64_t)(uintptr_t)in_syncobjs,
1695 .out_syncobjs = (uint64_t)(uintptr_t)out_syncobjs,
1696 .nr_in_syncobjs = nr_in_syncobjs,
1697 .nr_out_syncobjs = nr_out_syncobjs,
1698 .syncobj_stride = sizeof(struct drm_msm_gem_submit_syncobj),
1699 };
1700
1701 int ret = drmCommandWriteRead(queue->device->physical_device->local_fd,
1702 DRM_MSM_GEM_SUBMIT,
1703 &req, sizeof(req));
1704 if (ret) {
1705 free(in_syncobjs);
1706 free(out_syncobjs);
1707 return tu_device_set_lost(queue->device, "submit failed: %s\n",
1708 strerror(errno));
1709 }
1710
1711 tu_bo_list_destroy(&bo_list);
1712 free(in_syncobjs);
1713 free(out_syncobjs);
1714
1715 tu_semaphores_remove_temp(queue->device, pSubmits[i].pWaitSemaphores,
1716 pSubmits[i].waitSemaphoreCount);
1717 if (last_submit) {
1718 /* no need to merge fences as queue execution is serialized */
1719 tu_fence_update_fd(&queue->submit_fence, req.fence_fd);
1720 }
1723 }
1724
1725 if (_fence != VK_NULL_HANDLE) {
1726 TU_FROM_HANDLE(tu_fence, fence, _fence);
1727 tu_fence_copy(fence, &queue->submit_fence);
1728 }
1729
1730 return VK_SUCCESS;
1731 }
1732
1733 VkResult
1734 tu_QueueWaitIdle(VkQueue _queue)
1735 {
1736 TU_FROM_HANDLE(tu_queue, queue, _queue);
1737
1738 if (tu_device_is_lost(queue->device))
1739 return VK_ERROR_DEVICE_LOST;
1740
1741 tu_fence_wait_idle(&queue->submit_fence);
1742
1743 return VK_SUCCESS;
1744 }
1745
1746 VkResult
1747 tu_DeviceWaitIdle(VkDevice _device)
1748 {
1749 TU_FROM_HANDLE(tu_device, device, _device);
1750
1751 if (tu_device_is_lost(device))
1752 return VK_ERROR_DEVICE_LOST;
1753
1754 for (unsigned i = 0; i < TU_MAX_QUEUE_FAMILIES; i++) {
1755 for (unsigned q = 0; q < device->queue_count[i]; q++) {
1756 tu_QueueWaitIdle(tu_queue_to_handle(&device->queues[i][q]));
1757 }
1758 }
1759 return VK_SUCCESS;
1760 }
1761
1762 VkResult
1763 tu_EnumerateInstanceExtensionProperties(const char *pLayerName,
1764 uint32_t *pPropertyCount,
1765 VkExtensionProperties *pProperties)
1766 {
1767 VK_OUTARRAY_MAKE(out, pProperties, pPropertyCount);
1768
1769 /* We support no layers */
1770 if (pLayerName)
1771 return vk_error(NULL, VK_ERROR_LAYER_NOT_PRESENT);
1772
1773 for (int i = 0; i < TU_INSTANCE_EXTENSION_COUNT; i++) {
1774 if (tu_instance_extensions_supported.extensions[i]) {
1775 vk_outarray_append(&out, prop) { *prop = tu_instance_extensions[i]; }
1776 }
1777 }
1778
1779 return vk_outarray_status(&out);
1780 }
1781
1782 VkResult
1783 tu_EnumerateDeviceExtensionProperties(VkPhysicalDevice physicalDevice,
1784 const char *pLayerName,
1785 uint32_t *pPropertyCount,
1786 VkExtensionProperties *pProperties)
1787 {
1789 TU_FROM_HANDLE(tu_physical_device, device, physicalDevice);
1790 VK_OUTARRAY_MAKE(out, pProperties, pPropertyCount);
1791
1792 /* We support no layers */
1793 if (pLayerName)
1794 return vk_error(NULL, VK_ERROR_LAYER_NOT_PRESENT);
1795
1796 for (int i = 0; i < TU_DEVICE_EXTENSION_COUNT; i++) {
1797 if (device->supported_extensions.extensions[i]) {
1798 vk_outarray_append(&out, prop) { *prop = tu_device_extensions[i]; }
1799 }
1800 }
1801
1802 return vk_outarray_status(&out);
1803 }
1804
1805 PFN_vkVoidFunction
1806 tu_GetInstanceProcAddr(VkInstance _instance, const char *pName)
1807 {
1808 TU_FROM_HANDLE(tu_instance, instance, _instance);
1809
1810 return tu_lookup_entrypoint_checked(
1811 pName, instance ? instance->api_version : 0,
1812 instance ? &instance->enabled_extensions : NULL, NULL);
1813 }
1814
1815 /* The loader wants us to expose a second GetInstanceProcAddr function
1816 * to work around certain LD_PRELOAD issues seen in apps.
1817 */
1818 PUBLIC
1819 VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL
1820 vk_icdGetInstanceProcAddr(VkInstance instance, const char *pName);
1821
1822 PUBLIC
1823 VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL
1824 vk_icdGetInstanceProcAddr(VkInstance instance, const char *pName)
1825 {
1826 return tu_GetInstanceProcAddr(instance, pName);
1827 }
1828
1829 PFN_vkVoidFunction
1830 tu_GetDeviceProcAddr(VkDevice _device, const char *pName)
1831 {
1832 TU_FROM_HANDLE(tu_device, device, _device);
1833
1834 return tu_lookup_entrypoint_checked(pName, device->instance->api_version,
1835 &device->instance->enabled_extensions,
1836 &device->enabled_extensions);
1837 }
1838
1839 static VkResult
1840 tu_alloc_memory(struct tu_device *device,
1841 const VkMemoryAllocateInfo *pAllocateInfo,
1842 const VkAllocationCallbacks *pAllocator,
1843 VkDeviceMemory *pMem)
1844 {
1845 struct tu_device_memory *mem;
1846 VkResult result;
1847
1848 assert(pAllocateInfo->sType == VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO);
1849
1850 if (pAllocateInfo->allocationSize == 0) {
1851 /* Apparently, this is allowed */
1852 *pMem = VK_NULL_HANDLE;
1853 return VK_SUCCESS;
1854 }
1855
1856 mem = vk_object_alloc(&device->vk, pAllocator, sizeof(*mem),
1857 VK_OBJECT_TYPE_DEVICE_MEMORY);
1858 if (mem == NULL)
1859 return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
1860
1861 const VkImportMemoryFdInfoKHR *fd_info =
1862 vk_find_struct_const(pAllocateInfo->pNext, IMPORT_MEMORY_FD_INFO_KHR);
1863 if (fd_info && !fd_info->handleType)
1864 fd_info = NULL;
1865
1866 if (fd_info) {
1867 assert(fd_info->handleType ==
1868 VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT ||
1869 fd_info->handleType ==
1870 VK_EXTERNAL_MEMORY_HANDLE_TYPE_DMA_BUF_BIT_EXT);
1871
1872 /*
1873 * TODO Importing the same fd twice gives us the same handle without
1874 * reference counting. We need to maintain a per-instance handle-to-bo
1875 * table and add a reference count to tu_bo (see the sketch after this function).
1876 */
1877 result = tu_bo_init_dmabuf(device, &mem->bo,
1878 pAllocateInfo->allocationSize, fd_info->fd);
1879 if (result == VK_SUCCESS) {
1880 /* take ownership and close the fd */
1881 close(fd_info->fd);
1882 }
1883 } else {
1884 result =
1885 tu_bo_init_new(device, &mem->bo, pAllocateInfo->allocationSize);
1886 }
1887
1888 if (result != VK_SUCCESS) {
1889 vk_object_free(&device->vk, pAllocator, mem);
1890 return result;
1891 }
1892
1893 mem->size = pAllocateInfo->allocationSize;
1894 mem->type_index = pAllocateInfo->memoryTypeIndex;
1895
1896 mem->map = NULL;
1897 mem->user_ptr = NULL;
1898
1899 *pMem = tu_device_memory_to_handle(mem);
1900
1901 return VK_SUCCESS;
1902 }
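
/* A minimal sketch, not upstream turnip code, of the per-instance
 * handle-to-bo table mentioned in the TODO inside tu_alloc_memory() above.
 * All names here (tu_bo_import_table, tu_bo_import_ref, ...) are
 * hypothetical: the import path would bump a per-GEM-handle refcount, and
 * the free path would only close the handle once the count reaches zero.
 */
struct tu_bo_import_entry {
   uint32_t gem_handle;
   uint32_t refcount;
};

#define TU_BO_IMPORT_TABLE_SIZE 64 /* illustrative fixed capacity */

struct tu_bo_import_table {
   mtx_t mutex; /* c11/threads.h, as used elsewhere in mesa */
   struct tu_bo_import_entry entries[TU_BO_IMPORT_TABLE_SIZE];
};

static struct tu_bo_import_entry *
tu_bo_import_ref(struct tu_bo_import_table *table, uint32_t gem_handle)
{
   struct tu_bo_import_entry *free_slot = NULL;

   mtx_lock(&table->mutex);
   for (unsigned i = 0; i < TU_BO_IMPORT_TABLE_SIZE; i++) {
      struct tu_bo_import_entry *e = &table->entries[i];
      if (e->refcount && e->gem_handle == gem_handle) {
         /* the same dma-buf was imported before: share the entry */
         e->refcount++;
         mtx_unlock(&table->mutex);
         return e;
      }
      if (!e->refcount && !free_slot)
         free_slot = e;
   }
   if (free_slot) {
      free_slot->gem_handle = gem_handle;
      free_slot->refcount = 1;
   }
   mtx_unlock(&table->mutex);
   return free_slot; /* NULL means the (fixed-size) table is full */
}

/* The matching tu_bo_import_unref() would decrement the count and call
 * tu_gem_close() only when it drops to zero; omitted for brevity. */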
1903
1904 VkResult
1905 tu_AllocateMemory(VkDevice _device,
1906 const VkMemoryAllocateInfo *pAllocateInfo,
1907 const VkAllocationCallbacks *pAllocator,
1908 VkDeviceMemory *pMem)
1909 {
1910 TU_FROM_HANDLE(tu_device, device, _device);
1911 return tu_alloc_memory(device, pAllocateInfo, pAllocator, pMem);
1912 }
1913
1914 void
1915 tu_FreeMemory(VkDevice _device,
1916 VkDeviceMemory _mem,
1917 const VkAllocationCallbacks *pAllocator)
1918 {
1919 TU_FROM_HANDLE(tu_device, device, _device);
1920 TU_FROM_HANDLE(tu_device_memory, mem, _mem);
1921
1922 if (mem == NULL)
1923 return;
1924
1925 tu_bo_finish(device, &mem->bo);
1926 vk_object_free(&device->vk, pAllocator, mem);
1927 }
1928
1929 VkResult
1930 tu_MapMemory(VkDevice _device,
1931 VkDeviceMemory _memory,
1932 VkDeviceSize offset,
1933 VkDeviceSize size,
1934 VkMemoryMapFlags flags,
1935 void **ppData)
1936 {
1937 TU_FROM_HANDLE(tu_device, device, _device);
1938 TU_FROM_HANDLE(tu_device_memory, mem, _memory);
1939 VkResult result;
1940
1941 if (mem == NULL) {
1942 *ppData = NULL;
1943 return VK_SUCCESS;
1944 }
1945
1946 if (mem->user_ptr) {
1947 *ppData = mem->user_ptr;
1948 } else if (!mem->map) {
1949 result = tu_bo_map(device, &mem->bo);
1950 if (result != VK_SUCCESS)
1951 return result;
1952 *ppData = mem->map = mem->bo.map;
1953 } else
1954 *ppData = mem->map;
1955
1956 if (*ppData) {
1957 *ppData += offset;
1958 return VK_SUCCESS;
1959 }
1960
1961 return vk_error(device->instance, VK_ERROR_MEMORY_MAP_FAILED);
1962 }
1963
1964 void
1965 tu_UnmapMemory(VkDevice _device, VkDeviceMemory _memory)
1966 {
1967 /* I do not see any unmapping done by the freedreno Gallium driver. */
1968 }
1969
1970 VkResult
1971 tu_FlushMappedMemoryRanges(VkDevice _device,
1972 uint32_t memoryRangeCount,
1973 const VkMappedMemoryRange *pMemoryRanges)
1974 {
1975 return VK_SUCCESS;
1976 }
1977
1978 VkResult
1979 tu_InvalidateMappedMemoryRanges(VkDevice _device,
1980 uint32_t memoryRangeCount,
1981 const VkMappedMemoryRange *pMemoryRanges)
1982 {
1983 return VK_SUCCESS;
1984 }
1985
1986 void
1987 tu_GetBufferMemoryRequirements(VkDevice _device,
1988 VkBuffer _buffer,
1989 VkMemoryRequirements *pMemoryRequirements)
1990 {
1991 TU_FROM_HANDLE(tu_buffer, buffer, _buffer);
1992
1993 pMemoryRequirements->memoryTypeBits = 1;
1994 pMemoryRequirements->alignment = 64;
1995 pMemoryRequirements->size =
1996 align64(buffer->size, pMemoryRequirements->alignment);
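/* e.g. a 100-byte buffer reports align64(100, 64) == 128 here */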
1997 }
1998
1999 void
2000 tu_GetBufferMemoryRequirements2(
2001 VkDevice device,
2002 const VkBufferMemoryRequirementsInfo2 *pInfo,
2003 VkMemoryRequirements2 *pMemoryRequirements)
2004 {
2005 tu_GetBufferMemoryRequirements(device, pInfo->buffer,
2006 &pMemoryRequirements->memoryRequirements);
2007 }
2008
2009 void
2010 tu_GetImageMemoryRequirements(VkDevice _device,
2011 VkImage _image,
2012 VkMemoryRequirements *pMemoryRequirements)
2013 {
2014 TU_FROM_HANDLE(tu_image, image, _image);
2015
2016 pMemoryRequirements->memoryTypeBits = 1;
2017 pMemoryRequirements->size = image->total_size;
2018 pMemoryRequirements->alignment = image->layout[0].base_align;
2019 }
2020
2021 void
2022 tu_GetImageMemoryRequirements2(VkDevice device,
2023 const VkImageMemoryRequirementsInfo2 *pInfo,
2024 VkMemoryRequirements2 *pMemoryRequirements)
2025 {
2026 tu_GetImageMemoryRequirements(device, pInfo->image,
2027 &pMemoryRequirements->memoryRequirements);
2028 }
2029
2030 void
2031 tu_GetImageSparseMemoryRequirements(
2032 VkDevice device,
2033 VkImage image,
2034 uint32_t *pSparseMemoryRequirementCount,
2035 VkSparseImageMemoryRequirements *pSparseMemoryRequirements)
2036 {
2037 tu_stub();
2038 }
2039
2040 void
2041 tu_GetImageSparseMemoryRequirements2(
2042 VkDevice device,
2043 const VkImageSparseMemoryRequirementsInfo2 *pInfo,
2044 uint32_t *pSparseMemoryRequirementCount,
2045 VkSparseImageMemoryRequirements2 *pSparseMemoryRequirements)
2046 {
2047 tu_stub();
2048 }
2049
2050 void
2051 tu_GetDeviceMemoryCommitment(VkDevice device,
2052 VkDeviceMemory memory,
2053 VkDeviceSize *pCommittedMemoryInBytes)
2054 {
2055 *pCommittedMemoryInBytes = 0;
2056 }
2057
2058 VkResult
2059 tu_BindBufferMemory2(VkDevice device,
2060 uint32_t bindInfoCount,
2061 const VkBindBufferMemoryInfo *pBindInfos)
2062 {
2063 for (uint32_t i = 0; i < bindInfoCount; ++i) {
2064 TU_FROM_HANDLE(tu_device_memory, mem, pBindInfos[i].memory);
2065 TU_FROM_HANDLE(tu_buffer, buffer, pBindInfos[i].buffer);
2066
2067 if (mem) {
2068 buffer->bo = &mem->bo;
2069 buffer->bo_offset = pBindInfos[i].memoryOffset;
2070 } else {
2071 buffer->bo = NULL;
2072 }
2073 }
2074 return VK_SUCCESS;
2075 }
2076
2077 VkResult
2078 tu_BindBufferMemory(VkDevice device,
2079 VkBuffer buffer,
2080 VkDeviceMemory memory,
2081 VkDeviceSize memoryOffset)
2082 {
2083 const VkBindBufferMemoryInfo info = {
2084 .sType = VK_STRUCTURE_TYPE_BIND_BUFFER_MEMORY_INFO,
2085 .buffer = buffer,
2086 .memory = memory,
2087 .memoryOffset = memoryOffset
2088 };
2089
2090 return tu_BindBufferMemory2(device, 1, &info);
2091 }
2092
2093 VkResult
2094 tu_BindImageMemory2(VkDevice device,
2095 uint32_t bindInfoCount,
2096 const VkBindImageMemoryInfo *pBindInfos)
2097 {
2098 for (uint32_t i = 0; i < bindInfoCount; ++i) {
2099 TU_FROM_HANDLE(tu_image, image, pBindInfos[i].image);
2100 TU_FROM_HANDLE(tu_device_memory, mem, pBindInfos[i].memory);
2101
2102 if (mem) {
2103 image->bo = &mem->bo;
2104 image->bo_offset = pBindInfos[i].memoryOffset;
2105 } else {
2106 image->bo = NULL;
2107 image->bo_offset = 0;
2108 }
2109 }
2110
2111 return VK_SUCCESS;
2112 }
2113
2114 VkResult
2115 tu_BindImageMemory(VkDevice device,
2116 VkImage image,
2117 VkDeviceMemory memory,
2118 VkDeviceSize memoryOffset)
2119 {
2120 const VkBindImageMemoryInfo info = {
2121 .sType = VK_STRUCTURE_TYPE_BIND_IMAGE_MEMORY_INFO,
2122 .image = image,
2123 .memory = memory,
2124 .memoryOffset = memoryOffset
2125 };
2126
2127 return tu_BindImageMemory2(device, 1, &info);
2128 }
2129
2130 VkResult
2131 tu_QueueBindSparse(VkQueue _queue,
2132 uint32_t bindInfoCount,
2133 const VkBindSparseInfo *pBindInfo,
2134 VkFence _fence)
2135 {
2136 return VK_SUCCESS;
2137 }
2138
2139 // Queue semaphore functions
2140
2141
2142 static void
2143 tu_semaphore_part_destroy(struct tu_device *device,
2144 struct tu_semaphore_part *part)
2145 {
2146 switch(part->kind) {
2147 case TU_SEMAPHORE_NONE:
2148 break;
2149 case TU_SEMAPHORE_SYNCOBJ:
2150 drmSyncobjDestroy(device->physical_device->local_fd, part->syncobj);
2151 break;
2152 }
2153 part->kind = TU_SEMAPHORE_NONE;
2154 }
2155
2156 static void
2157 tu_semaphore_remove_temp(struct tu_device *device,
2158 struct tu_semaphore *sem)
2159 {
2160 if (sem->temporary.kind != TU_SEMAPHORE_NONE) {
2161 tu_semaphore_part_destroy(device, &sem->temporary);
2162 }
2163 }
2164
2165 VkResult
2166 tu_CreateSemaphore(VkDevice _device,
2167 const VkSemaphoreCreateInfo *pCreateInfo,
2168 const VkAllocationCallbacks *pAllocator,
2169 VkSemaphore *pSemaphore)
2170 {
2171 TU_FROM_HANDLE(tu_device, device, _device);
2172
2173 struct tu_semaphore *sem =
2174 vk_object_alloc(&device->vk, pAllocator, sizeof(*sem),
2175 VK_OBJECT_TYPE_SEMAPHORE);
2176 if (!sem)
2177 return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
2178
2179 const VkExportSemaphoreCreateInfo *export =
2180 vk_find_struct_const(pCreateInfo->pNext, EXPORT_SEMAPHORE_CREATE_INFO);
2181 VkExternalSemaphoreHandleTypeFlags handleTypes =
2182 export ? export->handleTypes : 0;
2183
2184 sem->permanent.kind = TU_SEMAPHORE_NONE;
2185 sem->temporary.kind = TU_SEMAPHORE_NONE;
2186
2187 if (handleTypes) {
2188 if (drmSyncobjCreate(device->physical_device->local_fd, 0, &sem->permanent.syncobj) < 0) {
2189 vk_object_free(&device->vk, pAllocator, sem);
2190 return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
2191 }
2192 sem->permanent.kind = TU_SEMAPHORE_SYNCOBJ;
2193 }
2194 *pSemaphore = tu_semaphore_to_handle(sem);
2195 return VK_SUCCESS;
2196 }
2197
2198 void
2199 tu_DestroySemaphore(VkDevice _device,
2200 VkSemaphore _semaphore,
2201 const VkAllocationCallbacks *pAllocator)
2202 {
2203 TU_FROM_HANDLE(tu_device, device, _device);
2204 TU_FROM_HANDLE(tu_semaphore, sem, _semaphore);
2205 if (!_semaphore)
2206 return;
2207
2208 tu_semaphore_part_destroy(device, &sem->permanent);
2209 tu_semaphore_part_destroy(device, &sem->temporary);
2210
2211 vk_object_free(&device->vk, pAllocator, sem);
2212 }
2213
2214 VkResult
2215 tu_CreateEvent(VkDevice _device,
2216 const VkEventCreateInfo *pCreateInfo,
2217 const VkAllocationCallbacks *pAllocator,
2218 VkEvent *pEvent)
2219 {
2220 TU_FROM_HANDLE(tu_device, device, _device);
2221
2222 struct tu_event *event =
2223 vk_object_alloc(&device->vk, pAllocator, sizeof(*event),
2224 VK_OBJECT_TYPE_EVENT);
2225 if (!event)
2226 return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
2227
2228 VkResult result = tu_bo_init_new(device, &event->bo, 0x1000);
2229 if (result != VK_SUCCESS)
2230 goto fail_alloc;
2231
2232 result = tu_bo_map(device, &event->bo);
2233 if (result != VK_SUCCESS)
2234 goto fail_map;
2235
2236 *pEvent = tu_event_to_handle(event);
2237
2238 return VK_SUCCESS;
2239
2240 fail_map:
2241 tu_bo_finish(device, &event->bo);
2242 fail_alloc:
2243 vk_object_free(&device->vk, pAllocator, event);
2244 return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
2245 }
2246
2247 void
2248 tu_DestroyEvent(VkDevice _device,
2249 VkEvent _event,
2250 const VkAllocationCallbacks *pAllocator)
2251 {
2252 TU_FROM_HANDLE(tu_device, device, _device);
2253 TU_FROM_HANDLE(tu_event, event, _event);
2254
2255 if (!event)
2256 return;
2257
2258 tu_bo_finish(device, &event->bo);
2259 vk_object_free(&device->vk, pAllocator, event);
2260 }
2261
2262 VkResult
2263 tu_GetEventStatus(VkDevice _device, VkEvent _event)
2264 {
2265 TU_FROM_HANDLE(tu_event, event, _event);
2266
2267 if (*(uint64_t*) event->bo.map == 1)
2268 return VK_EVENT_SET;
2269 return VK_EVENT_RESET;
2270 }
2271
2272 VkResult
2273 tu_SetEvent(VkDevice _device, VkEvent _event)
2274 {
2275 TU_FROM_HANDLE(tu_event, event, _event);
2276 *(uint64_t*) event->bo.map = 1;
2277
2278 return VK_SUCCESS;
2279 }
2280
2281 VkResult
2282 tu_ResetEvent(VkDevice _device, VkEvent _event)
2283 {
2284 TU_FROM_HANDLE(tu_event, event, _event);
2285 *(uint64_t*) event->bo.map = 0;
2286
2287 return VK_SUCCESS;
2288 }
2289
2290 VkResult
2291 tu_CreateBuffer(VkDevice _device,
2292 const VkBufferCreateInfo *pCreateInfo,
2293 const VkAllocationCallbacks *pAllocator,
2294 VkBuffer *pBuffer)
2295 {
2296 TU_FROM_HANDLE(tu_device, device, _device);
2297 struct tu_buffer *buffer;
2298
2299 assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO);
2300
2301 buffer = vk_object_alloc(&device->vk, pAllocator, sizeof(*buffer),
2302 VK_OBJECT_TYPE_BUFFER);
2303 if (buffer == NULL)
2304 return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
2305
2306 buffer->size = pCreateInfo->size;
2307 buffer->usage = pCreateInfo->usage;
2308 buffer->flags = pCreateInfo->flags;
2309
2310 *pBuffer = tu_buffer_to_handle(buffer);
2311
2312 return VK_SUCCESS;
2313 }
2314
2315 void
2316 tu_DestroyBuffer(VkDevice _device,
2317 VkBuffer _buffer,
2318 const VkAllocationCallbacks *pAllocator)
2319 {
2320 TU_FROM_HANDLE(tu_device, device, _device);
2321 TU_FROM_HANDLE(tu_buffer, buffer, _buffer);
2322
2323 if (!buffer)
2324 return;
2325
2326 vk_object_free(&device->vk, pAllocator, buffer);
2327 }
2328
2329 VkResult
2330 tu_CreateFramebuffer(VkDevice _device,
2331 const VkFramebufferCreateInfo *pCreateInfo,
2332 const VkAllocationCallbacks *pAllocator,
2333 VkFramebuffer *pFramebuffer)
2334 {
2335 TU_FROM_HANDLE(tu_device, device, _device);
2336 TU_FROM_HANDLE(tu_render_pass, pass, pCreateInfo->renderPass);
2337 struct tu_framebuffer *framebuffer;
2338
2339 assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO);
2340
2341 size_t size = sizeof(*framebuffer) + sizeof(struct tu_attachment_info) *
2342 pCreateInfo->attachmentCount;
2343 framebuffer = vk_object_alloc(&device->vk, pAllocator, size,
2344 VK_OBJECT_TYPE_FRAMEBUFFER);
2345 if (framebuffer == NULL)
2346 return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
2347
2348 framebuffer->attachment_count = pCreateInfo->attachmentCount;
2349 framebuffer->width = pCreateInfo->width;
2350 framebuffer->height = pCreateInfo->height;
2351 framebuffer->layers = pCreateInfo->layers;
2352 for (uint32_t i = 0; i < pCreateInfo->attachmentCount; i++) {
2353 VkImageView _iview = pCreateInfo->pAttachments[i];
2354 struct tu_image_view *iview = tu_image_view_from_handle(_iview);
2355 framebuffer->attachments[i].attachment = iview;
2356 }
2357
2358 tu_framebuffer_tiling_config(framebuffer, device, pass);
2359
2360 *pFramebuffer = tu_framebuffer_to_handle(framebuffer);
2361 return VK_SUCCESS;
2362 }
2363
2364 void
2365 tu_DestroyFramebuffer(VkDevice _device,
2366 VkFramebuffer _fb,
2367 const VkAllocationCallbacks *pAllocator)
2368 {
2369 TU_FROM_HANDLE(tu_device, device, _device);
2370 TU_FROM_HANDLE(tu_framebuffer, fb, _fb);
2371
2372 if (!fb)
2373 return;
2374
2375 vk_object_free(&device->vk, pAllocator, fb);
2376 }
2377
2378 static void
2379 tu_init_sampler(struct tu_device *device,
2380 struct tu_sampler *sampler,
2381 const VkSamplerCreateInfo *pCreateInfo)
2382 {
2383 const struct VkSamplerReductionModeCreateInfo *reduction =
2384 vk_find_struct_const(pCreateInfo->pNext, SAMPLER_REDUCTION_MODE_CREATE_INFO);
2385 const struct VkSamplerYcbcrConversionInfo *ycbcr_conversion =
2386 vk_find_struct_const(pCreateInfo->pNext, SAMPLER_YCBCR_CONVERSION_INFO);
2387
2388 unsigned aniso = pCreateInfo->anisotropyEnable ?
2389 util_last_bit(MIN2((uint32_t)pCreateInfo->maxAnisotropy >> 1, 8)) : 0;
2390 bool miplinear = (pCreateInfo->mipmapMode == VK_SAMPLER_MIPMAP_MODE_LINEAR);
2391 float min_lod = CLAMP(pCreateInfo->minLod, 0.0f, 4095.0f / 256.0f);
2392 float max_lod = CLAMP(pCreateInfo->maxLod, 0.0f, 4095.0f / 256.0f);
2393
2394 sampler->descriptor[0] =
2395 COND(miplinear, A6XX_TEX_SAMP_0_MIPFILTER_LINEAR_NEAR) |
2396 A6XX_TEX_SAMP_0_XY_MAG(tu6_tex_filter(pCreateInfo->magFilter, aniso)) |
2397 A6XX_TEX_SAMP_0_XY_MIN(tu6_tex_filter(pCreateInfo->minFilter, aniso)) |
2398 A6XX_TEX_SAMP_0_ANISO(aniso) |
2399 A6XX_TEX_SAMP_0_WRAP_S(tu6_tex_wrap(pCreateInfo->addressModeU)) |
2400 A6XX_TEX_SAMP_0_WRAP_T(tu6_tex_wrap(pCreateInfo->addressModeV)) |
2401 A6XX_TEX_SAMP_0_WRAP_R(tu6_tex_wrap(pCreateInfo->addressModeW)) |
2402 A6XX_TEX_SAMP_0_LOD_BIAS(pCreateInfo->mipLodBias);
2403 sampler->descriptor[1] =
2404 /* COND(!cso->seamless_cube_map, A6XX_TEX_SAMP_1_CUBEMAPSEAMLESSFILTOFF) | */
2405 COND(pCreateInfo->unnormalizedCoordinates, A6XX_TEX_SAMP_1_UNNORM_COORDS) |
2406 A6XX_TEX_SAMP_1_MIN_LOD(min_lod) |
2407 A6XX_TEX_SAMP_1_MAX_LOD(max_lod) |
2408 COND(pCreateInfo->compareEnable,
2409 A6XX_TEX_SAMP_1_COMPARE_FUNC(tu6_compare_func(pCreateInfo->compareOp)));
2410 /* This is an offset into the border_color BO, which we fill with all the
2411 * possible Vulkan border colors in the correct order, so we can just use
2412 * the Vulkan enum with no translation necessary.
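* For example, VK_BORDER_COLOR_FLOAT_OPAQUE_BLACK has enum value 2, so it
* selects the entry at byte offset 2 * sizeof(struct bcolor_entry).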
2413 */
2414 sampler->descriptor[2] =
2415 A6XX_TEX_SAMP_2_BCOLOR_OFFSET((unsigned) pCreateInfo->borderColor *
2416 sizeof(struct bcolor_entry));
2417 sampler->descriptor[3] = 0;
2418
2419 if (reduction) {
2420 sampler->descriptor[2] |= A6XX_TEX_SAMP_2_REDUCTION_MODE(
2421 tu6_reduction_mode(reduction->reductionMode));
2422 }
2423
2424 sampler->ycbcr_sampler = ycbcr_conversion ?
2425 tu_sampler_ycbcr_conversion_from_handle(ycbcr_conversion->conversion) : NULL;
2426
2427 if (sampler->ycbcr_sampler &&
2428 sampler->ycbcr_sampler->chroma_filter == VK_FILTER_LINEAR) {
2429 sampler->descriptor[2] |= A6XX_TEX_SAMP_2_CHROMA_LINEAR;
2430 }
2431
2432 /* TODO:
2433 * A6XX_TEX_SAMP_1_MIPFILTER_LINEAR_FAR disables mipmapping, but vk has no NONE mipfilter?
2434 */
2435 }
2436
2437 VkResult
2438 tu_CreateSampler(VkDevice _device,
2439 const VkSamplerCreateInfo *pCreateInfo,
2440 const VkAllocationCallbacks *pAllocator,
2441 VkSampler *pSampler)
2442 {
2443 TU_FROM_HANDLE(tu_device, device, _device);
2444 struct tu_sampler *sampler;
2445
2446 assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_SAMPLER_CREATE_INFO);
2447
2448 sampler = vk_object_alloc(&device->vk, pAllocator, sizeof(*sampler),
2449 VK_OBJECT_TYPE_SAMPLER);
2450 if (!sampler)
2451 return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
2452
2453 tu_init_sampler(device, sampler, pCreateInfo);
2454 *pSampler = tu_sampler_to_handle(sampler);
2455
2456 return VK_SUCCESS;
2457 }
2458
2459 void
2460 tu_DestroySampler(VkDevice _device,
2461 VkSampler _sampler,
2462 const VkAllocationCallbacks *pAllocator)
2463 {
2464 TU_FROM_HANDLE(tu_device, device, _device);
2465 TU_FROM_HANDLE(tu_sampler, sampler, _sampler);
2466
2467 if (!sampler)
2468 return;
2469
2470 vk_object_free(&device->vk, pAllocator, sampler);
2471 }
2472
2473 /* vk_icd.h does not declare this function, so we declare it here to
2474 * suppress Wmissing-prototypes.
2475 */
2476 PUBLIC VKAPI_ATTR VkResult VKAPI_CALL
2477 vk_icdNegotiateLoaderICDInterfaceVersion(uint32_t *pSupportedVersion);
2478
2479 PUBLIC VKAPI_ATTR VkResult VKAPI_CALL
2480 vk_icdNegotiateLoaderICDInterfaceVersion(uint32_t *pSupportedVersion)
2481 {
2482 /* For the full details on loader interface versioning, see
2483 * <https://github.com/KhronosGroup/Vulkan-LoaderAndValidationLayers/blob/master/loader/LoaderAndLayerInterface.md>.
2484 * What follows is a condensed summary, to help you navigate the large and
2485 * confusing official doc.
2486 *
2487 * - Loader interface v0 is incompatible with later versions. We don't
2488 * support it.
2489 *
2490 * - In loader interface v1:
2491 * - The first ICD entrypoint called by the loader is
2492 * vk_icdGetInstanceProcAddr(). The ICD must statically expose this
2493 * entrypoint.
2494 * - The ICD must statically expose no other Vulkan symbol unless it
2495 * is linked with -Bsymbolic.
2496 * - Each dispatchable Vulkan handle created by the ICD must be
2497 * a pointer to a struct whose first member is VK_LOADER_DATA. The
2498 * ICD must initialize VK_LOADER_DATA.loadMagic to
2499 * ICD_LOADER_MAGIC.
2500 * - The loader implements vkCreate{PLATFORM}SurfaceKHR() and
2501 * vkDestroySurfaceKHR(). The ICD must be capable of working with
2502 * such loader-managed surfaces.
2503 *
2504 * - Loader interface v2 differs from v1 in:
2505 * - The first ICD entrypoint called by the loader is
2506 * vk_icdNegotiateLoaderICDInterfaceVersion(). The ICD must
2507 * statically expose this entrypoint.
2508 *
2509 * - Loader interface v3 differs from v2 in:
2510 * - The ICD must implement vkCreate{PLATFORM}SurfaceKHR(),
2511 * vkDestroySurfaceKHR(), and other API which uses VkSurfaceKHR,
2512 * because the loader no longer does so.
2513 */
2514 *pSupportedVersion = MIN2(*pSupportedVersion, 3u);
2515 return VK_SUCCESS;
2516 }
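
/* Illustrative only, not part of the ICD interface: a hypothetical
 * loader-side caller of the entrypoint above, showing the clamping
 * contract. tu_example_negotiate_loader_version() does not exist in the
 * real loader or in this driver.
 */
static uint32_t
tu_example_negotiate_loader_version(void)
{
   uint32_t version = 5; /* the loader's (hypothetical) maximum */
   if (vk_icdNegotiateLoaderICDInterfaceVersion(&version) != VK_SUCCESS)
      return 0;
   /* for this ICD, version is now MIN2(5, 3) == 3 */
   return version;
}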
2517
2518 VkResult
2519 tu_GetMemoryFdKHR(VkDevice _device,
2520 const VkMemoryGetFdInfoKHR *pGetFdInfo,
2521 int *pFd)
2522 {
2523 TU_FROM_HANDLE(tu_device, device, _device);
2524 TU_FROM_HANDLE(tu_device_memory, memory, pGetFdInfo->memory);
2525
2526 assert(pGetFdInfo->sType == VK_STRUCTURE_TYPE_MEMORY_GET_FD_INFO_KHR);
2527
2528 /* At the moment, we support only the below handle types. */
2529 assert(pGetFdInfo->handleType ==
2530 VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT ||
2531 pGetFdInfo->handleType ==
2532 VK_EXTERNAL_MEMORY_HANDLE_TYPE_DMA_BUF_BIT_EXT);
2533
2534 int prime_fd = tu_bo_export_dmabuf(device, &memory->bo);
2535 if (prime_fd < 0)
2536 return vk_error(device->instance, VK_ERROR_OUT_OF_DEVICE_MEMORY);
2537
2538 *pFd = prime_fd;
2539 return VK_SUCCESS;
2540 }
2541
2542 VkResult
2543 tu_GetMemoryFdPropertiesKHR(VkDevice _device,
2544 VkExternalMemoryHandleTypeFlagBits handleType,
2545 int fd,
2546 VkMemoryFdPropertiesKHR *pMemoryFdProperties)
2547 {
2548 assert(handleType == VK_EXTERNAL_MEMORY_HANDLE_TYPE_DMA_BUF_BIT_EXT);
2549 pMemoryFdProperties->memoryTypeBits = 1;
2550 return VK_SUCCESS;
2551 }
2552
2553 VkResult
2554 tu_ImportFenceFdKHR(VkDevice _device,
2555 const VkImportFenceFdInfoKHR *pImportFenceFdInfo)
2556 {
2557 tu_stub();
2558
2559 return VK_SUCCESS;
2560 }
2561
2562 VkResult
2563 tu_GetFenceFdKHR(VkDevice _device,
2564 const VkFenceGetFdInfoKHR *pGetFdInfo,
2565 int *pFd)
2566 {
2567 tu_stub();
2568
2569 return VK_SUCCESS;
2570 }
2571
2572 VkResult
2573 tu_ImportSemaphoreFdKHR(VkDevice _device,
2574 const VkImportSemaphoreFdInfoKHR *pImportSemaphoreFdInfo)
2575 {
2576 TU_FROM_HANDLE(tu_device, device, _device);
2577 TU_FROM_HANDLE(tu_semaphore, sem, pImportSemaphoreFdInfo->semaphore);
2578 int ret;
2579 struct tu_semaphore_part *dst = NULL;
2580
2581 if (pImportSemaphoreFdInfo->flags & VK_SEMAPHORE_IMPORT_TEMPORARY_BIT) {
2582 dst = &sem->temporary;
2583 } else {
2584 dst = &sem->permanent;
2585 }
2586
2587 uint32_t syncobj = dst->kind == TU_SEMAPHORE_SYNCOBJ ? dst->syncobj : 0;
2588
2589 switch(pImportSemaphoreFdInfo->handleType) {
2590 case VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT: {
2591 uint32_t old_syncobj = syncobj;
2592 ret = drmSyncobjFDToHandle(device->physical_device->local_fd, pImportSemaphoreFdInfo->fd, &syncobj);
2593 if (ret == 0) {
2594 close(pImportSemaphoreFdInfo->fd);
2595 if (old_syncobj)
2596 drmSyncobjDestroy(device->physical_device->local_fd, old_syncobj);
2597 }
2598 break;
2599 }
2600 case VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT: {
2601 if (!syncobj) {
2602 ret = drmSyncobjCreate(device->physical_device->local_fd, 0, &syncobj);
2603 if (ret)
2604 break;
2605 }
2606 if (pImportSemaphoreFdInfo->fd == -1) {
2607 ret = drmSyncobjSignal(device->physical_device->local_fd, &syncobj, 1);
2608 } else {
2609 ret = drmSyncobjImportSyncFile(device->physical_device->local_fd, syncobj, pImportSemaphoreFdInfo->fd);
2610 }
2611 if (!ret)
2612 close(pImportSemaphoreFdInfo->fd);
2613 break;
2614 }
2615 default:
2616 unreachable("Unhandled semaphore handle type");
2617 }
2618
2619 if (ret) {
2620 return VK_ERROR_INVALID_EXTERNAL_HANDLE;
2621 }
2622 dst->syncobj = syncobj;
2623 dst->kind = TU_SEMAPHORE_SYNCOBJ;
2624
2625 return VK_SUCCESS;
2626 }
2627
2628 VkResult
2629 tu_GetSemaphoreFdKHR(VkDevice _device,
2630 const VkSemaphoreGetFdInfoKHR *pGetFdInfo,
2631 int *pFd)
2632 {
2633 TU_FROM_HANDLE(tu_device, device, _device);
2634 TU_FROM_HANDLE(tu_semaphore, sem, pGetFdInfo->semaphore);
2635 int ret;
2636 uint32_t syncobj_handle;
2637
2638 if (sem->temporary.kind != TU_SEMAPHORE_NONE) {
2639 assert(sem->temporary.kind == TU_SEMAPHORE_SYNCOBJ);
2640 syncobj_handle = sem->temporary.syncobj;
2641 } else {
2642 assert(sem->permanent.kind == TU_SEMAPHORE_SYNCOBJ);
2643 syncobj_handle = sem->permanent.syncobj;
2644 }
2645
2646 switch(pGetFdInfo->handleType) {
2647 case VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT:
2648 ret = drmSyncobjHandleToFD(device->physical_device->local_fd, syncobj_handle, pFd);
2649 break;
2650 case VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT:
2651 ret = drmSyncobjExportSyncFile(device->physical_device->local_fd, syncobj_handle, pFd);
2652 if (!ret) {
2653 if (sem->temporary.kind != TU_SEMAPHORE_NONE) {
2654 tu_semaphore_part_destroy(device, &sem->temporary);
2655 } else {
2656 drmSyncobjReset(device->physical_device->local_fd, &syncobj_handle, 1);
2657 }
2658 }
2659 break;
2660 default:
2661 unreachable("Unhandled semaphore handle type");
2662 }
2663
2664 if (ret)
2665 return vk_error(device->instance, VK_ERROR_INVALID_EXTERNAL_HANDLE);
2666 return VK_SUCCESS;
2667 }
2668
2669
2670 static bool tu_has_syncobj(struct tu_physical_device *pdev)
2671 {
2672 uint64_t value;
2673 if (drmGetCap(pdev->local_fd, DRM_CAP_SYNCOBJ, &value))
2674 return false;
2675 return value && pdev->msm_major_version == 1 && pdev->msm_minor_version >= 6;
2676 }
2677
2678 void
2679 tu_GetPhysicalDeviceExternalSemaphoreProperties(
2680 VkPhysicalDevice physicalDevice,
2681 const VkPhysicalDeviceExternalSemaphoreInfo *pExternalSemaphoreInfo,
2682 VkExternalSemaphoreProperties *pExternalSemaphoreProperties)
2683 {
2684 TU_FROM_HANDLE(tu_physical_device, pdev, physicalDevice);
2685
2686 if (tu_has_syncobj(pdev) &&
2687 (pExternalSemaphoreInfo->handleType == VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT ||
2688 pExternalSemaphoreInfo->handleType == VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT)) {
2689 pExternalSemaphoreProperties->exportFromImportedHandleTypes = VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT | VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT;
2690 pExternalSemaphoreProperties->compatibleHandleTypes = VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT | VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT;
2691 pExternalSemaphoreProperties->externalSemaphoreFeatures = VK_EXTERNAL_SEMAPHORE_FEATURE_EXPORTABLE_BIT |
2692 VK_EXTERNAL_SEMAPHORE_FEATURE_IMPORTABLE_BIT;
2693 } else {
2694 pExternalSemaphoreProperties->exportFromImportedHandleTypes = 0;
2695 pExternalSemaphoreProperties->compatibleHandleTypes = 0;
2696 pExternalSemaphoreProperties->externalSemaphoreFeatures = 0;
2697 }
2698 }
2699
2700 void
2701 tu_GetPhysicalDeviceExternalFenceProperties(
2702 VkPhysicalDevice physicalDevice,
2703 const VkPhysicalDeviceExternalFenceInfo *pExternalFenceInfo,
2704 VkExternalFenceProperties *pExternalFenceProperties)
2705 {
2706 pExternalFenceProperties->exportFromImportedHandleTypes = 0;
2707 pExternalFenceProperties->compatibleHandleTypes = 0;
2708 pExternalFenceProperties->externalFenceFeatures = 0;
2709 }
2710
2711 VkResult
2712 tu_CreateDebugReportCallbackEXT(
2713 VkInstance _instance,
2714 const VkDebugReportCallbackCreateInfoEXT *pCreateInfo,
2715 const VkAllocationCallbacks *pAllocator,
2716 VkDebugReportCallbackEXT *pCallback)
2717 {
2718 TU_FROM_HANDLE(tu_instance, instance, _instance);
2719 return vk_create_debug_report_callback(&instance->debug_report_callbacks,
2720 pCreateInfo, pAllocator,
2721 &instance->alloc, pCallback);
2722 }
2723
2724 void
2725 tu_DestroyDebugReportCallbackEXT(VkInstance _instance,
2726 VkDebugReportCallbackEXT _callback,
2727 const VkAllocationCallbacks *pAllocator)
2728 {
2729 TU_FROM_HANDLE(tu_instance, instance, _instance);
2730 vk_destroy_debug_report_callback(&instance->debug_report_callbacks,
2731 _callback, pAllocator, &instance->alloc);
2732 }
2733
2734 void
2735 tu_DebugReportMessageEXT(VkInstance _instance,
2736 VkDebugReportFlagsEXT flags,
2737 VkDebugReportObjectTypeEXT objectType,
2738 uint64_t object,
2739 size_t location,
2740 int32_t messageCode,
2741 const char *pLayerPrefix,
2742 const char *pMessage)
2743 {
2744 TU_FROM_HANDLE(tu_instance, instance, _instance);
2745 vk_debug_report(&instance->debug_report_callbacks, flags, objectType,
2746 object, location, messageCode, pLayerPrefix, pMessage);
2747 }
2748
2749 void
2750 tu_GetDeviceGroupPeerMemoryFeatures(
2751 VkDevice device,
2752 uint32_t heapIndex,
2753 uint32_t localDeviceIndex,
2754 uint32_t remoteDeviceIndex,
2755 VkPeerMemoryFeatureFlags *pPeerMemoryFeatures)
2756 {
2757 assert(localDeviceIndex == remoteDeviceIndex);
2758
2759 *pPeerMemoryFeatures = VK_PEER_MEMORY_FEATURE_COPY_SRC_BIT |
2760 VK_PEER_MEMORY_FEATURE_COPY_DST_BIT |
2761 VK_PEER_MEMORY_FEATURE_GENERIC_SRC_BIT |
2762 VK_PEER_MEMORY_FEATURE_GENERIC_DST_BIT;
2763 }
2764
2765 void tu_GetPhysicalDeviceMultisamplePropertiesEXT(
2766 VkPhysicalDevice physicalDevice,
2767 VkSampleCountFlagBits samples,
2768 VkMultisamplePropertiesEXT* pMultisampleProperties)
2769 {
2770 TU_FROM_HANDLE(tu_physical_device, pdevice, physicalDevice);
2771
2772 if (samples <= VK_SAMPLE_COUNT_4_BIT && pdevice->supported_extensions.EXT_sample_locations)
2773 pMultisampleProperties->maxSampleLocationGridSize = (VkExtent2D){ 1, 1 };
2774 else
2775 pMultisampleProperties->maxSampleLocationGridSize = (VkExtent2D){ 0, 0 };
2776 }
2777
2778
2779 VkResult
2780 tu_CreatePrivateDataSlotEXT(VkDevice _device,
2781 const VkPrivateDataSlotCreateInfoEXT* pCreateInfo,
2782 const VkAllocationCallbacks* pAllocator,
2783 VkPrivateDataSlotEXT* pPrivateDataSlot)
2784 {
2785 TU_FROM_HANDLE(tu_device, device, _device);
2786 return vk_private_data_slot_create(&device->vk,
2787 pCreateInfo,
2788 pAllocator,
2789 pPrivateDataSlot);
2790 }
2791
2792 void
2793 tu_DestroyPrivateDataSlotEXT(VkDevice _device,
2794 VkPrivateDataSlotEXT privateDataSlot,
2795 const VkAllocationCallbacks* pAllocator)
2796 {
2797 TU_FROM_HANDLE(tu_device, device, _device);
2798 vk_private_data_slot_destroy(&device->vk, privateDataSlot, pAllocator);
2799 }
2800
2801 VkResult
2802 tu_SetPrivateDataEXT(VkDevice _device,
2803 VkObjectType objectType,
2804 uint64_t objectHandle,
2805 VkPrivateDataSlotEXT privateDataSlot,
2806 uint64_t data)
2807 {
2808 TU_FROM_HANDLE(tu_device, device, _device);
2809 return vk_object_base_set_private_data(&device->vk,
2810 objectType,
2811 objectHandle,
2812 privateDataSlot,
2813 data);
2814 }
2815
2816 void
2817 tu_GetPrivateDataEXT(VkDevice _device,
2818 VkObjectType objectType,
2819 uint64_t objectHandle,
2820 VkPrivateDataSlotEXT privateDataSlot,
2821 uint64_t* pData)
2822 {
2823 TU_FROM_HANDLE(tu_device, device, _device);
2824 vk_object_base_get_private_data(&device->vk,
2825 objectType,
2826 objectHandle,
2827 privateDataSlot,
2828 pData);
2829 }