/*
 * Copyright © 2016 Red Hat.
 * Copyright © 2016 Bas Nieuwenhuizen
 *
 * based in part on anv driver which is:
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

#include "tu_private.h"

#include <fcntl.h>
#include <msm_drm.h>
#include <stdbool.h>
#include <string.h>
#include <sys/mman.h>
#include <sys/sysinfo.h>
#include <unistd.h>
#include <xf86drm.h>

#include "util/debug.h"
#include "util/disk_cache.h"
#include "util/strtod.h"
#include "vk_format.h"
#include "vk_util.h"

static int
tu_device_get_cache_uuid(uint16_t family, void *uuid)
{
   uint32_t mesa_timestamp;
   uint16_t f = family;
   memset(uuid, 0, VK_UUID_SIZE);
   if (!disk_cache_get_function_timestamp(tu_device_get_cache_uuid,
                                          &mesa_timestamp))
      return -1;

   memcpy(uuid, &mesa_timestamp, 4);
   memcpy((char *) uuid + 4, &f, 2);
   snprintf((char *) uuid + 6, VK_UUID_SIZE - 6, "tu");
   return 0;
}
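
/* For reference, the resulting pipeline-cache UUID layout is:
 *
 *    bytes 0-3    mesa build timestamp (uint32_t)
 *    bytes 4-5    GPU family (uint16_t)
 *    bytes 6-8    the string "tu" plus its NUL terminator
 *    bytes 9-15   zero padding from the initial memset
 *
 * This is only an illustrative sketch of what the code above produces, not
 * a stable ABI; any of these offsets may change.
 */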

static void
tu_get_driver_uuid(void *uuid)
{
   memset(uuid, 0, VK_UUID_SIZE);
   snprintf(uuid, VK_UUID_SIZE, "freedreno");
}

static void
tu_get_device_uuid(void *uuid)
{
   memset(uuid, 0, VK_UUID_SIZE);
}

VkResult
tu_bo_init_new(struct tu_device *dev, struct tu_bo *bo, uint64_t size)
{
   /* TODO: Choose better flags. As of 2018-11-12, freedreno/drm/msm_bo.c
    * always sets `flags = MSM_BO_WC`, and we copy that behavior here.
    */
   uint32_t gem_handle = tu_gem_new(dev, size, MSM_BO_WC);
   if (!gem_handle)
      goto fail_new;

   /* Calling DRM_MSM_GEM_INFO forces the kernel to allocate backing pages. We
    * want immediate backing pages because vkAllocateMemory and friends must
    * not lazily fail.
    *
    * TODO(chadv): Must we really call DRM_MSM_GEM_INFO to acquire backing
    * pages? I infer so from reading comments in msm_bo.c:bo_allocate(), but
    * maybe I misunderstand.
    */

   /* TODO: Do we need 'offset' if we have 'iova'? */
   uint64_t offset = tu_gem_info_offset(dev, gem_handle);
   if (!offset)
      goto fail_info;

   uint64_t iova = tu_gem_info_iova(dev, gem_handle);
   if (!iova)
      goto fail_info;

   *bo = (struct tu_bo) {
      .gem_handle = gem_handle,
      .size = size,
      .offset = offset,
      .iova = iova,
   };

   return VK_SUCCESS;

fail_info:
   tu_gem_close(dev, gem_handle);
fail_new:
   return vk_error(dev->instance, VK_ERROR_OUT_OF_DEVICE_MEMORY);
}

VkResult
tu_bo_map(struct tu_device *dev, struct tu_bo *bo)
{
   if (bo->map)
      return VK_SUCCESS;

   /* TODO: Should we use the wrapper os_mmap() like Freedreno does? */
   void *map = mmap(0, bo->size, PROT_READ | PROT_WRITE, MAP_SHARED,
                    dev->physical_device->local_fd, bo->offset);
   if (map == MAP_FAILED)
      return vk_error(dev->instance, VK_ERROR_MEMORY_MAP_FAILED);

   bo->map = map;
   return VK_SUCCESS;
}

void
tu_bo_finish(struct tu_device *dev, struct tu_bo *bo)
{
   assert(bo->gem_handle);

   if (bo->map)
      munmap(bo->map, bo->size);

   tu_gem_close(dev, bo->gem_handle);
}
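
/* A minimal usage sketch for the three BO helpers above (illustrative only;
 * error handling is collapsed and `dev` is assumed to be a fully initialized
 * tu_device):
 *
 *    struct tu_bo bo;
 *    if (tu_bo_init_new(dev, &bo, 0x1000) != VK_SUCCESS)
 *       return;                      // allocation failed
 *    if (tu_bo_map(dev, &bo) != VK_SUCCESS) {
 *       tu_bo_finish(dev, &bo);      // close the GEM handle
 *       return;
 *    }
 *    memset(bo.map, 0, bo.size);     // CPU writes through the WC mapping
 *    tu_bo_finish(dev, &bo);         // munmaps and closes the handle
 */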

static VkResult
tu_physical_device_init(struct tu_physical_device *device,
                        struct tu_instance *instance,
                        drmDevicePtr drm_device)
{
   const char *path = drm_device->nodes[DRM_NODE_RENDER];
   VkResult result = VK_SUCCESS;
   drmVersionPtr version;
   int fd;
   int master_fd = -1;
   uint64_t val;

   fd = open(path, O_RDWR | O_CLOEXEC);
   if (fd < 0) {
      return vk_errorf(instance, VK_ERROR_INCOMPATIBLE_DRIVER,
                       "failed to open device %s", path);
   }

   /* Version 1.3 added MSM_INFO_IOVA. */
   const int min_version_major = 1;
   const int min_version_minor = 3;

   version = drmGetVersion(fd);
   if (!version) {
      close(fd);
      return vk_errorf(instance, VK_ERROR_INCOMPATIBLE_DRIVER,
                       "failed to query kernel driver version for device %s",
                       path);
   }

   if (strcmp(version->name, "msm")) {
      drmFreeVersion(version);
      if (master_fd != -1)
         close(master_fd);
      close(fd);
      return vk_errorf(instance, VK_ERROR_INCOMPATIBLE_DRIVER,
                       "device %s does not use the msm kernel driver", path);
   }

   if (version->version_major != min_version_major ||
       version->version_minor < min_version_minor) {
      result = vk_errorf(instance, VK_ERROR_INCOMPATIBLE_DRIVER,
                         "kernel driver for device %s has version %d.%d, "
                         "but Vulkan requires version >= %d.%d",
                         path, version->version_major, version->version_minor,
                         min_version_major, min_version_minor);
      drmFreeVersion(version);
      close(fd);
      return result;
   }

   drmFreeVersion(version);

   if (instance->debug_flags & TU_DEBUG_STARTUP)
      tu_logi("Found compatible device '%s'.", path);

   device->_loader_data.loaderMagic = ICD_LOADER_MAGIC;
   device->instance = instance;
   assert(strlen(path) < ARRAY_SIZE(device->path));
   strncpy(device->path, path, ARRAY_SIZE(device->path));

   if (instance->enabled_extensions.KHR_display) {
      master_fd =
         open(drm_device->nodes[DRM_NODE_PRIMARY], O_RDWR | O_CLOEXEC);
      if (master_fd >= 0) {
         /* TODO: free master_fd if accel is not working? */
      }
   }

   device->master_fd = master_fd;
   device->local_fd = fd;

   device->drm_device = fd_device_new_dup(fd);
   if (!device->drm_device) {
      if (instance->debug_flags & TU_DEBUG_STARTUP)
         tu_logi("Could not create the libdrm device");
      result = vk_errorf(instance, VK_ERROR_INITIALIZATION_FAILED,
                         "could not create the libdrm device");
      goto fail;
   }

   if (tu_drm_query_param(device, MSM_PARAM_GPU_ID, &val)) {
      if (instance->debug_flags & TU_DEBUG_STARTUP)
         tu_logi("Could not query the GPU ID");
      result = vk_errorf(instance, VK_ERROR_INITIALIZATION_FAILED,
                         "could not get GPU ID");
      goto fail;
   }
   device->gpu_id = val;

   if (tu_drm_query_param(device, MSM_PARAM_GMEM_SIZE, &val)) {
      if (instance->debug_flags & TU_DEBUG_STARTUP)
         tu_logi("Could not query the GMEM size");
      result = vk_errorf(instance, VK_ERROR_INITIALIZATION_FAILED,
                         "could not get GMEM size");
      goto fail;
   }
   device->gmem_size = val;

   memset(device->name, 0, sizeof(device->name));
   sprintf(device->name, "FD%d", device->gpu_id);

   switch (device->gpu_id) {
   case 530:
   case 630:
      break;
   default:
      result = vk_errorf(instance, VK_ERROR_INITIALIZATION_FAILED,
                         "device %s is unsupported", device->name);
      goto fail;
   }
   if (tu_device_get_cache_uuid(device->gpu_id, device->cache_uuid)) {
      result = vk_errorf(instance, VK_ERROR_INITIALIZATION_FAILED,
                         "cannot generate UUID");
      goto fail;
   }

   /* The gpu id is already embedded in the uuid, so the disk cache needs
    * only the device name and the hex-formatted uuid.
    */
   char buf[VK_UUID_SIZE * 2 + 1];
   disk_cache_format_hex_id(buf, device->cache_uuid, VK_UUID_SIZE * 2);
   device->disk_cache = disk_cache_create(device->name, buf, 0);

   fprintf(stderr, "WARNING: tu is not a conformant vulkan implementation, "
                   "testing use only.\n");

   tu_get_driver_uuid(&device->driver_uuid);
   tu_get_device_uuid(&device->device_uuid);

   tu_fill_device_extension_table(device, &device->supported_extensions);

   if (result != VK_SUCCESS) {
      vk_error(instance, result);
      goto fail;
   }

   return VK_SUCCESS;

fail:
   if (device->drm_device)
      fd_device_del(device->drm_device);
   close(fd);
   if (master_fd != -1)
      close(master_fd);
   return result;
}

static void
tu_physical_device_finish(struct tu_physical_device *device)
{
   disk_cache_destroy(device->disk_cache);
   close(device->local_fd);
   if (device->master_fd != -1)
      close(device->master_fd);
}

static void *
default_alloc_func(void *pUserData,
                   size_t size,
                   size_t align,
                   VkSystemAllocationScope allocationScope)
{
   return malloc(size);
}

static void *
default_realloc_func(void *pUserData,
                     void *pOriginal,
                     size_t size,
                     size_t align,
                     VkSystemAllocationScope allocationScope)
{
   return realloc(pOriginal, size);
}

static void
default_free_func(void *pUserData, void *pMemory)
{
   free(pMemory);
}

static const VkAllocationCallbacks default_alloc = {
   .pUserData = NULL,
   .pfnAllocation = default_alloc_func,
   .pfnReallocation = default_realloc_func,
   .pfnFree = default_free_func,
};

static const struct debug_control tu_debug_options[] = {
   { "startup", TU_DEBUG_STARTUP }, { NULL, 0 }
};

const char *
tu_get_debug_option_name(int id)
{
   assert(id < ARRAY_SIZE(tu_debug_options) - 1);
   return tu_debug_options[id].string;
}

static int
tu_get_instance_extension_index(const char *name)
{
   for (unsigned i = 0; i < TU_INSTANCE_EXTENSION_COUNT; ++i) {
      if (strcmp(name, tu_instance_extensions[i].extensionName) == 0)
         return i;
   }
   return -1;
}

VkResult
tu_CreateInstance(const VkInstanceCreateInfo *pCreateInfo,
                  const VkAllocationCallbacks *pAllocator,
                  VkInstance *pInstance)
{
   struct tu_instance *instance;
   VkResult result;

   assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_INSTANCE_CREATE_INFO);

   uint32_t client_version;
   if (pCreateInfo->pApplicationInfo &&
       pCreateInfo->pApplicationInfo->apiVersion != 0) {
      client_version = pCreateInfo->pApplicationInfo->apiVersion;
   } else {
      tu_EnumerateInstanceVersion(&client_version);
   }

   instance = vk_zalloc2(&default_alloc, pAllocator, sizeof(*instance), 8,
                         VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
   if (!instance)
      return vk_error(NULL, VK_ERROR_OUT_OF_HOST_MEMORY);

   instance->_loader_data.loaderMagic = ICD_LOADER_MAGIC;

   if (pAllocator)
      instance->alloc = *pAllocator;
   else
      instance->alloc = default_alloc;

   instance->api_version = client_version;
   instance->physical_device_count = -1;

   instance->debug_flags =
      parse_debug_string(getenv("TU_DEBUG"), tu_debug_options);

   if (instance->debug_flags & TU_DEBUG_STARTUP)
      tu_logi("Created an instance");

   for (uint32_t i = 0; i < pCreateInfo->enabledExtensionCount; i++) {
      const char *ext_name = pCreateInfo->ppEnabledExtensionNames[i];
      int index = tu_get_instance_extension_index(ext_name);

      if (index < 0 || !tu_supported_instance_extensions.extensions[index]) {
         vk_free2(&default_alloc, pAllocator, instance);
         return vk_error(instance, VK_ERROR_EXTENSION_NOT_PRESENT);
      }

      instance->enabled_extensions.extensions[index] = true;
   }

   result = vk_debug_report_instance_init(&instance->debug_report_callbacks);
   if (result != VK_SUCCESS) {
      vk_free2(&default_alloc, pAllocator, instance);
      return vk_error(instance, result);
   }

   _mesa_locale_init();

   VG(VALGRIND_CREATE_MEMPOOL(instance, 0, false));

   *pInstance = tu_instance_to_handle(instance);

   return VK_SUCCESS;
}

void
tu_DestroyInstance(VkInstance _instance,
                   const VkAllocationCallbacks *pAllocator)
{
   TU_FROM_HANDLE(tu_instance, instance, _instance);

   if (!instance)
      return;

   for (int i = 0; i < instance->physical_device_count; ++i) {
      tu_physical_device_finish(instance->physical_devices + i);
   }

   VG(VALGRIND_DESTROY_MEMPOOL(instance));

   _mesa_locale_fini();

   vk_debug_report_instance_destroy(&instance->debug_report_callbacks);

   vk_free(&instance->alloc, instance);
}

static VkResult
tu_enumerate_devices(struct tu_instance *instance)
{
   /* TODO: Check for more devices? */
   drmDevicePtr devices[8];
   VkResult result = VK_ERROR_INCOMPATIBLE_DRIVER;
   int max_devices;

   instance->physical_device_count = 0;

   max_devices = drmGetDevices2(0, devices, ARRAY_SIZE(devices));

   if (instance->debug_flags & TU_DEBUG_STARTUP)
      tu_logi("Found %d drm nodes", max_devices);

   if (max_devices < 1)
      return vk_error(instance, VK_ERROR_INCOMPATIBLE_DRIVER);

   for (unsigned i = 0; i < (unsigned) max_devices; i++) {
      if (devices[i]->available_nodes & 1 << DRM_NODE_RENDER &&
          devices[i]->bustype == DRM_BUS_PLATFORM) {

         result = tu_physical_device_init(
            instance->physical_devices + instance->physical_device_count,
            instance, devices[i]);
         if (result == VK_SUCCESS)
            ++instance->physical_device_count;
         else if (result != VK_ERROR_INCOMPATIBLE_DRIVER)
            break;
      }
   }
   drmFreeDevices(devices, max_devices);

   return result;
}

VkResult
tu_EnumeratePhysicalDevices(VkInstance _instance,
                            uint32_t *pPhysicalDeviceCount,
                            VkPhysicalDevice *pPhysicalDevices)
{
   TU_FROM_HANDLE(tu_instance, instance, _instance);
   VK_OUTARRAY_MAKE(out, pPhysicalDevices, pPhysicalDeviceCount);

   VkResult result;

   if (instance->physical_device_count < 0) {
      result = tu_enumerate_devices(instance);
      if (result != VK_SUCCESS && result != VK_ERROR_INCOMPATIBLE_DRIVER)
         return result;
   }

   for (uint32_t i = 0; i < instance->physical_device_count; ++i) {
      vk_outarray_append(&out, p)
      {
         *p = tu_physical_device_to_handle(instance->physical_devices + i);
      }
   }

   return vk_outarray_status(&out);
}

VkResult
tu_EnumeratePhysicalDeviceGroups(
   VkInstance _instance,
   uint32_t *pPhysicalDeviceGroupCount,
   VkPhysicalDeviceGroupProperties *pPhysicalDeviceGroupProperties)
{
   TU_FROM_HANDLE(tu_instance, instance, _instance);
   VK_OUTARRAY_MAKE(out, pPhysicalDeviceGroupProperties,
                    pPhysicalDeviceGroupCount);
   VkResult result;

   if (instance->physical_device_count < 0) {
      result = tu_enumerate_devices(instance);
      if (result != VK_SUCCESS && result != VK_ERROR_INCOMPATIBLE_DRIVER)
         return result;
   }

   for (uint32_t i = 0; i < instance->physical_device_count; ++i) {
      vk_outarray_append(&out, p)
      {
         p->physicalDeviceCount = 1;
         p->physicalDevices[0] =
            tu_physical_device_to_handle(instance->physical_devices + i);
         p->subsetAllocation = false;
      }
   }

   return vk_outarray_status(&out);
}

void
tu_GetPhysicalDeviceFeatures(VkPhysicalDevice physicalDevice,
                             VkPhysicalDeviceFeatures *pFeatures)
{
   memset(pFeatures, 0, sizeof(*pFeatures));

   *pFeatures = (VkPhysicalDeviceFeatures) {
      .robustBufferAccess = false,
      .fullDrawIndexUint32 = false,
      .imageCubeArray = false,
      .independentBlend = false,
      .geometryShader = false,
      .tessellationShader = false,
      .sampleRateShading = false,
      .dualSrcBlend = false,
      .logicOp = false,
      .multiDrawIndirect = false,
      .drawIndirectFirstInstance = false,
      .depthClamp = false,
      .depthBiasClamp = false,
      .fillModeNonSolid = false,
      .depthBounds = false,
      .wideLines = false,
      .largePoints = false,
      .alphaToOne = false,
      .multiViewport = false,
      .samplerAnisotropy = false,
      .textureCompressionETC2 = false,
      .textureCompressionASTC_LDR = false,
      .textureCompressionBC = false,
      .occlusionQueryPrecise = false,
      .pipelineStatisticsQuery = false,
      .vertexPipelineStoresAndAtomics = false,
      .fragmentStoresAndAtomics = false,
      .shaderTessellationAndGeometryPointSize = false,
      .shaderImageGatherExtended = false,
      .shaderStorageImageExtendedFormats = false,
      .shaderStorageImageMultisample = false,
      .shaderUniformBufferArrayDynamicIndexing = false,
      .shaderSampledImageArrayDynamicIndexing = false,
      .shaderStorageBufferArrayDynamicIndexing = false,
      .shaderStorageImageArrayDynamicIndexing = false,
      .shaderStorageImageReadWithoutFormat = false,
      .shaderStorageImageWriteWithoutFormat = false,
      .shaderClipDistance = false,
      .shaderCullDistance = false,
      .shaderFloat64 = false,
      .shaderInt64 = false,
      .shaderInt16 = false,
      .sparseBinding = false,
      .variableMultisampleRate = false,
      .inheritedQueries = false,
   };
}

void
tu_GetPhysicalDeviceFeatures2(VkPhysicalDevice physicalDevice,
                              VkPhysicalDeviceFeatures2KHR *pFeatures)
{
   vk_foreach_struct(ext, pFeatures->pNext)
   {
      switch (ext->sType) {
      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VARIABLE_POINTER_FEATURES_KHR: {
         VkPhysicalDeviceVariablePointerFeaturesKHR *features = (void *) ext;
         features->variablePointersStorageBuffer = false;
         features->variablePointers = false;
         break;
      }
      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MULTIVIEW_FEATURES_KHR: {
         VkPhysicalDeviceMultiviewFeaturesKHR *features =
            (VkPhysicalDeviceMultiviewFeaturesKHR *) ext;
         features->multiview = false;
         features->multiviewGeometryShader = false;
         features->multiviewTessellationShader = false;
         break;
      }
      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_DRAW_PARAMETER_FEATURES: {
         VkPhysicalDeviceShaderDrawParameterFeatures *features =
            (VkPhysicalDeviceShaderDrawParameterFeatures *) ext;
         features->shaderDrawParameters = false;
         break;
      }
      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROTECTED_MEMORY_FEATURES: {
         VkPhysicalDeviceProtectedMemoryFeatures *features =
            (VkPhysicalDeviceProtectedMemoryFeatures *) ext;
         features->protectedMemory = false;
         break;
      }
      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_16BIT_STORAGE_FEATURES: {
         VkPhysicalDevice16BitStorageFeatures *features =
            (VkPhysicalDevice16BitStorageFeatures *) ext;
         features->storageBuffer16BitAccess = false;
         features->uniformAndStorageBuffer16BitAccess = false;
         features->storagePushConstant16 = false;
         features->storageInputOutput16 = false;
         break;
      }
      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SAMPLER_YCBCR_CONVERSION_FEATURES: {
         VkPhysicalDeviceSamplerYcbcrConversionFeatures *features =
            (VkPhysicalDeviceSamplerYcbcrConversionFeatures *) ext;
         features->samplerYcbcrConversion = false;
         break;
      }
      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DESCRIPTOR_INDEXING_FEATURES_EXT: {
         VkPhysicalDeviceDescriptorIndexingFeaturesEXT *features =
            (VkPhysicalDeviceDescriptorIndexingFeaturesEXT *) ext;
         features->shaderInputAttachmentArrayDynamicIndexing = false;
         features->shaderUniformTexelBufferArrayDynamicIndexing = false;
         features->shaderStorageTexelBufferArrayDynamicIndexing = false;
         features->shaderUniformBufferArrayNonUniformIndexing = false;
         features->shaderSampledImageArrayNonUniformIndexing = false;
         features->shaderStorageBufferArrayNonUniformIndexing = false;
         features->shaderStorageImageArrayNonUniformIndexing = false;
         features->shaderInputAttachmentArrayNonUniformIndexing = false;
         features->shaderUniformTexelBufferArrayNonUniformIndexing = false;
         features->shaderStorageTexelBufferArrayNonUniformIndexing = false;
         features->descriptorBindingUniformBufferUpdateAfterBind = false;
         features->descriptorBindingSampledImageUpdateAfterBind = false;
         features->descriptorBindingStorageImageUpdateAfterBind = false;
         features->descriptorBindingStorageBufferUpdateAfterBind = false;
         features->descriptorBindingUniformTexelBufferUpdateAfterBind = false;
         features->descriptorBindingStorageTexelBufferUpdateAfterBind = false;
         features->descriptorBindingUpdateUnusedWhilePending = false;
         features->descriptorBindingPartiallyBound = false;
         features->descriptorBindingVariableDescriptorCount = false;
         features->runtimeDescriptorArray = false;
         break;
      }
      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_CONDITIONAL_RENDERING_FEATURES_EXT: {
         VkPhysicalDeviceConditionalRenderingFeaturesEXT *features =
            (VkPhysicalDeviceConditionalRenderingFeaturesEXT *) ext;
         features->conditionalRendering = false;
         features->inheritedConditionalRendering = false;
         break;
      }
      default:
         break;
      }
   }
   return tu_GetPhysicalDeviceFeatures(physicalDevice, &pFeatures->features);
}

void
tu_GetPhysicalDeviceProperties(VkPhysicalDevice physicalDevice,
                               VkPhysicalDeviceProperties *pProperties)
{
   TU_FROM_HANDLE(tu_physical_device, pdevice, physicalDevice);
   VkSampleCountFlags sample_counts = 0xf;

   /* Make sure that the entire descriptor set is addressable with a signed
    * 32-bit int. So the sum of all limits scaled by descriptor size has to
    * be at most 2 GiB. A combined image & sampler counts as one of each.
    * This limit is for the pipeline layout, not for the set layout, but
    * there is no set limit, so we just set a pipeline limit. I don't think
    * any app is going to hit this soon. */
   size_t max_descriptor_set_size =
      ((1ull << 31) - 16 * MAX_DYNAMIC_BUFFERS) /
      (32 /* uniform buffer, 32 due to potential space wasted on alignment */ +
       32 /* storage buffer, 32 due to potential space wasted on alignment */ +
       32 /* sampler, largest when combined with image */ +
       64 /* sampled image */ + 64 /* storage image */);
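
   /* Worked out (assuming MAX_DYNAMIC_BUFFERS is small enough that the
    * subtracted term is negligible): the denominator is
    * 32 + 32 + 32 + 64 + 64 = 224 bytes per worst-case descriptor, so
    * max_descriptor_set_size is roughly 2^31 / 224, about 9.5 million
    * descriptors per stage. This is only a sanity illustration of the
    * expression above, not a separately maintained limit.
    */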

   VkPhysicalDeviceLimits limits = {
      .maxImageDimension1D = (1 << 14),
      .maxImageDimension2D = (1 << 14),
      .maxImageDimension3D = (1 << 11),
      .maxImageDimensionCube = (1 << 14),
      .maxImageArrayLayers = (1 << 11),
      .maxTexelBufferElements = 128 * 1024 * 1024,
      .maxUniformBufferRange = UINT32_MAX,
      .maxStorageBufferRange = UINT32_MAX,
      .maxPushConstantsSize = MAX_PUSH_CONSTANTS_SIZE,
      .maxMemoryAllocationCount = UINT32_MAX,
      .maxSamplerAllocationCount = 64 * 1024,
      .bufferImageGranularity = 64, /* A cache line */
      .sparseAddressSpaceSize = 0xffffffffu, /* buffer max size */
      .maxBoundDescriptorSets = MAX_SETS,
      .maxPerStageDescriptorSamplers = max_descriptor_set_size,
      .maxPerStageDescriptorUniformBuffers = max_descriptor_set_size,
      .maxPerStageDescriptorStorageBuffers = max_descriptor_set_size,
      .maxPerStageDescriptorSampledImages = max_descriptor_set_size,
      .maxPerStageDescriptorStorageImages = max_descriptor_set_size,
      .maxPerStageDescriptorInputAttachments = max_descriptor_set_size,
      .maxPerStageResources = max_descriptor_set_size,
      .maxDescriptorSetSamplers = max_descriptor_set_size,
      .maxDescriptorSetUniformBuffers = max_descriptor_set_size,
      .maxDescriptorSetUniformBuffersDynamic = MAX_DYNAMIC_UNIFORM_BUFFERS,
      .maxDescriptorSetStorageBuffers = max_descriptor_set_size,
      .maxDescriptorSetStorageBuffersDynamic = MAX_DYNAMIC_STORAGE_BUFFERS,
      .maxDescriptorSetSampledImages = max_descriptor_set_size,
      .maxDescriptorSetStorageImages = max_descriptor_set_size,
      .maxDescriptorSetInputAttachments = max_descriptor_set_size,
      .maxVertexInputAttributes = 32,
      .maxVertexInputBindings = 32,
      .maxVertexInputAttributeOffset = 2047,
      .maxVertexInputBindingStride = 2048,
      .maxVertexOutputComponents = 128,
      .maxTessellationGenerationLevel = 64,
      .maxTessellationPatchSize = 32,
      .maxTessellationControlPerVertexInputComponents = 128,
      .maxTessellationControlPerVertexOutputComponents = 128,
      .maxTessellationControlPerPatchOutputComponents = 120,
      .maxTessellationControlTotalOutputComponents = 4096,
      .maxTessellationEvaluationInputComponents = 128,
      .maxTessellationEvaluationOutputComponents = 128,
      .maxGeometryShaderInvocations = 127,
      .maxGeometryInputComponents = 64,
      .maxGeometryOutputComponents = 128,
      .maxGeometryOutputVertices = 256,
      .maxGeometryTotalOutputComponents = 1024,
      .maxFragmentInputComponents = 128,
      .maxFragmentOutputAttachments = 8,
      .maxFragmentDualSrcAttachments = 1,
      .maxFragmentCombinedOutputResources = 8,
      .maxComputeSharedMemorySize = 32768,
      .maxComputeWorkGroupCount = { 65535, 65535, 65535 },
      .maxComputeWorkGroupInvocations = 2048,
      .maxComputeWorkGroupSize = { 2048, 2048, 2048 },
      .subPixelPrecisionBits = 4 /* FIXME */,
      .subTexelPrecisionBits = 4 /* FIXME */,
      .mipmapPrecisionBits = 4 /* FIXME */,
      .maxDrawIndexedIndexValue = UINT32_MAX,
      .maxDrawIndirectCount = UINT32_MAX,
      .maxSamplerLodBias = 16,
      .maxSamplerAnisotropy = 16,
      .maxViewports = MAX_VIEWPORTS,
      .maxViewportDimensions = { (1 << 14), (1 << 14) },
      .viewportBoundsRange = { INT16_MIN, INT16_MAX },
      .viewportSubPixelBits = 8,
      .minMemoryMapAlignment = 4096, /* A page */
      .minTexelBufferOffsetAlignment = 1,
      .minUniformBufferOffsetAlignment = 4,
      .minStorageBufferOffsetAlignment = 4,
      .minTexelOffset = -32,
      .maxTexelOffset = 31,
      .minTexelGatherOffset = -32,
      .maxTexelGatherOffset = 31,
      .minInterpolationOffset = -2,
      .maxInterpolationOffset = 2,
      .subPixelInterpolationOffsetBits = 8,
      .maxFramebufferWidth = (1 << 14),
      .maxFramebufferHeight = (1 << 14),
      .maxFramebufferLayers = (1 << 10),
      .framebufferColorSampleCounts = sample_counts,
      .framebufferDepthSampleCounts = sample_counts,
      .framebufferStencilSampleCounts = sample_counts,
      .framebufferNoAttachmentsSampleCounts = sample_counts,
      .maxColorAttachments = MAX_RTS,
      .sampledImageColorSampleCounts = sample_counts,
      .sampledImageIntegerSampleCounts = VK_SAMPLE_COUNT_1_BIT,
      .sampledImageDepthSampleCounts = sample_counts,
      .sampledImageStencilSampleCounts = sample_counts,
      .storageImageSampleCounts = VK_SAMPLE_COUNT_1_BIT,
      .maxSampleMaskWords = 1,
      .timestampComputeAndGraphics = true,
      .timestampPeriod = 1,
      .maxClipDistances = 8,
      .maxCullDistances = 8,
      .maxCombinedClipAndCullDistances = 8,
      .discreteQueuePriorities = 1,
      .pointSizeRange = { 0.125, 255.875 },
      .lineWidthRange = { 0.0, 7.9921875 },
      .pointSizeGranularity = (1.0 / 8.0),
      .lineWidthGranularity = (1.0 / 128.0),
      .strictLines = false, /* FINISHME */
      .standardSampleLocations = true,
      .optimalBufferCopyOffsetAlignment = 128,
      .optimalBufferCopyRowPitchAlignment = 128,
      .nonCoherentAtomSize = 64,
   };

   *pProperties = (VkPhysicalDeviceProperties) {
      .apiVersion = tu_physical_device_api_version(pdevice),
      .driverVersion = vk_get_driver_version(),
      .vendorID = 0, /* TODO */
      .deviceID = 0,
      .deviceType = VK_PHYSICAL_DEVICE_TYPE_INTEGRATED_GPU,
      .limits = limits,
      .sparseProperties = { 0 },
   };

   strcpy(pProperties->deviceName, pdevice->name);
   memcpy(pProperties->pipelineCacheUUID, pdevice->cache_uuid, VK_UUID_SIZE);
}

void
tu_GetPhysicalDeviceProperties2(VkPhysicalDevice physicalDevice,
                                VkPhysicalDeviceProperties2KHR *pProperties)
{
   TU_FROM_HANDLE(tu_physical_device, pdevice, physicalDevice);
   tu_GetPhysicalDeviceProperties(physicalDevice, &pProperties->properties);

   vk_foreach_struct(ext, pProperties->pNext)
   {
      switch (ext->sType) {
      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PUSH_DESCRIPTOR_PROPERTIES_KHR: {
         VkPhysicalDevicePushDescriptorPropertiesKHR *properties =
            (VkPhysicalDevicePushDescriptorPropertiesKHR *) ext;
         properties->maxPushDescriptors = MAX_PUSH_DESCRIPTORS;
         break;
      }
      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_ID_PROPERTIES_KHR: {
         VkPhysicalDeviceIDPropertiesKHR *properties =
            (VkPhysicalDeviceIDPropertiesKHR *) ext;
         memcpy(properties->driverUUID, pdevice->driver_uuid, VK_UUID_SIZE);
         memcpy(properties->deviceUUID, pdevice->device_uuid, VK_UUID_SIZE);
         properties->deviceLUIDValid = false;
         break;
      }
      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MULTIVIEW_PROPERTIES_KHR: {
         VkPhysicalDeviceMultiviewPropertiesKHR *properties =
            (VkPhysicalDeviceMultiviewPropertiesKHR *) ext;
         properties->maxMultiviewViewCount = MAX_VIEWS;
         properties->maxMultiviewInstanceIndex = INT_MAX;
         break;
      }
      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_POINT_CLIPPING_PROPERTIES_KHR: {
         VkPhysicalDevicePointClippingPropertiesKHR *properties =
            (VkPhysicalDevicePointClippingPropertiesKHR *) ext;
         properties->pointClippingBehavior =
            VK_POINT_CLIPPING_BEHAVIOR_ALL_CLIP_PLANES_KHR;
         break;
      }
      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MAINTENANCE_3_PROPERTIES: {
         VkPhysicalDeviceMaintenance3Properties *properties =
            (VkPhysicalDeviceMaintenance3Properties *) ext;
         /* Make sure everything is addressable by a signed 32-bit int, and
          * our largest descriptors are 96 bytes. */
         properties->maxPerSetDescriptors = (1ull << 31) / 96;
         /* Our buffer size fields allow only this much */
         properties->maxMemoryAllocationSize = 0xFFFFFFFFull;
         break;
      }
      default:
         break;
      }
   }
}

static const VkQueueFamilyProperties tu_queue_family_properties = {
   .queueFlags =
      VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT | VK_QUEUE_TRANSFER_BIT,
   .queueCount = 1,
   .timestampValidBits = 64,
   .minImageTransferGranularity = (VkExtent3D) { 1, 1, 1 },
};

void
tu_GetPhysicalDeviceQueueFamilyProperties(
   VkPhysicalDevice physicalDevice,
   uint32_t *pQueueFamilyPropertyCount,
   VkQueueFamilyProperties *pQueueFamilyProperties)
{
   VK_OUTARRAY_MAKE(out, pQueueFamilyProperties, pQueueFamilyPropertyCount);

   vk_outarray_append(&out, p) { *p = tu_queue_family_properties; }
}

void
tu_GetPhysicalDeviceQueueFamilyProperties2(
   VkPhysicalDevice physicalDevice,
   uint32_t *pQueueFamilyPropertyCount,
   VkQueueFamilyProperties2KHR *pQueueFamilyProperties)
{
   VK_OUTARRAY_MAKE(out, pQueueFamilyProperties, pQueueFamilyPropertyCount);

   vk_outarray_append(&out, p)
   {
      p->queueFamilyProperties = tu_queue_family_properties;
   }
}

static uint64_t
tu_get_system_heap_size(void)
{
   struct sysinfo info;
   sysinfo(&info);

   uint64_t total_ram = (uint64_t) info.totalram * (uint64_t) info.mem_unit;

   /* We don't want to burn too much ram with the GPU. If the user has 4GiB
    * or less, we use at most half. If they have more than 4GiB, we use 3/4.
    */
   uint64_t available_ram;
   if (total_ram <= 4ull * 1024ull * 1024ull * 1024ull)
      available_ram = total_ram / 2;
   else
      available_ram = total_ram * 3 / 4;

   return available_ram;
}
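
/* For example (illustrative arithmetic only): with 8 GiB of system RAM the
 * advertised heap is 6 GiB (3/4), and with exactly 4 GiB it is 2 GiB (1/2).
 */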

void
tu_GetPhysicalDeviceMemoryProperties(
   VkPhysicalDevice physicalDevice,
   VkPhysicalDeviceMemoryProperties *pMemoryProperties)
{
   pMemoryProperties->memoryHeapCount = 1;
   pMemoryProperties->memoryHeaps[0].size = tu_get_system_heap_size();
   pMemoryProperties->memoryHeaps[0].flags = VK_MEMORY_HEAP_DEVICE_LOCAL_BIT;

   pMemoryProperties->memoryTypeCount = 1;
   pMemoryProperties->memoryTypes[0].propertyFlags =
      VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT |
      VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT |
      VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
   pMemoryProperties->memoryTypes[0].heapIndex = 0;
}

void
tu_GetPhysicalDeviceMemoryProperties2(
   VkPhysicalDevice physicalDevice,
   VkPhysicalDeviceMemoryProperties2KHR *pMemoryProperties)
{
   return tu_GetPhysicalDeviceMemoryProperties(
      physicalDevice, &pMemoryProperties->memoryProperties);
}

static VkResult
tu_queue_init(struct tu_device *device,
              struct tu_queue *queue,
              uint32_t queue_family_index,
              int idx,
              VkDeviceQueueCreateFlags flags)
{
   queue->_loader_data.loaderMagic = ICD_LOADER_MAGIC;
   queue->device = device;
   queue->queue_family_index = queue_family_index;
   queue->queue_idx = idx;
   queue->flags = flags;

   struct drm_msm_submitqueue req = {
      .flags = 0,
      .prio = 0,
   };

   int ret = drmCommandWriteRead(device->physical_device->local_fd,
                                 DRM_MSM_SUBMITQUEUE_NEW,
                                 &req, sizeof(req));
   if (ret)
      return VK_ERROR_INITIALIZATION_FAILED;

   queue->msm_queue_id = req.id;
   return VK_SUCCESS;
}

static void
tu_queue_finish(struct tu_queue *queue)
{
   drmCommandWrite(queue->device->physical_device->local_fd,
                   DRM_MSM_SUBMITQUEUE_CLOSE,
                   &queue->msm_queue_id, sizeof(uint32_t));
}
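
/* Each VkQueue is thus backed by its own kernel submitqueue: creation does a
 * DRM_MSM_SUBMITQUEUE_NEW round trip and stores the id the kernel returns,
 * and destruction passes that id back through DRM_MSM_SUBMITQUEUE_CLOSE.
 * The flags and prio fields are simply left at 0 here.
 */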

static int
tu_get_device_extension_index(const char *name)
{
   for (unsigned i = 0; i < TU_DEVICE_EXTENSION_COUNT; ++i) {
      if (strcmp(name, tu_device_extensions[i].extensionName) == 0)
         return i;
   }
   return -1;
}

VkResult
tu_CreateDevice(VkPhysicalDevice physicalDevice,
                const VkDeviceCreateInfo *pCreateInfo,
                const VkAllocationCallbacks *pAllocator,
                VkDevice *pDevice)
{
   TU_FROM_HANDLE(tu_physical_device, physical_device, physicalDevice);
   VkResult result;
   struct tu_device *device;

   /* Check enabled features */
   if (pCreateInfo->pEnabledFeatures) {
      VkPhysicalDeviceFeatures supported_features;
      tu_GetPhysicalDeviceFeatures(physicalDevice, &supported_features);
      VkBool32 *supported_feature = (VkBool32 *) &supported_features;
      VkBool32 *enabled_feature = (VkBool32 *) pCreateInfo->pEnabledFeatures;
      unsigned num_features =
         sizeof(VkPhysicalDeviceFeatures) / sizeof(VkBool32);
      for (uint32_t i = 0; i < num_features; i++) {
         if (enabled_feature[i] && !supported_feature[i])
            return vk_error(physical_device->instance,
                            VK_ERROR_FEATURE_NOT_PRESENT);
      }
   }

   device = vk_zalloc2(&physical_device->instance->alloc, pAllocator,
                       sizeof(*device), 8, VK_SYSTEM_ALLOCATION_SCOPE_DEVICE);
   if (!device)
      return vk_error(physical_device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);

   device->_loader_data.loaderMagic = ICD_LOADER_MAGIC;
   device->instance = physical_device->instance;
   device->physical_device = physical_device;

   if (pAllocator)
      device->alloc = *pAllocator;
   else
      device->alloc = physical_device->instance->alloc;

   for (uint32_t i = 0; i < pCreateInfo->enabledExtensionCount; i++) {
      const char *ext_name = pCreateInfo->ppEnabledExtensionNames[i];
      int index = tu_get_device_extension_index(ext_name);
      if (index < 0 ||
          !physical_device->supported_extensions.extensions[index]) {
         vk_free(&device->alloc, device);
         return vk_error(physical_device->instance,
                         VK_ERROR_EXTENSION_NOT_PRESENT);
      }

      device->enabled_extensions.extensions[index] = true;
   }

   for (unsigned i = 0; i < pCreateInfo->queueCreateInfoCount; i++) {
      const VkDeviceQueueCreateInfo *queue_create =
         &pCreateInfo->pQueueCreateInfos[i];
      uint32_t qfi = queue_create->queueFamilyIndex;
      device->queues[qfi] = vk_alloc(
         &device->alloc, queue_create->queueCount * sizeof(struct tu_queue),
         8, VK_SYSTEM_ALLOCATION_SCOPE_DEVICE);
      if (!device->queues[qfi]) {
         result = VK_ERROR_OUT_OF_HOST_MEMORY;
         goto fail;
      }

      memset(device->queues[qfi], 0,
             queue_create->queueCount * sizeof(struct tu_queue));

      device->queue_count[qfi] = queue_create->queueCount;

      for (unsigned q = 0; q < queue_create->queueCount; q++) {
         result = tu_queue_init(device, &device->queues[qfi][q], qfi, q,
                                queue_create->flags);
         if (result != VK_SUCCESS)
            goto fail;
      }
   }

   VkPipelineCacheCreateInfo ci;
   ci.sType = VK_STRUCTURE_TYPE_PIPELINE_CACHE_CREATE_INFO;
   ci.pNext = NULL;
   ci.flags = 0;
   ci.pInitialData = NULL;
   ci.initialDataSize = 0;
   VkPipelineCache pc;
   result =
      tu_CreatePipelineCache(tu_device_to_handle(device), &ci, NULL, &pc);
   if (result != VK_SUCCESS)
      goto fail;

   device->mem_cache = tu_pipeline_cache_from_handle(pc);

   *pDevice = tu_device_to_handle(device);
   return VK_SUCCESS;

fail:
   for (unsigned i = 0; i < TU_MAX_QUEUE_FAMILIES; i++) {
      for (unsigned q = 0; q < device->queue_count[i]; q++)
         tu_queue_finish(&device->queues[i][q]);
      if (device->queue_count[i])
         vk_free(&device->alloc, device->queues[i]);
   }

   vk_free(&device->alloc, device);
   return result;
}

void
tu_DestroyDevice(VkDevice _device, const VkAllocationCallbacks *pAllocator)
{
   TU_FROM_HANDLE(tu_device, device, _device);

   if (!device)
      return;

   for (unsigned i = 0; i < TU_MAX_QUEUE_FAMILIES; i++) {
      for (unsigned q = 0; q < device->queue_count[i]; q++)
         tu_queue_finish(&device->queues[i][q]);
      if (device->queue_count[i])
         vk_free(&device->alloc, device->queues[i]);
   }

   VkPipelineCache pc = tu_pipeline_cache_to_handle(device->mem_cache);
   tu_DestroyPipelineCache(tu_device_to_handle(device), pc, NULL);

   vk_free(&device->alloc, device);
}

VkResult
tu_EnumerateInstanceLayerProperties(uint32_t *pPropertyCount,
                                    VkLayerProperties *pProperties)
{
   *pPropertyCount = 0;
   return VK_SUCCESS;
}

VkResult
tu_EnumerateDeviceLayerProperties(VkPhysicalDevice physicalDevice,
                                  uint32_t *pPropertyCount,
                                  VkLayerProperties *pProperties)
{
   *pPropertyCount = 0;
   return VK_SUCCESS;
}

void
tu_GetDeviceQueue2(VkDevice _device,
                   const VkDeviceQueueInfo2 *pQueueInfo,
                   VkQueue *pQueue)
{
   TU_FROM_HANDLE(tu_device, device, _device);
   struct tu_queue *queue;

   queue =
      &device->queues[pQueueInfo->queueFamilyIndex][pQueueInfo->queueIndex];
   if (pQueueInfo->flags != queue->flags) {
      /* From the Vulkan 1.1.70 spec:
       *
       * "The queue returned by vkGetDeviceQueue2 must have the same
       * flags value from this structure as that used at device
       * creation time in a VkDeviceQueueCreateInfo instance. If no
       * matching flags were specified at device creation time then
       * pQueue will return VK_NULL_HANDLE."
       */
      *pQueue = VK_NULL_HANDLE;
      return;
   }

   *pQueue = tu_queue_to_handle(queue);
}
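
/* Illustrative example of the rule above (not driver-specific): if the
 * device was created with a VkDeviceQueueCreateInfo whose flags were 0,
 * then a vkGetDeviceQueue2 call with
 * flags == VK_DEVICE_QUEUE_CREATE_PROTECTED_BIT returns VK_NULL_HANDLE,
 * while a call with flags == 0 returns the real queue handle.
 */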

void
tu_GetDeviceQueue(VkDevice _device,
                  uint32_t queueFamilyIndex,
                  uint32_t queueIndex,
                  VkQueue *pQueue)
{
   const VkDeviceQueueInfo2 info =
      (VkDeviceQueueInfo2) { .sType = VK_STRUCTURE_TYPE_DEVICE_QUEUE_INFO_2,
                             .queueFamilyIndex = queueFamilyIndex,
                             .queueIndex = queueIndex };

   tu_GetDeviceQueue2(_device, &info, pQueue);
}

VkResult
tu_QueueSubmit(VkQueue _queue,
               uint32_t submitCount,
               const VkSubmitInfo *pSubmits,
               VkFence _fence)
{
   return VK_SUCCESS;
}

VkResult
tu_QueueWaitIdle(VkQueue _queue)
{
   return VK_SUCCESS;
}

VkResult
tu_DeviceWaitIdle(VkDevice _device)
{
   TU_FROM_HANDLE(tu_device, device, _device);

   for (unsigned i = 0; i < TU_MAX_QUEUE_FAMILIES; i++) {
      for (unsigned q = 0; q < device->queue_count[i]; q++) {
         tu_QueueWaitIdle(tu_queue_to_handle(&device->queues[i][q]));
      }
   }
   return VK_SUCCESS;
}

VkResult
tu_EnumerateInstanceExtensionProperties(const char *pLayerName,
                                        uint32_t *pPropertyCount,
                                        VkExtensionProperties *pProperties)
{
   VK_OUTARRAY_MAKE(out, pProperties, pPropertyCount);

   /* We support no layers */
   if (pLayerName)
      return vk_error(NULL, VK_ERROR_LAYER_NOT_PRESENT);

   for (int i = 0; i < TU_INSTANCE_EXTENSION_COUNT; i++) {
      if (tu_supported_instance_extensions.extensions[i]) {
         vk_outarray_append(&out, prop) { *prop = tu_instance_extensions[i]; }
      }
   }

   return vk_outarray_status(&out);
}

VkResult
tu_EnumerateDeviceExtensionProperties(VkPhysicalDevice physicalDevice,
                                      const char *pLayerName,
                                      uint32_t *pPropertyCount,
                                      VkExtensionProperties *pProperties)
{
   TU_FROM_HANDLE(tu_physical_device, device, physicalDevice);
   VK_OUTARRAY_MAKE(out, pProperties, pPropertyCount);

   /* We support no layers */
   if (pLayerName)
      return vk_error(NULL, VK_ERROR_LAYER_NOT_PRESENT);

   for (int i = 0; i < TU_DEVICE_EXTENSION_COUNT; i++) {
      if (device->supported_extensions.extensions[i]) {
         vk_outarray_append(&out, prop) { *prop = tu_device_extensions[i]; }
      }
   }

   return vk_outarray_status(&out);
}

PFN_vkVoidFunction
tu_GetInstanceProcAddr(VkInstance _instance, const char *pName)
{
   TU_FROM_HANDLE(tu_instance, instance, _instance);

   return tu_lookup_entrypoint_checked(
      pName, instance ? instance->api_version : 0,
      instance ? &instance->enabled_extensions : NULL, NULL);
}

/* The loader wants us to expose a second GetInstanceProcAddr function
 * to work around certain LD_PRELOAD issues seen in apps.
 */
PUBLIC
VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL
vk_icdGetInstanceProcAddr(VkInstance instance, const char *pName);

PUBLIC
VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL
vk_icdGetInstanceProcAddr(VkInstance instance, const char *pName)
{
   return tu_GetInstanceProcAddr(instance, pName);
}

PFN_vkVoidFunction
tu_GetDeviceProcAddr(VkDevice _device, const char *pName)
{
   TU_FROM_HANDLE(tu_device, device, _device);

   return tu_lookup_entrypoint_checked(pName, device->instance->api_version,
                                       &device->instance->enabled_extensions,
                                       &device->enabled_extensions);
}

static VkResult
tu_alloc_memory(struct tu_device *device,
                const VkMemoryAllocateInfo *pAllocateInfo,
                const VkAllocationCallbacks *pAllocator,
                VkDeviceMemory *pMem)
{
   struct tu_device_memory *mem;
   VkResult result;

   assert(pAllocateInfo->sType == VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO);

   if (pAllocateInfo->allocationSize == 0) {
      /* Apparently, this is allowed */
      *pMem = VK_NULL_HANDLE;
      return VK_SUCCESS;
   }

   mem = vk_alloc2(&device->alloc, pAllocator, sizeof(*mem), 8,
                   VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (mem == NULL)
      return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);

   result = tu_bo_init_new(device, &mem->bo, pAllocateInfo->allocationSize);
   if (result != VK_SUCCESS) {
      vk_free2(&device->alloc, pAllocator, mem);
      return result;
   }

   mem->size = pAllocateInfo->allocationSize;
   mem->type_index = pAllocateInfo->memoryTypeIndex;

   mem->map = NULL;
   mem->user_ptr = NULL;

   *pMem = tu_device_memory_to_handle(mem);

   return VK_SUCCESS;
}

VkResult
tu_AllocateMemory(VkDevice _device,
                  const VkMemoryAllocateInfo *pAllocateInfo,
                  const VkAllocationCallbacks *pAllocator,
                  VkDeviceMemory *pMem)
{
   TU_FROM_HANDLE(tu_device, device, _device);
   return tu_alloc_memory(device, pAllocateInfo, pAllocator, pMem);
}

void
tu_FreeMemory(VkDevice _device,
              VkDeviceMemory _mem,
              const VkAllocationCallbacks *pAllocator)
{
   TU_FROM_HANDLE(tu_device, device, _device);
   TU_FROM_HANDLE(tu_device_memory, mem, _mem);

   if (mem == NULL)
      return;

   tu_bo_finish(device, &mem->bo);
   vk_free2(&device->alloc, pAllocator, mem);
}

VkResult
tu_MapMemory(VkDevice _device,
             VkDeviceMemory _memory,
             VkDeviceSize offset,
             VkDeviceSize size,
             VkMemoryMapFlags flags,
             void **ppData)
{
   TU_FROM_HANDLE(tu_device, device, _device);
   TU_FROM_HANDLE(tu_device_memory, mem, _memory);
   VkResult result;

   if (mem == NULL) {
      *ppData = NULL;
      return VK_SUCCESS;
   }

   if (mem->user_ptr) {
      *ppData = mem->user_ptr;
   } else if (!mem->map) {
      result = tu_bo_map(device, &mem->bo);
      if (result != VK_SUCCESS)
         return result;
      *ppData = mem->map = mem->bo.map;
   } else
      *ppData = mem->map;

   if (*ppData) {
      *ppData += offset;
      return VK_SUCCESS;
   }

   return vk_error(device->instance, VK_ERROR_MEMORY_MAP_FAILED);
}

void
tu_UnmapMemory(VkDevice _device, VkDeviceMemory _memory)
{
   /* I do not see any unmapping done by the freedreno Gallium driver. */
}

VkResult
tu_FlushMappedMemoryRanges(VkDevice _device,
                           uint32_t memoryRangeCount,
                           const VkMappedMemoryRange *pMemoryRanges)
{
   return VK_SUCCESS;
}

VkResult
tu_InvalidateMappedMemoryRanges(VkDevice _device,
                                uint32_t memoryRangeCount,
                                const VkMappedMemoryRange *pMemoryRanges)
{
   return VK_SUCCESS;
}

void
tu_GetBufferMemoryRequirements(VkDevice _device,
                               VkBuffer _buffer,
                               VkMemoryRequirements *pMemoryRequirements)
{
   TU_FROM_HANDLE(tu_buffer, buffer, _buffer);

   pMemoryRequirements->memoryTypeBits = 1;
   pMemoryRequirements->alignment = 16;
   pMemoryRequirements->size =
      align64(buffer->size, pMemoryRequirements->alignment);
}

void
tu_GetBufferMemoryRequirements2(
   VkDevice device,
   const VkBufferMemoryRequirementsInfo2KHR *pInfo,
   VkMemoryRequirements2KHR *pMemoryRequirements)
{
   tu_GetBufferMemoryRequirements(device, pInfo->buffer,
                                  &pMemoryRequirements->memoryRequirements);
}

void
tu_GetImageMemoryRequirements(VkDevice _device,
                              VkImage _image,
                              VkMemoryRequirements *pMemoryRequirements)
{
   TU_FROM_HANDLE(tu_image, image, _image);

   pMemoryRequirements->memoryTypeBits = 1;
   pMemoryRequirements->size = image->size;
   pMemoryRequirements->alignment = image->alignment;
}

void
tu_GetImageMemoryRequirements2(VkDevice device,
                               const VkImageMemoryRequirementsInfo2KHR *pInfo,
                               VkMemoryRequirements2KHR *pMemoryRequirements)
{
   tu_GetImageMemoryRequirements(device, pInfo->image,
                                 &pMemoryRequirements->memoryRequirements);
}

void
tu_GetImageSparseMemoryRequirements(
   VkDevice device,
   VkImage image,
   uint32_t *pSparseMemoryRequirementCount,
   VkSparseImageMemoryRequirements *pSparseMemoryRequirements)
{
   tu_stub();
}

void
tu_GetImageSparseMemoryRequirements2(
   VkDevice device,
   const VkImageSparseMemoryRequirementsInfo2KHR *pInfo,
   uint32_t *pSparseMemoryRequirementCount,
   VkSparseImageMemoryRequirements2KHR *pSparseMemoryRequirements)
{
   tu_stub();
}

void
tu_GetDeviceMemoryCommitment(VkDevice device,
                             VkDeviceMemory memory,
                             VkDeviceSize *pCommittedMemoryInBytes)
{
   *pCommittedMemoryInBytes = 0;
}

VkResult
tu_BindBufferMemory2(VkDevice device,
                     uint32_t bindInfoCount,
                     const VkBindBufferMemoryInfoKHR *pBindInfos)
{
   return VK_SUCCESS;
}

VkResult
tu_BindBufferMemory(VkDevice device,
                    VkBuffer buffer,
                    VkDeviceMemory memory,
                    VkDeviceSize memoryOffset)
{
   const VkBindBufferMemoryInfoKHR info = {
      .sType = VK_STRUCTURE_TYPE_BIND_BUFFER_MEMORY_INFO_KHR,
      .buffer = buffer,
      .memory = memory,
      .memoryOffset = memoryOffset
   };

   return tu_BindBufferMemory2(device, 1, &info);
}

VkResult
tu_BindImageMemory2(VkDevice device,
                    uint32_t bindInfoCount,
                    const VkBindImageMemoryInfoKHR *pBindInfos)
{
   return VK_SUCCESS;
}

VkResult
tu_BindImageMemory(VkDevice device,
                   VkImage image,
                   VkDeviceMemory memory,
                   VkDeviceSize memoryOffset)
{
   const VkBindImageMemoryInfoKHR info = {
      .sType = VK_STRUCTURE_TYPE_BIND_IMAGE_MEMORY_INFO_KHR,
      .image = image,
      .memory = memory,
      .memoryOffset = memoryOffset
   };

   return tu_BindImageMemory2(device, 1, &info);
}

VkResult
tu_QueueBindSparse(VkQueue _queue,
                   uint32_t bindInfoCount,
                   const VkBindSparseInfo *pBindInfo,
                   VkFence _fence)
{
   return VK_SUCCESS;
}

VkResult
tu_CreateFence(VkDevice _device,
               const VkFenceCreateInfo *pCreateInfo,
               const VkAllocationCallbacks *pAllocator,
               VkFence *pFence)
{
   TU_FROM_HANDLE(tu_device, device, _device);

   struct tu_fence *fence =
      vk_alloc2(&device->alloc, pAllocator, sizeof(*fence), 8,
                VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);

   if (!fence)
      return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);

   *pFence = tu_fence_to_handle(fence);

   return VK_SUCCESS;
}

void
tu_DestroyFence(VkDevice _device,
                VkFence _fence,
                const VkAllocationCallbacks *pAllocator)
{
   TU_FROM_HANDLE(tu_device, device, _device);
   TU_FROM_HANDLE(tu_fence, fence, _fence);

   if (!fence)
      return;

   vk_free2(&device->alloc, pAllocator, fence);
}

VkResult
tu_WaitForFences(VkDevice _device,
                 uint32_t fenceCount,
                 const VkFence *pFences,
                 VkBool32 waitAll,
                 uint64_t timeout)
{
   return VK_SUCCESS;
}

VkResult
tu_ResetFences(VkDevice _device, uint32_t fenceCount, const VkFence *pFences)
{
   return VK_SUCCESS;
}

VkResult
tu_GetFenceStatus(VkDevice _device, VkFence _fence)
{
   return VK_SUCCESS;
}

/* Queue semaphore functions */

VkResult
tu_CreateSemaphore(VkDevice _device,
                   const VkSemaphoreCreateInfo *pCreateInfo,
                   const VkAllocationCallbacks *pAllocator,
                   VkSemaphore *pSemaphore)
{
   TU_FROM_HANDLE(tu_device, device, _device);

   struct tu_semaphore *sem =
      vk_alloc2(&device->alloc, pAllocator, sizeof(*sem), 8,
                VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (!sem)
      return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);

   *pSemaphore = tu_semaphore_to_handle(sem);
   return VK_SUCCESS;
}

void
tu_DestroySemaphore(VkDevice _device,
                    VkSemaphore _semaphore,
                    const VkAllocationCallbacks *pAllocator)
{
   TU_FROM_HANDLE(tu_device, device, _device);
   TU_FROM_HANDLE(tu_semaphore, sem, _semaphore);
   if (!_semaphore)
      return;

   vk_free2(&device->alloc, pAllocator, sem);
}

VkResult
tu_CreateEvent(VkDevice _device,
               const VkEventCreateInfo *pCreateInfo,
               const VkAllocationCallbacks *pAllocator,
               VkEvent *pEvent)
{
   TU_FROM_HANDLE(tu_device, device, _device);
   struct tu_event *event =
      vk_alloc2(&device->alloc, pAllocator, sizeof(*event), 8,
                VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);

   if (!event)
      return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);

   *pEvent = tu_event_to_handle(event);

   return VK_SUCCESS;
}

void
tu_DestroyEvent(VkDevice _device,
                VkEvent _event,
                const VkAllocationCallbacks *pAllocator)
{
   TU_FROM_HANDLE(tu_device, device, _device);
   TU_FROM_HANDLE(tu_event, event, _event);

   if (!event)
      return;
   vk_free2(&device->alloc, pAllocator, event);
}

VkResult
tu_GetEventStatus(VkDevice _device, VkEvent _event)
{
   TU_FROM_HANDLE(tu_event, event, _event);

   if (*event->map == 1)
      return VK_EVENT_SET;
   return VK_EVENT_RESET;
}

VkResult
tu_SetEvent(VkDevice _device, VkEvent _event)
{
   TU_FROM_HANDLE(tu_event, event, _event);
   *event->map = 1;

   return VK_SUCCESS;
}

VkResult
tu_ResetEvent(VkDevice _device, VkEvent _event)
{
   TU_FROM_HANDLE(tu_event, event, _event);
   *event->map = 0;

   return VK_SUCCESS;
}

VkResult
tu_CreateBuffer(VkDevice _device,
                const VkBufferCreateInfo *pCreateInfo,
                const VkAllocationCallbacks *pAllocator,
                VkBuffer *pBuffer)
{
   TU_FROM_HANDLE(tu_device, device, _device);
   struct tu_buffer *buffer;

   assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO);

   buffer = vk_alloc2(&device->alloc, pAllocator, sizeof(*buffer), 8,
                      VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (buffer == NULL)
      return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);

   buffer->size = pCreateInfo->size;
   buffer->usage = pCreateInfo->usage;
   buffer->flags = pCreateInfo->flags;

   *pBuffer = tu_buffer_to_handle(buffer);

   return VK_SUCCESS;
}

void
tu_DestroyBuffer(VkDevice _device,
                 VkBuffer _buffer,
                 const VkAllocationCallbacks *pAllocator)
{
   TU_FROM_HANDLE(tu_device, device, _device);
   TU_FROM_HANDLE(tu_buffer, buffer, _buffer);

   if (!buffer)
      return;

   vk_free2(&device->alloc, pAllocator, buffer);
}

static uint32_t
tu_surface_max_layer_count(struct tu_image_view *iview)
{
   return iview->type == VK_IMAGE_VIEW_TYPE_3D
             ? iview->extent.depth
             : (iview->base_layer + iview->layer_count);
}

VkResult
tu_CreateFramebuffer(VkDevice _device,
                     const VkFramebufferCreateInfo *pCreateInfo,
                     const VkAllocationCallbacks *pAllocator,
                     VkFramebuffer *pFramebuffer)
{
   TU_FROM_HANDLE(tu_device, device, _device);
   struct tu_framebuffer *framebuffer;

   assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO);

   size_t size = sizeof(*framebuffer) + sizeof(struct tu_attachment_info) *
                                           pCreateInfo->attachmentCount;
   framebuffer = vk_alloc2(&device->alloc, pAllocator, size, 8,
                           VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (framebuffer == NULL)
      return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);

   framebuffer->attachment_count = pCreateInfo->attachmentCount;
   framebuffer->width = pCreateInfo->width;
   framebuffer->height = pCreateInfo->height;
   framebuffer->layers = pCreateInfo->layers;
   for (uint32_t i = 0; i < pCreateInfo->attachmentCount; i++) {
      VkImageView _iview = pCreateInfo->pAttachments[i];
      struct tu_image_view *iview = tu_image_view_from_handle(_iview);
      framebuffer->attachments[i].attachment = iview;

      framebuffer->width = MIN2(framebuffer->width, iview->extent.width);
      framebuffer->height = MIN2(framebuffer->height, iview->extent.height);
      framebuffer->layers =
         MIN2(framebuffer->layers, tu_surface_max_layer_count(iview));
   }

   *pFramebuffer = tu_framebuffer_to_handle(framebuffer);
   return VK_SUCCESS;
}

void
tu_DestroyFramebuffer(VkDevice _device,
                      VkFramebuffer _fb,
                      const VkAllocationCallbacks *pAllocator)
{
   TU_FROM_HANDLE(tu_device, device, _device);
   TU_FROM_HANDLE(tu_framebuffer, fb, _fb);

   if (!fb)
      return;
   vk_free2(&device->alloc, pAllocator, fb);
}

static void
tu_init_sampler(struct tu_device *device,
                struct tu_sampler *sampler,
                const VkSamplerCreateInfo *pCreateInfo)
{
}

VkResult
tu_CreateSampler(VkDevice _device,
                 const VkSamplerCreateInfo *pCreateInfo,
                 const VkAllocationCallbacks *pAllocator,
                 VkSampler *pSampler)
{
   TU_FROM_HANDLE(tu_device, device, _device);
   struct tu_sampler *sampler;

   assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_SAMPLER_CREATE_INFO);

   sampler = vk_alloc2(&device->alloc, pAllocator, sizeof(*sampler), 8,
                       VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (!sampler)
      return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);

   tu_init_sampler(device, sampler, pCreateInfo);
   *pSampler = tu_sampler_to_handle(sampler);

   return VK_SUCCESS;
}

void
tu_DestroySampler(VkDevice _device,
                  VkSampler _sampler,
                  const VkAllocationCallbacks *pAllocator)
{
   TU_FROM_HANDLE(tu_device, device, _device);
   TU_FROM_HANDLE(tu_sampler, sampler, _sampler);

   if (!sampler)
      return;
   vk_free2(&device->alloc, pAllocator, sampler);
}

/* vk_icd.h does not declare this function, so we declare it here to
 * suppress -Wmissing-prototypes.
 */
PUBLIC VKAPI_ATTR VkResult VKAPI_CALL
vk_icdNegotiateLoaderICDInterfaceVersion(uint32_t *pSupportedVersion);

PUBLIC VKAPI_ATTR VkResult VKAPI_CALL
vk_icdNegotiateLoaderICDInterfaceVersion(uint32_t *pSupportedVersion)
{
   /* For the full details on loader interface versioning, see
    * <https://github.com/KhronosGroup/Vulkan-LoaderAndValidationLayers/blob/master/loader/LoaderAndLayerInterface.md>.
    * What follows is a condensed summary, to help you navigate the large and
    * confusing official doc.
    *
    * - Loader interface v0 is incompatible with later versions. We don't
    *   support it.
    *
    * - In loader interface v1:
    *       - The first ICD entrypoint called by the loader is
    *         vk_icdGetInstanceProcAddr(). The ICD must statically expose this
    *         entrypoint.
    *       - The ICD must statically expose no other Vulkan symbol unless it
    *         is linked with -Bsymbolic.
    *       - Each dispatchable Vulkan handle created by the ICD must be
    *         a pointer to a struct whose first member is VK_LOADER_DATA. The
    *         ICD must initialize VK_LOADER_DATA.loaderMagic to
    *         ICD_LOADER_MAGIC.
    *       - The loader implements vkCreate{PLATFORM}SurfaceKHR() and
    *         vkDestroySurfaceKHR(). The ICD must be capable of working with
    *         such loader-managed surfaces.
    *
    * - Loader interface v2 differs from v1 in:
    *       - The first ICD entrypoint called by the loader is
    *         vk_icdNegotiateLoaderICDInterfaceVersion(). The ICD must
    *         statically expose this entrypoint.
    *
    * - Loader interface v3 differs from v2 in:
    *       - The ICD must implement vkCreate{PLATFORM}SurfaceKHR(),
    *         vkDestroySurfaceKHR(), and other API which uses VkSurfaceKHR,
    *         because the loader no longer does so.
    */
   *pSupportedVersion = MIN2(*pSupportedVersion, 3u);
   return VK_SUCCESS;
}
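
/* For example (illustrative only): a loader that supports interface v5
 * calls this with *pSupportedVersion == 5 and reads back 3, so both sides
 * proceed under the v3 rules summarized above; an old loader that passes 1
 * reads back 1 and uses the v1 rules.
 */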

void
tu_GetPhysicalDeviceExternalSemaphoreProperties(
   VkPhysicalDevice physicalDevice,
   const VkPhysicalDeviceExternalSemaphoreInfoKHR *pExternalSemaphoreInfo,
   VkExternalSemaphorePropertiesKHR *pExternalSemaphoreProperties)
{
   pExternalSemaphoreProperties->exportFromImportedHandleTypes = 0;
   pExternalSemaphoreProperties->compatibleHandleTypes = 0;
   pExternalSemaphoreProperties->externalSemaphoreFeatures = 0;
}

void
tu_GetPhysicalDeviceExternalFenceProperties(
   VkPhysicalDevice physicalDevice,
   const VkPhysicalDeviceExternalFenceInfoKHR *pExternalFenceInfo,
   VkExternalFencePropertiesKHR *pExternalFenceProperties)
{
   pExternalFenceProperties->exportFromImportedHandleTypes = 0;
   pExternalFenceProperties->compatibleHandleTypes = 0;
   pExternalFenceProperties->externalFenceFeatures = 0;
}

VkResult
tu_CreateDebugReportCallbackEXT(
   VkInstance _instance,
   const VkDebugReportCallbackCreateInfoEXT *pCreateInfo,
   const VkAllocationCallbacks *pAllocator,
   VkDebugReportCallbackEXT *pCallback)
{
   TU_FROM_HANDLE(tu_instance, instance, _instance);
   return vk_create_debug_report_callback(&instance->debug_report_callbacks,
                                          pCreateInfo, pAllocator,
                                          &instance->alloc, pCallback);
}

void
tu_DestroyDebugReportCallbackEXT(VkInstance _instance,
                                 VkDebugReportCallbackEXT _callback,
                                 const VkAllocationCallbacks *pAllocator)
{
   TU_FROM_HANDLE(tu_instance, instance, _instance);
   vk_destroy_debug_report_callback(&instance->debug_report_callbacks,
                                    _callback, pAllocator, &instance->alloc);
}

void
tu_DebugReportMessageEXT(VkInstance _instance,
                         VkDebugReportFlagsEXT flags,
                         VkDebugReportObjectTypeEXT objectType,
                         uint64_t object,
                         size_t location,
                         int32_t messageCode,
                         const char *pLayerPrefix,
                         const char *pMessage)
{
   TU_FROM_HANDLE(tu_instance, instance, _instance);
   vk_debug_report(&instance->debug_report_callbacks, flags, objectType,
                   object, location, messageCode, pLayerPrefix, pMessage);
}

void
tu_GetDeviceGroupPeerMemoryFeatures(
   VkDevice device,
   uint32_t heapIndex,
   uint32_t localDeviceIndex,
   uint32_t remoteDeviceIndex,
   VkPeerMemoryFeatureFlags *pPeerMemoryFeatures)
{
   assert(localDeviceIndex == remoteDeviceIndex);

   *pPeerMemoryFeatures = VK_PEER_MEMORY_FEATURE_COPY_SRC_BIT |
                          VK_PEER_MEMORY_FEATURE_COPY_DST_BIT |
                          VK_PEER_MEMORY_FEATURE_GENERIC_SRC_BIT |
                          VK_PEER_MEMORY_FEATURE_GENERIC_DST_BIT;
}