turnip: Replace fd_bo with tu_bo
src/freedreno/vulkan/tu_device.c

/*
 * Copyright © 2016 Red Hat.
 * Copyright © 2016 Bas Nieuwenhuizen
 *
 * based in part on anv driver which is:
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "tu_private.h"

#include "util/debug.h"
#include "util/disk_cache.h"
#include "util/strtod.h"
#include "vk_format.h"
#include "vk_util.h"

#include <fcntl.h>
#include <stdbool.h>
#include <string.h>
#include <sys/mman.h>
#include <sys/sysinfo.h>
#include <unistd.h>
#include <xf86drm.h>
#include <msm_drm.h>

static int
tu_device_get_cache_uuid(uint16_t family, void *uuid)
{
   uint32_t mesa_timestamp;
   uint16_t f = family;
   memset(uuid, 0, VK_UUID_SIZE);
   if (!disk_cache_get_function_timestamp(tu_device_get_cache_uuid,
                                          &mesa_timestamp))
      return -1;

   memcpy(uuid, &mesa_timestamp, 4);
   memcpy((char *)uuid + 4, &f, 2);
   snprintf((char *)uuid + 6, VK_UUID_SIZE - 6, "tu");
   return 0;
}

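/* TODO: Generate real driver and device UUIDs. For now the driver UUID is
 * all zeros and the device UUID is still unimplemented.
 */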
static void
tu_get_driver_uuid(void *uuid)
{
   memset(uuid, 0, VK_UUID_SIZE);
}

static void
tu_get_device_uuid(void *uuid)
{
   stub();
}

VkResult
tu_bo_init_new(struct tu_device *dev, struct tu_bo *bo, uint64_t size)
{
   /* TODO: Choose better flags. As of 2018-11-12, freedreno/drm/msm_bo.c
    * always sets `flags = MSM_BO_WC`, and we copy that behavior here.
    */
   uint32_t gem_handle = tu_gem_new(dev, size, MSM_BO_WC);
   if (!gem_handle)
      goto fail_new;

   /* Calling DRM_MSM_GEM_INFO forces the kernel to allocate backing pages. We
    * want immediate backing pages because vkAllocateMemory and friends must
    * not lazily fail.
    *
    * TODO(chadv): Must we really call DRM_MSM_GEM_INFO to acquire backing
    * pages? I infer so from reading comments in msm_bo.c:bo_allocate(), but
    * maybe I misunderstand.
    */

   /* TODO: Do we need 'offset' if we have 'iova'? */
   uint64_t offset = tu_gem_info_offset(dev, gem_handle);
   if (!offset)
      goto fail_info;

   uint64_t iova = tu_gem_info_iova(dev, gem_handle);
   if (!iova)
      goto fail_info;

   *bo = (struct tu_bo) {
      .gem_handle = gem_handle,
      .size = size,
      .offset = offset,
      .iova = iova,
   };

   return VK_SUCCESS;

fail_info:
   tu_gem_close(dev, gem_handle);
fail_new:
   return vk_error(dev->instance, VK_ERROR_OUT_OF_DEVICE_MEMORY);
}

VkResult
tu_bo_map(struct tu_device *dev, struct tu_bo *bo)
{
   if (bo->map)
      return VK_SUCCESS;

   /* TODO: Should we use the wrapper os_mmap() like freedreno does? */
   void *map = mmap(0, bo->size, PROT_READ | PROT_WRITE, MAP_SHARED,
                    dev->physical_device->local_fd, bo->offset);
   if (map == MAP_FAILED)
      return vk_error(dev->instance, VK_ERROR_MEMORY_MAP_FAILED);

   bo->map = map;
   return VK_SUCCESS;
}

void
tu_bo_finish(struct tu_device *dev, struct tu_bo *bo)
{
   assert(bo->gem_handle);

   if (bo->map)
      munmap(bo->map, bo->size);

   tu_gem_close(dev, bo->gem_handle);
}

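/* A minimal sketch of the tu_bo lifecycle, for orientation (error handling
 * elided; `dev` is assumed to be a live tu_device):
 *
 *    struct tu_bo bo;
 *    tu_bo_init_new(dev, &bo, 4096);   // allocate a 4 KiB GEM buffer
 *    tu_bo_map(dev, &bo);              // bo.map now points at the mapping
 *    memset(bo.map, 0, bo.size);       // CPU writes through the WC mapping
 *    tu_bo_finish(dev, &bo);           // unmaps and closes the GEM handle
 */
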
static VkResult
tu_physical_device_init(struct tu_physical_device *device,
                        struct tu_instance *instance,
                        drmDevicePtr drm_device)
{
   const char *path = drm_device->nodes[DRM_NODE_RENDER];
   VkResult result = VK_SUCCESS;
   drmVersionPtr version;
   int fd;
   int master_fd = -1;
   struct fd_pipe *tmp_pipe = NULL;
   uint64_t val;

   fd = open(path, O_RDWR | O_CLOEXEC);
   if (fd < 0) {
      return vk_errorf(instance, VK_ERROR_INCOMPATIBLE_DRIVER,
                       "failed to open device %s", path);
   }

   /* Version 1.3 added MSM_INFO_IOVA. */
   const int min_version_major = 1;
   const int min_version_minor = 3;

   version = drmGetVersion(fd);
   if (!version) {
      close(fd);
      return vk_errorf(instance, VK_ERROR_INCOMPATIBLE_DRIVER,
                       "failed to query kernel driver version for device %s",
                       path);
   }

   if (strcmp(version->name, "msm")) {
      drmFreeVersion(version);
      if (master_fd != -1)
         close(master_fd);
      close(fd);
      return vk_errorf(instance, VK_ERROR_INCOMPATIBLE_DRIVER,
                       "device %s does not use the msm kernel driver", path);
   }

   if (version->version_major != min_version_major ||
       version->version_minor < min_version_minor) {
      result = vk_errorf(instance, VK_ERROR_INCOMPATIBLE_DRIVER,
                         "kernel driver for device %s has version %d.%d, "
                         "but Vulkan requires version >= %d.%d",
                         path,
                         version->version_major, version->version_minor,
                         min_version_major, min_version_minor);
      drmFreeVersion(version);
      close(fd);
      return result;
   }

   drmFreeVersion(version);

   if (instance->debug_flags & TU_DEBUG_STARTUP)
      tu_logi("Found compatible device '%s'.", path);

   device->_loader_data.loaderMagic = ICD_LOADER_MAGIC;
   device->instance = instance;
   assert(strlen(path) < ARRAY_SIZE(device->path));
   strncpy(device->path, path, ARRAY_SIZE(device->path));

   if (instance->enabled_extensions.KHR_display) {
      master_fd = open(drm_device->nodes[DRM_NODE_PRIMARY], O_RDWR | O_CLOEXEC);
      if (master_fd >= 0) {
         /* TODO: free master_fd if accel is not working? */
      }
   }

   device->master_fd = master_fd;
   device->local_fd = fd;

   device->drm_device = fd_device_new_dup(fd);
   if (!device->drm_device) {
      result = vk_errorf(instance, VK_ERROR_INITIALIZATION_FAILED,
                         "could not create the libdrm device");
      goto fail;
   }

   tmp_pipe = fd_pipe_new(device->drm_device, FD_PIPE_3D);
   if (!tmp_pipe) {
      result = vk_errorf(instance, VK_ERROR_INITIALIZATION_FAILED,
                         "could not open the 3D pipe");
      goto fail;
   }

   if (fd_pipe_get_param(tmp_pipe, FD_GPU_ID, &val)) {
      result = vk_errorf(instance, VK_ERROR_INITIALIZATION_FAILED,
                         "could not get GPU ID");
      goto fail;
   }
   device->gpu_id = val;

   if (fd_pipe_get_param(tmp_pipe, FD_GMEM_SIZE, &val)) {
      result = vk_errorf(instance, VK_ERROR_INITIALIZATION_FAILED,
                         "could not get GMEM size");
      goto fail;
   }
   device->gmem_size = val;

   fd_pipe_del(tmp_pipe);
   tmp_pipe = NULL;

   memset(device->name, 0, sizeof(device->name));
   sprintf(device->name, "FD%d", device->gpu_id);

   switch (device->gpu_id) {
   case 530:
      break;
   default:
      result = vk_errorf(instance, VK_ERROR_INITIALIZATION_FAILED,
                         "device %s is unsupported", device->name);
      goto fail;
   }

   if (tu_device_get_cache_uuid(device->gpu_id, device->cache_uuid)) {
      result = vk_errorf(instance, VK_ERROR_INITIALIZATION_FAILED,
                         "cannot generate UUID");
      goto fail;
   }

   /* The gpu id is already embedded in the uuid, so we just pass the device
    * name when creating the cache.
    */
   char buf[VK_UUID_SIZE * 2 + 1];
   disk_cache_format_hex_id(buf, device->cache_uuid, VK_UUID_SIZE * 2);
   device->disk_cache = disk_cache_create(device->name, buf, 0);

   fprintf(stderr,
           "WARNING: tu is not a conformant vulkan implementation, "
           "testing use only.\n");

   tu_get_driver_uuid(&device->driver_uuid);
   tu_get_device_uuid(&device->device_uuid);

   tu_fill_device_extension_table(device, &device->supported_extensions);

   if (result != VK_SUCCESS) {
      vk_error(instance, result);
      goto fail;
   }

   return VK_SUCCESS;

fail:
   if (tmp_pipe)
      fd_pipe_del(tmp_pipe);
   if (device->drm_device)
      fd_device_del(device->drm_device);
   close(fd);
   if (master_fd != -1)
      close(master_fd);
   return result;
}

static void
tu_physical_device_finish(struct tu_physical_device *device)
{
   if (device->drm_device)
      fd_device_del(device->drm_device);

   disk_cache_destroy(device->disk_cache);
   close(device->local_fd);
   if (device->master_fd != -1)
      close(device->master_fd);
}

static void *
default_alloc_func(void *pUserData,
                   size_t size,
                   size_t align,
                   VkSystemAllocationScope allocationScope)
{
   return malloc(size);
}

static void *
default_realloc_func(void *pUserData,
                     void *pOriginal,
                     size_t size,
                     size_t align,
                     VkSystemAllocationScope allocationScope)
{
   return realloc(pOriginal, size);
}

static void
default_free_func(void *pUserData, void *pMemory)
{
   free(pMemory);
}

static const VkAllocationCallbacks default_alloc = {
   .pUserData = NULL,
   .pfnAllocation = default_alloc_func,
   .pfnReallocation = default_realloc_func,
   .pfnFree = default_free_func,
};

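/* Debug options are parsed from the TU_DEBUG environment variable, e.g.
 * `TU_DEBUG=startup` enables the extra logging used during bring-up.
 */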
static const struct debug_control tu_debug_options[] = {
   { "startup", TU_DEBUG_STARTUP },
   { NULL, 0 }
};

const char *
tu_get_debug_option_name(int id)
{
   assert(id < ARRAY_SIZE(tu_debug_options) - 1);
   return tu_debug_options[id].string;
}

static int
tu_get_instance_extension_index(const char *name)
{
   for (unsigned i = 0; i < TU_INSTANCE_EXTENSION_COUNT; ++i) {
      if (strcmp(name, tu_instance_extensions[i].extensionName) == 0)
         return i;
   }
   return -1;
}

VkResult
tu_CreateInstance(const VkInstanceCreateInfo *pCreateInfo,
                  const VkAllocationCallbacks *pAllocator,
                  VkInstance *pInstance)
{
   struct tu_instance *instance;
   VkResult result;

   assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_INSTANCE_CREATE_INFO);

   uint32_t client_version;
   if (pCreateInfo->pApplicationInfo &&
       pCreateInfo->pApplicationInfo->apiVersion != 0) {
      client_version = pCreateInfo->pApplicationInfo->apiVersion;
   } else {
      tu_EnumerateInstanceVersion(&client_version);
   }

   instance = vk_zalloc2(&default_alloc,
                         pAllocator,
                         sizeof(*instance),
                         8,
                         VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
   if (!instance)
      return vk_error(NULL, VK_ERROR_OUT_OF_HOST_MEMORY);

   instance->_loader_data.loaderMagic = ICD_LOADER_MAGIC;

   if (pAllocator)
      instance->alloc = *pAllocator;
   else
      instance->alloc = default_alloc;

   instance->api_version = client_version;
   instance->physical_device_count = -1;

   instance->debug_flags =
      parse_debug_string(getenv("TU_DEBUG"), tu_debug_options);

   if (instance->debug_flags & TU_DEBUG_STARTUP)
      tu_logi("Created an instance");

   for (uint32_t i = 0; i < pCreateInfo->enabledExtensionCount; i++) {
      const char *ext_name = pCreateInfo->ppEnabledExtensionNames[i];
      int index = tu_get_instance_extension_index(ext_name);

      if (index < 0 || !tu_supported_instance_extensions.extensions[index]) {
         /* Report with a NULL instance: we are about to free it. */
         vk_free2(&default_alloc, pAllocator, instance);
         return vk_error(NULL, VK_ERROR_EXTENSION_NOT_PRESENT);
      }

      instance->enabled_extensions.extensions[index] = true;
   }

   result = vk_debug_report_instance_init(&instance->debug_report_callbacks);
   if (result != VK_SUCCESS) {
      vk_free2(&default_alloc, pAllocator, instance);
      return vk_error(NULL, result);
   }

   _mesa_locale_init();

   VG(VALGRIND_CREATE_MEMPOOL(instance, 0, false));

   *pInstance = tu_instance_to_handle(instance);

   return VK_SUCCESS;
}

void
tu_DestroyInstance(VkInstance _instance,
                   const VkAllocationCallbacks *pAllocator)
{
   TU_FROM_HANDLE(tu_instance, instance, _instance);

   if (!instance)
      return;

   for (int i = 0; i < instance->physical_device_count; ++i) {
      tu_physical_device_finish(instance->physical_devices + i);
   }

   VG(VALGRIND_DESTROY_MEMPOOL(instance));

   _mesa_locale_fini();

   vk_debug_report_instance_destroy(&instance->debug_report_callbacks);

   vk_free(&instance->alloc, instance);
}

static VkResult
tu_enumerate_devices(struct tu_instance *instance)
{
   /* TODO: Check for more devices? */
   drmDevicePtr devices[8];
   VkResult result = VK_ERROR_INCOMPATIBLE_DRIVER;
   int max_devices;

   instance->physical_device_count = 0;

   max_devices = drmGetDevices2(0, devices, ARRAY_SIZE(devices));

   if (instance->debug_flags & TU_DEBUG_STARTUP)
      tu_logi("Found %d drm nodes", max_devices);

   if (max_devices < 1)
      return vk_error(instance, VK_ERROR_INCOMPATIBLE_DRIVER);

   for (unsigned i = 0; i < (unsigned)max_devices; i++) {
      if (devices[i]->available_nodes & 1 << DRM_NODE_RENDER &&
          devices[i]->bustype == DRM_BUS_PLATFORM) {

         result = tu_physical_device_init(instance->physical_devices +
                                             instance->physical_device_count,
                                          instance,
                                          devices[i]);
         if (result == VK_SUCCESS)
            ++instance->physical_device_count;
         else if (result != VK_ERROR_INCOMPATIBLE_DRIVER)
            break;
      }
   }
   drmFreeDevices(devices, max_devices);

   return result;
}

VkResult
tu_EnumeratePhysicalDevices(VkInstance _instance,
                            uint32_t *pPhysicalDeviceCount,
                            VkPhysicalDevice *pPhysicalDevices)
{
   TU_FROM_HANDLE(tu_instance, instance, _instance);
   VK_OUTARRAY_MAKE(out, pPhysicalDevices, pPhysicalDeviceCount);

   VkResult result;

   if (instance->physical_device_count < 0) {
      result = tu_enumerate_devices(instance);
      if (result != VK_SUCCESS && result != VK_ERROR_INCOMPATIBLE_DRIVER)
         return result;
   }

   for (uint32_t i = 0; i < instance->physical_device_count; ++i) {
      vk_outarray_append(&out, p) {
         *p = tu_physical_device_to_handle(instance->physical_devices + i);
      }
   }

   return vk_outarray_status(&out);
}

VkResult
tu_EnumeratePhysicalDeviceGroups(
   VkInstance _instance,
   uint32_t *pPhysicalDeviceGroupCount,
   VkPhysicalDeviceGroupProperties *pPhysicalDeviceGroupProperties)
{
   TU_FROM_HANDLE(tu_instance, instance, _instance);
   VK_OUTARRAY_MAKE(out, pPhysicalDeviceGroupProperties,
                    pPhysicalDeviceGroupCount);
   VkResult result;

   if (instance->physical_device_count < 0) {
      result = tu_enumerate_devices(instance);
      if (result != VK_SUCCESS && result != VK_ERROR_INCOMPATIBLE_DRIVER)
         return result;
   }

   for (uint32_t i = 0; i < instance->physical_device_count; ++i) {
      vk_outarray_append(&out, p) {
         p->physicalDeviceCount = 1;
         p->physicalDevices[0] =
            tu_physical_device_to_handle(instance->physical_devices + i);
         p->subsetAllocation = false;
      }
   }

   return vk_outarray_status(&out);
}

void
tu_GetPhysicalDeviceFeatures(VkPhysicalDevice physicalDevice,
                             VkPhysicalDeviceFeatures *pFeatures)
{
   memset(pFeatures, 0, sizeof(*pFeatures));

   *pFeatures = (VkPhysicalDeviceFeatures) {
      .robustBufferAccess = false,
      .fullDrawIndexUint32 = false,
      .imageCubeArray = false,
      .independentBlend = false,
      .geometryShader = false,
      .tessellationShader = false,
      .sampleRateShading = false,
      .dualSrcBlend = false,
      .logicOp = false,
      .multiDrawIndirect = false,
      .drawIndirectFirstInstance = false,
      .depthClamp = false,
      .depthBiasClamp = false,
      .fillModeNonSolid = false,
      .depthBounds = false,
      .wideLines = false,
      .largePoints = false,
      .alphaToOne = false,
      .multiViewport = false,
      .samplerAnisotropy = false,
      .textureCompressionETC2 = false,
      .textureCompressionASTC_LDR = false,
      .textureCompressionBC = false,
      .occlusionQueryPrecise = false,
      .pipelineStatisticsQuery = false,
      .vertexPipelineStoresAndAtomics = false,
      .fragmentStoresAndAtomics = false,
      .shaderTessellationAndGeometryPointSize = false,
      .shaderImageGatherExtended = false,
      .shaderStorageImageExtendedFormats = false,
      .shaderStorageImageMultisample = false,
      .shaderUniformBufferArrayDynamicIndexing = false,
      .shaderSampledImageArrayDynamicIndexing = false,
      .shaderStorageBufferArrayDynamicIndexing = false,
      .shaderStorageImageArrayDynamicIndexing = false,
      .shaderStorageImageReadWithoutFormat = false,
      .shaderStorageImageWriteWithoutFormat = false,
      .shaderClipDistance = false,
      .shaderCullDistance = false,
      .shaderFloat64 = false,
      .shaderInt64 = false,
      .shaderInt16 = false,
      .sparseBinding = false,
      .variableMultisampleRate = false,
      .inheritedQueries = false,
   };
}

void
tu_GetPhysicalDeviceFeatures2(VkPhysicalDevice physicalDevice,
                              VkPhysicalDeviceFeatures2KHR *pFeatures)
{
   vk_foreach_struct(ext, pFeatures->pNext)
   {
      switch (ext->sType) {
      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VARIABLE_POINTER_FEATURES_KHR: {
         VkPhysicalDeviceVariablePointerFeaturesKHR *features = (void *)ext;
         features->variablePointersStorageBuffer = false;
         features->variablePointers = false;
         break;
      }
      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MULTIVIEW_FEATURES_KHR: {
         VkPhysicalDeviceMultiviewFeaturesKHR *features =
            (VkPhysicalDeviceMultiviewFeaturesKHR *)ext;
         features->multiview = false;
         features->multiviewGeometryShader = false;
         features->multiviewTessellationShader = false;
         break;
      }
      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_DRAW_PARAMETER_FEATURES: {
         VkPhysicalDeviceShaderDrawParameterFeatures *features =
            (VkPhysicalDeviceShaderDrawParameterFeatures *)ext;
         features->shaderDrawParameters = false;
         break;
      }
      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROTECTED_MEMORY_FEATURES: {
         VkPhysicalDeviceProtectedMemoryFeatures *features =
            (VkPhysicalDeviceProtectedMemoryFeatures *)ext;
         features->protectedMemory = false;
         break;
      }
      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_16BIT_STORAGE_FEATURES: {
         VkPhysicalDevice16BitStorageFeatures *features =
            (VkPhysicalDevice16BitStorageFeatures *)ext;
         features->storageBuffer16BitAccess = false;
         features->uniformAndStorageBuffer16BitAccess = false;
         features->storagePushConstant16 = false;
         features->storageInputOutput16 = false;
         break;
      }
      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SAMPLER_YCBCR_CONVERSION_FEATURES: {
         VkPhysicalDeviceSamplerYcbcrConversionFeatures *features =
            (VkPhysicalDeviceSamplerYcbcrConversionFeatures *)ext;
         features->samplerYcbcrConversion = false;
         break;
      }
      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DESCRIPTOR_INDEXING_FEATURES_EXT: {
         VkPhysicalDeviceDescriptorIndexingFeaturesEXT *features =
            (VkPhysicalDeviceDescriptorIndexingFeaturesEXT *)ext;
         features->shaderInputAttachmentArrayDynamicIndexing = false;
         features->shaderUniformTexelBufferArrayDynamicIndexing = false;
         features->shaderStorageTexelBufferArrayDynamicIndexing = false;
         features->shaderUniformBufferArrayNonUniformIndexing = false;
         features->shaderSampledImageArrayNonUniformIndexing = false;
         features->shaderStorageBufferArrayNonUniformIndexing = false;
         features->shaderStorageImageArrayNonUniformIndexing = false;
         features->shaderInputAttachmentArrayNonUniformIndexing = false;
         features->shaderUniformTexelBufferArrayNonUniformIndexing = false;
         features->shaderStorageTexelBufferArrayNonUniformIndexing = false;
         features->descriptorBindingUniformBufferUpdateAfterBind = false;
         features->descriptorBindingSampledImageUpdateAfterBind = false;
         features->descriptorBindingStorageImageUpdateAfterBind = false;
         features->descriptorBindingStorageBufferUpdateAfterBind = false;
         features->descriptorBindingUniformTexelBufferUpdateAfterBind = false;
         features->descriptorBindingStorageTexelBufferUpdateAfterBind = false;
         features->descriptorBindingUpdateUnusedWhilePending = false;
         features->descriptorBindingPartiallyBound = false;
         features->descriptorBindingVariableDescriptorCount = false;
         features->runtimeDescriptorArray = false;
         break;
      }
      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_CONDITIONAL_RENDERING_FEATURES_EXT: {
         VkPhysicalDeviceConditionalRenderingFeaturesEXT *features =
            (VkPhysicalDeviceConditionalRenderingFeaturesEXT *)ext;
         features->conditionalRendering = false;
         features->inheritedConditionalRendering = false;
         break;
      }
      default:
         break;
      }
   }

   tu_GetPhysicalDeviceFeatures(physicalDevice, &pFeatures->features);
}

void
tu_GetPhysicalDeviceProperties(VkPhysicalDevice physicalDevice,
                               VkPhysicalDeviceProperties *pProperties)
{
   TU_FROM_HANDLE(tu_physical_device, pdevice, physicalDevice);
   VkSampleCountFlags sample_counts = 0xf;

   /* Make sure that the entire descriptor set is addressable with a signed
    * 32-bit int. So the sum of all limits scaled by descriptor size has to
    * be at most 2 GiB. The combined image & samplers object counts as one of
    * both. This limit is for the pipeline layout, not for the set layout,
    * but there is no set limit, so we just set a pipeline limit. I don't
    * think any app is going to hit this soon.
    */
   size_t max_descriptor_set_size =
      ((1ull << 31) - 16 * MAX_DYNAMIC_BUFFERS) /
      (32 /* uniform buffer, 32 due to potential space wasted on alignment */ +
       32 /* storage buffer, 32 due to potential space wasted on alignment */ +
       32 /* sampler, largest when combined with image */ +
       64 /* sampled image */ + 64 /* storage image */);

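   /* With the 224 bytes of worst-case descriptor sizes above, this comes out
    * to roughly (2^31) / 224, i.e. about 9.5 million descriptors per stage.
    */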
   VkPhysicalDeviceLimits limits = {
      .maxImageDimension1D = (1 << 14),
      .maxImageDimension2D = (1 << 14),
      .maxImageDimension3D = (1 << 11),
      .maxImageDimensionCube = (1 << 14),
      .maxImageArrayLayers = (1 << 11),
      .maxTexelBufferElements = 128 * 1024 * 1024,
      .maxUniformBufferRange = UINT32_MAX,
      .maxStorageBufferRange = UINT32_MAX,
      .maxPushConstantsSize = MAX_PUSH_CONSTANTS_SIZE,
      .maxMemoryAllocationCount = UINT32_MAX,
      .maxSamplerAllocationCount = 64 * 1024,
      .bufferImageGranularity = 64, /* A cache line */
      .sparseAddressSpaceSize = 0xffffffffu, /* buffer max size */
      .maxBoundDescriptorSets = MAX_SETS,
      .maxPerStageDescriptorSamplers = max_descriptor_set_size,
      .maxPerStageDescriptorUniformBuffers = max_descriptor_set_size,
      .maxPerStageDescriptorStorageBuffers = max_descriptor_set_size,
      .maxPerStageDescriptorSampledImages = max_descriptor_set_size,
      .maxPerStageDescriptorStorageImages = max_descriptor_set_size,
      .maxPerStageDescriptorInputAttachments = max_descriptor_set_size,
      .maxPerStageResources = max_descriptor_set_size,
      .maxDescriptorSetSamplers = max_descriptor_set_size,
      .maxDescriptorSetUniformBuffers = max_descriptor_set_size,
      .maxDescriptorSetUniformBuffersDynamic = MAX_DYNAMIC_UNIFORM_BUFFERS,
      .maxDescriptorSetStorageBuffers = max_descriptor_set_size,
      .maxDescriptorSetStorageBuffersDynamic = MAX_DYNAMIC_STORAGE_BUFFERS,
      .maxDescriptorSetSampledImages = max_descriptor_set_size,
      .maxDescriptorSetStorageImages = max_descriptor_set_size,
      .maxDescriptorSetInputAttachments = max_descriptor_set_size,
      .maxVertexInputAttributes = 32,
      .maxVertexInputBindings = 32,
      .maxVertexInputAttributeOffset = 2047,
      .maxVertexInputBindingStride = 2048,
      .maxVertexOutputComponents = 128,
      .maxTessellationGenerationLevel = 64,
      .maxTessellationPatchSize = 32,
      .maxTessellationControlPerVertexInputComponents = 128,
      .maxTessellationControlPerVertexOutputComponents = 128,
      .maxTessellationControlPerPatchOutputComponents = 120,
      .maxTessellationControlTotalOutputComponents = 4096,
      .maxTessellationEvaluationInputComponents = 128,
      .maxTessellationEvaluationOutputComponents = 128,
      .maxGeometryShaderInvocations = 127,
      .maxGeometryInputComponents = 64,
      .maxGeometryOutputComponents = 128,
      .maxGeometryOutputVertices = 256,
      .maxGeometryTotalOutputComponents = 1024,
      .maxFragmentInputComponents = 128,
      .maxFragmentOutputAttachments = 8,
      .maxFragmentDualSrcAttachments = 1,
      .maxFragmentCombinedOutputResources = 8,
      .maxComputeSharedMemorySize = 32768,
      .maxComputeWorkGroupCount = { 65535, 65535, 65535 },
      .maxComputeWorkGroupInvocations = 2048,
      .maxComputeWorkGroupSize = { 2048, 2048, 2048 },
      .subPixelPrecisionBits = 4 /* FIXME */,
      .subTexelPrecisionBits = 4 /* FIXME */,
      .mipmapPrecisionBits = 4 /* FIXME */,
      .maxDrawIndexedIndexValue = UINT32_MAX,
      .maxDrawIndirectCount = UINT32_MAX,
      .maxSamplerLodBias = 16,
      .maxSamplerAnisotropy = 16,
      .maxViewports = MAX_VIEWPORTS,
      .maxViewportDimensions = { (1 << 14), (1 << 14) },
      .viewportBoundsRange = { INT16_MIN, INT16_MAX },
      .viewportSubPixelBits = 8,
      .minMemoryMapAlignment = 4096, /* A page */
      .minTexelBufferOffsetAlignment = 1,
      .minUniformBufferOffsetAlignment = 4,
      .minStorageBufferOffsetAlignment = 4,
      .minTexelOffset = -32,
      .maxTexelOffset = 31,
      .minTexelGatherOffset = -32,
      .maxTexelGatherOffset = 31,
      .minInterpolationOffset = -2,
      .maxInterpolationOffset = 2,
      .subPixelInterpolationOffsetBits = 8,
      .maxFramebufferWidth = (1 << 14),
      .maxFramebufferHeight = (1 << 14),
      .maxFramebufferLayers = (1 << 10),
      .framebufferColorSampleCounts = sample_counts,
      .framebufferDepthSampleCounts = sample_counts,
      .framebufferStencilSampleCounts = sample_counts,
      .framebufferNoAttachmentsSampleCounts = sample_counts,
      .maxColorAttachments = MAX_RTS,
      .sampledImageColorSampleCounts = sample_counts,
      .sampledImageIntegerSampleCounts = VK_SAMPLE_COUNT_1_BIT,
      .sampledImageDepthSampleCounts = sample_counts,
      .sampledImageStencilSampleCounts = sample_counts,
      .storageImageSampleCounts = VK_SAMPLE_COUNT_1_BIT,
      .maxSampleMaskWords = 1,
      .timestampComputeAndGraphics = true,
      .timestampPeriod = 1,
      .maxClipDistances = 8,
      .maxCullDistances = 8,
      .maxCombinedClipAndCullDistances = 8,
      .discreteQueuePriorities = 1,
      .pointSizeRange = { 0.125, 255.875 },
      .lineWidthRange = { 0.0, 7.9921875 },
      .pointSizeGranularity = (1.0 / 8.0),
      .lineWidthGranularity = (1.0 / 128.0),
      .strictLines = false, /* FINISHME */
      .standardSampleLocations = true,
      .optimalBufferCopyOffsetAlignment = 128,
      .optimalBufferCopyRowPitchAlignment = 128,
      .nonCoherentAtomSize = 64,
   };

   *pProperties = (VkPhysicalDeviceProperties) {
      .apiVersion = tu_physical_device_api_version(pdevice),
      .driverVersion = vk_get_driver_version(),
      .vendorID = 0, /* TODO */
      .deviceID = 0,
      .deviceType = VK_PHYSICAL_DEVICE_TYPE_INTEGRATED_GPU,
      .limits = limits,
      .sparseProperties = { 0 },
   };

   strcpy(pProperties->deviceName, pdevice->name);
   memcpy(pProperties->pipelineCacheUUID, pdevice->cache_uuid, VK_UUID_SIZE);
}

void
tu_GetPhysicalDeviceProperties2(VkPhysicalDevice physicalDevice,
                                VkPhysicalDeviceProperties2KHR *pProperties)
{
   TU_FROM_HANDLE(tu_physical_device, pdevice, physicalDevice);
   tu_GetPhysicalDeviceProperties(physicalDevice, &pProperties->properties);

   vk_foreach_struct(ext, pProperties->pNext)
   {
      switch (ext->sType) {
      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PUSH_DESCRIPTOR_PROPERTIES_KHR: {
         VkPhysicalDevicePushDescriptorPropertiesKHR *properties =
            (VkPhysicalDevicePushDescriptorPropertiesKHR *)ext;
         properties->maxPushDescriptors = MAX_PUSH_DESCRIPTORS;
         break;
      }
      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_ID_PROPERTIES_KHR: {
         VkPhysicalDeviceIDPropertiesKHR *properties =
            (VkPhysicalDeviceIDPropertiesKHR *)ext;
         memcpy(properties->driverUUID, pdevice->driver_uuid, VK_UUID_SIZE);
         memcpy(properties->deviceUUID, pdevice->device_uuid, VK_UUID_SIZE);
         properties->deviceLUIDValid = false;
         break;
      }
      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MULTIVIEW_PROPERTIES_KHR: {
         VkPhysicalDeviceMultiviewPropertiesKHR *properties =
            (VkPhysicalDeviceMultiviewPropertiesKHR *)ext;
         properties->maxMultiviewViewCount = MAX_VIEWS;
         properties->maxMultiviewInstanceIndex = INT_MAX;
         break;
      }
      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_POINT_CLIPPING_PROPERTIES_KHR: {
         VkPhysicalDevicePointClippingPropertiesKHR *properties =
            (VkPhysicalDevicePointClippingPropertiesKHR *)ext;
         properties->pointClippingBehavior =
            VK_POINT_CLIPPING_BEHAVIOR_ALL_CLIP_PLANES_KHR;
         break;
      }
      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MAINTENANCE_3_PROPERTIES: {
         VkPhysicalDeviceMaintenance3Properties *properties =
            (VkPhysicalDeviceMaintenance3Properties *)ext;
         /* Make sure everything is addressable by a signed 32-bit int, and
          * our largest descriptors are 96 bytes.
          */
         properties->maxPerSetDescriptors = (1ull << 31) / 96;
         /* Our buffer size fields allow only this much */
         properties->maxMemoryAllocationSize = 0xFFFFFFFFull;
         break;
      }
      default:
         break;
      }
   }
}

static const VkQueueFamilyProperties tu_queue_family_properties = {
   .queueFlags =
      VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT | VK_QUEUE_TRANSFER_BIT,
   .queueCount = 1,
   .timestampValidBits = 64,
   .minImageTransferGranularity = (VkExtent3D) { 1, 1, 1 },
};

void
tu_GetPhysicalDeviceQueueFamilyProperties(
   VkPhysicalDevice physicalDevice,
   uint32_t *pQueueFamilyPropertyCount,
   VkQueueFamilyProperties *pQueueFamilyProperties)
{
   VK_OUTARRAY_MAKE(out, pQueueFamilyProperties, pQueueFamilyPropertyCount);

   vk_outarray_append(&out, p) {
      *p = tu_queue_family_properties;
   }
}

void
tu_GetPhysicalDeviceQueueFamilyProperties2(
   VkPhysicalDevice physicalDevice,
   uint32_t *pQueueFamilyPropertyCount,
   VkQueueFamilyProperties2KHR *pQueueFamilyProperties)
{
   VK_OUTARRAY_MAKE(out, pQueueFamilyProperties, pQueueFamilyPropertyCount);

   vk_outarray_append(&out, p) {
      p->queueFamilyProperties = tu_queue_family_properties;
   }
}

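/* Example: on a board with 8 GiB of RAM, the heuristic below advertises a
 * 6 GiB heap (3/4 of total); with 4 GiB or less it advertises half.
 */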
static uint64_t
tu_get_system_heap_size(void)
{
   struct sysinfo info;
   sysinfo(&info);

   uint64_t total_ram = (uint64_t)info.totalram * (uint64_t)info.mem_unit;

   /* We don't want to burn too much ram with the GPU. If the user has 4GiB
    * or less, we use at most half. If they have more than 4GiB, we use 3/4.
    */
   uint64_t available_ram;
   if (total_ram <= 4ull * 1024ull * 1024ull * 1024ull)
      available_ram = total_ram / 2;
   else
      available_ram = total_ram * 3 / 4;

   return available_ram;
}

void
tu_GetPhysicalDeviceMemoryProperties(
   VkPhysicalDevice physicalDevice,
   VkPhysicalDeviceMemoryProperties *pMemoryProperties)
{
   pMemoryProperties->memoryHeapCount = 1;
   pMemoryProperties->memoryHeaps[0].size = tu_get_system_heap_size();
   pMemoryProperties->memoryHeaps[0].flags = VK_MEMORY_HEAP_DEVICE_LOCAL_BIT;

   pMemoryProperties->memoryTypeCount = 1;
   pMemoryProperties->memoryTypes[0].propertyFlags =
      VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT |
      VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT |
      VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
   pMemoryProperties->memoryTypes[0].heapIndex = 0;
}

void
tu_GetPhysicalDeviceMemoryProperties2(
   VkPhysicalDevice physicalDevice,
   VkPhysicalDeviceMemoryProperties2KHR *pMemoryProperties)
{
   tu_GetPhysicalDeviceMemoryProperties(
      physicalDevice, &pMemoryProperties->memoryProperties);
}

static VkResult
tu_queue_init(struct tu_device *device,
              struct tu_queue *queue,
              uint32_t queue_family_index,
              int idx,
              VkDeviceQueueCreateFlags flags)
{
   queue->_loader_data.loaderMagic = ICD_LOADER_MAGIC;
   queue->device = device;
   queue->queue_family_index = queue_family_index;
   queue->queue_idx = idx;
   queue->flags = flags;

   return VK_SUCCESS;
}

static void
tu_queue_finish(struct tu_queue *queue)
{
}

static int
tu_get_device_extension_index(const char *name)
{
   for (unsigned i = 0; i < TU_DEVICE_EXTENSION_COUNT; ++i) {
      if (strcmp(name, tu_device_extensions[i].extensionName) == 0)
         return i;
   }
   return -1;
}

VkResult
tu_CreateDevice(VkPhysicalDevice physicalDevice,
                const VkDeviceCreateInfo *pCreateInfo,
                const VkAllocationCallbacks *pAllocator,
                VkDevice *pDevice)
{
   TU_FROM_HANDLE(tu_physical_device, physical_device, physicalDevice);
   VkResult result;
   struct tu_device *device;

   /* Check enabled features */
   if (pCreateInfo->pEnabledFeatures) {
      VkPhysicalDeviceFeatures supported_features;
      tu_GetPhysicalDeviceFeatures(physicalDevice, &supported_features);
      VkBool32 *supported_feature = (VkBool32 *)&supported_features;
      VkBool32 *enabled_feature = (VkBool32 *)pCreateInfo->pEnabledFeatures;
      unsigned num_features =
         sizeof(VkPhysicalDeviceFeatures) / sizeof(VkBool32);
      for (uint32_t i = 0; i < num_features; i++) {
         if (enabled_feature[i] && !supported_feature[i])
            return vk_error(physical_device->instance,
                            VK_ERROR_FEATURE_NOT_PRESENT);
      }
   }

   device = vk_zalloc2(&physical_device->instance->alloc,
                       pAllocator,
                       sizeof(*device),
                       8,
                       VK_SYSTEM_ALLOCATION_SCOPE_DEVICE);
   if (!device)
      return vk_error(physical_device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);

   device->_loader_data.loaderMagic = ICD_LOADER_MAGIC;
   device->instance = physical_device->instance;
   device->physical_device = physical_device;

   if (pAllocator)
      device->alloc = *pAllocator;
   else
      device->alloc = physical_device->instance->alloc;

   for (uint32_t i = 0; i < pCreateInfo->enabledExtensionCount; i++) {
      const char *ext_name = pCreateInfo->ppEnabledExtensionNames[i];
      int index = tu_get_device_extension_index(ext_name);
      if (index < 0 ||
          !physical_device->supported_extensions.extensions[index]) {
         vk_free(&device->alloc, device);
         return vk_error(physical_device->instance,
                         VK_ERROR_EXTENSION_NOT_PRESENT);
      }

      device->enabled_extensions.extensions[index] = true;
   }

   for (unsigned i = 0; i < pCreateInfo->queueCreateInfoCount; i++) {
      const VkDeviceQueueCreateInfo *queue_create =
         &pCreateInfo->pQueueCreateInfos[i];
      uint32_t qfi = queue_create->queueFamilyIndex;
      device->queues[qfi] =
         vk_alloc(&device->alloc,
                  queue_create->queueCount * sizeof(struct tu_queue),
                  8,
                  VK_SYSTEM_ALLOCATION_SCOPE_DEVICE);
      if (!device->queues[qfi]) {
         result = VK_ERROR_OUT_OF_HOST_MEMORY;
         goto fail;
      }

      memset(device->queues[qfi],
             0,
             queue_create->queueCount * sizeof(struct tu_queue));

      device->queue_count[qfi] = queue_create->queueCount;

      for (unsigned q = 0; q < queue_create->queueCount; q++) {
         result = tu_queue_init(
            device, &device->queues[qfi][q], qfi, q, queue_create->flags);
         if (result != VK_SUCCESS)
            goto fail;
      }
   }

   VkPipelineCacheCreateInfo ci;
   ci.sType = VK_STRUCTURE_TYPE_PIPELINE_CACHE_CREATE_INFO;
   ci.pNext = NULL;
   ci.flags = 0;
   ci.pInitialData = NULL;
   ci.initialDataSize = 0;
   VkPipelineCache pc;
   result =
      tu_CreatePipelineCache(tu_device_to_handle(device), &ci, NULL, &pc);
   if (result != VK_SUCCESS)
      goto fail;

   device->mem_cache = tu_pipeline_cache_from_handle(pc);

   *pDevice = tu_device_to_handle(device);
   return VK_SUCCESS;

fail:
   for (unsigned i = 0; i < TU_MAX_QUEUE_FAMILIES; i++) {
      for (unsigned q = 0; q < device->queue_count[i]; q++)
         tu_queue_finish(&device->queues[i][q]);
      if (device->queue_count[i])
         vk_free(&device->alloc, device->queues[i]);
   }

   vk_free(&device->alloc, device);
   return result;
}

void
tu_DestroyDevice(VkDevice _device, const VkAllocationCallbacks *pAllocator)
{
   TU_FROM_HANDLE(tu_device, device, _device);

   if (!device)
      return;

   for (unsigned i = 0; i < TU_MAX_QUEUE_FAMILIES; i++) {
      for (unsigned q = 0; q < device->queue_count[i]; q++)
         tu_queue_finish(&device->queues[i][q]);
      if (device->queue_count[i])
         vk_free(&device->alloc, device->queues[i]);
   }

   VkPipelineCache pc = tu_pipeline_cache_to_handle(device->mem_cache);
   tu_DestroyPipelineCache(tu_device_to_handle(device), pc, NULL);

   vk_free(&device->alloc, device);
}

VkResult
tu_EnumerateInstanceLayerProperties(uint32_t *pPropertyCount,
                                    VkLayerProperties *pProperties)
{
   *pPropertyCount = 0;
   return VK_SUCCESS;
}

VkResult
tu_EnumerateDeviceLayerProperties(VkPhysicalDevice physicalDevice,
                                  uint32_t *pPropertyCount,
                                  VkLayerProperties *pProperties)
{
   *pPropertyCount = 0;
   return VK_SUCCESS;
}

void
tu_GetDeviceQueue2(VkDevice _device,
                   const VkDeviceQueueInfo2 *pQueueInfo,
                   VkQueue *pQueue)
{
   TU_FROM_HANDLE(tu_device, device, _device);
   struct tu_queue *queue;

   queue =
      &device->queues[pQueueInfo->queueFamilyIndex][pQueueInfo->queueIndex];
   if (pQueueInfo->flags != queue->flags) {
      /* From the Vulkan 1.1.70 spec:
       *
       * "The queue returned by vkGetDeviceQueue2 must have the same
       * flags value from this structure as that used at device
       * creation time in a VkDeviceQueueCreateInfo instance. If no
       * matching flags were specified at device creation time then
       * pQueue will return VK_NULL_HANDLE."
       */
      *pQueue = VK_NULL_HANDLE;
      return;
   }

   *pQueue = tu_queue_to_handle(queue);
}

void
tu_GetDeviceQueue(VkDevice _device,
                  uint32_t queueFamilyIndex,
                  uint32_t queueIndex,
                  VkQueue *pQueue)
{
   const VkDeviceQueueInfo2 info =
      (VkDeviceQueueInfo2) { .sType = VK_STRUCTURE_TYPE_DEVICE_QUEUE_INFO_2,
                             .queueFamilyIndex = queueFamilyIndex,
                             .queueIndex = queueIndex };

   tu_GetDeviceQueue2(_device, &info, pQueue);
}

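/* Command submission is still stubbed out: submits and waits are accepted
 * and immediately "complete" so the rest of the driver can be brought up.
 */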
VkResult
tu_QueueSubmit(VkQueue _queue,
               uint32_t submitCount,
               const VkSubmitInfo *pSubmits,
               VkFence _fence)
{
   return VK_SUCCESS;
}

VkResult
tu_QueueWaitIdle(VkQueue _queue)
{
   return VK_SUCCESS;
}

VkResult
tu_DeviceWaitIdle(VkDevice _device)
{
   TU_FROM_HANDLE(tu_device, device, _device);

   for (unsigned i = 0; i < TU_MAX_QUEUE_FAMILIES; i++) {
      for (unsigned q = 0; q < device->queue_count[i]; q++) {
         tu_QueueWaitIdle(tu_queue_to_handle(&device->queues[i][q]));
      }
   }
   return VK_SUCCESS;
}

VkResult
tu_EnumerateInstanceExtensionProperties(const char *pLayerName,
                                        uint32_t *pPropertyCount,
                                        VkExtensionProperties *pProperties)
{
   VK_OUTARRAY_MAKE(out, pProperties, pPropertyCount);

   /* We support no layers */
   if (pLayerName)
      return vk_error(NULL, VK_ERROR_LAYER_NOT_PRESENT);

   for (int i = 0; i < TU_INSTANCE_EXTENSION_COUNT; i++) {
      if (tu_supported_instance_extensions.extensions[i]) {
         vk_outarray_append(&out, prop) { *prop = tu_instance_extensions[i]; }
      }
   }

   return vk_outarray_status(&out);
}

VkResult
tu_EnumerateDeviceExtensionProperties(VkPhysicalDevice physicalDevice,
                                      const char *pLayerName,
                                      uint32_t *pPropertyCount,
                                      VkExtensionProperties *pProperties)
{
   TU_FROM_HANDLE(tu_physical_device, device, physicalDevice);
   VK_OUTARRAY_MAKE(out, pProperties, pPropertyCount);

   /* We support no layers */
   if (pLayerName)
      return vk_error(NULL, VK_ERROR_LAYER_NOT_PRESENT);

   for (int i = 0; i < TU_DEVICE_EXTENSION_COUNT; i++) {
      if (device->supported_extensions.extensions[i]) {
         vk_outarray_append(&out, prop) { *prop = tu_device_extensions[i]; }
      }
   }

   return vk_outarray_status(&out);
}

PFN_vkVoidFunction
tu_GetInstanceProcAddr(VkInstance _instance, const char *pName)
{
   TU_FROM_HANDLE(tu_instance, instance, _instance);

   return tu_lookup_entrypoint_checked(pName,
                                       instance ? instance->api_version : 0,
                                       instance ? &instance->enabled_extensions
                                                : NULL,
                                       NULL);
}

/* The loader wants us to expose a second GetInstanceProcAddr function
 * to work around certain LD_PRELOAD issues seen in apps.
 */
PUBLIC
VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL
vk_icdGetInstanceProcAddr(VkInstance instance, const char *pName);

PUBLIC
VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL
vk_icdGetInstanceProcAddr(VkInstance instance, const char *pName)
{
   return tu_GetInstanceProcAddr(instance, pName);
}

PFN_vkVoidFunction
tu_GetDeviceProcAddr(VkDevice _device, const char *pName)
{
   TU_FROM_HANDLE(tu_device, device, _device);

   return tu_lookup_entrypoint_checked(pName,
                                       device->instance->api_version,
                                       &device->instance->enabled_extensions,
                                       &device->enabled_extensions);
}

static VkResult
tu_alloc_memory(struct tu_device *device,
                const VkMemoryAllocateInfo *pAllocateInfo,
                const VkAllocationCallbacks *pAllocator,
                VkDeviceMemory *pMem)
{
   struct tu_device_memory *mem;
   VkResult result;

   assert(pAllocateInfo->sType == VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO);

   if (pAllocateInfo->allocationSize == 0) {
      /* Apparently, this is allowed */
      *pMem = VK_NULL_HANDLE;
      return VK_SUCCESS;
   }

   mem = vk_alloc2(&device->alloc,
                   pAllocator,
                   sizeof(*mem),
                   8,
                   VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (mem == NULL)
      return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);

   result = tu_bo_init_new(device, &mem->bo, pAllocateInfo->allocationSize);
   if (result != VK_SUCCESS) {
      vk_free2(&device->alloc, pAllocator, mem);
      return result;
   }

   mem->size = pAllocateInfo->allocationSize;
   mem->type_index = pAllocateInfo->memoryTypeIndex;

   mem->map = NULL;
   mem->user_ptr = NULL;

   *pMem = tu_device_memory_to_handle(mem);

   return VK_SUCCESS;
}

VkResult
tu_AllocateMemory(VkDevice _device,
                  const VkMemoryAllocateInfo *pAllocateInfo,
                  const VkAllocationCallbacks *pAllocator,
                  VkDeviceMemory *pMem)
{
   TU_FROM_HANDLE(tu_device, device, _device);
   return tu_alloc_memory(device, pAllocateInfo, pAllocator, pMem);
}

void
tu_FreeMemory(VkDevice _device,
              VkDeviceMemory _mem,
              const VkAllocationCallbacks *pAllocator)
{
   TU_FROM_HANDLE(tu_device, device, _device);
   TU_FROM_HANDLE(tu_device_memory, mem, _mem);

   if (mem == NULL)
      return;

   tu_bo_finish(device, &mem->bo);
   vk_free2(&device->alloc, pAllocator, mem);
}

VkResult
tu_MapMemory(VkDevice _device,
             VkDeviceMemory _memory,
             VkDeviceSize offset,
             VkDeviceSize size,
             VkMemoryMapFlags flags,
             void **ppData)
{
   TU_FROM_HANDLE(tu_device, device, _device);
   TU_FROM_HANDLE(tu_device_memory, mem, _memory);
   VkResult result;

   if (mem == NULL) {
      *ppData = NULL;
      return VK_SUCCESS;
   }

   if (mem->user_ptr) {
      *ppData = mem->user_ptr;
   } else if (!mem->map) {
      result = tu_bo_map(device, &mem->bo);
      if (result != VK_SUCCESS)
         return result;
      *ppData = mem->map = mem->bo.map;
   } else
      *ppData = mem->map;

   if (*ppData) {
      *ppData += offset;
      return VK_SUCCESS;
   }

   return vk_error(device->instance, VK_ERROR_MEMORY_MAP_FAILED);
}

void
tu_UnmapMemory(VkDevice _device, VkDeviceMemory _memory)
{
   /* I do not see any unmapping done by the freedreno Gallium driver. */
}

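/* The single memory type we advertise is host-coherent (see
 * tu_GetPhysicalDeviceMemoryProperties), so flushing and invalidating
 * mapped ranges can be no-ops.
 */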
VkResult
tu_FlushMappedMemoryRanges(VkDevice _device,
                           uint32_t memoryRangeCount,
                           const VkMappedMemoryRange *pMemoryRanges)
{
   return VK_SUCCESS;
}

VkResult
tu_InvalidateMappedMemoryRanges(VkDevice _device,
                                uint32_t memoryRangeCount,
                                const VkMappedMemoryRange *pMemoryRanges)
{
   return VK_SUCCESS;
}

void
tu_GetBufferMemoryRequirements(VkDevice _device,
                               VkBuffer _buffer,
                               VkMemoryRequirements *pMemoryRequirements)
{
   TU_FROM_HANDLE(tu_buffer, buffer, _buffer);

   pMemoryRequirements->memoryTypeBits = 1;
   pMemoryRequirements->alignment = 16;
   pMemoryRequirements->size =
      align64(buffer->size, pMemoryRequirements->alignment);
}

void
tu_GetBufferMemoryRequirements2(
   VkDevice device,
   const VkBufferMemoryRequirementsInfo2KHR *pInfo,
   VkMemoryRequirements2KHR *pMemoryRequirements)
{
   tu_GetBufferMemoryRequirements(
      device, pInfo->buffer, &pMemoryRequirements->memoryRequirements);
}

void
tu_GetImageMemoryRequirements(VkDevice _device,
                              VkImage _image,
                              VkMemoryRequirements *pMemoryRequirements)
{
   TU_FROM_HANDLE(tu_image, image, _image);

   pMemoryRequirements->memoryTypeBits = 1;
   pMemoryRequirements->size = image->size;
   pMemoryRequirements->alignment = image->alignment;
}

void
tu_GetImageMemoryRequirements2(VkDevice device,
                               const VkImageMemoryRequirementsInfo2KHR *pInfo,
                               VkMemoryRequirements2KHR *pMemoryRequirements)
{
   tu_GetImageMemoryRequirements(
      device, pInfo->image, &pMemoryRequirements->memoryRequirements);
}

void
tu_GetImageSparseMemoryRequirements(
   VkDevice device,
   VkImage image,
   uint32_t *pSparseMemoryRequirementCount,
   VkSparseImageMemoryRequirements *pSparseMemoryRequirements)
{
   stub();
}

void
tu_GetImageSparseMemoryRequirements2(
   VkDevice device,
   const VkImageSparseMemoryRequirementsInfo2KHR *pInfo,
   uint32_t *pSparseMemoryRequirementCount,
   VkSparseImageMemoryRequirements2KHR *pSparseMemoryRequirements)
{
   stub();
}

void
tu_GetDeviceMemoryCommitment(VkDevice device,
                             VkDeviceMemory memory,
                             VkDeviceSize *pCommittedMemoryInBytes)
{
   *pCommittedMemoryInBytes = 0;
}

VkResult
tu_BindBufferMemory2(VkDevice device,
                     uint32_t bindInfoCount,
                     const VkBindBufferMemoryInfoKHR *pBindInfos)
{
   return VK_SUCCESS;
}

VkResult
tu_BindBufferMemory(VkDevice device,
                    VkBuffer buffer,
                    VkDeviceMemory memory,
                    VkDeviceSize memoryOffset)
{
   const VkBindBufferMemoryInfoKHR info = {
      .sType = VK_STRUCTURE_TYPE_BIND_BUFFER_MEMORY_INFO_KHR,
      .buffer = buffer,
      .memory = memory,
      .memoryOffset = memoryOffset
   };

   return tu_BindBufferMemory2(device, 1, &info);
}

VkResult
tu_BindImageMemory2(VkDevice device,
                    uint32_t bindInfoCount,
                    const VkBindImageMemoryInfoKHR *pBindInfos)
{
   return VK_SUCCESS;
}

VkResult
tu_BindImageMemory(VkDevice device,
                   VkImage image,
                   VkDeviceMemory memory,
                   VkDeviceSize memoryOffset)
{
   const VkBindImageMemoryInfoKHR info = {
      .sType = VK_STRUCTURE_TYPE_BIND_IMAGE_MEMORY_INFO_KHR,
      .image = image,
      .memory = memory,
      .memoryOffset = memoryOffset
   };

   return tu_BindImageMemory2(device, 1, &info);
}

VkResult
tu_QueueBindSparse(VkQueue _queue,
                   uint32_t bindInfoCount,
                   const VkBindSparseInfo *pBindInfo,
                   VkFence _fence)
{
   return VK_SUCCESS;
}

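/* Fences and semaphores below are host-side stubs; they are not yet backed
 * by kernel synchronization objects.
 */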
VkResult
tu_CreateFence(VkDevice _device,
               const VkFenceCreateInfo *pCreateInfo,
               const VkAllocationCallbacks *pAllocator,
               VkFence *pFence)
{
   TU_FROM_HANDLE(tu_device, device, _device);

   struct tu_fence *fence = vk_alloc2(&device->alloc,
                                      pAllocator,
                                      sizeof(*fence),
                                      8,
                                      VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);

   if (!fence)
      return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);

   *pFence = tu_fence_to_handle(fence);

   return VK_SUCCESS;
}

void
tu_DestroyFence(VkDevice _device,
                VkFence _fence,
                const VkAllocationCallbacks *pAllocator)
{
   TU_FROM_HANDLE(tu_device, device, _device);
   TU_FROM_HANDLE(tu_fence, fence, _fence);

   if (!fence)
      return;

   vk_free2(&device->alloc, pAllocator, fence);
}

VkResult
tu_WaitForFences(VkDevice _device,
                 uint32_t fenceCount,
                 const VkFence *pFences,
                 VkBool32 waitAll,
                 uint64_t timeout)
{
   return VK_SUCCESS;
}

VkResult
tu_ResetFences(VkDevice _device, uint32_t fenceCount, const VkFence *pFences)
{
   return VK_SUCCESS;
}

VkResult
tu_GetFenceStatus(VkDevice _device, VkFence _fence)
{
   return VK_SUCCESS;
}

// Queue semaphore functions

VkResult
tu_CreateSemaphore(VkDevice _device,
                   const VkSemaphoreCreateInfo *pCreateInfo,
                   const VkAllocationCallbacks *pAllocator,
                   VkSemaphore *pSemaphore)
{
   TU_FROM_HANDLE(tu_device, device, _device);

   struct tu_semaphore *sem = vk_alloc2(&device->alloc,
                                        pAllocator,
                                        sizeof(*sem),
                                        8,
                                        VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (!sem)
      return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);

   *pSemaphore = tu_semaphore_to_handle(sem);
   return VK_SUCCESS;
}

void
tu_DestroySemaphore(VkDevice _device,
                    VkSemaphore _semaphore,
                    const VkAllocationCallbacks *pAllocator)
{
   TU_FROM_HANDLE(tu_device, device, _device);
   TU_FROM_HANDLE(tu_semaphore, sem, _semaphore);
   if (!_semaphore)
      return;

   vk_free2(&device->alloc, pAllocator, sem);
}

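/* TODO: tu_CreateEvent does not yet allocate backing storage for event->map,
 * which tu_GetEventStatus/tu_SetEvent/tu_ResetEvent dereference. Events need
 * a backing BO before they can actually be used.
 */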
VkResult
tu_CreateEvent(VkDevice _device,
               const VkEventCreateInfo *pCreateInfo,
               const VkAllocationCallbacks *pAllocator,
               VkEvent *pEvent)
{
   TU_FROM_HANDLE(tu_device, device, _device);
   struct tu_event *event = vk_alloc2(&device->alloc,
                                      pAllocator,
                                      sizeof(*event),
                                      8,
                                      VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);

   if (!event)
      return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);

   *pEvent = tu_event_to_handle(event);

   return VK_SUCCESS;
}

void
tu_DestroyEvent(VkDevice _device,
                VkEvent _event,
                const VkAllocationCallbacks *pAllocator)
{
   TU_FROM_HANDLE(tu_device, device, _device);
   TU_FROM_HANDLE(tu_event, event, _event);

   if (!event)
      return;
   vk_free2(&device->alloc, pAllocator, event);
}

VkResult
tu_GetEventStatus(VkDevice _device, VkEvent _event)
{
   TU_FROM_HANDLE(tu_event, event, _event);

   if (*event->map == 1)
      return VK_EVENT_SET;
   return VK_EVENT_RESET;
}

VkResult
tu_SetEvent(VkDevice _device, VkEvent _event)
{
   TU_FROM_HANDLE(tu_event, event, _event);
   *event->map = 1;

   return VK_SUCCESS;
}

VkResult
tu_ResetEvent(VkDevice _device, VkEvent _event)
{
   TU_FROM_HANDLE(tu_event, event, _event);
   *event->map = 0;

   return VK_SUCCESS;
}

VkResult
tu_CreateBuffer(VkDevice _device,
                const VkBufferCreateInfo *pCreateInfo,
                const VkAllocationCallbacks *pAllocator,
                VkBuffer *pBuffer)
{
   TU_FROM_HANDLE(tu_device, device, _device);
   struct tu_buffer *buffer;

   assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO);

   buffer = vk_alloc2(&device->alloc,
                      pAllocator,
                      sizeof(*buffer),
                      8,
                      VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (buffer == NULL)
      return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);

   buffer->size = pCreateInfo->size;
   buffer->usage = pCreateInfo->usage;
   buffer->flags = pCreateInfo->flags;

   *pBuffer = tu_buffer_to_handle(buffer);

   return VK_SUCCESS;
}

void
tu_DestroyBuffer(VkDevice _device,
                 VkBuffer _buffer,
                 const VkAllocationCallbacks *pAllocator)
{
   TU_FROM_HANDLE(tu_device, device, _device);
   TU_FROM_HANDLE(tu_buffer, buffer, _buffer);

   if (!buffer)
      return;

   vk_free2(&device->alloc, pAllocator, buffer);
}

static uint32_t
tu_surface_max_layer_count(struct tu_image_view *iview)
{
   return iview->type == VK_IMAGE_VIEW_TYPE_3D
             ? iview->extent.depth
             : (iview->base_layer + iview->layer_count);
}

VkResult
tu_CreateFramebuffer(VkDevice _device,
                     const VkFramebufferCreateInfo *pCreateInfo,
                     const VkAllocationCallbacks *pAllocator,
                     VkFramebuffer *pFramebuffer)
{
   TU_FROM_HANDLE(tu_device, device, _device);
   struct tu_framebuffer *framebuffer;

   assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO);

   size_t size =
      sizeof(*framebuffer) +
      sizeof(struct tu_attachment_info) * pCreateInfo->attachmentCount;
   framebuffer = vk_alloc2(&device->alloc, pAllocator, size, 8,
                           VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (framebuffer == NULL)
      return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);

   framebuffer->attachment_count = pCreateInfo->attachmentCount;
   framebuffer->width = pCreateInfo->width;
   framebuffer->height = pCreateInfo->height;
   framebuffer->layers = pCreateInfo->layers;
   for (uint32_t i = 0; i < pCreateInfo->attachmentCount; i++) {
      VkImageView _iview = pCreateInfo->pAttachments[i];
      struct tu_image_view *iview = tu_image_view_from_handle(_iview);
      framebuffer->attachments[i].attachment = iview;

      framebuffer->width = MIN2(framebuffer->width, iview->extent.width);
      framebuffer->height = MIN2(framebuffer->height, iview->extent.height);
      framebuffer->layers =
         MIN2(framebuffer->layers, tu_surface_max_layer_count(iview));
   }

   *pFramebuffer = tu_framebuffer_to_handle(framebuffer);
   return VK_SUCCESS;
}

void
tu_DestroyFramebuffer(VkDevice _device,
                      VkFramebuffer _fb,
                      const VkAllocationCallbacks *pAllocator)
{
   TU_FROM_HANDLE(tu_device, device, _device);
   TU_FROM_HANDLE(tu_framebuffer, fb, _fb);

   if (!fb)
      return;
   vk_free2(&device->alloc, pAllocator, fb);
}

static void
tu_init_sampler(struct tu_device *device,
                struct tu_sampler *sampler,
                const VkSamplerCreateInfo *pCreateInfo)
{
}

VkResult
tu_CreateSampler(VkDevice _device,
                 const VkSamplerCreateInfo *pCreateInfo,
                 const VkAllocationCallbacks *pAllocator,
                 VkSampler *pSampler)
{
   TU_FROM_HANDLE(tu_device, device, _device);
   struct tu_sampler *sampler;

   assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_SAMPLER_CREATE_INFO);

   sampler = vk_alloc2(&device->alloc,
                       pAllocator,
                       sizeof(*sampler),
                       8,
                       VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (!sampler)
      return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);

   tu_init_sampler(device, sampler, pCreateInfo);
   *pSampler = tu_sampler_to_handle(sampler);

   return VK_SUCCESS;
}

void
tu_DestroySampler(VkDevice _device,
                  VkSampler _sampler,
                  const VkAllocationCallbacks *pAllocator)
{
   TU_FROM_HANDLE(tu_device, device, _device);
   TU_FROM_HANDLE(tu_sampler, sampler, _sampler);

   if (!sampler)
      return;
   vk_free2(&device->alloc, pAllocator, sampler);
}

/* vk_icd.h does not declare this function, so we declare it here to
 * suppress Wmissing-prototypes.
 */
PUBLIC VKAPI_ATTR VkResult VKAPI_CALL
vk_icdNegotiateLoaderICDInterfaceVersion(uint32_t *pSupportedVersion);

PUBLIC VKAPI_ATTR VkResult VKAPI_CALL
vk_icdNegotiateLoaderICDInterfaceVersion(uint32_t *pSupportedVersion)
{
   /* For the full details on loader interface versioning, see
    * <https://github.com/KhronosGroup/Vulkan-LoaderAndValidationLayers/blob/master/loader/LoaderAndLayerInterface.md>.
    * What follows is a condensed summary, to help you navigate the large and
    * confusing official doc.
    *
    * - Loader interface v0 is incompatible with later versions. We don't
    *   support it.
    *
    * - In loader interface v1:
    *   - The first ICD entrypoint called by the loader is
    *     vk_icdGetInstanceProcAddr(). The ICD must statically expose this
    *     entrypoint.
    *   - The ICD must statically expose no other Vulkan symbol unless it is
    *     linked with -Bsymbolic.
    *   - Each dispatchable Vulkan handle created by the ICD must be
    *     a pointer to a struct whose first member is VK_LOADER_DATA. The
    *     ICD must initialize VK_LOADER_DATA.loaderMagic to ICD_LOADER_MAGIC.
    *   - The loader implements vkCreate{PLATFORM}SurfaceKHR() and
    *     vkDestroySurfaceKHR(). The ICD must be capable of working with
    *     such loader-managed surfaces.
    *
    * - Loader interface v2 differs from v1 in:
    *   - The first ICD entrypoint called by the loader is
    *     vk_icdNegotiateLoaderICDInterfaceVersion(). The ICD must
    *     statically expose this entrypoint.
    *
    * - Loader interface v3 differs from v2 in:
    *   - The ICD must implement vkCreate{PLATFORM}SurfaceKHR(),
    *     vkDestroySurfaceKHR(), and other API which uses VkSurfaceKHR,
    *     because the loader no longer does so.
    */
   *pSupportedVersion = MIN2(*pSupportedVersion, 3u);
   return VK_SUCCESS;
}

void
tu_GetPhysicalDeviceExternalSemaphoreProperties(
   VkPhysicalDevice physicalDevice,
   const VkPhysicalDeviceExternalSemaphoreInfoKHR *pExternalSemaphoreInfo,
   VkExternalSemaphorePropertiesKHR *pExternalSemaphoreProperties)
{
   pExternalSemaphoreProperties->exportFromImportedHandleTypes = 0;
   pExternalSemaphoreProperties->compatibleHandleTypes = 0;
   pExternalSemaphoreProperties->externalSemaphoreFeatures = 0;
}

void
tu_GetPhysicalDeviceExternalFenceProperties(
   VkPhysicalDevice physicalDevice,
   const VkPhysicalDeviceExternalFenceInfoKHR *pExternalFenceInfo,
   VkExternalFencePropertiesKHR *pExternalFenceProperties)
{
   pExternalFenceProperties->exportFromImportedHandleTypes = 0;
   pExternalFenceProperties->compatibleHandleTypes = 0;
   pExternalFenceProperties->externalFenceFeatures = 0;
}

VkResult
tu_CreateDebugReportCallbackEXT(
   VkInstance _instance,
   const VkDebugReportCallbackCreateInfoEXT *pCreateInfo,
   const VkAllocationCallbacks *pAllocator,
   VkDebugReportCallbackEXT *pCallback)
{
   TU_FROM_HANDLE(tu_instance, instance, _instance);
   return vk_create_debug_report_callback(&instance->debug_report_callbacks,
                                          pCreateInfo,
                                          pAllocator,
                                          &instance->alloc,
                                          pCallback);
}

void
tu_DestroyDebugReportCallbackEXT(VkInstance _instance,
                                 VkDebugReportCallbackEXT _callback,
                                 const VkAllocationCallbacks *pAllocator)
{
   TU_FROM_HANDLE(tu_instance, instance, _instance);
   vk_destroy_debug_report_callback(&instance->debug_report_callbacks,
                                    _callback,
                                    pAllocator,
                                    &instance->alloc);
}

void
tu_DebugReportMessageEXT(VkInstance _instance,
                         VkDebugReportFlagsEXT flags,
                         VkDebugReportObjectTypeEXT objectType,
                         uint64_t object,
                         size_t location,
                         int32_t messageCode,
                         const char *pLayerPrefix,
                         const char *pMessage)
{
   TU_FROM_HANDLE(tu_instance, instance, _instance);
   vk_debug_report(&instance->debug_report_callbacks,
                   flags,
                   objectType,
                   object,
                   location,
                   messageCode,
                   pLayerPrefix,
                   pMessage);
}

void
tu_GetDeviceGroupPeerMemoryFeatures(
   VkDevice device,
   uint32_t heapIndex,
   uint32_t localDeviceIndex,
   uint32_t remoteDeviceIndex,
   VkPeerMemoryFeatureFlags *pPeerMemoryFeatures)
{
   assert(localDeviceIndex == remoteDeviceIndex);

   *pPeerMemoryFeatures = VK_PEER_MEMORY_FEATURE_COPY_SRC_BIT |
                          VK_PEER_MEMORY_FEATURE_COPY_DST_BIT |
                          VK_PEER_MEMORY_FEATURE_GENERIC_SRC_BIT |
                          VK_PEER_MEMORY_FEATURE_GENERIC_DST_BIT;
}