turnip: Stop hardcoding the msm version check.
[mesa.git] / src / freedreno / vulkan / tu_device.c
/*
 * Copyright © 2016 Red Hat.
 * Copyright © 2016 Bas Nieuwenhuizen
 *
 * based in part on anv driver which is:
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "tu_private.h"
#include "util/debug.h"
#include "util/disk_cache.h"
#include "util/strtod.h"
#include "vk_format.h"
#include "vk_util.h"
#include <fcntl.h>
#include <stdbool.h>
#include <string.h>
#include <sys/mman.h>
#include <sys/sysinfo.h>
#include <unistd.h>
#include <xf86drm.h>
#include <msm_drm.h>

static int
tu_device_get_cache_uuid(uint16_t family, void *uuid)
{
   uint32_t mesa_timestamp;
   uint16_t f = family;
   memset(uuid, 0, VK_UUID_SIZE);
   if (!disk_cache_get_function_timestamp(tu_device_get_cache_uuid,
                                          &mesa_timestamp))
      return -1;

   memcpy(uuid, &mesa_timestamp, 4);
   memcpy((char *)uuid + 4, &f, 2);
   snprintf((char *)uuid + 6, VK_UUID_SIZE - 10, "tu");
   return 0;
}
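
/* Illustrative sketch (not part of the driver): the cache UUID built above
 * packs three fields into the 16-byte VK_UUID_SIZE buffer, following the
 * layout implied by the memcpy/snprintf calls:
 *
 *    bytes 0-3   mesa_timestamp (build timestamp of the DSO containing
 *                tu_device_get_cache_uuid)
 *    bytes 4-5   GPU family, e.g. 630 for an a630
 *    bytes 6-8   the string "tu" plus its NUL terminator
 *    bytes 9-15  zero (from the initial memset)
 *
 * Two builds therefore produce the same pipeline-cache UUID only when both
 * the Mesa build timestamp and the GPU family match.
 */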

static void
tu_get_driver_uuid(void *uuid)
{
   memset(uuid, 0, VK_UUID_SIZE);
}

static void
tu_get_device_uuid(void *uuid)
{
   tu_use_args(uuid);
   tu_stub();
}

VkResult
tu_bo_init_new(struct tu_device *dev, struct tu_bo *bo, uint64_t size)
{
   /* TODO: Choose better flags. As of 2018-11-12, freedreno/drm/msm_bo.c
    * always sets `flags = MSM_BO_WC`, and we copy that behavior here.
    */
   uint32_t gem_handle = tu_gem_new(dev, size, MSM_BO_WC);
   if (!gem_handle)
      goto fail_new;

   /* Calling DRM_MSM_GEM_INFO forces the kernel to allocate backing pages. We
    * want immediate backing pages because vkAllocateMemory and friends must
    * not lazily fail.
    *
    * TODO(chadv): Must we really call DRM_MSM_GEM_INFO to acquire backing
    * pages? I infer so from reading comments in msm_bo.c:bo_allocate(), but
    * maybe I misunderstand.
    */

   /* TODO: Do we need 'offset' if we have 'iova'? */
   uint64_t offset = tu_gem_info_offset(dev, gem_handle);
   if (!offset)
      goto fail_info;

   uint64_t iova = tu_gem_info_iova(dev, gem_handle);
   if (!iova)
      goto fail_info;

   *bo = (struct tu_bo) {
      .gem_handle = gem_handle,
      .size = size,
      .offset = offset,
      .iova = iova,
   };

   return VK_SUCCESS;

fail_info:
   tu_gem_close(dev, gem_handle);
fail_new:
   return vk_error(dev->instance, VK_ERROR_OUT_OF_DEVICE_MEMORY);
}

VkResult
tu_bo_map(struct tu_device *dev, struct tu_bo *bo)
{
   if (bo->map)
      return VK_SUCCESS;

   /* TODO: Should we use the wrapper os_mmap() like Freedreno does? */
   void *map = mmap(0, bo->size, PROT_READ | PROT_WRITE, MAP_SHARED,
                    dev->physical_device->local_fd, bo->offset);
   if (map == MAP_FAILED)
      return vk_error(dev->instance, VK_ERROR_MEMORY_MAP_FAILED);

   bo->map = map;
   return VK_SUCCESS;
}

void
tu_bo_finish(struct tu_device *dev, struct tu_bo *bo)
{
   assert(bo->gem_handle);

   if (bo->map)
      munmap(bo->map, bo->size);

   tu_gem_close(dev, bo->gem_handle);
}
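
/* Illustrative usage sketch of the BO helpers above (hypothetical caller,
 * not part of the driver): allocate, map on demand, then release.
 *
 *    struct tu_bo bo;
 *    VkResult result = tu_bo_init_new(dev, &bo, 4096);
 *    if (result != VK_SUCCESS)
 *       return result;
 *    result = tu_bo_map(dev, &bo);   // mmaps dev->physical_device->local_fd
 *    if (result == VK_SUCCESS)
 *       memset(bo.map, 0, bo.size);  // CPU-visible write-combined memory
 *    tu_bo_finish(dev, &bo);         // munmaps (if mapped), closes the handle
 *
 * tu_bo_map() is idempotent: a second call returns VK_SUCCESS without
 * remapping, because bo->map is checked first.
 */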

static VkResult
tu_physical_device_init(struct tu_physical_device *device,
                        struct tu_instance *instance,
                        drmDevicePtr drm_device)
{
   const char *path = drm_device->nodes[DRM_NODE_RENDER];
   VkResult result = VK_SUCCESS;
   drmVersionPtr version;
   int fd;
   int master_fd = -1;
   struct fd_pipe *tmp_pipe = NULL;
   uint64_t val;

   fd = open(path, O_RDWR | O_CLOEXEC);
   if (fd < 0) {
      return vk_errorf(instance, VK_ERROR_INCOMPATIBLE_DRIVER,
                       "failed to open device %s", path);
   }

   /* Version 1.3 added MSM_INFO_IOVA. */
   const int min_version_major = 1;
   const int min_version_minor = 3;

   version = drmGetVersion(fd);
   if (!version) {
      close(fd);
      return vk_errorf(instance, VK_ERROR_INCOMPATIBLE_DRIVER,
                       "failed to query kernel driver version for device %s",
                       path);
   }

   if (strcmp(version->name, "msm")) {
      drmFreeVersion(version);
      if (master_fd != -1)
         close(master_fd);
      close(fd);
      return vk_errorf(instance, VK_ERROR_INCOMPATIBLE_DRIVER,
                       "device %s does not use the msm kernel driver", path);
   }

   if (version->version_major != min_version_major ||
       version->version_minor < min_version_minor) {
      result = vk_errorf(instance, VK_ERROR_INCOMPATIBLE_DRIVER,
                         "kernel driver for device %s has version %d.%d, "
                         "but Vulkan requires version >= %d.%d",
                         path,
                         version->version_major, version->version_minor,
                         min_version_major, min_version_minor);
      drmFreeVersion(version);
      close(fd);
      return result;
   }

   drmFreeVersion(version);

   if (instance->debug_flags & TU_DEBUG_STARTUP)
      tu_logi("Found compatible device '%s'.", path);

   device->_loader_data.loaderMagic = ICD_LOADER_MAGIC;
   device->instance = instance;
   assert(strlen(path) < ARRAY_SIZE(device->path));
   strncpy(device->path, path, ARRAY_SIZE(device->path));

   if (instance->enabled_extensions.KHR_display) {
      master_fd = open(drm_device->nodes[DRM_NODE_PRIMARY], O_RDWR | O_CLOEXEC);
      if (master_fd >= 0) {
         /* TODO: free master_fd if accel is not working? */
      }
   }

   device->master_fd = master_fd;
   device->local_fd = fd;

   device->drm_device = fd_device_new_dup(fd);
   if (!device->drm_device) {
      result = vk_errorf(
         instance, VK_ERROR_INITIALIZATION_FAILED, "could not create the libdrm device");
      goto fail;
   }

   tmp_pipe = fd_pipe_new(device->drm_device, FD_PIPE_3D);
   if (!tmp_pipe) {
      result = vk_errorf(
         instance, VK_ERROR_INITIALIZATION_FAILED, "could not open the 3D pipe");
      goto fail;
   }

   if (fd_pipe_get_param(tmp_pipe, FD_GPU_ID, &val)) {
      result = vk_errorf(
         instance, VK_ERROR_INITIALIZATION_FAILED, "could not get GPU ID");
      goto fail;
   }
   device->gpu_id = val;

   if (fd_pipe_get_param(tmp_pipe, FD_GMEM_SIZE, &val)) {
      result = vk_errorf(
         instance, VK_ERROR_INITIALIZATION_FAILED, "could not get GMEM size");
      goto fail;
   }
   device->gmem_size = val;

   fd_pipe_del(tmp_pipe);
   tmp_pipe = NULL;

   memset(device->name, 0, sizeof(device->name));
   sprintf(device->name, "FD%d", device->gpu_id);

   switch (device->gpu_id) {
   case 530:
   case 630:
      break;
   default:
      result = vk_errorf(instance, VK_ERROR_INITIALIZATION_FAILED,
                         "device %s is unsupported", device->name);
      goto fail;
   }
   if (tu_device_get_cache_uuid(device->gpu_id, device->cache_uuid)) {
      result = vk_errorf(
         instance, VK_ERROR_INITIALIZATION_FAILED, "cannot generate UUID");
      goto fail;
   }

   /* The gpu id is already embedded in the uuid so we just pass "tu"
    * when creating the cache.
    */
   char buf[VK_UUID_SIZE * 2 + 1];
   disk_cache_format_hex_id(buf, device->cache_uuid, VK_UUID_SIZE * 2);
   device->disk_cache = disk_cache_create(device->name, buf, 0);

   fprintf(stderr,
           "WARNING: tu is not a conformant vulkan implementation, "
           "testing use only.\n");

   tu_get_driver_uuid(&device->driver_uuid);
   tu_get_device_uuid(&device->device_uuid);

   tu_fill_device_extension_table(device, &device->supported_extensions);

   if (result != VK_SUCCESS) {
      vk_error(instance, result);
      goto fail;
   }

   return VK_SUCCESS;

fail:
   if (tmp_pipe)
      fd_pipe_del(tmp_pipe);
   if (device->drm_device)
      fd_device_del(device->drm_device);
   close(fd);
   if (master_fd != -1)
      close(master_fd);
   return result;
}

static void
tu_physical_device_finish(struct tu_physical_device *device)
{
   disk_cache_destroy(device->disk_cache);
   close(device->local_fd);
   if (device->master_fd != -1)
      close(device->master_fd);
}

static void *
default_alloc_func(void *pUserData,
                   size_t size,
                   size_t align,
                   VkSystemAllocationScope allocationScope)
{
   return malloc(size);
}

static void *
default_realloc_func(void *pUserData,
                     void *pOriginal,
                     size_t size,
                     size_t align,
                     VkSystemAllocationScope allocationScope)
{
   return realloc(pOriginal, size);
}

static void
default_free_func(void *pUserData, void *pMemory)
{
   free(pMemory);
}

static const VkAllocationCallbacks default_alloc = {
   .pUserData = NULL,
   .pfnAllocation = default_alloc_func,
   .pfnReallocation = default_realloc_func,
   .pfnFree = default_free_func,
};
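
/* Note (an illustrative observation, not driver policy): the default
 * callbacks above ignore the `align` argument and rely on malloc()'s
 * natural alignment, which suffices because every allocation in this file
 * requests an alignment of 8. A stricter sketch, assuming only standard
 * POSIX, would honor larger alignments explicitly:
 *
 *    void *ptr = NULL;
 *    // posix_memalign needs align to be a power-of-two multiple of
 *    // sizeof(void *); clamp up for small requests.
 *    if (posix_memalign(&ptr, align > sizeof(void *) ? align
 *                                                    : sizeof(void *),
 *                       size) != 0)
 *       return NULL;
 *    return ptr;
 */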

static const struct debug_control tu_debug_options[] = {
   { "startup", TU_DEBUG_STARTUP },
   { NULL, 0 }
};

const char *
tu_get_debug_option_name(int id)
{
   assert(id < ARRAY_SIZE(tu_debug_options) - 1);
   return tu_debug_options[id].string;
}

static int
tu_get_instance_extension_index(const char *name)
{
   for (unsigned i = 0; i < TU_INSTANCE_EXTENSION_COUNT; ++i) {
      if (strcmp(name, tu_instance_extensions[i].extensionName) == 0)
         return i;
   }
   return -1;
}

VkResult
tu_CreateInstance(const VkInstanceCreateInfo *pCreateInfo,
                  const VkAllocationCallbacks *pAllocator,
                  VkInstance *pInstance)
{
   struct tu_instance *instance;
   VkResult result;

   assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_INSTANCE_CREATE_INFO);

   uint32_t client_version;
   if (pCreateInfo->pApplicationInfo &&
       pCreateInfo->pApplicationInfo->apiVersion != 0) {
      client_version = pCreateInfo->pApplicationInfo->apiVersion;
   } else {
      tu_EnumerateInstanceVersion(&client_version);
   }

   instance = vk_zalloc2(&default_alloc,
                         pAllocator,
                         sizeof(*instance),
                         8,
                         VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
   if (!instance)
      return vk_error(NULL, VK_ERROR_OUT_OF_HOST_MEMORY);

   instance->_loader_data.loaderMagic = ICD_LOADER_MAGIC;

   if (pAllocator)
      instance->alloc = *pAllocator;
   else
      instance->alloc = default_alloc;

   instance->api_version = client_version;
   instance->physical_device_count = -1;

   instance->debug_flags =
      parse_debug_string(getenv("TU_DEBUG"), tu_debug_options);

   if (instance->debug_flags & TU_DEBUG_STARTUP)
      tu_logi("Created an instance");

   for (uint32_t i = 0; i < pCreateInfo->enabledExtensionCount; i++) {
      const char *ext_name = pCreateInfo->ppEnabledExtensionNames[i];
      int index = tu_get_instance_extension_index(ext_name);

      if (index < 0 || !tu_supported_instance_extensions.extensions[index]) {
         vk_free2(&default_alloc, pAllocator, instance);
         return vk_error(instance, VK_ERROR_EXTENSION_NOT_PRESENT);
      }

      instance->enabled_extensions.extensions[index] = true;
   }

   result = vk_debug_report_instance_init(&instance->debug_report_callbacks);
   if (result != VK_SUCCESS) {
      vk_free2(&default_alloc, pAllocator, instance);
      return vk_error(instance, result);
   }

   _mesa_locale_init();

   VG(VALGRIND_CREATE_MEMPOOL(instance, 0, false));

   *pInstance = tu_instance_to_handle(instance);

   return VK_SUCCESS;
}

void
tu_DestroyInstance(VkInstance _instance,
                   const VkAllocationCallbacks *pAllocator)
{
   TU_FROM_HANDLE(tu_instance, instance, _instance);

   if (!instance)
      return;

   for (int i = 0; i < instance->physical_device_count; ++i) {
      tu_physical_device_finish(instance->physical_devices + i);
   }

   VG(VALGRIND_DESTROY_MEMPOOL(instance));

   _mesa_locale_fini();

   vk_debug_report_instance_destroy(&instance->debug_report_callbacks);

   vk_free(&instance->alloc, instance);
}

static VkResult
tu_enumerate_devices(struct tu_instance *instance)
{
   /* TODO: Check for more devices? */
   drmDevicePtr devices[8];
   VkResult result = VK_ERROR_INCOMPATIBLE_DRIVER;
   int max_devices;

   instance->physical_device_count = 0;

   max_devices = drmGetDevices2(0, devices, ARRAY_SIZE(devices));

   if (instance->debug_flags & TU_DEBUG_STARTUP)
      tu_logi("Found %d drm nodes", max_devices);

   if (max_devices < 1)
      return vk_error(instance, VK_ERROR_INCOMPATIBLE_DRIVER);

   for (unsigned i = 0; i < (unsigned)max_devices; i++) {
      if (devices[i]->available_nodes & (1 << DRM_NODE_RENDER) &&
          devices[i]->bustype == DRM_BUS_PLATFORM) {

         result = tu_physical_device_init(instance->physical_devices +
                                          instance->physical_device_count,
                                          instance,
                                          devices[i]);
         if (result == VK_SUCCESS)
            ++instance->physical_device_count;
         else if (result != VK_ERROR_INCOMPATIBLE_DRIVER)
            break;
      }
   }
   drmFreeDevices(devices, max_devices);

   return result;
}

VkResult
tu_EnumeratePhysicalDevices(VkInstance _instance,
                            uint32_t *pPhysicalDeviceCount,
                            VkPhysicalDevice *pPhysicalDevices)
{
   TU_FROM_HANDLE(tu_instance, instance, _instance);
   VK_OUTARRAY_MAKE(out, pPhysicalDevices, pPhysicalDeviceCount);

   VkResult result;

   if (instance->physical_device_count < 0) {
      result = tu_enumerate_devices(instance);
      if (result != VK_SUCCESS && result != VK_ERROR_INCOMPATIBLE_DRIVER)
         return result;
   }

   for (uint32_t i = 0; i < instance->physical_device_count; ++i) {
      vk_outarray_append(&out, p) {
         *p = tu_physical_device_to_handle(instance->physical_devices + i);
      }
   }

   return vk_outarray_status(&out);
}

VkResult
tu_EnumeratePhysicalDeviceGroups(
   VkInstance _instance,
   uint32_t *pPhysicalDeviceGroupCount,
   VkPhysicalDeviceGroupProperties *pPhysicalDeviceGroupProperties)
{
   TU_FROM_HANDLE(tu_instance, instance, _instance);
   VK_OUTARRAY_MAKE(out, pPhysicalDeviceGroupProperties,
                    pPhysicalDeviceGroupCount);
   VkResult result;

   if (instance->physical_device_count < 0) {
      result = tu_enumerate_devices(instance);
      if (result != VK_SUCCESS && result != VK_ERROR_INCOMPATIBLE_DRIVER)
         return result;
   }

   for (uint32_t i = 0; i < instance->physical_device_count; ++i) {
      vk_outarray_append(&out, p) {
         p->physicalDeviceCount = 1;
         p->physicalDevices[0] =
            tu_physical_device_to_handle(instance->physical_devices + i);
         p->subsetAllocation = false;
      }
   }

   return vk_outarray_status(&out);
}

void
tu_GetPhysicalDeviceFeatures(VkPhysicalDevice physicalDevice,
                             VkPhysicalDeviceFeatures *pFeatures)
{
   memset(pFeatures, 0, sizeof(*pFeatures));

   *pFeatures = (VkPhysicalDeviceFeatures){
      .robustBufferAccess = false,
      .fullDrawIndexUint32 = false,
      .imageCubeArray = false,
      .independentBlend = false,
      .geometryShader = false,
      .tessellationShader = false,
      .sampleRateShading = false,
      .dualSrcBlend = false,
      .logicOp = false,
      .multiDrawIndirect = false,
      .drawIndirectFirstInstance = false,
      .depthClamp = false,
      .depthBiasClamp = false,
      .fillModeNonSolid = false,
      .depthBounds = false,
      .wideLines = false,
      .largePoints = false,
      .alphaToOne = false,
      .multiViewport = false,
      .samplerAnisotropy = false,
      .textureCompressionETC2 = false,
      .textureCompressionASTC_LDR = false,
      .textureCompressionBC = false,
      .occlusionQueryPrecise = false,
      .pipelineStatisticsQuery = false,
      .vertexPipelineStoresAndAtomics = false,
      .fragmentStoresAndAtomics = false,
      .shaderTessellationAndGeometryPointSize = false,
      .shaderImageGatherExtended = false,
      .shaderStorageImageExtendedFormats = false,
      .shaderStorageImageMultisample = false,
      .shaderUniformBufferArrayDynamicIndexing = false,
      .shaderSampledImageArrayDynamicIndexing = false,
      .shaderStorageBufferArrayDynamicIndexing = false,
      .shaderStorageImageArrayDynamicIndexing = false,
      .shaderStorageImageReadWithoutFormat = false,
      .shaderStorageImageWriteWithoutFormat = false,
      .shaderClipDistance = false,
      .shaderCullDistance = false,
      .shaderFloat64 = false,
      .shaderInt64 = false,
      .shaderInt16 = false,
      .sparseBinding = false,
      .variableMultisampleRate = false,
      .inheritedQueries = false,
   };
}

void
tu_GetPhysicalDeviceFeatures2(VkPhysicalDevice physicalDevice,
                              VkPhysicalDeviceFeatures2KHR *pFeatures)
{
   vk_foreach_struct(ext, pFeatures->pNext)
   {
      switch (ext->sType) {
      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VARIABLE_POINTER_FEATURES_KHR: {
         VkPhysicalDeviceVariablePointerFeaturesKHR *features = (void *)ext;
         features->variablePointersStorageBuffer = false;
         features->variablePointers = false;
         break;
      }
      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MULTIVIEW_FEATURES_KHR: {
         VkPhysicalDeviceMultiviewFeaturesKHR *features =
            (VkPhysicalDeviceMultiviewFeaturesKHR *)ext;
         features->multiview = false;
         features->multiviewGeometryShader = false;
         features->multiviewTessellationShader = false;
         break;
      }
      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_DRAW_PARAMETER_FEATURES: {
         VkPhysicalDeviceShaderDrawParameterFeatures *features =
            (VkPhysicalDeviceShaderDrawParameterFeatures *)ext;
         features->shaderDrawParameters = false;
         break;
      }
      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROTECTED_MEMORY_FEATURES: {
         VkPhysicalDeviceProtectedMemoryFeatures *features =
            (VkPhysicalDeviceProtectedMemoryFeatures *)ext;
         features->protectedMemory = false;
         break;
      }
      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_16BIT_STORAGE_FEATURES: {
         VkPhysicalDevice16BitStorageFeatures *features =
            (VkPhysicalDevice16BitStorageFeatures *)ext;
         features->storageBuffer16BitAccess = false;
         features->uniformAndStorageBuffer16BitAccess = false;
         features->storagePushConstant16 = false;
         features->storageInputOutput16 = false;
         break;
      }
      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SAMPLER_YCBCR_CONVERSION_FEATURES: {
         VkPhysicalDeviceSamplerYcbcrConversionFeatures *features =
            (VkPhysicalDeviceSamplerYcbcrConversionFeatures *)ext;
         features->samplerYcbcrConversion = false;
         break;
      }
      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DESCRIPTOR_INDEXING_FEATURES_EXT: {
         VkPhysicalDeviceDescriptorIndexingFeaturesEXT *features =
            (VkPhysicalDeviceDescriptorIndexingFeaturesEXT *)ext;
         features->shaderInputAttachmentArrayDynamicIndexing = false;
         features->shaderUniformTexelBufferArrayDynamicIndexing = false;
         features->shaderStorageTexelBufferArrayDynamicIndexing = false;
         features->shaderUniformBufferArrayNonUniformIndexing = false;
         features->shaderSampledImageArrayNonUniformIndexing = false;
         features->shaderStorageBufferArrayNonUniformIndexing = false;
         features->shaderStorageImageArrayNonUniformIndexing = false;
         features->shaderInputAttachmentArrayNonUniformIndexing = false;
         features->shaderUniformTexelBufferArrayNonUniformIndexing = false;
         features->shaderStorageTexelBufferArrayNonUniformIndexing = false;
         features->descriptorBindingUniformBufferUpdateAfterBind = false;
         features->descriptorBindingSampledImageUpdateAfterBind = false;
         features->descriptorBindingStorageImageUpdateAfterBind = false;
         features->descriptorBindingStorageBufferUpdateAfterBind = false;
         features->descriptorBindingUniformTexelBufferUpdateAfterBind = false;
         features->descriptorBindingStorageTexelBufferUpdateAfterBind = false;
         features->descriptorBindingUpdateUnusedWhilePending = false;
         features->descriptorBindingPartiallyBound = false;
         features->descriptorBindingVariableDescriptorCount = false;
         features->runtimeDescriptorArray = false;
         break;
      }
      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_CONDITIONAL_RENDERING_FEATURES_EXT: {
         VkPhysicalDeviceConditionalRenderingFeaturesEXT *features =
            (VkPhysicalDeviceConditionalRenderingFeaturesEXT *)ext;
         features->conditionalRendering = false;
         features->inheritedConditionalRendering = false;
         break;
      }
      default:
         break;
      }
   }
   tu_GetPhysicalDeviceFeatures(physicalDevice, &pFeatures->features);
}

void
tu_GetPhysicalDeviceProperties(VkPhysicalDevice physicalDevice,
                               VkPhysicalDeviceProperties *pProperties)
{
   TU_FROM_HANDLE(tu_physical_device, pdevice, physicalDevice);
   VkSampleCountFlags sample_counts = 0xf;

   /* Make sure the entire descriptor set is addressable with a signed
    * 32-bit int, so the sum of all limits scaled by descriptor size must
    * be at most 2 GiB. A combined image & sampler object counts as one of
    * each. This limit is for the pipeline layout, not for the set layout,
    * but there is no set limit, so we just set a pipeline limit. No app is
    * likely to hit this soon. */
   size_t max_descriptor_set_size =
      ((1ull << 31) - 16 * MAX_DYNAMIC_BUFFERS) /
      (32 /* uniform buffer, 32 due to potential space wasted on alignment */ +
       32 /* storage buffer, 32 due to potential space wasted on alignment */ +
       32 /* sampler, largest when combined with image */ +
       64 /* sampled image */ + 64 /* storage image */);
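
   /* Worked example (illustrative only): the divisor sums to
    * 32 + 32 + 32 + 64 + 64 = 224 bytes per worst-case descriptor. If
    * MAX_DYNAMIC_BUFFERS were 32, the numerator would be 2^31 - 512, so
    * max_descriptor_set_size would come out near 2147483136 / 224, about
    * 9.6 million descriptors per stage, far above anything an application
    * actually requests.
    */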

   VkPhysicalDeviceLimits limits = {
      .maxImageDimension1D = (1 << 14),
      .maxImageDimension2D = (1 << 14),
      .maxImageDimension3D = (1 << 11),
      .maxImageDimensionCube = (1 << 14),
      .maxImageArrayLayers = (1 << 11),
      .maxTexelBufferElements = 128 * 1024 * 1024,
      .maxUniformBufferRange = UINT32_MAX,
      .maxStorageBufferRange = UINT32_MAX,
      .maxPushConstantsSize = MAX_PUSH_CONSTANTS_SIZE,
      .maxMemoryAllocationCount = UINT32_MAX,
      .maxSamplerAllocationCount = 64 * 1024,
      .bufferImageGranularity = 64, /* A cache line */
      .sparseAddressSpaceSize = 0xffffffffu, /* buffer max size */
      .maxBoundDescriptorSets = MAX_SETS,
      .maxPerStageDescriptorSamplers = max_descriptor_set_size,
      .maxPerStageDescriptorUniformBuffers = max_descriptor_set_size,
      .maxPerStageDescriptorStorageBuffers = max_descriptor_set_size,
      .maxPerStageDescriptorSampledImages = max_descriptor_set_size,
      .maxPerStageDescriptorStorageImages = max_descriptor_set_size,
      .maxPerStageDescriptorInputAttachments = max_descriptor_set_size,
      .maxPerStageResources = max_descriptor_set_size,
      .maxDescriptorSetSamplers = max_descriptor_set_size,
      .maxDescriptorSetUniformBuffers = max_descriptor_set_size,
      .maxDescriptorSetUniformBuffersDynamic = MAX_DYNAMIC_UNIFORM_BUFFERS,
      .maxDescriptorSetStorageBuffers = max_descriptor_set_size,
      .maxDescriptorSetStorageBuffersDynamic = MAX_DYNAMIC_STORAGE_BUFFERS,
      .maxDescriptorSetSampledImages = max_descriptor_set_size,
      .maxDescriptorSetStorageImages = max_descriptor_set_size,
      .maxDescriptorSetInputAttachments = max_descriptor_set_size,
      .maxVertexInputAttributes = 32,
      .maxVertexInputBindings = 32,
      .maxVertexInputAttributeOffset = 2047,
      .maxVertexInputBindingStride = 2048,
      .maxVertexOutputComponents = 128,
      .maxTessellationGenerationLevel = 64,
      .maxTessellationPatchSize = 32,
      .maxTessellationControlPerVertexInputComponents = 128,
      .maxTessellationControlPerVertexOutputComponents = 128,
      .maxTessellationControlPerPatchOutputComponents = 120,
      .maxTessellationControlTotalOutputComponents = 4096,
      .maxTessellationEvaluationInputComponents = 128,
      .maxTessellationEvaluationOutputComponents = 128,
      .maxGeometryShaderInvocations = 127,
      .maxGeometryInputComponents = 64,
      .maxGeometryOutputComponents = 128,
      .maxGeometryOutputVertices = 256,
      .maxGeometryTotalOutputComponents = 1024,
      .maxFragmentInputComponents = 128,
      .maxFragmentOutputAttachments = 8,
      .maxFragmentDualSrcAttachments = 1,
      .maxFragmentCombinedOutputResources = 8,
      .maxComputeSharedMemorySize = 32768,
      .maxComputeWorkGroupCount = { 65535, 65535, 65535 },
      .maxComputeWorkGroupInvocations = 2048,
      .maxComputeWorkGroupSize = { 2048, 2048, 2048 },
      .subPixelPrecisionBits = 4 /* FIXME */,
      .subTexelPrecisionBits = 4 /* FIXME */,
      .mipmapPrecisionBits = 4 /* FIXME */,
      .maxDrawIndexedIndexValue = UINT32_MAX,
      .maxDrawIndirectCount = UINT32_MAX,
      .maxSamplerLodBias = 16,
      .maxSamplerAnisotropy = 16,
      .maxViewports = MAX_VIEWPORTS,
      .maxViewportDimensions = { (1 << 14), (1 << 14) },
      .viewportBoundsRange = { INT16_MIN, INT16_MAX },
      .viewportSubPixelBits = 8,
      .minMemoryMapAlignment = 4096, /* A page */
      .minTexelBufferOffsetAlignment = 1,
      .minUniformBufferOffsetAlignment = 4,
      .minStorageBufferOffsetAlignment = 4,
      .minTexelOffset = -32,
      .maxTexelOffset = 31,
      .minTexelGatherOffset = -32,
      .maxTexelGatherOffset = 31,
      .minInterpolationOffset = -2,
      .maxInterpolationOffset = 2,
      .subPixelInterpolationOffsetBits = 8,
      .maxFramebufferWidth = (1 << 14),
      .maxFramebufferHeight = (1 << 14),
      .maxFramebufferLayers = (1 << 10),
      .framebufferColorSampleCounts = sample_counts,
      .framebufferDepthSampleCounts = sample_counts,
      .framebufferStencilSampleCounts = sample_counts,
      .framebufferNoAttachmentsSampleCounts = sample_counts,
      .maxColorAttachments = MAX_RTS,
      .sampledImageColorSampleCounts = sample_counts,
      .sampledImageIntegerSampleCounts = VK_SAMPLE_COUNT_1_BIT,
      .sampledImageDepthSampleCounts = sample_counts,
      .sampledImageStencilSampleCounts = sample_counts,
      .storageImageSampleCounts = VK_SAMPLE_COUNT_1_BIT,
      .maxSampleMaskWords = 1,
      .timestampComputeAndGraphics = true,
      .timestampPeriod = 1,
      .maxClipDistances = 8,
      .maxCullDistances = 8,
      .maxCombinedClipAndCullDistances = 8,
      .discreteQueuePriorities = 1,
      .pointSizeRange = { 0.125, 255.875 },
      .lineWidthRange = { 0.0, 7.9921875 },
      .pointSizeGranularity = (1.0 / 8.0),
      .lineWidthGranularity = (1.0 / 128.0),
      .strictLines = false, /* FINISHME */
      .standardSampleLocations = true,
      .optimalBufferCopyOffsetAlignment = 128,
      .optimalBufferCopyRowPitchAlignment = 128,
      .nonCoherentAtomSize = 64,
   };

   *pProperties = (VkPhysicalDeviceProperties){
      .apiVersion = tu_physical_device_api_version(pdevice),
      .driverVersion = vk_get_driver_version(),
      .vendorID = 0, /* TODO */
      .deviceID = 0,
      .deviceType = VK_PHYSICAL_DEVICE_TYPE_INTEGRATED_GPU,
      .limits = limits,
      .sparseProperties = { 0 },
   };

   strcpy(pProperties->deviceName, pdevice->name);
   memcpy(pProperties->pipelineCacheUUID, pdevice->cache_uuid, VK_UUID_SIZE);
}

void
tu_GetPhysicalDeviceProperties2(VkPhysicalDevice physicalDevice,
                                VkPhysicalDeviceProperties2KHR *pProperties)
{
   TU_FROM_HANDLE(tu_physical_device, pdevice, physicalDevice);
   tu_GetPhysicalDeviceProperties(physicalDevice, &pProperties->properties);

   vk_foreach_struct(ext, pProperties->pNext)
   {
      switch (ext->sType) {
      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PUSH_DESCRIPTOR_PROPERTIES_KHR: {
         VkPhysicalDevicePushDescriptorPropertiesKHR *properties =
            (VkPhysicalDevicePushDescriptorPropertiesKHR *)ext;
         properties->maxPushDescriptors = MAX_PUSH_DESCRIPTORS;
         break;
      }
      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_ID_PROPERTIES_KHR: {
         VkPhysicalDeviceIDPropertiesKHR *properties =
            (VkPhysicalDeviceIDPropertiesKHR *)ext;
         memcpy(properties->driverUUID, pdevice->driver_uuid, VK_UUID_SIZE);
         memcpy(properties->deviceUUID, pdevice->device_uuid, VK_UUID_SIZE);
         properties->deviceLUIDValid = false;
         break;
      }
      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MULTIVIEW_PROPERTIES_KHR: {
         VkPhysicalDeviceMultiviewPropertiesKHR *properties =
            (VkPhysicalDeviceMultiviewPropertiesKHR *)ext;
         properties->maxMultiviewViewCount = MAX_VIEWS;
         properties->maxMultiviewInstanceIndex = INT_MAX;
         break;
      }
      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_POINT_CLIPPING_PROPERTIES_KHR: {
         VkPhysicalDevicePointClippingPropertiesKHR *properties =
            (VkPhysicalDevicePointClippingPropertiesKHR *)ext;
         properties->pointClippingBehavior =
            VK_POINT_CLIPPING_BEHAVIOR_ALL_CLIP_PLANES_KHR;
         break;
      }
      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MAINTENANCE_3_PROPERTIES: {
         VkPhysicalDeviceMaintenance3Properties *properties =
            (VkPhysicalDeviceMaintenance3Properties *)ext;
         /* Make sure everything is addressable by a signed 32-bit int, and
          * our largest descriptors are 96 bytes. */
         properties->maxPerSetDescriptors = (1ull << 31) / 96;
         /* Our buffer size fields allow only this much */
         properties->maxMemoryAllocationSize = 0xFFFFFFFFull;
         break;
      }
      default:
         break;
      }
   }
}

static const VkQueueFamilyProperties tu_queue_family_properties = {
   .queueFlags = VK_QUEUE_GRAPHICS_BIT |
                 VK_QUEUE_COMPUTE_BIT |
                 VK_QUEUE_TRANSFER_BIT,
   .queueCount = 1,
   .timestampValidBits = 64,
   .minImageTransferGranularity = (VkExtent3D) { 1, 1, 1 },
};

void
tu_GetPhysicalDeviceQueueFamilyProperties(
   VkPhysicalDevice physicalDevice,
   uint32_t *pQueueFamilyPropertyCount,
   VkQueueFamilyProperties *pQueueFamilyProperties)
{
   VK_OUTARRAY_MAKE(out, pQueueFamilyProperties, pQueueFamilyPropertyCount);

   vk_outarray_append(&out, p) {
      *p = tu_queue_family_properties;
   }
}

void
tu_GetPhysicalDeviceQueueFamilyProperties2(
   VkPhysicalDevice physicalDevice,
   uint32_t *pQueueFamilyPropertyCount,
   VkQueueFamilyProperties2KHR *pQueueFamilyProperties)
{
   VK_OUTARRAY_MAKE(out, pQueueFamilyProperties, pQueueFamilyPropertyCount);

   vk_outarray_append(&out, p) {
      p->queueFamilyProperties = tu_queue_family_properties;
   }
}

static uint64_t
tu_get_system_heap_size(void)
{
   struct sysinfo info;
   sysinfo(&info);

   uint64_t total_ram = (uint64_t)info.totalram * (uint64_t)info.mem_unit;

   /* We don't want to burn too much ram with the GPU. If the user has 4GiB
    * or less, we use at most half. If they have more than 4GiB, we use 3/4.
    */
   uint64_t available_ram;
   if (total_ram <= 4ull * 1024ull * 1024ull * 1024ull)
      available_ram = total_ram / 2;
   else
      available_ram = total_ram * 3 / 4;

   return available_ram;
}
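
/* Worked example (illustrative only): on a board with 3 GiB of RAM the
 * heap reported below is 3/2 = 1.5 GiB (the <= 4 GiB branch); with 8 GiB
 * of RAM it is 8 * 3/4 = 6 GiB. sysinfo(2) reports totalram in units of
 * mem_unit bytes, hence the multiplication above.
 */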

void
tu_GetPhysicalDeviceMemoryProperties(
   VkPhysicalDevice physicalDevice,
   VkPhysicalDeviceMemoryProperties *pMemoryProperties)
{
   pMemoryProperties->memoryHeapCount = 1;
   pMemoryProperties->memoryHeaps[0].size = tu_get_system_heap_size();
   pMemoryProperties->memoryHeaps[0].flags = VK_MEMORY_HEAP_DEVICE_LOCAL_BIT;

   pMemoryProperties->memoryTypeCount = 1;
   pMemoryProperties->memoryTypes[0].propertyFlags =
      VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT |
      VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT |
      VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
   pMemoryProperties->memoryTypes[0].heapIndex = 0;
}

void
tu_GetPhysicalDeviceMemoryProperties2(
   VkPhysicalDevice physicalDevice,
   VkPhysicalDeviceMemoryProperties2KHR *pMemoryProperties)
{
   tu_GetPhysicalDeviceMemoryProperties(
      physicalDevice, &pMemoryProperties->memoryProperties);
}

static int
tu_queue_init(struct tu_device *device,
              struct tu_queue *queue,
              uint32_t queue_family_index,
              int idx,
              VkDeviceQueueCreateFlags flags)
{
   queue->_loader_data.loaderMagic = ICD_LOADER_MAGIC;
   queue->device = device;
   queue->queue_family_index = queue_family_index;
   queue->queue_idx = idx;
   queue->flags = flags;

   return VK_SUCCESS;
}

static void
tu_queue_finish(struct tu_queue *queue)
{
}

static int
tu_get_device_extension_index(const char *name)
{
   for (unsigned i = 0; i < TU_DEVICE_EXTENSION_COUNT; ++i) {
      if (strcmp(name, tu_device_extensions[i].extensionName) == 0)
         return i;
   }
   return -1;
}

VkResult
tu_CreateDevice(VkPhysicalDevice physicalDevice,
                const VkDeviceCreateInfo *pCreateInfo,
                const VkAllocationCallbacks *pAllocator,
                VkDevice *pDevice)
{
   TU_FROM_HANDLE(tu_physical_device, physical_device, physicalDevice);
   VkResult result;
   struct tu_device *device;

   /* Check enabled features */
   if (pCreateInfo->pEnabledFeatures) {
      VkPhysicalDeviceFeatures supported_features;
      tu_GetPhysicalDeviceFeatures(physicalDevice, &supported_features);
      VkBool32 *supported_feature = (VkBool32 *)&supported_features;
      VkBool32 *enabled_feature = (VkBool32 *)pCreateInfo->pEnabledFeatures;
      unsigned num_features =
         sizeof(VkPhysicalDeviceFeatures) / sizeof(VkBool32);
      for (uint32_t i = 0; i < num_features; i++) {
         if (enabled_feature[i] && !supported_feature[i])
            return vk_error(physical_device->instance,
                            VK_ERROR_FEATURE_NOT_PRESENT);
      }
   }

   device = vk_zalloc2(&physical_device->instance->alloc,
                       pAllocator,
                       sizeof(*device),
                       8,
                       VK_SYSTEM_ALLOCATION_SCOPE_DEVICE);
   if (!device)
      return vk_error(physical_device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);

   device->_loader_data.loaderMagic = ICD_LOADER_MAGIC;
   device->instance = physical_device->instance;
   device->physical_device = physical_device;

   if (pAllocator)
      device->alloc = *pAllocator;
   else
      device->alloc = physical_device->instance->alloc;

   for (uint32_t i = 0; i < pCreateInfo->enabledExtensionCount; i++) {
      const char *ext_name = pCreateInfo->ppEnabledExtensionNames[i];
      int index = tu_get_device_extension_index(ext_name);
      if (index < 0 ||
          !physical_device->supported_extensions.extensions[index]) {
         vk_free(&device->alloc, device);
         return vk_error(physical_device->instance,
                         VK_ERROR_EXTENSION_NOT_PRESENT);
      }

      device->enabled_extensions.extensions[index] = true;
   }

   for (unsigned i = 0; i < pCreateInfo->queueCreateInfoCount; i++) {
      const VkDeviceQueueCreateInfo *queue_create =
         &pCreateInfo->pQueueCreateInfos[i];
      uint32_t qfi = queue_create->queueFamilyIndex;
      device->queues[qfi] =
         vk_alloc(&device->alloc,
                  queue_create->queueCount * sizeof(struct tu_queue),
                  8,
                  VK_SYSTEM_ALLOCATION_SCOPE_DEVICE);
      if (!device->queues[qfi]) {
         result = VK_ERROR_OUT_OF_HOST_MEMORY;
         goto fail;
      }

      memset(device->queues[qfi],
             0,
             queue_create->queueCount * sizeof(struct tu_queue));

      device->queue_count[qfi] = queue_create->queueCount;

      for (unsigned q = 0; q < queue_create->queueCount; q++) {
         result = tu_queue_init(
            device, &device->queues[qfi][q], qfi, q, queue_create->flags);
         if (result != VK_SUCCESS)
            goto fail;
      }
   }

   VkPipelineCacheCreateInfo ci;
   ci.sType = VK_STRUCTURE_TYPE_PIPELINE_CACHE_CREATE_INFO;
   ci.pNext = NULL;
   ci.flags = 0;
   ci.pInitialData = NULL;
   ci.initialDataSize = 0;
   VkPipelineCache pc;
   result =
      tu_CreatePipelineCache(tu_device_to_handle(device), &ci, NULL, &pc);
   if (result != VK_SUCCESS)
      goto fail;

   device->mem_cache = tu_pipeline_cache_from_handle(pc);

   *pDevice = tu_device_to_handle(device);
   return VK_SUCCESS;

fail:
   for (unsigned i = 0; i < TU_MAX_QUEUE_FAMILIES; i++) {
      for (unsigned q = 0; q < device->queue_count[i]; q++)
         tu_queue_finish(&device->queues[i][q]);
      if (device->queue_count[i])
         vk_free(&device->alloc, device->queues[i]);
   }

   vk_free(&device->alloc, device);
   return result;
}

void
tu_DestroyDevice(VkDevice _device, const VkAllocationCallbacks *pAllocator)
{
   TU_FROM_HANDLE(tu_device, device, _device);

   if (!device)
      return;

   for (unsigned i = 0; i < TU_MAX_QUEUE_FAMILIES; i++) {
      for (unsigned q = 0; q < device->queue_count[i]; q++)
         tu_queue_finish(&device->queues[i][q]);
      if (device->queue_count[i])
         vk_free(&device->alloc, device->queues[i]);
   }

   VkPipelineCache pc = tu_pipeline_cache_to_handle(device->mem_cache);
   tu_DestroyPipelineCache(tu_device_to_handle(device), pc, NULL);

   vk_free(&device->alloc, device);
}

VkResult
tu_EnumerateInstanceLayerProperties(uint32_t *pPropertyCount,
                                    VkLayerProperties *pProperties)
{
   *pPropertyCount = 0;
   return VK_SUCCESS;
}

VkResult
tu_EnumerateDeviceLayerProperties(VkPhysicalDevice physicalDevice,
                                  uint32_t *pPropertyCount,
                                  VkLayerProperties *pProperties)
{
   *pPropertyCount = 0;
   return VK_SUCCESS;
}

void
tu_GetDeviceQueue2(VkDevice _device,
                   const VkDeviceQueueInfo2 *pQueueInfo,
                   VkQueue *pQueue)
{
   TU_FROM_HANDLE(tu_device, device, _device);
   struct tu_queue *queue;

   queue =
      &device->queues[pQueueInfo->queueFamilyIndex][pQueueInfo->queueIndex];
   if (pQueueInfo->flags != queue->flags) {
      /* From the Vulkan 1.1.70 spec:
       *
       * "The queue returned by vkGetDeviceQueue2 must have the same
       * flags value from this structure as that used at device
       * creation time in a VkDeviceQueueCreateInfo instance. If no
       * matching flags were specified at device creation time then
       * pQueue will return VK_NULL_HANDLE."
       */
      *pQueue = VK_NULL_HANDLE;
      return;
   }

   *pQueue = tu_queue_to_handle(queue);
}

void
tu_GetDeviceQueue(VkDevice _device,
                  uint32_t queueFamilyIndex,
                  uint32_t queueIndex,
                  VkQueue *pQueue)
{
   const VkDeviceQueueInfo2 info =
      (VkDeviceQueueInfo2){ .sType = VK_STRUCTURE_TYPE_DEVICE_QUEUE_INFO_2,
                            .queueFamilyIndex = queueFamilyIndex,
                            .queueIndex = queueIndex };

   tu_GetDeviceQueue2(_device, &info, pQueue);
}
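
/* Illustrative note (not from the spec text above): with this driver's
 * single queue family, a typical call sequence is
 *
 *    VkQueue queue;
 *    tu_GetDeviceQueue(device, 0, 0, &queue);
 *
 * tu_GetDeviceQueue() forwards a VkDeviceQueueInfo2 whose flags member is
 * left zero by the designated initializer, so it only ever returns queues
 * created without flags; a queue created with
 * VK_DEVICE_QUEUE_CREATE_PROTECTED_BIT would have to be fetched through
 * tu_GetDeviceQueue2() with matching flags.
 */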

VkResult
tu_QueueSubmit(VkQueue _queue,
               uint32_t submitCount,
               const VkSubmitInfo *pSubmits,
               VkFence _fence)
{
   return VK_SUCCESS;
}

VkResult
tu_QueueWaitIdle(VkQueue _queue)
{
   return VK_SUCCESS;
}

VkResult
tu_DeviceWaitIdle(VkDevice _device)
{
   TU_FROM_HANDLE(tu_device, device, _device);

   for (unsigned i = 0; i < TU_MAX_QUEUE_FAMILIES; i++) {
      for (unsigned q = 0; q < device->queue_count[i]; q++) {
         tu_QueueWaitIdle(tu_queue_to_handle(&device->queues[i][q]));
      }
   }
   return VK_SUCCESS;
}

VkResult
tu_EnumerateInstanceExtensionProperties(const char *pLayerName,
                                        uint32_t *pPropertyCount,
                                        VkExtensionProperties *pProperties)
{
   VK_OUTARRAY_MAKE(out, pProperties, pPropertyCount);

   /* We support no layers */
   if (pLayerName)
      return vk_error(NULL, VK_ERROR_LAYER_NOT_PRESENT);

   for (int i = 0; i < TU_INSTANCE_EXTENSION_COUNT; i++) {
      if (tu_supported_instance_extensions.extensions[i]) {
         vk_outarray_append(&out, prop) { *prop = tu_instance_extensions[i]; }
      }
   }

   return vk_outarray_status(&out);
}

VkResult
tu_EnumerateDeviceExtensionProperties(VkPhysicalDevice physicalDevice,
                                      const char *pLayerName,
                                      uint32_t *pPropertyCount,
                                      VkExtensionProperties *pProperties)
{
   TU_FROM_HANDLE(tu_physical_device, device, physicalDevice);
   VK_OUTARRAY_MAKE(out, pProperties, pPropertyCount);

   /* We support no layers */
   if (pLayerName)
      return vk_error(NULL, VK_ERROR_LAYER_NOT_PRESENT);

   for (int i = 0; i < TU_DEVICE_EXTENSION_COUNT; i++) {
      if (device->supported_extensions.extensions[i]) {
         vk_outarray_append(&out, prop) { *prop = tu_device_extensions[i]; }
      }
   }

   return vk_outarray_status(&out);
}

PFN_vkVoidFunction
tu_GetInstanceProcAddr(VkInstance _instance, const char *pName)
{
   TU_FROM_HANDLE(tu_instance, instance, _instance);

   return tu_lookup_entrypoint_checked(pName,
                                       instance ? instance->api_version : 0,
                                       instance ? &instance->enabled_extensions
                                                : NULL,
                                       NULL);
}

/* The loader wants us to expose a second GetInstanceProcAddr function
 * to work around certain LD_PRELOAD issues seen in apps.
 */
PUBLIC
VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL
vk_icdGetInstanceProcAddr(VkInstance instance, const char *pName);

PUBLIC
VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL
vk_icdGetInstanceProcAddr(VkInstance instance, const char *pName)
{
   return tu_GetInstanceProcAddr(instance, pName);
}

PFN_vkVoidFunction
tu_GetDeviceProcAddr(VkDevice _device, const char *pName)
{
   TU_FROM_HANDLE(tu_device, device, _device);

   return tu_lookup_entrypoint_checked(pName,
                                       device->instance->api_version,
                                       &device->instance->enabled_extensions,
                                       &device->enabled_extensions);
}

static VkResult
tu_alloc_memory(struct tu_device *device,
                const VkMemoryAllocateInfo *pAllocateInfo,
                const VkAllocationCallbacks *pAllocator,
                VkDeviceMemory *pMem)
{
   struct tu_device_memory *mem;
   VkResult result;

   assert(pAllocateInfo->sType == VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO);

   if (pAllocateInfo->allocationSize == 0) {
      /* Apparently, this is allowed */
      *pMem = VK_NULL_HANDLE;
      return VK_SUCCESS;
   }

   mem = vk_alloc2(&device->alloc,
                   pAllocator,
                   sizeof(*mem),
                   8,
                   VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (mem == NULL)
      return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);

   result = tu_bo_init_new(device, &mem->bo, pAllocateInfo->allocationSize);
   if (result != VK_SUCCESS) {
      vk_free2(&device->alloc, pAllocator, mem);
      return result;
   }

   mem->size = pAllocateInfo->allocationSize;
   mem->type_index = pAllocateInfo->memoryTypeIndex;

   mem->map = NULL;
   mem->user_ptr = NULL;

   *pMem = tu_device_memory_to_handle(mem);

   return VK_SUCCESS;
}

VkResult
tu_AllocateMemory(VkDevice _device,
                  const VkMemoryAllocateInfo *pAllocateInfo,
                  const VkAllocationCallbacks *pAllocator,
                  VkDeviceMemory *pMem)
{
   TU_FROM_HANDLE(tu_device, device, _device);
   return tu_alloc_memory(device, pAllocateInfo, pAllocator, pMem);
}

void
tu_FreeMemory(VkDevice _device,
              VkDeviceMemory _mem,
              const VkAllocationCallbacks *pAllocator)
{
   TU_FROM_HANDLE(tu_device, device, _device);
   TU_FROM_HANDLE(tu_device_memory, mem, _mem);

   if (mem == NULL)
      return;

   tu_bo_finish(device, &mem->bo);
   vk_free2(&device->alloc, pAllocator, mem);
}

VkResult
tu_MapMemory(VkDevice _device,
             VkDeviceMemory _memory,
             VkDeviceSize offset,
             VkDeviceSize size,
             VkMemoryMapFlags flags,
             void **ppData)
{
   TU_FROM_HANDLE(tu_device, device, _device);
   TU_FROM_HANDLE(tu_device_memory, mem, _memory);
   VkResult result;

   if (mem == NULL) {
      *ppData = NULL;
      return VK_SUCCESS;
   }

   if (mem->user_ptr) {
      *ppData = mem->user_ptr;
   } else if (!mem->map) {
      result = tu_bo_map(device, &mem->bo);
      if (result != VK_SUCCESS)
         return result;
      *ppData = mem->map = mem->bo.map;
   } else {
      *ppData = mem->map;
   }

   if (*ppData) {
      *ppData = (char *)*ppData + offset;
      return VK_SUCCESS;
   }

   return vk_error(device->instance, VK_ERROR_MEMORY_MAP_FAILED);
}

void
tu_UnmapMemory(VkDevice _device, VkDeviceMemory _memory)
{
   /* I do not see any unmapping done by the freedreno Gallium driver. */
}

VkResult
tu_FlushMappedMemoryRanges(VkDevice _device,
                           uint32_t memoryRangeCount,
                           const VkMappedMemoryRange *pMemoryRanges)
{
   return VK_SUCCESS;
}

VkResult
tu_InvalidateMappedMemoryRanges(VkDevice _device,
                                uint32_t memoryRangeCount,
                                const VkMappedMemoryRange *pMemoryRanges)
{
   return VK_SUCCESS;
}

void
tu_GetBufferMemoryRequirements(VkDevice _device,
                               VkBuffer _buffer,
                               VkMemoryRequirements *pMemoryRequirements)
{
   TU_FROM_HANDLE(tu_buffer, buffer, _buffer);

   pMemoryRequirements->memoryTypeBits = 1;
   pMemoryRequirements->alignment = 16;
   pMemoryRequirements->size =
      align64(buffer->size, pMemoryRequirements->alignment);
}

void
tu_GetBufferMemoryRequirements2(
   VkDevice device,
   const VkBufferMemoryRequirementsInfo2KHR *pInfo,
   VkMemoryRequirements2KHR *pMemoryRequirements)
{
   tu_GetBufferMemoryRequirements(
      device, pInfo->buffer, &pMemoryRequirements->memoryRequirements);
}

void
tu_GetImageMemoryRequirements(VkDevice _device,
                              VkImage _image,
                              VkMemoryRequirements *pMemoryRequirements)
{
   TU_FROM_HANDLE(tu_image, image, _image);

   pMemoryRequirements->memoryTypeBits = 1;
   pMemoryRequirements->size = image->size;
   pMemoryRequirements->alignment = image->alignment;
}

void
tu_GetImageMemoryRequirements2(VkDevice device,
                               const VkImageMemoryRequirementsInfo2KHR *pInfo,
                               VkMemoryRequirements2KHR *pMemoryRequirements)
{
   tu_GetImageMemoryRequirements(
      device, pInfo->image, &pMemoryRequirements->memoryRequirements);
}

void
tu_GetImageSparseMemoryRequirements(
   VkDevice device,
   VkImage image,
   uint32_t *pSparseMemoryRequirementCount,
   VkSparseImageMemoryRequirements *pSparseMemoryRequirements)
{
   tu_stub();
}

void
tu_GetImageSparseMemoryRequirements2(
   VkDevice device,
   const VkImageSparseMemoryRequirementsInfo2KHR *pInfo,
   uint32_t *pSparseMemoryRequirementCount,
   VkSparseImageMemoryRequirements2KHR *pSparseMemoryRequirements)
{
   tu_stub();
}

void
tu_GetDeviceMemoryCommitment(VkDevice device,
                             VkDeviceMemory memory,
                             VkDeviceSize *pCommittedMemoryInBytes)
{
   *pCommittedMemoryInBytes = 0;
}

VkResult
tu_BindBufferMemory2(VkDevice device,
                     uint32_t bindInfoCount,
                     const VkBindBufferMemoryInfoKHR *pBindInfos)
{
   return VK_SUCCESS;
}

VkResult
tu_BindBufferMemory(VkDevice device,
                    VkBuffer buffer,
                    VkDeviceMemory memory,
                    VkDeviceSize memoryOffset)
{
   const VkBindBufferMemoryInfoKHR info = {
      .sType = VK_STRUCTURE_TYPE_BIND_BUFFER_MEMORY_INFO_KHR,
      .buffer = buffer,
      .memory = memory,
      .memoryOffset = memoryOffset
   };

   return tu_BindBufferMemory2(device, 1, &info);
}

VkResult
tu_BindImageMemory2(VkDevice device,
                    uint32_t bindInfoCount,
                    const VkBindImageMemoryInfoKHR *pBindInfos)
{
   return VK_SUCCESS;
}

VkResult
tu_BindImageMemory(VkDevice device,
                   VkImage image,
                   VkDeviceMemory memory,
                   VkDeviceSize memoryOffset)
{
   const VkBindImageMemoryInfoKHR info = {
      .sType = VK_STRUCTURE_TYPE_BIND_IMAGE_MEMORY_INFO_KHR,
      .image = image,
      .memory = memory,
      .memoryOffset = memoryOffset
   };

   return tu_BindImageMemory2(device, 1, &info);
}

VkResult
tu_QueueBindSparse(VkQueue _queue,
                   uint32_t bindInfoCount,
                   const VkBindSparseInfo *pBindInfo,
                   VkFence _fence)
{
   return VK_SUCCESS;
}

VkResult
tu_CreateFence(VkDevice _device,
               const VkFenceCreateInfo *pCreateInfo,
               const VkAllocationCallbacks *pAllocator,
               VkFence *pFence)
{
   TU_FROM_HANDLE(tu_device, device, _device);

   struct tu_fence *fence = vk_alloc2(&device->alloc,
                                      pAllocator,
                                      sizeof(*fence),
                                      8,
                                      VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);

   if (!fence)
      return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);

   *pFence = tu_fence_to_handle(fence);

   return VK_SUCCESS;
}

void
tu_DestroyFence(VkDevice _device,
                VkFence _fence,
                const VkAllocationCallbacks *pAllocator)
{
   TU_FROM_HANDLE(tu_device, device, _device);
   TU_FROM_HANDLE(tu_fence, fence, _fence);

   if (!fence)
      return;

   vk_free2(&device->alloc, pAllocator, fence);
}

VkResult
tu_WaitForFences(VkDevice _device,
                 uint32_t fenceCount,
                 const VkFence *pFences,
                 VkBool32 waitAll,
                 uint64_t timeout)
{
   return VK_SUCCESS;
}

VkResult
tu_ResetFences(VkDevice _device, uint32_t fenceCount, const VkFence *pFences)
{
   return VK_SUCCESS;
}

VkResult
tu_GetFenceStatus(VkDevice _device, VkFence _fence)
{
   return VK_SUCCESS;
}

// Queue semaphore functions

VkResult
tu_CreateSemaphore(VkDevice _device,
                   const VkSemaphoreCreateInfo *pCreateInfo,
                   const VkAllocationCallbacks *pAllocator,
                   VkSemaphore *pSemaphore)
{
   TU_FROM_HANDLE(tu_device, device, _device);

   struct tu_semaphore *sem = vk_alloc2(&device->alloc,
                                        pAllocator,
                                        sizeof(*sem),
                                        8,
                                        VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (!sem)
      return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);

   *pSemaphore = tu_semaphore_to_handle(sem);
   return VK_SUCCESS;
}

void
tu_DestroySemaphore(VkDevice _device,
                    VkSemaphore _semaphore,
                    const VkAllocationCallbacks *pAllocator)
{
   TU_FROM_HANDLE(tu_device, device, _device);
   TU_FROM_HANDLE(tu_semaphore, sem, _semaphore);
   if (!_semaphore)
      return;

   vk_free2(&device->alloc, pAllocator, sem);
}

VkResult
tu_CreateEvent(VkDevice _device,
               const VkEventCreateInfo *pCreateInfo,
               const VkAllocationCallbacks *pAllocator,
               VkEvent *pEvent)
{
   TU_FROM_HANDLE(tu_device, device, _device);
   struct tu_event *event = vk_alloc2(&device->alloc,
                                      pAllocator,
                                      sizeof(*event),
                                      8,
                                      VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);

   if (!event)
      return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);

   *pEvent = tu_event_to_handle(event);

   return VK_SUCCESS;
}

void
tu_DestroyEvent(VkDevice _device,
                VkEvent _event,
                const VkAllocationCallbacks *pAllocator)
{
   TU_FROM_HANDLE(tu_device, device, _device);
   TU_FROM_HANDLE(tu_event, event, _event);

   if (!event)
      return;
   vk_free2(&device->alloc, pAllocator, event);
}

VkResult
tu_GetEventStatus(VkDevice _device, VkEvent _event)
{
   TU_FROM_HANDLE(tu_event, event, _event);

   if (*event->map == 1)
      return VK_EVENT_SET;
   return VK_EVENT_RESET;
}

VkResult
tu_SetEvent(VkDevice _device, VkEvent _event)
{
   TU_FROM_HANDLE(tu_event, event, _event);
   *event->map = 1;

   return VK_SUCCESS;
}

VkResult
tu_ResetEvent(VkDevice _device, VkEvent _event)
{
   TU_FROM_HANDLE(tu_event, event, _event);
   *event->map = 0;

   return VK_SUCCESS;
}

VkResult
tu_CreateBuffer(VkDevice _device,
                const VkBufferCreateInfo *pCreateInfo,
                const VkAllocationCallbacks *pAllocator,
                VkBuffer *pBuffer)
{
   TU_FROM_HANDLE(tu_device, device, _device);
   struct tu_buffer *buffer;

   assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO);

   buffer = vk_alloc2(&device->alloc,
                      pAllocator,
                      sizeof(*buffer),
                      8,
                      VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (buffer == NULL)
      return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);

   buffer->size = pCreateInfo->size;
   buffer->usage = pCreateInfo->usage;
   buffer->flags = pCreateInfo->flags;

   *pBuffer = tu_buffer_to_handle(buffer);

   return VK_SUCCESS;
}

void
tu_DestroyBuffer(VkDevice _device,
                 VkBuffer _buffer,
                 const VkAllocationCallbacks *pAllocator)
{
   TU_FROM_HANDLE(tu_device, device, _device);
   TU_FROM_HANDLE(tu_buffer, buffer, _buffer);

   if (!buffer)
      return;

   vk_free2(&device->alloc, pAllocator, buffer);
}

static uint32_t
tu_surface_max_layer_count(struct tu_image_view *iview)
{
   return iview->type == VK_IMAGE_VIEW_TYPE_3D
             ? iview->extent.depth
             : (iview->base_layer + iview->layer_count);
}

VkResult
tu_CreateFramebuffer(VkDevice _device,
                     const VkFramebufferCreateInfo *pCreateInfo,
                     const VkAllocationCallbacks *pAllocator,
                     VkFramebuffer *pFramebuffer)
{
   TU_FROM_HANDLE(tu_device, device, _device);
   struct tu_framebuffer *framebuffer;

   assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO);

   size_t size =
      sizeof(*framebuffer) +
      sizeof(struct tu_attachment_info) * pCreateInfo->attachmentCount;
   framebuffer = vk_alloc2(
      &device->alloc, pAllocator, size, 8, VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (framebuffer == NULL)
      return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);

   framebuffer->attachment_count = pCreateInfo->attachmentCount;
   framebuffer->width = pCreateInfo->width;
   framebuffer->height = pCreateInfo->height;
   framebuffer->layers = pCreateInfo->layers;
   for (uint32_t i = 0; i < pCreateInfo->attachmentCount; i++) {
      VkImageView _iview = pCreateInfo->pAttachments[i];
      struct tu_image_view *iview = tu_image_view_from_handle(_iview);
      framebuffer->attachments[i].attachment = iview;

      framebuffer->width = MIN2(framebuffer->width, iview->extent.width);
      framebuffer->height = MIN2(framebuffer->height, iview->extent.height);
      framebuffer->layers =
         MIN2(framebuffer->layers, tu_surface_max_layer_count(iview));
   }

   *pFramebuffer = tu_framebuffer_to_handle(framebuffer);
   return VK_SUCCESS;
}

void
tu_DestroyFramebuffer(VkDevice _device,
                      VkFramebuffer _fb,
                      const VkAllocationCallbacks *pAllocator)
{
   TU_FROM_HANDLE(tu_device, device, _device);
   TU_FROM_HANDLE(tu_framebuffer, fb, _fb);

   if (!fb)
      return;
   vk_free2(&device->alloc, pAllocator, fb);
}

static void
tu_init_sampler(struct tu_device *device,
                struct tu_sampler *sampler,
                const VkSamplerCreateInfo *pCreateInfo)
{
}

VkResult
tu_CreateSampler(VkDevice _device,
                 const VkSamplerCreateInfo *pCreateInfo,
                 const VkAllocationCallbacks *pAllocator,
                 VkSampler *pSampler)
{
   TU_FROM_HANDLE(tu_device, device, _device);
   struct tu_sampler *sampler;

   assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_SAMPLER_CREATE_INFO);

   sampler = vk_alloc2(&device->alloc,
                       pAllocator,
                       sizeof(*sampler),
                       8,
                       VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (!sampler)
      return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);

   tu_init_sampler(device, sampler, pCreateInfo);
   *pSampler = tu_sampler_to_handle(sampler);

   return VK_SUCCESS;
}

void
tu_DestroySampler(VkDevice _device,
                  VkSampler _sampler,
                  const VkAllocationCallbacks *pAllocator)
{
   TU_FROM_HANDLE(tu_device, device, _device);
   TU_FROM_HANDLE(tu_sampler, sampler, _sampler);

   if (!sampler)
      return;
   vk_free2(&device->alloc, pAllocator, sampler);
}

/* vk_icd.h does not declare this function, so we declare it here to
 * suppress Wmissing-prototypes.
 */
PUBLIC VKAPI_ATTR VkResult VKAPI_CALL
vk_icdNegotiateLoaderICDInterfaceVersion(uint32_t *pSupportedVersion);

PUBLIC VKAPI_ATTR VkResult VKAPI_CALL
vk_icdNegotiateLoaderICDInterfaceVersion(uint32_t *pSupportedVersion)
{
   /* For the full details on loader interface versioning, see
    * <https://github.com/KhronosGroup/Vulkan-LoaderAndValidationLayers/blob/master/loader/LoaderAndLayerInterface.md>.
    * What follows is a condensed summary, to help you navigate the large and
    * confusing official doc.
    *
    * - Loader interface v0 is incompatible with later versions. We don't
    *   support it.
    *
    * - In loader interface v1:
    *     - The first ICD entrypoint called by the loader is
    *       vk_icdGetInstanceProcAddr(). The ICD must statically expose this
    *       entrypoint.
    *     - The ICD must statically expose no other Vulkan symbol unless it
    *       is linked with -Bsymbolic.
    *     - Each dispatchable Vulkan handle created by the ICD must be
    *       a pointer to a struct whose first member is VK_LOADER_DATA. The
    *       ICD must initialize VK_LOADER_DATA.loaderMagic to
    *       ICD_LOADER_MAGIC.
    *     - The loader implements vkCreate{PLATFORM}SurfaceKHR() and
    *       vkDestroySurfaceKHR(). The ICD must be capable of working with
    *       such loader-managed surfaces.
    *
    * - Loader interface v2 differs from v1 in:
    *     - The first ICD entrypoint called by the loader is
    *       vk_icdNegotiateLoaderICDInterfaceVersion(). The ICD must
    *       statically expose this entrypoint.
    *
    * - Loader interface v3 differs from v2 in:
    *     - The ICD must implement vkCreate{PLATFORM}SurfaceKHR(),
    *       vkDestroySurfaceKHR(), and other API which uses VkSurfaceKHR,
    *       because the loader no longer does so.
    */
   *pSupportedVersion = MIN2(*pSupportedVersion, 3u);
   return VK_SUCCESS;
}
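
/* Illustrative negotiation example (not part of the driver): the loader
 * passes the highest interface version it supports in *pSupportedVersion,
 * and the ICD clamps it with MIN2 to the highest version it implements,
 * 3 here. A loader offering 4 is answered with 3 and both sides then speak
 * v3; a loader so old that it predates this entrypoint never calls it and
 * instead starts with vk_icdGetInstanceProcAddr(), the v1 flow described
 * in the comment above.
 */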

void
tu_GetPhysicalDeviceExternalSemaphoreProperties(
   VkPhysicalDevice physicalDevice,
   const VkPhysicalDeviceExternalSemaphoreInfoKHR *pExternalSemaphoreInfo,
   VkExternalSemaphorePropertiesKHR *pExternalSemaphoreProperties)
{
   pExternalSemaphoreProperties->exportFromImportedHandleTypes = 0;
   pExternalSemaphoreProperties->compatibleHandleTypes = 0;
   pExternalSemaphoreProperties->externalSemaphoreFeatures = 0;
}

void
tu_GetPhysicalDeviceExternalFenceProperties(
   VkPhysicalDevice physicalDevice,
   const VkPhysicalDeviceExternalFenceInfoKHR *pExternalFenceInfo,
   VkExternalFencePropertiesKHR *pExternalFenceProperties)
{
   pExternalFenceProperties->exportFromImportedHandleTypes = 0;
   pExternalFenceProperties->compatibleHandleTypes = 0;
   pExternalFenceProperties->externalFenceFeatures = 0;
}

VkResult
tu_CreateDebugReportCallbackEXT(
   VkInstance _instance,
   const VkDebugReportCallbackCreateInfoEXT *pCreateInfo,
   const VkAllocationCallbacks *pAllocator,
   VkDebugReportCallbackEXT *pCallback)
{
   TU_FROM_HANDLE(tu_instance, instance, _instance);
   return vk_create_debug_report_callback(&instance->debug_report_callbacks,
                                          pCreateInfo,
                                          pAllocator,
                                          &instance->alloc,
                                          pCallback);
}

void
tu_DestroyDebugReportCallbackEXT(VkInstance _instance,
                                 VkDebugReportCallbackEXT _callback,
                                 const VkAllocationCallbacks *pAllocator)
{
   TU_FROM_HANDLE(tu_instance, instance, _instance);
   vk_destroy_debug_report_callback(&instance->debug_report_callbacks,
                                    _callback,
                                    pAllocator,
                                    &instance->alloc);
}

void
tu_DebugReportMessageEXT(VkInstance _instance,
                         VkDebugReportFlagsEXT flags,
                         VkDebugReportObjectTypeEXT objectType,
                         uint64_t object,
                         size_t location,
                         int32_t messageCode,
                         const char *pLayerPrefix,
                         const char *pMessage)
{
   TU_FROM_HANDLE(tu_instance, instance, _instance);
   vk_debug_report(&instance->debug_report_callbacks,
                   flags,
                   objectType,
                   object,
                   location,
                   messageCode,
                   pLayerPrefix,
                   pMessage);
}

void
tu_GetDeviceGroupPeerMemoryFeatures(
   VkDevice device,
   uint32_t heapIndex,
   uint32_t localDeviceIndex,
   uint32_t remoteDeviceIndex,
   VkPeerMemoryFeatureFlags *pPeerMemoryFeatures)
{
   assert(localDeviceIndex == remoteDeviceIndex);

   *pPeerMemoryFeatures = VK_PEER_MEMORY_FEATURE_COPY_SRC_BIT |
                          VK_PEER_MEMORY_FEATURE_COPY_DST_BIT |
                          VK_PEER_MEMORY_FEATURE_GENERIC_SRC_BIT |
                          VK_PEER_MEMORY_FEATURE_GENERIC_DST_BIT;
}