1 /*
2 * Copyright © 2016 Red Hat.
3 * Copyright © 2016 Bas Nieuwenhuizen
4 *
5 * based in part on anv driver which is:
6 * Copyright © 2015 Intel Corporation
7 *
8 * Permission is hereby granted, free of charge, to any person obtaining a
9 * copy of this software and associated documentation files (the "Software"),
10 * to deal in the Software without restriction, including without limitation
11 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
12 * and/or sell copies of the Software, and to permit persons to whom the
13 * Software is furnished to do so, subject to the following conditions:
14 *
15 * The above copyright notice and this permission notice (including the next
16 * paragraph) shall be included in all copies or substantial portions of the
17 * Software.
18 *
19 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
20 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
21 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
22 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
23 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
24 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
25 * IN THE SOFTWARE.
26 */
27
28 #include "tu_private.h"
29 #include "util/debug.h"
30 #include "util/disk_cache.h"
31 #include "util/strtod.h"
32 #include "vk_format.h"
33 #include "vk_util.h"
34 #include <fcntl.h>
35 #include <stdbool.h>
36 #include <string.h>
37 #include <sys/mman.h>
38 #include <sys/sysinfo.h>
39 #include <unistd.h>
40 #include <xf86drm.h>
41 #include <msm_drm.h>
42
43 static int
44 tu_device_get_cache_uuid(uint16_t family, void *uuid)
45 {
46 uint32_t mesa_timestamp;
47 uint16_t f = family;
48 memset(uuid, 0, VK_UUID_SIZE);
49 if (!disk_cache_get_function_timestamp(tu_device_get_cache_uuid,
50 &mesa_timestamp))
51 return -1;
52
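/* UUID layout: bytes 0-3 carry the Mesa build timestamp, bytes 4-5 the GPU
 * family, and byte 6 onward a short driver tag; the rest stays zeroed from
 * the memset above.
 */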
53 memcpy(uuid, &mesa_timestamp, 4);
54 memcpy((char *)uuid + 4, &f, 2);
55 snprintf((char *)uuid + 6, VK_UUID_SIZE - 10, "tu");
56 return 0;
57 }
58
59 static void
60 tu_get_driver_uuid(void *uuid)
61 {
62 memset(uuid, 0, VK_UUID_SIZE);
63 }
64
65 static void
66 tu_get_device_uuid(void *uuid)
67 {
68 tu_use_args(uuid);
69 tu_stub();
70 }
71
72 VkResult
73 tu_bo_init_new(struct tu_device *dev, struct tu_bo *bo, uint64_t size)
74 {
75 /* TODO: Choose better flags. As of 2018-11-12, freedreno/drm/msm_bo.c
76 * always sets `flags = MSM_BO_WC`, and we copy that behavior here.
77 */
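/* For reference, msm_drm.h also provides MSM_BO_CACHED, MSM_BO_UNCACHED,
 * MSM_BO_GPU_READONLY and MSM_BO_SCANOUT; picking a cache mode per
 * allocation is left to the TODO above.
 */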
78 uint32_t gem_handle = tu_gem_new(dev, size, MSM_BO_WC);
79 if (!gem_handle)
80 goto fail_new;
81
82 /* Calling DRM_MSM_GEM_INFO forces the kernel to allocate backing pages. We
83 * want immediate backing pages because vkAllocateMemory and friends must
84 * not lazily fail.
85 *
86 * TODO(chadv): Must we really call DRM_MSM_GEM_INFO to acquire backing
87 * pages? I infer so from reading comments in msm_bo.c:bo_allocate(), but
88 * maybe I misunderstand.
89 */
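/* A sketch of what tu_gem_info_iova() presumably reduces to, assuming the
 * msm UAPI of this era (struct drm_msm_gem_info with handle/flags/offset
 * fields, where MSM_INFO_IOVA selects the iova):
 *
 *    struct drm_msm_gem_info req = {
 *       .handle = gem_handle,
 *       .flags = MSM_INFO_IOVA,
 *    };
 *    if (drmCommandWriteRead(dev->physical_device->local_fd,
 *                            DRM_MSM_GEM_INFO, &req, sizeof(req)) == 0)
 *       iova = req.offset;
 *
 * With flags == 0 the same ioctl returns the mmap offset instead.
 */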
90
91 /* TODO: Do we need 'offset' if we have 'iova'? */
92 uint64_t offset = tu_gem_info_offset(dev, gem_handle);
93 if (!offset)
94 goto fail_info;
95
96 uint64_t iova = tu_gem_info_iova(dev, gem_handle);
97 if (!iova)
98 goto fail_info;
99
100 *bo = (struct tu_bo) {
101 .gem_handle = gem_handle,
102 .size = size,
103 .offset = offset,
104 .iova = iova,
105 };
106
107 return VK_SUCCESS;
108
109 fail_info:
110 tu_gem_close(dev, gem_handle);
111 fail_new:
112 return vk_error(dev->instance, VK_ERROR_OUT_OF_DEVICE_MEMORY);
113 }
114
115 VkResult
116 tu_bo_map(struct tu_device *dev, struct tu_bo *bo)
117 {
118 if (bo->map)
119 return VK_SUCCESS;
120
121 /* TODO: Should we use the wrapper os_mmap() like Freedreno does? */
122 void *map = mmap(0, bo->size, PROT_READ | PROT_WRITE, MAP_SHARED,
123 dev->physical_device->local_fd, bo->offset);
124 if (map == MAP_FAILED)
125 return vk_error(dev->instance, VK_ERROR_MEMORY_MAP_FAILED);
126 bo->map = map;
127 return VK_SUCCESS;
128 }
129
130 void
131 tu_bo_finish(struct tu_device *dev, struct tu_bo *bo)
132 {
133 assert(bo->gem_handle);
134
135 if (bo->map)
136 munmap(bo->map, bo->size);
137
138 tu_gem_close(dev, bo->gem_handle);
139 }
140
141 static VkResult
142 tu_physical_device_init(struct tu_physical_device *device,
143 struct tu_instance *instance,
144 drmDevicePtr drm_device)
145 {
146 const char *path = drm_device->nodes[DRM_NODE_RENDER];
147 VkResult result = VK_SUCCESS;
148 drmVersionPtr version;
149 int fd;
150 int master_fd = -1;
151 struct fd_pipe *tmp_pipe = NULL;
152 uint64_t val;
153
154 fd = open(path, O_RDWR | O_CLOEXEC);
155 if (fd < 0) {
156 return vk_errorf(instance, VK_ERROR_INCOMPATIBLE_DRIVER,
157 "failed to open device %s", path);
158 }
159
160 /* Version 1.3 added MSM_INFO_IOVA. */
161 const int min_version_major = 1;
162 const int min_version_minor = 3;
163
164 version = drmGetVersion(fd);
165 if (!version) {
166 close(fd);
167 return vk_errorf(instance, VK_ERROR_INCOMPATIBLE_DRIVER,
168 "failed to query kernel driver version for device %s",
169 path);
170 }
171
172 if (strcmp(version->name, "msm")) {
173 drmFreeVersion(version);
174 if (master_fd != -1)
175 close(master_fd);
176 close(fd);
177 return vk_errorf(instance, VK_ERROR_INCOMPATIBLE_DRIVER,
178 "device %s does not use the msm kernel driver", path);
179 }
180
181 if (version->version_major != min_version_major || version->version_minor < min_version_minor) {
182 result = vk_errorf(instance, VK_ERROR_INCOMPATIBLE_DRIVER,
183 "kernel driver for device %s has version %d.%d, "
184 "but Vulkan requires version >= %d.%d",
185 path,
186 version->version_major, version->version_minor,
187 min_version_major, min_version_minor);
188 drmFreeVersion(version);
189 close(fd);
190 return result;
191 }
192
193 drmFreeVersion(version);
194
195 if (instance->debug_flags & TU_DEBUG_STARTUP)
196 tu_logi("Found compatible device '%s'.", path);
197
198 device->_loader_data.loaderMagic = ICD_LOADER_MAGIC;
199 device->instance = instance;
200 assert(strlen(path) < ARRAY_SIZE(device->path));
201 strncpy(device->path, path, ARRAY_SIZE(device->path));
202
203 if (instance->enabled_extensions.KHR_display) {
204 master_fd = open(drm_device->nodes[DRM_NODE_PRIMARY], O_RDWR | O_CLOEXEC);
205 if (master_fd >= 0) {
206 /* TODO: free master_fd if accel is not working? */
207 }
208 }
209
210 device->master_fd = master_fd;
211 device->local_fd = fd;
212
213 device->drm_device = fd_device_new_dup(fd);
214 if (!device->drm_device) {
215 result = vk_errorf(
216 instance, VK_ERROR_INITIALIZATION_FAILED, "could not create the libdrm device");
217 goto fail;
218 }
219
220 tmp_pipe = fd_pipe_new(device->drm_device, FD_PIPE_3D);
221 if (!tmp_pipe) {
222 result = vk_errorf(
223 instance, VK_ERROR_INITIALIZATION_FAILED, "could not open the 3D pipe");
224 goto fail;
225 }
226
227 if (fd_pipe_get_param(tmp_pipe, FD_GPU_ID, &val)) {
228 result = vk_errorf(
229 instance, VK_ERROR_INITIALIZATION_FAILED, "could not get GPU ID");
230 goto fail;
231 }
232 device->gpu_id = val;
233
234 if (fd_pipe_get_param(tmp_pipe, FD_GMEM_SIZE, &val)) {
235 result = vk_errorf(
236 instance, VK_ERROR_INITIALIZATION_FAILED, "could not get GMEM size");
237 goto fail;
238 }
239 device->gmem_size = val;
240
241 fd_pipe_del(tmp_pipe);
242 tmp_pipe = NULL;
243
244 memset(device->name, 0, sizeof(device->name));
245 sprintf(device->name, "FD%d", device->gpu_id);
246
247 switch(device->gpu_id) {
248 case 530:
249 case 630:
250 break;
251 default:
252 result = vk_errorf(instance, VK_ERROR_INITIALIZATION_FAILED,
253 "device %s is unsupported", device->name);
254 goto fail;
255 }
256 if (tu_device_get_cache_uuid(device->gpu_id, device->cache_uuid)) {
257 result = vk_errorf(
258 instance, VK_ERROR_INITIALIZATION_FAILED, "cannot generate UUID");
259 goto fail;
260 }
261
262 /* The gpu id is already embedded in the uuid, so we just pass the
263 * device name when creating the cache.
264 */
265 char buf[VK_UUID_SIZE * 2 + 1];
266 disk_cache_format_hex_id(buf, device->cache_uuid, VK_UUID_SIZE * 2);
267 device->disk_cache = disk_cache_create(device->name, buf, 0);
268
269 fprintf(stderr,
270 "WARNING: tu is not a conformant vulkan implementation, "
271 "testing use only.\n");
272
273 tu_get_driver_uuid(&device->driver_uuid);
274 tu_get_device_uuid(&device->device_uuid);
275
276 tu_fill_device_extension_table(device, &device->supported_extensions);
277
278 if (result != VK_SUCCESS) {
279 vk_error(instance, result);
280 goto fail;
281 }
282
283 return VK_SUCCESS;
284
285 fail:
286 if (tmp_pipe)
287 fd_pipe_del(tmp_pipe);
288 if (device->drm_device)
289 fd_device_del(device->drm_device);
290 close(fd);
291 if (master_fd != -1)
292 close(master_fd);
293 return result;
294 }
295
296 static void
297 tu_physical_device_finish(struct tu_physical_device *device)
298 {
299 disk_cache_destroy(device->disk_cache);
300 close(device->local_fd);
301 if (device->master_fd != -1)
302 close(device->master_fd);
303 }
304
305 static void *
306 default_alloc_func(void *pUserData,
307 size_t size,
308 size_t align,
309 VkSystemAllocationScope allocationScope)
310 {
311 return malloc(size);
312 }
313
314 static void *
315 default_realloc_func(void *pUserData,
316 void *pOriginal,
317 size_t size,
318 size_t align,
319 VkSystemAllocationScope allocationScope)
320 {
321 return realloc(pOriginal, size);
322 }
323
324 static void
325 default_free_func(void *pUserData, void *pMemory)
326 {
327 free(pMemory);
328 }
329
330 static const VkAllocationCallbacks default_alloc = {
331 .pUserData = NULL,
332 .pfnAllocation = default_alloc_func,
333 .pfnReallocation = default_realloc_func,
334 .pfnFree = default_free_func,
335 };
336
337 static const struct debug_control tu_debug_options[] = {
338 { "startup", TU_DEBUG_STARTUP },
339 { NULL, 0 } };
340
341 const char *
342 tu_get_debug_option_name(int id)
343 {
344 assert(id < ARRAY_SIZE(tu_debug_options) - 1);
345 return tu_debug_options[id].string;
346 }
347
348 static int
349 tu_get_instance_extension_index(const char *name)
350 {
351 for (unsigned i = 0; i < TU_INSTANCE_EXTENSION_COUNT; ++i) {
352 if (strcmp(name, tu_instance_extensions[i].extensionName) == 0)
353 return i;
354 }
355 return -1;
356 }
357
358 VkResult
359 tu_CreateInstance(const VkInstanceCreateInfo *pCreateInfo,
360 const VkAllocationCallbacks *pAllocator,
361 VkInstance *pInstance)
362 {
363 struct tu_instance *instance;
364 VkResult result;
365
366 assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_INSTANCE_CREATE_INFO);
367
368 uint32_t client_version;
369 if (pCreateInfo->pApplicationInfo &&
370 pCreateInfo->pApplicationInfo->apiVersion != 0) {
371 client_version = pCreateInfo->pApplicationInfo->apiVersion;
372 } else {
373 tu_EnumerateInstanceVersion(&client_version);
374 }
375
376 instance = vk_zalloc2(&default_alloc,
377 pAllocator,
378 sizeof(*instance),
379 8,
380 VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
381 if (!instance)
382 return vk_error(NULL, VK_ERROR_OUT_OF_HOST_MEMORY);
383
384 instance->_loader_data.loaderMagic = ICD_LOADER_MAGIC;
385
386 if (pAllocator)
387 instance->alloc = *pAllocator;
388 else
389 instance->alloc = default_alloc;
390
391 instance->api_version = client_version;
392 instance->physical_device_count = -1;
393
394 instance->debug_flags =
395 parse_debug_string(getenv("TU_DEBUG"), tu_debug_options);
396
397 if (instance->debug_flags & TU_DEBUG_STARTUP)
398 tu_logi("Created an instance");
399
400 for (uint32_t i = 0; i < pCreateInfo->enabledExtensionCount; i++) {
401 const char *ext_name = pCreateInfo->ppEnabledExtensionNames[i];
402 int index = tu_get_instance_extension_index(ext_name);
403
404 if (index < 0 || !tu_supported_instance_extensions.extensions[index]) {
405 vk_free2(&default_alloc, pAllocator, instance);
406 return vk_error(instance, VK_ERROR_EXTENSION_NOT_PRESENT);
407 }
408
409 instance->enabled_extensions.extensions[index] = true;
410 }
411
412 result = vk_debug_report_instance_init(&instance->debug_report_callbacks);
413 if (result != VK_SUCCESS) {
414 vk_free2(&default_alloc, pAllocator, instance);
415 return vk_error(instance, result);
416 }
417
418 _mesa_locale_init();
419
420 VG(VALGRIND_CREATE_MEMPOOL(instance, 0, false));
421
422 *pInstance = tu_instance_to_handle(instance);
423
424 return VK_SUCCESS;
425 }
426
427 void
428 tu_DestroyInstance(VkInstance _instance,
429 const VkAllocationCallbacks *pAllocator)
430 {
431 TU_FROM_HANDLE(tu_instance, instance, _instance);
432
433 if (!instance)
434 return;
435
436 for (int i = 0; i < instance->physical_device_count; ++i) {
437 tu_physical_device_finish(instance->physical_devices + i);
438 }
439
440 VG(VALGRIND_DESTROY_MEMPOOL(instance));
441
442 _mesa_locale_fini();
443
444 vk_debug_report_instance_destroy(&instance->debug_report_callbacks);
445
446 vk_free(&instance->alloc, instance);
447 }
448
449 static VkResult
450 tu_enumerate_devices(struct tu_instance *instance)
451 {
452 /* TODO: Check for more devices? */
453 drmDevicePtr devices[8];
454 VkResult result = VK_ERROR_INCOMPATIBLE_DRIVER;
455 int max_devices;
456
457 instance->physical_device_count = 0;
458
459 max_devices = drmGetDevices2(0, devices, ARRAY_SIZE(devices));
460
461 if (instance->debug_flags & TU_DEBUG_STARTUP)
462 tu_logi("Found %d drm nodes", max_devices);
463
464 if (max_devices < 1)
465 return vk_error(instance, VK_ERROR_INCOMPATIBLE_DRIVER);
466
467 for (unsigned i = 0; i < (unsigned)max_devices; i++) {
468 if (devices[i]->available_nodes & 1 << DRM_NODE_RENDER &&
469 devices[i]->bustype == DRM_BUS_PLATFORM) {
470
471 result = tu_physical_device_init(instance->physical_devices +
472 instance->physical_device_count,
473 instance,
474 devices[i]);
475 if (result == VK_SUCCESS)
476 ++instance->physical_device_count;
477 else if (result != VK_ERROR_INCOMPATIBLE_DRIVER)
478 break;
479 }
480 }
481 drmFreeDevices(devices, max_devices);
482
483 return result;
484 }
485
486 VkResult
487 tu_EnumeratePhysicalDevices(VkInstance _instance,
488 uint32_t *pPhysicalDeviceCount,
489 VkPhysicalDevice *pPhysicalDevices)
490 {
491 TU_FROM_HANDLE(tu_instance, instance, _instance);
492 VK_OUTARRAY_MAKE(out, pPhysicalDevices, pPhysicalDeviceCount);
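/* VK_OUTARRAY_MAKE/vk_outarray_append implement the usual Vulkan two-call
 * idiom: when pPhysicalDevices is NULL only the count is written back;
 * otherwise up to *pPhysicalDeviceCount handles are filled in and
 * vk_outarray_status() reports VK_INCOMPLETE if the array was too small.
 */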
493
494 VkResult result;
495
496 if (instance->physical_device_count < 0) {
497 result = tu_enumerate_devices(instance);
498 if (result != VK_SUCCESS && result != VK_ERROR_INCOMPATIBLE_DRIVER)
499 return result;
500 }
501
502 for (uint32_t i = 0; i < instance->physical_device_count; ++i) {
503 vk_outarray_append(&out, p) {
504 *p = tu_physical_device_to_handle(instance->physical_devices + i);
505 }
506
507 }
508
509 return vk_outarray_status(&out);
510 }
511
512 VkResult
513 tu_EnumeratePhysicalDeviceGroups(
514 VkInstance _instance,
515 uint32_t *pPhysicalDeviceGroupCount,
516 VkPhysicalDeviceGroupProperties *pPhysicalDeviceGroupProperties)
517 {
518 TU_FROM_HANDLE(tu_instance, instance, _instance);
519 VK_OUTARRAY_MAKE(out, pPhysicalDeviceGroupProperties, pPhysicalDeviceGroupCount);
520 VkResult result;
521
522 if (instance->physical_device_count < 0) {
523 result = tu_enumerate_devices(instance);
524 if (result != VK_SUCCESS && result != VK_ERROR_INCOMPATIBLE_DRIVER)
525 return result;
526 }
527
528 for (uint32_t i = 0; i < instance->physical_device_count; ++i) {
529 vk_outarray_append(&out, p) {
530 p->physicalDeviceCount = 1;
531 p->physicalDevices[0] =
532 tu_physical_device_to_handle(instance->physical_devices + i);
533 p->subsetAllocation = false;
534 }
535 }
536
537 return vk_outarray_status(&out);
538 }
539
540 void
541 tu_GetPhysicalDeviceFeatures(VkPhysicalDevice physicalDevice,
542 VkPhysicalDeviceFeatures *pFeatures)
543 {
544 memset(pFeatures, 0, sizeof(*pFeatures));
545
546 *pFeatures = (VkPhysicalDeviceFeatures){
547 .robustBufferAccess = false,
548 .fullDrawIndexUint32 = false,
549 .imageCubeArray = false,
550 .independentBlend = false,
551 .geometryShader = false,
552 .tessellationShader = false,
553 .sampleRateShading = false,
554 .dualSrcBlend = false,
555 .logicOp = false,
556 .multiDrawIndirect = false,
557 .drawIndirectFirstInstance = false,
558 .depthClamp = false,
559 .depthBiasClamp = false,
560 .fillModeNonSolid = false,
561 .depthBounds = false,
562 .wideLines = false,
563 .largePoints = false,
564 .alphaToOne = false,
565 .multiViewport = false,
566 .samplerAnisotropy = false,
567 .textureCompressionETC2 = false,
568 .textureCompressionASTC_LDR = false,
569 .textureCompressionBC = false,
570 .occlusionQueryPrecise = false,
571 .pipelineStatisticsQuery = false,
572 .vertexPipelineStoresAndAtomics = false,
573 .fragmentStoresAndAtomics = false,
574 .shaderTessellationAndGeometryPointSize = false,
575 .shaderImageGatherExtended = false,
576 .shaderStorageImageExtendedFormats = false,
577 .shaderStorageImageMultisample = false,
578 .shaderUniformBufferArrayDynamicIndexing = false,
579 .shaderSampledImageArrayDynamicIndexing = false,
580 .shaderStorageBufferArrayDynamicIndexing = false,
581 .shaderStorageImageArrayDynamicIndexing = false,
582 .shaderStorageImageReadWithoutFormat = false,
583 .shaderStorageImageWriteWithoutFormat = false,
584 .shaderClipDistance = false,
585 .shaderCullDistance = false,
586 .shaderFloat64 = false,
587 .shaderInt64 = false,
588 .shaderInt16 = false,
589 .sparseBinding = false,
590 .variableMultisampleRate = false,
591 .inheritedQueries = false,
592 };
593 }
594
595 void
596 tu_GetPhysicalDeviceFeatures2(VkPhysicalDevice physicalDevice,
597 VkPhysicalDeviceFeatures2KHR *pFeatures)
598 {
599 vk_foreach_struct(ext, pFeatures->pNext)
600 {
601 switch (ext->sType) {
602 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VARIABLE_POINTER_FEATURES_KHR: {
603 VkPhysicalDeviceVariablePointerFeaturesKHR *features = (void *)ext;
604 features->variablePointersStorageBuffer = false;
605 features->variablePointers = false;
606 break;
607 }
608 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MULTIVIEW_FEATURES_KHR: {
609 VkPhysicalDeviceMultiviewFeaturesKHR *features =
610 (VkPhysicalDeviceMultiviewFeaturesKHR *)ext;
611 features->multiview = false;
612 features->multiviewGeometryShader = false;
613 features->multiviewTessellationShader = false;
614 break;
615 }
616 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_DRAW_PARAMETER_FEATURES: {
617 VkPhysicalDeviceShaderDrawParameterFeatures *features =
618 (VkPhysicalDeviceShaderDrawParameterFeatures *)ext;
619 features->shaderDrawParameters = false;
620 break;
621 }
622 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROTECTED_MEMORY_FEATURES: {
623 VkPhysicalDeviceProtectedMemoryFeatures *features =
624 (VkPhysicalDeviceProtectedMemoryFeatures *)ext;
625 features->protectedMemory = false;
626 break;
627 }
628 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_16BIT_STORAGE_FEATURES: {
629 VkPhysicalDevice16BitStorageFeatures *features =
630 (VkPhysicalDevice16BitStorageFeatures *)ext;
631 features->storageBuffer16BitAccess = false;
632 features->uniformAndStorageBuffer16BitAccess = false;
633 features->storagePushConstant16 = false;
634 features->storageInputOutput16 = false;
635 break;
636 }
637 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SAMPLER_YCBCR_CONVERSION_FEATURES: {
638 VkPhysicalDeviceSamplerYcbcrConversionFeatures *features =
639 (VkPhysicalDeviceSamplerYcbcrConversionFeatures *)ext;
640 features->samplerYcbcrConversion = false;
641 break;
642 }
643 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DESCRIPTOR_INDEXING_FEATURES_EXT: {
644 VkPhysicalDeviceDescriptorIndexingFeaturesEXT *features =
645 (VkPhysicalDeviceDescriptorIndexingFeaturesEXT *)ext;
646 features->shaderInputAttachmentArrayDynamicIndexing = false;
647 features->shaderUniformTexelBufferArrayDynamicIndexing = false;
648 features->shaderStorageTexelBufferArrayDynamicIndexing = false;
649 features->shaderUniformBufferArrayNonUniformIndexing = false;
650 features->shaderSampledImageArrayNonUniformIndexing = false;
651 features->shaderStorageBufferArrayNonUniformIndexing = false;
652 features->shaderStorageImageArrayNonUniformIndexing = false;
653 features->shaderInputAttachmentArrayNonUniformIndexing = false;
654 features->shaderUniformTexelBufferArrayNonUniformIndexing = false;
655 features->shaderStorageTexelBufferArrayNonUniformIndexing = false;
656 features->descriptorBindingUniformBufferUpdateAfterBind = false;
657 features->descriptorBindingSampledImageUpdateAfterBind = false;
658 features->descriptorBindingStorageImageUpdateAfterBind = false;
659 features->descriptorBindingStorageBufferUpdateAfterBind = false;
660 features->descriptorBindingUniformTexelBufferUpdateAfterBind = false;
661 features->descriptorBindingStorageTexelBufferUpdateAfterBind = false;
662 features->descriptorBindingUpdateUnusedWhilePending = false;
663 features->descriptorBindingPartiallyBound = false;
664 features->descriptorBindingVariableDescriptorCount = false;
665 features->runtimeDescriptorArray = false;
666 break;
667 }
668 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_CONDITIONAL_RENDERING_FEATURES_EXT: {
669 VkPhysicalDeviceConditionalRenderingFeaturesEXT *features =
670 (VkPhysicalDeviceConditionalRenderingFeaturesEXT *)ext;
671 features->conditionalRendering = false;
672 features->inheritedConditionalRendering = false;
673 break;
674 }
675 default:
676 break;
677 }
678 }
679 return tu_GetPhysicalDeviceFeatures(physicalDevice, &pFeatures->features);
680 }
681
682 void
683 tu_GetPhysicalDeviceProperties(VkPhysicalDevice physicalDevice,
684 VkPhysicalDeviceProperties *pProperties)
685 {
686 TU_FROM_HANDLE(tu_physical_device, pdevice, physicalDevice);
687 VkSampleCountFlags sample_counts = 0xf;
688
689 /* Make sure that the entire descriptor set is addressable with a signed
690 * 32-bit int, i.e. the sum of all limits scaled by descriptor size has to
691 * be at most 2 GiB. A combined image & sampler descriptor counts as one of
692 * each. This limit is for the pipeline layout, not for the set layout, but
693 * there is no set limit, so we just set a pipeline limit. I don't think
694 * any app is going to hit this soon. */
695 size_t max_descriptor_set_size =
696 ((1ull << 31) - 16 * MAX_DYNAMIC_BUFFERS) /
697 (32 /* uniform buffer, 32 due to potential space wasted on alignment */ +
698 32 /* storage buffer, 32 due to potential space wasted on alignment */ +
699 32 /* sampler, largest when combined with image */ +
700 64 /* sampled image */ + 64 /* storage image */);
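/* The divisor above sums to 224 bytes, so (ignoring the small
 * MAX_DYNAMIC_BUFFERS correction) this works out to about 2^31 / 224,
 * i.e. roughly 9.5 million descriptors of each type per stage.
 */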
701
702 VkPhysicalDeviceLimits limits = {
703 .maxImageDimension1D = (1 << 14),
704 .maxImageDimension2D = (1 << 14),
705 .maxImageDimension3D = (1 << 11),
706 .maxImageDimensionCube = (1 << 14),
707 .maxImageArrayLayers = (1 << 11),
708 .maxTexelBufferElements = 128 * 1024 * 1024,
709 .maxUniformBufferRange = UINT32_MAX,
710 .maxStorageBufferRange = UINT32_MAX,
711 .maxPushConstantsSize = MAX_PUSH_CONSTANTS_SIZE,
712 .maxMemoryAllocationCount = UINT32_MAX,
713 .maxSamplerAllocationCount = 64 * 1024,
714 .bufferImageGranularity = 64, /* A cache line */
715 .sparseAddressSpaceSize = 0xffffffffu, /* buffer max size */
716 .maxBoundDescriptorSets = MAX_SETS,
717 .maxPerStageDescriptorSamplers = max_descriptor_set_size,
718 .maxPerStageDescriptorUniformBuffers = max_descriptor_set_size,
719 .maxPerStageDescriptorStorageBuffers = max_descriptor_set_size,
720 .maxPerStageDescriptorSampledImages = max_descriptor_set_size,
721 .maxPerStageDescriptorStorageImages = max_descriptor_set_size,
722 .maxPerStageDescriptorInputAttachments = max_descriptor_set_size,
723 .maxPerStageResources = max_descriptor_set_size,
724 .maxDescriptorSetSamplers = max_descriptor_set_size,
725 .maxDescriptorSetUniformBuffers = max_descriptor_set_size,
726 .maxDescriptorSetUniformBuffersDynamic = MAX_DYNAMIC_UNIFORM_BUFFERS,
727 .maxDescriptorSetStorageBuffers = max_descriptor_set_size,
728 .maxDescriptorSetStorageBuffersDynamic = MAX_DYNAMIC_STORAGE_BUFFERS,
729 .maxDescriptorSetSampledImages = max_descriptor_set_size,
730 .maxDescriptorSetStorageImages = max_descriptor_set_size,
731 .maxDescriptorSetInputAttachments = max_descriptor_set_size,
732 .maxVertexInputAttributes = 32,
733 .maxVertexInputBindings = 32,
734 .maxVertexInputAttributeOffset = 2047,
735 .maxVertexInputBindingStride = 2048,
736 .maxVertexOutputComponents = 128,
737 .maxTessellationGenerationLevel = 64,
738 .maxTessellationPatchSize = 32,
739 .maxTessellationControlPerVertexInputComponents = 128,
740 .maxTessellationControlPerVertexOutputComponents = 128,
741 .maxTessellationControlPerPatchOutputComponents = 120,
742 .maxTessellationControlTotalOutputComponents = 4096,
743 .maxTessellationEvaluationInputComponents = 128,
744 .maxTessellationEvaluationOutputComponents = 128,
745 .maxGeometryShaderInvocations = 127,
746 .maxGeometryInputComponents = 64,
747 .maxGeometryOutputComponents = 128,
748 .maxGeometryOutputVertices = 256,
749 .maxGeometryTotalOutputComponents = 1024,
750 .maxFragmentInputComponents = 128,
751 .maxFragmentOutputAttachments = 8,
752 .maxFragmentDualSrcAttachments = 1,
753 .maxFragmentCombinedOutputResources = 8,
754 .maxComputeSharedMemorySize = 32768,
755 .maxComputeWorkGroupCount = { 65535, 65535, 65535 },
756 .maxComputeWorkGroupInvocations = 2048,
757 .maxComputeWorkGroupSize = { 2048, 2048, 2048 },
758 .subPixelPrecisionBits = 4 /* FIXME */,
759 .subTexelPrecisionBits = 4 /* FIXME */,
760 .mipmapPrecisionBits = 4 /* FIXME */,
761 .maxDrawIndexedIndexValue = UINT32_MAX,
762 .maxDrawIndirectCount = UINT32_MAX,
763 .maxSamplerLodBias = 16,
764 .maxSamplerAnisotropy = 16,
765 .maxViewports = MAX_VIEWPORTS,
766 .maxViewportDimensions = { (1 << 14), (1 << 14) },
767 .viewportBoundsRange = { INT16_MIN, INT16_MAX },
768 .viewportSubPixelBits = 8,
769 .minMemoryMapAlignment = 4096, /* A page */
770 .minTexelBufferOffsetAlignment = 1,
771 .minUniformBufferOffsetAlignment = 4,
772 .minStorageBufferOffsetAlignment = 4,
773 .minTexelOffset = -32,
774 .maxTexelOffset = 31,
775 .minTexelGatherOffset = -32,
776 .maxTexelGatherOffset = 31,
777 .minInterpolationOffset = -2,
778 .maxInterpolationOffset = 2,
779 .subPixelInterpolationOffsetBits = 8,
780 .maxFramebufferWidth = (1 << 14),
781 .maxFramebufferHeight = (1 << 14),
782 .maxFramebufferLayers = (1 << 10),
783 .framebufferColorSampleCounts = sample_counts,
784 .framebufferDepthSampleCounts = sample_counts,
785 .framebufferStencilSampleCounts = sample_counts,
786 .framebufferNoAttachmentsSampleCounts = sample_counts,
787 .maxColorAttachments = MAX_RTS,
788 .sampledImageColorSampleCounts = sample_counts,
789 .sampledImageIntegerSampleCounts = VK_SAMPLE_COUNT_1_BIT,
790 .sampledImageDepthSampleCounts = sample_counts,
791 .sampledImageStencilSampleCounts = sample_counts,
792 .storageImageSampleCounts = VK_SAMPLE_COUNT_1_BIT,
793 .maxSampleMaskWords = 1,
794 .timestampComputeAndGraphics = true,
795 .timestampPeriod = 1,
796 .maxClipDistances = 8,
797 .maxCullDistances = 8,
798 .maxCombinedClipAndCullDistances = 8,
799 .discreteQueuePriorities = 1,
800 .pointSizeRange = { 0.125, 255.875 },
801 .lineWidthRange = { 0.0, 7.9921875 },
802 .pointSizeGranularity = (1.0 / 8.0),
803 .lineWidthGranularity = (1.0 / 128.0),
804 .strictLines = false, /* FINISHME */
805 .standardSampleLocations = true,
806 .optimalBufferCopyOffsetAlignment = 128,
807 .optimalBufferCopyRowPitchAlignment = 128,
808 .nonCoherentAtomSize = 64,
809 };
810
811 *pProperties = (VkPhysicalDeviceProperties){
812 .apiVersion = tu_physical_device_api_version(pdevice),
813 .driverVersion = vk_get_driver_version(),
814 .vendorID = 0, /* TODO */
815 .deviceID = 0,
816 .deviceType = VK_PHYSICAL_DEVICE_TYPE_INTEGRATED_GPU,
817 .limits = limits,
818 .sparseProperties = { 0 },
819 };
820
821 strcpy(pProperties->deviceName, pdevice->name);
822 memcpy(pProperties->pipelineCacheUUID, pdevice->cache_uuid, VK_UUID_SIZE);
823 }
824
825 void
826 tu_GetPhysicalDeviceProperties2(VkPhysicalDevice physicalDevice,
827 VkPhysicalDeviceProperties2KHR *pProperties)
828 {
829 TU_FROM_HANDLE(tu_physical_device, pdevice, physicalDevice);
830 tu_GetPhysicalDeviceProperties(physicalDevice, &pProperties->properties);
831
832 vk_foreach_struct(ext, pProperties->pNext)
833 {
834 switch (ext->sType) {
835 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PUSH_DESCRIPTOR_PROPERTIES_KHR: {
836 VkPhysicalDevicePushDescriptorPropertiesKHR *properties =
837 (VkPhysicalDevicePushDescriptorPropertiesKHR *)ext;
838 properties->maxPushDescriptors = MAX_PUSH_DESCRIPTORS;
839 break;
840 }
841 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_ID_PROPERTIES_KHR: {
842 VkPhysicalDeviceIDPropertiesKHR *properties =
843 (VkPhysicalDeviceIDPropertiesKHR *)ext;
844 memcpy(properties->driverUUID, pdevice->driver_uuid, VK_UUID_SIZE);
845 memcpy(properties->deviceUUID, pdevice->device_uuid, VK_UUID_SIZE);
846 properties->deviceLUIDValid = false;
847 break;
848 }
849 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MULTIVIEW_PROPERTIES_KHR: {
850 VkPhysicalDeviceMultiviewPropertiesKHR *properties =
851 (VkPhysicalDeviceMultiviewPropertiesKHR *)ext;
852 properties->maxMultiviewViewCount = MAX_VIEWS;
853 properties->maxMultiviewInstanceIndex = INT_MAX;
854 break;
855 }
856 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_POINT_CLIPPING_PROPERTIES_KHR: {
857 VkPhysicalDevicePointClippingPropertiesKHR *properties =
858 (VkPhysicalDevicePointClippingPropertiesKHR *)ext;
859 properties->pointClippingBehavior =
860 VK_POINT_CLIPPING_BEHAVIOR_ALL_CLIP_PLANES_KHR;
861 break;
862 }
863 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MAINTENANCE_3_PROPERTIES: {
864 VkPhysicalDeviceMaintenance3Properties *properties =
865 (VkPhysicalDeviceMaintenance3Properties *)ext;
866 /* Make sure everything is addressable by a signed 32-bit int, and
867 * our largest descriptors are 96 bytes. */
868 properties->maxPerSetDescriptors = (1ull << 31) / 96;
869 /* Our buffer size fields allow only this much */
870 properties->maxMemoryAllocationSize = 0xFFFFFFFFull;
871 break;
872 }
873 default:
874 break;
875 }
876 }
877 }
878
879 static const VkQueueFamilyProperties
880 tu_queue_family_properties = {
881 .queueFlags = VK_QUEUE_GRAPHICS_BIT |
882 VK_QUEUE_COMPUTE_BIT |
883 VK_QUEUE_TRANSFER_BIT,
884 .queueCount = 1,
885 .timestampValidBits = 64,
886 .minImageTransferGranularity = (VkExtent3D) { 1, 1, 1 },
887 };
888
889 void
890 tu_GetPhysicalDeviceQueueFamilyProperties(
891 VkPhysicalDevice physicalDevice,
892 uint32_t *pQueueFamilyPropertyCount,
893 VkQueueFamilyProperties *pQueueFamilyProperties)
894 {
895 VK_OUTARRAY_MAKE(out, pQueueFamilyProperties, pQueueFamilyPropertyCount);
896
897 vk_outarray_append(&out, p) {
898 *p = tu_queue_family_properties;
899 }
900 }
901
902 void
903 tu_GetPhysicalDeviceQueueFamilyProperties2(
904 VkPhysicalDevice physicalDevice,
905 uint32_t *pQueueFamilyPropertyCount,
906 VkQueueFamilyProperties2KHR *pQueueFamilyProperties)
907 {
908 VK_OUTARRAY_MAKE(out, pQueueFamilyProperties, pQueueFamilyPropertyCount);
909
910 vk_outarray_append(&out, p) {
911 p->queueFamilyProperties = tu_queue_family_properties;
912 }
913 }
914
915 static uint64_t
916 tu_get_system_heap_size(void)
917 {
918 struct sysinfo info;
919 sysinfo(&info);
920
921 uint64_t total_ram = (uint64_t)info.totalram * (uint64_t)info.mem_unit;
922
923 /* We don't want to burn too much ram with the GPU. If the user has 4GiB
924 * or less, we use at most half. If they have more than 4GiB, we use 3/4.
925 */
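/* E.g. 4 GiB of RAM yields a 2 GiB heap, while 16 GiB yields 12 GiB. */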
926 uint64_t available_ram;
927 if (total_ram <= 4ull * 1024ull * 1024ull * 1024ull)
928 available_ram = total_ram / 2;
929 else
930 available_ram = total_ram * 3 / 4;
931
932 return available_ram;
933 }
934
935 void
936 tu_GetPhysicalDeviceMemoryProperties(
937 VkPhysicalDevice physicalDevice,
938 VkPhysicalDeviceMemoryProperties *pMemoryProperties)
939 {
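/* Adreno GPUs share system RAM with the CPU (UMA), so a single heap that is
 * at once DEVICE_LOCAL, HOST_VISIBLE and HOST_COHERENT matches the hardware.
 */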
940 pMemoryProperties->memoryHeapCount = 1;
941 pMemoryProperties->memoryHeaps[0].size = tu_get_system_heap_size();
942 pMemoryProperties->memoryHeaps[0].flags = VK_MEMORY_HEAP_DEVICE_LOCAL_BIT;
943
944 pMemoryProperties->memoryTypeCount = 1;
945 pMemoryProperties->memoryTypes[0].propertyFlags = VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT |
946 VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT |
947 VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
948 pMemoryProperties->memoryTypes[0].heapIndex = 0;
949 }
950
951 void
952 tu_GetPhysicalDeviceMemoryProperties2(
953 VkPhysicalDevice physicalDevice,
954 VkPhysicalDeviceMemoryProperties2KHR *pMemoryProperties)
955 {
956 return tu_GetPhysicalDeviceMemoryProperties(
957 physicalDevice, &pMemoryProperties->memoryProperties);
958 }
959
960 static VkResult
961 tu_queue_init(struct tu_device *device,
962 struct tu_queue *queue,
963 uint32_t queue_family_index,
964 int idx,
965 VkDeviceQueueCreateFlags flags)
966 {
967 queue->_loader_data.loaderMagic = ICD_LOADER_MAGIC;
968 queue->device = device;
969 queue->queue_family_index = queue_family_index;
970 queue->queue_idx = idx;
971 queue->flags = flags;
972
973 return VK_SUCCESS;
974 }
975
976 static void
977 tu_queue_finish(struct tu_queue *queue)
978 {
979 }
980
981 static int
982 tu_get_device_extension_index(const char *name)
983 {
984 for (unsigned i = 0; i < TU_DEVICE_EXTENSION_COUNT; ++i) {
985 if (strcmp(name, tu_device_extensions[i].extensionName) == 0)
986 return i;
987 }
988 return -1;
989 }
990
991 VkResult
992 tu_CreateDevice(VkPhysicalDevice physicalDevice,
993 const VkDeviceCreateInfo *pCreateInfo,
994 const VkAllocationCallbacks *pAllocator,
995 VkDevice *pDevice)
996 {
997 TU_FROM_HANDLE(tu_physical_device, physical_device, physicalDevice);
998 VkResult result;
999 struct tu_device *device;
1000
1001 /* Check enabled features */
1002 if (pCreateInfo->pEnabledFeatures) {
1003 VkPhysicalDeviceFeatures supported_features;
1004 tu_GetPhysicalDeviceFeatures(physicalDevice, &supported_features);
1005 VkBool32 *supported_feature = (VkBool32 *)&supported_features;
1006 VkBool32 *enabled_feature = (VkBool32 *)pCreateInfo->pEnabledFeatures;
1007 unsigned num_features =
1008 sizeof(VkPhysicalDeviceFeatures) / sizeof(VkBool32);
1009 for (uint32_t i = 0; i < num_features; i++) {
1010 if (enabled_feature[i] && !supported_feature[i])
1011 return vk_error(physical_device->instance,
1012 VK_ERROR_FEATURE_NOT_PRESENT);
1013 }
1014 }
1015
1016 device = vk_zalloc2(&physical_device->instance->alloc,
1017 pAllocator,
1018 sizeof(*device),
1019 8,
1020 VK_SYSTEM_ALLOCATION_SCOPE_DEVICE);
1021 if (!device)
1022 return vk_error(physical_device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
1023
1024 device->_loader_data.loaderMagic = ICD_LOADER_MAGIC;
1025 device->instance = physical_device->instance;
1026 device->physical_device = physical_device;
1027
1028 if (pAllocator)
1029 device->alloc = *pAllocator;
1030 else
1031 device->alloc = physical_device->instance->alloc;
1032
1033 for (uint32_t i = 0; i < pCreateInfo->enabledExtensionCount; i++) {
1034 const char *ext_name = pCreateInfo->ppEnabledExtensionNames[i];
1035 int index = tu_get_device_extension_index(ext_name);
1036 if (index < 0 ||
1037 !physical_device->supported_extensions.extensions[index]) {
1038 vk_free(&device->alloc, device);
1039 return vk_error(physical_device->instance,
1040 VK_ERROR_EXTENSION_NOT_PRESENT);
1041 }
1042
1043 device->enabled_extensions.extensions[index] = true;
1044 }
1045
1046 for (unsigned i = 0; i < pCreateInfo->queueCreateInfoCount; i++) {
1047 const VkDeviceQueueCreateInfo *queue_create =
1048 &pCreateInfo->pQueueCreateInfos[i];
1049 uint32_t qfi = queue_create->queueFamilyIndex;
1050 device->queues[qfi] =
1051 vk_alloc(&device->alloc,
1052 queue_create->queueCount * sizeof(struct tu_queue),
1053 8,
1054 VK_SYSTEM_ALLOCATION_SCOPE_DEVICE);
1055 if (!device->queues[qfi]) {
1056 result = VK_ERROR_OUT_OF_HOST_MEMORY;
1057 goto fail;
1058 }
1059
1060 memset(device->queues[qfi],
1061 0,
1062 queue_create->queueCount * sizeof(struct tu_queue));
1063
1064 device->queue_count[qfi] = queue_create->queueCount;
1065
1066 for (unsigned q = 0; q < queue_create->queueCount; q++) {
1067 result = tu_queue_init(
1068 device, &device->queues[qfi][q], qfi, q, queue_create->flags);
1069 if (result != VK_SUCCESS)
1070 goto fail;
1071 }
1072 }
1073
1074 VkPipelineCacheCreateInfo ci;
1075 ci.sType = VK_STRUCTURE_TYPE_PIPELINE_CACHE_CREATE_INFO;
1076 ci.pNext = NULL;
1077 ci.flags = 0;
1078 ci.pInitialData = NULL;
1079 ci.initialDataSize = 0;
1080 VkPipelineCache pc;
1081 result =
1082 tu_CreatePipelineCache(tu_device_to_handle(device), &ci, NULL, &pc);
1083 if (result != VK_SUCCESS)
1084 goto fail;
1085
1086 device->mem_cache = tu_pipeline_cache_from_handle(pc);
1087
1088 *pDevice = tu_device_to_handle(device);
1089 return VK_SUCCESS;
1090
1091 fail:
1092 for (unsigned i = 0; i < TU_MAX_QUEUE_FAMILIES; i++) {
1093 for (unsigned q = 0; q < device->queue_count[i]; q++)
1094 tu_queue_finish(&device->queues[i][q]);
1095 if (device->queue_count[i])
1096 vk_free(&device->alloc, device->queues[i]);
1097 }
1098
1099 vk_free(&device->alloc, device);
1100 return result;
1101 }
1102
1103 void
1104 tu_DestroyDevice(VkDevice _device, const VkAllocationCallbacks *pAllocator)
1105 {
1106 TU_FROM_HANDLE(tu_device, device, _device);
1107
1108 if (!device)
1109 return;
1110
1111 for (unsigned i = 0; i < TU_MAX_QUEUE_FAMILIES; i++) {
1112 for (unsigned q = 0; q < device->queue_count[i]; q++)
1113 tu_queue_finish(&device->queues[i][q]);
1114 if (device->queue_count[i])
1115 vk_free(&device->alloc, device->queues[i]);
1116 }
1117
1118 VkPipelineCache pc = tu_pipeline_cache_to_handle(device->mem_cache);
1119 tu_DestroyPipelineCache(tu_device_to_handle(device), pc, NULL);
1120
1121 vk_free(&device->alloc, device);
1122 }
1123
1124 VkResult
1125 tu_EnumerateInstanceLayerProperties(uint32_t *pPropertyCount,
1126 VkLayerProperties *pProperties)
1127 {
1128 *pPropertyCount = 0;
1129 return VK_SUCCESS;
1130 }
1131
1132 VkResult
1133 tu_EnumerateDeviceLayerProperties(VkPhysicalDevice physicalDevice,
1134 uint32_t *pPropertyCount,
1135 VkLayerProperties *pProperties)
1136 {
1137 *pPropertyCount = 0;
1138 return VK_SUCCESS;
1139 }
1140
1141 void
1142 tu_GetDeviceQueue2(VkDevice _device,
1143 const VkDeviceQueueInfo2 *pQueueInfo,
1144 VkQueue *pQueue)
1145 {
1146 TU_FROM_HANDLE(tu_device, device, _device);
1147 struct tu_queue *queue;
1148
1149 queue =
1150 &device->queues[pQueueInfo->queueFamilyIndex][pQueueInfo->queueIndex];
1151 if (pQueueInfo->flags != queue->flags) {
1152 /* From the Vulkan 1.1.70 spec:
1153 *
1154 * "The queue returned by vkGetDeviceQueue2 must have the same
1155 * flags value from this structure as that used at device
1156 * creation time in a VkDeviceQueueCreateInfo instance. If no
1157 * matching flags were specified at device creation time then
1158 * pQueue will return VK_NULL_HANDLE."
1159 */
1160 *pQueue = VK_NULL_HANDLE;
1161 return;
1162 }
1163
1164 *pQueue = tu_queue_to_handle(queue);
1165 }
1166
1167 void
1168 tu_GetDeviceQueue(VkDevice _device,
1169 uint32_t queueFamilyIndex,
1170 uint32_t queueIndex,
1171 VkQueue *pQueue)
1172 {
1173 const VkDeviceQueueInfo2 info =
1174 (VkDeviceQueueInfo2){.sType = VK_STRUCTURE_TYPE_DEVICE_QUEUE_INFO_2,
1175 .queueFamilyIndex = queueFamilyIndex,
1176 .queueIndex = queueIndex };
1177
1178 tu_GetDeviceQueue2(_device, &info, pQueue);
1179 }
1180
1181 VkResult
1182 tu_QueueSubmit(VkQueue _queue,
1183 uint32_t submitCount,
1184 const VkSubmitInfo *pSubmits,
1185 VkFence _fence)
1186 {
1187 return VK_SUCCESS;
1188 }
1189
1190 VkResult
1191 tu_QueueWaitIdle(VkQueue _queue)
1192 {
1193 return VK_SUCCESS;
1194 }
1195
1196 VkResult
1197 tu_DeviceWaitIdle(VkDevice _device)
1198 {
1199 TU_FROM_HANDLE(tu_device, device, _device);
1200
1201 for (unsigned i = 0; i < TU_MAX_QUEUE_FAMILIES; i++) {
1202 for (unsigned q = 0; q < device->queue_count[i]; q++) {
1203 tu_QueueWaitIdle(tu_queue_to_handle(&device->queues[i][q]));
1204 }
1205 }
1206 return VK_SUCCESS;
1207 }
1208
1209 VkResult
1210 tu_EnumerateInstanceExtensionProperties(const char *pLayerName,
1211 uint32_t *pPropertyCount,
1212 VkExtensionProperties *pProperties)
1213 {
1214 VK_OUTARRAY_MAKE(out, pProperties, pPropertyCount);
1215
1216 /* We support no layers */
1217 if (pLayerName)
1218 return vk_error(NULL, VK_ERROR_LAYER_NOT_PRESENT);
1219
1220 for (int i = 0; i < TU_INSTANCE_EXTENSION_COUNT; i++) {
1221 if (tu_supported_instance_extensions.extensions[i]) {
1222 vk_outarray_append(&out, prop) { *prop = tu_instance_extensions[i]; }
1223 }
1224 }
1225
1226 return vk_outarray_status(&out);
1227 }
1228
1229 VkResult
1230 tu_EnumerateDeviceExtensionProperties(VkPhysicalDevice physicalDevice,
1231 const char *pLayerName,
1232 uint32_t *pPropertyCount,
1233 VkExtensionProperties *pProperties)
1234 {
1236 TU_FROM_HANDLE(tu_physical_device, device, physicalDevice);
1237 VK_OUTARRAY_MAKE(out, pProperties, pPropertyCount);
1238
1239 /* We support no layers */
1240 if (pLayerName)
1241 return vk_error(NULL, VK_ERROR_LAYER_NOT_PRESENT);
1242
1243 for (int i = 0; i < TU_DEVICE_EXTENSION_COUNT; i++) {
1244 if (device->supported_extensions.extensions[i]) {
1245 vk_outarray_append(&out, prop) { *prop = tu_device_extensions[i]; }
1246 }
1247 }
1248
1249 return vk_outarray_status(&out);
1250 }
1251
1252 PFN_vkVoidFunction
1253 tu_GetInstanceProcAddr(VkInstance _instance, const char *pName)
1254 {
1255 TU_FROM_HANDLE(tu_instance, instance, _instance);
1256
1257 return tu_lookup_entrypoint_checked(pName,
1258 instance ? instance->api_version : 0,
1259 instance ? &instance->enabled_extensions
1260 : NULL,
1261 NULL);
1262 }
1263
1264 /* The loader wants us to expose a second GetInstanceProcAddr function
1265 * to work around certain LD_PRELOAD issues seen in apps.
1266 */
1267 PUBLIC
1268 VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL
1269 vk_icdGetInstanceProcAddr(VkInstance instance, const char *pName);
1270
1271 PUBLIC
1272 VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL
1273 vk_icdGetInstanceProcAddr(VkInstance instance, const char *pName)
1274 {
1275 return tu_GetInstanceProcAddr(instance, pName);
1276 }
1277
1278 PFN_vkVoidFunction
1279 tu_GetDeviceProcAddr(VkDevice _device, const char *pName)
1280 {
1281 TU_FROM_HANDLE(tu_device, device, _device);
1282
1283 return tu_lookup_entrypoint_checked(pName,
1284 device->instance->api_version,
1285 &device->instance->enabled_extensions,
1286 &device->enabled_extensions);
1287 }
1288
1289 static VkResult
1290 tu_alloc_memory(struct tu_device *device,
1291 const VkMemoryAllocateInfo *pAllocateInfo,
1292 const VkAllocationCallbacks *pAllocator,
1293 VkDeviceMemory *pMem)
1294 {
1295 struct tu_device_memory *mem;
1296 VkResult result;
1297
1298 assert(pAllocateInfo->sType == VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO);
1299
1300 if (pAllocateInfo->allocationSize == 0) {
1301 /* Apparently, this is allowed */
1302 *pMem = VK_NULL_HANDLE;
1303 return VK_SUCCESS;
1304 }
1305
1306 mem = vk_alloc2(&device->alloc,
1307 pAllocator,
1308 sizeof(*mem),
1309 8,
1310 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
1311 if (mem == NULL)
1312 return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
1313
1314 result = tu_bo_init_new(device, &mem->bo, pAllocateInfo->allocationSize);
1315 if (result != VK_SUCCESS) {
1316 vk_free2(&device->alloc, pAllocator, mem);
1317 return result;
1318 }
1319
1320 mem->size = pAllocateInfo->allocationSize;
1321 mem->type_index = pAllocateInfo->memoryTypeIndex;
1322
1323 mem->map = NULL;
1324 mem->user_ptr = NULL;
1325
1326 *pMem = tu_device_memory_to_handle(mem);
1327
1328 return VK_SUCCESS;
1329 }
1330
1331 VkResult
1332 tu_AllocateMemory(VkDevice _device,
1333 const VkMemoryAllocateInfo *pAllocateInfo,
1334 const VkAllocationCallbacks *pAllocator,
1335 VkDeviceMemory *pMem)
1336 {
1337 TU_FROM_HANDLE(tu_device, device, _device);
1338 return tu_alloc_memory(device, pAllocateInfo, pAllocator, pMem);
1339 }
1340
1341 void
1342 tu_FreeMemory(VkDevice _device,
1343 VkDeviceMemory _mem,
1344 const VkAllocationCallbacks *pAllocator)
1345 {
1346 TU_FROM_HANDLE(tu_device, device, _device);
1347 TU_FROM_HANDLE(tu_device_memory, mem, _mem);
1348
1349 if (mem == NULL)
1350 return;
1351
1352 tu_bo_finish(device, &mem->bo);
1353 vk_free2(&device->alloc, pAllocator, mem);
1354 }
1355
1356 VkResult
1357 tu_MapMemory(VkDevice _device,
1358 VkDeviceMemory _memory,
1359 VkDeviceSize offset,
1360 VkDeviceSize size,
1361 VkMemoryMapFlags flags,
1362 void **ppData)
1363 {
1364 TU_FROM_HANDLE(tu_device, device, _device);
1365 TU_FROM_HANDLE(tu_device_memory, mem, _memory);
1366 VkResult result;
1367
1368 if (mem == NULL) {
1369 *ppData = NULL;
1370 return VK_SUCCESS;
1371 }
1372
1373 if (mem->user_ptr) {
1374 *ppData = mem->user_ptr;
1375 } else if (!mem->map) {
1376 result = tu_bo_map(device, &mem->bo);
1377 if (result != VK_SUCCESS)
1378 return result;
1379 *ppData = mem->map = mem->bo.map;
1380 } else
1381 *ppData = mem->map;
1382
1383 if (*ppData) {
1384 *ppData += offset;
1385 return VK_SUCCESS;
1386 }
1387
1388 return vk_error(device->instance, VK_ERROR_MEMORY_MAP_FAILED);
1389 }
1390
1391 void
1392 tu_UnmapMemory(VkDevice _device, VkDeviceMemory _memory)
1393 {
1394 /* I do not see any unmapping done by the freedreno Gallium driver. */
1395 }
1396
1397 VkResult
1398 tu_FlushMappedMemoryRanges(VkDevice _device,
1399 uint32_t memoryRangeCount,
1400 const VkMappedMemoryRange *pMemoryRanges)
1401 {
1402 return VK_SUCCESS;
1403 }
1404
1405 VkResult
1406 tu_InvalidateMappedMemoryRanges(VkDevice _device,
1407 uint32_t memoryRangeCount,
1408 const VkMappedMemoryRange *pMemoryRanges)
1409 {
1410 return VK_SUCCESS;
1411 }
1412
1413 void
1414 tu_GetBufferMemoryRequirements(VkDevice _device,
1415 VkBuffer _buffer,
1416 VkMemoryRequirements *pMemoryRequirements)
1417 {
1418 TU_FROM_HANDLE(tu_buffer, buffer, _buffer);
1419
1420 pMemoryRequirements->memoryTypeBits = 1;
1421 pMemoryRequirements->alignment = 16;
1422 pMemoryRequirements->size =
1423 align64(buffer->size, pMemoryRequirements->alignment);
1424 }
1425
1426 void
1427 tu_GetBufferMemoryRequirements2(
1428 VkDevice device,
1429 const VkBufferMemoryRequirementsInfo2KHR *pInfo,
1430 VkMemoryRequirements2KHR *pMemoryRequirements)
1431 {
1432 tu_GetBufferMemoryRequirements(
1433 device, pInfo->buffer, &pMemoryRequirements->memoryRequirements);
1434 }
1435
1436 void
1437 tu_GetImageMemoryRequirements(VkDevice _device,
1438 VkImage _image,
1439 VkMemoryRequirements *pMemoryRequirements)
1440 {
1441 TU_FROM_HANDLE(tu_image, image, _image);
1442
1443 pMemoryRequirements->memoryTypeBits = 1;
1444 pMemoryRequirements->size = image->size;
1445 pMemoryRequirements->alignment = image->alignment;
1446 }
1447
1448 void
1449 tu_GetImageMemoryRequirements2(VkDevice device,
1450 const VkImageMemoryRequirementsInfo2KHR *pInfo,
1451 VkMemoryRequirements2KHR *pMemoryRequirements)
1452 {
1453 tu_GetImageMemoryRequirements(
1454 device, pInfo->image, &pMemoryRequirements->memoryRequirements);
1455 }
1456
1457 void
1458 tu_GetImageSparseMemoryRequirements(
1459 VkDevice device,
1460 VkImage image,
1461 uint32_t *pSparseMemoryRequirementCount,
1462 VkSparseImageMemoryRequirements *pSparseMemoryRequirements)
1463 {
1464 tu_stub();
1465 }
1466
1467 void
1468 tu_GetImageSparseMemoryRequirements2(
1469 VkDevice device,
1470 const VkImageSparseMemoryRequirementsInfo2KHR *pInfo,
1471 uint32_t *pSparseMemoryRequirementCount,
1472 VkSparseImageMemoryRequirements2KHR *pSparseMemoryRequirements)
1473 {
1474 tu_stub();
1475 }
1476
1477 void
1478 tu_GetDeviceMemoryCommitment(VkDevice device,
1479 VkDeviceMemory memory,
1480 VkDeviceSize *pCommittedMemoryInBytes)
1481 {
1482 *pCommittedMemoryInBytes = 0;
1483 }
1484
1485 VkResult
1486 tu_BindBufferMemory2(VkDevice device,
1487 uint32_t bindInfoCount,
1488 const VkBindBufferMemoryInfoKHR *pBindInfos)
1489 {
1490 return VK_SUCCESS;
1491 }
1492
1493 VkResult
1494 tu_BindBufferMemory(VkDevice device,
1495 VkBuffer buffer,
1496 VkDeviceMemory memory,
1497 VkDeviceSize memoryOffset)
1498 {
1499 const VkBindBufferMemoryInfoKHR info = {
1500 .sType = VK_STRUCTURE_TYPE_BIND_BUFFER_MEMORY_INFO_KHR,
1501 .buffer = buffer,
1502 .memory = memory,
1503 .memoryOffset = memoryOffset
1504 };
1505
1506 return tu_BindBufferMemory2(device, 1, &info);
1507 }
1508
1509 VkResult
1510 tu_BindImageMemory2(VkDevice device,
1511 uint32_t bindInfoCount,
1512 const VkBindImageMemoryInfoKHR *pBindInfos)
1513 {
1514 return VK_SUCCESS;
1515 }
1516
1517 VkResult
1518 tu_BindImageMemory(VkDevice device,
1519 VkImage image,
1520 VkDeviceMemory memory,
1521 VkDeviceSize memoryOffset)
1522 {
1523 const VkBindImageMemoryInfoKHR info = {
1524 .sType = VK_STRUCTURE_TYPE_BIND_IMAGE_MEMORY_INFO_KHR,
1525 .image = image,
1526 .memory = memory,
1527 .memoryOffset = memoryOffset
1528 };
1529
1530 return tu_BindImageMemory2(device, 1, &info);
1531 }
1532
1533 VkResult
1534 tu_QueueBindSparse(VkQueue _queue,
1535 uint32_t bindInfoCount,
1536 const VkBindSparseInfo *pBindInfo,
1537 VkFence _fence)
1538 {
1539 return VK_SUCCESS;
1540 }
1541
1542 VkResult
1543 tu_CreateFence(VkDevice _device,
1544 const VkFenceCreateInfo *pCreateInfo,
1545 const VkAllocationCallbacks *pAllocator,
1546 VkFence *pFence)
1547 {
1548 TU_FROM_HANDLE(tu_device, device, _device);
1549
1550 struct tu_fence *fence = vk_alloc2(&device->alloc,
1551 pAllocator,
1552 sizeof(*fence),
1553 8,
1554 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
1555
1556 if (!fence)
1557 return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
1558
1559 *pFence = tu_fence_to_handle(fence);
1560
1561 return VK_SUCCESS;
1562 }
1563
1564 void
1565 tu_DestroyFence(VkDevice _device,
1566 VkFence _fence,
1567 const VkAllocationCallbacks *pAllocator)
1568 {
1569 TU_FROM_HANDLE(tu_device, device, _device);
1570 TU_FROM_HANDLE(tu_fence, fence, _fence);
1571
1572 if (!fence)
1573 return;
1574
1575 vk_free2(&device->alloc, pAllocator, fence);
1576 }
1577
1578 VkResult
1579 tu_WaitForFences(VkDevice _device,
1580 uint32_t fenceCount,
1581 const VkFence *pFences,
1582 VkBool32 waitAll,
1583 uint64_t timeout)
1584 {
1585 return VK_SUCCESS;
1586 }
1587
1588 VkResult
1589 tu_ResetFences(VkDevice _device, uint32_t fenceCount, const VkFence *pFences)
1590 {
1591 return VK_SUCCESS;
1592 }
1593
1594 VkResult
1595 tu_GetFenceStatus(VkDevice _device, VkFence _fence)
1596 {
1597 return VK_SUCCESS;
1598 }
1599
1600 // Queue semaphore functions
1601
1602 VkResult
1603 tu_CreateSemaphore(VkDevice _device,
1604 const VkSemaphoreCreateInfo *pCreateInfo,
1605 const VkAllocationCallbacks *pAllocator,
1606 VkSemaphore *pSemaphore)
1607 {
1608 TU_FROM_HANDLE(tu_device, device, _device);
1609
1610 struct tu_semaphore *sem = vk_alloc2(&device->alloc,
1611 pAllocator,
1612 sizeof(*sem),
1613 8,
1614 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
1615 if (!sem)
1616 return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
1617
1618 *pSemaphore = tu_semaphore_to_handle(sem);
1619 return VK_SUCCESS;
1620 }
1621
1622 void
1623 tu_DestroySemaphore(VkDevice _device,
1624 VkSemaphore _semaphore,
1625 const VkAllocationCallbacks *pAllocator)
1626 {
1627 TU_FROM_HANDLE(tu_device, device, _device);
1628 TU_FROM_HANDLE(tu_semaphore, sem, _semaphore);
1629 if (!_semaphore)
1630 return;
1631
1632 vk_free2(&device->alloc, pAllocator, sem);
1633 }
1634
1635 VkResult
1636 tu_CreateEvent(VkDevice _device,
1637 const VkEventCreateInfo *pCreateInfo,
1638 const VkAllocationCallbacks *pAllocator,
1639 VkEvent *pEvent)
1640 {
1641 TU_FROM_HANDLE(tu_device, device, _device);
1642 struct tu_event *event = vk_alloc2(&device->alloc,
1643 pAllocator,
1644 sizeof(*event),
1645 8,
1646 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
1647
1648 if (!event)
1649 return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
1650
1651 *pEvent = tu_event_to_handle(event);
1652
1653 return VK_SUCCESS;
1654 }
1655
1656 void
1657 tu_DestroyEvent(VkDevice _device,
1658 VkEvent _event,
1659 const VkAllocationCallbacks *pAllocator)
1660 {
1661 TU_FROM_HANDLE(tu_device, device, _device);
1662 TU_FROM_HANDLE(tu_event, event, _event);
1663
1664 if (!event)
1665 return;
1666 vk_free2(&device->alloc, pAllocator, event);
1667 }
1668
1669 VkResult
1670 tu_GetEventStatus(VkDevice _device, VkEvent _event)
1671 {
1672 TU_FROM_HANDLE(tu_event, event, _event);
1673
1674 if (*event->map == 1)
1675 return VK_EVENT_SET;
1676 return VK_EVENT_RESET;
1677 }
1678
1679 VkResult
1680 tu_SetEvent(VkDevice _device, VkEvent _event)
1681 {
1682 TU_FROM_HANDLE(tu_event, event, _event);
1683 *event->map = 1;
1684
1685 return VK_SUCCESS;
1686 }
1687
1688 VkResult
1689 tu_ResetEvent(VkDevice _device, VkEvent _event)
1690 {
1691 TU_FROM_HANDLE(tu_event, event, _event);
1692 *event->map = 0;
1693
1694 return VK_SUCCESS;
1695 }
1696
1697 VkResult
1698 tu_CreateBuffer(VkDevice _device,
1699 const VkBufferCreateInfo *pCreateInfo,
1700 const VkAllocationCallbacks *pAllocator,
1701 VkBuffer *pBuffer)
1702 {
1703 TU_FROM_HANDLE(tu_device, device, _device);
1704 struct tu_buffer *buffer;
1705
1706 assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO);
1707
1708 buffer = vk_alloc2(&device->alloc,
1709 pAllocator,
1710 sizeof(*buffer),
1711 8,
1712 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
1713 if (buffer == NULL)
1714 return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
1715
1716 buffer->size = pCreateInfo->size;
1717 buffer->usage = pCreateInfo->usage;
1718 buffer->flags = pCreateInfo->flags;
1719
1720 *pBuffer = tu_buffer_to_handle(buffer);
1721
1722 return VK_SUCCESS;
1723 }
1724
1725 void
1726 tu_DestroyBuffer(VkDevice _device,
1727 VkBuffer _buffer,
1728 const VkAllocationCallbacks *pAllocator)
1729 {
1730 TU_FROM_HANDLE(tu_device, device, _device);
1731 TU_FROM_HANDLE(tu_buffer, buffer, _buffer);
1732
1733 if (!buffer)
1734 return;
1735
1736 vk_free2(&device->alloc, pAllocator, buffer);
1737 }
1738
1739 static uint32_t
1740 tu_surface_max_layer_count(struct tu_image_view *iview)
1741 {
1742 return iview->type == VK_IMAGE_VIEW_TYPE_3D
1743 ? iview->extent.depth
1744 : (iview->base_layer + iview->layer_count);
1745 }
1746
1747 VkResult
1748 tu_CreateFramebuffer(VkDevice _device,
1749 const VkFramebufferCreateInfo *pCreateInfo,
1750 const VkAllocationCallbacks *pAllocator,
1751 VkFramebuffer *pFramebuffer)
1752 {
1753 TU_FROM_HANDLE(tu_device, device, _device);
1754 struct tu_framebuffer *framebuffer;
1755
1756 assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO);
1757
1758 size_t size =
1759 sizeof(*framebuffer) +
1760 sizeof(struct tu_attachment_info) * pCreateInfo->attachmentCount;
1761 framebuffer = vk_alloc2(
1762 &device->alloc, pAllocator, size, 8, VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
1763 if (framebuffer == NULL)
1764 return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
1765
1766 framebuffer->attachment_count = pCreateInfo->attachmentCount;
1767 framebuffer->width = pCreateInfo->width;
1768 framebuffer->height = pCreateInfo->height;
1769 framebuffer->layers = pCreateInfo->layers;
1770 for (uint32_t i = 0; i < pCreateInfo->attachmentCount; i++) {
1771 VkImageView _iview = pCreateInfo->pAttachments[i];
1772 struct tu_image_view *iview = tu_image_view_from_handle(_iview);
1773 framebuffer->attachments[i].attachment = iview;
1774
1775 framebuffer->width = MIN2(framebuffer->width, iview->extent.width);
1776 framebuffer->height = MIN2(framebuffer->height, iview->extent.height);
1777 framebuffer->layers =
1778 MIN2(framebuffer->layers, tu_surface_max_layer_count(iview));
1779 }
1780
1781 *pFramebuffer = tu_framebuffer_to_handle(framebuffer);
1782 return VK_SUCCESS;
1783 }
1784
1785 void
1786 tu_DestroyFramebuffer(VkDevice _device,
1787 VkFramebuffer _fb,
1788 const VkAllocationCallbacks *pAllocator)
1789 {
1790 TU_FROM_HANDLE(tu_device, device, _device);
1791 TU_FROM_HANDLE(tu_framebuffer, fb, _fb);
1792
1793 if (!fb)
1794 return;
1795 vk_free2(&device->alloc, pAllocator, fb);
1796 }
1797
1798 static void
1799 tu_init_sampler(struct tu_device *device,
1800 struct tu_sampler *sampler,
1801 const VkSamplerCreateInfo *pCreateInfo)
1802 {
1803 }
1804
1805 VkResult
1806 tu_CreateSampler(VkDevice _device,
1807 const VkSamplerCreateInfo *pCreateInfo,
1808 const VkAllocationCallbacks *pAllocator,
1809 VkSampler *pSampler)
1810 {
1811 TU_FROM_HANDLE(tu_device, device, _device);
1812 struct tu_sampler *sampler;
1813
1814 assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_SAMPLER_CREATE_INFO);
1815
1816 sampler = vk_alloc2(&device->alloc,
1817 pAllocator,
1818 sizeof(*sampler),
1819 8,
1820 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
1821 if (!sampler)
1822 return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
1823
1824 tu_init_sampler(device, sampler, pCreateInfo);
1825 *pSampler = tu_sampler_to_handle(sampler);
1826
1827 return VK_SUCCESS;
1828 }
1829
1830 void
1831 tu_DestroySampler(VkDevice _device,
1832 VkSampler _sampler,
1833 const VkAllocationCallbacks *pAllocator)
1834 {
1835 TU_FROM_HANDLE(tu_device, device, _device);
1836 TU_FROM_HANDLE(tu_sampler, sampler, _sampler);
1837
1838 if (!sampler)
1839 return;
1840 vk_free2(&device->alloc, pAllocator, sampler);
1841 }
1842
1843 /* vk_icd.h does not declare this function, so we declare it here to
1844 * suppress Wmissing-prototypes.
1845 */
1846 PUBLIC VKAPI_ATTR VkResult VKAPI_CALL
1847 vk_icdNegotiateLoaderICDInterfaceVersion(uint32_t *pSupportedVersion);
1848
1849 PUBLIC VKAPI_ATTR VkResult VKAPI_CALL
1850 vk_icdNegotiateLoaderICDInterfaceVersion(uint32_t *pSupportedVersion)
1851 {
1852 /* For the full details on loader interface versioning, see
1853 * <https://github.com/KhronosGroup/Vulkan-LoaderAndValidationLayers/blob/master/loader/LoaderAndLayerInterface.md>.
1854 * What follows is a condensed summary, to help you navigate the large and
1855 * confusing official doc.
1856 *
1857 * - Loader interface v0 is incompatible with later versions. We don't
1858 * support it.
1859 *
1860 * - In loader interface v1:
1861 * - The first ICD entrypoint called by the loader is
1862 * vk_icdGetInstanceProcAddr(). The ICD must statically expose this
1863 * entrypoint.
1864 * - The ICD must statically expose no other Vulkan symbol unless it is
1865 * linked with -Bsymbolic.
1866 * - Each dispatchable Vulkan handle created by the ICD must be
1867 * a pointer to a struct whose first member is VK_LOADER_DATA. The
1868 * ICD must initialize VK_LOADER_DATA.loaderMagic to ICD_LOADER_MAGIC.
1869 * - The loader implements vkCreate{PLATFORM}SurfaceKHR() and
1870 * vkDestroySurfaceKHR(). The ICD must be capable of working with
1871 * such loader-managed surfaces.
1872 *
1873 * - Loader interface v2 differs from v1 in:
1874 * - The first ICD entrypoint called by the loader is
1875 * vk_icdNegotiateLoaderICDInterfaceVersion(). The ICD must
1876 * statically expose this entrypoint.
1877 *
1878 * - Loader interface v3 differs from v2 in:
1879 * - The ICD must implement vkCreate{PLATFORM}SurfaceKHR(),
1880 * vkDestroySurfaceKHR(), and other API which uses VkSurfaceKHR,
1881 * because the loader no longer does so.
1882 */
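/* For context, a minimal sketch (with a hypothetical library path) of the
 * ICD manifest the loader reads before it calls any of the entrypoints
 * described above:
 *
 *    {
 *        "file_format_version": "1.0.0",
 *        "ICD": {
 *            "library_path": "/usr/lib/libvulkan_freedreno.so",
 *            "api_version": "1.1.0"
 *        }
 *    }
 */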
1883 *pSupportedVersion = MIN2(*pSupportedVersion, 3u);
1884 return VK_SUCCESS;
1885 }
1886
1887 void
1888 tu_GetPhysicalDeviceExternalSemaphoreProperties(
1889 VkPhysicalDevice physicalDevice,
1890 const VkPhysicalDeviceExternalSemaphoreInfoKHR *pExternalSemaphoreInfo,
1891 VkExternalSemaphorePropertiesKHR *pExternalSemaphoreProperties)
1892 {
1893 pExternalSemaphoreProperties->exportFromImportedHandleTypes = 0;
1894 pExternalSemaphoreProperties->compatibleHandleTypes = 0;
1895 pExternalSemaphoreProperties->externalSemaphoreFeatures = 0;
1896 }
1897
1898 void
1899 tu_GetPhysicalDeviceExternalFenceProperties(
1900 VkPhysicalDevice physicalDevice,
1901 const VkPhysicalDeviceExternalFenceInfoKHR *pExternalFenceInfo,
1902 VkExternalFencePropertiesKHR *pExternalFenceProperties)
1903 {
1904 pExternalFenceProperties->exportFromImportedHandleTypes = 0;
1905 pExternalFenceProperties->compatibleHandleTypes = 0;
1906 pExternalFenceProperties->externalFenceFeatures = 0;
1907 }
1908
1909 VkResult
1910 tu_CreateDebugReportCallbackEXT(
1911 VkInstance _instance,
1912 const VkDebugReportCallbackCreateInfoEXT *pCreateInfo,
1913 const VkAllocationCallbacks *pAllocator,
1914 VkDebugReportCallbackEXT *pCallback)
1915 {
1916 TU_FROM_HANDLE(tu_instance, instance, _instance);
1917 return vk_create_debug_report_callback(&instance->debug_report_callbacks,
1918 pCreateInfo,
1919 pAllocator,
1920 &instance->alloc,
1921 pCallback);
1922 }
1923
1924 void
1925 tu_DestroyDebugReportCallbackEXT(VkInstance _instance,
1926 VkDebugReportCallbackEXT _callback,
1927 const VkAllocationCallbacks *pAllocator)
1928 {
1929 TU_FROM_HANDLE(tu_instance, instance, _instance);
1930 vk_destroy_debug_report_callback(&instance->debug_report_callbacks,
1931 _callback,
1932 pAllocator,
1933 &instance->alloc);
1934 }
1935
1936 void
1937 tu_DebugReportMessageEXT(VkInstance _instance,
1938 VkDebugReportFlagsEXT flags,
1939 VkDebugReportObjectTypeEXT objectType,
1940 uint64_t object,
1941 size_t location,
1942 int32_t messageCode,
1943 const char *pLayerPrefix,
1944 const char *pMessage)
1945 {
1946 TU_FROM_HANDLE(tu_instance, instance, _instance);
1947 vk_debug_report(&instance->debug_report_callbacks,
1948 flags,
1949 objectType,
1950 object,
1951 location,
1952 messageCode,
1953 pLayerPrefix,
1954 pMessage);
1955 }
1956
1957 void
1958 tu_GetDeviceGroupPeerMemoryFeatures(
1959 VkDevice device,
1960 uint32_t heapIndex,
1961 uint32_t localDeviceIndex,
1962 uint32_t remoteDeviceIndex,
1963 VkPeerMemoryFeatureFlags *pPeerMemoryFeatures)
1964 {
1965 assert(localDeviceIndex == remoteDeviceIndex);
1966
1967 *pPeerMemoryFeatures = VK_PEER_MEMORY_FEATURE_COPY_SRC_BIT |
1968 VK_PEER_MEMORY_FEATURE_COPY_DST_BIT |
1969 VK_PEER_MEMORY_FEATURE_GENERIC_SRC_BIT |
1970 VK_PEER_MEMORY_FEATURE_GENERIC_DST_BIT;
1971 }