turnip: Fix memory mapping.
src/freedreno/vulkan/tu_device.c (mesa.git)
/*
 * Copyright © 2016 Red Hat.
 * Copyright © 2016 Bas Nieuwenhuizen
 *
 * based in part on anv driver which is:
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "tu_private.h"
#include "util/debug.h"
#include "util/disk_cache.h"
#include "util/strtod.h"
#include "vk_format.h"
#include "vk_util.h"
#include <fcntl.h>
#include <stdbool.h>
#include <string.h>
#include <sys/mman.h>
#include <sys/sysinfo.h>
#include <unistd.h>
#include <xf86drm.h>
#include <msm_drm.h>

static int
tu_device_get_cache_uuid(uint16_t family, void *uuid)
{
   uint32_t mesa_timestamp;
   uint16_t f = family;
   memset(uuid, 0, VK_UUID_SIZE);
   if (!disk_cache_get_function_timestamp(tu_device_get_cache_uuid,
                                          &mesa_timestamp))
      return -1;

   memcpy(uuid, &mesa_timestamp, 4);
   memcpy((char *)uuid + 4, &f, 2);
   snprintf((char *)uuid + 6, VK_UUID_SIZE - 10, "tu");
   return 0;
}
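
/* Illustrative layout of the resulting cache UUID: bytes 0-3 hold the Mesa
 * build timestamp, bytes 4-5 the GPU family, bytes 6+ the string "tu", and
 * the remainder stays zeroed by the memset above.
 */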

static void
tu_get_driver_uuid(void *uuid)
{
   memset(uuid, 0, VK_UUID_SIZE);
}

static void
tu_get_device_uuid(void *uuid)
{
   tu_use_args(uuid);
   tu_stub();
}

VkResult
tu_bo_init_new(struct tu_device *dev, struct tu_bo *bo, uint64_t size)
{
   /* TODO: Choose better flags. As of 2018-11-12, freedreno/drm/msm_bo.c
    * always sets `flags = MSM_BO_WC`, and we copy that behavior here.
    */
   uint32_t gem_handle = tu_gem_new(dev, size, MSM_BO_WC);
   if (!gem_handle)
      goto fail_new;

   /* Calling DRM_MSM_GEM_INFO forces the kernel to allocate backing pages. We
    * want immediate backing pages because vkAllocateMemory and friends must
    * not lazily fail.
    *
    * TODO(chadv): Must we really call DRM_MSM_GEM_INFO to acquire backing
    * pages? I infer so from reading comments in msm_bo.c:bo_allocate(), but
    * maybe I misunderstand.
    */

   /* TODO: Do we need 'offset' if we have 'iova'? */
   uint64_t offset = tu_gem_info_offset(dev, gem_handle);
   if (!offset)
      goto fail_info;

   uint64_t iova = tu_gem_info_iova(dev, gem_handle);
   if (!iova)
      goto fail_info;

   *bo = (struct tu_bo) {
      .gem_handle = gem_handle,
      .size = size,
      .offset = offset,
      .iova = iova,
   };

   return VK_SUCCESS;

fail_info:
   /* *bo has not been written yet, so close the local handle. */
   tu_gem_close(dev, gem_handle);
fail_new:
   return vk_error(dev->instance, VK_ERROR_OUT_OF_DEVICE_MEMORY);
}

VkResult
tu_bo_map(struct tu_device *dev, struct tu_bo *bo)
{
   if (bo->map)
      return VK_SUCCESS;

   /* TODO: Should we use the wrapper os_mmap() like Freedreno does? */
   void *map = mmap(0, bo->size, PROT_READ | PROT_WRITE, MAP_SHARED,
                    dev->physical_device->local_fd, bo->offset);
   if (map == MAP_FAILED)
      return vk_error(dev->instance, VK_ERROR_MEMORY_MAP_FAILED);

   bo->map = map;
   return VK_SUCCESS;
}
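
/* Note: bo->offset is the fake mmap offset the kernel reports for this GEM
 * object; mapping the render-node fd at that offset is what maps the BO's
 * backing pages, as with other DRM GEM drivers.
 */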

void
tu_bo_finish(struct tu_device *dev, struct tu_bo *bo)
{
   assert(bo->gem_handle);

   if (bo->map)
      munmap(bo->map, bo->size);

   tu_gem_close(dev, bo->gem_handle);
}
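
/* Illustrative BO lifecycle using only the helpers above (error handling
 * elided):
 *
 *    struct tu_bo bo;
 *    tu_bo_init_new(dev, &bo, 4096);
 *    tu_bo_map(dev, &bo);
 *    memset(bo.map, 0, bo.size);
 *    tu_bo_finish(dev, &bo);
 */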

static VkResult
tu_physical_device_init(struct tu_physical_device *device,
                        struct tu_instance *instance,
                        drmDevicePtr drm_device)
{
   const char *path = drm_device->nodes[DRM_NODE_RENDER];
   VkResult result = VK_SUCCESS;
   drmVersionPtr version;
   int fd;
   int master_fd = -1;
   struct fd_pipe *tmp_pipe = NULL;
   uint64_t val;

   fd = open(path, O_RDWR | O_CLOEXEC);
   if (fd < 0) {
      return vk_errorf(instance, VK_ERROR_INCOMPATIBLE_DRIVER,
                       "failed to open device %s", path);
   }

   /* Version 1.3 added MSM_INFO_IOVA. */
   const int min_version_major = 1;
   const int min_version_minor = 3;

   version = drmGetVersion(fd);
   if (!version) {
      close(fd);
      return vk_errorf(instance, VK_ERROR_INCOMPATIBLE_DRIVER,
                       "failed to query kernel driver version for device %s",
                       path);
   }

   if (strcmp(version->name, "msm")) {
      drmFreeVersion(version);
      if (master_fd != -1)
         close(master_fd);
      close(fd);
      return vk_errorf(instance, VK_ERROR_INCOMPATIBLE_DRIVER,
                       "device %s does not use the msm kernel driver", path);
   }

   if (version->version_major != min_version_major ||
       version->version_minor < min_version_minor) {
      result = vk_errorf(instance, VK_ERROR_INCOMPATIBLE_DRIVER,
                         "kernel driver for device %s has version %d.%d, "
                         "but Vulkan requires version >= %d.%d",
                         path,
                         version->version_major, version->version_minor,
                         min_version_major, min_version_minor);
      drmFreeVersion(version);
      close(fd);
      return result;
   }

   drmFreeVersion(version);

   if (instance->debug_flags & TU_DEBUG_STARTUP)
      tu_logi("Found compatible device '%s'.", path);

   device->_loader_data.loaderMagic = ICD_LOADER_MAGIC;
   device->instance = instance;
   assert(strlen(path) < ARRAY_SIZE(device->path));
   strncpy(device->path, path, ARRAY_SIZE(device->path));

   if (instance->enabled_extensions.KHR_display) {
      master_fd = open(drm_device->nodes[DRM_NODE_PRIMARY], O_RDWR | O_CLOEXEC);
      if (master_fd >= 0) {
         /* TODO: free master_fd if accel is not working? */
      }
   }

   device->master_fd = master_fd;
   device->local_fd = fd;

   device->drm_device = fd_device_new_dup(fd);
   if (!device->drm_device) {
      result = vk_errorf(instance, VK_ERROR_INITIALIZATION_FAILED,
                         "could not create the libdrm device");
      goto fail;
   }

   tmp_pipe = fd_pipe_new(device->drm_device, FD_PIPE_3D);
   if (!tmp_pipe) {
      result = vk_errorf(instance, VK_ERROR_INITIALIZATION_FAILED,
                         "could not open the 3D pipe");
      goto fail;
   }

   if (fd_pipe_get_param(tmp_pipe, FD_GPU_ID, &val)) {
      result = vk_errorf(instance, VK_ERROR_INITIALIZATION_FAILED,
                         "could not get GPU ID");
      goto fail;
   }
   device->gpu_id = val;

   if (fd_pipe_get_param(tmp_pipe, FD_GMEM_SIZE, &val)) {
      result = vk_errorf(instance, VK_ERROR_INITIALIZATION_FAILED,
                         "could not get GMEM size");
      goto fail;
   }
   device->gmem_size = val;

   fd_pipe_del(tmp_pipe);
   tmp_pipe = NULL;

   memset(device->name, 0, sizeof(device->name));
   sprintf(device->name, "FD%d", device->gpu_id);

   switch (device->gpu_id) {
   case 530:
   case 630:
      break;
   default:
      result = vk_errorf(instance, VK_ERROR_INITIALIZATION_FAILED,
                         "device %s is unsupported", device->name);
      goto fail;
   }
   if (tu_device_get_cache_uuid(device->gpu_id, device->cache_uuid)) {
      result = vk_errorf(instance, VK_ERROR_INITIALIZATION_FAILED,
                         "cannot generate UUID");
      goto fail;
   }

   /* The gpu id is already embedded in the uuid, so the device name is all
    * we need to pass when creating the cache.
    */
   char buf[VK_UUID_SIZE * 2 + 1];
   disk_cache_format_hex_id(buf, device->cache_uuid, VK_UUID_SIZE * 2);
   device->disk_cache = disk_cache_create(device->name, buf, 0);

   fprintf(stderr,
           "WARNING: tu is not a conformant vulkan implementation, "
           "testing use only.\n");

   tu_get_driver_uuid(&device->driver_uuid);
   tu_get_device_uuid(&device->device_uuid);

   tu_fill_device_extension_table(device, &device->supported_extensions);

   if (result != VK_SUCCESS) {
      vk_error(instance, result);
      goto fail;
   }

   return VK_SUCCESS;

fail:
   if (tmp_pipe)
      fd_pipe_del(tmp_pipe);
   if (device->drm_device)
      fd_device_del(device->drm_device);
   close(fd);
   if (master_fd != -1)
      close(master_fd);
   return result;
}

static void
tu_physical_device_finish(struct tu_physical_device *device)
{
   disk_cache_destroy(device->disk_cache);
   close(device->local_fd);
   if (device->master_fd != -1)
      close(device->master_fd);
}

static void *
default_alloc_func(void *pUserData,
                   size_t size,
                   size_t align,
                   VkSystemAllocationScope allocationScope)
{
   return malloc(size);
}

static void *
default_realloc_func(void *pUserData,
                     void *pOriginal,
                     size_t size,
                     size_t align,
                     VkSystemAllocationScope allocationScope)
{
   return realloc(pOriginal, size);
}

static void
default_free_func(void *pUserData, void *pMemory)
{
   free(pMemory);
}

static const VkAllocationCallbacks default_alloc = {
   .pUserData = NULL,
   .pfnAllocation = default_alloc_func,
   .pfnReallocation = default_realloc_func,
   .pfnFree = default_free_func,
};

static const struct debug_control tu_debug_options[] = {
   { "startup", TU_DEBUG_STARTUP },
   { NULL, 0 }
};
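
/* Debug flags are parsed from the TU_DEBUG environment variable at instance
 * creation (see tu_CreateInstance below); for example, running with
 * TU_DEBUG=startup enables the startup log messages.
 */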

const char *
tu_get_debug_option_name(int id)
{
   assert(id < ARRAY_SIZE(tu_debug_options) - 1);
   return tu_debug_options[id].string;
}

static int
tu_get_instance_extension_index(const char *name)
{
   for (unsigned i = 0; i < TU_INSTANCE_EXTENSION_COUNT; ++i) {
      if (strcmp(name, tu_instance_extensions[i].extensionName) == 0)
         return i;
   }
   return -1;
}

VkResult
tu_CreateInstance(const VkInstanceCreateInfo *pCreateInfo,
                  const VkAllocationCallbacks *pAllocator,
                  VkInstance *pInstance)
{
   struct tu_instance *instance;
   VkResult result;

   assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_INSTANCE_CREATE_INFO);

   uint32_t client_version;
   if (pCreateInfo->pApplicationInfo &&
       pCreateInfo->pApplicationInfo->apiVersion != 0) {
      client_version = pCreateInfo->pApplicationInfo->apiVersion;
   } else {
      tu_EnumerateInstanceVersion(&client_version);
   }

   instance = vk_zalloc2(&default_alloc,
                         pAllocator,
                         sizeof(*instance),
                         8,
                         VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
   if (!instance)
      return vk_error(NULL, VK_ERROR_OUT_OF_HOST_MEMORY);

   instance->_loader_data.loaderMagic = ICD_LOADER_MAGIC;

   if (pAllocator)
      instance->alloc = *pAllocator;
   else
      instance->alloc = default_alloc;

   instance->api_version = client_version;
   instance->physical_device_count = -1;

   instance->debug_flags =
      parse_debug_string(getenv("TU_DEBUG"), tu_debug_options);

   if (instance->debug_flags & TU_DEBUG_STARTUP)
      tu_logi("Created an instance");

   for (uint32_t i = 0; i < pCreateInfo->enabledExtensionCount; i++) {
      const char *ext_name = pCreateInfo->ppEnabledExtensionNames[i];
      int index = tu_get_instance_extension_index(ext_name);

      if (index < 0 || !tu_supported_instance_extensions.extensions[index]) {
         vk_free2(&default_alloc, pAllocator, instance);
         return vk_error(instance, VK_ERROR_EXTENSION_NOT_PRESENT);
      }

      instance->enabled_extensions.extensions[index] = true;
   }

   result = vk_debug_report_instance_init(&instance->debug_report_callbacks);
   if (result != VK_SUCCESS) {
      vk_free2(&default_alloc, pAllocator, instance);
      return vk_error(instance, result);
   }

   _mesa_locale_init();

   VG(VALGRIND_CREATE_MEMPOOL(instance, 0, false));

   *pInstance = tu_instance_to_handle(instance);

   return VK_SUCCESS;
}

void
tu_DestroyInstance(VkInstance _instance,
                   const VkAllocationCallbacks *pAllocator)
{
   TU_FROM_HANDLE(tu_instance, instance, _instance);

   if (!instance)
      return;

   for (int i = 0; i < instance->physical_device_count; ++i) {
      tu_physical_device_finish(instance->physical_devices + i);
   }

   VG(VALGRIND_DESTROY_MEMPOOL(instance));

   _mesa_locale_fini();

   vk_debug_report_instance_destroy(&instance->debug_report_callbacks);

   vk_free(&instance->alloc, instance);
}

static VkResult
tu_enumerate_devices(struct tu_instance *instance)
{
   /* TODO: Check for more devices? */
   drmDevicePtr devices[8];
   VkResult result = VK_ERROR_INCOMPATIBLE_DRIVER;
   int max_devices;

   instance->physical_device_count = 0;

   max_devices = drmGetDevices2(0, devices, ARRAY_SIZE(devices));

   if (instance->debug_flags & TU_DEBUG_STARTUP)
      tu_logi("Found %d drm nodes", max_devices);

   if (max_devices < 1)
      return vk_error(instance, VK_ERROR_INCOMPATIBLE_DRIVER);

   for (unsigned i = 0; i < (unsigned)max_devices; i++) {
      if (devices[i]->available_nodes & 1 << DRM_NODE_RENDER &&
          devices[i]->bustype == DRM_BUS_PLATFORM) {

         result = tu_physical_device_init(instance->physical_devices +
                                             instance->physical_device_count,
                                          instance,
                                          devices[i]);
         if (result == VK_SUCCESS)
            ++instance->physical_device_count;
         else if (result != VK_ERROR_INCOMPATIBLE_DRIVER)
            break;
      }
   }
   drmFreeDevices(devices, max_devices);

   return result;
}

VkResult
tu_EnumeratePhysicalDevices(VkInstance _instance,
                            uint32_t *pPhysicalDeviceCount,
                            VkPhysicalDevice *pPhysicalDevices)
{
   TU_FROM_HANDLE(tu_instance, instance, _instance);
   VK_OUTARRAY_MAKE(out, pPhysicalDevices, pPhysicalDeviceCount);

   VkResult result;

   if (instance->physical_device_count < 0) {
      result = tu_enumerate_devices(instance);
      if (result != VK_SUCCESS && result != VK_ERROR_INCOMPATIBLE_DRIVER)
         return result;
   }

   for (uint32_t i = 0; i < instance->physical_device_count; ++i) {
      vk_outarray_append(&out, p) {
         *p = tu_physical_device_to_handle(instance->physical_devices + i);
      }
   }

   return vk_outarray_status(&out);
}

VkResult
tu_EnumeratePhysicalDeviceGroups(
   VkInstance _instance,
   uint32_t *pPhysicalDeviceGroupCount,
   VkPhysicalDeviceGroupProperties *pPhysicalDeviceGroupProperties)
{
   TU_FROM_HANDLE(tu_instance, instance, _instance);
   VK_OUTARRAY_MAKE(out, pPhysicalDeviceGroupProperties,
                    pPhysicalDeviceGroupCount);
   VkResult result;

   if (instance->physical_device_count < 0) {
      result = tu_enumerate_devices(instance);
      if (result != VK_SUCCESS && result != VK_ERROR_INCOMPATIBLE_DRIVER)
         return result;
   }

   for (uint32_t i = 0; i < instance->physical_device_count; ++i) {
      vk_outarray_append(&out, p) {
         p->physicalDeviceCount = 1;
         p->physicalDevices[0] =
            tu_physical_device_to_handle(instance->physical_devices + i);
         p->subsetAllocation = false;
      }
   }

   return vk_outarray_status(&out);
}

void
tu_GetPhysicalDeviceFeatures(VkPhysicalDevice physicalDevice,
                             VkPhysicalDeviceFeatures *pFeatures)
{
   memset(pFeatures, 0, sizeof(*pFeatures));

   *pFeatures = (VkPhysicalDeviceFeatures){
      .robustBufferAccess = false,
      .fullDrawIndexUint32 = false,
      .imageCubeArray = false,
      .independentBlend = false,
      .geometryShader = false,
      .tessellationShader = false,
      .sampleRateShading = false,
      .dualSrcBlend = false,
      .logicOp = false,
      .multiDrawIndirect = false,
      .drawIndirectFirstInstance = false,
      .depthClamp = false,
      .depthBiasClamp = false,
      .fillModeNonSolid = false,
      .depthBounds = false,
      .wideLines = false,
      .largePoints = false,
      .alphaToOne = false,
      .multiViewport = false,
      .samplerAnisotropy = false,
      .textureCompressionETC2 = false,
      .textureCompressionASTC_LDR = false,
      .textureCompressionBC = false,
      .occlusionQueryPrecise = false,
      .pipelineStatisticsQuery = false,
      .vertexPipelineStoresAndAtomics = false,
      .fragmentStoresAndAtomics = false,
      .shaderTessellationAndGeometryPointSize = false,
      .shaderImageGatherExtended = false,
      .shaderStorageImageExtendedFormats = false,
      .shaderStorageImageMultisample = false,
      .shaderUniformBufferArrayDynamicIndexing = false,
      .shaderSampledImageArrayDynamicIndexing = false,
      .shaderStorageBufferArrayDynamicIndexing = false,
      .shaderStorageImageArrayDynamicIndexing = false,
      .shaderStorageImageReadWithoutFormat = false,
      .shaderStorageImageWriteWithoutFormat = false,
      .shaderClipDistance = false,
      .shaderCullDistance = false,
      .shaderFloat64 = false,
      .shaderInt64 = false,
      .shaderInt16 = false,
      .sparseBinding = false,
      .variableMultisampleRate = false,
      .inheritedQueries = false,
   };
}

void
tu_GetPhysicalDeviceFeatures2(VkPhysicalDevice physicalDevice,
                              VkPhysicalDeviceFeatures2KHR *pFeatures)
{
   vk_foreach_struct(ext, pFeatures->pNext)
   {
      switch (ext->sType) {
      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VARIABLE_POINTER_FEATURES_KHR: {
         VkPhysicalDeviceVariablePointerFeaturesKHR *features = (void *)ext;
         features->variablePointersStorageBuffer = false;
         features->variablePointers = false;
         break;
      }
      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MULTIVIEW_FEATURES_KHR: {
         VkPhysicalDeviceMultiviewFeaturesKHR *features =
            (VkPhysicalDeviceMultiviewFeaturesKHR *)ext;
         features->multiview = false;
         features->multiviewGeometryShader = false;
         features->multiviewTessellationShader = false;
         break;
      }
      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_DRAW_PARAMETER_FEATURES: {
         VkPhysicalDeviceShaderDrawParameterFeatures *features =
            (VkPhysicalDeviceShaderDrawParameterFeatures *)ext;
         features->shaderDrawParameters = false;
         break;
      }
      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROTECTED_MEMORY_FEATURES: {
         VkPhysicalDeviceProtectedMemoryFeatures *features =
            (VkPhysicalDeviceProtectedMemoryFeatures *)ext;
         features->protectedMemory = false;
         break;
      }
      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_16BIT_STORAGE_FEATURES: {
         VkPhysicalDevice16BitStorageFeatures *features =
            (VkPhysicalDevice16BitStorageFeatures *)ext;
         features->storageBuffer16BitAccess = false;
         features->uniformAndStorageBuffer16BitAccess = false;
         features->storagePushConstant16 = false;
         features->storageInputOutput16 = false;
         break;
      }
      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SAMPLER_YCBCR_CONVERSION_FEATURES: {
         VkPhysicalDeviceSamplerYcbcrConversionFeatures *features =
            (VkPhysicalDeviceSamplerYcbcrConversionFeatures *)ext;
         features->samplerYcbcrConversion = false;
         break;
      }
      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DESCRIPTOR_INDEXING_FEATURES_EXT: {
         VkPhysicalDeviceDescriptorIndexingFeaturesEXT *features =
            (VkPhysicalDeviceDescriptorIndexingFeaturesEXT *)ext;
         features->shaderInputAttachmentArrayDynamicIndexing = false;
         features->shaderUniformTexelBufferArrayDynamicIndexing = false;
         features->shaderStorageTexelBufferArrayDynamicIndexing = false;
         features->shaderUniformBufferArrayNonUniformIndexing = false;
         features->shaderSampledImageArrayNonUniformIndexing = false;
         features->shaderStorageBufferArrayNonUniformIndexing = false;
         features->shaderStorageImageArrayNonUniformIndexing = false;
         features->shaderInputAttachmentArrayNonUniformIndexing = false;
         features->shaderUniformTexelBufferArrayNonUniformIndexing = false;
         features->shaderStorageTexelBufferArrayNonUniformIndexing = false;
         features->descriptorBindingUniformBufferUpdateAfterBind = false;
         features->descriptorBindingSampledImageUpdateAfterBind = false;
         features->descriptorBindingStorageImageUpdateAfterBind = false;
         features->descriptorBindingStorageBufferUpdateAfterBind = false;
         features->descriptorBindingUniformTexelBufferUpdateAfterBind = false;
         features->descriptorBindingStorageTexelBufferUpdateAfterBind = false;
         features->descriptorBindingUpdateUnusedWhilePending = false;
         features->descriptorBindingPartiallyBound = false;
         features->descriptorBindingVariableDescriptorCount = false;
         features->runtimeDescriptorArray = false;
         break;
      }
      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_CONDITIONAL_RENDERING_FEATURES_EXT: {
         VkPhysicalDeviceConditionalRenderingFeaturesEXT *features =
            (VkPhysicalDeviceConditionalRenderingFeaturesEXT *)ext;
         features->conditionalRendering = false;
         features->inheritedConditionalRendering = false;
         break;
      }
      default:
         break;
      }
   }
   return tu_GetPhysicalDeviceFeatures(physicalDevice, &pFeatures->features);
}

void
tu_GetPhysicalDeviceProperties(VkPhysicalDevice physicalDevice,
                               VkPhysicalDeviceProperties *pProperties)
{
   TU_FROM_HANDLE(tu_physical_device, pdevice, physicalDevice);
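   /* 0xf advertises the 1, 2, 4 and 8 sample counts (VK_SAMPLE_COUNT_1_BIT
    * through VK_SAMPLE_COUNT_8_BIT). */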
   VkSampleCountFlags sample_counts = 0xf;

   /* Make sure that the entire descriptor set is addressable with a signed
    * 32-bit int. So the sum of all limits scaled by descriptor size has to
    * be at most 2 GiB. A combined image/sampler descriptor counts as one of
    * each. This limit is for the pipeline layout, not for the set layout, but
    * there is no set limit, so we just set a pipeline limit. I don't think
    * any app is going to hit this soon. */
   size_t max_descriptor_set_size =
      ((1ull << 31) - 16 * MAX_DYNAMIC_BUFFERS) /
      (32 /* uniform buffer, 32 due to potential space wasted on alignment */ +
       32 /* storage buffer, 32 due to potential space wasted on alignment */ +
       32 /* sampler, largest when combined with image */ +
       64 /* sampled image */ + 64 /* storage image */);
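   /* Worked out: ignoring the small MAX_DYNAMIC_BUFFERS term, this is about
    * 2^31 / (32 + 32 + 32 + 64 + 64) = 2^31 / 224, i.e. just under 9.6
    * million descriptors. */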

   VkPhysicalDeviceLimits limits = {
      .maxImageDimension1D = (1 << 14),
      .maxImageDimension2D = (1 << 14),
      .maxImageDimension3D = (1 << 11),
      .maxImageDimensionCube = (1 << 14),
      .maxImageArrayLayers = (1 << 11),
      .maxTexelBufferElements = 128 * 1024 * 1024,
      .maxUniformBufferRange = UINT32_MAX,
      .maxStorageBufferRange = UINT32_MAX,
      .maxPushConstantsSize = MAX_PUSH_CONSTANTS_SIZE,
      .maxMemoryAllocationCount = UINT32_MAX,
      .maxSamplerAllocationCount = 64 * 1024,
      .bufferImageGranularity = 64, /* A cache line */
      .sparseAddressSpaceSize = 0xffffffffu, /* buffer max size */
      .maxBoundDescriptorSets = MAX_SETS,
      .maxPerStageDescriptorSamplers = max_descriptor_set_size,
      .maxPerStageDescriptorUniformBuffers = max_descriptor_set_size,
      .maxPerStageDescriptorStorageBuffers = max_descriptor_set_size,
      .maxPerStageDescriptorSampledImages = max_descriptor_set_size,
      .maxPerStageDescriptorStorageImages = max_descriptor_set_size,
      .maxPerStageDescriptorInputAttachments = max_descriptor_set_size,
      .maxPerStageResources = max_descriptor_set_size,
      .maxDescriptorSetSamplers = max_descriptor_set_size,
      .maxDescriptorSetUniformBuffers = max_descriptor_set_size,
      .maxDescriptorSetUniformBuffersDynamic = MAX_DYNAMIC_UNIFORM_BUFFERS,
      .maxDescriptorSetStorageBuffers = max_descriptor_set_size,
      .maxDescriptorSetStorageBuffersDynamic = MAX_DYNAMIC_STORAGE_BUFFERS,
      .maxDescriptorSetSampledImages = max_descriptor_set_size,
      .maxDescriptorSetStorageImages = max_descriptor_set_size,
      .maxDescriptorSetInputAttachments = max_descriptor_set_size,
      .maxVertexInputAttributes = 32,
      .maxVertexInputBindings = 32,
      .maxVertexInputAttributeOffset = 2047,
      .maxVertexInputBindingStride = 2048,
      .maxVertexOutputComponents = 128,
      .maxTessellationGenerationLevel = 64,
      .maxTessellationPatchSize = 32,
      .maxTessellationControlPerVertexInputComponents = 128,
      .maxTessellationControlPerVertexOutputComponents = 128,
      .maxTessellationControlPerPatchOutputComponents = 120,
      .maxTessellationControlTotalOutputComponents = 4096,
      .maxTessellationEvaluationInputComponents = 128,
      .maxTessellationEvaluationOutputComponents = 128,
      .maxGeometryShaderInvocations = 127,
      .maxGeometryInputComponents = 64,
      .maxGeometryOutputComponents = 128,
      .maxGeometryOutputVertices = 256,
      .maxGeometryTotalOutputComponents = 1024,
      .maxFragmentInputComponents = 128,
      .maxFragmentOutputAttachments = 8,
      .maxFragmentDualSrcAttachments = 1,
      .maxFragmentCombinedOutputResources = 8,
      .maxComputeSharedMemorySize = 32768,
      .maxComputeWorkGroupCount = { 65535, 65535, 65535 },
      .maxComputeWorkGroupInvocations = 2048,
      .maxComputeWorkGroupSize = { 2048, 2048, 2048 },
      .subPixelPrecisionBits = 4 /* FIXME */,
      .subTexelPrecisionBits = 4 /* FIXME */,
      .mipmapPrecisionBits = 4 /* FIXME */,
      .maxDrawIndexedIndexValue = UINT32_MAX,
      .maxDrawIndirectCount = UINT32_MAX,
      .maxSamplerLodBias = 16,
      .maxSamplerAnisotropy = 16,
      .maxViewports = MAX_VIEWPORTS,
      .maxViewportDimensions = { (1 << 14), (1 << 14) },
      .viewportBoundsRange = { INT16_MIN, INT16_MAX },
      .viewportSubPixelBits = 8,
      .minMemoryMapAlignment = 4096, /* A page */
      .minTexelBufferOffsetAlignment = 1,
      .minUniformBufferOffsetAlignment = 4,
      .minStorageBufferOffsetAlignment = 4,
      .minTexelOffset = -32,
      .maxTexelOffset = 31,
      .minTexelGatherOffset = -32,
      .maxTexelGatherOffset = 31,
      .minInterpolationOffset = -2,
      .maxInterpolationOffset = 2,
      .subPixelInterpolationOffsetBits = 8,
      .maxFramebufferWidth = (1 << 14),
      .maxFramebufferHeight = (1 << 14),
      .maxFramebufferLayers = (1 << 10),
      .framebufferColorSampleCounts = sample_counts,
      .framebufferDepthSampleCounts = sample_counts,
      .framebufferStencilSampleCounts = sample_counts,
      .framebufferNoAttachmentsSampleCounts = sample_counts,
      .maxColorAttachments = MAX_RTS,
      .sampledImageColorSampleCounts = sample_counts,
      .sampledImageIntegerSampleCounts = VK_SAMPLE_COUNT_1_BIT,
      .sampledImageDepthSampleCounts = sample_counts,
      .sampledImageStencilSampleCounts = sample_counts,
      .storageImageSampleCounts = VK_SAMPLE_COUNT_1_BIT,
      .maxSampleMaskWords = 1,
      .timestampComputeAndGraphics = true,
      .timestampPeriod = 1,
      .maxClipDistances = 8,
      .maxCullDistances = 8,
      .maxCombinedClipAndCullDistances = 8,
      .discreteQueuePriorities = 1,
      .pointSizeRange = { 0.125, 255.875 },
      .lineWidthRange = { 0.0, 7.9921875 },
      .pointSizeGranularity = (1.0 / 8.0),
      .lineWidthGranularity = (1.0 / 128.0),
      .strictLines = false, /* FINISHME */
      .standardSampleLocations = true,
      .optimalBufferCopyOffsetAlignment = 128,
      .optimalBufferCopyRowPitchAlignment = 128,
      .nonCoherentAtomSize = 64,
   };

   *pProperties = (VkPhysicalDeviceProperties){
      .apiVersion = tu_physical_device_api_version(pdevice),
      .driverVersion = vk_get_driver_version(),
      .vendorID = 0, /* TODO */
      .deviceID = 0,
      .deviceType = VK_PHYSICAL_DEVICE_TYPE_INTEGRATED_GPU,
      .limits = limits,
      .sparseProperties = { 0 },
   };

   strcpy(pProperties->deviceName, pdevice->name);
   memcpy(pProperties->pipelineCacheUUID, pdevice->cache_uuid, VK_UUID_SIZE);
}

void
tu_GetPhysicalDeviceProperties2(VkPhysicalDevice physicalDevice,
                                VkPhysicalDeviceProperties2KHR *pProperties)
{
   TU_FROM_HANDLE(tu_physical_device, pdevice, physicalDevice);
   tu_GetPhysicalDeviceProperties(physicalDevice, &pProperties->properties);

   vk_foreach_struct(ext, pProperties->pNext)
   {
      switch (ext->sType) {
      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PUSH_DESCRIPTOR_PROPERTIES_KHR: {
         VkPhysicalDevicePushDescriptorPropertiesKHR *properties =
            (VkPhysicalDevicePushDescriptorPropertiesKHR *)ext;
         properties->maxPushDescriptors = MAX_PUSH_DESCRIPTORS;
         break;
      }
      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_ID_PROPERTIES_KHR: {
         VkPhysicalDeviceIDPropertiesKHR *properties =
            (VkPhysicalDeviceIDPropertiesKHR *)ext;
         memcpy(properties->driverUUID, pdevice->driver_uuid, VK_UUID_SIZE);
         memcpy(properties->deviceUUID, pdevice->device_uuid, VK_UUID_SIZE);
         properties->deviceLUIDValid = false;
         break;
      }
      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MULTIVIEW_PROPERTIES_KHR: {
         VkPhysicalDeviceMultiviewPropertiesKHR *properties =
            (VkPhysicalDeviceMultiviewPropertiesKHR *)ext;
         properties->maxMultiviewViewCount = MAX_VIEWS;
         properties->maxMultiviewInstanceIndex = INT_MAX;
         break;
      }
      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_POINT_CLIPPING_PROPERTIES_KHR: {
         VkPhysicalDevicePointClippingPropertiesKHR *properties =
            (VkPhysicalDevicePointClippingPropertiesKHR *)ext;
         properties->pointClippingBehavior =
            VK_POINT_CLIPPING_BEHAVIOR_ALL_CLIP_PLANES_KHR;
         break;
      }
      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MAINTENANCE_3_PROPERTIES: {
         VkPhysicalDeviceMaintenance3Properties *properties =
            (VkPhysicalDeviceMaintenance3Properties *)ext;
         /* Make sure everything is addressable by a signed 32-bit int, and
          * our largest descriptors are 96 bytes. */
         properties->maxPerSetDescriptors = (1ull << 31) / 96;
         /* Our buffer size fields allow only this much */
         properties->maxMemoryAllocationSize = 0xFFFFFFFFull;
         break;
      }
      default:
         break;
      }
   }
}

static const VkQueueFamilyProperties tu_queue_family_properties = {
   .queueFlags = VK_QUEUE_GRAPHICS_BIT |
                 VK_QUEUE_COMPUTE_BIT |
                 VK_QUEUE_TRANSFER_BIT,
   .queueCount = 1,
   .timestampValidBits = 64,
   .minImageTransferGranularity = (VkExtent3D) { 1, 1, 1 },
};

void
tu_GetPhysicalDeviceQueueFamilyProperties(
   VkPhysicalDevice physicalDevice,
   uint32_t *pQueueFamilyPropertyCount,
   VkQueueFamilyProperties *pQueueFamilyProperties)
{
   VK_OUTARRAY_MAKE(out, pQueueFamilyProperties, pQueueFamilyPropertyCount);

   vk_outarray_append(&out, p) {
      *p = tu_queue_family_properties;
   }
}

void
tu_GetPhysicalDeviceQueueFamilyProperties2(
   VkPhysicalDevice physicalDevice,
   uint32_t *pQueueFamilyPropertyCount,
   VkQueueFamilyProperties2KHR *pQueueFamilyProperties)
{
   VK_OUTARRAY_MAKE(out, pQueueFamilyProperties, pQueueFamilyPropertyCount);

   vk_outarray_append(&out, p) {
      p->queueFamilyProperties = tu_queue_family_properties;
   }
}

static uint64_t
tu_get_system_heap_size(void)
{
   struct sysinfo info;
   sysinfo(&info);

   uint64_t total_ram = (uint64_t)info.totalram * (uint64_t)info.mem_unit;

   /* We don't want to burn too much ram with the GPU. If the user has 4GiB
    * or less, we use at most half. If they have more than 4GiB, we use 3/4.
    */
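   /* For example: a 16 GiB system gets a 12 GiB heap, while a 4 GiB system
    * gets a 2 GiB heap. */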
   uint64_t available_ram;
   if (total_ram <= 4ull * 1024ull * 1024ull * 1024ull)
      available_ram = total_ram / 2;
   else
      available_ram = total_ram * 3 / 4;

   return available_ram;
}

void
tu_GetPhysicalDeviceMemoryProperties(
   VkPhysicalDevice physicalDevice,
   VkPhysicalDeviceMemoryProperties *pMemoryProperties)
{
   pMemoryProperties->memoryHeapCount = 1;
   pMemoryProperties->memoryHeaps[0].size = tu_get_system_heap_size();
   pMemoryProperties->memoryHeaps[0].flags = VK_MEMORY_HEAP_DEVICE_LOCAL_BIT;

   pMemoryProperties->memoryTypeCount = 1;
   pMemoryProperties->memoryTypes[0].propertyFlags =
      VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT |
      VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT |
      VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
   pMemoryProperties->memoryTypes[0].heapIndex = 0;
}

void
tu_GetPhysicalDeviceMemoryProperties2(
   VkPhysicalDevice physicalDevice,
   VkPhysicalDeviceMemoryProperties2KHR *pMemoryProperties)
{
   return tu_GetPhysicalDeviceMemoryProperties(
      physicalDevice, &pMemoryProperties->memoryProperties);
}

static int
tu_queue_init(struct tu_device *device,
              struct tu_queue *queue,
              uint32_t queue_family_index,
              int idx,
              VkDeviceQueueCreateFlags flags)
{
   queue->_loader_data.loaderMagic = ICD_LOADER_MAGIC;
   queue->device = device;
   queue->queue_family_index = queue_family_index;
   queue->queue_idx = idx;
   queue->flags = flags;

   return VK_SUCCESS;
}

static void
tu_queue_finish(struct tu_queue *queue)
{
}

static int
tu_get_device_extension_index(const char *name)
{
   for (unsigned i = 0; i < TU_DEVICE_EXTENSION_COUNT; ++i) {
      if (strcmp(name, tu_device_extensions[i].extensionName) == 0)
         return i;
   }
   return -1;
}

VkResult
tu_CreateDevice(VkPhysicalDevice physicalDevice,
                const VkDeviceCreateInfo *pCreateInfo,
                const VkAllocationCallbacks *pAllocator,
                VkDevice *pDevice)
{
   TU_FROM_HANDLE(tu_physical_device, physical_device, physicalDevice);
   VkResult result;
   struct tu_device *device;

   /* Check enabled features */
   if (pCreateInfo->pEnabledFeatures) {
      VkPhysicalDeviceFeatures supported_features;
      tu_GetPhysicalDeviceFeatures(physicalDevice, &supported_features);
      VkBool32 *supported_feature = (VkBool32 *)&supported_features;
      VkBool32 *enabled_feature = (VkBool32 *)pCreateInfo->pEnabledFeatures;
      unsigned num_features =
         sizeof(VkPhysicalDeviceFeatures) / sizeof(VkBool32);
      for (uint32_t i = 0; i < num_features; i++) {
         if (enabled_feature[i] && !supported_feature[i])
            return vk_error(physical_device->instance,
                            VK_ERROR_FEATURE_NOT_PRESENT);
      }
   }

   device = vk_zalloc2(&physical_device->instance->alloc,
                       pAllocator,
                       sizeof(*device),
                       8,
                       VK_SYSTEM_ALLOCATION_SCOPE_DEVICE);
   if (!device)
      return vk_error(physical_device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);

   device->_loader_data.loaderMagic = ICD_LOADER_MAGIC;
   device->instance = physical_device->instance;
   device->physical_device = physical_device;

   if (pAllocator)
      device->alloc = *pAllocator;
   else
      device->alloc = physical_device->instance->alloc;

   for (uint32_t i = 0; i < pCreateInfo->enabledExtensionCount; i++) {
      const char *ext_name = pCreateInfo->ppEnabledExtensionNames[i];
      int index = tu_get_device_extension_index(ext_name);
      if (index < 0 ||
          !physical_device->supported_extensions.extensions[index]) {
         vk_free(&device->alloc, device);
         return vk_error(physical_device->instance,
                         VK_ERROR_EXTENSION_NOT_PRESENT);
      }

      device->enabled_extensions.extensions[index] = true;
   }

   for (unsigned i = 0; i < pCreateInfo->queueCreateInfoCount; i++) {
      const VkDeviceQueueCreateInfo *queue_create =
         &pCreateInfo->pQueueCreateInfos[i];
      uint32_t qfi = queue_create->queueFamilyIndex;
      device->queues[qfi] =
         vk_alloc(&device->alloc,
                  queue_create->queueCount * sizeof(struct tu_queue),
                  8,
                  VK_SYSTEM_ALLOCATION_SCOPE_DEVICE);
      if (!device->queues[qfi]) {
         result = VK_ERROR_OUT_OF_HOST_MEMORY;
         goto fail;
      }

      memset(device->queues[qfi],
             0,
             queue_create->queueCount * sizeof(struct tu_queue));

      device->queue_count[qfi] = queue_create->queueCount;

      for (unsigned q = 0; q < queue_create->queueCount; q++) {
         result = tu_queue_init(
            device, &device->queues[qfi][q], qfi, q, queue_create->flags);
         if (result != VK_SUCCESS)
            goto fail;
      }
   }

   VkPipelineCacheCreateInfo ci;
   ci.sType = VK_STRUCTURE_TYPE_PIPELINE_CACHE_CREATE_INFO;
   ci.pNext = NULL;
   ci.flags = 0;
   ci.pInitialData = NULL;
   ci.initialDataSize = 0;
   VkPipelineCache pc;
   result =
      tu_CreatePipelineCache(tu_device_to_handle(device), &ci, NULL, &pc);
   if (result != VK_SUCCESS)
      goto fail;

   device->mem_cache = tu_pipeline_cache_from_handle(pc);

   *pDevice = tu_device_to_handle(device);
   return VK_SUCCESS;

fail:
   for (unsigned i = 0; i < TU_MAX_QUEUE_FAMILIES; i++) {
      for (unsigned q = 0; q < device->queue_count[i]; q++)
         tu_queue_finish(&device->queues[i][q]);
      if (device->queue_count[i])
         vk_free(&device->alloc, device->queues[i]);
   }

   vk_free(&device->alloc, device);
   return result;
}

void
tu_DestroyDevice(VkDevice _device, const VkAllocationCallbacks *pAllocator)
{
   TU_FROM_HANDLE(tu_device, device, _device);

   if (!device)
      return;

   for (unsigned i = 0; i < TU_MAX_QUEUE_FAMILIES; i++) {
      for (unsigned q = 0; q < device->queue_count[i]; q++)
         tu_queue_finish(&device->queues[i][q]);
      if (device->queue_count[i])
         vk_free(&device->alloc, device->queues[i]);
   }

   VkPipelineCache pc = tu_pipeline_cache_to_handle(device->mem_cache);
   tu_DestroyPipelineCache(tu_device_to_handle(device), pc, NULL);

   vk_free(&device->alloc, device);
}

VkResult
tu_EnumerateInstanceLayerProperties(uint32_t *pPropertyCount,
                                    VkLayerProperties *pProperties)
{
   *pPropertyCount = 0;
   return VK_SUCCESS;
}

VkResult
tu_EnumerateDeviceLayerProperties(VkPhysicalDevice physicalDevice,
                                  uint32_t *pPropertyCount,
                                  VkLayerProperties *pProperties)
{
   *pPropertyCount = 0;
   return VK_SUCCESS;
}

void
tu_GetDeviceQueue2(VkDevice _device,
                   const VkDeviceQueueInfo2 *pQueueInfo,
                   VkQueue *pQueue)
{
   TU_FROM_HANDLE(tu_device, device, _device);
   struct tu_queue *queue;

   queue =
      &device->queues[pQueueInfo->queueFamilyIndex][pQueueInfo->queueIndex];
   if (pQueueInfo->flags != queue->flags) {
      /* From the Vulkan 1.1.70 spec:
       *
       * "The queue returned by vkGetDeviceQueue2 must have the same
       * flags value from this structure as that used at device
       * creation time in a VkDeviceQueueCreateInfo instance. If no
       * matching flags were specified at device creation time then
       * pQueue will return VK_NULL_HANDLE."
       */
      *pQueue = VK_NULL_HANDLE;
      return;
   }

   *pQueue = tu_queue_to_handle(queue);
}

void
tu_GetDeviceQueue(VkDevice _device,
                  uint32_t queueFamilyIndex,
                  uint32_t queueIndex,
                  VkQueue *pQueue)
{
   const VkDeviceQueueInfo2 info =
      (VkDeviceQueueInfo2) { .sType = VK_STRUCTURE_TYPE_DEVICE_QUEUE_INFO_2,
                             .queueFamilyIndex = queueFamilyIndex,
                             .queueIndex = queueIndex };

   tu_GetDeviceQueue2(_device, &info, pQueue);
}

VkResult
tu_QueueSubmit(VkQueue _queue,
               uint32_t submitCount,
               const VkSubmitInfo *pSubmits,
               VkFence _fence)
{
   return VK_SUCCESS;
}

VkResult
tu_QueueWaitIdle(VkQueue _queue)
{
   return VK_SUCCESS;
}

VkResult
tu_DeviceWaitIdle(VkDevice _device)
{
   TU_FROM_HANDLE(tu_device, device, _device);

   for (unsigned i = 0; i < TU_MAX_QUEUE_FAMILIES; i++) {
      for (unsigned q = 0; q < device->queue_count[i]; q++) {
         tu_QueueWaitIdle(tu_queue_to_handle(&device->queues[i][q]));
      }
   }
   return VK_SUCCESS;
}

VkResult
tu_EnumerateInstanceExtensionProperties(const char *pLayerName,
                                        uint32_t *pPropertyCount,
                                        VkExtensionProperties *pProperties)
{
   VK_OUTARRAY_MAKE(out, pProperties, pPropertyCount);

   /* We support no layers */
   if (pLayerName)
      return vk_error(NULL, VK_ERROR_LAYER_NOT_PRESENT);

   for (int i = 0; i < TU_INSTANCE_EXTENSION_COUNT; i++) {
      if (tu_supported_instance_extensions.extensions[i]) {
         vk_outarray_append(&out, prop) { *prop = tu_instance_extensions[i]; }
      }
   }

   return vk_outarray_status(&out);
}

VkResult
tu_EnumerateDeviceExtensionProperties(VkPhysicalDevice physicalDevice,
                                      const char *pLayerName,
                                      uint32_t *pPropertyCount,
                                      VkExtensionProperties *pProperties)
{
   TU_FROM_HANDLE(tu_physical_device, device, physicalDevice);
   VK_OUTARRAY_MAKE(out, pProperties, pPropertyCount);

   /* We support no layers */
   if (pLayerName)
      return vk_error(NULL, VK_ERROR_LAYER_NOT_PRESENT);

   for (int i = 0; i < TU_DEVICE_EXTENSION_COUNT; i++) {
      if (device->supported_extensions.extensions[i]) {
         vk_outarray_append(&out, prop) { *prop = tu_device_extensions[i]; }
      }
   }

   return vk_outarray_status(&out);
}

PFN_vkVoidFunction
tu_GetInstanceProcAddr(VkInstance _instance, const char *pName)
{
   TU_FROM_HANDLE(tu_instance, instance, _instance);

   return tu_lookup_entrypoint_checked(pName,
                                       instance ? instance->api_version : 0,
                                       instance ? &instance->enabled_extensions
                                                : NULL,
                                       NULL);
}

/* The loader wants us to expose a second GetInstanceProcAddr function
 * to work around certain LD_PRELOAD issues seen in apps.
 */
PUBLIC
VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL
vk_icdGetInstanceProcAddr(VkInstance instance, const char *pName);

PUBLIC
VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL
vk_icdGetInstanceProcAddr(VkInstance instance, const char *pName)
{
   return tu_GetInstanceProcAddr(instance, pName);
}

PFN_vkVoidFunction
tu_GetDeviceProcAddr(VkDevice _device, const char *pName)
{
   TU_FROM_HANDLE(tu_device, device, _device);

   return tu_lookup_entrypoint_checked(pName,
                                       device->instance->api_version,
                                       &device->instance->enabled_extensions,
                                       &device->enabled_extensions);
}

static VkResult
tu_alloc_memory(struct tu_device *device,
                const VkMemoryAllocateInfo *pAllocateInfo,
                const VkAllocationCallbacks *pAllocator,
                VkDeviceMemory *pMem)
{
   struct tu_device_memory *mem;
   VkResult result;

   assert(pAllocateInfo->sType == VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO);

   if (pAllocateInfo->allocationSize == 0) {
      /* Apparently, this is allowed */
      *pMem = VK_NULL_HANDLE;
      return VK_SUCCESS;
   }

   mem = vk_alloc2(&device->alloc,
                   pAllocator,
                   sizeof(*mem),
                   8,
                   VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (mem == NULL)
      return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);

   result = tu_bo_init_new(device, &mem->bo, pAllocateInfo->allocationSize);
   if (result != VK_SUCCESS) {
      vk_free2(&device->alloc, pAllocator, mem);
      return result;
   }

   mem->size = pAllocateInfo->allocationSize;
   mem->type_index = pAllocateInfo->memoryTypeIndex;

   mem->map = NULL;
   mem->user_ptr = NULL;

   *pMem = tu_device_memory_to_handle(mem);

   return VK_SUCCESS;
}

VkResult
tu_AllocateMemory(VkDevice _device,
                  const VkMemoryAllocateInfo *pAllocateInfo,
                  const VkAllocationCallbacks *pAllocator,
                  VkDeviceMemory *pMem)
{
   TU_FROM_HANDLE(tu_device, device, _device);
   return tu_alloc_memory(device, pAllocateInfo, pAllocator, pMem);
}

void
tu_FreeMemory(VkDevice _device,
              VkDeviceMemory _mem,
              const VkAllocationCallbacks *pAllocator)
{
   TU_FROM_HANDLE(tu_device, device, _device);
   TU_FROM_HANDLE(tu_device_memory, mem, _mem);

   if (mem == NULL)
      return;

   tu_bo_finish(device, &mem->bo);
   vk_free2(&device->alloc, pAllocator, mem);
}

VkResult
tu_MapMemory(VkDevice _device,
             VkDeviceMemory _memory,
             VkDeviceSize offset,
             VkDeviceSize size,
             VkMemoryMapFlags flags,
             void **ppData)
{
   TU_FROM_HANDLE(tu_device, device, _device);
   TU_FROM_HANDLE(tu_device_memory, mem, _memory);
   VkResult result;

   if (mem == NULL) {
      *ppData = NULL;
      return VK_SUCCESS;
   }

   if (mem->user_ptr) {
      *ppData = mem->user_ptr;
   } else if (!mem->map) {
      result = tu_bo_map(device, &mem->bo);
      if (result != VK_SUCCESS)
         return result;
      *ppData = mem->map = mem->bo.map;
   } else {
      *ppData = mem->map;
   }

   if (*ppData) {
      *ppData += offset;
      return VK_SUCCESS;
   }

   return vk_error(device->instance, VK_ERROR_MEMORY_MAP_FAILED);
}
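
/* From the application's point of view this path is reached via vkMapMemory;
 * an illustrative sketch (assuming `mem` is a live host-visible allocation of
 * at least 64 bytes):
 *
 *    void *data;
 *    vkMapMemory(device, mem, 0, 64, 0, &data);
 *    memcpy(data, src, 64);
 *    vkUnmapMemory(device, mem);
 */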

void
tu_UnmapMemory(VkDevice _device, VkDeviceMemory _memory)
{
   /* I do not see any unmapping done by the freedreno Gallium driver. */
}

VkResult
tu_FlushMappedMemoryRanges(VkDevice _device,
                           uint32_t memoryRangeCount,
                           const VkMappedMemoryRange *pMemoryRanges)
{
   return VK_SUCCESS;
}

VkResult
tu_InvalidateMappedMemoryRanges(VkDevice _device,
                                uint32_t memoryRangeCount,
                                const VkMappedMemoryRange *pMemoryRanges)
{
   return VK_SUCCESS;
}

void
tu_GetBufferMemoryRequirements(VkDevice _device,
                               VkBuffer _buffer,
                               VkMemoryRequirements *pMemoryRequirements)
{
   TU_FROM_HANDLE(tu_buffer, buffer, _buffer);

   pMemoryRequirements->memoryTypeBits = 1;
   pMemoryRequirements->alignment = 16;
   pMemoryRequirements->size =
      align64(buffer->size, pMemoryRequirements->alignment);
}

void
tu_GetBufferMemoryRequirements2(
   VkDevice device,
   const VkBufferMemoryRequirementsInfo2KHR *pInfo,
   VkMemoryRequirements2KHR *pMemoryRequirements)
{
   tu_GetBufferMemoryRequirements(
      device, pInfo->buffer, &pMemoryRequirements->memoryRequirements);
}

void
tu_GetImageMemoryRequirements(VkDevice _device,
                              VkImage _image,
                              VkMemoryRequirements *pMemoryRequirements)
{
   TU_FROM_HANDLE(tu_image, image, _image);

   pMemoryRequirements->memoryTypeBits = 1;
   pMemoryRequirements->size = image->size;
   pMemoryRequirements->alignment = image->alignment;
}

void
tu_GetImageMemoryRequirements2(VkDevice device,
                               const VkImageMemoryRequirementsInfo2KHR *pInfo,
                               VkMemoryRequirements2KHR *pMemoryRequirements)
{
   tu_GetImageMemoryRequirements(
      device, pInfo->image, &pMemoryRequirements->memoryRequirements);
}

void
tu_GetImageSparseMemoryRequirements(
   VkDevice device,
   VkImage image,
   uint32_t *pSparseMemoryRequirementCount,
   VkSparseImageMemoryRequirements *pSparseMemoryRequirements)
{
   tu_stub();
}

void
tu_GetImageSparseMemoryRequirements2(
   VkDevice device,
   const VkImageSparseMemoryRequirementsInfo2KHR *pInfo,
   uint32_t *pSparseMemoryRequirementCount,
   VkSparseImageMemoryRequirements2KHR *pSparseMemoryRequirements)
{
   tu_stub();
}

void
tu_GetDeviceMemoryCommitment(VkDevice device,
                             VkDeviceMemory memory,
                             VkDeviceSize *pCommittedMemoryInBytes)
{
   *pCommittedMemoryInBytes = 0;
}

VkResult
tu_BindBufferMemory2(VkDevice device,
                     uint32_t bindInfoCount,
                     const VkBindBufferMemoryInfoKHR *pBindInfos)
{
   return VK_SUCCESS;
}

VkResult
tu_BindBufferMemory(VkDevice device,
                    VkBuffer buffer,
                    VkDeviceMemory memory,
                    VkDeviceSize memoryOffset)
{
   const VkBindBufferMemoryInfoKHR info = {
      .sType = VK_STRUCTURE_TYPE_BIND_BUFFER_MEMORY_INFO_KHR,
      .buffer = buffer,
      .memory = memory,
      .memoryOffset = memoryOffset
   };

   return tu_BindBufferMemory2(device, 1, &info);
}

VkResult
tu_BindImageMemory2(VkDevice device,
                    uint32_t bindInfoCount,
                    const VkBindImageMemoryInfoKHR *pBindInfos)
{
   return VK_SUCCESS;
}

VkResult
tu_BindImageMemory(VkDevice device,
                   VkImage image,
                   VkDeviceMemory memory,
                   VkDeviceSize memoryOffset)
{
   const VkBindImageMemoryInfoKHR info = {
      .sType = VK_STRUCTURE_TYPE_BIND_IMAGE_MEMORY_INFO_KHR,
      .image = image,
      .memory = memory,
      .memoryOffset = memoryOffset
   };

   return tu_BindImageMemory2(device, 1, &info);
}

VkResult
tu_QueueBindSparse(VkQueue _queue,
                   uint32_t bindInfoCount,
                   const VkBindSparseInfo *pBindInfo,
                   VkFence _fence)
{
   return VK_SUCCESS;
}

VkResult
tu_CreateFence(VkDevice _device,
               const VkFenceCreateInfo *pCreateInfo,
               const VkAllocationCallbacks *pAllocator,
               VkFence *pFence)
{
   TU_FROM_HANDLE(tu_device, device, _device);

   struct tu_fence *fence = vk_alloc2(&device->alloc,
                                      pAllocator,
                                      sizeof(*fence),
                                      8,
                                      VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);

   if (!fence)
      return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);

   *pFence = tu_fence_to_handle(fence);

   return VK_SUCCESS;
}

void
tu_DestroyFence(VkDevice _device,
                VkFence _fence,
                const VkAllocationCallbacks *pAllocator)
{
   TU_FROM_HANDLE(tu_device, device, _device);
   TU_FROM_HANDLE(tu_fence, fence, _fence);

   if (!fence)
      return;

   vk_free2(&device->alloc, pAllocator, fence);
}

VkResult
tu_WaitForFences(VkDevice _device,
                 uint32_t fenceCount,
                 const VkFence *pFences,
                 VkBool32 waitAll,
                 uint64_t timeout)
{
   return VK_SUCCESS;
}

VkResult
tu_ResetFences(VkDevice _device, uint32_t fenceCount, const VkFence *pFences)
{
   return VK_SUCCESS;
}

VkResult
tu_GetFenceStatus(VkDevice _device, VkFence _fence)
{
   return VK_SUCCESS;
}

// Queue semaphore functions

VkResult
tu_CreateSemaphore(VkDevice _device,
                   const VkSemaphoreCreateInfo *pCreateInfo,
                   const VkAllocationCallbacks *pAllocator,
                   VkSemaphore *pSemaphore)
{
   TU_FROM_HANDLE(tu_device, device, _device);

   struct tu_semaphore *sem = vk_alloc2(&device->alloc,
                                        pAllocator,
                                        sizeof(*sem),
                                        8,
                                        VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (!sem)
      return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);

   *pSemaphore = tu_semaphore_to_handle(sem);
   return VK_SUCCESS;
}

void
tu_DestroySemaphore(VkDevice _device,
                    VkSemaphore _semaphore,
                    const VkAllocationCallbacks *pAllocator)
{
   TU_FROM_HANDLE(tu_device, device, _device);
   TU_FROM_HANDLE(tu_semaphore, sem, _semaphore);
   if (!_semaphore)
      return;

   vk_free2(&device->alloc, pAllocator, sem);
}

VkResult
tu_CreateEvent(VkDevice _device,
               const VkEventCreateInfo *pCreateInfo,
               const VkAllocationCallbacks *pAllocator,
               VkEvent *pEvent)
{
   TU_FROM_HANDLE(tu_device, device, _device);
   struct tu_event *event = vk_alloc2(&device->alloc,
                                      pAllocator,
                                      sizeof(*event),
                                      8,
                                      VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);

   if (!event)
      return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);

   *pEvent = tu_event_to_handle(event);

   return VK_SUCCESS;
}

void
tu_DestroyEvent(VkDevice _device,
                VkEvent _event,
                const VkAllocationCallbacks *pAllocator)
{
   TU_FROM_HANDLE(tu_device, device, _device);
   TU_FROM_HANDLE(tu_event, event, _event);

   if (!event)
      return;
   vk_free2(&device->alloc, pAllocator, event);
}
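
/* Events are backed by a single mapped word (event->map); the entrypoints
 * below simply read and write that word from the host.
 */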
VkResult
tu_GetEventStatus(VkDevice _device, VkEvent _event)
{
   TU_FROM_HANDLE(tu_event, event, _event);

   if (*event->map == 1)
      return VK_EVENT_SET;
   return VK_EVENT_RESET;
}

VkResult
tu_SetEvent(VkDevice _device, VkEvent _event)
{
   TU_FROM_HANDLE(tu_event, event, _event);
   *event->map = 1;

   return VK_SUCCESS;
}

VkResult
tu_ResetEvent(VkDevice _device, VkEvent _event)
{
   TU_FROM_HANDLE(tu_event, event, _event);
   *event->map = 0;

   return VK_SUCCESS;
}

VkResult
tu_CreateBuffer(VkDevice _device,
                const VkBufferCreateInfo *pCreateInfo,
                const VkAllocationCallbacks *pAllocator,
                VkBuffer *pBuffer)
{
   TU_FROM_HANDLE(tu_device, device, _device);
   struct tu_buffer *buffer;

   assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO);

   buffer = vk_alloc2(&device->alloc,
                      pAllocator,
                      sizeof(*buffer),
                      8,
                      VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (buffer == NULL)
      return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);

   buffer->size = pCreateInfo->size;
   buffer->usage = pCreateInfo->usage;
   buffer->flags = pCreateInfo->flags;

   *pBuffer = tu_buffer_to_handle(buffer);

   return VK_SUCCESS;
}

void
tu_DestroyBuffer(VkDevice _device,
                 VkBuffer _buffer,
                 const VkAllocationCallbacks *pAllocator)
{
   TU_FROM_HANDLE(tu_device, device, _device);
   TU_FROM_HANDLE(tu_buffer, buffer, _buffer);

   if (!buffer)
      return;

   vk_free2(&device->alloc, pAllocator, buffer);
}
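
/* For 3D image views the layer count is the view's depth; for array views it
 * is the last array layer addressed by the view. tu_CreateFramebuffer below
 * uses this to clamp the framebuffer's layer count.
 */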
static uint32_t
tu_surface_max_layer_count(struct tu_image_view *iview)
{
   return iview->type == VK_IMAGE_VIEW_TYPE_3D
             ? iview->extent.depth
             : (iview->base_layer + iview->layer_count);
}

VkResult
tu_CreateFramebuffer(VkDevice _device,
                     const VkFramebufferCreateInfo *pCreateInfo,
                     const VkAllocationCallbacks *pAllocator,
                     VkFramebuffer *pFramebuffer)
{
   TU_FROM_HANDLE(tu_device, device, _device);
   struct tu_framebuffer *framebuffer;

   assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO);

   size_t size =
      sizeof(*framebuffer) +
      sizeof(struct tu_attachment_info) * pCreateInfo->attachmentCount;
   framebuffer = vk_alloc2(
      &device->alloc, pAllocator, size, 8, VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (framebuffer == NULL)
      return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);

   framebuffer->attachment_count = pCreateInfo->attachmentCount;
   framebuffer->width = pCreateInfo->width;
   framebuffer->height = pCreateInfo->height;
   framebuffer->layers = pCreateInfo->layers;
   for (uint32_t i = 0; i < pCreateInfo->attachmentCount; i++) {
      VkImageView _iview = pCreateInfo->pAttachments[i];
      struct tu_image_view *iview = tu_image_view_from_handle(_iview);
      framebuffer->attachments[i].attachment = iview;

      framebuffer->width = MIN2(framebuffer->width, iview->extent.width);
      framebuffer->height = MIN2(framebuffer->height, iview->extent.height);
      framebuffer->layers =
         MIN2(framebuffer->layers, tu_surface_max_layer_count(iview));
   }

   *pFramebuffer = tu_framebuffer_to_handle(framebuffer);
   return VK_SUCCESS;
}

void
tu_DestroyFramebuffer(VkDevice _device,
                      VkFramebuffer _fb,
                      const VkAllocationCallbacks *pAllocator)
{
   TU_FROM_HANDLE(tu_device, device, _device);
   TU_FROM_HANDLE(tu_framebuffer, fb, _fb);

   if (!fb)
      return;
   vk_free2(&device->alloc, pAllocator, fb);
}

static void
tu_init_sampler(struct tu_device *device,
                struct tu_sampler *sampler,
                const VkSamplerCreateInfo *pCreateInfo)
{
}

VkResult
tu_CreateSampler(VkDevice _device,
                 const VkSamplerCreateInfo *pCreateInfo,
                 const VkAllocationCallbacks *pAllocator,
                 VkSampler *pSampler)
{
   TU_FROM_HANDLE(tu_device, device, _device);
   struct tu_sampler *sampler;

   assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_SAMPLER_CREATE_INFO);

   sampler = vk_alloc2(&device->alloc,
                       pAllocator,
                       sizeof(*sampler),
                       8,
                       VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (!sampler)
      return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);

   tu_init_sampler(device, sampler, pCreateInfo);
   *pSampler = tu_sampler_to_handle(sampler);

   return VK_SUCCESS;
}

void
tu_DestroySampler(VkDevice _device,
                  VkSampler _sampler,
                  const VkAllocationCallbacks *pAllocator)
{
   TU_FROM_HANDLE(tu_device, device, _device);
   TU_FROM_HANDLE(tu_sampler, sampler, _sampler);

   if (!sampler)
      return;
   vk_free2(&device->alloc, pAllocator, sampler);
}

/* vk_icd.h does not declare this function, so we declare it here to
 * suppress Wmissing-prototypes.
 */
PUBLIC VKAPI_ATTR VkResult VKAPI_CALL
vk_icdNegotiateLoaderICDInterfaceVersion(uint32_t *pSupportedVersion);

PUBLIC VKAPI_ATTR VkResult VKAPI_CALL
vk_icdNegotiateLoaderICDInterfaceVersion(uint32_t *pSupportedVersion)
{
   /* For the full details on loader interface versioning, see
    * <https://github.com/KhronosGroup/Vulkan-LoaderAndValidationLayers/blob/master/loader/LoaderAndLayerInterface.md>.
    * What follows is a condensed summary, to help you navigate the large and
    * confusing official doc.
    *
    * - Loader interface v0 is incompatible with later versions. We don't
    *   support it.
    *
    * - In loader interface v1:
    *   - The first ICD entrypoint called by the loader is
    *     vk_icdGetInstanceProcAddr(). The ICD must statically expose this
    *     entrypoint.
    *   - The ICD must statically expose no other Vulkan symbol unless it is
    *     linked with -Bsymbolic.
    *   - Each dispatchable Vulkan handle created by the ICD must be
    *     a pointer to a struct whose first member is VK_LOADER_DATA. The
    *     ICD must initialize VK_LOADER_DATA.loaderMagic to ICD_LOADER_MAGIC.
    *   - The loader implements vkCreate{PLATFORM}SurfaceKHR() and
    *     vkDestroySurfaceKHR(). The ICD must be capable of working with
    *     such loader-managed surfaces.
    *
    * - Loader interface v2 differs from v1 in:
    *   - The first ICD entrypoint called by the loader is
    *     vk_icdNegotiateLoaderICDInterfaceVersion(). The ICD must
    *     statically expose this entrypoint.
    *
    * - Loader interface v3 differs from v2 in:
    *   - The ICD must implement vkCreate{PLATFORM}SurfaceKHR(),
    *     vkDestroySurfaceKHR(), and other API which uses VkSurfaceKHR,
    *     because the loader no longer does so.
    */
   *pSupportedVersion = MIN2(*pSupportedVersion, 3u);
   return VK_SUCCESS;
}

void
tu_GetPhysicalDeviceExternalSemaphoreProperties(
   VkPhysicalDevice physicalDevice,
   const VkPhysicalDeviceExternalSemaphoreInfoKHR *pExternalSemaphoreInfo,
   VkExternalSemaphorePropertiesKHR *pExternalSemaphoreProperties)
{
   pExternalSemaphoreProperties->exportFromImportedHandleTypes = 0;
   pExternalSemaphoreProperties->compatibleHandleTypes = 0;
   pExternalSemaphoreProperties->externalSemaphoreFeatures = 0;
}

void
tu_GetPhysicalDeviceExternalFenceProperties(
   VkPhysicalDevice physicalDevice,
   const VkPhysicalDeviceExternalFenceInfoKHR *pExternalFenceInfo,
   VkExternalFencePropertiesKHR *pExternalFenceProperties)
{
   pExternalFenceProperties->exportFromImportedHandleTypes = 0;
   pExternalFenceProperties->compatibleHandleTypes = 0;
   pExternalFenceProperties->externalFenceFeatures = 0;
}

VkResult
tu_CreateDebugReportCallbackEXT(
   VkInstance _instance,
   const VkDebugReportCallbackCreateInfoEXT *pCreateInfo,
   const VkAllocationCallbacks *pAllocator,
   VkDebugReportCallbackEXT *pCallback)
{
   TU_FROM_HANDLE(tu_instance, instance, _instance);
   return vk_create_debug_report_callback(&instance->debug_report_callbacks,
                                          pCreateInfo,
                                          pAllocator,
                                          &instance->alloc,
                                          pCallback);
}

void
tu_DestroyDebugReportCallbackEXT(VkInstance _instance,
                                 VkDebugReportCallbackEXT _callback,
                                 const VkAllocationCallbacks *pAllocator)
{
   TU_FROM_HANDLE(tu_instance, instance, _instance);
   vk_destroy_debug_report_callback(&instance->debug_report_callbacks,
                                    _callback,
                                    pAllocator,
                                    &instance->alloc);
}

void
tu_DebugReportMessageEXT(VkInstance _instance,
                         VkDebugReportFlagsEXT flags,
                         VkDebugReportObjectTypeEXT objectType,
                         uint64_t object,
                         size_t location,
                         int32_t messageCode,
                         const char *pLayerPrefix,
                         const char *pMessage)
{
   TU_FROM_HANDLE(tu_instance, instance, _instance);
   vk_debug_report(&instance->debug_report_callbacks,
                   flags,
                   objectType,
                   object,
                   location,
                   messageCode,
                   pLayerPrefix,
                   pMessage);
}

void
tu_GetDeviceGroupPeerMemoryFeatures(
   VkDevice device,
   uint32_t heapIndex,
   uint32_t localDeviceIndex,
   uint32_t remoteDeviceIndex,
   VkPeerMemoryFeatureFlags *pPeerMemoryFeatures)
{
   assert(localDeviceIndex == remoteDeviceIndex);

   *pPeerMemoryFeatures = VK_PEER_MEMORY_FEATURE_COPY_SRC_BIT |
                          VK_PEER_MEMORY_FEATURE_COPY_DST_BIT |
                          VK_PEER_MEMORY_FEATURE_GENERIC_SRC_BIT |
                          VK_PEER_MEMORY_FEATURE_GENERIC_DST_BIT;
}