turnip: Use vk_errorf() for initialization error messages
[mesa.git] / src / freedreno / vulkan / tu_device.c
/*
 * Copyright © 2016 Red Hat.
 * Copyright © 2016 Bas Nieuwenhuizen
 *
 * based in part on anv driver which is:
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "tu_private.h"
#include "util/debug.h"
#include "util/disk_cache.h"
#include "util/strtod.h"
#include "vk_format.h"
#include "vk_util.h"
#include <fcntl.h>
#include <stdbool.h>
#include <string.h>
#include <sys/sysinfo.h>
#include <unistd.h>
#include <xf86drm.h>

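/* The pipeline cache UUID is built from the Mesa build timestamp (4 bytes)
 * and the GPU family (2 bytes), followed by the string "tu", zero-padded
 * to VK_UUID_SIZE.
 */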
static int
tu_device_get_cache_uuid(uint16_t family, void *uuid)
{
   uint32_t mesa_timestamp;
   uint16_t f = family;
   memset(uuid, 0, VK_UUID_SIZE);
   if (!disk_cache_get_function_timestamp(tu_device_get_cache_uuid,
                                          &mesa_timestamp))
      return -1;

   memcpy(uuid, &mesa_timestamp, 4);
   memcpy((char *)uuid + 4, &f, 2);
   snprintf((char *)uuid + 6, VK_UUID_SIZE - 10, "tu");
   return 0;
}

static void
tu_get_driver_uuid(void *uuid)
{
   memset(uuid, 0, VK_UUID_SIZE);
}

static void
tu_get_device_uuid(void *uuid)
{
   stub();
}

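/* Open the drm render node, check that it is driven by a recent-enough msm
 * kernel driver, and query the GPU id and GMEM size through a temporary
 * libdrm pipe.
 */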
static VkResult
tu_physical_device_init(struct tu_physical_device *device,
                        struct tu_instance *instance,
                        drmDevicePtr drm_device)
{
   const char *path = drm_device->nodes[DRM_NODE_RENDER];
   VkResult result = VK_SUCCESS;
   drmVersionPtr version;
   int fd;
   int master_fd = -1;
   struct fd_pipe *tmp_pipe = NULL;
   uint64_t val;

   fd = open(path, O_RDWR | O_CLOEXEC);
   if (fd < 0) {
      return vk_errorf(instance, VK_ERROR_INCOMPATIBLE_DRIVER,
                       "failed to open device %s", path);
   }

   /* Version 1.3 added MSM_INFO_IOVA. */
   const int min_version_major = 1;
   const int min_version_minor = 3;

   version = drmGetVersion(fd);
   if (!version) {
      close(fd);
      return vk_errorf(instance, VK_ERROR_INCOMPATIBLE_DRIVER,
                       "failed to query kernel driver version for device %s",
                       path);
   }

   if (strcmp(version->name, "msm")) {
      drmFreeVersion(version);
      if (master_fd != -1)
         close(master_fd);
      close(fd);
      return vk_errorf(instance, VK_ERROR_INCOMPATIBLE_DRIVER,
                       "device %s does not use the msm kernel driver", path);
   }

   if (version->version_major != min_version_major ||
       version->version_minor < min_version_minor) {
      result = vk_errorf(instance, VK_ERROR_INCOMPATIBLE_DRIVER,
                         "kernel driver for device %s has version %d.%d, "
                         "but Vulkan requires version >= %d.%d",
                         path,
                         version->version_major, version->version_minor,
                         min_version_major, min_version_minor);
      drmFreeVersion(version);
      close(fd);
      return result;
   }

   drmFreeVersion(version);

   if (instance->debug_flags & TU_DEBUG_STARTUP)
      tu_logi("Found compatible device '%s'.", path);

   device->_loader_data.loaderMagic = ICD_LOADER_MAGIC;
   device->instance = instance;
   assert(strlen(path) < ARRAY_SIZE(device->path));
   strncpy(device->path, path, ARRAY_SIZE(device->path));

   if (instance->enabled_extensions.KHR_display) {
      master_fd = open(drm_device->nodes[DRM_NODE_PRIMARY], O_RDWR | O_CLOEXEC);
      if (master_fd >= 0) {
         /* TODO: free master_fd if accel is not working? */
      }
   }

   device->master_fd = master_fd;
   device->local_fd = fd;

   device->drm_device = fd_device_new_dup(fd);
   if (!device->drm_device) {
      result = vk_errorf(instance, VK_ERROR_INITIALIZATION_FAILED,
                         "could not create the libdrm device");
      goto fail;
   }

   tmp_pipe = fd_pipe_new(device->drm_device, FD_PIPE_3D);
   if (!tmp_pipe) {
      result = vk_errorf(instance, VK_ERROR_INITIALIZATION_FAILED,
                         "could not open the 3D pipe");
      goto fail;
   }

   if (fd_pipe_get_param(tmp_pipe, FD_GPU_ID, &val)) {
      result = vk_errorf(instance, VK_ERROR_INITIALIZATION_FAILED,
                         "could not get GPU ID");
      goto fail;
   }
   device->gpu_id = val;

   if (fd_pipe_get_param(tmp_pipe, FD_GMEM_SIZE, &val)) {
      result = vk_errorf(instance, VK_ERROR_INITIALIZATION_FAILED,
                         "could not get GMEM size");
      goto fail;
   }
   device->gmem_size = val;

   fd_pipe_del(tmp_pipe);
   tmp_pipe = NULL;

   memset(device->name, 0, sizeof(device->name));
   sprintf(device->name, "FD%d", device->gpu_id);

   switch (device->gpu_id) {
   case 530:
      break;
   default:
      result = vk_errorf(instance, VK_ERROR_INITIALIZATION_FAILED,
                         "device %s is unsupported", device->name);
      goto fail;
   }
   if (tu_device_get_cache_uuid(device->gpu_id, device->cache_uuid)) {
      result = vk_errorf(instance, VK_ERROR_INITIALIZATION_FAILED,
                         "cannot generate UUID");
      goto fail;
   }

   /* The gpu id is already embedded in the uuid so we just pass "tu"
    * when creating the cache.
    */
   char buf[VK_UUID_SIZE * 2 + 1];
   disk_cache_format_hex_id(buf, device->cache_uuid, VK_UUID_SIZE * 2);
   device->disk_cache = disk_cache_create(device->name, buf, 0);

   fprintf(stderr,
           "WARNING: tu is not a conformant vulkan implementation, "
           "testing use only.\n");

   tu_get_driver_uuid(&device->driver_uuid);
   tu_get_device_uuid(&device->device_uuid);

   tu_fill_device_extension_table(device, &device->supported_extensions);

   if (result != VK_SUCCESS) {
      vk_error(instance, result);
      goto fail;
   }

   return VK_SUCCESS;

fail:
   if (tmp_pipe)
      fd_pipe_del(tmp_pipe);
   if (device->drm_device)
      fd_device_del(device->drm_device);
   close(fd);
   if (master_fd != -1)
      close(master_fd);
   return result;
}

static void
tu_physical_device_finish(struct tu_physical_device *device)
{
   disk_cache_destroy(device->disk_cache);
   close(device->local_fd);
   if (device->master_fd != -1)
      close(device->master_fd);
}

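/* Fallback allocation callbacks used when the application does not supply
 * its own; the align parameter is ignored, which is fine for the 8-byte
 * alignments requested throughout this file.
 */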
static void *
default_alloc_func(void *pUserData,
                   size_t size,
                   size_t align,
                   VkSystemAllocationScope allocationScope)
{
   return malloc(size);
}

static void *
default_realloc_func(void *pUserData,
                     void *pOriginal,
                     size_t size,
                     size_t align,
                     VkSystemAllocationScope allocationScope)
{
   return realloc(pOriginal, size);
}

static void
default_free_func(void *pUserData, void *pMemory)
{
   free(pMemory);
}

static const VkAllocationCallbacks default_alloc = {
   .pUserData = NULL,
   .pfnAllocation = default_alloc_func,
   .pfnReallocation = default_realloc_func,
   .pfnFree = default_free_func,
};

static const struct debug_control tu_debug_options[] = {
   { "startup", TU_DEBUG_STARTUP },
   { NULL, 0 }
};

const char *
tu_get_debug_option_name(int id)
{
   assert(id < ARRAY_SIZE(tu_debug_options) - 1);
   return tu_debug_options[id].string;
}

static int
tu_get_instance_extension_index(const char *name)
{
   for (unsigned i = 0; i < TU_INSTANCE_EXTENSION_COUNT; ++i) {
      if (strcmp(name, tu_instance_extensions[i].extensionName) == 0)
         return i;
   }
   return -1;
}

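/* Create the instance: honor the apiVersion requested by the application
 * (falling back to the version reported by tu_EnumerateInstanceVersion),
 * parse the TU_DEBUG environment variable, and record which instance
 * extensions were enabled.
 */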
VkResult
tu_CreateInstance(const VkInstanceCreateInfo *pCreateInfo,
                  const VkAllocationCallbacks *pAllocator,
                  VkInstance *pInstance)
{
   struct tu_instance *instance;
   VkResult result;

   assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_INSTANCE_CREATE_INFO);

   uint32_t client_version;
   if (pCreateInfo->pApplicationInfo &&
       pCreateInfo->pApplicationInfo->apiVersion != 0) {
      client_version = pCreateInfo->pApplicationInfo->apiVersion;
   } else {
      tu_EnumerateInstanceVersion(&client_version);
   }

   instance = vk_zalloc2(&default_alloc, pAllocator, sizeof(*instance), 8,
                         VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
   if (!instance)
      return vk_error(NULL, VK_ERROR_OUT_OF_HOST_MEMORY);

   instance->_loader_data.loaderMagic = ICD_LOADER_MAGIC;

   if (pAllocator)
      instance->alloc = *pAllocator;
   else
      instance->alloc = default_alloc;

   instance->api_version = client_version;
   instance->physical_device_count = -1;

   instance->debug_flags =
      parse_debug_string(getenv("TU_DEBUG"), tu_debug_options);

   if (instance->debug_flags & TU_DEBUG_STARTUP)
      tu_logi("Created an instance");

   for (uint32_t i = 0; i < pCreateInfo->enabledExtensionCount; i++) {
      const char *ext_name = pCreateInfo->ppEnabledExtensionNames[i];
      int index = tu_get_instance_extension_index(ext_name);

      if (index < 0 || !tu_supported_instance_extensions.extensions[index]) {
         vk_free2(&default_alloc, pAllocator, instance);
         return vk_error(instance, VK_ERROR_EXTENSION_NOT_PRESENT);
      }

      instance->enabled_extensions.extensions[index] = true;
   }

   result = vk_debug_report_instance_init(&instance->debug_report_callbacks);
   if (result != VK_SUCCESS) {
      vk_free2(&default_alloc, pAllocator, instance);
      return vk_error(instance, result);
   }

   _mesa_locale_init();

   VG(VALGRIND_CREATE_MEMPOOL(instance, 0, false));

   *pInstance = tu_instance_to_handle(instance);

   return VK_SUCCESS;
}

void
tu_DestroyInstance(VkInstance _instance,
                   const VkAllocationCallbacks *pAllocator)
{
   TU_FROM_HANDLE(tu_instance, instance, _instance);

   if (!instance)
      return;

   for (int i = 0; i < instance->physical_device_count; ++i) {
      tu_physical_device_finish(instance->physical_devices + i);
   }

   VG(VALGRIND_DESTROY_MEMPOOL(instance));

   _mesa_locale_fini();

   vk_debug_report_instance_destroy(&instance->debug_report_callbacks);

   vk_free(&instance->alloc, instance);
}

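/* Walk the drm nodes and initialize a tu_physical_device for every msm
 * render node found. VK_ERROR_INCOMPATIBLE_DRIVER from a single node is
 * not fatal: it just means that node is not an adreno GPU.
 */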
static VkResult
tu_enumerate_devices(struct tu_instance *instance)
{
   /* TODO: Check for more devices? */
   drmDevicePtr devices[8];
   VkResult result = VK_ERROR_INCOMPATIBLE_DRIVER;
   int max_devices;

   instance->physical_device_count = 0;

   max_devices = drmGetDevices2(0, devices, ARRAY_SIZE(devices));

   if (instance->debug_flags & TU_DEBUG_STARTUP)
      tu_logi("Found %d drm nodes", max_devices);

   if (max_devices < 1)
      return vk_error(instance, VK_ERROR_INCOMPATIBLE_DRIVER);

   for (unsigned i = 0; i < (unsigned)max_devices; i++) {
      if (devices[i]->available_nodes & 1 << DRM_NODE_RENDER &&
          devices[i]->bustype == DRM_BUS_PLATFORM) {

         result = tu_physical_device_init(instance->physical_devices +
                                             instance->physical_device_count,
                                          instance, devices[i]);
         if (result == VK_SUCCESS)
            ++instance->physical_device_count;
         else if (result != VK_ERROR_INCOMPATIBLE_DRIVER)
            break;
      }
   }
   drmFreeDevices(devices, max_devices);

   return result;
}

VkResult
tu_EnumeratePhysicalDevices(VkInstance _instance,
                            uint32_t *pPhysicalDeviceCount,
                            VkPhysicalDevice *pPhysicalDevices)
{
   TU_FROM_HANDLE(tu_instance, instance, _instance);
   VK_OUTARRAY_MAKE(out, pPhysicalDevices, pPhysicalDeviceCount);

   VkResult result;

   if (instance->physical_device_count < 0) {
      result = tu_enumerate_devices(instance);
      if (result != VK_SUCCESS && result != VK_ERROR_INCOMPATIBLE_DRIVER)
         return result;
   }

   for (uint32_t i = 0; i < instance->physical_device_count; ++i) {
      vk_outarray_append(&out, p) {
         *p = tu_physical_device_to_handle(instance->physical_devices + i);
      }
   }

   return vk_outarray_status(&out);
}

VkResult
tu_EnumeratePhysicalDeviceGroups(
   VkInstance _instance,
   uint32_t *pPhysicalDeviceGroupCount,
   VkPhysicalDeviceGroupProperties *pPhysicalDeviceGroupProperties)
{
   TU_FROM_HANDLE(tu_instance, instance, _instance);
   VK_OUTARRAY_MAKE(out, pPhysicalDeviceGroupProperties,
                    pPhysicalDeviceGroupCount);
   VkResult result;

   if (instance->physical_device_count < 0) {
      result = tu_enumerate_devices(instance);
      if (result != VK_SUCCESS && result != VK_ERROR_INCOMPATIBLE_DRIVER)
         return result;
   }

   for (uint32_t i = 0; i < instance->physical_device_count; ++i) {
      vk_outarray_append(&out, p) {
         p->physicalDeviceCount = 1;
         p->physicalDevices[0] =
            tu_physical_device_to_handle(instance->physical_devices + i);
         p->subsetAllocation = false;
      }
   }

   return vk_outarray_status(&out);
}

void
tu_GetPhysicalDeviceFeatures(VkPhysicalDevice physicalDevice,
                             VkPhysicalDeviceFeatures *pFeatures)
{
   memset(pFeatures, 0, sizeof(*pFeatures));

   *pFeatures = (VkPhysicalDeviceFeatures){
      .robustBufferAccess = false,
      .fullDrawIndexUint32 = false,
      .imageCubeArray = false,
      .independentBlend = false,
      .geometryShader = false,
      .tessellationShader = false,
      .sampleRateShading = false,
      .dualSrcBlend = false,
      .logicOp = false,
      .multiDrawIndirect = false,
      .drawIndirectFirstInstance = false,
      .depthClamp = false,
      .depthBiasClamp = false,
      .fillModeNonSolid = false,
      .depthBounds = false,
      .wideLines = false,
      .largePoints = false,
      .alphaToOne = false,
      .multiViewport = false,
      .samplerAnisotropy = false,
      .textureCompressionETC2 = false,
      .textureCompressionASTC_LDR = false,
      .textureCompressionBC = false,
      .occlusionQueryPrecise = false,
      .pipelineStatisticsQuery = false,
      .vertexPipelineStoresAndAtomics = false,
      .fragmentStoresAndAtomics = false,
      .shaderTessellationAndGeometryPointSize = false,
      .shaderImageGatherExtended = false,
      .shaderStorageImageExtendedFormats = false,
      .shaderStorageImageMultisample = false,
      .shaderUniformBufferArrayDynamicIndexing = false,
      .shaderSampledImageArrayDynamicIndexing = false,
      .shaderStorageBufferArrayDynamicIndexing = false,
      .shaderStorageImageArrayDynamicIndexing = false,
      .shaderStorageImageReadWithoutFormat = false,
      .shaderStorageImageWriteWithoutFormat = false,
      .shaderClipDistance = false,
      .shaderCullDistance = false,
      .shaderFloat64 = false,
      .shaderInt64 = false,
      .shaderInt16 = false,
      .sparseBinding = false,
      .variableMultisampleRate = false,
      .inheritedQueries = false,
   };
}

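/* Fill the pNext chain of VkPhysicalDeviceFeatures2; every optional
 * feature is currently reported as unsupported.
 */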
void
tu_GetPhysicalDeviceFeatures2(VkPhysicalDevice physicalDevice,
                              VkPhysicalDeviceFeatures2KHR *pFeatures)
{
   vk_foreach_struct(ext, pFeatures->pNext)
   {
      switch (ext->sType) {
      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VARIABLE_POINTER_FEATURES_KHR: {
         VkPhysicalDeviceVariablePointerFeaturesKHR *features = (void *)ext;
         features->variablePointersStorageBuffer = false;
         features->variablePointers = false;
         break;
      }
      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MULTIVIEW_FEATURES_KHR: {
         VkPhysicalDeviceMultiviewFeaturesKHR *features =
            (VkPhysicalDeviceMultiviewFeaturesKHR *)ext;
         features->multiview = false;
         features->multiviewGeometryShader = false;
         features->multiviewTessellationShader = false;
         break;
      }
      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_DRAW_PARAMETER_FEATURES: {
         VkPhysicalDeviceShaderDrawParameterFeatures *features =
            (VkPhysicalDeviceShaderDrawParameterFeatures *)ext;
         features->shaderDrawParameters = false;
         break;
      }
      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROTECTED_MEMORY_FEATURES: {
         VkPhysicalDeviceProtectedMemoryFeatures *features =
            (VkPhysicalDeviceProtectedMemoryFeatures *)ext;
         features->protectedMemory = false;
         break;
      }
      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_16BIT_STORAGE_FEATURES: {
         VkPhysicalDevice16BitStorageFeatures *features =
            (VkPhysicalDevice16BitStorageFeatures *)ext;
         features->storageBuffer16BitAccess = false;
         features->uniformAndStorageBuffer16BitAccess = false;
         features->storagePushConstant16 = false;
         features->storageInputOutput16 = false;
         break;
      }
      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SAMPLER_YCBCR_CONVERSION_FEATURES: {
         VkPhysicalDeviceSamplerYcbcrConversionFeatures *features =
            (VkPhysicalDeviceSamplerYcbcrConversionFeatures *)ext;
         features->samplerYcbcrConversion = false;
         break;
      }
      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DESCRIPTOR_INDEXING_FEATURES_EXT: {
         VkPhysicalDeviceDescriptorIndexingFeaturesEXT *features =
            (VkPhysicalDeviceDescriptorIndexingFeaturesEXT *)ext;
         features->shaderInputAttachmentArrayDynamicIndexing = false;
         features->shaderUniformTexelBufferArrayDynamicIndexing = false;
         features->shaderStorageTexelBufferArrayDynamicIndexing = false;
         features->shaderUniformBufferArrayNonUniformIndexing = false;
         features->shaderSampledImageArrayNonUniformIndexing = false;
         features->shaderStorageBufferArrayNonUniformIndexing = false;
         features->shaderStorageImageArrayNonUniformIndexing = false;
         features->shaderInputAttachmentArrayNonUniformIndexing = false;
         features->shaderUniformTexelBufferArrayNonUniformIndexing = false;
         features->shaderStorageTexelBufferArrayNonUniformIndexing = false;
         features->descriptorBindingUniformBufferUpdateAfterBind = false;
         features->descriptorBindingSampledImageUpdateAfterBind = false;
         features->descriptorBindingStorageImageUpdateAfterBind = false;
         features->descriptorBindingStorageBufferUpdateAfterBind = false;
         features->descriptorBindingUniformTexelBufferUpdateAfterBind = false;
         features->descriptorBindingStorageTexelBufferUpdateAfterBind = false;
         features->descriptorBindingUpdateUnusedWhilePending = false;
         features->descriptorBindingPartiallyBound = false;
         features->descriptorBindingVariableDescriptorCount = false;
         features->runtimeDescriptorArray = false;
         break;
      }
      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_CONDITIONAL_RENDERING_FEATURES_EXT: {
         VkPhysicalDeviceConditionalRenderingFeaturesEXT *features =
            (VkPhysicalDeviceConditionalRenderingFeaturesEXT *)ext;
         features->conditionalRendering = false;
         features->inheritedConditionalRendering = false;
         break;
      }
      default:
         break;
      }
   }

   tu_GetPhysicalDeviceFeatures(physicalDevice, &pFeatures->features);
}

void
tu_GetPhysicalDeviceProperties(VkPhysicalDevice physicalDevice,
                               VkPhysicalDeviceProperties *pProperties)
{
   TU_FROM_HANDLE(tu_physical_device, pdevice, physicalDevice);
   VkSampleCountFlags sample_counts = 0xf;

   /* Make sure the entire descriptor set is addressable with a signed
    * 32-bit int, i.e. the sum of all limits scaled by descriptor size must
    * be at most 2 GiB. A combined image & sampler descriptor counts against
    * both limits. This limit is for the pipeline layout, not the set
    * layout, but there is no set limit, so we just set a pipeline limit.
    * No app is likely to hit this soon.
    */
   size_t max_descriptor_set_size =
      ((1ull << 31) - 16 * MAX_DYNAMIC_BUFFERS) /
      (32 /* uniform buffer, 32 due to potential space wasted on alignment */ +
       32 /* storage buffer, 32 due to potential space wasted on alignment */ +
       32 /* sampler, largest when combined with image */ +
       64 /* sampled image */ + 64 /* storage image */);

   VkPhysicalDeviceLimits limits = {
      .maxImageDimension1D = (1 << 14),
      .maxImageDimension2D = (1 << 14),
      .maxImageDimension3D = (1 << 11),
      .maxImageDimensionCube = (1 << 14),
      .maxImageArrayLayers = (1 << 11),
      .maxTexelBufferElements = 128 * 1024 * 1024,
      .maxUniformBufferRange = UINT32_MAX,
      .maxStorageBufferRange = UINT32_MAX,
      .maxPushConstantsSize = MAX_PUSH_CONSTANTS_SIZE,
      .maxMemoryAllocationCount = UINT32_MAX,
      .maxSamplerAllocationCount = 64 * 1024,
      .bufferImageGranularity = 64, /* A cache line */
      .sparseAddressSpaceSize = 0xffffffffu, /* buffer max size */
      .maxBoundDescriptorSets = MAX_SETS,
      .maxPerStageDescriptorSamplers = max_descriptor_set_size,
      .maxPerStageDescriptorUniformBuffers = max_descriptor_set_size,
      .maxPerStageDescriptorStorageBuffers = max_descriptor_set_size,
      .maxPerStageDescriptorSampledImages = max_descriptor_set_size,
      .maxPerStageDescriptorStorageImages = max_descriptor_set_size,
      .maxPerStageDescriptorInputAttachments = max_descriptor_set_size,
      .maxPerStageResources = max_descriptor_set_size,
      .maxDescriptorSetSamplers = max_descriptor_set_size,
      .maxDescriptorSetUniformBuffers = max_descriptor_set_size,
      .maxDescriptorSetUniformBuffersDynamic = MAX_DYNAMIC_UNIFORM_BUFFERS,
      .maxDescriptorSetStorageBuffers = max_descriptor_set_size,
      .maxDescriptorSetStorageBuffersDynamic = MAX_DYNAMIC_STORAGE_BUFFERS,
      .maxDescriptorSetSampledImages = max_descriptor_set_size,
      .maxDescriptorSetStorageImages = max_descriptor_set_size,
      .maxDescriptorSetInputAttachments = max_descriptor_set_size,
      .maxVertexInputAttributes = 32,
      .maxVertexInputBindings = 32,
      .maxVertexInputAttributeOffset = 2047,
      .maxVertexInputBindingStride = 2048,
      .maxVertexOutputComponents = 128,
      .maxTessellationGenerationLevel = 64,
      .maxTessellationPatchSize = 32,
      .maxTessellationControlPerVertexInputComponents = 128,
      .maxTessellationControlPerVertexOutputComponents = 128,
      .maxTessellationControlPerPatchOutputComponents = 120,
      .maxTessellationControlTotalOutputComponents = 4096,
      .maxTessellationEvaluationInputComponents = 128,
      .maxTessellationEvaluationOutputComponents = 128,
      .maxGeometryShaderInvocations = 127,
      .maxGeometryInputComponents = 64,
      .maxGeometryOutputComponents = 128,
      .maxGeometryOutputVertices = 256,
      .maxGeometryTotalOutputComponents = 1024,
      .maxFragmentInputComponents = 128,
      .maxFragmentOutputAttachments = 8,
      .maxFragmentDualSrcAttachments = 1,
      .maxFragmentCombinedOutputResources = 8,
      .maxComputeSharedMemorySize = 32768,
      .maxComputeWorkGroupCount = { 65535, 65535, 65535 },
      .maxComputeWorkGroupInvocations = 2048,
      .maxComputeWorkGroupSize = { 2048, 2048, 2048 },
      .subPixelPrecisionBits = 4 /* FIXME */,
      .subTexelPrecisionBits = 4 /* FIXME */,
      .mipmapPrecisionBits = 4 /* FIXME */,
      .maxDrawIndexedIndexValue = UINT32_MAX,
      .maxDrawIndirectCount = UINT32_MAX,
      .maxSamplerLodBias = 16,
      .maxSamplerAnisotropy = 16,
      .maxViewports = MAX_VIEWPORTS,
      .maxViewportDimensions = { (1 << 14), (1 << 14) },
      .viewportBoundsRange = { INT16_MIN, INT16_MAX },
      .viewportSubPixelBits = 8,
      .minMemoryMapAlignment = 4096, /* A page */
      .minTexelBufferOffsetAlignment = 1,
      .minUniformBufferOffsetAlignment = 4,
      .minStorageBufferOffsetAlignment = 4,
      .minTexelOffset = -32,
      .maxTexelOffset = 31,
      .minTexelGatherOffset = -32,
      .maxTexelGatherOffset = 31,
      .minInterpolationOffset = -2,
      .maxInterpolationOffset = 2,
      .subPixelInterpolationOffsetBits = 8,
      .maxFramebufferWidth = (1 << 14),
      .maxFramebufferHeight = (1 << 14),
      .maxFramebufferLayers = (1 << 10),
      .framebufferColorSampleCounts = sample_counts,
      .framebufferDepthSampleCounts = sample_counts,
      .framebufferStencilSampleCounts = sample_counts,
      .framebufferNoAttachmentsSampleCounts = sample_counts,
      .maxColorAttachments = MAX_RTS,
      .sampledImageColorSampleCounts = sample_counts,
      .sampledImageIntegerSampleCounts = VK_SAMPLE_COUNT_1_BIT,
      .sampledImageDepthSampleCounts = sample_counts,
      .sampledImageStencilSampleCounts = sample_counts,
      .storageImageSampleCounts = VK_SAMPLE_COUNT_1_BIT,
      .maxSampleMaskWords = 1,
      .timestampComputeAndGraphics = true,
      .timestampPeriod = 1,
      .maxClipDistances = 8,
      .maxCullDistances = 8,
      .maxCombinedClipAndCullDistances = 8,
      .discreteQueuePriorities = 1,
      .pointSizeRange = { 0.125, 255.875 },
      .lineWidthRange = { 0.0, 7.9921875 },
      .pointSizeGranularity = (1.0 / 8.0),
      .lineWidthGranularity = (1.0 / 128.0),
      .strictLines = false, /* FINISHME */
      .standardSampleLocations = true,
      .optimalBufferCopyOffsetAlignment = 128,
      .optimalBufferCopyRowPitchAlignment = 128,
      .nonCoherentAtomSize = 64,
   };

   *pProperties = (VkPhysicalDeviceProperties){
      .apiVersion = tu_physical_device_api_version(pdevice),
      .driverVersion = vk_get_driver_version(),
      .vendorID = 0, /* TODO */
      .deviceID = 0,
      .deviceType = VK_PHYSICAL_DEVICE_TYPE_INTEGRATED_GPU,
      .limits = limits,
      .sparseProperties = { 0 },
   };

   strcpy(pProperties->deviceName, pdevice->name);
   memcpy(pProperties->pipelineCacheUUID, pdevice->cache_uuid, VK_UUID_SIZE);
}

void
tu_GetPhysicalDeviceProperties2(VkPhysicalDevice physicalDevice,
                                VkPhysicalDeviceProperties2KHR *pProperties)
{
   TU_FROM_HANDLE(tu_physical_device, pdevice, physicalDevice);
   tu_GetPhysicalDeviceProperties(physicalDevice, &pProperties->properties);

   vk_foreach_struct(ext, pProperties->pNext)
   {
      switch (ext->sType) {
      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PUSH_DESCRIPTOR_PROPERTIES_KHR: {
         VkPhysicalDevicePushDescriptorPropertiesKHR *properties =
            (VkPhysicalDevicePushDescriptorPropertiesKHR *)ext;
         properties->maxPushDescriptors = MAX_PUSH_DESCRIPTORS;
         break;
      }
      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_ID_PROPERTIES_KHR: {
         VkPhysicalDeviceIDPropertiesKHR *properties =
            (VkPhysicalDeviceIDPropertiesKHR *)ext;
         memcpy(properties->driverUUID, pdevice->driver_uuid, VK_UUID_SIZE);
         memcpy(properties->deviceUUID, pdevice->device_uuid, VK_UUID_SIZE);
         properties->deviceLUIDValid = false;
         break;
      }
      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MULTIVIEW_PROPERTIES_KHR: {
         VkPhysicalDeviceMultiviewPropertiesKHR *properties =
            (VkPhysicalDeviceMultiviewPropertiesKHR *)ext;
         properties->maxMultiviewViewCount = MAX_VIEWS;
         properties->maxMultiviewInstanceIndex = INT_MAX;
         break;
      }
      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_POINT_CLIPPING_PROPERTIES_KHR: {
         VkPhysicalDevicePointClippingPropertiesKHR *properties =
            (VkPhysicalDevicePointClippingPropertiesKHR *)ext;
         properties->pointClippingBehavior =
            VK_POINT_CLIPPING_BEHAVIOR_ALL_CLIP_PLANES_KHR;
         break;
      }
      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MAINTENANCE_3_PROPERTIES: {
         VkPhysicalDeviceMaintenance3Properties *properties =
            (VkPhysicalDeviceMaintenance3Properties *)ext;
         /* Make sure everything is addressable by a signed 32-bit int, and
          * our largest descriptors are 96 bytes.
          */
         properties->maxPerSetDescriptors = (1ull << 31) / 96;
         /* Our buffer size fields allow only this much */
         properties->maxMemoryAllocationSize = 0xFFFFFFFFull;
         break;
      }
      default:
         break;
      }
   }
}

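/* A single queue family is exposed, with one queue that supports graphics,
 * compute and transfer work.
 */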
static const VkQueueFamilyProperties tu_queue_family_properties = {
   .queueFlags =
      VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT | VK_QUEUE_TRANSFER_BIT,
   .queueCount = 1,
   .timestampValidBits = 64,
   .minImageTransferGranularity = (VkExtent3D) { 1, 1, 1 },
};

void
tu_GetPhysicalDeviceQueueFamilyProperties(
   VkPhysicalDevice physicalDevice,
   uint32_t *pQueueFamilyPropertyCount,
   VkQueueFamilyProperties *pQueueFamilyProperties)
{
   VK_OUTARRAY_MAKE(out, pQueueFamilyProperties, pQueueFamilyPropertyCount);

   vk_outarray_append(&out, p) {
      *p = tu_queue_family_properties;
   }
}

void
tu_GetPhysicalDeviceQueueFamilyProperties2(
   VkPhysicalDevice physicalDevice,
   uint32_t *pQueueFamilyPropertyCount,
   VkQueueFamilyProperties2KHR *pQueueFamilyProperties)
{
   VK_OUTARRAY_MAKE(out, pQueueFamilyProperties, pQueueFamilyPropertyCount);

   vk_outarray_append(&out, p) {
      p->queueFamilyProperties = tu_queue_family_properties;
   }
}

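/* The GPU shares memory with the CPU, so the advertised heap size is
 * derived from system RAM: e.g. 2 GiB of RAM yields a 1 GiB heap, while
 * 8 GiB of RAM yields a 6 GiB heap.
 */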
static uint64_t
tu_get_system_heap_size(void)
{
   struct sysinfo info;
   sysinfo(&info);

   uint64_t total_ram = (uint64_t)info.totalram * (uint64_t)info.mem_unit;

   /* We don't want to burn too much ram with the GPU. If the user has 4GiB
    * or less, we use at most half. If they have more than 4GiB, we use 3/4.
    */
   uint64_t available_ram;
   if (total_ram <= 4ull * 1024ull * 1024ull * 1024ull)
      available_ram = total_ram / 2;
   else
      available_ram = total_ram * 3 / 4;

   return available_ram;
}

void
tu_GetPhysicalDeviceMemoryProperties(
   VkPhysicalDevice physicalDevice,
   VkPhysicalDeviceMemoryProperties *pMemoryProperties)
{
   pMemoryProperties->memoryHeapCount = 1;
   pMemoryProperties->memoryHeaps[0].size = tu_get_system_heap_size();
   pMemoryProperties->memoryHeaps[0].flags = VK_MEMORY_HEAP_DEVICE_LOCAL_BIT;

   pMemoryProperties->memoryTypeCount = 1;
   pMemoryProperties->memoryTypes[0].propertyFlags =
      VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT |
      VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT |
      VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
   pMemoryProperties->memoryTypes[0].heapIndex = 0;
}

void
tu_GetPhysicalDeviceMemoryProperties2(
   VkPhysicalDevice physicalDevice,
   VkPhysicalDeviceMemoryProperties2KHR *pMemoryProperties)
{
   tu_GetPhysicalDeviceMemoryProperties(
      physicalDevice, &pMemoryProperties->memoryProperties);
}

static int
tu_queue_init(struct tu_device *device,
              struct tu_queue *queue,
              uint32_t queue_family_index,
              int idx,
              VkDeviceQueueCreateFlags flags)
{
   queue->_loader_data.loaderMagic = ICD_LOADER_MAGIC;
   queue->device = device;
   queue->queue_family_index = queue_family_index;
   queue->queue_idx = idx;
   queue->flags = flags;

   return VK_SUCCESS;
}

static void
tu_queue_finish(struct tu_queue *queue)
{
}

static int
tu_get_device_extension_index(const char *name)
{
   for (unsigned i = 0; i < TU_DEVICE_EXTENSION_COUNT; ++i) {
      if (strcmp(name, tu_device_extensions[i].extensionName) == 0)
         return i;
   }
   return -1;
}

VkResult
tu_CreateDevice(VkPhysicalDevice physicalDevice,
                const VkDeviceCreateInfo *pCreateInfo,
                const VkAllocationCallbacks *pAllocator,
                VkDevice *pDevice)
{
   TU_FROM_HANDLE(tu_physical_device, physical_device, physicalDevice);
   VkResult result;
   struct tu_device *device;

   /* Check enabled features */
   if (pCreateInfo->pEnabledFeatures) {
      VkPhysicalDeviceFeatures supported_features;
      tu_GetPhysicalDeviceFeatures(physicalDevice, &supported_features);
      VkBool32 *supported_feature = (VkBool32 *)&supported_features;
      VkBool32 *enabled_feature = (VkBool32 *)pCreateInfo->pEnabledFeatures;
      unsigned num_features =
         sizeof(VkPhysicalDeviceFeatures) / sizeof(VkBool32);
      for (uint32_t i = 0; i < num_features; i++) {
         if (enabled_feature[i] && !supported_feature[i])
            return vk_error(physical_device->instance,
                            VK_ERROR_FEATURE_NOT_PRESENT);
      }
   }

   device = vk_zalloc2(&physical_device->instance->alloc, pAllocator,
                       sizeof(*device), 8, VK_SYSTEM_ALLOCATION_SCOPE_DEVICE);
   if (!device)
      return vk_error(physical_device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);

   device->_loader_data.loaderMagic = ICD_LOADER_MAGIC;
   device->instance = physical_device->instance;
   device->physical_device = physical_device;

   if (pAllocator)
      device->alloc = *pAllocator;
   else
      device->alloc = physical_device->instance->alloc;

   for (uint32_t i = 0; i < pCreateInfo->enabledExtensionCount; i++) {
      const char *ext_name = pCreateInfo->ppEnabledExtensionNames[i];
      int index = tu_get_device_extension_index(ext_name);
      if (index < 0 ||
          !physical_device->supported_extensions.extensions[index]) {
         vk_free(&device->alloc, device);
         return vk_error(physical_device->instance,
                         VK_ERROR_EXTENSION_NOT_PRESENT);
      }

      device->enabled_extensions.extensions[index] = true;
   }

   for (unsigned i = 0; i < pCreateInfo->queueCreateInfoCount; i++) {
      const VkDeviceQueueCreateInfo *queue_create =
         &pCreateInfo->pQueueCreateInfos[i];
      uint32_t qfi = queue_create->queueFamilyIndex;
      device->queues[qfi] =
         vk_alloc(&device->alloc,
                  queue_create->queueCount * sizeof(struct tu_queue), 8,
                  VK_SYSTEM_ALLOCATION_SCOPE_DEVICE);
      if (!device->queues[qfi]) {
         result = VK_ERROR_OUT_OF_HOST_MEMORY;
         goto fail;
      }

      memset(device->queues[qfi], 0,
             queue_create->queueCount * sizeof(struct tu_queue));

      device->queue_count[qfi] = queue_create->queueCount;

      for (unsigned q = 0; q < queue_create->queueCount; q++) {
         result = tu_queue_init(device, &device->queues[qfi][q], qfi, q,
                                queue_create->flags);
         if (result != VK_SUCCESS)
            goto fail;
      }
   }

   VkPipelineCacheCreateInfo ci;
   ci.sType = VK_STRUCTURE_TYPE_PIPELINE_CACHE_CREATE_INFO;
   ci.pNext = NULL;
   ci.flags = 0;
   ci.pInitialData = NULL;
   ci.initialDataSize = 0;
   VkPipelineCache pc;
   result =
      tu_CreatePipelineCache(tu_device_to_handle(device), &ci, NULL, &pc);
   if (result != VK_SUCCESS)
      goto fail;

   device->mem_cache = tu_pipeline_cache_from_handle(pc);

   *pDevice = tu_device_to_handle(device);
   return VK_SUCCESS;

fail:
   for (unsigned i = 0; i < TU_MAX_QUEUE_FAMILIES; i++) {
      for (unsigned q = 0; q < device->queue_count[i]; q++)
         tu_queue_finish(&device->queues[i][q]);
      if (device->queue_count[i])
         vk_free(&device->alloc, device->queues[i]);
   }

   vk_free(&device->alloc, device);
   return result;
}

void
tu_DestroyDevice(VkDevice _device, const VkAllocationCallbacks *pAllocator)
{
   TU_FROM_HANDLE(tu_device, device, _device);

   if (!device)
      return;

   for (unsigned i = 0; i < TU_MAX_QUEUE_FAMILIES; i++) {
      for (unsigned q = 0; q < device->queue_count[i]; q++)
         tu_queue_finish(&device->queues[i][q]);
      if (device->queue_count[i])
         vk_free(&device->alloc, device->queues[i]);
   }

   VkPipelineCache pc = tu_pipeline_cache_to_handle(device->mem_cache);
   tu_DestroyPipelineCache(tu_device_to_handle(device), pc, NULL);

   vk_free(&device->alloc, device);
}

VkResult
tu_EnumerateInstanceLayerProperties(uint32_t *pPropertyCount,
                                    VkLayerProperties *pProperties)
{
   *pPropertyCount = 0;
   return VK_SUCCESS;
}

VkResult
tu_EnumerateDeviceLayerProperties(VkPhysicalDevice physicalDevice,
                                  uint32_t *pPropertyCount,
                                  VkLayerProperties *pProperties)
{
   *pPropertyCount = 0;
   return VK_SUCCESS;
}

void
tu_GetDeviceQueue2(VkDevice _device,
                   const VkDeviceQueueInfo2 *pQueueInfo,
                   VkQueue *pQueue)
{
   TU_FROM_HANDLE(tu_device, device, _device);
   struct tu_queue *queue;

   queue =
      &device->queues[pQueueInfo->queueFamilyIndex][pQueueInfo->queueIndex];
   if (pQueueInfo->flags != queue->flags) {
      /* From the Vulkan 1.1.70 spec:
       *
       * "The queue returned by vkGetDeviceQueue2 must have the same
       * flags value from this structure as that used at device
       * creation time in a VkDeviceQueueCreateInfo instance. If no
       * matching flags were specified at device creation time then
       * pQueue will return VK_NULL_HANDLE."
       */
      *pQueue = VK_NULL_HANDLE;
      return;
   }

   *pQueue = tu_queue_to_handle(queue);
}

void
tu_GetDeviceQueue(VkDevice _device,
                  uint32_t queueFamilyIndex,
                  uint32_t queueIndex,
                  VkQueue *pQueue)
{
   const VkDeviceQueueInfo2 info =
      (VkDeviceQueueInfo2){ .sType = VK_STRUCTURE_TYPE_DEVICE_QUEUE_INFO_2,
                            .queueFamilyIndex = queueFamilyIndex,
                            .queueIndex = queueIndex };

   tu_GetDeviceQueue2(_device, &info, pQueue);
}

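/* Command submission and synchronization are still stubs: the entry points
 * below succeed unconditionally without touching the hardware.
 */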
VkResult
tu_QueueSubmit(VkQueue _queue,
               uint32_t submitCount,
               const VkSubmitInfo *pSubmits,
               VkFence _fence)
{
   return VK_SUCCESS;
}

VkResult
tu_QueueWaitIdle(VkQueue _queue)
{
   return VK_SUCCESS;
}

VkResult
tu_DeviceWaitIdle(VkDevice _device)
{
   TU_FROM_HANDLE(tu_device, device, _device);

   for (unsigned i = 0; i < TU_MAX_QUEUE_FAMILIES; i++) {
      for (unsigned q = 0; q < device->queue_count[i]; q++) {
         tu_QueueWaitIdle(tu_queue_to_handle(&device->queues[i][q]));
      }
   }
   return VK_SUCCESS;
}

VkResult
tu_EnumerateInstanceExtensionProperties(const char *pLayerName,
                                        uint32_t *pPropertyCount,
                                        VkExtensionProperties *pProperties)
{
   VK_OUTARRAY_MAKE(out, pProperties, pPropertyCount);

   /* We support no layers */
   if (pLayerName)
      return vk_error(NULL, VK_ERROR_LAYER_NOT_PRESENT);

   for (int i = 0; i < TU_INSTANCE_EXTENSION_COUNT; i++) {
      if (tu_supported_instance_extensions.extensions[i]) {
         vk_outarray_append(&out, prop) { *prop = tu_instance_extensions[i]; }
      }
   }

   return vk_outarray_status(&out);
}

VkResult
tu_EnumerateDeviceExtensionProperties(VkPhysicalDevice physicalDevice,
                                      const char *pLayerName,
                                      uint32_t *pPropertyCount,
                                      VkExtensionProperties *pProperties)
{
   TU_FROM_HANDLE(tu_physical_device, device, physicalDevice);
   VK_OUTARRAY_MAKE(out, pProperties, pPropertyCount);

   /* We support no layers */
   if (pLayerName)
      return vk_error(NULL, VK_ERROR_LAYER_NOT_PRESENT);

   for (int i = 0; i < TU_DEVICE_EXTENSION_COUNT; i++) {
      if (device->supported_extensions.extensions[i]) {
         vk_outarray_append(&out, prop) { *prop = tu_device_extensions[i]; }
      }
   }

   return vk_outarray_status(&out);
}

PFN_vkVoidFunction
tu_GetInstanceProcAddr(VkInstance _instance, const char *pName)
{
   TU_FROM_HANDLE(tu_instance, instance, _instance);

   return tu_lookup_entrypoint_checked(pName,
                                       instance ? instance->api_version : 0,
                                       instance ? &instance->enabled_extensions
                                                : NULL,
                                       NULL);
}

/* The loader wants us to expose a second GetInstanceProcAddr function
 * to work around certain LD_PRELOAD issues seen in apps.
 */
PUBLIC
VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL
vk_icdGetInstanceProcAddr(VkInstance instance, const char *pName);

PUBLIC
VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL
vk_icdGetInstanceProcAddr(VkInstance instance, const char *pName)
{
   return tu_GetInstanceProcAddr(instance, pName);
}

PFN_vkVoidFunction
tu_GetDeviceProcAddr(VkDevice _device, const char *pName)
{
   TU_FROM_HANDLE(tu_device, device, _device);

   return tu_lookup_entrypoint_checked(pName,
                                       device->instance->api_version,
                                       &device->instance->enabled_extensions,
                                       &device->enabled_extensions);
}

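/* All memory comes from write-combined, kernel-managed buffer objects;
 * there is a single memory type, so memoryTypeIndex is simply recorded.
 */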
static VkResult
tu_alloc_memory(struct tu_device *device,
                const VkMemoryAllocateInfo *pAllocateInfo,
                const VkAllocationCallbacks *pAllocator,
                VkDeviceMemory *pMem)
{
   struct tu_device_memory *mem;

   assert(pAllocateInfo->sType == VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO);

   if (pAllocateInfo->allocationSize == 0) {
      /* Apparently, this is allowed */
      *pMem = VK_NULL_HANDLE;
      return VK_SUCCESS;
   }

   mem = vk_alloc2(&device->alloc, pAllocator, sizeof(*mem), 8,
                   VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (mem == NULL)
      return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);

   mem->bo = fd_bo_new(device->physical_device->drm_device,
                       pAllocateInfo->allocationSize,
                       DRM_FREEDRENO_GEM_CACHE_WCOMBINE |
                          DRM_FREEDRENO_GEM_TYPE_KMEM);
   if (!mem->bo) {
      vk_free2(&device->alloc, pAllocator, mem);
      return vk_error(device->instance, VK_ERROR_OUT_OF_DEVICE_MEMORY);
   }
   mem->size = pAllocateInfo->allocationSize;
   mem->type_index = pAllocateInfo->memoryTypeIndex;

   mem->map = NULL;
   mem->user_ptr = NULL;

   *pMem = tu_device_memory_to_handle(mem);

   return VK_SUCCESS;
}

VkResult
tu_AllocateMemory(VkDevice _device,
                  const VkMemoryAllocateInfo *pAllocateInfo,
                  const VkAllocationCallbacks *pAllocator,
                  VkDeviceMemory *pMem)
{
   TU_FROM_HANDLE(tu_device, device, _device);
   return tu_alloc_memory(device, pAllocateInfo, pAllocator, pMem);
}

void
tu_FreeMemory(VkDevice _device,
              VkDeviceMemory _mem,
              const VkAllocationCallbacks *pAllocator)
{
   TU_FROM_HANDLE(tu_device, device, _device);
   TU_FROM_HANDLE(tu_device_memory, mem, _mem);

   if (mem == NULL)
      return;

   if (mem->bo)
      fd_bo_del(mem->bo);

   vk_free2(&device->alloc, pAllocator, mem);
}

VkResult
tu_MapMemory(VkDevice _device,
             VkDeviceMemory _memory,
             VkDeviceSize offset,
             VkDeviceSize size,
             VkMemoryMapFlags flags,
             void **ppData)
{
   TU_FROM_HANDLE(tu_device, device, _device);
   TU_FROM_HANDLE(tu_device_memory, mem, _memory);

   if (mem == NULL) {
      *ppData = NULL;
      return VK_SUCCESS;
   }

   if (mem->user_ptr) {
      *ppData = mem->user_ptr;
   } else if (!mem->map) {
      *ppData = mem->map = fd_bo_map(mem->bo);
   } else {
      *ppData = mem->map;
   }

   if (*ppData) {
      *ppData += offset;
      return VK_SUCCESS;
   }

   return vk_error(device->instance, VK_ERROR_MEMORY_MAP_FAILED);
}

void
tu_UnmapMemory(VkDevice _device, VkDeviceMemory _memory)
{
   /* I do not see any unmapping done by the freedreno Gallium driver. */
}

VkResult
tu_FlushMappedMemoryRanges(VkDevice _device,
                           uint32_t memoryRangeCount,
                           const VkMappedMemoryRange *pMemoryRanges)
{
   return VK_SUCCESS;
}

VkResult
tu_InvalidateMappedMemoryRanges(VkDevice _device,
                                uint32_t memoryRangeCount,
                                const VkMappedMemoryRange *pMemoryRanges)
{
   return VK_SUCCESS;
}

void
tu_GetBufferMemoryRequirements(VkDevice _device,
                               VkBuffer _buffer,
                               VkMemoryRequirements *pMemoryRequirements)
{
   TU_FROM_HANDLE(tu_buffer, buffer, _buffer);

   pMemoryRequirements->memoryTypeBits = 1;
   pMemoryRequirements->alignment = 16;
   pMemoryRequirements->size =
      align64(buffer->size, pMemoryRequirements->alignment);
}

void
tu_GetBufferMemoryRequirements2(
   VkDevice device,
   const VkBufferMemoryRequirementsInfo2KHR *pInfo,
   VkMemoryRequirements2KHR *pMemoryRequirements)
{
   tu_GetBufferMemoryRequirements(
      device, pInfo->buffer, &pMemoryRequirements->memoryRequirements);
}

void
tu_GetImageMemoryRequirements(VkDevice _device,
                              VkImage _image,
                              VkMemoryRequirements *pMemoryRequirements)
{
   TU_FROM_HANDLE(tu_image, image, _image);

   pMemoryRequirements->memoryTypeBits = 1;
   pMemoryRequirements->size = image->size;
   pMemoryRequirements->alignment = image->alignment;
}

void
tu_GetImageMemoryRequirements2(VkDevice device,
                               const VkImageMemoryRequirementsInfo2KHR *pInfo,
                               VkMemoryRequirements2KHR *pMemoryRequirements)
{
   tu_GetImageMemoryRequirements(
      device, pInfo->image, &pMemoryRequirements->memoryRequirements);
}

void
tu_GetImageSparseMemoryRequirements(
   VkDevice device,
   VkImage image,
   uint32_t *pSparseMemoryRequirementCount,
   VkSparseImageMemoryRequirements *pSparseMemoryRequirements)
{
   stub();
}

void
tu_GetImageSparseMemoryRequirements2(
   VkDevice device,
   const VkImageSparseMemoryRequirementsInfo2KHR *pInfo,
   uint32_t *pSparseMemoryRequirementCount,
   VkSparseImageMemoryRequirements2KHR *pSparseMemoryRequirements)
{
   stub();
}

void
tu_GetDeviceMemoryCommitment(VkDevice device,
                             VkDeviceMemory memory,
                             VkDeviceSize *pCommittedMemoryInBytes)
{
   *pCommittedMemoryInBytes = 0;
}

VkResult
tu_BindBufferMemory2(VkDevice device,
                     uint32_t bindInfoCount,
                     const VkBindBufferMemoryInfoKHR *pBindInfos)
{
   return VK_SUCCESS;
}

VkResult
tu_BindBufferMemory(VkDevice device,
                    VkBuffer buffer,
                    VkDeviceMemory memory,
                    VkDeviceSize memoryOffset)
{
   const VkBindBufferMemoryInfoKHR info = {
      .sType = VK_STRUCTURE_TYPE_BIND_BUFFER_MEMORY_INFO_KHR,
      .buffer = buffer,
      .memory = memory,
      .memoryOffset = memoryOffset
   };

   return tu_BindBufferMemory2(device, 1, &info);
}

VkResult
tu_BindImageMemory2(VkDevice device,
                    uint32_t bindInfoCount,
                    const VkBindImageMemoryInfoKHR *pBindInfos)
{
   return VK_SUCCESS;
}

VkResult
tu_BindImageMemory(VkDevice device,
                   VkImage image,
                   VkDeviceMemory memory,
                   VkDeviceSize memoryOffset)
{
   const VkBindImageMemoryInfoKHR info = {
      .sType = VK_STRUCTURE_TYPE_BIND_IMAGE_MEMORY_INFO_KHR,
      .image = image,
      .memory = memory,
      .memoryOffset = memoryOffset
   };

   return tu_BindImageMemory2(device, 1, &info);
}

VkResult
tu_QueueBindSparse(VkQueue _queue,
                   uint32_t bindInfoCount,
                   const VkBindSparseInfo *pBindInfo,
                   VkFence _fence)
{
   return VK_SUCCESS;
}

VkResult
tu_CreateFence(VkDevice _device,
               const VkFenceCreateInfo *pCreateInfo,
               const VkAllocationCallbacks *pAllocator,
               VkFence *pFence)
{
   TU_FROM_HANDLE(tu_device, device, _device);

   struct tu_fence *fence = vk_alloc2(&device->alloc, pAllocator,
                                      sizeof(*fence), 8,
                                      VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);

   if (!fence)
      return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);

   *pFence = tu_fence_to_handle(fence);

   return VK_SUCCESS;
}

void
tu_DestroyFence(VkDevice _device,
                VkFence _fence,
                const VkAllocationCallbacks *pAllocator)
{
   TU_FROM_HANDLE(tu_device, device, _device);
   TU_FROM_HANDLE(tu_fence, fence, _fence);

   if (!fence)
      return;

   vk_free2(&device->alloc, pAllocator, fence);
}

VkResult
tu_WaitForFences(VkDevice _device,
                 uint32_t fenceCount,
                 const VkFence *pFences,
                 VkBool32 waitAll,
                 uint64_t timeout)
{
   return VK_SUCCESS;
}

VkResult
tu_ResetFences(VkDevice _device, uint32_t fenceCount, const VkFence *pFences)
{
   return VK_SUCCESS;
}

VkResult
tu_GetFenceStatus(VkDevice _device, VkFence _fence)
{
   return VK_SUCCESS;
}

// Queue semaphore functions

VkResult
tu_CreateSemaphore(VkDevice _device,
                   const VkSemaphoreCreateInfo *pCreateInfo,
                   const VkAllocationCallbacks *pAllocator,
                   VkSemaphore *pSemaphore)
{
   TU_FROM_HANDLE(tu_device, device, _device);

   struct tu_semaphore *sem = vk_alloc2(&device->alloc, pAllocator,
                                        sizeof(*sem), 8,
                                        VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (!sem)
      return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);

   *pSemaphore = tu_semaphore_to_handle(sem);
   return VK_SUCCESS;
}

void
tu_DestroySemaphore(VkDevice _device,
                    VkSemaphore _semaphore,
                    const VkAllocationCallbacks *pAllocator)
{
   TU_FROM_HANDLE(tu_device, device, _device);
   TU_FROM_HANDLE(tu_semaphore, sem, _semaphore);
   if (!_semaphore)
      return;

   vk_free2(&device->alloc, pAllocator, sem);
}

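/* Events are backed by a single dword at event->map. Note that
 * tu_CreateEvent does not yet allocate that storage, so the Set/Reset/
 * GetEventStatus entry points below must not be reached until it does.
 */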
VkResult
tu_CreateEvent(VkDevice _device,
               const VkEventCreateInfo *pCreateInfo,
               const VkAllocationCallbacks *pAllocator,
               VkEvent *pEvent)
{
   TU_FROM_HANDLE(tu_device, device, _device);
   struct tu_event *event = vk_alloc2(&device->alloc, pAllocator,
                                      sizeof(*event), 8,
                                      VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);

   if (!event)
      return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);

   *pEvent = tu_event_to_handle(event);

   return VK_SUCCESS;
}

void
tu_DestroyEvent(VkDevice _device,
                VkEvent _event,
                const VkAllocationCallbacks *pAllocator)
{
   TU_FROM_HANDLE(tu_device, device, _device);
   TU_FROM_HANDLE(tu_event, event, _event);

   if (!event)
      return;
   vk_free2(&device->alloc, pAllocator, event);
}

VkResult
tu_GetEventStatus(VkDevice _device, VkEvent _event)
{
   TU_FROM_HANDLE(tu_event, event, _event);

   if (*event->map == 1)
      return VK_EVENT_SET;
   return VK_EVENT_RESET;
}

VkResult
tu_SetEvent(VkDevice _device, VkEvent _event)
{
   TU_FROM_HANDLE(tu_event, event, _event);
   *event->map = 1;

   return VK_SUCCESS;
}

VkResult
tu_ResetEvent(VkDevice _device, VkEvent _event)
{
   TU_FROM_HANDLE(tu_event, event, _event);
   *event->map = 0;

   return VK_SUCCESS;
}

VkResult
tu_CreateBuffer(VkDevice _device,
                const VkBufferCreateInfo *pCreateInfo,
                const VkAllocationCallbacks *pAllocator,
                VkBuffer *pBuffer)
{
   TU_FROM_HANDLE(tu_device, device, _device);
   struct tu_buffer *buffer;

   assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO);

   buffer = vk_alloc2(&device->alloc, pAllocator, sizeof(*buffer), 8,
                      VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (buffer == NULL)
      return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);

   buffer->size = pCreateInfo->size;
   buffer->usage = pCreateInfo->usage;
   buffer->flags = pCreateInfo->flags;

   *pBuffer = tu_buffer_to_handle(buffer);

   return VK_SUCCESS;
}

void
tu_DestroyBuffer(VkDevice _device,
                 VkBuffer _buffer,
                 const VkAllocationCallbacks *pAllocator)
{
   TU_FROM_HANDLE(tu_device, device, _device);
   TU_FROM_HANDLE(tu_buffer, buffer, _buffer);

   if (!buffer)
      return;

   vk_free2(&device->alloc, pAllocator, buffer);
}

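/* For 3D image views the framebuffer layer count is bounded by the view's
 * depth; for array views it is bounded by base_layer + layer_count.
 */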
static uint32_t
tu_surface_max_layer_count(struct tu_image_view *iview)
{
   return iview->type == VK_IMAGE_VIEW_TYPE_3D
             ? iview->extent.depth
             : (iview->base_layer + iview->layer_count);
}

VkResult
tu_CreateFramebuffer(VkDevice _device,
                     const VkFramebufferCreateInfo *pCreateInfo,
                     const VkAllocationCallbacks *pAllocator,
                     VkFramebuffer *pFramebuffer)
{
   TU_FROM_HANDLE(tu_device, device, _device);
   struct tu_framebuffer *framebuffer;

   assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO);

   size_t size =
      sizeof(*framebuffer) +
      sizeof(struct tu_attachment_info) * pCreateInfo->attachmentCount;
   framebuffer = vk_alloc2(&device->alloc, pAllocator, size, 8,
                           VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (framebuffer == NULL)
      return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);

   framebuffer->attachment_count = pCreateInfo->attachmentCount;
   framebuffer->width = pCreateInfo->width;
   framebuffer->height = pCreateInfo->height;
   framebuffer->layers = pCreateInfo->layers;
   for (uint32_t i = 0; i < pCreateInfo->attachmentCount; i++) {
      VkImageView _iview = pCreateInfo->pAttachments[i];
      struct tu_image_view *iview = tu_image_view_from_handle(_iview);
      framebuffer->attachments[i].attachment = iview;

      framebuffer->width = MIN2(framebuffer->width, iview->extent.width);
      framebuffer->height = MIN2(framebuffer->height, iview->extent.height);
      framebuffer->layers =
         MIN2(framebuffer->layers, tu_surface_max_layer_count(iview));
   }

   *pFramebuffer = tu_framebuffer_to_handle(framebuffer);
   return VK_SUCCESS;
}

void
tu_DestroyFramebuffer(VkDevice _device,
                      VkFramebuffer _fb,
                      const VkAllocationCallbacks *pAllocator)
{
   TU_FROM_HANDLE(tu_device, device, _device);
   TU_FROM_HANDLE(tu_framebuffer, fb, _fb);

   if (!fb)
      return;
   vk_free2(&device->alloc, pAllocator, fb);
}

static void
tu_init_sampler(struct tu_device *device,
                struct tu_sampler *sampler,
                const VkSamplerCreateInfo *pCreateInfo)
{
}

VkResult
tu_CreateSampler(VkDevice _device,
                 const VkSamplerCreateInfo *pCreateInfo,
                 const VkAllocationCallbacks *pAllocator,
                 VkSampler *pSampler)
{
   TU_FROM_HANDLE(tu_device, device, _device);
   struct tu_sampler *sampler;

   assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_SAMPLER_CREATE_INFO);

   sampler = vk_alloc2(&device->alloc, pAllocator, sizeof(*sampler), 8,
                       VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (!sampler)
      return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);

   tu_init_sampler(device, sampler, pCreateInfo);
   *pSampler = tu_sampler_to_handle(sampler);

   return VK_SUCCESS;
}

void
tu_DestroySampler(VkDevice _device,
                  VkSampler _sampler,
                  const VkAllocationCallbacks *pAllocator)
{
   TU_FROM_HANDLE(tu_device, device, _device);
   TU_FROM_HANDLE(tu_sampler, sampler, _sampler);

   if (!sampler)
      return;
   vk_free2(&device->alloc, pAllocator, sampler);
}

/* vk_icd.h does not declare this function, so we declare it here to
 * suppress Wmissing-prototypes.
 */
PUBLIC VKAPI_ATTR VkResult VKAPI_CALL
vk_icdNegotiateLoaderICDInterfaceVersion(uint32_t *pSupportedVersion);

PUBLIC VKAPI_ATTR VkResult VKAPI_CALL
vk_icdNegotiateLoaderICDInterfaceVersion(uint32_t *pSupportedVersion)
{
   /* For the full details on loader interface versioning, see
    * <https://github.com/KhronosGroup/Vulkan-LoaderAndValidationLayers/blob/master/loader/LoaderAndLayerInterface.md>.
    * What follows is a condensed summary, to help you navigate the large and
    * confusing official doc.
    *
    * - Loader interface v0 is incompatible with later versions. We don't
    *   support it.
    *
    * - In loader interface v1:
    *   - The first ICD entrypoint called by the loader is
    *     vk_icdGetInstanceProcAddr(). The ICD must statically expose this
    *     entrypoint.
    *   - The ICD must statically expose no other Vulkan symbol unless it is
    *     linked with -Bsymbolic.
    *   - Each dispatchable Vulkan handle created by the ICD must be
    *     a pointer to a struct whose first member is VK_LOADER_DATA. The
    *     ICD must initialize VK_LOADER_DATA.loaderMagic to ICD_LOADER_MAGIC.
    *   - The loader implements vkCreate{PLATFORM}SurfaceKHR() and
    *     vkDestroySurfaceKHR(). The ICD must be capable of working with
    *     such loader-managed surfaces.
    *
    * - Loader interface v2 differs from v1 in:
    *   - The first ICD entrypoint called by the loader is
    *     vk_icdNegotiateLoaderICDInterfaceVersion(). The ICD must
    *     statically expose this entrypoint.
    *
    * - Loader interface v3 differs from v2 in:
    *   - The ICD must implement vkCreate{PLATFORM}SurfaceKHR(),
    *     vkDestroySurfaceKHR(), and other API which uses VkSurfaceKHR,
    *     because the loader no longer does so.
    */
   *pSupportedVersion = MIN2(*pSupportedVersion, 3u);
   return VK_SUCCESS;
}

void
tu_GetPhysicalDeviceExternalSemaphoreProperties(
   VkPhysicalDevice physicalDevice,
   const VkPhysicalDeviceExternalSemaphoreInfoKHR *pExternalSemaphoreInfo,
   VkExternalSemaphorePropertiesKHR *pExternalSemaphoreProperties)
{
   pExternalSemaphoreProperties->exportFromImportedHandleTypes = 0;
   pExternalSemaphoreProperties->compatibleHandleTypes = 0;
   pExternalSemaphoreProperties->externalSemaphoreFeatures = 0;
}

void
tu_GetPhysicalDeviceExternalFenceProperties(
   VkPhysicalDevice physicalDevice,
   const VkPhysicalDeviceExternalFenceInfoKHR *pExternalFenceInfo,
   VkExternalFencePropertiesKHR *pExternalFenceProperties)
{
   pExternalFenceProperties->exportFromImportedHandleTypes = 0;
   pExternalFenceProperties->compatibleHandleTypes = 0;
   pExternalFenceProperties->externalFenceFeatures = 0;
}

VkResult
tu_CreateDebugReportCallbackEXT(
   VkInstance _instance,
   const VkDebugReportCallbackCreateInfoEXT *pCreateInfo,
   const VkAllocationCallbacks *pAllocator,
   VkDebugReportCallbackEXT *pCallback)
{
   TU_FROM_HANDLE(tu_instance, instance, _instance);
   return vk_create_debug_report_callback(&instance->debug_report_callbacks,
                                          pCreateInfo, pAllocator,
                                          &instance->alloc, pCallback);
}

void
tu_DestroyDebugReportCallbackEXT(VkInstance _instance,
                                 VkDebugReportCallbackEXT _callback,
                                 const VkAllocationCallbacks *pAllocator)
{
   TU_FROM_HANDLE(tu_instance, instance, _instance);
   vk_destroy_debug_report_callback(&instance->debug_report_callbacks,
                                    _callback, pAllocator, &instance->alloc);
}

void
tu_DebugReportMessageEXT(VkInstance _instance,
                         VkDebugReportFlagsEXT flags,
                         VkDebugReportObjectTypeEXT objectType,
                         uint64_t object,
                         size_t location,
                         int32_t messageCode,
                         const char *pLayerPrefix,
                         const char *pMessage)
{
   TU_FROM_HANDLE(tu_instance, instance, _instance);
   vk_debug_report(&instance->debug_report_callbacks, flags, objectType,
                   object, location, messageCode, pLayerPrefix, pMessage);
}

void
tu_GetDeviceGroupPeerMemoryFeatures(
   VkDevice device,
   uint32_t heapIndex,
   uint32_t localDeviceIndex,
   uint32_t remoteDeviceIndex,
   VkPeerMemoryFeatureFlags *pPeerMemoryFeatures)
{
   assert(localDeviceIndex == remoteDeviceIndex);

   *pPeerMemoryFeatures = VK_PEER_MEMORY_FEATURE_COPY_SRC_BIT |
                          VK_PEER_MEMORY_FEATURE_COPY_DST_BIT |
                          VK_PEER_MEMORY_FEATURE_GENERIC_SRC_BIT |
                          VK_PEER_MEMORY_FEATURE_GENERIC_DST_BIT;
}