turnip: Gather some device info.
[mesa.git] / src / freedreno / vulkan / tu_device.c
1 /*
2 * Copyright © 2016 Red Hat.
3 * Copyright © 2016 Bas Nieuwenhuizen
4 *
5 * based in part on anv driver which is:
6 * Copyright © 2015 Intel Corporation
7 *
8 * Permission is hereby granted, free of charge, to any person obtaining a
9 * copy of this software and associated documentation files (the "Software"),
10 * to deal in the Software without restriction, including without limitation
11 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
12 * and/or sell copies of the Software, and to permit persons to whom the
13 * Software is furnished to do so, subject to the following conditions:
14 *
15 * The above copyright notice and this permission notice (including the next
16 * paragraph) shall be included in all copies or substantial portions of the
17 * Software.
18 *
19 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
20 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
21 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
22 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
23 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
24 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
25 * IN THE SOFTWARE.
26 */
27
28 #include "tu_private.h"
29 #include "util/debug.h"
30 #include "util/disk_cache.h"
31 #include "util/strtod.h"
32 #include "vk_format.h"
33 #include "vk_util.h"
34 #include <fcntl.h>
35 #include <stdbool.h>
36 #include <string.h>
37 #include <unistd.h>
38 #include <xf86drm.h>
39
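/* Build the pipeline-cache UUID from the Mesa build timestamp plus the GPU
 * family.  Layout of the VK_UUID_SIZE (16) bytes:
 *
 *   bytes 0-3    Mesa build timestamp
 *   bytes 4-5    GPU family (e.g. 530)
 *   bytes 6-8    the string "tu" including its NUL terminator
 *   bytes 9-15   zero padding from the initial memset
 */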
40 static int
41 tu_device_get_cache_uuid(uint16_t family, void *uuid)
42 {
43 uint32_t mesa_timestamp;
44 uint16_t f = family;
45 memset(uuid, 0, VK_UUID_SIZE);
46 if (!disk_cache_get_function_timestamp(tu_device_get_cache_uuid,
47 &mesa_timestamp))
48 return -1;
49
50 memcpy(uuid, &mesa_timestamp, 4);
51 memcpy((char *)uuid + 4, &f, 2);
52 snprintf((char *)uuid + 6, VK_UUID_SIZE - 6, "tu");
53 return 0;
54 }
55
56 static void
57 tu_get_driver_uuid(void *uuid)
58 {
59 memset(uuid, 0, VK_UUID_SIZE);
60 }
61
62 static void
63 tu_get_device_uuid(void *uuid)
64 {
65 stub();
66 }
67
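/* Probe a single DRM device: open its render node, check that the "msm"
 * kernel driver owns it, then query the GPU ID and GMEM size through a
 * temporary libdrm_freedreno pipe.  Only the A530 is accepted for now.
 */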
68 static VkResult
69 tu_physical_device_init(struct tu_physical_device *device,
70 struct tu_instance *instance,
71 drmDevicePtr drm_device)
72 {
73 const char *path = drm_device->nodes[DRM_NODE_RENDER];
74 VkResult result = VK_SUCCESS;
75 drmVersionPtr version;
76 int fd;
77 int master_fd = -1;
78 struct fd_pipe *tmp_pipe = NULL;
79 uint64_t val;
80
81 fd = open(path, O_RDWR | O_CLOEXEC);
82 if (fd < 0) {
83 if (instance->debug_flags & TU_DEBUG_STARTUP)
84 tu_logi("Could not open device '%s'", path);
85
86 return vk_error(instance, VK_ERROR_INCOMPATIBLE_DRIVER);
87 }
88
89 version = drmGetVersion(fd);
90 if (!version) {
91 close(fd);
92
93 if (instance->debug_flags & TU_DEBUG_STARTUP)
94 tu_logi("Could not get the kernel driver version for device '%s'",
95 path);
96
97 return vk_errorf(instance,
98 VK_ERROR_INCOMPATIBLE_DRIVER,
99 "failed to get version %s: %m",
100 path);
101 }
102
103 if (strcmp(version->name, "msm")) {
104 drmFreeVersion(version);
105 if (master_fd != -1)
106 close(master_fd);
107 close(fd);
108
109 if (instance->debug_flags & TU_DEBUG_STARTUP)
110 tu_logi("Device '%s' is not using the msm kernel driver.", path);
111
112 return VK_ERROR_INCOMPATIBLE_DRIVER;
113 }
114 drmFreeVersion(version);
115
116 if (instance->debug_flags & TU_DEBUG_STARTUP)
117 tu_logi("Found compatible device '%s'.", path);
118
119 device->_loader_data.loaderMagic = ICD_LOADER_MAGIC;
120 device->instance = instance;
121 assert(strlen(path) < ARRAY_SIZE(device->path));
122 strncpy(device->path, path, ARRAY_SIZE(device->path));
123
124 if (instance->enabled_extensions.KHR_display) {
125 master_fd = open(drm_device->nodes[DRM_NODE_PRIMARY], O_RDWR | O_CLOEXEC);
126 if (master_fd >= 0) {
127 /* TODO: free master_fd if accel is not working? */
128 abort();
129 }
130 }
131
132 device->master_fd = master_fd;
133 device->local_fd = fd;
134
135 device->drm_device = fd_device_new_dup(fd);
136 if (!device->drm_device) {
137 result = vk_errorf(
138 instance, VK_ERROR_INITIALIZATION_FAILED, "could not create the libdrm device");
139 goto fail;
140 }
141
142 tmp_pipe = fd_pipe_new(device->drm_device, FD_PIPE_3D);
143 if (!tmp_pipe) {
144 result = vk_errorf(
145 instance, VK_ERROR_INITIALIZATION_FAILED, "could not open the 3D pipe");
146 goto fail;
147 }
148
149 if (fd_pipe_get_param(tmp_pipe, FD_GPU_ID, &val)) {
150 result = vk_errorf(
151 instance, VK_ERROR_INITIALIZATION_FAILED, "could not get GPU ID");
152 goto fail;
153 }
154 device->gpu_id = val;
155
156 if (fd_pipe_get_param(tmp_pipe, FD_GMEM_SIZE, &val)) {
157 result = vk_errorf(
158 instance, VK_ERROR_INITIALIZATION_FAILED, "could not get GMEM size");
159 goto fail;
160 }
161 device->gmem_size = val;
162
163 fd_pipe_del(tmp_pipe);
164 tmp_pipe = NULL;
165
166 memset(device->name, 0, sizeof(device->name));
167 sprintf(device->name, "FD%d", device->gpu_id);
168
169 switch(device->gpu_id) {
170 case 530:
171 break;
172 default:
173 if (instance->debug_flags & TU_DEBUG_STARTUP)
174 tu_logi("Device '%s' is not supported.", device->name);
175 result = vk_error(instance, VK_ERROR_INCOMPATIBLE_DRIVER);
goto fail;
176 }
177 if (tu_device_get_cache_uuid(device->gpu_id, device->cache_uuid)) {
178 result = vk_errorf(
179 instance, VK_ERROR_INITIALIZATION_FAILED, "cannot generate UUID");
180 goto fail;
181 }
182
183 /* The gpu id is already embedded in the uuid so we just pass the
184 * device name when creating the cache.
185 */
186 char buf[VK_UUID_SIZE * 2 + 1];
187 disk_cache_format_hex_id(buf, device->cache_uuid, VK_UUID_SIZE * 2);
188 device->disk_cache = disk_cache_create(device->name, buf, 0);
189
190 fprintf(stderr,
191 "WARNING: tu is not a conformant vulkan implementation, "
192 "testing use only.\n");
193
194 tu_get_driver_uuid(&device->driver_uuid);
195 tu_get_device_uuid(&device->device_uuid);
196
197 tu_fill_device_extension_table(device, &device->supported_extensions);
198
204 return VK_SUCCESS;
205
206 fail:
207 if (tmp_pipe)
208 fd_pipe_del(tmp_pipe);
209 if (device->drm_device)
210 fd_device_del(device->drm_device);
211 close(fd);
212 if (master_fd != -1)
213 close(master_fd);
214 return result;
215 }
216
217 static void
218 tu_physical_device_finish(struct tu_physical_device *device)
219 {
220 disk_cache_destroy(device->disk_cache);
221 close(device->local_fd);
222 if (device->master_fd != -1)
223 close(device->master_fd);
224 }
225
226 static void *
227 default_alloc_func(void *pUserData,
228 size_t size,
229 size_t align,
230 VkSystemAllocationScope allocationScope)
231 {
232 return malloc(size);
233 }
234
235 static void *
236 default_realloc_func(void *pUserData,
237 void *pOriginal,
238 size_t size,
239 size_t align,
240 VkSystemAllocationScope allocationScope)
241 {
242 return realloc(pOriginal, size);
243 }
244
245 static void
246 default_free_func(void *pUserData, void *pMemory)
247 {
248 free(pMemory);
249 }
250
251 static const VkAllocationCallbacks default_alloc = {
252 .pUserData = NULL,
253 .pfnAllocation = default_alloc_func,
254 .pfnReallocation = default_realloc_func,
255 .pfnFree = default_free_func,
256 };
257
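/* Debug flags parsed from the TU_DEBUG environment variable; for example,
 * TU_DEBUG=startup enables the tu_logi() messages used during probing.
 */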
258 static const struct debug_control tu_debug_options[] = {
259 { "startup", TU_DEBUG_STARTUP },
260 { NULL, 0 } };
261
262 const char *
263 tu_get_debug_option_name(int id)
264 {
265 assert(id < ARRAY_SIZE(tu_debug_options) - 1);
266 return tu_debug_options[id].string;
267 }
268
269 static int
270 tu_get_instance_extension_index(const char *name)
271 {
272 for (unsigned i = 0; i < TU_INSTANCE_EXTENSION_COUNT; ++i) {
273 if (strcmp(name, tu_instance_extensions[i].extensionName) == 0)
274 return i;
275 }
276 return -1;
277 }
278
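/* A minimal sketch of the application call that reaches this entrypoint
 * (illustrative, not part of this file):
 *
 *    VkInstanceCreateInfo info = {
 *       .sType = VK_STRUCTURE_TYPE_INSTANCE_CREATE_INFO,
 *    };
 *    VkInstance instance;
 *    vkCreateInstance(&info, NULL, &instance);
 *
 * Without pApplicationInfo the requested apiVersion falls back to the
 * value reported by tu_EnumerateInstanceVersion().
 */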
279 VkResult
280 tu_CreateInstance(const VkInstanceCreateInfo *pCreateInfo,
281 const VkAllocationCallbacks *pAllocator,
282 VkInstance *pInstance)
283 {
284 struct tu_instance *instance;
285 VkResult result;
286
287 assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_INSTANCE_CREATE_INFO);
288
289 uint32_t client_version;
290 if (pCreateInfo->pApplicationInfo &&
291 pCreateInfo->pApplicationInfo->apiVersion != 0) {
292 client_version = pCreateInfo->pApplicationInfo->apiVersion;
293 } else {
294 tu_EnumerateInstanceVersion(&client_version);
295 }
296
297 instance = vk_zalloc2(&default_alloc,
298 pAllocator,
299 sizeof(*instance),
300 8,
301 VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
302 if (!instance)
303 return vk_error(NULL, VK_ERROR_OUT_OF_HOST_MEMORY);
304
305 instance->_loader_data.loaderMagic = ICD_LOADER_MAGIC;
306
307 if (pAllocator)
308 instance->alloc = *pAllocator;
309 else
310 instance->alloc = default_alloc;
311
312 instance->api_version = client_version;
313 instance->physical_device_count = -1;
314
315 instance->debug_flags =
316 parse_debug_string(getenv("TU_DEBUG"), tu_debug_options);
317
318 if (instance->debug_flags & TU_DEBUG_STARTUP)
319 tu_logi("Created an instance");
320
321 for (uint32_t i = 0; i < pCreateInfo->enabledExtensionCount; i++) {
322 const char *ext_name = pCreateInfo->ppEnabledExtensionNames[i];
323 int index = tu_get_instance_extension_index(ext_name);
324
325 if (index < 0 || !tu_supported_instance_extensions.extensions[index]) {
326 vk_free2(&default_alloc, pAllocator, instance);
327 return vk_error(instance, VK_ERROR_EXTENSION_NOT_PRESENT);
328 }
329
330 instance->enabled_extensions.extensions[index] = true;
331 }
332
333 result = vk_debug_report_instance_init(&instance->debug_report_callbacks);
334 if (result != VK_SUCCESS) {
335 vk_free2(&default_alloc, pAllocator, instance);
336 return vk_error(instance, result);
337 }
338
339 _mesa_locale_init();
340
341 VG(VALGRIND_CREATE_MEMPOOL(instance, 0, false));
342
343 *pInstance = tu_instance_to_handle(instance);
344
345 return VK_SUCCESS;
346 }
347
348 void
349 tu_DestroyInstance(VkInstance _instance,
350 const VkAllocationCallbacks *pAllocator)
351 {
352 TU_FROM_HANDLE(tu_instance, instance, _instance);
353
354 if (!instance)
355 return;
356
357 for (int i = 0; i < instance->physical_device_count; ++i) {
358 tu_physical_device_finish(instance->physical_devices + i);
359 }
360
361 VG(VALGRIND_DESTROY_MEMPOOL(instance));
362
363 _mesa_locale_fini();
364
365 vk_debug_report_instance_destroy(&instance->debug_report_callbacks);
366
367 vk_free(&instance->alloc, instance);
368 }
369
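/* Scan the DRM devices known to libdrm and initialize a physical device
 * for every platform-bus render node that tu_physical_device_init()
 * accepts.
 */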
370 static VkResult
371 tu_enumerate_devices(struct tu_instance *instance)
372 {
373 /* TODO: Check for more devices ? */
374 drmDevicePtr devices[8];
375 VkResult result = VK_ERROR_INCOMPATIBLE_DRIVER;
376 int max_devices;
377
378 instance->physical_device_count = 0;
379
380 max_devices = drmGetDevices2(0, devices, ARRAY_SIZE(devices));
381
382 if (instance->debug_flags & TU_DEBUG_STARTUP)
383 tu_logi("Found %d drm nodes", max_devices);
384
385 if (max_devices < 1)
386 return vk_error(instance, VK_ERROR_INCOMPATIBLE_DRIVER);
387
388 for (unsigned i = 0; i < (unsigned)max_devices; i++) {
389 if (devices[i]->available_nodes & 1 << DRM_NODE_RENDER &&
390 devices[i]->bustype == DRM_BUS_PLATFORM) {
391
392 result = tu_physical_device_init(instance->physical_devices +
393 instance->physical_device_count,
394 instance,
395 devices[i]);
396 if (result == VK_SUCCESS)
397 ++instance->physical_device_count;
398 else if (result != VK_ERROR_INCOMPATIBLE_DRIVER)
399 break;
400 }
401 }
402 drmFreeDevices(devices, max_devices);
403
404 return result;
405 }
406
407 VkResult
408 tu_EnumeratePhysicalDevices(VkInstance _instance,
409 uint32_t *pPhysicalDeviceCount,
410 VkPhysicalDevice *pPhysicalDevices)
411 {
412 TU_FROM_HANDLE(tu_instance, instance, _instance);
413 VkResult result;
414
415 if (instance->physical_device_count < 0) {
416 result = tu_enumerate_devices(instance);
417 if (result != VK_SUCCESS && result != VK_ERROR_INCOMPATIBLE_DRIVER)
418 return result;
419 }
420
421 if (!pPhysicalDevices) {
422 *pPhysicalDeviceCount = instance->physical_device_count;
423 } else {
424 *pPhysicalDeviceCount =
425 MIN2(*pPhysicalDeviceCount, instance->physical_device_count);
426 for (unsigned i = 0; i < *pPhysicalDeviceCount; ++i)
427 pPhysicalDevices[i] =
428 tu_physical_device_to_handle(instance->physical_devices + i);
429 }
430
431 return *pPhysicalDeviceCount < instance->physical_device_count
432 ? VK_INCOMPLETE
433 : VK_SUCCESS;
434 }
435
436 VkResult
437 tu_EnumeratePhysicalDeviceGroups(
438 VkInstance _instance,
439 uint32_t *pPhysicalDeviceGroupCount,
440 VkPhysicalDeviceGroupProperties *pPhysicalDeviceGroupProperties)
441 {
442 TU_FROM_HANDLE(tu_instance, instance, _instance);
443 VkResult result;
444
445 if (instance->physical_device_count < 0) {
446 result = tu_enumerate_devices(instance);
447 if (result != VK_SUCCESS && result != VK_ERROR_INCOMPATIBLE_DRIVER)
448 return result;
449 }
450
451 if (!pPhysicalDeviceGroupProperties) {
452 *pPhysicalDeviceGroupCount = instance->physical_device_count;
453 } else {
454 *pPhysicalDeviceGroupCount =
455 MIN2(*pPhysicalDeviceGroupCount, instance->physical_device_count);
456 for (unsigned i = 0; i < *pPhysicalDeviceGroupCount; ++i) {
457 pPhysicalDeviceGroupProperties[i].physicalDeviceCount = 1;
458 pPhysicalDeviceGroupProperties[i].physicalDevices[0] =
459 tu_physical_device_to_handle(instance->physical_devices + i);
460 pPhysicalDeviceGroupProperties[i].subsetAllocation = false;
461 }
462 }
463 return *pPhysicalDeviceGroupCount < instance->physical_device_count
464 ? VK_INCOMPLETE
465 : VK_SUCCESS;
466 }
467
468 void
469 tu_GetPhysicalDeviceFeatures(VkPhysicalDevice physicalDevice,
470 VkPhysicalDeviceFeatures *pFeatures)
471 {
472 memset(pFeatures, 0, sizeof(*pFeatures));
473
474 *pFeatures = (VkPhysicalDeviceFeatures){
475 .robustBufferAccess = false,
476 .fullDrawIndexUint32 = false,
477 .imageCubeArray = false,
478 .independentBlend = false,
479 .geometryShader = false,
480 .tessellationShader = false,
481 .sampleRateShading = false,
482 .dualSrcBlend = false,
483 .logicOp = false,
484 .multiDrawIndirect = false,
485 .drawIndirectFirstInstance = false,
486 .depthClamp = false,
487 .depthBiasClamp = false,
488 .fillModeNonSolid = false,
489 .depthBounds = false,
490 .wideLines = false,
491 .largePoints = false,
492 .alphaToOne = false,
493 .multiViewport = false,
494 .samplerAnisotropy = false,
495 .textureCompressionETC2 = false,
496 .textureCompressionASTC_LDR = false,
497 .textureCompressionBC = false,
498 .occlusionQueryPrecise = false,
499 .pipelineStatisticsQuery = false,
500 .vertexPipelineStoresAndAtomics = false,
501 .fragmentStoresAndAtomics = false,
502 .shaderTessellationAndGeometryPointSize = false,
503 .shaderImageGatherExtended = false,
504 .shaderStorageImageExtendedFormats = false,
505 .shaderStorageImageMultisample = false,
506 .shaderUniformBufferArrayDynamicIndexing = false,
507 .shaderSampledImageArrayDynamicIndexing = false,
508 .shaderStorageBufferArrayDynamicIndexing = false,
509 .shaderStorageImageArrayDynamicIndexing = false,
510 .shaderStorageImageReadWithoutFormat = false,
511 .shaderStorageImageWriteWithoutFormat = false,
512 .shaderClipDistance = false,
513 .shaderCullDistance = false,
514 .shaderFloat64 = false,
515 .shaderInt64 = false,
516 .shaderInt16 = false,
517 .sparseBinding = false,
518 .variableMultisampleRate = false,
519 .inheritedQueries = false,
520 };
521 }
522
523 void
524 tu_GetPhysicalDeviceFeatures2(VkPhysicalDevice physicalDevice,
525 VkPhysicalDeviceFeatures2KHR *pFeatures)
526 {
527 vk_foreach_struct(ext, pFeatures->pNext)
528 {
529 switch (ext->sType) {
530 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VARIABLE_POINTER_FEATURES_KHR: {
531 VkPhysicalDeviceVariablePointerFeaturesKHR *features = (void *)ext;
532 features->variablePointersStorageBuffer = true;
533 features->variablePointers = false;
534 break;
535 }
536 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MULTIVIEW_FEATURES_KHR: {
537 VkPhysicalDeviceMultiviewFeaturesKHR *features =
538 (VkPhysicalDeviceMultiviewFeaturesKHR *)ext;
539 features->multiview = true;
540 features->multiviewGeometryShader = true;
541 features->multiviewTessellationShader = true;
542 break;
543 }
544 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_DRAW_PARAMETER_FEATURES: {
545 VkPhysicalDeviceShaderDrawParameterFeatures *features =
546 (VkPhysicalDeviceShaderDrawParameterFeatures *)ext;
547 features->shaderDrawParameters = true;
548 break;
549 }
550 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROTECTED_MEMORY_FEATURES: {
551 VkPhysicalDeviceProtectedMemoryFeatures *features =
552 (VkPhysicalDeviceProtectedMemoryFeatures *)ext;
553 features->protectedMemory = false;
554 break;
555 }
556 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_16BIT_STORAGE_FEATURES: {
557 VkPhysicalDevice16BitStorageFeatures *features =
558 (VkPhysicalDevice16BitStorageFeatures *)ext;
559 features->storageBuffer16BitAccess = false;
560 features->uniformAndStorageBuffer16BitAccess = false;
561 features->storagePushConstant16 = false;
562 features->storageInputOutput16 = false;
563 break;
564 }
565 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SAMPLER_YCBCR_CONVERSION_FEATURES: {
566 VkPhysicalDeviceSamplerYcbcrConversionFeatures *features =
567 (VkPhysicalDeviceSamplerYcbcrConversionFeatures *)ext;
568 features->samplerYcbcrConversion = false;
569 break;
570 }
571 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DESCRIPTOR_INDEXING_FEATURES_EXT: {
572 VkPhysicalDeviceDescriptorIndexingFeaturesEXT *features =
573 (VkPhysicalDeviceDescriptorIndexingFeaturesEXT *)ext;
574 features->shaderInputAttachmentArrayDynamicIndexing = true;
575 features->shaderUniformTexelBufferArrayDynamicIndexing = true;
576 features->shaderStorageTexelBufferArrayDynamicIndexing = true;
577 features->shaderUniformBufferArrayNonUniformIndexing = false;
578 features->shaderSampledImageArrayNonUniformIndexing = false;
579 features->shaderStorageBufferArrayNonUniformIndexing = false;
580 features->shaderStorageImageArrayNonUniformIndexing = false;
581 features->shaderInputAttachmentArrayNonUniformIndexing = false;
582 features->shaderUniformTexelBufferArrayNonUniformIndexing = false;
583 features->shaderStorageTexelBufferArrayNonUniformIndexing = false;
584 features->descriptorBindingUniformBufferUpdateAfterBind = true;
585 features->descriptorBindingSampledImageUpdateAfterBind = true;
586 features->descriptorBindingStorageImageUpdateAfterBind = true;
587 features->descriptorBindingStorageBufferUpdateAfterBind = true;
588 features->descriptorBindingUniformTexelBufferUpdateAfterBind = true;
589 features->descriptorBindingStorageTexelBufferUpdateAfterBind = true;
590 features->descriptorBindingUpdateUnusedWhilePending = true;
591 features->descriptorBindingPartiallyBound = true;
592 features->descriptorBindingVariableDescriptorCount = true;
593 features->runtimeDescriptorArray = true;
594 break;
595 }
596 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_CONDITIONAL_RENDERING_FEATURES_EXT: {
597 VkPhysicalDeviceConditionalRenderingFeaturesEXT *features =
598 (VkPhysicalDeviceConditionalRenderingFeaturesEXT *)ext;
599 features->conditionalRendering = true;
600 features->inheritedConditionalRendering = false;
601 break;
602 }
603 default:
604 break;
605 }
606 }
607 return tu_GetPhysicalDeviceFeatures(physicalDevice, &pFeatures->features);
608 }
609
610 void
611 tu_GetPhysicalDeviceProperties(VkPhysicalDevice physicalDevice,
612 VkPhysicalDeviceProperties *pProperties)
613 {
614 TU_FROM_HANDLE(tu_physical_device, pdevice, physicalDevice);
615 VkSampleCountFlags sample_counts = 0xf;
616
617 /* Make sure that the entire descriptor set is addressable with a signed
618 * 32-bit int, so the sum of all limits scaled by descriptor size has to
619 * be at most 2 GiB. A combined image sampler counts as one of each. This
620 * limit is for the pipeline layout, not for the set layout, but there is
621 * no set limit, so we just set a pipeline limit. I don't think any app
622 * is going to hit this soon. */
623 size_t max_descriptor_set_size =
624 ((1ull << 31) - 16 * MAX_DYNAMIC_BUFFERS) /
625 (32 /* uniform buffer, 32 due to potential space wasted on alignment */ +
626 32 /* storage buffer, 32 due to potential space wasted on alignment */ +
627 32 /* sampler, largest when combined with image */ +
628 64 /* sampled image */ + 64 /* storage image */);
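/* With the sizes above the denominator is 32 + 32 + 32 + 64 + 64 = 224
 * bytes, so ignoring the small dynamic-buffer term this allows roughly
 * 2^31 / 224, i.e. about 9.6 million descriptors per stage.
 */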
629
630 VkPhysicalDeviceLimits limits = {
631 .maxImageDimension1D = (1 << 14),
632 .maxImageDimension2D = (1 << 14),
633 .maxImageDimension3D = (1 << 11),
634 .maxImageDimensionCube = (1 << 14),
635 .maxImageArrayLayers = (1 << 11),
636 .maxTexelBufferElements = 128 * 1024 * 1024,
637 .maxUniformBufferRange = UINT32_MAX,
638 .maxStorageBufferRange = UINT32_MAX,
639 .maxPushConstantsSize = MAX_PUSH_CONSTANTS_SIZE,
640 .maxMemoryAllocationCount = UINT32_MAX,
641 .maxSamplerAllocationCount = 64 * 1024,
642 .bufferImageGranularity = 64, /* A cache line */
643 .sparseAddressSpaceSize = 0xffffffffu, /* buffer max size */
644 .maxBoundDescriptorSets = MAX_SETS,
645 .maxPerStageDescriptorSamplers = max_descriptor_set_size,
646 .maxPerStageDescriptorUniformBuffers = max_descriptor_set_size,
647 .maxPerStageDescriptorStorageBuffers = max_descriptor_set_size,
648 .maxPerStageDescriptorSampledImages = max_descriptor_set_size,
649 .maxPerStageDescriptorStorageImages = max_descriptor_set_size,
650 .maxPerStageDescriptorInputAttachments = max_descriptor_set_size,
651 .maxPerStageResources = max_descriptor_set_size,
652 .maxDescriptorSetSamplers = max_descriptor_set_size,
653 .maxDescriptorSetUniformBuffers = max_descriptor_set_size,
654 .maxDescriptorSetUniformBuffersDynamic = MAX_DYNAMIC_UNIFORM_BUFFERS,
655 .maxDescriptorSetStorageBuffers = max_descriptor_set_size,
656 .maxDescriptorSetStorageBuffersDynamic = MAX_DYNAMIC_STORAGE_BUFFERS,
657 .maxDescriptorSetSampledImages = max_descriptor_set_size,
658 .maxDescriptorSetStorageImages = max_descriptor_set_size,
659 .maxDescriptorSetInputAttachments = max_descriptor_set_size,
660 .maxVertexInputAttributes = 32,
661 .maxVertexInputBindings = 32,
662 .maxVertexInputAttributeOffset = 2047,
663 .maxVertexInputBindingStride = 2048,
664 .maxVertexOutputComponents = 128,
665 .maxTessellationGenerationLevel = 64,
666 .maxTessellationPatchSize = 32,
667 .maxTessellationControlPerVertexInputComponents = 128,
668 .maxTessellationControlPerVertexOutputComponents = 128,
669 .maxTessellationControlPerPatchOutputComponents = 120,
670 .maxTessellationControlTotalOutputComponents = 4096,
671 .maxTessellationEvaluationInputComponents = 128,
672 .maxTessellationEvaluationOutputComponents = 128,
673 .maxGeometryShaderInvocations = 127,
674 .maxGeometryInputComponents = 64,
675 .maxGeometryOutputComponents = 128,
676 .maxGeometryOutputVertices = 256,
677 .maxGeometryTotalOutputComponents = 1024,
678 .maxFragmentInputComponents = 128,
679 .maxFragmentOutputAttachments = 8,
680 .maxFragmentDualSrcAttachments = 1,
681 .maxFragmentCombinedOutputResources = 8,
682 .maxComputeSharedMemorySize = 32768,
683 .maxComputeWorkGroupCount = { 65535, 65535, 65535 },
684 .maxComputeWorkGroupInvocations = 2048,
685 .maxComputeWorkGroupSize = { 2048, 2048, 2048 },
686 .subPixelPrecisionBits = 4 /* FIXME */,
687 .subTexelPrecisionBits = 4 /* FIXME */,
688 .mipmapPrecisionBits = 4 /* FIXME */,
689 .maxDrawIndexedIndexValue = UINT32_MAX,
690 .maxDrawIndirectCount = UINT32_MAX,
691 .maxSamplerLodBias = 16,
692 .maxSamplerAnisotropy = 16,
693 .maxViewports = MAX_VIEWPORTS,
694 .maxViewportDimensions = { (1 << 14), (1 << 14) },
695 .viewportBoundsRange = { INT16_MIN, INT16_MAX },
696 .viewportSubPixelBits = 8,
697 .minMemoryMapAlignment = 4096, /* A page */
698 .minTexelBufferOffsetAlignment = 1,
699 .minUniformBufferOffsetAlignment = 4,
700 .minStorageBufferOffsetAlignment = 4,
701 .minTexelOffset = -32,
702 .maxTexelOffset = 31,
703 .minTexelGatherOffset = -32,
704 .maxTexelGatherOffset = 31,
705 .minInterpolationOffset = -2,
706 .maxInterpolationOffset = 2,
707 .subPixelInterpolationOffsetBits = 8,
708 .maxFramebufferWidth = (1 << 14),
709 .maxFramebufferHeight = (1 << 14),
710 .maxFramebufferLayers = (1 << 10),
711 .framebufferColorSampleCounts = sample_counts,
712 .framebufferDepthSampleCounts = sample_counts,
713 .framebufferStencilSampleCounts = sample_counts,
714 .framebufferNoAttachmentsSampleCounts = sample_counts,
715 .maxColorAttachments = MAX_RTS,
716 .sampledImageColorSampleCounts = sample_counts,
717 .sampledImageIntegerSampleCounts = VK_SAMPLE_COUNT_1_BIT,
718 .sampledImageDepthSampleCounts = sample_counts,
719 .sampledImageStencilSampleCounts = sample_counts,
720 .storageImageSampleCounts = VK_SAMPLE_COUNT_1_BIT,
721 .maxSampleMaskWords = 1,
722 .timestampComputeAndGraphics = true,
723 .timestampPeriod = 1,
724 .maxClipDistances = 8,
725 .maxCullDistances = 8,
726 .maxCombinedClipAndCullDistances = 8,
727 .discreteQueuePriorities = 1,
728 .pointSizeRange = { 0.125, 255.875 },
729 .lineWidthRange = { 0.0, 7.9921875 },
730 .pointSizeGranularity = (1.0 / 8.0),
731 .lineWidthGranularity = (1.0 / 128.0),
732 .strictLines = false, /* FINISHME */
733 .standardSampleLocations = true,
734 .optimalBufferCopyOffsetAlignment = 128,
735 .optimalBufferCopyRowPitchAlignment = 128,
736 .nonCoherentAtomSize = 64,
737 };
738
739 *pProperties = (VkPhysicalDeviceProperties){
740 .apiVersion = tu_physical_device_api_version(pdevice),
741 .driverVersion = vk_get_driver_version(),
742 .vendorID = 0, /* TODO */
743 .deviceID = 0,
744 .deviceType = VK_PHYSICAL_DEVICE_TYPE_INTEGRATED_GPU,
745 .limits = limits,
746 .sparseProperties = { 0 },
747 };
748
749 strcpy(pProperties->deviceName, pdevice->name);
750 memcpy(pProperties->pipelineCacheUUID, pdevice->cache_uuid, VK_UUID_SIZE);
751 }
752
753 void
754 tu_GetPhysicalDeviceProperties2(VkPhysicalDevice physicalDevice,
755 VkPhysicalDeviceProperties2KHR *pProperties)
756 {
757 TU_FROM_HANDLE(tu_physical_device, pdevice, physicalDevice);
758 tu_GetPhysicalDeviceProperties(physicalDevice, &pProperties->properties);
759
760 vk_foreach_struct(ext, pProperties->pNext)
761 {
762 switch (ext->sType) {
763 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PUSH_DESCRIPTOR_PROPERTIES_KHR: {
764 VkPhysicalDevicePushDescriptorPropertiesKHR *properties =
765 (VkPhysicalDevicePushDescriptorPropertiesKHR *)ext;
766 properties->maxPushDescriptors = MAX_PUSH_DESCRIPTORS;
767 break;
768 }
769 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_ID_PROPERTIES_KHR: {
770 VkPhysicalDeviceIDPropertiesKHR *properties =
771 (VkPhysicalDeviceIDPropertiesKHR *)ext;
772 memcpy(properties->driverUUID, pdevice->driver_uuid, VK_UUID_SIZE);
773 memcpy(properties->deviceUUID, pdevice->device_uuid, VK_UUID_SIZE);
774 properties->deviceLUIDValid = false;
775 break;
776 }
777 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MULTIVIEW_PROPERTIES_KHR: {
778 VkPhysicalDeviceMultiviewPropertiesKHR *properties =
779 (VkPhysicalDeviceMultiviewPropertiesKHR *)ext;
780 properties->maxMultiviewViewCount = MAX_VIEWS;
781 properties->maxMultiviewInstanceIndex = INT_MAX;
782 break;
783 }
784 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_POINT_CLIPPING_PROPERTIES_KHR: {
785 VkPhysicalDevicePointClippingPropertiesKHR *properties =
786 (VkPhysicalDevicePointClippingPropertiesKHR *)ext;
787 properties->pointClippingBehavior =
788 VK_POINT_CLIPPING_BEHAVIOR_ALL_CLIP_PLANES_KHR;
789 break;
790 }
791 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MAINTENANCE_3_PROPERTIES: {
792 VkPhysicalDeviceMaintenance3Properties *properties =
793 (VkPhysicalDeviceMaintenance3Properties *)ext;
794 /* Make sure everything is addressable by a signed 32-bit int, and
795 * our largest descriptors are 96 bytes. */
796 properties->maxPerSetDescriptors = (1ull << 31) / 96;
797 /* Our buffer size fields allow only this much */
798 properties->maxMemoryAllocationSize = 0xFFFFFFFFull;
799 break;
800 }
801 default:
802 break;
803 }
804 }
805 }
806
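/* We expose a single queue family supporting graphics, compute and
 * transfer.  The helper takes an array of pointers so that both the
 * QueueFamilyProperties and QueueFamilyProperties2 entrypoints below can
 * share the fill logic.
 */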
807 static void
808 tu_get_physical_device_queue_family_properties(
809 struct tu_physical_device *pdevice,
810 uint32_t *pCount,
811 VkQueueFamilyProperties **pQueueFamilyProperties)
812 {
813 int num_queue_families = 1;
814 int idx;
815 if (pQueueFamilyProperties == NULL) {
816 *pCount = num_queue_families;
817 return;
818 }
819
820 if (!*pCount)
821 return;
822
823 idx = 0;
824 if (*pCount >= 1) {
825 *pQueueFamilyProperties[idx] = (VkQueueFamilyProperties){
826 .queueFlags =
827 VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT | VK_QUEUE_TRANSFER_BIT,
828 .queueCount = 1,
829 .timestampValidBits = 64,
830 .minImageTransferGranularity = (VkExtent3D){ 1, 1, 1 },
831 };
832 idx++;
833 }
834
835 *pCount = idx;
836 }
837
838 void
839 tu_GetPhysicalDeviceQueueFamilyProperties(
840 VkPhysicalDevice physicalDevice,
841 uint32_t *pCount,
842 VkQueueFamilyProperties *pQueueFamilyProperties)
843 {
844 TU_FROM_HANDLE(tu_physical_device, pdevice, physicalDevice);
845 if (!pQueueFamilyProperties) {
846 tu_get_physical_device_queue_family_properties(
847 pdevice, pCount, NULL);
848 return;
849 }
850 VkQueueFamilyProperties *properties[] = {
851 pQueueFamilyProperties + 0,
852 };
853 tu_get_physical_device_queue_family_properties(pdevice, pCount, properties);
854 assert(*pCount <= 1);
855 }
856
857 void
858 tu_GetPhysicalDeviceQueueFamilyProperties2(
859 VkPhysicalDevice physicalDevice,
860 uint32_t *pCount,
861 VkQueueFamilyProperties2KHR *pQueueFamilyProperties)
862 {
863 TU_FROM_HANDLE(tu_physical_device, pdevice, physicalDevice);
864 if (!pQueueFamilyProperties) {
865 tu_get_physical_device_queue_family_properties(
866 pdevice, pCount, NULL);
867 return;
868 }
869 VkQueueFamilyProperties *properties[] = {
870 &pQueueFamilyProperties[0].queueFamilyProperties,
871 };
872 tu_get_physical_device_queue_family_properties(pdevice, pCount, properties);
873 assert(*pCount <= 1);
874 }
875
876 void
877 tu_GetPhysicalDeviceMemoryProperties(
878 VkPhysicalDevice physicalDevice,
879 VkPhysicalDeviceMemoryProperties *pMemoryProperties)
880 {
881 stub();
882 }
883
884 void
885 tu_GetPhysicalDeviceMemoryProperties2(
886 VkPhysicalDevice physicalDevice,
887 VkPhysicalDeviceMemoryProperties2KHR *pMemoryProperties)
888 {
889 return tu_GetPhysicalDeviceMemoryProperties(
890 physicalDevice, &pMemoryProperties->memoryProperties);
891 }
892
893 static VkResult
894 tu_queue_init(struct tu_device *device,
895 struct tu_queue *queue,
896 uint32_t queue_family_index,
897 int idx,
898 VkDeviceQueueCreateFlags flags)
899 {
900 queue->_loader_data.loaderMagic = ICD_LOADER_MAGIC;
901 queue->device = device;
902 queue->queue_family_index = queue_family_index;
903 queue->queue_idx = idx;
904 queue->flags = flags;
905
906 return VK_SUCCESS;
907 }
908
909 static void
910 tu_queue_finish(struct tu_queue *queue)
911 {
912 }
913
914 static int
915 tu_get_device_extension_index(const char *name)
916 {
917 for (unsigned i = 0; i < TU_DEVICE_EXTENSION_COUNT; ++i) {
918 if (strcmp(name, tu_device_extensions[i].extensionName) == 0)
919 return i;
920 }
921 return -1;
922 }
923
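/* Logical device creation: reject unsupported features and extensions,
 * allocate and initialize the per-family queue arrays, and create an
 * internal pipeline cache (mem_cache).
 */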
924 VkResult
925 tu_CreateDevice(VkPhysicalDevice physicalDevice,
926 const VkDeviceCreateInfo *pCreateInfo,
927 const VkAllocationCallbacks *pAllocator,
928 VkDevice *pDevice)
929 {
930 TU_FROM_HANDLE(tu_physical_device, physical_device, physicalDevice);
931 VkResult result;
932 struct tu_device *device;
933
934 /* Check enabled features */
935 if (pCreateInfo->pEnabledFeatures) {
936 VkPhysicalDeviceFeatures supported_features;
937 tu_GetPhysicalDeviceFeatures(physicalDevice, &supported_features);
938 VkBool32 *supported_feature = (VkBool32 *)&supported_features;
939 VkBool32 *enabled_feature = (VkBool32 *)pCreateInfo->pEnabledFeatures;
940 unsigned num_features =
941 sizeof(VkPhysicalDeviceFeatures) / sizeof(VkBool32);
942 for (uint32_t i = 0; i < num_features; i++) {
943 if (enabled_feature[i] && !supported_feature[i])
944 return vk_error(physical_device->instance,
945 VK_ERROR_FEATURE_NOT_PRESENT);
946 }
947 }
948
949 device = vk_zalloc2(&physical_device->instance->alloc,
950 pAllocator,
951 sizeof(*device),
952 8,
953 VK_SYSTEM_ALLOCATION_SCOPE_DEVICE);
954 if (!device)
955 return vk_error(physical_device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
956
957 device->_loader_data.loaderMagic = ICD_LOADER_MAGIC;
958 device->instance = physical_device->instance;
959 device->physical_device = physical_device;
960
961 if (pAllocator)
962 device->alloc = *pAllocator;
963 else
964 device->alloc = physical_device->instance->alloc;
965
966 for (uint32_t i = 0; i < pCreateInfo->enabledExtensionCount; i++) {
967 const char *ext_name = pCreateInfo->ppEnabledExtensionNames[i];
968 int index = tu_get_device_extension_index(ext_name);
969 if (index < 0 ||
970 !physical_device->supported_extensions.extensions[index]) {
971 vk_free(&device->alloc, device);
972 return vk_error(physical_device->instance,
973 VK_ERROR_EXTENSION_NOT_PRESENT);
974 }
975
976 device->enabled_extensions.extensions[index] = true;
977 }
978
979 for (unsigned i = 0; i < pCreateInfo->queueCreateInfoCount; i++) {
980 const VkDeviceQueueCreateInfo *queue_create =
981 &pCreateInfo->pQueueCreateInfos[i];
982 uint32_t qfi = queue_create->queueFamilyIndex;
983 device->queues[qfi] =
984 vk_alloc(&device->alloc,
985 queue_create->queueCount * sizeof(struct tu_queue),
986 8,
987 VK_SYSTEM_ALLOCATION_SCOPE_DEVICE);
988 if (!device->queues[qfi]) {
989 result = VK_ERROR_OUT_OF_HOST_MEMORY;
990 goto fail;
991 }
992
993 memset(device->queues[qfi],
994 0,
995 queue_create->queueCount * sizeof(struct tu_queue));
996
997 device->queue_count[qfi] = queue_create->queueCount;
998
999 for (unsigned q = 0; q < queue_create->queueCount; q++) {
1000 result = tu_queue_init(
1001 device, &device->queues[qfi][q], qfi, q, queue_create->flags);
1002 if (result != VK_SUCCESS)
1003 goto fail;
1004 }
1005 }
1006
1007 VkPipelineCacheCreateInfo ci;
1008 ci.sType = VK_STRUCTURE_TYPE_PIPELINE_CACHE_CREATE_INFO;
1009 ci.pNext = NULL;
1010 ci.flags = 0;
1011 ci.pInitialData = NULL;
1012 ci.initialDataSize = 0;
1013 VkPipelineCache pc;
1014 result =
1015 tu_CreatePipelineCache(tu_device_to_handle(device), &ci, NULL, &pc);
1016 if (result != VK_SUCCESS)
1017 goto fail;
1018
1019 device->mem_cache = tu_pipeline_cache_from_handle(pc);
1020
1021 *pDevice = tu_device_to_handle(device);
1022 return VK_SUCCESS;
1023
1024 fail:
1025 for (unsigned i = 0; i < TU_MAX_QUEUE_FAMILIES; i++) {
1026 for (unsigned q = 0; q < device->queue_count[i]; q++)
1027 tu_queue_finish(&device->queues[i][q]);
1028 if (device->queue_count[i])
1029 vk_free(&device->alloc, device->queues[i]);
1030 }
1031
1032 vk_free(&device->alloc, device);
1033 return result;
1034 }
1035
1036 void
1037 tu_DestroyDevice(VkDevice _device, const VkAllocationCallbacks *pAllocator)
1038 {
1039 TU_FROM_HANDLE(tu_device, device, _device);
1040
1041 if (!device)
1042 return;
1043
1044 for (unsigned i = 0; i < TU_MAX_QUEUE_FAMILIES; i++) {
1045 for (unsigned q = 0; q < device->queue_count[i]; q++)
1046 tu_queue_finish(&device->queues[i][q]);
1047 if (device->queue_count[i])
1048 vk_free(&device->alloc, device->queues[i]);
1049 }
1050
1051 VkPipelineCache pc = tu_pipeline_cache_to_handle(device->mem_cache);
1052 tu_DestroyPipelineCache(tu_device_to_handle(device), pc, NULL);
1053
1054 vk_free(&device->alloc, device);
1055 }
1056
1057 VkResult
1058 tu_EnumerateInstanceLayerProperties(uint32_t *pPropertyCount,
1059 VkLayerProperties *pProperties)
1060 {
1061 if (pProperties == NULL) {
1062 *pPropertyCount = 0;
1063 return VK_SUCCESS;
1064 }
1065
1066 /* None supported at this time */
1067 return vk_error(NULL, VK_ERROR_LAYER_NOT_PRESENT);
1068 }
1069
1070 VkResult
1071 tu_EnumerateDeviceLayerProperties(VkPhysicalDevice physicalDevice,
1072 uint32_t *pPropertyCount,
1073 VkLayerProperties *pProperties)
1074 {
1075 if (pProperties == NULL) {
1076 *pPropertyCount = 0;
1077 return VK_SUCCESS;
1078 }
1079
1080 /* None supported at this time */
1081 return vk_error(NULL, VK_ERROR_LAYER_NOT_PRESENT);
1082 }
1083
1084 void
1085 tu_GetDeviceQueue2(VkDevice _device,
1086 const VkDeviceQueueInfo2 *pQueueInfo,
1087 VkQueue *pQueue)
1088 {
1089 TU_FROM_HANDLE(tu_device, device, _device);
1090 struct tu_queue *queue;
1091
1092 queue =
1093 &device->queues[pQueueInfo->queueFamilyIndex][pQueueInfo->queueIndex];
1094 if (pQueueInfo->flags != queue->flags) {
1095 /* From the Vulkan 1.1.70 spec:
1096 *
1097 * "The queue returned by vkGetDeviceQueue2 must have the same
1098 * flags value from this structure as that used at device
1099 * creation time in a VkDeviceQueueCreateInfo instance. If no
1100 * matching flags were specified at device creation time then
1101 * pQueue will return VK_NULL_HANDLE."
1102 */
1103 *pQueue = VK_NULL_HANDLE;
1104 return;
1105 }
1106
1107 *pQueue = tu_queue_to_handle(queue);
1108 }
1109
1110 void
1111 tu_GetDeviceQueue(VkDevice _device,
1112 uint32_t queueFamilyIndex,
1113 uint32_t queueIndex,
1114 VkQueue *pQueue)
1115 {
1116 const VkDeviceQueueInfo2 info =
1117 (VkDeviceQueueInfo2){.sType = VK_STRUCTURE_TYPE_DEVICE_QUEUE_INFO_2,
1118 .queueFamilyIndex = queueFamilyIndex,
1119 .queueIndex = queueIndex };
1120
1121 tu_GetDeviceQueue2(_device, &info, pQueue);
1122 }
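/* A minimal usage sketch (application side, names illustrative):
 *
 *    VkQueue queue;
 *    vkGetDeviceQueue(device, family_index, 0, &queue);
 *
 * lands in tu_GetDeviceQueue above with flags zeroed by the designated
 * initializer, so the flags check in tu_GetDeviceQueue2 succeeds for
 * queues created without VkDeviceQueueCreateInfo flags.
 */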
1123
1124 VkResult
1125 tu_QueueSubmit(VkQueue _queue,
1126 uint32_t submitCount,
1127 const VkSubmitInfo *pSubmits,
1128 VkFence _fence)
1129 {
1130 return VK_SUCCESS;
1131 }
1132
1133 VkResult
1134 tu_QueueWaitIdle(VkQueue _queue)
1135 {
1136 return VK_SUCCESS;
1137 }
1138
1139 VkResult
1140 tu_DeviceWaitIdle(VkDevice _device)
1141 {
1142 TU_FROM_HANDLE(tu_device, device, _device);
1143
1144 for (unsigned i = 0; i < TU_MAX_QUEUE_FAMILIES; i++) {
1145 for (unsigned q = 0; q < device->queue_count[i]; q++) {
1146 tu_QueueWaitIdle(tu_queue_to_handle(&device->queues[i][q]));
1147 }
1148 }
1149 return VK_SUCCESS;
1150 }
1151
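/* VK_OUTARRAY_MAKE/vk_outarray_append implement the usual Vulkan two-call
 * idiom: with pProperties == NULL only *pPropertyCount is written, and a
 * caller-provided array that is too short yields VK_INCOMPLETE.
 */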
1152 VkResult
1153 tu_EnumerateInstanceExtensionProperties(const char *pLayerName,
1154 uint32_t *pPropertyCount,
1155 VkExtensionProperties *pProperties)
1156 {
1157 VK_OUTARRAY_MAKE(out, pProperties, pPropertyCount);
1158
1159 for (int i = 0; i < TU_INSTANCE_EXTENSION_COUNT; i++) {
1160 if (tu_supported_instance_extensions.extensions[i]) {
1161 vk_outarray_append(&out, prop) { *prop = tu_instance_extensions[i]; }
1162 }
1163 }
1164
1165 return vk_outarray_status(&out);
1166 }
1167
1168 VkResult
1169 tu_EnumerateDeviceExtensionProperties(VkPhysicalDevice physicalDevice,
1170 const char *pLayerName,
1171 uint32_t *pPropertyCount,
1172 VkExtensionProperties *pProperties)
1173 {
1174 TU_FROM_HANDLE(tu_physical_device, device, physicalDevice);
1175 VK_OUTARRAY_MAKE(out, pProperties, pPropertyCount);
1176
1177 for (int i = 0; i < TU_DEVICE_EXTENSION_COUNT; i++) {
1178 if (device->supported_extensions.extensions[i]) {
1179 vk_outarray_append(&out, prop) { *prop = tu_device_extensions[i]; }
1180 }
1181 }
1182
1183 return vk_outarray_status(&out);
1184 }
1185
1186 PFN_vkVoidFunction
1187 tu_GetInstanceProcAddr(VkInstance _instance, const char *pName)
1188 {
1189 TU_FROM_HANDLE(tu_instance, instance, _instance);
1190
1191 return tu_lookup_entrypoint_checked(pName,
1192 instance ? instance->api_version : 0,
1193 instance ? &instance->enabled_extensions
1194 : NULL,
1195 NULL);
1196 }
1197
1198 /* The loader wants us to expose a second GetInstanceProcAddr function
1199 * to work around certain LD_PRELOAD issues seen in apps.
1200 */
1201 PUBLIC
1202 VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL
1203 vk_icdGetInstanceProcAddr(VkInstance instance, const char *pName);
1204
1205 PUBLIC
1206 VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL
1207 vk_icdGetInstanceProcAddr(VkInstance instance, const char *pName)
1208 {
1209 return tu_GetInstanceProcAddr(instance, pName);
1210 }
1211
1212 PFN_vkVoidFunction
1213 tu_GetDeviceProcAddr(VkDevice _device, const char *pName)
1214 {
1215 TU_FROM_HANDLE(tu_device, device, _device);
1216
1217 return tu_lookup_entrypoint_checked(pName,
1218 device->instance->api_version,
1219 &device->instance->enabled_extensions,
1220 &device->enabled_extensions);
1221 }
1222
1223 static VkResult
1224 tu_alloc_memory(struct tu_device *device,
1225 const VkMemoryAllocateInfo *pAllocateInfo,
1226 const VkAllocationCallbacks *pAllocator,
1227 VkDeviceMemory *pMem)
1228 {
1229 struct tu_device_memory *mem;
1230
1231 assert(pAllocateInfo->sType == VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO);
1232
1233 if (pAllocateInfo->allocationSize == 0) {
1234 /* Apparently, this is allowed */
1235 *pMem = VK_NULL_HANDLE;
1236 return VK_SUCCESS;
1237 }
1238
1239 mem = vk_alloc2(&device->alloc,
1240 pAllocator,
1241 sizeof(*mem),
1242 8,
1243 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
1244 if (mem == NULL)
1245 return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
1246
1247 mem->user_ptr = NULL; /* vk_alloc2 does not zero the allocation */
*pMem = tu_device_memory_to_handle(mem);
1248
1249 return VK_SUCCESS;
1250 }
1251
1252 VkResult
1253 tu_AllocateMemory(VkDevice _device,
1254 const VkMemoryAllocateInfo *pAllocateInfo,
1255 const VkAllocationCallbacks *pAllocator,
1256 VkDeviceMemory *pMem)
1257 {
1258 TU_FROM_HANDLE(tu_device, device, _device);
1259 return tu_alloc_memory(device, pAllocateInfo, pAllocator, pMem);
1260 }
1261
1262 void
1263 tu_FreeMemory(VkDevice _device,
1264 VkDeviceMemory _mem,
1265 const VkAllocationCallbacks *pAllocator)
1266 {
1267 TU_FROM_HANDLE(tu_device, device, _device);
1268 TU_FROM_HANDLE(tu_device_memory, mem, _mem);
1269
1270 if (mem == NULL)
1271 return;
1272
1273 vk_free2(&device->alloc, pAllocator, mem);
1274 }
1275
1276 VkResult
1277 tu_MapMemory(VkDevice _device,
1278 VkDeviceMemory _memory,
1279 VkDeviceSize offset,
1280 VkDeviceSize size,
1281 VkMemoryMapFlags flags,
1282 void **ppData)
1283 {
1284 TU_FROM_HANDLE(tu_device, device, _device);
1285 TU_FROM_HANDLE(tu_device_memory, mem, _memory);
1286
1287 if (mem == NULL) {
1288 *ppData = NULL;
1289 return VK_SUCCESS;
1290 }
1291
1292 /* stays NULL when there is no user pointer, failing the check below */
1293 *ppData = mem->user_ptr;
1294
1295 if (*ppData) {
1296 *ppData += offset;
1297 return VK_SUCCESS;
1298 }
1299
1300 return vk_error(device->instance, VK_ERROR_MEMORY_MAP_FAILED);
1301 }
1302
1303 void
1304 tu_UnmapMemory(VkDevice _device, VkDeviceMemory _memory)
1305 {
1306 TU_FROM_HANDLE(tu_device_memory, mem, _memory);
1307
1308 if (mem == NULL)
1309 return;
1310 }
1311
1312 VkResult
1313 tu_FlushMappedMemoryRanges(VkDevice _device,
1314 uint32_t memoryRangeCount,
1315 const VkMappedMemoryRange *pMemoryRanges)
1316 {
1317 return VK_SUCCESS;
1318 }
1319
1320 VkResult
1321 tu_InvalidateMappedMemoryRanges(VkDevice _device,
1322 uint32_t memoryRangeCount,
1323 const VkMappedMemoryRange *pMemoryRanges)
1324 {
1325 return VK_SUCCESS;
1326 }
1327
1328 void
1329 tu_GetBufferMemoryRequirements(VkDevice _device,
1330 VkBuffer _buffer,
1331 VkMemoryRequirements *pMemoryRequirements)
1332 {
1333 TU_FROM_HANDLE(tu_buffer, buffer, _buffer);
1334
1335 pMemoryRequirements->alignment = 16;
1336 pMemoryRequirements->size =
1337 align64(buffer->size, pMemoryRequirements->alignment);
1338 }
1339
1340 void
1341 tu_GetBufferMemoryRequirements2(
1342 VkDevice device,
1343 const VkBufferMemoryRequirementsInfo2KHR *pInfo,
1344 VkMemoryRequirements2KHR *pMemoryRequirements)
1345 {
1346 tu_GetBufferMemoryRequirements(
1347 device, pInfo->buffer, &pMemoryRequirements->memoryRequirements);
1348 }
1349
1350 void
1351 tu_GetImageMemoryRequirements(VkDevice _device,
1352 VkImage _image,
1353 VkMemoryRequirements *pMemoryRequirements)
1354 {
1355 TU_FROM_HANDLE(tu_image, image, _image);
1356
1357 /* TODO: memory type */
1358
1359 pMemoryRequirements->size = image->size;
1360 pMemoryRequirements->alignment = image->alignment;
1361 }
1362
1363 void
1364 tu_GetImageMemoryRequirements2(VkDevice device,
1365 const VkImageMemoryRequirementsInfo2KHR *pInfo,
1366 VkMemoryRequirements2KHR *pMemoryRequirements)
1367 {
1368 tu_GetImageMemoryRequirements(
1369 device, pInfo->image, &pMemoryRequirements->memoryRequirements);
1370 }
1371
1372 void
1373 tu_GetImageSparseMemoryRequirements(
1374 VkDevice device,
1375 VkImage image,
1376 uint32_t *pSparseMemoryRequirementCount,
1377 VkSparseImageMemoryRequirements *pSparseMemoryRequirements)
1378 {
1379 stub();
1380 }
1381
1382 void
1383 tu_GetImageSparseMemoryRequirements2(
1384 VkDevice device,
1385 const VkImageSparseMemoryRequirementsInfo2KHR *pInfo,
1386 uint32_t *pSparseMemoryRequirementCount,
1387 VkSparseImageMemoryRequirements2KHR *pSparseMemoryRequirements)
1388 {
1389 stub();
1390 }
1391
1392 void
1393 tu_GetDeviceMemoryCommitment(VkDevice device,
1394 VkDeviceMemory memory,
1395 VkDeviceSize *pCommittedMemoryInBytes)
1396 {
1397 *pCommittedMemoryInBytes = 0;
1398 }
1399
1400 VkResult
1401 tu_BindBufferMemory2(VkDevice device,
1402 uint32_t bindInfoCount,
1403 const VkBindBufferMemoryInfoKHR *pBindInfos)
1404 {
1405 return VK_SUCCESS;
1406 }
1407
1408 VkResult
1409 tu_BindBufferMemory(VkDevice device,
1410 VkBuffer buffer,
1411 VkDeviceMemory memory,
1412 VkDeviceSize memoryOffset)
1413 {
1414 const VkBindBufferMemoryInfoKHR info = {
1415 .sType = VK_STRUCTURE_TYPE_BIND_BUFFER_MEMORY_INFO_KHR,
1416 .buffer = buffer,
1417 .memory = memory,
1418 .memoryOffset = memoryOffset
1419 };
1420
1421 return tu_BindBufferMemory2(device, 1, &info);
1422 }
1423
1424 VkResult
1425 tu_BindImageMemory2(VkDevice device,
1426 uint32_t bindInfoCount,
1427 const VkBindImageMemoryInfoKHR *pBindInfos)
1428 {
1429 return VK_SUCCESS;
1430 }
1431
1432 VkResult
1433 tu_BindImageMemory(VkDevice device,
1434 VkImage image,
1435 VkDeviceMemory memory,
1436 VkDeviceSize memoryOffset)
1437 {
1438 const VkBindImageMemoryInfoKHR info = {
1439 .sType = VK_STRUCTURE_TYPE_BIND_IMAGE_MEMORY_INFO_KHR,
1440 .image = image,
1441 .memory = memory,
1442 .memoryOffset = memoryOffset
1443 };
1444
1445 return tu_BindImageMemory2(device, 1, &info);
1446 }
1447
1448 VkResult
1449 tu_QueueBindSparse(VkQueue _queue,
1450 uint32_t bindInfoCount,
1451 const VkBindSparseInfo *pBindInfo,
1452 VkFence _fence)
1453 {
1454 return VK_SUCCESS;
1455 }
1456
1457 VkResult
1458 tu_CreateFence(VkDevice _device,
1459 const VkFenceCreateInfo *pCreateInfo,
1460 const VkAllocationCallbacks *pAllocator,
1461 VkFence *pFence)
1462 {
1463 TU_FROM_HANDLE(tu_device, device, _device);
1464
1465 struct tu_fence *fence = vk_alloc2(&device->alloc,
1466 pAllocator,
1467 sizeof(*fence),
1468 8,
1469 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
1470
1471 if (!fence)
1472 return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
1473
1474 *pFence = tu_fence_to_handle(fence);
1475
1476 return VK_SUCCESS;
1477 }
1478
1479 void
1480 tu_DestroyFence(VkDevice _device,
1481 VkFence _fence,
1482 const VkAllocationCallbacks *pAllocator)
1483 {
1484 TU_FROM_HANDLE(tu_device, device, _device);
1485 TU_FROM_HANDLE(tu_fence, fence, _fence);
1486
1487 if (!fence)
1488 return;
1489
1490 vk_free2(&device->alloc, pAllocator, fence);
1491 }
1492
1493 VkResult
1494 tu_WaitForFences(VkDevice _device,
1495 uint32_t fenceCount,
1496 const VkFence *pFences,
1497 VkBool32 waitAll,
1498 uint64_t timeout)
1499 {
1500 return VK_SUCCESS;
1501 }
1502
1503 VkResult
1504 tu_ResetFences(VkDevice _device, uint32_t fenceCount, const VkFence *pFences)
1505 {
1506 return VK_SUCCESS;
1507 }
1508
1509 VkResult
1510 tu_GetFenceStatus(VkDevice _device, VkFence _fence)
1511 {
1512 return VK_SUCCESS;
1513 }
1514
1515 /* Queue semaphore functions */
1516
1517 VkResult
1518 tu_CreateSemaphore(VkDevice _device,
1519 const VkSemaphoreCreateInfo *pCreateInfo,
1520 const VkAllocationCallbacks *pAllocator,
1521 VkSemaphore *pSemaphore)
1522 {
1523 TU_FROM_HANDLE(tu_device, device, _device);
1524
1525 struct tu_semaphore *sem = vk_alloc2(&device->alloc,
1526 pAllocator,
1527 sizeof(*sem),
1528 8,
1529 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
1530 if (!sem)
1531 return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
1532
1533 *pSemaphore = tu_semaphore_to_handle(sem);
1534 return VK_SUCCESS;
1535 }
1536
1537 void
1538 tu_DestroySemaphore(VkDevice _device,
1539 VkSemaphore _semaphore,
1540 const VkAllocationCallbacks *pAllocator)
1541 {
1542 TU_FROM_HANDLE(tu_device, device, _device);
1543 TU_FROM_HANDLE(tu_semaphore, sem, _semaphore);
1544 if (!_semaphore)
1545 return;
1546
1547 vk_free2(&device->alloc, pAllocator, sem);
1548 }
1549
1550 VkResult
1551 tu_CreateEvent(VkDevice _device,
1552 const VkEventCreateInfo *pCreateInfo,
1553 const VkAllocationCallbacks *pAllocator,
1554 VkEvent *pEvent)
1555 {
1556 TU_FROM_HANDLE(tu_device, device, _device);
1557 struct tu_event *event = vk_alloc2(&device->alloc,
1558 pAllocator,
1559 sizeof(*event),
1560 8,
1561 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
1562
1563 if (!event)
1564 return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
1565
1566 *pEvent = tu_event_to_handle(event);
1567
1568 return VK_SUCCESS;
1569 }
1570
1571 void
1572 tu_DestroyEvent(VkDevice _device,
1573 VkEvent _event,
1574 const VkAllocationCallbacks *pAllocator)
1575 {
1576 TU_FROM_HANDLE(tu_device, device, _device);
1577 TU_FROM_HANDLE(tu_event, event, _event);
1578
1579 if (!event)
1580 return;
1581 vk_free2(&device->alloc, pAllocator, event);
1582 }
1583
1584 VkResult
1585 tu_GetEventStatus(VkDevice _device, VkEvent _event)
1586 {
1587 TU_FROM_HANDLE(tu_event, event, _event);
1588
1589 if (*event->map == 1)
1590 return VK_EVENT_SET;
1591 return VK_EVENT_RESET;
1592 }
1593
1594 VkResult
1595 tu_SetEvent(VkDevice _device, VkEvent _event)
1596 {
1597 TU_FROM_HANDLE(tu_event, event, _event);
1598 *event->map = 1;
1599
1600 return VK_SUCCESS;
1601 }
1602
1603 VkResult
1604 tu_ResetEvent(VkDevice _device, VkEvent _event)
1605 {
1606 TU_FROM_HANDLE(tu_event, event, _event);
1607 *event->map = 0;
1608
1609 return VK_SUCCESS;
1610 }
1611
1612 VkResult
1613 tu_CreateBuffer(VkDevice _device,
1614 const VkBufferCreateInfo *pCreateInfo,
1615 const VkAllocationCallbacks *pAllocator,
1616 VkBuffer *pBuffer)
1617 {
1618 TU_FROM_HANDLE(tu_device, device, _device);
1619 struct tu_buffer *buffer;
1620
1621 assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO);
1622
1623 buffer = vk_alloc2(&device->alloc,
1624 pAllocator,
1625 sizeof(*buffer),
1626 8,
1627 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
1628 if (buffer == NULL)
1629 return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
1630
1631 buffer->size = pCreateInfo->size;
1632 buffer->usage = pCreateInfo->usage;
1633 buffer->flags = pCreateInfo->flags;
1634
1635 *pBuffer = tu_buffer_to_handle(buffer);
1636
1637 return VK_SUCCESS;
1638 }
1639
1640 void
1641 tu_DestroyBuffer(VkDevice _device,
1642 VkBuffer _buffer,
1643 const VkAllocationCallbacks *pAllocator)
1644 {
1645 TU_FROM_HANDLE(tu_device, device, _device);
1646 TU_FROM_HANDLE(tu_buffer, buffer, _buffer);
1647
1648 if (!buffer)
1649 return;
1650
1651 vk_free2(&device->alloc, pAllocator, buffer);
1652 }
1653
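/* For 3D image views the framebuffer layer count is the depth extent; for
 * array views it is one past the last accessible array layer.
 */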
1654 static uint32_t
1655 tu_surface_max_layer_count(struct tu_image_view *iview)
1656 {
1657 return iview->type == VK_IMAGE_VIEW_TYPE_3D
1658 ? iview->extent.depth
1659 : (iview->base_layer + iview->layer_count);
1660 }
1661
1662 VkResult
1663 tu_CreateFramebuffer(VkDevice _device,
1664 const VkFramebufferCreateInfo *pCreateInfo,
1665 const VkAllocationCallbacks *pAllocator,
1666 VkFramebuffer *pFramebuffer)
1667 {
1668 TU_FROM_HANDLE(tu_device, device, _device);
1669 struct tu_framebuffer *framebuffer;
1670
1671 assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO);
1672
1673 size_t size =
1674 sizeof(*framebuffer) +
1675 sizeof(struct tu_attachment_info) * pCreateInfo->attachmentCount;
1676 framebuffer = vk_alloc2(
1677 &device->alloc, pAllocator, size, 8, VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
1678 if (framebuffer == NULL)
1679 return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
1680
1681 framebuffer->attachment_count = pCreateInfo->attachmentCount;
1682 framebuffer->width = pCreateInfo->width;
1683 framebuffer->height = pCreateInfo->height;
1684 framebuffer->layers = pCreateInfo->layers;
1685 for (uint32_t i = 0; i < pCreateInfo->attachmentCount; i++) {
1686 VkImageView _iview = pCreateInfo->pAttachments[i];
1687 struct tu_image_view *iview = tu_image_view_from_handle(_iview);
1688 framebuffer->attachments[i].attachment = iview;
1689
1690 framebuffer->width = MIN2(framebuffer->width, iview->extent.width);
1691 framebuffer->height = MIN2(framebuffer->height, iview->extent.height);
1692 framebuffer->layers =
1693 MIN2(framebuffer->layers, tu_surface_max_layer_count(iview));
1694 }
1695
1696 *pFramebuffer = tu_framebuffer_to_handle(framebuffer);
1697 return VK_SUCCESS;
1698 }
1699
1700 void
1701 tu_DestroyFramebuffer(VkDevice _device,
1702 VkFramebuffer _fb,
1703 const VkAllocationCallbacks *pAllocator)
1704 {
1705 TU_FROM_HANDLE(tu_device, device, _device);
1706 TU_FROM_HANDLE(tu_framebuffer, fb, _fb);
1707
1708 if (!fb)
1709 return;
1710 vk_free2(&device->alloc, pAllocator, fb);
1711 }
1712
1713 static void
1714 tu_init_sampler(struct tu_device *device,
1715 struct tu_sampler *sampler,
1716 const VkSamplerCreateInfo *pCreateInfo)
1717 {
1718 }
1719
1720 VkResult
1721 tu_CreateSampler(VkDevice _device,
1722 const VkSamplerCreateInfo *pCreateInfo,
1723 const VkAllocationCallbacks *pAllocator,
1724 VkSampler *pSampler)
1725 {
1726 TU_FROM_HANDLE(tu_device, device, _device);
1727 struct tu_sampler *sampler;
1728
1729 assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_SAMPLER_CREATE_INFO);
1730
1731 sampler = vk_alloc2(&device->alloc,
1732 pAllocator,
1733 sizeof(*sampler),
1734 8,
1735 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
1736 if (!sampler)
1737 return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
1738
1739 tu_init_sampler(device, sampler, pCreateInfo);
1740 *pSampler = tu_sampler_to_handle(sampler);
1741
1742 return VK_SUCCESS;
1743 }
1744
1745 void
1746 tu_DestroySampler(VkDevice _device,
1747 VkSampler _sampler,
1748 const VkAllocationCallbacks *pAllocator)
1749 {
1750 TU_FROM_HANDLE(tu_device, device, _device);
1751 TU_FROM_HANDLE(tu_sampler, sampler, _sampler);
1752
1753 if (!sampler)
1754 return;
1755 vk_free2(&device->alloc, pAllocator, sampler);
1756 }
1757
1758 /* vk_icd.h does not declare this function, so we declare it here to
1759 * suppress Wmissing-prototypes.
1760 */
1761 PUBLIC VKAPI_ATTR VkResult VKAPI_CALL
1762 vk_icdNegotiateLoaderICDInterfaceVersion(uint32_t *pSupportedVersion);
1763
1764 PUBLIC VKAPI_ATTR VkResult VKAPI_CALL
1765 vk_icdNegotiateLoaderICDInterfaceVersion(uint32_t *pSupportedVersion)
1766 {
1767 /* For the full details on loader interface versioning, see
1768 * <https://github.com/KhronosGroup/Vulkan-LoaderAndValidationLayers/blob/master/loader/LoaderAndLayerInterface.md>.
1769 * What follows is a condensed summary, to help you navigate the large and
1770 * confusing official doc.
1771 *
1772 * - Loader interface v0 is incompatible with later versions. We don't
1773 * support it.
1774 *
1775 * - In loader interface v1:
1776 * - The first ICD entrypoint called by the loader is
1777 * vk_icdGetInstanceProcAddr(). The ICD must statically expose this
1778 * entrypoint.
1779 * - The ICD must statically expose no other Vulkan symbol unless it is
1780 * linked with -Bsymbolic.
1781 * - Each dispatchable Vulkan handle created by the ICD must be
1782 * a pointer to a struct whose first member is VK_LOADER_DATA. The
1783 * ICD must initialize VK_LOADER_DATA.loaderMagic to ICD_LOADER_MAGIC.
1784 * - The loader implements vkCreate{PLATFORM}SurfaceKHR() and
1785 * vkDestroySurfaceKHR(). The ICD must be capable of working with
1786 * such loader-managed surfaces.
1787 *
1788 * - Loader interface v2 differs from v1 in:
1789 * - The first ICD entrypoint called by the loader is
1790 * vk_icdNegotiateLoaderICDInterfaceVersion(). The ICD must
1791 * statically expose this entrypoint.
1792 *
1793 * - Loader interface v3 differs from v2 in:
1794 * - The ICD must implement vkCreate{PLATFORM}SurfaceKHR(),
1795 * vkDestroySurfaceKHR(), and other API which uses VkSurfaceKHR,
1796 * because the loader no longer does so.
1797 */
1798 *pSupportedVersion = MIN2(*pSupportedVersion, 3u);
1799 return VK_SUCCESS;
1800 }
1801
1802 void
1803 tu_GetPhysicalDeviceExternalSemaphoreProperties(
1804 VkPhysicalDevice physicalDevice,
1805 const VkPhysicalDeviceExternalSemaphoreInfoKHR *pExternalSemaphoreInfo,
1806 VkExternalSemaphorePropertiesKHR *pExternalSemaphoreProperties)
1807 {
1808 pExternalSemaphoreProperties->exportFromImportedHandleTypes = 0;
1809 pExternalSemaphoreProperties->compatibleHandleTypes = 0;
1810 pExternalSemaphoreProperties->externalSemaphoreFeatures = 0;
1811 }
1812
1813 void
1814 tu_GetPhysicalDeviceExternalFenceProperties(
1815 VkPhysicalDevice physicalDevice,
1816 const VkPhysicalDeviceExternalFenceInfoKHR *pExternalFenceInfo,
1817 VkExternalFencePropertiesKHR *pExternalFenceProperties)
1818 {
1819 pExternalFenceProperties->exportFromImportedHandleTypes = 0;
1820 pExternalFenceProperties->compatibleHandleTypes = 0;
1821 pExternalFenceProperties->externalFenceFeatures = 0;
1822 }
1823
1824 VkResult
1825 tu_CreateDebugReportCallbackEXT(
1826 VkInstance _instance,
1827 const VkDebugReportCallbackCreateInfoEXT *pCreateInfo,
1828 const VkAllocationCallbacks *pAllocator,
1829 VkDebugReportCallbackEXT *pCallback)
1830 {
1831 TU_FROM_HANDLE(tu_instance, instance, _instance);
1832 return vk_create_debug_report_callback(&instance->debug_report_callbacks,
1833 pCreateInfo,
1834 pAllocator,
1835 &instance->alloc,
1836 pCallback);
1837 }
1838
1839 void
1840 tu_DestroyDebugReportCallbackEXT(VkInstance _instance,
1841 VkDebugReportCallbackEXT _callback,
1842 const VkAllocationCallbacks *pAllocator)
1843 {
1844 TU_FROM_HANDLE(tu_instance, instance, _instance);
1845 vk_destroy_debug_report_callback(&instance->debug_report_callbacks,
1846 _callback,
1847 pAllocator,
1848 &instance->alloc);
1849 }
1850
1851 void
1852 tu_DebugReportMessageEXT(VkInstance _instance,
1853 VkDebugReportFlagsEXT flags,
1854 VkDebugReportObjectTypeEXT objectType,
1855 uint64_t object,
1856 size_t location,
1857 int32_t messageCode,
1858 const char *pLayerPrefix,
1859 const char *pMessage)
1860 {
1861 TU_FROM_HANDLE(tu_instance, instance, _instance);
1862 vk_debug_report(&instance->debug_report_callbacks,
1863 flags,
1864 objectType,
1865 object,
1866 location,
1867 messageCode,
1868 pLayerPrefix,
1869 pMessage);
1870 }
1871
1872 void
1873 tu_GetDeviceGroupPeerMemoryFeatures(
1874 VkDevice device,
1875 uint32_t heapIndex,
1876 uint32_t localDeviceIndex,
1877 uint32_t remoteDeviceIndex,
1878 VkPeerMemoryFeatureFlags *pPeerMemoryFeatures)
1879 {
1880 assert(localDeviceIndex == remoteDeviceIndex);
1881
1882 *pPeerMemoryFeatures = VK_PEER_MEMORY_FEATURE_COPY_SRC_BIT |
1883 VK_PEER_MEMORY_FEATURE_COPY_DST_BIT |
1884 VK_PEER_MEMORY_FEATURE_GENERIC_SRC_BIT |
1885 VK_PEER_MEMORY_FEATURE_GENERIC_DST_BIT;
1886 }