turnip: Fix a real -Wmaybe-uninitialized
mesa.git: src/freedreno/vulkan/tu_device.c
/*
 * Copyright © 2016 Red Hat.
 * Copyright © 2016 Bas Nieuwenhuizen
 *
 * based in part on anv driver which is:
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "tu_private.h"
#include "util/debug.h"
#include "util/disk_cache.h"
#include "util/strtod.h"
#include "vk_format.h"
#include "vk_util.h"
#include <fcntl.h>
#include <stdbool.h>
#include <string.h>
#include <sys/sysinfo.h>
#include <unistd.h>
#include <xf86drm.h>

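/* Build the pipeline cache UUID from the Mesa build timestamp (4 bytes),
 * the GPU family (2 bytes) and the literal "tu". The caller must pass a
 * VK_UUID_SIZE (16-byte) buffer; trailing bytes stay zeroed.
 */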
static int
tu_device_get_cache_uuid(uint16_t family, void *uuid)
{
   uint32_t mesa_timestamp;
   uint16_t f = family;
   memset(uuid, 0, VK_UUID_SIZE);
   if (!disk_cache_get_function_timestamp(tu_device_get_cache_uuid,
                                          &mesa_timestamp))
      return -1;

   memcpy(uuid, &mesa_timestamp, 4);
   memcpy((char *)uuid + 4, &f, 2);
   snprintf((char *)uuid + 6, VK_UUID_SIZE - 10, "tu");
   return 0;
}

static void
tu_get_driver_uuid(void *uuid)
{
   memset(uuid, 0, VK_UUID_SIZE);
}

static void
tu_get_device_uuid(void *uuid)
{
   stub();
}

static VkResult
tu_physical_device_init(struct tu_physical_device *device,
                        struct tu_instance *instance,
                        drmDevicePtr drm_device)
{
   const char *path = drm_device->nodes[DRM_NODE_RENDER];
   VkResult result = VK_SUCCESS;
   drmVersionPtr version;
   int fd;
   int master_fd = -1;
   struct fd_pipe *tmp_pipe = NULL;
   uint64_t val;

   fd = open(path, O_RDWR | O_CLOEXEC);
   if (fd < 0) {
      if (instance->debug_flags & TU_DEBUG_STARTUP)
         tu_logi("Could not open device '%s'", path);

      return vk_error(instance, VK_ERROR_INCOMPATIBLE_DRIVER);
   }

   version = drmGetVersion(fd);
   if (!version) {
      close(fd);

      if (instance->debug_flags & TU_DEBUG_STARTUP)
         tu_logi("Could not get the kernel driver version for device '%s'",
                 path);

      return vk_errorf(instance,
                       VK_ERROR_INCOMPATIBLE_DRIVER,
                       "failed to get version %s: %m",
                       path);
   }

   if (strcmp(version->name, "msm")) {
      drmFreeVersion(version);
      if (master_fd != -1)
         close(master_fd);
      close(fd);

      if (instance->debug_flags & TU_DEBUG_STARTUP)
         tu_logi("Device '%s' is not using the msm kernel driver.", path);

      return VK_ERROR_INCOMPATIBLE_DRIVER;
   }
   drmFreeVersion(version);

   if (instance->debug_flags & TU_DEBUG_STARTUP)
      tu_logi("Found compatible device '%s'.", path);

   device->_loader_data.loaderMagic = ICD_LOADER_MAGIC;
   device->instance = instance;
   assert(strlen(path) < ARRAY_SIZE(device->path));
   strncpy(device->path, path, ARRAY_SIZE(device->path));

   if (instance->enabled_extensions.KHR_display) {
      master_fd = open(drm_device->nodes[DRM_NODE_PRIMARY], O_RDWR | O_CLOEXEC);
      if (master_fd >= 0) {
         /* TODO: free master_fd if accel is not working? */
      }
   }

   device->master_fd = master_fd;
   device->local_fd = fd;

   device->drm_device = fd_device_new_dup(fd);
   if (!device->drm_device) {
      result = vk_errorf(
        instance, VK_ERROR_INITIALIZATION_FAILED, "could not create the libdrm device");
      goto fail;
   }

   tmp_pipe = fd_pipe_new(device->drm_device, FD_PIPE_3D);
   if (!tmp_pipe) {
      result = vk_errorf(
        instance, VK_ERROR_INITIALIZATION_FAILED, "could not open the 3D pipe");
      goto fail;
   }

   if (fd_pipe_get_param(tmp_pipe, FD_GPU_ID, &val)) {
      result = vk_errorf(
        instance, VK_ERROR_INITIALIZATION_FAILED, "could not get GPU ID");
      goto fail;
   }
   device->gpu_id = val;

   if (fd_pipe_get_param(tmp_pipe, FD_GMEM_SIZE, &val)) {
      result = vk_errorf(
        instance, VK_ERROR_INITIALIZATION_FAILED, "could not get GMEM size");
      goto fail;
   }
   device->gmem_size = val;

   fd_pipe_del(tmp_pipe);
   tmp_pipe = NULL;

   memset(device->name, 0, sizeof(device->name));
   sprintf(device->name, "FD%d", device->gpu_id);

   switch (device->gpu_id) {
   case 530:
      break;
   default:
      if (instance->debug_flags & TU_DEBUG_STARTUP)
         tu_logi("Device '%s' is not supported.", device->name);
      result = vk_errorf(
        instance, VK_ERROR_INITIALIZATION_FAILED, "unsupported device");
      goto fail;
   }
   if (tu_device_get_cache_uuid(device->gpu_id, device->cache_uuid)) {
      result = vk_errorf(
        instance, VK_ERROR_INITIALIZATION_FAILED, "cannot generate UUID");
      goto fail;
   }

   /* The GPU ID is already embedded in the UUID, so we just pass "tu"
    * when creating the cache.
    */
   char buf[VK_UUID_SIZE * 2 + 1];
   disk_cache_format_hex_id(buf, device->cache_uuid, VK_UUID_SIZE * 2);
   device->disk_cache = disk_cache_create(device->name, buf, 0);

   fprintf(stderr,
           "WARNING: tu is not a conformant vulkan implementation, "
           "testing use only.\n");

   tu_get_driver_uuid(&device->driver_uuid);
   tu_get_device_uuid(&device->device_uuid);

   tu_fill_device_extension_table(device, &device->supported_extensions);

   if (result != VK_SUCCESS) {
      vk_error(instance, result);
      goto fail;
   }

   return VK_SUCCESS;

fail:
   if (tmp_pipe)
      fd_pipe_del(tmp_pipe);
   if (device->drm_device)
      fd_device_del(device->drm_device);
   close(fd);
   if (master_fd != -1)
      close(master_fd);
   return result;
}

static void
tu_physical_device_finish(struct tu_physical_device *device)
{
   disk_cache_destroy(device->disk_cache);
   close(device->local_fd);
   if (device->master_fd != -1)
      close(device->master_fd);
}

static void *
default_alloc_func(void *pUserData,
                   size_t size,
                   size_t align,
                   VkSystemAllocationScope allocationScope)
{
   return malloc(size);
}

static void *
default_realloc_func(void *pUserData,
                     void *pOriginal,
                     size_t size,
                     size_t align,
                     VkSystemAllocationScope allocationScope)
{
   return realloc(pOriginal, size);
}

static void
default_free_func(void *pUserData, void *pMemory)
{
   free(pMemory);
}

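/* Note that these default callbacks ignore the requested alignment and rely
 * on malloc()/realloc() returning storage suitable for the 8-byte alignment
 * this driver asks for everywhere.
 */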
static const VkAllocationCallbacks default_alloc = {
   .pUserData = NULL,
   .pfnAllocation = default_alloc_func,
   .pfnReallocation = default_realloc_func,
   .pfnFree = default_free_func,
};

static const struct debug_control tu_debug_options[] = {
   { "startup", TU_DEBUG_STARTUP },
   { NULL, 0 }
};

const char *
tu_get_debug_option_name(int id)
{
   assert(id < ARRAY_SIZE(tu_debug_options) - 1);
   return tu_debug_options[id].string;
}

static int
tu_get_instance_extension_index(const char *name)
{
   for (unsigned i = 0; i < TU_INSTANCE_EXTENSION_COUNT; ++i) {
      if (strcmp(name, tu_instance_extensions[i].extensionName) == 0)
         return i;
   }
   return -1;
}

VkResult
tu_CreateInstance(const VkInstanceCreateInfo *pCreateInfo,
                  const VkAllocationCallbacks *pAllocator,
                  VkInstance *pInstance)
{
   struct tu_instance *instance;
   VkResult result;

   assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_INSTANCE_CREATE_INFO);

   uint32_t client_version;
   if (pCreateInfo->pApplicationInfo &&
       pCreateInfo->pApplicationInfo->apiVersion != 0) {
      client_version = pCreateInfo->pApplicationInfo->apiVersion;
   } else {
      tu_EnumerateInstanceVersion(&client_version);
   }

   instance = vk_zalloc2(&default_alloc,
                         pAllocator,
                         sizeof(*instance),
                         8,
                         VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
   if (!instance)
      return vk_error(NULL, VK_ERROR_OUT_OF_HOST_MEMORY);

   instance->_loader_data.loaderMagic = ICD_LOADER_MAGIC;

   if (pAllocator)
      instance->alloc = *pAllocator;
   else
      instance->alloc = default_alloc;

   instance->api_version = client_version;
   instance->physical_device_count = -1;

   instance->debug_flags =
     parse_debug_string(getenv("TU_DEBUG"), tu_debug_options);

   if (instance->debug_flags & TU_DEBUG_STARTUP)
      tu_logi("Created an instance");

   for (uint32_t i = 0; i < pCreateInfo->enabledExtensionCount; i++) {
      const char *ext_name = pCreateInfo->ppEnabledExtensionNames[i];
      int index = tu_get_instance_extension_index(ext_name);

      if (index < 0 || !tu_supported_instance_extensions.extensions[index]) {
         vk_free2(&default_alloc, pAllocator, instance);
         return vk_error(instance, VK_ERROR_EXTENSION_NOT_PRESENT);
      }

      instance->enabled_extensions.extensions[index] = true;
   }

   result = vk_debug_report_instance_init(&instance->debug_report_callbacks);
   if (result != VK_SUCCESS) {
      vk_free2(&default_alloc, pAllocator, instance);
      return vk_error(instance, result);
   }

   _mesa_locale_init();

   VG(VALGRIND_CREATE_MEMPOOL(instance, 0, false));

   *pInstance = tu_instance_to_handle(instance);

   return VK_SUCCESS;
}

void
tu_DestroyInstance(VkInstance _instance,
                   const VkAllocationCallbacks *pAllocator)
{
   TU_FROM_HANDLE(tu_instance, instance, _instance);

   if (!instance)
      return;

   for (int i = 0; i < instance->physical_device_count; ++i) {
      tu_physical_device_finish(instance->physical_devices + i);
   }

   VG(VALGRIND_DESTROY_MEMPOOL(instance));

   _mesa_locale_fini();

   vk_debug_report_instance_destroy(&instance->debug_report_callbacks);

   vk_free(&instance->alloc, instance);
}

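/* Adreno GPUs driven by the msm kernel driver show up as platform devices
 * rather than PCI devices, hence the DRM_BUS_PLATFORM filter below.
 */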
static VkResult
tu_enumerate_devices(struct tu_instance *instance)
{
   /* TODO: Check for more devices ? */
   drmDevicePtr devices[8];
   VkResult result = VK_ERROR_INCOMPATIBLE_DRIVER;
   int max_devices;

   instance->physical_device_count = 0;

   max_devices = drmGetDevices2(0, devices, ARRAY_SIZE(devices));

   if (instance->debug_flags & TU_DEBUG_STARTUP)
      tu_logi("Found %d drm nodes", max_devices);

   if (max_devices < 1)
      return vk_error(instance, VK_ERROR_INCOMPATIBLE_DRIVER);

   for (unsigned i = 0; i < (unsigned)max_devices; i++) {
      if (devices[i]->available_nodes & 1 << DRM_NODE_RENDER &&
          devices[i]->bustype == DRM_BUS_PLATFORM) {

         result = tu_physical_device_init(instance->physical_devices +
                                            instance->physical_device_count,
                                          instance,
                                          devices[i]);
         if (result == VK_SUCCESS)
            ++instance->physical_device_count;
         else if (result != VK_ERROR_INCOMPATIBLE_DRIVER)
            break;
      }
   }
   drmFreeDevices(devices, max_devices);

   return result;
}

VkResult
tu_EnumeratePhysicalDevices(VkInstance _instance,
                            uint32_t *pPhysicalDeviceCount,
                            VkPhysicalDevice *pPhysicalDevices)
{
   TU_FROM_HANDLE(tu_instance, instance, _instance);
   VK_OUTARRAY_MAKE(out, pPhysicalDevices, pPhysicalDeviceCount);

   VkResult result;

   if (instance->physical_device_count < 0) {
      result = tu_enumerate_devices(instance);
      if (result != VK_SUCCESS && result != VK_ERROR_INCOMPATIBLE_DRIVER)
         return result;
   }

   for (uint32_t i = 0; i < instance->physical_device_count; ++i) {
      vk_outarray_append(&out, p) {
         *p = tu_physical_device_to_handle(instance->physical_devices + i);
      }
   }

   return vk_outarray_status(&out);
}

VkResult
tu_EnumeratePhysicalDeviceGroups(
  VkInstance _instance,
  uint32_t *pPhysicalDeviceGroupCount,
  VkPhysicalDeviceGroupProperties *pPhysicalDeviceGroupProperties)
{
   TU_FROM_HANDLE(tu_instance, instance, _instance);
   VK_OUTARRAY_MAKE(out, pPhysicalDeviceGroupProperties,
                    pPhysicalDeviceGroupCount);
   VkResult result;

   if (instance->physical_device_count < 0) {
      result = tu_enumerate_devices(instance);
      if (result != VK_SUCCESS && result != VK_ERROR_INCOMPATIBLE_DRIVER)
         return result;
   }

   for (uint32_t i = 0; i < instance->physical_device_count; ++i) {
      vk_outarray_append(&out, p) {
         p->physicalDeviceCount = 1;
         p->physicalDevices[0] =
           tu_physical_device_to_handle(instance->physical_devices + i);
         p->subsetAllocation = false;
      }
   }

   return vk_outarray_status(&out);
}

void
tu_GetPhysicalDeviceFeatures(VkPhysicalDevice physicalDevice,
                             VkPhysicalDeviceFeatures *pFeatures)
{
   memset(pFeatures, 0, sizeof(*pFeatures));

   *pFeatures = (VkPhysicalDeviceFeatures){
      .robustBufferAccess = false,
      .fullDrawIndexUint32 = false,
      .imageCubeArray = false,
      .independentBlend = false,
      .geometryShader = false,
      .tessellationShader = false,
      .sampleRateShading = false,
      .dualSrcBlend = false,
      .logicOp = false,
      .multiDrawIndirect = false,
      .drawIndirectFirstInstance = false,
      .depthClamp = false,
      .depthBiasClamp = false,
      .fillModeNonSolid = false,
      .depthBounds = false,
      .wideLines = false,
      .largePoints = false,
      .alphaToOne = false,
      .multiViewport = false,
      .samplerAnisotropy = false,
      .textureCompressionETC2 = false,
      .textureCompressionASTC_LDR = false,
      .textureCompressionBC = false,
      .occlusionQueryPrecise = false,
      .pipelineStatisticsQuery = false,
      .vertexPipelineStoresAndAtomics = false,
      .fragmentStoresAndAtomics = false,
      .shaderTessellationAndGeometryPointSize = false,
      .shaderImageGatherExtended = false,
      .shaderStorageImageExtendedFormats = false,
      .shaderStorageImageMultisample = false,
      .shaderUniformBufferArrayDynamicIndexing = false,
      .shaderSampledImageArrayDynamicIndexing = false,
      .shaderStorageBufferArrayDynamicIndexing = false,
      .shaderStorageImageArrayDynamicIndexing = false,
      .shaderStorageImageReadWithoutFormat = false,
      .shaderStorageImageWriteWithoutFormat = false,
      .shaderClipDistance = false,
      .shaderCullDistance = false,
      .shaderFloat64 = false,
      .shaderInt64 = false,
      .shaderInt16 = false,
      .sparseBinding = false,
      .variableMultisampleRate = false,
      .inheritedQueries = false,
   };
}

void
tu_GetPhysicalDeviceFeatures2(VkPhysicalDevice physicalDevice,
                              VkPhysicalDeviceFeatures2KHR *pFeatures)
{
   vk_foreach_struct(ext, pFeatures->pNext)
   {
      switch (ext->sType) {
      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VARIABLE_POINTER_FEATURES_KHR: {
         VkPhysicalDeviceVariablePointerFeaturesKHR *features = (void *)ext;
         features->variablePointersStorageBuffer = false;
         features->variablePointers = false;
         break;
      }
      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MULTIVIEW_FEATURES_KHR: {
         VkPhysicalDeviceMultiviewFeaturesKHR *features =
           (VkPhysicalDeviceMultiviewFeaturesKHR *)ext;
         features->multiview = false;
         features->multiviewGeometryShader = false;
         features->multiviewTessellationShader = false;
         break;
      }
      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_DRAW_PARAMETER_FEATURES: {
         VkPhysicalDeviceShaderDrawParameterFeatures *features =
           (VkPhysicalDeviceShaderDrawParameterFeatures *)ext;
         features->shaderDrawParameters = false;
         break;
      }
      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROTECTED_MEMORY_FEATURES: {
         VkPhysicalDeviceProtectedMemoryFeatures *features =
           (VkPhysicalDeviceProtectedMemoryFeatures *)ext;
         features->protectedMemory = false;
         break;
      }
      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_16BIT_STORAGE_FEATURES: {
         VkPhysicalDevice16BitStorageFeatures *features =
           (VkPhysicalDevice16BitStorageFeatures *)ext;
         features->storageBuffer16BitAccess = false;
         features->uniformAndStorageBuffer16BitAccess = false;
         features->storagePushConstant16 = false;
         features->storageInputOutput16 = false;
         break;
      }
      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SAMPLER_YCBCR_CONVERSION_FEATURES: {
         VkPhysicalDeviceSamplerYcbcrConversionFeatures *features =
           (VkPhysicalDeviceSamplerYcbcrConversionFeatures *)ext;
         features->samplerYcbcrConversion = false;
         break;
      }
      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DESCRIPTOR_INDEXING_FEATURES_EXT: {
         VkPhysicalDeviceDescriptorIndexingFeaturesEXT *features =
           (VkPhysicalDeviceDescriptorIndexingFeaturesEXT *)ext;
         features->shaderInputAttachmentArrayDynamicIndexing = false;
         features->shaderUniformTexelBufferArrayDynamicIndexing = false;
         features->shaderStorageTexelBufferArrayDynamicIndexing = false;
         features->shaderUniformBufferArrayNonUniformIndexing = false;
         features->shaderSampledImageArrayNonUniformIndexing = false;
         features->shaderStorageBufferArrayNonUniformIndexing = false;
         features->shaderStorageImageArrayNonUniformIndexing = false;
         features->shaderInputAttachmentArrayNonUniformIndexing = false;
         features->shaderUniformTexelBufferArrayNonUniformIndexing = false;
         features->shaderStorageTexelBufferArrayNonUniformIndexing = false;
         features->descriptorBindingUniformBufferUpdateAfterBind = false;
         features->descriptorBindingSampledImageUpdateAfterBind = false;
         features->descriptorBindingStorageImageUpdateAfterBind = false;
         features->descriptorBindingStorageBufferUpdateAfterBind = false;
         features->descriptorBindingUniformTexelBufferUpdateAfterBind = false;
         features->descriptorBindingStorageTexelBufferUpdateAfterBind = false;
         features->descriptorBindingUpdateUnusedWhilePending = false;
         features->descriptorBindingPartiallyBound = false;
         features->descriptorBindingVariableDescriptorCount = false;
         features->runtimeDescriptorArray = false;
         break;
      }
      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_CONDITIONAL_RENDERING_FEATURES_EXT: {
         VkPhysicalDeviceConditionalRenderingFeaturesEXT *features =
           (VkPhysicalDeviceConditionalRenderingFeaturesEXT *)ext;
         features->conditionalRendering = false;
         features->inheritedConditionalRendering = false;
         break;
      }
      default:
         break;
      }
   }
   return tu_GetPhysicalDeviceFeatures(physicalDevice, &pFeatures->features);
}

void
tu_GetPhysicalDeviceProperties(VkPhysicalDevice physicalDevice,
                               VkPhysicalDeviceProperties *pProperties)
{
   TU_FROM_HANDLE(tu_physical_device, pdevice, physicalDevice);
   VkSampleCountFlags sample_counts = 0xf;

   /* Make sure that the entire descriptor set is addressable with a signed
    * 32-bit int. So the sum of all limits scaled by descriptor size has to
    * be at most 2 GiB. The combined image & sampler object counts as one of
    * both. This limit is for the pipeline layout, not for the set layout,
    * but there is no set limit, so we just set a pipeline limit. I don't
    * think any app is going to hit this soon. */
   size_t max_descriptor_set_size =
     ((1ull << 31) - 16 * MAX_DYNAMIC_BUFFERS) /
     (32 /* uniform buffer, 32 due to potential space wasted on alignment */ +
      32 /* storage buffer, 32 due to potential space wasted on alignment */ +
      32 /* sampler, largest when combined with image */ +
      64 /* sampled image */ + 64 /* storage image */);

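   /* A rough worked example: the per-slot cost above sums to
    * 32 + 32 + 32 + 64 + 64 = 224 bytes, so ignoring the small
    * dynamic-buffer reservation this yields on the order of
    * 2^31 / 224 ≈ 9.5 million descriptors for each limit below.
    */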
   VkPhysicalDeviceLimits limits = {
      .maxImageDimension1D = (1 << 14),
      .maxImageDimension2D = (1 << 14),
      .maxImageDimension3D = (1 << 11),
      .maxImageDimensionCube = (1 << 14),
      .maxImageArrayLayers = (1 << 11),
      .maxTexelBufferElements = 128 * 1024 * 1024,
      .maxUniformBufferRange = UINT32_MAX,
      .maxStorageBufferRange = UINT32_MAX,
      .maxPushConstantsSize = MAX_PUSH_CONSTANTS_SIZE,
      .maxMemoryAllocationCount = UINT32_MAX,
      .maxSamplerAllocationCount = 64 * 1024,
      .bufferImageGranularity = 64, /* A cache line */
      .sparseAddressSpaceSize = 0xffffffffu, /* buffer max size */
      .maxBoundDescriptorSets = MAX_SETS,
      .maxPerStageDescriptorSamplers = max_descriptor_set_size,
      .maxPerStageDescriptorUniformBuffers = max_descriptor_set_size,
      .maxPerStageDescriptorStorageBuffers = max_descriptor_set_size,
      .maxPerStageDescriptorSampledImages = max_descriptor_set_size,
      .maxPerStageDescriptorStorageImages = max_descriptor_set_size,
      .maxPerStageDescriptorInputAttachments = max_descriptor_set_size,
      .maxPerStageResources = max_descriptor_set_size,
      .maxDescriptorSetSamplers = max_descriptor_set_size,
      .maxDescriptorSetUniformBuffers = max_descriptor_set_size,
      .maxDescriptorSetUniformBuffersDynamic = MAX_DYNAMIC_UNIFORM_BUFFERS,
      .maxDescriptorSetStorageBuffers = max_descriptor_set_size,
      .maxDescriptorSetStorageBuffersDynamic = MAX_DYNAMIC_STORAGE_BUFFERS,
      .maxDescriptorSetSampledImages = max_descriptor_set_size,
      .maxDescriptorSetStorageImages = max_descriptor_set_size,
      .maxDescriptorSetInputAttachments = max_descriptor_set_size,
      .maxVertexInputAttributes = 32,
      .maxVertexInputBindings = 32,
      .maxVertexInputAttributeOffset = 2047,
      .maxVertexInputBindingStride = 2048,
      .maxVertexOutputComponents = 128,
      .maxTessellationGenerationLevel = 64,
      .maxTessellationPatchSize = 32,
      .maxTessellationControlPerVertexInputComponents = 128,
      .maxTessellationControlPerVertexOutputComponents = 128,
      .maxTessellationControlPerPatchOutputComponents = 120,
      .maxTessellationControlTotalOutputComponents = 4096,
      .maxTessellationEvaluationInputComponents = 128,
      .maxTessellationEvaluationOutputComponents = 128,
      .maxGeometryShaderInvocations = 127,
      .maxGeometryInputComponents = 64,
      .maxGeometryOutputComponents = 128,
      .maxGeometryOutputVertices = 256,
      .maxGeometryTotalOutputComponents = 1024,
      .maxFragmentInputComponents = 128,
      .maxFragmentOutputAttachments = 8,
      .maxFragmentDualSrcAttachments = 1,
      .maxFragmentCombinedOutputResources = 8,
      .maxComputeSharedMemorySize = 32768,
      .maxComputeWorkGroupCount = { 65535, 65535, 65535 },
      .maxComputeWorkGroupInvocations = 2048,
      .maxComputeWorkGroupSize = { 2048, 2048, 2048 },
      .subPixelPrecisionBits = 4 /* FIXME */,
      .subTexelPrecisionBits = 4 /* FIXME */,
      .mipmapPrecisionBits = 4 /* FIXME */,
      .maxDrawIndexedIndexValue = UINT32_MAX,
      .maxDrawIndirectCount = UINT32_MAX,
      .maxSamplerLodBias = 16,
      .maxSamplerAnisotropy = 16,
      .maxViewports = MAX_VIEWPORTS,
      .maxViewportDimensions = { (1 << 14), (1 << 14) },
      .viewportBoundsRange = { INT16_MIN, INT16_MAX },
      .viewportSubPixelBits = 8,
      .minMemoryMapAlignment = 4096, /* A page */
      .minTexelBufferOffsetAlignment = 1,
      .minUniformBufferOffsetAlignment = 4,
      .minStorageBufferOffsetAlignment = 4,
      .minTexelOffset = -32,
      .maxTexelOffset = 31,
      .minTexelGatherOffset = -32,
      .maxTexelGatherOffset = 31,
      .minInterpolationOffset = -2,
      .maxInterpolationOffset = 2,
      .subPixelInterpolationOffsetBits = 8,
      .maxFramebufferWidth = (1 << 14),
      .maxFramebufferHeight = (1 << 14),
      .maxFramebufferLayers = (1 << 10),
      .framebufferColorSampleCounts = sample_counts,
      .framebufferDepthSampleCounts = sample_counts,
      .framebufferStencilSampleCounts = sample_counts,
      .framebufferNoAttachmentsSampleCounts = sample_counts,
      .maxColorAttachments = MAX_RTS,
      .sampledImageColorSampleCounts = sample_counts,
      .sampledImageIntegerSampleCounts = VK_SAMPLE_COUNT_1_BIT,
      .sampledImageDepthSampleCounts = sample_counts,
      .sampledImageStencilSampleCounts = sample_counts,
      .storageImageSampleCounts = VK_SAMPLE_COUNT_1_BIT,
      .maxSampleMaskWords = 1,
      .timestampComputeAndGraphics = true,
      .timestampPeriod = 1,
      .maxClipDistances = 8,
      .maxCullDistances = 8,
      .maxCombinedClipAndCullDistances = 8,
      .discreteQueuePriorities = 1,
      .pointSizeRange = { 0.125, 255.875 },
      .lineWidthRange = { 0.0, 7.9921875 },
      .pointSizeGranularity = (1.0 / 8.0),
      .lineWidthGranularity = (1.0 / 128.0),
      .strictLines = false, /* FINISHME */
      .standardSampleLocations = true,
      .optimalBufferCopyOffsetAlignment = 128,
      .optimalBufferCopyRowPitchAlignment = 128,
      .nonCoherentAtomSize = 64,
   };

   *pProperties = (VkPhysicalDeviceProperties){
      .apiVersion = tu_physical_device_api_version(pdevice),
      .driverVersion = vk_get_driver_version(),
      .vendorID = 0, /* TODO */
      .deviceID = 0,
      .deviceType = VK_PHYSICAL_DEVICE_TYPE_INTEGRATED_GPU,
      .limits = limits,
      .sparseProperties = { 0 },
   };

   strcpy(pProperties->deviceName, pdevice->name);
   memcpy(pProperties->pipelineCacheUUID, pdevice->cache_uuid, VK_UUID_SIZE);
}

void
tu_GetPhysicalDeviceProperties2(VkPhysicalDevice physicalDevice,
                                VkPhysicalDeviceProperties2KHR *pProperties)
{
   TU_FROM_HANDLE(tu_physical_device, pdevice, physicalDevice);
   tu_GetPhysicalDeviceProperties(physicalDevice, &pProperties->properties);

   vk_foreach_struct(ext, pProperties->pNext)
   {
      switch (ext->sType) {
      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PUSH_DESCRIPTOR_PROPERTIES_KHR: {
         VkPhysicalDevicePushDescriptorPropertiesKHR *properties =
           (VkPhysicalDevicePushDescriptorPropertiesKHR *)ext;
         properties->maxPushDescriptors = MAX_PUSH_DESCRIPTORS;
         break;
      }
      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_ID_PROPERTIES_KHR: {
         VkPhysicalDeviceIDPropertiesKHR *properties =
           (VkPhysicalDeviceIDPropertiesKHR *)ext;
         memcpy(properties->driverUUID, pdevice->driver_uuid, VK_UUID_SIZE);
         memcpy(properties->deviceUUID, pdevice->device_uuid, VK_UUID_SIZE);
         properties->deviceLUIDValid = false;
         break;
      }
      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MULTIVIEW_PROPERTIES_KHR: {
         VkPhysicalDeviceMultiviewPropertiesKHR *properties =
           (VkPhysicalDeviceMultiviewPropertiesKHR *)ext;
         properties->maxMultiviewViewCount = MAX_VIEWS;
         properties->maxMultiviewInstanceIndex = INT_MAX;
         break;
      }
      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_POINT_CLIPPING_PROPERTIES_KHR: {
         VkPhysicalDevicePointClippingPropertiesKHR *properties =
           (VkPhysicalDevicePointClippingPropertiesKHR *)ext;
         properties->pointClippingBehavior =
           VK_POINT_CLIPPING_BEHAVIOR_ALL_CLIP_PLANES_KHR;
         break;
      }
      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MAINTENANCE_3_PROPERTIES: {
         VkPhysicalDeviceMaintenance3Properties *properties =
           (VkPhysicalDeviceMaintenance3Properties *)ext;
         /* Make sure everything is addressable by a signed 32-bit int, and
          * our largest descriptors are 96 bytes. */
         properties->maxPerSetDescriptors = (1ull << 31) / 96;
         /* Our buffer size fields allow only this much */
         properties->maxMemoryAllocationSize = 0xFFFFFFFFull;
         break;
      }
      default:
         break;
      }
   }
}

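/* A single queue family covers graphics, compute and transfer work;
 * timestampValidBits = 64 advertises full 64-bit timestamp queries.
 */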
static const VkQueueFamilyProperties tu_queue_family_properties = {
   .queueFlags =
     VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT | VK_QUEUE_TRANSFER_BIT,
   .queueCount = 1,
   .timestampValidBits = 64,
   .minImageTransferGranularity = (VkExtent3D){ 1, 1, 1 },
};

void
tu_GetPhysicalDeviceQueueFamilyProperties(
  VkPhysicalDevice physicalDevice,
  uint32_t *pQueueFamilyPropertyCount,
  VkQueueFamilyProperties *pQueueFamilyProperties)
{
   VK_OUTARRAY_MAKE(out, pQueueFamilyProperties, pQueueFamilyPropertyCount);

   vk_outarray_append(&out, p) { *p = tu_queue_family_properties; }
}

void
tu_GetPhysicalDeviceQueueFamilyProperties2(
  VkPhysicalDevice physicalDevice,
  uint32_t *pQueueFamilyPropertyCount,
  VkQueueFamilyProperties2KHR *pQueueFamilyProperties)
{
   VK_OUTARRAY_MAKE(out, pQueueFamilyProperties, pQueueFamilyPropertyCount);

   vk_outarray_append(&out, p) {
      p->queueFamilyProperties = tu_queue_family_properties;
   }
}

static uint64_t
tu_get_system_heap_size(void)
{
   struct sysinfo info;
   sysinfo(&info);

   uint64_t total_ram = (uint64_t)info.totalram * (uint64_t)info.mem_unit;

   /* We don't want to burn too much ram with the GPU. If the user has 4GiB
    * or less, we use at most half. If they have more than 4GiB, we use 3/4.
    */
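   /* For example, 8 GiB of system RAM yields a 6 GiB heap, while a 4 GiB
    * system yields a 2 GiB heap.
    */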
   uint64_t available_ram;
   if (total_ram <= 4ull * 1024ull * 1024ull * 1024ull)
      available_ram = total_ram / 2;
   else
      available_ram = total_ram * 3 / 4;

   return available_ram;
}

void
tu_GetPhysicalDeviceMemoryProperties(
  VkPhysicalDevice physicalDevice,
  VkPhysicalDeviceMemoryProperties *pMemoryProperties)
{
   pMemoryProperties->memoryHeapCount = 1;
   pMemoryProperties->memoryHeaps[0].size = tu_get_system_heap_size();
   pMemoryProperties->memoryHeaps[0].flags = VK_MEMORY_HEAP_DEVICE_LOCAL_BIT;

   pMemoryProperties->memoryTypeCount = 1;
   pMemoryProperties->memoryTypes[0].propertyFlags =
     VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT |
     VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT |
     VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
   pMemoryProperties->memoryTypes[0].heapIndex = 0;
}

void
tu_GetPhysicalDeviceMemoryProperties2(
  VkPhysicalDevice physicalDevice,
  VkPhysicalDeviceMemoryProperties2KHR *pMemoryProperties)
{
   return tu_GetPhysicalDeviceMemoryProperties(
     physicalDevice, &pMemoryProperties->memoryProperties);
}

static int
tu_queue_init(struct tu_device *device,
              struct tu_queue *queue,
              uint32_t queue_family_index,
              int idx,
              VkDeviceQueueCreateFlags flags)
{
   queue->_loader_data.loaderMagic = ICD_LOADER_MAGIC;
   queue->device = device;
   queue->queue_family_index = queue_family_index;
   queue->queue_idx = idx;
   queue->flags = flags;

   return VK_SUCCESS;
}

static void
tu_queue_finish(struct tu_queue *queue)
{
}

static int
tu_get_device_extension_index(const char *name)
{
   for (unsigned i = 0; i < TU_DEVICE_EXTENSION_COUNT; ++i) {
      if (strcmp(name, tu_device_extensions[i].extensionName) == 0)
         return i;
   }
   return -1;
}

VkResult
tu_CreateDevice(VkPhysicalDevice physicalDevice,
                const VkDeviceCreateInfo *pCreateInfo,
                const VkAllocationCallbacks *pAllocator,
                VkDevice *pDevice)
{
   TU_FROM_HANDLE(tu_physical_device, physical_device, physicalDevice);
   VkResult result;
   struct tu_device *device;

   /* Check enabled features */
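   /* VkPhysicalDeviceFeatures is a struct of VkBool32 members only, so the
    * supported and requested feature sets can be compared as flat arrays.
    */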
   if (pCreateInfo->pEnabledFeatures) {
      VkPhysicalDeviceFeatures supported_features;
      tu_GetPhysicalDeviceFeatures(physicalDevice, &supported_features);
      VkBool32 *supported_feature = (VkBool32 *)&supported_features;
      VkBool32 *enabled_feature = (VkBool32 *)pCreateInfo->pEnabledFeatures;
      unsigned num_features =
        sizeof(VkPhysicalDeviceFeatures) / sizeof(VkBool32);
      for (uint32_t i = 0; i < num_features; i++) {
         if (enabled_feature[i] && !supported_feature[i])
            return vk_error(physical_device->instance,
                            VK_ERROR_FEATURE_NOT_PRESENT);
      }
   }

   device = vk_zalloc2(&physical_device->instance->alloc,
                       pAllocator,
                       sizeof(*device),
                       8,
                       VK_SYSTEM_ALLOCATION_SCOPE_DEVICE);
   if (!device)
      return vk_error(physical_device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);

   device->_loader_data.loaderMagic = ICD_LOADER_MAGIC;
   device->instance = physical_device->instance;
   device->physical_device = physical_device;

   if (pAllocator)
      device->alloc = *pAllocator;
   else
      device->alloc = physical_device->instance->alloc;

   for (uint32_t i = 0; i < pCreateInfo->enabledExtensionCount; i++) {
      const char *ext_name = pCreateInfo->ppEnabledExtensionNames[i];
      int index = tu_get_device_extension_index(ext_name);
      if (index < 0 ||
          !physical_device->supported_extensions.extensions[index]) {
         vk_free(&device->alloc, device);
         return vk_error(physical_device->instance,
                         VK_ERROR_EXTENSION_NOT_PRESENT);
      }

      device->enabled_extensions.extensions[index] = true;
   }

   for (unsigned i = 0; i < pCreateInfo->queueCreateInfoCount; i++) {
      const VkDeviceQueueCreateInfo *queue_create =
        &pCreateInfo->pQueueCreateInfos[i];
      uint32_t qfi = queue_create->queueFamilyIndex;
      device->queues[qfi] =
        vk_alloc(&device->alloc,
                 queue_create->queueCount * sizeof(struct tu_queue),
                 8,
                 VK_SYSTEM_ALLOCATION_SCOPE_DEVICE);
      if (!device->queues[qfi]) {
         result = VK_ERROR_OUT_OF_HOST_MEMORY;
         goto fail;
      }

      memset(device->queues[qfi],
             0,
             queue_create->queueCount * sizeof(struct tu_queue));

      device->queue_count[qfi] = queue_create->queueCount;

      for (unsigned q = 0; q < queue_create->queueCount; q++) {
         result = tu_queue_init(
           device, &device->queues[qfi][q], qfi, q, queue_create->flags);
         if (result != VK_SUCCESS)
            goto fail;
      }
   }

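   /* Presumably this always-present in-memory pipeline cache is meant to
    * back pipeline creation when the application does not supply one.
    */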
   VkPipelineCacheCreateInfo ci;
   ci.sType = VK_STRUCTURE_TYPE_PIPELINE_CACHE_CREATE_INFO;
   ci.pNext = NULL;
   ci.flags = 0;
   ci.pInitialData = NULL;
   ci.initialDataSize = 0;
   VkPipelineCache pc;
   result =
     tu_CreatePipelineCache(tu_device_to_handle(device), &ci, NULL, &pc);
   if (result != VK_SUCCESS)
      goto fail;

   device->mem_cache = tu_pipeline_cache_from_handle(pc);

   *pDevice = tu_device_to_handle(device);
   return VK_SUCCESS;

fail:
   for (unsigned i = 0; i < TU_MAX_QUEUE_FAMILIES; i++) {
      for (unsigned q = 0; q < device->queue_count[i]; q++)
         tu_queue_finish(&device->queues[i][q]);
      if (device->queue_count[i])
         vk_free(&device->alloc, device->queues[i]);
   }

   vk_free(&device->alloc, device);
   return result;
}

void
tu_DestroyDevice(VkDevice _device, const VkAllocationCallbacks *pAllocator)
{
   TU_FROM_HANDLE(tu_device, device, _device);

   if (!device)
      return;

   for (unsigned i = 0; i < TU_MAX_QUEUE_FAMILIES; i++) {
      for (unsigned q = 0; q < device->queue_count[i]; q++)
         tu_queue_finish(&device->queues[i][q]);
      if (device->queue_count[i])
         vk_free(&device->alloc, device->queues[i]);
   }

   VkPipelineCache pc = tu_pipeline_cache_to_handle(device->mem_cache);
   tu_DestroyPipelineCache(tu_device_to_handle(device), pc, NULL);

   vk_free(&device->alloc, device);
}

VkResult
tu_EnumerateInstanceLayerProperties(uint32_t *pPropertyCount,
                                    VkLayerProperties *pProperties)
{
   *pPropertyCount = 0;
   return VK_SUCCESS;
}

VkResult
tu_EnumerateDeviceLayerProperties(VkPhysicalDevice physicalDevice,
                                  uint32_t *pPropertyCount,
                                  VkLayerProperties *pProperties)
{
   *pPropertyCount = 0;
   return VK_SUCCESS;
}

void
tu_GetDeviceQueue2(VkDevice _device,
                   const VkDeviceQueueInfo2 *pQueueInfo,
                   VkQueue *pQueue)
{
   TU_FROM_HANDLE(tu_device, device, _device);
   struct tu_queue *queue;

   queue =
     &device->queues[pQueueInfo->queueFamilyIndex][pQueueInfo->queueIndex];
   if (pQueueInfo->flags != queue->flags) {
      /* From the Vulkan 1.1.70 spec:
       *
       * "The queue returned by vkGetDeviceQueue2 must have the same
       * flags value from this structure as that used at device
       * creation time in a VkDeviceQueueCreateInfo instance. If no
       * matching flags were specified at device creation time then
       * pQueue will return VK_NULL_HANDLE."
       */
      *pQueue = VK_NULL_HANDLE;
      return;
   }

   *pQueue = tu_queue_to_handle(queue);
}

void
tu_GetDeviceQueue(VkDevice _device,
                  uint32_t queueFamilyIndex,
                  uint32_t queueIndex,
                  VkQueue *pQueue)
{
   const VkDeviceQueueInfo2 info =
     (VkDeviceQueueInfo2){ .sType = VK_STRUCTURE_TYPE_DEVICE_QUEUE_INFO_2,
                           .queueFamilyIndex = queueFamilyIndex,
                           .queueIndex = queueIndex };

   tu_GetDeviceQueue2(_device, &info, pQueue);
}

VkResult
tu_QueueSubmit(VkQueue _queue,
               uint32_t submitCount,
               const VkSubmitInfo *pSubmits,
               VkFence _fence)
{
   return VK_SUCCESS;
}

VkResult
tu_QueueWaitIdle(VkQueue _queue)
{
   return VK_SUCCESS;
}

VkResult
tu_DeviceWaitIdle(VkDevice _device)
{
   TU_FROM_HANDLE(tu_device, device, _device);

   for (unsigned i = 0; i < TU_MAX_QUEUE_FAMILIES; i++) {
      for (unsigned q = 0; q < device->queue_count[i]; q++) {
         tu_QueueWaitIdle(tu_queue_to_handle(&device->queues[i][q]));
      }
   }
   return VK_SUCCESS;
}

VkResult
tu_EnumerateInstanceExtensionProperties(const char *pLayerName,
                                        uint32_t *pPropertyCount,
                                        VkExtensionProperties *pProperties)
{
   VK_OUTARRAY_MAKE(out, pProperties, pPropertyCount);

   /* We support no layers */
   if (pLayerName)
      return vk_error(NULL, VK_ERROR_LAYER_NOT_PRESENT);

   for (int i = 0; i < TU_INSTANCE_EXTENSION_COUNT; i++) {
      if (tu_supported_instance_extensions.extensions[i]) {
         vk_outarray_append(&out, prop) { *prop = tu_instance_extensions[i]; }
      }
   }

   return vk_outarray_status(&out);
}

VkResult
tu_EnumerateDeviceExtensionProperties(VkPhysicalDevice physicalDevice,
                                      const char *pLayerName,
                                      uint32_t *pPropertyCount,
                                      VkExtensionProperties *pProperties)
{
   TU_FROM_HANDLE(tu_physical_device, device, physicalDevice);
   VK_OUTARRAY_MAKE(out, pProperties, pPropertyCount);

   /* We support no layers */
   if (pLayerName)
      return vk_error(NULL, VK_ERROR_LAYER_NOT_PRESENT);

   for (int i = 0; i < TU_DEVICE_EXTENSION_COUNT; i++) {
      if (device->supported_extensions.extensions[i]) {
         vk_outarray_append(&out, prop) { *prop = tu_device_extensions[i]; }
      }
   }

   return vk_outarray_status(&out);
}

PFN_vkVoidFunction
tu_GetInstanceProcAddr(VkInstance _instance, const char *pName)
{
   TU_FROM_HANDLE(tu_instance, instance, _instance);

   return tu_lookup_entrypoint_checked(pName,
                                       instance ? instance->api_version : 0,
                                       instance ? &instance->enabled_extensions
                                                : NULL,
                                       NULL);
}

/* The loader wants us to expose a second GetInstanceProcAddr function
 * to work around certain LD_PRELOAD issues seen in apps.
 */
PUBLIC
VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL
vk_icdGetInstanceProcAddr(VkInstance instance, const char *pName);

PUBLIC
VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL
vk_icdGetInstanceProcAddr(VkInstance instance, const char *pName)
{
   return tu_GetInstanceProcAddr(instance, pName);
}

PFN_vkVoidFunction
tu_GetDeviceProcAddr(VkDevice _device, const char *pName)
{
   TU_FROM_HANDLE(tu_device, device, _device);

   return tu_lookup_entrypoint_checked(pName,
                                       device->instance->api_version,
                                       &device->instance->enabled_extensions,
                                       &device->enabled_extensions);
}

static VkResult
tu_alloc_memory(struct tu_device *device,
                const VkMemoryAllocateInfo *pAllocateInfo,
                const VkAllocationCallbacks *pAllocator,
                VkDeviceMemory *pMem)
{
   struct tu_device_memory *mem;

   assert(pAllocateInfo->sType == VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO);

   if (pAllocateInfo->allocationSize == 0) {
      /* Apparently, this is allowed */
      *pMem = VK_NULL_HANDLE;
      return VK_SUCCESS;
   }

   mem = vk_alloc2(&device->alloc,
                   pAllocator,
                   sizeof(*mem),
                   8,
                   VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (mem == NULL)
      return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);

   mem->bo = fd_bo_new(device->physical_device->drm_device,
                       pAllocateInfo->allocationSize,
                       DRM_FREEDRENO_GEM_CACHE_WCOMBINE |
                         DRM_FREEDRENO_GEM_TYPE_KMEM);
   if (!mem->bo) {
      vk_free2(&device->alloc, pAllocator, mem);
      return vk_error(device->instance, VK_ERROR_OUT_OF_DEVICE_MEMORY);
   }
   mem->size = pAllocateInfo->allocationSize;
   mem->type_index = pAllocateInfo->memoryTypeIndex;

   mem->map = NULL;
   mem->user_ptr = NULL;

   *pMem = tu_device_memory_to_handle(mem);

   return VK_SUCCESS;
}

VkResult
tu_AllocateMemory(VkDevice _device,
                  const VkMemoryAllocateInfo *pAllocateInfo,
                  const VkAllocationCallbacks *pAllocator,
                  VkDeviceMemory *pMem)
{
   TU_FROM_HANDLE(tu_device, device, _device);
   return tu_alloc_memory(device, pAllocateInfo, pAllocator, pMem);
}

void
tu_FreeMemory(VkDevice _device,
              VkDeviceMemory _mem,
              const VkAllocationCallbacks *pAllocator)
{
   TU_FROM_HANDLE(tu_device, device, _device);
   TU_FROM_HANDLE(tu_device_memory, mem, _mem);

   if (mem == NULL)
      return;

   if (mem->bo)
      fd_bo_del(mem->bo);

   vk_free2(&device->alloc, pAllocator, mem);
}

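/* The BO is mapped in full on first use and the CPU pointer is cached in
 * mem->map; the requested offset is then applied to that cached pointer.
 * The arithmetic on a void * relies on the GNU extension Mesa builds with.
 */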
VkResult
tu_MapMemory(VkDevice _device,
             VkDeviceMemory _memory,
             VkDeviceSize offset,
             VkDeviceSize size,
             VkMemoryMapFlags flags,
             void **ppData)
{
   TU_FROM_HANDLE(tu_device, device, _device);
   TU_FROM_HANDLE(tu_device_memory, mem, _memory);

   if (mem == NULL) {
      *ppData = NULL;
      return VK_SUCCESS;
   }

   if (mem->user_ptr) {
      *ppData = mem->user_ptr;
   } else if (!mem->map) {
      *ppData = mem->map = fd_bo_map(mem->bo);
   } else {
      *ppData = mem->map;
   }

   if (*ppData) {
      *ppData += offset;
      return VK_SUCCESS;
   }

   return vk_error(device->instance, VK_ERROR_MEMORY_MAP_FAILED);
}

void
tu_UnmapMemory(VkDevice _device, VkDeviceMemory _memory)
{
   /* I do not see any unmapping done by the freedreno Gallium driver. */
}

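/* The single exposed memory type is host-coherent (see
 * tu_GetPhysicalDeviceMemoryProperties), so flushing and invalidating
 * mapped ranges can be no-ops.
 */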
VkResult
tu_FlushMappedMemoryRanges(VkDevice _device,
                           uint32_t memoryRangeCount,
                           const VkMappedMemoryRange *pMemoryRanges)
{
   return VK_SUCCESS;
}

VkResult
tu_InvalidateMappedMemoryRanges(VkDevice _device,
                                uint32_t memoryRangeCount,
                                const VkMappedMemoryRange *pMemoryRanges)
{
   return VK_SUCCESS;
}

void
tu_GetBufferMemoryRequirements(VkDevice _device,
                               VkBuffer _buffer,
                               VkMemoryRequirements *pMemoryRequirements)
{
   TU_FROM_HANDLE(tu_buffer, buffer, _buffer);

   pMemoryRequirements->memoryTypeBits = 1;
   pMemoryRequirements->alignment = 16;
   pMemoryRequirements->size =
     align64(buffer->size, pMemoryRequirements->alignment);
}

void
tu_GetBufferMemoryRequirements2(
  VkDevice device,
  const VkBufferMemoryRequirementsInfo2KHR *pInfo,
  VkMemoryRequirements2KHR *pMemoryRequirements)
{
   tu_GetBufferMemoryRequirements(
     device, pInfo->buffer, &pMemoryRequirements->memoryRequirements);
}

void
tu_GetImageMemoryRequirements(VkDevice _device,
                              VkImage _image,
                              VkMemoryRequirements *pMemoryRequirements)
{
   TU_FROM_HANDLE(tu_image, image, _image);

   pMemoryRequirements->memoryTypeBits = 1;
   pMemoryRequirements->size = image->size;
   pMemoryRequirements->alignment = image->alignment;
}

void
tu_GetImageMemoryRequirements2(VkDevice device,
                               const VkImageMemoryRequirementsInfo2KHR *pInfo,
                               VkMemoryRequirements2KHR *pMemoryRequirements)
{
   tu_GetImageMemoryRequirements(
     device, pInfo->image, &pMemoryRequirements->memoryRequirements);
}

void
tu_GetImageSparseMemoryRequirements(
  VkDevice device,
  VkImage image,
  uint32_t *pSparseMemoryRequirementCount,
  VkSparseImageMemoryRequirements *pSparseMemoryRequirements)
{
   stub();
}

void
tu_GetImageSparseMemoryRequirements2(
  VkDevice device,
  const VkImageSparseMemoryRequirementsInfo2KHR *pInfo,
  uint32_t *pSparseMemoryRequirementCount,
  VkSparseImageMemoryRequirements2KHR *pSparseMemoryRequirements)
{
   stub();
}

void
tu_GetDeviceMemoryCommitment(VkDevice device,
                             VkDeviceMemory memory,
                             VkDeviceSize *pCommittedMemoryInBytes)
{
   *pCommittedMemoryInBytes = 0;
}

VkResult
tu_BindBufferMemory2(VkDevice device,
                     uint32_t bindInfoCount,
                     const VkBindBufferMemoryInfoKHR *pBindInfos)
{
   return VK_SUCCESS;
}

VkResult
tu_BindBufferMemory(VkDevice device,
                    VkBuffer buffer,
                    VkDeviceMemory memory,
                    VkDeviceSize memoryOffset)
{
   const VkBindBufferMemoryInfoKHR info = {
      .sType = VK_STRUCTURE_TYPE_BIND_BUFFER_MEMORY_INFO_KHR,
      .buffer = buffer,
      .memory = memory,
      .memoryOffset = memoryOffset
   };

   return tu_BindBufferMemory2(device, 1, &info);
}

VkResult
tu_BindImageMemory2(VkDevice device,
                    uint32_t bindInfoCount,
                    const VkBindImageMemoryInfoKHR *pBindInfos)
{
   return VK_SUCCESS;
}

VkResult
tu_BindImageMemory(VkDevice device,
                   VkImage image,
                   VkDeviceMemory memory,
                   VkDeviceSize memoryOffset)
{
   const VkBindImageMemoryInfoKHR info = {
      .sType = VK_STRUCTURE_TYPE_BIND_IMAGE_MEMORY_INFO_KHR,
      .image = image,
      .memory = memory,
      .memoryOffset = memoryOffset
   };

   return tu_BindImageMemory2(device, 1, &info);
}

VkResult
tu_QueueBindSparse(VkQueue _queue,
                   uint32_t bindInfoCount,
                   const VkBindSparseInfo *pBindInfo,
                   VkFence _fence)
{
   return VK_SUCCESS;
}

VkResult
tu_CreateFence(VkDevice _device,
               const VkFenceCreateInfo *pCreateInfo,
               const VkAllocationCallbacks *pAllocator,
               VkFence *pFence)
{
   TU_FROM_HANDLE(tu_device, device, _device);

   struct tu_fence *fence = vk_alloc2(&device->alloc,
                                      pAllocator,
                                      sizeof(*fence),
                                      8,
                                      VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);

   if (!fence)
      return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);

   *pFence = tu_fence_to_handle(fence);

   return VK_SUCCESS;
}

void
tu_DestroyFence(VkDevice _device,
                VkFence _fence,
                const VkAllocationCallbacks *pAllocator)
{
   TU_FROM_HANDLE(tu_device, device, _device);
   TU_FROM_HANDLE(tu_fence, fence, _fence);

   if (!fence)
      return;

   vk_free2(&device->alloc, pAllocator, fence);
}

VkResult
tu_WaitForFences(VkDevice _device,
                 uint32_t fenceCount,
                 const VkFence *pFences,
                 VkBool32 waitAll,
                 uint64_t timeout)
{
   return VK_SUCCESS;
}

VkResult
tu_ResetFences(VkDevice _device, uint32_t fenceCount, const VkFence *pFences)
{
   return VK_SUCCESS;
}

VkResult
tu_GetFenceStatus(VkDevice _device, VkFence _fence)
{
   return VK_SUCCESS;
}

// Queue semaphore functions

VkResult
tu_CreateSemaphore(VkDevice _device,
                   const VkSemaphoreCreateInfo *pCreateInfo,
                   const VkAllocationCallbacks *pAllocator,
                   VkSemaphore *pSemaphore)
{
   TU_FROM_HANDLE(tu_device, device, _device);

   struct tu_semaphore *sem = vk_alloc2(&device->alloc,
                                        pAllocator,
                                        sizeof(*sem),
                                        8,
                                        VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (!sem)
      return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);

   *pSemaphore = tu_semaphore_to_handle(sem);
   return VK_SUCCESS;
}

void
tu_DestroySemaphore(VkDevice _device,
                    VkSemaphore _semaphore,
                    const VkAllocationCallbacks *pAllocator)
{
   TU_FROM_HANDLE(tu_device, device, _device);
   TU_FROM_HANDLE(tu_semaphore, sem, _semaphore);
   if (!_semaphore)
      return;

   vk_free2(&device->alloc, pAllocator, sem);
}

VkResult
tu_CreateEvent(VkDevice _device,
               const VkEventCreateInfo *pCreateInfo,
               const VkAllocationCallbacks *pAllocator,
               VkEvent *pEvent)
{
   TU_FROM_HANDLE(tu_device, device, _device);
   struct tu_event *event = vk_alloc2(&device->alloc,
                                      pAllocator,
                                      sizeof(*event),
                                      8,
                                      VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);

   if (!event)
      return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);

   *pEvent = tu_event_to_handle(event);

   return VK_SUCCESS;
}

void
tu_DestroyEvent(VkDevice _device,
                VkEvent _event,
                const VkAllocationCallbacks *pAllocator)
{
   TU_FROM_HANDLE(tu_device, device, _device);
   TU_FROM_HANDLE(tu_event, event, _event);

   if (!event)
      return;
   vk_free2(&device->alloc, pAllocator, event);
}

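/* Events are modeled as a single CPU-visible word: *event->map is 1 when
 * the event is set and 0 when reset. Note that tu_CreateEvent does not yet
 * allocate backing storage for event->map, so the entry points below assume
 * it is set up elsewhere.
 */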
VkResult
tu_GetEventStatus(VkDevice _device, VkEvent _event)
{
   TU_FROM_HANDLE(tu_event, event, _event);

   if (*event->map == 1)
      return VK_EVENT_SET;
   return VK_EVENT_RESET;
}

VkResult
tu_SetEvent(VkDevice _device, VkEvent _event)
{
   TU_FROM_HANDLE(tu_event, event, _event);
   *event->map = 1;

   return VK_SUCCESS;
}

VkResult
tu_ResetEvent(VkDevice _device, VkEvent _event)
{
   TU_FROM_HANDLE(tu_event, event, _event);
   *event->map = 0;

   return VK_SUCCESS;
}

VkResult
tu_CreateBuffer(VkDevice _device,
                const VkBufferCreateInfo *pCreateInfo,
                const VkAllocationCallbacks *pAllocator,
                VkBuffer *pBuffer)
{
   TU_FROM_HANDLE(tu_device, device, _device);
   struct tu_buffer *buffer;

   assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO);

   buffer = vk_alloc2(&device->alloc,
                      pAllocator,
                      sizeof(*buffer),
                      8,
                      VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (buffer == NULL)
      return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);

   buffer->size = pCreateInfo->size;
   buffer->usage = pCreateInfo->usage;
   buffer->flags = pCreateInfo->flags;

   *pBuffer = tu_buffer_to_handle(buffer);

   return VK_SUCCESS;
}

void
tu_DestroyBuffer(VkDevice _device,
                 VkBuffer _buffer,
                 const VkAllocationCallbacks *pAllocator)
{
   TU_FROM_HANDLE(tu_device, device, _device);
   TU_FROM_HANDLE(tu_buffer, buffer, _buffer);

   if (!buffer)
      return;

   vk_free2(&device->alloc, pAllocator, buffer);
}

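/* For 3D image views the layer count is the view's depth; for array views
 * it is the last addressable layer (base layer plus layer count).
 */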
static uint32_t
tu_surface_max_layer_count(struct tu_image_view *iview)
{
   return iview->type == VK_IMAGE_VIEW_TYPE_3D
            ? iview->extent.depth
            : (iview->base_layer + iview->layer_count);
}

VkResult
tu_CreateFramebuffer(VkDevice _device,
                     const VkFramebufferCreateInfo *pCreateInfo,
                     const VkAllocationCallbacks *pAllocator,
                     VkFramebuffer *pFramebuffer)
{
   TU_FROM_HANDLE(tu_device, device, _device);
   struct tu_framebuffer *framebuffer;

   assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO);

   size_t size =
     sizeof(*framebuffer) +
     sizeof(struct tu_attachment_info) * pCreateInfo->attachmentCount;
   framebuffer = vk_alloc2(
     &device->alloc, pAllocator, size, 8, VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (framebuffer == NULL)
      return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);

   framebuffer->attachment_count = pCreateInfo->attachmentCount;
   framebuffer->width = pCreateInfo->width;
   framebuffer->height = pCreateInfo->height;
   framebuffer->layers = pCreateInfo->layers;
   for (uint32_t i = 0; i < pCreateInfo->attachmentCount; i++) {
      VkImageView _iview = pCreateInfo->pAttachments[i];
      struct tu_image_view *iview = tu_image_view_from_handle(_iview);
      framebuffer->attachments[i].attachment = iview;

      framebuffer->width = MIN2(framebuffer->width, iview->extent.width);
      framebuffer->height = MIN2(framebuffer->height, iview->extent.height);
      framebuffer->layers =
        MIN2(framebuffer->layers, tu_surface_max_layer_count(iview));
   }

   *pFramebuffer = tu_framebuffer_to_handle(framebuffer);
   return VK_SUCCESS;
}

void
tu_DestroyFramebuffer(VkDevice _device,
                      VkFramebuffer _fb,
                      const VkAllocationCallbacks *pAllocator)
{
   TU_FROM_HANDLE(tu_device, device, _device);
   TU_FROM_HANDLE(tu_framebuffer, fb, _fb);

   if (!fb)
      return;
   vk_free2(&device->alloc, pAllocator, fb);
}

static void
tu_init_sampler(struct tu_device *device,
                struct tu_sampler *sampler,
                const VkSamplerCreateInfo *pCreateInfo)
{
}

VkResult
tu_CreateSampler(VkDevice _device,
                 const VkSamplerCreateInfo *pCreateInfo,
                 const VkAllocationCallbacks *pAllocator,
                 VkSampler *pSampler)
{
   TU_FROM_HANDLE(tu_device, device, _device);
   struct tu_sampler *sampler;

   assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_SAMPLER_CREATE_INFO);

   sampler = vk_alloc2(&device->alloc,
                       pAllocator,
                       sizeof(*sampler),
                       8,
                       VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (!sampler)
      return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);

   tu_init_sampler(device, sampler, pCreateInfo);
   *pSampler = tu_sampler_to_handle(sampler);

   return VK_SUCCESS;
}

void
tu_DestroySampler(VkDevice _device,
                  VkSampler _sampler,
                  const VkAllocationCallbacks *pAllocator)
{
   TU_FROM_HANDLE(tu_device, device, _device);
   TU_FROM_HANDLE(tu_sampler, sampler, _sampler);

   if (!sampler)
      return;
   vk_free2(&device->alloc, pAllocator, sampler);
}

/* vk_icd.h does not declare this function, so we declare it here to
 * suppress Wmissing-prototypes.
 */
PUBLIC VKAPI_ATTR VkResult VKAPI_CALL
vk_icdNegotiateLoaderICDInterfaceVersion(uint32_t *pSupportedVersion);

PUBLIC VKAPI_ATTR VkResult VKAPI_CALL
vk_icdNegotiateLoaderICDInterfaceVersion(uint32_t *pSupportedVersion)
{
   /* For the full details on loader interface versioning, see
    * <https://github.com/KhronosGroup/Vulkan-LoaderAndValidationLayers/blob/master/loader/LoaderAndLayerInterface.md>.
    * What follows is a condensed summary, to help you navigate the large and
    * confusing official doc.
    *
    * - Loader interface v0 is incompatible with later versions. We don't
    *   support it.
    *
    * - In loader interface v1:
    *   - The first ICD entrypoint called by the loader is
    *     vk_icdGetInstanceProcAddr(). The ICD must statically expose this
    *     entrypoint.
    *   - The ICD must statically expose no other Vulkan symbol unless it is
    *     linked with -Bsymbolic.
    *   - Each dispatchable Vulkan handle created by the ICD must be
    *     a pointer to a struct whose first member is VK_LOADER_DATA. The
    *     ICD must initialize VK_LOADER_DATA.loaderMagic to ICD_LOADER_MAGIC.
    *   - The loader implements vkCreate{PLATFORM}SurfaceKHR() and
    *     vkDestroySurfaceKHR(). The ICD must be capable of working with
    *     such loader-managed surfaces.
    *
    * - Loader interface v2 differs from v1 in:
    *   - The first ICD entrypoint called by the loader is
    *     vk_icdNegotiateLoaderICDInterfaceVersion(). The ICD must
    *     statically expose this entrypoint.
    *
    * - Loader interface v3 differs from v2 in:
    *   - The ICD must implement vkCreate{PLATFORM}SurfaceKHR(),
    *     vkDestroySurfaceKHR(), and other API which uses VkSurfaceKHR,
    *     because the loader no longer does so.
    */
   *pSupportedVersion = MIN2(*pSupportedVersion, 3u);
   return VK_SUCCESS;
}

void
tu_GetPhysicalDeviceExternalSemaphoreProperties(
  VkPhysicalDevice physicalDevice,
  const VkPhysicalDeviceExternalSemaphoreInfoKHR *pExternalSemaphoreInfo,
  VkExternalSemaphorePropertiesKHR *pExternalSemaphoreProperties)
{
   pExternalSemaphoreProperties->exportFromImportedHandleTypes = 0;
   pExternalSemaphoreProperties->compatibleHandleTypes = 0;
   pExternalSemaphoreProperties->externalSemaphoreFeatures = 0;
}

void
tu_GetPhysicalDeviceExternalFenceProperties(
  VkPhysicalDevice physicalDevice,
  const VkPhysicalDeviceExternalFenceInfoKHR *pExternalFenceInfo,
  VkExternalFencePropertiesKHR *pExternalFenceProperties)
{
   pExternalFenceProperties->exportFromImportedHandleTypes = 0;
   pExternalFenceProperties->compatibleHandleTypes = 0;
   pExternalFenceProperties->externalFenceFeatures = 0;
}

VkResult
tu_CreateDebugReportCallbackEXT(
  VkInstance _instance,
  const VkDebugReportCallbackCreateInfoEXT *pCreateInfo,
  const VkAllocationCallbacks *pAllocator,
  VkDebugReportCallbackEXT *pCallback)
{
   TU_FROM_HANDLE(tu_instance, instance, _instance);
   return vk_create_debug_report_callback(&instance->debug_report_callbacks,
                                          pCreateInfo,
                                          pAllocator,
                                          &instance->alloc,
                                          pCallback);
}

void
tu_DestroyDebugReportCallbackEXT(VkInstance _instance,
                                 VkDebugReportCallbackEXT _callback,
                                 const VkAllocationCallbacks *pAllocator)
{
   TU_FROM_HANDLE(tu_instance, instance, _instance);
   vk_destroy_debug_report_callback(&instance->debug_report_callbacks,
                                    _callback,
                                    pAllocator,
                                    &instance->alloc);
}

void
tu_DebugReportMessageEXT(VkInstance _instance,
                         VkDebugReportFlagsEXT flags,
                         VkDebugReportObjectTypeEXT objectType,
                         uint64_t object,
                         size_t location,
                         int32_t messageCode,
                         const char *pLayerPrefix,
                         const char *pMessage)
{
   TU_FROM_HANDLE(tu_instance, instance, _instance);
   vk_debug_report(&instance->debug_report_callbacks,
                   flags,
                   objectType,
                   object,
                   location,
                   messageCode,
                   pLayerPrefix,
                   pMessage);
}

void
tu_GetDeviceGroupPeerMemoryFeatures(
  VkDevice device,
  uint32_t heapIndex,
  uint32_t localDeviceIndex,
  uint32_t remoteDeviceIndex,
  VkPeerMemoryFeatureFlags *pPeerMemoryFeatures)
{
   assert(localDeviceIndex == remoteDeviceIndex);

   *pPeerMemoryFeatures = VK_PEER_MEMORY_FEATURE_COPY_SRC_BIT |
                          VK_PEER_MEMORY_FEATURE_COPY_DST_BIT |
                          VK_PEER_MEMORY_FEATURE_GENERIC_SRC_BIT |
                          VK_PEER_MEMORY_FEATURE_GENERIC_DST_BIT;
}