turnip: Fix result of vkEnumerate*LayerProperties
[mesa.git] / src / freedreno / vulkan / tu_device.c
/*
 * Copyright © 2016 Red Hat.
 * Copyright © 2016 Bas Nieuwenhuizen
 *
 * based in part on anv driver which is:
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "tu_private.h"
#include "util/debug.h"
#include "util/disk_cache.h"
#include "util/strtod.h"
#include "vk_format.h"
#include "vk_util.h"
#include <fcntl.h>
#include <stdbool.h>
#include <string.h>
#include <sys/sysinfo.h>
#include <unistd.h>
#include <xf86drm.h>

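/* The pipeline cache UUID is built from the Mesa build timestamp plus the
 * GPU family, so cached shaders are invalidated whenever either the driver
 * binary or the target GPU changes.
 */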
static int
tu_device_get_cache_uuid(uint16_t family, void *uuid)
{
   uint32_t mesa_timestamp;
   uint16_t f = family;
   memset(uuid, 0, VK_UUID_SIZE);
   if (!disk_cache_get_function_timestamp(tu_device_get_cache_uuid,
                                          &mesa_timestamp))
      return -1;

   memcpy(uuid, &mesa_timestamp, 4);
   memcpy((char *)uuid + 4, &f, 2);
   snprintf((char *)uuid + 6, VK_UUID_SIZE - 10, "tu");
   return 0;
}

static void
tu_get_driver_uuid(void *uuid)
{
   memset(uuid, 0, VK_UUID_SIZE);
}

static void
tu_get_device_uuid(void *uuid)
{
   stub();
}

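/* Opens the render node, checks that it is driven by the msm kernel driver,
 * and queries the GPU id and GMEM size through libdrm_freedreno before
 * accepting the device.
 */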
static VkResult
tu_physical_device_init(struct tu_physical_device *device,
                        struct tu_instance *instance,
                        drmDevicePtr drm_device)
{
   const char *path = drm_device->nodes[DRM_NODE_RENDER];
   VkResult result;
   drmVersionPtr version;
   int fd;
   int master_fd = -1;
   struct fd_pipe *tmp_pipe = NULL;
   uint64_t val;

   fd = open(path, O_RDWR | O_CLOEXEC);
   if (fd < 0) {
      if (instance->debug_flags & TU_DEBUG_STARTUP)
         tu_logi("Could not open device '%s'", path);

      return vk_error(instance, VK_ERROR_INCOMPATIBLE_DRIVER);
   }

   version = drmGetVersion(fd);
   if (!version) {
      close(fd);

      if (instance->debug_flags & TU_DEBUG_STARTUP)
         tu_logi("Could not get the kernel driver version for device '%s'",
                 path);

      return vk_errorf(instance,
                       VK_ERROR_INCOMPATIBLE_DRIVER,
                       "failed to get version %s: %m",
                       path);
   }

   if (strcmp(version->name, "msm")) {
      drmFreeVersion(version);
      if (master_fd != -1)
         close(master_fd);
      close(fd);

      if (instance->debug_flags & TU_DEBUG_STARTUP)
         tu_logi("Device '%s' is not using the msm kernel driver.", path);

      return VK_ERROR_INCOMPATIBLE_DRIVER;
   }
   drmFreeVersion(version);

   if (instance->debug_flags & TU_DEBUG_STARTUP)
      tu_logi("Found compatible device '%s'.", path);

   device->_loader_data.loaderMagic = ICD_LOADER_MAGIC;
   device->instance = instance;
   assert(strlen(path) < ARRAY_SIZE(device->path));
   strncpy(device->path, path, ARRAY_SIZE(device->path));

   if (instance->enabled_extensions.KHR_display) {
      master_fd = open(drm_device->nodes[DRM_NODE_PRIMARY], O_RDWR | O_CLOEXEC);
      if (master_fd >= 0) {
         /* TODO: free master_fd if accel is not working? */
      }
   }

   device->master_fd = master_fd;
   device->local_fd = fd;

   device->drm_device = fd_device_new_dup(fd);
   if (!device->drm_device) {
      result = vk_errorf(
         instance, VK_ERROR_INITIALIZATION_FAILED, "could not create the libdrm device");
      goto fail;
   }

   tmp_pipe = fd_pipe_new(device->drm_device, FD_PIPE_3D);
   if (!tmp_pipe) {
      result = vk_errorf(
         instance, VK_ERROR_INITIALIZATION_FAILED, "could not open the 3D pipe");
      goto fail;
   }

   if (fd_pipe_get_param(tmp_pipe, FD_GPU_ID, &val)) {
      result = vk_errorf(
         instance, VK_ERROR_INITIALIZATION_FAILED, "could not get GPU ID");
      goto fail;
   }
   device->gpu_id = val;

   if (fd_pipe_get_param(tmp_pipe, FD_GMEM_SIZE, &val)) {
      result = vk_errorf(
         instance, VK_ERROR_INITIALIZATION_FAILED, "could not get GMEM size");
      goto fail;
   }
   device->gmem_size = val;

   fd_pipe_del(tmp_pipe);
   tmp_pipe = NULL;

   memset(device->name, 0, sizeof(device->name));
   sprintf(device->name, "FD%d", device->gpu_id);

   switch (device->gpu_id) {
   case 530:
      break;
   default:
      if (instance->debug_flags & TU_DEBUG_STARTUP)
         tu_logi("Device '%s' is not supported.", device->name);
      result = vk_errorf(
         instance, VK_ERROR_INITIALIZATION_FAILED, "unsupported device");
      goto fail;
   }
   if (tu_device_get_cache_uuid(device->gpu_id, device->cache_uuid)) {
      result = vk_errorf(
         instance, VK_ERROR_INITIALIZATION_FAILED, "cannot generate UUID");
      goto fail;
   }

   /* The gpu id is already embedded in the uuid so we just pass "tu"
    * when creating the cache.
    */
   char buf[VK_UUID_SIZE * 2 + 1];
   disk_cache_format_hex_id(buf, device->cache_uuid, VK_UUID_SIZE * 2);
   device->disk_cache = disk_cache_create(device->name, buf, 0);

   fprintf(stderr,
           "WARNING: tu is not a conformant vulkan implementation, "
           "testing use only.\n");

   tu_get_driver_uuid(&device->driver_uuid);
   tu_get_device_uuid(&device->device_uuid);

   tu_fill_device_extension_table(device, &device->supported_extensions);

   return VK_SUCCESS;

fail:
   if (tmp_pipe)
      fd_pipe_del(tmp_pipe);
   if (device->drm_device)
      fd_device_del(device->drm_device);
   close(fd);
   if (master_fd != -1)
      close(master_fd);
   return result;
}

static void
tu_physical_device_finish(struct tu_physical_device *device)
{
   disk_cache_destroy(device->disk_cache);
   close(device->local_fd);
   if (device->master_fd != -1)
      close(device->master_fd);
}

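/* Fallback allocation callbacks used when the application does not provide
 * its own.  These forward to plain malloc/realloc/free and ignore the
 * requested alignment, on the assumption that malloc's default alignment
 * is sufficient for the driver's host allocations.
 */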
static void *
default_alloc_func(void *pUserData,
                   size_t size,
                   size_t align,
                   VkSystemAllocationScope allocationScope)
{
   return malloc(size);
}

static void *
default_realloc_func(void *pUserData,
                     void *pOriginal,
                     size_t size,
                     size_t align,
                     VkSystemAllocationScope allocationScope)
{
   return realloc(pOriginal, size);
}

static void
default_free_func(void *pUserData, void *pMemory)
{
   free(pMemory);
}

static const VkAllocationCallbacks default_alloc = {
   .pUserData = NULL,
   .pfnAllocation = default_alloc_func,
   .pfnReallocation = default_realloc_func,
   .pfnFree = default_free_func,
};

static const struct debug_control tu_debug_options[] = {
   { "startup", TU_DEBUG_STARTUP },
   { NULL, 0 },
};

const char *
tu_get_debug_option_name(int id)
{
   assert(id < ARRAY_SIZE(tu_debug_options) - 1);
   return tu_debug_options[id].string;
}

static int
tu_get_instance_extension_index(const char *name)
{
   for (unsigned i = 0; i < TU_INSTANCE_EXTENSION_COUNT; ++i) {
      if (strcmp(name, tu_instance_extensions[i].extensionName) == 0)
         return i;
   }
   return -1;
}

VkResult
tu_CreateInstance(const VkInstanceCreateInfo *pCreateInfo,
                  const VkAllocationCallbacks *pAllocator,
                  VkInstance *pInstance)
{
   struct tu_instance *instance;
   VkResult result;

   assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_INSTANCE_CREATE_INFO);

   uint32_t client_version;
   if (pCreateInfo->pApplicationInfo &&
       pCreateInfo->pApplicationInfo->apiVersion != 0) {
      client_version = pCreateInfo->pApplicationInfo->apiVersion;
   } else {
      tu_EnumerateInstanceVersion(&client_version);
   }

   instance = vk_zalloc2(&default_alloc,
                         pAllocator,
                         sizeof(*instance),
                         8,
                         VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
   if (!instance)
      return vk_error(NULL, VK_ERROR_OUT_OF_HOST_MEMORY);

   instance->_loader_data.loaderMagic = ICD_LOADER_MAGIC;

   if (pAllocator)
      instance->alloc = *pAllocator;
   else
      instance->alloc = default_alloc;

   instance->api_version = client_version;
   instance->physical_device_count = -1;

   instance->debug_flags =
      parse_debug_string(getenv("TU_DEBUG"), tu_debug_options);

   if (instance->debug_flags & TU_DEBUG_STARTUP)
      tu_logi("Created an instance");

   for (uint32_t i = 0; i < pCreateInfo->enabledExtensionCount; i++) {
      const char *ext_name = pCreateInfo->ppEnabledExtensionNames[i];
      int index = tu_get_instance_extension_index(ext_name);

      if (index < 0 || !tu_supported_instance_extensions.extensions[index]) {
         vk_free2(&default_alloc, pAllocator, instance);
         return vk_error(instance, VK_ERROR_EXTENSION_NOT_PRESENT);
      }

      instance->enabled_extensions.extensions[index] = true;
   }

   result = vk_debug_report_instance_init(&instance->debug_report_callbacks);
   if (result != VK_SUCCESS) {
      vk_free2(&default_alloc, pAllocator, instance);
      return vk_error(instance, result);
   }

   _mesa_locale_init();

   VG(VALGRIND_CREATE_MEMPOOL(instance, 0, false));

   *pInstance = tu_instance_to_handle(instance);

   return VK_SUCCESS;
}

void
tu_DestroyInstance(VkInstance _instance,
                   const VkAllocationCallbacks *pAllocator)
{
   TU_FROM_HANDLE(tu_instance, instance, _instance);

   if (!instance)
      return;

   for (int i = 0; i < instance->physical_device_count; ++i) {
      tu_physical_device_finish(instance->physical_devices + i);
   }

   VG(VALGRIND_DESTROY_MEMPOOL(instance));

   _mesa_locale_fini();

   vk_debug_report_instance_destroy(&instance->debug_report_callbacks);

   vk_free(&instance->alloc, instance);
}

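/* Scan the system's DRM devices for platform-bus render nodes and set up a
 * tu_physical_device for each one that tu_physical_device_init accepts.
 */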
static VkResult
tu_enumerate_devices(struct tu_instance *instance)
{
   /* TODO: Check for more devices? */
   drmDevicePtr devices[8];
   VkResult result = VK_ERROR_INCOMPATIBLE_DRIVER;
   int max_devices;

   instance->physical_device_count = 0;

   max_devices = drmGetDevices2(0, devices, ARRAY_SIZE(devices));

   if (instance->debug_flags & TU_DEBUG_STARTUP)
      tu_logi("Found %d drm nodes", max_devices);

   if (max_devices < 1)
      return vk_error(instance, VK_ERROR_INCOMPATIBLE_DRIVER);

   for (unsigned i = 0; i < (unsigned)max_devices; i++) {
      if (devices[i]->available_nodes & 1 << DRM_NODE_RENDER &&
          devices[i]->bustype == DRM_BUS_PLATFORM) {

         result = tu_physical_device_init(instance->physical_devices +
                                             instance->physical_device_count,
                                          instance,
                                          devices[i]);
         if (result == VK_SUCCESS)
            ++instance->physical_device_count;
         else if (result != VK_ERROR_INCOMPATIBLE_DRIVER)
            break;
      }
   }
   drmFreeDevices(devices, max_devices);

   return result;
}

VkResult
tu_EnumeratePhysicalDevices(VkInstance _instance,
                            uint32_t *pPhysicalDeviceCount,
                            VkPhysicalDevice *pPhysicalDevices)
{
   TU_FROM_HANDLE(tu_instance, instance, _instance);
   VkResult result;

   if (instance->physical_device_count < 0) {
      result = tu_enumerate_devices(instance);
      if (result != VK_SUCCESS && result != VK_ERROR_INCOMPATIBLE_DRIVER)
         return result;
   }

   if (!pPhysicalDevices) {
      *pPhysicalDeviceCount = instance->physical_device_count;
   } else {
      *pPhysicalDeviceCount =
         MIN2(*pPhysicalDeviceCount, instance->physical_device_count);
      for (unsigned i = 0; i < *pPhysicalDeviceCount; ++i)
         pPhysicalDevices[i] =
            tu_physical_device_to_handle(instance->physical_devices + i);
   }

   return *pPhysicalDeviceCount < instance->physical_device_count
             ? VK_INCOMPLETE
             : VK_SUCCESS;
}

VkResult
tu_EnumeratePhysicalDeviceGroups(
   VkInstance _instance,
   uint32_t *pPhysicalDeviceGroupCount,
   VkPhysicalDeviceGroupProperties *pPhysicalDeviceGroupProperties)
{
   TU_FROM_HANDLE(tu_instance, instance, _instance);
   VkResult result;

   if (instance->physical_device_count < 0) {
      result = tu_enumerate_devices(instance);
      if (result != VK_SUCCESS && result != VK_ERROR_INCOMPATIBLE_DRIVER)
         return result;
   }

   if (!pPhysicalDeviceGroupProperties) {
      *pPhysicalDeviceGroupCount = instance->physical_device_count;
   } else {
      *pPhysicalDeviceGroupCount =
         MIN2(*pPhysicalDeviceGroupCount, instance->physical_device_count);
      for (unsigned i = 0; i < *pPhysicalDeviceGroupCount; ++i) {
         pPhysicalDeviceGroupProperties[i].physicalDeviceCount = 1;
         pPhysicalDeviceGroupProperties[i].physicalDevices[0] =
            tu_physical_device_to_handle(instance->physical_devices + i);
         pPhysicalDeviceGroupProperties[i].subsetAllocation = false;
      }
   }
   return *pPhysicalDeviceGroupCount < instance->physical_device_count
             ? VK_INCOMPLETE
             : VK_SUCCESS;
}

void
tu_GetPhysicalDeviceFeatures(VkPhysicalDevice physicalDevice,
                             VkPhysicalDeviceFeatures *pFeatures)
{
   memset(pFeatures, 0, sizeof(*pFeatures));

   *pFeatures = (VkPhysicalDeviceFeatures){
      .robustBufferAccess = false,
      .fullDrawIndexUint32 = false,
      .imageCubeArray = false,
      .independentBlend = false,
      .geometryShader = false,
      .tessellationShader = false,
      .sampleRateShading = false,
      .dualSrcBlend = false,
      .logicOp = false,
      .multiDrawIndirect = false,
      .drawIndirectFirstInstance = false,
      .depthClamp = false,
      .depthBiasClamp = false,
      .fillModeNonSolid = false,
      .depthBounds = false,
      .wideLines = false,
      .largePoints = false,
      .alphaToOne = false,
      .multiViewport = false,
      .samplerAnisotropy = false,
      .textureCompressionETC2 = false,
      .textureCompressionASTC_LDR = false,
      .textureCompressionBC = false,
      .occlusionQueryPrecise = false,
      .pipelineStatisticsQuery = false,
      .vertexPipelineStoresAndAtomics = false,
      .fragmentStoresAndAtomics = false,
      .shaderTessellationAndGeometryPointSize = false,
      .shaderImageGatherExtended = false,
      .shaderStorageImageExtendedFormats = false,
      .shaderStorageImageMultisample = false,
      .shaderUniformBufferArrayDynamicIndexing = false,
      .shaderSampledImageArrayDynamicIndexing = false,
      .shaderStorageBufferArrayDynamicIndexing = false,
      .shaderStorageImageArrayDynamicIndexing = false,
      .shaderStorageImageReadWithoutFormat = false,
      .shaderStorageImageWriteWithoutFormat = false,
      .shaderClipDistance = false,
      .shaderCullDistance = false,
      .shaderFloat64 = false,
      .shaderInt64 = false,
      .shaderInt16 = false,
      .sparseBinding = false,
      .variableMultisampleRate = false,
      .inheritedQueries = false,
   };
}

void
tu_GetPhysicalDeviceFeatures2(VkPhysicalDevice physicalDevice,
                              VkPhysicalDeviceFeatures2KHR *pFeatures)
{
   vk_foreach_struct(ext, pFeatures->pNext)
   {
      switch (ext->sType) {
      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VARIABLE_POINTER_FEATURES_KHR: {
         VkPhysicalDeviceVariablePointerFeaturesKHR *features = (void *)ext;
         features->variablePointersStorageBuffer = false;
         features->variablePointers = false;
         break;
      }
      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MULTIVIEW_FEATURES_KHR: {
         VkPhysicalDeviceMultiviewFeaturesKHR *features =
            (VkPhysicalDeviceMultiviewFeaturesKHR *)ext;
         features->multiview = false;
         features->multiviewGeometryShader = false;
         features->multiviewTessellationShader = false;
         break;
      }
      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_DRAW_PARAMETER_FEATURES: {
         VkPhysicalDeviceShaderDrawParameterFeatures *features =
            (VkPhysicalDeviceShaderDrawParameterFeatures *)ext;
         features->shaderDrawParameters = false;
         break;
      }
      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROTECTED_MEMORY_FEATURES: {
         VkPhysicalDeviceProtectedMemoryFeatures *features =
            (VkPhysicalDeviceProtectedMemoryFeatures *)ext;
         features->protectedMemory = false;
         break;
      }
      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_16BIT_STORAGE_FEATURES: {
         VkPhysicalDevice16BitStorageFeatures *features =
            (VkPhysicalDevice16BitStorageFeatures *)ext;
         features->storageBuffer16BitAccess = false;
         features->uniformAndStorageBuffer16BitAccess = false;
         features->storagePushConstant16 = false;
         features->storageInputOutput16 = false;
         break;
      }
      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SAMPLER_YCBCR_CONVERSION_FEATURES: {
         VkPhysicalDeviceSamplerYcbcrConversionFeatures *features =
            (VkPhysicalDeviceSamplerYcbcrConversionFeatures *)ext;
         features->samplerYcbcrConversion = false;
         break;
      }
      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DESCRIPTOR_INDEXING_FEATURES_EXT: {
         VkPhysicalDeviceDescriptorIndexingFeaturesEXT *features =
            (VkPhysicalDeviceDescriptorIndexingFeaturesEXT *)ext;
         features->shaderInputAttachmentArrayDynamicIndexing = false;
         features->shaderUniformTexelBufferArrayDynamicIndexing = false;
         features->shaderStorageTexelBufferArrayDynamicIndexing = false;
         features->shaderUniformBufferArrayNonUniformIndexing = false;
         features->shaderSampledImageArrayNonUniformIndexing = false;
         features->shaderStorageBufferArrayNonUniformIndexing = false;
         features->shaderStorageImageArrayNonUniformIndexing = false;
         features->shaderInputAttachmentArrayNonUniformIndexing = false;
         features->shaderUniformTexelBufferArrayNonUniformIndexing = false;
         features->shaderStorageTexelBufferArrayNonUniformIndexing = false;
         features->descriptorBindingUniformBufferUpdateAfterBind = false;
         features->descriptorBindingSampledImageUpdateAfterBind = false;
         features->descriptorBindingStorageImageUpdateAfterBind = false;
         features->descriptorBindingStorageBufferUpdateAfterBind = false;
         features->descriptorBindingUniformTexelBufferUpdateAfterBind = false;
         features->descriptorBindingStorageTexelBufferUpdateAfterBind = false;
         features->descriptorBindingUpdateUnusedWhilePending = false;
         features->descriptorBindingPartiallyBound = false;
         features->descriptorBindingVariableDescriptorCount = false;
         features->runtimeDescriptorArray = false;
         break;
      }
      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_CONDITIONAL_RENDERING_FEATURES_EXT: {
         VkPhysicalDeviceConditionalRenderingFeaturesEXT *features =
            (VkPhysicalDeviceConditionalRenderingFeaturesEXT *)ext;
         features->conditionalRendering = false;
         features->inheritedConditionalRendering = false;
         break;
      }
      default:
         break;
      }
   }
   tu_GetPhysicalDeviceFeatures(physicalDevice, &pFeatures->features);
}

void
tu_GetPhysicalDeviceProperties(VkPhysicalDevice physicalDevice,
                               VkPhysicalDeviceProperties *pProperties)
{
   TU_FROM_HANDLE(tu_physical_device, pdevice, physicalDevice);
   VkSampleCountFlags sample_counts = 0xf;

   /* Make sure that the entire descriptor set is addressable with a signed
    * 32-bit int. So the sum of all limits scaled by descriptor size has to
    * be at most 2 GiB. A combined image/sampler counts as one of each. This
    * limit is for the pipeline layout, not for the set layout, but there is
    * no set limit, so we just set a pipeline limit. I don't think any app
    * is going to hit this soon.
    */
   size_t max_descriptor_set_size =
      ((1ull << 31) - 16 * MAX_DYNAMIC_BUFFERS) /
      (32 /* uniform buffer, 32 due to potential space wasted on alignment */ +
       32 /* storage buffer, 32 due to potential space wasted on alignment */ +
       32 /* sampler, largest when combined with image */ +
       64 /* sampled image */ + 64 /* storage image */);

   VkPhysicalDeviceLimits limits = {
      .maxImageDimension1D = (1 << 14),
      .maxImageDimension2D = (1 << 14),
      .maxImageDimension3D = (1 << 11),
      .maxImageDimensionCube = (1 << 14),
      .maxImageArrayLayers = (1 << 11),
      .maxTexelBufferElements = 128 * 1024 * 1024,
      .maxUniformBufferRange = UINT32_MAX,
      .maxStorageBufferRange = UINT32_MAX,
      .maxPushConstantsSize = MAX_PUSH_CONSTANTS_SIZE,
      .maxMemoryAllocationCount = UINT32_MAX,
      .maxSamplerAllocationCount = 64 * 1024,
      .bufferImageGranularity = 64, /* A cache line */
      .sparseAddressSpaceSize = 0xffffffffu, /* buffer max size */
      .maxBoundDescriptorSets = MAX_SETS,
      .maxPerStageDescriptorSamplers = max_descriptor_set_size,
      .maxPerStageDescriptorUniformBuffers = max_descriptor_set_size,
      .maxPerStageDescriptorStorageBuffers = max_descriptor_set_size,
      .maxPerStageDescriptorSampledImages = max_descriptor_set_size,
      .maxPerStageDescriptorStorageImages = max_descriptor_set_size,
      .maxPerStageDescriptorInputAttachments = max_descriptor_set_size,
      .maxPerStageResources = max_descriptor_set_size,
      .maxDescriptorSetSamplers = max_descriptor_set_size,
      .maxDescriptorSetUniformBuffers = max_descriptor_set_size,
      .maxDescriptorSetUniformBuffersDynamic = MAX_DYNAMIC_UNIFORM_BUFFERS,
      .maxDescriptorSetStorageBuffers = max_descriptor_set_size,
      .maxDescriptorSetStorageBuffersDynamic = MAX_DYNAMIC_STORAGE_BUFFERS,
      .maxDescriptorSetSampledImages = max_descriptor_set_size,
      .maxDescriptorSetStorageImages = max_descriptor_set_size,
      .maxDescriptorSetInputAttachments = max_descriptor_set_size,
      .maxVertexInputAttributes = 32,
      .maxVertexInputBindings = 32,
      .maxVertexInputAttributeOffset = 2047,
      .maxVertexInputBindingStride = 2048,
      .maxVertexOutputComponents = 128,
      .maxTessellationGenerationLevel = 64,
      .maxTessellationPatchSize = 32,
      .maxTessellationControlPerVertexInputComponents = 128,
      .maxTessellationControlPerVertexOutputComponents = 128,
      .maxTessellationControlPerPatchOutputComponents = 120,
      .maxTessellationControlTotalOutputComponents = 4096,
      .maxTessellationEvaluationInputComponents = 128,
      .maxTessellationEvaluationOutputComponents = 128,
      .maxGeometryShaderInvocations = 127,
      .maxGeometryInputComponents = 64,
      .maxGeometryOutputComponents = 128,
      .maxGeometryOutputVertices = 256,
      .maxGeometryTotalOutputComponents = 1024,
      .maxFragmentInputComponents = 128,
      .maxFragmentOutputAttachments = 8,
      .maxFragmentDualSrcAttachments = 1,
      .maxFragmentCombinedOutputResources = 8,
      .maxComputeSharedMemorySize = 32768,
      .maxComputeWorkGroupCount = { 65535, 65535, 65535 },
      .maxComputeWorkGroupInvocations = 2048,
      .maxComputeWorkGroupSize = { 2048, 2048, 2048 },
      .subPixelPrecisionBits = 4 /* FIXME */,
      .subTexelPrecisionBits = 4 /* FIXME */,
      .mipmapPrecisionBits = 4 /* FIXME */,
      .maxDrawIndexedIndexValue = UINT32_MAX,
      .maxDrawIndirectCount = UINT32_MAX,
      .maxSamplerLodBias = 16,
      .maxSamplerAnisotropy = 16,
      .maxViewports = MAX_VIEWPORTS,
      .maxViewportDimensions = { (1 << 14), (1 << 14) },
      .viewportBoundsRange = { INT16_MIN, INT16_MAX },
      .viewportSubPixelBits = 8,
      .minMemoryMapAlignment = 4096, /* A page */
      .minTexelBufferOffsetAlignment = 1,
      .minUniformBufferOffsetAlignment = 4,
      .minStorageBufferOffsetAlignment = 4,
      .minTexelOffset = -32,
      .maxTexelOffset = 31,
      .minTexelGatherOffset = -32,
      .maxTexelGatherOffset = 31,
      .minInterpolationOffset = -2,
      .maxInterpolationOffset = 2,
      .subPixelInterpolationOffsetBits = 8,
      .maxFramebufferWidth = (1 << 14),
      .maxFramebufferHeight = (1 << 14),
      .maxFramebufferLayers = (1 << 10),
      .framebufferColorSampleCounts = sample_counts,
      .framebufferDepthSampleCounts = sample_counts,
      .framebufferStencilSampleCounts = sample_counts,
      .framebufferNoAttachmentsSampleCounts = sample_counts,
      .maxColorAttachments = MAX_RTS,
      .sampledImageColorSampleCounts = sample_counts,
      .sampledImageIntegerSampleCounts = VK_SAMPLE_COUNT_1_BIT,
      .sampledImageDepthSampleCounts = sample_counts,
      .sampledImageStencilSampleCounts = sample_counts,
      .storageImageSampleCounts = VK_SAMPLE_COUNT_1_BIT,
      .maxSampleMaskWords = 1,
      .timestampComputeAndGraphics = true,
      .timestampPeriod = 1,
      .maxClipDistances = 8,
      .maxCullDistances = 8,
      .maxCombinedClipAndCullDistances = 8,
      .discreteQueuePriorities = 1,
      .pointSizeRange = { 0.125, 255.875 },
      .lineWidthRange = { 0.0, 7.9921875 },
      .pointSizeGranularity = (1.0 / 8.0),
      .lineWidthGranularity = (1.0 / 128.0),
      .strictLines = false, /* FINISHME */
      .standardSampleLocations = true,
      .optimalBufferCopyOffsetAlignment = 128,
      .optimalBufferCopyRowPitchAlignment = 128,
      .nonCoherentAtomSize = 64,
   };

   *pProperties = (VkPhysicalDeviceProperties){
      .apiVersion = tu_physical_device_api_version(pdevice),
      .driverVersion = vk_get_driver_version(),
      .vendorID = 0, /* TODO */
      .deviceID = 0,
      .deviceType = VK_PHYSICAL_DEVICE_TYPE_INTEGRATED_GPU,
      .limits = limits,
      .sparseProperties = { 0 },
   };

   strcpy(pProperties->deviceName, pdevice->name);
   memcpy(pProperties->pipelineCacheUUID, pdevice->cache_uuid, VK_UUID_SIZE);
}

void
tu_GetPhysicalDeviceProperties2(VkPhysicalDevice physicalDevice,
                                VkPhysicalDeviceProperties2KHR *pProperties)
{
   TU_FROM_HANDLE(tu_physical_device, pdevice, physicalDevice);
   tu_GetPhysicalDeviceProperties(physicalDevice, &pProperties->properties);

   vk_foreach_struct(ext, pProperties->pNext)
   {
      switch (ext->sType) {
      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PUSH_DESCRIPTOR_PROPERTIES_KHR: {
         VkPhysicalDevicePushDescriptorPropertiesKHR *properties =
            (VkPhysicalDevicePushDescriptorPropertiesKHR *)ext;
         properties->maxPushDescriptors = MAX_PUSH_DESCRIPTORS;
         break;
      }
      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_ID_PROPERTIES_KHR: {
         VkPhysicalDeviceIDPropertiesKHR *properties =
            (VkPhysicalDeviceIDPropertiesKHR *)ext;
         memcpy(properties->driverUUID, pdevice->driver_uuid, VK_UUID_SIZE);
         memcpy(properties->deviceUUID, pdevice->device_uuid, VK_UUID_SIZE);
         properties->deviceLUIDValid = false;
         break;
      }
      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MULTIVIEW_PROPERTIES_KHR: {
         VkPhysicalDeviceMultiviewPropertiesKHR *properties =
            (VkPhysicalDeviceMultiviewPropertiesKHR *)ext;
         properties->maxMultiviewViewCount = MAX_VIEWS;
         properties->maxMultiviewInstanceIndex = INT_MAX;
         break;
      }
      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_POINT_CLIPPING_PROPERTIES_KHR: {
         VkPhysicalDevicePointClippingPropertiesKHR *properties =
            (VkPhysicalDevicePointClippingPropertiesKHR *)ext;
         properties->pointClippingBehavior =
            VK_POINT_CLIPPING_BEHAVIOR_ALL_CLIP_PLANES_KHR;
         break;
      }
      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MAINTENANCE_3_PROPERTIES: {
         VkPhysicalDeviceMaintenance3Properties *properties =
            (VkPhysicalDeviceMaintenance3Properties *)ext;
         /* Make sure everything is addressable by a signed 32-bit int, and
          * our largest descriptors are 96 bytes.
          */
         properties->maxPerSetDescriptors = (1ull << 31) / 96;
         /* Our buffer size fields allow only this much */
         properties->maxMemoryAllocationSize = 0xFFFFFFFFull;
         break;
      }
      default:
         break;
      }
   }
}

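/* turnip exposes a single queue family combining graphics, compute and
 * transfer.  pQueueFamilyProperties is an array of pointers so that the
 * vkGetPhysicalDeviceQueueFamilyProperties2 path can reuse this helper.
 */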
static void
tu_get_physical_device_queue_family_properties(
   struct tu_physical_device *pdevice,
   uint32_t *pCount,
   VkQueueFamilyProperties **pQueueFamilyProperties)
{
   int num_queue_families = 1;
   int idx;
   if (pQueueFamilyProperties == NULL) {
      *pCount = num_queue_families;
      return;
   }

   if (!*pCount)
      return;

   idx = 0;
   if (*pCount >= 1) {
      *pQueueFamilyProperties[idx] = (VkQueueFamilyProperties){
         .queueFlags =
            VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT | VK_QUEUE_TRANSFER_BIT,
         .queueCount = 1,
         .timestampValidBits = 64,
         .minImageTransferGranularity = (VkExtent3D){ 1, 1, 1 },
      };
      idx++;
   }

   *pCount = idx;
}

void
tu_GetPhysicalDeviceQueueFamilyProperties(
   VkPhysicalDevice physicalDevice,
   uint32_t *pCount,
   VkQueueFamilyProperties *pQueueFamilyProperties)
{
   TU_FROM_HANDLE(tu_physical_device, pdevice, physicalDevice);
   if (!pQueueFamilyProperties) {
      tu_get_physical_device_queue_family_properties(pdevice, pCount, NULL);
      return;
   }
   VkQueueFamilyProperties *properties[] = {
      pQueueFamilyProperties + 0,
   };
   tu_get_physical_device_queue_family_properties(pdevice, pCount, properties);
   assert(*pCount <= 1);
}

void
tu_GetPhysicalDeviceQueueFamilyProperties2(
   VkPhysicalDevice physicalDevice,
   uint32_t *pCount,
   VkQueueFamilyProperties2KHR *pQueueFamilyProperties)
{
   TU_FROM_HANDLE(tu_physical_device, pdevice, physicalDevice);
   if (!pQueueFamilyProperties) {
      tu_get_physical_device_queue_family_properties(pdevice, pCount, NULL);
      return;
   }
   VkQueueFamilyProperties *properties[] = {
      &pQueueFamilyProperties[0].queueFamilyProperties,
   };
   tu_get_physical_device_queue_family_properties(pdevice, pCount, properties);
   assert(*pCount <= 1);
}

static uint64_t
tu_get_system_heap_size(void)
{
   struct sysinfo info;
   sysinfo(&info);

   uint64_t total_ram = (uint64_t)info.totalram * (uint64_t)info.mem_unit;

   /* We don't want to burn too much ram with the GPU. If the user has 4GiB
    * or less, we use at most half. If they have more than 4GiB, we use 3/4.
    */
   uint64_t available_ram;
   if (total_ram <= 4ull * 1024ull * 1024ull * 1024ull)
      available_ram = total_ram / 2;
   else
      available_ram = total_ram * 3 / 4;

   return available_ram;
}

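/* Advertise one heap with a single device-local, host-visible and
 * host-coherent memory type: on these SoCs the GPU shares system memory
 * with the CPU.
 */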
void
tu_GetPhysicalDeviceMemoryProperties(
   VkPhysicalDevice physicalDevice,
   VkPhysicalDeviceMemoryProperties *pMemoryProperties)
{
   pMemoryProperties->memoryHeapCount = 1;
   pMemoryProperties->memoryHeaps[0].size = tu_get_system_heap_size();
   pMemoryProperties->memoryHeaps[0].flags = VK_MEMORY_HEAP_DEVICE_LOCAL_BIT;

   pMemoryProperties->memoryTypeCount = 1;
   pMemoryProperties->memoryTypes[0].propertyFlags =
      VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT |
      VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT |
      VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
   pMemoryProperties->memoryTypes[0].heapIndex = 0;
}

void
tu_GetPhysicalDeviceMemoryProperties2(
   VkPhysicalDevice physicalDevice,
   VkPhysicalDeviceMemoryProperties2KHR *pMemoryProperties)
{
   tu_GetPhysicalDeviceMemoryProperties(
      physicalDevice, &pMemoryProperties->memoryProperties);
}

static VkResult
tu_queue_init(struct tu_device *device,
              struct tu_queue *queue,
              uint32_t queue_family_index,
              int idx,
              VkDeviceQueueCreateFlags flags)
{
   queue->_loader_data.loaderMagic = ICD_LOADER_MAGIC;
   queue->device = device;
   queue->queue_family_index = queue_family_index;
   queue->queue_idx = idx;
   queue->flags = flags;

   return VK_SUCCESS;
}

static void
tu_queue_finish(struct tu_queue *queue)
{
}

static int
tu_get_device_extension_index(const char *name)
{
   for (unsigned i = 0; i < TU_DEVICE_EXTENSION_COUNT; ++i) {
      if (strcmp(name, tu_device_extensions[i].extensionName) == 0)
         return i;
   }
   return -1;
}

VkResult
tu_CreateDevice(VkPhysicalDevice physicalDevice,
                const VkDeviceCreateInfo *pCreateInfo,
                const VkAllocationCallbacks *pAllocator,
                VkDevice *pDevice)
{
   TU_FROM_HANDLE(tu_physical_device, physical_device, physicalDevice);
   VkResult result;
   struct tu_device *device;

   /* Check enabled features */
   if (pCreateInfo->pEnabledFeatures) {
      VkPhysicalDeviceFeatures supported_features;
      tu_GetPhysicalDeviceFeatures(physicalDevice, &supported_features);
      VkBool32 *supported_feature = (VkBool32 *)&supported_features;
      VkBool32 *enabled_feature = (VkBool32 *)pCreateInfo->pEnabledFeatures;
      unsigned num_features =
         sizeof(VkPhysicalDeviceFeatures) / sizeof(VkBool32);
      for (uint32_t i = 0; i < num_features; i++) {
         if (enabled_feature[i] && !supported_feature[i])
            return vk_error(physical_device->instance,
                            VK_ERROR_FEATURE_NOT_PRESENT);
      }
   }

   device = vk_zalloc2(&physical_device->instance->alloc,
                       pAllocator,
                       sizeof(*device),
                       8,
                       VK_SYSTEM_ALLOCATION_SCOPE_DEVICE);
   if (!device)
      return vk_error(physical_device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);

   device->_loader_data.loaderMagic = ICD_LOADER_MAGIC;
   device->instance = physical_device->instance;
   device->physical_device = physical_device;

   if (pAllocator)
      device->alloc = *pAllocator;
   else
      device->alloc = physical_device->instance->alloc;

   for (uint32_t i = 0; i < pCreateInfo->enabledExtensionCount; i++) {
      const char *ext_name = pCreateInfo->ppEnabledExtensionNames[i];
      int index = tu_get_device_extension_index(ext_name);
      if (index < 0 ||
          !physical_device->supported_extensions.extensions[index]) {
         vk_free(&device->alloc, device);
         return vk_error(physical_device->instance,
                         VK_ERROR_EXTENSION_NOT_PRESENT);
      }

      device->enabled_extensions.extensions[index] = true;
   }

   for (unsigned i = 0; i < pCreateInfo->queueCreateInfoCount; i++) {
      const VkDeviceQueueCreateInfo *queue_create =
         &pCreateInfo->pQueueCreateInfos[i];
      uint32_t qfi = queue_create->queueFamilyIndex;
      device->queues[qfi] =
         vk_alloc(&device->alloc,
                  queue_create->queueCount * sizeof(struct tu_queue),
                  8,
                  VK_SYSTEM_ALLOCATION_SCOPE_DEVICE);
      if (!device->queues[qfi]) {
         result = VK_ERROR_OUT_OF_HOST_MEMORY;
         goto fail;
      }

      memset(device->queues[qfi],
             0,
             queue_create->queueCount * sizeof(struct tu_queue));

      device->queue_count[qfi] = queue_create->queueCount;

      for (unsigned q = 0; q < queue_create->queueCount; q++) {
         result = tu_queue_init(
            device, &device->queues[qfi][q], qfi, q, queue_create->flags);
         if (result != VK_SUCCESS)
            goto fail;
      }
   }

   VkPipelineCacheCreateInfo ci;
   ci.sType = VK_STRUCTURE_TYPE_PIPELINE_CACHE_CREATE_INFO;
   ci.pNext = NULL;
   ci.flags = 0;
   ci.pInitialData = NULL;
   ci.initialDataSize = 0;
   VkPipelineCache pc;
   result =
      tu_CreatePipelineCache(tu_device_to_handle(device), &ci, NULL, &pc);
   if (result != VK_SUCCESS)
      goto fail;

   device->mem_cache = tu_pipeline_cache_from_handle(pc);

   *pDevice = tu_device_to_handle(device);
   return VK_SUCCESS;

fail:
   for (unsigned i = 0; i < TU_MAX_QUEUE_FAMILIES; i++) {
      for (unsigned q = 0; q < device->queue_count[i]; q++)
         tu_queue_finish(&device->queues[i][q]);
      if (device->queue_count[i])
         vk_free(&device->alloc, device->queues[i]);
   }

   vk_free(&device->alloc, device);
   return result;
}

void
tu_DestroyDevice(VkDevice _device, const VkAllocationCallbacks *pAllocator)
{
   TU_FROM_HANDLE(tu_device, device, _device);

   if (!device)
      return;

   for (unsigned i = 0; i < TU_MAX_QUEUE_FAMILIES; i++) {
      for (unsigned q = 0; q < device->queue_count[i]; q++)
         tu_queue_finish(&device->queues[i][q]);
      if (device->queue_count[i])
         vk_free(&device->alloc, device->queues[i]);
   }

   VkPipelineCache pc = tu_pipeline_cache_to_handle(device->mem_cache);
   tu_DestroyPipelineCache(tu_device_to_handle(device), pc, NULL);

   vk_free(&device->alloc, device);
}

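/* turnip implements no layers of its own.  Per the spec, enumerating zero
 * layers must still succeed, so these write a zero count and return
 * VK_SUCCESS instead of an error.
 */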
VkResult
tu_EnumerateInstanceLayerProperties(uint32_t *pPropertyCount,
                                    VkLayerProperties *pProperties)
{
   *pPropertyCount = 0;
   return VK_SUCCESS;
}

VkResult
tu_EnumerateDeviceLayerProperties(VkPhysicalDevice physicalDevice,
                                  uint32_t *pPropertyCount,
                                  VkLayerProperties *pProperties)
{
   *pPropertyCount = 0;
   return VK_SUCCESS;
}

void
tu_GetDeviceQueue2(VkDevice _device,
                   const VkDeviceQueueInfo2 *pQueueInfo,
                   VkQueue *pQueue)
{
   TU_FROM_HANDLE(tu_device, device, _device);
   struct tu_queue *queue;

   queue =
      &device->queues[pQueueInfo->queueFamilyIndex][pQueueInfo->queueIndex];
   if (pQueueInfo->flags != queue->flags) {
      /* From the Vulkan 1.1.70 spec:
       *
       * "The queue returned by vkGetDeviceQueue2 must have the same
       * flags value from this structure as that used at device
       * creation time in a VkDeviceQueueCreateInfo instance. If no
       * matching flags were specified at device creation time then
       * pQueue will return VK_NULL_HANDLE."
       */
      *pQueue = VK_NULL_HANDLE;
      return;
   }

   *pQueue = tu_queue_to_handle(queue);
}

void
tu_GetDeviceQueue(VkDevice _device,
                  uint32_t queueFamilyIndex,
                  uint32_t queueIndex,
                  VkQueue *pQueue)
{
   const VkDeviceQueueInfo2 info =
      (VkDeviceQueueInfo2){ .sType = VK_STRUCTURE_TYPE_DEVICE_QUEUE_INFO_2,
                            .queueFamilyIndex = queueFamilyIndex,
                            .queueIndex = queueIndex };

   tu_GetDeviceQueue2(_device, &info, pQueue);
}

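/* Command submission is not wired up yet: these entrypoints are stubs that
 * report success so the rest of the API can be exercised.
 */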
VkResult
tu_QueueSubmit(VkQueue _queue,
               uint32_t submitCount,
               const VkSubmitInfo *pSubmits,
               VkFence _fence)
{
   return VK_SUCCESS;
}

VkResult
tu_QueueWaitIdle(VkQueue _queue)
{
   return VK_SUCCESS;
}

VkResult
tu_DeviceWaitIdle(VkDevice _device)
{
   TU_FROM_HANDLE(tu_device, device, _device);

   for (unsigned i = 0; i < TU_MAX_QUEUE_FAMILIES; i++) {
      for (unsigned q = 0; q < device->queue_count[i]; q++) {
         tu_QueueWaitIdle(tu_queue_to_handle(&device->queues[i][q]));
      }
   }
   return VK_SUCCESS;
}

VkResult
tu_EnumerateInstanceExtensionProperties(const char *pLayerName,
                                        uint32_t *pPropertyCount,
                                        VkExtensionProperties *pProperties)
{
   VK_OUTARRAY_MAKE(out, pProperties, pPropertyCount);

   for (int i = 0; i < TU_INSTANCE_EXTENSION_COUNT; i++) {
      if (tu_supported_instance_extensions.extensions[i]) {
         vk_outarray_append(&out, prop) { *prop = tu_instance_extensions[i]; }
      }
   }

   return vk_outarray_status(&out);
}

VkResult
tu_EnumerateDeviceExtensionProperties(VkPhysicalDevice physicalDevice,
                                      const char *pLayerName,
                                      uint32_t *pPropertyCount,
                                      VkExtensionProperties *pProperties)
{
   TU_FROM_HANDLE(tu_physical_device, device, physicalDevice);
   VK_OUTARRAY_MAKE(out, pProperties, pPropertyCount);

   for (int i = 0; i < TU_DEVICE_EXTENSION_COUNT; i++) {
      if (device->supported_extensions.extensions[i]) {
         vk_outarray_append(&out, prop) { *prop = tu_device_extensions[i]; }
      }
   }

   return vk_outarray_status(&out);
}

PFN_vkVoidFunction
tu_GetInstanceProcAddr(VkInstance _instance, const char *pName)
{
   TU_FROM_HANDLE(tu_instance, instance, _instance);

   return tu_lookup_entrypoint_checked(pName,
                                       instance ? instance->api_version : 0,
                                       instance ? &instance->enabled_extensions
                                                : NULL,
                                       NULL);
}

/* The loader wants us to expose a second GetInstanceProcAddr function
 * to work around certain LD_PRELOAD issues seen in apps.
 */
PUBLIC
VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL
vk_icdGetInstanceProcAddr(VkInstance instance, const char *pName);

PUBLIC
VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL
vk_icdGetInstanceProcAddr(VkInstance instance, const char *pName)
{
   return tu_GetInstanceProcAddr(instance, pName);
}

PFN_vkVoidFunction
tu_GetDeviceProcAddr(VkDevice _device, const char *pName)
{
   TU_FROM_HANDLE(tu_device, device, _device);

   return tu_lookup_entrypoint_checked(pName,
                                       device->instance->api_version,
                                       &device->instance->enabled_extensions,
                                       &device->enabled_extensions);
}

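/* Device memory is backed by GEM buffer objects allocated through
 * libdrm_freedreno, using write-combined mappings of kernel memory to match
 * the single coherent memory type advertised above.
 */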
static VkResult
tu_alloc_memory(struct tu_device *device,
                const VkMemoryAllocateInfo *pAllocateInfo,
                const VkAllocationCallbacks *pAllocator,
                VkDeviceMemory *pMem)
{
   struct tu_device_memory *mem;

   assert(pAllocateInfo->sType == VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO);

   if (pAllocateInfo->allocationSize == 0) {
      /* Apparently, this is allowed */
      *pMem = VK_NULL_HANDLE;
      return VK_SUCCESS;
   }

   mem = vk_alloc2(&device->alloc,
                   pAllocator,
                   sizeof(*mem),
                   8,
                   VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (mem == NULL)
      return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);

   mem->bo = fd_bo_new(device->physical_device->drm_device,
                       pAllocateInfo->allocationSize,
                       DRM_FREEDRENO_GEM_CACHE_WCOMBINE |
                          DRM_FREEDRENO_GEM_TYPE_KMEM);
   if (!mem->bo) {
      vk_free2(&device->alloc, pAllocator, mem);
      return vk_error(device->instance, VK_ERROR_OUT_OF_DEVICE_MEMORY);
   }
   mem->size = pAllocateInfo->allocationSize;
   mem->type_index = pAllocateInfo->memoryTypeIndex;

   mem->map = NULL;
   mem->user_ptr = NULL;

   *pMem = tu_device_memory_to_handle(mem);

   return VK_SUCCESS;
}

VkResult
tu_AllocateMemory(VkDevice _device,
                  const VkMemoryAllocateInfo *pAllocateInfo,
                  const VkAllocationCallbacks *pAllocator,
                  VkDeviceMemory *pMem)
{
   TU_FROM_HANDLE(tu_device, device, _device);
   return tu_alloc_memory(device, pAllocateInfo, pAllocator, pMem);
}

void
tu_FreeMemory(VkDevice _device,
              VkDeviceMemory _mem,
              const VkAllocationCallbacks *pAllocator)
{
   TU_FROM_HANDLE(tu_device, device, _device);
   TU_FROM_HANDLE(tu_device_memory, mem, _mem);

   if (mem == NULL)
      return;

   if (mem->bo)
      fd_bo_del(mem->bo);

   vk_free2(&device->alloc, pAllocator, mem);
}

VkResult
tu_MapMemory(VkDevice _device,
             VkDeviceMemory _memory,
             VkDeviceSize offset,
             VkDeviceSize size,
             VkMemoryMapFlags flags,
             void **ppData)
{
   TU_FROM_HANDLE(tu_device, device, _device);
   TU_FROM_HANDLE(tu_device_memory, mem, _memory);

   if (mem == NULL) {
      *ppData = NULL;
      return VK_SUCCESS;
   }

   if (mem->user_ptr)
      *ppData = mem->user_ptr;
   else if (!mem->map)
      *ppData = mem->map = fd_bo_map(mem->bo);
   else
      *ppData = mem->map;

   if (*ppData) {
      *ppData = (char *)*ppData + offset;
      return VK_SUCCESS;
   }

   return vk_error(device->instance, VK_ERROR_MEMORY_MAP_FAILED);
}

void
tu_UnmapMemory(VkDevice _device, VkDeviceMemory _memory)
{
   /* I do not see any unmapping done by the freedreno Gallium driver. */
}

VkResult
tu_FlushMappedMemoryRanges(VkDevice _device,
                           uint32_t memoryRangeCount,
                           const VkMappedMemoryRange *pMemoryRanges)
{
   return VK_SUCCESS;
}

VkResult
tu_InvalidateMappedMemoryRanges(VkDevice _device,
                                uint32_t memoryRangeCount,
                                const VkMappedMemoryRange *pMemoryRanges)
{
   return VK_SUCCESS;
}

void
tu_GetBufferMemoryRequirements(VkDevice _device,
                               VkBuffer _buffer,
                               VkMemoryRequirements *pMemoryRequirements)
{
   TU_FROM_HANDLE(tu_buffer, buffer, _buffer);

   pMemoryRequirements->memoryTypeBits = 1;
   pMemoryRequirements->alignment = 16;
   pMemoryRequirements->size =
      align64(buffer->size, pMemoryRequirements->alignment);
}

void
tu_GetBufferMemoryRequirements2(
   VkDevice device,
   const VkBufferMemoryRequirementsInfo2KHR *pInfo,
   VkMemoryRequirements2KHR *pMemoryRequirements)
{
   tu_GetBufferMemoryRequirements(
      device, pInfo->buffer, &pMemoryRequirements->memoryRequirements);
}

void
tu_GetImageMemoryRequirements(VkDevice _device,
                              VkImage _image,
                              VkMemoryRequirements *pMemoryRequirements)
{
   TU_FROM_HANDLE(tu_image, image, _image);

   pMemoryRequirements->memoryTypeBits = 1;
   pMemoryRequirements->size = image->size;
   pMemoryRequirements->alignment = image->alignment;
}

void
tu_GetImageMemoryRequirements2(VkDevice device,
                               const VkImageMemoryRequirementsInfo2KHR *pInfo,
                               VkMemoryRequirements2KHR *pMemoryRequirements)
{
   tu_GetImageMemoryRequirements(
      device, pInfo->image, &pMemoryRequirements->memoryRequirements);
}

void
tu_GetImageSparseMemoryRequirements(
   VkDevice device,
   VkImage image,
   uint32_t *pSparseMemoryRequirementCount,
   VkSparseImageMemoryRequirements *pSparseMemoryRequirements)
{
   stub();
}

void
tu_GetImageSparseMemoryRequirements2(
   VkDevice device,
   const VkImageSparseMemoryRequirementsInfo2KHR *pInfo,
   uint32_t *pSparseMemoryRequirementCount,
   VkSparseImageMemoryRequirements2KHR *pSparseMemoryRequirements)
{
   stub();
}

void
tu_GetDeviceMemoryCommitment(VkDevice device,
                             VkDeviceMemory memory,
                             VkDeviceSize *pCommittedMemoryInBytes)
{
   *pCommittedMemoryInBytes = 0;
}

VkResult
tu_BindBufferMemory2(VkDevice device,
                     uint32_t bindInfoCount,
                     const VkBindBufferMemoryInfoKHR *pBindInfos)
{
   return VK_SUCCESS;
}

VkResult
tu_BindBufferMemory(VkDevice device,
                    VkBuffer buffer,
                    VkDeviceMemory memory,
                    VkDeviceSize memoryOffset)
{
   const VkBindBufferMemoryInfoKHR info = {
      .sType = VK_STRUCTURE_TYPE_BIND_BUFFER_MEMORY_INFO_KHR,
      .buffer = buffer,
      .memory = memory,
      .memoryOffset = memoryOffset
   };

   return tu_BindBufferMemory2(device, 1, &info);
}

VkResult
tu_BindImageMemory2(VkDevice device,
                    uint32_t bindInfoCount,
                    const VkBindImageMemoryInfoKHR *pBindInfos)
{
   return VK_SUCCESS;
}

VkResult
tu_BindImageMemory(VkDevice device,
                   VkImage image,
                   VkDeviceMemory memory,
                   VkDeviceSize memoryOffset)
{
   const VkBindImageMemoryInfoKHR info = {
      .sType = VK_STRUCTURE_TYPE_BIND_IMAGE_MEMORY_INFO_KHR,
      .image = image,
      .memory = memory,
      .memoryOffset = memoryOffset
   };

   return tu_BindImageMemory2(device, 1, &info);
}

VkResult
tu_QueueBindSparse(VkQueue _queue,
                   uint32_t bindInfoCount,
                   const VkBindSparseInfo *pBindInfo,
                   VkFence _fence)
{
   return VK_SUCCESS;
}

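/* Fences are not hooked up to the kernel yet: they are allocated but carry
 * no state, and waits, resets and status queries succeed unconditionally.
 */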
VkResult
tu_CreateFence(VkDevice _device,
               const VkFenceCreateInfo *pCreateInfo,
               const VkAllocationCallbacks *pAllocator,
               VkFence *pFence)
{
   TU_FROM_HANDLE(tu_device, device, _device);

   struct tu_fence *fence = vk_alloc2(&device->alloc,
                                      pAllocator,
                                      sizeof(*fence),
                                      8,
                                      VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);

   if (!fence)
      return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);

   *pFence = tu_fence_to_handle(fence);

   return VK_SUCCESS;
}

void
tu_DestroyFence(VkDevice _device,
                VkFence _fence,
                const VkAllocationCallbacks *pAllocator)
{
   TU_FROM_HANDLE(tu_device, device, _device);
   TU_FROM_HANDLE(tu_fence, fence, _fence);

   if (!fence)
      return;

   vk_free2(&device->alloc, pAllocator, fence);
}

VkResult
tu_WaitForFences(VkDevice _device,
                 uint32_t fenceCount,
                 const VkFence *pFences,
                 VkBool32 waitAll,
                 uint64_t timeout)
{
   return VK_SUCCESS;
}

VkResult
tu_ResetFences(VkDevice _device, uint32_t fenceCount, const VkFence *pFences)
{
   return VK_SUCCESS;
}

VkResult
tu_GetFenceStatus(VkDevice _device, VkFence _fence)
{
   return VK_SUCCESS;
}

// Queue semaphore functions

VkResult
tu_CreateSemaphore(VkDevice _device,
                   const VkSemaphoreCreateInfo *pCreateInfo,
                   const VkAllocationCallbacks *pAllocator,
                   VkSemaphore *pSemaphore)
{
   TU_FROM_HANDLE(tu_device, device, _device);

   struct tu_semaphore *sem = vk_alloc2(&device->alloc,
                                        pAllocator,
                                        sizeof(*sem),
                                        8,
                                        VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (!sem)
      return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);

   *pSemaphore = tu_semaphore_to_handle(sem);
   return VK_SUCCESS;
}

void
tu_DestroySemaphore(VkDevice _device,
                    VkSemaphore _semaphore,
                    const VkAllocationCallbacks *pAllocator)
{
   TU_FROM_HANDLE(tu_device, device, _device);
   TU_FROM_HANDLE(tu_semaphore, sem, _semaphore);
   if (!_semaphore)
      return;

   vk_free2(&device->alloc, pAllocator, sem);
}

VkResult
tu_CreateEvent(VkDevice _device,
               const VkEventCreateInfo *pCreateInfo,
               const VkAllocationCallbacks *pAllocator,
               VkEvent *pEvent)
{
   TU_FROM_HANDLE(tu_device, device, _device);
   struct tu_event *event = vk_alloc2(&device->alloc,
                                      pAllocator,
                                      sizeof(*event),
                                      8,
                                      VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);

   if (!event)
      return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);

   *pEvent = tu_event_to_handle(event);

   return VK_SUCCESS;
}

void
tu_DestroyEvent(VkDevice _device,
                VkEvent _event,
                const VkAllocationCallbacks *pAllocator)
{
   TU_FROM_HANDLE(tu_device, device, _device);
   TU_FROM_HANDLE(tu_event, event, _event);

   if (!event)
      return;
   vk_free2(&device->alloc, pAllocator, event);
}

VkResult
tu_GetEventStatus(VkDevice _device, VkEvent _event)
{
   TU_FROM_HANDLE(tu_event, event, _event);

   if (*event->map == 1)
      return VK_EVENT_SET;
   return VK_EVENT_RESET;
}

VkResult
tu_SetEvent(VkDevice _device, VkEvent _event)
{
   TU_FROM_HANDLE(tu_event, event, _event);
   *event->map = 1;

   return VK_SUCCESS;
}

VkResult
tu_ResetEvent(VkDevice _device, VkEvent _event)
{
   TU_FROM_HANDLE(tu_event, event, _event);
   *event->map = 0;

   return VK_SUCCESS;
}

VkResult
tu_CreateBuffer(VkDevice _device,
                const VkBufferCreateInfo *pCreateInfo,
                const VkAllocationCallbacks *pAllocator,
                VkBuffer *pBuffer)
{
   TU_FROM_HANDLE(tu_device, device, _device);
   struct tu_buffer *buffer;

   assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO);

   buffer = vk_alloc2(&device->alloc,
                      pAllocator,
                      sizeof(*buffer),
                      8,
                      VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (buffer == NULL)
      return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);

   buffer->size = pCreateInfo->size;
   buffer->usage = pCreateInfo->usage;
   buffer->flags = pCreateInfo->flags;

   *pBuffer = tu_buffer_to_handle(buffer);

   return VK_SUCCESS;
}

void
tu_DestroyBuffer(VkDevice _device,
                 VkBuffer _buffer,
                 const VkAllocationCallbacks *pAllocator)
{
   TU_FROM_HANDLE(tu_device, device, _device);
   TU_FROM_HANDLE(tu_buffer, buffer, _buffer);

   if (!buffer)
      return;

   vk_free2(&device->alloc, pAllocator, buffer);
}

static uint32_t
tu_surface_max_layer_count(struct tu_image_view *iview)
{
   return iview->type == VK_IMAGE_VIEW_TYPE_3D
             ? iview->extent.depth
             : (iview->base_layer + iview->layer_count);
}

VkResult
tu_CreateFramebuffer(VkDevice _device,
                     const VkFramebufferCreateInfo *pCreateInfo,
                     const VkAllocationCallbacks *pAllocator,
                     VkFramebuffer *pFramebuffer)
{
   TU_FROM_HANDLE(tu_device, device, _device);
   struct tu_framebuffer *framebuffer;

   assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO);

   size_t size =
      sizeof(*framebuffer) +
      sizeof(struct tu_attachment_info) * pCreateInfo->attachmentCount;
   framebuffer = vk_alloc2(
      &device->alloc, pAllocator, size, 8, VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (framebuffer == NULL)
      return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);

   framebuffer->attachment_count = pCreateInfo->attachmentCount;
   framebuffer->width = pCreateInfo->width;
   framebuffer->height = pCreateInfo->height;
   framebuffer->layers = pCreateInfo->layers;
   for (uint32_t i = 0; i < pCreateInfo->attachmentCount; i++) {
      VkImageView _iview = pCreateInfo->pAttachments[i];
      struct tu_image_view *iview = tu_image_view_from_handle(_iview);
      framebuffer->attachments[i].attachment = iview;

      framebuffer->width = MIN2(framebuffer->width, iview->extent.width);
      framebuffer->height = MIN2(framebuffer->height, iview->extent.height);
      framebuffer->layers =
         MIN2(framebuffer->layers, tu_surface_max_layer_count(iview));
   }

   *pFramebuffer = tu_framebuffer_to_handle(framebuffer);
   return VK_SUCCESS;
}

void
tu_DestroyFramebuffer(VkDevice _device,
                      VkFramebuffer _fb,
                      const VkAllocationCallbacks *pAllocator)
{
   TU_FROM_HANDLE(tu_device, device, _device);
   TU_FROM_HANDLE(tu_framebuffer, fb, _fb);

   if (!fb)
      return;
   vk_free2(&device->alloc, pAllocator, fb);
}

static void
tu_init_sampler(struct tu_device *device,
                struct tu_sampler *sampler,
                const VkSamplerCreateInfo *pCreateInfo)
{
}

VkResult
tu_CreateSampler(VkDevice _device,
                 const VkSamplerCreateInfo *pCreateInfo,
                 const VkAllocationCallbacks *pAllocator,
                 VkSampler *pSampler)
{
   TU_FROM_HANDLE(tu_device, device, _device);
   struct tu_sampler *sampler;

   assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_SAMPLER_CREATE_INFO);

   sampler = vk_alloc2(&device->alloc,
                       pAllocator,
                       sizeof(*sampler),
                       8,
                       VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (!sampler)
      return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);

   tu_init_sampler(device, sampler, pCreateInfo);
   *pSampler = tu_sampler_to_handle(sampler);

   return VK_SUCCESS;
}

void
tu_DestroySampler(VkDevice _device,
                  VkSampler _sampler,
                  const VkAllocationCallbacks *pAllocator)
{
   TU_FROM_HANDLE(tu_device, device, _device);
   TU_FROM_HANDLE(tu_sampler, sampler, _sampler);

   if (!sampler)
      return;
   vk_free2(&device->alloc, pAllocator, sampler);
}

/* vk_icd.h does not declare this function, so we declare it here to
 * suppress Wmissing-prototypes.
 */
PUBLIC VKAPI_ATTR VkResult VKAPI_CALL
vk_icdNegotiateLoaderICDInterfaceVersion(uint32_t *pSupportedVersion);

PUBLIC VKAPI_ATTR VkResult VKAPI_CALL
vk_icdNegotiateLoaderICDInterfaceVersion(uint32_t *pSupportedVersion)
{
   /* For the full details on loader interface versioning, see
    * <https://github.com/KhronosGroup/Vulkan-LoaderAndValidationLayers/blob/master/loader/LoaderAndLayerInterface.md>.
    * What follows is a condensed summary, to help you navigate the large and
    * confusing official doc.
    *
    * - Loader interface v0 is incompatible with later versions. We don't
    *   support it.
    *
    * - In loader interface v1:
    *     - The first ICD entrypoint called by the loader is
    *       vk_icdGetInstanceProcAddr(). The ICD must statically expose this
    *       entrypoint.
    *     - The ICD must statically expose no other Vulkan symbol unless it
    *       is linked with -Bsymbolic.
    *     - Each dispatchable Vulkan handle created by the ICD must be
    *       a pointer to a struct whose first member is VK_LOADER_DATA. The
    *       ICD must initialize VK_LOADER_DATA.loaderMagic to
    *       ICD_LOADER_MAGIC.
    *     - The loader implements vkCreate{PLATFORM}SurfaceKHR() and
    *       vkDestroySurfaceKHR(). The ICD must be capable of working with
    *       such loader-managed surfaces.
    *
    * - Loader interface v2 differs from v1 in:
    *     - The first ICD entrypoint called by the loader is
    *       vk_icdNegotiateLoaderICDInterfaceVersion(). The ICD must
    *       statically expose this entrypoint.
    *
    * - Loader interface v3 differs from v2 in:
    *     - The ICD must implement vkCreate{PLATFORM}SurfaceKHR(),
    *       vkDestroySurfaceKHR(), and other API which uses VkSurfaceKHR,
    *       because the loader no longer does so.
    */
   *pSupportedVersion = MIN2(*pSupportedVersion, 3u);
   return VK_SUCCESS;
}

void
tu_GetPhysicalDeviceExternalSemaphoreProperties(
   VkPhysicalDevice physicalDevice,
   const VkPhysicalDeviceExternalSemaphoreInfoKHR *pExternalSemaphoreInfo,
   VkExternalSemaphorePropertiesKHR *pExternalSemaphoreProperties)
{
   pExternalSemaphoreProperties->exportFromImportedHandleTypes = 0;
   pExternalSemaphoreProperties->compatibleHandleTypes = 0;
   pExternalSemaphoreProperties->externalSemaphoreFeatures = 0;
}

void
tu_GetPhysicalDeviceExternalFenceProperties(
   VkPhysicalDevice physicalDevice,
   const VkPhysicalDeviceExternalFenceInfoKHR *pExternalFenceInfo,
   VkExternalFencePropertiesKHR *pExternalFenceProperties)
{
   pExternalFenceProperties->exportFromImportedHandleTypes = 0;
   pExternalFenceProperties->compatibleHandleTypes = 0;
   pExternalFenceProperties->externalFenceFeatures = 0;
}

VkResult
tu_CreateDebugReportCallbackEXT(
   VkInstance _instance,
   const VkDebugReportCallbackCreateInfoEXT *pCreateInfo,
   const VkAllocationCallbacks *pAllocator,
   VkDebugReportCallbackEXT *pCallback)
{
   TU_FROM_HANDLE(tu_instance, instance, _instance);
   return vk_create_debug_report_callback(&instance->debug_report_callbacks,
                                          pCreateInfo,
                                          pAllocator,
                                          &instance->alloc,
                                          pCallback);
}

void
tu_DestroyDebugReportCallbackEXT(VkInstance _instance,
                                 VkDebugReportCallbackEXT _callback,
                                 const VkAllocationCallbacks *pAllocator)
{
   TU_FROM_HANDLE(tu_instance, instance, _instance);
   vk_destroy_debug_report_callback(&instance->debug_report_callbacks,
                                    _callback,
                                    pAllocator,
                                    &instance->alloc);
}

void
tu_DebugReportMessageEXT(VkInstance _instance,
                         VkDebugReportFlagsEXT flags,
                         VkDebugReportObjectTypeEXT objectType,
                         uint64_t object,
                         size_t location,
                         int32_t messageCode,
                         const char *pLayerPrefix,
                         const char *pMessage)
{
   TU_FROM_HANDLE(tu_instance, instance, _instance);
   vk_debug_report(&instance->debug_report_callbacks,
                   flags,
                   objectType,
                   object,
                   location,
                   messageCode,
                   pLayerPrefix,
                   pMessage);
}

void
tu_GetDeviceGroupPeerMemoryFeatures(
   VkDevice device,
   uint32_t heapIndex,
   uint32_t localDeviceIndex,
   uint32_t remoteDeviceIndex,
   VkPeerMemoryFeatureFlags *pPeerMemoryFeatures)
{
   assert(localDeviceIndex == remoteDeviceIndex);

   *pPeerMemoryFeatures = VK_PEER_MEMORY_FEATURE_COPY_SRC_BIT |
                          VK_PEER_MEMORY_FEATURE_COPY_DST_BIT |
                          VK_PEER_MEMORY_FEATURE_GENERIC_SRC_BIT |
                          VK_PEER_MEMORY_FEATURE_GENERIC_DST_BIT;
}