turnip: Implement and enable VK_QUERY_TYPE_TRANSFORM_FEEDBACK_STREAM_EXT
[mesa.git] / src / freedreno / vulkan / tu_device.c
1 /*
2 * Copyright © 2016 Red Hat.
3 * Copyright © 2016 Bas Nieuwenhuizen
4 *
5 * based in part on anv driver which is:
6 * Copyright © 2015 Intel Corporation
7 *
8 * Permission is hereby granted, free of charge, to any person obtaining a
9 * copy of this software and associated documentation files (the "Software"),
10 * to deal in the Software without restriction, including without limitation
11 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
12 * and/or sell copies of the Software, and to permit persons to whom the
13 * Software is furnished to do so, subject to the following conditions:
14 *
15 * The above copyright notice and this permission notice (including the next
16 * paragraph) shall be included in all copies or substantial portions of the
17 * Software.
18 *
19 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
20 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
21 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
22 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
23 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
24 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
25 * DEALINGS IN THE SOFTWARE.
26 */
27
28 #include "tu_private.h"
29
30 #include <fcntl.h>
31 #include <libsync.h>
32 #include <stdbool.h>
33 #include <string.h>
34 #include <sys/mman.h>
35 #include <sys/sysinfo.h>
36 #include <unistd.h>
37 #include <xf86drm.h>
38
39 #include "compiler/glsl_types.h"
40 #include "util/debug.h"
41 #include "util/disk_cache.h"
42 #include "vk_format.h"
43 #include "vk_util.h"
44
45 #include "drm-uapi/msm_drm.h"
46
47 static int
48 tu_device_get_cache_uuid(uint16_t family, void *uuid)
49 {
50 uint32_t mesa_timestamp;
51 uint16_t f = family;
52 memset(uuid, 0, VK_UUID_SIZE);
53 if (!disk_cache_get_function_timestamp(tu_device_get_cache_uuid,
54 &mesa_timestamp))
55 return -1;
56
57 memcpy(uuid, &mesa_timestamp, 4);
58 memcpy((char *) uuid + 4, &f, 2);
59 snprintf((char *) uuid + 6, VK_UUID_SIZE - 10, "tu");
60 return 0;
61 }
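/* The UUID built above has a fixed layout: bytes 0-3 hold the mesa build
 * timestamp, bytes 4-5 the GPU family, and byte 6 onward the
 * NUL-terminated tag "tu". A minimal decoding sketch, illustrative only
 * (example_decode_cache_uuid is hypothetical, not a driver function,
 * and assumes <stdio.h>):
 */
#if 0
static void
example_decode_cache_uuid(const uint8_t uuid[VK_UUID_SIZE])
{
   uint32_t mesa_timestamp;
   uint16_t family;
   memcpy(&mesa_timestamp, uuid, 4);      /* bytes 0-3: build timestamp */
   memcpy(&family, uuid + 4, 2);          /* bytes 4-5: GPU family */
   printf("timestamp=%u family=%u tag=%s\n",
          mesa_timestamp, family, (const char *) uuid + 6);
}
#endif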
62
63 static void
64 tu_get_driver_uuid(void *uuid)
65 {
66 memset(uuid, 0, VK_UUID_SIZE);
67 snprintf(uuid, VK_UUID_SIZE, "freedreno");
68 }
69
70 static void
71 tu_get_device_uuid(void *uuid)
72 {
73 memset(uuid, 0, VK_UUID_SIZE);
74 }
75
76 static VkResult
77 tu_bo_init(struct tu_device *dev,
78 struct tu_bo *bo,
79 uint32_t gem_handle,
80 uint64_t size)
81 {
82 uint64_t iova = tu_gem_info_iova(dev, gem_handle);
83 if (!iova)
84 return VK_ERROR_OUT_OF_DEVICE_MEMORY;
85
86 *bo = (struct tu_bo) {
87 .gem_handle = gem_handle,
88 .size = size,
89 .iova = iova,
90 };
91
92 return VK_SUCCESS;
93 }
94
95 VkResult
96 tu_bo_init_new(struct tu_device *dev, struct tu_bo *bo, uint64_t size)
97 {
98 /* TODO: Choose better flags. As of 2018-11-12, freedreno/drm/msm_bo.c
99 * always sets `flags = MSM_BO_WC`, and we copy that behavior here.
100 */
101 uint32_t gem_handle = tu_gem_new(dev, size, MSM_BO_WC);
102 if (!gem_handle)
103 return vk_error(dev->instance, VK_ERROR_OUT_OF_DEVICE_MEMORY);
104
105 VkResult result = tu_bo_init(dev, bo, gem_handle, size);
106 if (result != VK_SUCCESS) {
107 tu_gem_close(dev, gem_handle);
108 return vk_error(dev->instance, result);
109 }
110
111 return VK_SUCCESS;
112 }
113
114 VkResult
115 tu_bo_init_dmabuf(struct tu_device *dev,
116 struct tu_bo *bo,
117 uint64_t size,
118 int fd)
119 {
120 uint32_t gem_handle = tu_gem_import_dmabuf(dev, fd, size);
121 if (!gem_handle)
122 return vk_error(dev->instance, VK_ERROR_INVALID_EXTERNAL_HANDLE);
123
124 VkResult result = tu_bo_init(dev, bo, gem_handle, size);
125 if (result != VK_SUCCESS) {
126 tu_gem_close(dev, gem_handle);
127 return vk_error(dev->instance, result);
128 }
129
130 return VK_SUCCESS;
131 }
132
133 int
134 tu_bo_export_dmabuf(struct tu_device *dev, struct tu_bo *bo)
135 {
136 return tu_gem_export_dmabuf(dev, bo->gem_handle);
137 }
138
139 VkResult
140 tu_bo_map(struct tu_device *dev, struct tu_bo *bo)
141 {
142 if (bo->map)
143 return VK_SUCCESS;
144
145 uint64_t offset = tu_gem_info_offset(dev, bo->gem_handle);
146 if (!offset)
147 return vk_error(dev->instance, VK_ERROR_OUT_OF_DEVICE_MEMORY);
148
149 /* TODO: Should we use the wrapper os_mmap() like Freedreno does? */
150 void *map = mmap(0, bo->size, PROT_READ | PROT_WRITE, MAP_SHARED,
151 dev->physical_device->local_fd, offset);
152 if (map == MAP_FAILED)
153 return vk_error(dev->instance, VK_ERROR_MEMORY_MAP_FAILED);
154
155 bo->map = map;
156 return VK_SUCCESS;
157 }
158
159 void
160 tu_bo_finish(struct tu_device *dev, struct tu_bo *bo)
161 {
162 assert(bo->gem_handle);
163
164 if (bo->map)
165 munmap(bo->map, bo->size);
166
167 tu_gem_close(dev, bo->gem_handle);
168 }
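/* Taken together, the helpers above give a BO its full lifecycle:
 * allocate a GEM buffer, lazily mmap it for CPU access, then unmap and
 * close the handle. A usage sketch, illustrative only (assumes a valid
 * struct tu_device; error handling abbreviated):
 */
#if 0
static VkResult
example_bo_roundtrip(struct tu_device *dev)
{
   struct tu_bo bo;
   VkResult result = tu_bo_init_new(dev, &bo, 4096);
   if (result != VK_SUCCESS)
      return result;

   result = tu_bo_map(dev, &bo);    /* mmaps on first call, cached after */
   if (result == VK_SUCCESS)
      memset(bo.map, 0, bo.size);   /* CPU writes through the WC mapping */

   tu_bo_finish(dev, &bo);          /* munmap + GEM handle close */
   return result;
}
#endif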
169
170 static VkResult
171 tu_physical_device_init(struct tu_physical_device *device,
172 struct tu_instance *instance,
173 drmDevicePtr drm_device)
174 {
175 const char *path = drm_device->nodes[DRM_NODE_RENDER];
176 VkResult result = VK_SUCCESS;
177 drmVersionPtr version;
178 int fd;
179 int master_fd = -1;
180
181 fd = open(path, O_RDWR | O_CLOEXEC);
182 if (fd < 0) {
183 return vk_errorf(instance, VK_ERROR_INCOMPATIBLE_DRIVER,
184 "failed to open device %s", path);
185 }
186
187 /* Version 1.3 added MSM_INFO_IOVA. */
188 const int min_version_major = 1;
189 const int min_version_minor = 3;
190
191 version = drmGetVersion(fd);
192 if (!version) {
193 close(fd);
194 return vk_errorf(instance, VK_ERROR_INCOMPATIBLE_DRIVER,
195 "failed to query kernel driver version for device %s",
196 path);
197 }
198
199 if (strcmp(version->name, "msm")) {
200 drmFreeVersion(version);
201 close(fd);
202 return vk_errorf(instance, VK_ERROR_INCOMPATIBLE_DRIVER,
203 "device %s does not use the msm kernel driver", path);
204 }
205
206 if (version->version_major != min_version_major ||
207 version->version_minor < min_version_minor) {
208 result = vk_errorf(instance, VK_ERROR_INCOMPATIBLE_DRIVER,
209 "kernel driver for device %s has version %d.%d, "
210 "but Vulkan requires version >= %d.%d",
211 path, version->version_major, version->version_minor,
212 min_version_major, min_version_minor);
213 drmFreeVersion(version);
214 close(fd);
215 return result;
216 }
217
218 drmFreeVersion(version);
219
220 if (instance->debug_flags & TU_DEBUG_STARTUP)
221 tu_logi("Found compatible device '%s'.", path);
222
223 device->_loader_data.loaderMagic = ICD_LOADER_MAGIC;
224 device->instance = instance;
225 assert(strlen(path) < ARRAY_SIZE(device->path));
226 strncpy(device->path, path, ARRAY_SIZE(device->path));
227
228 if (instance->enabled_extensions.KHR_display) {
229 master_fd =
230 open(drm_device->nodes[DRM_NODE_PRIMARY], O_RDWR | O_CLOEXEC);
231 if (master_fd >= 0) {
232 /* TODO: free master_fd if accel is not working? */
233 }
234 }
235
236 device->master_fd = master_fd;
237 device->local_fd = fd;
238
239 if (tu_drm_get_gpu_id(device, &device->gpu_id)) {
240 if (instance->debug_flags & TU_DEBUG_STARTUP)
241 tu_logi("Could not query the GPU ID");
242 result = vk_errorf(instance, VK_ERROR_INITIALIZATION_FAILED,
243 "could not get GPU ID");
244 goto fail;
245 }
246
247 if (tu_drm_get_gmem_size(device, &device->gmem_size)) {
248 if (instance->debug_flags & TU_DEBUG_STARTUP)
249 tu_logi("Could not query the GMEM size");
250 result = vk_errorf(instance, VK_ERROR_INITIALIZATION_FAILED,
251 "could not get GMEM size");
252 goto fail;
253 }
254
255 if (tu_drm_get_gmem_base(device, &device->gmem_base)) {
256 if (instance->debug_flags & TU_DEBUG_STARTUP)
257 tu_logi("Could not query the GMEM size");
258 result = vk_errorf(instance, VK_ERROR_INITIALIZATION_FAILED,
259 "could not get GMEM size");
260 goto fail;
261 }
262
263 memset(device->name, 0, sizeof(device->name));
264 sprintf(device->name, "FD%d", device->gpu_id);
265
266 switch (device->gpu_id) {
267 case 618:
268 device->magic.RB_UNKNOWN_8E04_blit = 0x00100000;
269 device->ccu_offset_gmem = 0x7c000; /* 0x7e000 in some cases? */
270 device->ccu_offset_bypass = 0x10000;
271 device->magic.PC_UNKNOWN_9805 = 0x0;
272 device->magic.SP_UNKNOWN_A0F8 = 0x0;
273 break;
274 case 630:
275 case 640:
276 device->magic.RB_UNKNOWN_8E04_blit = 0x01000000;
277 device->ccu_offset_gmem = 0xf8000;
278 device->ccu_offset_bypass = 0x20000;
279 device->magic.PC_UNKNOWN_9805 = 0x1;
280 device->magic.SP_UNKNOWN_A0F8 = 0x1;
281 break;
282 default:
283 result = vk_errorf(instance, VK_ERROR_INITIALIZATION_FAILED,
284 "device %s is unsupported", device->name);
285 goto fail;
286 }
287 if (tu_device_get_cache_uuid(device->gpu_id, device->cache_uuid)) {
288 result = vk_errorf(instance, VK_ERROR_INITIALIZATION_FAILED,
289 "cannot generate UUID");
290 goto fail;
291 }
292
293 /* The gpu id is already embedded in the cache uuid so we just pass
294 * the device name when creating the cache.
295 */
296 char buf[VK_UUID_SIZE * 2 + 1];
297 disk_cache_format_hex_id(buf, device->cache_uuid, VK_UUID_SIZE * 2);
298 device->disk_cache = disk_cache_create(device->name, buf, 0);
299
300 fprintf(stderr, "WARNING: tu is not a conformant vulkan implementation, "
301 "testing use only.\n");
302
303 tu_get_driver_uuid(&device->driver_uuid);
304 tu_get_device_uuid(&device->device_uuid);
305
306 tu_fill_device_extension_table(device, &device->supported_extensions);
307
308 if (result != VK_SUCCESS) {
309 vk_error(instance, result);
310 goto fail;
311 }
312
313 result = tu_wsi_init(device);
314 if (result != VK_SUCCESS) {
315 vk_error(instance, result);
316 goto fail;
317 }
318
319 return VK_SUCCESS;
320
321 fail:
322 close(fd);
323 if (master_fd != -1)
324 close(master_fd);
325 return result;
326 }
327
328 static void
329 tu_physical_device_finish(struct tu_physical_device *device)
330 {
331 tu_wsi_finish(device);
332
333 disk_cache_destroy(device->disk_cache);
334 close(device->local_fd);
335 if (device->master_fd != -1)
336 close(device->master_fd);
337 }
338
339 static VKAPI_ATTR void *
340 default_alloc_func(void *pUserData,
341 size_t size,
342 size_t align,
343 VkSystemAllocationScope allocationScope)
344 {
345 return malloc(size);
346 }
347
348 static VKAPI_ATTR void *
349 default_realloc_func(void *pUserData,
350 void *pOriginal,
351 size_t size,
352 size_t align,
353 VkSystemAllocationScope allocationScope)
354 {
355 return realloc(pOriginal, size);
356 }
357
358 static VKAPI_ATTR void
359 default_free_func(void *pUserData, void *pMemory)
360 {
361 free(pMemory);
362 }
363
364 static const VkAllocationCallbacks default_alloc = {
365 .pUserData = NULL,
366 .pfnAllocation = default_alloc_func,
367 .pfnReallocation = default_realloc_func,
368 .pfnFree = default_free_func,
369 };
370
371 static const struct debug_control tu_debug_options[] = {
372 { "startup", TU_DEBUG_STARTUP },
373 { "nir", TU_DEBUG_NIR },
374 { "ir3", TU_DEBUG_IR3 },
375 { "nobin", TU_DEBUG_NOBIN },
376 { "sysmem", TU_DEBUG_SYSMEM },
377 { "forcebin", TU_DEBUG_FORCEBIN },
378 { NULL, 0 }
379 };
380
381 const char *
382 tu_get_debug_option_name(int id)
383 {
384 assert(id < ARRAY_SIZE(tu_debug_options) - 1);
385 return tu_debug_options[id].string;
386 }
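/* The table above maps comma-separated TU_DEBUG tokens to flag bits via
 * parse_debug_string() (see tu_CreateInstance below). Illustrative only:
 */
#if 0
/* TU_DEBUG=startup,nobin would produce exactly these two bits set. */
uint64_t flags = parse_debug_string("startup,nobin", tu_debug_options);
assert(flags == (TU_DEBUG_STARTUP | TU_DEBUG_NOBIN));
#endif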
387
388 static int
389 tu_get_instance_extension_index(const char *name)
390 {
391 for (unsigned i = 0; i < TU_INSTANCE_EXTENSION_COUNT; ++i) {
392 if (strcmp(name, tu_instance_extensions[i].extensionName) == 0)
393 return i;
394 }
395 return -1;
396 }
397
398 VkResult
399 tu_CreateInstance(const VkInstanceCreateInfo *pCreateInfo,
400 const VkAllocationCallbacks *pAllocator,
401 VkInstance *pInstance)
402 {
403 struct tu_instance *instance;
404 VkResult result;
405
406 assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_INSTANCE_CREATE_INFO);
407
408 uint32_t client_version;
409 if (pCreateInfo->pApplicationInfo &&
410 pCreateInfo->pApplicationInfo->apiVersion != 0) {
411 client_version = pCreateInfo->pApplicationInfo->apiVersion;
412 } else {
413 tu_EnumerateInstanceVersion(&client_version);
414 }
415
416 instance = vk_zalloc2(&default_alloc, pAllocator, sizeof(*instance), 8,
417 VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
418 if (!instance)
419 return vk_error(NULL, VK_ERROR_OUT_OF_HOST_MEMORY);
420
421 instance->_loader_data.loaderMagic = ICD_LOADER_MAGIC;
422
423 if (pAllocator)
424 instance->alloc = *pAllocator;
425 else
426 instance->alloc = default_alloc;
427
428 instance->api_version = client_version;
429 instance->physical_device_count = -1;
430
431 instance->debug_flags =
432 parse_debug_string(getenv("TU_DEBUG"), tu_debug_options);
433
434 if (instance->debug_flags & TU_DEBUG_STARTUP)
435 tu_logi("Created an instance");
436
437 for (uint32_t i = 0; i < pCreateInfo->enabledExtensionCount; i++) {
438 const char *ext_name = pCreateInfo->ppEnabledExtensionNames[i];
439 int index = tu_get_instance_extension_index(ext_name);
440
441 if (index < 0 || !tu_supported_instance_extensions.extensions[index]) {
442 vk_free2(&default_alloc, pAllocator, instance);
443 return vk_error(NULL, VK_ERROR_EXTENSION_NOT_PRESENT);
444 }
445
446 instance->enabled_extensions.extensions[index] = true;
447 }
448
449 result = vk_debug_report_instance_init(&instance->debug_report_callbacks);
450 if (result != VK_SUCCESS) {
451 vk_free2(&default_alloc, pAllocator, instance);
452 return vk_error(NULL, result);
453 }
454
455 glsl_type_singleton_init_or_ref();
456
457 VG(VALGRIND_CREATE_MEMPOOL(instance, 0, false));
458
459 *pInstance = tu_instance_to_handle(instance);
460
461 return VK_SUCCESS;
462 }
463
464 void
465 tu_DestroyInstance(VkInstance _instance,
466 const VkAllocationCallbacks *pAllocator)
467 {
468 TU_FROM_HANDLE(tu_instance, instance, _instance);
469
470 if (!instance)
471 return;
472
473 for (int i = 0; i < instance->physical_device_count; ++i) {
474 tu_physical_device_finish(instance->physical_devices + i);
475 }
476
477 VG(VALGRIND_DESTROY_MEMPOOL(instance));
478
479 glsl_type_singleton_decref();
480
481 vk_debug_report_instance_destroy(&instance->debug_report_callbacks);
482
483 vk_free(&instance->alloc, instance);
484 }
485
486 static VkResult
487 tu_enumerate_devices(struct tu_instance *instance)
488 {
489 /* TODO: Check for more devices? */
490 drmDevicePtr devices[8];
491 VkResult result = VK_ERROR_INCOMPATIBLE_DRIVER;
492 int max_devices;
493
494 instance->physical_device_count = 0;
495
496 max_devices = drmGetDevices2(0, devices, ARRAY_SIZE(devices));
497
498 if (instance->debug_flags & TU_DEBUG_STARTUP)
499 tu_logi("Found %d drm nodes", max_devices);
500
501 if (max_devices < 1)
502 return vk_error(instance, VK_ERROR_INCOMPATIBLE_DRIVER);
503
504 for (unsigned i = 0; i < (unsigned) max_devices; i++) {
505 if (devices[i]->available_nodes & 1 << DRM_NODE_RENDER &&
506 devices[i]->bustype == DRM_BUS_PLATFORM) {
507
508 result = tu_physical_device_init(
509 instance->physical_devices + instance->physical_device_count,
510 instance, devices[i]);
511 if (result == VK_SUCCESS)
512 ++instance->physical_device_count;
513 else if (result != VK_ERROR_INCOMPATIBLE_DRIVER)
514 break;
515 }
516 }
517 drmFreeDevices(devices, max_devices);
518
519 return result;
520 }
521
522 VkResult
523 tu_EnumeratePhysicalDevices(VkInstance _instance,
524 uint32_t *pPhysicalDeviceCount,
525 VkPhysicalDevice *pPhysicalDevices)
526 {
527 TU_FROM_HANDLE(tu_instance, instance, _instance);
528 VK_OUTARRAY_MAKE(out, pPhysicalDevices, pPhysicalDeviceCount);
529
530 VkResult result;
531
532 if (instance->physical_device_count < 0) {
533 result = tu_enumerate_devices(instance);
534 if (result != VK_SUCCESS && result != VK_ERROR_INCOMPATIBLE_DRIVER)
535 return result;
536 }
537
538 for (uint32_t i = 0; i < instance->physical_device_count; ++i) {
539 vk_outarray_append(&out, p)
540 {
541 *p = tu_physical_device_to_handle(instance->physical_devices + i);
542 }
543 }
544
545 return vk_outarray_status(&out);
546 }
547
548 VkResult
549 tu_EnumeratePhysicalDeviceGroups(
550 VkInstance _instance,
551 uint32_t *pPhysicalDeviceGroupCount,
552 VkPhysicalDeviceGroupProperties *pPhysicalDeviceGroupProperties)
553 {
554 TU_FROM_HANDLE(tu_instance, instance, _instance);
555 VK_OUTARRAY_MAKE(out, pPhysicalDeviceGroupProperties,
556 pPhysicalDeviceGroupCount);
557 VkResult result;
558
559 if (instance->physical_device_count < 0) {
560 result = tu_enumerate_devices(instance);
561 if (result != VK_SUCCESS && result != VK_ERROR_INCOMPATIBLE_DRIVER)
562 return result;
563 }
564
565 for (uint32_t i = 0; i < instance->physical_device_count; ++i) {
566 vk_outarray_append(&out, p)
567 {
568 p->physicalDeviceCount = 1;
569 p->physicalDevices[0] =
570 tu_physical_device_to_handle(instance->physical_devices + i);
571 p->subsetAllocation = false;
572 }
573 }
574
575 return vk_outarray_status(&out);
576 }
577
578 void
579 tu_GetPhysicalDeviceFeatures(VkPhysicalDevice physicalDevice,
580 VkPhysicalDeviceFeatures *pFeatures)
581 {
582 memset(pFeatures, 0, sizeof(*pFeatures));
583
584 *pFeatures = (VkPhysicalDeviceFeatures) {
585 .robustBufferAccess = false,
586 .fullDrawIndexUint32 = true,
587 .imageCubeArray = true,
588 .independentBlend = true,
589 .geometryShader = true,
590 .tessellationShader = false,
591 .sampleRateShading = true,
592 .dualSrcBlend = true,
593 .logicOp = true,
594 .multiDrawIndirect = false,
595 .drawIndirectFirstInstance = false,
596 .depthClamp = true,
597 .depthBiasClamp = false,
598 .fillModeNonSolid = false,
599 .depthBounds = false,
600 .wideLines = false,
601 .largePoints = false,
602 .alphaToOne = false,
603 .multiViewport = false,
604 .samplerAnisotropy = true,
605 .textureCompressionETC2 = true,
606 .textureCompressionASTC_LDR = true,
607 .textureCompressionBC = true,
608 .occlusionQueryPrecise = true,
609 .pipelineStatisticsQuery = false,
610 .vertexPipelineStoresAndAtomics = false,
611 .fragmentStoresAndAtomics = false,
612 .shaderTessellationAndGeometryPointSize = false,
613 .shaderImageGatherExtended = false,
614 .shaderStorageImageExtendedFormats = false,
615 .shaderStorageImageMultisample = false,
616 .shaderUniformBufferArrayDynamicIndexing = false,
617 .shaderSampledImageArrayDynamicIndexing = false,
618 .shaderStorageBufferArrayDynamicIndexing = false,
619 .shaderStorageImageArrayDynamicIndexing = false,
620 .shaderStorageImageReadWithoutFormat = false,
621 .shaderStorageImageWriteWithoutFormat = false,
622 .shaderClipDistance = false,
623 .shaderCullDistance = false,
624 .shaderFloat64 = false,
625 .shaderInt64 = false,
626 .shaderInt16 = false,
627 .sparseBinding = false,
628 .variableMultisampleRate = false,
629 .inheritedQueries = false,
630 };
631 }
632
633 void
634 tu_GetPhysicalDeviceFeatures2(VkPhysicalDevice physicalDevice,
635 VkPhysicalDeviceFeatures2 *pFeatures)
636 {
637 vk_foreach_struct(ext, pFeatures->pNext)
638 {
639 switch (ext->sType) {
640 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VARIABLE_POINTERS_FEATURES: {
641 VkPhysicalDeviceVariablePointersFeatures *features = (void *) ext;
642 features->variablePointersStorageBuffer = false;
643 features->variablePointers = false;
644 break;
645 }
646 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MULTIVIEW_FEATURES: {
647 VkPhysicalDeviceMultiviewFeatures *features =
648 (VkPhysicalDeviceMultiviewFeatures *) ext;
649 features->multiview = false;
650 features->multiviewGeometryShader = false;
651 features->multiviewTessellationShader = false;
652 break;
653 }
654 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_DRAW_PARAMETERS_FEATURES: {
655 VkPhysicalDeviceShaderDrawParametersFeatures *features =
656 (VkPhysicalDeviceShaderDrawParametersFeatures *) ext;
657 features->shaderDrawParameters = false;
658 break;
659 }
660 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROTECTED_MEMORY_FEATURES: {
661 VkPhysicalDeviceProtectedMemoryFeatures *features =
662 (VkPhysicalDeviceProtectedMemoryFeatures *) ext;
663 features->protectedMemory = false;
664 break;
665 }
666 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_16BIT_STORAGE_FEATURES: {
667 VkPhysicalDevice16BitStorageFeatures *features =
668 (VkPhysicalDevice16BitStorageFeatures *) ext;
669 features->storageBuffer16BitAccess = false;
670 features->uniformAndStorageBuffer16BitAccess = false;
671 features->storagePushConstant16 = false;
672 features->storageInputOutput16 = false;
673 break;
674 }
675 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SAMPLER_YCBCR_CONVERSION_FEATURES: {
676 VkPhysicalDeviceSamplerYcbcrConversionFeatures *features =
677 (VkPhysicalDeviceSamplerYcbcrConversionFeatures *) ext;
678 features->samplerYcbcrConversion = false;
679 break;
680 }
681 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DESCRIPTOR_INDEXING_FEATURES_EXT: {
682 VkPhysicalDeviceDescriptorIndexingFeaturesEXT *features =
683 (VkPhysicalDeviceDescriptorIndexingFeaturesEXT *) ext;
684 features->shaderInputAttachmentArrayDynamicIndexing = false;
685 features->shaderUniformTexelBufferArrayDynamicIndexing = false;
686 features->shaderStorageTexelBufferArrayDynamicIndexing = false;
687 features->shaderUniformBufferArrayNonUniformIndexing = false;
688 features->shaderSampledImageArrayNonUniformIndexing = false;
689 features->shaderStorageBufferArrayNonUniformIndexing = false;
690 features->shaderStorageImageArrayNonUniformIndexing = false;
691 features->shaderInputAttachmentArrayNonUniformIndexing = false;
692 features->shaderUniformTexelBufferArrayNonUniformIndexing = false;
693 features->shaderStorageTexelBufferArrayNonUniformIndexing = false;
694 features->descriptorBindingUniformBufferUpdateAfterBind = false;
695 features->descriptorBindingSampledImageUpdateAfterBind = false;
696 features->descriptorBindingStorageImageUpdateAfterBind = false;
697 features->descriptorBindingStorageBufferUpdateAfterBind = false;
698 features->descriptorBindingUniformTexelBufferUpdateAfterBind = false;
699 features->descriptorBindingStorageTexelBufferUpdateAfterBind = false;
700 features->descriptorBindingUpdateUnusedWhilePending = false;
701 features->descriptorBindingPartiallyBound = false;
702 features->descriptorBindingVariableDescriptorCount = false;
703 features->runtimeDescriptorArray = false;
704 break;
705 }
706 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_CONDITIONAL_RENDERING_FEATURES_EXT: {
707 VkPhysicalDeviceConditionalRenderingFeaturesEXT *features =
708 (VkPhysicalDeviceConditionalRenderingFeaturesEXT *) ext;
709 features->conditionalRendering = false;
710 features->inheritedConditionalRendering = false;
711 break;
712 }
713 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TRANSFORM_FEEDBACK_FEATURES_EXT: {
714 VkPhysicalDeviceTransformFeedbackFeaturesEXT *features =
715 (VkPhysicalDeviceTransformFeedbackFeaturesEXT *) ext;
716 features->transformFeedback = true;
717 features->geometryStreams = false;
718 break;
719 }
720 default:
721 break;
722 }
723 }
724 tu_GetPhysicalDeviceFeatures(physicalDevice, &pFeatures->features);
725 }
726
727 void
728 tu_GetPhysicalDeviceProperties(VkPhysicalDevice physicalDevice,
729 VkPhysicalDeviceProperties *pProperties)
730 {
731 TU_FROM_HANDLE(tu_physical_device, pdevice, physicalDevice);
732 VkSampleCountFlags sample_counts =
733 VK_SAMPLE_COUNT_1_BIT | VK_SAMPLE_COUNT_2_BIT | VK_SAMPLE_COUNT_4_BIT;
734
735 /* I have no idea what the maximum size is, but the hardware supports very
736 * large numbers of descriptors (at least 2^16). This limit is based on
737 * CP_LOAD_STATE6, which has a 28-bit field for the DWORD offset, so that
738 * we don't have to think about what to do if that overflows, but really
739 * nothing is likely to get close to this.
740 */
741 const size_t max_descriptor_set_size = (1 << 28) / A6XX_TEX_CONST_DWORDS;
742
743 VkPhysicalDeviceLimits limits = {
744 .maxImageDimension1D = (1 << 14),
745 .maxImageDimension2D = (1 << 14),
746 .maxImageDimension3D = (1 << 11),
747 .maxImageDimensionCube = (1 << 14),
748 .maxImageArrayLayers = (1 << 11),
749 .maxTexelBufferElements = 128 * 1024 * 1024,
750 .maxUniformBufferRange = MAX_UNIFORM_BUFFER_RANGE,
751 .maxStorageBufferRange = MAX_STORAGE_BUFFER_RANGE,
752 .maxPushConstantsSize = MAX_PUSH_CONSTANTS_SIZE,
753 .maxMemoryAllocationCount = UINT32_MAX,
754 .maxSamplerAllocationCount = 64 * 1024,
755 .bufferImageGranularity = 64, /* A cache line */
756 .sparseAddressSpaceSize = 0xffffffffu, /* buffer max size */
757 .maxBoundDescriptorSets = MAX_SETS,
758 .maxPerStageDescriptorSamplers = max_descriptor_set_size,
759 .maxPerStageDescriptorUniformBuffers = max_descriptor_set_size,
760 .maxPerStageDescriptorStorageBuffers = max_descriptor_set_size,
761 .maxPerStageDescriptorSampledImages = max_descriptor_set_size,
762 .maxPerStageDescriptorStorageImages = max_descriptor_set_size,
763 .maxPerStageDescriptorInputAttachments = MAX_RTS,
764 .maxPerStageResources = max_descriptor_set_size,
765 .maxDescriptorSetSamplers = max_descriptor_set_size,
766 .maxDescriptorSetUniformBuffers = max_descriptor_set_size,
767 .maxDescriptorSetUniformBuffersDynamic = MAX_DYNAMIC_UNIFORM_BUFFERS,
768 .maxDescriptorSetStorageBuffers = max_descriptor_set_size,
769 .maxDescriptorSetStorageBuffersDynamic = MAX_DYNAMIC_STORAGE_BUFFERS,
770 .maxDescriptorSetSampledImages = max_descriptor_set_size,
771 .maxDescriptorSetStorageImages = max_descriptor_set_size,
772 .maxDescriptorSetInputAttachments = MAX_RTS,
773 .maxVertexInputAttributes = 32,
774 .maxVertexInputBindings = 32,
775 .maxVertexInputAttributeOffset = 4095,
776 .maxVertexInputBindingStride = 2048,
777 .maxVertexOutputComponents = 128,
778 .maxTessellationGenerationLevel = 64,
779 .maxTessellationPatchSize = 32,
780 .maxTessellationControlPerVertexInputComponents = 128,
781 .maxTessellationControlPerVertexOutputComponents = 128,
782 .maxTessellationControlPerPatchOutputComponents = 120,
783 .maxTessellationControlTotalOutputComponents = 4096,
784 .maxTessellationEvaluationInputComponents = 128,
785 .maxTessellationEvaluationOutputComponents = 128,
786 .maxGeometryShaderInvocations = 32,
787 .maxGeometryInputComponents = 64,
788 .maxGeometryOutputComponents = 128,
789 .maxGeometryOutputVertices = 256,
790 .maxGeometryTotalOutputComponents = 1024,
791 .maxFragmentInputComponents = 124,
792 .maxFragmentOutputAttachments = 8,
793 .maxFragmentDualSrcAttachments = 1,
794 .maxFragmentCombinedOutputResources = 8,
795 .maxComputeSharedMemorySize = 32768,
796 .maxComputeWorkGroupCount = { 65535, 65535, 65535 },
797 .maxComputeWorkGroupInvocations = 2048,
798 .maxComputeWorkGroupSize = { 2048, 2048, 2048 },
799 .subPixelPrecisionBits = 8,
800 .subTexelPrecisionBits = 4 /* FIXME */,
801 .mipmapPrecisionBits = 4 /* FIXME */,
802 .maxDrawIndexedIndexValue = UINT32_MAX,
803 .maxDrawIndirectCount = UINT32_MAX,
804 .maxSamplerLodBias = 16,
805 .maxSamplerAnisotropy = 16,
806 .maxViewports = MAX_VIEWPORTS,
807 .maxViewportDimensions = { (1 << 14), (1 << 14) },
808 .viewportBoundsRange = { INT16_MIN, INT16_MAX },
809 .viewportSubPixelBits = 8,
810 .minMemoryMapAlignment = 4096, /* A page */
811 .minTexelBufferOffsetAlignment = 64,
812 .minUniformBufferOffsetAlignment = 64,
813 .minStorageBufferOffsetAlignment = 64,
814 .minTexelOffset = -32,
815 .maxTexelOffset = 31,
816 .minTexelGatherOffset = -32,
817 .maxTexelGatherOffset = 31,
818 .minInterpolationOffset = -2,
819 .maxInterpolationOffset = 2,
820 .subPixelInterpolationOffsetBits = 8,
821 .maxFramebufferWidth = (1 << 14),
822 .maxFramebufferHeight = (1 << 14),
823 .maxFramebufferLayers = (1 << 10),
824 .framebufferColorSampleCounts = sample_counts,
825 .framebufferDepthSampleCounts = sample_counts,
826 .framebufferStencilSampleCounts = sample_counts,
827 .framebufferNoAttachmentsSampleCounts = sample_counts,
828 .maxColorAttachments = MAX_RTS,
829 .sampledImageColorSampleCounts = sample_counts,
830 .sampledImageIntegerSampleCounts = VK_SAMPLE_COUNT_1_BIT,
831 .sampledImageDepthSampleCounts = sample_counts,
832 .sampledImageStencilSampleCounts = sample_counts,
833 .storageImageSampleCounts = VK_SAMPLE_COUNT_1_BIT,
834 .maxSampleMaskWords = 1,
835 .timestampComputeAndGraphics = true,
836 .timestampPeriod = 1000000000.0 / 19200000.0, /* CP_ALWAYS_ON_COUNTER runs at a fixed 19.2 MHz */
837 .maxClipDistances = 8,
838 .maxCullDistances = 8,
839 .maxCombinedClipAndCullDistances = 8,
840 .discreteQueuePriorities = 1,
841 .pointSizeRange = { 0.125, 255.875 },
842 .lineWidthRange = { 0.0, 7.9921875 },
843 .pointSizeGranularity = (1.0 / 8.0),
844 .lineWidthGranularity = (1.0 / 128.0),
845 .strictLines = false, /* FINISHME */
846 .standardSampleLocations = true,
847 .optimalBufferCopyOffsetAlignment = 128,
848 .optimalBufferCopyRowPitchAlignment = 128,
849 .nonCoherentAtomSize = 64,
850 };
851
852 *pProperties = (VkPhysicalDeviceProperties) {
853 .apiVersion = tu_physical_device_api_version(pdevice),
854 .driverVersion = vk_get_driver_version(),
855 .vendorID = 0, /* TODO */
856 .deviceID = 0,
857 .deviceType = VK_PHYSICAL_DEVICE_TYPE_INTEGRATED_GPU,
858 .limits = limits,
859 .sparseProperties = { 0 },
860 };
861
862 strcpy(pProperties->deviceName, pdevice->name);
863 memcpy(pProperties->pipelineCacheUUID, pdevice->cache_uuid, VK_UUID_SIZE);
864 }
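/* timestampPeriod above works out to ~52.083 ns per tick. A sketch of
 * converting a raw timestamp query result to nanoseconds, illustrative
 * only (example_ticks_to_ns is hypothetical):
 */
#if 0
static double
example_ticks_to_ns(uint64_t ticks)
{
   /* 1e9 ns/s divided by the fixed 19.2 MHz counter rate */
   const double period_ns = 1000000000.0 / 19200000.0;
   return (double) ticks * period_ns;
}
#endif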
865
866 void
867 tu_GetPhysicalDeviceProperties2(VkPhysicalDevice physicalDevice,
868 VkPhysicalDeviceProperties2 *pProperties)
869 {
870 TU_FROM_HANDLE(tu_physical_device, pdevice, physicalDevice);
871 tu_GetPhysicalDeviceProperties(physicalDevice, &pProperties->properties);
872
873 vk_foreach_struct(ext, pProperties->pNext)
874 {
875 switch (ext->sType) {
876 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PUSH_DESCRIPTOR_PROPERTIES_KHR: {
877 VkPhysicalDevicePushDescriptorPropertiesKHR *properties =
878 (VkPhysicalDevicePushDescriptorPropertiesKHR *) ext;
879 properties->maxPushDescriptors = MAX_PUSH_DESCRIPTORS;
880 break;
881 }
882 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_ID_PROPERTIES: {
883 VkPhysicalDeviceIDProperties *properties =
884 (VkPhysicalDeviceIDProperties *) ext;
885 memcpy(properties->driverUUID, pdevice->driver_uuid, VK_UUID_SIZE);
886 memcpy(properties->deviceUUID, pdevice->device_uuid, VK_UUID_SIZE);
887 properties->deviceLUIDValid = false;
888 break;
889 }
890 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MULTIVIEW_PROPERTIES: {
891 VkPhysicalDeviceMultiviewProperties *properties =
892 (VkPhysicalDeviceMultiviewProperties *) ext;
893 properties->maxMultiviewViewCount = MAX_VIEWS;
894 properties->maxMultiviewInstanceIndex = INT_MAX;
895 break;
896 }
897 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_POINT_CLIPPING_PROPERTIES: {
898 VkPhysicalDevicePointClippingProperties *properties =
899 (VkPhysicalDevicePointClippingProperties *) ext;
900 properties->pointClippingBehavior =
901 VK_POINT_CLIPPING_BEHAVIOR_ALL_CLIP_PLANES;
902 break;
903 }
904 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MAINTENANCE_3_PROPERTIES: {
905 VkPhysicalDeviceMaintenance3Properties *properties =
906 (VkPhysicalDeviceMaintenance3Properties *) ext;
907 /* Make sure everything is addressable by a signed 32-bit int, and
908 * our largest descriptors are 96 bytes. */
909 properties->maxPerSetDescriptors = (1ull << 31) / 96;
910 /* Our buffer size fields allow only this much */
911 properties->maxMemoryAllocationSize = 0xFFFFFFFFull;
912 break;
913 }
914 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TRANSFORM_FEEDBACK_PROPERTIES_EXT: {
915 VkPhysicalDeviceTransformFeedbackPropertiesEXT *properties =
916 (VkPhysicalDeviceTransformFeedbackPropertiesEXT *)ext;
917
918 properties->maxTransformFeedbackStreams = IR3_MAX_SO_STREAMS;
919 properties->maxTransformFeedbackBuffers = IR3_MAX_SO_BUFFERS;
920 properties->maxTransformFeedbackBufferSize = UINT32_MAX;
921 properties->maxTransformFeedbackStreamDataSize = 512;
922 properties->maxTransformFeedbackBufferDataSize = 512;
923 properties->maxTransformFeedbackBufferDataStride = 512;
924 properties->transformFeedbackQueries = true;
925 properties->transformFeedbackStreamsLinesTriangles = false;
926 properties->transformFeedbackRasterizationStreamSelect = false;
927 properties->transformFeedbackDraw = true;
928 break;
929 }
930 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SAMPLE_LOCATIONS_PROPERTIES_EXT: {
931 VkPhysicalDeviceSampleLocationsPropertiesEXT *properties =
932 (VkPhysicalDeviceSampleLocationsPropertiesEXT *)ext;
933 properties->sampleLocationSampleCounts = 0;
934 if (pdevice->supported_extensions.EXT_sample_locations) {
935 properties->sampleLocationSampleCounts =
936 VK_SAMPLE_COUNT_1_BIT | VK_SAMPLE_COUNT_2_BIT | VK_SAMPLE_COUNT_4_BIT;
937 }
938 properties->maxSampleLocationGridSize = (VkExtent2D) { 1, 1 };
939 properties->sampleLocationCoordinateRange[0] = 0.0f;
940 properties->sampleLocationCoordinateRange[1] = 0.9375f;
941 properties->sampleLocationSubPixelBits = 4;
942 properties->variableSampleLocations = true;
943 break;
944 }
945 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SAMPLER_FILTER_MINMAX_PROPERTIES: {
946 VkPhysicalDeviceSamplerFilterMinmaxProperties *properties =
947 (VkPhysicalDeviceSamplerFilterMinmaxProperties *)ext;
948 properties->filterMinmaxImageComponentMapping = true;
949 properties->filterMinmaxSingleComponentFormats = true;
950 break;
951 }
952
953 default:
954 break;
955 }
956 }
957 }
958
959 static const VkQueueFamilyProperties tu_queue_family_properties = {
960 .queueFlags =
961 VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT | VK_QUEUE_TRANSFER_BIT,
962 .queueCount = 1,
963 .timestampValidBits = 48,
964 .minImageTransferGranularity = { 1, 1, 1 },
965 };
966
967 void
968 tu_GetPhysicalDeviceQueueFamilyProperties(
969 VkPhysicalDevice physicalDevice,
970 uint32_t *pQueueFamilyPropertyCount,
971 VkQueueFamilyProperties *pQueueFamilyProperties)
972 {
973 VK_OUTARRAY_MAKE(out, pQueueFamilyProperties, pQueueFamilyPropertyCount);
974
975 vk_outarray_append(&out, p) { *p = tu_queue_family_properties; }
976 }
977
978 void
979 tu_GetPhysicalDeviceQueueFamilyProperties2(
980 VkPhysicalDevice physicalDevice,
981 uint32_t *pQueueFamilyPropertyCount,
982 VkQueueFamilyProperties2 *pQueueFamilyProperties)
983 {
984 VK_OUTARRAY_MAKE(out, pQueueFamilyProperties, pQueueFamilyPropertyCount);
985
986 vk_outarray_append(&out, p)
987 {
988 p->queueFamilyProperties = tu_queue_family_properties;
989 }
990 }
991
992 static uint64_t
993 tu_get_system_heap_size(void)
994 {
995 struct sysinfo info;
996 sysinfo(&info);
997
998 uint64_t total_ram = (uint64_t) info.totalram * (uint64_t) info.mem_unit;
999
1000 /* We don't want to burn too much ram with the GPU. If the user has 4GiB
1001 * or less, we use at most half. If they have more than 4GiB, we use 3/4.
1002 */
1003 uint64_t available_ram;
1004 if (total_ram <= 4ull * 1024ull * 1024ull * 1024ull)
1005 available_ram = total_ram / 2;
1006 else
1007 available_ram = total_ram * 3 / 4;
1008
1009 return available_ram;
1010 }
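/* A worked example of the policy above, illustrative only
 * (example_heap_size is a hypothetical standalone copy):
 *
 *    8 GiB of RAM -> 6 GiB heap (3/4); 4 GiB of RAM -> 2 GiB heap (1/2).
 */
#if 0
static uint64_t
example_heap_size(uint64_t total_ram)
{
   const uint64_t four_gib = 4ull * 1024 * 1024 * 1024;
   return total_ram <= four_gib ? total_ram / 2 : total_ram * 3 / 4;
}
#endif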
1011
1012 void
1013 tu_GetPhysicalDeviceMemoryProperties(
1014 VkPhysicalDevice physicalDevice,
1015 VkPhysicalDeviceMemoryProperties *pMemoryProperties)
1016 {
1017 pMemoryProperties->memoryHeapCount = 1;
1018 pMemoryProperties->memoryHeaps[0].size = tu_get_system_heap_size();
1019 pMemoryProperties->memoryHeaps[0].flags = VK_MEMORY_HEAP_DEVICE_LOCAL_BIT;
1020
1021 pMemoryProperties->memoryTypeCount = 1;
1022 pMemoryProperties->memoryTypes[0].propertyFlags =
1023 VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT |
1024 VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT |
1025 VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
1026 pMemoryProperties->memoryTypes[0].heapIndex = 0;
1027 }
1028
1029 void
1030 tu_GetPhysicalDeviceMemoryProperties2(
1031 VkPhysicalDevice physicalDevice,
1032 VkPhysicalDeviceMemoryProperties2 *pMemoryProperties)
1033 {
1034 tu_GetPhysicalDeviceMemoryProperties(
1035 physicalDevice, &pMemoryProperties->memoryProperties);
1036 }
1037
1038 static VkResult
1039 tu_queue_init(struct tu_device *device,
1040 struct tu_queue *queue,
1041 uint32_t queue_family_index,
1042 int idx,
1043 VkDeviceQueueCreateFlags flags)
1044 {
1045 queue->_loader_data.loaderMagic = ICD_LOADER_MAGIC;
1046 queue->device = device;
1047 queue->queue_family_index = queue_family_index;
1048 queue->queue_idx = idx;
1049 queue->flags = flags;
1050
1051 int ret = tu_drm_submitqueue_new(device, 0, &queue->msm_queue_id);
1052 if (ret)
1053 return VK_ERROR_INITIALIZATION_FAILED;
1054
1055 tu_fence_init(&queue->submit_fence, false);
1056
1057 return VK_SUCCESS;
1058 }
1059
1060 static void
1061 tu_queue_finish(struct tu_queue *queue)
1062 {
1063 tu_fence_finish(&queue->submit_fence);
1064 tu_drm_submitqueue_close(queue->device, queue->msm_queue_id);
1065 }
1066
1067 static int
1068 tu_get_device_extension_index(const char *name)
1069 {
1070 for (unsigned i = 0; i < TU_DEVICE_EXTENSION_COUNT; ++i) {
1071 if (strcmp(name, tu_device_extensions[i].extensionName) == 0)
1072 return i;
1073 }
1074 return -1;
1075 }
1076
1077 struct PACKED bcolor_entry {
1078 uint32_t fp32[4];
1079 uint16_t ui16[4];
1080 int16_t si16[4];
1081 uint16_t fp16[4];
1082 uint16_t rgb565;
1083 uint16_t rgb5a1;
1084 uint16_t rgba4;
1085 uint8_t __pad0[2];
1086 uint8_t ui8[4];
1087 int8_t si8[4];
1088 uint32_t rgb10a2;
1089 uint32_t z24; /* also s8? */
1090 uint16_t srgb[4]; /* appears to duplicate fp16[], but clamped, used for srgb */
1091 uint8_t __pad1[56];
1092 } border_color[] = {
1093 [VK_BORDER_COLOR_FLOAT_TRANSPARENT_BLACK] = {},
1094 [VK_BORDER_COLOR_INT_TRANSPARENT_BLACK] = {},
1095 [VK_BORDER_COLOR_FLOAT_OPAQUE_BLACK] = {
1096 .fp32[3] = 0x3f800000,
1097 .ui16[3] = 0xffff,
1098 .si16[3] = 0x7fff,
1099 .fp16[3] = 0x3c00,
1100 .rgb5a1 = 0x8000,
1101 .rgba4 = 0xf000,
1102 .ui8[3] = 0xff,
1103 .si8[3] = 0x7f,
1104 .rgb10a2 = 0xc0000000,
1105 .srgb[3] = 0x3c00,
1106 },
1107 [VK_BORDER_COLOR_INT_OPAQUE_BLACK] = {
1108 .fp32[3] = 1,
1109 .fp16[3] = 1,
1110 },
1111 [VK_BORDER_COLOR_FLOAT_OPAQUE_WHITE] = {
1112 .fp32[0 ... 3] = 0x3f800000,
1113 .ui16[0 ... 3] = 0xffff,
1114 .si16[0 ... 3] = 0x7fff,
1115 .fp16[0 ... 3] = 0x3c00,
1116 .rgb565 = 0xffff,
1117 .rgb5a1 = 0xffff,
1118 .rgba4 = 0xffff,
1119 .ui8[0 ... 3] = 0xff,
1120 .si8[0 ... 3] = 0x7f,
1121 .rgb10a2 = 0xffffffff,
1122 .z24 = 0xffffff,
1123 .srgb[0 ... 3] = 0x3c00,
1124 },
1125 [VK_BORDER_COLOR_INT_OPAQUE_WHITE] = {
1126 .fp32[0 ... 3] = 1,
1127 .fp16[0 ... 3] = 1,
1128 },
1129 };
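/* The table above is copied verbatim into the device->border_color BO in
 * tu_CreateDevice below; each VkBorderColor enum value indexes one
 * 128-byte entry. A sketch of computing an entry's GPU address,
 * illustrative only (example_border_color_iova is hypothetical):
 */
#if 0
static uint64_t
example_border_color_iova(struct tu_device *dev, VkBorderColor color)
{
   return dev->border_color.iova +
          (uint64_t) color * sizeof(struct bcolor_entry);
}
#endif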
1130
1131
1132 VkResult
1133 tu_CreateDevice(VkPhysicalDevice physicalDevice,
1134 const VkDeviceCreateInfo *pCreateInfo,
1135 const VkAllocationCallbacks *pAllocator,
1136 VkDevice *pDevice)
1137 {
1138 TU_FROM_HANDLE(tu_physical_device, physical_device, physicalDevice);
1139 VkResult result;
1140 struct tu_device *device;
1141
1142 /* Check enabled features */
1143 if (pCreateInfo->pEnabledFeatures) {
1144 VkPhysicalDeviceFeatures supported_features;
1145 tu_GetPhysicalDeviceFeatures(physicalDevice, &supported_features);
1146 VkBool32 *supported_feature = (VkBool32 *) &supported_features;
1147 VkBool32 *enabled_feature = (VkBool32 *) pCreateInfo->pEnabledFeatures;
1148 unsigned num_features =
1149 sizeof(VkPhysicalDeviceFeatures) / sizeof(VkBool32);
1150 for (uint32_t i = 0; i < num_features; i++) {
1151 if (enabled_feature[i] && !supported_feature[i])
1152 return vk_error(physical_device->instance,
1153 VK_ERROR_FEATURE_NOT_PRESENT);
1154 }
1155 }
1156
1157 device = vk_zalloc2(&physical_device->instance->alloc, pAllocator,
1158 sizeof(*device), 8, VK_SYSTEM_ALLOCATION_SCOPE_DEVICE);
1159 if (!device)
1160 return vk_error(physical_device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
1161
1162 device->_loader_data.loaderMagic = ICD_LOADER_MAGIC;
1163 device->instance = physical_device->instance;
1164 device->physical_device = physical_device;
1165
1166 if (pAllocator)
1167 device->alloc = *pAllocator;
1168 else
1169 device->alloc = physical_device->instance->alloc;
1170
1171 for (uint32_t i = 0; i < pCreateInfo->enabledExtensionCount; i++) {
1172 const char *ext_name = pCreateInfo->ppEnabledExtensionNames[i];
1173 int index = tu_get_device_extension_index(ext_name);
1174 if (index < 0 ||
1175 !physical_device->supported_extensions.extensions[index]) {
1176 vk_free(&device->alloc, device);
1177 return vk_error(physical_device->instance,
1178 VK_ERROR_EXTENSION_NOT_PRESENT);
1179 }
1180
1181 device->enabled_extensions.extensions[index] = true;
1182 }
1183
1184 for (unsigned i = 0; i < pCreateInfo->queueCreateInfoCount; i++) {
1185 const VkDeviceQueueCreateInfo *queue_create =
1186 &pCreateInfo->pQueueCreateInfos[i];
1187 uint32_t qfi = queue_create->queueFamilyIndex;
1188 device->queues[qfi] = vk_alloc(
1189 &device->alloc, queue_create->queueCount * sizeof(struct tu_queue),
1190 8, VK_SYSTEM_ALLOCATION_SCOPE_DEVICE);
1191 if (!device->queues[qfi]) {
1192 result = VK_ERROR_OUT_OF_HOST_MEMORY;
1193 goto fail_queues;
1194 }
1195
1196 memset(device->queues[qfi], 0,
1197 queue_create->queueCount * sizeof(struct tu_queue));
1198
1199 device->queue_count[qfi] = queue_create->queueCount;
1200
1201 for (unsigned q = 0; q < queue_create->queueCount; q++) {
1202 result = tu_queue_init(device, &device->queues[qfi][q], qfi, q,
1203 queue_create->flags);
1204 if (result != VK_SUCCESS)
1205 goto fail_queues;
1206 }
1207 }
1208
1209 device->compiler = ir3_compiler_create(NULL, physical_device->gpu_id);
1210 if (!device->compiler) {
1211 result = VK_ERROR_INITIALIZATION_FAILED;
1212 goto fail_queues;
1213 }
1212
1213 #define VSC_DATA_SIZE(pitch) ((pitch) * 32 + 0x100) /* extra size to store VSC_SIZE */
1214 #define VSC_DATA2_SIZE(pitch) ((pitch) * 32)
1215
1216 device->vsc_data_pitch = 0x440 * 4;
1217 device->vsc_data2_pitch = 0x1040 * 4;
1218
1219 result = tu_bo_init_new(device, &device->vsc_data, VSC_DATA_SIZE(device->vsc_data_pitch));
1220 if (result != VK_SUCCESS)
1221 goto fail_vsc_data;
1222
1223 result = tu_bo_init_new(device, &device->vsc_data2, VSC_DATA2_SIZE(device->vsc_data2_pitch));
1224 if (result != VK_SUCCESS)
1225 goto fail_vsc_data2;
1226
1227 STATIC_ASSERT(sizeof(struct bcolor_entry) == 128);
1228 result = tu_bo_init_new(device, &device->border_color, sizeof(border_color));
1229 if (result != VK_SUCCESS)
1230 goto fail_border_color;
1231
1232 result = tu_bo_map(device, &device->border_color);
1233 if (result != VK_SUCCESS)
1234 goto fail_border_color_map;
1235
1236 memcpy(device->border_color.map, border_color, sizeof(border_color));
1237
1238 VkPipelineCacheCreateInfo ci;
1239 ci.sType = VK_STRUCTURE_TYPE_PIPELINE_CACHE_CREATE_INFO;
1240 ci.pNext = NULL;
1241 ci.flags = 0;
1242 ci.pInitialData = NULL;
1243 ci.initialDataSize = 0;
1244 VkPipelineCache pc;
1245 result =
1246 tu_CreatePipelineCache(tu_device_to_handle(device), &ci, NULL, &pc);
1247 if (result != VK_SUCCESS)
1248 goto fail_pipeline_cache;
1249
1250 device->mem_cache = tu_pipeline_cache_from_handle(pc);
1251
1252 *pDevice = tu_device_to_handle(device);
1253 return VK_SUCCESS;
1254
1255 fail_pipeline_cache:
1256 fail_border_color_map:
1257 tu_bo_finish(device, &device->border_color);
1258
1259 fail_border_color:
1260 tu_bo_finish(device, &device->vsc_data2);
1261
1262 fail_vsc_data2:
1263 tu_bo_finish(device, &device->vsc_data);
1264
1265 fail_vsc_data:
1266 ralloc_free(device->compiler);
1267
1268 fail_queues:
1269 for (unsigned i = 0; i < TU_MAX_QUEUE_FAMILIES; i++) {
1270 for (unsigned q = 0; q < device->queue_count[i]; q++)
1271 tu_queue_finish(&device->queues[i][q]);
1272 if (device->queue_count[i])
1273 vk_free(&device->alloc, device->queues[i]);
1274 }
1275
1276 vk_free(&device->alloc, device);
1277 return result;
1278 }
1279
1280 void
1281 tu_DestroyDevice(VkDevice _device, const VkAllocationCallbacks *pAllocator)
1282 {
1283 TU_FROM_HANDLE(tu_device, device, _device);
1284
1285 if (!device)
1286 return;
1287
1288 tu_bo_finish(device, &device->vsc_data);
1289 tu_bo_finish(device, &device->vsc_data2);
1290
1291 for (unsigned i = 0; i < TU_MAX_QUEUE_FAMILIES; i++) {
1292 for (unsigned q = 0; q < device->queue_count[i]; q++)
1293 tu_queue_finish(&device->queues[i][q]);
1294 if (device->queue_count[i])
1295 vk_free(&device->alloc, device->queues[i]);
1296 }
1297
1298 /* the compiler does not use pAllocator */
1299 ralloc_free(device->compiler);
1300
1301 VkPipelineCache pc = tu_pipeline_cache_to_handle(device->mem_cache);
1302 tu_DestroyPipelineCache(tu_device_to_handle(device), pc, NULL);
1303
1304 vk_free(&device->alloc, device);
1305 }
1306
1307 VkResult
1308 tu_EnumerateInstanceLayerProperties(uint32_t *pPropertyCount,
1309 VkLayerProperties *pProperties)
1310 {
1311 *pPropertyCount = 0;
1312 return VK_SUCCESS;
1313 }
1314
1315 VkResult
1316 tu_EnumerateDeviceLayerProperties(VkPhysicalDevice physicalDevice,
1317 uint32_t *pPropertyCount,
1318 VkLayerProperties *pProperties)
1319 {
1320 *pPropertyCount = 0;
1321 return VK_SUCCESS;
1322 }
1323
1324 void
1325 tu_GetDeviceQueue2(VkDevice _device,
1326 const VkDeviceQueueInfo2 *pQueueInfo,
1327 VkQueue *pQueue)
1328 {
1329 TU_FROM_HANDLE(tu_device, device, _device);
1330 struct tu_queue *queue;
1331
1332 queue =
1333 &device->queues[pQueueInfo->queueFamilyIndex][pQueueInfo->queueIndex];
1334 if (pQueueInfo->flags != queue->flags) {
1335 /* From the Vulkan 1.1.70 spec:
1336 *
1337 * "The queue returned by vkGetDeviceQueue2 must have the same
1338 * flags value from this structure as that used at device
1339 * creation time in a VkDeviceQueueCreateInfo instance. If no
1340 * matching flags were specified at device creation time then
1341 * pQueue will return VK_NULL_HANDLE."
1342 */
1343 *pQueue = VK_NULL_HANDLE;
1344 return;
1345 }
1346
1347 *pQueue = tu_queue_to_handle(queue);
1348 }
1349
1350 void
1351 tu_GetDeviceQueue(VkDevice _device,
1352 uint32_t queueFamilyIndex,
1353 uint32_t queueIndex,
1354 VkQueue *pQueue)
1355 {
1356 const VkDeviceQueueInfo2 info =
1357 (VkDeviceQueueInfo2) { .sType = VK_STRUCTURE_TYPE_DEVICE_QUEUE_INFO_2,
1358 .queueFamilyIndex = queueFamilyIndex,
1359 .queueIndex = queueIndex };
1360
1361 tu_GetDeviceQueue2(_device, &info, pQueue);
1362 }
1363
1364 VkResult
1365 tu_QueueSubmit(VkQueue _queue,
1366 uint32_t submitCount,
1367 const VkSubmitInfo *pSubmits,
1368 VkFence _fence)
1369 {
1370 TU_FROM_HANDLE(tu_queue, queue, _queue);
1371
1372 for (uint32_t i = 0; i < submitCount; ++i) {
1373 const VkSubmitInfo *submit = pSubmits + i;
1374 const bool last_submit = (i == submitCount - 1);
1375 struct tu_bo_list bo_list;
1376 tu_bo_list_init(&bo_list);
1377
1378 uint32_t entry_count = 0;
1379 for (uint32_t j = 0; j < submit->commandBufferCount; ++j) {
1380 TU_FROM_HANDLE(tu_cmd_buffer, cmdbuf, submit->pCommandBuffers[j]);
1381 entry_count += cmdbuf->cs.entry_count;
1382 }
1383
1384 struct drm_msm_gem_submit_cmd cmds[entry_count];
1385 uint32_t entry_idx = 0;
1386 for (uint32_t j = 0; j < submit->commandBufferCount; ++j) {
1387 TU_FROM_HANDLE(tu_cmd_buffer, cmdbuf, submit->pCommandBuffers[j]);
1388 struct tu_cs *cs = &cmdbuf->cs;
1389 for (unsigned i = 0; i < cs->entry_count; ++i, ++entry_idx) {
1390 cmds[entry_idx].type = MSM_SUBMIT_CMD_BUF;
1391 cmds[entry_idx].submit_idx =
1392 tu_bo_list_add(&bo_list, cs->entries[i].bo,
1393 MSM_SUBMIT_BO_READ | MSM_SUBMIT_BO_DUMP);
1394 cmds[entry_idx].submit_offset = cs->entries[i].offset;
1395 cmds[entry_idx].size = cs->entries[i].size;
1396 cmds[entry_idx].pad = 0;
1397 cmds[entry_idx].nr_relocs = 0;
1398 cmds[entry_idx].relocs = 0;
1399 }
1400
1401 tu_bo_list_merge(&bo_list, &cmdbuf->bo_list);
1402 }
1403
1404 uint32_t flags = MSM_PIPE_3D0;
1405 if (last_submit) {
1406 flags |= MSM_SUBMIT_FENCE_FD_OUT;
1407 }
1408
1409 struct drm_msm_gem_submit req = {
1410 .flags = flags,
1411 .queueid = queue->msm_queue_id,
1412 .bos = (uint64_t)(uintptr_t) bo_list.bo_infos,
1413 .nr_bos = bo_list.count,
1414 .cmds = (uint64_t)(uintptr_t)cmds,
1415 .nr_cmds = entry_count,
1416 };
1417
1418 int ret = drmCommandWriteRead(queue->device->physical_device->local_fd,
1419 DRM_MSM_GEM_SUBMIT,
1420 &req, sizeof(req));
1421 if (ret) {
1422 fprintf(stderr, "submit failed: %s\n", strerror(errno));
1423 abort();
1424 }
1425
1426 tu_bo_list_destroy(&bo_list);
1427
1428 if (last_submit) {
1429 /* no need to merge fences as queue execution is serialized */
1430 tu_fence_update_fd(&queue->submit_fence, req.fence_fd);
1431 }
1432 }
1433
1434 if (_fence != VK_NULL_HANDLE) {
1435 TU_FROM_HANDLE(tu_fence, fence, _fence);
1436 tu_fence_copy(fence, &queue->submit_fence);
1437 }
1438
1439 return VK_SUCCESS;
1440 }
1441
1442 VkResult
1443 tu_QueueWaitIdle(VkQueue _queue)
1444 {
1445 TU_FROM_HANDLE(tu_queue, queue, _queue);
1446
1447 tu_fence_wait_idle(&queue->submit_fence);
1448
1449 return VK_SUCCESS;
1450 }
1451
1452 VkResult
1453 tu_DeviceWaitIdle(VkDevice _device)
1454 {
1455 TU_FROM_HANDLE(tu_device, device, _device);
1456
1457 for (unsigned i = 0; i < TU_MAX_QUEUE_FAMILIES; i++) {
1458 for (unsigned q = 0; q < device->queue_count[i]; q++) {
1459 tu_QueueWaitIdle(tu_queue_to_handle(&device->queues[i][q]));
1460 }
1461 }
1462 return VK_SUCCESS;
1463 }
1464
1465 VkResult
1466 tu_ImportSemaphoreFdKHR(VkDevice _device,
1467 const VkImportSemaphoreFdInfoKHR *pImportSemaphoreFdInfo)
1468 {
1469 tu_stub();
1470
1471 return VK_SUCCESS;
1472 }
1473
1474 VkResult
1475 tu_GetSemaphoreFdKHR(VkDevice _device,
1476 const VkSemaphoreGetFdInfoKHR *pGetFdInfo,
1477 int *pFd)
1478 {
1479 tu_stub();
1480
1481 return VK_SUCCESS;
1482 }
1483
1484 VkResult
1485 tu_ImportFenceFdKHR(VkDevice _device,
1486 const VkImportFenceFdInfoKHR *pImportFenceFdInfo)
1487 {
1488 tu_stub();
1489
1490 return VK_SUCCESS;
1491 }
1492
1493 VkResult
1494 tu_GetFenceFdKHR(VkDevice _device,
1495 const VkFenceGetFdInfoKHR *pGetFdInfo,
1496 int *pFd)
1497 {
1498 tu_stub();
1499
1500 return VK_SUCCESS;
1501 }
1502
1503 VkResult
1504 tu_EnumerateInstanceExtensionProperties(const char *pLayerName,
1505 uint32_t *pPropertyCount,
1506 VkExtensionProperties *pProperties)
1507 {
1508 VK_OUTARRAY_MAKE(out, pProperties, pPropertyCount);
1509
1510 /* We support no layers */
1511 if (pLayerName)
1512 return vk_error(NULL, VK_ERROR_LAYER_NOT_PRESENT);
1513
1514 for (int i = 0; i < TU_INSTANCE_EXTENSION_COUNT; i++) {
1515 if (tu_supported_instance_extensions.extensions[i]) {
1516 vk_outarray_append(&out, prop) { *prop = tu_instance_extensions[i]; }
1517 }
1518 }
1519
1520 return vk_outarray_status(&out);
1521 }
1522
1523 VkResult
1524 tu_EnumerateDeviceExtensionProperties(VkPhysicalDevice physicalDevice,
1525 const char *pLayerName,
1526 uint32_t *pPropertyCount,
1527 VkExtensionProperties *pProperties)
1528 {
1530 TU_FROM_HANDLE(tu_physical_device, device, physicalDevice);
1531 VK_OUTARRAY_MAKE(out, pProperties, pPropertyCount);
1532
1533 /* We support no layers */
1534 if (pLayerName)
1535 return vk_error(NULL, VK_ERROR_LAYER_NOT_PRESENT);
1536
1537 for (int i = 0; i < TU_DEVICE_EXTENSION_COUNT; i++) {
1538 if (device->supported_extensions.extensions[i]) {
1539 vk_outarray_append(&out, prop) { *prop = tu_device_extensions[i]; }
1540 }
1541 }
1542
1543 return vk_outarray_status(&out);
1544 }
1545
1546 PFN_vkVoidFunction
1547 tu_GetInstanceProcAddr(VkInstance _instance, const char *pName)
1548 {
1549 TU_FROM_HANDLE(tu_instance, instance, _instance);
1550
1551 return tu_lookup_entrypoint_checked(
1552 pName, instance ? instance->api_version : 0,
1553 instance ? &instance->enabled_extensions : NULL, NULL);
1554 }
1555
1556 /* The loader wants us to expose a second GetInstanceProcAddr function
1557 * to work around certain LD_PRELOAD issues seen in apps.
1558 */
1559 PUBLIC
1560 VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL
1561 vk_icdGetInstanceProcAddr(VkInstance instance, const char *pName);
1562
1563 PUBLIC
1564 VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL
1565 vk_icdGetInstanceProcAddr(VkInstance instance, const char *pName)
1566 {
1567 return tu_GetInstanceProcAddr(instance, pName);
1568 }
1569
1570 PFN_vkVoidFunction
1571 tu_GetDeviceProcAddr(VkDevice _device, const char *pName)
1572 {
1573 TU_FROM_HANDLE(tu_device, device, _device);
1574
1575 return tu_lookup_entrypoint_checked(pName, device->instance->api_version,
1576 &device->instance->enabled_extensions,
1577 &device->enabled_extensions);
1578 }
1579
1580 static VkResult
1581 tu_alloc_memory(struct tu_device *device,
1582 const VkMemoryAllocateInfo *pAllocateInfo,
1583 const VkAllocationCallbacks *pAllocator,
1584 VkDeviceMemory *pMem)
1585 {
1586 struct tu_device_memory *mem;
1587 VkResult result;
1588
1589 assert(pAllocateInfo->sType == VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO);
1590
1591 if (pAllocateInfo->allocationSize == 0) {
1592 /* Apparently, this is allowed */
1593 *pMem = VK_NULL_HANDLE;
1594 return VK_SUCCESS;
1595 }
1596
1597 mem = vk_alloc2(&device->alloc, pAllocator, sizeof(*mem), 8,
1598 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
1599 if (mem == NULL)
1600 return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
1601
1602 const VkImportMemoryFdInfoKHR *fd_info =
1603 vk_find_struct_const(pAllocateInfo->pNext, IMPORT_MEMORY_FD_INFO_KHR);
1604 if (fd_info && !fd_info->handleType)
1605 fd_info = NULL;
1606
1607 if (fd_info) {
1608 assert(fd_info->handleType ==
1609 VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT ||
1610 fd_info->handleType ==
1611 VK_EXTERNAL_MEMORY_HANDLE_TYPE_DMA_BUF_BIT_EXT);
1612
1613 /*
1614 * TODO Importing the same fd twice gives us the same handle without
1615 * reference counting. We need to maintain a per-instance handle-to-bo
1616 * table and add reference count to tu_bo.
1617 */
1618 result = tu_bo_init_dmabuf(device, &mem->bo,
1619 pAllocateInfo->allocationSize, fd_info->fd);
1620 if (result == VK_SUCCESS) {
1621 /* take ownership and close the fd */
1622 close(fd_info->fd);
1623 }
1624 } else {
1625 result =
1626 tu_bo_init_new(device, &mem->bo, pAllocateInfo->allocationSize);
1627 }
1628
1629 if (result != VK_SUCCESS) {
1630 vk_free2(&device->alloc, pAllocator, mem);
1631 return result;
1632 }
1633
1634 mem->size = pAllocateInfo->allocationSize;
1635 mem->type_index = pAllocateInfo->memoryTypeIndex;
1636
1637 mem->map = NULL;
1638 mem->user_ptr = NULL;
1639
1640 *pMem = tu_device_memory_to_handle(mem);
1641
1642 return VK_SUCCESS;
1643 }
1644
1645 VkResult
1646 tu_AllocateMemory(VkDevice _device,
1647 const VkMemoryAllocateInfo *pAllocateInfo,
1648 const VkAllocationCallbacks *pAllocator,
1649 VkDeviceMemory *pMem)
1650 {
1651 TU_FROM_HANDLE(tu_device, device, _device);
1652 return tu_alloc_memory(device, pAllocateInfo, pAllocator, pMem);
1653 }
1654
1655 void
1656 tu_FreeMemory(VkDevice _device,
1657 VkDeviceMemory _mem,
1658 const VkAllocationCallbacks *pAllocator)
1659 {
1660 TU_FROM_HANDLE(tu_device, device, _device);
1661 TU_FROM_HANDLE(tu_device_memory, mem, _mem);
1662
1663 if (mem == NULL)
1664 return;
1665
1666 tu_bo_finish(device, &mem->bo);
1667 vk_free2(&device->alloc, pAllocator, mem);
1668 }
1669
1670 VkResult
1671 tu_MapMemory(VkDevice _device,
1672 VkDeviceMemory _memory,
1673 VkDeviceSize offset,
1674 VkDeviceSize size,
1675 VkMemoryMapFlags flags,
1676 void **ppData)
1677 {
1678 TU_FROM_HANDLE(tu_device, device, _device);
1679 TU_FROM_HANDLE(tu_device_memory, mem, _memory);
1680 VkResult result;
1681
1682 if (mem == NULL) {
1683 *ppData = NULL;
1684 return VK_SUCCESS;
1685 }
1686
1687 if (mem->user_ptr) {
1688 *ppData = mem->user_ptr;
1689 } else if (!mem->map) {
1690 result = tu_bo_map(device, &mem->bo);
1691 if (result != VK_SUCCESS)
1692 return result;
1693 *ppData = mem->map = mem->bo.map;
1694 } else
1695 *ppData = mem->map;
1696
1697 if (*ppData) {
1698 *ppData += offset;
1699 return VK_SUCCESS;
1700 }
1701
1702 return vk_error(device->instance, VK_ERROR_MEMORY_MAP_FAILED);
1703 }
1704
1705 void
1706 tu_UnmapMemory(VkDevice _device, VkDeviceMemory _memory)
1707 {
1708 /* I do not see any unmapping done by the freedreno Gallium driver. */
1709 }
1710
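/* The flush/invalidate entry points below can be no-ops: the only memory
 * type exposed in tu_GetPhysicalDeviceMemoryProperties() is
 * HOST_VISIBLE | HOST_COHERENT, so mapped ranges are always coherent.
 */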
1711 VkResult
1712 tu_FlushMappedMemoryRanges(VkDevice _device,
1713 uint32_t memoryRangeCount,
1714 const VkMappedMemoryRange *pMemoryRanges)
1715 {
1716 return VK_SUCCESS;
1717 }
1718
1719 VkResult
1720 tu_InvalidateMappedMemoryRanges(VkDevice _device,
1721 uint32_t memoryRangeCount,
1722 const VkMappedMemoryRange *pMemoryRanges)
1723 {
1724 return VK_SUCCESS;
1725 }
1726
1727 void
1728 tu_GetBufferMemoryRequirements(VkDevice _device,
1729 VkBuffer _buffer,
1730 VkMemoryRequirements *pMemoryRequirements)
1731 {
1732 TU_FROM_HANDLE(tu_buffer, buffer, _buffer);
1733
1734 pMemoryRequirements->memoryTypeBits = 1;
1735 pMemoryRequirements->alignment = 64;
1736 pMemoryRequirements->size =
1737 align64(buffer->size, pMemoryRequirements->alignment);
1738 }
1739
1740 void
1741 tu_GetBufferMemoryRequirements2(
1742 VkDevice device,
1743 const VkBufferMemoryRequirementsInfo2 *pInfo,
1744 VkMemoryRequirements2 *pMemoryRequirements)
1745 {
1746 tu_GetBufferMemoryRequirements(device, pInfo->buffer,
1747 &pMemoryRequirements->memoryRequirements);
1748 }
1749
1750 void
1751 tu_GetImageMemoryRequirements(VkDevice _device,
1752 VkImage _image,
1753 VkMemoryRequirements *pMemoryRequirements)
1754 {
1755 TU_FROM_HANDLE(tu_image, image, _image);
1756
1757 pMemoryRequirements->memoryTypeBits = 1;
1758 pMemoryRequirements->size = image->layout.size;
1759 pMemoryRequirements->alignment = image->layout.base_align;
1760 }
1761
1762 void
1763 tu_GetImageMemoryRequirements2(VkDevice device,
1764 const VkImageMemoryRequirementsInfo2 *pInfo,
1765 VkMemoryRequirements2 *pMemoryRequirements)
1766 {
1767 tu_GetImageMemoryRequirements(device, pInfo->image,
1768 &pMemoryRequirements->memoryRequirements);
1769 }
1770
1771 void
1772 tu_GetImageSparseMemoryRequirements(
1773 VkDevice device,
1774 VkImage image,
1775 uint32_t *pSparseMemoryRequirementCount,
1776 VkSparseImageMemoryRequirements *pSparseMemoryRequirements)
1777 {
1778 tu_stub();
1779 }
1780
1781 void
1782 tu_GetImageSparseMemoryRequirements2(
1783 VkDevice device,
1784 const VkImageSparseMemoryRequirementsInfo2 *pInfo,
1785 uint32_t *pSparseMemoryRequirementCount,
1786 VkSparseImageMemoryRequirements2 *pSparseMemoryRequirements)
1787 {
1788 tu_stub();
1789 }
1790
1791 void
1792 tu_GetDeviceMemoryCommitment(VkDevice device,
1793 VkDeviceMemory memory,
1794 VkDeviceSize *pCommittedMemoryInBytes)
1795 {
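/* Lazily-allocated memory types aren't exposed, so there is nothing
 * meaningful to report here; zero is a safe answer.
 */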
1796 *pCommittedMemoryInBytes = 0;
1797 }
1798
1799 VkResult
1800 tu_BindBufferMemory2(VkDevice device,
1801 uint32_t bindInfoCount,
1802 const VkBindBufferMemoryInfo *pBindInfos)
1803 {
1804 for (uint32_t i = 0; i < bindInfoCount; ++i) {
1805 TU_FROM_HANDLE(tu_device_memory, mem, pBindInfos[i].memory);
1806 TU_FROM_HANDLE(tu_buffer, buffer, pBindInfos[i].buffer);
1807
1808 if (mem) {
1809 buffer->bo = &mem->bo;
1810 buffer->bo_offset = pBindInfos[i].memoryOffset;
1811 } else {
1812 buffer->bo = NULL;
1813 }
1814 }
1815 return VK_SUCCESS;
1816 }
1817
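/* The Vulkan 1.0 entry point is a thin wrapper: build a single
 * VkBindBufferMemoryInfo and forward to the ..2 variant above.
 */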
1818 VkResult
1819 tu_BindBufferMemory(VkDevice device,
1820 VkBuffer buffer,
1821 VkDeviceMemory memory,
1822 VkDeviceSize memoryOffset)
1823 {
1824 const VkBindBufferMemoryInfo info = {
1825 .sType = VK_STRUCTURE_TYPE_BIND_BUFFER_MEMORY_INFO,
1826 .buffer = buffer,
1827 .memory = memory,
1828 .memoryOffset = memoryOffset
1829 };
1830
1831 return tu_BindBufferMemory2(device, 1, &info);
1832 }
1833
1834 VkResult
1835 tu_BindImageMemory2(VkDevice device,
1836 uint32_t bindInfoCount,
1837 const VkBindImageMemoryInfo *pBindInfos)
1838 {
1839 for (uint32_t i = 0; i < bindInfoCount; ++i) {
1840 TU_FROM_HANDLE(tu_image, image, pBindInfos[i].image);
1841 TU_FROM_HANDLE(tu_device_memory, mem, pBindInfos[i].memory);
1842
1843 if (mem) {
1844 image->bo = &mem->bo;
1845 image->bo_offset = pBindInfos[i].memoryOffset;
1846 } else {
1847 image->bo = NULL;
1848 image->bo_offset = 0;
1849 }
1850 }
1851
1852 return VK_SUCCESS;
1853 }
1854
1855 VkResult
1856 tu_BindImageMemory(VkDevice device,
1857 VkImage image,
1858 VkDeviceMemory memory,
1859 VkDeviceSize memoryOffset)
1860 {
1861 const VkBindImageMemoryInfo info = {
1862 .sType = VK_STRUCTURE_TYPE_BIND_IMAGE_MEMORY_INFO,
1863 .image = image,
1864 .memory = memory,
1865 .memoryOffset = memoryOffset
1866 };
1867
1868 return tu_BindImageMemory2(device, 1, &info);
1869 }
1870
1871 VkResult
1872 tu_QueueBindSparse(VkQueue _queue,
1873 uint32_t bindInfoCount,
1874 const VkBindSparseInfo *pBindInfo,
1875 VkFence _fence)
1876 {
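/* Sparse binding is not implemented; accept the request as a no-op. */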
1877 return VK_SUCCESS;
1878 }
1879
1880 /* Queue semaphore functions */
1881
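/* tu_semaphore currently carries no state: creation only reserves the
 * handle, and nothing is initialized beyond the allocation itself.
 */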
1882 VkResult
1883 tu_CreateSemaphore(VkDevice _device,
1884 const VkSemaphoreCreateInfo *pCreateInfo,
1885 const VkAllocationCallbacks *pAllocator,
1886 VkSemaphore *pSemaphore)
1887 {
1888 TU_FROM_HANDLE(tu_device, device, _device);
1889
1890 struct tu_semaphore *sem =
1891 vk_alloc2(&device->alloc, pAllocator, sizeof(*sem), 8,
1892 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
1893 if (!sem)
1894 return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
1895
1896 *pSemaphore = tu_semaphore_to_handle(sem);
1897 return VK_SUCCESS;
1898 }
1899
1900 void
1901 tu_DestroySemaphore(VkDevice _device,
1902 VkSemaphore _semaphore,
1903 const VkAllocationCallbacks *pAllocator)
1904 {
1905 TU_FROM_HANDLE(tu_device, device, _device);
1906 TU_FROM_HANDLE(tu_semaphore, sem, _semaphore);
1907 if (!_semaphore)
1908 return;
1909
1910 vk_free2(&device->alloc, pAllocator, sem);
1911 }
1912
1913 VkResult
1914 tu_CreateEvent(VkDevice _device,
1915 const VkEventCreateInfo *pCreateInfo,
1916 const VkAllocationCallbacks *pAllocator,
1917 VkEvent *pEvent)
1918 {
1919 TU_FROM_HANDLE(tu_device, device, _device);
1920 struct tu_event *event =
1921 vk_alloc2(&device->alloc, pAllocator, sizeof(*event), 8,
1922 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
1923
1924 if (!event)
1925 return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
1926
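/* Back the event with one 4 KiB page of GPU-visible memory. The first
 * uint64_t holds the state (1 = set, 0 = reset), so it can be read and
 * written from the CPU here and, presumably, from the GPU command stream.
 */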
1927 VkResult result = tu_bo_init_new(device, &event->bo, 0x1000);
1928 if (result != VK_SUCCESS)
1929 goto fail_alloc;
1930
1931 result = tu_bo_map(device, &event->bo);
1932 if (result != VK_SUCCESS)
1933 goto fail_map;
1934
1935 *pEvent = tu_event_to_handle(event);
1936
1937 return VK_SUCCESS;
1938
1939 fail_map:
1940 tu_bo_finish(device, &event->bo);
1941 fail_alloc:
1942 vk_free2(&device->alloc, pAllocator, event);
1943 return vk_error(device->instance, result);
1944 }
1945
1946 void
1947 tu_DestroyEvent(VkDevice _device,
1948 VkEvent _event,
1949 const VkAllocationCallbacks *pAllocator)
1950 {
1951 TU_FROM_HANDLE(tu_device, device, _device);
1952 TU_FROM_HANDLE(tu_event, event, _event);
1953
1954 if (!event)
1955 return;
1956
1957 tu_bo_finish(device, &event->bo);
1958 vk_free2(&device->alloc, pAllocator, event);
1959 }
1960
1961 VkResult
1962 tu_GetEventStatus(VkDevice _device, VkEvent _event)
1963 {
1964 TU_FROM_HANDLE(tu_event, event, _event);
1965
1966 if (*(uint64_t*) event->bo.map == 1)
1967 return VK_EVENT_SET;
1968 return VK_EVENT_RESET;
1969 }
1970
1971 VkResult
1972 tu_SetEvent(VkDevice _device, VkEvent _event)
1973 {
1974 TU_FROM_HANDLE(tu_event, event, _event);
1975 *(uint64_t*) event->bo.map = 1;
1976
1977 return VK_SUCCESS;
1978 }
1979
1980 VkResult
1981 tu_ResetEvent(VkDevice _device, VkEvent _event)
1982 {
1983 TU_FROM_HANDLE(tu_event, event, _event);
1984 *(uint64_t*) event->bo.map = 0;
1985
1986 return VK_SUCCESS;
1987 }
1988
1989 VkResult
1990 tu_CreateBuffer(VkDevice _device,
1991 const VkBufferCreateInfo *pCreateInfo,
1992 const VkAllocationCallbacks *pAllocator,
1993 VkBuffer *pBuffer)
1994 {
1995 TU_FROM_HANDLE(tu_device, device, _device);
1996 struct tu_buffer *buffer;
1997
1998 assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO);
1999
2000 buffer = vk_alloc2(&device->alloc, pAllocator, sizeof(*buffer), 8,
2001 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
2002 if (buffer == NULL)
2003 return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
2004
2005 buffer->size = pCreateInfo->size;
2006 buffer->usage = pCreateInfo->usage;
2007 buffer->flags = pCreateInfo->flags;
2008
2009 *pBuffer = tu_buffer_to_handle(buffer);
2010
2011 return VK_SUCCESS;
2012 }
2013
2014 void
2015 tu_DestroyBuffer(VkDevice _device,
2016 VkBuffer _buffer,
2017 const VkAllocationCallbacks *pAllocator)
2018 {
2019 TU_FROM_HANDLE(tu_device, device, _device);
2020 TU_FROM_HANDLE(tu_buffer, buffer, _buffer);
2021
2022 if (!buffer)
2023 return;
2024
2025 vk_free2(&device->alloc, pAllocator, buffer);
2026 }
2027
2028 VkResult
2029 tu_CreateFramebuffer(VkDevice _device,
2030 const VkFramebufferCreateInfo *pCreateInfo,
2031 const VkAllocationCallbacks *pAllocator,
2032 VkFramebuffer *pFramebuffer)
2033 {
2034 TU_FROM_HANDLE(tu_device, device, _device);
2035 struct tu_framebuffer *framebuffer;
2036
2037 assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO);
2038
2039 size_t size = sizeof(*framebuffer) + sizeof(struct tu_attachment_info) *
2040 pCreateInfo->attachmentCount;
2041 framebuffer = vk_alloc2(&device->alloc, pAllocator, size, 8,
2042 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
2043 if (framebuffer == NULL)
2044 return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
2045
2046 framebuffer->attachment_count = pCreateInfo->attachmentCount;
2047 framebuffer->width = pCreateInfo->width;
2048 framebuffer->height = pCreateInfo->height;
2049 framebuffer->layers = pCreateInfo->layers;
2050 for (uint32_t i = 0; i < pCreateInfo->attachmentCount; i++) {
2051 VkImageView _iview = pCreateInfo->pAttachments[i];
2052 struct tu_image_view *iview = tu_image_view_from_handle(_iview);
2053 framebuffer->attachments[i].attachment = iview;
2054 }
2055
2056 *pFramebuffer = tu_framebuffer_to_handle(framebuffer);
2057 return VK_SUCCESS;
2058 }
2059
2060 void
2061 tu_DestroyFramebuffer(VkDevice _device,
2062 VkFramebuffer _fb,
2063 const VkAllocationCallbacks *pAllocator)
2064 {
2065 TU_FROM_HANDLE(tu_device, device, _device);
2066 TU_FROM_HANDLE(tu_framebuffer, fb, _fb);
2067
2068 if (!fb)
2069 return;
2070 vk_free2(&device->alloc, pAllocator, fb);
2071 }
2072
2073 static enum a6xx_tex_clamp
2074 tu6_tex_wrap(VkSamplerAddressMode address_mode)
2075 {
2076 switch (address_mode) {
2077 case VK_SAMPLER_ADDRESS_MODE_REPEAT:
2078 return A6XX_TEX_REPEAT;
2079 case VK_SAMPLER_ADDRESS_MODE_MIRRORED_REPEAT:
2080 return A6XX_TEX_MIRROR_REPEAT;
2081 case VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE:
2082 return A6XX_TEX_CLAMP_TO_EDGE;
2083 case VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_BORDER:
2084 return A6XX_TEX_CLAMP_TO_BORDER;
2085 case VK_SAMPLER_ADDRESS_MODE_MIRROR_CLAMP_TO_EDGE:
2086 /* mirror-clamp only works for power-of-two sizes; anything else would need to be emulated */
2087 return A6XX_TEX_MIRROR_CLAMP;
2088 default:
2089 unreachable("illegal tex wrap mode");
2090 break;
2091 }
2092 }
2093
2094 static enum a6xx_tex_filter
2095 tu6_tex_filter(VkFilter filter, unsigned aniso)
2096 {
2097 switch (filter) {
2098 case VK_FILTER_NEAREST:
2099 return A6XX_TEX_NEAREST;
2100 case VK_FILTER_LINEAR:
2101 return aniso ? A6XX_TEX_ANISO : A6XX_TEX_LINEAR;
2102 case VK_FILTER_CUBIC_EXT:
2103 return A6XX_TEX_CUBIC;
2104 default:
2105 unreachable("illegal texture filter");
2106 break;
2107 }
2108 }
2109
2110 static inline enum adreno_compare_func
2111 tu6_compare_func(VkCompareOp op)
2112 {
2113 return (enum adreno_compare_func) op;
2114 }
2115
2116 static void
2117 tu_init_sampler(struct tu_device *device,
2118 struct tu_sampler *sampler,
2119 const VkSamplerCreateInfo *pCreateInfo)
2120 {
2121 const struct VkSamplerReductionModeCreateInfo *reduction =
2122 vk_find_struct_const(pCreateInfo->pNext, SAMPLER_REDUCTION_MODE_CREATE_INFO);
2123
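/* maxAnisotropy in [1,16] maps to the log2-encoded ANISO field: 1x -> 0
 * (disabled) up to 16x -> 4. E.g. 8x: util_last_bit(MIN2(8 >> 1, 8)) =
 * util_last_bit(4) = 3.
 */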
2124 unsigned aniso = pCreateInfo->anisotropyEnable ?
2125 util_last_bit(MIN2((uint32_t)pCreateInfo->maxAnisotropy >> 1, 8)) : 0;
2126 bool miplinear = (pCreateInfo->mipmapMode == VK_SAMPLER_MIPMAP_MODE_LINEAR);
2127
2128 sampler->descriptor[0] =
2129 COND(miplinear, A6XX_TEX_SAMP_0_MIPFILTER_LINEAR_NEAR) |
2130 A6XX_TEX_SAMP_0_XY_MAG(tu6_tex_filter(pCreateInfo->magFilter, aniso)) |
2131 A6XX_TEX_SAMP_0_XY_MIN(tu6_tex_filter(pCreateInfo->minFilter, aniso)) |
2132 A6XX_TEX_SAMP_0_ANISO(aniso) |
2133 A6XX_TEX_SAMP_0_WRAP_S(tu6_tex_wrap(pCreateInfo->addressModeU)) |
2134 A6XX_TEX_SAMP_0_WRAP_T(tu6_tex_wrap(pCreateInfo->addressModeV)) |
2135 A6XX_TEX_SAMP_0_WRAP_R(tu6_tex_wrap(pCreateInfo->addressModeW)) |
2136 A6XX_TEX_SAMP_0_LOD_BIAS(pCreateInfo->mipLodBias);
2137 sampler->descriptor[1] =
2138 /* COND(!cso->seamless_cube_map, A6XX_TEX_SAMP_1_CUBEMAPSEAMLESSFILTOFF) | */
2139 COND(pCreateInfo->unnormalizedCoordinates, A6XX_TEX_SAMP_1_UNNORM_COORDS) |
2140 A6XX_TEX_SAMP_1_MIN_LOD(pCreateInfo->minLod) |
2141 A6XX_TEX_SAMP_1_MAX_LOD(pCreateInfo->maxLod) |
2142 COND(pCreateInfo->compareEnable,
2143 A6XX_TEX_SAMP_1_COMPARE_FUNC(tu6_compare_func(pCreateInfo->compareOp)));
2144 /* This is an offset into the border_color BO, which we fill with all the
2145 * possible Vulkan border colors in the correct order, so we can just use
2146 * the Vulkan enum with no translation necessary.
2147 */
2148 sampler->descriptor[2] =
2149 A6XX_TEX_SAMP_2_BCOLOR_OFFSET((unsigned) pCreateInfo->borderColor *
2150 sizeof(struct bcolor_entry));
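/* E.g. VK_BORDER_COLOR_FLOAT_TRANSPARENT_BLACK (0) lands at offset 0 and
 * VK_BORDER_COLOR_INT_OPAQUE_WHITE (5) at 5 * sizeof(struct bcolor_entry).
 */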
2151 sampler->descriptor[3] = 0;
2152
2153 if (reduction) {
2154 /* note: vulkan enum matches hw */
2155 sampler->descriptor[2] |= A6XX_TEX_SAMP_2_REDUCTION_MODE(reduction->reductionMode);
2156 }
2157
2158 /* TODO:
2159 * A6XX_TEX_SAMP_1_MIPFILTER_LINEAR_FAR disables mipmapping, but Vulkan has no NONE mipmap mode to map it to.
2160 */
2161 }
2162
2163 VkResult
2164 tu_CreateSampler(VkDevice _device,
2165 const VkSamplerCreateInfo *pCreateInfo,
2166 const VkAllocationCallbacks *pAllocator,
2167 VkSampler *pSampler)
2168 {
2169 TU_FROM_HANDLE(tu_device, device, _device);
2170 struct tu_sampler *sampler;
2171
2172 assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_SAMPLER_CREATE_INFO);
2173
2174 sampler = vk_alloc2(&device->alloc, pAllocator, sizeof(*sampler), 8,
2175 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
2176 if (!sampler)
2177 return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
2178
2179 tu_init_sampler(device, sampler, pCreateInfo);
2180 *pSampler = tu_sampler_to_handle(sampler);
2181
2182 return VK_SUCCESS;
2183 }
2184
2185 void
2186 tu_DestroySampler(VkDevice _device,
2187 VkSampler _sampler,
2188 const VkAllocationCallbacks *pAllocator)
2189 {
2190 TU_FROM_HANDLE(tu_device, device, _device);
2191 TU_FROM_HANDLE(tu_sampler, sampler, _sampler);
2192
2193 if (!sampler)
2194 return;
2195 vk_free2(&device->alloc, pAllocator, sampler);
2196 }
2197
2198 /* vk_icd.h does not declare this function, so we declare it here to
2199 * suppress Wmissing-prototypes.
2200 */
2201 PUBLIC VKAPI_ATTR VkResult VKAPI_CALL
2202 vk_icdNegotiateLoaderICDInterfaceVersion(uint32_t *pSupportedVersion);
2203
2204 PUBLIC VKAPI_ATTR VkResult VKAPI_CALL
2205 vk_icdNegotiateLoaderICDInterfaceVersion(uint32_t *pSupportedVersion)
2206 {
2207 /* For the full details on loader interface versioning, see
2208 * <https://github.com/KhronosGroup/Vulkan-LoaderAndValidationLayers/blob/master/loader/LoaderAndLayerInterface.md>.
2209 * What follows is a condensed summary, to help you navigate the large and
2210 * confusing official doc.
2211 *
2212 * - Loader interface v0 is incompatible with later versions. We don't
2213 * support it.
2214 *
2215 * - In loader interface v1:
2216 * - The first ICD entrypoint called by the loader is
2217 * vk_icdGetInstanceProcAddr(). The ICD must statically expose this
2218 * entrypoint.
2219 * - The ICD must statically expose no other Vulkan symbol unless it
2220 * is linked with -Bsymbolic.
2221 * - Each dispatchable Vulkan handle created by the ICD must be
2222 * a pointer to a struct whose first member is VK_LOADER_DATA. The
2223 * ICD must initialize VK_LOADER_DATA.loadMagic to
2224 * ICD_LOADER_MAGIC.
2225 * - The loader implements vkCreate{PLATFORM}SurfaceKHR() and
2226 * vkDestroySurfaceKHR(). The ICD must be capable of working with
2227 * such loader-managed surfaces.
2228 *
2229 * - Loader interface v2 differs from v1 in:
2230 * - The first ICD entrypoint called by the loader is
2231 * vk_icdNegotiateLoaderICDInterfaceVersion(). The ICD must
2232 * statically expose this entrypoint.
2233 *
2234 * - Loader interface v3 differs from v2 in:
2235 * - The ICD must implement vkCreate{PLATFORM}SurfaceKHR(),
2236 * vkDestroySurfaceKHR(), and the other API that uses VkSurfaceKHR,
2237 * because the loader no longer does so.
2238 */
2239 *pSupportedVersion = MIN2(*pSupportedVersion, 3u);
2240 return VK_SUCCESS;
2241 }
2242
2243 VkResult
2244 tu_GetMemoryFdKHR(VkDevice _device,
2245 const VkMemoryGetFdInfoKHR *pGetFdInfo,
2246 int *pFd)
2247 {
2248 TU_FROM_HANDLE(tu_device, device, _device);
2249 TU_FROM_HANDLE(tu_device_memory, memory, pGetFdInfo->memory);
2250
2251 assert(pGetFdInfo->sType == VK_STRUCTURE_TYPE_MEMORY_GET_FD_INFO_KHR);
2252
2253 /* At the moment, we support only the handle types asserted below. */
2254 assert(pGetFdInfo->handleType ==
2255 VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT ||
2256 pGetFdInfo->handleType ==
2257 VK_EXTERNAL_MEMORY_HANDLE_TYPE_DMA_BUF_BIT_EXT);
2258
2259 int prime_fd = tu_bo_export_dmabuf(device, &memory->bo);
2260 if (prime_fd < 0)
2261 return vk_error(device->instance, VK_ERROR_OUT_OF_DEVICE_MEMORY);
2262
2263 *pFd = prime_fd;
2264 return VK_SUCCESS;
2265 }
2266
2267 VkResult
2268 tu_GetMemoryFdPropertiesKHR(VkDevice _device,
2269 VkExternalMemoryHandleTypeFlagBits handleType,
2270 int fd,
2271 VkMemoryFdPropertiesKHR *pMemoryFdProperties)
2272 {
2273 assert(handleType == VK_EXTERNAL_MEMORY_HANDLE_TYPE_DMA_BUF_BIT_EXT);
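/* A single memory type is exposed, so any importable dma-buf goes there. */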
2274 pMemoryFdProperties->memoryTypeBits = 1;
2275 return VK_SUCCESS;
2276 }
2277
2278 void
2279 tu_GetPhysicalDeviceExternalSemaphoreProperties(
2280 VkPhysicalDevice physicalDevice,
2281 const VkPhysicalDeviceExternalSemaphoreInfo *pExternalSemaphoreInfo,
2282 VkExternalSemaphoreProperties *pExternalSemaphoreProperties)
2283 {
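/* No external semaphore handle types are supported yet. */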
2284 pExternalSemaphoreProperties->exportFromImportedHandleTypes = 0;
2285 pExternalSemaphoreProperties->compatibleHandleTypes = 0;
2286 pExternalSemaphoreProperties->externalSemaphoreFeatures = 0;
2287 }
2288
2289 void
2290 tu_GetPhysicalDeviceExternalFenceProperties(
2291 VkPhysicalDevice physicalDevice,
2292 const VkPhysicalDeviceExternalFenceInfo *pExternalFenceInfo,
2293 VkExternalFenceProperties *pExternalFenceProperties)
2294 {
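/* Likewise, no external fence handle types are supported yet. */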
2295 pExternalFenceProperties->exportFromImportedHandleTypes = 0;
2296 pExternalFenceProperties->compatibleHandleTypes = 0;
2297 pExternalFenceProperties->externalFenceFeatures = 0;
2298 }
2299
2300 VkResult
2301 tu_CreateDebugReportCallbackEXT(
2302 VkInstance _instance,
2303 const VkDebugReportCallbackCreateInfoEXT *pCreateInfo,
2304 const VkAllocationCallbacks *pAllocator,
2305 VkDebugReportCallbackEXT *pCallback)
2306 {
2307 TU_FROM_HANDLE(tu_instance, instance, _instance);
2308 return vk_create_debug_report_callback(&instance->debug_report_callbacks,
2309 pCreateInfo, pAllocator,
2310 &instance->alloc, pCallback);
2311 }
2312
2313 void
2314 tu_DestroyDebugReportCallbackEXT(VkInstance _instance,
2315 VkDebugReportCallbackEXT _callback,
2316 const VkAllocationCallbacks *pAllocator)
2317 {
2318 TU_FROM_HANDLE(tu_instance, instance, _instance);
2319 vk_destroy_debug_report_callback(&instance->debug_report_callbacks,
2320 _callback, pAllocator, &instance->alloc);
2321 }
2322
2323 void
2324 tu_DebugReportMessageEXT(VkInstance _instance,
2325 VkDebugReportFlagsEXT flags,
2326 VkDebugReportObjectTypeEXT objectType,
2327 uint64_t object,
2328 size_t location,
2329 int32_t messageCode,
2330 const char *pLayerPrefix,
2331 const char *pMessage)
2332 {
2333 TU_FROM_HANDLE(tu_instance, instance, _instance);
2334 vk_debug_report(&instance->debug_report_callbacks, flags, objectType,
2335 object, location, messageCode, pLayerPrefix, pMessage);
2336 }
2337
2338 void
2339 tu_GetDeviceGroupPeerMemoryFeatures(
2340 VkDevice device,
2341 uint32_t heapIndex,
2342 uint32_t localDeviceIndex,
2343 uint32_t remoteDeviceIndex,
2344 VkPeerMemoryFeatureFlags *pPeerMemoryFeatures)
2345 {
2346 assert(localDeviceIndex == remoteDeviceIndex);
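/* Device groups contain a single physical device, so "peer" memory is just
 * local memory and every peer feature can be reported.
 */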
2347
2348 *pPeerMemoryFeatures = VK_PEER_MEMORY_FEATURE_COPY_SRC_BIT |
2349 VK_PEER_MEMORY_FEATURE_COPY_DST_BIT |
2350 VK_PEER_MEMORY_FEATURE_GENERIC_SRC_BIT |
2351 VK_PEER_MEMORY_FEATURE_GENERIC_DST_BIT;
2352 }
2353
2354 void tu_GetPhysicalDeviceMultisamplePropertiesEXT(
2355 VkPhysicalDevice physicalDevice,
2356 VkSampleCountFlagBits samples,
2357 VkMultisamplePropertiesEXT* pMultisampleProperties)
2358 {
2359 TU_FROM_HANDLE(tu_physical_device, pdevice, physicalDevice);
2360
2361 if (samples <= VK_SAMPLE_COUNT_4_BIT && pdevice->supported_extensions.EXT_sample_locations)
2362 pMultisampleProperties->maxSampleLocationGridSize = (VkExtent2D){ 1, 1 };
2363 else
2364 pMultisampleProperties->maxSampleLocationGridSize = (VkExtent2D){ 0, 0 };
2365 }