freedreno/a6xx+tu: rename VSC_DATA/VSC_DATA2
src/freedreno/vulkan/tu_device.c
1 /*
2 * Copyright © 2016 Red Hat.
3 * Copyright © 2016 Bas Nieuwenhuizen
4 *
5 * based in part on anv driver which is:
6 * Copyright © 2015 Intel Corporation
7 *
8 * Permission is hereby granted, free of charge, to any person obtaining a
9 * copy of this software and associated documentation files (the "Software"),
10 * to deal in the Software without restriction, including without limitation
11 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
12 * and/or sell copies of the Software, and to permit persons to whom the
13 * Software is furnished to do so, subject to the following conditions:
14 *
15 * The above copyright notice and this permission notice (including the next
16 * paragraph) shall be included in all copies or substantial portions of the
17 * Software.
18 *
19 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
20 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
21 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
22 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
23 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
24 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
25 * DEALINGS IN THE SOFTWARE.
26 */
27
28 #include "tu_private.h"
29
30 #include <fcntl.h>
31 #include <libsync.h>
32 #include <stdbool.h>
33 #include <string.h>
34 #include <sys/mman.h>
35 #include <sys/sysinfo.h>
36 #include <unistd.h>
37 #include <xf86drm.h>
38
39 #include "compiler/glsl_types.h"
40 #include "util/debug.h"
41 #include "util/disk_cache.h"
42 #include "vk_format.h"
43 #include "vk_util.h"
44
45 #include "drm-uapi/msm_drm.h"
46
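/* The cache UUID is built from the Mesa build timestamp (bytes 0-3), the
 * GPU family id (bytes 4-5), and a "tu" tag, so pipeline caches are
 * invalidated on driver rebuilds and never shared across GPU families.
 */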
47 static int
48 tu_device_get_cache_uuid(uint16_t family, void *uuid)
49 {
50 uint32_t mesa_timestamp;
51 uint16_t f = family;
52 memset(uuid, 0, VK_UUID_SIZE);
53 if (!disk_cache_get_function_timestamp(tu_device_get_cache_uuid,
54 &mesa_timestamp))
55 return -1;
56
57 memcpy(uuid, &mesa_timestamp, 4);
58 memcpy((char *) uuid + 4, &f, 2);
59 snprintf((char *) uuid + 6, VK_UUID_SIZE - 10, "tu");
60 return 0;
61 }
62
63 static void
64 tu_get_driver_uuid(void *uuid)
65 {
66 memset(uuid, 0, VK_UUID_SIZE);
67 snprintf(uuid, VK_UUID_SIZE, "freedreno");
68 }
69
70 static void
71 tu_get_device_uuid(void *uuid)
72 {
73 memset(uuid, 0, VK_UUID_SIZE);
74 }
75
76 static VkResult
77 tu_bo_init(struct tu_device *dev,
78 struct tu_bo *bo,
79 uint32_t gem_handle,
80 uint64_t size)
81 {
82 uint64_t iova = tu_gem_info_iova(dev, gem_handle);
83 if (!iova)
84 return VK_ERROR_OUT_OF_DEVICE_MEMORY;
85
86 *bo = (struct tu_bo) {
87 .gem_handle = gem_handle,
88 .size = size,
89 .iova = iova,
90 };
91
92 return VK_SUCCESS;
93 }
94
95 VkResult
96 tu_bo_init_new(struct tu_device *dev, struct tu_bo *bo, uint64_t size)
97 {
98 /* TODO: Choose better flags. As of 2018-11-12, freedreno/drm/msm_bo.c
99 * always sets `flags = MSM_BO_WC`, and we copy that behavior here.
100 */
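/* MSM_BO_WC requests a write-combined (uncached) CPU mapping, a safe
 * default for buffers that are mostly written by the CPU and read by the
 * GPU.
 */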
101 uint32_t gem_handle = tu_gem_new(dev, size, MSM_BO_WC);
102 if (!gem_handle)
103 return vk_error(dev->instance, VK_ERROR_OUT_OF_DEVICE_MEMORY);
104
105 VkResult result = tu_bo_init(dev, bo, gem_handle, size);
106 if (result != VK_SUCCESS) {
107 tu_gem_close(dev, gem_handle);
108 return vk_error(dev->instance, result);
109 }
110
111 return VK_SUCCESS;
112 }
113
114 VkResult
115 tu_bo_init_dmabuf(struct tu_device *dev,
116 struct tu_bo *bo,
117 uint64_t size,
118 int fd)
119 {
120 uint32_t gem_handle = tu_gem_import_dmabuf(dev, fd, size);
121 if (!gem_handle)
122 return vk_error(dev->instance, VK_ERROR_INVALID_EXTERNAL_HANDLE);
123
124 VkResult result = tu_bo_init(dev, bo, gem_handle, size);
125 if (result != VK_SUCCESS) {
126 tu_gem_close(dev, gem_handle);
127 return vk_error(dev->instance, result);
128 }
129
130 return VK_SUCCESS;
131 }
132
133 int
134 tu_bo_export_dmabuf(struct tu_device *dev, struct tu_bo *bo)
135 {
136 return tu_gem_export_dmabuf(dev, bo->gem_handle);
137 }
138
139 VkResult
140 tu_bo_map(struct tu_device *dev, struct tu_bo *bo)
141 {
142 if (bo->map)
143 return VK_SUCCESS;
144
145 uint64_t offset = tu_gem_info_offset(dev, bo->gem_handle);
146 if (!offset)
147 return vk_error(dev->instance, VK_ERROR_OUT_OF_DEVICE_MEMORY);
148
149 /* TODO: Should we use the wrapper os_mmap() like Freedreno does? */
150 void *map = mmap(0, bo->size, PROT_READ | PROT_WRITE, MAP_SHARED,
151 dev->physical_device->local_fd, offset);
152 if (map == MAP_FAILED)
153 return vk_error(dev->instance, VK_ERROR_MEMORY_MAP_FAILED);
154
155 bo->map = map;
156 return VK_SUCCESS;
157 }
158
159 void
160 tu_bo_finish(struct tu_device *dev, struct tu_bo *bo)
161 {
162 assert(bo->gem_handle);
163
164 if (bo->map)
165 munmap(bo->map, bo->size);
166
167 tu_gem_close(dev, bo->gem_handle);
168 }
169
170 static VkResult
171 tu_physical_device_init(struct tu_physical_device *device,
172 struct tu_instance *instance,
173 drmDevicePtr drm_device)
174 {
175 const char *path = drm_device->nodes[DRM_NODE_RENDER];
176 VkResult result = VK_SUCCESS;
177 drmVersionPtr version;
178 int fd;
179 int master_fd = -1;
180
181 fd = open(path, O_RDWR | O_CLOEXEC);
182 if (fd < 0) {
183 return vk_errorf(instance, VK_ERROR_INCOMPATIBLE_DRIVER,
184 "failed to open device %s", path);
185 }
186
187 /* Version 1.3 added MSM_INFO_IOVA. */
188 const int min_version_major = 1;
189 const int min_version_minor = 3;
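/* The major version must match exactly: a kernel uapi major bump is
 * assumed incompatible, while any minor version >= 3 is accepted.
 */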
190
191 version = drmGetVersion(fd);
192 if (!version) {
193 close(fd);
194 return vk_errorf(instance, VK_ERROR_INCOMPATIBLE_DRIVER,
195 "failed to query kernel driver version for device %s",
196 path);
197 }
198
199 if (strcmp(version->name, "msm")) {
200 drmFreeVersion(version);
201 close(fd);
202 return vk_errorf(instance, VK_ERROR_INCOMPATIBLE_DRIVER,
203 "device %s does not use the msm kernel driver", path);
204 }
205
206 if (version->version_major != min_version_major ||
207 version->version_minor < min_version_minor) {
208 result = vk_errorf(instance, VK_ERROR_INCOMPATIBLE_DRIVER,
209 "kernel driver for device %s has version %d.%d, "
210 "but Vulkan requires version >= %d.%d",
211 path, version->version_major, version->version_minor,
212 min_version_major, min_version_minor);
213 drmFreeVersion(version);
214 close(fd);
215 return result;
216 }
217
218 drmFreeVersion(version);
219
220 if (instance->debug_flags & TU_DEBUG_STARTUP)
221 tu_logi("Found compatible device '%s'.", path);
222
223 device->_loader_data.loaderMagic = ICD_LOADER_MAGIC;
224 device->instance = instance;
225 assert(strlen(path) < ARRAY_SIZE(device->path));
226 strncpy(device->path, path, ARRAY_SIZE(device->path));
227
228 if (instance->enabled_extensions.KHR_display) {
229 master_fd =
230 open(drm_device->nodes[DRM_NODE_PRIMARY], O_RDWR | O_CLOEXEC);
231 if (master_fd >= 0) {
232 /* TODO: free master_fd if accel is not working? */
233 }
234 }
235
236 device->master_fd = master_fd;
237 device->local_fd = fd;
238
239 if (tu_drm_get_gpu_id(device, &device->gpu_id)) {
240 if (instance->debug_flags & TU_DEBUG_STARTUP)
241 tu_logi("Could not query the GPU ID");
242 result = vk_errorf(instance, VK_ERROR_INITIALIZATION_FAILED,
243 "could not get GPU ID");
244 goto fail;
245 }
246
247 if (tu_drm_get_gmem_size(device, &device->gmem_size)) {
248 if (instance->debug_flags & TU_DEBUG_STARTUP)
249 tu_logi("Could not query the GMEM size");
250 result = vk_errorf(instance, VK_ERROR_INITIALIZATION_FAILED,
251 "could not get GMEM size");
252 goto fail;
253 }
254
255 if (tu_drm_get_gmem_base(device, &device->gmem_base)) {
256 if (instance->debug_flags & TU_DEBUG_STARTUP)
257 tu_logi("Could not query the GMEM base");
258 result = vk_errorf(instance, VK_ERROR_INITIALIZATION_FAILED,
259 "could not get GMEM base");
260 goto fail;
261 }
262
263 memset(device->name, 0, sizeof(device->name));
264 sprintf(device->name, "FD%d", device->gpu_id);
265
266 switch (device->gpu_id) {
267 case 618:
268 device->ccu_offset_gmem = 0x7c000; /* 0x7e000 in some cases? */
269 device->ccu_offset_bypass = 0x10000;
270 device->tile_align_w = 64;
271 device->magic.PC_UNKNOWN_9805 = 0x0;
272 device->magic.SP_UNKNOWN_A0F8 = 0x0;
273 break;
274 case 630:
275 case 640:
276 device->ccu_offset_gmem = 0xf8000;
277 device->ccu_offset_bypass = 0x20000;
278 device->tile_align_w = 64;
279 device->magic.PC_UNKNOWN_9805 = 0x1;
280 device->magic.SP_UNKNOWN_A0F8 = 0x1;
281 break;
282 case 650:
283 device->ccu_offset_gmem = 0x114000;
284 device->ccu_offset_bypass = 0x30000;
285 device->tile_align_w = 96;
286 device->magic.PC_UNKNOWN_9805 = 0x2;
287 device->magic.SP_UNKNOWN_A0F8 = 0x2;
288 break;
289 default:
290 result = vk_errorf(instance, VK_ERROR_INITIALIZATION_FAILED,
291 "device %s is unsupported", device->name);
292 goto fail;
293 }
294 if (tu_device_get_cache_uuid(device->gpu_id, device->cache_uuid)) {
295 result = vk_errorf(instance, VK_ERROR_INITIALIZATION_FAILED,
296 "cannot generate UUID");
297 goto fail;
298 }
299
300 /* The gpu id is already embedded in the uuid so we just pass "tu"
301 * when creating the cache.
302 */
303 char buf[VK_UUID_SIZE * 2 + 1];
304 disk_cache_format_hex_id(buf, device->cache_uuid, VK_UUID_SIZE * 2);
305 device->disk_cache = disk_cache_create(device->name, buf, 0);
306
307 fprintf(stderr, "WARNING: tu is not a conformant vulkan implementation, "
308 "testing use only.\n");
309
310 tu_get_driver_uuid(&device->driver_uuid);
311 tu_get_device_uuid(&device->device_uuid);
312
313 tu_fill_device_extension_table(device, &device->supported_extensions);
314
320 result = tu_wsi_init(device);
321 if (result != VK_SUCCESS) {
322 vk_error(instance, result);
323 goto fail;
324 }
325
326 return VK_SUCCESS;
327
328 fail:
329 close(fd);
330 if (master_fd != -1)
331 close(master_fd);
332 return result;
333 }
334
335 static void
336 tu_physical_device_finish(struct tu_physical_device *device)
337 {
338 tu_wsi_finish(device);
339
340 disk_cache_destroy(device->disk_cache);
341 close(device->local_fd);
342 if (device->master_fd != -1)
343 close(device->master_fd);
344 }
345
346 static VKAPI_ATTR void *
347 default_alloc_func(void *pUserData,
348 size_t size,
349 size_t align,
350 VkSystemAllocationScope allocationScope)
351 {
352 return malloc(size);
353 }
354
355 static VKAPI_ATTR void *
356 default_realloc_func(void *pUserData,
357 void *pOriginal,
358 size_t size,
359 size_t align,
360 VkSystemAllocationScope allocationScope)
361 {
362 return realloc(pOriginal, size);
363 }
364
365 static VKAPI_ATTR void
366 default_free_func(void *pUserData, void *pMemory)
367 {
368 free(pMemory);
369 }
370
371 static const VkAllocationCallbacks default_alloc = {
372 .pUserData = NULL,
373 .pfnAllocation = default_alloc_func,
374 .pfnReallocation = default_realloc_func,
375 .pfnFree = default_free_func,
376 };
377
378 static const struct debug_control tu_debug_options[] = {
379 { "startup", TU_DEBUG_STARTUP },
380 { "nir", TU_DEBUG_NIR },
381 { "ir3", TU_DEBUG_IR3 },
382 { "nobin", TU_DEBUG_NOBIN },
383 { "sysmem", TU_DEBUG_SYSMEM },
384 { "forcebin", TU_DEBUG_FORCEBIN },
385 { NULL, 0 }
386 };
387
388 const char *
389 tu_get_debug_option_name(int id)
390 {
391 assert(id < ARRAY_SIZE(tu_debug_options) - 1);
392 return tu_debug_options[id].string;
393 }
394
395 static int
396 tu_get_instance_extension_index(const char *name)
397 {
398 for (unsigned i = 0; i < TU_INSTANCE_EXTENSION_COUNT; ++i) {
399 if (strcmp(name, tu_instance_extensions[i].extensionName) == 0)
400 return i;
401 }
402 return -1;
403 }
404
405 VkResult
406 tu_CreateInstance(const VkInstanceCreateInfo *pCreateInfo,
407 const VkAllocationCallbacks *pAllocator,
408 VkInstance *pInstance)
409 {
410 struct tu_instance *instance;
411 VkResult result;
412
413 assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_INSTANCE_CREATE_INFO);
414
415 uint32_t client_version;
416 if (pCreateInfo->pApplicationInfo &&
417 pCreateInfo->pApplicationInfo->apiVersion != 0) {
418 client_version = pCreateInfo->pApplicationInfo->apiVersion;
419 } else {
420 tu_EnumerateInstanceVersion(&client_version);
421 }
422
423 instance = vk_zalloc2(&default_alloc, pAllocator, sizeof(*instance), 8,
424 VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
425 if (!instance)
426 return vk_error(NULL, VK_ERROR_OUT_OF_HOST_MEMORY);
427
428 instance->_loader_data.loaderMagic = ICD_LOADER_MAGIC;
429
430 if (pAllocator)
431 instance->alloc = *pAllocator;
432 else
433 instance->alloc = default_alloc;
434
435 instance->api_version = client_version;
436 instance->physical_device_count = -1;
437
438 instance->debug_flags =
439 parse_debug_string(getenv("TU_DEBUG"), tu_debug_options);
440
441 if (instance->debug_flags & TU_DEBUG_STARTUP)
442 tu_logi("Created an instance");
443
444 for (uint32_t i = 0; i < pCreateInfo->enabledExtensionCount; i++) {
445 const char *ext_name = pCreateInfo->ppEnabledExtensionNames[i];
446 int index = tu_get_instance_extension_index(ext_name);
447
448 if (index < 0 || !tu_supported_instance_extensions.extensions[index]) {
449 vk_free2(&default_alloc, pAllocator, instance);
450 return vk_error(instance, VK_ERROR_EXTENSION_NOT_PRESENT);
451 }
452
453 instance->enabled_extensions.extensions[index] = true;
454 }
455
456 result = vk_debug_report_instance_init(&instance->debug_report_callbacks);
457 if (result != VK_SUCCESS) {
458 vk_free2(&default_alloc, pAllocator, instance);
459 return vk_error(instance, result);
460 }
461
462 glsl_type_singleton_init_or_ref();
463
464 VG(VALGRIND_CREATE_MEMPOOL(instance, 0, false));
465
466 *pInstance = tu_instance_to_handle(instance);
467
468 return VK_SUCCESS;
469 }
470
471 void
472 tu_DestroyInstance(VkInstance _instance,
473 const VkAllocationCallbacks *pAllocator)
474 {
475 TU_FROM_HANDLE(tu_instance, instance, _instance);
476
477 if (!instance)
478 return;
479
480 for (int i = 0; i < instance->physical_device_count; ++i) {
481 tu_physical_device_finish(instance->physical_devices + i);
482 }
483
484 VG(VALGRIND_DESTROY_MEMPOOL(instance));
485
486 glsl_type_singleton_decref();
487
488 vk_debug_report_instance_destroy(&instance->debug_report_callbacks);
489
490 vk_free(&instance->alloc, instance);
491 }
492
493 static VkResult
494 tu_enumerate_devices(struct tu_instance *instance)
495 {
496 /* TODO: Check for more devices? */
497 drmDevicePtr devices[8];
498 VkResult result = VK_ERROR_INCOMPATIBLE_DRIVER;
499 int max_devices;
500
501 instance->physical_device_count = 0;
502
503 max_devices = drmGetDevices2(0, devices, ARRAY_SIZE(devices));
504
505 if (instance->debug_flags & TU_DEBUG_STARTUP)
506 tu_logi("Found %d drm nodes", max_devices);
507
508 if (max_devices < 1)
509 return vk_error(instance, VK_ERROR_INCOMPATIBLE_DRIVER);
510
511 for (unsigned i = 0; i < (unsigned) max_devices; i++) {
512 if (devices[i]->available_nodes & 1 << DRM_NODE_RENDER &&
513 devices[i]->bustype == DRM_BUS_PLATFORM) {
514
515 result = tu_physical_device_init(
516 instance->physical_devices + instance->physical_device_count,
517 instance, devices[i]);
518 if (result == VK_SUCCESS)
519 ++instance->physical_device_count;
520 else if (result != VK_ERROR_INCOMPATIBLE_DRIVER)
521 break;
522 }
523 }
524 drmFreeDevices(devices, max_devices);
525
526 return result;
527 }
528
529 VkResult
530 tu_EnumeratePhysicalDevices(VkInstance _instance,
531 uint32_t *pPhysicalDeviceCount,
532 VkPhysicalDevice *pPhysicalDevices)
533 {
534 TU_FROM_HANDLE(tu_instance, instance, _instance);
535 VK_OUTARRAY_MAKE(out, pPhysicalDevices, pPhysicalDeviceCount);
536
537 VkResult result;
538
539 if (instance->physical_device_count < 0) {
540 result = tu_enumerate_devices(instance);
541 if (result != VK_SUCCESS && result != VK_ERROR_INCOMPATIBLE_DRIVER)
542 return result;
543 }
544
545 for (uint32_t i = 0; i < instance->physical_device_count; ++i) {
546 vk_outarray_append(&out, p)
547 {
548 *p = tu_physical_device_to_handle(instance->physical_devices + i);
549 }
550 }
551
552 return vk_outarray_status(&out);
553 }
554
555 VkResult
556 tu_EnumeratePhysicalDeviceGroups(
557 VkInstance _instance,
558 uint32_t *pPhysicalDeviceGroupCount,
559 VkPhysicalDeviceGroupProperties *pPhysicalDeviceGroupProperties)
560 {
561 TU_FROM_HANDLE(tu_instance, instance, _instance);
562 VK_OUTARRAY_MAKE(out, pPhysicalDeviceGroupProperties,
563 pPhysicalDeviceGroupCount);
564 VkResult result;
565
566 if (instance->physical_device_count < 0) {
567 result = tu_enumerate_devices(instance);
568 if (result != VK_SUCCESS && result != VK_ERROR_INCOMPATIBLE_DRIVER)
569 return result;
570 }
571
572 for (uint32_t i = 0; i < instance->physical_device_count; ++i) {
573 vk_outarray_append(&out, p)
574 {
575 p->physicalDeviceCount = 1;
576 p->physicalDevices[0] =
577 tu_physical_device_to_handle(instance->physical_devices + i);
578 p->subsetAllocation = false;
579 }
580 }
581
582 return vk_outarray_status(&out);
583 }
584
585 void
586 tu_GetPhysicalDeviceFeatures(VkPhysicalDevice physicalDevice,
587 VkPhysicalDeviceFeatures *pFeatures)
588 {
589 memset(pFeatures, 0, sizeof(*pFeatures));
590
591 *pFeatures = (VkPhysicalDeviceFeatures) {
592 .robustBufferAccess = false,
593 .fullDrawIndexUint32 = true,
594 .imageCubeArray = true,
595 .independentBlend = true,
596 .geometryShader = true,
597 .tessellationShader = false,
598 .sampleRateShading = true,
599 .dualSrcBlend = true,
600 .logicOp = true,
601 .multiDrawIndirect = false,
602 .drawIndirectFirstInstance = false,
603 .depthClamp = true,
604 .depthBiasClamp = false,
605 .fillModeNonSolid = false,
606 .depthBounds = false,
607 .wideLines = false,
608 .largePoints = false,
609 .alphaToOne = false,
610 .multiViewport = false,
611 .samplerAnisotropy = true,
612 .textureCompressionETC2 = true,
613 .textureCompressionASTC_LDR = true,
614 .textureCompressionBC = true,
615 .occlusionQueryPrecise = true,
616 .pipelineStatisticsQuery = false,
617 .vertexPipelineStoresAndAtomics = false,
618 .fragmentStoresAndAtomics = false,
619 .shaderTessellationAndGeometryPointSize = false,
620 .shaderImageGatherExtended = false,
621 .shaderStorageImageExtendedFormats = false,
622 .shaderStorageImageMultisample = false,
623 .shaderUniformBufferArrayDynamicIndexing = false,
624 .shaderSampledImageArrayDynamicIndexing = false,
625 .shaderStorageBufferArrayDynamicIndexing = false,
626 .shaderStorageImageArrayDynamicIndexing = false,
627 .shaderStorageImageReadWithoutFormat = false,
628 .shaderStorageImageWriteWithoutFormat = false,
629 .shaderClipDistance = false,
630 .shaderCullDistance = false,
631 .shaderFloat64 = false,
632 .shaderInt64 = false,
633 .shaderInt16 = false,
634 .sparseBinding = false,
635 .variableMultisampleRate = false,
636 .inheritedQueries = false,
637 };
638 }
639
640 void
641 tu_GetPhysicalDeviceFeatures2(VkPhysicalDevice physicalDevice,
642 VkPhysicalDeviceFeatures2 *pFeatures)
643 {
644 vk_foreach_struct(ext, pFeatures->pNext)
645 {
646 switch (ext->sType) {
647 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VARIABLE_POINTERS_FEATURES: {
648 VkPhysicalDeviceVariablePointersFeatures *features = (void *) ext;
649 features->variablePointersStorageBuffer = false;
650 features->variablePointers = false;
651 break;
652 }
653 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MULTIVIEW_FEATURES: {
654 VkPhysicalDeviceMultiviewFeatures *features =
655 (VkPhysicalDeviceMultiviewFeatures *) ext;
656 features->multiview = false;
657 features->multiviewGeometryShader = false;
658 features->multiviewTessellationShader = false;
659 break;
660 }
661 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_DRAW_PARAMETERS_FEATURES: {
662 VkPhysicalDeviceShaderDrawParametersFeatures *features =
663 (VkPhysicalDeviceShaderDrawParametersFeatures *) ext;
664 features->shaderDrawParameters = false;
665 break;
666 }
667 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROTECTED_MEMORY_FEATURES: {
668 VkPhysicalDeviceProtectedMemoryFeatures *features =
669 (VkPhysicalDeviceProtectedMemoryFeatures *) ext;
670 features->protectedMemory = false;
671 break;
672 }
673 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_16BIT_STORAGE_FEATURES: {
674 VkPhysicalDevice16BitStorageFeatures *features =
675 (VkPhysicalDevice16BitStorageFeatures *) ext;
676 features->storageBuffer16BitAccess = false;
677 features->uniformAndStorageBuffer16BitAccess = false;
678 features->storagePushConstant16 = false;
679 features->storageInputOutput16 = false;
680 break;
681 }
682 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SAMPLER_YCBCR_CONVERSION_FEATURES: {
683 VkPhysicalDeviceSamplerYcbcrConversionFeatures *features =
684 (VkPhysicalDeviceSamplerYcbcrConversionFeatures *) ext;
685 features->samplerYcbcrConversion = false;
686 break;
687 }
688 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DESCRIPTOR_INDEXING_FEATURES_EXT: {
689 VkPhysicalDeviceDescriptorIndexingFeaturesEXT *features =
690 (VkPhysicalDeviceDescriptorIndexingFeaturesEXT *) ext;
691 features->shaderInputAttachmentArrayDynamicIndexing = false;
692 features->shaderUniformTexelBufferArrayDynamicIndexing = false;
693 features->shaderStorageTexelBufferArrayDynamicIndexing = false;
694 features->shaderUniformBufferArrayNonUniformIndexing = false;
695 features->shaderSampledImageArrayNonUniformIndexing = false;
696 features->shaderStorageBufferArrayNonUniformIndexing = false;
697 features->shaderStorageImageArrayNonUniformIndexing = false;
698 features->shaderInputAttachmentArrayNonUniformIndexing = false;
699 features->shaderUniformTexelBufferArrayNonUniformIndexing = false;
700 features->shaderStorageTexelBufferArrayNonUniformIndexing = false;
701 features->descriptorBindingUniformBufferUpdateAfterBind = false;
702 features->descriptorBindingSampledImageUpdateAfterBind = false;
703 features->descriptorBindingStorageImageUpdateAfterBind = false;
704 features->descriptorBindingStorageBufferUpdateAfterBind = false;
705 features->descriptorBindingUniformTexelBufferUpdateAfterBind = false;
706 features->descriptorBindingStorageTexelBufferUpdateAfterBind = false;
707 features->descriptorBindingUpdateUnusedWhilePending = false;
708 features->descriptorBindingPartiallyBound = false;
709 features->descriptorBindingVariableDescriptorCount = false;
710 features->runtimeDescriptorArray = false;
711 break;
712 }
713 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_CONDITIONAL_RENDERING_FEATURES_EXT: {
714 VkPhysicalDeviceConditionalRenderingFeaturesEXT *features =
715 (VkPhysicalDeviceConditionalRenderingFeaturesEXT *) ext;
716 features->conditionalRendering = false;
717 features->inheritedConditionalRendering = false;
718 break;
719 }
720 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TRANSFORM_FEEDBACK_FEATURES_EXT: {
721 VkPhysicalDeviceTransformFeedbackFeaturesEXT *features =
722 (VkPhysicalDeviceTransformFeedbackFeaturesEXT *) ext;
723 features->transformFeedback = true;
724 features->geometryStreams = false;
725 break;
726 }
727 default:
728 break;
729 }
730 }
731 return tu_GetPhysicalDeviceFeatures(physicalDevice, &pFeatures->features);
732 }
733
734 void
735 tu_GetPhysicalDeviceProperties(VkPhysicalDevice physicalDevice,
736 VkPhysicalDeviceProperties *pProperties)
737 {
738 TU_FROM_HANDLE(tu_physical_device, pdevice, physicalDevice);
739 VkSampleCountFlags sample_counts =
740 VK_SAMPLE_COUNT_1_BIT | VK_SAMPLE_COUNT_2_BIT | VK_SAMPLE_COUNT_4_BIT;
741
742 /* I have no idea what the maximum size is, but the hardware supports very
743 * large numbers of descriptors (at least 2^16). This limit is based on
744 * CP_LOAD_STATE6, which has a 28-bit field for the DWORD offset, so that
745 * we don't have to think about what to do if that overflows, but really
746 * nothing is likely to get close to this.
747 */
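/* With A6XX_TEX_CONST_DWORDS at its current value of 16, this works out
 * to (1 << 28) / 16 = 2^24 (~16.8M) descriptors per stage.
 */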
748 const size_t max_descriptor_set_size = (1 << 28) / A6XX_TEX_CONST_DWORDS;
749
750 VkPhysicalDeviceLimits limits = {
751 .maxImageDimension1D = (1 << 14),
752 .maxImageDimension2D = (1 << 14),
753 .maxImageDimension3D = (1 << 11),
754 .maxImageDimensionCube = (1 << 14),
755 .maxImageArrayLayers = (1 << 11),
756 .maxTexelBufferElements = 128 * 1024 * 1024,
757 .maxUniformBufferRange = MAX_UNIFORM_BUFFER_RANGE,
758 .maxStorageBufferRange = MAX_STORAGE_BUFFER_RANGE,
759 .maxPushConstantsSize = MAX_PUSH_CONSTANTS_SIZE,
760 .maxMemoryAllocationCount = UINT32_MAX,
761 .maxSamplerAllocationCount = 64 * 1024,
762 .bufferImageGranularity = 64, /* A cache line */
763 .sparseAddressSpaceSize = 0xffffffffu, /* buffer max size */
764 .maxBoundDescriptorSets = MAX_SETS,
765 .maxPerStageDescriptorSamplers = max_descriptor_set_size,
766 .maxPerStageDescriptorUniformBuffers = max_descriptor_set_size,
767 .maxPerStageDescriptorStorageBuffers = max_descriptor_set_size,
768 .maxPerStageDescriptorSampledImages = max_descriptor_set_size,
769 .maxPerStageDescriptorStorageImages = max_descriptor_set_size,
770 .maxPerStageDescriptorInputAttachments = MAX_RTS,
771 .maxPerStageResources = max_descriptor_set_size,
772 .maxDescriptorSetSamplers = max_descriptor_set_size,
773 .maxDescriptorSetUniformBuffers = max_descriptor_set_size,
774 .maxDescriptorSetUniformBuffersDynamic = MAX_DYNAMIC_UNIFORM_BUFFERS,
775 .maxDescriptorSetStorageBuffers = max_descriptor_set_size,
776 .maxDescriptorSetStorageBuffersDynamic = MAX_DYNAMIC_STORAGE_BUFFERS,
777 .maxDescriptorSetSampledImages = max_descriptor_set_size,
778 .maxDescriptorSetStorageImages = max_descriptor_set_size,
779 .maxDescriptorSetInputAttachments = MAX_RTS,
780 .maxVertexInputAttributes = 32,
781 .maxVertexInputBindings = 32,
782 .maxVertexInputAttributeOffset = 4095,
783 .maxVertexInputBindingStride = 2048,
784 .maxVertexOutputComponents = 128,
785 .maxTessellationGenerationLevel = 64,
786 .maxTessellationPatchSize = 32,
787 .maxTessellationControlPerVertexInputComponents = 128,
788 .maxTessellationControlPerVertexOutputComponents = 128,
789 .maxTessellationControlPerPatchOutputComponents = 120,
790 .maxTessellationControlTotalOutputComponents = 4096,
791 .maxTessellationEvaluationInputComponents = 128,
792 .maxTessellationEvaluationOutputComponents = 128,
793 .maxGeometryShaderInvocations = 32,
794 .maxGeometryInputComponents = 64,
795 .maxGeometryOutputComponents = 128,
796 .maxGeometryOutputVertices = 256,
797 .maxGeometryTotalOutputComponents = 1024,
798 .maxFragmentInputComponents = 124,
799 .maxFragmentOutputAttachments = 8,
800 .maxFragmentDualSrcAttachments = 1,
801 .maxFragmentCombinedOutputResources = 8,
802 .maxComputeSharedMemorySize = 32768,
803 .maxComputeWorkGroupCount = { 65535, 65535, 65535 },
804 .maxComputeWorkGroupInvocations = 2048,
805 .maxComputeWorkGroupSize = { 2048, 2048, 2048 },
806 .subPixelPrecisionBits = 8,
807 .subTexelPrecisionBits = 4 /* FIXME */,
808 .mipmapPrecisionBits = 4 /* FIXME */,
809 .maxDrawIndexedIndexValue = UINT32_MAX,
810 .maxDrawIndirectCount = UINT32_MAX,
811 .maxSamplerLodBias = 16,
812 .maxSamplerAnisotropy = 16,
813 .maxViewports = MAX_VIEWPORTS,
814 .maxViewportDimensions = { (1 << 14), (1 << 14) },
815 .viewportBoundsRange = { INT16_MIN, INT16_MAX },
816 .viewportSubPixelBits = 8,
817 .minMemoryMapAlignment = 4096, /* A page */
818 .minTexelBufferOffsetAlignment = 64,
819 .minUniformBufferOffsetAlignment = 64,
820 .minStorageBufferOffsetAlignment = 64,
821 .minTexelOffset = -32,
822 .maxTexelOffset = 31,
823 .minTexelGatherOffset = -32,
824 .maxTexelGatherOffset = 31,
825 .minInterpolationOffset = -2,
826 .maxInterpolationOffset = 2,
827 .subPixelInterpolationOffsetBits = 8,
828 .maxFramebufferWidth = (1 << 14),
829 .maxFramebufferHeight = (1 << 14),
830 .maxFramebufferLayers = (1 << 10),
831 .framebufferColorSampleCounts = sample_counts,
832 .framebufferDepthSampleCounts = sample_counts,
833 .framebufferStencilSampleCounts = sample_counts,
834 .framebufferNoAttachmentsSampleCounts = sample_counts,
835 .maxColorAttachments = MAX_RTS,
836 .sampledImageColorSampleCounts = sample_counts,
837 .sampledImageIntegerSampleCounts = VK_SAMPLE_COUNT_1_BIT,
838 .sampledImageDepthSampleCounts = sample_counts,
839 .sampledImageStencilSampleCounts = sample_counts,
840 .storageImageSampleCounts = VK_SAMPLE_COUNT_1_BIT,
841 .maxSampleMaskWords = 1,
842 .timestampComputeAndGraphics = true,
843 .timestampPeriod = 1000000000.0 / 19200000.0, /* CP_ALWAYS_ON_COUNTER is fixed 19.2MHz */
844 .maxClipDistances = 8,
845 .maxCullDistances = 8,
846 .maxCombinedClipAndCullDistances = 8,
847 .discreteQueuePriorities = 1,
848 .pointSizeRange = { 0.125, 255.875 },
849 .lineWidthRange = { 0.0, 7.9921875 },
850 .pointSizeGranularity = (1.0 / 8.0),
851 .lineWidthGranularity = (1.0 / 128.0),
852 .strictLines = false, /* FINISHME */
853 .standardSampleLocations = true,
854 .optimalBufferCopyOffsetAlignment = 128,
855 .optimalBufferCopyRowPitchAlignment = 128,
856 .nonCoherentAtomSize = 64,
857 };
858
859 *pProperties = (VkPhysicalDeviceProperties) {
860 .apiVersion = tu_physical_device_api_version(pdevice),
861 .driverVersion = vk_get_driver_version(),
862 .vendorID = 0, /* TODO */
863 .deviceID = 0,
864 .deviceType = VK_PHYSICAL_DEVICE_TYPE_INTEGRATED_GPU,
865 .limits = limits,
866 .sparseProperties = { 0 },
867 };
868
869 strcpy(pProperties->deviceName, pdevice->name);
870 memcpy(pProperties->pipelineCacheUUID, pdevice->cache_uuid, VK_UUID_SIZE);
871 }
872
873 void
874 tu_GetPhysicalDeviceProperties2(VkPhysicalDevice physicalDevice,
875 VkPhysicalDeviceProperties2 *pProperties)
876 {
877 TU_FROM_HANDLE(tu_physical_device, pdevice, physicalDevice);
878 tu_GetPhysicalDeviceProperties(physicalDevice, &pProperties->properties);
879
880 vk_foreach_struct(ext, pProperties->pNext)
881 {
882 switch (ext->sType) {
883 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PUSH_DESCRIPTOR_PROPERTIES_KHR: {
884 VkPhysicalDevicePushDescriptorPropertiesKHR *properties =
885 (VkPhysicalDevicePushDescriptorPropertiesKHR *) ext;
886 properties->maxPushDescriptors = MAX_PUSH_DESCRIPTORS;
887 break;
888 }
889 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_ID_PROPERTIES: {
890 VkPhysicalDeviceIDProperties *properties =
891 (VkPhysicalDeviceIDProperties *) ext;
892 memcpy(properties->driverUUID, pdevice->driver_uuid, VK_UUID_SIZE);
893 memcpy(properties->deviceUUID, pdevice->device_uuid, VK_UUID_SIZE);
894 properties->deviceLUIDValid = false;
895 break;
896 }
897 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MULTIVIEW_PROPERTIES: {
898 VkPhysicalDeviceMultiviewProperties *properties =
899 (VkPhysicalDeviceMultiviewProperties *) ext;
900 properties->maxMultiviewViewCount = MAX_VIEWS;
901 properties->maxMultiviewInstanceIndex = INT_MAX;
902 break;
903 }
904 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_POINT_CLIPPING_PROPERTIES: {
905 VkPhysicalDevicePointClippingProperties *properties =
906 (VkPhysicalDevicePointClippingProperties *) ext;
907 properties->pointClippingBehavior =
908 VK_POINT_CLIPPING_BEHAVIOR_ALL_CLIP_PLANES;
909 break;
910 }
911 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MAINTENANCE_3_PROPERTIES: {
912 VkPhysicalDeviceMaintenance3Properties *properties =
913 (VkPhysicalDeviceMaintenance3Properties *) ext;
914 /* Make sure everything is addressable by a signed 32-bit int, and
915 * our largest descriptors are 96 bytes. */
916 properties->maxPerSetDescriptors = (1ull << 31) / 96;
917 /* Our buffer size fields allow only this much */
918 properties->maxMemoryAllocationSize = 0xFFFFFFFFull;
919 break;
920 }
921 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TRANSFORM_FEEDBACK_PROPERTIES_EXT: {
922 VkPhysicalDeviceTransformFeedbackPropertiesEXT *properties =
923 (VkPhysicalDeviceTransformFeedbackPropertiesEXT *)ext;
924
925 properties->maxTransformFeedbackStreams = IR3_MAX_SO_STREAMS;
926 properties->maxTransformFeedbackBuffers = IR3_MAX_SO_BUFFERS;
927 properties->maxTransformFeedbackBufferSize = UINT32_MAX;
928 properties->maxTransformFeedbackStreamDataSize = 512;
929 properties->maxTransformFeedbackBufferDataSize = 512;
930 properties->maxTransformFeedbackBufferDataStride = 512;
931 properties->transformFeedbackQueries = true;
932 properties->transformFeedbackStreamsLinesTriangles = false;
933 properties->transformFeedbackRasterizationStreamSelect = false;
934 properties->transformFeedbackDraw = true;
935 break;
936 }
937 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SAMPLE_LOCATIONS_PROPERTIES_EXT: {
938 VkPhysicalDeviceSampleLocationsPropertiesEXT *properties =
939 (VkPhysicalDeviceSampleLocationsPropertiesEXT *)ext;
940 properties->sampleLocationSampleCounts = 0;
941 if (pdevice->supported_extensions.EXT_sample_locations) {
942 properties->sampleLocationSampleCounts =
943 VK_SAMPLE_COUNT_1_BIT | VK_SAMPLE_COUNT_2_BIT | VK_SAMPLE_COUNT_4_BIT;
944 }
945 properties->maxSampleLocationGridSize = (VkExtent2D) { 1 , 1 };
946 properties->sampleLocationCoordinateRange[0] = 0.0f;
947 properties->sampleLocationCoordinateRange[1] = 0.9375f;
948 properties->sampleLocationSubPixelBits = 4;
949 properties->variableSampleLocations = true;
950 break;
951 }
952 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SAMPLER_FILTER_MINMAX_PROPERTIES: {
953 VkPhysicalDeviceSamplerFilterMinmaxProperties *properties =
954 (VkPhysicalDeviceSamplerFilterMinmaxProperties *)ext;
955 properties->filterMinmaxImageComponentMapping = true;
956 properties->filterMinmaxSingleComponentFormats = true;
957 break;
958 }
959
960 default:
961 break;
962 }
963 }
964 }
965
966 static const VkQueueFamilyProperties tu_queue_family_properties = {
967 .queueFlags =
968 VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT | VK_QUEUE_TRANSFER_BIT,
969 .queueCount = 1,
970 .timestampValidBits = 48,
971 .minImageTransferGranularity = { 1, 1, 1 },
972 };
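/* A single universal queue family: the hardware exposes one 3D pipe
 * (see MSM_PIPE_3D0 in tu_QueueSubmit), which handles graphics, compute,
 * and transfer alike.
 */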
973
974 void
975 tu_GetPhysicalDeviceQueueFamilyProperties(
976 VkPhysicalDevice physicalDevice,
977 uint32_t *pQueueFamilyPropertyCount,
978 VkQueueFamilyProperties *pQueueFamilyProperties)
979 {
980 VK_OUTARRAY_MAKE(out, pQueueFamilyProperties, pQueueFamilyPropertyCount);
981
982 vk_outarray_append(&out, p) { *p = tu_queue_family_properties; }
983 }
984
985 void
986 tu_GetPhysicalDeviceQueueFamilyProperties2(
987 VkPhysicalDevice physicalDevice,
988 uint32_t *pQueueFamilyPropertyCount,
989 VkQueueFamilyProperties2 *pQueueFamilyProperties)
990 {
991 VK_OUTARRAY_MAKE(out, pQueueFamilyProperties, pQueueFamilyPropertyCount);
992
993 vk_outarray_append(&out, p)
994 {
995 p->queueFamilyProperties = tu_queue_family_properties;
996 }
997 }
998
999 static uint64_t
1000 tu_get_system_heap_size()
1001 {
1002 struct sysinfo info;
1003 sysinfo(&info);
1004
1005 uint64_t total_ram = (uint64_t) info.totalram * (uint64_t) info.mem_unit;
1006
1007 /* We don't want to burn too much ram with the GPU. If the user has 4GiB
1008 * or less, we use at most half. If they have more than 4GiB, we use 3/4.
1009 */
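/* For example: a 4GiB system advertises a 2GiB heap, an 8GiB system 6GiB. */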
1010 uint64_t available_ram;
1011 if (total_ram <= 4ull * 1024ull * 1024ull * 1024ull)
1012 available_ram = total_ram / 2;
1013 else
1014 available_ram = total_ram * 3 / 4;
1015
1016 return available_ram;
1017 }
1018
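/* These SoCs have unified memory, so we expose a single heap with one
 * memory type that is device-local, host-visible, and coherent at once.
 */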
1019 void
1020 tu_GetPhysicalDeviceMemoryProperties(
1021 VkPhysicalDevice physicalDevice,
1022 VkPhysicalDeviceMemoryProperties *pMemoryProperties)
1023 {
1024 pMemoryProperties->memoryHeapCount = 1;
1025 pMemoryProperties->memoryHeaps[0].size = tu_get_system_heap_size();
1026 pMemoryProperties->memoryHeaps[0].flags = VK_MEMORY_HEAP_DEVICE_LOCAL_BIT;
1027
1028 pMemoryProperties->memoryTypeCount = 1;
1029 pMemoryProperties->memoryTypes[0].propertyFlags =
1030 VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT |
1031 VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT |
1032 VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
1033 pMemoryProperties->memoryTypes[0].heapIndex = 0;
1034 }
1035
1036 void
1037 tu_GetPhysicalDeviceMemoryProperties2(
1038 VkPhysicalDevice physicalDevice,
1039 VkPhysicalDeviceMemoryProperties2 *pMemoryProperties)
1040 {
1041 return tu_GetPhysicalDeviceMemoryProperties(
1042 physicalDevice, &pMemoryProperties->memoryProperties);
1043 }
1044
1045 static VkResult
1046 tu_queue_init(struct tu_device *device,
1047 struct tu_queue *queue,
1048 uint32_t queue_family_index,
1049 int idx,
1050 VkDeviceQueueCreateFlags flags)
1051 {
1052 queue->_loader_data.loaderMagic = ICD_LOADER_MAGIC;
1053 queue->device = device;
1054 queue->queue_family_index = queue_family_index;
1055 queue->queue_idx = idx;
1056 queue->flags = flags;
1057
1058 int ret = tu_drm_submitqueue_new(device, 0, &queue->msm_queue_id);
1059 if (ret)
1060 return VK_ERROR_INITIALIZATION_FAILED;
1061
1062 tu_fence_init(&queue->submit_fence, false);
1063
1064 return VK_SUCCESS;
1065 }
1066
1067 static void
1068 tu_queue_finish(struct tu_queue *queue)
1069 {
1070 tu_fence_finish(&queue->submit_fence);
1071 tu_drm_submitqueue_close(queue->device, queue->msm_queue_id);
1072 }
1073
1074 static int
1075 tu_get_device_extension_index(const char *name)
1076 {
1077 for (unsigned i = 0; i < TU_DEVICE_EXTENSION_COUNT; ++i) {
1078 if (strcmp(name, tu_device_extensions[i].extensionName) == 0)
1079 return i;
1080 }
1081 return -1;
1082 }
1083
1084 struct PACKED bcolor_entry {
1085 uint32_t fp32[4];
1086 uint16_t ui16[4];
1087 int16_t si16[4];
1088 uint16_t fp16[4];
1089 uint16_t rgb565;
1090 uint16_t rgb5a1;
1091 uint16_t rgba4;
1092 uint8_t __pad0[2];
1093 uint8_t ui8[4];
1094 int8_t si8[4];
1095 uint32_t rgb10a2;
1096 uint32_t z24; /* also s8? */
1097 uint16_t srgb[4]; /* appears to duplicate fp16[], but clamped, used for srgb */
1098 uint8_t __pad1[56];
1099 } border_color[] = {
1100 [VK_BORDER_COLOR_FLOAT_TRANSPARENT_BLACK] = {},
1101 [VK_BORDER_COLOR_INT_TRANSPARENT_BLACK] = {},
1102 [VK_BORDER_COLOR_FLOAT_OPAQUE_BLACK] = {
1103 .fp32[3] = 0x3f800000,
1104 .ui16[3] = 0xffff,
1105 .si16[3] = 0x7fff,
1106 .fp16[3] = 0x3c00,
1107 .rgb5a1 = 0x8000,
1108 .rgba4 = 0xf000,
1109 .ui8[3] = 0xff,
1110 .si8[3] = 0x7f,
1111 .rgb10a2 = 0xc0000000,
1112 .srgb[3] = 0x3c00,
1113 },
1114 [VK_BORDER_COLOR_INT_OPAQUE_BLACK] = {
1115 .fp32[3] = 1,
1116 .fp16[3] = 1,
1117 },
1118 [VK_BORDER_COLOR_FLOAT_OPAQUE_WHITE] = {
1119 .fp32[0 ... 3] = 0x3f800000,
1120 .ui16[0 ... 3] = 0xffff,
1121 .si16[0 ... 3] = 0x7fff,
1122 .fp16[0 ... 3] = 0x3c00,
1123 .rgb565 = 0xffff,
1124 .rgb5a1 = 0xffff,
1125 .rgba4 = 0xffff,
1126 .ui8[0 ... 3] = 0xff,
1127 .si8[0 ... 3] = 0x7f,
1128 .rgb10a2 = 0xffffffff,
1129 .z24 = 0xffffff,
1130 .srgb[0 ... 3] = 0x3c00,
1131 },
1132 [VK_BORDER_COLOR_INT_OPAQUE_WHITE] = {
1133 .fp32[0 ... 3] = 1,
1134 .fp16[0 ... 3] = 1,
1135 },
1136 };
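/* Each entry is padded to exactly 128 bytes (see the STATIC_ASSERT in
 * tu_CreateDevice); the whole table is copied verbatim into the
 * device->border_color BO for samplers to reference.
 */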
1137
1138
1139 VkResult
1140 tu_CreateDevice(VkPhysicalDevice physicalDevice,
1141 const VkDeviceCreateInfo *pCreateInfo,
1142 const VkAllocationCallbacks *pAllocator,
1143 VkDevice *pDevice)
1144 {
1145 TU_FROM_HANDLE(tu_physical_device, physical_device, physicalDevice);
1146 VkResult result;
1147 struct tu_device *device;
1148
1149 /* Check enabled features */
1150 if (pCreateInfo->pEnabledFeatures) {
1151 VkPhysicalDeviceFeatures supported_features;
1152 tu_GetPhysicalDeviceFeatures(physicalDevice, &supported_features);
1153 VkBool32 *supported_feature = (VkBool32 *) &supported_features;
1154 VkBool32 *enabled_feature = (VkBool32 *) pCreateInfo->pEnabledFeatures;
1155 unsigned num_features =
1156 sizeof(VkPhysicalDeviceFeatures) / sizeof(VkBool32);
1157 for (uint32_t i = 0; i < num_features; i++) {
1158 if (enabled_feature[i] && !supported_feature[i])
1159 return vk_error(physical_device->instance,
1160 VK_ERROR_FEATURE_NOT_PRESENT);
1161 }
1162 }
1163
1164 device = vk_zalloc2(&physical_device->instance->alloc, pAllocator,
1165 sizeof(*device), 8, VK_SYSTEM_ALLOCATION_SCOPE_DEVICE);
1166 if (!device)
1167 return vk_error(physical_device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
1168
1169 device->_loader_data.loaderMagic = ICD_LOADER_MAGIC;
1170 device->instance = physical_device->instance;
1171 device->physical_device = physical_device;
1172
1173 if (pAllocator)
1174 device->alloc = *pAllocator;
1175 else
1176 device->alloc = physical_device->instance->alloc;
1177
1178 for (uint32_t i = 0; i < pCreateInfo->enabledExtensionCount; i++) {
1179 const char *ext_name = pCreateInfo->ppEnabledExtensionNames[i];
1180 int index = tu_get_device_extension_index(ext_name);
1181 if (index < 0 ||
1182 !physical_device->supported_extensions.extensions[index]) {
1183 vk_free(&device->alloc, device);
1184 return vk_error(physical_device->instance,
1185 VK_ERROR_EXTENSION_NOT_PRESENT);
1186 }
1187
1188 device->enabled_extensions.extensions[index] = true;
1189 }
1190
1191 for (unsigned i = 0; i < pCreateInfo->queueCreateInfoCount; i++) {
1192 const VkDeviceQueueCreateInfo *queue_create =
1193 &pCreateInfo->pQueueCreateInfos[i];
1194 uint32_t qfi = queue_create->queueFamilyIndex;
1195 device->queues[qfi] = vk_alloc(
1196 &device->alloc, queue_create->queueCount * sizeof(struct tu_queue),
1197 8, VK_SYSTEM_ALLOCATION_SCOPE_DEVICE);
1198 if (!device->queues[qfi]) {
1199 result = VK_ERROR_OUT_OF_HOST_MEMORY;
1200 goto fail_queues;
1201 }
1202
1203 memset(device->queues[qfi], 0,
1204 queue_create->queueCount * sizeof(struct tu_queue));
1205
1206 device->queue_count[qfi] = queue_create->queueCount;
1207
1208 for (unsigned q = 0; q < queue_create->queueCount; q++) {
1209 result = tu_queue_init(device, &device->queues[qfi][q], qfi, q,
1210 queue_create->flags);
1211 if (result != VK_SUCCESS)
1212 goto fail_queues;
1213 }
1214 }
1215
1216 device->compiler = ir3_compiler_create(NULL, physical_device->gpu_id);
1217 if (!device->compiler) {
1218 result = VK_ERROR_INITIALIZATION_FAILED;
goto fail_queues;
}
1219
1220 #define VSC_DRAW_STRM_SIZE(pitch) ((pitch) * 32 + 0x100) /* extra size to store VSC_SIZE */
1221 #define VSC_PRIM_STRM_SIZE(pitch) ((pitch) * 32)
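/* The 32x multiplier matches the (up to) 32 VSC pipes on a6xx: each pipe
 * gets one stream of `pitch` bytes.
 */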
1222
1223 device->vsc_draw_strm_pitch = 0x440 * 4;
1224 device->vsc_prim_strm_pitch = 0x1040 * 4;
1225
1226 result = tu_bo_init_new(device, &device->vsc_draw_strm, VSC_DRAW_STRM_SIZE(device->vsc_draw_strm_pitch));
1227 if (result != VK_SUCCESS)
1228 goto fail_vsc_draw_strm;
1229
1230 result = tu_bo_init_new(device, &device->vsc_prim_strm, VSC_PRIM_STRM_SIZE(device->vsc_prim_strm_pitch));
1231 if (result != VK_SUCCESS)
1232 goto fail_vsc_prim_strm;
1233
1234 STATIC_ASSERT(sizeof(struct bcolor_entry) == 128);
1235 result = tu_bo_init_new(device, &device->border_color, sizeof(border_color));
1236 if (result != VK_SUCCESS)
1237 goto fail_border_color;
1238
1239 result = tu_bo_map(device, &device->border_color);
1240 if (result != VK_SUCCESS)
1241 goto fail_border_color_map;
1242
1243 memcpy(device->border_color.map, border_color, sizeof(border_color));
1244
1245 VkPipelineCacheCreateInfo ci;
1246 ci.sType = VK_STRUCTURE_TYPE_PIPELINE_CACHE_CREATE_INFO;
1247 ci.pNext = NULL;
1248 ci.flags = 0;
1249 ci.pInitialData = NULL;
1250 ci.initialDataSize = 0;
1251 VkPipelineCache pc;
1252 result =
1253 tu_CreatePipelineCache(tu_device_to_handle(device), &ci, NULL, &pc);
1254 if (result != VK_SUCCESS)
1255 goto fail_pipeline_cache;
1256
1257 device->mem_cache = tu_pipeline_cache_from_handle(pc);
1258
1259 *pDevice = tu_device_to_handle(device);
1260 return VK_SUCCESS;
1261
1262 fail_pipeline_cache:
1263 fail_border_color_map:
1264 tu_bo_finish(device, &device->border_color);
1265
1266 fail_border_color:
1267 tu_bo_finish(device, &device->vsc_prim_strm);
1268
1269 fail_vsc_prim_strm:
1270 tu_bo_finish(device, &device->vsc_draw_strm);
1271
1272 fail_vsc_draw_strm:
1273 ralloc_free(device->compiler);
1274
1275 fail_queues:
1276 for (unsigned i = 0; i < TU_MAX_QUEUE_FAMILIES; i++) {
1277 for (unsigned q = 0; q < device->queue_count[i]; q++)
1278 tu_queue_finish(&device->queues[i][q]);
1279 if (device->queue_count[i])
1280 vk_free(&device->alloc, device->queues[i]);
1281 }
1282
1283 vk_free(&device->alloc, device);
1284 return result;
1285 }
1286
1287 void
1288 tu_DestroyDevice(VkDevice _device, const VkAllocationCallbacks *pAllocator)
1289 {
1290 TU_FROM_HANDLE(tu_device, device, _device);
1291
1292 if (!device)
1293 return;
1294
1295 tu_bo_finish(device, &device->vsc_draw_strm);
1296 tu_bo_finish(device, &device->vsc_prim_strm);
1297
1298 for (unsigned i = 0; i < TU_MAX_QUEUE_FAMILIES; i++) {
1299 for (unsigned q = 0; q < device->queue_count[i]; q++)
1300 tu_queue_finish(&device->queues[i][q]);
1301 if (device->queue_count[i])
1302 vk_free(&device->alloc, device->queues[i]);
1303 }
1304
1305 /* the compiler does not use pAllocator */
1306 ralloc_free(device->compiler);
1307
1308 VkPipelineCache pc = tu_pipeline_cache_to_handle(device->mem_cache);
1309 tu_DestroyPipelineCache(tu_device_to_handle(device), pc, NULL);
1310
1311 vk_free(&device->alloc, device);
1312 }
1313
1314 VkResult
1315 tu_EnumerateInstanceLayerProperties(uint32_t *pPropertyCount,
1316 VkLayerProperties *pProperties)
1317 {
1318 *pPropertyCount = 0;
1319 return VK_SUCCESS;
1320 }
1321
1322 VkResult
1323 tu_EnumerateDeviceLayerProperties(VkPhysicalDevice physicalDevice,
1324 uint32_t *pPropertyCount,
1325 VkLayerProperties *pProperties)
1326 {
1327 *pPropertyCount = 0;
1328 return VK_SUCCESS;
1329 }
1330
1331 void
1332 tu_GetDeviceQueue2(VkDevice _device,
1333 const VkDeviceQueueInfo2 *pQueueInfo,
1334 VkQueue *pQueue)
1335 {
1336 TU_FROM_HANDLE(tu_device, device, _device);
1337 struct tu_queue *queue;
1338
1339 queue =
1340 &device->queues[pQueueInfo->queueFamilyIndex][pQueueInfo->queueIndex];
1341 if (pQueueInfo->flags != queue->flags) {
1342 /* From the Vulkan 1.1.70 spec:
1343 *
1344 * "The queue returned by vkGetDeviceQueue2 must have the same
1345 * flags value from this structure as that used at device
1346 * creation time in a VkDeviceQueueCreateInfo instance. If no
1347 * matching flags were specified at device creation time then
1348 * pQueue will return VK_NULL_HANDLE."
1349 */
1350 *pQueue = VK_NULL_HANDLE;
1351 return;
1352 }
1353
1354 *pQueue = tu_queue_to_handle(queue);
1355 }
1356
1357 void
1358 tu_GetDeviceQueue(VkDevice _device,
1359 uint32_t queueFamilyIndex,
1360 uint32_t queueIndex,
1361 VkQueue *pQueue)
1362 {
1363 const VkDeviceQueueInfo2 info =
1364 (VkDeviceQueueInfo2) { .sType = VK_STRUCTURE_TYPE_DEVICE_QUEUE_INFO_2,
1365 .queueFamilyIndex = queueFamilyIndex,
1366 .queueIndex = queueIndex };
1367
1368 tu_GetDeviceQueue2(_device, &info, pQueue);
1369 }
1370
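/* Each VkSubmitInfo becomes one DRM_MSM_GEM_SUBMIT ioctl: every IB of
 * every command buffer is flattened into a drm_msm_gem_submit_cmd entry,
 * with the merged BO list passed alongside. Only the last submit asks for
 * an out-fence fd, since the submitqueue executes in order anyway.
 */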
1371 VkResult
1372 tu_QueueSubmit(VkQueue _queue,
1373 uint32_t submitCount,
1374 const VkSubmitInfo *pSubmits,
1375 VkFence _fence)
1376 {
1377 TU_FROM_HANDLE(tu_queue, queue, _queue);
1378
1379 for (uint32_t i = 0; i < submitCount; ++i) {
1380 const VkSubmitInfo *submit = pSubmits + i;
1381 const bool last_submit = (i == submitCount - 1);
1382 struct tu_bo_list bo_list;
1383 tu_bo_list_init(&bo_list);
1384
1385 uint32_t entry_count = 0;
1386 for (uint32_t j = 0; j < submit->commandBufferCount; ++j) {
1387 TU_FROM_HANDLE(tu_cmd_buffer, cmdbuf, submit->pCommandBuffers[j]);
1388 entry_count += cmdbuf->cs.entry_count;
1389 }
1390
1391 struct drm_msm_gem_submit_cmd cmds[entry_count];
1392 uint32_t entry_idx = 0;
1393 for (uint32_t j = 0; j < submit->commandBufferCount; ++j) {
1394 TU_FROM_HANDLE(tu_cmd_buffer, cmdbuf, submit->pCommandBuffers[j]);
1395 struct tu_cs *cs = &cmdbuf->cs;
1396 for (unsigned i = 0; i < cs->entry_count; ++i, ++entry_idx) {
1397 cmds[entry_idx].type = MSM_SUBMIT_CMD_BUF;
1398 cmds[entry_idx].submit_idx =
1399 tu_bo_list_add(&bo_list, cs->entries[i].bo,
1400 MSM_SUBMIT_BO_READ | MSM_SUBMIT_BO_DUMP);
1401 cmds[entry_idx].submit_offset = cs->entries[i].offset;
1402 cmds[entry_idx].size = cs->entries[i].size;
1403 cmds[entry_idx].pad = 0;
1404 cmds[entry_idx].nr_relocs = 0;
1405 cmds[entry_idx].relocs = 0;
1406 }
1407
1408 tu_bo_list_merge(&bo_list, &cmdbuf->bo_list);
1409 }
1410
1411 uint32_t flags = MSM_PIPE_3D0;
1412 if (last_submit) {
1413 flags |= MSM_SUBMIT_FENCE_FD_OUT;
1414 }
1415
1416 struct drm_msm_gem_submit req = {
1417 .flags = flags,
1418 .queueid = queue->msm_queue_id,
1419 .bos = (uint64_t)(uintptr_t) bo_list.bo_infos,
1420 .nr_bos = bo_list.count,
1421 .cmds = (uint64_t)(uintptr_t)cmds,
1422 .nr_cmds = entry_count,
1423 };
1424
1425 int ret = drmCommandWriteRead(queue->device->physical_device->local_fd,
1426 DRM_MSM_GEM_SUBMIT,
1427 &req, sizeof(req));
1428 if (ret) {
1429 fprintf(stderr, "submit failed: %s\n", strerror(errno));
1430 abort();
1431 }
1432
1433 tu_bo_list_destroy(&bo_list);
1434
1435 if (last_submit) {
1436 /* no need to merge fences as queue execution is serialized */
1437 tu_fence_update_fd(&queue->submit_fence, req.fence_fd);
1438 }
1439 }
1440
1441 if (_fence != VK_NULL_HANDLE) {
1442 TU_FROM_HANDLE(tu_fence, fence, _fence);
1443 tu_fence_copy(fence, &queue->submit_fence);
1444 }
1445
1446 return VK_SUCCESS;
1447 }
1448
1449 VkResult
1450 tu_QueueWaitIdle(VkQueue _queue)
1451 {
1452 TU_FROM_HANDLE(tu_queue, queue, _queue);
1453
1454 tu_fence_wait_idle(&queue->submit_fence);
1455
1456 return VK_SUCCESS;
1457 }
1458
1459 VkResult
1460 tu_DeviceWaitIdle(VkDevice _device)
1461 {
1462 TU_FROM_HANDLE(tu_device, device, _device);
1463
1464 for (unsigned i = 0; i < TU_MAX_QUEUE_FAMILIES; i++) {
1465 for (unsigned q = 0; q < device->queue_count[i]; q++) {
1466 tu_QueueWaitIdle(tu_queue_to_handle(&device->queues[i][q]));
1467 }
1468 }
1469 return VK_SUCCESS;
1470 }
1471
1472 VkResult
1473 tu_ImportSemaphoreFdKHR(VkDevice _device,
1474 const VkImportSemaphoreFdInfoKHR *pImportSemaphoreFdInfo)
1475 {
1476 tu_stub();
1477
1478 return VK_SUCCESS;
1479 }
1480
1481 VkResult
1482 tu_GetSemaphoreFdKHR(VkDevice _device,
1483 const VkSemaphoreGetFdInfoKHR *pGetFdInfo,
1484 int *pFd)
1485 {
1486 tu_stub();
1487
1488 return VK_SUCCESS;
1489 }
1490
1491 VkResult
1492 tu_ImportFenceFdKHR(VkDevice _device,
1493 const VkImportFenceFdInfoKHR *pImportFenceFdInfo)
1494 {
1495 tu_stub();
1496
1497 return VK_SUCCESS;
1498 }
1499
1500 VkResult
1501 tu_GetFenceFdKHR(VkDevice _device,
1502 const VkFenceGetFdInfoKHR *pGetFdInfo,
1503 int *pFd)
1504 {
1505 tu_stub();
1506
1507 return VK_SUCCESS;
1508 }
1509
1510 VkResult
1511 tu_EnumerateInstanceExtensionProperties(const char *pLayerName,
1512 uint32_t *pPropertyCount,
1513 VkExtensionProperties *pProperties)
1514 {
1515 VK_OUTARRAY_MAKE(out, pProperties, pPropertyCount);
1516
1517 /* We support no layers */
1518 if (pLayerName)
1519 return vk_error(NULL, VK_ERROR_LAYER_NOT_PRESENT);
1520
1521 for (int i = 0; i < TU_INSTANCE_EXTENSION_COUNT; i++) {
1522 if (tu_supported_instance_extensions.extensions[i]) {
1523 vk_outarray_append(&out, prop) { *prop = tu_instance_extensions[i]; }
1524 }
1525 }
1526
1527 return vk_outarray_status(&out);
1528 }
1529
1530 VkResult
1531 tu_EnumerateDeviceExtensionProperties(VkPhysicalDevice physicalDevice,
1532 const char *pLayerName,
1533 uint32_t *pPropertyCount,
1534 VkExtensionProperties *pProperties)
1535 {
1537 TU_FROM_HANDLE(tu_physical_device, device, physicalDevice);
1538 VK_OUTARRAY_MAKE(out, pProperties, pPropertyCount);
1539
1540 /* We support no layers */
1541 if (pLayerName)
1542 return vk_error(NULL, VK_ERROR_LAYER_NOT_PRESENT);
1543
1544 for (int i = 0; i < TU_DEVICE_EXTENSION_COUNT; i++) {
1545 if (device->supported_extensions.extensions[i]) {
1546 vk_outarray_append(&out, prop) { *prop = tu_device_extensions[i]; }
1547 }
1548 }
1549
1550 return vk_outarray_status(&out);
1551 }
1552
1553 PFN_vkVoidFunction
1554 tu_GetInstanceProcAddr(VkInstance _instance, const char *pName)
1555 {
1556 TU_FROM_HANDLE(tu_instance, instance, _instance);
1557
1558 return tu_lookup_entrypoint_checked(
1559 pName, instance ? instance->api_version : 0,
1560 instance ? &instance->enabled_extensions : NULL, NULL);
1561 }
1562
1563 /* The loader wants us to expose a second GetInstanceProcAddr function
1564 * to work around certain LD_PRELOAD issues seen in apps.
1565 */
1566 PUBLIC
1567 VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL
1568 vk_icdGetInstanceProcAddr(VkInstance instance, const char *pName);
1569
1570 PUBLIC
1571 VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL
1572 vk_icdGetInstanceProcAddr(VkInstance instance, const char *pName)
1573 {
1574 return tu_GetInstanceProcAddr(instance, pName);
1575 }
1576
1577 PFN_vkVoidFunction
1578 tu_GetDeviceProcAddr(VkDevice _device, const char *pName)
1579 {
1580 TU_FROM_HANDLE(tu_device, device, _device);
1581
1582 return tu_lookup_entrypoint_checked(pName, device->instance->api_version,
1583 &device->instance->enabled_extensions,
1584 &device->enabled_extensions);
1585 }
1586
1587 static VkResult
1588 tu_alloc_memory(struct tu_device *device,
1589 const VkMemoryAllocateInfo *pAllocateInfo,
1590 const VkAllocationCallbacks *pAllocator,
1591 VkDeviceMemory *pMem)
1592 {
1593 struct tu_device_memory *mem;
1594 VkResult result;
1595
1596 assert(pAllocateInfo->sType == VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO);
1597
1598 if (pAllocateInfo->allocationSize == 0) {
1599 /* Apparently, this is allowed */
1600 *pMem = VK_NULL_HANDLE;
1601 return VK_SUCCESS;
1602 }
1603
1604 mem = vk_alloc2(&device->alloc, pAllocator, sizeof(*mem), 8,
1605 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
1606 if (mem == NULL)
1607 return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
1608
1609 const VkImportMemoryFdInfoKHR *fd_info =
1610 vk_find_struct_const(pAllocateInfo->pNext, IMPORT_MEMORY_FD_INFO_KHR);
1611 if (fd_info && !fd_info->handleType)
1612 fd_info = NULL;
1613
1614 if (fd_info) {
1615 assert(fd_info->handleType ==
1616 VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT ||
1617 fd_info->handleType ==
1618 VK_EXTERNAL_MEMORY_HANDLE_TYPE_DMA_BUF_BIT_EXT);
1619
1620 /*
1621 * TODO Importing the same fd twice gives us the same handle without
1622 * reference counting. We need to maintain a per-instance handle-to-bo
1623 * table and add reference count to tu_bo.
1624 */
1625 result = tu_bo_init_dmabuf(device, &mem->bo,
1626 pAllocateInfo->allocationSize, fd_info->fd);
1627 if (result == VK_SUCCESS) {
1628 /* take ownership and close the fd */
1629 close(fd_info->fd);
1630 }
1631 } else {
1632 result =
1633 tu_bo_init_new(device, &mem->bo, pAllocateInfo->allocationSize);
1634 }
1635
1636 if (result != VK_SUCCESS) {
1637 vk_free2(&device->alloc, pAllocator, mem);
1638 return result;
1639 }
1640
1641 mem->size = pAllocateInfo->allocationSize;
1642 mem->type_index = pAllocateInfo->memoryTypeIndex;
1643
1644 mem->map = NULL;
1645 mem->user_ptr = NULL;
1646
1647 *pMem = tu_device_memory_to_handle(mem);
1648
1649 return VK_SUCCESS;
1650 }
1651
1652 VkResult
1653 tu_AllocateMemory(VkDevice _device,
1654 const VkMemoryAllocateInfo *pAllocateInfo,
1655 const VkAllocationCallbacks *pAllocator,
1656 VkDeviceMemory *pMem)
1657 {
1658 TU_FROM_HANDLE(tu_device, device, _device);
1659 return tu_alloc_memory(device, pAllocateInfo, pAllocator, pMem);
1660 }
1661
1662 void
1663 tu_FreeMemory(VkDevice _device,
1664 VkDeviceMemory _mem,
1665 const VkAllocationCallbacks *pAllocator)
1666 {
1667 TU_FROM_HANDLE(tu_device, device, _device);
1668 TU_FROM_HANDLE(tu_device_memory, mem, _mem);
1669
1670 if (mem == NULL)
1671 return;
1672
1673 tu_bo_finish(device, &mem->bo);
1674 vk_free2(&device->alloc, pAllocator, mem);
1675 }
1676
1677 VkResult
1678 tu_MapMemory(VkDevice _device,
1679 VkDeviceMemory _memory,
1680 VkDeviceSize offset,
1681 VkDeviceSize size,
1682 VkMemoryMapFlags flags,
1683 void **ppData)
1684 {
1685 TU_FROM_HANDLE(tu_device, device, _device);
1686 TU_FROM_HANDLE(tu_device_memory, mem, _memory);
1687 VkResult result;
1688
1689 if (mem == NULL) {
1690 *ppData = NULL;
1691 return VK_SUCCESS;
1692 }
1693
1694 if (mem->user_ptr) {
1695 *ppData = mem->user_ptr;
1696 } else if (!mem->map) {
1697 result = tu_bo_map(device, &mem->bo);
1698 if (result != VK_SUCCESS)
1699 return result;
1700 *ppData = mem->map = mem->bo.map;
1701 } else
1702 *ppData = mem->map;
1703
1704 if (*ppData) {
1705 *ppData += offset;
1706 return VK_SUCCESS;
1707 }
1708
1709 return vk_error(device->instance, VK_ERROR_MEMORY_MAP_FAILED);
1710 }
1711
1712 void
1713 tu_UnmapMemory(VkDevice _device, VkDeviceMemory _memory)
1714 {
1715 /* I do not see any unmapping done by the freedreno Gallium driver. */
1716 }
1717
1718 VkResult
1719 tu_FlushMappedMemoryRanges(VkDevice _device,
1720 uint32_t memoryRangeCount,
1721 const VkMappedMemoryRange *pMemoryRanges)
1722 {
1723 return VK_SUCCESS;
1724 }
1725
1726 VkResult
1727 tu_InvalidateMappedMemoryRanges(VkDevice _device,
1728 uint32_t memoryRangeCount,
1729 const VkMappedMemoryRange *pMemoryRanges)
1730 {
1731 return VK_SUCCESS;
1732 }
1733
1734 void
1735 tu_GetBufferMemoryRequirements(VkDevice _device,
1736 VkBuffer _buffer,
1737 VkMemoryRequirements *pMemoryRequirements)
1738 {
1739 TU_FROM_HANDLE(tu_buffer, buffer, _buffer);
1740
1741 pMemoryRequirements->memoryTypeBits = 1;
1742 pMemoryRequirements->alignment = 64;
1743 pMemoryRequirements->size =
1744 align64(buffer->size, pMemoryRequirements->alignment);
1745 }
1746
1747 void
1748 tu_GetBufferMemoryRequirements2(
1749 VkDevice device,
1750 const VkBufferMemoryRequirementsInfo2 *pInfo,
1751 VkMemoryRequirements2 *pMemoryRequirements)
1752 {
1753 tu_GetBufferMemoryRequirements(device, pInfo->buffer,
1754 &pMemoryRequirements->memoryRequirements);
1755 }
1756
1757 void
1758 tu_GetImageMemoryRequirements(VkDevice _device,
1759 VkImage _image,
1760 VkMemoryRequirements *pMemoryRequirements)
1761 {
1762 TU_FROM_HANDLE(tu_image, image, _image);
1763
1764 pMemoryRequirements->memoryTypeBits = 1;
1765 pMemoryRequirements->size = image->layout.size;
1766 pMemoryRequirements->alignment = image->layout.base_align;
1767 }
1768
1769 void
1770 tu_GetImageMemoryRequirements2(VkDevice device,
1771 const VkImageMemoryRequirementsInfo2 *pInfo,
1772 VkMemoryRequirements2 *pMemoryRequirements)
1773 {
1774 tu_GetImageMemoryRequirements(device, pInfo->image,
1775 &pMemoryRequirements->memoryRequirements);
1776 }
1777
1778 void
1779 tu_GetImageSparseMemoryRequirements(
1780 VkDevice device,
1781 VkImage image,
1782 uint32_t *pSparseMemoryRequirementCount,
1783 VkSparseImageMemoryRequirements *pSparseMemoryRequirements)
1784 {
1785 tu_stub();
1786 }
1787
1788 void
1789 tu_GetImageSparseMemoryRequirements2(
1790 VkDevice device,
1791 const VkImageSparseMemoryRequirementsInfo2 *pInfo,
1792 uint32_t *pSparseMemoryRequirementCount,
1793 VkSparseImageMemoryRequirements2 *pSparseMemoryRequirements)
1794 {
1795 tu_stub();
1796 }
1797
1798 void
1799 tu_GetDeviceMemoryCommitment(VkDevice device,
1800 VkDeviceMemory memory,
1801 VkDeviceSize *pCommittedMemoryInBytes)
1802 {
1803 *pCommittedMemoryInBytes = 0;
1804 }
1805
1806 VkResult
1807 tu_BindBufferMemory2(VkDevice device,
1808 uint32_t bindInfoCount,
1809 const VkBindBufferMemoryInfo *pBindInfos)
1810 {
1811 for (uint32_t i = 0; i < bindInfoCount; ++i) {
1812 TU_FROM_HANDLE(tu_device_memory, mem, pBindInfos[i].memory);
1813 TU_FROM_HANDLE(tu_buffer, buffer, pBindInfos[i].buffer);
1814
1815 if (mem) {
1816 buffer->bo = &mem->bo;
1817 buffer->bo_offset = pBindInfos[i].memoryOffset;
1818 } else {
1819 buffer->bo = NULL;
1820 }
1821 }
1822 return VK_SUCCESS;
1823 }
1824
1825 VkResult
1826 tu_BindBufferMemory(VkDevice device,
1827 VkBuffer buffer,
1828 VkDeviceMemory memory,
1829 VkDeviceSize memoryOffset)
1830 {
1831 const VkBindBufferMemoryInfo info = {
1832 .sType = VK_STRUCTURE_TYPE_BIND_BUFFER_MEMORY_INFO,
1833 .buffer = buffer,
1834 .memory = memory,
1835 .memoryOffset = memoryOffset
1836 };
1837
1838 return tu_BindBufferMemory2(device, 1, &info);
1839 }
1840
1841 VkResult
1842 tu_BindImageMemory2(VkDevice device,
1843 uint32_t bindInfoCount,
1844 const VkBindImageMemoryInfo *pBindInfos)
1845 {
1846 for (uint32_t i = 0; i < bindInfoCount; ++i) {
1847 TU_FROM_HANDLE(tu_image, image, pBindInfos[i].image);
1848 TU_FROM_HANDLE(tu_device_memory, mem, pBindInfos[i].memory);
1849
1850 if (mem) {
1851 image->bo = &mem->bo;
1852 image->bo_offset = pBindInfos[i].memoryOffset;
1853 } else {
1854 image->bo = NULL;
1855 image->bo_offset = 0;
1856 }
1857 }
1858
1859 return VK_SUCCESS;
1860 }
1861
1862 VkResult
1863 tu_BindImageMemory(VkDevice device,
1864 VkImage image,
1865 VkDeviceMemory memory,
1866 VkDeviceSize memoryOffset)
1867 {
1868 const VkBindImageMemoryInfo info = {
1869 .sType = VK_STRUCTURE_TYPE_BIND_IMAGE_MEMORY_INFO,
1870 .image = image,
1871 .memory = memory,
1872 .memoryOffset = memoryOffset
1873 };
1874
1875 return tu_BindImageMemory2(device, 1, &info);
1876 }
1877
1878 VkResult
1879 tu_QueueBindSparse(VkQueue _queue,
1880 uint32_t bindInfoCount,
1881 const VkBindSparseInfo *pBindInfo,
1882 VkFence _fence)
1883 {
1884 return VK_SUCCESS;
1885 }
1886
1887 /* Queue semaphore functions */
1888
1889 VkResult
1890 tu_CreateSemaphore(VkDevice _device,
1891 const VkSemaphoreCreateInfo *pCreateInfo,
1892 const VkAllocationCallbacks *pAllocator,
1893 VkSemaphore *pSemaphore)
1894 {
1895 TU_FROM_HANDLE(tu_device, device, _device);
1896
1897 struct tu_semaphore *sem =
1898 vk_alloc2(&device->alloc, pAllocator, sizeof(*sem), 8,
1899 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
1900 if (!sem)
1901 return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
1902
1903 *pSemaphore = tu_semaphore_to_handle(sem);
1904 return VK_SUCCESS;
1905 }
1906
1907 void
1908 tu_DestroySemaphore(VkDevice _device,
1909 VkSemaphore _semaphore,
1910 const VkAllocationCallbacks *pAllocator)
1911 {
1912 TU_FROM_HANDLE(tu_device, device, _device);
1913 TU_FROM_HANDLE(tu_semaphore, sem, _semaphore);
1914 if (!sem)
1915 return;
1916
1917 vk_free2(&device->alloc, pAllocator, sem);
1918 }
1919
1920 VkResult
1921 tu_CreateEvent(VkDevice _device,
1922 const VkEventCreateInfo *pCreateInfo,
1923 const VkAllocationCallbacks *pAllocator,
1924 VkEvent *pEvent)
1925 {
1926 TU_FROM_HANDLE(tu_device, device, _device);
1927 struct tu_event *event =
1928 vk_alloc2(&device->alloc, pAllocator, sizeof(*event), 8,
1929 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
1930
1931 if (!event)
1932 return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
1933
1934 VkResult result = tu_bo_init_new(device, &event->bo, 0x1000);
1935 if (result != VK_SUCCESS)
1936 goto fail_alloc;
1937
1938 result = tu_bo_map(device, &event->bo);
1939 if (result != VK_SUCCESS)
1940 goto fail_map;
1941
1942 *pEvent = tu_event_to_handle(event);
1943
1944 return VK_SUCCESS;
1945
1946 fail_map:
1947 tu_bo_finish(device, &event->bo);
1948 fail_alloc:
1949 vk_free2(&device->alloc, pAllocator, event);
1950 return vk_error(device->instance, result);
1951 }
1952
1953 void
1954 tu_DestroyEvent(VkDevice _device,
1955 VkEvent _event,
1956 const VkAllocationCallbacks *pAllocator)
1957 {
1958 TU_FROM_HANDLE(tu_device, device, _device);
1959 TU_FROM_HANDLE(tu_event, event, _event);
1960
1961 if (!event)
1962 return;
1963
1964 tu_bo_finish(device, &event->bo);
1965 vk_free2(&device->alloc, pAllocator, event);
1966 }
1967
1968 VkResult
1969 tu_GetEventStatus(VkDevice _device, VkEvent _event)
1970 {
1971 TU_FROM_HANDLE(tu_event, event, _event);
1972
1973 if (*(uint64_t*) event->bo.map == 1)
1974 return VK_EVENT_SET;
1975 return VK_EVENT_RESET;
1976 }
1977
1978 VkResult
1979 tu_SetEvent(VkDevice _device, VkEvent _event)
1980 {
1981 TU_FROM_HANDLE(tu_event, event, _event);
1982 *(uint64_t*) event->bo.map = 1;
1983
1984 return VK_SUCCESS;
1985 }
1986
1987 VkResult
1988 tu_ResetEvent(VkDevice _device, VkEvent _event)
1989 {
1990 TU_FROM_HANDLE(tu_event, event, _event);
1991 *(uint64_t*) event->bo.map = 0;
1992
1993 return VK_SUCCESS;
1994 }
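
/* The event payload is the first 64-bit word of the host-mapped 4 KiB BO
 * created in tu_CreateEvent(), so the three entry points above are plain
 * host reads/writes. A round-trip sketch (hypothetical handles):
 *
 *    tu_SetEvent(dev, ev);
 *    assert(tu_GetEventStatus(dev, ev) == VK_EVENT_SET);
 *    tu_ResetEvent(dev, ev);
 *    assert(tu_GetEventStatus(dev, ev) == VK_EVENT_RESET);
 */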
1995
1996 VkResult
1997 tu_CreateBuffer(VkDevice _device,
1998 const VkBufferCreateInfo *pCreateInfo,
1999 const VkAllocationCallbacks *pAllocator,
2000 VkBuffer *pBuffer)
2001 {
2002 TU_FROM_HANDLE(tu_device, device, _device);
2003 struct tu_buffer *buffer;
2004
2005 assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO);
2006
2007 buffer = vk_alloc2(&device->alloc, pAllocator, sizeof(*buffer), 8,
2008 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
2009 if (buffer == NULL)
2010 return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
2011
2012 buffer->size = pCreateInfo->size;
2013 buffer->usage = pCreateInfo->usage;
2014 buffer->flags = pCreateInfo->flags;
2015
2016 *pBuffer = tu_buffer_to_handle(buffer);
2017
2018 return VK_SUCCESS;
2019 }
2020
2021 void
2022 tu_DestroyBuffer(VkDevice _device,
2023 VkBuffer _buffer,
2024 const VkAllocationCallbacks *pAllocator)
2025 {
2026 TU_FROM_HANDLE(tu_device, device, _device);
2027 TU_FROM_HANDLE(tu_buffer, buffer, _buffer);
2028
2029 if (!buffer)
2030 return;
2031
2032 vk_free2(&device->alloc, pAllocator, buffer);
2033 }
2034
2035 VkResult
2036 tu_CreateFramebuffer(VkDevice _device,
2037 const VkFramebufferCreateInfo *pCreateInfo,
2038 const VkAllocationCallbacks *pAllocator,
2039 VkFramebuffer *pFramebuffer)
2040 {
2041 TU_FROM_HANDLE(tu_device, device, _device);
2042 struct tu_framebuffer *framebuffer;
2043
2044 assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO);
2045
2046 size_t size = sizeof(*framebuffer) + sizeof(struct tu_attachment_info) *
2047 pCreateInfo->attachmentCount;
2048 framebuffer = vk_alloc2(&device->alloc, pAllocator, size, 8,
2049 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
2050 if (framebuffer == NULL)
2051 return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
2052
2053 framebuffer->attachment_count = pCreateInfo->attachmentCount;
2054 framebuffer->width = pCreateInfo->width;
2055 framebuffer->height = pCreateInfo->height;
2056 framebuffer->layers = pCreateInfo->layers;
2057 for (uint32_t i = 0; i < pCreateInfo->attachmentCount; i++) {
2058 VkImageView _iview = pCreateInfo->pAttachments[i];
2059 struct tu_image_view *iview = tu_image_view_from_handle(_iview);
2060 framebuffer->attachments[i].attachment = iview;
2061 }
2062
2063 *pFramebuffer = tu_framebuffer_to_handle(framebuffer);
2064 return VK_SUCCESS;
2065 }
2066
2067 void
2068 tu_DestroyFramebuffer(VkDevice _device,
2069 VkFramebuffer _fb,
2070 const VkAllocationCallbacks *pAllocator)
2071 {
2072 TU_FROM_HANDLE(tu_device, device, _device);
2073 TU_FROM_HANDLE(tu_framebuffer, fb, _fb);
2074
2075 if (!fb)
2076 return;
2077 vk_free2(&device->alloc, pAllocator, fb);
2078 }
2079
2080 static enum a6xx_tex_clamp
2081 tu6_tex_wrap(VkSamplerAddressMode address_mode)
2082 {
2083 switch (address_mode) {
2084 case VK_SAMPLER_ADDRESS_MODE_REPEAT:
2085 return A6XX_TEX_REPEAT;
2086 case VK_SAMPLER_ADDRESS_MODE_MIRRORED_REPEAT:
2087 return A6XX_TEX_MIRROR_REPEAT;
2088 case VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE:
2089 return A6XX_TEX_CLAMP_TO_EDGE;
2090 case VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_BORDER:
2091 return A6XX_TEX_CLAMP_TO_BORDER;
2092 case VK_SAMPLER_ADDRESS_MODE_MIRROR_CLAMP_TO_EDGE:
2093 /* only works for power-of-two sizes; needs emulation otherwise! */
2094 return A6XX_TEX_MIRROR_CLAMP;
2095 default:
2096 unreachable("illegal tex wrap mode");
2097 break;
2098 }
2099 }
2100
2101 static enum a6xx_tex_filter
2102 tu6_tex_filter(VkFilter filter, unsigned aniso)
2103 {
2104 switch (filter) {
2105 case VK_FILTER_NEAREST:
2106 return A6XX_TEX_NEAREST;
2107 case VK_FILTER_LINEAR:
2108 return aniso ? A6XX_TEX_ANISO : A6XX_TEX_LINEAR;
2109 case VK_FILTER_CUBIC_EXT:
2110 return A6XX_TEX_CUBIC;
2111 default:
2112 unreachable("illegal texture filter");
2113 break;
2114 }
2115 }
2116
2117 static inline enum adreno_compare_func
2118 tu6_compare_func(VkCompareOp op)
2119 {
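/* This relies on VkCompareOp and adreno_compare_func sharing the same
 * numerical values (NEVER = 0 through ALWAYS = 7), so a direct cast is
 * sufficient and no translation table is needed.
 */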
2120 return (enum adreno_compare_func) op;
2121 }
2122
2123 static void
2124 tu_init_sampler(struct tu_device *device,
2125 struct tu_sampler *sampler,
2126 const VkSamplerCreateInfo *pCreateInfo)
2127 {
2128 const struct VkSamplerReductionModeCreateInfo *reduction =
2129 vk_find_struct_const(pCreateInfo->pNext, SAMPLER_REDUCTION_MODE_CREATE_INFO);
2130
2131 unsigned aniso = pCreateInfo->anisotropyEnable ?
2132 util_last_bit(MIN2((uint32_t)pCreateInfo->maxAnisotropy >> 1, 8)) : 0;
2133 bool miplinear = (pCreateInfo->mipmapMode == VK_SAMPLER_MIPMAP_MODE_LINEAR);
2134
2135 sampler->descriptor[0] =
2136 COND(miplinear, A6XX_TEX_SAMP_0_MIPFILTER_LINEAR_NEAR) |
2137 A6XX_TEX_SAMP_0_XY_MAG(tu6_tex_filter(pCreateInfo->magFilter, aniso)) |
2138 A6XX_TEX_SAMP_0_XY_MIN(tu6_tex_filter(pCreateInfo->minFilter, aniso)) |
2139 A6XX_TEX_SAMP_0_ANISO(aniso) |
2140 A6XX_TEX_SAMP_0_WRAP_S(tu6_tex_wrap(pCreateInfo->addressModeU)) |
2141 A6XX_TEX_SAMP_0_WRAP_T(tu6_tex_wrap(pCreateInfo->addressModeV)) |
2142 A6XX_TEX_SAMP_0_WRAP_R(tu6_tex_wrap(pCreateInfo->addressModeW)) |
2143 A6XX_TEX_SAMP_0_LOD_BIAS(pCreateInfo->mipLodBias);
2144 sampler->descriptor[1] =
2145 /* COND(!cso->seamless_cube_map, A6XX_TEX_SAMP_1_CUBEMAPSEAMLESSFILTOFF) | */
2146 COND(pCreateInfo->unnormalizedCoordinates, A6XX_TEX_SAMP_1_UNNORM_COORDS) |
2147 A6XX_TEX_SAMP_1_MIN_LOD(pCreateInfo->minLod) |
2148 A6XX_TEX_SAMP_1_MAX_LOD(pCreateInfo->maxLod) |
2149 COND(pCreateInfo->compareEnable,
2150 A6XX_TEX_SAMP_1_COMPARE_FUNC(tu6_compare_func(pCreateInfo->compareOp)));
2151 /* This is an offset into the border_color BO, which we fill with all the
2152 * possible Vulkan border colors in the correct order, so we can just use
2153 * the Vulkan enum with no translation necessary.
2154 */
2155 sampler->descriptor[2] =
2156 A6XX_TEX_SAMP_2_BCOLOR_OFFSET((unsigned) pCreateInfo->borderColor *
2157 sizeof(struct bcolor_entry));
2158 sampler->descriptor[3] = 0;
2159
2160 if (reduction) {
2161 /* note: vulkan enum matches hw */
2162 sampler->descriptor[2] |= A6XX_TEX_SAMP_2_REDUCTION_MODE(reduction->reductionMode);
2163 }
2164
2165 /* TODO:
2166 * A6XX_TEX_SAMP_1_MIPFILTER_LINEAR_FAR disables mipmapping, but vk has no NONE mipfilter?
2167 */
2168 }
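
/* Illustrative input (hypothetical, not part of the driver): a trilinear
 * repeating sampler with anisotropy disabled yields aniso = 0, so both
 * XY_MAG and XY_MIN become A6XX_TEX_LINEAR, MIPFILTER_LINEAR_NEAR is set,
 * and all three WRAP fields become A6XX_TEX_REPEAT:
 *
 *    VkSamplerCreateInfo info = {
 *       .sType = VK_STRUCTURE_TYPE_SAMPLER_CREATE_INFO,
 *       .magFilter = VK_FILTER_LINEAR,
 *       .minFilter = VK_FILTER_LINEAR,
 *       .mipmapMode = VK_SAMPLER_MIPMAP_MODE_LINEAR,
 *       .addressModeU = VK_SAMPLER_ADDRESS_MODE_REPEAT,
 *       .addressModeV = VK_SAMPLER_ADDRESS_MODE_REPEAT,
 *       .addressModeW = VK_SAMPLER_ADDRESS_MODE_REPEAT,
 *       .maxLod = VK_LOD_CLAMP_NONE,
 *    };
 *    struct tu_sampler s;
 *    tu_init_sampler(device, &s, &info);
 */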
2169
2170 VkResult
2171 tu_CreateSampler(VkDevice _device,
2172 const VkSamplerCreateInfo *pCreateInfo,
2173 const VkAllocationCallbacks *pAllocator,
2174 VkSampler *pSampler)
2175 {
2176 TU_FROM_HANDLE(tu_device, device, _device);
2177 struct tu_sampler *sampler;
2178
2179 assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_SAMPLER_CREATE_INFO);
2180
2181 sampler = vk_alloc2(&device->alloc, pAllocator, sizeof(*sampler), 8,
2182 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
2183 if (!sampler)
2184 return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
2185
2186 tu_init_sampler(device, sampler, pCreateInfo);
2187 *pSampler = tu_sampler_to_handle(sampler);
2188
2189 return VK_SUCCESS;
2190 }
2191
2192 void
2193 tu_DestroySampler(VkDevice _device,
2194 VkSampler _sampler,
2195 const VkAllocationCallbacks *pAllocator)
2196 {
2197 TU_FROM_HANDLE(tu_device, device, _device);
2198 TU_FROM_HANDLE(tu_sampler, sampler, _sampler);
2199
2200 if (!sampler)
2201 return;
2202 vk_free2(&device->alloc, pAllocator, sampler);
2203 }
2204
2205 /* vk_icd.h does not declare this function, so we declare it here to
2206 * suppress -Wmissing-prototypes.
2207 */
2208 PUBLIC VKAPI_ATTR VkResult VKAPI_CALL
2209 vk_icdNegotiateLoaderICDInterfaceVersion(uint32_t *pSupportedVersion);
2210
2211 PUBLIC VKAPI_ATTR VkResult VKAPI_CALL
2212 vk_icdNegotiateLoaderICDInterfaceVersion(uint32_t *pSupportedVersion)
2213 {
2214 /* For the full details on loader interface versioning, see
2215 * <https://github.com/KhronosGroup/Vulkan-LoaderAndValidationLayers/blob/master/loader/LoaderAndLayerInterface.md>.
2216 * What follows is a condensed summary, to help you navigate the large and
2217 * confusing official doc.
2218 *
2219 * - Loader interface v0 is incompatible with later versions. We don't
2220 * support it.
2221 *
2222 * - In loader interface v1:
2223 * - The first ICD entrypoint called by the loader is
2224 * vk_icdGetInstanceProcAddr(). The ICD must statically expose this
2225 * entrypoint.
2226 * - The ICD must statically expose no other Vulkan symbol unless it
2227 * is linked with -Bsymbolic.
2228 * - Each dispatchable Vulkan handle created by the ICD must be
2229 * a pointer to a struct whose first member is VK_LOADER_DATA. The
2230 * ICD must initialize VK_LOADER_DATA.loadMagic to
2231 * ICD_LOADER_MAGIC.
2232 * - The loader implements vkCreate{PLATFORM}SurfaceKHR() and
2233 * vkDestroySurfaceKHR(). The ICD must be capable of working with
2234 * such loader-managed surfaces.
2235 *
2236 * - Loader interface v2 differs from v1 in:
2237 * - The first ICD entrypoint called by the loader is
2238 * vk_icdNegotiateLoaderICDInterfaceVersion(). The ICD must
2239 * statically expose this entrypoint.
2240 *
2241 * - Loader interface v3 differs from v2 in:
2242 * - The ICD must implement vkCreate{PLATFORM}SurfaceKHR(),
2243 * vkDestroySurfaceKHR(), and other API that uses VkSurfaceKHR,
2244 * because the loader no longer does so.
2245 */
2246 *pSupportedVersion = MIN2(*pSupportedVersion, 3u);
2247 return VK_SUCCESS;
2248 }
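
/* Example negotiation (hypothetical values): a loader supporting interface
 * v5 calls with *pSupportedVersion == 5 and reads back 3; an older loader
 * passing 2 gets 2 back. Both sides then use the agreed lower version.
 */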
2249
2250 VkResult
2251 tu_GetMemoryFdKHR(VkDevice _device,
2252 const VkMemoryGetFdInfoKHR *pGetFdInfo,
2253 int *pFd)
2254 {
2255 TU_FROM_HANDLE(tu_device, device, _device);
2256 TU_FROM_HANDLE(tu_device_memory, memory, pGetFdInfo->memory);
2257
2258 assert(pGetFdInfo->sType == VK_STRUCTURE_TYPE_MEMORY_GET_FD_INFO_KHR);
2259
2260 /* At the moment, we support only the below handle types. */
2261 assert(pGetFdInfo->handleType ==
2262 VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT ||
2263 pGetFdInfo->handleType ==
2264 VK_EXTERNAL_MEMORY_HANDLE_TYPE_DMA_BUF_BIT_EXT);
2265
2266 int prime_fd = tu_bo_export_dmabuf(device, &memory->bo);
2267 if (prime_fd < 0)
2268 return vk_error(device->instance, VK_ERROR_OUT_OF_DEVICE_MEMORY);
2269
2270 *pFd = prime_fd;
2271 return VK_SUCCESS;
2272 }
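
/* Application-side sketch (hypothetical handles): exporting a dma-buf fd
 * that can be handed to another process or API:
 *
 *    VkMemoryGetFdInfoKHR fd_info = {
 *       .sType = VK_STRUCTURE_TYPE_MEMORY_GET_FD_INFO_KHR,
 *       .memory = mem,
 *       .handleType = VK_EXTERNAL_MEMORY_HANDLE_TYPE_DMA_BUF_BIT_EXT,
 *    };
 *    int fd = -1;
 *    vkGetMemoryFdKHR(device, &fd_info, &fd);
 */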
2273
2274 VkResult
2275 tu_GetMemoryFdPropertiesKHR(VkDevice _device,
2276 VkExternalMemoryHandleTypeFlagBits handleType,
2277 int fd,
2278 VkMemoryFdPropertiesKHR *pMemoryFdProperties)
2279 {
2280 assert(handleType == VK_EXTERNAL_MEMORY_HANDLE_TYPE_DMA_BUF_BIT_EXT);
2281 pMemoryFdProperties->memoryTypeBits = 1;
2282 return VK_SUCCESS;
2283 }
2284
2285 void
2286 tu_GetPhysicalDeviceExternalSemaphoreProperties(
2287 VkPhysicalDevice physicalDevice,
2288 const VkPhysicalDeviceExternalSemaphoreInfo *pExternalSemaphoreInfo,
2289 VkExternalSemaphoreProperties *pExternalSemaphoreProperties)
2290 {
2291 pExternalSemaphoreProperties->exportFromImportedHandleTypes = 0;
2292 pExternalSemaphoreProperties->compatibleHandleTypes = 0;
2293 pExternalSemaphoreProperties->externalSemaphoreFeatures = 0;
2294 }
2295
2296 void
2297 tu_GetPhysicalDeviceExternalFenceProperties(
2298 VkPhysicalDevice physicalDevice,
2299 const VkPhysicalDeviceExternalFenceInfo *pExternalFenceInfo,
2300 VkExternalFenceProperties *pExternalFenceProperties)
2301 {
2302 pExternalFenceProperties->exportFromImportedHandleTypes = 0;
2303 pExternalFenceProperties->compatibleHandleTypes = 0;
2304 pExternalFenceProperties->externalFenceFeatures = 0;
2305 }
2306
2307 VkResult
2308 tu_CreateDebugReportCallbackEXT(
2309 VkInstance _instance,
2310 const VkDebugReportCallbackCreateInfoEXT *pCreateInfo,
2311 const VkAllocationCallbacks *pAllocator,
2312 VkDebugReportCallbackEXT *pCallback)
2313 {
2314 TU_FROM_HANDLE(tu_instance, instance, _instance);
2315 return vk_create_debug_report_callback(&instance->debug_report_callbacks,
2316 pCreateInfo, pAllocator,
2317 &instance->alloc, pCallback);
2318 }
2319
2320 void
2321 tu_DestroyDebugReportCallbackEXT(VkInstance _instance,
2322 VkDebugReportCallbackEXT _callback,
2323 const VkAllocationCallbacks *pAllocator)
2324 {
2325 TU_FROM_HANDLE(tu_instance, instance, _instance);
2326 vk_destroy_debug_report_callback(&instance->debug_report_callbacks,
2327 _callback, pAllocator, &instance->alloc);
2328 }
2329
2330 void
2331 tu_DebugReportMessageEXT(VkInstance _instance,
2332 VkDebugReportFlagsEXT flags,
2333 VkDebugReportObjectTypeEXT objectType,
2334 uint64_t object,
2335 size_t location,
2336 int32_t messageCode,
2337 const char *pLayerPrefix,
2338 const char *pMessage)
2339 {
2340 TU_FROM_HANDLE(tu_instance, instance, _instance);
2341 vk_debug_report(&instance->debug_report_callbacks, flags, objectType,
2342 object, location, messageCode, pLayerPrefix, pMessage);
2343 }
2344
2345 void
2346 tu_GetDeviceGroupPeerMemoryFeatures(
2347 VkDevice device,
2348 uint32_t heapIndex,
2349 uint32_t localDeviceIndex,
2350 uint32_t remoteDeviceIndex,
2351 VkPeerMemoryFeatureFlags *pPeerMemoryFeatures)
2352 {
2353 assert(localDeviceIndex == remoteDeviceIndex);
2354
2355 *pPeerMemoryFeatures = VK_PEER_MEMORY_FEATURE_COPY_SRC_BIT |
2356 VK_PEER_MEMORY_FEATURE_COPY_DST_BIT |
2357 VK_PEER_MEMORY_FEATURE_GENERIC_SRC_BIT |
2358 VK_PEER_MEMORY_FEATURE_GENERIC_DST_BIT;
2359 }
2360
2361 void tu_GetPhysicalDeviceMultisamplePropertiesEXT(
2362 VkPhysicalDevice physicalDevice,
2363 VkSampleCountFlagBits samples,
2364 VkMultisamplePropertiesEXT* pMultisampleProperties)
2365 {
2366 TU_FROM_HANDLE(tu_physical_device, pdevice, physicalDevice);
2367
2368 if (samples <= VK_SAMPLE_COUNT_4_BIT && pdevice->supported_extensions.EXT_sample_locations)
2369 pMultisampleProperties->maxSampleLocationGridSize = (VkExtent2D){ 1, 1 };
2370 else
2371 pMultisampleProperties->maxSampleLocationGridSize = (VkExtent2D){ 0, 0 };
2372 }