/*
 * Copyright © 2016 Red Hat.
 * Copyright © 2016 Bas Nieuwenhuizen
 *
 * based in part on anv driver which is:
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

#include "tu_private.h"

#include <fcntl.h>
#include <libsync.h>
#include <stdbool.h>
#include <string.h>
#include <sys/mman.h>
#include <sys/sysinfo.h>
#include <unistd.h>
#include <xf86drm.h>

#include "compiler/glsl_types.h"
#include "util/debug.h"
#include "util/disk_cache.h"
#include "vk_format.h"
#include "vk_util.h"

#include "drm-uapi/msm_drm.h"

static int
tu_device_get_cache_uuid(uint16_t family, void *uuid)
{
   uint32_t mesa_timestamp;
   uint16_t f = family;
   memset(uuid, 0, VK_UUID_SIZE);
   if (!disk_cache_get_function_timestamp(tu_device_get_cache_uuid,
                                          &mesa_timestamp))
      return -1;

   memcpy(uuid, &mesa_timestamp, 4);
   memcpy((char *) uuid + 4, &f, 2);
   snprintf((char *) uuid + 6, VK_UUID_SIZE - 10, "tu");
   return 0;
}
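
/* Sketch of the resulting cache-UUID layout (assuming VK_UUID_SIZE == 16,
 * which the offsets above rely on):
 *
 *    bytes 0..3    mesa build timestamp
 *    bytes 4..5    GPU family (e.g. 630)
 *    bytes 6..8    the string "tu" plus its NUL terminator
 *    bytes 9..15   zeros from the memset()
 */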

static void
tu_get_driver_uuid(void *uuid)
{
   memset(uuid, 0, VK_UUID_SIZE);
   snprintf(uuid, VK_UUID_SIZE, "freedreno");
}

static void
tu_get_device_uuid(void *uuid)
{
   memset(uuid, 0, VK_UUID_SIZE);
}

static VkResult
tu_bo_init(struct tu_device *dev,
           struct tu_bo *bo,
           uint32_t gem_handle,
           uint64_t size)
{
   uint64_t iova = tu_gem_info_iova(dev, gem_handle);
   if (!iova)
      return VK_ERROR_OUT_OF_DEVICE_MEMORY;

   *bo = (struct tu_bo) {
      .gem_handle = gem_handle,
      .size = size,
      .iova = iova,
   };

   return VK_SUCCESS;
}

VkResult
tu_bo_init_new(struct tu_device *dev, struct tu_bo *bo, uint64_t size)
{
   /* TODO: Choose better flags. As of 2018-11-12, freedreno/drm/msm_bo.c
    * always sets `flags = MSM_BO_WC`, and we copy that behavior here.
    */
   uint32_t gem_handle = tu_gem_new(dev, size, MSM_BO_WC);
   if (!gem_handle)
      return vk_error(dev->instance, VK_ERROR_OUT_OF_DEVICE_MEMORY);

   VkResult result = tu_bo_init(dev, bo, gem_handle, size);
   if (result != VK_SUCCESS) {
      tu_gem_close(dev, gem_handle);
      return vk_error(dev->instance, result);
   }

   return VK_SUCCESS;
}
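
/* A minimal usage sketch for the BO helpers (hypothetical caller; it mirrors
 * what tu_CreateDevice does for its internal buffers further down), with
 * error handling elided:
 *
 *    struct tu_bo bo;
 *    if (tu_bo_init_new(dev, &bo, 4096) == VK_SUCCESS &&
 *        tu_bo_map(dev, &bo) == VK_SUCCESS) {
 *       memset(bo.map, 0, bo.size);   // CPU writes go through the WC mapping
 *       tu_bo_finish(dev, &bo);       // unmaps and closes the GEM handle
 *    }
 */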

VkResult
tu_bo_init_dmabuf(struct tu_device *dev,
                  struct tu_bo *bo,
                  uint64_t size,
                  int fd)
{
   uint32_t gem_handle = tu_gem_import_dmabuf(dev, fd, size);
   if (!gem_handle)
      return vk_error(dev->instance, VK_ERROR_INVALID_EXTERNAL_HANDLE);

   VkResult result = tu_bo_init(dev, bo, gem_handle, size);
   if (result != VK_SUCCESS) {
      tu_gem_close(dev, gem_handle);
      return vk_error(dev->instance, result);
   }

   return VK_SUCCESS;
}

int
tu_bo_export_dmabuf(struct tu_device *dev, struct tu_bo *bo)
{
   return tu_gem_export_dmabuf(dev, bo->gem_handle);
}

VkResult
tu_bo_map(struct tu_device *dev, struct tu_bo *bo)
{
   if (bo->map)
      return VK_SUCCESS;

   uint64_t offset = tu_gem_info_offset(dev, bo->gem_handle);
   if (!offset)
      return vk_error(dev->instance, VK_ERROR_OUT_OF_DEVICE_MEMORY);

   /* TODO: Should we use the wrapper os_mmap() like Freedreno does? */
   void *map = mmap(0, bo->size, PROT_READ | PROT_WRITE, MAP_SHARED,
                    dev->physical_device->local_fd, offset);
   if (map == MAP_FAILED)
      return vk_error(dev->instance, VK_ERROR_MEMORY_MAP_FAILED);

   bo->map = map;
   return VK_SUCCESS;
}

void
tu_bo_finish(struct tu_device *dev, struct tu_bo *bo)
{
   assert(bo->gem_handle);

   if (bo->map)
      munmap(bo->map, bo->size);

   tu_gem_close(dev, bo->gem_handle);
}

static VkResult
tu_physical_device_init(struct tu_physical_device *device,
                        struct tu_instance *instance,
                        drmDevicePtr drm_device)
{
   const char *path = drm_device->nodes[DRM_NODE_RENDER];
   VkResult result = VK_SUCCESS;
   drmVersionPtr version;
   int fd;
   int master_fd = -1;

   fd = open(path, O_RDWR | O_CLOEXEC);
   if (fd < 0) {
      return vk_errorf(instance, VK_ERROR_INCOMPATIBLE_DRIVER,
                       "failed to open device %s", path);
   }

   /* Version 1.3 added MSM_INFO_IOVA. */
   const int min_version_major = 1;
   const int min_version_minor = 3;

   version = drmGetVersion(fd);
   if (!version) {
      close(fd);
      return vk_errorf(instance, VK_ERROR_INCOMPATIBLE_DRIVER,
                       "failed to query kernel driver version for device %s",
                       path);
   }

   if (strcmp(version->name, "msm")) {
      drmFreeVersion(version);
      close(fd);
      return vk_errorf(instance, VK_ERROR_INCOMPATIBLE_DRIVER,
                       "device %s does not use the msm kernel driver", path);
   }

   if (version->version_major != min_version_major ||
       version->version_minor < min_version_minor) {
      result = vk_errorf(instance, VK_ERROR_INCOMPATIBLE_DRIVER,
                         "kernel driver for device %s has version %d.%d, "
                         "but Vulkan requires version >= %d.%d",
                         path, version->version_major, version->version_minor,
                         min_version_major, min_version_minor);
      drmFreeVersion(version);
      close(fd);
      return result;
   }

   drmFreeVersion(version);

   if (instance->debug_flags & TU_DEBUG_STARTUP)
      tu_logi("Found compatible device '%s'.", path);

   device->_loader_data.loaderMagic = ICD_LOADER_MAGIC;
   device->instance = instance;
   assert(strlen(path) < ARRAY_SIZE(device->path));
   strncpy(device->path, path, ARRAY_SIZE(device->path));

   if (instance->enabled_extensions.KHR_display) {
      master_fd =
         open(drm_device->nodes[DRM_NODE_PRIMARY], O_RDWR | O_CLOEXEC);
      if (master_fd >= 0) {
         /* TODO: free master_fd if accel is not working? */
      }
   }

   device->master_fd = master_fd;
   device->local_fd = fd;

   if (tu_drm_get_gpu_id(device, &device->gpu_id)) {
      if (instance->debug_flags & TU_DEBUG_STARTUP)
         tu_logi("Could not query the GPU ID");
      result = vk_errorf(instance, VK_ERROR_INITIALIZATION_FAILED,
                         "could not get GPU ID");
      goto fail;
   }

   if (tu_drm_get_gmem_size(device, &device->gmem_size)) {
      if (instance->debug_flags & TU_DEBUG_STARTUP)
         tu_logi("Could not query the GMEM size");
      result = vk_errorf(instance, VK_ERROR_INITIALIZATION_FAILED,
                         "could not get GMEM size");
      goto fail;
   }

   if (tu_drm_get_gmem_base(device, &device->gmem_base)) {
      if (instance->debug_flags & TU_DEBUG_STARTUP)
         tu_logi("Could not query the GMEM base address");
      result = vk_errorf(instance, VK_ERROR_INITIALIZATION_FAILED,
                         "could not get GMEM base address");
      goto fail;
   }

   memset(device->name, 0, sizeof(device->name));
   sprintf(device->name, "FD%d", device->gpu_id);

   switch (device->gpu_id) {
   case 618:
      device->ccu_offset_gmem = 0x7c000; /* 0x7e000 in some cases? */
      device->ccu_offset_bypass = 0x10000;
      device->magic.PC_UNKNOWN_9805 = 0x0;
      device->magic.SP_UNKNOWN_A0F8 = 0x0;
      break;
   case 630:
   case 640:
      device->ccu_offset_gmem = 0xf8000;
      device->ccu_offset_bypass = 0x20000;
      device->magic.PC_UNKNOWN_9805 = 0x1;
      device->magic.SP_UNKNOWN_A0F8 = 0x1;
      break;
   default:
      result = vk_errorf(instance, VK_ERROR_INITIALIZATION_FAILED,
                         "device %s is unsupported", device->name);
      goto fail;
   }
   if (tu_device_get_cache_uuid(device->gpu_id, device->cache_uuid)) {
      result = vk_errorf(instance, VK_ERROR_INITIALIZATION_FAILED,
                         "cannot generate UUID");
      goto fail;
   }

   /* The gpu id is already embedded in the uuid so we just pass "tu"
    * when creating the cache.
    */
   char buf[VK_UUID_SIZE * 2 + 1];
   disk_cache_format_hex_id(buf, device->cache_uuid, VK_UUID_SIZE * 2);
   device->disk_cache = disk_cache_create(device->name, buf, 0);

   fprintf(stderr, "WARNING: tu is not a conformant vulkan implementation, "
                   "testing use only.\n");

   tu_get_driver_uuid(&device->driver_uuid);
   tu_get_device_uuid(&device->device_uuid);

   tu_fill_device_extension_table(device, &device->supported_extensions);

   if (result != VK_SUCCESS) {
      vk_error(instance, result);
      goto fail;
   }

   result = tu_wsi_init(device);
   if (result != VK_SUCCESS) {
      vk_error(instance, result);
      goto fail;
   }

   return VK_SUCCESS;

fail:
   close(fd);
   if (master_fd != -1)
      close(master_fd);
   return result;
}

static void
tu_physical_device_finish(struct tu_physical_device *device)
{
   tu_wsi_finish(device);

   disk_cache_destroy(device->disk_cache);
   close(device->local_fd);
   if (device->master_fd != -1)
      close(device->master_fd);
}

static VKAPI_ATTR void *
default_alloc_func(void *pUserData,
                   size_t size,
                   size_t align,
                   VkSystemAllocationScope allocationScope)
{
   return malloc(size);
}

static VKAPI_ATTR void *
default_realloc_func(void *pUserData,
                     void *pOriginal,
                     size_t size,
                     size_t align,
                     VkSystemAllocationScope allocationScope)
{
   return realloc(pOriginal, size);
}

static VKAPI_ATTR void
default_free_func(void *pUserData, void *pMemory)
{
   free(pMemory);
}

static const VkAllocationCallbacks default_alloc = {
   .pUserData = NULL,
   .pfnAllocation = default_alloc_func,
   .pfnReallocation = default_realloc_func,
   .pfnFree = default_free_func,
};

static const struct debug_control tu_debug_options[] = {
   { "startup", TU_DEBUG_STARTUP },
   { "nir", TU_DEBUG_NIR },
   { "ir3", TU_DEBUG_IR3 },
   { "nobin", TU_DEBUG_NOBIN },
   { "sysmem", TU_DEBUG_SYSMEM },
   { "forcebin", TU_DEBUG_FORCEBIN },
   { NULL, 0 }
};

const char *
tu_get_debug_option_name(int id)
{
   assert(id < ARRAY_SIZE(tu_debug_options) - 1);
   return tu_debug_options[id].string;
}

static int
tu_get_instance_extension_index(const char *name)
{
   for (unsigned i = 0; i < TU_INSTANCE_EXTENSION_COUNT; ++i) {
      if (strcmp(name, tu_instance_extensions[i].extensionName) == 0)
         return i;
   }
   return -1;
}

VkResult
tu_CreateInstance(const VkInstanceCreateInfo *pCreateInfo,
                  const VkAllocationCallbacks *pAllocator,
                  VkInstance *pInstance)
{
   struct tu_instance *instance;
   VkResult result;

   assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_INSTANCE_CREATE_INFO);

   uint32_t client_version;
   if (pCreateInfo->pApplicationInfo &&
       pCreateInfo->pApplicationInfo->apiVersion != 0) {
      client_version = pCreateInfo->pApplicationInfo->apiVersion;
   } else {
      tu_EnumerateInstanceVersion(&client_version);
   }

   instance = vk_zalloc2(&default_alloc, pAllocator, sizeof(*instance), 8,
                         VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
   if (!instance)
      return vk_error(NULL, VK_ERROR_OUT_OF_HOST_MEMORY);

   instance->_loader_data.loaderMagic = ICD_LOADER_MAGIC;

   if (pAllocator)
      instance->alloc = *pAllocator;
   else
      instance->alloc = default_alloc;

   instance->api_version = client_version;
   instance->physical_device_count = -1;

   instance->debug_flags =
      parse_debug_string(getenv("TU_DEBUG"), tu_debug_options);

   if (instance->debug_flags & TU_DEBUG_STARTUP)
      tu_logi("Created an instance");

   for (uint32_t i = 0; i < pCreateInfo->enabledExtensionCount; i++) {
      const char *ext_name = pCreateInfo->ppEnabledExtensionNames[i];
      int index = tu_get_instance_extension_index(ext_name);

      if (index < 0 || !tu_supported_instance_extensions.extensions[index]) {
         vk_free2(&default_alloc, pAllocator, instance);
         return vk_error(instance, VK_ERROR_EXTENSION_NOT_PRESENT);
      }

      instance->enabled_extensions.extensions[index] = true;
   }

   result = vk_debug_report_instance_init(&instance->debug_report_callbacks);
   if (result != VK_SUCCESS) {
      vk_free2(&default_alloc, pAllocator, instance);
      return vk_error(instance, result);
   }

   glsl_type_singleton_init_or_ref();

   VG(VALGRIND_CREATE_MEMPOOL(instance, 0, false));

   *pInstance = tu_instance_to_handle(instance);

   return VK_SUCCESS;
}

void
tu_DestroyInstance(VkInstance _instance,
                   const VkAllocationCallbacks *pAllocator)
{
   TU_FROM_HANDLE(tu_instance, instance, _instance);

   if (!instance)
      return;

   for (int i = 0; i < instance->physical_device_count; ++i) {
      tu_physical_device_finish(instance->physical_devices + i);
   }

   VG(VALGRIND_DESTROY_MEMPOOL(instance));

   glsl_type_singleton_decref();

   vk_debug_report_instance_destroy(&instance->debug_report_callbacks);

   vk_free(&instance->alloc, instance);
}

static VkResult
tu_enumerate_devices(struct tu_instance *instance)
{
   /* TODO: Check for more devices? */
   drmDevicePtr devices[8];
   VkResult result = VK_ERROR_INCOMPATIBLE_DRIVER;
   int max_devices;

   instance->physical_device_count = 0;

   max_devices = drmGetDevices2(0, devices, ARRAY_SIZE(devices));

   if (instance->debug_flags & TU_DEBUG_STARTUP)
      tu_logi("Found %d drm nodes", max_devices);

   if (max_devices < 1)
      return vk_error(instance, VK_ERROR_INCOMPATIBLE_DRIVER);

   for (unsigned i = 0; i < (unsigned) max_devices; i++) {
      if (devices[i]->available_nodes & 1 << DRM_NODE_RENDER &&
          devices[i]->bustype == DRM_BUS_PLATFORM) {

         result = tu_physical_device_init(
            instance->physical_devices + instance->physical_device_count,
            instance, devices[i]);
         if (result == VK_SUCCESS)
            ++instance->physical_device_count;
         else if (result != VK_ERROR_INCOMPATIBLE_DRIVER)
            break;
      }
   }
   drmFreeDevices(devices, max_devices);

   return result;
}

VkResult
tu_EnumeratePhysicalDevices(VkInstance _instance,
                            uint32_t *pPhysicalDeviceCount,
                            VkPhysicalDevice *pPhysicalDevices)
{
   TU_FROM_HANDLE(tu_instance, instance, _instance);
   VK_OUTARRAY_MAKE(out, pPhysicalDevices, pPhysicalDeviceCount);

   VkResult result;

   if (instance->physical_device_count < 0) {
      result = tu_enumerate_devices(instance);
      if (result != VK_SUCCESS && result != VK_ERROR_INCOMPATIBLE_DRIVER)
         return result;
   }

   for (uint32_t i = 0; i < instance->physical_device_count; ++i) {
      vk_outarray_append(&out, p)
      {
         *p = tu_physical_device_to_handle(instance->physical_devices + i);
      }
   }

   return vk_outarray_status(&out);
}

VkResult
tu_EnumeratePhysicalDeviceGroups(
   VkInstance _instance,
   uint32_t *pPhysicalDeviceGroupCount,
   VkPhysicalDeviceGroupProperties *pPhysicalDeviceGroupProperties)
{
   TU_FROM_HANDLE(tu_instance, instance, _instance);
   VK_OUTARRAY_MAKE(out, pPhysicalDeviceGroupProperties,
                    pPhysicalDeviceGroupCount);
   VkResult result;

   if (instance->physical_device_count < 0) {
      result = tu_enumerate_devices(instance);
      if (result != VK_SUCCESS && result != VK_ERROR_INCOMPATIBLE_DRIVER)
         return result;
   }

   for (uint32_t i = 0; i < instance->physical_device_count; ++i) {
      vk_outarray_append(&out, p)
      {
         p->physicalDeviceCount = 1;
         p->physicalDevices[0] =
            tu_physical_device_to_handle(instance->physical_devices + i);
         p->subsetAllocation = false;
      }
   }

   return vk_outarray_status(&out);
}

void
tu_GetPhysicalDeviceFeatures(VkPhysicalDevice physicalDevice,
                             VkPhysicalDeviceFeatures *pFeatures)
{
   memset(pFeatures, 0, sizeof(*pFeatures));

   *pFeatures = (VkPhysicalDeviceFeatures) {
      .robustBufferAccess = false,
      .fullDrawIndexUint32 = true,
      .imageCubeArray = true,
      .independentBlend = true,
      .geometryShader = true,
      .tessellationShader = false,
      .sampleRateShading = true,
      .dualSrcBlend = true,
      .logicOp = true,
      .multiDrawIndirect = false,
      .drawIndirectFirstInstance = false,
      .depthClamp = true,
      .depthBiasClamp = false,
      .fillModeNonSolid = false,
      .depthBounds = false,
      .wideLines = false,
      .largePoints = false,
      .alphaToOne = false,
      .multiViewport = false,
      .samplerAnisotropy = true,
      .textureCompressionETC2 = true,
      .textureCompressionASTC_LDR = true,
      .textureCompressionBC = true,
      .occlusionQueryPrecise = true,
      .pipelineStatisticsQuery = false,
      .vertexPipelineStoresAndAtomics = false,
      .fragmentStoresAndAtomics = false,
      .shaderTessellationAndGeometryPointSize = false,
      .shaderImageGatherExtended = false,
      .shaderStorageImageExtendedFormats = false,
      .shaderStorageImageMultisample = false,
      .shaderUniformBufferArrayDynamicIndexing = false,
      .shaderSampledImageArrayDynamicIndexing = false,
      .shaderStorageBufferArrayDynamicIndexing = false,
      .shaderStorageImageArrayDynamicIndexing = false,
      .shaderStorageImageReadWithoutFormat = false,
      .shaderStorageImageWriteWithoutFormat = false,
      .shaderClipDistance = false,
      .shaderCullDistance = false,
      .shaderFloat64 = false,
      .shaderInt64 = false,
      .shaderInt16 = false,
      .sparseBinding = false,
      .variableMultisampleRate = false,
      .inheritedQueries = false,
   };
}

void
tu_GetPhysicalDeviceFeatures2(VkPhysicalDevice physicalDevice,
                              VkPhysicalDeviceFeatures2 *pFeatures)
{
   vk_foreach_struct(ext, pFeatures->pNext)
   {
      switch (ext->sType) {
      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VARIABLE_POINTERS_FEATURES: {
         VkPhysicalDeviceVariablePointersFeatures *features = (void *) ext;
         features->variablePointersStorageBuffer = false;
         features->variablePointers = false;
         break;
      }
      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MULTIVIEW_FEATURES: {
         VkPhysicalDeviceMultiviewFeatures *features =
            (VkPhysicalDeviceMultiviewFeatures *) ext;
         features->multiview = false;
         features->multiviewGeometryShader = false;
         features->multiviewTessellationShader = false;
         break;
      }
      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_DRAW_PARAMETERS_FEATURES: {
         VkPhysicalDeviceShaderDrawParametersFeatures *features =
            (VkPhysicalDeviceShaderDrawParametersFeatures *) ext;
         features->shaderDrawParameters = false;
         break;
      }
      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROTECTED_MEMORY_FEATURES: {
         VkPhysicalDeviceProtectedMemoryFeatures *features =
            (VkPhysicalDeviceProtectedMemoryFeatures *) ext;
         features->protectedMemory = false;
         break;
      }
      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_16BIT_STORAGE_FEATURES: {
         VkPhysicalDevice16BitStorageFeatures *features =
            (VkPhysicalDevice16BitStorageFeatures *) ext;
         features->storageBuffer16BitAccess = false;
         features->uniformAndStorageBuffer16BitAccess = false;
         features->storagePushConstant16 = false;
         features->storageInputOutput16 = false;
         break;
      }
      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SAMPLER_YCBCR_CONVERSION_FEATURES: {
         VkPhysicalDeviceSamplerYcbcrConversionFeatures *features =
            (VkPhysicalDeviceSamplerYcbcrConversionFeatures *) ext;
         features->samplerYcbcrConversion = false;
         break;
      }
      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DESCRIPTOR_INDEXING_FEATURES_EXT: {
         VkPhysicalDeviceDescriptorIndexingFeaturesEXT *features =
            (VkPhysicalDeviceDescriptorIndexingFeaturesEXT *) ext;
         features->shaderInputAttachmentArrayDynamicIndexing = false;
         features->shaderUniformTexelBufferArrayDynamicIndexing = false;
         features->shaderStorageTexelBufferArrayDynamicIndexing = false;
         features->shaderUniformBufferArrayNonUniformIndexing = false;
         features->shaderSampledImageArrayNonUniformIndexing = false;
         features->shaderStorageBufferArrayNonUniformIndexing = false;
         features->shaderStorageImageArrayNonUniformIndexing = false;
         features->shaderInputAttachmentArrayNonUniformIndexing = false;
         features->shaderUniformTexelBufferArrayNonUniformIndexing = false;
         features->shaderStorageTexelBufferArrayNonUniformIndexing = false;
         features->descriptorBindingUniformBufferUpdateAfterBind = false;
         features->descriptorBindingSampledImageUpdateAfterBind = false;
         features->descriptorBindingStorageImageUpdateAfterBind = false;
         features->descriptorBindingStorageBufferUpdateAfterBind = false;
         features->descriptorBindingUniformTexelBufferUpdateAfterBind = false;
         features->descriptorBindingStorageTexelBufferUpdateAfterBind = false;
         features->descriptorBindingUpdateUnusedWhilePending = false;
         features->descriptorBindingPartiallyBound = false;
         features->descriptorBindingVariableDescriptorCount = false;
         features->runtimeDescriptorArray = false;
         break;
      }
      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_CONDITIONAL_RENDERING_FEATURES_EXT: {
         VkPhysicalDeviceConditionalRenderingFeaturesEXT *features =
            (VkPhysicalDeviceConditionalRenderingFeaturesEXT *) ext;
         features->conditionalRendering = false;
         features->inheritedConditionalRendering = false;
         break;
      }
      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TRANSFORM_FEEDBACK_FEATURES_EXT: {
         VkPhysicalDeviceTransformFeedbackFeaturesEXT *features =
            (VkPhysicalDeviceTransformFeedbackFeaturesEXT *) ext;
         features->transformFeedback = true;
         features->geometryStreams = false;
         break;
      }
      default:
         break;
      }
   }
   tu_GetPhysicalDeviceFeatures(physicalDevice, &pFeatures->features);
}

void
tu_GetPhysicalDeviceProperties(VkPhysicalDevice physicalDevice,
                               VkPhysicalDeviceProperties *pProperties)
{
   TU_FROM_HANDLE(tu_physical_device, pdevice, physicalDevice);
   VkSampleCountFlags sample_counts =
      VK_SAMPLE_COUNT_1_BIT | VK_SAMPLE_COUNT_2_BIT | VK_SAMPLE_COUNT_4_BIT;

   /* I have no idea what the maximum size is, but the hardware supports very
    * large numbers of descriptors (at least 2^16). This limit is based on
    * CP_LOAD_STATE6, which has a 28-bit field for the DWORD offset, so that
    * we don't have to think about what to do if that overflows, but really
    * nothing is likely to get close to this.
    */
   const size_t max_descriptor_set_size = (1 << 28) / A6XX_TEX_CONST_DWORDS;
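
   /* Worked out (assuming A6XX_TEX_CONST_DWORDS == 16, its usual value in
    * the freedreno headers): (1 << 28) / 16 = 2^24, i.e. roughly 16.8M
    * descriptors per set, far more than any application will allocate.
    */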

   VkPhysicalDeviceLimits limits = {
      .maxImageDimension1D = (1 << 14),
      .maxImageDimension2D = (1 << 14),
      .maxImageDimension3D = (1 << 11),
      .maxImageDimensionCube = (1 << 14),
      .maxImageArrayLayers = (1 << 11),
      .maxTexelBufferElements = 128 * 1024 * 1024,
      .maxUniformBufferRange = MAX_UNIFORM_BUFFER_RANGE,
      .maxStorageBufferRange = MAX_STORAGE_BUFFER_RANGE,
      .maxPushConstantsSize = MAX_PUSH_CONSTANTS_SIZE,
      .maxMemoryAllocationCount = UINT32_MAX,
      .maxSamplerAllocationCount = 64 * 1024,
      .bufferImageGranularity = 64, /* A cache line */
      .sparseAddressSpaceSize = 0xffffffffu, /* buffer max size */
      .maxBoundDescriptorSets = MAX_SETS,
      .maxPerStageDescriptorSamplers = max_descriptor_set_size,
      .maxPerStageDescriptorUniformBuffers = max_descriptor_set_size,
      .maxPerStageDescriptorStorageBuffers = max_descriptor_set_size,
      .maxPerStageDescriptorSampledImages = max_descriptor_set_size,
      .maxPerStageDescriptorStorageImages = max_descriptor_set_size,
      .maxPerStageDescriptorInputAttachments = MAX_RTS,
      .maxPerStageResources = max_descriptor_set_size,
      .maxDescriptorSetSamplers = max_descriptor_set_size,
      .maxDescriptorSetUniformBuffers = max_descriptor_set_size,
      .maxDescriptorSetUniformBuffersDynamic = MAX_DYNAMIC_UNIFORM_BUFFERS,
      .maxDescriptorSetStorageBuffers = max_descriptor_set_size,
      .maxDescriptorSetStorageBuffersDynamic = MAX_DYNAMIC_STORAGE_BUFFERS,
      .maxDescriptorSetSampledImages = max_descriptor_set_size,
      .maxDescriptorSetStorageImages = max_descriptor_set_size,
      .maxDescriptorSetInputAttachments = MAX_RTS,
      .maxVertexInputAttributes = 32,
      .maxVertexInputBindings = 32,
      .maxVertexInputAttributeOffset = 4095,
      .maxVertexInputBindingStride = 2048,
      .maxVertexOutputComponents = 128,
      .maxTessellationGenerationLevel = 64,
      .maxTessellationPatchSize = 32,
      .maxTessellationControlPerVertexInputComponents = 128,
      .maxTessellationControlPerVertexOutputComponents = 128,
      .maxTessellationControlPerPatchOutputComponents = 120,
      .maxTessellationControlTotalOutputComponents = 4096,
      .maxTessellationEvaluationInputComponents = 128,
      .maxTessellationEvaluationOutputComponents = 128,
      .maxGeometryShaderInvocations = 32,
      .maxGeometryInputComponents = 64,
      .maxGeometryOutputComponents = 128,
      .maxGeometryOutputVertices = 256,
      .maxGeometryTotalOutputComponents = 1024,
      .maxFragmentInputComponents = 124,
      .maxFragmentOutputAttachments = 8,
      .maxFragmentDualSrcAttachments = 1,
      .maxFragmentCombinedOutputResources = 8,
      .maxComputeSharedMemorySize = 32768,
      .maxComputeWorkGroupCount = { 65535, 65535, 65535 },
      .maxComputeWorkGroupInvocations = 2048,
      .maxComputeWorkGroupSize = { 2048, 2048, 2048 },
      .subPixelPrecisionBits = 8,
      .subTexelPrecisionBits = 4 /* FIXME */,
      .mipmapPrecisionBits = 4 /* FIXME */,
      .maxDrawIndexedIndexValue = UINT32_MAX,
      .maxDrawIndirectCount = UINT32_MAX,
      .maxSamplerLodBias = 16,
      .maxSamplerAnisotropy = 16,
      .maxViewports = MAX_VIEWPORTS,
      .maxViewportDimensions = { (1 << 14), (1 << 14) },
      .viewportBoundsRange = { INT16_MIN, INT16_MAX },
      .viewportSubPixelBits = 8,
      .minMemoryMapAlignment = 4096, /* A page */
      .minTexelBufferOffsetAlignment = 64,
      .minUniformBufferOffsetAlignment = 64,
      .minStorageBufferOffsetAlignment = 64,
      .minTexelOffset = -32,
      .maxTexelOffset = 31,
      .minTexelGatherOffset = -32,
      .maxTexelGatherOffset = 31,
      .minInterpolationOffset = -2,
      .maxInterpolationOffset = 2,
      .subPixelInterpolationOffsetBits = 8,
      .maxFramebufferWidth = (1 << 14),
      .maxFramebufferHeight = (1 << 14),
      .maxFramebufferLayers = (1 << 10),
      .framebufferColorSampleCounts = sample_counts,
      .framebufferDepthSampleCounts = sample_counts,
      .framebufferStencilSampleCounts = sample_counts,
      .framebufferNoAttachmentsSampleCounts = sample_counts,
      .maxColorAttachments = MAX_RTS,
      .sampledImageColorSampleCounts = sample_counts,
      .sampledImageIntegerSampleCounts = VK_SAMPLE_COUNT_1_BIT,
      .sampledImageDepthSampleCounts = sample_counts,
      .sampledImageStencilSampleCounts = sample_counts,
      .storageImageSampleCounts = VK_SAMPLE_COUNT_1_BIT,
      .maxSampleMaskWords = 1,
      .timestampComputeAndGraphics = true,
      .timestampPeriod = 1000000000.0 / 19200000.0, /* CP_ALWAYS_ON_COUNTER is fixed 19.2MHz */
      .maxClipDistances = 8,
      .maxCullDistances = 8,
      .maxCombinedClipAndCullDistances = 8,
      .discreteQueuePriorities = 1,
      .pointSizeRange = { 0.125, 255.875 },
      .lineWidthRange = { 0.0, 7.9921875 },
      .pointSizeGranularity = (1.0 / 8.0),
      .lineWidthGranularity = (1.0 / 128.0),
      .strictLines = false, /* FINISHME */
      .standardSampleLocations = true,
      .optimalBufferCopyOffsetAlignment = 128,
      .optimalBufferCopyRowPitchAlignment = 128,
      .nonCoherentAtomSize = 64,
   };
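
   /* The timestampPeriod above works out to 1000000000.0 / 19200000.0
    * ~= 52.08 ns per tick of the fixed 19.2 MHz always-on counter.
    */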

   *pProperties = (VkPhysicalDeviceProperties) {
      .apiVersion = tu_physical_device_api_version(pdevice),
      .driverVersion = vk_get_driver_version(),
      .vendorID = 0, /* TODO */
      .deviceID = 0,
      .deviceType = VK_PHYSICAL_DEVICE_TYPE_INTEGRATED_GPU,
      .limits = limits,
      .sparseProperties = { 0 },
   };

   strcpy(pProperties->deviceName, pdevice->name);
   memcpy(pProperties->pipelineCacheUUID, pdevice->cache_uuid, VK_UUID_SIZE);
}

void
tu_GetPhysicalDeviceProperties2(VkPhysicalDevice physicalDevice,
                                VkPhysicalDeviceProperties2 *pProperties)
{
   TU_FROM_HANDLE(tu_physical_device, pdevice, physicalDevice);
   tu_GetPhysicalDeviceProperties(physicalDevice, &pProperties->properties);

   vk_foreach_struct(ext, pProperties->pNext)
   {
      switch (ext->sType) {
      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PUSH_DESCRIPTOR_PROPERTIES_KHR: {
         VkPhysicalDevicePushDescriptorPropertiesKHR *properties =
            (VkPhysicalDevicePushDescriptorPropertiesKHR *) ext;
         properties->maxPushDescriptors = MAX_PUSH_DESCRIPTORS;
         break;
      }
      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_ID_PROPERTIES: {
         VkPhysicalDeviceIDProperties *properties =
            (VkPhysicalDeviceIDProperties *) ext;
         memcpy(properties->driverUUID, pdevice->driver_uuid, VK_UUID_SIZE);
         memcpy(properties->deviceUUID, pdevice->device_uuid, VK_UUID_SIZE);
         properties->deviceLUIDValid = false;
         break;
      }
      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MULTIVIEW_PROPERTIES: {
         VkPhysicalDeviceMultiviewProperties *properties =
            (VkPhysicalDeviceMultiviewProperties *) ext;
         properties->maxMultiviewViewCount = MAX_VIEWS;
         properties->maxMultiviewInstanceIndex = INT_MAX;
         break;
      }
      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_POINT_CLIPPING_PROPERTIES: {
         VkPhysicalDevicePointClippingProperties *properties =
            (VkPhysicalDevicePointClippingProperties *) ext;
         properties->pointClippingBehavior =
            VK_POINT_CLIPPING_BEHAVIOR_ALL_CLIP_PLANES;
         break;
      }
      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MAINTENANCE_3_PROPERTIES: {
         VkPhysicalDeviceMaintenance3Properties *properties =
            (VkPhysicalDeviceMaintenance3Properties *) ext;
         /* Make sure everything is addressable by a signed 32-bit int, and
          * our largest descriptors are 96 bytes. */
         properties->maxPerSetDescriptors = (1ull << 31) / 96;
         /* Our buffer size fields allow only this much */
         properties->maxMemoryAllocationSize = 0xFFFFFFFFull;
         break;
      }
      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TRANSFORM_FEEDBACK_PROPERTIES_EXT: {
         VkPhysicalDeviceTransformFeedbackPropertiesEXT *properties =
            (VkPhysicalDeviceTransformFeedbackPropertiesEXT *) ext;

         properties->maxTransformFeedbackStreams = IR3_MAX_SO_STREAMS;
         properties->maxTransformFeedbackBuffers = IR3_MAX_SO_BUFFERS;
         properties->maxTransformFeedbackBufferSize = UINT32_MAX;
         properties->maxTransformFeedbackStreamDataSize = 512;
         properties->maxTransformFeedbackBufferDataSize = 512;
         properties->maxTransformFeedbackBufferDataStride = 512;
         properties->transformFeedbackQueries = true;
         properties->transformFeedbackStreamsLinesTriangles = false;
         properties->transformFeedbackRasterizationStreamSelect = false;
         properties->transformFeedbackDraw = true;
         break;
      }
      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SAMPLE_LOCATIONS_PROPERTIES_EXT: {
         VkPhysicalDeviceSampleLocationsPropertiesEXT *properties =
            (VkPhysicalDeviceSampleLocationsPropertiesEXT *) ext;
         properties->sampleLocationSampleCounts = 0;
         if (pdevice->supported_extensions.EXT_sample_locations) {
            properties->sampleLocationSampleCounts =
               VK_SAMPLE_COUNT_1_BIT | VK_SAMPLE_COUNT_2_BIT |
               VK_SAMPLE_COUNT_4_BIT;
         }
         properties->maxSampleLocationGridSize = (VkExtent2D) { 1, 1 };
         properties->sampleLocationCoordinateRange[0] = 0.0f;
         properties->sampleLocationCoordinateRange[1] = 0.9375f;
         properties->sampleLocationSubPixelBits = 4;
         properties->variableSampleLocations = true;
         break;
      }
      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SAMPLER_FILTER_MINMAX_PROPERTIES: {
         VkPhysicalDeviceSamplerFilterMinmaxProperties *properties =
            (VkPhysicalDeviceSamplerFilterMinmaxProperties *) ext;
         properties->filterMinmaxImageComponentMapping = true;
         properties->filterMinmaxSingleComponentFormats = true;
         break;
      }

      default:
         break;
      }
   }
}

static const VkQueueFamilyProperties tu_queue_family_properties = {
   .queueFlags =
      VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT | VK_QUEUE_TRANSFER_BIT,
   .queueCount = 1,
   .timestampValidBits = 48,
   .minImageTransferGranularity = { 1, 1, 1 },
};

void
tu_GetPhysicalDeviceQueueFamilyProperties(
   VkPhysicalDevice physicalDevice,
   uint32_t *pQueueFamilyPropertyCount,
   VkQueueFamilyProperties *pQueueFamilyProperties)
{
   VK_OUTARRAY_MAKE(out, pQueueFamilyProperties, pQueueFamilyPropertyCount);

   vk_outarray_append(&out, p) { *p = tu_queue_family_properties; }
}

void
tu_GetPhysicalDeviceQueueFamilyProperties2(
   VkPhysicalDevice physicalDevice,
   uint32_t *pQueueFamilyPropertyCount,
   VkQueueFamilyProperties2 *pQueueFamilyProperties)
{
   VK_OUTARRAY_MAKE(out, pQueueFamilyProperties, pQueueFamilyPropertyCount);

   vk_outarray_append(&out, p)
   {
      p->queueFamilyProperties = tu_queue_family_properties;
   }
}

static uint64_t
tu_get_system_heap_size(void)
{
   struct sysinfo info;
   sysinfo(&info);

   uint64_t total_ram = (uint64_t) info.totalram * (uint64_t) info.mem_unit;

   /* We don't want to burn too much ram with the GPU. If the user has 4GiB
    * or less, we use at most half. If they have more than 4GiB, we use 3/4.
    */
   uint64_t available_ram;
   if (total_ram <= 4ull * 1024ull * 1024ull * 1024ull)
      available_ram = total_ram / 2;
   else
      available_ram = total_ram * 3 / 4;

   return available_ram;
}
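
/* Worked example of the policy above (hypothetical machines): with 8 GiB of
 * RAM the heap advertises 8 GiB * 3/4 = 6 GiB; with exactly 4 GiB it
 * advertises 4 GiB / 2 = 2 GiB.
 */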

void
tu_GetPhysicalDeviceMemoryProperties(
   VkPhysicalDevice physicalDevice,
   VkPhysicalDeviceMemoryProperties *pMemoryProperties)
{
   pMemoryProperties->memoryHeapCount = 1;
   pMemoryProperties->memoryHeaps[0].size = tu_get_system_heap_size();
   pMemoryProperties->memoryHeaps[0].flags = VK_MEMORY_HEAP_DEVICE_LOCAL_BIT;

   pMemoryProperties->memoryTypeCount = 1;
   pMemoryProperties->memoryTypes[0].propertyFlags =
      VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT |
      VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT |
      VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
   pMemoryProperties->memoryTypes[0].heapIndex = 0;
}

void
tu_GetPhysicalDeviceMemoryProperties2(
   VkPhysicalDevice physicalDevice,
   VkPhysicalDeviceMemoryProperties2 *pMemoryProperties)
{
   tu_GetPhysicalDeviceMemoryProperties(
      physicalDevice, &pMemoryProperties->memoryProperties);
}

static VkResult
tu_queue_init(struct tu_device *device,
              struct tu_queue *queue,
              uint32_t queue_family_index,
              int idx,
              VkDeviceQueueCreateFlags flags)
{
   queue->_loader_data.loaderMagic = ICD_LOADER_MAGIC;
   queue->device = device;
   queue->queue_family_index = queue_family_index;
   queue->queue_idx = idx;
   queue->flags = flags;

   int ret = tu_drm_submitqueue_new(device, 0, &queue->msm_queue_id);
   if (ret)
      return VK_ERROR_INITIALIZATION_FAILED;

   tu_fence_init(&queue->submit_fence, false);

   return VK_SUCCESS;
}

static void
tu_queue_finish(struct tu_queue *queue)
{
   tu_fence_finish(&queue->submit_fence);
   tu_drm_submitqueue_close(queue->device, queue->msm_queue_id);
}

static int
tu_get_device_extension_index(const char *name)
{
   for (unsigned i = 0; i < TU_DEVICE_EXTENSION_COUNT; ++i) {
      if (strcmp(name, tu_device_extensions[i].extensionName) == 0)
         return i;
   }
   return -1;
}

struct PACKED bcolor_entry {
   uint32_t fp32[4];
   uint16_t ui16[4];
   int16_t si16[4];
   uint16_t fp16[4];
   uint16_t rgb565;
   uint16_t rgb5a1;
   uint16_t rgba4;
   uint8_t __pad0[2];
   uint8_t ui8[4];
   int8_t si8[4];
   uint32_t rgb10a2;
   uint32_t z24; /* also s8? */
   uint16_t srgb[4]; /* appears to duplicate fp16[], but clamped, used for srgb */
   uint8_t __pad1[56];
} border_color[] = {
   [VK_BORDER_COLOR_FLOAT_TRANSPARENT_BLACK] = {},
   [VK_BORDER_COLOR_INT_TRANSPARENT_BLACK] = {},
   [VK_BORDER_COLOR_FLOAT_OPAQUE_BLACK] = {
      .fp32[3] = 0x3f800000,
      .ui16[3] = 0xffff,
      .si16[3] = 0x7fff,
      .fp16[3] = 0x3c00,
      .rgb5a1 = 0x8000,
      .rgba4 = 0xf000,
      .ui8[3] = 0xff,
      .si8[3] = 0x7f,
      .rgb10a2 = 0xc0000000,
      .srgb[3] = 0x3c00,
   },
   [VK_BORDER_COLOR_INT_OPAQUE_BLACK] = {
      .fp32[3] = 1,
      .fp16[3] = 1,
   },
   [VK_BORDER_COLOR_FLOAT_OPAQUE_WHITE] = {
      .fp32[0 ... 3] = 0x3f800000,
      .ui16[0 ... 3] = 0xffff,
      .si16[0 ... 3] = 0x7fff,
      .fp16[0 ... 3] = 0x3c00,
      .rgb565 = 0xffff,
      .rgb5a1 = 0xffff,
      .rgba4 = 0xffff,
      .ui8[0 ... 3] = 0xff,
      .si8[0 ... 3] = 0x7f,
      .rgb10a2 = 0xffffffff,
      .z24 = 0xffffff,
      .srgb[0 ... 3] = 0x3c00,
   },
   [VK_BORDER_COLOR_INT_OPAQUE_WHITE] = {
      .fp32[0 ... 3] = 1,
      .fp16[0 ... 3] = 1,
   },
};
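
/* Each entry above is padded out to 128 bytes (checked by the STATIC_ASSERT
 * in tu_CreateDevice below), and the whole table, indexed by VkBorderColor,
 * is copied into the device's border_color BO at device creation, presumably
 * so sampler state can address entry N at offset N * 128.
 */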

VkResult
tu_CreateDevice(VkPhysicalDevice physicalDevice,
                const VkDeviceCreateInfo *pCreateInfo,
                const VkAllocationCallbacks *pAllocator,
                VkDevice *pDevice)
{
   TU_FROM_HANDLE(tu_physical_device, physical_device, physicalDevice);
   VkResult result;
   struct tu_device *device;

   /* Check enabled features */
   if (pCreateInfo->pEnabledFeatures) {
      VkPhysicalDeviceFeatures supported_features;
      tu_GetPhysicalDeviceFeatures(physicalDevice, &supported_features);
      VkBool32 *supported_feature = (VkBool32 *) &supported_features;
      VkBool32 *enabled_feature = (VkBool32 *) pCreateInfo->pEnabledFeatures;
      unsigned num_features =
         sizeof(VkPhysicalDeviceFeatures) / sizeof(VkBool32);
      for (uint32_t i = 0; i < num_features; i++) {
         if (enabled_feature[i] && !supported_feature[i])
            return vk_error(physical_device->instance,
                            VK_ERROR_FEATURE_NOT_PRESENT);
      }
   }

   device = vk_zalloc2(&physical_device->instance->alloc, pAllocator,
                       sizeof(*device), 8, VK_SYSTEM_ALLOCATION_SCOPE_DEVICE);
   if (!device)
      return vk_error(physical_device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);

   device->_loader_data.loaderMagic = ICD_LOADER_MAGIC;
   device->instance = physical_device->instance;
   device->physical_device = physical_device;

   if (pAllocator)
      device->alloc = *pAllocator;
   else
      device->alloc = physical_device->instance->alloc;

   for (uint32_t i = 0; i < pCreateInfo->enabledExtensionCount; i++) {
      const char *ext_name = pCreateInfo->ppEnabledExtensionNames[i];
      int index = tu_get_device_extension_index(ext_name);
      if (index < 0 ||
          !physical_device->supported_extensions.extensions[index]) {
         vk_free(&device->alloc, device);
         return vk_error(physical_device->instance,
                         VK_ERROR_EXTENSION_NOT_PRESENT);
      }

      device->enabled_extensions.extensions[index] = true;
   }

   for (unsigned i = 0; i < pCreateInfo->queueCreateInfoCount; i++) {
      const VkDeviceQueueCreateInfo *queue_create =
         &pCreateInfo->pQueueCreateInfos[i];
      uint32_t qfi = queue_create->queueFamilyIndex;
      device->queues[qfi] = vk_alloc(
         &device->alloc, queue_create->queueCount * sizeof(struct tu_queue),
         8, VK_SYSTEM_ALLOCATION_SCOPE_DEVICE);
      if (!device->queues[qfi]) {
         result = VK_ERROR_OUT_OF_HOST_MEMORY;
         goto fail_queues;
      }

      memset(device->queues[qfi], 0,
             queue_create->queueCount * sizeof(struct tu_queue));

      device->queue_count[qfi] = queue_create->queueCount;

      for (unsigned q = 0; q < queue_create->queueCount; q++) {
         result = tu_queue_init(device, &device->queues[qfi][q], qfi, q,
                                queue_create->flags);
         if (result != VK_SUCCESS)
            goto fail_queues;
      }
   }

   device->compiler = ir3_compiler_create(NULL, physical_device->gpu_id);
   if (!device->compiler) {
      result = VK_ERROR_INITIALIZATION_FAILED;
      goto fail_queues;
   }

#define VSC_DATA_SIZE(pitch)  ((pitch) * 32 + 0x100) /* extra size to store VSC_SIZE */
#define VSC_DATA2_SIZE(pitch) ((pitch) * 32)

   device->vsc_data_pitch = 0x440 * 4;
   device->vsc_data2_pitch = 0x1040 * 4;

   result = tu_bo_init_new(device, &device->vsc_data,
                           VSC_DATA_SIZE(device->vsc_data_pitch));
   if (result != VK_SUCCESS)
      goto fail_vsc_data;

   result = tu_bo_init_new(device, &device->vsc_data2,
                           VSC_DATA2_SIZE(device->vsc_data2_pitch));
   if (result != VK_SUCCESS)
      goto fail_vsc_data2;

   STATIC_ASSERT(sizeof(struct bcolor_entry) == 128);
   result = tu_bo_init_new(device, &device->border_color,
                           sizeof(border_color));
   if (result != VK_SUCCESS)
      goto fail_border_color;

   result = tu_bo_map(device, &device->border_color);
   if (result != VK_SUCCESS)
      goto fail_border_color_map;

   memcpy(device->border_color.map, border_color, sizeof(border_color));

   VkPipelineCacheCreateInfo ci;
   ci.sType = VK_STRUCTURE_TYPE_PIPELINE_CACHE_CREATE_INFO;
   ci.pNext = NULL;
   ci.flags = 0;
   ci.pInitialData = NULL;
   ci.initialDataSize = 0;
   VkPipelineCache pc;
   result =
      tu_CreatePipelineCache(tu_device_to_handle(device), &ci, NULL, &pc);
   if (result != VK_SUCCESS)
      goto fail_pipeline_cache;

   device->mem_cache = tu_pipeline_cache_from_handle(pc);

   *pDevice = tu_device_to_handle(device);
   return VK_SUCCESS;

fail_pipeline_cache:
fail_border_color_map:
   tu_bo_finish(device, &device->border_color);

fail_border_color:
   tu_bo_finish(device, &device->vsc_data2);

fail_vsc_data2:
   tu_bo_finish(device, &device->vsc_data);

fail_vsc_data:
   ralloc_free(device->compiler);

fail_queues:
   for (unsigned i = 0; i < TU_MAX_QUEUE_FAMILIES; i++) {
      for (unsigned q = 0; q < device->queue_count[i]; q++)
         tu_queue_finish(&device->queues[i][q]);
      if (device->queue_count[i])
         vk_free(&device->alloc, device->queues[i]);
   }

   vk_free(&device->alloc, device);
   return result;
}

void
tu_DestroyDevice(VkDevice _device, const VkAllocationCallbacks *pAllocator)
{
   TU_FROM_HANDLE(tu_device, device, _device);

   if (!device)
      return;

   tu_bo_finish(device, &device->vsc_data);
   tu_bo_finish(device, &device->vsc_data2);
   tu_bo_finish(device, &device->border_color);

   for (unsigned i = 0; i < TU_MAX_QUEUE_FAMILIES; i++) {
      for (unsigned q = 0; q < device->queue_count[i]; q++)
         tu_queue_finish(&device->queues[i][q]);
      if (device->queue_count[i])
         vk_free(&device->alloc, device->queues[i]);
   }

   /* the compiler does not use pAllocator */
   ralloc_free(device->compiler);

   VkPipelineCache pc = tu_pipeline_cache_to_handle(device->mem_cache);
   tu_DestroyPipelineCache(tu_device_to_handle(device), pc, NULL);

   vk_free(&device->alloc, device);
}

VkResult
tu_EnumerateInstanceLayerProperties(uint32_t *pPropertyCount,
                                    VkLayerProperties *pProperties)
{
   *pPropertyCount = 0;
   return VK_SUCCESS;
}

VkResult
tu_EnumerateDeviceLayerProperties(VkPhysicalDevice physicalDevice,
                                  uint32_t *pPropertyCount,
                                  VkLayerProperties *pProperties)
{
   *pPropertyCount = 0;
   return VK_SUCCESS;
}

void
tu_GetDeviceQueue2(VkDevice _device,
                   const VkDeviceQueueInfo2 *pQueueInfo,
                   VkQueue *pQueue)
{
   TU_FROM_HANDLE(tu_device, device, _device);
   struct tu_queue *queue;

   queue =
      &device->queues[pQueueInfo->queueFamilyIndex][pQueueInfo->queueIndex];
   if (pQueueInfo->flags != queue->flags) {
      /* From the Vulkan 1.1.70 spec:
       *
       * "The queue returned by vkGetDeviceQueue2 must have the same
       * flags value from this structure as that used at device
       * creation time in a VkDeviceQueueCreateInfo instance. If no
       * matching flags were specified at device creation time then
       * pQueue will return VK_NULL_HANDLE."
       */
      *pQueue = VK_NULL_HANDLE;
      return;
   }

   *pQueue = tu_queue_to_handle(queue);
}
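
/* Application-side sketch of the flags rule quoted above (hypothetical
 * values):
 *
 *    VkDeviceQueueInfo2 info = {
 *       .sType = VK_STRUCTURE_TYPE_DEVICE_QUEUE_INFO_2,
 *       .flags = 0,           // must match VkDeviceQueueCreateInfo::flags
 *       .queueFamilyIndex = 0,
 *       .queueIndex = 0,
 *    };
 *    VkQueue queue;
 *    vkGetDeviceQueue2(device, &info, &queue);
 *    // queue == VK_NULL_HANDLE if no queue was created with matching flags
 */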

void
tu_GetDeviceQueue(VkDevice _device,
                  uint32_t queueFamilyIndex,
                  uint32_t queueIndex,
                  VkQueue *pQueue)
{
   const VkDeviceQueueInfo2 info =
      (VkDeviceQueueInfo2) { .sType = VK_STRUCTURE_TYPE_DEVICE_QUEUE_INFO_2,
                             .queueFamilyIndex = queueFamilyIndex,
                             .queueIndex = queueIndex };

   tu_GetDeviceQueue2(_device, &info, pQueue);
}

VkResult
tu_QueueSubmit(VkQueue _queue,
               uint32_t submitCount,
               const VkSubmitInfo *pSubmits,
               VkFence _fence)
{
   TU_FROM_HANDLE(tu_queue, queue, _queue);

   for (uint32_t i = 0; i < submitCount; ++i) {
      const VkSubmitInfo *submit = pSubmits + i;
      const bool last_submit = (i == submitCount - 1);
      struct tu_bo_list bo_list;
      tu_bo_list_init(&bo_list);

      uint32_t entry_count = 0;
      for (uint32_t j = 0; j < submit->commandBufferCount; ++j) {
         TU_FROM_HANDLE(tu_cmd_buffer, cmdbuf, submit->pCommandBuffers[j]);
         entry_count += cmdbuf->cs.entry_count;
      }

      struct drm_msm_gem_submit_cmd cmds[entry_count];
      uint32_t entry_idx = 0;
      for (uint32_t j = 0; j < submit->commandBufferCount; ++j) {
         TU_FROM_HANDLE(tu_cmd_buffer, cmdbuf, submit->pCommandBuffers[j]);
         struct tu_cs *cs = &cmdbuf->cs;
         for (unsigned k = 0; k < cs->entry_count; ++k, ++entry_idx) {
            cmds[entry_idx].type = MSM_SUBMIT_CMD_BUF;
            cmds[entry_idx].submit_idx =
               tu_bo_list_add(&bo_list, cs->entries[k].bo,
                              MSM_SUBMIT_BO_READ | MSM_SUBMIT_BO_DUMP);
            cmds[entry_idx].submit_offset = cs->entries[k].offset;
            cmds[entry_idx].size = cs->entries[k].size;
            cmds[entry_idx].pad = 0;
            cmds[entry_idx].nr_relocs = 0;
            cmds[entry_idx].relocs = 0;
         }

         tu_bo_list_merge(&bo_list, &cmdbuf->bo_list);
      }

      uint32_t flags = MSM_PIPE_3D0;
      if (last_submit) {
         flags |= MSM_SUBMIT_FENCE_FD_OUT;
      }

      struct drm_msm_gem_submit req = {
         .flags = flags,
         .queueid = queue->msm_queue_id,
         .bos = (uint64_t)(uintptr_t) bo_list.bo_infos,
         .nr_bos = bo_list.count,
         .cmds = (uint64_t)(uintptr_t) cmds,
         .nr_cmds = entry_count,
      };

      int ret = drmCommandWriteRead(queue->device->physical_device->local_fd,
                                    DRM_MSM_GEM_SUBMIT,
                                    &req, sizeof(req));
      if (ret) {
         fprintf(stderr, "submit failed: %s\n", strerror(errno));
         abort();
      }

      tu_bo_list_destroy(&bo_list);

      if (last_submit) {
         /* no need to merge fences as queue execution is serialized */
         tu_fence_update_fd(&queue->submit_fence, req.fence_fd);
      }
   }

   if (_fence != VK_NULL_HANDLE) {
      TU_FROM_HANDLE(tu_fence, fence, _fence);
      tu_fence_copy(fence, &queue->submit_fence);
   }

   return VK_SUCCESS;
}

VkResult
tu_QueueWaitIdle(VkQueue _queue)
{
   TU_FROM_HANDLE(tu_queue, queue, _queue);

   tu_fence_wait_idle(&queue->submit_fence);

   return VK_SUCCESS;
}

VkResult
tu_DeviceWaitIdle(VkDevice _device)
{
   TU_FROM_HANDLE(tu_device, device, _device);

   for (unsigned i = 0; i < TU_MAX_QUEUE_FAMILIES; i++) {
      for (unsigned q = 0; q < device->queue_count[i]; q++) {
         tu_QueueWaitIdle(tu_queue_to_handle(&device->queues[i][q]));
      }
   }
   return VK_SUCCESS;
}

VkResult
tu_ImportSemaphoreFdKHR(VkDevice _device,
                        const VkImportSemaphoreFdInfoKHR *pImportSemaphoreFdInfo)
{
   tu_stub();

   return VK_SUCCESS;
}

VkResult
tu_GetSemaphoreFdKHR(VkDevice _device,
                     const VkSemaphoreGetFdInfoKHR *pGetFdInfo,
                     int *pFd)
{
   tu_stub();

   return VK_SUCCESS;
}

VkResult
tu_ImportFenceFdKHR(VkDevice _device,
                    const VkImportFenceFdInfoKHR *pImportFenceFdInfo)
{
   tu_stub();

   return VK_SUCCESS;
}

VkResult
tu_GetFenceFdKHR(VkDevice _device,
                 const VkFenceGetFdInfoKHR *pGetFdInfo,
                 int *pFd)
{
   tu_stub();

   return VK_SUCCESS;
}

VkResult
tu_EnumerateInstanceExtensionProperties(const char *pLayerName,
                                        uint32_t *pPropertyCount,
                                        VkExtensionProperties *pProperties)
{
   VK_OUTARRAY_MAKE(out, pProperties, pPropertyCount);

   /* We support no layers */
   if (pLayerName)
      return vk_error(NULL, VK_ERROR_LAYER_NOT_PRESENT);

   for (int i = 0; i < TU_INSTANCE_EXTENSION_COUNT; i++) {
      if (tu_supported_instance_extensions.extensions[i]) {
         vk_outarray_append(&out, prop) { *prop = tu_instance_extensions[i]; }
      }
   }

   return vk_outarray_status(&out);
}

VkResult
tu_EnumerateDeviceExtensionProperties(VkPhysicalDevice physicalDevice,
                                      const char *pLayerName,
                                      uint32_t *pPropertyCount,
                                      VkExtensionProperties *pProperties)
{
   TU_FROM_HANDLE(tu_physical_device, device, physicalDevice);
   VK_OUTARRAY_MAKE(out, pProperties, pPropertyCount);

   /* We support no layers */
   if (pLayerName)
      return vk_error(NULL, VK_ERROR_LAYER_NOT_PRESENT);

   for (int i = 0; i < TU_DEVICE_EXTENSION_COUNT; i++) {
      if (device->supported_extensions.extensions[i]) {
         vk_outarray_append(&out, prop) { *prop = tu_device_extensions[i]; }
      }
   }

   return vk_outarray_status(&out);
}

PFN_vkVoidFunction
tu_GetInstanceProcAddr(VkInstance _instance, const char *pName)
{
   TU_FROM_HANDLE(tu_instance, instance, _instance);

   return tu_lookup_entrypoint_checked(
      pName, instance ? instance->api_version : 0,
      instance ? &instance->enabled_extensions : NULL, NULL);
}

/* The loader wants us to expose a second GetInstanceProcAddr function
 * to work around certain LD_PRELOAD issues seen in apps.
 */
PUBLIC
VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL
vk_icdGetInstanceProcAddr(VkInstance instance, const char *pName);

PUBLIC
VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL
vk_icdGetInstanceProcAddr(VkInstance instance, const char *pName)
{
   return tu_GetInstanceProcAddr(instance, pName);
}

PFN_vkVoidFunction
tu_GetDeviceProcAddr(VkDevice _device, const char *pName)
{
   TU_FROM_HANDLE(tu_device, device, _device);

   return tu_lookup_entrypoint_checked(pName, device->instance->api_version,
                                       &device->instance->enabled_extensions,
                                       &device->enabled_extensions);
}

static VkResult
tu_alloc_memory(struct tu_device *device,
                const VkMemoryAllocateInfo *pAllocateInfo,
                const VkAllocationCallbacks *pAllocator,
                VkDeviceMemory *pMem)
{
   struct tu_device_memory *mem;
   VkResult result;

   assert(pAllocateInfo->sType == VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO);

   if (pAllocateInfo->allocationSize == 0) {
      /* Apparently, this is allowed */
      *pMem = VK_NULL_HANDLE;
      return VK_SUCCESS;
   }

   mem = vk_alloc2(&device->alloc, pAllocator, sizeof(*mem), 8,
                   VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (mem == NULL)
      return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);

   const VkImportMemoryFdInfoKHR *fd_info =
      vk_find_struct_const(pAllocateInfo->pNext, IMPORT_MEMORY_FD_INFO_KHR);
   if (fd_info && !fd_info->handleType)
      fd_info = NULL;

   if (fd_info) {
      assert(fd_info->handleType ==
                VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT ||
             fd_info->handleType ==
                VK_EXTERNAL_MEMORY_HANDLE_TYPE_DMA_BUF_BIT_EXT);

      /*
       * TODO Importing the same fd twice gives us the same handle without
       * reference counting. We need to maintain a per-instance handle-to-bo
       * table and add reference count to tu_bo.
       */
      result = tu_bo_init_dmabuf(device, &mem->bo,
                                 pAllocateInfo->allocationSize, fd_info->fd);
      if (result == VK_SUCCESS) {
         /* take ownership and close the fd */
         close(fd_info->fd);
      }
   } else {
      result =
         tu_bo_init_new(device, &mem->bo, pAllocateInfo->allocationSize);
   }

   if (result != VK_SUCCESS) {
      vk_free2(&device->alloc, pAllocator, mem);
      return result;
   }

   mem->size = pAllocateInfo->allocationSize;
   mem->type_index = pAllocateInfo->memoryTypeIndex;

   mem->map = NULL;
   mem->user_ptr = NULL;

   *pMem = tu_device_memory_to_handle(mem);

   return VK_SUCCESS;
}

VkResult
tu_AllocateMemory(VkDevice _device,
                  const VkMemoryAllocateInfo *pAllocateInfo,
                  const VkAllocationCallbacks *pAllocator,
                  VkDeviceMemory *pMem)
{
   TU_FROM_HANDLE(tu_device, device, _device);
   return tu_alloc_memory(device, pAllocateInfo, pAllocator, pMem);
}

void
tu_FreeMemory(VkDevice _device,
              VkDeviceMemory _mem,
              const VkAllocationCallbacks *pAllocator)
{
   TU_FROM_HANDLE(tu_device, device, _device);
   TU_FROM_HANDLE(tu_device_memory, mem, _mem);

   if (mem == NULL)
      return;

   tu_bo_finish(device, &mem->bo);
   vk_free2(&device->alloc, pAllocator, mem);
}

VkResult
tu_MapMemory(VkDevice _device,
             VkDeviceMemory _memory,
             VkDeviceSize offset,
             VkDeviceSize size,
             VkMemoryMapFlags flags,
             void **ppData)
{
   TU_FROM_HANDLE(tu_device, device, _device);
   TU_FROM_HANDLE(tu_device_memory, mem, _memory);
   VkResult result;

   if (mem == NULL) {
      *ppData = NULL;
      return VK_SUCCESS;
   }

   if (mem->user_ptr) {
      *ppData = mem->user_ptr;
   } else if (!mem->map) {
      result = tu_bo_map(device, &mem->bo);
      if (result != VK_SUCCESS)
         return result;
      *ppData = mem->map = mem->bo.map;
   } else
      *ppData = mem->map;

   if (*ppData) {
      *ppData += offset;
      return VK_SUCCESS;
   }

   return vk_error(device->instance, VK_ERROR_MEMORY_MAP_FAILED);
}

void
tu_UnmapMemory(VkDevice _device, VkDeviceMemory _memory)
{
   /* I do not see any unmapping done by the freedreno Gallium driver. */
}

VkResult
tu_FlushMappedMemoryRanges(VkDevice _device,
                           uint32_t memoryRangeCount,
                           const VkMappedMemoryRange *pMemoryRanges)
{
   return VK_SUCCESS;
}

VkResult
tu_InvalidateMappedMemoryRanges(VkDevice _device,
                                uint32_t memoryRangeCount,
                                const VkMappedMemoryRange *pMemoryRanges)
{
   return VK_SUCCESS;
}
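
/* Both functions above can be no-ops: the only memory type advertised in
 * tu_GetPhysicalDeviceMemoryProperties is HOST_COHERENT, so there is nothing
 * for the application to flush or invalidate.
 */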
1724
1725 void
1726 tu_GetBufferMemoryRequirements(VkDevice _device,
1727 VkBuffer _buffer,
1728 VkMemoryRequirements *pMemoryRequirements)
1729 {
1730 TU_FROM_HANDLE(tu_buffer, buffer, _buffer);
1731
1732 pMemoryRequirements->memoryTypeBits = 1;
1733 pMemoryRequirements->alignment = 64;
1734 pMemoryRequirements->size =
1735 align64(buffer->size, pMemoryRequirements->alignment);
1736 }
1737
1738 void
1739 tu_GetBufferMemoryRequirements2(
1740 VkDevice device,
1741 const VkBufferMemoryRequirementsInfo2 *pInfo,
1742 VkMemoryRequirements2 *pMemoryRequirements)
1743 {
1744 tu_GetBufferMemoryRequirements(device, pInfo->buffer,
1745 &pMemoryRequirements->memoryRequirements);
1746 }
1747
1748 void
1749 tu_GetImageMemoryRequirements(VkDevice _device,
1750 VkImage _image,
1751 VkMemoryRequirements *pMemoryRequirements)
1752 {
1753 TU_FROM_HANDLE(tu_image, image, _image);
1754
1755 pMemoryRequirements->memoryTypeBits = 1;
1756 pMemoryRequirements->size = image->layout.size;
1757 pMemoryRequirements->alignment = image->layout.base_align;
1758 }
1759
1760 void
1761 tu_GetImageMemoryRequirements2(VkDevice device,
1762 const VkImageMemoryRequirementsInfo2 *pInfo,
1763 VkMemoryRequirements2 *pMemoryRequirements)
1764 {
1765 tu_GetImageMemoryRequirements(device, pInfo->image,
1766 &pMemoryRequirements->memoryRequirements);
1767 }
1768
1769 void
1770 tu_GetImageSparseMemoryRequirements(
1771 VkDevice device,
1772 VkImage image,
1773 uint32_t *pSparseMemoryRequirementCount,
1774 VkSparseImageMemoryRequirements *pSparseMemoryRequirements)
1775 {
1776 tu_stub();
1777 }
1778
1779 void
1780 tu_GetImageSparseMemoryRequirements2(
1781 VkDevice device,
1782 const VkImageSparseMemoryRequirementsInfo2 *pInfo,
1783 uint32_t *pSparseMemoryRequirementCount,
1784 VkSparseImageMemoryRequirements2 *pSparseMemoryRequirements)
1785 {
1786 tu_stub();
1787 }
1788
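/* No lazily-allocated memory types are exposed, so the committed size is
 * always zero.
 */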
1789 void
1790 tu_GetDeviceMemoryCommitment(VkDevice device,
1791 VkDeviceMemory memory,
1792 VkDeviceSize *pCommittedMemoryInBytes)
1793 {
1794 *pCommittedMemoryInBytes = 0;
1795 }
1796
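/* Binding just records the backing BO and offset; the GPU address is
 * resolved from them when the buffer is actually used.
 */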
1797 VkResult
1798 tu_BindBufferMemory2(VkDevice device,
1799 uint32_t bindInfoCount,
1800 const VkBindBufferMemoryInfo *pBindInfos)
1801 {
1802 for (uint32_t i = 0; i < bindInfoCount; ++i) {
1803 TU_FROM_HANDLE(tu_device_memory, mem, pBindInfos[i].memory);
1804 TU_FROM_HANDLE(tu_buffer, buffer, pBindInfos[i].buffer);
1805
1806 if (mem) {
1807 buffer->bo = &mem->bo;
1808 buffer->bo_offset = pBindInfos[i].memoryOffset;
1809 } else {
1810 buffer->bo = NULL;
1811 }
1812 }
1813 return VK_SUCCESS;
1814 }
1815
1816 VkResult
1817 tu_BindBufferMemory(VkDevice device,
1818 VkBuffer buffer,
1819 VkDeviceMemory memory,
1820 VkDeviceSize memoryOffset)
1821 {
1822 const VkBindBufferMemoryInfo info = {
1823 .sType = VK_STRUCTURE_TYPE_BIND_BUFFER_MEMORY_INFO,
1824 .buffer = buffer,
1825 .memory = memory,
1826 .memoryOffset = memoryOffset
1827 };
1828
1829 return tu_BindBufferMemory2(device, 1, &info);
1830 }
1831
1832 VkResult
1833 tu_BindImageMemory2(VkDevice device,
1834 uint32_t bindInfoCount,
1835 const VkBindImageMemoryInfo *pBindInfos)
1836 {
1837 for (uint32_t i = 0; i < bindInfoCount; ++i) {
1838 TU_FROM_HANDLE(tu_image, image, pBindInfos[i].image);
1839 TU_FROM_HANDLE(tu_device_memory, mem, pBindInfos[i].memory);
1840
1841 if (mem) {
1842 image->bo = &mem->bo;
1843 image->bo_offset = pBindInfos[i].memoryOffset;
1844 } else {
1845 image->bo = NULL;
1846 image->bo_offset = 0;
1847 }
1848 }
1849
1850 return VK_SUCCESS;
1851 }
1852
1853 VkResult
1854 tu_BindImageMemory(VkDevice device,
1855 VkImage image,
1856 VkDeviceMemory memory,
1857 VkDeviceSize memoryOffset)
1858 {
1859 const VkBindImageMemoryInfo info = {
1860       .sType = VK_STRUCTURE_TYPE_BIND_IMAGE_MEMORY_INFO,
1861 .image = image,
1862 .memory = memory,
1863 .memoryOffset = memoryOffset
1864 };
1865
1866 return tu_BindImageMemory2(device, 1, &info);
1867 }
1868
1869 VkResult
1870 tu_QueueBindSparse(VkQueue _queue,
1871 uint32_t bindInfoCount,
1872 const VkBindSparseInfo *pBindInfo,
1873 VkFence _fence)
1874 {
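   /* Sparse binding is not supported (the sparse memory-requirement queries
    * above are stubs), so there is nothing to do.
    */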
1875 return VK_SUCCESS;
1876 }
1877
1878 /* Queue semaphore functions */
1879
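/* tu_semaphore has no payload yet; creating one merely allocates the handle. */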
1880 VkResult
1881 tu_CreateSemaphore(VkDevice _device,
1882 const VkSemaphoreCreateInfo *pCreateInfo,
1883 const VkAllocationCallbacks *pAllocator,
1884 VkSemaphore *pSemaphore)
1885 {
1886 TU_FROM_HANDLE(tu_device, device, _device);
1887
1888 struct tu_semaphore *sem =
1889 vk_alloc2(&device->alloc, pAllocator, sizeof(*sem), 8,
1890 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
1891 if (!sem)
1892 return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
1893
1894 *pSemaphore = tu_semaphore_to_handle(sem);
1895 return VK_SUCCESS;
1896 }
1897
1898 void
1899 tu_DestroySemaphore(VkDevice _device,
1900 VkSemaphore _semaphore,
1901 const VkAllocationCallbacks *pAllocator)
1902 {
1903 TU_FROM_HANDLE(tu_device, device, _device);
1904 TU_FROM_HANDLE(tu_semaphore, sem, _semaphore);
1905 if (!_semaphore)
1906 return;
1907
1908 vk_free2(&device->alloc, pAllocator, sem);
1909 }
1910
1911 VkResult
1912 tu_CreateEvent(VkDevice _device,
1913 const VkEventCreateInfo *pCreateInfo,
1914 const VkAllocationCallbacks *pAllocator,
1915 VkEvent *pEvent)
1916 {
1917 TU_FROM_HANDLE(tu_device, device, _device);
1918 struct tu_event *event =
1919 vk_alloc2(&device->alloc, pAllocator, sizeof(*event), 8,
1920 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
1921
1922 if (!event)
1923 return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
1924
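   /* Only the first 8 bytes hold the event payload, but GEM allocations are
    * page-granular, so allocate one 4 KiB page.
    */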
1925 VkResult result = tu_bo_init_new(device, &event->bo, 0x1000);
1926 if (result != VK_SUCCESS)
1927 goto fail_alloc;
1928
1929 result = tu_bo_map(device, &event->bo);
1930 if (result != VK_SUCCESS)
1931 goto fail_map;
1932
1933 *pEvent = tu_event_to_handle(event);
1934
1935 return VK_SUCCESS;
1936
1937 fail_map:
1938 tu_bo_finish(device, &event->bo);
1939 fail_alloc:
1940 vk_free2(&device->alloc, pAllocator, event);
1941    return vk_error(device->instance, result);
1942 }
1943
1944 void
1945 tu_DestroyEvent(VkDevice _device,
1946 VkEvent _event,
1947 const VkAllocationCallbacks *pAllocator)
1948 {
1949 TU_FROM_HANDLE(tu_device, device, _device);
1950 TU_FROM_HANDLE(tu_event, event, _event);
1951
1952 if (!event)
1953 return;
1954
1955 tu_bo_finish(device, &event->bo);
1956 vk_free2(&device->alloc, pAllocator, event);
1957 }
1958
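/* The event payload lives in a GPU-visible BO so the command stream can also
 * set, reset and poll it.
 */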
1959 VkResult
1960 tu_GetEventStatus(VkDevice _device, VkEvent _event)
1961 {
1962 TU_FROM_HANDLE(tu_event, event, _event);
1963
1964 if (*(uint64_t*) event->bo.map == 1)
1965 return VK_EVENT_SET;
1966 return VK_EVENT_RESET;
1967 }
1968
1969 VkResult
1970 tu_SetEvent(VkDevice _device, VkEvent _event)
1971 {
1972 TU_FROM_HANDLE(tu_event, event, _event);
1973 *(uint64_t*) event->bo.map = 1;
1974
1975 return VK_SUCCESS;
1976 }
1977
1978 VkResult
1979 tu_ResetEvent(VkDevice _device, VkEvent _event)
1980 {
1981 TU_FROM_HANDLE(tu_event, event, _event);
1982 *(uint64_t*) event->bo.map = 0;
1983
1984 return VK_SUCCESS;
1985 }
1986
1987 VkResult
1988 tu_CreateBuffer(VkDevice _device,
1989 const VkBufferCreateInfo *pCreateInfo,
1990 const VkAllocationCallbacks *pAllocator,
1991 VkBuffer *pBuffer)
1992 {
1993 TU_FROM_HANDLE(tu_device, device, _device);
1994 struct tu_buffer *buffer;
1995
1996 assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO);
1997
1998 buffer = vk_alloc2(&device->alloc, pAllocator, sizeof(*buffer), 8,
1999 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
2000 if (buffer == NULL)
2001 return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
2002
2003 buffer->size = pCreateInfo->size;
2004 buffer->usage = pCreateInfo->usage;
2005 buffer->flags = pCreateInfo->flags;
2006
2007 *pBuffer = tu_buffer_to_handle(buffer);
2008
2009 return VK_SUCCESS;
2010 }
2011
2012 void
2013 tu_DestroyBuffer(VkDevice _device,
2014 VkBuffer _buffer,
2015 const VkAllocationCallbacks *pAllocator)
2016 {
2017 TU_FROM_HANDLE(tu_device, device, _device);
2018 TU_FROM_HANDLE(tu_buffer, buffer, _buffer);
2019
2020 if (!buffer)
2021 return;
2022
2023 vk_free2(&device->alloc, pAllocator, buffer);
2024 }
2025
2026 VkResult
2027 tu_CreateFramebuffer(VkDevice _device,
2028 const VkFramebufferCreateInfo *pCreateInfo,
2029 const VkAllocationCallbacks *pAllocator,
2030 VkFramebuffer *pFramebuffer)
2031 {
2032 TU_FROM_HANDLE(tu_device, device, _device);
2033 struct tu_framebuffer *framebuffer;
2034
2035 assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO);
2036
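   /* tu_framebuffer ends with a variable-length array of tu_attachment_info,
    * so allocate it inline after the struct.
    */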
2037 size_t size = sizeof(*framebuffer) + sizeof(struct tu_attachment_info) *
2038 pCreateInfo->attachmentCount;
2039 framebuffer = vk_alloc2(&device->alloc, pAllocator, size, 8,
2040 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
2041 if (framebuffer == NULL)
2042 return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
2043
2044 framebuffer->attachment_count = pCreateInfo->attachmentCount;
2045 framebuffer->width = pCreateInfo->width;
2046 framebuffer->height = pCreateInfo->height;
2047 framebuffer->layers = pCreateInfo->layers;
2048 for (uint32_t i = 0; i < pCreateInfo->attachmentCount; i++) {
2049 VkImageView _iview = pCreateInfo->pAttachments[i];
2050 struct tu_image_view *iview = tu_image_view_from_handle(_iview);
2051 framebuffer->attachments[i].attachment = iview;
2052 }
2053
2054 *pFramebuffer = tu_framebuffer_to_handle(framebuffer);
2055 return VK_SUCCESS;
2056 }
2057
2058 void
2059 tu_DestroyFramebuffer(VkDevice _device,
2060 VkFramebuffer _fb,
2061 const VkAllocationCallbacks *pAllocator)
2062 {
2063 TU_FROM_HANDLE(tu_device, device, _device);
2064 TU_FROM_HANDLE(tu_framebuffer, fb, _fb);
2065
2066 if (!fb)
2067 return;
2068 vk_free2(&device->alloc, pAllocator, fb);
2069 }
2070
2071 static enum a6xx_tex_clamp
2072 tu6_tex_wrap(VkSamplerAddressMode address_mode)
2073 {
2074 switch (address_mode) {
2075 case VK_SAMPLER_ADDRESS_MODE_REPEAT:
2076 return A6XX_TEX_REPEAT;
2077 case VK_SAMPLER_ADDRESS_MODE_MIRRORED_REPEAT:
2078 return A6XX_TEX_MIRROR_REPEAT;
2079 case VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE:
2080 return A6XX_TEX_CLAMP_TO_EDGE;
2081 case VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_BORDER:
2082 return A6XX_TEX_CLAMP_TO_BORDER;
2083 case VK_SAMPLER_ADDRESS_MODE_MIRROR_CLAMP_TO_EDGE:
2084       /* A6XX_TEX_MIRROR_CLAMP only behaves correctly for power-of-two
2085        * sizes; anything else would need to be emulated.
2086        */
2085 return A6XX_TEX_MIRROR_CLAMP;
2086 default:
2087 unreachable("illegal tex wrap mode");
2088 break;
2089 }
2090 }
2091
2092 static enum a6xx_tex_filter
2093 tu6_tex_filter(VkFilter filter, unsigned aniso)
2094 {
2095 switch (filter) {
2096 case VK_FILTER_NEAREST:
2097 return A6XX_TEX_NEAREST;
2098 case VK_FILTER_LINEAR:
2099 return aniso ? A6XX_TEX_ANISO : A6XX_TEX_LINEAR;
2100 case VK_FILTER_CUBIC_EXT:
2101 return A6XX_TEX_CUBIC;
2102 default:
2103 unreachable("illegal texture filter");
2104 break;
2105 }
2106 }
2107
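/* VkCompareOp values match the hardware encoding one-to-one (just like the
 * sampler reduction mode below), so a plain cast suffices.
 */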
2108 static inline enum adreno_compare_func
2109 tu6_compare_func(VkCompareOp op)
2110 {
2111 return (enum adreno_compare_func) op;
2112 }
2113
2114 static void
2115 tu_init_sampler(struct tu_device *device,
2116 struct tu_sampler *sampler,
2117 const VkSamplerCreateInfo *pCreateInfo)
2118 {
2119 const struct VkSamplerReductionModeCreateInfo *reduction =
2120 vk_find_struct_const(pCreateInfo->pNext, SAMPLER_REDUCTION_MODE_CREATE_INFO);
2121
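   /* The hardware anisotropy field is the log2 of the sample count:
    * 1x..16x maps to 0..4.
    */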
2122 unsigned aniso = pCreateInfo->anisotropyEnable ?
2123 util_last_bit(MIN2((uint32_t)pCreateInfo->maxAnisotropy >> 1, 8)) : 0;
2124 bool miplinear = (pCreateInfo->mipmapMode == VK_SAMPLER_MIPMAP_MODE_LINEAR);
2125
2126 sampler->descriptor[0] =
2127 COND(miplinear, A6XX_TEX_SAMP_0_MIPFILTER_LINEAR_NEAR) |
2128 A6XX_TEX_SAMP_0_XY_MAG(tu6_tex_filter(pCreateInfo->magFilter, aniso)) |
2129 A6XX_TEX_SAMP_0_XY_MIN(tu6_tex_filter(pCreateInfo->minFilter, aniso)) |
2130 A6XX_TEX_SAMP_0_ANISO(aniso) |
2131 A6XX_TEX_SAMP_0_WRAP_S(tu6_tex_wrap(pCreateInfo->addressModeU)) |
2132 A6XX_TEX_SAMP_0_WRAP_T(tu6_tex_wrap(pCreateInfo->addressModeV)) |
2133 A6XX_TEX_SAMP_0_WRAP_R(tu6_tex_wrap(pCreateInfo->addressModeW)) |
2134 A6XX_TEX_SAMP_0_LOD_BIAS(pCreateInfo->mipLodBias);
2135 sampler->descriptor[1] =
2136 /* COND(!cso->seamless_cube_map, A6XX_TEX_SAMP_1_CUBEMAPSEAMLESSFILTOFF) | */
2137 COND(pCreateInfo->unnormalizedCoordinates, A6XX_TEX_SAMP_1_UNNORM_COORDS) |
2138 A6XX_TEX_SAMP_1_MIN_LOD(pCreateInfo->minLod) |
2139 A6XX_TEX_SAMP_1_MAX_LOD(pCreateInfo->maxLod) |
2140 COND(pCreateInfo->compareEnable,
2141 A6XX_TEX_SAMP_1_COMPARE_FUNC(tu6_compare_func(pCreateInfo->compareOp)));
2142 /* This is an offset into the border_color BO, which we fill with all the
2143 * possible Vulkan border colors in the correct order, so we can just use
2144 * the Vulkan enum with no translation necessary.
2145 */
2146 sampler->descriptor[2] =
2147 A6XX_TEX_SAMP_2_BCOLOR_OFFSET((unsigned) pCreateInfo->borderColor *
2148 sizeof(struct bcolor_entry));
2149 sampler->descriptor[3] = 0;
2150
2151 if (reduction) {
2152 /* note: vulkan enum matches hw */
2153 sampler->descriptor[2] |= A6XX_TEX_SAMP_2_REDUCTION_MODE(reduction->reductionMode);
2154 }
2155
2156 /* TODO:
2157    * A6XX_TEX_SAMP_1_MIPFILTER_LINEAR_FAR disables mipmapping, but Vulkan has no NONE mipmap mode to map it to.
2158 */
2159 }
2160
2161 VkResult
2162 tu_CreateSampler(VkDevice _device,
2163 const VkSamplerCreateInfo *pCreateInfo,
2164 const VkAllocationCallbacks *pAllocator,
2165 VkSampler *pSampler)
2166 {
2167 TU_FROM_HANDLE(tu_device, device, _device);
2168 struct tu_sampler *sampler;
2169
2170 assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_SAMPLER_CREATE_INFO);
2171
2172 sampler = vk_alloc2(&device->alloc, pAllocator, sizeof(*sampler), 8,
2173 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
2174 if (!sampler)
2175 return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
2176
2177 tu_init_sampler(device, sampler, pCreateInfo);
2178 *pSampler = tu_sampler_to_handle(sampler);
2179
2180 return VK_SUCCESS;
2181 }
2182
2183 void
2184 tu_DestroySampler(VkDevice _device,
2185 VkSampler _sampler,
2186 const VkAllocationCallbacks *pAllocator)
2187 {
2188 TU_FROM_HANDLE(tu_device, device, _device);
2189 TU_FROM_HANDLE(tu_sampler, sampler, _sampler);
2190
2191 if (!sampler)
2192 return;
2193 vk_free2(&device->alloc, pAllocator, sampler);
2194 }
2195
2196 /* vk_icd.h does not declare this function, so we declare it here to
2197  * suppress -Wmissing-prototypes.
2198 */
2199 PUBLIC VKAPI_ATTR VkResult VKAPI_CALL
2200 vk_icdNegotiateLoaderICDInterfaceVersion(uint32_t *pSupportedVersion);
2201
2202 PUBLIC VKAPI_ATTR VkResult VKAPI_CALL
2203 vk_icdNegotiateLoaderICDInterfaceVersion(uint32_t *pSupportedVersion)
2204 {
2205 /* For the full details on loader interface versioning, see
2206 * <https://github.com/KhronosGroup/Vulkan-LoaderAndValidationLayers/blob/master/loader/LoaderAndLayerInterface.md>.
2207 * What follows is a condensed summary, to help you navigate the large and
2208 * confusing official doc.
2209 *
2210 * - Loader interface v0 is incompatible with later versions. We don't
2211 * support it.
2212 *
2213 * - In loader interface v1:
2214 * - The first ICD entrypoint called by the loader is
2215 * vk_icdGetInstanceProcAddr(). The ICD must statically expose this
2216 * entrypoint.
2217 * - The ICD must statically expose no other Vulkan symbol unless it
2218 * is linked with -Bsymbolic.
2219 * - Each dispatchable Vulkan handle created by the ICD must be
2220 * a pointer to a struct whose first member is VK_LOADER_DATA. The
2221 * ICD must initialize VK_LOADER_DATA.loadMagic to
2222 * ICD_LOADER_MAGIC.
2223 * - The loader implements vkCreate{PLATFORM}SurfaceKHR() and
2224 * vkDestroySurfaceKHR(). The ICD must be capable of working with
2225 * such loader-managed surfaces.
2226 *
2227 * - Loader interface v2 differs from v1 in:
2228 * - The first ICD entrypoint called by the loader is
2229 * vk_icdNegotiateLoaderICDInterfaceVersion(). The ICD must
2230 * statically expose this entrypoint.
2231 *
2232 * - Loader interface v3 differs from v2 in:
2233 * - The ICD must implement vkCreate{PLATFORM}SurfaceKHR(),
2234    *      vkDestroySurfaceKHR(), and the other APIs that use VkSurfaceKHR,
2235 * because the loader no longer does so.
2236 */
2237 *pSupportedVersion = MIN2(*pSupportedVersion, 3u);
2238 return VK_SUCCESS;
2239 }
2240
2241 VkResult
2242 tu_GetMemoryFdKHR(VkDevice _device,
2243 const VkMemoryGetFdInfoKHR *pGetFdInfo,
2244 int *pFd)
2245 {
2246 TU_FROM_HANDLE(tu_device, device, _device);
2247 TU_FROM_HANDLE(tu_device_memory, memory, pGetFdInfo->memory);
2248
2249 assert(pGetFdInfo->sType == VK_STRUCTURE_TYPE_MEMORY_GET_FD_INFO_KHR);
2250
2251 /* At the moment, we support only the below handle types. */
2252 assert(pGetFdInfo->handleType ==
2253 VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT ||
2254 pGetFdInfo->handleType ==
2255 VK_EXTERNAL_MEMORY_HANDLE_TYPE_DMA_BUF_BIT_EXT);
2256
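   /* Both OPAQUE_FD and DMA_BUF exports hand out the same dma-buf fd; the
    * kernel object does not distinguish the two.
    */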
2257 int prime_fd = tu_bo_export_dmabuf(device, &memory->bo);
2258 if (prime_fd < 0)
2259 return vk_error(device->instance, VK_ERROR_OUT_OF_DEVICE_MEMORY);
2260
2261 *pFd = prime_fd;
2262 return VK_SUCCESS;
2263 }
2264
2265 VkResult
2266 tu_GetMemoryFdPropertiesKHR(VkDevice _device,
2267 VkExternalMemoryHandleTypeFlagBits handleType,
2268 int fd,
2269 VkMemoryFdPropertiesKHR *pMemoryFdProperties)
2270 {
2271 assert(handleType == VK_EXTERNAL_MEMORY_HANDLE_TYPE_DMA_BUF_BIT_EXT);
2272 pMemoryFdProperties->memoryTypeBits = 1;
2273 return VK_SUCCESS;
2274 }
2275
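/* No external semaphore or fence handle types are supported yet, so both
 * queries below report empty capabilities.
 */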
2276 void
2277 tu_GetPhysicalDeviceExternalSemaphoreProperties(
2278 VkPhysicalDevice physicalDevice,
2279 const VkPhysicalDeviceExternalSemaphoreInfo *pExternalSemaphoreInfo,
2280 VkExternalSemaphoreProperties *pExternalSemaphoreProperties)
2281 {
2282 pExternalSemaphoreProperties->exportFromImportedHandleTypes = 0;
2283 pExternalSemaphoreProperties->compatibleHandleTypes = 0;
2284 pExternalSemaphoreProperties->externalSemaphoreFeatures = 0;
2285 }
2286
2287 void
2288 tu_GetPhysicalDeviceExternalFenceProperties(
2289 VkPhysicalDevice physicalDevice,
2290 const VkPhysicalDeviceExternalFenceInfo *pExternalFenceInfo,
2291 VkExternalFenceProperties *pExternalFenceProperties)
2292 {
2293 pExternalFenceProperties->exportFromImportedHandleTypes = 0;
2294 pExternalFenceProperties->compatibleHandleTypes = 0;
2295 pExternalFenceProperties->externalFenceFeatures = 0;
2296 }
2297
2298 VkResult
2299 tu_CreateDebugReportCallbackEXT(
2300 VkInstance _instance,
2301 const VkDebugReportCallbackCreateInfoEXT *pCreateInfo,
2302 const VkAllocationCallbacks *pAllocator,
2303 VkDebugReportCallbackEXT *pCallback)
2304 {
2305 TU_FROM_HANDLE(tu_instance, instance, _instance);
2306 return vk_create_debug_report_callback(&instance->debug_report_callbacks,
2307 pCreateInfo, pAllocator,
2308 &instance->alloc, pCallback);
2309 }
2310
2311 void
2312 tu_DestroyDebugReportCallbackEXT(VkInstance _instance,
2313 VkDebugReportCallbackEXT _callback,
2314 const VkAllocationCallbacks *pAllocator)
2315 {
2316 TU_FROM_HANDLE(tu_instance, instance, _instance);
2317 vk_destroy_debug_report_callback(&instance->debug_report_callbacks,
2318 _callback, pAllocator, &instance->alloc);
2319 }
2320
2321 void
2322 tu_DebugReportMessageEXT(VkInstance _instance,
2323 VkDebugReportFlagsEXT flags,
2324 VkDebugReportObjectTypeEXT objectType,
2325 uint64_t object,
2326 size_t location,
2327 int32_t messageCode,
2328 const char *pLayerPrefix,
2329 const char *pMessage)
2330 {
2331 TU_FROM_HANDLE(tu_instance, instance, _instance);
2332 vk_debug_report(&instance->debug_report_callbacks, flags, objectType,
2333 object, location, messageCode, pLayerPrefix, pMessage);
2334 }
2335
2336 void
2337 tu_GetDeviceGroupPeerMemoryFeatures(
2338 VkDevice device,
2339 uint32_t heapIndex,
2340 uint32_t localDeviceIndex,
2341 uint32_t remoteDeviceIndex,
2342 VkPeerMemoryFeatureFlags *pPeerMemoryFeatures)
2343 {
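   /* Device groups only ever contain the single physical device, so the
    * "peer" is always the device itself and every feature is available.
    */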
2344 assert(localDeviceIndex == remoteDeviceIndex);
2345
2346 *pPeerMemoryFeatures = VK_PEER_MEMORY_FEATURE_COPY_SRC_BIT |
2347 VK_PEER_MEMORY_FEATURE_COPY_DST_BIT |
2348 VK_PEER_MEMORY_FEATURE_GENERIC_SRC_BIT |
2349 VK_PEER_MEMORY_FEATURE_GENERIC_DST_BIT;
2350 }
2351
2352 void tu_GetPhysicalDeviceMultisamplePropertiesEXT(
2353 VkPhysicalDevice physicalDevice,
2354 VkSampleCountFlagBits samples,
2355 VkMultisamplePropertiesEXT* pMultisampleProperties)
2356 {
2357 TU_FROM_HANDLE(tu_physical_device, pdevice, physicalDevice);
2358
2359 if (samples <= VK_SAMPLE_COUNT_4_BIT && pdevice->supported_extensions.EXT_sample_locations)
2360 pMultisampleProperties->maxSampleLocationGridSize = (VkExtent2D){ 1, 1 };
2361 else
2362 pMultisampleProperties->maxSampleLocationGridSize = (VkExtent2D){ 0, 0 };
2363 }