tu: Add noubwc debug flag to disable UBWC
[mesa.git] / src / freedreno / vulkan / tu_device.c
1 /*
2 * Copyright © 2016 Red Hat.
3 * Copyright © 2016 Bas Nieuwenhuizen
4 *
5 * based in part on anv driver which is:
6 * Copyright © 2015 Intel Corporation
7 *
8 * Permission is hereby granted, free of charge, to any person obtaining a
9 * copy of this software and associated documentation files (the "Software"),
10 * to deal in the Software without restriction, including without limitation
11 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
12 * and/or sell copies of the Software, and to permit persons to whom the
13 * Software is furnished to do so, subject to the following conditions:
14 *
15 * The above copyright notice and this permission notice (including the next
16 * paragraph) shall be included in all copies or substantial portions of the
17 * Software.
18 *
19 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
20 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
21 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
22 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
23 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
24 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
25 * DEALINGS IN THE SOFTWARE.
26 */
27
28 #include "tu_private.h"
29
30 #include <fcntl.h>
31 #include <libsync.h>
32 #include <stdbool.h>
33 #include <string.h>
34 #include <sys/mman.h>
35 #include <sys/sysinfo.h>
36 #include <unistd.h>
37 #include <xf86drm.h>
38
39 #include "compiler/glsl_types.h"
40 #include "util/debug.h"
41 #include "util/disk_cache.h"
42 #include "util/u_atomic.h"
43 #include "vk_format.h"
44 #include "vk_util.h"
45
46 #include "drm-uapi/msm_drm.h"
47
48 static int
49 tu_device_get_cache_uuid(uint16_t family, void *uuid)
50 {
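/* Layout note: bytes 0-3 below hold the Mesa build timestamp, bytes 4-5 the
 * GPU family, and byte 6 onward the literal "tu"; the rest stays zeroed.
 */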
51 uint32_t mesa_timestamp;
52 uint16_t f = family;
53 memset(uuid, 0, VK_UUID_SIZE);
54 if (!disk_cache_get_function_timestamp(tu_device_get_cache_uuid,
55 &mesa_timestamp))
56 return -1;
57
58 memcpy(uuid, &mesa_timestamp, 4);
59 memcpy((char *) uuid + 4, &f, 2);
60 snprintf((char *) uuid + 6, VK_UUID_SIZE - 10, "tu");
61 return 0;
62 }
63
64 static void
65 tu_get_driver_uuid(void *uuid)
66 {
67 memset(uuid, 0, VK_UUID_SIZE);
68 snprintf(uuid, VK_UUID_SIZE, "freedreno");
69 }
70
71 static void
72 tu_get_device_uuid(void *uuid)
73 {
74 memset(uuid, 0, VK_UUID_SIZE);
75 }
76
77 static VkResult
78 tu_bo_init(struct tu_device *dev,
79 struct tu_bo *bo,
80 uint32_t gem_handle,
81 uint64_t size)
82 {
83 uint64_t iova = tu_gem_info_iova(dev, gem_handle);
84 if (!iova)
85 return VK_ERROR_OUT_OF_DEVICE_MEMORY;
86
87 *bo = (struct tu_bo) {
88 .gem_handle = gem_handle,
89 .size = size,
90 .iova = iova,
91 };
92
93 return VK_SUCCESS;
94 }
95
96 VkResult
97 tu_bo_init_new(struct tu_device *dev, struct tu_bo *bo, uint64_t size)
98 {
99 /* TODO: Choose better flags. As of 2018-11-12, freedreno/drm/msm_bo.c
100 * always sets `flags = MSM_BO_WC`, and we copy that behavior here.
101 */
102 uint32_t gem_handle = tu_gem_new(dev, size, MSM_BO_WC);
103 if (!gem_handle)
104 return vk_error(dev->instance, VK_ERROR_OUT_OF_DEVICE_MEMORY);
105
106 VkResult result = tu_bo_init(dev, bo, gem_handle, size);
107 if (result != VK_SUCCESS) {
108 tu_gem_close(dev, gem_handle);
109 return vk_error(dev->instance, result);
110 }
111
112 return VK_SUCCESS;
113 }
114
115 VkResult
116 tu_bo_init_dmabuf(struct tu_device *dev,
117 struct tu_bo *bo,
118 uint64_t size,
119 int fd)
120 {
121 uint32_t gem_handle = tu_gem_import_dmabuf(dev, fd, size);
122 if (!gem_handle)
123 return vk_error(dev->instance, VK_ERROR_INVALID_EXTERNAL_HANDLE);
124
125 VkResult result = tu_bo_init(dev, bo, gem_handle, size);
126 if (result != VK_SUCCESS) {
127 tu_gem_close(dev, gem_handle);
128 return vk_error(dev->instance, result);
129 }
130
131 return VK_SUCCESS;
132 }
133
134 int
135 tu_bo_export_dmabuf(struct tu_device *dev, struct tu_bo *bo)
136 {
137 return tu_gem_export_dmabuf(dev, bo->gem_handle);
138 }
139
140 VkResult
141 tu_bo_map(struct tu_device *dev, struct tu_bo *bo)
142 {
143 if (bo->map)
144 return VK_SUCCESS;
145
146 uint64_t offset = tu_gem_info_offset(dev, bo->gem_handle);
147 if (!offset)
148 return vk_error(dev->instance, VK_ERROR_OUT_OF_DEVICE_MEMORY);
149
150 /* TODO: Should we use the wrapper os_mmap() like Freedreno does? */
151 void *map = mmap(0, bo->size, PROT_READ | PROT_WRITE, MAP_SHARED,
152 dev->physical_device->local_fd, offset);
153 if (map == MAP_FAILED)
154 return vk_error(dev->instance, VK_ERROR_MEMORY_MAP_FAILED);
155
156 bo->map = map;
157 return VK_SUCCESS;
158 }
159
160 void
161 tu_bo_finish(struct tu_device *dev, struct tu_bo *bo)
162 {
163 assert(bo->gem_handle);
164
165 if (bo->map)
166 munmap(bo->map, bo->size);
167
168 tu_gem_close(dev, bo->gem_handle);
169 }
170
171 static VkResult
172 tu_physical_device_init(struct tu_physical_device *device,
173 struct tu_instance *instance,
174 drmDevicePtr drm_device)
175 {
176 const char *path = drm_device->nodes[DRM_NODE_RENDER];
177 VkResult result = VK_SUCCESS;
178 drmVersionPtr version;
179 int fd;
180 int master_fd = -1;
181
182 fd = open(path, O_RDWR | O_CLOEXEC);
183 if (fd < 0) {
184 return vk_errorf(instance, VK_ERROR_INCOMPATIBLE_DRIVER,
185 "failed to open device %s", path);
186 }
187
188 /* Version 1.3 added MSM_INFO_IOVA. */
189 const int min_version_major = 1;
190 const int min_version_minor = 3;
191
192 version = drmGetVersion(fd);
193 if (!version) {
194 close(fd);
195 return vk_errorf(instance, VK_ERROR_INCOMPATIBLE_DRIVER,
196 "failed to query kernel driver version for device %s",
197 path);
198 }
199
200 if (strcmp(version->name, "msm")) {
201 drmFreeVersion(version);
202 close(fd);
203 return vk_errorf(instance, VK_ERROR_INCOMPATIBLE_DRIVER,
204 "device %s does not use the msm kernel driver", path);
205 }
206
207 if (version->version_major != min_version_major ||
208 version->version_minor < min_version_minor) {
209 result = vk_errorf(instance, VK_ERROR_INCOMPATIBLE_DRIVER,
210 "kernel driver for device %s has version %d.%d, "
211 "but Vulkan requires version >= %d.%d",
212 path, version->version_major, version->version_minor,
213 min_version_major, min_version_minor);
214 drmFreeVersion(version);
215 close(fd);
216 return result;
217 }
218
219 drmFreeVersion(version);
220
221 if (instance->debug_flags & TU_DEBUG_STARTUP)
222 tu_logi("Found compatible device '%s'.", path);
223
224 device->_loader_data.loaderMagic = ICD_LOADER_MAGIC;
225 device->instance = instance;
226 assert(strlen(path) < ARRAY_SIZE(device->path));
227 strncpy(device->path, path, ARRAY_SIZE(device->path));
228
229 if (instance->enabled_extensions.KHR_display) {
230 master_fd =
231 open(drm_device->nodes[DRM_NODE_PRIMARY], O_RDWR | O_CLOEXEC);
232 if (master_fd >= 0) {
233 /* TODO: free master_fd if accel is not working? */
234 }
235 }
236
237 device->master_fd = master_fd;
238 device->local_fd = fd;
239
240 if (tu_drm_get_gpu_id(device, &device->gpu_id)) {
241 if (instance->debug_flags & TU_DEBUG_STARTUP)
242 tu_logi("Could not query the GPU ID");
243 result = vk_errorf(instance, VK_ERROR_INITIALIZATION_FAILED,
244 "could not get GPU ID");
245 goto fail;
246 }
247
248 if (tu_drm_get_gmem_size(device, &device->gmem_size)) {
249 if (instance->debug_flags & TU_DEBUG_STARTUP)
250 tu_logi("Could not query the GMEM size");
251 result = vk_errorf(instance, VK_ERROR_INITIALIZATION_FAILED,
252 "could not get GMEM size");
253 goto fail;
254 }
255
256 if (tu_drm_get_gmem_base(device, &device->gmem_base)) {
257 if (instance->debug_flags & TU_DEBUG_STARTUP)
258 tu_logi("Could not query the GMEM size");
259 result = vk_errorf(instance, VK_ERROR_INITIALIZATION_FAILED,
260 "could not get GMEM size");
261 goto fail;
262 }
263
264 memset(device->name, 0, sizeof(device->name));
265 sprintf(device->name, "FD%d", device->gpu_id);
266
267 switch (device->gpu_id) {
268 case 618:
269 device->ccu_offset_gmem = 0x7c000; /* 0x7e000 in some cases? */
270 device->ccu_offset_bypass = 0x10000;
271 device->tile_align_w = 64;
272 device->magic.PC_UNKNOWN_9805 = 0x0;
273 device->magic.SP_UNKNOWN_A0F8 = 0x0;
274 break;
275 case 630:
276 case 640:
277 device->ccu_offset_gmem = 0xf8000;
278 device->ccu_offset_bypass = 0x20000;
279 device->tile_align_w = 64;
280 device->magic.PC_UNKNOWN_9805 = 0x1;
281 device->magic.SP_UNKNOWN_A0F8 = 0x1;
282 break;
283 case 650:
284 device->ccu_offset_gmem = 0x114000;
285 device->ccu_offset_bypass = 0x30000;
286 device->tile_align_w = 96;
287 device->magic.PC_UNKNOWN_9805 = 0x2;
288 device->magic.SP_UNKNOWN_A0F8 = 0x2;
289 break;
290 default:
291 result = vk_errorf(instance, VK_ERROR_INITIALIZATION_FAILED,
292 "device %s is unsupported", device->name);
293 goto fail;
294 }
295 if (tu_device_get_cache_uuid(device->gpu_id, device->cache_uuid)) {
296 result = vk_errorf(instance, VK_ERROR_INITIALIZATION_FAILED,
297 "cannot generate UUID");
298 goto fail;
299 }
300
301 /* The gpu id is already embedded in the cache uuid, so passing the
302 * renderer name is enough when creating the cache.
303 */
304 char buf[VK_UUID_SIZE * 2 + 1];
305 disk_cache_format_hex_id(buf, device->cache_uuid, VK_UUID_SIZE * 2);
306 device->disk_cache = disk_cache_create(device->name, buf, 0);
307
308 fprintf(stderr, "WARNING: tu is not a conformant vulkan implementation, "
309 "testing use only.\n");
310
311 tu_get_driver_uuid(&device->driver_uuid);
312 tu_get_device_uuid(&device->device_uuid);
313
314 tu_physical_device_get_supported_extensions(device, &device->supported_extensions);
315
316 if (result != VK_SUCCESS) {
317 vk_error(instance, result);
318 goto fail;
319 }
320
321 result = tu_wsi_init(device);
322 if (result != VK_SUCCESS) {
323 vk_error(instance, result);
324 goto fail;
325 }
326
327 return VK_SUCCESS;
328
329 fail:
330 close(fd);
331 if (master_fd != -1)
332 close(master_fd);
333 return result;
334 }
335
336 static void
337 tu_physical_device_finish(struct tu_physical_device *device)
338 {
339 tu_wsi_finish(device);
340
341 disk_cache_destroy(device->disk_cache);
342 close(device->local_fd);
343 if (device->master_fd != -1)
344 close(device->master_fd);
345 }
346
347 static VKAPI_ATTR void *
348 default_alloc_func(void *pUserData,
349 size_t size,
350 size_t align,
351 VkSystemAllocationScope allocationScope)
352 {
353 return malloc(size);
354 }
355
356 static VKAPI_ATTR void *
357 default_realloc_func(void *pUserData,
358 void *pOriginal,
359 size_t size,
360 size_t align,
361 VkSystemAllocationScope allocationScope)
362 {
363 return realloc(pOriginal, size);
364 }
365
366 static VKAPI_ATTR void
367 default_free_func(void *pUserData, void *pMemory)
368 {
369 free(pMemory);
370 }
371
372 static const VkAllocationCallbacks default_alloc = {
373 .pUserData = NULL,
374 .pfnAllocation = default_alloc_func,
375 .pfnReallocation = default_realloc_func,
376 .pfnFree = default_free_func,
377 };
378
379 static const struct debug_control tu_debug_options[] = {
380 { "startup", TU_DEBUG_STARTUP },
381 { "nir", TU_DEBUG_NIR },
382 { "ir3", TU_DEBUG_IR3 },
383 { "nobin", TU_DEBUG_NOBIN },
384 { "sysmem", TU_DEBUG_SYSMEM },
385 { "forcebin", TU_DEBUG_FORCEBIN },
386 { "noubwc", TU_DEBUG_NOUBWC },
387 { NULL, 0 }
388 };
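/* Debug flags are read from the TU_DEBUG environment variable as a
 * comma-separated list and ORed together by parse_debug_string() in
 * tu_CreateInstance(), e.g. (a hypothetical invocation):
 *
 *   TU_DEBUG=startup,noubwc ./my-vulkan-app
 */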
389
390 const char *
391 tu_get_debug_option_name(int id)
392 {
393 assert(id < ARRAY_SIZE(tu_debug_options) - 1);
394 return tu_debug_options[id].string;
395 }
396
397 static int
398 tu_get_instance_extension_index(const char *name)
399 {
400 for (unsigned i = 0; i < TU_INSTANCE_EXTENSION_COUNT; ++i) {
401 if (strcmp(name, tu_instance_extensions[i].extensionName) == 0)
402 return i;
403 }
404 return -1;
405 }
406
407 VkResult
408 tu_CreateInstance(const VkInstanceCreateInfo *pCreateInfo,
409 const VkAllocationCallbacks *pAllocator,
410 VkInstance *pInstance)
411 {
412 struct tu_instance *instance;
413 VkResult result;
414
415 assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_INSTANCE_CREATE_INFO);
416
417 uint32_t client_version;
418 if (pCreateInfo->pApplicationInfo &&
419 pCreateInfo->pApplicationInfo->apiVersion != 0) {
420 client_version = pCreateInfo->pApplicationInfo->apiVersion;
421 } else {
422 tu_EnumerateInstanceVersion(&client_version);
423 }
424
425 instance = vk_zalloc2(&default_alloc, pAllocator, sizeof(*instance), 8,
426 VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
427 if (!instance)
428 return vk_error(NULL, VK_ERROR_OUT_OF_HOST_MEMORY);
429
430 instance->_loader_data.loaderMagic = ICD_LOADER_MAGIC;
431
432 if (pAllocator)
433 instance->alloc = *pAllocator;
434 else
435 instance->alloc = default_alloc;
436
437 instance->api_version = client_version;
438 instance->physical_device_count = -1;
439
440 instance->debug_flags =
441 parse_debug_string(getenv("TU_DEBUG"), tu_debug_options);
442
443 if (instance->debug_flags & TU_DEBUG_STARTUP)
444 tu_logi("Created an instance");
445
446 for (uint32_t i = 0; i < pCreateInfo->enabledExtensionCount; i++) {
447 const char *ext_name = pCreateInfo->ppEnabledExtensionNames[i];
448 int index = tu_get_instance_extension_index(ext_name);
449
450 if (index < 0 || !tu_instance_extensions_supported.extensions[index]) {
451 vk_free2(&default_alloc, pAllocator, instance);
452 return vk_error(instance, VK_ERROR_EXTENSION_NOT_PRESENT);
453 }
454
455 instance->enabled_extensions.extensions[index] = true;
456 }
457
458 result = vk_debug_report_instance_init(&instance->debug_report_callbacks);
459 if (result != VK_SUCCESS) {
460 vk_free2(&default_alloc, pAllocator, instance);
461 return vk_error(instance, result);
462 }
463
464 glsl_type_singleton_init_or_ref();
465
466 VG(VALGRIND_CREATE_MEMPOOL(instance, 0, false));
467
468 *pInstance = tu_instance_to_handle(instance);
469
470 return VK_SUCCESS;
471 }
472
473 void
474 tu_DestroyInstance(VkInstance _instance,
475 const VkAllocationCallbacks *pAllocator)
476 {
477 TU_FROM_HANDLE(tu_instance, instance, _instance);
478
479 if (!instance)
480 return;
481
482 for (int i = 0; i < instance->physical_device_count; ++i) {
483 tu_physical_device_finish(instance->physical_devices + i);
484 }
485
486 VG(VALGRIND_DESTROY_MEMPOOL(instance));
487
488 glsl_type_singleton_decref();
489
490 vk_debug_report_instance_destroy(&instance->debug_report_callbacks);
491
492 vk_free(&instance->alloc, instance);
493 }
494
495 static VkResult
496 tu_enumerate_devices(struct tu_instance *instance)
497 {
498 /* TODO: Check for more devices? */
499 drmDevicePtr devices[8];
500 VkResult result = VK_ERROR_INCOMPATIBLE_DRIVER;
501 int max_devices;
502
503 instance->physical_device_count = 0;
504
505 max_devices = drmGetDevices2(0, devices, ARRAY_SIZE(devices));
506
507 if (instance->debug_flags & TU_DEBUG_STARTUP)
508 tu_logi("Found %d drm nodes", max_devices);
509
510 if (max_devices < 1)
511 return vk_error(instance, VK_ERROR_INCOMPATIBLE_DRIVER);
512
513 for (unsigned i = 0; i < (unsigned) max_devices; i++) {
514 if (devices[i]->available_nodes & 1 << DRM_NODE_RENDER &&
515 devices[i]->bustype == DRM_BUS_PLATFORM) {
516
517 result = tu_physical_device_init(
518 instance->physical_devices + instance->physical_device_count,
519 instance, devices[i]);
520 if (result == VK_SUCCESS)
521 ++instance->physical_device_count;
522 else if (result != VK_ERROR_INCOMPATIBLE_DRIVER)
523 break;
524 }
525 }
526 drmFreeDevices(devices, max_devices);
527
528 return result;
529 }
530
531 VkResult
532 tu_EnumeratePhysicalDevices(VkInstance _instance,
533 uint32_t *pPhysicalDeviceCount,
534 VkPhysicalDevice *pPhysicalDevices)
535 {
536 TU_FROM_HANDLE(tu_instance, instance, _instance);
537 VK_OUTARRAY_MAKE(out, pPhysicalDevices, pPhysicalDeviceCount);
538
539 VkResult result;
540
541 if (instance->physical_device_count < 0) {
542 result = tu_enumerate_devices(instance);
543 if (result != VK_SUCCESS && result != VK_ERROR_INCOMPATIBLE_DRIVER)
544 return result;
545 }
546
547 for (uint32_t i = 0; i < instance->physical_device_count; ++i) {
548 vk_outarray_append(&out, p)
549 {
550 *p = tu_physical_device_to_handle(instance->physical_devices + i);
551 }
552 }
553
554 return vk_outarray_status(&out);
555 }
556
557 VkResult
558 tu_EnumeratePhysicalDeviceGroups(
559 VkInstance _instance,
560 uint32_t *pPhysicalDeviceGroupCount,
561 VkPhysicalDeviceGroupProperties *pPhysicalDeviceGroupProperties)
562 {
563 TU_FROM_HANDLE(tu_instance, instance, _instance);
564 VK_OUTARRAY_MAKE(out, pPhysicalDeviceGroupProperties,
565 pPhysicalDeviceGroupCount);
566 VkResult result;
567
568 if (instance->physical_device_count < 0) {
569 result = tu_enumerate_devices(instance);
570 if (result != VK_SUCCESS && result != VK_ERROR_INCOMPATIBLE_DRIVER)
571 return result;
572 }
573
574 for (uint32_t i = 0; i < instance->physical_device_count; ++i) {
575 vk_outarray_append(&out, p)
576 {
577 p->physicalDeviceCount = 1;
578 p->physicalDevices[0] =
579 tu_physical_device_to_handle(instance->physical_devices + i);
580 p->subsetAllocation = false;
581 }
582 }
583
584 return vk_outarray_status(&out);
585 }
586
587 void
588 tu_GetPhysicalDeviceFeatures(VkPhysicalDevice physicalDevice,
589 VkPhysicalDeviceFeatures *pFeatures)
590 {
591 memset(pFeatures, 0, sizeof(*pFeatures));
592
593 *pFeatures = (VkPhysicalDeviceFeatures) {
594 .robustBufferAccess = false,
595 .fullDrawIndexUint32 = true,
596 .imageCubeArray = true,
597 .independentBlend = true,
598 .geometryShader = true,
599 .tessellationShader = false,
600 .sampleRateShading = true,
601 .dualSrcBlend = true,
602 .logicOp = true,
603 .multiDrawIndirect = false,
604 .drawIndirectFirstInstance = false,
605 .depthClamp = true,
606 .depthBiasClamp = false,
607 .fillModeNonSolid = false,
608 .depthBounds = false,
609 .wideLines = false,
610 .largePoints = false,
611 .alphaToOne = false,
612 .multiViewport = false,
613 .samplerAnisotropy = true,
614 .textureCompressionETC2 = true,
615 .textureCompressionASTC_LDR = true,
616 .textureCompressionBC = true,
617 .occlusionQueryPrecise = true,
618 .pipelineStatisticsQuery = false,
619 .vertexPipelineStoresAndAtomics = false,
620 .fragmentStoresAndAtomics = false,
621 .shaderTessellationAndGeometryPointSize = false,
622 .shaderImageGatherExtended = false,
623 .shaderStorageImageExtendedFormats = false,
624 .shaderStorageImageMultisample = false,
625 .shaderUniformBufferArrayDynamicIndexing = false,
626 .shaderSampledImageArrayDynamicIndexing = false,
627 .shaderStorageBufferArrayDynamicIndexing = false,
628 .shaderStorageImageArrayDynamicIndexing = false,
629 .shaderStorageImageReadWithoutFormat = false,
630 .shaderStorageImageWriteWithoutFormat = false,
631 .shaderClipDistance = false,
632 .shaderCullDistance = false,
633 .shaderFloat64 = false,
634 .shaderInt64 = false,
635 .shaderInt16 = false,
636 .sparseBinding = false,
637 .variableMultisampleRate = false,
638 .inheritedQueries = false,
639 };
640 }
641
642 void
643 tu_GetPhysicalDeviceFeatures2(VkPhysicalDevice physicalDevice,
644 VkPhysicalDeviceFeatures2 *pFeatures)
645 {
646 vk_foreach_struct(ext, pFeatures->pNext)
647 {
648 switch (ext->sType) {
649 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VARIABLE_POINTERS_FEATURES: {
650 VkPhysicalDeviceVariablePointersFeatures *features = (void *) ext;
651 features->variablePointersStorageBuffer = false;
652 features->variablePointers = false;
653 break;
654 }
655 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MULTIVIEW_FEATURES: {
656 VkPhysicalDeviceMultiviewFeatures *features =
657 (VkPhysicalDeviceMultiviewFeatures *) ext;
658 features->multiview = false;
659 features->multiviewGeometryShader = false;
660 features->multiviewTessellationShader = false;
661 break;
662 }
663 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_DRAW_PARAMETERS_FEATURES: {
664 VkPhysicalDeviceShaderDrawParametersFeatures *features =
665 (VkPhysicalDeviceShaderDrawParametersFeatures *) ext;
666 features->shaderDrawParameters = false;
667 break;
668 }
669 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROTECTED_MEMORY_FEATURES: {
670 VkPhysicalDeviceProtectedMemoryFeatures *features =
671 (VkPhysicalDeviceProtectedMemoryFeatures *) ext;
672 features->protectedMemory = false;
673 break;
674 }
675 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_16BIT_STORAGE_FEATURES: {
676 VkPhysicalDevice16BitStorageFeatures *features =
677 (VkPhysicalDevice16BitStorageFeatures *) ext;
678 features->storageBuffer16BitAccess = false;
679 features->uniformAndStorageBuffer16BitAccess = false;
680 features->storagePushConstant16 = false;
681 features->storageInputOutput16 = false;
682 break;
683 }
684 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SAMPLER_YCBCR_CONVERSION_FEATURES: {
685 VkPhysicalDeviceSamplerYcbcrConversionFeatures *features =
686 (VkPhysicalDeviceSamplerYcbcrConversionFeatures *) ext;
687 features->samplerYcbcrConversion = false;
688 break;
689 }
690 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DESCRIPTOR_INDEXING_FEATURES_EXT: {
691 VkPhysicalDeviceDescriptorIndexingFeaturesEXT *features =
692 (VkPhysicalDeviceDescriptorIndexingFeaturesEXT *) ext;
693 features->shaderInputAttachmentArrayDynamicIndexing = false;
694 features->shaderUniformTexelBufferArrayDynamicIndexing = false;
695 features->shaderStorageTexelBufferArrayDynamicIndexing = false;
696 features->shaderUniformBufferArrayNonUniformIndexing = false;
697 features->shaderSampledImageArrayNonUniformIndexing = false;
698 features->shaderStorageBufferArrayNonUniformIndexing = false;
699 features->shaderStorageImageArrayNonUniformIndexing = false;
700 features->shaderInputAttachmentArrayNonUniformIndexing = false;
701 features->shaderUniformTexelBufferArrayNonUniformIndexing = false;
702 features->shaderStorageTexelBufferArrayNonUniformIndexing = false;
703 features->descriptorBindingUniformBufferUpdateAfterBind = false;
704 features->descriptorBindingSampledImageUpdateAfterBind = false;
705 features->descriptorBindingStorageImageUpdateAfterBind = false;
706 features->descriptorBindingStorageBufferUpdateAfterBind = false;
707 features->descriptorBindingUniformTexelBufferUpdateAfterBind = false;
708 features->descriptorBindingStorageTexelBufferUpdateAfterBind = false;
709 features->descriptorBindingUpdateUnusedWhilePending = false;
710 features->descriptorBindingPartiallyBound = false;
711 features->descriptorBindingVariableDescriptorCount = false;
712 features->runtimeDescriptorArray = false;
713 break;
714 }
715 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_CONDITIONAL_RENDERING_FEATURES_EXT: {
716 VkPhysicalDeviceConditionalRenderingFeaturesEXT *features =
717 (VkPhysicalDeviceConditionalRenderingFeaturesEXT *) ext;
718 features->conditionalRendering = false;
719 features->inheritedConditionalRendering = false;
720 break;
721 }
722 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TRANSFORM_FEEDBACK_FEATURES_EXT: {
723 VkPhysicalDeviceTransformFeedbackFeaturesEXT *features =
724 (VkPhysicalDeviceTransformFeedbackFeaturesEXT *) ext;
725 features->transformFeedback = true;
726 features->geometryStreams = false;
727 break;
728 }
729 default:
730 break;
731 }
732 }
733 return tu_GetPhysicalDeviceFeatures(physicalDevice, &pFeatures->features);
734 }
735
736 void
737 tu_GetPhysicalDeviceProperties(VkPhysicalDevice physicalDevice,
738 VkPhysicalDeviceProperties *pProperties)
739 {
740 TU_FROM_HANDLE(tu_physical_device, pdevice, physicalDevice);
741 VkSampleCountFlags sample_counts =
742 VK_SAMPLE_COUNT_1_BIT | VK_SAMPLE_COUNT_2_BIT | VK_SAMPLE_COUNT_4_BIT;
743
744 /* I have no idea what the maximum size is, but the hardware supports very
745 * large numbers of descriptors (at least 2^16). This limit is based on
746 * CP_LOAD_STATE6, which has a 28-bit field for the DWORD offset, so that
747 * we don't have to think about what to do if that overflows, but really
748 * nothing is likely to get close to this.
749 */
750 const size_t max_descriptor_set_size = (1 << 28) / A6XX_TEX_CONST_DWORDS;
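/* With A6XX_TEX_CONST_DWORDS == 16, the limit above works out to
 * 2^28 / 16 = 2^24, i.e. about 16.8 million descriptors.
 */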
751
752 VkPhysicalDeviceLimits limits = {
753 .maxImageDimension1D = (1 << 14),
754 .maxImageDimension2D = (1 << 14),
755 .maxImageDimension3D = (1 << 11),
756 .maxImageDimensionCube = (1 << 14),
757 .maxImageArrayLayers = (1 << 11),
758 .maxTexelBufferElements = 128 * 1024 * 1024,
759 .maxUniformBufferRange = MAX_UNIFORM_BUFFER_RANGE,
760 .maxStorageBufferRange = MAX_STORAGE_BUFFER_RANGE,
761 .maxPushConstantsSize = MAX_PUSH_CONSTANTS_SIZE,
762 .maxMemoryAllocationCount = UINT32_MAX,
763 .maxSamplerAllocationCount = 64 * 1024,
764 .bufferImageGranularity = 64, /* A cache line */
765 .sparseAddressSpaceSize = 0xffffffffu, /* buffer max size */
766 .maxBoundDescriptorSets = MAX_SETS,
767 .maxPerStageDescriptorSamplers = max_descriptor_set_size,
768 .maxPerStageDescriptorUniformBuffers = max_descriptor_set_size,
769 .maxPerStageDescriptorStorageBuffers = max_descriptor_set_size,
770 .maxPerStageDescriptorSampledImages = max_descriptor_set_size,
771 .maxPerStageDescriptorStorageImages = max_descriptor_set_size,
772 .maxPerStageDescriptorInputAttachments = MAX_RTS,
773 .maxPerStageResources = max_descriptor_set_size,
774 .maxDescriptorSetSamplers = max_descriptor_set_size,
775 .maxDescriptorSetUniformBuffers = max_descriptor_set_size,
776 .maxDescriptorSetUniformBuffersDynamic = MAX_DYNAMIC_UNIFORM_BUFFERS,
777 .maxDescriptorSetStorageBuffers = max_descriptor_set_size,
778 .maxDescriptorSetStorageBuffersDynamic = MAX_DYNAMIC_STORAGE_BUFFERS,
779 .maxDescriptorSetSampledImages = max_descriptor_set_size,
780 .maxDescriptorSetStorageImages = max_descriptor_set_size,
781 .maxDescriptorSetInputAttachments = MAX_RTS,
782 .maxVertexInputAttributes = 32,
783 .maxVertexInputBindings = 32,
784 .maxVertexInputAttributeOffset = 4095,
785 .maxVertexInputBindingStride = 2048,
786 .maxVertexOutputComponents = 128,
787 .maxTessellationGenerationLevel = 64,
788 .maxTessellationPatchSize = 32,
789 .maxTessellationControlPerVertexInputComponents = 128,
790 .maxTessellationControlPerVertexOutputComponents = 128,
791 .maxTessellationControlPerPatchOutputComponents = 120,
792 .maxTessellationControlTotalOutputComponents = 4096,
793 .maxTessellationEvaluationInputComponents = 128,
794 .maxTessellationEvaluationOutputComponents = 128,
795 .maxGeometryShaderInvocations = 32,
796 .maxGeometryInputComponents = 64,
797 .maxGeometryOutputComponents = 128,
798 .maxGeometryOutputVertices = 256,
799 .maxGeometryTotalOutputComponents = 1024,
800 .maxFragmentInputComponents = 124,
801 .maxFragmentOutputAttachments = 8,
802 .maxFragmentDualSrcAttachments = 1,
803 .maxFragmentCombinedOutputResources = 8,
804 .maxComputeSharedMemorySize = 32768,
805 .maxComputeWorkGroupCount = { 65535, 65535, 65535 },
806 .maxComputeWorkGroupInvocations = 2048,
807 .maxComputeWorkGroupSize = { 2048, 2048, 2048 },
808 .subPixelPrecisionBits = 8,
809 .subTexelPrecisionBits = 4 /* FIXME */,
810 .mipmapPrecisionBits = 4 /* FIXME */,
811 .maxDrawIndexedIndexValue = UINT32_MAX,
812 .maxDrawIndirectCount = UINT32_MAX,
813 .maxSamplerLodBias = 16,
814 .maxSamplerAnisotropy = 16,
815 .maxViewports = MAX_VIEWPORTS,
816 .maxViewportDimensions = { (1 << 14), (1 << 14) },
817 .viewportBoundsRange = { INT16_MIN, INT16_MAX },
818 .viewportSubPixelBits = 8,
819 .minMemoryMapAlignment = 4096, /* A page */
820 .minTexelBufferOffsetAlignment = 64,
821 .minUniformBufferOffsetAlignment = 64,
822 .minStorageBufferOffsetAlignment = 64,
823 .minTexelOffset = -32,
824 .maxTexelOffset = 31,
825 .minTexelGatherOffset = -32,
826 .maxTexelGatherOffset = 31,
827 .minInterpolationOffset = -2,
828 .maxInterpolationOffset = 2,
829 .subPixelInterpolationOffsetBits = 8,
830 .maxFramebufferWidth = (1 << 14),
831 .maxFramebufferHeight = (1 << 14),
832 .maxFramebufferLayers = (1 << 10),
833 .framebufferColorSampleCounts = sample_counts,
834 .framebufferDepthSampleCounts = sample_counts,
835 .framebufferStencilSampleCounts = sample_counts,
836 .framebufferNoAttachmentsSampleCounts = sample_counts,
837 .maxColorAttachments = MAX_RTS,
838 .sampledImageColorSampleCounts = sample_counts,
839 .sampledImageIntegerSampleCounts = VK_SAMPLE_COUNT_1_BIT,
840 .sampledImageDepthSampleCounts = sample_counts,
841 .sampledImageStencilSampleCounts = sample_counts,
842 .storageImageSampleCounts = VK_SAMPLE_COUNT_1_BIT,
843 .maxSampleMaskWords = 1,
844 .timestampComputeAndGraphics = true,
845 .timestampPeriod = 1000000000.0 / 19200000.0, /* CP_ALWAYS_ON_COUNTER runs at a fixed 19.2 MHz, ~52.08 ns per tick */
846 .maxClipDistances = 8,
847 .maxCullDistances = 8,
848 .maxCombinedClipAndCullDistances = 8,
849 .discreteQueuePriorities = 1,
850 .pointSizeRange = { 0.125, 255.875 },
851 .lineWidthRange = { 0.0, 7.9921875 },
852 .pointSizeGranularity = (1.0 / 8.0),
853 .lineWidthGranularity = (1.0 / 128.0),
854 .strictLines = false, /* FINISHME */
855 .standardSampleLocations = true,
856 .optimalBufferCopyOffsetAlignment = 128,
857 .optimalBufferCopyRowPitchAlignment = 128,
858 .nonCoherentAtomSize = 64,
859 };
860
861 *pProperties = (VkPhysicalDeviceProperties) {
862 .apiVersion = tu_physical_device_api_version(pdevice),
863 .driverVersion = vk_get_driver_version(),
864 .vendorID = 0, /* TODO */
865 .deviceID = 0,
866 .deviceType = VK_PHYSICAL_DEVICE_TYPE_INTEGRATED_GPU,
867 .limits = limits,
868 .sparseProperties = { 0 },
869 };
870
871 strcpy(pProperties->deviceName, pdevice->name);
872 memcpy(pProperties->pipelineCacheUUID, pdevice->cache_uuid, VK_UUID_SIZE);
873 }
874
875 void
876 tu_GetPhysicalDeviceProperties2(VkPhysicalDevice physicalDevice,
877 VkPhysicalDeviceProperties2 *pProperties)
878 {
879 TU_FROM_HANDLE(tu_physical_device, pdevice, physicalDevice);
880 tu_GetPhysicalDeviceProperties(physicalDevice, &pProperties->properties);
881
882 vk_foreach_struct(ext, pProperties->pNext)
883 {
884 switch (ext->sType) {
885 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PUSH_DESCRIPTOR_PROPERTIES_KHR: {
886 VkPhysicalDevicePushDescriptorPropertiesKHR *properties =
887 (VkPhysicalDevicePushDescriptorPropertiesKHR *) ext;
888 properties->maxPushDescriptors = MAX_PUSH_DESCRIPTORS;
889 break;
890 }
891 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_ID_PROPERTIES: {
892 VkPhysicalDeviceIDProperties *properties =
893 (VkPhysicalDeviceIDProperties *) ext;
894 memcpy(properties->driverUUID, pdevice->driver_uuid, VK_UUID_SIZE);
895 memcpy(properties->deviceUUID, pdevice->device_uuid, VK_UUID_SIZE);
896 properties->deviceLUIDValid = false;
897 break;
898 }
899 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MULTIVIEW_PROPERTIES: {
900 VkPhysicalDeviceMultiviewProperties *properties =
901 (VkPhysicalDeviceMultiviewProperties *) ext;
902 properties->maxMultiviewViewCount = MAX_VIEWS;
903 properties->maxMultiviewInstanceIndex = INT_MAX;
904 break;
905 }
906 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_POINT_CLIPPING_PROPERTIES: {
907 VkPhysicalDevicePointClippingProperties *properties =
908 (VkPhysicalDevicePointClippingProperties *) ext;
909 properties->pointClippingBehavior =
910 VK_POINT_CLIPPING_BEHAVIOR_ALL_CLIP_PLANES;
911 break;
912 }
913 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MAINTENANCE_3_PROPERTIES: {
914 VkPhysicalDeviceMaintenance3Properties *properties =
915 (VkPhysicalDeviceMaintenance3Properties *) ext;
916 /* Make sure everything is addressable by a signed 32-bit int, and
917 * our largest descriptors are 96 bytes. */
918 properties->maxPerSetDescriptors = (1ull << 31) / 96;
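/* (1ull << 31) / 96 works out to roughly 22.4 million descriptors. */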
919 /* Our buffer size fields allow only this much */
920 properties->maxMemoryAllocationSize = 0xFFFFFFFFull;
921 break;
922 }
923 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TRANSFORM_FEEDBACK_PROPERTIES_EXT: {
924 VkPhysicalDeviceTransformFeedbackPropertiesEXT *properties =
925 (VkPhysicalDeviceTransformFeedbackPropertiesEXT *)ext;
926
927 properties->maxTransformFeedbackStreams = IR3_MAX_SO_STREAMS;
928 properties->maxTransformFeedbackBuffers = IR3_MAX_SO_BUFFERS;
929 properties->maxTransformFeedbackBufferSize = UINT32_MAX;
930 properties->maxTransformFeedbackStreamDataSize = 512;
931 properties->maxTransformFeedbackBufferDataSize = 512;
932 properties->maxTransformFeedbackBufferDataStride = 512;
933 properties->transformFeedbackQueries = true;
934 properties->transformFeedbackStreamsLinesTriangles = false;
935 properties->transformFeedbackRasterizationStreamSelect = false;
936 properties->transformFeedbackDraw = true;
937 break;
938 }
939 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SAMPLE_LOCATIONS_PROPERTIES_EXT: {
940 VkPhysicalDeviceSampleLocationsPropertiesEXT *properties =
941 (VkPhysicalDeviceSampleLocationsPropertiesEXT *)ext;
942 properties->sampleLocationSampleCounts = 0;
943 if (pdevice->supported_extensions.EXT_sample_locations) {
944 properties->sampleLocationSampleCounts =
945 VK_SAMPLE_COUNT_1_BIT | VK_SAMPLE_COUNT_2_BIT | VK_SAMPLE_COUNT_4_BIT;
946 }
947 properties->maxSampleLocationGridSize = (VkExtent2D) { 1 , 1 };
948 properties->sampleLocationCoordinateRange[0] = 0.0f;
949 properties->sampleLocationCoordinateRange[1] = 0.9375f;
950 properties->sampleLocationSubPixelBits = 4;
951 properties->variableSampleLocations = true;
952 break;
953 }
954 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SAMPLER_FILTER_MINMAX_PROPERTIES: {
955 VkPhysicalDeviceSamplerFilterMinmaxProperties *properties =
956 (VkPhysicalDeviceSamplerFilterMinmaxProperties *)ext;
957 properties->filterMinmaxImageComponentMapping = true;
958 properties->filterMinmaxSingleComponentFormats = true;
959 break;
960 }
961
962 default:
963 break;
964 }
965 }
966 }
967
968 static const VkQueueFamilyProperties tu_queue_family_properties = {
969 .queueFlags =
970 VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT | VK_QUEUE_TRANSFER_BIT,
971 .queueCount = 1,
972 .timestampValidBits = 48,
973 .minImageTransferGranularity = { 1, 1, 1 },
974 };
975
976 void
977 tu_GetPhysicalDeviceQueueFamilyProperties(
978 VkPhysicalDevice physicalDevice,
979 uint32_t *pQueueFamilyPropertyCount,
980 VkQueueFamilyProperties *pQueueFamilyProperties)
981 {
982 VK_OUTARRAY_MAKE(out, pQueueFamilyProperties, pQueueFamilyPropertyCount);
983
984 vk_outarray_append(&out, p) { *p = tu_queue_family_properties; }
985 }
986
987 void
988 tu_GetPhysicalDeviceQueueFamilyProperties2(
989 VkPhysicalDevice physicalDevice,
990 uint32_t *pQueueFamilyPropertyCount,
991 VkQueueFamilyProperties2 *pQueueFamilyProperties)
992 {
993 VK_OUTARRAY_MAKE(out, pQueueFamilyProperties, pQueueFamilyPropertyCount);
994
995 vk_outarray_append(&out, p)
996 {
997 p->queueFamilyProperties = tu_queue_family_properties;
998 }
999 }
1000
1001 static uint64_t
1002 tu_get_system_heap_size()
1003 {
1004 struct sysinfo info;
1005 sysinfo(&info);
1006
1007 uint64_t total_ram = (uint64_t) info.totalram * (uint64_t) info.mem_unit;
1008
1009 /* We don't want to burn too much RAM with the GPU. If the user has 4GiB
1010 * or less, we use at most half. If they have more than 4GiB, we use 3/4.
1011 */
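/* For example, a device with 4GiB of RAM reports a 2GiB heap, while an
 * 8GiB device reports a 6GiB heap.
 */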
1012 uint64_t available_ram;
1013 if (total_ram <= 4ull * 1024ull * 1024ull * 1024ull)
1014 available_ram = total_ram / 2;
1015 else
1016 available_ram = total_ram * 3 / 4;
1017
1018 return available_ram;
1019 }
1020
1021 void
1022 tu_GetPhysicalDeviceMemoryProperties(
1023 VkPhysicalDevice physicalDevice,
1024 VkPhysicalDeviceMemoryProperties *pMemoryProperties)
1025 {
1026 pMemoryProperties->memoryHeapCount = 1;
1027 pMemoryProperties->memoryHeaps[0].size = tu_get_system_heap_size();
1028 pMemoryProperties->memoryHeaps[0].flags = VK_MEMORY_HEAP_DEVICE_LOCAL_BIT;
1029
1030 pMemoryProperties->memoryTypeCount = 1;
1031 pMemoryProperties->memoryTypes[0].propertyFlags =
1032 VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT |
1033 VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT |
1034 VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
1035 pMemoryProperties->memoryTypes[0].heapIndex = 0;
1036 }
1037
1038 void
1039 tu_GetPhysicalDeviceMemoryProperties2(
1040 VkPhysicalDevice physicalDevice,
1041 VkPhysicalDeviceMemoryProperties2 *pMemoryProperties)
1042 {
1043 return tu_GetPhysicalDeviceMemoryProperties(
1044 physicalDevice, &pMemoryProperties->memoryProperties);
1045 }
1046
1047 static VkResult
1048 tu_queue_init(struct tu_device *device,
1049 struct tu_queue *queue,
1050 uint32_t queue_family_index,
1051 int idx,
1052 VkDeviceQueueCreateFlags flags)
1053 {
1054 queue->_loader_data.loaderMagic = ICD_LOADER_MAGIC;
1055 queue->device = device;
1056 queue->queue_family_index = queue_family_index;
1057 queue->queue_idx = idx;
1058 queue->flags = flags;
1059
1060 int ret = tu_drm_submitqueue_new(device, 0, &queue->msm_queue_id);
1061 if (ret)
1062 return VK_ERROR_INITIALIZATION_FAILED;
1063
1064 tu_fence_init(&queue->submit_fence, false);
1065
1066 return VK_SUCCESS;
1067 }
1068
1069 static void
1070 tu_queue_finish(struct tu_queue *queue)
1071 {
1072 tu_fence_finish(&queue->submit_fence);
1073 tu_drm_submitqueue_close(queue->device, queue->msm_queue_id);
1074 }
1075
1076 static int
1077 tu_get_device_extension_index(const char *name)
1078 {
1079 for (unsigned i = 0; i < TU_DEVICE_EXTENSION_COUNT; ++i) {
1080 if (strcmp(name, tu_device_extensions[i].extensionName) == 0)
1081 return i;
1082 }
1083 return -1;
1084 }
1085
1086 struct PACKED bcolor_entry {
1087 uint32_t fp32[4];
1088 uint16_t ui16[4];
1089 int16_t si16[4];
1090 uint16_t fp16[4];
1091 uint16_t rgb565;
1092 uint16_t rgb5a1;
1093 uint16_t rgba4;
1094 uint8_t __pad0[2];
1095 uint8_t ui8[4];
1096 int8_t si8[4];
1097 uint32_t rgb10a2;
1098 uint32_t z24; /* also s8? */
1099 uint16_t srgb[4]; /* appears to duplicate fp16[], but clamped, used for srgb */
1100 uint8_t __pad1[56];
1101 } border_color[] = {
1102 [VK_BORDER_COLOR_FLOAT_TRANSPARENT_BLACK] = {},
1103 [VK_BORDER_COLOR_INT_TRANSPARENT_BLACK] = {},
1104 [VK_BORDER_COLOR_FLOAT_OPAQUE_BLACK] = {
1105 .fp32[3] = 0x3f800000,
1106 .ui16[3] = 0xffff,
1107 .si16[3] = 0x7fff,
1108 .fp16[3] = 0x3c00,
1109 .rgb5a1 = 0x8000,
1110 .rgba4 = 0xf000,
1111 .ui8[3] = 0xff,
1112 .si8[3] = 0x7f,
1113 .rgb10a2 = 0xc0000000,
1114 .srgb[3] = 0x3c00,
1115 },
1116 [VK_BORDER_COLOR_INT_OPAQUE_BLACK] = {
1117 .fp32[3] = 1,
1118 .fp16[3] = 1,
1119 },
1120 [VK_BORDER_COLOR_FLOAT_OPAQUE_WHITE] = {
1121 .fp32[0 ... 3] = 0x3f800000,
1122 .ui16[0 ... 3] = 0xffff,
1123 .si16[0 ... 3] = 0x7fff,
1124 .fp16[0 ... 3] = 0x3c00,
1125 .rgb565 = 0xffff,
1126 .rgb5a1 = 0xffff,
1127 .rgba4 = 0xffff,
1128 .ui8[0 ... 3] = 0xff,
1129 .si8[0 ... 3] = 0x7f,
1130 .rgb10a2 = 0xffffffff,
1131 .z24 = 0xffffff,
1132 .srgb[0 ... 3] = 0x3c00,
1133 },
1134 [VK_BORDER_COLOR_INT_OPAQUE_WHITE] = {
1135 .fp32[0 ... 3] = 1,
1136 .fp16[0 ... 3] = 1,
1137 },
1138 };
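/* Each entry above is 16+8+8+8 + 2+2+2+2 + 4+4+4+4 + 8 + 56 = 128 bytes;
 * the STATIC_ASSERT in tu_CreateDevice below depends on this, since the
 * table is uploaded as-is and indexed in 128-byte strides.
 */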
1139
1140
1141 VkResult
1142 tu_CreateDevice(VkPhysicalDevice physicalDevice,
1143 const VkDeviceCreateInfo *pCreateInfo,
1144 const VkAllocationCallbacks *pAllocator,
1145 VkDevice *pDevice)
1146 {
1147 TU_FROM_HANDLE(tu_physical_device, physical_device, physicalDevice);
1148 VkResult result;
1149 struct tu_device *device;
1150
1151 /* Check enabled features */
1152 if (pCreateInfo->pEnabledFeatures) {
1153 VkPhysicalDeviceFeatures supported_features;
1154 tu_GetPhysicalDeviceFeatures(physicalDevice, &supported_features);
1155 VkBool32 *supported_feature = (VkBool32 *) &supported_features;
1156 VkBool32 *enabled_feature = (VkBool32 *) pCreateInfo->pEnabledFeatures;
1157 unsigned num_features =
1158 sizeof(VkPhysicalDeviceFeatures) / sizeof(VkBool32);
1159 for (uint32_t i = 0; i < num_features; i++) {
1160 if (enabled_feature[i] && !supported_feature[i])
1161 return vk_error(physical_device->instance,
1162 VK_ERROR_FEATURE_NOT_PRESENT);
1163 }
1164 }
1165
1166 device = vk_zalloc2(&physical_device->instance->alloc, pAllocator,
1167 sizeof(*device), 8, VK_SYSTEM_ALLOCATION_SCOPE_DEVICE);
1168 if (!device)
1169 return vk_error(physical_device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
1170
1171 device->_loader_data.loaderMagic = ICD_LOADER_MAGIC;
1172 device->instance = physical_device->instance;
1173 device->physical_device = physical_device;
1174
1175 if (pAllocator)
1176 device->alloc = *pAllocator;
1177 else
1178 device->alloc = physical_device->instance->alloc;
1179
1180 for (uint32_t i = 0; i < pCreateInfo->enabledExtensionCount; i++) {
1181 const char *ext_name = pCreateInfo->ppEnabledExtensionNames[i];
1182 int index = tu_get_device_extension_index(ext_name);
1183 if (index < 0 ||
1184 !physical_device->supported_extensions.extensions[index]) {
1185 vk_free(&device->alloc, device);
1186 return vk_error(physical_device->instance,
1187 VK_ERROR_EXTENSION_NOT_PRESENT);
1188 }
1189
1190 device->enabled_extensions.extensions[index] = true;
1191 }
1192
1193 for (unsigned i = 0; i < pCreateInfo->queueCreateInfoCount; i++) {
1194 const VkDeviceQueueCreateInfo *queue_create =
1195 &pCreateInfo->pQueueCreateInfos[i];
1196 uint32_t qfi = queue_create->queueFamilyIndex;
1197 device->queues[qfi] = vk_alloc(
1198 &device->alloc, queue_create->queueCount * sizeof(struct tu_queue),
1199 8, VK_SYSTEM_ALLOCATION_SCOPE_DEVICE);
1200 if (!device->queues[qfi]) {
1201 result = VK_ERROR_OUT_OF_HOST_MEMORY;
1202 goto fail_queues;
1203 }
1204
1205 memset(device->queues[qfi], 0,
1206 queue_create->queueCount * sizeof(struct tu_queue));
1207
1208 device->queue_count[qfi] = queue_create->queueCount;
1209
1210 for (unsigned q = 0; q < queue_create->queueCount; q++) {
1211 result = tu_queue_init(device, &device->queues[qfi][q], qfi, q,
1212 queue_create->flags);
1213 if (result != VK_SUCCESS)
1214 goto fail_queues;
1215 }
1216 }
1217 result = VK_ERROR_INITIALIZATION_FAILED; /* if compiler creation fails */
1218 device->compiler = ir3_compiler_create(NULL, physical_device->gpu_id);
1219 if (!device->compiler)
1220 goto fail_queues;
1221
1222 #define VSC_DRAW_STRM_SIZE(pitch) ((pitch) * 32 + 0x100) /* extra size to store VSC_SIZE */
1223 #define VSC_PRIM_STRM_SIZE(pitch) ((pitch) * 32)
1224
1225 device->vsc_draw_strm_pitch = 0x440 * 4;
1226 device->vsc_prim_strm_pitch = 0x1040 * 4;
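/* With these pitches, the BOs allocated below work out to
 * VSC_DRAW_STRM_SIZE(0x1100) = 0x22100 bytes and
 * VSC_PRIM_STRM_SIZE(0x4100) = 0x82000 bytes.
 */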
1227
1228 result = tu_bo_init_new(device, &device->vsc_draw_strm, VSC_DRAW_STRM_SIZE(device->vsc_draw_strm_pitch));
1229 if (result != VK_SUCCESS)
1230 goto fail_vsc_data;
1231
1232 result = tu_bo_init_new(device, &device->vsc_prim_strm, VSC_PRIM_STRM_SIZE(device->vsc_prim_strm_pitch));
1233 if (result != VK_SUCCESS)
1234 goto fail_vsc_data2;
1235
1236 STATIC_ASSERT(sizeof(struct bcolor_entry) == 128);
1237 result = tu_bo_init_new(device, &device->border_color, sizeof(border_color));
1238 if (result != VK_SUCCESS)
1239 goto fail_border_color;
1240
1241 result = tu_bo_map(device, &device->border_color);
1242 if (result != VK_SUCCESS)
1243 goto fail_border_color_map;
1244
1245 memcpy(device->border_color.map, border_color, sizeof(border_color));
1246
1247 VkPipelineCacheCreateInfo ci;
1248 ci.sType = VK_STRUCTURE_TYPE_PIPELINE_CACHE_CREATE_INFO;
1249 ci.pNext = NULL;
1250 ci.flags = 0;
1251 ci.pInitialData = NULL;
1252 ci.initialDataSize = 0;
1253 VkPipelineCache pc;
1254 result =
1255 tu_CreatePipelineCache(tu_device_to_handle(device), &ci, NULL, &pc);
1256 if (result != VK_SUCCESS)
1257 goto fail_pipeline_cache;
1258
1259 device->mem_cache = tu_pipeline_cache_from_handle(pc);
1260
1261 for (unsigned i = 0; i < ARRAY_SIZE(device->scratch_bos); i++)
1262 mtx_init(&device->scratch_bos[i].construct_mtx, mtx_plain);
1263
1264 *pDevice = tu_device_to_handle(device);
1265 return VK_SUCCESS;
1266
1267 fail_pipeline_cache:
1268 fail_border_color_map:
1269 tu_bo_finish(device, &device->border_color);
1270
1271 fail_border_color:
1272 tu_bo_finish(device, &device->vsc_prim_strm);
1273
1274 fail_vsc_data2:
1275 tu_bo_finish(device, &device->vsc_draw_strm);
1276
1277 fail_vsc_data:
1278 ralloc_free(device->compiler);
1279
1280 fail_queues:
1281 for (unsigned i = 0; i < TU_MAX_QUEUE_FAMILIES; i++) {
1282 for (unsigned q = 0; q < device->queue_count[i]; q++)
1283 tu_queue_finish(&device->queues[i][q]);
1284 if (device->queue_count[i])
1285 vk_free(&device->alloc, device->queues[i]);
1286 }
1287
1288 vk_free(&device->alloc, device);
1289 return result;
1290 }
1291
1292 void
1293 tu_DestroyDevice(VkDevice _device, const VkAllocationCallbacks *pAllocator)
1294 {
1295 TU_FROM_HANDLE(tu_device, device, _device);
1296
1297 if (!device)
1298 return;
1299
1300 tu_bo_finish(device, &device->vsc_draw_strm);
1301 tu_bo_finish(device, &device->vsc_prim_strm);
tu_bo_finish(device, &device->border_color);
1302
1303 for (unsigned i = 0; i < TU_MAX_QUEUE_FAMILIES; i++) {
1304 for (unsigned q = 0; q < device->queue_count[i]; q++)
1305 tu_queue_finish(&device->queues[i][q]);
1306 if (device->queue_count[i])
1307 vk_free(&device->alloc, device->queues[i]);
1308 }
1309
1310 for (unsigned i = 0; i < ARRAY_SIZE(device->scratch_bos); i++) {
1311 if (device->scratch_bos[i].initialized)
1312 tu_bo_finish(device, &device->scratch_bos[i].bo);
1313 }
1314
1315 /* the compiler does not use pAllocator */
1316 ralloc_free(device->compiler);
1317
1318 VkPipelineCache pc = tu_pipeline_cache_to_handle(device->mem_cache);
1319 tu_DestroyPipelineCache(tu_device_to_handle(device), pc, NULL);
1320
1321 vk_free(&device->alloc, device);
1322 }
1323
1324 VkResult
1325 tu_get_scratch_bo(struct tu_device *dev, uint64_t size, struct tu_bo **bo)
1326 {
1327 unsigned size_log2 = MAX2(util_logbase2_ceil64(size), MIN_SCRATCH_BO_SIZE_LOG2);
1328 unsigned index = size_log2 - MIN_SCRATCH_BO_SIZE_LOG2;
1329 assert(index < ARRAY_SIZE(dev->scratch_bos));
1330
1331 for (unsigned i = index; i < ARRAY_SIZE(dev->scratch_bos); i++) {
1332 if (p_atomic_read(&dev->scratch_bos[i].initialized)) {
1333 /* Fast path: just return the already-allocated BO. */
1334 *bo = &dev->scratch_bos[i].bo;
1335 return VK_SUCCESS;
1336 }
1337 }
1338
1339 /* Slow path: actually allocate the BO. We take a per-size lock so that
1340 * only one thread performs the (slow) allocation, and other threads asking
1341 * for the same size wait for it instead of racing to create a duplicate BO.
1342 */
1343 mtx_lock(&dev->scratch_bos[index].construct_mtx);
1344
1345 /* Another thread may have allocated it already while we were waiting on
1346 * the lock. We need to check this in order to avoid double-allocating.
1347 */
1348 if (dev->scratch_bos[index].initialized) {
1349 mtx_unlock(&dev->scratch_bos[index].construct_mtx);
1350 *bo = &dev->scratch_bos[index].bo;
1351 return VK_SUCCESS;
1352 }
1353
1354 unsigned bo_size = 1ull << size_log2;
1355 VkResult result = tu_bo_init_new(dev, &dev->scratch_bos[index].bo, bo_size);
1356 if (result != VK_SUCCESS) {
1357 mtx_unlock(&dev->scratch_bos[index].construct_mtx);
1358 return result;
1359 }
1360
1361 p_atomic_set(&dev->scratch_bos[index].initialized, true);
1362
1363 mtx_unlock(&dev->scratch_bos[index].construct_mtx);
1364
1365 *bo = &dev->scratch_bos[index].bo;
1366 return VK_SUCCESS;
1367 }
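/* Usage sketch (hypothetical caller): requests are rounded up to the next
 * power of two, and the per-size BOs live until tu_DestroyDevice(), so the
 * caller never frees the result:
 *
 *   struct tu_bo *bo;
 *   if (tu_get_scratch_bo(dev, size, &bo) == VK_SUCCESS)
 *      ... emit bo->iova into the command stream ...
 */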
1368
1369 VkResult
1370 tu_EnumerateInstanceLayerProperties(uint32_t *pPropertyCount,
1371 VkLayerProperties *pProperties)
1372 {
1373 *pPropertyCount = 0;
1374 return VK_SUCCESS;
1375 }
1376
1377 VkResult
1378 tu_EnumerateDeviceLayerProperties(VkPhysicalDevice physicalDevice,
1379 uint32_t *pPropertyCount,
1380 VkLayerProperties *pProperties)
1381 {
1382 *pPropertyCount = 0;
1383 return VK_SUCCESS;
1384 }
1385
1386 void
1387 tu_GetDeviceQueue2(VkDevice _device,
1388 const VkDeviceQueueInfo2 *pQueueInfo,
1389 VkQueue *pQueue)
1390 {
1391 TU_FROM_HANDLE(tu_device, device, _device);
1392 struct tu_queue *queue;
1393
1394 queue =
1395 &device->queues[pQueueInfo->queueFamilyIndex][pQueueInfo->queueIndex];
1396 if (pQueueInfo->flags != queue->flags) {
1397 /* From the Vulkan 1.1.70 spec:
1398 *
1399 * "The queue returned by vkGetDeviceQueue2 must have the same
1400 * flags value from this structure as that used at device
1401 * creation time in a VkDeviceQueueCreateInfo instance. If no
1402 * matching flags were specified at device creation time then
1403 * pQueue will return VK_NULL_HANDLE."
1404 */
1405 *pQueue = VK_NULL_HANDLE;
1406 return;
1407 }
1408
1409 *pQueue = tu_queue_to_handle(queue);
1410 }
1411
1412 void
1413 tu_GetDeviceQueue(VkDevice _device,
1414 uint32_t queueFamilyIndex,
1415 uint32_t queueIndex,
1416 VkQueue *pQueue)
1417 {
1418 const VkDeviceQueueInfo2 info =
1419 (VkDeviceQueueInfo2) { .sType = VK_STRUCTURE_TYPE_DEVICE_QUEUE_INFO_2,
1420 .queueFamilyIndex = queueFamilyIndex,
1421 .queueIndex = queueIndex };
1422
1423 tu_GetDeviceQueue2(_device, &info, pQueue);
1424 }
1425
1426 VkResult
1427 tu_QueueSubmit(VkQueue _queue,
1428 uint32_t submitCount,
1429 const VkSubmitInfo *pSubmits,
1430 VkFence _fence)
1431 {
1432 TU_FROM_HANDLE(tu_queue, queue, _queue);
1433
1434 for (uint32_t i = 0; i < submitCount; ++i) {
1435 const VkSubmitInfo *submit = pSubmits + i;
1436 const bool last_submit = (i == submitCount - 1);
1437 struct tu_bo_list bo_list;
1438 tu_bo_list_init(&bo_list);
1439
1440 uint32_t entry_count = 0;
1441 for (uint32_t j = 0; j < submit->commandBufferCount; ++j) {
1442 TU_FROM_HANDLE(tu_cmd_buffer, cmdbuf, submit->pCommandBuffers[j]);
1443 entry_count += cmdbuf->cs.entry_count;
1444 }
1445
1446 struct drm_msm_gem_submit_cmd cmds[entry_count];
1447 uint32_t entry_idx = 0;
1448 for (uint32_t j = 0; j < submit->commandBufferCount; ++j) {
1449 TU_FROM_HANDLE(tu_cmd_buffer, cmdbuf, submit->pCommandBuffers[j]);
1450 struct tu_cs *cs = &cmdbuf->cs;
1451 for (unsigned i = 0; i < cs->entry_count; ++i, ++entry_idx) {
1452 cmds[entry_idx].type = MSM_SUBMIT_CMD_BUF;
1453 cmds[entry_idx].submit_idx =
1454 tu_bo_list_add(&bo_list, cs->entries[i].bo,
1455 MSM_SUBMIT_BO_READ | MSM_SUBMIT_BO_DUMP);
1456 cmds[entry_idx].submit_offset = cs->entries[i].offset;
1457 cmds[entry_idx].size = cs->entries[i].size;
1458 cmds[entry_idx].pad = 0;
1459 cmds[entry_idx].nr_relocs = 0;
1460 cmds[entry_idx].relocs = 0;
1461 }
1462
1463 tu_bo_list_merge(&bo_list, &cmdbuf->bo_list);
1464 }
1465
1466 uint32_t flags = MSM_PIPE_3D0;
1467 if (last_submit) {
1468 flags |= MSM_SUBMIT_FENCE_FD_OUT;
1469 }
1470
1471 struct drm_msm_gem_submit req = {
1472 .flags = flags,
1473 .queueid = queue->msm_queue_id,
1474 .bos = (uint64_t)(uintptr_t) bo_list.bo_infos,
1475 .nr_bos = bo_list.count,
1476 .cmds = (uint64_t)(uintptr_t)cmds,
1477 .nr_cmds = entry_count,
1478 };
1479
1480 int ret = drmCommandWriteRead(queue->device->physical_device->local_fd,
1481 DRM_MSM_GEM_SUBMIT,
1482 &req, sizeof(req));
1483 if (ret) {
1484 fprintf(stderr, "submit failed: %s\n", strerror(errno));
1485 abort();
1486 }
1487
1488 tu_bo_list_destroy(&bo_list);
1489
1490 if (last_submit) {
1491 /* no need to merge fences as queue execution is serialized */
1492 tu_fence_update_fd(&queue->submit_fence, req.fence_fd);
1493 }
1494 }
1495
1496 if (_fence != VK_NULL_HANDLE) {
1497 TU_FROM_HANDLE(tu_fence, fence, _fence);
1498 tu_fence_copy(fence, &queue->submit_fence);
1499 }
1500
1501 return VK_SUCCESS;
1502 }
1503
1504 VkResult
1505 tu_QueueWaitIdle(VkQueue _queue)
1506 {
1507 TU_FROM_HANDLE(tu_queue, queue, _queue);
1508
1509 tu_fence_wait_idle(&queue->submit_fence);
1510
1511 return VK_SUCCESS;
1512 }
1513
1514 VkResult
1515 tu_DeviceWaitIdle(VkDevice _device)
1516 {
1517 TU_FROM_HANDLE(tu_device, device, _device);
1518
1519 for (unsigned i = 0; i < TU_MAX_QUEUE_FAMILIES; i++) {
1520 for (unsigned q = 0; q < device->queue_count[i]; q++) {
1521 tu_QueueWaitIdle(tu_queue_to_handle(&device->queues[i][q]));
1522 }
1523 }
1524 return VK_SUCCESS;
1525 }
1526
1527 VkResult
1528 tu_ImportSemaphoreFdKHR(VkDevice _device,
1529 const VkImportSemaphoreFdInfoKHR *pImportSemaphoreFdInfo)
1530 {
1531 tu_stub();
1532
1533 return VK_SUCCESS;
1534 }
1535
1536 VkResult
1537 tu_GetSemaphoreFdKHR(VkDevice _device,
1538 const VkSemaphoreGetFdInfoKHR *pGetFdInfo,
1539 int *pFd)
1540 {
1541 tu_stub();
1542
1543 return VK_SUCCESS;
1544 }
1545
1546 VkResult
1547 tu_ImportFenceFdKHR(VkDevice _device,
1548 const VkImportFenceFdInfoKHR *pImportFenceFdInfo)
1549 {
1550 tu_stub();
1551
1552 return VK_SUCCESS;
1553 }
1554
1555 VkResult
1556 tu_GetFenceFdKHR(VkDevice _device,
1557 const VkFenceGetFdInfoKHR *pGetFdInfo,
1558 int *pFd)
1559 {
1560 tu_stub();
1561
1562 return VK_SUCCESS;
1563 }
1564
1565 VkResult
1566 tu_EnumerateInstanceExtensionProperties(const char *pLayerName,
1567 uint32_t *pPropertyCount,
1568 VkExtensionProperties *pProperties)
1569 {
1570 VK_OUTARRAY_MAKE(out, pProperties, pPropertyCount);
1571
1572 /* We support no layers */
1573 if (pLayerName)
1574 return vk_error(NULL, VK_ERROR_LAYER_NOT_PRESENT);
1575
1576 for (int i = 0; i < TU_INSTANCE_EXTENSION_COUNT; i++) {
1577 if (tu_instance_extensions_supported.extensions[i]) {
1578 vk_outarray_append(&out, prop) { *prop = tu_instance_extensions[i]; }
1579 }
1580 }
1581
1582 return vk_outarray_status(&out);
1583 }
1584
1585 VkResult
1586 tu_EnumerateDeviceExtensionProperties(VkPhysicalDevice physicalDevice,
1587 const char *pLayerName,
1588 uint32_t *pPropertyCount,
1589 VkExtensionProperties *pProperties)
1590 {
1592 TU_FROM_HANDLE(tu_physical_device, device, physicalDevice);
1593 VK_OUTARRAY_MAKE(out, pProperties, pPropertyCount);
1594
1595 /* We support no layers */
1596 if (pLayerName)
1597 return vk_error(NULL, VK_ERROR_LAYER_NOT_PRESENT);
1598
1599 for (int i = 0; i < TU_DEVICE_EXTENSION_COUNT; i++) {
1600 if (device->supported_extensions.extensions[i]) {
1601 vk_outarray_append(&out, prop) { *prop = tu_device_extensions[i]; }
1602 }
1603 }
1604
1605 return vk_outarray_status(&out);
1606 }
1607
1608 PFN_vkVoidFunction
1609 tu_GetInstanceProcAddr(VkInstance _instance, const char *pName)
1610 {
1611 TU_FROM_HANDLE(tu_instance, instance, _instance);
1612
1613 return tu_lookup_entrypoint_checked(
1614 pName, instance ? instance->api_version : 0,
1615 instance ? &instance->enabled_extensions : NULL, NULL);
1616 }
1617
1618 /* The loader wants us to expose a second GetInstanceProcAddr function
1619 * to work around certain LD_PRELOAD issues seen in apps.
1620 */
1621 PUBLIC
1622 VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL
1623 vk_icdGetInstanceProcAddr(VkInstance instance, const char *pName);
1624
1625 PUBLIC
1626 VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL
1627 vk_icdGetInstanceProcAddr(VkInstance instance, const char *pName)
1628 {
1629 return tu_GetInstanceProcAddr(instance, pName);
1630 }
1631
1632 PFN_vkVoidFunction
1633 tu_GetDeviceProcAddr(VkDevice _device, const char *pName)
1634 {
1635 TU_FROM_HANDLE(tu_device, device, _device);
1636
1637 return tu_lookup_entrypoint_checked(pName, device->instance->api_version,
1638 &device->instance->enabled_extensions,
1639 &device->enabled_extensions);
1640 }
1641
1642 static VkResult
1643 tu_alloc_memory(struct tu_device *device,
1644 const VkMemoryAllocateInfo *pAllocateInfo,
1645 const VkAllocationCallbacks *pAllocator,
1646 VkDeviceMemory *pMem)
1647 {
1648 struct tu_device_memory *mem;
1649 VkResult result;
1650
1651 assert(pAllocateInfo->sType == VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO);
1652
1653 if (pAllocateInfo->allocationSize == 0) {
1654 /* Apparently, this is allowed */
1655 *pMem = VK_NULL_HANDLE;
1656 return VK_SUCCESS;
1657 }
1658
1659 mem = vk_alloc2(&device->alloc, pAllocator, sizeof(*mem), 8,
1660 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
1661 if (mem == NULL)
1662 return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
1663
1664 const VkImportMemoryFdInfoKHR *fd_info =
1665 vk_find_struct_const(pAllocateInfo->pNext, IMPORT_MEMORY_FD_INFO_KHR);
1666 if (fd_info && !fd_info->handleType)
1667 fd_info = NULL;
1668
1669 if (fd_info) {
1670 assert(fd_info->handleType ==
1671 VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT ||
1672 fd_info->handleType ==
1673 VK_EXTERNAL_MEMORY_HANDLE_TYPE_DMA_BUF_BIT_EXT);
1674
1675 /*
1676 * TODO Importing the same fd twice gives us the same handle without
1677 * reference counting. We need to maintain a per-instance handle-to-bo
1678 * table and add reference count to tu_bo.
1679 */
1680 result = tu_bo_init_dmabuf(device, &mem->bo,
1681 pAllocateInfo->allocationSize, fd_info->fd);
1682 if (result == VK_SUCCESS) {
1683 /* take ownership and close the fd */
1684 close(fd_info->fd);
1685 }
1686 } else {
1687 result =
1688 tu_bo_init_new(device, &mem->bo, pAllocateInfo->allocationSize);
1689 }
1690
1691 if (result != VK_SUCCESS) {
1692 vk_free2(&device->alloc, pAllocator, mem);
1693 return result;
1694 }
1695
1696 mem->size = pAllocateInfo->allocationSize;
1697 mem->type_index = pAllocateInfo->memoryTypeIndex;
1698
1699 mem->map = NULL;
1700 mem->user_ptr = NULL;
1701
1702 *pMem = tu_device_memory_to_handle(mem);
1703
1704 return VK_SUCCESS;
1705 }
1706
1707 VkResult
1708 tu_AllocateMemory(VkDevice _device,
1709 const VkMemoryAllocateInfo *pAllocateInfo,
1710 const VkAllocationCallbacks *pAllocator,
1711 VkDeviceMemory *pMem)
1712 {
1713 TU_FROM_HANDLE(tu_device, device, _device);
1714 return tu_alloc_memory(device, pAllocateInfo, pAllocator, pMem);
1715 }
1716
1717 void
1718 tu_FreeMemory(VkDevice _device,
1719 VkDeviceMemory _mem,
1720 const VkAllocationCallbacks *pAllocator)
1721 {
1722 TU_FROM_HANDLE(tu_device, device, _device);
1723 TU_FROM_HANDLE(tu_device_memory, mem, _mem);
1724
1725 if (mem == NULL)
1726 return;
1727
1728 tu_bo_finish(device, &mem->bo);
1729 vk_free2(&device->alloc, pAllocator, mem);
1730 }
1731
1732 VkResult
1733 tu_MapMemory(VkDevice _device,
1734 VkDeviceMemory _memory,
1735 VkDeviceSize offset,
1736 VkDeviceSize size,
1737 VkMemoryMapFlags flags,
1738 void **ppData)
1739 {
1740 TU_FROM_HANDLE(tu_device, device, _device);
1741 TU_FROM_HANDLE(tu_device_memory, mem, _memory);
1742 VkResult result;
1743
1744 if (mem == NULL) {
1745 *ppData = NULL;
1746 return VK_SUCCESS;
1747 }
1748
1749 if (mem->user_ptr) {
1750 *ppData = mem->user_ptr;
1751 } else if (!mem->map) {
1752 result = tu_bo_map(device, &mem->bo);
1753 if (result != VK_SUCCESS)
1754 return result;
1755 *ppData = mem->map = mem->bo.map;
1756 } else
1757 *ppData = mem->map;
1758
1759 if (*ppData) {
1760 *ppData += offset;
1761 return VK_SUCCESS;
1762 }
1763
1764 return vk_error(device->instance, VK_ERROR_MEMORY_MAP_FAILED);
1765 }
1766
1767 void
1768 tu_UnmapMemory(VkDevice _device, VkDeviceMemory _memory)
1769 {
1770    /* No-op: BO mappings persist for the BO's lifetime, as in freedreno Gallium. */
1771 }
1772
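/* BOs are mapped write-combined (MSM_BO_WC), so there are no CPU caches to
 * flush or invalidate; both mapped-range functions below are no-ops.
 */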
1773 VkResult
1774 tu_FlushMappedMemoryRanges(VkDevice _device,
1775 uint32_t memoryRangeCount,
1776 const VkMappedMemoryRange *pMemoryRanges)
1777 {
1778 return VK_SUCCESS;
1779 }
1780
1781 VkResult
1782 tu_InvalidateMappedMemoryRanges(VkDevice _device,
1783 uint32_t memoryRangeCount,
1784 const VkMappedMemoryRange *pMemoryRanges)
1785 {
1786 return VK_SUCCESS;
1787 }
1788
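/* Only memory type 0 exists, and a conservative 64-byte alignment is used;
 * e.g. a 100-byte buffer reports size align64(100, 64) = 128.
 */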
1789 void
1790 tu_GetBufferMemoryRequirements(VkDevice _device,
1791 VkBuffer _buffer,
1792 VkMemoryRequirements *pMemoryRequirements)
1793 {
1794 TU_FROM_HANDLE(tu_buffer, buffer, _buffer);
1795
1796 pMemoryRequirements->memoryTypeBits = 1;
1797 pMemoryRequirements->alignment = 64;
1798 pMemoryRequirements->size =
1799 align64(buffer->size, pMemoryRequirements->alignment);
1800 }
1801
1802 void
1803 tu_GetBufferMemoryRequirements2(
1804 VkDevice device,
1805 const VkBufferMemoryRequirementsInfo2 *pInfo,
1806 VkMemoryRequirements2 *pMemoryRequirements)
1807 {
1808 tu_GetBufferMemoryRequirements(device, pInfo->buffer,
1809 &pMemoryRequirements->memoryRequirements);
1810 }
1811
1812 void
1813 tu_GetImageMemoryRequirements(VkDevice _device,
1814 VkImage _image,
1815 VkMemoryRequirements *pMemoryRequirements)
1816 {
1817 TU_FROM_HANDLE(tu_image, image, _image);
1818
1819 pMemoryRequirements->memoryTypeBits = 1;
1820 pMemoryRequirements->size = image->layout.size;
1821 pMemoryRequirements->alignment = image->layout.base_align;
1822 }
1823
1824 void
1825 tu_GetImageMemoryRequirements2(VkDevice device,
1826 const VkImageMemoryRequirementsInfo2 *pInfo,
1827 VkMemoryRequirements2 *pMemoryRequirements)
1828 {
1829 tu_GetImageMemoryRequirements(device, pInfo->image,
1830 &pMemoryRequirements->memoryRequirements);
1831 }
1832
1833 void
1834 tu_GetImageSparseMemoryRequirements(
1835 VkDevice device,
1836 VkImage image,
1837 uint32_t *pSparseMemoryRequirementCount,
1838 VkSparseImageMemoryRequirements *pSparseMemoryRequirements)
1839 {
1840 tu_stub();
1841 }
1842
1843 void
1844 tu_GetImageSparseMemoryRequirements2(
1845 VkDevice device,
1846 const VkImageSparseMemoryRequirementsInfo2 *pInfo,
1847 uint32_t *pSparseMemoryRequirementCount,
1848 VkSparseImageMemoryRequirements2 *pSparseMemoryRequirements)
1849 {
1850 tu_stub();
1851 }
1852
1853 void
1854 tu_GetDeviceMemoryCommitment(VkDevice device,
1855 VkDeviceMemory memory,
1856 VkDeviceSize *pCommittedMemoryInBytes)
1857 {
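   /* No lazily-allocated memory types are exposed, so commitment is always
    * reported as zero.
    */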
1858 *pCommittedMemoryInBytes = 0;
1859 }
1860
1861 VkResult
1862 tu_BindBufferMemory2(VkDevice device,
1863 uint32_t bindInfoCount,
1864 const VkBindBufferMemoryInfo *pBindInfos)
1865 {
1866 for (uint32_t i = 0; i < bindInfoCount; ++i) {
1867 TU_FROM_HANDLE(tu_device_memory, mem, pBindInfos[i].memory);
1868 TU_FROM_HANDLE(tu_buffer, buffer, pBindInfos[i].buffer);
1869
1870 if (mem) {
1871 buffer->bo = &mem->bo;
1872 buffer->bo_offset = pBindInfos[i].memoryOffset;
1873 } else {
1874 buffer->bo = NULL;
1875 }
1876 }
1877 return VK_SUCCESS;
1878 }
1879
1880 VkResult
1881 tu_BindBufferMemory(VkDevice device,
1882 VkBuffer buffer,
1883 VkDeviceMemory memory,
1884 VkDeviceSize memoryOffset)
1885 {
1886 const VkBindBufferMemoryInfo info = {
1887 .sType = VK_STRUCTURE_TYPE_BIND_BUFFER_MEMORY_INFO,
1888 .buffer = buffer,
1889 .memory = memory,
1890 .memoryOffset = memoryOffset
1891 };
1892
1893 return tu_BindBufferMemory2(device, 1, &info);
1894 }
1895
1896 VkResult
1897 tu_BindImageMemory2(VkDevice device,
1898 uint32_t bindInfoCount,
1899 const VkBindImageMemoryInfo *pBindInfos)
1900 {
1901 for (uint32_t i = 0; i < bindInfoCount; ++i) {
1902 TU_FROM_HANDLE(tu_image, image, pBindInfos[i].image);
1903 TU_FROM_HANDLE(tu_device_memory, mem, pBindInfos[i].memory);
1904
1905 if (mem) {
1906 image->bo = &mem->bo;
1907 image->bo_offset = pBindInfos[i].memoryOffset;
1908 } else {
1909 image->bo = NULL;
1910 image->bo_offset = 0;
1911 }
1912 }
1913
1914 return VK_SUCCESS;
1915 }
1916
1917 VkResult
1918 tu_BindImageMemory(VkDevice device,
1919 VkImage image,
1920 VkDeviceMemory memory,
1921 VkDeviceSize memoryOffset)
1922 {
1923 const VkBindImageMemoryInfo info = {
1924       .sType = VK_STRUCTURE_TYPE_BIND_IMAGE_MEMORY_INFO,
1925 .image = image,
1926 .memory = memory,
1927 .memoryOffset = memoryOffset
1928 };
1929
1930 return tu_BindImageMemory2(device, 1, &info);
1931 }
1932
1933 VkResult
1934 tu_QueueBindSparse(VkQueue _queue,
1935 uint32_t bindInfoCount,
1936 const VkBindSparseInfo *pBindInfo,
1937 VkFence _fence)
1938 {
1939 return VK_SUCCESS;
1940 }
1941
1942 /* Queue semaphore functions */
1943
1944 VkResult
1945 tu_CreateSemaphore(VkDevice _device,
1946 const VkSemaphoreCreateInfo *pCreateInfo,
1947 const VkAllocationCallbacks *pAllocator,
1948 VkSemaphore *pSemaphore)
1949 {
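   /* Semaphores currently carry no payload; creating one just allocates the
    * handle.
    */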
1950 TU_FROM_HANDLE(tu_device, device, _device);
1951
1952 struct tu_semaphore *sem =
1953 vk_alloc2(&device->alloc, pAllocator, sizeof(*sem), 8,
1954 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
1955 if (!sem)
1956 return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
1957
1958 *pSemaphore = tu_semaphore_to_handle(sem);
1959 return VK_SUCCESS;
1960 }
1961
1962 void
1963 tu_DestroySemaphore(VkDevice _device,
1964 VkSemaphore _semaphore,
1965 const VkAllocationCallbacks *pAllocator)
1966 {
1967 TU_FROM_HANDLE(tu_device, device, _device);
1968 TU_FROM_HANDLE(tu_semaphore, sem, _semaphore);
1969 if (!_semaphore)
1970 return;
1971
1972 vk_free2(&device->alloc, pAllocator, sem);
1973 }
1974
1975 VkResult
1976 tu_CreateEvent(VkDevice _device,
1977 const VkEventCreateInfo *pCreateInfo,
1978 const VkAllocationCallbacks *pAllocator,
1979 VkEvent *pEvent)
1980 {
1981 TU_FROM_HANDLE(tu_device, device, _device);
1982 struct tu_event *event =
1983 vk_alloc2(&device->alloc, pAllocator, sizeof(*event), 8,
1984 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
1985
1986 if (!event)
1987 return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
1988
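   /* Each event gets its own page-sized (0x1000) BO; the first 64-bit word
    * holds the event state, 0 = reset and 1 = set, which Get/Set/ResetEvent
    * below read and write directly.
    */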
1989 VkResult result = tu_bo_init_new(device, &event->bo, 0x1000);
1990 if (result != VK_SUCCESS)
1991 goto fail_alloc;
1992
1993 result = tu_bo_map(device, &event->bo);
1994 if (result != VK_SUCCESS)
1995 goto fail_map;
1996
1997 *pEvent = tu_event_to_handle(event);
1998
1999 return VK_SUCCESS;
2000
2001 fail_map:
2002 tu_bo_finish(device, &event->bo);
2003 fail_alloc:
2004 vk_free2(&device->alloc, pAllocator, event);
2005 return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
2006 }
2007
2008 void
2009 tu_DestroyEvent(VkDevice _device,
2010 VkEvent _event,
2011 const VkAllocationCallbacks *pAllocator)
2012 {
2013 TU_FROM_HANDLE(tu_device, device, _device);
2014 TU_FROM_HANDLE(tu_event, event, _event);
2015
2016 if (!event)
2017 return;
2018
2019 tu_bo_finish(device, &event->bo);
2020 vk_free2(&device->alloc, pAllocator, event);
2021 }
2022
2023 VkResult
2024 tu_GetEventStatus(VkDevice _device, VkEvent _event)
2025 {
2026 TU_FROM_HANDLE(tu_event, event, _event);
2027
2028 if (*(uint64_t*) event->bo.map == 1)
2029 return VK_EVENT_SET;
2030 return VK_EVENT_RESET;
2031 }
2032
2033 VkResult
2034 tu_SetEvent(VkDevice _device, VkEvent _event)
2035 {
2036 TU_FROM_HANDLE(tu_event, event, _event);
2037 *(uint64_t*) event->bo.map = 1;
2038
2039 return VK_SUCCESS;
2040 }
2041
2042 VkResult
2043 tu_ResetEvent(VkDevice _device, VkEvent _event)
2044 {
2045 TU_FROM_HANDLE(tu_event, event, _event);
2046 *(uint64_t*) event->bo.map = 0;
2047
2048 return VK_SUCCESS;
2049 }
2050
2051 VkResult
2052 tu_CreateBuffer(VkDevice _device,
2053 const VkBufferCreateInfo *pCreateInfo,
2054 const VkAllocationCallbacks *pAllocator,
2055 VkBuffer *pBuffer)
2056 {
2057 TU_FROM_HANDLE(tu_device, device, _device);
2058 struct tu_buffer *buffer;
2059
2060 assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO);
2061
2062 buffer = vk_alloc2(&device->alloc, pAllocator, sizeof(*buffer), 8,
2063 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
2064 if (buffer == NULL)
2065 return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
2066
2067 buffer->size = pCreateInfo->size;
2068 buffer->usage = pCreateInfo->usage;
2069 buffer->flags = pCreateInfo->flags;
2070
2071 *pBuffer = tu_buffer_to_handle(buffer);
2072
2073 return VK_SUCCESS;
2074 }
2075
2076 void
2077 tu_DestroyBuffer(VkDevice _device,
2078 VkBuffer _buffer,
2079 const VkAllocationCallbacks *pAllocator)
2080 {
2081 TU_FROM_HANDLE(tu_device, device, _device);
2082 TU_FROM_HANDLE(tu_buffer, buffer, _buffer);
2083
2084 if (!buffer)
2085 return;
2086
2087 vk_free2(&device->alloc, pAllocator, buffer);
2088 }
2089
2090 VkResult
2091 tu_CreateFramebuffer(VkDevice _device,
2092 const VkFramebufferCreateInfo *pCreateInfo,
2093 const VkAllocationCallbacks *pAllocator,
2094 VkFramebuffer *pFramebuffer)
2095 {
2096 TU_FROM_HANDLE(tu_device, device, _device);
2097 struct tu_framebuffer *framebuffer;
2098
2099 assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO);
2100
2101 size_t size = sizeof(*framebuffer) + sizeof(struct tu_attachment_info) *
2102 pCreateInfo->attachmentCount;
2103 framebuffer = vk_alloc2(&device->alloc, pAllocator, size, 8,
2104 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
2105 if (framebuffer == NULL)
2106 return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
2107
2108 framebuffer->attachment_count = pCreateInfo->attachmentCount;
2109 framebuffer->width = pCreateInfo->width;
2110 framebuffer->height = pCreateInfo->height;
2111 framebuffer->layers = pCreateInfo->layers;
2112 for (uint32_t i = 0; i < pCreateInfo->attachmentCount; i++) {
2113 VkImageView _iview = pCreateInfo->pAttachments[i];
2114 struct tu_image_view *iview = tu_image_view_from_handle(_iview);
2115 framebuffer->attachments[i].attachment = iview;
2116 }
2117
2118 *pFramebuffer = tu_framebuffer_to_handle(framebuffer);
2119 return VK_SUCCESS;
2120 }
2121
2122 void
2123 tu_DestroyFramebuffer(VkDevice _device,
2124 VkFramebuffer _fb,
2125 const VkAllocationCallbacks *pAllocator)
2126 {
2127 TU_FROM_HANDLE(tu_device, device, _device);
2128 TU_FROM_HANDLE(tu_framebuffer, fb, _fb);
2129
2130 if (!fb)
2131 return;
2132 vk_free2(&device->alloc, pAllocator, fb);
2133 }
2134
2135 static enum a6xx_tex_clamp
2136 tu6_tex_wrap(VkSamplerAddressMode address_mode)
2137 {
2138 switch (address_mode) {
2139 case VK_SAMPLER_ADDRESS_MODE_REPEAT:
2140 return A6XX_TEX_REPEAT;
2141 case VK_SAMPLER_ADDRESS_MODE_MIRRORED_REPEAT:
2142 return A6XX_TEX_MIRROR_REPEAT;
2143 case VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE:
2144 return A6XX_TEX_CLAMP_TO_EDGE;
2145 case VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_BORDER:
2146 return A6XX_TEX_CLAMP_TO_BORDER;
2147 case VK_SAMPLER_ADDRESS_MODE_MIRROR_CLAMP_TO_EDGE:
2148       /* only works for power-of-two sizes; needs emulation otherwise! */
2149 return A6XX_TEX_MIRROR_CLAMP;
2150 default:
2151 unreachable("illegal tex wrap mode");
2152 break;
2153 }
2154 }
2155
2156 static enum a6xx_tex_filter
2157 tu6_tex_filter(VkFilter filter, unsigned aniso)
2158 {
2159 switch (filter) {
2160 case VK_FILTER_NEAREST:
2161 return A6XX_TEX_NEAREST;
2162 case VK_FILTER_LINEAR:
2163 return aniso ? A6XX_TEX_ANISO : A6XX_TEX_LINEAR;
2164 case VK_FILTER_CUBIC_EXT:
2165 return A6XX_TEX_CUBIC;
2166 default:
2167 unreachable("illegal texture filter");
2168 break;
2169 }
2170 }
2171
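/* VkCompareOp values match the hardware adreno_compare_func encoding
 * one-to-one, so a plain cast suffices.
 */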
2172 static inline enum adreno_compare_func
2173 tu6_compare_func(VkCompareOp op)
2174 {
2175 return (enum adreno_compare_func) op;
2176 }
2177
2178 static void
2179 tu_init_sampler(struct tu_device *device,
2180 struct tu_sampler *sampler,
2181 const VkSamplerCreateInfo *pCreateInfo)
2182 {
2183 const struct VkSamplerReductionModeCreateInfo *reduction =
2184 vk_find_struct_const(pCreateInfo->pNext, SAMPLER_REDUCTION_MODE_CREATE_INFO);
2185
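   /* The ANISO field is a log2-style encoding: 1x -> 0, 2x -> 1, 4x -> 2,
    * 8x -> 3, 16x -> 4. E.g. maxAnisotropy = 16 gives
    * util_last_bit(MIN2(16 >> 1, 8)) = util_last_bit(8) = 4.
    */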
2186 unsigned aniso = pCreateInfo->anisotropyEnable ?
2187 util_last_bit(MIN2((uint32_t)pCreateInfo->maxAnisotropy >> 1, 8)) : 0;
2188 bool miplinear = (pCreateInfo->mipmapMode == VK_SAMPLER_MIPMAP_MODE_LINEAR);
2189
2190 sampler->descriptor[0] =
2191 COND(miplinear, A6XX_TEX_SAMP_0_MIPFILTER_LINEAR_NEAR) |
2192 A6XX_TEX_SAMP_0_XY_MAG(tu6_tex_filter(pCreateInfo->magFilter, aniso)) |
2193 A6XX_TEX_SAMP_0_XY_MIN(tu6_tex_filter(pCreateInfo->minFilter, aniso)) |
2194 A6XX_TEX_SAMP_0_ANISO(aniso) |
2195 A6XX_TEX_SAMP_0_WRAP_S(tu6_tex_wrap(pCreateInfo->addressModeU)) |
2196 A6XX_TEX_SAMP_0_WRAP_T(tu6_tex_wrap(pCreateInfo->addressModeV)) |
2197 A6XX_TEX_SAMP_0_WRAP_R(tu6_tex_wrap(pCreateInfo->addressModeW)) |
2198 A6XX_TEX_SAMP_0_LOD_BIAS(pCreateInfo->mipLodBias);
2199 sampler->descriptor[1] =
2200 /* COND(!cso->seamless_cube_map, A6XX_TEX_SAMP_1_CUBEMAPSEAMLESSFILTOFF) | */
2201 COND(pCreateInfo->unnormalizedCoordinates, A6XX_TEX_SAMP_1_UNNORM_COORDS) |
2202 A6XX_TEX_SAMP_1_MIN_LOD(pCreateInfo->minLod) |
2203 A6XX_TEX_SAMP_1_MAX_LOD(pCreateInfo->maxLod) |
2204 COND(pCreateInfo->compareEnable,
2205 A6XX_TEX_SAMP_1_COMPARE_FUNC(tu6_compare_func(pCreateInfo->compareOp)));
2206 /* This is an offset into the border_color BO, which we fill with all the
2207 * possible Vulkan border colors in the correct order, so we can just use
2208 * the Vulkan enum with no translation necessary.
2209 */
2210 sampler->descriptor[2] =
2211 A6XX_TEX_SAMP_2_BCOLOR_OFFSET((unsigned) pCreateInfo->borderColor *
2212 sizeof(struct bcolor_entry));
2213 sampler->descriptor[3] = 0;
2214
2215 if (reduction) {
2216 /* note: vulkan enum matches hw */
2217 sampler->descriptor[2] |= A6XX_TEX_SAMP_2_REDUCTION_MODE(reduction->reductionMode);
2218 }
2219
2220 /* TODO:
2221     * A6XX_TEX_SAMP_1_MIPFILTER_LINEAR_FAR disables mipmapping, but Vulkan has no NONE mip filter to map it to?
2222 */
2223 }
2224
2225 VkResult
2226 tu_CreateSampler(VkDevice _device,
2227 const VkSamplerCreateInfo *pCreateInfo,
2228 const VkAllocationCallbacks *pAllocator,
2229 VkSampler *pSampler)
2230 {
2231 TU_FROM_HANDLE(tu_device, device, _device);
2232 struct tu_sampler *sampler;
2233
2234 assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_SAMPLER_CREATE_INFO);
2235
2236 sampler = vk_alloc2(&device->alloc, pAllocator, sizeof(*sampler), 8,
2237 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
2238 if (!sampler)
2239 return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
2240
2241 tu_init_sampler(device, sampler, pCreateInfo);
2242 *pSampler = tu_sampler_to_handle(sampler);
2243
2244 return VK_SUCCESS;
2245 }
2246
2247 void
2248 tu_DestroySampler(VkDevice _device,
2249 VkSampler _sampler,
2250 const VkAllocationCallbacks *pAllocator)
2251 {
2252 TU_FROM_HANDLE(tu_device, device, _device);
2253 TU_FROM_HANDLE(tu_sampler, sampler, _sampler);
2254
2255 if (!sampler)
2256 return;
2257 vk_free2(&device->alloc, pAllocator, sampler);
2258 }
2259
2260 /* vk_icd.h does not declare this function, so we declare it here to
2261 * suppress Wmissing-prototypes.
2262 */
2263 PUBLIC VKAPI_ATTR VkResult VKAPI_CALL
2264 vk_icdNegotiateLoaderICDInterfaceVersion(uint32_t *pSupportedVersion);
2265
2266 PUBLIC VKAPI_ATTR VkResult VKAPI_CALL
2267 vk_icdNegotiateLoaderICDInterfaceVersion(uint32_t *pSupportedVersion)
2268 {
2269 /* For the full details on loader interface versioning, see
2270 * <https://github.com/KhronosGroup/Vulkan-LoaderAndValidationLayers/blob/master/loader/LoaderAndLayerInterface.md>.
2271 * What follows is a condensed summary, to help you navigate the large and
2272 * confusing official doc.
2273 *
2274 * - Loader interface v0 is incompatible with later versions. We don't
2275 * support it.
2276 *
2277 * - In loader interface v1:
2278 * - The first ICD entrypoint called by the loader is
2279 * vk_icdGetInstanceProcAddr(). The ICD must statically expose this
2280 * entrypoint.
2281 * - The ICD must statically expose no other Vulkan symbol unless it
2282 * is linked with -Bsymbolic.
2283 * - Each dispatchable Vulkan handle created by the ICD must be
2284 * a pointer to a struct whose first member is VK_LOADER_DATA. The
2285 * ICD must initialize VK_LOADER_DATA.loadMagic to
2286 * ICD_LOADER_MAGIC.
2287 * - The loader implements vkCreate{PLATFORM}SurfaceKHR() and
2288 * vkDestroySurfaceKHR(). The ICD must be capable of working with
2289 * such loader-managed surfaces.
2290 *
2291 * - Loader interface v2 differs from v1 in:
2292 * - The first ICD entrypoint called by the loader is
2293 * vk_icdNegotiateLoaderICDInterfaceVersion(). The ICD must
2294 * statically expose this entrypoint.
2295 *
2296 * - Loader interface v3 differs from v2 in:
2297 * - The ICD must implement vkCreate{PLATFORM}SurfaceKHR(),
2298     *      vkDestroySurfaceKHR(), and any other API that uses VkSurfaceKHR,
2299 * because the loader no longer does so.
2300 */
2301 *pSupportedVersion = MIN2(*pSupportedVersion, 3u);
2302 return VK_SUCCESS;
2303 }
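/* Illustration (not live code) of the v1 dispatchable-handle rule described
 * above, using VK_LOADER_DATA and ICD_LOADER_MAGIC from vk_icd.h; the struct
 * and field names here are made up:
 *
 *    struct example_dispatchable {
 *       VK_LOADER_DATA _loader_data;  // must be the first member
 *       // ... driver state ...
 *    };
 *
 *    // at creation time, before handing the handle to the application:
 *    obj->_loader_data.loaderMagic = ICD_LOADER_MAGIC;
 */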
2304
2305 VkResult
2306 tu_GetMemoryFdKHR(VkDevice _device,
2307 const VkMemoryGetFdInfoKHR *pGetFdInfo,
2308 int *pFd)
2309 {
2310 TU_FROM_HANDLE(tu_device, device, _device);
2311 TU_FROM_HANDLE(tu_device_memory, memory, pGetFdInfo->memory);
2312
2313 assert(pGetFdInfo->sType == VK_STRUCTURE_TYPE_MEMORY_GET_FD_INFO_KHR);
2314
2315    /* At the moment, we support only the handle types listed below. */
2316 assert(pGetFdInfo->handleType ==
2317 VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT ||
2318 pGetFdInfo->handleType ==
2319 VK_EXTERNAL_MEMORY_HANDLE_TYPE_DMA_BUF_BIT_EXT);
2320
2321 int prime_fd = tu_bo_export_dmabuf(device, &memory->bo);
2322 if (prime_fd < 0)
2323 return vk_error(device->instance, VK_ERROR_OUT_OF_DEVICE_MEMORY);
2324
2325 *pFd = prime_fd;
2326 return VK_SUCCESS;
2327 }
2328
2329 VkResult
2330 tu_GetMemoryFdPropertiesKHR(VkDevice _device,
2331 VkExternalMemoryHandleTypeFlagBits handleType,
2332 int fd,
2333 VkMemoryFdPropertiesKHR *pMemoryFdProperties)
2334 {
2335 assert(handleType == VK_EXTERNAL_MEMORY_HANDLE_TYPE_DMA_BUF_BIT_EXT);
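   /* Imported dma-bufs can be placed in the single memory type we expose. */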
2336 pMemoryFdProperties->memoryTypeBits = 1;
2337 return VK_SUCCESS;
2338 }
2339
2340 void
2341 tu_GetPhysicalDeviceExternalSemaphoreProperties(
2342 VkPhysicalDevice physicalDevice,
2343 const VkPhysicalDeviceExternalSemaphoreInfo *pExternalSemaphoreInfo,
2344 VkExternalSemaphoreProperties *pExternalSemaphoreProperties)
2345 {
2346 pExternalSemaphoreProperties->exportFromImportedHandleTypes = 0;
2347 pExternalSemaphoreProperties->compatibleHandleTypes = 0;
2348 pExternalSemaphoreProperties->externalSemaphoreFeatures = 0;
2349 }
2350
2351 void
2352 tu_GetPhysicalDeviceExternalFenceProperties(
2353 VkPhysicalDevice physicalDevice,
2354 const VkPhysicalDeviceExternalFenceInfo *pExternalFenceInfo,
2355 VkExternalFenceProperties *pExternalFenceProperties)
2356 {
2357 pExternalFenceProperties->exportFromImportedHandleTypes = 0;
2358 pExternalFenceProperties->compatibleHandleTypes = 0;
2359 pExternalFenceProperties->externalFenceFeatures = 0;
2360 }
2361
2362 VkResult
2363 tu_CreateDebugReportCallbackEXT(
2364 VkInstance _instance,
2365 const VkDebugReportCallbackCreateInfoEXT *pCreateInfo,
2366 const VkAllocationCallbacks *pAllocator,
2367 VkDebugReportCallbackEXT *pCallback)
2368 {
2369 TU_FROM_HANDLE(tu_instance, instance, _instance);
2370 return vk_create_debug_report_callback(&instance->debug_report_callbacks,
2371 pCreateInfo, pAllocator,
2372 &instance->alloc, pCallback);
2373 }
2374
2375 void
2376 tu_DestroyDebugReportCallbackEXT(VkInstance _instance,
2377 VkDebugReportCallbackEXT _callback,
2378 const VkAllocationCallbacks *pAllocator)
2379 {
2380 TU_FROM_HANDLE(tu_instance, instance, _instance);
2381 vk_destroy_debug_report_callback(&instance->debug_report_callbacks,
2382 _callback, pAllocator, &instance->alloc);
2383 }
2384
2385 void
2386 tu_DebugReportMessageEXT(VkInstance _instance,
2387 VkDebugReportFlagsEXT flags,
2388 VkDebugReportObjectTypeEXT objectType,
2389 uint64_t object,
2390 size_t location,
2391 int32_t messageCode,
2392 const char *pLayerPrefix,
2393 const char *pMessage)
2394 {
2395 TU_FROM_HANDLE(tu_instance, instance, _instance);
2396 vk_debug_report(&instance->debug_report_callbacks, flags, objectType,
2397 object, location, messageCode, pLayerPrefix, pMessage);
2398 }
2399
2400 void
2401 tu_GetDeviceGroupPeerMemoryFeatures(
2402 VkDevice device,
2403 uint32_t heapIndex,
2404 uint32_t localDeviceIndex,
2405 uint32_t remoteDeviceIndex,
2406 VkPeerMemoryFeatureFlags *pPeerMemoryFeatures)
2407 {
2408 assert(localDeviceIndex == remoteDeviceIndex);
2409
2410 *pPeerMemoryFeatures = VK_PEER_MEMORY_FEATURE_COPY_SRC_BIT |
2411 VK_PEER_MEMORY_FEATURE_COPY_DST_BIT |
2412 VK_PEER_MEMORY_FEATURE_GENERIC_SRC_BIT |
2413 VK_PEER_MEMORY_FEATURE_GENERIC_DST_BIT;
2414 }
2415
2416 void tu_GetPhysicalDeviceMultisamplePropertiesEXT(
2417 VkPhysicalDevice physicalDevice,
2418 VkSampleCountFlagBits samples,
2419 VkMultisamplePropertiesEXT* pMultisampleProperties)
2420 {
2421 TU_FROM_HANDLE(tu_physical_device, pdevice, physicalDevice);
2422
2423 if (samples <= VK_SAMPLE_COUNT_4_BIT && pdevice->supported_extensions.EXT_sample_locations)
2424 pMultisampleProperties->maxSampleLocationGridSize = (VkExtent2D){ 1, 1 };
2425 else
2426 pMultisampleProperties->maxSampleLocationGridSize = (VkExtent2D){ 0, 0 };
2427 }