turnip: implement VK_KHR_shader_draw_parameters
[mesa.git] / src / freedreno / vulkan / tu_device.c
1 /*
2 * Copyright © 2016 Red Hat.
3 * Copyright © 2016 Bas Nieuwenhuizen
4 *
5 * based in part on anv driver which is:
6 * Copyright © 2015 Intel Corporation
7 *
8 * Permission is hereby granted, free of charge, to any person obtaining a
9 * copy of this software and associated documentation files (the "Software"),
10 * to deal in the Software without restriction, including without limitation
11 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
12 * and/or sell copies of the Software, and to permit persons to whom the
13 * Software is furnished to do so, subject to the following conditions:
14 *
15 * The above copyright notice and this permission notice (including the next
16 * paragraph) shall be included in all copies or substantial portions of the
17 * Software.
18 *
19 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
20 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
21 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
22 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
23 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
24 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
25 * DEALINGS IN THE SOFTWARE.
26 */
27
28 #include "tu_private.h"
29
30 #include <fcntl.h>
31 #include <libsync.h>
32 #include <stdbool.h>
33 #include <string.h>
34 #include <sys/mman.h>
35 #include <sys/sysinfo.h>
36 #include <unistd.h>
37 #include <xf86drm.h>
38
39 #include "compiler/glsl_types.h"
40 #include "util/debug.h"
41 #include "util/disk_cache.h"
42 #include "util/u_atomic.h"
43 #include "vk_format.h"
44 #include "vk_util.h"
45
46 #include "drm-uapi/msm_drm.h"
47
48 /* for fd_get_driver/device_uuid() */
49 #include "freedreno/common/freedreno_uuid.h"
50
51 static int
52 tu_device_get_cache_uuid(uint16_t family, void *uuid)
53 {
54 uint32_t mesa_timestamp;
55 uint16_t f = family;
56 memset(uuid, 0, VK_UUID_SIZE);
57 if (!disk_cache_get_function_timestamp(tu_device_get_cache_uuid,
58 &mesa_timestamp))
59 return -1;
60
61 memcpy(uuid, &mesa_timestamp, 4);
62 memcpy((char *) uuid + 4, &f, 2);
63 snprintf((char *) uuid + 6, VK_UUID_SIZE - 10, "tu");
64 return 0;
65 }
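/* Resulting pipeline-cache UUID layout, for reference: bytes 0-3 hold the
 * Mesa build timestamp, bytes 4-5 the GPU family, bytes 6-8 the "tu" tag
 * written by snprintf(), and the remaining bytes stay zero from the
 * memset() above.
 */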
66
67 static VkResult
68 tu_bo_init(struct tu_device *dev,
69 struct tu_bo *bo,
70 uint32_t gem_handle,
71 uint64_t size)
72 {
73 uint64_t iova = tu_gem_info_iova(dev, gem_handle);
74 if (!iova)
75 return VK_ERROR_OUT_OF_DEVICE_MEMORY;
76
77 *bo = (struct tu_bo) {
78 .gem_handle = gem_handle,
79 .size = size,
80 .iova = iova,
81 };
82
83 return VK_SUCCESS;
84 }
85
86 VkResult
87 tu_bo_init_new(struct tu_device *dev, struct tu_bo *bo, uint64_t size)
88 {
89 /* TODO: Choose better flags. As of 2018-11-12, freedreno/drm/msm_bo.c
90 * always sets `flags = MSM_BO_WC`, and we copy that behavior here.
91 */
92 uint32_t gem_handle = tu_gem_new(dev, size, MSM_BO_WC);
93 if (!gem_handle)
94 return vk_error(dev->instance, VK_ERROR_OUT_OF_DEVICE_MEMORY);
95
96 VkResult result = tu_bo_init(dev, bo, gem_handle, size);
97 if (result != VK_SUCCESS) {
98 tu_gem_close(dev, gem_handle);
99 return vk_error(dev->instance, result);
100 }
101
102 return VK_SUCCESS;
103 }
104
105 VkResult
106 tu_bo_init_dmabuf(struct tu_device *dev,
107 struct tu_bo *bo,
108 uint64_t size,
109 int fd)
110 {
111 uint32_t gem_handle = tu_gem_import_dmabuf(dev, fd, size);
112 if (!gem_handle)
113 return vk_error(dev->instance, VK_ERROR_INVALID_EXTERNAL_HANDLE);
114
115 VkResult result = tu_bo_init(dev, bo, gem_handle, size);
116 if (result != VK_SUCCESS) {
117 tu_gem_close(dev, gem_handle);
118 return vk_error(dev->instance, result);
119 }
120
121 return VK_SUCCESS;
122 }
123
124 int
125 tu_bo_export_dmabuf(struct tu_device *dev, struct tu_bo *bo)
126 {
127 return tu_gem_export_dmabuf(dev, bo->gem_handle);
128 }
129
130 VkResult
131 tu_bo_map(struct tu_device *dev, struct tu_bo *bo)
132 {
133 if (bo->map)
134 return VK_SUCCESS;
135
136 uint64_t offset = tu_gem_info_offset(dev, bo->gem_handle);
137 if (!offset)
138 return vk_error(dev->instance, VK_ERROR_OUT_OF_DEVICE_MEMORY);
139
140 /* TODO: Should we use the wrapper os_mmap() like Freedreno does? */
141 void *map = mmap(0, bo->size, PROT_READ | PROT_WRITE, MAP_SHARED,
142 dev->physical_device->local_fd, offset);
143 if (map == MAP_FAILED)
144 return vk_error(dev->instance, VK_ERROR_MEMORY_MAP_FAILED);
145
146 bo->map = map;
147 return VK_SUCCESS;
148 }
149
150 void
151 tu_bo_finish(struct tu_device *dev, struct tu_bo *bo)
152 {
153 assert(bo->gem_handle);
154
155 if (bo->map)
156 munmap(bo->map, bo->size);
157
158 tu_gem_close(dev, bo->gem_handle);
159 }
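/* Typical BO lifecycle as used throughout this driver (a sketch):
 *
 *    struct tu_bo bo;
 *    VkResult result = tu_bo_init_new(dev, &bo, 4096);
 *    if (result == VK_SUCCESS) {
 *       result = tu_bo_map(dev, &bo);  // lazily mmaps via the GEM offset
 *       ...
 *       tu_bo_finish(dev, &bo);        // munmap() + GEM handle close
 *    }
 */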
160
161 static VkResult
162 tu_physical_device_init(struct tu_physical_device *device,
163 struct tu_instance *instance,
164 drmDevicePtr drm_device)
165 {
166 const char *path = drm_device->nodes[DRM_NODE_RENDER];
167 VkResult result = VK_SUCCESS;
168 drmVersionPtr version;
169 int fd;
170 int master_fd = -1;
171
172 fd = open(path, O_RDWR | O_CLOEXEC);
173 if (fd < 0) {
174 return vk_errorf(instance, VK_ERROR_INCOMPATIBLE_DRIVER,
175 "failed to open device %s", path);
176 }
177
178 /* Version 1.3 added MSM_INFO_IOVA. */
179 const int min_version_major = 1;
180 const int min_version_minor = 3;
181
182 version = drmGetVersion(fd);
183 if (!version) {
184 close(fd);
185 return vk_errorf(instance, VK_ERROR_INCOMPATIBLE_DRIVER,
186 "failed to query kernel driver version for device %s",
187 path);
188 }
189
190 if (strcmp(version->name, "msm")) {
191 drmFreeVersion(version);
192 close(fd);
193 return vk_errorf(instance, VK_ERROR_INCOMPATIBLE_DRIVER,
194 "device %s does not use the msm kernel driver", path);
195 }
196
197 if (version->version_major != min_version_major ||
198 version->version_minor < min_version_minor) {
199 result = vk_errorf(instance, VK_ERROR_INCOMPATIBLE_DRIVER,
200 "kernel driver for device %s has version %d.%d, "
201 "but Vulkan requires version >= %d.%d",
202 path, version->version_major, version->version_minor,
203 min_version_major, min_version_minor);
204 drmFreeVersion(version);
205 close(fd);
206 return result;
207 }
208
209 drmFreeVersion(version);
210
211 if (instance->debug_flags & TU_DEBUG_STARTUP)
212 tu_logi("Found compatible device '%s'.", path);
213
214 device->_loader_data.loaderMagic = ICD_LOADER_MAGIC;
215 device->instance = instance;
216 assert(strlen(path) < ARRAY_SIZE(device->path));
217 strncpy(device->path, path, ARRAY_SIZE(device->path));
218
219 if (instance->enabled_extensions.KHR_display) {
220 master_fd =
221 open(drm_device->nodes[DRM_NODE_PRIMARY], O_RDWR | O_CLOEXEC);
222 if (master_fd >= 0) {
223 /* TODO: free master_fd if accel is not working? */
224 }
225 }
226
227 device->master_fd = master_fd;
228 device->local_fd = fd;
229
230 if (tu_drm_get_gpu_id(device, &device->gpu_id)) {
231 if (instance->debug_flags & TU_DEBUG_STARTUP)
232 tu_logi("Could not query the GPU ID");
233 result = vk_errorf(instance, VK_ERROR_INITIALIZATION_FAILED,
234 "could not get GPU ID");
235 goto fail;
236 }
237
238 if (tu_drm_get_gmem_size(device, &device->gmem_size)) {
239 if (instance->debug_flags & TU_DEBUG_STARTUP)
240 tu_logi("Could not query the GMEM size");
241 result = vk_errorf(instance, VK_ERROR_INITIALIZATION_FAILED,
242 "could not get GMEM size");
243 goto fail;
244 }
245
246 if (tu_drm_get_gmem_base(device, &device->gmem_base)) {
247 if (instance->debug_flags & TU_DEBUG_STARTUP)
248 tu_logi("Could not query the GMEM base address");
249 result = vk_errorf(instance, VK_ERROR_INITIALIZATION_FAILED,
250 "could not get GMEM base address");
251 goto fail;
252 }
253
254 memset(device->name, 0, sizeof(device->name));
255 sprintf(device->name, "FD%d", device->gpu_id);
256
257 switch (device->gpu_id) {
258 case 618:
259 device->ccu_offset_gmem = 0x7c000; /* 0x7e000 in some cases? */
260 device->ccu_offset_bypass = 0x10000;
261 device->tile_align_w = 64;
262 device->magic.PC_UNKNOWN_9805 = 0x0;
263 device->magic.SP_UNKNOWN_A0F8 = 0x0;
264 break;
265 case 630:
266 case 640:
267 device->ccu_offset_gmem = 0xf8000;
268 device->ccu_offset_bypass = 0x20000;
269 device->tile_align_w = 64;
270 device->magic.PC_UNKNOWN_9805 = 0x1;
271 device->magic.SP_UNKNOWN_A0F8 = 0x1;
272 break;
273 case 650:
274 device->ccu_offset_gmem = 0x114000;
275 device->ccu_offset_bypass = 0x30000;
276 device->tile_align_w = 96;
277 device->magic.PC_UNKNOWN_9805 = 0x2;
278 device->magic.SP_UNKNOWN_A0F8 = 0x2;
279 break;
280 default:
281 result = vk_errorf(instance, VK_ERROR_INITIALIZATION_FAILED,
282 "device %s is unsupported", device->name);
283 goto fail;
284 }
285 if (tu_device_get_cache_uuid(device->gpu_id, device->cache_uuid)) {
286 result = vk_errorf(instance, VK_ERROR_INITIALIZATION_FAILED,
287 "cannot generate UUID");
288 goto fail;
289 }
290
291 /* The gpu id is already embedded in the uuid, so we just pass the device
292 * name when creating the cache.
293 */
294 char buf[VK_UUID_SIZE * 2 + 1];
295 disk_cache_format_hex_id(buf, device->cache_uuid, VK_UUID_SIZE * 2);
296 device->disk_cache = disk_cache_create(device->name, buf, 0);
297
298 fprintf(stderr, "WARNING: tu is not a conformant vulkan implementation, "
299 "testing use only.\n");
300
301 fd_get_driver_uuid(device->driver_uuid);
302 fd_get_device_uuid(device->device_uuid, device->gpu_id);
303
304 tu_physical_device_get_supported_extensions(device, &device->supported_extensions);
305
306 if (result != VK_SUCCESS) {
307 vk_error(instance, result);
308 goto fail;
309 }
310
311 result = tu_wsi_init(device);
312 if (result != VK_SUCCESS) {
313 vk_error(instance, result);
314 goto fail;
315 }
316
317 return VK_SUCCESS;
318
319 fail:
320 close(fd);
321 if (master_fd != -1)
322 close(master_fd);
323 return result;
324 }
325
326 static void
327 tu_physical_device_finish(struct tu_physical_device *device)
328 {
329 tu_wsi_finish(device);
330
331 disk_cache_destroy(device->disk_cache);
332 close(device->local_fd);
333 if (device->master_fd != -1)
334 close(device->master_fd);
335 }
336
337 static VKAPI_ATTR void *
338 default_alloc_func(void *pUserData,
339 size_t size,
340 size_t align,
341 VkSystemAllocationScope allocationScope)
342 {
343 return malloc(size);
344 }
345
346 static VKAPI_ATTR void *
347 default_realloc_func(void *pUserData,
348 void *pOriginal,
349 size_t size,
350 size_t align,
351 VkSystemAllocationScope allocationScope)
352 {
353 return realloc(pOriginal, size);
354 }
355
356 static VKAPI_ATTR void
357 default_free_func(void *pUserData, void *pMemory)
358 {
359 free(pMemory);
360 }
361
362 static const VkAllocationCallbacks default_alloc = {
363 .pUserData = NULL,
364 .pfnAllocation = default_alloc_func,
365 .pfnReallocation = default_realloc_func,
366 .pfnFree = default_free_func,
367 };
368
369 static const struct debug_control tu_debug_options[] = {
370 { "startup", TU_DEBUG_STARTUP },
371 { "nir", TU_DEBUG_NIR },
372 { "ir3", TU_DEBUG_IR3 },
373 { "nobin", TU_DEBUG_NOBIN },
374 { "sysmem", TU_DEBUG_SYSMEM },
375 { "forcebin", TU_DEBUG_FORCEBIN },
376 { "noubwc", TU_DEBUG_NOUBWC },
377 { NULL, 0 }
378 };
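/* These flags are parsed from the TU_DEBUG environment variable in
 * tu_CreateInstance() below, e.g.:
 *
 *    TU_DEBUG=startup,forcebin ./my_vulkan_app
 */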
379
380 const char *
381 tu_get_debug_option_name(int id)
382 {
383 assert(id < ARRAY_SIZE(tu_debug_options) - 1);
384 return tu_debug_options[id].string;
385 }
386
387 static int
388 tu_get_instance_extension_index(const char *name)
389 {
390 for (unsigned i = 0; i < TU_INSTANCE_EXTENSION_COUNT; ++i) {
391 if (strcmp(name, tu_instance_extensions[i].extensionName) == 0)
392 return i;
393 }
394 return -1;
395 }
396
397 VkResult
398 tu_CreateInstance(const VkInstanceCreateInfo *pCreateInfo,
399 const VkAllocationCallbacks *pAllocator,
400 VkInstance *pInstance)
401 {
402 struct tu_instance *instance;
403 VkResult result;
404
405 assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_INSTANCE_CREATE_INFO);
406
407 uint32_t client_version;
408 if (pCreateInfo->pApplicationInfo &&
409 pCreateInfo->pApplicationInfo->apiVersion != 0) {
410 client_version = pCreateInfo->pApplicationInfo->apiVersion;
411 } else {
412 tu_EnumerateInstanceVersion(&client_version);
413 }
414
415 instance = vk_zalloc2(&default_alloc, pAllocator, sizeof(*instance), 8,
416 VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
417 if (!instance)
418 return vk_error(NULL, VK_ERROR_OUT_OF_HOST_MEMORY);
419
420 instance->_loader_data.loaderMagic = ICD_LOADER_MAGIC;
421
422 if (pAllocator)
423 instance->alloc = *pAllocator;
424 else
425 instance->alloc = default_alloc;
426
427 instance->api_version = client_version;
428 instance->physical_device_count = -1;
429
430 instance->debug_flags =
431 parse_debug_string(getenv("TU_DEBUG"), tu_debug_options);
432
433 if (instance->debug_flags & TU_DEBUG_STARTUP)
434 tu_logi("Created an instance");
435
436 for (uint32_t i = 0; i < pCreateInfo->enabledExtensionCount; i++) {
437 const char *ext_name = pCreateInfo->ppEnabledExtensionNames[i];
438 int index = tu_get_instance_extension_index(ext_name);
439
440 if (index < 0 || !tu_instance_extensions_supported.extensions[index]) {
441 vk_free2(&default_alloc, pAllocator, instance);
442 return vk_error(instance, VK_ERROR_EXTENSION_NOT_PRESENT);
443 }
444
445 instance->enabled_extensions.extensions[index] = true;
446 }
447
448 result = vk_debug_report_instance_init(&instance->debug_report_callbacks);
449 if (result != VK_SUCCESS) {
450 vk_free2(&default_alloc, pAllocator, instance);
451 return vk_error(instance, result);
452 }
453
454 glsl_type_singleton_init_or_ref();
455
456 VG(VALGRIND_CREATE_MEMPOOL(instance, 0, false));
457
458 *pInstance = tu_instance_to_handle(instance);
459
460 return VK_SUCCESS;
461 }
462
463 void
464 tu_DestroyInstance(VkInstance _instance,
465 const VkAllocationCallbacks *pAllocator)
466 {
467 TU_FROM_HANDLE(tu_instance, instance, _instance);
468
469 if (!instance)
470 return;
471
472 for (int i = 0; i < instance->physical_device_count; ++i) {
473 tu_physical_device_finish(instance->physical_devices + i);
474 }
475
476 VG(VALGRIND_DESTROY_MEMPOOL(instance));
477
478 glsl_type_singleton_decref();
479
480 vk_debug_report_instance_destroy(&instance->debug_report_callbacks);
481
482 vk_free(&instance->alloc, instance);
483 }
484
485 static VkResult
486 tu_enumerate_devices(struct tu_instance *instance)
487 {
488 /* TODO: Check for more devices? */
489 drmDevicePtr devices[8];
490 VkResult result = VK_ERROR_INCOMPATIBLE_DRIVER;
491 int max_devices;
492
493 instance->physical_device_count = 0;
494
495 max_devices = drmGetDevices2(0, devices, ARRAY_SIZE(devices));
496
497 if (instance->debug_flags & TU_DEBUG_STARTUP)
498 tu_logi("Found %d drm nodes", max_devices);
499
500 if (max_devices < 1)
501 return vk_error(instance, VK_ERROR_INCOMPATIBLE_DRIVER);
502
503 for (unsigned i = 0; i < (unsigned) max_devices; i++) {
504 if (devices[i]->available_nodes & 1 << DRM_NODE_RENDER &&
505 devices[i]->bustype == DRM_BUS_PLATFORM) {
506
507 result = tu_physical_device_init(
508 instance->physical_devices + instance->physical_device_count,
509 instance, devices[i]);
510 if (result == VK_SUCCESS)
511 ++instance->physical_device_count;
512 else if (result != VK_ERROR_INCOMPATIBLE_DRIVER)
513 break;
514 }
515 }
516 drmFreeDevices(devices, max_devices);
517
518 return result;
519 }
520
521 VkResult
522 tu_EnumeratePhysicalDevices(VkInstance _instance,
523 uint32_t *pPhysicalDeviceCount,
524 VkPhysicalDevice *pPhysicalDevices)
525 {
526 TU_FROM_HANDLE(tu_instance, instance, _instance);
527 VK_OUTARRAY_MAKE(out, pPhysicalDevices, pPhysicalDeviceCount);
528
529 VkResult result;
530
531 if (instance->physical_device_count < 0) {
532 result = tu_enumerate_devices(instance);
533 if (result != VK_SUCCESS && result != VK_ERROR_INCOMPATIBLE_DRIVER)
534 return result;
535 }
536
537 for (uint32_t i = 0; i < instance->physical_device_count; ++i) {
538 vk_outarray_append(&out, p)
539 {
540 *p = tu_physical_device_to_handle(instance->physical_devices + i);
541 }
542 }
543
544 return vk_outarray_status(&out);
545 }
546
547 VkResult
548 tu_EnumeratePhysicalDeviceGroups(
549 VkInstance _instance,
550 uint32_t *pPhysicalDeviceGroupCount,
551 VkPhysicalDeviceGroupProperties *pPhysicalDeviceGroupProperties)
552 {
553 TU_FROM_HANDLE(tu_instance, instance, _instance);
554 VK_OUTARRAY_MAKE(out, pPhysicalDeviceGroupProperties,
555 pPhysicalDeviceGroupCount);
556 VkResult result;
557
558 if (instance->physical_device_count < 0) {
559 result = tu_enumerate_devices(instance);
560 if (result != VK_SUCCESS && result != VK_ERROR_INCOMPATIBLE_DRIVER)
561 return result;
562 }
563
564 for (uint32_t i = 0; i < instance->physical_device_count; ++i) {
565 vk_outarray_append(&out, p)
566 {
567 p->physicalDeviceCount = 1;
568 p->physicalDevices[0] =
569 tu_physical_device_to_handle(instance->physical_devices + i);
570 p->subsetAllocation = false;
571 }
572 }
573
574 return vk_outarray_status(&out);
575 }
576
577 void
578 tu_GetPhysicalDeviceFeatures(VkPhysicalDevice physicalDevice,
579 VkPhysicalDeviceFeatures *pFeatures)
580 {
581 memset(pFeatures, 0, sizeof(*pFeatures));
582
583 *pFeatures = (VkPhysicalDeviceFeatures) {
584 .robustBufferAccess = true,
585 .fullDrawIndexUint32 = true,
586 .imageCubeArray = true,
587 .independentBlend = true,
588 .geometryShader = true,
589 .tessellationShader = true,
590 .sampleRateShading = true,
591 .dualSrcBlend = true,
592 .logicOp = true,
593 .multiDrawIndirect = true,
594 .drawIndirectFirstInstance = true,
595 .depthClamp = true,
596 .depthBiasClamp = false,
597 .fillModeNonSolid = false,
598 .depthBounds = true,
599 .wideLines = false,
600 .largePoints = false,
601 .alphaToOne = true,
602 .multiViewport = false,
603 .samplerAnisotropy = true,
604 .textureCompressionETC2 = true,
605 .textureCompressionASTC_LDR = true,
606 .textureCompressionBC = true,
607 .occlusionQueryPrecise = true,
608 .pipelineStatisticsQuery = false,
609 .vertexPipelineStoresAndAtomics = false,
610 .fragmentStoresAndAtomics = false,
611 .shaderTessellationAndGeometryPointSize = false,
612 .shaderImageGatherExtended = false,
613 .shaderStorageImageExtendedFormats = false,
614 .shaderStorageImageMultisample = false,
615 .shaderUniformBufferArrayDynamicIndexing = false,
616 .shaderSampledImageArrayDynamicIndexing = false,
617 .shaderStorageBufferArrayDynamicIndexing = false,
618 .shaderStorageImageArrayDynamicIndexing = false,
619 .shaderStorageImageReadWithoutFormat = false,
620 .shaderStorageImageWriteWithoutFormat = false,
621 .shaderClipDistance = false,
622 .shaderCullDistance = false,
623 .shaderFloat64 = false,
624 .shaderInt64 = false,
625 .shaderInt16 = false,
626 .sparseBinding = false,
627 .variableMultisampleRate = false,
628 .inheritedQueries = false,
629 };
630 }
631
632 void
633 tu_GetPhysicalDeviceFeatures2(VkPhysicalDevice physicalDevice,
634 VkPhysicalDeviceFeatures2 *pFeatures)
635 {
636 vk_foreach_struct(ext, pFeatures->pNext)
637 {
638 switch (ext->sType) {
639 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VULKAN_1_1_FEATURES:
640 *((VkPhysicalDeviceVulkan11Features*) ext) = (VkPhysicalDeviceVulkan11Features) {
641 .storageBuffer16BitAccess = false,
642 .uniformAndStorageBuffer16BitAccess = false,
643 .storagePushConstant16 = false,
644 .storageInputOutput16 = false,
645 .multiview = false,
646 .multiviewGeometryShader = false,
647 .multiviewTessellationShader = false,
648 .variablePointersStorageBuffer = false,
649 .variablePointers = false,
650 .protectedMemory = false,
651 .samplerYcbcrConversion = true,
652 .shaderDrawParameters = true,
653 };
654 break;
655 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VARIABLE_POINTERS_FEATURES: {
656 VkPhysicalDeviceVariablePointersFeatures *features = (void *) ext;
657 features->variablePointersStorageBuffer = false;
658 features->variablePointers = false;
659 break;
660 }
661 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MULTIVIEW_FEATURES: {
662 VkPhysicalDeviceMultiviewFeatures *features =
663 (VkPhysicalDeviceMultiviewFeatures *) ext;
664 features->multiview = false;
665 features->multiviewGeometryShader = false;
666 features->multiviewTessellationShader = false;
667 break;
668 }
669 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_DRAW_PARAMETERS_FEATURES: {
670 VkPhysicalDeviceShaderDrawParametersFeatures *features =
671 (VkPhysicalDeviceShaderDrawParametersFeatures *) ext;
672 features->shaderDrawParameters = true;
673 break;
674 }
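/* shaderDrawParameters (VK_KHR_shader_draw_parameters, the subject of this
 * change) exposes gl_BaseVertexARB, gl_BaseInstanceARB and gl_DrawIDARB
 * (SPIR-V BaseVertex/BaseInstance/DrawIndex) to vertex shaders. A sketch
 * of how an application would query it (hypothetical app-side code, not
 * driver code):
 *
 *    VkPhysicalDeviceShaderDrawParametersFeatures sdp = {
 *       .sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_DRAW_PARAMETERS_FEATURES,
 *    };
 *    VkPhysicalDeviceFeatures2 features2 = {
 *       .sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FEATURES_2,
 *       .pNext = &sdp,
 *    };
 *    vkGetPhysicalDeviceFeatures2(physical_device, &features2);
 *    // sdp.shaderDrawParameters is VK_TRUE here; chaining the same struct
 *    // into VkDeviceCreateInfo::pNext enables the feature.
 */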
675 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROTECTED_MEMORY_FEATURES: {
676 VkPhysicalDeviceProtectedMemoryFeatures *features =
677 (VkPhysicalDeviceProtectedMemoryFeatures *) ext;
678 features->protectedMemory = false;
679 break;
680 }
681 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_16BIT_STORAGE_FEATURES: {
682 VkPhysicalDevice16BitStorageFeatures *features =
683 (VkPhysicalDevice16BitStorageFeatures *) ext;
684 features->storageBuffer16BitAccess = false;
685 features->uniformAndStorageBuffer16BitAccess = false;
686 features->storagePushConstant16 = false;
687 features->storageInputOutput16 = false;
688 break;
689 }
690 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SAMPLER_YCBCR_CONVERSION_FEATURES: {
691 VkPhysicalDeviceSamplerYcbcrConversionFeatures *features =
692 (VkPhysicalDeviceSamplerYcbcrConversionFeatures *) ext;
693 features->samplerYcbcrConversion = true;
694 break;
695 }
696 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DESCRIPTOR_INDEXING_FEATURES_EXT: {
697 VkPhysicalDeviceDescriptorIndexingFeaturesEXT *features =
698 (VkPhysicalDeviceDescriptorIndexingFeaturesEXT *) ext;
699 features->shaderInputAttachmentArrayDynamicIndexing = false;
700 features->shaderUniformTexelBufferArrayDynamicIndexing = false;
701 features->shaderStorageTexelBufferArrayDynamicIndexing = false;
702 features->shaderUniformBufferArrayNonUniformIndexing = false;
703 features->shaderSampledImageArrayNonUniformIndexing = false;
704 features->shaderStorageBufferArrayNonUniformIndexing = false;
705 features->shaderStorageImageArrayNonUniformIndexing = false;
706 features->shaderInputAttachmentArrayNonUniformIndexing = false;
707 features->shaderUniformTexelBufferArrayNonUniformIndexing = false;
708 features->shaderStorageTexelBufferArrayNonUniformIndexing = false;
709 features->descriptorBindingUniformBufferUpdateAfterBind = false;
710 features->descriptorBindingSampledImageUpdateAfterBind = false;
711 features->descriptorBindingStorageImageUpdateAfterBind = false;
712 features->descriptorBindingStorageBufferUpdateAfterBind = false;
713 features->descriptorBindingUniformTexelBufferUpdateAfterBind = false;
714 features->descriptorBindingStorageTexelBufferUpdateAfterBind = false;
715 features->descriptorBindingUpdateUnusedWhilePending = false;
716 features->descriptorBindingPartiallyBound = false;
717 features->descriptorBindingVariableDescriptorCount = false;
718 features->runtimeDescriptorArray = false;
719 break;
720 }
721 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_CONDITIONAL_RENDERING_FEATURES_EXT: {
722 VkPhysicalDeviceConditionalRenderingFeaturesEXT *features =
723 (VkPhysicalDeviceConditionalRenderingFeaturesEXT *) ext;
724 features->conditionalRendering = false;
725 features->inheritedConditionalRendering = false;
726 break;
727 }
728 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TRANSFORM_FEEDBACK_FEATURES_EXT: {
729 VkPhysicalDeviceTransformFeedbackFeaturesEXT *features =
730 (VkPhysicalDeviceTransformFeedbackFeaturesEXT *) ext;
731 features->transformFeedback = true;
732 features->geometryStreams = false;
733 break;
734 }
735 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_INDEX_TYPE_UINT8_FEATURES_EXT: {
736 VkPhysicalDeviceIndexTypeUint8FeaturesEXT *features =
737 (VkPhysicalDeviceIndexTypeUint8FeaturesEXT *)ext;
738 features->indexTypeUint8 = true;
739 break;
740 }
741 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VERTEX_ATTRIBUTE_DIVISOR_FEATURES_EXT: {
742 VkPhysicalDeviceVertexAttributeDivisorFeaturesEXT *features =
743 (VkPhysicalDeviceVertexAttributeDivisorFeaturesEXT *)ext;
744 features->vertexAttributeInstanceRateDivisor = true;
745 features->vertexAttributeInstanceRateZeroDivisor = true;
746 break;
747 }
748 default:
749 break;
750 }
751 }
752 return tu_GetPhysicalDeviceFeatures(physicalDevice, &pFeatures->features);
753 }
754
755 void
756 tu_GetPhysicalDeviceProperties(VkPhysicalDevice physicalDevice,
757 VkPhysicalDeviceProperties *pProperties)
758 {
759 TU_FROM_HANDLE(tu_physical_device, pdevice, physicalDevice);
760 VkSampleCountFlags sample_counts =
761 VK_SAMPLE_COUNT_1_BIT | VK_SAMPLE_COUNT_2_BIT | VK_SAMPLE_COUNT_4_BIT;
762
763 /* I have no idea what the maximum size is, but the hardware supports very
764 * large numbers of descriptors (at least 2^16). This limit is based on
765 * CP_LOAD_STATE6, which has a 28-bit field for the DWORD offset, so that
766 * we don't have to think about what to do if that overflows, but really
767 * nothing is likely to get close to this.
768 */
769 const size_t max_descriptor_set_size = (1 << 28) / A6XX_TEX_CONST_DWORDS;
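/* With A6XX_TEX_CONST_DWORDS == 16 (its value in tu_private.h), this works
 * out to (1 << 28) / 16 == 16M descriptors, used for most limits below.
 */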
770
771 VkPhysicalDeviceLimits limits = {
772 .maxImageDimension1D = (1 << 14),
773 .maxImageDimension2D = (1 << 14),
774 .maxImageDimension3D = (1 << 11),
775 .maxImageDimensionCube = (1 << 14),
776 .maxImageArrayLayers = (1 << 11),
777 .maxTexelBufferElements = 128 * 1024 * 1024,
778 .maxUniformBufferRange = MAX_UNIFORM_BUFFER_RANGE,
779 .maxStorageBufferRange = MAX_STORAGE_BUFFER_RANGE,
780 .maxPushConstantsSize = MAX_PUSH_CONSTANTS_SIZE,
781 .maxMemoryAllocationCount = UINT32_MAX,
782 .maxSamplerAllocationCount = 64 * 1024,
783 .bufferImageGranularity = 64, /* A cache line */
784 .sparseAddressSpaceSize = 0xffffffffu, /* buffer max size */
785 .maxBoundDescriptorSets = MAX_SETS,
786 .maxPerStageDescriptorSamplers = max_descriptor_set_size,
787 .maxPerStageDescriptorUniformBuffers = max_descriptor_set_size,
788 .maxPerStageDescriptorStorageBuffers = max_descriptor_set_size,
789 .maxPerStageDescriptorSampledImages = max_descriptor_set_size,
790 .maxPerStageDescriptorStorageImages = max_descriptor_set_size,
791 .maxPerStageDescriptorInputAttachments = MAX_RTS,
792 .maxPerStageResources = max_descriptor_set_size,
793 .maxDescriptorSetSamplers = max_descriptor_set_size,
794 .maxDescriptorSetUniformBuffers = max_descriptor_set_size,
795 .maxDescriptorSetUniformBuffersDynamic = MAX_DYNAMIC_UNIFORM_BUFFERS,
796 .maxDescriptorSetStorageBuffers = max_descriptor_set_size,
797 .maxDescriptorSetStorageBuffersDynamic = MAX_DYNAMIC_STORAGE_BUFFERS,
798 .maxDescriptorSetSampledImages = max_descriptor_set_size,
799 .maxDescriptorSetStorageImages = max_descriptor_set_size,
800 .maxDescriptorSetInputAttachments = MAX_RTS,
801 .maxVertexInputAttributes = 32,
802 .maxVertexInputBindings = 32,
803 .maxVertexInputAttributeOffset = 4095,
804 .maxVertexInputBindingStride = 2048,
805 .maxVertexOutputComponents = 128,
806 .maxTessellationGenerationLevel = 64,
807 .maxTessellationPatchSize = 32,
808 .maxTessellationControlPerVertexInputComponents = 128,
809 .maxTessellationControlPerVertexOutputComponents = 128,
810 .maxTessellationControlPerPatchOutputComponents = 120,
811 .maxTessellationControlTotalOutputComponents = 4096,
812 .maxTessellationEvaluationInputComponents = 128,
813 .maxTessellationEvaluationOutputComponents = 128,
814 .maxGeometryShaderInvocations = 32,
815 .maxGeometryInputComponents = 64,
816 .maxGeometryOutputComponents = 128,
817 .maxGeometryOutputVertices = 256,
818 .maxGeometryTotalOutputComponents = 1024,
819 .maxFragmentInputComponents = 124,
820 .maxFragmentOutputAttachments = 8,
821 .maxFragmentDualSrcAttachments = 1,
822 .maxFragmentCombinedOutputResources = 8,
823 .maxComputeSharedMemorySize = 32768,
824 .maxComputeWorkGroupCount = { 65535, 65535, 65535 },
825 .maxComputeWorkGroupInvocations = 2048,
826 .maxComputeWorkGroupSize = { 2048, 2048, 2048 },
827 .subPixelPrecisionBits = 8,
828 .subTexelPrecisionBits = 8,
829 .mipmapPrecisionBits = 8,
830 .maxDrawIndexedIndexValue = UINT32_MAX,
831 .maxDrawIndirectCount = UINT32_MAX,
832 .maxSamplerLodBias = 4095.0 / 256.0, /* [-16, 15.99609375] */
833 .maxSamplerAnisotropy = 16,
834 .maxViewports = MAX_VIEWPORTS,
835 .maxViewportDimensions = { (1 << 14), (1 << 14) },
836 .viewportBoundsRange = { INT16_MIN, INT16_MAX },
837 .viewportSubPixelBits = 8,
838 .minMemoryMapAlignment = 4096, /* A page */
839 .minTexelBufferOffsetAlignment = 64,
840 .minUniformBufferOffsetAlignment = 64,
841 .minStorageBufferOffsetAlignment = 64,
842 .minTexelOffset = -16,
843 .maxTexelOffset = 15,
844 .minTexelGatherOffset = -32,
845 .maxTexelGatherOffset = 31,
846 .minInterpolationOffset = -0.5,
847 .maxInterpolationOffset = 0.4375,
848 .subPixelInterpolationOffsetBits = 4,
849 .maxFramebufferWidth = (1 << 14),
850 .maxFramebufferHeight = (1 << 14),
851 .maxFramebufferLayers = (1 << 10),
852 .framebufferColorSampleCounts = sample_counts,
853 .framebufferDepthSampleCounts = sample_counts,
854 .framebufferStencilSampleCounts = sample_counts,
855 .framebufferNoAttachmentsSampleCounts = sample_counts,
856 .maxColorAttachments = MAX_RTS,
857 .sampledImageColorSampleCounts = sample_counts,
858 .sampledImageIntegerSampleCounts = VK_SAMPLE_COUNT_1_BIT,
859 .sampledImageDepthSampleCounts = sample_counts,
860 .sampledImageStencilSampleCounts = sample_counts,
861 .storageImageSampleCounts = VK_SAMPLE_COUNT_1_BIT,
862 .maxSampleMaskWords = 1,
863 .timestampComputeAndGraphics = true,
864 .timestampPeriod = 1000000000.0 / 19200000.0, /* CP_ALWAYS_ON_COUNTER runs at a fixed 19.2 MHz, ~52.08 ns per tick */
865 .maxClipDistances = 8,
866 .maxCullDistances = 8,
867 .maxCombinedClipAndCullDistances = 8,
868 .discreteQueuePriorities = 1,
869 .pointSizeRange = { 0.125, 255.875 },
870 .lineWidthRange = { 0.0, 7.9921875 },
871 .pointSizeGranularity = (1.0 / 8.0),
872 .lineWidthGranularity = (1.0 / 128.0),
873 .strictLines = false, /* FINISHME */
874 .standardSampleLocations = true,
875 .optimalBufferCopyOffsetAlignment = 128,
876 .optimalBufferCopyRowPitchAlignment = 128,
877 .nonCoherentAtomSize = 64,
878 };
879
880 *pProperties = (VkPhysicalDeviceProperties) {
881 .apiVersion = tu_physical_device_api_version(pdevice),
882 .driverVersion = vk_get_driver_version(),
883 .vendorID = 0, /* TODO */
884 .deviceID = 0,
885 .deviceType = VK_PHYSICAL_DEVICE_TYPE_INTEGRATED_GPU,
886 .limits = limits,
887 .sparseProperties = { 0 },
888 };
889
890 strcpy(pProperties->deviceName, pdevice->name);
891 memcpy(pProperties->pipelineCacheUUID, pdevice->cache_uuid, VK_UUID_SIZE);
892 }
893
894 void
895 tu_GetPhysicalDeviceProperties2(VkPhysicalDevice physicalDevice,
896 VkPhysicalDeviceProperties2 *pProperties)
897 {
898 TU_FROM_HANDLE(tu_physical_device, pdevice, physicalDevice);
899 tu_GetPhysicalDeviceProperties(physicalDevice, &pProperties->properties);
900
901 vk_foreach_struct(ext, pProperties->pNext)
902 {
903 switch (ext->sType) {
904 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PUSH_DESCRIPTOR_PROPERTIES_KHR: {
905 VkPhysicalDevicePushDescriptorPropertiesKHR *properties =
906 (VkPhysicalDevicePushDescriptorPropertiesKHR *) ext;
907 properties->maxPushDescriptors = MAX_PUSH_DESCRIPTORS;
908 break;
909 }
910 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_ID_PROPERTIES: {
911 VkPhysicalDeviceIDProperties *properties =
912 (VkPhysicalDeviceIDProperties *) ext;
913 memcpy(properties->driverUUID, pdevice->driver_uuid, VK_UUID_SIZE);
914 memcpy(properties->deviceUUID, pdevice->device_uuid, VK_UUID_SIZE);
915 properties->deviceLUIDValid = false;
916 break;
917 }
918 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MULTIVIEW_PROPERTIES: {
919 VkPhysicalDeviceMultiviewProperties *properties =
920 (VkPhysicalDeviceMultiviewProperties *) ext;
921 properties->maxMultiviewViewCount = MAX_VIEWS;
922 properties->maxMultiviewInstanceIndex = INT_MAX;
923 break;
924 }
925 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_POINT_CLIPPING_PROPERTIES: {
926 VkPhysicalDevicePointClippingProperties *properties =
927 (VkPhysicalDevicePointClippingProperties *) ext;
928 properties->pointClippingBehavior =
929 VK_POINT_CLIPPING_BEHAVIOR_ALL_CLIP_PLANES;
930 break;
931 }
932 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MAINTENANCE_3_PROPERTIES: {
933 VkPhysicalDeviceMaintenance3Properties *properties =
934 (VkPhysicalDeviceMaintenance3Properties *) ext;
935 /* Make sure everything is addressable by a signed 32-bit int, and
936 * our largest descriptors are 96 bytes. */
937 properties->maxPerSetDescriptors = (1ull << 31) / 96;
938 /* Our buffer size fields allow only this much */
939 properties->maxMemoryAllocationSize = 0xFFFFFFFFull;
940 break;
941 }
942 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TRANSFORM_FEEDBACK_PROPERTIES_EXT: {
943 VkPhysicalDeviceTransformFeedbackPropertiesEXT *properties =
944 (VkPhysicalDeviceTransformFeedbackPropertiesEXT *)ext;
945
946 properties->maxTransformFeedbackStreams = IR3_MAX_SO_STREAMS;
947 properties->maxTransformFeedbackBuffers = IR3_MAX_SO_BUFFERS;
948 properties->maxTransformFeedbackBufferSize = UINT32_MAX;
949 properties->maxTransformFeedbackStreamDataSize = 512;
950 properties->maxTransformFeedbackBufferDataSize = 512;
951 properties->maxTransformFeedbackBufferDataStride = 512;
952 properties->transformFeedbackQueries = true;
953 properties->transformFeedbackStreamsLinesTriangles = false;
954 properties->transformFeedbackRasterizationStreamSelect = false;
955 properties->transformFeedbackDraw = true;
956 break;
957 }
958 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SAMPLE_LOCATIONS_PROPERTIES_EXT: {
959 VkPhysicalDeviceSampleLocationsPropertiesEXT *properties =
960 (VkPhysicalDeviceSampleLocationsPropertiesEXT *)ext;
961 properties->sampleLocationSampleCounts = 0;
962 if (pdevice->supported_extensions.EXT_sample_locations) {
963 properties->sampleLocationSampleCounts =
964 VK_SAMPLE_COUNT_1_BIT | VK_SAMPLE_COUNT_2_BIT | VK_SAMPLE_COUNT_4_BIT;
965 }
966 properties->maxSampleLocationGridSize = (VkExtent2D) { 1 , 1 };
967 properties->sampleLocationCoordinateRange[0] = 0.0f;
968 properties->sampleLocationCoordinateRange[1] = 0.9375f;
969 properties->sampleLocationSubPixelBits = 4;
970 properties->variableSampleLocations = true;
971 break;
972 }
973 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SAMPLER_FILTER_MINMAX_PROPERTIES: {
974 VkPhysicalDeviceSamplerFilterMinmaxProperties *properties =
975 (VkPhysicalDeviceSamplerFilterMinmaxProperties *)ext;
976 properties->filterMinmaxImageComponentMapping = true;
977 properties->filterMinmaxSingleComponentFormats = true;
978 break;
979 }
980 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SUBGROUP_PROPERTIES: {
981 VkPhysicalDeviceSubgroupProperties *properties =
982 (VkPhysicalDeviceSubgroupProperties *)ext;
983 properties->subgroupSize = 64;
984 properties->supportedStages = VK_SHADER_STAGE_COMPUTE_BIT;
985 properties->supportedOperations = VK_SUBGROUP_FEATURE_BASIC_BIT |
986 VK_SUBGROUP_FEATURE_VOTE_BIT;
987 properties->quadOperationsInAllStages = false;
988 break;
989 }
990 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VERTEX_ATTRIBUTE_DIVISOR_PROPERTIES_EXT: {
991 VkPhysicalDeviceVertexAttributeDivisorPropertiesEXT *props =
992 (VkPhysicalDeviceVertexAttributeDivisorPropertiesEXT *)ext;
993 props->maxVertexAttribDivisor = UINT32_MAX;
994 break;
995 }
996 default:
997 break;
998 }
999 }
1000 }
1001
1002 static const VkQueueFamilyProperties tu_queue_family_properties = {
1003 .queueFlags =
1004 VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT | VK_QUEUE_TRANSFER_BIT,
1005 .queueCount = 1,
1006 .timestampValidBits = 48,
1007 .minImageTransferGranularity = { 1, 1, 1 },
1008 };
1009
1010 void
1011 tu_GetPhysicalDeviceQueueFamilyProperties(
1012 VkPhysicalDevice physicalDevice,
1013 uint32_t *pQueueFamilyPropertyCount,
1014 VkQueueFamilyProperties *pQueueFamilyProperties)
1015 {
1016 VK_OUTARRAY_MAKE(out, pQueueFamilyProperties, pQueueFamilyPropertyCount);
1017
1018 vk_outarray_append(&out, p) { *p = tu_queue_family_properties; }
1019 }
1020
1021 void
1022 tu_GetPhysicalDeviceQueueFamilyProperties2(
1023 VkPhysicalDevice physicalDevice,
1024 uint32_t *pQueueFamilyPropertyCount,
1025 VkQueueFamilyProperties2 *pQueueFamilyProperties)
1026 {
1027 VK_OUTARRAY_MAKE(out, pQueueFamilyProperties, pQueueFamilyPropertyCount);
1028
1029 vk_outarray_append(&out, p)
1030 {
1031 p->queueFamilyProperties = tu_queue_family_properties;
1032 }
1033 }
1034
1035 static uint64_t
1036 tu_get_system_heap_size()
1037 {
1038 struct sysinfo info;
1039 sysinfo(&info);
1040
1041 uint64_t total_ram = (uint64_t) info.totalram * (uint64_t) info.mem_unit;
1042
1043 /* We don't want to burn too much ram with the GPU. If the user has 4GiB
1044 * or less, we use at most half. If they have more than 4GiB, we use 3/4.
1045 */
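/* For example: a device with 8 GiB of RAM advertises a 6 GiB heap, while
 * one with 4 GiB advertises a 2 GiB heap.
 */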
1046 uint64_t available_ram;
1047 if (total_ram <= 4ull * 1024ull * 1024ull * 1024ull)
1048 available_ram = total_ram / 2;
1049 else
1050 available_ram = total_ram * 3 / 4;
1051
1052 return available_ram;
1053 }
1054
1055 void
1056 tu_GetPhysicalDeviceMemoryProperties(
1057 VkPhysicalDevice physicalDevice,
1058 VkPhysicalDeviceMemoryProperties *pMemoryProperties)
1059 {
1060 pMemoryProperties->memoryHeapCount = 1;
1061 pMemoryProperties->memoryHeaps[0].size = tu_get_system_heap_size();
1062 pMemoryProperties->memoryHeaps[0].flags = VK_MEMORY_HEAP_DEVICE_LOCAL_BIT;
1063
1064 pMemoryProperties->memoryTypeCount = 1;
1065 pMemoryProperties->memoryTypes[0].propertyFlags =
1066 VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT |
1067 VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT |
1068 VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
1069 pMemoryProperties->memoryTypes[0].heapIndex = 0;
1070 }
1071
1072 void
1073 tu_GetPhysicalDeviceMemoryProperties2(
1074 VkPhysicalDevice physicalDevice,
1075 VkPhysicalDeviceMemoryProperties2 *pMemoryProperties)
1076 {
1077 return tu_GetPhysicalDeviceMemoryProperties(
1078 physicalDevice, &pMemoryProperties->memoryProperties);
1079 }
1080
1081 static VkResult
1082 tu_queue_init(struct tu_device *device,
1083 struct tu_queue *queue,
1084 uint32_t queue_family_index,
1085 int idx,
1086 VkDeviceQueueCreateFlags flags)
1087 {
1088 queue->_loader_data.loaderMagic = ICD_LOADER_MAGIC;
1089 queue->device = device;
1090 queue->queue_family_index = queue_family_index;
1091 queue->queue_idx = idx;
1092 queue->flags = flags;
1093
1094 int ret = tu_drm_submitqueue_new(device, 0, &queue->msm_queue_id);
1095 if (ret)
1096 return VK_ERROR_INITIALIZATION_FAILED;
1097
1098 tu_fence_init(&queue->submit_fence, false);
1099
1100 return VK_SUCCESS;
1101 }
1102
1103 static void
1104 tu_queue_finish(struct tu_queue *queue)
1105 {
1106 tu_fence_finish(&queue->submit_fence);
1107 tu_drm_submitqueue_close(queue->device, queue->msm_queue_id);
1108 }
1109
1110 static int
1111 tu_get_device_extension_index(const char *name)
1112 {
1113 for (unsigned i = 0; i < TU_DEVICE_EXTENSION_COUNT; ++i) {
1114 if (strcmp(name, tu_device_extensions[i].extensionName) == 0)
1115 return i;
1116 }
1117 return -1;
1118 }
1119
1120 struct PACKED bcolor_entry {
1121 uint32_t fp32[4];
1122 uint16_t ui16[4];
1123 int16_t si16[4];
1124 uint16_t fp16[4];
1125 uint16_t rgb565;
1126 uint16_t rgb5a1;
1127 uint16_t rgba4;
1128 uint8_t __pad0[2];
1129 uint8_t ui8[4];
1130 int8_t si8[4];
1131 uint32_t rgb10a2;
1132 uint32_t z24; /* also s8? */
1133 uint16_t srgb[4]; /* appears to duplicate fp16[], but clamped, used for srgb */
1134 uint8_t __pad1[56];
1135 } border_color[] = {
1136 [VK_BORDER_COLOR_FLOAT_TRANSPARENT_BLACK] = {},
1137 [VK_BORDER_COLOR_INT_TRANSPARENT_BLACK] = {},
1138 [VK_BORDER_COLOR_FLOAT_OPAQUE_BLACK] = {
1139 .fp32[3] = 0x3f800000,
1140 .ui16[3] = 0xffff,
1141 .si16[3] = 0x7fff,
1142 .fp16[3] = 0x3c00,
1143 .rgb5a1 = 0x8000,
1144 .rgba4 = 0xf000,
1145 .ui8[3] = 0xff,
1146 .si8[3] = 0x7f,
1147 .rgb10a2 = 0xc0000000,
1148 .srgb[3] = 0x3c00,
1149 },
1150 [VK_BORDER_COLOR_INT_OPAQUE_BLACK] = {
1151 .fp32[3] = 1,
1152 .fp16[3] = 1,
1153 },
1154 [VK_BORDER_COLOR_FLOAT_OPAQUE_WHITE] = {
1155 .fp32[0 ... 3] = 0x3f800000,
1156 .ui16[0 ... 3] = 0xffff,
1157 .si16[0 ... 3] = 0x7fff,
1158 .fp16[0 ... 3] = 0x3c00,
1159 .rgb565 = 0xffff,
1160 .rgb5a1 = 0xffff,
1161 .rgba4 = 0xffff,
1162 .ui8[0 ... 3] = 0xff,
1163 .si8[0 ... 3] = 0x7f,
1164 .rgb10a2 = 0xffffffff,
1165 .z24 = 0xffffff,
1166 .srgb[0 ... 3] = 0x3c00,
1167 },
1168 [VK_BORDER_COLOR_INT_OPAQUE_WHITE] = {
1169 .fp32[0 ... 3] = 1,
1170 .fp16[0 ... 3] = 1,
1171 },
1172 };
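/* Each entry is exactly 128 bytes: 40 bytes of fp32/ui16/si16/fp16
 * channels, 8 bytes of packed 16-bit formats plus padding, 8 bytes of
 * 8-bit channels, 8 bytes of rgb10a2/z24, 8 bytes of srgb, and 56 bytes
 * of trailing padding -- matching the STATIC_ASSERT in tu_CreateDevice().
 */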
1173
1174
1175 VkResult
1176 tu_CreateDevice(VkPhysicalDevice physicalDevice,
1177 const VkDeviceCreateInfo *pCreateInfo,
1178 const VkAllocationCallbacks *pAllocator,
1179 VkDevice *pDevice)
1180 {
1181 TU_FROM_HANDLE(tu_physical_device, physical_device, physicalDevice);
1182 VkResult result;
1183 struct tu_device *device;
1184
1185 /* Check enabled features */
1186 if (pCreateInfo->pEnabledFeatures) {
1187 VkPhysicalDeviceFeatures supported_features;
1188 tu_GetPhysicalDeviceFeatures(physicalDevice, &supported_features);
1189 VkBool32 *supported_feature = (VkBool32 *) &supported_features;
1190 VkBool32 *enabled_feature = (VkBool32 *) pCreateInfo->pEnabledFeatures;
1191 unsigned num_features =
1192 sizeof(VkPhysicalDeviceFeatures) / sizeof(VkBool32);
1193 for (uint32_t i = 0; i < num_features; i++) {
1194 if (enabled_feature[i] && !supported_feature[i])
1195 return vk_error(physical_device->instance,
1196 VK_ERROR_FEATURE_NOT_PRESENT);
1197 }
1198 }
1199
1200 device = vk_zalloc2(&physical_device->instance->alloc, pAllocator,
1201 sizeof(*device), 8, VK_SYSTEM_ALLOCATION_SCOPE_DEVICE);
1202 if (!device)
1203 return vk_error(physical_device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
1204
1205 device->_loader_data.loaderMagic = ICD_LOADER_MAGIC;
1206 device->instance = physical_device->instance;
1207 device->physical_device = physical_device;
1208
1209 if (pAllocator)
1210 device->alloc = *pAllocator;
1211 else
1212 device->alloc = physical_device->instance->alloc;
1213
1214 for (uint32_t i = 0; i < pCreateInfo->enabledExtensionCount; i++) {
1215 const char *ext_name = pCreateInfo->ppEnabledExtensionNames[i];
1216 int index = tu_get_device_extension_index(ext_name);
1217 if (index < 0 ||
1218 !physical_device->supported_extensions.extensions[index]) {
1219 vk_free(&device->alloc, device);
1220 return vk_error(physical_device->instance,
1221 VK_ERROR_EXTENSION_NOT_PRESENT);
1222 }
1223
1224 device->enabled_extensions.extensions[index] = true;
1225 }
1226
1227 for (unsigned i = 0; i < pCreateInfo->queueCreateInfoCount; i++) {
1228 const VkDeviceQueueCreateInfo *queue_create =
1229 &pCreateInfo->pQueueCreateInfos[i];
1230 uint32_t qfi = queue_create->queueFamilyIndex;
1231 device->queues[qfi] = vk_alloc(
1232 &device->alloc, queue_create->queueCount * sizeof(struct tu_queue),
1233 8, VK_SYSTEM_ALLOCATION_SCOPE_DEVICE);
1234 if (!device->queues[qfi]) {
1235 result = VK_ERROR_OUT_OF_HOST_MEMORY;
1236 goto fail_queues;
1237 }
1238
1239 memset(device->queues[qfi], 0,
1240 queue_create->queueCount * sizeof(struct tu_queue));
1241
1242 device->queue_count[qfi] = queue_create->queueCount;
1243
1244 for (unsigned q = 0; q < queue_create->queueCount; q++) {
1245 result = tu_queue_init(device, &device->queues[qfi][q], qfi, q,
1246 queue_create->flags);
1247 if (result != VK_SUCCESS)
1248 goto fail_queues;
1249 }
1250 }
1251
1252 device->compiler = ir3_compiler_create(NULL, physical_device->gpu_id);
1253 if (!device->compiler)
1254 { result = VK_ERROR_INITIALIZATION_FAILED; goto fail_queues; }
1255
1256 #define VSC_DRAW_STRM_SIZE(pitch) ((pitch) * 32 + 0x100) /* extra size to store VSC_SIZE */
1257 #define VSC_PRIM_STRM_SIZE(pitch) ((pitch) * 32)
1258
1259 device->vsc_draw_strm_pitch = 0x440 * 4;
1260 device->vsc_prim_strm_pitch = 0x1040 * 4;
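/* With these pitches the allocations below come out to
 * VSC_DRAW_STRM_SIZE(0x1100) = 0x1100 * 32 + 0x100 = 0x22100 bytes and
 * VSC_PRIM_STRM_SIZE(0x4100) = 0x4100 * 32 = 0x82000 bytes.
 */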
1261
1262 result = tu_bo_init_new(device, &device->vsc_draw_strm, VSC_DRAW_STRM_SIZE(device->vsc_draw_strm_pitch));
1263 if (result != VK_SUCCESS)
1264 goto fail_vsc_data;
1265
1266 result = tu_bo_init_new(device, &device->vsc_prim_strm, VSC_PRIM_STRM_SIZE(device->vsc_prim_strm_pitch));
1267 if (result != VK_SUCCESS)
1268 goto fail_vsc_data2;
1269
1270 STATIC_ASSERT(sizeof(struct bcolor_entry) == 128);
1271 result = tu_bo_init_new(device, &device->border_color, sizeof(border_color));
1272 if (result != VK_SUCCESS)
1273 goto fail_border_color;
1274
1275 result = tu_bo_map(device, &device->border_color);
1276 if (result != VK_SUCCESS)
1277 goto fail_border_color_map;
1278
1279 memcpy(device->border_color.map, border_color, sizeof(border_color));
1280
1281 VkPipelineCacheCreateInfo ci;
1282 ci.sType = VK_STRUCTURE_TYPE_PIPELINE_CACHE_CREATE_INFO;
1283 ci.pNext = NULL;
1284 ci.flags = 0;
1285 ci.pInitialData = NULL;
1286 ci.initialDataSize = 0;
1287 VkPipelineCache pc;
1288 result =
1289 tu_CreatePipelineCache(tu_device_to_handle(device), &ci, NULL, &pc);
1290 if (result != VK_SUCCESS)
1291 goto fail_pipeline_cache;
1292
1293 device->mem_cache = tu_pipeline_cache_from_handle(pc);
1294
1295 for (unsigned i = 0; i < ARRAY_SIZE(device->scratch_bos); i++)
1296 mtx_init(&device->scratch_bos[i].construct_mtx, mtx_plain);
1297
1298 *pDevice = tu_device_to_handle(device);
1299 return VK_SUCCESS;
1300
1301 fail_pipeline_cache:
1302 fail_border_color_map:
1303 tu_bo_finish(device, &device->border_color);
1304
1305 fail_border_color:
1306 tu_bo_finish(device, &device->vsc_prim_strm);
1307
1308 fail_vsc_data2:
1309 tu_bo_finish(device, &device->vsc_draw_strm);
1310
1311 fail_vsc_data:
1312 ralloc_free(device->compiler);
1313
1314 fail_queues:
1315 for (unsigned i = 0; i < TU_MAX_QUEUE_FAMILIES; i++) {
1316 for (unsigned q = 0; q < device->queue_count[i]; q++)
1317 tu_queue_finish(&device->queues[i][q]);
1318 if (device->queue_count[i])
1319 vk_free(&device->alloc, device->queues[i]);
1320 }
1321
1322 vk_free(&device->alloc, device);
1323 return result;
1324 }
1325
1326 void
1327 tu_DestroyDevice(VkDevice _device, const VkAllocationCallbacks *pAllocator)
1328 {
1329 TU_FROM_HANDLE(tu_device, device, _device);
1330
1331 if (!device)
1332 return;
1333
1334 tu_bo_finish(device, &device->vsc_draw_strm);
1335 tu_bo_finish(device, &device->vsc_prim_strm);
1336
1337 for (unsigned i = 0; i < TU_MAX_QUEUE_FAMILIES; i++) {
1338 for (unsigned q = 0; q < device->queue_count[i]; q++)
1339 tu_queue_finish(&device->queues[i][q]);
1340 if (device->queue_count[i])
1341 vk_free(&device->alloc, device->queues[i]);
1342 }
1343
1344 for (unsigned i = 0; i < ARRAY_SIZE(device->scratch_bos); i++) {
1345 if (device->scratch_bos[i].initialized)
1346 tu_bo_finish(device, &device->scratch_bos[i].bo);
1347 }
1348
1349 /* the compiler does not use pAllocator */
1350 ralloc_free(device->compiler);
1351
1352 VkPipelineCache pc = tu_pipeline_cache_to_handle(device->mem_cache);
1353 tu_DestroyPipelineCache(tu_device_to_handle(device), pc, NULL);
1354
1355 vk_free(&device->alloc, device);
1356 }
1357
1358 VkResult
1359 tu_get_scratch_bo(struct tu_device *dev, uint64_t size, struct tu_bo **bo)
1360 {
1361 unsigned size_log2 = MAX2(util_logbase2_ceil64(size), MIN_SCRATCH_BO_SIZE_LOG2);
1362 unsigned index = size_log2 - MIN_SCRATCH_BO_SIZE_LOG2;
1363 assert(index < ARRAY_SIZE(dev->scratch_bos));
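/* For example, assuming MIN_SCRATCH_BO_SIZE_LOG2 is 12 (4 KiB), a 100 KiB
 * request rounds up to size_log2 == 17 (128 KiB) and uses slot 5.
 */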
1364
1365 for (unsigned i = index; i < ARRAY_SIZE(dev->scratch_bos); i++) {
1366 if (p_atomic_read(&dev->scratch_bos[i].initialized)) {
1367 /* Fast path: just return the already-allocated BO. */
1368 *bo = &dev->scratch_bos[i].bo;
1369 return VK_SUCCESS;
1370 }
1371 }
1372
1373 /* Slow path: actually allocate the BO. We take a per-size lock because
1374 * allocation is slow, and we don't want threads asking for scratch BOs
1375 * of other sizes to block while it finishes.
1376 */
1377 mtx_lock(&dev->scratch_bos[index].construct_mtx);
1378
1379 /* Another thread may have allocated it already while we were waiting on
1380 * the lock. We need to check this in order to avoid double-allocating.
1381 */
1382 if (dev->scratch_bos[index].initialized) {
1383 mtx_unlock(&dev->scratch_bos[index].construct_mtx);
1384 *bo = &dev->scratch_bos[index].bo;
1385 return VK_SUCCESS;
1386 }
1387
1388 unsigned bo_size = 1ull << size_log2;
1389 VkResult result = tu_bo_init_new(dev, &dev->scratch_bos[index].bo, bo_size);
1390 if (result != VK_SUCCESS) {
1391 mtx_unlock(&dev->scratch_bos[index].construct_mtx);
1392 return result;
1393 }
1394
1395 p_atomic_set(&dev->scratch_bos[index].initialized, true);
1396
1397 mtx_unlock(&dev->scratch_bos[index].construct_mtx);
1398
1399 *bo = &dev->scratch_bos[index].bo;
1400 return VK_SUCCESS;
1401 }
1402
1403 VkResult
1404 tu_EnumerateInstanceLayerProperties(uint32_t *pPropertyCount,
1405 VkLayerProperties *pProperties)
1406 {
1407 *pPropertyCount = 0;
1408 return VK_SUCCESS;
1409 }
1410
1411 VkResult
1412 tu_EnumerateDeviceLayerProperties(VkPhysicalDevice physicalDevice,
1413 uint32_t *pPropertyCount,
1414 VkLayerProperties *pProperties)
1415 {
1416 *pPropertyCount = 0;
1417 return VK_SUCCESS;
1418 }
1419
1420 void
1421 tu_GetDeviceQueue2(VkDevice _device,
1422 const VkDeviceQueueInfo2 *pQueueInfo,
1423 VkQueue *pQueue)
1424 {
1425 TU_FROM_HANDLE(tu_device, device, _device);
1426 struct tu_queue *queue;
1427
1428 queue =
1429 &device->queues[pQueueInfo->queueFamilyIndex][pQueueInfo->queueIndex];
1430 if (pQueueInfo->flags != queue->flags) {
1431 /* From the Vulkan 1.1.70 spec:
1432 *
1433 * "The queue returned by vkGetDeviceQueue2 must have the same
1434 * flags value from this structure as that used at device
1435 * creation time in a VkDeviceQueueCreateInfo instance. If no
1436 * matching flags were specified at device creation time then
1437 * pQueue will return VK_NULL_HANDLE."
1438 */
1439 *pQueue = VK_NULL_HANDLE;
1440 return;
1441 }
1442
1443 *pQueue = tu_queue_to_handle(queue);
1444 }
1445
1446 void
1447 tu_GetDeviceQueue(VkDevice _device,
1448 uint32_t queueFamilyIndex,
1449 uint32_t queueIndex,
1450 VkQueue *pQueue)
1451 {
1452 const VkDeviceQueueInfo2 info =
1453 (VkDeviceQueueInfo2) { .sType = VK_STRUCTURE_TYPE_DEVICE_QUEUE_INFO_2,
1454 .queueFamilyIndex = queueFamilyIndex,
1455 .queueIndex = queueIndex };
1456
1457 tu_GetDeviceQueue2(_device, &info, pQueue);
1458 }
1459
1460 VkResult
1461 tu_QueueSubmit(VkQueue _queue,
1462 uint32_t submitCount,
1463 const VkSubmitInfo *pSubmits,
1464 VkFence _fence)
1465 {
1466 TU_FROM_HANDLE(tu_queue, queue, _queue);
1467
1468 for (uint32_t i = 0; i < submitCount; ++i) {
1469 const VkSubmitInfo *submit = pSubmits + i;
1470 const bool last_submit = (i == submitCount - 1);
1471 struct tu_bo_list bo_list;
1472 tu_bo_list_init(&bo_list);
1473
1474 uint32_t entry_count = 0;
1475 for (uint32_t j = 0; j < submit->commandBufferCount; ++j) {
1476 TU_FROM_HANDLE(tu_cmd_buffer, cmdbuf, submit->pCommandBuffers[j]);
1477 entry_count += cmdbuf->cs.entry_count;
1478 }
1479
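/* Second pass: emit one submit_cmd per IB, registering each command
 * stream BO in bo_list so the kernel can resolve the submit indices.
 */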
1480 struct drm_msm_gem_submit_cmd cmds[entry_count];
1481 uint32_t entry_idx = 0;
1482 for (uint32_t j = 0; j < submit->commandBufferCount; ++j) {
1483 TU_FROM_HANDLE(tu_cmd_buffer, cmdbuf, submit->pCommandBuffers[j]);
1484 struct tu_cs *cs = &cmdbuf->cs;
1485 for (unsigned i = 0; i < cs->entry_count; ++i, ++entry_idx) {
1486 cmds[entry_idx].type = MSM_SUBMIT_CMD_BUF;
1487 cmds[entry_idx].submit_idx =
1488 tu_bo_list_add(&bo_list, cs->entries[i].bo,
1489 MSM_SUBMIT_BO_READ | MSM_SUBMIT_BO_DUMP);
1490 cmds[entry_idx].submit_offset = cs->entries[i].offset;
1491 cmds[entry_idx].size = cs->entries[i].size;
1492 cmds[entry_idx].pad = 0;
1493 cmds[entry_idx].nr_relocs = 0;
1494 cmds[entry_idx].relocs = 0;
1495 }
1496
1497 tu_bo_list_merge(&bo_list, &cmdbuf->bo_list);
1498 }
1499
1500 uint32_t flags = MSM_PIPE_3D0;
1501 if (last_submit) {
1502 flags |= MSM_SUBMIT_FENCE_FD_OUT;
1503 }
1504
1505 struct drm_msm_gem_submit req = {
1506 .flags = flags,
1507 .queueid = queue->msm_queue_id,
1508 .bos = (uint64_t)(uintptr_t) bo_list.bo_infos,
1509 .nr_bos = bo_list.count,
1510 .cmds = (uint64_t)(uintptr_t)cmds,
1511 .nr_cmds = entry_count,
1512 };
1513
1514 int ret = drmCommandWriteRead(queue->device->physical_device->local_fd,
1515 DRM_MSM_GEM_SUBMIT,
1516 &req, sizeof(req));
1517 if (ret) {
1518 fprintf(stderr, "submit failed: %s\n", strerror(errno));
1519 abort();
1520 }
1521
1522 tu_bo_list_destroy(&bo_list);
1523
1524 if (last_submit) {
1525 /* no need to merge fences as queue execution is serialized */
1526 tu_fence_update_fd(&queue->submit_fence, req.fence_fd);
1527 }
1528 }
1529
1530 if (_fence != VK_NULL_HANDLE) {
1531 TU_FROM_HANDLE(tu_fence, fence, _fence);
1532 tu_fence_copy(fence, &queue->submit_fence);
1533 }
1534
1535 return VK_SUCCESS;
1536 }
1537
1538 VkResult
1539 tu_QueueWaitIdle(VkQueue _queue)
1540 {
1541 TU_FROM_HANDLE(tu_queue, queue, _queue);
1542
1543 tu_fence_wait_idle(&queue->submit_fence);
1544
1545 return VK_SUCCESS;
1546 }
1547
1548 VkResult
1549 tu_DeviceWaitIdle(VkDevice _device)
1550 {
1551 TU_FROM_HANDLE(tu_device, device, _device);
1552
1553 for (unsigned i = 0; i < TU_MAX_QUEUE_FAMILIES; i++) {
1554 for (unsigned q = 0; q < device->queue_count[i]; q++) {
1555 tu_QueueWaitIdle(tu_queue_to_handle(&device->queues[i][q]));
1556 }
1557 }
1558 return VK_SUCCESS;
1559 }
1560
1561 VkResult
1562 tu_ImportSemaphoreFdKHR(VkDevice _device,
1563 const VkImportSemaphoreFdInfoKHR *pImportSemaphoreFdInfo)
1564 {
1565 tu_stub();
1566
1567 return VK_SUCCESS;
1568 }
1569
1570 VkResult
1571 tu_GetSemaphoreFdKHR(VkDevice _device,
1572 const VkSemaphoreGetFdInfoKHR *pGetFdInfo,
1573 int *pFd)
1574 {
1575 tu_stub();
1576
1577 return VK_SUCCESS;
1578 }
1579
1580 VkResult
1581 tu_ImportFenceFdKHR(VkDevice _device,
1582 const VkImportFenceFdInfoKHR *pImportFenceFdInfo)
1583 {
1584 tu_stub();
1585
1586 return VK_SUCCESS;
1587 }
1588
1589 VkResult
1590 tu_GetFenceFdKHR(VkDevice _device,
1591 const VkFenceGetFdInfoKHR *pGetFdInfo,
1592 int *pFd)
1593 {
1594 tu_stub();
1595
1596 return VK_SUCCESS;
1597 }
1598
1599 VkResult
1600 tu_EnumerateInstanceExtensionProperties(const char *pLayerName,
1601 uint32_t *pPropertyCount,
1602 VkExtensionProperties *pProperties)
1603 {
1604 VK_OUTARRAY_MAKE(out, pProperties, pPropertyCount);
1605
1606 /* We support no layers */
1607 if (pLayerName)
1608 return vk_error(NULL, VK_ERROR_LAYER_NOT_PRESENT);
1609
1610 for (int i = 0; i < TU_INSTANCE_EXTENSION_COUNT; i++) {
1611 if (tu_instance_extensions_supported.extensions[i]) {
1612 vk_outarray_append(&out, prop) { *prop = tu_instance_extensions[i]; }
1613 }
1614 }
1615
1616 return vk_outarray_status(&out);
1617 }
1618
1619 VkResult
1620 tu_EnumerateDeviceExtensionProperties(VkPhysicalDevice physicalDevice,
1621 const char *pLayerName,
1622 uint32_t *pPropertyCount,
1623 VkExtensionProperties *pProperties)
1624 {
1626 TU_FROM_HANDLE(tu_physical_device, device, physicalDevice);
1627 VK_OUTARRAY_MAKE(out, pProperties, pPropertyCount);
1628
1629 /* We support no layers */
1630 if (pLayerName)
1631 return vk_error(NULL, VK_ERROR_LAYER_NOT_PRESENT);
1632
1633 for (int i = 0; i < TU_DEVICE_EXTENSION_COUNT; i++) {
1634 if (device->supported_extensions.extensions[i]) {
1635 vk_outarray_append(&out, prop) { *prop = tu_device_extensions[i]; }
1636 }
1637 }
1638
1639 return vk_outarray_status(&out);
1640 }
1641
1642 PFN_vkVoidFunction
1643 tu_GetInstanceProcAddr(VkInstance _instance, const char *pName)
1644 {
1645 TU_FROM_HANDLE(tu_instance, instance, _instance);
1646
1647 return tu_lookup_entrypoint_checked(
1648 pName, instance ? instance->api_version : 0,
1649 instance ? &instance->enabled_extensions : NULL, NULL);
1650 }
1651
1652 /* The loader wants us to expose a second GetInstanceProcAddr function
1653 * to work around certain LD_PRELOAD issues seen in apps.
1654 */
1655 PUBLIC
1656 VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL
1657 vk_icdGetInstanceProcAddr(VkInstance instance, const char *pName);
1658
1659 PUBLIC
1660 VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL
1661 vk_icdGetInstanceProcAddr(VkInstance instance, const char *pName)
1662 {
1663 return tu_GetInstanceProcAddr(instance, pName);
1664 }
1665
1666 PFN_vkVoidFunction
1667 tu_GetDeviceProcAddr(VkDevice _device, const char *pName)
1668 {
1669 TU_FROM_HANDLE(tu_device, device, _device);
1670
1671 return tu_lookup_entrypoint_checked(pName, device->instance->api_version,
1672 &device->instance->enabled_extensions,
1673 &device->enabled_extensions);
1674 }
1675
1676 static VkResult
1677 tu_alloc_memory(struct tu_device *device,
1678 const VkMemoryAllocateInfo *pAllocateInfo,
1679 const VkAllocationCallbacks *pAllocator,
1680 VkDeviceMemory *pMem)
1681 {
1682 struct tu_device_memory *mem;
1683 VkResult result;
1684
1685 assert(pAllocateInfo->sType == VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO);
1686
1687 if (pAllocateInfo->allocationSize == 0) {
1688 /* Apparently, this is allowed */
1689 *pMem = VK_NULL_HANDLE;
1690 return VK_SUCCESS;
1691 }
1692
1693 mem = vk_alloc2(&device->alloc, pAllocator, sizeof(*mem), 8,
1694 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
1695 if (mem == NULL)
1696 return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
1697
1698 const VkImportMemoryFdInfoKHR *fd_info =
1699 vk_find_struct_const(pAllocateInfo->pNext, IMPORT_MEMORY_FD_INFO_KHR);
1700 if (fd_info && !fd_info->handleType)
1701 fd_info = NULL;
1702
1703 if (fd_info) {
1704 assert(fd_info->handleType ==
1705 VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT ||
1706 fd_info->handleType ==
1707 VK_EXTERNAL_MEMORY_HANDLE_TYPE_DMA_BUF_BIT_EXT);
1708
1709 /*
1710 * TODO Importing the same fd twice gives us the same handle without
1711 * reference counting. We need to maintain a per-instance handle-to-bo
1712 * table and add reference count to tu_bo.
1713 */
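/* One possible shape for that table (purely illustrative; none of
 * these names exist in the driver): a per-instance hash map from
 * gem_handle to { struct tu_bo bo; int refcount; }. Import would bump
 * the refcount on a hit and insert with refcount = 1 on a miss, and
 * tu_FreeMemory() would only call tu_bo_finish() once the count drops
 * to zero.
 */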
1714 result = tu_bo_init_dmabuf(device, &mem->bo,
1715 pAllocateInfo->allocationSize, fd_info->fd);
1716 if (result == VK_SUCCESS) {
1717 /* take ownership and close the fd */
1718 close(fd_info->fd);
1719 }
1720 } else {
1721 result =
1722 tu_bo_init_new(device, &mem->bo, pAllocateInfo->allocationSize);
1723 }
1724
1725 if (result != VK_SUCCESS) {
1726 vk_free2(&device->alloc, pAllocator, mem);
1727 return result;
1728 }
1729
1730 mem->size = pAllocateInfo->allocationSize;
1731 mem->type_index = pAllocateInfo->memoryTypeIndex;
1732
1733 mem->map = NULL;
1734 mem->user_ptr = NULL;
1735
1736 *pMem = tu_device_memory_to_handle(mem);
1737
1738 return VK_SUCCESS;
1739 }
1740
1741 VkResult
1742 tu_AllocateMemory(VkDevice _device,
1743 const VkMemoryAllocateInfo *pAllocateInfo,
1744 const VkAllocationCallbacks *pAllocator,
1745 VkDeviceMemory *pMem)
1746 {
1747 TU_FROM_HANDLE(tu_device, device, _device);
1748 return tu_alloc_memory(device, pAllocateInfo, pAllocator, pMem);
1749 }
1750
1751 void
1752 tu_FreeMemory(VkDevice _device,
1753 VkDeviceMemory _mem,
1754 const VkAllocationCallbacks *pAllocator)
1755 {
1756 TU_FROM_HANDLE(tu_device, device, _device);
1757 TU_FROM_HANDLE(tu_device_memory, mem, _mem);
1758
1759 if (mem == NULL)
1760 return;
1761
1762 tu_bo_finish(device, &mem->bo);
1763 vk_free2(&device->alloc, pAllocator, mem);
1764 }
1765
1766 VkResult
1767 tu_MapMemory(VkDevice _device,
1768 VkDeviceMemory _memory,
1769 VkDeviceSize offset,
1770 VkDeviceSize size,
1771 VkMemoryMapFlags flags,
1772 void **ppData)
1773 {
1774 TU_FROM_HANDLE(tu_device, device, _device);
1775 TU_FROM_HANDLE(tu_device_memory, mem, _memory);
1776 VkResult result;
1777
1778 if (mem == NULL) {
1779 *ppData = NULL;
1780 return VK_SUCCESS;
1781 }
1782
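/* Mapping is lazy and cached: the first vkMapMemory() maps the whole
 * BO and stashes the pointer in mem->map; later calls just offset into
 * it. tu_UnmapMemory() below is correspondingly a no-op.
 */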
1783 if (mem->user_ptr) {
1784 *ppData = mem->user_ptr;
1785 } else if (!mem->map) {
1786 result = tu_bo_map(device, &mem->bo);
1787 if (result != VK_SUCCESS)
1788 return result;
1789 *ppData = mem->map = mem->bo.map;
1790 } else
1791 *ppData = mem->map;
1792
1793 if (*ppData) {
1794 *ppData += offset;
1795 return VK_SUCCESS;
1796 }
1797
1798 return vk_error(device->instance, VK_ERROR_MEMORY_MAP_FAILED);
1799 }
1800
1801 void
1802 tu_UnmapMemory(VkDevice _device, VkDeviceMemory _memory)
1803 {
1804 /* No-op: the CPU mapping persists for the BO's lifetime and is unmapped in tu_bo_finish(). */
1805 }
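/* BOs are allocated write-combined (MSM_BO_WC, see tu_bo_init_new), so
 * CPU accesses bypass the CPU caches and there is nothing to flush or
 * invalidate; both entrypoints below can be no-ops.
 */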
1806
1807 VkResult
1808 tu_FlushMappedMemoryRanges(VkDevice _device,
1809 uint32_t memoryRangeCount,
1810 const VkMappedMemoryRange *pMemoryRanges)
1811 {
1812 return VK_SUCCESS;
1813 }
1814
1815 VkResult
1816 tu_InvalidateMappedMemoryRanges(VkDevice _device,
1817 uint32_t memoryRangeCount,
1818 const VkMappedMemoryRange *pMemoryRanges)
1819 {
1820 return VK_SUCCESS;
1821 }
1822
1823 void
1824 tu_GetBufferMemoryRequirements(VkDevice _device,
1825 VkBuffer _buffer,
1826 VkMemoryRequirements *pMemoryRequirements)
1827 {
1828 TU_FROM_HANDLE(tu_buffer, buffer, _buffer);
1829
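/* memoryTypeBits is a mask of allowed memory type indices; a single
 * memory type is exposed, so bit 0 suffices. The 64-byte alignment
 * appears to be a conservative catch-all for a6xx buffer bindings.
 */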
1830 pMemoryRequirements->memoryTypeBits = 1;
1831 pMemoryRequirements->alignment = 64;
1832 pMemoryRequirements->size =
1833 align64(buffer->size, pMemoryRequirements->alignment);
1834 }
1835
1836 void
1837 tu_GetBufferMemoryRequirements2(
1838 VkDevice device,
1839 const VkBufferMemoryRequirementsInfo2 *pInfo,
1840 VkMemoryRequirements2 *pMemoryRequirements)
1841 {
1842 tu_GetBufferMemoryRequirements(device, pInfo->buffer,
1843 &pMemoryRequirements->memoryRequirements);
1844 }
1845
1846 void
1847 tu_GetImageMemoryRequirements(VkDevice _device,
1848 VkImage _image,
1849 VkMemoryRequirements *pMemoryRequirements)
1850 {
1851 TU_FROM_HANDLE(tu_image, image, _image);
1852
1853 pMemoryRequirements->memoryTypeBits = 1;
1854 pMemoryRequirements->size = image->layout.size;
1855 pMemoryRequirements->alignment = image->layout.base_align;
1856 }
1857
1858 void
1859 tu_GetImageMemoryRequirements2(VkDevice device,
1860 const VkImageMemoryRequirementsInfo2 *pInfo,
1861 VkMemoryRequirements2 *pMemoryRequirements)
1862 {
1863 tu_GetImageMemoryRequirements(device, pInfo->image,
1864 &pMemoryRequirements->memoryRequirements);
1865 }
1866
1867 void
1868 tu_GetImageSparseMemoryRequirements(
1869 VkDevice device,
1870 VkImage image,
1871 uint32_t *pSparseMemoryRequirementCount,
1872 VkSparseImageMemoryRequirements *pSparseMemoryRequirements)
1873 {
1874 tu_stub();
1875 }
1876
1877 void
1878 tu_GetImageSparseMemoryRequirements2(
1879 VkDevice device,
1880 const VkImageSparseMemoryRequirementsInfo2 *pInfo,
1881 uint32_t *pSparseMemoryRequirementCount,
1882 VkSparseImageMemoryRequirements2 *pSparseMemoryRequirements)
1883 {
1884 tu_stub();
1885 }
1886
1887 void
1888 tu_GetDeviceMemoryCommitment(VkDevice device,
1889 VkDeviceMemory memory,
1890 VkDeviceSize *pCommittedMemoryInBytes)
1891 {
1892 *pCommittedMemoryInBytes = 0;
1893 }
1894
1895 VkResult
1896 tu_BindBufferMemory2(VkDevice device,
1897 uint32_t bindInfoCount,
1898 const VkBindBufferMemoryInfo *pBindInfos)
1899 {
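/* Binding is pure CPU-side bookkeeping: record the backing BO and
 * offset; the GPU address is resolved later, when descriptors and
 * command streams are built.
 */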
1900 for (uint32_t i = 0; i < bindInfoCount; ++i) {
1901 TU_FROM_HANDLE(tu_device_memory, mem, pBindInfos[i].memory);
1902 TU_FROM_HANDLE(tu_buffer, buffer, pBindInfos[i].buffer);
1903
1904 if (mem) {
1905 buffer->bo = &mem->bo;
1906 buffer->bo_offset = pBindInfos[i].memoryOffset;
1907 } else {
1908 buffer->bo = NULL;
1909 }
1910 }
1911 return VK_SUCCESS;
1912 }
1913
1914 VkResult
1915 tu_BindBufferMemory(VkDevice device,
1916 VkBuffer buffer,
1917 VkDeviceMemory memory,
1918 VkDeviceSize memoryOffset)
1919 {
1920 const VkBindBufferMemoryInfo info = {
1921 .sType = VK_STRUCTURE_TYPE_BIND_BUFFER_MEMORY_INFO,
1922 .buffer = buffer,
1923 .memory = memory,
1924 .memoryOffset = memoryOffset
1925 };
1926
1927 return tu_BindBufferMemory2(device, 1, &info);
1928 }
1929
1930 VkResult
1931 tu_BindImageMemory2(VkDevice device,
1932 uint32_t bindInfoCount,
1933 const VkBindImageMemoryInfo *pBindInfos)
1934 {
1935 for (uint32_t i = 0; i < bindInfoCount; ++i) {
1936 TU_FROM_HANDLE(tu_image, image, pBindInfos[i].image);
1937 TU_FROM_HANDLE(tu_device_memory, mem, pBindInfos[i].memory);
1938
1939 if (mem) {
1940 image->bo = &mem->bo;
1941 image->bo_offset = pBindInfos[i].memoryOffset;
1942 } else {
1943 image->bo = NULL;
1944 image->bo_offset = 0;
1945 }
1946 }
1947
1948 return VK_SUCCESS;
1949 }
1950
1951 VkResult
1952 tu_BindImageMemory(VkDevice device,
1953 VkImage image,
1954 VkDeviceMemory memory,
1955 VkDeviceSize memoryOffset)
1956 {
1957 const VkBindImageMemoryInfo info = {
1958 .sType = VK_STRUCTURE_TYPE_BIND_IMAGE_MEMORY_INFO,
1959 .image = image,
1960 .memory = memory,
1961 .memoryOffset = memoryOffset
1962 };
1963
1964 return tu_BindImageMemory2(device, 1, &info);
1965 }
1966
1967 VkResult
1968 tu_QueueBindSparse(VkQueue _queue,
1969 uint32_t bindInfoCount,
1970 const VkBindSparseInfo *pBindInfo,
1971 VkFence _fence)
1972 {
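/* Sparse binding is not advertised in the device features, so a valid
 * application has nothing real to bind here; succeed trivially.
 */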
1973 return VK_SUCCESS;
1974 }
1975
1976 /* Queue semaphore functions */
1977
1978 VkResult
1979 tu_CreateSemaphore(VkDevice _device,
1980 const VkSemaphoreCreateInfo *pCreateInfo,
1981 const VkAllocationCallbacks *pAllocator,
1982 VkSemaphore *pSemaphore)
1983 {
1984 TU_FROM_HANDLE(tu_device, device, _device);
1985
1986 struct tu_semaphore *sem =
1987 vk_alloc2(&device->alloc, pAllocator, sizeof(*sem), 8,
1988 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
1989 if (!sem)
1990 return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
1991
1992 *pSemaphore = tu_semaphore_to_handle(sem);
1993 return VK_SUCCESS;
1994 }
1995
1996 void
1997 tu_DestroySemaphore(VkDevice _device,
1998 VkSemaphore _semaphore,
1999 const VkAllocationCallbacks *pAllocator)
2000 {
2001 TU_FROM_HANDLE(tu_device, device, _device);
2002 TU_FROM_HANDLE(tu_semaphore, sem, _semaphore);
2003 if (!_semaphore)
2004 return;
2005
2006 vk_free2(&device->alloc, pAllocator, sem);
2007 }
2008
2009 VkResult
2010 tu_CreateEvent(VkDevice _device,
2011 const VkEventCreateInfo *pCreateInfo,
2012 const VkAllocationCallbacks *pAllocator,
2013 VkEvent *pEvent)
2014 {
2015 TU_FROM_HANDLE(tu_device, device, _device);
2016 struct tu_event *event =
2017 vk_alloc2(&device->alloc, pAllocator, sizeof(*event), 8,
2018 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
2019
2020 if (!event)
2021 return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
2022
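/* Allocate one 4 KiB page: the event state is a single uint64_t flag
 * at offset 0, accessed through the CPU mapping by the Get/Set/Reset
 * entrypoints below, and it is the same flag the GPU-side event
 * commands target.
 */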
2023 VkResult result = tu_bo_init_new(device, &event->bo, 0x1000);
2024 if (result != VK_SUCCESS)
2025 goto fail_alloc;
2026
2027 result = tu_bo_map(device, &event->bo);
2028 if (result != VK_SUCCESS)
2029 goto fail_map;
2030
2031 *pEvent = tu_event_to_handle(event);
2032
2033 return VK_SUCCESS;
2034
2035 fail_map:
2036 tu_bo_finish(device, &event->bo);
2037 fail_alloc:
2038 vk_free2(&device->alloc, pAllocator, event);
2039 return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
2040 }
2041
2042 void
2043 tu_DestroyEvent(VkDevice _device,
2044 VkEvent _event,
2045 const VkAllocationCallbacks *pAllocator)
2046 {
2047 TU_FROM_HANDLE(tu_device, device, _device);
2048 TU_FROM_HANDLE(tu_event, event, _event);
2049
2050 if (!event)
2051 return;
2052
2053 tu_bo_finish(device, &event->bo);
2054 vk_free2(&device->alloc, pAllocator, event);
2055 }
2056
2057 VkResult
2058 tu_GetEventStatus(VkDevice _device, VkEvent _event)
2059 {
2060 TU_FROM_HANDLE(tu_event, event, _event);
2061
2062 if (*(uint64_t*) event->bo.map == 1)
2063 return VK_EVENT_SET;
2064 return VK_EVENT_RESET;
2065 }
2066
2067 VkResult
2068 tu_SetEvent(VkDevice _device, VkEvent _event)
2069 {
2070 TU_FROM_HANDLE(tu_event, event, _event);
2071 *(uint64_t*) event->bo.map = 1;
2072
2073 return VK_SUCCESS;
2074 }
2075
2076 VkResult
2077 tu_ResetEvent(VkDevice _device, VkEvent _event)
2078 {
2079 TU_FROM_HANDLE(tu_event, event, _event);
2080 *(uint64_t*) event->bo.map = 0;
2081
2082 return VK_SUCCESS;
2083 }
2084
2085 VkResult
2086 tu_CreateBuffer(VkDevice _device,
2087 const VkBufferCreateInfo *pCreateInfo,
2088 const VkAllocationCallbacks *pAllocator,
2089 VkBuffer *pBuffer)
2090 {
2091 TU_FROM_HANDLE(tu_device, device, _device);
2092 struct tu_buffer *buffer;
2093
2094 assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO);
2095
2096 buffer = vk_alloc2(&device->alloc, pAllocator, sizeof(*buffer), 8,
2097 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
2098 if (buffer == NULL)
2099 return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
2100
2101 buffer->size = pCreateInfo->size;
2102 buffer->usage = pCreateInfo->usage;
2103 buffer->flags = pCreateInfo->flags;
2104
2105 *pBuffer = tu_buffer_to_handle(buffer);
2106
2107 return VK_SUCCESS;
2108 }
2109
2110 void
2111 tu_DestroyBuffer(VkDevice _device,
2112 VkBuffer _buffer,
2113 const VkAllocationCallbacks *pAllocator)
2114 {
2115 TU_FROM_HANDLE(tu_device, device, _device);
2116 TU_FROM_HANDLE(tu_buffer, buffer, _buffer);
2117
2118 if (!buffer)
2119 return;
2120
2121 vk_free2(&device->alloc, pAllocator, buffer);
2122 }
2123
2124 VkResult
2125 tu_CreateFramebuffer(VkDevice _device,
2126 const VkFramebufferCreateInfo *pCreateInfo,
2127 const VkAllocationCallbacks *pAllocator,
2128 VkFramebuffer *pFramebuffer)
2129 {
2130 TU_FROM_HANDLE(tu_device, device, _device);
2131 struct tu_framebuffer *framebuffer;
2132
2133 assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO);
2134
2135 size_t size = sizeof(*framebuffer) + sizeof(struct tu_attachment_info) *
2136 pCreateInfo->attachmentCount;
2137 framebuffer = vk_alloc2(&device->alloc, pAllocator, size, 8,
2138 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
2139 if (framebuffer == NULL)
2140 return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
2141
2142 framebuffer->attachment_count = pCreateInfo->attachmentCount;
2143 framebuffer->width = pCreateInfo->width;
2144 framebuffer->height = pCreateInfo->height;
2145 framebuffer->layers = pCreateInfo->layers;
2146 for (uint32_t i = 0; i < pCreateInfo->attachmentCount; i++) {
2147 VkImageView _iview = pCreateInfo->pAttachments[i];
2148 struct tu_image_view *iview = tu_image_view_from_handle(_iview);
2149 framebuffer->attachments[i].attachment = iview;
2150 }
2151
2152 *pFramebuffer = tu_framebuffer_to_handle(framebuffer);
2153 return VK_SUCCESS;
2154 }
2155
2156 void
2157 tu_DestroyFramebuffer(VkDevice _device,
2158 VkFramebuffer _fb,
2159 const VkAllocationCallbacks *pAllocator)
2160 {
2161 TU_FROM_HANDLE(tu_device, device, _device);
2162 TU_FROM_HANDLE(tu_framebuffer, fb, _fb);
2163
2164 if (!fb)
2165 return;
2166 vk_free2(&device->alloc, pAllocator, fb);
2167 }
2168
2169 static void
2170 tu_init_sampler(struct tu_device *device,
2171 struct tu_sampler *sampler,
2172 const VkSamplerCreateInfo *pCreateInfo)
2173 {
2174 const struct VkSamplerReductionModeCreateInfo *reduction =
2175 vk_find_struct_const(pCreateInfo->pNext, SAMPLER_REDUCTION_MODE_CREATE_INFO);
2176 const struct VkSamplerYcbcrConversionInfo *ycbcr_conversion =
2177 vk_find_struct_const(pCreateInfo->pNext, SAMPLER_YCBCR_CONVERSION_INFO);
2178
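/* The arithmetic below suggests ANISO is a log2-encoded field:
 * maxAnisotropy 1/2/4/8/16 maps to 0/1/2/3/4, clamped at 16x.
 */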
2179 unsigned aniso = pCreateInfo->anisotropyEnable ?
2180 util_last_bit(MIN2((uint32_t)pCreateInfo->maxAnisotropy >> 1, 8)) : 0;
2181 bool miplinear = (pCreateInfo->mipmapMode == VK_SAMPLER_MIPMAP_MODE_LINEAR);
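/* The 4095/256 bound suggests the LOD fields are unsigned 12-bit fixed
 * point with 8 fractional bits, i.e. a maximum of just under 16.0.
 */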
2182 float min_lod = CLAMP(pCreateInfo->minLod, 0.0f, 4095.0f / 256.0f);
2183 float max_lod = CLAMP(pCreateInfo->maxLod, 0.0f, 4095.0f / 256.0f);
2184
2185 sampler->descriptor[0] =
2186 COND(miplinear, A6XX_TEX_SAMP_0_MIPFILTER_LINEAR_NEAR) |
2187 A6XX_TEX_SAMP_0_XY_MAG(tu6_tex_filter(pCreateInfo->magFilter, aniso)) |
2188 A6XX_TEX_SAMP_0_XY_MIN(tu6_tex_filter(pCreateInfo->minFilter, aniso)) |
2189 A6XX_TEX_SAMP_0_ANISO(aniso) |
2190 A6XX_TEX_SAMP_0_WRAP_S(tu6_tex_wrap(pCreateInfo->addressModeU)) |
2191 A6XX_TEX_SAMP_0_WRAP_T(tu6_tex_wrap(pCreateInfo->addressModeV)) |
2192 A6XX_TEX_SAMP_0_WRAP_R(tu6_tex_wrap(pCreateInfo->addressModeW)) |
2193 A6XX_TEX_SAMP_0_LOD_BIAS(pCreateInfo->mipLodBias);
2194 sampler->descriptor[1] =
2195 /* COND(!cso->seamless_cube_map, A6XX_TEX_SAMP_1_CUBEMAPSEAMLESSFILTOFF) | */
2196 COND(pCreateInfo->unnormalizedCoordinates, A6XX_TEX_SAMP_1_UNNORM_COORDS) |
2197 A6XX_TEX_SAMP_1_MIN_LOD(min_lod) |
2198 A6XX_TEX_SAMP_1_MAX_LOD(max_lod) |
2199 COND(pCreateInfo->compareEnable,
2200 A6XX_TEX_SAMP_1_COMPARE_FUNC(tu6_compare_func(pCreateInfo->compareOp)));
2201 /* This is an offset into the border_color BO, which we fill with all the
2202 * possible Vulkan border colors in the correct order, so we can just use
2203 * the Vulkan enum with no translation necessary.
2204 */
2205 sampler->descriptor[2] =
2206 A6XX_TEX_SAMP_2_BCOLOR_OFFSET((unsigned) pCreateInfo->borderColor *
2207 sizeof(struct bcolor_entry));
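/* For example, VK_BORDER_COLOR_FLOAT_OPAQUE_BLACK has the value 2, so
 * it selects the third bcolor_entry in the BO.
 */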
2208 sampler->descriptor[3] = 0;
2209
2210 if (reduction) {
2211 sampler->descriptor[2] |= A6XX_TEX_SAMP_2_REDUCTION_MODE(
2212 tu6_reduction_mode(reduction->reductionMode));
2213 }
2214
2215 sampler->ycbcr_sampler = ycbcr_conversion ?
2216 tu_sampler_ycbcr_conversion_from_handle(ycbcr_conversion->conversion) : NULL;
2217
2218 if (sampler->ycbcr_sampler &&
2219 sampler->ycbcr_sampler->chroma_filter == VK_FILTER_LINEAR) {
2220 sampler->descriptor[2] |= A6XX_TEX_SAMP_2_CHROMA_LINEAR;
2221 }
2222
2223 /* TODO:
2224 * A6XX_TEX_SAMP_1_MIPFILTER_LINEAR_FAR disables mipmapping, but vk has no NONE mipfilter?
2225 */
2226 }
2227
2228 VkResult
2229 tu_CreateSampler(VkDevice _device,
2230 const VkSamplerCreateInfo *pCreateInfo,
2231 const VkAllocationCallbacks *pAllocator,
2232 VkSampler *pSampler)
2233 {
2234 TU_FROM_HANDLE(tu_device, device, _device);
2235 struct tu_sampler *sampler;
2236
2237 assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_SAMPLER_CREATE_INFO);
2238
2239 sampler = vk_alloc2(&device->alloc, pAllocator, sizeof(*sampler), 8,
2240 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
2241 if (!sampler)
2242 return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
2243
2244 tu_init_sampler(device, sampler, pCreateInfo);
2245 *pSampler = tu_sampler_to_handle(sampler);
2246
2247 return VK_SUCCESS;
2248 }
2249
2250 void
2251 tu_DestroySampler(VkDevice _device,
2252 VkSampler _sampler,
2253 const VkAllocationCallbacks *pAllocator)
2254 {
2255 TU_FROM_HANDLE(tu_device, device, _device);
2256 TU_FROM_HANDLE(tu_sampler, sampler, _sampler);
2257
2258 if (!sampler)
2259 return;
2260 vk_free2(&device->alloc, pAllocator, sampler);
2261 }
2262
2263 /* vk_icd.h does not declare this function, so we declare it here to
2264 * suppress Wmissing-prototypes.
2265 */
2266 PUBLIC VKAPI_ATTR VkResult VKAPI_CALL
2267 vk_icdNegotiateLoaderICDInterfaceVersion(uint32_t *pSupportedVersion);
2268
2269 PUBLIC VKAPI_ATTR VkResult VKAPI_CALL
2270 vk_icdNegotiateLoaderICDInterfaceVersion(uint32_t *pSupportedVersion)
2271 {
2272 /* For the full details on loader interface versioning, see
2273 * <https://github.com/KhronosGroup/Vulkan-LoaderAndValidationLayers/blob/master/loader/LoaderAndLayerInterface.md>.
2274 * What follows is a condensed summary, to help you navigate the large and
2275 * confusing official doc.
2276 *
2277 * - Loader interface v0 is incompatible with later versions. We don't
2278 * support it.
2279 *
2280 * - In loader interface v1:
2281 * - The first ICD entrypoint called by the loader is
2282 * vk_icdGetInstanceProcAddr(). The ICD must statically expose this
2283 * entrypoint.
2284 * - The ICD must statically expose no other Vulkan symbol unless it
2285 * is linked with -Bsymbolic.
2286 * - Each dispatchable Vulkan handle created by the ICD must be
2287 * a pointer to a struct whose first member is VK_LOADER_DATA. The
2288 * ICD must initialize VK_LOADER_DATA.loadMagic to
2289 * ICD_LOADER_MAGIC.
2290 * - The loader implements vkCreate{PLATFORM}SurfaceKHR() and
2291 * vkDestroySurfaceKHR(). The ICD must be capable of working with
2292 * such loader-managed surfaces.
2293 *
2294 * - Loader interface v2 differs from v1 in:
2295 * - The first ICD entrypoint called by the loader is
2296 * vk_icdNegotiateLoaderICDInterfaceVersion(). The ICD must
2297 * statically expose this entrypoint.
2298 *
2299 * - Loader interface v3 differs from v2 in:
2300 * - The ICD must implement vkCreate{PLATFORM}SurfaceKHR(),
2301 * vkDestroySurfaceKHR(), and the other APIs that use VkSurfaceKHR,
2302 * because the loader no longer does so.
2303 */
2304 *pSupportedVersion = MIN2(*pSupportedVersion, 3u);
2305 return VK_SUCCESS;
2306 }
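/* Example of the negotiation: a loader supporting interface v4 calls
 * this with *pSupportedVersion == 4 and reads back 3, so both sides
 * proceed with v3 semantics. A v1 loader never calls this entrypoint
 * at all, which is itself how the ICD ends up treated as v1.
 */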
2307
2308 VkResult
2309 tu_GetMemoryFdKHR(VkDevice _device,
2310 const VkMemoryGetFdInfoKHR *pGetFdInfo,
2311 int *pFd)
2312 {
2313 TU_FROM_HANDLE(tu_device, device, _device);
2314 TU_FROM_HANDLE(tu_device_memory, memory, pGetFdInfo->memory);
2315
2316 assert(pGetFdInfo->sType == VK_STRUCTURE_TYPE_MEMORY_GET_FD_INFO_KHR);
2317
2318 /* At the moment, we support only the below handle types. */
2319 assert(pGetFdInfo->handleType ==
2320 VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT ||
2321 pGetFdInfo->handleType ==
2322 VK_EXTERNAL_MEMORY_HANDLE_TYPE_DMA_BUF_BIT_EXT);
2323
2324 int prime_fd = tu_bo_export_dmabuf(device, &memory->bo);
2325 if (prime_fd < 0)
2326 return vk_error(device->instance, VK_ERROR_OUT_OF_DEVICE_MEMORY);
2327
2328 *pFd = prime_fd;
2329 return VK_SUCCESS;
2330 }
2331
2332 VkResult
2333 tu_GetMemoryFdPropertiesKHR(VkDevice _device,
2334 VkExternalMemoryHandleTypeFlagBits handleType,
2335 int fd,
2336 VkMemoryFdPropertiesKHR *pMemoryFdProperties)
2337 {
2338 assert(handleType == VK_EXTERNAL_MEMORY_HANDLE_TYPE_DMA_BUF_BIT_EXT);
2339 pMemoryFdProperties->memoryTypeBits = 1;
2340 return VK_SUCCESS;
2341 }
2342
2343 void
2344 tu_GetPhysicalDeviceExternalSemaphoreProperties(
2345 VkPhysicalDevice physicalDevice,
2346 const VkPhysicalDeviceExternalSemaphoreInfo *pExternalSemaphoreInfo,
2347 VkExternalSemaphoreProperties *pExternalSemaphoreProperties)
2348 {
2349 pExternalSemaphoreProperties->exportFromImportedHandleTypes = 0;
2350 pExternalSemaphoreProperties->compatibleHandleTypes = 0;
2351 pExternalSemaphoreProperties->externalSemaphoreFeatures = 0;
2352 }
2353
2354 void
2355 tu_GetPhysicalDeviceExternalFenceProperties(
2356 VkPhysicalDevice physicalDevice,
2357 const VkPhysicalDeviceExternalFenceInfo *pExternalFenceInfo,
2358 VkExternalFenceProperties *pExternalFenceProperties)
2359 {
2360 pExternalFenceProperties->exportFromImportedHandleTypes = 0;
2361 pExternalFenceProperties->compatibleHandleTypes = 0;
2362 pExternalFenceProperties->externalFenceFeatures = 0;
2363 }
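/* For both semaphores and fences above, zeroed handle-type and feature
 * masks are the conformant way to report "no external handle support";
 * the queries themselves must still succeed.
 */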
2364
2365 VkResult
2366 tu_CreateDebugReportCallbackEXT(
2367 VkInstance _instance,
2368 const VkDebugReportCallbackCreateInfoEXT *pCreateInfo,
2369 const VkAllocationCallbacks *pAllocator,
2370 VkDebugReportCallbackEXT *pCallback)
2371 {
2372 TU_FROM_HANDLE(tu_instance, instance, _instance);
2373 return vk_create_debug_report_callback(&instance->debug_report_callbacks,
2374 pCreateInfo, pAllocator,
2375 &instance->alloc, pCallback);
2376 }
2377
2378 void
2379 tu_DestroyDebugReportCallbackEXT(VkInstance _instance,
2380 VkDebugReportCallbackEXT _callback,
2381 const VkAllocationCallbacks *pAllocator)
2382 {
2383 TU_FROM_HANDLE(tu_instance, instance, _instance);
2384 vk_destroy_debug_report_callback(&instance->debug_report_callbacks,
2385 _callback, pAllocator, &instance->alloc);
2386 }
2387
2388 void
2389 tu_DebugReportMessageEXT(VkInstance _instance,
2390 VkDebugReportFlagsEXT flags,
2391 VkDebugReportObjectTypeEXT objectType,
2392 uint64_t object,
2393 size_t location,
2394 int32_t messageCode,
2395 const char *pLayerPrefix,
2396 const char *pMessage)
2397 {
2398 TU_FROM_HANDLE(tu_instance, instance, _instance);
2399 vk_debug_report(&instance->debug_report_callbacks, flags, objectType,
2400 object, location, messageCode, pLayerPrefix, pMessage);
2401 }
2402
2403 void
2404 tu_GetDeviceGroupPeerMemoryFeatures(
2405 VkDevice device,
2406 uint32_t heapIndex,
2407 uint32_t localDeviceIndex,
2408 uint32_t remoteDeviceIndex,
2409 VkPeerMemoryFeatureFlags *pPeerMemoryFeatures)
2410 {
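/* Only trivial device groups (one physical device) are advertised, so
 * peer must equal local, and peer-memory access within a single device
 * trivially supports everything.
 */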
2411 assert(localDeviceIndex == remoteDeviceIndex);
2412
2413 *pPeerMemoryFeatures = VK_PEER_MEMORY_FEATURE_COPY_SRC_BIT |
2414 VK_PEER_MEMORY_FEATURE_COPY_DST_BIT |
2415 VK_PEER_MEMORY_FEATURE_GENERIC_SRC_BIT |
2416 VK_PEER_MEMORY_FEATURE_GENERIC_DST_BIT;
2417 }
2418
2419 void tu_GetPhysicalDeviceMultisamplePropertiesEXT(
2420 VkPhysicalDevice physicalDevice,
2421 VkSampleCountFlagBits samples,
2422 VkMultisamplePropertiesEXT* pMultisampleProperties)
2423 {
2424 TU_FROM_HANDLE(tu_physical_device, pdevice, physicalDevice);
2425
2426 if (samples <= VK_SAMPLE_COUNT_4_BIT && pdevice->supported_extensions.EXT_sample_locations)
2427 pMultisampleProperties->maxSampleLocationGridSize = (VkExtent2D){ 1, 1 };
2428 else
2429 pMultisampleProperties->maxSampleLocationGridSize = (VkExtent2D){ 0, 0 };
2430 }