turnip: use global bo for clear blit shaders
[mesa.git] / src / freedreno / vulkan / tu_device.c
1 /*
2 * Copyright © 2016 Red Hat.
3 * Copyright © 2016 Bas Nieuwenhuizen
4 *
5 * based in part on anv driver which is:
6 * Copyright © 2015 Intel Corporation
7 *
8 * Permission is hereby granted, free of charge, to any person obtaining a
9 * copy of this software and associated documentation files (the "Software"),
10 * to deal in the Software without restriction, including without limitation
11 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
12 * and/or sell copies of the Software, and to permit persons to whom the
13 * Software is furnished to do so, subject to the following conditions:
14 *
15 * The above copyright notice and this permission notice (including the next
16 * paragraph) shall be included in all copies or substantial portions of the
17 * Software.
18 *
19 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
20 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
21 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
22 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
23 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
24 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
25 * DEALINGS IN THE SOFTWARE.
26 */
27
28 #include "tu_private.h"
29
30 #include <fcntl.h>
31 #include <libsync.h>
32 #include <stdbool.h>
33 #include <string.h>
34 #include <sys/mman.h>
35 #include <sys/sysinfo.h>
36 #include <unistd.h>
37 #include <xf86drm.h>
38
39 #include "compiler/glsl_types.h"
40 #include "util/debug.h"
41 #include "util/disk_cache.h"
42 #include "util/u_atomic.h"
43 #include "vk_format.h"
44 #include "vk_util.h"
45
46 #include "drm-uapi/msm_drm.h"
47
48 /* for fd_get_driver/device_uuid() */
49 #include "freedreno/common/freedreno_uuid.h"
50
51 static void
52 tu_semaphore_remove_temp(struct tu_device *device,
53 struct tu_semaphore *sem);
54
55 static int
56 tu_device_get_cache_uuid(uint16_t family, void *uuid)
57 {
58 uint32_t mesa_timestamp;
59 uint16_t f = family;
60 memset(uuid, 0, VK_UUID_SIZE);
61 if (!disk_cache_get_function_timestamp(tu_device_get_cache_uuid,
62 &mesa_timestamp))
63 return -1;
64
65 memcpy(uuid, &mesa_timestamp, 4);
66 memcpy((char *) uuid + 4, &f, 2);
67 snprintf((char *) uuid + 6, VK_UUID_SIZE - 10, "tu");
68 return 0;
69 }
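/* The cache UUID built above packs the Mesa build timestamp into bytes 0-3,
 * the GPU family into bytes 4-5 and the literal "tu" tag at byte 6; the
 * remaining bytes stay zero from the initial memset. */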
70
71 static VkResult
72 tu_bo_init(struct tu_device *dev,
73 struct tu_bo *bo,
74 uint32_t gem_handle,
75 uint64_t size)
76 {
77 uint64_t iova = tu_gem_info_iova(dev, gem_handle);
78 if (!iova)
79 return VK_ERROR_OUT_OF_DEVICE_MEMORY;
80
81 *bo = (struct tu_bo) {
82 .gem_handle = gem_handle,
83 .size = size,
84 .iova = iova,
85 };
86
87 return VK_SUCCESS;
88 }
89
90 VkResult
91 tu_bo_init_new(struct tu_device *dev, struct tu_bo *bo, uint64_t size)
92 {
93 /* TODO: Choose better flags. As of 2018-11-12, freedreno/drm/msm_bo.c
94 * always sets `flags = MSM_BO_WC`, and we copy that behavior here.
95 */
96 uint32_t gem_handle = tu_gem_new(dev, size, MSM_BO_WC);
97 if (!gem_handle)
98 return vk_error(dev->instance, VK_ERROR_OUT_OF_DEVICE_MEMORY);
99
100 VkResult result = tu_bo_init(dev, bo, gem_handle, size);
101 if (result != VK_SUCCESS) {
102 tu_gem_close(dev, gem_handle);
103 return vk_error(dev->instance, result);
104 }
105
106 return VK_SUCCESS;
107 }
108
109 VkResult
110 tu_bo_init_dmabuf(struct tu_device *dev,
111 struct tu_bo *bo,
112 uint64_t size,
113 int fd)
114 {
115 uint32_t gem_handle = tu_gem_import_dmabuf(dev, fd, size);
116 if (!gem_handle)
117 return vk_error(dev->instance, VK_ERROR_INVALID_EXTERNAL_HANDLE);
118
119 VkResult result = tu_bo_init(dev, bo, gem_handle, size);
120 if (result != VK_SUCCESS) {
121 tu_gem_close(dev, gem_handle);
122 return vk_error(dev->instance, result);
123 }
124
125 return VK_SUCCESS;
126 }
127
128 int
129 tu_bo_export_dmabuf(struct tu_device *dev, struct tu_bo *bo)
130 {
131 return tu_gem_export_dmabuf(dev, bo->gem_handle);
132 }
133
134 VkResult
135 tu_bo_map(struct tu_device *dev, struct tu_bo *bo)
136 {
137 if (bo->map)
138 return VK_SUCCESS;
139
140 uint64_t offset = tu_gem_info_offset(dev, bo->gem_handle);
141 if (!offset)
142 return vk_error(dev->instance, VK_ERROR_OUT_OF_DEVICE_MEMORY);
143
144 /* TODO: Should we use the wrapper os_mmap() like Freedreno does? */
145 void *map = mmap(0, bo->size, PROT_READ | PROT_WRITE, MAP_SHARED,
146 dev->physical_device->local_fd, offset);
147 if (map == MAP_FAILED)
148 return vk_error(dev->instance, VK_ERROR_MEMORY_MAP_FAILED);
149
150 bo->map = map;
151 return VK_SUCCESS;
152 }
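/* The mapping above goes through the DRM device fd at the per-BO mmap offset
 * reported by the kernel (tu_gem_info_offset) and is cached in bo->map, so
 * repeated tu_bo_map() calls on the same BO are free. */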
153
154 void
155 tu_bo_finish(struct tu_device *dev, struct tu_bo *bo)
156 {
157 assert(bo->gem_handle);
158
159 if (bo->map)
160 munmap(bo->map, bo->size);
161
162 tu_gem_close(dev, bo->gem_handle);
163 }
164
165 static VkResult
166 tu_physical_device_init(struct tu_physical_device *device,
167 struct tu_instance *instance,
168 drmDevicePtr drm_device)
169 {
170 const char *path = drm_device->nodes[DRM_NODE_RENDER];
171 VkResult result = VK_SUCCESS;
172 drmVersionPtr version;
173 int fd;
174 int master_fd = -1;
175
176 fd = open(path, O_RDWR | O_CLOEXEC);
177 if (fd < 0) {
178 return vk_errorf(instance, VK_ERROR_INCOMPATIBLE_DRIVER,
179 "failed to open device %s", path);
180 }
181
182 /* Version 1.3 added MSM_INFO_IOVA. */
183 const int min_version_major = 1;
184 const int min_version_minor = 3;
185
186 version = drmGetVersion(fd);
187 if (!version) {
188 close(fd);
189 return vk_errorf(instance, VK_ERROR_INCOMPATIBLE_DRIVER,
190 "failed to query kernel driver version for device %s",
191 path);
192 }
193
194 if (strcmp(version->name, "msm")) {
195 drmFreeVersion(version);
196 close(fd);
197 return vk_errorf(instance, VK_ERROR_INCOMPATIBLE_DRIVER,
198 "device %s does not use the msm kernel driver", path);
199 }
200
201 if (version->version_major != min_version_major ||
202 version->version_minor < min_version_minor) {
203 result = vk_errorf(instance, VK_ERROR_INCOMPATIBLE_DRIVER,
204 "kernel driver for device %s has version %d.%d, "
205 "but Vulkan requires version >= %d.%d",
206 path, version->version_major, version->version_minor,
207 min_version_major, min_version_minor);
208 drmFreeVersion(version);
209 close(fd);
210 return result;
211 }
212
213 device->msm_major_version = version->version_major;
214 device->msm_minor_version = version->version_minor;
215
216 drmFreeVersion(version);
217
218 if (instance->debug_flags & TU_DEBUG_STARTUP)
219 tu_logi("Found compatible device '%s'.", path);
220
221 device->_loader_data.loaderMagic = ICD_LOADER_MAGIC;
222 device->instance = instance;
223 assert(strlen(path) < ARRAY_SIZE(device->path));
224 strncpy(device->path, path, ARRAY_SIZE(device->path));
225
226 if (instance->enabled_extensions.KHR_display) {
227 master_fd =
228 open(drm_device->nodes[DRM_NODE_PRIMARY], O_RDWR | O_CLOEXEC);
229 if (master_fd >= 0) {
230 /* TODO: free master_fd if accel is not working? */
231 }
232 }
233
234 device->master_fd = master_fd;
235 device->local_fd = fd;
236
237 if (tu_drm_get_gpu_id(device, &device->gpu_id)) {
238 if (instance->debug_flags & TU_DEBUG_STARTUP)
239 tu_logi("Could not query the GPU ID");
240 result = vk_errorf(instance, VK_ERROR_INITIALIZATION_FAILED,
241 "could not get GPU ID");
242 goto fail;
243 }
244
245 if (tu_drm_get_gmem_size(device, &device->gmem_size)) {
246 if (instance->debug_flags & TU_DEBUG_STARTUP)
247 tu_logi("Could not query the GMEM size");
248 result = vk_errorf(instance, VK_ERROR_INITIALIZATION_FAILED,
249 "could not get GMEM size");
250 goto fail;
251 }
252
253 if (tu_drm_get_gmem_base(device, &device->gmem_base)) {
254 if (instance->debug_flags & TU_DEBUG_STARTUP)
255 tu_logi("Could not query the GMEM base");
256 result = vk_errorf(instance, VK_ERROR_INITIALIZATION_FAILED,
257 "could not get GMEM base");
258 goto fail;
259 }
260
261 memset(device->name, 0, sizeof(device->name));
262 sprintf(device->name, "FD%d", device->gpu_id);
263
264 switch (device->gpu_id) {
265 case 618:
266 device->ccu_offset_gmem = 0x7c000; /* 0x7e000 in some cases? */
267 device->ccu_offset_bypass = 0x10000;
268 device->tile_align_w = 64;
269 device->magic.PC_UNKNOWN_9805 = 0x0;
270 device->magic.SP_UNKNOWN_A0F8 = 0x0;
271 break;
272 case 630:
273 case 640:
274 device->ccu_offset_gmem = 0xf8000;
275 device->ccu_offset_bypass = 0x20000;
276 device->tile_align_w = 64;
277 device->magic.PC_UNKNOWN_9805 = 0x1;
278 device->magic.SP_UNKNOWN_A0F8 = 0x1;
279 break;
280 case 650:
281 device->ccu_offset_gmem = 0x114000;
282 device->ccu_offset_bypass = 0x30000;
283 device->tile_align_w = 96;
284 device->magic.PC_UNKNOWN_9805 = 0x2;
285 device->magic.SP_UNKNOWN_A0F8 = 0x2;
286 break;
287 default:
288 result = vk_errorf(instance, VK_ERROR_INITIALIZATION_FAILED,
289 "device %s is unsupported", device->name);
290 goto fail;
291 }
292 if (tu_device_get_cache_uuid(device->gpu_id, device->cache_uuid)) {
293 result = vk_errorf(instance, VK_ERROR_INITIALIZATION_FAILED,
294 "cannot generate UUID");
295 goto fail;
296 }
297
298 /* The gpu id is already embedded in the uuid, so we just pass the device
299 * name when creating the cache.
300 */
301 char buf[VK_UUID_SIZE * 2 + 1];
302 disk_cache_format_hex_id(buf, device->cache_uuid, VK_UUID_SIZE * 2);
303 device->disk_cache = disk_cache_create(device->name, buf, 0);
304
305 fprintf(stderr, "WARNING: tu is not a conformant vulkan implementation, "
306 "testing use only.\n");
307
308 fd_get_driver_uuid(device->driver_uuid);
309 fd_get_device_uuid(device->device_uuid, device->gpu_id);
310
311 tu_physical_device_get_supported_extensions(device, &device->supported_extensions);
312
313 if (result != VK_SUCCESS) {
314 vk_error(instance, result);
315 goto fail;
316 }
317
318 result = tu_wsi_init(device);
319 if (result != VK_SUCCESS) {
320 vk_error(instance, result);
321 goto fail;
322 }
323
324 return VK_SUCCESS;
325
326 fail:
327 close(fd);
328 if (master_fd != -1)
329 close(master_fd);
330 return result;
331 }
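/* The per-GPU switch above supplies the CCU GMEM/bypass offsets, the tile
 * alignment width and a pair of "magic" register values for a618/a630/a640/
 * a650; any other GPU id fails physical-device creation with
 * VK_ERROR_INITIALIZATION_FAILED. */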
332
333 static void
334 tu_physical_device_finish(struct tu_physical_device *device)
335 {
336 tu_wsi_finish(device);
337
338 disk_cache_destroy(device->disk_cache);
339 close(device->local_fd);
340 if (device->master_fd != -1)
341 close(device->master_fd);
342 }
343
344 static VKAPI_ATTR void *
345 default_alloc_func(void *pUserData,
346 size_t size,
347 size_t align,
348 VkSystemAllocationScope allocationScope)
349 {
350 return malloc(size);
351 }
352
353 static VKAPI_ATTR void *
354 default_realloc_func(void *pUserData,
355 void *pOriginal,
356 size_t size,
357 size_t align,
358 VkSystemAllocationScope allocationScope)
359 {
360 return realloc(pOriginal, size);
361 }
362
363 static VKAPI_ATTR void
364 default_free_func(void *pUserData, void *pMemory)
365 {
366 free(pMemory);
367 }
368
369 static const VkAllocationCallbacks default_alloc = {
370 .pUserData = NULL,
371 .pfnAllocation = default_alloc_func,
372 .pfnReallocation = default_realloc_func,
373 .pfnFree = default_free_func,
374 };
375
376 static const struct debug_control tu_debug_options[] = {
377 { "startup", TU_DEBUG_STARTUP },
378 { "nir", TU_DEBUG_NIR },
379 { "ir3", TU_DEBUG_IR3 },
380 { "nobin", TU_DEBUG_NOBIN },
381 { "sysmem", TU_DEBUG_SYSMEM },
382 { "forcebin", TU_DEBUG_FORCEBIN },
383 { "noubwc", TU_DEBUG_NOUBWC },
384 { NULL, 0 }
385 };
386
387 const char *
388 tu_get_debug_option_name(int id)
389 {
390 assert(id < ARRAY_SIZE(tu_debug_options) - 1);
391 return tu_debug_options[id].string;
392 }
393
394 static int
395 tu_get_instance_extension_index(const char *name)
396 {
397 for (unsigned i = 0; i < TU_INSTANCE_EXTENSION_COUNT; ++i) {
398 if (strcmp(name, tu_instance_extensions[i].extensionName) == 0)
399 return i;
400 }
401 return -1;
402 }
403
404 VkResult
405 tu_CreateInstance(const VkInstanceCreateInfo *pCreateInfo,
406 const VkAllocationCallbacks *pAllocator,
407 VkInstance *pInstance)
408 {
409 struct tu_instance *instance;
410 VkResult result;
411
412 assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_INSTANCE_CREATE_INFO);
413
414 uint32_t client_version;
415 if (pCreateInfo->pApplicationInfo &&
416 pCreateInfo->pApplicationInfo->apiVersion != 0) {
417 client_version = pCreateInfo->pApplicationInfo->apiVersion;
418 } else {
419 tu_EnumerateInstanceVersion(&client_version);
420 }
421
422 instance = vk_zalloc2(&default_alloc, pAllocator, sizeof(*instance), 8,
423 VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
424 if (!instance)
425 return vk_error(NULL, VK_ERROR_OUT_OF_HOST_MEMORY);
426
427 instance->_loader_data.loaderMagic = ICD_LOADER_MAGIC;
428
429 if (pAllocator)
430 instance->alloc = *pAllocator;
431 else
432 instance->alloc = default_alloc;
433
434 instance->api_version = client_version;
435 instance->physical_device_count = -1;
436
437 instance->debug_flags =
438 parse_debug_string(getenv("TU_DEBUG"), tu_debug_options);
439
440 if (instance->debug_flags & TU_DEBUG_STARTUP)
441 tu_logi("Created an instance");
442
443 for (uint32_t i = 0; i < pCreateInfo->enabledExtensionCount; i++) {
444 const char *ext_name = pCreateInfo->ppEnabledExtensionNames[i];
445 int index = tu_get_instance_extension_index(ext_name);
446
447 if (index < 0 || !tu_instance_extensions_supported.extensions[index]) {
448 vk_free2(&default_alloc, pAllocator, instance);
449 return vk_error(instance, VK_ERROR_EXTENSION_NOT_PRESENT);
450 }
451
452 instance->enabled_extensions.extensions[index] = true;
453 }
454
455 result = vk_debug_report_instance_init(&instance->debug_report_callbacks);
456 if (result != VK_SUCCESS) {
457 vk_free2(&default_alloc, pAllocator, instance);
458 return vk_error(instance, result);
459 }
460
461 glsl_type_singleton_init_or_ref();
462
463 VG(VALGRIND_CREATE_MEMPOOL(instance, 0, false));
464
465 *pInstance = tu_instance_to_handle(instance);
466
467 return VK_SUCCESS;
468 }
469
470 void
471 tu_DestroyInstance(VkInstance _instance,
472 const VkAllocationCallbacks *pAllocator)
473 {
474 TU_FROM_HANDLE(tu_instance, instance, _instance);
475
476 if (!instance)
477 return;
478
479 for (int i = 0; i < instance->physical_device_count; ++i) {
480 tu_physical_device_finish(instance->physical_devices + i);
481 }
482
483 VG(VALGRIND_DESTROY_MEMPOOL(instance));
484
485 glsl_type_singleton_decref();
486
487 vk_debug_report_instance_destroy(&instance->debug_report_callbacks);
488
489 vk_free(&instance->alloc, instance);
490 }
491
492 static VkResult
493 tu_enumerate_devices(struct tu_instance *instance)
494 {
495 /* TODO: Check for more devices ? */
496 drmDevicePtr devices[8];
497 VkResult result = VK_ERROR_INCOMPATIBLE_DRIVER;
498 int max_devices;
499
500 instance->physical_device_count = 0;
501
502 max_devices = drmGetDevices2(0, devices, ARRAY_SIZE(devices));
503
504 if (instance->debug_flags & TU_DEBUG_STARTUP) {
505 if (max_devices < 0)
506 tu_logi("drmGetDevices2 returned error: %s\n", strerror(max_devices));
507 else
508 tu_logi("Found %d drm nodes", max_devices);
509 }
510
511 if (max_devices < 1)
512 return vk_error(instance, VK_ERROR_INCOMPATIBLE_DRIVER);
513
514 for (unsigned i = 0; i < (unsigned) max_devices; i++) {
515 if (devices[i]->available_nodes & 1 << DRM_NODE_RENDER &&
516 devices[i]->bustype == DRM_BUS_PLATFORM) {
517
518 result = tu_physical_device_init(
519 instance->physical_devices + instance->physical_device_count,
520 instance, devices[i]);
521 if (result == VK_SUCCESS)
522 ++instance->physical_device_count;
523 else if (result != VK_ERROR_INCOMPATIBLE_DRIVER)
524 break;
525 }
526 }
527 drmFreeDevices(devices, max_devices);
528
529 return result;
530 }
531
532 VkResult
533 tu_EnumeratePhysicalDevices(VkInstance _instance,
534 uint32_t *pPhysicalDeviceCount,
535 VkPhysicalDevice *pPhysicalDevices)
536 {
537 TU_FROM_HANDLE(tu_instance, instance, _instance);
538 VK_OUTARRAY_MAKE(out, pPhysicalDevices, pPhysicalDeviceCount);
539
540 VkResult result;
541
542 if (instance->physical_device_count < 0) {
543 result = tu_enumerate_devices(instance);
544 if (result != VK_SUCCESS && result != VK_ERROR_INCOMPATIBLE_DRIVER)
545 return result;
546 }
547
548 for (uint32_t i = 0; i < instance->physical_device_count; ++i) {
549 vk_outarray_append(&out, p)
550 {
551 *p = tu_physical_device_to_handle(instance->physical_devices + i);
552 }
553 }
554
555 return vk_outarray_status(&out);
556 }
557
558 VkResult
559 tu_EnumeratePhysicalDeviceGroups(
560 VkInstance _instance,
561 uint32_t *pPhysicalDeviceGroupCount,
562 VkPhysicalDeviceGroupProperties *pPhysicalDeviceGroupProperties)
563 {
564 TU_FROM_HANDLE(tu_instance, instance, _instance);
565 VK_OUTARRAY_MAKE(out, pPhysicalDeviceGroupProperties,
566 pPhysicalDeviceGroupCount);
567 VkResult result;
568
569 if (instance->physical_device_count < 0) {
570 result = tu_enumerate_devices(instance);
571 if (result != VK_SUCCESS && result != VK_ERROR_INCOMPATIBLE_DRIVER)
572 return result;
573 }
574
575 for (uint32_t i = 0; i < instance->physical_device_count; ++i) {
576 vk_outarray_append(&out, p)
577 {
578 p->physicalDeviceCount = 1;
579 p->physicalDevices[0] =
580 tu_physical_device_to_handle(instance->physical_devices + i);
581 p->subsetAllocation = false;
582 }
583 }
584
585 return vk_outarray_status(&out);
586 }
587
588 void
589 tu_GetPhysicalDeviceFeatures(VkPhysicalDevice physicalDevice,
590 VkPhysicalDeviceFeatures *pFeatures)
591 {
592 memset(pFeatures, 0, sizeof(*pFeatures));
593
594 *pFeatures = (VkPhysicalDeviceFeatures) {
595 .robustBufferAccess = true,
596 .fullDrawIndexUint32 = true,
597 .imageCubeArray = true,
598 .independentBlend = true,
599 .geometryShader = true,
600 .tessellationShader = true,
601 .sampleRateShading = true,
602 .dualSrcBlend = true,
603 .logicOp = true,
604 .multiDrawIndirect = true,
605 .drawIndirectFirstInstance = true,
606 .depthClamp = true,
607 .depthBiasClamp = true,
608 .fillModeNonSolid = true,
609 .depthBounds = true,
610 .wideLines = false,
611 .largePoints = true,
612 .alphaToOne = true,
613 .multiViewport = false,
614 .samplerAnisotropy = true,
615 .textureCompressionETC2 = true,
616 .textureCompressionASTC_LDR = true,
617 .textureCompressionBC = true,
618 .occlusionQueryPrecise = true,
619 .pipelineStatisticsQuery = false,
620 .vertexPipelineStoresAndAtomics = false,
621 .fragmentStoresAndAtomics = false,
622 .shaderTessellationAndGeometryPointSize = false,
623 .shaderImageGatherExtended = false,
624 .shaderStorageImageExtendedFormats = false,
625 .shaderStorageImageMultisample = false,
626 .shaderUniformBufferArrayDynamicIndexing = false,
627 .shaderSampledImageArrayDynamicIndexing = false,
628 .shaderStorageBufferArrayDynamicIndexing = false,
629 .shaderStorageImageArrayDynamicIndexing = false,
630 .shaderStorageImageReadWithoutFormat = false,
631 .shaderStorageImageWriteWithoutFormat = false,
632 .shaderClipDistance = false,
633 .shaderCullDistance = false,
634 .shaderFloat64 = false,
635 .shaderInt64 = false,
636 .shaderInt16 = false,
637 .sparseBinding = false,
638 .variableMultisampleRate = false,
639 .inheritedQueries = false,
640 };
641 }
642
643 void
644 tu_GetPhysicalDeviceFeatures2(VkPhysicalDevice physicalDevice,
645 VkPhysicalDeviceFeatures2 *pFeatures)
646 {
647 vk_foreach_struct(ext, pFeatures->pNext)
648 {
649 switch (ext->sType) {
650 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VULKAN_1_1_FEATURES: {
651 VkPhysicalDeviceVulkan11Features *features = (void *) ext;
652 features->storageBuffer16BitAccess = false;
653 features->uniformAndStorageBuffer16BitAccess = false;
654 features->storagePushConstant16 = false;
655 features->storageInputOutput16 = false;
656 features->multiview = false;
657 features->multiviewGeometryShader = false;
658 features->multiviewTessellationShader = false;
659 features->variablePointersStorageBuffer = true;
660 features->variablePointers = true;
661 features->protectedMemory = false;
662 features->samplerYcbcrConversion = true;
663 features->shaderDrawParameters = true;
664 break;
665 }
666 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VARIABLE_POINTERS_FEATURES: {
667 VkPhysicalDeviceVariablePointersFeatures *features = (void *) ext;
668 features->variablePointersStorageBuffer = true;
669 features->variablePointers = true;
670 break;
671 }
672 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MULTIVIEW_FEATURES: {
673 VkPhysicalDeviceMultiviewFeatures *features =
674 (VkPhysicalDeviceMultiviewFeatures *) ext;
675 features->multiview = false;
676 features->multiviewGeometryShader = false;
677 features->multiviewTessellationShader = false;
678 break;
679 }
680 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_DRAW_PARAMETERS_FEATURES: {
681 VkPhysicalDeviceShaderDrawParametersFeatures *features =
682 (VkPhysicalDeviceShaderDrawParametersFeatures *) ext;
683 features->shaderDrawParameters = true;
684 break;
685 }
686 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROTECTED_MEMORY_FEATURES: {
687 VkPhysicalDeviceProtectedMemoryFeatures *features =
688 (VkPhysicalDeviceProtectedMemoryFeatures *) ext;
689 features->protectedMemory = false;
690 break;
691 }
692 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_16BIT_STORAGE_FEATURES: {
693 VkPhysicalDevice16BitStorageFeatures *features =
694 (VkPhysicalDevice16BitStorageFeatures *) ext;
695 features->storageBuffer16BitAccess = false;
696 features->uniformAndStorageBuffer16BitAccess = false;
697 features->storagePushConstant16 = false;
698 features->storageInputOutput16 = false;
699 break;
700 }
701 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SAMPLER_YCBCR_CONVERSION_FEATURES: {
702 VkPhysicalDeviceSamplerYcbcrConversionFeatures *features =
703 (VkPhysicalDeviceSamplerYcbcrConversionFeatures *) ext;
704 features->samplerYcbcrConversion = true;
705 break;
706 }
707 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DESCRIPTOR_INDEXING_FEATURES_EXT: {
708 VkPhysicalDeviceDescriptorIndexingFeaturesEXT *features =
709 (VkPhysicalDeviceDescriptorIndexingFeaturesEXT *) ext;
710 features->shaderInputAttachmentArrayDynamicIndexing = false;
711 features->shaderUniformTexelBufferArrayDynamicIndexing = false;
712 features->shaderStorageTexelBufferArrayDynamicIndexing = false;
713 features->shaderUniformBufferArrayNonUniformIndexing = false;
714 features->shaderSampledImageArrayNonUniformIndexing = false;
715 features->shaderStorageBufferArrayNonUniformIndexing = false;
716 features->shaderStorageImageArrayNonUniformIndexing = false;
717 features->shaderInputAttachmentArrayNonUniformIndexing = false;
718 features->shaderUniformTexelBufferArrayNonUniformIndexing = false;
719 features->shaderStorageTexelBufferArrayNonUniformIndexing = false;
720 features->descriptorBindingUniformBufferUpdateAfterBind = false;
721 features->descriptorBindingSampledImageUpdateAfterBind = false;
722 features->descriptorBindingStorageImageUpdateAfterBind = false;
723 features->descriptorBindingStorageBufferUpdateAfterBind = false;
724 features->descriptorBindingUniformTexelBufferUpdateAfterBind = false;
725 features->descriptorBindingStorageTexelBufferUpdateAfterBind = false;
726 features->descriptorBindingUpdateUnusedWhilePending = false;
727 features->descriptorBindingPartiallyBound = false;
728 features->descriptorBindingVariableDescriptorCount = false;
729 features->runtimeDescriptorArray = false;
730 break;
731 }
732 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_CONDITIONAL_RENDERING_FEATURES_EXT: {
733 VkPhysicalDeviceConditionalRenderingFeaturesEXT *features =
734 (VkPhysicalDeviceConditionalRenderingFeaturesEXT *) ext;
735 features->conditionalRendering = false;
736 features->inheritedConditionalRendering = false;
737 break;
738 }
739 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TRANSFORM_FEEDBACK_FEATURES_EXT: {
740 VkPhysicalDeviceTransformFeedbackFeaturesEXT *features =
741 (VkPhysicalDeviceTransformFeedbackFeaturesEXT *) ext;
742 features->transformFeedback = true;
743 features->geometryStreams = false;
744 break;
745 }
746 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_INDEX_TYPE_UINT8_FEATURES_EXT: {
747 VkPhysicalDeviceIndexTypeUint8FeaturesEXT *features =
748 (VkPhysicalDeviceIndexTypeUint8FeaturesEXT *)ext;
749 features->indexTypeUint8 = true;
750 break;
751 }
752 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VERTEX_ATTRIBUTE_DIVISOR_FEATURES_EXT: {
753 VkPhysicalDeviceVertexAttributeDivisorFeaturesEXT *features =
754 (VkPhysicalDeviceVertexAttributeDivisorFeaturesEXT *)ext;
755 features->vertexAttributeInstanceRateDivisor = true;
756 features->vertexAttributeInstanceRateZeroDivisor = true;
757 break;
758 }
759 default:
760 break;
761 }
762 }
763 return tu_GetPhysicalDeviceFeatures(physicalDevice, &pFeatures->features);
764 }
765
766 void
767 tu_GetPhysicalDeviceProperties(VkPhysicalDevice physicalDevice,
768 VkPhysicalDeviceProperties *pProperties)
769 {
770 TU_FROM_HANDLE(tu_physical_device, pdevice, physicalDevice);
771 VkSampleCountFlags sample_counts =
772 VK_SAMPLE_COUNT_1_BIT | VK_SAMPLE_COUNT_2_BIT | VK_SAMPLE_COUNT_4_BIT;
773
774 /* I have no idea what the maximum size is, but the hardware supports very
775 * large numbers of descriptors (at least 2^16). This limit is based on
776 * CP_LOAD_STATE6, which has a 28-bit field for the DWORD offset, so that
777 * we don't have to think about what to do if that overflows, but really
778 * nothing is likely to get close to this.
779 */
780 const size_t max_descriptor_set_size = (1 << 28) / A6XX_TEX_CONST_DWORDS;
781
782 VkPhysicalDeviceLimits limits = {
783 .maxImageDimension1D = (1 << 14),
784 .maxImageDimension2D = (1 << 14),
785 .maxImageDimension3D = (1 << 11),
786 .maxImageDimensionCube = (1 << 14),
787 .maxImageArrayLayers = (1 << 11),
788 .maxTexelBufferElements = 128 * 1024 * 1024,
789 .maxUniformBufferRange = MAX_UNIFORM_BUFFER_RANGE,
790 .maxStorageBufferRange = MAX_STORAGE_BUFFER_RANGE,
791 .maxPushConstantsSize = MAX_PUSH_CONSTANTS_SIZE,
792 .maxMemoryAllocationCount = UINT32_MAX,
793 .maxSamplerAllocationCount = 64 * 1024,
794 .bufferImageGranularity = 64, /* A cache line */
795 .sparseAddressSpaceSize = 0xffffffffu, /* buffer max size */
796 .maxBoundDescriptorSets = MAX_SETS,
797 .maxPerStageDescriptorSamplers = max_descriptor_set_size,
798 .maxPerStageDescriptorUniformBuffers = max_descriptor_set_size,
799 .maxPerStageDescriptorStorageBuffers = max_descriptor_set_size,
800 .maxPerStageDescriptorSampledImages = max_descriptor_set_size,
801 .maxPerStageDescriptorStorageImages = max_descriptor_set_size,
802 .maxPerStageDescriptorInputAttachments = MAX_RTS,
803 .maxPerStageResources = max_descriptor_set_size,
804 .maxDescriptorSetSamplers = max_descriptor_set_size,
805 .maxDescriptorSetUniformBuffers = max_descriptor_set_size,
806 .maxDescriptorSetUniformBuffersDynamic = MAX_DYNAMIC_UNIFORM_BUFFERS,
807 .maxDescriptorSetStorageBuffers = max_descriptor_set_size,
808 .maxDescriptorSetStorageBuffersDynamic = MAX_DYNAMIC_STORAGE_BUFFERS,
809 .maxDescriptorSetSampledImages = max_descriptor_set_size,
810 .maxDescriptorSetStorageImages = max_descriptor_set_size,
811 .maxDescriptorSetInputAttachments = MAX_RTS,
812 .maxVertexInputAttributes = 32,
813 .maxVertexInputBindings = 32,
814 .maxVertexInputAttributeOffset = 4095,
815 .maxVertexInputBindingStride = 2048,
816 .maxVertexOutputComponents = 128,
817 .maxTessellationGenerationLevel = 64,
818 .maxTessellationPatchSize = 32,
819 .maxTessellationControlPerVertexInputComponents = 128,
820 .maxTessellationControlPerVertexOutputComponents = 128,
821 .maxTessellationControlPerPatchOutputComponents = 120,
822 .maxTessellationControlTotalOutputComponents = 4096,
823 .maxTessellationEvaluationInputComponents = 128,
824 .maxTessellationEvaluationOutputComponents = 128,
825 .maxGeometryShaderInvocations = 32,
826 .maxGeometryInputComponents = 64,
827 .maxGeometryOutputComponents = 128,
828 .maxGeometryOutputVertices = 256,
829 .maxGeometryTotalOutputComponents = 1024,
830 .maxFragmentInputComponents = 124,
831 .maxFragmentOutputAttachments = 8,
832 .maxFragmentDualSrcAttachments = 1,
833 .maxFragmentCombinedOutputResources = 8,
834 .maxComputeSharedMemorySize = 32768,
835 .maxComputeWorkGroupCount = { 65535, 65535, 65535 },
836 .maxComputeWorkGroupInvocations = 2048,
837 .maxComputeWorkGroupSize = { 2048, 2048, 2048 },
838 .subPixelPrecisionBits = 8,
839 .subTexelPrecisionBits = 8,
840 .mipmapPrecisionBits = 8,
841 .maxDrawIndexedIndexValue = UINT32_MAX,
842 .maxDrawIndirectCount = UINT32_MAX,
843 .maxSamplerLodBias = 4095.0 / 256.0, /* [-16, 15.99609375] */
844 .maxSamplerAnisotropy = 16,
845 .maxViewports = MAX_VIEWPORTS,
846 .maxViewportDimensions = { (1 << 14), (1 << 14) },
847 .viewportBoundsRange = { INT16_MIN, INT16_MAX },
848 .viewportSubPixelBits = 8,
849 .minMemoryMapAlignment = 4096, /* A page */
850 .minTexelBufferOffsetAlignment = 64,
851 .minUniformBufferOffsetAlignment = 64,
852 .minStorageBufferOffsetAlignment = 64,
853 .minTexelOffset = -16,
854 .maxTexelOffset = 15,
855 .minTexelGatherOffset = -32,
856 .maxTexelGatherOffset = 31,
857 .minInterpolationOffset = -0.5,
858 .maxInterpolationOffset = 0.4375,
859 .subPixelInterpolationOffsetBits = 4,
860 .maxFramebufferWidth = (1 << 14),
861 .maxFramebufferHeight = (1 << 14),
862 .maxFramebufferLayers = (1 << 10),
863 .framebufferColorSampleCounts = sample_counts,
864 .framebufferDepthSampleCounts = sample_counts,
865 .framebufferStencilSampleCounts = sample_counts,
866 .framebufferNoAttachmentsSampleCounts = sample_counts,
867 .maxColorAttachments = MAX_RTS,
868 .sampledImageColorSampleCounts = sample_counts,
869 .sampledImageIntegerSampleCounts = VK_SAMPLE_COUNT_1_BIT,
870 .sampledImageDepthSampleCounts = sample_counts,
871 .sampledImageStencilSampleCounts = sample_counts,
872 .storageImageSampleCounts = VK_SAMPLE_COUNT_1_BIT,
873 .maxSampleMaskWords = 1,
874 .timestampComputeAndGraphics = true,
875 .timestampPeriod = 1000000000.0 / 19200000.0, /* CP_ALWAYS_ON_COUNTER is fixed 19.2MHz */
876 .maxClipDistances = 8,
877 .maxCullDistances = 8,
878 .maxCombinedClipAndCullDistances = 8,
879 .discreteQueuePriorities = 1,
880 .pointSizeRange = { 1, 4092 },
881 .lineWidthRange = { 0.0, 7.9921875 },
882 .pointSizeGranularity = 0.0625,
883 .lineWidthGranularity = (1.0 / 128.0),
884 .strictLines = false, /* FINISHME */
885 .standardSampleLocations = true,
886 .optimalBufferCopyOffsetAlignment = 128,
887 .optimalBufferCopyRowPitchAlignment = 128,
888 .nonCoherentAtomSize = 64,
889 };
890
891 *pProperties = (VkPhysicalDeviceProperties) {
892 .apiVersion = tu_physical_device_api_version(pdevice),
893 .driverVersion = vk_get_driver_version(),
894 .vendorID = 0, /* TODO */
895 .deviceID = 0,
896 .deviceType = VK_PHYSICAL_DEVICE_TYPE_INTEGRATED_GPU,
897 .limits = limits,
898 .sparseProperties = { 0 },
899 };
900
901 strcpy(pProperties->deviceName, pdevice->name);
902 memcpy(pProperties->pipelineCacheUUID, pdevice->cache_uuid, VK_UUID_SIZE);
903 }
904
905 void
906 tu_GetPhysicalDeviceProperties2(VkPhysicalDevice physicalDevice,
907 VkPhysicalDeviceProperties2 *pProperties)
908 {
909 TU_FROM_HANDLE(tu_physical_device, pdevice, physicalDevice);
910 tu_GetPhysicalDeviceProperties(physicalDevice, &pProperties->properties);
911
912 vk_foreach_struct(ext, pProperties->pNext)
913 {
914 switch (ext->sType) {
915 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PUSH_DESCRIPTOR_PROPERTIES_KHR: {
916 VkPhysicalDevicePushDescriptorPropertiesKHR *properties =
917 (VkPhysicalDevicePushDescriptorPropertiesKHR *) ext;
918 properties->maxPushDescriptors = MAX_PUSH_DESCRIPTORS;
919 break;
920 }
921 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_ID_PROPERTIES: {
922 VkPhysicalDeviceIDProperties *properties =
923 (VkPhysicalDeviceIDProperties *) ext;
924 memcpy(properties->driverUUID, pdevice->driver_uuid, VK_UUID_SIZE);
925 memcpy(properties->deviceUUID, pdevice->device_uuid, VK_UUID_SIZE);
926 properties->deviceLUIDValid = false;
927 break;
928 }
929 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MULTIVIEW_PROPERTIES: {
930 VkPhysicalDeviceMultiviewProperties *properties =
931 (VkPhysicalDeviceMultiviewProperties *) ext;
932 properties->maxMultiviewViewCount = MAX_VIEWS;
933 properties->maxMultiviewInstanceIndex = INT_MAX;
934 break;
935 }
936 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_POINT_CLIPPING_PROPERTIES: {
937 VkPhysicalDevicePointClippingProperties *properties =
938 (VkPhysicalDevicePointClippingProperties *) ext;
939 properties->pointClippingBehavior =
940 VK_POINT_CLIPPING_BEHAVIOR_ALL_CLIP_PLANES;
941 break;
942 }
943 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MAINTENANCE_3_PROPERTIES: {
944 VkPhysicalDeviceMaintenance3Properties *properties =
945 (VkPhysicalDeviceMaintenance3Properties *) ext;
946 /* Make sure everything is addressable by a signed 32-bit int, and
947 * our largest descriptors are 96 bytes. */
948 properties->maxPerSetDescriptors = (1ull << 31) / 96;
949 /* Our buffer size fields allow only this much */
950 properties->maxMemoryAllocationSize = 0xFFFFFFFFull;
951 break;
952 }
953 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TRANSFORM_FEEDBACK_PROPERTIES_EXT: {
954 VkPhysicalDeviceTransformFeedbackPropertiesEXT *properties =
955 (VkPhysicalDeviceTransformFeedbackPropertiesEXT *)ext;
956
957 properties->maxTransformFeedbackStreams = IR3_MAX_SO_STREAMS;
958 properties->maxTransformFeedbackBuffers = IR3_MAX_SO_BUFFERS;
959 properties->maxTransformFeedbackBufferSize = UINT32_MAX;
960 properties->maxTransformFeedbackStreamDataSize = 512;
961 properties->maxTransformFeedbackBufferDataSize = 512;
962 properties->maxTransformFeedbackBufferDataStride = 512;
963 properties->transformFeedbackQueries = true;
964 properties->transformFeedbackStreamsLinesTriangles = false;
965 properties->transformFeedbackRasterizationStreamSelect = false;
966 properties->transformFeedbackDraw = true;
967 break;
968 }
969 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SAMPLE_LOCATIONS_PROPERTIES_EXT: {
970 VkPhysicalDeviceSampleLocationsPropertiesEXT *properties =
971 (VkPhysicalDeviceSampleLocationsPropertiesEXT *)ext;
972 properties->sampleLocationSampleCounts = 0;
973 if (pdevice->supported_extensions.EXT_sample_locations) {
974 properties->sampleLocationSampleCounts =
975 VK_SAMPLE_COUNT_1_BIT | VK_SAMPLE_COUNT_2_BIT | VK_SAMPLE_COUNT_4_BIT;
976 }
977 properties->maxSampleLocationGridSize = (VkExtent2D) { 1 , 1 };
978 properties->sampleLocationCoordinateRange[0] = 0.0f;
979 properties->sampleLocationCoordinateRange[1] = 0.9375f;
980 properties->sampleLocationSubPixelBits = 4;
981 properties->variableSampleLocations = true;
982 break;
983 }
984 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SAMPLER_FILTER_MINMAX_PROPERTIES: {
985 VkPhysicalDeviceSamplerFilterMinmaxProperties *properties =
986 (VkPhysicalDeviceSamplerFilterMinmaxProperties *)ext;
987 properties->filterMinmaxImageComponentMapping = true;
988 properties->filterMinmaxSingleComponentFormats = true;
989 break;
990 }
991 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SUBGROUP_PROPERTIES: {
992 VkPhysicalDeviceSubgroupProperties *properties =
993 (VkPhysicalDeviceSubgroupProperties *)ext;
994 properties->subgroupSize = 64;
995 properties->supportedStages = VK_SHADER_STAGE_COMPUTE_BIT;
996 properties->supportedOperations = VK_SUBGROUP_FEATURE_BASIC_BIT |
997 VK_SUBGROUP_FEATURE_VOTE_BIT;
998 properties->quadOperationsInAllStages = false;
999 break;
1000 }
1001 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VERTEX_ATTRIBUTE_DIVISOR_PROPERTIES_EXT: {
1002 VkPhysicalDeviceVertexAttributeDivisorPropertiesEXT *props =
1003 (VkPhysicalDeviceVertexAttributeDivisorPropertiesEXT *)ext;
1004 props->maxVertexAttribDivisor = UINT32_MAX;
1005 break;
1006 }
1007 default:
1008 break;
1009 }
1010 }
1011 }
1012
1013 static const VkQueueFamilyProperties tu_queue_family_properties = {
1014 .queueFlags =
1015 VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT | VK_QUEUE_TRANSFER_BIT,
1016 .queueCount = 1,
1017 .timestampValidBits = 48,
1018 .minImageTransferGranularity = { 1, 1, 1 },
1019 };
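/* Turnip exposes a single queue family with one queue covering graphics,
 * compute and transfer; timestamp queries report 48 valid bits. */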
1020
1021 void
1022 tu_GetPhysicalDeviceQueueFamilyProperties(
1023 VkPhysicalDevice physicalDevice,
1024 uint32_t *pQueueFamilyPropertyCount,
1025 VkQueueFamilyProperties *pQueueFamilyProperties)
1026 {
1027 VK_OUTARRAY_MAKE(out, pQueueFamilyProperties, pQueueFamilyPropertyCount);
1028
1029 vk_outarray_append(&out, p) { *p = tu_queue_family_properties; }
1030 }
1031
1032 void
1033 tu_GetPhysicalDeviceQueueFamilyProperties2(
1034 VkPhysicalDevice physicalDevice,
1035 uint32_t *pQueueFamilyPropertyCount,
1036 VkQueueFamilyProperties2 *pQueueFamilyProperties)
1037 {
1038 VK_OUTARRAY_MAKE(out, pQueueFamilyProperties, pQueueFamilyPropertyCount);
1039
1040 vk_outarray_append(&out, p)
1041 {
1042 p->queueFamilyProperties = tu_queue_family_properties;
1043 }
1044 }
1045
1046 static uint64_t
1047 tu_get_system_heap_size()
1048 {
1049 struct sysinfo info;
1050 sysinfo(&info);
1051
1052 uint64_t total_ram = (uint64_t) info.totalram * (uint64_t) info.mem_unit;
1053
1054 /* We don't want to burn too much ram with the GPU. If the user has 4GiB
1055 * or less, we use at most half. If they have more than 4GiB, we use 3/4.
1056 */
1057 uint64_t available_ram;
1058 if (total_ram <= 4ull * 1024ull * 1024ull * 1024ull)
1059 available_ram = total_ram / 2;
1060 else
1061 available_ram = total_ram * 3 / 4;
1062
1063 return available_ram;
1064 }
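/* Example: a board with 8 GiB of RAM advertises a 6 GiB heap (3/4 of
 * total), while a 4 GiB board advertises 2 GiB (half). */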
1065
1066 void
1067 tu_GetPhysicalDeviceMemoryProperties(
1068 VkPhysicalDevice physicalDevice,
1069 VkPhysicalDeviceMemoryProperties *pMemoryProperties)
1070 {
1071 pMemoryProperties->memoryHeapCount = 1;
1072 pMemoryProperties->memoryHeaps[0].size = tu_get_system_heap_size();
1073 pMemoryProperties->memoryHeaps[0].flags = VK_MEMORY_HEAP_DEVICE_LOCAL_BIT;
1074
1075 pMemoryProperties->memoryTypeCount = 1;
1076 pMemoryProperties->memoryTypes[0].propertyFlags =
1077 VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT |
1078 VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT |
1079 VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
1080 pMemoryProperties->memoryTypes[0].heapIndex = 0;
1081 }
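/* A single heap/type pair: every allocation is device-local, host-visible
 * and host-coherent system memory, with the heap sized by the RAM heuristic
 * above. */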
1082
1083 void
1084 tu_GetPhysicalDeviceMemoryProperties2(
1085 VkPhysicalDevice physicalDevice,
1086 VkPhysicalDeviceMemoryProperties2 *pMemoryProperties)
1087 {
1088 return tu_GetPhysicalDeviceMemoryProperties(
1089 physicalDevice, &pMemoryProperties->memoryProperties);
1090 }
1091
1092 static VkResult
1093 tu_queue_init(struct tu_device *device,
1094 struct tu_queue *queue,
1095 uint32_t queue_family_index,
1096 int idx,
1097 VkDeviceQueueCreateFlags flags)
1098 {
1099 queue->_loader_data.loaderMagic = ICD_LOADER_MAGIC;
1100 queue->device = device;
1101 queue->queue_family_index = queue_family_index;
1102 queue->queue_idx = idx;
1103 queue->flags = flags;
1104
1105 int ret = tu_drm_submitqueue_new(device, 0, &queue->msm_queue_id);
1106 if (ret)
1107 return VK_ERROR_INITIALIZATION_FAILED;
1108
1109 tu_fence_init(&queue->submit_fence, false);
1110
1111 return VK_SUCCESS;
1112 }
1113
1114 static void
1115 tu_queue_finish(struct tu_queue *queue)
1116 {
1117 tu_fence_finish(&queue->submit_fence);
1118 tu_drm_submitqueue_close(queue->device, queue->msm_queue_id);
1119 }
1120
1121 static int
1122 tu_get_device_extension_index(const char *name)
1123 {
1124 for (unsigned i = 0; i < TU_DEVICE_EXTENSION_COUNT; ++i) {
1125 if (strcmp(name, tu_device_extensions[i].extensionName) == 0)
1126 return i;
1127 }
1128 return -1;
1129 }
1130
1131 struct PACKED bcolor_entry {
1132 uint32_t fp32[4];
1133 uint16_t ui16[4];
1134 int16_t si16[4];
1135 uint16_t fp16[4];
1136 uint16_t rgb565;
1137 uint16_t rgb5a1;
1138 uint16_t rgba4;
1139 uint8_t __pad0[2];
1140 uint8_t ui8[4];
1141 int8_t si8[4];
1142 uint32_t rgb10a2;
1143 uint32_t z24; /* also s8? */
1144 uint16_t srgb[4]; /* appears to duplicate fp16[], but clamped, used for srgb */
1145 uint8_t __pad1[56];
1146 } border_color[] = {
1147 [VK_BORDER_COLOR_FLOAT_TRANSPARENT_BLACK] = {},
1148 [VK_BORDER_COLOR_INT_TRANSPARENT_BLACK] = {},
1149 [VK_BORDER_COLOR_FLOAT_OPAQUE_BLACK] = {
1150 .fp32[3] = 0x3f800000,
1151 .ui16[3] = 0xffff,
1152 .si16[3] = 0x7fff,
1153 .fp16[3] = 0x3c00,
1154 .rgb5a1 = 0x8000,
1155 .rgba4 = 0xf000,
1156 .ui8[3] = 0xff,
1157 .si8[3] = 0x7f,
1158 .rgb10a2 = 0xc0000000,
1159 .srgb[3] = 0x3c00,
1160 },
1161 [VK_BORDER_COLOR_INT_OPAQUE_BLACK] = {
1162 .fp32[3] = 1,
1163 .fp16[3] = 1,
1164 },
1165 [VK_BORDER_COLOR_FLOAT_OPAQUE_WHITE] = {
1166 .fp32[0 ... 3] = 0x3f800000,
1167 .ui16[0 ... 3] = 0xffff,
1168 .si16[0 ... 3] = 0x7fff,
1169 .fp16[0 ... 3] = 0x3c00,
1170 .rgb565 = 0xffff,
1171 .rgb5a1 = 0xffff,
1172 .rgba4 = 0xffff,
1173 .ui8[0 ... 3] = 0xff,
1174 .si8[0 ... 3] = 0x7f,
1175 .rgb10a2 = 0xffffffff,
1176 .z24 = 0xffffff,
1177 .srgb[0 ... 3] = 0x3c00,
1178 },
1179 [VK_BORDER_COLOR_INT_OPAQUE_WHITE] = {
1180 .fp32[0 ... 3] = 1,
1181 .fp16[0 ... 3] = 1,
1182 },
1183 };
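/* One pre-packed entry per VkBorderColor: the same border color is stored in
 * every layout the hardware may sample (fp32, fp16, snorm/unorm 8/16-bit,
 * 565/5551/4444, rgb10a2, z24 and srgb), and the whole table is copied into
 * the global BO during tu_CreateDevice below. */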
1184
1185 VkResult
1186 tu_CreateDevice(VkPhysicalDevice physicalDevice,
1187 const VkDeviceCreateInfo *pCreateInfo,
1188 const VkAllocationCallbacks *pAllocator,
1189 VkDevice *pDevice)
1190 {
1191 TU_FROM_HANDLE(tu_physical_device, physical_device, physicalDevice);
1192 VkResult result;
1193 struct tu_device *device;
1194
1195 /* Check enabled features */
1196 if (pCreateInfo->pEnabledFeatures) {
1197 VkPhysicalDeviceFeatures supported_features;
1198 tu_GetPhysicalDeviceFeatures(physicalDevice, &supported_features);
1199 VkBool32 *supported_feature = (VkBool32 *) &supported_features;
1200 VkBool32 *enabled_feature = (VkBool32 *) pCreateInfo->pEnabledFeatures;
1201 unsigned num_features =
1202 sizeof(VkPhysicalDeviceFeatures) / sizeof(VkBool32);
1203 for (uint32_t i = 0; i < num_features; i++) {
1204 if (enabled_feature[i] && !supported_feature[i])
1205 return vk_error(physical_device->instance,
1206 VK_ERROR_FEATURE_NOT_PRESENT);
1207 }
1208 }
1209
1210 device = vk_zalloc2(&physical_device->instance->alloc, pAllocator,
1211 sizeof(*device), 8, VK_SYSTEM_ALLOCATION_SCOPE_DEVICE);
1212 if (!device)
1213 return vk_error(physical_device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
1214
1215 device->_loader_data.loaderMagic = ICD_LOADER_MAGIC;
1216 device->instance = physical_device->instance;
1217 device->physical_device = physical_device;
1218 device->_lost = false;
1219
1220 if (pAllocator)
1221 device->alloc = *pAllocator;
1222 else
1223 device->alloc = physical_device->instance->alloc;
1224
1225 for (uint32_t i = 0; i < pCreateInfo->enabledExtensionCount; i++) {
1226 const char *ext_name = pCreateInfo->ppEnabledExtensionNames[i];
1227 int index = tu_get_device_extension_index(ext_name);
1228 if (index < 0 ||
1229 !physical_device->supported_extensions.extensions[index]) {
1230 vk_free(&device->alloc, device);
1231 return vk_error(physical_device->instance,
1232 VK_ERROR_EXTENSION_NOT_PRESENT);
1233 }
1234
1235 device->enabled_extensions.extensions[index] = true;
1236 }
1237
1238 for (unsigned i = 0; i < pCreateInfo->queueCreateInfoCount; i++) {
1239 const VkDeviceQueueCreateInfo *queue_create =
1240 &pCreateInfo->pQueueCreateInfos[i];
1241 uint32_t qfi = queue_create->queueFamilyIndex;
1242 device->queues[qfi] = vk_alloc(
1243 &device->alloc, queue_create->queueCount * sizeof(struct tu_queue),
1244 8, VK_SYSTEM_ALLOCATION_SCOPE_DEVICE);
1245 if (!device->queues[qfi]) {
1246 result = VK_ERROR_OUT_OF_HOST_MEMORY;
1247 goto fail_queues;
1248 }
1249
1250 memset(device->queues[qfi], 0,
1251 queue_create->queueCount * sizeof(struct tu_queue));
1252
1253 device->queue_count[qfi] = queue_create->queueCount;
1254
1255 for (unsigned q = 0; q < queue_create->queueCount; q++) {
1256 result = tu_queue_init(device, &device->queues[qfi][q], qfi, q,
1257 queue_create->flags);
1258 if (result != VK_SUCCESS)
1259 goto fail_queues;
1260 }
1261 }
1262
1263 device->compiler = ir3_compiler_create(NULL, physical_device->gpu_id);
1264 if (!device->compiler) {
1265 result = VK_ERROR_INITIALIZATION_FAILED; goto fail_queues; }
1266
1267 /* initial sizes, these will increase if there is overflow */
1268 device->vsc_draw_strm_pitch = 0x1000 + VSC_PAD;
1269 device->vsc_prim_strm_pitch = 0x4000 + VSC_PAD;
1270
1271 STATIC_ASSERT(sizeof(border_color) == sizeof(((struct tu6_global*) 0)->border_color));
1272 result = tu_bo_init_new(device, &device->global_bo, sizeof(struct tu6_global));
1273 if (result != VK_SUCCESS)
1274 goto fail_global_bo;
1275
1276 result = tu_bo_map(device, &device->global_bo);
1277 if (result != VK_SUCCESS)
1278 goto fail_global_bo_map;
1279
1280 memcpy(device->global_bo.map + gb_offset(border_color), border_color, sizeof(border_color));
1281 tu_init_clear_blit_shaders(device->global_bo.map);
1282
1283 VkPipelineCacheCreateInfo ci;
1284 ci.sType = VK_STRUCTURE_TYPE_PIPELINE_CACHE_CREATE_INFO;
1285 ci.pNext = NULL;
1286 ci.flags = 0;
1287 ci.pInitialData = NULL;
1288 ci.initialDataSize = 0;
1289 VkPipelineCache pc;
1290 result =
1291 tu_CreatePipelineCache(tu_device_to_handle(device), &ci, NULL, &pc);
1292 if (result != VK_SUCCESS)
1293 goto fail_pipeline_cache;
1294
1295 device->mem_cache = tu_pipeline_cache_from_handle(pc);
1296
1297 for (unsigned i = 0; i < ARRAY_SIZE(device->scratch_bos); i++)
1298 mtx_init(&device->scratch_bos[i].construct_mtx, mtx_plain);
1299
1300 mtx_init(&device->vsc_pitch_mtx, mtx_plain);
1301
1302 *pDevice = tu_device_to_handle(device);
1303 return VK_SUCCESS;
1304
1305 fail_pipeline_cache:
1306 fail_global_bo_map:
1307 tu_bo_finish(device, &device->global_bo);
1308
1309 fail_global_bo:
1310 ralloc_free(device->compiler);
1311
1312 fail_queues:
1313 for (unsigned i = 0; i < TU_MAX_QUEUE_FAMILIES; i++) {
1314 for (unsigned q = 0; q < device->queue_count[i]; q++)
1315 tu_queue_finish(&device->queues[i][q]);
1316 if (device->queue_count[i])
1317 vk_free(&device->alloc, device->queues[i]);
1318 }
1319
1320 vk_free(&device->alloc, device);
1321 return result;
1322 }
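/* The fail_* labels above unwind in reverse order of setup: pipeline-cache
 * and map failures release the global BO, BO-allocation failure frees the
 * ir3 compiler, and everything falls through to queue teardown before the
 * device struct itself is freed. */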
1323
1324 void
1325 tu_DestroyDevice(VkDevice _device, const VkAllocationCallbacks *pAllocator)
1326 {
1327 TU_FROM_HANDLE(tu_device, device, _device);
1328
1329 if (!device)
1330 return;
1331
1332 for (unsigned i = 0; i < TU_MAX_QUEUE_FAMILIES; i++) {
1333 for (unsigned q = 0; q < device->queue_count[i]; q++)
1334 tu_queue_finish(&device->queues[i][q]);
1335 if (device->queue_count[i])
1336 vk_free(&device->alloc, device->queues[i]);
1337 }
1338
1339 for (unsigned i = 0; i < ARRAY_SIZE(device->scratch_bos); i++) {
1340 if (device->scratch_bos[i].initialized)
1341 tu_bo_finish(device, &device->scratch_bos[i].bo);
1342 }
1343
1344 ir3_compiler_destroy(device->compiler);
1345
1346 VkPipelineCache pc = tu_pipeline_cache_to_handle(device->mem_cache);
1347 tu_DestroyPipelineCache(tu_device_to_handle(device), pc, NULL);
1348
1349 vk_free(&device->alloc, device);
1350 }
1351
1352 VkResult
1353 _tu_device_set_lost(struct tu_device *device,
1354 const char *file, int line,
1355 const char *msg, ...)
1356 {
1357 /* Set the flag indicating that waits should return in finite time even
1358 * after device loss.
1359 */
1360 p_atomic_inc(&device->_lost);
1361
1362 /* TODO: Report the log message through VkDebugReportCallbackEXT instead */
1363 fprintf(stderr, "%s:%d: ", file, line);
1364 va_list ap;
1365 va_start(ap, msg);
1366 vfprintf(stderr, msg, ap);
1367 va_end(ap);
1368
1369 if (env_var_as_boolean("TU_ABORT_ON_DEVICE_LOSS", false))
1370 abort();
1371
1372 return VK_ERROR_DEVICE_LOST;
1373 }
1374
1375 VkResult
1376 tu_get_scratch_bo(struct tu_device *dev, uint64_t size, struct tu_bo **bo)
1377 {
1378 unsigned size_log2 = MAX2(util_logbase2_ceil64(size), MIN_SCRATCH_BO_SIZE_LOG2);
1379 unsigned index = size_log2 - MIN_SCRATCH_BO_SIZE_LOG2;
1380 assert(index < ARRAY_SIZE(dev->scratch_bos));
1381
1382 for (unsigned i = index; i < ARRAY_SIZE(dev->scratch_bos); i++) {
1383 if (p_atomic_read(&dev->scratch_bos[i].initialized)) {
1384 /* Fast path: just return the already-allocated BO. */
1385 *bo = &dev->scratch_bos[i].bo;
1386 return VK_SUCCESS;
1387 }
1388 }
1389
1390 /* Slow path: actually allocate the BO. We take the per-bucket lock so that
1391 * two threads racing for the same size don't allocate the BO twice, while
1392 * allocations of other sizes can still proceed in parallel.
1393 */
1394 mtx_lock(&dev->scratch_bos[index].construct_mtx);
1395
1396 /* Another thread may have allocated it already while we were waiting on
1397 * the lock. We need to check this in order to avoid double-allocating.
1398 */
1399 if (dev->scratch_bos[index].initialized) {
1400 mtx_unlock(&dev->scratch_bos[index].construct_mtx);
1401 *bo = &dev->scratch_bos[index].bo;
1402 return VK_SUCCESS;
1403 }
1404
1405 unsigned bo_size = 1ull << size_log2;
1406 VkResult result = tu_bo_init_new(dev, &dev->scratch_bos[index].bo, bo_size);
1407 if (result != VK_SUCCESS) {
1408 mtx_unlock(&dev->scratch_bos[index].construct_mtx);
1409 return result;
1410 }
1411
1412 p_atomic_set(&dev->scratch_bos[index].initialized, true);
1413
1414 mtx_unlock(&dev->scratch_bos[index].construct_mtx);
1415
1416 *bo = &dev->scratch_bos[index].bo;
1417 return VK_SUCCESS;
1418 }
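/* Example (assuming MIN_SCRATCH_BO_SIZE_LOG2 is 12): a 100 KiB request maps
 * to the 128 KiB (2^17) bucket at index 5; once that bucket is initialized,
 * later requests of up to 128 KiB can return it on the lock-free fast path
 * above. */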
1419
1420 VkResult
1421 tu_EnumerateInstanceLayerProperties(uint32_t *pPropertyCount,
1422 VkLayerProperties *pProperties)
1423 {
1424 *pPropertyCount = 0;
1425 return VK_SUCCESS;
1426 }
1427
1428 VkResult
1429 tu_EnumerateDeviceLayerProperties(VkPhysicalDevice physicalDevice,
1430 uint32_t *pPropertyCount,
1431 VkLayerProperties *pProperties)
1432 {
1433 *pPropertyCount = 0;
1434 return VK_SUCCESS;
1435 }
1436
1437 void
1438 tu_GetDeviceQueue2(VkDevice _device,
1439 const VkDeviceQueueInfo2 *pQueueInfo,
1440 VkQueue *pQueue)
1441 {
1442 TU_FROM_HANDLE(tu_device, device, _device);
1443 struct tu_queue *queue;
1444
1445 queue =
1446 &device->queues[pQueueInfo->queueFamilyIndex][pQueueInfo->queueIndex];
1447 if (pQueueInfo->flags != queue->flags) {
1448 /* From the Vulkan 1.1.70 spec:
1449 *
1450 * "The queue returned by vkGetDeviceQueue2 must have the same
1451 * flags value from this structure as that used at device
1452 * creation time in a VkDeviceQueueCreateInfo instance. If no
1453 * matching flags were specified at device creation time then
1454 * pQueue will return VK_NULL_HANDLE."
1455 */
1456 *pQueue = VK_NULL_HANDLE;
1457 return;
1458 }
1459
1460 *pQueue = tu_queue_to_handle(queue);
1461 }
1462
1463 void
1464 tu_GetDeviceQueue(VkDevice _device,
1465 uint32_t queueFamilyIndex,
1466 uint32_t queueIndex,
1467 VkQueue *pQueue)
1468 {
1469 const VkDeviceQueueInfo2 info =
1470 (VkDeviceQueueInfo2) { .sType = VK_STRUCTURE_TYPE_DEVICE_QUEUE_INFO_2,
1471 .queueFamilyIndex = queueFamilyIndex,
1472 .queueIndex = queueIndex };
1473
1474 tu_GetDeviceQueue2(_device, &info, pQueue);
1475 }
1476
1477 static VkResult
1478 tu_get_semaphore_syncobjs(const VkSemaphore *sems,
1479 uint32_t sem_count,
1480 bool wait,
1481 struct drm_msm_gem_submit_syncobj **out,
1482 uint32_t *out_count)
1483 {
1484 uint32_t syncobj_count = 0;
1485 struct drm_msm_gem_submit_syncobj *syncobjs;
1486
1487 for (uint32_t i = 0; i < sem_count; ++i) {
1488 TU_FROM_HANDLE(tu_semaphore, sem, sems[i]);
1489
1490 struct tu_semaphore_part *part =
1491 sem->temporary.kind != TU_SEMAPHORE_NONE ?
1492 &sem->temporary : &sem->permanent;
1493
1494 if (part->kind == TU_SEMAPHORE_SYNCOBJ)
1495 ++syncobj_count;
1496 }
1497
1498 *out = NULL;
1499 *out_count = syncobj_count;
1500 if (!syncobj_count)
1501 return VK_SUCCESS;
1502
1503 *out = syncobjs = calloc(syncobj_count, sizeof (*syncobjs));
1504 if (!syncobjs)
1505 return VK_ERROR_OUT_OF_HOST_MEMORY;
1506
1507 for (uint32_t i = 0, j = 0; i < sem_count; ++i) {
1508 TU_FROM_HANDLE(tu_semaphore, sem, sems[i]);
1509
1510 struct tu_semaphore_part *part =
1511 sem->temporary.kind != TU_SEMAPHORE_NONE ?
1512 &sem->temporary : &sem->permanent;
1513
1514 if (part->kind == TU_SEMAPHORE_SYNCOBJ) {
1515 syncobjs[j].handle = part->syncobj;
1516 syncobjs[j].flags = wait ? MSM_SUBMIT_SYNCOBJ_RESET : 0;
1517 ++j;
1518 }
1519 }
1520
1521 return VK_SUCCESS;
1522 }
1523
1524
1525 static void
1526 tu_semaphores_remove_temp(struct tu_device *device,
1527 const VkSemaphore *sems,
1528 uint32_t sem_count)
1529 {
1530 for (uint32_t i = 0; i < sem_count; ++i) {
1531 TU_FROM_HANDLE(tu_semaphore, sem, sems[i]);
1532 tu_semaphore_remove_temp(device, sem);
1533 }
1534 }
1535
1536 VkResult
1537 tu_QueueSubmit(VkQueue _queue,
1538 uint32_t submitCount,
1539 const VkSubmitInfo *pSubmits,
1540 VkFence _fence)
1541 {
1542 TU_FROM_HANDLE(tu_queue, queue, _queue);
1543 VkResult result;
1544
1545 for (uint32_t i = 0; i < submitCount; ++i) {
1546 const VkSubmitInfo *submit = pSubmits + i;
1547 const bool last_submit = (i == submitCount - 1);
1548 struct drm_msm_gem_submit_syncobj *in_syncobjs = NULL, *out_syncobjs = NULL;
1549 uint32_t nr_in_syncobjs, nr_out_syncobjs;
1550 struct tu_bo_list bo_list;
1551 tu_bo_list_init(&bo_list);
1552
1553 result = tu_get_semaphore_syncobjs(pSubmits[i].pWaitSemaphores,
1554 pSubmits[i].waitSemaphoreCount,
1555 false, &in_syncobjs, &nr_in_syncobjs);
1556 if (result != VK_SUCCESS) {
1557 return tu_device_set_lost(queue->device,
1558 "failed to allocate space for semaphore submission\n");
1559 }
1560
1561 result = tu_get_semaphore_syncobjs(pSubmits[i].pSignalSemaphores,
1562 pSubmits[i].signalSemaphoreCount,
1563 false, &out_syncobjs, &nr_out_syncobjs);
1564 if (result != VK_SUCCESS) {
1565 free(in_syncobjs);
1566 return tu_device_set_lost(queue->device,
1567 "failed to allocate space for semaphore submission\n");
1568 }
1569
1570 uint32_t entry_count = 0;
1571 for (uint32_t j = 0; j < submit->commandBufferCount; ++j) {
1572 TU_FROM_HANDLE(tu_cmd_buffer, cmdbuf, submit->pCommandBuffers[j]);
1573 entry_count += cmdbuf->cs.entry_count;
1574 }
1575
1576 struct drm_msm_gem_submit_cmd cmds[entry_count];
1577 uint32_t entry_idx = 0;
1578 for (uint32_t j = 0; j < submit->commandBufferCount; ++j) {
1579 TU_FROM_HANDLE(tu_cmd_buffer, cmdbuf, submit->pCommandBuffers[j]);
1580 struct tu_cs *cs = &cmdbuf->cs;
1581 for (unsigned i = 0; i < cs->entry_count; ++i, ++entry_idx) {
1582 cmds[entry_idx].type = MSM_SUBMIT_CMD_BUF;
1583 cmds[entry_idx].submit_idx =
1584 tu_bo_list_add(&bo_list, cs->entries[i].bo,
1585 MSM_SUBMIT_BO_READ | MSM_SUBMIT_BO_DUMP);
1586 cmds[entry_idx].submit_offset = cs->entries[i].offset;
1587 cmds[entry_idx].size = cs->entries[i].size;
1588 cmds[entry_idx].pad = 0;
1589 cmds[entry_idx].nr_relocs = 0;
1590 cmds[entry_idx].relocs = 0;
1591 }
1592
1593 tu_bo_list_merge(&bo_list, &cmdbuf->bo_list);
1594 }
1595
1596 uint32_t flags = MSM_PIPE_3D0;
1597 if (nr_in_syncobjs) {
1598 flags |= MSM_SUBMIT_SYNCOBJ_IN;
1599 }
1600 if (nr_out_syncobjs) {
1601 flags |= MSM_SUBMIT_SYNCOBJ_OUT;
1602 }
1603
1604 if (last_submit) {
1605 flags |= MSM_SUBMIT_FENCE_FD_OUT;
1606 }
1607
1608 struct drm_msm_gem_submit req = {
1609 .flags = flags,
1610 .queueid = queue->msm_queue_id,
1611 .bos = (uint64_t)(uintptr_t) bo_list.bo_infos,
1612 .nr_bos = bo_list.count,
1613 .cmds = (uint64_t)(uintptr_t)cmds,
1614 .nr_cmds = entry_count,
1615 .in_syncobjs = (uint64_t)(uintptr_t)in_syncobjs,
1616 .out_syncobjs = (uint64_t)(uintptr_t)out_syncobjs,
1617 .nr_in_syncobjs = nr_in_syncobjs,
1618 .nr_out_syncobjs = nr_out_syncobjs,
1619 .syncobj_stride = sizeof(struct drm_msm_gem_submit_syncobj),
1620 };
1621
1622 int ret = drmCommandWriteRead(queue->device->physical_device->local_fd,
1623 DRM_MSM_GEM_SUBMIT,
1624 &req, sizeof(req));
1625 if (ret) {
1626 free(in_syncobjs);
1627 free(out_syncobjs);
1628 return tu_device_set_lost(queue->device, "submit failed: %s\n",
1629 strerror(errno));
1630 }
1631
1632 tu_bo_list_destroy(&bo_list);
1633 free(in_syncobjs);
1634 free(out_syncobjs);
1635
1636 tu_semaphores_remove_temp(queue->device, pSubmits[i].pWaitSemaphores,
1637 pSubmits[i].waitSemaphoreCount);
1638 if (last_submit) {
1639 /* no need to merge fences as queue execution is serialized; only the
1640 * last submit requests a fence fd (MSM_SUBMIT_FENCE_FD_OUT), so there
1641 * is nothing to close for earlier submits */
1642 tu_fence_update_fd(&queue->submit_fence, req.fence_fd);
1643 }
1644 }
1645
1646 if (_fence != VK_NULL_HANDLE) {
1647 TU_FROM_HANDLE(tu_fence, fence, _fence);
1648 tu_fence_copy(fence, &queue->submit_fence);
1649 }
1650
1651 return VK_SUCCESS;
1652 }
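/* Each VkSubmitInfo above becomes one DRM_MSM_GEM_SUBMIT ioctl: the IB
 * entries of every command buffer are flattened into drm_msm_gem_submit_cmd
 * records, BO lists are merged, wait/signal semaphores are passed as
 * syncobjs, and only the last submit requests a fence fd, which backs the
 * queue's submit fence and any VkFence passed in. */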
1653
1654 VkResult
1655 tu_QueueWaitIdle(VkQueue _queue)
1656 {
1657 TU_FROM_HANDLE(tu_queue, queue, _queue);
1658
1659 if (tu_device_is_lost(queue->device))
1660 return VK_ERROR_DEVICE_LOST;
1661
1662 tu_fence_wait_idle(&queue->submit_fence);
1663
1664 return VK_SUCCESS;
1665 }
1666
1667 VkResult
1668 tu_DeviceWaitIdle(VkDevice _device)
1669 {
1670 TU_FROM_HANDLE(tu_device, device, _device);
1671
1672 if (tu_device_is_lost(device))
1673 return VK_ERROR_DEVICE_LOST;
1674
1675 for (unsigned i = 0; i < TU_MAX_QUEUE_FAMILIES; i++) {
1676 for (unsigned q = 0; q < device->queue_count[i]; q++) {
1677 tu_QueueWaitIdle(tu_queue_to_handle(&device->queues[i][q]));
1678 }
1679 }
1680 return VK_SUCCESS;
1681 }
1682
1683 VkResult
1684 tu_EnumerateInstanceExtensionProperties(const char *pLayerName,
1685 uint32_t *pPropertyCount,
1686 VkExtensionProperties *pProperties)
1687 {
1688 VK_OUTARRAY_MAKE(out, pProperties, pPropertyCount);
1689
1690 /* We support no layers */
1691 if (pLayerName)
1692 return vk_error(NULL, VK_ERROR_LAYER_NOT_PRESENT);
1693
1694 for (int i = 0; i < TU_INSTANCE_EXTENSION_COUNT; i++) {
1695 if (tu_instance_extensions_supported.extensions[i]) {
1696 vk_outarray_append(&out, prop) { *prop = tu_instance_extensions[i]; }
1697 }
1698 }
1699
1700 return vk_outarray_status(&out);
1701 }
1702
1703 VkResult
1704 tu_EnumerateDeviceExtensionProperties(VkPhysicalDevice physicalDevice,
1705 const char *pLayerName,
1706 uint32_t *pPropertyCount,
1707 VkExtensionProperties *pProperties)
1708 {
1710 TU_FROM_HANDLE(tu_physical_device, device, physicalDevice);
1711 VK_OUTARRAY_MAKE(out, pProperties, pPropertyCount);
1712 
1713 /* We support no layers */
1714 if (pLayerName)
1715 return vk_error(NULL, VK_ERROR_LAYER_NOT_PRESENT);
1716
1717 for (int i = 0; i < TU_DEVICE_EXTENSION_COUNT; i++) {
1718 if (device->supported_extensions.extensions[i]) {
1719 vk_outarray_append(&out, prop) { *prop = tu_device_extensions[i]; }
1720 }
1721 }
1722
1723 return vk_outarray_status(&out);
1724 }
1725
1726 PFN_vkVoidFunction
1727 tu_GetInstanceProcAddr(VkInstance _instance, const char *pName)
1728 {
1729 TU_FROM_HANDLE(tu_instance, instance, _instance);
1730
1731 return tu_lookup_entrypoint_checked(
1732 pName, instance ? instance->api_version : 0,
1733 instance ? &instance->enabled_extensions : NULL, NULL);
1734 }
1735
1736 /* The loader wants us to expose a second GetInstanceProcAddr function
1737 * to work around certain LD_PRELOAD issues seen in apps.
1738 */
1739 PUBLIC
1740 VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL
1741 vk_icdGetInstanceProcAddr(VkInstance instance, const char *pName);
1742
1743 PUBLIC
1744 VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL
1745 vk_icdGetInstanceProcAddr(VkInstance instance, const char *pName)
1746 {
1747 return tu_GetInstanceProcAddr(instance, pName);
1748 }
1749
1750 PFN_vkVoidFunction
1751 tu_GetDeviceProcAddr(VkDevice _device, const char *pName)
1752 {
1753 TU_FROM_HANDLE(tu_device, device, _device);
1754
1755 return tu_lookup_entrypoint_checked(pName, device->instance->api_version,
1756 &device->instance->enabled_extensions,
1757 &device->enabled_extensions);
1758 }
1759
1760 static VkResult
1761 tu_alloc_memory(struct tu_device *device,
1762 const VkMemoryAllocateInfo *pAllocateInfo,
1763 const VkAllocationCallbacks *pAllocator,
1764 VkDeviceMemory *pMem)
1765 {
1766 struct tu_device_memory *mem;
1767 VkResult result;
1768
1769 assert(pAllocateInfo->sType == VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO);
1770
1771 if (pAllocateInfo->allocationSize == 0) {
1772 /* Apparently, this is allowed */
1773 *pMem = VK_NULL_HANDLE;
1774 return VK_SUCCESS;
1775 }
1776
1777 mem = vk_alloc2(&device->alloc, pAllocator, sizeof(*mem), 8,
1778 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
1779 if (mem == NULL)
1780 return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
1781
1782 const VkImportMemoryFdInfoKHR *fd_info =
1783 vk_find_struct_const(pAllocateInfo->pNext, IMPORT_MEMORY_FD_INFO_KHR);
1784 if (fd_info && !fd_info->handleType)
1785 fd_info = NULL;
1786
1787 if (fd_info) {
1788 assert(fd_info->handleType ==
1789 VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT ||
1790 fd_info->handleType ==
1791 VK_EXTERNAL_MEMORY_HANDLE_TYPE_DMA_BUF_BIT_EXT);
1792
1793 /*
1794 * TODO Importing the same fd twice gives us the same handle without
1795 * reference counting. We need to maintain a per-instance handle-to-bo
1796 * table and add reference count to tu_bo (see the illustrative sketch after this function).
1797 */
1798 result = tu_bo_init_dmabuf(device, &mem->bo,
1799 pAllocateInfo->allocationSize, fd_info->fd);
1800 if (result == VK_SUCCESS) {
1801 /* take ownership and close the fd */
1802 close(fd_info->fd);
1803 }
1804 } else {
1805 result =
1806 tu_bo_init_new(device, &mem->bo, pAllocateInfo->allocationSize);
1807 }
1808
1809 if (result != VK_SUCCESS) {
1810 vk_free2(&device->alloc, pAllocator, mem);
1811 return result;
1812 }
1813
1814 mem->size = pAllocateInfo->allocationSize;
1815 mem->type_index = pAllocateInfo->memoryTypeIndex;
1816
1817 mem->map = NULL;
1818 mem->user_ptr = NULL;
1819
1820 *pMem = tu_device_memory_to_handle(mem);
1821
1822 return VK_SUCCESS;
1823 }
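
/* Illustrative sketch only (kept out of the build with #if 0): one possible
 * shape for the per-instance handle-to-bo table mentioned in the TODO in
 * tu_alloc_memory() above.  It assumes a hypothetical refcount field on
 * struct tu_bo and a hypothetical per-instance `struct hash_table_u64
 * *bo_table`, reusing the util/hash_table.h helpers keyed by GEM handle.
 */
#if 0
static struct tu_bo *
tu_instance_lookup_bo(struct hash_table_u64 *bo_table, uint32_t gem_handle)
{
   /* Return the already-imported BO for this GEM handle, bumping its
    * (hypothetical) reference count, or NULL if it has not been seen yet.
    */
   struct tu_bo *bo = _mesa_hash_table_u64_search(bo_table, gem_handle);
   if (bo)
      p_atomic_inc(&bo->refcount);
   return bo;
}

static void
tu_instance_track_bo(struct hash_table_u64 *bo_table, struct tu_bo *bo)
{
   /* First import of this GEM handle: remember it for later imports. */
   p_atomic_set(&bo->refcount, 1);
   _mesa_hash_table_u64_insert(bo_table, bo->gem_handle, bo);
}
#endif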
1824
1825 VkResult
1826 tu_AllocateMemory(VkDevice _device,
1827 const VkMemoryAllocateInfo *pAllocateInfo,
1828 const VkAllocationCallbacks *pAllocator,
1829 VkDeviceMemory *pMem)
1830 {
1831 TU_FROM_HANDLE(tu_device, device, _device);
1832 return tu_alloc_memory(device, pAllocateInfo, pAllocator, pMem);
1833 }
1834
1835 void
1836 tu_FreeMemory(VkDevice _device,
1837 VkDeviceMemory _mem,
1838 const VkAllocationCallbacks *pAllocator)
1839 {
1840 TU_FROM_HANDLE(tu_device, device, _device);
1841 TU_FROM_HANDLE(tu_device_memory, mem, _mem);
1842
1843 if (mem == NULL)
1844 return;
1845
1846 tu_bo_finish(device, &mem->bo);
1847 vk_free2(&device->alloc, pAllocator, mem);
1848 }
1849
1850 VkResult
1851 tu_MapMemory(VkDevice _device,
1852 VkDeviceMemory _memory,
1853 VkDeviceSize offset,
1854 VkDeviceSize size,
1855 VkMemoryMapFlags flags,
1856 void **ppData)
1857 {
1858 TU_FROM_HANDLE(tu_device, device, _device);
1859 TU_FROM_HANDLE(tu_device_memory, mem, _memory);
1860 VkResult result;
1861
1862 if (mem == NULL) {
1863 *ppData = NULL;
1864 return VK_SUCCESS;
1865 }
1866
1867 if (mem->user_ptr) {
1868 *ppData = mem->user_ptr;
1869 } else if (!mem->map) {
1870 result = tu_bo_map(device, &mem->bo);
1871 if (result != VK_SUCCESS)
1872 return result;
1873 *ppData = mem->map = mem->bo.map;
1874 } else
1875 *ppData = mem->map;
1876
1877 if (*ppData) {
1878 *ppData += offset;
1879 return VK_SUCCESS;
1880 }
1881
1882 return vk_error(device->instance, VK_ERROR_MEMORY_MAP_FAILED);
1883 }
1884
1885 void
1886 tu_UnmapMemory(VkDevice _device, VkDeviceMemory _memory)
1887 {
1888 /* No-op: the mapping is kept until the BO is freed; the freedreno Gallium driver does not unmap either. */
1889 }
1890
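/* Memory is allocated write-combined and uncached on the CPU side
 * (MSM_BO_WC), so there is no CPU cache to flush or invalidate; both
 * entry points below are no-ops.
 */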
1891 VkResult
1892 tu_FlushMappedMemoryRanges(VkDevice _device,
1893 uint32_t memoryRangeCount,
1894 const VkMappedMemoryRange *pMemoryRanges)
1895 {
1896 return VK_SUCCESS;
1897 }
1898
1899 VkResult
1900 tu_InvalidateMappedMemoryRanges(VkDevice _device,
1901 uint32_t memoryRangeCount,
1902 const VkMappedMemoryRange *pMemoryRanges)
1903 {
1904 return VK_SUCCESS;
1905 }
1906
1907 void
1908 tu_GetBufferMemoryRequirements(VkDevice _device,
1909 VkBuffer _buffer,
1910 VkMemoryRequirements *pMemoryRequirements)
1911 {
1912 TU_FROM_HANDLE(tu_buffer, buffer, _buffer);
1913
1914 pMemoryRequirements->memoryTypeBits = 1;
1915 pMemoryRequirements->alignment = 64;
1916 pMemoryRequirements->size =
1917 align64(buffer->size, pMemoryRequirements->alignment);
1918 }
1919
1920 void
1921 tu_GetBufferMemoryRequirements2(
1922 VkDevice device,
1923 const VkBufferMemoryRequirementsInfo2 *pInfo,
1924 VkMemoryRequirements2 *pMemoryRequirements)
1925 {
1926 tu_GetBufferMemoryRequirements(device, pInfo->buffer,
1927 &pMemoryRequirements->memoryRequirements);
1928 }
1929
1930 void
1931 tu_GetImageMemoryRequirements(VkDevice _device,
1932 VkImage _image,
1933 VkMemoryRequirements *pMemoryRequirements)
1934 {
1935 TU_FROM_HANDLE(tu_image, image, _image);
1936
1937 pMemoryRequirements->memoryTypeBits = 1;
1938 pMemoryRequirements->size = image->total_size;
1939 pMemoryRequirements->alignment = image->layout[0].base_align;
1940 }
1941
1942 void
1943 tu_GetImageMemoryRequirements2(VkDevice device,
1944 const VkImageMemoryRequirementsInfo2 *pInfo,
1945 VkMemoryRequirements2 *pMemoryRequirements)
1946 {
1947 tu_GetImageMemoryRequirements(device, pInfo->image,
1948 &pMemoryRequirements->memoryRequirements);
1949 }
1950
1951 void
1952 tu_GetImageSparseMemoryRequirements(
1953 VkDevice device,
1954 VkImage image,
1955 uint32_t *pSparseMemoryRequirementCount,
1956 VkSparseImageMemoryRequirements *pSparseMemoryRequirements)
1957 {
1958 tu_stub();
1959 }
1960
1961 void
1962 tu_GetImageSparseMemoryRequirements2(
1963 VkDevice device,
1964 const VkImageSparseMemoryRequirementsInfo2 *pInfo,
1965 uint32_t *pSparseMemoryRequirementCount,
1966 VkSparseImageMemoryRequirements2 *pSparseMemoryRequirements)
1967 {
1968 tu_stub();
1969 }
1970
1971 void
1972 tu_GetDeviceMemoryCommitment(VkDevice device,
1973 VkDeviceMemory memory,
1974 VkDeviceSize *pCommittedMemoryInBytes)
1975 {
1976 *pCommittedMemoryInBytes = 0;
1977 }
1978
1979 VkResult
1980 tu_BindBufferMemory2(VkDevice device,
1981 uint32_t bindInfoCount,
1982 const VkBindBufferMemoryInfo *pBindInfos)
1983 {
1984 for (uint32_t i = 0; i < bindInfoCount; ++i) {
1985 TU_FROM_HANDLE(tu_device_memory, mem, pBindInfos[i].memory);
1986 TU_FROM_HANDLE(tu_buffer, buffer, pBindInfos[i].buffer);
1987
1988 if (mem) {
1989 buffer->bo = &mem->bo;
1990 buffer->bo_offset = pBindInfos[i].memoryOffset;
1991 } else {
1992 buffer->bo = NULL;
1993 }
1994 }
1995 return VK_SUCCESS;
1996 }
1997
1998 VkResult
1999 tu_BindBufferMemory(VkDevice device,
2000 VkBuffer buffer,
2001 VkDeviceMemory memory,
2002 VkDeviceSize memoryOffset)
2003 {
2004 const VkBindBufferMemoryInfo info = {
2005 .sType = VK_STRUCTURE_TYPE_BIND_BUFFER_MEMORY_INFO,
2006 .buffer = buffer,
2007 .memory = memory,
2008 .memoryOffset = memoryOffset
2009 };
2010
2011 return tu_BindBufferMemory2(device, 1, &info);
2012 }
2013
2014 VkResult
2015 tu_BindImageMemory2(VkDevice device,
2016 uint32_t bindInfoCount,
2017 const VkBindImageMemoryInfo *pBindInfos)
2018 {
2019 for (uint32_t i = 0; i < bindInfoCount; ++i) {
2020 TU_FROM_HANDLE(tu_image, image, pBindInfos[i].image);
2021 TU_FROM_HANDLE(tu_device_memory, mem, pBindInfos[i].memory);
2022
2023 if (mem) {
2024 image->bo = &mem->bo;
2025 image->bo_offset = pBindInfos[i].memoryOffset;
2026 } else {
2027 image->bo = NULL;
2028 image->bo_offset = 0;
2029 }
2030 }
2031
2032 return VK_SUCCESS;
2033 }
2034
2035 VkResult
2036 tu_BindImageMemory(VkDevice device,
2037 VkImage image,
2038 VkDeviceMemory memory,
2039 VkDeviceSize memoryOffset)
2040 {
2041 const VkBindImageMemoryInfo info = {
2042 .sType = VK_STRUCTURE_TYPE_BIND_IMAGE_MEMORY_INFO,
2043 .image = image,
2044 .memory = memory,
2045 .memoryOffset = memoryOffset
2046 };
2047
2048 return tu_BindImageMemory2(device, 1, &info);
2049 }
2050
2051 VkResult
2052 tu_QueueBindSparse(VkQueue _queue,
2053 uint32_t bindInfoCount,
2054 const VkBindSparseInfo *pBindInfo,
2055 VkFence _fence)
2056 {
2057 return VK_SUCCESS;
2058 }
2059
2060 /* Queue semaphore functions */
2061 
2063 static void
2064 tu_semaphore_part_destroy(struct tu_device *device,
2065 struct tu_semaphore_part *part)
2066 {
2067 switch(part->kind) {
2068 case TU_SEMAPHORE_NONE:
2069 break;
2070 case TU_SEMAPHORE_SYNCOBJ:
2071 drmSyncobjDestroy(device->physical_device->local_fd, part->syncobj);
2072 break;
2073 }
2074 part->kind = TU_SEMAPHORE_NONE;
2075 }
2076
2077 static void
2078 tu_semaphore_remove_temp(struct tu_device *device,
2079 struct tu_semaphore *sem)
2080 {
2081 if (sem->temporary.kind != TU_SEMAPHORE_NONE) {
2082 tu_semaphore_part_destroy(device, &sem->temporary);
2083 }
2084 }
2085
2086 VkResult
2087 tu_CreateSemaphore(VkDevice _device,
2088 const VkSemaphoreCreateInfo *pCreateInfo,
2089 const VkAllocationCallbacks *pAllocator,
2090 VkSemaphore *pSemaphore)
2091 {
2092 TU_FROM_HANDLE(tu_device, device, _device);
2093
2094 struct tu_semaphore *sem =
2095 vk_alloc2(&device->alloc, pAllocator, sizeof(*sem), 8,
2096 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
2097 if (!sem)
2098 return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
2099
2100 const VkExportSemaphoreCreateInfo *export =
2101 vk_find_struct_const(pCreateInfo->pNext, EXPORT_SEMAPHORE_CREATE_INFO);
2102 VkExternalSemaphoreHandleTypeFlags handleTypes =
2103 export ? export->handleTypes : 0;
2104
2105 sem->permanent.kind = TU_SEMAPHORE_NONE;
2106 sem->temporary.kind = TU_SEMAPHORE_NONE;
2107
2108 if (handleTypes) {
2109 if (drmSyncobjCreate(device->physical_device->local_fd, 0, &sem->permanent.syncobj) < 0) {
2110 vk_free2(&device->alloc, pAllocator, sem);
2111 return VK_ERROR_OUT_OF_HOST_MEMORY;
2112 }
2113 sem->permanent.kind = TU_SEMAPHORE_SYNCOBJ;
2114 }
2115 *pSemaphore = tu_semaphore_to_handle(sem);
2116 return VK_SUCCESS;
2117 }
2118
2119 void
2120 tu_DestroySemaphore(VkDevice _device,
2121 VkSemaphore _semaphore,
2122 const VkAllocationCallbacks *pAllocator)
2123 {
2124 TU_FROM_HANDLE(tu_device, device, _device);
2125 TU_FROM_HANDLE(tu_semaphore, sem, _semaphore);
2126 if (!_semaphore)
2127 return;
2128
2129 tu_semaphore_part_destroy(device, &sem->permanent);
2130 tu_semaphore_part_destroy(device, &sem->temporary);
2131
2132 vk_free2(&device->alloc, pAllocator, sem);
2133 }
2134
2135 VkResult
2136 tu_CreateEvent(VkDevice _device,
2137 const VkEventCreateInfo *pCreateInfo,
2138 const VkAllocationCallbacks *pAllocator,
2139 VkEvent *pEvent)
2140 {
2141 TU_FROM_HANDLE(tu_device, device, _device);
2142 struct tu_event *event =
2143 vk_alloc2(&device->alloc, pAllocator, sizeof(*event), 8,
2144 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
2145
2146 if (!event)
2147 return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
2148
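/* The event state lives as a single uint64_t at offset 0 of a small,
 * CPU-mapped BO (see tu_GetEventStatus()/tu_SetEvent() below), so one
 * page is plenty.
 */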
2149 VkResult result = tu_bo_init_new(device, &event->bo, 0x1000);
2150 if (result != VK_SUCCESS)
2151 goto fail_alloc;
2152
2153 result = tu_bo_map(device, &event->bo);
2154 if (result != VK_SUCCESS)
2155 goto fail_map;
2156
2157 *pEvent = tu_event_to_handle(event);
2158
2159 return VK_SUCCESS;
2160
2161 fail_map:
2162 tu_bo_finish(device, &event->bo);
2163 fail_alloc:
2164 vk_free2(&device->alloc, pAllocator, event);
2165 return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
2166 }
2167
2168 void
2169 tu_DestroyEvent(VkDevice _device,
2170 VkEvent _event,
2171 const VkAllocationCallbacks *pAllocator)
2172 {
2173 TU_FROM_HANDLE(tu_device, device, _device);
2174 TU_FROM_HANDLE(tu_event, event, _event);
2175
2176 if (!event)
2177 return;
2178
2179 tu_bo_finish(device, &event->bo);
2180 vk_free2(&device->alloc, pAllocator, event);
2181 }
2182
2183 VkResult
2184 tu_GetEventStatus(VkDevice _device, VkEvent _event)
2185 {
2186 TU_FROM_HANDLE(tu_event, event, _event);
2187
2188 if (*(uint64_t*) event->bo.map == 1)
2189 return VK_EVENT_SET;
2190 return VK_EVENT_RESET;
2191 }
2192
2193 VkResult
2194 tu_SetEvent(VkDevice _device, VkEvent _event)
2195 {
2196 TU_FROM_HANDLE(tu_event, event, _event);
2197 *(uint64_t*) event->bo.map = 1;
2198
2199 return VK_SUCCESS;
2200 }
2201
2202 VkResult
2203 tu_ResetEvent(VkDevice _device, VkEvent _event)
2204 {
2205 TU_FROM_HANDLE(tu_event, event, _event);
2206 *(uint64_t*) event->bo.map = 0;
2207
2208 return VK_SUCCESS;
2209 }
2210
2211 VkResult
2212 tu_CreateBuffer(VkDevice _device,
2213 const VkBufferCreateInfo *pCreateInfo,
2214 const VkAllocationCallbacks *pAllocator,
2215 VkBuffer *pBuffer)
2216 {
2217 TU_FROM_HANDLE(tu_device, device, _device);
2218 struct tu_buffer *buffer;
2219
2220 assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO);
2221
2222 buffer = vk_alloc2(&device->alloc, pAllocator, sizeof(*buffer), 8,
2223 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
2224 if (buffer == NULL)
2225 return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
2226
2227 buffer->size = pCreateInfo->size;
2228 buffer->usage = pCreateInfo->usage;
2229 buffer->flags = pCreateInfo->flags;
2230
2231 *pBuffer = tu_buffer_to_handle(buffer);
2232
2233 return VK_SUCCESS;
2234 }
2235
2236 void
2237 tu_DestroyBuffer(VkDevice _device,
2238 VkBuffer _buffer,
2239 const VkAllocationCallbacks *pAllocator)
2240 {
2241 TU_FROM_HANDLE(tu_device, device, _device);
2242 TU_FROM_HANDLE(tu_buffer, buffer, _buffer);
2243
2244 if (!buffer)
2245 return;
2246
2247 vk_free2(&device->alloc, pAllocator, buffer);
2248 }
2249
2250 VkResult
2251 tu_CreateFramebuffer(VkDevice _device,
2252 const VkFramebufferCreateInfo *pCreateInfo,
2253 const VkAllocationCallbacks *pAllocator,
2254 VkFramebuffer *pFramebuffer)
2255 {
2256 TU_FROM_HANDLE(tu_device, device, _device);
2257 TU_FROM_HANDLE(tu_render_pass, pass, pCreateInfo->renderPass);
2258 struct tu_framebuffer *framebuffer;
2259
2260 assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO);
2261
2262 size_t size = sizeof(*framebuffer) + sizeof(struct tu_attachment_info) *
2263 pCreateInfo->attachmentCount;
2264 framebuffer = vk_alloc2(&device->alloc, pAllocator, size, 8,
2265 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
2266 if (framebuffer == NULL)
2267 return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
2268
2269 framebuffer->attachment_count = pCreateInfo->attachmentCount;
2270 framebuffer->width = pCreateInfo->width;
2271 framebuffer->height = pCreateInfo->height;
2272 framebuffer->layers = pCreateInfo->layers;
2273 for (uint32_t i = 0; i < pCreateInfo->attachmentCount; i++) {
2274 VkImageView _iview = pCreateInfo->pAttachments[i];
2275 struct tu_image_view *iview = tu_image_view_from_handle(_iview);
2276 framebuffer->attachments[i].attachment = iview;
2277 }
2278
2279 tu_framebuffer_tiling_config(framebuffer, device, pass);
2280
2281 *pFramebuffer = tu_framebuffer_to_handle(framebuffer);
2282 return VK_SUCCESS;
2283 }
2284
2285 void
2286 tu_DestroyFramebuffer(VkDevice _device,
2287 VkFramebuffer _fb,
2288 const VkAllocationCallbacks *pAllocator)
2289 {
2290 TU_FROM_HANDLE(tu_device, device, _device);
2291 TU_FROM_HANDLE(tu_framebuffer, fb, _fb);
2292
2293 if (!fb)
2294 return;
2295 vk_free2(&device->alloc, pAllocator, fb);
2296 }
2297
2298 static void
2299 tu_init_sampler(struct tu_device *device,
2300 struct tu_sampler *sampler,
2301 const VkSamplerCreateInfo *pCreateInfo)
2302 {
2303 const struct VkSamplerReductionModeCreateInfo *reduction =
2304 vk_find_struct_const(pCreateInfo->pNext, SAMPLER_REDUCTION_MODE_CREATE_INFO);
2305 const struct VkSamplerYcbcrConversionInfo *ycbcr_conversion =
2306 vk_find_struct_const(pCreateInfo->pNext, SAMPLER_YCBCR_CONVERSION_INFO);
2307
2308 unsigned aniso = pCreateInfo->anisotropyEnable ?
2309 util_last_bit(MIN2((uint32_t)pCreateInfo->maxAnisotropy >> 1, 8)) : 0;
2310 bool miplinear = (pCreateInfo->mipmapMode == VK_SAMPLER_MIPMAP_MODE_LINEAR);
2311 float min_lod = CLAMP(pCreateInfo->minLod, 0.0f, 4095.0f / 256.0f);
2312 float max_lod = CLAMP(pCreateInfo->maxLod, 0.0f, 4095.0f / 256.0f);
2313
2314 sampler->descriptor[0] =
2315 COND(miplinear, A6XX_TEX_SAMP_0_MIPFILTER_LINEAR_NEAR) |
2316 A6XX_TEX_SAMP_0_XY_MAG(tu6_tex_filter(pCreateInfo->magFilter, aniso)) |
2317 A6XX_TEX_SAMP_0_XY_MIN(tu6_tex_filter(pCreateInfo->minFilter, aniso)) |
2318 A6XX_TEX_SAMP_0_ANISO(aniso) |
2319 A6XX_TEX_SAMP_0_WRAP_S(tu6_tex_wrap(pCreateInfo->addressModeU)) |
2320 A6XX_TEX_SAMP_0_WRAP_T(tu6_tex_wrap(pCreateInfo->addressModeV)) |
2321 A6XX_TEX_SAMP_0_WRAP_R(tu6_tex_wrap(pCreateInfo->addressModeW)) |
2322 A6XX_TEX_SAMP_0_LOD_BIAS(pCreateInfo->mipLodBias);
2323 sampler->descriptor[1] =
2324 /* COND(!cso->seamless_cube_map, A6XX_TEX_SAMP_1_CUBEMAPSEAMLESSFILTOFF) | */
2325 COND(pCreateInfo->unnormalizedCoordinates, A6XX_TEX_SAMP_1_UNNORM_COORDS) |
2326 A6XX_TEX_SAMP_1_MIN_LOD(min_lod) |
2327 A6XX_TEX_SAMP_1_MAX_LOD(max_lod) |
2328 COND(pCreateInfo->compareEnable,
2329 A6XX_TEX_SAMP_1_COMPARE_FUNC(tu6_compare_func(pCreateInfo->compareOp)));
2330 /* This is an offset into the border_color BO, which we fill with all the
2331 * possible Vulkan border colors in the correct order, so we can just use
2332 * the Vulkan enum with no translation necessary.
2333 */
2334 sampler->descriptor[2] =
2335 A6XX_TEX_SAMP_2_BCOLOR_OFFSET((unsigned) pCreateInfo->borderColor *
2336 sizeof(struct bcolor_entry));
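/* e.g. VK_BORDER_COLOR_FLOAT_TRANSPARENT_BLACK (0) lands at offset 0 and
 * VK_BORDER_COLOR_INT_OPAQUE_WHITE (5) at 5 * sizeof(struct bcolor_entry).
 */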
2337 sampler->descriptor[3] = 0;
2338
2339 if (reduction) {
2340 sampler->descriptor[2] |= A6XX_TEX_SAMP_2_REDUCTION_MODE(
2341 tu6_reduction_mode(reduction->reductionMode));
2342 }
2343
2344 sampler->ycbcr_sampler = ycbcr_conversion ?
2345 tu_sampler_ycbcr_conversion_from_handle(ycbcr_conversion->conversion) : NULL;
2346
2347 if (sampler->ycbcr_sampler &&
2348 sampler->ycbcr_sampler->chroma_filter == VK_FILTER_LINEAR) {
2349 sampler->descriptor[2] |= A6XX_TEX_SAMP_2_CHROMA_LINEAR;
2350 }
2351
2352 /* TODO:
2353 * A6XX_TEX_SAMP_1_MIPFILTER_LINEAR_FAR disables mipmapping, but vk has no NONE mipfilter?
2354 */
2355 }
2356
2357 VkResult
2358 tu_CreateSampler(VkDevice _device,
2359 const VkSamplerCreateInfo *pCreateInfo,
2360 const VkAllocationCallbacks *pAllocator,
2361 VkSampler *pSampler)
2362 {
2363 TU_FROM_HANDLE(tu_device, device, _device);
2364 struct tu_sampler *sampler;
2365
2366 assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_SAMPLER_CREATE_INFO);
2367
2368 sampler = vk_alloc2(&device->alloc, pAllocator, sizeof(*sampler), 8,
2369 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
2370 if (!sampler)
2371 return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
2372
2373 tu_init_sampler(device, sampler, pCreateInfo);
2374 *pSampler = tu_sampler_to_handle(sampler);
2375
2376 return VK_SUCCESS;
2377 }
2378
2379 void
2380 tu_DestroySampler(VkDevice _device,
2381 VkSampler _sampler,
2382 const VkAllocationCallbacks *pAllocator)
2383 {
2384 TU_FROM_HANDLE(tu_device, device, _device);
2385 TU_FROM_HANDLE(tu_sampler, sampler, _sampler);
2386
2387 if (!sampler)
2388 return;
2389 vk_free2(&device->alloc, pAllocator, sampler);
2390 }
2391
2392 /* vk_icd.h does not declare this function, so we declare it here to
2393 * suppress Wmissing-prototypes.
2394 */
2395 PUBLIC VKAPI_ATTR VkResult VKAPI_CALL
2396 vk_icdNegotiateLoaderICDInterfaceVersion(uint32_t *pSupportedVersion);
2397
2398 PUBLIC VKAPI_ATTR VkResult VKAPI_CALL
2399 vk_icdNegotiateLoaderICDInterfaceVersion(uint32_t *pSupportedVersion)
2400 {
2401 /* For the full details on loader interface versioning, see
2402 * <https://github.com/KhronosGroup/Vulkan-LoaderAndValidationLayers/blob/master/loader/LoaderAndLayerInterface.md>.
2403 * What follows is a condensed summary, to help you navigate the large and
2404 * confusing official doc.
2405 *
2406 * - Loader interface v0 is incompatible with later versions. We don't
2407 * support it.
2408 *
2409 * - In loader interface v1:
2410 * - The first ICD entrypoint called by the loader is
2411 * vk_icdGetInstanceProcAddr(). The ICD must statically expose this
2412 * entrypoint.
2413 * - The ICD must statically expose no other Vulkan symbol unless it
2414 * is linked with -Bsymbolic.
2415 * - Each dispatchable Vulkan handle created by the ICD must be
2416 * a pointer to a struct whose first member is VK_LOADER_DATA. The
2417 * ICD must initialize VK_LOADER_DATA.loadMagic to
2418 * ICD_LOADER_MAGIC.
2419 * - The loader implements vkCreate{PLATFORM}SurfaceKHR() and
2420 * vkDestroySurfaceKHR(). The ICD must be capable of working with
2421 * such loader-managed surfaces.
2422 *
2423 * - Loader interface v2 differs from v1 in:
2424 * - The first ICD entrypoint called by the loader is
2425 * vk_icdNegotiateLoaderICDInterfaceVersion(). The ICD must
2426 * statically expose this entrypoint.
2427 *
2428 * - Loader interface v3 differs from v2 in:
2429 * - The ICD must implement vkCreate{PLATFORM}SurfaceKHR(),
2430 * vkDestroySurfaceKHR(), and other API which uses VkSurfaceKHR,
2431 * because the loader no longer does so.
2432 */
2433 *pSupportedVersion = MIN2(*pSupportedVersion, 3u);
2434 return VK_SUCCESS;
2435 }
2436
2437 VkResult
2438 tu_GetMemoryFdKHR(VkDevice _device,
2439 const VkMemoryGetFdInfoKHR *pGetFdInfo,
2440 int *pFd)
2441 {
2442 TU_FROM_HANDLE(tu_device, device, _device);
2443 TU_FROM_HANDLE(tu_device_memory, memory, pGetFdInfo->memory);
2444
2445 assert(pGetFdInfo->sType == VK_STRUCTURE_TYPE_MEMORY_GET_FD_INFO_KHR);
2446
2447 /* At the moment, we support only the below handle types. */
2448 assert(pGetFdInfo->handleType ==
2449 VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT ||
2450 pGetFdInfo->handleType ==
2451 VK_EXTERNAL_MEMORY_HANDLE_TYPE_DMA_BUF_BIT_EXT);
2452
2453 int prime_fd = tu_bo_export_dmabuf(device, &memory->bo);
2454 if (prime_fd < 0)
2455 return vk_error(device->instance, VK_ERROR_OUT_OF_DEVICE_MEMORY);
2456
2457 *pFd = prime_fd;
2458 return VK_SUCCESS;
2459 }
2460
2461 VkResult
2462 tu_GetMemoryFdPropertiesKHR(VkDevice _device,
2463 VkExternalMemoryHandleTypeFlagBits handleType,
2464 int fd,
2465 VkMemoryFdPropertiesKHR *pMemoryFdProperties)
2466 {
2467 assert(handleType == VK_EXTERNAL_MEMORY_HANDLE_TYPE_DMA_BUF_BIT_EXT);
2468 pMemoryFdProperties->memoryTypeBits = 1;
2469 return VK_SUCCESS;
2470 }
2471
2472 VkResult
2473 tu_ImportFenceFdKHR(VkDevice _device,
2474 const VkImportFenceFdInfoKHR *pImportFenceFdInfo)
2475 {
2476 tu_stub();
2477
2478 return VK_SUCCESS;
2479 }
2480
2481 VkResult
2482 tu_GetFenceFdKHR(VkDevice _device,
2483 const VkFenceGetFdInfoKHR *pGetFdInfo,
2484 int *pFd)
2485 {
2486 tu_stub();
2487
2488 return VK_SUCCESS;
2489 }
2490
2491 VkResult
2492 tu_ImportSemaphoreFdKHR(VkDevice _device,
2493 const VkImportSemaphoreFdInfoKHR *pImportSemaphoreFdInfo)
2494 {
2495 TU_FROM_HANDLE(tu_device, device, _device);
2496 TU_FROM_HANDLE(tu_semaphore, sem, pImportSemaphoreFdInfo->semaphore);
2497 int ret;
2498 struct tu_semaphore_part *dst = NULL;
2499
2500 if (pImportSemaphoreFdInfo->flags & VK_SEMAPHORE_IMPORT_TEMPORARY_BIT) {
2501 dst = &sem->temporary;
2502 } else {
2503 dst = &sem->permanent;
2504 }
2505
2506 uint32_t syncobj = dst->kind == TU_SEMAPHORE_SYNCOBJ ? dst->syncobj : 0;
2507
2508 switch(pImportSemaphoreFdInfo->handleType) {
2509 case VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT: {
2510 uint32_t old_syncobj = syncobj;
2511 ret = drmSyncobjFDToHandle(device->physical_device->local_fd, pImportSemaphoreFdInfo->fd, &syncobj);
2512 if (ret == 0) {
2513 close(pImportSemaphoreFdInfo->fd);
2514 if (old_syncobj)
2515 drmSyncobjDestroy(device->physical_device->local_fd, old_syncobj);
2516 }
2517 break;
2518 }
2519 case VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT: {
2520 if (!syncobj) {
2521 ret = drmSyncobjCreate(device->physical_device->local_fd, 0, &syncobj);
2522 if (ret)
2523 break;
2524 }
2525 if (pImportSemaphoreFdInfo->fd == -1) {
2526 ret = drmSyncobjSignal(device->physical_device->local_fd, &syncobj, 1);
2527 } else {
2528 ret = drmSyncobjImportSyncFile(device->physical_device->local_fd, syncobj, pImportSemaphoreFdInfo->fd);
2529 }
2530 if (!ret)
2531 close(pImportSemaphoreFdInfo->fd);
2532 break;
2533 }
2534 default:
2535 unreachable("Unhandled semaphore handle type");
2536 }
2537
2538 if (ret) {
2539 return VK_ERROR_INVALID_EXTERNAL_HANDLE;
2540 }
2541 dst->syncobj = syncobj;
2542 dst->kind = TU_SEMAPHORE_SYNCOBJ;
2543
2544 return VK_SUCCESS;
2545 }
2546
2547 VkResult
2548 tu_GetSemaphoreFdKHR(VkDevice _device,
2549 const VkSemaphoreGetFdInfoKHR *pGetFdInfo,
2550 int *pFd)
2551 {
2552 TU_FROM_HANDLE(tu_device, device, _device);
2553 TU_FROM_HANDLE(tu_semaphore, sem, pGetFdInfo->semaphore);
2554 int ret;
2555 uint32_t syncobj_handle;
2556
2557 if (sem->temporary.kind != TU_SEMAPHORE_NONE) {
2558 assert(sem->temporary.kind == TU_SEMAPHORE_SYNCOBJ);
2559 syncobj_handle = sem->temporary.syncobj;
2560 } else {
2561 assert(sem->permanent.kind == TU_SEMAPHORE_SYNCOBJ);
2562 syncobj_handle = sem->permanent.syncobj;
2563 }
2564
2565 switch(pGetFdInfo->handleType) {
2566 case VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT:
2567 ret = drmSyncobjHandleToFD(device->physical_device->local_fd, syncobj_handle, pFd);
2568 break;
2569 case VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT:
2570 ret = drmSyncobjExportSyncFile(device->physical_device->local_fd, syncobj_handle, pFd);
2571 if (!ret) {
2572 if (sem->temporary.kind != TU_SEMAPHORE_NONE) {
2573 tu_semaphore_part_destroy(device, &sem->temporary);
2574 } else {
2575 drmSyncobjReset(device->physical_device->local_fd, &syncobj_handle, 1);
2576 }
2577 }
2578 break;
2579 default:
2580 unreachable("Unhandled semaphore handle type");
2581 }
2582
2583 if (ret)
2584 return vk_error(device->instance, VK_ERROR_INVALID_EXTERNAL_HANDLE);
2585 return VK_SUCCESS;
2586 }
2587
2588
2589 static bool tu_has_syncobj(struct tu_physical_device *pdev)
2590 {
2591 uint64_t value;
2592 if (drmGetCap(pdev->local_fd, DRM_CAP_SYNCOBJ, &value))
2593 return false;
2594 return value && pdev->msm_major_version == 1 && pdev->msm_minor_version >= 6;
2595 }
2596
2597 void
2598 tu_GetPhysicalDeviceExternalSemaphoreProperties(
2599 VkPhysicalDevice physicalDevice,
2600 const VkPhysicalDeviceExternalSemaphoreInfo *pExternalSemaphoreInfo,
2601 VkExternalSemaphoreProperties *pExternalSemaphoreProperties)
2602 {
2603 TU_FROM_HANDLE(tu_physical_device, pdev, physicalDevice);
2604
2605 if (tu_has_syncobj(pdev) &&
2606 (pExternalSemaphoreInfo->handleType == VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT ||
2607 pExternalSemaphoreInfo->handleType == VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT)) {
2608 pExternalSemaphoreProperties->exportFromImportedHandleTypes = VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT | VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT;
2609 pExternalSemaphoreProperties->compatibleHandleTypes = VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT | VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT;
2610 pExternalSemaphoreProperties->externalSemaphoreFeatures = VK_EXTERNAL_SEMAPHORE_FEATURE_EXPORTABLE_BIT |
2611 VK_EXTERNAL_SEMAPHORE_FEATURE_IMPORTABLE_BIT;
2612 } else {
2613 pExternalSemaphoreProperties->exportFromImportedHandleTypes = 0;
2614 pExternalSemaphoreProperties->compatibleHandleTypes = 0;
2615 pExternalSemaphoreProperties->externalSemaphoreFeatures = 0;
2616 }
2617 }
2618
2619 void
2620 tu_GetPhysicalDeviceExternalFenceProperties(
2621 VkPhysicalDevice physicalDevice,
2622 const VkPhysicalDeviceExternalFenceInfo *pExternalFenceInfo,
2623 VkExternalFenceProperties *pExternalFenceProperties)
2624 {
2625 pExternalFenceProperties->exportFromImportedHandleTypes = 0;
2626 pExternalFenceProperties->compatibleHandleTypes = 0;
2627 pExternalFenceProperties->externalFenceFeatures = 0;
2628 }
2629
2630 VkResult
2631 tu_CreateDebugReportCallbackEXT(
2632 VkInstance _instance,
2633 const VkDebugReportCallbackCreateInfoEXT *pCreateInfo,
2634 const VkAllocationCallbacks *pAllocator,
2635 VkDebugReportCallbackEXT *pCallback)
2636 {
2637 TU_FROM_HANDLE(tu_instance, instance, _instance);
2638 return vk_create_debug_report_callback(&instance->debug_report_callbacks,
2639 pCreateInfo, pAllocator,
2640 &instance->alloc, pCallback);
2641 }
2642
2643 void
2644 tu_DestroyDebugReportCallbackEXT(VkInstance _instance,
2645 VkDebugReportCallbackEXT _callback,
2646 const VkAllocationCallbacks *pAllocator)
2647 {
2648 TU_FROM_HANDLE(tu_instance, instance, _instance);
2649 vk_destroy_debug_report_callback(&instance->debug_report_callbacks,
2650 _callback, pAllocator, &instance->alloc);
2651 }
2652
2653 void
2654 tu_DebugReportMessageEXT(VkInstance _instance,
2655 VkDebugReportFlagsEXT flags,
2656 VkDebugReportObjectTypeEXT objectType,
2657 uint64_t object,
2658 size_t location,
2659 int32_t messageCode,
2660 const char *pLayerPrefix,
2661 const char *pMessage)
2662 {
2663 TU_FROM_HANDLE(tu_instance, instance, _instance);
2664 vk_debug_report(&instance->debug_report_callbacks, flags, objectType,
2665 object, location, messageCode, pLayerPrefix, pMessage);
2666 }
2667
2668 void
2669 tu_GetDeviceGroupPeerMemoryFeatures(
2670 VkDevice device,
2671 uint32_t heapIndex,
2672 uint32_t localDeviceIndex,
2673 uint32_t remoteDeviceIndex,
2674 VkPeerMemoryFeatureFlags *pPeerMemoryFeatures)
2675 {
2676 assert(localDeviceIndex == remoteDeviceIndex);
2677
2678 *pPeerMemoryFeatures = VK_PEER_MEMORY_FEATURE_COPY_SRC_BIT |
2679 VK_PEER_MEMORY_FEATURE_COPY_DST_BIT |
2680 VK_PEER_MEMORY_FEATURE_GENERIC_SRC_BIT |
2681 VK_PEER_MEMORY_FEATURE_GENERIC_DST_BIT;
2682 }
2683
2684 void tu_GetPhysicalDeviceMultisamplePropertiesEXT(
2685 VkPhysicalDevice physicalDevice,
2686 VkSampleCountFlagBits samples,
2687 VkMultisamplePropertiesEXT* pMultisampleProperties)
2688 {
2689 TU_FROM_HANDLE(tu_physical_device, pdevice, physicalDevice);
2690
2691 if (samples <= VK_SAMPLE_COUNT_4_BIT && pdevice->supported_extensions.EXT_sample_locations)
2692 pMultisampleProperties->maxSampleLocationGridSize = (VkExtent2D){ 1, 1 };
2693 else
2694 pMultisampleProperties->maxSampleLocationGridSize = (VkExtent2D){ 0, 0 };
2695 }