turnip: semaphore support.
[mesa.git] src/freedreno/vulkan/tu_device.c
/*
 * Copyright © 2016 Red Hat.
 * Copyright © 2016 Bas Nieuwenhuizen
 *
 * based in part on anv driver which is:
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

#include "tu_private.h"

#include <fcntl.h>
#include <libsync.h>
#include <stdbool.h>
#include <string.h>
#include <sys/mman.h>
#include <sys/sysinfo.h>
#include <unistd.h>
#include <xf86drm.h>

#include "compiler/glsl_types.h"
#include "util/debug.h"
#include "util/disk_cache.h"
#include "util/u_atomic.h"
#include "vk_format.h"
#include "vk_util.h"

#include "drm-uapi/msm_drm.h"

/* for fd_get_driver/device_uuid() */
#include "freedreno/common/freedreno_uuid.h"

static void
tu_semaphore_remove_temp(struct tu_device *device,
                         struct tu_semaphore *sem);

static int
tu_device_get_cache_uuid(uint16_t family, void *uuid)
{
   uint32_t mesa_timestamp;
   uint16_t f = family;
   memset(uuid, 0, VK_UUID_SIZE);
   if (!disk_cache_get_function_timestamp(tu_device_get_cache_uuid,
                                          &mesa_timestamp))
      return -1;

   memcpy(uuid, &mesa_timestamp, 4);
   memcpy((char *) uuid + 4, &f, 2);
   snprintf((char *) uuid + 6, VK_UUID_SIZE - 10, "tu");
   return 0;
}
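
/* Editor's illustrative note (not part of the original file): the UUID built
 * above packs a 4-byte Mesa build timestamp, the 2-byte GPU family, and the
 * literal "tu", leaving the remaining bytes zero. A hypothetical consumer
 * could unpack it like this:
 *
 *    uint8_t uuid[VK_UUID_SIZE];
 *    if (tu_device_get_cache_uuid(630, uuid) == 0) {
 *       uint32_t timestamp;
 *       uint16_t gpu_family;
 *       memcpy(&timestamp, uuid, 4);       // Mesa build timestamp
 *       memcpy(&gpu_family, uuid + 4, 2);  // e.g. 630
 *       // uuid[6..7] hold "tu"; uuid[8..15] stay zero.
 *    }
 */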

static VkResult
tu_bo_init(struct tu_device *dev,
           struct tu_bo *bo,
           uint32_t gem_handle,
           uint64_t size)
{
   uint64_t iova = tu_gem_info_iova(dev, gem_handle);
   if (!iova)
      return VK_ERROR_OUT_OF_DEVICE_MEMORY;

   *bo = (struct tu_bo) {
      .gem_handle = gem_handle,
      .size = size,
      .iova = iova,
   };

   return VK_SUCCESS;
}

VkResult
tu_bo_init_new(struct tu_device *dev, struct tu_bo *bo, uint64_t size)
{
   /* TODO: Choose better flags. As of 2018-11-12, freedreno/drm/msm_bo.c
    * always sets `flags = MSM_BO_WC`, and we copy that behavior here.
    */
   uint32_t gem_handle = tu_gem_new(dev, size, MSM_BO_WC);
   if (!gem_handle)
      return vk_error(dev->instance, VK_ERROR_OUT_OF_DEVICE_MEMORY);

   VkResult result = tu_bo_init(dev, bo, gem_handle, size);
   if (result != VK_SUCCESS) {
      tu_gem_close(dev, gem_handle);
      return vk_error(dev->instance, result);
   }

   return VK_SUCCESS;
}

VkResult
tu_bo_init_dmabuf(struct tu_device *dev,
                  struct tu_bo *bo,
                  uint64_t size,
                  int fd)
{
   uint32_t gem_handle = tu_gem_import_dmabuf(dev, fd, size);
   if (!gem_handle)
      return vk_error(dev->instance, VK_ERROR_INVALID_EXTERNAL_HANDLE);

   VkResult result = tu_bo_init(dev, bo, gem_handle, size);
   if (result != VK_SUCCESS) {
      tu_gem_close(dev, gem_handle);
      return vk_error(dev->instance, result);
   }

   return VK_SUCCESS;
}

int
tu_bo_export_dmabuf(struct tu_device *dev, struct tu_bo *bo)
{
   return tu_gem_export_dmabuf(dev, bo->gem_handle);
}

VkResult
tu_bo_map(struct tu_device *dev, struct tu_bo *bo)
{
   if (bo->map)
      return VK_SUCCESS;

   uint64_t offset = tu_gem_info_offset(dev, bo->gem_handle);
   if (!offset)
      return vk_error(dev->instance, VK_ERROR_OUT_OF_DEVICE_MEMORY);

   /* TODO: Should we use the wrapper os_mmap() like Freedreno does? */
   void *map = mmap(0, bo->size, PROT_READ | PROT_WRITE, MAP_SHARED,
                    dev->physical_device->local_fd, offset);
   if (map == MAP_FAILED)
      return vk_error(dev->instance, VK_ERROR_MEMORY_MAP_FAILED);

   bo->map = map;
   return VK_SUCCESS;
}

void
tu_bo_finish(struct tu_device *dev, struct tu_bo *bo)
{
   assert(bo->gem_handle);

   if (bo->map)
      munmap(bo->map, bo->size);

   tu_gem_close(dev, bo->gem_handle);
}
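
/* Illustrative sketch (not in the original file): the helpers above compose
 * into a simple allocate/map/use/free lifecycle for a GEM buffer object.
 * `dev` is assumed to be an initialized tu_device; error handling is
 * abbreviated.
 *
 *    struct tu_bo bo;
 *    if (tu_bo_init_new(dev, &bo, 4096) == VK_SUCCESS) {
 *       if (tu_bo_map(dev, &bo) == VK_SUCCESS)
 *          memset(bo.map, 0, bo.size);  // CPU writes through the WC mapping
 *       tu_bo_finish(dev, &bo);         // unmaps and closes the GEM handle
 *    }
 */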

static VkResult
tu_physical_device_init(struct tu_physical_device *device,
                        struct tu_instance *instance,
                        drmDevicePtr drm_device)
{
   const char *path = drm_device->nodes[DRM_NODE_RENDER];
   VkResult result = VK_SUCCESS;
   drmVersionPtr version;
   int fd;
   int master_fd = -1;

   fd = open(path, O_RDWR | O_CLOEXEC);
   if (fd < 0) {
      return vk_errorf(instance, VK_ERROR_INCOMPATIBLE_DRIVER,
                       "failed to open device %s", path);
   }

   /* Version 1.3 added MSM_INFO_IOVA. */
   const int min_version_major = 1;
   const int min_version_minor = 3;

   version = drmGetVersion(fd);
   if (!version) {
      close(fd);
      return vk_errorf(instance, VK_ERROR_INCOMPATIBLE_DRIVER,
                       "failed to query kernel driver version for device %s",
                       path);
   }

   if (strcmp(version->name, "msm")) {
      drmFreeVersion(version);
      close(fd);
      return vk_errorf(instance, VK_ERROR_INCOMPATIBLE_DRIVER,
                       "device %s does not use the msm kernel driver", path);
   }

   if (version->version_major != min_version_major ||
       version->version_minor < min_version_minor) {
      result = vk_errorf(instance, VK_ERROR_INCOMPATIBLE_DRIVER,
                         "kernel driver for device %s has version %d.%d, "
                         "but Vulkan requires version >= %d.%d",
                         path, version->version_major, version->version_minor,
                         min_version_major, min_version_minor);
      drmFreeVersion(version);
      close(fd);
      return result;
   }

   device->msm_major_version = version->version_major;
   device->msm_minor_version = version->version_minor;

   drmFreeVersion(version);

   if (instance->debug_flags & TU_DEBUG_STARTUP)
      tu_logi("Found compatible device '%s'.", path);

   device->_loader_data.loaderMagic = ICD_LOADER_MAGIC;
   device->instance = instance;
   assert(strlen(path) < ARRAY_SIZE(device->path));
   strncpy(device->path, path, ARRAY_SIZE(device->path));

   if (instance->enabled_extensions.KHR_display) {
      master_fd =
         open(drm_device->nodes[DRM_NODE_PRIMARY], O_RDWR | O_CLOEXEC);
      if (master_fd >= 0) {
         /* TODO: free master_fd if accel is not working? */
      }
   }

   device->master_fd = master_fd;
   device->local_fd = fd;

   if (tu_drm_get_gpu_id(device, &device->gpu_id)) {
      if (instance->debug_flags & TU_DEBUG_STARTUP)
         tu_logi("Could not query the GPU ID");
      result = vk_errorf(instance, VK_ERROR_INITIALIZATION_FAILED,
                         "could not get GPU ID");
      goto fail;
   }

   if (tu_drm_get_gmem_size(device, &device->gmem_size)) {
      if (instance->debug_flags & TU_DEBUG_STARTUP)
         tu_logi("Could not query the GMEM size");
      result = vk_errorf(instance, VK_ERROR_INITIALIZATION_FAILED,
                         "could not get GMEM size");
      goto fail;
   }

   if (tu_drm_get_gmem_base(device, &device->gmem_base)) {
      if (instance->debug_flags & TU_DEBUG_STARTUP)
         tu_logi("Could not query the GMEM base");
      result = vk_errorf(instance, VK_ERROR_INITIALIZATION_FAILED,
                         "could not get GMEM base");
      goto fail;
   }

   memset(device->name, 0, sizeof(device->name));
   sprintf(device->name, "FD%d", device->gpu_id);

   switch (device->gpu_id) {
   case 618:
      device->ccu_offset_gmem = 0x7c000; /* 0x7e000 in some cases? */
      device->ccu_offset_bypass = 0x10000;
      device->tile_align_w = 64;
      device->magic.PC_UNKNOWN_9805 = 0x0;
      device->magic.SP_UNKNOWN_A0F8 = 0x0;
      break;
   case 630:
   case 640:
      device->ccu_offset_gmem = 0xf8000;
      device->ccu_offset_bypass = 0x20000;
      device->tile_align_w = 64;
      device->magic.PC_UNKNOWN_9805 = 0x1;
      device->magic.SP_UNKNOWN_A0F8 = 0x1;
      break;
   case 650:
      device->ccu_offset_gmem = 0x114000;
      device->ccu_offset_bypass = 0x30000;
      device->tile_align_w = 96;
      device->magic.PC_UNKNOWN_9805 = 0x2;
      device->magic.SP_UNKNOWN_A0F8 = 0x2;
      break;
   default:
      result = vk_errorf(instance, VK_ERROR_INITIALIZATION_FAILED,
                         "device %s is unsupported", device->name);
      goto fail;
   }
   if (tu_device_get_cache_uuid(device->gpu_id, device->cache_uuid)) {
      result = vk_errorf(instance, VK_ERROR_INITIALIZATION_FAILED,
                         "cannot generate UUID");
      goto fail;
   }

   /* The gpu id is already embedded in the uuid so we just pass "tu"
    * when creating the cache.
    */
   char buf[VK_UUID_SIZE * 2 + 1];
   disk_cache_format_hex_id(buf, device->cache_uuid, VK_UUID_SIZE * 2);
   device->disk_cache = disk_cache_create(device->name, buf, 0);

   fprintf(stderr, "WARNING: tu is not a conformant vulkan implementation, "
                   "testing use only.\n");

   fd_get_driver_uuid(device->driver_uuid);
   fd_get_device_uuid(device->device_uuid, device->gpu_id);

   tu_physical_device_get_supported_extensions(device, &device->supported_extensions);

   if (result != VK_SUCCESS) {
      vk_error(instance, result);
      goto fail;
   }

   result = tu_wsi_init(device);
   if (result != VK_SUCCESS) {
      vk_error(instance, result);
      goto fail;
   }

   return VK_SUCCESS;

fail:
   close(fd);
   if (master_fd != -1)
      close(master_fd);
   return result;
}

static void
tu_physical_device_finish(struct tu_physical_device *device)
{
   tu_wsi_finish(device);

   disk_cache_destroy(device->disk_cache);
   close(device->local_fd);
   if (device->master_fd != -1)
      close(device->master_fd);
}

static VKAPI_ATTR void *
default_alloc_func(void *pUserData,
                   size_t size,
                   size_t align,
                   VkSystemAllocationScope allocationScope)
{
   return malloc(size);
}

static VKAPI_ATTR void *
default_realloc_func(void *pUserData,
                     void *pOriginal,
                     size_t size,
                     size_t align,
                     VkSystemAllocationScope allocationScope)
{
   return realloc(pOriginal, size);
}

static VKAPI_ATTR void
default_free_func(void *pUserData, void *pMemory)
{
   free(pMemory);
}

static const VkAllocationCallbacks default_alloc = {
   .pUserData = NULL,
   .pfnAllocation = default_alloc_func,
   .pfnReallocation = default_realloc_func,
   .pfnFree = default_free_func,
};

static const struct debug_control tu_debug_options[] = {
   { "startup", TU_DEBUG_STARTUP },
   { "nir", TU_DEBUG_NIR },
   { "ir3", TU_DEBUG_IR3 },
   { "nobin", TU_DEBUG_NOBIN },
   { "sysmem", TU_DEBUG_SYSMEM },
   { "forcebin", TU_DEBUG_FORCEBIN },
   { "noubwc", TU_DEBUG_NOUBWC },
   { NULL, 0 }
};

const char *
tu_get_debug_option_name(int id)
{
   assert(id < ARRAY_SIZE(tu_debug_options) - 1);
   return tu_debug_options[id].string;
}

static int
tu_get_instance_extension_index(const char *name)
{
   for (unsigned i = 0; i < TU_INSTANCE_EXTENSION_COUNT; ++i) {
      if (strcmp(name, tu_instance_extensions[i].extensionName) == 0)
         return i;
   }
   return -1;
}

VkResult
tu_CreateInstance(const VkInstanceCreateInfo *pCreateInfo,
                  const VkAllocationCallbacks *pAllocator,
                  VkInstance *pInstance)
{
   struct tu_instance *instance;
   VkResult result;

   assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_INSTANCE_CREATE_INFO);

   uint32_t client_version;
   if (pCreateInfo->pApplicationInfo &&
       pCreateInfo->pApplicationInfo->apiVersion != 0) {
      client_version = pCreateInfo->pApplicationInfo->apiVersion;
   } else {
      tu_EnumerateInstanceVersion(&client_version);
   }

   instance = vk_zalloc2(&default_alloc, pAllocator, sizeof(*instance), 8,
                         VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
   if (!instance)
      return vk_error(NULL, VK_ERROR_OUT_OF_HOST_MEMORY);

   instance->_loader_data.loaderMagic = ICD_LOADER_MAGIC;

   if (pAllocator)
      instance->alloc = *pAllocator;
   else
      instance->alloc = default_alloc;

   instance->api_version = client_version;
   instance->physical_device_count = -1;

   instance->debug_flags =
      parse_debug_string(getenv("TU_DEBUG"), tu_debug_options);

   if (instance->debug_flags & TU_DEBUG_STARTUP)
      tu_logi("Created an instance");

   for (uint32_t i = 0; i < pCreateInfo->enabledExtensionCount; i++) {
      const char *ext_name = pCreateInfo->ppEnabledExtensionNames[i];
      int index = tu_get_instance_extension_index(ext_name);

      if (index < 0 || !tu_instance_extensions_supported.extensions[index]) {
         vk_free2(&default_alloc, pAllocator, instance);
         return vk_error(instance, VK_ERROR_EXTENSION_NOT_PRESENT);
      }

      instance->enabled_extensions.extensions[index] = true;
   }

   result = vk_debug_report_instance_init(&instance->debug_report_callbacks);
   if (result != VK_SUCCESS) {
      vk_free2(&default_alloc, pAllocator, instance);
      return vk_error(instance, result);
   }

   glsl_type_singleton_init_or_ref();

   VG(VALGRIND_CREATE_MEMPOOL(instance, 0, false));

   *pInstance = tu_instance_to_handle(instance);

   return VK_SUCCESS;
}

void
tu_DestroyInstance(VkInstance _instance,
                   const VkAllocationCallbacks *pAllocator)
{
   TU_FROM_HANDLE(tu_instance, instance, _instance);

   if (!instance)
      return;

   for (int i = 0; i < instance->physical_device_count; ++i) {
      tu_physical_device_finish(instance->physical_devices + i);
   }

   VG(VALGRIND_DESTROY_MEMPOOL(instance));

   glsl_type_singleton_decref();

   vk_debug_report_instance_destroy(&instance->debug_report_callbacks);

   vk_free(&instance->alloc, instance);
}

static VkResult
tu_enumerate_devices(struct tu_instance *instance)
{
   /* TODO: Check for more devices ? */
   drmDevicePtr devices[8];
   VkResult result = VK_ERROR_INCOMPATIBLE_DRIVER;
   int max_devices;

   instance->physical_device_count = 0;

   max_devices = drmGetDevices2(0, devices, ARRAY_SIZE(devices));

   if (instance->debug_flags & TU_DEBUG_STARTUP)
      tu_logi("Found %d drm nodes", max_devices);

   if (max_devices < 1)
      return vk_error(instance, VK_ERROR_INCOMPATIBLE_DRIVER);

   for (unsigned i = 0; i < (unsigned) max_devices; i++) {
      if (devices[i]->available_nodes & 1 << DRM_NODE_RENDER &&
          devices[i]->bustype == DRM_BUS_PLATFORM) {

         result = tu_physical_device_init(
            instance->physical_devices + instance->physical_device_count,
            instance, devices[i]);
         if (result == VK_SUCCESS)
            ++instance->physical_device_count;
         else if (result != VK_ERROR_INCOMPATIBLE_DRIVER)
            break;
      }
   }
   drmFreeDevices(devices, max_devices);

   return result;
}

VkResult
tu_EnumeratePhysicalDevices(VkInstance _instance,
                            uint32_t *pPhysicalDeviceCount,
                            VkPhysicalDevice *pPhysicalDevices)
{
   TU_FROM_HANDLE(tu_instance, instance, _instance);
   VK_OUTARRAY_MAKE(out, pPhysicalDevices, pPhysicalDeviceCount);

   VkResult result;

   if (instance->physical_device_count < 0) {
      result = tu_enumerate_devices(instance);
      if (result != VK_SUCCESS && result != VK_ERROR_INCOMPATIBLE_DRIVER)
         return result;
   }

   for (uint32_t i = 0; i < instance->physical_device_count; ++i) {
      vk_outarray_append(&out, p)
      {
         *p = tu_physical_device_to_handle(instance->physical_devices + i);
      }
   }

   return vk_outarray_status(&out);
}

VkResult
tu_EnumeratePhysicalDeviceGroups(
   VkInstance _instance,
   uint32_t *pPhysicalDeviceGroupCount,
   VkPhysicalDeviceGroupProperties *pPhysicalDeviceGroupProperties)
{
   TU_FROM_HANDLE(tu_instance, instance, _instance);
   VK_OUTARRAY_MAKE(out, pPhysicalDeviceGroupProperties,
                    pPhysicalDeviceGroupCount);
   VkResult result;

   if (instance->physical_device_count < 0) {
      result = tu_enumerate_devices(instance);
      if (result != VK_SUCCESS && result != VK_ERROR_INCOMPATIBLE_DRIVER)
         return result;
   }

   for (uint32_t i = 0; i < instance->physical_device_count; ++i) {
      vk_outarray_append(&out, p)
      {
         p->physicalDeviceCount = 1;
         p->physicalDevices[0] =
            tu_physical_device_to_handle(instance->physical_devices + i);
         p->subsetAllocation = false;
      }
   }

   return vk_outarray_status(&out);
}

void
tu_GetPhysicalDeviceFeatures(VkPhysicalDevice physicalDevice,
                             VkPhysicalDeviceFeatures *pFeatures)
{
   memset(pFeatures, 0, sizeof(*pFeatures));

   *pFeatures = (VkPhysicalDeviceFeatures) {
      .robustBufferAccess = true,
      .fullDrawIndexUint32 = true,
      .imageCubeArray = true,
      .independentBlend = true,
      .geometryShader = true,
      .tessellationShader = true,
      .sampleRateShading = true,
      .dualSrcBlend = true,
      .logicOp = true,
      .multiDrawIndirect = true,
      .drawIndirectFirstInstance = true,
      .depthClamp = true,
      .depthBiasClamp = false,
      .fillModeNonSolid = true,
      .depthBounds = true,
      .wideLines = false,
      .largePoints = false,
      .alphaToOne = true,
      .multiViewport = false,
      .samplerAnisotropy = true,
      .textureCompressionETC2 = true,
      .textureCompressionASTC_LDR = true,
      .textureCompressionBC = true,
      .occlusionQueryPrecise = true,
      .pipelineStatisticsQuery = false,
      .vertexPipelineStoresAndAtomics = false,
      .fragmentStoresAndAtomics = false,
      .shaderTessellationAndGeometryPointSize = false,
      .shaderImageGatherExtended = false,
      .shaderStorageImageExtendedFormats = false,
      .shaderStorageImageMultisample = false,
      .shaderUniformBufferArrayDynamicIndexing = false,
      .shaderSampledImageArrayDynamicIndexing = false,
      .shaderStorageBufferArrayDynamicIndexing = false,
      .shaderStorageImageArrayDynamicIndexing = false,
      .shaderStorageImageReadWithoutFormat = false,
      .shaderStorageImageWriteWithoutFormat = false,
      .shaderClipDistance = false,
      .shaderCullDistance = false,
      .shaderFloat64 = false,
      .shaderInt64 = false,
      .shaderInt16 = false,
      .sparseBinding = false,
      .variableMultisampleRate = false,
      .inheritedQueries = false,
   };
}

void
tu_GetPhysicalDeviceFeatures2(VkPhysicalDevice physicalDevice,
                              VkPhysicalDeviceFeatures2 *pFeatures)
{
   vk_foreach_struct(ext, pFeatures->pNext)
   {
      switch (ext->sType) {
      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VULKAN_1_1_FEATURES: {
         VkPhysicalDeviceVulkan11Features *features = (void *) ext;
         features->storageBuffer16BitAccess = false;
         features->uniformAndStorageBuffer16BitAccess = false;
         features->storagePushConstant16 = false;
         features->storageInputOutput16 = false;
         features->multiview = false;
         features->multiviewGeometryShader = false;
         features->multiviewTessellationShader = false;
         features->variablePointersStorageBuffer = false;
         features->variablePointers = false;
         features->protectedMemory = false;
         features->samplerYcbcrConversion = true;
         features->shaderDrawParameters = true;
         break;
      }
      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VARIABLE_POINTERS_FEATURES: {
         VkPhysicalDeviceVariablePointersFeatures *features = (void *) ext;
         features->variablePointersStorageBuffer = false;
         features->variablePointers = false;
         break;
      }
      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MULTIVIEW_FEATURES: {
         VkPhysicalDeviceMultiviewFeatures *features =
            (VkPhysicalDeviceMultiviewFeatures *) ext;
         features->multiview = false;
         features->multiviewGeometryShader = false;
         features->multiviewTessellationShader = false;
         break;
      }
      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_DRAW_PARAMETERS_FEATURES: {
         VkPhysicalDeviceShaderDrawParametersFeatures *features =
            (VkPhysicalDeviceShaderDrawParametersFeatures *) ext;
         features->shaderDrawParameters = true;
         break;
      }
      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROTECTED_MEMORY_FEATURES: {
         VkPhysicalDeviceProtectedMemoryFeatures *features =
            (VkPhysicalDeviceProtectedMemoryFeatures *) ext;
         features->protectedMemory = false;
         break;
      }
      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_16BIT_STORAGE_FEATURES: {
         VkPhysicalDevice16BitStorageFeatures *features =
            (VkPhysicalDevice16BitStorageFeatures *) ext;
         features->storageBuffer16BitAccess = false;
         features->uniformAndStorageBuffer16BitAccess = false;
         features->storagePushConstant16 = false;
         features->storageInputOutput16 = false;
         break;
      }
      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SAMPLER_YCBCR_CONVERSION_FEATURES: {
         VkPhysicalDeviceSamplerYcbcrConversionFeatures *features =
            (VkPhysicalDeviceSamplerYcbcrConversionFeatures *) ext;
         features->samplerYcbcrConversion = true;
         break;
      }
      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DESCRIPTOR_INDEXING_FEATURES_EXT: {
         VkPhysicalDeviceDescriptorIndexingFeaturesEXT *features =
            (VkPhysicalDeviceDescriptorIndexingFeaturesEXT *) ext;
         features->shaderInputAttachmentArrayDynamicIndexing = false;
         features->shaderUniformTexelBufferArrayDynamicIndexing = false;
         features->shaderStorageTexelBufferArrayDynamicIndexing = false;
         features->shaderUniformBufferArrayNonUniformIndexing = false;
         features->shaderSampledImageArrayNonUniformIndexing = false;
         features->shaderStorageBufferArrayNonUniformIndexing = false;
         features->shaderStorageImageArrayNonUniformIndexing = false;
         features->shaderInputAttachmentArrayNonUniformIndexing = false;
         features->shaderUniformTexelBufferArrayNonUniformIndexing = false;
         features->shaderStorageTexelBufferArrayNonUniformIndexing = false;
         features->descriptorBindingUniformBufferUpdateAfterBind = false;
         features->descriptorBindingSampledImageUpdateAfterBind = false;
         features->descriptorBindingStorageImageUpdateAfterBind = false;
         features->descriptorBindingStorageBufferUpdateAfterBind = false;
         features->descriptorBindingUniformTexelBufferUpdateAfterBind = false;
         features->descriptorBindingStorageTexelBufferUpdateAfterBind = false;
         features->descriptorBindingUpdateUnusedWhilePending = false;
         features->descriptorBindingPartiallyBound = false;
         features->descriptorBindingVariableDescriptorCount = false;
         features->runtimeDescriptorArray = false;
         break;
      }
      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_CONDITIONAL_RENDERING_FEATURES_EXT: {
         VkPhysicalDeviceConditionalRenderingFeaturesEXT *features =
            (VkPhysicalDeviceConditionalRenderingFeaturesEXT *) ext;
         features->conditionalRendering = false;
         features->inheritedConditionalRendering = false;
         break;
      }
      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TRANSFORM_FEEDBACK_FEATURES_EXT: {
         VkPhysicalDeviceTransformFeedbackFeaturesEXT *features =
            (VkPhysicalDeviceTransformFeedbackFeaturesEXT *) ext;
         features->transformFeedback = true;
         features->geometryStreams = false;
         break;
      }
      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_INDEX_TYPE_UINT8_FEATURES_EXT: {
         VkPhysicalDeviceIndexTypeUint8FeaturesEXT *features =
            (VkPhysicalDeviceIndexTypeUint8FeaturesEXT *)ext;
         features->indexTypeUint8 = true;
         break;
      }
      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VERTEX_ATTRIBUTE_DIVISOR_FEATURES_EXT: {
         VkPhysicalDeviceVertexAttributeDivisorFeaturesEXT *features =
            (VkPhysicalDeviceVertexAttributeDivisorFeaturesEXT *)ext;
         features->vertexAttributeInstanceRateDivisor = true;
         features->vertexAttributeInstanceRateZeroDivisor = true;
         break;
      }
      default:
         break;
      }
   }
   tu_GetPhysicalDeviceFeatures(physicalDevice, &pFeatures->features);
}

void
tu_GetPhysicalDeviceProperties(VkPhysicalDevice physicalDevice,
                               VkPhysicalDeviceProperties *pProperties)
{
   TU_FROM_HANDLE(tu_physical_device, pdevice, physicalDevice);
   VkSampleCountFlags sample_counts =
      VK_SAMPLE_COUNT_1_BIT | VK_SAMPLE_COUNT_2_BIT | VK_SAMPLE_COUNT_4_BIT;

   /* I have no idea what the maximum size is, but the hardware supports very
    * large numbers of descriptors (at least 2^16). This limit is based on
    * CP_LOAD_STATE6, which has a 28-bit field for the DWORD offset, so that
    * we don't have to think about what to do if that overflows, but really
    * nothing is likely to get close to this.
    */
   const size_t max_descriptor_set_size = (1 << 28) / A6XX_TEX_CONST_DWORDS;

   VkPhysicalDeviceLimits limits = {
      .maxImageDimension1D = (1 << 14),
      .maxImageDimension2D = (1 << 14),
      .maxImageDimension3D = (1 << 11),
      .maxImageDimensionCube = (1 << 14),
      .maxImageArrayLayers = (1 << 11),
      .maxTexelBufferElements = 128 * 1024 * 1024,
      .maxUniformBufferRange = MAX_UNIFORM_BUFFER_RANGE,
      .maxStorageBufferRange = MAX_STORAGE_BUFFER_RANGE,
      .maxPushConstantsSize = MAX_PUSH_CONSTANTS_SIZE,
      .maxMemoryAllocationCount = UINT32_MAX,
      .maxSamplerAllocationCount = 64 * 1024,
      .bufferImageGranularity = 64, /* A cache line */
      .sparseAddressSpaceSize = 0xffffffffu, /* buffer max size */
      .maxBoundDescriptorSets = MAX_SETS,
      .maxPerStageDescriptorSamplers = max_descriptor_set_size,
      .maxPerStageDescriptorUniformBuffers = max_descriptor_set_size,
      .maxPerStageDescriptorStorageBuffers = max_descriptor_set_size,
      .maxPerStageDescriptorSampledImages = max_descriptor_set_size,
      .maxPerStageDescriptorStorageImages = max_descriptor_set_size,
      .maxPerStageDescriptorInputAttachments = MAX_RTS,
      .maxPerStageResources = max_descriptor_set_size,
      .maxDescriptorSetSamplers = max_descriptor_set_size,
      .maxDescriptorSetUniformBuffers = max_descriptor_set_size,
      .maxDescriptorSetUniformBuffersDynamic = MAX_DYNAMIC_UNIFORM_BUFFERS,
      .maxDescriptorSetStorageBuffers = max_descriptor_set_size,
      .maxDescriptorSetStorageBuffersDynamic = MAX_DYNAMIC_STORAGE_BUFFERS,
      .maxDescriptorSetSampledImages = max_descriptor_set_size,
      .maxDescriptorSetStorageImages = max_descriptor_set_size,
      .maxDescriptorSetInputAttachments = MAX_RTS,
      .maxVertexInputAttributes = 32,
      .maxVertexInputBindings = 32,
      .maxVertexInputAttributeOffset = 4095,
      .maxVertexInputBindingStride = 2048,
      .maxVertexOutputComponents = 128,
      .maxTessellationGenerationLevel = 64,
      .maxTessellationPatchSize = 32,
      .maxTessellationControlPerVertexInputComponents = 128,
      .maxTessellationControlPerVertexOutputComponents = 128,
      .maxTessellationControlPerPatchOutputComponents = 120,
      .maxTessellationControlTotalOutputComponents = 4096,
      .maxTessellationEvaluationInputComponents = 128,
      .maxTessellationEvaluationOutputComponents = 128,
      .maxGeometryShaderInvocations = 32,
      .maxGeometryInputComponents = 64,
      .maxGeometryOutputComponents = 128,
      .maxGeometryOutputVertices = 256,
      .maxGeometryTotalOutputComponents = 1024,
      .maxFragmentInputComponents = 124,
      .maxFragmentOutputAttachments = 8,
      .maxFragmentDualSrcAttachments = 1,
      .maxFragmentCombinedOutputResources = 8,
      .maxComputeSharedMemorySize = 32768,
      .maxComputeWorkGroupCount = { 65535, 65535, 65535 },
      .maxComputeWorkGroupInvocations = 2048,
      .maxComputeWorkGroupSize = { 2048, 2048, 2048 },
      .subPixelPrecisionBits = 8,
      .subTexelPrecisionBits = 8,
      .mipmapPrecisionBits = 8,
      .maxDrawIndexedIndexValue = UINT32_MAX,
      .maxDrawIndirectCount = UINT32_MAX,
      .maxSamplerLodBias = 4095.0 / 256.0, /* [-16, 15.99609375] */
      .maxSamplerAnisotropy = 16,
      .maxViewports = MAX_VIEWPORTS,
      .maxViewportDimensions = { (1 << 14), (1 << 14) },
      .viewportBoundsRange = { INT16_MIN, INT16_MAX },
      .viewportSubPixelBits = 8,
      .minMemoryMapAlignment = 4096, /* A page */
      .minTexelBufferOffsetAlignment = 64,
      .minUniformBufferOffsetAlignment = 64,
      .minStorageBufferOffsetAlignment = 64,
      .minTexelOffset = -16,
      .maxTexelOffset = 15,
      .minTexelGatherOffset = -32,
      .maxTexelGatherOffset = 31,
      .minInterpolationOffset = -0.5,
      .maxInterpolationOffset = 0.4375,
      .subPixelInterpolationOffsetBits = 4,
      .maxFramebufferWidth = (1 << 14),
      .maxFramebufferHeight = (1 << 14),
      .maxFramebufferLayers = (1 << 10),
      .framebufferColorSampleCounts = sample_counts,
      .framebufferDepthSampleCounts = sample_counts,
      .framebufferStencilSampleCounts = sample_counts,
      .framebufferNoAttachmentsSampleCounts = sample_counts,
      .maxColorAttachments = MAX_RTS,
      .sampledImageColorSampleCounts = sample_counts,
      .sampledImageIntegerSampleCounts = VK_SAMPLE_COUNT_1_BIT,
      .sampledImageDepthSampleCounts = sample_counts,
      .sampledImageStencilSampleCounts = sample_counts,
      .storageImageSampleCounts = VK_SAMPLE_COUNT_1_BIT,
      .maxSampleMaskWords = 1,
      .timestampComputeAndGraphics = true,
      .timestampPeriod = 1000000000.0 / 19200000.0, /* CP_ALWAYS_ON_COUNTER is fixed 19.2MHz */
      .maxClipDistances = 8,
      .maxCullDistances = 8,
      .maxCombinedClipAndCullDistances = 8,
      .discreteQueuePriorities = 1,
      .pointSizeRange = { 0.125, 255.875 },
      .lineWidthRange = { 0.0, 7.9921875 },
      .pointSizeGranularity = (1.0 / 8.0),
      .lineWidthGranularity = (1.0 / 128.0),
      .strictLines = false, /* FINISHME */
      .standardSampleLocations = true,
      .optimalBufferCopyOffsetAlignment = 128,
      .optimalBufferCopyRowPitchAlignment = 128,
      .nonCoherentAtomSize = 64,
   };

   *pProperties = (VkPhysicalDeviceProperties) {
      .apiVersion = tu_physical_device_api_version(pdevice),
      .driverVersion = vk_get_driver_version(),
      .vendorID = 0, /* TODO */
      .deviceID = 0,
      .deviceType = VK_PHYSICAL_DEVICE_TYPE_INTEGRATED_GPU,
      .limits = limits,
      .sparseProperties = { 0 },
   };

   strcpy(pProperties->deviceName, pdevice->name);
   memcpy(pProperties->pipelineCacheUUID, pdevice->cache_uuid, VK_UUID_SIZE);
}

void
tu_GetPhysicalDeviceProperties2(VkPhysicalDevice physicalDevice,
                                VkPhysicalDeviceProperties2 *pProperties)
{
   TU_FROM_HANDLE(tu_physical_device, pdevice, physicalDevice);
   tu_GetPhysicalDeviceProperties(physicalDevice, &pProperties->properties);

   vk_foreach_struct(ext, pProperties->pNext)
   {
      switch (ext->sType) {
      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PUSH_DESCRIPTOR_PROPERTIES_KHR: {
         VkPhysicalDevicePushDescriptorPropertiesKHR *properties =
            (VkPhysicalDevicePushDescriptorPropertiesKHR *) ext;
         properties->maxPushDescriptors = MAX_PUSH_DESCRIPTORS;
         break;
      }
      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_ID_PROPERTIES: {
         VkPhysicalDeviceIDProperties *properties =
            (VkPhysicalDeviceIDProperties *) ext;
         memcpy(properties->driverUUID, pdevice->driver_uuid, VK_UUID_SIZE);
         memcpy(properties->deviceUUID, pdevice->device_uuid, VK_UUID_SIZE);
         properties->deviceLUIDValid = false;
         break;
      }
      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MULTIVIEW_PROPERTIES: {
         VkPhysicalDeviceMultiviewProperties *properties =
            (VkPhysicalDeviceMultiviewProperties *) ext;
         properties->maxMultiviewViewCount = MAX_VIEWS;
         properties->maxMultiviewInstanceIndex = INT_MAX;
         break;
      }
      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_POINT_CLIPPING_PROPERTIES: {
         VkPhysicalDevicePointClippingProperties *properties =
            (VkPhysicalDevicePointClippingProperties *) ext;
         properties->pointClippingBehavior =
            VK_POINT_CLIPPING_BEHAVIOR_ALL_CLIP_PLANES;
         break;
      }
      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MAINTENANCE_3_PROPERTIES: {
         VkPhysicalDeviceMaintenance3Properties *properties =
            (VkPhysicalDeviceMaintenance3Properties *) ext;
         /* Make sure everything is addressable by a signed 32-bit int, and
          * our largest descriptors are 96 bytes. */
         properties->maxPerSetDescriptors = (1ull << 31) / 96;
         /* Our buffer size fields allow only this much */
         properties->maxMemoryAllocationSize = 0xFFFFFFFFull;
         break;
      }
      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TRANSFORM_FEEDBACK_PROPERTIES_EXT: {
         VkPhysicalDeviceTransformFeedbackPropertiesEXT *properties =
            (VkPhysicalDeviceTransformFeedbackPropertiesEXT *)ext;

         properties->maxTransformFeedbackStreams = IR3_MAX_SO_STREAMS;
         properties->maxTransformFeedbackBuffers = IR3_MAX_SO_BUFFERS;
         properties->maxTransformFeedbackBufferSize = UINT32_MAX;
         properties->maxTransformFeedbackStreamDataSize = 512;
         properties->maxTransformFeedbackBufferDataSize = 512;
         properties->maxTransformFeedbackBufferDataStride = 512;
         properties->transformFeedbackQueries = true;
         properties->transformFeedbackStreamsLinesTriangles = false;
         properties->transformFeedbackRasterizationStreamSelect = false;
         properties->transformFeedbackDraw = true;
         break;
      }
      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SAMPLE_LOCATIONS_PROPERTIES_EXT: {
         VkPhysicalDeviceSampleLocationsPropertiesEXT *properties =
            (VkPhysicalDeviceSampleLocationsPropertiesEXT *)ext;
         properties->sampleLocationSampleCounts = 0;
         if (pdevice->supported_extensions.EXT_sample_locations) {
            properties->sampleLocationSampleCounts =
               VK_SAMPLE_COUNT_1_BIT | VK_SAMPLE_COUNT_2_BIT | VK_SAMPLE_COUNT_4_BIT;
         }
         properties->maxSampleLocationGridSize = (VkExtent2D) { 1 , 1 };
         properties->sampleLocationCoordinateRange[0] = 0.0f;
         properties->sampleLocationCoordinateRange[1] = 0.9375f;
         properties->sampleLocationSubPixelBits = 4;
         properties->variableSampleLocations = true;
         break;
      }
      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SAMPLER_FILTER_MINMAX_PROPERTIES: {
         VkPhysicalDeviceSamplerFilterMinmaxProperties *properties =
            (VkPhysicalDeviceSamplerFilterMinmaxProperties *)ext;
         properties->filterMinmaxImageComponentMapping = true;
         properties->filterMinmaxSingleComponentFormats = true;
         break;
      }
      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SUBGROUP_PROPERTIES: {
         VkPhysicalDeviceSubgroupProperties *properties =
            (VkPhysicalDeviceSubgroupProperties *)ext;
         properties->subgroupSize = 64;
         properties->supportedStages = VK_SHADER_STAGE_COMPUTE_BIT;
         properties->supportedOperations = VK_SUBGROUP_FEATURE_BASIC_BIT |
                                           VK_SUBGROUP_FEATURE_VOTE_BIT;
         properties->quadOperationsInAllStages = false;
         break;
      }
      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VERTEX_ATTRIBUTE_DIVISOR_PROPERTIES_EXT: {
         VkPhysicalDeviceVertexAttributeDivisorPropertiesEXT *props =
            (VkPhysicalDeviceVertexAttributeDivisorPropertiesEXT *)ext;
         props->maxVertexAttribDivisor = UINT32_MAX;
         break;
      }
      default:
         break;
      }
   }
}

static const VkQueueFamilyProperties tu_queue_family_properties = {
   .queueFlags =
      VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT | VK_QUEUE_TRANSFER_BIT,
   .queueCount = 1,
   .timestampValidBits = 48,
   .minImageTransferGranularity = { 1, 1, 1 },
};

void
tu_GetPhysicalDeviceQueueFamilyProperties(
   VkPhysicalDevice physicalDevice,
   uint32_t *pQueueFamilyPropertyCount,
   VkQueueFamilyProperties *pQueueFamilyProperties)
{
   VK_OUTARRAY_MAKE(out, pQueueFamilyProperties, pQueueFamilyPropertyCount);

   vk_outarray_append(&out, p) { *p = tu_queue_family_properties; }
}

void
tu_GetPhysicalDeviceQueueFamilyProperties2(
   VkPhysicalDevice physicalDevice,
   uint32_t *pQueueFamilyPropertyCount,
   VkQueueFamilyProperties2 *pQueueFamilyProperties)
{
   VK_OUTARRAY_MAKE(out, pQueueFamilyProperties, pQueueFamilyPropertyCount);

   vk_outarray_append(&out, p)
   {
      p->queueFamilyProperties = tu_queue_family_properties;
   }
}

static uint64_t
tu_get_system_heap_size()
{
   struct sysinfo info;
   sysinfo(&info);

   uint64_t total_ram = (uint64_t) info.totalram * (uint64_t) info.mem_unit;

   /* We don't want to burn too much ram with the GPU. If the user has 4GiB
    * or less, we use at most half. If they have more than 4GiB, we use 3/4.
    */
   uint64_t available_ram;
   if (total_ram <= 4ull * 1024ull * 1024ull * 1024ull)
      available_ram = total_ram / 2;
   else
      available_ram = total_ram * 3 / 4;

   return available_ram;
}
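
/* Worked example (editor's note, not in the original file): on a 16 GiB
 * machine total_ram exceeds the 4 GiB threshold, so the advertised heap is
 * 16 GiB * 3 / 4 = 12 GiB; on a 4 GiB machine it is 4 GiB / 2 = 2 GiB.
 */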

void
tu_GetPhysicalDeviceMemoryProperties(
   VkPhysicalDevice physicalDevice,
   VkPhysicalDeviceMemoryProperties *pMemoryProperties)
{
   pMemoryProperties->memoryHeapCount = 1;
   pMemoryProperties->memoryHeaps[0].size = tu_get_system_heap_size();
   pMemoryProperties->memoryHeaps[0].flags = VK_MEMORY_HEAP_DEVICE_LOCAL_BIT;

   pMemoryProperties->memoryTypeCount = 1;
   pMemoryProperties->memoryTypes[0].propertyFlags =
      VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT |
      VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT |
      VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
   pMemoryProperties->memoryTypes[0].heapIndex = 0;
}
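
/* Illustrative sketch (not in the original file): since the driver exposes
 * exactly one memory type (DEVICE_LOCAL | HOST_VISIBLE | HOST_COHERENT) on a
 * single heap, the usual Vulkan memory-type search always lands on index 0:
 *
 *    static uint32_t
 *    pick_memory_type(const VkPhysicalDeviceMemoryProperties *props,
 *                     uint32_t type_bits, VkMemoryPropertyFlags wanted)
 *    {
 *       for (uint32_t i = 0; i < props->memoryTypeCount; i++) {
 *          if ((type_bits & (1u << i)) &&
 *              (props->memoryTypes[i].propertyFlags & wanted) == wanted)
 *             return i;
 *       }
 *       return UINT32_MAX; // no suitable type
 *    }
 */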

void
tu_GetPhysicalDeviceMemoryProperties2(
   VkPhysicalDevice physicalDevice,
   VkPhysicalDeviceMemoryProperties2 *pMemoryProperties)
{
   tu_GetPhysicalDeviceMemoryProperties(
      physicalDevice, &pMemoryProperties->memoryProperties);
}

static VkResult
tu_queue_init(struct tu_device *device,
              struct tu_queue *queue,
              uint32_t queue_family_index,
              int idx,
              VkDeviceQueueCreateFlags flags)
{
   queue->_loader_data.loaderMagic = ICD_LOADER_MAGIC;
   queue->device = device;
   queue->queue_family_index = queue_family_index;
   queue->queue_idx = idx;
   queue->flags = flags;

   int ret = tu_drm_submitqueue_new(device, 0, &queue->msm_queue_id);
   if (ret)
      return VK_ERROR_INITIALIZATION_FAILED;

   tu_fence_init(&queue->submit_fence, false);

   return VK_SUCCESS;
}

static void
tu_queue_finish(struct tu_queue *queue)
{
   tu_fence_finish(&queue->submit_fence);
   tu_drm_submitqueue_close(queue->device, queue->msm_queue_id);
}

static int
tu_get_device_extension_index(const char *name)
{
   for (unsigned i = 0; i < TU_DEVICE_EXTENSION_COUNT; ++i) {
      if (strcmp(name, tu_device_extensions[i].extensionName) == 0)
         return i;
   }
   return -1;
}

struct PACKED bcolor_entry {
   uint32_t fp32[4];
   uint16_t ui16[4];
   int16_t si16[4];
   uint16_t fp16[4];
   uint16_t rgb565;
   uint16_t rgb5a1;
   uint16_t rgba4;
   uint8_t __pad0[2];
   uint8_t ui8[4];
   int8_t si8[4];
   uint32_t rgb10a2;
   uint32_t z24; /* also s8? */
   uint16_t srgb[4]; /* appears to duplicate fp16[], but clamped, used for srgb */
   uint8_t __pad1[56];
} border_color[] = {
   [VK_BORDER_COLOR_FLOAT_TRANSPARENT_BLACK] = {},
   [VK_BORDER_COLOR_INT_TRANSPARENT_BLACK] = {},
   [VK_BORDER_COLOR_FLOAT_OPAQUE_BLACK] = {
      .fp32[3] = 0x3f800000,
      .ui16[3] = 0xffff,
      .si16[3] = 0x7fff,
      .fp16[3] = 0x3c00,
      .rgb5a1 = 0x8000,
      .rgba4 = 0xf000,
      .ui8[3] = 0xff,
      .si8[3] = 0x7f,
      .rgb10a2 = 0xc0000000,
      .srgb[3] = 0x3c00,
   },
   [VK_BORDER_COLOR_INT_OPAQUE_BLACK] = {
      .fp32[3] = 1,
      .fp16[3] = 1,
   },
   [VK_BORDER_COLOR_FLOAT_OPAQUE_WHITE] = {
      .fp32[0 ... 3] = 0x3f800000,
      .ui16[0 ... 3] = 0xffff,
      .si16[0 ... 3] = 0x7fff,
      .fp16[0 ... 3] = 0x3c00,
      .rgb565 = 0xffff,
      .rgb5a1 = 0xffff,
      .rgba4 = 0xffff,
      .ui8[0 ... 3] = 0xff,
      .si8[0 ... 3] = 0x7f,
      .rgb10a2 = 0xffffffff,
      .z24 = 0xffffff,
      .srgb[0 ... 3] = 0x3c00,
   },
   [VK_BORDER_COLOR_INT_OPAQUE_WHITE] = {
      .fp32[0 ... 3] = 1,
      .fp16[0 ... 3] = 1,
   },
};
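
/* Editor's sketch (an assumption, not in the original file): each
 * bcolor_entry is padded to 128 bytes (see the STATIC_ASSERT in
 * tu_CreateDevice), so a consumer that needs the GPU address of a given
 * border color could plausibly index the BO uploaded below as:
 *
 *    uint64_t bcolor_iova(struct tu_device *dev, VkBorderColor bc)
 *    {
 *       return dev->border_color.iova + bc * sizeof(struct bcolor_entry);
 *    }
 */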


VkResult
tu_CreateDevice(VkPhysicalDevice physicalDevice,
                const VkDeviceCreateInfo *pCreateInfo,
                const VkAllocationCallbacks *pAllocator,
                VkDevice *pDevice)
{
   TU_FROM_HANDLE(tu_physical_device, physical_device, physicalDevice);
   VkResult result;
   struct tu_device *device;

   /* Check enabled features */
   if (pCreateInfo->pEnabledFeatures) {
      VkPhysicalDeviceFeatures supported_features;
      tu_GetPhysicalDeviceFeatures(physicalDevice, &supported_features);
      VkBool32 *supported_feature = (VkBool32 *) &supported_features;
      VkBool32 *enabled_feature = (VkBool32 *) pCreateInfo->pEnabledFeatures;
      unsigned num_features =
         sizeof(VkPhysicalDeviceFeatures) / sizeof(VkBool32);
      for (uint32_t i = 0; i < num_features; i++) {
         if (enabled_feature[i] && !supported_feature[i])
            return vk_error(physical_device->instance,
                            VK_ERROR_FEATURE_NOT_PRESENT);
      }
   }

   device = vk_zalloc2(&physical_device->instance->alloc, pAllocator,
                       sizeof(*device), 8, VK_SYSTEM_ALLOCATION_SCOPE_DEVICE);
   if (!device)
      return vk_error(physical_device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);

   device->_loader_data.loaderMagic = ICD_LOADER_MAGIC;
   device->instance = physical_device->instance;
   device->physical_device = physical_device;

   if (pAllocator)
      device->alloc = *pAllocator;
   else
      device->alloc = physical_device->instance->alloc;

   for (uint32_t i = 0; i < pCreateInfo->enabledExtensionCount; i++) {
      const char *ext_name = pCreateInfo->ppEnabledExtensionNames[i];
      int index = tu_get_device_extension_index(ext_name);
      if (index < 0 ||
          !physical_device->supported_extensions.extensions[index]) {
         vk_free(&device->alloc, device);
         return vk_error(physical_device->instance,
                         VK_ERROR_EXTENSION_NOT_PRESENT);
      }

      device->enabled_extensions.extensions[index] = true;
   }

   for (unsigned i = 0; i < pCreateInfo->queueCreateInfoCount; i++) {
      const VkDeviceQueueCreateInfo *queue_create =
         &pCreateInfo->pQueueCreateInfos[i];
      uint32_t qfi = queue_create->queueFamilyIndex;
      device->queues[qfi] = vk_alloc(
         &device->alloc, queue_create->queueCount * sizeof(struct tu_queue),
         8, VK_SYSTEM_ALLOCATION_SCOPE_DEVICE);
      if (!device->queues[qfi]) {
         result = VK_ERROR_OUT_OF_HOST_MEMORY;
         goto fail_queues;
      }

      memset(device->queues[qfi], 0,
             queue_create->queueCount * sizeof(struct tu_queue));

      device->queue_count[qfi] = queue_create->queueCount;

      for (unsigned q = 0; q < queue_create->queueCount; q++) {
         result = tu_queue_init(device, &device->queues[qfi][q], qfi, q,
                                queue_create->flags);
         if (result != VK_SUCCESS)
            goto fail_queues;
      }
   }

   device->compiler = ir3_compiler_create(NULL, physical_device->gpu_id);
   if (!device->compiler) {
      result = VK_ERROR_INITIALIZATION_FAILED;
      goto fail_queues;
   }

#define VSC_DRAW_STRM_SIZE(pitch) ((pitch) * 32 + 0x100) /* extra size to store VSC_SIZE */
#define VSC_PRIM_STRM_SIZE(pitch) ((pitch) * 32)

   device->vsc_draw_strm_pitch = 0x440 * 4;
   device->vsc_prim_strm_pitch = 0x1040 * 4;

   result = tu_bo_init_new(device, &device->vsc_draw_strm, VSC_DRAW_STRM_SIZE(device->vsc_draw_strm_pitch));
   if (result != VK_SUCCESS)
      goto fail_vsc_data;

   result = tu_bo_init_new(device, &device->vsc_prim_strm, VSC_PRIM_STRM_SIZE(device->vsc_prim_strm_pitch));
   if (result != VK_SUCCESS)
      goto fail_vsc_data2;

   STATIC_ASSERT(sizeof(struct bcolor_entry) == 128);
   result = tu_bo_init_new(device, &device->border_color, sizeof(border_color));
   if (result != VK_SUCCESS)
      goto fail_border_color;

   result = tu_bo_map(device, &device->border_color);
   if (result != VK_SUCCESS)
      goto fail_border_color_map;

   memcpy(device->border_color.map, border_color, sizeof(border_color));

   VkPipelineCacheCreateInfo ci;
   ci.sType = VK_STRUCTURE_TYPE_PIPELINE_CACHE_CREATE_INFO;
   ci.pNext = NULL;
   ci.flags = 0;
   ci.pInitialData = NULL;
   ci.initialDataSize = 0;
   VkPipelineCache pc;
   result =
      tu_CreatePipelineCache(tu_device_to_handle(device), &ci, NULL, &pc);
   if (result != VK_SUCCESS)
      goto fail_pipeline_cache;

   device->mem_cache = tu_pipeline_cache_from_handle(pc);

   for (unsigned i = 0; i < ARRAY_SIZE(device->scratch_bos); i++)
      mtx_init(&device->scratch_bos[i].construct_mtx, mtx_plain);

   *pDevice = tu_device_to_handle(device);
   return VK_SUCCESS;

fail_pipeline_cache:
fail_border_color_map:
   tu_bo_finish(device, &device->border_color);

fail_border_color:
   tu_bo_finish(device, &device->vsc_prim_strm);

fail_vsc_data2:
   tu_bo_finish(device, &device->vsc_draw_strm);

fail_vsc_data:
   ralloc_free(device->compiler);

fail_queues:
   for (unsigned i = 0; i < TU_MAX_QUEUE_FAMILIES; i++) {
      for (unsigned q = 0; q < device->queue_count[i]; q++)
         tu_queue_finish(&device->queues[i][q]);
      if (device->queue_count[i])
         vk_free(&device->alloc, device->queues[i]);
   }

   vk_free(&device->alloc, device);
   return result;
}

void
tu_DestroyDevice(VkDevice _device, const VkAllocationCallbacks *pAllocator)
{
   TU_FROM_HANDLE(tu_device, device, _device);

   if (!device)
      return;

   tu_bo_finish(device, &device->vsc_draw_strm);
   tu_bo_finish(device, &device->vsc_prim_strm);
   tu_bo_finish(device, &device->border_color);

   for (unsigned i = 0; i < TU_MAX_QUEUE_FAMILIES; i++) {
      for (unsigned q = 0; q < device->queue_count[i]; q++)
         tu_queue_finish(&device->queues[i][q]);
      if (device->queue_count[i])
         vk_free(&device->alloc, device->queues[i]);
   }

   for (unsigned i = 0; i < ARRAY_SIZE(device->scratch_bos); i++) {
      if (device->scratch_bos[i].initialized)
         tu_bo_finish(device, &device->scratch_bos[i].bo);
   }

   ir3_compiler_destroy(device->compiler);

   VkPipelineCache pc = tu_pipeline_cache_to_handle(device->mem_cache);
   tu_DestroyPipelineCache(tu_device_to_handle(device), pc, NULL);

   vk_free(&device->alloc, device);
}

VkResult
tu_get_scratch_bo(struct tu_device *dev, uint64_t size, struct tu_bo **bo)
{
   unsigned size_log2 = MAX2(util_logbase2_ceil64(size), MIN_SCRATCH_BO_SIZE_LOG2);
   unsigned index = size_log2 - MIN_SCRATCH_BO_SIZE_LOG2;
   assert(index < ARRAY_SIZE(dev->scratch_bos));

   for (unsigned i = index; i < ARRAY_SIZE(dev->scratch_bos); i++) {
      if (p_atomic_read(&dev->scratch_bos[i].initialized)) {
         /* Fast path: just return the already-allocated BO. */
         *bo = &dev->scratch_bos[i].bo;
         return VK_SUCCESS;
      }
   }

   /* Slow path: actually allocate the BO. We take a lock because the
    * allocation is slow, and we don't want two threads racing to create the
    * same BO twice.
    */
   mtx_lock(&dev->scratch_bos[index].construct_mtx);

   /* Another thread may have allocated it already while we were waiting on
    * the lock. We need to check this in order to avoid double-allocating.
    */
   if (dev->scratch_bos[index].initialized) {
      mtx_unlock(&dev->scratch_bos[index].construct_mtx);
      *bo = &dev->scratch_bos[index].bo;
      return VK_SUCCESS;
   }

   unsigned bo_size = 1ull << size_log2;
   VkResult result = tu_bo_init_new(dev, &dev->scratch_bos[index].bo, bo_size);
   if (result != VK_SUCCESS) {
      mtx_unlock(&dev->scratch_bos[index].construct_mtx);
      return result;
   }

   p_atomic_set(&dev->scratch_bos[index].initialized, true);

   mtx_unlock(&dev->scratch_bos[index].construct_mtx);

   *bo = &dev->scratch_bos[index].bo;
   return VK_SUCCESS;
}
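
/* Illustrative usage (not in the original file): callers request a size and
 * receive a pointer to a lazily created, cached BO owned by the device; they
 * must not free it, since it is shared and reused across calls:
 *
 *    struct tu_bo *scratch;
 *    if (tu_get_scratch_bo(dev, size, &scratch) == VK_SUCCESS)
 *       use_scratch_iova(scratch->iova);  // hypothetical consumer
 */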

VkResult
tu_EnumerateInstanceLayerProperties(uint32_t *pPropertyCount,
                                    VkLayerProperties *pProperties)
{
   *pPropertyCount = 0;
   return VK_SUCCESS;
}

VkResult
tu_EnumerateDeviceLayerProperties(VkPhysicalDevice physicalDevice,
                                  uint32_t *pPropertyCount,
                                  VkLayerProperties *pProperties)
{
   *pPropertyCount = 0;
   return VK_SUCCESS;
}

void
tu_GetDeviceQueue2(VkDevice _device,
                   const VkDeviceQueueInfo2 *pQueueInfo,
                   VkQueue *pQueue)
{
   TU_FROM_HANDLE(tu_device, device, _device);
   struct tu_queue *queue;

   queue =
      &device->queues[pQueueInfo->queueFamilyIndex][pQueueInfo->queueIndex];
   if (pQueueInfo->flags != queue->flags) {
      /* From the Vulkan 1.1.70 spec:
       *
       * "The queue returned by vkGetDeviceQueue2 must have the same
       * flags value from this structure as that used at device
       * creation time in a VkDeviceQueueCreateInfo instance. If no
       * matching flags were specified at device creation time then
       * pQueue will return VK_NULL_HANDLE."
       */
      *pQueue = VK_NULL_HANDLE;
      return;
   }

   *pQueue = tu_queue_to_handle(queue);
}
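
/* Illustrative example (not in the original file): per the spec text quoted
 * above, asking for flags that were not used at device creation yields
 * VK_NULL_HANDLE instead of a queue:
 *
 *    VkDeviceQueueInfo2 info = {
 *       .sType = VK_STRUCTURE_TYPE_DEVICE_QUEUE_INFO_2,
 *       .flags = VK_DEVICE_QUEUE_CREATE_PROTECTED_BIT,  // not used at creation
 *       .queueFamilyIndex = 0,
 *       .queueIndex = 0,
 *    };
 *    VkQueue q;
 *    tu_GetDeviceQueue2(device, &info, &q);  // q == VK_NULL_HANDLE
 */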

void
tu_GetDeviceQueue(VkDevice _device,
                  uint32_t queueFamilyIndex,
                  uint32_t queueIndex,
                  VkQueue *pQueue)
{
   const VkDeviceQueueInfo2 info =
      (VkDeviceQueueInfo2) { .sType = VK_STRUCTURE_TYPE_DEVICE_QUEUE_INFO_2,
                             .queueFamilyIndex = queueFamilyIndex,
                             .queueIndex = queueIndex };

   tu_GetDeviceQueue2(_device, &info, pQueue);
}

static VkResult
tu_get_semaphore_syncobjs(const VkSemaphore *sems,
                          uint32_t sem_count,
                          bool wait,
                          struct drm_msm_gem_submit_syncobj **out,
                          uint32_t *out_count)
{
   uint32_t syncobj_count = 0;
   struct drm_msm_gem_submit_syncobj *syncobjs;

   for (uint32_t i = 0; i < sem_count; ++i) {
      TU_FROM_HANDLE(tu_semaphore, sem, sems[i]);

      struct tu_semaphore_part *part =
         sem->temporary.kind != TU_SEMAPHORE_NONE ?
            &sem->temporary : &sem->permanent;

      if (part->kind == TU_SEMAPHORE_SYNCOBJ)
         ++syncobj_count;
   }

   *out = NULL;
   *out_count = syncobj_count;
   if (!syncobj_count)
      return VK_SUCCESS;

   *out = syncobjs = calloc(syncobj_count, sizeof (*syncobjs));
   if (!syncobjs)
      return VK_ERROR_OUT_OF_HOST_MEMORY;

   for (uint32_t i = 0, j = 0; i < sem_count; ++i) {
      TU_FROM_HANDLE(tu_semaphore, sem, sems[i]);

      struct tu_semaphore_part *part =
         sem->temporary.kind != TU_SEMAPHORE_NONE ?
            &sem->temporary : &sem->permanent;

      if (part->kind == TU_SEMAPHORE_SYNCOBJ) {
         syncobjs[j].handle = part->syncobj;
         syncobjs[j].flags = wait ? MSM_SUBMIT_SYNCOBJ_RESET : 0;
         ++j;
      }
   }

   return VK_SUCCESS;
}


static void
tu_semaphores_remove_temp(struct tu_device *device,
                          const VkSemaphore *sems,
                          uint32_t sem_count)
{
   for (uint32_t i = 0; i < sem_count; ++i) {
      TU_FROM_HANDLE(tu_semaphore, sem, sems[i]);
      tu_semaphore_remove_temp(device, sem);
   }
}

VkResult
tu_QueueSubmit(VkQueue _queue,
               uint32_t submitCount,
               const VkSubmitInfo *pSubmits,
               VkFence _fence)
{
   TU_FROM_HANDLE(tu_queue, queue, _queue);
   VkResult result;

   for (uint32_t i = 0; i < submitCount; ++i) {
      const VkSubmitInfo *submit = pSubmits + i;
      const bool last_submit = (i == submitCount - 1);
      struct drm_msm_gem_submit_syncobj *in_syncobjs = NULL, *out_syncobjs = NULL;
      uint32_t nr_in_syncobjs, nr_out_syncobjs;
      struct tu_bo_list bo_list;
      tu_bo_list_init(&bo_list);

      /* Binary semaphores must become unsignaled again after a wait, so ask
       * for MSM_SUBMIT_SYNCOBJ_RESET on the wait syncobjs (wait = true).
       */
      result = tu_get_semaphore_syncobjs(pSubmits[i].pWaitSemaphores,
                                         pSubmits[i].waitSemaphoreCount,
                                         true, &in_syncobjs, &nr_in_syncobjs);
      if (result != VK_SUCCESS) {
         /* TODO: emit VK_ERROR_DEVICE_LOST */
         fprintf(stderr, "failed to allocate space for semaphore submission\n");
         abort();
      }

      result = tu_get_semaphore_syncobjs(pSubmits[i].pSignalSemaphores,
                                         pSubmits[i].signalSemaphoreCount,
                                         false, &out_syncobjs, &nr_out_syncobjs);
      if (result != VK_SUCCESS) {
         /* TODO: emit VK_ERROR_DEVICE_LOST */
         fprintf(stderr, "failed to allocate space for semaphore submission\n");
         abort();
      }

      uint32_t entry_count = 0;
      for (uint32_t j = 0; j < submit->commandBufferCount; ++j) {
         TU_FROM_HANDLE(tu_cmd_buffer, cmdbuf, submit->pCommandBuffers[j]);
         entry_count += cmdbuf->cs.entry_count;
      }

      struct drm_msm_gem_submit_cmd cmds[entry_count];
      uint32_t entry_idx = 0;
      for (uint32_t j = 0; j < submit->commandBufferCount; ++j) {
         TU_FROM_HANDLE(tu_cmd_buffer, cmdbuf, submit->pCommandBuffers[j]);
         struct tu_cs *cs = &cmdbuf->cs;
         for (unsigned i = 0; i < cs->entry_count; ++i, ++entry_idx) {
            cmds[entry_idx].type = MSM_SUBMIT_CMD_BUF;
            cmds[entry_idx].submit_idx =
               tu_bo_list_add(&bo_list, cs->entries[i].bo,
                              MSM_SUBMIT_BO_READ | MSM_SUBMIT_BO_DUMP);
            cmds[entry_idx].submit_offset = cs->entries[i].offset;
            cmds[entry_idx].size = cs->entries[i].size;
            cmds[entry_idx].pad = 0;
            cmds[entry_idx].nr_relocs = 0;
            cmds[entry_idx].relocs = 0;
         }

         tu_bo_list_merge(&bo_list, &cmdbuf->bo_list);
      }

      uint32_t flags = MSM_PIPE_3D0;
      if (nr_in_syncobjs) {
         flags |= MSM_SUBMIT_SYNCOBJ_IN;
      }
      if (nr_out_syncobjs) {
         flags |= MSM_SUBMIT_SYNCOBJ_OUT;
      }

      if (last_submit) {
         flags |= MSM_SUBMIT_FENCE_FD_OUT;
      }

      struct drm_msm_gem_submit req = {
         .flags = flags,
         .queueid = queue->msm_queue_id,
         .bos = (uint64_t)(uintptr_t) bo_list.bo_infos,
         .nr_bos = bo_list.count,
         .cmds = (uint64_t)(uintptr_t)cmds,
         .nr_cmds = entry_count,
         .in_syncobjs = (uint64_t)(uintptr_t)in_syncobjs,
         .out_syncobjs = (uint64_t)(uintptr_t)out_syncobjs,
         .nr_in_syncobjs = nr_in_syncobjs,
         .nr_out_syncobjs = nr_out_syncobjs,
         .syncobj_stride = sizeof(struct drm_msm_gem_submit_syncobj),
      };

      int ret = drmCommandWriteRead(queue->device->physical_device->local_fd,
                                    DRM_MSM_GEM_SUBMIT,
                                    &req, sizeof(req));
      if (ret) {
         fprintf(stderr, "submit failed: %s\n", strerror(errno));
         abort();
      }

      tu_bo_list_destroy(&bo_list);
      free(in_syncobjs);
      free(out_syncobjs);

      tu_semaphores_remove_temp(queue->device, pSubmits[i].pWaitSemaphores,
                                pSubmits[i].waitSemaphoreCount);
      if (last_submit) {
         /* no need to merge fences as queue execution is serialized; a fence
          * fd is only requested (MSM_SUBMIT_FENCE_FD_OUT) for the last submit
          */
         tu_fence_update_fd(&queue->submit_fence, req.fence_fd);
      }
   }

   if (_fence != VK_NULL_HANDLE) {
      TU_FROM_HANDLE(tu_fence, fence, _fence);
      tu_fence_copy(fence, &queue->submit_fence);
   }

   return VK_SUCCESS;
}

VkResult
tu_QueueWaitIdle(VkQueue _queue)
{
   TU_FROM_HANDLE(tu_queue, queue, _queue);

   tu_fence_wait_idle(&queue->submit_fence);

   return VK_SUCCESS;
}

VkResult
tu_DeviceWaitIdle(VkDevice _device)
{
   TU_FROM_HANDLE(tu_device, device, _device);

   for (unsigned i = 0; i < TU_MAX_QUEUE_FAMILIES; i++) {
      for (unsigned q = 0; q < device->queue_count[i]; q++) {
         tu_QueueWaitIdle(tu_queue_to_handle(&device->queues[i][q]));
      }
   }
   return VK_SUCCESS;
}

VkResult
tu_EnumerateInstanceExtensionProperties(const char *pLayerName,
                                        uint32_t *pPropertyCount,
                                        VkExtensionProperties *pProperties)
{
   VK_OUTARRAY_MAKE(out, pProperties, pPropertyCount);

   /* We support no layers */
   if (pLayerName)
      return vk_error(NULL, VK_ERROR_LAYER_NOT_PRESENT);

   for (int i = 0; i < TU_INSTANCE_EXTENSION_COUNT; i++) {
      if (tu_instance_extensions_supported.extensions[i]) {
         vk_outarray_append(&out, prop) { *prop = tu_instance_extensions[i]; }
      }
   }

   return vk_outarray_status(&out);
}

VkResult
tu_EnumerateDeviceExtensionProperties(VkPhysicalDevice physicalDevice,
                                      const char *pLayerName,
                                      uint32_t *pPropertyCount,
                                      VkExtensionProperties *pProperties)
{
   TU_FROM_HANDLE(tu_physical_device, device, physicalDevice);
   VK_OUTARRAY_MAKE(out, pProperties, pPropertyCount);

   /* We support no layers */
   if (pLayerName)
      return vk_error(NULL, VK_ERROR_LAYER_NOT_PRESENT);

   for (int i = 0; i < TU_DEVICE_EXTENSION_COUNT; i++) {
      if (device->supported_extensions.extensions[i]) {
         vk_outarray_append(&out, prop) { *prop = tu_device_extensions[i]; }
      }
   }

   return vk_outarray_status(&out);
}
1707
1708 PFN_vkVoidFunction
1709 tu_GetInstanceProcAddr(VkInstance _instance, const char *pName)
1710 {
1711 TU_FROM_HANDLE(tu_instance, instance, _instance);
1712
1713 return tu_lookup_entrypoint_checked(
1714 pName, instance ? instance->api_version : 0,
1715 instance ? &instance->enabled_extensions : NULL, NULL);
1716 }
1717
1718 /* The loader wants us to expose a second GetInstanceProcAddr function
1719 * to work around certain LD_PRELOAD issues seen in apps.
1720 */
1721 PUBLIC
1722 VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL
1723 vk_icdGetInstanceProcAddr(VkInstance instance, const char *pName);
1724
1725 PUBLIC
1726 VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL
1727 vk_icdGetInstanceProcAddr(VkInstance instance, const char *pName)
1728 {
1729 return tu_GetInstanceProcAddr(instance, pName);
1730 }
1731
1732 PFN_vkVoidFunction
1733 tu_GetDeviceProcAddr(VkDevice _device, const char *pName)
1734 {
1735 TU_FROM_HANDLE(tu_device, device, _device);
1736
1737 return tu_lookup_entrypoint_checked(pName, device->instance->api_version,
1738 &device->instance->enabled_extensions,
1739 &device->enabled_extensions);
1740 }
1741
1742 static VkResult
1743 tu_alloc_memory(struct tu_device *device,
1744 const VkMemoryAllocateInfo *pAllocateInfo,
1745 const VkAllocationCallbacks *pAllocator,
1746 VkDeviceMemory *pMem)
1747 {
1748 struct tu_device_memory *mem;
1749 VkResult result;
1750
1751 assert(pAllocateInfo->sType == VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO);
1752
1753 if (pAllocateInfo->allocationSize == 0) {
1754 /* Apparently, this is allowed */
1755 *pMem = VK_NULL_HANDLE;
1756 return VK_SUCCESS;
1757 }
1758
1759 mem = vk_alloc2(&device->alloc, pAllocator, sizeof(*mem), 8,
1760 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
1761 if (mem == NULL)
1762 return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
1763
1764 const VkImportMemoryFdInfoKHR *fd_info =
1765 vk_find_struct_const(pAllocateInfo->pNext, IMPORT_MEMORY_FD_INFO_KHR);
1766 if (fd_info && !fd_info->handleType)
1767 fd_info = NULL;
1768
1769 if (fd_info) {
1770 assert(fd_info->handleType ==
1771 VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT ||
1772 fd_info->handleType ==
1773 VK_EXTERNAL_MEMORY_HANDLE_TYPE_DMA_BUF_BIT_EXT);
1774
1775 /*
1776 * TODO Importing the same fd twice gives us the same handle without
1777 * reference counting. We need to maintain a per-instance handle-to-bo
1778 * table and add reference count to tu_bo.
1779 */
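/* A hypothetical sketch of such a table (none of these names exist in the
 * driver yet): key entries on the GEM handle, since importing the same fd
 * twice returns the same handle.
 *
 *    struct tu_bo_entry {
 *       uint32_t gem_handle;
 *       struct tu_bo bo;
 *       uint32_t refcount;
 *    };
 *
 * On import, look the handle up in a per-instance hash table and bump
 * refcount on a hit; on free, only call tu_gem_close() once the count
 * drops to zero.
 */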
1780 result = tu_bo_init_dmabuf(device, &mem->bo,
1781 pAllocateInfo->allocationSize, fd_info->fd);
1782 if (result == VK_SUCCESS) {
1783 /* take ownership and close the fd */
1784 close(fd_info->fd);
1785 }
1786 } else {
1787 result =
1788 tu_bo_init_new(device, &mem->bo, pAllocateInfo->allocationSize);
1789 }
1790
1791 if (result != VK_SUCCESS) {
1792 vk_free2(&device->alloc, pAllocator, mem);
1793 return result;
1794 }
1795
1796 mem->size = pAllocateInfo->allocationSize;
1797 mem->type_index = pAllocateInfo->memoryTypeIndex;
1798
1799 mem->map = NULL;
1800 mem->user_ptr = NULL;
1801
1802 *pMem = tu_device_memory_to_handle(mem);
1803
1804 return VK_SUCCESS;
1805 }
1806
1807 VkResult
1808 tu_AllocateMemory(VkDevice _device,
1809 const VkMemoryAllocateInfo *pAllocateInfo,
1810 const VkAllocationCallbacks *pAllocator,
1811 VkDeviceMemory *pMem)
1812 {
1813 TU_FROM_HANDLE(tu_device, device, _device);
1814 return tu_alloc_memory(device, pAllocateInfo, pAllocator, pMem);
1815 }
1816
1817 void
1818 tu_FreeMemory(VkDevice _device,
1819 VkDeviceMemory _mem,
1820 const VkAllocationCallbacks *pAllocator)
1821 {
1822 TU_FROM_HANDLE(tu_device, device, _device);
1823 TU_FROM_HANDLE(tu_device_memory, mem, _mem);
1824
1825 if (mem == NULL)
1826 return;
1827
1828 tu_bo_finish(device, &mem->bo);
1829 vk_free2(&device->alloc, pAllocator, mem);
1830 }
1831
1832 VkResult
1833 tu_MapMemory(VkDevice _device,
1834 VkDeviceMemory _memory,
1835 VkDeviceSize offset,
1836 VkDeviceSize size,
1837 VkMemoryMapFlags flags,
1838 void **ppData)
1839 {
1840 TU_FROM_HANDLE(tu_device, device, _device);
1841 TU_FROM_HANDLE(tu_device_memory, mem, _memory);
1842 VkResult result;
1843
1844 if (mem == NULL) {
1845 *ppData = NULL;
1846 return VK_SUCCESS;
1847 }
1848
1849 if (mem->user_ptr) {
1850 *ppData = mem->user_ptr;
1851 } else if (!mem->map) {
1852 result = tu_bo_map(device, &mem->bo);
1853 if (result != VK_SUCCESS)
1854 return result;
1855 *ppData = mem->map = mem->bo.map;
1856 } else
1857 *ppData = mem->map;
1858
1859 if (*ppData) {
1860 *ppData += offset;
1861 return VK_SUCCESS;
1862 }
1863
1864 return vk_error(device->instance, VK_ERROR_MEMORY_MAP_FAILED);
1865 }
1866
1867 void
1868 tu_UnmapMemory(VkDevice _device, VkDeviceMemory _memory)
1869 {
1870 /* Unmapping is deliberately a no-op: mappings persist until the BO is destroyed, and the freedreno Gallium driver likewise never unmaps. */
1871 }
1872
1873 VkResult
1874 tu_FlushMappedMemoryRanges(VkDevice _device,
1875 uint32_t memoryRangeCount,
1876 const VkMappedMemoryRange *pMemoryRanges)
1877 {
1878 return VK_SUCCESS;
1879 }
1880
1881 VkResult
1882 tu_InvalidateMappedMemoryRanges(VkDevice _device,
1883 uint32_t memoryRangeCount,
1884 const VkMappedMemoryRange *pMemoryRanges)
1885 {
1886 return VK_SUCCESS;
1887 }
1888
1889 void
1890 tu_GetBufferMemoryRequirements(VkDevice _device,
1891 VkBuffer _buffer,
1892 VkMemoryRequirements *pMemoryRequirements)
1893 {
1894 TU_FROM_HANDLE(tu_buffer, buffer, _buffer);
1895
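/* We advertise exactly one memory type, so bit 0 is the only bit a buffer
 * can require. The 64-byte alignment is a conservative catch-all for the
 * various buffer bindings (assumption: not tied to one documented limit).
 */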
1896 pMemoryRequirements->memoryTypeBits = 1;
1897 pMemoryRequirements->alignment = 64;
1898 pMemoryRequirements->size =
1899 align64(buffer->size, pMemoryRequirements->alignment);
1900 }
1901
1902 void
1903 tu_GetBufferMemoryRequirements2(
1904 VkDevice device,
1905 const VkBufferMemoryRequirementsInfo2 *pInfo,
1906 VkMemoryRequirements2 *pMemoryRequirements)
1907 {
1908 tu_GetBufferMemoryRequirements(device, pInfo->buffer,
1909 &pMemoryRequirements->memoryRequirements);
1910 }
1911
1912 void
1913 tu_GetImageMemoryRequirements(VkDevice _device,
1914 VkImage _image,
1915 VkMemoryRequirements *pMemoryRequirements)
1916 {
1917 TU_FROM_HANDLE(tu_image, image, _image);
1918
1919 pMemoryRequirements->memoryTypeBits = 1;
1920 pMemoryRequirements->size = image->layout.size;
1921 pMemoryRequirements->alignment = image->layout.base_align;
1922 }
1923
1924 void
1925 tu_GetImageMemoryRequirements2(VkDevice device,
1926 const VkImageMemoryRequirementsInfo2 *pInfo,
1927 VkMemoryRequirements2 *pMemoryRequirements)
1928 {
1929 tu_GetImageMemoryRequirements(device, pInfo->image,
1930 &pMemoryRequirements->memoryRequirements);
1931 }
1932
1933 void
1934 tu_GetImageSparseMemoryRequirements(
1935 VkDevice device,
1936 VkImage image,
1937 uint32_t *pSparseMemoryRequirementCount,
1938 VkSparseImageMemoryRequirements *pSparseMemoryRequirements)
1939 {
1940 tu_stub();
1941 }
1942
1943 void
1944 tu_GetImageSparseMemoryRequirements2(
1945 VkDevice device,
1946 const VkImageSparseMemoryRequirementsInfo2 *pInfo,
1947 uint32_t *pSparseMemoryRequirementCount,
1948 VkSparseImageMemoryRequirements2 *pSparseMemoryRequirements)
1949 {
1950 tu_stub();
1951 }
1952
1953 void
1954 tu_GetDeviceMemoryCommitment(VkDevice device,
1955 VkDeviceMemory memory,
1956 VkDeviceSize *pCommittedMemoryInBytes)
1957 {
1958 *pCommittedMemoryInBytes = 0;
1959 }
1960
1961 VkResult
1962 tu_BindBufferMemory2(VkDevice device,
1963 uint32_t bindInfoCount,
1964 const VkBindBufferMemoryInfo *pBindInfos)
1965 {
1966 for (uint32_t i = 0; i < bindInfoCount; ++i) {
1967 TU_FROM_HANDLE(tu_device_memory, mem, pBindInfos[i].memory);
1968 TU_FROM_HANDLE(tu_buffer, buffer, pBindInfos[i].buffer);
1969
1970 if (mem) {
1971 buffer->bo = &mem->bo;
1972 buffer->bo_offset = pBindInfos[i].memoryOffset;
1973 } else {
1974 buffer->bo = NULL;
1975 }
1976 }
1977 return VK_SUCCESS;
1978 }
1979
1980 VkResult
1981 tu_BindBufferMemory(VkDevice device,
1982 VkBuffer buffer,
1983 VkDeviceMemory memory,
1984 VkDeviceSize memoryOffset)
1985 {
1986 const VkBindBufferMemoryInfo info = {
1987 .sType = VK_STRUCTURE_TYPE_BIND_BUFFER_MEMORY_INFO,
1988 .buffer = buffer,
1989 .memory = memory,
1990 .memoryOffset = memoryOffset
1991 };
1992
1993 return tu_BindBufferMemory2(device, 1, &info);
1994 }
1995
1996 VkResult
1997 tu_BindImageMemory2(VkDevice device,
1998 uint32_t bindInfoCount,
1999 const VkBindImageMemoryInfo *pBindInfos)
2000 {
2001 for (uint32_t i = 0; i < bindInfoCount; ++i) {
2002 TU_FROM_HANDLE(tu_image, image, pBindInfos[i].image);
2003 TU_FROM_HANDLE(tu_device_memory, mem, pBindInfos[i].memory);
2004
2005 if (mem) {
2006 image->bo = &mem->bo;
2007 image->bo_offset = pBindInfos[i].memoryOffset;
2008 } else {
2009 image->bo = NULL;
2010 image->bo_offset = 0;
2011 }
2012 }
2013
2014 return VK_SUCCESS;
2015 }
2016
2017 VkResult
2018 tu_BindImageMemory(VkDevice device,
2019 VkImage image,
2020 VkDeviceMemory memory,
2021 VkDeviceSize memoryOffset)
2022 {
2023 const VkBindImageMemoryInfo info = {
2024 .sType = VK_STRUCTURE_TYPE_BIND_IMAGE_MEMORY_INFO,
2025 .image = image,
2026 .memory = memory,
2027 .memoryOffset = memoryOffset
2028 };
2029
2030 return tu_BindImageMemory2(device, 1, &info);
2031 }
2032
2033 VkResult
2034 tu_QueueBindSparse(VkQueue _queue,
2035 uint32_t bindInfoCount,
2036 const VkBindSparseInfo *pBindInfo,
2037 VkFence _fence)
2038 {
2039 return VK_SUCCESS;
2040 }
2041
2042 /* Queue semaphore functions */
2043
2045 static void
2046 tu_semaphore_part_destroy(struct tu_device *device,
2047 struct tu_semaphore_part *part)
2048 {
2049 switch (part->kind) {
2050 case TU_SEMAPHORE_NONE:
2051 break;
2052 case TU_SEMAPHORE_SYNCOBJ:
2053 drmSyncobjDestroy(device->physical_device->local_fd, part->syncobj);
2054 break;
2055 }
2056 part->kind = TU_SEMAPHORE_NONE;
2057 }
2058
2059 static void
2060 tu_semaphore_remove_temp(struct tu_device *device,
2061 struct tu_semaphore *sem)
2062 {
2063 if (sem->temporary.kind != TU_SEMAPHORE_NONE) {
2064 tu_semaphore_part_destroy(device, &sem->temporary);
2065 }
2066 }
2067
2068 VkResult
2069 tu_CreateSemaphore(VkDevice _device,
2070 const VkSemaphoreCreateInfo *pCreateInfo,
2071 const VkAllocationCallbacks *pAllocator,
2072 VkSemaphore *pSemaphore)
2073 {
2074 TU_FROM_HANDLE(tu_device, device, _device);
2075
2076 struct tu_semaphore *sem =
2077 vk_alloc2(&device->alloc, pAllocator, sizeof(*sem), 8,
2078 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
2079 if (!sem)
2080 return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
2081
2082 const VkExportSemaphoreCreateInfo *export =
2083 vk_find_struct_const(pCreateInfo->pNext, EXPORT_SEMAPHORE_CREATE_INFO);
2084 VkExternalSemaphoreHandleTypeFlags handleTypes =
2085 export ? export->handleTypes : 0;
2086
2087 sem->permanent.kind = TU_SEMAPHORE_NONE;
2088 sem->temporary.kind = TU_SEMAPHORE_NONE;
2089
2090 if (handleTypes) {
2091 if (drmSyncobjCreate(device->physical_device->local_fd, 0, &sem->permanent.syncobj) < 0) {
2092 vk_free2(&device->alloc, pAllocator, sem);
2093 return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
2094 }
2095 sem->permanent.kind = TU_SEMAPHORE_SYNCOBJ;
2096 }
2097 *pSemaphore = tu_semaphore_to_handle(sem);
2098 return VK_SUCCESS;
2099 }
2100
2101 void
2102 tu_DestroySemaphore(VkDevice _device,
2103 VkSemaphore _semaphore,
2104 const VkAllocationCallbacks *pAllocator)
2105 {
2106 TU_FROM_HANDLE(tu_device, device, _device);
2107 TU_FROM_HANDLE(tu_semaphore, sem, _semaphore);
2108 if (!_semaphore)
2109 return;
2110
2111 tu_semaphore_part_destroy(device, &sem->permanent);
2112 tu_semaphore_part_destroy(device, &sem->temporary);
2113
2114 vk_free2(&device->alloc, pAllocator, sem);
2115 }
2116
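/* Events are backed by a small GPU-visible BO: the host-side entrypoints
 * below set, reset and poll the first 64-bit word through the CPU mapping,
 * while the command-buffer paths elsewhere in the driver write the same
 * word from the GPU.
 */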
2117 VkResult
2118 tu_CreateEvent(VkDevice _device,
2119 const VkEventCreateInfo *pCreateInfo,
2120 const VkAllocationCallbacks *pAllocator,
2121 VkEvent *pEvent)
2122 {
2123 TU_FROM_HANDLE(tu_device, device, _device);
2124 struct tu_event *event =
2125 vk_alloc2(&device->alloc, pAllocator, sizeof(*event), 8,
2126 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
2127
2128 if (!event)
2129 return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
2130
2131 VkResult result = tu_bo_init_new(device, &event->bo, 0x1000);
2132 if (result != VK_SUCCESS)
2133 goto fail_alloc;
2134
2135 result = tu_bo_map(device, &event->bo);
2136 if (result != VK_SUCCESS)
2137 goto fail_map;
2138
2139 *pEvent = tu_event_to_handle(event);
2140
2141 return VK_SUCCESS;
2142
2143 fail_map:
2144 tu_bo_finish(device, &event->bo);
2145 fail_alloc:
2146 vk_free2(&device->alloc, pAllocator, event);
2147 return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
2148 }
2149
2150 void
2151 tu_DestroyEvent(VkDevice _device,
2152 VkEvent _event,
2153 const VkAllocationCallbacks *pAllocator)
2154 {
2155 TU_FROM_HANDLE(tu_device, device, _device);
2156 TU_FROM_HANDLE(tu_event, event, _event);
2157
2158 if (!event)
2159 return;
2160
2161 tu_bo_finish(device, &event->bo);
2162 vk_free2(&device->alloc, pAllocator, event);
2163 }
2164
2165 VkResult
2166 tu_GetEventStatus(VkDevice _device, VkEvent _event)
2167 {
2168 TU_FROM_HANDLE(tu_event, event, _event);
2169
2170 if (*(uint64_t*) event->bo.map == 1)
2171 return VK_EVENT_SET;
2172 return VK_EVENT_RESET;
2173 }
2174
2175 VkResult
2176 tu_SetEvent(VkDevice _device, VkEvent _event)
2177 {
2178 TU_FROM_HANDLE(tu_event, event, _event);
2179 *(uint64_t*) event->bo.map = 1;
2180
2181 return VK_SUCCESS;
2182 }
2183
2184 VkResult
2185 tu_ResetEvent(VkDevice _device, VkEvent _event)
2186 {
2187 TU_FROM_HANDLE(tu_event, event, _event);
2188 *(uint64_t*) event->bo.map = 0;
2189
2190 return VK_SUCCESS;
2191 }
2192
2193 VkResult
2194 tu_CreateBuffer(VkDevice _device,
2195 const VkBufferCreateInfo *pCreateInfo,
2196 const VkAllocationCallbacks *pAllocator,
2197 VkBuffer *pBuffer)
2198 {
2199 TU_FROM_HANDLE(tu_device, device, _device);
2200 struct tu_buffer *buffer;
2201
2202 assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO);
2203
2204 buffer = vk_alloc2(&device->alloc, pAllocator, sizeof(*buffer), 8,
2205 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
2206 if (buffer == NULL)
2207 return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
2208
2209 buffer->size = pCreateInfo->size;
2210 buffer->usage = pCreateInfo->usage;
2211 buffer->flags = pCreateInfo->flags;
2212
2213 *pBuffer = tu_buffer_to_handle(buffer);
2214
2215 return VK_SUCCESS;
2216 }
2217
2218 void
2219 tu_DestroyBuffer(VkDevice _device,
2220 VkBuffer _buffer,
2221 const VkAllocationCallbacks *pAllocator)
2222 {
2223 TU_FROM_HANDLE(tu_device, device, _device);
2224 TU_FROM_HANDLE(tu_buffer, buffer, _buffer);
2225
2226 if (!buffer)
2227 return;
2228
2229 vk_free2(&device->alloc, pAllocator, buffer);
2230 }
2231
2232 VkResult
2233 tu_CreateFramebuffer(VkDevice _device,
2234 const VkFramebufferCreateInfo *pCreateInfo,
2235 const VkAllocationCallbacks *pAllocator,
2236 VkFramebuffer *pFramebuffer)
2237 {
2238 TU_FROM_HANDLE(tu_device, device, _device);
2239 struct tu_framebuffer *framebuffer;
2240
2241 assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO);
2242
2243 size_t size = sizeof(*framebuffer) + sizeof(struct tu_attachment_info) *
2244 pCreateInfo->attachmentCount;
2245 framebuffer = vk_alloc2(&device->alloc, pAllocator, size, 8,
2246 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
2247 if (framebuffer == NULL)
2248 return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
2249
2250 framebuffer->attachment_count = pCreateInfo->attachmentCount;
2251 framebuffer->width = pCreateInfo->width;
2252 framebuffer->height = pCreateInfo->height;
2253 framebuffer->layers = pCreateInfo->layers;
2254 for (uint32_t i = 0; i < pCreateInfo->attachmentCount; i++) {
2255 VkImageView _iview = pCreateInfo->pAttachments[i];
2256 struct tu_image_view *iview = tu_image_view_from_handle(_iview);
2257 framebuffer->attachments[i].attachment = iview;
2258 }
2259
2260 *pFramebuffer = tu_framebuffer_to_handle(framebuffer);
2261 return VK_SUCCESS;
2262 }
2263
2264 void
2265 tu_DestroyFramebuffer(VkDevice _device,
2266 VkFramebuffer _fb,
2267 const VkAllocationCallbacks *pAllocator)
2268 {
2269 TU_FROM_HANDLE(tu_device, device, _device);
2270 TU_FROM_HANDLE(tu_framebuffer, fb, _fb);
2271
2272 if (!fb)
2273 return;
2274 vk_free2(&device->alloc, pAllocator, fb);
2275 }
2276
2277 static void
2278 tu_init_sampler(struct tu_device *device,
2279 struct tu_sampler *sampler,
2280 const VkSamplerCreateInfo *pCreateInfo)
2281 {
2282 const struct VkSamplerReductionModeCreateInfo *reduction =
2283 vk_find_struct_const(pCreateInfo->pNext, SAMPLER_REDUCTION_MODE_CREATE_INFO);
2284 const struct VkSamplerYcbcrConversionInfo *ycbcr_conversion =
2285 vk_find_struct_const(pCreateInfo->pNext, SAMPLER_YCBCR_CONVERSION_INFO);
2286
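/* The expression below halves maxAnisotropy, clamps it to 8 and takes
 * util_last_bit(), mapping the legal values 1/2/4/8/16 onto the hardware's
 * 0..4 encoding (0 also being the anisotropy-disabled case).
 */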
2287 unsigned aniso = pCreateInfo->anisotropyEnable ?
2288 util_last_bit(MIN2((uint32_t)pCreateInfo->maxAnisotropy >> 1, 8)) : 0;
2289 bool miplinear = (pCreateInfo->mipmapMode == VK_SAMPLER_MIPMAP_MODE_LINEAR);
2290 float min_lod = CLAMP(pCreateInfo->minLod, 0.0f, 4095.0f / 256.0f);
2291 float max_lod = CLAMP(pCreateInfo->maxLod, 0.0f, 4095.0f / 256.0f);
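/* The 0..4095/256 clamp above presumably mirrors unsigned 4.8 fixed-point
 * LOD fields: 12 bits with 8 fractional bits top out at 15.99609375.
 */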
2292
2293 sampler->descriptor[0] =
2294 COND(miplinear, A6XX_TEX_SAMP_0_MIPFILTER_LINEAR_NEAR) |
2295 A6XX_TEX_SAMP_0_XY_MAG(tu6_tex_filter(pCreateInfo->magFilter, aniso)) |
2296 A6XX_TEX_SAMP_0_XY_MIN(tu6_tex_filter(pCreateInfo->minFilter, aniso)) |
2297 A6XX_TEX_SAMP_0_ANISO(aniso) |
2298 A6XX_TEX_SAMP_0_WRAP_S(tu6_tex_wrap(pCreateInfo->addressModeU)) |
2299 A6XX_TEX_SAMP_0_WRAP_T(tu6_tex_wrap(pCreateInfo->addressModeV)) |
2300 A6XX_TEX_SAMP_0_WRAP_R(tu6_tex_wrap(pCreateInfo->addressModeW)) |
2301 A6XX_TEX_SAMP_0_LOD_BIAS(pCreateInfo->mipLodBias);
2302 sampler->descriptor[1] =
2303 /* COND(!cso->seamless_cube_map, A6XX_TEX_SAMP_1_CUBEMAPSEAMLESSFILTOFF) | */
2304 COND(pCreateInfo->unnormalizedCoordinates, A6XX_TEX_SAMP_1_UNNORM_COORDS) |
2305 A6XX_TEX_SAMP_1_MIN_LOD(min_lod) |
2306 A6XX_TEX_SAMP_1_MAX_LOD(max_lod) |
2307 COND(pCreateInfo->compareEnable,
2308 A6XX_TEX_SAMP_1_COMPARE_FUNC(tu6_compare_func(pCreateInfo->compareOp)));
2309 /* This is an offset into the border_color BO, which we fill with all the
2310 * possible Vulkan border colors in the correct order, so we can just use
2311 * the Vulkan enum with no translation necessary.
2312 */
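/* Worked example: VK_BORDER_COLOR_FLOAT_OPAQUE_WHITE has enum value 4, so
 * its entry sits at byte offset 4 * sizeof(struct bcolor_entry) in the BO.
 */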
2313 sampler->descriptor[2] =
2314 A6XX_TEX_SAMP_2_BCOLOR_OFFSET((unsigned) pCreateInfo->borderColor *
2315 sizeof(struct bcolor_entry));
2316 sampler->descriptor[3] = 0;
2317
2318 if (reduction) {
2319 sampler->descriptor[2] |= A6XX_TEX_SAMP_2_REDUCTION_MODE(
2320 tu6_reduction_mode(reduction->reductionMode));
2321 }
2322
2323 sampler->ycbcr_sampler = ycbcr_conversion ?
2324 tu_sampler_ycbcr_conversion_from_handle(ycbcr_conversion->conversion) : NULL;
2325
2326 if (sampler->ycbcr_sampler &&
2327 sampler->ycbcr_sampler->chroma_filter == VK_FILTER_LINEAR) {
2328 sampler->descriptor[2] |= A6XX_TEX_SAMP_2_CHROMA_LINEAR;
2329 }
2330
2331 /* TODO:
2332 * A6XX_TEX_SAMP_1_MIPFILTER_LINEAR_FAR disables mipmapping, but Vulkan has no NONE mipmap mode that would map onto it.
2333 */
2334 }
2335
2336 VkResult
2337 tu_CreateSampler(VkDevice _device,
2338 const VkSamplerCreateInfo *pCreateInfo,
2339 const VkAllocationCallbacks *pAllocator,
2340 VkSampler *pSampler)
2341 {
2342 TU_FROM_HANDLE(tu_device, device, _device);
2343 struct tu_sampler *sampler;
2344
2345 assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_SAMPLER_CREATE_INFO);
2346
2347 sampler = vk_alloc2(&device->alloc, pAllocator, sizeof(*sampler), 8,
2348 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
2349 if (!sampler)
2350 return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
2351
2352 tu_init_sampler(device, sampler, pCreateInfo);
2353 *pSampler = tu_sampler_to_handle(sampler);
2354
2355 return VK_SUCCESS;
2356 }
2357
2358 void
2359 tu_DestroySampler(VkDevice _device,
2360 VkSampler _sampler,
2361 const VkAllocationCallbacks *pAllocator)
2362 {
2363 TU_FROM_HANDLE(tu_device, device, _device);
2364 TU_FROM_HANDLE(tu_sampler, sampler, _sampler);
2365
2366 if (!sampler)
2367 return;
2368 vk_free2(&device->alloc, pAllocator, sampler);
2369 }
2370
2371 /* vk_icd.h does not declare this function, so we declare it here to
2372 * suppress Wmissing-prototypes.
2373 */
2374 PUBLIC VKAPI_ATTR VkResult VKAPI_CALL
2375 vk_icdNegotiateLoaderICDInterfaceVersion(uint32_t *pSupportedVersion);
2376
2377 PUBLIC VKAPI_ATTR VkResult VKAPI_CALL
2378 vk_icdNegotiateLoaderICDInterfaceVersion(uint32_t *pSupportedVersion)
2379 {
2380 /* For the full details on loader interface versioning, see
2381 * <https://github.com/KhronosGroup/Vulkan-LoaderAndValidationLayers/blob/master/loader/LoaderAndLayerInterface.md>.
2382 * What follows is a condensed summary, to help you navigate the large and
2383 * confusing official doc.
2384 *
2385 * - Loader interface v0 is incompatible with later versions. We don't
2386 * support it.
2387 *
2388 * - In loader interface v1:
2389 * - The first ICD entrypoint called by the loader is
2390 * vk_icdGetInstanceProcAddr(). The ICD must statically expose this
2391 * entrypoint.
2392 * - The ICD must statically expose no other Vulkan symbol unless it
2393 * is linked with -Bsymbolic.
2394 * - Each dispatchable Vulkan handle created by the ICD must be
2395 * a pointer to a struct whose first member is VK_LOADER_DATA. The
2396 * ICD must initialize VK_LOADER_DATA.loadMagic to
2397 * ICD_LOADER_MAGIC.
2398 * - The loader implements vkCreate{PLATFORM}SurfaceKHR() and
2399 * vkDestroySurfaceKHR(). The ICD must be capable of working with
2400 * such loader-managed surfaces.
2401 *
2402 * - Loader interface v2 differs from v1 in:
2403 * - The first ICD entrypoint called by the loader is
2404 * vk_icdNegotiateLoaderICDInterfaceVersion(). The ICD must
2405 * statically expose this entrypoint.
2406 *
2407 * - Loader interface v3 differs from v2 in:
2408 * - The ICD must implement vkCreate{PLATFORM}SurfaceKHR(),
2409 * vkDestroySurfaceKHR(), and other API which uses VkSurfaceKHR,
2410 * because the loader no longer does so.
2411 */
2412 *pSupportedVersion = MIN2(*pSupportedVersion, 3u);
2413 return VK_SUCCESS;
2414 }
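/* A minimal sketch of the handshake from the loader's side (illustrative
 * only, not driver code):
 *
 *    uint32_t version = 4;    (the loader's own maximum)
 *    vk_icdNegotiateLoaderICDInterfaceVersion(&version);
 *    assert(version == 3);    (both sides proceed with the lower version)
 */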
2415
2416 VkResult
2417 tu_GetMemoryFdKHR(VkDevice _device,
2418 const VkMemoryGetFdInfoKHR *pGetFdInfo,
2419 int *pFd)
2420 {
2421 TU_FROM_HANDLE(tu_device, device, _device);
2422 TU_FROM_HANDLE(tu_device_memory, memory, pGetFdInfo->memory);
2423
2424 assert(pGetFdInfo->sType == VK_STRUCTURE_TYPE_MEMORY_GET_FD_INFO_KHR);
2425
2426 /* At the moment, we support only the below handle types. */
2427 assert(pGetFdInfo->handleType ==
2428 VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT ||
2429 pGetFdInfo->handleType ==
2430 VK_EXTERNAL_MEMORY_HANDLE_TYPE_DMA_BUF_BIT_EXT);
2431
2432 int prime_fd = tu_bo_export_dmabuf(device, &memory->bo);
2433 if (prime_fd < 0)
2434 return vk_error(device->instance, VK_ERROR_OUT_OF_DEVICE_MEMORY);
2435
2436 *pFd = prime_fd;
2437 return VK_SUCCESS;
2438 }
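/* Typical application-side round trip (hypothetical sketch):
 *
 *    VkMemoryGetFdInfoKHR get = {
 *       .sType = VK_STRUCTURE_TYPE_MEMORY_GET_FD_INFO_KHR,
 *       .memory = mem,
 *       .handleType = VK_EXTERNAL_MEMORY_HANDLE_TYPE_DMA_BUF_BIT_EXT,
 *    };
 *    int fd;
 *    vkGetMemoryFdKHR(device, &get, &fd);
 *
 * The caller owns the returned fd; when it is later passed back through a
 * VkImportMemoryFdInfoKHR chained to vkAllocateMemory, the import takes
 * ownership (tu_alloc_memory above closes it on success).
 */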
2439
2440 VkResult
2441 tu_GetMemoryFdPropertiesKHR(VkDevice _device,
2442 VkExternalMemoryHandleTypeFlagBits handleType,
2443 int fd,
2444 VkMemoryFdPropertiesKHR *pMemoryFdProperties)
2445 {
2446 assert(handleType == VK_EXTERNAL_MEMORY_HANDLE_TYPE_DMA_BUF_BIT_EXT);
2447 pMemoryFdProperties->memoryTypeBits = 1;
2448 return VK_SUCCESS;
2449 }
2450
2451 VkResult
2452 tu_ImportSemaphoreFdKHR(VkDevice _device,
2453 const VkImportSemaphoreFdInfoKHR *pImportSemaphoreFdInfo)
2454 {
2455 TU_FROM_HANDLE(tu_device, device, _device);
2456 TU_FROM_HANDLE(tu_semaphore, sem, pImportSemaphoreFdInfo->semaphore);
2457 int ret;
2458 struct tu_semaphore_part *dst = NULL;
2459
2460 if (pImportSemaphoreFdInfo->flags & VK_SEMAPHORE_IMPORT_TEMPORARY_BIT) {
2461 dst = &sem->temporary;
2462 } else {
2463 dst = &sem->permanent;
2464 }
2465
2466 uint32_t syncobj = dst->kind == TU_SEMAPHORE_SYNCOBJ ? dst->syncobj : 0;
2467
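/* OPAQUE_FD transports a whole syncobj and so replaces whatever handle the
 * destination part held. SYNC_FD only transports a fence payload, which is
 * imported into an existing (or freshly created) syncobj; the spec defines
 * fd == -1 as an already-signaled payload, which we realize by signaling
 * the syncobj directly.
 */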
2468 switch (pImportSemaphoreFdInfo->handleType) {
2469 case VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT: {
2470 uint32_t old_syncobj = syncobj;
2471 ret = drmSyncobjFDToHandle(device->physical_device->local_fd, pImportSemaphoreFdInfo->fd, &syncobj);
2472 if (ret == 0) {
2473 close(pImportSemaphoreFdInfo->fd);
2474 if (old_syncobj)
2475 drmSyncobjDestroy(device->physical_device->local_fd, old_syncobj);
2476 }
2477 break;
2478 }
2479 case VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT: {
2480 if (!syncobj) {
2481 ret = drmSyncobjCreate(device->physical_device->local_fd, 0, &syncobj);
2482 if (ret)
2483 break;
2484 }
2485 if (pImportSemaphoreFdInfo->fd == -1) {
2486 ret = drmSyncobjSignal(device->physical_device->local_fd, &syncobj, 1);
2487 } else {
2488 ret = drmSyncobjImportSyncFile(device->physical_device->local_fd, syncobj, pImportSemaphoreFdInfo->fd);
2489 }
2490 if (!ret)
2491 close(pImportSemaphoreFdInfo->fd);
2492 break;
2493 }
2494 default:
2495 unreachable("Unhandled semaphore handle type");
2496 }
2497
2498 if (ret) {
2499 return vk_error(device->instance, VK_ERROR_INVALID_EXTERNAL_HANDLE);
2500 }
2501 dst->syncobj = syncobj;
2502 dst->kind = TU_SEMAPHORE_SYNCOBJ;
2503
2504 return VK_SUCCESS;
2505 }
2506
2507 VkResult
2508 tu_GetSemaphoreFdKHR(VkDevice _device,
2509 const VkSemaphoreGetFdInfoKHR *pGetFdInfo,
2510 int *pFd)
2511 {
2512 TU_FROM_HANDLE(tu_device, device, _device);
2513 TU_FROM_HANDLE(tu_semaphore, sem, pGetFdInfo->semaphore);
2514 int ret;
2515 uint32_t syncobj_handle;
2516
2517 if (sem->temporary.kind != TU_SEMAPHORE_NONE) {
2518 assert(sem->temporary.kind == TU_SEMAPHORE_SYNCOBJ);
2519 syncobj_handle = sem->temporary.syncobj;
2520 } else {
2521 assert(sem->permanent.kind == TU_SEMAPHORE_SYNCOBJ);
2522 syncobj_handle = sem->permanent.syncobj;
2523 }
2524
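/* SYNC_FD export has copy transference and, per the spec, the same side
 * effect on the source as a semaphore wait: a temporary part is dropped
 * entirely, while a permanent syncobj is reset in place.
 */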
2525 switch (pGetFdInfo->handleType) {
2526 case VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT:
2527 ret = drmSyncobjHandleToFD(device->physical_device->local_fd, syncobj_handle, pFd);
2528 break;
2529 case VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT:
2530 ret = drmSyncobjExportSyncFile(device->physical_device->local_fd, syncobj_handle, pFd);
2531 if (!ret) {
2532 if (sem->temporary.kind != TU_SEMAPHORE_NONE) {
2533 tu_semaphore_part_destroy(device, &sem->temporary);
2534 } else {
2535 drmSyncobjReset(device->physical_device->local_fd, &syncobj_handle, 1);
2536 }
2537 }
2538 break;
2539 default:
2540 unreachable("Unhandled semaphore handle type");
2541 }
2542
2543 if (ret)
2544 return vk_error(device->instance, VK_ERROR_INVALID_EXTERNAL_HANDLE);
2545 return VK_SUCCESS;
2546 }
2547
2548
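/* Syncobjs need both the DRM core capability and a new-enough msm kernel
 * driver; the 1.6 check matches the msm version bump that, to our
 * knowledge, introduced syncobj-capable submits.
 */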
2549 static bool
tu_has_syncobj(struct tu_physical_device *pdev)
2550 {
2551 uint64_t value;
2552 if (drmGetCap(pdev->local_fd, DRM_CAP_SYNCOBJ, &value))
2553 return false;
2554 return value && pdev->msm_major_version == 1 && pdev->msm_minor_version >= 6;
2555 }
2556
2557 void
2558 tu_GetPhysicalDeviceExternalSemaphoreProperties(
2559 VkPhysicalDevice physicalDevice,
2560 const VkPhysicalDeviceExternalSemaphoreInfo *pExternalSemaphoreInfo,
2561 VkExternalSemaphoreProperties *pExternalSemaphoreProperties)
2562 {
2563 TU_FROM_HANDLE(tu_physical_device, pdev, physicalDevice);
2564
2565 if (tu_has_syncobj(pdev) &&
2566 (pExternalSemaphoreInfo->handleType == VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT ||
2567 pExternalSemaphoreInfo->handleType == VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT)) {
2568 pExternalSemaphoreProperties->exportFromImportedHandleTypes = VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT | VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT;
2569 pExternalSemaphoreProperties->compatibleHandleTypes = VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT | VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT;
2570 pExternalSemaphoreProperties->externalSemaphoreFeatures = VK_EXTERNAL_SEMAPHORE_FEATURE_EXPORTABLE_BIT |
2571 VK_EXTERNAL_SEMAPHORE_FEATURE_IMPORTABLE_BIT;
2572 } else {
2573 pExternalSemaphoreProperties->exportFromImportedHandleTypes = 0;
2574 pExternalSemaphoreProperties->compatibleHandleTypes = 0;
2575 pExternalSemaphoreProperties->externalSemaphoreFeatures = 0;
2576 }
2577 }
2578
2579 void
2580 tu_GetPhysicalDeviceExternalFenceProperties(
2581 VkPhysicalDevice physicalDevice,
2582 const VkPhysicalDeviceExternalFenceInfo *pExternalFenceInfo,
2583 VkExternalFenceProperties *pExternalFenceProperties)
2584 {
2585 pExternalFenceProperties->exportFromImportedHandleTypes = 0;
2586 pExternalFenceProperties->compatibleHandleTypes = 0;
2587 pExternalFenceProperties->externalFenceFeatures = 0;
2588 }
2589
2590 VkResult
2591 tu_CreateDebugReportCallbackEXT(
2592 VkInstance _instance,
2593 const VkDebugReportCallbackCreateInfoEXT *pCreateInfo,
2594 const VkAllocationCallbacks *pAllocator,
2595 VkDebugReportCallbackEXT *pCallback)
2596 {
2597 TU_FROM_HANDLE(tu_instance, instance, _instance);
2598 return vk_create_debug_report_callback(&instance->debug_report_callbacks,
2599 pCreateInfo, pAllocator,
2600 &instance->alloc, pCallback);
2601 }
2602
2603 void
2604 tu_DestroyDebugReportCallbackEXT(VkInstance _instance,
2605 VkDebugReportCallbackEXT _callback,
2606 const VkAllocationCallbacks *pAllocator)
2607 {
2608 TU_FROM_HANDLE(tu_instance, instance, _instance);
2609 vk_destroy_debug_report_callback(&instance->debug_report_callbacks,
2610 _callback, pAllocator, &instance->alloc);
2611 }
2612
2613 void
2614 tu_DebugReportMessageEXT(VkInstance _instance,
2615 VkDebugReportFlagsEXT flags,
2616 VkDebugReportObjectTypeEXT objectType,
2617 uint64_t object,
2618 size_t location,
2619 int32_t messageCode,
2620 const char *pLayerPrefix,
2621 const char *pMessage)
2622 {
2623 TU_FROM_HANDLE(tu_instance, instance, _instance);
2624 vk_debug_report(&instance->debug_report_callbacks, flags, objectType,
2625 object, location, messageCode, pLayerPrefix, pMessage);
2626 }
2627
2628 void
2629 tu_GetDeviceGroupPeerMemoryFeatures(
2630 VkDevice device,
2631 uint32_t heapIndex,
2632 uint32_t localDeviceIndex,
2633 uint32_t remoteDeviceIndex,
2634 VkPeerMemoryFeatureFlags *pPeerMemoryFeatures)
2635 {
2636 assert(localDeviceIndex == remoteDeviceIndex);
2637
2638 *pPeerMemoryFeatures = VK_PEER_MEMORY_FEATURE_COPY_SRC_BIT |
2639 VK_PEER_MEMORY_FEATURE_COPY_DST_BIT |
2640 VK_PEER_MEMORY_FEATURE_GENERIC_SRC_BIT |
2641 VK_PEER_MEMORY_FEATURE_GENERIC_DST_BIT;
2642 }
2643
2644 void tu_GetPhysicalDeviceMultisamplePropertiesEXT(
2645 VkPhysicalDevice physicalDevice,
2646 VkSampleCountFlagBits samples,
2647 VkMultisamplePropertiesEXT* pMultisampleProperties)
2648 {
2649 TU_FROM_HANDLE(tu_physical_device, pdevice, physicalDevice);
2650
2651 if (samples <= VK_SAMPLE_COUNT_4_BIT && pdevice->supported_extensions.EXT_sample_locations)
2652 pMultisampleProperties->maxSampleLocationGridSize = (VkExtent2D){ 1, 1 };
2653 else
2654 pMultisampleProperties->maxSampleLocationGridSize = (VkExtent2D){ 0, 0 };
2655 }