turnip: Add a618 support
[mesa.git] / src / freedreno / vulkan / tu_device.c
1 /*
2 * Copyright © 2016 Red Hat.
3 * Copyright © 2016 Bas Nieuwenhuizen
4 *
5 * based in part on anv driver which is:
6 * Copyright © 2015 Intel Corporation
7 *
8 * Permission is hereby granted, free of charge, to any person obtaining a
9 * copy of this software and associated documentation files (the "Software"),
10 * to deal in the Software without restriction, including without limitation
11 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
12 * and/or sell copies of the Software, and to permit persons to whom the
13 * Software is furnished to do so, subject to the following conditions:
14 *
15 * The above copyright notice and this permission notice (including the next
16 * paragraph) shall be included in all copies or substantial portions of the
17 * Software.
18 *
19 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
20 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
21 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
22 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
23 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
24 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
25 * DEALINGS IN THE SOFTWARE.
26 */
27
28 #include "tu_private.h"
29
30 #include <fcntl.h>
31 #include <libsync.h>
32 #include <stdbool.h>
33 #include <string.h>
34 #include <sys/mman.h>
35 #include <sys/sysinfo.h>
36 #include <unistd.h>
37 #include <xf86drm.h>
38
39 #include "compiler/glsl_types.h"
40 #include "util/debug.h"
41 #include "util/disk_cache.h"
42 #include "vk_format.h"
43 #include "vk_util.h"
44
45 #include "drm-uapi/msm_drm.h"
46
47 static int
48 tu_device_get_cache_uuid(uint16_t family, void *uuid)
49 {
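   /* Cache UUID layout (as built below): bytes 0-3 hold the Mesa build
    * timestamp, bytes 4-5 the GPU family id, followed by the literal "tu";
    * the remaining bytes stay zero.
    */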
50 uint32_t mesa_timestamp;
51 uint16_t f = family;
52 memset(uuid, 0, VK_UUID_SIZE);
53 if (!disk_cache_get_function_timestamp(tu_device_get_cache_uuid,
54 &mesa_timestamp))
55 return -1;
56
57 memcpy(uuid, &mesa_timestamp, 4);
58 memcpy((char *) uuid + 4, &f, 2);
59 snprintf((char *) uuid + 6, VK_UUID_SIZE - 10, "tu");
60 return 0;
61 }
62
63 static void
64 tu_get_driver_uuid(void *uuid)
65 {
66 memset(uuid, 0, VK_UUID_SIZE);
67 snprintf(uuid, VK_UUID_SIZE, "freedreno");
68 }
69
70 static void
71 tu_get_device_uuid(void *uuid)
72 {
73 memset(uuid, 0, VK_UUID_SIZE);
74 }
75
76 static VkResult
77 tu_bo_init(struct tu_device *dev,
78 struct tu_bo *bo,
79 uint32_t gem_handle,
80 uint64_t size)
81 {
82 uint64_t iova = tu_gem_info_iova(dev, gem_handle);
83 if (!iova)
84 return VK_ERROR_OUT_OF_DEVICE_MEMORY;
85
86 *bo = (struct tu_bo) {
87 .gem_handle = gem_handle,
88 .size = size,
89 .iova = iova,
90 };
91
92 return VK_SUCCESS;
93 }
94
95 VkResult
96 tu_bo_init_new(struct tu_device *dev, struct tu_bo *bo, uint64_t size)
97 {
98 /* TODO: Choose better flags. As of 2018-11-12, freedreno/drm/msm_bo.c
99 * always sets `flags = MSM_BO_WC`, and we copy that behavior here.
100 */
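   /* MSM_BO_WC requests a write-combined (uncached) CPU mapping from the msm
    * kernel driver, which is the usual choice for GPU-visible allocations.
    */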
101 uint32_t gem_handle = tu_gem_new(dev, size, MSM_BO_WC);
102 if (!gem_handle)
103 return vk_error(dev->instance, VK_ERROR_OUT_OF_DEVICE_MEMORY);
104
105 VkResult result = tu_bo_init(dev, bo, gem_handle, size);
106 if (result != VK_SUCCESS) {
107 tu_gem_close(dev, gem_handle);
108 return vk_error(dev->instance, result);
109 }
110
111 return VK_SUCCESS;
112 }
113
114 VkResult
115 tu_bo_init_dmabuf(struct tu_device *dev,
116 struct tu_bo *bo,
117 uint64_t size,
118 int fd)
119 {
120 uint32_t gem_handle = tu_gem_import_dmabuf(dev, fd, size);
121 if (!gem_handle)
122 return vk_error(dev->instance, VK_ERROR_INVALID_EXTERNAL_HANDLE);
123
124 VkResult result = tu_bo_init(dev, bo, gem_handle, size);
125 if (result != VK_SUCCESS) {
126 tu_gem_close(dev, gem_handle);
127 return vk_error(dev->instance, result);
128 }
129
130 return VK_SUCCESS;
131 }
132
133 int
134 tu_bo_export_dmabuf(struct tu_device *dev, struct tu_bo *bo)
135 {
136 return tu_gem_export_dmabuf(dev, bo->gem_handle);
137 }
138
139 VkResult
140 tu_bo_map(struct tu_device *dev, struct tu_bo *bo)
141 {
142 if (bo->map)
143 return VK_SUCCESS;
144
145 uint64_t offset = tu_gem_info_offset(dev, bo->gem_handle);
146 if (!offset)
147 return vk_error(dev->instance, VK_ERROR_OUT_OF_DEVICE_MEMORY);
148
149 /* TODO: Should we use the wrapper os_mmap() like Freedreno does? */
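   /* The offset queried above is the fake mmap offset the kernel assigns to
    * this BO; mapping the render node at that offset maps the BO's backing
    * pages, as with any DRM GEM driver.
    */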
150 void *map = mmap(0, bo->size, PROT_READ | PROT_WRITE, MAP_SHARED,
151 dev->physical_device->local_fd, offset);
152 if (map == MAP_FAILED)
153 return vk_error(dev->instance, VK_ERROR_MEMORY_MAP_FAILED);
154
155 bo->map = map;
156 return VK_SUCCESS;
157 }
158
159 void
160 tu_bo_finish(struct tu_device *dev, struct tu_bo *bo)
161 {
162 assert(bo->gem_handle);
163
164 if (bo->map)
165 munmap(bo->map, bo->size);
166
167 tu_gem_close(dev, bo->gem_handle);
168 }
169
170 static VkResult
171 tu_physical_device_init(struct tu_physical_device *device,
172 struct tu_instance *instance,
173 drmDevicePtr drm_device)
174 {
175 const char *path = drm_device->nodes[DRM_NODE_RENDER];
176 VkResult result = VK_SUCCESS;
177 drmVersionPtr version;
178 int fd;
179 int master_fd = -1;
180
181 fd = open(path, O_RDWR | O_CLOEXEC);
182 if (fd < 0) {
183 return vk_errorf(instance, VK_ERROR_INCOMPATIBLE_DRIVER,
184 "failed to open device %s", path);
185 }
186
187 /* Version 1.3 added MSM_INFO_IOVA. */
188 const int min_version_major = 1;
189 const int min_version_minor = 3;
190
191 version = drmGetVersion(fd);
192 if (!version) {
193 close(fd);
194 return vk_errorf(instance, VK_ERROR_INCOMPATIBLE_DRIVER,
195 "failed to query kernel driver version for device %s",
196 path);
197 }
198
199 if (strcmp(version->name, "msm")) {
200 drmFreeVersion(version);
201 close(fd);
202 return vk_errorf(instance, VK_ERROR_INCOMPATIBLE_DRIVER,
203 "device %s does not use the msm kernel driver", path);
204 }
205
206 if (version->version_major != min_version_major ||
207 version->version_minor < min_version_minor) {
208 result = vk_errorf(instance, VK_ERROR_INCOMPATIBLE_DRIVER,
209 "kernel driver for device %s has version %d.%d, "
210 "but Vulkan requires version >= %d.%d",
211 path, version->version_major, version->version_minor,
212 min_version_major, min_version_minor);
213 drmFreeVersion(version);
214 close(fd);
215 return result;
216 }
217
218 drmFreeVersion(version);
219
220 if (instance->debug_flags & TU_DEBUG_STARTUP)
221 tu_logi("Found compatible device '%s'.", path);
222
223 device->_loader_data.loaderMagic = ICD_LOADER_MAGIC;
224 device->instance = instance;
225 assert(strlen(path) < ARRAY_SIZE(device->path));
226 strncpy(device->path, path, ARRAY_SIZE(device->path));
227
228 if (instance->enabled_extensions.KHR_display) {
229 master_fd =
230 open(drm_device->nodes[DRM_NODE_PRIMARY], O_RDWR | O_CLOEXEC);
231 if (master_fd >= 0) {
232          /* TODO: free master_fd if accel is not working? */
233 }
234 }
235
236 device->master_fd = master_fd;
237 device->local_fd = fd;
238
239 if (tu_drm_get_gpu_id(device, &device->gpu_id)) {
240 if (instance->debug_flags & TU_DEBUG_STARTUP)
241 tu_logi("Could not query the GPU ID");
242 result = vk_errorf(instance, VK_ERROR_INITIALIZATION_FAILED,
243 "could not get GPU ID");
244 goto fail;
245 }
246
247 if (tu_drm_get_gmem_size(device, &device->gmem_size)) {
248 if (instance->debug_flags & TU_DEBUG_STARTUP)
249 tu_logi("Could not query the GMEM size");
250 result = vk_errorf(instance, VK_ERROR_INITIALIZATION_FAILED,
251 "could not get GMEM size");
252 goto fail;
253 }
254
255 memset(device->name, 0, sizeof(device->name));
256 sprintf(device->name, "FD%d", device->gpu_id);
257
258 switch (device->gpu_id) {
259 case 618:
260 device->tile_align_w = 64;
261 device->tile_align_h = 16;
262 device->magic.RB_UNKNOWN_8E04_blit = 0x00100000;
263 device->magic.RB_CCU_CNTL_gmem = 0x3e400004;
264 device->magic.PC_UNKNOWN_9805 = 0x0;
265 device->magic.SP_UNKNOWN_A0F8 = 0x0;
266 break;
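      /* a618 (found in e.g. SC7180-class SoCs) uses the same tile alignment
       * as a630/a640 but needs different CCU/blit "magic" register values.
       */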
267 case 630:
268 case 640:
269 device->tile_align_w = 64;
270 device->tile_align_h = 16;
271 device->magic.RB_UNKNOWN_8E04_blit = 0x01000000;
272 device->magic.RB_CCU_CNTL_gmem = 0x7c400004;
273 device->magic.PC_UNKNOWN_9805 = 0x1;
274 device->magic.SP_UNKNOWN_A0F8 = 0x1;
275 break;
276 default:
277 result = vk_errorf(instance, VK_ERROR_INITIALIZATION_FAILED,
278 "device %s is unsupported", device->name);
279 goto fail;
280 }
281 if (tu_device_get_cache_uuid(device->gpu_id, device->cache_uuid)) {
282 result = vk_errorf(instance, VK_ERROR_INITIALIZATION_FAILED,
283 "cannot generate UUID");
284 goto fail;
285 }
286
287    /* The gpu id is already embedded in the cache uuid so we just pass the
288     * device name when creating the cache.
289     */
290 char buf[VK_UUID_SIZE * 2 + 1];
291 disk_cache_format_hex_id(buf, device->cache_uuid, VK_UUID_SIZE * 2);
292 device->disk_cache = disk_cache_create(device->name, buf, 0);
293
294 fprintf(stderr, "WARNING: tu is not a conformant vulkan implementation, "
295 "testing use only.\n");
296
297    tu_get_driver_uuid(&device->driver_uuid);
298 tu_get_device_uuid(&device->device_uuid);
299
300 tu_fill_device_extension_table(device, &device->supported_extensions);
301
302 if (result != VK_SUCCESS) {
303 vk_error(instance, result);
304 goto fail;
305 }
306
307 result = tu_wsi_init(device);
308 if (result != VK_SUCCESS) {
309 vk_error(instance, result);
310 goto fail;
311 }
312
313 return VK_SUCCESS;
314
315 fail:
316 close(fd);
317 if (master_fd != -1)
318 close(master_fd);
319 return result;
320 }
321
322 static void
323 tu_physical_device_finish(struct tu_physical_device *device)
324 {
325 tu_wsi_finish(device);
326
327 disk_cache_destroy(device->disk_cache);
328 close(device->local_fd);
329 if (device->master_fd != -1)
330 close(device->master_fd);
331 }
332
333 static void *
334 default_alloc_func(void *pUserData,
335 size_t size,
336 size_t align,
337 VkSystemAllocationScope allocationScope)
338 {
339 return malloc(size);
340 }
341
342 static void *
343 default_realloc_func(void *pUserData,
344 void *pOriginal,
345 size_t size,
346 size_t align,
347 VkSystemAllocationScope allocationScope)
348 {
349 return realloc(pOriginal, size);
350 }
351
352 static void
353 default_free_func(void *pUserData, void *pMemory)
354 {
355 free(pMemory);
356 }
357
358 static const VkAllocationCallbacks default_alloc = {
359 .pUserData = NULL,
360 .pfnAllocation = default_alloc_func,
361 .pfnReallocation = default_realloc_func,
362 .pfnFree = default_free_func,
363 };
364
365 static const struct debug_control tu_debug_options[] = {
366 { "startup", TU_DEBUG_STARTUP },
367 { "nir", TU_DEBUG_NIR },
368 { "ir3", TU_DEBUG_IR3 },
369 { "nobin", TU_DEBUG_NOBIN },
370 { NULL, 0 }
371 };
372
373 const char *
374 tu_get_debug_option_name(int id)
375 {
376 assert(id < ARRAY_SIZE(tu_debug_options) - 1);
377 return tu_debug_options[id].string;
378 }
379
380 static int
381 tu_get_instance_extension_index(const char *name)
382 {
383 for (unsigned i = 0; i < TU_INSTANCE_EXTENSION_COUNT; ++i) {
384 if (strcmp(name, tu_instance_extensions[i].extensionName) == 0)
385 return i;
386 }
387 return -1;
388 }
389
390 VkResult
391 tu_CreateInstance(const VkInstanceCreateInfo *pCreateInfo,
392 const VkAllocationCallbacks *pAllocator,
393 VkInstance *pInstance)
394 {
395 struct tu_instance *instance;
396 VkResult result;
397
398 assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_INSTANCE_CREATE_INFO);
399
400 uint32_t client_version;
401 if (pCreateInfo->pApplicationInfo &&
402 pCreateInfo->pApplicationInfo->apiVersion != 0) {
403 client_version = pCreateInfo->pApplicationInfo->apiVersion;
404 } else {
405 tu_EnumerateInstanceVersion(&client_version);
406 }
407
408 instance = vk_zalloc2(&default_alloc, pAllocator, sizeof(*instance), 8,
409 VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
410 if (!instance)
411 return vk_error(NULL, VK_ERROR_OUT_OF_HOST_MEMORY);
412
413 instance->_loader_data.loaderMagic = ICD_LOADER_MAGIC;
414
415 if (pAllocator)
416 instance->alloc = *pAllocator;
417 else
418 instance->alloc = default_alloc;
419
420 instance->api_version = client_version;
421 instance->physical_device_count = -1;
422
423 instance->debug_flags =
424 parse_debug_string(getenv("TU_DEBUG"), tu_debug_options);
425
426 if (instance->debug_flags & TU_DEBUG_STARTUP)
427 tu_logi("Created an instance");
428
429 for (uint32_t i = 0; i < pCreateInfo->enabledExtensionCount; i++) {
430 const char *ext_name = pCreateInfo->ppEnabledExtensionNames[i];
431 int index = tu_get_instance_extension_index(ext_name);
432
433 if (index < 0 || !tu_supported_instance_extensions.extensions[index]) {
434 vk_free2(&default_alloc, pAllocator, instance);
435 return vk_error(instance, VK_ERROR_EXTENSION_NOT_PRESENT);
436 }
437
438 instance->enabled_extensions.extensions[index] = true;
439 }
440
441 result = vk_debug_report_instance_init(&instance->debug_report_callbacks);
442 if (result != VK_SUCCESS) {
443 vk_free2(&default_alloc, pAllocator, instance);
444 return vk_error(instance, result);
445 }
446
447 glsl_type_singleton_init_or_ref();
448
449 VG(VALGRIND_CREATE_MEMPOOL(instance, 0, false));
450
451 *pInstance = tu_instance_to_handle(instance);
452
453 return VK_SUCCESS;
454 }
455
456 void
457 tu_DestroyInstance(VkInstance _instance,
458 const VkAllocationCallbacks *pAllocator)
459 {
460 TU_FROM_HANDLE(tu_instance, instance, _instance);
461
462 if (!instance)
463 return;
464
465 for (int i = 0; i < instance->physical_device_count; ++i) {
466 tu_physical_device_finish(instance->physical_devices + i);
467 }
468
469 VG(VALGRIND_DESTROY_MEMPOOL(instance));
470
471 glsl_type_singleton_decref();
472
473 vk_debug_report_instance_destroy(&instance->debug_report_callbacks);
474
475 vk_free(&instance->alloc, instance);
476 }
477
478 static VkResult
479 tu_enumerate_devices(struct tu_instance *instance)
480 {
481 /* TODO: Check for more devices ? */
482 drmDevicePtr devices[8];
483 VkResult result = VK_ERROR_INCOMPATIBLE_DRIVER;
484 int max_devices;
485
486 instance->physical_device_count = 0;
487
488 max_devices = drmGetDevices2(0, devices, ARRAY_SIZE(devices));
489
490 if (instance->debug_flags & TU_DEBUG_STARTUP)
491 tu_logi("Found %d drm nodes", max_devices);
492
493 if (max_devices < 1)
494 return vk_error(instance, VK_ERROR_INCOMPATIBLE_DRIVER);
495
496 for (unsigned i = 0; i < (unsigned) max_devices; i++) {
497 if (devices[i]->available_nodes & 1 << DRM_NODE_RENDER &&
498 devices[i]->bustype == DRM_BUS_PLATFORM) {
499
500 result = tu_physical_device_init(
501 instance->physical_devices + instance->physical_device_count,
502 instance, devices[i]);
503 if (result == VK_SUCCESS)
504 ++instance->physical_device_count;
505 else if (result != VK_ERROR_INCOMPATIBLE_DRIVER)
506 break;
507 }
508 }
509 drmFreeDevices(devices, max_devices);
510
511 return result;
512 }
513
514 VkResult
515 tu_EnumeratePhysicalDevices(VkInstance _instance,
516 uint32_t *pPhysicalDeviceCount,
517 VkPhysicalDevice *pPhysicalDevices)
518 {
519 TU_FROM_HANDLE(tu_instance, instance, _instance);
520 VK_OUTARRAY_MAKE(out, pPhysicalDevices, pPhysicalDeviceCount);
521
522 VkResult result;
523
524 if (instance->physical_device_count < 0) {
525 result = tu_enumerate_devices(instance);
526 if (result != VK_SUCCESS && result != VK_ERROR_INCOMPATIBLE_DRIVER)
527 return result;
528 }
529
530 for (uint32_t i = 0; i < instance->physical_device_count; ++i) {
531 vk_outarray_append(&out, p)
532 {
533 *p = tu_physical_device_to_handle(instance->physical_devices + i);
534 }
535 }
536
537 return vk_outarray_status(&out);
538 }
539
540 VkResult
541 tu_EnumeratePhysicalDeviceGroups(
542 VkInstance _instance,
543 uint32_t *pPhysicalDeviceGroupCount,
544 VkPhysicalDeviceGroupProperties *pPhysicalDeviceGroupProperties)
545 {
546 TU_FROM_HANDLE(tu_instance, instance, _instance);
547 VK_OUTARRAY_MAKE(out, pPhysicalDeviceGroupProperties,
548 pPhysicalDeviceGroupCount);
549 VkResult result;
550
551 if (instance->physical_device_count < 0) {
552 result = tu_enumerate_devices(instance);
553 if (result != VK_SUCCESS && result != VK_ERROR_INCOMPATIBLE_DRIVER)
554 return result;
555 }
556
557 for (uint32_t i = 0; i < instance->physical_device_count; ++i) {
558 vk_outarray_append(&out, p)
559 {
560 p->physicalDeviceCount = 1;
561 p->physicalDevices[0] =
562 tu_physical_device_to_handle(instance->physical_devices + i);
563 p->subsetAllocation = false;
564 }
565 }
566
567 return vk_outarray_status(&out);
568 }
569
570 void
571 tu_GetPhysicalDeviceFeatures(VkPhysicalDevice physicalDevice,
572 VkPhysicalDeviceFeatures *pFeatures)
573 {
574 memset(pFeatures, 0, sizeof(*pFeatures));
575
576 *pFeatures = (VkPhysicalDeviceFeatures) {
577 .robustBufferAccess = false,
578 .fullDrawIndexUint32 = false,
579 .imageCubeArray = false,
580 .independentBlend = false,
581 .geometryShader = false,
582 .tessellationShader = false,
583 .sampleRateShading = false,
584 .dualSrcBlend = false,
585 .logicOp = false,
586 .multiDrawIndirect = false,
587 .drawIndirectFirstInstance = false,
588 .depthClamp = false,
589 .depthBiasClamp = false,
590 .fillModeNonSolid = false,
591 .depthBounds = false,
592 .wideLines = false,
593 .largePoints = false,
594 .alphaToOne = false,
595 .multiViewport = false,
596 .samplerAnisotropy = true,
597 .textureCompressionETC2 = true,
598 .textureCompressionASTC_LDR = true,
599 .textureCompressionBC = true,
600 .occlusionQueryPrecise = true,
601 .pipelineStatisticsQuery = false,
602 .vertexPipelineStoresAndAtomics = false,
603 .fragmentStoresAndAtomics = false,
604 .shaderTessellationAndGeometryPointSize = false,
605 .shaderImageGatherExtended = false,
606 .shaderStorageImageExtendedFormats = false,
607 .shaderStorageImageMultisample = false,
608 .shaderUniformBufferArrayDynamicIndexing = false,
609 .shaderSampledImageArrayDynamicIndexing = false,
610 .shaderStorageBufferArrayDynamicIndexing = false,
611 .shaderStorageImageArrayDynamicIndexing = false,
612 .shaderStorageImageReadWithoutFormat = false,
613 .shaderStorageImageWriteWithoutFormat = false,
614 .shaderClipDistance = false,
615 .shaderCullDistance = false,
616 .shaderFloat64 = false,
617 .shaderInt64 = false,
618 .shaderInt16 = false,
619 .sparseBinding = false,
620 .variableMultisampleRate = false,
621 .inheritedQueries = false,
622 };
623 }
624
625 void
626 tu_GetPhysicalDeviceFeatures2(VkPhysicalDevice physicalDevice,
627 VkPhysicalDeviceFeatures2 *pFeatures)
628 {
629 vk_foreach_struct(ext, pFeatures->pNext)
630 {
631 switch (ext->sType) {
632 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VARIABLE_POINTERS_FEATURES: {
633 VkPhysicalDeviceVariablePointersFeatures *features = (void *) ext;
634 features->variablePointersStorageBuffer = false;
635 features->variablePointers = false;
636 break;
637 }
638 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MULTIVIEW_FEATURES: {
639 VkPhysicalDeviceMultiviewFeatures *features =
640 (VkPhysicalDeviceMultiviewFeatures *) ext;
641 features->multiview = false;
642 features->multiviewGeometryShader = false;
643 features->multiviewTessellationShader = false;
644 break;
645 }
646 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_DRAW_PARAMETERS_FEATURES: {
647 VkPhysicalDeviceShaderDrawParametersFeatures *features =
648 (VkPhysicalDeviceShaderDrawParametersFeatures *) ext;
649 features->shaderDrawParameters = false;
650 break;
651 }
652 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROTECTED_MEMORY_FEATURES: {
653 VkPhysicalDeviceProtectedMemoryFeatures *features =
654 (VkPhysicalDeviceProtectedMemoryFeatures *) ext;
655 features->protectedMemory = false;
656 break;
657 }
658 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_16BIT_STORAGE_FEATURES: {
659 VkPhysicalDevice16BitStorageFeatures *features =
660 (VkPhysicalDevice16BitStorageFeatures *) ext;
661 features->storageBuffer16BitAccess = false;
662 features->uniformAndStorageBuffer16BitAccess = false;
663 features->storagePushConstant16 = false;
664 features->storageInputOutput16 = false;
665 break;
666 }
667 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SAMPLER_YCBCR_CONVERSION_FEATURES: {
668 VkPhysicalDeviceSamplerYcbcrConversionFeatures *features =
669 (VkPhysicalDeviceSamplerYcbcrConversionFeatures *) ext;
670 features->samplerYcbcrConversion = false;
671 break;
672 }
673 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DESCRIPTOR_INDEXING_FEATURES_EXT: {
674 VkPhysicalDeviceDescriptorIndexingFeaturesEXT *features =
675 (VkPhysicalDeviceDescriptorIndexingFeaturesEXT *) ext;
676 features->shaderInputAttachmentArrayDynamicIndexing = false;
677 features->shaderUniformTexelBufferArrayDynamicIndexing = false;
678 features->shaderStorageTexelBufferArrayDynamicIndexing = false;
679 features->shaderUniformBufferArrayNonUniformIndexing = false;
680 features->shaderSampledImageArrayNonUniformIndexing = false;
681 features->shaderStorageBufferArrayNonUniformIndexing = false;
682 features->shaderStorageImageArrayNonUniformIndexing = false;
683 features->shaderInputAttachmentArrayNonUniformIndexing = false;
684 features->shaderUniformTexelBufferArrayNonUniformIndexing = false;
685 features->shaderStorageTexelBufferArrayNonUniformIndexing = false;
686 features->descriptorBindingUniformBufferUpdateAfterBind = false;
687 features->descriptorBindingSampledImageUpdateAfterBind = false;
688 features->descriptorBindingStorageImageUpdateAfterBind = false;
689 features->descriptorBindingStorageBufferUpdateAfterBind = false;
690 features->descriptorBindingUniformTexelBufferUpdateAfterBind = false;
691 features->descriptorBindingStorageTexelBufferUpdateAfterBind = false;
692 features->descriptorBindingUpdateUnusedWhilePending = false;
693 features->descriptorBindingPartiallyBound = false;
694 features->descriptorBindingVariableDescriptorCount = false;
695 features->runtimeDescriptorArray = false;
696 break;
697 }
698 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_CONDITIONAL_RENDERING_FEATURES_EXT: {
699 VkPhysicalDeviceConditionalRenderingFeaturesEXT *features =
700 (VkPhysicalDeviceConditionalRenderingFeaturesEXT *) ext;
701 features->conditionalRendering = false;
702 features->inheritedConditionalRendering = false;
703 break;
704 }
705 default:
706 break;
707 }
708 }
709 return tu_GetPhysicalDeviceFeatures(physicalDevice, &pFeatures->features);
710 }
711
712 void
713 tu_GetPhysicalDeviceProperties(VkPhysicalDevice physicalDevice,
714 VkPhysicalDeviceProperties *pProperties)
715 {
716 TU_FROM_HANDLE(tu_physical_device, pdevice, physicalDevice);
717 VkSampleCountFlags sample_counts = VK_SAMPLE_COUNT_1_BIT |
718 VK_SAMPLE_COUNT_2_BIT | VK_SAMPLE_COUNT_4_BIT | VK_SAMPLE_COUNT_8_BIT;
719
720    /* Make sure that the entire descriptor set is addressable with a signed
721     * 32-bit int. So the sum of all limits scaled by descriptor size has to
722     * be at most 2 GiB. A combined image & sampler object counts as one of
723     * both. This limit is for the pipeline layout, not for the set layout, but
724     * there is no set limit, so we just set a pipeline limit. I don't think
725     * any app is going to hit this soon. */
726 size_t max_descriptor_set_size =
727 ((1ull << 31) - 16 * MAX_DYNAMIC_BUFFERS) /
728 (32 /* uniform buffer, 32 due to potential space wasted on alignment */ +
729 32 /* storage buffer, 32 due to potential space wasted on alignment */ +
730 32 /* sampler, largest when combined with image */ +
731 64 /* sampled image */ + 64 /* storage image */);
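   /* With the 32 + 32 + 32 + 64 + 64 = 224-byte divisor this works out to
    * roughly 2^31 / 224, i.e. on the order of 9.5 million descriptors per
    * stage, far beyond what any real pipeline layout declares.
    */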
732
733 VkPhysicalDeviceLimits limits = {
734 .maxImageDimension1D = (1 << 14),
735 .maxImageDimension2D = (1 << 14),
736 .maxImageDimension3D = (1 << 11),
737 .maxImageDimensionCube = (1 << 14),
738 .maxImageArrayLayers = (1 << 11),
739 .maxTexelBufferElements = 128 * 1024 * 1024,
740 .maxUniformBufferRange = UINT32_MAX,
741 .maxStorageBufferRange = MAX_STORAGE_BUFFER_RANGE,
742 .maxPushConstantsSize = MAX_PUSH_CONSTANTS_SIZE,
743 .maxMemoryAllocationCount = UINT32_MAX,
744 .maxSamplerAllocationCount = 64 * 1024,
745 .bufferImageGranularity = 64, /* A cache line */
746 .sparseAddressSpaceSize = 0xffffffffu, /* buffer max size */
747 .maxBoundDescriptorSets = MAX_SETS,
748 .maxPerStageDescriptorSamplers = max_descriptor_set_size,
749 .maxPerStageDescriptorUniformBuffers = max_descriptor_set_size,
750 .maxPerStageDescriptorStorageBuffers = max_descriptor_set_size,
751 .maxPerStageDescriptorSampledImages = max_descriptor_set_size,
752 .maxPerStageDescriptorStorageImages = max_descriptor_set_size,
753 .maxPerStageDescriptorInputAttachments = max_descriptor_set_size,
754 .maxPerStageResources = max_descriptor_set_size,
755 .maxDescriptorSetSamplers = max_descriptor_set_size,
756 .maxDescriptorSetUniformBuffers = max_descriptor_set_size,
757 .maxDescriptorSetUniformBuffersDynamic = MAX_DYNAMIC_UNIFORM_BUFFERS,
758 .maxDescriptorSetStorageBuffers = max_descriptor_set_size,
759 .maxDescriptorSetStorageBuffersDynamic = MAX_DYNAMIC_STORAGE_BUFFERS,
760 .maxDescriptorSetSampledImages = max_descriptor_set_size,
761 .maxDescriptorSetStorageImages = max_descriptor_set_size,
762 .maxDescriptorSetInputAttachments = max_descriptor_set_size,
763 .maxVertexInputAttributes = 32,
764 .maxVertexInputBindings = 32,
765 .maxVertexInputAttributeOffset = 2047,
766 .maxVertexInputBindingStride = 2048,
767 .maxVertexOutputComponents = 128,
768 .maxTessellationGenerationLevel = 64,
769 .maxTessellationPatchSize = 32,
770 .maxTessellationControlPerVertexInputComponents = 128,
771 .maxTessellationControlPerVertexOutputComponents = 128,
772 .maxTessellationControlPerPatchOutputComponents = 120,
773 .maxTessellationControlTotalOutputComponents = 4096,
774 .maxTessellationEvaluationInputComponents = 128,
775 .maxTessellationEvaluationOutputComponents = 128,
776 .maxGeometryShaderInvocations = 127,
777 .maxGeometryInputComponents = 64,
778 .maxGeometryOutputComponents = 128,
779 .maxGeometryOutputVertices = 256,
780 .maxGeometryTotalOutputComponents = 1024,
781 .maxFragmentInputComponents = 128,
782 .maxFragmentOutputAttachments = 8,
783 .maxFragmentDualSrcAttachments = 1,
784 .maxFragmentCombinedOutputResources = 8,
785 .maxComputeSharedMemorySize = 32768,
786 .maxComputeWorkGroupCount = { 65535, 65535, 65535 },
787 .maxComputeWorkGroupInvocations = 2048,
788 .maxComputeWorkGroupSize = { 2048, 2048, 2048 },
789 .subPixelPrecisionBits = 4 /* FIXME */,
790 .subTexelPrecisionBits = 4 /* FIXME */,
791 .mipmapPrecisionBits = 4 /* FIXME */,
792 .maxDrawIndexedIndexValue = UINT32_MAX,
793 .maxDrawIndirectCount = UINT32_MAX,
794 .maxSamplerLodBias = 16,
795 .maxSamplerAnisotropy = 16,
796 .maxViewports = MAX_VIEWPORTS,
797 .maxViewportDimensions = { (1 << 14), (1 << 14) },
798 .viewportBoundsRange = { INT16_MIN, INT16_MAX },
799 .viewportSubPixelBits = 8,
800 .minMemoryMapAlignment = 4096, /* A page */
801 .minTexelBufferOffsetAlignment = 64,
802 .minUniformBufferOffsetAlignment = 4,
803 .minStorageBufferOffsetAlignment = 4,
804 .minTexelOffset = -32,
805 .maxTexelOffset = 31,
806 .minTexelGatherOffset = -32,
807 .maxTexelGatherOffset = 31,
808 .minInterpolationOffset = -2,
809 .maxInterpolationOffset = 2,
810 .subPixelInterpolationOffsetBits = 8,
811 .maxFramebufferWidth = (1 << 14),
812 .maxFramebufferHeight = (1 << 14),
813 .maxFramebufferLayers = (1 << 10),
814 .framebufferColorSampleCounts = sample_counts,
815 .framebufferDepthSampleCounts = sample_counts,
816 .framebufferStencilSampleCounts = sample_counts,
817 .framebufferNoAttachmentsSampleCounts = sample_counts,
818 .maxColorAttachments = MAX_RTS,
819 .sampledImageColorSampleCounts = sample_counts,
820 .sampledImageIntegerSampleCounts = VK_SAMPLE_COUNT_1_BIT,
821 .sampledImageDepthSampleCounts = sample_counts,
822 .sampledImageStencilSampleCounts = sample_counts,
823 .storageImageSampleCounts = VK_SAMPLE_COUNT_1_BIT,
824 .maxSampleMaskWords = 1,
825 .timestampComputeAndGraphics = false, /* FINISHME */
826 .timestampPeriod = 1,
827 .maxClipDistances = 8,
828 .maxCullDistances = 8,
829 .maxCombinedClipAndCullDistances = 8,
830 .discreteQueuePriorities = 1,
831 .pointSizeRange = { 0.125, 255.875 },
832 .lineWidthRange = { 0.0, 7.9921875 },
833 .pointSizeGranularity = (1.0 / 8.0),
834 .lineWidthGranularity = (1.0 / 128.0),
835 .strictLines = false, /* FINISHME */
836 .standardSampleLocations = true,
837 .optimalBufferCopyOffsetAlignment = 128,
838 .optimalBufferCopyRowPitchAlignment = 128,
839 .nonCoherentAtomSize = 64,
840 };
841
842 *pProperties = (VkPhysicalDeviceProperties) {
843 .apiVersion = tu_physical_device_api_version(pdevice),
844 .driverVersion = vk_get_driver_version(),
845 .vendorID = 0, /* TODO */
846 .deviceID = 0,
847 .deviceType = VK_PHYSICAL_DEVICE_TYPE_INTEGRATED_GPU,
848 .limits = limits,
849 .sparseProperties = { 0 },
850 };
851
852 strcpy(pProperties->deviceName, pdevice->name);
853 memcpy(pProperties->pipelineCacheUUID, pdevice->cache_uuid, VK_UUID_SIZE);
854 }
855
856 void
857 tu_GetPhysicalDeviceProperties2(VkPhysicalDevice physicalDevice,
858 VkPhysicalDeviceProperties2 *pProperties)
859 {
860 TU_FROM_HANDLE(tu_physical_device, pdevice, physicalDevice);
861 tu_GetPhysicalDeviceProperties(physicalDevice, &pProperties->properties);
862
863 vk_foreach_struct(ext, pProperties->pNext)
864 {
865 switch (ext->sType) {
866 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PUSH_DESCRIPTOR_PROPERTIES_KHR: {
867 VkPhysicalDevicePushDescriptorPropertiesKHR *properties =
868 (VkPhysicalDevicePushDescriptorPropertiesKHR *) ext;
869 properties->maxPushDescriptors = MAX_PUSH_DESCRIPTORS;
870 break;
871 }
872 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_ID_PROPERTIES: {
873 VkPhysicalDeviceIDProperties *properties =
874 (VkPhysicalDeviceIDProperties *) ext;
875 memcpy(properties->driverUUID, pdevice->driver_uuid, VK_UUID_SIZE);
876 memcpy(properties->deviceUUID, pdevice->device_uuid, VK_UUID_SIZE);
877 properties->deviceLUIDValid = false;
878 break;
879 }
880 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MULTIVIEW_PROPERTIES: {
881 VkPhysicalDeviceMultiviewProperties *properties =
882 (VkPhysicalDeviceMultiviewProperties *) ext;
883 properties->maxMultiviewViewCount = MAX_VIEWS;
884 properties->maxMultiviewInstanceIndex = INT_MAX;
885 break;
886 }
887 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_POINT_CLIPPING_PROPERTIES: {
888 VkPhysicalDevicePointClippingProperties *properties =
889 (VkPhysicalDevicePointClippingProperties *) ext;
890 properties->pointClippingBehavior =
891 VK_POINT_CLIPPING_BEHAVIOR_ALL_CLIP_PLANES;
892 break;
893 }
894 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MAINTENANCE_3_PROPERTIES: {
895 VkPhysicalDeviceMaintenance3Properties *properties =
896 (VkPhysicalDeviceMaintenance3Properties *) ext;
897 /* Make sure everything is addressable by a signed 32-bit int, and
898 * our largest descriptors are 96 bytes. */
899 properties->maxPerSetDescriptors = (1ull << 31) / 96;
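         /* (1 << 31) / 96 is roughly 22 million descriptors per set. */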
900 /* Our buffer size fields allow only this much */
901 properties->maxMemoryAllocationSize = 0xFFFFFFFFull;
902 break;
903 }
904 default:
905 break;
906 }
907 }
908 }
909
910 static const VkQueueFamilyProperties tu_queue_family_properties = {
911 .queueFlags =
912 VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT | VK_QUEUE_TRANSFER_BIT,
913 .queueCount = 1,
914 .timestampValidBits = 0, /* FINISHME */
915 .minImageTransferGranularity = { 1, 1, 1 },
916 };
917
918 void
919 tu_GetPhysicalDeviceQueueFamilyProperties(
920 VkPhysicalDevice physicalDevice,
921 uint32_t *pQueueFamilyPropertyCount,
922 VkQueueFamilyProperties *pQueueFamilyProperties)
923 {
924 VK_OUTARRAY_MAKE(out, pQueueFamilyProperties, pQueueFamilyPropertyCount);
925
926 vk_outarray_append(&out, p) { *p = tu_queue_family_properties; }
927 }
928
929 void
930 tu_GetPhysicalDeviceQueueFamilyProperties2(
931 VkPhysicalDevice physicalDevice,
932 uint32_t *pQueueFamilyPropertyCount,
933 VkQueueFamilyProperties2 *pQueueFamilyProperties)
934 {
935 VK_OUTARRAY_MAKE(out, pQueueFamilyProperties, pQueueFamilyPropertyCount);
936
937 vk_outarray_append(&out, p)
938 {
939 p->queueFamilyProperties = tu_queue_family_properties;
940 }
941 }
942
943 static uint64_t
944 tu_get_system_heap_size()
945 {
946 struct sysinfo info;
947 sysinfo(&info);
948
949 uint64_t total_ram = (uint64_t) info.totalram * (uint64_t) info.mem_unit;
950
951 /* We don't want to burn too much ram with the GPU. If the user has 4GiB
952 * or less, we use at most half. If they have more than 4GiB, we use 3/4.
953 */
954 uint64_t available_ram;
955 if (total_ram <= 4ull * 1024ull * 1024ull * 1024ull)
956 available_ram = total_ram / 2;
957 else
958 available_ram = total_ram * 3 / 4;
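   /* For example, a 4 GiB device advertises a 2 GiB heap, while an 8 GiB
    * device advertises a 6 GiB heap.
    */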
959
960 return available_ram;
961 }
962
963 void
964 tu_GetPhysicalDeviceMemoryProperties(
965 VkPhysicalDevice physicalDevice,
966 VkPhysicalDeviceMemoryProperties *pMemoryProperties)
967 {
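   /* Adreno shares system memory with the CPU (UMA), so a single heap with
    * one device-local, host-visible, host-coherent memory type is enough.
    */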
968 pMemoryProperties->memoryHeapCount = 1;
969 pMemoryProperties->memoryHeaps[0].size = tu_get_system_heap_size();
970 pMemoryProperties->memoryHeaps[0].flags = VK_MEMORY_HEAP_DEVICE_LOCAL_BIT;
971
972 pMemoryProperties->memoryTypeCount = 1;
973 pMemoryProperties->memoryTypes[0].propertyFlags =
974 VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT |
975 VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT |
976 VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
977 pMemoryProperties->memoryTypes[0].heapIndex = 0;
978 }
979
980 void
981 tu_GetPhysicalDeviceMemoryProperties2(
982 VkPhysicalDevice physicalDevice,
983 VkPhysicalDeviceMemoryProperties2 *pMemoryProperties)
984 {
985 return tu_GetPhysicalDeviceMemoryProperties(
986 physicalDevice, &pMemoryProperties->memoryProperties);
987 }
988
989 static VkResult
990 tu_queue_init(struct tu_device *device,
991 struct tu_queue *queue,
992 uint32_t queue_family_index,
993 int idx,
994 VkDeviceQueueCreateFlags flags)
995 {
996 queue->_loader_data.loaderMagic = ICD_LOADER_MAGIC;
997 queue->device = device;
998 queue->queue_family_index = queue_family_index;
999 queue->queue_idx = idx;
1000 queue->flags = flags;
1001
1002 int ret = tu_drm_submitqueue_new(device, 0, &queue->msm_queue_id);
1003 if (ret)
1004 return VK_ERROR_INITIALIZATION_FAILED;
1005
1006 tu_fence_init(&queue->submit_fence, false);
1007
1008 return VK_SUCCESS;
1009 }
1010
1011 static void
1012 tu_queue_finish(struct tu_queue *queue)
1013 {
1014 tu_fence_finish(&queue->submit_fence);
1015 tu_drm_submitqueue_close(queue->device, queue->msm_queue_id);
1016 }
1017
1018 static int
1019 tu_get_device_extension_index(const char *name)
1020 {
1021 for (unsigned i = 0; i < TU_DEVICE_EXTENSION_COUNT; ++i) {
1022 if (strcmp(name, tu_device_extensions[i].extensionName) == 0)
1023 return i;
1024 }
1025 return -1;
1026 }
1027
1028 VkResult
1029 tu_CreateDevice(VkPhysicalDevice physicalDevice,
1030 const VkDeviceCreateInfo *pCreateInfo,
1031 const VkAllocationCallbacks *pAllocator,
1032 VkDevice *pDevice)
1033 {
1034 TU_FROM_HANDLE(tu_physical_device, physical_device, physicalDevice);
1035 VkResult result;
1036 struct tu_device *device;
1037
1038 /* Check enabled features */
1039 if (pCreateInfo->pEnabledFeatures) {
1040 VkPhysicalDeviceFeatures supported_features;
1041 tu_GetPhysicalDeviceFeatures(physicalDevice, &supported_features);
1042 VkBool32 *supported_feature = (VkBool32 *) &supported_features;
1043 VkBool32 *enabled_feature = (VkBool32 *) pCreateInfo->pEnabledFeatures;
1044 unsigned num_features =
1045 sizeof(VkPhysicalDeviceFeatures) / sizeof(VkBool32);
1046 for (uint32_t i = 0; i < num_features; i++) {
1047 if (enabled_feature[i] && !supported_feature[i])
1048 return vk_error(physical_device->instance,
1049 VK_ERROR_FEATURE_NOT_PRESENT);
1050 }
1051 }
1052
1053 device = vk_zalloc2(&physical_device->instance->alloc, pAllocator,
1054 sizeof(*device), 8, VK_SYSTEM_ALLOCATION_SCOPE_DEVICE);
1055 if (!device)
1056 return vk_error(physical_device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
1057
1058 device->_loader_data.loaderMagic = ICD_LOADER_MAGIC;
1059 device->instance = physical_device->instance;
1060 device->physical_device = physical_device;
1061
1062 if (pAllocator)
1063 device->alloc = *pAllocator;
1064 else
1065 device->alloc = physical_device->instance->alloc;
1066
1067 for (uint32_t i = 0; i < pCreateInfo->enabledExtensionCount; i++) {
1068 const char *ext_name = pCreateInfo->ppEnabledExtensionNames[i];
1069 int index = tu_get_device_extension_index(ext_name);
1070 if (index < 0 ||
1071 !physical_device->supported_extensions.extensions[index]) {
1072 vk_free(&device->alloc, device);
1073 return vk_error(physical_device->instance,
1074 VK_ERROR_EXTENSION_NOT_PRESENT);
1075 }
1076
1077 device->enabled_extensions.extensions[index] = true;
1078 }
1079
1080 for (unsigned i = 0; i < pCreateInfo->queueCreateInfoCount; i++) {
1081 const VkDeviceQueueCreateInfo *queue_create =
1082 &pCreateInfo->pQueueCreateInfos[i];
1083 uint32_t qfi = queue_create->queueFamilyIndex;
1084 device->queues[qfi] = vk_alloc(
1085 &device->alloc, queue_create->queueCount * sizeof(struct tu_queue),
1086 8, VK_SYSTEM_ALLOCATION_SCOPE_DEVICE);
1087 if (!device->queues[qfi]) {
1088 result = VK_ERROR_OUT_OF_HOST_MEMORY;
1089 goto fail_queues;
1090 }
1091
1092 memset(device->queues[qfi], 0,
1093 queue_create->queueCount * sizeof(struct tu_queue));
1094
1095 device->queue_count[qfi] = queue_create->queueCount;
1096
1097 for (unsigned q = 0; q < queue_create->queueCount; q++) {
1098 result = tu_queue_init(device, &device->queues[qfi][q], qfi, q,
1099 queue_create->flags);
1100 if (result != VK_SUCCESS)
1101 goto fail_queues;
1102 }
1103 }
1104
1105 device->compiler = ir3_compiler_create(NULL, physical_device->gpu_id);
1106    if (!device->compiler) {
1107       result = VK_ERROR_INITIALIZATION_FAILED;
           goto fail_queues;
        }
1108
1109 #define VSC_DATA_SIZE(pitch) ((pitch) * 32 + 0x100) /* extra size to store VSC_SIZE */
1110 #define VSC_DATA2_SIZE(pitch) ((pitch) * 32)
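/* vsc_data/vsc_data2 hold the visibility streams produced by the binning
 * pass; the "* 32" presumably corresponds to the hardware's 32 VSC pipes,
 * as programmed by the freedreno a6xx code.
 */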
1111
1112 device->vsc_data_pitch = 0x440 * 4;
1113 device->vsc_data2_pitch = 0x1040 * 4;
1114
1115 result = tu_bo_init_new(device, &device->vsc_data, VSC_DATA_SIZE(device->vsc_data_pitch));
1116 if (result != VK_SUCCESS)
1117 goto fail_vsc_data;
1118
1119 result = tu_bo_init_new(device, &device->vsc_data2, VSC_DATA2_SIZE(device->vsc_data2_pitch));
1120 if (result != VK_SUCCESS)
1121 goto fail_vsc_data2;
1122
1123 VkPipelineCacheCreateInfo ci;
1124 ci.sType = VK_STRUCTURE_TYPE_PIPELINE_CACHE_CREATE_INFO;
1125 ci.pNext = NULL;
1126 ci.flags = 0;
1127 ci.pInitialData = NULL;
1128 ci.initialDataSize = 0;
1129 VkPipelineCache pc;
1130 result =
1131 tu_CreatePipelineCache(tu_device_to_handle(device), &ci, NULL, &pc);
1132 if (result != VK_SUCCESS)
1133 goto fail_pipeline_cache;
1134
1135 device->mem_cache = tu_pipeline_cache_from_handle(pc);
1136
1137 *pDevice = tu_device_to_handle(device);
1138 return VK_SUCCESS;
1139
1140 fail_pipeline_cache:
1141 tu_bo_finish(device, &device->vsc_data2);
1142
1143 fail_vsc_data2:
1144 tu_bo_finish(device, &device->vsc_data);
1145
1146 fail_vsc_data:
1147 ralloc_free(device->compiler);
1148
1149 fail_queues:
1150 for (unsigned i = 0; i < TU_MAX_QUEUE_FAMILIES; i++) {
1151 for (unsigned q = 0; q < device->queue_count[i]; q++)
1152 tu_queue_finish(&device->queues[i][q]);
1153 if (device->queue_count[i])
1154 vk_free(&device->alloc, device->queues[i]);
1155 }
1156
1157 vk_free(&device->alloc, device);
1158 return result;
1159 }
1160
1161 void
1162 tu_DestroyDevice(VkDevice _device, const VkAllocationCallbacks *pAllocator)
1163 {
1164 TU_FROM_HANDLE(tu_device, device, _device);
1165
1166 if (!device)
1167 return;
1168
1169 tu_bo_finish(device, &device->vsc_data);
1170 tu_bo_finish(device, &device->vsc_data2);
1171
1172 for (unsigned i = 0; i < TU_MAX_QUEUE_FAMILIES; i++) {
1173 for (unsigned q = 0; q < device->queue_count[i]; q++)
1174 tu_queue_finish(&device->queues[i][q]);
1175 if (device->queue_count[i])
1176 vk_free(&device->alloc, device->queues[i]);
1177 }
1178
1179 /* the compiler does not use pAllocator */
1180 ralloc_free(device->compiler);
1181
1182 VkPipelineCache pc = tu_pipeline_cache_to_handle(device->mem_cache);
1183 tu_DestroyPipelineCache(tu_device_to_handle(device), pc, NULL);
1184
1185 vk_free(&device->alloc, device);
1186 }
1187
1188 VkResult
1189 tu_EnumerateInstanceLayerProperties(uint32_t *pPropertyCount,
1190 VkLayerProperties *pProperties)
1191 {
1192 *pPropertyCount = 0;
1193 return VK_SUCCESS;
1194 }
1195
1196 VkResult
1197 tu_EnumerateDeviceLayerProperties(VkPhysicalDevice physicalDevice,
1198 uint32_t *pPropertyCount,
1199 VkLayerProperties *pProperties)
1200 {
1201 *pPropertyCount = 0;
1202 return VK_SUCCESS;
1203 }
1204
1205 void
1206 tu_GetDeviceQueue2(VkDevice _device,
1207 const VkDeviceQueueInfo2 *pQueueInfo,
1208 VkQueue *pQueue)
1209 {
1210 TU_FROM_HANDLE(tu_device, device, _device);
1211 struct tu_queue *queue;
1212
1213 queue =
1214 &device->queues[pQueueInfo->queueFamilyIndex][pQueueInfo->queueIndex];
1215 if (pQueueInfo->flags != queue->flags) {
1216 /* From the Vulkan 1.1.70 spec:
1217 *
1218 * "The queue returned by vkGetDeviceQueue2 must have the same
1219 * flags value from this structure as that used at device
1220 * creation time in a VkDeviceQueueCreateInfo instance. If no
1221 * matching flags were specified at device creation time then
1222 * pQueue will return VK_NULL_HANDLE."
1223 */
1224 *pQueue = VK_NULL_HANDLE;
1225 return;
1226 }
1227
1228 *pQueue = tu_queue_to_handle(queue);
1229 }
1230
1231 void
1232 tu_GetDeviceQueue(VkDevice _device,
1233 uint32_t queueFamilyIndex,
1234 uint32_t queueIndex,
1235 VkQueue *pQueue)
1236 {
1237 const VkDeviceQueueInfo2 info =
1238 (VkDeviceQueueInfo2) { .sType = VK_STRUCTURE_TYPE_DEVICE_QUEUE_INFO_2,
1239 .queueFamilyIndex = queueFamilyIndex,
1240 .queueIndex = queueIndex };
1241
1242 tu_GetDeviceQueue2(_device, &info, pQueue);
1243 }
1244
1245 VkResult
1246 tu_QueueSubmit(VkQueue _queue,
1247 uint32_t submitCount,
1248 const VkSubmitInfo *pSubmits,
1249 VkFence _fence)
1250 {
1251 TU_FROM_HANDLE(tu_queue, queue, _queue);
1252
1253 for (uint32_t i = 0; i < submitCount; ++i) {
1254 const VkSubmitInfo *submit = pSubmits + i;
1255 const bool last_submit = (i == submitCount - 1);
1256 struct tu_bo_list bo_list;
1257 tu_bo_list_init(&bo_list);
1258
1259 uint32_t entry_count = 0;
1260 for (uint32_t j = 0; j < submit->commandBufferCount; ++j) {
1261 TU_FROM_HANDLE(tu_cmd_buffer, cmdbuf, submit->pCommandBuffers[j]);
1262 entry_count += cmdbuf->cs.entry_count;
1263 }
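      /* The loop above only counts IB entries so the cmds array can be sized
       * on the stack; the loop below fills in each command and adds its BO to
       * the submit's buffer list.
       */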
1264
1265 struct drm_msm_gem_submit_cmd cmds[entry_count];
1266 uint32_t entry_idx = 0;
1267 for (uint32_t j = 0; j < submit->commandBufferCount; ++j) {
1268 TU_FROM_HANDLE(tu_cmd_buffer, cmdbuf, submit->pCommandBuffers[j]);
1269 struct tu_cs *cs = &cmdbuf->cs;
1270 for (unsigned i = 0; i < cs->entry_count; ++i, ++entry_idx) {
1271 cmds[entry_idx].type = MSM_SUBMIT_CMD_BUF;
1272 cmds[entry_idx].submit_idx =
1273 tu_bo_list_add(&bo_list, cs->entries[i].bo,
1274 MSM_SUBMIT_BO_READ | MSM_SUBMIT_BO_DUMP);
1275 cmds[entry_idx].submit_offset = cs->entries[i].offset;
1276 cmds[entry_idx].size = cs->entries[i].size;
1277 cmds[entry_idx].pad = 0;
1278 cmds[entry_idx].nr_relocs = 0;
1279 cmds[entry_idx].relocs = 0;
1280 }
1281
1282 tu_bo_list_merge(&bo_list, &cmdbuf->bo_list);
1283 }
1284
1285 uint32_t flags = MSM_PIPE_3D0;
1286 if (last_submit) {
1287 flags |= MSM_SUBMIT_FENCE_FD_OUT;
1288 }
1289
1290 struct drm_msm_gem_submit req = {
1291 .flags = flags,
1292 .queueid = queue->msm_queue_id,
1293 .bos = (uint64_t)(uintptr_t) bo_list.bo_infos,
1294 .nr_bos = bo_list.count,
1295 .cmds = (uint64_t)(uintptr_t)cmds,
1296 .nr_cmds = entry_count,
1297 };
1298
1299 int ret = drmCommandWriteRead(queue->device->physical_device->local_fd,
1300 DRM_MSM_GEM_SUBMIT,
1301 &req, sizeof(req));
1302 if (ret) {
1303 fprintf(stderr, "submit failed: %s\n", strerror(errno));
1304 abort();
1305 }
1306
1307 tu_bo_list_destroy(&bo_list);
1308
1309 if (last_submit) {
1310 /* no need to merge fences as queue execution is serialized */
1311 tu_fence_update_fd(&queue->submit_fence, req.fence_fd);
1312 }
1313 }
1314
1315 if (_fence != VK_NULL_HANDLE) {
1316 TU_FROM_HANDLE(tu_fence, fence, _fence);
1317 tu_fence_copy(fence, &queue->submit_fence);
1318 }
1319
1320 return VK_SUCCESS;
1321 }
1322
1323 VkResult
1324 tu_QueueWaitIdle(VkQueue _queue)
1325 {
1326 TU_FROM_HANDLE(tu_queue, queue, _queue);
1327
1328 tu_fence_wait_idle(&queue->submit_fence);
1329
1330 return VK_SUCCESS;
1331 }
1332
1333 VkResult
1334 tu_DeviceWaitIdle(VkDevice _device)
1335 {
1336 TU_FROM_HANDLE(tu_device, device, _device);
1337
1338 for (unsigned i = 0; i < TU_MAX_QUEUE_FAMILIES; i++) {
1339 for (unsigned q = 0; q < device->queue_count[i]; q++) {
1340 tu_QueueWaitIdle(tu_queue_to_handle(&device->queues[i][q]));
1341 }
1342 }
1343 return VK_SUCCESS;
1344 }
1345
1346 VkResult
1347 tu_EnumerateInstanceExtensionProperties(const char *pLayerName,
1348 uint32_t *pPropertyCount,
1349 VkExtensionProperties *pProperties)
1350 {
1351 VK_OUTARRAY_MAKE(out, pProperties, pPropertyCount);
1352
1353    /* We support no layers */
1354 if (pLayerName)
1355 return vk_error(NULL, VK_ERROR_LAYER_NOT_PRESENT);
1356
1357 for (int i = 0; i < TU_INSTANCE_EXTENSION_COUNT; i++) {
1358 if (tu_supported_instance_extensions.extensions[i]) {
1359 vk_outarray_append(&out, prop) { *prop = tu_instance_extensions[i]; }
1360 }
1361 }
1362
1363 return vk_outarray_status(&out);
1364 }
1365
1366 VkResult
1367 tu_EnumerateDeviceExtensionProperties(VkPhysicalDevice physicalDevice,
1368 const char *pLayerName,
1369 uint32_t *pPropertyCount,
1370 VkExtensionProperties *pProperties)
1371 {
1372    /* We support no layers */
1373 TU_FROM_HANDLE(tu_physical_device, device, physicalDevice);
1374 VK_OUTARRAY_MAKE(out, pProperties, pPropertyCount);
1375
1376    /* We support no layers */
1377 if (pLayerName)
1378 return vk_error(NULL, VK_ERROR_LAYER_NOT_PRESENT);
1379
1380 for (int i = 0; i < TU_DEVICE_EXTENSION_COUNT; i++) {
1381 if (device->supported_extensions.extensions[i]) {
1382 vk_outarray_append(&out, prop) { *prop = tu_device_extensions[i]; }
1383 }
1384 }
1385
1386 return vk_outarray_status(&out);
1387 }
1388
1389 PFN_vkVoidFunction
1390 tu_GetInstanceProcAddr(VkInstance _instance, const char *pName)
1391 {
1392 TU_FROM_HANDLE(tu_instance, instance, _instance);
1393
1394 return tu_lookup_entrypoint_checked(
1395 pName, instance ? instance->api_version : 0,
1396 instance ? &instance->enabled_extensions : NULL, NULL);
1397 }
1398
1399 /* The loader wants us to expose a second GetInstanceProcAddr function
1400 * to work around certain LD_PRELOAD issues seen in apps.
1401 */
1402 PUBLIC
1403 VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL
1404 vk_icdGetInstanceProcAddr(VkInstance instance, const char *pName);
1405
1406 PUBLIC
1407 VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL
1408 vk_icdGetInstanceProcAddr(VkInstance instance, const char *pName)
1409 {
1410 return tu_GetInstanceProcAddr(instance, pName);
1411 }
1412
1413 PFN_vkVoidFunction
1414 tu_GetDeviceProcAddr(VkDevice _device, const char *pName)
1415 {
1416 TU_FROM_HANDLE(tu_device, device, _device);
1417
1418 return tu_lookup_entrypoint_checked(pName, device->instance->api_version,
1419 &device->instance->enabled_extensions,
1420 &device->enabled_extensions);
1421 }
1422
1423 static VkResult
1424 tu_alloc_memory(struct tu_device *device,
1425 const VkMemoryAllocateInfo *pAllocateInfo,
1426 const VkAllocationCallbacks *pAllocator,
1427 VkDeviceMemory *pMem)
1428 {
1429 struct tu_device_memory *mem;
1430 VkResult result;
1431
1432 assert(pAllocateInfo->sType == VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO);
1433
1434 if (pAllocateInfo->allocationSize == 0) {
1435 /* Apparently, this is allowed */
1436 *pMem = VK_NULL_HANDLE;
1437 return VK_SUCCESS;
1438 }
1439
1440 mem = vk_alloc2(&device->alloc, pAllocator, sizeof(*mem), 8,
1441 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
1442 if (mem == NULL)
1443 return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
1444
1445 const VkImportMemoryFdInfoKHR *fd_info =
1446 vk_find_struct_const(pAllocateInfo->pNext, IMPORT_MEMORY_FD_INFO_KHR);
1447 if (fd_info && !fd_info->handleType)
1448 fd_info = NULL;
1449
1450 if (fd_info) {
1451 assert(fd_info->handleType ==
1452 VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT ||
1453 fd_info->handleType ==
1454 VK_EXTERNAL_MEMORY_HANDLE_TYPE_DMA_BUF_BIT_EXT);
1455
1456 /*
1457 * TODO Importing the same fd twice gives us the same handle without
1458 * reference counting. We need to maintain a per-instance handle-to-bo
1459 * table and add reference count to tu_bo.
1460 */
1461 result = tu_bo_init_dmabuf(device, &mem->bo,
1462 pAllocateInfo->allocationSize, fd_info->fd);
1463 if (result == VK_SUCCESS) {
1464 /* take ownership and close the fd */
1465 close(fd_info->fd);
1466 }
1467 } else {
1468 result =
1469 tu_bo_init_new(device, &mem->bo, pAllocateInfo->allocationSize);
1470 }
1471
1472 if (result != VK_SUCCESS) {
1473 vk_free2(&device->alloc, pAllocator, mem);
1474 return result;
1475 }
1476
1477 mem->size = pAllocateInfo->allocationSize;
1478 mem->type_index = pAllocateInfo->memoryTypeIndex;
1479
1480 mem->map = NULL;
1481 mem->user_ptr = NULL;
1482
1483 *pMem = tu_device_memory_to_handle(mem);
1484
1485 return VK_SUCCESS;
1486 }
1487
1488 VkResult
1489 tu_AllocateMemory(VkDevice _device,
1490 const VkMemoryAllocateInfo *pAllocateInfo,
1491 const VkAllocationCallbacks *pAllocator,
1492 VkDeviceMemory *pMem)
1493 {
1494 TU_FROM_HANDLE(tu_device, device, _device);
1495 return tu_alloc_memory(device, pAllocateInfo, pAllocator, pMem);
1496 }
1497
1498 void
1499 tu_FreeMemory(VkDevice _device,
1500 VkDeviceMemory _mem,
1501 const VkAllocationCallbacks *pAllocator)
1502 {
1503 TU_FROM_HANDLE(tu_device, device, _device);
1504 TU_FROM_HANDLE(tu_device_memory, mem, _mem);
1505
1506 if (mem == NULL)
1507 return;
1508
1509 tu_bo_finish(device, &mem->bo);
1510 vk_free2(&device->alloc, pAllocator, mem);
1511 }
1512
1513 VkResult
1514 tu_MapMemory(VkDevice _device,
1515 VkDeviceMemory _memory,
1516 VkDeviceSize offset,
1517 VkDeviceSize size,
1518 VkMemoryMapFlags flags,
1519 void **ppData)
1520 {
1521 TU_FROM_HANDLE(tu_device, device, _device);
1522 TU_FROM_HANDLE(tu_device_memory, mem, _memory);
1523 VkResult result;
1524
1525 if (mem == NULL) {
1526 *ppData = NULL;
1527 return VK_SUCCESS;
1528 }
1529
1530 if (mem->user_ptr) {
1531 *ppData = mem->user_ptr;
1532 } else if (!mem->map) {
1533 result = tu_bo_map(device, &mem->bo);
1534 if (result != VK_SUCCESS)
1535 return result;
1536 *ppData = mem->map = mem->bo.map;
1537 } else
1538 *ppData = mem->map;
1539
1540 if (*ppData) {
1541 *ppData += offset;
1542 return VK_SUCCESS;
1543 }
1544
1545 return vk_error(device->instance, VK_ERROR_MEMORY_MAP_FAILED);
1546 }
1547
1548 void
1549 tu_UnmapMemory(VkDevice _device, VkDeviceMemory _memory)
1550 {
1551 /* I do not see any unmapping done by the freedreno Gallium driver. */
1552 }
1553
1554 VkResult
1555 tu_FlushMappedMemoryRanges(VkDevice _device,
1556 uint32_t memoryRangeCount,
1557 const VkMappedMemoryRange *pMemoryRanges)
1558 {
1559 return VK_SUCCESS;
1560 }
1561
1562 VkResult
1563 tu_InvalidateMappedMemoryRanges(VkDevice _device,
1564 uint32_t memoryRangeCount,
1565 const VkMappedMemoryRange *pMemoryRanges)
1566 {
1567 return VK_SUCCESS;
1568 }
1569
1570 void
1571 tu_GetBufferMemoryRequirements(VkDevice _device,
1572 VkBuffer _buffer,
1573 VkMemoryRequirements *pMemoryRequirements)
1574 {
1575 TU_FROM_HANDLE(tu_buffer, buffer, _buffer);
1576
1577 pMemoryRequirements->memoryTypeBits = 1;
1578 pMemoryRequirements->alignment = 16;
1579 pMemoryRequirements->size =
1580 align64(buffer->size, pMemoryRequirements->alignment);
1581 }
1582
1583 void
1584 tu_GetBufferMemoryRequirements2(
1585 VkDevice device,
1586 const VkBufferMemoryRequirementsInfo2 *pInfo,
1587 VkMemoryRequirements2 *pMemoryRequirements)
1588 {
1589 tu_GetBufferMemoryRequirements(device, pInfo->buffer,
1590 &pMemoryRequirements->memoryRequirements);
1591 }
1592
1593 void
1594 tu_GetImageMemoryRequirements(VkDevice _device,
1595 VkImage _image,
1596 VkMemoryRequirements *pMemoryRequirements)
1597 {
1598 TU_FROM_HANDLE(tu_image, image, _image);
1599
1600 pMemoryRequirements->memoryTypeBits = 1;
1601 pMemoryRequirements->size = image->layout.size;
1602 pMemoryRequirements->alignment = image->alignment;
1603 }
1604
1605 void
1606 tu_GetImageMemoryRequirements2(VkDevice device,
1607 const VkImageMemoryRequirementsInfo2 *pInfo,
1608 VkMemoryRequirements2 *pMemoryRequirements)
1609 {
1610 tu_GetImageMemoryRequirements(device, pInfo->image,
1611 &pMemoryRequirements->memoryRequirements);
1612 }
1613
1614 void
1615 tu_GetImageSparseMemoryRequirements(
1616 VkDevice device,
1617 VkImage image,
1618 uint32_t *pSparseMemoryRequirementCount,
1619 VkSparseImageMemoryRequirements *pSparseMemoryRequirements)
1620 {
1621 tu_stub();
1622 }
1623
1624 void
1625 tu_GetImageSparseMemoryRequirements2(
1626 VkDevice device,
1627 const VkImageSparseMemoryRequirementsInfo2 *pInfo,
1628 uint32_t *pSparseMemoryRequirementCount,
1629 VkSparseImageMemoryRequirements2 *pSparseMemoryRequirements)
1630 {
1631 tu_stub();
1632 }
1633
1634 void
1635 tu_GetDeviceMemoryCommitment(VkDevice device,
1636 VkDeviceMemory memory,
1637 VkDeviceSize *pCommittedMemoryInBytes)
1638 {
1639 *pCommittedMemoryInBytes = 0;
1640 }
1641
1642 VkResult
1643 tu_BindBufferMemory2(VkDevice device,
1644 uint32_t bindInfoCount,
1645 const VkBindBufferMemoryInfo *pBindInfos)
1646 {
1647 for (uint32_t i = 0; i < bindInfoCount; ++i) {
1648 TU_FROM_HANDLE(tu_device_memory, mem, pBindInfos[i].memory);
1649 TU_FROM_HANDLE(tu_buffer, buffer, pBindInfos[i].buffer);
1650
1651 if (mem) {
1652 buffer->bo = &mem->bo;
1653 buffer->bo_offset = pBindInfos[i].memoryOffset;
1654 } else {
1655 buffer->bo = NULL;
1656 }
1657 }
1658 return VK_SUCCESS;
1659 }
1660
1661 VkResult
1662 tu_BindBufferMemory(VkDevice device,
1663 VkBuffer buffer,
1664 VkDeviceMemory memory,
1665 VkDeviceSize memoryOffset)
1666 {
1667 const VkBindBufferMemoryInfo info = {
1668 .sType = VK_STRUCTURE_TYPE_BIND_BUFFER_MEMORY_INFO,
1669 .buffer = buffer,
1670 .memory = memory,
1671 .memoryOffset = memoryOffset
1672 };
1673
1674 return tu_BindBufferMemory2(device, 1, &info);
1675 }
1676
1677 VkResult
1678 tu_BindImageMemory2(VkDevice device,
1679 uint32_t bindInfoCount,
1680 const VkBindImageMemoryInfo *pBindInfos)
1681 {
1682 for (uint32_t i = 0; i < bindInfoCount; ++i) {
1683 TU_FROM_HANDLE(tu_image, image, pBindInfos[i].image);
1684 TU_FROM_HANDLE(tu_device_memory, mem, pBindInfos[i].memory);
1685
1686 if (mem) {
1687 image->bo = &mem->bo;
1688 image->bo_offset = pBindInfos[i].memoryOffset;
1689 } else {
1690 image->bo = NULL;
1691 image->bo_offset = 0;
1692 }
1693 }
1694
1695 return VK_SUCCESS;
1696 }
1697
1698 VkResult
1699 tu_BindImageMemory(VkDevice device,
1700 VkImage image,
1701 VkDeviceMemory memory,
1702 VkDeviceSize memoryOffset)
1703 {
1704 const VkBindImageMemoryInfo info = {
1705       .sType = VK_STRUCTURE_TYPE_BIND_IMAGE_MEMORY_INFO,
1706 .image = image,
1707 .memory = memory,
1708 .memoryOffset = memoryOffset
1709 };
1710
1711 return tu_BindImageMemory2(device, 1, &info);
1712 }
1713
1714 VkResult
1715 tu_QueueBindSparse(VkQueue _queue,
1716 uint32_t bindInfoCount,
1717 const VkBindSparseInfo *pBindInfo,
1718 VkFence _fence)
1719 {
1720 return VK_SUCCESS;
1721 }
1722
1723 // Queue semaphore functions
1724
1725 VkResult
1726 tu_CreateSemaphore(VkDevice _device,
1727 const VkSemaphoreCreateInfo *pCreateInfo,
1728 const VkAllocationCallbacks *pAllocator,
1729 VkSemaphore *pSemaphore)
1730 {
1731 TU_FROM_HANDLE(tu_device, device, _device);
1732
1733 struct tu_semaphore *sem =
1734 vk_alloc2(&device->alloc, pAllocator, sizeof(*sem), 8,
1735 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
1736 if (!sem)
1737 return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
1738
1739 *pSemaphore = tu_semaphore_to_handle(sem);
1740 return VK_SUCCESS;
1741 }
1742
1743 void
1744 tu_DestroySemaphore(VkDevice _device,
1745 VkSemaphore _semaphore,
1746 const VkAllocationCallbacks *pAllocator)
1747 {
1748 TU_FROM_HANDLE(tu_device, device, _device);
1749 TU_FROM_HANDLE(tu_semaphore, sem, _semaphore);
1750 if (!_semaphore)
1751 return;
1752
1753 vk_free2(&device->alloc, pAllocator, sem);
1754 }
1755
1756 VkResult
1757 tu_CreateEvent(VkDevice _device,
1758 const VkEventCreateInfo *pCreateInfo,
1759 const VkAllocationCallbacks *pAllocator,
1760 VkEvent *pEvent)
1761 {
1762 TU_FROM_HANDLE(tu_device, device, _device);
1763 struct tu_event *event =
1764 vk_alloc2(&device->alloc, pAllocator, sizeof(*event), 8,
1765 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
1766
1767 if (!event)
1768 return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
1769
1770 VkResult result = tu_bo_init_new(device, &event->bo, 0x1000);
1771 if (result != VK_SUCCESS)
1772 goto fail_alloc;
1773
1774 result = tu_bo_map(device, &event->bo);
1775 if (result != VK_SUCCESS)
1776 goto fail_map;
1777
1778 *pEvent = tu_event_to_handle(event);
1779
1780 return VK_SUCCESS;
1781
1782 fail_map:
1783 tu_bo_finish(device, &event->bo);
1784 fail_alloc:
1785 vk_free2(&device->alloc, pAllocator, event);
1786 return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
1787 }
1788
1789 void
1790 tu_DestroyEvent(VkDevice _device,
1791 VkEvent _event,
1792 const VkAllocationCallbacks *pAllocator)
1793 {
1794 TU_FROM_HANDLE(tu_device, device, _device);
1795 TU_FROM_HANDLE(tu_event, event, _event);
1796
1797 if (!event)
1798 return;
1799
1800 tu_bo_finish(device, &event->bo);
1801 vk_free2(&device->alloc, pAllocator, event);
1802 }
1803
1804 VkResult
1805 tu_GetEventStatus(VkDevice _device, VkEvent _event)
1806 {
1807 TU_FROM_HANDLE(tu_event, event, _event);
1808
1809 if (*(uint64_t*) event->bo.map == 1)
1810 return VK_EVENT_SET;
1811 return VK_EVENT_RESET;
1812 }
1813
1814 VkResult
1815 tu_SetEvent(VkDevice _device, VkEvent _event)
1816 {
1817 TU_FROM_HANDLE(tu_event, event, _event);
1818 *(uint64_t*) event->bo.map = 1;
1819
1820 return VK_SUCCESS;
1821 }
1822
1823 VkResult
1824 tu_ResetEvent(VkDevice _device, VkEvent _event)
1825 {
1826 TU_FROM_HANDLE(tu_event, event, _event);
1827 *(uint64_t*) event->bo.map = 0;
1828
1829 return VK_SUCCESS;
1830 }
1831
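/* Buffer creation only records the create parameters; the backing BO is
 * attached later through tu_BindBufferMemory2().
 */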
1832 VkResult
1833 tu_CreateBuffer(VkDevice _device,
1834 const VkBufferCreateInfo *pCreateInfo,
1835 const VkAllocationCallbacks *pAllocator,
1836 VkBuffer *pBuffer)
1837 {
1838 TU_FROM_HANDLE(tu_device, device, _device);
1839 struct tu_buffer *buffer;
1840
1841 assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO);
1842
1843 buffer = vk_alloc2(&device->alloc, pAllocator, sizeof(*buffer), 8,
1844 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
1845 if (buffer == NULL)
1846 return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
1847
1848 buffer->size = pCreateInfo->size;
1849 buffer->usage = pCreateInfo->usage;
1850 buffer->flags = pCreateInfo->flags;
1851
1852 *pBuffer = tu_buffer_to_handle(buffer);
1853
1854 return VK_SUCCESS;
1855 }
1856
1857 void
1858 tu_DestroyBuffer(VkDevice _device,
1859 VkBuffer _buffer,
1860 const VkAllocationCallbacks *pAllocator)
1861 {
1862 TU_FROM_HANDLE(tu_device, device, _device);
1863 TU_FROM_HANDLE(tu_buffer, buffer, _buffer);
1864
1865 if (!buffer)
1866 return;
1867
1868 vk_free2(&device->alloc, pAllocator, buffer);
1869 }
1870
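/* For 3D image views the maximum layer count is the view's depth extent; for
 * array views it is base layer + layer count.
 */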
1871 static uint32_t
1872 tu_surface_max_layer_count(struct tu_image_view *iview)
1873 {
1874 return iview->type == VK_IMAGE_VIEW_TYPE_3D
1875 ? iview->extent.depth
1876 : (iview->base_layer + iview->layer_count);
1877 }
1878
1879 VkResult
1880 tu_CreateFramebuffer(VkDevice _device,
1881 const VkFramebufferCreateInfo *pCreateInfo,
1882 const VkAllocationCallbacks *pAllocator,
1883 VkFramebuffer *pFramebuffer)
1884 {
1885 TU_FROM_HANDLE(tu_device, device, _device);
1886 struct tu_framebuffer *framebuffer;
1887
1888 assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO);
1889
1890 size_t size = sizeof(*framebuffer) + sizeof(struct tu_attachment_info) *
1891 pCreateInfo->attachmentCount;
1892 framebuffer = vk_alloc2(&device->alloc, pAllocator, size, 8,
1893 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
1894 if (framebuffer == NULL)
1895 return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
1896
1897 framebuffer->attachment_count = pCreateInfo->attachmentCount;
1898 framebuffer->width = pCreateInfo->width;
1899 framebuffer->height = pCreateInfo->height;
1900 framebuffer->layers = pCreateInfo->layers;
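   /* Clamp the framebuffer dimensions to the smallest attachment so the
    * render area never exceeds any of the attached image views. */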
1901 for (uint32_t i = 0; i < pCreateInfo->attachmentCount; i++) {
1902 VkImageView _iview = pCreateInfo->pAttachments[i];
1903 struct tu_image_view *iview = tu_image_view_from_handle(_iview);
1904 framebuffer->attachments[i].attachment = iview;
1905
1906 framebuffer->width = MIN2(framebuffer->width, iview->extent.width);
1907 framebuffer->height = MIN2(framebuffer->height, iview->extent.height);
1908 framebuffer->layers =
1909 MIN2(framebuffer->layers, tu_surface_max_layer_count(iview));
1910 }
1911
1912 *pFramebuffer = tu_framebuffer_to_handle(framebuffer);
1913 return VK_SUCCESS;
1914 }
1915
1916 void
1917 tu_DestroyFramebuffer(VkDevice _device,
1918 VkFramebuffer _fb,
1919 const VkAllocationCallbacks *pAllocator)
1920 {
1921 TU_FROM_HANDLE(tu_device, device, _device);
1922 TU_FROM_HANDLE(tu_framebuffer, fb, _fb);
1923
1924 if (!fb)
1925 return;
1926 vk_free2(&device->alloc, pAllocator, fb);
1927 }
1928
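/* Translate a VkSamplerAddressMode to the a6xx TEX_SAMP wrap encoding.
 * Clamp-to-border additionally needs a border color to be programmed, which
 * the caller tracks through *needs_border.
 */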
1929 static enum a6xx_tex_clamp
1930 tu6_tex_wrap(VkSamplerAddressMode address_mode, bool *needs_border)
1931 {
1932 switch (address_mode) {
1933 case VK_SAMPLER_ADDRESS_MODE_REPEAT:
1934 return A6XX_TEX_REPEAT;
1935 case VK_SAMPLER_ADDRESS_MODE_MIRRORED_REPEAT:
1936 return A6XX_TEX_MIRROR_REPEAT;
1937 case VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE:
1938 return A6XX_TEX_CLAMP_TO_EDGE;
1939 case VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_BORDER:
1940 *needs_border = true;
1941 return A6XX_TEX_CLAMP_TO_BORDER;
1942 case VK_SAMPLER_ADDRESS_MODE_MIRROR_CLAMP_TO_EDGE:
1943       /* only correct for power-of-two texture sizes; mirror-clamp-to-edge
1943        * would need to be emulated otherwise! */
1944 return A6XX_TEX_MIRROR_CLAMP;
1945 default:
1946 unreachable("illegal tex wrap mode");
1947 break;
1948 }
1949 }
1950
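/* Translate a VkFilter to the a6xx filter encoding; when anisotropy is
 * enabled, linear filtering uses the dedicated ANISO mode instead.
 */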
1951 static enum a6xx_tex_filter
1952 tu6_tex_filter(VkFilter filter, unsigned aniso)
1953 {
1954 switch (filter) {
1955 case VK_FILTER_NEAREST:
1956 return A6XX_TEX_NEAREST;
1957 case VK_FILTER_LINEAR:
1958 return aniso ? A6XX_TEX_ANISO : A6XX_TEX_LINEAR;
1959 case VK_FILTER_CUBIC_IMG:
1960 default:
1961 unreachable("illegal texture filter");
1962 break;
1963 }
1964 }
1965
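/* VkCompareOp uses the same numeric ordering as adreno_compare_func
 * (NEVER through ALWAYS), so a plain cast is sufficient.
 */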
1966 static inline enum adreno_compare_func
1967 tu6_compare_func(VkCompareOp op)
1968 {
1969 return (enum adreno_compare_func) op;
1970 }
1971
1972 static void
1973 tu_init_sampler(struct tu_device *device,
1974 struct tu_sampler *sampler,
1975 const VkSamplerCreateInfo *pCreateInfo)
1976 {
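   /* The hardware ANISO field is the log2 of the sample count:
    * maxAnisotropy values 1, 2, 4, 8, 16 map to 0..4. */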
1977 unsigned aniso = pCreateInfo->anisotropyEnable ?
1978 util_last_bit(MIN2((uint32_t)pCreateInfo->maxAnisotropy >> 1, 8)) : 0;
1979 bool miplinear = (pCreateInfo->mipmapMode == VK_SAMPLER_MIPMAP_MODE_LINEAR);
1980 bool needs_border = false;
1981
1982 sampler->state[0] =
1983 COND(miplinear, A6XX_TEX_SAMP_0_MIPFILTER_LINEAR_NEAR) |
1984 A6XX_TEX_SAMP_0_XY_MAG(tu6_tex_filter(pCreateInfo->magFilter, aniso)) |
1985 A6XX_TEX_SAMP_0_XY_MIN(tu6_tex_filter(pCreateInfo->minFilter, aniso)) |
1986 A6XX_TEX_SAMP_0_ANISO(aniso) |
1987 A6XX_TEX_SAMP_0_WRAP_S(tu6_tex_wrap(pCreateInfo->addressModeU, &needs_border)) |
1988 A6XX_TEX_SAMP_0_WRAP_T(tu6_tex_wrap(pCreateInfo->addressModeV, &needs_border)) |
1989 A6XX_TEX_SAMP_0_WRAP_R(tu6_tex_wrap(pCreateInfo->addressModeW, &needs_border)) |
1990 A6XX_TEX_SAMP_0_LOD_BIAS(pCreateInfo->mipLodBias);
1991 sampler->state[1] =
1992 /* COND(!cso->seamless_cube_map, A6XX_TEX_SAMP_1_CUBEMAPSEAMLESSFILTOFF) | */
1993 COND(pCreateInfo->unnormalizedCoordinates, A6XX_TEX_SAMP_1_UNNORM_COORDS) |
1994 A6XX_TEX_SAMP_1_MIN_LOD(pCreateInfo->minLod) |
1995 A6XX_TEX_SAMP_1_MAX_LOD(pCreateInfo->maxLod) |
1996 COND(pCreateInfo->compareEnable,
1997 A6XX_TEX_SAMP_1_COMPARE_FUNC(tu6_compare_func(pCreateInfo->compareOp)));
1998 sampler->state[2] = 0;
1999 sampler->state[3] = 0;
2000
2001 /* TODO:
2002     * A6XX_TEX_SAMP_1_MIPFILTER_LINEAR_FAR disables mipmapping, but Vulkan has no NONE mipfilter?
2003     * border color support is still missing
2004 */
2005
2006 sampler->needs_border = needs_border;
2007 sampler->border = pCreateInfo->borderColor;
2008 }
2009
2010 VkResult
2011 tu_CreateSampler(VkDevice _device,
2012 const VkSamplerCreateInfo *pCreateInfo,
2013 const VkAllocationCallbacks *pAllocator,
2014 VkSampler *pSampler)
2015 {
2016 TU_FROM_HANDLE(tu_device, device, _device);
2017 struct tu_sampler *sampler;
2018
2019 assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_SAMPLER_CREATE_INFO);
2020
2021 sampler = vk_alloc2(&device->alloc, pAllocator, sizeof(*sampler), 8,
2022 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
2023 if (!sampler)
2024 return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
2025
2026 tu_init_sampler(device, sampler, pCreateInfo);
2027 *pSampler = tu_sampler_to_handle(sampler);
2028
2029 return VK_SUCCESS;
2030 }
2031
2032 void
2033 tu_DestroySampler(VkDevice _device,
2034 VkSampler _sampler,
2035 const VkAllocationCallbacks *pAllocator)
2036 {
2037 TU_FROM_HANDLE(tu_device, device, _device);
2038 TU_FROM_HANDLE(tu_sampler, sampler, _sampler);
2039
2040 if (!sampler)
2041 return;
2042 vk_free2(&device->alloc, pAllocator, sampler);
2043 }
2044
2045 /* vk_icd.h does not declare this function, so we declare it here to
2046  * suppress -Wmissing-prototypes.
2047 */
2048 PUBLIC VKAPI_ATTR VkResult VKAPI_CALL
2049 vk_icdNegotiateLoaderICDInterfaceVersion(uint32_t *pSupportedVersion);
2050
2051 PUBLIC VKAPI_ATTR VkResult VKAPI_CALL
2052 vk_icdNegotiateLoaderICDInterfaceVersion(uint32_t *pSupportedVersion)
2053 {
2054 /* For the full details on loader interface versioning, see
2055 * <https://github.com/KhronosGroup/Vulkan-LoaderAndValidationLayers/blob/master/loader/LoaderAndLayerInterface.md>.
2056 * What follows is a condensed summary, to help you navigate the large and
2057 * confusing official doc.
2058 *
2059 * - Loader interface v0 is incompatible with later versions. We don't
2060 * support it.
2061 *
2062 * - In loader interface v1:
2063 * - The first ICD entrypoint called by the loader is
2064 * vk_icdGetInstanceProcAddr(). The ICD must statically expose this
2065 * entrypoint.
2066 * - The ICD must statically expose no other Vulkan symbol unless it
2067 * is linked with -Bsymbolic.
2068 * - Each dispatchable Vulkan handle created by the ICD must be
2069 * a pointer to a struct whose first member is VK_LOADER_DATA. The
2070 * ICD must initialize VK_LOADER_DATA.loadMagic to
2071 * ICD_LOADER_MAGIC.
2072 * - The loader implements vkCreate{PLATFORM}SurfaceKHR() and
2073 * vkDestroySurfaceKHR(). The ICD must be capable of working with
2074 * such loader-managed surfaces.
2075 *
2076 * - Loader interface v2 differs from v1 in:
2077 * - The first ICD entrypoint called by the loader is
2078 * vk_icdNegotiateLoaderICDInterfaceVersion(). The ICD must
2079 * statically expose this entrypoint.
2080 *
2081 * - Loader interface v3 differs from v2 in:
2082 * - The ICD must implement vkCreate{PLATFORM}SurfaceKHR(),
2083  *      vkDestroySurfaceKHR(), and other APIs which use VkSurfaceKHR,
2084 * because the loader no longer does so.
2085 */
2086 *pSupportedVersion = MIN2(*pSupportedVersion, 3u);
2087 return VK_SUCCESS;
2088 }
2089
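/* Opaque fds and dma-bufs share the same export path: the memory's backing
 * BO is exported as a dma-buf fd.
 */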
2090 VkResult
2091 tu_GetMemoryFdKHR(VkDevice _device,
2092 const VkMemoryGetFdInfoKHR *pGetFdInfo,
2093 int *pFd)
2094 {
2095 TU_FROM_HANDLE(tu_device, device, _device);
2096 TU_FROM_HANDLE(tu_device_memory, memory, pGetFdInfo->memory);
2097
2098 assert(pGetFdInfo->sType == VK_STRUCTURE_TYPE_MEMORY_GET_FD_INFO_KHR);
2099
2100 /* At the moment, we support only the below handle types. */
2101 assert(pGetFdInfo->handleType ==
2102 VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT ||
2103 pGetFdInfo->handleType ==
2104 VK_EXTERNAL_MEMORY_HANDLE_TYPE_DMA_BUF_BIT_EXT);
2105
2106 int prime_fd = tu_bo_export_dmabuf(device, &memory->bo);
2107 if (prime_fd < 0)
2108 return vk_error(device->instance, VK_ERROR_OUT_OF_DEVICE_MEMORY);
2109
2110 *pFd = prime_fd;
2111 return VK_SUCCESS;
2112 }
2113
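/* memoryTypeBits of 1 means an imported dma-buf may only be bound to memory
 * type 0.
 */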
2114 VkResult
2115 tu_GetMemoryFdPropertiesKHR(VkDevice _device,
2116 VkExternalMemoryHandleTypeFlagBits handleType,
2117 int fd,
2118 VkMemoryFdPropertiesKHR *pMemoryFdProperties)
2119 {
2120 assert(handleType == VK_EXTERNAL_MEMORY_HANDLE_TYPE_DMA_BUF_BIT_EXT);
2121 pMemoryFdProperties->memoryTypeBits = 1;
2122 return VK_SUCCESS;
2123 }
2124
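/* External semaphore handles are not supported yet, so every capability is
 * reported as zero.
 */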
2125 void
2126 tu_GetPhysicalDeviceExternalSemaphoreProperties(
2127 VkPhysicalDevice physicalDevice,
2128 const VkPhysicalDeviceExternalSemaphoreInfo *pExternalSemaphoreInfo,
2129 VkExternalSemaphoreProperties *pExternalSemaphoreProperties)
2130 {
2131 pExternalSemaphoreProperties->exportFromImportedHandleTypes = 0;
2132 pExternalSemaphoreProperties->compatibleHandleTypes = 0;
2133 pExternalSemaphoreProperties->externalSemaphoreFeatures = 0;
2134 }
2135
2136 void
2137 tu_GetPhysicalDeviceExternalFenceProperties(
2138 VkPhysicalDevice physicalDevice,
2139 const VkPhysicalDeviceExternalFenceInfo *pExternalFenceInfo,
2140 VkExternalFenceProperties *pExternalFenceProperties)
2141 {
2142 pExternalFenceProperties->exportFromImportedHandleTypes = 0;
2143 pExternalFenceProperties->compatibleHandleTypes = 0;
2144 pExternalFenceProperties->externalFenceFeatures = 0;
2145 }
2146
2147 VkResult
2148 tu_CreateDebugReportCallbackEXT(
2149 VkInstance _instance,
2150 const VkDebugReportCallbackCreateInfoEXT *pCreateInfo,
2151 const VkAllocationCallbacks *pAllocator,
2152 VkDebugReportCallbackEXT *pCallback)
2153 {
2154 TU_FROM_HANDLE(tu_instance, instance, _instance);
2155 return vk_create_debug_report_callback(&instance->debug_report_callbacks,
2156 pCreateInfo, pAllocator,
2157 &instance->alloc, pCallback);
2158 }
2159
2160 void
2161 tu_DestroyDebugReportCallbackEXT(VkInstance _instance,
2162 VkDebugReportCallbackEXT _callback,
2163 const VkAllocationCallbacks *pAllocator)
2164 {
2165 TU_FROM_HANDLE(tu_instance, instance, _instance);
2166 vk_destroy_debug_report_callback(&instance->debug_report_callbacks,
2167 _callback, pAllocator, &instance->alloc);
2168 }
2169
2170 void
2171 tu_DebugReportMessageEXT(VkInstance _instance,
2172 VkDebugReportFlagsEXT flags,
2173 VkDebugReportObjectTypeEXT objectType,
2174 uint64_t object,
2175 size_t location,
2176 int32_t messageCode,
2177 const char *pLayerPrefix,
2178 const char *pMessage)
2179 {
2180 TU_FROM_HANDLE(tu_instance, instance, _instance);
2181 vk_debug_report(&instance->debug_report_callbacks, flags, objectType,
2182 object, location, messageCode, pLayerPrefix, pMessage);
2183 }
2184
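/* Device groups only ever contain a single physical device, so the peer is
 * always the local device and all peer memory features can be advertised.
 */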
2185 void
2186 tu_GetDeviceGroupPeerMemoryFeatures(
2187 VkDevice device,
2188 uint32_t heapIndex,
2189 uint32_t localDeviceIndex,
2190 uint32_t remoteDeviceIndex,
2191 VkPeerMemoryFeatureFlags *pPeerMemoryFeatures)
2192 {
2193 assert(localDeviceIndex == remoteDeviceIndex);
2194
2195 *pPeerMemoryFeatures = VK_PEER_MEMORY_FEATURE_COPY_SRC_BIT |
2196 VK_PEER_MEMORY_FEATURE_COPY_DST_BIT |
2197 VK_PEER_MEMORY_FEATURE_GENERIC_SRC_BIT |
2198 VK_PEER_MEMORY_FEATURE_GENERIC_DST_BIT;
2199 }