turnip: Disable timestamp queries for now.
[mesa.git] / src / freedreno / vulkan / tu_device.c
1 /*
2 * Copyright © 2016 Red Hat.
3 * Copyright © 2016 Bas Nieuwenhuizen
4 *
5 * based in part on anv driver which is:
6 * Copyright © 2015 Intel Corporation
7 *
8 * Permission is hereby granted, free of charge, to any person obtaining a
9 * copy of this software and associated documentation files (the "Software"),
10 * to deal in the Software without restriction, including without limitation
11 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
12 * and/or sell copies of the Software, and to permit persons to whom the
13 * Software is furnished to do so, subject to the following conditions:
14 *
15 * The above copyright notice and this permission notice (including the next
16 * paragraph) shall be included in all copies or substantial portions of the
17 * Software.
18 *
19 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
20 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
21 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
22 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
23 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
24 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
25 * DEALINGS IN THE SOFTWARE.
26 */
27
28 #include "tu_private.h"
29
30 #include <fcntl.h>
31 #include <libsync.h>
32 #include <stdbool.h>
33 #include <string.h>
34 #include <sys/mman.h>
35 #include <sys/sysinfo.h>
36 #include <unistd.h>
37 #include <xf86drm.h>
38
39 #include "compiler/glsl_types.h"
40 #include "util/debug.h"
41 #include "util/disk_cache.h"
42 #include "vk_format.h"
43 #include "vk_util.h"
44
45 #include "drm-uapi/msm_drm.h"
46
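/* Build the pipeline cache UUID from the Mesa build timestamp plus the GPU
 * family, so that cached shaders are invalidated whenever either the driver
 * build or the target GPU changes.
 */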
47 static int
48 tu_device_get_cache_uuid(uint16_t family, void *uuid)
49 {
50 uint32_t mesa_timestamp;
51 uint16_t f = family;
52 memset(uuid, 0, VK_UUID_SIZE);
53 if (!disk_cache_get_function_timestamp(tu_device_get_cache_uuid,
54 &mesa_timestamp))
55 return -1;
56
57 memcpy(uuid, &mesa_timestamp, 4);
58 memcpy((char *) uuid + 4, &f, 2);
59 snprintf((char *) uuid + 6, VK_UUID_SIZE - 10, "tu");
60 return 0;
61 }
62
63 static void
64 tu_get_driver_uuid(void *uuid)
65 {
66 memset(uuid, 0, VK_UUID_SIZE);
67 snprintf(uuid, VK_UUID_SIZE, "freedreno");
68 }
69
70 static void
71 tu_get_device_uuid(void *uuid)
72 {
73 memset(uuid, 0, VK_UUID_SIZE);
74 }
75
76 static VkResult
77 tu_bo_init(struct tu_device *dev,
78 struct tu_bo *bo,
79 uint32_t gem_handle,
80 uint64_t size)
81 {
82 uint64_t iova = tu_gem_info_iova(dev, gem_handle);
83 if (!iova)
84 return VK_ERROR_OUT_OF_DEVICE_MEMORY;
85
86 *bo = (struct tu_bo) {
87 .gem_handle = gem_handle,
88 .size = size,
89 .iova = iova,
90 };
91
92 return VK_SUCCESS;
93 }
94
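/* Allocate a fresh GEM buffer object of the requested size and record its
 * handle and iova in *bo.
 */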
95 VkResult
96 tu_bo_init_new(struct tu_device *dev, struct tu_bo *bo, uint64_t size)
97 {
98 /* TODO: Choose better flags. As of 2018-11-12, freedreno/drm/msm_bo.c
99 * always sets `flags = MSM_BO_WC`, and we copy that behavior here.
100 */
101 uint32_t gem_handle = tu_gem_new(dev, size, MSM_BO_WC);
102 if (!gem_handle)
103 return vk_error(dev->instance, VK_ERROR_OUT_OF_DEVICE_MEMORY);
104
105 VkResult result = tu_bo_init(dev, bo, gem_handle, size);
106 if (result != VK_SUCCESS) {
107 tu_gem_close(dev, gem_handle);
108 return vk_error(dev->instance, result);
109 }
110
111 return VK_SUCCESS;
112 }
113
114 VkResult
115 tu_bo_init_dmabuf(struct tu_device *dev,
116 struct tu_bo *bo,
117 uint64_t size,
118 int fd)
119 {
120 uint32_t gem_handle = tu_gem_import_dmabuf(dev, fd, size);
121 if (!gem_handle)
122 return vk_error(dev->instance, VK_ERROR_INVALID_EXTERNAL_HANDLE);
123
124 VkResult result = tu_bo_init(dev, bo, gem_handle, size);
125 if (result != VK_SUCCESS) {
126 tu_gem_close(dev, gem_handle);
127 return vk_error(dev->instance, result);
128 }
129
130 return VK_SUCCESS;
131 }
132
133 int
134 tu_bo_export_dmabuf(struct tu_device *dev, struct tu_bo *bo)
135 {
136 return tu_gem_export_dmabuf(dev, bo->gem_handle);
137 }
138
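/* Map the BO into CPU address space on first use; the mapping is cached in
 * bo->map and released in tu_bo_finish().
 */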
139 VkResult
140 tu_bo_map(struct tu_device *dev, struct tu_bo *bo)
141 {
142 if (bo->map)
143 return VK_SUCCESS;
144
145 uint64_t offset = tu_gem_info_offset(dev, bo->gem_handle);
146 if (!offset)
147 return vk_error(dev->instance, VK_ERROR_OUT_OF_DEVICE_MEMORY);
148
149 /* TODO: Should we use the wrapper os_mmap() like Freedreno does? */
150 void *map = mmap(0, bo->size, PROT_READ | PROT_WRITE, MAP_SHARED,
151 dev->physical_device->local_fd, offset);
152 if (map == MAP_FAILED)
153 return vk_error(dev->instance, VK_ERROR_MEMORY_MAP_FAILED);
154
155 bo->map = map;
156 return VK_SUCCESS;
157 }
158
159 void
160 tu_bo_finish(struct tu_device *dev, struct tu_bo *bo)
161 {
162 assert(bo->gem_handle);
163
164 if (bo->map)
165 munmap(bo->map, bo->size);
166
167 tu_gem_close(dev, bo->gem_handle);
168 }
169
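/* Open the render node, check that it is driven by a new-enough msm kernel
 * driver (1.3+ for MSM_INFO_IOVA), then query the GPU id and GMEM size and
 * fill in the per-GPU tiling parameters.
 */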
170 static VkResult
171 tu_physical_device_init(struct tu_physical_device *device,
172 struct tu_instance *instance,
173 drmDevicePtr drm_device)
174 {
175 const char *path = drm_device->nodes[DRM_NODE_RENDER];
176 VkResult result = VK_SUCCESS;
177 drmVersionPtr version;
178 int fd;
179 int master_fd = -1;
180
181 fd = open(path, O_RDWR | O_CLOEXEC);
182 if (fd < 0) {
183 return vk_errorf(instance, VK_ERROR_INCOMPATIBLE_DRIVER,
184 "failed to open device %s", path);
185 }
186
187 /* Version 1.3 added MSM_INFO_IOVA. */
188 const int min_version_major = 1;
189 const int min_version_minor = 3;
190
191 version = drmGetVersion(fd);
192 if (!version) {
193 close(fd);
194 return vk_errorf(instance, VK_ERROR_INCOMPATIBLE_DRIVER,
195 "failed to query kernel driver version for device %s",
196 path);
197 }
198
199 if (strcmp(version->name, "msm")) {
200 drmFreeVersion(version);
201 close(fd);
202 return vk_errorf(instance, VK_ERROR_INCOMPATIBLE_DRIVER,
203 "device %s does not use the msm kernel driver", path);
204 }
205
206 if (version->version_major != min_version_major ||
207 version->version_minor < min_version_minor) {
208 result = vk_errorf(instance, VK_ERROR_INCOMPATIBLE_DRIVER,
209 "kernel driver for device %s has version %d.%d, "
210 "but Vulkan requires version >= %d.%d",
211 path, version->version_major, version->version_minor,
212 min_version_major, min_version_minor);
213 drmFreeVersion(version);
214 close(fd);
215 return result;
216 }
217
218 drmFreeVersion(version);
219
220 if (instance->debug_flags & TU_DEBUG_STARTUP)
221 tu_logi("Found compatible device '%s'.", path);
222
223 device->_loader_data.loaderMagic = ICD_LOADER_MAGIC;
224 device->instance = instance;
225 assert(strlen(path) < ARRAY_SIZE(device->path));
226 strncpy(device->path, path, ARRAY_SIZE(device->path));
227
228 if (instance->enabled_extensions.KHR_display) {
229 master_fd =
230 open(drm_device->nodes[DRM_NODE_PRIMARY], O_RDWR | O_CLOEXEC);
231 if (master_fd >= 0) {
232 /* TODO: free master_fd if accel is not working? */
233 }
234 }
235
236 device->master_fd = master_fd;
237 device->local_fd = fd;
238
239 if (tu_drm_get_gpu_id(device, &device->gpu_id)) {
240 if (instance->debug_flags & TU_DEBUG_STARTUP)
241 tu_logi("Could not query the GPU ID");
242 result = vk_errorf(instance, VK_ERROR_INITIALIZATION_FAILED,
243 "could not get GPU ID");
244 goto fail;
245 }
246
247 if (tu_drm_get_gmem_size(device, &device->gmem_size)) {
248 if (instance->debug_flags & TU_DEBUG_STARTUP)
249 tu_logi("Could not query the GMEM size");
250 result = vk_errorf(instance, VK_ERROR_INITIALIZATION_FAILED,
251 "could not get GMEM size");
252 goto fail;
253 }
254
255 memset(device->name, 0, sizeof(device->name));
256 sprintf(device->name, "FD%d", device->gpu_id);
257
258 switch (device->gpu_id) {
259 case 630:
260 case 640:
261 device->tile_align_w = 32;
262 device->tile_align_h = 32;
263 break;
264 default:
265 result = vk_errorf(instance, VK_ERROR_INITIALIZATION_FAILED,
266 "device %s is unsupported", device->name);
267 goto fail;
268 }
269 if (tu_device_get_cache_uuid(device->gpu_id, device->cache_uuid)) {
270 result = vk_errorf(instance, VK_ERROR_INITIALIZATION_FAILED,
271 "cannot generate UUID");
272 goto fail;
273 }
274
275 /* The gpu id is already embedded in the uuid so we just pass "tu"
276 * when creating the cache.
277 */
278 char buf[VK_UUID_SIZE * 2 + 1];
279 disk_cache_format_hex_id(buf, device->cache_uuid, VK_UUID_SIZE * 2);
280 device->disk_cache = disk_cache_create(device->name, buf, 0);
281
282 fprintf(stderr, "WARNING: tu is not a conformant vulkan implementation, "
283 "testing use only.\n");
284
285 tu_get_driver_uuid(&device->driver_uuid);
286 tu_get_device_uuid(&device->device_uuid);
287
288 tu_fill_device_extension_table(device, &device->supported_extensions);
289
290 if (result != VK_SUCCESS) {
291 vk_error(instance, result);
292 goto fail;
293 }
294
295 result = tu_wsi_init(device);
296 if (result != VK_SUCCESS) {
297 vk_error(instance, result);
298 goto fail;
299 }
300
301 return VK_SUCCESS;
302
303 fail:
304 close(fd);
305 if (master_fd != -1)
306 close(master_fd);
307 return result;
308 }
309
310 static void
311 tu_physical_device_finish(struct tu_physical_device *device)
312 {
313 tu_wsi_finish(device);
314
315 disk_cache_destroy(device->disk_cache);
316 close(device->local_fd);
317 if (device->master_fd != -1)
318 close(device->master_fd);
319 }
320
321 static void *
322 default_alloc_func(void *pUserData,
323 size_t size,
324 size_t align,
325 VkSystemAllocationScope allocationScope)
326 {
327 return malloc(size);
328 }
329
330 static void *
331 default_realloc_func(void *pUserData,
332 void *pOriginal,
333 size_t size,
334 size_t align,
335 VkSystemAllocationScope allocationScope)
336 {
337 return realloc(pOriginal, size);
338 }
339
340 static void
341 default_free_func(void *pUserData, void *pMemory)
342 {
343 free(pMemory);
344 }
345
346 static const VkAllocationCallbacks default_alloc = {
347 .pUserData = NULL,
348 .pfnAllocation = default_alloc_func,
349 .pfnReallocation = default_realloc_func,
350 .pfnFree = default_free_func,
351 };
352
353 static const struct debug_control tu_debug_options[] = {
354 { "startup", TU_DEBUG_STARTUP },
355 { "nir", TU_DEBUG_NIR },
356 { "ir3", TU_DEBUG_IR3 },
357 { NULL, 0 }
358 };
359
360 const char *
361 tu_get_debug_option_name(int id)
362 {
363 assert(id < ARRAY_SIZE(tu_debug_options) - 1);
364 return tu_debug_options[id].string;
365 }
366
367 static int
368 tu_get_instance_extension_index(const char *name)
369 {
370 for (unsigned i = 0; i < TU_INSTANCE_EXTENSION_COUNT; ++i) {
371 if (strcmp(name, tu_instance_extensions[i].extensionName) == 0)
372 return i;
373 }
374 return -1;
375 }
376
377 VkResult
378 tu_CreateInstance(const VkInstanceCreateInfo *pCreateInfo,
379 const VkAllocationCallbacks *pAllocator,
380 VkInstance *pInstance)
381 {
382 struct tu_instance *instance;
383 VkResult result;
384
385 assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_INSTANCE_CREATE_INFO);
386
387 uint32_t client_version;
388 if (pCreateInfo->pApplicationInfo &&
389 pCreateInfo->pApplicationInfo->apiVersion != 0) {
390 client_version = pCreateInfo->pApplicationInfo->apiVersion;
391 } else {
392 tu_EnumerateInstanceVersion(&client_version);
393 }
394
395 instance = vk_zalloc2(&default_alloc, pAllocator, sizeof(*instance), 8,
396 VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
397 if (!instance)
398 return vk_error(NULL, VK_ERROR_OUT_OF_HOST_MEMORY);
399
400 instance->_loader_data.loaderMagic = ICD_LOADER_MAGIC;
401
402 if (pAllocator)
403 instance->alloc = *pAllocator;
404 else
405 instance->alloc = default_alloc;
406
407 instance->api_version = client_version;
408 instance->physical_device_count = -1;
409
410 instance->debug_flags =
411 parse_debug_string(getenv("TU_DEBUG"), tu_debug_options);
412
413 if (instance->debug_flags & TU_DEBUG_STARTUP)
414 tu_logi("Created an instance");
415
416 for (uint32_t i = 0; i < pCreateInfo->enabledExtensionCount; i++) {
417 const char *ext_name = pCreateInfo->ppEnabledExtensionNames[i];
418 int index = tu_get_instance_extension_index(ext_name);
419
420 if (index < 0 || !tu_supported_instance_extensions.extensions[index]) {
421 vk_free2(&default_alloc, pAllocator, instance);
422 return vk_error(instance, VK_ERROR_EXTENSION_NOT_PRESENT);
423 }
424
425 instance->enabled_extensions.extensions[index] = true;
426 }
427
428 result = vk_debug_report_instance_init(&instance->debug_report_callbacks);
429 if (result != VK_SUCCESS) {
430 vk_free2(&default_alloc, pAllocator, instance);
431 return vk_error(instance, result);
432 }
433
434 glsl_type_singleton_init_or_ref();
435
436 VG(VALGRIND_CREATE_MEMPOOL(instance, 0, false));
437
438 *pInstance = tu_instance_to_handle(instance);
439
440 return VK_SUCCESS;
441 }
442
443 void
444 tu_DestroyInstance(VkInstance _instance,
445 const VkAllocationCallbacks *pAllocator)
446 {
447 TU_FROM_HANDLE(tu_instance, instance, _instance);
448
449 if (!instance)
450 return;
451
452 for (int i = 0; i < instance->physical_device_count; ++i) {
453 tu_physical_device_finish(instance->physical_devices + i);
454 }
455
456 VG(VALGRIND_DESTROY_MEMPOOL(instance));
457
458 glsl_type_singleton_decref();
459
460 vk_debug_report_instance_destroy(&instance->debug_report_callbacks);
461
462 vk_free(&instance->alloc, instance);
463 }
464
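/* Scan the available DRM devices and initialize a physical device for every
 * platform-bus render node that is compatible with this driver.
 */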
465 static VkResult
466 tu_enumerate_devices(struct tu_instance *instance)
467 {
468 /* TODO: Check for more devices ? */
469 drmDevicePtr devices[8];
470 VkResult result = VK_ERROR_INCOMPATIBLE_DRIVER;
471 int max_devices;
472
473 instance->physical_device_count = 0;
474
475 max_devices = drmGetDevices2(0, devices, ARRAY_SIZE(devices));
476
477 if (instance->debug_flags & TU_DEBUG_STARTUP)
478 tu_logi("Found %d drm nodes", max_devices);
479
480 if (max_devices < 1)
481 return vk_error(instance, VK_ERROR_INCOMPATIBLE_DRIVER);
482
483 for (unsigned i = 0; i < (unsigned) max_devices; i++) {
484 if (devices[i]->available_nodes & 1 << DRM_NODE_RENDER &&
485 devices[i]->bustype == DRM_BUS_PLATFORM) {
486
487 result = tu_physical_device_init(
488 instance->physical_devices + instance->physical_device_count,
489 instance, devices[i]);
490 if (result == VK_SUCCESS)
491 ++instance->physical_device_count;
492 else if (result != VK_ERROR_INCOMPATIBLE_DRIVER)
493 break;
494 }
495 }
496 drmFreeDevices(devices, max_devices);
497
498 return result;
499 }
500
501 VkResult
502 tu_EnumeratePhysicalDevices(VkInstance _instance,
503 uint32_t *pPhysicalDeviceCount,
504 VkPhysicalDevice *pPhysicalDevices)
505 {
506 TU_FROM_HANDLE(tu_instance, instance, _instance);
507 VK_OUTARRAY_MAKE(out, pPhysicalDevices, pPhysicalDeviceCount);
508
509 VkResult result;
510
511 if (instance->physical_device_count < 0) {
512 result = tu_enumerate_devices(instance);
513 if (result != VK_SUCCESS && result != VK_ERROR_INCOMPATIBLE_DRIVER)
514 return result;
515 }
516
517 for (uint32_t i = 0; i < instance->physical_device_count; ++i) {
518 vk_outarray_append(&out, p)
519 {
520 *p = tu_physical_device_to_handle(instance->physical_devices + i);
521 }
522 }
523
524 return vk_outarray_status(&out);
525 }
526
527 VkResult
528 tu_EnumeratePhysicalDeviceGroups(
529 VkInstance _instance,
530 uint32_t *pPhysicalDeviceGroupCount,
531 VkPhysicalDeviceGroupProperties *pPhysicalDeviceGroupProperties)
532 {
533 TU_FROM_HANDLE(tu_instance, instance, _instance);
534 VK_OUTARRAY_MAKE(out, pPhysicalDeviceGroupProperties,
535 pPhysicalDeviceGroupCount);
536 VkResult result;
537
538 if (instance->physical_device_count < 0) {
539 result = tu_enumerate_devices(instance);
540 if (result != VK_SUCCESS && result != VK_ERROR_INCOMPATIBLE_DRIVER)
541 return result;
542 }
543
544 for (uint32_t i = 0; i < instance->physical_device_count; ++i) {
545 vk_outarray_append(&out, p)
546 {
547 p->physicalDeviceCount = 1;
548 p->physicalDevices[0] =
549 tu_physical_device_to_handle(instance->physical_devices + i);
550 p->subsetAllocation = false;
551 }
552 }
553
554 return vk_outarray_status(&out);
555 }
556
557 void
558 tu_GetPhysicalDeviceFeatures(VkPhysicalDevice physicalDevice,
559 VkPhysicalDeviceFeatures *pFeatures)
560 {
561 memset(pFeatures, 0, sizeof(*pFeatures));
562
563 *pFeatures = (VkPhysicalDeviceFeatures) {
564 .robustBufferAccess = false,
565 .fullDrawIndexUint32 = false,
566 .imageCubeArray = false,
567 .independentBlend = false,
568 .geometryShader = false,
569 .tessellationShader = false,
570 .sampleRateShading = false,
571 .dualSrcBlend = false,
572 .logicOp = false,
573 .multiDrawIndirect = false,
574 .drawIndirectFirstInstance = false,
575 .depthClamp = false,
576 .depthBiasClamp = false,
577 .fillModeNonSolid = false,
578 .depthBounds = false,
579 .wideLines = false,
580 .largePoints = false,
581 .alphaToOne = false,
582 .multiViewport = false,
583 .samplerAnisotropy = true,
584 .textureCompressionETC2 = true,
585 .textureCompressionASTC_LDR = true,
586 .textureCompressionBC = true,
587 .occlusionQueryPrecise = false,
588 .pipelineStatisticsQuery = false,
589 .vertexPipelineStoresAndAtomics = false,
590 .fragmentStoresAndAtomics = false,
591 .shaderTessellationAndGeometryPointSize = false,
592 .shaderImageGatherExtended = false,
593 .shaderStorageImageExtendedFormats = false,
594 .shaderStorageImageMultisample = false,
595 .shaderUniformBufferArrayDynamicIndexing = false,
596 .shaderSampledImageArrayDynamicIndexing = false,
597 .shaderStorageBufferArrayDynamicIndexing = false,
598 .shaderStorageImageArrayDynamicIndexing = false,
599 .shaderStorageImageReadWithoutFormat = false,
600 .shaderStorageImageWriteWithoutFormat = false,
601 .shaderClipDistance = false,
602 .shaderCullDistance = false,
603 .shaderFloat64 = false,
604 .shaderInt64 = false,
605 .shaderInt16 = false,
606 .sparseBinding = false,
607 .variableMultisampleRate = false,
608 .inheritedQueries = false,
609 };
610 }
611
612 void
613 tu_GetPhysicalDeviceFeatures2(VkPhysicalDevice physicalDevice,
614 VkPhysicalDeviceFeatures2 *pFeatures)
615 {
616 vk_foreach_struct(ext, pFeatures->pNext)
617 {
618 switch (ext->sType) {
619 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VARIABLE_POINTERS_FEATURES: {
620 VkPhysicalDeviceVariablePointersFeatures *features = (void *) ext;
621 features->variablePointersStorageBuffer = false;
622 features->variablePointers = false;
623 break;
624 }
625 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MULTIVIEW_FEATURES: {
626 VkPhysicalDeviceMultiviewFeatures *features =
627 (VkPhysicalDeviceMultiviewFeatures *) ext;
628 features->multiview = false;
629 features->multiviewGeometryShader = false;
630 features->multiviewTessellationShader = false;
631 break;
632 }
633 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_DRAW_PARAMETERS_FEATURES: {
634 VkPhysicalDeviceShaderDrawParametersFeatures *features =
635 (VkPhysicalDeviceShaderDrawParametersFeatures *) ext;
636 features->shaderDrawParameters = false;
637 break;
638 }
639 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROTECTED_MEMORY_FEATURES: {
640 VkPhysicalDeviceProtectedMemoryFeatures *features =
641 (VkPhysicalDeviceProtectedMemoryFeatures *) ext;
642 features->protectedMemory = false;
643 break;
644 }
645 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_16BIT_STORAGE_FEATURES: {
646 VkPhysicalDevice16BitStorageFeatures *features =
647 (VkPhysicalDevice16BitStorageFeatures *) ext;
648 features->storageBuffer16BitAccess = false;
649 features->uniformAndStorageBuffer16BitAccess = false;
650 features->storagePushConstant16 = false;
651 features->storageInputOutput16 = false;
652 break;
653 }
654 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SAMPLER_YCBCR_CONVERSION_FEATURES: {
655 VkPhysicalDeviceSamplerYcbcrConversionFeatures *features =
656 (VkPhysicalDeviceSamplerYcbcrConversionFeatures *) ext;
657 features->samplerYcbcrConversion = false;
658 break;
659 }
660 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DESCRIPTOR_INDEXING_FEATURES_EXT: {
661 VkPhysicalDeviceDescriptorIndexingFeaturesEXT *features =
662 (VkPhysicalDeviceDescriptorIndexingFeaturesEXT *) ext;
663 features->shaderInputAttachmentArrayDynamicIndexing = false;
664 features->shaderUniformTexelBufferArrayDynamicIndexing = false;
665 features->shaderStorageTexelBufferArrayDynamicIndexing = false;
666 features->shaderUniformBufferArrayNonUniformIndexing = false;
667 features->shaderSampledImageArrayNonUniformIndexing = false;
668 features->shaderStorageBufferArrayNonUniformIndexing = false;
669 features->shaderStorageImageArrayNonUniformIndexing = false;
670 features->shaderInputAttachmentArrayNonUniformIndexing = false;
671 features->shaderUniformTexelBufferArrayNonUniformIndexing = false;
672 features->shaderStorageTexelBufferArrayNonUniformIndexing = false;
673 features->descriptorBindingUniformBufferUpdateAfterBind = false;
674 features->descriptorBindingSampledImageUpdateAfterBind = false;
675 features->descriptorBindingStorageImageUpdateAfterBind = false;
676 features->descriptorBindingStorageBufferUpdateAfterBind = false;
677 features->descriptorBindingUniformTexelBufferUpdateAfterBind = false;
678 features->descriptorBindingStorageTexelBufferUpdateAfterBind = false;
679 features->descriptorBindingUpdateUnusedWhilePending = false;
680 features->descriptorBindingPartiallyBound = false;
681 features->descriptorBindingVariableDescriptorCount = false;
682 features->runtimeDescriptorArray = false;
683 break;
684 }
685 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_CONDITIONAL_RENDERING_FEATURES_EXT: {
686 VkPhysicalDeviceConditionalRenderingFeaturesEXT *features =
687 (VkPhysicalDeviceConditionalRenderingFeaturesEXT *) ext;
688 features->conditionalRendering = false;
689 features->inheritedConditionalRendering = false;
690 break;
691 }
692 default:
693 break;
694 }
695 }
696 return tu_GetPhysicalDeviceFeatures(physicalDevice, &pFeatures->features);
697 }
698
699 void
700 tu_GetPhysicalDeviceProperties(VkPhysicalDevice physicalDevice,
701 VkPhysicalDeviceProperties *pProperties)
702 {
703 TU_FROM_HANDLE(tu_physical_device, pdevice, physicalDevice);
704 VkSampleCountFlags sample_counts = VK_SAMPLE_COUNT_1_BIT |
705 VK_SAMPLE_COUNT_2_BIT | VK_SAMPLE_COUNT_4_BIT | VK_SAMPLE_COUNT_8_BIT;
706
707 /* Make sure that the entire descriptor set is addressable with a signed
708 * 32-bit int, so the sum of all limits scaled by descriptor size has to
709 * be at most 2 GiB. A combined image & sampler object counts as one of
710 * each. This limit is for the pipeline layout, not for the set layout, but
711 * there is no set limit, so we just set a pipeline limit. It is unlikely
712 * that any application will hit this soon. */
713 size_t max_descriptor_set_size =
714 ((1ull << 31) - 16 * MAX_DYNAMIC_BUFFERS) /
715 (32 /* uniform buffer, 32 due to potential space wasted on alignment */ +
716 32 /* storage buffer, 32 due to potential space wasted on alignment */ +
717 32 /* sampler, largest when combined with image */ +
718 64 /* sampled image */ + 64 /* storage image */);
719
720 VkPhysicalDeviceLimits limits = {
721 .maxImageDimension1D = (1 << 14),
722 .maxImageDimension2D = (1 << 14),
723 .maxImageDimension3D = (1 << 11),
724 .maxImageDimensionCube = (1 << 14),
725 .maxImageArrayLayers = (1 << 11),
726 .maxTexelBufferElements = 128 * 1024 * 1024,
727 .maxUniformBufferRange = UINT32_MAX,
728 .maxStorageBufferRange = UINT32_MAX,
729 .maxPushConstantsSize = MAX_PUSH_CONSTANTS_SIZE,
730 .maxMemoryAllocationCount = UINT32_MAX,
731 .maxSamplerAllocationCount = 64 * 1024,
732 .bufferImageGranularity = 64, /* A cache line */
733 .sparseAddressSpaceSize = 0xffffffffu, /* buffer max size */
734 .maxBoundDescriptorSets = MAX_SETS,
735 .maxPerStageDescriptorSamplers = max_descriptor_set_size,
736 .maxPerStageDescriptorUniformBuffers = max_descriptor_set_size,
737 .maxPerStageDescriptorStorageBuffers = max_descriptor_set_size,
738 .maxPerStageDescriptorSampledImages = max_descriptor_set_size,
739 .maxPerStageDescriptorStorageImages = max_descriptor_set_size,
740 .maxPerStageDescriptorInputAttachments = max_descriptor_set_size,
741 .maxPerStageResources = max_descriptor_set_size,
742 .maxDescriptorSetSamplers = max_descriptor_set_size,
743 .maxDescriptorSetUniformBuffers = max_descriptor_set_size,
744 .maxDescriptorSetUniformBuffersDynamic = MAX_DYNAMIC_UNIFORM_BUFFERS,
745 .maxDescriptorSetStorageBuffers = max_descriptor_set_size,
746 .maxDescriptorSetStorageBuffersDynamic = MAX_DYNAMIC_STORAGE_BUFFERS,
747 .maxDescriptorSetSampledImages = max_descriptor_set_size,
748 .maxDescriptorSetStorageImages = max_descriptor_set_size,
749 .maxDescriptorSetInputAttachments = max_descriptor_set_size,
750 .maxVertexInputAttributes = 32,
751 .maxVertexInputBindings = 32,
752 .maxVertexInputAttributeOffset = 2047,
753 .maxVertexInputBindingStride = 2048,
754 .maxVertexOutputComponents = 128,
755 .maxTessellationGenerationLevel = 64,
756 .maxTessellationPatchSize = 32,
757 .maxTessellationControlPerVertexInputComponents = 128,
758 .maxTessellationControlPerVertexOutputComponents = 128,
759 .maxTessellationControlPerPatchOutputComponents = 120,
760 .maxTessellationControlTotalOutputComponents = 4096,
761 .maxTessellationEvaluationInputComponents = 128,
762 .maxTessellationEvaluationOutputComponents = 128,
763 .maxGeometryShaderInvocations = 127,
764 .maxGeometryInputComponents = 64,
765 .maxGeometryOutputComponents = 128,
766 .maxGeometryOutputVertices = 256,
767 .maxGeometryTotalOutputComponents = 1024,
768 .maxFragmentInputComponents = 128,
769 .maxFragmentOutputAttachments = 8,
770 .maxFragmentDualSrcAttachments = 1,
771 .maxFragmentCombinedOutputResources = 8,
772 .maxComputeSharedMemorySize = 32768,
773 .maxComputeWorkGroupCount = { 65535, 65535, 65535 },
774 .maxComputeWorkGroupInvocations = 2048,
775 .maxComputeWorkGroupSize = { 2048, 2048, 2048 },
776 .subPixelPrecisionBits = 4 /* FIXME */,
777 .subTexelPrecisionBits = 4 /* FIXME */,
778 .mipmapPrecisionBits = 4 /* FIXME */,
779 .maxDrawIndexedIndexValue = UINT32_MAX,
780 .maxDrawIndirectCount = UINT32_MAX,
781 .maxSamplerLodBias = 16,
782 .maxSamplerAnisotropy = 16,
783 .maxViewports = MAX_VIEWPORTS,
784 .maxViewportDimensions = { (1 << 14), (1 << 14) },
785 .viewportBoundsRange = { INT16_MIN, INT16_MAX },
786 .viewportSubPixelBits = 8,
787 .minMemoryMapAlignment = 4096, /* A page */
788 .minTexelBufferOffsetAlignment = 1,
789 .minUniformBufferOffsetAlignment = 4,
790 .minStorageBufferOffsetAlignment = 4,
791 .minTexelOffset = -32,
792 .maxTexelOffset = 31,
793 .minTexelGatherOffset = -32,
794 .maxTexelGatherOffset = 31,
795 .minInterpolationOffset = -2,
796 .maxInterpolationOffset = 2,
797 .subPixelInterpolationOffsetBits = 8,
798 .maxFramebufferWidth = (1 << 14),
799 .maxFramebufferHeight = (1 << 14),
800 .maxFramebufferLayers = (1 << 10),
801 .framebufferColorSampleCounts = sample_counts,
802 .framebufferDepthSampleCounts = sample_counts,
803 .framebufferStencilSampleCounts = sample_counts,
804 .framebufferNoAttachmentsSampleCounts = sample_counts,
805 .maxColorAttachments = MAX_RTS,
806 .sampledImageColorSampleCounts = sample_counts,
807 .sampledImageIntegerSampleCounts = VK_SAMPLE_COUNT_1_BIT,
808 .sampledImageDepthSampleCounts = sample_counts,
809 .sampledImageStencilSampleCounts = sample_counts,
810 .storageImageSampleCounts = VK_SAMPLE_COUNT_1_BIT,
811 .maxSampleMaskWords = 1,
812 .timestampComputeAndGraphics = false, /* FINISHME */
813 .timestampPeriod = 1,
814 .maxClipDistances = 8,
815 .maxCullDistances = 8,
816 .maxCombinedClipAndCullDistances = 8,
817 .discreteQueuePriorities = 1,
818 .pointSizeRange = { 0.125, 255.875 },
819 .lineWidthRange = { 0.0, 7.9921875 },
820 .pointSizeGranularity = (1.0 / 8.0),
821 .lineWidthGranularity = (1.0 / 128.0),
822 .strictLines = false, /* FINISHME */
823 .standardSampleLocations = true,
824 .optimalBufferCopyOffsetAlignment = 128,
825 .optimalBufferCopyRowPitchAlignment = 128,
826 .nonCoherentAtomSize = 64,
827 };
828
829 *pProperties = (VkPhysicalDeviceProperties) {
830 .apiVersion = tu_physical_device_api_version(pdevice),
831 .driverVersion = vk_get_driver_version(),
832 .vendorID = 0, /* TODO */
833 .deviceID = 0,
834 .deviceType = VK_PHYSICAL_DEVICE_TYPE_INTEGRATED_GPU,
835 .limits = limits,
836 .sparseProperties = { 0 },
837 };
838
839 strcpy(pProperties->deviceName, pdevice->name);
840 memcpy(pProperties->pipelineCacheUUID, pdevice->cache_uuid, VK_UUID_SIZE);
841 }
842
843 void
844 tu_GetPhysicalDeviceProperties2(VkPhysicalDevice physicalDevice,
845 VkPhysicalDeviceProperties2 *pProperties)
846 {
847 TU_FROM_HANDLE(tu_physical_device, pdevice, physicalDevice);
848 tu_GetPhysicalDeviceProperties(physicalDevice, &pProperties->properties);
849
850 vk_foreach_struct(ext, pProperties->pNext)
851 {
852 switch (ext->sType) {
853 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PUSH_DESCRIPTOR_PROPERTIES_KHR: {
854 VkPhysicalDevicePushDescriptorPropertiesKHR *properties =
855 (VkPhysicalDevicePushDescriptorPropertiesKHR *) ext;
856 properties->maxPushDescriptors = MAX_PUSH_DESCRIPTORS;
857 break;
858 }
859 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_ID_PROPERTIES: {
860 VkPhysicalDeviceIDProperties *properties =
861 (VkPhysicalDeviceIDProperties *) ext;
862 memcpy(properties->driverUUID, pdevice->driver_uuid, VK_UUID_SIZE);
863 memcpy(properties->deviceUUID, pdevice->device_uuid, VK_UUID_SIZE);
864 properties->deviceLUIDValid = false;
865 break;
866 }
867 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MULTIVIEW_PROPERTIES: {
868 VkPhysicalDeviceMultiviewProperties *properties =
869 (VkPhysicalDeviceMultiviewProperties *) ext;
870 properties->maxMultiviewViewCount = MAX_VIEWS;
871 properties->maxMultiviewInstanceIndex = INT_MAX;
872 break;
873 }
874 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_POINT_CLIPPING_PROPERTIES: {
875 VkPhysicalDevicePointClippingProperties *properties =
876 (VkPhysicalDevicePointClippingProperties *) ext;
877 properties->pointClippingBehavior =
878 VK_POINT_CLIPPING_BEHAVIOR_ALL_CLIP_PLANES;
879 break;
880 }
881 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MAINTENANCE_3_PROPERTIES: {
882 VkPhysicalDeviceMaintenance3Properties *properties =
883 (VkPhysicalDeviceMaintenance3Properties *) ext;
884 /* Make sure everything is addressable by a signed 32-bit int, and
885 * our largest descriptors are 96 bytes. */
886 properties->maxPerSetDescriptors = (1ull << 31) / 96;
887 /* Our buffer size fields allow only this much */
888 properties->maxMemoryAllocationSize = 0xFFFFFFFFull;
889 break;
890 }
891 default:
892 break;
893 }
894 }
895 }
896
897 static const VkQueueFamilyProperties tu_queue_family_properties = {
898 .queueFlags =
899 VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT | VK_QUEUE_TRANSFER_BIT,
900 .queueCount = 1,
901 .timestampValidBits = 0, /* FINISHME */
902 .minImageTransferGranularity = { 1, 1, 1 },
903 };
904
905 void
906 tu_GetPhysicalDeviceQueueFamilyProperties(
907 VkPhysicalDevice physicalDevice,
908 uint32_t *pQueueFamilyPropertyCount,
909 VkQueueFamilyProperties *pQueueFamilyProperties)
910 {
911 VK_OUTARRAY_MAKE(out, pQueueFamilyProperties, pQueueFamilyPropertyCount);
912
913 vk_outarray_append(&out, p) { *p = tu_queue_family_properties; }
914 }
915
916 void
917 tu_GetPhysicalDeviceQueueFamilyProperties2(
918 VkPhysicalDevice physicalDevice,
919 uint32_t *pQueueFamilyPropertyCount,
920 VkQueueFamilyProperties2 *pQueueFamilyProperties)
921 {
922 VK_OUTARRAY_MAKE(out, pQueueFamilyProperties, pQueueFamilyPropertyCount);
923
924 vk_outarray_append(&out, p)
925 {
926 p->queueFamilyProperties = tu_queue_family_properties;
927 }
928 }
929
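/* Expose a single memory heap sized as a fraction of system RAM: half when
 * the system has 4 GiB or less, three quarters otherwise.
 */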
930 static uint64_t
931 tu_get_system_heap_size(void)
932 {
933 struct sysinfo info;
934 sysinfo(&info);
935
936 uint64_t total_ram = (uint64_t) info.totalram * (uint64_t) info.mem_unit;
937
938 /* We don't want to burn too much ram with the GPU. If the user has 4GiB
939 * or less, we use at most half. If they have more than 4GiB, we use 3/4.
940 */
941 uint64_t available_ram;
942 if (total_ram <= 4ull * 1024ull * 1024ull * 1024ull)
943 available_ram = total_ram / 2;
944 else
945 available_ram = total_ram * 3 / 4;
946
947 return available_ram;
948 }
949
950 void
951 tu_GetPhysicalDeviceMemoryProperties(
952 VkPhysicalDevice physicalDevice,
953 VkPhysicalDeviceMemoryProperties *pMemoryProperties)
954 {
955 pMemoryProperties->memoryHeapCount = 1;
956 pMemoryProperties->memoryHeaps[0].size = tu_get_system_heap_size();
957 pMemoryProperties->memoryHeaps[0].flags = VK_MEMORY_HEAP_DEVICE_LOCAL_BIT;
958
959 pMemoryProperties->memoryTypeCount = 1;
960 pMemoryProperties->memoryTypes[0].propertyFlags =
961 VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT |
962 VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT |
963 VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
964 pMemoryProperties->memoryTypes[0].heapIndex = 0;
965 }
966
967 void
968 tu_GetPhysicalDeviceMemoryProperties2(
969 VkPhysicalDevice physicalDevice,
970 VkPhysicalDeviceMemoryProperties2 *pMemoryProperties)
971 {
972 return tu_GetPhysicalDeviceMemoryProperties(
973 physicalDevice, &pMemoryProperties->memoryProperties);
974 }
975
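/* Create the kernel submit queue backing this VkQueue along with the fence
 * used to track its most recent submission.
 */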
976 static VkResult
977 tu_queue_init(struct tu_device *device,
978 struct tu_queue *queue,
979 uint32_t queue_family_index,
980 int idx,
981 VkDeviceQueueCreateFlags flags)
982 {
983 queue->_loader_data.loaderMagic = ICD_LOADER_MAGIC;
984 queue->device = device;
985 queue->queue_family_index = queue_family_index;
986 queue->queue_idx = idx;
987 queue->flags = flags;
988
989 int ret = tu_drm_submitqueue_new(device, 0, &queue->msm_queue_id);
990 if (ret)
991 return VK_ERROR_INITIALIZATION_FAILED;
992
993 tu_fence_init(&queue->submit_fence, false);
994
995 return VK_SUCCESS;
996 }
997
998 static void
999 tu_queue_finish(struct tu_queue *queue)
1000 {
1001 tu_fence_finish(&queue->submit_fence);
1002 tu_drm_submitqueue_close(queue->device, queue->msm_queue_id);
1003 }
1004
1005 static int
1006 tu_get_device_extension_index(const char *name)
1007 {
1008 for (unsigned i = 0; i < TU_DEVICE_EXTENSION_COUNT; ++i) {
1009 if (strcmp(name, tu_device_extensions[i].extensionName) == 0)
1010 return i;
1011 }
1012 return -1;
1013 }
1014
1015 VkResult
1016 tu_CreateDevice(VkPhysicalDevice physicalDevice,
1017 const VkDeviceCreateInfo *pCreateInfo,
1018 const VkAllocationCallbacks *pAllocator,
1019 VkDevice *pDevice)
1020 {
1021 TU_FROM_HANDLE(tu_physical_device, physical_device, physicalDevice);
1022 VkResult result;
1023 struct tu_device *device;
1024
1025 /* Check enabled features */
1026 if (pCreateInfo->pEnabledFeatures) {
1027 VkPhysicalDeviceFeatures supported_features;
1028 tu_GetPhysicalDeviceFeatures(physicalDevice, &supported_features);
1029 VkBool32 *supported_feature = (VkBool32 *) &supported_features;
1030 VkBool32 *enabled_feature = (VkBool32 *) pCreateInfo->pEnabledFeatures;
1031 unsigned num_features =
1032 sizeof(VkPhysicalDeviceFeatures) / sizeof(VkBool32);
1033 for (uint32_t i = 0; i < num_features; i++) {
1034 if (enabled_feature[i] && !supported_feature[i])
1035 return vk_error(physical_device->instance,
1036 VK_ERROR_FEATURE_NOT_PRESENT);
1037 }
1038 }
1039
1040 device = vk_zalloc2(&physical_device->instance->alloc, pAllocator,
1041 sizeof(*device), 8, VK_SYSTEM_ALLOCATION_SCOPE_DEVICE);
1042 if (!device)
1043 return vk_error(physical_device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
1044
1045 device->_loader_data.loaderMagic = ICD_LOADER_MAGIC;
1046 device->instance = physical_device->instance;
1047 device->physical_device = physical_device;
1048
1049 if (pAllocator)
1050 device->alloc = *pAllocator;
1051 else
1052 device->alloc = physical_device->instance->alloc;
1053
1054 for (uint32_t i = 0; i < pCreateInfo->enabledExtensionCount; i++) {
1055 const char *ext_name = pCreateInfo->ppEnabledExtensionNames[i];
1056 int index = tu_get_device_extension_index(ext_name);
1057 if (index < 0 ||
1058 !physical_device->supported_extensions.extensions[index]) {
1059 vk_free(&device->alloc, device);
1060 return vk_error(physical_device->instance,
1061 VK_ERROR_EXTENSION_NOT_PRESENT);
1062 }
1063
1064 device->enabled_extensions.extensions[index] = true;
1065 }
1066
1067 for (unsigned i = 0; i < pCreateInfo->queueCreateInfoCount; i++) {
1068 const VkDeviceQueueCreateInfo *queue_create =
1069 &pCreateInfo->pQueueCreateInfos[i];
1070 uint32_t qfi = queue_create->queueFamilyIndex;
1071 device->queues[qfi] = vk_alloc(
1072 &device->alloc, queue_create->queueCount * sizeof(struct tu_queue),
1073 8, VK_SYSTEM_ALLOCATION_SCOPE_DEVICE);
1074 if (!device->queues[qfi]) {
1075 result = VK_ERROR_OUT_OF_HOST_MEMORY;
1076 goto fail;
1077 }
1078
1079 memset(device->queues[qfi], 0,
1080 queue_create->queueCount * sizeof(struct tu_queue));
1081
1082 device->queue_count[qfi] = queue_create->queueCount;
1083
1084 for (unsigned q = 0; q < queue_create->queueCount; q++) {
1085 result = tu_queue_init(device, &device->queues[qfi][q], qfi, q,
1086 queue_create->flags);
1087 if (result != VK_SUCCESS)
1088 goto fail;
1089 }
1090 }
1091
1092 device->compiler = ir3_compiler_create(NULL, physical_device->gpu_id);
1093 if (!device->compiler)
1094 goto fail;
1095
1096 VkPipelineCacheCreateInfo ci;
1097 ci.sType = VK_STRUCTURE_TYPE_PIPELINE_CACHE_CREATE_INFO;
1098 ci.pNext = NULL;
1099 ci.flags = 0;
1100 ci.pInitialData = NULL;
1101 ci.initialDataSize = 0;
1102 VkPipelineCache pc;
1103 result =
1104 tu_CreatePipelineCache(tu_device_to_handle(device), &ci, NULL, &pc);
1105 if (result != VK_SUCCESS)
1106 goto fail;
1107
1108 device->mem_cache = tu_pipeline_cache_from_handle(pc);
1109
1110 *pDevice = tu_device_to_handle(device);
1111 return VK_SUCCESS;
1112
1113 fail:
1114 for (unsigned i = 0; i < TU_MAX_QUEUE_FAMILIES; i++) {
1115 for (unsigned q = 0; q < device->queue_count[i]; q++)
1116 tu_queue_finish(&device->queues[i][q]);
1117 if (device->queue_count[i])
1118 vk_free(&device->alloc, device->queues[i]);
1119 }
1120
1121 if (device->compiler)
1122 ralloc_free(device->compiler);
1123
1124 vk_free(&device->alloc, device);
1125 return result;
1126 }
1127
1128 void
1129 tu_DestroyDevice(VkDevice _device, const VkAllocationCallbacks *pAllocator)
1130 {
1131 TU_FROM_HANDLE(tu_device, device, _device);
1132
1133 if (!device)
1134 return;
1135
1136 for (unsigned i = 0; i < TU_MAX_QUEUE_FAMILIES; i++) {
1137 for (unsigned q = 0; q < device->queue_count[i]; q++)
1138 tu_queue_finish(&device->queues[i][q]);
1139 if (device->queue_count[i])
1140 vk_free(&device->alloc, device->queues[i]);
1141 }
1142
1143 /* the compiler does not use pAllocator */
1144 ralloc_free(device->compiler);
1145
1146 VkPipelineCache pc = tu_pipeline_cache_to_handle(device->mem_cache);
1147 tu_DestroyPipelineCache(tu_device_to_handle(device), pc, NULL);
1148
1149 vk_free(&device->alloc, device);
1150 }
1151
1152 VkResult
1153 tu_EnumerateInstanceLayerProperties(uint32_t *pPropertyCount,
1154 VkLayerProperties *pProperties)
1155 {
1156 *pPropertyCount = 0;
1157 return VK_SUCCESS;
1158 }
1159
1160 VkResult
1161 tu_EnumerateDeviceLayerProperties(VkPhysicalDevice physicalDevice,
1162 uint32_t *pPropertyCount,
1163 VkLayerProperties *pProperties)
1164 {
1165 *pPropertyCount = 0;
1166 return VK_SUCCESS;
1167 }
1168
1169 void
1170 tu_GetDeviceQueue2(VkDevice _device,
1171 const VkDeviceQueueInfo2 *pQueueInfo,
1172 VkQueue *pQueue)
1173 {
1174 TU_FROM_HANDLE(tu_device, device, _device);
1175 struct tu_queue *queue;
1176
1177 queue =
1178 &device->queues[pQueueInfo->queueFamilyIndex][pQueueInfo->queueIndex];
1179 if (pQueueInfo->flags != queue->flags) {
1180 /* From the Vulkan 1.1.70 spec:
1181 *
1182 * "The queue returned by vkGetDeviceQueue2 must have the same
1183 * flags value from this structure as that used at device
1184 * creation time in a VkDeviceQueueCreateInfo instance. If no
1185 * matching flags were specified at device creation time then
1186 * pQueue will return VK_NULL_HANDLE."
1187 */
1188 *pQueue = VK_NULL_HANDLE;
1189 return;
1190 }
1191
1192 *pQueue = tu_queue_to_handle(queue);
1193 }
1194
1195 void
1196 tu_GetDeviceQueue(VkDevice _device,
1197 uint32_t queueFamilyIndex,
1198 uint32_t queueIndex,
1199 VkQueue *pQueue)
1200 {
1201 const VkDeviceQueueInfo2 info =
1202 (VkDeviceQueueInfo2) { .sType = VK_STRUCTURE_TYPE_DEVICE_QUEUE_INFO_2,
1203 .queueFamilyIndex = queueFamilyIndex,
1204 .queueIndex = queueIndex };
1205
1206 tu_GetDeviceQueue2(_device, &info, pQueue);
1207 }
1208
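/* Flatten every command buffer of each VkSubmitInfo into an array of
 * drm_msm_gem_submit_cmd entries, merge their BO lists, and hand the batch
 * to the kernel via DRM_MSM_GEM_SUBMIT. A fence fd is requested only for the
 * last submit, since queue execution is serialized.
 */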
1209 VkResult
1210 tu_QueueSubmit(VkQueue _queue,
1211 uint32_t submitCount,
1212 const VkSubmitInfo *pSubmits,
1213 VkFence _fence)
1214 {
1215 TU_FROM_HANDLE(tu_queue, queue, _queue);
1216
1217 for (uint32_t i = 0; i < submitCount; ++i) {
1218 const VkSubmitInfo *submit = pSubmits + i;
1219 const bool last_submit = (i == submitCount - 1);
1220 struct tu_bo_list bo_list;
1221 tu_bo_list_init(&bo_list);
1222
1223 uint32_t entry_count = 0;
1224 for (uint32_t j = 0; j < submit->commandBufferCount; ++j) {
1225 TU_FROM_HANDLE(tu_cmd_buffer, cmdbuf, submit->pCommandBuffers[j]);
1226 entry_count += cmdbuf->cs.entry_count;
1227 }
1228
1229 struct drm_msm_gem_submit_cmd cmds[entry_count];
1230 uint32_t entry_idx = 0;
1231 for (uint32_t j = 0; j < submit->commandBufferCount; ++j) {
1232 TU_FROM_HANDLE(tu_cmd_buffer, cmdbuf, submit->pCommandBuffers[j]);
1233 struct tu_cs *cs = &cmdbuf->cs;
1234 for (unsigned i = 0; i < cs->entry_count; ++i, ++entry_idx) {
1235 cmds[entry_idx].type = MSM_SUBMIT_CMD_BUF;
1236 cmds[entry_idx].submit_idx =
1237 tu_bo_list_add(&bo_list, cs->entries[i].bo,
1238 MSM_SUBMIT_BO_READ | MSM_SUBMIT_BO_DUMP);
1239 cmds[entry_idx].submit_offset = cs->entries[i].offset;
1240 cmds[entry_idx].size = cs->entries[i].size;
1241 cmds[entry_idx].pad = 0;
1242 cmds[entry_idx].nr_relocs = 0;
1243 cmds[entry_idx].relocs = 0;
1244 }
1245
1246 tu_bo_list_merge(&bo_list, &cmdbuf->bo_list);
1247 }
1248
1249 uint32_t flags = MSM_PIPE_3D0;
1250 if (last_submit) {
1251 flags |= MSM_SUBMIT_FENCE_FD_OUT;
1252 }
1253
1254 struct drm_msm_gem_submit req = {
1255 .flags = flags,
1256 .queueid = queue->msm_queue_id,
1257 .bos = (uint64_t)(uintptr_t) bo_list.bo_infos,
1258 .nr_bos = bo_list.count,
1259 .cmds = (uint64_t)(uintptr_t)cmds,
1260 .nr_cmds = entry_count,
1261 };
1262
1263 int ret = drmCommandWriteRead(queue->device->physical_device->local_fd,
1264 DRM_MSM_GEM_SUBMIT,
1265 &req, sizeof(req));
1266 if (ret) {
1267 fprintf(stderr, "submit failed: %s\n", strerror(errno));
1268 abort();
1269 }
1270
1271 tu_bo_list_destroy(&bo_list);
1272
1273 if (last_submit) {
1274 /* no need to merge fences as queue execution is serialized */
1275 tu_fence_update_fd(&queue->submit_fence, req.fence_fd);
1276 }
1277 }
1278
1279 if (_fence != VK_NULL_HANDLE) {
1280 TU_FROM_HANDLE(tu_fence, fence, _fence);
1281 tu_fence_copy(fence, &queue->submit_fence);
1282 }
1283
1284 return VK_SUCCESS;
1285 }
1286
1287 VkResult
1288 tu_QueueWaitIdle(VkQueue _queue)
1289 {
1290 TU_FROM_HANDLE(tu_queue, queue, _queue);
1291
1292 tu_fence_wait_idle(&queue->submit_fence);
1293
1294 return VK_SUCCESS;
1295 }
1296
1297 VkResult
1298 tu_DeviceWaitIdle(VkDevice _device)
1299 {
1300 TU_FROM_HANDLE(tu_device, device, _device);
1301
1302 for (unsigned i = 0; i < TU_MAX_QUEUE_FAMILIES; i++) {
1303 for (unsigned q = 0; q < device->queue_count[i]; q++) {
1304 tu_QueueWaitIdle(tu_queue_to_handle(&device->queues[i][q]));
1305 }
1306 }
1307 return VK_SUCCESS;
1308 }
1309
1310 VkResult
1311 tu_EnumerateInstanceExtensionProperties(const char *pLayerName,
1312 uint32_t *pPropertyCount,
1313 VkExtensionProperties *pProperties)
1314 {
1315 VK_OUTARRAY_MAKE(out, pProperties, pPropertyCount);
1316
1317 /* We support no layers */
1318 if (pLayerName)
1319 return vk_error(NULL, VK_ERROR_LAYER_NOT_PRESENT);
1320
1321 for (int i = 0; i < TU_INSTANCE_EXTENSION_COUNT; i++) {
1322 if (tu_supported_instance_extensions.extensions[i]) {
1323 vk_outarray_append(&out, prop) { *prop = tu_instance_extensions[i]; }
1324 }
1325 }
1326
1327 return vk_outarray_status(&out);
1328 }
1329
1330 VkResult
1331 tu_EnumerateDeviceExtensionProperties(VkPhysicalDevice physicalDevice,
1332 const char *pLayerName,
1333 uint32_t *pPropertyCount,
1334 VkExtensionProperties *pProperties)
1335 {
1336 /* We support no layers */
1337 TU_FROM_HANDLE(tu_physical_device, device, physicalDevice);
1338 VK_OUTARRAY_MAKE(out, pProperties, pPropertyCount);
1339
1340 /* We support no layers */
1341 if (pLayerName)
1342 return vk_error(NULL, VK_ERROR_LAYER_NOT_PRESENT);
1343
1344 for (int i = 0; i < TU_DEVICE_EXTENSION_COUNT; i++) {
1345 if (device->supported_extensions.extensions[i]) {
1346 vk_outarray_append(&out, prop) { *prop = tu_device_extensions[i]; }
1347 }
1348 }
1349
1350 return vk_outarray_status(&out);
1351 }
1352
1353 PFN_vkVoidFunction
1354 tu_GetInstanceProcAddr(VkInstance _instance, const char *pName)
1355 {
1356 TU_FROM_HANDLE(tu_instance, instance, _instance);
1357
1358 return tu_lookup_entrypoint_checked(
1359 pName, instance ? instance->api_version : 0,
1360 instance ? &instance->enabled_extensions : NULL, NULL);
1361 }
1362
1363 /* The loader wants us to expose a second GetInstanceProcAddr function
1364 * to work around certain LD_PRELOAD issues seen in apps.
1365 */
1366 PUBLIC
1367 VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL
1368 vk_icdGetInstanceProcAddr(VkInstance instance, const char *pName);
1369
1370 PUBLIC
1371 VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL
1372 vk_icdGetInstanceProcAddr(VkInstance instance, const char *pName)
1373 {
1374 return tu_GetInstanceProcAddr(instance, pName);
1375 }
1376
1377 PFN_vkVoidFunction
1378 tu_GetDeviceProcAddr(VkDevice _device, const char *pName)
1379 {
1380 TU_FROM_HANDLE(tu_device, device, _device);
1381
1382 return tu_lookup_entrypoint_checked(pName, device->instance->api_version,
1383 &device->instance->enabled_extensions,
1384 &device->enabled_extensions);
1385 }
1386
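/* Back a VkDeviceMemory allocation with a single GEM BO, either freshly
 * allocated or imported from an opaque/dma-buf fd.
 */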
1387 static VkResult
1388 tu_alloc_memory(struct tu_device *device,
1389 const VkMemoryAllocateInfo *pAllocateInfo,
1390 const VkAllocationCallbacks *pAllocator,
1391 VkDeviceMemory *pMem)
1392 {
1393 struct tu_device_memory *mem;
1394 VkResult result;
1395
1396 assert(pAllocateInfo->sType == VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO);
1397
1398 if (pAllocateInfo->allocationSize == 0) {
1399 /* Apparently, this is allowed */
1400 *pMem = VK_NULL_HANDLE;
1401 return VK_SUCCESS;
1402 }
1403
1404 mem = vk_alloc2(&device->alloc, pAllocator, sizeof(*mem), 8,
1405 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
1406 if (mem == NULL)
1407 return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
1408
1409 const VkImportMemoryFdInfoKHR *fd_info =
1410 vk_find_struct_const(pAllocateInfo->pNext, IMPORT_MEMORY_FD_INFO_KHR);
1411 if (fd_info && !fd_info->handleType)
1412 fd_info = NULL;
1413
1414 if (fd_info) {
1415 assert(fd_info->handleType ==
1416 VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT ||
1417 fd_info->handleType ==
1418 VK_EXTERNAL_MEMORY_HANDLE_TYPE_DMA_BUF_BIT_EXT);
1419
1420 /*
1421 * TODO Importing the same fd twice gives us the same handle without
1422 * reference counting. We need to maintain a per-instance handle-to-bo
1423 * table and add reference count to tu_bo.
1424 */
1425 result = tu_bo_init_dmabuf(device, &mem->bo,
1426 pAllocateInfo->allocationSize, fd_info->fd);
1427 if (result == VK_SUCCESS) {
1428 /* take ownership and close the fd */
1429 close(fd_info->fd);
1430 }
1431 } else {
1432 result =
1433 tu_bo_init_new(device, &mem->bo, pAllocateInfo->allocationSize);
1434 }
1435
1436 if (result != VK_SUCCESS) {
1437 vk_free2(&device->alloc, pAllocator, mem);
1438 return result;
1439 }
1440
1441 mem->size = pAllocateInfo->allocationSize;
1442 mem->type_index = pAllocateInfo->memoryTypeIndex;
1443
1444 mem->map = NULL;
1445 mem->user_ptr = NULL;
1446
1447 *pMem = tu_device_memory_to_handle(mem);
1448
1449 return VK_SUCCESS;
1450 }
1451
1452 VkResult
1453 tu_AllocateMemory(VkDevice _device,
1454 const VkMemoryAllocateInfo *pAllocateInfo,
1455 const VkAllocationCallbacks *pAllocator,
1456 VkDeviceMemory *pMem)
1457 {
1458 TU_FROM_HANDLE(tu_device, device, _device);
1459 return tu_alloc_memory(device, pAllocateInfo, pAllocator, pMem);
1460 }
1461
1462 void
1463 tu_FreeMemory(VkDevice _device,
1464 VkDeviceMemory _mem,
1465 const VkAllocationCallbacks *pAllocator)
1466 {
1467 TU_FROM_HANDLE(tu_device, device, _device);
1468 TU_FROM_HANDLE(tu_device_memory, mem, _mem);
1469
1470 if (mem == NULL)
1471 return;
1472
1473 tu_bo_finish(device, &mem->bo);
1474 vk_free2(&device->alloc, pAllocator, mem);
1475 }
1476
1477 VkResult
1478 tu_MapMemory(VkDevice _device,
1479 VkDeviceMemory _memory,
1480 VkDeviceSize offset,
1481 VkDeviceSize size,
1482 VkMemoryMapFlags flags,
1483 void **ppData)
1484 {
1485 TU_FROM_HANDLE(tu_device, device, _device);
1486 TU_FROM_HANDLE(tu_device_memory, mem, _memory);
1487 VkResult result;
1488
1489 if (mem == NULL) {
1490 *ppData = NULL;
1491 return VK_SUCCESS;
1492 }
1493
1494 if (mem->user_ptr) {
1495 *ppData = mem->user_ptr;
1496 } else if (!mem->map) {
1497 result = tu_bo_map(device, &mem->bo);
1498 if (result != VK_SUCCESS)
1499 return result;
1500 *ppData = mem->map = mem->bo.map;
1501 } else
1502 *ppData = mem->map;
1503
1504 if (*ppData) {
1505 *ppData += offset;
1506 return VK_SUCCESS;
1507 }
1508
1509 return vk_error(device->instance, VK_ERROR_MEMORY_MAP_FAILED);
1510 }
1511
1512 void
1513 tu_UnmapMemory(VkDevice _device, VkDeviceMemory _memory)
1514 {
1515 /* I do not see any unmapping done by the freedreno Gallium driver. */
1516 }
1517
1518 VkResult
1519 tu_FlushMappedMemoryRanges(VkDevice _device,
1520 uint32_t memoryRangeCount,
1521 const VkMappedMemoryRange *pMemoryRanges)
1522 {
1523 return VK_SUCCESS;
1524 }
1525
1526 VkResult
1527 tu_InvalidateMappedMemoryRanges(VkDevice _device,
1528 uint32_t memoryRangeCount,
1529 const VkMappedMemoryRange *pMemoryRanges)
1530 {
1531 return VK_SUCCESS;
1532 }
1533
1534 void
1535 tu_GetBufferMemoryRequirements(VkDevice _device,
1536 VkBuffer _buffer,
1537 VkMemoryRequirements *pMemoryRequirements)
1538 {
1539 TU_FROM_HANDLE(tu_buffer, buffer, _buffer);
1540
1541 pMemoryRequirements->memoryTypeBits = 1;
1542 pMemoryRequirements->alignment = 16;
1543 pMemoryRequirements->size =
1544 align64(buffer->size, pMemoryRequirements->alignment);
1545 }
1546
1547 void
1548 tu_GetBufferMemoryRequirements2(
1549 VkDevice device,
1550 const VkBufferMemoryRequirementsInfo2 *pInfo,
1551 VkMemoryRequirements2 *pMemoryRequirements)
1552 {
1553 tu_GetBufferMemoryRequirements(device, pInfo->buffer,
1554 &pMemoryRequirements->memoryRequirements);
1555 }
1556
1557 void
1558 tu_GetImageMemoryRequirements(VkDevice _device,
1559 VkImage _image,
1560 VkMemoryRequirements *pMemoryRequirements)
1561 {
1562 TU_FROM_HANDLE(tu_image, image, _image);
1563
1564 pMemoryRequirements->memoryTypeBits = 1;
1565 pMemoryRequirements->size = image->size;
1566 pMemoryRequirements->alignment = image->alignment;
1567 }
1568
1569 void
1570 tu_GetImageMemoryRequirements2(VkDevice device,
1571 const VkImageMemoryRequirementsInfo2 *pInfo,
1572 VkMemoryRequirements2 *pMemoryRequirements)
1573 {
1574 tu_GetImageMemoryRequirements(device, pInfo->image,
1575 &pMemoryRequirements->memoryRequirements);
1576 }
1577
1578 void
1579 tu_GetImageSparseMemoryRequirements(
1580 VkDevice device,
1581 VkImage image,
1582 uint32_t *pSparseMemoryRequirementCount,
1583 VkSparseImageMemoryRequirements *pSparseMemoryRequirements)
1584 {
1585 tu_stub();
1586 }
1587
1588 void
1589 tu_GetImageSparseMemoryRequirements2(
1590 VkDevice device,
1591 const VkImageSparseMemoryRequirementsInfo2 *pInfo,
1592 uint32_t *pSparseMemoryRequirementCount,
1593 VkSparseImageMemoryRequirements2 *pSparseMemoryRequirements)
1594 {
1595 tu_stub();
1596 }
1597
1598 void
1599 tu_GetDeviceMemoryCommitment(VkDevice device,
1600 VkDeviceMemory memory,
1601 VkDeviceSize *pCommittedMemoryInBytes)
1602 {
1603 *pCommittedMemoryInBytes = 0;
1604 }
1605
1606 VkResult
1607 tu_BindBufferMemory2(VkDevice device,
1608 uint32_t bindInfoCount,
1609 const VkBindBufferMemoryInfo *pBindInfos)
1610 {
1611 for (uint32_t i = 0; i < bindInfoCount; ++i) {
1612 TU_FROM_HANDLE(tu_device_memory, mem, pBindInfos[i].memory);
1613 TU_FROM_HANDLE(tu_buffer, buffer, pBindInfos[i].buffer);
1614
1615 if (mem) {
1616 buffer->bo = &mem->bo;
1617 buffer->bo_offset = pBindInfos[i].memoryOffset;
1618 } else {
1619 buffer->bo = NULL;
1620 }
1621 }
1622 return VK_SUCCESS;
1623 }
1624
1625 VkResult
1626 tu_BindBufferMemory(VkDevice device,
1627 VkBuffer buffer,
1628 VkDeviceMemory memory,
1629 VkDeviceSize memoryOffset)
1630 {
1631 const VkBindBufferMemoryInfo info = {
1632 .sType = VK_STRUCTURE_TYPE_BIND_BUFFER_MEMORY_INFO,
1633 .buffer = buffer,
1634 .memory = memory,
1635 .memoryOffset = memoryOffset
1636 };
1637
1638 return tu_BindBufferMemory2(device, 1, &info);
1639 }
1640
1641 VkResult
1642 tu_BindImageMemory2(VkDevice device,
1643 uint32_t bindInfoCount,
1644 const VkBindImageMemoryInfo *pBindInfos)
1645 {
1646 for (uint32_t i = 0; i < bindInfoCount; ++i) {
1647 TU_FROM_HANDLE(tu_image, image, pBindInfos[i].image);
1648 TU_FROM_HANDLE(tu_device_memory, mem, pBindInfos[i].memory);
1649
1650 if (mem) {
1651 image->bo = &mem->bo;
1652 image->bo_offset = pBindInfos[i].memoryOffset;
1653 } else {
1654 image->bo = NULL;
1655 image->bo_offset = 0;
1656 }
1657 }
1658
1659 return VK_SUCCESS;
1660 }
1661
1662 VkResult
1663 tu_BindImageMemory(VkDevice device,
1664 VkImage image,
1665 VkDeviceMemory memory,
1666 VkDeviceSize memoryOffset)
1667 {
1668 const VkBindImageMemoryInfo info = {
1669 .sType = VK_STRUCTURE_TYPE_BIND_IMAGE_MEMORY_INFO,
1670 .image = image,
1671 .memory = memory,
1672 .memoryOffset = memoryOffset
1673 };
1674
1675 return tu_BindImageMemory2(device, 1, &info);
1676 }
1677
1678 VkResult
1679 tu_QueueBindSparse(VkQueue _queue,
1680 uint32_t bindInfoCount,
1681 const VkBindSparseInfo *pBindInfo,
1682 VkFence _fence)
1683 {
1684 return VK_SUCCESS;
1685 }
1686
1687 // Queue semaphore functions
1688
1689 VkResult
1690 tu_CreateSemaphore(VkDevice _device,
1691 const VkSemaphoreCreateInfo *pCreateInfo,
1692 const VkAllocationCallbacks *pAllocator,
1693 VkSemaphore *pSemaphore)
1694 {
1695 TU_FROM_HANDLE(tu_device, device, _device);
1696
1697 struct tu_semaphore *sem =
1698 vk_alloc2(&device->alloc, pAllocator, sizeof(*sem), 8,
1699 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
1700 if (!sem)
1701 return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
1702
1703 *pSemaphore = tu_semaphore_to_handle(sem);
1704 return VK_SUCCESS;
1705 }
1706
1707 void
1708 tu_DestroySemaphore(VkDevice _device,
1709 VkSemaphore _semaphore,
1710 const VkAllocationCallbacks *pAllocator)
1711 {
1712 TU_FROM_HANDLE(tu_device, device, _device);
1713 TU_FROM_HANDLE(tu_semaphore, sem, _semaphore);
1714 if (!_semaphore)
1715 return;
1716
1717 vk_free2(&device->alloc, pAllocator, sem);
1718 }
1719
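/* Events are backed by a small GPU-visible BO: the first 64-bit word holds
 * the signaled state (1 = set, 0 = reset) and is accessed directly through
 * the CPU mapping.
 */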
1720 VkResult
1721 tu_CreateEvent(VkDevice _device,
1722 const VkEventCreateInfo *pCreateInfo,
1723 const VkAllocationCallbacks *pAllocator,
1724 VkEvent *pEvent)
1725 {
1726 TU_FROM_HANDLE(tu_device, device, _device);
1727 struct tu_event *event =
1728 vk_alloc2(&device->alloc, pAllocator, sizeof(*event), 8,
1729 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
1730
1731 if (!event)
1732 return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
1733
1734 VkResult result = tu_bo_init_new(device, &event->bo, 0x1000);
1735 if (result != VK_SUCCESS)
1736 goto fail_alloc;
1737
1738 result = tu_bo_map(device, &event->bo);
1739 if (result != VK_SUCCESS)
1740 goto fail_map;
1741
1742 *pEvent = tu_event_to_handle(event);
1743
1744 return VK_SUCCESS;
1745
1746 fail_map:
1747 tu_bo_finish(device, &event->bo);
1748 fail_alloc:
1749 vk_free2(&device->alloc, pAllocator, event);
1750 return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
1751 }
1752
1753 void
1754 tu_DestroyEvent(VkDevice _device,
1755 VkEvent _event,
1756 const VkAllocationCallbacks *pAllocator)
1757 {
1758 TU_FROM_HANDLE(tu_device, device, _device);
1759 TU_FROM_HANDLE(tu_event, event, _event);
1760
1761 if (!event)
1762 return;
1763 vk_free2(&device->alloc, pAllocator, event);
1764 }
1765
1766 VkResult
1767 tu_GetEventStatus(VkDevice _device, VkEvent _event)
1768 {
1769 TU_FROM_HANDLE(tu_event, event, _event);
1770
1771 if (*(uint64_t*) event->bo.map == 1)
1772 return VK_EVENT_SET;
1773 return VK_EVENT_RESET;
1774 }
1775
1776 VkResult
1777 tu_SetEvent(VkDevice _device, VkEvent _event)
1778 {
1779 TU_FROM_HANDLE(tu_event, event, _event);
1780 *(uint64_t*) event->bo.map = 1;
1781
1782 return VK_SUCCESS;
1783 }
1784
1785 VkResult
1786 tu_ResetEvent(VkDevice _device, VkEvent _event)
1787 {
1788 TU_FROM_HANDLE(tu_event, event, _event);
1789 *(uint64_t*) event->bo.map = 0;
1790
1791 return VK_SUCCESS;
1792 }
1793
1794 VkResult
1795 tu_CreateBuffer(VkDevice _device,
1796 const VkBufferCreateInfo *pCreateInfo,
1797 const VkAllocationCallbacks *pAllocator,
1798 VkBuffer *pBuffer)
1799 {
1800 TU_FROM_HANDLE(tu_device, device, _device);
1801 struct tu_buffer *buffer;
1802
1803 assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO);
1804
1805 buffer = vk_alloc2(&device->alloc, pAllocator, sizeof(*buffer), 8,
1806 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
1807 if (buffer == NULL)
1808 return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
1809
1810 buffer->size = pCreateInfo->size;
1811 buffer->usage = pCreateInfo->usage;
1812 buffer->flags = pCreateInfo->flags;
1813
1814 *pBuffer = tu_buffer_to_handle(buffer);
1815
1816 return VK_SUCCESS;
1817 }
1818
1819 void
1820 tu_DestroyBuffer(VkDevice _device,
1821 VkBuffer _buffer,
1822 const VkAllocationCallbacks *pAllocator)
1823 {
1824 TU_FROM_HANDLE(tu_device, device, _device);
1825 TU_FROM_HANDLE(tu_buffer, buffer, _buffer);
1826
1827 if (!buffer)
1828 return;
1829
1830 vk_free2(&device->alloc, pAllocator, buffer);
1831 }
1832
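/* The effective framebuffer size is clamped to the smallest attachment:
 * width, height and layer count may only shrink from the values given in the
 * create info.
 */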
1833 static uint32_t
1834 tu_surface_max_layer_count(struct tu_image_view *iview)
1835 {
1836 return iview->type == VK_IMAGE_VIEW_TYPE_3D
1837 ? iview->extent.depth
1838 : (iview->base_layer + iview->layer_count);
1839 }
1840
1841 VkResult
1842 tu_CreateFramebuffer(VkDevice _device,
1843 const VkFramebufferCreateInfo *pCreateInfo,
1844 const VkAllocationCallbacks *pAllocator,
1845 VkFramebuffer *pFramebuffer)
1846 {
1847 TU_FROM_HANDLE(tu_device, device, _device);
1848 struct tu_framebuffer *framebuffer;
1849
1850 assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO);
1851
1852 size_t size = sizeof(*framebuffer) + sizeof(struct tu_attachment_info) *
1853 pCreateInfo->attachmentCount;
1854 framebuffer = vk_alloc2(&device->alloc, pAllocator, size, 8,
1855 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
1856 if (framebuffer == NULL)
1857 return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
1858
1859 framebuffer->attachment_count = pCreateInfo->attachmentCount;
1860 framebuffer->width = pCreateInfo->width;
1861 framebuffer->height = pCreateInfo->height;
1862 framebuffer->layers = pCreateInfo->layers;
1863 for (uint32_t i = 0; i < pCreateInfo->attachmentCount; i++) {
1864 VkImageView _iview = pCreateInfo->pAttachments[i];
1865 struct tu_image_view *iview = tu_image_view_from_handle(_iview);
1866 framebuffer->attachments[i].attachment = iview;
1867
1868 framebuffer->width = MIN2(framebuffer->width, iview->extent.width);
1869 framebuffer->height = MIN2(framebuffer->height, iview->extent.height);
1870 framebuffer->layers =
1871 MIN2(framebuffer->layers, tu_surface_max_layer_count(iview));
1872 }
1873
1874 *pFramebuffer = tu_framebuffer_to_handle(framebuffer);
1875 return VK_SUCCESS;
1876 }
1877
1878 void
1879 tu_DestroyFramebuffer(VkDevice _device,
1880 VkFramebuffer _fb,
1881 const VkAllocationCallbacks *pAllocator)
1882 {
1883 TU_FROM_HANDLE(tu_device, device, _device);
1884 TU_FROM_HANDLE(tu_framebuffer, fb, _fb);
1885
1886 if (!fb)
1887 return;
1888 vk_free2(&device->alloc, pAllocator, fb);
1889 }
1890
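/* Translate a VkSamplerAddressMode to the a6xx TEX_SAMP wrap encoding.
 * CLAMP_TO_BORDER requires a border color, which is reported back through
 * *needs_border so the sampler can record it.
 */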
1891 static enum a6xx_tex_clamp
1892 tu6_tex_wrap(VkSamplerAddressMode address_mode, bool *needs_border)
1893 {
1894 switch (address_mode) {
1895 case VK_SAMPLER_ADDRESS_MODE_REPEAT:
1896 return A6XX_TEX_REPEAT;
1897 case VK_SAMPLER_ADDRESS_MODE_MIRRORED_REPEAT:
1898 return A6XX_TEX_MIRROR_REPEAT;
1899 case VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE:
1900 return A6XX_TEX_CLAMP_TO_EDGE;
1901 case VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_BORDER:
1902 *needs_border = true;
1903 return A6XX_TEX_CLAMP_TO_BORDER;
1904 case VK_SAMPLER_ADDRESS_MODE_MIRROR_CLAMP_TO_EDGE:
1905 /* Only works for power-of-two dimensions; needs emulation otherwise. */
1906 return A6XX_TEX_MIRROR_CLAMP;
1907 default:
1908 unreachable("illegal tex wrap mode");
1909 break;
1910 }
1911 }
1912
1913 static enum a6xx_tex_filter
1914 tu6_tex_filter(VkFilter filter, unsigned aniso)
1915 {
1916 switch (filter) {
1917 case VK_FILTER_NEAREST:
1918 return A6XX_TEX_NEAREST;
1919 case VK_FILTER_LINEAR:
1920 return aniso ? A6XX_TEX_ANISO : A6XX_TEX_LINEAR;
1921 case VK_FILTER_CUBIC_IMG:
1922 default:
1923 unreachable("illegal texture filter");
1924 break;
1925 }
1926 }
1927
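/* Pack the static a6xx sampler descriptor (four 32-bit state words).  The
 * aniso value works out to roughly log2(maxAnisotropy), capped so that at
 * most 16x is requested, and anisotropic filtering is only selected for
 * VK_FILTER_LINEAR.
 */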
1928 static void
1929 tu_init_sampler(struct tu_device *device,
1930 struct tu_sampler *sampler,
1931 const VkSamplerCreateInfo *pCreateInfo)
1932 {
1933 unsigned aniso = pCreateInfo->anisotropyEnable ?
1934 util_last_bit(MIN2((uint32_t)pCreateInfo->maxAnisotropy >> 1, 8)) : 0;
1935 bool miplinear = (pCreateInfo->mipmapMode == VK_SAMPLER_MIPMAP_MODE_LINEAR);
1936 bool needs_border = false;
1937
1938 sampler->state[0] =
1939 COND(miplinear, A6XX_TEX_SAMP_0_MIPFILTER_LINEAR_NEAR) |
1940 A6XX_TEX_SAMP_0_XY_MAG(tu6_tex_filter(pCreateInfo->magFilter, aniso)) |
1941 A6XX_TEX_SAMP_0_XY_MIN(tu6_tex_filter(pCreateInfo->minFilter, aniso)) |
1942 A6XX_TEX_SAMP_0_ANISO(aniso) |
1943 A6XX_TEX_SAMP_0_WRAP_S(tu6_tex_wrap(pCreateInfo->addressModeU, &needs_border)) |
1944 A6XX_TEX_SAMP_0_WRAP_T(tu6_tex_wrap(pCreateInfo->addressModeV, &needs_border)) |
1945 A6XX_TEX_SAMP_0_WRAP_R(tu6_tex_wrap(pCreateInfo->addressModeW, &needs_border)) |
1946 A6XX_TEX_SAMP_0_LOD_BIAS(pCreateInfo->mipLodBias);
1947 sampler->state[1] =
1948 /* COND(!cso->seamless_cube_map, A6XX_TEX_SAMP_1_CUBEMAPSEAMLESSFILTOFF) | */
1949 COND(pCreateInfo->unnormalizedCoordinates, A6XX_TEX_SAMP_1_UNNORM_COORDS) |
1950 A6XX_TEX_SAMP_1_MIN_LOD(pCreateInfo->minLod) |
1951 A6XX_TEX_SAMP_1_MAX_LOD(pCreateInfo->maxLod) |
1952 COND(pCreateInfo->compareEnable, A6XX_TEX_SAMP_1_COMPARE_FUNC(pCreateInfo->compareOp));
1953 sampler->state[2] = 0;
1954 sampler->state[3] = 0;
1955
1956 /* TODO:
1957  * - A6XX_TEX_SAMP_1_MIPFILTER_LINEAR_FAR disables mipmapping, but Vulkan has no NONE mip filter.
1958  * - Border color support.
1959  */
1960
1961 sampler->needs_border = needs_border;
1962 }
1963
1964 VkResult
1965 tu_CreateSampler(VkDevice _device,
1966 const VkSamplerCreateInfo *pCreateInfo,
1967 const VkAllocationCallbacks *pAllocator,
1968 VkSampler *pSampler)
1969 {
1970 TU_FROM_HANDLE(tu_device, device, _device);
1971 struct tu_sampler *sampler;
1972
1973 assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_SAMPLER_CREATE_INFO);
1974
1975 sampler = vk_alloc2(&device->alloc, pAllocator, sizeof(*sampler), 8,
1976 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
1977 if (!sampler)
1978 return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
1979
1980 tu_init_sampler(device, sampler, pCreateInfo);
1981 *pSampler = tu_sampler_to_handle(sampler);
1982
1983 return VK_SUCCESS;
1984 }
1985
1986 void
1987 tu_DestroySampler(VkDevice _device,
1988 VkSampler _sampler,
1989 const VkAllocationCallbacks *pAllocator)
1990 {
1991 TU_FROM_HANDLE(tu_device, device, _device);
1992 TU_FROM_HANDLE(tu_sampler, sampler, _sampler);
1993
1994 if (!sampler)
1995 return;
1996 vk_free2(&device->alloc, pAllocator, sampler);
1997 }
1998
1999 /* vk_icd.h does not declare this function, so we declare it here to
2000 * suppress -Wmissing-prototypes.
2001 */
2002 PUBLIC VKAPI_ATTR VkResult VKAPI_CALL
2003 vk_icdNegotiateLoaderICDInterfaceVersion(uint32_t *pSupportedVersion);
2004
2005 PUBLIC VKAPI_ATTR VkResult VKAPI_CALL
2006 vk_icdNegotiateLoaderICDInterfaceVersion(uint32_t *pSupportedVersion)
2007 {
2008 /* For the full details on loader interface versioning, see
2009 * <https://github.com/KhronosGroup/Vulkan-LoaderAndValidationLayers/blob/master/loader/LoaderAndLayerInterface.md>.
2010 * What follows is a condensed summary, to help you navigate the large and
2011 * confusing official doc.
2012 *
2013 * - Loader interface v0 is incompatible with later versions. We don't
2014 * support it.
2015 *
2016 * - In loader interface v1:
2017 * - The first ICD entrypoint called by the loader is
2018 * vk_icdGetInstanceProcAddr(). The ICD must statically expose this
2019 * entrypoint.
2020 * - The ICD must statically expose no other Vulkan symbol unless it
2021 * is linked with -Bsymbolic.
2022 * - Each dispatchable Vulkan handle created by the ICD must be
2023 * a pointer to a struct whose first member is VK_LOADER_DATA. The
2024 * ICD must initialize VK_LOADER_DATA.loadMagic to
2025 * ICD_LOADER_MAGIC.
2026 * - The loader implements vkCreate{PLATFORM}SurfaceKHR() and
2027 * vkDestroySurfaceKHR(). The ICD must be capable of working with
2028 * such loader-managed surfaces.
2029 *
2030 * - Loader interface v2 differs from v1 in:
2031 * - The first ICD entrypoint called by the loader is
2032 * vk_icdNegotiateLoaderICDInterfaceVersion(). The ICD must
2033 * statically expose this entrypoint.
2034 *
2035 * - Loader interface v3 differs from v2 in:
2036 * - The ICD must implement vkCreate{PLATFORM}SurfaceKHR(),
2037 * vkDestroySurfaceKHR(), and other API that uses VkSurfaceKHR,
2038 * because the loader no longer does so.
2039 */
2040 *pSupportedVersion = MIN2(*pSupportedVersion, 3u);
2041 return VK_SUCCESS;
2042 }
2043
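/* Memory export: both the opaque-fd and dma-buf handle types are serviced by
 * exporting the backing GEM BO as a dma-buf (prime) file descriptor.
 */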
2044 VkResult
2045 tu_GetMemoryFdKHR(VkDevice _device,
2046 const VkMemoryGetFdInfoKHR *pGetFdInfo,
2047 int *pFd)
2048 {
2049 TU_FROM_HANDLE(tu_device, device, _device);
2050 TU_FROM_HANDLE(tu_device_memory, memory, pGetFdInfo->memory);
2051
2052 assert(pGetFdInfo->sType == VK_STRUCTURE_TYPE_MEMORY_GET_FD_INFO_KHR);
2053
2054 /* At the moment we support only the handle types asserted below. */
2055 assert(pGetFdInfo->handleType ==
2056 VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT ||
2057 pGetFdInfo->handleType ==
2058 VK_EXTERNAL_MEMORY_HANDLE_TYPE_DMA_BUF_BIT_EXT);
2059
2060 int prime_fd = tu_bo_export_dmabuf(device, &memory->bo);
2061 if (prime_fd < 0)
2062 return vk_error(device->instance, VK_ERROR_OUT_OF_DEVICE_MEMORY);
2063
2064 *pFd = prime_fd;
2065 return VK_SUCCESS;
2066 }
2067
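/* For imported dma-bufs, memoryTypeBits is hard-coded to 1, i.e. the import
 * is assumed to be usable with memory type 0.
 */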
2068 VkResult
2069 tu_GetMemoryFdPropertiesKHR(VkDevice _device,
2070 VkExternalMemoryHandleTypeFlagBits handleType,
2071 int fd,
2072 VkMemoryFdPropertiesKHR *pMemoryFdProperties)
2073 {
2074 assert(handleType == VK_EXTERNAL_MEMORY_HANDLE_TYPE_DMA_BUF_BIT_EXT);
2075 pMemoryFdProperties->memoryTypeBits = 1;
2076 return VK_SUCCESS;
2077 }
2078
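/* External semaphores and fences are not supported yet, so the two queries
 * below advertise no handle types and no features.
 */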
2079 void
2080 tu_GetPhysicalDeviceExternalSemaphoreProperties(
2081 VkPhysicalDevice physicalDevice,
2082 const VkPhysicalDeviceExternalSemaphoreInfo *pExternalSemaphoreInfo,
2083 VkExternalSemaphoreProperties *pExternalSemaphoreProperties)
2084 {
2085 pExternalSemaphoreProperties->exportFromImportedHandleTypes = 0;
2086 pExternalSemaphoreProperties->compatibleHandleTypes = 0;
2087 pExternalSemaphoreProperties->externalSemaphoreFeatures = 0;
2088 }
2089
2090 void
2091 tu_GetPhysicalDeviceExternalFenceProperties(
2092 VkPhysicalDevice physicalDevice,
2093 const VkPhysicalDeviceExternalFenceInfo *pExternalFenceInfo,
2094 VkExternalFenceProperties *pExternalFenceProperties)
2095 {
2096 pExternalFenceProperties->exportFromImportedHandleTypes = 0;
2097 pExternalFenceProperties->compatibleHandleTypes = 0;
2098 pExternalFenceProperties->externalFenceFeatures = 0;
2099 }
2100
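/* Debug report callbacks are forwarded to the shared vk_debug_report
 * helpers; the driver keeps no additional state of its own.
 */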
2101 VkResult
2102 tu_CreateDebugReportCallbackEXT(
2103 VkInstance _instance,
2104 const VkDebugReportCallbackCreateInfoEXT *pCreateInfo,
2105 const VkAllocationCallbacks *pAllocator,
2106 VkDebugReportCallbackEXT *pCallback)
2107 {
2108 TU_FROM_HANDLE(tu_instance, instance, _instance);
2109 return vk_create_debug_report_callback(&instance->debug_report_callbacks,
2110 pCreateInfo, pAllocator,
2111 &instance->alloc, pCallback);
2112 }
2113
2114 void
2115 tu_DestroyDebugReportCallbackEXT(VkInstance _instance,
2116 VkDebugReportCallbackEXT _callback,
2117 const VkAllocationCallbacks *pAllocator)
2118 {
2119 TU_FROM_HANDLE(tu_instance, instance, _instance);
2120 vk_destroy_debug_report_callback(&instance->debug_report_callbacks,
2121 _callback, pAllocator, &instance->alloc);
2122 }
2123
2124 void
2125 tu_DebugReportMessageEXT(VkInstance _instance,
2126 VkDebugReportFlagsEXT flags,
2127 VkDebugReportObjectTypeEXT objectType,
2128 uint64_t object,
2129 size_t location,
2130 int32_t messageCode,
2131 const char *pLayerPrefix,
2132 const char *pMessage)
2133 {
2134 TU_FROM_HANDLE(tu_instance, instance, _instance);
2135 vk_debug_report(&instance->debug_report_callbacks, flags, objectType,
2136 object, location, messageCode, pLayerPrefix, pMessage);
2137 }
2138
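/* Only single-device groups are expected (note the assert below), so the
 * peer is always the local device and every peer-memory feature is reported.
 */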
2139 void
2140 tu_GetDeviceGroupPeerMemoryFeatures(
2141 VkDevice device,
2142 uint32_t heapIndex,
2143 uint32_t localDeviceIndex,
2144 uint32_t remoteDeviceIndex,
2145 VkPeerMemoryFeatureFlags *pPeerMemoryFeatures)
2146 {
2147 assert(localDeviceIndex == remoteDeviceIndex);
2148
2149 *pPeerMemoryFeatures = VK_PEER_MEMORY_FEATURE_COPY_SRC_BIT |
2150 VK_PEER_MEMORY_FEATURE_COPY_DST_BIT |
2151 VK_PEER_MEMORY_FEATURE_GENERIC_SRC_BIT |
2152 VK_PEER_MEMORY_FEATURE_GENERIC_DST_BIT;
2153 }