[mesa.git] src/freedreno/vulkan/tu_device.c @ commit b8f29394c8fd5aa4ba69f7da9887ae7ef0fd7f11
1 /*
2 * Copyright © 2016 Red Hat.
3 * Copyright © 2016 Bas Nieuwenhuizen
4 *
5 * based in part on anv driver which is:
6 * Copyright © 2015 Intel Corporation
7 *
8 * Permission is hereby granted, free of charge, to any person obtaining a
9 * copy of this software and associated documentation files (the "Software"),
10 * to deal in the Software without restriction, including without limitation
11 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
12 * and/or sell copies of the Software, and to permit persons to whom the
13 * Software is furnished to do so, subject to the following conditions:
14 *
15 * The above copyright notice and this permission notice (including the next
16 * paragraph) shall be included in all copies or substantial portions of the
17 * Software.
18 *
19 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
20 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
21 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
22 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
23 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
24 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
25 * DEALINGS IN THE SOFTWARE.
26 */
27
28 #include "tu_private.h"
29
30 #include <fcntl.h>
31 #include <libsync.h>
32 #include <stdbool.h>
33 #include <string.h>
34 #include <sys/mman.h>
35 #include <sys/sysinfo.h>
36 #include <unistd.h>
37 #include <xf86drm.h>
38
39 #include "util/debug.h"
40 #include "util/disk_cache.h"
41 #include "util/strtod.h"
42 #include "vk_format.h"
43 #include "vk_util.h"
44
45 #include "drm/msm_drm.h"
46
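/*
 * The pipeline cache UUID is laid out as: bytes 0-3 = Mesa build timestamp,
 * bytes 4-5 = GPU family id, followed by the literal "tu"; the remaining
 * bytes stay zero from the memset below.
 */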
47 static int
48 tu_device_get_cache_uuid(uint16_t family, void *uuid)
49 {
50 uint32_t mesa_timestamp;
51 uint16_t f = family;
52 memset(uuid, 0, VK_UUID_SIZE);
53 if (!disk_cache_get_function_timestamp(tu_device_get_cache_uuid,
54 &mesa_timestamp))
55 return -1;
56
57 memcpy(uuid, &mesa_timestamp, 4);
58 memcpy((char *) uuid + 4, &f, 2);
59 snprintf((char *) uuid + 6, VK_UUID_SIZE - 10, "tu");
60 return 0;
61 }
62
63 static void
64 tu_get_driver_uuid(void *uuid)
65 {
66 memset(uuid, 0, VK_UUID_SIZE);
67 snprintf(uuid, VK_UUID_SIZE, "freedreno");
68 }
69
70 static void
71 tu_get_device_uuid(void *uuid)
72 {
73 memset(uuid, 0, VK_UUID_SIZE);
74 }
75
76 static VkResult
77 tu_bo_init(struct tu_device *dev,
78 struct tu_bo *bo,
79 uint32_t gem_handle,
80 uint64_t size)
81 {
82 uint64_t iova = tu_gem_info_iova(dev, gem_handle);
83 if (!iova)
84 return VK_ERROR_OUT_OF_DEVICE_MEMORY;
85
86 *bo = (struct tu_bo) {
87 .gem_handle = gem_handle,
88 .size = size,
89 .iova = iova,
90 };
91
92 return VK_SUCCESS;
93 }
94
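/*
 * Allocate a fresh GEM buffer object. MSM_BO_WC requests a write-combined
 * CPU mapping, mirroring what freedreno's drm backend does (see the TODO
 * below about picking better flags).
 */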
95 VkResult
96 tu_bo_init_new(struct tu_device *dev, struct tu_bo *bo, uint64_t size)
97 {
98 /* TODO: Choose better flags. As of 2018-11-12, freedreno/drm/msm_bo.c
99 * always sets `flags = MSM_BO_WC`, and we copy that behavior here.
100 */
101 uint32_t gem_handle = tu_gem_new(dev, size, MSM_BO_WC);
102 if (!gem_handle)
103 return vk_error(dev->instance, VK_ERROR_OUT_OF_DEVICE_MEMORY);
104
105 VkResult result = tu_bo_init(dev, bo, gem_handle, size);
106 if (result != VK_SUCCESS) {
107 tu_gem_close(dev, gem_handle);
108 return vk_error(dev->instance, result);
109 }
110
111 return VK_SUCCESS;
112 }
113
114 VkResult
115 tu_bo_init_dmabuf(struct tu_device *dev,
116 struct tu_bo *bo,
117 uint64_t size,
118 int fd)
119 {
120 uint32_t gem_handle = tu_gem_import_dmabuf(dev, fd, size);
121 if (!gem_handle)
122 return vk_error(dev->instance, VK_ERROR_INVALID_EXTERNAL_HANDLE);
123
124 VkResult result = tu_bo_init(dev, bo, gem_handle, size);
125 if (result != VK_SUCCESS) {
126 tu_gem_close(dev, gem_handle);
127 return vk_error(dev->instance, result);
128 }
129
130 return VK_SUCCESS;
131 }
132
133 int
134 tu_bo_export_dmabuf(struct tu_device *dev, struct tu_bo *bo)
135 {
136 return tu_gem_export_dmabuf(dev, bo->gem_handle);
137 }
138
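/*
 * CPU-map a BO: tu_gem_info_offset() returns the per-BO mmap offset handed
 * out by the msm kernel driver, which is then mmap()ed through the render
 * node fd.
 *
 * Illustrative use of the BO helpers in this file (a sketch only; `data'
 * and `size' are placeholders):
 *
 *    struct tu_bo bo;
 *    if (tu_bo_init_new(dev, &bo, 4096) == VK_SUCCESS &&
 *        tu_bo_map(dev, &bo) == VK_SUCCESS) {
 *       memcpy(bo.map, data, size);
 *       tu_bo_finish(dev, &bo);
 *    }
 */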
139 VkResult
140 tu_bo_map(struct tu_device *dev, struct tu_bo *bo)
141 {
142 if (bo->map)
143 return VK_SUCCESS;
144
145 uint64_t offset = tu_gem_info_offset(dev, bo->gem_handle);
146 if (!offset)
147 return vk_error(dev->instance, VK_ERROR_OUT_OF_DEVICE_MEMORY);
148
149 /* TODO: Should we use the wrapper os_mmap() like Freedreno does? */
150 void *map = mmap(0, bo->size, PROT_READ | PROT_WRITE, MAP_SHARED,
151 dev->physical_device->local_fd, offset);
152 if (map == MAP_FAILED)
153 return vk_error(dev->instance, VK_ERROR_MEMORY_MAP_FAILED);
154
155 bo->map = map;
156 return VK_SUCCESS;
157 }
158
159 void
160 tu_bo_finish(struct tu_device *dev, struct tu_bo *bo)
161 {
162 assert(bo->gem_handle);
163
164 if (bo->map)
165 munmap(bo->map, bo->size);
166
167 tu_gem_close(dev, bo->gem_handle);
168 }
169
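/*
 * Open the render node, require the msm kernel driver at version 1.x with
 * minor >= 3 (MSM_INFO_IOVA), then query the GPU id and GMEM size that the
 * rest of the driver keys off of.
 */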
170 static VkResult
171 tu_physical_device_init(struct tu_physical_device *device,
172 struct tu_instance *instance,
173 drmDevicePtr drm_device)
174 {
175 const char *path = drm_device->nodes[DRM_NODE_RENDER];
176 VkResult result = VK_SUCCESS;
177 drmVersionPtr version;
178 int fd;
179 int master_fd = -1;
180
181 fd = open(path, O_RDWR | O_CLOEXEC);
182 if (fd < 0) {
183 return vk_errorf(instance, VK_ERROR_INCOMPATIBLE_DRIVER,
184 "failed to open device %s", path);
185 }
186
187 /* Version 1.3 added MSM_INFO_IOVA. */
188 const int min_version_major = 1;
189 const int min_version_minor = 3;
190
191 version = drmGetVersion(fd);
192 if (!version) {
193 close(fd);
194 return vk_errorf(instance, VK_ERROR_INCOMPATIBLE_DRIVER,
195 "failed to query kernel driver version for device %s",
196 path);
197 }
198
199 if (strcmp(version->name, "msm")) {
200 drmFreeVersion(version);
201 if (master_fd != -1)
202 close(master_fd);
203 close(fd);
204 return vk_errorf(instance, VK_ERROR_INCOMPATIBLE_DRIVER,
205 "device %s does not use the msm kernel driver", path);
206 }
207
208 if (version->version_major != min_version_major ||
209 version->version_minor < min_version_minor) {
210 result = vk_errorf(instance, VK_ERROR_INCOMPATIBLE_DRIVER,
211 "kernel driver for device %s has version %d.%d, "
212 "but Vulkan requires version >= %d.%d",
213 path, version->version_major, version->version_minor,
214 min_version_major, min_version_minor);
215 drmFreeVersion(version);
216 close(fd);
217 return result;
218 }
219
220 drmFreeVersion(version);
221
222 if (instance->debug_flags & TU_DEBUG_STARTUP)
223 tu_logi("Found compatible device '%s'.", path);
224
225 device->_loader_data.loaderMagic = ICD_LOADER_MAGIC;
226 device->instance = instance;
227 assert(strlen(path) < ARRAY_SIZE(device->path));
228 strncpy(device->path, path, ARRAY_SIZE(device->path));
229
230 if (instance->enabled_extensions.KHR_display) {
231 master_fd =
232 open(drm_device->nodes[DRM_NODE_PRIMARY], O_RDWR | O_CLOEXEC);
233 if (master_fd >= 0) {
 234          /* TODO: free master_fd if accel is not working? */
235 }
236 }
237
238 device->master_fd = master_fd;
239 device->local_fd = fd;
240
241 if (tu_drm_get_gpu_id(device, &device->gpu_id)) {
242 if (instance->debug_flags & TU_DEBUG_STARTUP)
243 tu_logi("Could not query the GPU ID");
244 result = vk_errorf(instance, VK_ERROR_INITIALIZATION_FAILED,
245 "could not get GPU ID");
246 goto fail;
247 }
248
249 if (tu_drm_get_gmem_size(device, &device->gmem_size)) {
250 if (instance->debug_flags & TU_DEBUG_STARTUP)
251 tu_logi("Could not query the GMEM size");
252 result = vk_errorf(instance, VK_ERROR_INITIALIZATION_FAILED,
253 "could not get GMEM size");
254 goto fail;
255 }
256
257 memset(device->name, 0, sizeof(device->name));
258 sprintf(device->name, "FD%d", device->gpu_id);
259
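   /* Tile alignment is per-GPU; only a630 is recognized so far. */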
260 switch (device->gpu_id) {
261 case 630:
262 device->tile_align_w = 32;
263 device->tile_align_h = 32;
264 break;
265 default:
266 result = vk_errorf(instance, VK_ERROR_INITIALIZATION_FAILED,
267 "device %s is unsupported", device->name);
268 goto fail;
269 }
270 if (tu_device_get_cache_uuid(device->gpu_id, device->cache_uuid)) {
271 result = vk_errorf(instance, VK_ERROR_INITIALIZATION_FAILED,
272 "cannot generate UUID");
273 goto fail;
274 }
275
 276    /* The gpu id is already embedded in the uuid so we just pass the device
 277     * name when creating the cache.
 278     */
279 char buf[VK_UUID_SIZE * 2 + 1];
280 disk_cache_format_hex_id(buf, device->cache_uuid, VK_UUID_SIZE * 2);
281 device->disk_cache = disk_cache_create(device->name, buf, 0);
282
283 fprintf(stderr, "WARNING: tu is not a conformant vulkan implementation, "
284 "testing use only.\n");
285
 286    tu_get_driver_uuid(&device->driver_uuid);
287 tu_get_device_uuid(&device->device_uuid);
288
289 tu_fill_device_extension_table(device, &device->supported_extensions);
290
291 if (result != VK_SUCCESS) {
292 vk_error(instance, result);
293 goto fail;
294 }
295
296 return VK_SUCCESS;
297
298 fail:
299 close(fd);
300 if (master_fd != -1)
301 close(master_fd);
302 return result;
303 }
304
305 static void
306 tu_physical_device_finish(struct tu_physical_device *device)
307 {
308 disk_cache_destroy(device->disk_cache);
309 close(device->local_fd);
310 if (device->master_fd != -1)
311 close(device->master_fd);
312 }
313
314 static void *
315 default_alloc_func(void *pUserData,
316 size_t size,
317 size_t align,
318 VkSystemAllocationScope allocationScope)
319 {
320 return malloc(size);
321 }
322
323 static void *
324 default_realloc_func(void *pUserData,
325 void *pOriginal,
326 size_t size,
327 size_t align,
328 VkSystemAllocationScope allocationScope)
329 {
330 return realloc(pOriginal, size);
331 }
332
333 static void
334 default_free_func(void *pUserData, void *pMemory)
335 {
336 free(pMemory);
337 }
338
339 static const VkAllocationCallbacks default_alloc = {
340 .pUserData = NULL,
341 .pfnAllocation = default_alloc_func,
342 .pfnReallocation = default_realloc_func,
343 .pfnFree = default_free_func,
344 };
345
346 static const struct debug_control tu_debug_options[] = {
347 { "startup", TU_DEBUG_STARTUP }, { NULL, 0 }
348 };
349
350 const char *
351 tu_get_debug_option_name(int id)
352 {
353 assert(id < ARRAY_SIZE(tu_debug_options) - 1);
354 return tu_debug_options[id].string;
355 }
356
357 static int
358 tu_get_instance_extension_index(const char *name)
359 {
360 for (unsigned i = 0; i < TU_INSTANCE_EXTENSION_COUNT; ++i) {
361 if (strcmp(name, tu_instance_extensions[i].extensionName) == 0)
362 return i;
363 }
364 return -1;
365 }
366
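/*
 * Instance creation: the API version comes from pApplicationInfo (falling
 * back to tu_EnumerateInstanceVersion()), TU_DEBUG is parsed into debug
 * flags, and every requested instance extension must be present in
 * tu_supported_instance_extensions.
 */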
367 VkResult
368 tu_CreateInstance(const VkInstanceCreateInfo *pCreateInfo,
369 const VkAllocationCallbacks *pAllocator,
370 VkInstance *pInstance)
371 {
372 struct tu_instance *instance;
373 VkResult result;
374
375 assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_INSTANCE_CREATE_INFO);
376
377 uint32_t client_version;
378 if (pCreateInfo->pApplicationInfo &&
379 pCreateInfo->pApplicationInfo->apiVersion != 0) {
380 client_version = pCreateInfo->pApplicationInfo->apiVersion;
381 } else {
382 tu_EnumerateInstanceVersion(&client_version);
383 }
384
385 instance = vk_zalloc2(&default_alloc, pAllocator, sizeof(*instance), 8,
386 VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
387 if (!instance)
388 return vk_error(NULL, VK_ERROR_OUT_OF_HOST_MEMORY);
389
390 instance->_loader_data.loaderMagic = ICD_LOADER_MAGIC;
391
392 if (pAllocator)
393 instance->alloc = *pAllocator;
394 else
395 instance->alloc = default_alloc;
396
397 instance->api_version = client_version;
398 instance->physical_device_count = -1;
399
400 instance->debug_flags =
401 parse_debug_string(getenv("TU_DEBUG"), tu_debug_options);
402
403 if (instance->debug_flags & TU_DEBUG_STARTUP)
404 tu_logi("Created an instance");
405
406 for (uint32_t i = 0; i < pCreateInfo->enabledExtensionCount; i++) {
407 const char *ext_name = pCreateInfo->ppEnabledExtensionNames[i];
408 int index = tu_get_instance_extension_index(ext_name);
409
410 if (index < 0 || !tu_supported_instance_extensions.extensions[index]) {
411 vk_free2(&default_alloc, pAllocator, instance);
412 return vk_error(instance, VK_ERROR_EXTENSION_NOT_PRESENT);
413 }
414
415 instance->enabled_extensions.extensions[index] = true;
416 }
417
418 result = vk_debug_report_instance_init(&instance->debug_report_callbacks);
419 if (result != VK_SUCCESS) {
420 vk_free2(&default_alloc, pAllocator, instance);
421 return vk_error(instance, result);
422 }
423
424 _mesa_locale_init();
425
426 VG(VALGRIND_CREATE_MEMPOOL(instance, 0, false));
427
428 *pInstance = tu_instance_to_handle(instance);
429
430 return VK_SUCCESS;
431 }
432
433 void
434 tu_DestroyInstance(VkInstance _instance,
435 const VkAllocationCallbacks *pAllocator)
436 {
437 TU_FROM_HANDLE(tu_instance, instance, _instance);
438
439 if (!instance)
440 return;
441
442 for (int i = 0; i < instance->physical_device_count; ++i) {
443 tu_physical_device_finish(instance->physical_devices + i);
444 }
445
446 VG(VALGRIND_DESTROY_MEMPOOL(instance));
447
448 _mesa_locale_fini();
449
450 vk_debug_report_instance_destroy(&instance->debug_report_callbacks);
451
452 vk_free(&instance->alloc, instance);
453 }
454
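/*
 * Scan up to 8 DRM devices and accept render nodes on the platform bus
 * (the GPU is not a PCI device on these SoCs) that also pass
 * tu_physical_device_init().
 */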
455 static VkResult
456 tu_enumerate_devices(struct tu_instance *instance)
457 {
 458    /* TODO: Check for more than 8 devices? */
459 drmDevicePtr devices[8];
460 VkResult result = VK_ERROR_INCOMPATIBLE_DRIVER;
461 int max_devices;
462
463 instance->physical_device_count = 0;
464
465 max_devices = drmGetDevices2(0, devices, ARRAY_SIZE(devices));
466
467 if (instance->debug_flags & TU_DEBUG_STARTUP)
468 tu_logi("Found %d drm nodes", max_devices);
469
470 if (max_devices < 1)
471 return vk_error(instance, VK_ERROR_INCOMPATIBLE_DRIVER);
472
473 for (unsigned i = 0; i < (unsigned) max_devices; i++) {
474 if (devices[i]->available_nodes & 1 << DRM_NODE_RENDER &&
475 devices[i]->bustype == DRM_BUS_PLATFORM) {
476
477 result = tu_physical_device_init(
478 instance->physical_devices + instance->physical_device_count,
479 instance, devices[i]);
480 if (result == VK_SUCCESS)
481 ++instance->physical_device_count;
482 else if (result != VK_ERROR_INCOMPATIBLE_DRIVER)
483 break;
484 }
485 }
486 drmFreeDevices(devices, max_devices);
487
488 return result;
489 }
490
491 VkResult
492 tu_EnumeratePhysicalDevices(VkInstance _instance,
493 uint32_t *pPhysicalDeviceCount,
494 VkPhysicalDevice *pPhysicalDevices)
495 {
496 TU_FROM_HANDLE(tu_instance, instance, _instance);
497 VK_OUTARRAY_MAKE(out, pPhysicalDevices, pPhysicalDeviceCount);
498
499 VkResult result;
500
501 if (instance->physical_device_count < 0) {
502 result = tu_enumerate_devices(instance);
503 if (result != VK_SUCCESS && result != VK_ERROR_INCOMPATIBLE_DRIVER)
504 return result;
505 }
506
507 for (uint32_t i = 0; i < instance->physical_device_count; ++i) {
508 vk_outarray_append(&out, p)
509 {
510 *p = tu_physical_device_to_handle(instance->physical_devices + i);
511 }
512 }
513
514 return vk_outarray_status(&out);
515 }
516
517 VkResult
518 tu_EnumeratePhysicalDeviceGroups(
519 VkInstance _instance,
520 uint32_t *pPhysicalDeviceGroupCount,
521 VkPhysicalDeviceGroupProperties *pPhysicalDeviceGroupProperties)
522 {
523 TU_FROM_HANDLE(tu_instance, instance, _instance);
524 VK_OUTARRAY_MAKE(out, pPhysicalDeviceGroupProperties,
525 pPhysicalDeviceGroupCount);
526 VkResult result;
527
528 if (instance->physical_device_count < 0) {
529 result = tu_enumerate_devices(instance);
530 if (result != VK_SUCCESS && result != VK_ERROR_INCOMPATIBLE_DRIVER)
531 return result;
532 }
533
534 for (uint32_t i = 0; i < instance->physical_device_count; ++i) {
535 vk_outarray_append(&out, p)
536 {
537 p->physicalDeviceCount = 1;
538 p->physicalDevices[0] =
539 tu_physical_device_to_handle(instance->physical_devices + i);
540 p->subsetAllocation = false;
541 }
542 }
543
544 return vk_outarray_status(&out);
545 }
546
547 void
548 tu_GetPhysicalDeviceFeatures(VkPhysicalDevice physicalDevice,
549 VkPhysicalDeviceFeatures *pFeatures)
550 {
551 memset(pFeatures, 0, sizeof(*pFeatures));
552
553 *pFeatures = (VkPhysicalDeviceFeatures) {
554 .robustBufferAccess = false,
555 .fullDrawIndexUint32 = false,
556 .imageCubeArray = false,
557 .independentBlend = false,
558 .geometryShader = false,
559 .tessellationShader = false,
560 .sampleRateShading = false,
561 .dualSrcBlend = false,
562 .logicOp = false,
563 .multiDrawIndirect = false,
564 .drawIndirectFirstInstance = false,
565 .depthClamp = false,
566 .depthBiasClamp = false,
567 .fillModeNonSolid = false,
568 .depthBounds = false,
569 .wideLines = false,
570 .largePoints = false,
571 .alphaToOne = false,
572 .multiViewport = false,
573 .samplerAnisotropy = false,
574 .textureCompressionETC2 = false,
575 .textureCompressionASTC_LDR = false,
576 .textureCompressionBC = false,
577 .occlusionQueryPrecise = false,
578 .pipelineStatisticsQuery = false,
579 .vertexPipelineStoresAndAtomics = false,
580 .fragmentStoresAndAtomics = false,
581 .shaderTessellationAndGeometryPointSize = false,
582 .shaderImageGatherExtended = false,
583 .shaderStorageImageExtendedFormats = false,
584 .shaderStorageImageMultisample = false,
585 .shaderUniformBufferArrayDynamicIndexing = false,
586 .shaderSampledImageArrayDynamicIndexing = false,
587 .shaderStorageBufferArrayDynamicIndexing = false,
588 .shaderStorageImageArrayDynamicIndexing = false,
589 .shaderStorageImageReadWithoutFormat = false,
590 .shaderStorageImageWriteWithoutFormat = false,
591 .shaderClipDistance = false,
592 .shaderCullDistance = false,
593 .shaderFloat64 = false,
594 .shaderInt64 = false,
595 .shaderInt16 = false,
596 .sparseBinding = false,
597 .variableMultisampleRate = false,
598 .inheritedQueries = false,
599 };
600 }
601
602 void
603 tu_GetPhysicalDeviceFeatures2(VkPhysicalDevice physicalDevice,
604 VkPhysicalDeviceFeatures2KHR *pFeatures)
605 {
606 vk_foreach_struct(ext, pFeatures->pNext)
607 {
608 switch (ext->sType) {
609 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VARIABLE_POINTER_FEATURES_KHR: {
610 VkPhysicalDeviceVariablePointerFeaturesKHR *features = (void *) ext;
611 features->variablePointersStorageBuffer = false;
612 features->variablePointers = false;
613 break;
614 }
615 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MULTIVIEW_FEATURES_KHR: {
616 VkPhysicalDeviceMultiviewFeaturesKHR *features =
617 (VkPhysicalDeviceMultiviewFeaturesKHR *) ext;
618 features->multiview = false;
619 features->multiviewGeometryShader = false;
620 features->multiviewTessellationShader = false;
621 break;
622 }
623 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_DRAW_PARAMETER_FEATURES: {
624 VkPhysicalDeviceShaderDrawParameterFeatures *features =
625 (VkPhysicalDeviceShaderDrawParameterFeatures *) ext;
626 features->shaderDrawParameters = false;
627 break;
628 }
629 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROTECTED_MEMORY_FEATURES: {
630 VkPhysicalDeviceProtectedMemoryFeatures *features =
631 (VkPhysicalDeviceProtectedMemoryFeatures *) ext;
632 features->protectedMemory = false;
633 break;
634 }
635 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_16BIT_STORAGE_FEATURES: {
636 VkPhysicalDevice16BitStorageFeatures *features =
637 (VkPhysicalDevice16BitStorageFeatures *) ext;
638 features->storageBuffer16BitAccess = false;
639 features->uniformAndStorageBuffer16BitAccess = false;
640 features->storagePushConstant16 = false;
641 features->storageInputOutput16 = false;
642 break;
643 }
644 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SAMPLER_YCBCR_CONVERSION_FEATURES: {
645 VkPhysicalDeviceSamplerYcbcrConversionFeatures *features =
646 (VkPhysicalDeviceSamplerYcbcrConversionFeatures *) ext;
647 features->samplerYcbcrConversion = false;
648 break;
649 }
650 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DESCRIPTOR_INDEXING_FEATURES_EXT: {
651 VkPhysicalDeviceDescriptorIndexingFeaturesEXT *features =
652 (VkPhysicalDeviceDescriptorIndexingFeaturesEXT *) ext;
653 features->shaderInputAttachmentArrayDynamicIndexing = false;
654 features->shaderUniformTexelBufferArrayDynamicIndexing = false;
655 features->shaderStorageTexelBufferArrayDynamicIndexing = false;
656 features->shaderUniformBufferArrayNonUniformIndexing = false;
657 features->shaderSampledImageArrayNonUniformIndexing = false;
658 features->shaderStorageBufferArrayNonUniformIndexing = false;
659 features->shaderStorageImageArrayNonUniformIndexing = false;
660 features->shaderInputAttachmentArrayNonUniformIndexing = false;
661 features->shaderUniformTexelBufferArrayNonUniformIndexing = false;
662 features->shaderStorageTexelBufferArrayNonUniformIndexing = false;
663 features->descriptorBindingUniformBufferUpdateAfterBind = false;
664 features->descriptorBindingSampledImageUpdateAfterBind = false;
665 features->descriptorBindingStorageImageUpdateAfterBind = false;
666 features->descriptorBindingStorageBufferUpdateAfterBind = false;
667 features->descriptorBindingUniformTexelBufferUpdateAfterBind = false;
668 features->descriptorBindingStorageTexelBufferUpdateAfterBind = false;
669 features->descriptorBindingUpdateUnusedWhilePending = false;
670 features->descriptorBindingPartiallyBound = false;
671 features->descriptorBindingVariableDescriptorCount = false;
672 features->runtimeDescriptorArray = false;
673 break;
674 }
675 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_CONDITIONAL_RENDERING_FEATURES_EXT: {
676 VkPhysicalDeviceConditionalRenderingFeaturesEXT *features =
677 (VkPhysicalDeviceConditionalRenderingFeaturesEXT *) ext;
678 features->conditionalRendering = false;
679 features->inheritedConditionalRendering = false;
680 break;
681 }
682 default:
683 break;
684 }
685 }
686 return tu_GetPhysicalDeviceFeatures(physicalDevice, &pFeatures->features);
687 }
688
689 void
690 tu_GetPhysicalDeviceProperties(VkPhysicalDevice physicalDevice,
691 VkPhysicalDeviceProperties *pProperties)
692 {
693 TU_FROM_HANDLE(tu_physical_device, pdevice, physicalDevice);
694 VkSampleCountFlags sample_counts = 0xf;
695
 696    /* Make sure that the entire descriptor set is addressable with a signed
 697     * 32-bit int, so the sum of all limits scaled by descriptor size has to
 698     * be at most 2 GiB. A combined image & sampler object counts as one of
 699     * both. This limit is for the pipeline layout, not for the set layout, but
 700     * there is no set limit, so we just set a pipeline limit. I don't think
 701     * any app is going to hit this soon. */
702 size_t max_descriptor_set_size =
703 ((1ull << 31) - 16 * MAX_DYNAMIC_BUFFERS) /
704 (32 /* uniform buffer, 32 due to potential space wasted on alignment */ +
705 32 /* storage buffer, 32 due to potential space wasted on alignment */ +
706 32 /* sampler, largest when combined with image */ +
707 64 /* sampled image */ + 64 /* storage image */);
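   /* With the weights above this works out to roughly 2^31 / 224, i.e. on
    * the order of 9.5 million descriptors per stage.
    */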
708
709 VkPhysicalDeviceLimits limits = {
710 .maxImageDimension1D = (1 << 14),
711 .maxImageDimension2D = (1 << 14),
712 .maxImageDimension3D = (1 << 11),
713 .maxImageDimensionCube = (1 << 14),
714 .maxImageArrayLayers = (1 << 11),
715 .maxTexelBufferElements = 128 * 1024 * 1024,
716 .maxUniformBufferRange = UINT32_MAX,
717 .maxStorageBufferRange = UINT32_MAX,
718 .maxPushConstantsSize = MAX_PUSH_CONSTANTS_SIZE,
719 .maxMemoryAllocationCount = UINT32_MAX,
720 .maxSamplerAllocationCount = 64 * 1024,
721 .bufferImageGranularity = 64, /* A cache line */
722 .sparseAddressSpaceSize = 0xffffffffu, /* buffer max size */
723 .maxBoundDescriptorSets = MAX_SETS,
724 .maxPerStageDescriptorSamplers = max_descriptor_set_size,
725 .maxPerStageDescriptorUniformBuffers = max_descriptor_set_size,
726 .maxPerStageDescriptorStorageBuffers = max_descriptor_set_size,
727 .maxPerStageDescriptorSampledImages = max_descriptor_set_size,
728 .maxPerStageDescriptorStorageImages = max_descriptor_set_size,
729 .maxPerStageDescriptorInputAttachments = max_descriptor_set_size,
730 .maxPerStageResources = max_descriptor_set_size,
731 .maxDescriptorSetSamplers = max_descriptor_set_size,
732 .maxDescriptorSetUniformBuffers = max_descriptor_set_size,
733 .maxDescriptorSetUniformBuffersDynamic = MAX_DYNAMIC_UNIFORM_BUFFERS,
734 .maxDescriptorSetStorageBuffers = max_descriptor_set_size,
735 .maxDescriptorSetStorageBuffersDynamic = MAX_DYNAMIC_STORAGE_BUFFERS,
736 .maxDescriptorSetSampledImages = max_descriptor_set_size,
737 .maxDescriptorSetStorageImages = max_descriptor_set_size,
738 .maxDescriptorSetInputAttachments = max_descriptor_set_size,
739 .maxVertexInputAttributes = 32,
740 .maxVertexInputBindings = 32,
741 .maxVertexInputAttributeOffset = 2047,
742 .maxVertexInputBindingStride = 2048,
743 .maxVertexOutputComponents = 128,
744 .maxTessellationGenerationLevel = 64,
745 .maxTessellationPatchSize = 32,
746 .maxTessellationControlPerVertexInputComponents = 128,
747 .maxTessellationControlPerVertexOutputComponents = 128,
748 .maxTessellationControlPerPatchOutputComponents = 120,
749 .maxTessellationControlTotalOutputComponents = 4096,
750 .maxTessellationEvaluationInputComponents = 128,
751 .maxTessellationEvaluationOutputComponents = 128,
752 .maxGeometryShaderInvocations = 127,
753 .maxGeometryInputComponents = 64,
754 .maxGeometryOutputComponents = 128,
755 .maxGeometryOutputVertices = 256,
756 .maxGeometryTotalOutputComponents = 1024,
757 .maxFragmentInputComponents = 128,
758 .maxFragmentOutputAttachments = 8,
759 .maxFragmentDualSrcAttachments = 1,
760 .maxFragmentCombinedOutputResources = 8,
761 .maxComputeSharedMemorySize = 32768,
762 .maxComputeWorkGroupCount = { 65535, 65535, 65535 },
763 .maxComputeWorkGroupInvocations = 2048,
764 .maxComputeWorkGroupSize = { 2048, 2048, 2048 },
765 .subPixelPrecisionBits = 4 /* FIXME */,
766 .subTexelPrecisionBits = 4 /* FIXME */,
767 .mipmapPrecisionBits = 4 /* FIXME */,
768 .maxDrawIndexedIndexValue = UINT32_MAX,
769 .maxDrawIndirectCount = UINT32_MAX,
770 .maxSamplerLodBias = 16,
771 .maxSamplerAnisotropy = 16,
772 .maxViewports = MAX_VIEWPORTS,
773 .maxViewportDimensions = { (1 << 14), (1 << 14) },
774 .viewportBoundsRange = { INT16_MIN, INT16_MAX },
775 .viewportSubPixelBits = 8,
776 .minMemoryMapAlignment = 4096, /* A page */
777 .minTexelBufferOffsetAlignment = 1,
778 .minUniformBufferOffsetAlignment = 4,
779 .minStorageBufferOffsetAlignment = 4,
780 .minTexelOffset = -32,
781 .maxTexelOffset = 31,
782 .minTexelGatherOffset = -32,
783 .maxTexelGatherOffset = 31,
784 .minInterpolationOffset = -2,
785 .maxInterpolationOffset = 2,
786 .subPixelInterpolationOffsetBits = 8,
787 .maxFramebufferWidth = (1 << 14),
788 .maxFramebufferHeight = (1 << 14),
789 .maxFramebufferLayers = (1 << 10),
790 .framebufferColorSampleCounts = sample_counts,
791 .framebufferDepthSampleCounts = sample_counts,
792 .framebufferStencilSampleCounts = sample_counts,
793 .framebufferNoAttachmentsSampleCounts = sample_counts,
794 .maxColorAttachments = MAX_RTS,
795 .sampledImageColorSampleCounts = sample_counts,
796 .sampledImageIntegerSampleCounts = VK_SAMPLE_COUNT_1_BIT,
797 .sampledImageDepthSampleCounts = sample_counts,
798 .sampledImageStencilSampleCounts = sample_counts,
799 .storageImageSampleCounts = VK_SAMPLE_COUNT_1_BIT,
800 .maxSampleMaskWords = 1,
801 .timestampComputeAndGraphics = true,
802 .timestampPeriod = 1,
803 .maxClipDistances = 8,
804 .maxCullDistances = 8,
805 .maxCombinedClipAndCullDistances = 8,
806 .discreteQueuePriorities = 1,
807 .pointSizeRange = { 0.125, 255.875 },
808 .lineWidthRange = { 0.0, 7.9921875 },
809 .pointSizeGranularity = (1.0 / 8.0),
810 .lineWidthGranularity = (1.0 / 128.0),
811 .strictLines = false, /* FINISHME */
812 .standardSampleLocations = true,
813 .optimalBufferCopyOffsetAlignment = 128,
814 .optimalBufferCopyRowPitchAlignment = 128,
815 .nonCoherentAtomSize = 64,
816 };
817
818 *pProperties = (VkPhysicalDeviceProperties) {
819 .apiVersion = tu_physical_device_api_version(pdevice),
820 .driverVersion = vk_get_driver_version(),
821 .vendorID = 0, /* TODO */
822 .deviceID = 0,
823 .deviceType = VK_PHYSICAL_DEVICE_TYPE_INTEGRATED_GPU,
824 .limits = limits,
825 .sparseProperties = { 0 },
826 };
827
828 strcpy(pProperties->deviceName, pdevice->name);
829 memcpy(pProperties->pipelineCacheUUID, pdevice->cache_uuid, VK_UUID_SIZE);
830 }
831
832 void
833 tu_GetPhysicalDeviceProperties2(VkPhysicalDevice physicalDevice,
834 VkPhysicalDeviceProperties2KHR *pProperties)
835 {
836 TU_FROM_HANDLE(tu_physical_device, pdevice, physicalDevice);
837 tu_GetPhysicalDeviceProperties(physicalDevice, &pProperties->properties);
838
839 vk_foreach_struct(ext, pProperties->pNext)
840 {
841 switch (ext->sType) {
842 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PUSH_DESCRIPTOR_PROPERTIES_KHR: {
843 VkPhysicalDevicePushDescriptorPropertiesKHR *properties =
844 (VkPhysicalDevicePushDescriptorPropertiesKHR *) ext;
845 properties->maxPushDescriptors = MAX_PUSH_DESCRIPTORS;
846 break;
847 }
848 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_ID_PROPERTIES_KHR: {
849 VkPhysicalDeviceIDPropertiesKHR *properties =
850 (VkPhysicalDeviceIDPropertiesKHR *) ext;
851 memcpy(properties->driverUUID, pdevice->driver_uuid, VK_UUID_SIZE);
852 memcpy(properties->deviceUUID, pdevice->device_uuid, VK_UUID_SIZE);
853 properties->deviceLUIDValid = false;
854 break;
855 }
856 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MULTIVIEW_PROPERTIES_KHR: {
857 VkPhysicalDeviceMultiviewPropertiesKHR *properties =
858 (VkPhysicalDeviceMultiviewPropertiesKHR *) ext;
859 properties->maxMultiviewViewCount = MAX_VIEWS;
860 properties->maxMultiviewInstanceIndex = INT_MAX;
861 break;
862 }
863 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_POINT_CLIPPING_PROPERTIES_KHR: {
864 VkPhysicalDevicePointClippingPropertiesKHR *properties =
865 (VkPhysicalDevicePointClippingPropertiesKHR *) ext;
866 properties->pointClippingBehavior =
867 VK_POINT_CLIPPING_BEHAVIOR_ALL_CLIP_PLANES_KHR;
868 break;
869 }
870 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MAINTENANCE_3_PROPERTIES: {
871 VkPhysicalDeviceMaintenance3Properties *properties =
872 (VkPhysicalDeviceMaintenance3Properties *) ext;
873 /* Make sure everything is addressable by a signed 32-bit int, and
874 * our largest descriptors are 96 bytes. */
875 properties->maxPerSetDescriptors = (1ull << 31) / 96;
876 /* Our buffer size fields allow only this much */
877 properties->maxMemoryAllocationSize = 0xFFFFFFFFull;
878 break;
879 }
880 default:
881 break;
882 }
883 }
884 }
885
886 static const VkQueueFamilyProperties tu_queue_family_properties = {
887 .queueFlags =
888 VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT | VK_QUEUE_TRANSFER_BIT,
889 .queueCount = 1,
890 .timestampValidBits = 64,
891 .minImageTransferGranularity = (VkExtent3D) { 1, 1, 1 },
892 };
893
894 void
895 tu_GetPhysicalDeviceQueueFamilyProperties(
896 VkPhysicalDevice physicalDevice,
897 uint32_t *pQueueFamilyPropertyCount,
898 VkQueueFamilyProperties *pQueueFamilyProperties)
899 {
900 VK_OUTARRAY_MAKE(out, pQueueFamilyProperties, pQueueFamilyPropertyCount);
901
902 vk_outarray_append(&out, p) { *p = tu_queue_family_properties; }
903 }
904
905 void
906 tu_GetPhysicalDeviceQueueFamilyProperties2(
907 VkPhysicalDevice physicalDevice,
908 uint32_t *pQueueFamilyPropertyCount,
909 VkQueueFamilyProperties2KHR *pQueueFamilyProperties)
910 {
911 VK_OUTARRAY_MAKE(out, pQueueFamilyProperties, pQueueFamilyPropertyCount);
912
913 vk_outarray_append(&out, p)
914 {
915 p->queueFamilyProperties = tu_queue_family_properties;
916 }
917 }
918
919 static uint64_t
920 tu_get_system_heap_size()
921 {
922 struct sysinfo info;
923 sysinfo(&info);
924
925 uint64_t total_ram = (uint64_t) info.totalram * (uint64_t) info.mem_unit;
926
927 /* We don't want to burn too much ram with the GPU. If the user has 4GiB
928 * or less, we use at most half. If they have more than 4GiB, we use 3/4.
929 */
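   /* E.g. 4 GiB of RAM yields a 2 GiB heap, 8 GiB yields a 6 GiB heap. */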
930 uint64_t available_ram;
931 if (total_ram <= 4ull * 1024ull * 1024ull * 1024ull)
932 available_ram = total_ram / 2;
933 else
934 available_ram = total_ram * 3 / 4;
935
936 return available_ram;
937 }
938
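/*
 * A single heap/type pair is advertised: system RAM that is at once
 * device-local, host-visible and host-coherent, since the GPU shares
 * memory with the CPU on these SoCs.
 */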
939 void
940 tu_GetPhysicalDeviceMemoryProperties(
941 VkPhysicalDevice physicalDevice,
942 VkPhysicalDeviceMemoryProperties *pMemoryProperties)
943 {
944 pMemoryProperties->memoryHeapCount = 1;
945 pMemoryProperties->memoryHeaps[0].size = tu_get_system_heap_size();
946 pMemoryProperties->memoryHeaps[0].flags = VK_MEMORY_HEAP_DEVICE_LOCAL_BIT;
947
948 pMemoryProperties->memoryTypeCount = 1;
949 pMemoryProperties->memoryTypes[0].propertyFlags =
950 VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT |
951 VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT |
952 VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
953 pMemoryProperties->memoryTypes[0].heapIndex = 0;
954 }
955
956 void
957 tu_GetPhysicalDeviceMemoryProperties2(
958 VkPhysicalDevice physicalDevice,
959 VkPhysicalDeviceMemoryProperties2KHR *pMemoryProperties)
960 {
961 return tu_GetPhysicalDeviceMemoryProperties(
962 physicalDevice, &pMemoryProperties->memoryProperties);
963 }
964
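/*
 * Each VkQueue gets its own msm submitqueue; the fence fd of the most
 * recent submission is kept in submit_fence_fd so that QueueWaitIdle can
 * sync_wait() on it.
 */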
965 static VkResult
966 tu_queue_init(struct tu_device *device,
967 struct tu_queue *queue,
968 uint32_t queue_family_index,
969 int idx,
970 VkDeviceQueueCreateFlags flags)
971 {
972 queue->_loader_data.loaderMagic = ICD_LOADER_MAGIC;
973 queue->device = device;
974 queue->queue_family_index = queue_family_index;
975 queue->queue_idx = idx;
976 queue->flags = flags;
977
978 int ret = tu_drm_submitqueue_new(device, 0, &queue->msm_queue_id);
979 if (ret)
980 return VK_ERROR_INITIALIZATION_FAILED;
981
982 queue->submit_fence_fd = -1;
983
984 return VK_SUCCESS;
985 }
986
987 static void
988 tu_queue_finish(struct tu_queue *queue)
989 {
990 if (queue->submit_fence_fd >= 0) {
991 close(queue->submit_fence_fd);
992 }
993 tu_drm_submitqueue_close(queue->device, queue->msm_queue_id);
994 }
995
996 static int
997 tu_get_device_extension_index(const char *name)
998 {
999 for (unsigned i = 0; i < TU_DEVICE_EXTENSION_COUNT; ++i) {
1000 if (strcmp(name, tu_device_extensions[i].extensionName) == 0)
1001 return i;
1002 }
1003 return -1;
1004 }
1005
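/*
 * Device creation: reject unsupported features and extensions, allocate
 * the per-family queue arrays, and create an internal pipeline cache
 * (device->mem_cache).
 */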
1006 VkResult
1007 tu_CreateDevice(VkPhysicalDevice physicalDevice,
1008 const VkDeviceCreateInfo *pCreateInfo,
1009 const VkAllocationCallbacks *pAllocator,
1010 VkDevice *pDevice)
1011 {
1012 TU_FROM_HANDLE(tu_physical_device, physical_device, physicalDevice);
1013 VkResult result;
1014 struct tu_device *device;
1015
1016 /* Check enabled features */
1017 if (pCreateInfo->pEnabledFeatures) {
1018 VkPhysicalDeviceFeatures supported_features;
1019 tu_GetPhysicalDeviceFeatures(physicalDevice, &supported_features);
1020 VkBool32 *supported_feature = (VkBool32 *) &supported_features;
1021 VkBool32 *enabled_feature = (VkBool32 *) pCreateInfo->pEnabledFeatures;
1022 unsigned num_features =
1023 sizeof(VkPhysicalDeviceFeatures) / sizeof(VkBool32);
1024 for (uint32_t i = 0; i < num_features; i++) {
1025 if (enabled_feature[i] && !supported_feature[i])
1026 return vk_error(physical_device->instance,
1027 VK_ERROR_FEATURE_NOT_PRESENT);
1028 }
1029 }
1030
1031 device = vk_zalloc2(&physical_device->instance->alloc, pAllocator,
1032 sizeof(*device), 8, VK_SYSTEM_ALLOCATION_SCOPE_DEVICE);
1033 if (!device)
1034 return vk_error(physical_device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
1035
1036 device->_loader_data.loaderMagic = ICD_LOADER_MAGIC;
1037 device->instance = physical_device->instance;
1038 device->physical_device = physical_device;
1039
1040 if (pAllocator)
1041 device->alloc = *pAllocator;
1042 else
1043 device->alloc = physical_device->instance->alloc;
1044
1045 for (uint32_t i = 0; i < pCreateInfo->enabledExtensionCount; i++) {
1046 const char *ext_name = pCreateInfo->ppEnabledExtensionNames[i];
1047 int index = tu_get_device_extension_index(ext_name);
1048 if (index < 0 ||
1049 !physical_device->supported_extensions.extensions[index]) {
1050 vk_free(&device->alloc, device);
1051 return vk_error(physical_device->instance,
1052 VK_ERROR_EXTENSION_NOT_PRESENT);
1053 }
1054
1055 device->enabled_extensions.extensions[index] = true;
1056 }
1057
1058 for (unsigned i = 0; i < pCreateInfo->queueCreateInfoCount; i++) {
1059 const VkDeviceQueueCreateInfo *queue_create =
1060 &pCreateInfo->pQueueCreateInfos[i];
1061 uint32_t qfi = queue_create->queueFamilyIndex;
1062 device->queues[qfi] = vk_alloc(
1063 &device->alloc, queue_create->queueCount * sizeof(struct tu_queue),
1064 8, VK_SYSTEM_ALLOCATION_SCOPE_DEVICE);
1065 if (!device->queues[qfi]) {
1066 result = VK_ERROR_OUT_OF_HOST_MEMORY;
1067 goto fail;
1068 }
1069
1070 memset(device->queues[qfi], 0,
1071 queue_create->queueCount * sizeof(struct tu_queue));
1072
1073 device->queue_count[qfi] = queue_create->queueCount;
1074
1075 for (unsigned q = 0; q < queue_create->queueCount; q++) {
1076 result = tu_queue_init(device, &device->queues[qfi][q], qfi, q,
1077 queue_create->flags);
1078 if (result != VK_SUCCESS)
1079 goto fail;
1080 }
1081 }
1082
1083 VkPipelineCacheCreateInfo ci;
1084 ci.sType = VK_STRUCTURE_TYPE_PIPELINE_CACHE_CREATE_INFO;
1085 ci.pNext = NULL;
1086 ci.flags = 0;
1087 ci.pInitialData = NULL;
1088 ci.initialDataSize = 0;
1089 VkPipelineCache pc;
1090 result =
1091 tu_CreatePipelineCache(tu_device_to_handle(device), &ci, NULL, &pc);
1092 if (result != VK_SUCCESS)
1093 goto fail;
1094
1095 device->mem_cache = tu_pipeline_cache_from_handle(pc);
1096
1097 *pDevice = tu_device_to_handle(device);
1098 return VK_SUCCESS;
1099
1100 fail:
1101 for (unsigned i = 0; i < TU_MAX_QUEUE_FAMILIES; i++) {
1102 for (unsigned q = 0; q < device->queue_count[i]; q++)
1103 tu_queue_finish(&device->queues[i][q]);
1104 if (device->queue_count[i])
1105 vk_free(&device->alloc, device->queues[i]);
1106 }
1107
1108 vk_free(&device->alloc, device);
1109 return result;
1110 }
1111
1112 void
1113 tu_DestroyDevice(VkDevice _device, const VkAllocationCallbacks *pAllocator)
1114 {
1115 TU_FROM_HANDLE(tu_device, device, _device);
1116
1117 if (!device)
1118 return;
1119
1120 for (unsigned i = 0; i < TU_MAX_QUEUE_FAMILIES; i++) {
1121 for (unsigned q = 0; q < device->queue_count[i]; q++)
1122 tu_queue_finish(&device->queues[i][q]);
1123 if (device->queue_count[i])
1124 vk_free(&device->alloc, device->queues[i]);
1125 }
1126
1127 VkPipelineCache pc = tu_pipeline_cache_to_handle(device->mem_cache);
1128 tu_DestroyPipelineCache(tu_device_to_handle(device), pc, NULL);
1129
1130 vk_free(&device->alloc, device);
1131 }
1132
1133 VkResult
1134 tu_EnumerateInstanceLayerProperties(uint32_t *pPropertyCount,
1135 VkLayerProperties *pProperties)
1136 {
1137 *pPropertyCount = 0;
1138 return VK_SUCCESS;
1139 }
1140
1141 VkResult
1142 tu_EnumerateDeviceLayerProperties(VkPhysicalDevice physicalDevice,
1143 uint32_t *pPropertyCount,
1144 VkLayerProperties *pProperties)
1145 {
1146 *pPropertyCount = 0;
1147 return VK_SUCCESS;
1148 }
1149
1150 void
1151 tu_GetDeviceQueue2(VkDevice _device,
1152 const VkDeviceQueueInfo2 *pQueueInfo,
1153 VkQueue *pQueue)
1154 {
1155 TU_FROM_HANDLE(tu_device, device, _device);
1156 struct tu_queue *queue;
1157
1158 queue =
1159 &device->queues[pQueueInfo->queueFamilyIndex][pQueueInfo->queueIndex];
1160 if (pQueueInfo->flags != queue->flags) {
1161 /* From the Vulkan 1.1.70 spec:
1162 *
1163 * "The queue returned by vkGetDeviceQueue2 must have the same
1164 * flags value from this structure as that used at device
1165 * creation time in a VkDeviceQueueCreateInfo instance. If no
1166 * matching flags were specified at device creation time then
1167 * pQueue will return VK_NULL_HANDLE."
1168 */
1169 *pQueue = VK_NULL_HANDLE;
1170 return;
1171 }
1172
1173 *pQueue = tu_queue_to_handle(queue);
1174 }
1175
1176 void
1177 tu_GetDeviceQueue(VkDevice _device,
1178 uint32_t queueFamilyIndex,
1179 uint32_t queueIndex,
1180 VkQueue *pQueue)
1181 {
1182 const VkDeviceQueueInfo2 info =
1183 (VkDeviceQueueInfo2) { .sType = VK_STRUCTURE_TYPE_DEVICE_QUEUE_INFO_2,
1184 .queueFamilyIndex = queueFamilyIndex,
1185 .queueIndex = queueIndex };
1186
1187 tu_GetDeviceQueue2(_device, &info, pQueue);
1188 }
1189
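/*
 * Each VkSubmitInfo becomes one DRM_MSM_GEM_SUBMIT ioctl: every command
 * stream entry of every command buffer is flattened into the cmds[] array,
 * the BO lists are merged, and only the last submit asks the kernel for an
 * out-fence fd (MSM_SUBMIT_FENCE_FD_OUT), which replaces the queue's
 * previous fence fd.
 */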
1190 VkResult
1191 tu_QueueSubmit(VkQueue _queue,
1192 uint32_t submitCount,
1193 const VkSubmitInfo *pSubmits,
1194 VkFence _fence)
1195 {
1196 TU_FROM_HANDLE(tu_queue, queue, _queue);
1197
1198 for (uint32_t i = 0; i < submitCount; ++i) {
1199 const VkSubmitInfo *submit = pSubmits + i;
1200 const bool last_submit = (i == submitCount - 1);
1201 struct tu_bo_list bo_list;
1202 tu_bo_list_init(&bo_list);
1203
1204 uint32_t entry_count = 0;
1205 for (uint32_t j = 0; j < submit->commandBufferCount; ++j) {
1206 TU_FROM_HANDLE(tu_cmd_buffer, cmdbuf, submit->pCommandBuffers[j]);
1207 entry_count += cmdbuf->cs.entry_count;
1208 }
1209
1210 struct drm_msm_gem_submit_cmd cmds[entry_count];
1211 uint32_t entry_idx = 0;
1212 for (uint32_t j = 0; j < submit->commandBufferCount; ++j) {
1213 TU_FROM_HANDLE(tu_cmd_buffer, cmdbuf, submit->pCommandBuffers[j]);
1214 struct tu_cs *cs = &cmdbuf->cs;
1215 for (unsigned i = 0; i < cs->entry_count; ++i, ++entry_idx) {
1216 cmds[entry_idx].type = MSM_SUBMIT_CMD_BUF;
1217 cmds[entry_idx].submit_idx = tu_bo_list_add(
1218 &bo_list, cs->entries[i].bo, MSM_SUBMIT_BO_READ);
1219 cmds[entry_idx].submit_offset = cs->entries[i].offset;
1220 cmds[entry_idx].size = cs->entries[i].size;
1221 cmds[entry_idx].pad = 0;
1222 cmds[entry_idx].nr_relocs = 0;
1223 cmds[entry_idx].relocs = 0;
1224 }
1225
1226 tu_bo_list_merge(&bo_list, &cmdbuf->bo_list);
1227 }
1228
1229 uint32_t flags = MSM_PIPE_3D0;
1230 if (last_submit) {
1231 flags |= MSM_SUBMIT_FENCE_FD_OUT;
1232 }
1233
1234 struct drm_msm_gem_submit req = {
1235 .flags = flags,
1236 .queueid = queue->msm_queue_id,
1237 .bos = (uint64_t)(uintptr_t) bo_list.bo_infos,
1238 .nr_bos = bo_list.count,
1239 .cmds = (uint64_t)(uintptr_t)cmds,
1240 .nr_cmds = entry_count,
1241 };
1242
1243 int ret = drmCommandWriteRead(queue->device->physical_device->local_fd,
1244 DRM_MSM_GEM_SUBMIT,
1245 &req, sizeof(req));
1246 if (ret) {
1247 fprintf(stderr, "submit failed: %s\n", strerror(errno));
1248 abort();
1249 }
1250
1251 tu_bo_list_destroy(&bo_list);
1252
1253 if (last_submit) {
1254 /* no need to merge fences as queue execution is serialized */
1255 if (queue->submit_fence_fd >= 0) {
1256 close(queue->submit_fence_fd);
1257 }
1258 queue->submit_fence_fd = req.fence_fd;
1259 }
1260 }
1261 return VK_SUCCESS;
1262 }
1263
1264 VkResult
1265 tu_QueueWaitIdle(VkQueue _queue)
1266 {
1267 TU_FROM_HANDLE(tu_queue, queue, _queue);
1268
1269 if (queue->submit_fence_fd >= 0) {
1270 int ret = sync_wait(queue->submit_fence_fd, -1);
1271 if (ret)
1272 tu_loge("sync_wait on fence fd %d failed", queue->submit_fence_fd);
1273
1274 close(queue->submit_fence_fd);
1275 queue->submit_fence_fd = -1;
1276 }
1277
1278 return VK_SUCCESS;
1279 }
1280
1281 VkResult
1282 tu_DeviceWaitIdle(VkDevice _device)
1283 {
1284 TU_FROM_HANDLE(tu_device, device, _device);
1285
1286 for (unsigned i = 0; i < TU_MAX_QUEUE_FAMILIES; i++) {
1287 for (unsigned q = 0; q < device->queue_count[i]; q++) {
1288 tu_QueueWaitIdle(tu_queue_to_handle(&device->queues[i][q]));
1289 }
1290 }
1291 return VK_SUCCESS;
1292 }
1293
1294 VkResult
1295 tu_EnumerateInstanceExtensionProperties(const char *pLayerName,
1296 uint32_t *pPropertyCount,
1297 VkExtensionProperties *pProperties)
1298 {
1299 VK_OUTARRAY_MAKE(out, pProperties, pPropertyCount);
1300
 1301    /* We support no layers */
1302 if (pLayerName)
1303 return vk_error(NULL, VK_ERROR_LAYER_NOT_PRESENT);
1304
1305 for (int i = 0; i < TU_INSTANCE_EXTENSION_COUNT; i++) {
1306 if (tu_supported_instance_extensions.extensions[i]) {
1307 vk_outarray_append(&out, prop) { *prop = tu_instance_extensions[i]; }
1308 }
1309 }
1310
1311 return vk_outarray_status(&out);
1312 }
1313
1314 VkResult
1315 tu_EnumerateDeviceExtensionProperties(VkPhysicalDevice physicalDevice,
1316 const char *pLayerName,
1317 uint32_t *pPropertyCount,
1318 VkExtensionProperties *pProperties)
1319 {
 1320    /* We support no layers */
1321 TU_FROM_HANDLE(tu_physical_device, device, physicalDevice);
1322 VK_OUTARRAY_MAKE(out, pProperties, pPropertyCount);
1323
 1324    /* We support no layers */
1325 if (pLayerName)
1326 return vk_error(NULL, VK_ERROR_LAYER_NOT_PRESENT);
1327
1328 for (int i = 0; i < TU_DEVICE_EXTENSION_COUNT; i++) {
1329 if (device->supported_extensions.extensions[i]) {
1330 vk_outarray_append(&out, prop) { *prop = tu_device_extensions[i]; }
1331 }
1332 }
1333
1334 return vk_outarray_status(&out);
1335 }
1336
1337 PFN_vkVoidFunction
1338 tu_GetInstanceProcAddr(VkInstance _instance, const char *pName)
1339 {
1340 TU_FROM_HANDLE(tu_instance, instance, _instance);
1341
1342 return tu_lookup_entrypoint_checked(
1343 pName, instance ? instance->api_version : 0,
1344 instance ? &instance->enabled_extensions : NULL, NULL);
1345 }
1346
1347 /* The loader wants us to expose a second GetInstanceProcAddr function
1348 * to work around certain LD_PRELOAD issues seen in apps.
1349 */
1350 PUBLIC
1351 VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL
1352 vk_icdGetInstanceProcAddr(VkInstance instance, const char *pName);
1353
1354 PUBLIC
1355 VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL
1356 vk_icdGetInstanceProcAddr(VkInstance instance, const char *pName)
1357 {
1358 return tu_GetInstanceProcAddr(instance, pName);
1359 }
1360
1361 PFN_vkVoidFunction
1362 tu_GetDeviceProcAddr(VkDevice _device, const char *pName)
1363 {
1364 TU_FROM_HANDLE(tu_device, device, _device);
1365
1366 return tu_lookup_entrypoint_checked(pName, device->instance->api_version,
1367 &device->instance->enabled_extensions,
1368 &device->enabled_extensions);
1369 }
1370
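/*
 * Device memory is just a tu_bo: either imported from an opaque/dma-buf fd
 * (in which case the fd is closed once ownership is taken) or freshly
 * allocated with tu_bo_init_new().
 */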
1371 static VkResult
1372 tu_alloc_memory(struct tu_device *device,
1373 const VkMemoryAllocateInfo *pAllocateInfo,
1374 const VkAllocationCallbacks *pAllocator,
1375 VkDeviceMemory *pMem)
1376 {
1377 struct tu_device_memory *mem;
1378 VkResult result;
1379
1380 assert(pAllocateInfo->sType == VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO);
1381
1382 if (pAllocateInfo->allocationSize == 0) {
1383 /* Apparently, this is allowed */
1384 *pMem = VK_NULL_HANDLE;
1385 return VK_SUCCESS;
1386 }
1387
1388 mem = vk_alloc2(&device->alloc, pAllocator, sizeof(*mem), 8,
1389 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
1390 if (mem == NULL)
1391 return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
1392
1393 const VkImportMemoryFdInfoKHR *fd_info =
1394 vk_find_struct_const(pAllocateInfo->pNext, IMPORT_MEMORY_FD_INFO_KHR);
1395 if (fd_info && !fd_info->handleType)
1396 fd_info = NULL;
1397
1398 if (fd_info) {
1399 assert(fd_info->handleType ==
1400 VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT ||
1401 fd_info->handleType ==
1402 VK_EXTERNAL_MEMORY_HANDLE_TYPE_DMA_BUF_BIT_EXT);
1403
1404 /*
1405 * TODO Importing the same fd twice gives us the same handle without
1406 * reference counting. We need to maintain a per-instance handle-to-bo
 1407        * table and add a reference count to tu_bo.
1408 */
1409 result = tu_bo_init_dmabuf(device, &mem->bo,
1410 pAllocateInfo->allocationSize, fd_info->fd);
1411 if (result == VK_SUCCESS) {
1412 /* take ownership and close the fd */
1413 close(fd_info->fd);
1414 }
1415 } else {
1416 result =
1417 tu_bo_init_new(device, &mem->bo, pAllocateInfo->allocationSize);
1418 }
1419
1420 if (result != VK_SUCCESS) {
1421 vk_free2(&device->alloc, pAllocator, mem);
1422 return result;
1423 }
1424
1425 mem->size = pAllocateInfo->allocationSize;
1426 mem->type_index = pAllocateInfo->memoryTypeIndex;
1427
1428 mem->map = NULL;
1429 mem->user_ptr = NULL;
1430
1431 *pMem = tu_device_memory_to_handle(mem);
1432
1433 return VK_SUCCESS;
1434 }
1435
1436 VkResult
1437 tu_AllocateMemory(VkDevice _device,
1438 const VkMemoryAllocateInfo *pAllocateInfo,
1439 const VkAllocationCallbacks *pAllocator,
1440 VkDeviceMemory *pMem)
1441 {
1442 TU_FROM_HANDLE(tu_device, device, _device);
1443 return tu_alloc_memory(device, pAllocateInfo, pAllocator, pMem);
1444 }
1445
1446 void
1447 tu_FreeMemory(VkDevice _device,
1448 VkDeviceMemory _mem,
1449 const VkAllocationCallbacks *pAllocator)
1450 {
1451 TU_FROM_HANDLE(tu_device, device, _device);
1452 TU_FROM_HANDLE(tu_device_memory, mem, _mem);
1453
1454 if (mem == NULL)
1455 return;
1456
1457 tu_bo_finish(device, &mem->bo);
1458 vk_free2(&device->alloc, pAllocator, mem);
1459 }
1460
1461 VkResult
1462 tu_MapMemory(VkDevice _device,
1463 VkDeviceMemory _memory,
1464 VkDeviceSize offset,
1465 VkDeviceSize size,
1466 VkMemoryMapFlags flags,
1467 void **ppData)
1468 {
1469 TU_FROM_HANDLE(tu_device, device, _device);
1470 TU_FROM_HANDLE(tu_device_memory, mem, _memory);
1471 VkResult result;
1472
1473 if (mem == NULL) {
1474 *ppData = NULL;
1475 return VK_SUCCESS;
1476 }
1477
1478 if (mem->user_ptr) {
1479 *ppData = mem->user_ptr;
1480 } else if (!mem->map) {
1481 result = tu_bo_map(device, &mem->bo);
1482 if (result != VK_SUCCESS)
1483 return result;
1484 *ppData = mem->map = mem->bo.map;
1485 } else
1486 *ppData = mem->map;
1487
1488 if (*ppData) {
1489 *ppData += offset;
1490 return VK_SUCCESS;
1491 }
1492
1493 return vk_error(device->instance, VK_ERROR_MEMORY_MAP_FAILED);
1494 }
1495
1496 void
1497 tu_UnmapMemory(VkDevice _device, VkDeviceMemory _memory)
1498 {
1499 /* I do not see any unmapping done by the freedreno Gallium driver. */
1500 }
1501
1502 VkResult
1503 tu_FlushMappedMemoryRanges(VkDevice _device,
1504 uint32_t memoryRangeCount,
1505 const VkMappedMemoryRange *pMemoryRanges)
1506 {
1507 return VK_SUCCESS;
1508 }
1509
1510 VkResult
1511 tu_InvalidateMappedMemoryRanges(VkDevice _device,
1512 uint32_t memoryRangeCount,
1513 const VkMappedMemoryRange *pMemoryRanges)
1514 {
1515 return VK_SUCCESS;
1516 }
1517
1518 void
1519 tu_GetBufferMemoryRequirements(VkDevice _device,
1520 VkBuffer _buffer,
1521 VkMemoryRequirements *pMemoryRequirements)
1522 {
1523 TU_FROM_HANDLE(tu_buffer, buffer, _buffer);
1524
1525 pMemoryRequirements->memoryTypeBits = 1;
1526 pMemoryRequirements->alignment = 16;
1527 pMemoryRequirements->size =
1528 align64(buffer->size, pMemoryRequirements->alignment);
1529 }
1530
1531 void
1532 tu_GetBufferMemoryRequirements2(
1533 VkDevice device,
1534 const VkBufferMemoryRequirementsInfo2KHR *pInfo,
1535 VkMemoryRequirements2KHR *pMemoryRequirements)
1536 {
1537 tu_GetBufferMemoryRequirements(device, pInfo->buffer,
1538 &pMemoryRequirements->memoryRequirements);
1539 }
1540
1541 void
1542 tu_GetImageMemoryRequirements(VkDevice _device,
1543 VkImage _image,
1544 VkMemoryRequirements *pMemoryRequirements)
1545 {
1546 TU_FROM_HANDLE(tu_image, image, _image);
1547
1548 pMemoryRequirements->memoryTypeBits = 1;
1549 pMemoryRequirements->size = image->size;
1550 pMemoryRequirements->alignment = image->alignment;
1551 }
1552
1553 void
1554 tu_GetImageMemoryRequirements2(VkDevice device,
1555 const VkImageMemoryRequirementsInfo2KHR *pInfo,
1556 VkMemoryRequirements2KHR *pMemoryRequirements)
1557 {
1558 tu_GetImageMemoryRequirements(device, pInfo->image,
1559 &pMemoryRequirements->memoryRequirements);
1560 }
1561
1562 void
1563 tu_GetImageSparseMemoryRequirements(
1564 VkDevice device,
1565 VkImage image,
1566 uint32_t *pSparseMemoryRequirementCount,
1567 VkSparseImageMemoryRequirements *pSparseMemoryRequirements)
1568 {
1569 tu_stub();
1570 }
1571
1572 void
1573 tu_GetImageSparseMemoryRequirements2(
1574 VkDevice device,
1575 const VkImageSparseMemoryRequirementsInfo2KHR *pInfo,
1576 uint32_t *pSparseMemoryRequirementCount,
1577 VkSparseImageMemoryRequirements2KHR *pSparseMemoryRequirements)
1578 {
1579 tu_stub();
1580 }
1581
1582 void
1583 tu_GetDeviceMemoryCommitment(VkDevice device,
1584 VkDeviceMemory memory,
1585 VkDeviceSize *pCommittedMemoryInBytes)
1586 {
1587 *pCommittedMemoryInBytes = 0;
1588 }
1589
1590 VkResult
1591 tu_BindBufferMemory2(VkDevice device,
1592 uint32_t bindInfoCount,
1593 const VkBindBufferMemoryInfoKHR *pBindInfos)
1594 {
1595 return VK_SUCCESS;
1596 }
1597
1598 VkResult
1599 tu_BindBufferMemory(VkDevice device,
1600 VkBuffer buffer,
1601 VkDeviceMemory memory,
1602 VkDeviceSize memoryOffset)
1603 {
1604 const VkBindBufferMemoryInfoKHR info = {
1605 .sType = VK_STRUCTURE_TYPE_BIND_BUFFER_MEMORY_INFO_KHR,
1606 .buffer = buffer,
1607 .memory = memory,
1608 .memoryOffset = memoryOffset
1609 };
1610
1611 return tu_BindBufferMemory2(device, 1, &info);
1612 }
1613
1614 VkResult
1615 tu_BindImageMemory2(VkDevice device,
1616 uint32_t bindInfoCount,
1617 const VkBindImageMemoryInfo *pBindInfos)
1618 {
1619 for (uint32_t i = 0; i < bindInfoCount; ++i) {
1620 TU_FROM_HANDLE(tu_image, image, pBindInfos[i].image);
1621 TU_FROM_HANDLE(tu_device_memory, mem, pBindInfos[i].memory);
1622
1623 if (mem) {
1624 image->bo = &mem->bo;
1625 image->bo_offset = pBindInfos[i].memoryOffset;
1626 } else {
1627 image->bo = NULL;
1628 image->bo_offset = 0;
1629 }
1630 }
1631
1632 return VK_SUCCESS;
1633 }
1634
1635 VkResult
1636 tu_BindImageMemory(VkDevice device,
1637 VkImage image,
1638 VkDeviceMemory memory,
1639 VkDeviceSize memoryOffset)
1640 {
1641 const VkBindImageMemoryInfo info = {
 1642       .sType = VK_STRUCTURE_TYPE_BIND_IMAGE_MEMORY_INFO,
1643 .image = image,
1644 .memory = memory,
1645 .memoryOffset = memoryOffset
1646 };
1647
1648 return tu_BindImageMemory2(device, 1, &info);
1649 }
1650
1651 VkResult
1652 tu_QueueBindSparse(VkQueue _queue,
1653 uint32_t bindInfoCount,
1654 const VkBindSparseInfo *pBindInfo,
1655 VkFence _fence)
1656 {
1657 return VK_SUCCESS;
1658 }
1659
1660 VkResult
1661 tu_CreateFence(VkDevice _device,
1662 const VkFenceCreateInfo *pCreateInfo,
1663 const VkAllocationCallbacks *pAllocator,
1664 VkFence *pFence)
1665 {
1666 TU_FROM_HANDLE(tu_device, device, _device);
1667
1668 struct tu_fence *fence =
1669 vk_alloc2(&device->alloc, pAllocator, sizeof(*fence), 8,
1670 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
1671
1672 if (!fence)
1673 return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
1674
1675 *pFence = tu_fence_to_handle(fence);
1676
1677 return VK_SUCCESS;
1678 }
1679
1680 void
1681 tu_DestroyFence(VkDevice _device,
1682 VkFence _fence,
1683 const VkAllocationCallbacks *pAllocator)
1684 {
1685 TU_FROM_HANDLE(tu_device, device, _device);
1686 TU_FROM_HANDLE(tu_fence, fence, _fence);
1687
1688 if (!fence)
1689 return;
1690
1691 vk_free2(&device->alloc, pAllocator, fence);
1692 }
1693
1694 VkResult
1695 tu_WaitForFences(VkDevice _device,
1696 uint32_t fenceCount,
1697 const VkFence *pFences,
1698 VkBool32 waitAll,
1699 uint64_t timeout)
1700 {
1701 return VK_SUCCESS;
1702 }
1703
1704 VkResult
1705 tu_ResetFences(VkDevice _device, uint32_t fenceCount, const VkFence *pFences)
1706 {
1707 return VK_SUCCESS;
1708 }
1709
1710 VkResult
1711 tu_GetFenceStatus(VkDevice _device, VkFence _fence)
1712 {
1713 return VK_SUCCESS;
1714 }
1715
1716 // Queue semaphore functions
1717
1718 VkResult
1719 tu_CreateSemaphore(VkDevice _device,
1720 const VkSemaphoreCreateInfo *pCreateInfo,
1721 const VkAllocationCallbacks *pAllocator,
1722 VkSemaphore *pSemaphore)
1723 {
1724 TU_FROM_HANDLE(tu_device, device, _device);
1725
1726 struct tu_semaphore *sem =
1727 vk_alloc2(&device->alloc, pAllocator, sizeof(*sem), 8,
1728 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
1729 if (!sem)
1730 return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
1731
1732 *pSemaphore = tu_semaphore_to_handle(sem);
1733 return VK_SUCCESS;
1734 }
1735
1736 void
1737 tu_DestroySemaphore(VkDevice _device,
1738 VkSemaphore _semaphore,
1739 const VkAllocationCallbacks *pAllocator)
1740 {
1741 TU_FROM_HANDLE(tu_device, device, _device);
1742 TU_FROM_HANDLE(tu_semaphore, sem, _semaphore);
1743 if (!_semaphore)
1744 return;
1745
1746 vk_free2(&device->alloc, pAllocator, sem);
1747 }
1748
1749 VkResult
1750 tu_CreateEvent(VkDevice _device,
1751 const VkEventCreateInfo *pCreateInfo,
1752 const VkAllocationCallbacks *pAllocator,
1753 VkEvent *pEvent)
1754 {
1755 TU_FROM_HANDLE(tu_device, device, _device);
1756 struct tu_event *event =
1757 vk_alloc2(&device->alloc, pAllocator, sizeof(*event), 8,
1758 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
1759
1760 if (!event)
1761 return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
1762
1763 *pEvent = tu_event_to_handle(event);
1764
1765 return VK_SUCCESS;
1766 }
1767
1768 void
1769 tu_DestroyEvent(VkDevice _device,
1770 VkEvent _event,
1771 const VkAllocationCallbacks *pAllocator)
1772 {
1773 TU_FROM_HANDLE(tu_device, device, _device);
1774 TU_FROM_HANDLE(tu_event, event, _event);
1775
1776 if (!event)
1777 return;
1778 vk_free2(&device->alloc, pAllocator, event);
1779 }
1780
1781 VkResult
1782 tu_GetEventStatus(VkDevice _device, VkEvent _event)
1783 {
1784 TU_FROM_HANDLE(tu_event, event, _event);
1785
1786 if (*event->map == 1)
1787 return VK_EVENT_SET;
1788 return VK_EVENT_RESET;
1789 }
1790
1791 VkResult
1792 tu_SetEvent(VkDevice _device, VkEvent _event)
1793 {
1794 TU_FROM_HANDLE(tu_event, event, _event);
1795 *event->map = 1;
1796
1797 return VK_SUCCESS;
1798 }
1799
1800 VkResult
1801 tu_ResetEvent(VkDevice _device, VkEvent _event)
1802 {
1803 TU_FROM_HANDLE(tu_event, event, _event);
1804 *event->map = 0;
1805
1806 return VK_SUCCESS;
1807 }
1808
1809 VkResult
1810 tu_CreateBuffer(VkDevice _device,
1811 const VkBufferCreateInfo *pCreateInfo,
1812 const VkAllocationCallbacks *pAllocator,
1813 VkBuffer *pBuffer)
1814 {
1815 TU_FROM_HANDLE(tu_device, device, _device);
1816 struct tu_buffer *buffer;
1817
1818 assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO);
1819
1820 buffer = vk_alloc2(&device->alloc, pAllocator, sizeof(*buffer), 8,
1821 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
1822 if (buffer == NULL)
1823 return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
1824
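/* Only the create parameters are recorded here; backing memory is
 * attached later through vkBindBufferMemory().
 */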
1825 buffer->size = pCreateInfo->size;
1826 buffer->usage = pCreateInfo->usage;
1827 buffer->flags = pCreateInfo->flags;
1828
1829 *pBuffer = tu_buffer_to_handle(buffer);
1830
1831 return VK_SUCCESS;
1832 }
1833
1834 void
1835 tu_DestroyBuffer(VkDevice _device,
1836 VkBuffer _buffer,
1837 const VkAllocationCallbacks *pAllocator)
1838 {
1839 TU_FROM_HANDLE(tu_device, device, _device);
1840 TU_FROM_HANDLE(tu_buffer, buffer, _buffer);
1841
1842 if (!buffer)
1843 return;
1844
1845 vk_free2(&device->alloc, pAllocator, buffer);
1846 }
1847
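/* A 3D view is layered by its depth slices; for array views the last
 * addressable layer is base_layer + layer_count.
 */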
1848 static uint32_t
1849 tu_surface_max_layer_count(struct tu_image_view *iview)
1850 {
1851 return iview->type == VK_IMAGE_VIEW_TYPE_3D
1852 ? iview->extent.depth
1853 : (iview->base_layer + iview->layer_count);
1854 }
1855
1856 VkResult
1857 tu_CreateFramebuffer(VkDevice _device,
1858 const VkFramebufferCreateInfo *pCreateInfo,
1859 const VkAllocationCallbacks *pAllocator,
1860 VkFramebuffer *pFramebuffer)
1861 {
1862 TU_FROM_HANDLE(tu_device, device, _device);
1863 struct tu_framebuffer *framebuffer;
1864
1865 assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO);
1866
1867 size_t size = sizeof(*framebuffer) + sizeof(struct tu_attachment_info) *
1868 pCreateInfo->attachmentCount;
1869 framebuffer = vk_alloc2(&device->alloc, pAllocator, size, 8,
1870 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
1871 if (framebuffer == NULL)
1872 return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
1873
1874 framebuffer->attachment_count = pCreateInfo->attachmentCount;
1875 framebuffer->width = pCreateInfo->width;
1876 framebuffer->height = pCreateInfo->height;
1877 framebuffer->layers = pCreateInfo->layers;
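/* Clamp the framebuffer dimensions so they never exceed what any
 * attachment can actually back.
 */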
1878 for (uint32_t i = 0; i < pCreateInfo->attachmentCount; i++) {
1879 VkImageView _iview = pCreateInfo->pAttachments[i];
1880 struct tu_image_view *iview = tu_image_view_from_handle(_iview);
1881 framebuffer->attachments[i].attachment = iview;
1882
1883 framebuffer->width = MIN2(framebuffer->width, iview->extent.width);
1884 framebuffer->height = MIN2(framebuffer->height, iview->extent.height);
1885 framebuffer->layers =
1886 MIN2(framebuffer->layers, tu_surface_max_layer_count(iview));
1887 }
1888
1889 *pFramebuffer = tu_framebuffer_to_handle(framebuffer);
1890 return VK_SUCCESS;
1891 }
1892
1893 void
1894 tu_DestroyFramebuffer(VkDevice _device,
1895 VkFramebuffer _fb,
1896 const VkAllocationCallbacks *pAllocator)
1897 {
1898 TU_FROM_HANDLE(tu_device, device, _device);
1899 TU_FROM_HANDLE(tu_framebuffer, fb, _fb);
1900
1901 if (!fb)
1902 return;
1903 vk_free2(&device->alloc, pAllocator, fb);
1904 }
1905
1906 static void
1907 tu_init_sampler(struct tu_device *device,
1908 struct tu_sampler *sampler,
1909 const VkSamplerCreateInfo *pCreateInfo)
1910 {
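/* TODO: no hardware sampler state is generated here yet. */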
1911 }
1912
1913 VkResult
1914 tu_CreateSampler(VkDevice _device,
1915 const VkSamplerCreateInfo *pCreateInfo,
1916 const VkAllocationCallbacks *pAllocator,
1917 VkSampler *pSampler)
1918 {
1919 TU_FROM_HANDLE(tu_device, device, _device);
1920 struct tu_sampler *sampler;
1921
1922 assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_SAMPLER_CREATE_INFO);
1923
1924 sampler = vk_alloc2(&device->alloc, pAllocator, sizeof(*sampler), 8,
1925 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
1926 if (!sampler)
1927 return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
1928
1929 tu_init_sampler(device, sampler, pCreateInfo);
1930 *pSampler = tu_sampler_to_handle(sampler);
1931
1932 return VK_SUCCESS;
1933 }
1934
1935 void
1936 tu_DestroySampler(VkDevice _device,
1937 VkSampler _sampler,
1938 const VkAllocationCallbacks *pAllocator)
1939 {
1940 TU_FROM_HANDLE(tu_device, device, _device);
1941 TU_FROM_HANDLE(tu_sampler, sampler, _sampler);
1942
1943 if (!sampler)
1944 return;
1945 vk_free2(&device->alloc, pAllocator, sampler);
1946 }
1947
1948 /* vk_icd.h does not declare this function, so we declare it here to
1949 * suppress -Wmissing-prototypes.
1950 */
1951 PUBLIC VKAPI_ATTR VkResult VKAPI_CALL
1952 vk_icdNegotiateLoaderICDInterfaceVersion(uint32_t *pSupportedVersion);
1953
1954 PUBLIC VKAPI_ATTR VkResult VKAPI_CALL
1955 vk_icdNegotiateLoaderICDInterfaceVersion(uint32_t *pSupportedVersion)
1956 {
1957 /* For the full details on loader interface versioning, see
1958 * <https://github.com/KhronosGroup/Vulkan-LoaderAndValidationLayers/blob/master/loader/LoaderAndLayerInterface.md>.
1959 * What follows is a condensed summary, to help you navigate the large and
1960 * confusing official doc.
1961 *
1962 * - Loader interface v0 is incompatible with later versions. We don't
1963 * support it.
1964 *
1965 * - In loader interface v1:
1966 * - The first ICD entrypoint called by the loader is
1967 * vk_icdGetInstanceProcAddr(). The ICD must statically expose this
1968 * entrypoint.
1969 * - The ICD must statically expose no other Vulkan symbol unless it
1970 * is linked with -Bsymbolic.
1971 * - Each dispatchable Vulkan handle created by the ICD must be
1972 * a pointer to a struct whose first member is VK_LOADER_DATA. The
1973 * ICD must initialize VK_LOADER_DATA.loadMagic to
1974 * ICD_LOADER_MAGIC.
1975 * - The loader implements vkCreate{PLATFORM}SurfaceKHR() and
1976 * vkDestroySurfaceKHR(). The ICD must be capable of working with
1977 * such loader-managed surfaces.
1978 *
1979 * - Loader interface v2 differs from v1 in:
1980 * - The first ICD entrypoint called by the loader is
1981 * vk_icdNegotiateLoaderICDInterfaceVersion(). The ICD must
1982 * statically expose this entrypoint.
1983 *
1984 * - Loader interface v3 differs from v2 in:
1985 * - The ICD must implement vkCreate{PLATFORM}SurfaceKHR(),
1986 * vkDestroySurfaceKHR(), and other APIs that use VkSurfaceKHR,
1987 * because the loader no longer does so.
1988 */
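/* We support everything required up through loader interface v3. */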
1989 *pSupportedVersion = MIN2(*pSupportedVersion, 3u);
1990 return VK_SUCCESS;
1991 }
1992
1993 VkResult
1994 tu_GetMemoryFdKHR(VkDevice _device,
1995 const VkMemoryGetFdInfoKHR *pGetFdInfo,
1996 int *pFd)
1997 {
1998 TU_FROM_HANDLE(tu_device, device, _device);
1999 TU_FROM_HANDLE(tu_device_memory, memory, pGetFdInfo->memory);
2000
2001 assert(pGetFdInfo->sType == VK_STRUCTURE_TYPE_MEMORY_GET_FD_INFO_KHR);
2002
2003 /* At the moment, we support only the below handle types. */
2004 assert(pGetFdInfo->handleType ==
2005 VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT ||
2006 pGetFdInfo->handleType ==
2007 VK_EXTERNAL_MEMORY_HANDLE_TYPE_DMA_BUF_BIT_EXT);
2008
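/* Both supported handle types are exported as a dma-buf (prime) fd. */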
2009 int prime_fd = tu_bo_export_dmabuf(device, &memory->bo);
2010 if (prime_fd < 0)
2011 return vk_error(device->instance, VK_ERROR_OUT_OF_DEVICE_MEMORY);
2012
2013 *pFd = prime_fd;
2014 return VK_SUCCESS;
2015 }
2016
2017 VkResult
2018 tu_GetMemoryFdPropertiesKHR(VkDevice _device,
2019 VkExternalMemoryHandleTypeFlagBits handleType,
2020 int fd,
2021 VkMemoryFdPropertiesKHR *pMemoryFdProperties)
2022 {
2023 assert(handleType == VK_EXTERNAL_MEMORY_HANDLE_TYPE_DMA_BUF_BIT_EXT);
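/* The imported dma-buf may only be bound with memory type index 0. */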
2024 pMemoryFdProperties->memoryTypeBits = 1;
2025 return VK_SUCCESS;
2026 }
2027
2028 void
2029 tu_GetPhysicalDeviceExternalSemaphoreProperties(
2030 VkPhysicalDevice physicalDevice,
2031 const VkPhysicalDeviceExternalSemaphoreInfoKHR *pExternalSemaphoreInfo,
2032 VkExternalSemaphorePropertiesKHR *pExternalSemaphoreProperties)
2033 {
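/* No external semaphore handle types are supported yet. */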
2034 pExternalSemaphoreProperties->exportFromImportedHandleTypes = 0;
2035 pExternalSemaphoreProperties->compatibleHandleTypes = 0;
2036 pExternalSemaphoreProperties->externalSemaphoreFeatures = 0;
2037 }
2038
2039 void
2040 tu_GetPhysicalDeviceExternalFenceProperties(
2041 VkPhysicalDevice physicalDevice,
2042 const VkPhysicalDeviceExternalFenceInfoKHR *pExternalFenceInfo,
2043 VkExternalFencePropertiesKHR *pExternalFenceProperties)
2044 {
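/* No external fence handle types are supported yet. */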
2045 pExternalFenceProperties->exportFromImportedHandleTypes = 0;
2046 pExternalFenceProperties->compatibleHandleTypes = 0;
2047 pExternalFenceProperties->externalFenceFeatures = 0;
2048 }
2049
2050 VkResult
2051 tu_CreateDebugReportCallbackEXT(
2052 VkInstance _instance,
2053 const VkDebugReportCallbackCreateInfoEXT *pCreateInfo,
2054 const VkAllocationCallbacks *pAllocator,
2055 VkDebugReportCallbackEXT *pCallback)
2056 {
2057 TU_FROM_HANDLE(tu_instance, instance, _instance);
2058 return vk_create_debug_report_callback(&instance->debug_report_callbacks,
2059 pCreateInfo, pAllocator,
2060 &instance->alloc, pCallback);
2061 }
2062
2063 void
2064 tu_DestroyDebugReportCallbackEXT(VkInstance _instance,
2065 VkDebugReportCallbackEXT _callback,
2066 const VkAllocationCallbacks *pAllocator)
2067 {
2068 TU_FROM_HANDLE(tu_instance, instance, _instance);
2069 vk_destroy_debug_report_callback(&instance->debug_report_callbacks,
2070 _callback, pAllocator, &instance->alloc);
2071 }
2072
2073 void
2074 tu_DebugReportMessageEXT(VkInstance _instance,
2075 VkDebugReportFlagsEXT flags,
2076 VkDebugReportObjectTypeEXT objectType,
2077 uint64_t object,
2078 size_t location,
2079 int32_t messageCode,
2080 const char *pLayerPrefix,
2081 const char *pMessage)
2082 {
2083 TU_FROM_HANDLE(tu_instance, instance, _instance);
2084 vk_debug_report(&instance->debug_report_callbacks, flags, objectType,
2085 object, location, messageCode, pLayerPrefix, pMessage);
2086 }
2087
2088 void
2089 tu_GetDeviceGroupPeerMemoryFeatures(
2090 VkDevice device,
2091 uint32_t heapIndex,
2092 uint32_t localDeviceIndex,
2093 uint32_t remoteDeviceIndex,
2094 VkPeerMemoryFeatureFlags *pPeerMemoryFeatures)
2095 {
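/* Only single-device groups are expected (hence the assert below), so the
 * peer is the local device itself and every peer-memory feature applies.
 */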
2096 assert(localDeviceIndex == remoteDeviceIndex);
2097
2098 *pPeerMemoryFeatures = VK_PEER_MEMORY_FEATURE_COPY_SRC_BIT |
2099 VK_PEER_MEMORY_FEATURE_COPY_DST_BIT |
2100 VK_PEER_MEMORY_FEATURE_GENERIC_SRC_BIT |
2101 VK_PEER_MEMORY_FEATURE_GENERIC_DST_BIT;
2102 }