turnip: preliminary support for shader modules
[mesa.git] / src / freedreno / vulkan / tu_device.c
1 /*
2 * Copyright © 2016 Red Hat.
3 * Copyright © 2016 Bas Nieuwenhuizen
4 *
5 * based in part on anv driver which is:
6 * Copyright © 2015 Intel Corporation
7 *
8 * Permission is hereby granted, free of charge, to any person obtaining a
9 * copy of this software and associated documentation files (the "Software"),
10 * to deal in the Software without restriction, including without limitation
11 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
12 * and/or sell copies of the Software, and to permit persons to whom the
13 * Software is furnished to do so, subject to the following conditions:
14 *
15 * The above copyright notice and this permission notice (including the next
16 * paragraph) shall be included in all copies or substantial portions of the
17 * Software.
18 *
19 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
20 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
21 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
22 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
23 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
24 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
25 * DEALINGS IN THE SOFTWARE.
26 */
27
28 #include "tu_private.h"
29
30 #include <fcntl.h>
31 #include <libsync.h>
32 #include <stdbool.h>
33 #include <string.h>
34 #include <sys/mman.h>
35 #include <sys/sysinfo.h>
36 #include <unistd.h>
37 #include <xf86drm.h>
38
39 #include "util/debug.h"
40 #include "util/disk_cache.h"
41 #include "util/strtod.h"
42 #include "vk_format.h"
43 #include "vk_util.h"
44
45 #include "drm/msm_drm.h"
46
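/* The cache UUID is what invalidates on-disk shader caches across driver
 * builds and GPUs: bytes 0-3 hold the Mesa build timestamp, bytes 4-5 the
 * GPU family id, and bytes 6+ the literal string "tu".
 */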
47 static int
48 tu_device_get_cache_uuid(uint16_t family, void *uuid)
49 {
50 uint32_t mesa_timestamp;
51 uint16_t f = family;
52 memset(uuid, 0, VK_UUID_SIZE);
53 if (!disk_cache_get_function_timestamp(tu_device_get_cache_uuid,
54 &mesa_timestamp))
55 return -1;
56
57 memcpy(uuid, &mesa_timestamp, 4);
58 memcpy((char *) uuid + 4, &f, 2);
59    snprintf((char *) uuid + 6, VK_UUID_SIZE - 6, "tu");
60 return 0;
61 }
62
63 static void
64 tu_get_driver_uuid(void *uuid)
65 {
66 memset(uuid, 0, VK_UUID_SIZE);
67 snprintf(uuid, VK_UUID_SIZE, "freedreno");
68 }
69
70 static void
71 tu_get_device_uuid(void *uuid)
72 {
73 memset(uuid, 0, VK_UUID_SIZE);
74 }
75
76 static VkResult
77 tu_bo_init(struct tu_device *dev,
78 struct tu_bo *bo,
79 uint32_t gem_handle,
80 uint64_t size)
81 {
82 uint64_t iova = tu_gem_info_iova(dev, gem_handle);
83 if (!iova)
84 return VK_ERROR_OUT_OF_DEVICE_MEMORY;
85
86 *bo = (struct tu_bo) {
87 .gem_handle = gem_handle,
88 .size = size,
89 .iova = iova,
90 };
91
92 return VK_SUCCESS;
93 }
94
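/* A minimal sketch of the BO lifecycle these helpers implement (error
 * handling elided):
 *
 *    struct tu_bo bo;
 *    tu_bo_init_new(dev, &bo, size);  // GEM alloc + iova lookup
 *    tu_bo_map(dev, &bo);             // lazy mmap via the render-node fd
 *    memcpy(bo.map, data, size);
 *    tu_bo_finish(dev, &bo);          // munmap + GEM close
 */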
95 VkResult
96 tu_bo_init_new(struct tu_device *dev, struct tu_bo *bo, uint64_t size)
97 {
98 /* TODO: Choose better flags. As of 2018-11-12, freedreno/drm/msm_bo.c
99 * always sets `flags = MSM_BO_WC`, and we copy that behavior here.
100 */
101 uint32_t gem_handle = tu_gem_new(dev, size, MSM_BO_WC);
102 if (!gem_handle)
103 return vk_error(dev->instance, VK_ERROR_OUT_OF_DEVICE_MEMORY);
104
105 VkResult result = tu_bo_init(dev, bo, gem_handle, size);
106 if (result != VK_SUCCESS) {
107 tu_gem_close(dev, gem_handle);
108 return vk_error(dev->instance, result);
109 }
110
111 return VK_SUCCESS;
112 }
113
114 VkResult
115 tu_bo_init_dmabuf(struct tu_device *dev,
116 struct tu_bo *bo,
117 uint64_t size,
118 int fd)
119 {
120 uint32_t gem_handle = tu_gem_import_dmabuf(dev, fd, size);
121 if (!gem_handle)
122 return vk_error(dev->instance, VK_ERROR_INVALID_EXTERNAL_HANDLE);
123
124 VkResult result = tu_bo_init(dev, bo, gem_handle, size);
125 if (result != VK_SUCCESS) {
126 tu_gem_close(dev, gem_handle);
127 return vk_error(dev->instance, result);
128 }
129
130 return VK_SUCCESS;
131 }
132
133 int
134 tu_bo_export_dmabuf(struct tu_device *dev, struct tu_bo *bo)
135 {
136 return tu_gem_export_dmabuf(dev, bo->gem_handle);
137 }
138
139 VkResult
140 tu_bo_map(struct tu_device *dev, struct tu_bo *bo)
141 {
142 if (bo->map)
143 return VK_SUCCESS;
144
145 uint64_t offset = tu_gem_info_offset(dev, bo->gem_handle);
146 if (!offset)
147 return vk_error(dev->instance, VK_ERROR_OUT_OF_DEVICE_MEMORY);
148
149 /* TODO: Should we use the wrapper os_mmap() like Freedreno does? */
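   /* The "offset" is expected to be the fake mmap offset the kernel
    * assigned to this GEM handle; mmap()ing the DRM fd at that offset maps
    * this BO's pages.
    */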
150 void *map = mmap(0, bo->size, PROT_READ | PROT_WRITE, MAP_SHARED,
151 dev->physical_device->local_fd, offset);
152 if (map == MAP_FAILED)
153 return vk_error(dev->instance, VK_ERROR_MEMORY_MAP_FAILED);
154
155 bo->map = map;
156 return VK_SUCCESS;
157 }
158
159 void
160 tu_bo_finish(struct tu_device *dev, struct tu_bo *bo)
161 {
162 assert(bo->gem_handle);
163
164 if (bo->map)
165 munmap(bo->map, bo->size);
166
167 tu_gem_close(dev, bo->gem_handle);
168 }
169
170 static VkResult
171 tu_physical_device_init(struct tu_physical_device *device,
172 struct tu_instance *instance,
173 drmDevicePtr drm_device)
174 {
175 const char *path = drm_device->nodes[DRM_NODE_RENDER];
176 VkResult result = VK_SUCCESS;
177 drmVersionPtr version;
178 int fd;
179 int master_fd = -1;
180
181 fd = open(path, O_RDWR | O_CLOEXEC);
182 if (fd < 0) {
183 return vk_errorf(instance, VK_ERROR_INCOMPATIBLE_DRIVER,
184 "failed to open device %s", path);
185 }
186
187 /* Version 1.3 added MSM_INFO_IOVA. */
188 const int min_version_major = 1;
189 const int min_version_minor = 3;
190
191 version = drmGetVersion(fd);
192 if (!version) {
193 close(fd);
194 return vk_errorf(instance, VK_ERROR_INCOMPATIBLE_DRIVER,
195 "failed to query kernel driver version for device %s",
196 path);
197 }
198
199 if (strcmp(version->name, "msm")) {
200 drmFreeVersion(version);
201 if (master_fd != -1)
202 close(master_fd);
203 close(fd);
204 return vk_errorf(instance, VK_ERROR_INCOMPATIBLE_DRIVER,
205 "device %s does not use the msm kernel driver", path);
206 }
207
208 if (version->version_major != min_version_major ||
209 version->version_minor < min_version_minor) {
210 result = vk_errorf(instance, VK_ERROR_INCOMPATIBLE_DRIVER,
211 "kernel driver for device %s has version %d.%d, "
212 "but Vulkan requires version >= %d.%d",
213 path, version->version_major, version->version_minor,
214 min_version_major, min_version_minor);
215 drmFreeVersion(version);
216 close(fd);
217 return result;
218 }
219
220 drmFreeVersion(version);
221
222 if (instance->debug_flags & TU_DEBUG_STARTUP)
223 tu_logi("Found compatible device '%s'.", path);
224
225 device->_loader_data.loaderMagic = ICD_LOADER_MAGIC;
226 device->instance = instance;
227 assert(strlen(path) < ARRAY_SIZE(device->path));
228 strncpy(device->path, path, ARRAY_SIZE(device->path));
229
230 if (instance->enabled_extensions.KHR_display) {
231 master_fd =
232 open(drm_device->nodes[DRM_NODE_PRIMARY], O_RDWR | O_CLOEXEC);
233 if (master_fd >= 0) {
234          /* TODO: free master_fd if accel is not working? */
235 }
236 }
237
238 device->master_fd = master_fd;
239 device->local_fd = fd;
240
241 if (tu_drm_get_gpu_id(device, &device->gpu_id)) {
242 if (instance->debug_flags & TU_DEBUG_STARTUP)
243 tu_logi("Could not query the GPU ID");
244 result = vk_errorf(instance, VK_ERROR_INITIALIZATION_FAILED,
245 "could not get GPU ID");
246 goto fail;
247 }
248
249 if (tu_drm_get_gmem_size(device, &device->gmem_size)) {
250 if (instance->debug_flags & TU_DEBUG_STARTUP)
251 tu_logi("Could not query the GMEM size");
252 result = vk_errorf(instance, VK_ERROR_INITIALIZATION_FAILED,
253 "could not get GMEM size");
254 goto fail;
255 }
256
257 memset(device->name, 0, sizeof(device->name));
258 sprintf(device->name, "FD%d", device->gpu_id);
259
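   /* Per-GPU tiling parameters: GMEM binning tiles are aligned to these
    * width/height granularities (only a630 is recognized so far).
    */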
260 switch (device->gpu_id) {
261 case 630:
262 device->tile_align_w = 32;
263 device->tile_align_h = 32;
264 break;
265 default:
266 result = vk_errorf(instance, VK_ERROR_INITIALIZATION_FAILED,
267 "device %s is unsupported", device->name);
268 goto fail;
269 }
270 if (tu_device_get_cache_uuid(device->gpu_id, device->cache_uuid)) {
271 result = vk_errorf(instance, VK_ERROR_INITIALIZATION_FAILED,
272 "cannot generate UUID");
273 goto fail;
274 }
275
276    /* The gpu id is already embedded in the cache uuid, so the disk cache
277     * is keyed on the device name plus the hex-formatted uuid.
278     */
279 char buf[VK_UUID_SIZE * 2 + 1];
280 disk_cache_format_hex_id(buf, device->cache_uuid, VK_UUID_SIZE * 2);
281 device->disk_cache = disk_cache_create(device->name, buf, 0);
282
283 fprintf(stderr, "WARNING: tu is not a conformant vulkan implementation, "
284 "testing use only.\n");
285
286    tu_get_driver_uuid(&device->driver_uuid);
287 tu_get_device_uuid(&device->device_uuid);
288
289 tu_fill_device_extension_table(device, &device->supported_extensions);
290
291 if (result != VK_SUCCESS) {
292 vk_error(instance, result);
293 goto fail;
294 }
295
296 return VK_SUCCESS;
297
298 fail:
299 close(fd);
300 if (master_fd != -1)
301 close(master_fd);
302 return result;
303 }
304
305 static void
306 tu_physical_device_finish(struct tu_physical_device *device)
307 {
308 disk_cache_destroy(device->disk_cache);
309 close(device->local_fd);
310 if (device->master_fd != -1)
311 close(device->master_fd);
312 }
313
314 static void *
315 default_alloc_func(void *pUserData,
316 size_t size,
317 size_t align,
318 VkSystemAllocationScope allocationScope)
319 {
320 return malloc(size);
321 }
322
323 static void *
324 default_realloc_func(void *pUserData,
325 void *pOriginal,
326 size_t size,
327 size_t align,
328 VkSystemAllocationScope allocationScope)
329 {
330 return realloc(pOriginal, size);
331 }
332
333 static void
334 default_free_func(void *pUserData, void *pMemory)
335 {
336 free(pMemory);
337 }
338
339 static const VkAllocationCallbacks default_alloc = {
340 .pUserData = NULL,
341 .pfnAllocation = default_alloc_func,
342 .pfnReallocation = default_realloc_func,
343 .pfnFree = default_free_func,
344 };
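/* Note that these fallback callbacks ignore the requested alignment and
 * rely on malloc()'s natural alignment being sufficient for the driver's
 * internal allocations.
 */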
345
346 static const struct debug_control tu_debug_options[] = {
347 { "startup", TU_DEBUG_STARTUP },
348 { "nir", TU_DEBUG_NIR },
349 { "ir3", TU_DEBUG_IR3 },
350 { NULL, 0 }
351 };
352
353 const char *
354 tu_get_debug_option_name(int id)
355 {
356 assert(id < ARRAY_SIZE(tu_debug_options) - 1);
357 return tu_debug_options[id].string;
358 }
359
360 static int
361 tu_get_instance_extension_index(const char *name)
362 {
363 for (unsigned i = 0; i < TU_INSTANCE_EXTENSION_COUNT; ++i) {
364 if (strcmp(name, tu_instance_extensions[i].extensionName) == 0)
365 return i;
366 }
367 return -1;
368 }
369
370 VkResult
371 tu_CreateInstance(const VkInstanceCreateInfo *pCreateInfo,
372 const VkAllocationCallbacks *pAllocator,
373 VkInstance *pInstance)
374 {
375 struct tu_instance *instance;
376 VkResult result;
377
378 assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_INSTANCE_CREATE_INFO);
379
380 uint32_t client_version;
381 if (pCreateInfo->pApplicationInfo &&
382 pCreateInfo->pApplicationInfo->apiVersion != 0) {
383 client_version = pCreateInfo->pApplicationInfo->apiVersion;
384 } else {
385 tu_EnumerateInstanceVersion(&client_version);
386 }
387
388 instance = vk_zalloc2(&default_alloc, pAllocator, sizeof(*instance), 8,
389 VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
390 if (!instance)
391 return vk_error(NULL, VK_ERROR_OUT_OF_HOST_MEMORY);
392
393 instance->_loader_data.loaderMagic = ICD_LOADER_MAGIC;
394
395 if (pAllocator)
396 instance->alloc = *pAllocator;
397 else
398 instance->alloc = default_alloc;
399
400 instance->api_version = client_version;
401 instance->physical_device_count = -1;
402
403 instance->debug_flags =
404 parse_debug_string(getenv("TU_DEBUG"), tu_debug_options);
405
406 if (instance->debug_flags & TU_DEBUG_STARTUP)
407 tu_logi("Created an instance");
408
409 for (uint32_t i = 0; i < pCreateInfo->enabledExtensionCount; i++) {
410 const char *ext_name = pCreateInfo->ppEnabledExtensionNames[i];
411 int index = tu_get_instance_extension_index(ext_name);
412
413 if (index < 0 || !tu_supported_instance_extensions.extensions[index]) {
414 vk_free2(&default_alloc, pAllocator, instance);
415 return vk_error(instance, VK_ERROR_EXTENSION_NOT_PRESENT);
416 }
417
418 instance->enabled_extensions.extensions[index] = true;
419 }
420
421 result = vk_debug_report_instance_init(&instance->debug_report_callbacks);
422 if (result != VK_SUCCESS) {
423 vk_free2(&default_alloc, pAllocator, instance);
424 return vk_error(instance, result);
425 }
426
427 _mesa_locale_init();
428
429 VG(VALGRIND_CREATE_MEMPOOL(instance, 0, false));
430
431 *pInstance = tu_instance_to_handle(instance);
432
433 return VK_SUCCESS;
434 }
435
436 void
437 tu_DestroyInstance(VkInstance _instance,
438 const VkAllocationCallbacks *pAllocator)
439 {
440 TU_FROM_HANDLE(tu_instance, instance, _instance);
441
442 if (!instance)
443 return;
444
445 for (int i = 0; i < instance->physical_device_count; ++i) {
446 tu_physical_device_finish(instance->physical_devices + i);
447 }
448
449 VG(VALGRIND_DESTROY_MEMPOOL(instance));
450
451 _mesa_locale_fini();
452
453 vk_debug_report_instance_destroy(&instance->debug_report_callbacks);
454
455 vk_free(&instance->alloc, instance);
456 }
457
458 static VkResult
459 tu_enumerate_devices(struct tu_instance *instance)
460 {
461    /* TODO: Check for more devices? */
462 drmDevicePtr devices[8];
463 VkResult result = VK_ERROR_INCOMPATIBLE_DRIVER;
464 int max_devices;
465
466 instance->physical_device_count = 0;
467
468 max_devices = drmGetDevices2(0, devices, ARRAY_SIZE(devices));
469
470 if (instance->debug_flags & TU_DEBUG_STARTUP)
471 tu_logi("Found %d drm nodes", max_devices);
472
473 if (max_devices < 1)
474 return vk_error(instance, VK_ERROR_INCOMPATIBLE_DRIVER);
475
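   /* Adreno GPUs are platform devices rather than PCI devices, so only
    * render-capable DRM_BUS_PLATFORM nodes are considered.
    */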
476 for (unsigned i = 0; i < (unsigned) max_devices; i++) {
477 if (devices[i]->available_nodes & 1 << DRM_NODE_RENDER &&
478 devices[i]->bustype == DRM_BUS_PLATFORM) {
479
480 result = tu_physical_device_init(
481 instance->physical_devices + instance->physical_device_count,
482 instance, devices[i]);
483 if (result == VK_SUCCESS)
484 ++instance->physical_device_count;
485 else if (result != VK_ERROR_INCOMPATIBLE_DRIVER)
486 break;
487 }
488 }
489 drmFreeDevices(devices, max_devices);
490
491 return result;
492 }
493
494 VkResult
495 tu_EnumeratePhysicalDevices(VkInstance _instance,
496 uint32_t *pPhysicalDeviceCount,
497 VkPhysicalDevice *pPhysicalDevices)
498 {
499 TU_FROM_HANDLE(tu_instance, instance, _instance);
500 VK_OUTARRAY_MAKE(out, pPhysicalDevices, pPhysicalDeviceCount);
501
502 VkResult result;
503
504 if (instance->physical_device_count < 0) {
505 result = tu_enumerate_devices(instance);
506 if (result != VK_SUCCESS && result != VK_ERROR_INCOMPATIBLE_DRIVER)
507 return result;
508 }
509
510 for (uint32_t i = 0; i < instance->physical_device_count; ++i) {
511 vk_outarray_append(&out, p)
512 {
513 *p = tu_physical_device_to_handle(instance->physical_devices + i);
514 }
515 }
516
517 return vk_outarray_status(&out);
518 }
519
520 VkResult
521 tu_EnumeratePhysicalDeviceGroups(
522 VkInstance _instance,
523 uint32_t *pPhysicalDeviceGroupCount,
524 VkPhysicalDeviceGroupProperties *pPhysicalDeviceGroupProperties)
525 {
526 TU_FROM_HANDLE(tu_instance, instance, _instance);
527 VK_OUTARRAY_MAKE(out, pPhysicalDeviceGroupProperties,
528 pPhysicalDeviceGroupCount);
529 VkResult result;
530
531 if (instance->physical_device_count < 0) {
532 result = tu_enumerate_devices(instance);
533 if (result != VK_SUCCESS && result != VK_ERROR_INCOMPATIBLE_DRIVER)
534 return result;
535 }
536
537 for (uint32_t i = 0; i < instance->physical_device_count; ++i) {
538 vk_outarray_append(&out, p)
539 {
540 p->physicalDeviceCount = 1;
541 p->physicalDevices[0] =
542 tu_physical_device_to_handle(instance->physical_devices + i);
543 p->subsetAllocation = false;
544 }
545 }
546
547 return vk_outarray_status(&out);
548 }
549
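/* Feature support is still being brought up; every optional core feature is
 * reported as unsupported for now, and tu_GetPhysicalDeviceFeatures2 below
 * answers extension feature structs the same way.
 */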
550 void
551 tu_GetPhysicalDeviceFeatures(VkPhysicalDevice physicalDevice,
552 VkPhysicalDeviceFeatures *pFeatures)
553 {
554 memset(pFeatures, 0, sizeof(*pFeatures));
555
556 *pFeatures = (VkPhysicalDeviceFeatures) {
557 .robustBufferAccess = false,
558 .fullDrawIndexUint32 = false,
559 .imageCubeArray = false,
560 .independentBlend = false,
561 .geometryShader = false,
562 .tessellationShader = false,
563 .sampleRateShading = false,
564 .dualSrcBlend = false,
565 .logicOp = false,
566 .multiDrawIndirect = false,
567 .drawIndirectFirstInstance = false,
568 .depthClamp = false,
569 .depthBiasClamp = false,
570 .fillModeNonSolid = false,
571 .depthBounds = false,
572 .wideLines = false,
573 .largePoints = false,
574 .alphaToOne = false,
575 .multiViewport = false,
576 .samplerAnisotropy = false,
577 .textureCompressionETC2 = false,
578 .textureCompressionASTC_LDR = false,
579 .textureCompressionBC = false,
580 .occlusionQueryPrecise = false,
581 .pipelineStatisticsQuery = false,
582 .vertexPipelineStoresAndAtomics = false,
583 .fragmentStoresAndAtomics = false,
584 .shaderTessellationAndGeometryPointSize = false,
585 .shaderImageGatherExtended = false,
586 .shaderStorageImageExtendedFormats = false,
587 .shaderStorageImageMultisample = false,
588 .shaderUniformBufferArrayDynamicIndexing = false,
589 .shaderSampledImageArrayDynamicIndexing = false,
590 .shaderStorageBufferArrayDynamicIndexing = false,
591 .shaderStorageImageArrayDynamicIndexing = false,
592 .shaderStorageImageReadWithoutFormat = false,
593 .shaderStorageImageWriteWithoutFormat = false,
594 .shaderClipDistance = false,
595 .shaderCullDistance = false,
596 .shaderFloat64 = false,
597 .shaderInt64 = false,
598 .shaderInt16 = false,
599 .sparseBinding = false,
600 .variableMultisampleRate = false,
601 .inheritedQueries = false,
602 };
603 }
604
605 void
606 tu_GetPhysicalDeviceFeatures2(VkPhysicalDevice physicalDevice,
607 VkPhysicalDeviceFeatures2KHR *pFeatures)
608 {
609 vk_foreach_struct(ext, pFeatures->pNext)
610 {
611 switch (ext->sType) {
612 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VARIABLE_POINTER_FEATURES_KHR: {
613 VkPhysicalDeviceVariablePointerFeaturesKHR *features = (void *) ext;
614 features->variablePointersStorageBuffer = false;
615 features->variablePointers = false;
616 break;
617 }
618 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MULTIVIEW_FEATURES_KHR: {
619 VkPhysicalDeviceMultiviewFeaturesKHR *features =
620 (VkPhysicalDeviceMultiviewFeaturesKHR *) ext;
621 features->multiview = false;
622 features->multiviewGeometryShader = false;
623 features->multiviewTessellationShader = false;
624 break;
625 }
626 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_DRAW_PARAMETER_FEATURES: {
627 VkPhysicalDeviceShaderDrawParameterFeatures *features =
628 (VkPhysicalDeviceShaderDrawParameterFeatures *) ext;
629 features->shaderDrawParameters = false;
630 break;
631 }
632 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROTECTED_MEMORY_FEATURES: {
633 VkPhysicalDeviceProtectedMemoryFeatures *features =
634 (VkPhysicalDeviceProtectedMemoryFeatures *) ext;
635 features->protectedMemory = false;
636 break;
637 }
638 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_16BIT_STORAGE_FEATURES: {
639 VkPhysicalDevice16BitStorageFeatures *features =
640 (VkPhysicalDevice16BitStorageFeatures *) ext;
641 features->storageBuffer16BitAccess = false;
642 features->uniformAndStorageBuffer16BitAccess = false;
643 features->storagePushConstant16 = false;
644 features->storageInputOutput16 = false;
645 break;
646 }
647 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SAMPLER_YCBCR_CONVERSION_FEATURES: {
648 VkPhysicalDeviceSamplerYcbcrConversionFeatures *features =
649 (VkPhysicalDeviceSamplerYcbcrConversionFeatures *) ext;
650 features->samplerYcbcrConversion = false;
651 break;
652 }
653 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DESCRIPTOR_INDEXING_FEATURES_EXT: {
654 VkPhysicalDeviceDescriptorIndexingFeaturesEXT *features =
655 (VkPhysicalDeviceDescriptorIndexingFeaturesEXT *) ext;
656 features->shaderInputAttachmentArrayDynamicIndexing = false;
657 features->shaderUniformTexelBufferArrayDynamicIndexing = false;
658 features->shaderStorageTexelBufferArrayDynamicIndexing = false;
659 features->shaderUniformBufferArrayNonUniformIndexing = false;
660 features->shaderSampledImageArrayNonUniformIndexing = false;
661 features->shaderStorageBufferArrayNonUniformIndexing = false;
662 features->shaderStorageImageArrayNonUniformIndexing = false;
663 features->shaderInputAttachmentArrayNonUniformIndexing = false;
664 features->shaderUniformTexelBufferArrayNonUniformIndexing = false;
665 features->shaderStorageTexelBufferArrayNonUniformIndexing = false;
666 features->descriptorBindingUniformBufferUpdateAfterBind = false;
667 features->descriptorBindingSampledImageUpdateAfterBind = false;
668 features->descriptorBindingStorageImageUpdateAfterBind = false;
669 features->descriptorBindingStorageBufferUpdateAfterBind = false;
670 features->descriptorBindingUniformTexelBufferUpdateAfterBind = false;
671 features->descriptorBindingStorageTexelBufferUpdateAfterBind = false;
672 features->descriptorBindingUpdateUnusedWhilePending = false;
673 features->descriptorBindingPartiallyBound = false;
674 features->descriptorBindingVariableDescriptorCount = false;
675 features->runtimeDescriptorArray = false;
676 break;
677 }
678 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_CONDITIONAL_RENDERING_FEATURES_EXT: {
679 VkPhysicalDeviceConditionalRenderingFeaturesEXT *features =
680 (VkPhysicalDeviceConditionalRenderingFeaturesEXT *) ext;
681 features->conditionalRendering = false;
682 features->inheritedConditionalRendering = false;
683 break;
684 }
685 default:
686 break;
687 }
688 }
689    tu_GetPhysicalDeviceFeatures(physicalDevice, &pFeatures->features);
690 }
691
692 void
693 tu_GetPhysicalDeviceProperties(VkPhysicalDevice physicalDevice,
694 VkPhysicalDeviceProperties *pProperties)
695 {
696 TU_FROM_HANDLE(tu_physical_device, pdevice, physicalDevice);
697 VkSampleCountFlags sample_counts = 0xf;
698
699    /* Make sure that the entire descriptor set is addressable with a signed
700     * 32-bit int, i.e. the sum of all limits scaled by descriptor size must
701     * be at most 2 GiB. A combined image & sampler descriptor counts against
702     * both the image and sampler limits. This limit applies to the pipeline
703     * layout, not the set layout, but there is no set limit, so we just set
704     * a pipeline limit. No app is likely to hit this soon. */
705 size_t max_descriptor_set_size =
706 ((1ull << 31) - 16 * MAX_DYNAMIC_BUFFERS) /
707 (32 /* uniform buffer, 32 due to potential space wasted on alignment */ +
708 32 /* storage buffer, 32 due to potential space wasted on alignment */ +
709 32 /* sampler, largest when combined with image */ +
710 64 /* sampled image */ + 64 /* storage image */);
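   /* For scale: ignoring the small dynamic-buffer term, this evaluates to
    * roughly (1ull << 31) / 224 ≈ 9.6 million descriptors per stage.
    */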
711
712 VkPhysicalDeviceLimits limits = {
713 .maxImageDimension1D = (1 << 14),
714 .maxImageDimension2D = (1 << 14),
715 .maxImageDimension3D = (1 << 11),
716 .maxImageDimensionCube = (1 << 14),
717 .maxImageArrayLayers = (1 << 11),
718 .maxTexelBufferElements = 128 * 1024 * 1024,
719 .maxUniformBufferRange = UINT32_MAX,
720 .maxStorageBufferRange = UINT32_MAX,
721 .maxPushConstantsSize = MAX_PUSH_CONSTANTS_SIZE,
722 .maxMemoryAllocationCount = UINT32_MAX,
723 .maxSamplerAllocationCount = 64 * 1024,
724 .bufferImageGranularity = 64, /* A cache line */
725 .sparseAddressSpaceSize = 0xffffffffu, /* buffer max size */
726 .maxBoundDescriptorSets = MAX_SETS,
727 .maxPerStageDescriptorSamplers = max_descriptor_set_size,
728 .maxPerStageDescriptorUniformBuffers = max_descriptor_set_size,
729 .maxPerStageDescriptorStorageBuffers = max_descriptor_set_size,
730 .maxPerStageDescriptorSampledImages = max_descriptor_set_size,
731 .maxPerStageDescriptorStorageImages = max_descriptor_set_size,
732 .maxPerStageDescriptorInputAttachments = max_descriptor_set_size,
733 .maxPerStageResources = max_descriptor_set_size,
734 .maxDescriptorSetSamplers = max_descriptor_set_size,
735 .maxDescriptorSetUniformBuffers = max_descriptor_set_size,
736 .maxDescriptorSetUniformBuffersDynamic = MAX_DYNAMIC_UNIFORM_BUFFERS,
737 .maxDescriptorSetStorageBuffers = max_descriptor_set_size,
738 .maxDescriptorSetStorageBuffersDynamic = MAX_DYNAMIC_STORAGE_BUFFERS,
739 .maxDescriptorSetSampledImages = max_descriptor_set_size,
740 .maxDescriptorSetStorageImages = max_descriptor_set_size,
741 .maxDescriptorSetInputAttachments = max_descriptor_set_size,
742 .maxVertexInputAttributes = 32,
743 .maxVertexInputBindings = 32,
744 .maxVertexInputAttributeOffset = 2047,
745 .maxVertexInputBindingStride = 2048,
746 .maxVertexOutputComponents = 128,
747 .maxTessellationGenerationLevel = 64,
748 .maxTessellationPatchSize = 32,
749 .maxTessellationControlPerVertexInputComponents = 128,
750 .maxTessellationControlPerVertexOutputComponents = 128,
751 .maxTessellationControlPerPatchOutputComponents = 120,
752 .maxTessellationControlTotalOutputComponents = 4096,
753 .maxTessellationEvaluationInputComponents = 128,
754 .maxTessellationEvaluationOutputComponents = 128,
755 .maxGeometryShaderInvocations = 127,
756 .maxGeometryInputComponents = 64,
757 .maxGeometryOutputComponents = 128,
758 .maxGeometryOutputVertices = 256,
759 .maxGeometryTotalOutputComponents = 1024,
760 .maxFragmentInputComponents = 128,
761 .maxFragmentOutputAttachments = 8,
762 .maxFragmentDualSrcAttachments = 1,
763 .maxFragmentCombinedOutputResources = 8,
764 .maxComputeSharedMemorySize = 32768,
765 .maxComputeWorkGroupCount = { 65535, 65535, 65535 },
766 .maxComputeWorkGroupInvocations = 2048,
767 .maxComputeWorkGroupSize = { 2048, 2048, 2048 },
768 .subPixelPrecisionBits = 4 /* FIXME */,
769 .subTexelPrecisionBits = 4 /* FIXME */,
770 .mipmapPrecisionBits = 4 /* FIXME */,
771 .maxDrawIndexedIndexValue = UINT32_MAX,
772 .maxDrawIndirectCount = UINT32_MAX,
773 .maxSamplerLodBias = 16,
774 .maxSamplerAnisotropy = 16,
775 .maxViewports = MAX_VIEWPORTS,
776 .maxViewportDimensions = { (1 << 14), (1 << 14) },
777 .viewportBoundsRange = { INT16_MIN, INT16_MAX },
778 .viewportSubPixelBits = 8,
779 .minMemoryMapAlignment = 4096, /* A page */
780 .minTexelBufferOffsetAlignment = 1,
781 .minUniformBufferOffsetAlignment = 4,
782 .minStorageBufferOffsetAlignment = 4,
783 .minTexelOffset = -32,
784 .maxTexelOffset = 31,
785 .minTexelGatherOffset = -32,
786 .maxTexelGatherOffset = 31,
787 .minInterpolationOffset = -2,
788 .maxInterpolationOffset = 2,
789 .subPixelInterpolationOffsetBits = 8,
790 .maxFramebufferWidth = (1 << 14),
791 .maxFramebufferHeight = (1 << 14),
792 .maxFramebufferLayers = (1 << 10),
793 .framebufferColorSampleCounts = sample_counts,
794 .framebufferDepthSampleCounts = sample_counts,
795 .framebufferStencilSampleCounts = sample_counts,
796 .framebufferNoAttachmentsSampleCounts = sample_counts,
797 .maxColorAttachments = MAX_RTS,
798 .sampledImageColorSampleCounts = sample_counts,
799 .sampledImageIntegerSampleCounts = VK_SAMPLE_COUNT_1_BIT,
800 .sampledImageDepthSampleCounts = sample_counts,
801 .sampledImageStencilSampleCounts = sample_counts,
802 .storageImageSampleCounts = VK_SAMPLE_COUNT_1_BIT,
803 .maxSampleMaskWords = 1,
804 .timestampComputeAndGraphics = true,
805 .timestampPeriod = 1,
806 .maxClipDistances = 8,
807 .maxCullDistances = 8,
808 .maxCombinedClipAndCullDistances = 8,
809 .discreteQueuePriorities = 1,
810 .pointSizeRange = { 0.125, 255.875 },
811 .lineWidthRange = { 0.0, 7.9921875 },
812 .pointSizeGranularity = (1.0 / 8.0),
813 .lineWidthGranularity = (1.0 / 128.0),
814 .strictLines = false, /* FINISHME */
815 .standardSampleLocations = true,
816 .optimalBufferCopyOffsetAlignment = 128,
817 .optimalBufferCopyRowPitchAlignment = 128,
818 .nonCoherentAtomSize = 64,
819 };
820
821 *pProperties = (VkPhysicalDeviceProperties) {
822 .apiVersion = tu_physical_device_api_version(pdevice),
823 .driverVersion = vk_get_driver_version(),
824 .vendorID = 0, /* TODO */
825 .deviceID = 0,
826 .deviceType = VK_PHYSICAL_DEVICE_TYPE_INTEGRATED_GPU,
827 .limits = limits,
828 .sparseProperties = { 0 },
829 };
830
831 strcpy(pProperties->deviceName, pdevice->name);
832 memcpy(pProperties->pipelineCacheUUID, pdevice->cache_uuid, VK_UUID_SIZE);
833 }
834
835 void
836 tu_GetPhysicalDeviceProperties2(VkPhysicalDevice physicalDevice,
837 VkPhysicalDeviceProperties2KHR *pProperties)
838 {
839 TU_FROM_HANDLE(tu_physical_device, pdevice, physicalDevice);
840 tu_GetPhysicalDeviceProperties(physicalDevice, &pProperties->properties);
841
842 vk_foreach_struct(ext, pProperties->pNext)
843 {
844 switch (ext->sType) {
845 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PUSH_DESCRIPTOR_PROPERTIES_KHR: {
846 VkPhysicalDevicePushDescriptorPropertiesKHR *properties =
847 (VkPhysicalDevicePushDescriptorPropertiesKHR *) ext;
848 properties->maxPushDescriptors = MAX_PUSH_DESCRIPTORS;
849 break;
850 }
851 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_ID_PROPERTIES_KHR: {
852 VkPhysicalDeviceIDPropertiesKHR *properties =
853 (VkPhysicalDeviceIDPropertiesKHR *) ext;
854 memcpy(properties->driverUUID, pdevice->driver_uuid, VK_UUID_SIZE);
855 memcpy(properties->deviceUUID, pdevice->device_uuid, VK_UUID_SIZE);
856 properties->deviceLUIDValid = false;
857 break;
858 }
859 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MULTIVIEW_PROPERTIES_KHR: {
860 VkPhysicalDeviceMultiviewPropertiesKHR *properties =
861 (VkPhysicalDeviceMultiviewPropertiesKHR *) ext;
862 properties->maxMultiviewViewCount = MAX_VIEWS;
863 properties->maxMultiviewInstanceIndex = INT_MAX;
864 break;
865 }
866 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_POINT_CLIPPING_PROPERTIES_KHR: {
867 VkPhysicalDevicePointClippingPropertiesKHR *properties =
868 (VkPhysicalDevicePointClippingPropertiesKHR *) ext;
869 properties->pointClippingBehavior =
870 VK_POINT_CLIPPING_BEHAVIOR_ALL_CLIP_PLANES_KHR;
871 break;
872 }
873 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MAINTENANCE_3_PROPERTIES: {
874 VkPhysicalDeviceMaintenance3Properties *properties =
875 (VkPhysicalDeviceMaintenance3Properties *) ext;
876 /* Make sure everything is addressable by a signed 32-bit int, and
877 * our largest descriptors are 96 bytes. */
878 properties->maxPerSetDescriptors = (1ull << 31) / 96;
879 /* Our buffer size fields allow only this much */
880 properties->maxMemoryAllocationSize = 0xFFFFFFFFull;
881 break;
882 }
883 default:
884 break;
885 }
886 }
887 }
888
889 static const VkQueueFamilyProperties tu_queue_family_properties = {
890 .queueFlags =
891 VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT | VK_QUEUE_TRANSFER_BIT,
892 .queueCount = 1,
893 .timestampValidBits = 64,
894 .minImageTransferGranularity = (VkExtent3D) { 1, 1, 1 },
895 };
896
897 void
898 tu_GetPhysicalDeviceQueueFamilyProperties(
899 VkPhysicalDevice physicalDevice,
900 uint32_t *pQueueFamilyPropertyCount,
901 VkQueueFamilyProperties *pQueueFamilyProperties)
902 {
903 VK_OUTARRAY_MAKE(out, pQueueFamilyProperties, pQueueFamilyPropertyCount);
904
905 vk_outarray_append(&out, p) { *p = tu_queue_family_properties; }
906 }
907
908 void
909 tu_GetPhysicalDeviceQueueFamilyProperties2(
910 VkPhysicalDevice physicalDevice,
911 uint32_t *pQueueFamilyPropertyCount,
912 VkQueueFamilyProperties2KHR *pQueueFamilyProperties)
913 {
914 VK_OUTARRAY_MAKE(out, pQueueFamilyProperties, pQueueFamilyPropertyCount);
915
916 vk_outarray_append(&out, p)
917 {
918 p->queueFamilyProperties = tu_queue_family_properties;
919 }
920 }
921
922 static uint64_t
923 tu_get_system_heap_size(void)
924 {
925 struct sysinfo info;
926 sysinfo(&info);
927
928 uint64_t total_ram = (uint64_t) info.totalram * (uint64_t) info.mem_unit;
929
930 /* We don't want to burn too much ram with the GPU. If the user has 4GiB
931 * or less, we use at most half. If they have more than 4GiB, we use 3/4.
932 */
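   /* For example: 8 GiB of system RAM yields a 6 GiB heap, while 4 GiB
    * yields a 2 GiB heap.
    */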
933 uint64_t available_ram;
934 if (total_ram <= 4ull * 1024ull * 1024ull * 1024ull)
935 available_ram = total_ram / 2;
936 else
937 available_ram = total_ram * 3 / 4;
938
939 return available_ram;
940 }
941
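/* A single heap and memory type is exposed: every BO is allocated
 * write-combined (MSM_BO_WC in tu_bo_init_new) and CPU-mappable, so the one
 * type is DEVICE_LOCAL | HOST_VISIBLE | HOST_COHERENT. This is also why the
 * flush/invalidate entry points further down are no-ops.
 */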
942 void
943 tu_GetPhysicalDeviceMemoryProperties(
944 VkPhysicalDevice physicalDevice,
945 VkPhysicalDeviceMemoryProperties *pMemoryProperties)
946 {
947 pMemoryProperties->memoryHeapCount = 1;
948 pMemoryProperties->memoryHeaps[0].size = tu_get_system_heap_size();
949 pMemoryProperties->memoryHeaps[0].flags = VK_MEMORY_HEAP_DEVICE_LOCAL_BIT;
950
951 pMemoryProperties->memoryTypeCount = 1;
952 pMemoryProperties->memoryTypes[0].propertyFlags =
953 VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT |
954 VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT |
955 VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
956 pMemoryProperties->memoryTypes[0].heapIndex = 0;
957 }
958
959 void
960 tu_GetPhysicalDeviceMemoryProperties2(
961 VkPhysicalDevice physicalDevice,
962 VkPhysicalDeviceMemoryProperties2KHR *pMemoryProperties)
963 {
964    tu_GetPhysicalDeviceMemoryProperties(
965       physicalDevice, &pMemoryProperties->memoryProperties);
966 }
967
968 static VkResult
969 tu_queue_init(struct tu_device *device,
970 struct tu_queue *queue,
971 uint32_t queue_family_index,
972 int idx,
973 VkDeviceQueueCreateFlags flags)
974 {
975 queue->_loader_data.loaderMagic = ICD_LOADER_MAGIC;
976 queue->device = device;
977 queue->queue_family_index = queue_family_index;
978 queue->queue_idx = idx;
979 queue->flags = flags;
980
981 int ret = tu_drm_submitqueue_new(device, 0, &queue->msm_queue_id);
982 if (ret)
983 return VK_ERROR_INITIALIZATION_FAILED;
984
985 tu_fence_init(&queue->submit_fence, false);
986
987 return VK_SUCCESS;
988 }
989
990 static void
991 tu_queue_finish(struct tu_queue *queue)
992 {
993 tu_fence_finish(&queue->submit_fence);
994 tu_drm_submitqueue_close(queue->device, queue->msm_queue_id);
995 }
996
997 static int
998 tu_get_device_extension_index(const char *name)
999 {
1000 for (unsigned i = 0; i < TU_DEVICE_EXTENSION_COUNT; ++i) {
1001 if (strcmp(name, tu_device_extensions[i].extensionName) == 0)
1002 return i;
1003 }
1004 return -1;
1005 }
1006
1007 VkResult
1008 tu_CreateDevice(VkPhysicalDevice physicalDevice,
1009 const VkDeviceCreateInfo *pCreateInfo,
1010 const VkAllocationCallbacks *pAllocator,
1011 VkDevice *pDevice)
1012 {
1013 TU_FROM_HANDLE(tu_physical_device, physical_device, physicalDevice);
1014 VkResult result;
1015 struct tu_device *device;
1016
1017 /* Check enabled features */
1018 if (pCreateInfo->pEnabledFeatures) {
1019 VkPhysicalDeviceFeatures supported_features;
1020 tu_GetPhysicalDeviceFeatures(physicalDevice, &supported_features);
1021 VkBool32 *supported_feature = (VkBool32 *) &supported_features;
1022 VkBool32 *enabled_feature = (VkBool32 *) pCreateInfo->pEnabledFeatures;
1023 unsigned num_features =
1024 sizeof(VkPhysicalDeviceFeatures) / sizeof(VkBool32);
1025 for (uint32_t i = 0; i < num_features; i++) {
1026 if (enabled_feature[i] && !supported_feature[i])
1027 return vk_error(physical_device->instance,
1028 VK_ERROR_FEATURE_NOT_PRESENT);
1029 }
1030 }
1031
1032 device = vk_zalloc2(&physical_device->instance->alloc, pAllocator,
1033 sizeof(*device), 8, VK_SYSTEM_ALLOCATION_SCOPE_DEVICE);
1034 if (!device)
1035 return vk_error(physical_device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
1036
1037 device->_loader_data.loaderMagic = ICD_LOADER_MAGIC;
1038 device->instance = physical_device->instance;
1039 device->physical_device = physical_device;
1040
1041 if (pAllocator)
1042 device->alloc = *pAllocator;
1043 else
1044 device->alloc = physical_device->instance->alloc;
1045
1046 for (uint32_t i = 0; i < pCreateInfo->enabledExtensionCount; i++) {
1047 const char *ext_name = pCreateInfo->ppEnabledExtensionNames[i];
1048 int index = tu_get_device_extension_index(ext_name);
1049 if (index < 0 ||
1050 !physical_device->supported_extensions.extensions[index]) {
1051 vk_free(&device->alloc, device);
1052 return vk_error(physical_device->instance,
1053 VK_ERROR_EXTENSION_NOT_PRESENT);
1054 }
1055
1056 device->enabled_extensions.extensions[index] = true;
1057 }
1058
1059 for (unsigned i = 0; i < pCreateInfo->queueCreateInfoCount; i++) {
1060 const VkDeviceQueueCreateInfo *queue_create =
1061 &pCreateInfo->pQueueCreateInfos[i];
1062 uint32_t qfi = queue_create->queueFamilyIndex;
1063 device->queues[qfi] = vk_alloc(
1064 &device->alloc, queue_create->queueCount * sizeof(struct tu_queue),
1065 8, VK_SYSTEM_ALLOCATION_SCOPE_DEVICE);
1066 if (!device->queues[qfi]) {
1067 result = VK_ERROR_OUT_OF_HOST_MEMORY;
1068 goto fail;
1069 }
1070
1071 memset(device->queues[qfi], 0,
1072 queue_create->queueCount * sizeof(struct tu_queue));
1073
1074 device->queue_count[qfi] = queue_create->queueCount;
1075
1076 for (unsigned q = 0; q < queue_create->queueCount; q++) {
1077 result = tu_queue_init(device, &device->queues[qfi][q], qfi, q,
1078 queue_create->flags);
1079 if (result != VK_SUCCESS)
1080 goto fail;
1081 }
1082 }
1083
1084 device->compiler = ir3_compiler_create(NULL, physical_device->gpu_id);
1085    if (!device->compiler) {
1086       result = VK_ERROR_INITIALIZATION_FAILED;
            goto fail;
         }
1087
1088 VkPipelineCacheCreateInfo ci;
1089 ci.sType = VK_STRUCTURE_TYPE_PIPELINE_CACHE_CREATE_INFO;
1090 ci.pNext = NULL;
1091 ci.flags = 0;
1092 ci.pInitialData = NULL;
1093 ci.initialDataSize = 0;
1094 VkPipelineCache pc;
1095 result =
1096 tu_CreatePipelineCache(tu_device_to_handle(device), &ci, NULL, &pc);
1097 if (result != VK_SUCCESS)
1098 goto fail;
1099
1100 device->mem_cache = tu_pipeline_cache_from_handle(pc);
1101
1102 *pDevice = tu_device_to_handle(device);
1103 return VK_SUCCESS;
1104
1105 fail:
1106 for (unsigned i = 0; i < TU_MAX_QUEUE_FAMILIES; i++) {
1107 for (unsigned q = 0; q < device->queue_count[i]; q++)
1108 tu_queue_finish(&device->queues[i][q]);
1109 if (device->queue_count[i])
1110 vk_free(&device->alloc, device->queues[i]);
1111 }
1112
1113 if (device->compiler)
1114 ralloc_free(device->compiler);
1115
1116 vk_free(&device->alloc, device);
1117 return result;
1118 }
1119
1120 void
1121 tu_DestroyDevice(VkDevice _device, const VkAllocationCallbacks *pAllocator)
1122 {
1123 TU_FROM_HANDLE(tu_device, device, _device);
1124
1125 if (!device)
1126 return;
1127
1128 for (unsigned i = 0; i < TU_MAX_QUEUE_FAMILIES; i++) {
1129 for (unsigned q = 0; q < device->queue_count[i]; q++)
1130 tu_queue_finish(&device->queues[i][q]);
1131 if (device->queue_count[i])
1132 vk_free(&device->alloc, device->queues[i]);
1133 }
1134
1135 /* the compiler does not use pAllocator */
1136 ralloc_free(device->compiler);
1137
1138 VkPipelineCache pc = tu_pipeline_cache_to_handle(device->mem_cache);
1139 tu_DestroyPipelineCache(tu_device_to_handle(device), pc, NULL);
1140
1141 vk_free(&device->alloc, device);
1142 }
1143
1144 VkResult
1145 tu_EnumerateInstanceLayerProperties(uint32_t *pPropertyCount,
1146 VkLayerProperties *pProperties)
1147 {
1148 *pPropertyCount = 0;
1149 return VK_SUCCESS;
1150 }
1151
1152 VkResult
1153 tu_EnumerateDeviceLayerProperties(VkPhysicalDevice physicalDevice,
1154 uint32_t *pPropertyCount,
1155 VkLayerProperties *pProperties)
1156 {
1157 *pPropertyCount = 0;
1158 return VK_SUCCESS;
1159 }
1160
1161 void
1162 tu_GetDeviceQueue2(VkDevice _device,
1163 const VkDeviceQueueInfo2 *pQueueInfo,
1164 VkQueue *pQueue)
1165 {
1166 TU_FROM_HANDLE(tu_device, device, _device);
1167 struct tu_queue *queue;
1168
1169 queue =
1170 &device->queues[pQueueInfo->queueFamilyIndex][pQueueInfo->queueIndex];
1171 if (pQueueInfo->flags != queue->flags) {
1172 /* From the Vulkan 1.1.70 spec:
1173 *
1174 * "The queue returned by vkGetDeviceQueue2 must have the same
1175 * flags value from this structure as that used at device
1176 * creation time in a VkDeviceQueueCreateInfo instance. If no
1177 * matching flags were specified at device creation time then
1178 * pQueue will return VK_NULL_HANDLE."
1179 */
1180 *pQueue = VK_NULL_HANDLE;
1181 return;
1182 }
1183
1184 *pQueue = tu_queue_to_handle(queue);
1185 }
1186
1187 void
1188 tu_GetDeviceQueue(VkDevice _device,
1189 uint32_t queueFamilyIndex,
1190 uint32_t queueIndex,
1191 VkQueue *pQueue)
1192 {
1193 const VkDeviceQueueInfo2 info =
1194 (VkDeviceQueueInfo2) { .sType = VK_STRUCTURE_TYPE_DEVICE_QUEUE_INFO_2,
1195 .queueFamilyIndex = queueFamilyIndex,
1196 .queueIndex = queueIndex };
1197
1198 tu_GetDeviceQueue2(_device, &info, pQueue);
1199 }
1200
1201 VkResult
1202 tu_QueueSubmit(VkQueue _queue,
1203 uint32_t submitCount,
1204 const VkSubmitInfo *pSubmits,
1205 VkFence _fence)
1206 {
1207 TU_FROM_HANDLE(tu_queue, queue, _queue);
1208
1209 for (uint32_t i = 0; i < submitCount; ++i) {
1210 const VkSubmitInfo *submit = pSubmits + i;
1211 const bool last_submit = (i == submitCount - 1);
1212 struct tu_bo_list bo_list;
1213 tu_bo_list_init(&bo_list);
1214
1215 uint32_t entry_count = 0;
1216 for (uint32_t j = 0; j < submit->commandBufferCount; ++j) {
1217 TU_FROM_HANDLE(tu_cmd_buffer, cmdbuf, submit->pCommandBuffers[j]);
1218 entry_count += cmdbuf->cs.entry_count;
1219 }
1220
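      /* Flatten every IB of every command buffer into one submission: each
       * entry becomes an MSM_SUBMIT_CMD_BUF command for the kernel, and the
       * BO list tells it which buffers must be resident (and dumpable) for
       * this submit.
       */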
1221 struct drm_msm_gem_submit_cmd cmds[entry_count];
1222 uint32_t entry_idx = 0;
1223 for (uint32_t j = 0; j < submit->commandBufferCount; ++j) {
1224 TU_FROM_HANDLE(tu_cmd_buffer, cmdbuf, submit->pCommandBuffers[j]);
1225 struct tu_cs *cs = &cmdbuf->cs;
1226 for (unsigned i = 0; i < cs->entry_count; ++i, ++entry_idx) {
1227 cmds[entry_idx].type = MSM_SUBMIT_CMD_BUF;
1228 cmds[entry_idx].submit_idx =
1229 tu_bo_list_add(&bo_list, cs->entries[i].bo,
1230 MSM_SUBMIT_BO_READ | MSM_SUBMIT_BO_DUMP);
1231 cmds[entry_idx].submit_offset = cs->entries[i].offset;
1232 cmds[entry_idx].size = cs->entries[i].size;
1233 cmds[entry_idx].pad = 0;
1234 cmds[entry_idx].nr_relocs = 0;
1235 cmds[entry_idx].relocs = 0;
1236 }
1237
1238 tu_bo_list_merge(&bo_list, &cmdbuf->bo_list);
1239 }
1240
1241 uint32_t flags = MSM_PIPE_3D0;
1242 if (last_submit) {
1243 flags |= MSM_SUBMIT_FENCE_FD_OUT;
1244 }
1245
1246 struct drm_msm_gem_submit req = {
1247 .flags = flags,
1248 .queueid = queue->msm_queue_id,
1249 .bos = (uint64_t)(uintptr_t) bo_list.bo_infos,
1250 .nr_bos = bo_list.count,
1251 .cmds = (uint64_t)(uintptr_t)cmds,
1252 .nr_cmds = entry_count,
1253 };
1254
1255 int ret = drmCommandWriteRead(queue->device->physical_device->local_fd,
1256 DRM_MSM_GEM_SUBMIT,
1257 &req, sizeof(req));
1258 if (ret) {
1259 fprintf(stderr, "submit failed: %s\n", strerror(errno));
1260 abort();
1261 }
1262
1263 tu_bo_list_destroy(&bo_list);
1264
1265 if (last_submit) {
1266 /* no need to merge fences as queue execution is serialized */
1267 tu_fence_update_fd(&queue->submit_fence, req.fence_fd);
1268 }
1269 }
1270
1271 if (_fence != VK_NULL_HANDLE) {
1272 TU_FROM_HANDLE(tu_fence, fence, _fence);
1273 tu_fence_copy(fence, &queue->submit_fence);
1274 }
1275
1276 return VK_SUCCESS;
1277 }
1278
1279 VkResult
1280 tu_QueueWaitIdle(VkQueue _queue)
1281 {
1282 TU_FROM_HANDLE(tu_queue, queue, _queue);
1283
1284 tu_fence_wait_idle(&queue->submit_fence);
1285
1286 return VK_SUCCESS;
1287 }
1288
1289 VkResult
1290 tu_DeviceWaitIdle(VkDevice _device)
1291 {
1292 TU_FROM_HANDLE(tu_device, device, _device);
1293
1294 for (unsigned i = 0; i < TU_MAX_QUEUE_FAMILIES; i++) {
1295 for (unsigned q = 0; q < device->queue_count[i]; q++) {
1296 tu_QueueWaitIdle(tu_queue_to_handle(&device->queues[i][q]));
1297 }
1298 }
1299 return VK_SUCCESS;
1300 }
1301
1302 VkResult
1303 tu_EnumerateInstanceExtensionProperties(const char *pLayerName,
1304 uint32_t *pPropertyCount,
1305 VkExtensionProperties *pProperties)
1306 {
1307 VK_OUTARRAY_MAKE(out, pProperties, pPropertyCount);
1308
1309    /* We support no layers */
1310 if (pLayerName)
1311 return vk_error(NULL, VK_ERROR_LAYER_NOT_PRESENT);
1312
1313 for (int i = 0; i < TU_INSTANCE_EXTENSION_COUNT; i++) {
1314 if (tu_supported_instance_extensions.extensions[i]) {
1315 vk_outarray_append(&out, prop) { *prop = tu_instance_extensions[i]; }
1316 }
1317 }
1318
1319 return vk_outarray_status(&out);
1320 }
1321
1322 VkResult
1323 tu_EnumerateDeviceExtensionProperties(VkPhysicalDevice physicalDevice,
1324 const char *pLayerName,
1325 uint32_t *pPropertyCount,
1326 VkExtensionProperties *pProperties)
1327 {
1329 TU_FROM_HANDLE(tu_physical_device, device, physicalDevice);
1330 VK_OUTARRAY_MAKE(out, pProperties, pPropertyCount);
1331
1332    /* We support no layers */
1333 if (pLayerName)
1334 return vk_error(NULL, VK_ERROR_LAYER_NOT_PRESENT);
1335
1336 for (int i = 0; i < TU_DEVICE_EXTENSION_COUNT; i++) {
1337 if (device->supported_extensions.extensions[i]) {
1338 vk_outarray_append(&out, prop) { *prop = tu_device_extensions[i]; }
1339 }
1340 }
1341
1342 return vk_outarray_status(&out);
1343 }
1344
1345 PFN_vkVoidFunction
1346 tu_GetInstanceProcAddr(VkInstance _instance, const char *pName)
1347 {
1348 TU_FROM_HANDLE(tu_instance, instance, _instance);
1349
1350 return tu_lookup_entrypoint_checked(
1351 pName, instance ? instance->api_version : 0,
1352 instance ? &instance->enabled_extensions : NULL, NULL);
1353 }
1354
1355 /* The loader wants us to expose a second GetInstanceProcAddr function
1356 * to work around certain LD_PRELOAD issues seen in apps.
1357 */
1358 PUBLIC
1359 VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL
1360 vk_icdGetInstanceProcAddr(VkInstance instance, const char *pName);
1361
1362 PUBLIC
1363 VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL
1364 vk_icdGetInstanceProcAddr(VkInstance instance, const char *pName)
1365 {
1366 return tu_GetInstanceProcAddr(instance, pName);
1367 }
1368
1369 PFN_vkVoidFunction
1370 tu_GetDeviceProcAddr(VkDevice _device, const char *pName)
1371 {
1372 TU_FROM_HANDLE(tu_device, device, _device);
1373
1374 return tu_lookup_entrypoint_checked(pName, device->instance->api_version,
1375 &device->instance->enabled_extensions,
1376 &device->enabled_extensions);
1377 }
1378
1379 static VkResult
1380 tu_alloc_memory(struct tu_device *device,
1381 const VkMemoryAllocateInfo *pAllocateInfo,
1382 const VkAllocationCallbacks *pAllocator,
1383 VkDeviceMemory *pMem)
1384 {
1385 struct tu_device_memory *mem;
1386 VkResult result;
1387
1388 assert(pAllocateInfo->sType == VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO);
1389
1390 if (pAllocateInfo->allocationSize == 0) {
1391 /* Apparently, this is allowed */
1392 *pMem = VK_NULL_HANDLE;
1393 return VK_SUCCESS;
1394 }
1395
1396 mem = vk_alloc2(&device->alloc, pAllocator, sizeof(*mem), 8,
1397 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
1398 if (mem == NULL)
1399 return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
1400
1401 const VkImportMemoryFdInfoKHR *fd_info =
1402 vk_find_struct_const(pAllocateInfo->pNext, IMPORT_MEMORY_FD_INFO_KHR);
1403 if (fd_info && !fd_info->handleType)
1404 fd_info = NULL;
1405
1406 if (fd_info) {
1407 assert(fd_info->handleType ==
1408 VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT ||
1409 fd_info->handleType ==
1410 VK_EXTERNAL_MEMORY_HANDLE_TYPE_DMA_BUF_BIT_EXT);
1411
1412 /*
1413 * TODO Importing the same fd twice gives us the same handle without
1414 * reference counting. We need to maintain a per-instance handle-to-bo
1415 * table and add reference count to tu_bo.
1416 */
1417 result = tu_bo_init_dmabuf(device, &mem->bo,
1418 pAllocateInfo->allocationSize, fd_info->fd);
1419 if (result == VK_SUCCESS) {
1420 /* take ownership and close the fd */
1421 close(fd_info->fd);
1422 }
1423 } else {
1424 result =
1425 tu_bo_init_new(device, &mem->bo, pAllocateInfo->allocationSize);
1426 }
1427
1428 if (result != VK_SUCCESS) {
1429 vk_free2(&device->alloc, pAllocator, mem);
1430 return result;
1431 }
1432
1433 mem->size = pAllocateInfo->allocationSize;
1434 mem->type_index = pAllocateInfo->memoryTypeIndex;
1435
1436 mem->map = NULL;
1437 mem->user_ptr = NULL;
1438
1439 *pMem = tu_device_memory_to_handle(mem);
1440
1441 return VK_SUCCESS;
1442 }
1443
1444 VkResult
1445 tu_AllocateMemory(VkDevice _device,
1446 const VkMemoryAllocateInfo *pAllocateInfo,
1447 const VkAllocationCallbacks *pAllocator,
1448 VkDeviceMemory *pMem)
1449 {
1450 TU_FROM_HANDLE(tu_device, device, _device);
1451 return tu_alloc_memory(device, pAllocateInfo, pAllocator, pMem);
1452 }
1453
1454 void
1455 tu_FreeMemory(VkDevice _device,
1456 VkDeviceMemory _mem,
1457 const VkAllocationCallbacks *pAllocator)
1458 {
1459 TU_FROM_HANDLE(tu_device, device, _device);
1460 TU_FROM_HANDLE(tu_device_memory, mem, _mem);
1461
1462 if (mem == NULL)
1463 return;
1464
1465 tu_bo_finish(device, &mem->bo);
1466 vk_free2(&device->alloc, pAllocator, mem);
1467 }
1468
1469 VkResult
1470 tu_MapMemory(VkDevice _device,
1471 VkDeviceMemory _memory,
1472 VkDeviceSize offset,
1473 VkDeviceSize size,
1474 VkMemoryMapFlags flags,
1475 void **ppData)
1476 {
1477 TU_FROM_HANDLE(tu_device, device, _device);
1478 TU_FROM_HANDLE(tu_device_memory, mem, _memory);
1479 VkResult result;
1480
1481 if (mem == NULL) {
1482 *ppData = NULL;
1483 return VK_SUCCESS;
1484 }
1485
1486 if (mem->user_ptr) {
1487 *ppData = mem->user_ptr;
1488 } else if (!mem->map) {
1489 result = tu_bo_map(device, &mem->bo);
1490 if (result != VK_SUCCESS)
1491 return result;
1492 *ppData = mem->map = mem->bo.map;
1493 } else
1494 *ppData = mem->map;
1495
1496 if (*ppData) {
1497 *ppData += offset;
1498 return VK_SUCCESS;
1499 }
1500
1501 return vk_error(device->instance, VK_ERROR_MEMORY_MAP_FAILED);
1502 }
1503
1504 void
1505 tu_UnmapMemory(VkDevice _device, VkDeviceMemory _memory)
1506 {
1507 /* I do not see any unmapping done by the freedreno Gallium driver. */
1508 }
1509
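/* With only a single coherent, write-combined memory type there is nothing
 * to flush or invalidate, so the mapped-range entry points are no-ops.
 */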
1510 VkResult
1511 tu_FlushMappedMemoryRanges(VkDevice _device,
1512 uint32_t memoryRangeCount,
1513 const VkMappedMemoryRange *pMemoryRanges)
1514 {
1515 return VK_SUCCESS;
1516 }
1517
1518 VkResult
1519 tu_InvalidateMappedMemoryRanges(VkDevice _device,
1520 uint32_t memoryRangeCount,
1521 const VkMappedMemoryRange *pMemoryRanges)
1522 {
1523 return VK_SUCCESS;
1524 }
1525
1526 void
1527 tu_GetBufferMemoryRequirements(VkDevice _device,
1528 VkBuffer _buffer,
1529 VkMemoryRequirements *pMemoryRequirements)
1530 {
1531 TU_FROM_HANDLE(tu_buffer, buffer, _buffer);
1532
1533 pMemoryRequirements->memoryTypeBits = 1;
1534 pMemoryRequirements->alignment = 16;
1535 pMemoryRequirements->size =
1536 align64(buffer->size, pMemoryRequirements->alignment);
1537 }
1538
1539 void
1540 tu_GetBufferMemoryRequirements2(
1541 VkDevice device,
1542 const VkBufferMemoryRequirementsInfo2KHR *pInfo,
1543 VkMemoryRequirements2KHR *pMemoryRequirements)
1544 {
1545 tu_GetBufferMemoryRequirements(device, pInfo->buffer,
1546 &pMemoryRequirements->memoryRequirements);
1547 }
1548
1549 void
1550 tu_GetImageMemoryRequirements(VkDevice _device,
1551 VkImage _image,
1552 VkMemoryRequirements *pMemoryRequirements)
1553 {
1554 TU_FROM_HANDLE(tu_image, image, _image);
1555
1556 pMemoryRequirements->memoryTypeBits = 1;
1557 pMemoryRequirements->size = image->size;
1558 pMemoryRequirements->alignment = image->alignment;
1559 }
1560
1561 void
1562 tu_GetImageMemoryRequirements2(VkDevice device,
1563 const VkImageMemoryRequirementsInfo2KHR *pInfo,
1564 VkMemoryRequirements2KHR *pMemoryRequirements)
1565 {
1566 tu_GetImageMemoryRequirements(device, pInfo->image,
1567 &pMemoryRequirements->memoryRequirements);
1568 }
1569
1570 void
1571 tu_GetImageSparseMemoryRequirements(
1572 VkDevice device,
1573 VkImage image,
1574 uint32_t *pSparseMemoryRequirementCount,
1575 VkSparseImageMemoryRequirements *pSparseMemoryRequirements)
1576 {
1577 tu_stub();
1578 }
1579
1580 void
1581 tu_GetImageSparseMemoryRequirements2(
1582 VkDevice device,
1583 const VkImageSparseMemoryRequirementsInfo2KHR *pInfo,
1584 uint32_t *pSparseMemoryRequirementCount,
1585 VkSparseImageMemoryRequirements2KHR *pSparseMemoryRequirements)
1586 {
1587 tu_stub();
1588 }
1589
1590 void
1591 tu_GetDeviceMemoryCommitment(VkDevice device,
1592 VkDeviceMemory memory,
1593 VkDeviceSize *pCommittedMemoryInBytes)
1594 {
1595 *pCommittedMemoryInBytes = 0;
1596 }
1597
1598 VkResult
1599 tu_BindBufferMemory2(VkDevice device,
1600 uint32_t bindInfoCount,
1601 const VkBindBufferMemoryInfoKHR *pBindInfos)
1602 {
1603 for (uint32_t i = 0; i < bindInfoCount; ++i) {
1604 TU_FROM_HANDLE(tu_device_memory, mem, pBindInfos[i].memory);
1605 TU_FROM_HANDLE(tu_buffer, buffer, pBindInfos[i].buffer);
1606
1607 if (mem) {
1608 buffer->bo = &mem->bo;
1609 buffer->bo_offset = pBindInfos[i].memoryOffset;
1610 } else {
1611 buffer->bo = NULL;
1612 }
1613 }
1614 return VK_SUCCESS;
1615 }
1616
1617 VkResult
1618 tu_BindBufferMemory(VkDevice device,
1619 VkBuffer buffer,
1620 VkDeviceMemory memory,
1621 VkDeviceSize memoryOffset)
1622 {
1623 const VkBindBufferMemoryInfoKHR info = {
1624 .sType = VK_STRUCTURE_TYPE_BIND_BUFFER_MEMORY_INFO_KHR,
1625 .buffer = buffer,
1626 .memory = memory,
1627 .memoryOffset = memoryOffset
1628 };
1629
1630 return tu_BindBufferMemory2(device, 1, &info);
1631 }
1632
1633 VkResult
1634 tu_BindImageMemory2(VkDevice device,
1635 uint32_t bindInfoCount,
1636 const VkBindImageMemoryInfo *pBindInfos)
1637 {
1638 for (uint32_t i = 0; i < bindInfoCount; ++i) {
1639 TU_FROM_HANDLE(tu_image, image, pBindInfos[i].image);
1640 TU_FROM_HANDLE(tu_device_memory, mem, pBindInfos[i].memory);
1641
1642 if (mem) {
1643 image->bo = &mem->bo;
1644 image->bo_offset = pBindInfos[i].memoryOffset;
1645 } else {
1646 image->bo = NULL;
1647 image->bo_offset = 0;
1648 }
1649 }
1650
1651 return VK_SUCCESS;
1652 }
1653
1654 VkResult
1655 tu_BindImageMemory(VkDevice device,
1656 VkImage image,
1657 VkDeviceMemory memory,
1658 VkDeviceSize memoryOffset)
1659 {
1660 const VkBindImageMemoryInfo info = {
1661       .sType = VK_STRUCTURE_TYPE_BIND_IMAGE_MEMORY_INFO,
1662 .image = image,
1663 .memory = memory,
1664 .memoryOffset = memoryOffset
1665 };
1666
1667 return tu_BindImageMemory2(device, 1, &info);
1668 }
1669
1670 VkResult
1671 tu_QueueBindSparse(VkQueue _queue,
1672 uint32_t bindInfoCount,
1673 const VkBindSparseInfo *pBindInfo,
1674 VkFence _fence)
1675 {
1676 return VK_SUCCESS;
1677 }
1678
1679 // Queue semaphore functions
1680
1681 VkResult
1682 tu_CreateSemaphore(VkDevice _device,
1683 const VkSemaphoreCreateInfo *pCreateInfo,
1684 const VkAllocationCallbacks *pAllocator,
1685 VkSemaphore *pSemaphore)
1686 {
1687 TU_FROM_HANDLE(tu_device, device, _device);
1688
1689 struct tu_semaphore *sem =
1690 vk_alloc2(&device->alloc, pAllocator, sizeof(*sem), 8,
1691 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
1692 if (!sem)
1693 return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
1694
1695 *pSemaphore = tu_semaphore_to_handle(sem);
1696 return VK_SUCCESS;
1697 }
1698
1699 void
1700 tu_DestroySemaphore(VkDevice _device,
1701 VkSemaphore _semaphore,
1702 const VkAllocationCallbacks *pAllocator)
1703 {
1704 TU_FROM_HANDLE(tu_device, device, _device);
1705 TU_FROM_HANDLE(tu_semaphore, sem, _semaphore);
1706 if (!_semaphore)
1707 return;
1708
1709 vk_free2(&device->alloc, pAllocator, sem);
1710 }
1711
1712 VkResult
1713 tu_CreateEvent(VkDevice _device,
1714 const VkEventCreateInfo *pCreateInfo,
1715 const VkAllocationCallbacks *pAllocator,
1716 VkEvent *pEvent)
1717 {
1718 TU_FROM_HANDLE(tu_device, device, _device);
1719 struct tu_event *event =
1720 vk_alloc2(&device->alloc, pAllocator, sizeof(*event), 8,
1721 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
1722
1723 if (!event)
1724 return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
1725
1726 *pEvent = tu_event_to_handle(event);
1727
1728 return VK_SUCCESS;
1729 }
1730
1731 void
1732 tu_DestroyEvent(VkDevice _device,
1733 VkEvent _event,
1734 const VkAllocationCallbacks *pAllocator)
1735 {
1736 TU_FROM_HANDLE(tu_device, device, _device);
1737 TU_FROM_HANDLE(tu_event, event, _event);
1738
1739 if (!event)
1740 return;
1741 vk_free2(&device->alloc, pAllocator, event);
1742 }
1743
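/* FIXME: tu_CreateEvent above never allocates or maps storage behind
 * event->map, so the status helpers below would dereference an
 * uninitialized pointer; events presumably need a small GPU-visible BO.
 */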
1744 VkResult
1745 tu_GetEventStatus(VkDevice _device, VkEvent _event)
1746 {
1747 TU_FROM_HANDLE(tu_event, event, _event);
1748
1749 if (*event->map == 1)
1750 return VK_EVENT_SET;
1751 return VK_EVENT_RESET;
1752 }
1753
1754 VkResult
1755 tu_SetEvent(VkDevice _device, VkEvent _event)
1756 {
1757 TU_FROM_HANDLE(tu_event, event, _event);
1758 *event->map = 1;
1759
1760 return VK_SUCCESS;
1761 }
1762
1763 VkResult
1764 tu_ResetEvent(VkDevice _device, VkEvent _event)
1765 {
1766 TU_FROM_HANDLE(tu_event, event, _event);
1767 *event->map = 0;
1768
1769 return VK_SUCCESS;
1770 }
1771
1772 VkResult
1773 tu_CreateBuffer(VkDevice _device,
1774 const VkBufferCreateInfo *pCreateInfo,
1775 const VkAllocationCallbacks *pAllocator,
1776 VkBuffer *pBuffer)
1777 {
1778 TU_FROM_HANDLE(tu_device, device, _device);
1779 struct tu_buffer *buffer;
1780
1781 assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO);
1782
1783 buffer = vk_alloc2(&device->alloc, pAllocator, sizeof(*buffer), 8,
1784 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
1785 if (buffer == NULL)
1786 return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
1787
1788 buffer->size = pCreateInfo->size;
1789 buffer->usage = pCreateInfo->usage;
1790 buffer->flags = pCreateInfo->flags;
1791
1792 *pBuffer = tu_buffer_to_handle(buffer);
1793
1794 return VK_SUCCESS;
1795 }
1796
1797 void
1798 tu_DestroyBuffer(VkDevice _device,
1799 VkBuffer _buffer,
1800 const VkAllocationCallbacks *pAllocator)
1801 {
1802 TU_FROM_HANDLE(tu_device, device, _device);
1803 TU_FROM_HANDLE(tu_buffer, buffer, _buffer);
1804
1805 if (!buffer)
1806 return;
1807
1808 vk_free2(&device->alloc, pAllocator, buffer);
1809 }
1810
1811 static uint32_t
1812 tu_surface_max_layer_count(struct tu_image_view *iview)
1813 {
1814 return iview->type == VK_IMAGE_VIEW_TYPE_3D
1815 ? iview->extent.depth
1816 : (iview->base_layer + iview->layer_count);
1817 }
1818
1819 VkResult
1820 tu_CreateFramebuffer(VkDevice _device,
1821 const VkFramebufferCreateInfo *pCreateInfo,
1822 const VkAllocationCallbacks *pAllocator,
1823 VkFramebuffer *pFramebuffer)
1824 {
1825 TU_FROM_HANDLE(tu_device, device, _device);
1826 struct tu_framebuffer *framebuffer;
1827
1828 assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO);
1829
1830 size_t size = sizeof(*framebuffer) + sizeof(struct tu_attachment_info) *
1831 pCreateInfo->attachmentCount;
1832 framebuffer = vk_alloc2(&device->alloc, pAllocator, size, 8,
1833 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
1834 if (framebuffer == NULL)
1835 return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
1836
1837 framebuffer->attachment_count = pCreateInfo->attachmentCount;
1838 framebuffer->width = pCreateInfo->width;
1839 framebuffer->height = pCreateInfo->height;
1840 framebuffer->layers = pCreateInfo->layers;
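   /* Clamp the framebuffer dimensions to the smallest attachment, so we
    * never render outside what every attachment can back.
    */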
1841 for (uint32_t i = 0; i < pCreateInfo->attachmentCount; i++) {
1842 VkImageView _iview = pCreateInfo->pAttachments[i];
1843 struct tu_image_view *iview = tu_image_view_from_handle(_iview);
1844 framebuffer->attachments[i].attachment = iview;
1845
1846 framebuffer->width = MIN2(framebuffer->width, iview->extent.width);
1847 framebuffer->height = MIN2(framebuffer->height, iview->extent.height);
1848 framebuffer->layers =
1849 MIN2(framebuffer->layers, tu_surface_max_layer_count(iview));
1850 }
1851
1852 *pFramebuffer = tu_framebuffer_to_handle(framebuffer);
1853 return VK_SUCCESS;
1854 }
1855
1856 void
1857 tu_DestroyFramebuffer(VkDevice _device,
1858 VkFramebuffer _fb,
1859 const VkAllocationCallbacks *pAllocator)
1860 {
1861 TU_FROM_HANDLE(tu_device, device, _device);
1862 TU_FROM_HANDLE(tu_framebuffer, fb, _fb);
1863
1864 if (!fb)
1865 return;
1866 vk_free2(&device->alloc, pAllocator, fb);
1867 }
1868
1869 static void
1870 tu_init_sampler(struct tu_device *device,
1871 struct tu_sampler *sampler,
1872 const VkSamplerCreateInfo *pCreateInfo)
1873 {
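   /* TODO: Translate pCreateInfo into hardware sampler state; for now the
    * sampler object is left empty.
    */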
1874 }
1875
1876 VkResult
1877 tu_CreateSampler(VkDevice _device,
1878 const VkSamplerCreateInfo *pCreateInfo,
1879 const VkAllocationCallbacks *pAllocator,
1880 VkSampler *pSampler)
1881 {
1882 TU_FROM_HANDLE(tu_device, device, _device);
1883 struct tu_sampler *sampler;
1884
1885 assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_SAMPLER_CREATE_INFO);
1886
1887 sampler = vk_alloc2(&device->alloc, pAllocator, sizeof(*sampler), 8,
1888 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
1889 if (!sampler)
1890 return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
1891
1892 tu_init_sampler(device, sampler, pCreateInfo);
1893 *pSampler = tu_sampler_to_handle(sampler);
1894
1895 return VK_SUCCESS;
1896 }
1897
1898 void
1899 tu_DestroySampler(VkDevice _device,
1900 VkSampler _sampler,
1901 const VkAllocationCallbacks *pAllocator)
1902 {
1903 TU_FROM_HANDLE(tu_device, device, _device);
1904 TU_FROM_HANDLE(tu_sampler, sampler, _sampler);
1905
1906 if (!sampler)
1907 return;
1908 vk_free2(&device->alloc, pAllocator, sampler);
1909 }
1910
1911 /* vk_icd.h does not declare this function, so we declare it here to
1912 * suppress -Wmissing-prototypes.
1913 */
1914 PUBLIC VKAPI_ATTR VkResult VKAPI_CALL
1915 vk_icdNegotiateLoaderICDInterfaceVersion(uint32_t *pSupportedVersion);
1916
1917 PUBLIC VKAPI_ATTR VkResult VKAPI_CALL
1918 vk_icdNegotiateLoaderICDInterfaceVersion(uint32_t *pSupportedVersion)
1919 {
1920 /* For the full details on loader interface versioning, see
1921 * <https://github.com/KhronosGroup/Vulkan-LoaderAndValidationLayers/blob/master/loader/LoaderAndLayerInterface.md>.
1922 * What follows is a condensed summary, to help you navigate the large and
1923 * confusing official doc.
1924 *
1925 * - Loader interface v0 is incompatible with later versions. We don't
1926 * support it.
1927 *
1928 * - In loader interface v1:
1929 * - The first ICD entrypoint called by the loader is
1930 * vk_icdGetInstanceProcAddr(). The ICD must statically expose this
1931 * entrypoint.
1932 * - The ICD must statically expose no other Vulkan symbol unless it
1933 * is linked with -Bsymbolic.
1934 * - Each dispatchable Vulkan handle created by the ICD must be
1935 * a pointer to a struct whose first member is VK_LOADER_DATA. The
1936 * ICD must initialize VK_LOADER_DATA.loadMagic to
1937 * ICD_LOADER_MAGIC.
1938 * - The loader implements vkCreate{PLATFORM}SurfaceKHR() and
1939 * vkDestroySurfaceKHR(). The ICD must be capable of working with
1940 * such loader-managed surfaces.
1941 *
1942 * - Loader interface v2 differs from v1 in:
1943 * - The first ICD entrypoint called by the loader is
1944 * vk_icdNegotiateLoaderICDInterfaceVersion(). The ICD must
1945 * statically expose this entrypoint.
1946 *
1947 * - Loader interface v3 differs from v2 in:
1948 * - The ICD must implement vkCreate{PLATFORM}SurfaceKHR(),
1949 * vkDestroySurfaceKHR(), and any other API that uses VkSurfaceKHR,
1950 * because the loader no longer does so.
1951 */
1952 *pSupportedVersion = MIN2(*pSupportedVersion, 3u);
1953 return VK_SUCCESS;
1954 }
1955
1956 VkResult
1957 tu_GetMemoryFdKHR(VkDevice _device,
1958 const VkMemoryGetFdInfoKHR *pGetFdInfo,
1959 int *pFd)
1960 {
1961 TU_FROM_HANDLE(tu_device, device, _device);
1962 TU_FROM_HANDLE(tu_device_memory, memory, pGetFdInfo->memory);
1963
1964 assert(pGetFdInfo->sType == VK_STRUCTURE_TYPE_MEMORY_GET_FD_INFO_KHR);
1965
1966 /* We currently support only the handle types checked below. */
1967 assert(pGetFdInfo->handleType ==
1968 VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT ||
1969 pGetFdInfo->handleType ==
1970 VK_EXTERNAL_MEMORY_HANDLE_TYPE_DMA_BUF_BIT_EXT);
1971
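   /* Both handle types are exported the same way: as a dma-buf prime fd
    * for the underlying BO.
    */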
1972 int prime_fd = tu_bo_export_dmabuf(device, &memory->bo);
1973 if (prime_fd < 0)
1974 return vk_error(device->instance, VK_ERROR_OUT_OF_DEVICE_MEMORY);
1975
1976 *pFd = prime_fd;
1977 return VK_SUCCESS;
1978 }
1979
1980 VkResult
1981 tu_GetMemoryFdPropertiesKHR(VkDevice _device,
1982 VkExternalMemoryHandleTypeFlagBits handleType,
1983 int fd,
1984 VkMemoryFdPropertiesKHR *pMemoryFdProperties)
1985 {
1986 assert(handleType == VK_EXTERNAL_MEMORY_HANDLE_TYPE_DMA_BUF_BIT_EXT);
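   /* memoryTypeBits is a bitmask of memory type indices the fd may be
    * imported into; bit 0 restricts imports to memory type 0.
    */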
1987 pMemoryFdProperties->memoryTypeBits = 1;
1988 return VK_SUCCESS;
1989 }
1990
1991 void
1992 tu_GetPhysicalDeviceExternalSemaphoreProperties(
1993 VkPhysicalDevice physicalDevice,
1994 const VkPhysicalDeviceExternalSemaphoreInfoKHR *pExternalSemaphoreInfo,
1995 VkExternalSemaphorePropertiesKHR *pExternalSemaphoreProperties)
1996 {
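   /* External semaphores are not supported yet, so report no compatible
    * handle types and no features.
    */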
1997 pExternalSemaphoreProperties->exportFromImportedHandleTypes = 0;
1998 pExternalSemaphoreProperties->compatibleHandleTypes = 0;
1999 pExternalSemaphoreProperties->externalSemaphoreFeatures = 0;
2000 }
2001
2002 void
2003 tu_GetPhysicalDeviceExternalFenceProperties(
2004 VkPhysicalDevice physicalDevice,
2005 const VkPhysicalDeviceExternalFenceInfoKHR *pExternalFenceInfo,
2006 VkExternalFencePropertiesKHR *pExternalFenceProperties)
2007 {
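   /* Likewise, external fences are not supported yet. */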
2008 pExternalFenceProperties->exportFromImportedHandleTypes = 0;
2009 pExternalFenceProperties->compatibleHandleTypes = 0;
2010 pExternalFenceProperties->externalFenceFeatures = 0;
2011 }
2012
2013 VkResult
2014 tu_CreateDebugReportCallbackEXT(
2015 VkInstance _instance,
2016 const VkDebugReportCallbackCreateInfoEXT *pCreateInfo,
2017 const VkAllocationCallbacks *pAllocator,
2018 VkDebugReportCallbackEXT *pCallback)
2019 {
2020 TU_FROM_HANDLE(tu_instance, instance, _instance);
2021 return vk_create_debug_report_callback(&instance->debug_report_callbacks,
2022 pCreateInfo, pAllocator,
2023 &instance->alloc, pCallback);
2024 }
2025
2026 void
2027 tu_DestroyDebugReportCallbackEXT(VkInstance _instance,
2028 VkDebugReportCallbackEXT _callback,
2029 const VkAllocationCallbacks *pAllocator)
2030 {
2031 TU_FROM_HANDLE(tu_instance, instance, _instance);
2032 vk_destroy_debug_report_callback(&instance->debug_report_callbacks,
2033 _callback, pAllocator, &instance->alloc);
2034 }
2035
2036 void
2037 tu_DebugReportMessageEXT(VkInstance _instance,
2038 VkDebugReportFlagsEXT flags,
2039 VkDebugReportObjectTypeEXT objectType,
2040 uint64_t object,
2041 size_t location,
2042 int32_t messageCode,
2043 const char *pLayerPrefix,
2044 const char *pMessage)
2045 {
2046 TU_FROM_HANDLE(tu_instance, instance, _instance);
2047 vk_debug_report(&instance->debug_report_callbacks, flags, objectType,
2048 object, location, messageCode, pLayerPrefix, pMessage);
2049 }
2050
2051 void
2052 tu_GetDeviceGroupPeerMemoryFeatures(
2053 VkDevice device,
2054 uint32_t heapIndex,
2055 uint32_t localDeviceIndex,
2056 uint32_t remoteDeviceIndex,
2057 VkPeerMemoryFeatureFlags *pPeerMemoryFeatures)
2058 {
2059 assert(localDeviceIndex == remoteDeviceIndex);
2060
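   /* We only support device groups of size one, so the "peer" is always
    * the local device and every peer memory feature can be advertised.
    */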
2061 *pPeerMemoryFeatures = VK_PEER_MEMORY_FEATURE_COPY_SRC_BIT |
2062 VK_PEER_MEMORY_FEATURE_COPY_DST_BIT |
2063 VK_PEER_MEMORY_FEATURE_GENERIC_SRC_BIT |
2064 VK_PEER_MEMORY_FEATURE_GENERIC_DST_BIT;
2065 }