turnip: remove unnecessary libfreedreno_drm dep
mesa.git: src/freedreno/vulkan/tu_device.c
/*
 * Copyright © 2016 Red Hat.
 * Copyright © 2016 Bas Nieuwenhuizen
 *
 * based in part on anv driver which is:
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

#include "tu_private.h"

#include <errno.h> /* for errno, used by tu_QueueSubmit() below */
#include <fcntl.h>
#include <stdbool.h>
#include <string.h>
#include <sys/mman.h>
#include <sys/sysinfo.h>
#include <unistd.h>
#include <xf86drm.h>

#include "util/debug.h"
#include "util/disk_cache.h"
#include "util/strtod.h"
#include "vk_format.h"
#include "vk_util.h"

#include "drm/msm_drm.h"
static int
tu_device_get_cache_uuid(uint16_t family, void *uuid)
{
   uint32_t mesa_timestamp;
   uint16_t f = family;
   memset(uuid, 0, VK_UUID_SIZE);
   if (!disk_cache_get_function_timestamp(tu_device_get_cache_uuid,
                                          &mesa_timestamp))
      return -1;

   memcpy(uuid, &mesa_timestamp, 4);
   memcpy((char *) uuid + 4, &f, 2);
   snprintf((char *) uuid + 6, VK_UUID_SIZE - 10, "tu");
   return 0;
}
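/* The cache UUID built above is laid out as:
 *   bytes 0-3: mesa build timestamp
 *   bytes 4-5: GPU family
 *   bytes 6+ : the literal string "tu", with the remainder left zeroed
 */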

static void
tu_get_driver_uuid(void *uuid)
{
   memset(uuid, 0, VK_UUID_SIZE);
   snprintf(uuid, VK_UUID_SIZE, "freedreno");
}

static void
tu_get_device_uuid(void *uuid)
{
   memset(uuid, 0, VK_UUID_SIZE);
}
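/* TODO: the device UUID is currently left all-zero. */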

VkResult
tu_bo_init_new(struct tu_device *dev, struct tu_bo *bo, uint64_t size)
{
   /* TODO: Choose better flags. As of 2018-11-12, freedreno/drm/msm_bo.c
    * always sets `flags = MSM_BO_WC`, and we copy that behavior here.
    */
   uint32_t gem_handle = tu_gem_new(dev, size, MSM_BO_WC);
   if (!gem_handle)
      goto fail_new;

   /* Calling DRM_MSM_GEM_INFO forces the kernel to allocate backing pages. We
    * want immediate backing pages because vkAllocateMemory and friends must
    * not lazily fail.
    *
    * TODO(chadv): Must we really call DRM_MSM_GEM_INFO to acquire backing
    * pages? I infer so from reading comments in msm_bo.c:bo_allocate(), but
    * maybe I misunderstand.
    */

   /* TODO: Do we need 'offset' if we have 'iova'? */
   uint64_t offset = tu_gem_info_offset(dev, gem_handle);
   if (!offset)
      goto fail_info;

   uint64_t iova = tu_gem_info_iova(dev, gem_handle);
   if (!iova)
      goto fail_info;

   *bo = (struct tu_bo) {
      .gem_handle = gem_handle,
      .size = size,
      .offset = offset,
      .iova = iova,
   };

   return VK_SUCCESS;

fail_info:
   tu_gem_close(dev, gem_handle);
fail_new:
   return vk_error(dev->instance, VK_ERROR_OUT_OF_DEVICE_MEMORY);
}

VkResult
tu_bo_map(struct tu_device *dev, struct tu_bo *bo)
{
   if (bo->map)
      return VK_SUCCESS;

   /* TODO: Should we use the wrapper os_mmap() like Freedreno does? */
   void *map = mmap(0, bo->size, PROT_READ | PROT_WRITE, MAP_SHARED,
                    dev->physical_device->local_fd, bo->offset);
   if (map == MAP_FAILED)
      return vk_error(dev->instance, VK_ERROR_MEMORY_MAP_FAILED);

   bo->map = map;
   return VK_SUCCESS;
}

void
tu_bo_finish(struct tu_device *dev, struct tu_bo *bo)
{
   assert(bo->gem_handle);

   if (bo->map)
      munmap(bo->map, bo->size);

   tu_gem_close(dev, bo->gem_handle);
}
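/* Sketch of the intended BO lifecycle: tu_bo_init_new() allocates the GEM
 * buffer, tu_bo_map() lazily (and idempotently) maps it for CPU access, and
 * tu_bo_finish() unmaps any mapping before closing the GEM handle.
 */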

static VkResult
tu_physical_device_init(struct tu_physical_device *device,
                        struct tu_instance *instance,
                        drmDevicePtr drm_device)
{
   const char *path = drm_device->nodes[DRM_NODE_RENDER];
   VkResult result = VK_SUCCESS;
   drmVersionPtr version;
   int fd;
   int master_fd = -1;
   uint64_t val;

   fd = open(path, O_RDWR | O_CLOEXEC);
   if (fd < 0) {
      return vk_errorf(instance, VK_ERROR_INCOMPATIBLE_DRIVER,
                       "failed to open device %s", path);
   }

   /* Version 1.3 added MSM_INFO_IOVA. */
   const int min_version_major = 1;
   const int min_version_minor = 3;

   version = drmGetVersion(fd);
   if (!version) {
      close(fd);
      return vk_errorf(instance, VK_ERROR_INCOMPATIBLE_DRIVER,
                       "failed to query kernel driver version for device %s",
                       path);
   }

   if (strcmp(version->name, "msm")) {
      drmFreeVersion(version);
      if (master_fd != -1)
         close(master_fd);
      close(fd);
      return vk_errorf(instance, VK_ERROR_INCOMPATIBLE_DRIVER,
                       "device %s does not use the msm kernel driver", path);
   }

   if (version->version_major != min_version_major ||
       version->version_minor < min_version_minor) {
      result = vk_errorf(instance, VK_ERROR_INCOMPATIBLE_DRIVER,
                         "kernel driver for device %s has version %d.%d, "
                         "but Vulkan requires version >= %d.%d",
                         path, version->version_major, version->version_minor,
                         min_version_major, min_version_minor);
      drmFreeVersion(version);
      close(fd);
      return result;
   }

   drmFreeVersion(version);

   if (instance->debug_flags & TU_DEBUG_STARTUP)
      tu_logi("Found compatible device '%s'.", path);

   device->_loader_data.loaderMagic = ICD_LOADER_MAGIC;
   device->instance = instance;
   assert(strlen(path) < ARRAY_SIZE(device->path));
   strncpy(device->path, path, ARRAY_SIZE(device->path));

   if (instance->enabled_extensions.KHR_display) {
      master_fd =
         open(drm_device->nodes[DRM_NODE_PRIMARY], O_RDWR | O_CLOEXEC);
      if (master_fd >= 0) {
         /* TODO: free master_fd if accel is not working? */
      }
   }

   device->master_fd = master_fd;
   device->local_fd = fd;

   if (tu_drm_query_param(device, MSM_PARAM_GPU_ID, &val)) {
      if (instance->debug_flags & TU_DEBUG_STARTUP)
         tu_logi("Could not query the GPU ID");
      result = vk_errorf(instance, VK_ERROR_INITIALIZATION_FAILED,
                         "could not get GPU ID");
      goto fail;
   }
   device->gpu_id = val;

   if (tu_drm_query_param(device, MSM_PARAM_GMEM_SIZE, &val)) {
      if (instance->debug_flags & TU_DEBUG_STARTUP)
         tu_logi("Could not query the GMEM size");
      result = vk_errorf(instance, VK_ERROR_INITIALIZATION_FAILED,
                         "could not get GMEM size");
      goto fail;
   }
   device->gmem_size = val;

   memset(device->name, 0, sizeof(device->name));
   sprintf(device->name, "FD%d", device->gpu_id);

   switch (device->gpu_id) {
   case 530:
   case 630:
      break;
   default:
      result = vk_errorf(instance, VK_ERROR_INITIALIZATION_FAILED,
                         "device %s is unsupported", device->name);
      goto fail;
   }
   if (tu_device_get_cache_uuid(device->gpu_id, device->cache_uuid)) {
      result = vk_errorf(instance, VK_ERROR_INITIALIZATION_FAILED,
                         "cannot generate UUID");
      goto fail;
   }

   /* The gpu id is already embedded in the uuid so we just pass "tu"
    * when creating the cache.
    */
   char buf[VK_UUID_SIZE * 2 + 1];
   disk_cache_format_hex_id(buf, device->cache_uuid, VK_UUID_SIZE * 2);
   device->disk_cache = disk_cache_create(device->name, buf, 0);

   fprintf(stderr, "WARNING: tu is not a conformant vulkan implementation, "
                   "testing use only.\n");

   tu_get_driver_uuid(&device->driver_uuid);
   tu_get_device_uuid(&device->device_uuid);

   tu_fill_device_extension_table(device, &device->supported_extensions);

   if (result != VK_SUCCESS) {
      vk_error(instance, result);
      goto fail;
   }

   return VK_SUCCESS;

fail:
   close(fd);
   if (master_fd != -1)
      close(master_fd);
   return result;
}

static void
tu_physical_device_finish(struct tu_physical_device *device)
{
   disk_cache_destroy(device->disk_cache);
   close(device->local_fd);
   if (device->master_fd != -1)
      close(device->master_fd);
}

static void *
default_alloc_func(void *pUserData,
                   size_t size,
                   size_t align,
                   VkSystemAllocationScope allocationScope)
{
   return malloc(size);
}

static void *
default_realloc_func(void *pUserData,
                     void *pOriginal,
                     size_t size,
                     size_t align,
                     VkSystemAllocationScope allocationScope)
{
   return realloc(pOriginal, size);
}

static void
default_free_func(void *pUserData, void *pMemory)
{
   free(pMemory);
}

static const VkAllocationCallbacks default_alloc = {
   .pUserData = NULL,
   .pfnAllocation = default_alloc_func,
   .pfnReallocation = default_realloc_func,
   .pfnFree = default_free_func,
};
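/* Note: the default callbacks above ignore the requested 'align' and rely on
 * malloc()/realloc() alignment, which is sufficient for the 8-byte
 * alignments requested throughout this driver.
 */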

static const struct debug_control tu_debug_options[] = {
   { "startup", TU_DEBUG_STARTUP }, { NULL, 0 }
};

const char *
tu_get_debug_option_name(int id)
{
   assert(id < ARRAY_SIZE(tu_debug_options) - 1);
   return tu_debug_options[id].string;
}

static int
tu_get_instance_extension_index(const char *name)
{
   for (unsigned i = 0; i < TU_INSTANCE_EXTENSION_COUNT; ++i) {
      if (strcmp(name, tu_instance_extensions[i].extensionName) == 0)
         return i;
   }
   return -1;
}

VkResult
tu_CreateInstance(const VkInstanceCreateInfo *pCreateInfo,
                  const VkAllocationCallbacks *pAllocator,
                  VkInstance *pInstance)
{
   struct tu_instance *instance;
   VkResult result;

   assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_INSTANCE_CREATE_INFO);

   uint32_t client_version;
   if (pCreateInfo->pApplicationInfo &&
       pCreateInfo->pApplicationInfo->apiVersion != 0) {
      client_version = pCreateInfo->pApplicationInfo->apiVersion;
   } else {
      tu_EnumerateInstanceVersion(&client_version);
   }

   instance = vk_zalloc2(&default_alloc, pAllocator, sizeof(*instance), 8,
                         VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
   if (!instance)
      return vk_error(NULL, VK_ERROR_OUT_OF_HOST_MEMORY);

   instance->_loader_data.loaderMagic = ICD_LOADER_MAGIC;

   if (pAllocator)
      instance->alloc = *pAllocator;
   else
      instance->alloc = default_alloc;

   instance->api_version = client_version;
   instance->physical_device_count = -1;

   instance->debug_flags =
      parse_debug_string(getenv("TU_DEBUG"), tu_debug_options);

   if (instance->debug_flags & TU_DEBUG_STARTUP)
      tu_logi("Created an instance");

   for (uint32_t i = 0; i < pCreateInfo->enabledExtensionCount; i++) {
      const char *ext_name = pCreateInfo->ppEnabledExtensionNames[i];
      int index = tu_get_instance_extension_index(ext_name);

      if (index < 0 || !tu_supported_instance_extensions.extensions[index]) {
         vk_free2(&default_alloc, pAllocator, instance);
         return vk_error(instance, VK_ERROR_EXTENSION_NOT_PRESENT);
      }

      instance->enabled_extensions.extensions[index] = true;
   }

   result = vk_debug_report_instance_init(&instance->debug_report_callbacks);
   if (result != VK_SUCCESS) {
      vk_free2(&default_alloc, pAllocator, instance);
      return vk_error(instance, result);
   }

   _mesa_locale_init();

   VG(VALGRIND_CREATE_MEMPOOL(instance, 0, false));

   *pInstance = tu_instance_to_handle(instance);

   return VK_SUCCESS;
}

void
tu_DestroyInstance(VkInstance _instance,
                   const VkAllocationCallbacks *pAllocator)
{
   TU_FROM_HANDLE(tu_instance, instance, _instance);

   if (!instance)
      return;

   for (int i = 0; i < instance->physical_device_count; ++i) {
      tu_physical_device_finish(instance->physical_devices + i);
   }

   VG(VALGRIND_DESTROY_MEMPOOL(instance));

   _mesa_locale_fini();

   vk_debug_report_instance_destroy(&instance->debug_report_callbacks);

   vk_free(&instance->alloc, instance);
}

static VkResult
tu_enumerate_devices(struct tu_instance *instance)
{
   /* TODO: Check for more devices? */
   drmDevicePtr devices[8];
   VkResult result = VK_ERROR_INCOMPATIBLE_DRIVER;
   int max_devices;

   instance->physical_device_count = 0;

   max_devices = drmGetDevices2(0, devices, ARRAY_SIZE(devices));

   if (instance->debug_flags & TU_DEBUG_STARTUP)
      tu_logi("Found %d drm nodes", max_devices);

   if (max_devices < 1)
      return vk_error(instance, VK_ERROR_INCOMPATIBLE_DRIVER);

   for (unsigned i = 0; i < (unsigned) max_devices; i++) {
      if (devices[i]->available_nodes & 1 << DRM_NODE_RENDER &&
          devices[i]->bustype == DRM_BUS_PLATFORM) {

         result = tu_physical_device_init(
            instance->physical_devices + instance->physical_device_count,
            instance, devices[i]);
         if (result == VK_SUCCESS)
            ++instance->physical_device_count;
         else if (result != VK_ERROR_INCOMPATIBLE_DRIVER)
            break;
      }
   }
   drmFreeDevices(devices, max_devices);

   return result;
}

VkResult
tu_EnumeratePhysicalDevices(VkInstance _instance,
                            uint32_t *pPhysicalDeviceCount,
                            VkPhysicalDevice *pPhysicalDevices)
{
   TU_FROM_HANDLE(tu_instance, instance, _instance);
   VK_OUTARRAY_MAKE(out, pPhysicalDevices, pPhysicalDeviceCount);

   VkResult result;

   if (instance->physical_device_count < 0) {
      result = tu_enumerate_devices(instance);
      if (result != VK_SUCCESS && result != VK_ERROR_INCOMPATIBLE_DRIVER)
         return result;
   }

   for (uint32_t i = 0; i < instance->physical_device_count; ++i) {
      vk_outarray_append(&out, p)
      {
         *p = tu_physical_device_to_handle(instance->physical_devices + i);
      }
   }

   return vk_outarray_status(&out);
}

VkResult
tu_EnumeratePhysicalDeviceGroups(
   VkInstance _instance,
   uint32_t *pPhysicalDeviceGroupCount,
   VkPhysicalDeviceGroupProperties *pPhysicalDeviceGroupProperties)
{
   TU_FROM_HANDLE(tu_instance, instance, _instance);
   VK_OUTARRAY_MAKE(out, pPhysicalDeviceGroupProperties,
                    pPhysicalDeviceGroupCount);
   VkResult result;

   if (instance->physical_device_count < 0) {
      result = tu_enumerate_devices(instance);
      if (result != VK_SUCCESS && result != VK_ERROR_INCOMPATIBLE_DRIVER)
         return result;
   }

   for (uint32_t i = 0; i < instance->physical_device_count; ++i) {
      vk_outarray_append(&out, p)
      {
         p->physicalDeviceCount = 1;
         p->physicalDevices[0] =
            tu_physical_device_to_handle(instance->physical_devices + i);
         p->subsetAllocation = false;
      }
   }

   return vk_outarray_status(&out);
}

void
tu_GetPhysicalDeviceFeatures(VkPhysicalDevice physicalDevice,
                             VkPhysicalDeviceFeatures *pFeatures)
{
   memset(pFeatures, 0, sizeof(*pFeatures));

   *pFeatures = (VkPhysicalDeviceFeatures) {
      .robustBufferAccess = false,
      .fullDrawIndexUint32 = false,
      .imageCubeArray = false,
      .independentBlend = false,
      .geometryShader = false,
      .tessellationShader = false,
      .sampleRateShading = false,
      .dualSrcBlend = false,
      .logicOp = false,
      .multiDrawIndirect = false,
      .drawIndirectFirstInstance = false,
      .depthClamp = false,
      .depthBiasClamp = false,
      .fillModeNonSolid = false,
      .depthBounds = false,
      .wideLines = false,
      .largePoints = false,
      .alphaToOne = false,
      .multiViewport = false,
      .samplerAnisotropy = false,
      .textureCompressionETC2 = false,
      .textureCompressionASTC_LDR = false,
      .textureCompressionBC = false,
      .occlusionQueryPrecise = false,
      .pipelineStatisticsQuery = false,
      .vertexPipelineStoresAndAtomics = false,
      .fragmentStoresAndAtomics = false,
      .shaderTessellationAndGeometryPointSize = false,
      .shaderImageGatherExtended = false,
      .shaderStorageImageExtendedFormats = false,
      .shaderStorageImageMultisample = false,
      .shaderUniformBufferArrayDynamicIndexing = false,
      .shaderSampledImageArrayDynamicIndexing = false,
      .shaderStorageBufferArrayDynamicIndexing = false,
      .shaderStorageImageArrayDynamicIndexing = false,
      .shaderStorageImageReadWithoutFormat = false,
      .shaderStorageImageWriteWithoutFormat = false,
      .shaderClipDistance = false,
      .shaderCullDistance = false,
      .shaderFloat64 = false,
      .shaderInt64 = false,
      .shaderInt16 = false,
      .sparseBinding = false,
      .variableMultisampleRate = false,
      .inheritedQueries = false,
   };
}

void
tu_GetPhysicalDeviceFeatures2(VkPhysicalDevice physicalDevice,
                              VkPhysicalDeviceFeatures2KHR *pFeatures)
{
   vk_foreach_struct(ext, pFeatures->pNext)
   {
      switch (ext->sType) {
      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VARIABLE_POINTER_FEATURES_KHR: {
         VkPhysicalDeviceVariablePointerFeaturesKHR *features = (void *) ext;
         features->variablePointersStorageBuffer = false;
         features->variablePointers = false;
         break;
      }
      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MULTIVIEW_FEATURES_KHR: {
         VkPhysicalDeviceMultiviewFeaturesKHR *features =
            (VkPhysicalDeviceMultiviewFeaturesKHR *) ext;
         features->multiview = false;
         features->multiviewGeometryShader = false;
         features->multiviewTessellationShader = false;
         break;
      }
      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_DRAW_PARAMETER_FEATURES: {
         VkPhysicalDeviceShaderDrawParameterFeatures *features =
            (VkPhysicalDeviceShaderDrawParameterFeatures *) ext;
         features->shaderDrawParameters = false;
         break;
      }
      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROTECTED_MEMORY_FEATURES: {
         VkPhysicalDeviceProtectedMemoryFeatures *features =
            (VkPhysicalDeviceProtectedMemoryFeatures *) ext;
         features->protectedMemory = false;
         break;
      }
      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_16BIT_STORAGE_FEATURES: {
         VkPhysicalDevice16BitStorageFeatures *features =
            (VkPhysicalDevice16BitStorageFeatures *) ext;
         features->storageBuffer16BitAccess = false;
         features->uniformAndStorageBuffer16BitAccess = false;
         features->storagePushConstant16 = false;
         features->storageInputOutput16 = false;
         break;
      }
      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SAMPLER_YCBCR_CONVERSION_FEATURES: {
         VkPhysicalDeviceSamplerYcbcrConversionFeatures *features =
            (VkPhysicalDeviceSamplerYcbcrConversionFeatures *) ext;
         features->samplerYcbcrConversion = false;
         break;
      }
      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DESCRIPTOR_INDEXING_FEATURES_EXT: {
         VkPhysicalDeviceDescriptorIndexingFeaturesEXT *features =
            (VkPhysicalDeviceDescriptorIndexingFeaturesEXT *) ext;
         features->shaderInputAttachmentArrayDynamicIndexing = false;
         features->shaderUniformTexelBufferArrayDynamicIndexing = false;
         features->shaderStorageTexelBufferArrayDynamicIndexing = false;
         features->shaderUniformBufferArrayNonUniformIndexing = false;
         features->shaderSampledImageArrayNonUniformIndexing = false;
         features->shaderStorageBufferArrayNonUniformIndexing = false;
         features->shaderStorageImageArrayNonUniformIndexing = false;
         features->shaderInputAttachmentArrayNonUniformIndexing = false;
         features->shaderUniformTexelBufferArrayNonUniformIndexing = false;
         features->shaderStorageTexelBufferArrayNonUniformIndexing = false;
         features->descriptorBindingUniformBufferUpdateAfterBind = false;
         features->descriptorBindingSampledImageUpdateAfterBind = false;
         features->descriptorBindingStorageImageUpdateAfterBind = false;
         features->descriptorBindingStorageBufferUpdateAfterBind = false;
         features->descriptorBindingUniformTexelBufferUpdateAfterBind = false;
         features->descriptorBindingStorageTexelBufferUpdateAfterBind = false;
         features->descriptorBindingUpdateUnusedWhilePending = false;
         features->descriptorBindingPartiallyBound = false;
         features->descriptorBindingVariableDescriptorCount = false;
         features->runtimeDescriptorArray = false;
         break;
      }
      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_CONDITIONAL_RENDERING_FEATURES_EXT: {
         VkPhysicalDeviceConditionalRenderingFeaturesEXT *features =
            (VkPhysicalDeviceConditionalRenderingFeaturesEXT *) ext;
         features->conditionalRendering = false;
         features->inheritedConditionalRendering = false;
         break;
      }
      default:
         break;
      }
   }
   tu_GetPhysicalDeviceFeatures(physicalDevice, &pFeatures->features);
}

void
tu_GetPhysicalDeviceProperties(VkPhysicalDevice physicalDevice,
                               VkPhysicalDeviceProperties *pProperties)
{
   TU_FROM_HANDLE(tu_physical_device, pdevice, physicalDevice);
   VkSampleCountFlags sample_counts = 0xf;

   /* Make sure that the entire descriptor set is addressable with a signed
    * 32-bit int, so the sum of all limits scaled by descriptor size has to
    * be at most 2 GiB. A combined image/sampler object counts against both
    * the sampler and the sampled-image limits. This limit is for the
    * pipeline layout, not for the set layout, but there is no set limit, so
    * we just set a pipeline limit. I don't think any app is going to hit
    * this soon. */
   size_t max_descriptor_set_size =
      ((1ull << 31) - 16 * MAX_DYNAMIC_BUFFERS) /
      (32 /* uniform buffer, 32 due to potential space wasted on alignment */ +
       32 /* storage buffer, 32 due to potential space wasted on alignment */ +
       32 /* sampler, largest when combined with image */ +
       64 /* sampled image */ + 64 /* storage image */);

   VkPhysicalDeviceLimits limits = {
      .maxImageDimension1D = (1 << 14),
      .maxImageDimension2D = (1 << 14),
      .maxImageDimension3D = (1 << 11),
      .maxImageDimensionCube = (1 << 14),
      .maxImageArrayLayers = (1 << 11),
      .maxTexelBufferElements = 128 * 1024 * 1024,
      .maxUniformBufferRange = UINT32_MAX,
      .maxStorageBufferRange = UINT32_MAX,
      .maxPushConstantsSize = MAX_PUSH_CONSTANTS_SIZE,
      .maxMemoryAllocationCount = UINT32_MAX,
      .maxSamplerAllocationCount = 64 * 1024,
      .bufferImageGranularity = 64, /* A cache line */
      .sparseAddressSpaceSize = 0xffffffffu, /* buffer max size */
      .maxBoundDescriptorSets = MAX_SETS,
      .maxPerStageDescriptorSamplers = max_descriptor_set_size,
      .maxPerStageDescriptorUniformBuffers = max_descriptor_set_size,
      .maxPerStageDescriptorStorageBuffers = max_descriptor_set_size,
      .maxPerStageDescriptorSampledImages = max_descriptor_set_size,
      .maxPerStageDescriptorStorageImages = max_descriptor_set_size,
      .maxPerStageDescriptorInputAttachments = max_descriptor_set_size,
      .maxPerStageResources = max_descriptor_set_size,
      .maxDescriptorSetSamplers = max_descriptor_set_size,
      .maxDescriptorSetUniformBuffers = max_descriptor_set_size,
      .maxDescriptorSetUniformBuffersDynamic = MAX_DYNAMIC_UNIFORM_BUFFERS,
      .maxDescriptorSetStorageBuffers = max_descriptor_set_size,
      .maxDescriptorSetStorageBuffersDynamic = MAX_DYNAMIC_STORAGE_BUFFERS,
      .maxDescriptorSetSampledImages = max_descriptor_set_size,
      .maxDescriptorSetStorageImages = max_descriptor_set_size,
      .maxDescriptorSetInputAttachments = max_descriptor_set_size,
      .maxVertexInputAttributes = 32,
      .maxVertexInputBindings = 32,
      .maxVertexInputAttributeOffset = 2047,
      .maxVertexInputBindingStride = 2048,
      .maxVertexOutputComponents = 128,
      .maxTessellationGenerationLevel = 64,
      .maxTessellationPatchSize = 32,
      .maxTessellationControlPerVertexInputComponents = 128,
      .maxTessellationControlPerVertexOutputComponents = 128,
      .maxTessellationControlPerPatchOutputComponents = 120,
      .maxTessellationControlTotalOutputComponents = 4096,
      .maxTessellationEvaluationInputComponents = 128,
      .maxTessellationEvaluationOutputComponents = 128,
      .maxGeometryShaderInvocations = 127,
      .maxGeometryInputComponents = 64,
      .maxGeometryOutputComponents = 128,
      .maxGeometryOutputVertices = 256,
      .maxGeometryTotalOutputComponents = 1024,
      .maxFragmentInputComponents = 128,
      .maxFragmentOutputAttachments = 8,
      .maxFragmentDualSrcAttachments = 1,
      .maxFragmentCombinedOutputResources = 8,
      .maxComputeSharedMemorySize = 32768,
      .maxComputeWorkGroupCount = { 65535, 65535, 65535 },
      .maxComputeWorkGroupInvocations = 2048,
      .maxComputeWorkGroupSize = { 2048, 2048, 2048 },
      .subPixelPrecisionBits = 4 /* FIXME */,
      .subTexelPrecisionBits = 4 /* FIXME */,
      .mipmapPrecisionBits = 4 /* FIXME */,
      .maxDrawIndexedIndexValue = UINT32_MAX,
      .maxDrawIndirectCount = UINT32_MAX,
      .maxSamplerLodBias = 16,
      .maxSamplerAnisotropy = 16,
      .maxViewports = MAX_VIEWPORTS,
      .maxViewportDimensions = { (1 << 14), (1 << 14) },
      .viewportBoundsRange = { INT16_MIN, INT16_MAX },
      .viewportSubPixelBits = 8,
      .minMemoryMapAlignment = 4096, /* A page */
      .minTexelBufferOffsetAlignment = 1,
      .minUniformBufferOffsetAlignment = 4,
      .minStorageBufferOffsetAlignment = 4,
      .minTexelOffset = -32,
      .maxTexelOffset = 31,
      .minTexelGatherOffset = -32,
      .maxTexelGatherOffset = 31,
      .minInterpolationOffset = -2,
      .maxInterpolationOffset = 2,
      .subPixelInterpolationOffsetBits = 8,
      .maxFramebufferWidth = (1 << 14),
      .maxFramebufferHeight = (1 << 14),
      .maxFramebufferLayers = (1 << 10),
      .framebufferColorSampleCounts = sample_counts,
      .framebufferDepthSampleCounts = sample_counts,
      .framebufferStencilSampleCounts = sample_counts,
      .framebufferNoAttachmentsSampleCounts = sample_counts,
      .maxColorAttachments = MAX_RTS,
      .sampledImageColorSampleCounts = sample_counts,
      .sampledImageIntegerSampleCounts = VK_SAMPLE_COUNT_1_BIT,
      .sampledImageDepthSampleCounts = sample_counts,
      .sampledImageStencilSampleCounts = sample_counts,
      .storageImageSampleCounts = VK_SAMPLE_COUNT_1_BIT,
      .maxSampleMaskWords = 1,
      .timestampComputeAndGraphics = true,
      .timestampPeriod = 1,
      .maxClipDistances = 8,
      .maxCullDistances = 8,
      .maxCombinedClipAndCullDistances = 8,
      .discreteQueuePriorities = 1,
      .pointSizeRange = { 0.125, 255.875 },
      .lineWidthRange = { 0.0, 7.9921875 },
      .pointSizeGranularity = (1.0 / 8.0),
      .lineWidthGranularity = (1.0 / 128.0),
      .strictLines = false, /* FINISHME */
      .standardSampleLocations = true,
      .optimalBufferCopyOffsetAlignment = 128,
      .optimalBufferCopyRowPitchAlignment = 128,
      .nonCoherentAtomSize = 64,
   };

   *pProperties = (VkPhysicalDeviceProperties) {
      .apiVersion = tu_physical_device_api_version(pdevice),
      .driverVersion = vk_get_driver_version(),
      .vendorID = 0, /* TODO */
      .deviceID = 0,
      .deviceType = VK_PHYSICAL_DEVICE_TYPE_INTEGRATED_GPU,
      .limits = limits,
      .sparseProperties = { 0 },
   };

   strcpy(pProperties->deviceName, pdevice->name);
   memcpy(pProperties->pipelineCacheUUID, pdevice->cache_uuid, VK_UUID_SIZE);
}

void
tu_GetPhysicalDeviceProperties2(VkPhysicalDevice physicalDevice,
                                VkPhysicalDeviceProperties2KHR *pProperties)
{
   TU_FROM_HANDLE(tu_physical_device, pdevice, physicalDevice);
   tu_GetPhysicalDeviceProperties(physicalDevice, &pProperties->properties);

   vk_foreach_struct(ext, pProperties->pNext)
   {
      switch (ext->sType) {
      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PUSH_DESCRIPTOR_PROPERTIES_KHR: {
         VkPhysicalDevicePushDescriptorPropertiesKHR *properties =
            (VkPhysicalDevicePushDescriptorPropertiesKHR *) ext;
         properties->maxPushDescriptors = MAX_PUSH_DESCRIPTORS;
         break;
      }
      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_ID_PROPERTIES_KHR: {
         VkPhysicalDeviceIDPropertiesKHR *properties =
            (VkPhysicalDeviceIDPropertiesKHR *) ext;
         memcpy(properties->driverUUID, pdevice->driver_uuid, VK_UUID_SIZE);
         memcpy(properties->deviceUUID, pdevice->device_uuid, VK_UUID_SIZE);
         properties->deviceLUIDValid = false;
         break;
      }
      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MULTIVIEW_PROPERTIES_KHR: {
         VkPhysicalDeviceMultiviewPropertiesKHR *properties =
            (VkPhysicalDeviceMultiviewPropertiesKHR *) ext;
         properties->maxMultiviewViewCount = MAX_VIEWS;
         properties->maxMultiviewInstanceIndex = INT_MAX;
         break;
      }
      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_POINT_CLIPPING_PROPERTIES_KHR: {
         VkPhysicalDevicePointClippingPropertiesKHR *properties =
            (VkPhysicalDevicePointClippingPropertiesKHR *) ext;
         properties->pointClippingBehavior =
            VK_POINT_CLIPPING_BEHAVIOR_ALL_CLIP_PLANES_KHR;
         break;
      }
      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MAINTENANCE_3_PROPERTIES: {
         VkPhysicalDeviceMaintenance3Properties *properties =
            (VkPhysicalDeviceMaintenance3Properties *) ext;
         /* Make sure everything is addressable by a signed 32-bit int, and
          * our largest descriptors are 96 bytes. */
         properties->maxPerSetDescriptors = (1ull << 31) / 96;
         /* Our buffer size fields allow only this much */
         properties->maxMemoryAllocationSize = 0xFFFFFFFFull;
         break;
      }
      default:
         break;
      }
   }
}

static const VkQueueFamilyProperties tu_queue_family_properties = {
   .queueFlags =
      VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT | VK_QUEUE_TRANSFER_BIT,
   .queueCount = 1,
   .timestampValidBits = 64,
   .minImageTransferGranularity = (VkExtent3D) { 1, 1, 1 },
};

void
tu_GetPhysicalDeviceQueueFamilyProperties(
   VkPhysicalDevice physicalDevice,
   uint32_t *pQueueFamilyPropertyCount,
   VkQueueFamilyProperties *pQueueFamilyProperties)
{
   VK_OUTARRAY_MAKE(out, pQueueFamilyProperties, pQueueFamilyPropertyCount);

   vk_outarray_append(&out, p) { *p = tu_queue_family_properties; }
}

void
tu_GetPhysicalDeviceQueueFamilyProperties2(
   VkPhysicalDevice physicalDevice,
   uint32_t *pQueueFamilyPropertyCount,
   VkQueueFamilyProperties2KHR *pQueueFamilyProperties)
{
   VK_OUTARRAY_MAKE(out, pQueueFamilyProperties, pQueueFamilyPropertyCount);

   vk_outarray_append(&out, p)
   {
      p->queueFamilyProperties = tu_queue_family_properties;
   }
}

static uint64_t
tu_get_system_heap_size(void)
{
   struct sysinfo info;
   sysinfo(&info);

   uint64_t total_ram = (uint64_t) info.totalram * (uint64_t) info.mem_unit;

   /* We don't want to burn too much ram with the GPU. If the user has 4GiB
    * or less, we use at most half. If they have more than 4GiB, we use 3/4.
    */
   uint64_t available_ram;
   if (total_ram <= 4ull * 1024ull * 1024ull * 1024ull)
      available_ram = total_ram / 2;
   else
      available_ram = total_ram * 3 / 4;

   return available_ram;
}
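/* For example, a system with 16 GiB of RAM reports a 12 GiB heap (3/4),
 * while a 4 GiB system reports a 2 GiB heap (1/2).
 */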

void
tu_GetPhysicalDeviceMemoryProperties(
   VkPhysicalDevice physicalDevice,
   VkPhysicalDeviceMemoryProperties *pMemoryProperties)
{
   pMemoryProperties->memoryHeapCount = 1;
   pMemoryProperties->memoryHeaps[0].size = tu_get_system_heap_size();
   pMemoryProperties->memoryHeaps[0].flags = VK_MEMORY_HEAP_DEVICE_LOCAL_BIT;

   pMemoryProperties->memoryTypeCount = 1;
   pMemoryProperties->memoryTypes[0].propertyFlags =
      VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT |
      VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT |
      VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
   pMemoryProperties->memoryTypes[0].heapIndex = 0;
}

void
tu_GetPhysicalDeviceMemoryProperties2(
   VkPhysicalDevice physicalDevice,
   VkPhysicalDeviceMemoryProperties2KHR *pMemoryProperties)
{
   tu_GetPhysicalDeviceMemoryProperties(
      physicalDevice, &pMemoryProperties->memoryProperties);
}

static VkResult
tu_queue_init(struct tu_device *device,
              struct tu_queue *queue,
              uint32_t queue_family_index,
              int idx,
              VkDeviceQueueCreateFlags flags)
{
   queue->_loader_data.loaderMagic = ICD_LOADER_MAGIC;
   queue->device = device;
   queue->queue_family_index = queue_family_index;
   queue->queue_idx = idx;
   queue->flags = flags;

   struct drm_msm_submitqueue req = {
      .flags = 0,
      .prio = 0,
   };

   int ret = drmCommandWriteRead(device->physical_device->local_fd,
                                 DRM_MSM_SUBMITQUEUE_NEW,
                                 &req, sizeof(req));
   if (ret)
      return VK_ERROR_INITIALIZATION_FAILED;

   queue->msm_queue_id = req.id;
   return VK_SUCCESS;
}

static void
tu_queue_finish(struct tu_queue *queue)
{
   drmCommandWrite(queue->device->physical_device->local_fd,
                   DRM_MSM_SUBMITQUEUE_CLOSE,
                   &queue->msm_queue_id, sizeof(uint32_t));
}

static int
tu_get_device_extension_index(const char *name)
{
   for (unsigned i = 0; i < TU_DEVICE_EXTENSION_COUNT; ++i) {
      if (strcmp(name, tu_device_extensions[i].extensionName) == 0)
         return i;
   }
   return -1;
}

VkResult
tu_CreateDevice(VkPhysicalDevice physicalDevice,
                const VkDeviceCreateInfo *pCreateInfo,
                const VkAllocationCallbacks *pAllocator,
                VkDevice *pDevice)
{
   TU_FROM_HANDLE(tu_physical_device, physical_device, physicalDevice);
   VkResult result;
   struct tu_device *device;

   /* Check enabled features */
   if (pCreateInfo->pEnabledFeatures) {
      VkPhysicalDeviceFeatures supported_features;
      tu_GetPhysicalDeviceFeatures(physicalDevice, &supported_features);
      VkBool32 *supported_feature = (VkBool32 *) &supported_features;
      VkBool32 *enabled_feature = (VkBool32 *) pCreateInfo->pEnabledFeatures;
      unsigned num_features =
         sizeof(VkPhysicalDeviceFeatures) / sizeof(VkBool32);
      for (uint32_t i = 0; i < num_features; i++) {
         if (enabled_feature[i] && !supported_feature[i])
            return vk_error(physical_device->instance,
                            VK_ERROR_FEATURE_NOT_PRESENT);
      }
   }

   device = vk_zalloc2(&physical_device->instance->alloc, pAllocator,
                       sizeof(*device), 8, VK_SYSTEM_ALLOCATION_SCOPE_DEVICE);
   if (!device)
      return vk_error(physical_device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);

   device->_loader_data.loaderMagic = ICD_LOADER_MAGIC;
   device->instance = physical_device->instance;
   device->physical_device = physical_device;

   if (pAllocator)
      device->alloc = *pAllocator;
   else
      device->alloc = physical_device->instance->alloc;

   for (uint32_t i = 0; i < pCreateInfo->enabledExtensionCount; i++) {
      const char *ext_name = pCreateInfo->ppEnabledExtensionNames[i];
      int index = tu_get_device_extension_index(ext_name);
      if (index < 0 ||
          !physical_device->supported_extensions.extensions[index]) {
         vk_free(&device->alloc, device);
         return vk_error(physical_device->instance,
                         VK_ERROR_EXTENSION_NOT_PRESENT);
      }

      device->enabled_extensions.extensions[index] = true;
   }

   for (unsigned i = 0; i < pCreateInfo->queueCreateInfoCount; i++) {
      const VkDeviceQueueCreateInfo *queue_create =
         &pCreateInfo->pQueueCreateInfos[i];
      uint32_t qfi = queue_create->queueFamilyIndex;
      device->queues[qfi] = vk_alloc(
         &device->alloc, queue_create->queueCount * sizeof(struct tu_queue),
         8, VK_SYSTEM_ALLOCATION_SCOPE_DEVICE);
      if (!device->queues[qfi]) {
         result = VK_ERROR_OUT_OF_HOST_MEMORY;
         goto fail;
      }

      memset(device->queues[qfi], 0,
             queue_create->queueCount * sizeof(struct tu_queue));

      device->queue_count[qfi] = queue_create->queueCount;

      for (unsigned q = 0; q < queue_create->queueCount; q++) {
         result = tu_queue_init(device, &device->queues[qfi][q], qfi, q,
                                queue_create->flags);
         if (result != VK_SUCCESS)
            goto fail;
      }
   }

   VkPipelineCacheCreateInfo ci;
   ci.sType = VK_STRUCTURE_TYPE_PIPELINE_CACHE_CREATE_INFO;
   ci.pNext = NULL;
   ci.flags = 0;
   ci.pInitialData = NULL;
   ci.initialDataSize = 0;
   VkPipelineCache pc;
   result =
      tu_CreatePipelineCache(tu_device_to_handle(device), &ci, NULL, &pc);
   if (result != VK_SUCCESS)
      goto fail;

   device->mem_cache = tu_pipeline_cache_from_handle(pc);

   *pDevice = tu_device_to_handle(device);
   return VK_SUCCESS;

fail:
   for (unsigned i = 0; i < TU_MAX_QUEUE_FAMILIES; i++) {
      for (unsigned q = 0; q < device->queue_count[i]; q++)
         tu_queue_finish(&device->queues[i][q]);
      if (device->queue_count[i])
         vk_free(&device->alloc, device->queues[i]);
   }

   vk_free(&device->alloc, device);
   return result;
}

void
tu_DestroyDevice(VkDevice _device, const VkAllocationCallbacks *pAllocator)
{
   TU_FROM_HANDLE(tu_device, device, _device);

   if (!device)
      return;

   for (unsigned i = 0; i < TU_MAX_QUEUE_FAMILIES; i++) {
      for (unsigned q = 0; q < device->queue_count[i]; q++)
         tu_queue_finish(&device->queues[i][q]);
      if (device->queue_count[i])
         vk_free(&device->alloc, device->queues[i]);
   }

   VkPipelineCache pc = tu_pipeline_cache_to_handle(device->mem_cache);
   tu_DestroyPipelineCache(tu_device_to_handle(device), pc, NULL);

   vk_free(&device->alloc, device);
}

VkResult
tu_EnumerateInstanceLayerProperties(uint32_t *pPropertyCount,
                                    VkLayerProperties *pProperties)
{
   *pPropertyCount = 0;
   return VK_SUCCESS;
}

VkResult
tu_EnumerateDeviceLayerProperties(VkPhysicalDevice physicalDevice,
                                  uint32_t *pPropertyCount,
                                  VkLayerProperties *pProperties)
{
   *pPropertyCount = 0;
   return VK_SUCCESS;
}

void
tu_GetDeviceQueue2(VkDevice _device,
                   const VkDeviceQueueInfo2 *pQueueInfo,
                   VkQueue *pQueue)
{
   TU_FROM_HANDLE(tu_device, device, _device);
   struct tu_queue *queue;

   queue =
      &device->queues[pQueueInfo->queueFamilyIndex][pQueueInfo->queueIndex];
   if (pQueueInfo->flags != queue->flags) {
      /* From the Vulkan 1.1.70 spec:
       *
       * "The queue returned by vkGetDeviceQueue2 must have the same
       * flags value from this structure as that used at device
       * creation time in a VkDeviceQueueCreateInfo instance. If no
       * matching flags were specified at device creation time then
       * pQueue will return VK_NULL_HANDLE."
       */
      *pQueue = VK_NULL_HANDLE;
      return;
   }

   *pQueue = tu_queue_to_handle(queue);
}

void
tu_GetDeviceQueue(VkDevice _device,
                  uint32_t queueFamilyIndex,
                  uint32_t queueIndex,
                  VkQueue *pQueue)
{
   const VkDeviceQueueInfo2 info =
      (VkDeviceQueueInfo2) { .sType = VK_STRUCTURE_TYPE_DEVICE_QUEUE_INFO_2,
                             .queueFamilyIndex = queueFamilyIndex,
                             .queueIndex = queueIndex };

   tu_GetDeviceQueue2(_device, &info, pQueue);
}

VkResult
tu_QueueSubmit(VkQueue _queue,
               uint32_t submitCount,
               const VkSubmitInfo *pSubmits,
               VkFence _fence)
{
   TU_FROM_HANDLE(tu_queue, queue, _queue);

   for (uint32_t i = 0; i < submitCount; ++i) {
      const VkSubmitInfo *submit = pSubmits + i;
      struct tu_bo_list bo_list;
      tu_bo_list_init(&bo_list);

      uint32_t entry_count = 0;
      for (uint32_t j = 0; j < submit->commandBufferCount; ++j) {
         TU_FROM_HANDLE(tu_cmd_buffer, cmdbuf, submit->pCommandBuffers[j]);
         entry_count += cmdbuf->cs.entry_count;
      }

      struct drm_msm_gem_submit_cmd cmds[entry_count];
      uint32_t entry_idx = 0;
      for (uint32_t j = 0; j < submit->commandBufferCount; ++j) {
         TU_FROM_HANDLE(tu_cmd_buffer, cmdbuf, submit->pCommandBuffers[j]);
         struct tu_cmd_stream *stream = &cmdbuf->cs;
         for (unsigned k = 0; k < stream->entry_count; ++k, ++entry_idx) {
            cmds[entry_idx].type = MSM_SUBMIT_CMD_BUF;
            cmds[entry_idx].submit_idx =
               tu_bo_list_add(&bo_list, stream->entries[k].bo);
            cmds[entry_idx].submit_offset = stream->entries[k].offset;
            cmds[entry_idx].size = stream->entries[k].size;
            cmds[entry_idx].pad = 0;
            cmds[entry_idx].nr_relocs = 0;
            cmds[entry_idx].relocs = 0;
         }
      }

      struct drm_msm_gem_submit_bo bos[bo_list.count];
      for (unsigned k = 0; k < bo_list.count; ++k) {
         bos[k].flags = MSM_SUBMIT_BO_READ | MSM_SUBMIT_BO_WRITE;
         bos[k].handle = bo_list.handles[k];
         bos[k].presumed = 0;
      }

      struct drm_msm_gem_submit req = {
         .flags = MSM_PIPE_3D0,
         .queueid = queue->msm_queue_id,
         .bos = (uint64_t)(uintptr_t) bos,
         .nr_bos = bo_list.count,
         .cmds = (uint64_t)(uintptr_t) cmds,
         .nr_cmds = entry_count,
      };

      int ret = drmCommandWriteRead(queue->device->physical_device->local_fd,
                                    DRM_MSM_GEM_SUBMIT,
                                    &req, sizeof(req));
      if (ret) {
         fprintf(stderr, "submit failed: %s\n", strerror(errno));
         abort();
      }

      tu_bo_list_destroy(&bo_list);
   }
   return VK_SUCCESS;
}

VkResult
tu_QueueWaitIdle(VkQueue _queue)
{
   return VK_SUCCESS;
}

VkResult
tu_DeviceWaitIdle(VkDevice _device)
{
   TU_FROM_HANDLE(tu_device, device, _device);

   for (unsigned i = 0; i < TU_MAX_QUEUE_FAMILIES; i++) {
      for (unsigned q = 0; q < device->queue_count[i]; q++) {
         tu_QueueWaitIdle(tu_queue_to_handle(&device->queues[i][q]));
      }
   }
   return VK_SUCCESS;
}

VkResult
tu_EnumerateInstanceExtensionProperties(const char *pLayerName,
                                        uint32_t *pPropertyCount,
                                        VkExtensionProperties *pProperties)
{
   VK_OUTARRAY_MAKE(out, pProperties, pPropertyCount);

   /* We support no layers. */
   if (pLayerName)
      return vk_error(NULL, VK_ERROR_LAYER_NOT_PRESENT);

   for (int i = 0; i < TU_INSTANCE_EXTENSION_COUNT; i++) {
      if (tu_supported_instance_extensions.extensions[i]) {
         vk_outarray_append(&out, prop) { *prop = tu_instance_extensions[i]; }
      }
   }

   return vk_outarray_status(&out);
}

VkResult
tu_EnumerateDeviceExtensionProperties(VkPhysicalDevice physicalDevice,
                                      const char *pLayerName,
                                      uint32_t *pPropertyCount,
                                      VkExtensionProperties *pProperties)
{
   TU_FROM_HANDLE(tu_physical_device, device, physicalDevice);
   VK_OUTARRAY_MAKE(out, pProperties, pPropertyCount);

   /* We support no layers. */
   if (pLayerName)
      return vk_error(NULL, VK_ERROR_LAYER_NOT_PRESENT);

   for (int i = 0; i < TU_DEVICE_EXTENSION_COUNT; i++) {
      if (device->supported_extensions.extensions[i]) {
         vk_outarray_append(&out, prop) { *prop = tu_device_extensions[i]; }
      }
   }

   return vk_outarray_status(&out);
}

PFN_vkVoidFunction
tu_GetInstanceProcAddr(VkInstance _instance, const char *pName)
{
   TU_FROM_HANDLE(tu_instance, instance, _instance);

   return tu_lookup_entrypoint_checked(
      pName, instance ? instance->api_version : 0,
      instance ? &instance->enabled_extensions : NULL, NULL);
}

/* The loader wants us to expose a second GetInstanceProcAddr function
 * to work around certain LD_PRELOAD issues seen in apps.
 */
PUBLIC
VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL
vk_icdGetInstanceProcAddr(VkInstance instance, const char *pName);

PUBLIC
VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL
vk_icdGetInstanceProcAddr(VkInstance instance, const char *pName)
{
   return tu_GetInstanceProcAddr(instance, pName);
}

PFN_vkVoidFunction
tu_GetDeviceProcAddr(VkDevice _device, const char *pName)
{
   TU_FROM_HANDLE(tu_device, device, _device);

   return tu_lookup_entrypoint_checked(pName, device->instance->api_version,
                                       &device->instance->enabled_extensions,
                                       &device->enabled_extensions);
}

static VkResult
tu_alloc_memory(struct tu_device *device,
                const VkMemoryAllocateInfo *pAllocateInfo,
                const VkAllocationCallbacks *pAllocator,
                VkDeviceMemory *pMem)
{
   struct tu_device_memory *mem;
   VkResult result;

   assert(pAllocateInfo->sType == VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO);

   if (pAllocateInfo->allocationSize == 0) {
      /* Apparently, this is allowed */
      *pMem = VK_NULL_HANDLE;
      return VK_SUCCESS;
   }

   mem = vk_alloc2(&device->alloc, pAllocator, sizeof(*mem), 8,
                   VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (mem == NULL)
      return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);

   result = tu_bo_init_new(device, &mem->bo, pAllocateInfo->allocationSize);
   if (result != VK_SUCCESS) {
      vk_free2(&device->alloc, pAllocator, mem);
      return result;
   }

   mem->size = pAllocateInfo->allocationSize;
   mem->type_index = pAllocateInfo->memoryTypeIndex;

   mem->map = NULL;
   mem->user_ptr = NULL;

   *pMem = tu_device_memory_to_handle(mem);

   return VK_SUCCESS;
}

VkResult
tu_AllocateMemory(VkDevice _device,
                  const VkMemoryAllocateInfo *pAllocateInfo,
                  const VkAllocationCallbacks *pAllocator,
                  VkDeviceMemory *pMem)
{
   TU_FROM_HANDLE(tu_device, device, _device);
   return tu_alloc_memory(device, pAllocateInfo, pAllocator, pMem);
}

void
tu_FreeMemory(VkDevice _device,
              VkDeviceMemory _mem,
              const VkAllocationCallbacks *pAllocator)
{
   TU_FROM_HANDLE(tu_device, device, _device);
   TU_FROM_HANDLE(tu_device_memory, mem, _mem);

   if (mem == NULL)
      return;

   tu_bo_finish(device, &mem->bo);
   vk_free2(&device->alloc, pAllocator, mem);
}

VkResult
tu_MapMemory(VkDevice _device,
             VkDeviceMemory _memory,
             VkDeviceSize offset,
             VkDeviceSize size,
             VkMemoryMapFlags flags,
             void **ppData)
{
   TU_FROM_HANDLE(tu_device, device, _device);
   TU_FROM_HANDLE(tu_device_memory, mem, _memory);
   VkResult result;

   if (mem == NULL) {
      *ppData = NULL;
      return VK_SUCCESS;
   }

   if (mem->user_ptr) {
      *ppData = mem->user_ptr;
   } else if (!mem->map) {
      result = tu_bo_map(device, &mem->bo);
      if (result != VK_SUCCESS)
         return result;
      *ppData = mem->map = mem->bo.map;
   } else
      *ppData = mem->map;

   if (*ppData) {
      *ppData += offset;
      return VK_SUCCESS;
   }

   return vk_error(device->instance, VK_ERROR_MEMORY_MAP_FAILED);
}

void
tu_UnmapMemory(VkDevice _device, VkDeviceMemory _memory)
{
   /* I do not see any unmapping done by the freedreno Gallium driver. */
}

VkResult
tu_FlushMappedMemoryRanges(VkDevice _device,
                           uint32_t memoryRangeCount,
                           const VkMappedMemoryRange *pMemoryRanges)
{
   return VK_SUCCESS;
}

VkResult
tu_InvalidateMappedMemoryRanges(VkDevice _device,
                                uint32_t memoryRangeCount,
                                const VkMappedMemoryRange *pMemoryRanges)
{
   return VK_SUCCESS;
}

void
tu_GetBufferMemoryRequirements(VkDevice _device,
                               VkBuffer _buffer,
                               VkMemoryRequirements *pMemoryRequirements)
{
   TU_FROM_HANDLE(tu_buffer, buffer, _buffer);

   pMemoryRequirements->memoryTypeBits = 1;
   pMemoryRequirements->alignment = 16;
   pMemoryRequirements->size =
      align64(buffer->size, pMemoryRequirements->alignment);
}

void
tu_GetBufferMemoryRequirements2(
   VkDevice device,
   const VkBufferMemoryRequirementsInfo2KHR *pInfo,
   VkMemoryRequirements2KHR *pMemoryRequirements)
{
   tu_GetBufferMemoryRequirements(device, pInfo->buffer,
                                  &pMemoryRequirements->memoryRequirements);
}

void
tu_GetImageMemoryRequirements(VkDevice _device,
                              VkImage _image,
                              VkMemoryRequirements *pMemoryRequirements)
{
   TU_FROM_HANDLE(tu_image, image, _image);

   pMemoryRequirements->memoryTypeBits = 1;
   pMemoryRequirements->size = image->size;
   pMemoryRequirements->alignment = image->alignment;
}

void
tu_GetImageMemoryRequirements2(VkDevice device,
                               const VkImageMemoryRequirementsInfo2KHR *pInfo,
                               VkMemoryRequirements2KHR *pMemoryRequirements)
{
   tu_GetImageMemoryRequirements(device, pInfo->image,
                                 &pMemoryRequirements->memoryRequirements);
}

void
tu_GetImageSparseMemoryRequirements(
   VkDevice device,
   VkImage image,
   uint32_t *pSparseMemoryRequirementCount,
   VkSparseImageMemoryRequirements *pSparseMemoryRequirements)
{
   tu_stub();
}

void
tu_GetImageSparseMemoryRequirements2(
   VkDevice device,
   const VkImageSparseMemoryRequirementsInfo2KHR *pInfo,
   uint32_t *pSparseMemoryRequirementCount,
   VkSparseImageMemoryRequirements2KHR *pSparseMemoryRequirements)
{
   tu_stub();
}

void
tu_GetDeviceMemoryCommitment(VkDevice device,
                             VkDeviceMemory memory,
                             VkDeviceSize *pCommittedMemoryInBytes)
{
   *pCommittedMemoryInBytes = 0;
}

VkResult
tu_BindBufferMemory2(VkDevice device,
                     uint32_t bindInfoCount,
                     const VkBindBufferMemoryInfoKHR *pBindInfos)
{
   return VK_SUCCESS;
}

VkResult
tu_BindBufferMemory(VkDevice device,
                    VkBuffer buffer,
                    VkDeviceMemory memory,
                    VkDeviceSize memoryOffset)
{
   const VkBindBufferMemoryInfoKHR info = {
      .sType = VK_STRUCTURE_TYPE_BIND_BUFFER_MEMORY_INFO_KHR,
      .buffer = buffer,
      .memory = memory,
      .memoryOffset = memoryOffset
   };

   return tu_BindBufferMemory2(device, 1, &info);
}

VkResult
tu_BindImageMemory2(VkDevice device,
                    uint32_t bindInfoCount,
                    const VkBindImageMemoryInfoKHR *pBindInfos)
{
   return VK_SUCCESS;
}

VkResult
tu_BindImageMemory(VkDevice device,
                   VkImage image,
                   VkDeviceMemory memory,
                   VkDeviceSize memoryOffset)
{
   const VkBindImageMemoryInfoKHR info = {
      .sType = VK_STRUCTURE_TYPE_BIND_IMAGE_MEMORY_INFO_KHR,
      .image = image,
      .memory = memory,
      .memoryOffset = memoryOffset
   };

   return tu_BindImageMemory2(device, 1, &info);
}

VkResult
tu_QueueBindSparse(VkQueue _queue,
                   uint32_t bindInfoCount,
                   const VkBindSparseInfo *pBindInfo,
                   VkFence _fence)
{
   return VK_SUCCESS;
}

VkResult
tu_CreateFence(VkDevice _device,
               const VkFenceCreateInfo *pCreateInfo,
               const VkAllocationCallbacks *pAllocator,
               VkFence *pFence)
{
   TU_FROM_HANDLE(tu_device, device, _device);

   struct tu_fence *fence =
      vk_alloc2(&device->alloc, pAllocator, sizeof(*fence), 8,
                VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);

   if (!fence)
      return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);

   *pFence = tu_fence_to_handle(fence);

   return VK_SUCCESS;
}

void
tu_DestroyFence(VkDevice _device,
                VkFence _fence,
                const VkAllocationCallbacks *pAllocator)
{
   TU_FROM_HANDLE(tu_device, device, _device);
   TU_FROM_HANDLE(tu_fence, fence, _fence);

   if (!fence)
      return;

   vk_free2(&device->alloc, pAllocator, fence);
}

VkResult
tu_WaitForFences(VkDevice _device,
                 uint32_t fenceCount,
                 const VkFence *pFences,
                 VkBool32 waitAll,
                 uint64_t timeout)
{
   return VK_SUCCESS;
}

VkResult
tu_ResetFences(VkDevice _device, uint32_t fenceCount, const VkFence *pFences)
{
   return VK_SUCCESS;
}

VkResult
tu_GetFenceStatus(VkDevice _device, VkFence _fence)
{
   return VK_SUCCESS;
}

// Queue semaphore functions

VkResult
tu_CreateSemaphore(VkDevice _device,
                   const VkSemaphoreCreateInfo *pCreateInfo,
                   const VkAllocationCallbacks *pAllocator,
                   VkSemaphore *pSemaphore)
{
   TU_FROM_HANDLE(tu_device, device, _device);

   struct tu_semaphore *sem =
      vk_alloc2(&device->alloc, pAllocator, sizeof(*sem), 8,
                VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (!sem)
      return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);

   *pSemaphore = tu_semaphore_to_handle(sem);
   return VK_SUCCESS;
}

void
tu_DestroySemaphore(VkDevice _device,
                    VkSemaphore _semaphore,
                    const VkAllocationCallbacks *pAllocator)
{
   TU_FROM_HANDLE(tu_device, device, _device);
   TU_FROM_HANDLE(tu_semaphore, sem, _semaphore);
   if (!_semaphore)
      return;

   vk_free2(&device->alloc, pAllocator, sem);
}

VkResult
tu_CreateEvent(VkDevice _device,
               const VkEventCreateInfo *pCreateInfo,
               const VkAllocationCallbacks *pAllocator,
               VkEvent *pEvent)
{
   TU_FROM_HANDLE(tu_device, device, _device);
   struct tu_event *event =
      vk_alloc2(&device->alloc, pAllocator, sizeof(*event), 8,
                VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);

   if (!event)
      return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);

   *pEvent = tu_event_to_handle(event);

   return VK_SUCCESS;
}

void
tu_DestroyEvent(VkDevice _device,
                VkEvent _event,
                const VkAllocationCallbacks *pAllocator)
{
   TU_FROM_HANDLE(tu_device, device, _device);
   TU_FROM_HANDLE(tu_event, event, _event);

   if (!event)
      return;
   vk_free2(&device->alloc, pAllocator, event);
}

VkResult
tu_GetEventStatus(VkDevice _device, VkEvent _event)
{
   TU_FROM_HANDLE(tu_event, event, _event);

   if (*event->map == 1)
      return VK_EVENT_SET;
   return VK_EVENT_RESET;
}

VkResult
tu_SetEvent(VkDevice _device, VkEvent _event)
{
   TU_FROM_HANDLE(tu_event, event, _event);
   *event->map = 1;

   return VK_SUCCESS;
}

VkResult
tu_ResetEvent(VkDevice _device, VkEvent _event)
{
   TU_FROM_HANDLE(tu_event, event, _event);
   *event->map = 0;

   return VK_SUCCESS;
}

VkResult
tu_CreateBuffer(VkDevice _device,
                const VkBufferCreateInfo *pCreateInfo,
                const VkAllocationCallbacks *pAllocator,
                VkBuffer *pBuffer)
{
   TU_FROM_HANDLE(tu_device, device, _device);
   struct tu_buffer *buffer;

   assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO);

   buffer = vk_alloc2(&device->alloc, pAllocator, sizeof(*buffer), 8,
                      VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (buffer == NULL)
      return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);

   buffer->size = pCreateInfo->size;
   buffer->usage = pCreateInfo->usage;
   buffer->flags = pCreateInfo->flags;

   *pBuffer = tu_buffer_to_handle(buffer);

   return VK_SUCCESS;
}

void
tu_DestroyBuffer(VkDevice _device,
                 VkBuffer _buffer,
                 const VkAllocationCallbacks *pAllocator)
{
   TU_FROM_HANDLE(tu_device, device, _device);
   TU_FROM_HANDLE(tu_buffer, buffer, _buffer);

   if (!buffer)
      return;

   vk_free2(&device->alloc, pAllocator, buffer);
}

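/* For a 3D image view the framebuffer layer count comes from the view's
 * depth extent; for array views it is one past the last addressable layer
 * (base_layer + layer_count).
 */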
static uint32_t
tu_surface_max_layer_count(struct tu_image_view *iview)
{
   return iview->type == VK_IMAGE_VIEW_TYPE_3D
             ? iview->extent.depth
             : (iview->base_layer + iview->layer_count);
}

VkResult
tu_CreateFramebuffer(VkDevice _device,
                     const VkFramebufferCreateInfo *pCreateInfo,
                     const VkAllocationCallbacks *pAllocator,
                     VkFramebuffer *pFramebuffer)
{
   TU_FROM_HANDLE(tu_device, device, _device);
   struct tu_framebuffer *framebuffer;

   assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO);

   size_t size = sizeof(*framebuffer) + sizeof(struct tu_attachment_info) *
                                           pCreateInfo->attachmentCount;
   framebuffer = vk_alloc2(&device->alloc, pAllocator, size, 8,
                           VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (framebuffer == NULL)
      return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);

   framebuffer->attachment_count = pCreateInfo->attachmentCount;
   framebuffer->width = pCreateInfo->width;
   framebuffer->height = pCreateInfo->height;
   framebuffer->layers = pCreateInfo->layers;
   for (uint32_t i = 0; i < pCreateInfo->attachmentCount; i++) {
      VkImageView _iview = pCreateInfo->pAttachments[i];
      struct tu_image_view *iview = tu_image_view_from_handle(_iview);
      framebuffer->attachments[i].attachment = iview;

      framebuffer->width = MIN2(framebuffer->width, iview->extent.width);
      framebuffer->height = MIN2(framebuffer->height, iview->extent.height);
      framebuffer->layers =
         MIN2(framebuffer->layers, tu_surface_max_layer_count(iview));
   }

   *pFramebuffer = tu_framebuffer_to_handle(framebuffer);
   return VK_SUCCESS;
}

void
tu_DestroyFramebuffer(VkDevice _device,
                      VkFramebuffer _fb,
                      const VkAllocationCallbacks *pAllocator)
{
   TU_FROM_HANDLE(tu_device, device, _device);
   TU_FROM_HANDLE(tu_framebuffer, fb, _fb);

   if (!fb)
      return;
   vk_free2(&device->alloc, pAllocator, fb);
}

static void
tu_init_sampler(struct tu_device *device,
                struct tu_sampler *sampler,
                const VkSamplerCreateInfo *pCreateInfo)
{
}

VkResult
tu_CreateSampler(VkDevice _device,
                 const VkSamplerCreateInfo *pCreateInfo,
                 const VkAllocationCallbacks *pAllocator,
                 VkSampler *pSampler)
{
   TU_FROM_HANDLE(tu_device, device, _device);
   struct tu_sampler *sampler;

   assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_SAMPLER_CREATE_INFO);

   sampler = vk_alloc2(&device->alloc, pAllocator, sizeof(*sampler), 8,
                       VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (!sampler)
      return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);

   tu_init_sampler(device, sampler, pCreateInfo);
   *pSampler = tu_sampler_to_handle(sampler);

   return VK_SUCCESS;
}

void
tu_DestroySampler(VkDevice _device,
                  VkSampler _sampler,
                  const VkAllocationCallbacks *pAllocator)
{
   TU_FROM_HANDLE(tu_device, device, _device);
   TU_FROM_HANDLE(tu_sampler, sampler, _sampler);

   if (!sampler)
      return;
   vk_free2(&device->alloc, pAllocator, sampler);
}

/* vk_icd.h does not declare this function, so we declare it here to
 * suppress Wmissing-prototypes.
 */
PUBLIC VKAPI_ATTR VkResult VKAPI_CALL
vk_icdNegotiateLoaderICDInterfaceVersion(uint32_t *pSupportedVersion);

PUBLIC VKAPI_ATTR VkResult VKAPI_CALL
vk_icdNegotiateLoaderICDInterfaceVersion(uint32_t *pSupportedVersion)
{
   /* For the full details on loader interface versioning, see
    * <https://github.com/KhronosGroup/Vulkan-LoaderAndValidationLayers/blob/master/loader/LoaderAndLayerInterface.md>.
    * What follows is a condensed summary, to help you navigate the large and
    * confusing official doc.
    *
    * - Loader interface v0 is incompatible with later versions. We don't
    *   support it.
    *
    * - In loader interface v1:
    *   - The first ICD entrypoint called by the loader is
    *     vk_icdGetInstanceProcAddr(). The ICD must statically expose this
    *     entrypoint.
    *   - The ICD must statically expose no other Vulkan symbol unless it
    *     is linked with -Bsymbolic.
    *   - Each dispatchable Vulkan handle created by the ICD must be
    *     a pointer to a struct whose first member is VK_LOADER_DATA. The
    *     ICD must initialize VK_LOADER_DATA.loaderMagic to
    *     ICD_LOADER_MAGIC.
    *   - The loader implements vkCreate{PLATFORM}SurfaceKHR() and
    *     vkDestroySurfaceKHR(). The ICD must be capable of working with
    *     such loader-managed surfaces.
    *
    * - Loader interface v2 differs from v1 in:
    *   - The first ICD entrypoint called by the loader is
    *     vk_icdNegotiateLoaderICDInterfaceVersion(). The ICD must
    *     statically expose this entrypoint.
    *
    * - Loader interface v3 differs from v2 in:
    *   - The ICD must implement vkCreate{PLATFORM}SurfaceKHR(),
    *     vkDestroySurfaceKHR(), and other API which uses VkSurfaceKHR,
    *     because the loader no longer does so.
    */
   *pSupportedVersion = MIN2(*pSupportedVersion, 3u);
   return VK_SUCCESS;
}

void
tu_GetPhysicalDeviceExternalSemaphoreProperties(
   VkPhysicalDevice physicalDevice,
   const VkPhysicalDeviceExternalSemaphoreInfoKHR *pExternalSemaphoreInfo,
   VkExternalSemaphorePropertiesKHR *pExternalSemaphoreProperties)
{
   pExternalSemaphoreProperties->exportFromImportedHandleTypes = 0;
   pExternalSemaphoreProperties->compatibleHandleTypes = 0;
   pExternalSemaphoreProperties->externalSemaphoreFeatures = 0;
}

void
tu_GetPhysicalDeviceExternalFenceProperties(
   VkPhysicalDevice physicalDevice,
   const VkPhysicalDeviceExternalFenceInfoKHR *pExternalFenceInfo,
   VkExternalFencePropertiesKHR *pExternalFenceProperties)
{
   pExternalFenceProperties->exportFromImportedHandleTypes = 0;
   pExternalFenceProperties->compatibleHandleTypes = 0;
   pExternalFenceProperties->externalFenceFeatures = 0;
}

VkResult
tu_CreateDebugReportCallbackEXT(
   VkInstance _instance,
   const VkDebugReportCallbackCreateInfoEXT *pCreateInfo,
   const VkAllocationCallbacks *pAllocator,
   VkDebugReportCallbackEXT *pCallback)
{
   TU_FROM_HANDLE(tu_instance, instance, _instance);
   return vk_create_debug_report_callback(&instance->debug_report_callbacks,
                                          pCreateInfo, pAllocator,
                                          &instance->alloc, pCallback);
}

void
tu_DestroyDebugReportCallbackEXT(VkInstance _instance,
                                 VkDebugReportCallbackEXT _callback,
                                 const VkAllocationCallbacks *pAllocator)
{
   TU_FROM_HANDLE(tu_instance, instance, _instance);
   vk_destroy_debug_report_callback(&instance->debug_report_callbacks,
                                    _callback, pAllocator, &instance->alloc);
}

void
tu_DebugReportMessageEXT(VkInstance _instance,
                         VkDebugReportFlagsEXT flags,
                         VkDebugReportObjectTypeEXT objectType,
                         uint64_t object,
                         size_t location,
                         int32_t messageCode,
                         const char *pLayerPrefix,
                         const char *pMessage)
{
   TU_FROM_HANDLE(tu_instance, instance, _instance);
   vk_debug_report(&instance->debug_report_callbacks, flags, objectType,
                   object, location, messageCode, pLayerPrefix, pMessage);
}

void
tu_GetDeviceGroupPeerMemoryFeatures(
   VkDevice device,
   uint32_t heapIndex,
   uint32_t localDeviceIndex,
   uint32_t remoteDeviceIndex,
   VkPeerMemoryFeatureFlags *pPeerMemoryFeatures)
{
   assert(localDeviceIndex == remoteDeviceIndex);

   *pPeerMemoryFeatures = VK_PEER_MEMORY_FEATURE_COPY_SRC_BIT |
                          VK_PEER_MEMORY_FEATURE_COPY_DST_BIT |
                          VK_PEER_MEMORY_FEATURE_GENERIC_SRC_BIT |
                          VK_PEER_MEMORY_FEATURE_GENERIC_DST_BIT;
}