turnip: run sed and clang-format on tu_cs
[mesa.git] src/freedreno/vulkan/tu_device.c
/*
 * Copyright © 2016 Red Hat.
 * Copyright © 2016 Bas Nieuwenhuizen
 *
 * based in part on anv driver which is:
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

#include "tu_private.h"

#include <fcntl.h>
#include <libsync.h>
#include <stdbool.h>
#include <string.h>
#include <sys/mman.h>
#include <sys/sysinfo.h>
#include <unistd.h>
#include <xf86drm.h>

#include "util/debug.h"
#include "util/disk_cache.h"
#include "util/strtod.h"
#include "vk_format.h"
#include "vk_util.h"

#include "drm/msm_drm.h"

static int
tu_device_get_cache_uuid(uint16_t family, void *uuid)
{
   uint32_t mesa_timestamp;
   uint16_t f = family;
   memset(uuid, 0, VK_UUID_SIZE);
   if (!disk_cache_get_function_timestamp(tu_device_get_cache_uuid,
                                          &mesa_timestamp))
      return -1;

   memcpy(uuid, &mesa_timestamp, 4);
   memcpy((char *) uuid + 4, &f, 2);
   snprintf((char *) uuid + 6, VK_UUID_SIZE - 10, "tu");
   return 0;
}
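
/* Layout of the cache UUID produced above, for reference (a sketch derived
 * from the memcpy/snprintf sequence; the remaining bytes stay zero from the
 * initial memset):
 *
 *    bytes [0..3]   mesa build timestamp
 *    bytes [4..5]   GPU family (e.g. 630)
 *    bytes [6.. ]   the string "tu" (NUL-terminated by snprintf)
 */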

static void
tu_get_driver_uuid(void *uuid)
{
   memset(uuid, 0, VK_UUID_SIZE);
   snprintf(uuid, VK_UUID_SIZE, "freedreno");
}

static void
tu_get_device_uuid(void *uuid)
{
   memset(uuid, 0, VK_UUID_SIZE);
}

VkResult
tu_bo_init_new(struct tu_device *dev, struct tu_bo *bo, uint64_t size)
{
   /* TODO: Choose better flags. As of 2018-11-12, freedreno/drm/msm_bo.c
    * always sets `flags = MSM_BO_WC`, and we copy that behavior here.
    */
   uint32_t gem_handle = tu_gem_new(dev, size, MSM_BO_WC);
   if (!gem_handle)
      goto fail_new;

   uint64_t iova = tu_gem_info_iova(dev, gem_handle);
   if (!iova)
      goto fail_info;

   *bo = (struct tu_bo) {
      .gem_handle = gem_handle,
      .size = size,
      .iova = iova,
   };

   return VK_SUCCESS;

fail_info:
   /* *bo has not been written yet at this point, so close the local handle,
    * not bo->gem_handle.
    */
   tu_gem_close(dev, gem_handle);
fail_new:
   return vk_error(dev->instance, VK_ERROR_OUT_OF_DEVICE_MEMORY);
}

VkResult
tu_bo_map(struct tu_device *dev, struct tu_bo *bo)
{
   if (bo->map)
      return VK_SUCCESS;

   uint64_t offset = tu_gem_info_offset(dev, bo->gem_handle);
   if (!offset)
      return vk_error(dev->instance, VK_ERROR_OUT_OF_DEVICE_MEMORY);

   /* TODO: Should we use the wrapper os_mmap() like Freedreno does? */
   void *map = mmap(0, bo->size, PROT_READ | PROT_WRITE, MAP_SHARED,
                    dev->physical_device->local_fd, offset);
   if (map == MAP_FAILED)
      return vk_error(dev->instance, VK_ERROR_MEMORY_MAP_FAILED);

   bo->map = map;
   return VK_SUCCESS;
}

void
tu_bo_finish(struct tu_device *dev, struct tu_bo *bo)
{
   assert(bo->gem_handle);

   if (bo->map)
      munmap(bo->map, bo->size);

   tu_gem_close(dev, bo->gem_handle);
}
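
/* Usage sketch for the BO helpers above (illustrative only, not driver
 * code): allocate, map lazily, then release.
 *
 *    struct tu_bo bo;
 *    if (tu_bo_init_new(dev, &bo, 4096) == VK_SUCCESS) {
 *       if (tu_bo_map(dev, &bo) == VK_SUCCESS)
 *          memset(bo.map, 0, bo.size);
 *       tu_bo_finish(dev, &bo);
 *    }
 */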

static VkResult
tu_physical_device_init(struct tu_physical_device *device,
                        struct tu_instance *instance,
                        drmDevicePtr drm_device)
{
   const char *path = drm_device->nodes[DRM_NODE_RENDER];
   VkResult result = VK_SUCCESS;
   drmVersionPtr version;
   int fd;
   int master_fd = -1;

   fd = open(path, O_RDWR | O_CLOEXEC);
   if (fd < 0) {
      return vk_errorf(instance, VK_ERROR_INCOMPATIBLE_DRIVER,
                       "failed to open device %s", path);
   }

   /* Version 1.3 added MSM_INFO_IOVA. */
   const int min_version_major = 1;
   const int min_version_minor = 3;

   version = drmGetVersion(fd);
   if (!version) {
      close(fd);
      return vk_errorf(instance, VK_ERROR_INCOMPATIBLE_DRIVER,
                       "failed to query kernel driver version for device %s",
                       path);
   }

   if (strcmp(version->name, "msm")) {
      drmFreeVersion(version);
      if (master_fd != -1)
         close(master_fd);
      close(fd);
      return vk_errorf(instance, VK_ERROR_INCOMPATIBLE_DRIVER,
                       "device %s does not use the msm kernel driver", path);
   }

   if (version->version_major != min_version_major ||
       version->version_minor < min_version_minor) {
      result = vk_errorf(instance, VK_ERROR_INCOMPATIBLE_DRIVER,
                         "kernel driver for device %s has version %d.%d, "
                         "but Vulkan requires version >= %d.%d",
                         path, version->version_major, version->version_minor,
                         min_version_major, min_version_minor);
      drmFreeVersion(version);
      close(fd);
      return result;
   }

   drmFreeVersion(version);

   if (instance->debug_flags & TU_DEBUG_STARTUP)
      tu_logi("Found compatible device '%s'.", path);

   device->_loader_data.loaderMagic = ICD_LOADER_MAGIC;
   device->instance = instance;
   assert(strlen(path) < ARRAY_SIZE(device->path));
   strncpy(device->path, path, ARRAY_SIZE(device->path));

   if (instance->enabled_extensions.KHR_display) {
      master_fd =
         open(drm_device->nodes[DRM_NODE_PRIMARY], O_RDWR | O_CLOEXEC);
      if (master_fd >= 0) {
         /* TODO: free master_fd if accel is not working? */
      }
   }

   device->master_fd = master_fd;
   device->local_fd = fd;

   if (tu_drm_get_gpu_id(device, &device->gpu_id)) {
      if (instance->debug_flags & TU_DEBUG_STARTUP)
         tu_logi("Could not query the GPU ID");
      result = vk_errorf(instance, VK_ERROR_INITIALIZATION_FAILED,
                         "could not get GPU ID");
      goto fail;
   }

   if (tu_drm_get_gmem_size(device, &device->gmem_size)) {
      if (instance->debug_flags & TU_DEBUG_STARTUP)
         tu_logi("Could not query the GMEM size");
      result = vk_errorf(instance, VK_ERROR_INITIALIZATION_FAILED,
                         "could not get GMEM size");
      goto fail;
   }

   memset(device->name, 0, sizeof(device->name));
   sprintf(device->name, "FD%d", device->gpu_id);

   switch (device->gpu_id) {
   case 530:
   case 630:
      break;
   default:
      result = vk_errorf(instance, VK_ERROR_INITIALIZATION_FAILED,
                         "device %s is unsupported", device->name);
      goto fail;
   }
   if (tu_device_get_cache_uuid(device->gpu_id, device->cache_uuid)) {
      result = vk_errorf(instance, VK_ERROR_INITIALIZATION_FAILED,
                         "cannot generate UUID");
      goto fail;
   }

   /* The gpu id is already embedded in the uuid so we just pass "tu"
    * when creating the cache.
    */
   char buf[VK_UUID_SIZE * 2 + 1];
   disk_cache_format_hex_id(buf, device->cache_uuid, VK_UUID_SIZE * 2);
   device->disk_cache = disk_cache_create(device->name, buf, 0);

   fprintf(stderr, "WARNING: tu is not a conformant vulkan implementation, "
                   "testing use only.\n");

   tu_get_driver_uuid(&device->driver_uuid);
   tu_get_device_uuid(&device->device_uuid);

   tu_fill_device_extension_table(device, &device->supported_extensions);

   if (result != VK_SUCCESS) {
      vk_error(instance, result);
      goto fail;
   }

   return VK_SUCCESS;

fail:
   close(fd);
   if (master_fd != -1)
      close(master_fd);
   return result;
}

static void
tu_physical_device_finish(struct tu_physical_device *device)
{
   disk_cache_destroy(device->disk_cache);
   close(device->local_fd);
   if (device->master_fd != -1)
      close(device->master_fd);
}

static void *
default_alloc_func(void *pUserData,
                   size_t size,
                   size_t align,
                   VkSystemAllocationScope allocationScope)
{
   return malloc(size);
}

static void *
default_realloc_func(void *pUserData,
                     void *pOriginal,
                     size_t size,
                     size_t align,
                     VkSystemAllocationScope allocationScope)
{
   return realloc(pOriginal, size);
}

static void
default_free_func(void *pUserData, void *pMemory)
{
   free(pMemory);
}

static const VkAllocationCallbacks default_alloc = {
   .pUserData = NULL,
   .pfnAllocation = default_alloc_func,
   .pfnReallocation = default_realloc_func,
   .pfnFree = default_free_func,
};

static const struct debug_control tu_debug_options[] = {
   { "startup", TU_DEBUG_STARTUP }, { NULL, 0 }
};

const char *
tu_get_debug_option_name(int id)
{
   assert(id < ARRAY_SIZE(tu_debug_options) - 1);
   return tu_debug_options[id].string;
}

static int
tu_get_instance_extension_index(const char *name)
{
   for (unsigned i = 0; i < TU_INSTANCE_EXTENSION_COUNT; ++i) {
      if (strcmp(name, tu_instance_extensions[i].extensionName) == 0)
         return i;
   }
   return -1;
}

VkResult
tu_CreateInstance(const VkInstanceCreateInfo *pCreateInfo,
                  const VkAllocationCallbacks *pAllocator,
                  VkInstance *pInstance)
{
   struct tu_instance *instance;
   VkResult result;

   assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_INSTANCE_CREATE_INFO);

   uint32_t client_version;
   if (pCreateInfo->pApplicationInfo &&
       pCreateInfo->pApplicationInfo->apiVersion != 0) {
      client_version = pCreateInfo->pApplicationInfo->apiVersion;
   } else {
      tu_EnumerateInstanceVersion(&client_version);
   }

   instance = vk_zalloc2(&default_alloc, pAllocator, sizeof(*instance), 8,
                         VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
   if (!instance)
      return vk_error(NULL, VK_ERROR_OUT_OF_HOST_MEMORY);

   instance->_loader_data.loaderMagic = ICD_LOADER_MAGIC;

   if (pAllocator)
      instance->alloc = *pAllocator;
   else
      instance->alloc = default_alloc;

   instance->api_version = client_version;
   instance->physical_device_count = -1;

   instance->debug_flags =
      parse_debug_string(getenv("TU_DEBUG"), tu_debug_options);

   if (instance->debug_flags & TU_DEBUG_STARTUP)
      tu_logi("Created an instance");

   for (uint32_t i = 0; i < pCreateInfo->enabledExtensionCount; i++) {
      const char *ext_name = pCreateInfo->ppEnabledExtensionNames[i];
      int index = tu_get_instance_extension_index(ext_name);

      if (index < 0 || !tu_supported_instance_extensions.extensions[index]) {
         vk_free2(&default_alloc, pAllocator, instance);
         return vk_error(instance, VK_ERROR_EXTENSION_NOT_PRESENT);
      }

      instance->enabled_extensions.extensions[index] = true;
   }

   result = vk_debug_report_instance_init(&instance->debug_report_callbacks);
   if (result != VK_SUCCESS) {
      vk_free2(&default_alloc, pAllocator, instance);
      return vk_error(instance, result);
   }

   _mesa_locale_init();

   VG(VALGRIND_CREATE_MEMPOOL(instance, 0, false));

   *pInstance = tu_instance_to_handle(instance);

   return VK_SUCCESS;
}

void
tu_DestroyInstance(VkInstance _instance,
                   const VkAllocationCallbacks *pAllocator)
{
   TU_FROM_HANDLE(tu_instance, instance, _instance);

   if (!instance)
      return;

   for (int i = 0; i < instance->physical_device_count; ++i) {
      tu_physical_device_finish(instance->physical_devices + i);
   }

   VG(VALGRIND_DESTROY_MEMPOOL(instance));

   _mesa_locale_fini();

   vk_debug_report_instance_destroy(&instance->debug_report_callbacks);

   vk_free(&instance->alloc, instance);
}

static VkResult
tu_enumerate_devices(struct tu_instance *instance)
{
   /* TODO: Check for more devices? */
   drmDevicePtr devices[8];
   VkResult result = VK_ERROR_INCOMPATIBLE_DRIVER;
   int max_devices;

   instance->physical_device_count = 0;

   max_devices = drmGetDevices2(0, devices, ARRAY_SIZE(devices));

   if (instance->debug_flags & TU_DEBUG_STARTUP)
      tu_logi("Found %d drm nodes", max_devices);

   if (max_devices < 1)
      return vk_error(instance, VK_ERROR_INCOMPATIBLE_DRIVER);

   for (unsigned i = 0; i < (unsigned) max_devices; i++) {
      if (devices[i]->available_nodes & 1 << DRM_NODE_RENDER &&
          devices[i]->bustype == DRM_BUS_PLATFORM) {

         result = tu_physical_device_init(
            instance->physical_devices + instance->physical_device_count,
            instance, devices[i]);
         if (result == VK_SUCCESS)
            ++instance->physical_device_count;
         else if (result != VK_ERROR_INCOMPATIBLE_DRIVER)
            break;
      }
   }
   drmFreeDevices(devices, max_devices);

   return result;
}

VkResult
tu_EnumeratePhysicalDevices(VkInstance _instance,
                            uint32_t *pPhysicalDeviceCount,
                            VkPhysicalDevice *pPhysicalDevices)
{
   TU_FROM_HANDLE(tu_instance, instance, _instance);
   VK_OUTARRAY_MAKE(out, pPhysicalDevices, pPhysicalDeviceCount);

   VkResult result;

   if (instance->physical_device_count < 0) {
      result = tu_enumerate_devices(instance);
      if (result != VK_SUCCESS && result != VK_ERROR_INCOMPATIBLE_DRIVER)
         return result;
   }

   for (uint32_t i = 0; i < instance->physical_device_count; ++i) {
      vk_outarray_append(&out, p)
      {
         *p = tu_physical_device_to_handle(instance->physical_devices + i);
      }
   }

   return vk_outarray_status(&out);
}

VkResult
tu_EnumeratePhysicalDeviceGroups(
   VkInstance _instance,
   uint32_t *pPhysicalDeviceGroupCount,
   VkPhysicalDeviceGroupProperties *pPhysicalDeviceGroupProperties)
{
   TU_FROM_HANDLE(tu_instance, instance, _instance);
   VK_OUTARRAY_MAKE(out, pPhysicalDeviceGroupProperties,
                    pPhysicalDeviceGroupCount);
   VkResult result;

   if (instance->physical_device_count < 0) {
      result = tu_enumerate_devices(instance);
      if (result != VK_SUCCESS && result != VK_ERROR_INCOMPATIBLE_DRIVER)
         return result;
   }

   for (uint32_t i = 0; i < instance->physical_device_count; ++i) {
      vk_outarray_append(&out, p)
      {
         p->physicalDeviceCount = 1;
         p->physicalDevices[0] =
            tu_physical_device_to_handle(instance->physical_devices + i);
         p->subsetAllocation = false;
      }
   }

   return vk_outarray_status(&out);
}

void
tu_GetPhysicalDeviceFeatures(VkPhysicalDevice physicalDevice,
                             VkPhysicalDeviceFeatures *pFeatures)
{
   memset(pFeatures, 0, sizeof(*pFeatures));

   *pFeatures = (VkPhysicalDeviceFeatures) {
      .robustBufferAccess = false,
      .fullDrawIndexUint32 = false,
      .imageCubeArray = false,
      .independentBlend = false,
      .geometryShader = false,
      .tessellationShader = false,
      .sampleRateShading = false,
      .dualSrcBlend = false,
      .logicOp = false,
      .multiDrawIndirect = false,
      .drawIndirectFirstInstance = false,
      .depthClamp = false,
      .depthBiasClamp = false,
      .fillModeNonSolid = false,
      .depthBounds = false,
      .wideLines = false,
      .largePoints = false,
      .alphaToOne = false,
      .multiViewport = false,
      .samplerAnisotropy = false,
      .textureCompressionETC2 = false,
      .textureCompressionASTC_LDR = false,
      .textureCompressionBC = false,
      .occlusionQueryPrecise = false,
      .pipelineStatisticsQuery = false,
      .vertexPipelineStoresAndAtomics = false,
      .fragmentStoresAndAtomics = false,
      .shaderTessellationAndGeometryPointSize = false,
      .shaderImageGatherExtended = false,
      .shaderStorageImageExtendedFormats = false,
      .shaderStorageImageMultisample = false,
      .shaderUniformBufferArrayDynamicIndexing = false,
      .shaderSampledImageArrayDynamicIndexing = false,
      .shaderStorageBufferArrayDynamicIndexing = false,
      .shaderStorageImageArrayDynamicIndexing = false,
      .shaderStorageImageReadWithoutFormat = false,
      .shaderStorageImageWriteWithoutFormat = false,
      .shaderClipDistance = false,
      .shaderCullDistance = false,
      .shaderFloat64 = false,
      .shaderInt64 = false,
      .shaderInt16 = false,
      .sparseBinding = false,
      .variableMultisampleRate = false,
      .inheritedQueries = false,
   };
}

void
tu_GetPhysicalDeviceFeatures2(VkPhysicalDevice physicalDevice,
                              VkPhysicalDeviceFeatures2KHR *pFeatures)
{
   vk_foreach_struct(ext, pFeatures->pNext)
   {
      switch (ext->sType) {
      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VARIABLE_POINTER_FEATURES_KHR: {
         VkPhysicalDeviceVariablePointerFeaturesKHR *features = (void *) ext;
         features->variablePointersStorageBuffer = false;
         features->variablePointers = false;
         break;
      }
      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MULTIVIEW_FEATURES_KHR: {
         VkPhysicalDeviceMultiviewFeaturesKHR *features =
            (VkPhysicalDeviceMultiviewFeaturesKHR *) ext;
         features->multiview = false;
         features->multiviewGeometryShader = false;
         features->multiviewTessellationShader = false;
         break;
      }
      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_DRAW_PARAMETER_FEATURES: {
         VkPhysicalDeviceShaderDrawParameterFeatures *features =
            (VkPhysicalDeviceShaderDrawParameterFeatures *) ext;
         features->shaderDrawParameters = false;
         break;
      }
      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROTECTED_MEMORY_FEATURES: {
         VkPhysicalDeviceProtectedMemoryFeatures *features =
            (VkPhysicalDeviceProtectedMemoryFeatures *) ext;
         features->protectedMemory = false;
         break;
      }
      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_16BIT_STORAGE_FEATURES: {
         VkPhysicalDevice16BitStorageFeatures *features =
            (VkPhysicalDevice16BitStorageFeatures *) ext;
         features->storageBuffer16BitAccess = false;
         features->uniformAndStorageBuffer16BitAccess = false;
         features->storagePushConstant16 = false;
         features->storageInputOutput16 = false;
         break;
      }
      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SAMPLER_YCBCR_CONVERSION_FEATURES: {
         VkPhysicalDeviceSamplerYcbcrConversionFeatures *features =
            (VkPhysicalDeviceSamplerYcbcrConversionFeatures *) ext;
         features->samplerYcbcrConversion = false;
         break;
      }
      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DESCRIPTOR_INDEXING_FEATURES_EXT: {
         VkPhysicalDeviceDescriptorIndexingFeaturesEXT *features =
            (VkPhysicalDeviceDescriptorIndexingFeaturesEXT *) ext;
         features->shaderInputAttachmentArrayDynamicIndexing = false;
         features->shaderUniformTexelBufferArrayDynamicIndexing = false;
         features->shaderStorageTexelBufferArrayDynamicIndexing = false;
         features->shaderUniformBufferArrayNonUniformIndexing = false;
         features->shaderSampledImageArrayNonUniformIndexing = false;
         features->shaderStorageBufferArrayNonUniformIndexing = false;
         features->shaderStorageImageArrayNonUniformIndexing = false;
         features->shaderInputAttachmentArrayNonUniformIndexing = false;
         features->shaderUniformTexelBufferArrayNonUniformIndexing = false;
         features->shaderStorageTexelBufferArrayNonUniformIndexing = false;
         features->descriptorBindingUniformBufferUpdateAfterBind = false;
         features->descriptorBindingSampledImageUpdateAfterBind = false;
         features->descriptorBindingStorageImageUpdateAfterBind = false;
         features->descriptorBindingStorageBufferUpdateAfterBind = false;
         features->descriptorBindingUniformTexelBufferUpdateAfterBind = false;
         features->descriptorBindingStorageTexelBufferUpdateAfterBind = false;
         features->descriptorBindingUpdateUnusedWhilePending = false;
         features->descriptorBindingPartiallyBound = false;
         features->descriptorBindingVariableDescriptorCount = false;
         features->runtimeDescriptorArray = false;
         break;
      }
      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_CONDITIONAL_RENDERING_FEATURES_EXT: {
         VkPhysicalDeviceConditionalRenderingFeaturesEXT *features =
            (VkPhysicalDeviceConditionalRenderingFeaturesEXT *) ext;
         features->conditionalRendering = false;
         features->inheritedConditionalRendering = false;
         break;
      }
      default:
         break;
      }
   }
   tu_GetPhysicalDeviceFeatures(physicalDevice, &pFeatures->features);
}

void
tu_GetPhysicalDeviceProperties(VkPhysicalDevice physicalDevice,
                               VkPhysicalDeviceProperties *pProperties)
{
   TU_FROM_HANDLE(tu_physical_device, pdevice, physicalDevice);
   VkSampleCountFlags sample_counts = 0xf;

   /* Make sure that the entire descriptor set is addressable with a signed
    * 32-bit int, so the sum of all limits scaled by descriptor size has to
    * be at most 2 GiB. A combined image/sampler object counts as one of
    * each. This limit is for the pipeline layout, not for the set layout,
    * but there is no set limit, so we just set a pipeline limit. I don't
    * think any app is going to hit this soon.
    */
   size_t max_descriptor_set_size =
      ((1ull << 31) - 16 * MAX_DYNAMIC_BUFFERS) /
      (32 /* uniform buffer, 32 due to potential space wasted on alignment */ +
       32 /* storage buffer, 32 due to potential space wasted on alignment */ +
       32 /* sampler, largest when combined with image */ +
       64 /* sampled image */ + 64 /* storage image */);
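
   /* Worked example (assuming the MAX_DYNAMIC_BUFFERS term is negligible):
    * the divisor is 32 + 32 + 32 + 64 + 64 = 224 bytes, so the limit comes
    * out to roughly 2^31 / 224, about 9.6 million descriptors.
    */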

   VkPhysicalDeviceLimits limits = {
      .maxImageDimension1D = (1 << 14),
      .maxImageDimension2D = (1 << 14),
      .maxImageDimension3D = (1 << 11),
      .maxImageDimensionCube = (1 << 14),
      .maxImageArrayLayers = (1 << 11),
      .maxTexelBufferElements = 128 * 1024 * 1024,
      .maxUniformBufferRange = UINT32_MAX,
      .maxStorageBufferRange = UINT32_MAX,
      .maxPushConstantsSize = MAX_PUSH_CONSTANTS_SIZE,
      .maxMemoryAllocationCount = UINT32_MAX,
      .maxSamplerAllocationCount = 64 * 1024,
      .bufferImageGranularity = 64,          /* A cache line */
      .sparseAddressSpaceSize = 0xffffffffu, /* buffer max size */
      .maxBoundDescriptorSets = MAX_SETS,
      .maxPerStageDescriptorSamplers = max_descriptor_set_size,
      .maxPerStageDescriptorUniformBuffers = max_descriptor_set_size,
      .maxPerStageDescriptorStorageBuffers = max_descriptor_set_size,
      .maxPerStageDescriptorSampledImages = max_descriptor_set_size,
      .maxPerStageDescriptorStorageImages = max_descriptor_set_size,
      .maxPerStageDescriptorInputAttachments = max_descriptor_set_size,
      .maxPerStageResources = max_descriptor_set_size,
      .maxDescriptorSetSamplers = max_descriptor_set_size,
      .maxDescriptorSetUniformBuffers = max_descriptor_set_size,
      .maxDescriptorSetUniformBuffersDynamic = MAX_DYNAMIC_UNIFORM_BUFFERS,
      .maxDescriptorSetStorageBuffers = max_descriptor_set_size,
      .maxDescriptorSetStorageBuffersDynamic = MAX_DYNAMIC_STORAGE_BUFFERS,
      .maxDescriptorSetSampledImages = max_descriptor_set_size,
      .maxDescriptorSetStorageImages = max_descriptor_set_size,
      .maxDescriptorSetInputAttachments = max_descriptor_set_size,
      .maxVertexInputAttributes = 32,
      .maxVertexInputBindings = 32,
      .maxVertexInputAttributeOffset = 2047,
      .maxVertexInputBindingStride = 2048,
      .maxVertexOutputComponents = 128,
      .maxTessellationGenerationLevel = 64,
      .maxTessellationPatchSize = 32,
      .maxTessellationControlPerVertexInputComponents = 128,
      .maxTessellationControlPerVertexOutputComponents = 128,
      .maxTessellationControlPerPatchOutputComponents = 120,
      .maxTessellationControlTotalOutputComponents = 4096,
      .maxTessellationEvaluationInputComponents = 128,
      .maxTessellationEvaluationOutputComponents = 128,
      .maxGeometryShaderInvocations = 127,
      .maxGeometryInputComponents = 64,
      .maxGeometryOutputComponents = 128,
      .maxGeometryOutputVertices = 256,
      .maxGeometryTotalOutputComponents = 1024,
      .maxFragmentInputComponents = 128,
      .maxFragmentOutputAttachments = 8,
      .maxFragmentDualSrcAttachments = 1,
      .maxFragmentCombinedOutputResources = 8,
      .maxComputeSharedMemorySize = 32768,
      .maxComputeWorkGroupCount = { 65535, 65535, 65535 },
      .maxComputeWorkGroupInvocations = 2048,
      .maxComputeWorkGroupSize = { 2048, 2048, 2048 },
      .subPixelPrecisionBits = 4 /* FIXME */,
      .subTexelPrecisionBits = 4 /* FIXME */,
      .mipmapPrecisionBits = 4 /* FIXME */,
      .maxDrawIndexedIndexValue = UINT32_MAX,
      .maxDrawIndirectCount = UINT32_MAX,
      .maxSamplerLodBias = 16,
      .maxSamplerAnisotropy = 16,
      .maxViewports = MAX_VIEWPORTS,
      .maxViewportDimensions = { (1 << 14), (1 << 14) },
      .viewportBoundsRange = { INT16_MIN, INT16_MAX },
      .viewportSubPixelBits = 8,
      .minMemoryMapAlignment = 4096, /* A page */
      .minTexelBufferOffsetAlignment = 1,
      .minUniformBufferOffsetAlignment = 4,
      .minStorageBufferOffsetAlignment = 4,
      .minTexelOffset = -32,
      .maxTexelOffset = 31,
      .minTexelGatherOffset = -32,
      .maxTexelGatherOffset = 31,
      .minInterpolationOffset = -2,
      .maxInterpolationOffset = 2,
      .subPixelInterpolationOffsetBits = 8,
      .maxFramebufferWidth = (1 << 14),
      .maxFramebufferHeight = (1 << 14),
      .maxFramebufferLayers = (1 << 10),
      .framebufferColorSampleCounts = sample_counts,
      .framebufferDepthSampleCounts = sample_counts,
      .framebufferStencilSampleCounts = sample_counts,
      .framebufferNoAttachmentsSampleCounts = sample_counts,
      .maxColorAttachments = MAX_RTS,
      .sampledImageColorSampleCounts = sample_counts,
      .sampledImageIntegerSampleCounts = VK_SAMPLE_COUNT_1_BIT,
      .sampledImageDepthSampleCounts = sample_counts,
      .sampledImageStencilSampleCounts = sample_counts,
      .storageImageSampleCounts = VK_SAMPLE_COUNT_1_BIT,
      .maxSampleMaskWords = 1,
      .timestampComputeAndGraphics = true,
      .timestampPeriod = 1,
      .maxClipDistances = 8,
      .maxCullDistances = 8,
      .maxCombinedClipAndCullDistances = 8,
      .discreteQueuePriorities = 1,
      .pointSizeRange = { 0.125, 255.875 },
      .lineWidthRange = { 0.0, 7.9921875 },
      .pointSizeGranularity = (1.0 / 8.0),
      .lineWidthGranularity = (1.0 / 128.0),
      .strictLines = false, /* FINISHME */
      .standardSampleLocations = true,
      .optimalBufferCopyOffsetAlignment = 128,
      .optimalBufferCopyRowPitchAlignment = 128,
      .nonCoherentAtomSize = 64,
   };

   *pProperties = (VkPhysicalDeviceProperties) {
      .apiVersion = tu_physical_device_api_version(pdevice),
      .driverVersion = vk_get_driver_version(),
      .vendorID = 0, /* TODO */
      .deviceID = 0,
      .deviceType = VK_PHYSICAL_DEVICE_TYPE_INTEGRATED_GPU,
      .limits = limits,
      .sparseProperties = { 0 },
   };

   strcpy(pProperties->deviceName, pdevice->name);
   memcpy(pProperties->pipelineCacheUUID, pdevice->cache_uuid, VK_UUID_SIZE);
}

void
tu_GetPhysicalDeviceProperties2(VkPhysicalDevice physicalDevice,
                                VkPhysicalDeviceProperties2KHR *pProperties)
{
   TU_FROM_HANDLE(tu_physical_device, pdevice, physicalDevice);
   tu_GetPhysicalDeviceProperties(physicalDevice, &pProperties->properties);

   vk_foreach_struct(ext, pProperties->pNext)
   {
      switch (ext->sType) {
      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PUSH_DESCRIPTOR_PROPERTIES_KHR: {
         VkPhysicalDevicePushDescriptorPropertiesKHR *properties =
            (VkPhysicalDevicePushDescriptorPropertiesKHR *) ext;
         properties->maxPushDescriptors = MAX_PUSH_DESCRIPTORS;
         break;
      }
      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_ID_PROPERTIES_KHR: {
         VkPhysicalDeviceIDPropertiesKHR *properties =
            (VkPhysicalDeviceIDPropertiesKHR *) ext;
         memcpy(properties->driverUUID, pdevice->driver_uuid, VK_UUID_SIZE);
         memcpy(properties->deviceUUID, pdevice->device_uuid, VK_UUID_SIZE);
         properties->deviceLUIDValid = false;
         break;
      }
      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MULTIVIEW_PROPERTIES_KHR: {
         VkPhysicalDeviceMultiviewPropertiesKHR *properties =
            (VkPhysicalDeviceMultiviewPropertiesKHR *) ext;
         properties->maxMultiviewViewCount = MAX_VIEWS;
         properties->maxMultiviewInstanceIndex = INT_MAX;
         break;
      }
      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_POINT_CLIPPING_PROPERTIES_KHR: {
         VkPhysicalDevicePointClippingPropertiesKHR *properties =
            (VkPhysicalDevicePointClippingPropertiesKHR *) ext;
         properties->pointClippingBehavior =
            VK_POINT_CLIPPING_BEHAVIOR_ALL_CLIP_PLANES_KHR;
         break;
      }
      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MAINTENANCE_3_PROPERTIES: {
         VkPhysicalDeviceMaintenance3Properties *properties =
            (VkPhysicalDeviceMaintenance3Properties *) ext;
         /* Make sure everything is addressable by a signed 32-bit int, and
          * our largest descriptors are 96 bytes.
          */
         properties->maxPerSetDescriptors = (1ull << 31) / 96;
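         /* That works out to (1ull << 31) / 96 = 22369621 descriptors. */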
         /* Our buffer size fields allow only this much */
         properties->maxMemoryAllocationSize = 0xFFFFFFFFull;
         break;
      }
      default:
         break;
      }
   }
}

static const VkQueueFamilyProperties tu_queue_family_properties = {
   .queueFlags =
      VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT | VK_QUEUE_TRANSFER_BIT,
   .queueCount = 1,
   .timestampValidBits = 64,
   .minImageTransferGranularity = (VkExtent3D) { 1, 1, 1 },
};

void
tu_GetPhysicalDeviceQueueFamilyProperties(
   VkPhysicalDevice physicalDevice,
   uint32_t *pQueueFamilyPropertyCount,
   VkQueueFamilyProperties *pQueueFamilyProperties)
{
   VK_OUTARRAY_MAKE(out, pQueueFamilyProperties, pQueueFamilyPropertyCount);

   vk_outarray_append(&out, p) { *p = tu_queue_family_properties; }
}

void
tu_GetPhysicalDeviceQueueFamilyProperties2(
   VkPhysicalDevice physicalDevice,
   uint32_t *pQueueFamilyPropertyCount,
   VkQueueFamilyProperties2KHR *pQueueFamilyProperties)
{
   VK_OUTARRAY_MAKE(out, pQueueFamilyProperties, pQueueFamilyPropertyCount);

   vk_outarray_append(&out, p)
   {
      p->queueFamilyProperties = tu_queue_family_properties;
   }
}

static uint64_t
tu_get_system_heap_size(void)
{
   struct sysinfo info;
   sysinfo(&info);

   uint64_t total_ram = (uint64_t) info.totalram * (uint64_t) info.mem_unit;

   /* We don't want to burn too much ram with the GPU. If the user has 4GiB
    * or less, we use at most half. If they have more than 4GiB, we use 3/4.
    */
   uint64_t available_ram;
   if (total_ram <= 4ull * 1024ull * 1024ull * 1024ull)
      available_ram = total_ram / 2;
   else
      available_ram = total_ram * 3 / 4;

   return available_ram;
}
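
/* For example: a board with 4 GiB of RAM advertises a 2 GiB heap, while an
 * 8 GiB board advertises a 6 GiB heap.
 */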

void
tu_GetPhysicalDeviceMemoryProperties(
   VkPhysicalDevice physicalDevice,
   VkPhysicalDeviceMemoryProperties *pMemoryProperties)
{
   pMemoryProperties->memoryHeapCount = 1;
   pMemoryProperties->memoryHeaps[0].size = tu_get_system_heap_size();
   pMemoryProperties->memoryHeaps[0].flags = VK_MEMORY_HEAP_DEVICE_LOCAL_BIT;

   pMemoryProperties->memoryTypeCount = 1;
   pMemoryProperties->memoryTypes[0].propertyFlags =
      VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT |
      VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT |
      VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
   pMemoryProperties->memoryTypes[0].heapIndex = 0;
}

void
tu_GetPhysicalDeviceMemoryProperties2(
   VkPhysicalDevice physicalDevice,
   VkPhysicalDeviceMemoryProperties2KHR *pMemoryProperties)
{
   tu_GetPhysicalDeviceMemoryProperties(
      physicalDevice, &pMemoryProperties->memoryProperties);
}

static VkResult
tu_queue_init(struct tu_device *device,
              struct tu_queue *queue,
              uint32_t queue_family_index,
              int idx,
              VkDeviceQueueCreateFlags flags)
{
   queue->_loader_data.loaderMagic = ICD_LOADER_MAGIC;
   queue->device = device;
   queue->queue_family_index = queue_family_index;
   queue->queue_idx = idx;
   queue->flags = flags;

   int ret = tu_drm_submitqueue_new(device, 0, &queue->msm_queue_id);
   if (ret)
      return VK_ERROR_INITIALIZATION_FAILED;

   queue->submit_fence_fd = -1;

   return VK_SUCCESS;
}

static void
tu_queue_finish(struct tu_queue *queue)
{
   if (queue->submit_fence_fd >= 0) {
      close(queue->submit_fence_fd);
   }
   tu_drm_submitqueue_close(queue->device, queue->msm_queue_id);
}

static int
tu_get_device_extension_index(const char *name)
{
   for (unsigned i = 0; i < TU_DEVICE_EXTENSION_COUNT; ++i) {
      if (strcmp(name, tu_device_extensions[i].extensionName) == 0)
         return i;
   }
   return -1;
}

VkResult
tu_CreateDevice(VkPhysicalDevice physicalDevice,
                const VkDeviceCreateInfo *pCreateInfo,
                const VkAllocationCallbacks *pAllocator,
                VkDevice *pDevice)
{
   TU_FROM_HANDLE(tu_physical_device, physical_device, physicalDevice);
   VkResult result;
   struct tu_device *device;

   /* Check enabled features */
   if (pCreateInfo->pEnabledFeatures) {
      VkPhysicalDeviceFeatures supported_features;
      tu_GetPhysicalDeviceFeatures(physicalDevice, &supported_features);
      VkBool32 *supported_feature = (VkBool32 *) &supported_features;
      VkBool32 *enabled_feature = (VkBool32 *) pCreateInfo->pEnabledFeatures;
      unsigned num_features =
         sizeof(VkPhysicalDeviceFeatures) / sizeof(VkBool32);
      for (uint32_t i = 0; i < num_features; i++) {
         if (enabled_feature[i] && !supported_feature[i])
            return vk_error(physical_device->instance,
                            VK_ERROR_FEATURE_NOT_PRESENT);
      }
   }

   device = vk_zalloc2(&physical_device->instance->alloc, pAllocator,
                       sizeof(*device), 8, VK_SYSTEM_ALLOCATION_SCOPE_DEVICE);
   if (!device)
      return vk_error(physical_device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);

   device->_loader_data.loaderMagic = ICD_LOADER_MAGIC;
   device->instance = physical_device->instance;
   device->physical_device = physical_device;

   if (pAllocator)
      device->alloc = *pAllocator;
   else
      device->alloc = physical_device->instance->alloc;

   for (uint32_t i = 0; i < pCreateInfo->enabledExtensionCount; i++) {
      const char *ext_name = pCreateInfo->ppEnabledExtensionNames[i];
      int index = tu_get_device_extension_index(ext_name);
      if (index < 0 ||
          !physical_device->supported_extensions.extensions[index]) {
         vk_free(&device->alloc, device);
         return vk_error(physical_device->instance,
                         VK_ERROR_EXTENSION_NOT_PRESENT);
      }

      device->enabled_extensions.extensions[index] = true;
   }

   for (unsigned i = 0; i < pCreateInfo->queueCreateInfoCount; i++) {
      const VkDeviceQueueCreateInfo *queue_create =
         &pCreateInfo->pQueueCreateInfos[i];
      uint32_t qfi = queue_create->queueFamilyIndex;
      device->queues[qfi] = vk_alloc(
         &device->alloc, queue_create->queueCount * sizeof(struct tu_queue),
         8, VK_SYSTEM_ALLOCATION_SCOPE_DEVICE);
      if (!device->queues[qfi]) {
         result = VK_ERROR_OUT_OF_HOST_MEMORY;
         goto fail;
      }

      memset(device->queues[qfi], 0,
             queue_create->queueCount * sizeof(struct tu_queue));

      device->queue_count[qfi] = queue_create->queueCount;

      for (unsigned q = 0; q < queue_create->queueCount; q++) {
         result = tu_queue_init(device, &device->queues[qfi][q], qfi, q,
                                queue_create->flags);
         if (result != VK_SUCCESS)
            goto fail;
      }
   }

   VkPipelineCacheCreateInfo ci;
   ci.sType = VK_STRUCTURE_TYPE_PIPELINE_CACHE_CREATE_INFO;
   ci.pNext = NULL;
   ci.flags = 0;
   ci.pInitialData = NULL;
   ci.initialDataSize = 0;
   VkPipelineCache pc;
   result =
      tu_CreatePipelineCache(tu_device_to_handle(device), &ci, NULL, &pc);
   if (result != VK_SUCCESS)
      goto fail;

   device->mem_cache = tu_pipeline_cache_from_handle(pc);

   *pDevice = tu_device_to_handle(device);
   return VK_SUCCESS;

fail:
   for (unsigned i = 0; i < TU_MAX_QUEUE_FAMILIES; i++) {
      for (unsigned q = 0; q < device->queue_count[i]; q++)
         tu_queue_finish(&device->queues[i][q]);
      if (device->queue_count[i])
         vk_free(&device->alloc, device->queues[i]);
   }

   vk_free(&device->alloc, device);
   return result;
}

void
tu_DestroyDevice(VkDevice _device, const VkAllocationCallbacks *pAllocator)
{
   TU_FROM_HANDLE(tu_device, device, _device);

   if (!device)
      return;

   for (unsigned i = 0; i < TU_MAX_QUEUE_FAMILIES; i++) {
      for (unsigned q = 0; q < device->queue_count[i]; q++)
         tu_queue_finish(&device->queues[i][q]);
      if (device->queue_count[i])
         vk_free(&device->alloc, device->queues[i]);
   }

   VkPipelineCache pc = tu_pipeline_cache_to_handle(device->mem_cache);
   tu_DestroyPipelineCache(tu_device_to_handle(device), pc, NULL);

   vk_free(&device->alloc, device);
}

VkResult
tu_EnumerateInstanceLayerProperties(uint32_t *pPropertyCount,
                                    VkLayerProperties *pProperties)
{
   *pPropertyCount = 0;
   return VK_SUCCESS;
}

VkResult
tu_EnumerateDeviceLayerProperties(VkPhysicalDevice physicalDevice,
                                  uint32_t *pPropertyCount,
                                  VkLayerProperties *pProperties)
{
   *pPropertyCount = 0;
   return VK_SUCCESS;
}

void
tu_GetDeviceQueue2(VkDevice _device,
                   const VkDeviceQueueInfo2 *pQueueInfo,
                   VkQueue *pQueue)
{
   TU_FROM_HANDLE(tu_device, device, _device);
   struct tu_queue *queue;

   queue =
      &device->queues[pQueueInfo->queueFamilyIndex][pQueueInfo->queueIndex];
   if (pQueueInfo->flags != queue->flags) {
      /* From the Vulkan 1.1.70 spec:
       *
       * "The queue returned by vkGetDeviceQueue2 must have the same
       * flags value from this structure as that used at device
       * creation time in a VkDeviceQueueCreateInfo instance. If no
       * matching flags were specified at device creation time then
       * pQueue will return VK_NULL_HANDLE."
       */
      *pQueue = VK_NULL_HANDLE;
      return;
   }

   *pQueue = tu_queue_to_handle(queue);
}

void
tu_GetDeviceQueue(VkDevice _device,
                  uint32_t queueFamilyIndex,
                  uint32_t queueIndex,
                  VkQueue *pQueue)
{
   const VkDeviceQueueInfo2 info =
      (VkDeviceQueueInfo2) { .sType = VK_STRUCTURE_TYPE_DEVICE_QUEUE_INFO_2,
                             .queueFamilyIndex = queueFamilyIndex,
                             .queueIndex = queueIndex };

   tu_GetDeviceQueue2(_device, &info, pQueue);
}

VkResult
tu_QueueSubmit(VkQueue _queue,
               uint32_t submitCount,
               const VkSubmitInfo *pSubmits,
               VkFence _fence)
{
   TU_FROM_HANDLE(tu_queue, queue, _queue);

   for (uint32_t i = 0; i < submitCount; ++i) {
      const VkSubmitInfo *submit = pSubmits + i;
      const bool last_submit = (i == submitCount - 1);
      struct tu_bo_list bo_list;
      tu_bo_list_init(&bo_list);

      uint32_t entry_count = 0;
      for (uint32_t j = 0; j < submit->commandBufferCount; ++j) {
         TU_FROM_HANDLE(tu_cmd_buffer, cmdbuf, submit->pCommandBuffers[j]);
         entry_count += cmdbuf->cs.entry_count;
      }

      struct drm_msm_gem_submit_cmd cmds[entry_count];
      uint32_t entry_idx = 0;
      for (uint32_t j = 0; j < submit->commandBufferCount; ++j) {
         TU_FROM_HANDLE(tu_cmd_buffer, cmdbuf, submit->pCommandBuffers[j]);
         struct tu_cs *cs = &cmdbuf->cs;
         for (unsigned k = 0; k < cs->entry_count; ++k, ++entry_idx) {
            cmds[entry_idx].type = MSM_SUBMIT_CMD_BUF;
            cmds[entry_idx].submit_idx =
               tu_bo_list_add(&bo_list, cs->entries[k].bo);
            cmds[entry_idx].submit_offset = cs->entries[k].offset;
            cmds[entry_idx].size = cs->entries[k].size;
            cmds[entry_idx].pad = 0;
            cmds[entry_idx].nr_relocs = 0;
            cmds[entry_idx].relocs = 0;
         }
      }

      struct drm_msm_gem_submit_bo bos[bo_list.count];
      for (unsigned b = 0; b < bo_list.count; ++b) {
         bos[b].flags = MSM_SUBMIT_BO_READ | MSM_SUBMIT_BO_WRITE;
         bos[b].handle = bo_list.handles[b];
         bos[b].presumed = 0;
      }

      uint32_t flags = MSM_PIPE_3D0;
      if (last_submit) {
         flags |= MSM_SUBMIT_FENCE_FD_OUT;
      }

      struct drm_msm_gem_submit req = {
         .flags = flags,
         .queueid = queue->msm_queue_id,
         .bos = (uint64_t)(uintptr_t) bos,
         .nr_bos = bo_list.count,
         .cmds = (uint64_t)(uintptr_t) cmds,
         .nr_cmds = entry_count,
      };

      int ret = drmCommandWriteRead(queue->device->physical_device->local_fd,
                                    DRM_MSM_GEM_SUBMIT, &req, sizeof(req));
      if (ret) {
         fprintf(stderr, "submit failed: %s\n", strerror(errno));
         abort();
      }

      tu_bo_list_destroy(&bo_list);

      if (last_submit) {
         /* no need to merge fences as queue execution is serialized */
         if (queue->submit_fence_fd >= 0) {
            close(queue->submit_fence_fd);
         }
         queue->submit_fence_fd = req.fence_fd;
      }
   }
   return VK_SUCCESS;
}
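
/* Usage sketch (illustrative only): the out-fence fd captured from the last
 * submit above is what tu_QueueWaitIdle() below consumes:
 *
 *    tu_QueueSubmit(queue, 1, &submit_info, VK_NULL_HANDLE);
 *    tu_QueueWaitIdle(queue);   // sync_wait()s and closes the fd
 */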

VkResult
tu_QueueWaitIdle(VkQueue _queue)
{
   TU_FROM_HANDLE(tu_queue, queue, _queue);

   if (queue->submit_fence_fd >= 0) {
      int ret = sync_wait(queue->submit_fence_fd, -1);
      if (ret)
         tu_loge("sync_wait on fence fd %d failed", queue->submit_fence_fd);

      close(queue->submit_fence_fd);
      queue->submit_fence_fd = -1;
   }

   return VK_SUCCESS;
}

VkResult
tu_DeviceWaitIdle(VkDevice _device)
{
   TU_FROM_HANDLE(tu_device, device, _device);

   for (unsigned i = 0; i < TU_MAX_QUEUE_FAMILIES; i++) {
      for (unsigned q = 0; q < device->queue_count[i]; q++) {
         tu_QueueWaitIdle(tu_queue_to_handle(&device->queues[i][q]));
      }
   }
   return VK_SUCCESS;
}

VkResult
tu_EnumerateInstanceExtensionProperties(const char *pLayerName,
                                        uint32_t *pPropertyCount,
                                        VkExtensionProperties *pProperties)
{
   VK_OUTARRAY_MAKE(out, pProperties, pPropertyCount);

   /* We support no layers */
   if (pLayerName)
      return vk_error(NULL, VK_ERROR_LAYER_NOT_PRESENT);

   for (int i = 0; i < TU_INSTANCE_EXTENSION_COUNT; i++) {
      if (tu_supported_instance_extensions.extensions[i]) {
         vk_outarray_append(&out, prop) { *prop = tu_instance_extensions[i]; }
      }
   }

   return vk_outarray_status(&out);
}

VkResult
tu_EnumerateDeviceExtensionProperties(VkPhysicalDevice physicalDevice,
                                      const char *pLayerName,
                                      uint32_t *pPropertyCount,
                                      VkExtensionProperties *pProperties)
{
   TU_FROM_HANDLE(tu_physical_device, device, physicalDevice);
   VK_OUTARRAY_MAKE(out, pProperties, pPropertyCount);

   /* We support no layers */
   if (pLayerName)
      return vk_error(NULL, VK_ERROR_LAYER_NOT_PRESENT);

   for (int i = 0; i < TU_DEVICE_EXTENSION_COUNT; i++) {
      if (device->supported_extensions.extensions[i]) {
         vk_outarray_append(&out, prop) { *prop = tu_device_extensions[i]; }
      }
   }

   return vk_outarray_status(&out);
}

PFN_vkVoidFunction
tu_GetInstanceProcAddr(VkInstance _instance, const char *pName)
{
   TU_FROM_HANDLE(tu_instance, instance, _instance);

   return tu_lookup_entrypoint_checked(
      pName, instance ? instance->api_version : 0,
      instance ? &instance->enabled_extensions : NULL, NULL);
}

/* The loader wants us to expose a second GetInstanceProcAddr function
 * to work around certain LD_PRELOAD issues seen in apps.
 */
PUBLIC
VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL
vk_icdGetInstanceProcAddr(VkInstance instance, const char *pName);

PUBLIC
VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL
vk_icdGetInstanceProcAddr(VkInstance instance, const char *pName)
{
   return tu_GetInstanceProcAddr(instance, pName);
}

PFN_vkVoidFunction
tu_GetDeviceProcAddr(VkDevice _device, const char *pName)
{
   TU_FROM_HANDLE(tu_device, device, _device);

   return tu_lookup_entrypoint_checked(pName, device->instance->api_version,
                                       &device->instance->enabled_extensions,
                                       &device->enabled_extensions);
}

static VkResult
tu_alloc_memory(struct tu_device *device,
                const VkMemoryAllocateInfo *pAllocateInfo,
                const VkAllocationCallbacks *pAllocator,
                VkDeviceMemory *pMem)
{
   struct tu_device_memory *mem;
   VkResult result;

   assert(pAllocateInfo->sType == VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO);

   if (pAllocateInfo->allocationSize == 0) {
      /* Apparently, this is allowed */
      *pMem = VK_NULL_HANDLE;
      return VK_SUCCESS;
   }

   mem = vk_alloc2(&device->alloc, pAllocator, sizeof(*mem), 8,
                   VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (mem == NULL)
      return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);

   result = tu_bo_init_new(device, &mem->bo, pAllocateInfo->allocationSize);
   if (result != VK_SUCCESS) {
      vk_free2(&device->alloc, pAllocator, mem);
      return result;
   }

   mem->size = pAllocateInfo->allocationSize;
   mem->type_index = pAllocateInfo->memoryTypeIndex;

   mem->map = NULL;
   mem->user_ptr = NULL;

   *pMem = tu_device_memory_to_handle(mem);

   return VK_SUCCESS;
}

VkResult
tu_AllocateMemory(VkDevice _device,
                  const VkMemoryAllocateInfo *pAllocateInfo,
                  const VkAllocationCallbacks *pAllocator,
                  VkDeviceMemory *pMem)
{
   TU_FROM_HANDLE(tu_device, device, _device);
   return tu_alloc_memory(device, pAllocateInfo, pAllocator, pMem);
}

void
tu_FreeMemory(VkDevice _device,
              VkDeviceMemory _mem,
              const VkAllocationCallbacks *pAllocator)
{
   TU_FROM_HANDLE(tu_device, device, _device);
   TU_FROM_HANDLE(tu_device_memory, mem, _mem);

   if (mem == NULL)
      return;

   tu_bo_finish(device, &mem->bo);
   vk_free2(&device->alloc, pAllocator, mem);
}

VkResult
tu_MapMemory(VkDevice _device,
             VkDeviceMemory _memory,
             VkDeviceSize offset,
             VkDeviceSize size,
             VkMemoryMapFlags flags,
             void **ppData)
{
   TU_FROM_HANDLE(tu_device, device, _device);
   TU_FROM_HANDLE(tu_device_memory, mem, _memory);
   VkResult result;

   if (mem == NULL) {
      *ppData = NULL;
      return VK_SUCCESS;
   }

   if (mem->user_ptr) {
      *ppData = mem->user_ptr;
   } else if (!mem->map) {
      result = tu_bo_map(device, &mem->bo);
      if (result != VK_SUCCESS)
         return result;
      *ppData = mem->map = mem->bo.map;
   } else
      *ppData = mem->map;

   if (*ppData) {
      *ppData += offset;
      return VK_SUCCESS;
   }

   return vk_error(device->instance, VK_ERROR_MEMORY_MAP_FAILED);
}

void
tu_UnmapMemory(VkDevice _device, VkDeviceMemory _memory)
{
   /* I do not see any unmapping done by the freedreno Gallium driver. */
}

VkResult
tu_FlushMappedMemoryRanges(VkDevice _device,
                           uint32_t memoryRangeCount,
                           const VkMappedMemoryRange *pMemoryRanges)
{
   return VK_SUCCESS;
}

VkResult
tu_InvalidateMappedMemoryRanges(VkDevice _device,
                                uint32_t memoryRangeCount,
                                const VkMappedMemoryRange *pMemoryRanges)
{
   return VK_SUCCESS;
}

void
tu_GetBufferMemoryRequirements(VkDevice _device,
                               VkBuffer _buffer,
                               VkMemoryRequirements *pMemoryRequirements)
{
   TU_FROM_HANDLE(tu_buffer, buffer, _buffer);

   pMemoryRequirements->memoryTypeBits = 1;
   pMemoryRequirements->alignment = 16;
   pMemoryRequirements->size =
      align64(buffer->size, pMemoryRequirements->alignment);
}

void
tu_GetBufferMemoryRequirements2(
   VkDevice device,
   const VkBufferMemoryRequirementsInfo2KHR *pInfo,
   VkMemoryRequirements2KHR *pMemoryRequirements)
{
   tu_GetBufferMemoryRequirements(device, pInfo->buffer,
                                  &pMemoryRequirements->memoryRequirements);
}

void
tu_GetImageMemoryRequirements(VkDevice _device,
                              VkImage _image,
                              VkMemoryRequirements *pMemoryRequirements)
{
   TU_FROM_HANDLE(tu_image, image, _image);

   pMemoryRequirements->memoryTypeBits = 1;
   pMemoryRequirements->size = image->size;
   pMemoryRequirements->alignment = image->alignment;
}

void
tu_GetImageMemoryRequirements2(VkDevice device,
                               const VkImageMemoryRequirementsInfo2KHR *pInfo,
                               VkMemoryRequirements2KHR *pMemoryRequirements)
{
   tu_GetImageMemoryRequirements(device, pInfo->image,
                                 &pMemoryRequirements->memoryRequirements);
}

void
tu_GetImageSparseMemoryRequirements(
   VkDevice device,
   VkImage image,
   uint32_t *pSparseMemoryRequirementCount,
   VkSparseImageMemoryRequirements *pSparseMemoryRequirements)
{
   tu_stub();
}

void
tu_GetImageSparseMemoryRequirements2(
   VkDevice device,
   const VkImageSparseMemoryRequirementsInfo2KHR *pInfo,
   uint32_t *pSparseMemoryRequirementCount,
   VkSparseImageMemoryRequirements2KHR *pSparseMemoryRequirements)
{
   tu_stub();
}

void
tu_GetDeviceMemoryCommitment(VkDevice device,
                             VkDeviceMemory memory,
                             VkDeviceSize *pCommittedMemoryInBytes)
{
   *pCommittedMemoryInBytes = 0;
}

VkResult
tu_BindBufferMemory2(VkDevice device,
                     uint32_t bindInfoCount,
                     const VkBindBufferMemoryInfoKHR *pBindInfos)
{
   return VK_SUCCESS;
}

VkResult
tu_BindBufferMemory(VkDevice device,
                    VkBuffer buffer,
                    VkDeviceMemory memory,
                    VkDeviceSize memoryOffset)
{
   const VkBindBufferMemoryInfoKHR info = {
      .sType = VK_STRUCTURE_TYPE_BIND_BUFFER_MEMORY_INFO_KHR,
      .buffer = buffer,
      .memory = memory,
      .memoryOffset = memoryOffset
   };

   return tu_BindBufferMemory2(device, 1, &info);
}

VkResult
tu_BindImageMemory2(VkDevice device,
                    uint32_t bindInfoCount,
                    const VkBindImageMemoryInfoKHR *pBindInfos)
{
   return VK_SUCCESS;
}

VkResult
tu_BindImageMemory(VkDevice device,
                   VkImage image,
                   VkDeviceMemory memory,
                   VkDeviceSize memoryOffset)
{
   const VkBindImageMemoryInfoKHR info = {
      .sType = VK_STRUCTURE_TYPE_BIND_IMAGE_MEMORY_INFO_KHR,
      .image = image,
      .memory = memory,
      .memoryOffset = memoryOffset
   };

   return tu_BindImageMemory2(device, 1, &info);
}

VkResult
tu_QueueBindSparse(VkQueue _queue,
                   uint32_t bindInfoCount,
                   const VkBindSparseInfo *pBindInfo,
                   VkFence _fence)
{
   return VK_SUCCESS;
}

VkResult
tu_CreateFence(VkDevice _device,
               const VkFenceCreateInfo *pCreateInfo,
               const VkAllocationCallbacks *pAllocator,
               VkFence *pFence)
{
   TU_FROM_HANDLE(tu_device, device, _device);

   struct tu_fence *fence =
      vk_alloc2(&device->alloc, pAllocator, sizeof(*fence), 8,
                VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);

   if (!fence)
      return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);

   *pFence = tu_fence_to_handle(fence);

   return VK_SUCCESS;
}

void
tu_DestroyFence(VkDevice _device,
                VkFence _fence,
                const VkAllocationCallbacks *pAllocator)
{
   TU_FROM_HANDLE(tu_device, device, _device);
   TU_FROM_HANDLE(tu_fence, fence, _fence);

   if (!fence)
      return;

   vk_free2(&device->alloc, pAllocator, fence);
}

VkResult
tu_WaitForFences(VkDevice _device,
                 uint32_t fenceCount,
                 const VkFence *pFences,
                 VkBool32 waitAll,
                 uint64_t timeout)
{
   return VK_SUCCESS;
}

VkResult
tu_ResetFences(VkDevice _device, uint32_t fenceCount, const VkFence *pFences)
{
   return VK_SUCCESS;
}

VkResult
tu_GetFenceStatus(VkDevice _device, VkFence _fence)
{
   return VK_SUCCESS;
}

// Queue semaphore functions

VkResult
tu_CreateSemaphore(VkDevice _device,
                   const VkSemaphoreCreateInfo *pCreateInfo,
                   const VkAllocationCallbacks *pAllocator,
                   VkSemaphore *pSemaphore)
{
   TU_FROM_HANDLE(tu_device, device, _device);

   struct tu_semaphore *sem =
      vk_alloc2(&device->alloc, pAllocator, sizeof(*sem), 8,
                VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (!sem)
      return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);

   *pSemaphore = tu_semaphore_to_handle(sem);
   return VK_SUCCESS;
}

void
tu_DestroySemaphore(VkDevice _device,
                    VkSemaphore _semaphore,
                    const VkAllocationCallbacks *pAllocator)
{
   TU_FROM_HANDLE(tu_device, device, _device);
   TU_FROM_HANDLE(tu_semaphore, sem, _semaphore);
   if (!_semaphore)
      return;

   vk_free2(&device->alloc, pAllocator, sem);
}

VkResult
tu_CreateEvent(VkDevice _device,
               const VkEventCreateInfo *pCreateInfo,
               const VkAllocationCallbacks *pAllocator,
               VkEvent *pEvent)
{
   TU_FROM_HANDLE(tu_device, device, _device);
   struct tu_event *event =
      vk_alloc2(&device->alloc, pAllocator, sizeof(*event), 8,
                VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);

   if (!event)
      return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);

   *pEvent = tu_event_to_handle(event);

   return VK_SUCCESS;
}

void
tu_DestroyEvent(VkDevice _device,
                VkEvent _event,
                const VkAllocationCallbacks *pAllocator)
{
   TU_FROM_HANDLE(tu_device, device, _device);
   TU_FROM_HANDLE(tu_event, event, _event);

   if (!event)
      return;
   vk_free2(&device->alloc, pAllocator, event);
}

VkResult
tu_GetEventStatus(VkDevice _device, VkEvent _event)
{
   TU_FROM_HANDLE(tu_event, event, _event);

   if (*event->map == 1)
      return VK_EVENT_SET;
   return VK_EVENT_RESET;
}

VkResult
tu_SetEvent(VkDevice _device, VkEvent _event)
{
   TU_FROM_HANDLE(tu_event, event, _event);
   *event->map = 1;

   return VK_SUCCESS;
}

VkResult
tu_ResetEvent(VkDevice _device, VkEvent _event)
{
   TU_FROM_HANDLE(tu_event, event, _event);
   *event->map = 0;

   return VK_SUCCESS;
}

VkResult
tu_CreateBuffer(VkDevice _device,
                const VkBufferCreateInfo *pCreateInfo,
                const VkAllocationCallbacks *pAllocator,
                VkBuffer *pBuffer)
{
   TU_FROM_HANDLE(tu_device, device, _device);
   struct tu_buffer *buffer;

   assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO);

   buffer = vk_alloc2(&device->alloc, pAllocator, sizeof(*buffer), 8,
                      VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (buffer == NULL)
      return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);

   buffer->size = pCreateInfo->size;
   buffer->usage = pCreateInfo->usage;
   buffer->flags = pCreateInfo->flags;

   *pBuffer = tu_buffer_to_handle(buffer);

   return VK_SUCCESS;
}

void
tu_DestroyBuffer(VkDevice _device,
                 VkBuffer _buffer,
                 const VkAllocationCallbacks *pAllocator)
{
   TU_FROM_HANDLE(tu_device, device, _device);
   TU_FROM_HANDLE(tu_buffer, buffer, _buffer);

   if (!buffer)
      return;

   vk_free2(&device->alloc, pAllocator, buffer);
}

static uint32_t
tu_surface_max_layer_count(struct tu_image_view *iview)
{
   return iview->type == VK_IMAGE_VIEW_TYPE_3D
             ? iview->extent.depth
             : (iview->base_layer + iview->layer_count);
}
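
/* For example (illustrative): a 2D array view with base_layer = 2 and
 * layer_count = 4 yields 6, while a 3D view of a 32-deep image yields 32.
 */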

VkResult
tu_CreateFramebuffer(VkDevice _device,
                     const VkFramebufferCreateInfo *pCreateInfo,
                     const VkAllocationCallbacks *pAllocator,
                     VkFramebuffer *pFramebuffer)
{
   TU_FROM_HANDLE(tu_device, device, _device);
   struct tu_framebuffer *framebuffer;

   assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO);

   size_t size = sizeof(*framebuffer) + sizeof(struct tu_attachment_info) *
                                           pCreateInfo->attachmentCount;
   framebuffer = vk_alloc2(&device->alloc, pAllocator, size, 8,
                           VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (framebuffer == NULL)
      return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);

   framebuffer->attachment_count = pCreateInfo->attachmentCount;
   framebuffer->width = pCreateInfo->width;
   framebuffer->height = pCreateInfo->height;
   framebuffer->layers = pCreateInfo->layers;
   for (uint32_t i = 0; i < pCreateInfo->attachmentCount; i++) {
      VkImageView _iview = pCreateInfo->pAttachments[i];
      struct tu_image_view *iview = tu_image_view_from_handle(_iview);
      framebuffer->attachments[i].attachment = iview;

      framebuffer->width = MIN2(framebuffer->width, iview->extent.width);
      framebuffer->height = MIN2(framebuffer->height, iview->extent.height);
      framebuffer->layers =
         MIN2(framebuffer->layers, tu_surface_max_layer_count(iview));
   }

   *pFramebuffer = tu_framebuffer_to_handle(framebuffer);
   return VK_SUCCESS;
}

void
tu_DestroyFramebuffer(VkDevice _device,
                      VkFramebuffer _fb,
                      const VkAllocationCallbacks *pAllocator)
{
   TU_FROM_HANDLE(tu_device, device, _device);
   TU_FROM_HANDLE(tu_framebuffer, fb, _fb);

   if (!fb)
      return;
   vk_free2(&device->alloc, pAllocator, fb);
}

static void
tu_init_sampler(struct tu_device *device,
                struct tu_sampler *sampler,
                const VkSamplerCreateInfo *pCreateInfo)
{
}

VkResult
tu_CreateSampler(VkDevice _device,
                 const VkSamplerCreateInfo *pCreateInfo,
                 const VkAllocationCallbacks *pAllocator,
                 VkSampler *pSampler)
{
   TU_FROM_HANDLE(tu_device, device, _device);
   struct tu_sampler *sampler;

   assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_SAMPLER_CREATE_INFO);

   sampler = vk_alloc2(&device->alloc, pAllocator, sizeof(*sampler), 8,
                       VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (!sampler)
      return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);

   tu_init_sampler(device, sampler, pCreateInfo);
   *pSampler = tu_sampler_to_handle(sampler);

   return VK_SUCCESS;
}

void
tu_DestroySampler(VkDevice _device,
                  VkSampler _sampler,
                  const VkAllocationCallbacks *pAllocator)
{
   TU_FROM_HANDLE(tu_device, device, _device);
   TU_FROM_HANDLE(tu_sampler, sampler, _sampler);

   if (!sampler)
      return;
   vk_free2(&device->alloc, pAllocator, sampler);
}

/* vk_icd.h does not declare this function, so we declare it here to
 * suppress Wmissing-prototypes.
 */
PUBLIC VKAPI_ATTR VkResult VKAPI_CALL
vk_icdNegotiateLoaderICDInterfaceVersion(uint32_t *pSupportedVersion);

PUBLIC VKAPI_ATTR VkResult VKAPI_CALL
vk_icdNegotiateLoaderICDInterfaceVersion(uint32_t *pSupportedVersion)
{
   /* For the full details on loader interface versioning, see
    * <https://github.com/KhronosGroup/Vulkan-LoaderAndValidationLayers/blob/master/loader/LoaderAndLayerInterface.md>.
    * What follows is a condensed summary, to help you navigate the large and
    * confusing official doc.
    *
    * - Loader interface v0 is incompatible with later versions. We don't
    *   support it.
    *
    * - In loader interface v1:
    *   - The first ICD entrypoint called by the loader is
    *     vk_icdGetInstanceProcAddr(). The ICD must statically expose this
    *     entrypoint.
    *   - The ICD must statically expose no other Vulkan symbol unless it
    *     is linked with -Bsymbolic.
    *   - Each dispatchable Vulkan handle created by the ICD must be
    *     a pointer to a struct whose first member is VK_LOADER_DATA. The
    *     ICD must initialize VK_LOADER_DATA.loaderMagic to
    *     ICD_LOADER_MAGIC.
    *   - The loader implements vkCreate{PLATFORM}SurfaceKHR() and
    *     vkDestroySurfaceKHR(). The ICD must be capable of working with
    *     such loader-managed surfaces.
    *
    * - Loader interface v2 differs from v1 in:
    *   - The first ICD entrypoint called by the loader is
    *     vk_icdNegotiateLoaderICDInterfaceVersion(). The ICD must
    *     statically expose this entrypoint.
    *
    * - Loader interface v3 differs from v2 in:
    *   - The ICD must implement vkCreate{PLATFORM}SurfaceKHR(),
    *     vkDestroySurfaceKHR(), and other API which uses VkSurfaceKHR,
    *     because the loader no longer does so.
    */
   *pSupportedVersion = MIN2(*pSupportedVersion, 3u);
   return VK_SUCCESS;
}

void
tu_GetPhysicalDeviceExternalSemaphoreProperties(
   VkPhysicalDevice physicalDevice,
   const VkPhysicalDeviceExternalSemaphoreInfoKHR *pExternalSemaphoreInfo,
   VkExternalSemaphorePropertiesKHR *pExternalSemaphoreProperties)
{
   pExternalSemaphoreProperties->exportFromImportedHandleTypes = 0;
   pExternalSemaphoreProperties->compatibleHandleTypes = 0;
   pExternalSemaphoreProperties->externalSemaphoreFeatures = 0;
}

void
tu_GetPhysicalDeviceExternalFenceProperties(
   VkPhysicalDevice physicalDevice,
   const VkPhysicalDeviceExternalFenceInfoKHR *pExternalFenceInfo,
   VkExternalFencePropertiesKHR *pExternalFenceProperties)
{
   pExternalFenceProperties->exportFromImportedHandleTypes = 0;
   pExternalFenceProperties->compatibleHandleTypes = 0;
   pExternalFenceProperties->externalFenceFeatures = 0;
}

VkResult
tu_CreateDebugReportCallbackEXT(
   VkInstance _instance,
   const VkDebugReportCallbackCreateInfoEXT *pCreateInfo,
   const VkAllocationCallbacks *pAllocator,
   VkDebugReportCallbackEXT *pCallback)
{
   TU_FROM_HANDLE(tu_instance, instance, _instance);
   return vk_create_debug_report_callback(&instance->debug_report_callbacks,
                                          pCreateInfo, pAllocator,
                                          &instance->alloc, pCallback);
}

void
tu_DestroyDebugReportCallbackEXT(VkInstance _instance,
                                 VkDebugReportCallbackEXT _callback,
                                 const VkAllocationCallbacks *pAllocator)
{
   TU_FROM_HANDLE(tu_instance, instance, _instance);
   vk_destroy_debug_report_callback(&instance->debug_report_callbacks,
                                    _callback, pAllocator, &instance->alloc);
}

void
tu_DebugReportMessageEXT(VkInstance _instance,
                         VkDebugReportFlagsEXT flags,
                         VkDebugReportObjectTypeEXT objectType,
                         uint64_t object,
                         size_t location,
                         int32_t messageCode,
                         const char *pLayerPrefix,
                         const char *pMessage)
{
   TU_FROM_HANDLE(tu_instance, instance, _instance);
   vk_debug_report(&instance->debug_report_callbacks, flags, objectType,
                   object, location, messageCode, pLayerPrefix, pMessage);
}

void
tu_GetDeviceGroupPeerMemoryFeatures(
   VkDevice device,
   uint32_t heapIndex,
   uint32_t localDeviceIndex,
   uint32_t remoteDeviceIndex,
   VkPeerMemoryFeatureFlags *pPeerMemoryFeatures)
{
   assert(localDeviceIndex == remoteDeviceIndex);

   *pPeerMemoryFeatures = VK_PEER_MEMORY_FEATURE_COPY_SRC_BIT |
                          VK_PEER_MEMORY_FEATURE_COPY_DST_BIT |
                          VK_PEER_MEMORY_FEATURE_GENERIC_SRC_BIT |
                          VK_PEER_MEMORY_FEATURE_GENERIC_DST_BIT;
}