turnip: preliminary support for tu_GetRenderAreaGranularity
[mesa.git] src/freedreno/vulkan/tu_device.c
/*
 * Copyright © 2016 Red Hat.
 * Copyright © 2016 Bas Nieuwenhuizen
 *
 * based in part on anv driver which is:
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

#include "tu_private.h"

#include <errno.h>
#include <fcntl.h>
#include <libsync.h>
#include <stdbool.h>
#include <string.h>
#include <sys/mman.h>
#include <sys/sysinfo.h>
#include <unistd.h>
#include <xf86drm.h>

#include "util/debug.h"
#include "util/disk_cache.h"
#include "util/strtod.h"
#include "vk_format.h"
#include "vk_util.h"

#include "drm/msm_drm.h"

/* The cache UUID is laid out as: bytes 0-3 = mesa build timestamp,
 * bytes 4-5 = GPU family, bytes 6+ = the string "tu".
 */
static int
tu_device_get_cache_uuid(uint16_t family, void *uuid)
{
   uint32_t mesa_timestamp;
   uint16_t f = family;
   memset(uuid, 0, VK_UUID_SIZE);
   if (!disk_cache_get_function_timestamp(tu_device_get_cache_uuid,
                                          &mesa_timestamp))
      return -1;

   memcpy(uuid, &mesa_timestamp, 4);
   memcpy((char *) uuid + 4, &f, 2);
   snprintf((char *) uuid + 6, VK_UUID_SIZE - 10, "tu");
   return 0;
}

static void
tu_get_driver_uuid(void *uuid)
{
   memset(uuid, 0, VK_UUID_SIZE);
   snprintf(uuid, VK_UUID_SIZE, "freedreno");
}

static void
tu_get_device_uuid(void *uuid)
{
   memset(uuid, 0, VK_UUID_SIZE);
}

VkResult
tu_bo_init_new(struct tu_device *dev, struct tu_bo *bo, uint64_t size)
{
   /* TODO: Choose better flags. As of 2018-11-12, freedreno/drm/msm_bo.c
    * always sets `flags = MSM_BO_WC`, and we copy that behavior here.
    */
   uint32_t gem_handle = tu_gem_new(dev, size, MSM_BO_WC);
   if (!gem_handle)
      goto fail_new;

   uint64_t iova = tu_gem_info_iova(dev, gem_handle);
   if (!iova)
      goto fail_info;

   *bo = (struct tu_bo) {
      .gem_handle = gem_handle,
      .size = size,
      .iova = iova,
   };

   return VK_SUCCESS;

fail_info:
   /* *bo has not been written yet, so close the local handle. */
   tu_gem_close(dev, gem_handle);
fail_new:
   return vk_error(dev->instance, VK_ERROR_OUT_OF_DEVICE_MEMORY);
}

VkResult
tu_bo_map(struct tu_device *dev, struct tu_bo *bo)
{
   if (bo->map)
      return VK_SUCCESS;

   uint64_t offset = tu_gem_info_offset(dev, bo->gem_handle);
   if (!offset)
      return vk_error(dev->instance, VK_ERROR_OUT_OF_DEVICE_MEMORY);

   /* TODO: Should we use the wrapper os_mmap() like Freedreno does? */
   void *map = mmap(0, bo->size, PROT_READ | PROT_WRITE, MAP_SHARED,
                    dev->physical_device->local_fd, offset);
   if (map == MAP_FAILED)
      return vk_error(dev->instance, VK_ERROR_MEMORY_MAP_FAILED);

   bo->map = map;
   return VK_SUCCESS;
}

void
tu_bo_finish(struct tu_device *dev, struct tu_bo *bo)
{
   assert(bo->gem_handle);

   if (bo->map)
      munmap(bo->map, bo->size);

   tu_gem_close(dev, bo->gem_handle);
}
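
/* Editor's sketch (not driver code): the intended lifecycle of the BO
 * helpers above -- allocate, map, write, release. `example_fill_bo` is a
 * hypothetical name; everything it calls is defined in this file or in
 * tu_private.h. Kept under #if 0 so it is illustrative only.
 */
#if 0
static VkResult
example_fill_bo(struct tu_device *dev, uint64_t size, uint32_t value)
{
   struct tu_bo bo;
   VkResult result = tu_bo_init_new(dev, &bo, size);
   if (result != VK_SUCCESS)
      return result;

   result = tu_bo_map(dev, &bo);
   if (result != VK_SUCCESS) {
      tu_bo_finish(dev, &bo);
      return result;
   }

   /* The mapping is write-combined (MSM_BO_WC): fine for filling, but slow
    * to read back.
    */
   for (uint64_t i = 0; i < size / sizeof(uint32_t); i++)
      ((uint32_t *) bo.map)[i] = value;

   /* tu_bo_finish() munmaps the CPU mapping and closes the GEM handle. */
   tu_bo_finish(dev, &bo);
   return VK_SUCCESS;
}
#endif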

static VkResult
tu_physical_device_init(struct tu_physical_device *device,
                        struct tu_instance *instance,
                        drmDevicePtr drm_device)
{
   const char *path = drm_device->nodes[DRM_NODE_RENDER];
   VkResult result = VK_SUCCESS;
   drmVersionPtr version;
   int fd;
   int master_fd = -1;

   fd = open(path, O_RDWR | O_CLOEXEC);
   if (fd < 0) {
      return vk_errorf(instance, VK_ERROR_INCOMPATIBLE_DRIVER,
                       "failed to open device %s", path);
   }

   /* Version 1.3 added MSM_INFO_IOVA. */
   const int min_version_major = 1;
   const int min_version_minor = 3;

   version = drmGetVersion(fd);
   if (!version) {
      close(fd);
      return vk_errorf(instance, VK_ERROR_INCOMPATIBLE_DRIVER,
                       "failed to query kernel driver version for device %s",
                       path);
   }

   if (strcmp(version->name, "msm")) {
      drmFreeVersion(version);
      if (master_fd != -1)
         close(master_fd);
      close(fd);
      return vk_errorf(instance, VK_ERROR_INCOMPATIBLE_DRIVER,
                       "device %s does not use the msm kernel driver", path);
   }

   if (version->version_major != min_version_major ||
       version->version_minor < min_version_minor) {
      result = vk_errorf(instance, VK_ERROR_INCOMPATIBLE_DRIVER,
                         "kernel driver for device %s has version %d.%d, "
                         "but Vulkan requires version >= %d.%d",
                         path, version->version_major, version->version_minor,
                         min_version_major, min_version_minor);
      drmFreeVersion(version);
      close(fd);
      return result;
   }

   drmFreeVersion(version);

   if (instance->debug_flags & TU_DEBUG_STARTUP)
      tu_logi("Found compatible device '%s'.", path);

   device->_loader_data.loaderMagic = ICD_LOADER_MAGIC;
   device->instance = instance;
   assert(strlen(path) < ARRAY_SIZE(device->path));
   strncpy(device->path, path, ARRAY_SIZE(device->path));

   if (instance->enabled_extensions.KHR_display) {
      master_fd =
         open(drm_device->nodes[DRM_NODE_PRIMARY], O_RDWR | O_CLOEXEC);
      if (master_fd >= 0) {
         /* TODO: free master_fd if accel is not working? */
      }
   }

   device->master_fd = master_fd;
   device->local_fd = fd;

   if (tu_drm_get_gpu_id(device, &device->gpu_id)) {
      if (instance->debug_flags & TU_DEBUG_STARTUP)
         tu_logi("Could not query the GPU ID");
      result = vk_errorf(instance, VK_ERROR_INITIALIZATION_FAILED,
                         "could not get GPU ID");
      goto fail;
   }

   if (tu_drm_get_gmem_size(device, &device->gmem_size)) {
      if (instance->debug_flags & TU_DEBUG_STARTUP)
         tu_logi("Could not query the GMEM size");
      result = vk_errorf(instance, VK_ERROR_INITIALIZATION_FAILED,
                         "could not get GMEM size");
      goto fail;
   }

   memset(device->name, 0, sizeof(device->name));
   sprintf(device->name, "FD%d", device->gpu_id);

   switch (device->gpu_id) {
   case 630:
      device->tile_align_w = 32;
      device->tile_align_h = 32;
      break;
   default:
      result = vk_errorf(instance, VK_ERROR_INITIALIZATION_FAILED,
                         "device %s is unsupported", device->name);
      goto fail;
   }
   if (tu_device_get_cache_uuid(device->gpu_id, device->cache_uuid)) {
      result = vk_errorf(instance, VK_ERROR_INITIALIZATION_FAILED,
                         "cannot generate UUID");
      goto fail;
   }

   /* The gpu id is already embedded in the cache UUID, so the device name
    * plus the hex-formatted UUID is all the disk cache needs as a key.
    */
   char buf[VK_UUID_SIZE * 2 + 1];
   disk_cache_format_hex_id(buf, device->cache_uuid, VK_UUID_SIZE * 2);
   device->disk_cache = disk_cache_create(device->name, buf, 0);

   fprintf(stderr, "WARNING: tu is not a conformant vulkan implementation, "
                   "testing use only.\n");

   tu_get_driver_uuid(&device->driver_uuid);
   tu_get_device_uuid(&device->device_uuid);

   tu_fill_device_extension_table(device, &device->supported_extensions);

   if (result != VK_SUCCESS) {
      vk_error(instance, result);
      goto fail;
   }

   return VK_SUCCESS;

fail:
   close(fd);
   if (master_fd != -1)
      close(master_fd);
   return result;
}

static void
tu_physical_device_finish(struct tu_physical_device *device)
{
   disk_cache_destroy(device->disk_cache);
   close(device->local_fd);
   if (device->master_fd != -1)
      close(device->master_fd);
}

static void *
default_alloc_func(void *pUserData,
                   size_t size,
                   size_t align,
                   VkSystemAllocationScope allocationScope)
{
   return malloc(size);
}

static void *
default_realloc_func(void *pUserData,
                     void *pOriginal,
                     size_t size,
                     size_t align,
                     VkSystemAllocationScope allocationScope)
{
   return realloc(pOriginal, size);
}

static void
default_free_func(void *pUserData, void *pMemory)
{
   free(pMemory);
}

static const VkAllocationCallbacks default_alloc = {
   .pUserData = NULL,
   .pfnAllocation = default_alloc_func,
   .pfnReallocation = default_realloc_func,
   .pfnFree = default_free_func,
};

static const struct debug_control tu_debug_options[] = {
   { "startup", TU_DEBUG_STARTUP }, { NULL, 0 }
};

const char *
tu_get_debug_option_name(int id)
{
   assert(id < ARRAY_SIZE(tu_debug_options) - 1);
   return tu_debug_options[id].string;
}
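
/* Example: running an application with `TU_DEBUG=startup` in the
 * environment enables the tu_logi() startup messages used in
 * tu_physical_device_init() above; the option string comes from the
 * tu_debug_options table.
 */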

static int
tu_get_instance_extension_index(const char *name)
{
   for (unsigned i = 0; i < TU_INSTANCE_EXTENSION_COUNT; ++i) {
      if (strcmp(name, tu_instance_extensions[i].extensionName) == 0)
         return i;
   }
   return -1;
}

VkResult
tu_CreateInstance(const VkInstanceCreateInfo *pCreateInfo,
                  const VkAllocationCallbacks *pAllocator,
                  VkInstance *pInstance)
{
   struct tu_instance *instance;
   VkResult result;

   assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_INSTANCE_CREATE_INFO);

   uint32_t client_version;
   if (pCreateInfo->pApplicationInfo &&
       pCreateInfo->pApplicationInfo->apiVersion != 0) {
      client_version = pCreateInfo->pApplicationInfo->apiVersion;
   } else {
      tu_EnumerateInstanceVersion(&client_version);
   }

   instance = vk_zalloc2(&default_alloc, pAllocator, sizeof(*instance), 8,
                         VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
   if (!instance)
      return vk_error(NULL, VK_ERROR_OUT_OF_HOST_MEMORY);

   instance->_loader_data.loaderMagic = ICD_LOADER_MAGIC;

   if (pAllocator)
      instance->alloc = *pAllocator;
   else
      instance->alloc = default_alloc;

   instance->api_version = client_version;
   instance->physical_device_count = -1;

   instance->debug_flags =
      parse_debug_string(getenv("TU_DEBUG"), tu_debug_options);

   if (instance->debug_flags & TU_DEBUG_STARTUP)
      tu_logi("Created an instance");

   for (uint32_t i = 0; i < pCreateInfo->enabledExtensionCount; i++) {
      const char *ext_name = pCreateInfo->ppEnabledExtensionNames[i];
      int index = tu_get_instance_extension_index(ext_name);

      if (index < 0 || !tu_supported_instance_extensions.extensions[index]) {
         vk_free2(&default_alloc, pAllocator, instance);
         return vk_error(instance, VK_ERROR_EXTENSION_NOT_PRESENT);
      }

      instance->enabled_extensions.extensions[index] = true;
   }

   result = vk_debug_report_instance_init(&instance->debug_report_callbacks);
   if (result != VK_SUCCESS) {
      vk_free2(&default_alloc, pAllocator, instance);
      return vk_error(instance, result);
   }

   _mesa_locale_init();

   VG(VALGRIND_CREATE_MEMPOOL(instance, 0, false));

   *pInstance = tu_instance_to_handle(instance);

   return VK_SUCCESS;
}

void
tu_DestroyInstance(VkInstance _instance,
                   const VkAllocationCallbacks *pAllocator)
{
   TU_FROM_HANDLE(tu_instance, instance, _instance);

   if (!instance)
      return;

   for (int i = 0; i < instance->physical_device_count; ++i) {
      tu_physical_device_finish(instance->physical_devices + i);
   }

   VG(VALGRIND_DESTROY_MEMPOOL(instance));

   _mesa_locale_fini();

   vk_debug_report_instance_destroy(&instance->debug_report_callbacks);

   vk_free(&instance->alloc, instance);
}

static VkResult
tu_enumerate_devices(struct tu_instance *instance)
{
   /* TODO: Check for more devices ? */
   drmDevicePtr devices[8];
   VkResult result = VK_ERROR_INCOMPATIBLE_DRIVER;
   int max_devices;

   instance->physical_device_count = 0;

   max_devices = drmGetDevices2(0, devices, ARRAY_SIZE(devices));

   if (instance->debug_flags & TU_DEBUG_STARTUP)
      tu_logi("Found %d drm nodes", max_devices);

   if (max_devices < 1)
      return vk_error(instance, VK_ERROR_INCOMPATIBLE_DRIVER);

   for (unsigned i = 0; i < (unsigned) max_devices; i++) {
      if (devices[i]->available_nodes & 1 << DRM_NODE_RENDER &&
          devices[i]->bustype == DRM_BUS_PLATFORM) {

         result = tu_physical_device_init(
            instance->physical_devices + instance->physical_device_count,
            instance, devices[i]);
         if (result == VK_SUCCESS)
            ++instance->physical_device_count;
         else if (result != VK_ERROR_INCOMPATIBLE_DRIVER)
            break;
      }
   }
   drmFreeDevices(devices, max_devices);

   return result;
}

VkResult
tu_EnumeratePhysicalDevices(VkInstance _instance,
                            uint32_t *pPhysicalDeviceCount,
                            VkPhysicalDevice *pPhysicalDevices)
{
   TU_FROM_HANDLE(tu_instance, instance, _instance);
   VK_OUTARRAY_MAKE(out, pPhysicalDevices, pPhysicalDeviceCount);

   VkResult result;

   if (instance->physical_device_count < 0) {
      result = tu_enumerate_devices(instance);
      if (result != VK_SUCCESS && result != VK_ERROR_INCOMPATIBLE_DRIVER)
         return result;
   }

   for (uint32_t i = 0; i < instance->physical_device_count; ++i) {
      vk_outarray_append(&out, p)
      {
         *p = tu_physical_device_to_handle(instance->physical_devices + i);
      }
   }

   return vk_outarray_status(&out);
}
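
/* Editor's sketch (not driver code): the standard Vulkan two-call idiom
 * served by the VK_OUTARRAY machinery above. `example_enumerate` is a
 * hypothetical name; a real application would call through the loader
 * rather than the driver entry point.
 */
#if 0
static void
example_enumerate(VkInstance instance)
{
   /* First call: NULL array, just query the count. */
   uint32_t count = 0;
   tu_EnumeratePhysicalDevices(instance, &count, NULL);

   /* Second call: fill the array; the driver clamps count to what fits. */
   VkPhysicalDevice devices[8];
   count = MIN2(count, ARRAY_SIZE(devices));
   tu_EnumeratePhysicalDevices(instance, &count, devices);
}
#endif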

VkResult
tu_EnumeratePhysicalDeviceGroups(
   VkInstance _instance,
   uint32_t *pPhysicalDeviceGroupCount,
   VkPhysicalDeviceGroupProperties *pPhysicalDeviceGroupProperties)
{
   TU_FROM_HANDLE(tu_instance, instance, _instance);
   VK_OUTARRAY_MAKE(out, pPhysicalDeviceGroupProperties,
                    pPhysicalDeviceGroupCount);
   VkResult result;

   if (instance->physical_device_count < 0) {
      result = tu_enumerate_devices(instance);
      if (result != VK_SUCCESS && result != VK_ERROR_INCOMPATIBLE_DRIVER)
         return result;
   }

   for (uint32_t i = 0; i < instance->physical_device_count; ++i) {
      vk_outarray_append(&out, p)
      {
         p->physicalDeviceCount = 1;
         p->physicalDevices[0] =
            tu_physical_device_to_handle(instance->physical_devices + i);
         p->subsetAllocation = false;
      }
   }

   return vk_outarray_status(&out);
}

void
tu_GetPhysicalDeviceFeatures(VkPhysicalDevice physicalDevice,
                             VkPhysicalDeviceFeatures *pFeatures)
{
   memset(pFeatures, 0, sizeof(*pFeatures));

   *pFeatures = (VkPhysicalDeviceFeatures) {
      .robustBufferAccess = false,
      .fullDrawIndexUint32 = false,
      .imageCubeArray = false,
      .independentBlend = false,
      .geometryShader = false,
      .tessellationShader = false,
      .sampleRateShading = false,
      .dualSrcBlend = false,
      .logicOp = false,
      .multiDrawIndirect = false,
      .drawIndirectFirstInstance = false,
      .depthClamp = false,
      .depthBiasClamp = false,
      .fillModeNonSolid = false,
      .depthBounds = false,
      .wideLines = false,
      .largePoints = false,
      .alphaToOne = false,
      .multiViewport = false,
      .samplerAnisotropy = false,
      .textureCompressionETC2 = false,
      .textureCompressionASTC_LDR = false,
      .textureCompressionBC = false,
      .occlusionQueryPrecise = false,
      .pipelineStatisticsQuery = false,
      .vertexPipelineStoresAndAtomics = false,
      .fragmentStoresAndAtomics = false,
      .shaderTessellationAndGeometryPointSize = false,
      .shaderImageGatherExtended = false,
      .shaderStorageImageExtendedFormats = false,
      .shaderStorageImageMultisample = false,
      .shaderUniformBufferArrayDynamicIndexing = false,
      .shaderSampledImageArrayDynamicIndexing = false,
      .shaderStorageBufferArrayDynamicIndexing = false,
      .shaderStorageImageArrayDynamicIndexing = false,
      .shaderStorageImageReadWithoutFormat = false,
      .shaderStorageImageWriteWithoutFormat = false,
      .shaderClipDistance = false,
      .shaderCullDistance = false,
      .shaderFloat64 = false,
      .shaderInt64 = false,
      .shaderInt16 = false,
      .sparseBinding = false,
      .variableMultisampleRate = false,
      .inheritedQueries = false,
   };
}

void
tu_GetPhysicalDeviceFeatures2(VkPhysicalDevice physicalDevice,
                              VkPhysicalDeviceFeatures2KHR *pFeatures)
{
   vk_foreach_struct(ext, pFeatures->pNext)
   {
      switch (ext->sType) {
      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VARIABLE_POINTER_FEATURES_KHR: {
         VkPhysicalDeviceVariablePointerFeaturesKHR *features = (void *) ext;
         features->variablePointersStorageBuffer = false;
         features->variablePointers = false;
         break;
      }
      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MULTIVIEW_FEATURES_KHR: {
         VkPhysicalDeviceMultiviewFeaturesKHR *features =
            (VkPhysicalDeviceMultiviewFeaturesKHR *) ext;
         features->multiview = false;
         features->multiviewGeometryShader = false;
         features->multiviewTessellationShader = false;
         break;
      }
      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_DRAW_PARAMETER_FEATURES: {
         VkPhysicalDeviceShaderDrawParameterFeatures *features =
            (VkPhysicalDeviceShaderDrawParameterFeatures *) ext;
         features->shaderDrawParameters = false;
         break;
      }
      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROTECTED_MEMORY_FEATURES: {
         VkPhysicalDeviceProtectedMemoryFeatures *features =
            (VkPhysicalDeviceProtectedMemoryFeatures *) ext;
         features->protectedMemory = false;
         break;
      }
      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_16BIT_STORAGE_FEATURES: {
         VkPhysicalDevice16BitStorageFeatures *features =
            (VkPhysicalDevice16BitStorageFeatures *) ext;
         features->storageBuffer16BitAccess = false;
         features->uniformAndStorageBuffer16BitAccess = false;
         features->storagePushConstant16 = false;
         features->storageInputOutput16 = false;
         break;
      }
      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SAMPLER_YCBCR_CONVERSION_FEATURES: {
         VkPhysicalDeviceSamplerYcbcrConversionFeatures *features =
            (VkPhysicalDeviceSamplerYcbcrConversionFeatures *) ext;
         features->samplerYcbcrConversion = false;
         break;
      }
      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DESCRIPTOR_INDEXING_FEATURES_EXT: {
         VkPhysicalDeviceDescriptorIndexingFeaturesEXT *features =
            (VkPhysicalDeviceDescriptorIndexingFeaturesEXT *) ext;
         features->shaderInputAttachmentArrayDynamicIndexing = false;
         features->shaderUniformTexelBufferArrayDynamicIndexing = false;
         features->shaderStorageTexelBufferArrayDynamicIndexing = false;
         features->shaderUniformBufferArrayNonUniformIndexing = false;
         features->shaderSampledImageArrayNonUniformIndexing = false;
         features->shaderStorageBufferArrayNonUniformIndexing = false;
         features->shaderStorageImageArrayNonUniformIndexing = false;
         features->shaderInputAttachmentArrayNonUniformIndexing = false;
         features->shaderUniformTexelBufferArrayNonUniformIndexing = false;
         features->shaderStorageTexelBufferArrayNonUniformIndexing = false;
         features->descriptorBindingUniformBufferUpdateAfterBind = false;
         features->descriptorBindingSampledImageUpdateAfterBind = false;
         features->descriptorBindingStorageImageUpdateAfterBind = false;
         features->descriptorBindingStorageBufferUpdateAfterBind = false;
         features->descriptorBindingUniformTexelBufferUpdateAfterBind = false;
         features->descriptorBindingStorageTexelBufferUpdateAfterBind = false;
         features->descriptorBindingUpdateUnusedWhilePending = false;
         features->descriptorBindingPartiallyBound = false;
         features->descriptorBindingVariableDescriptorCount = false;
         features->runtimeDescriptorArray = false;
         break;
      }
      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_CONDITIONAL_RENDERING_FEATURES_EXT: {
         VkPhysicalDeviceConditionalRenderingFeaturesEXT *features =
            (VkPhysicalDeviceConditionalRenderingFeaturesEXT *) ext;
         features->conditionalRendering = false;
         features->inheritedConditionalRendering = false;
         break;
      }
      default:
         break;
      }
   }
   tu_GetPhysicalDeviceFeatures(physicalDevice, &pFeatures->features);
}

void
tu_GetPhysicalDeviceProperties(VkPhysicalDevice physicalDevice,
                               VkPhysicalDeviceProperties *pProperties)
{
   TU_FROM_HANDLE(tu_physical_device, pdevice, physicalDevice);
   VkSampleCountFlags sample_counts = 0xf;

   /* Make sure the entire descriptor set is addressable with a signed
    * 32-bit int, i.e. the sum of all limits scaled by descriptor size has
    * to be at most 2 GiB. A combined image/sampler object counts as one of
    * each. This limit is for the pipeline layout, not for the set layout,
    * but there is no set limit, so we just set a pipeline limit. No app is
    * likely to hit this soon.
    */
   size_t max_descriptor_set_size =
      ((1ull << 31) - 16 * MAX_DYNAMIC_BUFFERS) /
      (32 /* uniform buffer, 32 due to potential space wasted on alignment */ +
       32 /* storage buffer, 32 due to potential space wasted on alignment */ +
       32 /* sampler, largest when combined with image */ +
       64 /* sampled image */ + 64 /* storage image */);
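   /* With the costs above this works out to roughly (1 << 31) / 224, i.e.
    * about 9.6 million descriptors per stage (the dynamic-buffer term is
    * negligible).
    */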

   VkPhysicalDeviceLimits limits = {
      .maxImageDimension1D = (1 << 14),
      .maxImageDimension2D = (1 << 14),
      .maxImageDimension3D = (1 << 11),
      .maxImageDimensionCube = (1 << 14),
      .maxImageArrayLayers = (1 << 11),
      .maxTexelBufferElements = 128 * 1024 * 1024,
      .maxUniformBufferRange = UINT32_MAX,
      .maxStorageBufferRange = UINT32_MAX,
      .maxPushConstantsSize = MAX_PUSH_CONSTANTS_SIZE,
      .maxMemoryAllocationCount = UINT32_MAX,
      .maxSamplerAllocationCount = 64 * 1024,
      .bufferImageGranularity = 64, /* A cache line */
      .sparseAddressSpaceSize = 0xffffffffu, /* buffer max size */
      .maxBoundDescriptorSets = MAX_SETS,
      .maxPerStageDescriptorSamplers = max_descriptor_set_size,
      .maxPerStageDescriptorUniformBuffers = max_descriptor_set_size,
      .maxPerStageDescriptorStorageBuffers = max_descriptor_set_size,
      .maxPerStageDescriptorSampledImages = max_descriptor_set_size,
      .maxPerStageDescriptorStorageImages = max_descriptor_set_size,
      .maxPerStageDescriptorInputAttachments = max_descriptor_set_size,
      .maxPerStageResources = max_descriptor_set_size,
      .maxDescriptorSetSamplers = max_descriptor_set_size,
      .maxDescriptorSetUniformBuffers = max_descriptor_set_size,
      .maxDescriptorSetUniformBuffersDynamic = MAX_DYNAMIC_UNIFORM_BUFFERS,
      .maxDescriptorSetStorageBuffers = max_descriptor_set_size,
      .maxDescriptorSetStorageBuffersDynamic = MAX_DYNAMIC_STORAGE_BUFFERS,
      .maxDescriptorSetSampledImages = max_descriptor_set_size,
      .maxDescriptorSetStorageImages = max_descriptor_set_size,
      .maxDescriptorSetInputAttachments = max_descriptor_set_size,
      .maxVertexInputAttributes = 32,
      .maxVertexInputBindings = 32,
      .maxVertexInputAttributeOffset = 2047,
      .maxVertexInputBindingStride = 2048,
      .maxVertexOutputComponents = 128,
      .maxTessellationGenerationLevel = 64,
      .maxTessellationPatchSize = 32,
      .maxTessellationControlPerVertexInputComponents = 128,
      .maxTessellationControlPerVertexOutputComponents = 128,
      .maxTessellationControlPerPatchOutputComponents = 120,
      .maxTessellationControlTotalOutputComponents = 4096,
      .maxTessellationEvaluationInputComponents = 128,
      .maxTessellationEvaluationOutputComponents = 128,
      .maxGeometryShaderInvocations = 127,
      .maxGeometryInputComponents = 64,
      .maxGeometryOutputComponents = 128,
      .maxGeometryOutputVertices = 256,
      .maxGeometryTotalOutputComponents = 1024,
      .maxFragmentInputComponents = 128,
      .maxFragmentOutputAttachments = 8,
      .maxFragmentDualSrcAttachments = 1,
      .maxFragmentCombinedOutputResources = 8,
      .maxComputeSharedMemorySize = 32768,
      .maxComputeWorkGroupCount = { 65535, 65535, 65535 },
      .maxComputeWorkGroupInvocations = 2048,
      .maxComputeWorkGroupSize = { 2048, 2048, 2048 },
      .subPixelPrecisionBits = 4 /* FIXME */,
      .subTexelPrecisionBits = 4 /* FIXME */,
      .mipmapPrecisionBits = 4 /* FIXME */,
      .maxDrawIndexedIndexValue = UINT32_MAX,
      .maxDrawIndirectCount = UINT32_MAX,
      .maxSamplerLodBias = 16,
      .maxSamplerAnisotropy = 16,
      .maxViewports = MAX_VIEWPORTS,
      .maxViewportDimensions = { (1 << 14), (1 << 14) },
      .viewportBoundsRange = { INT16_MIN, INT16_MAX },
      .viewportSubPixelBits = 8,
      .minMemoryMapAlignment = 4096, /* A page */
      .minTexelBufferOffsetAlignment = 1,
      .minUniformBufferOffsetAlignment = 4,
      .minStorageBufferOffsetAlignment = 4,
      .minTexelOffset = -32,
      .maxTexelOffset = 31,
      .minTexelGatherOffset = -32,
      .maxTexelGatherOffset = 31,
      .minInterpolationOffset = -2,
      .maxInterpolationOffset = 2,
      .subPixelInterpolationOffsetBits = 8,
      .maxFramebufferWidth = (1 << 14),
      .maxFramebufferHeight = (1 << 14),
      .maxFramebufferLayers = (1 << 10),
      .framebufferColorSampleCounts = sample_counts,
      .framebufferDepthSampleCounts = sample_counts,
      .framebufferStencilSampleCounts = sample_counts,
      .framebufferNoAttachmentsSampleCounts = sample_counts,
      .maxColorAttachments = MAX_RTS,
      .sampledImageColorSampleCounts = sample_counts,
      .sampledImageIntegerSampleCounts = VK_SAMPLE_COUNT_1_BIT,
      .sampledImageDepthSampleCounts = sample_counts,
      .sampledImageStencilSampleCounts = sample_counts,
      .storageImageSampleCounts = VK_SAMPLE_COUNT_1_BIT,
      .maxSampleMaskWords = 1,
      .timestampComputeAndGraphics = true,
      .timestampPeriod = 1,
      .maxClipDistances = 8,
      .maxCullDistances = 8,
      .maxCombinedClipAndCullDistances = 8,
      .discreteQueuePriorities = 1,
      .pointSizeRange = { 0.125, 255.875 },
      .lineWidthRange = { 0.0, 7.9921875 },
      .pointSizeGranularity = (1.0 / 8.0),
      .lineWidthGranularity = (1.0 / 128.0),
      .strictLines = false, /* FINISHME */
      .standardSampleLocations = true,
      .optimalBufferCopyOffsetAlignment = 128,
      .optimalBufferCopyRowPitchAlignment = 128,
      .nonCoherentAtomSize = 64,
   };

   *pProperties = (VkPhysicalDeviceProperties) {
      .apiVersion = tu_physical_device_api_version(pdevice),
      .driverVersion = vk_get_driver_version(),
      .vendorID = 0, /* TODO */
      .deviceID = 0,
      .deviceType = VK_PHYSICAL_DEVICE_TYPE_INTEGRATED_GPU,
      .limits = limits,
      .sparseProperties = { 0 },
   };

   strcpy(pProperties->deviceName, pdevice->name);
   memcpy(pProperties->pipelineCacheUUID, pdevice->cache_uuid, VK_UUID_SIZE);
}

void
tu_GetPhysicalDeviceProperties2(VkPhysicalDevice physicalDevice,
                                VkPhysicalDeviceProperties2KHR *pProperties)
{
   TU_FROM_HANDLE(tu_physical_device, pdevice, physicalDevice);
   tu_GetPhysicalDeviceProperties(physicalDevice, &pProperties->properties);

   vk_foreach_struct(ext, pProperties->pNext)
   {
      switch (ext->sType) {
      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PUSH_DESCRIPTOR_PROPERTIES_KHR: {
         VkPhysicalDevicePushDescriptorPropertiesKHR *properties =
            (VkPhysicalDevicePushDescriptorPropertiesKHR *) ext;
         properties->maxPushDescriptors = MAX_PUSH_DESCRIPTORS;
         break;
      }
      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_ID_PROPERTIES_KHR: {
         VkPhysicalDeviceIDPropertiesKHR *properties =
            (VkPhysicalDeviceIDPropertiesKHR *) ext;
         memcpy(properties->driverUUID, pdevice->driver_uuid, VK_UUID_SIZE);
         memcpy(properties->deviceUUID, pdevice->device_uuid, VK_UUID_SIZE);
         properties->deviceLUIDValid = false;
         break;
      }
      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MULTIVIEW_PROPERTIES_KHR: {
         VkPhysicalDeviceMultiviewPropertiesKHR *properties =
            (VkPhysicalDeviceMultiviewPropertiesKHR *) ext;
         properties->maxMultiviewViewCount = MAX_VIEWS;
         properties->maxMultiviewInstanceIndex = INT_MAX;
         break;
      }
      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_POINT_CLIPPING_PROPERTIES_KHR: {
         VkPhysicalDevicePointClippingPropertiesKHR *properties =
            (VkPhysicalDevicePointClippingPropertiesKHR *) ext;
         properties->pointClippingBehavior =
            VK_POINT_CLIPPING_BEHAVIOR_ALL_CLIP_PLANES_KHR;
         break;
      }
      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MAINTENANCE_3_PROPERTIES: {
         VkPhysicalDeviceMaintenance3Properties *properties =
            (VkPhysicalDeviceMaintenance3Properties *) ext;
         /* Make sure everything is addressable by a signed 32-bit int, and
          * our largest descriptors are 96 bytes.
          */
         properties->maxPerSetDescriptors = (1ull << 31) / 96;
         /* Our buffer size fields allow only this much */
         properties->maxMemoryAllocationSize = 0xFFFFFFFFull;
         break;
      }
      default:
         break;
      }
   }
}

static const VkQueueFamilyProperties tu_queue_family_properties = {
   .queueFlags =
      VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT | VK_QUEUE_TRANSFER_BIT,
   .queueCount = 1,
   .timestampValidBits = 64,
   .minImageTransferGranularity = (VkExtent3D) { 1, 1, 1 },
};

void
tu_GetPhysicalDeviceQueueFamilyProperties(
   VkPhysicalDevice physicalDevice,
   uint32_t *pQueueFamilyPropertyCount,
   VkQueueFamilyProperties *pQueueFamilyProperties)
{
   VK_OUTARRAY_MAKE(out, pQueueFamilyProperties, pQueueFamilyPropertyCount);

   vk_outarray_append(&out, p) { *p = tu_queue_family_properties; }
}

void
tu_GetPhysicalDeviceQueueFamilyProperties2(
   VkPhysicalDevice physicalDevice,
   uint32_t *pQueueFamilyPropertyCount,
   VkQueueFamilyProperties2KHR *pQueueFamilyProperties)
{
   VK_OUTARRAY_MAKE(out, pQueueFamilyProperties, pQueueFamilyPropertyCount);

   vk_outarray_append(&out, p)
   {
      p->queueFamilyProperties = tu_queue_family_properties;
   }
}

static uint64_t
tu_get_system_heap_size(void)
{
   struct sysinfo info;
   sysinfo(&info);

   uint64_t total_ram = (uint64_t) info.totalram * (uint64_t) info.mem_unit;

   /* We don't want to burn too much ram with the GPU. If the user has 4GiB
    * or less, we use at most half. If they have more than 4GiB, we use 3/4.
    */
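   /* E.g. a 2 GiB system yields a 1 GiB heap, while an 8 GiB system yields
    * a 6 GiB heap.
    */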
   uint64_t available_ram;
   if (total_ram <= 4ull * 1024ull * 1024ull * 1024ull)
      available_ram = total_ram / 2;
   else
      available_ram = total_ram * 3 / 4;

   return available_ram;
}

void
tu_GetPhysicalDeviceMemoryProperties(
   VkPhysicalDevice physicalDevice,
   VkPhysicalDeviceMemoryProperties *pMemoryProperties)
{
   pMemoryProperties->memoryHeapCount = 1;
   pMemoryProperties->memoryHeaps[0].size = tu_get_system_heap_size();
   pMemoryProperties->memoryHeaps[0].flags = VK_MEMORY_HEAP_DEVICE_LOCAL_BIT;

   pMemoryProperties->memoryTypeCount = 1;
   pMemoryProperties->memoryTypes[0].propertyFlags =
      VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT |
      VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT |
      VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
   pMemoryProperties->memoryTypes[0].heapIndex = 0;
}

void
tu_GetPhysicalDeviceMemoryProperties2(
   VkPhysicalDevice physicalDevice,
   VkPhysicalDeviceMemoryProperties2KHR *pMemoryProperties)
{
   tu_GetPhysicalDeviceMemoryProperties(
      physicalDevice, &pMemoryProperties->memoryProperties);
}

static VkResult
tu_queue_init(struct tu_device *device,
              struct tu_queue *queue,
              uint32_t queue_family_index,
              int idx,
              VkDeviceQueueCreateFlags flags)
{
   queue->_loader_data.loaderMagic = ICD_LOADER_MAGIC;
   queue->device = device;
   queue->queue_family_index = queue_family_index;
   queue->queue_idx = idx;
   queue->flags = flags;

   int ret = tu_drm_submitqueue_new(device, 0, &queue->msm_queue_id);
   if (ret)
      return VK_ERROR_INITIALIZATION_FAILED;

   queue->submit_fence_fd = -1;

   return VK_SUCCESS;
}

static void
tu_queue_finish(struct tu_queue *queue)
{
   if (queue->submit_fence_fd >= 0) {
      close(queue->submit_fence_fd);
   }
   tu_drm_submitqueue_close(queue->device, queue->msm_queue_id);
}

static int
tu_get_device_extension_index(const char *name)
{
   for (unsigned i = 0; i < TU_DEVICE_EXTENSION_COUNT; ++i) {
      if (strcmp(name, tu_device_extensions[i].extensionName) == 0)
         return i;
   }
   return -1;
}

VkResult
tu_CreateDevice(VkPhysicalDevice physicalDevice,
                const VkDeviceCreateInfo *pCreateInfo,
                const VkAllocationCallbacks *pAllocator,
                VkDevice *pDevice)
{
   TU_FROM_HANDLE(tu_physical_device, physical_device, physicalDevice);
   VkResult result;
   struct tu_device *device;

   /* Check enabled features */
   if (pCreateInfo->pEnabledFeatures) {
      VkPhysicalDeviceFeatures supported_features;
      tu_GetPhysicalDeviceFeatures(physicalDevice, &supported_features);
      VkBool32 *supported_feature = (VkBool32 *) &supported_features;
      VkBool32 *enabled_feature = (VkBool32 *) pCreateInfo->pEnabledFeatures;
      unsigned num_features =
         sizeof(VkPhysicalDeviceFeatures) / sizeof(VkBool32);
      for (uint32_t i = 0; i < num_features; i++) {
         if (enabled_feature[i] && !supported_feature[i])
            return vk_error(physical_device->instance,
                            VK_ERROR_FEATURE_NOT_PRESENT);
      }
   }

   device = vk_zalloc2(&physical_device->instance->alloc, pAllocator,
                       sizeof(*device), 8, VK_SYSTEM_ALLOCATION_SCOPE_DEVICE);
   if (!device)
      return vk_error(physical_device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);

   device->_loader_data.loaderMagic = ICD_LOADER_MAGIC;
   device->instance = physical_device->instance;
   device->physical_device = physical_device;

   if (pAllocator)
      device->alloc = *pAllocator;
   else
      device->alloc = physical_device->instance->alloc;

   for (uint32_t i = 0; i < pCreateInfo->enabledExtensionCount; i++) {
      const char *ext_name = pCreateInfo->ppEnabledExtensionNames[i];
      int index = tu_get_device_extension_index(ext_name);
      if (index < 0 ||
          !physical_device->supported_extensions.extensions[index]) {
         vk_free(&device->alloc, device);
         return vk_error(physical_device->instance,
                         VK_ERROR_EXTENSION_NOT_PRESENT);
      }

      device->enabled_extensions.extensions[index] = true;
   }

   for (unsigned i = 0; i < pCreateInfo->queueCreateInfoCount; i++) {
      const VkDeviceQueueCreateInfo *queue_create =
         &pCreateInfo->pQueueCreateInfos[i];
      uint32_t qfi = queue_create->queueFamilyIndex;
      device->queues[qfi] = vk_alloc(
         &device->alloc, queue_create->queueCount * sizeof(struct tu_queue),
         8, VK_SYSTEM_ALLOCATION_SCOPE_DEVICE);
      if (!device->queues[qfi]) {
         result = VK_ERROR_OUT_OF_HOST_MEMORY;
         goto fail;
      }

      memset(device->queues[qfi], 0,
             queue_create->queueCount * sizeof(struct tu_queue));

      device->queue_count[qfi] = queue_create->queueCount;

      for (unsigned q = 0; q < queue_create->queueCount; q++) {
         result = tu_queue_init(device, &device->queues[qfi][q], qfi, q,
                                queue_create->flags);
         if (result != VK_SUCCESS)
            goto fail;
      }
   }

   VkPipelineCacheCreateInfo ci;
   ci.sType = VK_STRUCTURE_TYPE_PIPELINE_CACHE_CREATE_INFO;
   ci.pNext = NULL;
   ci.flags = 0;
   ci.pInitialData = NULL;
   ci.initialDataSize = 0;
   VkPipelineCache pc;
   result =
      tu_CreatePipelineCache(tu_device_to_handle(device), &ci, NULL, &pc);
   if (result != VK_SUCCESS)
      goto fail;

   device->mem_cache = tu_pipeline_cache_from_handle(pc);

   *pDevice = tu_device_to_handle(device);
   return VK_SUCCESS;

fail:
   for (unsigned i = 0; i < TU_MAX_QUEUE_FAMILIES; i++) {
      for (unsigned q = 0; q < device->queue_count[i]; q++)
         tu_queue_finish(&device->queues[i][q]);
      if (device->queue_count[i])
         vk_free(&device->alloc, device->queues[i]);
   }

   vk_free(&device->alloc, device);
   return result;
}

void
tu_DestroyDevice(VkDevice _device, const VkAllocationCallbacks *pAllocator)
{
   TU_FROM_HANDLE(tu_device, device, _device);

   if (!device)
      return;

   for (unsigned i = 0; i < TU_MAX_QUEUE_FAMILIES; i++) {
      for (unsigned q = 0; q < device->queue_count[i]; q++)
         tu_queue_finish(&device->queues[i][q]);
      if (device->queue_count[i])
         vk_free(&device->alloc, device->queues[i]);
   }

   VkPipelineCache pc = tu_pipeline_cache_to_handle(device->mem_cache);
   tu_DestroyPipelineCache(tu_device_to_handle(device), pc, NULL);

   vk_free(&device->alloc, device);
}

VkResult
tu_EnumerateInstanceLayerProperties(uint32_t *pPropertyCount,
                                    VkLayerProperties *pProperties)
{
   *pPropertyCount = 0;
   return VK_SUCCESS;
}

VkResult
tu_EnumerateDeviceLayerProperties(VkPhysicalDevice physicalDevice,
                                  uint32_t *pPropertyCount,
                                  VkLayerProperties *pProperties)
{
   *pPropertyCount = 0;
   return VK_SUCCESS;
}

void
tu_GetDeviceQueue2(VkDevice _device,
                   const VkDeviceQueueInfo2 *pQueueInfo,
                   VkQueue *pQueue)
{
   TU_FROM_HANDLE(tu_device, device, _device);
   struct tu_queue *queue;

   queue =
      &device->queues[pQueueInfo->queueFamilyIndex][pQueueInfo->queueIndex];
   if (pQueueInfo->flags != queue->flags) {
      /* From the Vulkan 1.1.70 spec:
       *
       * "The queue returned by vkGetDeviceQueue2 must have the same
       * flags value from this structure as that used at device
       * creation time in a VkDeviceQueueCreateInfo instance. If no
       * matching flags were specified at device creation time then
       * pQueue will return VK_NULL_HANDLE."
       */
      *pQueue = VK_NULL_HANDLE;
      return;
   }

   *pQueue = tu_queue_to_handle(queue);
}

void
tu_GetDeviceQueue(VkDevice _device,
                  uint32_t queueFamilyIndex,
                  uint32_t queueIndex,
                  VkQueue *pQueue)
{
   const VkDeviceQueueInfo2 info =
      (VkDeviceQueueInfo2) { .sType = VK_STRUCTURE_TYPE_DEVICE_QUEUE_INFO_2,
                             .queueFamilyIndex = queueFamilyIndex,
                             .queueIndex = queueIndex };

   tu_GetDeviceQueue2(_device, &info, pQueue);
}

VkResult
tu_QueueSubmit(VkQueue _queue,
               uint32_t submitCount,
               const VkSubmitInfo *pSubmits,
               VkFence _fence)
{
   TU_FROM_HANDLE(tu_queue, queue, _queue);

   for (uint32_t i = 0; i < submitCount; ++i) {
      const VkSubmitInfo *submit = pSubmits + i;
      const bool last_submit = (i == submitCount - 1);
      struct tu_bo_list bo_list;
      tu_bo_list_init(&bo_list);

      uint32_t entry_count = 0;
      for (uint32_t j = 0; j < submit->commandBufferCount; ++j) {
         TU_FROM_HANDLE(tu_cmd_buffer, cmdbuf, submit->pCommandBuffers[j]);
         entry_count += cmdbuf->cs.entry_count;
      }

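      /* cmds is sized by the counting pass above; the loop below fills in
       * one drm_msm_gem_submit_cmd per command-stream entry.
       */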
      struct drm_msm_gem_submit_cmd cmds[entry_count];
      uint32_t entry_idx = 0;
      for (uint32_t j = 0; j < submit->commandBufferCount; ++j) {
         TU_FROM_HANDLE(tu_cmd_buffer, cmdbuf, submit->pCommandBuffers[j]);
         struct tu_cs *cs = &cmdbuf->cs;
         for (unsigned i = 0; i < cs->entry_count; ++i, ++entry_idx) {
            cmds[entry_idx].type = MSM_SUBMIT_CMD_BUF;
            cmds[entry_idx].submit_idx = tu_bo_list_add(
               &bo_list, cs->entries[i].bo, MSM_SUBMIT_BO_READ);
            cmds[entry_idx].submit_offset = cs->entries[i].offset;
            cmds[entry_idx].size = cs->entries[i].size;
            cmds[entry_idx].pad = 0;
            cmds[entry_idx].nr_relocs = 0;
            cmds[entry_idx].relocs = 0;
         }

         tu_bo_list_merge(&bo_list, &cmdbuf->bo_list);
      }

      uint32_t flags = MSM_PIPE_3D0;
      if (last_submit) {
         flags |= MSM_SUBMIT_FENCE_FD_OUT;
      }

      struct drm_msm_gem_submit req = {
         .flags = flags,
         .queueid = queue->msm_queue_id,
         .bos = (uint64_t) (uintptr_t) bo_list.bo_infos,
         .nr_bos = bo_list.count,
         .cmds = (uint64_t) (uintptr_t) cmds,
         .nr_cmds = entry_count,
      };

      int ret = drmCommandWriteRead(queue->device->physical_device->local_fd,
                                    DRM_MSM_GEM_SUBMIT,
                                    &req, sizeof(req));
      if (ret) {
         fprintf(stderr, "submit failed: %s\n", strerror(errno));
         abort();
      }

      tu_bo_list_destroy(&bo_list);

      if (last_submit) {
         /* no need to merge fences as queue execution is serialized */
         if (queue->submit_fence_fd >= 0) {
            close(queue->submit_fence_fd);
         }
         queue->submit_fence_fd = req.fence_fd;
      }
   }
   return VK_SUCCESS;
}

VkResult
tu_QueueWaitIdle(VkQueue _queue)
{
   TU_FROM_HANDLE(tu_queue, queue, _queue);

   if (queue->submit_fence_fd >= 0) {
      int ret = sync_wait(queue->submit_fence_fd, -1);
      if (ret)
         tu_loge("sync_wait on fence fd %d failed", queue->submit_fence_fd);

      close(queue->submit_fence_fd);
      queue->submit_fence_fd = -1;
   }

   return VK_SUCCESS;
}

VkResult
tu_DeviceWaitIdle(VkDevice _device)
{
   TU_FROM_HANDLE(tu_device, device, _device);

   for (unsigned i = 0; i < TU_MAX_QUEUE_FAMILIES; i++) {
      for (unsigned q = 0; q < device->queue_count[i]; q++) {
         tu_QueueWaitIdle(tu_queue_to_handle(&device->queues[i][q]));
      }
   }
   return VK_SUCCESS;
}

VkResult
tu_EnumerateInstanceExtensionProperties(const char *pLayerName,
                                        uint32_t *pPropertyCount,
                                        VkExtensionProperties *pProperties)
{
   VK_OUTARRAY_MAKE(out, pProperties, pPropertyCount);

   /* We support no layers */
   if (pLayerName)
      return vk_error(NULL, VK_ERROR_LAYER_NOT_PRESENT);

   for (int i = 0; i < TU_INSTANCE_EXTENSION_COUNT; i++) {
      if (tu_supported_instance_extensions.extensions[i]) {
         vk_outarray_append(&out, prop) { *prop = tu_instance_extensions[i]; }
      }
   }

   return vk_outarray_status(&out);
}

VkResult
tu_EnumerateDeviceExtensionProperties(VkPhysicalDevice physicalDevice,
                                      const char *pLayerName,
                                      uint32_t *pPropertyCount,
                                      VkExtensionProperties *pProperties)
{
   TU_FROM_HANDLE(tu_physical_device, device, physicalDevice);
   VK_OUTARRAY_MAKE(out, pProperties, pPropertyCount);

   /* We support no layers */
   if (pLayerName)
      return vk_error(NULL, VK_ERROR_LAYER_NOT_PRESENT);

   for (int i = 0; i < TU_DEVICE_EXTENSION_COUNT; i++) {
      if (device->supported_extensions.extensions[i]) {
         vk_outarray_append(&out, prop) { *prop = tu_device_extensions[i]; }
      }
   }

   return vk_outarray_status(&out);
}

PFN_vkVoidFunction
tu_GetInstanceProcAddr(VkInstance _instance, const char *pName)
{
   TU_FROM_HANDLE(tu_instance, instance, _instance);

   return tu_lookup_entrypoint_checked(
      pName, instance ? instance->api_version : 0,
      instance ? &instance->enabled_extensions : NULL, NULL);
}

/* The loader wants us to expose a second GetInstanceProcAddr function
 * to work around certain LD_PRELOAD issues seen in apps.
 */
PUBLIC
VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL
vk_icdGetInstanceProcAddr(VkInstance instance, const char *pName);

PUBLIC
VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL
vk_icdGetInstanceProcAddr(VkInstance instance, const char *pName)
{
   return tu_GetInstanceProcAddr(instance, pName);
}

PFN_vkVoidFunction
tu_GetDeviceProcAddr(VkDevice _device, const char *pName)
{
   TU_FROM_HANDLE(tu_device, device, _device);

   return tu_lookup_entrypoint_checked(pName, device->instance->api_version,
                                       &device->instance->enabled_extensions,
                                       &device->enabled_extensions);
}

static VkResult
tu_alloc_memory(struct tu_device *device,
                const VkMemoryAllocateInfo *pAllocateInfo,
                const VkAllocationCallbacks *pAllocator,
                VkDeviceMemory *pMem)
{
   struct tu_device_memory *mem;
   VkResult result;

   assert(pAllocateInfo->sType == VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO);

   if (pAllocateInfo->allocationSize == 0) {
      /* Apparently, this is allowed */
      *pMem = VK_NULL_HANDLE;
      return VK_SUCCESS;
   }

   mem = vk_alloc2(&device->alloc, pAllocator, sizeof(*mem), 8,
                   VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (mem == NULL)
      return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);

   result = tu_bo_init_new(device, &mem->bo, pAllocateInfo->allocationSize);
   if (result != VK_SUCCESS) {
      vk_free2(&device->alloc, pAllocator, mem);
      return result;
   }

   mem->size = pAllocateInfo->allocationSize;
   mem->type_index = pAllocateInfo->memoryTypeIndex;

   mem->map = NULL;
   mem->user_ptr = NULL;

   *pMem = tu_device_memory_to_handle(mem);

   return VK_SUCCESS;
}

VkResult
tu_AllocateMemory(VkDevice _device,
                  const VkMemoryAllocateInfo *pAllocateInfo,
                  const VkAllocationCallbacks *pAllocator,
                  VkDeviceMemory *pMem)
{
   TU_FROM_HANDLE(tu_device, device, _device);
   return tu_alloc_memory(device, pAllocateInfo, pAllocator, pMem);
}

void
tu_FreeMemory(VkDevice _device,
              VkDeviceMemory _mem,
              const VkAllocationCallbacks *pAllocator)
{
   TU_FROM_HANDLE(tu_device, device, _device);
   TU_FROM_HANDLE(tu_device_memory, mem, _mem);

   if (mem == NULL)
      return;

   tu_bo_finish(device, &mem->bo);
   vk_free2(&device->alloc, pAllocator, mem);
}

VkResult
tu_MapMemory(VkDevice _device,
             VkDeviceMemory _memory,
             VkDeviceSize offset,
             VkDeviceSize size,
             VkMemoryMapFlags flags,
             void **ppData)
{
   TU_FROM_HANDLE(tu_device, device, _device);
   TU_FROM_HANDLE(tu_device_memory, mem, _memory);
   VkResult result;

   if (mem == NULL) {
      *ppData = NULL;
      return VK_SUCCESS;
   }

   if (mem->user_ptr) {
      *ppData = mem->user_ptr;
   } else if (!mem->map) {
      result = tu_bo_map(device, &mem->bo);
      if (result != VK_SUCCESS)
         return result;
      *ppData = mem->map = mem->bo.map;
   } else
      *ppData = mem->map;

   if (*ppData) {
      *ppData += offset;
      return VK_SUCCESS;
   }

   return vk_error(device->instance, VK_ERROR_MEMORY_MAP_FAILED);
}

void
tu_UnmapMemory(VkDevice _device, VkDeviceMemory _memory)
{
   /* I do not see any unmapping done by the freedreno Gallium driver. */
}

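/* The only memory type advertised in tu_GetPhysicalDeviceMemoryProperties()
 * above is HOST_COHERENT, which is why the flush and invalidate entry
 * points below can be no-ops.
 */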
VkResult
tu_FlushMappedMemoryRanges(VkDevice _device,
                           uint32_t memoryRangeCount,
                           const VkMappedMemoryRange *pMemoryRanges)
{
   return VK_SUCCESS;
}

VkResult
tu_InvalidateMappedMemoryRanges(VkDevice _device,
                                uint32_t memoryRangeCount,
                                const VkMappedMemoryRange *pMemoryRanges)
{
   return VK_SUCCESS;
}

void
tu_GetBufferMemoryRequirements(VkDevice _device,
                               VkBuffer _buffer,
                               VkMemoryRequirements *pMemoryRequirements)
{
   TU_FROM_HANDLE(tu_buffer, buffer, _buffer);

   pMemoryRequirements->memoryTypeBits = 1;
   pMemoryRequirements->alignment = 16;
   pMemoryRequirements->size =
      align64(buffer->size, pMemoryRequirements->alignment);
}

void
tu_GetBufferMemoryRequirements2(
   VkDevice device,
   const VkBufferMemoryRequirementsInfo2KHR *pInfo,
   VkMemoryRequirements2KHR *pMemoryRequirements)
{
   tu_GetBufferMemoryRequirements(device, pInfo->buffer,
                                  &pMemoryRequirements->memoryRequirements);
}

void
tu_GetImageMemoryRequirements(VkDevice _device,
                              VkImage _image,
                              VkMemoryRequirements *pMemoryRequirements)
{
   TU_FROM_HANDLE(tu_image, image, _image);

   pMemoryRequirements->memoryTypeBits = 1;
   pMemoryRequirements->size = image->size;
   pMemoryRequirements->alignment = image->alignment;
}

void
tu_GetImageMemoryRequirements2(VkDevice device,
                               const VkImageMemoryRequirementsInfo2KHR *pInfo,
                               VkMemoryRequirements2KHR *pMemoryRequirements)
{
   tu_GetImageMemoryRequirements(device, pInfo->image,
                                 &pMemoryRequirements->memoryRequirements);
}

void
tu_GetImageSparseMemoryRequirements(
   VkDevice device,
   VkImage image,
   uint32_t *pSparseMemoryRequirementCount,
   VkSparseImageMemoryRequirements *pSparseMemoryRequirements)
{
   tu_stub();
}

void
tu_GetImageSparseMemoryRequirements2(
   VkDevice device,
   const VkImageSparseMemoryRequirementsInfo2KHR *pInfo,
   uint32_t *pSparseMemoryRequirementCount,
   VkSparseImageMemoryRequirements2KHR *pSparseMemoryRequirements)
{
   tu_stub();
}

void
tu_GetDeviceMemoryCommitment(VkDevice device,
                             VkDeviceMemory memory,
                             VkDeviceSize *pCommittedMemoryInBytes)
{
   *pCommittedMemoryInBytes = 0;
}

VkResult
tu_BindBufferMemory2(VkDevice device,
                     uint32_t bindInfoCount,
                     const VkBindBufferMemoryInfoKHR *pBindInfos)
{
   return VK_SUCCESS;
}

VkResult
tu_BindBufferMemory(VkDevice device,
                    VkBuffer buffer,
                    VkDeviceMemory memory,
                    VkDeviceSize memoryOffset)
{
   const VkBindBufferMemoryInfoKHR info = {
      .sType = VK_STRUCTURE_TYPE_BIND_BUFFER_MEMORY_INFO_KHR,
      .buffer = buffer,
      .memory = memory,
      .memoryOffset = memoryOffset
   };

   return tu_BindBufferMemory2(device, 1, &info);
}

VkResult
tu_BindImageMemory2(VkDevice device,
                    uint32_t bindInfoCount,
                    const VkBindImageMemoryInfo *pBindInfos)
{
   for (uint32_t i = 0; i < bindInfoCount; ++i) {
      TU_FROM_HANDLE(tu_image, image, pBindInfos[i].image);
      TU_FROM_HANDLE(tu_device_memory, mem, pBindInfos[i].memory);

      if (mem) {
         image->bo = &mem->bo;
         image->bo_offset = pBindInfos[i].memoryOffset;
      } else {
         image->bo = NULL;
         image->bo_offset = 0;
      }
   }

   return VK_SUCCESS;
}

VkResult
tu_BindImageMemory(VkDevice device,
                   VkImage image,
                   VkDeviceMemory memory,
                   VkDeviceSize memoryOffset)
{
   const VkBindImageMemoryInfo info = {
      .sType = VK_STRUCTURE_TYPE_BIND_IMAGE_MEMORY_INFO,
      .image = image,
      .memory = memory,
      .memoryOffset = memoryOffset
   };

   return tu_BindImageMemory2(device, 1, &info);
}

VkResult
tu_QueueBindSparse(VkQueue _queue,
                   uint32_t bindInfoCount,
                   const VkBindSparseInfo *pBindInfo,
                   VkFence _fence)
{
   return VK_SUCCESS;
}

VkResult
tu_CreateFence(VkDevice _device,
               const VkFenceCreateInfo *pCreateInfo,
               const VkAllocationCallbacks *pAllocator,
               VkFence *pFence)
{
   TU_FROM_HANDLE(tu_device, device, _device);

   struct tu_fence *fence =
      vk_alloc2(&device->alloc, pAllocator, sizeof(*fence), 8,
                VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);

   if (!fence)
      return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);

   *pFence = tu_fence_to_handle(fence);

   return VK_SUCCESS;
}

void
tu_DestroyFence(VkDevice _device,
                VkFence _fence,
                const VkAllocationCallbacks *pAllocator)
{
   TU_FROM_HANDLE(tu_device, device, _device);
   TU_FROM_HANDLE(tu_fence, fence, _fence);

   if (!fence)
      return;

   vk_free2(&device->alloc, pAllocator, fence);
}

VkResult
tu_WaitForFences(VkDevice _device,
                 uint32_t fenceCount,
                 const VkFence *pFences,
                 VkBool32 waitAll,
                 uint64_t timeout)
{
   return VK_SUCCESS;
}

VkResult
tu_ResetFences(VkDevice _device, uint32_t fenceCount, const VkFence *pFences)
{
   return VK_SUCCESS;
}

VkResult
tu_GetFenceStatus(VkDevice _device, VkFence _fence)
{
   return VK_SUCCESS;
}

// Queue semaphore functions

VkResult
tu_CreateSemaphore(VkDevice _device,
                   const VkSemaphoreCreateInfo *pCreateInfo,
                   const VkAllocationCallbacks *pAllocator,
                   VkSemaphore *pSemaphore)
{
   TU_FROM_HANDLE(tu_device, device, _device);

   struct tu_semaphore *sem =
      vk_alloc2(&device->alloc, pAllocator, sizeof(*sem), 8,
                VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (!sem)
      return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);

   *pSemaphore = tu_semaphore_to_handle(sem);
   return VK_SUCCESS;
}

void
tu_DestroySemaphore(VkDevice _device,
                    VkSemaphore _semaphore,
                    const VkAllocationCallbacks *pAllocator)
{
   TU_FROM_HANDLE(tu_device, device, _device);
   TU_FROM_HANDLE(tu_semaphore, sem, _semaphore);
   if (!_semaphore)
      return;

   vk_free2(&device->alloc, pAllocator, sem);
}

VkResult
tu_CreateEvent(VkDevice _device,
               const VkEventCreateInfo *pCreateInfo,
               const VkAllocationCallbacks *pAllocator,
               VkEvent *pEvent)
{
   TU_FROM_HANDLE(tu_device, device, _device);
   struct tu_event *event =
      vk_alloc2(&device->alloc, pAllocator, sizeof(*event), 8,
                VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);

   if (!event)
      return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);

   *pEvent = tu_event_to_handle(event);

   return VK_SUCCESS;
}

void
tu_DestroyEvent(VkDevice _device,
                VkEvent _event,
                const VkAllocationCallbacks *pAllocator)
{
   TU_FROM_HANDLE(tu_device, device, _device);
   TU_FROM_HANDLE(tu_event, event, _event);

   if (!event)
      return;
   vk_free2(&device->alloc, pAllocator, event);
}

VkResult
tu_GetEventStatus(VkDevice _device, VkEvent _event)
{
   TU_FROM_HANDLE(tu_event, event, _event);

   if (*event->map == 1)
      return VK_EVENT_SET;
   return VK_EVENT_RESET;
}

VkResult
tu_SetEvent(VkDevice _device, VkEvent _event)
{
   TU_FROM_HANDLE(tu_event, event, _event);
   *event->map = 1;

   return VK_SUCCESS;
}

VkResult
tu_ResetEvent(VkDevice _device, VkEvent _event)
{
   TU_FROM_HANDLE(tu_event, event, _event);
   *event->map = 0;

   return VK_SUCCESS;
}

VkResult
tu_CreateBuffer(VkDevice _device,
                const VkBufferCreateInfo *pCreateInfo,
                const VkAllocationCallbacks *pAllocator,
                VkBuffer *pBuffer)
{
   TU_FROM_HANDLE(tu_device, device, _device);
   struct tu_buffer *buffer;

   assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO);

   buffer = vk_alloc2(&device->alloc, pAllocator, sizeof(*buffer), 8,
                      VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (buffer == NULL)
      return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);

   buffer->size = pCreateInfo->size;
   buffer->usage = pCreateInfo->usage;
   buffer->flags = pCreateInfo->flags;

   *pBuffer = tu_buffer_to_handle(buffer);

   return VK_SUCCESS;
}

void
tu_DestroyBuffer(VkDevice _device,
                 VkBuffer _buffer,
                 const VkAllocationCallbacks *pAllocator)
{
   TU_FROM_HANDLE(tu_device, device, _device);
   TU_FROM_HANDLE(tu_buffer, buffer, _buffer);

   if (!buffer)
      return;

   vk_free2(&device->alloc, pAllocator, buffer);
}

static uint32_t
tu_surface_max_layer_count(struct tu_image_view *iview)
{
   return iview->type == VK_IMAGE_VIEW_TYPE_3D
             ? iview->extent.depth
             : (iview->base_layer + iview->layer_count);
}
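
/* tu_CreateFramebuffer() below clamps the framebuffer dimensions and layer
 * count to the smallest attachment, using the helper above for the layer
 * count.
 */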

VkResult
tu_CreateFramebuffer(VkDevice _device,
                     const VkFramebufferCreateInfo *pCreateInfo,
                     const VkAllocationCallbacks *pAllocator,
                     VkFramebuffer *pFramebuffer)
{
   TU_FROM_HANDLE(tu_device, device, _device);
   struct tu_framebuffer *framebuffer;

   assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO);

   size_t size = sizeof(*framebuffer) + sizeof(struct tu_attachment_info) *
                                           pCreateInfo->attachmentCount;
   framebuffer = vk_alloc2(&device->alloc, pAllocator, size, 8,
                           VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (framebuffer == NULL)
      return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);

   framebuffer->attachment_count = pCreateInfo->attachmentCount;
   framebuffer->width = pCreateInfo->width;
   framebuffer->height = pCreateInfo->height;
   framebuffer->layers = pCreateInfo->layers;
   for (uint32_t i = 0; i < pCreateInfo->attachmentCount; i++) {
      VkImageView _iview = pCreateInfo->pAttachments[i];
      struct tu_image_view *iview = tu_image_view_from_handle(_iview);
      framebuffer->attachments[i].attachment = iview;

      framebuffer->width = MIN2(framebuffer->width, iview->extent.width);
      framebuffer->height = MIN2(framebuffer->height, iview->extent.height);
      framebuffer->layers =
         MIN2(framebuffer->layers, tu_surface_max_layer_count(iview));
   }

   *pFramebuffer = tu_framebuffer_to_handle(framebuffer);
   return VK_SUCCESS;
}

void
tu_DestroyFramebuffer(VkDevice _device,
                      VkFramebuffer _fb,
                      const VkAllocationCallbacks *pAllocator)
{
   TU_FROM_HANDLE(tu_device, device, _device);
   TU_FROM_HANDLE(tu_framebuffer, fb, _fb);

   if (!fb)
      return;
   vk_free2(&device->alloc, pAllocator, fb);
}

static void
tu_init_sampler(struct tu_device *device,
                struct tu_sampler *sampler,
                const VkSamplerCreateInfo *pCreateInfo)
{
}

VkResult
tu_CreateSampler(VkDevice _device,
                 const VkSamplerCreateInfo *pCreateInfo,
                 const VkAllocationCallbacks *pAllocator,
                 VkSampler *pSampler)
{
   TU_FROM_HANDLE(tu_device, device, _device);
   struct tu_sampler *sampler;

   assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_SAMPLER_CREATE_INFO);

   sampler = vk_alloc2(&device->alloc, pAllocator, sizeof(*sampler), 8,
                       VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (!sampler)
      return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);

   tu_init_sampler(device, sampler, pCreateInfo);
   *pSampler = tu_sampler_to_handle(sampler);

   return VK_SUCCESS;
}

void
tu_DestroySampler(VkDevice _device,
                  VkSampler _sampler,
                  const VkAllocationCallbacks *pAllocator)
{
   TU_FROM_HANDLE(tu_device, device, _device);
   TU_FROM_HANDLE(tu_sampler, sampler, _sampler);

   if (!sampler)
      return;
   vk_free2(&device->alloc, pAllocator, sampler);
}

/* vk_icd.h does not declare this function, so we declare it here to
 * suppress Wmissing-prototypes.
 */
PUBLIC VKAPI_ATTR VkResult VKAPI_CALL
vk_icdNegotiateLoaderICDInterfaceVersion(uint32_t *pSupportedVersion);

PUBLIC VKAPI_ATTR VkResult VKAPI_CALL
vk_icdNegotiateLoaderICDInterfaceVersion(uint32_t *pSupportedVersion)
{
   /* For the full details on loader interface versioning, see
    * <https://github.com/KhronosGroup/Vulkan-LoaderAndValidationLayers/blob/master/loader/LoaderAndLayerInterface.md>.
    * What follows is a condensed summary, to help you navigate the large and
    * confusing official doc.
    *
    * - Loader interface v0 is incompatible with later versions. We don't
    *   support it.
    *
    * - In loader interface v1:
    *     - The first ICD entrypoint called by the loader is
    *       vk_icdGetInstanceProcAddr(). The ICD must statically expose this
    *       entrypoint.
    *     - The ICD must statically expose no other Vulkan symbol unless it
    *       is linked with -Bsymbolic.
    *     - Each dispatchable Vulkan handle created by the ICD must be
    *       a pointer to a struct whose first member is VK_LOADER_DATA. The
    *       ICD must initialize VK_LOADER_DATA.loaderMagic to
    *       ICD_LOADER_MAGIC.
    *     - The loader implements vkCreate{PLATFORM}SurfaceKHR() and
    *       vkDestroySurfaceKHR(). The ICD must be capable of working with
    *       such loader-managed surfaces.
    *
    * - Loader interface v2 differs from v1 in:
    *     - The first ICD entrypoint called by the loader is
    *       vk_icdNegotiateLoaderICDInterfaceVersion(). The ICD must
    *       statically expose this entrypoint.
    *
    * - Loader interface v3 differs from v2 in:
    *     - The ICD must implement vkCreate{PLATFORM}SurfaceKHR(),
    *       vkDestroySurfaceKHR(), and other API which uses VkSurfaceKHR,
    *       because the loader no longer does so.
    */
   *pSupportedVersion = MIN2(*pSupportedVersion, 3u);
   return VK_SUCCESS;
}

void
tu_GetPhysicalDeviceExternalSemaphoreProperties(
   VkPhysicalDevice physicalDevice,
   const VkPhysicalDeviceExternalSemaphoreInfoKHR *pExternalSemaphoreInfo,
   VkExternalSemaphorePropertiesKHR *pExternalSemaphoreProperties)
{
   pExternalSemaphoreProperties->exportFromImportedHandleTypes = 0;
   pExternalSemaphoreProperties->compatibleHandleTypes = 0;
   pExternalSemaphoreProperties->externalSemaphoreFeatures = 0;
}

void
tu_GetPhysicalDeviceExternalFenceProperties(
   VkPhysicalDevice physicalDevice,
   const VkPhysicalDeviceExternalFenceInfoKHR *pExternalFenceInfo,
   VkExternalFencePropertiesKHR *pExternalFenceProperties)
{
   pExternalFenceProperties->exportFromImportedHandleTypes = 0;
   pExternalFenceProperties->compatibleHandleTypes = 0;
   pExternalFenceProperties->externalFenceFeatures = 0;
}

VkResult
tu_CreateDebugReportCallbackEXT(
   VkInstance _instance,
   const VkDebugReportCallbackCreateInfoEXT *pCreateInfo,
   const VkAllocationCallbacks *pAllocator,
   VkDebugReportCallbackEXT *pCallback)
{
   TU_FROM_HANDLE(tu_instance, instance, _instance);
   return vk_create_debug_report_callback(&instance->debug_report_callbacks,
                                          pCreateInfo, pAllocator,
                                          &instance->alloc, pCallback);
}

void
tu_DestroyDebugReportCallbackEXT(VkInstance _instance,
                                 VkDebugReportCallbackEXT _callback,
                                 const VkAllocationCallbacks *pAllocator)
{
   TU_FROM_HANDLE(tu_instance, instance, _instance);
   vk_destroy_debug_report_callback(&instance->debug_report_callbacks,
                                    _callback, pAllocator, &instance->alloc);
}

void
tu_DebugReportMessageEXT(VkInstance _instance,
                         VkDebugReportFlagsEXT flags,
                         VkDebugReportObjectTypeEXT objectType,
                         uint64_t object,
                         size_t location,
                         int32_t messageCode,
                         const char *pLayerPrefix,
                         const char *pMessage)
{
   TU_FROM_HANDLE(tu_instance, instance, _instance);
   vk_debug_report(&instance->debug_report_callbacks, flags, objectType,
                   object, location, messageCode, pLayerPrefix, pMessage);
}

void
tu_GetDeviceGroupPeerMemoryFeatures(
   VkDevice device,
   uint32_t heapIndex,
   uint32_t localDeviceIndex,
   uint32_t remoteDeviceIndex,
   VkPeerMemoryFeatureFlags *pPeerMemoryFeatures)
{
   assert(localDeviceIndex == remoteDeviceIndex);

   *pPeerMemoryFeatures = VK_PEER_MEMORY_FEATURE_COPY_SRC_BIT |
                          VK_PEER_MEMORY_FEATURE_COPY_DST_BIT |
                          VK_PEER_MEMORY_FEATURE_GENERIC_SRC_BIT |
                          VK_PEER_MEMORY_FEATURE_GENERIC_DST_BIT;
}