turnip: use msm_drm.h from inc_freedreno
[mesa.git] / src / freedreno / vulkan / tu_device.c
1 /*
2 * Copyright © 2016 Red Hat.
3 * Copyright © 2016 Bas Nieuwenhuizen
4 *
5 * based in part on anv driver which is:
6 * Copyright © 2015 Intel Corporation
7 *
8 * Permission is hereby granted, free of charge, to any person obtaining a
9 * copy of this software and associated documentation files (the "Software"),
10 * to deal in the Software without restriction, including without limitation
11 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
12 * and/or sell copies of the Software, and to permit persons to whom the
13 * Software is furnished to do so, subject to the following conditions:
14 *
15 * The above copyright notice and this permission notice (including the next
16 * paragraph) shall be included in all copies or substantial portions of the
17 * Software.
18 *
19 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
20 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
21 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
22 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
23 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
24 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
25 * DEALINGS IN THE SOFTWARE.
26 */
27
28 #include "tu_private.h"
29
30 #include <fcntl.h>
31 #include <stdbool.h>
32 #include <string.h>
33 #include <sys/mman.h>
34 #include <sys/sysinfo.h>
35 #include <unistd.h>
36 #include <xf86drm.h>
37
38 #include "util/debug.h"
39 #include "util/disk_cache.h"
40 #include "util/strtod.h"
41 #include "vk_format.h"
42 #include "vk_util.h"
43
44 #include "drm/msm_drm.h"
45
46 static int
47 tu_device_get_cache_uuid(uint16_t family, void *uuid)
48 {
49 uint32_t mesa_timestamp;
50 uint16_t f = family;
51 memset(uuid, 0, VK_UUID_SIZE);
52 if (!disk_cache_get_function_timestamp(tu_device_get_cache_uuid,
53 &mesa_timestamp))
54 return -1;
55
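/* Rough layout of the resulting cache UUID: bytes 0-3 hold the Mesa build
* timestamp, bytes 4-5 the GPU family, and byte 6 onward the literal string
* "tu"; the remaining bytes stay zero from the memset above.
*/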
56 memcpy(uuid, &mesa_timestamp, 4);
57 memcpy((char *) uuid + 4, &f, 2);
58 snprintf((char *) uuid + 6, VK_UUID_SIZE - 10, "tu");
59 return 0;
60 }
61
62 static void
63 tu_get_driver_uuid(void *uuid)
64 {
65 memset(uuid, 0, VK_UUID_SIZE);
66 snprintf(uuid, VK_UUID_SIZE, "freedreno");
67 }
68
69 static void
70 tu_get_device_uuid(void *uuid)
71 {
72 memset(uuid, 0, VK_UUID_SIZE);
73 }
74
75 VkResult
76 tu_bo_init_new(struct tu_device *dev, struct tu_bo *bo, uint64_t size)
77 {
78 /* TODO: Choose better flags. As of 2018-11-12, freedreno/drm/msm_bo.c
79 * always sets `flags = MSM_BO_WC`, and we copy that behavior here.
80 */
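/* (MSM_BO_WC requests a write-combined CPU mapping: fine for streaming
* writes from the CPU, but CPU reads from such memory are uncached and
* therefore slow.)
*/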
81 uint32_t gem_handle = tu_gem_new(dev, size, MSM_BO_WC);
82 if (!gem_handle)
83 goto fail_new;
84
85 /* Calling DRM_MSM_GEM_INFO forces the kernel to allocate backing pages. We
86 * want immediate backing pages because vkAllocateMemory and friends must
87 * not lazily fail.
88 *
89 * TODO(chadv): Must we really call DRM_MSM_GEM_INFO to acquire backing
90 * pages? I infer so from reading comments in msm_bo.c:bo_allocate(), but
91 * maybe I misunderstand.
92 */
93
94 /* TODO: Do we need 'offset' if we have 'iova'? */
95 uint64_t offset = tu_gem_info_offset(dev, gem_handle);
96 if (!offset)
97 goto fail_info;
98
99 uint64_t iova = tu_gem_info_iova(dev, gem_handle);
100 if (!iova)
101 goto fail_info;
102
103 *bo = (struct tu_bo) {
104 .gem_handle = gem_handle,
105 .size = size,
106 .offset = offset,
107 .iova = iova,
108 };
109
110 return VK_SUCCESS;
111
112 fail_info:
113 tu_gem_close(dev, gem_handle);
114 fail_new:
115 return vk_error(dev->instance, VK_ERROR_OUT_OF_DEVICE_MEMORY);
116 }
117
118 VkResult
119 tu_bo_map(struct tu_device *dev, struct tu_bo *bo)
120 {
121 if (bo->map)
122 return VK_SUCCESS;
123
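/* bo->offset is the mmap pseudo-offset the kernel reported for this GEM
* object (via tu_gem_info_offset()); passing it to mmap() on the DRM fd
* below selects which BO gets mapped.
*/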
124 /* TODO: Should we use the wrapper os_mmap() like Freedreno does? */
125 void *map = mmap(0, bo->size, PROT_READ | PROT_WRITE, MAP_SHARED,
126 dev->physical_device->local_fd, bo->offset);
127 if (map == MAP_FAILED)
128 return vk_error(dev->instance, VK_ERROR_MEMORY_MAP_FAILED);
129
130 bo->map = map;
131 return VK_SUCCESS;
132 }
133
134 void
135 tu_bo_finish(struct tu_device *dev, struct tu_bo *bo)
136 {
137 assert(bo->gem_handle);
138
139 if (bo->map)
140 munmap(bo->map, bo->size);
141
142 tu_gem_close(dev, bo->gem_handle);
143 }
144
145 static VkResult
146 tu_physical_device_init(struct tu_physical_device *device,
147 struct tu_instance *instance,
148 drmDevicePtr drm_device)
149 {
150 const char *path = drm_device->nodes[DRM_NODE_RENDER];
151 VkResult result = VK_SUCCESS;
152 drmVersionPtr version;
153 int fd;
154 int master_fd = -1;
155 uint64_t val;
156
157 fd = open(path, O_RDWR | O_CLOEXEC);
158 if (fd < 0) {
159 return vk_errorf(instance, VK_ERROR_INCOMPATIBLE_DRIVER,
160 "failed to open device %s", path);
161 }
162
163 /* Version 1.3 added MSM_INFO_IOVA. */
164 const int min_version_major = 1;
165 const int min_version_minor = 3;
166
167 version = drmGetVersion(fd);
168 if (!version) {
169 close(fd);
170 return vk_errorf(instance, VK_ERROR_INCOMPATIBLE_DRIVER,
171 "failed to query kernel driver version for device %s",
172 path);
173 }
174
175 if (strcmp(version->name, "msm")) {
176 drmFreeVersion(version);
177 if (master_fd != -1)
178 close(master_fd);
179 close(fd);
180 return vk_errorf(instance, VK_ERROR_INCOMPATIBLE_DRIVER,
181 "device %s does not use the msm kernel driver", path);
182 }
183
184 if (version->version_major != min_version_major ||
185 version->version_minor < min_version_minor) {
186 result = vk_errorf(instance, VK_ERROR_INCOMPATIBLE_DRIVER,
187 "kernel driver for device %s has version %d.%d, "
188 "but Vulkan requires version >= %d.%d",
189 path, version->version_major, version->version_minor,
190 min_version_major, min_version_minor);
191 drmFreeVersion(version);
192 close(fd);
193 return result;
194 }
195
196 drmFreeVersion(version);
197
198 if (instance->debug_flags & TU_DEBUG_STARTUP)
199 tu_logi("Found compatible device '%s'.", path);
200
201 device->_loader_data.loaderMagic = ICD_LOADER_MAGIC;
202 device->instance = instance;
203 assert(strlen(path) < ARRAY_SIZE(device->path));
204 strncpy(device->path, path, ARRAY_SIZE(device->path));
205
206 if (instance->enabled_extensions.KHR_display) {
207 master_fd =
208 open(drm_device->nodes[DRM_NODE_PRIMARY], O_RDWR | O_CLOEXEC);
209 if (master_fd >= 0) {
210 /* TODO: free master_fd if accel is not working? */
211 }
212 }
213
214 device->master_fd = master_fd;
215 device->local_fd = fd;
216
217 device->drm_device = fd_device_new_dup(fd);
218 if (!device->drm_device) {
219 if (instance->debug_flags & TU_DEBUG_STARTUP)
220 tu_logi("Could not create the libdrm device");
221 result = vk_errorf(instance, VK_ERROR_INITIALIZATION_FAILED,
222 "could not create the libdrm device");
223 goto fail;
224 }
225
226 if (tu_drm_query_param(device, MSM_PARAM_GPU_ID, &val)) {
227 if (instance->debug_flags & TU_DEBUG_STARTUP)
228 tu_logi("Could not query the GPU ID");
229 result = vk_errorf(instance, VK_ERROR_INITIALIZATION_FAILED,
230 "could not get GPU ID");
231 goto fail;
232 }
233 device->gpu_id = val;
234
235 if (tu_drm_query_param(device, MSM_PARAM_GMEM_SIZE, &val)) {
236 if (instance->debug_flags & TU_DEBUG_STARTUP)
237 tu_logi("Could not query the GMEM size");
238 result = vk_errorf(instance, VK_ERROR_INITIALIZATION_FAILED,
239 "could not get GMEM size");
240 goto fail;
241 }
242 device->gmem_size = val;
243
244 memset(device->name, 0, sizeof(device->name));
245 sprintf(device->name, "FD%d", device->gpu_id);
246
247 switch (device->gpu_id) {
248 case 530:
249 case 630:
250 break;
251 default:
252 result = vk_errorf(instance, VK_ERROR_INITIALIZATION_FAILED,
253 "device %s is unsupported", device->name);
254 goto fail;
255 }
256 if (tu_device_get_cache_uuid(device->gpu_id, device->cache_uuid)) {
257 result = vk_errorf(instance, VK_ERROR_INITIALIZATION_FAILED,
258 "cannot generate UUID");
259 goto fail;
260 }
261
262 /* The gpu id is already embedded in the uuid so we just pass "tu"
263 * when creating the cache.
264 */
265 char buf[VK_UUID_SIZE * 2 + 1];
266 disk_cache_format_hex_id(buf, device->cache_uuid, VK_UUID_SIZE * 2);
267 device->disk_cache = disk_cache_create(device->name, buf, 0);
268
269 fprintf(stderr, "WARNING: tu is not a conformant vulkan implementation, "
270 "testing use only.\n");
271
272 tu_get_driver_uuid(&device->driver_uuid);
273 tu_get_device_uuid(&device->device_uuid);
274
275 tu_fill_device_extension_table(device, &device->supported_extensions);
276
277 if (result != VK_SUCCESS) {
278 vk_error(instance, result);
279 goto fail;
280 }
281
282 return VK_SUCCESS;
283
284 fail:
285 if (device->drm_device)
286 fd_device_del(device->drm_device);
287 close(fd);
288 if (master_fd != -1)
289 close(master_fd);
290 return result;
291 }
292
293 static void
294 tu_physical_device_finish(struct tu_physical_device *device)
295 {
296 disk_cache_destroy(device->disk_cache);
297 close(device->local_fd);
298 if (device->master_fd != -1)
299 close(device->master_fd);
300 }
301
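/* Fallback allocation callbacks used when the application does not supply
* its own. Note that they ignore the requested alignment and rely on
* malloc()/realloc() returning suitably aligned memory.
*/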
302 static void *
303 default_alloc_func(void *pUserData,
304 size_t size,
305 size_t align,
306 VkSystemAllocationScope allocationScope)
307 {
308 return malloc(size);
309 }
310
311 static void *
312 default_realloc_func(void *pUserData,
313 void *pOriginal,
314 size_t size,
315 size_t align,
316 VkSystemAllocationScope allocationScope)
317 {
318 return realloc(pOriginal, size);
319 }
320
321 static void
322 default_free_func(void *pUserData, void *pMemory)
323 {
324 free(pMemory);
325 }
326
327 static const VkAllocationCallbacks default_alloc = {
328 .pUserData = NULL,
329 .pfnAllocation = default_alloc_func,
330 .pfnReallocation = default_realloc_func,
331 .pfnFree = default_free_func,
332 };
333
334 static const struct debug_control tu_debug_options[] = {
335 { "startup", TU_DEBUG_STARTUP }, { NULL, 0 }
336 };
337
338 const char *
339 tu_get_debug_option_name(int id)
340 {
341 assert(id < ARRAY_SIZE(tu_debug_options) - 1);
342 return tu_debug_options[id].string;
343 }
344
345 static int
346 tu_get_instance_extension_index(const char *name)
347 {
348 for (unsigned i = 0; i < TU_INSTANCE_EXTENSION_COUNT; ++i) {
349 if (strcmp(name, tu_instance_extensions[i].extensionName) == 0)
350 return i;
351 }
352 return -1;
353 }
354
355 VkResult
356 tu_CreateInstance(const VkInstanceCreateInfo *pCreateInfo,
357 const VkAllocationCallbacks *pAllocator,
358 VkInstance *pInstance)
359 {
360 struct tu_instance *instance;
361 VkResult result;
362
363 assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_INSTANCE_CREATE_INFO);
364
365 uint32_t client_version;
366 if (pCreateInfo->pApplicationInfo &&
367 pCreateInfo->pApplicationInfo->apiVersion != 0) {
368 client_version = pCreateInfo->pApplicationInfo->apiVersion;
369 } else {
370 tu_EnumerateInstanceVersion(&client_version);
371 }
372
373 instance = vk_zalloc2(&default_alloc, pAllocator, sizeof(*instance), 8,
374 VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
375 if (!instance)
376 return vk_error(NULL, VK_ERROR_OUT_OF_HOST_MEMORY);
377
378 instance->_loader_data.loaderMagic = ICD_LOADER_MAGIC;
379
380 if (pAllocator)
381 instance->alloc = *pAllocator;
382 else
383 instance->alloc = default_alloc;
384
385 instance->api_version = client_version;
386 instance->physical_device_count = -1;
387
388 instance->debug_flags =
389 parse_debug_string(getenv("TU_DEBUG"), tu_debug_options);
390
391 if (instance->debug_flags & TU_DEBUG_STARTUP)
392 tu_logi("Created an instance");
393
394 for (uint32_t i = 0; i < pCreateInfo->enabledExtensionCount; i++) {
395 const char *ext_name = pCreateInfo->ppEnabledExtensionNames[i];
396 int index = tu_get_instance_extension_index(ext_name);
397
398 if (index < 0 || !tu_supported_instance_extensions.extensions[index]) {
399 vk_free2(&default_alloc, pAllocator, instance);
400 return vk_error(instance, VK_ERROR_EXTENSION_NOT_PRESENT);
401 }
402
403 instance->enabled_extensions.extensions[index] = true;
404 }
405
406 result = vk_debug_report_instance_init(&instance->debug_report_callbacks);
407 if (result != VK_SUCCESS) {
408 vk_free2(&default_alloc, pAllocator, instance);
409 return vk_error(instance, result);
410 }
411
412 _mesa_locale_init();
413
414 VG(VALGRIND_CREATE_MEMPOOL(instance, 0, false));
415
416 *pInstance = tu_instance_to_handle(instance);
417
418 return VK_SUCCESS;
419 }
420
421 void
422 tu_DestroyInstance(VkInstance _instance,
423 const VkAllocationCallbacks *pAllocator)
424 {
425 TU_FROM_HANDLE(tu_instance, instance, _instance);
426
427 if (!instance)
428 return;
429
430 for (int i = 0; i < instance->physical_device_count; ++i) {
431 tu_physical_device_finish(instance->physical_devices + i);
432 }
433
434 VG(VALGRIND_DESTROY_MEMPOOL(instance));
435
436 _mesa_locale_fini();
437
438 vk_debug_report_instance_destroy(&instance->debug_report_callbacks);
439
440 vk_free(&instance->alloc, instance);
441 }
442
443 static VkResult
444 tu_enumerate_devices(struct tu_instance *instance)
445 {
446 /* TODO: Check for more devices? */
447 drmDevicePtr devices[8];
448 VkResult result = VK_ERROR_INCOMPATIBLE_DRIVER;
449 int max_devices;
450
451 instance->physical_device_count = 0;
452
453 max_devices = drmGetDevices2(0, devices, ARRAY_SIZE(devices));
454
455 if (instance->debug_flags & TU_DEBUG_STARTUP)
456 tu_logi("Found %d drm nodes", max_devices);
457
458 if (max_devices < 1)
459 return vk_error(instance, VK_ERROR_INCOMPATIBLE_DRIVER);
460
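/* msm exposes the GPU as a platform (non-PCI) device, so only render nodes
* with DRM_BUS_PLATFORM are considered here.
*/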
461 for (unsigned i = 0; i < (unsigned) max_devices; i++) {
462 if (devices[i]->available_nodes & 1 << DRM_NODE_RENDER &&
463 devices[i]->bustype == DRM_BUS_PLATFORM) {
464
465 result = tu_physical_device_init(
466 instance->physical_devices + instance->physical_device_count,
467 instance, devices[i]);
468 if (result == VK_SUCCESS)
469 ++instance->physical_device_count;
470 else if (result != VK_ERROR_INCOMPATIBLE_DRIVER)
471 break;
472 }
473 }
474 drmFreeDevices(devices, max_devices);
475
476 return result;
477 }
478
479 VkResult
480 tu_EnumeratePhysicalDevices(VkInstance _instance,
481 uint32_t *pPhysicalDeviceCount,
482 VkPhysicalDevice *pPhysicalDevices)
483 {
484 TU_FROM_HANDLE(tu_instance, instance, _instance);
485 VK_OUTARRAY_MAKE(out, pPhysicalDevices, pPhysicalDeviceCount);
486
487 VkResult result;
488
489 if (instance->physical_device_count < 0) {
490 result = tu_enumerate_devices(instance);
491 if (result != VK_SUCCESS && result != VK_ERROR_INCOMPATIBLE_DRIVER)
492 return result;
493 }
494
495 for (uint32_t i = 0; i < instance->physical_device_count; ++i) {
496 vk_outarray_append(&out, p)
497 {
498 *p = tu_physical_device_to_handle(instance->physical_devices + i);
499 }
500 }
501
502 return vk_outarray_status(&out);
503 }
504
505 VkResult
506 tu_EnumeratePhysicalDeviceGroups(
507 VkInstance _instance,
508 uint32_t *pPhysicalDeviceGroupCount,
509 VkPhysicalDeviceGroupProperties *pPhysicalDeviceGroupProperties)
510 {
511 TU_FROM_HANDLE(tu_instance, instance, _instance);
512 VK_OUTARRAY_MAKE(out, pPhysicalDeviceGroupProperties,
513 pPhysicalDeviceGroupCount);
514 VkResult result;
515
516 if (instance->physical_device_count < 0) {
517 result = tu_enumerate_devices(instance);
518 if (result != VK_SUCCESS && result != VK_ERROR_INCOMPATIBLE_DRIVER)
519 return result;
520 }
521
522 for (uint32_t i = 0; i < instance->physical_device_count; ++i) {
523 vk_outarray_append(&out, p)
524 {
525 p->physicalDeviceCount = 1;
526 p->physicalDevices[0] =
527 tu_physical_device_to_handle(instance->physical_devices + i);
528 p->subsetAllocation = false;
529 }
530 }
531
532 return vk_outarray_status(&out);
533 }
534
535 void
536 tu_GetPhysicalDeviceFeatures(VkPhysicalDevice physicalDevice,
537 VkPhysicalDeviceFeatures *pFeatures)
538 {
539 memset(pFeatures, 0, sizeof(*pFeatures));
540
541 *pFeatures = (VkPhysicalDeviceFeatures) {
542 .robustBufferAccess = false,
543 .fullDrawIndexUint32 = false,
544 .imageCubeArray = false,
545 .independentBlend = false,
546 .geometryShader = false,
547 .tessellationShader = false,
548 .sampleRateShading = false,
549 .dualSrcBlend = false,
550 .logicOp = false,
551 .multiDrawIndirect = false,
552 .drawIndirectFirstInstance = false,
553 .depthClamp = false,
554 .depthBiasClamp = false,
555 .fillModeNonSolid = false,
556 .depthBounds = false,
557 .wideLines = false,
558 .largePoints = false,
559 .alphaToOne = false,
560 .multiViewport = false,
561 .samplerAnisotropy = false,
562 .textureCompressionETC2 = false,
563 .textureCompressionASTC_LDR = false,
564 .textureCompressionBC = false,
565 .occlusionQueryPrecise = false,
566 .pipelineStatisticsQuery = false,
567 .vertexPipelineStoresAndAtomics = false,
568 .fragmentStoresAndAtomics = false,
569 .shaderTessellationAndGeometryPointSize = false,
570 .shaderImageGatherExtended = false,
571 .shaderStorageImageExtendedFormats = false,
572 .shaderStorageImageMultisample = false,
573 .shaderUniformBufferArrayDynamicIndexing = false,
574 .shaderSampledImageArrayDynamicIndexing = false,
575 .shaderStorageBufferArrayDynamicIndexing = false,
576 .shaderStorageImageArrayDynamicIndexing = false,
577 .shaderStorageImageReadWithoutFormat = false,
578 .shaderStorageImageWriteWithoutFormat = false,
579 .shaderClipDistance = false,
580 .shaderCullDistance = false,
581 .shaderFloat64 = false,
582 .shaderInt64 = false,
583 .shaderInt16 = false,
584 .sparseBinding = false,
585 .variableMultisampleRate = false,
586 .inheritedQueries = false,
587 };
588 }
589
590 void
591 tu_GetPhysicalDeviceFeatures2(VkPhysicalDevice physicalDevice,
592 VkPhysicalDeviceFeatures2KHR *pFeatures)
593 {
594 vk_foreach_struct(ext, pFeatures->pNext)
595 {
596 switch (ext->sType) {
597 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VARIABLE_POINTER_FEATURES_KHR: {
598 VkPhysicalDeviceVariablePointerFeaturesKHR *features = (void *) ext;
599 features->variablePointersStorageBuffer = false;
600 features->variablePointers = false;
601 break;
602 }
603 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MULTIVIEW_FEATURES_KHR: {
604 VkPhysicalDeviceMultiviewFeaturesKHR *features =
605 (VkPhysicalDeviceMultiviewFeaturesKHR *) ext;
606 features->multiview = false;
607 features->multiviewGeometryShader = false;
608 features->multiviewTessellationShader = false;
609 break;
610 }
611 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_DRAW_PARAMETER_FEATURES: {
612 VkPhysicalDeviceShaderDrawParameterFeatures *features =
613 (VkPhysicalDeviceShaderDrawParameterFeatures *) ext;
614 features->shaderDrawParameters = false;
615 break;
616 }
617 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROTECTED_MEMORY_FEATURES: {
618 VkPhysicalDeviceProtectedMemoryFeatures *features =
619 (VkPhysicalDeviceProtectedMemoryFeatures *) ext;
620 features->protectedMemory = false;
621 break;
622 }
623 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_16BIT_STORAGE_FEATURES: {
624 VkPhysicalDevice16BitStorageFeatures *features =
625 (VkPhysicalDevice16BitStorageFeatures *) ext;
626 features->storageBuffer16BitAccess = false;
627 features->uniformAndStorageBuffer16BitAccess = false;
628 features->storagePushConstant16 = false;
629 features->storageInputOutput16 = false;
630 break;
631 }
632 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SAMPLER_YCBCR_CONVERSION_FEATURES: {
633 VkPhysicalDeviceSamplerYcbcrConversionFeatures *features =
634 (VkPhysicalDeviceSamplerYcbcrConversionFeatures *) ext;
635 features->samplerYcbcrConversion = false;
636 break;
637 }
638 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DESCRIPTOR_INDEXING_FEATURES_EXT: {
639 VkPhysicalDeviceDescriptorIndexingFeaturesEXT *features =
640 (VkPhysicalDeviceDescriptorIndexingFeaturesEXT *) ext;
641 features->shaderInputAttachmentArrayDynamicIndexing = false;
642 features->shaderUniformTexelBufferArrayDynamicIndexing = false;
643 features->shaderStorageTexelBufferArrayDynamicIndexing = false;
644 features->shaderUniformBufferArrayNonUniformIndexing = false;
645 features->shaderSampledImageArrayNonUniformIndexing = false;
646 features->shaderStorageBufferArrayNonUniformIndexing = false;
647 features->shaderStorageImageArrayNonUniformIndexing = false;
648 features->shaderInputAttachmentArrayNonUniformIndexing = false;
649 features->shaderUniformTexelBufferArrayNonUniformIndexing = false;
650 features->shaderStorageTexelBufferArrayNonUniformIndexing = false;
651 features->descriptorBindingUniformBufferUpdateAfterBind = false;
652 features->descriptorBindingSampledImageUpdateAfterBind = false;
653 features->descriptorBindingStorageImageUpdateAfterBind = false;
654 features->descriptorBindingStorageBufferUpdateAfterBind = false;
655 features->descriptorBindingUniformTexelBufferUpdateAfterBind = false;
656 features->descriptorBindingStorageTexelBufferUpdateAfterBind = false;
657 features->descriptorBindingUpdateUnusedWhilePending = false;
658 features->descriptorBindingPartiallyBound = false;
659 features->descriptorBindingVariableDescriptorCount = false;
660 features->runtimeDescriptorArray = false;
661 break;
662 }
663 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_CONDITIONAL_RENDERING_FEATURES_EXT: {
664 VkPhysicalDeviceConditionalRenderingFeaturesEXT *features =
665 (VkPhysicalDeviceConditionalRenderingFeaturesEXT *) ext;
666 features->conditionalRendering = false;
667 features->inheritedConditionalRendering = false;
668 break;
669 }
670 default:
671 break;
672 }
673 }
674 return tu_GetPhysicalDeviceFeatures(physicalDevice, &pFeatures->features);
675 }
676
677 void
678 tu_GetPhysicalDeviceProperties(VkPhysicalDevice physicalDevice,
679 VkPhysicalDeviceProperties *pProperties)
680 {
681 TU_FROM_HANDLE(tu_physical_device, pdevice, physicalDevice);
682 VkSampleCountFlags sample_counts = 0xf;
683
684 /* Make sure that the entire descriptor set is addressable with a signed
685 * 32-bit int. So the sum of all limits scaled by descriptor size has to
686 * be at most 2 GiB. A combined image & sampler object counts as one of
687 * each. This limit is for the pipeline layout, not for the set layout,
688 * but since there is no set limit we just set a pipeline limit. I don't
689 * think any app is going to hit this soon. */
690 size_t max_descriptor_set_size =
691 ((1ull << 31) - 16 * MAX_DYNAMIC_BUFFERS) /
692 (32 /* uniform buffer, 32 due to potential space wasted on alignment */ +
693 32 /* storage buffer, 32 due to potential space wasted on alignment */ +
694 32 /* sampler, largest when combined with image */ +
695 64 /* sampled image */ + 64 /* storage image */);
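/* With these weights the divisor is 224 bytes per descriptor, so ignoring
* the small MAX_DYNAMIC_BUFFERS term this works out to roughly
* 2^31 / 224 ~= 9.5 million descriptors per stage -- far more than any
* application uses in practice.
*/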
696
697 VkPhysicalDeviceLimits limits = {
698 .maxImageDimension1D = (1 << 14),
699 .maxImageDimension2D = (1 << 14),
700 .maxImageDimension3D = (1 << 11),
701 .maxImageDimensionCube = (1 << 14),
702 .maxImageArrayLayers = (1 << 11),
703 .maxTexelBufferElements = 128 * 1024 * 1024,
704 .maxUniformBufferRange = UINT32_MAX,
705 .maxStorageBufferRange = UINT32_MAX,
706 .maxPushConstantsSize = MAX_PUSH_CONSTANTS_SIZE,
707 .maxMemoryAllocationCount = UINT32_MAX,
708 .maxSamplerAllocationCount = 64 * 1024,
709 .bufferImageGranularity = 64, /* A cache line */
710 .sparseAddressSpaceSize = 0xffffffffu, /* buffer max size */
711 .maxBoundDescriptorSets = MAX_SETS,
712 .maxPerStageDescriptorSamplers = max_descriptor_set_size,
713 .maxPerStageDescriptorUniformBuffers = max_descriptor_set_size,
714 .maxPerStageDescriptorStorageBuffers = max_descriptor_set_size,
715 .maxPerStageDescriptorSampledImages = max_descriptor_set_size,
716 .maxPerStageDescriptorStorageImages = max_descriptor_set_size,
717 .maxPerStageDescriptorInputAttachments = max_descriptor_set_size,
718 .maxPerStageResources = max_descriptor_set_size,
719 .maxDescriptorSetSamplers = max_descriptor_set_size,
720 .maxDescriptorSetUniformBuffers = max_descriptor_set_size,
721 .maxDescriptorSetUniformBuffersDynamic = MAX_DYNAMIC_UNIFORM_BUFFERS,
722 .maxDescriptorSetStorageBuffers = max_descriptor_set_size,
723 .maxDescriptorSetStorageBuffersDynamic = MAX_DYNAMIC_STORAGE_BUFFERS,
724 .maxDescriptorSetSampledImages = max_descriptor_set_size,
725 .maxDescriptorSetStorageImages = max_descriptor_set_size,
726 .maxDescriptorSetInputAttachments = max_descriptor_set_size,
727 .maxVertexInputAttributes = 32,
728 .maxVertexInputBindings = 32,
729 .maxVertexInputAttributeOffset = 2047,
730 .maxVertexInputBindingStride = 2048,
731 .maxVertexOutputComponents = 128,
732 .maxTessellationGenerationLevel = 64,
733 .maxTessellationPatchSize = 32,
734 .maxTessellationControlPerVertexInputComponents = 128,
735 .maxTessellationControlPerVertexOutputComponents = 128,
736 .maxTessellationControlPerPatchOutputComponents = 120,
737 .maxTessellationControlTotalOutputComponents = 4096,
738 .maxTessellationEvaluationInputComponents = 128,
739 .maxTessellationEvaluationOutputComponents = 128,
740 .maxGeometryShaderInvocations = 127,
741 .maxGeometryInputComponents = 64,
742 .maxGeometryOutputComponents = 128,
743 .maxGeometryOutputVertices = 256,
744 .maxGeometryTotalOutputComponents = 1024,
745 .maxFragmentInputComponents = 128,
746 .maxFragmentOutputAttachments = 8,
747 .maxFragmentDualSrcAttachments = 1,
748 .maxFragmentCombinedOutputResources = 8,
749 .maxComputeSharedMemorySize = 32768,
750 .maxComputeWorkGroupCount = { 65535, 65535, 65535 },
751 .maxComputeWorkGroupInvocations = 2048,
752 .maxComputeWorkGroupSize = { 2048, 2048, 2048 },
753 .subPixelPrecisionBits = 4 /* FIXME */,
754 .subTexelPrecisionBits = 4 /* FIXME */,
755 .mipmapPrecisionBits = 4 /* FIXME */,
756 .maxDrawIndexedIndexValue = UINT32_MAX,
757 .maxDrawIndirectCount = UINT32_MAX,
758 .maxSamplerLodBias = 16,
759 .maxSamplerAnisotropy = 16,
760 .maxViewports = MAX_VIEWPORTS,
761 .maxViewportDimensions = { (1 << 14), (1 << 14) },
762 .viewportBoundsRange = { INT16_MIN, INT16_MAX },
763 .viewportSubPixelBits = 8,
764 .minMemoryMapAlignment = 4096, /* A page */
765 .minTexelBufferOffsetAlignment = 1,
766 .minUniformBufferOffsetAlignment = 4,
767 .minStorageBufferOffsetAlignment = 4,
768 .minTexelOffset = -32,
769 .maxTexelOffset = 31,
770 .minTexelGatherOffset = -32,
771 .maxTexelGatherOffset = 31,
772 .minInterpolationOffset = -2,
773 .maxInterpolationOffset = 2,
774 .subPixelInterpolationOffsetBits = 8,
775 .maxFramebufferWidth = (1 << 14),
776 .maxFramebufferHeight = (1 << 14),
777 .maxFramebufferLayers = (1 << 10),
778 .framebufferColorSampleCounts = sample_counts,
779 .framebufferDepthSampleCounts = sample_counts,
780 .framebufferStencilSampleCounts = sample_counts,
781 .framebufferNoAttachmentsSampleCounts = sample_counts,
782 .maxColorAttachments = MAX_RTS,
783 .sampledImageColorSampleCounts = sample_counts,
784 .sampledImageIntegerSampleCounts = VK_SAMPLE_COUNT_1_BIT,
785 .sampledImageDepthSampleCounts = sample_counts,
786 .sampledImageStencilSampleCounts = sample_counts,
787 .storageImageSampleCounts = VK_SAMPLE_COUNT_1_BIT,
788 .maxSampleMaskWords = 1,
789 .timestampComputeAndGraphics = true,
790 .timestampPeriod = 1,
791 .maxClipDistances = 8,
792 .maxCullDistances = 8,
793 .maxCombinedClipAndCullDistances = 8,
794 .discreteQueuePriorities = 1,
795 .pointSizeRange = { 0.125, 255.875 },
796 .lineWidthRange = { 0.0, 7.9921875 },
797 .pointSizeGranularity = (1.0 / 8.0),
798 .lineWidthGranularity = (1.0 / 128.0),
799 .strictLines = false, /* FINISHME */
800 .standardSampleLocations = true,
801 .optimalBufferCopyOffsetAlignment = 128,
802 .optimalBufferCopyRowPitchAlignment = 128,
803 .nonCoherentAtomSize = 64,
804 };
805
806 *pProperties = (VkPhysicalDeviceProperties) {
807 .apiVersion = tu_physical_device_api_version(pdevice),
808 .driverVersion = vk_get_driver_version(),
809 .vendorID = 0, /* TODO */
810 .deviceID = 0,
811 .deviceType = VK_PHYSICAL_DEVICE_TYPE_INTEGRATED_GPU,
812 .limits = limits,
813 .sparseProperties = { 0 },
814 };
815
816 strcpy(pProperties->deviceName, pdevice->name);
817 memcpy(pProperties->pipelineCacheUUID, pdevice->cache_uuid, VK_UUID_SIZE);
818 }
819
820 void
821 tu_GetPhysicalDeviceProperties2(VkPhysicalDevice physicalDevice,
822 VkPhysicalDeviceProperties2KHR *pProperties)
823 {
824 TU_FROM_HANDLE(tu_physical_device, pdevice, physicalDevice);
825 tu_GetPhysicalDeviceProperties(physicalDevice, &pProperties->properties);
826
827 vk_foreach_struct(ext, pProperties->pNext)
828 {
829 switch (ext->sType) {
830 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PUSH_DESCRIPTOR_PROPERTIES_KHR: {
831 VkPhysicalDevicePushDescriptorPropertiesKHR *properties =
832 (VkPhysicalDevicePushDescriptorPropertiesKHR *) ext;
833 properties->maxPushDescriptors = MAX_PUSH_DESCRIPTORS;
834 break;
835 }
836 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_ID_PROPERTIES_KHR: {
837 VkPhysicalDeviceIDPropertiesKHR *properties =
838 (VkPhysicalDeviceIDPropertiesKHR *) ext;
839 memcpy(properties->driverUUID, pdevice->driver_uuid, VK_UUID_SIZE);
840 memcpy(properties->deviceUUID, pdevice->device_uuid, VK_UUID_SIZE);
841 properties->deviceLUIDValid = false;
842 break;
843 }
844 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MULTIVIEW_PROPERTIES_KHR: {
845 VkPhysicalDeviceMultiviewPropertiesKHR *properties =
846 (VkPhysicalDeviceMultiviewPropertiesKHR *) ext;
847 properties->maxMultiviewViewCount = MAX_VIEWS;
848 properties->maxMultiviewInstanceIndex = INT_MAX;
849 break;
850 }
851 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_POINT_CLIPPING_PROPERTIES_KHR: {
852 VkPhysicalDevicePointClippingPropertiesKHR *properties =
853 (VkPhysicalDevicePointClippingPropertiesKHR *) ext;
854 properties->pointClippingBehavior =
855 VK_POINT_CLIPPING_BEHAVIOR_ALL_CLIP_PLANES_KHR;
856 break;
857 }
858 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MAINTENANCE_3_PROPERTIES: {
859 VkPhysicalDeviceMaintenance3Properties *properties =
860 (VkPhysicalDeviceMaintenance3Properties *) ext;
861 /* Make sure everything is addressable by a signed 32-bit int, and
862 * our largest descriptors are 96 bytes. */
863 properties->maxPerSetDescriptors = (1ull << 31) / 96;
864 /* Our buffer size fields allow only this much */
865 properties->maxMemoryAllocationSize = 0xFFFFFFFFull;
866 break;
867 }
868 default:
869 break;
870 }
871 }
872 }
873
874 static const VkQueueFamilyProperties tu_queue_family_properties = {
875 .queueFlags =
876 VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT | VK_QUEUE_TRANSFER_BIT,
877 .queueCount = 1,
878 .timestampValidBits = 64,
879 .minImageTransferGranularity = (VkExtent3D) { 1, 1, 1 },
880 };
881
882 void
883 tu_GetPhysicalDeviceQueueFamilyProperties(
884 VkPhysicalDevice physicalDevice,
885 uint32_t *pQueueFamilyPropertyCount,
886 VkQueueFamilyProperties *pQueueFamilyProperties)
887 {
888 VK_OUTARRAY_MAKE(out, pQueueFamilyProperties, pQueueFamilyPropertyCount);
889
890 vk_outarray_append(&out, p) { *p = tu_queue_family_properties; }
891 }
892
893 void
894 tu_GetPhysicalDeviceQueueFamilyProperties2(
895 VkPhysicalDevice physicalDevice,
896 uint32_t *pQueueFamilyPropertyCount,
897 VkQueueFamilyProperties2KHR *pQueueFamilyProperties)
898 {
899 VK_OUTARRAY_MAKE(out, pQueueFamilyProperties, pQueueFamilyPropertyCount);
900
901 vk_outarray_append(&out, p)
902 {
903 p->queueFamilyProperties = tu_queue_family_properties;
904 }
905 }
906
907 static uint64_t
908 tu_get_system_heap_size(void)
909 {
910 struct sysinfo info;
911 sysinfo(&info);
912
913 uint64_t total_ram = (uint64_t) info.totalram * (uint64_t) info.mem_unit;
914
915 /* We don't want to burn too much ram with the GPU. If the user has 4GiB
916 * or less, we use at most half. If they have more than 4GiB, we use 3/4.
917 */
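/* For example: a 2 GiB system advertises a 1 GiB heap, while a 16 GiB
* system advertises a 12 GiB heap.
*/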
918 uint64_t available_ram;
919 if (total_ram <= 4ull * 1024ull * 1024ull * 1024ull)
920 available_ram = total_ram / 2;
921 else
922 available_ram = total_ram * 3 / 4;
923
924 return available_ram;
925 }
926
927 void
928 tu_GetPhysicalDeviceMemoryProperties(
929 VkPhysicalDevice physicalDevice,
930 VkPhysicalDeviceMemoryProperties *pMemoryProperties)
931 {
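/* Adreno GPUs share system RAM with the CPU, so a single heap and a single
* memory type that is DEVICE_LOCAL, HOST_VISIBLE and HOST_COHERENT at the
* same time is sufficient.
*/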
932 pMemoryProperties->memoryHeapCount = 1;
933 pMemoryProperties->memoryHeaps[0].size = tu_get_system_heap_size();
934 pMemoryProperties->memoryHeaps[0].flags = VK_MEMORY_HEAP_DEVICE_LOCAL_BIT;
935
936 pMemoryProperties->memoryTypeCount = 1;
937 pMemoryProperties->memoryTypes[0].propertyFlags =
938 VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT |
939 VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT |
940 VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
941 pMemoryProperties->memoryTypes[0].heapIndex = 0;
942 }
943
944 void
945 tu_GetPhysicalDeviceMemoryProperties2(
946 VkPhysicalDevice physicalDevice,
947 VkPhysicalDeviceMemoryProperties2KHR *pMemoryProperties)
948 {
949 return tu_GetPhysicalDeviceMemoryProperties(
950 physicalDevice, &pMemoryProperties->memoryProperties);
951 }
952
953 static VkResult
954 tu_queue_init(struct tu_device *device,
955 struct tu_queue *queue,
956 uint32_t queue_family_index,
957 int idx,
958 VkDeviceQueueCreateFlags flags)
959 {
960 queue->_loader_data.loaderMagic = ICD_LOADER_MAGIC;
961 queue->device = device;
962 queue->queue_family_index = queue_family_index;
963 queue->queue_idx = idx;
964 queue->flags = flags;
965
966 struct drm_msm_submitqueue req = {
967 .flags = 0,
968 .prio = 0,
969 };
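/* DRM_MSM_SUBMITQUEUE_NEW fills req.id on success; that id is what we later
* pass as drm_msm_gem_submit::queueid in tu_QueueSubmit().
*/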
970
971 int ret = drmCommandWriteRead(device->physical_device->local_fd,
972 DRM_MSM_SUBMITQUEUE_NEW,
973 &req, sizeof(req));
974 if (ret)
975 return VK_ERROR_INITIALIZATION_FAILED;
976
977 queue->msm_queue_id = req.id;
978 return VK_SUCCESS;
979 }
980
981 static void
982 tu_queue_finish(struct tu_queue *queue)
983 {
984 drmCommandWrite(queue->device->physical_device->local_fd,
985 DRM_MSM_SUBMITQUEUE_CLOSE,
986 &queue->msm_queue_id, sizeof(uint32_t));
987 }
988
989 static int
990 tu_get_device_extension_index(const char *name)
991 {
992 for (unsigned i = 0; i < TU_DEVICE_EXTENSION_COUNT; ++i) {
993 if (strcmp(name, tu_device_extensions[i].extensionName) == 0)
994 return i;
995 }
996 return -1;
997 }
998
999 VkResult
1000 tu_CreateDevice(VkPhysicalDevice physicalDevice,
1001 const VkDeviceCreateInfo *pCreateInfo,
1002 const VkAllocationCallbacks *pAllocator,
1003 VkDevice *pDevice)
1004 {
1005 TU_FROM_HANDLE(tu_physical_device, physical_device, physicalDevice);
1006 VkResult result;
1007 struct tu_device *device;
1008
1009 /* Check enabled features */
1010 if (pCreateInfo->pEnabledFeatures) {
1011 VkPhysicalDeviceFeatures supported_features;
1012 tu_GetPhysicalDeviceFeatures(physicalDevice, &supported_features);
1013 VkBool32 *supported_feature = (VkBool32 *) &supported_features;
1014 VkBool32 *enabled_feature = (VkBool32 *) pCreateInfo->pEnabledFeatures;
1015 unsigned num_features =
1016 sizeof(VkPhysicalDeviceFeatures) / sizeof(VkBool32);
1017 for (uint32_t i = 0; i < num_features; i++) {
1018 if (enabled_feature[i] && !supported_feature[i])
1019 return vk_error(physical_device->instance,
1020 VK_ERROR_FEATURE_NOT_PRESENT);
1021 }
1022 }
1023
1024 device = vk_zalloc2(&physical_device->instance->alloc, pAllocator,
1025 sizeof(*device), 8, VK_SYSTEM_ALLOCATION_SCOPE_DEVICE);
1026 if (!device)
1027 return vk_error(physical_device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
1028
1029 device->_loader_data.loaderMagic = ICD_LOADER_MAGIC;
1030 device->instance = physical_device->instance;
1031 device->physical_device = physical_device;
1032
1033 if (pAllocator)
1034 device->alloc = *pAllocator;
1035 else
1036 device->alloc = physical_device->instance->alloc;
1037
1038 for (uint32_t i = 0; i < pCreateInfo->enabledExtensionCount; i++) {
1039 const char *ext_name = pCreateInfo->ppEnabledExtensionNames[i];
1040 int index = tu_get_device_extension_index(ext_name);
1041 if (index < 0 ||
1042 !physical_device->supported_extensions.extensions[index]) {
1043 vk_free(&device->alloc, device);
1044 return vk_error(physical_device->instance,
1045 VK_ERROR_EXTENSION_NOT_PRESENT);
1046 }
1047
1048 device->enabled_extensions.extensions[index] = true;
1049 }
1050
1051 for (unsigned i = 0; i < pCreateInfo->queueCreateInfoCount; i++) {
1052 const VkDeviceQueueCreateInfo *queue_create =
1053 &pCreateInfo->pQueueCreateInfos[i];
1054 uint32_t qfi = queue_create->queueFamilyIndex;
1055 device->queues[qfi] = vk_alloc(
1056 &device->alloc, queue_create->queueCount * sizeof(struct tu_queue),
1057 8, VK_SYSTEM_ALLOCATION_SCOPE_DEVICE);
1058 if (!device->queues[qfi]) {
1059 result = VK_ERROR_OUT_OF_HOST_MEMORY;
1060 goto fail;
1061 }
1062
1063 memset(device->queues[qfi], 0,
1064 queue_create->queueCount * sizeof(struct tu_queue));
1065
1066 device->queue_count[qfi] = queue_create->queueCount;
1067
1068 for (unsigned q = 0; q < queue_create->queueCount; q++) {
1069 result = tu_queue_init(device, &device->queues[qfi][q], qfi, q,
1070 queue_create->flags);
1071 if (result != VK_SUCCESS)
1072 goto fail;
1073 }
1074 }
1075
1076 VkPipelineCacheCreateInfo ci;
1077 ci.sType = VK_STRUCTURE_TYPE_PIPELINE_CACHE_CREATE_INFO;
1078 ci.pNext = NULL;
1079 ci.flags = 0;
1080 ci.pInitialData = NULL;
1081 ci.initialDataSize = 0;
1082 VkPipelineCache pc;
1083 result =
1084 tu_CreatePipelineCache(tu_device_to_handle(device), &ci, NULL, &pc);
1085 if (result != VK_SUCCESS)
1086 goto fail;
1087
1088 device->mem_cache = tu_pipeline_cache_from_handle(pc);
1089
1090 *pDevice = tu_device_to_handle(device);
1091 return VK_SUCCESS;
1092
1093 fail:
1094 for (unsigned i = 0; i < TU_MAX_QUEUE_FAMILIES; i++) {
1095 for (unsigned q = 0; q < device->queue_count[i]; q++)
1096 tu_queue_finish(&device->queues[i][q]);
1097 if (device->queue_count[i])
1098 vk_free(&device->alloc, device->queues[i]);
1099 }
1100
1101 vk_free(&device->alloc, device);
1102 return result;
1103 }
1104
1105 void
1106 tu_DestroyDevice(VkDevice _device, const VkAllocationCallbacks *pAllocator)
1107 {
1108 TU_FROM_HANDLE(tu_device, device, _device);
1109
1110 if (!device)
1111 return;
1112
1113 for (unsigned i = 0; i < TU_MAX_QUEUE_FAMILIES; i++) {
1114 for (unsigned q = 0; q < device->queue_count[i]; q++)
1115 tu_queue_finish(&device->queues[i][q]);
1116 if (device->queue_count[i])
1117 vk_free(&device->alloc, device->queues[i]);
1118 }
1119
1120 VkPipelineCache pc = tu_pipeline_cache_to_handle(device->mem_cache);
1121 tu_DestroyPipelineCache(tu_device_to_handle(device), pc, NULL);
1122
1123 vk_free(&device->alloc, device);
1124 }
1125
1126 VkResult
1127 tu_EnumerateInstanceLayerProperties(uint32_t *pPropertyCount,
1128 VkLayerProperties *pProperties)
1129 {
1130 *pPropertyCount = 0;
1131 return VK_SUCCESS;
1132 }
1133
1134 VkResult
1135 tu_EnumerateDeviceLayerProperties(VkPhysicalDevice physicalDevice,
1136 uint32_t *pPropertyCount,
1137 VkLayerProperties *pProperties)
1138 {
1139 *pPropertyCount = 0;
1140 return VK_SUCCESS;
1141 }
1142
1143 void
1144 tu_GetDeviceQueue2(VkDevice _device,
1145 const VkDeviceQueueInfo2 *pQueueInfo,
1146 VkQueue *pQueue)
1147 {
1148 TU_FROM_HANDLE(tu_device, device, _device);
1149 struct tu_queue *queue;
1150
1151 queue =
1152 &device->queues[pQueueInfo->queueFamilyIndex][pQueueInfo->queueIndex];
1153 if (pQueueInfo->flags != queue->flags) {
1154 /* From the Vulkan 1.1.70 spec:
1155 *
1156 * "The queue returned by vkGetDeviceQueue2 must have the same
1157 * flags value from this structure as that used at device
1158 * creation time in a VkDeviceQueueCreateInfo instance. If no
1159 * matching flags were specified at device creation time then
1160 * pQueue will return VK_NULL_HANDLE."
1161 */
1162 *pQueue = VK_NULL_HANDLE;
1163 return;
1164 }
1165
1166 *pQueue = tu_queue_to_handle(queue);
1167 }
1168
1169 void
1170 tu_GetDeviceQueue(VkDevice _device,
1171 uint32_t queueFamilyIndex,
1172 uint32_t queueIndex,
1173 VkQueue *pQueue)
1174 {
1175 const VkDeviceQueueInfo2 info =
1176 (VkDeviceQueueInfo2) { .sType = VK_STRUCTURE_TYPE_DEVICE_QUEUE_INFO_2,
1177 .queueFamilyIndex = queueFamilyIndex,
1178 .queueIndex = queueIndex };
1179
1180 tu_GetDeviceQueue2(_device, &info, pQueue);
1181 }
1182
1183 VkResult
1184 tu_QueueSubmit(VkQueue _queue,
1185 uint32_t submitCount,
1186 const VkSubmitInfo *pSubmits,
1187 VkFence _fence)
1188 {
1189 TU_FROM_HANDLE(tu_queue, queue, _queue);
1190
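/* For each VkSubmitInfo: count the IB entries of all its command buffers,
* build one drm_msm_gem_submit_cmd per entry plus a table of every BO
* referenced, then hand the whole batch to the kernel with a single
* DRM_MSM_GEM_SUBMIT ioctl on this queue's submitqueue.
*/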
1191 for (uint32_t i = 0; i < submitCount; ++i) {
1192 const VkSubmitInfo *submit = pSubmits + i;
1193 struct tu_bo_list bo_list;
1194 tu_bo_list_init(&bo_list);
1195
1196 uint32_t entry_count = 0;
1197 for (uint32_t j = 0; j < submit->commandBufferCount; ++j) {
1198 TU_FROM_HANDLE(tu_cmd_buffer, cmdbuf, submit->pCommandBuffers[j]);
1199 entry_count += cmdbuf->cs.entry_count;
1200 }
1201
1202 struct drm_msm_gem_submit_cmd cmds[entry_count];
1203 uint32_t entry_idx = 0;
1204 for (uint32_t j = 0; j < submit->commandBufferCount; ++j) {
1205 TU_FROM_HANDLE(tu_cmd_buffer, cmdbuf, submit->pCommandBuffers[j]);
1206 struct tu_cmd_stream *stream = &cmdbuf->cs;
1207 for (unsigned i = 0; i < stream->entry_count; ++i, ++entry_idx) {
1208 cmds[entry_idx].type = MSM_SUBMIT_CMD_BUF;
1209 cmds[entry_idx].submit_idx = tu_bo_list_add(&bo_list, stream->entries[i].bo);
1210 cmds[entry_idx].submit_offset = stream->entries[i].offset;
1211 cmds[entry_idx].size = stream->entries[i].size;
1212 cmds[entry_idx].pad = 0;
1213 cmds[entry_idx].nr_relocs = 0;
1214 cmds[entry_idx].relocs = 0;
1215
1216 }
1217 }
1218
1219 struct drm_msm_gem_submit_bo bos[bo_list.count];
1220 for (unsigned i = 0; i < bo_list.count; ++i) {
1221 bos[i].flags = MSM_SUBMIT_BO_READ | MSM_SUBMIT_BO_WRITE;
1222 bos[i].handle = bo_list.handles[i];
1223 bos[i].presumed = 0;
1224 }
1225
1226 struct drm_msm_gem_submit req = {
1227 .flags = MSM_PIPE_3D0,
1228 .queueid = queue->msm_queue_id,
1229 .bos = (uint64_t)(uintptr_t)bos,
1230 .nr_bos = bo_list.count,
1231 .cmds = (uint64_t)(uintptr_t)cmds,
1232 .nr_cmds = entry_count,
1233 };
1234
1235 int ret = drmCommandWriteRead(queue->device->physical_device->local_fd,
1236 DRM_MSM_GEM_SUBMIT,
1237 &req, sizeof(req));
1238 if (ret) {
1239 fprintf(stderr, "submit failed: %s\n", strerror(errno));
1240 abort();
1241 }
1242
1243 tu_bo_list_destroy(&bo_list);
1244 }
1245 return VK_SUCCESS;
1246 }
1247
1248 VkResult
1249 tu_QueueWaitIdle(VkQueue _queue)
1250 {
1251 return VK_SUCCESS;
1252 }
1253
1254 VkResult
1255 tu_DeviceWaitIdle(VkDevice _device)
1256 {
1257 TU_FROM_HANDLE(tu_device, device, _device);
1258
1259 for (unsigned i = 0; i < TU_MAX_QUEUE_FAMILIES; i++) {
1260 for (unsigned q = 0; q < device->queue_count[i]; q++) {
1261 tu_QueueWaitIdle(tu_queue_to_handle(&device->queues[i][q]));
1262 }
1263 }
1264 return VK_SUCCESS;
1265 }
1266
1267 VkResult
1268 tu_EnumerateInstanceExtensionProperties(const char *pLayerName,
1269 uint32_t *pPropertyCount,
1270 VkExtensionProperties *pProperties)
1271 {
1272 VK_OUTARRAY_MAKE(out, pProperties, pPropertyCount);
1273
1274 /* We support no layers */
1275 if (pLayerName)
1276 return vk_error(NULL, VK_ERROR_LAYER_NOT_PRESENT);
1277
1278 for (int i = 0; i < TU_INSTANCE_EXTENSION_COUNT; i++) {
1279 if (tu_supported_instance_extensions.extensions[i]) {
1280 vk_outarray_append(&out, prop) { *prop = tu_instance_extensions[i]; }
1281 }
1282 }
1283
1284 return vk_outarray_status(&out);
1285 }
1286
1287 VkResult
1288 tu_EnumerateDeviceExtensionProperties(VkPhysicalDevice physicalDevice,
1289 const char *pLayerName,
1290 uint32_t *pPropertyCount,
1291 VkExtensionProperties *pProperties)
1292 {
1293 /* We support no layers */
1294 TU_FROM_HANDLE(tu_physical_device, device, physicalDevice);
1295 VK_OUTARRAY_MAKE(out, pProperties, pPropertyCount);
1296
1297 /* We support no layers */
1298 if (pLayerName)
1299 return vk_error(NULL, VK_ERROR_LAYER_NOT_PRESENT);
1300
1301 for (int i = 0; i < TU_DEVICE_EXTENSION_COUNT; i++) {
1302 if (device->supported_extensions.extensions[i]) {
1303 vk_outarray_append(&out, prop) { *prop = tu_device_extensions[i]; }
1304 }
1305 }
1306
1307 return vk_outarray_status(&out);
1308 }
1309
1310 PFN_vkVoidFunction
1311 tu_GetInstanceProcAddr(VkInstance _instance, const char *pName)
1312 {
1313 TU_FROM_HANDLE(tu_instance, instance, _instance);
1314
1315 return tu_lookup_entrypoint_checked(
1316 pName, instance ? instance->api_version : 0,
1317 instance ? &instance->enabled_extensions : NULL, NULL);
1318 }
1319
1320 /* The loader wants us to expose a second GetInstanceProcAddr function
1321 * to work around certain LD_PRELOAD issues seen in apps.
1322 */
1323 PUBLIC
1324 VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL
1325 vk_icdGetInstanceProcAddr(VkInstance instance, const char *pName);
1326
1327 PUBLIC
1328 VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL
1329 vk_icdGetInstanceProcAddr(VkInstance instance, const char *pName)
1330 {
1331 return tu_GetInstanceProcAddr(instance, pName);
1332 }
1333
1334 PFN_vkVoidFunction
1335 tu_GetDeviceProcAddr(VkDevice _device, const char *pName)
1336 {
1337 TU_FROM_HANDLE(tu_device, device, _device);
1338
1339 return tu_lookup_entrypoint_checked(pName, device->instance->api_version,
1340 &device->instance->enabled_extensions,
1341 &device->enabled_extensions);
1342 }
1343
1344 static VkResult
1345 tu_alloc_memory(struct tu_device *device,
1346 const VkMemoryAllocateInfo *pAllocateInfo,
1347 const VkAllocationCallbacks *pAllocator,
1348 VkDeviceMemory *pMem)
1349 {
1350 struct tu_device_memory *mem;
1351 VkResult result;
1352
1353 assert(pAllocateInfo->sType == VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO);
1354
1355 if (pAllocateInfo->allocationSize == 0) {
1356 /* Apparently, this is allowed */
1357 *pMem = VK_NULL_HANDLE;
1358 return VK_SUCCESS;
1359 }
1360
1361 mem = vk_alloc2(&device->alloc, pAllocator, sizeof(*mem), 8,
1362 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
1363 if (mem == NULL)
1364 return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
1365
1366 result = tu_bo_init_new(device, &mem->bo, pAllocateInfo->allocationSize);
1367 if (result != VK_SUCCESS) {
1368 vk_free2(&device->alloc, pAllocator, mem);
1369 return result;
1370 }
1371
1372 mem->size = pAllocateInfo->allocationSize;
1373 mem->type_index = pAllocateInfo->memoryTypeIndex;
1374
1375 mem->map = NULL;
1376 mem->user_ptr = NULL;
1377
1378 *pMem = tu_device_memory_to_handle(mem);
1379
1380 return VK_SUCCESS;
1381 }
1382
1383 VkResult
1384 tu_AllocateMemory(VkDevice _device,
1385 const VkMemoryAllocateInfo *pAllocateInfo,
1386 const VkAllocationCallbacks *pAllocator,
1387 VkDeviceMemory *pMem)
1388 {
1389 TU_FROM_HANDLE(tu_device, device, _device);
1390 return tu_alloc_memory(device, pAllocateInfo, pAllocator, pMem);
1391 }
1392
1393 void
1394 tu_FreeMemory(VkDevice _device,
1395 VkDeviceMemory _mem,
1396 const VkAllocationCallbacks *pAllocator)
1397 {
1398 TU_FROM_HANDLE(tu_device, device, _device);
1399 TU_FROM_HANDLE(tu_device_memory, mem, _mem);
1400
1401 if (mem == NULL)
1402 return;
1403
1404 tu_bo_finish(device, &mem->bo);
1405 vk_free2(&device->alloc, pAllocator, mem);
1406 }
1407
1408 VkResult
1409 tu_MapMemory(VkDevice _device,
1410 VkDeviceMemory _memory,
1411 VkDeviceSize offset,
1412 VkDeviceSize size,
1413 VkMemoryMapFlags flags,
1414 void **ppData)
1415 {
1416 TU_FROM_HANDLE(tu_device, device, _device);
1417 TU_FROM_HANDLE(tu_device_memory, mem, _memory);
1418 VkResult result;
1419
1420 if (mem == NULL) {
1421 *ppData = NULL;
1422 return VK_SUCCESS;
1423 }
1424
1425 if (mem->user_ptr) {
1426 *ppData = mem->user_ptr;
1427 } else if (!mem->map) {
1428 result = tu_bo_map(device, &mem->bo);
1429 if (result != VK_SUCCESS)
1430 return result;
1431 *ppData = mem->map = mem->bo.map;
1432 } else
1433 *ppData = mem->map;
1434
1435 if (*ppData) {
1436 *ppData += offset;
1437 return VK_SUCCESS;
1438 }
1439
1440 return vk_error(device->instance, VK_ERROR_MEMORY_MAP_FAILED);
1441 }
1442
1443 void
1444 tu_UnmapMemory(VkDevice _device, VkDeviceMemory _memory)
1445 {
1446 /* I do not see any unmapping done by the freedreno Gallium driver. */
1447 }
1448
1449 VkResult
1450 tu_FlushMappedMemoryRanges(VkDevice _device,
1451 uint32_t memoryRangeCount,
1452 const VkMappedMemoryRange *pMemoryRanges)
1453 {
1454 return VK_SUCCESS;
1455 }
1456
1457 VkResult
1458 tu_InvalidateMappedMemoryRanges(VkDevice _device,
1459 uint32_t memoryRangeCount,
1460 const VkMappedMemoryRange *pMemoryRanges)
1461 {
1462 return VK_SUCCESS;
1463 }
1464
1465 void
1466 tu_GetBufferMemoryRequirements(VkDevice _device,
1467 VkBuffer _buffer,
1468 VkMemoryRequirements *pMemoryRequirements)
1469 {
1470 TU_FROM_HANDLE(tu_buffer, buffer, _buffer);
1471
1472 pMemoryRequirements->memoryTypeBits = 1;
1473 pMemoryRequirements->alignment = 16;
1474 pMemoryRequirements->size =
1475 align64(buffer->size, pMemoryRequirements->alignment);
1476 }
1477
1478 void
1479 tu_GetBufferMemoryRequirements2(
1480 VkDevice device,
1481 const VkBufferMemoryRequirementsInfo2KHR *pInfo,
1482 VkMemoryRequirements2KHR *pMemoryRequirements)
1483 {
1484 tu_GetBufferMemoryRequirements(device, pInfo->buffer,
1485 &pMemoryRequirements->memoryRequirements);
1486 }
1487
1488 void
1489 tu_GetImageMemoryRequirements(VkDevice _device,
1490 VkImage _image,
1491 VkMemoryRequirements *pMemoryRequirements)
1492 {
1493 TU_FROM_HANDLE(tu_image, image, _image);
1494
1495 pMemoryRequirements->memoryTypeBits = 1;
1496 pMemoryRequirements->size = image->size;
1497 pMemoryRequirements->alignment = image->alignment;
1498 }
1499
1500 void
1501 tu_GetImageMemoryRequirements2(VkDevice device,
1502 const VkImageMemoryRequirementsInfo2KHR *pInfo,
1503 VkMemoryRequirements2KHR *pMemoryRequirements)
1504 {
1505 tu_GetImageMemoryRequirements(device, pInfo->image,
1506 &pMemoryRequirements->memoryRequirements);
1507 }
1508
1509 void
1510 tu_GetImageSparseMemoryRequirements(
1511 VkDevice device,
1512 VkImage image,
1513 uint32_t *pSparseMemoryRequirementCount,
1514 VkSparseImageMemoryRequirements *pSparseMemoryRequirements)
1515 {
1516 tu_stub();
1517 }
1518
1519 void
1520 tu_GetImageSparseMemoryRequirements2(
1521 VkDevice device,
1522 const VkImageSparseMemoryRequirementsInfo2KHR *pInfo,
1523 uint32_t *pSparseMemoryRequirementCount,
1524 VkSparseImageMemoryRequirements2KHR *pSparseMemoryRequirements)
1525 {
1526 tu_stub();
1527 }
1528
1529 void
1530 tu_GetDeviceMemoryCommitment(VkDevice device,
1531 VkDeviceMemory memory,
1532 VkDeviceSize *pCommittedMemoryInBytes)
1533 {
1534 *pCommittedMemoryInBytes = 0;
1535 }
1536
1537 VkResult
1538 tu_BindBufferMemory2(VkDevice device,
1539 uint32_t bindInfoCount,
1540 const VkBindBufferMemoryInfoKHR *pBindInfos)
1541 {
1542 return VK_SUCCESS;
1543 }
1544
1545 VkResult
1546 tu_BindBufferMemory(VkDevice device,
1547 VkBuffer buffer,
1548 VkDeviceMemory memory,
1549 VkDeviceSize memoryOffset)
1550 {
1551 const VkBindBufferMemoryInfoKHR info = {
1552 .sType = VK_STRUCTURE_TYPE_BIND_BUFFER_MEMORY_INFO_KHR,
1553 .buffer = buffer,
1554 .memory = memory,
1555 .memoryOffset = memoryOffset
1556 };
1557
1558 return tu_BindBufferMemory2(device, 1, &info);
1559 }
1560
1561 VkResult
1562 tu_BindImageMemory2(VkDevice device,
1563 uint32_t bindInfoCount,
1564 const VkBindImageMemoryInfoKHR *pBindInfos)
1565 {
1566 return VK_SUCCESS;
1567 }
1568
1569 VkResult
1570 tu_BindImageMemory(VkDevice device,
1571 VkImage image,
1572 VkDeviceMemory memory,
1573 VkDeviceSize memoryOffset)
1574 {
1575 const VkBindImageMemoryInfoKHR info = {
1576 .sType = VK_STRUCTURE_TYPE_BIND_IMAGE_MEMORY_INFO_KHR,
1577 .image = image,
1578 .memory = memory,
1579 .memoryOffset = memoryOffset
1580 };
1581
1582 return tu_BindImageMemory2(device, 1, &info);
1583 }
1584
1585 VkResult
1586 tu_QueueBindSparse(VkQueue _queue,
1587 uint32_t bindInfoCount,
1588 const VkBindSparseInfo *pBindInfo,
1589 VkFence _fence)
1590 {
1591 return VK_SUCCESS;
1592 }
1593
1594 VkResult
1595 tu_CreateFence(VkDevice _device,
1596 const VkFenceCreateInfo *pCreateInfo,
1597 const VkAllocationCallbacks *pAllocator,
1598 VkFence *pFence)
1599 {
1600 TU_FROM_HANDLE(tu_device, device, _device);
1601
1602 struct tu_fence *fence =
1603 vk_alloc2(&device->alloc, pAllocator, sizeof(*fence), 8,
1604 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
1605
1606 if (!fence)
1607 return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
1608
1609 *pFence = tu_fence_to_handle(fence);
1610
1611 return VK_SUCCESS;
1612 }
1613
1614 void
1615 tu_DestroyFence(VkDevice _device,
1616 VkFence _fence,
1617 const VkAllocationCallbacks *pAllocator)
1618 {
1619 TU_FROM_HANDLE(tu_device, device, _device);
1620 TU_FROM_HANDLE(tu_fence, fence, _fence);
1621
1622 if (!fence)
1623 return;
1624
1625 vk_free2(&device->alloc, pAllocator, fence);
1626 }
1627
1628 VkResult
1629 tu_WaitForFences(VkDevice _device,
1630 uint32_t fenceCount,
1631 const VkFence *pFences,
1632 VkBool32 waitAll,
1633 uint64_t timeout)
1634 {
1635 return VK_SUCCESS;
1636 }
1637
1638 VkResult
1639 tu_ResetFences(VkDevice _device, uint32_t fenceCount, const VkFence *pFences)
1640 {
1641 return VK_SUCCESS;
1642 }
1643
1644 VkResult
1645 tu_GetFenceStatus(VkDevice _device, VkFence _fence)
1646 {
1647 return VK_SUCCESS;
1648 }
1649
1650 // Queue semaphore functions
1651
1652 VkResult
1653 tu_CreateSemaphore(VkDevice _device,
1654 const VkSemaphoreCreateInfo *pCreateInfo,
1655 const VkAllocationCallbacks *pAllocator,
1656 VkSemaphore *pSemaphore)
1657 {
1658 TU_FROM_HANDLE(tu_device, device, _device);
1659
1660 struct tu_semaphore *sem =
1661 vk_alloc2(&device->alloc, pAllocator, sizeof(*sem), 8,
1662 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
1663 if (!sem)
1664 return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
1665
1666 *pSemaphore = tu_semaphore_to_handle(sem);
1667 return VK_SUCCESS;
1668 }
1669
1670 void
1671 tu_DestroySemaphore(VkDevice _device,
1672 VkSemaphore _semaphore,
1673 const VkAllocationCallbacks *pAllocator)
1674 {
1675 TU_FROM_HANDLE(tu_device, device, _device);
1676 TU_FROM_HANDLE(tu_semaphore, sem, _semaphore);
1677 if (!_semaphore)
1678 return;
1679
1680 vk_free2(&device->alloc, pAllocator, sem);
1681 }
1682
1683 VkResult
1684 tu_CreateEvent(VkDevice _device,
1685 const VkEventCreateInfo *pCreateInfo,
1686 const VkAllocationCallbacks *pAllocator,
1687 VkEvent *pEvent)
1688 {
1689 TU_FROM_HANDLE(tu_device, device, _device);
1690 struct tu_event *event =
1691 vk_alloc2(&device->alloc, pAllocator, sizeof(*event), 8,
1692 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
1693
1694 if (!event)
1695 return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
1696
1697 *pEvent = tu_event_to_handle(event);
1698
1699 return VK_SUCCESS;
1700 }
1701
1702 void
1703 tu_DestroyEvent(VkDevice _device,
1704 VkEvent _event,
1705 const VkAllocationCallbacks *pAllocator)
1706 {
1707 TU_FROM_HANDLE(tu_device, device, _device);
1708 TU_FROM_HANDLE(tu_event, event, _event);
1709
1710 if (!event)
1711 return;
1712 vk_free2(&device->alloc, pAllocator, event);
1713 }
1714
1715 VkResult
1716 tu_GetEventStatus(VkDevice _device, VkEvent _event)
1717 {
1718 TU_FROM_HANDLE(tu_event, event, _event);
1719
1720 if (*event->map == 1)
1721 return VK_EVENT_SET;
1722 return VK_EVENT_RESET;
1723 }
1724
1725 VkResult
1726 tu_SetEvent(VkDevice _device, VkEvent _event)
1727 {
1728 TU_FROM_HANDLE(tu_event, event, _event);
1729 *event->map = 1;
1730
1731 return VK_SUCCESS;
1732 }
1733
1734 VkResult
1735 tu_ResetEvent(VkDevice _device, VkEvent _event)
1736 {
1737 TU_FROM_HANDLE(tu_event, event, _event);
1738 *event->map = 0;
1739
1740 return VK_SUCCESS;
1741 }
1742
1743 VkResult
1744 tu_CreateBuffer(VkDevice _device,
1745 const VkBufferCreateInfo *pCreateInfo,
1746 const VkAllocationCallbacks *pAllocator,
1747 VkBuffer *pBuffer)
1748 {
1749 TU_FROM_HANDLE(tu_device, device, _device);
1750 struct tu_buffer *buffer;
1751
1752 assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO);
1753
1754 buffer = vk_alloc2(&device->alloc, pAllocator, sizeof(*buffer), 8,
1755 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
1756 if (buffer == NULL)
1757 return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
1758
1759 buffer->size = pCreateInfo->size;
1760 buffer->usage = pCreateInfo->usage;
1761 buffer->flags = pCreateInfo->flags;
1762
1763 *pBuffer = tu_buffer_to_handle(buffer);
1764
1765 return VK_SUCCESS;
1766 }
1767
1768 void
1769 tu_DestroyBuffer(VkDevice _device,
1770 VkBuffer _buffer,
1771 const VkAllocationCallbacks *pAllocator)
1772 {
1773 TU_FROM_HANDLE(tu_device, device, _device);
1774 TU_FROM_HANDLE(tu_buffer, buffer, _buffer);
1775
1776 if (!buffer)
1777 return;
1778
1779 vk_free2(&device->alloc, pAllocator, buffer);
1780 }
1781
1782 static uint32_t
1783 tu_surface_max_layer_count(struct tu_image_view *iview)
1784 {
1785 return iview->type == VK_IMAGE_VIEW_TYPE_3D
1786 ? iview->extent.depth
1787 : (iview->base_layer + iview->layer_count);
1788 }
1789
1790 VkResult
1791 tu_CreateFramebuffer(VkDevice _device,
1792 const VkFramebufferCreateInfo *pCreateInfo,
1793 const VkAllocationCallbacks *pAllocator,
1794 VkFramebuffer *pFramebuffer)
1795 {
1796 TU_FROM_HANDLE(tu_device, device, _device);
1797 struct tu_framebuffer *framebuffer;
1798
1799 assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO);
1800
1801 size_t size = sizeof(*framebuffer) + sizeof(struct tu_attachment_info) *
1802 pCreateInfo->attachmentCount;
1803 framebuffer = vk_alloc2(&device->alloc, pAllocator, size, 8,
1804 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
1805 if (framebuffer == NULL)
1806 return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
1807
1808 framebuffer->attachment_count = pCreateInfo->attachmentCount;
1809 framebuffer->width = pCreateInfo->width;
1810 framebuffer->height = pCreateInfo->height;
1811 framebuffer->layers = pCreateInfo->layers;
1812 for (uint32_t i = 0; i < pCreateInfo->attachmentCount; i++) {
1813 VkImageView _iview = pCreateInfo->pAttachments[i];
1814 struct tu_image_view *iview = tu_image_view_from_handle(_iview);
1815 framebuffer->attachments[i].attachment = iview;
1816
1817 framebuffer->width = MIN2(framebuffer->width, iview->extent.width);
1818 framebuffer->height = MIN2(framebuffer->height, iview->extent.height);
1819 framebuffer->layers =
1820 MIN2(framebuffer->layers, tu_surface_max_layer_count(iview));
1821 }
1822
1823 *pFramebuffer = tu_framebuffer_to_handle(framebuffer);
1824 return VK_SUCCESS;
1825 }
1826
1827 void
1828 tu_DestroyFramebuffer(VkDevice _device,
1829 VkFramebuffer _fb,
1830 const VkAllocationCallbacks *pAllocator)
1831 {
1832 TU_FROM_HANDLE(tu_device, device, _device);
1833 TU_FROM_HANDLE(tu_framebuffer, fb, _fb);
1834
1835 if (!fb)
1836 return;
1837 vk_free2(&device->alloc, pAllocator, fb);
1838 }
1839
1840 static void
1841 tu_init_sampler(struct tu_device *device,
1842 struct tu_sampler *sampler,
1843 const VkSamplerCreateInfo *pCreateInfo)
1844 {
1845 }
1846
1847 VkResult
1848 tu_CreateSampler(VkDevice _device,
1849 const VkSamplerCreateInfo *pCreateInfo,
1850 const VkAllocationCallbacks *pAllocator,
1851 VkSampler *pSampler)
1852 {
1853 TU_FROM_HANDLE(tu_device, device, _device);
1854 struct tu_sampler *sampler;
1855
1856 assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_SAMPLER_CREATE_INFO);
1857
1858 sampler = vk_alloc2(&device->alloc, pAllocator, sizeof(*sampler), 8,
1859 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
1860 if (!sampler)
1861 return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
1862
1863 tu_init_sampler(device, sampler, pCreateInfo);
1864 *pSampler = tu_sampler_to_handle(sampler);
1865
1866 return VK_SUCCESS;
1867 }
1868
1869 void
1870 tu_DestroySampler(VkDevice _device,
1871 VkSampler _sampler,
1872 const VkAllocationCallbacks *pAllocator)
1873 {
1874 TU_FROM_HANDLE(tu_device, device, _device);
1875 TU_FROM_HANDLE(tu_sampler, sampler, _sampler);
1876
1877 if (!sampler)
1878 return;
1879 vk_free2(&device->alloc, pAllocator, sampler);
1880 }
1881
1882 /* vk_icd.h does not declare this function, so we declare it here to
1883 * suppress Wmissing-prototypes.
1884 */
1885 PUBLIC VKAPI_ATTR VkResult VKAPI_CALL
1886 vk_icdNegotiateLoaderICDInterfaceVersion(uint32_t *pSupportedVersion);
1887
1888 PUBLIC VKAPI_ATTR VkResult VKAPI_CALL
1889 vk_icdNegotiateLoaderICDInterfaceVersion(uint32_t *pSupportedVersion)
1890 {
1891 /* For the full details on loader interface versioning, see
1892 * <https://github.com/KhronosGroup/Vulkan-LoaderAndValidationLayers/blob/master/loader/LoaderAndLayerInterface.md>.
1893 * What follows is a condensed summary, to help you navigate the large and
1894 * confusing official doc.
1895 *
1896 * - Loader interface v0 is incompatible with later versions. We don't
1897 * support it.
1898 *
1899 * - In loader interface v1:
1900 * - The first ICD entrypoint called by the loader is
1901 * vk_icdGetInstanceProcAddr(). The ICD must statically expose this
1902 * entrypoint.
1903 * - The ICD must statically expose no other Vulkan symbol unless it
1904 * is linked with -Bsymbolic.
1905 * - Each dispatchable Vulkan handle created by the ICD must be
1906 * a pointer to a struct whose first member is VK_LOADER_DATA. The
1907 * ICD must initialize VK_LOADER_DATA.loadMagic to
1908 * ICD_LOADER_MAGIC.
1909 * - The loader implements vkCreate{PLATFORM}SurfaceKHR() and
1910 * vkDestroySurfaceKHR(). The ICD must be capable of working with
1911 * such loader-managed surfaces.
1912 *
1913 * - Loader interface v2 differs from v1 in:
1914 * - The first ICD entrypoint called by the loader is
1915 * vk_icdNegotiateLoaderICDInterfaceVersion(). The ICD must
1916 * statically expose this entrypoint.
1917 *
1918 * - Loader interface v3 differs from v2 in:
1919 * - The ICD must implement vkCreate{PLATFORM}SurfaceKHR(),
1920 * vkDestroySurfaceKHR(), and other API which uses VKSurfaceKHR,
1921 * because the loader no longer does so.
1922 */
1923 *pSupportedVersion = MIN2(*pSupportedVersion, 3u);
1924 return VK_SUCCESS;
1925 }
1926
1927 void
1928 tu_GetPhysicalDeviceExternalSemaphoreProperties(
1929 VkPhysicalDevice physicalDevice,
1930 const VkPhysicalDeviceExternalSemaphoreInfoKHR *pExternalSemaphoreInfo,
1931 VkExternalSemaphorePropertiesKHR *pExternalSemaphoreProperties)
1932 {
1933 pExternalSemaphoreProperties->exportFromImportedHandleTypes = 0;
1934 pExternalSemaphoreProperties->compatibleHandleTypes = 0;
1935 pExternalSemaphoreProperties->externalSemaphoreFeatures = 0;
1936 }
1937
1938 void
1939 tu_GetPhysicalDeviceExternalFenceProperties(
1940 VkPhysicalDevice physicalDevice,
1941 const VkPhysicalDeviceExternalFenceInfoKHR *pExternalFenceInfo,
1942 VkExternalFencePropertiesKHR *pExternalFenceProperties)
1943 {
1944 pExternalFenceProperties->exportFromImportedHandleTypes = 0;
1945 pExternalFenceProperties->compatibleHandleTypes = 0;
1946 pExternalFenceProperties->externalFenceFeatures = 0;
1947 }
1948
1949 VkResult
1950 tu_CreateDebugReportCallbackEXT(
1951 VkInstance _instance,
1952 const VkDebugReportCallbackCreateInfoEXT *pCreateInfo,
1953 const VkAllocationCallbacks *pAllocator,
1954 VkDebugReportCallbackEXT *pCallback)
1955 {
1956 TU_FROM_HANDLE(tu_instance, instance, _instance);
1957 return vk_create_debug_report_callback(&instance->debug_report_callbacks,
1958 pCreateInfo, pAllocator,
1959 &instance->alloc, pCallback);
1960 }
1961
1962 void
1963 tu_DestroyDebugReportCallbackEXT(VkInstance _instance,
1964 VkDebugReportCallbackEXT _callback,
1965 const VkAllocationCallbacks *pAllocator)
1966 {
1967 TU_FROM_HANDLE(tu_instance, instance, _instance);
1968 vk_destroy_debug_report_callback(&instance->debug_report_callbacks,
1969 _callback, pAllocator, &instance->alloc);
1970 }
1971
1972 void
1973 tu_DebugReportMessageEXT(VkInstance _instance,
1974 VkDebugReportFlagsEXT flags,
1975 VkDebugReportObjectTypeEXT objectType,
1976 uint64_t object,
1977 size_t location,
1978 int32_t messageCode,
1979 const char *pLayerPrefix,
1980 const char *pMessage)
1981 {
1982 TU_FROM_HANDLE(tu_instance, instance, _instance);
1983 vk_debug_report(&instance->debug_report_callbacks, flags, objectType,
1984 object, location, messageCode, pLayerPrefix, pMessage);
1985 }
1986
1987 void
1988 tu_GetDeviceGroupPeerMemoryFeatures(
1989 VkDevice device,
1990 uint32_t heapIndex,
1991 uint32_t localDeviceIndex,
1992 uint32_t remoteDeviceIndex,
1993 VkPeerMemoryFeatureFlags *pPeerMemoryFeatures)
1994 {
1995 assert(localDeviceIndex == remoteDeviceIndex);
1996
1997 *pPeerMemoryFeatures = VK_PEER_MEMORY_FEATURE_COPY_SRC_BIT |
1998 VK_PEER_MEMORY_FEATURE_COPY_DST_BIT |
1999 VK_PEER_MEMORY_FEATURE_GENERIC_SRC_BIT |
2000 VK_PEER_MEMORY_FEATURE_GENERIC_DST_BIT;
2001 }