turnip: Implement submission.
[mesa.git] / src / freedreno / vulkan / tu_device.c
/*
 * Copyright © 2016 Red Hat.
 * Copyright © 2016 Bas Nieuwenhuizen
 *
 * based in part on anv driver which is:
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

#include "tu_private.h"

#include <errno.h>
#include <fcntl.h>
#include <msm_drm.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/mman.h>
#include <sys/sysinfo.h>
#include <unistd.h>
#include <xf86drm.h>

#include "util/debug.h"
#include "util/disk_cache.h"
#include "util/strtod.h"
#include "vk_format.h"
#include "vk_util.h"

static int
tu_device_get_cache_uuid(uint16_t family, void *uuid)
{
   uint32_t mesa_timestamp;
   uint16_t f = family;
   memset(uuid, 0, VK_UUID_SIZE);
   if (!disk_cache_get_function_timestamp(tu_device_get_cache_uuid,
                                          &mesa_timestamp))
      return -1;

   memcpy(uuid, &mesa_timestamp, 4);
   memcpy((char *) uuid + 4, &f, 2);
   snprintf((char *) uuid + 6, VK_UUID_SIZE - 10, "tu");
   return 0;
}

static void
tu_get_driver_uuid(void *uuid)
{
   memset(uuid, 0, VK_UUID_SIZE);
   snprintf(uuid, VK_UUID_SIZE, "freedreno");
}

static void
tu_get_device_uuid(void *uuid)
{
   memset(uuid, 0, VK_UUID_SIZE);
}

VkResult
tu_bo_init_new(struct tu_device *dev, struct tu_bo *bo, uint64_t size)
{
   /* TODO: Choose better flags. As of 2018-11-12, freedreno/drm/msm_bo.c
    * always sets `flags = MSM_BO_WC`, and we copy that behavior here.
    */
   uint32_t gem_handle = tu_gem_new(dev, size, MSM_BO_WC);
   if (!gem_handle)
      goto fail_new;

   /* Calling DRM_MSM_GEM_INFO forces the kernel to allocate backing pages. We
    * want immediate backing pages because vkAllocateMemory and friends must
    * not lazily fail.
    *
    * TODO(chadv): Must we really call DRM_MSM_GEM_INFO to acquire backing
    * pages? I infer so from reading comments in msm_bo.c:bo_allocate(), but
    * maybe I misunderstand.
    */

   /* TODO: Do we need 'offset' if we have 'iova'? */
   uint64_t offset = tu_gem_info_offset(dev, gem_handle);
   if (!offset)
      goto fail_info;

   uint64_t iova = tu_gem_info_iova(dev, gem_handle);
   if (!iova)
      goto fail_info;

   *bo = (struct tu_bo) {
      .gem_handle = gem_handle,
      .size = size,
      .offset = offset,
      .iova = iova,
   };

   return VK_SUCCESS;

fail_info:
   /* *bo has not been initialized yet, so close the local gem_handle. */
   tu_gem_close(dev, gem_handle);
fail_new:
   return vk_error(dev->instance, VK_ERROR_OUT_OF_DEVICE_MEMORY);
}

VkResult
tu_bo_map(struct tu_device *dev, struct tu_bo *bo)
{
   if (bo->map)
      return VK_SUCCESS;

   /* TODO: Should we use the wrapper os_mmap() like Freedreno does? */
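   /* The 'offset' we queried through DRM_MSM_GEM_INFO is the fake mmap
    * offset the kernel assigned to this GEM object, so mmap()ing the DRM fd
    * at that offset should give us a CPU mapping of the BO's backing pages.
    */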
   void *map = mmap(0, bo->size, PROT_READ | PROT_WRITE, MAP_SHARED,
                    dev->physical_device->local_fd, bo->offset);
   if (map == MAP_FAILED)
      return vk_error(dev->instance, VK_ERROR_MEMORY_MAP_FAILED);

   bo->map = map;
   return VK_SUCCESS;
}

void
tu_bo_finish(struct tu_device *dev, struct tu_bo *bo)
{
   assert(bo->gem_handle);

   if (bo->map)
      munmap(bo->map, bo->size);

   tu_gem_close(dev, bo->gem_handle);
}

static VkResult
tu_physical_device_init(struct tu_physical_device *device,
                        struct tu_instance *instance,
                        drmDevicePtr drm_device)
{
   const char *path = drm_device->nodes[DRM_NODE_RENDER];
   VkResult result = VK_SUCCESS;
   drmVersionPtr version;
   int fd;
   int master_fd = -1;
   uint64_t val;

   fd = open(path, O_RDWR | O_CLOEXEC);
   if (fd < 0) {
      return vk_errorf(instance, VK_ERROR_INCOMPATIBLE_DRIVER,
                       "failed to open device %s", path);
   }

   /* Version 1.3 added MSM_INFO_IOVA. */
   const int min_version_major = 1;
   const int min_version_minor = 3;

   version = drmGetVersion(fd);
   if (!version) {
      close(fd);
      return vk_errorf(instance, VK_ERROR_INCOMPATIBLE_DRIVER,
                       "failed to query kernel driver version for device %s",
                       path);
   }

   if (strcmp(version->name, "msm")) {
      drmFreeVersion(version);
      if (master_fd != -1)
         close(master_fd);
      close(fd);
      return vk_errorf(instance, VK_ERROR_INCOMPATIBLE_DRIVER,
                       "device %s does not use the msm kernel driver", path);
   }

   if (version->version_major != min_version_major ||
       version->version_minor < min_version_minor) {
      result = vk_errorf(instance, VK_ERROR_INCOMPATIBLE_DRIVER,
                         "kernel driver for device %s has version %d.%d, "
                         "but Vulkan requires version >= %d.%d",
                         path, version->version_major, version->version_minor,
                         min_version_major, min_version_minor);
      drmFreeVersion(version);
      close(fd);
      return result;
   }

   drmFreeVersion(version);

   if (instance->debug_flags & TU_DEBUG_STARTUP)
      tu_logi("Found compatible device '%s'.", path);

   device->_loader_data.loaderMagic = ICD_LOADER_MAGIC;
   device->instance = instance;
   assert(strlen(path) < ARRAY_SIZE(device->path));
   strncpy(device->path, path, ARRAY_SIZE(device->path));

   if (instance->enabled_extensions.KHR_display) {
      master_fd =
         open(drm_device->nodes[DRM_NODE_PRIMARY], O_RDWR | O_CLOEXEC);
      if (master_fd >= 0) {
         /* TODO: free master_fd if accel is not working? */
      }
   }

   device->master_fd = master_fd;
   device->local_fd = fd;

   device->drm_device = fd_device_new_dup(fd);
   if (!device->drm_device) {
      if (instance->debug_flags & TU_DEBUG_STARTUP)
         tu_logi("Could not create the libdrm device");
      result = vk_errorf(instance, VK_ERROR_INITIALIZATION_FAILED,
                         "could not create the libdrm device");
      goto fail;
   }

   if (tu_drm_query_param(device, MSM_PARAM_GPU_ID, &val)) {
      if (instance->debug_flags & TU_DEBUG_STARTUP)
         tu_logi("Could not query the GPU ID");
      result = vk_errorf(instance, VK_ERROR_INITIALIZATION_FAILED,
                         "could not get GPU ID");
      goto fail;
   }
   device->gpu_id = val;

   if (tu_drm_query_param(device, MSM_PARAM_GMEM_SIZE, &val)) {
      if (instance->debug_flags & TU_DEBUG_STARTUP)
         tu_logi("Could not query the GMEM size");
      result = vk_errorf(instance, VK_ERROR_INITIALIZATION_FAILED,
                         "could not get GMEM size");
      goto fail;
   }
   device->gmem_size = val;

   memset(device->name, 0, sizeof(device->name));
   sprintf(device->name, "FD%d", device->gpu_id);

   switch (device->gpu_id) {
   case 530:
   case 630:
      break;
   default:
      result = vk_errorf(instance, VK_ERROR_INITIALIZATION_FAILED,
                         "device %s is unsupported", device->name);
      goto fail;
   }
   if (tu_device_get_cache_uuid(device->gpu_id, device->cache_uuid)) {
      result = vk_errorf(instance, VK_ERROR_INITIALIZATION_FAILED,
                         "cannot generate UUID");
      goto fail;
   }

   /* The gpu id is already embedded in the uuid so we just pass "tu"
    * when creating the cache.
    */
   char buf[VK_UUID_SIZE * 2 + 1];
   disk_cache_format_hex_id(buf, device->cache_uuid, VK_UUID_SIZE * 2);
   device->disk_cache = disk_cache_create(device->name, buf, 0);

   fprintf(stderr, "WARNING: tu is not a conformant vulkan implementation, "
                   "testing use only.\n");

   tu_get_driver_uuid(&device->driver_uuid);
   tu_get_device_uuid(&device->device_uuid);

   tu_fill_device_extension_table(device, &device->supported_extensions);

   if (result != VK_SUCCESS) {
      vk_error(instance, result);
      goto fail;
   }

   return VK_SUCCESS;

fail:
   if (device->drm_device)
      fd_device_del(device->drm_device);
   close(fd);
   if (master_fd != -1)
      close(master_fd);
   return result;
}

static void
tu_physical_device_finish(struct tu_physical_device *device)
{
   disk_cache_destroy(device->disk_cache);
   close(device->local_fd);
   if (device->master_fd != -1)
      close(device->master_fd);
}

static void *
default_alloc_func(void *pUserData,
                   size_t size,
                   size_t align,
                   VkSystemAllocationScope allocationScope)
{
   return malloc(size);
}

static void *
default_realloc_func(void *pUserData,
                     void *pOriginal,
                     size_t size,
                     size_t align,
                     VkSystemAllocationScope allocationScope)
{
   return realloc(pOriginal, size);
}

static void
default_free_func(void *pUserData, void *pMemory)
{
   free(pMemory);
}

static const VkAllocationCallbacks default_alloc = {
   .pUserData = NULL,
   .pfnAllocation = default_alloc_func,
   .pfnReallocation = default_realloc_func,
   .pfnFree = default_free_func,
};

static const struct debug_control tu_debug_options[] = {
   { "startup", TU_DEBUG_STARTUP }, { NULL, 0 }
};

const char *
tu_get_debug_option_name(int id)
{
   assert(id < ARRAY_SIZE(tu_debug_options) - 1);
   return tu_debug_options[id].string;
}

static int
tu_get_instance_extension_index(const char *name)
{
   for (unsigned i = 0; i < TU_INSTANCE_EXTENSION_COUNT; ++i) {
      if (strcmp(name, tu_instance_extensions[i].extensionName) == 0)
         return i;
   }
   return -1;
}

VkResult
tu_CreateInstance(const VkInstanceCreateInfo *pCreateInfo,
                  const VkAllocationCallbacks *pAllocator,
                  VkInstance *pInstance)
{
   struct tu_instance *instance;
   VkResult result;

   assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_INSTANCE_CREATE_INFO);

   uint32_t client_version;
   if (pCreateInfo->pApplicationInfo &&
       pCreateInfo->pApplicationInfo->apiVersion != 0) {
      client_version = pCreateInfo->pApplicationInfo->apiVersion;
   } else {
      tu_EnumerateInstanceVersion(&client_version);
   }

   instance = vk_zalloc2(&default_alloc, pAllocator, sizeof(*instance), 8,
                         VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
   if (!instance)
      return vk_error(NULL, VK_ERROR_OUT_OF_HOST_MEMORY);

   instance->_loader_data.loaderMagic = ICD_LOADER_MAGIC;

   if (pAllocator)
      instance->alloc = *pAllocator;
   else
      instance->alloc = default_alloc;

   instance->api_version = client_version;
   instance->physical_device_count = -1;

   instance->debug_flags =
      parse_debug_string(getenv("TU_DEBUG"), tu_debug_options);

   if (instance->debug_flags & TU_DEBUG_STARTUP)
      tu_logi("Created an instance");

   for (uint32_t i = 0; i < pCreateInfo->enabledExtensionCount; i++) {
      const char *ext_name = pCreateInfo->ppEnabledExtensionNames[i];
      int index = tu_get_instance_extension_index(ext_name);

      if (index < 0 || !tu_supported_instance_extensions.extensions[index]) {
         vk_free2(&default_alloc, pAllocator, instance);
         return vk_error(instance, VK_ERROR_EXTENSION_NOT_PRESENT);
      }

      instance->enabled_extensions.extensions[index] = true;
   }

   result = vk_debug_report_instance_init(&instance->debug_report_callbacks);
   if (result != VK_SUCCESS) {
      vk_free2(&default_alloc, pAllocator, instance);
      return vk_error(instance, result);
   }

   _mesa_locale_init();

   VG(VALGRIND_CREATE_MEMPOOL(instance, 0, false));

   *pInstance = tu_instance_to_handle(instance);

   return VK_SUCCESS;
}

void
tu_DestroyInstance(VkInstance _instance,
                   const VkAllocationCallbacks *pAllocator)
{
   TU_FROM_HANDLE(tu_instance, instance, _instance);

   if (!instance)
      return;

   for (int i = 0; i < instance->physical_device_count; ++i) {
      tu_physical_device_finish(instance->physical_devices + i);
   }

   VG(VALGRIND_DESTROY_MEMPOOL(instance));

   _mesa_locale_fini();

   vk_debug_report_instance_destroy(&instance->debug_report_callbacks);

   vk_free(&instance->alloc, instance);
}

static VkResult
tu_enumerate_devices(struct tu_instance *instance)
{
   /* TODO: Check for more devices ? */
   drmDevicePtr devices[8];
   VkResult result = VK_ERROR_INCOMPATIBLE_DRIVER;
   int max_devices;

   instance->physical_device_count = 0;

   max_devices = drmGetDevices2(0, devices, ARRAY_SIZE(devices));

   if (instance->debug_flags & TU_DEBUG_STARTUP)
      tu_logi("Found %d drm nodes", max_devices);

   if (max_devices < 1)
      return vk_error(instance, VK_ERROR_INCOMPATIBLE_DRIVER);

   for (unsigned i = 0; i < (unsigned) max_devices; i++) {
      if (devices[i]->available_nodes & 1 << DRM_NODE_RENDER &&
          devices[i]->bustype == DRM_BUS_PLATFORM) {

         result = tu_physical_device_init(
            instance->physical_devices + instance->physical_device_count,
            instance, devices[i]);
         if (result == VK_SUCCESS)
            ++instance->physical_device_count;
         else if (result != VK_ERROR_INCOMPATIBLE_DRIVER)
            break;
      }
   }
   drmFreeDevices(devices, max_devices);

   return result;
}

VkResult
tu_EnumeratePhysicalDevices(VkInstance _instance,
                            uint32_t *pPhysicalDeviceCount,
                            VkPhysicalDevice *pPhysicalDevices)
{
   TU_FROM_HANDLE(tu_instance, instance, _instance);
   VK_OUTARRAY_MAKE(out, pPhysicalDevices, pPhysicalDeviceCount);

   VkResult result;

   if (instance->physical_device_count < 0) {
      result = tu_enumerate_devices(instance);
      if (result != VK_SUCCESS && result != VK_ERROR_INCOMPATIBLE_DRIVER)
         return result;
   }

   for (uint32_t i = 0; i < instance->physical_device_count; ++i) {
      vk_outarray_append(&out, p)
      {
         *p = tu_physical_device_to_handle(instance->physical_devices + i);
      }
   }

   return vk_outarray_status(&out);
}

VkResult
tu_EnumeratePhysicalDeviceGroups(
   VkInstance _instance,
   uint32_t *pPhysicalDeviceGroupCount,
   VkPhysicalDeviceGroupProperties *pPhysicalDeviceGroupProperties)
{
   TU_FROM_HANDLE(tu_instance, instance, _instance);
   VK_OUTARRAY_MAKE(out, pPhysicalDeviceGroupProperties,
                    pPhysicalDeviceGroupCount);
   VkResult result;

   if (instance->physical_device_count < 0) {
      result = tu_enumerate_devices(instance);
      if (result != VK_SUCCESS && result != VK_ERROR_INCOMPATIBLE_DRIVER)
         return result;
   }

   for (uint32_t i = 0; i < instance->physical_device_count; ++i) {
      vk_outarray_append(&out, p)
      {
         p->physicalDeviceCount = 1;
         p->physicalDevices[0] =
            tu_physical_device_to_handle(instance->physical_devices + i);
         p->subsetAllocation = false;
      }
   }

   return vk_outarray_status(&out);
}

void
tu_GetPhysicalDeviceFeatures(VkPhysicalDevice physicalDevice,
                             VkPhysicalDeviceFeatures *pFeatures)
{
   memset(pFeatures, 0, sizeof(*pFeatures));

   *pFeatures = (VkPhysicalDeviceFeatures) {
      .robustBufferAccess = false,
      .fullDrawIndexUint32 = false,
      .imageCubeArray = false,
      .independentBlend = false,
      .geometryShader = false,
      .tessellationShader = false,
      .sampleRateShading = false,
      .dualSrcBlend = false,
      .logicOp = false,
      .multiDrawIndirect = false,
      .drawIndirectFirstInstance = false,
      .depthClamp = false,
      .depthBiasClamp = false,
      .fillModeNonSolid = false,
      .depthBounds = false,
      .wideLines = false,
      .largePoints = false,
      .alphaToOne = false,
      .multiViewport = false,
      .samplerAnisotropy = false,
      .textureCompressionETC2 = false,
      .textureCompressionASTC_LDR = false,
      .textureCompressionBC = false,
      .occlusionQueryPrecise = false,
      .pipelineStatisticsQuery = false,
      .vertexPipelineStoresAndAtomics = false,
      .fragmentStoresAndAtomics = false,
      .shaderTessellationAndGeometryPointSize = false,
      .shaderImageGatherExtended = false,
      .shaderStorageImageExtendedFormats = false,
      .shaderStorageImageMultisample = false,
      .shaderUniformBufferArrayDynamicIndexing = false,
      .shaderSampledImageArrayDynamicIndexing = false,
      .shaderStorageBufferArrayDynamicIndexing = false,
      .shaderStorageImageArrayDynamicIndexing = false,
      .shaderStorageImageReadWithoutFormat = false,
      .shaderStorageImageWriteWithoutFormat = false,
      .shaderClipDistance = false,
      .shaderCullDistance = false,
      .shaderFloat64 = false,
      .shaderInt64 = false,
      .shaderInt16 = false,
      .sparseBinding = false,
      .variableMultisampleRate = false,
      .inheritedQueries = false,
   };
}

void
tu_GetPhysicalDeviceFeatures2(VkPhysicalDevice physicalDevice,
                              VkPhysicalDeviceFeatures2KHR *pFeatures)
{
   vk_foreach_struct(ext, pFeatures->pNext)
   {
      switch (ext->sType) {
      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VARIABLE_POINTER_FEATURES_KHR: {
         VkPhysicalDeviceVariablePointerFeaturesKHR *features = (void *) ext;
         features->variablePointersStorageBuffer = false;
         features->variablePointers = false;
         break;
      }
      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MULTIVIEW_FEATURES_KHR: {
         VkPhysicalDeviceMultiviewFeaturesKHR *features =
            (VkPhysicalDeviceMultiviewFeaturesKHR *) ext;
         features->multiview = false;
         features->multiviewGeometryShader = false;
         features->multiviewTessellationShader = false;
         break;
      }
      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_DRAW_PARAMETER_FEATURES: {
         VkPhysicalDeviceShaderDrawParameterFeatures *features =
            (VkPhysicalDeviceShaderDrawParameterFeatures *) ext;
         features->shaderDrawParameters = false;
         break;
      }
      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROTECTED_MEMORY_FEATURES: {
         VkPhysicalDeviceProtectedMemoryFeatures *features =
            (VkPhysicalDeviceProtectedMemoryFeatures *) ext;
         features->protectedMemory = false;
         break;
      }
      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_16BIT_STORAGE_FEATURES: {
         VkPhysicalDevice16BitStorageFeatures *features =
            (VkPhysicalDevice16BitStorageFeatures *) ext;
         features->storageBuffer16BitAccess = false;
         features->uniformAndStorageBuffer16BitAccess = false;
         features->storagePushConstant16 = false;
         features->storageInputOutput16 = false;
         break;
      }
      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SAMPLER_YCBCR_CONVERSION_FEATURES: {
         VkPhysicalDeviceSamplerYcbcrConversionFeatures *features =
            (VkPhysicalDeviceSamplerYcbcrConversionFeatures *) ext;
         features->samplerYcbcrConversion = false;
         break;
      }
      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DESCRIPTOR_INDEXING_FEATURES_EXT: {
         VkPhysicalDeviceDescriptorIndexingFeaturesEXT *features =
            (VkPhysicalDeviceDescriptorIndexingFeaturesEXT *) ext;
         features->shaderInputAttachmentArrayDynamicIndexing = false;
         features->shaderUniformTexelBufferArrayDynamicIndexing = false;
         features->shaderStorageTexelBufferArrayDynamicIndexing = false;
         features->shaderUniformBufferArrayNonUniformIndexing = false;
         features->shaderSampledImageArrayNonUniformIndexing = false;
         features->shaderStorageBufferArrayNonUniformIndexing = false;
         features->shaderStorageImageArrayNonUniformIndexing = false;
         features->shaderInputAttachmentArrayNonUniformIndexing = false;
         features->shaderUniformTexelBufferArrayNonUniformIndexing = false;
         features->shaderStorageTexelBufferArrayNonUniformIndexing = false;
         features->descriptorBindingUniformBufferUpdateAfterBind = false;
         features->descriptorBindingSampledImageUpdateAfterBind = false;
         features->descriptorBindingStorageImageUpdateAfterBind = false;
         features->descriptorBindingStorageBufferUpdateAfterBind = false;
         features->descriptorBindingUniformTexelBufferUpdateAfterBind = false;
         features->descriptorBindingStorageTexelBufferUpdateAfterBind = false;
         features->descriptorBindingUpdateUnusedWhilePending = false;
         features->descriptorBindingPartiallyBound = false;
         features->descriptorBindingVariableDescriptorCount = false;
         features->runtimeDescriptorArray = false;
         break;
      }
      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_CONDITIONAL_RENDERING_FEATURES_EXT: {
         VkPhysicalDeviceConditionalRenderingFeaturesEXT *features =
            (VkPhysicalDeviceConditionalRenderingFeaturesEXT *) ext;
         features->conditionalRendering = false;
         features->inheritedConditionalRendering = false;
         break;
      }
      default:
         break;
      }
   }
   return tu_GetPhysicalDeviceFeatures(physicalDevice, &pFeatures->features);
}

void
tu_GetPhysicalDeviceProperties(VkPhysicalDevice physicalDevice,
                               VkPhysicalDeviceProperties *pProperties)
{
   TU_FROM_HANDLE(tu_physical_device, pdevice, physicalDevice);
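   /* 0xf advertises 1x, 2x, 4x and 8x for every sample-count limit below.
    * This is presumably a placeholder rather than a value validated against
    * the hardware.
    */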
   VkSampleCountFlags sample_counts = 0xf;

   /* Make sure that the entire descriptor set is addressable with a signed
    * 32-bit int. So the sum of all limits scaled by descriptor size has to
    * be at most 2 GiB. The combined image & sampler object counts as one of
    * both. This limit is for the pipeline layout, not for the set layout, but
    * there is no set limit, so we just set a pipeline limit. I don't think
    * any app is going to hit this soon. */
   size_t max_descriptor_set_size =
      ((1ull << 31) - 16 * MAX_DYNAMIC_BUFFERS) /
      (32 /* uniform buffer, 32 due to potential space wasted on alignment */ +
       32 /* storage buffer, 32 due to potential space wasted on alignment */ +
       32 /* sampler, largest when combined with image */ +
       64 /* sampled image */ + 64 /* storage image */);

   VkPhysicalDeviceLimits limits = {
      .maxImageDimension1D = (1 << 14),
      .maxImageDimension2D = (1 << 14),
      .maxImageDimension3D = (1 << 11),
      .maxImageDimensionCube = (1 << 14),
      .maxImageArrayLayers = (1 << 11),
      .maxTexelBufferElements = 128 * 1024 * 1024,
      .maxUniformBufferRange = UINT32_MAX,
      .maxStorageBufferRange = UINT32_MAX,
      .maxPushConstantsSize = MAX_PUSH_CONSTANTS_SIZE,
      .maxMemoryAllocationCount = UINT32_MAX,
      .maxSamplerAllocationCount = 64 * 1024,
      .bufferImageGranularity = 64, /* A cache line */
      .sparseAddressSpaceSize = 0xffffffffu, /* buffer max size */
      .maxBoundDescriptorSets = MAX_SETS,
      .maxPerStageDescriptorSamplers = max_descriptor_set_size,
      .maxPerStageDescriptorUniformBuffers = max_descriptor_set_size,
      .maxPerStageDescriptorStorageBuffers = max_descriptor_set_size,
      .maxPerStageDescriptorSampledImages = max_descriptor_set_size,
      .maxPerStageDescriptorStorageImages = max_descriptor_set_size,
      .maxPerStageDescriptorInputAttachments = max_descriptor_set_size,
      .maxPerStageResources = max_descriptor_set_size,
      .maxDescriptorSetSamplers = max_descriptor_set_size,
      .maxDescriptorSetUniformBuffers = max_descriptor_set_size,
      .maxDescriptorSetUniformBuffersDynamic = MAX_DYNAMIC_UNIFORM_BUFFERS,
      .maxDescriptorSetStorageBuffers = max_descriptor_set_size,
      .maxDescriptorSetStorageBuffersDynamic = MAX_DYNAMIC_STORAGE_BUFFERS,
      .maxDescriptorSetSampledImages = max_descriptor_set_size,
      .maxDescriptorSetStorageImages = max_descriptor_set_size,
      .maxDescriptorSetInputAttachments = max_descriptor_set_size,
      .maxVertexInputAttributes = 32,
      .maxVertexInputBindings = 32,
      .maxVertexInputAttributeOffset = 2047,
      .maxVertexInputBindingStride = 2048,
      .maxVertexOutputComponents = 128,
      .maxTessellationGenerationLevel = 64,
      .maxTessellationPatchSize = 32,
      .maxTessellationControlPerVertexInputComponents = 128,
      .maxTessellationControlPerVertexOutputComponents = 128,
      .maxTessellationControlPerPatchOutputComponents = 120,
      .maxTessellationControlTotalOutputComponents = 4096,
      .maxTessellationEvaluationInputComponents = 128,
      .maxTessellationEvaluationOutputComponents = 128,
      .maxGeometryShaderInvocations = 127,
      .maxGeometryInputComponents = 64,
      .maxGeometryOutputComponents = 128,
      .maxGeometryOutputVertices = 256,
      .maxGeometryTotalOutputComponents = 1024,
      .maxFragmentInputComponents = 128,
      .maxFragmentOutputAttachments = 8,
      .maxFragmentDualSrcAttachments = 1,
      .maxFragmentCombinedOutputResources = 8,
      .maxComputeSharedMemorySize = 32768,
      .maxComputeWorkGroupCount = { 65535, 65535, 65535 },
      .maxComputeWorkGroupInvocations = 2048,
      .maxComputeWorkGroupSize = { 2048, 2048, 2048 },
      .subPixelPrecisionBits = 4 /* FIXME */,
      .subTexelPrecisionBits = 4 /* FIXME */,
      .mipmapPrecisionBits = 4 /* FIXME */,
      .maxDrawIndexedIndexValue = UINT32_MAX,
      .maxDrawIndirectCount = UINT32_MAX,
      .maxSamplerLodBias = 16,
      .maxSamplerAnisotropy = 16,
      .maxViewports = MAX_VIEWPORTS,
      .maxViewportDimensions = { (1 << 14), (1 << 14) },
      .viewportBoundsRange = { INT16_MIN, INT16_MAX },
      .viewportSubPixelBits = 8,
      .minMemoryMapAlignment = 4096, /* A page */
      .minTexelBufferOffsetAlignment = 1,
      .minUniformBufferOffsetAlignment = 4,
      .minStorageBufferOffsetAlignment = 4,
      .minTexelOffset = -32,
      .maxTexelOffset = 31,
      .minTexelGatherOffset = -32,
      .maxTexelGatherOffset = 31,
      .minInterpolationOffset = -2,
      .maxInterpolationOffset = 2,
      .subPixelInterpolationOffsetBits = 8,
      .maxFramebufferWidth = (1 << 14),
      .maxFramebufferHeight = (1 << 14),
      .maxFramebufferLayers = (1 << 10),
      .framebufferColorSampleCounts = sample_counts,
      .framebufferDepthSampleCounts = sample_counts,
      .framebufferStencilSampleCounts = sample_counts,
      .framebufferNoAttachmentsSampleCounts = sample_counts,
      .maxColorAttachments = MAX_RTS,
      .sampledImageColorSampleCounts = sample_counts,
      .sampledImageIntegerSampleCounts = VK_SAMPLE_COUNT_1_BIT,
      .sampledImageDepthSampleCounts = sample_counts,
      .sampledImageStencilSampleCounts = sample_counts,
      .storageImageSampleCounts = VK_SAMPLE_COUNT_1_BIT,
      .maxSampleMaskWords = 1,
      .timestampComputeAndGraphics = true,
      .timestampPeriod = 1,
      .maxClipDistances = 8,
      .maxCullDistances = 8,
      .maxCombinedClipAndCullDistances = 8,
      .discreteQueuePriorities = 1,
      .pointSizeRange = { 0.125, 255.875 },
      .lineWidthRange = { 0.0, 7.9921875 },
      .pointSizeGranularity = (1.0 / 8.0),
      .lineWidthGranularity = (1.0 / 128.0),
      .strictLines = false, /* FINISHME */
      .standardSampleLocations = true,
      .optimalBufferCopyOffsetAlignment = 128,
      .optimalBufferCopyRowPitchAlignment = 128,
      .nonCoherentAtomSize = 64,
   };

   *pProperties = (VkPhysicalDeviceProperties) {
      .apiVersion = tu_physical_device_api_version(pdevice),
      .driverVersion = vk_get_driver_version(),
      .vendorID = 0, /* TODO */
      .deviceID = 0,
      .deviceType = VK_PHYSICAL_DEVICE_TYPE_INTEGRATED_GPU,
      .limits = limits,
      .sparseProperties = { 0 },
   };

   strcpy(pProperties->deviceName, pdevice->name);
   memcpy(pProperties->pipelineCacheUUID, pdevice->cache_uuid, VK_UUID_SIZE);
}

void
tu_GetPhysicalDeviceProperties2(VkPhysicalDevice physicalDevice,
                                VkPhysicalDeviceProperties2KHR *pProperties)
{
   TU_FROM_HANDLE(tu_physical_device, pdevice, physicalDevice);
   tu_GetPhysicalDeviceProperties(physicalDevice, &pProperties->properties);

   vk_foreach_struct(ext, pProperties->pNext)
   {
      switch (ext->sType) {
      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PUSH_DESCRIPTOR_PROPERTIES_KHR: {
         VkPhysicalDevicePushDescriptorPropertiesKHR *properties =
            (VkPhysicalDevicePushDescriptorPropertiesKHR *) ext;
         properties->maxPushDescriptors = MAX_PUSH_DESCRIPTORS;
         break;
      }
      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_ID_PROPERTIES_KHR: {
         VkPhysicalDeviceIDPropertiesKHR *properties =
            (VkPhysicalDeviceIDPropertiesKHR *) ext;
         memcpy(properties->driverUUID, pdevice->driver_uuid, VK_UUID_SIZE);
         memcpy(properties->deviceUUID, pdevice->device_uuid, VK_UUID_SIZE);
         properties->deviceLUIDValid = false;
         break;
      }
      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MULTIVIEW_PROPERTIES_KHR: {
         VkPhysicalDeviceMultiviewPropertiesKHR *properties =
            (VkPhysicalDeviceMultiviewPropertiesKHR *) ext;
         properties->maxMultiviewViewCount = MAX_VIEWS;
         properties->maxMultiviewInstanceIndex = INT_MAX;
         break;
      }
      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_POINT_CLIPPING_PROPERTIES_KHR: {
         VkPhysicalDevicePointClippingPropertiesKHR *properties =
            (VkPhysicalDevicePointClippingPropertiesKHR *) ext;
         properties->pointClippingBehavior =
            VK_POINT_CLIPPING_BEHAVIOR_ALL_CLIP_PLANES_KHR;
         break;
      }
      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MAINTENANCE_3_PROPERTIES: {
         VkPhysicalDeviceMaintenance3Properties *properties =
            (VkPhysicalDeviceMaintenance3Properties *) ext;
         /* Make sure everything is addressable by a signed 32-bit int, and
          * our largest descriptors are 96 bytes. */
         properties->maxPerSetDescriptors = (1ull << 31) / 96;
         /* Our buffer size fields allow only this much */
         properties->maxMemoryAllocationSize = 0xFFFFFFFFull;
         break;
      }
      default:
         break;
      }
   }
}

static const VkQueueFamilyProperties tu_queue_family_properties = {
   .queueFlags =
      VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT | VK_QUEUE_TRANSFER_BIT,
   .queueCount = 1,
   .timestampValidBits = 64,
   .minImageTransferGranularity = (VkExtent3D) { 1, 1, 1 },
};

void
tu_GetPhysicalDeviceQueueFamilyProperties(
   VkPhysicalDevice physicalDevice,
   uint32_t *pQueueFamilyPropertyCount,
   VkQueueFamilyProperties *pQueueFamilyProperties)
{
   VK_OUTARRAY_MAKE(out, pQueueFamilyProperties, pQueueFamilyPropertyCount);

   vk_outarray_append(&out, p) { *p = tu_queue_family_properties; }
}

void
tu_GetPhysicalDeviceQueueFamilyProperties2(
   VkPhysicalDevice physicalDevice,
   uint32_t *pQueueFamilyPropertyCount,
   VkQueueFamilyProperties2KHR *pQueueFamilyProperties)
{
   VK_OUTARRAY_MAKE(out, pQueueFamilyProperties, pQueueFamilyPropertyCount);

   vk_outarray_append(&out, p)
   {
      p->queueFamilyProperties = tu_queue_family_properties;
   }
}

static uint64_t
tu_get_system_heap_size(void)
{
   struct sysinfo info;
   sysinfo(&info);

   uint64_t total_ram = (uint64_t) info.totalram * (uint64_t) info.mem_unit;

   /* We don't want to burn too much ram with the GPU. If the user has 4GiB
    * or less, we use at most half. If they have more than 4GiB, we use 3/4.
    */
   uint64_t available_ram;
   if (total_ram <= 4ull * 1024ull * 1024ull * 1024ull)
      available_ram = total_ram / 2;
   else
      available_ram = total_ram * 3 / 4;

   return available_ram;
}

void
tu_GetPhysicalDeviceMemoryProperties(
   VkPhysicalDevice physicalDevice,
   VkPhysicalDeviceMemoryProperties *pMemoryProperties)
{
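   /* Adreno GPUs share system RAM with the CPU, so we advertise a single
    * heap with one memory type that is at once device-local, host-visible
    * and host-coherent.
    */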
   pMemoryProperties->memoryHeapCount = 1;
   pMemoryProperties->memoryHeaps[0].size = tu_get_system_heap_size();
   pMemoryProperties->memoryHeaps[0].flags = VK_MEMORY_HEAP_DEVICE_LOCAL_BIT;

   pMemoryProperties->memoryTypeCount = 1;
   pMemoryProperties->memoryTypes[0].propertyFlags =
      VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT |
      VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT |
      VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
   pMemoryProperties->memoryTypes[0].heapIndex = 0;
}

void
tu_GetPhysicalDeviceMemoryProperties2(
   VkPhysicalDevice physicalDevice,
   VkPhysicalDeviceMemoryProperties2KHR *pMemoryProperties)
{
   return tu_GetPhysicalDeviceMemoryProperties(
      physicalDevice, &pMemoryProperties->memoryProperties);
}

static VkResult
tu_queue_init(struct tu_device *device,
              struct tu_queue *queue,
              uint32_t queue_family_index,
              int idx,
              VkDeviceQueueCreateFlags flags)
{
   queue->_loader_data.loaderMagic = ICD_LOADER_MAGIC;
   queue->device = device;
   queue->queue_family_index = queue_family_index;
   queue->queue_idx = idx;
   queue->flags = flags;
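   /* Back each VkQueue with its own kernel submitqueue. The id returned by
    * DRM_MSM_SUBMITQUEUE_NEW is what tu_QueueSubmit later passes in the
    * queueid field of drm_msm_gem_submit; prio 0 is taken here to be the
    * default priority.
    */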
   struct drm_msm_submitqueue req = {
      .flags = 0,
      .prio = 0,
   };

   int ret = drmCommandWriteRead(device->physical_device->local_fd,
                                 DRM_MSM_SUBMITQUEUE_NEW,
                                 &req, sizeof(req));
   if (ret)
      return VK_ERROR_INITIALIZATION_FAILED;

   queue->msm_queue_id = req.id;
   return VK_SUCCESS;
}

static void
tu_queue_finish(struct tu_queue *queue)
{
   drmCommandWrite(queue->device->physical_device->local_fd,
                   DRM_MSM_SUBMITQUEUE_CLOSE,
                   &queue->msm_queue_id, sizeof(uint32_t));
}

static int
tu_get_device_extension_index(const char *name)
{
   for (unsigned i = 0; i < TU_DEVICE_EXTENSION_COUNT; ++i) {
      if (strcmp(name, tu_device_extensions[i].extensionName) == 0)
         return i;
   }
   return -1;
}

VkResult
tu_CreateDevice(VkPhysicalDevice physicalDevice,
                const VkDeviceCreateInfo *pCreateInfo,
                const VkAllocationCallbacks *pAllocator,
                VkDevice *pDevice)
{
   TU_FROM_HANDLE(tu_physical_device, physical_device, physicalDevice);
   VkResult result;
   struct tu_device *device;

   /* Check enabled features */
   if (pCreateInfo->pEnabledFeatures) {
      VkPhysicalDeviceFeatures supported_features;
      tu_GetPhysicalDeviceFeatures(physicalDevice, &supported_features);
      VkBool32 *supported_feature = (VkBool32 *) &supported_features;
      VkBool32 *enabled_feature = (VkBool32 *) pCreateInfo->pEnabledFeatures;
      unsigned num_features =
         sizeof(VkPhysicalDeviceFeatures) / sizeof(VkBool32);
      for (uint32_t i = 0; i < num_features; i++) {
         if (enabled_feature[i] && !supported_feature[i])
            return vk_error(physical_device->instance,
                            VK_ERROR_FEATURE_NOT_PRESENT);
      }
   }

   device = vk_zalloc2(&physical_device->instance->alloc, pAllocator,
                       sizeof(*device), 8, VK_SYSTEM_ALLOCATION_SCOPE_DEVICE);
   if (!device)
      return vk_error(physical_device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);

   device->_loader_data.loaderMagic = ICD_LOADER_MAGIC;
   device->instance = physical_device->instance;
   device->physical_device = physical_device;

   if (pAllocator)
      device->alloc = *pAllocator;
   else
      device->alloc = physical_device->instance->alloc;

   for (uint32_t i = 0; i < pCreateInfo->enabledExtensionCount; i++) {
      const char *ext_name = pCreateInfo->ppEnabledExtensionNames[i];
      int index = tu_get_device_extension_index(ext_name);
      if (index < 0 ||
          !physical_device->supported_extensions.extensions[index]) {
         vk_free(&device->alloc, device);
         return vk_error(physical_device->instance,
                         VK_ERROR_EXTENSION_NOT_PRESENT);
      }

      device->enabled_extensions.extensions[index] = true;
   }

   for (unsigned i = 0; i < pCreateInfo->queueCreateInfoCount; i++) {
      const VkDeviceQueueCreateInfo *queue_create =
         &pCreateInfo->pQueueCreateInfos[i];
      uint32_t qfi = queue_create->queueFamilyIndex;
      device->queues[qfi] = vk_alloc(
         &device->alloc, queue_create->queueCount * sizeof(struct tu_queue),
         8, VK_SYSTEM_ALLOCATION_SCOPE_DEVICE);
      if (!device->queues[qfi]) {
         result = VK_ERROR_OUT_OF_HOST_MEMORY;
         goto fail;
      }

      memset(device->queues[qfi], 0,
             queue_create->queueCount * sizeof(struct tu_queue));

      device->queue_count[qfi] = queue_create->queueCount;

      for (unsigned q = 0; q < queue_create->queueCount; q++) {
         result = tu_queue_init(device, &device->queues[qfi][q], qfi, q,
                                queue_create->flags);
         if (result != VK_SUCCESS)
            goto fail;
      }
   }

   VkPipelineCacheCreateInfo ci;
   ci.sType = VK_STRUCTURE_TYPE_PIPELINE_CACHE_CREATE_INFO;
   ci.pNext = NULL;
   ci.flags = 0;
   ci.pInitialData = NULL;
   ci.initialDataSize = 0;
   VkPipelineCache pc;
   result =
      tu_CreatePipelineCache(tu_device_to_handle(device), &ci, NULL, &pc);
   if (result != VK_SUCCESS)
      goto fail;

   device->mem_cache = tu_pipeline_cache_from_handle(pc);

   *pDevice = tu_device_to_handle(device);
   return VK_SUCCESS;

fail:
   for (unsigned i = 0; i < TU_MAX_QUEUE_FAMILIES; i++) {
      for (unsigned q = 0; q < device->queue_count[i]; q++)
         tu_queue_finish(&device->queues[i][q]);
      if (device->queue_count[i])
         vk_free(&device->alloc, device->queues[i]);
   }

   vk_free(&device->alloc, device);
   return result;
}

void
tu_DestroyDevice(VkDevice _device, const VkAllocationCallbacks *pAllocator)
{
   TU_FROM_HANDLE(tu_device, device, _device);

   if (!device)
      return;

   for (unsigned i = 0; i < TU_MAX_QUEUE_FAMILIES; i++) {
      for (unsigned q = 0; q < device->queue_count[i]; q++)
         tu_queue_finish(&device->queues[i][q]);
      if (device->queue_count[i])
         vk_free(&device->alloc, device->queues[i]);
   }

   VkPipelineCache pc = tu_pipeline_cache_to_handle(device->mem_cache);
   tu_DestroyPipelineCache(tu_device_to_handle(device), pc, NULL);

   vk_free(&device->alloc, device);
}

VkResult
tu_EnumerateInstanceLayerProperties(uint32_t *pPropertyCount,
                                    VkLayerProperties *pProperties)
{
   *pPropertyCount = 0;
   return VK_SUCCESS;
}

VkResult
tu_EnumerateDeviceLayerProperties(VkPhysicalDevice physicalDevice,
                                  uint32_t *pPropertyCount,
                                  VkLayerProperties *pProperties)
{
   *pPropertyCount = 0;
   return VK_SUCCESS;
}

void
tu_GetDeviceQueue2(VkDevice _device,
                   const VkDeviceQueueInfo2 *pQueueInfo,
                   VkQueue *pQueue)
{
   TU_FROM_HANDLE(tu_device, device, _device);
   struct tu_queue *queue;

   queue =
      &device->queues[pQueueInfo->queueFamilyIndex][pQueueInfo->queueIndex];
   if (pQueueInfo->flags != queue->flags) {
      /* From the Vulkan 1.1.70 spec:
       *
       * "The queue returned by vkGetDeviceQueue2 must have the same
       * flags value from this structure as that used at device
       * creation time in a VkDeviceQueueCreateInfo instance. If no
       * matching flags were specified at device creation time then
       * pQueue will return VK_NULL_HANDLE."
       */
      *pQueue = VK_NULL_HANDLE;
      return;
   }

   *pQueue = tu_queue_to_handle(queue);
}

void
tu_GetDeviceQueue(VkDevice _device,
                  uint32_t queueFamilyIndex,
                  uint32_t queueIndex,
                  VkQueue *pQueue)
{
   const VkDeviceQueueInfo2 info =
      (VkDeviceQueueInfo2) { .sType = VK_STRUCTURE_TYPE_DEVICE_QUEUE_INFO_2,
                             .queueFamilyIndex = queueFamilyIndex,
                             .queueIndex = queueIndex };

   tu_GetDeviceQueue2(_device, &info, pQueue);
}

VkResult
tu_QueueSubmit(VkQueue _queue,
               uint32_t submitCount,
               const VkSubmitInfo *pSubmits,
               VkFence _fence)
{
   TU_FROM_HANDLE(tu_queue, queue, _queue);

   for (uint32_t i = 0; i < submitCount; ++i) {
      const VkSubmitInfo *submit = pSubmits + i;
      struct tu_bo_list bo_list;
      tu_bo_list_init(&bo_list);
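      /* First pass: count the IB entries of every command buffer in this
       * submit so the cmds array below can be sized up front.
       */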
      uint32_t entry_count = 0;
      for (uint32_t j = 0; j < submit->commandBufferCount; ++j) {
         TU_FROM_HANDLE(tu_cmd_buffer, cmdbuf, submit->pCommandBuffers[j]);
         entry_count += cmdbuf->primary_cmd_stream.entry_count;
      }
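      /* Second pass: translate each command stream entry into a
       * drm_msm_gem_submit_cmd. submit_idx indexes the bos array built
       * below; tu_bo_list_add is assumed to return the BO's position in
       * that list, deduplicating repeated handles.
       */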
      struct drm_msm_gem_submit_cmd cmds[entry_count];
      uint32_t entry_idx = 0;
      for (uint32_t j = 0; j < submit->commandBufferCount; ++j) {
         TU_FROM_HANDLE(tu_cmd_buffer, cmdbuf, submit->pCommandBuffers[j]);
         struct tu_cmd_stream *stream = &cmdbuf->primary_cmd_stream;
         for (unsigned k = 0; k < stream->entry_count; ++k, ++entry_idx) {
            cmds[entry_idx].type = MSM_SUBMIT_CMD_BUF;
            cmds[entry_idx].submit_idx =
               tu_bo_list_add(&bo_list, stream->entries[k].bo);
            cmds[entry_idx].submit_offset = stream->entries[k].offset;
            cmds[entry_idx].size = stream->entries[k].size;
            cmds[entry_idx].pad = 0;
            cmds[entry_idx].nr_relocs = 0;
            cmds[entry_idx].relocs = 0;
         }
      }
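      /* We do not yet track per-BO access, so conservatively mark every BO
       * as both read and written.
       */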
      struct drm_msm_gem_submit_bo bos[bo_list.count];
      for (unsigned k = 0; k < bo_list.count; ++k) {
         bos[k].flags = MSM_SUBMIT_BO_READ | MSM_SUBMIT_BO_WRITE;
         bos[k].handle = bo_list.handles[k];
         bos[k].presumed = 0;
      }
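      /* MSM_PIPE_3D0 selects the 3D pipe; the submitqueue created in
       * tu_queue_init routes the submission to this queue. There is no
       * fence handling yet, and a failed ioctl currently aborts instead of
       * propagating an error.
       */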
      struct drm_msm_gem_submit req = {
         .flags = MSM_PIPE_3D0,
         .queueid = queue->msm_queue_id,
         .bos = (uint64_t)(uintptr_t)bos,
         .nr_bos = bo_list.count,
         .cmds = (uint64_t)(uintptr_t)cmds,
         .nr_cmds = entry_count,
      };

      int ret = drmCommandWriteRead(queue->device->physical_device->local_fd,
                                    DRM_MSM_GEM_SUBMIT,
                                    &req, sizeof(req));
      if (ret) {
         fprintf(stderr, "submit failed: %s\n", strerror(errno));
         abort();
      }

      tu_bo_list_destroy(&bo_list);
   }
   return VK_SUCCESS;
}

VkResult
tu_QueueWaitIdle(VkQueue _queue)
{
   return VK_SUCCESS;
}

VkResult
tu_DeviceWaitIdle(VkDevice _device)
{
   TU_FROM_HANDLE(tu_device, device, _device);

   for (unsigned i = 0; i < TU_MAX_QUEUE_FAMILIES; i++) {
      for (unsigned q = 0; q < device->queue_count[i]; q++) {
         tu_QueueWaitIdle(tu_queue_to_handle(&device->queues[i][q]));
      }
   }
   return VK_SUCCESS;
}

VkResult
tu_EnumerateInstanceExtensionProperties(const char *pLayerName,
                                        uint32_t *pPropertyCount,
                                        VkExtensionProperties *pProperties)
{
   VK_OUTARRAY_MAKE(out, pProperties, pPropertyCount);

   /* We support no layers */
   if (pLayerName)
      return vk_error(NULL, VK_ERROR_LAYER_NOT_PRESENT);

   for (int i = 0; i < TU_INSTANCE_EXTENSION_COUNT; i++) {
      if (tu_supported_instance_extensions.extensions[i]) {
         vk_outarray_append(&out, prop) { *prop = tu_instance_extensions[i]; }
      }
   }

   return vk_outarray_status(&out);
}

VkResult
tu_EnumerateDeviceExtensionProperties(VkPhysicalDevice physicalDevice,
                                      const char *pLayerName,
                                      uint32_t *pPropertyCount,
                                      VkExtensionProperties *pProperties)
{
   TU_FROM_HANDLE(tu_physical_device, device, physicalDevice);
   VK_OUTARRAY_MAKE(out, pProperties, pPropertyCount);

   /* We support no layers */
   if (pLayerName)
      return vk_error(NULL, VK_ERROR_LAYER_NOT_PRESENT);

   for (int i = 0; i < TU_DEVICE_EXTENSION_COUNT; i++) {
      if (device->supported_extensions.extensions[i]) {
         vk_outarray_append(&out, prop) { *prop = tu_device_extensions[i]; }
      }
   }

   return vk_outarray_status(&out);
}

PFN_vkVoidFunction
tu_GetInstanceProcAddr(VkInstance _instance, const char *pName)
{
   TU_FROM_HANDLE(tu_instance, instance, _instance);

   return tu_lookup_entrypoint_checked(
      pName, instance ? instance->api_version : 0,
      instance ? &instance->enabled_extensions : NULL, NULL);
}

/* The loader wants us to expose a second GetInstanceProcAddr function
 * to work around certain LD_PRELOAD issues seen in apps.
 */
PUBLIC
VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL
vk_icdGetInstanceProcAddr(VkInstance instance, const char *pName);

PUBLIC
VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL
vk_icdGetInstanceProcAddr(VkInstance instance, const char *pName)
{
   return tu_GetInstanceProcAddr(instance, pName);
}

PFN_vkVoidFunction
tu_GetDeviceProcAddr(VkDevice _device, const char *pName)
{
   TU_FROM_HANDLE(tu_device, device, _device);

   return tu_lookup_entrypoint_checked(pName, device->instance->api_version,
                                       &device->instance->enabled_extensions,
                                       &device->enabled_extensions);
}

static VkResult
tu_alloc_memory(struct tu_device *device,
                const VkMemoryAllocateInfo *pAllocateInfo,
                const VkAllocationCallbacks *pAllocator,
                VkDeviceMemory *pMem)
{
   struct tu_device_memory *mem;
   VkResult result;

   assert(pAllocateInfo->sType == VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO);

   if (pAllocateInfo->allocationSize == 0) {
      /* Apparently, this is allowed */
      *pMem = VK_NULL_HANDLE;
      return VK_SUCCESS;
   }

   mem = vk_alloc2(&device->alloc, pAllocator, sizeof(*mem), 8,
                   VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (mem == NULL)
      return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);

   result = tu_bo_init_new(device, &mem->bo, pAllocateInfo->allocationSize);
   if (result != VK_SUCCESS) {
      vk_free2(&device->alloc, pAllocator, mem);
      return result;
   }

   mem->size = pAllocateInfo->allocationSize;
   mem->type_index = pAllocateInfo->memoryTypeIndex;

   mem->map = NULL;
   mem->user_ptr = NULL;

   *pMem = tu_device_memory_to_handle(mem);

   return VK_SUCCESS;
}

VkResult
tu_AllocateMemory(VkDevice _device,
                  const VkMemoryAllocateInfo *pAllocateInfo,
                  const VkAllocationCallbacks *pAllocator,
                  VkDeviceMemory *pMem)
{
   TU_FROM_HANDLE(tu_device, device, _device);
   return tu_alloc_memory(device, pAllocateInfo, pAllocator, pMem);
}

void
tu_FreeMemory(VkDevice _device,
              VkDeviceMemory _mem,
              const VkAllocationCallbacks *pAllocator)
{
   TU_FROM_HANDLE(tu_device, device, _device);
   TU_FROM_HANDLE(tu_device_memory, mem, _mem);

   if (mem == NULL)
      return;

   tu_bo_finish(device, &mem->bo);
   vk_free2(&device->alloc, pAllocator, mem);
}

VkResult
tu_MapMemory(VkDevice _device,
             VkDeviceMemory _memory,
             VkDeviceSize offset,
             VkDeviceSize size,
             VkMemoryMapFlags flags,
             void **ppData)
{
   TU_FROM_HANDLE(tu_device, device, _device);
   TU_FROM_HANDLE(tu_device_memory, mem, _memory);
   VkResult result;

   if (mem == NULL) {
      *ppData = NULL;
      return VK_SUCCESS;
   }

   if (mem->user_ptr) {
      *ppData = mem->user_ptr;
   } else if (!mem->map) {
      result = tu_bo_map(device, &mem->bo);
      if (result != VK_SUCCESS)
         return result;
      *ppData = mem->map = mem->bo.map;
   } else
      *ppData = mem->map;

   if (*ppData) {
      *ppData += offset;
      return VK_SUCCESS;
   }

   return vk_error(device->instance, VK_ERROR_MEMORY_MAP_FAILED);
}

void
tu_UnmapMemory(VkDevice _device, VkDeviceMemory _memory)
{
   /* I do not see any unmapping done by the freedreno Gallium driver. */
}
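/* The only memory type we advertise is host-coherent, so flushing and
 * invalidating mapped ranges can safely be no-ops.
 */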
VkResult
tu_FlushMappedMemoryRanges(VkDevice _device,
                           uint32_t memoryRangeCount,
                           const VkMappedMemoryRange *pMemoryRanges)
{
   return VK_SUCCESS;
}

VkResult
tu_InvalidateMappedMemoryRanges(VkDevice _device,
                                uint32_t memoryRangeCount,
                                const VkMappedMemoryRange *pMemoryRanges)
{
   return VK_SUCCESS;
}

void
tu_GetBufferMemoryRequirements(VkDevice _device,
                               VkBuffer _buffer,
                               VkMemoryRequirements *pMemoryRequirements)
{
   TU_FROM_HANDLE(tu_buffer, buffer, _buffer);

   pMemoryRequirements->memoryTypeBits = 1;
   pMemoryRequirements->alignment = 16;
   pMemoryRequirements->size =
      align64(buffer->size, pMemoryRequirements->alignment);
}

void
tu_GetBufferMemoryRequirements2(
   VkDevice device,
   const VkBufferMemoryRequirementsInfo2KHR *pInfo,
   VkMemoryRequirements2KHR *pMemoryRequirements)
{
   tu_GetBufferMemoryRequirements(device, pInfo->buffer,
                                  &pMemoryRequirements->memoryRequirements);
}

void
tu_GetImageMemoryRequirements(VkDevice _device,
                              VkImage _image,
                              VkMemoryRequirements *pMemoryRequirements)
{
   TU_FROM_HANDLE(tu_image, image, _image);

   pMemoryRequirements->memoryTypeBits = 1;
   pMemoryRequirements->size = image->size;
   pMemoryRequirements->alignment = image->alignment;
}

void
tu_GetImageMemoryRequirements2(VkDevice device,
                               const VkImageMemoryRequirementsInfo2KHR *pInfo,
                               VkMemoryRequirements2KHR *pMemoryRequirements)
{
   tu_GetImageMemoryRequirements(device, pInfo->image,
                                 &pMemoryRequirements->memoryRequirements);
}

void
tu_GetImageSparseMemoryRequirements(
   VkDevice device,
   VkImage image,
   uint32_t *pSparseMemoryRequirementCount,
   VkSparseImageMemoryRequirements *pSparseMemoryRequirements)
{
   tu_stub();
}

void
tu_GetImageSparseMemoryRequirements2(
   VkDevice device,
   const VkImageSparseMemoryRequirementsInfo2KHR *pInfo,
   uint32_t *pSparseMemoryRequirementCount,
   VkSparseImageMemoryRequirements2KHR *pSparseMemoryRequirements)
{
   tu_stub();
}

void
tu_GetDeviceMemoryCommitment(VkDevice device,
                             VkDeviceMemory memory,
                             VkDeviceSize *pCommittedMemoryInBytes)
{
   *pCommittedMemoryInBytes = 0;
}

VkResult
tu_BindBufferMemory2(VkDevice device,
                     uint32_t bindInfoCount,
                     const VkBindBufferMemoryInfoKHR *pBindInfos)
{
   return VK_SUCCESS;
}

VkResult
tu_BindBufferMemory(VkDevice device,
                    VkBuffer buffer,
                    VkDeviceMemory memory,
                    VkDeviceSize memoryOffset)
{
   const VkBindBufferMemoryInfoKHR info = {
      .sType = VK_STRUCTURE_TYPE_BIND_BUFFER_MEMORY_INFO_KHR,
      .buffer = buffer,
      .memory = memory,
      .memoryOffset = memoryOffset
   };

   return tu_BindBufferMemory2(device, 1, &info);
}

VkResult
tu_BindImageMemory2(VkDevice device,
                    uint32_t bindInfoCount,
                    const VkBindImageMemoryInfoKHR *pBindInfos)
{
   return VK_SUCCESS;
}

VkResult
tu_BindImageMemory(VkDevice device,
                   VkImage image,
                   VkDeviceMemory memory,
                   VkDeviceSize memoryOffset)
{
   const VkBindImageMemoryInfoKHR info = {
      .sType = VK_STRUCTURE_TYPE_BIND_IMAGE_MEMORY_INFO_KHR,
      .image = image,
      .memory = memory,
      .memoryOffset = memoryOffset
   };

   return tu_BindImageMemory2(device, 1, &info);
}

VkResult
tu_QueueBindSparse(VkQueue _queue,
                   uint32_t bindInfoCount,
                   const VkBindSparseInfo *pBindInfo,
                   VkFence _fence)
{
   return VK_SUCCESS;
}

VkResult
tu_CreateFence(VkDevice _device,
               const VkFenceCreateInfo *pCreateInfo,
               const VkAllocationCallbacks *pAllocator,
               VkFence *pFence)
{
   TU_FROM_HANDLE(tu_device, device, _device);

   struct tu_fence *fence =
      vk_alloc2(&device->alloc, pAllocator, sizeof(*fence), 8,
                VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);

   if (!fence)
      return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);

   *pFence = tu_fence_to_handle(fence);

   return VK_SUCCESS;
}

void
tu_DestroyFence(VkDevice _device,
                VkFence _fence,
                const VkAllocationCallbacks *pAllocator)
{
   TU_FROM_HANDLE(tu_device, device, _device);
   TU_FROM_HANDLE(tu_fence, fence, _fence);

   if (!fence)
      return;

   vk_free2(&device->alloc, pAllocator, fence);
}

VkResult
tu_WaitForFences(VkDevice _device,
                 uint32_t fenceCount,
                 const VkFence *pFences,
                 VkBool32 waitAll,
                 uint64_t timeout)
{
   return VK_SUCCESS;
}

VkResult
tu_ResetFences(VkDevice _device, uint32_t fenceCount, const VkFence *pFences)
{
   return VK_SUCCESS;
}

VkResult
tu_GetFenceStatus(VkDevice _device, VkFence _fence)
{
   return VK_SUCCESS;
}

// Queue semaphore functions

VkResult
tu_CreateSemaphore(VkDevice _device,
                   const VkSemaphoreCreateInfo *pCreateInfo,
                   const VkAllocationCallbacks *pAllocator,
                   VkSemaphore *pSemaphore)
{
   TU_FROM_HANDLE(tu_device, device, _device);

   struct tu_semaphore *sem =
      vk_alloc2(&device->alloc, pAllocator, sizeof(*sem), 8,
                VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (!sem)
      return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);

   *pSemaphore = tu_semaphore_to_handle(sem);
   return VK_SUCCESS;
}

void
tu_DestroySemaphore(VkDevice _device,
                    VkSemaphore _semaphore,
                    const VkAllocationCallbacks *pAllocator)
{
   TU_FROM_HANDLE(tu_device, device, _device);
   TU_FROM_HANDLE(tu_semaphore, sem, _semaphore);
   if (!_semaphore)
      return;

   vk_free2(&device->alloc, pAllocator, sem);
}

VkResult
tu_CreateEvent(VkDevice _device,
               const VkEventCreateInfo *pCreateInfo,
               const VkAllocationCallbacks *pAllocator,
               VkEvent *pEvent)
{
   TU_FROM_HANDLE(tu_device, device, _device);
   struct tu_event *event =
      vk_alloc2(&device->alloc, pAllocator, sizeof(*event), 8,
                VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);

   if (!event)
      return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);

   *pEvent = tu_event_to_handle(event);

   return VK_SUCCESS;
}

void
tu_DestroyEvent(VkDevice _device,
                VkEvent _event,
                const VkAllocationCallbacks *pAllocator)
{
   TU_FROM_HANDLE(tu_device, device, _device);
   TU_FROM_HANDLE(tu_event, event, _event);

   if (!event)
      return;
   vk_free2(&device->alloc, pAllocator, event);
}

VkResult
tu_GetEventStatus(VkDevice _device, VkEvent _event)
{
   TU_FROM_HANDLE(tu_event, event, _event);

   if (*event->map == 1)
      return VK_EVENT_SET;
   return VK_EVENT_RESET;
}

VkResult
tu_SetEvent(VkDevice _device, VkEvent _event)
{
   TU_FROM_HANDLE(tu_event, event, _event);
   *event->map = 1;

   return VK_SUCCESS;
}

VkResult
tu_ResetEvent(VkDevice _device, VkEvent _event)
{
   TU_FROM_HANDLE(tu_event, event, _event);
   *event->map = 0;

   return VK_SUCCESS;
}

VkResult
tu_CreateBuffer(VkDevice _device,
                const VkBufferCreateInfo *pCreateInfo,
                const VkAllocationCallbacks *pAllocator,
                VkBuffer *pBuffer)
{
   TU_FROM_HANDLE(tu_device, device, _device);
   struct tu_buffer *buffer;

   assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO);

   buffer = vk_alloc2(&device->alloc, pAllocator, sizeof(*buffer), 8,
                      VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (buffer == NULL)
      return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);

   buffer->size = pCreateInfo->size;
   buffer->usage = pCreateInfo->usage;
   buffer->flags = pCreateInfo->flags;

   *pBuffer = tu_buffer_to_handle(buffer);

   return VK_SUCCESS;
}

void
tu_DestroyBuffer(VkDevice _device,
                 VkBuffer _buffer,
                 const VkAllocationCallbacks *pAllocator)
{
   TU_FROM_HANDLE(tu_device, device, _device);
   TU_FROM_HANDLE(tu_buffer, buffer, _buffer);

   if (!buffer)
      return;

   vk_free2(&device->alloc, pAllocator, buffer);
}

static uint32_t
tu_surface_max_layer_count(struct tu_image_view *iview)
{
   return iview->type == VK_IMAGE_VIEW_TYPE_3D
             ? iview->extent.depth
             : (iview->base_layer + iview->layer_count);
}

VkResult
tu_CreateFramebuffer(VkDevice _device,
                     const VkFramebufferCreateInfo *pCreateInfo,
                     const VkAllocationCallbacks *pAllocator,
                     VkFramebuffer *pFramebuffer)
{
   TU_FROM_HANDLE(tu_device, device, _device);
   struct tu_framebuffer *framebuffer;

   assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO);

   size_t size = sizeof(*framebuffer) + sizeof(struct tu_attachment_info) *
                                           pCreateInfo->attachmentCount;
   framebuffer = vk_alloc2(&device->alloc, pAllocator, size, 8,
                           VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (framebuffer == NULL)
      return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);

   framebuffer->attachment_count = pCreateInfo->attachmentCount;
   framebuffer->width = pCreateInfo->width;
   framebuffer->height = pCreateInfo->height;
   framebuffer->layers = pCreateInfo->layers;
   for (uint32_t i = 0; i < pCreateInfo->attachmentCount; i++) {
      VkImageView _iview = pCreateInfo->pAttachments[i];
      struct tu_image_view *iview = tu_image_view_from_handle(_iview);
      framebuffer->attachments[i].attachment = iview;

      framebuffer->width = MIN2(framebuffer->width, iview->extent.width);
      framebuffer->height = MIN2(framebuffer->height, iview->extent.height);
      framebuffer->layers =
         MIN2(framebuffer->layers, tu_surface_max_layer_count(iview));
   }

   *pFramebuffer = tu_framebuffer_to_handle(framebuffer);
   return VK_SUCCESS;
}

void
tu_DestroyFramebuffer(VkDevice _device,
                      VkFramebuffer _fb,
                      const VkAllocationCallbacks *pAllocator)
{
   TU_FROM_HANDLE(tu_device, device, _device);
   TU_FROM_HANDLE(tu_framebuffer, fb, _fb);

   if (!fb)
      return;
   vk_free2(&device->alloc, pAllocator, fb);
}

static void
tu_init_sampler(struct tu_device *device,
                struct tu_sampler *sampler,
                const VkSamplerCreateInfo *pCreateInfo)
{
}

VkResult
tu_CreateSampler(VkDevice _device,
                 const VkSamplerCreateInfo *pCreateInfo,
                 const VkAllocationCallbacks *pAllocator,
                 VkSampler *pSampler)
{
   TU_FROM_HANDLE(tu_device, device, _device);
   struct tu_sampler *sampler;

   assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_SAMPLER_CREATE_INFO);

   sampler = vk_alloc2(&device->alloc, pAllocator, sizeof(*sampler), 8,
                       VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (!sampler)
      return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);

   tu_init_sampler(device, sampler, pCreateInfo);
   *pSampler = tu_sampler_to_handle(sampler);

   return VK_SUCCESS;
}

void
tu_DestroySampler(VkDevice _device,
                  VkSampler _sampler,
                  const VkAllocationCallbacks *pAllocator)
{
   TU_FROM_HANDLE(tu_device, device, _device);
   TU_FROM_HANDLE(tu_sampler, sampler, _sampler);

   if (!sampler)
      return;
   vk_free2(&device->alloc, pAllocator, sampler);
}

/* vk_icd.h does not declare this function, so we declare it here to
 * suppress Wmissing-prototypes.
 */
PUBLIC VKAPI_ATTR VkResult VKAPI_CALL
vk_icdNegotiateLoaderICDInterfaceVersion(uint32_t *pSupportedVersion);

PUBLIC VKAPI_ATTR VkResult VKAPI_CALL
vk_icdNegotiateLoaderICDInterfaceVersion(uint32_t *pSupportedVersion)
{
   /* For the full details on loader interface versioning, see
    * <https://github.com/KhronosGroup/Vulkan-LoaderAndValidationLayers/blob/master/loader/LoaderAndLayerInterface.md>.
    * What follows is a condensed summary, to help you navigate the large and
    * confusing official doc.
    *
    * - Loader interface v0 is incompatible with later versions. We don't
    *   support it.
    *
    * - In loader interface v1:
    *   - The first ICD entrypoint called by the loader is
    *     vk_icdGetInstanceProcAddr(). The ICD must statically expose this
    *     entrypoint.
    *   - The ICD must statically expose no other Vulkan symbol unless it
    *     is linked with -Bsymbolic.
    *   - Each dispatchable Vulkan handle created by the ICD must be
    *     a pointer to a struct whose first member is VK_LOADER_DATA. The
    *     ICD must initialize VK_LOADER_DATA.loaderMagic to
    *     ICD_LOADER_MAGIC.
    *   - The loader implements vkCreate{PLATFORM}SurfaceKHR() and
    *     vkDestroySurfaceKHR(). The ICD must be capable of working with
    *     such loader-managed surfaces.
    *
    * - Loader interface v2 differs from v1 in:
    *   - The first ICD entrypoint called by the loader is
    *     vk_icdNegotiateLoaderICDInterfaceVersion(). The ICD must
    *     statically expose this entrypoint.
    *
    * - Loader interface v3 differs from v2 in:
    *   - The ICD must implement vkCreate{PLATFORM}SurfaceKHR(),
    *     vkDestroySurfaceKHR(), and other API which uses VkSurfaceKHR,
    *     because the loader no longer does so.
    */
   *pSupportedVersion = MIN2(*pSupportedVersion, 3u);
   return VK_SUCCESS;
}

void
tu_GetPhysicalDeviceExternalSemaphoreProperties(
   VkPhysicalDevice physicalDevice,
   const VkPhysicalDeviceExternalSemaphoreInfoKHR *pExternalSemaphoreInfo,
   VkExternalSemaphorePropertiesKHR *pExternalSemaphoreProperties)
{
   pExternalSemaphoreProperties->exportFromImportedHandleTypes = 0;
   pExternalSemaphoreProperties->compatibleHandleTypes = 0;
   pExternalSemaphoreProperties->externalSemaphoreFeatures = 0;
}

void
tu_GetPhysicalDeviceExternalFenceProperties(
   VkPhysicalDevice physicalDevice,
   const VkPhysicalDeviceExternalFenceInfoKHR *pExternalFenceInfo,
   VkExternalFencePropertiesKHR *pExternalFenceProperties)
{
   pExternalFenceProperties->exportFromImportedHandleTypes = 0;
   pExternalFenceProperties->compatibleHandleTypes = 0;
   pExternalFenceProperties->externalFenceFeatures = 0;
}

VkResult
tu_CreateDebugReportCallbackEXT(
   VkInstance _instance,
   const VkDebugReportCallbackCreateInfoEXT *pCreateInfo,
   const VkAllocationCallbacks *pAllocator,
   VkDebugReportCallbackEXT *pCallback)
{
   TU_FROM_HANDLE(tu_instance, instance, _instance);
   return vk_create_debug_report_callback(&instance->debug_report_callbacks,
                                          pCreateInfo, pAllocator,
                                          &instance->alloc, pCallback);
}

void
tu_DestroyDebugReportCallbackEXT(VkInstance _instance,
                                 VkDebugReportCallbackEXT _callback,
                                 const VkAllocationCallbacks *pAllocator)
{
   TU_FROM_HANDLE(tu_instance, instance, _instance);
   vk_destroy_debug_report_callback(&instance->debug_report_callbacks,
                                    _callback, pAllocator, &instance->alloc);
}

void
tu_DebugReportMessageEXT(VkInstance _instance,
                         VkDebugReportFlagsEXT flags,
                         VkDebugReportObjectTypeEXT objectType,
                         uint64_t object,
                         size_t location,
                         int32_t messageCode,
                         const char *pLayerPrefix,
                         const char *pMessage)
{
   TU_FROM_HANDLE(tu_instance, instance, _instance);
   vk_debug_report(&instance->debug_report_callbacks, flags, objectType,
                   object, location, messageCode, pLayerPrefix, pMessage);
}

void
tu_GetDeviceGroupPeerMemoryFeatures(
   VkDevice device,
   uint32_t heapIndex,
   uint32_t localDeviceIndex,
   uint32_t remoteDeviceIndex,
   VkPeerMemoryFeatureFlags *pPeerMemoryFeatures)
{
   assert(localDeviceIndex == remoteDeviceIndex);

   *pPeerMemoryFeatures = VK_PEER_MEMORY_FEATURE_COPY_SRC_BIT |
                          VK_PEER_MEMORY_FEATURE_COPY_DST_BIT |
                          VK_PEER_MEMORY_FEATURE_GENERIC_SRC_BIT |
                          VK_PEER_MEMORY_FEATURE_GENERIC_DST_BIT;
}