turnip: add wrappers around DRM_MSM_SUBMITQUEUE_*
[mesa.git] / src / freedreno / vulkan / tu_device.c
1 /*
2 * Copyright © 2016 Red Hat.
3 * Copyright © 2016 Bas Nieuwenhuizen
4 *
5 * based in part on anv driver which is:
6 * Copyright © 2015 Intel Corporation
7 *
8 * Permission is hereby granted, free of charge, to any person obtaining a
9 * copy of this software and associated documentation files (the "Software"),
10 * to deal in the Software without restriction, including without limitation
11 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
12 * and/or sell copies of the Software, and to permit persons to whom the
13 * Software is furnished to do so, subject to the following conditions:
14 *
15 * The above copyright notice and this permission notice (including the next
16 * paragraph) shall be included in all copies or substantial portions of the
17 * Software.
18 *
19 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
20 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
21 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
22 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
23 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
24 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
25 * DEALINGS IN THE SOFTWARE.
26 */
27
28 #include "tu_private.h"
29
30 #include <fcntl.h>
31 #include <stdbool.h>
32 #include <string.h>
33 #include <sys/mman.h>
34 #include <sys/sysinfo.h>
35 #include <unistd.h>
36 #include <xf86drm.h>
37
38 #include "util/debug.h"
39 #include "util/disk_cache.h"
40 #include "util/strtod.h"
41 #include "vk_format.h"
42 #include "vk_util.h"
43
44 #include "drm/msm_drm.h"
45
46 static int
47 tu_device_get_cache_uuid(uint16_t family, void *uuid)
48 {
49 uint32_t mesa_timestamp;
50 uint16_t f = family;
51 memset(uuid, 0, VK_UUID_SIZE);
52 if (!disk_cache_get_function_timestamp(tu_device_get_cache_uuid,
53 &mesa_timestamp))
54 return -1;
55
56 memcpy(uuid, &mesa_timestamp, 4);
57 memcpy((char *) uuid + 4, &f, 2);
58 snprintf((char *) uuid + 6, VK_UUID_SIZE - 10, "tu");
59 return 0;
60 }
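/* Resulting UUID layout (VK_UUID_SIZE = 16 bytes): bytes 0-3 carry the
 * Mesa build timestamp, bytes 4-5 the GPU family, and bytes 6 onward the
 * string "tu" followed by zero padding from the memset above.
 */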
61
62 static void
63 tu_get_driver_uuid(void *uuid)
64 {
65 memset(uuid, 0, VK_UUID_SIZE);
66 snprintf(uuid, VK_UUID_SIZE, "freedreno");
67 }
68
69 static void
70 tu_get_device_uuid(void *uuid)
71 {
72 memset(uuid, 0, VK_UUID_SIZE);
73 }
74
75 VkResult
76 tu_bo_init_new(struct tu_device *dev, struct tu_bo *bo, uint64_t size)
77 {
78 /* TODO: Choose better flags. As of 2018-11-12, freedreno/drm/msm_bo.c
79 * always sets `flags = MSM_BO_WC`, and we copy that behavior here.
80 */
81 uint32_t gem_handle = tu_gem_new(dev, size, MSM_BO_WC);
82 if (!gem_handle)
83 goto fail_new;
84
85 /* Calling DRM_MSM_GEM_INFO forces the kernel to allocate backing pages. We
86 * want immediate backing pages because vkAllocateMemory and friends must
87 * not lazily fail.
88 *
89 * TODO(chadv): Must we really call DRM_MSM_GEM_INFO to acquire backing
90 * pages? I infer so from reading comments in msm_bo.c:bo_allocate(), but
91 * maybe I misunderstand.
92 */
93
94 /* TODO: Do we need 'offset' if we have 'iova'? */
95 uint64_t offset = tu_gem_info_offset(dev, gem_handle);
96 if (!offset)
97 goto fail_info;
98
99 uint64_t iova = tu_gem_info_iova(dev, gem_handle);
100 if (!iova)
101 goto fail_info;
102
103 *bo = (struct tu_bo) {
104 .gem_handle = gem_handle,
105 .size = size,
106 .offset = offset,
107 .iova = iova,
108 };
109
110 return VK_SUCCESS;
111
112 fail_info:
113 tu_gem_close(dev, bo->gem_handle);
114 fail_new:
115 return vk_error(dev->instance, VK_ERROR_OUT_OF_DEVICE_MEMORY);
116 }
117
118 VkResult
119 tu_bo_map(struct tu_device *dev, struct tu_bo *bo)
120 {
121 if (bo->map)
122 return VK_SUCCESS;
123
124 /* TODO: Should we use the wrapper os_mmap() like Freedreno does? */
125 void *map = mmap(0, bo->size, PROT_READ | PROT_WRITE, MAP_SHARED,
126 dev->physical_device->local_fd, bo->offset);
127 if (map == MAP_FAILED)
128 return vk_error(dev->instance, VK_ERROR_MEMORY_MAP_FAILED);
129
130 bo->map = map;
131 return VK_SUCCESS;
132 }
133
134 void
135 tu_bo_finish(struct tu_device *dev, struct tu_bo *bo)
136 {
137 assert(bo->gem_handle);
138
139 if (bo->map)
140 munmap(bo->map, bo->size);
141
142 tu_gem_close(dev, bo->gem_handle);
143 }
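/* The tu_gem_new()/tu_gem_close() wrappers used by the BO helpers above
 * are defined with the other DRM helpers outside this file. As a minimal
 * sketch (assuming the upstream msm uapi in drm/msm_drm.h), allocation
 * goes through DRM_MSM_GEM_NEW and teardown through the generic GEM
 * close ioctl:
 *
 *    uint32_t
 *    tu_gem_new(struct tu_device *dev, uint64_t size, uint32_t flags)
 *    {
 *       struct drm_msm_gem_new req = { .size = size, .flags = flags };
 *
 *       // On success the kernel fills req.handle; 0 is never a valid
 *       // GEM handle, so it doubles as the error value here.
 *       if (drmCommandWriteRead(dev->physical_device->local_fd,
 *                               DRM_MSM_GEM_NEW, &req, sizeof(req)))
 *          return 0;
 *       return req.handle;
 *    }
 *
 *    void
 *    tu_gem_close(struct tu_device *dev, uint32_t gem_handle)
 *    {
 *       struct drm_gem_close req = { .handle = gem_handle };
 *
 *       drmIoctl(dev->physical_device->local_fd, DRM_IOCTL_GEM_CLOSE, &req);
 *    }
 */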
144
145 static VkResult
146 tu_physical_device_init(struct tu_physical_device *device,
147 struct tu_instance *instance,
148 drmDevicePtr drm_device)
149 {
150 const char *path = drm_device->nodes[DRM_NODE_RENDER];
151 VkResult result = VK_SUCCESS;
152 drmVersionPtr version;
153 int fd;
154 int master_fd = -1;
155
156 fd = open(path, O_RDWR | O_CLOEXEC);
157 if (fd < 0) {
158 return vk_errorf(instance, VK_ERROR_INCOMPATIBLE_DRIVER,
159 "failed to open device %s", path);
160 }
161
162 /* Version 1.3 added MSM_INFO_IOVA. */
163 const int min_version_major = 1;
164 const int min_version_minor = 3;
165
166 version = drmGetVersion(fd);
167 if (!version) {
168 close(fd);
169 return vk_errorf(instance, VK_ERROR_INCOMPATIBLE_DRIVER,
170 "failed to query kernel driver version for device %s",
171 path);
172 }
173
174 if (strcmp(version->name, "msm")) {
175 drmFreeVersion(version);
176 if (master_fd != -1)
177 close(master_fd);
178 close(fd);
179 return vk_errorf(instance, VK_ERROR_INCOMPATIBLE_DRIVER,
180 "device %s does not use the msm kernel driver", path);
181 }
182
183 if (version->version_major != min_version_major ||
184 version->version_minor < min_version_minor) {
185 result = vk_errorf(instance, VK_ERROR_INCOMPATIBLE_DRIVER,
186 "kernel driver for device %s has version %d.%d, "
187 "but Vulkan requires version >= %d.%d",
188 path, version->version_major, version->version_minor,
189 min_version_major, min_version_minor);
190 drmFreeVersion(version);
191 close(fd);
192 return result;
193 }
194
195 drmFreeVersion(version);
196
197 if (instance->debug_flags & TU_DEBUG_STARTUP)
198 tu_logi("Found compatible device '%s'.", path);
199
200 device->_loader_data.loaderMagic = ICD_LOADER_MAGIC;
201 device->instance = instance;
202 assert(strlen(path) < ARRAY_SIZE(device->path));
203 strncpy(device->path, path, ARRAY_SIZE(device->path));
204
205 if (instance->enabled_extensions.KHR_display) {
206 master_fd =
207 open(drm_device->nodes[DRM_NODE_PRIMARY], O_RDWR | O_CLOEXEC);
208 if (master_fd >= 0) {
209          /* TODO: free master_fd if accel is not working? */
210 }
211 }
212
213 device->master_fd = master_fd;
214 device->local_fd = fd;
215
216 if (tu_drm_get_gpu_id(device, &device->gpu_id)) {
217 if (instance->debug_flags & TU_DEBUG_STARTUP)
218 tu_logi("Could not query the GPU ID");
219 result = vk_errorf(instance, VK_ERROR_INITIALIZATION_FAILED,
220 "could not get GPU ID");
221 goto fail;
222 }
223
224 if (tu_drm_get_gmem_size(device, &device->gmem_size)) {
225 if (instance->debug_flags & TU_DEBUG_STARTUP)
226 tu_logi("Could not query the GMEM size");
227 result = vk_errorf(instance, VK_ERROR_INITIALIZATION_FAILED,
228 "could not get GMEM size");
229 goto fail;
230 }
231
232 memset(device->name, 0, sizeof(device->name));
233 sprintf(device->name, "FD%d", device->gpu_id);
234
235 switch (device->gpu_id) {
236 case 530:
237 case 630:
238 break;
239 default:
240 result = vk_errorf(instance, VK_ERROR_INITIALIZATION_FAILED,
241 "device %s is unsupported", device->name);
242 goto fail;
243 }
244 if (tu_device_get_cache_uuid(device->gpu_id, device->cache_uuid)) {
245 result = vk_errorf(instance, VK_ERROR_INITIALIZATION_FAILED,
246 "cannot generate UUID");
247 goto fail;
248 }
249
250 /* The gpu id is already embedded in the uuid so we just pass "tu"
251 * when creating the cache.
252 */
253 char buf[VK_UUID_SIZE * 2 + 1];
254 disk_cache_format_hex_id(buf, device->cache_uuid, VK_UUID_SIZE * 2);
255 device->disk_cache = disk_cache_create(device->name, buf, 0);
256
257 fprintf(stderr, "WARNING: tu is not a conformant vulkan implementation, "
258 "testing use only.\n");
259
260    tu_get_driver_uuid(&device->driver_uuid);
261 tu_get_device_uuid(&device->device_uuid);
262
263 tu_fill_device_extension_table(device, &device->supported_extensions);
264
265 if (result != VK_SUCCESS) {
266 vk_error(instance, result);
267 goto fail;
268 }
269
270 return VK_SUCCESS;
271
272 fail:
273 close(fd);
274 if (master_fd != -1)
275 close(master_fd);
276 return result;
277 }
278
279 static void
280 tu_physical_device_finish(struct tu_physical_device *device)
281 {
282 disk_cache_destroy(device->disk_cache);
283 close(device->local_fd);
284 if (device->master_fd != -1)
285 close(device->master_fd);
286 }
287
288 static void *
289 default_alloc_func(void *pUserData,
290 size_t size,
291 size_t align,
292 VkSystemAllocationScope allocationScope)
293 {
294 return malloc(size);
295 }
296
297 static void *
298 default_realloc_func(void *pUserData,
299 void *pOriginal,
300 size_t size,
301 size_t align,
302 VkSystemAllocationScope allocationScope)
303 {
304 return realloc(pOriginal, size);
305 }
306
307 static void
308 default_free_func(void *pUserData, void *pMemory)
309 {
310 free(pMemory);
311 }
312
313 static const VkAllocationCallbacks default_alloc = {
314 .pUserData = NULL,
315 .pfnAllocation = default_alloc_func,
316 .pfnReallocation = default_realloc_func,
317 .pfnFree = default_free_func,
318 };
319
320 static const struct debug_control tu_debug_options[] = {
321 { "startup", TU_DEBUG_STARTUP }, { NULL, 0 }
322 };
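/* Example: running an application with TU_DEBUG=startup in the
 * environment enables the tu_logi() startup messages guarded by
 * TU_DEBUG_STARTUP throughout this file.
 */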
323
324 const char *
325 tu_get_debug_option_name(int id)
326 {
327 assert(id < ARRAY_SIZE(tu_debug_options) - 1);
328 return tu_debug_options[id].string;
329 }
330
331 static int
332 tu_get_instance_extension_index(const char *name)
333 {
334 for (unsigned i = 0; i < TU_INSTANCE_EXTENSION_COUNT; ++i) {
335 if (strcmp(name, tu_instance_extensions[i].extensionName) == 0)
336 return i;
337 }
338 return -1;
339 }
340
341 VkResult
342 tu_CreateInstance(const VkInstanceCreateInfo *pCreateInfo,
343 const VkAllocationCallbacks *pAllocator,
344 VkInstance *pInstance)
345 {
346 struct tu_instance *instance;
347 VkResult result;
348
349 assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_INSTANCE_CREATE_INFO);
350
351 uint32_t client_version;
352 if (pCreateInfo->pApplicationInfo &&
353 pCreateInfo->pApplicationInfo->apiVersion != 0) {
354 client_version = pCreateInfo->pApplicationInfo->apiVersion;
355 } else {
356 tu_EnumerateInstanceVersion(&client_version);
357 }
358
359 instance = vk_zalloc2(&default_alloc, pAllocator, sizeof(*instance), 8,
360 VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
361 if (!instance)
362 return vk_error(NULL, VK_ERROR_OUT_OF_HOST_MEMORY);
363
364 instance->_loader_data.loaderMagic = ICD_LOADER_MAGIC;
365
366 if (pAllocator)
367 instance->alloc = *pAllocator;
368 else
369 instance->alloc = default_alloc;
370
371 instance->api_version = client_version;
372 instance->physical_device_count = -1;
373
374 instance->debug_flags =
375 parse_debug_string(getenv("TU_DEBUG"), tu_debug_options);
376
377 if (instance->debug_flags & TU_DEBUG_STARTUP)
378 tu_logi("Created an instance");
379
380 for (uint32_t i = 0; i < pCreateInfo->enabledExtensionCount; i++) {
381 const char *ext_name = pCreateInfo->ppEnabledExtensionNames[i];
382 int index = tu_get_instance_extension_index(ext_name);
383
384 if (index < 0 || !tu_supported_instance_extensions.extensions[index]) {
385 vk_free2(&default_alloc, pAllocator, instance);
386 return vk_error(instance, VK_ERROR_EXTENSION_NOT_PRESENT);
387 }
388
389 instance->enabled_extensions.extensions[index] = true;
390 }
391
392 result = vk_debug_report_instance_init(&instance->debug_report_callbacks);
393 if (result != VK_SUCCESS) {
394 vk_free2(&default_alloc, pAllocator, instance);
395 return vk_error(instance, result);
396 }
397
398 _mesa_locale_init();
399
400 VG(VALGRIND_CREATE_MEMPOOL(instance, 0, false));
401
402 *pInstance = tu_instance_to_handle(instance);
403
404 return VK_SUCCESS;
405 }
406
407 void
408 tu_DestroyInstance(VkInstance _instance,
409 const VkAllocationCallbacks *pAllocator)
410 {
411 TU_FROM_HANDLE(tu_instance, instance, _instance);
412
413 if (!instance)
414 return;
415
416 for (int i = 0; i < instance->physical_device_count; ++i) {
417 tu_physical_device_finish(instance->physical_devices + i);
418 }
419
420 VG(VALGRIND_DESTROY_MEMPOOL(instance));
421
422 _mesa_locale_fini();
423
424 vk_debug_report_instance_destroy(&instance->debug_report_callbacks);
425
426 vk_free(&instance->alloc, instance);
427 }
428
429 static VkResult
430 tu_enumerate_devices(struct tu_instance *instance)
431 {
432    /* TODO: Check for more devices? */
433 drmDevicePtr devices[8];
434 VkResult result = VK_ERROR_INCOMPATIBLE_DRIVER;
435 int max_devices;
436
437 instance->physical_device_count = 0;
438
439 max_devices = drmGetDevices2(0, devices, ARRAY_SIZE(devices));
440
441 if (instance->debug_flags & TU_DEBUG_STARTUP)
442 tu_logi("Found %d drm nodes", max_devices);
443
444 if (max_devices < 1)
445 return vk_error(instance, VK_ERROR_INCOMPATIBLE_DRIVER);
446
447 for (unsigned i = 0; i < (unsigned) max_devices; i++) {
448 if (devices[i]->available_nodes & 1 << DRM_NODE_RENDER &&
449 devices[i]->bustype == DRM_BUS_PLATFORM) {
450
451 result = tu_physical_device_init(
452 instance->physical_devices + instance->physical_device_count,
453 instance, devices[i]);
454 if (result == VK_SUCCESS)
455 ++instance->physical_device_count;
456 else if (result != VK_ERROR_INCOMPATIBLE_DRIVER)
457 break;
458 }
459 }
460 drmFreeDevices(devices, max_devices);
461
462 return result;
463 }
464
465 VkResult
466 tu_EnumeratePhysicalDevices(VkInstance _instance,
467 uint32_t *pPhysicalDeviceCount,
468 VkPhysicalDevice *pPhysicalDevices)
469 {
470 TU_FROM_HANDLE(tu_instance, instance, _instance);
471 VK_OUTARRAY_MAKE(out, pPhysicalDevices, pPhysicalDeviceCount);
472
473 VkResult result;
474
475 if (instance->physical_device_count < 0) {
476 result = tu_enumerate_devices(instance);
477 if (result != VK_SUCCESS && result != VK_ERROR_INCOMPATIBLE_DRIVER)
478 return result;
479 }
480
481 for (uint32_t i = 0; i < instance->physical_device_count; ++i) {
482 vk_outarray_append(&out, p)
483 {
484 *p = tu_physical_device_to_handle(instance->physical_devices + i);
485 }
486 }
487
488 return vk_outarray_status(&out);
489 }
490
491 VkResult
492 tu_EnumeratePhysicalDeviceGroups(
493 VkInstance _instance,
494 uint32_t *pPhysicalDeviceGroupCount,
495 VkPhysicalDeviceGroupProperties *pPhysicalDeviceGroupProperties)
496 {
497 TU_FROM_HANDLE(tu_instance, instance, _instance);
498 VK_OUTARRAY_MAKE(out, pPhysicalDeviceGroupProperties,
499 pPhysicalDeviceGroupCount);
500 VkResult result;
501
502 if (instance->physical_device_count < 0) {
503 result = tu_enumerate_devices(instance);
504 if (result != VK_SUCCESS && result != VK_ERROR_INCOMPATIBLE_DRIVER)
505 return result;
506 }
507
508 for (uint32_t i = 0; i < instance->physical_device_count; ++i) {
509 vk_outarray_append(&out, p)
510 {
511 p->physicalDeviceCount = 1;
512 p->physicalDevices[0] =
513 tu_physical_device_to_handle(instance->physical_devices + i);
514 p->subsetAllocation = false;
515 }
516 }
517
518 return vk_outarray_status(&out);
519 }
520
521 void
522 tu_GetPhysicalDeviceFeatures(VkPhysicalDevice physicalDevice,
523 VkPhysicalDeviceFeatures *pFeatures)
524 {
525 memset(pFeatures, 0, sizeof(*pFeatures));
526
527 *pFeatures = (VkPhysicalDeviceFeatures) {
528 .robustBufferAccess = false,
529 .fullDrawIndexUint32 = false,
530 .imageCubeArray = false,
531 .independentBlend = false,
532 .geometryShader = false,
533 .tessellationShader = false,
534 .sampleRateShading = false,
535 .dualSrcBlend = false,
536 .logicOp = false,
537 .multiDrawIndirect = false,
538 .drawIndirectFirstInstance = false,
539 .depthClamp = false,
540 .depthBiasClamp = false,
541 .fillModeNonSolid = false,
542 .depthBounds = false,
543 .wideLines = false,
544 .largePoints = false,
545 .alphaToOne = false,
546 .multiViewport = false,
547 .samplerAnisotropy = false,
548 .textureCompressionETC2 = false,
549 .textureCompressionASTC_LDR = false,
550 .textureCompressionBC = false,
551 .occlusionQueryPrecise = false,
552 .pipelineStatisticsQuery = false,
553 .vertexPipelineStoresAndAtomics = false,
554 .fragmentStoresAndAtomics = false,
555 .shaderTessellationAndGeometryPointSize = false,
556 .shaderImageGatherExtended = false,
557 .shaderStorageImageExtendedFormats = false,
558 .shaderStorageImageMultisample = false,
559 .shaderUniformBufferArrayDynamicIndexing = false,
560 .shaderSampledImageArrayDynamicIndexing = false,
561 .shaderStorageBufferArrayDynamicIndexing = false,
562 .shaderStorageImageArrayDynamicIndexing = false,
563 .shaderStorageImageReadWithoutFormat = false,
564 .shaderStorageImageWriteWithoutFormat = false,
565 .shaderClipDistance = false,
566 .shaderCullDistance = false,
567 .shaderFloat64 = false,
568 .shaderInt64 = false,
569 .shaderInt16 = false,
570 .sparseBinding = false,
571 .variableMultisampleRate = false,
572 .inheritedQueries = false,
573 };
574 }
575
576 void
577 tu_GetPhysicalDeviceFeatures2(VkPhysicalDevice physicalDevice,
578 VkPhysicalDeviceFeatures2KHR *pFeatures)
579 {
580 vk_foreach_struct(ext, pFeatures->pNext)
581 {
582 switch (ext->sType) {
583 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VARIABLE_POINTER_FEATURES_KHR: {
584 VkPhysicalDeviceVariablePointerFeaturesKHR *features = (void *) ext;
585 features->variablePointersStorageBuffer = false;
586 features->variablePointers = false;
587 break;
588 }
589 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MULTIVIEW_FEATURES_KHR: {
590 VkPhysicalDeviceMultiviewFeaturesKHR *features =
591 (VkPhysicalDeviceMultiviewFeaturesKHR *) ext;
592 features->multiview = false;
593 features->multiviewGeometryShader = false;
594 features->multiviewTessellationShader = false;
595 break;
596 }
597 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_DRAW_PARAMETER_FEATURES: {
598 VkPhysicalDeviceShaderDrawParameterFeatures *features =
599 (VkPhysicalDeviceShaderDrawParameterFeatures *) ext;
600 features->shaderDrawParameters = false;
601 break;
602 }
603 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROTECTED_MEMORY_FEATURES: {
604 VkPhysicalDeviceProtectedMemoryFeatures *features =
605 (VkPhysicalDeviceProtectedMemoryFeatures *) ext;
606 features->protectedMemory = false;
607 break;
608 }
609 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_16BIT_STORAGE_FEATURES: {
610 VkPhysicalDevice16BitStorageFeatures *features =
611 (VkPhysicalDevice16BitStorageFeatures *) ext;
612 features->storageBuffer16BitAccess = false;
613 features->uniformAndStorageBuffer16BitAccess = false;
614 features->storagePushConstant16 = false;
615 features->storageInputOutput16 = false;
616 break;
617 }
618 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SAMPLER_YCBCR_CONVERSION_FEATURES: {
619 VkPhysicalDeviceSamplerYcbcrConversionFeatures *features =
620 (VkPhysicalDeviceSamplerYcbcrConversionFeatures *) ext;
621 features->samplerYcbcrConversion = false;
622 break;
623 }
624 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DESCRIPTOR_INDEXING_FEATURES_EXT: {
625 VkPhysicalDeviceDescriptorIndexingFeaturesEXT *features =
626 (VkPhysicalDeviceDescriptorIndexingFeaturesEXT *) ext;
627 features->shaderInputAttachmentArrayDynamicIndexing = false;
628 features->shaderUniformTexelBufferArrayDynamicIndexing = false;
629 features->shaderStorageTexelBufferArrayDynamicIndexing = false;
630 features->shaderUniformBufferArrayNonUniformIndexing = false;
631 features->shaderSampledImageArrayNonUniformIndexing = false;
632 features->shaderStorageBufferArrayNonUniformIndexing = false;
633 features->shaderStorageImageArrayNonUniformIndexing = false;
634 features->shaderInputAttachmentArrayNonUniformIndexing = false;
635 features->shaderUniformTexelBufferArrayNonUniformIndexing = false;
636 features->shaderStorageTexelBufferArrayNonUniformIndexing = false;
637 features->descriptorBindingUniformBufferUpdateAfterBind = false;
638 features->descriptorBindingSampledImageUpdateAfterBind = false;
639 features->descriptorBindingStorageImageUpdateAfterBind = false;
640 features->descriptorBindingStorageBufferUpdateAfterBind = false;
641 features->descriptorBindingUniformTexelBufferUpdateAfterBind = false;
642 features->descriptorBindingStorageTexelBufferUpdateAfterBind = false;
643 features->descriptorBindingUpdateUnusedWhilePending = false;
644 features->descriptorBindingPartiallyBound = false;
645 features->descriptorBindingVariableDescriptorCount = false;
646 features->runtimeDescriptorArray = false;
647 break;
648 }
649 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_CONDITIONAL_RENDERING_FEATURES_EXT: {
650 VkPhysicalDeviceConditionalRenderingFeaturesEXT *features =
651 (VkPhysicalDeviceConditionalRenderingFeaturesEXT *) ext;
652 features->conditionalRendering = false;
653 features->inheritedConditionalRendering = false;
654 break;
655 }
656 default:
657 break;
658 }
659 }
660 return tu_GetPhysicalDeviceFeatures(physicalDevice, &pFeatures->features);
661 }
662
663 void
664 tu_GetPhysicalDeviceProperties(VkPhysicalDevice physicalDevice,
665 VkPhysicalDeviceProperties *pProperties)
666 {
667 TU_FROM_HANDLE(tu_physical_device, pdevice, physicalDevice);
668 VkSampleCountFlags sample_counts = 0xf;
669
670    /* Make sure the entire descriptor set is addressable with a signed
671     * 32-bit int: the sum of all limits scaled by descriptor size must be
672     * at most 2 GiB. A combined image & sampler object counts as one of
673     * each. This limit applies to the pipeline layout, not the set layout,
674     * but there is no per-set limit, so we just set a pipeline limit. No
675     * app is likely to hit this soon. */
676 size_t max_descriptor_set_size =
677 ((1ull << 31) - 16 * MAX_DYNAMIC_BUFFERS) /
678 (32 /* uniform buffer, 32 due to potential space wasted on alignment */ +
679 32 /* storage buffer, 32 due to potential space wasted on alignment */ +
680 32 /* sampler, largest when combined with image */ +
681 64 /* sampled image */ + 64 /* storage image */);
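   /* Worked out: the per-descriptor costs above sum to 224 bytes, so this
    * is roughly (2^31 - 16 * MAX_DYNAMIC_BUFFERS) / 224, i.e. about 9.6
    * million descriptors per stage.
    */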
682
683 VkPhysicalDeviceLimits limits = {
684 .maxImageDimension1D = (1 << 14),
685 .maxImageDimension2D = (1 << 14),
686 .maxImageDimension3D = (1 << 11),
687 .maxImageDimensionCube = (1 << 14),
688 .maxImageArrayLayers = (1 << 11),
689 .maxTexelBufferElements = 128 * 1024 * 1024,
690 .maxUniformBufferRange = UINT32_MAX,
691 .maxStorageBufferRange = UINT32_MAX,
692 .maxPushConstantsSize = MAX_PUSH_CONSTANTS_SIZE,
693 .maxMemoryAllocationCount = UINT32_MAX,
694 .maxSamplerAllocationCount = 64 * 1024,
695 .bufferImageGranularity = 64, /* A cache line */
696 .sparseAddressSpaceSize = 0xffffffffu, /* buffer max size */
697 .maxBoundDescriptorSets = MAX_SETS,
698 .maxPerStageDescriptorSamplers = max_descriptor_set_size,
699 .maxPerStageDescriptorUniformBuffers = max_descriptor_set_size,
700 .maxPerStageDescriptorStorageBuffers = max_descriptor_set_size,
701 .maxPerStageDescriptorSampledImages = max_descriptor_set_size,
702 .maxPerStageDescriptorStorageImages = max_descriptor_set_size,
703 .maxPerStageDescriptorInputAttachments = max_descriptor_set_size,
704 .maxPerStageResources = max_descriptor_set_size,
705 .maxDescriptorSetSamplers = max_descriptor_set_size,
706 .maxDescriptorSetUniformBuffers = max_descriptor_set_size,
707 .maxDescriptorSetUniformBuffersDynamic = MAX_DYNAMIC_UNIFORM_BUFFERS,
708 .maxDescriptorSetStorageBuffers = max_descriptor_set_size,
709 .maxDescriptorSetStorageBuffersDynamic = MAX_DYNAMIC_STORAGE_BUFFERS,
710 .maxDescriptorSetSampledImages = max_descriptor_set_size,
711 .maxDescriptorSetStorageImages = max_descriptor_set_size,
712 .maxDescriptorSetInputAttachments = max_descriptor_set_size,
713 .maxVertexInputAttributes = 32,
714 .maxVertexInputBindings = 32,
715 .maxVertexInputAttributeOffset = 2047,
716 .maxVertexInputBindingStride = 2048,
717 .maxVertexOutputComponents = 128,
718 .maxTessellationGenerationLevel = 64,
719 .maxTessellationPatchSize = 32,
720 .maxTessellationControlPerVertexInputComponents = 128,
721 .maxTessellationControlPerVertexOutputComponents = 128,
722 .maxTessellationControlPerPatchOutputComponents = 120,
723 .maxTessellationControlTotalOutputComponents = 4096,
724 .maxTessellationEvaluationInputComponents = 128,
725 .maxTessellationEvaluationOutputComponents = 128,
726 .maxGeometryShaderInvocations = 127,
727 .maxGeometryInputComponents = 64,
728 .maxGeometryOutputComponents = 128,
729 .maxGeometryOutputVertices = 256,
730 .maxGeometryTotalOutputComponents = 1024,
731 .maxFragmentInputComponents = 128,
732 .maxFragmentOutputAttachments = 8,
733 .maxFragmentDualSrcAttachments = 1,
734 .maxFragmentCombinedOutputResources = 8,
735 .maxComputeSharedMemorySize = 32768,
736 .maxComputeWorkGroupCount = { 65535, 65535, 65535 },
737 .maxComputeWorkGroupInvocations = 2048,
738 .maxComputeWorkGroupSize = { 2048, 2048, 2048 },
739 .subPixelPrecisionBits = 4 /* FIXME */,
740 .subTexelPrecisionBits = 4 /* FIXME */,
741 .mipmapPrecisionBits = 4 /* FIXME */,
742 .maxDrawIndexedIndexValue = UINT32_MAX,
743 .maxDrawIndirectCount = UINT32_MAX,
744 .maxSamplerLodBias = 16,
745 .maxSamplerAnisotropy = 16,
746 .maxViewports = MAX_VIEWPORTS,
747 .maxViewportDimensions = { (1 << 14), (1 << 14) },
748 .viewportBoundsRange = { INT16_MIN, INT16_MAX },
749 .viewportSubPixelBits = 8,
750 .minMemoryMapAlignment = 4096, /* A page */
751 .minTexelBufferOffsetAlignment = 1,
752 .minUniformBufferOffsetAlignment = 4,
753 .minStorageBufferOffsetAlignment = 4,
754 .minTexelOffset = -32,
755 .maxTexelOffset = 31,
756 .minTexelGatherOffset = -32,
757 .maxTexelGatherOffset = 31,
758 .minInterpolationOffset = -2,
759 .maxInterpolationOffset = 2,
760 .subPixelInterpolationOffsetBits = 8,
761 .maxFramebufferWidth = (1 << 14),
762 .maxFramebufferHeight = (1 << 14),
763 .maxFramebufferLayers = (1 << 10),
764 .framebufferColorSampleCounts = sample_counts,
765 .framebufferDepthSampleCounts = sample_counts,
766 .framebufferStencilSampleCounts = sample_counts,
767 .framebufferNoAttachmentsSampleCounts = sample_counts,
768 .maxColorAttachments = MAX_RTS,
769 .sampledImageColorSampleCounts = sample_counts,
770 .sampledImageIntegerSampleCounts = VK_SAMPLE_COUNT_1_BIT,
771 .sampledImageDepthSampleCounts = sample_counts,
772 .sampledImageStencilSampleCounts = sample_counts,
773 .storageImageSampleCounts = VK_SAMPLE_COUNT_1_BIT,
774 .maxSampleMaskWords = 1,
775 .timestampComputeAndGraphics = true,
776 .timestampPeriod = 1,
777 .maxClipDistances = 8,
778 .maxCullDistances = 8,
779 .maxCombinedClipAndCullDistances = 8,
780 .discreteQueuePriorities = 1,
781 .pointSizeRange = { 0.125, 255.875 },
782 .lineWidthRange = { 0.0, 7.9921875 },
783 .pointSizeGranularity = (1.0 / 8.0),
784 .lineWidthGranularity = (1.0 / 128.0),
785 .strictLines = false, /* FINISHME */
786 .standardSampleLocations = true,
787 .optimalBufferCopyOffsetAlignment = 128,
788 .optimalBufferCopyRowPitchAlignment = 128,
789 .nonCoherentAtomSize = 64,
790 };
791
792 *pProperties = (VkPhysicalDeviceProperties) {
793 .apiVersion = tu_physical_device_api_version(pdevice),
794 .driverVersion = vk_get_driver_version(),
795 .vendorID = 0, /* TODO */
796 .deviceID = 0,
797 .deviceType = VK_PHYSICAL_DEVICE_TYPE_INTEGRATED_GPU,
798 .limits = limits,
799 .sparseProperties = { 0 },
800 };
801
802 strcpy(pProperties->deviceName, pdevice->name);
803 memcpy(pProperties->pipelineCacheUUID, pdevice->cache_uuid, VK_UUID_SIZE);
804 }
805
806 void
807 tu_GetPhysicalDeviceProperties2(VkPhysicalDevice physicalDevice,
808 VkPhysicalDeviceProperties2KHR *pProperties)
809 {
810 TU_FROM_HANDLE(tu_physical_device, pdevice, physicalDevice);
811 tu_GetPhysicalDeviceProperties(physicalDevice, &pProperties->properties);
812
813 vk_foreach_struct(ext, pProperties->pNext)
814 {
815 switch (ext->sType) {
816 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PUSH_DESCRIPTOR_PROPERTIES_KHR: {
817 VkPhysicalDevicePushDescriptorPropertiesKHR *properties =
818 (VkPhysicalDevicePushDescriptorPropertiesKHR *) ext;
819 properties->maxPushDescriptors = MAX_PUSH_DESCRIPTORS;
820 break;
821 }
822 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_ID_PROPERTIES_KHR: {
823 VkPhysicalDeviceIDPropertiesKHR *properties =
824 (VkPhysicalDeviceIDPropertiesKHR *) ext;
825 memcpy(properties->driverUUID, pdevice->driver_uuid, VK_UUID_SIZE);
826 memcpy(properties->deviceUUID, pdevice->device_uuid, VK_UUID_SIZE);
827 properties->deviceLUIDValid = false;
828 break;
829 }
830 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MULTIVIEW_PROPERTIES_KHR: {
831 VkPhysicalDeviceMultiviewPropertiesKHR *properties =
832 (VkPhysicalDeviceMultiviewPropertiesKHR *) ext;
833 properties->maxMultiviewViewCount = MAX_VIEWS;
834 properties->maxMultiviewInstanceIndex = INT_MAX;
835 break;
836 }
837 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_POINT_CLIPPING_PROPERTIES_KHR: {
838 VkPhysicalDevicePointClippingPropertiesKHR *properties =
839 (VkPhysicalDevicePointClippingPropertiesKHR *) ext;
840 properties->pointClippingBehavior =
841 VK_POINT_CLIPPING_BEHAVIOR_ALL_CLIP_PLANES_KHR;
842 break;
843 }
844 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MAINTENANCE_3_PROPERTIES: {
845 VkPhysicalDeviceMaintenance3Properties *properties =
846 (VkPhysicalDeviceMaintenance3Properties *) ext;
847 /* Make sure everything is addressable by a signed 32-bit int, and
848 * our largest descriptors are 96 bytes. */
849 properties->maxPerSetDescriptors = (1ull << 31) / 96;
850 /* Our buffer size fields allow only this much */
851 properties->maxMemoryAllocationSize = 0xFFFFFFFFull;
852 break;
853 }
854 default:
855 break;
856 }
857 }
858 }
859
860 static const VkQueueFamilyProperties tu_queue_family_properties = {
861 .queueFlags =
862 VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT | VK_QUEUE_TRANSFER_BIT,
863 .queueCount = 1,
864 .timestampValidBits = 64,
865 .minImageTransferGranularity = (VkExtent3D) { 1, 1, 1 },
866 };
867
868 void
869 tu_GetPhysicalDeviceQueueFamilyProperties(
870 VkPhysicalDevice physicalDevice,
871 uint32_t *pQueueFamilyPropertyCount,
872 VkQueueFamilyProperties *pQueueFamilyProperties)
873 {
874 VK_OUTARRAY_MAKE(out, pQueueFamilyProperties, pQueueFamilyPropertyCount);
875
876 vk_outarray_append(&out, p) { *p = tu_queue_family_properties; }
877 }
878
879 void
880 tu_GetPhysicalDeviceQueueFamilyProperties2(
881 VkPhysicalDevice physicalDevice,
882 uint32_t *pQueueFamilyPropertyCount,
883 VkQueueFamilyProperties2KHR *pQueueFamilyProperties)
884 {
885 VK_OUTARRAY_MAKE(out, pQueueFamilyProperties, pQueueFamilyPropertyCount);
886
887 vk_outarray_append(&out, p)
888 {
889 p->queueFamilyProperties = tu_queue_family_properties;
890 }
891 }
892
893 static uint64_t
894 tu_get_system_heap_size(void)
895 {
896 struct sysinfo info;
897 sysinfo(&info);
898
899 uint64_t total_ram = (uint64_t) info.totalram * (uint64_t) info.mem_unit;
900
901 /* We don't want to burn too much ram with the GPU. If the user has 4GiB
902 * or less, we use at most half. If they have more than 4GiB, we use 3/4.
903 */
904 uint64_t available_ram;
905 if (total_ram <= 4ull * 1024ull * 1024ull * 1024ull)
906 available_ram = total_ram / 2;
907 else
908 available_ram = total_ram * 3 / 4;
909
910 return available_ram;
911 }
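/* For example: a system with 4 GiB of RAM advertises a 2 GiB heap, while
 * an 8 GiB system advertises 6 GiB.
 */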
912
913 void
914 tu_GetPhysicalDeviceMemoryProperties(
915 VkPhysicalDevice physicalDevice,
916 VkPhysicalDeviceMemoryProperties *pMemoryProperties)
917 {
918 pMemoryProperties->memoryHeapCount = 1;
919 pMemoryProperties->memoryHeaps[0].size = tu_get_system_heap_size();
920 pMemoryProperties->memoryHeaps[0].flags = VK_MEMORY_HEAP_DEVICE_LOCAL_BIT;
921
922 pMemoryProperties->memoryTypeCount = 1;
923 pMemoryProperties->memoryTypes[0].propertyFlags =
924 VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT |
925 VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT |
926 VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
927 pMemoryProperties->memoryTypes[0].heapIndex = 0;
928 }
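/* Because exactly one memory type is exposed here, the
 * tu_Get{Buffer,Image}MemoryRequirements implementations below can
 * hard-code memoryTypeBits = 1 (only bit 0 set).
 */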
929
930 void
931 tu_GetPhysicalDeviceMemoryProperties2(
932 VkPhysicalDevice physicalDevice,
933 VkPhysicalDeviceMemoryProperties2KHR *pMemoryProperties)
934 {
935 return tu_GetPhysicalDeviceMemoryProperties(
936 physicalDevice, &pMemoryProperties->memoryProperties);
937 }
938
939 static VkResult
940 tu_queue_init(struct tu_device *device,
941 struct tu_queue *queue,
942 uint32_t queue_family_index,
943 int idx,
944 VkDeviceQueueCreateFlags flags)
945 {
946 queue->_loader_data.loaderMagic = ICD_LOADER_MAGIC;
947 queue->device = device;
948 queue->queue_family_index = queue_family_index;
949 queue->queue_idx = idx;
950 queue->flags = flags;
951
952 int ret = tu_drm_submitqueue_new(device, 0, &queue->msm_queue_id);
953 if (ret)
954 return VK_ERROR_INITIALIZATION_FAILED;
955
956 return VK_SUCCESS;
957 }
958
959 static void
960 tu_queue_finish(struct tu_queue *queue)
961 {
962 tu_drm_submitqueue_close(queue->device, queue->msm_queue_id);
963 }
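/* tu_drm_submitqueue_new()/tu_drm_submitqueue_close() are the wrappers
 * around DRM_MSM_SUBMITQUEUE_* that this change adds; they are defined
 * with the other DRM helpers outside this file. A minimal sketch,
 * assuming the msm uapi's struct drm_msm_submitqueue:
 *
 *    int
 *    tu_drm_submitqueue_new(const struct tu_device *dev, int priority,
 *                           uint32_t *queue_id)
 *    {
 *       struct drm_msm_submitqueue req = {
 *          .flags = 0,
 *          .prio = priority,
 *       };
 *
 *       int ret = drmCommandWriteRead(dev->physical_device->local_fd,
 *                                     DRM_MSM_SUBMITQUEUE_NEW,
 *                                     &req, sizeof(req));
 *       if (ret)
 *          return ret;
 *
 *       *queue_id = req.id;
 *       return 0;
 *    }
 *
 *    void
 *    tu_drm_submitqueue_close(const struct tu_device *dev, uint32_t queue_id)
 *    {
 *       drmCommandWrite(dev->physical_device->local_fd,
 *                       DRM_MSM_SUBMITQUEUE_CLOSE,
 *                       &queue_id, sizeof(uint32_t));
 *    }
 *
 * The queue id the kernel returns is what tu_QueueSubmit() later passes
 * as drm_msm_gem_submit.queueid.
 */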
964
965 static int
966 tu_get_device_extension_index(const char *name)
967 {
968 for (unsigned i = 0; i < TU_DEVICE_EXTENSION_COUNT; ++i) {
969 if (strcmp(name, tu_device_extensions[i].extensionName) == 0)
970 return i;
971 }
972 return -1;
973 }
974
975 VkResult
976 tu_CreateDevice(VkPhysicalDevice physicalDevice,
977 const VkDeviceCreateInfo *pCreateInfo,
978 const VkAllocationCallbacks *pAllocator,
979 VkDevice *pDevice)
980 {
981 TU_FROM_HANDLE(tu_physical_device, physical_device, physicalDevice);
982 VkResult result;
983 struct tu_device *device;
984
985 /* Check enabled features */
986 if (pCreateInfo->pEnabledFeatures) {
987 VkPhysicalDeviceFeatures supported_features;
988 tu_GetPhysicalDeviceFeatures(physicalDevice, &supported_features);
989 VkBool32 *supported_feature = (VkBool32 *) &supported_features;
990 VkBool32 *enabled_feature = (VkBool32 *) pCreateInfo->pEnabledFeatures;
991 unsigned num_features =
992 sizeof(VkPhysicalDeviceFeatures) / sizeof(VkBool32);
993 for (uint32_t i = 0; i < num_features; i++) {
994 if (enabled_feature[i] && !supported_feature[i])
995 return vk_error(physical_device->instance,
996 VK_ERROR_FEATURE_NOT_PRESENT);
997 }
998 }
999
1000 device = vk_zalloc2(&physical_device->instance->alloc, pAllocator,
1001 sizeof(*device), 8, VK_SYSTEM_ALLOCATION_SCOPE_DEVICE);
1002 if (!device)
1003 return vk_error(physical_device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
1004
1005 device->_loader_data.loaderMagic = ICD_LOADER_MAGIC;
1006 device->instance = physical_device->instance;
1007 device->physical_device = physical_device;
1008
1009 if (pAllocator)
1010 device->alloc = *pAllocator;
1011 else
1012 device->alloc = physical_device->instance->alloc;
1013
1014 for (uint32_t i = 0; i < pCreateInfo->enabledExtensionCount; i++) {
1015 const char *ext_name = pCreateInfo->ppEnabledExtensionNames[i];
1016 int index = tu_get_device_extension_index(ext_name);
1017 if (index < 0 ||
1018 !physical_device->supported_extensions.extensions[index]) {
1019 vk_free(&device->alloc, device);
1020 return vk_error(physical_device->instance,
1021 VK_ERROR_EXTENSION_NOT_PRESENT);
1022 }
1023
1024 device->enabled_extensions.extensions[index] = true;
1025 }
1026
1027 for (unsigned i = 0; i < pCreateInfo->queueCreateInfoCount; i++) {
1028 const VkDeviceQueueCreateInfo *queue_create =
1029 &pCreateInfo->pQueueCreateInfos[i];
1030 uint32_t qfi = queue_create->queueFamilyIndex;
1031 device->queues[qfi] = vk_alloc(
1032 &device->alloc, queue_create->queueCount * sizeof(struct tu_queue),
1033 8, VK_SYSTEM_ALLOCATION_SCOPE_DEVICE);
1034 if (!device->queues[qfi]) {
1035 result = VK_ERROR_OUT_OF_HOST_MEMORY;
1036 goto fail;
1037 }
1038
1039 memset(device->queues[qfi], 0,
1040 queue_create->queueCount * sizeof(struct tu_queue));
1041
1042 device->queue_count[qfi] = queue_create->queueCount;
1043
1044 for (unsigned q = 0; q < queue_create->queueCount; q++) {
1045 result = tu_queue_init(device, &device->queues[qfi][q], qfi, q,
1046 queue_create->flags);
1047 if (result != VK_SUCCESS)
1048 goto fail;
1049 }
1050 }
1051
1052 VkPipelineCacheCreateInfo ci;
1053 ci.sType = VK_STRUCTURE_TYPE_PIPELINE_CACHE_CREATE_INFO;
1054 ci.pNext = NULL;
1055 ci.flags = 0;
1056 ci.pInitialData = NULL;
1057 ci.initialDataSize = 0;
1058 VkPipelineCache pc;
1059 result =
1060 tu_CreatePipelineCache(tu_device_to_handle(device), &ci, NULL, &pc);
1061 if (result != VK_SUCCESS)
1062 goto fail;
1063
1064 device->mem_cache = tu_pipeline_cache_from_handle(pc);
1065
1066 *pDevice = tu_device_to_handle(device);
1067 return VK_SUCCESS;
1068
1069 fail:
1070 for (unsigned i = 0; i < TU_MAX_QUEUE_FAMILIES; i++) {
1071 for (unsigned q = 0; q < device->queue_count[i]; q++)
1072 tu_queue_finish(&device->queues[i][q]);
1073 if (device->queue_count[i])
1074 vk_free(&device->alloc, device->queues[i]);
1075 }
1076
1077 vk_free(&device->alloc, device);
1078 return result;
1079 }
1080
1081 void
1082 tu_DestroyDevice(VkDevice _device, const VkAllocationCallbacks *pAllocator)
1083 {
1084 TU_FROM_HANDLE(tu_device, device, _device);
1085
1086 if (!device)
1087 return;
1088
1089 for (unsigned i = 0; i < TU_MAX_QUEUE_FAMILIES; i++) {
1090 for (unsigned q = 0; q < device->queue_count[i]; q++)
1091 tu_queue_finish(&device->queues[i][q]);
1092 if (device->queue_count[i])
1093 vk_free(&device->alloc, device->queues[i]);
1094 }
1095
1096 VkPipelineCache pc = tu_pipeline_cache_to_handle(device->mem_cache);
1097 tu_DestroyPipelineCache(tu_device_to_handle(device), pc, NULL);
1098
1099 vk_free(&device->alloc, device);
1100 }
1101
1102 VkResult
1103 tu_EnumerateInstanceLayerProperties(uint32_t *pPropertyCount,
1104 VkLayerProperties *pProperties)
1105 {
1106 *pPropertyCount = 0;
1107 return VK_SUCCESS;
1108 }
1109
1110 VkResult
1111 tu_EnumerateDeviceLayerProperties(VkPhysicalDevice physicalDevice,
1112 uint32_t *pPropertyCount,
1113 VkLayerProperties *pProperties)
1114 {
1115 *pPropertyCount = 0;
1116 return VK_SUCCESS;
1117 }
1118
1119 void
1120 tu_GetDeviceQueue2(VkDevice _device,
1121 const VkDeviceQueueInfo2 *pQueueInfo,
1122 VkQueue *pQueue)
1123 {
1124 TU_FROM_HANDLE(tu_device, device, _device);
1125 struct tu_queue *queue;
1126
1127 queue =
1128 &device->queues[pQueueInfo->queueFamilyIndex][pQueueInfo->queueIndex];
1129 if (pQueueInfo->flags != queue->flags) {
1130 /* From the Vulkan 1.1.70 spec:
1131 *
1132 * "The queue returned by vkGetDeviceQueue2 must have the same
1133 * flags value from this structure as that used at device
1134 * creation time in a VkDeviceQueueCreateInfo instance. If no
1135 * matching flags were specified at device creation time then
1136 * pQueue will return VK_NULL_HANDLE."
1137 */
1138 *pQueue = VK_NULL_HANDLE;
1139 return;
1140 }
1141
1142 *pQueue = tu_queue_to_handle(queue);
1143 }
1144
1145 void
1146 tu_GetDeviceQueue(VkDevice _device,
1147 uint32_t queueFamilyIndex,
1148 uint32_t queueIndex,
1149 VkQueue *pQueue)
1150 {
1151 const VkDeviceQueueInfo2 info =
1152 (VkDeviceQueueInfo2) { .sType = VK_STRUCTURE_TYPE_DEVICE_QUEUE_INFO_2,
1153 .queueFamilyIndex = queueFamilyIndex,
1154 .queueIndex = queueIndex };
1155
1156 tu_GetDeviceQueue2(_device, &info, pQueue);
1157 }
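/* Illustrative caller-side usage (not part of this file): retrieving
 * queue 0 of family 0 through the 2-variant entry point:
 *
 *    VkDeviceQueueInfo2 info = {
 *       .sType = VK_STRUCTURE_TYPE_DEVICE_QUEUE_INFO_2,
 *       .flags = 0,
 *       .queueFamilyIndex = 0,
 *       .queueIndex = 0,
 *    };
 *    VkQueue queue;
 *    vkGetDeviceQueue2(device, &info, &queue);
 *
 * Since turnip creates its queues without flags, any nonzero flags here
 * would make tu_GetDeviceQueue2() return VK_NULL_HANDLE per the spec
 * language quoted above.
 */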
1158
1159 VkResult
1160 tu_QueueSubmit(VkQueue _queue,
1161 uint32_t submitCount,
1162 const VkSubmitInfo *pSubmits,
1163 VkFence _fence)
1164 {
1165 TU_FROM_HANDLE(tu_queue, queue, _queue);
1166
1167 for (uint32_t i = 0; i < submitCount; ++i) {
1168 const VkSubmitInfo *submit = pSubmits + i;
1169 struct tu_bo_list bo_list;
1170 tu_bo_list_init(&bo_list);
1171
1172 uint32_t entry_count = 0;
1173       for (uint32_t j = 0; j < submit->commandBufferCount; ++j) {
1174 TU_FROM_HANDLE(tu_cmd_buffer, cmdbuf, submit->pCommandBuffers[j]);
1175 entry_count += cmdbuf->cs.entry_count;
1176 }
1177
1178 struct drm_msm_gem_submit_cmd cmds[entry_count];
1179 uint32_t entry_idx = 0;
1180       for (uint32_t j = 0; j < submit->commandBufferCount; ++j) {
1181 TU_FROM_HANDLE(tu_cmd_buffer, cmdbuf, submit->pCommandBuffers[j]);
1182 struct tu_cmd_stream *stream = &cmdbuf->cs;
1183 for (unsigned i = 0; i < stream->entry_count; ++i, ++entry_idx) {
1184 cmds[entry_idx].type = MSM_SUBMIT_CMD_BUF;
1185 cmds[entry_idx].submit_idx = tu_bo_list_add(&bo_list, stream->entries[i].bo);
1186 cmds[entry_idx].submit_offset = stream->entries[i].offset;
1187 cmds[entry_idx].size = stream->entries[i].size;
1188 cmds[entry_idx].pad = 0;
1189 cmds[entry_idx].nr_relocs = 0;
1190 cmds[entry_idx].relocs = 0;
1192 }
1193 }
1194
1195 struct drm_msm_gem_submit_bo bos[bo_list.count];
1196 for (unsigned i = 0; i < bo_list.count; ++i) {
1197 bos[i].flags = MSM_SUBMIT_BO_READ | MSM_SUBMIT_BO_WRITE;
1198 bos[i].handle = bo_list.handles[i];
1199 bos[i].presumed = 0;
1200 }
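      /* Marking every BO as both READ and WRITE is conservative: it keeps
       * the kernel's implicit synchronization correct without tracking
       * per-buffer access, at the cost of false dependencies between
       * otherwise independent submits.
       */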
1201
1202 struct drm_msm_gem_submit req = {
1203 .flags = MSM_PIPE_3D0,
1204 .queueid = queue->msm_queue_id,
1205 .bos = (uint64_t)(uintptr_t)bos,
1206 .nr_bos = bo_list.count,
1207 .cmds = (uint64_t)(uintptr_t)cmds,
1208 .nr_cmds = entry_count,
1209 };
1210
1211 int ret = drmCommandWriteRead(queue->device->physical_device->local_fd,
1212 DRM_MSM_GEM_SUBMIT,
1213 &req, sizeof(req));
1214 if (ret) {
1215 fprintf(stderr, "submit failed: %s\n", strerror(errno));
1216 abort();
1217 }
1218
1219 tu_bo_list_destroy(&bo_list);
1220 }
1221 return VK_SUCCESS;
1222 }
1223
1224 VkResult
1225 tu_QueueWaitIdle(VkQueue _queue)
1226 {
1227 return VK_SUCCESS;
1228 }
1229
1230 VkResult
1231 tu_DeviceWaitIdle(VkDevice _device)
1232 {
1233 TU_FROM_HANDLE(tu_device, device, _device);
1234
1235 for (unsigned i = 0; i < TU_MAX_QUEUE_FAMILIES; i++) {
1236 for (unsigned q = 0; q < device->queue_count[i]; q++) {
1237 tu_QueueWaitIdle(tu_queue_to_handle(&device->queues[i][q]));
1238 }
1239 }
1240 return VK_SUCCESS;
1241 }
1242
1243 VkResult
1244 tu_EnumerateInstanceExtensionProperties(const char *pLayerName,
1245 uint32_t *pPropertyCount,
1246 VkExtensionProperties *pProperties)
1247 {
1248 VK_OUTARRAY_MAKE(out, pProperties, pPropertyCount);
1249
1250    /* We support no layers */
1251 if (pLayerName)
1252 return vk_error(NULL, VK_ERROR_LAYER_NOT_PRESENT);
1253
1254 for (int i = 0; i < TU_INSTANCE_EXTENSION_COUNT; i++) {
1255 if (tu_supported_instance_extensions.extensions[i]) {
1256 vk_outarray_append(&out, prop) { *prop = tu_instance_extensions[i]; }
1257 }
1258 }
1259
1260 return vk_outarray_status(&out);
1261 }
1262
1263 VkResult
1264 tu_EnumerateDeviceExtensionProperties(VkPhysicalDevice physicalDevice,
1265 const char *pLayerName,
1266 uint32_t *pPropertyCount,
1267 VkExtensionProperties *pProperties)
1268 {
1270 TU_FROM_HANDLE(tu_physical_device, device, physicalDevice);
1271 VK_OUTARRAY_MAKE(out, pProperties, pPropertyCount);
1272
1273    /* We support no layers */
1274 if (pLayerName)
1275 return vk_error(NULL, VK_ERROR_LAYER_NOT_PRESENT);
1276
1277 for (int i = 0; i < TU_DEVICE_EXTENSION_COUNT; i++) {
1278 if (device->supported_extensions.extensions[i]) {
1279 vk_outarray_append(&out, prop) { *prop = tu_device_extensions[i]; }
1280 }
1281 }
1282
1283 return vk_outarray_status(&out);
1284 }
1285
1286 PFN_vkVoidFunction
1287 tu_GetInstanceProcAddr(VkInstance _instance, const char *pName)
1288 {
1289 TU_FROM_HANDLE(tu_instance, instance, _instance);
1290
1291 return tu_lookup_entrypoint_checked(
1292 pName, instance ? instance->api_version : 0,
1293 instance ? &instance->enabled_extensions : NULL, NULL);
1294 }
1295
1296 /* The loader wants us to expose a second GetInstanceProcAddr function
1297 * to work around certain LD_PRELOAD issues seen in apps.
1298 */
1299 PUBLIC
1300 VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL
1301 vk_icdGetInstanceProcAddr(VkInstance instance, const char *pName);
1302
1303 PUBLIC
1304 VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL
1305 vk_icdGetInstanceProcAddr(VkInstance instance, const char *pName)
1306 {
1307 return tu_GetInstanceProcAddr(instance, pName);
1308 }
1309
1310 PFN_vkVoidFunction
1311 tu_GetDeviceProcAddr(VkDevice _device, const char *pName)
1312 {
1313 TU_FROM_HANDLE(tu_device, device, _device);
1314
1315 return tu_lookup_entrypoint_checked(pName, device->instance->api_version,
1316 &device->instance->enabled_extensions,
1317 &device->enabled_extensions);
1318 }
1319
1320 static VkResult
1321 tu_alloc_memory(struct tu_device *device,
1322 const VkMemoryAllocateInfo *pAllocateInfo,
1323 const VkAllocationCallbacks *pAllocator,
1324 VkDeviceMemory *pMem)
1325 {
1326 struct tu_device_memory *mem;
1327 VkResult result;
1328
1329 assert(pAllocateInfo->sType == VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO);
1330
1331 if (pAllocateInfo->allocationSize == 0) {
1332 /* Apparently, this is allowed */
1333 *pMem = VK_NULL_HANDLE;
1334 return VK_SUCCESS;
1335 }
1336
1337 mem = vk_alloc2(&device->alloc, pAllocator, sizeof(*mem), 8,
1338 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
1339 if (mem == NULL)
1340 return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
1341
1342 result = tu_bo_init_new(device, &mem->bo, pAllocateInfo->allocationSize);
1343 if (result != VK_SUCCESS) {
1344 vk_free2(&device->alloc, pAllocator, mem);
1345 return result;
1346 }
1347
1348 mem->size = pAllocateInfo->allocationSize;
1349 mem->type_index = pAllocateInfo->memoryTypeIndex;
1350
1351 mem->map = NULL;
1352 mem->user_ptr = NULL;
1353
1354 *pMem = tu_device_memory_to_handle(mem);
1355
1356 return VK_SUCCESS;
1357 }
1358
1359 VkResult
1360 tu_AllocateMemory(VkDevice _device,
1361 const VkMemoryAllocateInfo *pAllocateInfo,
1362 const VkAllocationCallbacks *pAllocator,
1363 VkDeviceMemory *pMem)
1364 {
1365 TU_FROM_HANDLE(tu_device, device, _device);
1366 return tu_alloc_memory(device, pAllocateInfo, pAllocator, pMem);
1367 }
1368
1369 void
1370 tu_FreeMemory(VkDevice _device,
1371 VkDeviceMemory _mem,
1372 const VkAllocationCallbacks *pAllocator)
1373 {
1374 TU_FROM_HANDLE(tu_device, device, _device);
1375 TU_FROM_HANDLE(tu_device_memory, mem, _mem);
1376
1377 if (mem == NULL)
1378 return;
1379
1380 tu_bo_finish(device, &mem->bo);
1381 vk_free2(&device->alloc, pAllocator, mem);
1382 }
1383
1384 VkResult
1385 tu_MapMemory(VkDevice _device,
1386 VkDeviceMemory _memory,
1387 VkDeviceSize offset,
1388 VkDeviceSize size,
1389 VkMemoryMapFlags flags,
1390 void **ppData)
1391 {
1392 TU_FROM_HANDLE(tu_device, device, _device);
1393 TU_FROM_HANDLE(tu_device_memory, mem, _memory);
1394 VkResult result;
1395
1396 if (mem == NULL) {
1397 *ppData = NULL;
1398 return VK_SUCCESS;
1399 }
1400
1401 if (mem->user_ptr) {
1402 *ppData = mem->user_ptr;
1403 } else if (!mem->map) {
1404 result = tu_bo_map(device, &mem->bo);
1405 if (result != VK_SUCCESS)
1406 return result;
1407 *ppData = mem->map = mem->bo.map;
1408 } else
1409 *ppData = mem->map;
1410
1411 if (*ppData) {
1412 *ppData += offset;
1413 return VK_SUCCESS;
1414 }
1415
1416 return vk_error(device->instance, VK_ERROR_MEMORY_MAP_FAILED);
1417 }
1418
1419 void
1420 tu_UnmapMemory(VkDevice _device, VkDeviceMemory _memory)
1421 {
1422 /* I do not see any unmapping done by the freedreno Gallium driver. */
1423 }
1424
1425 VkResult
1426 tu_FlushMappedMemoryRanges(VkDevice _device,
1427 uint32_t memoryRangeCount,
1428 const VkMappedMemoryRange *pMemoryRanges)
1429 {
1430 return VK_SUCCESS;
1431 }
1432
1433 VkResult
1434 tu_InvalidateMappedMemoryRanges(VkDevice _device,
1435 uint32_t memoryRangeCount,
1436 const VkMappedMemoryRange *pMemoryRanges)
1437 {
1438 return VK_SUCCESS;
1439 }
1440
1441 void
1442 tu_GetBufferMemoryRequirements(VkDevice _device,
1443 VkBuffer _buffer,
1444 VkMemoryRequirements *pMemoryRequirements)
1445 {
1446 TU_FROM_HANDLE(tu_buffer, buffer, _buffer);
1447
1448 pMemoryRequirements->memoryTypeBits = 1;
1449 pMemoryRequirements->alignment = 16;
1450 pMemoryRequirements->size =
1451 align64(buffer->size, pMemoryRequirements->alignment);
1452 }
1453
1454 void
1455 tu_GetBufferMemoryRequirements2(
1456 VkDevice device,
1457 const VkBufferMemoryRequirementsInfo2KHR *pInfo,
1458 VkMemoryRequirements2KHR *pMemoryRequirements)
1459 {
1460 tu_GetBufferMemoryRequirements(device, pInfo->buffer,
1461 &pMemoryRequirements->memoryRequirements);
1462 }
1463
1464 void
1465 tu_GetImageMemoryRequirements(VkDevice _device,
1466 VkImage _image,
1467 VkMemoryRequirements *pMemoryRequirements)
1468 {
1469 TU_FROM_HANDLE(tu_image, image, _image);
1470
1471 pMemoryRequirements->memoryTypeBits = 1;
1472 pMemoryRequirements->size = image->size;
1473 pMemoryRequirements->alignment = image->alignment;
1474 }
1475
1476 void
1477 tu_GetImageMemoryRequirements2(VkDevice device,
1478 const VkImageMemoryRequirementsInfo2KHR *pInfo,
1479 VkMemoryRequirements2KHR *pMemoryRequirements)
1480 {
1481 tu_GetImageMemoryRequirements(device, pInfo->image,
1482 &pMemoryRequirements->memoryRequirements);
1483 }
1484
1485 void
1486 tu_GetImageSparseMemoryRequirements(
1487 VkDevice device,
1488 VkImage image,
1489 uint32_t *pSparseMemoryRequirementCount,
1490 VkSparseImageMemoryRequirements *pSparseMemoryRequirements)
1491 {
1492 tu_stub();
1493 }
1494
1495 void
1496 tu_GetImageSparseMemoryRequirements2(
1497 VkDevice device,
1498 const VkImageSparseMemoryRequirementsInfo2KHR *pInfo,
1499 uint32_t *pSparseMemoryRequirementCount,
1500 VkSparseImageMemoryRequirements2KHR *pSparseMemoryRequirements)
1501 {
1502 tu_stub();
1503 }
1504
1505 void
1506 tu_GetDeviceMemoryCommitment(VkDevice device,
1507 VkDeviceMemory memory,
1508 VkDeviceSize *pCommittedMemoryInBytes)
1509 {
1510 *pCommittedMemoryInBytes = 0;
1511 }
1512
1513 VkResult
1514 tu_BindBufferMemory2(VkDevice device,
1515 uint32_t bindInfoCount,
1516 const VkBindBufferMemoryInfoKHR *pBindInfos)
1517 {
1518 return VK_SUCCESS;
1519 }
1520
1521 VkResult
1522 tu_BindBufferMemory(VkDevice device,
1523 VkBuffer buffer,
1524 VkDeviceMemory memory,
1525 VkDeviceSize memoryOffset)
1526 {
1527 const VkBindBufferMemoryInfoKHR info = {
1528 .sType = VK_STRUCTURE_TYPE_BIND_BUFFER_MEMORY_INFO_KHR,
1529 .buffer = buffer,
1530 .memory = memory,
1531 .memoryOffset = memoryOffset
1532 };
1533
1534 return tu_BindBufferMemory2(device, 1, &info);
1535 }
1536
1537 VkResult
1538 tu_BindImageMemory2(VkDevice device,
1539 uint32_t bindInfoCount,
1540 const VkBindImageMemoryInfoKHR *pBindInfos)
1541 {
1542 return VK_SUCCESS;
1543 }
1544
1545 VkResult
1546 tu_BindImageMemory(VkDevice device,
1547 VkImage image,
1548 VkDeviceMemory memory,
1549 VkDeviceSize memoryOffset)
1550 {
1551 const VkBindImageMemoryInfoKHR info = {
1552       .sType = VK_STRUCTURE_TYPE_BIND_IMAGE_MEMORY_INFO_KHR,
1553 .image = image,
1554 .memory = memory,
1555 .memoryOffset = memoryOffset
1556 };
1557
1558 return tu_BindImageMemory2(device, 1, &info);
1559 }
1560
1561 VkResult
1562 tu_QueueBindSparse(VkQueue _queue,
1563 uint32_t bindInfoCount,
1564 const VkBindSparseInfo *pBindInfo,
1565 VkFence _fence)
1566 {
1567 return VK_SUCCESS;
1568 }
1569
1570 VkResult
1571 tu_CreateFence(VkDevice _device,
1572 const VkFenceCreateInfo *pCreateInfo,
1573 const VkAllocationCallbacks *pAllocator,
1574 VkFence *pFence)
1575 {
1576 TU_FROM_HANDLE(tu_device, device, _device);
1577
1578 struct tu_fence *fence =
1579 vk_alloc2(&device->alloc, pAllocator, sizeof(*fence), 8,
1580 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
1581
1582 if (!fence)
1583 return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
1584
1585 *pFence = tu_fence_to_handle(fence);
1586
1587 return VK_SUCCESS;
1588 }
1589
1590 void
1591 tu_DestroyFence(VkDevice _device,
1592 VkFence _fence,
1593 const VkAllocationCallbacks *pAllocator)
1594 {
1595 TU_FROM_HANDLE(tu_device, device, _device);
1596 TU_FROM_HANDLE(tu_fence, fence, _fence);
1597
1598 if (!fence)
1599 return;
1600
1601 vk_free2(&device->alloc, pAllocator, fence);
1602 }
1603
1604 VkResult
1605 tu_WaitForFences(VkDevice _device,
1606 uint32_t fenceCount,
1607 const VkFence *pFences,
1608 VkBool32 waitAll,
1609 uint64_t timeout)
1610 {
1611 return VK_SUCCESS;
1612 }
1613
1614 VkResult
1615 tu_ResetFences(VkDevice _device, uint32_t fenceCount, const VkFence *pFences)
1616 {
1617 return VK_SUCCESS;
1618 }
1619
1620 VkResult
1621 tu_GetFenceStatus(VkDevice _device, VkFence _fence)
1622 {
1623 return VK_SUCCESS;
1624 }
1625
1626 // Queue semaphore functions
1627
1628 VkResult
1629 tu_CreateSemaphore(VkDevice _device,
1630 const VkSemaphoreCreateInfo *pCreateInfo,
1631 const VkAllocationCallbacks *pAllocator,
1632 VkSemaphore *pSemaphore)
1633 {
1634 TU_FROM_HANDLE(tu_device, device, _device);
1635
1636 struct tu_semaphore *sem =
1637 vk_alloc2(&device->alloc, pAllocator, sizeof(*sem), 8,
1638 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
1639 if (!sem)
1640 return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
1641
1642 *pSemaphore = tu_semaphore_to_handle(sem);
1643 return VK_SUCCESS;
1644 }
1645
1646 void
1647 tu_DestroySemaphore(VkDevice _device,
1648 VkSemaphore _semaphore,
1649 const VkAllocationCallbacks *pAllocator)
1650 {
1651 TU_FROM_HANDLE(tu_device, device, _device);
1652 TU_FROM_HANDLE(tu_semaphore, sem, _semaphore);
1653 if (!_semaphore)
1654 return;
1655
1656 vk_free2(&device->alloc, pAllocator, sem);
1657 }
1658
1659 VkResult
1660 tu_CreateEvent(VkDevice _device,
1661 const VkEventCreateInfo *pCreateInfo,
1662 const VkAllocationCallbacks *pAllocator,
1663 VkEvent *pEvent)
1664 {
1665 TU_FROM_HANDLE(tu_device, device, _device);
1666 struct tu_event *event =
1667 vk_alloc2(&device->alloc, pAllocator, sizeof(*event), 8,
1668 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
1669
1670 if (!event)
1671 return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
1672
1673 *pEvent = tu_event_to_handle(event);
1674
1675 return VK_SUCCESS;
1676 }
1677
1678 void
1679 tu_DestroyEvent(VkDevice _device,
1680 VkEvent _event,
1681 const VkAllocationCallbacks *pAllocator)
1682 {
1683 TU_FROM_HANDLE(tu_device, device, _device);
1684 TU_FROM_HANDLE(tu_event, event, _event);
1685
1686 if (!event)
1687 return;
1688 vk_free2(&device->alloc, pAllocator, event);
1689 }
1690
1691 VkResult
1692 tu_GetEventStatus(VkDevice _device, VkEvent _event)
1693 {
1694 TU_FROM_HANDLE(tu_event, event, _event);
1695
1696 if (*event->map == 1)
1697 return VK_EVENT_SET;
1698 return VK_EVENT_RESET;
1699 }
1700
1701 VkResult
1702 tu_SetEvent(VkDevice _device, VkEvent _event)
1703 {
1704 TU_FROM_HANDLE(tu_event, event, _event);
1705 *event->map = 1;
1706
1707 return VK_SUCCESS;
1708 }
1709
1710 VkResult
1711 tu_ResetEvent(VkDevice _device, VkEvent _event)
1712 {
1713 TU_FROM_HANDLE(tu_event, event, _event);
1714 *event->map = 0;
1715
1716 return VK_SUCCESS;
1717 }
1718
1719 VkResult
1720 tu_CreateBuffer(VkDevice _device,
1721 const VkBufferCreateInfo *pCreateInfo,
1722 const VkAllocationCallbacks *pAllocator,
1723 VkBuffer *pBuffer)
1724 {
1725 TU_FROM_HANDLE(tu_device, device, _device);
1726 struct tu_buffer *buffer;
1727
1728 assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO);
1729
1730 buffer = vk_alloc2(&device->alloc, pAllocator, sizeof(*buffer), 8,
1731 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
1732 if (buffer == NULL)
1733 return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
1734
1735 buffer->size = pCreateInfo->size;
1736 buffer->usage = pCreateInfo->usage;
1737 buffer->flags = pCreateInfo->flags;
1738
1739 *pBuffer = tu_buffer_to_handle(buffer);
1740
1741 return VK_SUCCESS;
1742 }
1743
1744 void
1745 tu_DestroyBuffer(VkDevice _device,
1746 VkBuffer _buffer,
1747 const VkAllocationCallbacks *pAllocator)
1748 {
1749 TU_FROM_HANDLE(tu_device, device, _device);
1750 TU_FROM_HANDLE(tu_buffer, buffer, _buffer);
1751
1752 if (!buffer)
1753 return;
1754
1755 vk_free2(&device->alloc, pAllocator, buffer);
1756 }
1757
1758 static uint32_t
1759 tu_surface_max_layer_count(struct tu_image_view *iview)
1760 {
1761 return iview->type == VK_IMAGE_VIEW_TYPE_3D
1762 ? iview->extent.depth
1763 : (iview->base_layer + iview->layer_count);
1764 }
1765
1766 VkResult
1767 tu_CreateFramebuffer(VkDevice _device,
1768 const VkFramebufferCreateInfo *pCreateInfo,
1769 const VkAllocationCallbacks *pAllocator,
1770 VkFramebuffer *pFramebuffer)
1771 {
1772 TU_FROM_HANDLE(tu_device, device, _device);
1773 struct tu_framebuffer *framebuffer;
1774
1775 assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO);
1776
1777 size_t size = sizeof(*framebuffer) + sizeof(struct tu_attachment_info) *
1778 pCreateInfo->attachmentCount;
1779 framebuffer = vk_alloc2(&device->alloc, pAllocator, size, 8,
1780 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
1781 if (framebuffer == NULL)
1782 return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
1783
1784 framebuffer->attachment_count = pCreateInfo->attachmentCount;
1785 framebuffer->width = pCreateInfo->width;
1786 framebuffer->height = pCreateInfo->height;
1787 framebuffer->layers = pCreateInfo->layers;
1788 for (uint32_t i = 0; i < pCreateInfo->attachmentCount; i++) {
1789 VkImageView _iview = pCreateInfo->pAttachments[i];
1790 struct tu_image_view *iview = tu_image_view_from_handle(_iview);
1791 framebuffer->attachments[i].attachment = iview;
1792
1793 framebuffer->width = MIN2(framebuffer->width, iview->extent.width);
1794 framebuffer->height = MIN2(framebuffer->height, iview->extent.height);
1795 framebuffer->layers =
1796 MIN2(framebuffer->layers, tu_surface_max_layer_count(iview));
1797 }
1798
1799 *pFramebuffer = tu_framebuffer_to_handle(framebuffer);
1800 return VK_SUCCESS;
1801 }
1802
1803 void
1804 tu_DestroyFramebuffer(VkDevice _device,
1805 VkFramebuffer _fb,
1806 const VkAllocationCallbacks *pAllocator)
1807 {
1808 TU_FROM_HANDLE(tu_device, device, _device);
1809 TU_FROM_HANDLE(tu_framebuffer, fb, _fb);
1810
1811 if (!fb)
1812 return;
1813 vk_free2(&device->alloc, pAllocator, fb);
1814 }
1815
1816 static void
1817 tu_init_sampler(struct tu_device *device,
1818 struct tu_sampler *sampler,
1819 const VkSamplerCreateInfo *pCreateInfo)
1820 {
1821 }
1822
1823 VkResult
1824 tu_CreateSampler(VkDevice _device,
1825 const VkSamplerCreateInfo *pCreateInfo,
1826 const VkAllocationCallbacks *pAllocator,
1827 VkSampler *pSampler)
1828 {
1829 TU_FROM_HANDLE(tu_device, device, _device);
1830 struct tu_sampler *sampler;
1831
1832 assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_SAMPLER_CREATE_INFO);
1833
1834 sampler = vk_alloc2(&device->alloc, pAllocator, sizeof(*sampler), 8,
1835 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
1836 if (!sampler)
1837 return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
1838
1839 tu_init_sampler(device, sampler, pCreateInfo);
1840 *pSampler = tu_sampler_to_handle(sampler);
1841
1842 return VK_SUCCESS;
1843 }
1844
1845 void
1846 tu_DestroySampler(VkDevice _device,
1847 VkSampler _sampler,
1848 const VkAllocationCallbacks *pAllocator)
1849 {
1850 TU_FROM_HANDLE(tu_device, device, _device);
1851 TU_FROM_HANDLE(tu_sampler, sampler, _sampler);
1852
1853 if (!sampler)
1854 return;
1855 vk_free2(&device->alloc, pAllocator, sampler);
1856 }
1857
1858 /* vk_icd.h does not declare this function, so we declare it here to
1859  * suppress -Wmissing-prototypes.
1860 */
1861 PUBLIC VKAPI_ATTR VkResult VKAPI_CALL
1862 vk_icdNegotiateLoaderICDInterfaceVersion(uint32_t *pSupportedVersion);
1863
1864 PUBLIC VKAPI_ATTR VkResult VKAPI_CALL
1865 vk_icdNegotiateLoaderICDInterfaceVersion(uint32_t *pSupportedVersion)
1866 {
1867 /* For the full details on loader interface versioning, see
1868 * <https://github.com/KhronosGroup/Vulkan-LoaderAndValidationLayers/blob/master/loader/LoaderAndLayerInterface.md>.
1869 * What follows is a condensed summary, to help you navigate the large and
1870 * confusing official doc.
1871 *
1872 * - Loader interface v0 is incompatible with later versions. We don't
1873 * support it.
1874 *
1875 * - In loader interface v1:
1876 * - The first ICD entrypoint called by the loader is
1877 * vk_icdGetInstanceProcAddr(). The ICD must statically expose this
1878 * entrypoint.
1879 * - The ICD must statically expose no other Vulkan symbol unless it
1880 * is linked with -Bsymbolic.
1881 * - Each dispatchable Vulkan handle created by the ICD must be
1882 * a pointer to a struct whose first member is VK_LOADER_DATA. The
1883  *       ICD must initialize VK_LOADER_DATA.loaderMagic to
1884 * ICD_LOADER_MAGIC.
1885 * - The loader implements vkCreate{PLATFORM}SurfaceKHR() and
1886 * vkDestroySurfaceKHR(). The ICD must be capable of working with
1887 * such loader-managed surfaces.
1888 *
1889 * - Loader interface v2 differs from v1 in:
1890 * - The first ICD entrypoint called by the loader is
1891 * vk_icdNegotiateLoaderICDInterfaceVersion(). The ICD must
1892 * statically expose this entrypoint.
1893 *
1894 * - Loader interface v3 differs from v2 in:
1895 * - The ICD must implement vkCreate{PLATFORM}SurfaceKHR(),
1896  *       vkDestroySurfaceKHR(), and other API which uses VkSurfaceKHR,
1897 * because the loader no longer does so.
1898 */
1899 *pSupportedVersion = MIN2(*pSupportedVersion, 3u);
1900 return VK_SUCCESS;
1901 }
1902
1903 void
1904 tu_GetPhysicalDeviceExternalSemaphoreProperties(
1905 VkPhysicalDevice physicalDevice,
1906 const VkPhysicalDeviceExternalSemaphoreInfoKHR *pExternalSemaphoreInfo,
1907 VkExternalSemaphorePropertiesKHR *pExternalSemaphoreProperties)
1908 {
1909 pExternalSemaphoreProperties->exportFromImportedHandleTypes = 0;
1910 pExternalSemaphoreProperties->compatibleHandleTypes = 0;
1911 pExternalSemaphoreProperties->externalSemaphoreFeatures = 0;
1912 }
1913
1914 void
1915 tu_GetPhysicalDeviceExternalFenceProperties(
1916 VkPhysicalDevice physicalDevice,
1917 const VkPhysicalDeviceExternalFenceInfoKHR *pExternalFenceInfo,
1918 VkExternalFencePropertiesKHR *pExternalFenceProperties)
1919 {
1920 pExternalFenceProperties->exportFromImportedHandleTypes = 0;
1921 pExternalFenceProperties->compatibleHandleTypes = 0;
1922 pExternalFenceProperties->externalFenceFeatures = 0;
1923 }
1924
1925 VkResult
1926 tu_CreateDebugReportCallbackEXT(
1927 VkInstance _instance,
1928 const VkDebugReportCallbackCreateInfoEXT *pCreateInfo,
1929 const VkAllocationCallbacks *pAllocator,
1930 VkDebugReportCallbackEXT *pCallback)
1931 {
1932 TU_FROM_HANDLE(tu_instance, instance, _instance);
1933 return vk_create_debug_report_callback(&instance->debug_report_callbacks,
1934 pCreateInfo, pAllocator,
1935 &instance->alloc, pCallback);
1936 }
1937
1938 void
1939 tu_DestroyDebugReportCallbackEXT(VkInstance _instance,
1940 VkDebugReportCallbackEXT _callback,
1941 const VkAllocationCallbacks *pAllocator)
1942 {
1943 TU_FROM_HANDLE(tu_instance, instance, _instance);
1944 vk_destroy_debug_report_callback(&instance->debug_report_callbacks,
1945 _callback, pAllocator, &instance->alloc);
1946 }
1947
1948 void
1949 tu_DebugReportMessageEXT(VkInstance _instance,
1950 VkDebugReportFlagsEXT flags,
1951 VkDebugReportObjectTypeEXT objectType,
1952 uint64_t object,
1953 size_t location,
1954 int32_t messageCode,
1955 const char *pLayerPrefix,
1956 const char *pMessage)
1957 {
1958 TU_FROM_HANDLE(tu_instance, instance, _instance);
1959 vk_debug_report(&instance->debug_report_callbacks, flags, objectType,
1960 object, location, messageCode, pLayerPrefix, pMessage);
1961 }
1962
1963 void
1964 tu_GetDeviceGroupPeerMemoryFeatures(
1965 VkDevice device,
1966 uint32_t heapIndex,
1967 uint32_t localDeviceIndex,
1968 uint32_t remoteDeviceIndex,
1969 VkPeerMemoryFeatureFlags *pPeerMemoryFeatures)
1970 {
1971 assert(localDeviceIndex == remoteDeviceIndex);
1972
1973 *pPeerMemoryFeatures = VK_PEER_MEMORY_FEATURE_COPY_SRC_BIT |
1974 VK_PEER_MEMORY_FEATURE_COPY_DST_BIT |
1975 VK_PEER_MEMORY_FEATURE_GENERIC_SRC_BIT |
1976 VK_PEER_MEMORY_FEATURE_GENERIC_DST_BIT;
1977 }