turnip: basic msaa working
[mesa.git] / src / freedreno / vulkan / tu_device.c
1 /*
2 * Copyright © 2016 Red Hat.
3 * Copyright © 2016 Bas Nieuwenhuizen
4 *
5 * based in part on anv driver which is:
6 * Copyright © 2015 Intel Corporation
7 *
8 * Permission is hereby granted, free of charge, to any person obtaining a
9 * copy of this software and associated documentation files (the "Software"),
10 * to deal in the Software without restriction, including without limitation
11 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
12 * and/or sell copies of the Software, and to permit persons to whom the
13 * Software is furnished to do so, subject to the following conditions:
14 *
15 * The above copyright notice and this permission notice (including the next
16 * paragraph) shall be included in all copies or substantial portions of the
17 * Software.
18 *
19 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
20 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
21 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
22 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
23 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
24 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
25 * DEALINGS IN THE SOFTWARE.
26 */
27
28 #include "tu_private.h"
29
30 #include <fcntl.h>
31 #include <libsync.h>
32 #include <stdbool.h>
33 #include <string.h>
34 #include <sys/mman.h>
35 #include <sys/sysinfo.h>
36 #include <unistd.h>
37 #include <xf86drm.h>
38
39 #include "compiler/glsl_types.h"
40 #include "util/debug.h"
41 #include "util/disk_cache.h"
42 #include "util/strtod.h"
43 #include "vk_format.h"
44 #include "vk_util.h"
45
46 #include "drm-uapi/msm_drm.h"
47
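/* Build the pipeline-cache UUID from the mesa build timestamp (bytes 0-3),
 * the GPU family id (bytes 4-5) and the literal string "tu"; the remaining
 * VK_UUID_SIZE bytes stay zero from the memset below.
 */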
48 static int
49 tu_device_get_cache_uuid(uint16_t family, void *uuid)
50 {
51 uint32_t mesa_timestamp;
52 uint16_t f = family;
53 memset(uuid, 0, VK_UUID_SIZE);
54 if (!disk_cache_get_function_timestamp(tu_device_get_cache_uuid,
55 &mesa_timestamp))
56 return -1;
57
58 memcpy(uuid, &mesa_timestamp, 4);
59 memcpy((char *) uuid + 4, &f, 2);
60 snprintf((char *) uuid + 6, VK_UUID_SIZE - 10, "tu");
61 return 0;
62 }
63
64 static void
65 tu_get_driver_uuid(void *uuid)
66 {
67 memset(uuid, 0, VK_UUID_SIZE);
68 snprintf(uuid, VK_UUID_SIZE, "freedreno");
69 }
70
71 static void
72 tu_get_device_uuid(void *uuid)
73 {
74 memset(uuid, 0, VK_UUID_SIZE);
75 }
76
77 static VkResult
78 tu_bo_init(struct tu_device *dev,
79 struct tu_bo *bo,
80 uint32_t gem_handle,
81 uint64_t size)
82 {
83 uint64_t iova = tu_gem_info_iova(dev, gem_handle);
84 if (!iova)
85 return VK_ERROR_OUT_OF_DEVICE_MEMORY;
86
87 *bo = (struct tu_bo) {
88 .gem_handle = gem_handle,
89 .size = size,
90 .iova = iova,
91 };
92
93 return VK_SUCCESS;
94 }
95
96 VkResult
97 tu_bo_init_new(struct tu_device *dev, struct tu_bo *bo, uint64_t size)
98 {
99 /* TODO: Choose better flags. As of 2018-11-12, freedreno/drm/msm_bo.c
100 * always sets `flags = MSM_BO_WC`, and we copy that behavior here.
101 */
102 uint32_t gem_handle = tu_gem_new(dev, size, MSM_BO_WC);
103 if (!gem_handle)
104 return vk_error(dev->instance, VK_ERROR_OUT_OF_DEVICE_MEMORY);
105
106 VkResult result = tu_bo_init(dev, bo, gem_handle, size);
107 if (result != VK_SUCCESS) {
108 tu_gem_close(dev, gem_handle);
109 return vk_error(dev->instance, result);
110 }
111
112 return VK_SUCCESS;
113 }
114
115 VkResult
116 tu_bo_init_dmabuf(struct tu_device *dev,
117 struct tu_bo *bo,
118 uint64_t size,
119 int fd)
120 {
121 uint32_t gem_handle = tu_gem_import_dmabuf(dev, fd, size);
122 if (!gem_handle)
123 return vk_error(dev->instance, VK_ERROR_INVALID_EXTERNAL_HANDLE);
124
125 VkResult result = tu_bo_init(dev, bo, gem_handle, size);
126 if (result != VK_SUCCESS) {
127 tu_gem_close(dev, gem_handle);
128 return vk_error(dev->instance, result);
129 }
130
131 return VK_SUCCESS;
132 }
133
134 int
135 tu_bo_export_dmabuf(struct tu_device *dev, struct tu_bo *bo)
136 {
137 return tu_gem_export_dmabuf(dev, bo->gem_handle);
138 }
139
140 VkResult
141 tu_bo_map(struct tu_device *dev, struct tu_bo *bo)
142 {
143 if (bo->map)
144 return VK_SUCCESS;
145
146 uint64_t offset = tu_gem_info_offset(dev, bo->gem_handle);
147 if (!offset)
148 return vk_error(dev->instance, VK_ERROR_OUT_OF_DEVICE_MEMORY);
149
150 /* TODO: Should we use the wrapper os_mmap() like Freedreno does? */
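/* Map the BO through the DRM device fd at the per-BO offset the kernel
 * reported above; the mapping is cached in bo->map for later calls.
 */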
151 void *map = mmap(0, bo->size, PROT_READ | PROT_WRITE, MAP_SHARED,
152 dev->physical_device->local_fd, offset);
153 if (map == MAP_FAILED)
154 return vk_error(dev->instance, VK_ERROR_MEMORY_MAP_FAILED);
155
156 bo->map = map;
157 return VK_SUCCESS;
158 }
159
160 void
161 tu_bo_finish(struct tu_device *dev, struct tu_bo *bo)
162 {
163 assert(bo->gem_handle);
164
165 if (bo->map)
166 munmap(bo->map, bo->size);
167
168 tu_gem_close(dev, bo->gem_handle);
169 }
170
171 static VkResult
172 tu_physical_device_init(struct tu_physical_device *device,
173 struct tu_instance *instance,
174 drmDevicePtr drm_device)
175 {
176 const char *path = drm_device->nodes[DRM_NODE_RENDER];
177 VkResult result = VK_SUCCESS;
178 drmVersionPtr version;
179 int fd;
180 int master_fd = -1;
181
182 fd = open(path, O_RDWR | O_CLOEXEC);
183 if (fd < 0) {
184 return vk_errorf(instance, VK_ERROR_INCOMPATIBLE_DRIVER,
185 "failed to open device %s", path);
186 }
187
188 /* Version 1.3 added MSM_INFO_IOVA. */
189 const int min_version_major = 1;
190 const int min_version_minor = 3;
191
192 version = drmGetVersion(fd);
193 if (!version) {
194 close(fd);
195 return vk_errorf(instance, VK_ERROR_INCOMPATIBLE_DRIVER,
196 "failed to query kernel driver version for device %s",
197 path);
198 }
199
200 if (strcmp(version->name, "msm")) {
201 drmFreeVersion(version);
202 close(fd);
203 return vk_errorf(instance, VK_ERROR_INCOMPATIBLE_DRIVER,
204 "device %s does not use the msm kernel driver", path);
205 }
206
207 if (version->version_major != min_version_major ||
208 version->version_minor < min_version_minor) {
209 result = vk_errorf(instance, VK_ERROR_INCOMPATIBLE_DRIVER,
210 "kernel driver for device %s has version %d.%d, "
211 "but Vulkan requires version >= %d.%d",
212 path, version->version_major, version->version_minor,
213 min_version_major, min_version_minor);
214 drmFreeVersion(version);
215 close(fd);
216 return result;
217 }
218
219 drmFreeVersion(version);
220
221 if (instance->debug_flags & TU_DEBUG_STARTUP)
222 tu_logi("Found compatible device '%s'.", path);
223
224 device->_loader_data.loaderMagic = ICD_LOADER_MAGIC;
225 device->instance = instance;
226 assert(strlen(path) < ARRAY_SIZE(device->path));
227 strncpy(device->path, path, ARRAY_SIZE(device->path));
228
229 if (instance->enabled_extensions.KHR_display) {
230 master_fd =
231 open(drm_device->nodes[DRM_NODE_PRIMARY], O_RDWR | O_CLOEXEC);
232 if (master_fd >= 0) {
233 /* TODO: free master_fd if accel is not working? */
234 }
235 }
236
237 device->master_fd = master_fd;
238 device->local_fd = fd;
239
240 if (tu_drm_get_gpu_id(device, &device->gpu_id)) {
241 if (instance->debug_flags & TU_DEBUG_STARTUP)
242 tu_logi("Could not query the GPU ID");
243 result = vk_errorf(instance, VK_ERROR_INITIALIZATION_FAILED,
244 "could not get GPU ID");
245 goto fail;
246 }
247
248 if (tu_drm_get_gmem_size(device, &device->gmem_size)) {
249 if (instance->debug_flags & TU_DEBUG_STARTUP)
250 tu_logi("Could not query the GMEM size");
251 result = vk_errorf(instance, VK_ERROR_INITIALIZATION_FAILED,
252 "could not get GMEM size");
253 goto fail;
254 }
255
256 memset(device->name, 0, sizeof(device->name));
257 sprintf(device->name, "FD%d", device->gpu_id);
258
259 switch (device->gpu_id) {
260 case 630:
261 device->tile_align_w = 32;
262 device->tile_align_h = 32;
263 break;
264 default:
265 result = vk_errorf(instance, VK_ERROR_INITIALIZATION_FAILED,
266 "device %s is unsupported", device->name);
267 goto fail;
268 }
269 if (tu_device_get_cache_uuid(device->gpu_id, device->cache_uuid)) {
270 result = vk_errorf(instance, VK_ERROR_INITIALIZATION_FAILED,
271 "cannot generate UUID");
272 goto fail;
273 }
274
275 /* The gpu id is already embedded in the uuid so we just pass "tu"
276 * when creating the cache.
277 */
278 char buf[VK_UUID_SIZE * 2 + 1];
279 disk_cache_format_hex_id(buf, device->cache_uuid, VK_UUID_SIZE * 2);
280 device->disk_cache = disk_cache_create(device->name, buf, 0);
281
282 fprintf(stderr, "WARNING: tu is not a conformant vulkan implementation, "
283 "testing use only.\n");
284
285 tu_get_driver_uuid(&device->driver_uuid);
286 tu_get_device_uuid(&device->device_uuid);
287
288 tu_fill_device_extension_table(device, &device->supported_extensions);
289
290 if (result != VK_SUCCESS) {
291 vk_error(instance, result);
292 goto fail;
293 }
294
295 result = tu_wsi_init(device);
296 if (result != VK_SUCCESS) {
297 vk_error(instance, result);
298 goto fail;
299 }
300
301 return VK_SUCCESS;
302
303 fail:
304 close(fd);
305 if (master_fd != -1)
306 close(master_fd);
307 return result;
308 }
309
310 static void
311 tu_physical_device_finish(struct tu_physical_device *device)
312 {
313 tu_wsi_finish(device);
314
315 disk_cache_destroy(device->disk_cache);
316 close(device->local_fd);
317 if (device->master_fd != -1)
318 close(device->master_fd);
319 }
320
321 static void *
322 default_alloc_func(void *pUserData,
323 size_t size,
324 size_t align,
325 VkSystemAllocationScope allocationScope)
326 {
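/* Note: the requested align is ignored here; malloc's natural alignment is
 * assumed to be enough for the 8-byte alignments this driver requests.
 */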
327 return malloc(size);
328 }
329
330 static void *
331 default_realloc_func(void *pUserData,
332 void *pOriginal,
333 size_t size,
334 size_t align,
335 VkSystemAllocationScope allocationScope)
336 {
337 return realloc(pOriginal, size);
338 }
339
340 static void
341 default_free_func(void *pUserData, void *pMemory)
342 {
343 free(pMemory);
344 }
345
346 static const VkAllocationCallbacks default_alloc = {
347 .pUserData = NULL,
348 .pfnAllocation = default_alloc_func,
349 .pfnReallocation = default_realloc_func,
350 .pfnFree = default_free_func,
351 };
352
353 static const struct debug_control tu_debug_options[] = {
354 { "startup", TU_DEBUG_STARTUP },
355 { "nir", TU_DEBUG_NIR },
356 { "ir3", TU_DEBUG_IR3 },
357 { NULL, 0 }
358 };
359
360 const char *
361 tu_get_debug_option_name(int id)
362 {
363 assert(id < ARRAY_SIZE(tu_debug_options) - 1);
364 return tu_debug_options[id].string;
365 }
366
367 static int
368 tu_get_instance_extension_index(const char *name)
369 {
370 for (unsigned i = 0; i < TU_INSTANCE_EXTENSION_COUNT; ++i) {
371 if (strcmp(name, tu_instance_extensions[i].extensionName) == 0)
372 return i;
373 }
374 return -1;
375 }
376
377 VkResult
378 tu_CreateInstance(const VkInstanceCreateInfo *pCreateInfo,
379 const VkAllocationCallbacks *pAllocator,
380 VkInstance *pInstance)
381 {
382 struct tu_instance *instance;
383 VkResult result;
384
385 assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_INSTANCE_CREATE_INFO);
386
387 uint32_t client_version;
388 if (pCreateInfo->pApplicationInfo &&
389 pCreateInfo->pApplicationInfo->apiVersion != 0) {
390 client_version = pCreateInfo->pApplicationInfo->apiVersion;
391 } else {
392 tu_EnumerateInstanceVersion(&client_version);
393 }
394
395 instance = vk_zalloc2(&default_alloc, pAllocator, sizeof(*instance), 8,
396 VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
397 if (!instance)
398 return vk_error(NULL, VK_ERROR_OUT_OF_HOST_MEMORY);
399
400 instance->_loader_data.loaderMagic = ICD_LOADER_MAGIC;
401
402 if (pAllocator)
403 instance->alloc = *pAllocator;
404 else
405 instance->alloc = default_alloc;
406
407 instance->api_version = client_version;
408 instance->physical_device_count = -1;
409
410 instance->debug_flags =
411 parse_debug_string(getenv("TU_DEBUG"), tu_debug_options);
412
413 if (instance->debug_flags & TU_DEBUG_STARTUP)
414 tu_logi("Created an instance");
415
416 for (uint32_t i = 0; i < pCreateInfo->enabledExtensionCount; i++) {
417 const char *ext_name = pCreateInfo->ppEnabledExtensionNames[i];
418 int index = tu_get_instance_extension_index(ext_name);
419
420 if (index < 0 || !tu_supported_instance_extensions.extensions[index]) {
421 vk_free2(&default_alloc, pAllocator, instance);
422 return vk_error(instance, VK_ERROR_EXTENSION_NOT_PRESENT);
423 }
424
425 instance->enabled_extensions.extensions[index] = true;
426 }
427
428 result = vk_debug_report_instance_init(&instance->debug_report_callbacks);
429 if (result != VK_SUCCESS) {
430 vk_free2(&default_alloc, pAllocator, instance);
431 return vk_error(instance, result);
432 }
433
434 _mesa_locale_init();
435 glsl_type_singleton_init_or_ref();
436
437 VG(VALGRIND_CREATE_MEMPOOL(instance, 0, false));
438
439 *pInstance = tu_instance_to_handle(instance);
440
441 return VK_SUCCESS;
442 }
443
444 void
445 tu_DestroyInstance(VkInstance _instance,
446 const VkAllocationCallbacks *pAllocator)
447 {
448 TU_FROM_HANDLE(tu_instance, instance, _instance);
449
450 if (!instance)
451 return;
452
453 for (int i = 0; i < instance->physical_device_count; ++i) {
454 tu_physical_device_finish(instance->physical_devices + i);
455 }
456
457 VG(VALGRIND_DESTROY_MEMPOOL(instance));
458
459 glsl_type_singleton_decref();
460 _mesa_locale_fini();
461
462 vk_debug_report_instance_destroy(&instance->debug_report_callbacks);
463
464 vk_free(&instance->alloc, instance);
465 }
466
467 static VkResult
468 tu_enumerate_devices(struct tu_instance *instance)
469 {
470 /* TODO: Check for more devices ? */
471 drmDevicePtr devices[8];
472 VkResult result = VK_ERROR_INCOMPATIBLE_DRIVER;
473 int max_devices;
474
475 instance->physical_device_count = 0;
476
477 max_devices = drmGetDevices2(0, devices, ARRAY_SIZE(devices));
478
479 if (instance->debug_flags & TU_DEBUG_STARTUP)
480 tu_logi("Found %d drm nodes", max_devices);
481
482 if (max_devices < 1)
483 return vk_error(instance, VK_ERROR_INCOMPATIBLE_DRIVER);
484
485 for (unsigned i = 0; i < (unsigned) max_devices; i++) {
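/* Only consider devices that expose a render node and sit on the platform
 * bus - the msm GPU is a platform device, not a PCI one.
 */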
486 if (devices[i]->available_nodes & 1 << DRM_NODE_RENDER &&
487 devices[i]->bustype == DRM_BUS_PLATFORM) {
488
489 result = tu_physical_device_init(
490 instance->physical_devices + instance->physical_device_count,
491 instance, devices[i]);
492 if (result == VK_SUCCESS)
493 ++instance->physical_device_count;
494 else if (result != VK_ERROR_INCOMPATIBLE_DRIVER)
495 break;
496 }
497 }
498 drmFreeDevices(devices, max_devices);
499
500 return result;
501 }
502
503 VkResult
504 tu_EnumeratePhysicalDevices(VkInstance _instance,
505 uint32_t *pPhysicalDeviceCount,
506 VkPhysicalDevice *pPhysicalDevices)
507 {
508 TU_FROM_HANDLE(tu_instance, instance, _instance);
509 VK_OUTARRAY_MAKE(out, pPhysicalDevices, pPhysicalDeviceCount);
510
511 VkResult result;
512
513 if (instance->physical_device_count < 0) {
514 result = tu_enumerate_devices(instance);
515 if (result != VK_SUCCESS && result != VK_ERROR_INCOMPATIBLE_DRIVER)
516 return result;
517 }
518
519 for (uint32_t i = 0; i < instance->physical_device_count; ++i) {
520 vk_outarray_append(&out, p)
521 {
522 *p = tu_physical_device_to_handle(instance->physical_devices + i);
523 }
524 }
525
526 return vk_outarray_status(&out);
527 }
528
529 VkResult
530 tu_EnumeratePhysicalDeviceGroups(
531 VkInstance _instance,
532 uint32_t *pPhysicalDeviceGroupCount,
533 VkPhysicalDeviceGroupProperties *pPhysicalDeviceGroupProperties)
534 {
535 TU_FROM_HANDLE(tu_instance, instance, _instance);
536 VK_OUTARRAY_MAKE(out, pPhysicalDeviceGroupProperties,
537 pPhysicalDeviceGroupCount);
538 VkResult result;
539
540 if (instance->physical_device_count < 0) {
541 result = tu_enumerate_devices(instance);
542 if (result != VK_SUCCESS && result != VK_ERROR_INCOMPATIBLE_DRIVER)
543 return result;
544 }
545
546 for (uint32_t i = 0; i < instance->physical_device_count; ++i) {
547 vk_outarray_append(&out, p)
548 {
549 p->physicalDeviceCount = 1;
550 p->physicalDevices[0] =
551 tu_physical_device_to_handle(instance->physical_devices + i);
552 p->subsetAllocation = false;
553 }
554 }
555
556 return vk_outarray_status(&out);
557 }
558
559 void
560 tu_GetPhysicalDeviceFeatures(VkPhysicalDevice physicalDevice,
561 VkPhysicalDeviceFeatures *pFeatures)
562 {
563 memset(pFeatures, 0, sizeof(*pFeatures));
564
565 *pFeatures = (VkPhysicalDeviceFeatures) {
566 .robustBufferAccess = false,
567 .fullDrawIndexUint32 = false,
568 .imageCubeArray = false,
569 .independentBlend = false,
570 .geometryShader = false,
571 .tessellationShader = false,
572 .sampleRateShading = false,
573 .dualSrcBlend = false,
574 .logicOp = false,
575 .multiDrawIndirect = false,
576 .drawIndirectFirstInstance = false,
577 .depthClamp = false,
578 .depthBiasClamp = false,
579 .fillModeNonSolid = false,
580 .depthBounds = false,
581 .wideLines = false,
582 .largePoints = false,
583 .alphaToOne = false,
584 .multiViewport = false,
585 .samplerAnisotropy = true,
586 .textureCompressionETC2 = true,
587 .textureCompressionASTC_LDR = true,
588 .textureCompressionBC = true,
589 .occlusionQueryPrecise = false,
590 .pipelineStatisticsQuery = false,
591 .vertexPipelineStoresAndAtomics = false,
592 .fragmentStoresAndAtomics = false,
593 .shaderTessellationAndGeometryPointSize = false,
594 .shaderImageGatherExtended = false,
595 .shaderStorageImageExtendedFormats = false,
596 .shaderStorageImageMultisample = false,
597 .shaderUniformBufferArrayDynamicIndexing = false,
598 .shaderSampledImageArrayDynamicIndexing = false,
599 .shaderStorageBufferArrayDynamicIndexing = false,
600 .shaderStorageImageArrayDynamicIndexing = false,
601 .shaderStorageImageReadWithoutFormat = false,
602 .shaderStorageImageWriteWithoutFormat = false,
603 .shaderClipDistance = false,
604 .shaderCullDistance = false,
605 .shaderFloat64 = false,
606 .shaderInt64 = false,
607 .shaderInt16 = false,
608 .sparseBinding = false,
609 .variableMultisampleRate = false,
610 .inheritedQueries = false,
611 };
612 }
613
614 void
615 tu_GetPhysicalDeviceFeatures2(VkPhysicalDevice physicalDevice,
616 VkPhysicalDeviceFeatures2 *pFeatures)
617 {
618 vk_foreach_struct(ext, pFeatures->pNext)
619 {
620 switch (ext->sType) {
621 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VARIABLE_POINTERS_FEATURES: {
622 VkPhysicalDeviceVariablePointersFeatures *features = (void *) ext;
623 features->variablePointersStorageBuffer = false;
624 features->variablePointers = false;
625 break;
626 }
627 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MULTIVIEW_FEATURES: {
628 VkPhysicalDeviceMultiviewFeatures *features =
629 (VkPhysicalDeviceMultiviewFeatures *) ext;
630 features->multiview = false;
631 features->multiviewGeometryShader = false;
632 features->multiviewTessellationShader = false;
633 break;
634 }
635 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_DRAW_PARAMETERS_FEATURES: {
636 VkPhysicalDeviceShaderDrawParametersFeatures *features =
637 (VkPhysicalDeviceShaderDrawParametersFeatures *) ext;
638 features->shaderDrawParameters = false;
639 break;
640 }
641 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROTECTED_MEMORY_FEATURES: {
642 VkPhysicalDeviceProtectedMemoryFeatures *features =
643 (VkPhysicalDeviceProtectedMemoryFeatures *) ext;
644 features->protectedMemory = false;
645 break;
646 }
647 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_16BIT_STORAGE_FEATURES: {
648 VkPhysicalDevice16BitStorageFeatures *features =
649 (VkPhysicalDevice16BitStorageFeatures *) ext;
650 features->storageBuffer16BitAccess = false;
651 features->uniformAndStorageBuffer16BitAccess = false;
652 features->storagePushConstant16 = false;
653 features->storageInputOutput16 = false;
654 break;
655 }
656 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SAMPLER_YCBCR_CONVERSION_FEATURES: {
657 VkPhysicalDeviceSamplerYcbcrConversionFeatures *features =
658 (VkPhysicalDeviceSamplerYcbcrConversionFeatures *) ext;
659 features->samplerYcbcrConversion = false;
660 break;
661 }
662 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DESCRIPTOR_INDEXING_FEATURES_EXT: {
663 VkPhysicalDeviceDescriptorIndexingFeaturesEXT *features =
664 (VkPhysicalDeviceDescriptorIndexingFeaturesEXT *) ext;
665 features->shaderInputAttachmentArrayDynamicIndexing = false;
666 features->shaderUniformTexelBufferArrayDynamicIndexing = false;
667 features->shaderStorageTexelBufferArrayDynamicIndexing = false;
668 features->shaderUniformBufferArrayNonUniformIndexing = false;
669 features->shaderSampledImageArrayNonUniformIndexing = false;
670 features->shaderStorageBufferArrayNonUniformIndexing = false;
671 features->shaderStorageImageArrayNonUniformIndexing = false;
672 features->shaderInputAttachmentArrayNonUniformIndexing = false;
673 features->shaderUniformTexelBufferArrayNonUniformIndexing = false;
674 features->shaderStorageTexelBufferArrayNonUniformIndexing = false;
675 features->descriptorBindingUniformBufferUpdateAfterBind = false;
676 features->descriptorBindingSampledImageUpdateAfterBind = false;
677 features->descriptorBindingStorageImageUpdateAfterBind = false;
678 features->descriptorBindingStorageBufferUpdateAfterBind = false;
679 features->descriptorBindingUniformTexelBufferUpdateAfterBind = false;
680 features->descriptorBindingStorageTexelBufferUpdateAfterBind = false;
681 features->descriptorBindingUpdateUnusedWhilePending = false;
682 features->descriptorBindingPartiallyBound = false;
683 features->descriptorBindingVariableDescriptorCount = false;
684 features->runtimeDescriptorArray = false;
685 break;
686 }
687 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_CONDITIONAL_RENDERING_FEATURES_EXT: {
688 VkPhysicalDeviceConditionalRenderingFeaturesEXT *features =
689 (VkPhysicalDeviceConditionalRenderingFeaturesEXT *) ext;
690 features->conditionalRendering = false;
691 features->inheritedConditionalRendering = false;
692 break;
693 }
694 default:
695 break;
696 }
697 }
698 return tu_GetPhysicalDeviceFeatures(physicalDevice, &pFeatures->features);
699 }
700
701 void
702 tu_GetPhysicalDeviceProperties(VkPhysicalDevice physicalDevice,
703 VkPhysicalDeviceProperties *pProperties)
704 {
705 TU_FROM_HANDLE(tu_physical_device, pdevice, physicalDevice);
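/* Sample counts advertised for color, depth/stencil and sampled images;
 * integer sampled images and storage images stay single-sampled (see the
 * limits below).
 */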
706 VkSampleCountFlags sample_counts = VK_SAMPLE_COUNT_1_BIT |
707 VK_SAMPLE_COUNT_2_BIT | VK_SAMPLE_COUNT_4_BIT | VK_SAMPLE_COUNT_8_BIT;
708
709 /* Make sure the entire descriptor set is addressable with a signed
710 * 32-bit int, i.e. the sum of all limits scaled by descriptor size must
711 * be at most 2 GiB. A combined image & sampler object counts as one of
712 * each. This limit is for the pipeline layout, not for the set layout,
713 * but there is no per-set limit, so we just set a pipeline limit. No app
714 * is likely to hit this any time soon. */
715 size_t max_descriptor_set_size =
716 ((1ull << 31) - 16 * MAX_DYNAMIC_BUFFERS) /
717 (32 /* uniform buffer, 32 due to potential space wasted on alignment */ +
718 32 /* storage buffer, 32 due to potential space wasted on alignment */ +
719 32 /* sampler, largest when combined with image */ +
720 64 /* sampled image */ + 64 /* storage image */);
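/* Ignoring the small dynamic-buffer term, this works out to roughly
 * (1 << 31) / (32 + 32 + 32 + 64 + 64) ~= 9.6 million descriptors per stage.
 */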
721
722 VkPhysicalDeviceLimits limits = {
723 .maxImageDimension1D = (1 << 14),
724 .maxImageDimension2D = (1 << 14),
725 .maxImageDimension3D = (1 << 11),
726 .maxImageDimensionCube = (1 << 14),
727 .maxImageArrayLayers = (1 << 11),
728 .maxTexelBufferElements = 128 * 1024 * 1024,
729 .maxUniformBufferRange = UINT32_MAX,
730 .maxStorageBufferRange = UINT32_MAX,
731 .maxPushConstantsSize = MAX_PUSH_CONSTANTS_SIZE,
732 .maxMemoryAllocationCount = UINT32_MAX,
733 .maxSamplerAllocationCount = 64 * 1024,
734 .bufferImageGranularity = 64, /* A cache line */
735 .sparseAddressSpaceSize = 0xffffffffu, /* buffer max size */
736 .maxBoundDescriptorSets = MAX_SETS,
737 .maxPerStageDescriptorSamplers = max_descriptor_set_size,
738 .maxPerStageDescriptorUniformBuffers = max_descriptor_set_size,
739 .maxPerStageDescriptorStorageBuffers = max_descriptor_set_size,
740 .maxPerStageDescriptorSampledImages = max_descriptor_set_size,
741 .maxPerStageDescriptorStorageImages = max_descriptor_set_size,
742 .maxPerStageDescriptorInputAttachments = max_descriptor_set_size,
743 .maxPerStageResources = max_descriptor_set_size,
744 .maxDescriptorSetSamplers = max_descriptor_set_size,
745 .maxDescriptorSetUniformBuffers = max_descriptor_set_size,
746 .maxDescriptorSetUniformBuffersDynamic = MAX_DYNAMIC_UNIFORM_BUFFERS,
747 .maxDescriptorSetStorageBuffers = max_descriptor_set_size,
748 .maxDescriptorSetStorageBuffersDynamic = MAX_DYNAMIC_STORAGE_BUFFERS,
749 .maxDescriptorSetSampledImages = max_descriptor_set_size,
750 .maxDescriptorSetStorageImages = max_descriptor_set_size,
751 .maxDescriptorSetInputAttachments = max_descriptor_set_size,
752 .maxVertexInputAttributes = 32,
753 .maxVertexInputBindings = 32,
754 .maxVertexInputAttributeOffset = 2047,
755 .maxVertexInputBindingStride = 2048,
756 .maxVertexOutputComponents = 128,
757 .maxTessellationGenerationLevel = 64,
758 .maxTessellationPatchSize = 32,
759 .maxTessellationControlPerVertexInputComponents = 128,
760 .maxTessellationControlPerVertexOutputComponents = 128,
761 .maxTessellationControlPerPatchOutputComponents = 120,
762 .maxTessellationControlTotalOutputComponents = 4096,
763 .maxTessellationEvaluationInputComponents = 128,
764 .maxTessellationEvaluationOutputComponents = 128,
765 .maxGeometryShaderInvocations = 127,
766 .maxGeometryInputComponents = 64,
767 .maxGeometryOutputComponents = 128,
768 .maxGeometryOutputVertices = 256,
769 .maxGeometryTotalOutputComponents = 1024,
770 .maxFragmentInputComponents = 128,
771 .maxFragmentOutputAttachments = 8,
772 .maxFragmentDualSrcAttachments = 1,
773 .maxFragmentCombinedOutputResources = 8,
774 .maxComputeSharedMemorySize = 32768,
775 .maxComputeWorkGroupCount = { 65535, 65535, 65535 },
776 .maxComputeWorkGroupInvocations = 2048,
777 .maxComputeWorkGroupSize = { 2048, 2048, 2048 },
778 .subPixelPrecisionBits = 4 /* FIXME */,
779 .subTexelPrecisionBits = 4 /* FIXME */,
780 .mipmapPrecisionBits = 4 /* FIXME */,
781 .maxDrawIndexedIndexValue = UINT32_MAX,
782 .maxDrawIndirectCount = UINT32_MAX,
783 .maxSamplerLodBias = 16,
784 .maxSamplerAnisotropy = 16,
785 .maxViewports = MAX_VIEWPORTS,
786 .maxViewportDimensions = { (1 << 14), (1 << 14) },
787 .viewportBoundsRange = { INT16_MIN, INT16_MAX },
788 .viewportSubPixelBits = 8,
789 .minMemoryMapAlignment = 4096, /* A page */
790 .minTexelBufferOffsetAlignment = 1,
791 .minUniformBufferOffsetAlignment = 4,
792 .minStorageBufferOffsetAlignment = 4,
793 .minTexelOffset = -32,
794 .maxTexelOffset = 31,
795 .minTexelGatherOffset = -32,
796 .maxTexelGatherOffset = 31,
797 .minInterpolationOffset = -2,
798 .maxInterpolationOffset = 2,
799 .subPixelInterpolationOffsetBits = 8,
800 .maxFramebufferWidth = (1 << 14),
801 .maxFramebufferHeight = (1 << 14),
802 .maxFramebufferLayers = (1 << 10),
803 .framebufferColorSampleCounts = sample_counts,
804 .framebufferDepthSampleCounts = sample_counts,
805 .framebufferStencilSampleCounts = sample_counts,
806 .framebufferNoAttachmentsSampleCounts = sample_counts,
807 .maxColorAttachments = MAX_RTS,
808 .sampledImageColorSampleCounts = sample_counts,
809 .sampledImageIntegerSampleCounts = VK_SAMPLE_COUNT_1_BIT,
810 .sampledImageDepthSampleCounts = sample_counts,
811 .sampledImageStencilSampleCounts = sample_counts,
812 .storageImageSampleCounts = VK_SAMPLE_COUNT_1_BIT,
813 .maxSampleMaskWords = 1,
814 .timestampComputeAndGraphics = true,
815 .timestampPeriod = 1,
816 .maxClipDistances = 8,
817 .maxCullDistances = 8,
818 .maxCombinedClipAndCullDistances = 8,
819 .discreteQueuePriorities = 1,
820 .pointSizeRange = { 0.125, 255.875 },
821 .lineWidthRange = { 0.0, 7.9921875 },
822 .pointSizeGranularity = (1.0 / 8.0),
823 .lineWidthGranularity = (1.0 / 128.0),
824 .strictLines = false, /* FINISHME */
825 .standardSampleLocations = true,
826 .optimalBufferCopyOffsetAlignment = 128,
827 .optimalBufferCopyRowPitchAlignment = 128,
828 .nonCoherentAtomSize = 64,
829 };
830
831 *pProperties = (VkPhysicalDeviceProperties) {
832 .apiVersion = tu_physical_device_api_version(pdevice),
833 .driverVersion = vk_get_driver_version(),
834 .vendorID = 0, /* TODO */
835 .deviceID = 0,
836 .deviceType = VK_PHYSICAL_DEVICE_TYPE_INTEGRATED_GPU,
837 .limits = limits,
838 .sparseProperties = { 0 },
839 };
840
841 strcpy(pProperties->deviceName, pdevice->name);
842 memcpy(pProperties->pipelineCacheUUID, pdevice->cache_uuid, VK_UUID_SIZE);
843 }
844
845 void
846 tu_GetPhysicalDeviceProperties2(VkPhysicalDevice physicalDevice,
847 VkPhysicalDeviceProperties2 *pProperties)
848 {
849 TU_FROM_HANDLE(tu_physical_device, pdevice, physicalDevice);
850 tu_GetPhysicalDeviceProperties(physicalDevice, &pProperties->properties);
851
852 vk_foreach_struct(ext, pProperties->pNext)
853 {
854 switch (ext->sType) {
855 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PUSH_DESCRIPTOR_PROPERTIES_KHR: {
856 VkPhysicalDevicePushDescriptorPropertiesKHR *properties =
857 (VkPhysicalDevicePushDescriptorPropertiesKHR *) ext;
858 properties->maxPushDescriptors = MAX_PUSH_DESCRIPTORS;
859 break;
860 }
861 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_ID_PROPERTIES: {
862 VkPhysicalDeviceIDProperties *properties =
863 (VkPhysicalDeviceIDProperties *) ext;
864 memcpy(properties->driverUUID, pdevice->driver_uuid, VK_UUID_SIZE);
865 memcpy(properties->deviceUUID, pdevice->device_uuid, VK_UUID_SIZE);
866 properties->deviceLUIDValid = false;
867 break;
868 }
869 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MULTIVIEW_PROPERTIES: {
870 VkPhysicalDeviceMultiviewProperties *properties =
871 (VkPhysicalDeviceMultiviewProperties *) ext;
872 properties->maxMultiviewViewCount = MAX_VIEWS;
873 properties->maxMultiviewInstanceIndex = INT_MAX;
874 break;
875 }
876 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_POINT_CLIPPING_PROPERTIES: {
877 VkPhysicalDevicePointClippingProperties *properties =
878 (VkPhysicalDevicePointClippingProperties *) ext;
879 properties->pointClippingBehavior =
880 VK_POINT_CLIPPING_BEHAVIOR_ALL_CLIP_PLANES;
881 break;
882 }
883 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MAINTENANCE_3_PROPERTIES: {
884 VkPhysicalDeviceMaintenance3Properties *properties =
885 (VkPhysicalDeviceMaintenance3Properties *) ext;
886 /* Make sure everything is addressable by a signed 32-bit int, and
887 * our largest descriptors are 96 bytes. */
888 properties->maxPerSetDescriptors = (1ull << 31) / 96;
889 /* Our buffer size fields allow only this much */
890 properties->maxMemoryAllocationSize = 0xFFFFFFFFull;
891 break;
892 }
893 default:
894 break;
895 }
896 }
897 }
898
899 static const VkQueueFamilyProperties tu_queue_family_properties = {
900 .queueFlags =
901 VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT | VK_QUEUE_TRANSFER_BIT,
902 .queueCount = 1,
903 .timestampValidBits = 64,
904 .minImageTransferGranularity = { 1, 1, 1 },
905 };
906
907 void
908 tu_GetPhysicalDeviceQueueFamilyProperties(
909 VkPhysicalDevice physicalDevice,
910 uint32_t *pQueueFamilyPropertyCount,
911 VkQueueFamilyProperties *pQueueFamilyProperties)
912 {
913 VK_OUTARRAY_MAKE(out, pQueueFamilyProperties, pQueueFamilyPropertyCount);
914
915 vk_outarray_append(&out, p) { *p = tu_queue_family_properties; }
916 }
917
918 void
919 tu_GetPhysicalDeviceQueueFamilyProperties2(
920 VkPhysicalDevice physicalDevice,
921 uint32_t *pQueueFamilyPropertyCount,
922 VkQueueFamilyProperties2 *pQueueFamilyProperties)
923 {
924 VK_OUTARRAY_MAKE(out, pQueueFamilyProperties, pQueueFamilyPropertyCount);
925
926 vk_outarray_append(&out, p)
927 {
928 p->queueFamilyProperties = tu_queue_family_properties;
929 }
930 }
931
932 static uint64_t
933 tu_get_system_heap_size()
934 {
935 struct sysinfo info;
936 sysinfo(&info);
937
938 uint64_t total_ram = (uint64_t) info.totalram * (uint64_t) info.mem_unit;
939
940 /* We don't want to burn too much ram with the GPU. If the user has 4GiB
941 * or less, we use at most half. If they have more than 4GiB, we use 3/4.
942 */
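/* For example: 3 GiB of system RAM gives a 1.5 GiB heap, 8 GiB gives 6 GiB. */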
943 uint64_t available_ram;
944 if (total_ram <= 4ull * 1024ull * 1024ull * 1024ull)
945 available_ram = total_ram / 2;
946 else
947 available_ram = total_ram * 3 / 4;
948
949 return available_ram;
950 }
951
952 void
953 tu_GetPhysicalDeviceMemoryProperties(
954 VkPhysicalDevice physicalDevice,
955 VkPhysicalDeviceMemoryProperties *pMemoryProperties)
956 {
957 pMemoryProperties->memoryHeapCount = 1;
958 pMemoryProperties->memoryHeaps[0].size = tu_get_system_heap_size();
959 pMemoryProperties->memoryHeaps[0].flags = VK_MEMORY_HEAP_DEVICE_LOCAL_BIT;
960
961 pMemoryProperties->memoryTypeCount = 1;
962 pMemoryProperties->memoryTypes[0].propertyFlags =
963 VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT |
964 VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT |
965 VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
966 pMemoryProperties->memoryTypes[0].heapIndex = 0;
967 }
968
969 void
970 tu_GetPhysicalDeviceMemoryProperties2(
971 VkPhysicalDevice physicalDevice,
972 VkPhysicalDeviceMemoryProperties2 *pMemoryProperties)
973 {
974 return tu_GetPhysicalDeviceMemoryProperties(
975 physicalDevice, &pMemoryProperties->memoryProperties);
976 }
977
978 static VkResult
979 tu_queue_init(struct tu_device *device,
980 struct tu_queue *queue,
981 uint32_t queue_family_index,
982 int idx,
983 VkDeviceQueueCreateFlags flags)
984 {
985 queue->_loader_data.loaderMagic = ICD_LOADER_MAGIC;
986 queue->device = device;
987 queue->queue_family_index = queue_family_index;
988 queue->queue_idx = idx;
989 queue->flags = flags;
990
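/* Each VkQueue gets its own kernel submitqueue; its id is passed as
 * drm_msm_gem_submit.queueid when command buffers are submitted.
 */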
991 int ret = tu_drm_submitqueue_new(device, 0, &queue->msm_queue_id);
992 if (ret)
993 return VK_ERROR_INITIALIZATION_FAILED;
994
995 tu_fence_init(&queue->submit_fence, false);
996
997 return VK_SUCCESS;
998 }
999
1000 static void
1001 tu_queue_finish(struct tu_queue *queue)
1002 {
1003 tu_fence_finish(&queue->submit_fence);
1004 tu_drm_submitqueue_close(queue->device, queue->msm_queue_id);
1005 }
1006
1007 static int
1008 tu_get_device_extension_index(const char *name)
1009 {
1010 for (unsigned i = 0; i < TU_DEVICE_EXTENSION_COUNT; ++i) {
1011 if (strcmp(name, tu_device_extensions[i].extensionName) == 0)
1012 return i;
1013 }
1014 return -1;
1015 }
1016
1017 VkResult
1018 tu_CreateDevice(VkPhysicalDevice physicalDevice,
1019 const VkDeviceCreateInfo *pCreateInfo,
1020 const VkAllocationCallbacks *pAllocator,
1021 VkDevice *pDevice)
1022 {
1023 TU_FROM_HANDLE(tu_physical_device, physical_device, physicalDevice);
1024 VkResult result;
1025 struct tu_device *device;
1026
1027 /* Check enabled features */
1028 if (pCreateInfo->pEnabledFeatures) {
1029 VkPhysicalDeviceFeatures supported_features;
1030 tu_GetPhysicalDeviceFeatures(physicalDevice, &supported_features);
1031 VkBool32 *supported_feature = (VkBool32 *) &supported_features;
1032 VkBool32 *enabled_feature = (VkBool32 *) pCreateInfo->pEnabledFeatures;
1033 unsigned num_features =
1034 sizeof(VkPhysicalDeviceFeatures) / sizeof(VkBool32);
1035 for (uint32_t i = 0; i < num_features; i++) {
1036 if (enabled_feature[i] && !supported_feature[i])
1037 return vk_error(physical_device->instance,
1038 VK_ERROR_FEATURE_NOT_PRESENT);
1039 }
1040 }
1041
1042 device = vk_zalloc2(&physical_device->instance->alloc, pAllocator,
1043 sizeof(*device), 8, VK_SYSTEM_ALLOCATION_SCOPE_DEVICE);
1044 if (!device)
1045 return vk_error(physical_device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
1046
1047 device->_loader_data.loaderMagic = ICD_LOADER_MAGIC;
1048 device->instance = physical_device->instance;
1049 device->physical_device = physical_device;
1050
1051 if (pAllocator)
1052 device->alloc = *pAllocator;
1053 else
1054 device->alloc = physical_device->instance->alloc;
1055
1056 for (uint32_t i = 0; i < pCreateInfo->enabledExtensionCount; i++) {
1057 const char *ext_name = pCreateInfo->ppEnabledExtensionNames[i];
1058 int index = tu_get_device_extension_index(ext_name);
1059 if (index < 0 ||
1060 !physical_device->supported_extensions.extensions[index]) {
1061 vk_free(&device->alloc, device);
1062 return vk_error(physical_device->instance,
1063 VK_ERROR_EXTENSION_NOT_PRESENT);
1064 }
1065
1066 device->enabled_extensions.extensions[index] = true;
1067 }
1068
1069 for (unsigned i = 0; i < pCreateInfo->queueCreateInfoCount; i++) {
1070 const VkDeviceQueueCreateInfo *queue_create =
1071 &pCreateInfo->pQueueCreateInfos[i];
1072 uint32_t qfi = queue_create->queueFamilyIndex;
1073 device->queues[qfi] = vk_alloc(
1074 &device->alloc, queue_create->queueCount * sizeof(struct tu_queue),
1075 8, VK_SYSTEM_ALLOCATION_SCOPE_DEVICE);
1076 if (!device->queues[qfi]) {
1077 result = VK_ERROR_OUT_OF_HOST_MEMORY;
1078 goto fail;
1079 }
1080
1081 memset(device->queues[qfi], 0,
1082 queue_create->queueCount * sizeof(struct tu_queue));
1083
1084 device->queue_count[qfi] = queue_create->queueCount;
1085
1086 for (unsigned q = 0; q < queue_create->queueCount; q++) {
1087 result = tu_queue_init(device, &device->queues[qfi][q], qfi, q,
1088 queue_create->flags);
1089 if (result != VK_SUCCESS)
1090 goto fail;
1091 }
1092 }
1093
1094 device->compiler = ir3_compiler_create(NULL, physical_device->gpu_id);
1095 if (!device->compiler) {
1096 result = VK_ERROR_INITIALIZATION_FAILED;
 goto fail;
 }
1097
1098 VkPipelineCacheCreateInfo ci;
1099 ci.sType = VK_STRUCTURE_TYPE_PIPELINE_CACHE_CREATE_INFO;
1100 ci.pNext = NULL;
1101 ci.flags = 0;
1102 ci.pInitialData = NULL;
1103 ci.initialDataSize = 0;
1104 VkPipelineCache pc;
1105 result =
1106 tu_CreatePipelineCache(tu_device_to_handle(device), &ci, NULL, &pc);
1107 if (result != VK_SUCCESS)
1108 goto fail;
1109
1110 device->mem_cache = tu_pipeline_cache_from_handle(pc);
1111
1112 *pDevice = tu_device_to_handle(device);
1113 return VK_SUCCESS;
1114
1115 fail:
1116 for (unsigned i = 0; i < TU_MAX_QUEUE_FAMILIES; i++) {
1117 for (unsigned q = 0; q < device->queue_count[i]; q++)
1118 tu_queue_finish(&device->queues[i][q]);
1119 if (device->queue_count[i])
1120 vk_free(&device->alloc, device->queues[i]);
1121 }
1122
1123 if (device->compiler)
1124 ralloc_free(device->compiler);
1125
1126 vk_free(&device->alloc, device);
1127 return result;
1128 }
1129
1130 void
1131 tu_DestroyDevice(VkDevice _device, const VkAllocationCallbacks *pAllocator)
1132 {
1133 TU_FROM_HANDLE(tu_device, device, _device);
1134
1135 if (!device)
1136 return;
1137
1138 for (unsigned i = 0; i < TU_MAX_QUEUE_FAMILIES; i++) {
1139 for (unsigned q = 0; q < device->queue_count[i]; q++)
1140 tu_queue_finish(&device->queues[i][q]);
1141 if (device->queue_count[i])
1142 vk_free(&device->alloc, device->queues[i]);
1143 }
1144
1145 /* the compiler does not use pAllocator */
1146 ralloc_free(device->compiler);
1147
1148 VkPipelineCache pc = tu_pipeline_cache_to_handle(device->mem_cache);
1149 tu_DestroyPipelineCache(tu_device_to_handle(device), pc, NULL);
1150
1151 vk_free(&device->alloc, device);
1152 }
1153
1154 VkResult
1155 tu_EnumerateInstanceLayerProperties(uint32_t *pPropertyCount,
1156 VkLayerProperties *pProperties)
1157 {
1158 *pPropertyCount = 0;
1159 return VK_SUCCESS;
1160 }
1161
1162 VkResult
1163 tu_EnumerateDeviceLayerProperties(VkPhysicalDevice physicalDevice,
1164 uint32_t *pPropertyCount,
1165 VkLayerProperties *pProperties)
1166 {
1167 *pPropertyCount = 0;
1168 return VK_SUCCESS;
1169 }
1170
1171 void
1172 tu_GetDeviceQueue2(VkDevice _device,
1173 const VkDeviceQueueInfo2 *pQueueInfo,
1174 VkQueue *pQueue)
1175 {
1176 TU_FROM_HANDLE(tu_device, device, _device);
1177 struct tu_queue *queue;
1178
1179 queue =
1180 &device->queues[pQueueInfo->queueFamilyIndex][pQueueInfo->queueIndex];
1181 if (pQueueInfo->flags != queue->flags) {
1182 /* From the Vulkan 1.1.70 spec:
1183 *
1184 * "The queue returned by vkGetDeviceQueue2 must have the same
1185 * flags value from this structure as that used at device
1186 * creation time in a VkDeviceQueueCreateInfo instance. If no
1187 * matching flags were specified at device creation time then
1188 * pQueue will return VK_NULL_HANDLE."
1189 */
1190 *pQueue = VK_NULL_HANDLE;
1191 return;
1192 }
1193
1194 *pQueue = tu_queue_to_handle(queue);
1195 }
1196
1197 void
1198 tu_GetDeviceQueue(VkDevice _device,
1199 uint32_t queueFamilyIndex,
1200 uint32_t queueIndex,
1201 VkQueue *pQueue)
1202 {
1203 const VkDeviceQueueInfo2 info =
1204 (VkDeviceQueueInfo2) { .sType = VK_STRUCTURE_TYPE_DEVICE_QUEUE_INFO_2,
1205 .queueFamilyIndex = queueFamilyIndex,
1206 .queueIndex = queueIndex };
1207
1208 tu_GetDeviceQueue2(_device, &info, pQueue);
1209 }
1210
1211 VkResult
1212 tu_QueueSubmit(VkQueue _queue,
1213 uint32_t submitCount,
1214 const VkSubmitInfo *pSubmits,
1215 VkFence _fence)
1216 {
1217 TU_FROM_HANDLE(tu_queue, queue, _queue);
1218
1219 for (uint32_t i = 0; i < submitCount; ++i) {
1220 const VkSubmitInfo *submit = pSubmits + i;
1221 const bool last_submit = (i == submitCount - 1);
1222 struct tu_bo_list bo_list;
1223 tu_bo_list_init(&bo_list);
1224
1225 uint32_t entry_count = 0;
1226 for (uint32_t j = 0; j < submit->commandBufferCount; ++j) {
1227 TU_FROM_HANDLE(tu_cmd_buffer, cmdbuf, submit->pCommandBuffers[j]);
1228 entry_count += cmdbuf->cs.entry_count;
1229 }
1230
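/* Build one submit command per IB (cs entry) across all command buffers in
 * this VkSubmitInfo; each entry references its BO by index into the
 * submit's BO list.
 */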
1231 struct drm_msm_gem_submit_cmd cmds[entry_count];
1232 uint32_t entry_idx = 0;
1233 for (uint32_t j = 0; j < submit->commandBufferCount; ++j) {
1234 TU_FROM_HANDLE(tu_cmd_buffer, cmdbuf, submit->pCommandBuffers[j]);
1235 struct tu_cs *cs = &cmdbuf->cs;
1236 for (unsigned k = 0; k < cs->entry_count; ++k, ++entry_idx) {
1237 cmds[entry_idx].type = MSM_SUBMIT_CMD_BUF;
1238 cmds[entry_idx].submit_idx =
1239 tu_bo_list_add(&bo_list, cs->entries[k].bo,
1240 MSM_SUBMIT_BO_READ | MSM_SUBMIT_BO_DUMP);
1241 cmds[entry_idx].submit_offset = cs->entries[k].offset;
1242 cmds[entry_idx].size = cs->entries[k].size;
1243 cmds[entry_idx].pad = 0;
1244 cmds[entry_idx].nr_relocs = 0;
1245 cmds[entry_idx].relocs = 0;
1246 }
1247
1248 tu_bo_list_merge(&bo_list, &cmdbuf->bo_list);
1249 }
1250
1251 uint32_t flags = MSM_PIPE_3D0;
1252 if (last_submit) {
1253 flags |= MSM_SUBMIT_FENCE_FD_OUT;
1254 }
1255
1256 struct drm_msm_gem_submit req = {
1257 .flags = flags,
1258 .queueid = queue->msm_queue_id,
1259 .bos = (uint64_t)(uintptr_t) bo_list.bo_infos,
1260 .nr_bos = bo_list.count,
1261 .cmds = (uint64_t)(uintptr_t)cmds,
1262 .nr_cmds = entry_count,
1263 };
1264
1265 int ret = drmCommandWriteRead(queue->device->physical_device->local_fd,
1266 DRM_MSM_GEM_SUBMIT,
1267 &req, sizeof(req));
1268 if (ret) {
1269 fprintf(stderr, "submit failed: %s\n", strerror(errno));
1270 abort();
1271 }
1272
1273 tu_bo_list_destroy(&bo_list);
1274
1275 if (last_submit) {
1276 /* no need to merge fences as queue execution is serialized */
1277 tu_fence_update_fd(&queue->submit_fence, req.fence_fd);
1278 }
1279 }
1280
1281 if (_fence != VK_NULL_HANDLE) {
1282 TU_FROM_HANDLE(tu_fence, fence, _fence);
1283 tu_fence_copy(fence, &queue->submit_fence);
1284 }
1285
1286 return VK_SUCCESS;
1287 }
1288
1289 VkResult
1290 tu_QueueWaitIdle(VkQueue _queue)
1291 {
1292 TU_FROM_HANDLE(tu_queue, queue, _queue);
1293
1294 tu_fence_wait_idle(&queue->submit_fence);
1295
1296 return VK_SUCCESS;
1297 }
1298
1299 VkResult
1300 tu_DeviceWaitIdle(VkDevice _device)
1301 {
1302 TU_FROM_HANDLE(tu_device, device, _device);
1303
1304 for (unsigned i = 0; i < TU_MAX_QUEUE_FAMILIES; i++) {
1305 for (unsigned q = 0; q < device->queue_count[i]; q++) {
1306 tu_QueueWaitIdle(tu_queue_to_handle(&device->queues[i][q]));
1307 }
1308 }
1309 return VK_SUCCESS;
1310 }
1311
1312 VkResult
1313 tu_EnumerateInstanceExtensionProperties(const char *pLayerName,
1314 uint32_t *pPropertyCount,
1315 VkExtensionProperties *pProperties)
1316 {
1317 VK_OUTARRAY_MAKE(out, pProperties, pPropertyCount);
1318
1319 /* We support no layers */
1320 if (pLayerName)
1321 return vk_error(NULL, VK_ERROR_LAYER_NOT_PRESENT);
1322
1323 for (int i = 0; i < TU_INSTANCE_EXTENSION_COUNT; i++) {
1324 if (tu_supported_instance_extensions.extensions[i]) {
1325 vk_outarray_append(&out, prop) { *prop = tu_instance_extensions[i]; }
1326 }
1327 }
1328
1329 return vk_outarray_status(&out);
1330 }
1331
1332 VkResult
1333 tu_EnumerateDeviceExtensionProperties(VkPhysicalDevice physicalDevice,
1334 const char *pLayerName,
1335 uint32_t *pPropertyCount,
1336 VkExtensionProperties *pProperties)
1337 {
1338 /* We support no layers */
1339 TU_FROM_HANDLE(tu_physical_device, device, physicalDevice);
1340 VK_OUTARRAY_MAKE(out, pProperties, pPropertyCount);
1341
1342 /* We support no layers */
1343 if (pLayerName)
1344 return vk_error(NULL, VK_ERROR_LAYER_NOT_PRESENT);
1345
1346 for (int i = 0; i < TU_DEVICE_EXTENSION_COUNT; i++) {
1347 if (device->supported_extensions.extensions[i]) {
1348 vk_outarray_append(&out, prop) { *prop = tu_device_extensions[i]; }
1349 }
1350 }
1351
1352 return vk_outarray_status(&out);
1353 }
1354
1355 PFN_vkVoidFunction
1356 tu_GetInstanceProcAddr(VkInstance _instance, const char *pName)
1357 {
1358 TU_FROM_HANDLE(tu_instance, instance, _instance);
1359
1360 return tu_lookup_entrypoint_checked(
1361 pName, instance ? instance->api_version : 0,
1362 instance ? &instance->enabled_extensions : NULL, NULL);
1363 }
1364
1365 /* The loader wants us to expose a second GetInstanceProcAddr function
1366 * to work around certain LD_PRELOAD issues seen in apps.
1367 */
1368 PUBLIC
1369 VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL
1370 vk_icdGetInstanceProcAddr(VkInstance instance, const char *pName);
1371
1372 PUBLIC
1373 VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL
1374 vk_icdGetInstanceProcAddr(VkInstance instance, const char *pName)
1375 {
1376 return tu_GetInstanceProcAddr(instance, pName);
1377 }
1378
1379 PFN_vkVoidFunction
1380 tu_GetDeviceProcAddr(VkDevice _device, const char *pName)
1381 {
1382 TU_FROM_HANDLE(tu_device, device, _device);
1383
1384 return tu_lookup_entrypoint_checked(pName, device->instance->api_version,
1385 &device->instance->enabled_extensions,
1386 &device->enabled_extensions);
1387 }
1388
1389 static VkResult
1390 tu_alloc_memory(struct tu_device *device,
1391 const VkMemoryAllocateInfo *pAllocateInfo,
1392 const VkAllocationCallbacks *pAllocator,
1393 VkDeviceMemory *pMem)
1394 {
1395 struct tu_device_memory *mem;
1396 VkResult result;
1397
1398 assert(pAllocateInfo->sType == VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO);
1399
1400 if (pAllocateInfo->allocationSize == 0) {
1401 /* Apparently, this is allowed */
1402 *pMem = VK_NULL_HANDLE;
1403 return VK_SUCCESS;
1404 }
1405
1406 mem = vk_alloc2(&device->alloc, pAllocator, sizeof(*mem), 8,
1407 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
1408 if (mem == NULL)
1409 return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
1410
1411 const VkImportMemoryFdInfoKHR *fd_info =
1412 vk_find_struct_const(pAllocateInfo->pNext, IMPORT_MEMORY_FD_INFO_KHR);
1413 if (fd_info && !fd_info->handleType)
1414 fd_info = NULL;
1415
1416 if (fd_info) {
1417 assert(fd_info->handleType ==
1418 VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT ||
1419 fd_info->handleType ==
1420 VK_EXTERNAL_MEMORY_HANDLE_TYPE_DMA_BUF_BIT_EXT);
1421
1422 /*
1423 * TODO Importing the same fd twice gives us the same handle without
1424 * reference counting. We need to maintain a per-instance handle-to-bo
1425 * table and add reference count to tu_bo.
1426 */
1427 result = tu_bo_init_dmabuf(device, &mem->bo,
1428 pAllocateInfo->allocationSize, fd_info->fd);
1429 if (result == VK_SUCCESS) {
1430 /* take ownership and close the fd */
1431 close(fd_info->fd);
1432 }
1433 } else {
1434 result =
1435 tu_bo_init_new(device, &mem->bo, pAllocateInfo->allocationSize);
1436 }
1437
1438 if (result != VK_SUCCESS) {
1439 vk_free2(&device->alloc, pAllocator, mem);
1440 return result;
1441 }
1442
1443 mem->size = pAllocateInfo->allocationSize;
1444 mem->type_index = pAllocateInfo->memoryTypeIndex;
1445
1446 mem->map = NULL;
1447 mem->user_ptr = NULL;
1448
1449 *pMem = tu_device_memory_to_handle(mem);
1450
1451 return VK_SUCCESS;
1452 }
1453
1454 VkResult
1455 tu_AllocateMemory(VkDevice _device,
1456 const VkMemoryAllocateInfo *pAllocateInfo,
1457 const VkAllocationCallbacks *pAllocator,
1458 VkDeviceMemory *pMem)
1459 {
1460 TU_FROM_HANDLE(tu_device, device, _device);
1461 return tu_alloc_memory(device, pAllocateInfo, pAllocator, pMem);
1462 }
1463
1464 void
1465 tu_FreeMemory(VkDevice _device,
1466 VkDeviceMemory _mem,
1467 const VkAllocationCallbacks *pAllocator)
1468 {
1469 TU_FROM_HANDLE(tu_device, device, _device);
1470 TU_FROM_HANDLE(tu_device_memory, mem, _mem);
1471
1472 if (mem == NULL)
1473 return;
1474
1475 tu_bo_finish(device, &mem->bo);
1476 vk_free2(&device->alloc, pAllocator, mem);
1477 }
1478
1479 VkResult
1480 tu_MapMemory(VkDevice _device,
1481 VkDeviceMemory _memory,
1482 VkDeviceSize offset,
1483 VkDeviceSize size,
1484 VkMemoryMapFlags flags,
1485 void **ppData)
1486 {
1487 TU_FROM_HANDLE(tu_device, device, _device);
1488 TU_FROM_HANDLE(tu_device_memory, mem, _memory);
1489 VkResult result;
1490
1491 if (mem == NULL) {
1492 *ppData = NULL;
1493 return VK_SUCCESS;
1494 }
1495
1496 if (mem->user_ptr) {
1497 *ppData = mem->user_ptr;
1498 } else if (!mem->map) {
1499 result = tu_bo_map(device, &mem->bo);
1500 if (result != VK_SUCCESS)
1501 return result;
1502 *ppData = mem->map = mem->bo.map;
1503 } else
1504 *ppData = mem->map;
1505
1506 if (*ppData) {
1507 *ppData += offset;
1508 return VK_SUCCESS;
1509 }
1510
1511 return vk_error(device->instance, VK_ERROR_MEMORY_MAP_FAILED);
1512 }
1513
1514 void
1515 tu_UnmapMemory(VkDevice _device, VkDeviceMemory _memory)
1516 {
1517 /* I do not see any unmapping done by the freedreno Gallium driver. */
1518 }
1519
1520 VkResult
1521 tu_FlushMappedMemoryRanges(VkDevice _device,
1522 uint32_t memoryRangeCount,
1523 const VkMappedMemoryRange *pMemoryRanges)
1524 {
1525 return VK_SUCCESS;
1526 }
1527
1528 VkResult
1529 tu_InvalidateMappedMemoryRanges(VkDevice _device,
1530 uint32_t memoryRangeCount,
1531 const VkMappedMemoryRange *pMemoryRanges)
1532 {
1533 return VK_SUCCESS;
1534 }
1535
1536 void
1537 tu_GetBufferMemoryRequirements(VkDevice _device,
1538 VkBuffer _buffer,
1539 VkMemoryRequirements *pMemoryRequirements)
1540 {
1541 TU_FROM_HANDLE(tu_buffer, buffer, _buffer);
1542
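/* Only a single memory type is exposed (see
 * tu_GetPhysicalDeviceMemoryProperties), hence bit 0.
 */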
1543 pMemoryRequirements->memoryTypeBits = 1;
1544 pMemoryRequirements->alignment = 16;
1545 pMemoryRequirements->size =
1546 align64(buffer->size, pMemoryRequirements->alignment);
1547 }
1548
1549 void
1550 tu_GetBufferMemoryRequirements2(
1551 VkDevice device,
1552 const VkBufferMemoryRequirementsInfo2 *pInfo,
1553 VkMemoryRequirements2 *pMemoryRequirements)
1554 {
1555 tu_GetBufferMemoryRequirements(device, pInfo->buffer,
1556 &pMemoryRequirements->memoryRequirements);
1557 }
1558
1559 void
1560 tu_GetImageMemoryRequirements(VkDevice _device,
1561 VkImage _image,
1562 VkMemoryRequirements *pMemoryRequirements)
1563 {
1564 TU_FROM_HANDLE(tu_image, image, _image);
1565
1566 pMemoryRequirements->memoryTypeBits = 1;
1567 pMemoryRequirements->size = image->size;
1568 pMemoryRequirements->alignment = image->alignment;
1569 }
1570
1571 void
1572 tu_GetImageMemoryRequirements2(VkDevice device,
1573 const VkImageMemoryRequirementsInfo2 *pInfo,
1574 VkMemoryRequirements2 *pMemoryRequirements)
1575 {
1576 tu_GetImageMemoryRequirements(device, pInfo->image,
1577 &pMemoryRequirements->memoryRequirements);
1578 }
1579
1580 void
1581 tu_GetImageSparseMemoryRequirements(
1582 VkDevice device,
1583 VkImage image,
1584 uint32_t *pSparseMemoryRequirementCount,
1585 VkSparseImageMemoryRequirements *pSparseMemoryRequirements)
1586 {
1587 tu_stub();
1588 }
1589
1590 void
1591 tu_GetImageSparseMemoryRequirements2(
1592 VkDevice device,
1593 const VkImageSparseMemoryRequirementsInfo2 *pInfo,
1594 uint32_t *pSparseMemoryRequirementCount,
1595 VkSparseImageMemoryRequirements2 *pSparseMemoryRequirements)
1596 {
1597 tu_stub();
1598 }
1599
1600 void
1601 tu_GetDeviceMemoryCommitment(VkDevice device,
1602 VkDeviceMemory memory,
1603 VkDeviceSize *pCommittedMemoryInBytes)
1604 {
1605 *pCommittedMemoryInBytes = 0;
1606 }
1607
1608 VkResult
1609 tu_BindBufferMemory2(VkDevice device,
1610 uint32_t bindInfoCount,
1611 const VkBindBufferMemoryInfo *pBindInfos)
1612 {
1613 for (uint32_t i = 0; i < bindInfoCount; ++i) {
1614 TU_FROM_HANDLE(tu_device_memory, mem, pBindInfos[i].memory);
1615 TU_FROM_HANDLE(tu_buffer, buffer, pBindInfos[i].buffer);
1616
1617 if (mem) {
1618 buffer->bo = &mem->bo;
1619 buffer->bo_offset = pBindInfos[i].memoryOffset;
1620 } else {
1621 buffer->bo = NULL;
1622 }
1623 }
1624 return VK_SUCCESS;
1625 }
1626
1627 VkResult
1628 tu_BindBufferMemory(VkDevice device,
1629 VkBuffer buffer,
1630 VkDeviceMemory memory,
1631 VkDeviceSize memoryOffset)
1632 {
1633 const VkBindBufferMemoryInfo info = {
1634 .sType = VK_STRUCTURE_TYPE_BIND_BUFFER_MEMORY_INFO,
1635 .buffer = buffer,
1636 .memory = memory,
1637 .memoryOffset = memoryOffset
1638 };
1639
1640 return tu_BindBufferMemory2(device, 1, &info);
1641 }
1642
1643 VkResult
1644 tu_BindImageMemory2(VkDevice device,
1645 uint32_t bindInfoCount,
1646 const VkBindImageMemoryInfo *pBindInfos)
1647 {
1648 for (uint32_t i = 0; i < bindInfoCount; ++i) {
1649 TU_FROM_HANDLE(tu_image, image, pBindInfos[i].image);
1650 TU_FROM_HANDLE(tu_device_memory, mem, pBindInfos[i].memory);
1651
1652 if (mem) {
1653 image->bo = &mem->bo;
1654 image->bo_offset = pBindInfos[i].memoryOffset;
1655 } else {
1656 image->bo = NULL;
1657 image->bo_offset = 0;
1658 }
1659 }
1660
1661 return VK_SUCCESS;
1662 }
1663
1664 VkResult
1665 tu_BindImageMemory(VkDevice device,
1666 VkImage image,
1667 VkDeviceMemory memory,
1668 VkDeviceSize memoryOffset)
1669 {
1670 const VkBindImageMemoryInfo info = {
1671 .sType = VK_STRUCTURE_TYPE_BIND_IMAGE_MEMORY_INFO,
1672 .image = image,
1673 .memory = memory,
1674 .memoryOffset = memoryOffset
1675 };
1676
1677 return tu_BindImageMemory2(device, 1, &info);
1678 }
1679
1680 VkResult
1681 tu_QueueBindSparse(VkQueue _queue,
1682 uint32_t bindInfoCount,
1683 const VkBindSparseInfo *pBindInfo,
1684 VkFence _fence)
1685 {
1686 return VK_SUCCESS;
1687 }
1688
1689 // Queue semaphore functions
1690
1691 VkResult
1692 tu_CreateSemaphore(VkDevice _device,
1693 const VkSemaphoreCreateInfo *pCreateInfo,
1694 const VkAllocationCallbacks *pAllocator,
1695 VkSemaphore *pSemaphore)
1696 {
1697 TU_FROM_HANDLE(tu_device, device, _device);
1698
1699 struct tu_semaphore *sem =
1700 vk_alloc2(&device->alloc, pAllocator, sizeof(*sem), 8,
1701 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
1702 if (!sem)
1703 return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
1704
1705 *pSemaphore = tu_semaphore_to_handle(sem);
1706 return VK_SUCCESS;
1707 }
1708
1709 void
1710 tu_DestroySemaphore(VkDevice _device,
1711 VkSemaphore _semaphore,
1712 const VkAllocationCallbacks *pAllocator)
1713 {
1714 TU_FROM_HANDLE(tu_device, device, _device);
1715 TU_FROM_HANDLE(tu_semaphore, sem, _semaphore);
1716 if (!_semaphore)
1717 return;
1718
1719 vk_free2(&device->alloc, pAllocator, sem);
1720 }
1721
1722 VkResult
1723 tu_CreateEvent(VkDevice _device,
1724 const VkEventCreateInfo *pCreateInfo,
1725 const VkAllocationCallbacks *pAllocator,
1726 VkEvent *pEvent)
1727 {
1728 TU_FROM_HANDLE(tu_device, device, _device);
1729 struct tu_event *event =
1730 vk_alloc2(&device->alloc, pAllocator, sizeof(*event), 8,
1731 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
1732
1733 if (!event)
1734 return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
1735
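/* Events are backed by a small page-sized BO; the first 8 bytes hold the
 * event state (1 = set, 0 = reset), accessed through the CPU mapping below
 * (see tu_GetEventStatus/tu_SetEvent/tu_ResetEvent).
 */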
1736 VkResult result = tu_bo_init_new(device, &event->bo, 0x1000);
1737 if (result != VK_SUCCESS)
1738 goto fail_alloc;
1739
1740 result = tu_bo_map(device, &event->bo);
1741 if (result != VK_SUCCESS)
1742 goto fail_map;
1743
1744 *pEvent = tu_event_to_handle(event);
1745
1746 return VK_SUCCESS;
1747
1748 fail_map:
1749 tu_bo_finish(device, &event->bo);
1750 fail_alloc:
1751 vk_free2(&device->alloc, pAllocator, event);
1752 return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
1753 }
1754
1755 void
1756 tu_DestroyEvent(VkDevice _device,
1757 VkEvent _event,
1758 const VkAllocationCallbacks *pAllocator)
1759 {
1760 TU_FROM_HANDLE(tu_device, device, _device);
1761 TU_FROM_HANDLE(tu_event, event, _event);
1762
1763 if (!event)
1764 return;
tu_bo_finish(device, &event->bo);
1765 vk_free2(&device->alloc, pAllocator, event);
1766 }
1767
1768 VkResult
1769 tu_GetEventStatus(VkDevice _device, VkEvent _event)
1770 {
1771 TU_FROM_HANDLE(tu_event, event, _event);
1772
1773 if (*(uint64_t*) event->bo.map == 1)
1774 return VK_EVENT_SET;
1775 return VK_EVENT_RESET;
1776 }
1777
1778 VkResult
1779 tu_SetEvent(VkDevice _device, VkEvent _event)
1780 {
1781 TU_FROM_HANDLE(tu_event, event, _event);
1782 *(uint64_t*) event->bo.map = 1;
1783
1784 return VK_SUCCESS;
1785 }
1786
1787 VkResult
1788 tu_ResetEvent(VkDevice _device, VkEvent _event)
1789 {
1790 TU_FROM_HANDLE(tu_event, event, _event);
1791 *(uint64_t*) event->bo.map = 0;
1792
1793 return VK_SUCCESS;
1794 }
1795
1796 VkResult
1797 tu_CreateBuffer(VkDevice _device,
1798 const VkBufferCreateInfo *pCreateInfo,
1799 const VkAllocationCallbacks *pAllocator,
1800 VkBuffer *pBuffer)
1801 {
1802 TU_FROM_HANDLE(tu_device, device, _device);
1803 struct tu_buffer *buffer;
1804
1805 assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO);
1806
1807 buffer = vk_alloc2(&device->alloc, pAllocator, sizeof(*buffer), 8,
1808 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
1809 if (buffer == NULL)
1810 return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
1811
1812 buffer->size = pCreateInfo->size;
1813 buffer->usage = pCreateInfo->usage;
1814 buffer->flags = pCreateInfo->flags;
1815
1816 *pBuffer = tu_buffer_to_handle(buffer);
1817
1818 return VK_SUCCESS;
1819 }
1820
1821 void
1822 tu_DestroyBuffer(VkDevice _device,
1823 VkBuffer _buffer,
1824 const VkAllocationCallbacks *pAllocator)
1825 {
1826 TU_FROM_HANDLE(tu_device, device, _device);
1827 TU_FROM_HANDLE(tu_buffer, buffer, _buffer);
1828
1829 if (!buffer)
1830 return;
1831
1832 vk_free2(&device->alloc, pAllocator, buffer);
1833 }
1834
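/* A 3D view contributes its depth to the framebuffer layer count; array
 * views contribute base_layer + layer_count.
 */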
1835 static uint32_t
1836 tu_surface_max_layer_count(struct tu_image_view *iview)
1837 {
1838 return iview->type == VK_IMAGE_VIEW_TYPE_3D
1839 ? iview->extent.depth
1840 : (iview->base_layer + iview->layer_count);
1841 }
1842
1843 VkResult
1844 tu_CreateFramebuffer(VkDevice _device,
1845 const VkFramebufferCreateInfo *pCreateInfo,
1846 const VkAllocationCallbacks *pAllocator,
1847 VkFramebuffer *pFramebuffer)
1848 {
1849 TU_FROM_HANDLE(tu_device, device, _device);
1850 struct tu_framebuffer *framebuffer;
1851
1852 assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO);
1853
1854 size_t size = sizeof(*framebuffer) + sizeof(struct tu_attachment_info) *
1855 pCreateInfo->attachmentCount;
1856 framebuffer = vk_alloc2(&device->alloc, pAllocator, size, 8,
1857 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
1858 if (framebuffer == NULL)
1859 return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
1860
1861 framebuffer->attachment_count = pCreateInfo->attachmentCount;
1862 framebuffer->width = pCreateInfo->width;
1863 framebuffer->height = pCreateInfo->height;
1864 framebuffer->layers = pCreateInfo->layers;
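   /* Clamp the framebuffer dimensions to the attachments.  The spec requires
    * every attachment to be at least as large as the framebuffer, so this is
    * purely defensive.
    */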
1865 for (uint32_t i = 0; i < pCreateInfo->attachmentCount; i++) {
1866 VkImageView _iview = pCreateInfo->pAttachments[i];
1867 struct tu_image_view *iview = tu_image_view_from_handle(_iview);
1868 framebuffer->attachments[i].attachment = iview;
1869
1870 framebuffer->width = MIN2(framebuffer->width, iview->extent.width);
1871 framebuffer->height = MIN2(framebuffer->height, iview->extent.height);
1872 framebuffer->layers =
1873 MIN2(framebuffer->layers, tu_surface_max_layer_count(iview));
1874 }
1875
1876 *pFramebuffer = tu_framebuffer_to_handle(framebuffer);
1877 return VK_SUCCESS;
1878 }
1879
1880 void
1881 tu_DestroyFramebuffer(VkDevice _device,
1882 VkFramebuffer _fb,
1883 const VkAllocationCallbacks *pAllocator)
1884 {
1885 TU_FROM_HANDLE(tu_device, device, _device);
1886 TU_FROM_HANDLE(tu_framebuffer, fb, _fb);
1887
1888 if (!fb)
1889 return;
1890 vk_free2(&device->alloc, pAllocator, fb);
1891 }
1892
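/* Map a VkSamplerAddressMode onto the hardware wrap mode.  *needs_border is
 * set when CLAMP_TO_BORDER is used, since that additionally requires a
 * border color to be programmed (not handled yet, see the TODO in
 * tu_init_sampler).
 */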
1893 static enum a6xx_tex_clamp
1894 tu6_tex_wrap(VkSamplerAddressMode address_mode, bool *needs_border)
1895 {
1896 switch (address_mode) {
1897 case VK_SAMPLER_ADDRESS_MODE_REPEAT:
1898 return A6XX_TEX_REPEAT;
1899 case VK_SAMPLER_ADDRESS_MODE_MIRRORED_REPEAT:
1900 return A6XX_TEX_MIRROR_REPEAT;
1901 case VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE:
1902 return A6XX_TEX_CLAMP_TO_EDGE;
1903 case VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_BORDER:
1904 *needs_border = true;
1905 return A6XX_TEX_CLAMP_TO_BORDER;
1906 case VK_SAMPLER_ADDRESS_MODE_MIRROR_CLAMP_TO_EDGE:
1907       /* only works for power-of-two sizes; needs to be emulated otherwise */
1908 return A6XX_TEX_MIRROR_CLAMP;
1909 default:
1910 unreachable("illegal tex wrap mode");
1911 break;
1912 }
1913 }
1914
1915 static enum a6xx_tex_filter
1916 tu6_tex_filter(VkFilter filter, unsigned aniso)
1917 {
1918 switch (filter) {
1919 case VK_FILTER_NEAREST:
1920 return A6XX_TEX_NEAREST;
1921 case VK_FILTER_LINEAR:
1922 return aniso ? A6XX_TEX_ANISO : A6XX_TEX_LINEAR;
1923 case VK_FILTER_CUBIC_IMG:
1924 default:
1925 unreachable("illegal texture filter");
1926 break;
1927 }
1928 }
1929
1930 static void
1931 tu_init_sampler(struct tu_device *device,
1932 struct tu_sampler *sampler,
1933 const VkSamplerCreateInfo *pCreateInfo)
1934 {
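   /* The ANISO field encodes log2 of the clamped max anisotropy
    * (1x -> 0 ... 16x -> 4), e.g. maxAnisotropy = 16: 16 >> 1 = 8,
    * MIN2(8, 8) = 8, util_last_bit(8) = 4.
    */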
1935 unsigned aniso = pCreateInfo->anisotropyEnable ?
1936 util_last_bit(MIN2((uint32_t)pCreateInfo->maxAnisotropy >> 1, 8)) : 0;
1937 bool miplinear = (pCreateInfo->mipmapMode == VK_SAMPLER_MIPMAP_MODE_LINEAR);
1938 bool needs_border = false;
1939
1940 sampler->state[0] =
1941 COND(miplinear, A6XX_TEX_SAMP_0_MIPFILTER_LINEAR_NEAR) |
1942 A6XX_TEX_SAMP_0_XY_MAG(tu6_tex_filter(pCreateInfo->magFilter, aniso)) |
1943 A6XX_TEX_SAMP_0_XY_MIN(tu6_tex_filter(pCreateInfo->minFilter, aniso)) |
1944 A6XX_TEX_SAMP_0_ANISO(aniso) |
1945 A6XX_TEX_SAMP_0_WRAP_S(tu6_tex_wrap(pCreateInfo->addressModeU, &needs_border)) |
1946 A6XX_TEX_SAMP_0_WRAP_T(tu6_tex_wrap(pCreateInfo->addressModeV, &needs_border)) |
1947 A6XX_TEX_SAMP_0_WRAP_R(tu6_tex_wrap(pCreateInfo->addressModeW, &needs_border)) |
1948 A6XX_TEX_SAMP_0_LOD_BIAS(pCreateInfo->mipLodBias);
1949 sampler->state[1] =
1950 /* COND(!cso->seamless_cube_map, A6XX_TEX_SAMP_1_CUBEMAPSEAMLESSFILTOFF) | */
1951 COND(pCreateInfo->unnormalizedCoordinates, A6XX_TEX_SAMP_1_UNNORM_COORDS) |
1952 A6XX_TEX_SAMP_1_MIN_LOD(pCreateInfo->minLod) |
1953 A6XX_TEX_SAMP_1_MAX_LOD(pCreateInfo->maxLod) |
1954 COND(pCreateInfo->compareEnable, A6XX_TEX_SAMP_1_COMPARE_FUNC(pCreateInfo->compareOp));
1955 sampler->state[2] = 0;
1956 sampler->state[3] = 0;
1957
1958 /* TODO:
1959    * A6XX_TEX_SAMP_1_MIPFILTER_LINEAR_FAR disables mipmapping, but Vulkan has no NONE mip filter?
1960 * border color
1961 */
1962
1963 sampler->needs_border = needs_border;
1964 }
1965
1966 VkResult
1967 tu_CreateSampler(VkDevice _device,
1968 const VkSamplerCreateInfo *pCreateInfo,
1969 const VkAllocationCallbacks *pAllocator,
1970 VkSampler *pSampler)
1971 {
1972 TU_FROM_HANDLE(tu_device, device, _device);
1973 struct tu_sampler *sampler;
1974
1975 assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_SAMPLER_CREATE_INFO);
1976
1977 sampler = vk_alloc2(&device->alloc, pAllocator, sizeof(*sampler), 8,
1978 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
1979 if (!sampler)
1980 return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
1981
1982 tu_init_sampler(device, sampler, pCreateInfo);
1983 *pSampler = tu_sampler_to_handle(sampler);
1984
1985 return VK_SUCCESS;
1986 }
1987
1988 void
1989 tu_DestroySampler(VkDevice _device,
1990 VkSampler _sampler,
1991 const VkAllocationCallbacks *pAllocator)
1992 {
1993 TU_FROM_HANDLE(tu_device, device, _device);
1994 TU_FROM_HANDLE(tu_sampler, sampler, _sampler);
1995
1996 if (!sampler)
1997 return;
1998 vk_free2(&device->alloc, pAllocator, sampler);
1999 }
2000
2001 /* vk_icd.h does not declare this function, so we declare it here to
2002  * suppress -Wmissing-prototypes.
2003 */
2004 PUBLIC VKAPI_ATTR VkResult VKAPI_CALL
2005 vk_icdNegotiateLoaderICDInterfaceVersion(uint32_t *pSupportedVersion);
2006
2007 PUBLIC VKAPI_ATTR VkResult VKAPI_CALL
2008 vk_icdNegotiateLoaderICDInterfaceVersion(uint32_t *pSupportedVersion)
2009 {
2010 /* For the full details on loader interface versioning, see
2011 * <https://github.com/KhronosGroup/Vulkan-LoaderAndValidationLayers/blob/master/loader/LoaderAndLayerInterface.md>.
2012 * What follows is a condensed summary, to help you navigate the large and
2013 * confusing official doc.
2014 *
2015 * - Loader interface v0 is incompatible with later versions. We don't
2016 * support it.
2017 *
2018 * - In loader interface v1:
2019 * - The first ICD entrypoint called by the loader is
2020 * vk_icdGetInstanceProcAddr(). The ICD must statically expose this
2021 * entrypoint.
2022 * - The ICD must statically expose no other Vulkan symbol unless it
2023 * is linked with -Bsymbolic.
2024 * - Each dispatchable Vulkan handle created by the ICD must be
2025 * a pointer to a struct whose first member is VK_LOADER_DATA. The
2026 * ICD must initialize VK_LOADER_DATA.loadMagic to
2027 * ICD_LOADER_MAGIC.
2028 * - The loader implements vkCreate{PLATFORM}SurfaceKHR() and
2029 * vkDestroySurfaceKHR(). The ICD must be capable of working with
2030 * such loader-managed surfaces.
2031 *
2032 * - Loader interface v2 differs from v1 in:
2033 * - The first ICD entrypoint called by the loader is
2034 * vk_icdNegotiateLoaderICDInterfaceVersion(). The ICD must
2035 * statically expose this entrypoint.
2036 *
2037 * - Loader interface v3 differs from v2 in:
2038 * - The ICD must implement vkCreate{PLATFORM}SurfaceKHR(),
2039  *      vkDestroySurfaceKHR(), and other APIs that use VkSurfaceKHR,
2040 * because the loader no longer does so.
2041 */
2042 *pSupportedVersion = MIN2(*pSupportedVersion, 3u);
2043 return VK_SUCCESS;
2044 }
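/* Illustration only (not driver code): roughly how a loader consumes the
 * negotiation entrypoint above.  The icd_lib handle and the fallback logic
 * here are hypothetical; a real loader resolves the symbol through its own
 * platform wrappers.
 *
 *    uint32_t version = 3;   // highest interface version the loader knows
 *    PFN_vkNegotiateLoaderICDInterfaceVersion negotiate =
 *       (PFN_vkNegotiateLoaderICDInterfaceVersion)
 *          dlsym(icd_lib, "vk_icdNegotiateLoaderICDInterfaceVersion");
 *    if (!negotiate)
 *       version = 1;          // pre-v2 ICD: the entrypoint does not exist
 *    else if (negotiate(&version) != VK_SUCCESS)
 *       return;               // incompatible ICD, reject it
 *    // 'version' is now min(loader max, ICD max); for this driver that is 3.
 */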
2045
2046 VkResult
2047 tu_GetMemoryFdKHR(VkDevice _device,
2048 const VkMemoryGetFdInfoKHR *pGetFdInfo,
2049 int *pFd)
2050 {
2051 TU_FROM_HANDLE(tu_device, device, _device);
2052 TU_FROM_HANDLE(tu_device_memory, memory, pGetFdInfo->memory);
2053
2054 assert(pGetFdInfo->sType == VK_STRUCTURE_TYPE_MEMORY_GET_FD_INFO_KHR);
2055
2056    /* At the moment, we support only the handle types listed below. */
2057 assert(pGetFdInfo->handleType ==
2058 VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT ||
2059 pGetFdInfo->handleType ==
2060 VK_EXTERNAL_MEMORY_HANDLE_TYPE_DMA_BUF_BIT_EXT);
2061
2062 int prime_fd = tu_bo_export_dmabuf(device, &memory->bo);
2063 if (prime_fd < 0)
2064 return vk_error(device->instance, VK_ERROR_OUT_OF_DEVICE_MEMORY);
2065
2066 *pFd = prime_fd;
2067 return VK_SUCCESS;
2068 }
2069
2070 VkResult
2071 tu_GetMemoryFdPropertiesKHR(VkDevice _device,
2072 VkExternalMemoryHandleTypeFlagBits handleType,
2073 int fd,
2074 VkMemoryFdPropertiesKHR *pMemoryFdProperties)
2075 {
2076 assert(handleType == VK_EXTERNAL_MEMORY_HANDLE_TYPE_DMA_BUF_BIT_EXT);
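   /* Only bit 0 is set: imported dma-bufs can be bound to the one memory
    * type we currently expose.
    */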
2077 pMemoryFdProperties->memoryTypeBits = 1;
2078 return VK_SUCCESS;
2079 }
2080
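/* No external semaphore or fence handle types are supported yet, so the two
 * queries below report empty capability masks.
 */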
2081 void
2082 tu_GetPhysicalDeviceExternalSemaphoreProperties(
2083 VkPhysicalDevice physicalDevice,
2084 const VkPhysicalDeviceExternalSemaphoreInfo *pExternalSemaphoreInfo,
2085 VkExternalSemaphoreProperties *pExternalSemaphoreProperties)
2086 {
2087 pExternalSemaphoreProperties->exportFromImportedHandleTypes = 0;
2088 pExternalSemaphoreProperties->compatibleHandleTypes = 0;
2089 pExternalSemaphoreProperties->externalSemaphoreFeatures = 0;
2090 }
2091
2092 void
2093 tu_GetPhysicalDeviceExternalFenceProperties(
2094 VkPhysicalDevice physicalDevice,
2095 const VkPhysicalDeviceExternalFenceInfo *pExternalFenceInfo,
2096 VkExternalFenceProperties *pExternalFenceProperties)
2097 {
2098 pExternalFenceProperties->exportFromImportedHandleTypes = 0;
2099 pExternalFenceProperties->compatibleHandleTypes = 0;
2100 pExternalFenceProperties->externalFenceFeatures = 0;
2101 }
2102
2103 VkResult
2104 tu_CreateDebugReportCallbackEXT(
2105 VkInstance _instance,
2106 const VkDebugReportCallbackCreateInfoEXT *pCreateInfo,
2107 const VkAllocationCallbacks *pAllocator,
2108 VkDebugReportCallbackEXT *pCallback)
2109 {
2110 TU_FROM_HANDLE(tu_instance, instance, _instance);
2111 return vk_create_debug_report_callback(&instance->debug_report_callbacks,
2112 pCreateInfo, pAllocator,
2113 &instance->alloc, pCallback);
2114 }
2115
2116 void
2117 tu_DestroyDebugReportCallbackEXT(VkInstance _instance,
2118 VkDebugReportCallbackEXT _callback,
2119 const VkAllocationCallbacks *pAllocator)
2120 {
2121 TU_FROM_HANDLE(tu_instance, instance, _instance);
2122 vk_destroy_debug_report_callback(&instance->debug_report_callbacks,
2123 _callback, pAllocator, &instance->alloc);
2124 }
2125
2126 void
2127 tu_DebugReportMessageEXT(VkInstance _instance,
2128 VkDebugReportFlagsEXT flags,
2129 VkDebugReportObjectTypeEXT objectType,
2130 uint64_t object,
2131 size_t location,
2132 int32_t messageCode,
2133 const char *pLayerPrefix,
2134 const char *pMessage)
2135 {
2136 TU_FROM_HANDLE(tu_instance, instance, _instance);
2137 vk_debug_report(&instance->debug_report_callbacks, flags, objectType,
2138 object, location, messageCode, pLayerPrefix, pMessage);
2139 }
2140
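/* Device groups only ever contain a single physical device here, so the
 * local and remote indices must match and every peer-memory feature is
 * trivially supported.
 */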
2141 void
2142 tu_GetDeviceGroupPeerMemoryFeatures(
2143 VkDevice device,
2144 uint32_t heapIndex,
2145 uint32_t localDeviceIndex,
2146 uint32_t remoteDeviceIndex,
2147 VkPeerMemoryFeatureFlags *pPeerMemoryFeatures)
2148 {
2149 assert(localDeviceIndex == remoteDeviceIndex);
2150
2151 *pPeerMemoryFeatures = VK_PEER_MEMORY_FEATURE_COPY_SRC_BIT |
2152 VK_PEER_MEMORY_FEATURE_COPY_DST_BIT |
2153 VK_PEER_MEMORY_FEATURE_GENERIC_SRC_BIT |
2154 VK_PEER_MEMORY_FEATURE_GENERIC_DST_BIT;
2155 }