turnip: improve vertex input handling
src/freedreno/vulkan/tu_device.c (mesa.git)
1 /*
2 * Copyright © 2016 Red Hat.
3 * Copyright © 2016 Bas Nieuwenhuizen
4 *
5 * based in part on anv driver which is:
6 * Copyright © 2015 Intel Corporation
7 *
8 * Permission is hereby granted, free of charge, to any person obtaining a
9 * copy of this software and associated documentation files (the "Software"),
10 * to deal in the Software without restriction, including without limitation
11 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
12 * and/or sell copies of the Software, and to permit persons to whom the
13 * Software is furnished to do so, subject to the following conditions:
14 *
15 * The above copyright notice and this permission notice (including the next
16 * paragraph) shall be included in all copies or substantial portions of the
17 * Software.
18 *
19 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
20 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
21 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
22 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
23 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
24 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
25 * DEALINGS IN THE SOFTWARE.
26 */
27
28 #include "tu_private.h"
29
30 #include <fcntl.h>
31 #include <libsync.h>
32 #include <stdbool.h>
33 #include <string.h>
34 #include <sys/mman.h>
35 #include <sys/sysinfo.h>
36 #include <unistd.h>
37 #include <xf86drm.h>
38
39 #include "compiler/glsl_types.h"
40 #include "util/debug.h"
41 #include "util/disk_cache.h"
42 #include "vk_format.h"
43 #include "vk_util.h"
44
45 #include "drm-uapi/msm_drm.h"
46
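/* Pipeline-cache UUID layout produced below: bytes 0-3 hold the mesa build
 * timestamp, bytes 4-5 the GPU family id, and the remainder the literal
 * string "tu".
 */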
47 static int
48 tu_device_get_cache_uuid(uint16_t family, void *uuid)
49 {
50 uint32_t mesa_timestamp;
51 uint16_t f = family;
52 memset(uuid, 0, VK_UUID_SIZE);
53 if (!disk_cache_get_function_timestamp(tu_device_get_cache_uuid,
54 &mesa_timestamp))
55 return -1;
56
57 memcpy(uuid, &mesa_timestamp, 4);
58 memcpy((char *) uuid + 4, &f, 2);
59 snprintf((char *) uuid + 6, VK_UUID_SIZE - 10, "tu");
60 return 0;
61 }
62
63 static void
64 tu_get_driver_uuid(void *uuid)
65 {
66 memset(uuid, 0, VK_UUID_SIZE);
67 snprintf(uuid, VK_UUID_SIZE, "freedreno");
68 }
69
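/* Note: unlike the driver UUID above, the device UUID below is currently
 * left as all zeros.
 */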
70 static void
71 tu_get_device_uuid(void *uuid)
72 {
73 memset(uuid, 0, VK_UUID_SIZE);
74 }
75
76 static VkResult
77 tu_bo_init(struct tu_device *dev,
78 struct tu_bo *bo,
79 uint32_t gem_handle,
80 uint64_t size)
81 {
82 uint64_t iova = tu_gem_info_iova(dev, gem_handle);
83 if (!iova)
84 return VK_ERROR_OUT_OF_DEVICE_MEMORY;
85
86 *bo = (struct tu_bo) {
87 .gem_handle = gem_handle,
88 .size = size,
89 .iova = iova,
90 };
91
92 return VK_SUCCESS;
93 }
94
95 VkResult
96 tu_bo_init_new(struct tu_device *dev, struct tu_bo *bo, uint64_t size)
97 {
98 /* TODO: Choose better flags. As of 2018-11-12, freedreno/drm/msm_bo.c
99 * always sets `flags = MSM_BO_WC`, and we copy that behavior here.
100 */
101 uint32_t gem_handle = tu_gem_new(dev, size, MSM_BO_WC);
102 if (!gem_handle)
103 return vk_error(dev->instance, VK_ERROR_OUT_OF_DEVICE_MEMORY);
104
105 VkResult result = tu_bo_init(dev, bo, gem_handle, size);
106 if (result != VK_SUCCESS) {
107 tu_gem_close(dev, gem_handle);
108 return vk_error(dev->instance, result);
109 }
110
111 return VK_SUCCESS;
112 }
113
114 VkResult
115 tu_bo_init_dmabuf(struct tu_device *dev,
116 struct tu_bo *bo,
117 uint64_t size,
118 int fd)
119 {
120 uint32_t gem_handle = tu_gem_import_dmabuf(dev, fd, size);
121 if (!gem_handle)
122 return vk_error(dev->instance, VK_ERROR_INVALID_EXTERNAL_HANDLE);
123
124 VkResult result = tu_bo_init(dev, bo, gem_handle, size);
125 if (result != VK_SUCCESS) {
126 tu_gem_close(dev, gem_handle);
127 return vk_error(dev->instance, result);
128 }
129
130 return VK_SUCCESS;
131 }
132
133 int
134 tu_bo_export_dmabuf(struct tu_device *dev, struct tu_bo *bo)
135 {
136 return tu_gem_export_dmabuf(dev, bo->gem_handle);
137 }
138
139 VkResult
140 tu_bo_map(struct tu_device *dev, struct tu_bo *bo)
141 {
142 if (bo->map)
143 return VK_SUCCESS;
144
145 uint64_t offset = tu_gem_info_offset(dev, bo->gem_handle);
146 if (!offset)
147 return vk_error(dev->instance, VK_ERROR_OUT_OF_DEVICE_MEMORY);
148
149 /* TODO: Should we use the wrapper os_mmap() like Freedreno does? */
150 void *map = mmap(0, bo->size, PROT_READ | PROT_WRITE, MAP_SHARED,
151 dev->physical_device->local_fd, offset);
152 if (map == MAP_FAILED)
153 return vk_error(dev->instance, VK_ERROR_MEMORY_MAP_FAILED);
154
155 bo->map = map;
156 return VK_SUCCESS;
157 }
158
159 void
160 tu_bo_finish(struct tu_device *dev, struct tu_bo *bo)
161 {
162 assert(bo->gem_handle);
163
164 if (bo->map)
165 munmap(bo->map, bo->size);
166
167 tu_gem_close(dev, bo->gem_handle);
168 }
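/* Illustrative sketch (not used verbatim anywhere) of how the BO helpers
 * above fit together:
 *
 *    struct tu_bo bo;
 *    if (tu_bo_init_new(dev, &bo, 0x1000) == VK_SUCCESS) {
 *       if (tu_bo_map(dev, &bo) == VK_SUCCESS)
 *          memset(bo.map, 0, bo.size);    // CPU write through the WC mapping
 *       tu_bo_finish(dev, &bo);           // munmap (if mapped) + GEM close
 *    }
 */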
169
170 static VkResult
171 tu_physical_device_init(struct tu_physical_device *device,
172 struct tu_instance *instance,
173 drmDevicePtr drm_device)
174 {
175 const char *path = drm_device->nodes[DRM_NODE_RENDER];
176 VkResult result = VK_SUCCESS;
177 drmVersionPtr version;
178 int fd;
179 int master_fd = -1;
180
181 fd = open(path, O_RDWR | O_CLOEXEC);
182 if (fd < 0) {
183 return vk_errorf(instance, VK_ERROR_INCOMPATIBLE_DRIVER,
184 "failed to open device %s", path);
185 }
186
187 /* Version 1.3 added MSM_INFO_IOVA. */
188 const int min_version_major = 1;
189 const int min_version_minor = 3;
190
191 version = drmGetVersion(fd);
192 if (!version) {
193 close(fd);
194 return vk_errorf(instance, VK_ERROR_INCOMPATIBLE_DRIVER,
195 "failed to query kernel driver version for device %s",
196 path);
197 }
198
199 if (strcmp(version->name, "msm")) {
200 drmFreeVersion(version);
201 close(fd);
202 return vk_errorf(instance, VK_ERROR_INCOMPATIBLE_DRIVER,
203 "device %s does not use the msm kernel driver", path);
204 }
205
206 if (version->version_major != min_version_major ||
207 version->version_minor < min_version_minor) {
208 result = vk_errorf(instance, VK_ERROR_INCOMPATIBLE_DRIVER,
209 "kernel driver for device %s has version %d.%d, "
210 "but Vulkan requires version >= %d.%d",
211 path, version->version_major, version->version_minor,
212 min_version_major, min_version_minor);
213 drmFreeVersion(version);
214 close(fd);
215 return result;
216 }
217
218 drmFreeVersion(version);
219
220 if (instance->debug_flags & TU_DEBUG_STARTUP)
221 tu_logi("Found compatible device '%s'.", path);
222
223 device->_loader_data.loaderMagic = ICD_LOADER_MAGIC;
224 device->instance = instance;
225 assert(strlen(path) < ARRAY_SIZE(device->path));
226 strncpy(device->path, path, ARRAY_SIZE(device->path));
227
228 if (instance->enabled_extensions.KHR_display) {
229 master_fd =
230 open(drm_device->nodes[DRM_NODE_PRIMARY], O_RDWR | O_CLOEXEC);
231 if (master_fd >= 0) {
232 /* TODO: free master_fd if accel is not working? */
233 }
234 }
235
236 device->master_fd = master_fd;
237 device->local_fd = fd;
238
239 if (tu_drm_get_gpu_id(device, &device->gpu_id)) {
240 if (instance->debug_flags & TU_DEBUG_STARTUP)
241 tu_logi("Could not query the GPU ID");
242 result = vk_errorf(instance, VK_ERROR_INITIALIZATION_FAILED,
243 "could not get GPU ID");
244 goto fail;
245 }
246
247 if (tu_drm_get_gmem_size(device, &device->gmem_size)) {
248 if (instance->debug_flags & TU_DEBUG_STARTUP)
249 tu_logi("Could not query the GMEM size");
250 result = vk_errorf(instance, VK_ERROR_INITIALIZATION_FAILED,
251 "could not get GMEM size");
252 goto fail;
253 }
254
255 if (tu_drm_get_gmem_base(device, &device->gmem_base)) {
256 if (instance->debug_flags & TU_DEBUG_STARTUP)
257 tu_logi("Could not query the GMEM size");
258 result = vk_errorf(instance, VK_ERROR_INITIALIZATION_FAILED,
259 "could not get GMEM size");
260 goto fail;
261 }
262
263 memset(device->name, 0, sizeof(device->name));
264 sprintf(device->name, "FD%d", device->gpu_id);
265
266 switch (device->gpu_id) {
267 case 618:
268 device->tile_align_w = 64;
269 device->tile_align_h = 16;
270 device->magic.RB_UNKNOWN_8E04_blit = 0x00100000;
271 device->magic.RB_CCU_CNTL_gmem = 0x3e400004;
272 device->magic.PC_UNKNOWN_9805 = 0x0;
273 device->magic.SP_UNKNOWN_A0F8 = 0x0;
274 break;
275 case 630:
276 case 640:
277 device->tile_align_w = 64;
278 device->tile_align_h = 16;
279 device->magic.RB_UNKNOWN_8E04_blit = 0x01000000;
280 device->magic.RB_CCU_CNTL_gmem = 0x7c400004;
281 device->magic.PC_UNKNOWN_9805 = 0x1;
282 device->magic.SP_UNKNOWN_A0F8 = 0x1;
283 break;
284 default:
285 result = vk_errorf(instance, VK_ERROR_INITIALIZATION_FAILED,
286 "device %s is unsupported", device->name);
287 goto fail;
288 }
289 if (tu_device_get_cache_uuid(device->gpu_id, device->cache_uuid)) {
290 result = vk_errorf(instance, VK_ERROR_INITIALIZATION_FAILED,
291 "cannot generate UUID");
292 goto fail;
293 }
294
295 /* The gpu id is already embedded in the cache uuid, so passing the
296 * device name when creating the cache is enough to key it.
297 */
298 char buf[VK_UUID_SIZE * 2 + 1];
299 disk_cache_format_hex_id(buf, device->cache_uuid, VK_UUID_SIZE * 2);
300 device->disk_cache = disk_cache_create(device->name, buf, 0);
301
302 fprintf(stderr, "WARNING: tu is not a conformant vulkan implementation, "
303 "testing use only.\n");
304
305 tu_get_driver_uuid(&device->driver_uuid);
306 tu_get_device_uuid(&device->device_uuid);
307
308 tu_fill_device_extension_table(device, &device->supported_extensions);
309
310 if (result != VK_SUCCESS) {
311 vk_error(instance, result);
312 goto fail;
313 }
314
315 result = tu_wsi_init(device);
316 if (result != VK_SUCCESS) {
317 vk_error(instance, result);
318 goto fail;
319 }
320
321 return VK_SUCCESS;
322
323 fail:
324 close(fd);
325 if (master_fd != -1)
326 close(master_fd);
327 return result;
328 }
329
330 static void
331 tu_physical_device_finish(struct tu_physical_device *device)
332 {
333 tu_wsi_finish(device);
334
335 disk_cache_destroy(device->disk_cache);
336 close(device->local_fd);
337 if (device->master_fd != -1)
338 close(device->master_fd);
339 }
340
341 static VKAPI_ATTR void *
342 default_alloc_func(void *pUserData,
343 size_t size,
344 size_t align,
345 VkSystemAllocationScope allocationScope)
346 {
347 return malloc(size);
348 }
349
350 static VKAPI_ATTR void *
351 default_realloc_func(void *pUserData,
352 void *pOriginal,
353 size_t size,
354 size_t align,
355 VkSystemAllocationScope allocationScope)
356 {
357 return realloc(pOriginal, size);
358 }
359
360 static VKAPI_ATTR void
361 default_free_func(void *pUserData, void *pMemory)
362 {
363 free(pMemory);
364 }
365
366 static const VkAllocationCallbacks default_alloc = {
367 .pUserData = NULL,
368 .pfnAllocation = default_alloc_func,
369 .pfnReallocation = default_realloc_func,
370 .pfnFree = default_free_func,
371 };
372
373 static const struct debug_control tu_debug_options[] = {
374 { "startup", TU_DEBUG_STARTUP },
375 { "nir", TU_DEBUG_NIR },
376 { "ir3", TU_DEBUG_IR3 },
377 { "nobin", TU_DEBUG_NOBIN },
378 { "sysmem", TU_DEBUG_SYSMEM },
379 { "forcebin", TU_DEBUG_FORCEBIN },
380 { NULL, 0 }
381 };
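/* These flags are selected at runtime through the TU_DEBUG environment
 * variable (a comma-separated list, e.g. TU_DEBUG=startup,nobin), parsed by
 * parse_debug_string() in tu_CreateInstance() below.
 */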
382
383 const char *
384 tu_get_debug_option_name(int id)
385 {
386 assert(id < ARRAY_SIZE(tu_debug_options) - 1);
387 return tu_debug_options[id].string;
388 }
389
390 static int
391 tu_get_instance_extension_index(const char *name)
392 {
393 for (unsigned i = 0; i < TU_INSTANCE_EXTENSION_COUNT; ++i) {
394 if (strcmp(name, tu_instance_extensions[i].extensionName) == 0)
395 return i;
396 }
397 return -1;
398 }
399
400 VkResult
401 tu_CreateInstance(const VkInstanceCreateInfo *pCreateInfo,
402 const VkAllocationCallbacks *pAllocator,
403 VkInstance *pInstance)
404 {
405 struct tu_instance *instance;
406 VkResult result;
407
408 assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_INSTANCE_CREATE_INFO);
409
410 uint32_t client_version;
411 if (pCreateInfo->pApplicationInfo &&
412 pCreateInfo->pApplicationInfo->apiVersion != 0) {
413 client_version = pCreateInfo->pApplicationInfo->apiVersion;
414 } else {
415 tu_EnumerateInstanceVersion(&client_version);
416 }
417
418 instance = vk_zalloc2(&default_alloc, pAllocator, sizeof(*instance), 8,
419 VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
420 if (!instance)
421 return vk_error(NULL, VK_ERROR_OUT_OF_HOST_MEMORY);
422
423 instance->_loader_data.loaderMagic = ICD_LOADER_MAGIC;
424
425 if (pAllocator)
426 instance->alloc = *pAllocator;
427 else
428 instance->alloc = default_alloc;
429
430 instance->api_version = client_version;
431 instance->physical_device_count = -1;
432
433 instance->debug_flags =
434 parse_debug_string(getenv("TU_DEBUG"), tu_debug_options);
435
436 if (instance->debug_flags & TU_DEBUG_STARTUP)
437 tu_logi("Created an instance");
438
439 for (uint32_t i = 0; i < pCreateInfo->enabledExtensionCount; i++) {
440 const char *ext_name = pCreateInfo->ppEnabledExtensionNames[i];
441 int index = tu_get_instance_extension_index(ext_name);
442
443 if (index < 0 || !tu_supported_instance_extensions.extensions[index]) {
444 vk_free2(&default_alloc, pAllocator, instance);
445 return vk_error(instance, VK_ERROR_EXTENSION_NOT_PRESENT);
446 }
447
448 instance->enabled_extensions.extensions[index] = true;
449 }
450
451 result = vk_debug_report_instance_init(&instance->debug_report_callbacks);
452 if (result != VK_SUCCESS) {
453 vk_free2(&default_alloc, pAllocator, instance);
454 return vk_error(instance, result);
455 }
456
457 glsl_type_singleton_init_or_ref();
458
459 VG(VALGRIND_CREATE_MEMPOOL(instance, 0, false));
460
461 *pInstance = tu_instance_to_handle(instance);
462
463 return VK_SUCCESS;
464 }
465
466 void
467 tu_DestroyInstance(VkInstance _instance,
468 const VkAllocationCallbacks *pAllocator)
469 {
470 TU_FROM_HANDLE(tu_instance, instance, _instance);
471
472 if (!instance)
473 return;
474
475 for (int i = 0; i < instance->physical_device_count; ++i) {
476 tu_physical_device_finish(instance->physical_devices + i);
477 }
478
479 VG(VALGRIND_DESTROY_MEMPOOL(instance));
480
481 glsl_type_singleton_decref();
482
483 vk_debug_report_instance_destroy(&instance->debug_report_callbacks);
484
485 vk_free(&instance->alloc, instance);
486 }
487
488 static VkResult
489 tu_enumerate_devices(struct tu_instance *instance)
490 {
491 /* TODO: Check for more devices ? */
492 drmDevicePtr devices[8];
493 VkResult result = VK_ERROR_INCOMPATIBLE_DRIVER;
494 int max_devices;
495
496 instance->physical_device_count = 0;
497
498 max_devices = drmGetDevices2(0, devices, ARRAY_SIZE(devices));
499
500 if (instance->debug_flags & TU_DEBUG_STARTUP)
501 tu_logi("Found %d drm nodes", max_devices);
502
503 if (max_devices < 1)
504 return vk_error(instance, VK_ERROR_INCOMPATIBLE_DRIVER);
505
506 for (unsigned i = 0; i < (unsigned) max_devices; i++) {
507 if (devices[i]->available_nodes & 1 << DRM_NODE_RENDER &&
508 devices[i]->bustype == DRM_BUS_PLATFORM) {
509
510 result = tu_physical_device_init(
511 instance->physical_devices + instance->physical_device_count,
512 instance, devices[i]);
513 if (result == VK_SUCCESS)
514 ++instance->physical_device_count;
515 else if (result != VK_ERROR_INCOMPATIBLE_DRIVER)
516 break;
517 }
518 }
519 drmFreeDevices(devices, max_devices);
520
521 return result;
522 }
523
524 VkResult
525 tu_EnumeratePhysicalDevices(VkInstance _instance,
526 uint32_t *pPhysicalDeviceCount,
527 VkPhysicalDevice *pPhysicalDevices)
528 {
529 TU_FROM_HANDLE(tu_instance, instance, _instance);
530 VK_OUTARRAY_MAKE(out, pPhysicalDevices, pPhysicalDeviceCount);
531
532 VkResult result;
533
534 if (instance->physical_device_count < 0) {
535 result = tu_enumerate_devices(instance);
536 if (result != VK_SUCCESS && result != VK_ERROR_INCOMPATIBLE_DRIVER)
537 return result;
538 }
539
540 for (uint32_t i = 0; i < instance->physical_device_count; ++i) {
541 vk_outarray_append(&out, p)
542 {
543 *p = tu_physical_device_to_handle(instance->physical_devices + i);
544 }
545 }
546
547 return vk_outarray_status(&out);
548 }
549
550 VkResult
551 tu_EnumeratePhysicalDeviceGroups(
552 VkInstance _instance,
553 uint32_t *pPhysicalDeviceGroupCount,
554 VkPhysicalDeviceGroupProperties *pPhysicalDeviceGroupProperties)
555 {
556 TU_FROM_HANDLE(tu_instance, instance, _instance);
557 VK_OUTARRAY_MAKE(out, pPhysicalDeviceGroupProperties,
558 pPhysicalDeviceGroupCount);
559 VkResult result;
560
561 if (instance->physical_device_count < 0) {
562 result = tu_enumerate_devices(instance);
563 if (result != VK_SUCCESS && result != VK_ERROR_INCOMPATIBLE_DRIVER)
564 return result;
565 }
566
567 for (uint32_t i = 0; i < instance->physical_device_count; ++i) {
568 vk_outarray_append(&out, p)
569 {
570 p->physicalDeviceCount = 1;
571 p->physicalDevices[0] =
572 tu_physical_device_to_handle(instance->physical_devices + i);
573 p->subsetAllocation = false;
574 }
575 }
576
577 return vk_outarray_status(&out);
578 }
579
580 void
581 tu_GetPhysicalDeviceFeatures(VkPhysicalDevice physicalDevice,
582 VkPhysicalDeviceFeatures *pFeatures)
583 {
584 memset(pFeatures, 0, sizeof(*pFeatures));
585
586 *pFeatures = (VkPhysicalDeviceFeatures) {
587 .robustBufferAccess = false,
588 .fullDrawIndexUint32 = true,
589 .imageCubeArray = false,
590 .independentBlend = true,
591 .geometryShader = true,
592 .tessellationShader = false,
593 .sampleRateShading = true,
594 .dualSrcBlend = true,
595 .logicOp = true,
596 .multiDrawIndirect = false,
597 .drawIndirectFirstInstance = false,
598 .depthClamp = true,
599 .depthBiasClamp = false,
600 .fillModeNonSolid = false,
601 .depthBounds = false,
602 .wideLines = false,
603 .largePoints = false,
604 .alphaToOne = false,
605 .multiViewport = false,
606 .samplerAnisotropy = true,
607 .textureCompressionETC2 = true,
608 .textureCompressionASTC_LDR = true,
609 .textureCompressionBC = true,
610 .occlusionQueryPrecise = true,
611 .pipelineStatisticsQuery = false,
612 .vertexPipelineStoresAndAtomics = false,
613 .fragmentStoresAndAtomics = false,
614 .shaderTessellationAndGeometryPointSize = false,
615 .shaderImageGatherExtended = false,
616 .shaderStorageImageExtendedFormats = false,
617 .shaderStorageImageMultisample = false,
618 .shaderUniformBufferArrayDynamicIndexing = false,
619 .shaderSampledImageArrayDynamicIndexing = false,
620 .shaderStorageBufferArrayDynamicIndexing = false,
621 .shaderStorageImageArrayDynamicIndexing = false,
622 .shaderStorageImageReadWithoutFormat = false,
623 .shaderStorageImageWriteWithoutFormat = false,
624 .shaderClipDistance = false,
625 .shaderCullDistance = false,
626 .shaderFloat64 = false,
627 .shaderInt64 = false,
628 .shaderInt16 = false,
629 .sparseBinding = false,
630 .variableMultisampleRate = false,
631 .inheritedQueries = false,
632 };
633 }
634
635 void
636 tu_GetPhysicalDeviceFeatures2(VkPhysicalDevice physicalDevice,
637 VkPhysicalDeviceFeatures2 *pFeatures)
638 {
639 vk_foreach_struct(ext, pFeatures->pNext)
640 {
641 switch (ext->sType) {
642 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VARIABLE_POINTERS_FEATURES: {
643 VkPhysicalDeviceVariablePointersFeatures *features = (void *) ext;
644 features->variablePointersStorageBuffer = false;
645 features->variablePointers = false;
646 break;
647 }
648 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MULTIVIEW_FEATURES: {
649 VkPhysicalDeviceMultiviewFeatures *features =
650 (VkPhysicalDeviceMultiviewFeatures *) ext;
651 features->multiview = false;
652 features->multiviewGeometryShader = false;
653 features->multiviewTessellationShader = false;
654 break;
655 }
656 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_DRAW_PARAMETERS_FEATURES: {
657 VkPhysicalDeviceShaderDrawParametersFeatures *features =
658 (VkPhysicalDeviceShaderDrawParametersFeatures *) ext;
659 features->shaderDrawParameters = false;
660 break;
661 }
662 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROTECTED_MEMORY_FEATURES: {
663 VkPhysicalDeviceProtectedMemoryFeatures *features =
664 (VkPhysicalDeviceProtectedMemoryFeatures *) ext;
665 features->protectedMemory = false;
666 break;
667 }
668 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_16BIT_STORAGE_FEATURES: {
669 VkPhysicalDevice16BitStorageFeatures *features =
670 (VkPhysicalDevice16BitStorageFeatures *) ext;
671 features->storageBuffer16BitAccess = false;
672 features->uniformAndStorageBuffer16BitAccess = false;
673 features->storagePushConstant16 = false;
674 features->storageInputOutput16 = false;
675 break;
676 }
677 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SAMPLER_YCBCR_CONVERSION_FEATURES: {
678 VkPhysicalDeviceSamplerYcbcrConversionFeatures *features =
679 (VkPhysicalDeviceSamplerYcbcrConversionFeatures *) ext;
680 features->samplerYcbcrConversion = false;
681 break;
682 }
683 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DESCRIPTOR_INDEXING_FEATURES_EXT: {
684 VkPhysicalDeviceDescriptorIndexingFeaturesEXT *features =
685 (VkPhysicalDeviceDescriptorIndexingFeaturesEXT *) ext;
686 features->shaderInputAttachmentArrayDynamicIndexing = false;
687 features->shaderUniformTexelBufferArrayDynamicIndexing = false;
688 features->shaderStorageTexelBufferArrayDynamicIndexing = false;
689 features->shaderUniformBufferArrayNonUniformIndexing = false;
690 features->shaderSampledImageArrayNonUniformIndexing = false;
691 features->shaderStorageBufferArrayNonUniformIndexing = false;
692 features->shaderStorageImageArrayNonUniformIndexing = false;
693 features->shaderInputAttachmentArrayNonUniformIndexing = false;
694 features->shaderUniformTexelBufferArrayNonUniformIndexing = false;
695 features->shaderStorageTexelBufferArrayNonUniformIndexing = false;
696 features->descriptorBindingUniformBufferUpdateAfterBind = false;
697 features->descriptorBindingSampledImageUpdateAfterBind = false;
698 features->descriptorBindingStorageImageUpdateAfterBind = false;
699 features->descriptorBindingStorageBufferUpdateAfterBind = false;
700 features->descriptorBindingUniformTexelBufferUpdateAfterBind = false;
701 features->descriptorBindingStorageTexelBufferUpdateAfterBind = false;
702 features->descriptorBindingUpdateUnusedWhilePending = false;
703 features->descriptorBindingPartiallyBound = false;
704 features->descriptorBindingVariableDescriptorCount = false;
705 features->runtimeDescriptorArray = false;
706 break;
707 }
708 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_CONDITIONAL_RENDERING_FEATURES_EXT: {
709 VkPhysicalDeviceConditionalRenderingFeaturesEXT *features =
710 (VkPhysicalDeviceConditionalRenderingFeaturesEXT *) ext;
711 features->conditionalRendering = false;
712 features->inheritedConditionalRendering = false;
713 break;
714 }
715 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TRANSFORM_FEEDBACK_FEATURES_EXT: {
716 VkPhysicalDeviceTransformFeedbackFeaturesEXT *features =
717 (VkPhysicalDeviceTransformFeedbackFeaturesEXT *) ext;
718 features->transformFeedback = true;
719 features->geometryStreams = false;
720 break;
721 }
722 default:
723 break;
724 }
725 }
726 return tu_GetPhysicalDeviceFeatures(physicalDevice, &pFeatures->features);
727 }
728
729 void
730 tu_GetPhysicalDeviceProperties(VkPhysicalDevice physicalDevice,
731 VkPhysicalDeviceProperties *pProperties)
732 {
733 TU_FROM_HANDLE(tu_physical_device, pdevice, physicalDevice);
734 VkSampleCountFlags sample_counts = VK_SAMPLE_COUNT_1_BIT |
735 VK_SAMPLE_COUNT_2_BIT | VK_SAMPLE_COUNT_4_BIT | VK_SAMPLE_COUNT_8_BIT;
736
737 /* Make sure that the entire descriptor set is addressable with a signed
738 * 32-bit int, i.e. the sum of all limits scaled by descriptor size has to
739 * be at most 2 GiB. A combined image & sampler object counts against both
740 * limits. This limit is for the pipeline layout, not for the set layout,
741 * but there is no set limit, so we just set a pipeline limit. I don't
742 * think any app is going to hit this soon. */
743 size_t max_descriptor_set_size =
744 ((1ull << 31) - 16 * MAX_DYNAMIC_BUFFERS) /
745 (32 /* uniform buffer, 32 due to potential space wasted on alignment */ +
746 32 /* storage buffer, 32 due to potential space wasted on alignment */ +
747 32 /* sampler, largest when combined with image */ +
748 64 /* sampled image */ + 64 /* storage image */);
749
750 VkPhysicalDeviceLimits limits = {
751 .maxImageDimension1D = (1 << 14),
752 .maxImageDimension2D = (1 << 14),
753 .maxImageDimension3D = (1 << 11),
754 .maxImageDimensionCube = (1 << 14),
755 .maxImageArrayLayers = (1 << 11),
756 .maxTexelBufferElements = 128 * 1024 * 1024,
757 .maxUniformBufferRange = UINT32_MAX,
758 .maxStorageBufferRange = MAX_STORAGE_BUFFER_RANGE,
759 .maxPushConstantsSize = MAX_PUSH_CONSTANTS_SIZE,
760 .maxMemoryAllocationCount = UINT32_MAX,
761 .maxSamplerAllocationCount = 64 * 1024,
762 .bufferImageGranularity = 64, /* A cache line */
763 .sparseAddressSpaceSize = 0xffffffffu, /* buffer max size */
764 .maxBoundDescriptorSets = MAX_SETS,
765 .maxPerStageDescriptorSamplers = max_descriptor_set_size,
766 .maxPerStageDescriptorUniformBuffers = max_descriptor_set_size,
767 .maxPerStageDescriptorStorageBuffers = max_descriptor_set_size,
768 .maxPerStageDescriptorSampledImages = max_descriptor_set_size,
769 .maxPerStageDescriptorStorageImages = max_descriptor_set_size,
770 .maxPerStageDescriptorInputAttachments = max_descriptor_set_size,
771 .maxPerStageResources = max_descriptor_set_size,
772 .maxDescriptorSetSamplers = max_descriptor_set_size,
773 .maxDescriptorSetUniformBuffers = max_descriptor_set_size,
774 .maxDescriptorSetUniformBuffersDynamic = MAX_DYNAMIC_UNIFORM_BUFFERS,
775 .maxDescriptorSetStorageBuffers = max_descriptor_set_size,
776 .maxDescriptorSetStorageBuffersDynamic = MAX_DYNAMIC_STORAGE_BUFFERS,
777 .maxDescriptorSetSampledImages = max_descriptor_set_size,
778 .maxDescriptorSetStorageImages = max_descriptor_set_size,
779 .maxDescriptorSetInputAttachments = max_descriptor_set_size,
780 .maxVertexInputAttributes = 32,
781 .maxVertexInputBindings = 32,
782 .maxVertexInputAttributeOffset = 4095,
783 .maxVertexInputBindingStride = 2048,
784 .maxVertexOutputComponents = 128,
785 .maxTessellationGenerationLevel = 64,
786 .maxTessellationPatchSize = 32,
787 .maxTessellationControlPerVertexInputComponents = 128,
788 .maxTessellationControlPerVertexOutputComponents = 128,
789 .maxTessellationControlPerPatchOutputComponents = 120,
790 .maxTessellationControlTotalOutputComponents = 4096,
791 .maxTessellationEvaluationInputComponents = 128,
792 .maxTessellationEvaluationOutputComponents = 128,
793 .maxGeometryShaderInvocations = 32,
794 .maxGeometryInputComponents = 64,
795 .maxGeometryOutputComponents = 128,
796 .maxGeometryOutputVertices = 256,
797 .maxGeometryTotalOutputComponents = 1024,
798 .maxFragmentInputComponents = 128,
799 .maxFragmentOutputAttachments = 8,
800 .maxFragmentDualSrcAttachments = 1,
801 .maxFragmentCombinedOutputResources = 8,
802 .maxComputeSharedMemorySize = 32768,
803 .maxComputeWorkGroupCount = { 65535, 65535, 65535 },
804 .maxComputeWorkGroupInvocations = 2048,
805 .maxComputeWorkGroupSize = { 2048, 2048, 2048 },
806 .subPixelPrecisionBits = 8,
807 .subTexelPrecisionBits = 4 /* FIXME */,
808 .mipmapPrecisionBits = 4 /* FIXME */,
809 .maxDrawIndexedIndexValue = UINT32_MAX,
810 .maxDrawIndirectCount = UINT32_MAX,
811 .maxSamplerLodBias = 16,
812 .maxSamplerAnisotropy = 16,
813 .maxViewports = MAX_VIEWPORTS,
814 .maxViewportDimensions = { (1 << 14), (1 << 14) },
815 .viewportBoundsRange = { INT16_MIN, INT16_MAX },
816 .viewportSubPixelBits = 8,
817 .minMemoryMapAlignment = 4096, /* A page */
818 .minTexelBufferOffsetAlignment = 64,
819 .minUniformBufferOffsetAlignment = 4,
820 .minStorageBufferOffsetAlignment = 4,
821 .minTexelOffset = -32,
822 .maxTexelOffset = 31,
823 .minTexelGatherOffset = -32,
824 .maxTexelGatherOffset = 31,
825 .minInterpolationOffset = -2,
826 .maxInterpolationOffset = 2,
827 .subPixelInterpolationOffsetBits = 8,
828 .maxFramebufferWidth = (1 << 14),
829 .maxFramebufferHeight = (1 << 14),
830 .maxFramebufferLayers = (1 << 10),
831 .framebufferColorSampleCounts = sample_counts,
832 .framebufferDepthSampleCounts = sample_counts,
833 .framebufferStencilSampleCounts = sample_counts,
834 .framebufferNoAttachmentsSampleCounts = sample_counts,
835 .maxColorAttachments = MAX_RTS,
836 .sampledImageColorSampleCounts = sample_counts,
837 .sampledImageIntegerSampleCounts = VK_SAMPLE_COUNT_1_BIT,
838 .sampledImageDepthSampleCounts = sample_counts,
839 .sampledImageStencilSampleCounts = sample_counts,
840 .storageImageSampleCounts = VK_SAMPLE_COUNT_1_BIT,
841 .maxSampleMaskWords = 1,
842 .timestampComputeAndGraphics = true,
843 .timestampPeriod = 1000000000.0 / 19200000.0, /* CP_ALWAYS_ON_COUNTER is fixed 19.2MHz */
844 .maxClipDistances = 8,
845 .maxCullDistances = 8,
846 .maxCombinedClipAndCullDistances = 8,
847 .discreteQueuePriorities = 1,
848 .pointSizeRange = { 0.125, 255.875 },
849 .lineWidthRange = { 0.0, 7.9921875 },
850 .pointSizeGranularity = (1.0 / 8.0),
851 .lineWidthGranularity = (1.0 / 128.0),
852 .strictLines = false, /* FINISHME */
853 .standardSampleLocations = true,
854 .optimalBufferCopyOffsetAlignment = 128,
855 .optimalBufferCopyRowPitchAlignment = 128,
856 .nonCoherentAtomSize = 64,
857 };
858
859 *pProperties = (VkPhysicalDeviceProperties) {
860 .apiVersion = tu_physical_device_api_version(pdevice),
861 .driverVersion = vk_get_driver_version(),
862 .vendorID = 0, /* TODO */
863 .deviceID = 0,
864 .deviceType = VK_PHYSICAL_DEVICE_TYPE_INTEGRATED_GPU,
865 .limits = limits,
866 .sparseProperties = { 0 },
867 };
868
869 strcpy(pProperties->deviceName, pdevice->name);
870 memcpy(pProperties->pipelineCacheUUID, pdevice->cache_uuid, VK_UUID_SIZE);
871 }
872
873 void
874 tu_GetPhysicalDeviceProperties2(VkPhysicalDevice physicalDevice,
875 VkPhysicalDeviceProperties2 *pProperties)
876 {
877 TU_FROM_HANDLE(tu_physical_device, pdevice, physicalDevice);
878 tu_GetPhysicalDeviceProperties(physicalDevice, &pProperties->properties);
879
880 vk_foreach_struct(ext, pProperties->pNext)
881 {
882 switch (ext->sType) {
883 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PUSH_DESCRIPTOR_PROPERTIES_KHR: {
884 VkPhysicalDevicePushDescriptorPropertiesKHR *properties =
885 (VkPhysicalDevicePushDescriptorPropertiesKHR *) ext;
886 properties->maxPushDescriptors = MAX_PUSH_DESCRIPTORS;
887 break;
888 }
889 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_ID_PROPERTIES: {
890 VkPhysicalDeviceIDProperties *properties =
891 (VkPhysicalDeviceIDProperties *) ext;
892 memcpy(properties->driverUUID, pdevice->driver_uuid, VK_UUID_SIZE);
893 memcpy(properties->deviceUUID, pdevice->device_uuid, VK_UUID_SIZE);
894 properties->deviceLUIDValid = false;
895 break;
896 }
897 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MULTIVIEW_PROPERTIES: {
898 VkPhysicalDeviceMultiviewProperties *properties =
899 (VkPhysicalDeviceMultiviewProperties *) ext;
900 properties->maxMultiviewViewCount = MAX_VIEWS;
901 properties->maxMultiviewInstanceIndex = INT_MAX;
902 break;
903 }
904 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_POINT_CLIPPING_PROPERTIES: {
905 VkPhysicalDevicePointClippingProperties *properties =
906 (VkPhysicalDevicePointClippingProperties *) ext;
907 properties->pointClippingBehavior =
908 VK_POINT_CLIPPING_BEHAVIOR_ALL_CLIP_PLANES;
909 break;
910 }
911 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MAINTENANCE_3_PROPERTIES: {
912 VkPhysicalDeviceMaintenance3Properties *properties =
913 (VkPhysicalDeviceMaintenance3Properties *) ext;
914 /* Make sure everything is addressable by a signed 32-bit int, and
915 * our largest descriptors are 96 bytes. */
916 properties->maxPerSetDescriptors = (1ull << 31) / 96;
917 /* Our buffer size fields allow only this much */
918 properties->maxMemoryAllocationSize = 0xFFFFFFFFull;
919 break;
920 }
921 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TRANSFORM_FEEDBACK_PROPERTIES_EXT: {
922 VkPhysicalDeviceTransformFeedbackPropertiesEXT *properties =
923 (VkPhysicalDeviceTransformFeedbackPropertiesEXT *)ext;
924
925 properties->maxTransformFeedbackStreams = IR3_MAX_SO_STREAMS;
926 properties->maxTransformFeedbackBuffers = IR3_MAX_SO_BUFFERS;
927 properties->maxTransformFeedbackBufferSize = UINT32_MAX;
928 properties->maxTransformFeedbackStreamDataSize = 512;
929 properties->maxTransformFeedbackBufferDataSize = 512;
930 properties->maxTransformFeedbackBufferDataStride = 512;
931 /* TODO: enable xfb query */
932 properties->transformFeedbackQueries = false;
933 properties->transformFeedbackStreamsLinesTriangles = false;
934 properties->transformFeedbackRasterizationStreamSelect = false;
935 properties->transformFeedbackDraw = true;
936 break;
937 }
938 default:
939 break;
940 }
941 }
942 }
943
944 static const VkQueueFamilyProperties tu_queue_family_properties = {
945 .queueFlags =
946 VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT | VK_QUEUE_TRANSFER_BIT,
947 .queueCount = 1,
948 .timestampValidBits = 48,
949 .minImageTransferGranularity = { 1, 1, 1 },
950 };
951
952 void
953 tu_GetPhysicalDeviceQueueFamilyProperties(
954 VkPhysicalDevice physicalDevice,
955 uint32_t *pQueueFamilyPropertyCount,
956 VkQueueFamilyProperties *pQueueFamilyProperties)
957 {
958 VK_OUTARRAY_MAKE(out, pQueueFamilyProperties, pQueueFamilyPropertyCount);
959
960 vk_outarray_append(&out, p) { *p = tu_queue_family_properties; }
961 }
962
963 void
964 tu_GetPhysicalDeviceQueueFamilyProperties2(
965 VkPhysicalDevice physicalDevice,
966 uint32_t *pQueueFamilyPropertyCount,
967 VkQueueFamilyProperties2 *pQueueFamilyProperties)
968 {
969 VK_OUTARRAY_MAKE(out, pQueueFamilyProperties, pQueueFamilyPropertyCount);
970
971 vk_outarray_append(&out, p)
972 {
973 p->queueFamilyProperties = tu_queue_family_properties;
974 }
975 }
976
977 static uint64_t
978 tu_get_system_heap_size(void)
979 {
980 struct sysinfo info;
981 sysinfo(&info);
982
983 uint64_t total_ram = (uint64_t) info.totalram * (uint64_t) info.mem_unit;
984
985 /* We don't want to burn too much ram with the GPU. If the user has 4GiB
986 * or less, we use at most half. If they have more than 4GiB, we use 3/4.
987 */
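/* For example, 16 GiB of system RAM yields a 12 GiB heap, while 2 GiB
 * yields a 1 GiB heap.
 */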
988 uint64_t available_ram;
989 if (total_ram <= 4ull * 1024ull * 1024ull * 1024ull)
990 available_ram = total_ram / 2;
991 else
992 available_ram = total_ram * 3 / 4;
993
994 return available_ram;
995 }
996
997 void
998 tu_GetPhysicalDeviceMemoryProperties(
999 VkPhysicalDevice physicalDevice,
1000 VkPhysicalDeviceMemoryProperties *pMemoryProperties)
1001 {
1002 pMemoryProperties->memoryHeapCount = 1;
1003 pMemoryProperties->memoryHeaps[0].size = tu_get_system_heap_size();
1004 pMemoryProperties->memoryHeaps[0].flags = VK_MEMORY_HEAP_DEVICE_LOCAL_BIT;
1005
1006 pMemoryProperties->memoryTypeCount = 1;
1007 pMemoryProperties->memoryTypes[0].propertyFlags =
1008 VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT |
1009 VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT |
1010 VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
1011 pMemoryProperties->memoryTypes[0].heapIndex = 0;
1012 }
1013
1014 void
1015 tu_GetPhysicalDeviceMemoryProperties2(
1016 VkPhysicalDevice physicalDevice,
1017 VkPhysicalDeviceMemoryProperties2 *pMemoryProperties)
1018 {
1019 return tu_GetPhysicalDeviceMemoryProperties(
1020 physicalDevice, &pMemoryProperties->memoryProperties);
1021 }
1022
1023 static VkResult
1024 tu_queue_init(struct tu_device *device,
1025 struct tu_queue *queue,
1026 uint32_t queue_family_index,
1027 int idx,
1028 VkDeviceQueueCreateFlags flags)
1029 {
1030 queue->_loader_data.loaderMagic = ICD_LOADER_MAGIC;
1031 queue->device = device;
1032 queue->queue_family_index = queue_family_index;
1033 queue->queue_idx = idx;
1034 queue->flags = flags;
1035
1036 int ret = tu_drm_submitqueue_new(device, 0, &queue->msm_queue_id);
1037 if (ret)
1038 return VK_ERROR_INITIALIZATION_FAILED;
1039
1040 tu_fence_init(&queue->submit_fence, false);
1041
1042 return VK_SUCCESS;
1043 }
1044
1045 static void
1046 tu_queue_finish(struct tu_queue *queue)
1047 {
1048 tu_fence_finish(&queue->submit_fence);
1049 tu_drm_submitqueue_close(queue->device, queue->msm_queue_id);
1050 }
1051
1052 static int
1053 tu_get_device_extension_index(const char *name)
1054 {
1055 for (unsigned i = 0; i < TU_DEVICE_EXTENSION_COUNT; ++i) {
1056 if (strcmp(name, tu_device_extensions[i].extensionName) == 0)
1057 return i;
1058 }
1059 return -1;
1060 }
1061
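/* Border color table copied verbatim into the device's border_color BO at
 * device creation: one 128-byte entry (see the STATIC_ASSERT in
 * tu_CreateDevice()) per VkBorderColor value, pre-encoded for each format
 * class the sampler may return.
 */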
1062 struct PACKED bcolor_entry {
1063 uint32_t fp32[4];
1064 uint16_t ui16[4];
1065 int16_t si16[4];
1066 uint16_t fp16[4];
1067 uint16_t rgb565;
1068 uint16_t rgb5a1;
1069 uint16_t rgba4;
1070 uint8_t __pad0[2];
1071 uint8_t ui8[4];
1072 int8_t si8[4];
1073 uint32_t rgb10a2;
1074 uint32_t z24; /* also s8? */
1075 uint16_t srgb[4]; /* appears to duplicate fp16[], but clamped, used for srgb */
1076 uint8_t __pad1[56];
1077 } border_color[] = {
1078 [VK_BORDER_COLOR_FLOAT_TRANSPARENT_BLACK] = {},
1079 [VK_BORDER_COLOR_INT_TRANSPARENT_BLACK] = {},
1080 [VK_BORDER_COLOR_FLOAT_OPAQUE_BLACK] = {
1081 .fp32[3] = 0x3f800000,
1082 .ui16[3] = 0xffff,
1083 .si16[3] = 0x7fff,
1084 .fp16[3] = 0x3c00,
1085 .rgb5a1 = 0x8000,
1086 .rgba4 = 0xf000,
1087 .ui8[3] = 0xff,
1088 .si8[3] = 0x7f,
1089 .rgb10a2 = 0xc0000000,
1090 .srgb[3] = 0x3c00,
1091 },
1092 [VK_BORDER_COLOR_INT_OPAQUE_BLACK] = {
1093 .fp32[3] = 1,
1094 .fp16[3] = 1,
1095 },
1096 [VK_BORDER_COLOR_FLOAT_OPAQUE_WHITE] = {
1097 .fp32[0 ... 3] = 0x3f800000,
1098 .ui16[0 ... 3] = 0xffff,
1099 .si16[0 ... 3] = 0x7fff,
1100 .fp16[0 ... 3] = 0x3c00,
1101 .rgb565 = 0xffff,
1102 .rgb5a1 = 0xffff,
1103 .rgba4 = 0xffff,
1104 .ui8[0 ... 3] = 0xff,
1105 .si8[0 ... 3] = 0x7f,
1106 .rgb10a2 = 0xffffffff,
1107 .z24 = 0xffffff,
1108 .srgb[0 ... 3] = 0x3c00,
1109 },
1110 [VK_BORDER_COLOR_INT_OPAQUE_WHITE] = {
1111 .fp32[0 ... 3] = 1,
1112 .fp16[0 ... 3] = 1,
1113 },
1114 };
1115
1116
1117 VkResult
1118 tu_CreateDevice(VkPhysicalDevice physicalDevice,
1119 const VkDeviceCreateInfo *pCreateInfo,
1120 const VkAllocationCallbacks *pAllocator,
1121 VkDevice *pDevice)
1122 {
1123 TU_FROM_HANDLE(tu_physical_device, physical_device, physicalDevice);
1124 VkResult result;
1125 struct tu_device *device;
1126
1127 /* Check enabled features */
1128 if (pCreateInfo->pEnabledFeatures) {
1129 VkPhysicalDeviceFeatures supported_features;
1130 tu_GetPhysicalDeviceFeatures(physicalDevice, &supported_features);
1131 VkBool32 *supported_feature = (VkBool32 *) &supported_features;
1132 VkBool32 *enabled_feature = (VkBool32 *) pCreateInfo->pEnabledFeatures;
1133 unsigned num_features =
1134 sizeof(VkPhysicalDeviceFeatures) / sizeof(VkBool32);
1135 for (uint32_t i = 0; i < num_features; i++) {
1136 if (enabled_feature[i] && !supported_feature[i])
1137 return vk_error(physical_device->instance,
1138 VK_ERROR_FEATURE_NOT_PRESENT);
1139 }
1140 }
1141
1142 device = vk_zalloc2(&physical_device->instance->alloc, pAllocator,
1143 sizeof(*device), 8, VK_SYSTEM_ALLOCATION_SCOPE_DEVICE);
1144 if (!device)
1145 return vk_error(physical_device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
1146
1147 device->_loader_data.loaderMagic = ICD_LOADER_MAGIC;
1148 device->instance = physical_device->instance;
1149 device->physical_device = physical_device;
1150
1151 if (pAllocator)
1152 device->alloc = *pAllocator;
1153 else
1154 device->alloc = physical_device->instance->alloc;
1155
1156 for (uint32_t i = 0; i < pCreateInfo->enabledExtensionCount; i++) {
1157 const char *ext_name = pCreateInfo->ppEnabledExtensionNames[i];
1158 int index = tu_get_device_extension_index(ext_name);
1159 if (index < 0 ||
1160 !physical_device->supported_extensions.extensions[index]) {
1161 vk_free(&device->alloc, device);
1162 return vk_error(physical_device->instance,
1163 VK_ERROR_EXTENSION_NOT_PRESENT);
1164 }
1165
1166 device->enabled_extensions.extensions[index] = true;
1167 }
1168
1169 for (unsigned i = 0; i < pCreateInfo->queueCreateInfoCount; i++) {
1170 const VkDeviceQueueCreateInfo *queue_create =
1171 &pCreateInfo->pQueueCreateInfos[i];
1172 uint32_t qfi = queue_create->queueFamilyIndex;
1173 device->queues[qfi] = vk_alloc(
1174 &device->alloc, queue_create->queueCount * sizeof(struct tu_queue),
1175 8, VK_SYSTEM_ALLOCATION_SCOPE_DEVICE);
1176 if (!device->queues[qfi]) {
1177 result = VK_ERROR_OUT_OF_HOST_MEMORY;
1178 goto fail_queues;
1179 }
1180
1181 memset(device->queues[qfi], 0,
1182 queue_create->queueCount * sizeof(struct tu_queue));
1183
1184 device->queue_count[qfi] = queue_create->queueCount;
1185
1186 for (unsigned q = 0; q < queue_create->queueCount; q++) {
1187 result = tu_queue_init(device, &device->queues[qfi][q], qfi, q,
1188 queue_create->flags);
1189 if (result != VK_SUCCESS)
1190 goto fail_queues;
1191 }
1192 }
1193
1194 device->compiler = ir3_compiler_create(NULL, physical_device->gpu_id);
1195 if (!device->compiler)
1196 { result = VK_ERROR_INITIALIZATION_FAILED; goto fail_queues; }
1197
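/* Allocate the device-global visibility stream (VSC) buffers used by the
 * binning pass; the pitch values below are fixed defaults.
 */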
1198 #define VSC_DATA_SIZE(pitch) ((pitch) * 32 + 0x100) /* extra size to store VSC_SIZE */
1199 #define VSC_DATA2_SIZE(pitch) ((pitch) * 32)
1200
1201 device->vsc_data_pitch = 0x440 * 4;
1202 device->vsc_data2_pitch = 0x1040 * 4;
1203
1204 result = tu_bo_init_new(device, &device->vsc_data, VSC_DATA_SIZE(device->vsc_data_pitch));
1205 if (result != VK_SUCCESS)
1206 goto fail_vsc_data;
1207
1208 result = tu_bo_init_new(device, &device->vsc_data2, VSC_DATA2_SIZE(device->vsc_data2_pitch));
1209 if (result != VK_SUCCESS)
1210 goto fail_vsc_data2;
1211
1212 STATIC_ASSERT(sizeof(struct bcolor_entry) == 128);
1213 result = tu_bo_init_new(device, &device->border_color, sizeof(border_color));
1214 if (result != VK_SUCCESS)
1215 goto fail_border_color;
1216
1217 result = tu_bo_map(device, &device->border_color);
1218 if (result != VK_SUCCESS)
1219 goto fail_border_color_map;
1220
1221 memcpy(device->border_color.map, border_color, sizeof(border_color));
1222
1223 VkPipelineCacheCreateInfo ci;
1224 ci.sType = VK_STRUCTURE_TYPE_PIPELINE_CACHE_CREATE_INFO;
1225 ci.pNext = NULL;
1226 ci.flags = 0;
1227 ci.pInitialData = NULL;
1228 ci.initialDataSize = 0;
1229 VkPipelineCache pc;
1230 result =
1231 tu_CreatePipelineCache(tu_device_to_handle(device), &ci, NULL, &pc);
1232 if (result != VK_SUCCESS)
1233 goto fail_pipeline_cache;
1234
1235 device->mem_cache = tu_pipeline_cache_from_handle(pc);
1236
1237 *pDevice = tu_device_to_handle(device);
1238 return VK_SUCCESS;
1239
1240 fail_pipeline_cache:
1241 fail_border_color_map:
1242 tu_bo_finish(device, &device->border_color);
1243
1244 fail_border_color:
1245 tu_bo_finish(device, &device->vsc_data2);
1246
1247 fail_vsc_data2:
1248 tu_bo_finish(device, &device->vsc_data);
1249
1250 fail_vsc_data:
1251 ralloc_free(device->compiler);
1252
1253 fail_queues:
1254 for (unsigned i = 0; i < TU_MAX_QUEUE_FAMILIES; i++) {
1255 for (unsigned q = 0; q < device->queue_count[i]; q++)
1256 tu_queue_finish(&device->queues[i][q]);
1257 if (device->queue_count[i])
1258 vk_free(&device->alloc, device->queues[i]);
1259 }
1260
1261 vk_free(&device->alloc, device);
1262 return result;
1263 }
1264
1265 void
1266 tu_DestroyDevice(VkDevice _device, const VkAllocationCallbacks *pAllocator)
1267 {
1268 TU_FROM_HANDLE(tu_device, device, _device);
1269
1270 if (!device)
1271 return;
1272
1273 tu_bo_finish(device, &device->vsc_data);
1274 tu_bo_finish(device, &device->vsc_data2);
1275
1276 for (unsigned i = 0; i < TU_MAX_QUEUE_FAMILIES; i++) {
1277 for (unsigned q = 0; q < device->queue_count[i]; q++)
1278 tu_queue_finish(&device->queues[i][q]);
1279 if (device->queue_count[i])
1280 vk_free(&device->alloc, device->queues[i]);
1281 }
1282
1283 /* the compiler does not use pAllocator */
1284 ralloc_free(device->compiler);
1285
1286 VkPipelineCache pc = tu_pipeline_cache_to_handle(device->mem_cache);
1287 tu_DestroyPipelineCache(tu_device_to_handle(device), pc, NULL);
1288
1289 vk_free(&device->alloc, device);
1290 }
1291
1292 VkResult
1293 tu_EnumerateInstanceLayerProperties(uint32_t *pPropertyCount,
1294 VkLayerProperties *pProperties)
1295 {
1296 *pPropertyCount = 0;
1297 return VK_SUCCESS;
1298 }
1299
1300 VkResult
1301 tu_EnumerateDeviceLayerProperties(VkPhysicalDevice physicalDevice,
1302 uint32_t *pPropertyCount,
1303 VkLayerProperties *pProperties)
1304 {
1305 *pPropertyCount = 0;
1306 return VK_SUCCESS;
1307 }
1308
1309 void
1310 tu_GetDeviceQueue2(VkDevice _device,
1311 const VkDeviceQueueInfo2 *pQueueInfo,
1312 VkQueue *pQueue)
1313 {
1314 TU_FROM_HANDLE(tu_device, device, _device);
1315 struct tu_queue *queue;
1316
1317 queue =
1318 &device->queues[pQueueInfo->queueFamilyIndex][pQueueInfo->queueIndex];
1319 if (pQueueInfo->flags != queue->flags) {
1320 /* From the Vulkan 1.1.70 spec:
1321 *
1322 * "The queue returned by vkGetDeviceQueue2 must have the same
1323 * flags value from this structure as that used at device
1324 * creation time in a VkDeviceQueueCreateInfo instance. If no
1325 * matching flags were specified at device creation time then
1326 * pQueue will return VK_NULL_HANDLE."
1327 */
1328 *pQueue = VK_NULL_HANDLE;
1329 return;
1330 }
1331
1332 *pQueue = tu_queue_to_handle(queue);
1333 }
1334
1335 void
1336 tu_GetDeviceQueue(VkDevice _device,
1337 uint32_t queueFamilyIndex,
1338 uint32_t queueIndex,
1339 VkQueue *pQueue)
1340 {
1341 const VkDeviceQueueInfo2 info =
1342 (VkDeviceQueueInfo2) { .sType = VK_STRUCTURE_TYPE_DEVICE_QUEUE_INFO_2,
1343 .queueFamilyIndex = queueFamilyIndex,
1344 .queueIndex = queueIndex };
1345
1346 tu_GetDeviceQueue2(_device, &info, pQueue);
1347 }
1348
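/* Each VkSubmitInfo is flattened into a single DRM_MSM_GEM_SUBMIT ioctl:
 * one drm_msm_gem_submit_cmd entry per command-stream IB plus a merged BO
 * list. Only the last submit requests an out-fence fd, since queue
 * execution is serialized.
 */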
1349 VkResult
1350 tu_QueueSubmit(VkQueue _queue,
1351 uint32_t submitCount,
1352 const VkSubmitInfo *pSubmits,
1353 VkFence _fence)
1354 {
1355 TU_FROM_HANDLE(tu_queue, queue, _queue);
1356
1357 for (uint32_t i = 0; i < submitCount; ++i) {
1358 const VkSubmitInfo *submit = pSubmits + i;
1359 const bool last_submit = (i == submitCount - 1);
1360 struct tu_bo_list bo_list;
1361 tu_bo_list_init(&bo_list);
1362
1363 uint32_t entry_count = 0;
1364 for (uint32_t j = 0; j < submit->commandBufferCount; ++j) {
1365 TU_FROM_HANDLE(tu_cmd_buffer, cmdbuf, submit->pCommandBuffers[j]);
1366 entry_count += cmdbuf->cs.entry_count;
1367 }
1368
1369 struct drm_msm_gem_submit_cmd cmds[entry_count];
1370 uint32_t entry_idx = 0;
1371 for (uint32_t j = 0; j < submit->commandBufferCount; ++j) {
1372 TU_FROM_HANDLE(tu_cmd_buffer, cmdbuf, submit->pCommandBuffers[j]);
1373 struct tu_cs *cs = &cmdbuf->cs;
1374 for (unsigned i = 0; i < cs->entry_count; ++i, ++entry_idx) {
1375 cmds[entry_idx].type = MSM_SUBMIT_CMD_BUF;
1376 cmds[entry_idx].submit_idx =
1377 tu_bo_list_add(&bo_list, cs->entries[i].bo,
1378 MSM_SUBMIT_BO_READ | MSM_SUBMIT_BO_DUMP);
1379 cmds[entry_idx].submit_offset = cs->entries[i].offset;
1380 cmds[entry_idx].size = cs->entries[i].size;
1381 cmds[entry_idx].pad = 0;
1382 cmds[entry_idx].nr_relocs = 0;
1383 cmds[entry_idx].relocs = 0;
1384 }
1385
1386 tu_bo_list_merge(&bo_list, &cmdbuf->bo_list);
1387 }
1388
1389 uint32_t flags = MSM_PIPE_3D0;
1390 if (last_submit) {
1391 flags |= MSM_SUBMIT_FENCE_FD_OUT;
1392 }
1393
1394 struct drm_msm_gem_submit req = {
1395 .flags = flags,
1396 .queueid = queue->msm_queue_id,
1397 .bos = (uint64_t)(uintptr_t) bo_list.bo_infos,
1398 .nr_bos = bo_list.count,
1399 .cmds = (uint64_t)(uintptr_t)cmds,
1400 .nr_cmds = entry_count,
1401 };
1402
1403 int ret = drmCommandWriteRead(queue->device->physical_device->local_fd,
1404 DRM_MSM_GEM_SUBMIT,
1405 &req, sizeof(req));
1406 if (ret) {
1407 fprintf(stderr, "submit failed: %s\n", strerror(errno));
1408 abort();
1409 }
1410
1411 tu_bo_list_destroy(&bo_list);
1412
1413 if (last_submit) {
1414 /* no need to merge fences as queue execution is serialized */
1415 tu_fence_update_fd(&queue->submit_fence, req.fence_fd);
1416 }
1417 }
1418
1419 if (_fence != VK_NULL_HANDLE) {
1420 TU_FROM_HANDLE(tu_fence, fence, _fence);
1421 tu_fence_copy(fence, &queue->submit_fence);
1422 }
1423
1424 return VK_SUCCESS;
1425 }
1426
1427 VkResult
1428 tu_QueueWaitIdle(VkQueue _queue)
1429 {
1430 TU_FROM_HANDLE(tu_queue, queue, _queue);
1431
1432 tu_fence_wait_idle(&queue->submit_fence);
1433
1434 return VK_SUCCESS;
1435 }
1436
1437 VkResult
1438 tu_DeviceWaitIdle(VkDevice _device)
1439 {
1440 TU_FROM_HANDLE(tu_device, device, _device);
1441
1442 for (unsigned i = 0; i < TU_MAX_QUEUE_FAMILIES; i++) {
1443 for (unsigned q = 0; q < device->queue_count[i]; q++) {
1444 tu_QueueWaitIdle(tu_queue_to_handle(&device->queues[i][q]));
1445 }
1446 }
1447 return VK_SUCCESS;
1448 }
1449
1450 VkResult
1451 tu_ImportSemaphoreFdKHR(VkDevice _device,
1452 const VkImportSemaphoreFdInfoKHR *pImportSemaphoreFdInfo)
1453 {
1454 tu_stub();
1455
1456 return VK_SUCCESS;
1457 }
1458
1459 VkResult
1460 tu_GetSemaphoreFdKHR(VkDevice _device,
1461 const VkSemaphoreGetFdInfoKHR *pGetFdInfo,
1462 int *pFd)
1463 {
1464 tu_stub();
1465
1466 return VK_SUCCESS;
1467 }
1468
1469 VkResult
1470 tu_ImportFenceFdKHR(VkDevice _device,
1471 const VkImportFenceFdInfoKHR *pImportFenceFdInfo)
1472 {
1473 tu_stub();
1474
1475 return VK_SUCCESS;
1476 }
1477
1478 VkResult
1479 tu_GetFenceFdKHR(VkDevice _device,
1480 const VkFenceGetFdInfoKHR *pGetFdInfo,
1481 int *pFd)
1482 {
1483 tu_stub();
1484
1485 return VK_SUCCESS;
1486 }
1487
1488 VkResult
1489 tu_EnumerateInstanceExtensionProperties(const char *pLayerName,
1490 uint32_t *pPropertyCount,
1491 VkExtensionProperties *pProperties)
1492 {
1493 VK_OUTARRAY_MAKE(out, pProperties, pPropertyCount);
1494
1495 /* We support no layers */
1496 if (pLayerName)
1497 return vk_error(NULL, VK_ERROR_LAYER_NOT_PRESENT);
1498
1499 for (int i = 0; i < TU_INSTANCE_EXTENSION_COUNT; i++) {
1500 if (tu_supported_instance_extensions.extensions[i]) {
1501 vk_outarray_append(&out, prop) { *prop = tu_instance_extensions[i]; }
1502 }
1503 }
1504
1505 return vk_outarray_status(&out);
1506 }
1507
1508 VkResult
1509 tu_EnumerateDeviceExtensionProperties(VkPhysicalDevice physicalDevice,
1510 const char *pLayerName,
1511 uint32_t *pPropertyCount,
1512 VkExtensionProperties *pProperties)
1513 {
1514 /* We support no layers */
1515 TU_FROM_HANDLE(tu_physical_device, device, physicalDevice);
1516 VK_OUTARRAY_MAKE(out, pProperties, pPropertyCount);
1517
1518 /* We support no layers */
1519 if (pLayerName)
1520 return vk_error(NULL, VK_ERROR_LAYER_NOT_PRESENT);
1521
1522 for (int i = 0; i < TU_DEVICE_EXTENSION_COUNT; i++) {
1523 if (device->supported_extensions.extensions[i]) {
1524 vk_outarray_append(&out, prop) { *prop = tu_device_extensions[i]; }
1525 }
1526 }
1527
1528 return vk_outarray_status(&out);
1529 }
1530
1531 PFN_vkVoidFunction
1532 tu_GetInstanceProcAddr(VkInstance _instance, const char *pName)
1533 {
1534 TU_FROM_HANDLE(tu_instance, instance, _instance);
1535
1536 return tu_lookup_entrypoint_checked(
1537 pName, instance ? instance->api_version : 0,
1538 instance ? &instance->enabled_extensions : NULL, NULL);
1539 }
1540
1541 /* The loader wants us to expose a second GetInstanceProcAddr function
1542 * to work around certain LD_PRELOAD issues seen in apps.
1543 */
1544 PUBLIC
1545 VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL
1546 vk_icdGetInstanceProcAddr(VkInstance instance, const char *pName);
1547
1548 PUBLIC
1549 VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL
1550 vk_icdGetInstanceProcAddr(VkInstance instance, const char *pName)
1551 {
1552 return tu_GetInstanceProcAddr(instance, pName);
1553 }
1554
1555 PFN_vkVoidFunction
1556 tu_GetDeviceProcAddr(VkDevice _device, const char *pName)
1557 {
1558 TU_FROM_HANDLE(tu_device, device, _device);
1559
1560 return tu_lookup_entrypoint_checked(pName, device->instance->api_version,
1561 &device->instance->enabled_extensions,
1562 &device->enabled_extensions);
1563 }
1564
1565 static VkResult
1566 tu_alloc_memory(struct tu_device *device,
1567 const VkMemoryAllocateInfo *pAllocateInfo,
1568 const VkAllocationCallbacks *pAllocator,
1569 VkDeviceMemory *pMem)
1570 {
1571 struct tu_device_memory *mem;
1572 VkResult result;
1573
1574 assert(pAllocateInfo->sType == VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO);
1575
1576 if (pAllocateInfo->allocationSize == 0) {
1577 /* Apparently, this is allowed */
1578 *pMem = VK_NULL_HANDLE;
1579 return VK_SUCCESS;
1580 }
1581
1582 mem = vk_alloc2(&device->alloc, pAllocator, sizeof(*mem), 8,
1583 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
1584 if (mem == NULL)
1585 return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
1586
1587 const VkImportMemoryFdInfoKHR *fd_info =
1588 vk_find_struct_const(pAllocateInfo->pNext, IMPORT_MEMORY_FD_INFO_KHR);
1589 if (fd_info && !fd_info->handleType)
1590 fd_info = NULL;
1591
1592 if (fd_info) {
1593 assert(fd_info->handleType ==
1594 VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT ||
1595 fd_info->handleType ==
1596 VK_EXTERNAL_MEMORY_HANDLE_TYPE_DMA_BUF_BIT_EXT);
1597
1598 /*
1599 * TODO Importing the same fd twice gives us the same handle without
1600 * reference counting. We need to maintain a per-instance handle-to-bo
1601 * table and add reference count to tu_bo.
1602 */
1603 result = tu_bo_init_dmabuf(device, &mem->bo,
1604 pAllocateInfo->allocationSize, fd_info->fd);
1605 if (result == VK_SUCCESS) {
1606 /* take ownership and close the fd */
1607 close(fd_info->fd);
1608 }
1609 } else {
1610 result =
1611 tu_bo_init_new(device, &mem->bo, pAllocateInfo->allocationSize);
1612 }
1613
1614 if (result != VK_SUCCESS) {
1615 vk_free2(&device->alloc, pAllocator, mem);
1616 return result;
1617 }
1618
1619 mem->size = pAllocateInfo->allocationSize;
1620 mem->type_index = pAllocateInfo->memoryTypeIndex;
1621
1622 mem->map = NULL;
1623 mem->user_ptr = NULL;
1624
1625 *pMem = tu_device_memory_to_handle(mem);
1626
1627 return VK_SUCCESS;
1628 }
1629
1630 VkResult
1631 tu_AllocateMemory(VkDevice _device,
1632 const VkMemoryAllocateInfo *pAllocateInfo,
1633 const VkAllocationCallbacks *pAllocator,
1634 VkDeviceMemory *pMem)
1635 {
1636 TU_FROM_HANDLE(tu_device, device, _device);
1637 return tu_alloc_memory(device, pAllocateInfo, pAllocator, pMem);
1638 }
1639
1640 void
1641 tu_FreeMemory(VkDevice _device,
1642 VkDeviceMemory _mem,
1643 const VkAllocationCallbacks *pAllocator)
1644 {
1645 TU_FROM_HANDLE(tu_device, device, _device);
1646 TU_FROM_HANDLE(tu_device_memory, mem, _mem);
1647
1648 if (mem == NULL)
1649 return;
1650
1651 tu_bo_finish(device, &mem->bo);
1652 vk_free2(&device->alloc, pAllocator, mem);
1653 }
1654
1655 VkResult
1656 tu_MapMemory(VkDevice _device,
1657 VkDeviceMemory _memory,
1658 VkDeviceSize offset,
1659 VkDeviceSize size,
1660 VkMemoryMapFlags flags,
1661 void **ppData)
1662 {
1663 TU_FROM_HANDLE(tu_device, device, _device);
1664 TU_FROM_HANDLE(tu_device_memory, mem, _memory);
1665 VkResult result;
1666
1667 if (mem == NULL) {
1668 *ppData = NULL;
1669 return VK_SUCCESS;
1670 }
1671
1672 if (mem->user_ptr) {
1673 *ppData = mem->user_ptr;
1674 } else if (!mem->map) {
1675 result = tu_bo_map(device, &mem->bo);
1676 if (result != VK_SUCCESS)
1677 return result;
1678 *ppData = mem->map = mem->bo.map;
1679 } else
1680 *ppData = mem->map;
1681
1682 if (*ppData) {
1683 *ppData += offset;
1684 return VK_SUCCESS;
1685 }
1686
1687 return vk_error(device->instance, VK_ERROR_MEMORY_MAP_FAILED);
1688 }
1689
1690 void
1691 tu_UnmapMemory(VkDevice _device, VkDeviceMemory _memory)
1692 {
1693 /* I do not see any unmapping done by the freedreno Gallium driver. */
1694 }
1695
1696 VkResult
1697 tu_FlushMappedMemoryRanges(VkDevice _device,
1698 uint32_t memoryRangeCount,
1699 const VkMappedMemoryRange *pMemoryRanges)
1700 {
1701 return VK_SUCCESS;
1702 }
1703
1704 VkResult
1705 tu_InvalidateMappedMemoryRanges(VkDevice _device,
1706 uint32_t memoryRangeCount,
1707 const VkMappedMemoryRange *pMemoryRanges)
1708 {
1709 return VK_SUCCESS;
1710 }
1711
1712 void
1713 tu_GetBufferMemoryRequirements(VkDevice _device,
1714 VkBuffer _buffer,
1715 VkMemoryRequirements *pMemoryRequirements)
1716 {
1717 TU_FROM_HANDLE(tu_buffer, buffer, _buffer);
1718
1719 pMemoryRequirements->memoryTypeBits = 1;
1720 pMemoryRequirements->alignment = 16;
1721 pMemoryRequirements->size =
1722 align64(buffer->size, pMemoryRequirements->alignment);
1723 }
1724
1725 void
1726 tu_GetBufferMemoryRequirements2(
1727 VkDevice device,
1728 const VkBufferMemoryRequirementsInfo2 *pInfo,
1729 VkMemoryRequirements2 *pMemoryRequirements)
1730 {
1731 tu_GetBufferMemoryRequirements(device, pInfo->buffer,
1732 &pMemoryRequirements->memoryRequirements);
1733 }
1734
1735 void
1736 tu_GetImageMemoryRequirements(VkDevice _device,
1737 VkImage _image,
1738 VkMemoryRequirements *pMemoryRequirements)
1739 {
1740 TU_FROM_HANDLE(tu_image, image, _image);
1741
1742 pMemoryRequirements->memoryTypeBits = 1;
1743 pMemoryRequirements->size = image->layout.size;
1744 pMemoryRequirements->alignment = image->layout.base_align;
1745 }
1746
1747 void
1748 tu_GetImageMemoryRequirements2(VkDevice device,
1749 const VkImageMemoryRequirementsInfo2 *pInfo,
1750 VkMemoryRequirements2 *pMemoryRequirements)
1751 {
1752 tu_GetImageMemoryRequirements(device, pInfo->image,
1753 &pMemoryRequirements->memoryRequirements);
1754 }
1755
1756 void
1757 tu_GetImageSparseMemoryRequirements(
1758 VkDevice device,
1759 VkImage image,
1760 uint32_t *pSparseMemoryRequirementCount,
1761 VkSparseImageMemoryRequirements *pSparseMemoryRequirements)
1762 {
1763 tu_stub();
1764 }
1765
1766 void
1767 tu_GetImageSparseMemoryRequirements2(
1768 VkDevice device,
1769 const VkImageSparseMemoryRequirementsInfo2 *pInfo,
1770 uint32_t *pSparseMemoryRequirementCount,
1771 VkSparseImageMemoryRequirements2 *pSparseMemoryRequirements)
1772 {
1773 tu_stub();
1774 }
1775
1776 void
1777 tu_GetDeviceMemoryCommitment(VkDevice device,
1778 VkDeviceMemory memory,
1779 VkDeviceSize *pCommittedMemoryInBytes)
1780 {
1781 *pCommittedMemoryInBytes = 0;
1782 }
1783
1784 VkResult
1785 tu_BindBufferMemory2(VkDevice device,
1786 uint32_t bindInfoCount,
1787 const VkBindBufferMemoryInfo *pBindInfos)
1788 {
1789 for (uint32_t i = 0; i < bindInfoCount; ++i) {
1790 TU_FROM_HANDLE(tu_device_memory, mem, pBindInfos[i].memory);
1791 TU_FROM_HANDLE(tu_buffer, buffer, pBindInfos[i].buffer);
1792
1793 if (mem) {
1794 buffer->bo = &mem->bo;
1795 buffer->bo_offset = pBindInfos[i].memoryOffset;
1796 } else {
1797          buffer->bo = NULL;
              buffer->bo_offset = 0;
1798 }
1799 }
1800 return VK_SUCCESS;
1801 }
1802
1803 VkResult
1804 tu_BindBufferMemory(VkDevice device,
1805 VkBuffer buffer,
1806 VkDeviceMemory memory,
1807 VkDeviceSize memoryOffset)
1808 {
1809 const VkBindBufferMemoryInfo info = {
1810 .sType = VK_STRUCTURE_TYPE_BIND_BUFFER_MEMORY_INFO,
1811 .buffer = buffer,
1812 .memory = memory,
1813 .memoryOffset = memoryOffset
1814 };
1815
1816 return tu_BindBufferMemory2(device, 1, &info);
1817 }
1818
1819 VkResult
1820 tu_BindImageMemory2(VkDevice device,
1821 uint32_t bindInfoCount,
1822 const VkBindImageMemoryInfo *pBindInfos)
1823 {
1824 for (uint32_t i = 0; i < bindInfoCount; ++i) {
1825 TU_FROM_HANDLE(tu_image, image, pBindInfos[i].image);
1826 TU_FROM_HANDLE(tu_device_memory, mem, pBindInfos[i].memory);
1827
1828 if (mem) {
1829 image->bo = &mem->bo;
1830 image->bo_offset = pBindInfos[i].memoryOffset;
1831 } else {
1832 image->bo = NULL;
1833 image->bo_offset = 0;
1834 }
1835 }
1836
1837 return VK_SUCCESS;
1838 }
1839
1840 VkResult
1841 tu_BindImageMemory(VkDevice device,
1842 VkImage image,
1843 VkDeviceMemory memory,
1844 VkDeviceSize memoryOffset)
1845 {
1846 const VkBindImageMemoryInfo info = {
1847       .sType = VK_STRUCTURE_TYPE_BIND_IMAGE_MEMORY_INFO,
1848 .image = image,
1849 .memory = memory,
1850 .memoryOffset = memoryOffset
1851 };
1852
1853 return tu_BindImageMemory2(device, 1, &info);
1854 }
1855
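/* Sparse resources are not supported, so there is nothing to bind here. */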
1856 VkResult
1857 tu_QueueBindSparse(VkQueue _queue,
1858 uint32_t bindInfoCount,
1859 const VkBindSparseInfo *pBindInfo,
1860 VkFence _fence)
1861 {
1862 return VK_SUCCESS;
1863 }
1864
1865 // Queue semaphore functions
1866
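/* Semaphores currently carry no payload: with a single queue, submissions are
 * already ordered, and no external semaphore handle types are advertised (see
 * tu_GetPhysicalDeviceExternalSemaphoreProperties() below).
 */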
1867 VkResult
1868 tu_CreateSemaphore(VkDevice _device,
1869 const VkSemaphoreCreateInfo *pCreateInfo,
1870 const VkAllocationCallbacks *pAllocator,
1871 VkSemaphore *pSemaphore)
1872 {
1873 TU_FROM_HANDLE(tu_device, device, _device);
1874
1875 struct tu_semaphore *sem =
1876 vk_alloc2(&device->alloc, pAllocator, sizeof(*sem), 8,
1877 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
1878 if (!sem)
1879 return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
1880
1881 *pSemaphore = tu_semaphore_to_handle(sem);
1882 return VK_SUCCESS;
1883 }
1884
1885 void
1886 tu_DestroySemaphore(VkDevice _device,
1887 VkSemaphore _semaphore,
1888 const VkAllocationCallbacks *pAllocator)
1889 {
1890 TU_FROM_HANDLE(tu_device, device, _device);
1891 TU_FROM_HANDLE(tu_semaphore, sem, _semaphore);
1892 if (!_semaphore)
1893 return;
1894
1895 vk_free2(&device->alloc, pAllocator, sem);
1896 }
1897
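/* An event is backed by a small GPU-visible BO holding a single 64-bit word,
 * so it can be written and polled from the CPU here and, eventually, from the
 * command stream as well.
 */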
1898 VkResult
1899 tu_CreateEvent(VkDevice _device,
1900 const VkEventCreateInfo *pCreateInfo,
1901 const VkAllocationCallbacks *pAllocator,
1902 VkEvent *pEvent)
1903 {
1904 TU_FROM_HANDLE(tu_device, device, _device);
1905 struct tu_event *event =
1906 vk_alloc2(&device->alloc, pAllocator, sizeof(*event), 8,
1907 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
1908
1909 if (!event)
1910 return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
1911
1912 VkResult result = tu_bo_init_new(device, &event->bo, 0x1000);
1913 if (result != VK_SUCCESS)
1914 goto fail_alloc;
1915
1916 result = tu_bo_map(device, &event->bo);
1917 if (result != VK_SUCCESS)
1918 goto fail_map;
1919
1920 *pEvent = tu_event_to_handle(event);
1921
1922 return VK_SUCCESS;
1923
1924 fail_map:
1925 tu_bo_finish(device, &event->bo);
1926 fail_alloc:
1927 vk_free2(&device->alloc, pAllocator, event);
1928 return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
1929 }
1930
1931 void
1932 tu_DestroyEvent(VkDevice _device,
1933 VkEvent _event,
1934 const VkAllocationCallbacks *pAllocator)
1935 {
1936 TU_FROM_HANDLE(tu_device, device, _device);
1937 TU_FROM_HANDLE(tu_event, event, _event);
1938
1939 if (!event)
1940 return;
1941
1942 tu_bo_finish(device, &event->bo);
1943 vk_free2(&device->alloc, pAllocator, event);
1944 }
1945
1946 VkResult
1947 tu_GetEventStatus(VkDevice _device, VkEvent _event)
1948 {
1949 TU_FROM_HANDLE(tu_event, event, _event);
1950
1951 if (*(uint64_t*) event->bo.map == 1)
1952 return VK_EVENT_SET;
1953 return VK_EVENT_RESET;
1954 }
1955
1956 VkResult
1957 tu_SetEvent(VkDevice _device, VkEvent _event)
1958 {
1959 TU_FROM_HANDLE(tu_event, event, _event);
1960 *(uint64_t*) event->bo.map = 1;
1961
1962 return VK_SUCCESS;
1963 }
1964
1965 VkResult
1966 tu_ResetEvent(VkDevice _device, VkEvent _event)
1967 {
1968 TU_FROM_HANDLE(tu_event, event, _event);
1969 *(uint64_t*) event->bo.map = 0;
1970
1971 return VK_SUCCESS;
1972 }
1973
1974 VkResult
1975 tu_CreateBuffer(VkDevice _device,
1976 const VkBufferCreateInfo *pCreateInfo,
1977 const VkAllocationCallbacks *pAllocator,
1978 VkBuffer *pBuffer)
1979 {
1980 TU_FROM_HANDLE(tu_device, device, _device);
1981 struct tu_buffer *buffer;
1982
1983 assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO);
1984
1985 buffer = vk_alloc2(&device->alloc, pAllocator, sizeof(*buffer), 8,
1986 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
1987 if (buffer == NULL)
1988 return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
1989
1990 buffer->size = pCreateInfo->size;
1991 buffer->usage = pCreateInfo->usage;
1992 buffer->flags = pCreateInfo->flags;
1993
1994 *pBuffer = tu_buffer_to_handle(buffer);
1995
1996 return VK_SUCCESS;
1997 }
1998
1999 void
2000 tu_DestroyBuffer(VkDevice _device,
2001 VkBuffer _buffer,
2002 const VkAllocationCallbacks *pAllocator)
2003 {
2004 TU_FROM_HANDLE(tu_device, device, _device);
2005 TU_FROM_HANDLE(tu_buffer, buffer, _buffer);
2006
2007 if (!buffer)
2008 return;
2009
2010 vk_free2(&device->alloc, pAllocator, buffer);
2011 }
2012
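/* For a 3D view the renderable layers are its depth slices; for array views
 * the last accessible array layer bounds the layer count.
 */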
2013 static uint32_t
2014 tu_surface_max_layer_count(struct tu_image_view *iview)
2015 {
2016 return iview->type == VK_IMAGE_VIEW_TYPE_3D
2017 ? iview->extent.depth
2018 : (iview->base_layer + iview->layer_count);
2019 }
2020
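/* The framebuffer dimensions are clamped to what the smallest attachment can
 * back; valid usage already requires attachments to be at least this large,
 * so this is only defensive.
 */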
2021 VkResult
2022 tu_CreateFramebuffer(VkDevice _device,
2023 const VkFramebufferCreateInfo *pCreateInfo,
2024 const VkAllocationCallbacks *pAllocator,
2025 VkFramebuffer *pFramebuffer)
2026 {
2027 TU_FROM_HANDLE(tu_device, device, _device);
2028 struct tu_framebuffer *framebuffer;
2029
2030 assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO);
2031
2032 size_t size = sizeof(*framebuffer) + sizeof(struct tu_attachment_info) *
2033 pCreateInfo->attachmentCount;
2034 framebuffer = vk_alloc2(&device->alloc, pAllocator, size, 8,
2035 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
2036 if (framebuffer == NULL)
2037 return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
2038
2039 framebuffer->attachment_count = pCreateInfo->attachmentCount;
2040 framebuffer->width = pCreateInfo->width;
2041 framebuffer->height = pCreateInfo->height;
2042 framebuffer->layers = pCreateInfo->layers;
2043 for (uint32_t i = 0; i < pCreateInfo->attachmentCount; i++) {
2044 VkImageView _iview = pCreateInfo->pAttachments[i];
2045 struct tu_image_view *iview = tu_image_view_from_handle(_iview);
2046 framebuffer->attachments[i].attachment = iview;
2047
2048 framebuffer->width = MIN2(framebuffer->width, iview->extent.width);
2049 framebuffer->height = MIN2(framebuffer->height, iview->extent.height);
2050 framebuffer->layers =
2051 MIN2(framebuffer->layers, tu_surface_max_layer_count(iview));
2052 }
2053
2054 *pFramebuffer = tu_framebuffer_to_handle(framebuffer);
2055 return VK_SUCCESS;
2056 }
2057
2058 void
2059 tu_DestroyFramebuffer(VkDevice _device,
2060 VkFramebuffer _fb,
2061 const VkAllocationCallbacks *pAllocator)
2062 {
2063 TU_FROM_HANDLE(tu_device, device, _device);
2064 TU_FROM_HANDLE(tu_framebuffer, fb, _fb);
2065
2066 if (!fb)
2067 return;
2068 vk_free2(&device->alloc, pAllocator, fb);
2069 }
2070
2071 static enum a6xx_tex_clamp
2072 tu6_tex_wrap(VkSamplerAddressMode address_mode)
2073 {
2074 switch (address_mode) {
2075 case VK_SAMPLER_ADDRESS_MODE_REPEAT:
2076 return A6XX_TEX_REPEAT;
2077 case VK_SAMPLER_ADDRESS_MODE_MIRRORED_REPEAT:
2078 return A6XX_TEX_MIRROR_REPEAT;
2079 case VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE:
2080 return A6XX_TEX_CLAMP_TO_EDGE;
2081 case VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_BORDER:
2082 return A6XX_TEX_CLAMP_TO_BORDER;
2083 case VK_SAMPLER_ADDRESS_MODE_MIRROR_CLAMP_TO_EDGE:
2084       /* Only correct for power-of-two sizes; other sizes would need emulation. */
2085 return A6XX_TEX_MIRROR_CLAMP;
2086 default:
2087 unreachable("illegal tex wrap mode");
2088 break;
2089 }
2090 }
2091
2092 static enum a6xx_tex_filter
2093 tu6_tex_filter(VkFilter filter, unsigned aniso)
2094 {
2095 switch (filter) {
2096 case VK_FILTER_NEAREST:
2097 return A6XX_TEX_NEAREST;
2098 case VK_FILTER_LINEAR:
2099 return aniso ? A6XX_TEX_ANISO : A6XX_TEX_LINEAR;
2100 case VK_FILTER_CUBIC_IMG:
2101 default:
2102 unreachable("illegal texture filter");
2103 break;
2104 }
2105 }
2106
2107 static inline enum adreno_compare_func
2108 tu6_compare_func(VkCompareOp op)
2109 {
2110 return (enum adreno_compare_func) op;
2111 }
2112
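/* Pack the VkSamplerCreateInfo state into the four 32-bit words of the a6xx
 * TEX_SAMP descriptor that descriptor sets consume directly.
 */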
2113 static void
2114 tu_init_sampler(struct tu_device *device,
2115 struct tu_sampler *sampler,
2116 const VkSamplerCreateInfo *pCreateInfo)
2117 {
2118 unsigned aniso = pCreateInfo->anisotropyEnable ?
2119 util_last_bit(MIN2((uint32_t)pCreateInfo->maxAnisotropy >> 1, 8)) : 0;
2120 bool miplinear = (pCreateInfo->mipmapMode == VK_SAMPLER_MIPMAP_MODE_LINEAR);
2121
2122 sampler->descriptor[0] =
2123 COND(miplinear, A6XX_TEX_SAMP_0_MIPFILTER_LINEAR_NEAR) |
2124 A6XX_TEX_SAMP_0_XY_MAG(tu6_tex_filter(pCreateInfo->magFilter, aniso)) |
2125 A6XX_TEX_SAMP_0_XY_MIN(tu6_tex_filter(pCreateInfo->minFilter, aniso)) |
2126 A6XX_TEX_SAMP_0_ANISO(aniso) |
2127 A6XX_TEX_SAMP_0_WRAP_S(tu6_tex_wrap(pCreateInfo->addressModeU)) |
2128 A6XX_TEX_SAMP_0_WRAP_T(tu6_tex_wrap(pCreateInfo->addressModeV)) |
2129 A6XX_TEX_SAMP_0_WRAP_R(tu6_tex_wrap(pCreateInfo->addressModeW)) |
2130 A6XX_TEX_SAMP_0_LOD_BIAS(pCreateInfo->mipLodBias);
2131 sampler->descriptor[1] =
2132 /* COND(!cso->seamless_cube_map, A6XX_TEX_SAMP_1_CUBEMAPSEAMLESSFILTOFF) | */
2133 COND(pCreateInfo->unnormalizedCoordinates, A6XX_TEX_SAMP_1_UNNORM_COORDS) |
2134 A6XX_TEX_SAMP_1_MIN_LOD(pCreateInfo->minLod) |
2135 A6XX_TEX_SAMP_1_MAX_LOD(pCreateInfo->maxLod) |
2136 COND(pCreateInfo->compareEnable,
2137 A6XX_TEX_SAMP_1_COMPARE_FUNC(tu6_compare_func(pCreateInfo->compareOp)));
2138 /* This is an offset into the border_color BO, which we fill with all the
2139 * possible Vulkan border colors in the correct order, so we can just use
2140 * the Vulkan enum with no translation necessary.
2141 */
2142 sampler->descriptor[2] =
2143 A6XX_TEX_SAMP_2_BCOLOR_OFFSET((unsigned) pCreateInfo->borderColor *
2144 sizeof(struct bcolor_entry));
2145 sampler->descriptor[3] = 0;
2146
2147 /* TODO:
2148     * A6XX_TEX_SAMP_1_MIPFILTER_LINEAR_FAR disables mipmapping, but Vulkan has no 'none' mipmap mode to map it to.
2149 */
2150 }
2151
2152 VkResult
2153 tu_CreateSampler(VkDevice _device,
2154 const VkSamplerCreateInfo *pCreateInfo,
2155 const VkAllocationCallbacks *pAllocator,
2156 VkSampler *pSampler)
2157 {
2158 TU_FROM_HANDLE(tu_device, device, _device);
2159 struct tu_sampler *sampler;
2160
2161 assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_SAMPLER_CREATE_INFO);
2162
2163 sampler = vk_alloc2(&device->alloc, pAllocator, sizeof(*sampler), 8,
2164 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
2165 if (!sampler)
2166 return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
2167
2168 tu_init_sampler(device, sampler, pCreateInfo);
2169 *pSampler = tu_sampler_to_handle(sampler);
2170
2171 return VK_SUCCESS;
2172 }
2173
2174 void
2175 tu_DestroySampler(VkDevice _device,
2176 VkSampler _sampler,
2177 const VkAllocationCallbacks *pAllocator)
2178 {
2179 TU_FROM_HANDLE(tu_device, device, _device);
2180 TU_FROM_HANDLE(tu_sampler, sampler, _sampler);
2181
2182 if (!sampler)
2183 return;
2184 vk_free2(&device->alloc, pAllocator, sampler);
2185 }
2186
2187 /* vk_icd.h does not declare this function, so we declare it here to
2188 * suppress Wmissing-prototypes.
2189 */
2190 PUBLIC VKAPI_ATTR VkResult VKAPI_CALL
2191 vk_icdNegotiateLoaderICDInterfaceVersion(uint32_t *pSupportedVersion);
2192
2193 PUBLIC VKAPI_ATTR VkResult VKAPI_CALL
2194 vk_icdNegotiateLoaderICDInterfaceVersion(uint32_t *pSupportedVersion)
2195 {
2196 /* For the full details on loader interface versioning, see
2197 * <https://github.com/KhronosGroup/Vulkan-LoaderAndValidationLayers/blob/master/loader/LoaderAndLayerInterface.md>.
2198 * What follows is a condensed summary, to help you navigate the large and
2199 * confusing official doc.
2200 *
2201 * - Loader interface v0 is incompatible with later versions. We don't
2202 * support it.
2203 *
2204 * - In loader interface v1:
2205 * - The first ICD entrypoint called by the loader is
2206 * vk_icdGetInstanceProcAddr(). The ICD must statically expose this
2207 * entrypoint.
2208 * - The ICD must statically expose no other Vulkan symbol unless it
2209 * is linked with -Bsymbolic.
2210 * - Each dispatchable Vulkan handle created by the ICD must be
2211 * a pointer to a struct whose first member is VK_LOADER_DATA. The
2212 * ICD must initialize VK_LOADER_DATA.loadMagic to
2213 * ICD_LOADER_MAGIC.
2214 * - The loader implements vkCreate{PLATFORM}SurfaceKHR() and
2215 * vkDestroySurfaceKHR(). The ICD must be capable of working with
2216 * such loader-managed surfaces.
2217 *
2218 * - Loader interface v2 differs from v1 in:
2219 * - The first ICD entrypoint called by the loader is
2220 * vk_icdNegotiateLoaderICDInterfaceVersion(). The ICD must
2221 * statically expose this entrypoint.
2222 *
2223 * - Loader interface v3 differs from v2 in:
2224 * - The ICD must implement vkCreate{PLATFORM}SurfaceKHR(),
2225    *      vkDestroySurfaceKHR(), and other API that uses VkSurfaceKHR,
2226 * because the loader no longer does so.
2227 */
2228 *pSupportedVersion = MIN2(*pSupportedVersion, 3u);
2229 return VK_SUCCESS;
2230 }
2231
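/* Both supported handle types are exported the same way, as a dma-buf fd
 * referring to the backing GEM BO.
 */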
2232 VkResult
2233 tu_GetMemoryFdKHR(VkDevice _device,
2234 const VkMemoryGetFdInfoKHR *pGetFdInfo,
2235 int *pFd)
2236 {
2237 TU_FROM_HANDLE(tu_device, device, _device);
2238 TU_FROM_HANDLE(tu_device_memory, memory, pGetFdInfo->memory);
2239
2240 assert(pGetFdInfo->sType == VK_STRUCTURE_TYPE_MEMORY_GET_FD_INFO_KHR);
2241
2242 /* At the moment, we support only the below handle types. */
2243 assert(pGetFdInfo->handleType ==
2244 VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT ||
2245 pGetFdInfo->handleType ==
2246 VK_EXTERNAL_MEMORY_HANDLE_TYPE_DMA_BUF_BIT_EXT);
2247
2248 int prime_fd = tu_bo_export_dmabuf(device, &memory->bo);
2249 if (prime_fd < 0)
2250 return vk_error(device->instance, VK_ERROR_OUT_OF_DEVICE_MEMORY);
2251
2252 *pFd = prime_fd;
2253 return VK_SUCCESS;
2254 }
2255
2256 VkResult
2257 tu_GetMemoryFdPropertiesKHR(VkDevice _device,
2258 VkExternalMemoryHandleTypeFlagBits handleType,
2259 int fd,
2260 VkMemoryFdPropertiesKHR *pMemoryFdProperties)
2261 {
2262 assert(handleType == VK_EXTERNAL_MEMORY_HANDLE_TYPE_DMA_BUF_BIT_EXT);
2263 pMemoryFdProperties->memoryTypeBits = 1;
2264 return VK_SUCCESS;
2265 }
2266
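/* No external semaphore or fence handle types are supported yet, so report
 * empty capability masks.
 */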
2267 void
2268 tu_GetPhysicalDeviceExternalSemaphoreProperties(
2269 VkPhysicalDevice physicalDevice,
2270 const VkPhysicalDeviceExternalSemaphoreInfo *pExternalSemaphoreInfo,
2271 VkExternalSemaphoreProperties *pExternalSemaphoreProperties)
2272 {
2273 pExternalSemaphoreProperties->exportFromImportedHandleTypes = 0;
2274 pExternalSemaphoreProperties->compatibleHandleTypes = 0;
2275 pExternalSemaphoreProperties->externalSemaphoreFeatures = 0;
2276 }
2277
2278 void
2279 tu_GetPhysicalDeviceExternalFenceProperties(
2280 VkPhysicalDevice physicalDevice,
2281 const VkPhysicalDeviceExternalFenceInfo *pExternalFenceInfo,
2282 VkExternalFenceProperties *pExternalFenceProperties)
2283 {
2284 pExternalFenceProperties->exportFromImportedHandleTypes = 0;
2285 pExternalFenceProperties->compatibleHandleTypes = 0;
2286 pExternalFenceProperties->externalFenceFeatures = 0;
2287 }
2288
2289 VkResult
2290 tu_CreateDebugReportCallbackEXT(
2291 VkInstance _instance,
2292 const VkDebugReportCallbackCreateInfoEXT *pCreateInfo,
2293 const VkAllocationCallbacks *pAllocator,
2294 VkDebugReportCallbackEXT *pCallback)
2295 {
2296 TU_FROM_HANDLE(tu_instance, instance, _instance);
2297 return vk_create_debug_report_callback(&instance->debug_report_callbacks,
2298 pCreateInfo, pAllocator,
2299 &instance->alloc, pCallback);
2300 }
2301
2302 void
2303 tu_DestroyDebugReportCallbackEXT(VkInstance _instance,
2304 VkDebugReportCallbackEXT _callback,
2305 const VkAllocationCallbacks *pAllocator)
2306 {
2307 TU_FROM_HANDLE(tu_instance, instance, _instance);
2308 vk_destroy_debug_report_callback(&instance->debug_report_callbacks,
2309 _callback, pAllocator, &instance->alloc);
2310 }
2311
2312 void
2313 tu_DebugReportMessageEXT(VkInstance _instance,
2314 VkDebugReportFlagsEXT flags,
2315 VkDebugReportObjectTypeEXT objectType,
2316 uint64_t object,
2317 size_t location,
2318 int32_t messageCode,
2319 const char *pLayerPrefix,
2320 const char *pMessage)
2321 {
2322 TU_FROM_HANDLE(tu_instance, instance, _instance);
2323 vk_debug_report(&instance->debug_report_callbacks, flags, objectType,
2324 object, location, messageCode, pLayerPrefix, pMessage);
2325 }
2326
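/* Device groups only ever contain this single physical device, so the peer is
 * always the device itself and all peer-memory features are trivially
 * supported.
 */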
2327 void
2328 tu_GetDeviceGroupPeerMemoryFeatures(
2329 VkDevice device,
2330 uint32_t heapIndex,
2331 uint32_t localDeviceIndex,
2332 uint32_t remoteDeviceIndex,
2333 VkPeerMemoryFeatureFlags *pPeerMemoryFeatures)
2334 {
2335 assert(localDeviceIndex == remoteDeviceIndex);
2336
2337 *pPeerMemoryFeatures = VK_PEER_MEMORY_FEATURE_COPY_SRC_BIT |
2338 VK_PEER_MEMORY_FEATURE_COPY_DST_BIT |
2339 VK_PEER_MEMORY_FEATURE_GENERIC_SRC_BIT |
2340 VK_PEER_MEMORY_FEATURE_GENERIC_DST_BIT;
2341 }