turnip: Add magic register values to tu_physical_device
src/freedreno/vulkan/tu_device.c
1 /*
2 * Copyright © 2016 Red Hat.
3 * Copyright © 2016 Bas Nieuwenhuizen
4 *
5 * based in part on anv driver which is:
6 * Copyright © 2015 Intel Corporation
7 *
8 * Permission is hereby granted, free of charge, to any person obtaining a
9 * copy of this software and associated documentation files (the "Software"),
10 * to deal in the Software without restriction, including without limitation
11 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
12 * and/or sell copies of the Software, and to permit persons to whom the
13 * Software is furnished to do so, subject to the following conditions:
14 *
15 * The above copyright notice and this permission notice (including the next
16 * paragraph) shall be included in all copies or substantial portions of the
17 * Software.
18 *
19 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
20 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
21 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
22 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
23 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
24 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
25 * DEALINGS IN THE SOFTWARE.
26 */
27
28 #include "tu_private.h"
29
30 #include <fcntl.h>
31 #include <libsync.h>
32 #include <stdbool.h>
33 #include <string.h>
34 #include <sys/mman.h>
35 #include <sys/sysinfo.h>
36 #include <unistd.h>
37 #include <xf86drm.h>
38
39 #include "compiler/glsl_types.h"
40 #include "util/debug.h"
41 #include "util/disk_cache.h"
42 #include "vk_format.h"
43 #include "vk_util.h"
44
45 #include "drm-uapi/msm_drm.h"
46
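/* The pipeline cache UUID below is assembled from the Mesa build timestamp
 * (bytes 0-3), the GPU family id (bytes 4-5) and a fixed "tu" tag; the
 * remaining bytes stay zero.
 */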
47 static int
48 tu_device_get_cache_uuid(uint16_t family, void *uuid)
49 {
50 uint32_t mesa_timestamp;
51 uint16_t f = family;
52 memset(uuid, 0, VK_UUID_SIZE);
53 if (!disk_cache_get_function_timestamp(tu_device_get_cache_uuid,
54 &mesa_timestamp))
55 return -1;
56
57 memcpy(uuid, &mesa_timestamp, 4);
58 memcpy((char *) uuid + 4, &f, 2);
59 snprintf((char *) uuid + 6, VK_UUID_SIZE - 10, "tu");
60 return 0;
61 }
62
63 static void
64 tu_get_driver_uuid(void *uuid)
65 {
66 memset(uuid, 0, VK_UUID_SIZE);
67 snprintf(uuid, VK_UUID_SIZE, "freedreno");
68 }
69
70 static void
71 tu_get_device_uuid(void *uuid)
72 {
73 memset(uuid, 0, VK_UUID_SIZE);
74 }
75
76 static VkResult
77 tu_bo_init(struct tu_device *dev,
78 struct tu_bo *bo,
79 uint32_t gem_handle,
80 uint64_t size)
81 {
82 uint64_t iova = tu_gem_info_iova(dev, gem_handle);
83 if (!iova)
84 return VK_ERROR_OUT_OF_DEVICE_MEMORY;
85
86 *bo = (struct tu_bo) {
87 .gem_handle = gem_handle,
88 .size = size,
89 .iova = iova,
90 };
91
92 return VK_SUCCESS;
93 }
94
95 VkResult
96 tu_bo_init_new(struct tu_device *dev, struct tu_bo *bo, uint64_t size)
97 {
98 /* TODO: Choose better flags. As of 2018-11-12, freedreno/drm/msm_bo.c
99 * always sets `flags = MSM_BO_WC`, and we copy that behavior here.
100 */
101 uint32_t gem_handle = tu_gem_new(dev, size, MSM_BO_WC);
102 if (!gem_handle)
103 return vk_error(dev->instance, VK_ERROR_OUT_OF_DEVICE_MEMORY);
104
105 VkResult result = tu_bo_init(dev, bo, gem_handle, size);
106 if (result != VK_SUCCESS) {
107 tu_gem_close(dev, gem_handle);
108 return vk_error(dev->instance, result);
109 }
110
111 return VK_SUCCESS;
112 }
113
114 VkResult
115 tu_bo_init_dmabuf(struct tu_device *dev,
116 struct tu_bo *bo,
117 uint64_t size,
118 int fd)
119 {
120 uint32_t gem_handle = tu_gem_import_dmabuf(dev, fd, size);
121 if (!gem_handle)
122 return vk_error(dev->instance, VK_ERROR_INVALID_EXTERNAL_HANDLE);
123
124 VkResult result = tu_bo_init(dev, bo, gem_handle, size);
125 if (result != VK_SUCCESS) {
126 tu_gem_close(dev, gem_handle);
127 return vk_error(dev->instance, result);
128 }
129
130 return VK_SUCCESS;
131 }
132
133 int
134 tu_bo_export_dmabuf(struct tu_device *dev, struct tu_bo *bo)
135 {
136 return tu_gem_export_dmabuf(dev, bo->gem_handle);
137 }
138
139 VkResult
140 tu_bo_map(struct tu_device *dev, struct tu_bo *bo)
141 {
142 if (bo->map)
143 return VK_SUCCESS;
144
145 uint64_t offset = tu_gem_info_offset(dev, bo->gem_handle);
146 if (!offset)
147 return vk_error(dev->instance, VK_ERROR_OUT_OF_DEVICE_MEMORY);
148
149 /* TODO: Should we use the wrapper os_mmap() like Freedreno does? */
150 void *map = mmap(0, bo->size, PROT_READ | PROT_WRITE, MAP_SHARED,
151 dev->physical_device->local_fd, offset);
152 if (map == MAP_FAILED)
153 return vk_error(dev->instance, VK_ERROR_MEMORY_MAP_FAILED);
154
155 bo->map = map;
156 return VK_SUCCESS;
157 }
158
159 void
160 tu_bo_finish(struct tu_device *dev, struct tu_bo *bo)
161 {
162 assert(bo->gem_handle);
163
164 if (bo->map)
165 munmap(bo->map, bo->size);
166
167 tu_gem_close(dev, bo->gem_handle);
168 }
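/* Typical buffer-object lifecycle with the helpers above (a sketch, not a
 * required pattern):
 *
 *    struct tu_bo bo;
 *    if (tu_bo_init_new(dev, &bo, size) != VK_SUCCESS) ...
 *    if (tu_bo_map(dev, &bo) != VK_SUCCESS) ...   // optional, for CPU access
 *    ...
 *    tu_bo_finish(dev, &bo);                      // unmaps and closes the GEM handle
 */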
169
170 static VkResult
171 tu_physical_device_init(struct tu_physical_device *device,
172 struct tu_instance *instance,
173 drmDevicePtr drm_device)
174 {
175 const char *path = drm_device->nodes[DRM_NODE_RENDER];
176 VkResult result = VK_SUCCESS;
177 drmVersionPtr version;
178 int fd;
179 int master_fd = -1;
180
181 fd = open(path, O_RDWR | O_CLOEXEC);
182 if (fd < 0) {
183 return vk_errorf(instance, VK_ERROR_INCOMPATIBLE_DRIVER,
184 "failed to open device %s", path);
185 }
186
187 /* Version 1.3 added MSM_INFO_IOVA. */
188 const int min_version_major = 1;
189 const int min_version_minor = 3;
190
191 version = drmGetVersion(fd);
192 if (!version) {
193 close(fd);
194 return vk_errorf(instance, VK_ERROR_INCOMPATIBLE_DRIVER,
195 "failed to query kernel driver version for device %s",
196 path);
197 }
198
199 if (strcmp(version->name, "msm")) {
200 drmFreeVersion(version);
201 close(fd);
202 return vk_errorf(instance, VK_ERROR_INCOMPATIBLE_DRIVER,
203 "device %s does not use the msm kernel driver", path);
204 }
205
206 if (version->version_major != min_version_major ||
207 version->version_minor < min_version_minor) {
208 result = vk_errorf(instance, VK_ERROR_INCOMPATIBLE_DRIVER,
209 "kernel driver for device %s has version %d.%d, "
210 "but Vulkan requires version >= %d.%d",
211 path, version->version_major, version->version_minor,
212 min_version_major, min_version_minor);
213 drmFreeVersion(version);
214 close(fd);
215 return result;
216 }
217
218 drmFreeVersion(version);
219
220 if (instance->debug_flags & TU_DEBUG_STARTUP)
221 tu_logi("Found compatible device '%s'.", path);
222
223 device->_loader_data.loaderMagic = ICD_LOADER_MAGIC;
224 device->instance = instance;
225 assert(strlen(path) < ARRAY_SIZE(device->path));
226 strncpy(device->path, path, ARRAY_SIZE(device->path));
227
228 if (instance->enabled_extensions.KHR_display) {
229 master_fd =
230 open(drm_device->nodes[DRM_NODE_PRIMARY], O_RDWR | O_CLOEXEC);
231 if (master_fd >= 0) {
232 /* TODO: free master_fd if accel is not working? */
233 }
234 }
235
236 device->master_fd = master_fd;
237 device->local_fd = fd;
238
239 if (tu_drm_get_gpu_id(device, &device->gpu_id)) {
240 if (instance->debug_flags & TU_DEBUG_STARTUP)
241 tu_logi("Could not query the GPU ID");
242 result = vk_errorf(instance, VK_ERROR_INITIALIZATION_FAILED,
243 "could not get GPU ID");
244 goto fail;
245 }
246
247 if (tu_drm_get_gmem_size(device, &device->gmem_size)) {
248 if (instance->debug_flags & TU_DEBUG_STARTUP)
249 tu_logi("Could not query the GMEM size");
250 result = vk_errorf(instance, VK_ERROR_INITIALIZATION_FAILED,
251 "could not get GMEM size");
252 goto fail;
253 }
254
255 memset(device->name, 0, sizeof(device->name));
256 sprintf(device->name, "FD%d", device->gpu_id);
257
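/* Per-GPU tile alignment and "magic" register values. The magic registers
 * are not publicly documented; the values below follow what the freedreno
 * gallium driver and captured command streams appear to use for a630/a640.
 */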
258 switch (device->gpu_id) {
259 case 630:
260 case 640:
261 device->tile_align_w = 64;
262 device->tile_align_h = 16;
263 device->magic.RB_UNKNOWN_8E04_blit = 0x01000000;
264 device->magic.RB_CCU_CNTL_gmem = 0x7c400004;
265 device->magic.PC_UNKNOWN_9805 = 0x1;
266 device->magic.SP_UNKNOWN_A0F8 = 0x1;
267 break;
268 default:
269 result = vk_errorf(instance, VK_ERROR_INITIALIZATION_FAILED,
270 "device %s is unsupported", device->name);
271 goto fail;
272 }
273 if (tu_device_get_cache_uuid(device->gpu_id, device->cache_uuid)) {
274 result = vk_errorf(instance, VK_ERROR_INITIALIZATION_FAILED,
275 "cannot generate UUID");
276 goto fail;
277 }
278
279 /* The gpu id is already embedded in the cache uuid, so we just pass the
280  * device name when creating the cache.
281  */
282 char buf[VK_UUID_SIZE * 2 + 1];
283 disk_cache_format_hex_id(buf, device->cache_uuid, VK_UUID_SIZE * 2);
284 device->disk_cache = disk_cache_create(device->name, buf, 0);
285
286 fprintf(stderr, "WARNING: tu is not a conformant vulkan implementation, "
287 "testing use only.\n");
288
289 tu_get_driver_uuid(&device->driver_uuid);
290 tu_get_device_uuid(&device->device_uuid);
291
292 tu_fill_device_extension_table(device, &device->supported_extensions);
293
294 if (result != VK_SUCCESS) {
295 vk_error(instance, result);
296 goto fail;
297 }
298
299 result = tu_wsi_init(device);
300 if (result != VK_SUCCESS) {
301 vk_error(instance, result);
302 goto fail;
303 }
304
305 return VK_SUCCESS;
306
307 fail:
308 close(fd);
309 if (master_fd != -1)
310 close(master_fd);
311 return result;
312 }
313
314 static void
315 tu_physical_device_finish(struct tu_physical_device *device)
316 {
317 tu_wsi_finish(device);
318
319 disk_cache_destroy(device->disk_cache);
320 close(device->local_fd);
321 if (device->master_fd != -1)
322 close(device->master_fd);
323 }
324
325 static void *
326 default_alloc_func(void *pUserData,
327 size_t size,
328 size_t align,
329 VkSystemAllocationScope allocationScope)
330 {
331 return malloc(size);
332 }
333
334 static void *
335 default_realloc_func(void *pUserData,
336 void *pOriginal,
337 size_t size,
338 size_t align,
339 VkSystemAllocationScope allocationScope)
340 {
341 return realloc(pOriginal, size);
342 }
343
344 static void
345 default_free_func(void *pUserData, void *pMemory)
346 {
347 free(pMemory);
348 }
349
350 static const VkAllocationCallbacks default_alloc = {
351 .pUserData = NULL,
352 .pfnAllocation = default_alloc_func,
353 .pfnReallocation = default_realloc_func,
354 .pfnFree = default_free_func,
355 };
356
357 static const struct debug_control tu_debug_options[] = {
358 { "startup", TU_DEBUG_STARTUP },
359 { "nir", TU_DEBUG_NIR },
360 { "ir3", TU_DEBUG_IR3 },
361 { "nobin", TU_DEBUG_NOBIN },
362 { NULL, 0 }
363 };
364
365 const char *
366 tu_get_debug_option_name(int id)
367 {
368 assert(id < ARRAY_SIZE(tu_debug_options) - 1);
369 return tu_debug_options[id].string;
370 }
371
372 static int
373 tu_get_instance_extension_index(const char *name)
374 {
375 for (unsigned i = 0; i < TU_INSTANCE_EXTENSION_COUNT; ++i) {
376 if (strcmp(name, tu_instance_extensions[i].extensionName) == 0)
377 return i;
378 }
379 return -1;
380 }
381
382 VkResult
383 tu_CreateInstance(const VkInstanceCreateInfo *pCreateInfo,
384 const VkAllocationCallbacks *pAllocator,
385 VkInstance *pInstance)
386 {
387 struct tu_instance *instance;
388 VkResult result;
389
390 assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_INSTANCE_CREATE_INFO);
391
392 uint32_t client_version;
393 if (pCreateInfo->pApplicationInfo &&
394 pCreateInfo->pApplicationInfo->apiVersion != 0) {
395 client_version = pCreateInfo->pApplicationInfo->apiVersion;
396 } else {
397 tu_EnumerateInstanceVersion(&client_version);
398 }
399
400 instance = vk_zalloc2(&default_alloc, pAllocator, sizeof(*instance), 8,
401 VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
402 if (!instance)
403 return vk_error(NULL, VK_ERROR_OUT_OF_HOST_MEMORY);
404
405 instance->_loader_data.loaderMagic = ICD_LOADER_MAGIC;
406
407 if (pAllocator)
408 instance->alloc = *pAllocator;
409 else
410 instance->alloc = default_alloc;
411
412 instance->api_version = client_version;
413 instance->physical_device_count = -1;
414
415 instance->debug_flags =
416 parse_debug_string(getenv("TU_DEBUG"), tu_debug_options);
417
418 if (instance->debug_flags & TU_DEBUG_STARTUP)
419 tu_logi("Created an instance");
420
421 for (uint32_t i = 0; i < pCreateInfo->enabledExtensionCount; i++) {
422 const char *ext_name = pCreateInfo->ppEnabledExtensionNames[i];
423 int index = tu_get_instance_extension_index(ext_name);
424
425 if (index < 0 || !tu_supported_instance_extensions.extensions[index]) {
426 vk_free2(&default_alloc, pAllocator, instance);
427 return vk_error(instance, VK_ERROR_EXTENSION_NOT_PRESENT);
428 }
429
430 instance->enabled_extensions.extensions[index] = true;
431 }
432
433 result = vk_debug_report_instance_init(&instance->debug_report_callbacks);
434 if (result != VK_SUCCESS) {
435 vk_free2(&default_alloc, pAllocator, instance);
436 return vk_error(instance, result);
437 }
438
439 glsl_type_singleton_init_or_ref();
440
441 VG(VALGRIND_CREATE_MEMPOOL(instance, 0, false));
442
443 *pInstance = tu_instance_to_handle(instance);
444
445 return VK_SUCCESS;
446 }
447
448 void
449 tu_DestroyInstance(VkInstance _instance,
450 const VkAllocationCallbacks *pAllocator)
451 {
452 TU_FROM_HANDLE(tu_instance, instance, _instance);
453
454 if (!instance)
455 return;
456
457 for (int i = 0; i < instance->physical_device_count; ++i) {
458 tu_physical_device_finish(instance->physical_devices + i);
459 }
460
461 VG(VALGRIND_DESTROY_MEMPOOL(instance));
462
463 glsl_type_singleton_decref();
464
465 vk_debug_report_instance_destroy(&instance->debug_report_callbacks);
466
467 vk_free(&instance->alloc, instance);
468 }
469
470 static VkResult
471 tu_enumerate_devices(struct tu_instance *instance)
472 {
473 /* TODO: Check for more devices? */
474 drmDevicePtr devices[8];
475 VkResult result = VK_ERROR_INCOMPATIBLE_DRIVER;
476 int max_devices;
477
478 instance->physical_device_count = 0;
479
480 max_devices = drmGetDevices2(0, devices, ARRAY_SIZE(devices));
481
482 if (instance->debug_flags & TU_DEBUG_STARTUP)
483 tu_logi("Found %d drm nodes", max_devices);
484
485 if (max_devices < 1)
486 return vk_error(instance, VK_ERROR_INCOMPATIBLE_DRIVER);
487
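/* Adreno GPUs on msm are platform (non-PCI) devices, so only consider
 * entries that expose a render node and sit on the platform bus.
 */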
488 for (unsigned i = 0; i < (unsigned) max_devices; i++) {
489 if (devices[i]->available_nodes & 1 << DRM_NODE_RENDER &&
490 devices[i]->bustype == DRM_BUS_PLATFORM) {
491
492 result = tu_physical_device_init(
493 instance->physical_devices + instance->physical_device_count,
494 instance, devices[i]);
495 if (result == VK_SUCCESS)
496 ++instance->physical_device_count;
497 else if (result != VK_ERROR_INCOMPATIBLE_DRIVER)
498 break;
499 }
500 }
501 drmFreeDevices(devices, max_devices);
502
503 return result;
504 }
505
506 VkResult
507 tu_EnumeratePhysicalDevices(VkInstance _instance,
508 uint32_t *pPhysicalDeviceCount,
509 VkPhysicalDevice *pPhysicalDevices)
510 {
511 TU_FROM_HANDLE(tu_instance, instance, _instance);
512 VK_OUTARRAY_MAKE(out, pPhysicalDevices, pPhysicalDeviceCount);
513
514 VkResult result;
515
516 if (instance->physical_device_count < 0) {
517 result = tu_enumerate_devices(instance);
518 if (result != VK_SUCCESS && result != VK_ERROR_INCOMPATIBLE_DRIVER)
519 return result;
520 }
521
522 for (uint32_t i = 0; i < instance->physical_device_count; ++i) {
523 vk_outarray_append(&out, p)
524 {
525 *p = tu_physical_device_to_handle(instance->physical_devices + i);
526 }
527 }
528
529 return vk_outarray_status(&out);
530 }
531
532 VkResult
533 tu_EnumeratePhysicalDeviceGroups(
534 VkInstance _instance,
535 uint32_t *pPhysicalDeviceGroupCount,
536 VkPhysicalDeviceGroupProperties *pPhysicalDeviceGroupProperties)
537 {
538 TU_FROM_HANDLE(tu_instance, instance, _instance);
539 VK_OUTARRAY_MAKE(out, pPhysicalDeviceGroupProperties,
540 pPhysicalDeviceGroupCount);
541 VkResult result;
542
543 if (instance->physical_device_count < 0) {
544 result = tu_enumerate_devices(instance);
545 if (result != VK_SUCCESS && result != VK_ERROR_INCOMPATIBLE_DRIVER)
546 return result;
547 }
548
549 for (uint32_t i = 0; i < instance->physical_device_count; ++i) {
550 vk_outarray_append(&out, p)
551 {
552 p->physicalDeviceCount = 1;
553 p->physicalDevices[0] =
554 tu_physical_device_to_handle(instance->physical_devices + i);
555 p->subsetAllocation = false;
556 }
557 }
558
559 return vk_outarray_status(&out);
560 }
561
562 void
563 tu_GetPhysicalDeviceFeatures(VkPhysicalDevice physicalDevice,
564 VkPhysicalDeviceFeatures *pFeatures)
565 {
566 memset(pFeatures, 0, sizeof(*pFeatures));
567
568 *pFeatures = (VkPhysicalDeviceFeatures) {
569 .robustBufferAccess = false,
570 .fullDrawIndexUint32 = false,
571 .imageCubeArray = false,
572 .independentBlend = false,
573 .geometryShader = false,
574 .tessellationShader = false,
575 .sampleRateShading = false,
576 .dualSrcBlend = false,
577 .logicOp = false,
578 .multiDrawIndirect = false,
579 .drawIndirectFirstInstance = false,
580 .depthClamp = false,
581 .depthBiasClamp = false,
582 .fillModeNonSolid = false,
583 .depthBounds = false,
584 .wideLines = false,
585 .largePoints = false,
586 .alphaToOne = false,
587 .multiViewport = false,
588 .samplerAnisotropy = true,
589 .textureCompressionETC2 = true,
590 .textureCompressionASTC_LDR = true,
591 .textureCompressionBC = true,
592 .occlusionQueryPrecise = true,
593 .pipelineStatisticsQuery = false,
594 .vertexPipelineStoresAndAtomics = false,
595 .fragmentStoresAndAtomics = false,
596 .shaderTessellationAndGeometryPointSize = false,
597 .shaderImageGatherExtended = false,
598 .shaderStorageImageExtendedFormats = false,
599 .shaderStorageImageMultisample = false,
600 .shaderUniformBufferArrayDynamicIndexing = false,
601 .shaderSampledImageArrayDynamicIndexing = false,
602 .shaderStorageBufferArrayDynamicIndexing = false,
603 .shaderStorageImageArrayDynamicIndexing = false,
604 .shaderStorageImageReadWithoutFormat = false,
605 .shaderStorageImageWriteWithoutFormat = false,
606 .shaderClipDistance = false,
607 .shaderCullDistance = false,
608 .shaderFloat64 = false,
609 .shaderInt64 = false,
610 .shaderInt16 = false,
611 .sparseBinding = false,
612 .variableMultisampleRate = false,
613 .inheritedQueries = false,
614 };
615 }
616
617 void
618 tu_GetPhysicalDeviceFeatures2(VkPhysicalDevice physicalDevice,
619 VkPhysicalDeviceFeatures2 *pFeatures)
620 {
621 vk_foreach_struct(ext, pFeatures->pNext)
622 {
623 switch (ext->sType) {
624 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VARIABLE_POINTERS_FEATURES: {
625 VkPhysicalDeviceVariablePointersFeatures *features = (void *) ext;
626 features->variablePointersStorageBuffer = false;
627 features->variablePointers = false;
628 break;
629 }
630 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MULTIVIEW_FEATURES: {
631 VkPhysicalDeviceMultiviewFeatures *features =
632 (VkPhysicalDeviceMultiviewFeatures *) ext;
633 features->multiview = false;
634 features->multiviewGeometryShader = false;
635 features->multiviewTessellationShader = false;
636 break;
637 }
638 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_DRAW_PARAMETERS_FEATURES: {
639 VkPhysicalDeviceShaderDrawParametersFeatures *features =
640 (VkPhysicalDeviceShaderDrawParametersFeatures *) ext;
641 features->shaderDrawParameters = false;
642 break;
643 }
644 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROTECTED_MEMORY_FEATURES: {
645 VkPhysicalDeviceProtectedMemoryFeatures *features =
646 (VkPhysicalDeviceProtectedMemoryFeatures *) ext;
647 features->protectedMemory = false;
648 break;
649 }
650 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_16BIT_STORAGE_FEATURES: {
651 VkPhysicalDevice16BitStorageFeatures *features =
652 (VkPhysicalDevice16BitStorageFeatures *) ext;
653 features->storageBuffer16BitAccess = false;
654 features->uniformAndStorageBuffer16BitAccess = false;
655 features->storagePushConstant16 = false;
656 features->storageInputOutput16 = false;
657 break;
658 }
659 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SAMPLER_YCBCR_CONVERSION_FEATURES: {
660 VkPhysicalDeviceSamplerYcbcrConversionFeatures *features =
661 (VkPhysicalDeviceSamplerYcbcrConversionFeatures *) ext;
662 features->samplerYcbcrConversion = false;
663 break;
664 }
665 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DESCRIPTOR_INDEXING_FEATURES_EXT: {
666 VkPhysicalDeviceDescriptorIndexingFeaturesEXT *features =
667 (VkPhysicalDeviceDescriptorIndexingFeaturesEXT *) ext;
668 features->shaderInputAttachmentArrayDynamicIndexing = false;
669 features->shaderUniformTexelBufferArrayDynamicIndexing = false;
670 features->shaderStorageTexelBufferArrayDynamicIndexing = false;
671 features->shaderUniformBufferArrayNonUniformIndexing = false;
672 features->shaderSampledImageArrayNonUniformIndexing = false;
673 features->shaderStorageBufferArrayNonUniformIndexing = false;
674 features->shaderStorageImageArrayNonUniformIndexing = false;
675 features->shaderInputAttachmentArrayNonUniformIndexing = false;
676 features->shaderUniformTexelBufferArrayNonUniformIndexing = false;
677 features->shaderStorageTexelBufferArrayNonUniformIndexing = false;
678 features->descriptorBindingUniformBufferUpdateAfterBind = false;
679 features->descriptorBindingSampledImageUpdateAfterBind = false;
680 features->descriptorBindingStorageImageUpdateAfterBind = false;
681 features->descriptorBindingStorageBufferUpdateAfterBind = false;
682 features->descriptorBindingUniformTexelBufferUpdateAfterBind = false;
683 features->descriptorBindingStorageTexelBufferUpdateAfterBind = false;
684 features->descriptorBindingUpdateUnusedWhilePending = false;
685 features->descriptorBindingPartiallyBound = false;
686 features->descriptorBindingVariableDescriptorCount = false;
687 features->runtimeDescriptorArray = false;
688 break;
689 }
690 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_CONDITIONAL_RENDERING_FEATURES_EXT: {
691 VkPhysicalDeviceConditionalRenderingFeaturesEXT *features =
692 (VkPhysicalDeviceConditionalRenderingFeaturesEXT *) ext;
693 features->conditionalRendering = false;
694 features->inheritedConditionalRendering = false;
695 break;
696 }
697 default:
698 break;
699 }
700 }
701 tu_GetPhysicalDeviceFeatures(physicalDevice, &pFeatures->features);
702 }
703
704 void
705 tu_GetPhysicalDeviceProperties(VkPhysicalDevice physicalDevice,
706 VkPhysicalDeviceProperties *pProperties)
707 {
708 TU_FROM_HANDLE(tu_physical_device, pdevice, physicalDevice);
709 VkSampleCountFlags sample_counts = VK_SAMPLE_COUNT_1_BIT |
710 VK_SAMPLE_COUNT_2_BIT | VK_SAMPLE_COUNT_4_BIT | VK_SAMPLE_COUNT_8_BIT;
711
712 /* Make sure that the entire descriptor set is addressable with a signed
713  * 32-bit int, i.e. the sum of all limits scaled by descriptor size must be
714  * at most 2 GiB. A combined image/sampler object counts as one of each.
715  * This limit is for the pipeline layout, not for the set layout, but there
716  * is no set limit, so we just set a pipeline limit. No application is
717  * likely to hit this soon. */
718 size_t max_descriptor_set_size =
719 ((1ull << 31) - 16 * MAX_DYNAMIC_BUFFERS) /
720 (32 /* uniform buffer, 32 due to potential space wasted on alignment */ +
721 32 /* storage buffer, 32 due to potential space wasted on alignment */ +
722 32 /* sampler, largest when combined with image */ +
723 64 /* sampled image */ + 64 /* storage image */);
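/* With the per-descriptor sizes above this works out to roughly
 * 2^31 / 224, i.e. on the order of 9.5 million descriptors.
 */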
724
725 VkPhysicalDeviceLimits limits = {
726 .maxImageDimension1D = (1 << 14),
727 .maxImageDimension2D = (1 << 14),
728 .maxImageDimension3D = (1 << 11),
729 .maxImageDimensionCube = (1 << 14),
730 .maxImageArrayLayers = (1 << 11),
731 .maxTexelBufferElements = 128 * 1024 * 1024,
732 .maxUniformBufferRange = UINT32_MAX,
733 .maxStorageBufferRange = MAX_STORAGE_BUFFER_RANGE,
734 .maxPushConstantsSize = MAX_PUSH_CONSTANTS_SIZE,
735 .maxMemoryAllocationCount = UINT32_MAX,
736 .maxSamplerAllocationCount = 64 * 1024,
737 .bufferImageGranularity = 64, /* A cache line */
738 .sparseAddressSpaceSize = 0xffffffffu, /* buffer max size */
739 .maxBoundDescriptorSets = MAX_SETS,
740 .maxPerStageDescriptorSamplers = max_descriptor_set_size,
741 .maxPerStageDescriptorUniformBuffers = max_descriptor_set_size,
742 .maxPerStageDescriptorStorageBuffers = max_descriptor_set_size,
743 .maxPerStageDescriptorSampledImages = max_descriptor_set_size,
744 .maxPerStageDescriptorStorageImages = max_descriptor_set_size,
745 .maxPerStageDescriptorInputAttachments = max_descriptor_set_size,
746 .maxPerStageResources = max_descriptor_set_size,
747 .maxDescriptorSetSamplers = max_descriptor_set_size,
748 .maxDescriptorSetUniformBuffers = max_descriptor_set_size,
749 .maxDescriptorSetUniformBuffersDynamic = MAX_DYNAMIC_UNIFORM_BUFFERS,
750 .maxDescriptorSetStorageBuffers = max_descriptor_set_size,
751 .maxDescriptorSetStorageBuffersDynamic = MAX_DYNAMIC_STORAGE_BUFFERS,
752 .maxDescriptorSetSampledImages = max_descriptor_set_size,
753 .maxDescriptorSetStorageImages = max_descriptor_set_size,
754 .maxDescriptorSetInputAttachments = max_descriptor_set_size,
755 .maxVertexInputAttributes = 32,
756 .maxVertexInputBindings = 32,
757 .maxVertexInputAttributeOffset = 2047,
758 .maxVertexInputBindingStride = 2048,
759 .maxVertexOutputComponents = 128,
760 .maxTessellationGenerationLevel = 64,
761 .maxTessellationPatchSize = 32,
762 .maxTessellationControlPerVertexInputComponents = 128,
763 .maxTessellationControlPerVertexOutputComponents = 128,
764 .maxTessellationControlPerPatchOutputComponents = 120,
765 .maxTessellationControlTotalOutputComponents = 4096,
766 .maxTessellationEvaluationInputComponents = 128,
767 .maxTessellationEvaluationOutputComponents = 128,
768 .maxGeometryShaderInvocations = 127,
769 .maxGeometryInputComponents = 64,
770 .maxGeometryOutputComponents = 128,
771 .maxGeometryOutputVertices = 256,
772 .maxGeometryTotalOutputComponents = 1024,
773 .maxFragmentInputComponents = 128,
774 .maxFragmentOutputAttachments = 8,
775 .maxFragmentDualSrcAttachments = 1,
776 .maxFragmentCombinedOutputResources = 8,
777 .maxComputeSharedMemorySize = 32768,
778 .maxComputeWorkGroupCount = { 65535, 65535, 65535 },
779 .maxComputeWorkGroupInvocations = 2048,
780 .maxComputeWorkGroupSize = { 2048, 2048, 2048 },
781 .subPixelPrecisionBits = 4 /* FIXME */,
782 .subTexelPrecisionBits = 4 /* FIXME */,
783 .mipmapPrecisionBits = 4 /* FIXME */,
784 .maxDrawIndexedIndexValue = UINT32_MAX,
785 .maxDrawIndirectCount = UINT32_MAX,
786 .maxSamplerLodBias = 16,
787 .maxSamplerAnisotropy = 16,
788 .maxViewports = MAX_VIEWPORTS,
789 .maxViewportDimensions = { (1 << 14), (1 << 14) },
790 .viewportBoundsRange = { INT16_MIN, INT16_MAX },
791 .viewportSubPixelBits = 8,
792 .minMemoryMapAlignment = 4096, /* A page */
793 .minTexelBufferOffsetAlignment = 64,
794 .minUniformBufferOffsetAlignment = 4,
795 .minStorageBufferOffsetAlignment = 4,
796 .minTexelOffset = -32,
797 .maxTexelOffset = 31,
798 .minTexelGatherOffset = -32,
799 .maxTexelGatherOffset = 31,
800 .minInterpolationOffset = -2,
801 .maxInterpolationOffset = 2,
802 .subPixelInterpolationOffsetBits = 8,
803 .maxFramebufferWidth = (1 << 14),
804 .maxFramebufferHeight = (1 << 14),
805 .maxFramebufferLayers = (1 << 10),
806 .framebufferColorSampleCounts = sample_counts,
807 .framebufferDepthSampleCounts = sample_counts,
808 .framebufferStencilSampleCounts = sample_counts,
809 .framebufferNoAttachmentsSampleCounts = sample_counts,
810 .maxColorAttachments = MAX_RTS,
811 .sampledImageColorSampleCounts = sample_counts,
812 .sampledImageIntegerSampleCounts = VK_SAMPLE_COUNT_1_BIT,
813 .sampledImageDepthSampleCounts = sample_counts,
814 .sampledImageStencilSampleCounts = sample_counts,
815 .storageImageSampleCounts = VK_SAMPLE_COUNT_1_BIT,
816 .maxSampleMaskWords = 1,
817 .timestampComputeAndGraphics = false, /* FINISHME */
818 .timestampPeriod = 1,
819 .maxClipDistances = 8,
820 .maxCullDistances = 8,
821 .maxCombinedClipAndCullDistances = 8,
822 .discreteQueuePriorities = 1,
823 .pointSizeRange = { 0.125, 255.875 },
824 .lineWidthRange = { 0.0, 7.9921875 },
825 .pointSizeGranularity = (1.0 / 8.0),
826 .lineWidthGranularity = (1.0 / 128.0),
827 .strictLines = false, /* FINISHME */
828 .standardSampleLocations = true,
829 .optimalBufferCopyOffsetAlignment = 128,
830 .optimalBufferCopyRowPitchAlignment = 128,
831 .nonCoherentAtomSize = 64,
832 };
833
834 *pProperties = (VkPhysicalDeviceProperties) {
835 .apiVersion = tu_physical_device_api_version(pdevice),
836 .driverVersion = vk_get_driver_version(),
837 .vendorID = 0, /* TODO */
838 .deviceID = 0,
839 .deviceType = VK_PHYSICAL_DEVICE_TYPE_INTEGRATED_GPU,
840 .limits = limits,
841 .sparseProperties = { 0 },
842 };
843
844 strcpy(pProperties->deviceName, pdevice->name);
845 memcpy(pProperties->pipelineCacheUUID, pdevice->cache_uuid, VK_UUID_SIZE);
846 }
847
848 void
849 tu_GetPhysicalDeviceProperties2(VkPhysicalDevice physicalDevice,
850 VkPhysicalDeviceProperties2 *pProperties)
851 {
852 TU_FROM_HANDLE(tu_physical_device, pdevice, physicalDevice);
853 tu_GetPhysicalDeviceProperties(physicalDevice, &pProperties->properties);
854
855 vk_foreach_struct(ext, pProperties->pNext)
856 {
857 switch (ext->sType) {
858 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PUSH_DESCRIPTOR_PROPERTIES_KHR: {
859 VkPhysicalDevicePushDescriptorPropertiesKHR *properties =
860 (VkPhysicalDevicePushDescriptorPropertiesKHR *) ext;
861 properties->maxPushDescriptors = MAX_PUSH_DESCRIPTORS;
862 break;
863 }
864 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_ID_PROPERTIES: {
865 VkPhysicalDeviceIDProperties *properties =
866 (VkPhysicalDeviceIDProperties *) ext;
867 memcpy(properties->driverUUID, pdevice->driver_uuid, VK_UUID_SIZE);
868 memcpy(properties->deviceUUID, pdevice->device_uuid, VK_UUID_SIZE);
869 properties->deviceLUIDValid = false;
870 break;
871 }
872 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MULTIVIEW_PROPERTIES: {
873 VkPhysicalDeviceMultiviewProperties *properties =
874 (VkPhysicalDeviceMultiviewProperties *) ext;
875 properties->maxMultiviewViewCount = MAX_VIEWS;
876 properties->maxMultiviewInstanceIndex = INT_MAX;
877 break;
878 }
879 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_POINT_CLIPPING_PROPERTIES: {
880 VkPhysicalDevicePointClippingProperties *properties =
881 (VkPhysicalDevicePointClippingProperties *) ext;
882 properties->pointClippingBehavior =
883 VK_POINT_CLIPPING_BEHAVIOR_ALL_CLIP_PLANES;
884 break;
885 }
886 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MAINTENANCE_3_PROPERTIES: {
887 VkPhysicalDeviceMaintenance3Properties *properties =
888 (VkPhysicalDeviceMaintenance3Properties *) ext;
889 /* Make sure everything is addressable by a signed 32-bit int. Our
890  * largest descriptors are 96 bytes, hence 2^31 / 96. */
891 properties->maxPerSetDescriptors = (1ull << 31) / 96;
892 /* Our buffer size fields allow only this much */
893 properties->maxMemoryAllocationSize = 0xFFFFFFFFull;
894 break;
895 }
896 default:
897 break;
898 }
899 }
900 }
901
902 static const VkQueueFamilyProperties tu_queue_family_properties = {
903 .queueFlags =
904 VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT | VK_QUEUE_TRANSFER_BIT,
905 .queueCount = 1,
906 .timestampValidBits = 0, /* FINISHME */
907 .minImageTransferGranularity = { 1, 1, 1 },
908 };
909
910 void
911 tu_GetPhysicalDeviceQueueFamilyProperties(
912 VkPhysicalDevice physicalDevice,
913 uint32_t *pQueueFamilyPropertyCount,
914 VkQueueFamilyProperties *pQueueFamilyProperties)
915 {
916 VK_OUTARRAY_MAKE(out, pQueueFamilyProperties, pQueueFamilyPropertyCount);
917
918 vk_outarray_append(&out, p) { *p = tu_queue_family_properties; }
919 }
920
921 void
922 tu_GetPhysicalDeviceQueueFamilyProperties2(
923 VkPhysicalDevice physicalDevice,
924 uint32_t *pQueueFamilyPropertyCount,
925 VkQueueFamilyProperties2 *pQueueFamilyProperties)
926 {
927 VK_OUTARRAY_MAKE(out, pQueueFamilyProperties, pQueueFamilyPropertyCount);
928
929 vk_outarray_append(&out, p)
930 {
931 p->queueFamilyProperties = tu_queue_family_properties;
932 }
933 }
934
935 static uint64_t
936 tu_get_system_heap_size(void)
937 {
938 struct sysinfo info;
939 sysinfo(&info);
940
941 uint64_t total_ram = (uint64_t) info.totalram * (uint64_t) info.mem_unit;
942
943 /* We don't want to let the GPU burn through too much system RAM: with
944  * 4 GiB or less we expose at most half, with more than 4 GiB we expose 3/4.
945  */
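/* For example, 8 GiB of system RAM yields a 6 GiB heap, while a 4 GiB
 * system yields a 2 GiB heap.
 */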
946 uint64_t available_ram;
947 if (total_ram <= 4ull * 1024ull * 1024ull * 1024ull)
948 available_ram = total_ram / 2;
949 else
950 available_ram = total_ram * 3 / 4;
951
952 return available_ram;
953 }
954
955 void
956 tu_GetPhysicalDeviceMemoryProperties(
957 VkPhysicalDevice physicalDevice,
958 VkPhysicalDeviceMemoryProperties *pMemoryProperties)
959 {
960 pMemoryProperties->memoryHeapCount = 1;
961 pMemoryProperties->memoryHeaps[0].size = tu_get_system_heap_size();
962 pMemoryProperties->memoryHeaps[0].flags = VK_MEMORY_HEAP_DEVICE_LOCAL_BIT;
963
964 pMemoryProperties->memoryTypeCount = 1;
965 pMemoryProperties->memoryTypes[0].propertyFlags =
966 VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT |
967 VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT |
968 VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
969 pMemoryProperties->memoryTypes[0].heapIndex = 0;
970 }
971
972 void
973 tu_GetPhysicalDeviceMemoryProperties2(
974 VkPhysicalDevice physicalDevice,
975 VkPhysicalDeviceMemoryProperties2 *pMemoryProperties)
976 {
977 tu_GetPhysicalDeviceMemoryProperties(
978 physicalDevice, &pMemoryProperties->memoryProperties);
979 }
980
981 static VkResult
982 tu_queue_init(struct tu_device *device,
983 struct tu_queue *queue,
984 uint32_t queue_family_index,
985 int idx,
986 VkDeviceQueueCreateFlags flags)
987 {
988 queue->_loader_data.loaderMagic = ICD_LOADER_MAGIC;
989 queue->device = device;
990 queue->queue_family_index = queue_family_index;
991 queue->queue_idx = idx;
992 queue->flags = flags;
993
994 int ret = tu_drm_submitqueue_new(device, 0, &queue->msm_queue_id);
995 if (ret)
996 return VK_ERROR_INITIALIZATION_FAILED;
997
998 tu_fence_init(&queue->submit_fence, false);
999
1000 return VK_SUCCESS;
1001 }
1002
1003 static void
1004 tu_queue_finish(struct tu_queue *queue)
1005 {
1006 tu_fence_finish(&queue->submit_fence);
1007 tu_drm_submitqueue_close(queue->device, queue->msm_queue_id);
1008 }
1009
1010 static int
1011 tu_get_device_extension_index(const char *name)
1012 {
1013 for (unsigned i = 0; i < TU_DEVICE_EXTENSION_COUNT; ++i) {
1014 if (strcmp(name, tu_device_extensions[i].extensionName) == 0)
1015 return i;
1016 }
1017 return -1;
1018 }
1019
1020 VkResult
1021 tu_CreateDevice(VkPhysicalDevice physicalDevice,
1022 const VkDeviceCreateInfo *pCreateInfo,
1023 const VkAllocationCallbacks *pAllocator,
1024 VkDevice *pDevice)
1025 {
1026 TU_FROM_HANDLE(tu_physical_device, physical_device, physicalDevice);
1027 VkResult result;
1028 struct tu_device *device;
1029
1030 /* Check enabled features */
1031 if (pCreateInfo->pEnabledFeatures) {
1032 VkPhysicalDeviceFeatures supported_features;
1033 tu_GetPhysicalDeviceFeatures(physicalDevice, &supported_features);
1034 VkBool32 *supported_feature = (VkBool32 *) &supported_features;
1035 VkBool32 *enabled_feature = (VkBool32 *) pCreateInfo->pEnabledFeatures;
1036 unsigned num_features =
1037 sizeof(VkPhysicalDeviceFeatures) / sizeof(VkBool32);
1038 for (uint32_t i = 0; i < num_features; i++) {
1039 if (enabled_feature[i] && !supported_feature[i])
1040 return vk_error(physical_device->instance,
1041 VK_ERROR_FEATURE_NOT_PRESENT);
1042 }
1043 }
1044
1045 device = vk_zalloc2(&physical_device->instance->alloc, pAllocator,
1046 sizeof(*device), 8, VK_SYSTEM_ALLOCATION_SCOPE_DEVICE);
1047 if (!device)
1048 return vk_error(physical_device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
1049
1050 device->_loader_data.loaderMagic = ICD_LOADER_MAGIC;
1051 device->instance = physical_device->instance;
1052 device->physical_device = physical_device;
1053
1054 if (pAllocator)
1055 device->alloc = *pAllocator;
1056 else
1057 device->alloc = physical_device->instance->alloc;
1058
1059 for (uint32_t i = 0; i < pCreateInfo->enabledExtensionCount; i++) {
1060 const char *ext_name = pCreateInfo->ppEnabledExtensionNames[i];
1061 int index = tu_get_device_extension_index(ext_name);
1062 if (index < 0 ||
1063 !physical_device->supported_extensions.extensions[index]) {
1064 vk_free(&device->alloc, device);
1065 return vk_error(physical_device->instance,
1066 VK_ERROR_EXTENSION_NOT_PRESENT);
1067 }
1068
1069 device->enabled_extensions.extensions[index] = true;
1070 }
1071
1072 for (unsigned i = 0; i < pCreateInfo->queueCreateInfoCount; i++) {
1073 const VkDeviceQueueCreateInfo *queue_create =
1074 &pCreateInfo->pQueueCreateInfos[i];
1075 uint32_t qfi = queue_create->queueFamilyIndex;
1076 device->queues[qfi] = vk_alloc(
1077 &device->alloc, queue_create->queueCount * sizeof(struct tu_queue),
1078 8, VK_SYSTEM_ALLOCATION_SCOPE_DEVICE);
1079 if (!device->queues[qfi]) {
1080 result = VK_ERROR_OUT_OF_HOST_MEMORY;
1081 goto fail_queues;
1082 }
1083
1084 memset(device->queues[qfi], 0,
1085 queue_create->queueCount * sizeof(struct tu_queue));
1086
1087 device->queue_count[qfi] = queue_create->queueCount;
1088
1089 for (unsigned q = 0; q < queue_create->queueCount; q++) {
1090 result = tu_queue_init(device, &device->queues[qfi][q], qfi, q,
1091 queue_create->flags);
1092 if (result != VK_SUCCESS)
1093 goto fail_queues;
1094 }
1095 }
1096
1097 device->compiler = ir3_compiler_create(NULL, physical_device->gpu_id);
1098 if (!device->compiler) {
1099 result = VK_ERROR_INITIALIZATION_FAILED;
goto fail_queues;
}
1100
1101 #define VSC_DATA_SIZE(pitch) ((pitch) * 32 + 0x100) /* extra size to store VSC_SIZE */
1102 #define VSC_DATA2_SIZE(pitch) ((pitch) * 32)
1103
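/* Default pitches for the visibility stream (VSC) buffers used by the
 * binning pass; a single pair of buffers is allocated per device and
 * shared by all command buffers.
 */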
1104 device->vsc_data_pitch = 0x440 * 4;
1105 device->vsc_data2_pitch = 0x1040 * 4;
1106
1107 result = tu_bo_init_new(device, &device->vsc_data, VSC_DATA_SIZE(device->vsc_data_pitch));
1108 if (result != VK_SUCCESS)
1109 goto fail_vsc_data;
1110
1111 result = tu_bo_init_new(device, &device->vsc_data2, VSC_DATA2_SIZE(device->vsc_data2_pitch));
1112 if (result != VK_SUCCESS)
1113 goto fail_vsc_data2;
1114
1115 VkPipelineCacheCreateInfo ci;
1116 ci.sType = VK_STRUCTURE_TYPE_PIPELINE_CACHE_CREATE_INFO;
1117 ci.pNext = NULL;
1118 ci.flags = 0;
1119 ci.pInitialData = NULL;
1120 ci.initialDataSize = 0;
1121 VkPipelineCache pc;
1122 result =
1123 tu_CreatePipelineCache(tu_device_to_handle(device), &ci, NULL, &pc);
1124 if (result != VK_SUCCESS)
1125 goto fail_pipeline_cache;
1126
1127 device->mem_cache = tu_pipeline_cache_from_handle(pc);
1128
1129 *pDevice = tu_device_to_handle(device);
1130 return VK_SUCCESS;
1131
1132 fail_pipeline_cache:
1133 tu_bo_finish(device, &device->vsc_data2);
1134
1135 fail_vsc_data2:
1136 tu_bo_finish(device, &device->vsc_data);
1137
1138 fail_vsc_data:
1139 ralloc_free(device->compiler);
1140
1141 fail_queues:
1142 for (unsigned i = 0; i < TU_MAX_QUEUE_FAMILIES; i++) {
1143 for (unsigned q = 0; q < device->queue_count[i]; q++)
1144 tu_queue_finish(&device->queues[i][q]);
1145 if (device->queue_count[i])
1146 vk_free(&device->alloc, device->queues[i]);
1147 }
1148
1149 vk_free(&device->alloc, device);
1150 return result;
1151 }
1152
1153 void
1154 tu_DestroyDevice(VkDevice _device, const VkAllocationCallbacks *pAllocator)
1155 {
1156 TU_FROM_HANDLE(tu_device, device, _device);
1157
1158 if (!device)
1159 return;
1160
1161 tu_bo_finish(device, &device->vsc_data);
1162 tu_bo_finish(device, &device->vsc_data2);
1163
1164 for (unsigned i = 0; i < TU_MAX_QUEUE_FAMILIES; i++) {
1165 for (unsigned q = 0; q < device->queue_count[i]; q++)
1166 tu_queue_finish(&device->queues[i][q]);
1167 if (device->queue_count[i])
1168 vk_free(&device->alloc, device->queues[i]);
1169 }
1170
1171 /* the compiler does not use pAllocator */
1172 ralloc_free(device->compiler);
1173
1174 VkPipelineCache pc = tu_pipeline_cache_to_handle(device->mem_cache);
1175 tu_DestroyPipelineCache(tu_device_to_handle(device), pc, NULL);
1176
1177 vk_free(&device->alloc, device);
1178 }
1179
1180 VkResult
1181 tu_EnumerateInstanceLayerProperties(uint32_t *pPropertyCount,
1182 VkLayerProperties *pProperties)
1183 {
1184 *pPropertyCount = 0;
1185 return VK_SUCCESS;
1186 }
1187
1188 VkResult
1189 tu_EnumerateDeviceLayerProperties(VkPhysicalDevice physicalDevice,
1190 uint32_t *pPropertyCount,
1191 VkLayerProperties *pProperties)
1192 {
1193 *pPropertyCount = 0;
1194 return VK_SUCCESS;
1195 }
1196
1197 void
1198 tu_GetDeviceQueue2(VkDevice _device,
1199 const VkDeviceQueueInfo2 *pQueueInfo,
1200 VkQueue *pQueue)
1201 {
1202 TU_FROM_HANDLE(tu_device, device, _device);
1203 struct tu_queue *queue;
1204
1205 queue =
1206 &device->queues[pQueueInfo->queueFamilyIndex][pQueueInfo->queueIndex];
1207 if (pQueueInfo->flags != queue->flags) {
1208 /* From the Vulkan 1.1.70 spec:
1209 *
1210 * "The queue returned by vkGetDeviceQueue2 must have the same
1211 * flags value from this structure as that used at device
1212 * creation time in a VkDeviceQueueCreateInfo instance. If no
1213 * matching flags were specified at device creation time then
1214 * pQueue will return VK_NULL_HANDLE."
1215 */
1216 *pQueue = VK_NULL_HANDLE;
1217 return;
1218 }
1219
1220 *pQueue = tu_queue_to_handle(queue);
1221 }
1222
1223 void
1224 tu_GetDeviceQueue(VkDevice _device,
1225 uint32_t queueFamilyIndex,
1226 uint32_t queueIndex,
1227 VkQueue *pQueue)
1228 {
1229 const VkDeviceQueueInfo2 info =
1230 (VkDeviceQueueInfo2) { .sType = VK_STRUCTURE_TYPE_DEVICE_QUEUE_INFO_2,
1231 .queueFamilyIndex = queueFamilyIndex,
1232 .queueIndex = queueIndex };
1233
1234 tu_GetDeviceQueue2(_device, &info, pQueue);
1235 }
1236
1237 VkResult
1238 tu_QueueSubmit(VkQueue _queue,
1239 uint32_t submitCount,
1240 const VkSubmitInfo *pSubmits,
1241 VkFence _fence)
1242 {
1243 TU_FROM_HANDLE(tu_queue, queue, _queue);
1244
1245 for (uint32_t i = 0; i < submitCount; ++i) {
1246 const VkSubmitInfo *submit = pSubmits + i;
1247 const bool last_submit = (i == submitCount - 1);
1248 struct tu_bo_list bo_list;
1249 tu_bo_list_init(&bo_list);
1250
1251 uint32_t entry_count = 0;
1252 for (uint32_t j = 0; j < submit->commandBufferCount; ++j) {
1253 TU_FROM_HANDLE(tu_cmd_buffer, cmdbuf, submit->pCommandBuffers[j]);
1254 entry_count += cmdbuf->cs.entry_count;
1255 }
1256
1257 struct drm_msm_gem_submit_cmd cmds[entry_count];
1258 uint32_t entry_idx = 0;
1259 for (uint32_t j = 0; j < submit->commandBufferCount; ++j) {
1260 TU_FROM_HANDLE(tu_cmd_buffer, cmdbuf, submit->pCommandBuffers[j]);
1261 struct tu_cs *cs = &cmdbuf->cs;
1262 for (unsigned i = 0; i < cs->entry_count; ++i, ++entry_idx) {
1263 cmds[entry_idx].type = MSM_SUBMIT_CMD_BUF;
1264 cmds[entry_idx].submit_idx =
1265 tu_bo_list_add(&bo_list, cs->entries[i].bo,
1266 MSM_SUBMIT_BO_READ | MSM_SUBMIT_BO_DUMP);
1267 cmds[entry_idx].submit_offset = cs->entries[i].offset;
1268 cmds[entry_idx].size = cs->entries[i].size;
1269 cmds[entry_idx].pad = 0;
1270 cmds[entry_idx].nr_relocs = 0;
1271 cmds[entry_idx].relocs = 0;
1272 }
1273
1274 tu_bo_list_merge(&bo_list, &cmdbuf->bo_list);
1275 }
1276
1277 uint32_t flags = MSM_PIPE_3D0;
1278 if (last_submit) {
1279 flags |= MSM_SUBMIT_FENCE_FD_OUT;
1280 }
1281
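/* Each VkSubmitInfo becomes a single DRM_MSM_GEM_SUBMIT ioctl: the merged
 * BO list plus one cmd entry per IB collected from the command buffers above.
 */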
1282 struct drm_msm_gem_submit req = {
1283 .flags = flags,
1284 .queueid = queue->msm_queue_id,
1285 .bos = (uint64_t)(uintptr_t) bo_list.bo_infos,
1286 .nr_bos = bo_list.count,
1287 .cmds = (uint64_t)(uintptr_t)cmds,
1288 .nr_cmds = entry_count,
1289 };
1290
1291 int ret = drmCommandWriteRead(queue->device->physical_device->local_fd,
1292 DRM_MSM_GEM_SUBMIT,
1293 &req, sizeof(req));
1294 if (ret) {
1295 fprintf(stderr, "submit failed: %s\n", strerror(errno));
1296 abort();
1297 }
1298
1299 tu_bo_list_destroy(&bo_list);
1300
1301 if (last_submit) {
1302 /* no need to merge fences as queue execution is serialized */
1303 tu_fence_update_fd(&queue->submit_fence, req.fence_fd);
1304 }
1305 }
1306
1307 if (_fence != VK_NULL_HANDLE) {
1308 TU_FROM_HANDLE(tu_fence, fence, _fence);
1309 tu_fence_copy(fence, &queue->submit_fence);
1310 }
1311
1312 return VK_SUCCESS;
1313 }
1314
1315 VkResult
1316 tu_QueueWaitIdle(VkQueue _queue)
1317 {
1318 TU_FROM_HANDLE(tu_queue, queue, _queue);
1319
1320 tu_fence_wait_idle(&queue->submit_fence);
1321
1322 return VK_SUCCESS;
1323 }
1324
1325 VkResult
1326 tu_DeviceWaitIdle(VkDevice _device)
1327 {
1328 TU_FROM_HANDLE(tu_device, device, _device);
1329
1330 for (unsigned i = 0; i < TU_MAX_QUEUE_FAMILIES; i++) {
1331 for (unsigned q = 0; q < device->queue_count[i]; q++) {
1332 tu_QueueWaitIdle(tu_queue_to_handle(&device->queues[i][q]));
1333 }
1334 }
1335 return VK_SUCCESS;
1336 }
1337
1338 VkResult
1339 tu_EnumerateInstanceExtensionProperties(const char *pLayerName,
1340 uint32_t *pPropertyCount,
1341 VkExtensionProperties *pProperties)
1342 {
1343 VK_OUTARRAY_MAKE(out, pProperties, pPropertyCount);
1344
1345 /* We support no layers */
1346 if (pLayerName)
1347 return vk_error(NULL, VK_ERROR_LAYER_NOT_PRESENT);
1348
1349 for (int i = 0; i < TU_INSTANCE_EXTENSION_COUNT; i++) {
1350 if (tu_supported_instance_extensions.extensions[i]) {
1351 vk_outarray_append(&out, prop) { *prop = tu_instance_extensions[i]; }
1352 }
1353 }
1354
1355 return vk_outarray_status(&out);
1356 }
1357
1358 VkResult
1359 tu_EnumerateDeviceExtensionProperties(VkPhysicalDevice physicalDevice,
1360 const char *pLayerName,
1361 uint32_t *pPropertyCount,
1362 VkExtensionProperties *pProperties)
1363 {
1365 TU_FROM_HANDLE(tu_physical_device, device, physicalDevice);
1366 VK_OUTARRAY_MAKE(out, pProperties, pPropertyCount);
1367
1368 /* We support no layers */
1369 if (pLayerName)
1370 return vk_error(NULL, VK_ERROR_LAYER_NOT_PRESENT);
1371
1372 for (int i = 0; i < TU_DEVICE_EXTENSION_COUNT; i++) {
1373 if (device->supported_extensions.extensions[i]) {
1374 vk_outarray_append(&out, prop) { *prop = tu_device_extensions[i]; }
1375 }
1376 }
1377
1378 return vk_outarray_status(&out);
1379 }
1380
1381 PFN_vkVoidFunction
1382 tu_GetInstanceProcAddr(VkInstance _instance, const char *pName)
1383 {
1384 TU_FROM_HANDLE(tu_instance, instance, _instance);
1385
1386 return tu_lookup_entrypoint_checked(
1387 pName, instance ? instance->api_version : 0,
1388 instance ? &instance->enabled_extensions : NULL, NULL);
1389 }
1390
1391 /* The loader wants us to expose a second GetInstanceProcAddr function
1392 * to work around certain LD_PRELOAD issues seen in apps.
1393 */
1394 PUBLIC
1395 VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL
1396 vk_icdGetInstanceProcAddr(VkInstance instance, const char *pName);
1397
1398 PUBLIC
1399 VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL
1400 vk_icdGetInstanceProcAddr(VkInstance instance, const char *pName)
1401 {
1402 return tu_GetInstanceProcAddr(instance, pName);
1403 }
1404
1405 PFN_vkVoidFunction
1406 tu_GetDeviceProcAddr(VkDevice _device, const char *pName)
1407 {
1408 TU_FROM_HANDLE(tu_device, device, _device);
1409
1410 return tu_lookup_entrypoint_checked(pName, device->instance->api_version,
1411 &device->instance->enabled_extensions,
1412 &device->enabled_extensions);
1413 }
1414
1415 static VkResult
1416 tu_alloc_memory(struct tu_device *device,
1417 const VkMemoryAllocateInfo *pAllocateInfo,
1418 const VkAllocationCallbacks *pAllocator,
1419 VkDeviceMemory *pMem)
1420 {
1421 struct tu_device_memory *mem;
1422 VkResult result;
1423
1424 assert(pAllocateInfo->sType == VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO);
1425
1426 if (pAllocateInfo->allocationSize == 0) {
1427 /* Apparently, this is allowed */
1428 *pMem = VK_NULL_HANDLE;
1429 return VK_SUCCESS;
1430 }
1431
1432 mem = vk_alloc2(&device->alloc, pAllocator, sizeof(*mem), 8,
1433 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
1434 if (mem == NULL)
1435 return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
1436
1437 const VkImportMemoryFdInfoKHR *fd_info =
1438 vk_find_struct_const(pAllocateInfo->pNext, IMPORT_MEMORY_FD_INFO_KHR);
1439 if (fd_info && !fd_info->handleType)
1440 fd_info = NULL;
1441
1442 if (fd_info) {
1443 assert(fd_info->handleType ==
1444 VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT ||
1445 fd_info->handleType ==
1446 VK_EXTERNAL_MEMORY_HANDLE_TYPE_DMA_BUF_BIT_EXT);
1447
1448 /*
1449 * TODO Importing the same fd twice gives us the same handle without
1450 * reference counting. We need to maintain a per-instance handle-to-bo
1451 * table and add reference count to tu_bo.
1452 */
1453 result = tu_bo_init_dmabuf(device, &mem->bo,
1454 pAllocateInfo->allocationSize, fd_info->fd);
1455 if (result == VK_SUCCESS) {
1456 /* take ownership and close the fd */
1457 close(fd_info->fd);
1458 }
1459 } else {
1460 result =
1461 tu_bo_init_new(device, &mem->bo, pAllocateInfo->allocationSize);
1462 }
1463
1464 if (result != VK_SUCCESS) {
1465 vk_free2(&device->alloc, pAllocator, mem);
1466 return result;
1467 }
1468
1469 mem->size = pAllocateInfo->allocationSize;
1470 mem->type_index = pAllocateInfo->memoryTypeIndex;
1471
1472 mem->map = NULL;
1473 mem->user_ptr = NULL;
1474
1475 *pMem = tu_device_memory_to_handle(mem);
1476
1477 return VK_SUCCESS;
1478 }
1479
1480 VkResult
1481 tu_AllocateMemory(VkDevice _device,
1482 const VkMemoryAllocateInfo *pAllocateInfo,
1483 const VkAllocationCallbacks *pAllocator,
1484 VkDeviceMemory *pMem)
1485 {
1486 TU_FROM_HANDLE(tu_device, device, _device);
1487 return tu_alloc_memory(device, pAllocateInfo, pAllocator, pMem);
1488 }
1489
1490 void
1491 tu_FreeMemory(VkDevice _device,
1492 VkDeviceMemory _mem,
1493 const VkAllocationCallbacks *pAllocator)
1494 {
1495 TU_FROM_HANDLE(tu_device, device, _device);
1496 TU_FROM_HANDLE(tu_device_memory, mem, _mem);
1497
1498 if (mem == NULL)
1499 return;
1500
1501 tu_bo_finish(device, &mem->bo);
1502 vk_free2(&device->alloc, pAllocator, mem);
1503 }
1504
1505 VkResult
1506 tu_MapMemory(VkDevice _device,
1507 VkDeviceMemory _memory,
1508 VkDeviceSize offset,
1509 VkDeviceSize size,
1510 VkMemoryMapFlags flags,
1511 void **ppData)
1512 {
1513 TU_FROM_HANDLE(tu_device, device, _device);
1514 TU_FROM_HANDLE(tu_device_memory, mem, _memory);
1515 VkResult result;
1516
1517 if (mem == NULL) {
1518 *ppData = NULL;
1519 return VK_SUCCESS;
1520 }
1521
1522 if (mem->user_ptr) {
1523 *ppData = mem->user_ptr;
1524 } else if (!mem->map) {
1525 result = tu_bo_map(device, &mem->bo);
1526 if (result != VK_SUCCESS)
1527 return result;
1528 *ppData = mem->map = mem->bo.map;
1529 } else
1530 *ppData = mem->map;
1531
1532 if (*ppData) {
1533 *ppData += offset;
1534 return VK_SUCCESS;
1535 }
1536
1537 return vk_error(device->instance, VK_ERROR_MEMORY_MAP_FAILED);
1538 }
1539
1540 void
1541 tu_UnmapMemory(VkDevice _device, VkDeviceMemory _memory)
1542 {
1543 /* I do not see any unmapping done by the freedreno Gallium driver. */
1544 }
1545
1546 VkResult
1547 tu_FlushMappedMemoryRanges(VkDevice _device,
1548 uint32_t memoryRangeCount,
1549 const VkMappedMemoryRange *pMemoryRanges)
1550 {
1551 return VK_SUCCESS;
1552 }
1553
1554 VkResult
1555 tu_InvalidateMappedMemoryRanges(VkDevice _device,
1556 uint32_t memoryRangeCount,
1557 const VkMappedMemoryRange *pMemoryRanges)
1558 {
1559 return VK_SUCCESS;
1560 }
1561
1562 void
1563 tu_GetBufferMemoryRequirements(VkDevice _device,
1564 VkBuffer _buffer,
1565 VkMemoryRequirements *pMemoryRequirements)
1566 {
1567 TU_FROM_HANDLE(tu_buffer, buffer, _buffer);
1568
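/* There is only one memory type (see tu_GetPhysicalDeviceMemoryProperties),
 * so bit 0 is always the one to report.
 */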
1569 pMemoryRequirements->memoryTypeBits = 1;
1570 pMemoryRequirements->alignment = 16;
1571 pMemoryRequirements->size =
1572 align64(buffer->size, pMemoryRequirements->alignment);
1573 }
1574
1575 void
1576 tu_GetBufferMemoryRequirements2(
1577 VkDevice device,
1578 const VkBufferMemoryRequirementsInfo2 *pInfo,
1579 VkMemoryRequirements2 *pMemoryRequirements)
1580 {
1581 tu_GetBufferMemoryRequirements(device, pInfo->buffer,
1582 &pMemoryRequirements->memoryRequirements);
1583 }
1584
1585 void
1586 tu_GetImageMemoryRequirements(VkDevice _device,
1587 VkImage _image,
1588 VkMemoryRequirements *pMemoryRequirements)
1589 {
1590 TU_FROM_HANDLE(tu_image, image, _image);
1591
1592 pMemoryRequirements->memoryTypeBits = 1;
1593 pMemoryRequirements->size = image->layout.size;
1594 pMemoryRequirements->alignment = image->alignment;
1595 }
1596
1597 void
1598 tu_GetImageMemoryRequirements2(VkDevice device,
1599 const VkImageMemoryRequirementsInfo2 *pInfo,
1600 VkMemoryRequirements2 *pMemoryRequirements)
1601 {
1602 tu_GetImageMemoryRequirements(device, pInfo->image,
1603 &pMemoryRequirements->memoryRequirements);
1604 }
1605
1606 void
1607 tu_GetImageSparseMemoryRequirements(
1608 VkDevice device,
1609 VkImage image,
1610 uint32_t *pSparseMemoryRequirementCount,
1611 VkSparseImageMemoryRequirements *pSparseMemoryRequirements)
1612 {
1613 tu_stub();
1614 }
1615
1616 void
1617 tu_GetImageSparseMemoryRequirements2(
1618 VkDevice device,
1619 const VkImageSparseMemoryRequirementsInfo2 *pInfo,
1620 uint32_t *pSparseMemoryRequirementCount,
1621 VkSparseImageMemoryRequirements2 *pSparseMemoryRequirements)
1622 {
1623 tu_stub();
1624 }
1625
1626 void
1627 tu_GetDeviceMemoryCommitment(VkDevice device,
1628 VkDeviceMemory memory,
1629 VkDeviceSize *pCommittedMemoryInBytes)
1630 {
1631 *pCommittedMemoryInBytes = 0;
1632 }
1633
1634 VkResult
1635 tu_BindBufferMemory2(VkDevice device,
1636 uint32_t bindInfoCount,
1637 const VkBindBufferMemoryInfo *pBindInfos)
1638 {
1639 for (uint32_t i = 0; i < bindInfoCount; ++i) {
1640 TU_FROM_HANDLE(tu_device_memory, mem, pBindInfos[i].memory);
1641 TU_FROM_HANDLE(tu_buffer, buffer, pBindInfos[i].buffer);
1642
1643 if (mem) {
1644 buffer->bo = &mem->bo;
1645 buffer->bo_offset = pBindInfos[i].memoryOffset;
1646 } else {
1647 buffer->bo = NULL;
1648 }
1649 }
1650 return VK_SUCCESS;
1651 }
1652
1653 VkResult
1654 tu_BindBufferMemory(VkDevice device,
1655 VkBuffer buffer,
1656 VkDeviceMemory memory,
1657 VkDeviceSize memoryOffset)
1658 {
1659 const VkBindBufferMemoryInfo info = {
1660 .sType = VK_STRUCTURE_TYPE_BIND_BUFFER_MEMORY_INFO,
1661 .buffer = buffer,
1662 .memory = memory,
1663 .memoryOffset = memoryOffset
1664 };
1665
1666 return tu_BindBufferMemory2(device, 1, &info);
1667 }
1668
1669 VkResult
1670 tu_BindImageMemory2(VkDevice device,
1671 uint32_t bindInfoCount,
1672 const VkBindImageMemoryInfo *pBindInfos)
1673 {
1674 for (uint32_t i = 0; i < bindInfoCount; ++i) {
1675 TU_FROM_HANDLE(tu_image, image, pBindInfos[i].image);
1676 TU_FROM_HANDLE(tu_device_memory, mem, pBindInfos[i].memory);
1677
1678 if (mem) {
1679 image->bo = &mem->bo;
1680 image->bo_offset = pBindInfos[i].memoryOffset;
1681 } else {
1682 image->bo = NULL;
1683 image->bo_offset = 0;
1684 }
1685 }
1686
1687 return VK_SUCCESS;
1688 }
1689
1690 VkResult
1691 tu_BindImageMemory(VkDevice device,
1692 VkImage image,
1693 VkDeviceMemory memory,
1694 VkDeviceSize memoryOffset)
1695 {
1696 const VkBindImageMemoryInfo info = {
1697 .sType = VK_STRUCTURE_TYPE_BIND_IMAGE_MEMORY_INFO,
1698 .image = image,
1699 .memory = memory,
1700 .memoryOffset = memoryOffset
1701 };
1702
1703 return tu_BindImageMemory2(device, 1, &info);
1704 }
1705
1706 VkResult
1707 tu_QueueBindSparse(VkQueue _queue,
1708 uint32_t bindInfoCount,
1709 const VkBindSparseInfo *pBindInfo,
1710 VkFence _fence)
1711 {
1712 return VK_SUCCESS;
1713 }
1714
1715 // Queue semaphore functions
1716
1717 VkResult
1718 tu_CreateSemaphore(VkDevice _device,
1719 const VkSemaphoreCreateInfo *pCreateInfo,
1720 const VkAllocationCallbacks *pAllocator,
1721 VkSemaphore *pSemaphore)
1722 {
1723 TU_FROM_HANDLE(tu_device, device, _device);
1724
1725 struct tu_semaphore *sem =
1726 vk_alloc2(&device->alloc, pAllocator, sizeof(*sem), 8,
1727 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
1728 if (!sem)
1729 return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
1730
1731 *pSemaphore = tu_semaphore_to_handle(sem);
1732 return VK_SUCCESS;
1733 }
1734
1735 void
1736 tu_DestroySemaphore(VkDevice _device,
1737 VkSemaphore _semaphore,
1738 const VkAllocationCallbacks *pAllocator)
1739 {
1740 TU_FROM_HANDLE(tu_device, device, _device);
1741 TU_FROM_HANDLE(tu_semaphore, sem, _semaphore);
1742 if (!_semaphore)
1743 return;
1744
1745 vk_free2(&device->alloc, pAllocator, sem);
1746 }
1747
1748 VkResult
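/* Events are backed by a small BO. The host sets and resets the first
 * 64-bit word directly through the CPU mapping; GPU-side sets/waits are
 * presumably done on the same word from the command stream (not shown here).
 */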
1749 tu_CreateEvent(VkDevice _device,
1750 const VkEventCreateInfo *pCreateInfo,
1751 const VkAllocationCallbacks *pAllocator,
1752 VkEvent *pEvent)
1753 {
1754 TU_FROM_HANDLE(tu_device, device, _device);
1755 struct tu_event *event =
1756 vk_alloc2(&device->alloc, pAllocator, sizeof(*event), 8,
1757 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
1758
1759 if (!event)
1760 return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
1761
1762 VkResult result = tu_bo_init_new(device, &event->bo, 0x1000);
1763 if (result != VK_SUCCESS)
1764 goto fail_alloc;
1765
1766 result = tu_bo_map(device, &event->bo);
1767 if (result != VK_SUCCESS)
1768 goto fail_map;
1769
1770 *pEvent = tu_event_to_handle(event);
1771
1772 return VK_SUCCESS;
1773
1774 fail_map:
1775 tu_bo_finish(device, &event->bo);
1776 fail_alloc:
1777 vk_free2(&device->alloc, pAllocator, event);
1778 return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
1779 }
1780
1781 void
1782 tu_DestroyEvent(VkDevice _device,
1783 VkEvent _event,
1784 const VkAllocationCallbacks *pAllocator)
1785 {
1786 TU_FROM_HANDLE(tu_device, device, _device);
1787 TU_FROM_HANDLE(tu_event, event, _event);
1788
1789 if (!event)
1790 return;
1791
1792 tu_bo_finish(device, &event->bo);
1793 vk_free2(&device->alloc, pAllocator, event);
1794 }
1795
1796 VkResult
1797 tu_GetEventStatus(VkDevice _device, VkEvent _event)
1798 {
1799 TU_FROM_HANDLE(tu_event, event, _event);
1800
1801 if (*(uint64_t*) event->bo.map == 1)
1802 return VK_EVENT_SET;
1803 return VK_EVENT_RESET;
1804 }
1805
1806 VkResult
1807 tu_SetEvent(VkDevice _device, VkEvent _event)
1808 {
1809 TU_FROM_HANDLE(tu_event, event, _event);
1810 *(uint64_t*) event->bo.map = 1;
1811
1812 return VK_SUCCESS;
1813 }
1814
1815 VkResult
1816 tu_ResetEvent(VkDevice _device, VkEvent _event)
1817 {
1818 TU_FROM_HANDLE(tu_event, event, _event);
1819 *(uint64_t*) event->bo.map = 0;
1820
1821 return VK_SUCCESS;
1822 }
1823
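/* A buffer only records its create parameters here; the backing memory is
 * attached later via tu_BindBufferMemory2().
 */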
1824 VkResult
1825 tu_CreateBuffer(VkDevice _device,
1826 const VkBufferCreateInfo *pCreateInfo,
1827 const VkAllocationCallbacks *pAllocator,
1828 VkBuffer *pBuffer)
1829 {
1830 TU_FROM_HANDLE(tu_device, device, _device);
1831 struct tu_buffer *buffer;
1832
1833 assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO);
1834
1835 buffer = vk_alloc2(&device->alloc, pAllocator, sizeof(*buffer), 8,
1836 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
1837 if (buffer == NULL)
1838 return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
1839
1840 buffer->size = pCreateInfo->size;
1841 buffer->usage = pCreateInfo->usage;
1842 buffer->flags = pCreateInfo->flags;
1843
1844 *pBuffer = tu_buffer_to_handle(buffer);
1845
1846 return VK_SUCCESS;
1847 }
1848
1849 void
1850 tu_DestroyBuffer(VkDevice _device,
1851 VkBuffer _buffer,
1852 const VkAllocationCallbacks *pAllocator)
1853 {
1854 TU_FROM_HANDLE(tu_device, device, _device);
1855 TU_FROM_HANDLE(tu_buffer, buffer, _buffer);
1856
1857 if (!buffer)
1858 return;
1859
1860 vk_free2(&device->alloc, pAllocator, buffer);
1861 }
1862
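/* For 3D views the addressable "layers" are the depth slices, so the limit
 * comes from the view's depth rather than base_layer + layer_count.
 */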
1863 static uint32_t
1864 tu_surface_max_layer_count(struct tu_image_view *iview)
1865 {
1866 return iview->type == VK_IMAGE_VIEW_TYPE_3D
1867 ? iview->extent.depth
1868 : (iview->base_layer + iview->layer_count);
1869 }
1870
1871 VkResult
1872 tu_CreateFramebuffer(VkDevice _device,
1873 const VkFramebufferCreateInfo *pCreateInfo,
1874 const VkAllocationCallbacks *pAllocator,
1875 VkFramebuffer *pFramebuffer)
1876 {
1877 TU_FROM_HANDLE(tu_device, device, _device);
1878 struct tu_framebuffer *framebuffer;
1879
1880 assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO);
1881
1882 size_t size = sizeof(*framebuffer) + sizeof(struct tu_attachment_info) *
1883 pCreateInfo->attachmentCount;
1884 framebuffer = vk_alloc2(&device->alloc, pAllocator, size, 8,
1885 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
1886 if (framebuffer == NULL)
1887 return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
1888
1889 framebuffer->attachment_count = pCreateInfo->attachmentCount;
1890 framebuffer->width = pCreateInfo->width;
1891 framebuffer->height = pCreateInfo->height;
1892 framebuffer->layers = pCreateInfo->layers;
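/* Shrink the framebuffer dimensions to what every attachment can actually cover. */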
1893 for (uint32_t i = 0; i < pCreateInfo->attachmentCount; i++) {
1894 VkImageView _iview = pCreateInfo->pAttachments[i];
1895 struct tu_image_view *iview = tu_image_view_from_handle(_iview);
1896 framebuffer->attachments[i].attachment = iview;
1897
1898 framebuffer->width = MIN2(framebuffer->width, iview->extent.width);
1899 framebuffer->height = MIN2(framebuffer->height, iview->extent.height);
1900 framebuffer->layers =
1901 MIN2(framebuffer->layers, tu_surface_max_layer_count(iview));
1902 }
1903
1904 *pFramebuffer = tu_framebuffer_to_handle(framebuffer);
1905 return VK_SUCCESS;
1906 }
1907
1908 void
1909 tu_DestroyFramebuffer(VkDevice _device,
1910 VkFramebuffer _fb,
1911 const VkAllocationCallbacks *pAllocator)
1912 {
1913 TU_FROM_HANDLE(tu_device, device, _device);
1914 TU_FROM_HANDLE(tu_framebuffer, fb, _fb);
1915
1916 if (!fb)
1917 return;
1918 vk_free2(&device->alloc, pAllocator, fb);
1919 }
1920
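/* Translate Vulkan sampler state into a6xx TEX_SAMP register encodings. */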
1921 static enum a6xx_tex_clamp
1922 tu6_tex_wrap(VkSamplerAddressMode address_mode, bool *needs_border)
1923 {
1924 switch (address_mode) {
1925 case VK_SAMPLER_ADDRESS_MODE_REPEAT:
1926 return A6XX_TEX_REPEAT;
1927 case VK_SAMPLER_ADDRESS_MODE_MIRRORED_REPEAT:
1928 return A6XX_TEX_MIRROR_REPEAT;
1929 case VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE:
1930 return A6XX_TEX_CLAMP_TO_EDGE;
1931 case VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_BORDER:
1932 *needs_border = true;
1933 return A6XX_TEX_CLAMP_TO_BORDER;
1934 case VK_SAMPLER_ADDRESS_MODE_MIRROR_CLAMP_TO_EDGE:
1935 /* only works for power-of-two sizes; needs to be emulated otherwise! */
1936 return A6XX_TEX_MIRROR_CLAMP;
1937 default:
1938 unreachable("illegal tex wrap mode");
1939 break;
1940 }
1941 }
1942
1943 static enum a6xx_tex_filter
1944 tu6_tex_filter(VkFilter filter, unsigned aniso)
1945 {
1946 switch (filter) {
1947 case VK_FILTER_NEAREST:
1948 return A6XX_TEX_NEAREST;
1949 case VK_FILTER_LINEAR:
1950 return aniso ? A6XX_TEX_ANISO : A6XX_TEX_LINEAR;
1951 case VK_FILTER_CUBIC_IMG:
1952 default:
1953 unreachable("illegal texture filter");
1954 break;
1955 }
1956 }
1957
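/* VkCompareOp values match the adreno compare-func encoding one-to-one, so a
 * plain cast is enough.
 */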
1958 static inline enum adreno_compare_func
1959 tu6_compare_func(VkCompareOp op)
1960 {
1961 return (enum adreno_compare_func) op;
1962 }
1963
1964 static void
1965 tu_init_sampler(struct tu_device *device,
1966 struct tu_sampler *sampler,
1967 const VkSamplerCreateInfo *pCreateInfo)
1968 {
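/* Encode the maximum anisotropy as a log2 value (1x..16x); 0 disables
 * anisotropic filtering.
 */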
1969 unsigned aniso = pCreateInfo->anisotropyEnable ?
1970 util_last_bit(MIN2((uint32_t)pCreateInfo->maxAnisotropy >> 1, 8)) : 0;
1971 bool miplinear = (pCreateInfo->mipmapMode == VK_SAMPLER_MIPMAP_MODE_LINEAR);
1972 bool needs_border = false;
1973
1974 sampler->state[0] =
1975 COND(miplinear, A6XX_TEX_SAMP_0_MIPFILTER_LINEAR_NEAR) |
1976 A6XX_TEX_SAMP_0_XY_MAG(tu6_tex_filter(pCreateInfo->magFilter, aniso)) |
1977 A6XX_TEX_SAMP_0_XY_MIN(tu6_tex_filter(pCreateInfo->minFilter, aniso)) |
1978 A6XX_TEX_SAMP_0_ANISO(aniso) |
1979 A6XX_TEX_SAMP_0_WRAP_S(tu6_tex_wrap(pCreateInfo->addressModeU, &needs_border)) |
1980 A6XX_TEX_SAMP_0_WRAP_T(tu6_tex_wrap(pCreateInfo->addressModeV, &needs_border)) |
1981 A6XX_TEX_SAMP_0_WRAP_R(tu6_tex_wrap(pCreateInfo->addressModeW, &needs_border)) |
1982 A6XX_TEX_SAMP_0_LOD_BIAS(pCreateInfo->mipLodBias);
1983 sampler->state[1] =
1984 /* COND(!cso->seamless_cube_map, A6XX_TEX_SAMP_1_CUBEMAPSEAMLESSFILTOFF) | */
1985 COND(pCreateInfo->unnormalizedCoordinates, A6XX_TEX_SAMP_1_UNNORM_COORDS) |
1986 A6XX_TEX_SAMP_1_MIN_LOD(pCreateInfo->minLod) |
1987 A6XX_TEX_SAMP_1_MAX_LOD(pCreateInfo->maxLod) |
1988 COND(pCreateInfo->compareEnable,
1989 A6XX_TEX_SAMP_1_COMPARE_FUNC(tu6_compare_func(pCreateInfo->compareOp)));
1990 sampler->state[2] = 0;
1991 sampler->state[3] = 0;
1992
1993 /* TODO:
1994 * A6XX_TEX_SAMP_1_MIPFILTER_LINEAR_FAR disables mipmapping, but Vulkan has no NONE mipfilter?
1995 * border color
1996 */
1997
1998 sampler->needs_border = needs_border;
1999 sampler->border = pCreateInfo->borderColor;
2000 }
2001
2002 VkResult
2003 tu_CreateSampler(VkDevice _device,
2004 const VkSamplerCreateInfo *pCreateInfo,
2005 const VkAllocationCallbacks *pAllocator,
2006 VkSampler *pSampler)
2007 {
2008 TU_FROM_HANDLE(tu_device, device, _device);
2009 struct tu_sampler *sampler;
2010
2011 assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_SAMPLER_CREATE_INFO);
2012
2013 sampler = vk_alloc2(&device->alloc, pAllocator, sizeof(*sampler), 8,
2014 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
2015 if (!sampler)
2016 return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
2017
2018 tu_init_sampler(device, sampler, pCreateInfo);
2019 *pSampler = tu_sampler_to_handle(sampler);
2020
2021 return VK_SUCCESS;
2022 }
2023
2024 void
2025 tu_DestroySampler(VkDevice _device,
2026 VkSampler _sampler,
2027 const VkAllocationCallbacks *pAllocator)
2028 {
2029 TU_FROM_HANDLE(tu_device, device, _device);
2030 TU_FROM_HANDLE(tu_sampler, sampler, _sampler);
2031
2032 if (!sampler)
2033 return;
2034 vk_free2(&device->alloc, pAllocator, sampler);
2035 }
2036
2037 /* vk_icd.h does not declare this function, so we declare it here to
2038 * suppress -Wmissing-prototypes.
2039 */
2040 PUBLIC VKAPI_ATTR VkResult VKAPI_CALL
2041 vk_icdNegotiateLoaderICDInterfaceVersion(uint32_t *pSupportedVersion);
2042
2043 PUBLIC VKAPI_ATTR VkResult VKAPI_CALL
2044 vk_icdNegotiateLoaderICDInterfaceVersion(uint32_t *pSupportedVersion)
2045 {
2046 /* For the full details on loader interface versioning, see
2047 * <https://github.com/KhronosGroup/Vulkan-LoaderAndValidationLayers/blob/master/loader/LoaderAndLayerInterface.md>.
2048 * What follows is a condensed summary, to help you navigate the large and
2049 * confusing official doc.
2050 *
2051 * - Loader interface v0 is incompatible with later versions. We don't
2052 * support it.
2053 *
2054 * - In loader interface v1:
2055 * - The first ICD entrypoint called by the loader is
2056 * vk_icdGetInstanceProcAddr(). The ICD must statically expose this
2057 * entrypoint.
2058 * - The ICD must statically expose no other Vulkan symbol unless it
2059 * is linked with -Bsymbolic.
2060 * - Each dispatchable Vulkan handle created by the ICD must be
2061 * a pointer to a struct whose first member is VK_LOADER_DATA. The
2062 * ICD must initialize VK_LOADER_DATA.loadMagic to
2063 * ICD_LOADER_MAGIC.
2064 * - The loader implements vkCreate{PLATFORM}SurfaceKHR() and
2065 * vkDestroySurfaceKHR(). The ICD must be capable of working with
2066 * such loader-managed surfaces.
2067 *
2068 * - Loader interface v2 differs from v1 in:
2069 * - The first ICD entrypoint called by the loader is
2070 * vk_icdNegotiateLoaderICDInterfaceVersion(). The ICD must
2071 * statically expose this entrypoint.
2072 *
2073 * - Loader interface v3 differs from v2 in:
2074 * - The ICD must implement vkCreate{PLATFORM}SurfaceKHR(),
2075 * vkDestroySurfaceKHR(), and other APIs that use VkSurfaceKHR,
2076 * because the loader no longer does so.
2077 */
2078 *pSupportedVersion = MIN2(*pSupportedVersion, 3u);
2079 return VK_SUCCESS;
2080 }
2081
2082 VkResult
2083 tu_GetMemoryFdKHR(VkDevice _device,
2084 const VkMemoryGetFdInfoKHR *pGetFdInfo,
2085 int *pFd)
2086 {
2087 TU_FROM_HANDLE(tu_device, device, _device);
2088 TU_FROM_HANDLE(tu_device_memory, memory, pGetFdInfo->memory);
2089
2090 assert(pGetFdInfo->sType == VK_STRUCTURE_TYPE_MEMORY_GET_FD_INFO_KHR);
2091
2092 /* At the moment, we support only the below handle types. */
2093 assert(pGetFdInfo->handleType ==
2094 VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT ||
2095 pGetFdInfo->handleType ==
2096 VK_EXTERNAL_MEMORY_HANDLE_TYPE_DMA_BUF_BIT_EXT);
2097
2098 int prime_fd = tu_bo_export_dmabuf(device, &memory->bo);
2099 if (prime_fd < 0)
2100 return vk_error(device->instance, VK_ERROR_OUT_OF_DEVICE_MEMORY);
2101
2102 *pFd = prime_fd;
2103 return VK_SUCCESS;
2104 }
2105
2106 VkResult
2107 tu_GetMemoryFdPropertiesKHR(VkDevice _device,
2108 VkExternalMemoryHandleTypeFlagBits handleType,
2109 int fd,
2110 VkMemoryFdPropertiesKHR *pMemoryFdProperties)
2111 {
2112 assert(handleType == VK_EXTERNAL_MEMORY_HANDLE_TYPE_DMA_BUF_BIT_EXT);
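/* Only a single memory type is advertised, so imported dma-bufs are always
 * compatible with memory type 0.
 */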
2113 pMemoryFdProperties->memoryTypeBits = 1;
2114 return VK_SUCCESS;
2115 }
2116
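/* External semaphore and fence handles are not supported yet, so both queries
 * report no capabilities.
 */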
2117 void
2118 tu_GetPhysicalDeviceExternalSemaphoreProperties(
2119 VkPhysicalDevice physicalDevice,
2120 const VkPhysicalDeviceExternalSemaphoreInfo *pExternalSemaphoreInfo,
2121 VkExternalSemaphoreProperties *pExternalSemaphoreProperties)
2122 {
2123 pExternalSemaphoreProperties->exportFromImportedHandleTypes = 0;
2124 pExternalSemaphoreProperties->compatibleHandleTypes = 0;
2125 pExternalSemaphoreProperties->externalSemaphoreFeatures = 0;
2126 }
2127
2128 void
2129 tu_GetPhysicalDeviceExternalFenceProperties(
2130 VkPhysicalDevice physicalDevice,
2131 const VkPhysicalDeviceExternalFenceInfo *pExternalFenceInfo,
2132 VkExternalFenceProperties *pExternalFenceProperties)
2133 {
2134 pExternalFenceProperties->exportFromImportedHandleTypes = 0;
2135 pExternalFenceProperties->compatibleHandleTypes = 0;
2136 pExternalFenceProperties->externalFenceFeatures = 0;
2137 }
2138
2139 VkResult
2140 tu_CreateDebugReportCallbackEXT(
2141 VkInstance _instance,
2142 const VkDebugReportCallbackCreateInfoEXT *pCreateInfo,
2143 const VkAllocationCallbacks *pAllocator,
2144 VkDebugReportCallbackEXT *pCallback)
2145 {
2146 TU_FROM_HANDLE(tu_instance, instance, _instance);
2147 return vk_create_debug_report_callback(&instance->debug_report_callbacks,
2148 pCreateInfo, pAllocator,
2149 &instance->alloc, pCallback);
2150 }
2151
2152 void
2153 tu_DestroyDebugReportCallbackEXT(VkInstance _instance,
2154 VkDebugReportCallbackEXT _callback,
2155 const VkAllocationCallbacks *pAllocator)
2156 {
2157 TU_FROM_HANDLE(tu_instance, instance, _instance);
2158 vk_destroy_debug_report_callback(&instance->debug_report_callbacks,
2159 _callback, pAllocator, &instance->alloc);
2160 }
2161
2162 void
2163 tu_DebugReportMessageEXT(VkInstance _instance,
2164 VkDebugReportFlagsEXT flags,
2165 VkDebugReportObjectTypeEXT objectType,
2166 uint64_t object,
2167 size_t location,
2168 int32_t messageCode,
2169 const char *pLayerPrefix,
2170 const char *pMessage)
2171 {
2172 TU_FROM_HANDLE(tu_instance, instance, _instance);
2173 vk_debug_report(&instance->debug_report_callbacks, flags, objectType,
2174 object, location, messageCode, pLayerPrefix, pMessage);
2175 }
2176
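/* There is only ever one physical device in a group, so the "peer" is the
 * device itself and all transfer directions are supported.
 */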
2177 void
2178 tu_GetDeviceGroupPeerMemoryFeatures(
2179 VkDevice device,
2180 uint32_t heapIndex,
2181 uint32_t localDeviceIndex,
2182 uint32_t remoteDeviceIndex,
2183 VkPeerMemoryFeatureFlags *pPeerMemoryFeatures)
2184 {
2185 assert(localDeviceIndex == remoteDeviceIndex);
2186
2187 *pPeerMemoryFeatures = VK_PEER_MEMORY_FEATURE_COPY_SRC_BIT |
2188 VK_PEER_MEMORY_FEATURE_COPY_DST_BIT |
2189 VK_PEER_MEMORY_FEATURE_GENERIC_SRC_BIT |
2190 VK_PEER_MEMORY_FEATURE_GENERIC_DST_BIT;
2191 }