1 /*
2 * Copyright © 2016 Red Hat.
3 * Copyright © 2016 Bas Nieuwenhuizen
4 *
5 * based in part on anv driver which is:
6 * Copyright © 2015 Intel Corporation
7 *
8 * Permission is hereby granted, free of charge, to any person obtaining a
9 * copy of this software and associated documentation files (the "Software"),
10 * to deal in the Software without restriction, including without limitation
11 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
12 * and/or sell copies of the Software, and to permit persons to whom the
13 * Software is furnished to do so, subject to the following conditions:
14 *
15 * The above copyright notice and this permission notice (including the next
16 * paragraph) shall be included in all copies or substantial portions of the
17 * Software.
18 *
19 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
20 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
21 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
22 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
23 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
24 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
25 * DEALINGS IN THE SOFTWARE.
26 */
27
28 #include "tu_private.h"
29
30 #include <fcntl.h>
31 #include <libsync.h>
32 #include <stdbool.h>
33 #include <string.h>
34 #include <sys/sysinfo.h>
35 #include <unistd.h>
36 #include <xf86drm.h>
37
38 #include "compiler/glsl_types.h"
39 #include "util/debug.h"
40 #include "util/disk_cache.h"
41 #include "util/u_atomic.h"
42 #include "vk_format.h"
43 #include "vk_util.h"
44
45 #include "drm-uapi/msm_drm.h"
46
47 /* for fd_get_driver/device_uuid() */
48 #include "freedreno/common/freedreno_uuid.h"
49
50 static void
51 tu_semaphore_remove_temp(struct tu_device *device,
52 struct tu_semaphore *sem);
53
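/* The cache UUID layout: 4 bytes of Mesa build timestamp, 2 bytes of GPU
 * family id, then a "tu" tag; the remaining bytes stay zeroed.
 */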
54 static int
55 tu_device_get_cache_uuid(uint16_t family, void *uuid)
56 {
57 uint32_t mesa_timestamp;
58 uint16_t f = family;
59 memset(uuid, 0, VK_UUID_SIZE);
60 if (!disk_cache_get_function_timestamp(tu_device_get_cache_uuid,
61 &mesa_timestamp))
62 return -1;
63
64 memcpy(uuid, &mesa_timestamp, 4);
65 memcpy((char *) uuid + 4, &f, 2);
66 snprintf((char *) uuid + 6, VK_UUID_SIZE - 10, "tu");
67 return 0;
68 }
69
70 VkResult
71 tu_physical_device_init(struct tu_physical_device *device,
72 struct tu_instance *instance)
73 {
74 VkResult result = VK_SUCCESS;
75
76 memset(device->name, 0, sizeof(device->name));
77 sprintf(device->name, "FD%d", device->gpu_id);
78
79 device->limited_z24s8 = (device->gpu_id == 630);
80
81 switch (device->gpu_id) {
82 case 618:
83 device->ccu_offset_gmem = 0x7c000; /* 0x7e000 in some cases? */
84 device->ccu_offset_bypass = 0x10000;
85 device->tile_align_w = 32;
86 device->magic.PC_UNKNOWN_9805 = 0x0;
87 device->magic.SP_UNKNOWN_A0F8 = 0x0;
88 break;
89 case 630:
90 case 640:
91 device->ccu_offset_gmem = 0xf8000;
92 device->ccu_offset_bypass = 0x20000;
93 device->tile_align_w = 32;
94 device->magic.PC_UNKNOWN_9805 = 0x1;
95 device->magic.SP_UNKNOWN_A0F8 = 0x1;
96 break;
97 case 650:
98 device->ccu_offset_gmem = 0x114000;
99 device->ccu_offset_bypass = 0x30000;
100 device->tile_align_w = 96;
101 device->magic.PC_UNKNOWN_9805 = 0x2;
102 device->magic.SP_UNKNOWN_A0F8 = 0x2;
103 break;
104 default:
105 result = vk_errorf(instance, VK_ERROR_INITIALIZATION_FAILED,
106 "device %s is unsupported", device->name);
107 goto fail;
108 }
109 if (tu_device_get_cache_uuid(device->gpu_id, device->cache_uuid)) {
110 result = vk_errorf(instance, VK_ERROR_INITIALIZATION_FAILED,
111 "cannot generate UUID");
112 goto fail;
113 }
114
   /* The gpu id is already embedded in the cache uuid, so the
    * hex-formatted uuid on its own is enough to key the on-disk cache.
    */
118 char buf[VK_UUID_SIZE * 2 + 1];
119 disk_cache_format_hex_id(buf, device->cache_uuid, VK_UUID_SIZE * 2);
120 device->disk_cache = disk_cache_create(device->name, buf, 0);
121
122 fprintf(stderr, "WARNING: tu is not a conformant vulkan implementation, "
123 "testing use only.\n");
124
125 fd_get_driver_uuid(device->driver_uuid);
126 fd_get_device_uuid(device->device_uuid, device->gpu_id);
127
128 tu_physical_device_get_supported_extensions(device, &device->supported_extensions);
129
135 result = tu_wsi_init(device);
136 if (result != VK_SUCCESS) {
137 vk_error(instance, result);
138 goto fail;
139 }
140
141 return VK_SUCCESS;
142
143 fail:
144 close(device->local_fd);
145 if (device->master_fd != -1)
146 close(device->master_fd);
147 return result;
148 }
149
150 static void
151 tu_physical_device_finish(struct tu_physical_device *device)
152 {
153 tu_wsi_finish(device);
154
155 disk_cache_destroy(device->disk_cache);
156 close(device->local_fd);
157 if (device->master_fd != -1)
158 close(device->master_fd);
159
160 vk_object_base_finish(&device->base);
161 }
162
163 static VKAPI_ATTR void *
164 default_alloc_func(void *pUserData,
165 size_t size,
166 size_t align,
167 VkSystemAllocationScope allocationScope)
168 {
169 return malloc(size);
170 }
171
172 static VKAPI_ATTR void *
173 default_realloc_func(void *pUserData,
174 void *pOriginal,
175 size_t size,
176 size_t align,
177 VkSystemAllocationScope allocationScope)
178 {
179 return realloc(pOriginal, size);
180 }
181
182 static VKAPI_ATTR void
183 default_free_func(void *pUserData, void *pMemory)
184 {
185 free(pMemory);
186 }
187
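/* Fallback callbacks used when the application does not supply an
 * allocator.  The align parameter can be ignored here: the driver only
 * requests 8-byte alignment through these, which malloc already provides.
 */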
188 static const VkAllocationCallbacks default_alloc = {
189 .pUserData = NULL,
190 .pfnAllocation = default_alloc_func,
191 .pfnReallocation = default_realloc_func,
192 .pfnFree = default_free_func,
193 };
194
195 static const struct debug_control tu_debug_options[] = {
196 { "startup", TU_DEBUG_STARTUP },
197 { "nir", TU_DEBUG_NIR },
198 { "ir3", TU_DEBUG_IR3 },
199 { "nobin", TU_DEBUG_NOBIN },
200 { "sysmem", TU_DEBUG_SYSMEM },
201 { "forcebin", TU_DEBUG_FORCEBIN },
202 { "noubwc", TU_DEBUG_NOUBWC },
203 { NULL, 0 }
204 };
205
206 const char *
207 tu_get_debug_option_name(int id)
208 {
209 assert(id < ARRAY_SIZE(tu_debug_options) - 1);
210 return tu_debug_options[id].string;
211 }
212
213 static int
214 tu_get_instance_extension_index(const char *name)
215 {
216 for (unsigned i = 0; i < TU_INSTANCE_EXTENSION_COUNT; ++i) {
217 if (strcmp(name, tu_instance_extensions[i].extensionName) == 0)
218 return i;
219 }
220 return -1;
221 }
222
223 VkResult
224 tu_CreateInstance(const VkInstanceCreateInfo *pCreateInfo,
225 const VkAllocationCallbacks *pAllocator,
226 VkInstance *pInstance)
227 {
228 struct tu_instance *instance;
229 VkResult result;
230
231 assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_INSTANCE_CREATE_INFO);
232
233 uint32_t client_version;
234 if (pCreateInfo->pApplicationInfo &&
235 pCreateInfo->pApplicationInfo->apiVersion != 0) {
236 client_version = pCreateInfo->pApplicationInfo->apiVersion;
237 } else {
238 tu_EnumerateInstanceVersion(&client_version);
239 }
240
241 instance = vk_zalloc2(&default_alloc, pAllocator, sizeof(*instance), 8,
242 VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
243
244 if (!instance)
245 return vk_error(NULL, VK_ERROR_OUT_OF_HOST_MEMORY);
246
247 vk_object_base_init(NULL, &instance->base, VK_OBJECT_TYPE_INSTANCE);
248
249 if (pAllocator)
250 instance->alloc = *pAllocator;
251 else
252 instance->alloc = default_alloc;
253
254 instance->api_version = client_version;
255 instance->physical_device_count = -1;
256
257 instance->debug_flags =
258 parse_debug_string(getenv("TU_DEBUG"), tu_debug_options);
259
260 if (instance->debug_flags & TU_DEBUG_STARTUP)
261 tu_logi("Created an instance");
262
263 for (uint32_t i = 0; i < pCreateInfo->enabledExtensionCount; i++) {
264 const char *ext_name = pCreateInfo->ppEnabledExtensionNames[i];
265 int index = tu_get_instance_extension_index(ext_name);
266
267 if (index < 0 || !tu_instance_extensions_supported.extensions[index]) {
268 vk_object_base_finish(&instance->base);
269 vk_free2(&default_alloc, pAllocator, instance);
270 return vk_error(instance, VK_ERROR_EXTENSION_NOT_PRESENT);
271 }
272
273 instance->enabled_extensions.extensions[index] = true;
274 }
275
276 result = vk_debug_report_instance_init(&instance->debug_report_callbacks);
277 if (result != VK_SUCCESS) {
278 vk_object_base_finish(&instance->base);
279 vk_free2(&default_alloc, pAllocator, instance);
280 return vk_error(instance, result);
281 }
282
283 glsl_type_singleton_init_or_ref();
284
285 VG(VALGRIND_CREATE_MEMPOOL(instance, 0, false));
286
287 *pInstance = tu_instance_to_handle(instance);
288
289 return VK_SUCCESS;
290 }
291
292 void
293 tu_DestroyInstance(VkInstance _instance,
294 const VkAllocationCallbacks *pAllocator)
295 {
296 TU_FROM_HANDLE(tu_instance, instance, _instance);
297
298 if (!instance)
299 return;
300
301 for (int i = 0; i < instance->physical_device_count; ++i) {
302 tu_physical_device_finish(instance->physical_devices + i);
303 }
304
305 VG(VALGRIND_DESTROY_MEMPOOL(instance));
306
307 glsl_type_singleton_decref();
308
309 vk_debug_report_instance_destroy(&instance->debug_report_callbacks);
310
311 vk_object_base_finish(&instance->base);
312 vk_free(&instance->alloc, instance);
313 }
314
315 VkResult
316 tu_EnumeratePhysicalDevices(VkInstance _instance,
317 uint32_t *pPhysicalDeviceCount,
318 VkPhysicalDevice *pPhysicalDevices)
319 {
320 TU_FROM_HANDLE(tu_instance, instance, _instance);
321 VK_OUTARRAY_MAKE(out, pPhysicalDevices, pPhysicalDeviceCount);
322
323 VkResult result;
324
325 if (instance->physical_device_count < 0) {
326 result = tu_enumerate_devices(instance);
327 if (result != VK_SUCCESS && result != VK_ERROR_INCOMPATIBLE_DRIVER)
328 return result;
329 }
330
331 for (uint32_t i = 0; i < instance->physical_device_count; ++i) {
332 vk_outarray_append(&out, p)
333 {
334 *p = tu_physical_device_to_handle(instance->physical_devices + i);
335 }
336 }
337
338 return vk_outarray_status(&out);
339 }
340
341 VkResult
342 tu_EnumeratePhysicalDeviceGroups(
343 VkInstance _instance,
344 uint32_t *pPhysicalDeviceGroupCount,
345 VkPhysicalDeviceGroupProperties *pPhysicalDeviceGroupProperties)
346 {
347 TU_FROM_HANDLE(tu_instance, instance, _instance);
348 VK_OUTARRAY_MAKE(out, pPhysicalDeviceGroupProperties,
349 pPhysicalDeviceGroupCount);
350 VkResult result;
351
352 if (instance->physical_device_count < 0) {
353 result = tu_enumerate_devices(instance);
354 if (result != VK_SUCCESS && result != VK_ERROR_INCOMPATIBLE_DRIVER)
355 return result;
356 }
357
358 for (uint32_t i = 0; i < instance->physical_device_count; ++i) {
359 vk_outarray_append(&out, p)
360 {
361 p->physicalDeviceCount = 1;
362 p->physicalDevices[0] =
363 tu_physical_device_to_handle(instance->physical_devices + i);
364 p->subsetAllocation = false;
365 }
366 }
367
368 return vk_outarray_status(&out);
369 }
370
371 void
372 tu_GetPhysicalDeviceFeatures(VkPhysicalDevice physicalDevice,
373 VkPhysicalDeviceFeatures *pFeatures)
374 {
377 *pFeatures = (VkPhysicalDeviceFeatures) {
378 .robustBufferAccess = true,
379 .fullDrawIndexUint32 = true,
380 .imageCubeArray = true,
381 .independentBlend = true,
382 .geometryShader = true,
383 .tessellationShader = true,
384 .sampleRateShading = true,
385 .dualSrcBlend = true,
386 .logicOp = true,
387 .multiDrawIndirect = true,
388 .drawIndirectFirstInstance = true,
389 .depthClamp = true,
390 .depthBiasClamp = true,
391 .fillModeNonSolid = true,
392 .depthBounds = true,
393 .wideLines = false,
394 .largePoints = true,
395 .alphaToOne = true,
396 .multiViewport = false,
397 .samplerAnisotropy = true,
398 .textureCompressionETC2 = true,
399 .textureCompressionASTC_LDR = true,
400 .textureCompressionBC = true,
401 .occlusionQueryPrecise = true,
402 .pipelineStatisticsQuery = false,
403 .vertexPipelineStoresAndAtomics = true,
404 .fragmentStoresAndAtomics = true,
405 .shaderTessellationAndGeometryPointSize = false,
406 .shaderImageGatherExtended = false,
407 .shaderStorageImageExtendedFormats = false,
408 .shaderStorageImageMultisample = false,
409 .shaderUniformBufferArrayDynamicIndexing = true,
410 .shaderSampledImageArrayDynamicIndexing = true,
411 .shaderStorageBufferArrayDynamicIndexing = true,
412 .shaderStorageImageArrayDynamicIndexing = true,
413 .shaderStorageImageReadWithoutFormat = false,
414 .shaderStorageImageWriteWithoutFormat = false,
415 .shaderClipDistance = false,
416 .shaderCullDistance = false,
417 .shaderFloat64 = false,
418 .shaderInt64 = false,
419 .shaderInt16 = false,
420 .sparseBinding = false,
421 .variableMultisampleRate = false,
422 .inheritedQueries = false,
423 };
424 }
425
426 void
427 tu_GetPhysicalDeviceFeatures2(VkPhysicalDevice physicalDevice,
428 VkPhysicalDeviceFeatures2 *pFeatures)
429 {
430 vk_foreach_struct(ext, pFeatures->pNext)
431 {
432 switch (ext->sType) {
433 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VULKAN_1_1_FEATURES: {
434 VkPhysicalDeviceVulkan11Features *features = (void *) ext;
435 features->storageBuffer16BitAccess = false;
436 features->uniformAndStorageBuffer16BitAccess = false;
437 features->storagePushConstant16 = false;
438 features->storageInputOutput16 = false;
439 features->multiview = false;
440 features->multiviewGeometryShader = false;
441 features->multiviewTessellationShader = false;
442 features->variablePointersStorageBuffer = true;
443 features->variablePointers = true;
444 features->protectedMemory = false;
445 features->samplerYcbcrConversion = true;
446 features->shaderDrawParameters = true;
447 break;
448 }
449 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VULKAN_1_2_FEATURES: {
450 VkPhysicalDeviceVulkan12Features *features = (void *) ext;
451 features->samplerMirrorClampToEdge = true;
452 features->drawIndirectCount = true;
453 features->storageBuffer8BitAccess = false;
454 features->uniformAndStorageBuffer8BitAccess = false;
455 features->storagePushConstant8 = false;
456 features->shaderBufferInt64Atomics = false;
457 features->shaderSharedInt64Atomics = false;
458 features->shaderFloat16 = false;
459 features->shaderInt8 = false;
460
461 features->descriptorIndexing = false;
462 features->shaderInputAttachmentArrayDynamicIndexing = false;
463 features->shaderUniformTexelBufferArrayDynamicIndexing = false;
464 features->shaderStorageTexelBufferArrayDynamicIndexing = false;
465 features->shaderUniformBufferArrayNonUniformIndexing = false;
466 features->shaderSampledImageArrayNonUniformIndexing = false;
467 features->shaderStorageBufferArrayNonUniformIndexing = false;
468 features->shaderStorageImageArrayNonUniformIndexing = false;
469 features->shaderInputAttachmentArrayNonUniformIndexing = false;
470 features->shaderUniformTexelBufferArrayNonUniformIndexing = false;
471 features->shaderStorageTexelBufferArrayNonUniformIndexing = false;
472 features->descriptorBindingUniformBufferUpdateAfterBind = false;
473 features->descriptorBindingSampledImageUpdateAfterBind = false;
474 features->descriptorBindingStorageImageUpdateAfterBind = false;
475 features->descriptorBindingStorageBufferUpdateAfterBind = false;
476 features->descriptorBindingUniformTexelBufferUpdateAfterBind = false;
477 features->descriptorBindingStorageTexelBufferUpdateAfterBind = false;
478 features->descriptorBindingUpdateUnusedWhilePending = false;
479 features->descriptorBindingPartiallyBound = false;
480 features->descriptorBindingVariableDescriptorCount = false;
481 features->runtimeDescriptorArray = false;
482
483 features->samplerFilterMinmax = true;
484 features->scalarBlockLayout = false;
485 features->imagelessFramebuffer = false;
486 features->uniformBufferStandardLayout = false;
487 features->shaderSubgroupExtendedTypes = false;
488 features->separateDepthStencilLayouts = false;
489 features->hostQueryReset = false;
490 features->timelineSemaphore = false;
491 features->bufferDeviceAddress = false;
492 features->bufferDeviceAddressCaptureReplay = false;
493 features->bufferDeviceAddressMultiDevice = false;
494 features->vulkanMemoryModel = false;
495 features->vulkanMemoryModelDeviceScope = false;
496 features->vulkanMemoryModelAvailabilityVisibilityChains = false;
497 features->shaderOutputViewportIndex = false;
498 features->shaderOutputLayer = false;
499 features->subgroupBroadcastDynamicId = false;
500 break;
501 }
502 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VARIABLE_POINTERS_FEATURES: {
503 VkPhysicalDeviceVariablePointersFeatures *features = (void *) ext;
504 features->variablePointersStorageBuffer = true;
505 features->variablePointers = true;
506 break;
507 }
508 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MULTIVIEW_FEATURES: {
509 VkPhysicalDeviceMultiviewFeatures *features =
510 (VkPhysicalDeviceMultiviewFeatures *) ext;
511 features->multiview = false;
512 features->multiviewGeometryShader = false;
513 features->multiviewTessellationShader = false;
514 break;
515 }
516 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_DRAW_PARAMETERS_FEATURES: {
517 VkPhysicalDeviceShaderDrawParametersFeatures *features =
518 (VkPhysicalDeviceShaderDrawParametersFeatures *) ext;
519 features->shaderDrawParameters = true;
520 break;
521 }
522 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROTECTED_MEMORY_FEATURES: {
523 VkPhysicalDeviceProtectedMemoryFeatures *features =
524 (VkPhysicalDeviceProtectedMemoryFeatures *) ext;
525 features->protectedMemory = false;
526 break;
527 }
528 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_16BIT_STORAGE_FEATURES: {
529 VkPhysicalDevice16BitStorageFeatures *features =
530 (VkPhysicalDevice16BitStorageFeatures *) ext;
531 features->storageBuffer16BitAccess = false;
532 features->uniformAndStorageBuffer16BitAccess = false;
533 features->storagePushConstant16 = false;
534 features->storageInputOutput16 = false;
535 break;
536 }
537 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SAMPLER_YCBCR_CONVERSION_FEATURES: {
538 VkPhysicalDeviceSamplerYcbcrConversionFeatures *features =
539 (VkPhysicalDeviceSamplerYcbcrConversionFeatures *) ext;
540 features->samplerYcbcrConversion = true;
541 break;
542 }
543 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DESCRIPTOR_INDEXING_FEATURES_EXT: {
544 VkPhysicalDeviceDescriptorIndexingFeaturesEXT *features =
545 (VkPhysicalDeviceDescriptorIndexingFeaturesEXT *) ext;
546 features->shaderInputAttachmentArrayDynamicIndexing = false;
547 features->shaderUniformTexelBufferArrayDynamicIndexing = false;
548 features->shaderStorageTexelBufferArrayDynamicIndexing = false;
549 features->shaderUniformBufferArrayNonUniformIndexing = false;
550 features->shaderSampledImageArrayNonUniformIndexing = false;
551 features->shaderStorageBufferArrayNonUniformIndexing = false;
552 features->shaderStorageImageArrayNonUniformIndexing = false;
553 features->shaderInputAttachmentArrayNonUniformIndexing = false;
554 features->shaderUniformTexelBufferArrayNonUniformIndexing = false;
555 features->shaderStorageTexelBufferArrayNonUniformIndexing = false;
556 features->descriptorBindingUniformBufferUpdateAfterBind = false;
557 features->descriptorBindingSampledImageUpdateAfterBind = false;
558 features->descriptorBindingStorageImageUpdateAfterBind = false;
559 features->descriptorBindingStorageBufferUpdateAfterBind = false;
560 features->descriptorBindingUniformTexelBufferUpdateAfterBind = false;
561 features->descriptorBindingStorageTexelBufferUpdateAfterBind = false;
562 features->descriptorBindingUpdateUnusedWhilePending = false;
563 features->descriptorBindingPartiallyBound = false;
564 features->descriptorBindingVariableDescriptorCount = false;
565 features->runtimeDescriptorArray = false;
566 break;
567 }
568 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_CONDITIONAL_RENDERING_FEATURES_EXT: {
569 VkPhysicalDeviceConditionalRenderingFeaturesEXT *features =
570 (VkPhysicalDeviceConditionalRenderingFeaturesEXT *) ext;
571 features->conditionalRendering = true;
572 features->inheritedConditionalRendering = true;
573 break;
574 }
575 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TRANSFORM_FEEDBACK_FEATURES_EXT: {
576 VkPhysicalDeviceTransformFeedbackFeaturesEXT *features =
577 (VkPhysicalDeviceTransformFeedbackFeaturesEXT *) ext;
578 features->transformFeedback = true;
579 features->geometryStreams = false;
580 break;
581 }
582 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_INDEX_TYPE_UINT8_FEATURES_EXT: {
583 VkPhysicalDeviceIndexTypeUint8FeaturesEXT *features =
584 (VkPhysicalDeviceIndexTypeUint8FeaturesEXT *)ext;
585 features->indexTypeUint8 = true;
586 break;
587 }
588 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VERTEX_ATTRIBUTE_DIVISOR_FEATURES_EXT: {
589 VkPhysicalDeviceVertexAttributeDivisorFeaturesEXT *features =
590 (VkPhysicalDeviceVertexAttributeDivisorFeaturesEXT *)ext;
591 features->vertexAttributeInstanceRateDivisor = true;
592 features->vertexAttributeInstanceRateZeroDivisor = true;
593 break;
594 }
595 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PRIVATE_DATA_FEATURES_EXT: {
596 VkPhysicalDevicePrivateDataFeaturesEXT *features =
597 (VkPhysicalDevicePrivateDataFeaturesEXT *)ext;
598 features->privateData = true;
599 break;
600 }
601 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DEPTH_CLIP_ENABLE_FEATURES_EXT: {
602 VkPhysicalDeviceDepthClipEnableFeaturesEXT *features =
603 (VkPhysicalDeviceDepthClipEnableFeaturesEXT *)ext;
604 features->depthClipEnable = true;
605 break;
606 }
607 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_4444_FORMATS_FEATURES_EXT: {
608 VkPhysicalDevice4444FormatsFeaturesEXT *features = (void *)ext;
609 features->formatA4R4G4B4 = true;
610 features->formatA4B4G4R4 = true;
611 break;
612 }
613 default:
614 break;
615 }
616 }
617 return tu_GetPhysicalDeviceFeatures(physicalDevice, &pFeatures->features);
618 }
619
620 void
621 tu_GetPhysicalDeviceProperties(VkPhysicalDevice physicalDevice,
622 VkPhysicalDeviceProperties *pProperties)
623 {
624 TU_FROM_HANDLE(tu_physical_device, pdevice, physicalDevice);
625 VkSampleCountFlags sample_counts =
626 VK_SAMPLE_COUNT_1_BIT | VK_SAMPLE_COUNT_2_BIT | VK_SAMPLE_COUNT_4_BIT;
627
628 /* I have no idea what the maximum size is, but the hardware supports very
629 * large numbers of descriptors (at least 2^16). This limit is based on
630 * CP_LOAD_STATE6, which has a 28-bit field for the DWORD offset, so that
631 * we don't have to think about what to do if that overflows, but really
632 * nothing is likely to get close to this.
633 */
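   /* A6XX_TEX_CONST_DWORDS is 16, so this works out to 2^24 (~16.8M)
    * descriptors.
    */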
634 const size_t max_descriptor_set_size = (1 << 28) / A6XX_TEX_CONST_DWORDS;
635
636 VkPhysicalDeviceLimits limits = {
637 .maxImageDimension1D = (1 << 14),
638 .maxImageDimension2D = (1 << 14),
639 .maxImageDimension3D = (1 << 11),
640 .maxImageDimensionCube = (1 << 14),
641 .maxImageArrayLayers = (1 << 11),
642 .maxTexelBufferElements = 128 * 1024 * 1024,
643 .maxUniformBufferRange = MAX_UNIFORM_BUFFER_RANGE,
644 .maxStorageBufferRange = MAX_STORAGE_BUFFER_RANGE,
645 .maxPushConstantsSize = MAX_PUSH_CONSTANTS_SIZE,
646 .maxMemoryAllocationCount = UINT32_MAX,
647 .maxSamplerAllocationCount = 64 * 1024,
648 .bufferImageGranularity = 64, /* A cache line */
649 .sparseAddressSpaceSize = 0xffffffffu, /* buffer max size */
650 .maxBoundDescriptorSets = MAX_SETS,
651 .maxPerStageDescriptorSamplers = max_descriptor_set_size,
652 .maxPerStageDescriptorUniformBuffers = max_descriptor_set_size,
653 .maxPerStageDescriptorStorageBuffers = max_descriptor_set_size,
654 .maxPerStageDescriptorSampledImages = max_descriptor_set_size,
655 .maxPerStageDescriptorStorageImages = max_descriptor_set_size,
656 .maxPerStageDescriptorInputAttachments = MAX_RTS,
657 .maxPerStageResources = max_descriptor_set_size,
658 .maxDescriptorSetSamplers = max_descriptor_set_size,
659 .maxDescriptorSetUniformBuffers = max_descriptor_set_size,
660 .maxDescriptorSetUniformBuffersDynamic = MAX_DYNAMIC_UNIFORM_BUFFERS,
661 .maxDescriptorSetStorageBuffers = max_descriptor_set_size,
662 .maxDescriptorSetStorageBuffersDynamic = MAX_DYNAMIC_STORAGE_BUFFERS,
663 .maxDescriptorSetSampledImages = max_descriptor_set_size,
664 .maxDescriptorSetStorageImages = max_descriptor_set_size,
665 .maxDescriptorSetInputAttachments = MAX_RTS,
666 .maxVertexInputAttributes = 32,
667 .maxVertexInputBindings = 32,
668 .maxVertexInputAttributeOffset = 4095,
669 .maxVertexInputBindingStride = 2048,
670 .maxVertexOutputComponents = 128,
671 .maxTessellationGenerationLevel = 64,
672 .maxTessellationPatchSize = 32,
673 .maxTessellationControlPerVertexInputComponents = 128,
674 .maxTessellationControlPerVertexOutputComponents = 128,
675 .maxTessellationControlPerPatchOutputComponents = 120,
676 .maxTessellationControlTotalOutputComponents = 4096,
677 .maxTessellationEvaluationInputComponents = 128,
678 .maxTessellationEvaluationOutputComponents = 128,
679 .maxGeometryShaderInvocations = 32,
680 .maxGeometryInputComponents = 64,
681 .maxGeometryOutputComponents = 128,
682 .maxGeometryOutputVertices = 256,
683 .maxGeometryTotalOutputComponents = 1024,
684 .maxFragmentInputComponents = 124,
685 .maxFragmentOutputAttachments = 8,
686 .maxFragmentDualSrcAttachments = 1,
687 .maxFragmentCombinedOutputResources = 8,
688 .maxComputeSharedMemorySize = 32768,
689 .maxComputeWorkGroupCount = { 65535, 65535, 65535 },
690 .maxComputeWorkGroupInvocations = 2048,
691 .maxComputeWorkGroupSize = { 2048, 2048, 2048 },
692 .subPixelPrecisionBits = 8,
693 .subTexelPrecisionBits = 8,
694 .mipmapPrecisionBits = 8,
695 .maxDrawIndexedIndexValue = UINT32_MAX,
696 .maxDrawIndirectCount = UINT32_MAX,
697 .maxSamplerLodBias = 4095.0 / 256.0, /* [-16, 15.99609375] */
698 .maxSamplerAnisotropy = 16,
699 .maxViewports = MAX_VIEWPORTS,
700 .maxViewportDimensions = { (1 << 14), (1 << 14) },
701 .viewportBoundsRange = { INT16_MIN, INT16_MAX },
702 .viewportSubPixelBits = 8,
703 .minMemoryMapAlignment = 4096, /* A page */
704 .minTexelBufferOffsetAlignment = 64,
705 .minUniformBufferOffsetAlignment = 64,
706 .minStorageBufferOffsetAlignment = 64,
707 .minTexelOffset = -16,
708 .maxTexelOffset = 15,
709 .minTexelGatherOffset = -32,
710 .maxTexelGatherOffset = 31,
711 .minInterpolationOffset = -0.5,
712 .maxInterpolationOffset = 0.4375,
713 .subPixelInterpolationOffsetBits = 4,
714 .maxFramebufferWidth = (1 << 14),
715 .maxFramebufferHeight = (1 << 14),
716 .maxFramebufferLayers = (1 << 10),
717 .framebufferColorSampleCounts = sample_counts,
718 .framebufferDepthSampleCounts = sample_counts,
719 .framebufferStencilSampleCounts = sample_counts,
720 .framebufferNoAttachmentsSampleCounts = sample_counts,
721 .maxColorAttachments = MAX_RTS,
722 .sampledImageColorSampleCounts = sample_counts,
723 .sampledImageIntegerSampleCounts = VK_SAMPLE_COUNT_1_BIT,
724 .sampledImageDepthSampleCounts = sample_counts,
725 .sampledImageStencilSampleCounts = sample_counts,
726 .storageImageSampleCounts = VK_SAMPLE_COUNT_1_BIT,
727 .maxSampleMaskWords = 1,
728 .timestampComputeAndGraphics = true,
729 .timestampPeriod = 1000000000.0 / 19200000.0, /* CP_ALWAYS_ON_COUNTER is fixed 19.2MHz */
730 .maxClipDistances = 8,
731 .maxCullDistances = 8,
732 .maxCombinedClipAndCullDistances = 8,
733 .discreteQueuePriorities = 1,
734 .pointSizeRange = { 1, 4092 },
735 .lineWidthRange = { 0.0, 7.9921875 },
736 .pointSizeGranularity = 0.0625,
737 .lineWidthGranularity = (1.0 / 128.0),
738 .strictLines = false, /* FINISHME */
739 .standardSampleLocations = true,
740 .optimalBufferCopyOffsetAlignment = 128,
741 .optimalBufferCopyRowPitchAlignment = 128,
742 .nonCoherentAtomSize = 64,
743 };
744
745 *pProperties = (VkPhysicalDeviceProperties) {
746 .apiVersion = tu_physical_device_api_version(pdevice),
747 .driverVersion = vk_get_driver_version(),
748 .vendorID = 0, /* TODO */
749 .deviceID = 0,
750 .deviceType = VK_PHYSICAL_DEVICE_TYPE_INTEGRATED_GPU,
751 .limits = limits,
752 .sparseProperties = { 0 },
753 };
754
755 strcpy(pProperties->deviceName, pdevice->name);
756 memcpy(pProperties->pipelineCacheUUID, pdevice->cache_uuid, VK_UUID_SIZE);
757 }
758
759 void
760 tu_GetPhysicalDeviceProperties2(VkPhysicalDevice physicalDevice,
761 VkPhysicalDeviceProperties2 *pProperties)
762 {
763 TU_FROM_HANDLE(tu_physical_device, pdevice, physicalDevice);
764 tu_GetPhysicalDeviceProperties(physicalDevice, &pProperties->properties);
765
766 vk_foreach_struct(ext, pProperties->pNext)
767 {
768 switch (ext->sType) {
769 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PUSH_DESCRIPTOR_PROPERTIES_KHR: {
770 VkPhysicalDevicePushDescriptorPropertiesKHR *properties =
771 (VkPhysicalDevicePushDescriptorPropertiesKHR *) ext;
772 properties->maxPushDescriptors = MAX_PUSH_DESCRIPTORS;
773 break;
774 }
775 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_ID_PROPERTIES: {
776 VkPhysicalDeviceIDProperties *properties =
777 (VkPhysicalDeviceIDProperties *) ext;
778 memcpy(properties->driverUUID, pdevice->driver_uuid, VK_UUID_SIZE);
779 memcpy(properties->deviceUUID, pdevice->device_uuid, VK_UUID_SIZE);
780 properties->deviceLUIDValid = false;
781 break;
782 }
783 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MULTIVIEW_PROPERTIES: {
784 VkPhysicalDeviceMultiviewProperties *properties =
785 (VkPhysicalDeviceMultiviewProperties *) ext;
786 properties->maxMultiviewViewCount = MAX_VIEWS;
787 properties->maxMultiviewInstanceIndex = INT_MAX;
788 break;
789 }
790 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_POINT_CLIPPING_PROPERTIES: {
791 VkPhysicalDevicePointClippingProperties *properties =
792 (VkPhysicalDevicePointClippingProperties *) ext;
793 properties->pointClippingBehavior =
794 VK_POINT_CLIPPING_BEHAVIOR_ALL_CLIP_PLANES;
795 break;
796 }
797 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MAINTENANCE_3_PROPERTIES: {
798 VkPhysicalDeviceMaintenance3Properties *properties =
799 (VkPhysicalDeviceMaintenance3Properties *) ext;
800 /* Make sure everything is addressable by a signed 32-bit int, and
801 * our largest descriptors are 96 bytes. */
802 properties->maxPerSetDescriptors = (1ull << 31) / 96;
803 /* Our buffer size fields allow only this much */
804 properties->maxMemoryAllocationSize = 0xFFFFFFFFull;
805 break;
806 }
807 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TRANSFORM_FEEDBACK_PROPERTIES_EXT: {
808 VkPhysicalDeviceTransformFeedbackPropertiesEXT *properties =
809 (VkPhysicalDeviceTransformFeedbackPropertiesEXT *)ext;
810
811 properties->maxTransformFeedbackStreams = IR3_MAX_SO_STREAMS;
812 properties->maxTransformFeedbackBuffers = IR3_MAX_SO_BUFFERS;
813 properties->maxTransformFeedbackBufferSize = UINT32_MAX;
814 properties->maxTransformFeedbackStreamDataSize = 512;
815 properties->maxTransformFeedbackBufferDataSize = 512;
816 properties->maxTransformFeedbackBufferDataStride = 512;
817 properties->transformFeedbackQueries = true;
818 properties->transformFeedbackStreamsLinesTriangles = false;
819 properties->transformFeedbackRasterizationStreamSelect = false;
820 properties->transformFeedbackDraw = true;
821 break;
822 }
823 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SAMPLE_LOCATIONS_PROPERTIES_EXT: {
824 VkPhysicalDeviceSampleLocationsPropertiesEXT *properties =
825 (VkPhysicalDeviceSampleLocationsPropertiesEXT *)ext;
826 properties->sampleLocationSampleCounts = 0;
827 if (pdevice->supported_extensions.EXT_sample_locations) {
828 properties->sampleLocationSampleCounts =
829 VK_SAMPLE_COUNT_1_BIT | VK_SAMPLE_COUNT_2_BIT | VK_SAMPLE_COUNT_4_BIT;
830 }
831 properties->maxSampleLocationGridSize = (VkExtent2D) { 1 , 1 };
832 properties->sampleLocationCoordinateRange[0] = 0.0f;
833 properties->sampleLocationCoordinateRange[1] = 0.9375f;
834 properties->sampleLocationSubPixelBits = 4;
835 properties->variableSampleLocations = true;
836 break;
837 }
838 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SAMPLER_FILTER_MINMAX_PROPERTIES: {
839 VkPhysicalDeviceSamplerFilterMinmaxProperties *properties =
840 (VkPhysicalDeviceSamplerFilterMinmaxProperties *)ext;
841 properties->filterMinmaxImageComponentMapping = true;
842 properties->filterMinmaxSingleComponentFormats = true;
843 break;
844 }
845 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SUBGROUP_PROPERTIES: {
846 VkPhysicalDeviceSubgroupProperties *properties =
847 (VkPhysicalDeviceSubgroupProperties *)ext;
848 properties->subgroupSize = 64;
849 properties->supportedStages = VK_SHADER_STAGE_COMPUTE_BIT;
850 properties->supportedOperations = VK_SUBGROUP_FEATURE_BASIC_BIT |
851 VK_SUBGROUP_FEATURE_VOTE_BIT;
852 properties->quadOperationsInAllStages = false;
853 break;
854 }
855 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VERTEX_ATTRIBUTE_DIVISOR_PROPERTIES_EXT: {
856 VkPhysicalDeviceVertexAttributeDivisorPropertiesEXT *props =
857 (VkPhysicalDeviceVertexAttributeDivisorPropertiesEXT *)ext;
858 props->maxVertexAttribDivisor = UINT32_MAX;
859 break;
860 }
861 default:
862 break;
863 }
864 }
865 }
866
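/* turnip exposes a single queue family with one universal queue handling
 * graphics, compute, and transfer work.
 */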
867 static const VkQueueFamilyProperties tu_queue_family_properties = {
868 .queueFlags =
869 VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT | VK_QUEUE_TRANSFER_BIT,
870 .queueCount = 1,
871 .timestampValidBits = 48,
872 .minImageTransferGranularity = { 1, 1, 1 },
873 };
874
875 void
876 tu_GetPhysicalDeviceQueueFamilyProperties(
877 VkPhysicalDevice physicalDevice,
878 uint32_t *pQueueFamilyPropertyCount,
879 VkQueueFamilyProperties *pQueueFamilyProperties)
880 {
881 VK_OUTARRAY_MAKE(out, pQueueFamilyProperties, pQueueFamilyPropertyCount);
882
883 vk_outarray_append(&out, p) { *p = tu_queue_family_properties; }
884 }
885
886 void
887 tu_GetPhysicalDeviceQueueFamilyProperties2(
888 VkPhysicalDevice physicalDevice,
889 uint32_t *pQueueFamilyPropertyCount,
890 VkQueueFamilyProperties2 *pQueueFamilyProperties)
891 {
892 VK_OUTARRAY_MAKE(out, pQueueFamilyProperties, pQueueFamilyPropertyCount);
893
894 vk_outarray_append(&out, p)
895 {
896 p->queueFamilyProperties = tu_queue_family_properties;
897 }
898 }
899
900 static uint64_t
901 tu_get_system_heap_size()
902 {
903 struct sysinfo info;
904 sysinfo(&info);
905
906 uint64_t total_ram = (uint64_t) info.totalram * (uint64_t) info.mem_unit;
907
908 /* We don't want to burn too much ram with the GPU. If the user has 4GiB
909 * or less, we use at most half. If they have more than 4GiB, we use 3/4.
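    * For example, a machine with 16GiB of RAM gets a 12GiB heap.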
910 */
911 uint64_t available_ram;
912 if (total_ram <= 4ull * 1024ull * 1024ull * 1024ull)
913 available_ram = total_ram / 2;
914 else
915 available_ram = total_ram * 3 / 4;
916
917 return available_ram;
918 }
919
920 void
921 tu_GetPhysicalDeviceMemoryProperties(
922 VkPhysicalDevice physicalDevice,
923 VkPhysicalDeviceMemoryProperties *pMemoryProperties)
924 {
925 pMemoryProperties->memoryHeapCount = 1;
926 pMemoryProperties->memoryHeaps[0].size = tu_get_system_heap_size();
927 pMemoryProperties->memoryHeaps[0].flags = VK_MEMORY_HEAP_DEVICE_LOCAL_BIT;
928
929 pMemoryProperties->memoryTypeCount = 1;
930 pMemoryProperties->memoryTypes[0].propertyFlags =
931 VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT |
932 VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT |
933 VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
934 pMemoryProperties->memoryTypes[0].heapIndex = 0;
935 }
936
937 void
938 tu_GetPhysicalDeviceMemoryProperties2(
939 VkPhysicalDevice physicalDevice,
940 VkPhysicalDeviceMemoryProperties2 *pMemoryProperties)
941 {
942 return tu_GetPhysicalDeviceMemoryProperties(
943 physicalDevice, &pMemoryProperties->memoryProperties);
944 }
945
946 static VkResult
947 tu_queue_init(struct tu_device *device,
948 struct tu_queue *queue,
949 uint32_t queue_family_index,
950 int idx,
951 VkDeviceQueueCreateFlags flags)
952 {
953 vk_object_base_init(&device->vk, &queue->base, VK_OBJECT_TYPE_QUEUE);
954
955 queue->device = device;
956 queue->queue_family_index = queue_family_index;
957 queue->queue_idx = idx;
958 queue->flags = flags;
959
960 int ret = tu_drm_submitqueue_new(device, 0, &queue->msm_queue_id);
961 if (ret)
962 return VK_ERROR_INITIALIZATION_FAILED;
963
964 tu_fence_init(&queue->submit_fence, false);
965
966 return VK_SUCCESS;
967 }
968
969 static void
970 tu_queue_finish(struct tu_queue *queue)
971 {
972 tu_fence_finish(&queue->submit_fence);
973 tu_drm_submitqueue_close(queue->device, queue->msm_queue_id);
974 }
975
976 static int
977 tu_get_device_extension_index(const char *name)
978 {
979 for (unsigned i = 0; i < TU_DEVICE_EXTENSION_COUNT; ++i) {
980 if (strcmp(name, tu_device_extensions[i].extensionName) == 0)
981 return i;
982 }
983 return -1;
984 }
985
986 struct PACKED bcolor_entry {
987 uint32_t fp32[4];
988 uint16_t ui16[4];
989 int16_t si16[4];
990 uint16_t fp16[4];
991 uint16_t rgb565;
992 uint16_t rgb5a1;
993 uint16_t rgba4;
994 uint8_t __pad0[2];
995 uint8_t ui8[4];
996 int8_t si8[4];
997 uint32_t rgb10a2;
998 uint32_t z24; /* also s8? */
999 uint16_t srgb[4]; /* appears to duplicate fp16[], but clamped, used for srgb */
1000 uint8_t __pad1[56];
1001 } border_color[] = {
1002 [VK_BORDER_COLOR_FLOAT_TRANSPARENT_BLACK] = {},
1003 [VK_BORDER_COLOR_INT_TRANSPARENT_BLACK] = {},
1004 [VK_BORDER_COLOR_FLOAT_OPAQUE_BLACK] = {
1005 .fp32[3] = 0x3f800000,
1006 .ui16[3] = 0xffff,
1007 .si16[3] = 0x7fff,
1008 .fp16[3] = 0x3c00,
1009 .rgb5a1 = 0x8000,
1010 .rgba4 = 0xf000,
1011 .ui8[3] = 0xff,
1012 .si8[3] = 0x7f,
1013 .rgb10a2 = 0xc0000000,
1014 .srgb[3] = 0x3c00,
1015 },
1016 [VK_BORDER_COLOR_INT_OPAQUE_BLACK] = {
1017 .fp32[3] = 1,
1018 .fp16[3] = 1,
1019 },
1020 [VK_BORDER_COLOR_FLOAT_OPAQUE_WHITE] = {
1021 .fp32[0 ... 3] = 0x3f800000,
1022 .ui16[0 ... 3] = 0xffff,
1023 .si16[0 ... 3] = 0x7fff,
1024 .fp16[0 ... 3] = 0x3c00,
1025 .rgb565 = 0xffff,
1026 .rgb5a1 = 0xffff,
1027 .rgba4 = 0xffff,
1028 .ui8[0 ... 3] = 0xff,
1029 .si8[0 ... 3] = 0x7f,
1030 .rgb10a2 = 0xffffffff,
1031 .z24 = 0xffffff,
1032 .srgb[0 ... 3] = 0x3c00,
1033 },
1034 [VK_BORDER_COLOR_INT_OPAQUE_WHITE] = {
1035 .fp32[0 ... 3] = 1,
1036 .fp16[0 ... 3] = 1,
1037 },
1038 };
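/* This table is copied into the global BO at device creation time and is
 * indexed by VkBorderColor; its layout must match what the hardware
 * samples (see the STATIC_ASSERT against struct tu6_global in
 * tu_CreateDevice below).
 */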
1039
1040 VkResult
1041 tu_CreateDevice(VkPhysicalDevice physicalDevice,
1042 const VkDeviceCreateInfo *pCreateInfo,
1043 const VkAllocationCallbacks *pAllocator,
1044 VkDevice *pDevice)
1045 {
1046 TU_FROM_HANDLE(tu_physical_device, physical_device, physicalDevice);
1047 VkResult result;
1048 struct tu_device *device;
1049
1050 /* Check enabled features */
1051 if (pCreateInfo->pEnabledFeatures) {
1052 VkPhysicalDeviceFeatures supported_features;
1053 tu_GetPhysicalDeviceFeatures(physicalDevice, &supported_features);
1054 VkBool32 *supported_feature = (VkBool32 *) &supported_features;
1055 VkBool32 *enabled_feature = (VkBool32 *) pCreateInfo->pEnabledFeatures;
1056 unsigned num_features =
1057 sizeof(VkPhysicalDeviceFeatures) / sizeof(VkBool32);
1058 for (uint32_t i = 0; i < num_features; i++) {
1059 if (enabled_feature[i] && !supported_feature[i])
1060 return vk_error(physical_device->instance,
1061 VK_ERROR_FEATURE_NOT_PRESENT);
1062 }
1063 }
1064
1065 device = vk_zalloc2(&physical_device->instance->alloc, pAllocator,
1066 sizeof(*device), 8, VK_SYSTEM_ALLOCATION_SCOPE_DEVICE);
1067 if (!device)
1068 return vk_error(physical_device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
1069
1070 vk_device_init(&device->vk, pCreateInfo,
1071 &physical_device->instance->alloc, pAllocator);
1072
1073 device->instance = physical_device->instance;
1074 device->physical_device = physical_device;
1075 device->_lost = false;
1076
1077 for (uint32_t i = 0; i < pCreateInfo->enabledExtensionCount; i++) {
1078 const char *ext_name = pCreateInfo->ppEnabledExtensionNames[i];
1079 int index = tu_get_device_extension_index(ext_name);
1080 if (index < 0 ||
1081 !physical_device->supported_extensions.extensions[index]) {
1082 vk_free(&device->vk.alloc, device);
1083 return vk_error(physical_device->instance,
1084 VK_ERROR_EXTENSION_NOT_PRESENT);
1085 }
1086
1087 device->enabled_extensions.extensions[index] = true;
1088 }
1089
1090 for (unsigned i = 0; i < pCreateInfo->queueCreateInfoCount; i++) {
1091 const VkDeviceQueueCreateInfo *queue_create =
1092 &pCreateInfo->pQueueCreateInfos[i];
1093 uint32_t qfi = queue_create->queueFamilyIndex;
1094 device->queues[qfi] = vk_alloc(
1095 &device->vk.alloc, queue_create->queueCount * sizeof(struct tu_queue),
1096 8, VK_SYSTEM_ALLOCATION_SCOPE_DEVICE);
1097 if (!device->queues[qfi]) {
1098 result = VK_ERROR_OUT_OF_HOST_MEMORY;
1099 goto fail_queues;
1100 }
1101
1102 memset(device->queues[qfi], 0,
1103 queue_create->queueCount * sizeof(struct tu_queue));
1104
1105 device->queue_count[qfi] = queue_create->queueCount;
1106
1107 for (unsigned q = 0; q < queue_create->queueCount; q++) {
1108 result = tu_queue_init(device, &device->queues[qfi][q], qfi, q,
1109 queue_create->flags);
1110 if (result != VK_SUCCESS)
1111 goto fail_queues;
1112 }
1113 }
1114
1115 device->compiler = ir3_compiler_create(NULL, physical_device->gpu_id);
   if (!device->compiler) {
      result = VK_ERROR_INITIALIZATION_FAILED;
      goto fail_queues;
   }
1118
   /* Initial sizes; these are increased if the VSC streams overflow. */
1120 device->vsc_draw_strm_pitch = 0x1000 + VSC_PAD;
1121 device->vsc_prim_strm_pitch = 0x4000 + VSC_PAD;
1122
1123 STATIC_ASSERT(sizeof(border_color) == sizeof(((struct tu6_global*) 0)->border_color));
1124 result = tu_bo_init_new(device, &device->global_bo, sizeof(struct tu6_global));
1125 if (result != VK_SUCCESS)
1126 goto fail_global_bo;
1127
1128 result = tu_bo_map(device, &device->global_bo);
1129 if (result != VK_SUCCESS)
1130 goto fail_global_bo_map;
1131
1132 struct tu6_global *global = device->global_bo.map;
1133 memcpy(global->border_color, border_color, sizeof(border_color));
1134 global->predicate = 0;
1135 tu_init_clear_blit_shaders(global);
1136
1137 VkPipelineCacheCreateInfo ci;
1138 ci.sType = VK_STRUCTURE_TYPE_PIPELINE_CACHE_CREATE_INFO;
1139 ci.pNext = NULL;
1140 ci.flags = 0;
1141 ci.pInitialData = NULL;
1142 ci.initialDataSize = 0;
1143 VkPipelineCache pc;
1144 result =
1145 tu_CreatePipelineCache(tu_device_to_handle(device), &ci, NULL, &pc);
1146 if (result != VK_SUCCESS)
1147 goto fail_pipeline_cache;
1148
1149 device->mem_cache = tu_pipeline_cache_from_handle(pc);
1150
1151 for (unsigned i = 0; i < ARRAY_SIZE(device->scratch_bos); i++)
1152 mtx_init(&device->scratch_bos[i].construct_mtx, mtx_plain);
1153
1154 mtx_init(&device->vsc_pitch_mtx, mtx_plain);
1155
1156 *pDevice = tu_device_to_handle(device);
1157 return VK_SUCCESS;
1158
1159 fail_pipeline_cache:
1160 fail_global_bo_map:
1161 tu_bo_finish(device, &device->global_bo);
1162
1163 fail_global_bo:
1164 ralloc_free(device->compiler);
1165
1166 fail_queues:
1167 for (unsigned i = 0; i < TU_MAX_QUEUE_FAMILIES; i++) {
1168 for (unsigned q = 0; q < device->queue_count[i]; q++)
1169 tu_queue_finish(&device->queues[i][q]);
1170 if (device->queue_count[i])
1171 vk_object_free(&device->vk, NULL, device->queues[i]);
1172 }
1173
1174 vk_free(&device->vk.alloc, device);
1175 return result;
1176 }
1177
1178 void
1179 tu_DestroyDevice(VkDevice _device, const VkAllocationCallbacks *pAllocator)
1180 {
1181 TU_FROM_HANDLE(tu_device, device, _device);
1182
1183 if (!device)
1184 return;
1185
1186 for (unsigned i = 0; i < TU_MAX_QUEUE_FAMILIES; i++) {
1187 for (unsigned q = 0; q < device->queue_count[i]; q++)
1188 tu_queue_finish(&device->queues[i][q]);
1189 if (device->queue_count[i])
1190 vk_object_free(&device->vk, NULL, device->queues[i]);
1191 }
1192
1193 for (unsigned i = 0; i < ARRAY_SIZE(device->scratch_bos); i++) {
1194 if (device->scratch_bos[i].initialized)
1195 tu_bo_finish(device, &device->scratch_bos[i].bo);
1196 }
1197
1198 ir3_compiler_destroy(device->compiler);
1199
1200 VkPipelineCache pc = tu_pipeline_cache_to_handle(device->mem_cache);
1201 tu_DestroyPipelineCache(tu_device_to_handle(device), pc, NULL);
1202
1203 vk_free(&device->vk.alloc, device);
1204 }
1205
1206 VkResult
1207 _tu_device_set_lost(struct tu_device *device,
1208 const char *file, int line,
1209 const char *msg, ...)
1210 {
1211 /* Set the flag indicating that waits should return in finite time even
1212 * after device loss.
1213 */
1214 p_atomic_inc(&device->_lost);
1215
1216 /* TODO: Report the log message through VkDebugReportCallbackEXT instead */
1217 fprintf(stderr, "%s:%d: ", file, line);
1218 va_list ap;
1219 va_start(ap, msg);
1220 vfprintf(stderr, msg, ap);
1221 va_end(ap);
1222
1223 if (env_var_as_boolean("TU_ABORT_ON_DEVICE_LOSS", false))
1224 abort();
1225
1226 return VK_ERROR_DEVICE_LOST;
1227 }
1228
1229 VkResult
1230 tu_get_scratch_bo(struct tu_device *dev, uint64_t size, struct tu_bo **bo)
1231 {
1232 unsigned size_log2 = MAX2(util_logbase2_ceil64(size), MIN_SCRATCH_BO_SIZE_LOG2);
1233 unsigned index = size_log2 - MIN_SCRATCH_BO_SIZE_LOG2;
1234 assert(index < ARRAY_SIZE(dev->scratch_bos));
1235
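   /* Any BO at least as large as the request works, so scan upward from
    * the smallest usable size class.
    */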
1236 for (unsigned i = index; i < ARRAY_SIZE(dev->scratch_bos); i++) {
1237 if (p_atomic_read(&dev->scratch_bos[i].initialized)) {
1238 /* Fast path: just return the already-allocated BO. */
1239 *bo = &dev->scratch_bos[i].bo;
1240 return VK_SUCCESS;
1241 }
1242 }
1243
   /* Slow path: actually allocate the BO. We take a lock because the
    * process of allocating it is slow, and we don't want to block the CPU
    * while it finishes, other than by possibly waiting on another thread
    * that is allocating the same BO.
    */
1248 mtx_lock(&dev->scratch_bos[index].construct_mtx);
1249
1250 /* Another thread may have allocated it already while we were waiting on
1251 * the lock. We need to check this in order to avoid double-allocating.
1252 */
1253 if (dev->scratch_bos[index].initialized) {
1254 mtx_unlock(&dev->scratch_bos[index].construct_mtx);
1255 *bo = &dev->scratch_bos[index].bo;
1256 return VK_SUCCESS;
1257 }
1258
1259 unsigned bo_size = 1ull << size_log2;
1260 VkResult result = tu_bo_init_new(dev, &dev->scratch_bos[index].bo, bo_size);
1261 if (result != VK_SUCCESS) {
1262 mtx_unlock(&dev->scratch_bos[index].construct_mtx);
1263 return result;
1264 }
1265
1266 p_atomic_set(&dev->scratch_bos[index].initialized, true);
1267
1268 mtx_unlock(&dev->scratch_bos[index].construct_mtx);
1269
1270 *bo = &dev->scratch_bos[index].bo;
1271 return VK_SUCCESS;
1272 }
1273
1274 VkResult
1275 tu_EnumerateInstanceLayerProperties(uint32_t *pPropertyCount,
1276 VkLayerProperties *pProperties)
1277 {
1278 *pPropertyCount = 0;
1279 return VK_SUCCESS;
1280 }
1281
1282 VkResult
1283 tu_EnumerateDeviceLayerProperties(VkPhysicalDevice physicalDevice,
1284 uint32_t *pPropertyCount,
1285 VkLayerProperties *pProperties)
1286 {
1287 *pPropertyCount = 0;
1288 return VK_SUCCESS;
1289 }
1290
1291 void
1292 tu_GetDeviceQueue2(VkDevice _device,
1293 const VkDeviceQueueInfo2 *pQueueInfo,
1294 VkQueue *pQueue)
1295 {
1296 TU_FROM_HANDLE(tu_device, device, _device);
1297 struct tu_queue *queue;
1298
1299 queue =
1300 &device->queues[pQueueInfo->queueFamilyIndex][pQueueInfo->queueIndex];
1301 if (pQueueInfo->flags != queue->flags) {
1302 /* From the Vulkan 1.1.70 spec:
1303 *
1304 * "The queue returned by vkGetDeviceQueue2 must have the same
1305 * flags value from this structure as that used at device
1306 * creation time in a VkDeviceQueueCreateInfo instance. If no
1307 * matching flags were specified at device creation time then
1308 * pQueue will return VK_NULL_HANDLE."
1309 */
1310 *pQueue = VK_NULL_HANDLE;
1311 return;
1312 }
1313
1314 *pQueue = tu_queue_to_handle(queue);
1315 }
1316
1317 void
1318 tu_GetDeviceQueue(VkDevice _device,
1319 uint32_t queueFamilyIndex,
1320 uint32_t queueIndex,
1321 VkQueue *pQueue)
1322 {
1323 const VkDeviceQueueInfo2 info =
1324 (VkDeviceQueueInfo2) { .sType = VK_STRUCTURE_TYPE_DEVICE_QUEUE_INFO_2,
1325 .queueFamilyIndex = queueFamilyIndex,
1326 .queueIndex = queueIndex };
1327
1328 tu_GetDeviceQueue2(_device, &info, pQueue);
1329 }
1330
1331 static VkResult
1332 tu_get_semaphore_syncobjs(const VkSemaphore *sems,
1333 uint32_t sem_count,
1334 bool wait,
1335 struct drm_msm_gem_submit_syncobj **out,
1336 uint32_t *out_count)
1337 {
1338 uint32_t syncobj_count = 0;
1339 struct drm_msm_gem_submit_syncobj *syncobjs;
1340
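   /* First pass: count syncobj-backed semaphores so the array can be
    * sized.  A temporary (imported) payload takes precedence over the
    * permanent one.
    */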
1341 for (uint32_t i = 0; i < sem_count; ++i) {
1342 TU_FROM_HANDLE(tu_semaphore, sem, sems[i]);
1343
1344 struct tu_semaphore_part *part =
1345 sem->temporary.kind != TU_SEMAPHORE_NONE ?
1346 &sem->temporary : &sem->permanent;
1347
1348 if (part->kind == TU_SEMAPHORE_SYNCOBJ)
1349 ++syncobj_count;
1350 }
1351
1352 *out = NULL;
1353 *out_count = syncobj_count;
1354 if (!syncobj_count)
1355 return VK_SUCCESS;
1356
1357 *out = syncobjs = calloc(syncobj_count, sizeof (*syncobjs));
1358 if (!syncobjs)
1359 return VK_ERROR_OUT_OF_HOST_MEMORY;
1360
1361 for (uint32_t i = 0, j = 0; i < sem_count; ++i) {
1362 TU_FROM_HANDLE(tu_semaphore, sem, sems[i]);
1363
1364 struct tu_semaphore_part *part =
1365 sem->temporary.kind != TU_SEMAPHORE_NONE ?
1366 &sem->temporary : &sem->permanent;
1367
1368 if (part->kind == TU_SEMAPHORE_SYNCOBJ) {
1369 syncobjs[j].handle = part->syncobj;
1370 syncobjs[j].flags = wait ? MSM_SUBMIT_SYNCOBJ_RESET : 0;
1371 ++j;
1372 }
1373 }
1374
1375 return VK_SUCCESS;
1376 }
1377
1378
1379 static void
1380 tu_semaphores_remove_temp(struct tu_device *device,
1381 const VkSemaphore *sems,
1382 uint32_t sem_count)
1383 {
1384 for (uint32_t i = 0; i < sem_count; ++i) {
1385 TU_FROM_HANDLE(tu_semaphore, sem, sems[i]);
1386 tu_semaphore_remove_temp(device, sem);
1387 }
1388 }
1389
1390 VkResult
1391 tu_QueueSubmit(VkQueue _queue,
1392 uint32_t submitCount,
1393 const VkSubmitInfo *pSubmits,
1394 VkFence _fence)
1395 {
1396 TU_FROM_HANDLE(tu_queue, queue, _queue);
1397 VkResult result;
1398
1399 for (uint32_t i = 0; i < submitCount; ++i) {
1400 const VkSubmitInfo *submit = pSubmits + i;
1401 const bool last_submit = (i == submitCount - 1);
1402 struct drm_msm_gem_submit_syncobj *in_syncobjs = NULL, *out_syncobjs = NULL;
1403 uint32_t nr_in_syncobjs, nr_out_syncobjs;
1404 struct tu_bo_list bo_list;
1405 tu_bo_list_init(&bo_list);
1406
1407 result = tu_get_semaphore_syncobjs(pSubmits[i].pWaitSemaphores,
1408 pSubmits[i].waitSemaphoreCount,
1409 false, &in_syncobjs, &nr_in_syncobjs);
1410 if (result != VK_SUCCESS) {
1411 return tu_device_set_lost(queue->device,
1412 "failed to allocate space for semaphore submission\n");
1413 }
1414
1415 result = tu_get_semaphore_syncobjs(pSubmits[i].pSignalSemaphores,
1416 pSubmits[i].signalSemaphoreCount,
1417 false, &out_syncobjs, &nr_out_syncobjs);
1418 if (result != VK_SUCCESS) {
1419 free(in_syncobjs);
1420 return tu_device_set_lost(queue->device,
1421 "failed to allocate space for semaphore submission\n");
1422 }
1423
1424 uint32_t entry_count = 0;
1425 for (uint32_t j = 0; j < submit->commandBufferCount; ++j) {
1426 TU_FROM_HANDLE(tu_cmd_buffer, cmdbuf, submit->pCommandBuffers[j]);
1427 entry_count += cmdbuf->cs.entry_count;
1428 }
1429
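      /* Flatten the IBs of every command buffer into one kernel submit;
       * each cmd entry records the BO-list index, offset, and size of a
       * single IB.
       */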
1430 struct drm_msm_gem_submit_cmd cmds[entry_count];
1431 uint32_t entry_idx = 0;
1432 for (uint32_t j = 0; j < submit->commandBufferCount; ++j) {
1433 TU_FROM_HANDLE(tu_cmd_buffer, cmdbuf, submit->pCommandBuffers[j]);
1434 struct tu_cs *cs = &cmdbuf->cs;
1435 for (unsigned i = 0; i < cs->entry_count; ++i, ++entry_idx) {
1436 cmds[entry_idx].type = MSM_SUBMIT_CMD_BUF;
1437 cmds[entry_idx].submit_idx =
1438 tu_bo_list_add(&bo_list, cs->entries[i].bo,
1439 MSM_SUBMIT_BO_READ | MSM_SUBMIT_BO_DUMP);
1440 cmds[entry_idx].submit_offset = cs->entries[i].offset;
1441 cmds[entry_idx].size = cs->entries[i].size;
1442 cmds[entry_idx].pad = 0;
1443 cmds[entry_idx].nr_relocs = 0;
1444 cmds[entry_idx].relocs = 0;
1445 }
1446
1447 tu_bo_list_merge(&bo_list, &cmdbuf->bo_list);
1448 }
1449
1450 uint32_t flags = MSM_PIPE_3D0;
1451 if (nr_in_syncobjs) {
1452 flags |= MSM_SUBMIT_SYNCOBJ_IN;
1453 }
1454 if (nr_out_syncobjs) {
1455 flags |= MSM_SUBMIT_SYNCOBJ_OUT;
1456 }
1457
1458 if (last_submit) {
1459 flags |= MSM_SUBMIT_FENCE_FD_OUT;
1460 }
1461
1462 struct drm_msm_gem_submit req = {
1463 .flags = flags,
1464 .queueid = queue->msm_queue_id,
1465 .bos = (uint64_t)(uintptr_t) bo_list.bo_infos,
1466 .nr_bos = bo_list.count,
1467 .cmds = (uint64_t)(uintptr_t)cmds,
1468 .nr_cmds = entry_count,
1469 .in_syncobjs = (uint64_t)(uintptr_t)in_syncobjs,
1470 .out_syncobjs = (uint64_t)(uintptr_t)out_syncobjs,
1471 .nr_in_syncobjs = nr_in_syncobjs,
1472 .nr_out_syncobjs = nr_out_syncobjs,
1473 .syncobj_stride = sizeof(struct drm_msm_gem_submit_syncobj),
1474 };
1475
1476 int ret = drmCommandWriteRead(queue->device->physical_device->local_fd,
1477 DRM_MSM_GEM_SUBMIT,
1478 &req, sizeof(req));
1479 if (ret) {
1480 free(in_syncobjs);
1481 free(out_syncobjs);
1482 return tu_device_set_lost(queue->device, "submit failed: %s\n",
1483 strerror(errno));
1484 }
1485
1486 tu_bo_list_destroy(&bo_list);
1487 free(in_syncobjs);
1488 free(out_syncobjs);
1489
1490 tu_semaphores_remove_temp(queue->device, pSubmits[i].pWaitSemaphores,
1491 pSubmits[i].waitSemaphoreCount);
      if (last_submit) {
         /* No need to merge fences as queue execution is serialized; the
          * kernel only returned a fence fd because MSM_SUBMIT_FENCE_FD_OUT
          * was set above, which happens only on the last submit.
          */
         tu_fence_update_fd(&queue->submit_fence, req.fence_fd);
      }
1498 }
1499
1500 if (_fence != VK_NULL_HANDLE) {
1501 TU_FROM_HANDLE(tu_fence, fence, _fence);
1502 tu_fence_copy(fence, &queue->submit_fence);
1503 }
1504
1505 return VK_SUCCESS;
1506 }
1507
1508 VkResult
1509 tu_QueueWaitIdle(VkQueue _queue)
1510 {
1511 TU_FROM_HANDLE(tu_queue, queue, _queue);
1512
1513 if (tu_device_is_lost(queue->device))
1514 return VK_ERROR_DEVICE_LOST;
1515
1516 tu_fence_wait_idle(&queue->submit_fence);
1517
1518 return VK_SUCCESS;
1519 }
1520
1521 VkResult
1522 tu_DeviceWaitIdle(VkDevice _device)
1523 {
1524 TU_FROM_HANDLE(tu_device, device, _device);
1525
1526 if (tu_device_is_lost(device))
1527 return VK_ERROR_DEVICE_LOST;
1528
1529 for (unsigned i = 0; i < TU_MAX_QUEUE_FAMILIES; i++) {
1530 for (unsigned q = 0; q < device->queue_count[i]; q++) {
1531 tu_QueueWaitIdle(tu_queue_to_handle(&device->queues[i][q]));
1532 }
1533 }
1534 return VK_SUCCESS;
1535 }
1536
1537 VkResult
1538 tu_EnumerateInstanceExtensionProperties(const char *pLayerName,
1539 uint32_t *pPropertyCount,
1540 VkExtensionProperties *pProperties)
1541 {
1542 VK_OUTARRAY_MAKE(out, pProperties, pPropertyCount);
1543
   /* We support no layers */
1545 if (pLayerName)
1546 return vk_error(NULL, VK_ERROR_LAYER_NOT_PRESENT);
1547
1548 for (int i = 0; i < TU_INSTANCE_EXTENSION_COUNT; i++) {
1549 if (tu_instance_extensions_supported.extensions[i]) {
1550 vk_outarray_append(&out, prop) { *prop = tu_instance_extensions[i]; }
1551 }
1552 }
1553
1554 return vk_outarray_status(&out);
1555 }
1556
1557 VkResult
1558 tu_EnumerateDeviceExtensionProperties(VkPhysicalDevice physicalDevice,
1559 const char *pLayerName,
1560 uint32_t *pPropertyCount,
1561 VkExtensionProperties *pProperties)
1562 {
1564 TU_FROM_HANDLE(tu_physical_device, device, physicalDevice);
1565 VK_OUTARRAY_MAKE(out, pProperties, pPropertyCount);
1566
   /* We support no layers */
1568 if (pLayerName)
1569 return vk_error(NULL, VK_ERROR_LAYER_NOT_PRESENT);
1570
1571 for (int i = 0; i < TU_DEVICE_EXTENSION_COUNT; i++) {
1572 if (device->supported_extensions.extensions[i]) {
1573 vk_outarray_append(&out, prop) { *prop = tu_device_extensions[i]; }
1574 }
1575 }
1576
1577 return vk_outarray_status(&out);
1578 }
1579
1580 PFN_vkVoidFunction
1581 tu_GetInstanceProcAddr(VkInstance _instance, const char *pName)
1582 {
1583 TU_FROM_HANDLE(tu_instance, instance, _instance);
1584
1585 return tu_lookup_entrypoint_checked(
1586 pName, instance ? instance->api_version : 0,
1587 instance ? &instance->enabled_extensions : NULL, NULL);
1588 }
1589
1590 /* The loader wants us to expose a second GetInstanceProcAddr function
1591 * to work around certain LD_PRELOAD issues seen in apps.
1592 */
1593 PUBLIC
1594 VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL
1595 vk_icdGetInstanceProcAddr(VkInstance instance, const char *pName);
1596
1597 PUBLIC
1598 VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL
1599 vk_icdGetInstanceProcAddr(VkInstance instance, const char *pName)
1600 {
1601 return tu_GetInstanceProcAddr(instance, pName);
1602 }
1603
1604 PFN_vkVoidFunction
1605 tu_GetDeviceProcAddr(VkDevice _device, const char *pName)
1606 {
1607 TU_FROM_HANDLE(tu_device, device, _device);
1608
1609 return tu_lookup_entrypoint_checked(pName, device->instance->api_version,
1610 &device->instance->enabled_extensions,
1611 &device->enabled_extensions);
1612 }
1613
1614 static VkResult
1615 tu_alloc_memory(struct tu_device *device,
1616 const VkMemoryAllocateInfo *pAllocateInfo,
1617 const VkAllocationCallbacks *pAllocator,
1618 VkDeviceMemory *pMem)
1619 {
1620 struct tu_device_memory *mem;
1621 VkResult result;
1622
1623 assert(pAllocateInfo->sType == VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO);
1624
1625 if (pAllocateInfo->allocationSize == 0) {
1626 /* Apparently, this is allowed */
1627 *pMem = VK_NULL_HANDLE;
1628 return VK_SUCCESS;
1629 }
1630
1631 mem = vk_object_alloc(&device->vk, pAllocator, sizeof(*mem),
1632 VK_OBJECT_TYPE_DEVICE_MEMORY);
1633 if (mem == NULL)
1634 return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
1635
1636 const VkImportMemoryFdInfoKHR *fd_info =
1637 vk_find_struct_const(pAllocateInfo->pNext, IMPORT_MEMORY_FD_INFO_KHR);
1638 if (fd_info && !fd_info->handleType)
1639 fd_info = NULL;
1640
1641 if (fd_info) {
1642 assert(fd_info->handleType ==
1643 VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT ||
1644 fd_info->handleType ==
1645 VK_EXTERNAL_MEMORY_HANDLE_TYPE_DMA_BUF_BIT_EXT);
1646
1647 /*
1648 * TODO Importing the same fd twice gives us the same handle without
1649 * reference counting. We need to maintain a per-instance handle-to-bo
1650 * table and add reference count to tu_bo.
1651 */
1652 result = tu_bo_init_dmabuf(device, &mem->bo,
1653 pAllocateInfo->allocationSize, fd_info->fd);
1654 if (result == VK_SUCCESS) {
1655 /* take ownership and close the fd */
1656 close(fd_info->fd);
1657 }
1658 } else {
1659 result =
1660 tu_bo_init_new(device, &mem->bo, pAllocateInfo->allocationSize);
1661 }
1662
1663 if (result != VK_SUCCESS) {
1664 vk_object_free(&device->vk, pAllocator, mem);
1665 return result;
1666 }
1667
1668 mem->size = pAllocateInfo->allocationSize;
1669 mem->type_index = pAllocateInfo->memoryTypeIndex;
1670
1671 mem->map = NULL;
1672 mem->user_ptr = NULL;
1673
1674 *pMem = tu_device_memory_to_handle(mem);
1675
1676 return VK_SUCCESS;
1677 }
1678
1679 VkResult
1680 tu_AllocateMemory(VkDevice _device,
1681 const VkMemoryAllocateInfo *pAllocateInfo,
1682 const VkAllocationCallbacks *pAllocator,
1683 VkDeviceMemory *pMem)
1684 {
1685 TU_FROM_HANDLE(tu_device, device, _device);
1686 return tu_alloc_memory(device, pAllocateInfo, pAllocator, pMem);
1687 }
1688
1689 void
1690 tu_FreeMemory(VkDevice _device,
1691 VkDeviceMemory _mem,
1692 const VkAllocationCallbacks *pAllocator)
1693 {
1694 TU_FROM_HANDLE(tu_device, device, _device);
1695 TU_FROM_HANDLE(tu_device_memory, mem, _mem);
1696
1697 if (mem == NULL)
1698 return;
1699
1700 tu_bo_finish(device, &mem->bo);
1701 vk_object_free(&device->vk, pAllocator, mem);
1702 }
1703
1704 VkResult
1705 tu_MapMemory(VkDevice _device,
1706 VkDeviceMemory _memory,
1707 VkDeviceSize offset,
1708 VkDeviceSize size,
1709 VkMemoryMapFlags flags,
1710 void **ppData)
1711 {
1712 TU_FROM_HANDLE(tu_device, device, _device);
1713 TU_FROM_HANDLE(tu_device_memory, mem, _memory);
1714 VkResult result;
1715
1716 if (mem == NULL) {
1717 *ppData = NULL;
1718 return VK_SUCCESS;
1719 }
1720
1721 if (mem->user_ptr) {
1722 *ppData = mem->user_ptr;
1723 } else if (!mem->map) {
1724 result = tu_bo_map(device, &mem->bo);
1725 if (result != VK_SUCCESS)
1726 return result;
1727 *ppData = mem->map = mem->bo.map;
1728 } else
1729 *ppData = mem->map;
1730
1731 if (*ppData) {
1732 *ppData += offset;
1733 return VK_SUCCESS;
1734 }
1735
1736 return vk_error(device->instance, VK_ERROR_MEMORY_MAP_FAILED);
1737 }
1738
1739 void
1740 tu_UnmapMemory(VkDevice _device, VkDeviceMemory _memory)
1741 {
1742    /* No-op: the mapping lives until the BO is destroyed; the freedreno Gallium driver takes the same approach. */
1743 }
1744
1745 VkResult
1746 tu_FlushMappedMemoryRanges(VkDevice _device,
1747 uint32_t memoryRangeCount,
1748 const VkMappedMemoryRange *pMemoryRanges)
1749 {
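   /* Assumed no-op: all host-visible memory we expose is presumed
    * cached-coherent, so there is nothing to flush.
    */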
1750 return VK_SUCCESS;
1751 }
1752
1753 VkResult
1754 tu_InvalidateMappedMemoryRanges(VkDevice _device,
1755 uint32_t memoryRangeCount,
1756 const VkMappedMemoryRange *pMemoryRanges)
1757 {
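   /* Assumed no-op for the same reason as tu_FlushMappedMemoryRanges above. */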
1758 return VK_SUCCESS;
1759 }
1760
1761 void
1762 tu_GetBufferMemoryRequirements(VkDevice _device,
1763 VkBuffer _buffer,
1764 VkMemoryRequirements *pMemoryRequirements)
1765 {
1766 TU_FROM_HANDLE(tu_buffer, buffer, _buffer);
1767
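   /* We expose a single memory type; 64 bytes is presumed to satisfy the
    * strictest buffer alignment the hardware requires.
    */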
1768 pMemoryRequirements->memoryTypeBits = 1;
1769 pMemoryRequirements->alignment = 64;
1770 pMemoryRequirements->size =
1771 align64(buffer->size, pMemoryRequirements->alignment);
1772 }
1773
1774 void
1775 tu_GetBufferMemoryRequirements2(
1776 VkDevice device,
1777 const VkBufferMemoryRequirementsInfo2 *pInfo,
1778 VkMemoryRequirements2 *pMemoryRequirements)
1779 {
1780 tu_GetBufferMemoryRequirements(device, pInfo->buffer,
1781 &pMemoryRequirements->memoryRequirements);
1782 }
1783
1784 void
1785 tu_GetImageMemoryRequirements(VkDevice _device,
1786 VkImage _image,
1787 VkMemoryRequirements *pMemoryRequirements)
1788 {
1789 TU_FROM_HANDLE(tu_image, image, _image);
1790
1791 pMemoryRequirements->memoryTypeBits = 1;
1792 pMemoryRequirements->size = image->total_size;
1793 pMemoryRequirements->alignment = image->layout[0].base_align;
1794 }
1795
1796 void
1797 tu_GetImageMemoryRequirements2(VkDevice device,
1798 const VkImageMemoryRequirementsInfo2 *pInfo,
1799 VkMemoryRequirements2 *pMemoryRequirements)
1800 {
1801 tu_GetImageMemoryRequirements(device, pInfo->image,
1802 &pMemoryRequirements->memoryRequirements);
1803 }
1804
1805 void
1806 tu_GetImageSparseMemoryRequirements(
1807 VkDevice device,
1808 VkImage image,
1809 uint32_t *pSparseMemoryRequirementCount,
1810 VkSparseImageMemoryRequirements *pSparseMemoryRequirements)
1811 {
1812 tu_stub();
1813 }
1814
1815 void
1816 tu_GetImageSparseMemoryRequirements2(
1817 VkDevice device,
1818 const VkImageSparseMemoryRequirementsInfo2 *pInfo,
1819 uint32_t *pSparseMemoryRequirementCount,
1820 VkSparseImageMemoryRequirements2 *pSparseMemoryRequirements)
1821 {
1822 tu_stub();
1823 }
1824
1825 void
1826 tu_GetDeviceMemoryCommitment(VkDevice device,
1827 VkDeviceMemory memory,
1828 VkDeviceSize *pCommittedMemoryInBytes)
1829 {
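   /* No lazily-allocated memory types are exposed, so commitment is always
    * zero.
    */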
1830 *pCommittedMemoryInBytes = 0;
1831 }
1832
1833 VkResult
1834 tu_BindBufferMemory2(VkDevice device,
1835 uint32_t bindInfoCount,
1836 const VkBindBufferMemoryInfo *pBindInfos)
1837 {
1838 for (uint32_t i = 0; i < bindInfoCount; ++i) {
1839 TU_FROM_HANDLE(tu_device_memory, mem, pBindInfos[i].memory);
1840 TU_FROM_HANDLE(tu_buffer, buffer, pBindInfos[i].buffer);
1841
1842 if (mem) {
1843 buffer->bo = &mem->bo;
1844 buffer->bo_offset = pBindInfos[i].memoryOffset;
1845 } else {
1846 buffer->bo = NULL;
1847 }
1848 }
1849 return VK_SUCCESS;
1850 }
1851
1852 VkResult
1853 tu_BindBufferMemory(VkDevice device,
1854 VkBuffer buffer,
1855 VkDeviceMemory memory,
1856 VkDeviceSize memoryOffset)
1857 {
1858 const VkBindBufferMemoryInfo info = {
1859 .sType = VK_STRUCTURE_TYPE_BIND_BUFFER_MEMORY_INFO,
1860 .buffer = buffer,
1861 .memory = memory,
1862 .memoryOffset = memoryOffset
1863 };
1864
1865 return tu_BindBufferMemory2(device, 1, &info);
1866 }
1867
1868 VkResult
1869 tu_BindImageMemory2(VkDevice device,
1870 uint32_t bindInfoCount,
1871 const VkBindImageMemoryInfo *pBindInfos)
1872 {
1873 for (uint32_t i = 0; i < bindInfoCount; ++i) {
1874 TU_FROM_HANDLE(tu_image, image, pBindInfos[i].image);
1875 TU_FROM_HANDLE(tu_device_memory, mem, pBindInfos[i].memory);
1876
1877 if (mem) {
1878 image->bo = &mem->bo;
1879 image->bo_offset = pBindInfos[i].memoryOffset;
1880 } else {
1881 image->bo = NULL;
1882 image->bo_offset = 0;
1883 }
1884 }
1885
1886 return VK_SUCCESS;
1887 }
1888
1889 VkResult
1890 tu_BindImageMemory(VkDevice device,
1891 VkImage image,
1892 VkDeviceMemory memory,
1893 VkDeviceSize memoryOffset)
1894 {
1895 const VkBindImageMemoryInfo info = {
1896       .sType = VK_STRUCTURE_TYPE_BIND_IMAGE_MEMORY_INFO,
1897 .image = image,
1898 .memory = memory,
1899 .memoryOffset = memoryOffset
1900 };
1901
1902 return tu_BindImageMemory2(device, 1, &info);
1903 }
1904
1905 VkResult
1906 tu_QueueBindSparse(VkQueue _queue,
1907 uint32_t bindInfoCount,
1908 const VkBindSparseInfo *pBindInfo,
1909 VkFence _fence)
1910 {
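   /* TODO: sparse binding is unimplemented; the call is accepted as a no-op
    * and _fence, if provided, is never signaled here.
    */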
1911 return VK_SUCCESS;
1912 }
1913
1914 /* Queue semaphore functions */
1915
1916
1917 static void
1918 tu_semaphore_part_destroy(struct tu_device *device,
1919 struct tu_semaphore_part *part)
1920 {
1921    switch (part->kind) {
1922 case TU_SEMAPHORE_NONE:
1923 break;
1924 case TU_SEMAPHORE_SYNCOBJ:
1925 drmSyncobjDestroy(device->physical_device->local_fd, part->syncobj);
1926 break;
1927 }
1928 part->kind = TU_SEMAPHORE_NONE;
1929 }
1930
1931 static void
1932 tu_semaphore_remove_temp(struct tu_device *device,
1933 struct tu_semaphore *sem)
1934 {
1935 if (sem->temporary.kind != TU_SEMAPHORE_NONE) {
1936 tu_semaphore_part_destroy(device, &sem->temporary);
1937 }
1938 }
1939
1940 VkResult
1941 tu_CreateSemaphore(VkDevice _device,
1942 const VkSemaphoreCreateInfo *pCreateInfo,
1943 const VkAllocationCallbacks *pAllocator,
1944 VkSemaphore *pSemaphore)
1945 {
1946 TU_FROM_HANDLE(tu_device, device, _device);
1947
1948 struct tu_semaphore *sem =
1949 vk_object_alloc(&device->vk, pAllocator, sizeof(*sem),
1950 VK_OBJECT_TYPE_SEMAPHORE);
1951 if (!sem)
1952 return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
1953
1954 const VkExportSemaphoreCreateInfo *export =
1955 vk_find_struct_const(pCreateInfo->pNext, EXPORT_SEMAPHORE_CREATE_INFO);
1956 VkExternalSemaphoreHandleTypeFlags handleTypes =
1957 export ? export->handleTypes : 0;
1958
1959 sem->permanent.kind = TU_SEMAPHORE_NONE;
1960 sem->temporary.kind = TU_SEMAPHORE_NONE;
1961
1962 if (handleTypes) {
1963 if (drmSyncobjCreate(device->physical_device->local_fd, 0, &sem->permanent.syncobj) < 0) {
1964          vk_object_free(&device->vk, pAllocator, sem);
1965          return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
1966 }
1967 sem->permanent.kind = TU_SEMAPHORE_SYNCOBJ;
1968 }
1969 *pSemaphore = tu_semaphore_to_handle(sem);
1970 return VK_SUCCESS;
1971 }
1972
1973 void
1974 tu_DestroySemaphore(VkDevice _device,
1975 VkSemaphore _semaphore,
1976 const VkAllocationCallbacks *pAllocator)
1977 {
1978 TU_FROM_HANDLE(tu_device, device, _device);
1979 TU_FROM_HANDLE(tu_semaphore, sem, _semaphore);
1980 if (!_semaphore)
1981 return;
1982
1983 tu_semaphore_part_destroy(device, &sem->permanent);
1984 tu_semaphore_part_destroy(device, &sem->temporary);
1985
1986 vk_object_free(&device->vk, pAllocator, sem);
1987 }
1988
1989 VkResult
1990 tu_CreateEvent(VkDevice _device,
1991 const VkEventCreateInfo *pCreateInfo,
1992 const VkAllocationCallbacks *pAllocator,
1993 VkEvent *pEvent)
1994 {
1995 TU_FROM_HANDLE(tu_device, device, _device);
1996
1997 struct tu_event *event =
1998 vk_object_alloc(&device->vk, pAllocator, sizeof(*event),
1999 VK_OBJECT_TYPE_EVENT);
2000 if (!event)
2001 return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
2002
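   /* Back the event with a small GPU-visible BO so the command stream can
    * set and wait on it; 0x1000 is presumably the minimum (page-sized)
    * allocation.
    */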
2003 VkResult result = tu_bo_init_new(device, &event->bo, 0x1000);
2004 if (result != VK_SUCCESS)
2005 goto fail_alloc;
2006
2007 result = tu_bo_map(device, &event->bo);
2008 if (result != VK_SUCCESS)
2009 goto fail_map;
2010
2011 *pEvent = tu_event_to_handle(event);
2012
2013 return VK_SUCCESS;
2014
2015 fail_map:
2016 tu_bo_finish(device, &event->bo);
2017 fail_alloc:
2018 vk_object_free(&device->vk, pAllocator, event);
2019 return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
2020 }
2021
2022 void
2023 tu_DestroyEvent(VkDevice _device,
2024 VkEvent _event,
2025 const VkAllocationCallbacks *pAllocator)
2026 {
2027 TU_FROM_HANDLE(tu_device, device, _device);
2028 TU_FROM_HANDLE(tu_event, event, _event);
2029
2030 if (!event)
2031 return;
2032
2033 tu_bo_finish(device, &event->bo);
2034 vk_object_free(&device->vk, pAllocator, event);
2035 }
2036
2037 VkResult
2038 tu_GetEventStatus(VkDevice _device, VkEvent _event)
2039 {
2040 TU_FROM_HANDLE(tu_event, event, _event);
2041
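   /* The first 64 bits of the event BO hold the state: 1 = set, 0 = reset. */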
2042 if (*(uint64_t*) event->bo.map == 1)
2043 return VK_EVENT_SET;
2044 return VK_EVENT_RESET;
2045 }
2046
2047 VkResult
2048 tu_SetEvent(VkDevice _device, VkEvent _event)
2049 {
2050 TU_FROM_HANDLE(tu_event, event, _event);
2051 *(uint64_t*) event->bo.map = 1;
2052
2053 return VK_SUCCESS;
2054 }
2055
2056 VkResult
2057 tu_ResetEvent(VkDevice _device, VkEvent _event)
2058 {
2059 TU_FROM_HANDLE(tu_event, event, _event);
2060 *(uint64_t*) event->bo.map = 0;
2061
2062 return VK_SUCCESS;
2063 }
2064
2065 VkResult
2066 tu_CreateBuffer(VkDevice _device,
2067 const VkBufferCreateInfo *pCreateInfo,
2068 const VkAllocationCallbacks *pAllocator,
2069 VkBuffer *pBuffer)
2070 {
2071 TU_FROM_HANDLE(tu_device, device, _device);
2072 struct tu_buffer *buffer;
2073
2074 assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO);
2075
2076 buffer = vk_object_alloc(&device->vk, pAllocator, sizeof(*buffer),
2077 VK_OBJECT_TYPE_BUFFER);
2078 if (buffer == NULL)
2079 return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
2080
2081 buffer->size = pCreateInfo->size;
2082 buffer->usage = pCreateInfo->usage;
2083 buffer->flags = pCreateInfo->flags;
2084
2085 *pBuffer = tu_buffer_to_handle(buffer);
2086
2087 return VK_SUCCESS;
2088 }
2089
2090 void
2091 tu_DestroyBuffer(VkDevice _device,
2092 VkBuffer _buffer,
2093 const VkAllocationCallbacks *pAllocator)
2094 {
2095 TU_FROM_HANDLE(tu_device, device, _device);
2096 TU_FROM_HANDLE(tu_buffer, buffer, _buffer);
2097
2098 if (!buffer)
2099 return;
2100
2101 vk_object_free(&device->vk, pAllocator, buffer);
2102 }
2103
2104 VkResult
2105 tu_CreateFramebuffer(VkDevice _device,
2106 const VkFramebufferCreateInfo *pCreateInfo,
2107 const VkAllocationCallbacks *pAllocator,
2108 VkFramebuffer *pFramebuffer)
2109 {
2110 TU_FROM_HANDLE(tu_device, device, _device);
2111 TU_FROM_HANDLE(tu_render_pass, pass, pCreateInfo->renderPass);
2112 struct tu_framebuffer *framebuffer;
2113
2114 assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO);
2115
2116 size_t size = sizeof(*framebuffer) + sizeof(struct tu_attachment_info) *
2117 pCreateInfo->attachmentCount;
2118 framebuffer = vk_object_alloc(&device->vk, pAllocator, size,
2119 VK_OBJECT_TYPE_FRAMEBUFFER);
2120 if (framebuffer == NULL)
2121 return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
2122
2123 framebuffer->attachment_count = pCreateInfo->attachmentCount;
2124 framebuffer->width = pCreateInfo->width;
2125 framebuffer->height = pCreateInfo->height;
2126 framebuffer->layers = pCreateInfo->layers;
2127 for (uint32_t i = 0; i < pCreateInfo->attachmentCount; i++) {
2128 VkImageView _iview = pCreateInfo->pAttachments[i];
2129 struct tu_image_view *iview = tu_image_view_from_handle(_iview);
2130 framebuffer->attachments[i].attachment = iview;
2131 }
2132
2133 tu_framebuffer_tiling_config(framebuffer, device, pass);
2134
2135 *pFramebuffer = tu_framebuffer_to_handle(framebuffer);
2136 return VK_SUCCESS;
2137 }
2138
2139 void
2140 tu_DestroyFramebuffer(VkDevice _device,
2141 VkFramebuffer _fb,
2142 const VkAllocationCallbacks *pAllocator)
2143 {
2144 TU_FROM_HANDLE(tu_device, device, _device);
2145 TU_FROM_HANDLE(tu_framebuffer, fb, _fb);
2146
2147 if (!fb)
2148 return;
2149
2150 vk_object_free(&device->vk, pAllocator, fb);
2151 }
2152
2153 static void
2154 tu_init_sampler(struct tu_device *device,
2155 struct tu_sampler *sampler,
2156 const VkSamplerCreateInfo *pCreateInfo)
2157 {
2158 const struct VkSamplerReductionModeCreateInfo *reduction =
2159 vk_find_struct_const(pCreateInfo->pNext, SAMPLER_REDUCTION_MODE_CREATE_INFO);
2160 const struct VkSamplerYcbcrConversionInfo *ycbcr_conversion =
2161 vk_find_struct_const(pCreateInfo->pNext, SAMPLER_YCBCR_CONVERSION_INFO);
2162
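   /* A6XX_TEX_SAMP_0_ANISO encodes log2 of the sample count (1x..16x ->
    * 0..4), and the LOD fields appear to be unsigned 4.8 fixed point, hence
    * the 4095/256 clamp below.
    */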
2163 unsigned aniso = pCreateInfo->anisotropyEnable ?
2164 util_last_bit(MIN2((uint32_t)pCreateInfo->maxAnisotropy >> 1, 8)) : 0;
2165 bool miplinear = (pCreateInfo->mipmapMode == VK_SAMPLER_MIPMAP_MODE_LINEAR);
2166 float min_lod = CLAMP(pCreateInfo->minLod, 0.0f, 4095.0f / 256.0f);
2167 float max_lod = CLAMP(pCreateInfo->maxLod, 0.0f, 4095.0f / 256.0f);
2168
2169 sampler->descriptor[0] =
2170 COND(miplinear, A6XX_TEX_SAMP_0_MIPFILTER_LINEAR_NEAR) |
2171 A6XX_TEX_SAMP_0_XY_MAG(tu6_tex_filter(pCreateInfo->magFilter, aniso)) |
2172 A6XX_TEX_SAMP_0_XY_MIN(tu6_tex_filter(pCreateInfo->minFilter, aniso)) |
2173 A6XX_TEX_SAMP_0_ANISO(aniso) |
2174 A6XX_TEX_SAMP_0_WRAP_S(tu6_tex_wrap(pCreateInfo->addressModeU)) |
2175 A6XX_TEX_SAMP_0_WRAP_T(tu6_tex_wrap(pCreateInfo->addressModeV)) |
2176 A6XX_TEX_SAMP_0_WRAP_R(tu6_tex_wrap(pCreateInfo->addressModeW)) |
2177 A6XX_TEX_SAMP_0_LOD_BIAS(pCreateInfo->mipLodBias);
2178 sampler->descriptor[1] =
2179 /* COND(!cso->seamless_cube_map, A6XX_TEX_SAMP_1_CUBEMAPSEAMLESSFILTOFF) | */
2180 COND(pCreateInfo->unnormalizedCoordinates, A6XX_TEX_SAMP_1_UNNORM_COORDS) |
2181 A6XX_TEX_SAMP_1_MIN_LOD(min_lod) |
2182 A6XX_TEX_SAMP_1_MAX_LOD(max_lod) |
2183 COND(pCreateInfo->compareEnable,
2184 A6XX_TEX_SAMP_1_COMPARE_FUNC(tu6_compare_func(pCreateInfo->compareOp)));
2185 /* This is an offset into the border_color BO, which we fill with all the
2186 * possible Vulkan border colors in the correct order, so we can just use
2187 * the Vulkan enum with no translation necessary.
2188 */
2189 sampler->descriptor[2] =
2190 A6XX_TEX_SAMP_2_BCOLOR_OFFSET((unsigned) pCreateInfo->borderColor *
2191 sizeof(struct bcolor_entry));
2192 sampler->descriptor[3] = 0;
2193
2194 if (reduction) {
2195 sampler->descriptor[2] |= A6XX_TEX_SAMP_2_REDUCTION_MODE(
2196 tu6_reduction_mode(reduction->reductionMode));
2197 }
2198
2199 sampler->ycbcr_sampler = ycbcr_conversion ?
2200 tu_sampler_ycbcr_conversion_from_handle(ycbcr_conversion->conversion) : NULL;
2201
2202 if (sampler->ycbcr_sampler &&
2203 sampler->ycbcr_sampler->chroma_filter == VK_FILTER_LINEAR) {
2204 sampler->descriptor[2] |= A6XX_TEX_SAMP_2_CHROMA_LINEAR;
2205 }
2206
2207 /* TODO:
2208 * A6XX_TEX_SAMP_1_MIPFILTER_LINEAR_FAR disables mipmapping, but vk has no NONE mipfilter?
2209 */
2210 }
2211
2212 VkResult
2213 tu_CreateSampler(VkDevice _device,
2214 const VkSamplerCreateInfo *pCreateInfo,
2215 const VkAllocationCallbacks *pAllocator,
2216 VkSampler *pSampler)
2217 {
2218 TU_FROM_HANDLE(tu_device, device, _device);
2219 struct tu_sampler *sampler;
2220
2221 assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_SAMPLER_CREATE_INFO);
2222
2223 sampler = vk_object_alloc(&device->vk, pAllocator, sizeof(*sampler),
2224 VK_OBJECT_TYPE_SAMPLER);
2225 if (!sampler)
2226 return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
2227
2228 tu_init_sampler(device, sampler, pCreateInfo);
2229 *pSampler = tu_sampler_to_handle(sampler);
2230
2231 return VK_SUCCESS;
2232 }
2233
2234 void
2235 tu_DestroySampler(VkDevice _device,
2236 VkSampler _sampler,
2237 const VkAllocationCallbacks *pAllocator)
2238 {
2239 TU_FROM_HANDLE(tu_device, device, _device);
2240 TU_FROM_HANDLE(tu_sampler, sampler, _sampler);
2241
2242 if (!sampler)
2243 return;
2244
2245 vk_object_free(&device->vk, pAllocator, sampler);
2246 }
2247
2248 /* vk_icd.h does not declare this function, so we declare it here to
2249 * suppress Wmissing-prototypes.
2250 */
2251 PUBLIC VKAPI_ATTR VkResult VKAPI_CALL
2252 vk_icdNegotiateLoaderICDInterfaceVersion(uint32_t *pSupportedVersion);
2253
2254 PUBLIC VKAPI_ATTR VkResult VKAPI_CALL
2255 vk_icdNegotiateLoaderICDInterfaceVersion(uint32_t *pSupportedVersion)
2256 {
2257 /* For the full details on loader interface versioning, see
2258 * <https://github.com/KhronosGroup/Vulkan-LoaderAndValidationLayers/blob/master/loader/LoaderAndLayerInterface.md>.
2259 * What follows is a condensed summary, to help you navigate the large and
2260 * confusing official doc.
2261 *
2262 * - Loader interface v0 is incompatible with later versions. We don't
2263 * support it.
2264 *
2265 * - In loader interface v1:
2266 * - The first ICD entrypoint called by the loader is
2267 * vk_icdGetInstanceProcAddr(). The ICD must statically expose this
2268 * entrypoint.
2269 * - The ICD must statically expose no other Vulkan symbol unless it
2270 * is linked with -Bsymbolic.
2271 * - Each dispatchable Vulkan handle created by the ICD must be
2272 * a pointer to a struct whose first member is VK_LOADER_DATA. The
2273 * ICD must initialize VK_LOADER_DATA.loadMagic to
2274 * ICD_LOADER_MAGIC.
2275 * - The loader implements vkCreate{PLATFORM}SurfaceKHR() and
2276 * vkDestroySurfaceKHR(). The ICD must be capable of working with
2277 * such loader-managed surfaces.
2278 *
2279 * - Loader interface v2 differs from v1 in:
2280 * - The first ICD entrypoint called by the loader is
2281 * vk_icdNegotiateLoaderICDInterfaceVersion(). The ICD must
2282 * statically expose this entrypoint.
2283 *
2284 * - Loader interface v3 differs from v2 in:
2285 * - The ICD must implement vkCreate{PLATFORM}SurfaceKHR(),
2286  *      vkDestroySurfaceKHR(), and other API which uses VkSurfaceKHR,
2287 * because the loader no longer does so.
2288 */
2289 *pSupportedVersion = MIN2(*pSupportedVersion, 3u);
2290 return VK_SUCCESS;
2291 }
2292
2293 VkResult
2294 tu_GetMemoryFdKHR(VkDevice _device,
2295 const VkMemoryGetFdInfoKHR *pGetFdInfo,
2296 int *pFd)
2297 {
2298 TU_FROM_HANDLE(tu_device, device, _device);
2299 TU_FROM_HANDLE(tu_device_memory, memory, pGetFdInfo->memory);
2300
2301 assert(pGetFdInfo->sType == VK_STRUCTURE_TYPE_MEMORY_GET_FD_INFO_KHR);
2302
2303 /* At the moment, we support only the below handle types. */
2304 assert(pGetFdInfo->handleType ==
2305 VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT ||
2306 pGetFdInfo->handleType ==
2307 VK_EXTERNAL_MEMORY_HANDLE_TYPE_DMA_BUF_BIT_EXT);
2308
2309 int prime_fd = tu_bo_export_dmabuf(device, &memory->bo);
2310 if (prime_fd < 0)
2311 return vk_error(device->instance, VK_ERROR_OUT_OF_DEVICE_MEMORY);
2312
2313 *pFd = prime_fd;
2314 return VK_SUCCESS;
2315 }
2316
2317 VkResult
2318 tu_GetMemoryFdPropertiesKHR(VkDevice _device,
2319 VkExternalMemoryHandleTypeFlagBits handleType,
2320 int fd,
2321 VkMemoryFdPropertiesKHR *pMemoryFdProperties)
2322 {
2323 assert(handleType == VK_EXTERNAL_MEMORY_HANDLE_TYPE_DMA_BUF_BIT_EXT);
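   /* All dma-bufs we can import fit in our single memory type. */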
2324 pMemoryFdProperties->memoryTypeBits = 1;
2325 return VK_SUCCESS;
2326 }
2327
2328 VkResult
2329 tu_ImportFenceFdKHR(VkDevice _device,
2330 const VkImportFenceFdInfoKHR *pImportFenceFdInfo)
2331 {
2332 tu_stub();
2333
2334 return VK_SUCCESS;
2335 }
2336
2337 VkResult
2338 tu_GetFenceFdKHR(VkDevice _device,
2339 const VkFenceGetFdInfoKHR *pGetFdInfo,
2340 int *pFd)
2341 {
2342 tu_stub();
2343
2344 return VK_SUCCESS;
2345 }
2346
2347 VkResult
2348 tu_ImportSemaphoreFdKHR(VkDevice _device,
2349 const VkImportSemaphoreFdInfoKHR *pImportSemaphoreFdInfo)
2350 {
2351 TU_FROM_HANDLE(tu_device, device, _device);
2352 TU_FROM_HANDLE(tu_semaphore, sem, pImportSemaphoreFdInfo->semaphore);
2353 int ret;
2354 struct tu_semaphore_part *dst = NULL;
2355
2356 if (pImportSemaphoreFdInfo->flags & VK_SEMAPHORE_IMPORT_TEMPORARY_BIT) {
2357 dst = &sem->temporary;
2358 } else {
2359 dst = &sem->permanent;
2360 }
2361
2362 uint32_t syncobj = dst->kind == TU_SEMAPHORE_SYNCOBJ ? dst->syncobj : 0;
2363
2364    switch (pImportSemaphoreFdInfo->handleType) {
2365 case VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT: {
2366 uint32_t old_syncobj = syncobj;
2367 ret = drmSyncobjFDToHandle(device->physical_device->local_fd, pImportSemaphoreFdInfo->fd, &syncobj);
2368 if (ret == 0) {
2369 close(pImportSemaphoreFdInfo->fd);
2370 if (old_syncobj)
2371 drmSyncobjDestroy(device->physical_device->local_fd, old_syncobj);
2372 }
2373 break;
2374 }
2375 case VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT: {
2376 if (!syncobj) {
2377 ret = drmSyncobjCreate(device->physical_device->local_fd, 0, &syncobj);
2378 if (ret)
2379 break;
2380 }
2381 if (pImportSemaphoreFdInfo->fd == -1) {
2382 ret = drmSyncobjSignal(device->physical_device->local_fd, &syncobj, 1);
2383 } else {
2384 ret = drmSyncobjImportSyncFile(device->physical_device->local_fd, syncobj, pImportSemaphoreFdInfo->fd);
2385 }
2386       if (!ret && pImportSemaphoreFdInfo->fd != -1)
2387          close(pImportSemaphoreFdInfo->fd);
2388 break;
2389 }
2390 default:
2391 unreachable("Unhandled semaphore handle type");
2392 }
2393
2394 if (ret) {
2395 return VK_ERROR_INVALID_EXTERNAL_HANDLE;
2396 }
2397 dst->syncobj = syncobj;
2398 dst->kind = TU_SEMAPHORE_SYNCOBJ;
2399
2400 return VK_SUCCESS;
2401 }
2402
2403 VkResult
2404 tu_GetSemaphoreFdKHR(VkDevice _device,
2405 const VkSemaphoreGetFdInfoKHR *pGetFdInfo,
2406 int *pFd)
2407 {
2408 TU_FROM_HANDLE(tu_device, device, _device);
2409 TU_FROM_HANDLE(tu_semaphore, sem, pGetFdInfo->semaphore);
2410 int ret;
2411 uint32_t syncobj_handle;
2412
2413 if (sem->temporary.kind != TU_SEMAPHORE_NONE) {
2414 assert(sem->temporary.kind == TU_SEMAPHORE_SYNCOBJ);
2415 syncobj_handle = sem->temporary.syncobj;
2416 } else {
2417 assert(sem->permanent.kind == TU_SEMAPHORE_SYNCOBJ);
2418 syncobj_handle = sem->permanent.syncobj;
2419 }
2420
2421    switch (pGetFdInfo->handleType) {
2422 case VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT:
2423 ret = drmSyncobjHandleToFD(device->physical_device->local_fd, syncobj_handle, pFd);
2424 break;
2425 case VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT:
2426 ret = drmSyncobjExportSyncFile(device->physical_device->local_fd, syncobj_handle, pFd);
2427 if (!ret) {
2428 if (sem->temporary.kind != TU_SEMAPHORE_NONE) {
2429 tu_semaphore_part_destroy(device, &sem->temporary);
2430 } else {
2431 drmSyncobjReset(device->physical_device->local_fd, &syncobj_handle, 1);
2432 }
2433 }
2434 break;
2435 default:
2436 unreachable("Unhandled semaphore handle type");
2437 }
2438
2439 if (ret)
2440 return vk_error(device->instance, VK_ERROR_INVALID_EXTERNAL_HANDLE);
2441 return VK_SUCCESS;
2442 }
2443
2444
2445 static bool tu_has_syncobj(struct tu_physical_device *pdev)
2446 {
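   /* Require both the core DRM syncobj capability and a new enough msm
    * driver (1.6+), which is presumably when syncobj support was added to
    * the submit ioctl.
    */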
2447 uint64_t value;
2448 if (drmGetCap(pdev->local_fd, DRM_CAP_SYNCOBJ, &value))
2449 return false;
2450 return value && pdev->msm_major_version == 1 && pdev->msm_minor_version >= 6;
2451 }
2452
2453 void
2454 tu_GetPhysicalDeviceExternalSemaphoreProperties(
2455 VkPhysicalDevice physicalDevice,
2456 const VkPhysicalDeviceExternalSemaphoreInfo *pExternalSemaphoreInfo,
2457 VkExternalSemaphoreProperties *pExternalSemaphoreProperties)
2458 {
2459 TU_FROM_HANDLE(tu_physical_device, pdev, physicalDevice);
2460
2461 if (tu_has_syncobj(pdev) &&
2462 (pExternalSemaphoreInfo->handleType == VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT ||
2463 pExternalSemaphoreInfo->handleType == VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT)) {
2464 pExternalSemaphoreProperties->exportFromImportedHandleTypes = VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT | VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT;
2465 pExternalSemaphoreProperties->compatibleHandleTypes = VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT | VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT;
2466 pExternalSemaphoreProperties->externalSemaphoreFeatures = VK_EXTERNAL_SEMAPHORE_FEATURE_EXPORTABLE_BIT |
2467 VK_EXTERNAL_SEMAPHORE_FEATURE_IMPORTABLE_BIT;
2468 } else {
2469 pExternalSemaphoreProperties->exportFromImportedHandleTypes = 0;
2470 pExternalSemaphoreProperties->compatibleHandleTypes = 0;
2471 pExternalSemaphoreProperties->externalSemaphoreFeatures = 0;
2472 }
2473 }
2474
2475 void
2476 tu_GetPhysicalDeviceExternalFenceProperties(
2477 VkPhysicalDevice physicalDevice,
2478 const VkPhysicalDeviceExternalFenceInfo *pExternalFenceInfo,
2479 VkExternalFenceProperties *pExternalFenceProperties)
2480 {
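   /* External fences are not supported yet (tu_ImportFenceFdKHR and
    * tu_GetFenceFdKHR above are still stubs).
    */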
2481 pExternalFenceProperties->exportFromImportedHandleTypes = 0;
2482 pExternalFenceProperties->compatibleHandleTypes = 0;
2483 pExternalFenceProperties->externalFenceFeatures = 0;
2484 }
2485
2486 VkResult
2487 tu_CreateDebugReportCallbackEXT(
2488 VkInstance _instance,
2489 const VkDebugReportCallbackCreateInfoEXT *pCreateInfo,
2490 const VkAllocationCallbacks *pAllocator,
2491 VkDebugReportCallbackEXT *pCallback)
2492 {
2493 TU_FROM_HANDLE(tu_instance, instance, _instance);
2494 return vk_create_debug_report_callback(&instance->debug_report_callbacks,
2495 pCreateInfo, pAllocator,
2496 &instance->alloc, pCallback);
2497 }
2498
2499 void
2500 tu_DestroyDebugReportCallbackEXT(VkInstance _instance,
2501 VkDebugReportCallbackEXT _callback,
2502 const VkAllocationCallbacks *pAllocator)
2503 {
2504 TU_FROM_HANDLE(tu_instance, instance, _instance);
2505 vk_destroy_debug_report_callback(&instance->debug_report_callbacks,
2506 _callback, pAllocator, &instance->alloc);
2507 }
2508
2509 void
2510 tu_DebugReportMessageEXT(VkInstance _instance,
2511 VkDebugReportFlagsEXT flags,
2512 VkDebugReportObjectTypeEXT objectType,
2513 uint64_t object,
2514 size_t location,
2515 int32_t messageCode,
2516 const char *pLayerPrefix,
2517 const char *pMessage)
2518 {
2519 TU_FROM_HANDLE(tu_instance, instance, _instance);
2520 vk_debug_report(&instance->debug_report_callbacks, flags, objectType,
2521 object, location, messageCode, pLayerPrefix, pMessage);
2522 }
2523
2524 void
2525 tu_GetDeviceGroupPeerMemoryFeatures(
2526 VkDevice device,
2527 uint32_t heapIndex,
2528 uint32_t localDeviceIndex,
2529 uint32_t remoteDeviceIndex,
2530 VkPeerMemoryFeatureFlags *pPeerMemoryFeatures)
2531 {
2532 assert(localDeviceIndex == remoteDeviceIndex);
2533
2534 *pPeerMemoryFeatures = VK_PEER_MEMORY_FEATURE_COPY_SRC_BIT |
2535 VK_PEER_MEMORY_FEATURE_COPY_DST_BIT |
2536 VK_PEER_MEMORY_FEATURE_GENERIC_SRC_BIT |
2537 VK_PEER_MEMORY_FEATURE_GENERIC_DST_BIT;
2538 }
2539
2540 void tu_GetPhysicalDeviceMultisamplePropertiesEXT(
2541 VkPhysicalDevice physicalDevice,
2542 VkSampleCountFlagBits samples,
2543 VkMultisamplePropertiesEXT* pMultisampleProperties)
2544 {
2545 TU_FROM_HANDLE(tu_physical_device, pdevice, physicalDevice);
2546
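   /* Custom sample locations are supported at a 1x1 grid granularity, and
    * only for up to 4x MSAA.
    */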
2547 if (samples <= VK_SAMPLE_COUNT_4_BIT && pdevice->supported_extensions.EXT_sample_locations)
2548 pMultisampleProperties->maxSampleLocationGridSize = (VkExtent2D){ 1, 1 };
2549 else
2550 pMultisampleProperties->maxSampleLocationGridSize = (VkExtent2D){ 0, 0 };
2551 }
2552
2553
2554 VkResult
2555 tu_CreatePrivateDataSlotEXT(VkDevice _device,
2556 const VkPrivateDataSlotCreateInfoEXT* pCreateInfo,
2557 const VkAllocationCallbacks* pAllocator,
2558 VkPrivateDataSlotEXT* pPrivateDataSlot)
2559 {
2560 TU_FROM_HANDLE(tu_device, device, _device);
2561 return vk_private_data_slot_create(&device->vk,
2562 pCreateInfo,
2563 pAllocator,
2564 pPrivateDataSlot);
2565 }
2566
2567 void
2568 tu_DestroyPrivateDataSlotEXT(VkDevice _device,
2569 VkPrivateDataSlotEXT privateDataSlot,
2570 const VkAllocationCallbacks* pAllocator)
2571 {
2572 TU_FROM_HANDLE(tu_device, device, _device);
2573 vk_private_data_slot_destroy(&device->vk, privateDataSlot, pAllocator);
2574 }
2575
2576 VkResult
2577 tu_SetPrivateDataEXT(VkDevice _device,
2578 VkObjectType objectType,
2579 uint64_t objectHandle,
2580 VkPrivateDataSlotEXT privateDataSlot,
2581 uint64_t data)
2582 {
2583 TU_FROM_HANDLE(tu_device, device, _device);
2584 return vk_object_base_set_private_data(&device->vk,
2585 objectType,
2586 objectHandle,
2587 privateDataSlot,
2588 data);
2589 }
2590
2591 void
2592 tu_GetPrivateDataEXT(VkDevice _device,
2593 VkObjectType objectType,
2594 uint64_t objectHandle,
2595 VkPrivateDataSlotEXT privateDataSlot,
2596 uint64_t* pData)
2597 {
2598 TU_FROM_HANDLE(tu_device, device, _device);
2599 vk_object_base_get_private_data(&device->vk,
2600 objectType,
2601 objectHandle,
2602 privateDataSlot,
2603 pData);
2604 }