turnip: Fix up detection of device.
src/freedreno/vulkan/tu_device.c
/*
 * Copyright © 2016 Red Hat.
 * Copyright © 2016 Bas Nieuwenhuizen
 *
 * based in part on anv driver which is:
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "tu_private.h"
#include "util/debug.h"
#include "util/disk_cache.h"
#include "util/strtod.h"
#include "vk_format.h"
#include "vk_util.h"
#include <fcntl.h>
#include <stdbool.h>
#include <string.h>
#include <unistd.h>
#include <xf86drm.h>

static int
tu_device_get_cache_uuid(uint16_t family, void *uuid)
{
   uint32_t mesa_timestamp;
   uint16_t f = family;
   memset(uuid, 0, VK_UUID_SIZE);
   if (!disk_cache_get_function_timestamp(tu_device_get_cache_uuid,
                                          &mesa_timestamp))
      return -1;

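   /* Resulting layout (a sketch, not a stable ABI): bytes 0-3 hold the
    * Mesa build timestamp, bytes 4-5 the GPU family, and the remainder the
    * literal "tu" tag on top of the zero fill from the memset above. */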
   memcpy(uuid, &mesa_timestamp, 4);
   memcpy((char *)uuid + 4, &f, 2);
   snprintf((char *)uuid + 6, VK_UUID_SIZE - 6, "tu");
   return 0;
}

static void
tu_get_driver_uuid(void *uuid)
{
   memset(uuid, 0, VK_UUID_SIZE);
}

static void
tu_get_device_uuid(void *uuid)
{
   stub();
}

static VkResult
tu_physical_device_init(struct tu_physical_device *device,
                        struct tu_instance *instance,
                        drmDevicePtr drm_device)
{
   const char *path = drm_device->nodes[DRM_NODE_RENDER];
   VkResult result;
   drmVersionPtr version;
   int fd;
   int master_fd = -1;

   fd = open(path, O_RDWR | O_CLOEXEC);
   if (fd < 0) {
      if (instance->debug_flags & TU_DEBUG_STARTUP)
         tu_logi("Could not open device '%s'", path);

      return vk_error(instance, VK_ERROR_INCOMPATIBLE_DRIVER);
   }

   version = drmGetVersion(fd);
   if (!version) {
      close(fd);

      if (instance->debug_flags & TU_DEBUG_STARTUP)
         tu_logi("Could not get the kernel driver version for device '%s'",
                 path);

      return vk_errorf(instance,
                       VK_ERROR_INCOMPATIBLE_DRIVER,
                       "failed to get version %s: %m",
                       path);
   }

   if (strcmp(version->name, "msm")) {
      drmFreeVersion(version);
      if (master_fd != -1)
         close(master_fd);
      close(fd);

      if (instance->debug_flags & TU_DEBUG_STARTUP)
         tu_logi("Device '%s' is not using the msm kernel driver.", path);

      return VK_ERROR_INCOMPATIBLE_DRIVER;
   }
   drmFreeVersion(version);

   if (instance->debug_flags & TU_DEBUG_STARTUP)
      tu_logi("Found compatible device '%s'.", path);

   device->_loader_data.loaderMagic = ICD_LOADER_MAGIC;
   device->instance = instance;
   assert(strlen(path) < ARRAY_SIZE(device->path));
   strncpy(device->path, path, ARRAY_SIZE(device->path));

   if (instance->enabled_extensions.KHR_display) {
      master_fd =
         open(drm_device->nodes[DRM_NODE_PRIMARY], O_RDWR | O_CLOEXEC);
      if (master_fd >= 0) {
         /* TODO: free master_fd if accel is not working? */
         abort();
      }
   }

   device->master_fd = master_fd;
   device->local_fd = fd;

   if (tu_device_get_cache_uuid(0 /* TODO */, device->cache_uuid)) {
      result = vk_errorf(
         instance, VK_ERROR_INITIALIZATION_FAILED, "cannot generate UUID");
      goto fail;
   }

   /* The gpu id is already embedded in the cache uuid, so the cache name
    * passed to disk_cache_create() does not need to encode it again.
    */
   char buf[VK_UUID_SIZE * 2 + 1];
   disk_cache_format_hex_id(buf, device->cache_uuid, VK_UUID_SIZE * 2);
   device->disk_cache = disk_cache_create(device->name, buf, 0);

   fprintf(stderr,
           "WARNING: tu is not a conformant vulkan implementation, "
           "testing use only.\n");

   tu_get_driver_uuid(&device->driver_uuid);
   tu_get_device_uuid(&device->device_uuid);

   tu_fill_device_extension_table(device, &device->supported_extensions);

   return VK_SUCCESS;

fail:
   close(fd);
   if (master_fd != -1)
      close(master_fd);
   return result;
}

static void
tu_physical_device_finish(struct tu_physical_device *device)
{
   disk_cache_destroy(device->disk_cache);
   close(device->local_fd);
   if (device->master_fd != -1)
      close(device->master_fd);
}

static void *
default_alloc_func(void *pUserData,
                   size_t size,
                   size_t align,
                   VkSystemAllocationScope allocationScope)
{
   return malloc(size);
}

static void *
default_realloc_func(void *pUserData,
                     void *pOriginal,
                     size_t size,
                     size_t align,
                     VkSystemAllocationScope allocationScope)
{
   return realloc(pOriginal, size);
}

static void
default_free_func(void *pUserData, void *pMemory)
{
   free(pMemory);
}

static const VkAllocationCallbacks default_alloc = {
   .pUserData = NULL,
   .pfnAllocation = default_alloc_func,
   .pfnReallocation = default_realloc_func,
   .pfnFree = default_free_func,
};

static const struct debug_control tu_debug_options[] = {
   { "startup", TU_DEBUG_STARTUP },
   { NULL, 0 }
};

const char *
tu_get_debug_option_name(int id)
{
   assert(id < ARRAY_SIZE(tu_debug_options) - 1);
   return tu_debug_options[id].string;
}

static int
tu_get_instance_extension_index(const char *name)
{
   for (unsigned i = 0; i < TU_INSTANCE_EXTENSION_COUNT; ++i) {
      if (strcmp(name, tu_instance_extensions[i].extensionName) == 0)
         return i;
   }
   return -1;
}

VkResult
tu_CreateInstance(const VkInstanceCreateInfo *pCreateInfo,
                  const VkAllocationCallbacks *pAllocator,
                  VkInstance *pInstance)
{
   struct tu_instance *instance;
   VkResult result;

   assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_INSTANCE_CREATE_INFO);

   uint32_t client_version;
   if (pCreateInfo->pApplicationInfo &&
       pCreateInfo->pApplicationInfo->apiVersion != 0) {
      client_version = pCreateInfo->pApplicationInfo->apiVersion;
   } else {
      tu_EnumerateInstanceVersion(&client_version);
   }

   instance = vk_zalloc2(&default_alloc,
                         pAllocator,
                         sizeof(*instance),
                         8,
                         VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
   if (!instance)
      return vk_error(NULL, VK_ERROR_OUT_OF_HOST_MEMORY);

   instance->_loader_data.loaderMagic = ICD_LOADER_MAGIC;

   if (pAllocator)
      instance->alloc = *pAllocator;
   else
      instance->alloc = default_alloc;

   instance->api_version = client_version;
   instance->physical_device_count = -1;

   instance->debug_flags =
      parse_debug_string(getenv("TU_DEBUG"), tu_debug_options);
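   /* TU_DEBUG takes a comma-separated list of the option names declared in
    * tu_debug_options above; e.g. TU_DEBUG=startup turns on the extra
    * device-probe logging used throughout this file. */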

   if (instance->debug_flags & TU_DEBUG_STARTUP)
      tu_logi("Created an instance");

   for (uint32_t i = 0; i < pCreateInfo->enabledExtensionCount; i++) {
      const char *ext_name = pCreateInfo->ppEnabledExtensionNames[i];
      int index = tu_get_instance_extension_index(ext_name);

      if (index < 0 || !tu_supported_instance_extensions.extensions[index]) {
         vk_free2(&default_alloc, pAllocator, instance);
         return vk_error(instance, VK_ERROR_EXTENSION_NOT_PRESENT);
      }

      instance->enabled_extensions.extensions[index] = true;
   }

   result = vk_debug_report_instance_init(&instance->debug_report_callbacks);
   if (result != VK_SUCCESS) {
      vk_free2(&default_alloc, pAllocator, instance);
      return vk_error(instance, result);
   }

   _mesa_locale_init();

   VG(VALGRIND_CREATE_MEMPOOL(instance, 0, false));

   *pInstance = tu_instance_to_handle(instance);

   return VK_SUCCESS;
}

void
tu_DestroyInstance(VkInstance _instance,
                   const VkAllocationCallbacks *pAllocator)
{
   TU_FROM_HANDLE(tu_instance, instance, _instance);

   if (!instance)
      return;

   for (int i = 0; i < instance->physical_device_count; ++i) {
      tu_physical_device_finish(instance->physical_devices + i);
   }

   VG(VALGRIND_DESTROY_MEMPOOL(instance));

   _mesa_locale_fini();

   vk_debug_report_instance_destroy(&instance->debug_report_callbacks);

   vk_free(&instance->alloc, instance);
}

static VkResult
tu_enumerate_devices(struct tu_instance *instance)
{
   /* TODO: Check for more devices? */
   drmDevicePtr devices[8];
   VkResult result = VK_ERROR_INCOMPATIBLE_DRIVER;
   int max_devices;

   instance->physical_device_count = 0;

   max_devices = drmGetDevices2(0, devices, ARRAY_SIZE(devices));

   if (instance->debug_flags & TU_DEBUG_STARTUP)
      tu_logi("Found %d drm nodes", max_devices);

   if (max_devices < 1)
      return vk_error(instance, VK_ERROR_INCOMPATIBLE_DRIVER);

   for (unsigned i = 0; i < (unsigned)max_devices; i++) {
      if (devices[i]->available_nodes & (1 << DRM_NODE_RENDER) &&
          devices[i]->bustype == DRM_BUS_PLATFORM) {

         result = tu_physical_device_init(instance->physical_devices +
                                             instance->physical_device_count,
                                          instance,
                                          devices[i]);
         if (result == VK_SUCCESS)
            ++instance->physical_device_count;
         else if (result != VK_ERROR_INCOMPATIBLE_DRIVER)
            break;
      }
   }
   drmFreeDevices(devices, max_devices);

   return result;
}

VkResult
tu_EnumeratePhysicalDevices(VkInstance _instance,
                            uint32_t *pPhysicalDeviceCount,
                            VkPhysicalDevice *pPhysicalDevices)
{
   TU_FROM_HANDLE(tu_instance, instance, _instance);
   VkResult result;

   if (instance->physical_device_count < 0) {
      result = tu_enumerate_devices(instance);
      if (result != VK_SUCCESS && result != VK_ERROR_INCOMPATIBLE_DRIVER)
         return result;
   }

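   /* Standard Vulkan two-call idiom: a NULL output array means the caller
    * is only querying the count; otherwise clamp to the caller's capacity
    * and report VK_INCOMPLETE below if it was too small. */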
   if (!pPhysicalDevices) {
      *pPhysicalDeviceCount = instance->physical_device_count;
   } else {
      *pPhysicalDeviceCount =
         MIN2(*pPhysicalDeviceCount, instance->physical_device_count);
      for (unsigned i = 0; i < *pPhysicalDeviceCount; ++i)
         pPhysicalDevices[i] =
            tu_physical_device_to_handle(instance->physical_devices + i);
   }

   return *pPhysicalDeviceCount < instance->physical_device_count
             ? VK_INCOMPLETE
             : VK_SUCCESS;
}

VkResult
tu_EnumeratePhysicalDeviceGroups(
   VkInstance _instance,
   uint32_t *pPhysicalDeviceGroupCount,
   VkPhysicalDeviceGroupProperties *pPhysicalDeviceGroupProperties)
{
   TU_FROM_HANDLE(tu_instance, instance, _instance);
   VkResult result;

   if (instance->physical_device_count < 0) {
      result = tu_enumerate_devices(instance);
      if (result != VK_SUCCESS && result != VK_ERROR_INCOMPATIBLE_DRIVER)
         return result;
   }

   if (!pPhysicalDeviceGroupProperties) {
      *pPhysicalDeviceGroupCount = instance->physical_device_count;
   } else {
      *pPhysicalDeviceGroupCount =
         MIN2(*pPhysicalDeviceGroupCount, instance->physical_device_count);
      for (unsigned i = 0; i < *pPhysicalDeviceGroupCount; ++i) {
         pPhysicalDeviceGroupProperties[i].physicalDeviceCount = 1;
         pPhysicalDeviceGroupProperties[i].physicalDevices[0] =
            tu_physical_device_to_handle(instance->physical_devices + i);
         pPhysicalDeviceGroupProperties[i].subsetAllocation = false;
      }
   }
   return *pPhysicalDeviceGroupCount < instance->physical_device_count
             ? VK_INCOMPLETE
             : VK_SUCCESS;
}

void
tu_GetPhysicalDeviceFeatures(VkPhysicalDevice physicalDevice,
                             VkPhysicalDeviceFeatures *pFeatures)
{
   memset(pFeatures, 0, sizeof(*pFeatures));

   *pFeatures = (VkPhysicalDeviceFeatures){
      .robustBufferAccess = false,
      .fullDrawIndexUint32 = false,
      .imageCubeArray = false,
      .independentBlend = false,
      .geometryShader = false,
      .tessellationShader = false,
      .sampleRateShading = false,
      .dualSrcBlend = false,
      .logicOp = false,
      .multiDrawIndirect = false,
      .drawIndirectFirstInstance = false,
      .depthClamp = false,
      .depthBiasClamp = false,
      .fillModeNonSolid = false,
      .depthBounds = false,
      .wideLines = false,
      .largePoints = false,
      .alphaToOne = false,
      .multiViewport = false,
      .samplerAnisotropy = false,
      .textureCompressionETC2 = false,
      .textureCompressionASTC_LDR = false,
      .textureCompressionBC = false,
      .occlusionQueryPrecise = false,
      .pipelineStatisticsQuery = false,
      .vertexPipelineStoresAndAtomics = false,
      .fragmentStoresAndAtomics = false,
      .shaderTessellationAndGeometryPointSize = false,
      .shaderImageGatherExtended = false,
      .shaderStorageImageExtendedFormats = false,
      .shaderStorageImageMultisample = false,
      .shaderUniformBufferArrayDynamicIndexing = false,
      .shaderSampledImageArrayDynamicIndexing = false,
      .shaderStorageBufferArrayDynamicIndexing = false,
      .shaderStorageImageArrayDynamicIndexing = false,
      .shaderStorageImageReadWithoutFormat = false,
      .shaderStorageImageWriteWithoutFormat = false,
      .shaderClipDistance = false,
      .shaderCullDistance = false,
      .shaderFloat64 = false,
      .shaderInt64 = false,
      .shaderInt16 = false,
      .sparseBinding = false,
      .variableMultisampleRate = false,
      .inheritedQueries = false,
   };
}

void
tu_GetPhysicalDeviceFeatures2(VkPhysicalDevice physicalDevice,
                              VkPhysicalDeviceFeatures2KHR *pFeatures)
{
   vk_foreach_struct(ext, pFeatures->pNext)
   {
      switch (ext->sType) {
      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VARIABLE_POINTER_FEATURES_KHR: {
         VkPhysicalDeviceVariablePointerFeaturesKHR *features = (void *)ext;
         features->variablePointersStorageBuffer = true;
         features->variablePointers = false;
         break;
      }
      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MULTIVIEW_FEATURES_KHR: {
         VkPhysicalDeviceMultiviewFeaturesKHR *features =
            (VkPhysicalDeviceMultiviewFeaturesKHR *)ext;
         features->multiview = true;
         features->multiviewGeometryShader = true;
         features->multiviewTessellationShader = true;
         break;
      }
      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_DRAW_PARAMETER_FEATURES: {
         VkPhysicalDeviceShaderDrawParameterFeatures *features =
            (VkPhysicalDeviceShaderDrawParameterFeatures *)ext;
         features->shaderDrawParameters = true;
         break;
      }
      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROTECTED_MEMORY_FEATURES: {
         VkPhysicalDeviceProtectedMemoryFeatures *features =
            (VkPhysicalDeviceProtectedMemoryFeatures *)ext;
         features->protectedMemory = false;
         break;
      }
      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_16BIT_STORAGE_FEATURES: {
         VkPhysicalDevice16BitStorageFeatures *features =
            (VkPhysicalDevice16BitStorageFeatures *)ext;
         features->storageBuffer16BitAccess = false;
         features->uniformAndStorageBuffer16BitAccess = false;
         features->storagePushConstant16 = false;
         features->storageInputOutput16 = false;
         break;
      }
      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SAMPLER_YCBCR_CONVERSION_FEATURES: {
         VkPhysicalDeviceSamplerYcbcrConversionFeatures *features =
            (VkPhysicalDeviceSamplerYcbcrConversionFeatures *)ext;
         features->samplerYcbcrConversion = false;
         break;
      }
      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DESCRIPTOR_INDEXING_FEATURES_EXT: {
         VkPhysicalDeviceDescriptorIndexingFeaturesEXT *features =
            (VkPhysicalDeviceDescriptorIndexingFeaturesEXT *)ext;
         features->shaderInputAttachmentArrayDynamicIndexing = true;
         features->shaderUniformTexelBufferArrayDynamicIndexing = true;
         features->shaderStorageTexelBufferArrayDynamicIndexing = true;
         features->shaderUniformBufferArrayNonUniformIndexing = false;
         features->shaderSampledImageArrayNonUniformIndexing = false;
         features->shaderStorageBufferArrayNonUniformIndexing = false;
         features->shaderStorageImageArrayNonUniformIndexing = false;
         features->shaderInputAttachmentArrayNonUniformIndexing = false;
         features->shaderUniformTexelBufferArrayNonUniformIndexing = false;
         features->shaderStorageTexelBufferArrayNonUniformIndexing = false;
         features->descriptorBindingUniformBufferUpdateAfterBind = true;
         features->descriptorBindingSampledImageUpdateAfterBind = true;
         features->descriptorBindingStorageImageUpdateAfterBind = true;
         features->descriptorBindingStorageBufferUpdateAfterBind = true;
         features->descriptorBindingUniformTexelBufferUpdateAfterBind = true;
         features->descriptorBindingStorageTexelBufferUpdateAfterBind = true;
         features->descriptorBindingUpdateUnusedWhilePending = true;
         features->descriptorBindingPartiallyBound = true;
         features->descriptorBindingVariableDescriptorCount = true;
         features->runtimeDescriptorArray = true;
         break;
      }
      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_CONDITIONAL_RENDERING_FEATURES_EXT: {
         VkPhysicalDeviceConditionalRenderingFeaturesEXT *features =
            (VkPhysicalDeviceConditionalRenderingFeaturesEXT *)ext;
         features->conditionalRendering = true;
         features->inheritedConditionalRendering = false;
         break;
      }
      default:
         break;
      }
   }
   tu_GetPhysicalDeviceFeatures(physicalDevice, &pFeatures->features);
}

void
tu_GetPhysicalDeviceProperties(VkPhysicalDevice physicalDevice,
                               VkPhysicalDeviceProperties *pProperties)
{
   TU_FROM_HANDLE(tu_physical_device, pdevice, physicalDevice);
   VkSampleCountFlags sample_counts = 0xf; /* 1, 2, 4 and 8 samples */

   /* Make sure that the entire descriptor set is addressable with a signed
    * 32-bit int, i.e. the sum of all limits scaled by descriptor size must
    * be at most 2 GiB. A combined image/sampler descriptor counts as one of
    * each. This limit really belongs to the pipeline layout, not the set
    * layout, but there is no per-set limit, so we just set a pipeline
    * limit. No app is likely to hit this soon. */
   size_t max_descriptor_set_size =
      ((1ull << 31) - 16 * MAX_DYNAMIC_BUFFERS) /
      (32 /* uniform buffer, 32 due to potential space wasted on alignment */ +
       32 /* storage buffer, 32 due to potential space wasted on alignment */ +
       32 /* sampler, largest when combined with image */ +
       64 /* sampled image */ + 64 /* storage image */);
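   /* For reference: the divisor sums to 224 bytes per descriptor, so the
    * resulting limit is on the order of 9.5 million descriptors per stage,
    * far beyond anything an application allocates in practice. */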

   VkPhysicalDeviceLimits limits = {
      .maxImageDimension1D = (1 << 14),
      .maxImageDimension2D = (1 << 14),
      .maxImageDimension3D = (1 << 11),
      .maxImageDimensionCube = (1 << 14),
      .maxImageArrayLayers = (1 << 11),
      .maxTexelBufferElements = 128 * 1024 * 1024,
      .maxUniformBufferRange = UINT32_MAX,
      .maxStorageBufferRange = UINT32_MAX,
      .maxPushConstantsSize = MAX_PUSH_CONSTANTS_SIZE,
      .maxMemoryAllocationCount = UINT32_MAX,
      .maxSamplerAllocationCount = 64 * 1024,
      .bufferImageGranularity = 64, /* A cache line */
      .sparseAddressSpaceSize = 0xffffffffu, /* buffer max size */
      .maxBoundDescriptorSets = MAX_SETS,
      .maxPerStageDescriptorSamplers = max_descriptor_set_size,
      .maxPerStageDescriptorUniformBuffers = max_descriptor_set_size,
      .maxPerStageDescriptorStorageBuffers = max_descriptor_set_size,
      .maxPerStageDescriptorSampledImages = max_descriptor_set_size,
      .maxPerStageDescriptorStorageImages = max_descriptor_set_size,
      .maxPerStageDescriptorInputAttachments = max_descriptor_set_size,
      .maxPerStageResources = max_descriptor_set_size,
      .maxDescriptorSetSamplers = max_descriptor_set_size,
      .maxDescriptorSetUniformBuffers = max_descriptor_set_size,
      .maxDescriptorSetUniformBuffersDynamic = MAX_DYNAMIC_UNIFORM_BUFFERS,
      .maxDescriptorSetStorageBuffers = max_descriptor_set_size,
      .maxDescriptorSetStorageBuffersDynamic = MAX_DYNAMIC_STORAGE_BUFFERS,
      .maxDescriptorSetSampledImages = max_descriptor_set_size,
      .maxDescriptorSetStorageImages = max_descriptor_set_size,
      .maxDescriptorSetInputAttachments = max_descriptor_set_size,
      .maxVertexInputAttributes = 32,
      .maxVertexInputBindings = 32,
      .maxVertexInputAttributeOffset = 2047,
      .maxVertexInputBindingStride = 2048,
      .maxVertexOutputComponents = 128,
      .maxTessellationGenerationLevel = 64,
      .maxTessellationPatchSize = 32,
      .maxTessellationControlPerVertexInputComponents = 128,
      .maxTessellationControlPerVertexOutputComponents = 128,
      .maxTessellationControlPerPatchOutputComponents = 120,
      .maxTessellationControlTotalOutputComponents = 4096,
      .maxTessellationEvaluationInputComponents = 128,
      .maxTessellationEvaluationOutputComponents = 128,
      .maxGeometryShaderInvocations = 127,
      .maxGeometryInputComponents = 64,
      .maxGeometryOutputComponents = 128,
      .maxGeometryOutputVertices = 256,
      .maxGeometryTotalOutputComponents = 1024,
      .maxFragmentInputComponents = 128,
      .maxFragmentOutputAttachments = 8,
      .maxFragmentDualSrcAttachments = 1,
      .maxFragmentCombinedOutputResources = 8,
      .maxComputeSharedMemorySize = 32768,
      .maxComputeWorkGroupCount = { 65535, 65535, 65535 },
      .maxComputeWorkGroupInvocations = 2048,
      .maxComputeWorkGroupSize = { 2048, 2048, 2048 },
      .subPixelPrecisionBits = 4 /* FIXME */,
      .subTexelPrecisionBits = 4 /* FIXME */,
      .mipmapPrecisionBits = 4 /* FIXME */,
      .maxDrawIndexedIndexValue = UINT32_MAX,
      .maxDrawIndirectCount = UINT32_MAX,
      .maxSamplerLodBias = 16,
      .maxSamplerAnisotropy = 16,
      .maxViewports = MAX_VIEWPORTS,
      .maxViewportDimensions = { (1 << 14), (1 << 14) },
      .viewportBoundsRange = { INT16_MIN, INT16_MAX },
      .viewportSubPixelBits = 8,
      .minMemoryMapAlignment = 4096, /* A page */
      .minTexelBufferOffsetAlignment = 1,
      .minUniformBufferOffsetAlignment = 4,
      .minStorageBufferOffsetAlignment = 4,
      .minTexelOffset = -32,
      .maxTexelOffset = 31,
      .minTexelGatherOffset = -32,
      .maxTexelGatherOffset = 31,
      .minInterpolationOffset = -2,
      .maxInterpolationOffset = 2,
      .subPixelInterpolationOffsetBits = 8,
      .maxFramebufferWidth = (1 << 14),
      .maxFramebufferHeight = (1 << 14),
      .maxFramebufferLayers = (1 << 10),
      .framebufferColorSampleCounts = sample_counts,
      .framebufferDepthSampleCounts = sample_counts,
      .framebufferStencilSampleCounts = sample_counts,
      .framebufferNoAttachmentsSampleCounts = sample_counts,
      .maxColorAttachments = MAX_RTS,
      .sampledImageColorSampleCounts = sample_counts,
      .sampledImageIntegerSampleCounts = VK_SAMPLE_COUNT_1_BIT,
      .sampledImageDepthSampleCounts = sample_counts,
      .sampledImageStencilSampleCounts = sample_counts,
      .storageImageSampleCounts = VK_SAMPLE_COUNT_1_BIT,
      .maxSampleMaskWords = 1,
      .timestampComputeAndGraphics = true,
      .timestampPeriod = 1,
      .maxClipDistances = 8,
      .maxCullDistances = 8,
      .maxCombinedClipAndCullDistances = 8,
      .discreteQueuePriorities = 1,
      .pointSizeRange = { 0.125, 255.875 },
      .lineWidthRange = { 0.0, 7.9921875 },
      .pointSizeGranularity = (1.0 / 8.0),
      .lineWidthGranularity = (1.0 / 128.0),
      .strictLines = false, /* FINISHME */
      .standardSampleLocations = true,
      .optimalBufferCopyOffsetAlignment = 128,
      .optimalBufferCopyRowPitchAlignment = 128,
      .nonCoherentAtomSize = 64,
   };

   *pProperties = (VkPhysicalDeviceProperties){
      .apiVersion = tu_physical_device_api_version(pdevice),
      .driverVersion = vk_get_driver_version(),
      .vendorID = 0, /* TODO */
      .deviceID = 0,
      .deviceType = VK_PHYSICAL_DEVICE_TYPE_INTEGRATED_GPU,
      .limits = limits,
      .sparseProperties = { 0 },
   };

   strcpy(pProperties->deviceName, pdevice->name);
   memcpy(pProperties->pipelineCacheUUID, pdevice->cache_uuid, VK_UUID_SIZE);
}

void
tu_GetPhysicalDeviceProperties2(VkPhysicalDevice physicalDevice,
                                VkPhysicalDeviceProperties2KHR *pProperties)
{
   TU_FROM_HANDLE(tu_physical_device, pdevice, physicalDevice);
   tu_GetPhysicalDeviceProperties(physicalDevice, &pProperties->properties);

   vk_foreach_struct(ext, pProperties->pNext)
   {
      switch (ext->sType) {
      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PUSH_DESCRIPTOR_PROPERTIES_KHR: {
         VkPhysicalDevicePushDescriptorPropertiesKHR *properties =
            (VkPhysicalDevicePushDescriptorPropertiesKHR *)ext;
         properties->maxPushDescriptors = MAX_PUSH_DESCRIPTORS;
         break;
      }
      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_ID_PROPERTIES_KHR: {
         VkPhysicalDeviceIDPropertiesKHR *properties =
            (VkPhysicalDeviceIDPropertiesKHR *)ext;
         memcpy(properties->driverUUID, pdevice->driver_uuid, VK_UUID_SIZE);
         memcpy(properties->deviceUUID, pdevice->device_uuid, VK_UUID_SIZE);
         properties->deviceLUIDValid = false;
         break;
      }
      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MULTIVIEW_PROPERTIES_KHR: {
         VkPhysicalDeviceMultiviewPropertiesKHR *properties =
            (VkPhysicalDeviceMultiviewPropertiesKHR *)ext;
         properties->maxMultiviewViewCount = MAX_VIEWS;
         properties->maxMultiviewInstanceIndex = INT_MAX;
         break;
      }
      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_POINT_CLIPPING_PROPERTIES_KHR: {
         VkPhysicalDevicePointClippingPropertiesKHR *properties =
            (VkPhysicalDevicePointClippingPropertiesKHR *)ext;
         properties->pointClippingBehavior =
            VK_POINT_CLIPPING_BEHAVIOR_ALL_CLIP_PLANES_KHR;
         break;
      }
      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MAINTENANCE_3_PROPERTIES: {
         VkPhysicalDeviceMaintenance3Properties *properties =
            (VkPhysicalDeviceMaintenance3Properties *)ext;
         /* Make sure everything is addressable by a signed 32-bit int, and
          * our largest descriptors are 96 bytes. */
         properties->maxPerSetDescriptors = (1ull << 31) / 96;
         /* Our buffer size fields allow only this much */
         properties->maxMemoryAllocationSize = 0xFFFFFFFFull;
         break;
      }
      default:
         break;
      }
   }
}

static void
tu_get_physical_device_queue_family_properties(
   struct tu_physical_device *pdevice,
   uint32_t *pCount,
   VkQueueFamilyProperties **pQueueFamilyProperties)
{
   int num_queue_families = 1;
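   /* turnip currently exposes a single queue family supporting graphics,
    * compute and transfer. pQueueFamilyProperties is an array of pointers
    * so that the plain and the "2" entrypoints below can share this
    * helper. */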
   int idx;
   if (pQueueFamilyProperties == NULL) {
      *pCount = num_queue_families;
      return;
   }

   if (!*pCount)
      return;

   idx = 0;
   if (*pCount >= 1) {
      *pQueueFamilyProperties[idx] = (VkQueueFamilyProperties){
         .queueFlags =
            VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT | VK_QUEUE_TRANSFER_BIT,
         .queueCount = 1,
         .timestampValidBits = 64,
         .minImageTransferGranularity = (VkExtent3D){ 1, 1, 1 },
      };
      idx++;
   }

   *pCount = idx;
}

void
tu_GetPhysicalDeviceQueueFamilyProperties(
   VkPhysicalDevice physicalDevice,
   uint32_t *pCount,
   VkQueueFamilyProperties *pQueueFamilyProperties)
{
   TU_FROM_HANDLE(tu_physical_device, pdevice, physicalDevice);
   if (!pQueueFamilyProperties) {
      tu_get_physical_device_queue_family_properties(pdevice, pCount, NULL);
      return;
   }
   VkQueueFamilyProperties *properties[] = {
      pQueueFamilyProperties + 0,
   };
   tu_get_physical_device_queue_family_properties(pdevice, pCount, properties);
   assert(*pCount <= 1);
}

void
tu_GetPhysicalDeviceQueueFamilyProperties2(
   VkPhysicalDevice physicalDevice,
   uint32_t *pCount,
   VkQueueFamilyProperties2KHR *pQueueFamilyProperties)
{
   TU_FROM_HANDLE(tu_physical_device, pdevice, physicalDevice);
   if (!pQueueFamilyProperties) {
      tu_get_physical_device_queue_family_properties(pdevice, pCount, NULL);
      return;
   }
   VkQueueFamilyProperties *properties[] = {
      &pQueueFamilyProperties[0].queueFamilyProperties,
   };
   tu_get_physical_device_queue_family_properties(pdevice, pCount, properties);
   assert(*pCount <= 1);
}

void
tu_GetPhysicalDeviceMemoryProperties(
   VkPhysicalDevice physicalDevice,
   VkPhysicalDeviceMemoryProperties *pMemoryProperties)
{
   stub();
}

void
tu_GetPhysicalDeviceMemoryProperties2(
   VkPhysicalDevice physicalDevice,
   VkPhysicalDeviceMemoryProperties2KHR *pMemoryProperties)
{
   tu_GetPhysicalDeviceMemoryProperties(
      physicalDevice, &pMemoryProperties->memoryProperties);
}

static int
tu_queue_init(struct tu_device *device,
              struct tu_queue *queue,
              uint32_t queue_family_index,
              int idx,
              VkDeviceQueueCreateFlags flags)
{
   queue->_loader_data.loaderMagic = ICD_LOADER_MAGIC;
   queue->device = device;
   queue->queue_family_index = queue_family_index;
   queue->queue_idx = idx;
   queue->flags = flags;

   return VK_SUCCESS;
}

static void
tu_queue_finish(struct tu_queue *queue)
{
}

static int
tu_get_device_extension_index(const char *name)
{
   for (unsigned i = 0; i < TU_DEVICE_EXTENSION_COUNT; ++i) {
      if (strcmp(name, tu_device_extensions[i].extensionName) == 0)
         return i;
   }
   return -1;
}

VkResult
tu_CreateDevice(VkPhysicalDevice physicalDevice,
                const VkDeviceCreateInfo *pCreateInfo,
                const VkAllocationCallbacks *pAllocator,
                VkDevice *pDevice)
{
   TU_FROM_HANDLE(tu_physical_device, physical_device, physicalDevice);
   VkResult result;
   struct tu_device *device;

   /* Check enabled features */
   if (pCreateInfo->pEnabledFeatures) {
      VkPhysicalDeviceFeatures supported_features;
      tu_GetPhysicalDeviceFeatures(physicalDevice, &supported_features);
      VkBool32 *supported_feature = (VkBool32 *)&supported_features;
      VkBool32 *enabled_feature = (VkBool32 *)pCreateInfo->pEnabledFeatures;
      unsigned num_features =
         sizeof(VkPhysicalDeviceFeatures) / sizeof(VkBool32);
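      /* VkPhysicalDeviceFeatures is defined as a struct of VkBool32, so it
       * can be walked as a flat array to compare requested features against
       * supported ones. */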
      for (uint32_t i = 0; i < num_features; i++) {
         if (enabled_feature[i] && !supported_feature[i])
            return vk_error(physical_device->instance,
                            VK_ERROR_FEATURE_NOT_PRESENT);
      }
   }

   device = vk_zalloc2(&physical_device->instance->alloc,
                       pAllocator,
                       sizeof(*device),
                       8,
                       VK_SYSTEM_ALLOCATION_SCOPE_DEVICE);
   if (!device)
      return vk_error(physical_device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);

   device->_loader_data.loaderMagic = ICD_LOADER_MAGIC;
   device->instance = physical_device->instance;
   device->physical_device = physical_device;

   if (pAllocator)
      device->alloc = *pAllocator;
   else
      device->alloc = physical_device->instance->alloc;

   for (uint32_t i = 0; i < pCreateInfo->enabledExtensionCount; i++) {
      const char *ext_name = pCreateInfo->ppEnabledExtensionNames[i];
      int index = tu_get_device_extension_index(ext_name);
      if (index < 0 ||
          !physical_device->supported_extensions.extensions[index]) {
         vk_free(&device->alloc, device);
         return vk_error(physical_device->instance,
                         VK_ERROR_EXTENSION_NOT_PRESENT);
      }

      device->enabled_extensions.extensions[index] = true;
   }

   for (unsigned i = 0; i < pCreateInfo->queueCreateInfoCount; i++) {
      const VkDeviceQueueCreateInfo *queue_create =
         &pCreateInfo->pQueueCreateInfos[i];
      uint32_t qfi = queue_create->queueFamilyIndex;
      device->queues[qfi] =
         vk_alloc(&device->alloc,
                  queue_create->queueCount * sizeof(struct tu_queue),
                  8,
                  VK_SYSTEM_ALLOCATION_SCOPE_DEVICE);
      if (!device->queues[qfi]) {
         result = VK_ERROR_OUT_OF_HOST_MEMORY;
         goto fail;
      }

      memset(device->queues[qfi],
             0,
             queue_create->queueCount * sizeof(struct tu_queue));

      device->queue_count[qfi] = queue_create->queueCount;

      for (unsigned q = 0; q < queue_create->queueCount; q++) {
         result = tu_queue_init(
            device, &device->queues[qfi][q], qfi, q, queue_create->flags);
         if (result != VK_SUCCESS)
            goto fail;
      }
   }

   VkPipelineCacheCreateInfo ci;
   ci.sType = VK_STRUCTURE_TYPE_PIPELINE_CACHE_CREATE_INFO;
   ci.pNext = NULL;
   ci.flags = 0;
   ci.pInitialData = NULL;
   ci.initialDataSize = 0;
   VkPipelineCache pc;
   result =
      tu_CreatePipelineCache(tu_device_to_handle(device), &ci, NULL, &pc);
   if (result != VK_SUCCESS)
      goto fail;

   device->mem_cache = tu_pipeline_cache_from_handle(pc);

   *pDevice = tu_device_to_handle(device);
   return VK_SUCCESS;

fail:
   for (unsigned i = 0; i < TU_MAX_QUEUE_FAMILIES; i++) {
      for (unsigned q = 0; q < device->queue_count[i]; q++)
         tu_queue_finish(&device->queues[i][q]);
      if (device->queue_count[i])
         vk_free(&device->alloc, device->queues[i]);
   }

   vk_free(&device->alloc, device);
   return result;
}

void
tu_DestroyDevice(VkDevice _device, const VkAllocationCallbacks *pAllocator)
{
   TU_FROM_HANDLE(tu_device, device, _device);

   if (!device)
      return;

   for (unsigned i = 0; i < TU_MAX_QUEUE_FAMILIES; i++) {
      for (unsigned q = 0; q < device->queue_count[i]; q++)
         tu_queue_finish(&device->queues[i][q]);
      if (device->queue_count[i])
         vk_free(&device->alloc, device->queues[i]);
   }

   VkPipelineCache pc = tu_pipeline_cache_to_handle(device->mem_cache);
   tu_DestroyPipelineCache(tu_device_to_handle(device), pc, NULL);

   vk_free(&device->alloc, device);
}

VkResult
tu_EnumerateInstanceLayerProperties(uint32_t *pPropertyCount,
                                    VkLayerProperties *pProperties)
{
   if (pProperties == NULL) {
      *pPropertyCount = 0;
      return VK_SUCCESS;
   }

   /* None supported at this time */
   return vk_error(NULL, VK_ERROR_LAYER_NOT_PRESENT);
}

VkResult
tu_EnumerateDeviceLayerProperties(VkPhysicalDevice physicalDevice,
                                  uint32_t *pPropertyCount,
                                  VkLayerProperties *pProperties)
{
   if (pProperties == NULL) {
      *pPropertyCount = 0;
      return VK_SUCCESS;
   }

   /* None supported at this time */
   return vk_error(NULL, VK_ERROR_LAYER_NOT_PRESENT);
}

void
tu_GetDeviceQueue2(VkDevice _device,
                   const VkDeviceQueueInfo2 *pQueueInfo,
                   VkQueue *pQueue)
{
   TU_FROM_HANDLE(tu_device, device, _device);
   struct tu_queue *queue;

   queue =
      &device->queues[pQueueInfo->queueFamilyIndex][pQueueInfo->queueIndex];
   if (pQueueInfo->flags != queue->flags) {
      /* From the Vulkan 1.1.70 spec:
       *
       * "The queue returned by vkGetDeviceQueue2 must have the same
       * flags value from this structure as that used at device
       * creation time in a VkDeviceQueueCreateInfo instance. If no
       * matching flags were specified at device creation time then
       * pQueue will return VK_NULL_HANDLE."
       */
      *pQueue = VK_NULL_HANDLE;
      return;
   }

   *pQueue = tu_queue_to_handle(queue);
}

void
tu_GetDeviceQueue(VkDevice _device,
                  uint32_t queueFamilyIndex,
                  uint32_t queueIndex,
                  VkQueue *pQueue)
{
   const VkDeviceQueueInfo2 info =
      (VkDeviceQueueInfo2){ .sType = VK_STRUCTURE_TYPE_DEVICE_QUEUE_INFO_2,
                            .queueFamilyIndex = queueFamilyIndex,
                            .queueIndex = queueIndex };
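   /* .flags is implicitly zero here, so this legacy entrypoint can only
    * return queues that were created without VkDeviceQueueCreateFlags; the
    * flags check lives in tu_GetDeviceQueue2 above. */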

   tu_GetDeviceQueue2(_device, &info, pQueue);
}

VkResult
tu_QueueSubmit(VkQueue _queue,
               uint32_t submitCount,
               const VkSubmitInfo *pSubmits,
               VkFence _fence)
{
   return VK_SUCCESS;
}

VkResult
tu_QueueWaitIdle(VkQueue _queue)
{
   return VK_SUCCESS;
}

VkResult
tu_DeviceWaitIdle(VkDevice _device)
{
   TU_FROM_HANDLE(tu_device, device, _device);

   for (unsigned i = 0; i < TU_MAX_QUEUE_FAMILIES; i++) {
      for (unsigned q = 0; q < device->queue_count[i]; q++) {
         tu_QueueWaitIdle(tu_queue_to_handle(&device->queues[i][q]));
      }
   }
   return VK_SUCCESS;
}

VkResult
tu_EnumerateInstanceExtensionProperties(const char *pLayerName,
                                        uint32_t *pPropertyCount,
                                        VkExtensionProperties *pProperties)
{
   VK_OUTARRAY_MAKE(out, pProperties, pPropertyCount);

   for (int i = 0; i < TU_INSTANCE_EXTENSION_COUNT; i++) {
      if (tu_supported_instance_extensions.extensions[i]) {
         vk_outarray_append(&out, prop) { *prop = tu_instance_extensions[i]; }
      }
   }

   return vk_outarray_status(&out);
}

VkResult
tu_EnumerateDeviceExtensionProperties(VkPhysicalDevice physicalDevice,
                                      const char *pLayerName,
                                      uint32_t *pPropertyCount,
                                      VkExtensionProperties *pProperties)
{
   TU_FROM_HANDLE(tu_physical_device, device, physicalDevice);
   VK_OUTARRAY_MAKE(out, pProperties, pPropertyCount);

   for (int i = 0; i < TU_DEVICE_EXTENSION_COUNT; i++) {
      if (device->supported_extensions.extensions[i]) {
         vk_outarray_append(&out, prop) { *prop = tu_device_extensions[i]; }
      }
   }

   return vk_outarray_status(&out);
}

PFN_vkVoidFunction
tu_GetInstanceProcAddr(VkInstance _instance, const char *pName)
{
   TU_FROM_HANDLE(tu_instance, instance, _instance);

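   /* instance may be NULL here: the loader resolves global entrypoints
    * such as vkCreateInstance through this path before any instance
    * exists, hence the guarded lookups below. */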
   return tu_lookup_entrypoint_checked(pName,
                                       instance ? instance->api_version : 0,
                                       instance ? &instance->enabled_extensions
                                                : NULL,
                                       NULL);
}

/* The loader wants us to expose a second GetInstanceProcAddr function
 * to work around certain LD_PRELOAD issues seen in apps.
 */
PUBLIC
VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL
vk_icdGetInstanceProcAddr(VkInstance instance, const char *pName);

PUBLIC
VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL
vk_icdGetInstanceProcAddr(VkInstance instance, const char *pName)
{
   return tu_GetInstanceProcAddr(instance, pName);
}

PFN_vkVoidFunction
tu_GetDeviceProcAddr(VkDevice _device, const char *pName)
{
   TU_FROM_HANDLE(tu_device, device, _device);

   return tu_lookup_entrypoint_checked(pName,
                                       device->instance->api_version,
                                       &device->instance->enabled_extensions,
                                       &device->enabled_extensions);
}

static VkResult
tu_alloc_memory(struct tu_device *device,
                const VkMemoryAllocateInfo *pAllocateInfo,
                const VkAllocationCallbacks *pAllocator,
                VkDeviceMemory *pMem)
{
   struct tu_device_memory *mem;

   assert(pAllocateInfo->sType == VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO);

   if (pAllocateInfo->allocationSize == 0) {
      /* Apparently, this is allowed */
      *pMem = VK_NULL_HANDLE;
      return VK_SUCCESS;
   }

   mem = vk_alloc2(&device->alloc,
                   pAllocator,
                   sizeof(*mem),
                   8,
                   VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (mem == NULL)
      return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);

   *pMem = tu_device_memory_to_handle(mem);

   return VK_SUCCESS;
}

VkResult
tu_AllocateMemory(VkDevice _device,
                  const VkMemoryAllocateInfo *pAllocateInfo,
                  const VkAllocationCallbacks *pAllocator,
                  VkDeviceMemory *pMem)
{
   TU_FROM_HANDLE(tu_device, device, _device);
   return tu_alloc_memory(device, pAllocateInfo, pAllocator, pMem);
}

void
tu_FreeMemory(VkDevice _device,
              VkDeviceMemory _mem,
              const VkAllocationCallbacks *pAllocator)
{
   TU_FROM_HANDLE(tu_device, device, _device);
   TU_FROM_HANDLE(tu_device_memory, mem, _mem);

   if (mem == NULL)
      return;

   vk_free2(&device->alloc, pAllocator, mem);
}

VkResult
tu_MapMemory(VkDevice _device,
             VkDeviceMemory _memory,
             VkDeviceSize offset,
             VkDeviceSize size,
             VkMemoryMapFlags flags,
             void **ppData)
{
   TU_FROM_HANDLE(tu_device, device, _device);
   TU_FROM_HANDLE(tu_device_memory, mem, _memory);

   if (mem == NULL) {
      *ppData = NULL;
      return VK_SUCCESS;
   }

   /* Only user-pointer memory can be mapped so far. Do the offset
    * arithmetic on a char * (not valid on void *), and never read *ppData
    * before writing it: it is an output-only parameter. */
   if (mem->user_ptr) {
      *ppData = (char *)mem->user_ptr + offset;
      return VK_SUCCESS;
   }

   return vk_error(device->instance, VK_ERROR_MEMORY_MAP_FAILED);
}

void
tu_UnmapMemory(VkDevice _device, VkDeviceMemory _memory)
{
   TU_FROM_HANDLE(tu_device_memory, mem, _memory);

   if (mem == NULL)
      return;
}

VkResult
tu_FlushMappedMemoryRanges(VkDevice _device,
                           uint32_t memoryRangeCount,
                           const VkMappedMemoryRange *pMemoryRanges)
{
   return VK_SUCCESS;
}

VkResult
tu_InvalidateMappedMemoryRanges(VkDevice _device,
                                uint32_t memoryRangeCount,
                                const VkMappedMemoryRange *pMemoryRanges)
{
   return VK_SUCCESS;
}

void
tu_GetBufferMemoryRequirements(VkDevice _device,
                               VkBuffer _buffer,
                               VkMemoryRequirements *pMemoryRequirements)
{
   TU_FROM_HANDLE(tu_buffer, buffer, _buffer);

   pMemoryRequirements->alignment = 16;
   pMemoryRequirements->size =
      align64(buffer->size, pMemoryRequirements->alignment);
}

void
tu_GetBufferMemoryRequirements2(
   VkDevice device,
   const VkBufferMemoryRequirementsInfo2KHR *pInfo,
   VkMemoryRequirements2KHR *pMemoryRequirements)
{
   tu_GetBufferMemoryRequirements(
      device, pInfo->buffer, &pMemoryRequirements->memoryRequirements);
}

void
tu_GetImageMemoryRequirements(VkDevice _device,
                              VkImage _image,
                              VkMemoryRequirements *pMemoryRequirements)
{
   TU_FROM_HANDLE(tu_image, image, _image);

   /* TODO: memory type */

   pMemoryRequirements->size = image->size;
   pMemoryRequirements->alignment = image->alignment;
}

void
tu_GetImageMemoryRequirements2(VkDevice device,
                               const VkImageMemoryRequirementsInfo2KHR *pInfo,
                               VkMemoryRequirements2KHR *pMemoryRequirements)
{
   tu_GetImageMemoryRequirements(
      device, pInfo->image, &pMemoryRequirements->memoryRequirements);
}

void
tu_GetImageSparseMemoryRequirements(
   VkDevice device,
   VkImage image,
   uint32_t *pSparseMemoryRequirementCount,
   VkSparseImageMemoryRequirements *pSparseMemoryRequirements)
{
   stub();
}

void
tu_GetImageSparseMemoryRequirements2(
   VkDevice device,
   const VkImageSparseMemoryRequirementsInfo2KHR *pInfo,
   uint32_t *pSparseMemoryRequirementCount,
   VkSparseImageMemoryRequirements2KHR *pSparseMemoryRequirements)
{
   stub();
}

void
tu_GetDeviceMemoryCommitment(VkDevice device,
                             VkDeviceMemory memory,
                             VkDeviceSize *pCommittedMemoryInBytes)
{
   *pCommittedMemoryInBytes = 0;
}

VkResult
tu_BindBufferMemory2(VkDevice device,
                     uint32_t bindInfoCount,
                     const VkBindBufferMemoryInfoKHR *pBindInfos)
{
   return VK_SUCCESS;
}

VkResult
tu_BindBufferMemory(VkDevice device,
                    VkBuffer buffer,
                    VkDeviceMemory memory,
                    VkDeviceSize memoryOffset)
{
   const VkBindBufferMemoryInfoKHR info = {
      .sType = VK_STRUCTURE_TYPE_BIND_BUFFER_MEMORY_INFO_KHR,
      .buffer = buffer,
      .memory = memory,
      .memoryOffset = memoryOffset
   };

   return tu_BindBufferMemory2(device, 1, &info);
}

VkResult
tu_BindImageMemory2(VkDevice device,
                    uint32_t bindInfoCount,
                    const VkBindImageMemoryInfoKHR *pBindInfos)
{
   return VK_SUCCESS;
}

VkResult
tu_BindImageMemory(VkDevice device,
                   VkImage image,
                   VkDeviceMemory memory,
                   VkDeviceSize memoryOffset)
{
   const VkBindImageMemoryInfoKHR info = {
      .sType = VK_STRUCTURE_TYPE_BIND_IMAGE_MEMORY_INFO_KHR,
      .image = image,
      .memory = memory,
      .memoryOffset = memoryOffset
   };

   return tu_BindImageMemory2(device, 1, &info);
}

VkResult
tu_QueueBindSparse(VkQueue _queue,
                   uint32_t bindInfoCount,
                   const VkBindSparseInfo *pBindInfo,
                   VkFence _fence)
{
   return VK_SUCCESS;
}

VkResult
tu_CreateFence(VkDevice _device,
               const VkFenceCreateInfo *pCreateInfo,
               const VkAllocationCallbacks *pAllocator,
               VkFence *pFence)
{
   TU_FROM_HANDLE(tu_device, device, _device);

   struct tu_fence *fence = vk_alloc2(&device->alloc,
                                      pAllocator,
                                      sizeof(*fence),
                                      8,
                                      VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);

   if (!fence)
      return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);

   *pFence = tu_fence_to_handle(fence);

   return VK_SUCCESS;
}

void
tu_DestroyFence(VkDevice _device,
                VkFence _fence,
                const VkAllocationCallbacks *pAllocator)
{
   TU_FROM_HANDLE(tu_device, device, _device);
   TU_FROM_HANDLE(tu_fence, fence, _fence);

   if (!fence)
      return;

   vk_free2(&device->alloc, pAllocator, fence);
}

VkResult
tu_WaitForFences(VkDevice _device,
                 uint32_t fenceCount,
                 const VkFence *pFences,
                 VkBool32 waitAll,
                 uint64_t timeout)
{
   return VK_SUCCESS;
}

VkResult
tu_ResetFences(VkDevice _device, uint32_t fenceCount, const VkFence *pFences)
{
   return VK_SUCCESS;
}

VkResult
tu_GetFenceStatus(VkDevice _device, VkFence _fence)
{
   return VK_SUCCESS;
}

// Queue semaphore functions

VkResult
tu_CreateSemaphore(VkDevice _device,
                   const VkSemaphoreCreateInfo *pCreateInfo,
                   const VkAllocationCallbacks *pAllocator,
                   VkSemaphore *pSemaphore)
{
   TU_FROM_HANDLE(tu_device, device, _device);

   struct tu_semaphore *sem = vk_alloc2(&device->alloc,
                                        pAllocator,
                                        sizeof(*sem),
                                        8,
                                        VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (!sem)
      return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);

   *pSemaphore = tu_semaphore_to_handle(sem);
   return VK_SUCCESS;
}

void
tu_DestroySemaphore(VkDevice _device,
                    VkSemaphore _semaphore,
                    const VkAllocationCallbacks *pAllocator)
{
   TU_FROM_HANDLE(tu_device, device, _device);
   TU_FROM_HANDLE(tu_semaphore, sem, _semaphore);
   if (!sem)
      return;

   vk_free2(&device->alloc, pAllocator, sem);
}

VkResult
tu_CreateEvent(VkDevice _device,
               const VkEventCreateInfo *pCreateInfo,
               const VkAllocationCallbacks *pAllocator,
               VkEvent *pEvent)
{
   TU_FROM_HANDLE(tu_device, device, _device);
   struct tu_event *event = vk_alloc2(&device->alloc,
                                      pAllocator,
                                      sizeof(*event),
                                      8,
                                      VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);

   if (!event)
      return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);

   *pEvent = tu_event_to_handle(event);

   return VK_SUCCESS;
}

void
tu_DestroyEvent(VkDevice _device,
                VkEvent _event,
                const VkAllocationCallbacks *pAllocator)
{
   TU_FROM_HANDLE(tu_device, device, _device);
   TU_FROM_HANDLE(tu_event, event, _event);

   if (!event)
      return;
   vk_free2(&device->alloc, pAllocator, event);
}

VkResult
tu_GetEventStatus(VkDevice _device, VkEvent _event)
{
   TU_FROM_HANDLE(tu_event, event, _event);

   /* Events are backed by a CPU-visible word at event->map: set/reset
    * simply write it and status reads it back. */
   if (*event->map == 1)
      return VK_EVENT_SET;
   return VK_EVENT_RESET;
}

VkResult
tu_SetEvent(VkDevice _device, VkEvent _event)
{
   TU_FROM_HANDLE(tu_event, event, _event);
   *event->map = 1;

   return VK_SUCCESS;
}

VkResult
tu_ResetEvent(VkDevice _device, VkEvent _event)
{
   TU_FROM_HANDLE(tu_event, event, _event);
   *event->map = 0;

   return VK_SUCCESS;
}

VkResult
tu_CreateBuffer(VkDevice _device,
                const VkBufferCreateInfo *pCreateInfo,
                const VkAllocationCallbacks *pAllocator,
                VkBuffer *pBuffer)
{
   TU_FROM_HANDLE(tu_device, device, _device);
   struct tu_buffer *buffer;

   assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO);

   buffer = vk_alloc2(&device->alloc,
                      pAllocator,
                      sizeof(*buffer),
                      8,
                      VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (buffer == NULL)
      return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);

   buffer->size = pCreateInfo->size;
   buffer->usage = pCreateInfo->usage;
   buffer->flags = pCreateInfo->flags;

   *pBuffer = tu_buffer_to_handle(buffer);

   return VK_SUCCESS;
}

void
tu_DestroyBuffer(VkDevice _device,
                 VkBuffer _buffer,
                 const VkAllocationCallbacks *pAllocator)
{
   TU_FROM_HANDLE(tu_device, device, _device);
   TU_FROM_HANDLE(tu_buffer, buffer, _buffer);

   if (!buffer)
      return;

   vk_free2(&device->alloc, pAllocator, buffer);
}

static uint32_t
tu_surface_max_layer_count(struct tu_image_view *iview)
{
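   /* For a 2D-array view the last addressable layer is base_layer +
    * layer_count; for 3D images the "layers" are really depth slices, so
    * use the view's depth extent instead. */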
   return iview->type == VK_IMAGE_VIEW_TYPE_3D
             ? iview->extent.depth
             : (iview->base_layer + iview->layer_count);
}

VkResult
tu_CreateFramebuffer(VkDevice _device,
                     const VkFramebufferCreateInfo *pCreateInfo,
                     const VkAllocationCallbacks *pAllocator,
                     VkFramebuffer *pFramebuffer)
{
   TU_FROM_HANDLE(tu_device, device, _device);
   struct tu_framebuffer *framebuffer;

   assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO);

   size_t size =
      sizeof(*framebuffer) +
      sizeof(struct tu_attachment_info) * pCreateInfo->attachmentCount;
   framebuffer = vk_alloc2(
      &device->alloc, pAllocator, size, 8, VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (framebuffer == NULL)
      return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);

   framebuffer->attachment_count = pCreateInfo->attachmentCount;
   framebuffer->width = pCreateInfo->width;
   framebuffer->height = pCreateInfo->height;
   framebuffer->layers = pCreateInfo->layers;
   for (uint32_t i = 0; i < pCreateInfo->attachmentCount; i++) {
      VkImageView _iview = pCreateInfo->pAttachments[i];
      struct tu_image_view *iview = tu_image_view_from_handle(_iview);
      framebuffer->attachments[i].attachment = iview;

      framebuffer->width = MIN2(framebuffer->width, iview->extent.width);
      framebuffer->height = MIN2(framebuffer->height, iview->extent.height);
      framebuffer->layers =
         MIN2(framebuffer->layers, tu_surface_max_layer_count(iview));
   }

   *pFramebuffer = tu_framebuffer_to_handle(framebuffer);
   return VK_SUCCESS;
}

void
tu_DestroyFramebuffer(VkDevice _device,
                      VkFramebuffer _fb,
                      const VkAllocationCallbacks *pAllocator)
{
   TU_FROM_HANDLE(tu_device, device, _device);
   TU_FROM_HANDLE(tu_framebuffer, fb, _fb);

   if (!fb)
      return;
   vk_free2(&device->alloc, pAllocator, fb);
}

static void
tu_init_sampler(struct tu_device *device,
                struct tu_sampler *sampler,
                const VkSamplerCreateInfo *pCreateInfo)
{
}

VkResult
tu_CreateSampler(VkDevice _device,
                 const VkSamplerCreateInfo *pCreateInfo,
                 const VkAllocationCallbacks *pAllocator,
                 VkSampler *pSampler)
{
   TU_FROM_HANDLE(tu_device, device, _device);
   struct tu_sampler *sampler;

   assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_SAMPLER_CREATE_INFO);

   sampler = vk_alloc2(&device->alloc,
                       pAllocator,
                       sizeof(*sampler),
                       8,
                       VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (!sampler)
      return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);

   tu_init_sampler(device, sampler, pCreateInfo);
   *pSampler = tu_sampler_to_handle(sampler);

   return VK_SUCCESS;
}

void
tu_DestroySampler(VkDevice _device,
                  VkSampler _sampler,
                  const VkAllocationCallbacks *pAllocator)
{
   TU_FROM_HANDLE(tu_device, device, _device);
   TU_FROM_HANDLE(tu_sampler, sampler, _sampler);

   if (!sampler)
      return;
   vk_free2(&device->alloc, pAllocator, sampler);
}

/* vk_icd.h does not declare this function, so we declare it here to
 * suppress Wmissing-prototypes.
 */
PUBLIC VKAPI_ATTR VkResult VKAPI_CALL
vk_icdNegotiateLoaderICDInterfaceVersion(uint32_t *pSupportedVersion);

PUBLIC VKAPI_ATTR VkResult VKAPI_CALL
vk_icdNegotiateLoaderICDInterfaceVersion(uint32_t *pSupportedVersion)
{
   /* For the full details on loader interface versioning, see
    * <https://github.com/KhronosGroup/Vulkan-LoaderAndValidationLayers/blob/master/loader/LoaderAndLayerInterface.md>.
    * What follows is a condensed summary, to help you navigate the large and
    * confusing official doc.
    *
    * - Loader interface v0 is incompatible with later versions. We don't
    *   support it.
    *
    * - In loader interface v1:
    *     - The first ICD entrypoint called by the loader is
    *       vk_icdGetInstanceProcAddr(). The ICD must statically expose this
    *       entrypoint.
    *     - The ICD must statically expose no other Vulkan symbol unless it
    *       is linked with -Bsymbolic.
    *     - Each dispatchable Vulkan handle created by the ICD must be
    *       a pointer to a struct whose first member is VK_LOADER_DATA. The
    *       ICD must initialize VK_LOADER_DATA.loaderMagic to
    *       ICD_LOADER_MAGIC.
    *     - The loader implements vkCreate{PLATFORM}SurfaceKHR() and
    *       vkDestroySurfaceKHR(). The ICD must be capable of working with
    *       such loader-managed surfaces.
    *
    * - Loader interface v2 differs from v1 in:
    *     - The first ICD entrypoint called by the loader is
    *       vk_icdNegotiateLoaderICDInterfaceVersion(). The ICD must
    *       statically expose this entrypoint.
    *
    * - Loader interface v3 differs from v2 in:
    *     - The ICD must implement vkCreate{PLATFORM}SurfaceKHR(),
    *       vkDestroySurfaceKHR(), and other API which uses VkSurfaceKHR,
    *       because the loader no longer does so.
    */
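   /* Example of the negotiation below: a loader advertising version 4 or
    * higher is clamped to 3; a v1 loader never calls this entrypoint and
    * simply resolves vk_icdGetInstanceProcAddr directly. */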
   *pSupportedVersion = MIN2(*pSupportedVersion, 3u);
   return VK_SUCCESS;
}

void
tu_GetPhysicalDeviceExternalSemaphoreProperties(
   VkPhysicalDevice physicalDevice,
   const VkPhysicalDeviceExternalSemaphoreInfoKHR *pExternalSemaphoreInfo,
   VkExternalSemaphorePropertiesKHR *pExternalSemaphoreProperties)
{
   pExternalSemaphoreProperties->exportFromImportedHandleTypes = 0;
   pExternalSemaphoreProperties->compatibleHandleTypes = 0;
   pExternalSemaphoreProperties->externalSemaphoreFeatures = 0;
}

void
tu_GetPhysicalDeviceExternalFenceProperties(
   VkPhysicalDevice physicalDevice,
   const VkPhysicalDeviceExternalFenceInfoKHR *pExternalFenceInfo,
   VkExternalFencePropertiesKHR *pExternalFenceProperties)
{
   pExternalFenceProperties->exportFromImportedHandleTypes = 0;
   pExternalFenceProperties->compatibleHandleTypes = 0;
   pExternalFenceProperties->externalFenceFeatures = 0;
}

VkResult
tu_CreateDebugReportCallbackEXT(
   VkInstance _instance,
   const VkDebugReportCallbackCreateInfoEXT *pCreateInfo,
   const VkAllocationCallbacks *pAllocator,
   VkDebugReportCallbackEXT *pCallback)
{
   TU_FROM_HANDLE(tu_instance, instance, _instance);
   return vk_create_debug_report_callback(&instance->debug_report_callbacks,
                                          pCreateInfo,
                                          pAllocator,
                                          &instance->alloc,
                                          pCallback);
}

void
tu_DestroyDebugReportCallbackEXT(VkInstance _instance,
                                 VkDebugReportCallbackEXT _callback,
                                 const VkAllocationCallbacks *pAllocator)
{
   TU_FROM_HANDLE(tu_instance, instance, _instance);
   vk_destroy_debug_report_callback(&instance->debug_report_callbacks,
                                    _callback,
                                    pAllocator,
                                    &instance->alloc);
}

void
tu_DebugReportMessageEXT(VkInstance _instance,
                         VkDebugReportFlagsEXT flags,
                         VkDebugReportObjectTypeEXT objectType,
                         uint64_t object,
                         size_t location,
                         int32_t messageCode,
                         const char *pLayerPrefix,
                         const char *pMessage)
{
   TU_FROM_HANDLE(tu_instance, instance, _instance);
   vk_debug_report(&instance->debug_report_callbacks,
                   flags,
                   objectType,
                   object,
                   location,
                   messageCode,
                   pLayerPrefix,
                   pMessage);
}

void
tu_GetDeviceGroupPeerMemoryFeatures(
   VkDevice device,
   uint32_t heapIndex,
   uint32_t localDeviceIndex,
   uint32_t remoteDeviceIndex,
   VkPeerMemoryFeatureFlags *pPeerMemoryFeatures)
{
   assert(localDeviceIndex == remoteDeviceIndex);

   *pPeerMemoryFeatures = VK_PEER_MEMORY_FEATURE_COPY_SRC_BIT |
                          VK_PEER_MEMORY_FEATURE_COPY_DST_BIT |
                          VK_PEER_MEMORY_FEATURE_GENERIC_SRC_BIT |
                          VK_PEER_MEMORY_FEATURE_GENERIC_DST_BIT;
}