/* [mesa.git] src/gallium/frontends/vallium/val_device.c */

/*
 * Copyright © 2019 Red Hat.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "val_private.h"

#include "pipe-loader/pipe_loader.h"
#include "git_sha1.h"
#include "vk_util.h"
#include "pipe/p_state.h"
#include "pipe/p_context.h"
#include "frontend/drisw_api.h"

#include "compiler/glsl_types.h"
#include "util/u_inlines.h"
#include "util/os_memory.h"
#include "util/u_thread.h"
#include "util/u_atomic.h"
#include "util/timespec.h"
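
/* Initialize a physical device: create the gallium screen through the
 * pipe-loader, query the caps used later, and bring up WSI support.
 */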
static VkResult
val_physical_device_init(struct val_physical_device *device,
                         struct val_instance *instance,
                         struct pipe_loader_device *pld)
{
   VkResult result;
   device->_loader_data.loaderMagic = ICD_LOADER_MAGIC;
   device->instance = instance;
   device->pld = pld;

   device->pscreen = pipe_loader_create_screen(device->pld);
   if (!device->pscreen)
      return vk_error(instance, VK_ERROR_OUT_OF_HOST_MEMORY);

   fprintf(stderr, "WARNING: vallium/llvmpipe is not a conformant vulkan implementation, testing use only.\n");

   device->max_images = device->pscreen->get_shader_param(device->pscreen, PIPE_SHADER_FRAGMENT, PIPE_SHADER_CAP_MAX_SHADER_IMAGES);
   val_physical_device_get_supported_extensions(device, &device->supported_extensions);
   result = val_init_wsi(device);
   if (result != VK_SUCCESS) {
      vk_error(instance, result);
      goto fail;
   }

   return VK_SUCCESS;
 fail:
   return result;
}

static void
val_physical_device_finish(struct val_physical_device *device)
{
   val_finish_wsi(device);
   device->pscreen->destroy(device->pscreen);
}
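
/* Fallback allocation callbacks used when the application does not supply
 * its own VkAllocationCallbacks.  Note that the reallocation path falls
 * back to plain realloc() and so does not honor the requested alignment.
 */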
static void *
default_alloc_func(void *pUserData, size_t size, size_t align,
                   VkSystemAllocationScope allocationScope)
{
   return os_malloc_aligned(size, align);
}

static void *
default_realloc_func(void *pUserData, void *pOriginal, size_t size,
                     size_t align, VkSystemAllocationScope allocationScope)
{
   return realloc(pOriginal, size);
}

static void
default_free_func(void *pUserData, void *pMemory)
{
   os_free_aligned(pMemory);
}

static const VkAllocationCallbacks default_alloc = {
   .pUserData = NULL,
   .pfnAllocation = default_alloc_func,
   .pfnReallocation = default_realloc_func,
   .pfnFree = default_free_func,
};

VkResult val_CreateInstance(
   const VkInstanceCreateInfo*                 pCreateInfo,
   const VkAllocationCallbacks*                pAllocator,
   VkInstance*                                 pInstance)
{
   struct val_instance *instance;

   assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_INSTANCE_CREATE_INFO);

   uint32_t client_version;
   if (pCreateInfo->pApplicationInfo &&
       pCreateInfo->pApplicationInfo->apiVersion != 0) {
      client_version = pCreateInfo->pApplicationInfo->apiVersion;
   } else {
      client_version = VK_API_VERSION_1_0;
   }

   instance = vk_zalloc2(&default_alloc, pAllocator, sizeof(*instance), 8,
                         VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
   if (!instance)
      return vk_error(NULL, VK_ERROR_OUT_OF_HOST_MEMORY);

   vk_object_base_init(NULL, &instance->base, VK_OBJECT_TYPE_INSTANCE);

   if (pAllocator)
      instance->alloc = *pAllocator;
   else
      instance->alloc = default_alloc;

   instance->apiVersion = client_version;
   instance->physicalDeviceCount = -1;

   for (uint32_t i = 0; i < pCreateInfo->enabledExtensionCount; i++) {
      int idx;
      for (idx = 0; idx < VAL_INSTANCE_EXTENSION_COUNT; idx++) {
         if (!strcmp(pCreateInfo->ppEnabledExtensionNames[i],
                     val_instance_extensions[idx].extensionName))
            break;
      }

      if (idx >= VAL_INSTANCE_EXTENSION_COUNT ||
          !val_instance_extensions_supported.extensions[idx]) {
         vk_free2(&default_alloc, pAllocator, instance);
         /* the instance was just freed, so don't use it for error logging */
         return vk_error(NULL, VK_ERROR_EXTENSION_NOT_PRESENT);
      }
      instance->enabled_extensions.extensions[idx] = true;
   }

   bool unchecked = instance->debug_flags & VAL_DEBUG_ALL_ENTRYPOINTS;
   for (unsigned i = 0; i < ARRAY_SIZE(instance->dispatch.entrypoints); i++) {
      /* Vulkan requires that entrypoints for extensions which have
       * not been enabled must not be advertised.
       */
      if (!unchecked &&
          !val_instance_entrypoint_is_enabled(i, instance->apiVersion,
                                              &instance->enabled_extensions)) {
         instance->dispatch.entrypoints[i] = NULL;
      } else {
         instance->dispatch.entrypoints[i] =
            val_instance_dispatch_table.entrypoints[i];
      }
   }

   for (unsigned i = 0; i < ARRAY_SIZE(instance->physical_device_dispatch.entrypoints); i++) {
      /* Vulkan requires that entrypoints for extensions which have
       * not been enabled must not be advertised.
       */
      if (!unchecked &&
          !val_physical_device_entrypoint_is_enabled(i, instance->apiVersion,
                                                     &instance->enabled_extensions)) {
         instance->physical_device_dispatch.entrypoints[i] = NULL;
      } else {
         instance->physical_device_dispatch.entrypoints[i] =
            val_physical_device_dispatch_table.entrypoints[i];
      }
   }

   for (unsigned i = 0; i < ARRAY_SIZE(instance->device_dispatch.entrypoints); i++) {
      /* Vulkan requires that entrypoints for extensions which have
       * not been enabled must not be advertised.
       */
      if (!unchecked &&
          !val_device_entrypoint_is_enabled(i, instance->apiVersion,
                                            &instance->enabled_extensions, NULL)) {
         instance->device_dispatch.entrypoints[i] = NULL;
      } else {
         instance->device_dispatch.entrypoints[i] =
            val_device_dispatch_table.entrypoints[i];
      }
   }

   // _mesa_locale_init();
   glsl_type_singleton_init_or_ref();
   // VG(VALGRIND_CREATE_MEMPOOL(instance, 0, false));

   *pInstance = val_instance_to_handle(instance);

   return VK_SUCCESS;
}

void val_DestroyInstance(
   VkInstance                                  _instance,
   const VkAllocationCallbacks*                pAllocator)
{
   VAL_FROM_HANDLE(val_instance, instance, _instance);

   if (!instance)
      return;
   glsl_type_singleton_decref();
   if (instance->physicalDeviceCount > 0)
      val_physical_device_finish(&instance->physicalDevice);
   // _mesa_locale_fini();

   pipe_loader_release(&instance->devs, instance->num_devices);

   vk_object_base_finish(&instance->base);
   vk_free(&instance->alloc, instance);
}
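
/* drisw loader callbacks for the software winsys: get_image is a no-op and
 * the put_image variants currently just log their arguments.
 */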
static void val_get_image(struct dri_drawable *dri_drawable,
                          int x, int y, unsigned width, unsigned height, unsigned stride,
                          void *data)
{
}

static void val_put_image(struct dri_drawable *dri_drawable,
                          void *data, unsigned width, unsigned height)
{
   fprintf(stderr, "put image %dx%d\n", width, height);
}

static void val_put_image2(struct dri_drawable *dri_drawable,
                           void *data, int x, int y, unsigned width, unsigned height,
                           unsigned stride)
{
   fprintf(stderr, "put image 2 %d,%d %dx%d\n", x, y, width, height);
}

static struct drisw_loader_funcs val_sw_lf = {
   .get_image = val_get_image,
   .put_image = val_put_image,
   .put_image2 = val_put_image2,
};
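
/* Physical devices are probed lazily on the first enumeration; only a
 * single software (llvmpipe) device is expected.
 */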
VkResult val_EnumeratePhysicalDevices(
   VkInstance                                  _instance,
   uint32_t*                                   pPhysicalDeviceCount,
   VkPhysicalDevice*                           pPhysicalDevices)
{
   VAL_FROM_HANDLE(val_instance, instance, _instance);
   VkResult result;

   if (instance->physicalDeviceCount < 0) {
      /* sw only for now */
      instance->num_devices = pipe_loader_sw_probe(NULL, 0);

      assert(instance->num_devices == 1);

      pipe_loader_sw_probe_dri(&instance->devs, &val_sw_lf);

      result = val_physical_device_init(&instance->physicalDevice,
                                        instance, &instance->devs[0]);
      if (result == VK_ERROR_INCOMPATIBLE_DRIVER) {
         instance->physicalDeviceCount = 0;
      } else if (result == VK_SUCCESS) {
         instance->physicalDeviceCount = 1;
      } else {
         return result;
      }
   }

   if (!pPhysicalDevices) {
      *pPhysicalDeviceCount = instance->physicalDeviceCount;
   } else if (*pPhysicalDeviceCount >= 1) {
      pPhysicalDevices[0] = val_physical_device_to_handle(&instance->physicalDevice);
      *pPhysicalDeviceCount = 1;
   } else {
      *pPhysicalDeviceCount = 0;
   }

   return VK_SUCCESS;
}

void val_GetPhysicalDeviceFeatures(
   VkPhysicalDevice                            physicalDevice,
   VkPhysicalDeviceFeatures*                   pFeatures)
{
   VAL_FROM_HANDLE(val_physical_device, pdevice, physicalDevice);
   bool indirect = false; /* pdevice->pscreen->get_param(pdevice->pscreen, PIPE_CAP_GLSL_FEATURE_LEVEL) >= 400 */
   memset(pFeatures, 0, sizeof(*pFeatures));
   *pFeatures = (VkPhysicalDeviceFeatures) {
      .robustBufferAccess = true,
      .fullDrawIndexUint32 = true,
      .imageCubeArray = (pdevice->pscreen->get_param(pdevice->pscreen, PIPE_CAP_CUBE_MAP_ARRAY) != 0),
      .independentBlend = true,
      .geometryShader = (pdevice->pscreen->get_shader_param(pdevice->pscreen, PIPE_SHADER_GEOMETRY, PIPE_SHADER_CAP_MAX_INSTRUCTIONS) != 0),
      .tessellationShader = (pdevice->pscreen->get_shader_param(pdevice->pscreen, PIPE_SHADER_TESS_EVAL, PIPE_SHADER_CAP_MAX_INSTRUCTIONS) != 0),
      .sampleRateShading = (pdevice->pscreen->get_param(pdevice->pscreen, PIPE_CAP_SAMPLE_SHADING) != 0),
      .dualSrcBlend = (pdevice->pscreen->get_param(pdevice->pscreen, PIPE_CAP_MAX_DUAL_SOURCE_RENDER_TARGETS) != 0),
      .logicOp = true,
      .multiDrawIndirect = (pdevice->pscreen->get_param(pdevice->pscreen, PIPE_CAP_MULTI_DRAW_INDIRECT) != 0),
      .drawIndirectFirstInstance = true,
      .depthClamp = (pdevice->pscreen->get_param(pdevice->pscreen, PIPE_CAP_DEPTH_CLIP_DISABLE) != 0),
      .depthBiasClamp = true,
      .fillModeNonSolid = true,
      .depthBounds = (pdevice->pscreen->get_param(pdevice->pscreen, PIPE_CAP_DEPTH_BOUNDS_TEST) != 0),
      .wideLines = false,
      .largePoints = true,
      .alphaToOne = true,
      .multiViewport = true,
      .samplerAnisotropy = false, /* FINISHME */
      .textureCompressionETC2 = false,
      .textureCompressionASTC_LDR = false,
      .textureCompressionBC = true,
      .occlusionQueryPrecise = true,
      .pipelineStatisticsQuery = false,
      .vertexPipelineStoresAndAtomics = (pdevice->pscreen->get_shader_param(pdevice->pscreen, PIPE_SHADER_VERTEX, PIPE_SHADER_CAP_MAX_SHADER_BUFFERS) != 0),
      .fragmentStoresAndAtomics = (pdevice->pscreen->get_shader_param(pdevice->pscreen, PIPE_SHADER_FRAGMENT, PIPE_SHADER_CAP_MAX_SHADER_BUFFERS) != 0),
      .shaderTessellationAndGeometryPointSize = true,
      .shaderImageGatherExtended = true,
      .shaderStorageImageExtendedFormats = false,
      .shaderStorageImageMultisample = (pdevice->pscreen->get_param(pdevice->pscreen, PIPE_CAP_TEXTURE_MULTISAMPLE) != 0),
      .shaderUniformBufferArrayDynamicIndexing = indirect,
      .shaderSampledImageArrayDynamicIndexing = indirect,
      .shaderStorageBufferArrayDynamicIndexing = indirect,
      .shaderStorageImageArrayDynamicIndexing = indirect,
      .shaderStorageImageReadWithoutFormat = false,
      .shaderStorageImageWriteWithoutFormat = true,
      .shaderClipDistance = true,
      .shaderCullDistance = (pdevice->pscreen->get_param(pdevice->pscreen, PIPE_CAP_CULL_DISTANCE) == 1),
      .shaderFloat64 = (pdevice->pscreen->get_param(pdevice->pscreen, PIPE_CAP_DOUBLES) == 1),
      .shaderInt64 = (pdevice->pscreen->get_param(pdevice->pscreen, PIPE_CAP_INT64) == 1),
      .shaderInt16 = true,
      .variableMultisampleRate = false,
      .inheritedQueries = false,
   };
}

void val_GetPhysicalDeviceFeatures2(
   VkPhysicalDevice                            physicalDevice,
   VkPhysicalDeviceFeatures2                  *pFeatures)
{
   val_GetPhysicalDeviceFeatures(physicalDevice, &pFeatures->features);

   vk_foreach_struct(ext, pFeatures->pNext) {
      switch (ext->sType) {
      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VARIABLE_POINTERS_FEATURES: {
         VkPhysicalDeviceVariablePointersFeatures *features = (void *)ext;
         features->variablePointers = true;
         features->variablePointersStorageBuffer = true;
         break;
      }
      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_16BIT_STORAGE_FEATURES: {
         VkPhysicalDevice16BitStorageFeatures *features =
            (VkPhysicalDevice16BitStorageFeatures *)ext;
         features->storageBuffer16BitAccess = true;
         features->uniformAndStorageBuffer16BitAccess = true;
         features->storagePushConstant16 = true;
         features->storageInputOutput16 = false;
         break;
      }
      default:
         break;
      }
   }
}
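
/* Derive the pipeline cache UUID from the mesa git sha1 so that caches are
 * invalidated when the driver build changes.
 */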
void
val_device_get_cache_uuid(void *uuid)
{
   memset(uuid, 0, VK_UUID_SIZE);
   snprintf(uuid, VK_UUID_SIZE, "val-%s", MESA_GIT_SHA1 + 4);
}

void val_GetPhysicalDeviceProperties(VkPhysicalDevice physicalDevice,
                                     VkPhysicalDeviceProperties *pProperties)
{
   VAL_FROM_HANDLE(val_physical_device, pdevice, physicalDevice);

   VkSampleCountFlags sample_counts = VK_SAMPLE_COUNT_1_BIT | VK_SAMPLE_COUNT_4_BIT;

   uint64_t grid_size[3], block_size[3];
   uint64_t max_threads_per_block, max_local_size;

   pdevice->pscreen->get_compute_param(pdevice->pscreen, PIPE_SHADER_IR_NIR,
                                       PIPE_COMPUTE_CAP_MAX_GRID_SIZE, grid_size);
   pdevice->pscreen->get_compute_param(pdevice->pscreen, PIPE_SHADER_IR_NIR,
                                       PIPE_COMPUTE_CAP_MAX_BLOCK_SIZE, block_size);
   pdevice->pscreen->get_compute_param(pdevice->pscreen, PIPE_SHADER_IR_NIR,
                                       PIPE_COMPUTE_CAP_MAX_THREADS_PER_BLOCK,
                                       &max_threads_per_block);
   pdevice->pscreen->get_compute_param(pdevice->pscreen, PIPE_SHADER_IR_NIR,
                                       PIPE_COMPUTE_CAP_MAX_LOCAL_SIZE,
                                       &max_local_size);

   VkPhysicalDeviceLimits limits = {
      .maxImageDimension1D = pdevice->pscreen->get_param(pdevice->pscreen, PIPE_CAP_MAX_TEXTURE_2D_SIZE),
      .maxImageDimension2D = pdevice->pscreen->get_param(pdevice->pscreen, PIPE_CAP_MAX_TEXTURE_2D_SIZE),
      .maxImageDimension3D = (1 << pdevice->pscreen->get_param(pdevice->pscreen, PIPE_CAP_MAX_TEXTURE_3D_LEVELS)),
      .maxImageDimensionCube = (1 << pdevice->pscreen->get_param(pdevice->pscreen, PIPE_CAP_MAX_TEXTURE_CUBE_LEVELS)),
      .maxImageArrayLayers = pdevice->pscreen->get_param(pdevice->pscreen, PIPE_CAP_MAX_TEXTURE_ARRAY_LAYERS),
      .maxTexelBufferElements = 128 * 1024 * 1024,
      .maxUniformBufferRange = pdevice->pscreen->get_shader_param(pdevice->pscreen, PIPE_SHADER_FRAGMENT, PIPE_SHADER_CAP_MAX_CONST_BUFFER_SIZE),
      .maxStorageBufferRange = pdevice->pscreen->get_param(pdevice->pscreen, PIPE_CAP_MAX_SHADER_BUFFER_SIZE),
      .maxPushConstantsSize = MAX_PUSH_CONSTANTS_SIZE,
      .maxMemoryAllocationCount = 4096,
      .maxSamplerAllocationCount = 32 * 1024,
      .bufferImageGranularity = 64, /* A cache line */
      .sparseAddressSpaceSize = 0,
      .maxBoundDescriptorSets = MAX_SETS,
      .maxPerStageDescriptorSamplers = 32,
      .maxPerStageDescriptorUniformBuffers = pdevice->pscreen->get_shader_param(pdevice->pscreen, PIPE_SHADER_FRAGMENT, PIPE_SHADER_CAP_MAX_CONST_BUFFERS),
      .maxPerStageDescriptorStorageBuffers = pdevice->pscreen->get_shader_param(pdevice->pscreen, PIPE_SHADER_FRAGMENT, PIPE_SHADER_CAP_MAX_SHADER_BUFFERS),
      .maxPerStageDescriptorSampledImages = pdevice->pscreen->get_shader_param(pdevice->pscreen, PIPE_SHADER_FRAGMENT, PIPE_SHADER_CAP_MAX_SAMPLER_VIEWS),
      .maxPerStageDescriptorStorageImages = pdevice->pscreen->get_shader_param(pdevice->pscreen, PIPE_SHADER_FRAGMENT, PIPE_SHADER_CAP_MAX_SHADER_IMAGES) - 8,
      .maxPerStageDescriptorInputAttachments = 8,
      .maxPerStageResources = 128,
      .maxDescriptorSetSamplers = 32 * 1024,
      .maxDescriptorSetUniformBuffers = 256,
      .maxDescriptorSetUniformBuffersDynamic = 256,
      .maxDescriptorSetStorageBuffers = 256,
      .maxDescriptorSetStorageBuffersDynamic = 256,
      .maxDescriptorSetSampledImages = 256,
      .maxDescriptorSetStorageImages = 256,
      .maxDescriptorSetInputAttachments = 256,
      .maxVertexInputAttributes = 32,
      .maxVertexInputBindings = 32,
      .maxVertexInputAttributeOffset = 2047,
      .maxVertexInputBindingStride = 2048,
      .maxVertexOutputComponents = 128,
      .maxTessellationGenerationLevel = 64,
      .maxTessellationPatchSize = 32,
      .maxTessellationControlPerVertexInputComponents = 128,
      .maxTessellationControlPerVertexOutputComponents = 128,
      .maxTessellationControlPerPatchOutputComponents = 128,
      .maxTessellationControlTotalOutputComponents = 4096,
      .maxTessellationEvaluationInputComponents = 128,
      .maxTessellationEvaluationOutputComponents = 128,
      .maxGeometryShaderInvocations = pdevice->pscreen->get_param(pdevice->pscreen, PIPE_CAP_MAX_GS_INVOCATIONS),
      .maxGeometryInputComponents = 64,
      .maxGeometryOutputComponents = 128,
      .maxGeometryOutputVertices = pdevice->pscreen->get_param(pdevice->pscreen, PIPE_CAP_MAX_GEOMETRY_OUTPUT_VERTICES),
      .maxGeometryTotalOutputComponents = pdevice->pscreen->get_param(pdevice->pscreen, PIPE_CAP_MAX_GEOMETRY_TOTAL_OUTPUT_COMPONENTS),
      .maxFragmentInputComponents = 128,
      .maxFragmentOutputAttachments = 8,
      .maxFragmentDualSrcAttachments = 2,
      .maxFragmentCombinedOutputResources = 8,
      .maxComputeSharedMemorySize = max_local_size,
      .maxComputeWorkGroupCount = { grid_size[0], grid_size[1], grid_size[2] },
      .maxComputeWorkGroupInvocations = max_threads_per_block,
      .maxComputeWorkGroupSize = { block_size[0], block_size[1], block_size[2] },
      .subPixelPrecisionBits = pdevice->pscreen->get_param(pdevice->pscreen, PIPE_CAP_RASTERIZER_SUBPIXEL_BITS),
      .subTexelPrecisionBits = 4 /* FIXME */,
      .mipmapPrecisionBits = 4 /* FIXME */,
      .maxDrawIndexedIndexValue = UINT32_MAX,
      .maxDrawIndirectCount = UINT32_MAX,
      .maxSamplerLodBias = 16,
      .maxSamplerAnisotropy = 16,
      .maxViewports = pdevice->pscreen->get_param(pdevice->pscreen, PIPE_CAP_MAX_VIEWPORTS),
      .maxViewportDimensions = { (1 << 14), (1 << 14) },
      .viewportBoundsRange = { -16384.0, 16384.0 },
      .viewportSubPixelBits = pdevice->pscreen->get_param(pdevice->pscreen, PIPE_CAP_VIEWPORT_SUBPIXEL_BITS),
      .minMemoryMapAlignment = 4096, /* A page */
      .minTexelBufferOffsetAlignment = pdevice->pscreen->get_param(pdevice->pscreen, PIPE_CAP_TEXTURE_BUFFER_OFFSET_ALIGNMENT),
      .minUniformBufferOffsetAlignment = pdevice->pscreen->get_param(pdevice->pscreen, PIPE_CAP_CONSTANT_BUFFER_OFFSET_ALIGNMENT),
      .minStorageBufferOffsetAlignment = pdevice->pscreen->get_param(pdevice->pscreen, PIPE_CAP_SHADER_BUFFER_OFFSET_ALIGNMENT),
      .minTexelOffset = pdevice->pscreen->get_param(pdevice->pscreen, PIPE_CAP_MIN_TEXEL_OFFSET),
      .maxTexelOffset = pdevice->pscreen->get_param(pdevice->pscreen, PIPE_CAP_MAX_TEXEL_OFFSET),
      .minTexelGatherOffset = pdevice->pscreen->get_param(pdevice->pscreen, PIPE_CAP_MIN_TEXTURE_GATHER_OFFSET),
      .maxTexelGatherOffset = pdevice->pscreen->get_param(pdevice->pscreen, PIPE_CAP_MAX_TEXTURE_GATHER_OFFSET),
      .minInterpolationOffset = -2, /* FIXME */
      .maxInterpolationOffset = 2, /* FIXME */
      .subPixelInterpolationOffsetBits = 8, /* FIXME */
      .maxFramebufferWidth = pdevice->pscreen->get_param(pdevice->pscreen, PIPE_CAP_MAX_TEXTURE_2D_SIZE),
      .maxFramebufferHeight = pdevice->pscreen->get_param(pdevice->pscreen, PIPE_CAP_MAX_TEXTURE_2D_SIZE),
      .maxFramebufferLayers = pdevice->pscreen->get_param(pdevice->pscreen, PIPE_CAP_MAX_TEXTURE_ARRAY_LAYERS),
      .framebufferColorSampleCounts = sample_counts,
      .framebufferDepthSampleCounts = sample_counts,
      .framebufferStencilSampleCounts = sample_counts,
      .framebufferNoAttachmentsSampleCounts = sample_counts,
      .maxColorAttachments = pdevice->pscreen->get_param(pdevice->pscreen, PIPE_CAP_MAX_RENDER_TARGETS),
      .sampledImageColorSampleCounts = sample_counts,
      .sampledImageIntegerSampleCounts = sample_counts,
      .sampledImageDepthSampleCounts = sample_counts,
      .sampledImageStencilSampleCounts = sample_counts,
      .storageImageSampleCounts = sample_counts,
      .maxSampleMaskWords = 1,
      .timestampComputeAndGraphics = true,
      .timestampPeriod = 1,
      .maxClipDistances = 8,
      .maxCullDistances = 8,
      .maxCombinedClipAndCullDistances = 8,
      .discreteQueuePriorities = 2,
      .pointSizeRange = { 0.0, pdevice->pscreen->get_paramf(pdevice->pscreen, PIPE_CAPF_MAX_POINT_WIDTH) },
      .lineWidthRange = { 0.0, pdevice->pscreen->get_paramf(pdevice->pscreen, PIPE_CAPF_MAX_LINE_WIDTH) },
      .pointSizeGranularity = (1.0 / 8.0),
      .lineWidthGranularity = (1.0 / 128.0),
      .strictLines = false, /* FINISHME */
      .standardSampleLocations = true,
      .optimalBufferCopyOffsetAlignment = 128,
      .optimalBufferCopyRowPitchAlignment = 128,
      .nonCoherentAtomSize = 64,
   };

   *pProperties = (VkPhysicalDeviceProperties) {
      .apiVersion = VK_MAKE_VERSION(1, 0, 2),
      .driverVersion = 1,
      .vendorID = VK_VENDOR_ID_MESA,
      .deviceID = 0,
      .deviceType = VK_PHYSICAL_DEVICE_TYPE_CPU,
      .limits = limits,
      .sparseProperties = {0},
   };

   strcpy(pProperties->deviceName, pdevice->pscreen->get_name(pdevice->pscreen));
   val_device_get_cache_uuid(pProperties->pipelineCacheUUID);
}

void val_GetPhysicalDeviceProperties2(
   VkPhysicalDevice                            physicalDevice,
   VkPhysicalDeviceProperties2                *pProperties)
{
   val_GetPhysicalDeviceProperties(physicalDevice, &pProperties->properties);

   vk_foreach_struct(ext, pProperties->pNext) {
      switch (ext->sType) {
      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MAINTENANCE_3_PROPERTIES: {
         VkPhysicalDeviceMaintenance3Properties *properties =
            (VkPhysicalDeviceMaintenance3Properties *)ext;
         properties->maxPerSetDescriptors = 1024;
         properties->maxMemoryAllocationSize = (1u << 31);
         break;
      }
      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DRIVER_PROPERTIES_KHR: {
         VkPhysicalDeviceDriverPropertiesKHR *driver_props =
            (VkPhysicalDeviceDriverPropertiesKHR *) ext;
         driver_props->driverID = VK_DRIVER_ID_MESA_LLVMPIPE;
         snprintf(driver_props->driverName, VK_MAX_DRIVER_NAME_SIZE_KHR, "llvmpipe");
         snprintf(driver_props->driverInfo, VK_MAX_DRIVER_INFO_SIZE_KHR,
                  "Mesa " PACKAGE_VERSION MESA_GIT_SHA1
#ifdef MESA_LLVM_VERSION_STRING
                  " (LLVM " MESA_LLVM_VERSION_STRING ")"
#endif
                 );
         driver_props->conformanceVersion.major = 1;
         driver_props->conformanceVersion.minor = 0;
         driver_props->conformanceVersion.subminor = 0;
         driver_props->conformanceVersion.patch = 0;
         break;
      }
      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_POINT_CLIPPING_PROPERTIES: {
         VkPhysicalDevicePointClippingProperties *properties =
            (VkPhysicalDevicePointClippingProperties *)ext;
         properties->pointClippingBehavior = VK_POINT_CLIPPING_BEHAVIOR_ALL_CLIP_PLANES;
         break;
      }
      default:
         break;
      }
   }
}

void val_GetPhysicalDeviceQueueFamilyProperties(
   VkPhysicalDevice                            physicalDevice,
   uint32_t*                                   pCount,
   VkQueueFamilyProperties*                    pQueueFamilyProperties)
{
   if (pQueueFamilyProperties == NULL) {
      *pCount = 1;
      return;
   }

   assert(*pCount >= 1);

   *pQueueFamilyProperties = (VkQueueFamilyProperties) {
      .queueFlags = VK_QUEUE_GRAPHICS_BIT |
                    VK_QUEUE_COMPUTE_BIT |
                    VK_QUEUE_TRANSFER_BIT,
      .queueCount = 1,
      .timestampValidBits = 64,
      .minImageTransferGranularity = (VkExtent3D) { 1, 1, 1 },
   };
}
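
/* Expose a single memory type (host-visible, coherent and cached) on one
 * 2 GiB heap that is also advertised as device-local.
 */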
void val_GetPhysicalDeviceMemoryProperties(
   VkPhysicalDevice                            physicalDevice,
   VkPhysicalDeviceMemoryProperties*           pMemoryProperties)
{
   pMemoryProperties->memoryTypeCount = 1;
   pMemoryProperties->memoryTypes[0] = (VkMemoryType) {
      .propertyFlags = VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT |
                       VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT |
                       VK_MEMORY_PROPERTY_HOST_COHERENT_BIT |
                       VK_MEMORY_PROPERTY_HOST_CACHED_BIT,
      .heapIndex = 0,
   };

   pMemoryProperties->memoryHeapCount = 1;
   pMemoryProperties->memoryHeaps[0] = (VkMemoryHeap) {
      .size = 2ULL * 1024 * 1024 * 1024,
      .flags = VK_MEMORY_HEAP_DEVICE_LOCAL_BIT,
   };
}

PFN_vkVoidFunction val_GetInstanceProcAddr(
   VkInstance                                  _instance,
   const char*                                 pName)
{
   VAL_FROM_HANDLE(val_instance, instance, _instance);

   /* The Vulkan 1.0 spec for vkGetInstanceProcAddr has a table of exactly
    * when we have to return valid function pointers, NULL, or it's left
    * undefined.  See the table for exact details.
    */
   if (pName == NULL)
      return NULL;

#define LOOKUP_VAL_ENTRYPOINT(entrypoint)               \
   if (strcmp(pName, "vk" #entrypoint) == 0)            \
      return (PFN_vkVoidFunction)val_##entrypoint

   LOOKUP_VAL_ENTRYPOINT(EnumerateInstanceExtensionProperties);
   LOOKUP_VAL_ENTRYPOINT(EnumerateInstanceLayerProperties);
   LOOKUP_VAL_ENTRYPOINT(EnumerateInstanceVersion);
   LOOKUP_VAL_ENTRYPOINT(CreateInstance);

   /* GetInstanceProcAddr() can also be called with a NULL instance.
    * See https://gitlab.khronos.org/vulkan/vulkan/issues/2057
    */
   LOOKUP_VAL_ENTRYPOINT(GetInstanceProcAddr);

#undef LOOKUP_VAL_ENTRYPOINT

   if (instance == NULL)
      return NULL;

   int idx = val_get_instance_entrypoint_index(pName);
   if (idx >= 0)
      return instance->dispatch.entrypoints[idx];

   idx = val_get_physical_device_entrypoint_index(pName);
   if (idx >= 0)
      return instance->physical_device_dispatch.entrypoints[idx];

   idx = val_get_device_entrypoint_index(pName);
   if (idx >= 0)
      return instance->device_dispatch.entrypoints[idx];

   return NULL;
}

/* The loader wants us to expose a second GetInstanceProcAddr function
 * to work around certain LD_PRELOAD issues seen in apps.
 */
PUBLIC
VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL vk_icdGetInstanceProcAddr(
   VkInstance                                  instance,
   const char*                                 pName);

PUBLIC
VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL vk_icdGetInstanceProcAddr(
   VkInstance                                  instance,
   const char*                                 pName)
{
   return val_GetInstanceProcAddr(instance, pName);
}

PUBLIC
VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL vk_icdGetPhysicalDeviceProcAddr(
   VkInstance                                  _instance,
   const char*                                 pName);

PUBLIC
VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL vk_icdGetPhysicalDeviceProcAddr(
   VkInstance                                  _instance,
   const char*                                 pName)
{
   VAL_FROM_HANDLE(val_instance, instance, _instance);

   if (!pName || !instance)
      return NULL;

   int idx = val_get_physical_device_entrypoint_index(pName);
   if (idx < 0)
      return NULL;

   return instance->physical_device_dispatch.entrypoints[idx];
}

PFN_vkVoidFunction val_GetDeviceProcAddr(
   VkDevice                                    _device,
   const char*                                 pName)
{
   VAL_FROM_HANDLE(val_device, device, _device);
   if (!device || !pName)
      return NULL;

   int idx = val_get_device_entrypoint_index(pName);
   if (idx < 0)
      return NULL;

   return device->dispatch.entrypoints[idx];
}
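
/* Queue worker thread: sleeps on the new_work condition variable, pops
 * tasks off the workqueue and executes their command buffers on the
 * queue's context.  Submissions with no command buffers just signal their
 * fence.
 */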
static int queue_thread(void *data)
{
   struct val_queue *queue = data;

   mtx_lock(&queue->m);
   while (!queue->shutdown) {
      struct val_queue_work *task;
      while (list_is_empty(&queue->workqueue) && !queue->shutdown)
         cnd_wait(&queue->new_work, &queue->m);

      if (queue->shutdown)
         break;

      task = list_first_entry(&queue->workqueue, struct val_queue_work,
                              list);

      mtx_unlock(&queue->m);
      /* execute */
      for (unsigned i = 0; i < task->cmd_buffer_count; i++) {
         val_execute_cmds(queue->device, queue, task->fence, task->cmd_buffers[i]);
      }
      if (!task->cmd_buffer_count && task->fence)
         task->fence->signaled = true;
      p_atomic_dec(&queue->count);
      mtx_lock(&queue->m);
      list_del(&task->list);
      free(task);
   }
   mtx_unlock(&queue->m);
   return 0;
}

static VkResult
val_queue_init(struct val_device *device, struct val_queue *queue)
{
   queue->_loader_data.loaderMagic = ICD_LOADER_MAGIC;
   queue->device = device;

   queue->flags = 0;
   queue->ctx = device->pscreen->context_create(device->pscreen, NULL, PIPE_CONTEXT_ROBUST_BUFFER_ACCESS);
   list_inithead(&queue->workqueue);
   p_atomic_set(&queue->count, 0);
   mtx_init(&queue->m, mtx_plain);
   queue->exec_thread = u_thread_create(queue_thread, queue);

   return VK_SUCCESS;
}

static void
val_queue_finish(struct val_queue *queue)
{
   mtx_lock(&queue->m);
   queue->shutdown = true;
   cnd_broadcast(&queue->new_work);
   mtx_unlock(&queue->m);

   thrd_join(queue->exec_thread, NULL);

   cnd_destroy(&queue->new_work);
   mtx_destroy(&queue->m);
   queue->ctx->destroy(queue->ctx);
}

static int val_get_device_extension_index(const char *name)
{
   for (unsigned i = 0; i < VAL_DEVICE_EXTENSION_COUNT; ++i) {
      if (strcmp(name, val_device_extensions[i].extensionName) == 0)
         return i;
   }
   return -1;
}

static void
val_device_init_dispatch(struct val_device *device)
{
   const struct val_instance *instance = device->physical_device->instance;
   const struct val_device_dispatch_table *dispatch_table_layer = NULL;
   bool unchecked = instance->debug_flags & VAL_DEBUG_ALL_ENTRYPOINTS;

   for (unsigned i = 0; i < ARRAY_SIZE(device->dispatch.entrypoints); i++) {
      /* Vulkan requires that entrypoints for extensions which have not been
       * enabled must not be advertised.
       */
      if (!unchecked &&
          !val_device_entrypoint_is_enabled(i, instance->apiVersion,
                                            &instance->enabled_extensions,
                                            &device->enabled_extensions)) {
         device->dispatch.entrypoints[i] = NULL;
      } else if (dispatch_table_layer &&
                 dispatch_table_layer->entrypoints[i]) {
         device->dispatch.entrypoints[i] =
            dispatch_table_layer->entrypoints[i];
      } else {
         device->dispatch.entrypoints[i] =
            val_device_dispatch_table.entrypoints[i];
      }
   }
}

VkResult val_CreateDevice(
   VkPhysicalDevice                            physicalDevice,
   const VkDeviceCreateInfo*                   pCreateInfo,
   const VkAllocationCallbacks*                pAllocator,
   VkDevice*                                   pDevice)
{
   VAL_FROM_HANDLE(val_physical_device, physical_device, physicalDevice);
   struct val_device *device;

   assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO);

   /* Check enabled features */
   if (pCreateInfo->pEnabledFeatures) {
      VkPhysicalDeviceFeatures supported_features;
      val_GetPhysicalDeviceFeatures(physicalDevice, &supported_features);
      VkBool32 *supported_feature = (VkBool32 *)&supported_features;
      VkBool32 *enabled_feature = (VkBool32 *)pCreateInfo->pEnabledFeatures;
      unsigned num_features = sizeof(VkPhysicalDeviceFeatures) / sizeof(VkBool32);
      for (uint32_t i = 0; i < num_features; i++) {
         if (enabled_feature[i] && !supported_feature[i])
            return vk_error(physical_device->instance, VK_ERROR_FEATURE_NOT_PRESENT);
      }
   }

   device = vk_zalloc2(&physical_device->instance->alloc, pAllocator,
                       sizeof(*device), 8,
                       VK_SYSTEM_ALLOCATION_SCOPE_DEVICE);
   if (!device)
      return vk_error(physical_device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);

   device->instance = physical_device->instance;
   device->physical_device = physical_device;

   if (pAllocator)
      device->alloc = *pAllocator;
   else
      device->alloc = physical_device->instance->alloc;

   for (uint32_t i = 0; i < pCreateInfo->enabledExtensionCount; i++) {
      const char *ext_name = pCreateInfo->ppEnabledExtensionNames[i];
      int index = val_get_device_extension_index(ext_name);
      if (index < 0 || !physical_device->supported_extensions.extensions[index]) {
         vk_free(&device->alloc, device);
         return vk_error(physical_device->instance, VK_ERROR_EXTENSION_NOT_PRESENT);
      }

      device->enabled_extensions.extensions[index] = true;
   }
   val_device_init_dispatch(device);

   mtx_init(&device->fence_lock, mtx_plain);
   device->pscreen = physical_device->pscreen;

   val_queue_init(device, &device->queue);

   *pDevice = val_device_to_handle(device);

   return VK_SUCCESS;
}

void val_DestroyDevice(
   VkDevice                                    _device,
   const VkAllocationCallbacks*                pAllocator)
{
   VAL_FROM_HANDLE(val_device, device, _device);

   val_queue_finish(&device->queue);
   vk_free(&device->alloc, device);
}

VkResult val_EnumerateInstanceExtensionProperties(
   const char*                                 pLayerName,
   uint32_t*                                   pPropertyCount,
   VkExtensionProperties*                      pProperties)
{
   VK_OUTARRAY_MAKE(out, pProperties, pPropertyCount);

   for (int i = 0; i < VAL_INSTANCE_EXTENSION_COUNT; i++) {
      if (val_instance_extensions_supported.extensions[i]) {
         vk_outarray_append(&out, prop) {
            *prop = val_instance_extensions[i];
         }
      }
   }

   return vk_outarray_status(&out);
}

VkResult val_EnumerateDeviceExtensionProperties(
   VkPhysicalDevice                            physicalDevice,
   const char*                                 pLayerName,
   uint32_t*                                   pPropertyCount,
   VkExtensionProperties*                      pProperties)
{
   VAL_FROM_HANDLE(val_physical_device, device, physicalDevice);
   VK_OUTARRAY_MAKE(out, pProperties, pPropertyCount);

   for (int i = 0; i < VAL_DEVICE_EXTENSION_COUNT; i++) {
      if (device->supported_extensions.extensions[i]) {
         vk_outarray_append(&out, prop) {
            *prop = val_device_extensions[i];
         }
      }
   }
   return vk_outarray_status(&out);
}

VkResult val_EnumerateInstanceLayerProperties(
   uint32_t*                                   pPropertyCount,
   VkLayerProperties*                          pProperties)
{
   if (pProperties == NULL) {
      *pPropertyCount = 0;
      return VK_SUCCESS;
   }

   /* None supported at this time */
   return vk_error(NULL, VK_ERROR_LAYER_NOT_PRESENT);
}

VkResult val_EnumerateDeviceLayerProperties(
   VkPhysicalDevice                            physicalDevice,
   uint32_t*                                   pPropertyCount,
   VkLayerProperties*                          pProperties)
{
   if (pProperties == NULL) {
      *pPropertyCount = 0;
      return VK_SUCCESS;
   }

   /* None supported at this time */
   return vk_error(NULL, VK_ERROR_LAYER_NOT_PRESENT);
}

void val_GetDeviceQueue2(
   VkDevice                                    _device,
   const VkDeviceQueueInfo2*                   pQueueInfo,
   VkQueue*                                    pQueue)
{
   VAL_FROM_HANDLE(val_device, device, _device);
   struct val_queue *queue;

   queue = &device->queue;
   if (pQueueInfo->flags != queue->flags) {
      /* From the Vulkan 1.1.70 spec:
       *
       * "The queue returned by vkGetDeviceQueue2 must have the same
       * flags value from this structure as that used at device
       * creation time in a VkDeviceQueueCreateInfo instance. If no
       * matching flags were specified at device creation time then
       * pQueue will return VK_NULL_HANDLE."
       */
      *pQueue = VK_NULL_HANDLE;
      return;
   }

   *pQueue = val_queue_to_handle(queue);
}

void val_GetDeviceQueue(
   VkDevice                                    _device,
   uint32_t                                    queueFamilyIndex,
   uint32_t                                    queueIndex,
   VkQueue*                                    pQueue)
{
   const VkDeviceQueueInfo2 info = (VkDeviceQueueInfo2) {
      .sType = VK_STRUCTURE_TYPE_DEVICE_QUEUE_INFO_2,
      .queueFamilyIndex = queueFamilyIndex,
      .queueIndex = queueIndex
   };

   val_GetDeviceQueue2(_device, &info, pQueue);
}
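
/* Each submit is packed into a val_queue_work task (header plus an inline
 * array of command buffer pointers) and handed off to the worker thread.
 */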
VkResult val_QueueSubmit(
   VkQueue                                     _queue,
   uint32_t                                    submitCount,
   const VkSubmitInfo*                         pSubmits,
   VkFence                                     _fence)
{
   VAL_FROM_HANDLE(val_queue, queue, _queue);
   VAL_FROM_HANDLE(val_fence, fence, _fence);

   if (submitCount == 0)
      goto just_signal_fence;
   for (uint32_t i = 0; i < submitCount; i++) {
      uint32_t task_size = sizeof(struct val_queue_work) + pSubmits[i].commandBufferCount * sizeof(struct val_cmd_buffer *);
      struct val_queue_work *task = malloc(task_size);
      if (!task)
         return vk_error(queue->device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);

      task->cmd_buffer_count = pSubmits[i].commandBufferCount;
      task->fence = fence;
      task->cmd_buffers = (struct val_cmd_buffer **)(task + 1);
      for (uint32_t j = 0; j < pSubmits[i].commandBufferCount; j++) {
         task->cmd_buffers[j] = val_cmd_buffer_from_handle(pSubmits[i].pCommandBuffers[j]);
      }

      mtx_lock(&queue->m);
      p_atomic_inc(&queue->count);
      list_addtail(&task->list, &queue->workqueue);
      cnd_signal(&queue->new_work);
      mtx_unlock(&queue->m);
   }
   return VK_SUCCESS;
 just_signal_fence:
   if (fence)
      fence->signaled = true;
   return VK_SUCCESS;
}
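
/* Idle-waiting polls the pending-task counter: a zero timeout is a simple
 * status check, UINT64_MAX waits forever, and anything else polls until an
 * absolute CLOCK_MONOTONIC deadline passes.
 */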
static VkResult queue_wait_idle(struct val_queue *queue, uint64_t timeout)
{
   if (timeout == 0)
      return p_atomic_read(&queue->count) == 0 ? VK_SUCCESS : VK_TIMEOUT;
   if (timeout == UINT64_MAX)
      while (p_atomic_read(&queue->count))
         usleep(100);
   else {
      struct timespec t, current;
      clock_gettime(CLOCK_MONOTONIC, &current);
      timespec_add_nsec(&t, &current, timeout);
      bool timedout = false;
      while (p_atomic_read(&queue->count) && !(timedout = timespec_passed(CLOCK_MONOTONIC, &t)))
         usleep(10);
      if (timedout)
         return VK_TIMEOUT;
   }
   return VK_SUCCESS;
}

VkResult val_QueueWaitIdle(
   VkQueue                                     _queue)
{
   VAL_FROM_HANDLE(val_queue, queue, _queue);

   return queue_wait_idle(queue, UINT64_MAX);
}

VkResult val_DeviceWaitIdle(
   VkDevice                                    _device)
{
   VAL_FROM_HANDLE(val_device, device, _device);

   return queue_wait_idle(&device->queue, UINT64_MAX);
}

VkResult val_AllocateMemory(
   VkDevice                                    _device,
   const VkMemoryAllocateInfo*                 pAllocateInfo,
   const VkAllocationCallbacks*                pAllocator,
   VkDeviceMemory*                             pMem)
{
   VAL_FROM_HANDLE(val_device, device, _device);
   struct val_device_memory *mem;
   assert(pAllocateInfo->sType == VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO);

   if (pAllocateInfo->allocationSize == 0) {
      /* Apparently, this is allowed */
      *pMem = VK_NULL_HANDLE;
      return VK_SUCCESS;
   }

   mem = vk_alloc2(&device->alloc, pAllocator, sizeof(*mem), 8,
                   VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (mem == NULL)
      return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);

   vk_object_base_init(&device->vk, &mem->base,
                       VK_OBJECT_TYPE_DEVICE_MEMORY);
   mem->pmem = device->pscreen->allocate_memory(device->pscreen, pAllocateInfo->allocationSize);
   if (!mem->pmem) {
      vk_free2(&device->alloc, pAllocator, mem);
      return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
   }

   mem->type_index = pAllocateInfo->memoryTypeIndex;

   *pMem = val_device_memory_to_handle(mem);

   return VK_SUCCESS;
}

void val_FreeMemory(
   VkDevice                                    _device,
   VkDeviceMemory                              _mem,
   const VkAllocationCallbacks*                pAllocator)
{
   VAL_FROM_HANDLE(val_device, device, _device);
   VAL_FROM_HANDLE(val_device_memory, mem, _mem);

   if (mem == NULL)
      return;

   device->pscreen->free_memory(device->pscreen, mem->pmem);
   vk_object_base_finish(&mem->base);
   vk_free2(&device->alloc, pAllocator, mem);
}

VkResult val_MapMemory(
   VkDevice                                    _device,
   VkDeviceMemory                              _memory,
   VkDeviceSize                                offset,
   VkDeviceSize                                size,
   VkMemoryMapFlags                            flags,
   void**                                      ppData)
{
   VAL_FROM_HANDLE(val_device, device, _device);
   VAL_FROM_HANDLE(val_device_memory, mem, _memory);
   void *map;
   if (mem == NULL) {
      *ppData = NULL;
      return VK_SUCCESS;
   }

   map = device->pscreen->map_memory(device->pscreen, mem->pmem);

   *ppData = (char *)map + offset;
   return VK_SUCCESS;
}

void val_UnmapMemory(
   VkDevice                                    _device,
   VkDeviceMemory                              _memory)
{
   VAL_FROM_HANDLE(val_device, device, _device);
   VAL_FROM_HANDLE(val_device_memory, mem, _memory);

   if (mem == NULL)
      return;

   device->pscreen->unmap_memory(device->pscreen, mem->pmem);
}

VkResult val_FlushMappedMemoryRanges(
   VkDevice                                    _device,
   uint32_t                                    memoryRangeCount,
   const VkMappedMemoryRange*                  pMemoryRanges)
{
   return VK_SUCCESS;
}

VkResult val_InvalidateMappedMemoryRanges(
   VkDevice                                    _device,
   uint32_t                                    memoryRangeCount,
   const VkMappedMemoryRange*                  pMemoryRanges)
{
   return VK_SUCCESS;
}

void val_GetBufferMemoryRequirements(
   VkDevice                                    device,
   VkBuffer                                    _buffer,
   VkMemoryRequirements*                       pMemoryRequirements)
{
   VAL_FROM_HANDLE(val_buffer, buffer, _buffer);

   /* The Vulkan spec (git aaed022) says:
    *
    *    memoryTypeBits is a bitfield and contains one bit set for every
    *    supported memory type for the resource. The bit `1<<i` is set if and
    *    only if the memory type `i` in the VkPhysicalDeviceMemoryProperties
    *    structure for the physical device is supported.
    *
    * We support exactly one memory type.
    */
   pMemoryRequirements->memoryTypeBits = 1;

   pMemoryRequirements->size = buffer->total_size;
   pMemoryRequirements->alignment = 64;
}

void val_GetBufferMemoryRequirements2(
   VkDevice                                    device,
   const VkBufferMemoryRequirementsInfo2      *pInfo,
   VkMemoryRequirements2                      *pMemoryRequirements)
{
   val_GetBufferMemoryRequirements(device, pInfo->buffer,
                                   &pMemoryRequirements->memoryRequirements);
   vk_foreach_struct(ext, pMemoryRequirements->pNext) {
      switch (ext->sType) {
      case VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS: {
         VkMemoryDedicatedRequirements *req =
            (VkMemoryDedicatedRequirements *) ext;
         req->requiresDedicatedAllocation = false;
         req->prefersDedicatedAllocation = req->requiresDedicatedAllocation;
         break;
      }
      default:
         break;
      }
   }
}

void val_GetImageMemoryRequirements(
   VkDevice                                    device,
   VkImage                                     _image,
   VkMemoryRequirements*                       pMemoryRequirements)
{
   VAL_FROM_HANDLE(val_image, image, _image);
   pMemoryRequirements->memoryTypeBits = 1;

   pMemoryRequirements->size = image->size;
   pMemoryRequirements->alignment = image->alignment;
}

void val_GetImageMemoryRequirements2(
   VkDevice                                    device,
   const VkImageMemoryRequirementsInfo2       *pInfo,
   VkMemoryRequirements2                      *pMemoryRequirements)
{
   val_GetImageMemoryRequirements(device, pInfo->image,
                                  &pMemoryRequirements->memoryRequirements);

   vk_foreach_struct(ext, pMemoryRequirements->pNext) {
      switch (ext->sType) {
      case VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS: {
         VkMemoryDedicatedRequirements *req =
            (VkMemoryDedicatedRequirements *) ext;
         req->requiresDedicatedAllocation = false;
         req->prefersDedicatedAllocation = req->requiresDedicatedAllocation;
         break;
      }
      default:
         break;
      }
   }
}

void val_GetImageSparseMemoryRequirements(
   VkDevice                                    device,
   VkImage                                     image,
   uint32_t*                                   pSparseMemoryRequirementCount,
   VkSparseImageMemoryRequirements*            pSparseMemoryRequirements)
{
   stub();
}

void val_GetImageSparseMemoryRequirements2(
   VkDevice                                    device,
   const VkImageSparseMemoryRequirementsInfo2* pInfo,
   uint32_t*                                   pSparseMemoryRequirementCount,
   VkSparseImageMemoryRequirements2*           pSparseMemoryRequirements)
{
   stub();
}

void val_GetDeviceMemoryCommitment(
   VkDevice                                    device,
   VkDeviceMemory                              memory,
   VkDeviceSize*                               pCommittedMemoryInBytes)
{
   *pCommittedMemoryInBytes = 0;
}

VkResult val_BindBufferMemory2(VkDevice _device,
                               uint32_t bindInfoCount,
                               const VkBindBufferMemoryInfo *pBindInfos)
{
   VAL_FROM_HANDLE(val_device, device, _device);
   for (uint32_t i = 0; i < bindInfoCount; ++i) {
      VAL_FROM_HANDLE(val_device_memory, mem, pBindInfos[i].memory);
      VAL_FROM_HANDLE(val_buffer, buffer, pBindInfos[i].buffer);

      device->pscreen->resource_bind_backing(device->pscreen,
                                             buffer->bo,
                                             mem->pmem,
                                             pBindInfos[i].memoryOffset);
   }
   return VK_SUCCESS;
}

VkResult val_BindBufferMemory(
   VkDevice                                    _device,
   VkBuffer                                    _buffer,
   VkDeviceMemory                              _memory,
   VkDeviceSize                                memoryOffset)
{
   VAL_FROM_HANDLE(val_device, device, _device);
   VAL_FROM_HANDLE(val_device_memory, mem, _memory);
   VAL_FROM_HANDLE(val_buffer, buffer, _buffer);

   device->pscreen->resource_bind_backing(device->pscreen,
                                          buffer->bo,
                                          mem->pmem,
                                          memoryOffset);
   return VK_SUCCESS;
}

VkResult val_BindImageMemory2(VkDevice _device,
                              uint32_t bindInfoCount,
                              const VkBindImageMemoryInfo *pBindInfos)
{
   VAL_FROM_HANDLE(val_device, device, _device);
   for (uint32_t i = 0; i < bindInfoCount; ++i) {
      VAL_FROM_HANDLE(val_device_memory, mem, pBindInfos[i].memory);
      VAL_FROM_HANDLE(val_image, image, pBindInfos[i].image);

      device->pscreen->resource_bind_backing(device->pscreen,
                                             image->bo,
                                             mem->pmem,
                                             pBindInfos[i].memoryOffset);
   }
   return VK_SUCCESS;
}

VkResult val_BindImageMemory(
   VkDevice                                    _device,
   VkImage                                     _image,
   VkDeviceMemory                              _memory,
   VkDeviceSize                                memoryOffset)
{
   VAL_FROM_HANDLE(val_device, device, _device);
   VAL_FROM_HANDLE(val_device_memory, mem, _memory);
   VAL_FROM_HANDLE(val_image, image, _image);

   device->pscreen->resource_bind_backing(device->pscreen,
                                          image->bo,
                                          mem->pmem,
                                          memoryOffset);
   return VK_SUCCESS;
}

VkResult val_QueueBindSparse(
   VkQueue                                     queue,
   uint32_t                                    bindInfoCount,
   const VkBindSparseInfo*                     pBindInfo,
   VkFence                                     fence)
{
   stub_return(VK_ERROR_INCOMPATIBLE_DRIVER);
}
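
/* Fences combine a CPU-side 'signaled' flag (used for empty submissions)
 * with a gallium fence handle that only exists once submitted work has
 * actually been executed.
 */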
VkResult val_CreateFence(
   VkDevice                                    _device,
   const VkFenceCreateInfo*                    pCreateInfo,
   const VkAllocationCallbacks*                pAllocator,
   VkFence*                                    pFence)
{
   VAL_FROM_HANDLE(val_device, device, _device);
   struct val_fence *fence;

   fence = vk_alloc2(&device->alloc, pAllocator, sizeof(*fence), 8,
                     VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (fence == NULL)
      return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);

   vk_object_base_init(&device->vk, &fence->base, VK_OBJECT_TYPE_FENCE);
   fence->signaled = pCreateInfo->flags & VK_FENCE_CREATE_SIGNALED_BIT;

   fence->handle = NULL;
   *pFence = val_fence_to_handle(fence);

   return VK_SUCCESS;
}

void val_DestroyFence(
   VkDevice                                    _device,
   VkFence                                     _fence,
   const VkAllocationCallbacks*                pAllocator)
{
   VAL_FROM_HANDLE(val_device, device, _device);
   VAL_FROM_HANDLE(val_fence, fence, _fence);

   if (!_fence)
      return;
   if (fence->handle)
      device->pscreen->fence_reference(device->pscreen, &fence->handle, NULL);

   vk_object_base_finish(&fence->base);
   vk_free2(&device->alloc, pAllocator, fence);
}

VkResult val_ResetFences(
   VkDevice                                    _device,
   uint32_t                                    fenceCount,
   const VkFence*                              pFences)
{
   VAL_FROM_HANDLE(val_device, device, _device);
   for (unsigned i = 0; i < fenceCount; i++) {
      struct val_fence *fence = val_fence_from_handle(pFences[i]);

      fence->signaled = false;

      mtx_lock(&device->fence_lock);
      if (fence->handle)
         device->pscreen->fence_reference(device->pscreen, &fence->handle, NULL);
      mtx_unlock(&device->fence_lock);
   }
   return VK_SUCCESS;
}

VkResult val_GetFenceStatus(
   VkDevice                                    _device,
   VkFence                                     _fence)
{
   VAL_FROM_HANDLE(val_device, device, _device);
   VAL_FROM_HANDLE(val_fence, fence, _fence);

   if (fence->signaled)
      return VK_SUCCESS;

   mtx_lock(&device->fence_lock);

   if (!fence->handle) {
      mtx_unlock(&device->fence_lock);
      return VK_NOT_READY;
   }

   bool signalled = device->pscreen->fence_finish(device->pscreen,
                                                  NULL,
                                                  fence->handle,
                                                  0);
   mtx_unlock(&device->fence_lock);
   if (signalled)
      return VK_SUCCESS;
   else
      return VK_NOT_READY;
}

VkResult val_CreateFramebuffer(
   VkDevice                                    _device,
   const VkFramebufferCreateInfo*              pCreateInfo,
   const VkAllocationCallbacks*                pAllocator,
   VkFramebuffer*                              pFramebuffer)
{
   VAL_FROM_HANDLE(val_device, device, _device);
   struct val_framebuffer *framebuffer;

   assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO);

   size_t size = sizeof(*framebuffer) +
      sizeof(struct val_image_view *) * pCreateInfo->attachmentCount;
   framebuffer = vk_alloc2(&device->alloc, pAllocator, size, 8,
                           VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (framebuffer == NULL)
      return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);

   vk_object_base_init(&device->vk, &framebuffer->base,
                       VK_OBJECT_TYPE_FRAMEBUFFER);
   framebuffer->attachment_count = pCreateInfo->attachmentCount;
   for (uint32_t i = 0; i < pCreateInfo->attachmentCount; i++) {
      VkImageView _iview = pCreateInfo->pAttachments[i];
      framebuffer->attachments[i] = val_image_view_from_handle(_iview);
   }

   framebuffer->width = pCreateInfo->width;
   framebuffer->height = pCreateInfo->height;
   framebuffer->layers = pCreateInfo->layers;

   *pFramebuffer = val_framebuffer_to_handle(framebuffer);

   return VK_SUCCESS;
}

void val_DestroyFramebuffer(
   VkDevice                                    _device,
   VkFramebuffer                               _fb,
   const VkAllocationCallbacks*                pAllocator)
{
   VAL_FROM_HANDLE(val_device, device, _device);
   VAL_FROM_HANDLE(val_framebuffer, fb, _fb);

   if (!fb)
      return;
   vk_object_base_finish(&fb->base);
   vk_free2(&device->alloc, pAllocator, fb);
}
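
/* Wait for the queue to drain first, then check the individual fences; a
 * fence that never got a backing handle counts as timed out unless
 * waitAll is false and another fence already completed.
 */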
VkResult val_WaitForFences(
   VkDevice                                    _device,
   uint32_t                                    fenceCount,
   const VkFence*                              pFences,
   VkBool32                                    waitAll,
   uint64_t                                    timeout)
{
   VAL_FROM_HANDLE(val_device, device, _device);

   VkResult qret = queue_wait_idle(&device->queue, timeout);
   bool timeout_status = false;
   if (qret == VK_TIMEOUT)
      return VK_TIMEOUT;

   mtx_lock(&device->fence_lock);
   for (unsigned i = 0; i < fenceCount; i++) {
      struct val_fence *fence = val_fence_from_handle(pFences[i]);

      if (fence->signaled)
         continue;
      if (!fence->handle) {
         timeout_status |= true;
         continue;
      }
      bool ret = device->pscreen->fence_finish(device->pscreen,
                                               NULL,
                                               fence->handle,
                                               timeout);
      if (ret && !waitAll) {
         timeout_status = false;
         break;
      }

      if (!ret)
         timeout_status |= true;
   }
   mtx_unlock(&device->fence_lock);
   return timeout_status ? VK_TIMEOUT : VK_SUCCESS;
}
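
/* Semaphores carry no payload in this implementation; only the object's
 * lifetime is tracked.
 */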
VkResult val_CreateSemaphore(
   VkDevice                                    _device,
   const VkSemaphoreCreateInfo*                pCreateInfo,
   const VkAllocationCallbacks*                pAllocator,
   VkSemaphore*                                pSemaphore)
{
   VAL_FROM_HANDLE(val_device, device, _device);

   struct val_semaphore *sema = vk_alloc2(&device->alloc, pAllocator,
                                          sizeof(*sema), 8,
                                          VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);

   if (!sema)
      return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
   vk_object_base_init(&device->vk, &sema->base,
                       VK_OBJECT_TYPE_SEMAPHORE);
   *pSemaphore = val_semaphore_to_handle(sema);

   return VK_SUCCESS;
}

void val_DestroySemaphore(
   VkDevice                                    _device,
   VkSemaphore                                 _semaphore,
   const VkAllocationCallbacks*                pAllocator)
{
   VAL_FROM_HANDLE(val_device, device, _device);
   VAL_FROM_HANDLE(val_semaphore, semaphore, _semaphore);

   if (!_semaphore)
      return;
   vk_object_base_finish(&semaphore->base);
   vk_free2(&device->alloc, pAllocator, semaphore);
}
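
/* Events are backed by a plain integer (event_storage) that is written and
 * polled directly.
 */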
VkResult val_CreateEvent(
   VkDevice                                    _device,
   const VkEventCreateInfo*                    pCreateInfo,
   const VkAllocationCallbacks*                pAllocator,
   VkEvent*                                    pEvent)
{
   VAL_FROM_HANDLE(val_device, device, _device);
   struct val_event *event = vk_alloc2(&device->alloc, pAllocator,
                                       sizeof(*event), 8,
                                       VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);

   if (!event)
      return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);

   vk_object_base_init(&device->vk, &event->base, VK_OBJECT_TYPE_EVENT);
   *pEvent = val_event_to_handle(event);

   return VK_SUCCESS;
}

void val_DestroyEvent(
   VkDevice                                    _device,
   VkEvent                                     _event,
   const VkAllocationCallbacks*                pAllocator)
{
   VAL_FROM_HANDLE(val_device, device, _device);
   VAL_FROM_HANDLE(val_event, event, _event);

   if (!event)
      return;

   vk_object_base_finish(&event->base);
   vk_free2(&device->alloc, pAllocator, event);
}

VkResult val_GetEventStatus(
   VkDevice                                    _device,
   VkEvent                                     _event)
{
   VAL_FROM_HANDLE(val_event, event, _event);
   if (event->event_storage == 1)
      return VK_EVENT_SET;
   return VK_EVENT_RESET;
}

VkResult val_SetEvent(
   VkDevice                                    _device,
   VkEvent                                     _event)
{
   VAL_FROM_HANDLE(val_event, event, _event);
   event->event_storage = 1;

   return VK_SUCCESS;
}

VkResult val_ResetEvent(
   VkDevice                                    _device,
   VkEvent                                     _event)
{
   VAL_FROM_HANDLE(val_event, event, _event);
   event->event_storage = 0;

   return VK_SUCCESS;
}

VkResult val_CreateSampler(
   VkDevice                                    _device,
   const VkSamplerCreateInfo*                  pCreateInfo,
   const VkAllocationCallbacks*                pAllocator,
   VkSampler*                                  pSampler)
{
   VAL_FROM_HANDLE(val_device, device, _device);
   struct val_sampler *sampler;

   assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_SAMPLER_CREATE_INFO);

   sampler = vk_alloc2(&device->alloc, pAllocator, sizeof(*sampler), 8,
                       VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (!sampler)
      return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);

   vk_object_base_init(&device->vk, &sampler->base,
                       VK_OBJECT_TYPE_SAMPLER);
   sampler->create_info = *pCreateInfo;
   *pSampler = val_sampler_to_handle(sampler);

   return VK_SUCCESS;
}

void val_DestroySampler(
   VkDevice                                    _device,
   VkSampler                                   _sampler,
   const VkAllocationCallbacks*                pAllocator)
{
   VAL_FROM_HANDLE(val_device, device, _device);
   VAL_FROM_HANDLE(val_sampler, sampler, _sampler);

   if (!_sampler)
      return;
   vk_object_base_finish(&sampler->base);
   vk_free2(&device->alloc, pAllocator, sampler);
}
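
/* The EXT_private_data entrypoints just forward to the shared vk_object
 * helpers.
 */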
VkResult val_CreatePrivateDataSlotEXT(
   VkDevice                                    _device,
   const VkPrivateDataSlotCreateInfoEXT*       pCreateInfo,
   const VkAllocationCallbacks*                pAllocator,
   VkPrivateDataSlotEXT*                       pPrivateDataSlot)
{
   VAL_FROM_HANDLE(val_device, device, _device);
   return vk_private_data_slot_create(&device->vk, pCreateInfo, pAllocator,
                                      pPrivateDataSlot);
}

void val_DestroyPrivateDataSlotEXT(
   VkDevice                                    _device,
   VkPrivateDataSlotEXT                        privateDataSlot,
   const VkAllocationCallbacks*                pAllocator)
{
   VAL_FROM_HANDLE(val_device, device, _device);
   vk_private_data_slot_destroy(&device->vk, privateDataSlot, pAllocator);
}

VkResult val_SetPrivateDataEXT(
   VkDevice                                    _device,
   VkObjectType                                objectType,
   uint64_t                                    objectHandle,
   VkPrivateDataSlotEXT                        privateDataSlot,
   uint64_t                                    data)
{
   VAL_FROM_HANDLE(val_device, device, _device);
   return vk_object_base_set_private_data(&device->vk, objectType,
                                          objectHandle, privateDataSlot,
                                          data);
}

void val_GetPrivateDataEXT(
   VkDevice                                    _device,
   VkObjectType                                objectType,
   uint64_t                                    objectHandle,
   VkPrivateDataSlotEXT                        privateDataSlot,
   uint64_t*                                   pData)
{
   VAL_FROM_HANDLE(val_device, device, _device);
   vk_object_base_get_private_data(&device->vk, objectType, objectHandle,
                                   privateDataSlot, pData);
}