anv/device: Only support binding UBOs through BufferInfo
src/vulkan/anv_device.c
/*
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include <assert.h>
#include <stdbool.h>
#include <string.h>
#include <unistd.h>
#include <fcntl.h>

#include "anv_private.h"
#include "mesa/main/git_sha1.h"
#include "util/strtod.h"

struct anv_dispatch_table dtable;

static void
compiler_debug_log(void *data, const char *fmt, ...)
{ }

static void
compiler_perf_log(void *data, const char *fmt, ...)
{
   va_list args;
   va_start(args, fmt);

   if (unlikely(INTEL_DEBUG & DEBUG_PERF))
      vfprintf(stderr, fmt, args);

   va_end(args);
}
51
52 static VkResult
53 anv_physical_device_init(struct anv_physical_device *device,
54 struct anv_instance *instance,
55 const char *path)
56 {
57 VkResult result;
58 int fd;
59
60 fd = open(path, O_RDWR | O_CLOEXEC);
61 if (fd < 0)
62 return vk_errorf(VK_ERROR_INITIALIZATION_FAILED,
63 "failed to open %s: %m", path);
64
65 device->_loader_data.loaderMagic = ICD_LOADER_MAGIC;
66 device->instance = instance;
67 device->path = path;
68
69 device->chipset_id = anv_gem_get_param(fd, I915_PARAM_CHIPSET_ID);
70 if (!device->chipset_id) {
71 result = vk_errorf(VK_ERROR_INITIALIZATION_FAILED,
72 "failed to get chipset id: %m");
73 goto fail;
74 }
75
76 device->name = brw_get_device_name(device->chipset_id);
77 device->info = brw_get_device_info(device->chipset_id);
78 if (!device->info) {
79 result = vk_errorf(VK_ERROR_INITIALIZATION_FAILED,
80 "failed to get device info");
81 goto fail;
82 }
83
   if (device->info->gen == 7 &&
       !device->info->is_haswell && !device->info->is_baytrail) {
      fprintf(stderr, "WARNING: Ivy Bridge Vulkan support is incomplete\n");
   } else if (device->info->gen == 8 && !device->info->is_cherryview) {
      /* Broadwell is as fully supported as anything */
   } else {
      result = vk_errorf(VK_UNSUPPORTED,
                         "Vulkan not yet supported on %s", device->name);
      goto fail;
   }

   if (anv_gem_get_aperture(fd, &device->aperture_size) == -1) {
      result = vk_errorf(VK_ERROR_INITIALIZATION_FAILED,
                         "failed to get aperture size: %m");
      goto fail;
   }

   if (!anv_gem_get_param(fd, I915_PARAM_HAS_WAIT_TIMEOUT)) {
      result = vk_errorf(VK_ERROR_INITIALIZATION_FAILED,
                         "kernel missing gem wait");
      goto fail;
   }

   if (!anv_gem_get_param(fd, I915_PARAM_HAS_EXECBUF2)) {
      result = vk_errorf(VK_ERROR_INITIALIZATION_FAILED,
                         "kernel missing execbuf2");
      goto fail;
   }

   if (!anv_gem_get_param(fd, I915_PARAM_HAS_LLC)) {
      result = vk_errorf(VK_ERROR_INITIALIZATION_FAILED,
                         "non-llc gpu");
      goto fail;
   }

   brw_process_intel_debug_variable();

   device->compiler = brw_compiler_create(NULL, device->info);
   if (device->compiler == NULL) {
      result = vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
      goto fail;
   }
   device->compiler->shader_debug_log = compiler_debug_log;
   device->compiler->shader_perf_log = compiler_perf_log;

   /* Delay closing the fd until everything that can fail has succeeded;
    * the fail path closes it too, and closing it any earlier would make a
    * compiler-creation failure close it twice.
    */
   close(fd);

   return VK_SUCCESS;

fail:
   close(fd);
   return result;
}

static void
anv_physical_device_finish(struct anv_physical_device *device)
{
   ralloc_free(device->compiler);
}

static void *default_alloc(
    void*                                       pUserData,
    size_t                                      size,
    size_t                                      alignment,
    VkSystemAllocType                           allocType)
{
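   /* Note: the alignment parameter is ignored. Every allocation in this
    * file requests an 8-byte alignment, which malloc already guarantees.
    */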
   return malloc(size);
}

static void default_free(
    void*                                       pUserData,
    void*                                       pMem)
{
   free(pMem);
}

static const VkAllocCallbacks default_alloc_callbacks = {
   .pUserData = NULL,
   .pfnAlloc = default_alloc,
   .pfnFree = default_free
};

static const VkExtensionProperties global_extensions[] = {
   {
      .extName = VK_EXT_KHR_SWAPCHAIN_EXTENSION_NAME,
      .specVersion = 17,
   },
};

static const VkExtensionProperties device_extensions[] = {
   {
      .extName = VK_EXT_KHR_DEVICE_SWAPCHAIN_EXTENSION_NAME,
      .specVersion = 53,
   },
};

VkResult anv_CreateInstance(
    const VkInstanceCreateInfo*                 pCreateInfo,
    VkInstance*                                 pInstance)
{
   struct anv_instance *instance;
   const VkAllocCallbacks *alloc_callbacks = &default_alloc_callbacks;
   void *user_data = NULL;

   assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_INSTANCE_CREATE_INFO);

   if (pCreateInfo->pAppInfo->apiVersion != VK_MAKE_VERSION(0, 170, 2))
      return vk_error(VK_ERROR_INCOMPATIBLE_DRIVER);

   for (uint32_t i = 0; i < pCreateInfo->extensionCount; i++) {
      bool found = false;
      for (uint32_t j = 0; j < ARRAY_SIZE(global_extensions); j++) {
         if (strcmp(pCreateInfo->ppEnabledExtensionNames[i],
                    global_extensions[j].extName) == 0) {
            found = true;
            break;
         }
      }
      if (!found)
         return vk_error(VK_ERROR_EXTENSION_NOT_PRESENT);
   }

   if (pCreateInfo->pAllocCb) {
      alloc_callbacks = pCreateInfo->pAllocCb;
      user_data = pCreateInfo->pAllocCb->pUserData;
   }
   instance = alloc_callbacks->pfnAlloc(user_data, sizeof(*instance), 8,
                                        VK_SYSTEM_ALLOC_TYPE_API_OBJECT);
   if (!instance)
      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

   instance->_loader_data.loaderMagic = ICD_LOADER_MAGIC;
   instance->pAllocUserData = alloc_callbacks->pUserData;
   instance->pfnAlloc = alloc_callbacks->pfnAlloc;
   instance->pfnFree = alloc_callbacks->pfnFree;
   instance->apiVersion = pCreateInfo->pAppInfo->apiVersion;
   instance->physicalDeviceCount = -1;

   _mesa_locale_init();

   VG(VALGRIND_CREATE_MEMPOOL(instance, 0, false));

   anv_init_wsi(instance);

   *pInstance = anv_instance_to_handle(instance);

   return VK_SUCCESS;
}

void anv_DestroyInstance(
    VkInstance                                  _instance)
{
   ANV_FROM_HANDLE(anv_instance, instance, _instance);

   if (instance->physicalDeviceCount > 0) {
      /* We support at most one physical device. */
      assert(instance->physicalDeviceCount == 1);
      anv_physical_device_finish(&instance->physicalDevice);
   }

   anv_finish_wsi(instance);

   VG(VALGRIND_DESTROY_MEMPOOL(instance));

   _mesa_locale_fini();

   instance->pfnFree(instance->pAllocUserData, instance);
}

void *
anv_instance_alloc(struct anv_instance *instance, size_t size,
                   size_t alignment, VkSystemAllocType allocType)
{
   void *mem = instance->pfnAlloc(instance->pAllocUserData,
                                  size, alignment, allocType);
   if (mem) {
      VG(VALGRIND_MEMPOOL_ALLOC(instance, mem, size));
      VG(VALGRIND_MAKE_MEM_UNDEFINED(mem, size));
   }
   return mem;
}

void
anv_instance_free(struct anv_instance *instance, void *mem)
{
   if (mem == NULL)
      return;

   VG(VALGRIND_MEMPOOL_FREE(instance, mem));

   instance->pfnFree(instance->pAllocUserData, mem);
}

VkResult anv_EnumeratePhysicalDevices(
    VkInstance                                  _instance,
    uint32_t*                                   pPhysicalDeviceCount,
    VkPhysicalDevice*                           pPhysicalDevices)
{
   ANV_FROM_HANDLE(anv_instance, instance, _instance);
   VkResult result;

   if (instance->physicalDeviceCount < 0) {
      result = anv_physical_device_init(&instance->physicalDevice,
                                        instance, "/dev/dri/renderD128");
      if (result == VK_UNSUPPORTED) {
         instance->physicalDeviceCount = 0;
      } else if (result == VK_SUCCESS) {
         instance->physicalDeviceCount = 1;
      } else {
         return result;
      }
   }

   /* pPhysicalDeviceCount is an out parameter if pPhysicalDevices is NULL;
    * otherwise it's an inout parameter.
    *
    * The Vulkan spec (git aaed022) says:
    *
    *    pPhysicalDeviceCount is a pointer to an unsigned integer variable
    *    that is initialized with the number of devices the application is
    *    prepared to receive handles to. pname:pPhysicalDevices is pointer to
    *    an array of at least this many VkPhysicalDevice handles [...].
    *
    *    Upon success, if pPhysicalDevices is NULL, vkEnumeratePhysicalDevices
    *    overwrites the contents of the variable pointed to by
    *    pPhysicalDeviceCount with the number of physical devices in the
    *    instance; otherwise, vkEnumeratePhysicalDevices overwrites
    *    pPhysicalDeviceCount with the number of physical handles written to
    *    pPhysicalDevices.
    */
   if (!pPhysicalDevices) {
      *pPhysicalDeviceCount = instance->physicalDeviceCount;
   } else if (*pPhysicalDeviceCount >= 1) {
      pPhysicalDevices[0] = anv_physical_device_to_handle(&instance->physicalDevice);
      *pPhysicalDeviceCount = 1;
   } else {
      *pPhysicalDeviceCount = 0;
   }

   return VK_SUCCESS;
}

VkResult anv_GetPhysicalDeviceFeatures(
    VkPhysicalDevice                            physicalDevice,
    VkPhysicalDeviceFeatures*                   pFeatures)
{
   anv_finishme("Get correct values for PhysicalDeviceFeatures");

   *pFeatures = (VkPhysicalDeviceFeatures) {
      .robustBufferAccess = false,
      .fullDrawIndexUint32 = false,
      .imageCubeArray = false,
      .independentBlend = false,
      .geometryShader = true,
      .tessellationShader = false,
      .sampleRateShading = false,
      .dualSourceBlend = true,
      .logicOp = true,
      .multiDrawIndirect = true,
      .depthClip = false,
      .depthBiasClamp = false,
      .fillModeNonSolid = true,
      .depthBounds = false,
      .wideLines = true,
      .largePoints = true,
      .textureCompressionETC2 = true,
      .textureCompressionASTC_LDR = true,
      .textureCompressionBC = true,
      .occlusionQueryNonConservative = false, /* FINISHME */
      .pipelineStatisticsQuery = true,
      .vertexSideEffects = false,
      .tessellationSideEffects = false,
      .geometrySideEffects = false,
      .fragmentSideEffects = false,
      .shaderTessellationPointSize = false,
      .shaderGeometryPointSize = true,
      .shaderImageGatherExtended = true,
      .shaderStorageImageExtendedFormats = false,
      .shaderStorageImageMultisample = false,
      .shaderUniformBufferArrayDynamicIndexing = true,
      .shaderSampledImageArrayDynamicIndexing = false,
      .shaderStorageBufferArrayDynamicIndexing = false,
      .shaderStorageImageArrayDynamicIndexing = false,
      .shaderClipDistance = false,
      .shaderCullDistance = false,
      .shaderFloat64 = false,
      .shaderInt64 = false,
      .shaderInt16 = false,
      .alphaToOne = true,
   };

   return VK_SUCCESS;
}

VkResult anv_GetPhysicalDeviceProperties(
    VkPhysicalDevice                            physicalDevice,
    VkPhysicalDeviceProperties*                 pProperties)
{
   ANV_FROM_HANDLE(anv_physical_device, pdevice, physicalDevice);
   const struct brw_device_info *devinfo = pdevice->info;

   anv_finishme("Get correct values for VkPhysicalDeviceLimits");

   VkPhysicalDeviceLimits limits = {
      .maxImageDimension1D = (1 << 14),
      .maxImageDimension2D = (1 << 14),
      .maxImageDimension3D = (1 << 10),
      .maxImageDimensionCube = (1 << 14),
      .maxImageArrayLayers = (1 << 10),

      /* Broadwell supports 1, 2, 4, and 8 samples. */
      .sampleCounts = 4,

      .maxTexelBufferSize = (1 << 14),
      .maxUniformBufferSize = UINT32_MAX,
      .maxStorageBufferSize = UINT32_MAX,
      .maxPushConstantsSize = MAX_PUSH_CONSTANTS_SIZE,
      .maxMemoryAllocationCount = UINT32_MAX,
      .bufferImageGranularity = 64, /* A cache line */
      .sparseAddressSpaceSize = 0,
      .maxBoundDescriptorSets = MAX_SETS,
      .maxDescriptorSets = UINT32_MAX,
      .maxPerStageDescriptorSamplers = 64,
      .maxPerStageDescriptorUniformBuffers = 64,
      .maxPerStageDescriptorStorageBuffers = 64,
      .maxPerStageDescriptorSampledImages = 64,
      .maxPerStageDescriptorStorageImages = 64,
      .maxDescriptorSetSamplers = 256,
      .maxDescriptorSetUniformBuffers = 256,
      .maxDescriptorSetUniformBuffersDynamic = 256,
      .maxDescriptorSetStorageBuffers = 256,
      .maxDescriptorSetStorageBuffersDynamic = 256,
      .maxDescriptorSetSampledImages = 256,
      .maxDescriptorSetStorageImages = 256,
      .maxVertexInputAttributes = 32,
      .maxVertexInputBindings = 32,
      .maxVertexInputAttributeOffset = 256,
      .maxVertexInputBindingStride = 256,
      .maxVertexOutputComponents = 32,
      .maxTessGenLevel = 0,
      .maxTessPatchSize = 0,
      .maxTessControlPerVertexInputComponents = 0,
      .maxTessControlPerVertexOutputComponents = 0,
      .maxTessControlPerPatchOutputComponents = 0,
      .maxTessControlTotalOutputComponents = 0,
      .maxTessEvaluationInputComponents = 0,
      .maxTessEvaluationOutputComponents = 0,
      .maxGeometryShaderInvocations = 6,
      .maxGeometryInputComponents = 16,
      .maxGeometryOutputComponents = 16,
      .maxGeometryOutputVertices = 16,
      .maxGeometryTotalOutputComponents = 16,
      .maxFragmentInputComponents = 16,
      .maxFragmentOutputBuffers = 8,
      .maxFragmentDualSourceBuffers = 2,
      .maxFragmentCombinedOutputResources = 8,
      .maxComputeSharedMemorySize = 1024,
      .maxComputeWorkGroupCount = {
         16 * devinfo->max_cs_threads,
         16 * devinfo->max_cs_threads,
         16 * devinfo->max_cs_threads,
      },
      .maxComputeWorkGroupInvocations = 16 * devinfo->max_cs_threads,
      .maxComputeWorkGroupSize = {
         16 * devinfo->max_cs_threads,
         16 * devinfo->max_cs_threads,
         16 * devinfo->max_cs_threads,
      },
      .subPixelPrecisionBits = 4 /* FIXME */,
      .subTexelPrecisionBits = 4 /* FIXME */,
      .mipmapPrecisionBits = 4 /* FIXME */,
      .maxDrawIndexedIndexValue = UINT32_MAX,
      .maxDrawIndirectInstanceCount = UINT32_MAX,
      .primitiveRestartForPatches = UINT32_MAX,
      .maxSamplerLodBias = 16,
      .maxSamplerAnisotropy = 16,
      .maxViewports = MAX_VIEWPORTS,
      .maxViewportDimensions = { (1 << 14), (1 << 14) },
      .viewportBoundsRange = { -1.0, 1.0 }, /* FIXME */
      .viewportSubPixelBits = 13, /* We take a float? */
      .minMemoryMapAlignment = 64, /* A cache line */
      .minTexelBufferOffsetAlignment = 1,
      .minUniformBufferOffsetAlignment = 1,
      .minStorageBufferOffsetAlignment = 1,
      .minTexelOffset = 0, /* FIXME */
      .maxTexelOffset = 0, /* FIXME */
      .minTexelGatherOffset = 0, /* FIXME */
      .maxTexelGatherOffset = 0, /* FIXME */
      .minInterpolationOffset = 0, /* FIXME */
      .maxInterpolationOffset = 0, /* FIXME */
      .subPixelInterpolationOffsetBits = 0, /* FIXME */
      .maxFramebufferWidth = (1 << 14),
      .maxFramebufferHeight = (1 << 14),
      .maxFramebufferLayers = (1 << 10),
      .maxFramebufferColorSamples = 8,
      .maxFramebufferDepthSamples = 8,
      .maxFramebufferStencilSamples = 8,
      .maxColorAttachments = MAX_RTS,
      .maxSampledImageColorSamples = 8,
      .maxSampledImageDepthSamples = 8,
      .maxSampledImageIntegerSamples = 1,
      .maxStorageImageSamples = 1,
      .maxSampleMaskWords = 1,
      .timestampFrequency = 1000 * 1000 * 1000 / 80,
      .maxClipDistances = 0 /* FIXME */,
      .maxCullDistances = 0 /* FIXME */,
      .maxCombinedClipAndCullDistances = 0 /* FIXME */,
      .pointSizeRange = { 0.125, 255.875 },
      .lineWidthRange = { 0.0, 7.9921875 },
      .pointSizeGranularity = (1.0 / 8.0),
      .lineWidthGranularity = (1.0 / 128.0),
   };

   *pProperties = (VkPhysicalDeviceProperties) {
      .apiVersion = VK_MAKE_VERSION(0, 170, 2),
      .driverVersion = 1,
      .vendorId = 0x8086,
      .deviceId = pdevice->chipset_id,
      .deviceType = VK_PHYSICAL_DEVICE_TYPE_INTEGRATED_GPU,
      .limits = limits,
      .sparseProperties = {0}, /* Broadwell doesn't do sparse. */
   };

   strcpy(pProperties->deviceName, pdevice->name);
   snprintf((char *)pProperties->pipelineCacheUUID, VK_UUID_LENGTH,
            "anv-%s", MESA_GIT_SHA1 + 4);

   return VK_SUCCESS;
}

VkResult anv_GetPhysicalDeviceQueueFamilyProperties(
    VkPhysicalDevice                            physicalDevice,
    uint32_t*                                   pCount,
    VkQueueFamilyProperties*                    pQueueFamilyProperties)
{
   if (pQueueFamilyProperties == NULL) {
      *pCount = 1;
      return VK_SUCCESS;
   }

   assert(*pCount >= 1);

   *pQueueFamilyProperties = (VkQueueFamilyProperties) {
      .queueFlags = VK_QUEUE_GRAPHICS_BIT |
                    VK_QUEUE_COMPUTE_BIT |
                    VK_QUEUE_DMA_BIT,
      .queueCount = 1,
      .supportsTimestamps = true,
   };

   return VK_SUCCESS;
}

VkResult anv_GetPhysicalDeviceMemoryProperties(
    VkPhysicalDevice                            physicalDevice,
    VkPhysicalDeviceMemoryProperties*           pMemoryProperties)
{
   ANV_FROM_HANDLE(anv_physical_device, physical_device, physicalDevice);
   VkDeviceSize heap_size;

   /* Reserve some wiggle room for the driver by exposing only 75% of the
    * aperture to the heap.
    */
   heap_size = 3 * physical_device->aperture_size / 4;

   /* The property flags below are valid only for llc platforms. */
   pMemoryProperties->memoryTypeCount = 1;
   pMemoryProperties->memoryTypes[0] = (VkMemoryType) {
      .propertyFlags = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT,
      .heapIndex = 0, /* We expose exactly one heap, so its index must be 0. */
   };

   pMemoryProperties->memoryHeapCount = 1;
   pMemoryProperties->memoryHeaps[0] = (VkMemoryHeap) {
      .size = heap_size,
      .flags = VK_MEMORY_HEAP_HOST_LOCAL_BIT,
   };

   return VK_SUCCESS;
}

PFN_vkVoidFunction anv_GetInstanceProcAddr(
    VkInstance                                  instance,
    const char*                                 pName)
{
   return anv_lookup_entrypoint(pName);
}

PFN_vkVoidFunction anv_GetDeviceProcAddr(
    VkDevice                                    device,
    const char*                                 pName)
{
   return anv_lookup_entrypoint(pName);
}

static VkResult
anv_queue_init(struct anv_device *device, struct anv_queue *queue)
{
   queue->_loader_data.loaderMagic = ICD_LOADER_MAGIC;
   queue->device = device;
   queue->pool = &device->surface_state_pool;

   return VK_SUCCESS;
}

static void
anv_queue_finish(struct anv_queue *queue)
{
}

static void
anv_device_init_border_colors(struct anv_device *device)
{
   static const VkClearColorValue border_colors[] = {
      [VK_BORDER_COLOR_FLOAT_TRANSPARENT_BLACK] = { .float32 = { 0.0, 0.0, 0.0, 0.0 } },
      [VK_BORDER_COLOR_FLOAT_OPAQUE_BLACK] = { .float32 = { 0.0, 0.0, 0.0, 1.0 } },
      [VK_BORDER_COLOR_FLOAT_OPAQUE_WHITE] = { .float32 = { 1.0, 1.0, 1.0, 1.0 } },
      [VK_BORDER_COLOR_INT_TRANSPARENT_BLACK] = { .uint32 = { 0, 0, 0, 0 } },
      [VK_BORDER_COLOR_INT_OPAQUE_BLACK] = { .uint32 = { 0, 0, 0, 1 } },
      [VK_BORDER_COLOR_INT_OPAQUE_WHITE] = { .uint32 = { 1, 1, 1, 1 } },
   };

   device->border_colors =
      anv_state_pool_alloc(&device->dynamic_state_pool,
                           sizeof(border_colors), 32);
   memcpy(device->border_colors.map, border_colors, sizeof(border_colors));
}

VkResult anv_CreateDevice(
    VkPhysicalDevice                            physicalDevice,
    const VkDeviceCreateInfo*                   pCreateInfo,
    VkDevice*                                   pDevice)
{
   ANV_FROM_HANDLE(anv_physical_device, physical_device, physicalDevice);
   struct anv_instance *instance = physical_device->instance;
   struct anv_device *device;

   assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO);

   for (uint32_t i = 0; i < pCreateInfo->extensionCount; i++) {
      bool found = false;
      for (uint32_t j = 0; j < ARRAY_SIZE(device_extensions); j++) {
         if (strcmp(pCreateInfo->ppEnabledExtensionNames[i],
                    device_extensions[j].extName) == 0) {
            found = true;
            break;
         }
      }
      if (!found)
         return vk_error(VK_ERROR_EXTENSION_NOT_PRESENT);
   }

   anv_set_dispatch_gen(physical_device->info->gen);

   device = anv_instance_alloc(instance, sizeof(*device), 8,
                               VK_SYSTEM_ALLOC_TYPE_API_OBJECT);
   if (!device)
      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

   device->_loader_data.loaderMagic = ICD_LOADER_MAGIC;
   device->instance = physical_device->instance;

   /* XXX(chadv): Can we dup() physicalDevice->fd here? */
   device->fd = open(physical_device->path, O_RDWR | O_CLOEXEC);
   if (device->fd == -1)
      goto fail_device;

   device->context_id = anv_gem_create_context(device);
   if (device->context_id == -1)
      goto fail_fd;

   pthread_mutex_init(&device->mutex, NULL);

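   /* Per-device allocators: a BO pool for command-buffer batches, plus
    * block-backed state pools for dynamic state, shader instructions,
    * surface state, and scratch space.
    */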
   anv_bo_pool_init(&device->batch_bo_pool, device, ANV_CMD_BUFFER_BATCH_SIZE);

   anv_block_pool_init(&device->dynamic_state_block_pool, device, 2048);

   anv_state_pool_init(&device->dynamic_state_pool,
                       &device->dynamic_state_block_pool);

   anv_block_pool_init(&device->instruction_block_pool, device, 2048);
   anv_block_pool_init(&device->surface_state_block_pool, device, 4096);

   anv_state_pool_init(&device->surface_state_pool,
                       &device->surface_state_block_pool);

   anv_block_pool_init(&device->scratch_block_pool, device, 0x10000);

   device->info = *physical_device->info;

   anv_queue_init(device, &device->queue);

   anv_device_init_meta(device);

   anv_device_init_border_colors(device);

   *pDevice = anv_device_to_handle(device);

   return VK_SUCCESS;

fail_fd:
   close(device->fd);
fail_device:
   anv_device_free(device, device);

   return vk_error(VK_ERROR_INITIALIZATION_FAILED);
}

void anv_DestroyDevice(
    VkDevice                                    _device)
{
   ANV_FROM_HANDLE(anv_device, device, _device);

   anv_queue_finish(&device->queue);

   anv_device_finish_meta(device);

#ifdef HAVE_VALGRIND
   /* We only need to free these to prevent valgrind errors. The backing
    * BO will go away in a couple of lines so we don't actually leak.
    */
   anv_state_pool_free(&device->dynamic_state_pool, device->border_colors);
#endif

   anv_bo_pool_finish(&device->batch_bo_pool);
   anv_state_pool_finish(&device->dynamic_state_pool);
   anv_block_pool_finish(&device->dynamic_state_block_pool);
   anv_block_pool_finish(&device->instruction_block_pool);
   anv_state_pool_finish(&device->surface_state_pool);
   anv_block_pool_finish(&device->surface_state_block_pool);
   anv_block_pool_finish(&device->scratch_block_pool);

   close(device->fd);

   anv_instance_free(device->instance, device);
}

VkResult anv_EnumerateInstanceExtensionProperties(
    const char*                                 pLayerName,
    uint32_t*                                   pCount,
    VkExtensionProperties*                      pProperties)
{
   if (pProperties == NULL) {
      *pCount = ARRAY_SIZE(global_extensions);
      return VK_SUCCESS;
   }

   assert(*pCount >= ARRAY_SIZE(global_extensions));

   *pCount = ARRAY_SIZE(global_extensions);
   memcpy(pProperties, global_extensions, sizeof(global_extensions));

   return VK_SUCCESS;
}

VkResult anv_EnumerateDeviceExtensionProperties(
    VkPhysicalDevice                            physicalDevice,
    const char*                                 pLayerName,
    uint32_t*                                   pCount,
    VkExtensionProperties*                      pProperties)
{
   if (pProperties == NULL) {
      *pCount = ARRAY_SIZE(device_extensions);
      return VK_SUCCESS;
   }

   assert(*pCount >= ARRAY_SIZE(device_extensions));

   *pCount = ARRAY_SIZE(device_extensions);
   memcpy(pProperties, device_extensions, sizeof(device_extensions));

   return VK_SUCCESS;
}

VkResult anv_EnumerateInstanceLayerProperties(
    uint32_t*                                   pCount,
    VkLayerProperties*                          pProperties)
{
   if (pProperties == NULL) {
      *pCount = 0;
      return VK_SUCCESS;
   }

   /* None supported at this time */
   return vk_error(VK_ERROR_LAYER_NOT_PRESENT);
}

VkResult anv_EnumerateDeviceLayerProperties(
    VkPhysicalDevice                            physicalDevice,
    uint32_t*                                   pCount,
    VkLayerProperties*                          pProperties)
{
   if (pProperties == NULL) {
      *pCount = 0;
      return VK_SUCCESS;
   }

   /* None supported at this time */
   return vk_error(VK_ERROR_LAYER_NOT_PRESENT);
}

VkResult anv_GetDeviceQueue(
    VkDevice                                    _device,
    uint32_t                                    queueNodeIndex,
    uint32_t                                    queueIndex,
    VkQueue*                                    pQueue)
{
   ANV_FROM_HANDLE(anv_device, device, _device);

   assert(queueIndex == 0);

   *pQueue = anv_queue_to_handle(&device->queue);

   return VK_SUCCESS;
}

VkResult anv_QueueSubmit(
    VkQueue                                     _queue,
    uint32_t                                    cmdBufferCount,
    const VkCmdBuffer*                          pCmdBuffers,
    VkFence                                     _fence)
{
   ANV_FROM_HANDLE(anv_queue, queue, _queue);
   ANV_FROM_HANDLE(anv_fence, fence, _fence);
   struct anv_device *device = queue->device;
   int ret;

   for (uint32_t i = 0; i < cmdBufferCount; i++) {
      ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, pCmdBuffers[i]);

      assert(cmd_buffer->level == VK_CMD_BUFFER_LEVEL_PRIMARY);

      ret = anv_gem_execbuffer(device, &cmd_buffer->execbuf2.execbuf);
      if (ret != 0) {
         /* We don't know the real error. */
         return vk_errorf(VK_ERROR_OUT_OF_DEVICE_MEMORY,
                          "execbuf2 failed: %m");
      }

      if (fence) {
         ret = anv_gem_execbuffer(device, &fence->execbuf);
         if (ret != 0) {
            /* We don't know the real error. */
            return vk_errorf(VK_ERROR_OUT_OF_DEVICE_MEMORY,
                             "execbuf2 failed: %m");
         }
      }

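      /* execbuf2 may have relocated the BOs; record the offsets the kernel
       * chose so later submissions can present them as presumed offsets.
       */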
      for (uint32_t j = 0; j < cmd_buffer->execbuf2.bo_count; j++)
         cmd_buffer->execbuf2.bos[j]->offset = cmd_buffer->execbuf2.objects[j].offset;
   }

   return VK_SUCCESS;
}

VkResult anv_QueueWaitIdle(
    VkQueue                                     _queue)
{
   ANV_FROM_HANDLE(anv_queue, queue, _queue);

   return ANV_CALL(DeviceWaitIdle)(anv_device_to_handle(queue->device));
}

VkResult anv_DeviceWaitIdle(
    VkDevice                                    _device)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   struct anv_state state;
   struct anv_batch batch;
   struct drm_i915_gem_execbuffer2 execbuf;
   struct drm_i915_gem_exec_object2 exec2_objects[1];
   struct anv_bo *bo = NULL;
   VkResult result;
   int64_t timeout;
   int ret;

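   /* Submit a trivial batch (just MI_BATCH_BUFFER_END) and wait on its BO.
    * The ring executes batches in submission order, so once this batch
    * retires, all prior work on this context has completed as well.
    */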
   state = anv_state_pool_alloc(&device->dynamic_state_pool, 32, 32);
   bo = &device->dynamic_state_pool.block_pool->bo;
   batch.start = batch.next = state.map;
   batch.end = state.map + 32;
   anv_batch_emit(&batch, GEN7_MI_BATCH_BUFFER_END);
   anv_batch_emit(&batch, GEN7_MI_NOOP);

   exec2_objects[0].handle = bo->gem_handle;
   exec2_objects[0].relocation_count = 0;
   exec2_objects[0].relocs_ptr = 0;
   exec2_objects[0].alignment = 0;
   exec2_objects[0].offset = bo->offset;
   exec2_objects[0].flags = 0;
   exec2_objects[0].rsvd1 = 0;
   exec2_objects[0].rsvd2 = 0;

   execbuf.buffers_ptr = (uintptr_t) exec2_objects;
   execbuf.buffer_count = 1;
   execbuf.batch_start_offset = state.offset;
   execbuf.batch_len = batch.next - state.map;
   execbuf.cliprects_ptr = 0;
   execbuf.num_cliprects = 0;
   execbuf.DR1 = 0;
   execbuf.DR4 = 0;

   execbuf.flags =
      I915_EXEC_HANDLE_LUT | I915_EXEC_NO_RELOC | I915_EXEC_RENDER;
   execbuf.rsvd1 = device->context_id;
   execbuf.rsvd2 = 0;

   ret = anv_gem_execbuffer(device, &execbuf);
   if (ret != 0) {
      /* We don't know the real error. */
      result = vk_errorf(VK_ERROR_OUT_OF_DEVICE_MEMORY, "execbuf2 failed: %m");
      goto fail;
   }

   timeout = INT64_MAX;
   ret = anv_gem_wait(device, bo->gem_handle, &timeout);
   if (ret != 0) {
      /* We don't know the real error. */
      result = vk_errorf(VK_ERROR_OUT_OF_DEVICE_MEMORY, "gem wait failed: %m");
      goto fail;
   }

   anv_state_pool_free(&device->dynamic_state_pool, state);

   return VK_SUCCESS;

fail:
   anv_state_pool_free(&device->dynamic_state_pool, state);

   return result;
}

void *
anv_device_alloc(struct anv_device *device,
                 size_t size,
                 size_t alignment,
                 VkSystemAllocType allocType)
{
   return anv_instance_alloc(device->instance, size, alignment, allocType);
}

void
anv_device_free(struct anv_device *device,
                void *mem)
{
   anv_instance_free(device->instance, mem);
}

VkResult
anv_bo_init_new(struct anv_bo *bo, struct anv_device *device, uint64_t size)
{
   bo->gem_handle = anv_gem_create(device, size);
   if (!bo->gem_handle)
      return vk_error(VK_ERROR_OUT_OF_DEVICE_MEMORY);

   bo->map = NULL;
   bo->index = 0;
   bo->offset = 0;
   bo->size = size;

   return VK_SUCCESS;
}

VkResult anv_AllocMemory(
    VkDevice                                    _device,
    const VkMemoryAllocInfo*                    pAllocInfo,
    VkDeviceMemory*                             pMem)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   struct anv_device_memory *mem;
   VkResult result;

   assert(pAllocInfo->sType == VK_STRUCTURE_TYPE_MEMORY_ALLOC_INFO);

   /* We support exactly one memory heap. */
   assert(pAllocInfo->memoryTypeIndex == 0);

   /* FINISHME: Fail if allocation request exceeds heap size. */

   mem = anv_device_alloc(device, sizeof(*mem), 8,
                          VK_SYSTEM_ALLOC_TYPE_API_OBJECT);
   if (mem == NULL)
      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

   result = anv_bo_init_new(&mem->bo, device, pAllocInfo->allocationSize);
   if (result != VK_SUCCESS)
      goto fail;

   *pMem = anv_device_memory_to_handle(mem);

   return VK_SUCCESS;

fail:
   anv_device_free(device, mem);

   return result;
}

void anv_FreeMemory(
    VkDevice                                    _device,
    VkDeviceMemory                              _mem)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   ANV_FROM_HANDLE(anv_device_memory, mem, _mem);

   if (mem->bo.map)
      anv_gem_munmap(mem->bo.map, mem->bo.size);

   if (mem->bo.gem_handle != 0)
      anv_gem_close(device, mem->bo.gem_handle);

   anv_device_free(device, mem);
}

VkResult anv_MapMemory(
    VkDevice                                    _device,
    VkDeviceMemory                              _mem,
    VkDeviceSize                                offset,
    VkDeviceSize                                size,
    VkMemoryMapFlags                            flags,
    void**                                      ppData)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   ANV_FROM_HANDLE(anv_device_memory, mem, _mem);

   /* FIXME: Is this supposed to be thread safe? Since vkUnmapMemory() only
    * takes a VkDeviceMemory pointer, it seems like only one map of the memory
    * at a time is valid. We could just mmap up front and return an offset
    * pointer here, but that may exhaust virtual memory on 32 bit
    * userspace. */

   mem->map = anv_gem_mmap(device, mem->bo.gem_handle, offset, size);
   mem->map_size = size;

   *ppData = mem->map;

   return VK_SUCCESS;
}

void anv_UnmapMemory(
    VkDevice                                    _device,
    VkDeviceMemory                              _mem)
{
   ANV_FROM_HANDLE(anv_device_memory, mem, _mem);

   anv_gem_munmap(mem->map, mem->map_size);
}

VkResult anv_FlushMappedMemoryRanges(
    VkDevice                                    device,
    uint32_t                                    memRangeCount,
    const VkMappedMemoryRange*                  pMemRanges)
{
   /* clflush here for !llc platforms */

   return VK_SUCCESS;
}

VkResult anv_InvalidateMappedMemoryRanges(
    VkDevice                                    device,
    uint32_t                                    memRangeCount,
    const VkMappedMemoryRange*                  pMemRanges)
{
   return anv_FlushMappedMemoryRanges(device, memRangeCount, pMemRanges);
}

VkResult anv_GetBufferMemoryRequirements(
    VkDevice                                    device,
    VkBuffer                                    _buffer,
    VkMemoryRequirements*                       pMemoryRequirements)
{
   ANV_FROM_HANDLE(anv_buffer, buffer, _buffer);

   /* The Vulkan spec (git aaed022) says:
    *
    *    memoryTypeBits is a bitfield and contains one bit set for every
    *    supported memory type for the resource. The bit `1<<i` is set if and
    *    only if the memory type `i` in the VkPhysicalDeviceMemoryProperties
    *    structure for the physical device is supported.
    *
    * We support exactly one memory type.
    */
   pMemoryRequirements->memoryTypeBits = 1;

   pMemoryRequirements->size = buffer->size;
   pMemoryRequirements->alignment = 16;

   return VK_SUCCESS;
}

VkResult anv_GetImageMemoryRequirements(
    VkDevice                                    device,
    VkImage                                     _image,
    VkMemoryRequirements*                       pMemoryRequirements)
{
   ANV_FROM_HANDLE(anv_image, image, _image);

   /* The Vulkan spec (git aaed022) says:
    *
    *    memoryTypeBits is a bitfield and contains one bit set for every
    *    supported memory type for the resource. The bit `1<<i` is set if and
    *    only if the memory type `i` in the VkPhysicalDeviceMemoryProperties
    *    structure for the physical device is supported.
    *
    * We support exactly one memory type.
    */
   pMemoryRequirements->memoryTypeBits = 1;

   pMemoryRequirements->size = image->size;
   pMemoryRequirements->alignment = image->alignment;

   return VK_SUCCESS;
}

VkResult anv_GetImageSparseMemoryRequirements(
    VkDevice                                    device,
    VkImage                                     image,
    uint32_t*                                   pNumRequirements,
    VkSparseImageMemoryRequirements*            pSparseMemoryRequirements)
{
   return vk_error(VK_UNSUPPORTED);
}

VkResult anv_GetDeviceMemoryCommitment(
    VkDevice                                    device,
    VkDeviceMemory                              memory,
    VkDeviceSize*                               pCommittedMemoryInBytes)
{
   *pCommittedMemoryInBytes = 0;
   stub_return(VK_SUCCESS);
}

VkResult anv_BindBufferMemory(
    VkDevice                                    device,
    VkBuffer                                    _buffer,
    VkDeviceMemory                              _mem,
    VkDeviceSize                                memOffset)
{
   ANV_FROM_HANDLE(anv_device_memory, mem, _mem);
   ANV_FROM_HANDLE(anv_buffer, buffer, _buffer);

   buffer->bo = &mem->bo;
   buffer->offset = memOffset;

   return VK_SUCCESS;
}

VkResult anv_BindImageMemory(
    VkDevice                                    device,
    VkImage                                     _image,
    VkDeviceMemory                              _mem,
    VkDeviceSize                                memOffset)
{
   ANV_FROM_HANDLE(anv_device_memory, mem, _mem);
   ANV_FROM_HANDLE(anv_image, image, _image);

   image->bo = &mem->bo;
   image->offset = memOffset;

   return VK_SUCCESS;
}

VkResult anv_QueueBindSparseBufferMemory(
    VkQueue                                     queue,
    VkBuffer                                    buffer,
    uint32_t                                    numBindings,
    const VkSparseMemoryBindInfo*               pBindInfo)
{
   stub_return(VK_UNSUPPORTED);
}

VkResult anv_QueueBindSparseImageOpaqueMemory(
    VkQueue                                     queue,
    VkImage                                     image,
    uint32_t                                    numBindings,
    const VkSparseMemoryBindInfo*               pBindInfo)
{
   stub_return(VK_UNSUPPORTED);
}

VkResult anv_QueueBindSparseImageMemory(
    VkQueue                                     queue,
    VkImage                                     image,
    uint32_t                                    numBindings,
    const VkSparseImageMemoryBindInfo*          pBindInfo)
{
   stub_return(VK_UNSUPPORTED);
}

VkResult anv_CreateFence(
    VkDevice                                    _device,
    const VkFenceCreateInfo*                    pCreateInfo,
    VkFence*                                    pFence)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   struct anv_fence *fence;
   struct anv_batch batch;
   VkResult result;

   const uint32_t fence_size = 128;

   assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_FENCE_CREATE_INFO);

   fence = anv_device_alloc(device, sizeof(*fence), 8,
                            VK_SYSTEM_ALLOC_TYPE_API_OBJECT);
   if (fence == NULL)
      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

   result = anv_bo_init_new(&fence->bo, device, fence_size);
   if (result != VK_SUCCESS)
      goto fail;

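   /* A fence is a tiny BO holding a no-op batch. Signaling submits that
    * batch after the real work; status queries and waits then reduce to a
    * GEM wait on this BO.
    */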
   fence->bo.map =
      anv_gem_mmap(device, fence->bo.gem_handle, 0, fence->bo.size);
   batch.next = batch.start = fence->bo.map;
   batch.end = fence->bo.map + fence->bo.size;
   anv_batch_emit(&batch, GEN7_MI_BATCH_BUFFER_END);
   anv_batch_emit(&batch, GEN7_MI_NOOP);

   fence->exec2_objects[0].handle = fence->bo.gem_handle;
   fence->exec2_objects[0].relocation_count = 0;
   fence->exec2_objects[0].relocs_ptr = 0;
   fence->exec2_objects[0].alignment = 0;
   fence->exec2_objects[0].offset = fence->bo.offset;
   fence->exec2_objects[0].flags = 0;
   fence->exec2_objects[0].rsvd1 = 0;
   fence->exec2_objects[0].rsvd2 = 0;

   fence->execbuf.buffers_ptr = (uintptr_t) fence->exec2_objects;
   fence->execbuf.buffer_count = 1;
   fence->execbuf.batch_start_offset = 0;
   fence->execbuf.batch_len = batch.next - fence->bo.map;
   fence->execbuf.cliprects_ptr = 0;
   fence->execbuf.num_cliprects = 0;
   fence->execbuf.DR1 = 0;
   fence->execbuf.DR4 = 0;

   fence->execbuf.flags =
      I915_EXEC_HANDLE_LUT | I915_EXEC_NO_RELOC | I915_EXEC_RENDER;
   fence->execbuf.rsvd1 = device->context_id;
   fence->execbuf.rsvd2 = 0;

   *pFence = anv_fence_to_handle(fence);

   return VK_SUCCESS;

fail:
   anv_device_free(device, fence);

   return result;
}

void anv_DestroyFence(
    VkDevice                                    _device,
    VkFence                                     _fence)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   ANV_FROM_HANDLE(anv_fence, fence, _fence);

   anv_gem_munmap(fence->bo.map, fence->bo.size);
   anv_gem_close(device, fence->bo.gem_handle);
   anv_device_free(device, fence);
}

VkResult anv_ResetFences(
    VkDevice                                    _device,
    uint32_t                                    fenceCount,
    const VkFence*                              pFences)
{
   for (uint32_t i = 0; i < fenceCount; i++) {
      ANV_FROM_HANDLE(anv_fence, fence, pFences[i]);
      fence->ready = false;
   }

   return VK_SUCCESS;
}

VkResult anv_GetFenceStatus(
    VkDevice                                    _device,
    VkFence                                     _fence)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   ANV_FROM_HANDLE(anv_fence, fence, _fence);
   int64_t t = 0;
   int ret;

   if (fence->ready)
      return VK_SUCCESS;

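   /* anv_gem_wait() with a zero timeout acts as a non-blocking poll: it
    * returns 0 if the BO is idle and fails with ETIME if it is still busy.
    */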
   ret = anv_gem_wait(device, fence->bo.gem_handle, &t);
   if (ret == 0) {
      fence->ready = true;
      return VK_SUCCESS;
   }

   return VK_NOT_READY;
}

VkResult anv_WaitForFences(
    VkDevice                                    _device,
    uint32_t                                    fenceCount,
    const VkFence*                              pFences,
    VkBool32                                    waitAll,
    uint64_t                                    timeout)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   int64_t t = timeout;
   int ret;

   /* FIXME: handle !waitAll */

   for (uint32_t i = 0; i < fenceCount; i++) {
      ANV_FROM_HANDLE(anv_fence, fence, pFences[i]);
      ret = anv_gem_wait(device, fence->bo.gem_handle, &t);
      if (ret == -1 && errno == ETIME) {
         return VK_TIMEOUT;
      } else if (ret == -1) {
         /* We don't know the real error. */
         return vk_errorf(VK_ERROR_OUT_OF_DEVICE_MEMORY,
                          "gem wait failed: %m");
      }
   }

   return VK_SUCCESS;
}

// Queue semaphore functions

VkResult anv_CreateSemaphore(
    VkDevice                                    device,
    const VkSemaphoreCreateInfo*                pCreateInfo,
    VkSemaphore*                                pSemaphore)
{
   pSemaphore->handle = 1;
   stub_return(VK_SUCCESS);
}

void anv_DestroySemaphore(
    VkDevice                                    device,
    VkSemaphore                                 semaphore)
{
   stub();
}

VkResult anv_QueueSignalSemaphore(
    VkQueue                                     queue,
    VkSemaphore                                 semaphore)
{
   stub_return(VK_UNSUPPORTED);
}

VkResult anv_QueueWaitSemaphore(
    VkQueue                                     queue,
    VkSemaphore                                 semaphore)
{
   stub_return(VK_UNSUPPORTED);
}

// Event functions

VkResult anv_CreateEvent(
    VkDevice                                    device,
    const VkEventCreateInfo*                    pCreateInfo,
    VkEvent*                                    pEvent)
{
   stub_return(VK_UNSUPPORTED);
}

void anv_DestroyEvent(
    VkDevice                                    device,
    VkEvent                                     event)
{
   stub();
}

VkResult anv_GetEventStatus(
    VkDevice                                    device,
    VkEvent                                     event)
{
   stub_return(VK_UNSUPPORTED);
}

VkResult anv_SetEvent(
    VkDevice                                    device,
    VkEvent                                     event)
{
   stub_return(VK_UNSUPPORTED);
}

VkResult anv_ResetEvent(
    VkDevice                                    device,
    VkEvent                                     event)
{
   stub_return(VK_UNSUPPORTED);
}

// Buffer functions

VkResult anv_CreateBuffer(
    VkDevice                                    _device,
    const VkBufferCreateInfo*                   pCreateInfo,
    VkBuffer*                                   pBuffer)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   struct anv_buffer *buffer;

   assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO);

   buffer = anv_device_alloc(device, sizeof(*buffer), 8,
                             VK_SYSTEM_ALLOC_TYPE_API_OBJECT);
   if (buffer == NULL)
      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

   buffer->size = pCreateInfo->size;
   buffer->bo = NULL;
   buffer->offset = 0;

   *pBuffer = anv_buffer_to_handle(buffer);

   return VK_SUCCESS;
}

void anv_DestroyBuffer(
    VkDevice                                    _device,
    VkBuffer                                    _buffer)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   ANV_FROM_HANDLE(anv_buffer, buffer, _buffer);

   anv_device_free(device, buffer);
}

void
anv_fill_buffer_surface_state(struct anv_device *device, void *state,
                              const struct anv_format *format,
                              uint32_t offset, uint32_t range)
{
   switch (device->info.gen) {
   case 7:
      gen7_fill_buffer_surface_state(state, format, offset, range);
      break;
   case 8:
      gen8_fill_buffer_surface_state(state, format, offset, range);
      break;
   default:
      unreachable("unsupported gen\n");
   }
}

VkResult
anv_buffer_view_create(
   struct anv_device *                          device,
   const VkBufferViewCreateInfo*                pCreateInfo,
   struct anv_buffer_view **                    bview_out)
{
   ANV_FROM_HANDLE(anv_buffer, buffer, pCreateInfo->buffer);
   struct anv_buffer_view *bview;

   assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_BUFFER_VIEW_CREATE_INFO);

   bview = anv_device_alloc(device, sizeof(*bview), 8,
                            VK_SYSTEM_ALLOC_TYPE_API_OBJECT);
   if (bview == NULL)
      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

   *bview = (struct anv_buffer_view) {
      .bo = buffer->bo,
      .offset = buffer->offset + pCreateInfo->offset,
      .surface_state = anv_state_pool_alloc(&device->surface_state_pool, 64, 64),
      .format = anv_format_for_vk_format(pCreateInfo->format),
      .range = pCreateInfo->range,
   };

   *bview_out = bview;

   return VK_SUCCESS;
}

void anv_DestroyBufferView(
    VkDevice                                    _device,
    VkBufferView                                _bview)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   ANV_FROM_HANDLE(anv_buffer_view, bview, _bview);

   anv_state_pool_free(&device->surface_state_pool, bview->surface_state);
   anv_device_free(device, bview);
}

void anv_DestroySampler(
    VkDevice                                    _device,
    VkSampler                                   _sampler)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   ANV_FROM_HANDLE(anv_sampler, sampler, _sampler);

   anv_device_free(device, sampler);
}

// Descriptor set functions

VkResult anv_CreateDescriptorSetLayout(
    VkDevice                                    _device,
    const VkDescriptorSetLayoutCreateInfo*      pCreateInfo,
    VkDescriptorSetLayout*                      pSetLayout)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   struct anv_descriptor_set_layout *set_layout;
   uint32_t s;

   assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO);

   uint32_t immutable_sampler_count = 0;
   for (uint32_t b = 0; b < pCreateInfo->count; b++) {
      if (pCreateInfo->pBinding[b].pImmutableSamplers)
         immutable_sampler_count += pCreateInfo->pBinding[b].arraySize;
   }

   size_t size = sizeof(struct anv_descriptor_set_layout) +
                 pCreateInfo->count * sizeof(set_layout->binding[0]) +
                 immutable_sampler_count * sizeof(struct anv_sampler *);

   set_layout = anv_device_alloc(device, size, 8,
                                 VK_SYSTEM_ALLOC_TYPE_API_OBJECT);
   if (!set_layout)
      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

   /* We just allocate all the samplers at the end of the struct */
   struct anv_sampler **samplers =
      (struct anv_sampler **)&set_layout->binding[pCreateInfo->count];

   set_layout->binding_count = pCreateInfo->count;
   set_layout->shader_stages = 0;
   set_layout->size = 0;

   /* Initialize all binding_layout entries to -1 */
   memset(set_layout->binding, -1,
          pCreateInfo->count * sizeof(set_layout->binding[0]));

   /* Initialize all samplers to 0 */
   memset(samplers, 0, immutable_sampler_count * sizeof(*samplers));

   uint32_t sampler_count[VK_SHADER_STAGE_NUM] = { 0, };
   uint32_t surface_count[VK_SHADER_STAGE_NUM] = { 0, };
   uint32_t dynamic_offset_count = 0;

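   /* Walk the bindings, handing out per-stage sampler and surface slots
    * and, for the dynamic buffer types, a dynamic offset slot.
    */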
   for (uint32_t b = 0; b < pCreateInfo->count; b++) {
      uint32_t array_size = MAX2(1, pCreateInfo->pBinding[b].arraySize);
      set_layout->binding[b].array_size = array_size;
      set_layout->binding[b].descriptor_index = set_layout->size;
      set_layout->size += array_size;

      switch (pCreateInfo->pBinding[b].descriptorType) {
      case VK_DESCRIPTOR_TYPE_SAMPLER:
      case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
         for_each_bit(s, pCreateInfo->pBinding[b].stageFlags) {
            set_layout->binding[b].stage[s].sampler_index = sampler_count[s];
            sampler_count[s] += array_size;
         }
         break;
      default:
         break;
      }

      switch (pCreateInfo->pBinding[b].descriptorType) {
      case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
      case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE:
      case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
      case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
      case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
      case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
      case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
      case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
      case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
      case VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT:
         for_each_bit(s, pCreateInfo->pBinding[b].stageFlags) {
            set_layout->binding[b].stage[s].surface_index = surface_count[s];
            surface_count[s] += array_size;
         }
         break;
      default:
         break;
      }

      switch (pCreateInfo->pBinding[b].descriptorType) {
      case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
      case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
         set_layout->binding[b].dynamic_offset_index = dynamic_offset_count;
         dynamic_offset_count += array_size;
         break;
      default:
         break;
      }

      if (pCreateInfo->pBinding[b].pImmutableSamplers) {
         set_layout->binding[b].immutable_samplers = samplers;
         samplers += array_size;

         for (uint32_t i = 0; i < array_size; i++)
            set_layout->binding[b].immutable_samplers[i] =
               anv_sampler_from_handle(pCreateInfo->pBinding[b].pImmutableSamplers[i]);
      } else {
         set_layout->binding[b].immutable_samplers = NULL;
      }

      set_layout->shader_stages |= pCreateInfo->pBinding[b].stageFlags;
   }

   set_layout->dynamic_offset_count = dynamic_offset_count;

   *pSetLayout = anv_descriptor_set_layout_to_handle(set_layout);

   return VK_SUCCESS;
}

void anv_DestroyDescriptorSetLayout(
    VkDevice                                    _device,
    VkDescriptorSetLayout                       _set_layout)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   ANV_FROM_HANDLE(anv_descriptor_set_layout, set_layout, _set_layout);

   anv_device_free(device, set_layout);
}

VkResult anv_CreateDescriptorPool(
    VkDevice                                    device,
    const VkDescriptorPoolCreateInfo*           pCreateInfo,
    VkDescriptorPool*                           pDescriptorPool)
{
   anv_finishme("VkDescriptorPool is a stub");
   pDescriptorPool->handle = 1;
   return VK_SUCCESS;
}

void anv_DestroyDescriptorPool(
    VkDevice                                    _device,
    VkDescriptorPool                            _pool)
{
   anv_finishme("VkDescriptorPool is a stub: free the pool's descriptor sets");
}

VkResult anv_ResetDescriptorPool(
    VkDevice                                    device,
    VkDescriptorPool                            descriptorPool)
{
   anv_finishme("VkDescriptorPool is a stub: free the pool's descriptor sets");
   return VK_SUCCESS;
}

VkResult
anv_descriptor_set_create(struct anv_device *device,
                          const struct anv_descriptor_set_layout *layout,
                          struct anv_descriptor_set **out_set)
{
   struct anv_descriptor_set *set;
   size_t size = sizeof(*set) + layout->size * sizeof(set->descriptors[0]);

   set = anv_device_alloc(device, size, 8, VK_SYSTEM_ALLOC_TYPE_API_OBJECT);
   if (!set)
      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

   /* A descriptor set may not be 100% filled. Clear the set so we can
    * later detect holes in it.
    */
   memset(set, 0, size);

   set->layout = layout;

   /* Go through and fill out immutable samplers if we have any */
   struct anv_descriptor *desc = set->descriptors;
   for (uint32_t b = 0; b < layout->binding_count; b++) {
      if (layout->binding[b].immutable_samplers) {
         for (uint32_t i = 0; i < layout->binding[b].array_size; i++)
            desc[i].sampler = layout->binding[b].immutable_samplers[i];
      }
      desc += layout->binding[b].array_size;
   }

   *out_set = set;

   return VK_SUCCESS;
}

void
anv_descriptor_set_destroy(struct anv_device *device,
                           struct anv_descriptor_set *set)
{
   anv_device_free(device, set);
}

VkResult anv_AllocDescriptorSets(
    VkDevice                                    _device,
    VkDescriptorPool                            descriptorPool,
    VkDescriptorSetUsage                        setUsage,
    uint32_t                                    count,
    const VkDescriptorSetLayout*                pSetLayouts,
    VkDescriptorSet*                            pDescriptorSets)
{
   ANV_FROM_HANDLE(anv_device, device, _device);

   VkResult result = VK_SUCCESS;
   struct anv_descriptor_set *set;
   uint32_t i;

   for (i = 0; i < count; i++) {
      ANV_FROM_HANDLE(anv_descriptor_set_layout, layout, pSetLayouts[i]);

      result = anv_descriptor_set_create(device, layout, &set);
      if (result != VK_SUCCESS)
         break;

      pDescriptorSets[i] = anv_descriptor_set_to_handle(set);
   }

   if (result != VK_SUCCESS)
      anv_FreeDescriptorSets(_device, descriptorPool, i, pDescriptorSets);

   return result;
}

VkResult anv_FreeDescriptorSets(
    VkDevice                                    _device,
    VkDescriptorPool                            descriptorPool,
    uint32_t                                    count,
    const VkDescriptorSet*                      pDescriptorSets)
{
   ANV_FROM_HANDLE(anv_device, device, _device);

   for (uint32_t i = 0; i < count; i++) {
      ANV_FROM_HANDLE(anv_descriptor_set, set, pDescriptorSets[i]);

      anv_descriptor_set_destroy(device, set);
   }

   return VK_SUCCESS;
}

void anv_UpdateDescriptorSets(
    VkDevice                                    device,
    uint32_t                                    writeCount,
    const VkWriteDescriptorSet*                 pDescriptorWrites,
    uint32_t                                    copyCount,
    const VkCopyDescriptorSet*                  pDescriptorCopies)
{
   for (uint32_t i = 0; i < writeCount; i++) {
      const VkWriteDescriptorSet *write = &pDescriptorWrites[i];
      ANV_FROM_HANDLE(anv_descriptor_set, set, write->destSet);
      const struct anv_descriptor_set_binding_layout *bind_layout =
         &set->layout->binding[write->destBinding];
      struct anv_descriptor *desc =
         &set->descriptors[bind_layout->descriptor_index];

      switch (write->descriptorType) {
      case VK_DESCRIPTOR_TYPE_SAMPLER:
         for (uint32_t j = 0; j < write->count; j++) {
            ANV_FROM_HANDLE(anv_sampler, sampler,
                            write->pDescriptors[j].sampler);

            desc[j] = (struct anv_descriptor) {
               .type = ANV_DESCRIPTOR_TYPE_SAMPLER,
               .sampler = sampler,
            };
         }
         break;

      case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
         for (uint32_t j = 0; j < write->count; j++) {
            ANV_FROM_HANDLE(anv_image_view, iview,
                            write->pDescriptors[j].imageView);
            ANV_FROM_HANDLE(anv_sampler, sampler,
                            write->pDescriptors[j].sampler);

            desc[j].type = ANV_DESCRIPTOR_TYPE_IMAGE_VIEW_AND_SAMPLER;
            desc[j].image_view = iview;

            /* If this descriptor has an immutable sampler, we don't want
             * to stomp on it.
             */
            if (sampler)
               desc[j].sampler = sampler;
         }
         break;

      case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE:
      case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
         for (uint32_t j = 0; j < write->count; j++) {
            ANV_FROM_HANDLE(anv_image_view, iview,
                            write->pDescriptors[j].imageView);

            desc[j] = (struct anv_descriptor) {
               .type = ANV_DESCRIPTOR_TYPE_IMAGE_VIEW,
               .image_view = iview,
            };
         }
         break;

      case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
      case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
         anv_finishme("texel buffers not implemented");
         break;

      case VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT:
         anv_finishme("input attachments not implemented");
         break;

      case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
      case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
      case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
      case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
         for (uint32_t j = 0; j < write->count; j++) {
            assert(write->pDescriptors[j].bufferInfo.buffer.handle);
            ANV_FROM_HANDLE(anv_buffer, buffer,
                            write->pDescriptors[j].bufferInfo.buffer);
            assert(buffer);

            desc[j] = (struct anv_descriptor) {
               .type = ANV_DESCRIPTOR_TYPE_BUFFER_AND_OFFSET,
               .buffer = buffer,
               .offset = write->pDescriptors[j].bufferInfo.offset,
               .range = write->pDescriptors[j].bufferInfo.range,
            };
         }
         break;

      default:
         break;
      }
   }

   for (uint32_t i = 0; i < copyCount; i++) {
      const VkCopyDescriptorSet *copy = &pDescriptorCopies[i];
      ANV_FROM_HANDLE(anv_descriptor_set, src, copy->srcSet);
      ANV_FROM_HANDLE(anv_descriptor_set, dest, copy->destSet);
      for (uint32_t j = 0; j < copy->count; j++) {
         dest->descriptors[copy->destBinding + j] =
            src->descriptors[copy->srcBinding + j];
      }
   }
}

VkResult anv_CreateFramebuffer(
    VkDevice                                    _device,
    const VkFramebufferCreateInfo*              pCreateInfo,
    VkFramebuffer*                              pFramebuffer)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   struct anv_framebuffer *framebuffer;

   assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO);

   size_t size = sizeof(*framebuffer) +
                 sizeof(struct anv_image_view *) * pCreateInfo->attachmentCount;
   framebuffer = anv_device_alloc(device, size, 8,
                                  VK_SYSTEM_ALLOC_TYPE_API_OBJECT);
   if (framebuffer == NULL)
      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

   framebuffer->attachment_count = pCreateInfo->attachmentCount;
   for (uint32_t i = 0; i < pCreateInfo->attachmentCount; i++) {
      VkImageView _iview = pCreateInfo->pAttachments[i];
      framebuffer->attachments[i] = anv_image_view_from_handle(_iview);
   }

   framebuffer->width = pCreateInfo->width;
   framebuffer->height = pCreateInfo->height;
   framebuffer->layers = pCreateInfo->layers;

   *pFramebuffer = anv_framebuffer_to_handle(framebuffer);

   return VK_SUCCESS;
}

void anv_DestroyFramebuffer(
    VkDevice                                    _device,
    VkFramebuffer                               _fb)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   ANV_FROM_HANDLE(anv_framebuffer, fb, _fb);

   anv_device_free(device, fb);
}

void vkCmdDbgMarkerBegin(
    VkCmdBuffer                                 cmdBuffer,
    const char*                                 pMarker)
   __attribute__ ((visibility ("default")));

void vkCmdDbgMarkerEnd(
    VkCmdBuffer                                 cmdBuffer)
   __attribute__ ((visibility ("default")));

void vkCmdDbgMarkerBegin(
    VkCmdBuffer                                 cmdBuffer,
    const char*                                 pMarker)
{
}

void vkCmdDbgMarkerEnd(
    VkCmdBuffer                                 cmdBuffer)
{
}