Merge remote-tracking branch 'mesa-public/master' into vulkan
[mesa.git] / src / vulkan / anv_device.c
1 /*
2 * Copyright © 2015 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 */
23
24 #include <assert.h>
25 #include <stdbool.h>
26 #include <string.h>
27 #include <unistd.h>
28 #include <fcntl.h>
29
30 #include "anv_private.h"
31 #include "mesa/main/git_sha1.h"
32 #include "util/strtod.h"
33
34 struct anv_dispatch_table dtable;
35
36 static void
37 compiler_debug_log(void *data, const char *fmt, ...)
38 { }
39
40 static void
41 compiler_perf_log(void *data, const char *fmt, ...)
42 {
43 va_list args;
44 va_start(args, fmt);
45
46 if (unlikely(INTEL_DEBUG & DEBUG_PERF))
47 vfprintf(stderr, fmt, args);
48
49 va_end(args);
50 }
51
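/* Probe a DRM render node: open it, verify the chipset is one we can drive,
 * and check for the kernel features this driver relies on (execbuf2,
 * wait-timeout, LLC). The fd is closed again on success; anv_CreateDevice
 * reopens the same path.
 */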
52 static VkResult
53 anv_physical_device_init(struct anv_physical_device *device,
54 struct anv_instance *instance,
55 const char *path)
56 {
57 VkResult result;
58 int fd;
59
60 fd = open(path, O_RDWR | O_CLOEXEC);
61 if (fd < 0)
62 return vk_errorf(VK_ERROR_INITIALIZATION_FAILED,
63 "failed to open %s: %m", path);
64
65 device->_loader_data.loaderMagic = ICD_LOADER_MAGIC;
66 device->instance = instance;
67 device->path = path;
68
69 device->chipset_id = anv_gem_get_param(fd, I915_PARAM_CHIPSET_ID);
70 if (!device->chipset_id) {
71 result = vk_errorf(VK_ERROR_INITIALIZATION_FAILED,
72 "failed to get chipset id: %m");
73 goto fail;
74 }
75
76 device->name = brw_get_device_name(device->chipset_id);
77 device->info = brw_get_device_info(device->chipset_id);
78 if (!device->info) {
79 result = vk_errorf(VK_ERROR_INITIALIZATION_FAILED,
80 "failed to get device info");
81 goto fail;
82 }
83
84 if (device->info->gen == 7 &&
85 !device->info->is_haswell && !device->info->is_baytrail) {
86 fprintf(stderr, "WARNING: Ivy Bridge Vulkan support is incomplete\n");
87 } else if (device->info->gen == 8 && !device->info->is_cherryview) {
88 /* Broadwell is as fully supported as anything */
89 } else {
90 result = vk_errorf(VK_UNSUPPORTED,
91 "Vulkan not yet supported on %s", device->name);
92 goto fail;
93 }
94
95 if (anv_gem_get_aperture(fd, &device->aperture_size) == -1) {
96 result = vk_errorf(VK_ERROR_INITIALIZATION_FAILED,
97 "failed to get aperture size: %m");
98 goto fail;
99 }
100
101 if (!anv_gem_get_param(fd, I915_PARAM_HAS_WAIT_TIMEOUT)) {
102 result = vk_errorf(VK_ERROR_INITIALIZATION_FAILED,
103 "kernel missing gem wait");
104 goto fail;
105 }
106
107 if (!anv_gem_get_param(fd, I915_PARAM_HAS_EXECBUF2)) {
108 result = vk_errorf(VK_ERROR_INITIALIZATION_FAILED,
109 "kernel missing execbuf2");
110 goto fail;
111 }
112
113 if (!anv_gem_get_param(fd, I915_PARAM_HAS_LLC)) {
114 result = vk_errorf(VK_ERROR_INITIALIZATION_FAILED,
115 "non-llc gpu");
116 goto fail;
117 }
118
119 close(fd);
120
121 brw_process_intel_debug_variable();
122
123 device->compiler = brw_compiler_create(NULL, device->info);
124 if (device->compiler == NULL) {
125 result = vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
126 goto fail;
127 }
128 device->compiler->shader_debug_log = compiler_debug_log;
129 device->compiler->shader_perf_log = compiler_perf_log;
130
131 isl_device_init(&device->isl_dev, device->info);
132
133 return VK_SUCCESS;
134
135 fail:
136 close(fd);
137 return result;
138 }
139
140 static void
141 anv_physical_device_finish(struct anv_physical_device *device)
142 {
143 ralloc_free(device->compiler);
144 }
145
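/* Default allocation callbacks, used when the application does not provide
 * VkAllocCallbacks. The alignment argument is ignored; malloc's guarantees
 * already cover the small (8 byte) alignments this driver requests.
 */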
146 static void *default_alloc(
147 void* pUserData,
148 size_t size,
149 size_t alignment,
150 VkSystemAllocType allocType)
151 {
152 return malloc(size);
153 }
154
155 static void default_free(
156 void* pUserData,
157 void* pMem)
158 {
159 free(pMem);
160 }
161
162 static const VkAllocCallbacks default_alloc_callbacks = {
163 .pUserData = NULL,
164 .pfnAlloc = default_alloc,
165 .pfnFree = default_free
166 };
167
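/* The only extensions advertised are the provisional KHR swapchain (WSI)
 * extensions: one instance-level and one device-level.
 */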
168 static const VkExtensionProperties global_extensions[] = {
169 {
170 .extName = VK_EXT_KHR_SWAPCHAIN_EXTENSION_NAME,
171 .specVersion = 17,
172 },
173 };
174
175 static const VkExtensionProperties device_extensions[] = {
176 {
177 .extName = VK_EXT_KHR_DEVICE_SWAPCHAIN_EXTENSION_NAME,
178 .specVersion = 53,
179 },
180 };
181
182 VkResult anv_CreateInstance(
183 const VkInstanceCreateInfo* pCreateInfo,
184 VkInstance* pInstance)
185 {
186 struct anv_instance *instance;
187 const VkAllocCallbacks *alloc_callbacks = &default_alloc_callbacks;
188 void *user_data = NULL;
189
190 assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_INSTANCE_CREATE_INFO);
191
192 if (pCreateInfo->pAppInfo->apiVersion != VK_MAKE_VERSION(0, 170, 2))
193 return vk_error(VK_ERROR_INCOMPATIBLE_DRIVER);
194
195 for (uint32_t i = 0; i < pCreateInfo->extensionCount; i++) {
196 bool found = false;
197 for (uint32_t j = 0; j < ARRAY_SIZE(global_extensions); j++) {
198 if (strcmp(pCreateInfo->ppEnabledExtensionNames[i],
199 global_extensions[j].extName) == 0) {
200 found = true;
201 break;
202 }
203 }
204 if (!found)
205 return vk_error(VK_ERROR_EXTENSION_NOT_PRESENT);
206 }
207
208 if (pCreateInfo->pAllocCb) {
209 alloc_callbacks = pCreateInfo->pAllocCb;
210 user_data = pCreateInfo->pAllocCb->pUserData;
211 }
212 instance = alloc_callbacks->pfnAlloc(user_data, sizeof(*instance), 8,
213 VK_SYSTEM_ALLOC_TYPE_API_OBJECT);
214 if (!instance)
215 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
216
217 instance->_loader_data.loaderMagic = ICD_LOADER_MAGIC;
218 instance->pAllocUserData = alloc_callbacks->pUserData;
219 instance->pfnAlloc = alloc_callbacks->pfnAlloc;
220 instance->pfnFree = alloc_callbacks->pfnFree;
221 instance->apiVersion = pCreateInfo->pAppInfo->apiVersion;
222 instance->physicalDeviceCount = -1;
223
224 _mesa_locale_init();
225
226 VG(VALGRIND_CREATE_MEMPOOL(instance, 0, false));
227
228 anv_init_wsi(instance);
229
230 *pInstance = anv_instance_to_handle(instance);
231
232 return VK_SUCCESS;
233 }
234
235 void anv_DestroyInstance(
236 VkInstance _instance)
237 {
238 ANV_FROM_HANDLE(anv_instance, instance, _instance);
239
240 if (instance->physicalDeviceCount > 0) {
241 /* We support at most one physical device. */
242 assert(instance->physicalDeviceCount == 1);
243 anv_physical_device_finish(&instance->physicalDevice);
244 }
245
246 anv_finish_wsi(instance);
247
248 VG(VALGRIND_DESTROY_MEMPOOL(instance));
249
250 _mesa_locale_fini();
251
252 instance->pfnFree(instance->pAllocUserData, instance);
253 }
254
255 void *
256 anv_instance_alloc(struct anv_instance *instance, size_t size,
257 size_t alignment, VkSystemAllocType allocType)
258 {
259 void *mem = instance->pfnAlloc(instance->pAllocUserData,
260 size, alignment, allocType);
261 if (mem) {
262 VG(VALGRIND_MEMPOOL_ALLOC(instance, mem, size));
263 VG(VALGRIND_MAKE_MEM_UNDEFINED(mem, size));
264 }
265 return mem;
266 }
267
268 void
269 anv_instance_free(struct anv_instance *instance, void *mem)
270 {
271 if (mem == NULL)
272 return;
273
274 VG(VALGRIND_MEMPOOL_FREE(instance, mem));
275
276 instance->pfnFree(instance->pAllocUserData, mem);
277 }
278
279 VkResult anv_EnumeratePhysicalDevices(
280 VkInstance _instance,
281 uint32_t* pPhysicalDeviceCount,
282 VkPhysicalDevice* pPhysicalDevices)
283 {
284 ANV_FROM_HANDLE(anv_instance, instance, _instance);
285 VkResult result;
286
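/* The physical device is probed lazily on the first enumeration. A count of
 * -1 means we have not tried yet; a VK_UNSUPPORTED probe result is reported
 * as zero devices rather than as an error.
 */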
287 if (instance->physicalDeviceCount < 0) {
288 result = anv_physical_device_init(&instance->physicalDevice,
289 instance, "/dev/dri/renderD128");
290 if (result == VK_UNSUPPORTED) {
291 instance->physicalDeviceCount = 0;
292 } else if (result == VK_SUCCESS) {
293 instance->physicalDeviceCount = 1;
294 } else {
295 return result;
296 }
297 }
298
299 /* pPhysicalDeviceCount is an out parameter if pPhysicalDevices is NULL;
300 * otherwise it's an inout parameter.
301 *
302 * The Vulkan spec (git aaed022) says:
303 *
304 * pPhysicalDeviceCount is a pointer to an unsigned integer variable
305 * that is initialized with the number of devices the application is
306 * prepared to receive handles to. pname:pPhysicalDevices is pointer to
307 * an array of at least this many VkPhysicalDevice handles [...].
308 *
309 * Upon success, if pPhysicalDevices is NULL, vkEnumeratePhysicalDevices
310 * overwrites the contents of the variable pointed to by
311 * pPhysicalDeviceCount with the number of physical devices in the
312 * instance; otherwise, vkEnumeratePhysicalDevices overwrites
313 * pPhysicalDeviceCount with the number of physical handles written to
314 * pPhysicalDevices.
315 */
316 if (!pPhysicalDevices) {
317 *pPhysicalDeviceCount = instance->physicalDeviceCount;
318 } else if (*pPhysicalDeviceCount >= 1) {
319 pPhysicalDevices[0] = anv_physical_device_to_handle(&instance->physicalDevice);
320 *pPhysicalDeviceCount = 1;
321 } else {
322 *pPhysicalDeviceCount = 0;
323 }
324
325 return VK_SUCCESS;
326 }
327
328 VkResult anv_GetPhysicalDeviceFeatures(
329 VkPhysicalDevice physicalDevice,
330 VkPhysicalDeviceFeatures* pFeatures)
331 {
332 anv_finishme("Get correct values for PhysicalDeviceFeatures");
333
334 *pFeatures = (VkPhysicalDeviceFeatures) {
335 .robustBufferAccess = false,
336 .fullDrawIndexUint32 = false,
337 .imageCubeArray = false,
338 .independentBlend = false,
339 .geometryShader = true,
340 .tessellationShader = false,
341 .sampleRateShading = false,
342 .dualSourceBlend = true,
343 .logicOp = true,
344 .multiDrawIndirect = true,
345 .depthClip = false,
346 .depthBiasClamp = false,
347 .fillModeNonSolid = true,
348 .depthBounds = false,
349 .wideLines = true,
350 .largePoints = true,
351 .textureCompressionETC2 = true,
352 .textureCompressionASTC_LDR = true,
353 .textureCompressionBC = true,
354 .occlusionQueryNonConservative = false, /* FINISHME */
355 .pipelineStatisticsQuery = true,
356 .vertexSideEffects = false,
357 .tessellationSideEffects = false,
358 .geometrySideEffects = false,
359 .fragmentSideEffects = false,
360 .shaderTessellationPointSize = false,
361 .shaderGeometryPointSize = true,
362 .shaderImageGatherExtended = true,
363 .shaderStorageImageExtendedFormats = false,
364 .shaderStorageImageMultisample = false,
365 .shaderUniformBufferArrayDynamicIndexing = true,
366 .shaderSampledImageArrayDynamicIndexing = false,
367 .shaderStorageBufferArrayDynamicIndexing = false,
368 .shaderStorageImageArrayDynamicIndexing = false,
369 .shaderClipDistance = false,
370 .shaderCullDistance = false,
371 .shaderFloat64 = false,
372 .shaderInt64 = false,
373 .shaderInt16 = false,
374 .alphaToOne = true,
375 };
376
377 return VK_SUCCESS;
378 }
379
380 VkResult anv_GetPhysicalDeviceProperties(
381 VkPhysicalDevice physicalDevice,
382 VkPhysicalDeviceProperties* pProperties)
383 {
384 ANV_FROM_HANDLE(anv_physical_device, pdevice, physicalDevice);
385 const struct brw_device_info *devinfo = pdevice->info;
386
387 anv_finishme("Get correct values for VkPhysicalDeviceLimits");
388
389 VkPhysicalDeviceLimits limits = {
390 .maxImageDimension1D = (1 << 14),
391 .maxImageDimension2D = (1 << 14),
392 .maxImageDimension3D = (1 << 10),
393 .maxImageDimensionCube = (1 << 14),
394 .maxImageArrayLayers = (1 << 10),
395
396 /* Broadwell supports 1, 2, 4, and 8 samples. */
397 .sampleCounts = 4,
398
399 .maxTexelBufferSize = (1 << 14),
400 .maxUniformBufferSize = UINT32_MAX,
401 .maxStorageBufferSize = UINT32_MAX,
402 .maxPushConstantsSize = MAX_PUSH_CONSTANTS_SIZE,
403 .maxMemoryAllocationCount = UINT32_MAX,
404 .bufferImageGranularity = 64, /* A cache line */
405 .sparseAddressSpaceSize = 0,
406 .maxBoundDescriptorSets = MAX_SETS,
407 .maxDescriptorSets = UINT32_MAX,
408 .maxPerStageDescriptorSamplers = 64,
409 .maxPerStageDescriptorUniformBuffers = 64,
410 .maxPerStageDescriptorStorageBuffers = 64,
411 .maxPerStageDescriptorSampledImages = 64,
412 .maxPerStageDescriptorStorageImages = 64,
413 .maxDescriptorSetSamplers = 256,
414 .maxDescriptorSetUniformBuffers = 256,
415 .maxDescriptorSetUniformBuffersDynamic = 256,
416 .maxDescriptorSetStorageBuffers = 256,
417 .maxDescriptorSetStorageBuffersDynamic = 256,
418 .maxDescriptorSetSampledImages = 256,
419 .maxDescriptorSetStorageImages = 256,
420 .maxVertexInputAttributes = 32,
421 .maxVertexInputBindings = 32,
422 .maxVertexInputAttributeOffset = 256,
423 .maxVertexInputBindingStride = 256,
424 .maxVertexOutputComponents = 32,
425 .maxTessGenLevel = 0,
426 .maxTessPatchSize = 0,
427 .maxTessControlPerVertexInputComponents = 0,
428 .maxTessControlPerVertexOutputComponents = 0,
429 .maxTessControlPerPatchOutputComponents = 0,
430 .maxTessControlTotalOutputComponents = 0,
431 .maxTessEvaluationInputComponents = 0,
432 .maxTessEvaluationOutputComponents = 0,
433 .maxGeometryShaderInvocations = 6,
434 .maxGeometryInputComponents = 16,
435 .maxGeometryOutputComponents = 16,
436 .maxGeometryOutputVertices = 16,
437 .maxGeometryTotalOutputComponents = 16,
438 .maxFragmentInputComponents = 16,
439 .maxFragmentOutputBuffers = 8,
440 .maxFragmentDualSourceBuffers = 2,
441 .maxFragmentCombinedOutputResources = 8,
442 .maxComputeSharedMemorySize = 1024,
443 .maxComputeWorkGroupCount = {
444 16 * devinfo->max_cs_threads,
445 16 * devinfo->max_cs_threads,
446 16 * devinfo->max_cs_threads,
447 },
448 .maxComputeWorkGroupInvocations = 16 * devinfo->max_cs_threads,
449 .maxComputeWorkGroupSize = {
450 16 * devinfo->max_cs_threads,
451 16 * devinfo->max_cs_threads,
452 16 * devinfo->max_cs_threads,
453 },
454 .subPixelPrecisionBits = 4 /* FIXME */,
455 .subTexelPrecisionBits = 4 /* FIXME */,
456 .mipmapPrecisionBits = 4 /* FIXME */,
457 .maxDrawIndexedIndexValue = UINT32_MAX,
458 .maxDrawIndirectInstanceCount = UINT32_MAX,
459 .primitiveRestartForPatches = UINT32_MAX,
460 .maxSamplerLodBias = 16,
461 .maxSamplerAnisotropy = 16,
462 .maxViewports = MAX_VIEWPORTS,
463 .maxViewportDimensions = { (1 << 14), (1 << 14) },
464 .viewportBoundsRange = { -1.0, 1.0 }, /* FIXME */
465 .viewportSubPixelBits = 13, /* We take a float? */
466 .minMemoryMapAlignment = 64, /* A cache line */
467 .minTexelBufferOffsetAlignment = 1,
468 .minUniformBufferOffsetAlignment = 1,
469 .minStorageBufferOffsetAlignment = 1,
470 .minTexelOffset = 0, /* FIXME */
471 .maxTexelOffset = 0, /* FIXME */
472 .minTexelGatherOffset = 0, /* FIXME */
473 .maxTexelGatherOffset = 0, /* FIXME */
474 .minInterpolationOffset = 0, /* FIXME */
475 .maxInterpolationOffset = 0, /* FIXME */
476 .subPixelInterpolationOffsetBits = 0, /* FIXME */
477 .maxFramebufferWidth = (1 << 14),
478 .maxFramebufferHeight = (1 << 14),
479 .maxFramebufferLayers = (1 << 10),
480 .maxFramebufferColorSamples = 8,
481 .maxFramebufferDepthSamples = 8,
482 .maxFramebufferStencilSamples = 8,
483 .maxColorAttachments = MAX_RTS,
484 .maxSampledImageColorSamples = 8,
485 .maxSampledImageDepthSamples = 8,
486 .maxSampledImageIntegerSamples = 1,
487 .maxStorageImageSamples = 1,
488 .maxSampleMaskWords = 1,
489 .timestampFrequency = 1000 * 1000 * 1000 / 80,
490 .maxClipDistances = 0 /* FIXME */,
491 .maxCullDistances = 0 /* FIXME */,
492 .maxCombinedClipAndCullDistances = 0 /* FIXME */,
493 .pointSizeRange = { 0.125, 255.875 },
494 .lineWidthRange = { 0.0, 7.9921875 },
495 .pointSizeGranularity = (1.0 / 8.0),
496 .lineWidthGranularity = (1.0 / 128.0),
497 };
498
499 *pProperties = (VkPhysicalDeviceProperties) {
500 .apiVersion = VK_MAKE_VERSION(0, 170, 2),
501 .driverVersion = 1,
502 .vendorId = 0x8086,
503 .deviceId = pdevice->chipset_id,
504 .deviceType = VK_PHYSICAL_DEVICE_TYPE_INTEGRATED_GPU,
505 .limits = limits,
506 .sparseProperties = {0}, /* Broadwell doesn't do sparse. */
507 };
508
509 strcpy(pProperties->deviceName, pdevice->name);
510 snprintf((char *)pProperties->pipelineCacheUUID, VK_UUID_LENGTH,
511 "anv-%s", MESA_GIT_SHA1 + 4);
512
513 return VK_SUCCESS;
514 }
515
516 VkResult anv_GetPhysicalDeviceQueueFamilyProperties(
517 VkPhysicalDevice physicalDevice,
518 uint32_t* pCount,
519 VkQueueFamilyProperties* pQueueFamilyProperties)
520 {
521 if (pQueueFamilyProperties == NULL) {
522 *pCount = 1;
523 return VK_SUCCESS;
524 }
525
526 assert(*pCount >= 1);
527
528 *pQueueFamilyProperties = (VkQueueFamilyProperties) {
529 .queueFlags = VK_QUEUE_GRAPHICS_BIT |
530 VK_QUEUE_COMPUTE_BIT |
531 VK_QUEUE_DMA_BIT,
532 .queueCount = 1,
533 .supportsTimestamps = true,
534 };
535
536 return VK_SUCCESS;
537 }
538
539 VkResult anv_GetPhysicalDeviceMemoryProperties(
540 VkPhysicalDevice physicalDevice,
541 VkPhysicalDeviceMemoryProperties* pMemoryProperties)
542 {
543 ANV_FROM_HANDLE(anv_physical_device, physical_device, physicalDevice);
544 VkDeviceSize heap_size;
545
546 /* Reserve some wiggle room for the driver by exposing only 75% of the
547 * aperture to the heap.
548 */
549 heap_size = 3 * physical_device->aperture_size / 4;
550
551 /* The property flags below are valid only for llc platforms. */
552 pMemoryProperties->memoryTypeCount = 1;
553 pMemoryProperties->memoryTypes[0] = (VkMemoryType) {
554 .propertyFlags = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT,
555 .heapIndex = 0,
556 };
557
558 pMemoryProperties->memoryHeapCount = 1;
559 pMemoryProperties->memoryHeaps[0] = (VkMemoryHeap) {
560 .size = heap_size,
561 .flags = VK_MEMORY_HEAP_HOST_LOCAL_BIT,
562 };
563
564 return VK_SUCCESS;
565 }
566
567 PFN_vkVoidFunction anv_GetInstanceProcAddr(
568 VkInstance instance,
569 const char* pName)
570 {
571 return anv_lookup_entrypoint(pName);
572 }
573
574 PFN_vkVoidFunction anv_GetDeviceProcAddr(
575 VkDevice device,
576 const char* pName)
577 {
578 return anv_lookup_entrypoint(pName);
579 }
580
581 static VkResult
582 anv_queue_init(struct anv_device *device, struct anv_queue *queue)
583 {
584 queue->_loader_data.loaderMagic = ICD_LOADER_MAGIC;
585 queue->device = device;
586 queue->pool = &device->surface_state_pool;
587
588 return VK_SUCCESS;
589 }
590
591 static void
592 anv_queue_finish(struct anv_queue *queue)
593 {
594 }
595
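/* Upload the six standard border colors into the dynamic state pool once at
 * device creation, so that sampler state created later can point at them.
 */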
596 static void
597 anv_device_init_border_colors(struct anv_device *device)
598 {
599 static const VkClearColorValue border_colors[] = {
600 [VK_BORDER_COLOR_FLOAT_TRANSPARENT_BLACK] = { .float32 = { 0.0, 0.0, 0.0, 0.0 } },
601 [VK_BORDER_COLOR_FLOAT_OPAQUE_BLACK] = { .float32 = { 0.0, 0.0, 0.0, 1.0 } },
602 [VK_BORDER_COLOR_FLOAT_OPAQUE_WHITE] = { .float32 = { 1.0, 1.0, 1.0, 1.0 } },
603 [VK_BORDER_COLOR_INT_TRANSPARENT_BLACK] = { .uint32 = { 0, 0, 0, 0 } },
604 [VK_BORDER_COLOR_INT_OPAQUE_BLACK] = { .uint32 = { 0, 0, 0, 1 } },
605 [VK_BORDER_COLOR_INT_OPAQUE_WHITE] = { .uint32 = { 1, 1, 1, 1 } },
606 };
607
608 device->border_colors =
609 anv_state_pool_alloc(&device->dynamic_state_pool,
610 sizeof(border_colors), 32);
611 memcpy(device->border_colors.map, border_colors, sizeof(border_colors));
612 }
613
614 VkResult anv_CreateDevice(
615 VkPhysicalDevice physicalDevice,
616 const VkDeviceCreateInfo* pCreateInfo,
617 VkDevice* pDevice)
618 {
619 ANV_FROM_HANDLE(anv_physical_device, physical_device, physicalDevice);
620 struct anv_instance *instance = physical_device->instance;
621 struct anv_device *device;
622
623 assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO);
624
625 for (uint32_t i = 0; i < pCreateInfo->extensionCount; i++) {
626 bool found = false;
627 for (uint32_t j = 0; j < ARRAY_SIZE(device_extensions); j++) {
628 if (strcmp(pCreateInfo->ppEnabledExtensionNames[i],
629 device_extensions[j].extName) == 0) {
630 found = true;
631 break;
632 }
633 }
634 if (!found)
635 return vk_error(VK_ERROR_EXTENSION_NOT_PRESENT);
636 }
637
638 anv_set_dispatch_gen(physical_device->info->gen);
639
640 device = anv_instance_alloc(instance, sizeof(*device), 8,
641 VK_SYSTEM_ALLOC_TYPE_API_OBJECT);
642 if (!device)
643 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
644
645 device->_loader_data.loaderMagic = ICD_LOADER_MAGIC;
646 device->instance = physical_device->instance;
647
648 /* XXX(chadv): Can we dup() physicalDevice->fd here? */
649 device->fd = open(physical_device->path, O_RDWR | O_CLOEXEC);
650 if (device->fd == -1)
651 goto fail_device;
652
653 device->context_id = anv_gem_create_context(device);
654 if (device->context_id == -1)
655 goto fail_fd;
656
657 pthread_mutex_init(&device->mutex, NULL);
658
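/* Set up the device's allocators: a BO pool that recycles batch-buffer BOs,
 * block pools backing the dynamic and surface state pools, an instruction
 * block pool for compiled shaders, a scratch block pool, and a small
 * workaround BO.
 */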
659 anv_bo_pool_init(&device->batch_bo_pool, device, ANV_CMD_BUFFER_BATCH_SIZE);
660
661 anv_block_pool_init(&device->dynamic_state_block_pool, device, 2048);
662
663 anv_state_pool_init(&device->dynamic_state_pool,
664 &device->dynamic_state_block_pool);
665
666 anv_block_pool_init(&device->instruction_block_pool, device, 4096);
667 anv_block_pool_init(&device->surface_state_block_pool, device, 4096);
668
669 anv_state_pool_init(&device->surface_state_pool,
670 &device->surface_state_block_pool);
671
672 anv_bo_init_new(&device->workaround_bo, device, 1024);
673
674 anv_block_pool_init(&device->scratch_block_pool, device, 0x10000);
675
676 device->info = *physical_device->info;
677 device->isl_dev = physical_device->isl_dev;
678
679 anv_queue_init(device, &device->queue);
680
681 anv_device_init_meta(device);
682
683 anv_device_init_border_colors(device);
684
685 *pDevice = anv_device_to_handle(device);
686
687 return VK_SUCCESS;
688
689 fail_fd:
690 close(device->fd);
691 fail_device:
692 anv_device_free(device, device);
693
694 return vk_error(VK_ERROR_INITIALIZATION_FAILED);
695 }
696
697 void anv_DestroyDevice(
698 VkDevice _device)
699 {
700 ANV_FROM_HANDLE(anv_device, device, _device);
701
702 anv_queue_finish(&device->queue);
703
704 anv_device_finish_meta(device);
705
706 #ifdef HAVE_VALGRIND
707 /* We only need to free these to prevent valgrind errors. The backing
708 * BO will go away in a couple of lines so we don't actually leak.
709 */
710 anv_state_pool_free(&device->dynamic_state_pool, device->border_colors);
711 #endif
712
713 anv_gem_munmap(device->workaround_bo.map, device->workaround_bo.size);
714 anv_gem_close(device, device->workaround_bo.gem_handle);
715
716 anv_bo_pool_finish(&device->batch_bo_pool);
717 anv_state_pool_finish(&device->dynamic_state_pool);
718 anv_block_pool_finish(&device->dynamic_state_block_pool);
719 anv_block_pool_finish(&device->instruction_block_pool);
720 anv_state_pool_finish(&device->surface_state_pool);
721 anv_block_pool_finish(&device->surface_state_block_pool);
722 anv_block_pool_finish(&device->scratch_block_pool);
723
724 close(device->fd);
725
726 anv_instance_free(device->instance, device);
727 }
728
729 VkResult anv_EnumerateInstanceExtensionProperties(
730 const char* pLayerName,
731 uint32_t* pCount,
732 VkExtensionProperties* pProperties)
733 {
734 if (pProperties == NULL) {
735 *pCount = ARRAY_SIZE(global_extensions);
736 return VK_SUCCESS;
737 }
738
739 assert(*pCount >= ARRAY_SIZE(global_extensions));
740
741 *pCount = ARRAY_SIZE(global_extensions);
742 memcpy(pProperties, global_extensions, sizeof(global_extensions));
743
744 return VK_SUCCESS;
745 }
746
747 VkResult anv_EnumerateDeviceExtensionProperties(
748 VkPhysicalDevice physicalDevice,
749 const char* pLayerName,
750 uint32_t* pCount,
751 VkExtensionProperties* pProperties)
752 {
753 if (pProperties == NULL) {
754 *pCount = ARRAY_SIZE(device_extensions);
755 return VK_SUCCESS;
756 }
757
758 assert(*pCount >= ARRAY_SIZE(device_extensions));
759
760 *pCount = ARRAY_SIZE(device_extensions);
761 memcpy(pProperties, device_extensions, sizeof(device_extensions));
762
763 return VK_SUCCESS;
764 }
765
766 VkResult anv_EnumerateInstanceLayerProperties(
767 uint32_t* pCount,
768 VkLayerProperties* pProperties)
769 {
770 if (pProperties == NULL) {
771 *pCount = 0;
772 return VK_SUCCESS;
773 }
774
775 /* None supported at this time */
776 return vk_error(VK_ERROR_LAYER_NOT_PRESENT);
777 }
778
779 VkResult anv_EnumerateDeviceLayerProperties(
780 VkPhysicalDevice physicalDevice,
781 uint32_t* pCount,
782 VkLayerProperties* pProperties)
783 {
784 if (pProperties == NULL) {
785 *pCount = 0;
786 return VK_SUCCESS;
787 }
788
789 /* None supported at this time */
790 return vk_error(VK_ERROR_LAYER_NOT_PRESENT);
791 }
792
793 VkResult anv_GetDeviceQueue(
794 VkDevice _device,
795 uint32_t queueNodeIndex,
796 uint32_t queueIndex,
797 VkQueue* pQueue)
798 {
799 ANV_FROM_HANDLE(anv_device, device, _device);
800
801 assert(queueIndex == 0);
802
803 *pQueue = anv_queue_to_handle(&device->queue);
804
805 return VK_SUCCESS;
806 }
807
808 VkResult anv_QueueSubmit(
809 VkQueue _queue,
810 uint32_t cmdBufferCount,
811 const VkCmdBuffer* pCmdBuffers,
812 VkFence _fence)
813 {
814 ANV_FROM_HANDLE(anv_queue, queue, _queue);
815 ANV_FROM_HANDLE(anv_fence, fence, _fence);
816 struct anv_device *device = queue->device;
817 int ret;
818
819 for (uint32_t i = 0; i < cmdBufferCount; i++) {
820 ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, pCmdBuffers[i]);
821
822 assert(cmd_buffer->level == VK_CMD_BUFFER_LEVEL_PRIMARY);
823
824 ret = anv_gem_execbuffer(device, &cmd_buffer->execbuf2.execbuf);
825 if (ret != 0) {
826 /* We don't know the real error. */
827 return vk_errorf(VK_ERROR_OUT_OF_DEVICE_MEMORY,
828 "execbuf2 failed: %m");
829 }
830
831 if (fence) {
832 ret = anv_gem_execbuffer(device, &fence->execbuf);
833 if (ret != 0) {
834 /* We don't know the real error. */
835 return vk_errorf(VK_ERROR_OUT_OF_DEVICE_MEMORY,
836 "execbuf2 failed: %m");
837 }
838 }
839
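/* execbuf2 writes back the offset at which the kernel actually placed each
 * BO; record those so later submissions can use them as presumed offsets.
 */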
840 for (uint32_t i = 0; i < cmd_buffer->execbuf2.bo_count; i++)
841 cmd_buffer->execbuf2.bos[i]->offset = cmd_buffer->execbuf2.objects[i].offset;
842 }
843
844 return VK_SUCCESS;
845 }
846
847 VkResult anv_QueueWaitIdle(
848 VkQueue _queue)
849 {
850 ANV_FROM_HANDLE(anv_queue, queue, _queue);
851
852 return ANV_CALL(DeviceWaitIdle)(anv_device_to_handle(queue->device));
853 }
854
855 VkResult anv_DeviceWaitIdle(
856 VkDevice _device)
857 {
858 ANV_FROM_HANDLE(anv_device, device, _device);
859 struct anv_state state;
860 struct anv_batch batch;
861 struct drm_i915_gem_execbuffer2 execbuf;
862 struct drm_i915_gem_exec_object2 exec2_objects[1];
863 struct anv_bo *bo = NULL;
864 VkResult result;
865 int64_t timeout;
866 int ret;
867
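/* Emit a tiny batch containing only MI_BATCH_BUFFER_END, submit it on the
 * render ring for this context, then wait on its backing BO. The kernel runs
 * a context's batches in submission order, so the wait returns only after
 * all previously submitted work has completed.
 */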
868 state = anv_state_pool_alloc(&device->dynamic_state_pool, 32, 32);
869 bo = &device->dynamic_state_pool.block_pool->bo;
870 batch.start = batch.next = state.map;
871 batch.end = state.map + 32;
872 anv_batch_emit(&batch, GEN7_MI_BATCH_BUFFER_END);
873 anv_batch_emit(&batch, GEN7_MI_NOOP);
874
875 exec2_objects[0].handle = bo->gem_handle;
876 exec2_objects[0].relocation_count = 0;
877 exec2_objects[0].relocs_ptr = 0;
878 exec2_objects[0].alignment = 0;
879 exec2_objects[0].offset = bo->offset;
880 exec2_objects[0].flags = 0;
881 exec2_objects[0].rsvd1 = 0;
882 exec2_objects[0].rsvd2 = 0;
883
884 execbuf.buffers_ptr = (uintptr_t) exec2_objects;
885 execbuf.buffer_count = 1;
886 execbuf.batch_start_offset = state.offset;
887 execbuf.batch_len = batch.next - state.map;
888 execbuf.cliprects_ptr = 0;
889 execbuf.num_cliprects = 0;
890 execbuf.DR1 = 0;
891 execbuf.DR4 = 0;
892
893 execbuf.flags =
894 I915_EXEC_HANDLE_LUT | I915_EXEC_NO_RELOC | I915_EXEC_RENDER;
895 execbuf.rsvd1 = device->context_id;
896 execbuf.rsvd2 = 0;
897
898 ret = anv_gem_execbuffer(device, &execbuf);
899 if (ret != 0) {
900 /* We don't know the real error. */
901 result = vk_errorf(VK_ERROR_OUT_OF_DEVICE_MEMORY, "execbuf2 failed: %m");
902 goto fail;
903 }
904
905 timeout = INT64_MAX;
906 ret = anv_gem_wait(device, bo->gem_handle, &timeout);
907 if (ret != 0) {
908 /* We don't know the real error. */
909 result = vk_errorf(VK_ERROR_OUT_OF_DEVICE_MEMORY, "execbuf2 failed: %m");
910 goto fail;
911 }
912
913 anv_state_pool_free(&device->dynamic_state_pool, state);
914
915 return VK_SUCCESS;
916
917 fail:
918 anv_state_pool_free(&device->dynamic_state_pool, state);
919
920 return result;
921 }
922
923 void *
924 anv_device_alloc(struct anv_device * device,
925 size_t size,
926 size_t alignment,
927 VkSystemAllocType allocType)
928 {
929 return anv_instance_alloc(device->instance, size, alignment, allocType);
930 }
931
932 void
933 anv_device_free(struct anv_device * device,
934 void * mem)
935 {
936 anv_instance_free(device->instance, mem);
937 }
938
939 VkResult
940 anv_bo_init_new(struct anv_bo *bo, struct anv_device *device, uint64_t size)
941 {
942 bo->gem_handle = anv_gem_create(device, size);
943 if (!bo->gem_handle)
944 return vk_error(VK_ERROR_OUT_OF_DEVICE_MEMORY);
945
946 bo->map = NULL;
947 bo->index = 0;
948 bo->offset = 0;
949 bo->size = size;
950
951 return VK_SUCCESS;
952 }
953
954 VkResult anv_AllocMemory(
955 VkDevice _device,
956 const VkMemoryAllocInfo* pAllocInfo,
957 VkDeviceMemory* pMem)
958 {
959 ANV_FROM_HANDLE(anv_device, device, _device);
960 struct anv_device_memory *mem;
961 VkResult result;
962
963 assert(pAllocInfo->sType == VK_STRUCTURE_TYPE_MEMORY_ALLOC_INFO);
964
965 /* We support exactly one memory heap. */
966 assert(pAllocInfo->memoryTypeIndex == 0);
967
968 /* FINISHME: Fail if allocation request exceeds heap size. */
969
970 mem = anv_device_alloc(device, sizeof(*mem), 8,
971 VK_SYSTEM_ALLOC_TYPE_API_OBJECT);
972 if (mem == NULL)
973 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
974
975 result = anv_bo_init_new(&mem->bo, device, pAllocInfo->allocationSize);
976 if (result != VK_SUCCESS)
977 goto fail;
978
979 *pMem = anv_device_memory_to_handle(mem);
980
981 return VK_SUCCESS;
982
983 fail:
984 anv_device_free(device, mem);
985
986 return result;
987 }
988
989 void anv_FreeMemory(
990 VkDevice _device,
991 VkDeviceMemory _mem)
992 {
993 ANV_FROM_HANDLE(anv_device, device, _device);
994 ANV_FROM_HANDLE(anv_device_memory, mem, _mem);
995
996 if (mem->bo.map)
997 anv_gem_munmap(mem->bo.map, mem->bo.size);
998
999 if (mem->bo.gem_handle != 0)
1000 anv_gem_close(device, mem->bo.gem_handle);
1001
1002 anv_device_free(device, mem);
1003 }
1004
1005 VkResult anv_MapMemory(
1006 VkDevice _device,
1007 VkDeviceMemory _mem,
1008 VkDeviceSize offset,
1009 VkDeviceSize size,
1010 VkMemoryMapFlags flags,
1011 void** ppData)
1012 {
1013 ANV_FROM_HANDLE(anv_device, device, _device);
1014 ANV_FROM_HANDLE(anv_device_memory, mem, _mem);
1015
1016 /* FIXME: Is this supposed to be thread safe? Since vkUnmapMemory() only
1017 * takes a VkDeviceMemory pointer, it seems like only one map of the memory
1018 * at a time is valid. We could just mmap up front and return an offset
1019 * pointer here, but that may exhaust virtual memory on 32 bit
1020 * userspace. */
1021
1022 mem->map = anv_gem_mmap(device, mem->bo.gem_handle, offset, size);
1023 mem->map_size = size;
1024
1025 *ppData = mem->map;
1026
1027 return VK_SUCCESS;
1028 }
1029
1030 void anv_UnmapMemory(
1031 VkDevice _device,
1032 VkDeviceMemory _mem)
1033 {
1034 ANV_FROM_HANDLE(anv_device_memory, mem, _mem);
1035
1036 anv_gem_munmap(mem->map, mem->map_size);
1037 }
1038
1039 VkResult anv_FlushMappedMemoryRanges(
1040 VkDevice device,
1041 uint32_t memRangeCount,
1042 const VkMappedMemoryRange* pMemRanges)
1043 {
1044 /* clflush here for !llc platforms */
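/* Only LLC platforms are supported for now (anv_physical_device_init rejects
 * the rest), so mapped memory is already coherent with the GPU and this can
 * be a no-op.
 */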
1045
1046 return VK_SUCCESS;
1047 }
1048
1049 VkResult anv_InvalidateMappedMemoryRanges(
1050 VkDevice device,
1051 uint32_t memRangeCount,
1052 const VkMappedMemoryRange* pMemRanges)
1053 {
1054 return anv_FlushMappedMemoryRanges(device, memRangeCount, pMemRanges);
1055 }
1056
1057 VkResult anv_GetBufferMemoryRequirements(
1058 VkDevice device,
1059 VkBuffer _buffer,
1060 VkMemoryRequirements* pMemoryRequirements)
1061 {
1062 ANV_FROM_HANDLE(anv_buffer, buffer, _buffer);
1063
1064 /* The Vulkan spec (git aaed022) says:
1065 *
1066 * memoryTypeBits is a bitfield and contains one bit set for every
1067 * supported memory type for the resource. The bit `1<<i` is set if and
1068 * only if the memory type `i` in the VkPhysicalDeviceMemoryProperties
1069 * structure for the physical device is supported.
1070 *
1071 * We support exactly one memory type.
1072 */
1073 pMemoryRequirements->memoryTypeBits = 1;
1074
1075 pMemoryRequirements->size = buffer->size;
1076 pMemoryRequirements->alignment = 16;
1077
1078 return VK_SUCCESS;
1079 }
1080
1081 VkResult anv_GetImageMemoryRequirements(
1082 VkDevice device,
1083 VkImage _image,
1084 VkMemoryRequirements* pMemoryRequirements)
1085 {
1086 ANV_FROM_HANDLE(anv_image, image, _image);
1087
1088 /* The Vulkan spec (git aaed022) says:
1089 *
1090 * memoryTypeBits is a bitfield and contains one bit set for every
1091 * supported memory type for the resource. The bit `1<<i` is set if and
1092 * only if the memory type `i` in the VkPhysicalDeviceMemoryProperties
1093 * structure for the physical device is supported.
1094 *
1095 * We support exactly one memory type.
1096 */
1097 pMemoryRequirements->memoryTypeBits = 1;
1098
1099 pMemoryRequirements->size = image->size;
1100 pMemoryRequirements->alignment = image->alignment;
1101
1102 return VK_SUCCESS;
1103 }
1104
1105 VkResult anv_GetImageSparseMemoryRequirements(
1106 VkDevice device,
1107 VkImage image,
1108 uint32_t* pNumRequirements,
1109 VkSparseImageMemoryRequirements* pSparseMemoryRequirements)
1110 {
1111 return vk_error(VK_UNSUPPORTED);
1112 }
1113
1114 VkResult anv_GetDeviceMemoryCommitment(
1115 VkDevice device,
1116 VkDeviceMemory memory,
1117 VkDeviceSize* pCommittedMemoryInBytes)
1118 {
1119 *pCommittedMemoryInBytes = 0;
1120 stub_return(VK_SUCCESS);
1121 }
1122
1123 VkResult anv_BindBufferMemory(
1124 VkDevice device,
1125 VkBuffer _buffer,
1126 VkDeviceMemory _mem,
1127 VkDeviceSize memOffset)
1128 {
1129 ANV_FROM_HANDLE(anv_device_memory, mem, _mem);
1130 ANV_FROM_HANDLE(anv_buffer, buffer, _buffer);
1131
1132 buffer->bo = &mem->bo;
1133 buffer->offset = memOffset;
1134
1135 return VK_SUCCESS;
1136 }
1137
1138 VkResult anv_BindImageMemory(
1139 VkDevice device,
1140 VkImage _image,
1141 VkDeviceMemory _mem,
1142 VkDeviceSize memOffset)
1143 {
1144 ANV_FROM_HANDLE(anv_device_memory, mem, _mem);
1145 ANV_FROM_HANDLE(anv_image, image, _image);
1146
1147 image->bo = &mem->bo;
1148 image->offset = memOffset;
1149
1150 return VK_SUCCESS;
1151 }
1152
1153 VkResult anv_QueueBindSparseBufferMemory(
1154 VkQueue queue,
1155 VkBuffer buffer,
1156 uint32_t numBindings,
1157 const VkSparseMemoryBindInfo* pBindInfo)
1158 {
1159 stub_return(VK_UNSUPPORTED);
1160 }
1161
1162 VkResult anv_QueueBindSparseImageOpaqueMemory(
1163 VkQueue queue,
1164 VkImage image,
1165 uint32_t numBindings,
1166 const VkSparseMemoryBindInfo* pBindInfo)
1167 {
1168 stub_return(VK_UNSUPPORTED);
1169 }
1170
1171 VkResult anv_QueueBindSparseImageMemory(
1172 VkQueue queue,
1173 VkImage image,
1174 uint32_t numBindings,
1175 const VkSparseImageMemoryBindInfo* pBindInfo)
1176 {
1177 stub_return(VK_UNSUPPORTED);
1178 }
1179
1180 VkResult anv_CreateFence(
1181 VkDevice _device,
1182 const VkFenceCreateInfo* pCreateInfo,
1183 VkFence* pFence)
1184 {
1185 ANV_FROM_HANDLE(anv_device, device, _device);
1186 struct anv_fence *fence;
1187 struct anv_batch batch;
1188 VkResult result;
1189
1190 const uint32_t fence_size = 128;
1191
1192 assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_FENCE_CREATE_INFO);
1193
1194 fence = anv_device_alloc(device, sizeof(*fence), 8,
1195 VK_SYSTEM_ALLOC_TYPE_API_OBJECT);
1196 if (fence == NULL)
1197 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
1198
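/* A fence is a small BO containing just MI_BATCH_BUFFER_END. vkQueueSubmit
 * executes this batch after the command buffers, and fence status/waits are
 * then simply gem_wait calls on the BO.
 */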
1199 result = anv_bo_init_new(&fence->bo, device, fence_size);
1200 if (result != VK_SUCCESS)
1201 goto fail;
1202
1203 fence->bo.map =
1204 anv_gem_mmap(device, fence->bo.gem_handle, 0, fence->bo.size);
1205 batch.next = batch.start = fence->bo.map;
1206 batch.end = fence->bo.map + fence->bo.size;
1207 anv_batch_emit(&batch, GEN7_MI_BATCH_BUFFER_END);
1208 anv_batch_emit(&batch, GEN7_MI_NOOP);
1209
1210 fence->exec2_objects[0].handle = fence->bo.gem_handle;
1211 fence->exec2_objects[0].relocation_count = 0;
1212 fence->exec2_objects[0].relocs_ptr = 0;
1213 fence->exec2_objects[0].alignment = 0;
1214 fence->exec2_objects[0].offset = fence->bo.offset;
1215 fence->exec2_objects[0].flags = 0;
1216 fence->exec2_objects[0].rsvd1 = 0;
1217 fence->exec2_objects[0].rsvd2 = 0;
1218
1219 fence->execbuf.buffers_ptr = (uintptr_t) fence->exec2_objects;
1220 fence->execbuf.buffer_count = 1;
1221 fence->execbuf.batch_start_offset = 0;
1222 fence->execbuf.batch_len = batch.next - fence->bo.map;
1223 fence->execbuf.cliprects_ptr = 0;
1224 fence->execbuf.num_cliprects = 0;
1225 fence->execbuf.DR1 = 0;
1226 fence->execbuf.DR4 = 0;
1227
1228 fence->execbuf.flags =
1229 I915_EXEC_HANDLE_LUT | I915_EXEC_NO_RELOC | I915_EXEC_RENDER;
1230 fence->execbuf.rsvd1 = device->context_id;
1231 fence->execbuf.rsvd2 = 0;
1232
1233 *pFence = anv_fence_to_handle(fence);
1234
1235 return VK_SUCCESS;
1236
1237 fail:
1238 anv_device_free(device, fence);
1239
1240 return result;
1241 }
1242
1243 void anv_DestroyFence(
1244 VkDevice _device,
1245 VkFence _fence)
1246 {
1247 ANV_FROM_HANDLE(anv_device, device, _device);
1248 ANV_FROM_HANDLE(anv_fence, fence, _fence);
1249
1250 anv_gem_munmap(fence->bo.map, fence->bo.size);
1251 anv_gem_close(device, fence->bo.gem_handle);
1252 anv_device_free(device, fence);
1253 }
1254
1255 VkResult anv_ResetFences(
1256 VkDevice _device,
1257 uint32_t fenceCount,
1258 const VkFence* pFences)
1259 {
1260 for (uint32_t i = 0; i < fenceCount; i++) {
1261 ANV_FROM_HANDLE(anv_fence, fence, pFences[i]);
1262 fence->ready = false;
1263 }
1264
1265 return VK_SUCCESS;
1266 }
1267
1268 VkResult anv_GetFenceStatus(
1269 VkDevice _device,
1270 VkFence _fence)
1271 {
1272 ANV_FROM_HANDLE(anv_device, device, _device);
1273 ANV_FROM_HANDLE(anv_fence, fence, _fence);
1274 int64_t t = 0;
1275 int ret;
1276
1277 if (fence->ready)
1278 return VK_SUCCESS;
1279
1280 ret = anv_gem_wait(device, fence->bo.gem_handle, &t);
1281 if (ret == 0) {
1282 fence->ready = true;
1283 return VK_SUCCESS;
1284 }
1285
1286 return VK_NOT_READY;
1287 }
1288
1289 VkResult anv_WaitForFences(
1290 VkDevice _device,
1291 uint32_t fenceCount,
1292 const VkFence* pFences,
1293 VkBool32 waitAll,
1294 uint64_t timeout)
1295 {
1296 ANV_FROM_HANDLE(anv_device, device, _device);
1297
1298 /* DRM_IOCTL_I915_GEM_WAIT uses a signed 64 bit timeout and is supposed
1299 * to block indefinitely for timeouts <= 0. Unfortunately, this was broken
1300 * for a couple of kernel releases. Since there's no way to know
1301 * whether or not the kernel we're using is one of the broken ones, the
1302 * best we can do is to clamp the timeout to INT64_MAX. This limits the
1303 * maximum timeout from 584 years to 292 years - likely not a big deal.
1304 */
1305 if (timeout > INT64_MAX)
1306 timeout = INT64_MAX;
1307
1308 int64_t t = timeout;
1309
1310 /* FIXME: handle !waitAll */
1311
1312 for (uint32_t i = 0; i < fenceCount; i++) {
1313 ANV_FROM_HANDLE(anv_fence, fence, pFences[i]);
1314 int ret = anv_gem_wait(device, fence->bo.gem_handle, &t);
1315 if (ret == -1 && errno == ETIME) {
1316 return VK_TIMEOUT;
1317 } else if (ret == -1) {
1318 /* We don't know the real error. */
1319 return vk_errorf(VK_ERROR_OUT_OF_DEVICE_MEMORY,
1320 "gem wait failed: %m");
1321 }
1322 }
1323
1324 return VK_SUCCESS;
1325 }
1326
1327 // Queue semaphore functions
1328
1329 VkResult anv_CreateSemaphore(
1330 VkDevice device,
1331 const VkSemaphoreCreateInfo* pCreateInfo,
1332 VkSemaphore* pSemaphore)
1333 {
1334 pSemaphore->handle = 1;
1335 stub_return(VK_SUCCESS);
1336 }
1337
1338 void anv_DestroySemaphore(
1339 VkDevice device,
1340 VkSemaphore semaphore)
1341 {
1342 stub();
1343 }
1344
1345 VkResult anv_QueueSignalSemaphore(
1346 VkQueue queue,
1347 VkSemaphore semaphore)
1348 {
1349 stub_return(VK_UNSUPPORTED);
1350 }
1351
1352 VkResult anv_QueueWaitSemaphore(
1353 VkQueue queue,
1354 VkSemaphore semaphore)
1355 {
1356 stub_return(VK_UNSUPPORTED);
1357 }
1358
1359 // Event functions
1360
1361 VkResult anv_CreateEvent(
1362 VkDevice device,
1363 const VkEventCreateInfo* pCreateInfo,
1364 VkEvent* pEvent)
1365 {
1366 stub_return(VK_UNSUPPORTED);
1367 }
1368
1369 void anv_DestroyEvent(
1370 VkDevice device,
1371 VkEvent event)
1372 {
1373 stub();
1374 }
1375
1376 VkResult anv_GetEventStatus(
1377 VkDevice device,
1378 VkEvent event)
1379 {
1380 stub_return(VK_UNSUPPORTED);
1381 }
1382
1383 VkResult anv_SetEvent(
1384 VkDevice device,
1385 VkEvent event)
1386 {
1387 stub_return(VK_UNSUPPORTED);
1388 }
1389
1390 VkResult anv_ResetEvent(
1391 VkDevice device,
1392 VkEvent event)
1393 {
1394 stub_return(VK_UNSUPPORTED);
1395 }
1396
1397 // Buffer functions
1398
1399 VkResult anv_CreateBuffer(
1400 VkDevice _device,
1401 const VkBufferCreateInfo* pCreateInfo,
1402 VkBuffer* pBuffer)
1403 {
1404 ANV_FROM_HANDLE(anv_device, device, _device);
1405 struct anv_buffer *buffer;
1406
1407 assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO);
1408
1409 buffer = anv_device_alloc(device, sizeof(*buffer), 8,
1410 VK_SYSTEM_ALLOC_TYPE_API_OBJECT);
1411 if (buffer == NULL)
1412 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
1413
1414 buffer->size = pCreateInfo->size;
1415 buffer->bo = NULL;
1416 buffer->offset = 0;
1417
1418 *pBuffer = anv_buffer_to_handle(buffer);
1419
1420 return VK_SUCCESS;
1421 }
1422
1423 void anv_DestroyBuffer(
1424 VkDevice _device,
1425 VkBuffer _buffer)
1426 {
1427 ANV_FROM_HANDLE(anv_device, device, _device);
1428 ANV_FROM_HANDLE(anv_buffer, buffer, _buffer);
1429
1430 anv_device_free(device, buffer);
1431 }
1432
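/* The buffer RENDER_SURFACE_STATE layout differs between gen7 and gen8, so
 * dispatch to the per-generation helper at runtime.
 */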
1433 void
1434 anv_fill_buffer_surface_state(struct anv_device *device, void *state,
1435 const struct anv_format *format,
1436 uint32_t offset, uint32_t range, uint32_t stride)
1437 {
1438 switch (device->info.gen) {
1439 case 7:
1440 gen7_fill_buffer_surface_state(state, format, offset, range, stride);
1441 break;
1442 case 8:
1443 gen8_fill_buffer_surface_state(state, format, offset, range, stride);
1444 break;
1445 default:
1446 unreachable("unsupported gen\n");
1447 }
1448 }
1449
1450 VkResult anv_CreateBufferView(
1451 VkDevice _device,
1452 const VkBufferViewCreateInfo* pCreateInfo,
1453 VkBufferView* pView)
1454 {
1455 stub_return(VK_UNSUPPORTED);
1456 }
1457
1458 void anv_DestroyBufferView(
1459 VkDevice _device,
1460 VkBufferView _bview)
1461 {
1462 stub();
1463 }
1464
1465 void anv_DestroySampler(
1466 VkDevice _device,
1467 VkSampler _sampler)
1468 {
1469 ANV_FROM_HANDLE(anv_device, device, _device);
1470 ANV_FROM_HANDLE(anv_sampler, sampler, _sampler);
1471
1472 anv_device_free(device, sampler);
1473 }
1474
1475 // Descriptor set functions
1476
1477 VkResult anv_CreateDescriptorSetLayout(
1478 VkDevice _device,
1479 const VkDescriptorSetLayoutCreateInfo* pCreateInfo,
1480 VkDescriptorSetLayout* pSetLayout)
1481 {
1482 ANV_FROM_HANDLE(anv_device, device, _device);
1483 struct anv_descriptor_set_layout *set_layout;
1484 uint32_t s;
1485
1486 assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO);
1487
1488 uint32_t immutable_sampler_count = 0;
1489 for (uint32_t b = 0; b < pCreateInfo->count; b++) {
1490 if (pCreateInfo->pBinding[b].pImmutableSamplers)
1491 immutable_sampler_count += pCreateInfo->pBinding[b].arraySize;
1492 }
1493
1494 size_t size = sizeof(struct anv_descriptor_set_layout) +
1495 pCreateInfo->count * sizeof(set_layout->binding[0]) +
1496 immutable_sampler_count * sizeof(struct anv_sampler *);
1497
1498 set_layout = anv_device_alloc(device, size, 8,
1499 VK_SYSTEM_ALLOC_TYPE_API_OBJECT);
1500 if (!set_layout)
1501 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
1502
1503 /* We just allocate all the samplers at the end of the struct */
1504 struct anv_sampler **samplers =
1505 (struct anv_sampler **)&set_layout->binding[pCreateInfo->count];
1506
1507 set_layout->binding_count = pCreateInfo->count;
1508 set_layout->shader_stages = 0;
1509 set_layout->size = 0;
1510
1511 /* Initialize all binding_layout entries to -1 */
1512 memset(set_layout->binding, -1,
1513 pCreateInfo->count * sizeof(set_layout->binding[0]));
1514
1515 /* Initialize all samplers to 0 */
1516 memset(samplers, 0, immutable_sampler_count * sizeof(*samplers));
1517
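/* Walk the bindings and, for each shader stage that uses a binding, hand out
 * consecutive per-stage sampler and surface slots; dynamic uniform and
 * storage buffers additionally get a dynamic offset slot.
 */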
1518 uint32_t sampler_count[VK_SHADER_STAGE_NUM] = { 0, };
1519 uint32_t surface_count[VK_SHADER_STAGE_NUM] = { 0, };
1520 uint32_t dynamic_offset_count = 0;
1521
1522 for (uint32_t b = 0; b < pCreateInfo->count; b++) {
1523 uint32_t array_size = MAX2(1, pCreateInfo->pBinding[b].arraySize);
1524 set_layout->binding[b].array_size = array_size;
1525 set_layout->binding[b].descriptor_index = set_layout->size;
1526 set_layout->size += array_size;
1527
1528 switch (pCreateInfo->pBinding[b].descriptorType) {
1529 case VK_DESCRIPTOR_TYPE_SAMPLER:
1530 case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
1531 for_each_bit(s, pCreateInfo->pBinding[b].stageFlags) {
1532 set_layout->binding[b].stage[s].sampler_index = sampler_count[s];
1533 sampler_count[s] += array_size;
1534 }
1535 break;
1536 default:
1537 break;
1538 }
1539
1540 switch (pCreateInfo->pBinding[b].descriptorType) {
1541 case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
1542 case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE:
1543 case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
1544 case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
1545 case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
1546 case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
1547 case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
1548 case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
1549 case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
1550 case VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT:
1551 for_each_bit(s, pCreateInfo->pBinding[b].stageFlags) {
1552 set_layout->binding[b].stage[s].surface_index = surface_count[s];
1553 surface_count[s] += array_size;
1554 }
1555 break;
1556 default:
1557 break;
1558 }
1559
1560 switch (pCreateInfo->pBinding[b].descriptorType) {
1561 case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
1562 case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
1563 set_layout->binding[b].dynamic_offset_index = dynamic_offset_count;
1564 dynamic_offset_count += array_size;
1565 break;
1566 default:
1567 break;
1568 }
1569
1570 if (pCreateInfo->pBinding[b].pImmutableSamplers) {
1571 set_layout->binding[b].immutable_samplers = samplers;
1572 samplers += array_size;
1573
1574 for (uint32_t i = 0; i < array_size; i++)
1575 set_layout->binding[b].immutable_samplers[i] =
1576 anv_sampler_from_handle(pCreateInfo->pBinding[b].pImmutableSamplers[i]);
1577 } else {
1578 set_layout->binding[b].immutable_samplers = NULL;
1579 }
1580
1581 set_layout->shader_stages |= pCreateInfo->pBinding[b].stageFlags;
1582 }
1583
1584 set_layout->dynamic_offset_count = dynamic_offset_count;
1585
1586 *pSetLayout = anv_descriptor_set_layout_to_handle(set_layout);
1587
1588 return VK_SUCCESS;
1589 }
1590
1591 void anv_DestroyDescriptorSetLayout(
1592 VkDevice _device,
1593 VkDescriptorSetLayout _set_layout)
1594 {
1595 ANV_FROM_HANDLE(anv_device, device, _device);
1596 ANV_FROM_HANDLE(anv_descriptor_set_layout, set_layout, _set_layout);
1597
1598 anv_device_free(device, set_layout);
1599 }
1600
1601 VkResult anv_CreateDescriptorPool(
1602 VkDevice device,
1603 const VkDescriptorPoolCreateInfo* pCreateInfo,
1604 VkDescriptorPool* pDescriptorPool)
1605 {
1606 anv_finishme("VkDescriptorPool is a stub");
1607 pDescriptorPool->handle = 1;
1608 return VK_SUCCESS;
1609 }
1610
1611 void anv_DestroyDescriptorPool(
1612 VkDevice _device,
1613 VkDescriptorPool _pool)
1614 {
1615 anv_finishme("VkDescriptorPool is a stub: free the pool's descriptor sets");
1616 }
1617
1618 VkResult anv_ResetDescriptorPool(
1619 VkDevice device,
1620 VkDescriptorPool descriptorPool)
1621 {
1622 anv_finishme("VkDescriptorPool is a stub: free the pool's descriptor sets");
1623 return VK_SUCCESS;
1624 }
1625
1626 VkResult
1627 anv_descriptor_set_create(struct anv_device *device,
1628 const struct anv_descriptor_set_layout *layout,
1629 struct anv_descriptor_set **out_set)
1630 {
1631 struct anv_descriptor_set *set;
1632 size_t size = sizeof(*set) + layout->size * sizeof(set->descriptors[0]);
1633
1634 set = anv_device_alloc(device, size, 8, VK_SYSTEM_ALLOC_TYPE_API_OBJECT);
1635 if (!set)
1636 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
1637
1638 /* A descriptor set may not be 100% filled. Clear the set so we can
1639 * later detect holes in it.
1640 */
1641 memset(set, 0, size);
1642
1643 set->layout = layout;
1644
1645 /* Go through and fill out immutable samplers if we have any */
1646 struct anv_descriptor *desc = set->descriptors;
1647 for (uint32_t b = 0; b < layout->binding_count; b++) {
1648 if (layout->binding[b].immutable_samplers) {
1649 for (uint32_t i = 0; i < layout->binding[b].array_size; i++)
1650 desc[i].sampler = layout->binding[b].immutable_samplers[i];
1651 }
1652 desc += layout->binding[b].array_size;
1653 }
1654
1655 *out_set = set;
1656
1657 return VK_SUCCESS;
1658 }
1659
1660 void
1661 anv_descriptor_set_destroy(struct anv_device *device,
1662 struct anv_descriptor_set *set)
1663 {
1664 anv_device_free(device, set);
1665 }
1666
1667 VkResult anv_AllocDescriptorSets(
1668 VkDevice _device,
1669 VkDescriptorPool descriptorPool,
1670 VkDescriptorSetUsage setUsage,
1671 uint32_t count,
1672 const VkDescriptorSetLayout* pSetLayouts,
1673 VkDescriptorSet* pDescriptorSets)
1674 {
1675 ANV_FROM_HANDLE(anv_device, device, _device);
1676
1677 VkResult result = VK_SUCCESS;
1678 struct anv_descriptor_set *set;
1679 uint32_t i;
1680
1681 for (i = 0; i < count; i++) {
1682 ANV_FROM_HANDLE(anv_descriptor_set_layout, layout, pSetLayouts[i]);
1683
1684 result = anv_descriptor_set_create(device, layout, &set);
1685 if (result != VK_SUCCESS)
1686 break;
1687
1688 pDescriptorSets[i] = anv_descriptor_set_to_handle(set);
1689 }
1690
1691 if (result != VK_SUCCESS)
1692 anv_FreeDescriptorSets(_device, descriptorPool, i, pDescriptorSets);
1693
1694 return result;
1695 }
1696
1697 VkResult anv_FreeDescriptorSets(
1698 VkDevice _device,
1699 VkDescriptorPool descriptorPool,
1700 uint32_t count,
1701 const VkDescriptorSet* pDescriptorSets)
1702 {
1703 ANV_FROM_HANDLE(anv_device, device, _device);
1704
1705 for (uint32_t i = 0; i < count; i++) {
1706 ANV_FROM_HANDLE(anv_descriptor_set, set, pDescriptorSets[i]);
1707
1708 anv_descriptor_set_destroy(device, set);
1709 }
1710
1711 return VK_SUCCESS;
1712 }
1713
1714 void anv_UpdateDescriptorSets(
1715 VkDevice device,
1716 uint32_t writeCount,
1717 const VkWriteDescriptorSet* pDescriptorWrites,
1718 uint32_t copyCount,
1719 const VkCopyDescriptorSet* pDescriptorCopies)
1720 {
1721 for (uint32_t i = 0; i < writeCount; i++) {
1722 const VkWriteDescriptorSet *write = &pDescriptorWrites[i];
1723 ANV_FROM_HANDLE(anv_descriptor_set, set, write->destSet);
1724 const struct anv_descriptor_set_binding_layout *bind_layout =
1725 &set->layout->binding[write->destBinding];
1726 struct anv_descriptor *desc =
1727 &set->descriptors[bind_layout->descriptor_index];
1728
1729 switch (write->descriptorType) {
1730 case VK_DESCRIPTOR_TYPE_SAMPLER:
1731 for (uint32_t j = 0; j < write->count; j++) {
1732 ANV_FROM_HANDLE(anv_sampler, sampler,
1733 write->pDescriptors[j].sampler);
1734
1735 desc[j] = (struct anv_descriptor) {
1736 .type = VK_DESCRIPTOR_TYPE_SAMPLER,
1737 .sampler = sampler,
1738 };
1739 }
1740 break;
1741
1742 case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
1743 for (uint32_t j = 0; j < write->count; j++) {
1744 ANV_FROM_HANDLE(anv_image_view, iview,
1745 write->pDescriptors[j].imageView);
1746 ANV_FROM_HANDLE(anv_sampler, sampler,
1747 write->pDescriptors[j].sampler);
1748
1749 desc[j].type = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER;
1750 desc[j].image_view = iview;
1751
1752 /* If this descriptor has an immutable sampler, we don't want
1753 * to stomp on it.
1754 */
1755 if (sampler)
1756 desc[j].sampler = sampler;
1757 }
1758 break;
1759
1760 case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE:
1761 case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
1762 for (uint32_t j = 0; j < write->count; j++) {
1763 ANV_FROM_HANDLE(anv_image_view, iview,
1764 write->pDescriptors[j].imageView);
1765
1766 desc[j] = (struct anv_descriptor) {
1767 .type = write->descriptorType,
1768 .image_view = iview,
1769 };
1770 }
1771 break;
1772
1773 case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
1774 case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
1775 anv_finishme("texel buffers not implemented");
1776 break;
1777
1778 case VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT:
1779 anv_finishme("input attachments not implemented");
1780 break;
1781
1782 case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
1783 case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
1784 case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
1785 case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
1786 for (uint32_t j = 0; j < write->count; j++) {
1787 assert(write->pDescriptors[j].bufferInfo.buffer.handle);
1788 ANV_FROM_HANDLE(anv_buffer, buffer,
1789 write->pDescriptors[j].bufferInfo.buffer);
1790 assert(buffer);
1791
1792 desc[j] = (struct anv_descriptor) {
1793 .type = write->descriptorType,
1794 .buffer = buffer,
1795 .offset = write->pDescriptors[j].bufferInfo.offset,
1796 .range = write->pDescriptors[j].bufferInfo.range,
1797 };
1798
1799 /* For buffers with dynamic offsets, we use the full possible
1800 * range in the surface state and do the actual range-checking
1801 * in the shader.
1802 */
1803 if (bind_layout->dynamic_offset_index >= 0)
1804 desc[j].range = buffer->size - desc[j].offset;
1805 }
1806
1807 default:
1808 break;
1809 }
1810 }
1811
1812 for (uint32_t i = 0; i < copyCount; i++) {
1813 const VkCopyDescriptorSet *copy = &pDescriptorCopies[i];
1814 ANV_FROM_HANDLE(anv_descriptor_set, src, copy->srcSet);
1815 ANV_FROM_HANDLE(anv_descriptor_set, dest, copy->destSet);
1816 for (uint32_t j = 0; j < copy->count; j++) {
1817 dest->descriptors[copy->destBinding + j] =
1818 src->descriptors[copy->srcBinding + j];
1819 }
1820 }
1821 }
1822
1823 VkResult anv_CreateFramebuffer(
1824 VkDevice _device,
1825 const VkFramebufferCreateInfo* pCreateInfo,
1826 VkFramebuffer* pFramebuffer)
1827 {
1828 ANV_FROM_HANDLE(anv_device, device, _device);
1829 struct anv_framebuffer *framebuffer;
1830
1831 assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO);
1832
1833 size_t size = sizeof(*framebuffer) +
1834 sizeof(struct anv_image_view *) * pCreateInfo->attachmentCount;
1835 framebuffer = anv_device_alloc(device, size, 8,
1836 VK_SYSTEM_ALLOC_TYPE_API_OBJECT);
1837 if (framebuffer == NULL)
1838 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
1839
1840 framebuffer->attachment_count = pCreateInfo->attachmentCount;
1841 for (uint32_t i = 0; i < pCreateInfo->attachmentCount; i++) {
1842 VkImageView _iview = pCreateInfo->pAttachments[i];
1843 framebuffer->attachments[i] = anv_image_view_from_handle(_iview);
1844 }
1845
1846 framebuffer->width = pCreateInfo->width;
1847 framebuffer->height = pCreateInfo->height;
1848 framebuffer->layers = pCreateInfo->layers;
1849
1850 *pFramebuffer = anv_framebuffer_to_handle(framebuffer);
1851
1852 return VK_SUCCESS;
1853 }
1854
1855 void anv_DestroyFramebuffer(
1856 VkDevice _device,
1857 VkFramebuffer _fb)
1858 {
1859 ANV_FROM_HANDLE(anv_device, device, _device);
1860 ANV_FROM_HANDLE(anv_framebuffer, fb, _fb);
1861
1862 anv_device_free(device, fb);
1863 }
1864
1865 void vkCmdDbgMarkerBegin(
1866 VkCmdBuffer cmdBuffer,
1867 const char* pMarker)
1868 __attribute__ ((visibility ("default")));
1869
1870 void vkCmdDbgMarkerEnd(
1871 VkCmdBuffer cmdBuffer)
1872 __attribute__ ((visibility ("default")));
1873
1874 void vkCmdDbgMarkerBegin(
1875 VkCmdBuffer cmdBuffer,
1876 const char* pMarker)
1877 {
1878 }
1879
1880 void vkCmdDbgMarkerEnd(
1881 VkCmdBuffer cmdBuffer)
1882 {
1883 }