1 /*
2 * Copyright © 2015 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 */
23
24 #include <assert.h>
25 #include <stdbool.h>
26 #include <string.h>
27 #include <unistd.h>
  28 #include <fcntl.h>
     #include <errno.h>
29
30 #include "anv_private.h"
31 #include "mesa/main/git_sha1.h"
32 #include "util/strtod.h"
33
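/* Open the DRM render node at `path` and probe it: read the chipset id and
 * device info, query the aperture size, and require the kernel features the
 * driver depends on (wait-timeout, execbuf2, LLC, exec-constants). The fd is
 * only used for probing and is closed again before returning.
 */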
34 static VkResult
35 anv_physical_device_init(struct anv_physical_device *device,
36 struct anv_instance *instance,
37 const char *path)
38 {
39 int fd;
40
41 fd = open(path, O_RDWR | O_CLOEXEC);
42 if (fd < 0)
43 return vk_error(VK_ERROR_UNAVAILABLE);
44
45 device->instance = instance;
46 device->path = path;
47
48 device->chipset_id = anv_gem_get_param(fd, I915_PARAM_CHIPSET_ID);
49 if (!device->chipset_id)
50 goto fail;
51
52 device->name = brw_get_device_name(device->chipset_id);
53 device->info = brw_get_device_info(device->chipset_id, -1);
54 if (!device->info)
55 goto fail;
56
57 if (anv_gem_get_aperture(fd, &device->aperture_size) == -1)
58 goto fail;
59
60 if (!anv_gem_get_param(fd, I915_PARAM_HAS_WAIT_TIMEOUT))
61 goto fail;
62
63 if (!anv_gem_get_param(fd, I915_PARAM_HAS_EXECBUF2))
64 goto fail;
65
66 if (!anv_gem_get_param(fd, I915_PARAM_HAS_LLC))
67 goto fail;
68
69 if (!anv_gem_get_param(fd, I915_PARAM_HAS_EXEC_CONSTANTS))
70 goto fail;
71
72 close(fd);
73
74 return VK_SUCCESS;
75
76 fail:
77 close(fd);
78 return vk_error(VK_ERROR_UNAVAILABLE);
79 }
80
81 static void *default_alloc(
82 void* pUserData,
83 size_t size,
84 size_t alignment,
85 VkSystemAllocType allocType)
86 {
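    /* Note: the default allocator ignores the requested alignment; malloc's
     * natural alignment is sufficient for the 8-byte alignments requested by
     * this driver.
     */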
87 return malloc(size);
88 }
89
90 static void default_free(
91 void* pUserData,
92 void* pMem)
93 {
94 free(pMem);
95 }
96
97 static const VkAllocCallbacks default_alloc_callbacks = {
98 .pUserData = NULL,
99 .pfnAlloc = default_alloc,
100 .pfnFree = default_free
101 };
102
103 VkResult anv_CreateInstance(
104 const VkInstanceCreateInfo* pCreateInfo,
105 VkInstance* pInstance)
106 {
107 struct anv_instance *instance;
108 const VkAllocCallbacks *alloc_callbacks = &default_alloc_callbacks;
109 void *user_data = NULL;
110
111 assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_INSTANCE_CREATE_INFO);
112
113 if (pCreateInfo->pAllocCb) {
114 alloc_callbacks = pCreateInfo->pAllocCb;
115 user_data = pCreateInfo->pAllocCb->pUserData;
116 }
117 instance = alloc_callbacks->pfnAlloc(user_data, sizeof(*instance), 8,
118 VK_SYSTEM_ALLOC_TYPE_API_OBJECT);
119 if (!instance)
120 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
121
122 instance->pAllocUserData = alloc_callbacks->pUserData;
123 instance->pfnAlloc = alloc_callbacks->pfnAlloc;
124 instance->pfnFree = alloc_callbacks->pfnFree;
125 instance->apiVersion = pCreateInfo->pAppInfo->apiVersion;
126 instance->physicalDeviceCount = 0;
127
128 _mesa_locale_init();
129
130 VG(VALGRIND_CREATE_MEMPOOL(instance, 0, false));
131
132 *pInstance = anv_instance_to_handle(instance);
133
134 return VK_SUCCESS;
135 }
136
137 VkResult anv_DestroyInstance(
138 VkInstance _instance)
139 {
140 ANV_FROM_HANDLE(anv_instance, instance, _instance);
141
142 VG(VALGRIND_DESTROY_MEMPOOL(instance));
143
144 _mesa_locale_fini();
145
146 instance->pfnFree(instance->pAllocUserData, instance);
147
148 return VK_SUCCESS;
149 }
150
151 static void *
152 anv_instance_alloc(struct anv_instance *instance, size_t size,
153 size_t alignment, VkSystemAllocType allocType)
154 {
155 void *mem = instance->pfnAlloc(instance->pAllocUserData,
156 size, alignment, allocType);
157 if (mem) {
158 VALGRIND_MEMPOOL_ALLOC(instance, mem, size);
159 VALGRIND_MAKE_MEM_UNDEFINED(mem, size);
160 }
161 return mem;
162 }
163
164 static void
165 anv_instance_free(struct anv_instance *instance, void *mem)
166 {
167 if (mem == NULL)
168 return;
169
170 VALGRIND_MEMPOOL_FREE(instance, mem);
171
172 instance->pfnFree(instance->pAllocUserData, mem);
173 }
174
175 VkResult anv_EnumeratePhysicalDevices(
176 VkInstance _instance,
177 uint32_t* pPhysicalDeviceCount,
178 VkPhysicalDevice* pPhysicalDevices)
179 {
180 ANV_FROM_HANDLE(anv_instance, instance, _instance);
181 VkResult result;
182
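    /* The physical device is initialized lazily on first enumeration. Only a
     * single device, the first render node, is supported for now.
     */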
183 if (instance->physicalDeviceCount == 0) {
184 result = anv_physical_device_init(&instance->physicalDevice,
185 instance, "/dev/dri/renderD128");
186 if (result != VK_SUCCESS)
187 return result;
188
189 instance->physicalDeviceCount = 1;
190 }
191
192 /* pPhysicalDeviceCount is an out parameter if pPhysicalDevices is NULL;
193 * otherwise it's an inout parameter.
194 *
195 * The Vulkan spec (git aaed022) says:
196 *
197 * pPhysicalDeviceCount is a pointer to an unsigned integer variable
198 * that is initialized with the number of devices the application is
 199     *    prepared to receive handles to. pPhysicalDevices is a pointer to
200 * an array of at least this many VkPhysicalDevice handles [...].
201 *
202 * Upon success, if pPhysicalDevices is NULL, vkEnumeratePhysicalDevices
203 * overwrites the contents of the variable pointed to by
 204     * pPhysicalDeviceCount with the number of physical devices in the
205 * instance; otherwise, vkEnumeratePhysicalDevices overwrites
206 * pPhysicalDeviceCount with the number of physical handles written to
207 * pPhysicalDevices.
208 */
209 if (!pPhysicalDevices) {
210 *pPhysicalDeviceCount = instance->physicalDeviceCount;
211 } else if (*pPhysicalDeviceCount >= 1) {
212 pPhysicalDevices[0] = anv_physical_device_to_handle(&instance->physicalDevice);
213 *pPhysicalDeviceCount = 1;
214 } else {
215 *pPhysicalDeviceCount = 0;
216 }
217
218 return VK_SUCCESS;
219 }
220
221 VkResult anv_GetPhysicalDeviceFeatures(
222 VkPhysicalDevice physicalDevice,
223 VkPhysicalDeviceFeatures* pFeatures)
224 {
225 anv_finishme("Get correct values for PhysicalDeviceFeatures");
226
227 *pFeatures = (VkPhysicalDeviceFeatures) {
228 .robustBufferAccess = false,
229 .fullDrawIndexUint32 = false,
230 .imageCubeArray = false,
231 .independentBlend = false,
232 .geometryShader = true,
233 .tessellationShader = false,
234 .sampleRateShading = false,
235 .dualSourceBlend = true,
236 .logicOp = true,
237 .instancedDrawIndirect = true,
238 .depthClip = false,
239 .depthBiasClamp = false,
240 .fillModeNonSolid = true,
241 .depthBounds = false,
242 .wideLines = true,
243 .largePoints = true,
244 .textureCompressionETC2 = true,
245 .textureCompressionASTC_LDR = true,
246 .textureCompressionBC = true,
247 .pipelineStatisticsQuery = true,
248 .vertexSideEffects = false,
249 .tessellationSideEffects = false,
250 .geometrySideEffects = false,
251 .fragmentSideEffects = false,
252 .shaderTessellationPointSize = false,
253 .shaderGeometryPointSize = true,
254 .shaderTextureGatherExtended = true,
255 .shaderStorageImageExtendedFormats = false,
256 .shaderStorageImageMultisample = false,
257 .shaderStorageBufferArrayConstantIndexing = false,
258 .shaderStorageImageArrayConstantIndexing = false,
259 .shaderUniformBufferArrayDynamicIndexing = true,
260 .shaderSampledImageArrayDynamicIndexing = false,
261 .shaderStorageBufferArrayDynamicIndexing = false,
262 .shaderStorageImageArrayDynamicIndexing = false,
263 .shaderClipDistance = false,
264 .shaderCullDistance = false,
265 .shaderFloat64 = false,
266 .shaderInt64 = false,
267 .shaderFloat16 = false,
268 .shaderInt16 = false,
269 };
270
271 return VK_SUCCESS;
272 }
273
274 VkResult anv_GetPhysicalDeviceLimits(
275 VkPhysicalDevice physicalDevice,
276 VkPhysicalDeviceLimits* pLimits)
277 {
278 ANV_FROM_HANDLE(anv_physical_device, physical_device, physicalDevice);
279 const struct brw_device_info *devinfo = physical_device->info;
280
281 anv_finishme("Get correct values for PhysicalDeviceLimits");
282
283 *pLimits = (VkPhysicalDeviceLimits) {
284 .maxImageDimension1D = (1 << 14),
285 .maxImageDimension2D = (1 << 14),
286 .maxImageDimension3D = (1 << 10),
287 .maxImageDimensionCube = (1 << 14),
288 .maxImageArrayLayers = (1 << 10),
289 .maxTexelBufferSize = (1 << 14),
290 .maxUniformBufferSize = UINT32_MAX,
291 .maxStorageBufferSize = UINT32_MAX,
292 .maxPushConstantsSize = 128,
293 .maxMemoryAllocationCount = UINT32_MAX,
294 .bufferImageGranularity = 64, /* A cache line */
295 .maxBoundDescriptorSets = MAX_SETS,
296 .maxDescriptorSets = UINT32_MAX,
297 .maxPerStageDescriptorSamplers = 64,
298 .maxPerStageDescriptorUniformBuffers = 64,
299 .maxPerStageDescriptorStorageBuffers = 64,
300 .maxPerStageDescriptorSampledImages = 64,
301 .maxPerStageDescriptorStorageImages = 64,
302 .maxDescriptorSetSamplers = 256,
303 .maxDescriptorSetUniformBuffers = 256,
304 .maxDescriptorSetStorageBuffers = 256,
305 .maxDescriptorSetSampledImages = 256,
306 .maxDescriptorSetStorageImages = 256,
307 .maxVertexInputAttributes = 32,
308 .maxVertexInputAttributeOffset = 256,
309 .maxVertexInputBindingStride = 256,
310 .maxVertexOutputComponents = 32,
311 .maxTessGenLevel = 0,
312 .maxTessPatchSize = 0,
313 .maxTessControlPerVertexInputComponents = 0,
314 .maxTessControlPerVertexOutputComponents = 0,
315 .maxTessControlPerPatchOutputComponents = 0,
316 .maxTessControlTotalOutputComponents = 0,
317 .maxTessEvaluationInputComponents = 0,
318 .maxTessEvaluationOutputComponents = 0,
319 .maxGeometryShaderInvocations = 6,
320 .maxGeometryInputComponents = 16,
321 .maxGeometryOutputComponents = 16,
322 .maxGeometryOutputVertices = 16,
323 .maxGeometryTotalOutputComponents = 16,
324 .maxFragmentInputComponents = 16,
325 .maxFragmentOutputBuffers = 8,
326 .maxFragmentDualSourceBuffers = 2,
327 .maxFragmentCombinedOutputResources = 8,
328 .maxComputeSharedMemorySize = 1024,
329 .maxComputeWorkGroupCount = {
330 16 * devinfo->max_cs_threads,
331 16 * devinfo->max_cs_threads,
332 16 * devinfo->max_cs_threads,
333 },
334 .maxComputeWorkGroupInvocations = 16 * devinfo->max_cs_threads,
335 .maxComputeWorkGroupSize = {
336 16 * devinfo->max_cs_threads,
337 16 * devinfo->max_cs_threads,
338 16 * devinfo->max_cs_threads,
339 },
340 .subPixelPrecisionBits = 4 /* FIXME */,
341 .subTexelPrecisionBits = 4 /* FIXME */,
342 .mipmapPrecisionBits = 4 /* FIXME */,
343 .maxDrawIndexedIndexValue = UINT32_MAX,
344 .maxDrawIndirectInstanceCount = UINT32_MAX,
345 .primitiveRestartForPatches = UINT32_MAX,
346 .maxSamplerLodBias = 16,
347 .maxSamplerAnisotropy = 16,
348 .maxViewports = 16,
349 .maxDynamicViewportStates = UINT32_MAX,
350 .maxViewportDimensions = { (1 << 14), (1 << 14) },
351 .viewportBoundsRange = { -1.0, 1.0 }, /* FIXME */
352 .viewportSubPixelBits = 13, /* We take a float? */
353 .minMemoryMapAlignment = 64, /* A cache line */
354 .minTexelBufferOffsetAlignment = 1,
355 .minUniformBufferOffsetAlignment = 1,
356 .minStorageBufferOffsetAlignment = 1,
357 .minTexelOffset = 0, /* FIXME */
358 .maxTexelOffset = 0, /* FIXME */
359 .minTexelGatherOffset = 0, /* FIXME */
360 .maxTexelGatherOffset = 0, /* FIXME */
361 .minInterpolationOffset = 0, /* FIXME */
362 .maxInterpolationOffset = 0, /* FIXME */
363 .subPixelInterpolationOffsetBits = 0, /* FIXME */
364 .maxFramebufferWidth = (1 << 14),
365 .maxFramebufferHeight = (1 << 14),
366 .maxFramebufferLayers = (1 << 10),
367 .maxFramebufferColorSamples = 8,
368 .maxFramebufferDepthSamples = 8,
369 .maxFramebufferStencilSamples = 8,
370 .maxColorAttachments = MAX_RTS,
371 .maxSampledImageColorSamples = 8,
372 .maxSampledImageDepthSamples = 8,
373 .maxSampledImageIntegerSamples = 1,
374 .maxStorageImageSamples = 1,
375 .maxSampleMaskWords = 1,
376 .timestampFrequency = 1000 * 1000 * 1000 / 80,
377 .maxClipDistances = 0 /* FIXME */,
378 .maxCullDistances = 0 /* FIXME */,
379 .maxCombinedClipAndCullDistances = 0 /* FIXME */,
380 .pointSizeRange = { 0.125, 255.875 },
381 .lineWidthRange = { 0.0, 7.9921875 },
382 .pointSizeGranularity = (1.0 / 8.0),
383 .lineWidthGranularity = (1.0 / 128.0),
384 };
385
386 return VK_SUCCESS;
387 }
388
389 VkResult anv_GetPhysicalDeviceProperties(
390 VkPhysicalDevice physicalDevice,
391 VkPhysicalDeviceProperties* pProperties)
392 {
393 ANV_FROM_HANDLE(anv_physical_device, pdevice, physicalDevice);
394
395 *pProperties = (VkPhysicalDeviceProperties) {
396 .apiVersion = VK_MAKE_VERSION(0, 138, 1),
397 .driverVersion = 1,
398 .vendorId = 0x8086,
399 .deviceId = pdevice->chipset_id,
400 .deviceType = VK_PHYSICAL_DEVICE_TYPE_INTEGRATED_GPU,
401 };
402
403 strcpy(pProperties->deviceName, pdevice->name);
404 snprintf((char *)pProperties->pipelineCacheUUID, VK_UUID_LENGTH,
405 "anv-%s", MESA_GIT_SHA1 + 4);
406
407 return VK_SUCCESS;
408 }
409
410 VkResult anv_GetPhysicalDeviceQueueCount(
411 VkPhysicalDevice physicalDevice,
412 uint32_t* pCount)
413 {
414 *pCount = 1;
415
416 return VK_SUCCESS;
417 }
418
419 VkResult anv_GetPhysicalDeviceQueueProperties(
420 VkPhysicalDevice physicalDevice,
421 uint32_t count,
422 VkPhysicalDeviceQueueProperties* pQueueProperties)
423 {
424 assert(count == 1);
425
426 *pQueueProperties = (VkPhysicalDeviceQueueProperties) {
427 .queueFlags = VK_QUEUE_GRAPHICS_BIT |
428 VK_QUEUE_COMPUTE_BIT |
429 VK_QUEUE_DMA_BIT,
430 .queueCount = 1,
431 .supportsTimestamps = true,
432 };
433
434 return VK_SUCCESS;
435 }
436
437 VkResult anv_GetPhysicalDeviceMemoryProperties(
438 VkPhysicalDevice physicalDevice,
439 VkPhysicalDeviceMemoryProperties* pMemoryProperties)
440 {
441 ANV_FROM_HANDLE(anv_physical_device, physical_device, physicalDevice);
442 VkDeviceSize heap_size;
443
444 /* Reserve some wiggle room for the driver by exposing only 75% of the
445 * aperture to the heap.
446 */
447 heap_size = 3 * physical_device->aperture_size / 4;
448
449 /* The property flags below are valid only for llc platforms. */
450 pMemoryProperties->memoryTypeCount = 1;
451 pMemoryProperties->memoryTypes[0] = (VkMemoryType) {
452 .propertyFlags = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT,
 453       .heapIndex = 0,
454 };
455
456 pMemoryProperties->memoryHeapCount = 1;
457 pMemoryProperties->memoryHeaps[0] = (VkMemoryHeap) {
458 .size = heap_size,
459 .flags = VK_MEMORY_HEAP_HOST_LOCAL,
460 };
461
462 return VK_SUCCESS;
463 }
464
465 PFN_vkVoidFunction anv_GetInstanceProcAddr(
466 VkInstance instance,
467 const char* pName)
468 {
469 return anv_lookup_entrypoint(pName);
470 }
471
472 PFN_vkVoidFunction anv_GetDeviceProcAddr(
473 VkDevice device,
474 const char* pName)
475 {
476 return anv_lookup_entrypoint(pName);
477 }
478
479 static VkResult
480 anv_queue_init(struct anv_device *device, struct anv_queue *queue)
481 {
482 queue->device = device;
483 queue->pool = &device->surface_state_pool;
484
485 queue->completed_serial = anv_state_pool_alloc(queue->pool, 4, 4);
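    /* The queue hands out monotonically increasing serials. The dword
     * allocated below holds the serial of the most recently completed batch,
     * starting at 0; the first submission gets serial 1.
     */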
486 if (queue->completed_serial.map == NULL)
487 return vk_error(VK_ERROR_OUT_OF_DEVICE_MEMORY);
488
489 *(uint32_t *)queue->completed_serial.map = 0;
490 queue->next_serial = 1;
491
492 return VK_SUCCESS;
493 }
494
495 static void
496 anv_queue_finish(struct anv_queue *queue)
497 {
498 #ifdef HAVE_VALGRIND
499 /* This gets torn down with the device so we only need to do this if
500 * valgrind is present.
501 */
502 anv_state_pool_free(queue->pool, queue->completed_serial);
503 #endif
504 }
505
506 static void
507 anv_device_init_border_colors(struct anv_device *device)
508 {
509 static const VkClearColorValue border_colors[] = {
510 [VK_BORDER_COLOR_FLOAT_TRANSPARENT_BLACK] = { .f32 = { 0.0, 0.0, 0.0, 0.0 } },
511 [VK_BORDER_COLOR_FLOAT_OPAQUE_BLACK] = { .f32 = { 0.0, 0.0, 0.0, 1.0 } },
512 [VK_BORDER_COLOR_FLOAT_OPAQUE_WHITE] = { .f32 = { 1.0, 1.0, 1.0, 1.0 } },
513 [VK_BORDER_COLOR_INT_TRANSPARENT_BLACK] = { .u32 = { 0, 0, 0, 0 } },
514 [VK_BORDER_COLOR_INT_OPAQUE_BLACK] = { .u32 = { 0, 0, 0, 1 } },
515 [VK_BORDER_COLOR_INT_OPAQUE_WHITE] = { .u32 = { 1, 1, 1, 1 } },
516 };
517
518 device->border_colors =
519 anv_state_pool_alloc(&device->dynamic_state_pool,
520 sizeof(border_colors), 32);
521 memcpy(device->border_colors.map, border_colors, sizeof(border_colors));
522 }
523
524 VkResult anv_CreateDevice(
525 VkPhysicalDevice physicalDevice,
526 const VkDeviceCreateInfo* pCreateInfo,
527 VkDevice* pDevice)
528 {
529 ANV_FROM_HANDLE(anv_physical_device, physical_device, physicalDevice);
530 struct anv_instance *instance = physical_device->instance;
531 struct anv_device *device;
532
533 assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO);
534
535 switch (physical_device->info->gen) {
536 case 7:
537 driver_layer = &gen7_layer;
538 break;
539 case 8:
540 driver_layer = &gen8_layer;
 541       break;
           default:
              return vk_error(VK_ERROR_UNAVAILABLE);
 542    }
543
544 device = anv_instance_alloc(instance, sizeof(*device), 8,
545 VK_SYSTEM_ALLOC_TYPE_API_OBJECT);
546 if (!device)
547 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
548
549 device->instance = physical_device->instance;
550
551 /* XXX(chadv): Can we dup() physicalDevice->fd here? */
552 device->fd = open(physical_device->path, O_RDWR | O_CLOEXEC);
553 if (device->fd == -1)
554 goto fail_device;
555
556 device->context_id = anv_gem_create_context(device);
557 if (device->context_id == -1)
558 goto fail_fd;
559
560 anv_bo_pool_init(&device->batch_bo_pool, device, ANV_CMD_BUFFER_BATCH_SIZE);
561
562 anv_block_pool_init(&device->dynamic_state_block_pool, device, 2048);
563
564 anv_state_pool_init(&device->dynamic_state_pool,
565 &device->dynamic_state_block_pool);
566
567 anv_block_pool_init(&device->instruction_block_pool, device, 2048);
568 anv_block_pool_init(&device->surface_state_block_pool, device, 2048);
569
570 anv_state_pool_init(&device->surface_state_pool,
571 &device->surface_state_block_pool);
572
573 anv_block_pool_init(&device->scratch_block_pool, device, 0x10000);
574
575 device->info = *physical_device->info;
576
577 device->compiler = anv_compiler_create(device);
578
579 pthread_mutex_init(&device->mutex, NULL);
580
581 anv_queue_init(device, &device->queue);
582
583 anv_device_init_meta(device);
584
585 anv_device_init_border_colors(device);
586
587 *pDevice = anv_device_to_handle(device);
588
589 return VK_SUCCESS;
590
591 fail_fd:
592 close(device->fd);
593 fail_device:
594 anv_device_free(device, device);
595
596 return vk_error(VK_ERROR_UNAVAILABLE);
597 }
598
599 VkResult anv_DestroyDevice(
600 VkDevice _device)
601 {
602 ANV_FROM_HANDLE(anv_device, device, _device);
603
604 anv_compiler_destroy(device->compiler);
605
606 anv_queue_finish(&device->queue);
607
608 anv_device_finish_meta(device);
609
610 #ifdef HAVE_VALGRIND
611 /* We only need to free these to prevent valgrind errors. The backing
612 * BO will go away in a couple of lines so we don't actually leak.
613 */
614 anv_state_pool_free(&device->dynamic_state_pool, device->border_colors);
615 #endif
616
617 anv_bo_pool_finish(&device->batch_bo_pool);
618 anv_state_pool_finish(&device->dynamic_state_pool);
619 anv_block_pool_finish(&device->dynamic_state_block_pool);
620 anv_block_pool_finish(&device->instruction_block_pool);
621 anv_state_pool_finish(&device->surface_state_pool);
622 anv_block_pool_finish(&device->surface_state_block_pool);
623 anv_block_pool_finish(&device->scratch_block_pool);
624
625 close(device->fd);
626
627 anv_instance_free(device->instance, device);
628
629 return VK_SUCCESS;
630 }
631
632 static const VkExtensionProperties global_extensions[] = {
633 {
634 .extName = "VK_WSI_LunarG",
635 .specVersion = 3
636 }
637 };
638
639 VkResult anv_GetGlobalExtensionProperties(
640 const char* pLayerName,
641 uint32_t* pCount,
642 VkExtensionProperties* pProperties)
643 {
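    /* Standard two-call idiom: with a NULL pProperties, just report how many
     * extensions are available.
     */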
644 if (pProperties == NULL) {
645 *pCount = ARRAY_SIZE(global_extensions);
646 return VK_SUCCESS;
647 }
648
 649    assert(*pCount >= ARRAY_SIZE(global_extensions));
650
651 *pCount = ARRAY_SIZE(global_extensions);
652 memcpy(pProperties, global_extensions, sizeof(global_extensions));
653
654 return VK_SUCCESS;
655 }
656
657 VkResult anv_GetPhysicalDeviceExtensionProperties(
658 VkPhysicalDevice physicalDevice,
659 const char* pLayerName,
660 uint32_t* pCount,
661 VkExtensionProperties* pProperties)
662 {
663 if (pProperties == NULL) {
664 *pCount = 0;
665 return VK_SUCCESS;
666 }
667
668 /* None supported at this time */
669 return vk_error(VK_ERROR_INVALID_EXTENSION);
670 }
671
672 VkResult anv_GetGlobalLayerProperties(
673 uint32_t* pCount,
674 VkLayerProperties* pProperties)
675 {
676 if (pProperties == NULL) {
677 *pCount = 0;
678 return VK_SUCCESS;
679 }
680
681 /* None supported at this time */
682 return vk_error(VK_ERROR_INVALID_LAYER);
683 }
684
685 VkResult anv_GetPhysicalDeviceLayerProperties(
686 VkPhysicalDevice physicalDevice,
687 uint32_t* pCount,
688 VkLayerProperties* pProperties)
689 {
690 if (pProperties == NULL) {
691 *pCount = 0;
692 return VK_SUCCESS;
693 }
694
695 /* None supported at this time */
696 return vk_error(VK_ERROR_INVALID_LAYER);
697 }
698
699 VkResult anv_GetDeviceQueue(
700 VkDevice _device,
701 uint32_t queueNodeIndex,
702 uint32_t queueIndex,
703 VkQueue* pQueue)
704 {
705 ANV_FROM_HANDLE(anv_device, device, _device);
706
707 assert(queueIndex == 0);
708
709 *pQueue = anv_queue_to_handle(&device->queue);
710
711 return VK_SUCCESS;
712 }
713
714 VkResult anv_QueueSubmit(
715 VkQueue _queue,
716 uint32_t cmdBufferCount,
717 const VkCmdBuffer* pCmdBuffers,
718 VkFence _fence)
719 {
720 ANV_FROM_HANDLE(anv_queue, queue, _queue);
721 ANV_FROM_HANDLE(anv_fence, fence, _fence);
722 struct anv_device *device = queue->device;
723 int ret;
724
725 for (uint32_t i = 0; i < cmdBufferCount; i++) {
726 ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, pCmdBuffers[i]);
727
728 assert(cmd_buffer->level == VK_CMD_BUFFER_LEVEL_PRIMARY);
729
730 ret = anv_gem_execbuffer(device, &cmd_buffer->execbuf2.execbuf);
731 if (ret != 0)
732 return vk_error(VK_ERROR_UNKNOWN);
733
734 if (fence) {
735 ret = anv_gem_execbuffer(device, &fence->execbuf);
736 if (ret != 0)
737 return vk_error(VK_ERROR_UNKNOWN);
738 }
739
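       /* The kernel may have relocated the BOs during execbuffer; record the
        * offsets it chose so subsequent submits can use them as the presumed
        * offsets.
        */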
 740       for (uint32_t j = 0; j < cmd_buffer->execbuf2.bo_count; j++)
 741          cmd_buffer->execbuf2.bos[j]->offset = cmd_buffer->execbuf2.objects[j].offset;
742 }
743
744 return VK_SUCCESS;
745 }
746
747 VkResult anv_QueueWaitIdle(
748 VkQueue _queue)
749 {
750 ANV_FROM_HANDLE(anv_queue, queue, _queue);
751
752 return vkDeviceWaitIdle(anv_device_to_handle(queue->device));
753 }
754
755 VkResult anv_DeviceWaitIdle(
756 VkDevice _device)
757 {
758 ANV_FROM_HANDLE(anv_device, device, _device);
759 struct anv_state state;
760 struct anv_batch batch;
761 struct drm_i915_gem_execbuffer2 execbuf;
762 struct drm_i915_gem_exec_object2 exec2_objects[1];
763 struct anv_bo *bo = NULL;
764 VkResult result;
765 int64_t timeout;
766 int ret;
767
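    /* Rather than tracking every outstanding BO, submit a trivial batch
     * (just MI_BATCH_BUFFER_END) and wait for it; execution within a context
     * is in order, so it completes only after all previously submitted work.
     */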
768 state = anv_state_pool_alloc(&device->dynamic_state_pool, 32, 32);
769 bo = &device->dynamic_state_pool.block_pool->bo;
770 batch.start = batch.next = state.map;
771 batch.end = state.map + 32;
772 anv_batch_emit(&batch, GEN7_MI_BATCH_BUFFER_END);
773 anv_batch_emit(&batch, GEN7_MI_NOOP);
774
775 exec2_objects[0].handle = bo->gem_handle;
776 exec2_objects[0].relocation_count = 0;
777 exec2_objects[0].relocs_ptr = 0;
778 exec2_objects[0].alignment = 0;
779 exec2_objects[0].offset = bo->offset;
780 exec2_objects[0].flags = 0;
781 exec2_objects[0].rsvd1 = 0;
782 exec2_objects[0].rsvd2 = 0;
783
784 execbuf.buffers_ptr = (uintptr_t) exec2_objects;
785 execbuf.buffer_count = 1;
786 execbuf.batch_start_offset = state.offset;
787 execbuf.batch_len = batch.next - state.map;
788 execbuf.cliprects_ptr = 0;
789 execbuf.num_cliprects = 0;
790 execbuf.DR1 = 0;
791 execbuf.DR4 = 0;
792
793 execbuf.flags =
794 I915_EXEC_HANDLE_LUT | I915_EXEC_NO_RELOC | I915_EXEC_RENDER;
795 execbuf.rsvd1 = device->context_id;
796 execbuf.rsvd2 = 0;
797
798 ret = anv_gem_execbuffer(device, &execbuf);
799 if (ret != 0) {
800 result = vk_error(VK_ERROR_UNKNOWN);
801 goto fail;
802 }
803
804 timeout = INT64_MAX;
805 ret = anv_gem_wait(device, bo->gem_handle, &timeout);
806 if (ret != 0) {
807 result = vk_error(VK_ERROR_UNKNOWN);
808 goto fail;
809 }
810
811 anv_state_pool_free(&device->dynamic_state_pool, state);
812
813 return VK_SUCCESS;
814
815 fail:
816 anv_state_pool_free(&device->dynamic_state_pool, state);
817
818 return result;
819 }
820
821 void *
822 anv_device_alloc(struct anv_device * device,
823 size_t size,
824 size_t alignment,
825 VkSystemAllocType allocType)
826 {
827 return anv_instance_alloc(device->instance, size, alignment, allocType);
828 }
829
830 void
831 anv_device_free(struct anv_device * device,
832 void * mem)
833 {
834 anv_instance_free(device->instance, mem);
835 }
836
837 VkResult
838 anv_bo_init_new(struct anv_bo *bo, struct anv_device *device, uint64_t size)
839 {
840 bo->gem_handle = anv_gem_create(device, size);
841 if (!bo->gem_handle)
842 return vk_error(VK_ERROR_OUT_OF_DEVICE_MEMORY);
843
844 bo->map = NULL;
845 bo->index = 0;
846 bo->offset = 0;
847 bo->size = size;
848
849 return VK_SUCCESS;
850 }
851
852 VkResult anv_AllocMemory(
853 VkDevice _device,
854 const VkMemoryAllocInfo* pAllocInfo,
855 VkDeviceMemory* pMem)
856 {
857 ANV_FROM_HANDLE(anv_device, device, _device);
858 struct anv_device_memory *mem;
859 VkResult result;
860
861 assert(pAllocInfo->sType == VK_STRUCTURE_TYPE_MEMORY_ALLOC_INFO);
862
863 if (pAllocInfo->memoryTypeIndex != 0) {
864 /* We support exactly one memory heap. */
865 return vk_error(VK_ERROR_INVALID_VALUE);
866 }
867
868 /* FINISHME: Fail if allocation request exceeds heap size. */
869
870 mem = anv_device_alloc(device, sizeof(*mem), 8,
871 VK_SYSTEM_ALLOC_TYPE_API_OBJECT);
872 if (mem == NULL)
873 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
874
875 result = anv_bo_init_new(&mem->bo, device, pAllocInfo->allocationSize);
876 if (result != VK_SUCCESS)
877 goto fail;
878
879 *pMem = anv_device_memory_to_handle(mem);
880
881 return VK_SUCCESS;
882
883 fail:
884 anv_device_free(device, mem);
885
886 return result;
887 }
888
889 VkResult anv_FreeMemory(
890 VkDevice _device,
891 VkDeviceMemory _mem)
892 {
893 ANV_FROM_HANDLE(anv_device, device, _device);
894 ANV_FROM_HANDLE(anv_device_memory, mem, _mem);
895
896 if (mem->bo.map)
897 anv_gem_munmap(mem->bo.map, mem->bo.size);
898
899 if (mem->bo.gem_handle != 0)
900 anv_gem_close(device, mem->bo.gem_handle);
901
902 anv_device_free(device, mem);
903
904 return VK_SUCCESS;
905 }
906
907 VkResult anv_MapMemory(
908 VkDevice _device,
909 VkDeviceMemory _mem,
910 VkDeviceSize offset,
911 VkDeviceSize size,
912 VkMemoryMapFlags flags,
913 void** ppData)
914 {
915 ANV_FROM_HANDLE(anv_device, device, _device);
916 ANV_FROM_HANDLE(anv_device_memory, mem, _mem);
917
918 /* FIXME: Is this supposed to be thread safe? Since vkUnmapMemory() only
919 * takes a VkDeviceMemory pointer, it seems like only one map of the memory
920 * at a time is valid. We could just mmap up front and return an offset
921 * pointer here, but that may exhaust virtual memory on 32 bit
922 * userspace. */
923
924 mem->map = anv_gem_mmap(device, mem->bo.gem_handle, offset, size);
925 mem->map_size = size;
926
927 *ppData = mem->map;
928
929 return VK_SUCCESS;
930 }
931
932 VkResult anv_UnmapMemory(
933 VkDevice _device,
934 VkDeviceMemory _mem)
935 {
936 ANV_FROM_HANDLE(anv_device_memory, mem, _mem);
937
938 anv_gem_munmap(mem->map, mem->map_size);
939
940 return VK_SUCCESS;
941 }
942
943 VkResult anv_FlushMappedMemoryRanges(
944 VkDevice device,
945 uint32_t memRangeCount,
946 const VkMappedMemoryRange* pMemRanges)
947 {
948 /* clflush here for !llc platforms */
949
950 return VK_SUCCESS;
951 }
952
953 VkResult anv_InvalidateMappedMemoryRanges(
954 VkDevice device,
955 uint32_t memRangeCount,
956 const VkMappedMemoryRange* pMemRanges)
957 {
958 return anv_FlushMappedMemoryRanges(device, memRangeCount, pMemRanges);
959 }
960
961 VkResult anv_GetBufferMemoryRequirements(
962 VkDevice device,
963 VkBuffer _buffer,
964 VkMemoryRequirements* pMemoryRequirements)
965 {
966 ANV_FROM_HANDLE(anv_buffer, buffer, _buffer);
967
968 /* The Vulkan spec (git aaed022) says:
969 *
970 * memoryTypeBits is a bitfield and contains one bit set for every
971 * supported memory type for the resource. The bit `1<<i` is set if and
972 * only if the memory type `i` in the VkPhysicalDeviceMemoryProperties
973 * structure for the physical device is supported.
974 *
975 * We support exactly one memory type.
976 */
977 pMemoryRequirements->memoryTypeBits = 1;
978
979 pMemoryRequirements->size = buffer->size;
980 pMemoryRequirements->alignment = 16;
981
982 return VK_SUCCESS;
983 }
984
985 VkResult anv_GetImageMemoryRequirements(
986 VkDevice device,
987 VkImage _image,
988 VkMemoryRequirements* pMemoryRequirements)
989 {
990 ANV_FROM_HANDLE(anv_image, image, _image);
991
992 /* The Vulkan spec (git aaed022) says:
993 *
994 * memoryTypeBits is a bitfield and contains one bit set for every
995 * supported memory type for the resource. The bit `1<<i` is set if and
996 * only if the memory type `i` in the VkPhysicalDeviceMemoryProperties
997 * structure for the physical device is supported.
998 *
999 * We support exactly one memory type.
1000 */
1001 pMemoryRequirements->memoryTypeBits = 1;
1002
1003 pMemoryRequirements->size = image->size;
1004 pMemoryRequirements->alignment = image->alignment;
1005
1006 return VK_SUCCESS;
1007 }
1008
1009 VkResult anv_GetImageSparseMemoryRequirements(
1010 VkDevice device,
1011 VkImage image,
1012 uint32_t* pNumRequirements,
1013 VkSparseImageMemoryRequirements* pSparseMemoryRequirements)
1014 {
1015 return vk_error(VK_UNSUPPORTED);
1016 }
1017
1018 VkResult anv_GetDeviceMemoryCommitment(
1019 VkDevice device,
1020 VkDeviceMemory memory,
1021 VkDeviceSize* pCommittedMemoryInBytes)
1022 {
1023 *pCommittedMemoryInBytes = 0;
1024 stub_return(VK_SUCCESS);
1025 }
1026
1027 VkResult anv_BindBufferMemory(
1028 VkDevice device,
1029 VkBuffer _buffer,
1030 VkDeviceMemory _mem,
1031 VkDeviceSize memOffset)
1032 {
1033 ANV_FROM_HANDLE(anv_device_memory, mem, _mem);
1034 ANV_FROM_HANDLE(anv_buffer, buffer, _buffer);
1035
1036 buffer->bo = &mem->bo;
1037 buffer->offset = memOffset;
1038
1039 return VK_SUCCESS;
1040 }
1041
1042 VkResult anv_BindImageMemory(
1043 VkDevice device,
1044 VkImage _image,
1045 VkDeviceMemory _mem,
1046 VkDeviceSize memOffset)
1047 {
1048 ANV_FROM_HANDLE(anv_device_memory, mem, _mem);
1049 ANV_FROM_HANDLE(anv_image, image, _image);
1050
1051 image->bo = &mem->bo;
1052 image->offset = memOffset;
1053
1054 return VK_SUCCESS;
1055 }
1056
1057 VkResult anv_QueueBindSparseBufferMemory(
1058 VkQueue queue,
1059 VkBuffer buffer,
1060 uint32_t numBindings,
1061 const VkSparseMemoryBindInfo* pBindInfo)
1062 {
1063 stub_return(VK_UNSUPPORTED);
1064 }
1065
1066 VkResult anv_QueueBindSparseImageOpaqueMemory(
1067 VkQueue queue,
1068 VkImage image,
1069 uint32_t numBindings,
1070 const VkSparseMemoryBindInfo* pBindInfo)
1071 {
1072 stub_return(VK_UNSUPPORTED);
1073 }
1074
1075 VkResult anv_QueueBindSparseImageMemory(
1076 VkQueue queue,
1077 VkImage image,
1078 uint32_t numBindings,
1079 const VkSparseImageMemoryBindInfo* pBindInfo)
1080 {
1081 stub_return(VK_UNSUPPORTED);
1082 }
1083
1084 VkResult anv_CreateFence(
1085 VkDevice _device,
1086 const VkFenceCreateInfo* pCreateInfo,
1087 VkFence* pFence)
1088 {
1089 ANV_FROM_HANDLE(anv_device, device, _device);
1090 struct anv_fence *fence;
1091 struct anv_batch batch;
1092 VkResult result;
1093
1094 const uint32_t fence_size = 128;
1095
1096 assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_FENCE_CREATE_INFO);
1097
1098 fence = anv_device_alloc(device, sizeof(*fence), 8,
1099 VK_SYSTEM_ALLOC_TYPE_API_OBJECT);
1100 if (fence == NULL)
1101 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
1102
1103 result = anv_bo_init_new(&fence->bo, device, fence_size);
1104 if (result != VK_SUCCESS)
1105 goto fail;
1106
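    /* A fence is a small BO containing only MI_BATCH_BUFFER_END. It gets
     * submitted right after the command buffers it guards, and completion is
     * then observable with gem_wait on the BO (see anv_GetFenceStatus).
     */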
1107 fence->bo.map =
1108 anv_gem_mmap(device, fence->bo.gem_handle, 0, fence->bo.size);
1109 batch.next = batch.start = fence->bo.map;
1110 batch.end = fence->bo.map + fence->bo.size;
1111 anv_batch_emit(&batch, GEN7_MI_BATCH_BUFFER_END);
1112 anv_batch_emit(&batch, GEN7_MI_NOOP);
1113
1114 fence->exec2_objects[0].handle = fence->bo.gem_handle;
1115 fence->exec2_objects[0].relocation_count = 0;
1116 fence->exec2_objects[0].relocs_ptr = 0;
1117 fence->exec2_objects[0].alignment = 0;
1118 fence->exec2_objects[0].offset = fence->bo.offset;
1119 fence->exec2_objects[0].flags = 0;
1120 fence->exec2_objects[0].rsvd1 = 0;
1121 fence->exec2_objects[0].rsvd2 = 0;
1122
1123 fence->execbuf.buffers_ptr = (uintptr_t) fence->exec2_objects;
1124 fence->execbuf.buffer_count = 1;
1125 fence->execbuf.batch_start_offset = 0;
1126 fence->execbuf.batch_len = batch.next - fence->bo.map;
1127 fence->execbuf.cliprects_ptr = 0;
1128 fence->execbuf.num_cliprects = 0;
1129 fence->execbuf.DR1 = 0;
1130 fence->execbuf.DR4 = 0;
1131
1132 fence->execbuf.flags =
1133 I915_EXEC_HANDLE_LUT | I915_EXEC_NO_RELOC | I915_EXEC_RENDER;
1134 fence->execbuf.rsvd1 = device->context_id;
1135 fence->execbuf.rsvd2 = 0;
1136
1137 *pFence = anv_fence_to_handle(fence);
1138
1139 return VK_SUCCESS;
1140
1141 fail:
1142 anv_device_free(device, fence);
1143
1144 return result;
1145 }
1146
1147 VkResult anv_DestroyFence(
1148 VkDevice _device,
1149 VkFence _fence)
1150 {
1151 ANV_FROM_HANDLE(anv_device, device, _device);
1152 ANV_FROM_HANDLE(anv_fence, fence, _fence);
1153
1154 anv_gem_munmap(fence->bo.map, fence->bo.size);
1155 anv_gem_close(device, fence->bo.gem_handle);
1156 anv_device_free(device, fence);
1157
1158 return VK_SUCCESS;
1159 }
1160
1161 VkResult anv_ResetFences(
1162 VkDevice _device,
1163 uint32_t fenceCount,
1164 const VkFence* pFences)
1165 {
1166 for (uint32_t i = 0; i < fenceCount; i++) {
1167 ANV_FROM_HANDLE(anv_fence, fence, pFences[i]);
1168 fence->ready = false;
1169 }
1170
1171 return VK_SUCCESS;
1172 }
1173
1174 VkResult anv_GetFenceStatus(
1175 VkDevice _device,
1176 VkFence _fence)
1177 {
1178 ANV_FROM_HANDLE(anv_device, device, _device);
1179 ANV_FROM_HANDLE(anv_fence, fence, _fence);
1180 int64_t t = 0;
1181 int ret;
1182
1183 if (fence->ready)
1184 return VK_SUCCESS;
1185
1186 ret = anv_gem_wait(device, fence->bo.gem_handle, &t);
1187 if (ret == 0) {
1188 fence->ready = true;
1189 return VK_SUCCESS;
1190 }
1191
1192 return VK_NOT_READY;
1193 }
1194
1195 VkResult anv_WaitForFences(
1196 VkDevice _device,
1197 uint32_t fenceCount,
1198 const VkFence* pFences,
1199 VkBool32 waitAll,
1200 uint64_t timeout)
1201 {
1202 ANV_FROM_HANDLE(anv_device, device, _device);
1203 int64_t t = timeout;
1204 int ret;
1205
1206 /* FIXME: handle !waitAll */
1207
1208 for (uint32_t i = 0; i < fenceCount; i++) {
1209 ANV_FROM_HANDLE(anv_fence, fence, pFences[i]);
1210 ret = anv_gem_wait(device, fence->bo.gem_handle, &t);
1211 if (ret == -1 && errno == ETIME)
1212 return VK_TIMEOUT;
1213 else if (ret == -1)
1214 return vk_error(VK_ERROR_UNKNOWN);
1215 }
1216
1217 return VK_SUCCESS;
1218 }
1219
1220 // Queue semaphore functions
1221
1222 VkResult anv_CreateSemaphore(
1223 VkDevice device,
1224 const VkSemaphoreCreateInfo* pCreateInfo,
1225 VkSemaphore* pSemaphore)
1226 {
1227 stub_return(VK_UNSUPPORTED);
1228 }
1229
1230 VkResult anv_DestroySemaphore(
1231 VkDevice device,
1232 VkSemaphore semaphore)
1233 {
1234 stub_return(VK_UNSUPPORTED);
1235 }
1236
1237 VkResult anv_QueueSignalSemaphore(
1238 VkQueue queue,
1239 VkSemaphore semaphore)
1240 {
1241 stub_return(VK_UNSUPPORTED);
1242 }
1243
1244 VkResult anv_QueueWaitSemaphore(
1245 VkQueue queue,
1246 VkSemaphore semaphore)
1247 {
1248 stub_return(VK_UNSUPPORTED);
1249 }
1250
1251 // Event functions
1252
1253 VkResult anv_CreateEvent(
1254 VkDevice device,
1255 const VkEventCreateInfo* pCreateInfo,
1256 VkEvent* pEvent)
1257 {
1258 stub_return(VK_UNSUPPORTED);
1259 }
1260
1261 VkResult anv_DestroyEvent(
1262 VkDevice device,
1263 VkEvent event)
1264 {
1265 stub_return(VK_UNSUPPORTED);
1266 }
1267
1268 VkResult anv_GetEventStatus(
1269 VkDevice device,
1270 VkEvent event)
1271 {
1272 stub_return(VK_UNSUPPORTED);
1273 }
1274
1275 VkResult anv_SetEvent(
1276 VkDevice device,
1277 VkEvent event)
1278 {
1279 stub_return(VK_UNSUPPORTED);
1280 }
1281
1282 VkResult anv_ResetEvent(
1283 VkDevice device,
1284 VkEvent event)
1285 {
1286 stub_return(VK_UNSUPPORTED);
1287 }
1288
1289 // Buffer functions
1290
1291 VkResult anv_CreateBuffer(
1292 VkDevice _device,
1293 const VkBufferCreateInfo* pCreateInfo,
1294 VkBuffer* pBuffer)
1295 {
1296 ANV_FROM_HANDLE(anv_device, device, _device);
1297 struct anv_buffer *buffer;
1298
1299 assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO);
1300
1301 buffer = anv_device_alloc(device, sizeof(*buffer), 8,
1302 VK_SYSTEM_ALLOC_TYPE_API_OBJECT);
1303 if (buffer == NULL)
1304 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
1305
1306 buffer->size = pCreateInfo->size;
1307 buffer->bo = NULL;
1308 buffer->offset = 0;
1309
1310 *pBuffer = anv_buffer_to_handle(buffer);
1311
1312 return VK_SUCCESS;
1313 }
1314
1315 VkResult anv_DestroyBuffer(
1316 VkDevice _device,
1317 VkBuffer _buffer)
1318 {
1319 ANV_FROM_HANDLE(anv_device, device, _device);
1320 ANV_FROM_HANDLE(anv_buffer, buffer, _buffer);
1321
1322 anv_device_free(device, buffer);
1323
1324 return VK_SUCCESS;
1325 }
1326
1327 void
1328 anv_fill_buffer_surface_state(struct anv_device *device, void *state,
1329 const struct anv_format *format,
1330 uint32_t offset, uint32_t range)
1331 {
1332 switch (device->info.gen) {
1333 case 8:
1334 gen8_fill_buffer_surface_state(state, format, offset, range);
1335 break;
1336 default:
 1337       unreachable("unsupported gen");
1338 }
1339 }
1340
1341 VkResult
1342 anv_buffer_view_create(
1343 struct anv_device * device,
1344 const VkBufferViewCreateInfo* pCreateInfo,
1345 struct anv_buffer_view ** view_out)
1346 {
1347 ANV_FROM_HANDLE(anv_buffer, buffer, pCreateInfo->buffer);
1348 struct anv_buffer_view *view;
1349
1350 assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_BUFFER_VIEW_CREATE_INFO);
1351
1352 view = anv_device_alloc(device, sizeof(*view), 8,
1353 VK_SYSTEM_ALLOC_TYPE_API_OBJECT);
1354 if (view == NULL)
1355 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
1356
1357 view->view = (struct anv_surface_view) {
1358 .bo = buffer->bo,
1359 .offset = buffer->offset + pCreateInfo->offset,
1360 .surface_state = anv_state_pool_alloc(&device->surface_state_pool, 64, 64),
1361 .format = anv_format_for_vk_format(pCreateInfo->format),
1362 .range = pCreateInfo->range,
1363 };
1364
1365 *view_out = view;
1366
1367 return VK_SUCCESS;
1368 }
1369
1370
1371 VkResult anv_CreateBufferView(
1372 VkDevice _device,
1373 const VkBufferViewCreateInfo* pCreateInfo,
1374 VkBufferView* pView)
1375 {
1376 return driver_layer->CreateBufferView(_device, pCreateInfo, pView);
1377 }
1378
1379 VkResult anv_DestroyBufferView(
1380 VkDevice _device,
1381 VkBufferView _bview)
1382 {
1383 ANV_FROM_HANDLE(anv_device, device, _device);
1384 ANV_FROM_HANDLE(anv_buffer_view, bview, _bview);
1385
1386 anv_surface_view_fini(device, &bview->view);
1387 anv_device_free(device, bview);
1388
1389 return VK_SUCCESS;
1390 }
1391
1392 VkResult anv_CreateSampler(
1393 VkDevice _device,
1394 const VkSamplerCreateInfo* pCreateInfo,
1395 VkSampler* pSampler)
1396 {
1397 return driver_layer->CreateSampler(_device, pCreateInfo, pSampler);
1398 }
1399
1400 VkResult anv_DestroySampler(
1401 VkDevice _device,
1402 VkSampler _sampler)
1403 {
1404 ANV_FROM_HANDLE(anv_device, device, _device);
1405 ANV_FROM_HANDLE(anv_sampler, sampler, _sampler);
1406
1407 anv_device_free(device, sampler);
1408
1409 return VK_SUCCESS;
1410 }
1411
1412 // Descriptor set functions
1413
1414 VkResult anv_CreateDescriptorSetLayout(
1415 VkDevice _device,
1416 const VkDescriptorSetLayoutCreateInfo* pCreateInfo,
1417 VkDescriptorSetLayout* pSetLayout)
1418 {
1419 ANV_FROM_HANDLE(anv_device, device, _device);
1420 struct anv_descriptor_set_layout *set_layout;
1421
1422 assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO);
1423
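    /* First pass over the bindings: count samplers, surfaces, and dynamic
     * buffers per shader stage so the layout and its per-stage slot arrays
     * can be sized with a single allocation.
     */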
1424 uint32_t sampler_count[VK_SHADER_STAGE_NUM] = { 0, };
1425 uint32_t surface_count[VK_SHADER_STAGE_NUM] = { 0, };
1426 uint32_t num_dynamic_buffers = 0;
1427 uint32_t count = 0;
1428 uint32_t stages = 0;
1429 uint32_t s;
1430
1431 for (uint32_t i = 0; i < pCreateInfo->count; i++) {
1432 switch (pCreateInfo->pBinding[i].descriptorType) {
1433 case VK_DESCRIPTOR_TYPE_SAMPLER:
1434 case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
1435 for_each_bit(s, pCreateInfo->pBinding[i].stageFlags)
1436 sampler_count[s] += pCreateInfo->pBinding[i].arraySize;
1437 break;
1438 default:
1439 break;
1440 }
1441
1442 switch (pCreateInfo->pBinding[i].descriptorType) {
1443 case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
1444 case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE:
1445 case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
1446 case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
1447 case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
1448 case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
1449 case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
1450 case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
1451 case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
1452 case VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT:
1453 for_each_bit(s, pCreateInfo->pBinding[i].stageFlags)
1454 surface_count[s] += pCreateInfo->pBinding[i].arraySize;
1455 break;
1456 default:
1457 break;
1458 }
1459
1460 switch (pCreateInfo->pBinding[i].descriptorType) {
1461 case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
1462 case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
1463 num_dynamic_buffers += pCreateInfo->pBinding[i].arraySize;
1464 break;
1465 default:
1466 break;
1467 }
1468
1469 stages |= pCreateInfo->pBinding[i].stageFlags;
1470 count += pCreateInfo->pBinding[i].arraySize;
1471 }
1472
1473 uint32_t sampler_total = 0;
1474 uint32_t surface_total = 0;
1475 for (uint32_t s = 0; s < VK_SHADER_STAGE_NUM; s++) {
1476 sampler_total += sampler_count[s];
1477 surface_total += surface_count[s];
1478 }
1479
1480 size_t size = sizeof(*set_layout) +
1481 (sampler_total + surface_total) * sizeof(set_layout->entries[0]);
1482 set_layout = anv_device_alloc(device, size, 8,
1483 VK_SYSTEM_ALLOC_TYPE_API_OBJECT);
1484 if (!set_layout)
1485 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
1486
1487 set_layout->num_dynamic_buffers = num_dynamic_buffers;
1488 set_layout->count = count;
1489 set_layout->shader_stages = stages;
1490
1491 struct anv_descriptor_slot *p = set_layout->entries;
1492 struct anv_descriptor_slot *sampler[VK_SHADER_STAGE_NUM];
1493 struct anv_descriptor_slot *surface[VK_SHADER_STAGE_NUM];
1494 for (uint32_t s = 0; s < VK_SHADER_STAGE_NUM; s++) {
1495 set_layout->stage[s].surface_count = surface_count[s];
1496 set_layout->stage[s].surface_start = surface[s] = p;
1497 p += surface_count[s];
1498 set_layout->stage[s].sampler_count = sampler_count[s];
1499 set_layout->stage[s].sampler_start = sampler[s] = p;
1500 p += sampler_count[s];
1501 }
1502
1503 uint32_t descriptor = 0;
1504 int8_t dynamic_slot = 0;
1505 bool is_dynamic;
1506 for (uint32_t i = 0; i < pCreateInfo->count; i++) {
1507 switch (pCreateInfo->pBinding[i].descriptorType) {
1508 case VK_DESCRIPTOR_TYPE_SAMPLER:
1509 case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
1510 for_each_bit(s, pCreateInfo->pBinding[i].stageFlags)
1511 for (uint32_t j = 0; j < pCreateInfo->pBinding[i].arraySize; j++) {
1512 sampler[s]->index = descriptor + j;
1513 sampler[s]->dynamic_slot = -1;
1514 sampler[s]++;
1515 }
1516 break;
1517 default:
1518 break;
1519 }
1520
1521 switch (pCreateInfo->pBinding[i].descriptorType) {
1522 case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
1523 case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
1524 is_dynamic = true;
1525 break;
1526 default:
1527 is_dynamic = false;
1528 break;
1529 }
1530
1531 switch (pCreateInfo->pBinding[i].descriptorType) {
1532 case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
1533 case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE:
1534 case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
1535 case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
1536 case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
1537 case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
1538 case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
1539 case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
1540 case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
1541 case VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT:
1542 for_each_bit(s, pCreateInfo->pBinding[i].stageFlags)
1543 for (uint32_t j = 0; j < pCreateInfo->pBinding[i].arraySize; j++) {
1544 surface[s]->index = descriptor + j;
1545 if (is_dynamic)
1546 surface[s]->dynamic_slot = dynamic_slot + j;
1547 else
1548 surface[s]->dynamic_slot = -1;
1549 surface[s]++;
1550 }
1551 break;
1552 default:
1553 break;
1554 }
1555
1556 if (is_dynamic)
1557 dynamic_slot += pCreateInfo->pBinding[i].arraySize;
1558
1559 descriptor += pCreateInfo->pBinding[i].arraySize;
1560 }
1561
1562 *pSetLayout = anv_descriptor_set_layout_to_handle(set_layout);
1563
1564 return VK_SUCCESS;
1565 }
1566
1567 VkResult anv_DestroyDescriptorSetLayout(
1568 VkDevice _device,
1569 VkDescriptorSetLayout _set_layout)
1570 {
1571 ANV_FROM_HANDLE(anv_device, device, _device);
1572 ANV_FROM_HANDLE(anv_descriptor_set_layout, set_layout, _set_layout);
1573
1574 anv_device_free(device, set_layout);
1575
1576 return VK_SUCCESS;
1577 }
1578
1579 VkResult anv_CreateDescriptorPool(
1580 VkDevice device,
1581 VkDescriptorPoolUsage poolUsage,
1582 uint32_t maxSets,
1583 const VkDescriptorPoolCreateInfo* pCreateInfo,
1584 VkDescriptorPool* pDescriptorPool)
1585 {
1586 anv_finishme("VkDescriptorPool is a stub");
1587 pDescriptorPool->handle = 1;
1588 return VK_SUCCESS;
1589 }
1590
1591 VkResult anv_DestroyDescriptorPool(
1592 VkDevice _device,
1593 VkDescriptorPool _pool)
1594 {
1595 anv_finishme("VkDescriptorPool is a stub: free the pool's descriptor sets");
1596 return VK_SUCCESS;
1597 }
1598
1599 VkResult anv_ResetDescriptorPool(
1600 VkDevice device,
1601 VkDescriptorPool descriptorPool)
1602 {
1603 anv_finishme("VkDescriptorPool is a stub: free the pool's descriptor sets");
1604 return VK_SUCCESS;
1605 }
1606
1607 VkResult
1608 anv_descriptor_set_create(struct anv_device *device,
1609 const struct anv_descriptor_set_layout *layout,
1610 struct anv_descriptor_set **out_set)
1611 {
1612 struct anv_descriptor_set *set;
1613 size_t size = sizeof(*set) + layout->count * sizeof(set->descriptors[0]);
1614
1615 set = anv_device_alloc(device, size, 8, VK_SYSTEM_ALLOC_TYPE_API_OBJECT);
1616 if (!set)
1617 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
1618
 1619    /* A descriptor set may not be 100% filled. Clear the set so we can
1620 * later detect holes in it.
1621 */
1622 memset(set, 0, size);
1623
1624 *out_set = set;
1625
1626 return VK_SUCCESS;
1627 }
1628
1629 void
1630 anv_descriptor_set_destroy(struct anv_device *device,
1631 struct anv_descriptor_set *set)
1632 {
1633 anv_device_free(device, set);
1634 }
1635
1636 VkResult anv_AllocDescriptorSets(
1637 VkDevice _device,
1638 VkDescriptorPool descriptorPool,
1639 VkDescriptorSetUsage setUsage,
1640 uint32_t count,
1641 const VkDescriptorSetLayout* pSetLayouts,
1642 VkDescriptorSet* pDescriptorSets,
1643 uint32_t* pCount)
1644 {
1645 ANV_FROM_HANDLE(anv_device, device, _device);
1646
1647 VkResult result;
1648 struct anv_descriptor_set *set;
1649
1650 for (uint32_t i = 0; i < count; i++) {
1651 ANV_FROM_HANDLE(anv_descriptor_set_layout, layout, pSetLayouts[i]);
1652
1653 result = anv_descriptor_set_create(device, layout, &set);
1654 if (result != VK_SUCCESS) {
1655 *pCount = i;
1656 return result;
1657 }
1658
1659 pDescriptorSets[i] = anv_descriptor_set_to_handle(set);
1660 }
1661
1662 *pCount = count;
1663
1664 return VK_SUCCESS;
1665 }
1666
1667 VkResult anv_FreeDescriptorSets(
1668 VkDevice _device,
1669 VkDescriptorPool descriptorPool,
1670 uint32_t count,
1671 const VkDescriptorSet* pDescriptorSets)
1672 {
1673 ANV_FROM_HANDLE(anv_device, device, _device);
1674
1675 for (uint32_t i = 0; i < count; i++) {
1676 ANV_FROM_HANDLE(anv_descriptor_set, set, pDescriptorSets[i]);
1677
1678 anv_descriptor_set_destroy(device, set);
1679 }
1680
1681 return VK_SUCCESS;
1682 }
1683
1684 VkResult anv_UpdateDescriptorSets(
1685 VkDevice device,
1686 uint32_t writeCount,
1687 const VkWriteDescriptorSet* pDescriptorWrites,
1688 uint32_t copyCount,
1689 const VkCopyDescriptorSet* pDescriptorCopies)
1690 {
1691 for (uint32_t i = 0; i < writeCount; i++) {
1692 const VkWriteDescriptorSet *write = &pDescriptorWrites[i];
1693 ANV_FROM_HANDLE(anv_descriptor_set, set, write->destSet);
1694
1695 switch (write->descriptorType) {
1696 case VK_DESCRIPTOR_TYPE_SAMPLER:
1697 case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
1698 for (uint32_t j = 0; j < write->count; j++) {
1699 set->descriptors[write->destBinding + j].sampler =
1700 anv_sampler_from_handle(write->pDescriptors[j].sampler);
1701 }
1702
1703 if (write->descriptorType == VK_DESCRIPTOR_TYPE_SAMPLER)
1704 break;
1705
1706 /* fallthrough */
1707
1708 case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE:
1709 case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
1710 for (uint32_t j = 0; j < write->count; j++) {
1711 ANV_FROM_HANDLE(anv_image_view, iview,
1712 write->pDescriptors[j].imageView);
1713 set->descriptors[write->destBinding + j].view = &iview->view;
1714 }
1715 break;
1716
1717 case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
1718 case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
1719 anv_finishme("texel buffers not implemented");
1720 break;
1721
1722 case VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT:
1723 anv_finishme("input attachments not implemented");
1724 break;
1725
1726 case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
1727 case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
1728 case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
1729 case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
1730 for (uint32_t j = 0; j < write->count; j++) {
1731 ANV_FROM_HANDLE(anv_buffer_view, bview,
1732 write->pDescriptors[j].bufferView);
1733 set->descriptors[write->destBinding + j].view = &bview->view;
 1734          }
               break;
 1735
 1736       default:
1737 break;
1738 }
1739 }
1740
1741 for (uint32_t i = 0; i < copyCount; i++) {
1742 const VkCopyDescriptorSet *copy = &pDescriptorCopies[i];
 1743       ANV_FROM_HANDLE(anv_descriptor_set, src, copy->srcSet);
1744 ANV_FROM_HANDLE(anv_descriptor_set, dest, copy->destSet);
1745 for (uint32_t j = 0; j < copy->count; j++) {
1746 dest->descriptors[copy->destBinding + j] =
1747 src->descriptors[copy->srcBinding + j];
1748 }
1749 }
1750
1751 return VK_SUCCESS;
1752 }
1753
1754 // State object functions
1755
1756 static inline int64_t
1757 clamp_int64(int64_t x, int64_t min, int64_t max)
1758 {
1759 if (x < min)
1760 return min;
1761 else if (x < max)
1762 return x;
1763 else
1764 return max;
1765 }
1766
1767 VkResult anv_CreateDynamicViewportState(
1768 VkDevice _device,
1769 const VkDynamicViewportStateCreateInfo* pCreateInfo,
1770 VkDynamicViewportState* pState)
1771 {
1772 ANV_FROM_HANDLE(anv_device, device, _device);
1773 struct anv_dynamic_vp_state *state;
1774
1775 assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_DYNAMIC_VIEWPORT_STATE_CREATE_INFO);
1776
1777 state = anv_device_alloc(device, sizeof(*state), 8,
1778 VK_SYSTEM_ALLOC_TYPE_API_OBJECT);
1779 if (state == NULL)
1780 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
1781
1782 unsigned count = pCreateInfo->viewportAndScissorCount;
1783 state->sf_clip_vp = anv_state_pool_alloc(&device->dynamic_state_pool,
1784 count * 64, 64);
1785 state->cc_vp = anv_state_pool_alloc(&device->dynamic_state_pool,
1786 count * 8, 32);
1787 state->scissor = anv_state_pool_alloc(&device->dynamic_state_pool,
1788 count * 32, 32);
1789
1790 for (uint32_t i = 0; i < pCreateInfo->viewportAndScissorCount; i++) {
1791 const VkViewport *vp = &pCreateInfo->pViewports[i];
1792 const VkRect2D *s = &pCreateInfo->pScissors[i];
1793
1794 /* The gen7 state struct has just the matrix and guardband fields, the
1795 * gen8 struct adds the min/max viewport fields. */
1796 struct GEN8_SF_CLIP_VIEWPORT sf_clip_viewport = {
1797 .ViewportMatrixElementm00 = vp->width / 2,
1798 .ViewportMatrixElementm11 = vp->height / 2,
1799 .ViewportMatrixElementm22 = (vp->maxDepth - vp->minDepth) / 2,
1800 .ViewportMatrixElementm30 = vp->originX + vp->width / 2,
1801 .ViewportMatrixElementm31 = vp->originY + vp->height / 2,
1802 .ViewportMatrixElementm32 = (vp->maxDepth + vp->minDepth) / 2,
1803 .XMinClipGuardband = -1.0f,
1804 .XMaxClipGuardband = 1.0f,
1805 .YMinClipGuardband = -1.0f,
1806 .YMaxClipGuardband = 1.0f,
1807 .XMinViewPort = vp->originX,
1808 .XMaxViewPort = vp->originX + vp->width - 1,
1809 .YMinViewPort = vp->originY,
1810 .YMaxViewPort = vp->originY + vp->height - 1,
1811 };
1812
1813 struct GEN7_CC_VIEWPORT cc_viewport = {
1814 .MinimumDepth = vp->minDepth,
1815 .MaximumDepth = vp->maxDepth
1816 };
1817
1818 /* Since xmax and ymax are inclusive, we have to have xmax < xmin or
 1819        * ymax < ymin for empty clips. In case clip x, y, width, height are all
1820 * 0, the clamps below produce 0 for xmin, ymin, xmax, ymax, which isn't
1821 * what we want. Just special case empty clips and produce a canonical
1822 * empty clip. */
1823 static const struct GEN7_SCISSOR_RECT empty_scissor = {
1824 .ScissorRectangleYMin = 1,
1825 .ScissorRectangleXMin = 1,
1826 .ScissorRectangleYMax = 0,
1827 .ScissorRectangleXMax = 0
1828 };
1829
1830 const int max = 0xffff;
1831 struct GEN7_SCISSOR_RECT scissor = {
1832 /* Do this math using int64_t so overflow gets clamped correctly. */
1833 .ScissorRectangleYMin = clamp_int64(s->offset.y, 0, max),
1834 .ScissorRectangleXMin = clamp_int64(s->offset.x, 0, max),
1835 .ScissorRectangleYMax = clamp_int64((uint64_t) s->offset.y + s->extent.height - 1, 0, max),
1836 .ScissorRectangleXMax = clamp_int64((uint64_t) s->offset.x + s->extent.width - 1, 0, max)
1837 };
1838
1839 GEN8_SF_CLIP_VIEWPORT_pack(NULL, state->sf_clip_vp.map + i * 64, &sf_clip_viewport);
 1840       GEN7_CC_VIEWPORT_pack(NULL, state->cc_vp.map + i * 8, &cc_viewport);
1841
1842 if (s->extent.width <= 0 || s->extent.height <= 0) {
1843 GEN7_SCISSOR_RECT_pack(NULL, state->scissor.map + i * 32, &empty_scissor);
1844 } else {
1845 GEN7_SCISSOR_RECT_pack(NULL, state->scissor.map + i * 32, &scissor);
1846 }
1847 }
1848
1849 *pState = anv_dynamic_vp_state_to_handle(state);
1850
1851 return VK_SUCCESS;
1852 }
1853
1854 VkResult anv_DestroyDynamicViewportState(
1855 VkDevice _device,
1856 VkDynamicViewportState _vp_state)
1857 {
1858 ANV_FROM_HANDLE(anv_device, device, _device);
1859 ANV_FROM_HANDLE(anv_dynamic_vp_state, vp_state, _vp_state);
1860
1861 anv_state_pool_free(&device->dynamic_state_pool, vp_state->sf_clip_vp);
1862 anv_state_pool_free(&device->dynamic_state_pool, vp_state->cc_vp);
1863 anv_state_pool_free(&device->dynamic_state_pool, vp_state->scissor);
1864
1865 anv_device_free(device, vp_state);
1866
1867 return VK_SUCCESS;
1868 }
1869
1870 VkResult anv_CreateDynamicRasterState(
1871 VkDevice _device,
1872 const VkDynamicRasterStateCreateInfo* pCreateInfo,
1873 VkDynamicRasterState* pState)
1874 {
1875 return driver_layer->CreateDynamicRasterState(_device, pCreateInfo, pState);
1876 }
1877
1878 VkResult anv_DestroyDynamicRasterState(
1879 VkDevice _device,
1880 VkDynamicRasterState _rs_state)
1881 {
1882 ANV_FROM_HANDLE(anv_device, device, _device);
1883 ANV_FROM_HANDLE(anv_dynamic_rs_state, rs_state, _rs_state);
1884
1885 anv_device_free(device, rs_state);
1886
1887 return VK_SUCCESS;
1888 }
1889
VkResult anv_CreateDynamicColorBlendState(
    VkDevice                                    _device,
    const VkDynamicColorBlendStateCreateInfo*   pCreateInfo,
    VkDynamicColorBlendState*                   pState)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   struct anv_dynamic_cb_state *state;

   assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_DYNAMIC_COLOR_BLEND_STATE_CREATE_INFO);

   state = anv_device_alloc(device, sizeof(*state), 8,
                            VK_SYSTEM_ALLOC_TYPE_API_OBJECT);
   if (state == NULL)
      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

   struct GEN7_COLOR_CALC_STATE color_calc_state = {
      .BlendConstantColorRed = pCreateInfo->blendConst[0],
      .BlendConstantColorGreen = pCreateInfo->blendConst[1],
      .BlendConstantColorBlue = pCreateInfo->blendConst[2],
      .BlendConstantColorAlpha = pCreateInfo->blendConst[3]
   };

   GEN7_COLOR_CALC_STATE_pack(NULL, state->color_calc_state, &color_calc_state);

   *pState = anv_dynamic_cb_state_to_handle(state);

   return VK_SUCCESS;
}

VkResult anv_DestroyDynamicColorBlendState(
    VkDevice                                    _device,
    VkDynamicColorBlendState                    _cb_state)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   ANV_FROM_HANDLE(anv_dynamic_cb_state, cb_state, _cb_state);

   anv_device_free(device, cb_state);

   return VK_SUCCESS;
}

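/* Like raster state, depth/stencil state packing is hardware-specific and is
 * deferred to the driver layer.
 */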
VkResult anv_CreateDynamicDepthStencilState(
    VkDevice                                    _device,
    const VkDynamicDepthStencilStateCreateInfo* pCreateInfo,
    VkDynamicDepthStencilState*                 pState)
{
   return driver_layer->CreateDynamicDepthStencilState(_device, pCreateInfo, pState);
}

VkResult anv_DestroyDynamicDepthStencilState(
    VkDevice                                    _device,
    VkDynamicDepthStencilState                  _ds_state)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   ANV_FROM_HANDLE(anv_dynamic_ds_state, ds_state, _ds_state);

   anv_device_free(device, ds_state);

   return VK_SUCCESS;
}

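/* A framebuffer is a single allocation: the anv_framebuffer struct followed
 * by an array of attachment view pointers.  It also owns an implicitly
 * created viewport/scissor state covering the whole framebuffer.
 */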
VkResult anv_CreateFramebuffer(
    VkDevice                                    _device,
    const VkFramebufferCreateInfo*              pCreateInfo,
    VkFramebuffer*                              pFramebuffer)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   struct anv_framebuffer *framebuffer;

   assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO);

   size_t size = sizeof(*framebuffer) +
                 sizeof(struct anv_attachment_view *) * pCreateInfo->attachmentCount;
   framebuffer = anv_device_alloc(device, size, 8,
                                  VK_SYSTEM_ALLOC_TYPE_API_OBJECT);
   if (framebuffer == NULL)
      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

   framebuffer->attachment_count = pCreateInfo->attachmentCount;
   for (uint32_t i = 0; i < pCreateInfo->attachmentCount; i++) {
      ANV_FROM_HANDLE(anv_attachment_view, view,
                      pCreateInfo->pAttachments[i].view);

      framebuffer->attachments[i] = view;
   }

   framebuffer->width = pCreateInfo->width;
   framebuffer->height = pCreateInfo->height;
   framebuffer->layers = pCreateInfo->layers;

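   /* Set up a default viewport state: one viewport and one scissor, both
    * covering the full framebuffer, with depth mapped to [0, 1].
    */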
   anv_CreateDynamicViewportState(anv_device_to_handle(device),
      &(VkDynamicViewportStateCreateInfo) {
         .sType = VK_STRUCTURE_TYPE_DYNAMIC_VIEWPORT_STATE_CREATE_INFO,
         .viewportAndScissorCount = 1,
         .pViewports = (VkViewport[]) {
            {
               .originX = 0,
               .originY = 0,
               .width = pCreateInfo->width,
               .height = pCreateInfo->height,
               .minDepth = 0,
               .maxDepth = 1
            },
         },
         .pScissors = (VkRect2D[]) {
            { { 0, 0 },
              { pCreateInfo->width, pCreateInfo->height } },
         }
      },
      &framebuffer->vp_state);

   *pFramebuffer = anv_framebuffer_to_handle(framebuffer);

   return VK_SUCCESS;
}

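/* Destroying a framebuffer also destroys the viewport state that
 * anv_CreateFramebuffer created implicitly.
 */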
VkResult anv_DestroyFramebuffer(
    VkDevice                                    _device,
    VkFramebuffer                               _fb)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   ANV_FROM_HANDLE(anv_framebuffer, fb, _fb);

   anv_DestroyDynamicViewportState(anv_device_to_handle(device),
                                   fb->vp_state);
   anv_device_free(device, fb);

   return VK_SUCCESS;
}

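/* A render pass is likewise a single allocation: the anv_render_pass struct,
 * then the trailing subpass array, then the attachment array at
 * attachments_offset.
 */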
VkResult anv_CreateRenderPass(
    VkDevice                                    _device,
    const VkRenderPassCreateInfo*               pCreateInfo,
    VkRenderPass*                               pRenderPass)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   struct anv_render_pass *pass;
   size_t size;
   size_t attachments_offset;

   assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO);

   size = sizeof(*pass);
   size += pCreateInfo->subpassCount * sizeof(pass->subpasses[0]);
   attachments_offset = size;
   size += pCreateInfo->attachmentCount * sizeof(pass->attachments[0]);

   pass = anv_device_alloc(device, size, 8,
                           VK_SYSTEM_ALLOC_TYPE_API_OBJECT);
   if (pass == NULL)
      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

   /* Clear the subpasses along with the parent pass.  This is required
    * because each array member of anv_subpass must be a valid pointer if
    * not NULL.
    */
   memset(pass, 0, size);
   pass->attachment_count = pCreateInfo->attachmentCount;
   pass->subpass_count = pCreateInfo->subpassCount;
   pass->attachments = (void *) pass + attachments_offset;

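   /* Gather per-attachment state and count how many attachments will need an
    * explicit clear when the pass begins.
    */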
   for (uint32_t i = 0; i < pCreateInfo->attachmentCount; i++) {
      struct anv_render_pass_attachment *att = &pass->attachments[i];

      att->format = anv_format_for_vk_format(pCreateInfo->pAttachments[i].format);
      att->samples = pCreateInfo->pAttachments[i].samples;
      att->load_op = pCreateInfo->pAttachments[i].loadOp;
      att->stencil_load_op = pCreateInfo->pAttachments[i].stencilLoadOp;
      // att->store_op = pCreateInfo->pAttachments[i].storeOp;
      // att->stencil_store_op = pCreateInfo->pAttachments[i].stencilStoreOp;

      if (att->load_op == VK_ATTACHMENT_LOAD_OP_CLEAR) {
         if (anv_format_is_color(att->format)) {
            ++pass->num_color_clear_attachments;
         } else if (att->format->depth_format) {
            pass->has_depth_clear_attachment = true;
         }
      } else if (att->stencil_load_op == VK_ATTACHMENT_LOAD_OP_CLEAR) {
         assert(att->format->has_stencil);
         pass->has_stencil_clear_attachment = true;
      }
   }

   for (uint32_t i = 0; i < pCreateInfo->subpassCount; i++) {
      const VkSubpassDescription *desc = &pCreateInfo->pSubpasses[i];
      struct anv_subpass *subpass = &pass->subpasses[i];

      subpass->input_count = desc->inputCount;
      subpass->color_count = desc->colorCount;

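      /* XXX: The per-subpass attachment-index allocations below are not
       * checked for failure; a NULL return from anv_device_alloc() would
       * crash the copy loops that follow.
       */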
      if (desc->inputCount > 0) {
         subpass->input_attachments =
            anv_device_alloc(device, desc->inputCount * sizeof(uint32_t),
                             8, VK_SYSTEM_ALLOC_TYPE_API_OBJECT);

         for (uint32_t j = 0; j < desc->inputCount; j++) {
            subpass->input_attachments[j]
               = desc->inputAttachments[j].attachment;
         }
      }

      if (desc->colorCount > 0) {
         subpass->color_attachments =
            anv_device_alloc(device, desc->colorCount * sizeof(uint32_t),
                             8, VK_SYSTEM_ALLOC_TYPE_API_OBJECT);

         for (uint32_t j = 0; j < desc->colorCount; j++) {
            subpass->color_attachments[j]
               = desc->colorAttachments[j].attachment;
         }
      }

      if (desc->resolveAttachments) {
         subpass->resolve_attachments =
            anv_device_alloc(device, desc->colorCount * sizeof(uint32_t),
                             8, VK_SYSTEM_ALLOC_TYPE_API_OBJECT);

         for (uint32_t j = 0; j < desc->colorCount; j++) {
            subpass->resolve_attachments[j]
               = desc->resolveAttachments[j].attachment;
         }
      }

      subpass->depth_stencil_attachment = desc->depthStencilAttachment.attachment;
   }

   *pRenderPass = anv_render_pass_to_handle(pass);

   return VK_SUCCESS;
}

VkResult anv_DestroyRenderPass(
    VkDevice                                    _device,
    VkRenderPass                                _pass)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   ANV_FROM_HANDLE(anv_render_pass, pass, _pass);

   for (uint32_t i = 0; i < pass->subpass_count; i++) {
      /* In VkSubpassDescription, each of the attachment arrays may be null,
       * in which case the corresponding anv_subpass arrays are still NULL
       * from the memset in anv_CreateRenderPass; anv_device_free() is
       * assumed to treat NULL as a no-op.
       */
      struct anv_subpass *subpass = &pass->subpasses[i];

      anv_device_free(device, subpass->input_attachments);
      anv_device_free(device, subpass->color_attachments);
      anv_device_free(device, subpass->resolve_attachments);
   }

   anv_device_free(device, pass);

   return VK_SUCCESS;
}

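/* Reporting a granularity of (1, 1) tells the application that any render
 * area is acceptable; nothing has to be aligned to a coarser boundary.
 */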
VkResult anv_GetRenderAreaGranularity(
    VkDevice                                    device,
    VkRenderPass                                renderPass,
    VkExtent2D*                                 pGranularity)
{
   *pGranularity = (VkExtent2D) { 1, 1 };

   return VK_SUCCESS;
}

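/* The vkCmdDbgMarkerBegin/End entry points are exported directly with
 * default visibility so a loader can resolve them, but they are
 * intentionally no-ops here.
 */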
void vkCmdDbgMarkerBegin(
    VkCmdBuffer                                 cmdBuffer,
    const char*                                 pMarker)
   __attribute__ ((visibility ("default")));

void vkCmdDbgMarkerEnd(
    VkCmdBuffer                                 cmdBuffer)
   __attribute__ ((visibility ("default")));

void vkCmdDbgMarkerBegin(
    VkCmdBuffer                                 cmdBuffer,
    const char*                                 pMarker)
{
}

void vkCmdDbgMarkerEnd(
    VkCmdBuffer                                 cmdBuffer)
{
}