vk/extensions: count needs to be <= number of extensions
[mesa.git] / src / vulkan / anv_device.c
1 /*
2 * Copyright © 2015 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 */
23
24 #include <assert.h>
25 #include <stdbool.h>
26 #include <string.h>
27 #include <unistd.h>
28 #include <fcntl.h>
29
30 #include "anv_private.h"
31 #include "mesa/main/git_sha1.h"
32 #include "util/strtod.h"
33
34 static VkResult
35 anv_physical_device_init(struct anv_physical_device *device,
36 struct anv_instance *instance,
37 const char *path)
38 {
39 int fd;
40
41 fd = open(path, O_RDWR | O_CLOEXEC);
42 if (fd < 0)
43 return vk_error(VK_ERROR_UNAVAILABLE);
44
45 device->instance = instance;
46 device->path = path;
47
48 device->chipset_id = anv_gem_get_param(fd, I915_PARAM_CHIPSET_ID);
49 if (!device->chipset_id)
50 goto fail;
51
52 device->name = brw_get_device_name(device->chipset_id);
53 device->info = brw_get_device_info(device->chipset_id, -1);
54 if (!device->info)
55 goto fail;
56
57 if (anv_gem_get_aperture(fd, &device->aperture_size) == -1)
58 goto fail;
59
60 if (!anv_gem_get_param(fd, I915_PARAM_HAS_WAIT_TIMEOUT))
61 goto fail;
62
63 if (!anv_gem_get_param(fd, I915_PARAM_HAS_EXECBUF2))
64 goto fail;
65
66 if (!anv_gem_get_param(fd, I915_PARAM_HAS_LLC))
67 goto fail;
68
69 if (!anv_gem_get_param(fd, I915_PARAM_HAS_EXEC_CONSTANTS))
70 goto fail;
71
72 close(fd);
73
74 return VK_SUCCESS;
75
76 fail:
77 close(fd);
78 return vk_error(VK_ERROR_UNAVAILABLE);
79 }
80
81 static void *default_alloc(
82 void* pUserData,
83 size_t size,
84 size_t alignment,
85 VkSystemAllocType allocType)
86 {
87 return malloc(size);
88 }
89
90 static void default_free(
91 void* pUserData,
92 void* pMem)
93 {
94 free(pMem);
95 }
96
97 static const VkAllocCallbacks default_alloc_callbacks = {
98 .pUserData = NULL,
99 .pfnAlloc = default_alloc,
100 .pfnFree = default_free
101 };
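
/* Note: default_alloc ignores the requested alignment. That is harmless for
 * the 8-byte alignments requested throughout this file, since malloc()
 * returns storage aligned for max_align_t; an allocator that had to honor
 * larger alignments could use C11 aligned_alloc(alignment, size) instead.
 */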
102
103 VkResult anv_CreateInstance(
104 const VkInstanceCreateInfo* pCreateInfo,
105 VkInstance* pInstance)
106 {
107 struct anv_instance *instance;
108 const VkAllocCallbacks *alloc_callbacks = &default_alloc_callbacks;
109 void *user_data = NULL;
110
111 assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_INSTANCE_CREATE_INFO);
112
113 if (pCreateInfo->pAllocCb) {
114 alloc_callbacks = pCreateInfo->pAllocCb;
115 user_data = pCreateInfo->pAllocCb->pUserData;
116 }
117 instance = alloc_callbacks->pfnAlloc(user_data, sizeof(*instance), 8,
118 VK_SYSTEM_ALLOC_TYPE_API_OBJECT);
119 if (!instance)
120 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
121
122 instance->pAllocUserData = alloc_callbacks->pUserData;
123 instance->pfnAlloc = alloc_callbacks->pfnAlloc;
124 instance->pfnFree = alloc_callbacks->pfnFree;
125 instance->apiVersion = pCreateInfo->pAppInfo->apiVersion;
126 instance->physicalDeviceCount = 0;
127
128 _mesa_locale_init();
129
130 VG(VALGRIND_CREATE_MEMPOOL(instance, 0, false));
131
132 *pInstance = anv_instance_to_handle(instance);
133
134 return VK_SUCCESS;
135 }
136
137 VkResult anv_DestroyInstance(
138 VkInstance _instance)
139 {
140 ANV_FROM_HANDLE(anv_instance, instance, _instance);
141
142 VG(VALGRIND_DESTROY_MEMPOOL(instance));
143
144 _mesa_locale_fini();
145
146 instance->pfnFree(instance->pAllocUserData, instance);
147
148 return VK_SUCCESS;
149 }
150
151 static void *
152 anv_instance_alloc(struct anv_instance *instance, size_t size,
153 size_t alignment, VkSystemAllocType allocType)
154 {
155 void *mem = instance->pfnAlloc(instance->pAllocUserData,
156 size, alignment, allocType);
157 if (mem) {
158 VALGRIND_MEMPOOL_ALLOC(instance, mem, size);
159 VALGRIND_MAKE_MEM_UNDEFINED(mem, size);
160 }
161 return mem;
162 }
163
164 static void
165 anv_instance_free(struct anv_instance *instance, void *mem)
166 {
167 if (mem == NULL)
168 return;
169
170 VALGRIND_MEMPOOL_FREE(instance, mem);
171
172 instance->pfnFree(instance->pAllocUserData, mem);
173 }
174
175 VkResult anv_EnumeratePhysicalDevices(
176 VkInstance _instance,
177 uint32_t* pPhysicalDeviceCount,
178 VkPhysicalDevice* pPhysicalDevices)
179 {
180 ANV_FROM_HANDLE(anv_instance, instance, _instance);
181 VkResult result;
182
183 if (instance->physicalDeviceCount == 0) {
184 result = anv_physical_device_init(&instance->physicalDevice,
185 instance, "/dev/dri/renderD128");
186 if (result != VK_SUCCESS)
187 return result;
188
189 instance->physicalDeviceCount = 1;
190 }
191
192 /* pPhysicalDeviceCount is an out parameter if pPhysicalDevices is NULL;
193 * otherwise it's an inout parameter.
194 *
195 * The Vulkan spec (git aaed022) says:
196 *
197 * pPhysicalDeviceCount is a pointer to an unsigned integer variable
198 * that is initialized with the number of devices the application is
199 * prepared to receive handles to. pname:pPhysicalDevices is pointer to
200 * an array of at least this many VkPhysicalDevice handles [...].
201 *
202 * Upon success, if pPhysicalDevices is NULL, vkEnumeratePhysicalDevices
203 * overwrites the contents of the variable pointed to by
204     * pPhysicalDeviceCount with the number of physical devices in the
205 * instance; otherwise, vkEnumeratePhysicalDevices overwrites
206 * pPhysicalDeviceCount with the number of physical handles written to
207 * pPhysicalDevices.
208 */
209 if (!pPhysicalDevices) {
210 *pPhysicalDeviceCount = instance->physicalDeviceCount;
211 } else if (*pPhysicalDeviceCount >= 1) {
212 pPhysicalDevices[0] = anv_physical_device_to_handle(&instance->physicalDevice);
213 *pPhysicalDeviceCount = 1;
214 } else {
215 *pPhysicalDeviceCount = 0;
216 }
217
218 return VK_SUCCESS;
219 }
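
/* Example of the two-call idiom described in the spec quote above, from the
 * application's side (a sketch; error handling omitted):
 *
 *    uint32_t count = 0;
 *    vkEnumeratePhysicalDevices(instance, &count, NULL);      // query count
 *    VkPhysicalDevice *devs = malloc(count * sizeof(*devs));
 *    vkEnumeratePhysicalDevices(instance, &count, devs);      // fill array
 */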
220
221 VkResult anv_GetPhysicalDeviceFeatures(
222 VkPhysicalDevice physicalDevice,
223 VkPhysicalDeviceFeatures* pFeatures)
224 {
225 anv_finishme("Get correct values for PhysicalDeviceFeatures");
226
227 *pFeatures = (VkPhysicalDeviceFeatures) {
228 .robustBufferAccess = false,
229 .fullDrawIndexUint32 = false,
230 .imageCubeArray = false,
231 .independentBlend = false,
232 .geometryShader = true,
233 .tessellationShader = false,
234 .sampleRateShading = false,
235 .dualSourceBlend = true,
236 .logicOp = true,
237 .instancedDrawIndirect = true,
238 .depthClip = false,
239 .depthBiasClamp = false,
240 .fillModeNonSolid = true,
241 .depthBounds = false,
242 .wideLines = true,
243 .largePoints = true,
244 .textureCompressionETC2 = true,
245 .textureCompressionASTC_LDR = true,
246 .textureCompressionBC = true,
247 .pipelineStatisticsQuery = true,
248 .vertexSideEffects = false,
249 .tessellationSideEffects = false,
250 .geometrySideEffects = false,
251 .fragmentSideEffects = false,
252 .shaderTessellationPointSize = false,
253 .shaderGeometryPointSize = true,
254 .shaderTextureGatherExtended = true,
255 .shaderStorageImageExtendedFormats = false,
256 .shaderStorageImageMultisample = false,
257 .shaderStorageBufferArrayConstantIndexing = false,
258 .shaderStorageImageArrayConstantIndexing = false,
259 .shaderUniformBufferArrayDynamicIndexing = true,
260 .shaderSampledImageArrayDynamicIndexing = false,
261 .shaderStorageBufferArrayDynamicIndexing = false,
262 .shaderStorageImageArrayDynamicIndexing = false,
263 .shaderClipDistance = false,
264 .shaderCullDistance = false,
265 .shaderFloat64 = false,
266 .shaderInt64 = false,
267 .shaderFloat16 = false,
268 .shaderInt16 = false,
269 };
270
271 return VK_SUCCESS;
272 }
273
274 VkResult anv_GetPhysicalDeviceLimits(
275 VkPhysicalDevice physicalDevice,
276 VkPhysicalDeviceLimits* pLimits)
277 {
278 ANV_FROM_HANDLE(anv_physical_device, physical_device, physicalDevice);
279 const struct brw_device_info *devinfo = physical_device->info;
280
281 anv_finishme("Get correct values for PhysicalDeviceLimits");
282
283 *pLimits = (VkPhysicalDeviceLimits) {
284 .maxImageDimension1D = (1 << 14),
285 .maxImageDimension2D = (1 << 14),
286 .maxImageDimension3D = (1 << 10),
287 .maxImageDimensionCube = (1 << 14),
288 .maxImageArrayLayers = (1 << 10),
289 .maxTexelBufferSize = (1 << 14),
290 .maxUniformBufferSize = UINT32_MAX,
291 .maxStorageBufferSize = UINT32_MAX,
292 .maxPushConstantsSize = MAX_PUSH_CONSTANTS_SIZE,
293 .maxMemoryAllocationCount = UINT32_MAX,
294 .bufferImageGranularity = 64, /* A cache line */
295 .maxBoundDescriptorSets = MAX_SETS,
296 .maxDescriptorSets = UINT32_MAX,
297 .maxPerStageDescriptorSamplers = 64,
298 .maxPerStageDescriptorUniformBuffers = 64,
299 .maxPerStageDescriptorStorageBuffers = 64,
300 .maxPerStageDescriptorSampledImages = 64,
301 .maxPerStageDescriptorStorageImages = 64,
302 .maxDescriptorSetSamplers = 256,
303 .maxDescriptorSetUniformBuffers = 256,
304 .maxDescriptorSetStorageBuffers = 256,
305 .maxDescriptorSetSampledImages = 256,
306 .maxDescriptorSetStorageImages = 256,
307 .maxVertexInputAttributes = 32,
308 .maxVertexInputAttributeOffset = 256,
309 .maxVertexInputBindingStride = 256,
310 .maxVertexOutputComponents = 32,
311 .maxTessGenLevel = 0,
312 .maxTessPatchSize = 0,
313 .maxTessControlPerVertexInputComponents = 0,
314 .maxTessControlPerVertexOutputComponents = 0,
315 .maxTessControlPerPatchOutputComponents = 0,
316 .maxTessControlTotalOutputComponents = 0,
317 .maxTessEvaluationInputComponents = 0,
318 .maxTessEvaluationOutputComponents = 0,
319 .maxGeometryShaderInvocations = 6,
320 .maxGeometryInputComponents = 16,
321 .maxGeometryOutputComponents = 16,
322 .maxGeometryOutputVertices = 16,
323 .maxGeometryTotalOutputComponents = 16,
324 .maxFragmentInputComponents = 16,
325 .maxFragmentOutputBuffers = 8,
326 .maxFragmentDualSourceBuffers = 2,
327 .maxFragmentCombinedOutputResources = 8,
328 .maxComputeSharedMemorySize = 1024,
329 .maxComputeWorkGroupCount = {
330 16 * devinfo->max_cs_threads,
331 16 * devinfo->max_cs_threads,
332 16 * devinfo->max_cs_threads,
333 },
334 .maxComputeWorkGroupInvocations = 16 * devinfo->max_cs_threads,
335 .maxComputeWorkGroupSize = {
336 16 * devinfo->max_cs_threads,
337 16 * devinfo->max_cs_threads,
338 16 * devinfo->max_cs_threads,
339 },
340 .subPixelPrecisionBits = 4 /* FIXME */,
341 .subTexelPrecisionBits = 4 /* FIXME */,
342 .mipmapPrecisionBits = 4 /* FIXME */,
343 .maxDrawIndexedIndexValue = UINT32_MAX,
344 .maxDrawIndirectInstanceCount = UINT32_MAX,
345 .primitiveRestartForPatches = UINT32_MAX,
346 .maxSamplerLodBias = 16,
347 .maxSamplerAnisotropy = 16,
348 .maxViewports = 16,
349 .maxDynamicViewportStates = UINT32_MAX,
350 .maxViewportDimensions = { (1 << 14), (1 << 14) },
351 .viewportBoundsRange = { -1.0, 1.0 }, /* FIXME */
352 .viewportSubPixelBits = 13, /* We take a float? */
353 .minMemoryMapAlignment = 64, /* A cache line */
354 .minTexelBufferOffsetAlignment = 1,
355 .minUniformBufferOffsetAlignment = 1,
356 .minStorageBufferOffsetAlignment = 1,
357 .minTexelOffset = 0, /* FIXME */
358 .maxTexelOffset = 0, /* FIXME */
359 .minTexelGatherOffset = 0, /* FIXME */
360 .maxTexelGatherOffset = 0, /* FIXME */
361 .minInterpolationOffset = 0, /* FIXME */
362 .maxInterpolationOffset = 0, /* FIXME */
363 .subPixelInterpolationOffsetBits = 0, /* FIXME */
364 .maxFramebufferWidth = (1 << 14),
365 .maxFramebufferHeight = (1 << 14),
366 .maxFramebufferLayers = (1 << 10),
367 .maxFramebufferColorSamples = 8,
368 .maxFramebufferDepthSamples = 8,
369 .maxFramebufferStencilSamples = 8,
370 .maxColorAttachments = MAX_RTS,
371 .maxSampledImageColorSamples = 8,
372 .maxSampledImageDepthSamples = 8,
373 .maxSampledImageIntegerSamples = 1,
374 .maxStorageImageSamples = 1,
375 .maxSampleMaskWords = 1,
376 .timestampFrequency = 1000 * 1000 * 1000 / 80,
377 .maxClipDistances = 0 /* FIXME */,
378 .maxCullDistances = 0 /* FIXME */,
379 .maxCombinedClipAndCullDistances = 0 /* FIXME */,
380 .pointSizeRange = { 0.125, 255.875 },
381 .lineWidthRange = { 0.0, 7.9921875 },
382 .pointSizeGranularity = (1.0 / 8.0),
383 .lineWidthGranularity = (1.0 / 128.0),
384 };
385
386 return VK_SUCCESS;
387 }
388
389 VkResult anv_GetPhysicalDeviceProperties(
390 VkPhysicalDevice physicalDevice,
391 VkPhysicalDeviceProperties* pProperties)
392 {
393 ANV_FROM_HANDLE(anv_physical_device, pdevice, physicalDevice);
394
395 *pProperties = (VkPhysicalDeviceProperties) {
396 .apiVersion = VK_MAKE_VERSION(0, 138, 1),
397 .driverVersion = 1,
398 .vendorId = 0x8086,
399 .deviceId = pdevice->chipset_id,
400 .deviceType = VK_PHYSICAL_DEVICE_TYPE_INTEGRATED_GPU,
401 };
402
403 strcpy(pProperties->deviceName, pdevice->name);
404 snprintf((char *)pProperties->pipelineCacheUUID, VK_UUID_LENGTH,
405 "anv-%s", MESA_GIT_SHA1 + 4);
406
407 return VK_SUCCESS;
408 }
409
410 VkResult anv_GetPhysicalDeviceQueueCount(
411 VkPhysicalDevice physicalDevice,
412 uint32_t* pCount)
413 {
414 *pCount = 1;
415
416 return VK_SUCCESS;
417 }
418
419 VkResult anv_GetPhysicalDeviceQueueProperties(
420 VkPhysicalDevice physicalDevice,
421 uint32_t count,
422 VkPhysicalDeviceQueueProperties* pQueueProperties)
423 {
424 assert(count == 1);
425
426 *pQueueProperties = (VkPhysicalDeviceQueueProperties) {
427 .queueFlags = VK_QUEUE_GRAPHICS_BIT |
428 VK_QUEUE_COMPUTE_BIT |
429 VK_QUEUE_DMA_BIT,
430 .queueCount = 1,
431 .supportsTimestamps = true,
432 };
433
434 return VK_SUCCESS;
435 }
436
437 VkResult anv_GetPhysicalDeviceMemoryProperties(
438 VkPhysicalDevice physicalDevice,
439 VkPhysicalDeviceMemoryProperties* pMemoryProperties)
440 {
441 ANV_FROM_HANDLE(anv_physical_device, physical_device, physicalDevice);
442 VkDeviceSize heap_size;
443
444 /* Reserve some wiggle room for the driver by exposing only 75% of the
445 * aperture to the heap.
446 */
447 heap_size = 3 * physical_device->aperture_size / 4;
448
449 /* The property flags below are valid only for llc platforms. */
450 pMemoryProperties->memoryTypeCount = 1;
451 pMemoryProperties->memoryTypes[0] = (VkMemoryType) {
452 .propertyFlags = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT,
453       .heapIndex = 0, /* index into the single heap below */
454 };
455
456 pMemoryProperties->memoryHeapCount = 1;
457 pMemoryProperties->memoryHeaps[0] = (VkMemoryHeap) {
458 .size = heap_size,
459 .flags = VK_MEMORY_HEAP_HOST_LOCAL,
460 };
461
462 return VK_SUCCESS;
463 }
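
/* Sketch of how an application is expected to combine these properties with
 * a resource's VkMemoryRequirements: take the first type index that is both
 * permitted by memoryTypeBits and has the desired flags. The names below
 * (props, reqs, wanted) are illustrative only.
 *
 *    for (uint32_t i = 0; i < props.memoryTypeCount; i++) {
 *       if ((reqs.memoryTypeBits & (1u << i)) &&
 *           (props.memoryTypes[i].propertyFlags & wanted) == wanted)
 *          return i;
 *    }
 */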
464
465 PFN_vkVoidFunction anv_GetInstanceProcAddr(
466 VkInstance instance,
467 const char* pName)
468 {
469 return anv_lookup_entrypoint(pName);
470 }
471
472 PFN_vkVoidFunction anv_GetDeviceProcAddr(
473 VkDevice device,
474 const char* pName)
475 {
476 return anv_lookup_entrypoint(pName);
477 }
478
479 static VkResult
480 anv_queue_init(struct anv_device *device, struct anv_queue *queue)
481 {
482 queue->device = device;
483 queue->pool = &device->surface_state_pool;
484
485 queue->completed_serial = anv_state_pool_alloc(queue->pool, 4, 4);
486 if (queue->completed_serial.map == NULL)
487 return vk_error(VK_ERROR_OUT_OF_DEVICE_MEMORY);
488
489 *(uint32_t *)queue->completed_serial.map = 0;
490 queue->next_serial = 1;
491
492 return VK_SUCCESS;
493 }
494
495 static void
496 anv_queue_finish(struct anv_queue *queue)
497 {
498 #ifdef HAVE_VALGRIND
499 /* This gets torn down with the device so we only need to do this if
500 * valgrind is present.
501 */
502 anv_state_pool_free(queue->pool, queue->completed_serial);
503 #endif
504 }
505
506 static void
507 anv_device_init_border_colors(struct anv_device *device)
508 {
509 static const VkClearColorValue border_colors[] = {
510 [VK_BORDER_COLOR_FLOAT_TRANSPARENT_BLACK] = { .f32 = { 0.0, 0.0, 0.0, 0.0 } },
511 [VK_BORDER_COLOR_FLOAT_OPAQUE_BLACK] = { .f32 = { 0.0, 0.0, 0.0, 1.0 } },
512 [VK_BORDER_COLOR_FLOAT_OPAQUE_WHITE] = { .f32 = { 1.0, 1.0, 1.0, 1.0 } },
513 [VK_BORDER_COLOR_INT_TRANSPARENT_BLACK] = { .u32 = { 0, 0, 0, 0 } },
514 [VK_BORDER_COLOR_INT_OPAQUE_BLACK] = { .u32 = { 0, 0, 0, 1 } },
515 [VK_BORDER_COLOR_INT_OPAQUE_WHITE] = { .u32 = { 1, 1, 1, 1 } },
516 };
517
518 device->border_colors =
519 anv_state_pool_alloc(&device->dynamic_state_pool,
520 sizeof(border_colors), 32);
521 memcpy(device->border_colors.map, border_colors, sizeof(border_colors));
522 }
523
524 VkResult anv_CreateDevice(
525 VkPhysicalDevice physicalDevice,
526 const VkDeviceCreateInfo* pCreateInfo,
527 VkDevice* pDevice)
528 {
529 ANV_FROM_HANDLE(anv_physical_device, physical_device, physicalDevice);
530 struct anv_instance *instance = physical_device->instance;
531 struct anv_device *device;
532
533 assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO);
534
535 switch (physical_device->info->gen) {
536 case 7:
537 driver_layer = &gen7_layer;
538 break;
539 case 8:
540 driver_layer = &gen8_layer;
541       break;
   default:
      unreachable("unsupported gen");
542    }
543
544 device = anv_instance_alloc(instance, sizeof(*device), 8,
545 VK_SYSTEM_ALLOC_TYPE_API_OBJECT);
546 if (!device)
547 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
548
549 device->instance = physical_device->instance;
550
551 /* XXX(chadv): Can we dup() physicalDevice->fd here? */
552 device->fd = open(physical_device->path, O_RDWR | O_CLOEXEC);
553 if (device->fd == -1)
554 goto fail_device;
555
556 device->context_id = anv_gem_create_context(device);
557 if (device->context_id == -1)
558 goto fail_fd;
559
560 anv_bo_pool_init(&device->batch_bo_pool, device, ANV_CMD_BUFFER_BATCH_SIZE);
561
562 anv_block_pool_init(&device->dynamic_state_block_pool, device, 2048);
563
564 anv_state_pool_init(&device->dynamic_state_pool,
565 &device->dynamic_state_block_pool);
566
567 anv_block_pool_init(&device->instruction_block_pool, device, 2048);
568 anv_block_pool_init(&device->surface_state_block_pool, device, 2048);
569
570 anv_state_pool_init(&device->surface_state_pool,
571 &device->surface_state_block_pool);
572
573 anv_block_pool_init(&device->scratch_block_pool, device, 0x10000);
574
575 device->info = *physical_device->info;
576
577 device->compiler = anv_compiler_create(device);
578
579 pthread_mutex_init(&device->mutex, NULL);
580
581 anv_queue_init(device, &device->queue);
582
583 anv_device_init_meta(device);
584
585 anv_device_init_border_colors(device);
586
587 *pDevice = anv_device_to_handle(device);
588
589 return VK_SUCCESS;
590
591 fail_fd:
592 close(device->fd);
593 fail_device:
594 anv_device_free(device, device);
595
596 return vk_error(VK_ERROR_UNAVAILABLE);
597 }
598
599 VkResult anv_DestroyDevice(
600 VkDevice _device)
601 {
602 ANV_FROM_HANDLE(anv_device, device, _device);
603
604 anv_compiler_destroy(device->compiler);
605
606 anv_queue_finish(&device->queue);
607
608 anv_device_finish_meta(device);
609
610 #ifdef HAVE_VALGRIND
611 /* We only need to free these to prevent valgrind errors. The backing
612 * BO will go away in a couple of lines so we don't actually leak.
613 */
614 anv_state_pool_free(&device->dynamic_state_pool, device->border_colors);
615 #endif
616
617 anv_bo_pool_finish(&device->batch_bo_pool);
618 anv_state_pool_finish(&device->dynamic_state_pool);
619 anv_block_pool_finish(&device->dynamic_state_block_pool);
620 anv_block_pool_finish(&device->instruction_block_pool);
621 anv_state_pool_finish(&device->surface_state_pool);
622 anv_block_pool_finish(&device->surface_state_block_pool);
623 anv_block_pool_finish(&device->scratch_block_pool);
624
625 close(device->fd);
626
627 anv_instance_free(device->instance, device);
628
629 return VK_SUCCESS;
630 }
631
632 static const VkExtensionProperties global_extensions[] = {
633 {
634 .extName = "VK_WSI_LunarG",
635 .specVersion = 3
636 }
637 };
638
639 VkResult anv_GetGlobalExtensionProperties(
640 const char* pLayerName,
641 uint32_t* pCount,
642 VkExtensionProperties* pProperties)
643 {
644 if (pProperties == NULL) {
645 *pCount = ARRAY_SIZE(global_extensions);
646 return VK_SUCCESS;
647 }
648
649 assert(*pCount <= ARRAY_SIZE(global_extensions));
650
651 *pCount = ARRAY_SIZE(global_extensions);
652 memcpy(pProperties, global_extensions, sizeof(global_extensions));
653
654 return VK_SUCCESS;
655 }
656
657 VkResult anv_GetPhysicalDeviceExtensionProperties(
658 VkPhysicalDevice physicalDevice,
659 const char* pLayerName,
660 uint32_t* pCount,
661 VkExtensionProperties* pProperties)
662 {
663 if (pProperties == NULL) {
664 *pCount = 0;
665 return VK_SUCCESS;
666 }
667
668 /* None supported at this time */
669 return vk_error(VK_ERROR_INVALID_EXTENSION);
670 }
671
672 VkResult anv_GetGlobalLayerProperties(
673 uint32_t* pCount,
674 VkLayerProperties* pProperties)
675 {
676 if (pProperties == NULL) {
677 *pCount = 0;
678 return VK_SUCCESS;
679 }
680
681 /* None supported at this time */
682 return vk_error(VK_ERROR_INVALID_LAYER);
683 }
684
685 VkResult anv_GetPhysicalDeviceLayerProperties(
686 VkPhysicalDevice physicalDevice,
687 uint32_t* pCount,
688 VkLayerProperties* pProperties)
689 {
690 if (pProperties == NULL) {
691 *pCount = 0;
692 return VK_SUCCESS;
693 }
694
695 /* None supported at this time */
696 return vk_error(VK_ERROR_INVALID_LAYER);
697 }
698
699 VkResult anv_GetDeviceQueue(
700 VkDevice _device,
701 uint32_t queueNodeIndex,
702 uint32_t queueIndex,
703 VkQueue* pQueue)
704 {
705 ANV_FROM_HANDLE(anv_device, device, _device);
706
707 assert(queueIndex == 0);
708
709 *pQueue = anv_queue_to_handle(&device->queue);
710
711 return VK_SUCCESS;
712 }
713
714 VkResult anv_QueueSubmit(
715 VkQueue _queue,
716 uint32_t cmdBufferCount,
717 const VkCmdBuffer* pCmdBuffers,
718 VkFence _fence)
719 {
720 ANV_FROM_HANDLE(anv_queue, queue, _queue);
721 ANV_FROM_HANDLE(anv_fence, fence, _fence);
722 struct anv_device *device = queue->device;
723 int ret;
724
725 for (uint32_t i = 0; i < cmdBufferCount; i++) {
726 ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, pCmdBuffers[i]);
727
728 assert(cmd_buffer->level == VK_CMD_BUFFER_LEVEL_PRIMARY);
729
730 ret = anv_gem_execbuffer(device, &cmd_buffer->execbuf2.execbuf);
731 if (ret != 0)
732 return vk_error(VK_ERROR_UNKNOWN);
733
734 if (fence) {
735 ret = anv_gem_execbuffer(device, &fence->execbuf);
736 if (ret != 0)
737 return vk_error(VK_ERROR_UNKNOWN);
738 }
739
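      /* Record the offsets the kernel actually chose for each BO so that
       * later submissions can present them as presumed offsets; keeping
       * these accurate is what lets I915_EXEC_NO_RELOC skip relocation
       * processing on subsequent execbufs.
       */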
740       for (uint32_t j = 0; j < cmd_buffer->execbuf2.bo_count; j++)
741          cmd_buffer->execbuf2.bos[j]->offset = cmd_buffer->execbuf2.objects[j].offset;
742 }
743
744 return VK_SUCCESS;
745 }
746
747 VkResult anv_QueueWaitIdle(
748 VkQueue _queue)
749 {
750 ANV_FROM_HANDLE(anv_queue, queue, _queue);
751
752 return vkDeviceWaitIdle(anv_device_to_handle(queue->device));
753 }
754
755 VkResult anv_DeviceWaitIdle(
756 VkDevice _device)
757 {
758 ANV_FROM_HANDLE(anv_device, device, _device);
759 struct anv_state state;
760 struct anv_batch batch;
761 struct drm_i915_gem_execbuffer2 execbuf;
762 struct drm_i915_gem_exec_object2 exec2_objects[1];
763 struct anv_bo *bo = NULL;
764 VkResult result;
765 int64_t timeout;
766 int ret;
767
768 state = anv_state_pool_alloc(&device->dynamic_state_pool, 32, 32);
769 bo = &device->dynamic_state_pool.block_pool->bo;
770 batch.start = batch.next = state.map;
771 batch.end = state.map + 32;
772 anv_batch_emit(&batch, GEN7_MI_BATCH_BUFFER_END);
773 anv_batch_emit(&batch, GEN7_MI_NOOP);
774
775 exec2_objects[0].handle = bo->gem_handle;
776 exec2_objects[0].relocation_count = 0;
777 exec2_objects[0].relocs_ptr = 0;
778 exec2_objects[0].alignment = 0;
779 exec2_objects[0].offset = bo->offset;
780 exec2_objects[0].flags = 0;
781 exec2_objects[0].rsvd1 = 0;
782 exec2_objects[0].rsvd2 = 0;
783
784 execbuf.buffers_ptr = (uintptr_t) exec2_objects;
785 execbuf.buffer_count = 1;
786 execbuf.batch_start_offset = state.offset;
787 execbuf.batch_len = batch.next - state.map;
788 execbuf.cliprects_ptr = 0;
789 execbuf.num_cliprects = 0;
790 execbuf.DR1 = 0;
791 execbuf.DR4 = 0;
792
793 execbuf.flags =
794 I915_EXEC_HANDLE_LUT | I915_EXEC_NO_RELOC | I915_EXEC_RENDER;
795 execbuf.rsvd1 = device->context_id;
796 execbuf.rsvd2 = 0;
797
798 ret = anv_gem_execbuffer(device, &execbuf);
799 if (ret != 0) {
800 result = vk_error(VK_ERROR_UNKNOWN);
801 goto fail;
802 }
803
804 timeout = INT64_MAX;
805 ret = anv_gem_wait(device, bo->gem_handle, &timeout);
806 if (ret != 0) {
807 result = vk_error(VK_ERROR_UNKNOWN);
808 goto fail;
809 }
810
811 anv_state_pool_free(&device->dynamic_state_pool, state);
812
813 return VK_SUCCESS;
814
815 fail:
816 anv_state_pool_free(&device->dynamic_state_pool, state);
817
818 return result;
819 }
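
/* The wait above works because the empty batch (just MI_BATCH_BUFFER_END
 * plus MI_NOOP padding) is submitted on the same context as all prior work,
 * and requests on a context execute in submission order: once the trivial
 * batch's BO is idle, everything queued before it has retired as well.
 */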
820
821 void *
822 anv_device_alloc(struct anv_device * device,
823 size_t size,
824 size_t alignment,
825 VkSystemAllocType allocType)
826 {
827 return anv_instance_alloc(device->instance, size, alignment, allocType);
828 }
829
830 void
831 anv_device_free(struct anv_device * device,
832 void * mem)
833 {
834 anv_instance_free(device->instance, mem);
835 }
836
837 VkResult
838 anv_bo_init_new(struct anv_bo *bo, struct anv_device *device, uint64_t size)
839 {
840 bo->gem_handle = anv_gem_create(device, size);
841 if (!bo->gem_handle)
842 return vk_error(VK_ERROR_OUT_OF_DEVICE_MEMORY);
843
844 bo->map = NULL;
845 bo->index = 0;
846 bo->offset = 0;
847 bo->size = size;
848
849 return VK_SUCCESS;
850 }
851
852 VkResult anv_AllocMemory(
853 VkDevice _device,
854 const VkMemoryAllocInfo* pAllocInfo,
855 VkDeviceMemory* pMem)
856 {
857 ANV_FROM_HANDLE(anv_device, device, _device);
858 struct anv_device_memory *mem;
859 VkResult result;
860
861 assert(pAllocInfo->sType == VK_STRUCTURE_TYPE_MEMORY_ALLOC_INFO);
862
863 if (pAllocInfo->memoryTypeIndex != 0) {
864 /* We support exactly one memory heap. */
865 return vk_error(VK_ERROR_INVALID_VALUE);
866 }
867
868 /* FINISHME: Fail if allocation request exceeds heap size. */
869
870 mem = anv_device_alloc(device, sizeof(*mem), 8,
871 VK_SYSTEM_ALLOC_TYPE_API_OBJECT);
872 if (mem == NULL)
873 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
874
875 result = anv_bo_init_new(&mem->bo, device, pAllocInfo->allocationSize);
876 if (result != VK_SUCCESS)
877 goto fail;
878
879 *pMem = anv_device_memory_to_handle(mem);
880
881 return VK_SUCCESS;
882
883 fail:
884 anv_device_free(device, mem);
885
886 return result;
887 }
888
889 VkResult anv_FreeMemory(
890 VkDevice _device,
891 VkDeviceMemory _mem)
892 {
893 ANV_FROM_HANDLE(anv_device, device, _device);
894 ANV_FROM_HANDLE(anv_device_memory, mem, _mem);
895
896 if (mem->bo.map)
897 anv_gem_munmap(mem->bo.map, mem->bo.size);
898
899 if (mem->bo.gem_handle != 0)
900 anv_gem_close(device, mem->bo.gem_handle);
901
902 anv_device_free(device, mem);
903
904 return VK_SUCCESS;
905 }
906
907 VkResult anv_MapMemory(
908 VkDevice _device,
909 VkDeviceMemory _mem,
910 VkDeviceSize offset,
911 VkDeviceSize size,
912 VkMemoryMapFlags flags,
913 void** ppData)
914 {
915 ANV_FROM_HANDLE(anv_device, device, _device);
916 ANV_FROM_HANDLE(anv_device_memory, mem, _mem);
917
918 /* FIXME: Is this supposed to be thread safe? Since vkUnmapMemory() only
919 * takes a VkDeviceMemory pointer, it seems like only one map of the memory
920 * at a time is valid. We could just mmap up front and return an offset
921 * pointer here, but that may exhaust virtual memory on 32 bit
922 * userspace. */
923
924 mem->map = anv_gem_mmap(device, mem->bo.gem_handle, offset, size);
925 mem->map_size = size;
926
927 *ppData = mem->map;
928
929 return VK_SUCCESS;
930 }
931
932 VkResult anv_UnmapMemory(
933 VkDevice _device,
934 VkDeviceMemory _mem)
935 {
936 ANV_FROM_HANDLE(anv_device_memory, mem, _mem);
937
938 anv_gem_munmap(mem->map, mem->map_size);
939
940 return VK_SUCCESS;
941 }
942
943 VkResult anv_FlushMappedMemoryRanges(
944 VkDevice device,
945 uint32_t memRangeCount,
946 const VkMappedMemoryRange* pMemRanges)
947 {
948 /* clflush here for !llc platforms */
949
950 return VK_SUCCESS;
951 }
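
/* On LLC platforms the CPU caches are coherent with the GPU, so flush and
 * invalidate can be no-ops; the clflush mentioned above only matters for
 * non-LLC parts, and anv_physical_device_init() currently rejects devices
 * without I915_PARAM_HAS_LLC anyway.
 */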
952
953 VkResult anv_InvalidateMappedMemoryRanges(
954 VkDevice device,
955 uint32_t memRangeCount,
956 const VkMappedMemoryRange* pMemRanges)
957 {
958 return anv_FlushMappedMemoryRanges(device, memRangeCount, pMemRanges);
959 }
960
961 VkResult anv_GetBufferMemoryRequirements(
962 VkDevice device,
963 VkBuffer _buffer,
964 VkMemoryRequirements* pMemoryRequirements)
965 {
966 ANV_FROM_HANDLE(anv_buffer, buffer, _buffer);
967
968 /* The Vulkan spec (git aaed022) says:
969 *
970 * memoryTypeBits is a bitfield and contains one bit set for every
971 * supported memory type for the resource. The bit `1<<i` is set if and
972 * only if the memory type `i` in the VkPhysicalDeviceMemoryProperties
973 * structure for the physical device is supported.
974 *
975 * We support exactly one memory type.
976 */
977 pMemoryRequirements->memoryTypeBits = 1;
978
979 pMemoryRequirements->size = buffer->size;
980 pMemoryRequirements->alignment = 16;
981
982 return VK_SUCCESS;
983 }
984
985 VkResult anv_GetImageMemoryRequirements(
986 VkDevice device,
987 VkImage _image,
988 VkMemoryRequirements* pMemoryRequirements)
989 {
990 ANV_FROM_HANDLE(anv_image, image, _image);
991
992 /* The Vulkan spec (git aaed022) says:
993 *
994 * memoryTypeBits is a bitfield and contains one bit set for every
995 * supported memory type for the resource. The bit `1<<i` is set if and
996 * only if the memory type `i` in the VkPhysicalDeviceMemoryProperties
997 * structure for the physical device is supported.
998 *
999 * We support exactly one memory type.
1000 */
1001 pMemoryRequirements->memoryTypeBits = 1;
1002
1003 pMemoryRequirements->size = image->size;
1004 pMemoryRequirements->alignment = image->alignment;
1005
1006 return VK_SUCCESS;
1007 }
1008
1009 VkResult anv_GetImageSparseMemoryRequirements(
1010 VkDevice device,
1011 VkImage image,
1012 uint32_t* pNumRequirements,
1013 VkSparseImageMemoryRequirements* pSparseMemoryRequirements)
1014 {
1015 return vk_error(VK_UNSUPPORTED);
1016 }
1017
1018 VkResult anv_GetDeviceMemoryCommitment(
1019 VkDevice device,
1020 VkDeviceMemory memory,
1021 VkDeviceSize* pCommittedMemoryInBytes)
1022 {
1023 *pCommittedMemoryInBytes = 0;
1024 stub_return(VK_SUCCESS);
1025 }
1026
1027 VkResult anv_BindBufferMemory(
1028 VkDevice device,
1029 VkBuffer _buffer,
1030 VkDeviceMemory _mem,
1031 VkDeviceSize memOffset)
1032 {
1033 ANV_FROM_HANDLE(anv_device_memory, mem, _mem);
1034 ANV_FROM_HANDLE(anv_buffer, buffer, _buffer);
1035
1036 buffer->bo = &mem->bo;
1037 buffer->offset = memOffset;
1038
1039 return VK_SUCCESS;
1040 }
1041
1042 VkResult anv_BindImageMemory(
1043 VkDevice device,
1044 VkImage _image,
1045 VkDeviceMemory _mem,
1046 VkDeviceSize memOffset)
1047 {
1048 ANV_FROM_HANDLE(anv_device_memory, mem, _mem);
1049 ANV_FROM_HANDLE(anv_image, image, _image);
1050
1051 image->bo = &mem->bo;
1052 image->offset = memOffset;
1053
1054 return VK_SUCCESS;
1055 }
1056
1057 VkResult anv_QueueBindSparseBufferMemory(
1058 VkQueue queue,
1059 VkBuffer buffer,
1060 uint32_t numBindings,
1061 const VkSparseMemoryBindInfo* pBindInfo)
1062 {
1063 stub_return(VK_UNSUPPORTED);
1064 }
1065
1066 VkResult anv_QueueBindSparseImageOpaqueMemory(
1067 VkQueue queue,
1068 VkImage image,
1069 uint32_t numBindings,
1070 const VkSparseMemoryBindInfo* pBindInfo)
1071 {
1072 stub_return(VK_UNSUPPORTED);
1073 }
1074
1075 VkResult anv_QueueBindSparseImageMemory(
1076 VkQueue queue,
1077 VkImage image,
1078 uint32_t numBindings,
1079 const VkSparseImageMemoryBindInfo* pBindInfo)
1080 {
1081 stub_return(VK_UNSUPPORTED);
1082 }
1083
1084 VkResult anv_CreateFence(
1085 VkDevice _device,
1086 const VkFenceCreateInfo* pCreateInfo,
1087 VkFence* pFence)
1088 {
1089 ANV_FROM_HANDLE(anv_device, device, _device);
1090 struct anv_fence *fence;
1091 struct anv_batch batch;
1092 VkResult result;
1093
1094 const uint32_t fence_size = 128;
1095
1096 assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_FENCE_CREATE_INFO);
1097
1098 fence = anv_device_alloc(device, sizeof(*fence), 8,
1099 VK_SYSTEM_ALLOC_TYPE_API_OBJECT);
1100 if (fence == NULL)
1101 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
1102
1103 result = anv_bo_init_new(&fence->bo, device, fence_size);
1104 if (result != VK_SUCCESS)
1105 goto fail;
1106
1107 fence->bo.map =
1108 anv_gem_mmap(device, fence->bo.gem_handle, 0, fence->bo.size);
1109 batch.next = batch.start = fence->bo.map;
1110 batch.end = fence->bo.map + fence->bo.size;
1111 anv_batch_emit(&batch, GEN7_MI_BATCH_BUFFER_END);
1112 anv_batch_emit(&batch, GEN7_MI_NOOP);
1113
1114 fence->exec2_objects[0].handle = fence->bo.gem_handle;
1115 fence->exec2_objects[0].relocation_count = 0;
1116 fence->exec2_objects[0].relocs_ptr = 0;
1117 fence->exec2_objects[0].alignment = 0;
1118 fence->exec2_objects[0].offset = fence->bo.offset;
1119 fence->exec2_objects[0].flags = 0;
1120 fence->exec2_objects[0].rsvd1 = 0;
1121 fence->exec2_objects[0].rsvd2 = 0;
1122
1123 fence->execbuf.buffers_ptr = (uintptr_t) fence->exec2_objects;
1124 fence->execbuf.buffer_count = 1;
1125 fence->execbuf.batch_start_offset = 0;
1126 fence->execbuf.batch_len = batch.next - fence->bo.map;
1127 fence->execbuf.cliprects_ptr = 0;
1128 fence->execbuf.num_cliprects = 0;
1129 fence->execbuf.DR1 = 0;
1130 fence->execbuf.DR4 = 0;
1131
1132 fence->execbuf.flags =
1133 I915_EXEC_HANDLE_LUT | I915_EXEC_NO_RELOC | I915_EXEC_RENDER;
1134 fence->execbuf.rsvd1 = device->context_id;
1135 fence->execbuf.rsvd2 = 0;
1136
1137 *pFence = anv_fence_to_handle(fence);
1138
1139 return VK_SUCCESS;
1140
1141 fail:
1142 anv_device_free(device, fence);
1143
1144 return result;
1145 }
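
/* This fence uses the same trick as anv_DeviceWaitIdle(): it is a tiny
 * batch containing only MI_BATCH_BUFFER_END (padded with MI_NOOP), which
 * anv_QueueSubmit() executes after the real command buffers. Waiting on the
 * fence's BO therefore waits for everything submitted before it.
 */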
1146
1147 VkResult anv_DestroyFence(
1148 VkDevice _device,
1149 VkFence _fence)
1150 {
1151 ANV_FROM_HANDLE(anv_device, device, _device);
1152 ANV_FROM_HANDLE(anv_fence, fence, _fence);
1153
1154 anv_gem_munmap(fence->bo.map, fence->bo.size);
1155 anv_gem_close(device, fence->bo.gem_handle);
1156 anv_device_free(device, fence);
1157
1158 return VK_SUCCESS;
1159 }
1160
1161 VkResult anv_ResetFences(
1162 VkDevice _device,
1163 uint32_t fenceCount,
1164 const VkFence* pFences)
1165 {
1166 for (uint32_t i = 0; i < fenceCount; i++) {
1167 ANV_FROM_HANDLE(anv_fence, fence, pFences[i]);
1168 fence->ready = false;
1169 }
1170
1171 return VK_SUCCESS;
1172 }
1173
1174 VkResult anv_GetFenceStatus(
1175 VkDevice _device,
1176 VkFence _fence)
1177 {
1178 ANV_FROM_HANDLE(anv_device, device, _device);
1179 ANV_FROM_HANDLE(anv_fence, fence, _fence);
1180 int64_t t = 0;
1181 int ret;
1182
1183 if (fence->ready)
1184 return VK_SUCCESS;
1185
1186 ret = anv_gem_wait(device, fence->bo.gem_handle, &t);
1187 if (ret == 0) {
1188 fence->ready = true;
1189 return VK_SUCCESS;
1190 }
1191
1192 return VK_NOT_READY;
1193 }
1194
1195 VkResult anv_WaitForFences(
1196 VkDevice _device,
1197 uint32_t fenceCount,
1198 const VkFence* pFences,
1199 VkBool32 waitAll,
1200 uint64_t timeout)
1201 {
1202 ANV_FROM_HANDLE(anv_device, device, _device);
1203 int64_t t = timeout;
1204 int ret;
1205
1206 /* FIXME: handle !waitAll */
1207
1208 for (uint32_t i = 0; i < fenceCount; i++) {
1209 ANV_FROM_HANDLE(anv_fence, fence, pFences[i]);
1210 ret = anv_gem_wait(device, fence->bo.gem_handle, &t);
1211 if (ret == -1 && errno == ETIME)
1212 return VK_TIMEOUT;
1213 else if (ret == -1)
1214 return vk_error(VK_ERROR_UNKNOWN);
1215 }
1216
1217 return VK_SUCCESS;
1218 }
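
/* Note on the loop above: DRM_IOCTL_I915_GEM_WAIT treats the timeout as an
 * in/out parameter and writes back the time remaining, so reusing `t` for
 * every fence makes the combined wait (approximately) respect the caller's
 * single timeout budget rather than multiplying it by fenceCount.
 */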
1219
1220 // Queue semaphore functions
1221
1222 VkResult anv_CreateSemaphore(
1223 VkDevice device,
1224 const VkSemaphoreCreateInfo* pCreateInfo,
1225 VkSemaphore* pSemaphore)
1226 {
1227 stub_return(VK_UNSUPPORTED);
1228 }
1229
1230 VkResult anv_DestroySemaphore(
1231 VkDevice device,
1232 VkSemaphore semaphore)
1233 {
1234 stub_return(VK_UNSUPPORTED);
1235 }
1236
1237 VkResult anv_QueueSignalSemaphore(
1238 VkQueue queue,
1239 VkSemaphore semaphore)
1240 {
1241 stub_return(VK_UNSUPPORTED);
1242 }
1243
1244 VkResult anv_QueueWaitSemaphore(
1245 VkQueue queue,
1246 VkSemaphore semaphore)
1247 {
1248 stub_return(VK_UNSUPPORTED);
1249 }
1250
1251 // Event functions
1252
1253 VkResult anv_CreateEvent(
1254 VkDevice device,
1255 const VkEventCreateInfo* pCreateInfo,
1256 VkEvent* pEvent)
1257 {
1258 stub_return(VK_UNSUPPORTED);
1259 }
1260
1261 VkResult anv_DestroyEvent(
1262 VkDevice device,
1263 VkEvent event)
1264 {
1265 stub_return(VK_UNSUPPORTED);
1266 }
1267
1268 VkResult anv_GetEventStatus(
1269 VkDevice device,
1270 VkEvent event)
1271 {
1272 stub_return(VK_UNSUPPORTED);
1273 }
1274
1275 VkResult anv_SetEvent(
1276 VkDevice device,
1277 VkEvent event)
1278 {
1279 stub_return(VK_UNSUPPORTED);
1280 }
1281
1282 VkResult anv_ResetEvent(
1283 VkDevice device,
1284 VkEvent event)
1285 {
1286 stub_return(VK_UNSUPPORTED);
1287 }
1288
1289 // Buffer functions
1290
1291 VkResult anv_CreateBuffer(
1292 VkDevice _device,
1293 const VkBufferCreateInfo* pCreateInfo,
1294 VkBuffer* pBuffer)
1295 {
1296 ANV_FROM_HANDLE(anv_device, device, _device);
1297 struct anv_buffer *buffer;
1298
1299 assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO);
1300
1301 buffer = anv_device_alloc(device, sizeof(*buffer), 8,
1302 VK_SYSTEM_ALLOC_TYPE_API_OBJECT);
1303 if (buffer == NULL)
1304 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
1305
1306 buffer->size = pCreateInfo->size;
1307 buffer->bo = NULL;
1308 buffer->offset = 0;
1309
1310 *pBuffer = anv_buffer_to_handle(buffer);
1311
1312 return VK_SUCCESS;
1313 }
1314
1315 VkResult anv_DestroyBuffer(
1316 VkDevice _device,
1317 VkBuffer _buffer)
1318 {
1319 ANV_FROM_HANDLE(anv_device, device, _device);
1320 ANV_FROM_HANDLE(anv_buffer, buffer, _buffer);
1321
1322 anv_device_free(device, buffer);
1323
1324 return VK_SUCCESS;
1325 }
1326
1327 void
1328 anv_fill_buffer_surface_state(struct anv_device *device, void *state,
1329 const struct anv_format *format,
1330 uint32_t offset, uint32_t range)
1331 {
1332 switch (device->info.gen) {
1333 case 7:
1334 gen7_fill_buffer_surface_state(state, format, offset, range);
1335 break;
1336 case 8:
1337 gen8_fill_buffer_surface_state(state, format, offset, range);
1338 break;
1339 default:
1340 unreachable("unsupported gen\n");
1341 }
1342 }
1343
1344 VkResult
1345 anv_buffer_view_create(
1346 struct anv_device * device,
1347 const VkBufferViewCreateInfo* pCreateInfo,
1348 struct anv_buffer_view ** view_out)
1349 {
1350 ANV_FROM_HANDLE(anv_buffer, buffer, pCreateInfo->buffer);
1351 struct anv_buffer_view *view;
1352
1353 assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_BUFFER_VIEW_CREATE_INFO);
1354
1355 view = anv_device_alloc(device, sizeof(*view), 8,
1356 VK_SYSTEM_ALLOC_TYPE_API_OBJECT);
1357 if (view == NULL)
1358 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
1359
1360 view->view = (struct anv_surface_view) {
1361 .bo = buffer->bo,
1362 .offset = buffer->offset + pCreateInfo->offset,
1363 .surface_state = anv_state_pool_alloc(&device->surface_state_pool, 64, 64),
1364 .format = anv_format_for_vk_format(pCreateInfo->format),
1365 .range = pCreateInfo->range,
1366 };
1367
1368 *view_out = view;
1369
1370 return VK_SUCCESS;
1371 }
1372
1373
1374 VkResult anv_CreateBufferView(
1375 VkDevice _device,
1376 const VkBufferViewCreateInfo* pCreateInfo,
1377 VkBufferView* pView)
1378 {
1379 return driver_layer->CreateBufferView(_device, pCreateInfo, pView);
1380 }
1381
1382 VkResult anv_DestroyBufferView(
1383 VkDevice _device,
1384 VkBufferView _bview)
1385 {
1386 ANV_FROM_HANDLE(anv_device, device, _device);
1387 ANV_FROM_HANDLE(anv_buffer_view, bview, _bview);
1388
1389 anv_surface_view_fini(device, &bview->view);
1390 anv_device_free(device, bview);
1391
1392 return VK_SUCCESS;
1393 }
1394
1395 VkResult anv_CreateSampler(
1396 VkDevice _device,
1397 const VkSamplerCreateInfo* pCreateInfo,
1398 VkSampler* pSampler)
1399 {
1400 return driver_layer->CreateSampler(_device, pCreateInfo, pSampler);
1401 }
1402
1403 VkResult anv_DestroySampler(
1404 VkDevice _device,
1405 VkSampler _sampler)
1406 {
1407 ANV_FROM_HANDLE(anv_device, device, _device);
1408 ANV_FROM_HANDLE(anv_sampler, sampler, _sampler);
1409
1410 anv_device_free(device, sampler);
1411
1412 return VK_SUCCESS;
1413 }
1414
1415 // Descriptor set functions
1416
1417 VkResult anv_CreateDescriptorSetLayout(
1418 VkDevice _device,
1419 const VkDescriptorSetLayoutCreateInfo* pCreateInfo,
1420 VkDescriptorSetLayout* pSetLayout)
1421 {
1422 ANV_FROM_HANDLE(anv_device, device, _device);
1423 struct anv_descriptor_set_layout *set_layout;
1424
1425 assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO);
1426
1427 uint32_t sampler_count[VK_SHADER_STAGE_NUM] = { 0, };
1428 uint32_t surface_count[VK_SHADER_STAGE_NUM] = { 0, };
1429 uint32_t num_dynamic_buffers = 0;
1430 uint32_t count = 0;
1431 uint32_t stages = 0;
1432 uint32_t s;
1433
1434 for (uint32_t i = 0; i < pCreateInfo->count; i++) {
1435 switch (pCreateInfo->pBinding[i].descriptorType) {
1436 case VK_DESCRIPTOR_TYPE_SAMPLER:
1437 case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
1438 for_each_bit(s, pCreateInfo->pBinding[i].stageFlags)
1439 sampler_count[s] += pCreateInfo->pBinding[i].arraySize;
1440 break;
1441 default:
1442 break;
1443 }
1444
1445 switch (pCreateInfo->pBinding[i].descriptorType) {
1446 case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
1447 case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE:
1448 case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
1449 case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
1450 case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
1451 case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
1452 case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
1453 case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
1454 case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
1455 case VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT:
1456 for_each_bit(s, pCreateInfo->pBinding[i].stageFlags)
1457 surface_count[s] += pCreateInfo->pBinding[i].arraySize;
1458 break;
1459 default:
1460 break;
1461 }
1462
1463 switch (pCreateInfo->pBinding[i].descriptorType) {
1464 case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
1465 case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
1466 num_dynamic_buffers += pCreateInfo->pBinding[i].arraySize;
1467 break;
1468 default:
1469 break;
1470 }
1471
1472 stages |= pCreateInfo->pBinding[i].stageFlags;
1473 count += pCreateInfo->pBinding[i].arraySize;
1474 }
1475
1476 uint32_t sampler_total = 0;
1477 uint32_t surface_total = 0;
1478 for (uint32_t s = 0; s < VK_SHADER_STAGE_NUM; s++) {
1479 sampler_total += sampler_count[s];
1480 surface_total += surface_count[s];
1481 }
1482
1483 size_t size = sizeof(*set_layout) +
1484 (sampler_total + surface_total) * sizeof(set_layout->entries[0]);
1485 set_layout = anv_device_alloc(device, size, 8,
1486 VK_SYSTEM_ALLOC_TYPE_API_OBJECT);
1487 if (!set_layout)
1488 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
1489
1490 set_layout->num_dynamic_buffers = num_dynamic_buffers;
1491 set_layout->count = count;
1492 set_layout->shader_stages = stages;
1493
1494 struct anv_descriptor_slot *p = set_layout->entries;
1495 struct anv_descriptor_slot *sampler[VK_SHADER_STAGE_NUM];
1496 struct anv_descriptor_slot *surface[VK_SHADER_STAGE_NUM];
1497 for (uint32_t s = 0; s < VK_SHADER_STAGE_NUM; s++) {
1498 set_layout->stage[s].surface_count = surface_count[s];
1499 set_layout->stage[s].surface_start = surface[s] = p;
1500 p += surface_count[s];
1501 set_layout->stage[s].sampler_count = sampler_count[s];
1502 set_layout->stage[s].sampler_start = sampler[s] = p;
1503 p += sampler_count[s];
1504 }
1505
1506 uint32_t descriptor = 0;
1507 int8_t dynamic_slot = 0;
1508 bool is_dynamic;
1509 for (uint32_t i = 0; i < pCreateInfo->count; i++) {
1510 switch (pCreateInfo->pBinding[i].descriptorType) {
1511 case VK_DESCRIPTOR_TYPE_SAMPLER:
1512 case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
1513 for_each_bit(s, pCreateInfo->pBinding[i].stageFlags)
1514 for (uint32_t j = 0; j < pCreateInfo->pBinding[i].arraySize; j++) {
1515 sampler[s]->index = descriptor + j;
1516 sampler[s]->dynamic_slot = -1;
1517 sampler[s]++;
1518 }
1519 break;
1520 default:
1521 break;
1522 }
1523
1524 switch (pCreateInfo->pBinding[i].descriptorType) {
1525 case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
1526 case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
1527 is_dynamic = true;
1528 break;
1529 default:
1530 is_dynamic = false;
1531 break;
1532 }
1533
1534 switch (pCreateInfo->pBinding[i].descriptorType) {
1535 case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
1536 case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE:
1537 case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
1538 case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
1539 case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
1540 case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
1541 case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
1542 case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
1543 case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
1544 case VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT:
1545 for_each_bit(s, pCreateInfo->pBinding[i].stageFlags)
1546 for (uint32_t j = 0; j < pCreateInfo->pBinding[i].arraySize; j++) {
1547 surface[s]->index = descriptor + j;
1548 if (is_dynamic)
1549 surface[s]->dynamic_slot = dynamic_slot + j;
1550 else
1551 surface[s]->dynamic_slot = -1;
1552 surface[s]++;
1553 }
1554 break;
1555 default:
1556 break;
1557 }
1558
1559 if (is_dynamic)
1560 dynamic_slot += pCreateInfo->pBinding[i].arraySize;
1561
1562 descriptor += pCreateInfo->pBinding[i].arraySize;
1563 }
1564
1565 *pSetLayout = anv_descriptor_set_layout_to_handle(set_layout);
1566
1567 return VK_SUCCESS;
1568 }
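
/* For reference, the entries[] array built above is laid out per shader
 * stage, surfaces first and then samplers:
 *
 *    [stage 0 surfaces][stage 0 samplers][stage 1 surfaces][stage 1 samplers]...
 *
 * with stage[s].surface_start and stage[s].sampler_start pointing into it.
 */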
1569
1570 VkResult anv_DestroyDescriptorSetLayout(
1571 VkDevice _device,
1572 VkDescriptorSetLayout _set_layout)
1573 {
1574 ANV_FROM_HANDLE(anv_device, device, _device);
1575 ANV_FROM_HANDLE(anv_descriptor_set_layout, set_layout, _set_layout);
1576
1577 anv_device_free(device, set_layout);
1578
1579 return VK_SUCCESS;
1580 }
1581
1582 VkResult anv_CreateDescriptorPool(
1583 VkDevice device,
1584 VkDescriptorPoolUsage poolUsage,
1585 uint32_t maxSets,
1586 const VkDescriptorPoolCreateInfo* pCreateInfo,
1587 VkDescriptorPool* pDescriptorPool)
1588 {
1589 anv_finishme("VkDescriptorPool is a stub");
1590 pDescriptorPool->handle = 1;
1591 return VK_SUCCESS;
1592 }
1593
1594 VkResult anv_DestroyDescriptorPool(
1595 VkDevice _device,
1596 VkDescriptorPool _pool)
1597 {
1598 anv_finishme("VkDescriptorPool is a stub: free the pool's descriptor sets");
1599 return VK_SUCCESS;
1600 }
1601
1602 VkResult anv_ResetDescriptorPool(
1603 VkDevice device,
1604 VkDescriptorPool descriptorPool)
1605 {
1606 anv_finishme("VkDescriptorPool is a stub: free the pool's descriptor sets");
1607 return VK_SUCCESS;
1608 }
1609
1610 VkResult
1611 anv_descriptor_set_create(struct anv_device *device,
1612 const struct anv_descriptor_set_layout *layout,
1613 struct anv_descriptor_set **out_set)
1614 {
1615 struct anv_descriptor_set *set;
1616 size_t size = sizeof(*set) + layout->count * sizeof(set->descriptors[0]);
1617
1618 set = anv_device_alloc(device, size, 8, VK_SYSTEM_ALLOC_TYPE_API_OBJECT);
1619 if (!set)
1620 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
1621
1622    /* A descriptor set may not be 100% filled. Clear the set so we can
1623 * later detect holes in it.
1624 */
1625 memset(set, 0, size);
1626
1627 *out_set = set;
1628
1629 return VK_SUCCESS;
1630 }
1631
1632 void
1633 anv_descriptor_set_destroy(struct anv_device *device,
1634 struct anv_descriptor_set *set)
1635 {
1636 anv_device_free(device, set);
1637 }
1638
1639 VkResult anv_AllocDescriptorSets(
1640 VkDevice _device,
1641 VkDescriptorPool descriptorPool,
1642 VkDescriptorSetUsage setUsage,
1643 uint32_t count,
1644 const VkDescriptorSetLayout* pSetLayouts,
1645 VkDescriptorSet* pDescriptorSets,
1646 uint32_t* pCount)
1647 {
1648 ANV_FROM_HANDLE(anv_device, device, _device);
1649
1650 VkResult result;
1651 struct anv_descriptor_set *set;
1652
1653 for (uint32_t i = 0; i < count; i++) {
1654 ANV_FROM_HANDLE(anv_descriptor_set_layout, layout, pSetLayouts[i]);
1655
1656 result = anv_descriptor_set_create(device, layout, &set);
1657 if (result != VK_SUCCESS) {
1658 *pCount = i;
1659 return result;
1660 }
1661
1662 pDescriptorSets[i] = anv_descriptor_set_to_handle(set);
1663 }
1664
1665 *pCount = count;
1666
1667 return VK_SUCCESS;
1668 }
1669
1670 VkResult anv_FreeDescriptorSets(
1671 VkDevice _device,
1672 VkDescriptorPool descriptorPool,
1673 uint32_t count,
1674 const VkDescriptorSet* pDescriptorSets)
1675 {
1676 ANV_FROM_HANDLE(anv_device, device, _device);
1677
1678 for (uint32_t i = 0; i < count; i++) {
1679 ANV_FROM_HANDLE(anv_descriptor_set, set, pDescriptorSets[i]);
1680
1681 anv_descriptor_set_destroy(device, set);
1682 }
1683
1684 return VK_SUCCESS;
1685 }
1686
1687 VkResult anv_UpdateDescriptorSets(
1688 VkDevice device,
1689 uint32_t writeCount,
1690 const VkWriteDescriptorSet* pDescriptorWrites,
1691 uint32_t copyCount,
1692 const VkCopyDescriptorSet* pDescriptorCopies)
1693 {
1694 for (uint32_t i = 0; i < writeCount; i++) {
1695 const VkWriteDescriptorSet *write = &pDescriptorWrites[i];
1696 ANV_FROM_HANDLE(anv_descriptor_set, set, write->destSet);
1697
1698 switch (write->descriptorType) {
1699 case VK_DESCRIPTOR_TYPE_SAMPLER:
1700 case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
1701 for (uint32_t j = 0; j < write->count; j++) {
1702 set->descriptors[write->destBinding + j].sampler =
1703 anv_sampler_from_handle(write->pDescriptors[j].sampler);
1704 }
1705
1706 if (write->descriptorType == VK_DESCRIPTOR_TYPE_SAMPLER)
1707 break;
1708
1709 /* fallthrough */
1710
1711 case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE:
1712 case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
1713 for (uint32_t j = 0; j < write->count; j++) {
1714 ANV_FROM_HANDLE(anv_image_view, iview,
1715 write->pDescriptors[j].imageView);
1716 set->descriptors[write->destBinding + j].view = &iview->view;
1717 }
1718 break;
1719
1720 case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
1721 case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
1722 anv_finishme("texel buffers not implemented");
1723 break;
1724
1725 case VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT:
1726 anv_finishme("input attachments not implemented");
1727 break;
1728
1729 case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
1730 case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
1731 case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
1732 case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
1733 for (uint32_t j = 0; j < write->count; j++) {
1734 ANV_FROM_HANDLE(anv_buffer_view, bview,
1735 write->pDescriptors[j].bufferView);
1736 set->descriptors[write->destBinding + j].view = &bview->view;
1737          }
         break;
1738 
1739 default:
1740 break;
1741 }
1742 }
1743
1744 for (uint32_t i = 0; i < copyCount; i++) {
1745 const VkCopyDescriptorSet *copy = &pDescriptorCopies[i];
1746       ANV_FROM_HANDLE(anv_descriptor_set, src, copy->srcSet);
1747 ANV_FROM_HANDLE(anv_descriptor_set, dest, copy->destSet);
1748 for (uint32_t j = 0; j < copy->count; j++) {
1749 dest->descriptors[copy->destBinding + j] =
1750 src->descriptors[copy->srcBinding + j];
1751 }
1752 }
1753
1754 return VK_SUCCESS;
1755 }
1756
1757 // State object functions
1758
1759 static inline int64_t
1760 clamp_int64(int64_t x, int64_t min, int64_t max)
1761 {
1762 if (x < min)
1763 return min;
1764 else if (x < max)
1765 return x;
1766 else
1767 return max;
1768 }
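
/* Worked example of why the scissor math below is done in 64 bits: with
 * offset.x == INT32_MAX and extent.width == 2, a 32-bit sum would wrap
 * negative, but (int64_t)INT32_MAX + 2 - 1 == 0x80000000 simply clamps to
 * the 0xffff hardware maximum.
 */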
1769
1770 VkResult anv_CreateDynamicViewportState(
1771 VkDevice _device,
1772 const VkDynamicViewportStateCreateInfo* pCreateInfo,
1773 VkDynamicViewportState* pState)
1774 {
1775 ANV_FROM_HANDLE(anv_device, device, _device);
1776 struct anv_dynamic_vp_state *state;
1777
1778 assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_DYNAMIC_VIEWPORT_STATE_CREATE_INFO);
1779
1780 state = anv_device_alloc(device, sizeof(*state), 8,
1781 VK_SYSTEM_ALLOC_TYPE_API_OBJECT);
1782 if (state == NULL)
1783 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
1784
1785 unsigned count = pCreateInfo->viewportAndScissorCount;
1786 state->sf_clip_vp = anv_state_pool_alloc(&device->dynamic_state_pool,
1787 count * 64, 64);
1788 state->cc_vp = anv_state_pool_alloc(&device->dynamic_state_pool,
1789 count * 8, 32);
1790 state->scissor = anv_state_pool_alloc(&device->dynamic_state_pool,
1791 count * 32, 32);
1792
1793 for (uint32_t i = 0; i < pCreateInfo->viewportAndScissorCount; i++) {
1794 const VkViewport *vp = &pCreateInfo->pViewports[i];
1795 const VkRect2D *s = &pCreateInfo->pScissors[i];
1796
1797       /* The gen7 state struct has just the matrix and guardband fields; the
1798        * gen8 struct adds the min/max viewport fields. */
1799 struct GEN8_SF_CLIP_VIEWPORT sf_clip_viewport = {
1800 .ViewportMatrixElementm00 = vp->width / 2,
1801 .ViewportMatrixElementm11 = vp->height / 2,
1802 .ViewportMatrixElementm22 = (vp->maxDepth - vp->minDepth) / 2,
1803 .ViewportMatrixElementm30 = vp->originX + vp->width / 2,
1804 .ViewportMatrixElementm31 = vp->originY + vp->height / 2,
1805 .ViewportMatrixElementm32 = (vp->maxDepth + vp->minDepth) / 2,
1806 .XMinClipGuardband = -1.0f,
1807 .XMaxClipGuardband = 1.0f,
1808 .YMinClipGuardband = -1.0f,
1809 .YMaxClipGuardband = 1.0f,
1810 .XMinViewPort = vp->originX,
1811 .XMaxViewPort = vp->originX + vp->width - 1,
1812 .YMinViewPort = vp->originY,
1813 .YMaxViewPort = vp->originY + vp->height - 1,
1814 };
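      /* The matrix elements above implement the usual NDC-to-window
       * transform, i.e.:
       *
       *    x_w = m00 * x_ndc + m30 = (w/2) * x_ndc + (originX + w/2)
       *    y_w = m11 * y_ndc + m31 = (h/2) * y_ndc + (originY + h/2)
       *    z_w = m22 * z_ndc + m32, mapping z_ndc in [-1,1] to
       *          [minDepth, maxDepth]
       */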
1815
1816 struct GEN7_CC_VIEWPORT cc_viewport = {
1817 .MinimumDepth = vp->minDepth,
1818 .MaximumDepth = vp->maxDepth
1819 };
1820
1821 /* Since xmax and ymax are inclusive, we have to have xmax < xmin or
1822        * ymax < ymin for empty clips. In case the clip x, y, width, and height are all
1823 * 0, the clamps below produce 0 for xmin, ymin, xmax, ymax, which isn't
1824 * what we want. Just special case empty clips and produce a canonical
1825 * empty clip. */
1826 static const struct GEN7_SCISSOR_RECT empty_scissor = {
1827 .ScissorRectangleYMin = 1,
1828 .ScissorRectangleXMin = 1,
1829 .ScissorRectangleYMax = 0,
1830 .ScissorRectangleXMax = 0
1831 };
1832
1833 const int max = 0xffff;
1834 struct GEN7_SCISSOR_RECT scissor = {
1835 /* Do this math using int64_t so overflow gets clamped correctly. */
1836 .ScissorRectangleYMin = clamp_int64(s->offset.y, 0, max),
1837 .ScissorRectangleXMin = clamp_int64(s->offset.x, 0, max),
1838 .ScissorRectangleYMax = clamp_int64((uint64_t) s->offset.y + s->extent.height - 1, 0, max),
1839 .ScissorRectangleXMax = clamp_int64((uint64_t) s->offset.x + s->extent.width - 1, 0, max)
1840 };
1841
1842 GEN8_SF_CLIP_VIEWPORT_pack(NULL, state->sf_clip_vp.map + i * 64, &sf_clip_viewport);
1843       GEN7_CC_VIEWPORT_pack(NULL, state->cc_vp.map + i * 8, &cc_viewport);
1844
1845 if (s->extent.width <= 0 || s->extent.height <= 0) {
1846 GEN7_SCISSOR_RECT_pack(NULL, state->scissor.map + i * 32, &empty_scissor);
1847 } else {
1848 GEN7_SCISSOR_RECT_pack(NULL, state->scissor.map + i * 32, &scissor);
1849 }
1850 }
1851
1852 *pState = anv_dynamic_vp_state_to_handle(state);
1853
1854 return VK_SUCCESS;
1855 }
1856
1857 VkResult anv_DestroyDynamicViewportState(
1858 VkDevice _device,
1859 VkDynamicViewportState _vp_state)
1860 {
1861 ANV_FROM_HANDLE(anv_device, device, _device);
1862 ANV_FROM_HANDLE(anv_dynamic_vp_state, vp_state, _vp_state);
1863
1864 anv_state_pool_free(&device->dynamic_state_pool, vp_state->sf_clip_vp);
1865 anv_state_pool_free(&device->dynamic_state_pool, vp_state->cc_vp);
1866 anv_state_pool_free(&device->dynamic_state_pool, vp_state->scissor);
1867
1868 anv_device_free(device, vp_state);
1869
1870 return VK_SUCCESS;
1871 }
1872
1873 VkResult anv_CreateDynamicRasterState(
1874 VkDevice _device,
1875 const VkDynamicRasterStateCreateInfo* pCreateInfo,
1876 VkDynamicRasterState* pState)
1877 {
1878 return driver_layer->CreateDynamicRasterState(_device, pCreateInfo, pState);
1879 }
1880
1881 VkResult anv_DestroyDynamicRasterState(
1882 VkDevice _device,
1883 VkDynamicRasterState _rs_state)
1884 {
1885 ANV_FROM_HANDLE(anv_device, device, _device);
1886 ANV_FROM_HANDLE(anv_dynamic_rs_state, rs_state, _rs_state);
1887
1888 anv_device_free(device, rs_state);
1889
1890 return VK_SUCCESS;
1891 }
1892
1893 VkResult anv_CreateDynamicColorBlendState(
1894 VkDevice _device,
1895 const VkDynamicColorBlendStateCreateInfo* pCreateInfo,
1896 VkDynamicColorBlendState* pState)
1897 {
1898 ANV_FROM_HANDLE(anv_device, device, _device);
1899 struct anv_dynamic_cb_state *state;
1900
1901 assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_DYNAMIC_COLOR_BLEND_STATE_CREATE_INFO);
1902
1903 state = anv_device_alloc(device, sizeof(*state), 8,
1904 VK_SYSTEM_ALLOC_TYPE_API_OBJECT);
1905 if (state == NULL)
1906 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
1907
1908 struct GEN7_COLOR_CALC_STATE color_calc_state = {
1909 .BlendConstantColorRed = pCreateInfo->blendConst[0],
1910 .BlendConstantColorGreen = pCreateInfo->blendConst[1],
1911 .BlendConstantColorBlue = pCreateInfo->blendConst[2],
1912 .BlendConstantColorAlpha = pCreateInfo->blendConst[3]
1913 };
1914
1915 GEN7_COLOR_CALC_STATE_pack(NULL, state->color_calc_state, &color_calc_state);
1916
1917 *pState = anv_dynamic_cb_state_to_handle(state);
1918
1919 return VK_SUCCESS;
1920 }
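/* Hypothetical application-side usage, for illustration only:
 *
 *    VkDynamicColorBlendState cb_state;
 *    vkCreateDynamicColorBlendState(device,
 *       &(VkDynamicColorBlendStateCreateInfo) {
 *          .sType = VK_STRUCTURE_TYPE_DYNAMIC_COLOR_BLEND_STATE_CREATE_INFO,
 *          .blendConst = { 1.0f, 0.5f, 0.25f, 1.0f },
 *       }, &cb_state);
 *
 * The four blendConst values become the constant blend color in the
 * packed GEN7_COLOR_CALC_STATE above.
 */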
1921
1922 VkResult anv_DestroyDynamicColorBlendState(
1923 VkDevice _device,
1924 VkDynamicColorBlendState _cb_state)
1925 {
1926 ANV_FROM_HANDLE(anv_device, device, _device);
1927 ANV_FROM_HANDLE(anv_dynamic_cb_state, cb_state, _cb_state);
1928
1929 anv_device_free(device, cb_state);
1930
1931 return VK_SUCCESS;
1932 }
1933
VkResult anv_CreateDynamicDepthStencilState(
    VkDevice                                    _device,
    const VkDynamicDepthStencilStateCreateInfo* pCreateInfo,
    VkDynamicDepthStencilState*                 pState)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   struct anv_dynamic_ds_state *state;

   state = anv_device_alloc(device, sizeof(*state), 8,
                            VK_SYSTEM_ALLOC_TYPE_API_OBJECT);
   if (state == NULL)
      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

   /* GEN depth/stencil state packing (compare ops, write masks) omitted
    * in this sketch. */
   *pState = anv_dynamic_ds_state_to_handle(state);
   return VK_SUCCESS;
}
1941
1942 VkResult anv_DestroyDynamicDepthStencilState(
1943 VkDevice _device,
1944 VkDynamicDepthStencilState _ds_state)
1945 {
1946 ANV_FROM_HANDLE(anv_device, device, _device);
1947 ANV_FROM_HANDLE(anv_dynamic_ds_state, ds_state, _ds_state);
1948
1949 anv_device_free(device, ds_state);
1950
1951 return VK_SUCCESS;
1952 }
1953
1954 VkResult anv_CreateFramebuffer(
1955 VkDevice _device,
1956 const VkFramebufferCreateInfo* pCreateInfo,
1957 VkFramebuffer* pFramebuffer)
1958 {
1959 ANV_FROM_HANDLE(anv_device, device, _device);
1960 struct anv_framebuffer *framebuffer;
1961
1962 assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO);
1963
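   /* Allocate the framebuffer and its attachment-pointer array as a
    * single block; the array lives immediately after the struct, so one
    * anv_device_free() releases both.
    */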
1964 size_t size = sizeof(*framebuffer) +
1965 sizeof(struct anv_attachment_view *) * pCreateInfo->attachmentCount;
1966 framebuffer = anv_device_alloc(device, size, 8,
1967 VK_SYSTEM_ALLOC_TYPE_API_OBJECT);
1968 if (framebuffer == NULL)
1969 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
1970
1971 framebuffer->attachment_count = pCreateInfo->attachmentCount;
1972 for (uint32_t i = 0; i < pCreateInfo->attachmentCount; i++) {
1973 ANV_FROM_HANDLE(anv_attachment_view, view,
1974 pCreateInfo->pAttachments[i].view);
1975
1976 framebuffer->attachments[i] = view;
1977 }
1978
1979 framebuffer->width = pCreateInfo->width;
1980 framebuffer->height = pCreateInfo->height;
1981 framebuffer->layers = pCreateInfo->layers;
1982
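   /* Each framebuffer carries an implicitly created viewport/scissor
    * state covering its full extent, presumably for internal full-surface
    * operations; it is destroyed together with the framebuffer.
    */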
1983 anv_CreateDynamicViewportState(anv_device_to_handle(device),
1984 &(VkDynamicViewportStateCreateInfo) {
1985 .sType = VK_STRUCTURE_TYPE_DYNAMIC_VIEWPORT_STATE_CREATE_INFO,
1986 .viewportAndScissorCount = 1,
1987 .pViewports = (VkViewport[]) {
1988 {
1989 .originX = 0,
1990 .originY = 0,
1991 .width = pCreateInfo->width,
1992 .height = pCreateInfo->height,
1993 .minDepth = 0,
1994 .maxDepth = 1
1995 },
1996 },
1997 .pScissors = (VkRect2D[]) {
1998 { { 0, 0 },
1999 { pCreateInfo->width, pCreateInfo->height } },
2000 }
2001 },
2002 &framebuffer->vp_state);
2003
2004 *pFramebuffer = anv_framebuffer_to_handle(framebuffer);
2005
2006 return VK_SUCCESS;
2007 }
2008
2009 VkResult anv_DestroyFramebuffer(
2010 VkDevice _device,
2011 VkFramebuffer _fb)
2012 {
2013 ANV_FROM_HANDLE(anv_device, device, _device);
2014 ANV_FROM_HANDLE(anv_framebuffer, fb, _fb);
2015
2016 anv_DestroyDynamicViewportState(anv_device_to_handle(device),
2017 fb->vp_state);
2018 anv_device_free(device, fb);
2019
2020 return VK_SUCCESS;
2021 }
2022
2023 VkResult anv_CreateRenderPass(
2024 VkDevice _device,
2025 const VkRenderPassCreateInfo* pCreateInfo,
2026 VkRenderPass* pRenderPass)
2027 {
2028 ANV_FROM_HANDLE(anv_device, device, _device);
2029 struct anv_render_pass *pass;
2030 size_t size;
2031 size_t attachments_offset;
2032
2033 assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO);
2034
2035 size = sizeof(*pass);
2036 size += pCreateInfo->subpassCount * sizeof(pass->subpasses[0]);
2037 attachments_offset = size;
2038 size += pCreateInfo->attachmentCount * sizeof(pass->attachments[0]);
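   /* Single-allocation layout: the anv_render_pass struct with its
    * trailing subpass array, followed by the attachment array at
    * attachments_offset. Only the attachments need an explicit offset
    * because the subpasses are the struct's flexible tail.
    */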
2039
2040 pass = anv_device_alloc(device, size, 8,
2041 VK_SYSTEM_ALLOC_TYPE_API_OBJECT);
2042 if (pass == NULL)
2043 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
2044
   /* Clear the subpasses along with the parent pass. This is required
    * because each array member of anv_subpass must be either NULL or a
    * valid pointer.
    */
2048 memset(pass, 0, size);
2049 pass->attachment_count = pCreateInfo->attachmentCount;
2050 pass->subpass_count = pCreateInfo->subpassCount;
2051 pass->attachments = (void *) pass + attachments_offset;
2052
2053 for (uint32_t i = 0; i < pCreateInfo->attachmentCount; i++) {
2054 struct anv_render_pass_attachment *att = &pass->attachments[i];
2055
2056 att->format = anv_format_for_vk_format(pCreateInfo->pAttachments[i].format);
2057 att->samples = pCreateInfo->pAttachments[i].samples;
2058 att->load_op = pCreateInfo->pAttachments[i].loadOp;
2059 att->stencil_load_op = pCreateInfo->pAttachments[i].stencilLoadOp;
      /* TODO: store ops are not yet tracked by the driver:
       * att->store_op = pCreateInfo->pAttachments[i].storeOp;
       * att->stencil_store_op = pCreateInfo->pAttachments[i].stencilStoreOp;
       */
2062
2063 if (att->load_op == VK_ATTACHMENT_LOAD_OP_CLEAR) {
2064 if (anv_format_is_color(att->format)) {
2065 ++pass->num_color_clear_attachments;
2066 } else if (att->format->depth_format) {
2067 pass->has_depth_clear_attachment = true;
2068 }
2069 } else if (att->stencil_load_op == VK_ATTACHMENT_LOAD_OP_CLEAR) {
2070 assert(att->format->has_stencil);
2071 pass->has_stencil_clear_attachment = true;
2072 }
2073 }
2074
2075 for (uint32_t i = 0; i < pCreateInfo->subpassCount; i++) {
2076 const VkSubpassDescription *desc = &pCreateInfo->pSubpasses[i];
2077 struct anv_subpass *subpass = &pass->subpasses[i];
2078
2079 subpass->input_count = desc->inputCount;
2080 subpass->color_count = desc->colorCount;
2081
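      /* Copy each attachment-reference array out of the create info so
       * the pass owns its own storage; arrays with a zero count stay
       * NULL thanks to the memset above.
       */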
2082 if (desc->inputCount > 0) {
2083 subpass->input_attachments =
2084 anv_device_alloc(device, desc->inputCount * sizeof(uint32_t),
2085 8, VK_SYSTEM_ALLOC_TYPE_API_OBJECT);
2086
2087 for (uint32_t j = 0; j < desc->inputCount; j++) {
2088 subpass->input_attachments[j]
2089 = desc->inputAttachments[j].attachment;
2090 }
2091 }
2092
2093 if (desc->colorCount > 0) {
2094 subpass->color_attachments =
2095 anv_device_alloc(device, desc->colorCount * sizeof(uint32_t),
2096 8, VK_SYSTEM_ALLOC_TYPE_API_OBJECT);
2097
2098 for (uint32_t j = 0; j < desc->colorCount; j++) {
2099 subpass->color_attachments[j]
2100 = desc->colorAttachments[j].attachment;
2101 }
2102 }
2103
2104 if (desc->resolveAttachments) {
2105 subpass->resolve_attachments =
2106 anv_device_alloc(device, desc->colorCount * sizeof(uint32_t),
2107 8, VK_SYSTEM_ALLOC_TYPE_API_OBJECT);
2108
2109 for (uint32_t j = 0; j < desc->colorCount; j++) {
2110 subpass->resolve_attachments[j]
2111 = desc->resolveAttachments[j].attachment;
2112 }
2113 }
2114
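      /* May be VK_ATTACHMENT_UNUSED when the subpass has no depth/stencil
       * attachment.
       */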
2115 subpass->depth_stencil_attachment = desc->depthStencilAttachment.attachment;
2116 }
2117
2118 *pRenderPass = anv_render_pass_to_handle(pass);
2119
2120 return VK_SUCCESS;
2121 }
2122
2123 VkResult anv_DestroyRenderPass(
2124 VkDevice _device,
2125 VkRenderPass _pass)
2126 {
2127 ANV_FROM_HANDLE(anv_device, device, _device);
2128 ANV_FROM_HANDLE(anv_render_pass, pass, _pass);
2129
2130 for (uint32_t i = 0; i < pass->subpass_count; i++) {
      /* In VkSubpassDescription, each of the attachment arrays may be
       * null, in which case the corresponding pointer here is still NULL
       * from the zero-initialization and anv_device_free() is expected to
       * treat it as a no-op, like free().
       */
2134 struct anv_subpass *subpass = &pass->subpasses[i];
2135
2136 anv_device_free(device, subpass->input_attachments);
2137 anv_device_free(device, subpass->color_attachments);
2138 anv_device_free(device, subpass->resolve_attachments);
2139 }
2140
2141 anv_device_free(device, pass);
2142
2143 return VK_SUCCESS;
2144 }
2145
2146 VkResult anv_GetRenderAreaGranularity(
2147 VkDevice device,
2148 VkRenderPass renderPass,
2149 VkExtent2D* pGranularity)
2150 {
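   /* A 1x1 granularity means every render area is already optimally
    * aligned; applications gain nothing by rounding their render areas
    * to a coarser grid.
    */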
2151 *pGranularity = (VkExtent2D) { 1, 1 };
2152
2153 return VK_SUCCESS;
2154 }
2155
2156 void vkCmdDbgMarkerBegin(
2157 VkCmdBuffer cmdBuffer,
2158 const char* pMarker)
2159 __attribute__ ((visibility ("default")));
2160
2161 void vkCmdDbgMarkerEnd(
2162 VkCmdBuffer cmdBuffer)
2163 __attribute__ ((visibility ("default")));
2164
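/* No-op stubs for the debug marker entry points. The prototypes above
 * carry the default-visibility attribute so that these symbols stay
 * exported even when the driver is built with hidden visibility.
 */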
2165 void vkCmdDbgMarkerBegin(
2166 VkCmdBuffer cmdBuffer,
2167 const char* pMarker)
2168 {
2169 }
2170
2171 void vkCmdDbgMarkerEnd(
2172 VkCmdBuffer cmdBuffer)
2173 {
2174 }