vk: Use vk* entrypoints in meta, not driver_layer pointers
[mesa.git] / src / vulkan / anv_device.c
1 /*
2 * Copyright © 2015 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 */
23
24 #include <assert.h>
25 #include <stdbool.h>
26 #include <string.h>
27 #include <unistd.h>
28 #include <fcntl.h>
29
30 #include "anv_private.h"
31 #include "mesa/main/git_sha1.h"
32 #include "util/strtod.h"
33
34 static VkResult
35 anv_physical_device_init(struct anv_physical_device *device,
36 struct anv_instance *instance,
37 const char *path)
38 {
39 VkResult result;
40 int fd;
41
42 fd = open(path, O_RDWR | O_CLOEXEC);
43 if (fd < 0)
44 return vk_errorf(VK_ERROR_UNAVAILABLE, "failed to open %s: %m", path);
45
46 device->instance = instance;
47 device->path = path;
48
49 device->chipset_id = anv_gem_get_param(fd, I915_PARAM_CHIPSET_ID);
50 if (!device->chipset_id) {
51 result = vk_errorf(VK_ERROR_UNAVAILABLE, "failed to get chipset id: %m");
52 goto fail;
53 }
54
55 device->name = brw_get_device_name(device->chipset_id);
56 device->info = brw_get_device_info(device->chipset_id, -1);
57 if (!device->info) {
58 result = vk_errorf(VK_ERROR_UNAVAILABLE, "failed to get device info");
59 goto fail;
60 }
61
62 if (anv_gem_get_aperture(fd, &device->aperture_size) == -1) {
63 result = vk_errorf(VK_ERROR_UNAVAILABLE, "failed to get aperture size: %m");
64 goto fail;
65 }
66
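/* The driver depends on several kernel features. Check for them up front
 * so unsupported configurations fail cleanly at enumeration time instead
 * of faulting later at first submission.
 */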
67 if (!anv_gem_get_param(fd, I915_PARAM_HAS_WAIT_TIMEOUT)) {
68 result = vk_errorf(VK_ERROR_UNAVAILABLE, "kernel missing gem wait");
69 goto fail;
70 }
71
72 if (!anv_gem_get_param(fd, I915_PARAM_HAS_EXECBUF2)) {
73 result = vk_errorf(VK_ERROR_UNAVAILABLE, "kernel missing execbuf2");
74 goto fail;
75 }
76
77 if (!anv_gem_get_param(fd, I915_PARAM_HAS_LLC)) {
78 result = vk_errorf(VK_ERROR_UNAVAILABLE, "non-llc gpu");
79 goto fail;
80 }
81
82 close(fd);
83
84 return VK_SUCCESS;
85
86 fail:
87 close(fd);
88 return result;
89 }
90
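/* Note that the default allocator ignores the alignment parameter. That
 * is safe for the requests made in this file, which never ask for more
 * than 8-byte alignment, an amount malloc already guarantees.
 */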
91 static void *default_alloc(
92 void* pUserData,
93 size_t size,
94 size_t alignment,
95 VkSystemAllocType allocType)
96 {
97 return malloc(size);
98 }
99
100 static void default_free(
101 void* pUserData,
102 void* pMem)
103 {
104 free(pMem);
105 }
106
107 static const VkAllocCallbacks default_alloc_callbacks = {
108 .pUserData = NULL,
109 .pfnAlloc = default_alloc,
110 .pfnFree = default_free
111 };
112
113 VkResult anv_CreateInstance(
114 const VkInstanceCreateInfo* pCreateInfo,
115 VkInstance* pInstance)
116 {
117 struct anv_instance *instance;
118 const VkAllocCallbacks *alloc_callbacks = &default_alloc_callbacks;
119 void *user_data = NULL;
120
121 assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_INSTANCE_CREATE_INFO);
122
123 if (pCreateInfo->pAllocCb) {
124 alloc_callbacks = pCreateInfo->pAllocCb;
125 user_data = pCreateInfo->pAllocCb->pUserData;
126 }
127 instance = alloc_callbacks->pfnAlloc(user_data, sizeof(*instance), 8,
128 VK_SYSTEM_ALLOC_TYPE_API_OBJECT);
129 if (!instance)
130 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
131
132 instance->pAllocUserData = alloc_callbacks->pUserData;
133 instance->pfnAlloc = alloc_callbacks->pfnAlloc;
134 instance->pfnFree = alloc_callbacks->pfnFree;
135 instance->apiVersion = pCreateInfo->pAppInfo->apiVersion;
136 instance->physicalDeviceCount = 0;
137
138 _mesa_locale_init();
139
140 VG(VALGRIND_CREATE_MEMPOOL(instance, 0, false));
141
142 *pInstance = anv_instance_to_handle(instance);
143
144 return VK_SUCCESS;
145 }
146
147 VkResult anv_DestroyInstance(
148 VkInstance _instance)
149 {
150 ANV_FROM_HANDLE(anv_instance, instance, _instance);
151
152 VG(VALGRIND_DESTROY_MEMPOOL(instance));
153
154 _mesa_locale_fini();
155
156 instance->pfnFree(instance->pAllocUserData, instance);
157
158 return VK_SUCCESS;
159 }
160
161 static void *
162 anv_instance_alloc(struct anv_instance *instance, size_t size,
163 size_t alignment, VkSystemAllocType allocType)
164 {
165 void *mem = instance->pfnAlloc(instance->pAllocUserData,
166 size, alignment, allocType);
167 if (mem) {
168 VALGRIND_MEMPOOL_ALLOC(instance, mem, size);
169 VALGRIND_MAKE_MEM_UNDEFINED(mem, size);
170 }
171 return mem;
172 }
173
174 static void
175 anv_instance_free(struct anv_instance *instance, void *mem)
176 {
177 if (mem == NULL)
178 return;
179
180 VALGRIND_MEMPOOL_FREE(instance, mem);
181
182 instance->pfnFree(instance->pAllocUserData, mem);
183 }
184
185 VkResult anv_EnumeratePhysicalDevices(
186 VkInstance _instance,
187 uint32_t* pPhysicalDeviceCount,
188 VkPhysicalDevice* pPhysicalDevices)
189 {
190 ANV_FROM_HANDLE(anv_instance, instance, _instance);
191 VkResult result;
192
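/* Physical device initialization is deferred until the first enumeration
 * call. We only ever expose a single device, opened from the fixed render
 * node below.
 */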
193 if (instance->physicalDeviceCount == 0) {
194 result = anv_physical_device_init(&instance->physicalDevice,
195 instance, "/dev/dri/renderD128");
196 if (result != VK_SUCCESS)
197 return result;
198
199 instance->physicalDeviceCount = 1;
200 }
201
202 /* pPhysicalDeviceCount is an out parameter if pPhysicalDevices is NULL;
203 * otherwise it's an inout parameter.
204 *
205 * The Vulkan spec (git aaed022) says:
206 *
207 * pPhysicalDeviceCount is a pointer to an unsigned integer variable
208 * that is initialized with the number of devices the application is
209 *    prepared to receive handles to. pPhysicalDevices is a pointer to
210 * an array of at least this many VkPhysicalDevice handles [...].
211 *
212 * Upon success, if pPhysicalDevices is NULL, vkEnumeratePhysicalDevices
213 * overwrites the contents of the variable pointed to by
214 * pPhysicalDeviceCount with the number of physical devices in the
215 * instance; otherwise, vkEnumeratePhysicalDevices overwrites
216 * pPhysicalDeviceCount with the number of physical handles written to
217 * pPhysicalDevices.
218 */
219 if (!pPhysicalDevices) {
220 *pPhysicalDeviceCount = instance->physicalDeviceCount;
221 } else if (*pPhysicalDeviceCount >= 1) {
222 pPhysicalDevices[0] = anv_physical_device_to_handle(&instance->physicalDevice);
223 *pPhysicalDeviceCount = 1;
224 } else {
225 *pPhysicalDeviceCount = 0;
226 }
227
228 return VK_SUCCESS;
229 }
230
231 VkResult anv_GetPhysicalDeviceFeatures(
232 VkPhysicalDevice physicalDevice,
233 VkPhysicalDeviceFeatures* pFeatures)
234 {
235 anv_finishme("Get correct values for PhysicalDeviceFeatures");
236
237 *pFeatures = (VkPhysicalDeviceFeatures) {
238 .robustBufferAccess = false,
239 .fullDrawIndexUint32 = false,
240 .imageCubeArray = false,
241 .independentBlend = false,
242 .geometryShader = true,
243 .tessellationShader = false,
244 .sampleRateShading = false,
245 .dualSourceBlend = true,
246 .logicOp = true,
247 .instancedDrawIndirect = true,
248 .depthClip = false,
249 .depthBiasClamp = false,
250 .fillModeNonSolid = true,
251 .depthBounds = false,
252 .wideLines = true,
253 .largePoints = true,
254 .textureCompressionETC2 = true,
255 .textureCompressionASTC_LDR = true,
256 .textureCompressionBC = true,
257 .pipelineStatisticsQuery = true,
258 .vertexSideEffects = false,
259 .tessellationSideEffects = false,
260 .geometrySideEffects = false,
261 .fragmentSideEffects = false,
262 .shaderTessellationPointSize = false,
263 .shaderGeometryPointSize = true,
264 .shaderTextureGatherExtended = true,
265 .shaderStorageImageExtendedFormats = false,
266 .shaderStorageImageMultisample = false,
267 .shaderStorageBufferArrayConstantIndexing = false,
268 .shaderStorageImageArrayConstantIndexing = false,
269 .shaderUniformBufferArrayDynamicIndexing = true,
270 .shaderSampledImageArrayDynamicIndexing = false,
271 .shaderStorageBufferArrayDynamicIndexing = false,
272 .shaderStorageImageArrayDynamicIndexing = false,
273 .shaderClipDistance = false,
274 .shaderCullDistance = false,
275 .shaderFloat64 = false,
276 .shaderInt64 = false,
277 .shaderFloat16 = false,
278 .shaderInt16 = false,
279 };
280
281 return VK_SUCCESS;
282 }
283
284 VkResult anv_GetPhysicalDeviceLimits(
285 VkPhysicalDevice physicalDevice,
286 VkPhysicalDeviceLimits* pLimits)
287 {
288 ANV_FROM_HANDLE(anv_physical_device, physical_device, physicalDevice);
289 const struct brw_device_info *devinfo = physical_device->info;
290
291 anv_finishme("Get correct values for PhysicalDeviceLimits");
292
293 *pLimits = (VkPhysicalDeviceLimits) {
294 .maxImageDimension1D = (1 << 14),
295 .maxImageDimension2D = (1 << 14),
296 .maxImageDimension3D = (1 << 10),
297 .maxImageDimensionCube = (1 << 14),
298 .maxImageArrayLayers = (1 << 10),
299 .maxTexelBufferSize = (1 << 14),
300 .maxUniformBufferSize = UINT32_MAX,
301 .maxStorageBufferSize = UINT32_MAX,
302 .maxPushConstantsSize = MAX_PUSH_CONSTANTS_SIZE,
303 .maxMemoryAllocationCount = UINT32_MAX,
304 .bufferImageGranularity = 64, /* A cache line */
305 .maxBoundDescriptorSets = MAX_SETS,
306 .maxDescriptorSets = UINT32_MAX,
307 .maxPerStageDescriptorSamplers = 64,
308 .maxPerStageDescriptorUniformBuffers = 64,
309 .maxPerStageDescriptorStorageBuffers = 64,
310 .maxPerStageDescriptorSampledImages = 64,
311 .maxPerStageDescriptorStorageImages = 64,
312 .maxDescriptorSetSamplers = 256,
313 .maxDescriptorSetUniformBuffers = 256,
314 .maxDescriptorSetStorageBuffers = 256,
315 .maxDescriptorSetSampledImages = 256,
316 .maxDescriptorSetStorageImages = 256,
317 .maxVertexInputAttributes = 32,
318 .maxVertexInputAttributeOffset = 256,
319 .maxVertexInputBindingStride = 256,
320 .maxVertexOutputComponents = 32,
321 .maxTessGenLevel = 0,
322 .maxTessPatchSize = 0,
323 .maxTessControlPerVertexInputComponents = 0,
324 .maxTessControlPerVertexOutputComponents = 0,
325 .maxTessControlPerPatchOutputComponents = 0,
326 .maxTessControlTotalOutputComponents = 0,
327 .maxTessEvaluationInputComponents = 0,
328 .maxTessEvaluationOutputComponents = 0,
329 .maxGeometryShaderInvocations = 6,
330 .maxGeometryInputComponents = 16,
331 .maxGeometryOutputComponents = 16,
332 .maxGeometryOutputVertices = 16,
333 .maxGeometryTotalOutputComponents = 16,
334 .maxFragmentInputComponents = 16,
335 .maxFragmentOutputBuffers = 8,
336 .maxFragmentDualSourceBuffers = 2,
337 .maxFragmentCombinedOutputResources = 8,
338 .maxComputeSharedMemorySize = 1024,
339 .maxComputeWorkGroupCount = {
340 16 * devinfo->max_cs_threads,
341 16 * devinfo->max_cs_threads,
342 16 * devinfo->max_cs_threads,
343 },
344 .maxComputeWorkGroupInvocations = 16 * devinfo->max_cs_threads,
345 .maxComputeWorkGroupSize = {
346 16 * devinfo->max_cs_threads,
347 16 * devinfo->max_cs_threads,
348 16 * devinfo->max_cs_threads,
349 },
350 .subPixelPrecisionBits = 4 /* FIXME */,
351 .subTexelPrecisionBits = 4 /* FIXME */,
352 .mipmapPrecisionBits = 4 /* FIXME */,
353 .maxDrawIndexedIndexValue = UINT32_MAX,
354 .maxDrawIndirectInstanceCount = UINT32_MAX,
355 .primitiveRestartForPatches = UINT32_MAX,
356 .maxSamplerLodBias = 16,
357 .maxSamplerAnisotropy = 16,
358 .maxViewports = 16,
359 .maxDynamicViewportStates = UINT32_MAX,
360 .maxViewportDimensions = { (1 << 14), (1 << 14) },
361 .viewportBoundsRange = { -1.0, 1.0 }, /* FIXME */
362 .viewportSubPixelBits = 13, /* We take a float? */
363 .minMemoryMapAlignment = 64, /* A cache line */
364 .minTexelBufferOffsetAlignment = 1,
365 .minUniformBufferOffsetAlignment = 1,
366 .minStorageBufferOffsetAlignment = 1,
367 .minTexelOffset = 0, /* FIXME */
368 .maxTexelOffset = 0, /* FIXME */
369 .minTexelGatherOffset = 0, /* FIXME */
370 .maxTexelGatherOffset = 0, /* FIXME */
371 .minInterpolationOffset = 0, /* FIXME */
372 .maxInterpolationOffset = 0, /* FIXME */
373 .subPixelInterpolationOffsetBits = 0, /* FIXME */
374 .maxFramebufferWidth = (1 << 14),
375 .maxFramebufferHeight = (1 << 14),
376 .maxFramebufferLayers = (1 << 10),
377 .maxFramebufferColorSamples = 8,
378 .maxFramebufferDepthSamples = 8,
379 .maxFramebufferStencilSamples = 8,
380 .maxColorAttachments = MAX_RTS,
381 .maxSampledImageColorSamples = 8,
382 .maxSampledImageDepthSamples = 8,
383 .maxSampledImageIntegerSamples = 1,
384 .maxStorageImageSamples = 1,
385 .maxSampleMaskWords = 1,
386 .timestampFrequency = 1000 * 1000 * 1000 / 80,
387 .maxClipDistances = 0 /* FIXME */,
388 .maxCullDistances = 0 /* FIXME */,
389 .maxCombinedClipAndCullDistances = 0 /* FIXME */,
390 .pointSizeRange = { 0.125, 255.875 },
391 .lineWidthRange = { 0.0, 7.9921875 },
392 .pointSizeGranularity = (1.0 / 8.0),
393 .lineWidthGranularity = (1.0 / 128.0),
394 };
395
396 return VK_SUCCESS;
397 }
398
399 VkResult anv_GetPhysicalDeviceProperties(
400 VkPhysicalDevice physicalDevice,
401 VkPhysicalDeviceProperties* pProperties)
402 {
403 ANV_FROM_HANDLE(anv_physical_device, pdevice, physicalDevice);
404
405 *pProperties = (VkPhysicalDeviceProperties) {
406 .apiVersion = VK_MAKE_VERSION(0, 138, 1),
407 .driverVersion = 1,
408 .vendorId = 0x8086,
409 .deviceId = pdevice->chipset_id,
410 .deviceType = VK_PHYSICAL_DEVICE_TYPE_INTEGRATED_GPU,
411 };
412
413 strcpy(pProperties->deviceName, pdevice->name);
414 snprintf((char *)pProperties->pipelineCacheUUID, VK_UUID_LENGTH,
415 "anv-%s", MESA_GIT_SHA1 + 4);
416
417 return VK_SUCCESS;
418 }
419
420 VkResult anv_GetPhysicalDeviceQueueCount(
421 VkPhysicalDevice physicalDevice,
422 uint32_t* pCount)
423 {
424 *pCount = 1;
425
426 return VK_SUCCESS;
427 }
428
429 VkResult anv_GetPhysicalDeviceQueueProperties(
430 VkPhysicalDevice physicalDevice,
431 uint32_t count,
432 VkPhysicalDeviceQueueProperties* pQueueProperties)
433 {
434 assert(count == 1);
435
436 *pQueueProperties = (VkPhysicalDeviceQueueProperties) {
437 .queueFlags = VK_QUEUE_GRAPHICS_BIT |
438 VK_QUEUE_COMPUTE_BIT |
439 VK_QUEUE_DMA_BIT,
440 .queueCount = 1,
441 .supportsTimestamps = true,
442 };
443
444 return VK_SUCCESS;
445 }
446
447 VkResult anv_GetPhysicalDeviceMemoryProperties(
448 VkPhysicalDevice physicalDevice,
449 VkPhysicalDeviceMemoryProperties* pMemoryProperties)
450 {
451 ANV_FROM_HANDLE(anv_physical_device, physical_device, physicalDevice);
452 VkDeviceSize heap_size;
453
454 /* Reserve some wiggle room for the driver by exposing only 75% of the
455 * aperture to the heap.
456 */
457 heap_size = 3 * physical_device->aperture_size / 4;
458
459 /* The property flags below are valid only for llc platforms. */
460 pMemoryProperties->memoryTypeCount = 1;
461 pMemoryProperties->memoryTypes[0] = (VkMemoryType) {
462 .propertyFlags = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT,
463 .heapIndex = 0, /* Index into the single heap advertised below */
464 };
465
466 pMemoryProperties->memoryHeapCount = 1;
467 pMemoryProperties->memoryHeaps[0] = (VkMemoryHeap) {
468 .size = heap_size,
469 .flags = VK_MEMORY_HEAP_HOST_LOCAL,
470 };
471
472 return VK_SUCCESS;
473 }
474
475 PFN_vkVoidFunction anv_GetInstanceProcAddr(
476 VkInstance instance,
477 const char* pName)
478 {
479 return anv_lookup_entrypoint(pName);
480 }
481
482 PFN_vkVoidFunction anv_GetDeviceProcAddr(
483 VkDevice device,
484 const char* pName)
485 {
486 return anv_lookup_entrypoint(pName);
487 }
488
489 static VkResult
490 anv_queue_init(struct anv_device *device, struct anv_queue *queue)
491 {
492 queue->device = device;
493 queue->pool = &device->surface_state_pool;
494
495 queue->completed_serial = anv_state_pool_alloc(queue->pool, 4, 4);
496 if (queue->completed_serial.map == NULL)
497 return vk_error(VK_ERROR_OUT_OF_DEVICE_MEMORY);
498
499 *(uint32_t *)queue->completed_serial.map = 0;
500 queue->next_serial = 1;
501
502 return VK_SUCCESS;
503 }
504
505 static void
506 anv_queue_finish(struct anv_queue *queue)
507 {
508 #ifdef HAVE_VALGRIND
509 /* This gets torn down with the device so we only need to do this if
510 * valgrind is present.
511 */
512 anv_state_pool_free(queue->pool, queue->completed_serial);
513 #endif
514 }
515
516 static void
517 anv_device_init_border_colors(struct anv_device *device)
518 {
519 static const VkClearColorValue border_colors[] = {
520 [VK_BORDER_COLOR_FLOAT_TRANSPARENT_BLACK] = { .f32 = { 0.0, 0.0, 0.0, 0.0 } },
521 [VK_BORDER_COLOR_FLOAT_OPAQUE_BLACK] = { .f32 = { 0.0, 0.0, 0.0, 1.0 } },
522 [VK_BORDER_COLOR_FLOAT_OPAQUE_WHITE] = { .f32 = { 1.0, 1.0, 1.0, 1.0 } },
523 [VK_BORDER_COLOR_INT_TRANSPARENT_BLACK] = { .u32 = { 0, 0, 0, 0 } },
524 [VK_BORDER_COLOR_INT_OPAQUE_BLACK] = { .u32 = { 0, 0, 0, 1 } },
525 [VK_BORDER_COLOR_INT_OPAQUE_WHITE] = { .u32 = { 1, 1, 1, 1 } },
526 };
527
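/* Upload the table once into the dynamic state pool; sampler state can
 * then point its border color pointer at the entry matching its
 * VkBorderColor enum.
 */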
528 device->border_colors =
529 anv_state_pool_alloc(&device->dynamic_state_pool,
530 sizeof(border_colors), 32);
531 memcpy(device->border_colors.map, border_colors, sizeof(border_colors));
532 }
533
534 VkResult anv_CreateDevice(
535 VkPhysicalDevice physicalDevice,
536 const VkDeviceCreateInfo* pCreateInfo,
537 VkDevice* pDevice)
538 {
539 ANV_FROM_HANDLE(anv_physical_device, physical_device, physicalDevice);
540 struct anv_instance *instance = physical_device->instance;
541 struct anv_device *device;
542
543 assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO);
544
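/* Select the gen-specific function table. Meta operations call back
 * through the public vk* entrypoints rather than these driver_layer
 * pointers, so this table only backs the remaining gen-specific paths.
 */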
545 switch (physical_device->info->gen) {
546 case 7:
547 driver_layer = &gen7_layer;
548 break;
549 case 8:
550 driver_layer = &gen8_layer;
551 break;
default:
unreachable("unsupported gen");
552 }
553
554 device = anv_instance_alloc(instance, sizeof(*device), 8,
555 VK_SYSTEM_ALLOC_TYPE_API_OBJECT);
556 if (!device)
557 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
558
559 device->instance = physical_device->instance;
560
561 /* XXX(chadv): Can we dup() physicalDevice->fd here? */
562 device->fd = open(physical_device->path, O_RDWR | O_CLOEXEC);
563 if (device->fd == -1)
564 goto fail_device;
565
566 device->context_id = anv_gem_create_context(device);
567 if (device->context_id == -1)
568 goto fail_fd;
569
570 anv_bo_pool_init(&device->batch_bo_pool, device, ANV_CMD_BUFFER_BATCH_SIZE);
571
572 anv_block_pool_init(&device->dynamic_state_block_pool, device, 2048);
573
574 anv_state_pool_init(&device->dynamic_state_pool,
575 &device->dynamic_state_block_pool);
576
577 anv_block_pool_init(&device->instruction_block_pool, device, 2048);
578 anv_block_pool_init(&device->surface_state_block_pool, device, 2048);
579
580 anv_state_pool_init(&device->surface_state_pool,
581 &device->surface_state_block_pool);
582
583 anv_block_pool_init(&device->scratch_block_pool, device, 0x10000);
584
585 device->info = *physical_device->info;
586
587 device->compiler = anv_compiler_create(device);
588
589 pthread_mutex_init(&device->mutex, NULL);
590
591 anv_queue_init(device, &device->queue);
592
593 anv_device_init_meta(device);
594
595 anv_device_init_border_colors(device);
596
597 *pDevice = anv_device_to_handle(device);
598
599 return VK_SUCCESS;
600
601 fail_fd:
602 close(device->fd);
603 fail_device:
604 anv_device_free(device, device);
605
606 return vk_error(VK_ERROR_UNAVAILABLE);
607 }
608
609 VkResult anv_DestroyDevice(
610 VkDevice _device)
611 {
612 ANV_FROM_HANDLE(anv_device, device, _device);
613
614 anv_compiler_destroy(device->compiler);
615
616 anv_queue_finish(&device->queue);
617
618 anv_device_finish_meta(device);
619
620 #ifdef HAVE_VALGRIND
621 /* We only need to free these to prevent valgrind errors. The backing
622 * BO will go away in a couple of lines so we don't actually leak.
623 */
624 anv_state_pool_free(&device->dynamic_state_pool, device->border_colors);
625 #endif
626
627 anv_bo_pool_finish(&device->batch_bo_pool);
628 anv_state_pool_finish(&device->dynamic_state_pool);
629 anv_block_pool_finish(&device->dynamic_state_block_pool);
630 anv_block_pool_finish(&device->instruction_block_pool);
631 anv_state_pool_finish(&device->surface_state_pool);
632 anv_block_pool_finish(&device->surface_state_block_pool);
633 anv_block_pool_finish(&device->scratch_block_pool);
634
635 close(device->fd);
636
637 anv_instance_free(device->instance, device);
638
639 return VK_SUCCESS;
640 }
641
642 static const VkExtensionProperties global_extensions[] = {
643 {
644 .extName = "VK_WSI_LunarG",
645 .specVersion = 3
646 }
647 };
648
649 VkResult anv_GetGlobalExtensionProperties(
650 const char* pLayerName,
651 uint32_t* pCount,
652 VkExtensionProperties* pProperties)
653 {
654 if (pProperties == NULL) {
655 *pCount = ARRAY_SIZE(global_extensions);
656 return VK_SUCCESS;
657 }
658
659 assert(*pCount <= ARRAY_SIZE(global_extensions));
660
661 *pCount = ARRAY_SIZE(global_extensions);
662 memcpy(pProperties, global_extensions, sizeof(global_extensions));
663
664 return VK_SUCCESS;
665 }
666
667 VkResult anv_GetPhysicalDeviceExtensionProperties(
668 VkPhysicalDevice physicalDevice,
669 const char* pLayerName,
670 uint32_t* pCount,
671 VkExtensionProperties* pProperties)
672 {
673 if (pProperties == NULL) {
674 *pCount = 0;
675 return VK_SUCCESS;
676 }
677
678 /* None supported at this time */
679 return vk_error(VK_ERROR_INVALID_EXTENSION);
680 }
681
682 VkResult anv_GetGlobalLayerProperties(
683 uint32_t* pCount,
684 VkLayerProperties* pProperties)
685 {
686 if (pProperties == NULL) {
687 *pCount = 0;
688 return VK_SUCCESS;
689 }
690
691 /* None supported at this time */
692 return vk_error(VK_ERROR_INVALID_LAYER);
693 }
694
695 VkResult anv_GetPhysicalDeviceLayerProperties(
696 VkPhysicalDevice physicalDevice,
697 uint32_t* pCount,
698 VkLayerProperties* pProperties)
699 {
700 if (pProperties == NULL) {
701 *pCount = 0;
702 return VK_SUCCESS;
703 }
704
705 /* None supported at this time */
706 return vk_error(VK_ERROR_INVALID_LAYER);
707 }
708
709 VkResult anv_GetDeviceQueue(
710 VkDevice _device,
711 uint32_t queueNodeIndex,
712 uint32_t queueIndex,
713 VkQueue* pQueue)
714 {
715 ANV_FROM_HANDLE(anv_device, device, _device);
716
717 assert(queueIndex == 0);
718
719 *pQueue = anv_queue_to_handle(&device->queue);
720
721 return VK_SUCCESS;
722 }
723
724 VkResult anv_QueueSubmit(
725 VkQueue _queue,
726 uint32_t cmdBufferCount,
727 const VkCmdBuffer* pCmdBuffers,
728 VkFence _fence)
729 {
730 ANV_FROM_HANDLE(anv_queue, queue, _queue);
731 ANV_FROM_HANDLE(anv_fence, fence, _fence);
732 struct anv_device *device = queue->device;
733 int ret;
734
735 for (uint32_t i = 0; i < cmdBufferCount; i++) {
736 ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, pCmdBuffers[i]);
737
738 assert(cmd_buffer->level == VK_CMD_BUFFER_LEVEL_PRIMARY);
739
740 ret = anv_gem_execbuffer(device, &cmd_buffer->execbuf2.execbuf);
741 if (ret != 0)
742 return vk_errorf(VK_ERROR_UNKNOWN, "execbuf2 failed: %m");
743
744 if (fence) {
745 ret = anv_gem_execbuffer(device, &fence->execbuf);
746 if (ret != 0)
747 return vk_errorf(VK_ERROR_UNKNOWN, "execbuf2 failed: %m");
748 }
749
750 for (uint32_t j = 0; j < cmd_buffer->execbuf2.bo_count; j++)
751 cmd_buffer->execbuf2.bos[j]->offset = cmd_buffer->execbuf2.objects[j].offset;
752 }
753
754 return VK_SUCCESS;
755 }
756
757 VkResult anv_QueueWaitIdle(
758 VkQueue _queue)
759 {
760 ANV_FROM_HANDLE(anv_queue, queue, _queue);
761
762 return vkDeviceWaitIdle(anv_device_to_handle(queue->device));
763 }
764
765 VkResult anv_DeviceWaitIdle(
766 VkDevice _device)
767 {
768 ANV_FROM_HANDLE(anv_device, device, _device);
769 struct anv_state state;
770 struct anv_batch batch;
771 struct drm_i915_gem_execbuffer2 execbuf;
772 struct drm_i915_gem_exec_object2 exec2_objects[1];
773 struct anv_bo *bo = NULL;
774 VkResult result;
775 int64_t timeout;
776 int ret;
777
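/* The kernel offers no "wait for context idle" interface, so we emit a
 * trivial batch (MI_BATCH_BUFFER_END plus a NOOP for padding), submit it
 * on this device's context and then block on its BO with an effectively
 * infinite timeout.
 */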
778 state = anv_state_pool_alloc(&device->dynamic_state_pool, 32, 32);
779 bo = &device->dynamic_state_pool.block_pool->bo;
780 batch.start = batch.next = state.map;
781 batch.end = state.map + 32;
782 anv_batch_emit(&batch, GEN7_MI_BATCH_BUFFER_END);
783 anv_batch_emit(&batch, GEN7_MI_NOOP);
784
785 exec2_objects[0].handle = bo->gem_handle;
786 exec2_objects[0].relocation_count = 0;
787 exec2_objects[0].relocs_ptr = 0;
788 exec2_objects[0].alignment = 0;
789 exec2_objects[0].offset = bo->offset;
790 exec2_objects[0].flags = 0;
791 exec2_objects[0].rsvd1 = 0;
792 exec2_objects[0].rsvd2 = 0;
793
794 execbuf.buffers_ptr = (uintptr_t) exec2_objects;
795 execbuf.buffer_count = 1;
796 execbuf.batch_start_offset = state.offset;
797 execbuf.batch_len = batch.next - state.map;
798 execbuf.cliprects_ptr = 0;
799 execbuf.num_cliprects = 0;
800 execbuf.DR1 = 0;
801 execbuf.DR4 = 0;
802
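/* I915_EXEC_NO_RELOC tells the kernel that our presumed offsets are
 * already correct, and I915_EXEC_HANDLE_LUT lets relocations name buffers
 * by exec-list index; both cut relocation overhead in the kernel.
 */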
803 execbuf.flags =
804 I915_EXEC_HANDLE_LUT | I915_EXEC_NO_RELOC | I915_EXEC_RENDER;
805 execbuf.rsvd1 = device->context_id;
806 execbuf.rsvd2 = 0;
807
808 ret = anv_gem_execbuffer(device, &execbuf);
809 if (ret != 0) {
810 result = vk_errorf(VK_ERROR_UNKNOWN, "execbuf2 failed: %m");
811 goto fail;
812 }
813
814 timeout = INT64_MAX;
815 ret = anv_gem_wait(device, bo->gem_handle, &timeout);
816 if (ret != 0) {
817 result = vk_errorf(VK_ERROR_UNKNOWN, "gem wait failed: %m");
818 goto fail;
819 }
820
821 anv_state_pool_free(&device->dynamic_state_pool, state);
822
823 return VK_SUCCESS;
824
825 fail:
826 anv_state_pool_free(&device->dynamic_state_pool, state);
827
828 return result;
829 }
830
831 void *
832 anv_device_alloc(struct anv_device * device,
833 size_t size,
834 size_t alignment,
835 VkSystemAllocType allocType)
836 {
837 return anv_instance_alloc(device->instance, size, alignment, allocType);
838 }
839
840 void
841 anv_device_free(struct anv_device * device,
842 void * mem)
843 {
844 anv_instance_free(device->instance, mem);
845 }
846
847 VkResult
848 anv_bo_init_new(struct anv_bo *bo, struct anv_device *device, uint64_t size)
849 {
850 bo->gem_handle = anv_gem_create(device, size);
851 if (!bo->gem_handle)
852 return vk_error(VK_ERROR_OUT_OF_DEVICE_MEMORY);
853
854 bo->map = NULL;
855 bo->index = 0;
856 bo->offset = 0;
857 bo->size = size;
858
859 return VK_SUCCESS;
860 }
861
862 VkResult anv_AllocMemory(
863 VkDevice _device,
864 const VkMemoryAllocInfo* pAllocInfo,
865 VkDeviceMemory* pMem)
866 {
867 ANV_FROM_HANDLE(anv_device, device, _device);
868 struct anv_device_memory *mem;
869 VkResult result;
870
871 assert(pAllocInfo->sType == VK_STRUCTURE_TYPE_MEMORY_ALLOC_INFO);
872
873 if (pAllocInfo->memoryTypeIndex != 0) {
874 /* We support exactly one memory heap. */
875 return vk_error(VK_ERROR_INVALID_VALUE);
876 }
877
878 /* FINISHME: Fail if allocation request exceeds heap size. */
879
880 mem = anv_device_alloc(device, sizeof(*mem), 8,
881 VK_SYSTEM_ALLOC_TYPE_API_OBJECT);
882 if (mem == NULL)
883 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
884
885 result = anv_bo_init_new(&mem->bo, device, pAllocInfo->allocationSize);
886 if (result != VK_SUCCESS)
887 goto fail;
888
889 *pMem = anv_device_memory_to_handle(mem);
890
891 return VK_SUCCESS;
892
893 fail:
894 anv_device_free(device, mem);
895
896 return result;
897 }
898
899 VkResult anv_FreeMemory(
900 VkDevice _device,
901 VkDeviceMemory _mem)
902 {
903 ANV_FROM_HANDLE(anv_device, device, _device);
904 ANV_FROM_HANDLE(anv_device_memory, mem, _mem);
905
906 if (mem->bo.map)
907 anv_gem_munmap(mem->bo.map, mem->bo.size);
908
909 if (mem->bo.gem_handle != 0)
910 anv_gem_close(device, mem->bo.gem_handle);
911
912 anv_device_free(device, mem);
913
914 return VK_SUCCESS;
915 }
916
917 VkResult anv_MapMemory(
918 VkDevice _device,
919 VkDeviceMemory _mem,
920 VkDeviceSize offset,
921 VkDeviceSize size,
922 VkMemoryMapFlags flags,
923 void** ppData)
924 {
925 ANV_FROM_HANDLE(anv_device, device, _device);
926 ANV_FROM_HANDLE(anv_device_memory, mem, _mem);
927
928 /* FIXME: Is this supposed to be thread safe? Since vkUnmapMemory() only
929 * takes a VkDeviceMemory pointer, it seems like only one map of the memory
930 * at a time is valid. We could just mmap up front and return an offset
931 * pointer here, but that may exhaust virtual memory on 32 bit
932 * userspace. */
933
934 mem->map = anv_gem_mmap(device, mem->bo.gem_handle, offset, size);
935 mem->map_size = size;
936
937 *ppData = mem->map;
938
939 return VK_SUCCESS;
940 }
941
942 VkResult anv_UnmapMemory(
943 VkDevice _device,
944 VkDeviceMemory _mem)
945 {
946 ANV_FROM_HANDLE(anv_device_memory, mem, _mem);
947
948 anv_gem_munmap(mem->map, mem->map_size);
949
950 return VK_SUCCESS;
951 }
952
953 VkResult anv_FlushMappedMemoryRanges(
954 VkDevice device,
955 uint32_t memRangeCount,
956 const VkMappedMemoryRange* pMemRanges)
957 {
958 /* No-op for now: device creation rejects non-LLC GPUs, and on LLC platforms the CPU and GPU caches are coherent. Adding !llc support will require clflushing the given ranges here. */
959
960 return VK_SUCCESS;
961 }
962
963 VkResult anv_InvalidateMappedMemoryRanges(
964 VkDevice device,
965 uint32_t memRangeCount,
966 const VkMappedMemoryRange* pMemRanges)
967 {
968 return anv_FlushMappedMemoryRanges(device, memRangeCount, pMemRanges);
969 }
970
971 VkResult anv_GetBufferMemoryRequirements(
972 VkDevice device,
973 VkBuffer _buffer,
974 VkMemoryRequirements* pMemoryRequirements)
975 {
976 ANV_FROM_HANDLE(anv_buffer, buffer, _buffer);
977
978 /* The Vulkan spec (git aaed022) says:
979 *
980 * memoryTypeBits is a bitfield and contains one bit set for every
981 * supported memory type for the resource. The bit `1<<i` is set if and
982 * only if the memory type `i` in the VkPhysicalDeviceMemoryProperties
983 * structure for the physical device is supported.
984 *
985 * We support exactly one memory type.
986 */
987 pMemoryRequirements->memoryTypeBits = 1;
988
989 pMemoryRequirements->size = buffer->size;
990 pMemoryRequirements->alignment = 16;
991
992 return VK_SUCCESS;
993 }
994
995 VkResult anv_GetImageMemoryRequirements(
996 VkDevice device,
997 VkImage _image,
998 VkMemoryRequirements* pMemoryRequirements)
999 {
1000 ANV_FROM_HANDLE(anv_image, image, _image);
1001
1002 /* The Vulkan spec (git aaed022) says:
1003 *
1004 * memoryTypeBits is a bitfield and contains one bit set for every
1005 * supported memory type for the resource. The bit `1<<i` is set if and
1006 * only if the memory type `i` in the VkPhysicalDeviceMemoryProperties
1007 * structure for the physical device is supported.
1008 *
1009 * We support exactly one memory type.
1010 */
1011 pMemoryRequirements->memoryTypeBits = 1;
1012
1013 pMemoryRequirements->size = image->size;
1014 pMemoryRequirements->alignment = image->alignment;
1015
1016 return VK_SUCCESS;
1017 }
1018
1019 VkResult anv_GetImageSparseMemoryRequirements(
1020 VkDevice device,
1021 VkImage image,
1022 uint32_t* pNumRequirements,
1023 VkSparseImageMemoryRequirements* pSparseMemoryRequirements)
1024 {
1025 return vk_error(VK_UNSUPPORTED);
1026 }
1027
1028 VkResult anv_GetDeviceMemoryCommitment(
1029 VkDevice device,
1030 VkDeviceMemory memory,
1031 VkDeviceSize* pCommittedMemoryInBytes)
1032 {
1033 *pCommittedMemoryInBytes = 0;
1034 stub_return(VK_SUCCESS);
1035 }
1036
1037 VkResult anv_BindBufferMemory(
1038 VkDevice device,
1039 VkBuffer _buffer,
1040 VkDeviceMemory _mem,
1041 VkDeviceSize memOffset)
1042 {
1043 ANV_FROM_HANDLE(anv_device_memory, mem, _mem);
1044 ANV_FROM_HANDLE(anv_buffer, buffer, _buffer);
1045
1046 buffer->bo = &mem->bo;
1047 buffer->offset = memOffset;
1048
1049 return VK_SUCCESS;
1050 }
1051
1052 VkResult anv_BindImageMemory(
1053 VkDevice device,
1054 VkImage _image,
1055 VkDeviceMemory _mem,
1056 VkDeviceSize memOffset)
1057 {
1058 ANV_FROM_HANDLE(anv_device_memory, mem, _mem);
1059 ANV_FROM_HANDLE(anv_image, image, _image);
1060
1061 image->bo = &mem->bo;
1062 image->offset = memOffset;
1063
1064 return VK_SUCCESS;
1065 }
1066
1067 VkResult anv_QueueBindSparseBufferMemory(
1068 VkQueue queue,
1069 VkBuffer buffer,
1070 uint32_t numBindings,
1071 const VkSparseMemoryBindInfo* pBindInfo)
1072 {
1073 stub_return(VK_UNSUPPORTED);
1074 }
1075
1076 VkResult anv_QueueBindSparseImageOpaqueMemory(
1077 VkQueue queue,
1078 VkImage image,
1079 uint32_t numBindings,
1080 const VkSparseMemoryBindInfo* pBindInfo)
1081 {
1082 stub_return(VK_UNSUPPORTED);
1083 }
1084
1085 VkResult anv_QueueBindSparseImageMemory(
1086 VkQueue queue,
1087 VkImage image,
1088 uint32_t numBindings,
1089 const VkSparseImageMemoryBindInfo* pBindInfo)
1090 {
1091 stub_return(VK_UNSUPPORTED);
1092 }
1093
1094 VkResult anv_CreateFence(
1095 VkDevice _device,
1096 const VkFenceCreateInfo* pCreateInfo,
1097 VkFence* pFence)
1098 {
1099 ANV_FROM_HANDLE(anv_device, device, _device);
1100 struct anv_fence *fence;
1101 struct anv_batch batch;
1102 VkResult result;
1103
1104 const uint32_t fence_size = 128;
1105
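/* A fence is backed by a tiny batch of its own. Signaling it means
 * submitting that batch after the work it guards (see anv_QueueSubmit),
 * and querying it reduces to a gem wait on the fence BO.
 */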
1106 assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_FENCE_CREATE_INFO);
1107
1108 fence = anv_device_alloc(device, sizeof(*fence), 8,
1109 VK_SYSTEM_ALLOC_TYPE_API_OBJECT);
1110 if (fence == NULL)
1111 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
1112
1113 result = anv_bo_init_new(&fence->bo, device, fence_size);
1114 if (result != VK_SUCCESS)
1115 goto fail;
1116
1117 fence->bo.map =
1118 anv_gem_mmap(device, fence->bo.gem_handle, 0, fence->bo.size);
1119 batch.next = batch.start = fence->bo.map;
1120 batch.end = fence->bo.map + fence->bo.size;
1121 anv_batch_emit(&batch, GEN7_MI_BATCH_BUFFER_END);
1122 anv_batch_emit(&batch, GEN7_MI_NOOP);
1123
1124 fence->exec2_objects[0].handle = fence->bo.gem_handle;
1125 fence->exec2_objects[0].relocation_count = 0;
1126 fence->exec2_objects[0].relocs_ptr = 0;
1127 fence->exec2_objects[0].alignment = 0;
1128 fence->exec2_objects[0].offset = fence->bo.offset;
1129 fence->exec2_objects[0].flags = 0;
1130 fence->exec2_objects[0].rsvd1 = 0;
1131 fence->exec2_objects[0].rsvd2 = 0;
1132
1133 fence->execbuf.buffers_ptr = (uintptr_t) fence->exec2_objects;
1134 fence->execbuf.buffer_count = 1;
1135 fence->execbuf.batch_start_offset = 0;
1136 fence->execbuf.batch_len = batch.next - fence->bo.map;
1137 fence->execbuf.cliprects_ptr = 0;
1138 fence->execbuf.num_cliprects = 0;
1139 fence->execbuf.DR1 = 0;
1140 fence->execbuf.DR4 = 0;
1141
1142 fence->execbuf.flags =
1143 I915_EXEC_HANDLE_LUT | I915_EXEC_NO_RELOC | I915_EXEC_RENDER;
1144 fence->execbuf.rsvd1 = device->context_id;
1145 fence->execbuf.rsvd2 = 0;
1146
1147 *pFence = anv_fence_to_handle(fence);
1148
1149 return VK_SUCCESS;
1150
1151 fail:
1152 anv_device_free(device, fence);
1153
1154 return result;
1155 }
1156
1157 VkResult anv_DestroyFence(
1158 VkDevice _device,
1159 VkFence _fence)
1160 {
1161 ANV_FROM_HANDLE(anv_device, device, _device);
1162 ANV_FROM_HANDLE(anv_fence, fence, _fence);
1163
1164 anv_gem_munmap(fence->bo.map, fence->bo.size);
1165 anv_gem_close(device, fence->bo.gem_handle);
1166 anv_device_free(device, fence);
1167
1168 return VK_SUCCESS;
1169 }
1170
1171 VkResult anv_ResetFences(
1172 VkDevice _device,
1173 uint32_t fenceCount,
1174 const VkFence* pFences)
1175 {
1176 for (uint32_t i = 0; i < fenceCount; i++) {
1177 ANV_FROM_HANDLE(anv_fence, fence, pFences[i]);
1178 fence->ready = false;
1179 }
1180
1181 return VK_SUCCESS;
1182 }
1183
1184 VkResult anv_GetFenceStatus(
1185 VkDevice _device,
1186 VkFence _fence)
1187 {
1188 ANV_FROM_HANDLE(anv_device, device, _device);
1189 ANV_FROM_HANDLE(anv_fence, fence, _fence);
1190 int64_t t = 0;
1191 int ret;
1192
1193 if (fence->ready)
1194 return VK_SUCCESS;
1195
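/* With a zero timeout the gem wait is a non-blocking poll: it returns 0
 * if the BO is idle and -ETIME if it is still busy.
 */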
1196 ret = anv_gem_wait(device, fence->bo.gem_handle, &t);
1197 if (ret == 0) {
1198 fence->ready = true;
1199 return VK_SUCCESS;
1200 }
1201
1202 return VK_NOT_READY;
1203 }
1204
1205 VkResult anv_WaitForFences(
1206 VkDevice _device,
1207 uint32_t fenceCount,
1208 const VkFence* pFences,
1209 VkBool32 waitAll,
1210 uint64_t timeout)
1211 {
1212 ANV_FROM_HANDLE(anv_device, device, _device);
1213 int64_t t = timeout;
1214 int ret;
1215
1216 /* FIXME: handle !waitAll */
1217
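/* The wait ioctl writes the remaining time back into the timeout, so
 * reusing t across fences makes the timeout cumulative over the set.
 */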
1218 for (uint32_t i = 0; i < fenceCount; i++) {
1219 ANV_FROM_HANDLE(anv_fence, fence, pFences[i]);
1220 ret = anv_gem_wait(device, fence->bo.gem_handle, &t);
1221 if (ret == -1 && errno == ETIME)
1222 return VK_TIMEOUT;
1223 else if (ret == -1)
1224 return vk_errorf(VK_ERROR_UNKNOWN, "gem wait failed: %m");
1225 }
1226
1227 return VK_SUCCESS;
1228 }
1229
1230 // Queue semaphore functions
1231
1232 VkResult anv_CreateSemaphore(
1233 VkDevice device,
1234 const VkSemaphoreCreateInfo* pCreateInfo,
1235 VkSemaphore* pSemaphore)
1236 {
1237 stub_return(VK_UNSUPPORTED);
1238 }
1239
1240 VkResult anv_DestroySemaphore(
1241 VkDevice device,
1242 VkSemaphore semaphore)
1243 {
1244 stub_return(VK_UNSUPPORTED);
1245 }
1246
1247 VkResult anv_QueueSignalSemaphore(
1248 VkQueue queue,
1249 VkSemaphore semaphore)
1250 {
1251 stub_return(VK_UNSUPPORTED);
1252 }
1253
1254 VkResult anv_QueueWaitSemaphore(
1255 VkQueue queue,
1256 VkSemaphore semaphore)
1257 {
1258 stub_return(VK_UNSUPPORTED);
1259 }
1260
1261 // Event functions
1262
1263 VkResult anv_CreateEvent(
1264 VkDevice device,
1265 const VkEventCreateInfo* pCreateInfo,
1266 VkEvent* pEvent)
1267 {
1268 stub_return(VK_UNSUPPORTED);
1269 }
1270
1271 VkResult anv_DestroyEvent(
1272 VkDevice device,
1273 VkEvent event)
1274 {
1275 stub_return(VK_UNSUPPORTED);
1276 }
1277
1278 VkResult anv_GetEventStatus(
1279 VkDevice device,
1280 VkEvent event)
1281 {
1282 stub_return(VK_UNSUPPORTED);
1283 }
1284
1285 VkResult anv_SetEvent(
1286 VkDevice device,
1287 VkEvent event)
1288 {
1289 stub_return(VK_UNSUPPORTED);
1290 }
1291
1292 VkResult anv_ResetEvent(
1293 VkDevice device,
1294 VkEvent event)
1295 {
1296 stub_return(VK_UNSUPPORTED);
1297 }
1298
1299 // Buffer functions
1300
1301 VkResult anv_CreateBuffer(
1302 VkDevice _device,
1303 const VkBufferCreateInfo* pCreateInfo,
1304 VkBuffer* pBuffer)
1305 {
1306 ANV_FROM_HANDLE(anv_device, device, _device);
1307 struct anv_buffer *buffer;
1308
1309 assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO);
1310
1311 buffer = anv_device_alloc(device, sizeof(*buffer), 8,
1312 VK_SYSTEM_ALLOC_TYPE_API_OBJECT);
1313 if (buffer == NULL)
1314 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
1315
1316 buffer->size = pCreateInfo->size;
1317 buffer->bo = NULL;
1318 buffer->offset = 0;
1319
1320 *pBuffer = anv_buffer_to_handle(buffer);
1321
1322 return VK_SUCCESS;
1323 }
1324
1325 VkResult anv_DestroyBuffer(
1326 VkDevice _device,
1327 VkBuffer _buffer)
1328 {
1329 ANV_FROM_HANDLE(anv_device, device, _device);
1330 ANV_FROM_HANDLE(anv_buffer, buffer, _buffer);
1331
1332 anv_device_free(device, buffer);
1333
1334 return VK_SUCCESS;
1335 }
1336
1337 void
1338 anv_fill_buffer_surface_state(struct anv_device *device, void *state,
1339 const struct anv_format *format,
1340 uint32_t offset, uint32_t range)
1341 {
1342 switch (device->info.gen) {
1343 case 7:
1344 gen7_fill_buffer_surface_state(state, format, offset, range);
1345 break;
1346 case 8:
1347 gen8_fill_buffer_surface_state(state, format, offset, range);
1348 break;
1349 default:
1350 unreachable("unsupported gen\n");
1351 }
1352 }
1353
1354 VkResult
1355 anv_buffer_view_create(
1356 struct anv_device * device,
1357 const VkBufferViewCreateInfo* pCreateInfo,
1358 struct anv_buffer_view ** view_out)
1359 {
1360 ANV_FROM_HANDLE(anv_buffer, buffer, pCreateInfo->buffer);
1361 struct anv_buffer_view *view;
1362
1363 assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_BUFFER_VIEW_CREATE_INFO);
1364
1365 view = anv_device_alloc(device, sizeof(*view), 8,
1366 VK_SYSTEM_ALLOC_TYPE_API_OBJECT);
1367 if (view == NULL)
1368 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
1369
1370 view->view = (struct anv_surface_view) {
1371 .bo = buffer->bo,
1372 .offset = buffer->offset + pCreateInfo->offset,
1373 .surface_state = anv_state_pool_alloc(&device->surface_state_pool, 64, 64),
1374 .format = anv_format_for_vk_format(pCreateInfo->format),
1375 .range = pCreateInfo->range,
1376 };
1377
1378 *view_out = view;
1379
1380 return VK_SUCCESS;
1381 }
1382
1383 VkResult anv_DestroyBufferView(
1384 VkDevice _device,
1385 VkBufferView _bview)
1386 {
1387 ANV_FROM_HANDLE(anv_device, device, _device);
1388 ANV_FROM_HANDLE(anv_buffer_view, bview, _bview);
1389
1390 anv_surface_view_fini(device, &bview->view);
1391 anv_device_free(device, bview);
1392
1393 return VK_SUCCESS;
1394 }
1395
1396 VkResult anv_DestroySampler(
1397 VkDevice _device,
1398 VkSampler _sampler)
1399 {
1400 ANV_FROM_HANDLE(anv_device, device, _device);
1401 ANV_FROM_HANDLE(anv_sampler, sampler, _sampler);
1402
1403 anv_device_free(device, sampler);
1404
1405 return VK_SUCCESS;
1406 }
1407
1408 // Descriptor set functions
1409
1410 VkResult anv_CreateDescriptorSetLayout(
1411 VkDevice _device,
1412 const VkDescriptorSetLayoutCreateInfo* pCreateInfo,
1413 VkDescriptorSetLayout* pSetLayout)
1414 {
1415 ANV_FROM_HANDLE(anv_device, device, _device);
1416 struct anv_descriptor_set_layout *set_layout;
1417
1418 assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO);
1419
1420 uint32_t sampler_count[VK_SHADER_STAGE_NUM] = { 0, };
1421 uint32_t surface_count[VK_SHADER_STAGE_NUM] = { 0, };
1422 uint32_t num_dynamic_buffers = 0;
1423 uint32_t count = 0;
1424 uint32_t stages = 0;
1425 uint32_t s;
1426
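/* First pass: count the samplers, surfaces and dynamic buffers each
 * binding contributes to every shader stage, so we know how much slot
 * storage to allocate.
 */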
1427 for (uint32_t i = 0; i < pCreateInfo->count; i++) {
1428 switch (pCreateInfo->pBinding[i].descriptorType) {
1429 case VK_DESCRIPTOR_TYPE_SAMPLER:
1430 case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
1431 for_each_bit(s, pCreateInfo->pBinding[i].stageFlags)
1432 sampler_count[s] += pCreateInfo->pBinding[i].arraySize;
1433 break;
1434 default:
1435 break;
1436 }
1437
1438 switch (pCreateInfo->pBinding[i].descriptorType) {
1439 case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
1440 case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE:
1441 case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
1442 case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
1443 case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
1444 case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
1445 case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
1446 case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
1447 case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
1448 case VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT:
1449 for_each_bit(s, pCreateInfo->pBinding[i].stageFlags)
1450 surface_count[s] += pCreateInfo->pBinding[i].arraySize;
1451 break;
1452 default:
1453 break;
1454 }
1455
1456 switch (pCreateInfo->pBinding[i].descriptorType) {
1457 case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
1458 case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
1459 num_dynamic_buffers += pCreateInfo->pBinding[i].arraySize;
1460 break;
1461 default:
1462 break;
1463 }
1464
1465 stages |= pCreateInfo->pBinding[i].stageFlags;
1466 count += pCreateInfo->pBinding[i].arraySize;
1467 }
1468
1469 uint32_t sampler_total = 0;
1470 uint32_t surface_total = 0;
1471 for (uint32_t s = 0; s < VK_SHADER_STAGE_NUM; s++) {
1472 sampler_total += sampler_count[s];
1473 surface_total += surface_count[s];
1474 }
1475
1476 size_t size = sizeof(*set_layout) +
1477 (sampler_total + surface_total) * sizeof(set_layout->entries[0]);
1478 set_layout = anv_device_alloc(device, size, 8,
1479 VK_SYSTEM_ALLOC_TYPE_API_OBJECT);
1480 if (!set_layout)
1481 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
1482
1483 set_layout->num_dynamic_buffers = num_dynamic_buffers;
1484 set_layout->count = count;
1485 set_layout->shader_stages = stages;
1486
1487 struct anv_descriptor_slot *p = set_layout->entries;
1488 struct anv_descriptor_slot *sampler[VK_SHADER_STAGE_NUM];
1489 struct anv_descriptor_slot *surface[VK_SHADER_STAGE_NUM];
1490 for (uint32_t s = 0; s < VK_SHADER_STAGE_NUM; s++) {
1491 set_layout->stage[s].surface_count = surface_count[s];
1492 set_layout->stage[s].surface_start = surface[s] = p;
1493 p += surface_count[s];
1494 set_layout->stage[s].sampler_count = sampler_count[s];
1495 set_layout->stage[s].sampler_start = sampler[s] = p;
1496 p += sampler_count[s];
1497 }
1498
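/* Second pass: assign descriptor indices and dynamic slots into the
 * per-stage slot arrays laid out above.
 */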
1499 uint32_t descriptor = 0;
1500 int8_t dynamic_slot = 0;
1501 bool is_dynamic;
1502 for (uint32_t i = 0; i < pCreateInfo->count; i++) {
1503 switch (pCreateInfo->pBinding[i].descriptorType) {
1504 case VK_DESCRIPTOR_TYPE_SAMPLER:
1505 case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
1506 for_each_bit(s, pCreateInfo->pBinding[i].stageFlags)
1507 for (uint32_t j = 0; j < pCreateInfo->pBinding[i].arraySize; j++) {
1508 sampler[s]->index = descriptor + j;
1509 sampler[s]->dynamic_slot = -1;
1510 sampler[s]++;
1511 }
1512 break;
1513 default:
1514 break;
1515 }
1516
1517 switch (pCreateInfo->pBinding[i].descriptorType) {
1518 case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
1519 case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
1520 is_dynamic = true;
1521 break;
1522 default:
1523 is_dynamic = false;
1524 break;
1525 }
1526
1527 switch (pCreateInfo->pBinding[i].descriptorType) {
1528 case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
1529 case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE:
1530 case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
1531 case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
1532 case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
1533 case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
1534 case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
1535 case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
1536 case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
1537 case VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT:
1538 for_each_bit(s, pCreateInfo->pBinding[i].stageFlags)
1539 for (uint32_t j = 0; j < pCreateInfo->pBinding[i].arraySize; j++) {
1540 surface[s]->index = descriptor + j;
1541 if (is_dynamic)
1542 surface[s]->dynamic_slot = dynamic_slot + j;
1543 else
1544 surface[s]->dynamic_slot = -1;
1545 surface[s]++;
1546 }
1547 break;
1548 default:
1549 break;
1550 }
1551
1552 if (is_dynamic)
1553 dynamic_slot += pCreateInfo->pBinding[i].arraySize;
1554
1555 descriptor += pCreateInfo->pBinding[i].arraySize;
1556 }
1557
1558 *pSetLayout = anv_descriptor_set_layout_to_handle(set_layout);
1559
1560 return VK_SUCCESS;
1561 }
1562
1563 VkResult anv_DestroyDescriptorSetLayout(
1564 VkDevice _device,
1565 VkDescriptorSetLayout _set_layout)
1566 {
1567 ANV_FROM_HANDLE(anv_device, device, _device);
1568 ANV_FROM_HANDLE(anv_descriptor_set_layout, set_layout, _set_layout);
1569
1570 anv_device_free(device, set_layout);
1571
1572 return VK_SUCCESS;
1573 }
1574
1575 VkResult anv_CreateDescriptorPool(
1576 VkDevice device,
1577 VkDescriptorPoolUsage poolUsage,
1578 uint32_t maxSets,
1579 const VkDescriptorPoolCreateInfo* pCreateInfo,
1580 VkDescriptorPool* pDescriptorPool)
1581 {
1582 anv_finishme("VkDescriptorPool is a stub");
1583 pDescriptorPool->handle = 1;
1584 return VK_SUCCESS;
1585 }
1586
1587 VkResult anv_DestroyDescriptorPool(
1588 VkDevice _device,
1589 VkDescriptorPool _pool)
1590 {
1591 anv_finishme("VkDescriptorPool is a stub: free the pool's descriptor sets");
1592 return VK_SUCCESS;
1593 }
1594
1595 VkResult anv_ResetDescriptorPool(
1596 VkDevice device,
1597 VkDescriptorPool descriptorPool)
1598 {
1599 anv_finishme("VkDescriptorPool is a stub: free the pool's descriptor sets");
1600 return VK_SUCCESS;
1601 }
1602
1603 VkResult
1604 anv_descriptor_set_create(struct anv_device *device,
1605 const struct anv_descriptor_set_layout *layout,
1606 struct anv_descriptor_set **out_set)
1607 {
1608 struct anv_descriptor_set *set;
1609 size_t size = sizeof(*set) + layout->count * sizeof(set->descriptors[0]);
1610
1611 set = anv_device_alloc(device, size, 8, VK_SYSTEM_ALLOC_TYPE_API_OBJECT);
1612 if (!set)
1613 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
1614
1615 /* A descriptor set may not be 100% filled. Clear the set so we can
1616 * later detect holes in it.
1617 */
1618 memset(set, 0, size);
1619
1620 *out_set = set;
1621
1622 return VK_SUCCESS;
1623 }
1624
1625 void
1626 anv_descriptor_set_destroy(struct anv_device *device,
1627 struct anv_descriptor_set *set)
1628 {
1629 anv_device_free(device, set);
1630 }
1631
1632 VkResult anv_AllocDescriptorSets(
1633 VkDevice _device,
1634 VkDescriptorPool descriptorPool,
1635 VkDescriptorSetUsage setUsage,
1636 uint32_t count,
1637 const VkDescriptorSetLayout* pSetLayouts,
1638 VkDescriptorSet* pDescriptorSets,
1639 uint32_t* pCount)
1640 {
1641 ANV_FROM_HANDLE(anv_device, device, _device);
1642
1643 VkResult result;
1644 struct anv_descriptor_set *set;
1645
1646 for (uint32_t i = 0; i < count; i++) {
1647 ANV_FROM_HANDLE(anv_descriptor_set_layout, layout, pSetLayouts[i]);
1648
1649 result = anv_descriptor_set_create(device, layout, &set);
1650 if (result != VK_SUCCESS) {
1651 *pCount = i;
1652 return result;
1653 }
1654
1655 pDescriptorSets[i] = anv_descriptor_set_to_handle(set);
1656 }
1657
1658 *pCount = count;
1659
1660 return VK_SUCCESS;
1661 }
1662
1663 VkResult anv_FreeDescriptorSets(
1664 VkDevice _device,
1665 VkDescriptorPool descriptorPool,
1666 uint32_t count,
1667 const VkDescriptorSet* pDescriptorSets)
1668 {
1669 ANV_FROM_HANDLE(anv_device, device, _device);
1670
1671 for (uint32_t i = 0; i < count; i++) {
1672 ANV_FROM_HANDLE(anv_descriptor_set, set, pDescriptorSets[i]);
1673
1674 anv_descriptor_set_destroy(device, set);
1675 }
1676
1677 return VK_SUCCESS;
1678 }
1679
1680 VkResult anv_UpdateDescriptorSets(
1681 VkDevice device,
1682 uint32_t writeCount,
1683 const VkWriteDescriptorSet* pDescriptorWrites,
1684 uint32_t copyCount,
1685 const VkCopyDescriptorSet* pDescriptorCopies)
1686 {
1687 for (uint32_t i = 0; i < writeCount; i++) {
1688 const VkWriteDescriptorSet *write = &pDescriptorWrites[i];
1689 ANV_FROM_HANDLE(anv_descriptor_set, set, write->destSet);
1690
1691 switch (write->descriptorType) {
1692 case VK_DESCRIPTOR_TYPE_SAMPLER:
1693 case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
1694 for (uint32_t j = 0; j < write->count; j++) {
1695 set->descriptors[write->destBinding + j].sampler =
1696 anv_sampler_from_handle(write->pDescriptors[j].sampler);
1697 }
1698
1699 if (write->descriptorType == VK_DESCRIPTOR_TYPE_SAMPLER)
1700 break;
1701
1702 /* fallthrough */
1703
1704 case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE:
1705 case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
1706 for (uint32_t j = 0; j < write->count; j++) {
1707 ANV_FROM_HANDLE(anv_image_view, iview,
1708 write->pDescriptors[j].imageView);
1709 set->descriptors[write->destBinding + j].view = &iview->view;
1710 }
1711 break;
1712
1713 case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
1714 case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
1715 anv_finishme("texel buffers not implemented");
1716 break;
1717
1718 case VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT:
1719 anv_finishme("input attachments not implemented");
1720 break;
1721
1722 case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
1723 case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
1724 case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
1725 case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
1726 for (uint32_t j = 0; j < write->count; j++) {
1727 ANV_FROM_HANDLE(anv_buffer_view, bview,
1728 write->pDescriptors[j].bufferView);
1729 set->descriptors[write->destBinding + j].view = &bview->view;
1730 }
break;
1731
1732 default:
1733 break;
1734 }
1735 }
1736
1737 for (uint32_t i = 0; i < copyCount; i++) {
1738 const VkCopyDescriptorSet *copy = &pDescriptorCopies[i];
1739 ANV_FROM_HANDLE(anv_descriptor_set, src, copy->srcSet);
1740 ANV_FROM_HANDLE(anv_descriptor_set, dest, copy->destSet);
1741 for (uint32_t j = 0; j < copy->count; j++) {
1742 dest->descriptors[copy->destBinding + j] =
1743 src->descriptors[copy->srcBinding + j];
1744 }
1745 }
1746
1747 return VK_SUCCESS;
1748 }
1749
1750 // State object functions
1751
1752 static inline int64_t
1753 clamp_int64(int64_t x, int64_t min, int64_t max)
1754 {
1755 if (x < min)
1756 return min;
1757 else if (x < max)
1758 return x;
1759 else
1760 return max;
1761 }
1762
1763 VkResult anv_CreateDynamicViewportState(
1764 VkDevice _device,
1765 const VkDynamicViewportStateCreateInfo* pCreateInfo,
1766 VkDynamicViewportState* pState)
1767 {
1768 ANV_FROM_HANDLE(anv_device, device, _device);
1769 struct anv_dynamic_vp_state *state;
1770
1771 assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_DYNAMIC_VIEWPORT_STATE_CREATE_INFO);
1772
1773 state = anv_device_alloc(device, sizeof(*state), 8,
1774 VK_SYSTEM_ALLOC_TYPE_API_OBJECT);
1775 if (state == NULL)
1776 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
1777
1778 unsigned count = pCreateInfo->viewportAndScissorCount;
1779 state->sf_clip_vp = anv_state_pool_alloc(&device->dynamic_state_pool,
1780 count * 64, 64);
1781 state->cc_vp = anv_state_pool_alloc(&device->dynamic_state_pool,
1782 count * 8, 32);
1783 state->scissor = anv_state_pool_alloc(&device->dynamic_state_pool,
1784 count * 8, 32);
1785
1786 for (uint32_t i = 0; i < pCreateInfo->viewportAndScissorCount; i++) {
1787 const VkViewport *vp = &pCreateInfo->pViewports[i];
1788 const VkRect2D *s = &pCreateInfo->pScissors[i];
1789
1790 /* The gen7 state struct has just the matrix and guardband fields; the
1791 * gen8 struct adds the min/max viewport fields. */
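/* The matrix elements encode the usual viewport transform: m00/m11/m22
 * scale NDC x/y/z by half the viewport extent and m30/m31/m32 translate
 * the result to window coordinates.
 */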
1792 struct GEN8_SF_CLIP_VIEWPORT sf_clip_viewport = {
1793 .ViewportMatrixElementm00 = vp->width / 2,
1794 .ViewportMatrixElementm11 = vp->height / 2,
1795 .ViewportMatrixElementm22 = (vp->maxDepth - vp->minDepth) / 2,
1796 .ViewportMatrixElementm30 = vp->originX + vp->width / 2,
1797 .ViewportMatrixElementm31 = vp->originY + vp->height / 2,
1798 .ViewportMatrixElementm32 = (vp->maxDepth + vp->minDepth) / 2,
1799 .XMinClipGuardband = -1.0f,
1800 .XMaxClipGuardband = 1.0f,
1801 .YMinClipGuardband = -1.0f,
1802 .YMaxClipGuardband = 1.0f,
1803 .XMinViewPort = vp->originX,
1804 .XMaxViewPort = vp->originX + vp->width - 1,
1805 .YMinViewPort = vp->originY,
1806 .YMaxViewPort = vp->originY + vp->height - 1,
1807 };
1808
1809 struct GEN7_CC_VIEWPORT cc_viewport = {
1810 .MinimumDepth = vp->minDepth,
1811 .MaximumDepth = vp->maxDepth
1812 };
1813
1814 /* Since xmax and ymax are inclusive, we have to have xmax < xmin or
1815 * ymax < ymin for empty clips. If the clip x, y, width and height are all
1816 * 0, the clamps below produce 0 for xmin, ymin, xmax, ymax, which isn't
1817 * what we want. Just special case empty clips and produce a canonical
1818 * empty clip. */
1819 static const struct GEN7_SCISSOR_RECT empty_scissor = {
1820 .ScissorRectangleYMin = 1,
1821 .ScissorRectangleXMin = 1,
1822 .ScissorRectangleYMax = 0,
1823 .ScissorRectangleXMax = 0
1824 };
1825
1826 const int max = 0xffff;
1827 struct GEN7_SCISSOR_RECT scissor = {
1828 /* Do this math using int64_t so overflow gets clamped correctly. */
1829 .ScissorRectangleYMin = clamp_int64(s->offset.y, 0, max),
1830 .ScissorRectangleXMin = clamp_int64(s->offset.x, 0, max),
1831 .ScissorRectangleYMax = clamp_int64((uint64_t) s->offset.y + s->extent.height - 1, 0, max),
1832 .ScissorRectangleXMax = clamp_int64((uint64_t) s->offset.x + s->extent.width - 1, 0, max)
1833 };
1834
1835 GEN8_SF_CLIP_VIEWPORT_pack(NULL, state->sf_clip_vp.map + i * 64, &sf_clip_viewport);
/* CC_VIEWPORT and SCISSOR_RECT entries are 8 bytes each, so index both
 * arrays with an 8-byte stride to match their allocations above. */
1836 GEN7_CC_VIEWPORT_pack(NULL, state->cc_vp.map + i * 8, &cc_viewport);
1837
1838 if (s->extent.width <= 0 || s->extent.height <= 0) {
1839 GEN7_SCISSOR_RECT_pack(NULL, state->scissor.map + i * 8, &empty_scissor);
1840 } else {
1841 GEN7_SCISSOR_RECT_pack(NULL, state->scissor.map + i * 8, &scissor);
1842 }
1843 }
1844
1845 *pState = anv_dynamic_vp_state_to_handle(state);
1846
1847 return VK_SUCCESS;
1848 }
1849
1850 VkResult anv_DestroyDynamicViewportState(
1851 VkDevice _device,
1852 VkDynamicViewportState _vp_state)
1853 {
1854 ANV_FROM_HANDLE(anv_device, device, _device);
1855 ANV_FROM_HANDLE(anv_dynamic_vp_state, vp_state, _vp_state);
1856
1857 anv_state_pool_free(&device->dynamic_state_pool, vp_state->sf_clip_vp);
1858 anv_state_pool_free(&device->dynamic_state_pool, vp_state->cc_vp);
1859 anv_state_pool_free(&device->dynamic_state_pool, vp_state->scissor);
1860
1861 anv_device_free(device, vp_state);
1862
1863 return VK_SUCCESS;
1864 }
1865
1866 VkResult anv_DestroyDynamicRasterState(
1867 VkDevice _device,
1868 VkDynamicRasterState _rs_state)
1869 {
1870 ANV_FROM_HANDLE(anv_device, device, _device);
1871 ANV_FROM_HANDLE(anv_dynamic_rs_state, rs_state, _rs_state);
1872
1873 anv_device_free(device, rs_state);
1874
1875 return VK_SUCCESS;
1876 }
1877
VkResult anv_CreateDynamicColorBlendState(
    VkDevice                                    _device,
    const VkDynamicColorBlendStateCreateInfo*   pCreateInfo,
    VkDynamicColorBlendState*                   pState)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   struct anv_dynamic_cb_state *state;

   assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_DYNAMIC_COLOR_BLEND_STATE_CREATE_INFO);

   state = anv_device_alloc(device, sizeof(*state), 8,
                            VK_SYSTEM_ALLOC_TYPE_API_OBJECT);
   if (state == NULL)
      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

   struct GEN7_COLOR_CALC_STATE color_calc_state = {
      .BlendConstantColorRed = pCreateInfo->blendConst[0],
      .BlendConstantColorGreen = pCreateInfo->blendConst[1],
      .BlendConstantColorBlue = pCreateInfo->blendConst[2],
      .BlendConstantColorAlpha = pCreateInfo->blendConst[3]
   };

   GEN7_COLOR_CALC_STATE_pack(NULL, state->color_calc_state, &color_calc_state);

   *pState = anv_dynamic_cb_state_to_handle(state);

   return VK_SUCCESS;
}

VkResult anv_DestroyDynamicColorBlendState(
    VkDevice                                    _device,
    VkDynamicColorBlendState                    _cb_state)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   ANV_FROM_HANDLE(anv_dynamic_cb_state, cb_state, _cb_state);

   anv_device_free(device, cb_state);

   return VK_SUCCESS;
}

VkResult anv_DestroyDynamicDepthStencilState(
    VkDevice                                    _device,
    VkDynamicDepthStencilState                  _ds_state)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   ANV_FROM_HANDLE(anv_dynamic_ds_state, ds_state, _ds_state);

   anv_device_free(device, ds_state);

   return VK_SUCCESS;
}

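/* The framebuffer and its array of attachment-view pointers share a single
 * allocation; the array simply trails the struct.
 */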
VkResult anv_CreateFramebuffer(
    VkDevice                                    _device,
    const VkFramebufferCreateInfo*              pCreateInfo,
    VkFramebuffer*                              pFramebuffer)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   struct anv_framebuffer *framebuffer;

   assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO);

   size_t size = sizeof(*framebuffer) +
                 sizeof(struct anv_attachment_view *) * pCreateInfo->attachmentCount;
   framebuffer = anv_device_alloc(device, size, 8,
                                  VK_SYSTEM_ALLOC_TYPE_API_OBJECT);
   if (framebuffer == NULL)
      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

   framebuffer->attachment_count = pCreateInfo->attachmentCount;
   for (uint32_t i = 0; i < pCreateInfo->attachmentCount; i++) {
      ANV_FROM_HANDLE(anv_attachment_view, view,
                      pCreateInfo->pAttachments[i].view);

      framebuffer->attachments[i] = view;
   }

   framebuffer->width = pCreateInfo->width;
   framebuffer->height = pCreateInfo->height;
   framebuffer->layers = pCreateInfo->layers;

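   /* Each framebuffer carries a default viewport/scissor state covering its
    * full extent; the meta paths presumably bind it when no application
    * viewport state applies. Note the result of the create call is not
    * checked, so an allocation failure here would go unreported.
    */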
   anv_CreateDynamicViewportState(anv_device_to_handle(device),
      &(VkDynamicViewportStateCreateInfo) {
         .sType = VK_STRUCTURE_TYPE_DYNAMIC_VIEWPORT_STATE_CREATE_INFO,
         .viewportAndScissorCount = 1,
         .pViewports = (VkViewport[]) {
            {
               .originX = 0,
               .originY = 0,
               .width = pCreateInfo->width,
               .height = pCreateInfo->height,
               .minDepth = 0,
               .maxDepth = 1
            },
         },
         .pScissors = (VkRect2D[]) {
            { { 0, 0 },
              { pCreateInfo->width, pCreateInfo->height } },
         }
      },
      &framebuffer->vp_state);

   *pFramebuffer = anv_framebuffer_to_handle(framebuffer);

   return VK_SUCCESS;
}

VkResult anv_DestroyFramebuffer(
    VkDevice                                    _device,
    VkFramebuffer                               _fb)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   ANV_FROM_HANDLE(anv_framebuffer, fb, _fb);

   anv_DestroyDynamicViewportState(anv_device_to_handle(device),
                                   fb->vp_state);
   anv_device_free(device, fb);

   return VK_SUCCESS;
}

VkResult anv_CreateRenderPass(
    VkDevice                                    _device,
    const VkRenderPassCreateInfo*               pCreateInfo,
    VkRenderPass*                               pRenderPass)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   struct anv_render_pass *pass;
   size_t size;
   size_t attachments_offset;

   assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO);

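   /* The pass, its subpass array, and its attachment array share a single
    * allocation: the subpasses trail the struct itself, and the attachments
    * follow at attachments_offset.
    */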
   size = sizeof(*pass);
   size += pCreateInfo->subpassCount * sizeof(pass->subpasses[0]);
   attachments_offset = size;
   size += pCreateInfo->attachmentCount * sizeof(pass->attachments[0]);

   pass = anv_device_alloc(device, size, 8,
                           VK_SYSTEM_ALLOC_TYPE_API_OBJECT);
   if (pass == NULL)
      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

   /* Clear the subpasses along with the parent pass. This is required
    * because each array member of anv_subpass must be a valid pointer if
    * not NULL.
    */
   memset(pass, 0, size);
   pass->attachment_count = pCreateInfo->attachmentCount;
   pass->subpass_count = pCreateInfo->subpassCount;
   pass->attachments = (void *) pass + attachments_offset;

   for (uint32_t i = 0; i < pCreateInfo->attachmentCount; i++) {
      struct anv_render_pass_attachment *att = &pass->attachments[i];

      att->format = anv_format_for_vk_format(pCreateInfo->pAttachments[i].format);
      att->samples = pCreateInfo->pAttachments[i].samples;
      att->load_op = pCreateInfo->pAttachments[i].loadOp;
      att->stencil_load_op = pCreateInfo->pAttachments[i].stencilLoadOp;
      // att->store_op = pCreateInfo->pAttachments[i].storeOp;
      // att->stencil_store_op = pCreateInfo->pAttachments[i].stencilStoreOp;

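      /* Record which attachments need a clear at render-pass begin: color
       * clears are counted, while depth and stencil clears are only flagged,
       * presumably because the meta clear path treats them differently.
       */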
      if (att->load_op == VK_ATTACHMENT_LOAD_OP_CLEAR) {
         if (anv_format_is_color(att->format)) {
            ++pass->num_color_clear_attachments;
         } else if (att->format->depth_format) {
            pass->has_depth_clear_attachment = true;
         }
      } else if (att->stencil_load_op == VK_ATTACHMENT_LOAD_OP_CLEAR) {
         assert(att->format->has_stencil);
         pass->has_stencil_clear_attachment = true;
      }
   }

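   /* Copy the attachment indices out of each VkSubpassDescription so the
    * pass stops referencing application-owned memory. Note the per-subpass
    * allocations below are not checked for failure.
    */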
   for (uint32_t i = 0; i < pCreateInfo->subpassCount; i++) {
      const VkSubpassDescription *desc = &pCreateInfo->pSubpasses[i];
      struct anv_subpass *subpass = &pass->subpasses[i];

      subpass->input_count = desc->inputCount;
      subpass->color_count = desc->colorCount;

      if (desc->inputCount > 0) {
         subpass->input_attachments =
            anv_device_alloc(device, desc->inputCount * sizeof(uint32_t),
                             8, VK_SYSTEM_ALLOC_TYPE_API_OBJECT);

         for (uint32_t j = 0; j < desc->inputCount; j++) {
            subpass->input_attachments[j]
               = desc->inputAttachments[j].attachment;
         }
      }

      if (desc->colorCount > 0) {
         subpass->color_attachments =
            anv_device_alloc(device, desc->colorCount * sizeof(uint32_t),
                             8, VK_SYSTEM_ALLOC_TYPE_API_OBJECT);

         for (uint32_t j = 0; j < desc->colorCount; j++) {
            subpass->color_attachments[j]
               = desc->colorAttachments[j].attachment;
         }
      }

      if (desc->resolveAttachments) {
         subpass->resolve_attachments =
            anv_device_alloc(device, desc->colorCount * sizeof(uint32_t),
                             8, VK_SYSTEM_ALLOC_TYPE_API_OBJECT);

         for (uint32_t j = 0; j < desc->colorCount; j++) {
            subpass->resolve_attachments[j]
               = desc->resolveAttachments[j].attachment;
         }
      }

      subpass->depth_stencil_attachment = desc->depthStencilAttachment.attachment;
   }

   *pRenderPass = anv_render_pass_to_handle(pass);

   return VK_SUCCESS;
}

VkResult anv_DestroyRenderPass(
    VkDevice                                    _device,
    VkRenderPass                                _pass)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   ANV_FROM_HANDLE(anv_render_pass, pass, _pass);

   for (uint32_t i = 0; i < pass->subpass_count; i++) {
      /* In VkSubpassDescription, each of the attachment arrays may be null,
       * in which case the pointers below are still NULL from the memset at
       * create time. anv_device_free(), like free(), is a no-op on NULL, so
       * the arrays can be freed unconditionally.
       */
      struct anv_subpass *subpass = &pass->subpasses[i];

      anv_device_free(device, subpass->input_attachments);
      anv_device_free(device, subpass->color_attachments);
      anv_device_free(device, subpass->resolve_attachments);
   }

   anv_device_free(device, pass);

   return VK_SUCCESS;
}

VkResult anv_GetRenderAreaGranularity(
    VkDevice                                    device,
    VkRenderPass                                renderPass,
    VkExtent2D*                                 pGranularity)
{
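   /* A granularity of 1x1 tells the application that render areas need no
    * alignment beyond a single pixel.
    */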
   *pGranularity = (VkExtent2D) { 1, 1 };

   return VK_SUCCESS;
}

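/* No-op stubs for the debug marker entrypoints, declared with default
 * visibility so they are exported from the driver even though nothing is
 * recorded.
 */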
void vkCmdDbgMarkerBegin(
    VkCmdBuffer                                 cmdBuffer,
    const char*                                 pMarker)
   __attribute__ ((visibility ("default")));

void vkCmdDbgMarkerEnd(
    VkCmdBuffer                                 cmdBuffer)
   __attribute__ ((visibility ("default")));

void vkCmdDbgMarkerBegin(
    VkCmdBuffer                                 cmdBuffer,
    const char*                                 pMarker)
{
}

void vkCmdDbgMarkerEnd(
    VkCmdBuffer                                 cmdBuffer)
{
}