anv: Add a global dispatch table for use in meta operations
[mesa.git] / src / vulkan / anv_device.c
1 /*
2 * Copyright © 2015 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 */
23
24 #include <assert.h>
25 #include <stdbool.h>
26 #include <string.h>
27 #include <unistd.h>
28 #include <fcntl.h>
29
30 #include "anv_private.h"
31 #include "mesa/main/git_sha1.h"
32 #include "util/strtod.h"
33
34 struct anv_dispatch_table dtable;
35
36 static VkResult
37 anv_physical_device_init(struct anv_physical_device *device,
38 struct anv_instance *instance,
39 const char *path)
40 {
41 VkResult result;
42 int fd;
43
44 fd = open(path, O_RDWR | O_CLOEXEC);
45 if (fd < 0)
46 return vk_errorf(VK_ERROR_UNAVAILABLE, "failed to open %s: %m", path);
47
48 device->instance = instance;
49 device->path = path;
50
51 device->chipset_id = anv_gem_get_param(fd, I915_PARAM_CHIPSET_ID);
52 if (!device->chipset_id) {
53 result = vk_errorf(VK_ERROR_UNAVAILABLE, "failed to get chipset id: %m");
54 goto fail;
55 }
56
57 device->name = brw_get_device_name(device->chipset_id);
58 device->info = brw_get_device_info(device->chipset_id, -1);
59 if (!device->info) {
60 result = vk_errorf(VK_ERROR_UNAVAILABLE, "failed to get device info");
61 goto fail;
62 }
63
64 if (anv_gem_get_aperture(fd, &device->aperture_size) == -1) {
65 result = vk_errorf(VK_ERROR_UNAVAILABLE, "failed to get aperture size: %m");
66 goto fail;
67 }
68
69 if (!anv_gem_get_param(fd, I915_PARAM_HAS_WAIT_TIMEOUT)) {
70 result = vk_errorf(VK_ERROR_UNAVAILABLE, "kernel missing gem wait");
71 goto fail;
72 }
73
74 if (!anv_gem_get_param(fd, I915_PARAM_HAS_EXECBUF2)) {
75 result = vk_errorf(VK_ERROR_UNAVAILABLE, "kernel missing execbuf2");
76 goto fail;
77 }
78
79 if (!anv_gem_get_param(fd, I915_PARAM_HAS_LLC)) {
80 result = vk_errorf(VK_ERROR_UNAVAILABLE, "non-llc gpu");
81 goto fail;
82 }
83
84 close(fd);
85
86 return VK_SUCCESS;
87
88 fail:
89 close(fd);
90 return result;
91 }
92
93 static void *default_alloc(
94 void* pUserData,
95 size_t size,
96 size_t alignment,
97 VkSystemAllocType allocType)
98 {
99 return malloc(size);
100 }
101
102 static void default_free(
103 void* pUserData,
104 void* pMem)
105 {
106 free(pMem);
107 }
108
109 static const VkAllocCallbacks default_alloc_callbacks = {
110 .pUserData = NULL,
111 .pfnAlloc = default_alloc,
112 .pfnFree = default_free
113 };
114
115 static const VkExtensionProperties global_extensions[] = {
116 {
117 .extName = "VK_WSI_swapchain",
118 .specVersion = 12
119 },
120 };
121
122 static const VkExtensionProperties device_extensions[] = {
123 {
124 .extName = "VK_WSI_device_swapchain",
125 .specVersion = 12
126 },
127 };
128
129
130 VkResult anv_CreateInstance(
131 const VkInstanceCreateInfo* pCreateInfo,
132 VkInstance* pInstance)
133 {
134 struct anv_instance *instance;
135 const VkAllocCallbacks *alloc_callbacks = &default_alloc_callbacks;
136 void *user_data = NULL;
137
138 assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_INSTANCE_CREATE_INFO);
139
140 for (uint32_t i = 0; i < pCreateInfo->extensionCount; i++) {
141 bool found = false;
142 for (uint32_t j = 0; j < ARRAY_SIZE(global_extensions); j++) {
143 if (strcmp(pCreateInfo->ppEnabledExtensionNames[i],
144 global_extensions[j].extName) == 0) {
145 found = true;
146 break;
147 }
148 }
149 if (!found)
150 return vk_error(VK_ERROR_INVALID_EXTENSION);
151 }
152
153 if (pCreateInfo->pAllocCb) {
154 alloc_callbacks = pCreateInfo->pAllocCb;
155 user_data = pCreateInfo->pAllocCb->pUserData;
156 }
157 instance = alloc_callbacks->pfnAlloc(user_data, sizeof(*instance), 8,
158 VK_SYSTEM_ALLOC_TYPE_API_OBJECT);
159 if (!instance)
160 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
161
162 instance->pAllocUserData = alloc_callbacks->pUserData;
163 instance->pfnAlloc = alloc_callbacks->pfnAlloc;
164 instance->pfnFree = alloc_callbacks->pfnFree;
165 instance->apiVersion = pCreateInfo->pAppInfo->apiVersion;
166 instance->physicalDeviceCount = 0;
167
168 _mesa_locale_init();
169
170 VG(VALGRIND_CREATE_MEMPOOL(instance, 0, false));
171
172 anv_init_wsi(instance);
173
174 *pInstance = anv_instance_to_handle(instance);
175
176 return VK_SUCCESS;
177 }
178
179 VkResult anv_DestroyInstance(
180 VkInstance _instance)
181 {
182 ANV_FROM_HANDLE(anv_instance, instance, _instance);
183
184 anv_finish_wsi(instance);
185
186 VG(VALGRIND_DESTROY_MEMPOOL(instance));
187
188 _mesa_locale_fini();
189
190 instance->pfnFree(instance->pAllocUserData, instance);
191
192 return VK_SUCCESS;
193 }
194
195 void *
196 anv_instance_alloc(struct anv_instance *instance, size_t size,
197 size_t alignment, VkSystemAllocType allocType)
198 {
199 void *mem = instance->pfnAlloc(instance->pAllocUserData,
200 size, alignment, allocType);
201 if (mem) {
202 VALGRIND_MEMPOOL_ALLOC(instance, mem, size);
203 VALGRIND_MAKE_MEM_UNDEFINED(mem, size);
204 }
205 return mem;
206 }
207
208 void
209 anv_instance_free(struct anv_instance *instance, void *mem)
210 {
211 if (mem == NULL)
212 return;
213
214 VALGRIND_MEMPOOL_FREE(instance, mem);
215
216 instance->pfnFree(instance->pAllocUserData, mem);
217 }
218
219 VkResult anv_EnumeratePhysicalDevices(
220 VkInstance _instance,
221 uint32_t* pPhysicalDeviceCount,
222 VkPhysicalDevice* pPhysicalDevices)
223 {
224 ANV_FROM_HANDLE(anv_instance, instance, _instance);
225 VkResult result;
226
227 if (instance->physicalDeviceCount == 0) {
228 result = anv_physical_device_init(&instance->physicalDevice,
229 instance, "/dev/dri/renderD128");
230 if (result != VK_SUCCESS)
231 return result;
232
233 instance->physicalDeviceCount = 1;
234 }
235
236 /* pPhysicalDeviceCount is an out parameter if pPhysicalDevices is NULL;
237 * otherwise it's an inout parameter.
238 *
239 * The Vulkan spec (git aaed022) says:
240 *
241 * pPhysicalDeviceCount is a pointer to an unsigned integer variable
242 * that is initialized with the number of devices the application is
243 * prepared to receive handles to. pname:pPhysicalDevices is pointer to
244 * an array of at least this many VkPhysicalDevice handles [...].
245 *
246 * Upon success, if pPhysicalDevices is NULL, vkEnumeratePhysicalDevices
247 * overwrites the contents of the variable pointed to by
248 * pPhysicalDeviceCount with the number of physical devices in in the
249 * instance; otherwise, vkEnumeratePhysicalDevices overwrites
250 * pPhysicalDeviceCount with the number of physical handles written to
251 * pPhysicalDevices.
252 */
253 if (!pPhysicalDevices) {
254 *pPhysicalDeviceCount = instance->physicalDeviceCount;
255 } else if (*pPhysicalDeviceCount >= 1) {
256 pPhysicalDevices[0] = anv_physical_device_to_handle(&instance->physicalDevice);
257 *pPhysicalDeviceCount = 1;
258 } else {
259 *pPhysicalDeviceCount = 0;
260 }
261
262 return VK_SUCCESS;
263 }
264
265 VkResult anv_GetPhysicalDeviceFeatures(
266 VkPhysicalDevice physicalDevice,
267 VkPhysicalDeviceFeatures* pFeatures)
268 {
269 anv_finishme("Get correct values for PhysicalDeviceFeatures");
270
271 *pFeatures = (VkPhysicalDeviceFeatures) {
272 .robustBufferAccess = false,
273 .fullDrawIndexUint32 = false,
274 .imageCubeArray = false,
275 .independentBlend = false,
276 .geometryShader = true,
277 .tessellationShader = false,
278 .sampleRateShading = false,
279 .dualSourceBlend = true,
280 .logicOp = true,
281 .instancedDrawIndirect = true,
282 .depthClip = false,
283 .depthBiasClamp = false,
284 .fillModeNonSolid = true,
285 .depthBounds = false,
286 .wideLines = true,
287 .largePoints = true,
288 .textureCompressionETC2 = true,
289 .textureCompressionASTC_LDR = true,
290 .textureCompressionBC = true,
291 .pipelineStatisticsQuery = true,
292 .vertexSideEffects = false,
293 .tessellationSideEffects = false,
294 .geometrySideEffects = false,
295 .fragmentSideEffects = false,
296 .shaderTessellationPointSize = false,
297 .shaderGeometryPointSize = true,
298 .shaderTextureGatherExtended = true,
299 .shaderStorageImageExtendedFormats = false,
300 .shaderStorageImageMultisample = false,
301 .shaderStorageBufferArrayConstantIndexing = false,
302 .shaderStorageImageArrayConstantIndexing = false,
303 .shaderUniformBufferArrayDynamicIndexing = true,
304 .shaderSampledImageArrayDynamicIndexing = false,
305 .shaderStorageBufferArrayDynamicIndexing = false,
306 .shaderStorageImageArrayDynamicIndexing = false,
307 .shaderClipDistance = false,
308 .shaderCullDistance = false,
309 .shaderFloat64 = false,
310 .shaderInt64 = false,
311 .shaderFloat16 = false,
312 .shaderInt16 = false,
313 };
314
315 return VK_SUCCESS;
316 }
317
318 VkResult anv_GetPhysicalDeviceLimits(
319 VkPhysicalDevice physicalDevice,
320 VkPhysicalDeviceLimits* pLimits)
321 {
322 ANV_FROM_HANDLE(anv_physical_device, physical_device, physicalDevice);
323 const struct brw_device_info *devinfo = physical_device->info;
324
325 anv_finishme("Get correct values for PhysicalDeviceLimits");
326
327 *pLimits = (VkPhysicalDeviceLimits) {
328 .maxImageDimension1D = (1 << 14),
329 .maxImageDimension2D = (1 << 14),
330 .maxImageDimension3D = (1 << 10),
331 .maxImageDimensionCube = (1 << 14),
332 .maxImageArrayLayers = (1 << 10),
333 .maxTexelBufferSize = (1 << 14),
334 .maxUniformBufferSize = UINT32_MAX,
335 .maxStorageBufferSize = UINT32_MAX,
336 .maxPushConstantsSize = MAX_PUSH_CONSTANTS_SIZE,
337 .maxMemoryAllocationCount = UINT32_MAX,
338 .bufferImageGranularity = 64, /* A cache line */
339 .maxBoundDescriptorSets = MAX_SETS,
340 .maxDescriptorSets = UINT32_MAX,
341 .maxPerStageDescriptorSamplers = 64,
342 .maxPerStageDescriptorUniformBuffers = 64,
343 .maxPerStageDescriptorStorageBuffers = 64,
344 .maxPerStageDescriptorSampledImages = 64,
345 .maxPerStageDescriptorStorageImages = 64,
346 .maxDescriptorSetSamplers = 256,
347 .maxDescriptorSetUniformBuffers = 256,
348 .maxDescriptorSetStorageBuffers = 256,
349 .maxDescriptorSetSampledImages = 256,
350 .maxDescriptorSetStorageImages = 256,
351 .maxVertexInputAttributes = 32,
352 .maxVertexInputAttributeOffset = 256,
353 .maxVertexInputBindingStride = 256,
354 .maxVertexOutputComponents = 32,
355 .maxTessGenLevel = 0,
356 .maxTessPatchSize = 0,
357 .maxTessControlPerVertexInputComponents = 0,
358 .maxTessControlPerVertexOutputComponents = 0,
359 .maxTessControlPerPatchOutputComponents = 0,
360 .maxTessControlTotalOutputComponents = 0,
361 .maxTessEvaluationInputComponents = 0,
362 .maxTessEvaluationOutputComponents = 0,
363 .maxGeometryShaderInvocations = 6,
364 .maxGeometryInputComponents = 16,
365 .maxGeometryOutputComponents = 16,
366 .maxGeometryOutputVertices = 16,
367 .maxGeometryTotalOutputComponents = 16,
368 .maxFragmentInputComponents = 16,
369 .maxFragmentOutputBuffers = 8,
370 .maxFragmentDualSourceBuffers = 2,
371 .maxFragmentCombinedOutputResources = 8,
372 .maxComputeSharedMemorySize = 1024,
373 .maxComputeWorkGroupCount = {
374 16 * devinfo->max_cs_threads,
375 16 * devinfo->max_cs_threads,
376 16 * devinfo->max_cs_threads,
377 },
378 .maxComputeWorkGroupInvocations = 16 * devinfo->max_cs_threads,
379 .maxComputeWorkGroupSize = {
380 16 * devinfo->max_cs_threads,
381 16 * devinfo->max_cs_threads,
382 16 * devinfo->max_cs_threads,
383 },
384 .subPixelPrecisionBits = 4 /* FIXME */,
385 .subTexelPrecisionBits = 4 /* FIXME */,
386 .mipmapPrecisionBits = 4 /* FIXME */,
387 .maxDrawIndexedIndexValue = UINT32_MAX,
388 .maxDrawIndirectInstanceCount = UINT32_MAX,
389 .primitiveRestartForPatches = UINT32_MAX,
390 .maxSamplerLodBias = 16,
391 .maxSamplerAnisotropy = 16,
392 .maxViewports = 16,
393 .maxDynamicViewportStates = UINT32_MAX,
394 .maxViewportDimensions = { (1 << 14), (1 << 14) },
395 .viewportBoundsRange = { -1.0, 1.0 }, /* FIXME */
396 .viewportSubPixelBits = 13, /* We take a float? */
397 .minMemoryMapAlignment = 64, /* A cache line */
398 .minTexelBufferOffsetAlignment = 1,
399 .minUniformBufferOffsetAlignment = 1,
400 .minStorageBufferOffsetAlignment = 1,
401 .minTexelOffset = 0, /* FIXME */
402 .maxTexelOffset = 0, /* FIXME */
403 .minTexelGatherOffset = 0, /* FIXME */
404 .maxTexelGatherOffset = 0, /* FIXME */
405 .minInterpolationOffset = 0, /* FIXME */
406 .maxInterpolationOffset = 0, /* FIXME */
407 .subPixelInterpolationOffsetBits = 0, /* FIXME */
408 .maxFramebufferWidth = (1 << 14),
409 .maxFramebufferHeight = (1 << 14),
410 .maxFramebufferLayers = (1 << 10),
411 .maxFramebufferColorSamples = 8,
412 .maxFramebufferDepthSamples = 8,
413 .maxFramebufferStencilSamples = 8,
414 .maxColorAttachments = MAX_RTS,
415 .maxSampledImageColorSamples = 8,
416 .maxSampledImageDepthSamples = 8,
417 .maxSampledImageIntegerSamples = 1,
418 .maxStorageImageSamples = 1,
419 .maxSampleMaskWords = 1,
420 .timestampFrequency = 1000 * 1000 * 1000 / 80,
421 .maxClipDistances = 0 /* FIXME */,
422 .maxCullDistances = 0 /* FIXME */,
423 .maxCombinedClipAndCullDistances = 0 /* FIXME */,
424 .pointSizeRange = { 0.125, 255.875 },
425 .lineWidthRange = { 0.0, 7.9921875 },
426 .pointSizeGranularity = (1.0 / 8.0),
427 .lineWidthGranularity = (1.0 / 128.0),
428 };
429
430 return VK_SUCCESS;
431 }
432
433 VkResult anv_GetPhysicalDeviceProperties(
434 VkPhysicalDevice physicalDevice,
435 VkPhysicalDeviceProperties* pProperties)
436 {
437 ANV_FROM_HANDLE(anv_physical_device, pdevice, physicalDevice);
438
439 *pProperties = (VkPhysicalDeviceProperties) {
440 .apiVersion = VK_MAKE_VERSION(0, 138, 1),
441 .driverVersion = 1,
442 .vendorId = 0x8086,
443 .deviceId = pdevice->chipset_id,
444 .deviceType = VK_PHYSICAL_DEVICE_TYPE_INTEGRATED_GPU,
445 };
446
447 strcpy(pProperties->deviceName, pdevice->name);
448 snprintf((char *)pProperties->pipelineCacheUUID, VK_UUID_LENGTH,
449 "anv-%s", MESA_GIT_SHA1 + 4);
450
451 return VK_SUCCESS;
452 }
453
454 VkResult anv_GetPhysicalDeviceQueueCount(
455 VkPhysicalDevice physicalDevice,
456 uint32_t* pCount)
457 {
458 *pCount = 1;
459
460 return VK_SUCCESS;
461 }
462
463 VkResult anv_GetPhysicalDeviceQueueProperties(
464 VkPhysicalDevice physicalDevice,
465 uint32_t count,
466 VkPhysicalDeviceQueueProperties* pQueueProperties)
467 {
468 assert(count == 1);
469
470 *pQueueProperties = (VkPhysicalDeviceQueueProperties) {
471 .queueFlags = VK_QUEUE_GRAPHICS_BIT |
472 VK_QUEUE_COMPUTE_BIT |
473 VK_QUEUE_DMA_BIT,
474 .queueCount = 1,
475 .supportsTimestamps = true,
476 };
477
478 return VK_SUCCESS;
479 }
480
481 VkResult anv_GetPhysicalDeviceMemoryProperties(
482 VkPhysicalDevice physicalDevice,
483 VkPhysicalDeviceMemoryProperties* pMemoryProperties)
484 {
485 ANV_FROM_HANDLE(anv_physical_device, physical_device, physicalDevice);
486 VkDeviceSize heap_size;
487
488 /* Reserve some wiggle room for the driver by exposing only 75% of the
489 * aperture to the heap.
490 */
491 heap_size = 3 * physical_device->aperture_size / 4;
492
493 /* The property flags below are valid only for llc platforms. */
494 pMemoryProperties->memoryTypeCount = 1;
495 pMemoryProperties->memoryTypes[0] = (VkMemoryType) {
496 .propertyFlags = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT,
497 .heapIndex = 1,
498 };
499
500 pMemoryProperties->memoryHeapCount = 1;
501 pMemoryProperties->memoryHeaps[0] = (VkMemoryHeap) {
502 .size = heap_size,
503 .flags = VK_MEMORY_HEAP_HOST_LOCAL,
504 };
505
506 return VK_SUCCESS;
507 }
508
509 PFN_vkVoidFunction anv_GetInstanceProcAddr(
510 VkInstance instance,
511 const char* pName)
512 {
513 return anv_lookup_entrypoint(pName);
514 }
515
516 PFN_vkVoidFunction anv_GetDeviceProcAddr(
517 VkDevice device,
518 const char* pName)
519 {
520 return anv_lookup_entrypoint(pName);
521 }
522
523 static VkResult
524 anv_queue_init(struct anv_device *device, struct anv_queue *queue)
525 {
526 queue->device = device;
527 queue->pool = &device->surface_state_pool;
528
529 queue->completed_serial = anv_state_pool_alloc(queue->pool, 4, 4);
530 if (queue->completed_serial.map == NULL)
531 return vk_error(VK_ERROR_OUT_OF_DEVICE_MEMORY);
532
533 *(uint32_t *)queue->completed_serial.map = 0;
534 queue->next_serial = 1;
535
536 return VK_SUCCESS;
537 }
538
539 static void
540 anv_queue_finish(struct anv_queue *queue)
541 {
542 #ifdef HAVE_VALGRIND
543 /* This gets torn down with the device so we only need to do this if
544 * valgrind is present.
545 */
546 anv_state_pool_free(queue->pool, queue->completed_serial);
547 #endif
548 }
549
550 static void
551 anv_device_init_border_colors(struct anv_device *device)
552 {
553 static const VkClearColorValue border_colors[] = {
554 [VK_BORDER_COLOR_FLOAT_TRANSPARENT_BLACK] = { .f32 = { 0.0, 0.0, 0.0, 0.0 } },
555 [VK_BORDER_COLOR_FLOAT_OPAQUE_BLACK] = { .f32 = { 0.0, 0.0, 0.0, 1.0 } },
556 [VK_BORDER_COLOR_FLOAT_OPAQUE_WHITE] = { .f32 = { 1.0, 1.0, 1.0, 1.0 } },
557 [VK_BORDER_COLOR_INT_TRANSPARENT_BLACK] = { .u32 = { 0, 0, 0, 0 } },
558 [VK_BORDER_COLOR_INT_OPAQUE_BLACK] = { .u32 = { 0, 0, 0, 1 } },
559 [VK_BORDER_COLOR_INT_OPAQUE_WHITE] = { .u32 = { 1, 1, 1, 1 } },
560 };
561
562 device->border_colors =
563 anv_state_pool_alloc(&device->dynamic_state_pool,
564 sizeof(border_colors), 32);
565 memcpy(device->border_colors.map, border_colors, sizeof(border_colors));
566 }
567
568 VkResult anv_CreateDevice(
569 VkPhysicalDevice physicalDevice,
570 const VkDeviceCreateInfo* pCreateInfo,
571 VkDevice* pDevice)
572 {
573 ANV_FROM_HANDLE(anv_physical_device, physical_device, physicalDevice);
574 struct anv_instance *instance = physical_device->instance;
575 struct anv_device *device;
576
577 assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO);
578
579 for (uint32_t i = 0; i < pCreateInfo->extensionCount; i++) {
580 bool found = false;
581 for (uint32_t j = 0; j < ARRAY_SIZE(device_extensions); j++) {
582 if (strcmp(pCreateInfo->ppEnabledExtensionNames[i],
583 device_extensions[j].extName) == 0) {
584 found = true;
585 break;
586 }
587 }
588 if (!found)
589 return vk_error(VK_ERROR_INVALID_EXTENSION);
590 }
591
592 anv_set_dispatch_gen(physical_device->info->gen);
593
594 device = anv_instance_alloc(instance, sizeof(*device), 8,
595 VK_SYSTEM_ALLOC_TYPE_API_OBJECT);
596 if (!device)
597 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
598
599 device->instance = physical_device->instance;
600
601 /* XXX(chadv): Can we dup() physicalDevice->fd here? */
602 device->fd = open(physical_device->path, O_RDWR | O_CLOEXEC);
603 if (device->fd == -1)
604 goto fail_device;
605
606 device->context_id = anv_gem_create_context(device);
607 if (device->context_id == -1)
608 goto fail_fd;
609
610 pthread_mutex_init(&device->mutex, NULL);
611
612 anv_bo_pool_init(&device->batch_bo_pool, device, ANV_CMD_BUFFER_BATCH_SIZE);
613
614 anv_block_pool_init(&device->dynamic_state_block_pool, device, 2048);
615
616 anv_state_pool_init(&device->dynamic_state_pool,
617 &device->dynamic_state_block_pool);
618
619 anv_block_pool_init(&device->instruction_block_pool, device, 2048);
620 anv_block_pool_init(&device->surface_state_block_pool, device, 2048);
621
622 anv_state_pool_init(&device->surface_state_pool,
623 &device->surface_state_block_pool);
624
625 anv_block_pool_init(&device->scratch_block_pool, device, 0x10000);
626
627 device->info = *physical_device->info;
628
629 device->compiler = anv_compiler_create(device);
630
631 anv_queue_init(device, &device->queue);
632
633 anv_device_init_meta(device);
634
635 anv_device_init_border_colors(device);
636
637 *pDevice = anv_device_to_handle(device);
638
639 return VK_SUCCESS;
640
641 fail_fd:
642 close(device->fd);
643 fail_device:
644 anv_device_free(device, device);
645
646 return vk_error(VK_ERROR_UNAVAILABLE);
647 }
648
649 VkResult anv_DestroyDevice(
650 VkDevice _device)
651 {
652 ANV_FROM_HANDLE(anv_device, device, _device);
653
654 anv_compiler_destroy(device->compiler);
655
656 anv_queue_finish(&device->queue);
657
658 anv_device_finish_meta(device);
659
660 #ifdef HAVE_VALGRIND
661 /* We only need to free these to prevent valgrind errors. The backing
662 * BO will go away in a couple of lines so we don't actually leak.
663 */
664 anv_state_pool_free(&device->dynamic_state_pool, device->border_colors);
665 #endif
666
667 anv_bo_pool_finish(&device->batch_bo_pool);
668 anv_state_pool_finish(&device->dynamic_state_pool);
669 anv_block_pool_finish(&device->dynamic_state_block_pool);
670 anv_block_pool_finish(&device->instruction_block_pool);
671 anv_state_pool_finish(&device->surface_state_pool);
672 anv_block_pool_finish(&device->surface_state_block_pool);
673 anv_block_pool_finish(&device->scratch_block_pool);
674
675 close(device->fd);
676
677 anv_instance_free(device->instance, device);
678
679 return VK_SUCCESS;
680 }
681
682 VkResult anv_GetGlobalExtensionProperties(
683 const char* pLayerName,
684 uint32_t* pCount,
685 VkExtensionProperties* pProperties)
686 {
687 if (pProperties == NULL) {
688 *pCount = ARRAY_SIZE(global_extensions);
689 return VK_SUCCESS;
690 }
691
692 assert(*pCount >= ARRAY_SIZE(global_extensions));
693
694 *pCount = ARRAY_SIZE(global_extensions);
695 memcpy(pProperties, global_extensions, sizeof(global_extensions));
696
697 return VK_SUCCESS;
698 }
699
700 VkResult anv_GetPhysicalDeviceExtensionProperties(
701 VkPhysicalDevice physicalDevice,
702 const char* pLayerName,
703 uint32_t* pCount,
704 VkExtensionProperties* pProperties)
705 {
706 if (pProperties == NULL) {
707 *pCount = ARRAY_SIZE(device_extensions);
708 return VK_SUCCESS;
709 }
710
711 assert(*pCount >= ARRAY_SIZE(device_extensions));
712
713 *pCount = ARRAY_SIZE(device_extensions);
714 memcpy(pProperties, device_extensions, sizeof(device_extensions));
715
716 return VK_SUCCESS;
717 }
718
719 VkResult anv_GetGlobalLayerProperties(
720 uint32_t* pCount,
721 VkLayerProperties* pProperties)
722 {
723 if (pProperties == NULL) {
724 *pCount = 0;
725 return VK_SUCCESS;
726 }
727
728 /* None supported at this time */
729 return vk_error(VK_ERROR_INVALID_LAYER);
730 }
731
732 VkResult anv_GetPhysicalDeviceLayerProperties(
733 VkPhysicalDevice physicalDevice,
734 uint32_t* pCount,
735 VkLayerProperties* pProperties)
736 {
737 if (pProperties == NULL) {
738 *pCount = 0;
739 return VK_SUCCESS;
740 }
741
742 /* None supported at this time */
743 return vk_error(VK_ERROR_INVALID_LAYER);
744 }
745
746 VkResult anv_GetDeviceQueue(
747 VkDevice _device,
748 uint32_t queueNodeIndex,
749 uint32_t queueIndex,
750 VkQueue* pQueue)
751 {
752 ANV_FROM_HANDLE(anv_device, device, _device);
753
754 assert(queueIndex == 0);
755
756 *pQueue = anv_queue_to_handle(&device->queue);
757
758 return VK_SUCCESS;
759 }
760
761 VkResult anv_QueueSubmit(
762 VkQueue _queue,
763 uint32_t cmdBufferCount,
764 const VkCmdBuffer* pCmdBuffers,
765 VkFence _fence)
766 {
767 ANV_FROM_HANDLE(anv_queue, queue, _queue);
768 ANV_FROM_HANDLE(anv_fence, fence, _fence);
769 struct anv_device *device = queue->device;
770 int ret;
771
772 for (uint32_t i = 0; i < cmdBufferCount; i++) {
773 ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, pCmdBuffers[i]);
774
775 assert(cmd_buffer->level == VK_CMD_BUFFER_LEVEL_PRIMARY);
776
777 ret = anv_gem_execbuffer(device, &cmd_buffer->execbuf2.execbuf);
778 if (ret != 0)
779 return vk_errorf(VK_ERROR_UNKNOWN, "execbuf2 failed: %m");
780
781 if (fence) {
782 ret = anv_gem_execbuffer(device, &fence->execbuf);
783 if (ret != 0)
784 return vk_errorf(VK_ERROR_UNKNOWN, "execbuf2 failed: %m");
785 }
786
787 for (uint32_t i = 0; i < cmd_buffer->execbuf2.bo_count; i++)
788 cmd_buffer->execbuf2.bos[i]->offset = cmd_buffer->execbuf2.objects[i].offset;
789 }
790
791 return VK_SUCCESS;
792 }
793
794 VkResult anv_QueueWaitIdle(
795 VkQueue _queue)
796 {
797 ANV_FROM_HANDLE(anv_queue, queue, _queue);
798
799 return ANV_CALL(DeviceWaitIdle)(anv_device_to_handle(queue->device));
800 }
801
802 VkResult anv_DeviceWaitIdle(
803 VkDevice _device)
804 {
805 ANV_FROM_HANDLE(anv_device, device, _device);
806 struct anv_state state;
807 struct anv_batch batch;
808 struct drm_i915_gem_execbuffer2 execbuf;
809 struct drm_i915_gem_exec_object2 exec2_objects[1];
810 struct anv_bo *bo = NULL;
811 VkResult result;
812 int64_t timeout;
813 int ret;
814
815 state = anv_state_pool_alloc(&device->dynamic_state_pool, 32, 32);
816 bo = &device->dynamic_state_pool.block_pool->bo;
817 batch.start = batch.next = state.map;
818 batch.end = state.map + 32;
819 anv_batch_emit(&batch, GEN7_MI_BATCH_BUFFER_END);
820 anv_batch_emit(&batch, GEN7_MI_NOOP);
821
822 exec2_objects[0].handle = bo->gem_handle;
823 exec2_objects[0].relocation_count = 0;
824 exec2_objects[0].relocs_ptr = 0;
825 exec2_objects[0].alignment = 0;
826 exec2_objects[0].offset = bo->offset;
827 exec2_objects[0].flags = 0;
828 exec2_objects[0].rsvd1 = 0;
829 exec2_objects[0].rsvd2 = 0;
830
831 execbuf.buffers_ptr = (uintptr_t) exec2_objects;
832 execbuf.buffer_count = 1;
833 execbuf.batch_start_offset = state.offset;
834 execbuf.batch_len = batch.next - state.map;
835 execbuf.cliprects_ptr = 0;
836 execbuf.num_cliprects = 0;
837 execbuf.DR1 = 0;
838 execbuf.DR4 = 0;
839
840 execbuf.flags =
841 I915_EXEC_HANDLE_LUT | I915_EXEC_NO_RELOC | I915_EXEC_RENDER;
842 execbuf.rsvd1 = device->context_id;
843 execbuf.rsvd2 = 0;
844
845 ret = anv_gem_execbuffer(device, &execbuf);
846 if (ret != 0) {
847 result = vk_errorf(VK_ERROR_UNKNOWN, "execbuf2 failed: %m");
848 goto fail;
849 }
850
851 timeout = INT64_MAX;
852 ret = anv_gem_wait(device, bo->gem_handle, &timeout);
853 if (ret != 0) {
854 result = vk_errorf(VK_ERROR_UNKNOWN, "execbuf2 failed: %m");
855 goto fail;
856 }
857
858 anv_state_pool_free(&device->dynamic_state_pool, state);
859
860 return VK_SUCCESS;
861
862 fail:
863 anv_state_pool_free(&device->dynamic_state_pool, state);
864
865 return result;
866 }
867
868 void *
869 anv_device_alloc(struct anv_device * device,
870 size_t size,
871 size_t alignment,
872 VkSystemAllocType allocType)
873 {
874 return anv_instance_alloc(device->instance, size, alignment, allocType);
875 }
876
877 void
878 anv_device_free(struct anv_device * device,
879 void * mem)
880 {
881 anv_instance_free(device->instance, mem);
882 }
883
884 VkResult
885 anv_bo_init_new(struct anv_bo *bo, struct anv_device *device, uint64_t size)
886 {
887 bo->gem_handle = anv_gem_create(device, size);
888 if (!bo->gem_handle)
889 return vk_error(VK_ERROR_OUT_OF_DEVICE_MEMORY);
890
891 bo->map = NULL;
892 bo->index = 0;
893 bo->offset = 0;
894 bo->size = size;
895
896 return VK_SUCCESS;
897 }
898
899 VkResult anv_AllocMemory(
900 VkDevice _device,
901 const VkMemoryAllocInfo* pAllocInfo,
902 VkDeviceMemory* pMem)
903 {
904 ANV_FROM_HANDLE(anv_device, device, _device);
905 struct anv_device_memory *mem;
906 VkResult result;
907
908 assert(pAllocInfo->sType == VK_STRUCTURE_TYPE_MEMORY_ALLOC_INFO);
909
910 if (pAllocInfo->memoryTypeIndex != 0) {
911 /* We support exactly one memory heap. */
912 return vk_error(VK_ERROR_INVALID_VALUE);
913 }
914
915 /* FINISHME: Fail if allocation request exceeds heap size. */
916
917 mem = anv_device_alloc(device, sizeof(*mem), 8,
918 VK_SYSTEM_ALLOC_TYPE_API_OBJECT);
919 if (mem == NULL)
920 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
921
922 result = anv_bo_init_new(&mem->bo, device, pAllocInfo->allocationSize);
923 if (result != VK_SUCCESS)
924 goto fail;
925
926 *pMem = anv_device_memory_to_handle(mem);
927
928 return VK_SUCCESS;
929
930 fail:
931 anv_device_free(device, mem);
932
933 return result;
934 }
935
936 VkResult anv_FreeMemory(
937 VkDevice _device,
938 VkDeviceMemory _mem)
939 {
940 ANV_FROM_HANDLE(anv_device, device, _device);
941 ANV_FROM_HANDLE(anv_device_memory, mem, _mem);
942
943 if (mem->bo.map)
944 anv_gem_munmap(mem->bo.map, mem->bo.size);
945
946 if (mem->bo.gem_handle != 0)
947 anv_gem_close(device, mem->bo.gem_handle);
948
949 anv_device_free(device, mem);
950
951 return VK_SUCCESS;
952 }
953
954 VkResult anv_MapMemory(
955 VkDevice _device,
956 VkDeviceMemory _mem,
957 VkDeviceSize offset,
958 VkDeviceSize size,
959 VkMemoryMapFlags flags,
960 void** ppData)
961 {
962 ANV_FROM_HANDLE(anv_device, device, _device);
963 ANV_FROM_HANDLE(anv_device_memory, mem, _mem);
964
965 /* FIXME: Is this supposed to be thread safe? Since vkUnmapMemory() only
966 * takes a VkDeviceMemory pointer, it seems like only one map of the memory
967 * at a time is valid. We could just mmap up front and return an offset
968 * pointer here, but that may exhaust virtual memory on 32 bit
969 * userspace. */
970
971 mem->map = anv_gem_mmap(device, mem->bo.gem_handle, offset, size);
972 mem->map_size = size;
973
974 *ppData = mem->map;
975
976 return VK_SUCCESS;
977 }
978
979 VkResult anv_UnmapMemory(
980 VkDevice _device,
981 VkDeviceMemory _mem)
982 {
983 ANV_FROM_HANDLE(anv_device_memory, mem, _mem);
984
985 anv_gem_munmap(mem->map, mem->map_size);
986
987 return VK_SUCCESS;
988 }
989
990 VkResult anv_FlushMappedMemoryRanges(
991 VkDevice device,
992 uint32_t memRangeCount,
993 const VkMappedMemoryRange* pMemRanges)
994 {
995 /* clflush here for !llc platforms */
996
997 return VK_SUCCESS;
998 }
999
1000 VkResult anv_InvalidateMappedMemoryRanges(
1001 VkDevice device,
1002 uint32_t memRangeCount,
1003 const VkMappedMemoryRange* pMemRanges)
1004 {
1005 return anv_FlushMappedMemoryRanges(device, memRangeCount, pMemRanges);
1006 }
1007
1008 VkResult anv_GetBufferMemoryRequirements(
1009 VkDevice device,
1010 VkBuffer _buffer,
1011 VkMemoryRequirements* pMemoryRequirements)
1012 {
1013 ANV_FROM_HANDLE(anv_buffer, buffer, _buffer);
1014
1015 /* The Vulkan spec (git aaed022) says:
1016 *
1017 * memoryTypeBits is a bitfield and contains one bit set for every
1018 * supported memory type for the resource. The bit `1<<i` is set if and
1019 * only if the memory type `i` in the VkPhysicalDeviceMemoryProperties
1020 * structure for the physical device is supported.
1021 *
1022 * We support exactly one memory type.
1023 */
1024 pMemoryRequirements->memoryTypeBits = 1;
1025
1026 pMemoryRequirements->size = buffer->size;
1027 pMemoryRequirements->alignment = 16;
1028
1029 return VK_SUCCESS;
1030 }
1031
1032 VkResult anv_GetImageMemoryRequirements(
1033 VkDevice device,
1034 VkImage _image,
1035 VkMemoryRequirements* pMemoryRequirements)
1036 {
1037 ANV_FROM_HANDLE(anv_image, image, _image);
1038
1039 /* The Vulkan spec (git aaed022) says:
1040 *
1041 * memoryTypeBits is a bitfield and contains one bit set for every
1042 * supported memory type for the resource. The bit `1<<i` is set if and
1043 * only if the memory type `i` in the VkPhysicalDeviceMemoryProperties
1044 * structure for the physical device is supported.
1045 *
1046 * We support exactly one memory type.
1047 */
1048 pMemoryRequirements->memoryTypeBits = 1;
1049
1050 pMemoryRequirements->size = image->size;
1051 pMemoryRequirements->alignment = image->alignment;
1052
1053 return VK_SUCCESS;
1054 }
1055
1056 VkResult anv_GetImageSparseMemoryRequirements(
1057 VkDevice device,
1058 VkImage image,
1059 uint32_t* pNumRequirements,
1060 VkSparseImageMemoryRequirements* pSparseMemoryRequirements)
1061 {
1062 return vk_error(VK_UNSUPPORTED);
1063 }
1064
1065 VkResult anv_GetDeviceMemoryCommitment(
1066 VkDevice device,
1067 VkDeviceMemory memory,
1068 VkDeviceSize* pCommittedMemoryInBytes)
1069 {
1070 *pCommittedMemoryInBytes = 0;
1071 stub_return(VK_SUCCESS);
1072 }
1073
1074 VkResult anv_BindBufferMemory(
1075 VkDevice device,
1076 VkBuffer _buffer,
1077 VkDeviceMemory _mem,
1078 VkDeviceSize memOffset)
1079 {
1080 ANV_FROM_HANDLE(anv_device_memory, mem, _mem);
1081 ANV_FROM_HANDLE(anv_buffer, buffer, _buffer);
1082
1083 buffer->bo = &mem->bo;
1084 buffer->offset = memOffset;
1085
1086 return VK_SUCCESS;
1087 }
1088
1089 VkResult anv_BindImageMemory(
1090 VkDevice device,
1091 VkImage _image,
1092 VkDeviceMemory _mem,
1093 VkDeviceSize memOffset)
1094 {
1095 ANV_FROM_HANDLE(anv_device_memory, mem, _mem);
1096 ANV_FROM_HANDLE(anv_image, image, _image);
1097
1098 image->bo = &mem->bo;
1099 image->offset = memOffset;
1100
1101 return VK_SUCCESS;
1102 }
1103
1104 VkResult anv_QueueBindSparseBufferMemory(
1105 VkQueue queue,
1106 VkBuffer buffer,
1107 uint32_t numBindings,
1108 const VkSparseMemoryBindInfo* pBindInfo)
1109 {
1110 stub_return(VK_UNSUPPORTED);
1111 }
1112
1113 VkResult anv_QueueBindSparseImageOpaqueMemory(
1114 VkQueue queue,
1115 VkImage image,
1116 uint32_t numBindings,
1117 const VkSparseMemoryBindInfo* pBindInfo)
1118 {
1119 stub_return(VK_UNSUPPORTED);
1120 }
1121
1122 VkResult anv_QueueBindSparseImageMemory(
1123 VkQueue queue,
1124 VkImage image,
1125 uint32_t numBindings,
1126 const VkSparseImageMemoryBindInfo* pBindInfo)
1127 {
1128 stub_return(VK_UNSUPPORTED);
1129 }
1130
1131 VkResult anv_CreateFence(
1132 VkDevice _device,
1133 const VkFenceCreateInfo* pCreateInfo,
1134 VkFence* pFence)
1135 {
1136 ANV_FROM_HANDLE(anv_device, device, _device);
1137 struct anv_fence *fence;
1138 struct anv_batch batch;
1139 VkResult result;
1140
1141 const uint32_t fence_size = 128;
1142
1143 assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_FENCE_CREATE_INFO);
1144
1145 fence = anv_device_alloc(device, sizeof(*fence), 8,
1146 VK_SYSTEM_ALLOC_TYPE_API_OBJECT);
1147 if (fence == NULL)
1148 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
1149
1150 result = anv_bo_init_new(&fence->bo, device, fence_size);
1151 if (result != VK_SUCCESS)
1152 goto fail;
1153
1154 fence->bo.map =
1155 anv_gem_mmap(device, fence->bo.gem_handle, 0, fence->bo.size);
1156 batch.next = batch.start = fence->bo.map;
1157 batch.end = fence->bo.map + fence->bo.size;
1158 anv_batch_emit(&batch, GEN7_MI_BATCH_BUFFER_END);
1159 anv_batch_emit(&batch, GEN7_MI_NOOP);
1160
1161 fence->exec2_objects[0].handle = fence->bo.gem_handle;
1162 fence->exec2_objects[0].relocation_count = 0;
1163 fence->exec2_objects[0].relocs_ptr = 0;
1164 fence->exec2_objects[0].alignment = 0;
1165 fence->exec2_objects[0].offset = fence->bo.offset;
1166 fence->exec2_objects[0].flags = 0;
1167 fence->exec2_objects[0].rsvd1 = 0;
1168 fence->exec2_objects[0].rsvd2 = 0;
1169
1170 fence->execbuf.buffers_ptr = (uintptr_t) fence->exec2_objects;
1171 fence->execbuf.buffer_count = 1;
1172 fence->execbuf.batch_start_offset = 0;
1173 fence->execbuf.batch_len = batch.next - fence->bo.map;
1174 fence->execbuf.cliprects_ptr = 0;
1175 fence->execbuf.num_cliprects = 0;
1176 fence->execbuf.DR1 = 0;
1177 fence->execbuf.DR4 = 0;
1178
1179 fence->execbuf.flags =
1180 I915_EXEC_HANDLE_LUT | I915_EXEC_NO_RELOC | I915_EXEC_RENDER;
1181 fence->execbuf.rsvd1 = device->context_id;
1182 fence->execbuf.rsvd2 = 0;
1183
1184 *pFence = anv_fence_to_handle(fence);
1185
1186 return VK_SUCCESS;
1187
1188 fail:
1189 anv_device_free(device, fence);
1190
1191 return result;
1192 }
1193
1194 VkResult anv_DestroyFence(
1195 VkDevice _device,
1196 VkFence _fence)
1197 {
1198 ANV_FROM_HANDLE(anv_device, device, _device);
1199 ANV_FROM_HANDLE(anv_fence, fence, _fence);
1200
1201 anv_gem_munmap(fence->bo.map, fence->bo.size);
1202 anv_gem_close(device, fence->bo.gem_handle);
1203 anv_device_free(device, fence);
1204
1205 return VK_SUCCESS;
1206 }
1207
1208 VkResult anv_ResetFences(
1209 VkDevice _device,
1210 uint32_t fenceCount,
1211 const VkFence* pFences)
1212 {
1213 for (uint32_t i = 0; i < fenceCount; i++) {
1214 ANV_FROM_HANDLE(anv_fence, fence, pFences[i]);
1215 fence->ready = false;
1216 }
1217
1218 return VK_SUCCESS;
1219 }
1220
1221 VkResult anv_GetFenceStatus(
1222 VkDevice _device,
1223 VkFence _fence)
1224 {
1225 ANV_FROM_HANDLE(anv_device, device, _device);
1226 ANV_FROM_HANDLE(anv_fence, fence, _fence);
1227 int64_t t = 0;
1228 int ret;
1229
1230 if (fence->ready)
1231 return VK_SUCCESS;
1232
1233 ret = anv_gem_wait(device, fence->bo.gem_handle, &t);
1234 if (ret == 0) {
1235 fence->ready = true;
1236 return VK_SUCCESS;
1237 }
1238
1239 return VK_NOT_READY;
1240 }
1241
1242 VkResult anv_WaitForFences(
1243 VkDevice _device,
1244 uint32_t fenceCount,
1245 const VkFence* pFences,
1246 VkBool32 waitAll,
1247 uint64_t timeout)
1248 {
1249 ANV_FROM_HANDLE(anv_device, device, _device);
1250 int64_t t = timeout;
1251 int ret;
1252
1253 /* FIXME: handle !waitAll */
1254
1255 for (uint32_t i = 0; i < fenceCount; i++) {
1256 ANV_FROM_HANDLE(anv_fence, fence, pFences[i]);
1257 ret = anv_gem_wait(device, fence->bo.gem_handle, &t);
1258 if (ret == -1 && errno == ETIME)
1259 return VK_TIMEOUT;
1260 else if (ret == -1)
1261 return vk_errorf(VK_ERROR_UNKNOWN, "gem wait failed: %m");
1262 }
1263
1264 return VK_SUCCESS;
1265 }
1266
1267 // Queue semaphore functions
1268
1269 VkResult anv_CreateSemaphore(
1270 VkDevice device,
1271 const VkSemaphoreCreateInfo* pCreateInfo,
1272 VkSemaphore* pSemaphore)
1273 {
1274 stub_return(VK_UNSUPPORTED);
1275 }
1276
1277 VkResult anv_DestroySemaphore(
1278 VkDevice device,
1279 VkSemaphore semaphore)
1280 {
1281 stub_return(VK_UNSUPPORTED);
1282 }
1283
1284 VkResult anv_QueueSignalSemaphore(
1285 VkQueue queue,
1286 VkSemaphore semaphore)
1287 {
1288 stub_return(VK_UNSUPPORTED);
1289 }
1290
1291 VkResult anv_QueueWaitSemaphore(
1292 VkQueue queue,
1293 VkSemaphore semaphore)
1294 {
1295 stub_return(VK_UNSUPPORTED);
1296 }
1297
1298 // Event functions
1299
1300 VkResult anv_CreateEvent(
1301 VkDevice device,
1302 const VkEventCreateInfo* pCreateInfo,
1303 VkEvent* pEvent)
1304 {
1305 stub_return(VK_UNSUPPORTED);
1306 }
1307
1308 VkResult anv_DestroyEvent(
1309 VkDevice device,
1310 VkEvent event)
1311 {
1312 stub_return(VK_UNSUPPORTED);
1313 }
1314
1315 VkResult anv_GetEventStatus(
1316 VkDevice device,
1317 VkEvent event)
1318 {
1319 stub_return(VK_UNSUPPORTED);
1320 }
1321
1322 VkResult anv_SetEvent(
1323 VkDevice device,
1324 VkEvent event)
1325 {
1326 stub_return(VK_UNSUPPORTED);
1327 }
1328
1329 VkResult anv_ResetEvent(
1330 VkDevice device,
1331 VkEvent event)
1332 {
1333 stub_return(VK_UNSUPPORTED);
1334 }
1335
1336 // Buffer functions
1337
1338 VkResult anv_CreateBuffer(
1339 VkDevice _device,
1340 const VkBufferCreateInfo* pCreateInfo,
1341 VkBuffer* pBuffer)
1342 {
1343 ANV_FROM_HANDLE(anv_device, device, _device);
1344 struct anv_buffer *buffer;
1345
1346 assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO);
1347
1348 buffer = anv_device_alloc(device, sizeof(*buffer), 8,
1349 VK_SYSTEM_ALLOC_TYPE_API_OBJECT);
1350 if (buffer == NULL)
1351 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
1352
1353 buffer->size = pCreateInfo->size;
1354 buffer->bo = NULL;
1355 buffer->offset = 0;
1356
1357 *pBuffer = anv_buffer_to_handle(buffer);
1358
1359 return VK_SUCCESS;
1360 }
1361
1362 VkResult anv_DestroyBuffer(
1363 VkDevice _device,
1364 VkBuffer _buffer)
1365 {
1366 ANV_FROM_HANDLE(anv_device, device, _device);
1367 ANV_FROM_HANDLE(anv_buffer, buffer, _buffer);
1368
1369 anv_device_free(device, buffer);
1370
1371 return VK_SUCCESS;
1372 }
1373
1374 void
1375 anv_fill_buffer_surface_state(struct anv_device *device, void *state,
1376 const struct anv_format *format,
1377 uint32_t offset, uint32_t range)
1378 {
1379 switch (device->info.gen) {
1380 case 7:
1381 gen7_fill_buffer_surface_state(state, format, offset, range);
1382 break;
1383 case 8:
1384 gen8_fill_buffer_surface_state(state, format, offset, range);
1385 break;
1386 default:
1387 unreachable("unsupported gen\n");
1388 }
1389 }
1390
1391 VkResult
1392 anv_buffer_view_create(
1393 struct anv_device * device,
1394 const VkBufferViewCreateInfo* pCreateInfo,
1395 struct anv_buffer_view ** view_out)
1396 {
1397 ANV_FROM_HANDLE(anv_buffer, buffer, pCreateInfo->buffer);
1398 struct anv_buffer_view *view;
1399
1400 assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_BUFFER_VIEW_CREATE_INFO);
1401
1402 view = anv_device_alloc(device, sizeof(*view), 8,
1403 VK_SYSTEM_ALLOC_TYPE_API_OBJECT);
1404 if (view == NULL)
1405 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
1406
1407 view->view = (struct anv_surface_view) {
1408 .bo = buffer->bo,
1409 .offset = buffer->offset + pCreateInfo->offset,
1410 .surface_state = anv_state_pool_alloc(&device->surface_state_pool, 64, 64),
1411 .format = anv_format_for_vk_format(pCreateInfo->format),
1412 .range = pCreateInfo->range,
1413 };
1414
1415 *view_out = view;
1416
1417 return VK_SUCCESS;
1418 }
1419
1420 VkResult anv_DestroyBufferView(
1421 VkDevice _device,
1422 VkBufferView _bview)
1423 {
1424 ANV_FROM_HANDLE(anv_device, device, _device);
1425 ANV_FROM_HANDLE(anv_buffer_view, bview, _bview);
1426
1427 anv_surface_view_fini(device, &bview->view);
1428 anv_device_free(device, bview);
1429
1430 return VK_SUCCESS;
1431 }
1432
1433 VkResult anv_DestroySampler(
1434 VkDevice _device,
1435 VkSampler _sampler)
1436 {
1437 ANV_FROM_HANDLE(anv_device, device, _device);
1438 ANV_FROM_HANDLE(anv_sampler, sampler, _sampler);
1439
1440 anv_device_free(device, sampler);
1441
1442 return VK_SUCCESS;
1443 }
1444
1445 // Descriptor set functions
1446
1447 VkResult anv_CreateDescriptorSetLayout(
1448 VkDevice _device,
1449 const VkDescriptorSetLayoutCreateInfo* pCreateInfo,
1450 VkDescriptorSetLayout* pSetLayout)
1451 {
1452 ANV_FROM_HANDLE(anv_device, device, _device);
1453 struct anv_descriptor_set_layout *set_layout;
1454
1455 assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO);
1456
1457 uint32_t sampler_count[VK_SHADER_STAGE_NUM] = { 0, };
1458 uint32_t surface_count[VK_SHADER_STAGE_NUM] = { 0, };
1459 uint32_t num_dynamic_buffers = 0;
1460 uint32_t count = 0;
1461 uint32_t stages = 0;
1462 uint32_t s;
1463
1464 for (uint32_t i = 0; i < pCreateInfo->count; i++) {
1465 switch (pCreateInfo->pBinding[i].descriptorType) {
1466 case VK_DESCRIPTOR_TYPE_SAMPLER:
1467 case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
1468 for_each_bit(s, pCreateInfo->pBinding[i].stageFlags)
1469 sampler_count[s] += pCreateInfo->pBinding[i].arraySize;
1470 break;
1471 default:
1472 break;
1473 }
1474
1475 switch (pCreateInfo->pBinding[i].descriptorType) {
1476 case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
1477 case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE:
1478 case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
1479 case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
1480 case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
1481 case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
1482 case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
1483 case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
1484 case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
1485 case VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT:
1486 for_each_bit(s, pCreateInfo->pBinding[i].stageFlags)
1487 surface_count[s] += pCreateInfo->pBinding[i].arraySize;
1488 break;
1489 default:
1490 break;
1491 }
1492
1493 switch (pCreateInfo->pBinding[i].descriptorType) {
1494 case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
1495 case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
1496 num_dynamic_buffers += pCreateInfo->pBinding[i].arraySize;
1497 break;
1498 default:
1499 break;
1500 }
1501
1502 stages |= pCreateInfo->pBinding[i].stageFlags;
1503 count += pCreateInfo->pBinding[i].arraySize;
1504 }
1505
1506 uint32_t sampler_total = 0;
1507 uint32_t surface_total = 0;
1508 for (uint32_t s = 0; s < VK_SHADER_STAGE_NUM; s++) {
1509 sampler_total += sampler_count[s];
1510 surface_total += surface_count[s];
1511 }
1512
1513 size_t size = sizeof(*set_layout) +
1514 (sampler_total + surface_total) * sizeof(set_layout->entries[0]);
1515 set_layout = anv_device_alloc(device, size, 8,
1516 VK_SYSTEM_ALLOC_TYPE_API_OBJECT);
1517 if (!set_layout)
1518 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
1519
1520 set_layout->num_dynamic_buffers = num_dynamic_buffers;
1521 set_layout->count = count;
1522 set_layout->shader_stages = stages;
1523
1524 struct anv_descriptor_slot *p = set_layout->entries;
1525 struct anv_descriptor_slot *sampler[VK_SHADER_STAGE_NUM];
1526 struct anv_descriptor_slot *surface[VK_SHADER_STAGE_NUM];
1527 for (uint32_t s = 0; s < VK_SHADER_STAGE_NUM; s++) {
1528 set_layout->stage[s].surface_count = surface_count[s];
1529 set_layout->stage[s].surface_start = surface[s] = p;
1530 p += surface_count[s];
1531 set_layout->stage[s].sampler_count = sampler_count[s];
1532 set_layout->stage[s].sampler_start = sampler[s] = p;
1533 p += sampler_count[s];
1534 }
1535
1536 uint32_t descriptor = 0;
1537 int8_t dynamic_slot = 0;
1538 bool is_dynamic;
1539 for (uint32_t i = 0; i < pCreateInfo->count; i++) {
1540 switch (pCreateInfo->pBinding[i].descriptorType) {
1541 case VK_DESCRIPTOR_TYPE_SAMPLER:
1542 case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
1543 for_each_bit(s, pCreateInfo->pBinding[i].stageFlags)
1544 for (uint32_t j = 0; j < pCreateInfo->pBinding[i].arraySize; j++) {
1545 sampler[s]->index = descriptor + j;
1546 sampler[s]->dynamic_slot = -1;
1547 sampler[s]++;
1548 }
1549 break;
1550 default:
1551 break;
1552 }
1553
1554 switch (pCreateInfo->pBinding[i].descriptorType) {
1555 case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
1556 case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
1557 is_dynamic = true;
1558 break;
1559 default:
1560 is_dynamic = false;
1561 break;
1562 }
1563
1564 switch (pCreateInfo->pBinding[i].descriptorType) {
1565 case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
1566 case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE:
1567 case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
1568 case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
1569 case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
1570 case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
1571 case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
1572 case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
1573 case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
1574 case VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT:
1575 for_each_bit(s, pCreateInfo->pBinding[i].stageFlags)
1576 for (uint32_t j = 0; j < pCreateInfo->pBinding[i].arraySize; j++) {
1577 surface[s]->index = descriptor + j;
1578 if (is_dynamic)
1579 surface[s]->dynamic_slot = dynamic_slot + j;
1580 else
1581 surface[s]->dynamic_slot = -1;
1582 surface[s]++;
1583 }
1584 break;
1585 default:
1586 break;
1587 }
1588
1589 if (is_dynamic)
1590 dynamic_slot += pCreateInfo->pBinding[i].arraySize;
1591
1592 descriptor += pCreateInfo->pBinding[i].arraySize;
1593 }
1594
1595 *pSetLayout = anv_descriptor_set_layout_to_handle(set_layout);
1596
1597 return VK_SUCCESS;
1598 }
1599
1600 VkResult anv_DestroyDescriptorSetLayout(
1601 VkDevice _device,
1602 VkDescriptorSetLayout _set_layout)
1603 {
1604 ANV_FROM_HANDLE(anv_device, device, _device);
1605 ANV_FROM_HANDLE(anv_descriptor_set_layout, set_layout, _set_layout);
1606
1607 anv_device_free(device, set_layout);
1608
1609 return VK_SUCCESS;
1610 }
1611
1612 VkResult anv_CreateDescriptorPool(
1613 VkDevice device,
1614 VkDescriptorPoolUsage poolUsage,
1615 uint32_t maxSets,
1616 const VkDescriptorPoolCreateInfo* pCreateInfo,
1617 VkDescriptorPool* pDescriptorPool)
1618 {
1619 anv_finishme("VkDescriptorPool is a stub");
1620 pDescriptorPool->handle = 1;
1621 return VK_SUCCESS;
1622 }
1623
1624 VkResult anv_DestroyDescriptorPool(
1625 VkDevice _device,
1626 VkDescriptorPool _pool)
1627 {
1628 anv_finishme("VkDescriptorPool is a stub: free the pool's descriptor sets");
1629 return VK_SUCCESS;
1630 }
1631
1632 VkResult anv_ResetDescriptorPool(
1633 VkDevice device,
1634 VkDescriptorPool descriptorPool)
1635 {
1636 anv_finishme("VkDescriptorPool is a stub: free the pool's descriptor sets");
1637 return VK_SUCCESS;
1638 }
1639
1640 VkResult
1641 anv_descriptor_set_create(struct anv_device *device,
1642 const struct anv_descriptor_set_layout *layout,
1643 struct anv_descriptor_set **out_set)
1644 {
1645 struct anv_descriptor_set *set;
1646 size_t size = sizeof(*set) + layout->count * sizeof(set->descriptors[0]);
1647
1648 set = anv_device_alloc(device, size, 8, VK_SYSTEM_ALLOC_TYPE_API_OBJECT);
1649 if (!set)
1650 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
1651
1652 /* A descriptor set may not be 100% filled. Clear the set so we can can
1653 * later detect holes in it.
1654 */
1655 memset(set, 0, size);
1656
1657 *out_set = set;
1658
1659 return VK_SUCCESS;
1660 }
1661
1662 void
1663 anv_descriptor_set_destroy(struct anv_device *device,
1664 struct anv_descriptor_set *set)
1665 {
1666 anv_device_free(device, set);
1667 }
1668
1669 VkResult anv_AllocDescriptorSets(
1670 VkDevice _device,
1671 VkDescriptorPool descriptorPool,
1672 VkDescriptorSetUsage setUsage,
1673 uint32_t count,
1674 const VkDescriptorSetLayout* pSetLayouts,
1675 VkDescriptorSet* pDescriptorSets,
1676 uint32_t* pCount)
1677 {
1678 ANV_FROM_HANDLE(anv_device, device, _device);
1679
1680 VkResult result;
1681 struct anv_descriptor_set *set;
1682
1683 for (uint32_t i = 0; i < count; i++) {
1684 ANV_FROM_HANDLE(anv_descriptor_set_layout, layout, pSetLayouts[i]);
1685
1686 result = anv_descriptor_set_create(device, layout, &set);
1687 if (result != VK_SUCCESS) {
1688 *pCount = i;
1689 return result;
1690 }
1691
1692 pDescriptorSets[i] = anv_descriptor_set_to_handle(set);
1693 }
1694
1695 *pCount = count;
1696
1697 return VK_SUCCESS;
1698 }
1699
1700 VkResult anv_FreeDescriptorSets(
1701 VkDevice _device,
1702 VkDescriptorPool descriptorPool,
1703 uint32_t count,
1704 const VkDescriptorSet* pDescriptorSets)
1705 {
1706 ANV_FROM_HANDLE(anv_device, device, _device);
1707
1708 for (uint32_t i = 0; i < count; i++) {
1709 ANV_FROM_HANDLE(anv_descriptor_set, set, pDescriptorSets[i]);
1710
1711 anv_descriptor_set_destroy(device, set);
1712 }
1713
1714 return VK_SUCCESS;
1715 }
1716
1717 VkResult anv_UpdateDescriptorSets(
1718 VkDevice device,
1719 uint32_t writeCount,
1720 const VkWriteDescriptorSet* pDescriptorWrites,
1721 uint32_t copyCount,
1722 const VkCopyDescriptorSet* pDescriptorCopies)
1723 {
1724 for (uint32_t i = 0; i < writeCount; i++) {
1725 const VkWriteDescriptorSet *write = &pDescriptorWrites[i];
1726 ANV_FROM_HANDLE(anv_descriptor_set, set, write->destSet);
1727
1728 switch (write->descriptorType) {
1729 case VK_DESCRIPTOR_TYPE_SAMPLER:
1730 case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
1731 for (uint32_t j = 0; j < write->count; j++) {
1732 set->descriptors[write->destBinding + j].sampler =
1733 anv_sampler_from_handle(write->pDescriptors[j].sampler);
1734 }
1735
1736 if (write->descriptorType == VK_DESCRIPTOR_TYPE_SAMPLER)
1737 break;
1738
1739 /* fallthrough */
1740
1741 case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE:
1742 case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
1743 for (uint32_t j = 0; j < write->count; j++) {
1744 ANV_FROM_HANDLE(anv_image_view, iview,
1745 write->pDescriptors[j].imageView);
1746 set->descriptors[write->destBinding + j].view = &iview->view;
1747 }
1748 break;
1749
1750 case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
1751 case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
1752 anv_finishme("texel buffers not implemented");
1753 break;
1754
1755 case VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT:
1756 anv_finishme("input attachments not implemented");
1757 break;
1758
1759 case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
1760 case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
1761 case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
1762 case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
1763 for (uint32_t j = 0; j < write->count; j++) {
1764 ANV_FROM_HANDLE(anv_buffer_view, bview,
1765 write->pDescriptors[j].bufferView);
1766 set->descriptors[write->destBinding + j].view = &bview->view;
1767 }
1768
1769 default:
1770 break;
1771 }
1772 }
1773
1774 for (uint32_t i = 0; i < copyCount; i++) {
1775 const VkCopyDescriptorSet *copy = &pDescriptorCopies[i];
1776 ANV_FROM_HANDLE(anv_descriptor_set, src, copy->destSet);
1777 ANV_FROM_HANDLE(anv_descriptor_set, dest, copy->destSet);
1778 for (uint32_t j = 0; j < copy->count; j++) {
1779 dest->descriptors[copy->destBinding + j] =
1780 src->descriptors[copy->srcBinding + j];
1781 }
1782 }
1783
1784 return VK_SUCCESS;
1785 }
1786
1787 // State object functions
1788
1789 static inline int64_t
1790 clamp_int64(int64_t x, int64_t min, int64_t max)
1791 {
1792 if (x < min)
1793 return min;
1794 else if (x < max)
1795 return x;
1796 else
1797 return max;
1798 }
1799
1800 VkResult anv_CreateDynamicViewportState(
1801 VkDevice _device,
1802 const VkDynamicViewportStateCreateInfo* pCreateInfo,
1803 VkDynamicViewportState* pState)
1804 {
1805 ANV_FROM_HANDLE(anv_device, device, _device);
1806 struct anv_dynamic_vp_state *state;
1807
1808 assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_DYNAMIC_VIEWPORT_STATE_CREATE_INFO);
1809
1810 state = anv_device_alloc(device, sizeof(*state), 8,
1811 VK_SYSTEM_ALLOC_TYPE_API_OBJECT);
1812 if (state == NULL)
1813 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
1814
1815 unsigned count = pCreateInfo->viewportAndScissorCount;
1816 state->sf_clip_vp = anv_state_pool_alloc(&device->dynamic_state_pool,
1817 count * 64, 64);
1818 state->cc_vp = anv_state_pool_alloc(&device->dynamic_state_pool,
1819 count * 8, 32);
1820 state->scissor = anv_state_pool_alloc(&device->dynamic_state_pool,
1821 count * 32, 32);
1822
1823 for (uint32_t i = 0; i < pCreateInfo->viewportAndScissorCount; i++) {
1824 const VkViewport *vp = &pCreateInfo->pViewports[i];
1825 const VkRect2D *s = &pCreateInfo->pScissors[i];
1826
1827 /* The gen7 state struct has just the matrix and guardband fields, the
1828 * gen8 struct adds the min/max viewport fields. */
1829 struct GEN8_SF_CLIP_VIEWPORT sf_clip_viewport = {
1830 .ViewportMatrixElementm00 = vp->width / 2,
1831 .ViewportMatrixElementm11 = vp->height / 2,
1832 .ViewportMatrixElementm22 = (vp->maxDepth - vp->minDepth) / 2,
1833 .ViewportMatrixElementm30 = vp->originX + vp->width / 2,
1834 .ViewportMatrixElementm31 = vp->originY + vp->height / 2,
1835 .ViewportMatrixElementm32 = (vp->maxDepth + vp->minDepth) / 2,
1836 .XMinClipGuardband = -1.0f,
1837 .XMaxClipGuardband = 1.0f,
1838 .YMinClipGuardband = -1.0f,
1839 .YMaxClipGuardband = 1.0f,
1840 .XMinViewPort = vp->originX,
1841 .XMaxViewPort = vp->originX + vp->width - 1,
1842 .YMinViewPort = vp->originY,
1843 .YMaxViewPort = vp->originY + vp->height - 1,
1844 };
1845
1846 struct GEN7_CC_VIEWPORT cc_viewport = {
1847 .MinimumDepth = vp->minDepth,
1848 .MaximumDepth = vp->maxDepth
1849 };
1850
1851 /* Since xmax and ymax are inclusive, we have to have xmax < xmin or
1852 * ymax < ymin for empty clips. In case clip x, y, width height are all
1853 * 0, the clamps below produce 0 for xmin, ymin, xmax, ymax, which isn't
1854 * what we want. Just special case empty clips and produce a canonical
1855 * empty clip. */
1856 static const struct GEN7_SCISSOR_RECT empty_scissor = {
1857 .ScissorRectangleYMin = 1,
1858 .ScissorRectangleXMin = 1,
1859 .ScissorRectangleYMax = 0,
1860 .ScissorRectangleXMax = 0
1861 };
1862
1863 const int max = 0xffff;
1864 struct GEN7_SCISSOR_RECT scissor = {
1865 /* Do this math using int64_t so overflow gets clamped correctly. */
1866 .ScissorRectangleYMin = clamp_int64(s->offset.y, 0, max),
1867 .ScissorRectangleXMin = clamp_int64(s->offset.x, 0, max),
1868 .ScissorRectangleYMax = clamp_int64((uint64_t) s->offset.y + s->extent.height - 1, 0, max),
1869 .ScissorRectangleXMax = clamp_int64((uint64_t) s->offset.x + s->extent.width - 1, 0, max)
1870 };
1871
1872 GEN8_SF_CLIP_VIEWPORT_pack(NULL, state->sf_clip_vp.map + i * 64, &sf_clip_viewport);
1873 GEN7_CC_VIEWPORT_pack(NULL, state->cc_vp.map + i * 32, &cc_viewport);
1874
1875 if (s->extent.width <= 0 || s->extent.height <= 0) {
1876 GEN7_SCISSOR_RECT_pack(NULL, state->scissor.map + i * 32, &empty_scissor);
1877 } else {
1878 GEN7_SCISSOR_RECT_pack(NULL, state->scissor.map + i * 32, &scissor);
1879 }
1880 }
1881
1882 *pState = anv_dynamic_vp_state_to_handle(state);
1883
1884 return VK_SUCCESS;
1885 }
1886
1887 VkResult anv_DestroyDynamicViewportState(
1888 VkDevice _device,
1889 VkDynamicViewportState _vp_state)
1890 {
1891 ANV_FROM_HANDLE(anv_device, device, _device);
1892 ANV_FROM_HANDLE(anv_dynamic_vp_state, vp_state, _vp_state);
1893
1894 anv_state_pool_free(&device->dynamic_state_pool, vp_state->sf_clip_vp);
1895 anv_state_pool_free(&device->dynamic_state_pool, vp_state->cc_vp);
1896 anv_state_pool_free(&device->dynamic_state_pool, vp_state->scissor);
1897
1898 anv_device_free(device, vp_state);
1899
1900 return VK_SUCCESS;
1901 }
1902
1903 VkResult anv_DestroyDynamicRasterState(
1904 VkDevice _device,
1905 VkDynamicRasterState _rs_state)
1906 {
1907 ANV_FROM_HANDLE(anv_device, device, _device);
1908 ANV_FROM_HANDLE(anv_dynamic_rs_state, rs_state, _rs_state);
1909
1910 anv_device_free(device, rs_state);
1911
1912 return VK_SUCCESS;
1913 }
1914
1915 VkResult anv_CreateDynamicColorBlendState(
1916 VkDevice _device,
1917 const VkDynamicColorBlendStateCreateInfo* pCreateInfo,
1918 VkDynamicColorBlendState* pState)
1919 {
1920 ANV_FROM_HANDLE(anv_device, device, _device);
1921 struct anv_dynamic_cb_state *state;
1922
1923 assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_DYNAMIC_COLOR_BLEND_STATE_CREATE_INFO);
1924
1925 state = anv_device_alloc(device, sizeof(*state), 8,
1926 VK_SYSTEM_ALLOC_TYPE_API_OBJECT);
1927 if (state == NULL)
1928 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
1929
1930 struct GEN7_COLOR_CALC_STATE color_calc_state = {
1931 .BlendConstantColorRed = pCreateInfo->blendConst[0],
1932 .BlendConstantColorGreen = pCreateInfo->blendConst[1],
1933 .BlendConstantColorBlue = pCreateInfo->blendConst[2],
1934 .BlendConstantColorAlpha = pCreateInfo->blendConst[3]
1935 };
1936
1937 GEN7_COLOR_CALC_STATE_pack(NULL, state->color_calc_state, &color_calc_state);
1938
1939 *pState = anv_dynamic_cb_state_to_handle(state);
1940
1941 return VK_SUCCESS;
1942 }
1943
1944 VkResult anv_DestroyDynamicColorBlendState(
1945 VkDevice _device,
1946 VkDynamicColorBlendState _cb_state)
1947 {
1948 ANV_FROM_HANDLE(anv_device, device, _device);
1949 ANV_FROM_HANDLE(anv_dynamic_cb_state, cb_state, _cb_state);
1950
1951 anv_device_free(device, cb_state);
1952
1953 return VK_SUCCESS;
1954 }
1955
1956 VkResult anv_DestroyDynamicDepthStencilState(
1957 VkDevice _device,
1958 VkDynamicDepthStencilState _ds_state)
1959 {
1960 ANV_FROM_HANDLE(anv_device, device, _device);
1961 ANV_FROM_HANDLE(anv_dynamic_ds_state, ds_state, _ds_state);
1962
1963 anv_device_free(device, ds_state);
1964
1965 return VK_SUCCESS;
1966 }
1967
1968 VkResult anv_CreateFramebuffer(
1969 VkDevice _device,
1970 const VkFramebufferCreateInfo* pCreateInfo,
1971 VkFramebuffer* pFramebuffer)
1972 {
1973 ANV_FROM_HANDLE(anv_device, device, _device);
1974 struct anv_framebuffer *framebuffer;
1975
1976 assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO);
1977
1978 size_t size = sizeof(*framebuffer) +
1979 sizeof(struct anv_attachment_view *) * pCreateInfo->attachmentCount;
1980 framebuffer = anv_device_alloc(device, size, 8,
1981 VK_SYSTEM_ALLOC_TYPE_API_OBJECT);
1982 if (framebuffer == NULL)
1983 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
1984
1985 framebuffer->attachment_count = pCreateInfo->attachmentCount;
1986 for (uint32_t i = 0; i < pCreateInfo->attachmentCount; i++) {
1987 ANV_FROM_HANDLE(anv_attachment_view, view,
1988 pCreateInfo->pAttachments[i].view);
1989
1990 framebuffer->attachments[i] = view;
1991 }
1992
1993 framebuffer->width = pCreateInfo->width;
1994 framebuffer->height = pCreateInfo->height;
1995 framebuffer->layers = pCreateInfo->layers;
1996
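/* Create a default viewport/scissor state covering the entire framebuffer,
 * presumably so internal (meta) operations have a ready-made full-surface
 * viewport. anv_DestroyFramebuffer destroys it again below. */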
1997 anv_CreateDynamicViewportState(anv_device_to_handle(device),
1998 &(VkDynamicViewportStateCreateInfo) {
1999 .sType = VK_STRUCTURE_TYPE_DYNAMIC_VIEWPORT_STATE_CREATE_INFO,
2000 .viewportAndScissorCount = 1,
2001 .pViewports = (VkViewport[]) {
2002 {
2003 .originX = 0,
2004 .originY = 0,
2005 .width = pCreateInfo->width,
2006 .height = pCreateInfo->height,
2007 .minDepth = 0,
2008 .maxDepth = 1
2009 },
2010 },
2011 .pScissors = (VkRect2D[]) {
2012 { { 0, 0 },
2013 { pCreateInfo->width, pCreateInfo->height } },
2014 }
2015 },
2016 &framebuffer->vp_state);
2017
2018 *pFramebuffer = anv_framebuffer_to_handle(framebuffer);
2019
2020 return VK_SUCCESS;
2021 }
2022
2023 VkResult anv_DestroyFramebuffer(
2024 VkDevice _device,
2025 VkFramebuffer _fb)
2026 {
2027 ANV_FROM_HANDLE(anv_device, device, _device);
2028 ANV_FROM_HANDLE(anv_framebuffer, fb, _fb);
2029
2030 anv_DestroyDynamicViewportState(anv_device_to_handle(device),
2031 fb->vp_state);
2032 anv_device_free(device, fb);
2033
2034 return VK_SUCCESS;
2035 }
2036
2037 VkResult anv_CreateRenderPass(
2038 VkDevice _device,
2039 const VkRenderPassCreateInfo* pCreateInfo,
2040 VkRenderPass* pRenderPass)
2041 {
2042 ANV_FROM_HANDLE(anv_device, device, _device);
2043 struct anv_render_pass *pass;
2044 size_t size;
2045 size_t attachments_offset;
2046
2047 assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO);
2048
2049 size = sizeof(*pass);
2050 size += pCreateInfo->subpassCount * sizeof(pass->subpasses[0]);
2051 attachments_offset = size;
2052 size += pCreateInfo->attachmentCount * sizeof(pass->attachments[0]);
2053
2054 pass = anv_device_alloc(device, size, 8,
2055 VK_SYSTEM_ALLOC_TYPE_API_OBJECT);
2056 if (pass == NULL)
2057 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
2058
2059 /* Clear the subpasses along with the parent pass. This is required
2060 * because each array member of anv_subpass must either be NULL or a
2061 * valid pointer. */
2062 memset(pass, 0, size);
2063 pass->attachment_count = pCreateInfo->attachmentCount;
2064 pass->subpass_count = pCreateInfo->subpassCount;
2065 pass->attachments = (void *) pass + attachments_offset;
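/* The single allocation is laid out as
 *
 *    | anv_render_pass (with subpasses[subpassCount]) | attachments[attachmentCount] |
 *
 * with the attachment array starting at attachments_offset as computed
 * above; the per-subpass attachment index arrays are allocated separately
 * in the loop further down. */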
2066
2067 for (uint32_t i = 0; i < pCreateInfo->attachmentCount; i++) {
2068 struct anv_render_pass_attachment *att = &pass->attachments[i];
2069
2070 att->format = anv_format_for_vk_format(pCreateInfo->pAttachments[i].format);
2071 att->samples = pCreateInfo->pAttachments[i].samples;
2072 att->load_op = pCreateInfo->pAttachments[i].loadOp;
2073 att->stencil_load_op = pCreateInfo->pAttachments[i].stencilLoadOp;
2074 // att->store_op = pCreateInfo->pAttachments[i].storeOp;
2075 // att->stencil_store_op = pCreateInfo->pAttachments[i].stencilStoreOp;
2076
2077 if (att->load_op == VK_ATTACHMENT_LOAD_OP_CLEAR) {
2078 if (anv_format_is_color(att->format)) {
2079 ++pass->num_color_clear_attachments;
2080 } else if (att->format->depth_format) {
2081 pass->has_depth_clear_attachment = true;
2082 }
2083 } else if (att->stencil_load_op == VK_ATTACHMENT_LOAD_OP_CLEAR) {
2084 assert(att->format->has_stencil);
2085 pass->has_stencil_clear_attachment = true;
2086 }
2087 }
2088
2089 for (uint32_t i = 0; i < pCreateInfo->subpassCount; i++) {
2090 const VkSubpassDescription *desc = &pCreateInfo->pSubpasses[i];
2091 struct anv_subpass *subpass = &pass->subpasses[i];
2092
2093 subpass->input_count = desc->inputCount;
2094 subpass->color_count = desc->colorCount;
2095
2096 if (desc->inputCount > 0) {
2097 subpass->input_attachments =
2098 anv_device_alloc(device, desc->inputCount * sizeof(uint32_t),
2099 8, VK_SYSTEM_ALLOC_TYPE_API_OBJECT);
2100
2101 for (uint32_t j = 0; j < desc->inputCount; j++) {
2102 subpass->input_attachments[j]
2103 = desc->inputAttachments[j].attachment;
2104 }
2105 }
2106
2107 if (desc->colorCount > 0) {
2108 subpass->color_attachments =
2109 anv_device_alloc(device, desc->colorCount * sizeof(uint32_t),
2110 8, VK_SYSTEM_ALLOC_TYPE_API_OBJECT);
2111
2112 for (uint32_t j = 0; j < desc->colorCount; j++) {
2113 subpass->color_attachments[j]
2114 = desc->colorAttachments[j].attachment;
2115 }
2116 }
2117
2118 if (desc->resolveAttachments) {
2119 subpass->resolve_attachments =
2120 anv_device_alloc(device, desc->colorCount * sizeof(uint32_t),
2121 8, VK_SYSTEM_ALLOC_TYPE_API_OBJECT);
2122
2123 for (uint32_t j = 0; j < desc->colorCount; j++) {
2124 subpass->resolve_attachments[j]
2125 = desc->resolveAttachments[j].attachment;
2126 }
2127 }
2128
2129 subpass->depth_stencil_attachment = desc->depthStencilAttachment.attachment;
2130 }
2131
2132 *pRenderPass = anv_render_pass_to_handle(pass);
2133
2134 return VK_SUCCESS;
2135 }
2136
2137 VkResult anv_DestroyRenderPass(
2138 VkDevice _device,
2139 VkRenderPass _pass)
2140 {
2141 ANV_FROM_HANDLE(anv_device, device, _device);
2142 ANV_FROM_HANDLE(anv_render_pass, pass, _pass);
2143
2144 for (uint32_t i = 0; i < pass->subpass_count; i++) {
2145 /* In VkSubpassDescription, each of the attachment arrays may be NULL;
2146 * arrays that were never allocated are still NULL here because the pass
2147 * was zeroed at creation, so freeing them unconditionally is harmless. */
2148 struct anv_subpass *subpass = &pass->subpasses[i];
2149
2150 anv_device_free(device, subpass->input_attachments);
2151 anv_device_free(device, subpass->color_attachments);
2152 anv_device_free(device, subpass->resolve_attachments);
2153 }
2154
2155 anv_device_free(device, pass);
2156
2157 return VK_SUCCESS;
2158 }
2159
2160 VkResult anv_GetRenderAreaGranularity(
2161 VkDevice device,
2162 VkRenderPass renderPass,
2163 VkExtent2D* pGranularity)
2164 {
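/* A granularity of 1x1 tells the application that any render area is
 * optimally aligned, i.e. render areas need no special alignment here. */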
2165 *pGranularity = (VkExtent2D) { 1, 1 };
2166
2167 return VK_SUCCESS;
2168 }
2169
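/* Stub entry points for the debug marker commands: they are declared with
 * default visibility so they are exported from the driver, and the empty
 * definitions below turn them into no-ops. */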
2170 void vkCmdDbgMarkerBegin(
2171 VkCmdBuffer cmdBuffer,
2172 const char* pMarker)
2173 __attribute__ ((visibility ("default")));
2174
2175 void vkCmdDbgMarkerEnd(
2176 VkCmdBuffer cmdBuffer)
2177 __attribute__ ((visibility ("default")));
2178
2179 void vkCmdDbgMarkerBegin(
2180 VkCmdBuffer cmdBuffer,
2181 const char* pMarker)
2182 {
2183 }
2184
2185 void vkCmdDbgMarkerEnd(
2186 VkCmdBuffer cmdBuffer)
2187 {
2188 }