anv: Completely rework shader compilation
[mesa.git] / src / vulkan / anv_device.c
/*
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include <assert.h>
#include <stdbool.h>
#include <string.h>
#include <unistd.h>
#include <fcntl.h>

#include "anv_private.h"
#include "mesa/main/git_sha1.h"
#include "util/strtod.h"

struct anv_dispatch_table dtable;

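/* Logging hooks handed to the brw compiler when it is created below. Perf
 * messages only reach stderr when INTEL_DEBUG=perf is set in the
 * environment (parsed by brw_process_intel_debug_variable()); debug
 * messages are currently discarded.
 */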
static void
compiler_debug_log(void *data, const char *fmt, ...)
{ }

static void
compiler_perf_log(void *data, const char *fmt, ...)
{
   va_list args;
   va_start(args, fmt);

   if (unlikely(INTEL_DEBUG & DEBUG_PERF))
      vfprintf(stderr, fmt, args);

   va_end(args);
}

static VkResult
anv_physical_device_init(struct anv_physical_device *device,
                         struct anv_instance *instance,
                         const char *path)
{
   VkResult result;
   int fd;

   fd = open(path, O_RDWR | O_CLOEXEC);
   if (fd < 0)
      return vk_errorf(VK_ERROR_INITIALIZATION_FAILED,
                       "failed to open %s: %m", path);

   device->_loader_data.loaderMagic = ICD_LOADER_MAGIC;
   device->instance = instance;
   device->path = path;

   device->chipset_id = anv_gem_get_param(fd, I915_PARAM_CHIPSET_ID);
   if (!device->chipset_id) {
      result = vk_errorf(VK_ERROR_INITIALIZATION_FAILED,
                         "failed to get chipset id: %m");
      goto fail;
   }

   device->name = brw_get_device_name(device->chipset_id);
   device->info = brw_get_device_info(device->chipset_id, -1);
   if (!device->info) {
      result = vk_errorf(VK_ERROR_INITIALIZATION_FAILED,
                         "failed to get device info");
      goto fail;
   }

   if (anv_gem_get_aperture(fd, &device->aperture_size) == -1) {
      result = vk_errorf(VK_ERROR_INITIALIZATION_FAILED,
                         "failed to get aperture size: %m");
      goto fail;
   }

   if (!anv_gem_get_param(fd, I915_PARAM_HAS_WAIT_TIMEOUT)) {
      result = vk_errorf(VK_ERROR_INITIALIZATION_FAILED,
                         "kernel missing gem wait");
      goto fail;
   }

   if (!anv_gem_get_param(fd, I915_PARAM_HAS_EXECBUF2)) {
      result = vk_errorf(VK_ERROR_INITIALIZATION_FAILED,
                         "kernel missing execbuf2");
      goto fail;
   }

   if (!anv_gem_get_param(fd, I915_PARAM_HAS_LLC)) {
      result = vk_errorf(VK_ERROR_INITIALIZATION_FAILED,
                         "non-llc gpu");
      goto fail;
   }

   close(fd);

   brw_process_intel_debug_variable();

   device->compiler = brw_compiler_create(NULL, device->info);
   if (device->compiler == NULL) {
      result = vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
      goto fail;
   }
   device->compiler->shader_debug_log = compiler_debug_log;
   device->compiler->shader_perf_log = compiler_perf_log;

   return VK_SUCCESS;

fail:
   close(fd);
   return result;
}

static void
anv_physical_device_finish(struct anv_physical_device *device)
{
   ralloc_free(device->compiler);
}

static void *default_alloc(
    void*                                       pUserData,
    size_t                                      size,
    size_t                                      alignment,
    VkSystemAllocType                           allocType)
{
   return malloc(size);
}

static void default_free(
    void*                                       pUserData,
    void*                                       pMem)
{
   free(pMem);
}

static const VkAllocCallbacks default_alloc_callbacks = {
   .pUserData = NULL,
   .pfnAlloc = default_alloc,
   .pfnFree = default_free
};

static const VkExtensionProperties global_extensions[] = {
   {
      .extName = VK_EXT_KHR_SWAPCHAIN_EXTENSION_NAME,
      .specVersion = 17,
   },
};

static const VkExtensionProperties device_extensions[] = {
   {
      .extName = VK_EXT_KHR_DEVICE_SWAPCHAIN_EXTENSION_NAME,
      .specVersion = 53,
   },
};

VkResult anv_CreateInstance(
    const VkInstanceCreateInfo*                 pCreateInfo,
    VkInstance*                                 pInstance)
{
   struct anv_instance *instance;
   const VkAllocCallbacks *alloc_callbacks = &default_alloc_callbacks;
   void *user_data = NULL;

   assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_INSTANCE_CREATE_INFO);

   if (pCreateInfo->pAppInfo->apiVersion != VK_MAKE_VERSION(0, 170, 2))
      return vk_error(VK_ERROR_INCOMPATIBLE_DRIVER);

   for (uint32_t i = 0; i < pCreateInfo->extensionCount; i++) {
      bool found = false;
      for (uint32_t j = 0; j < ARRAY_SIZE(global_extensions); j++) {
         if (strcmp(pCreateInfo->ppEnabledExtensionNames[i],
                    global_extensions[j].extName) == 0) {
            found = true;
            break;
         }
      }
      if (!found)
         return vk_error(VK_ERROR_EXTENSION_NOT_PRESENT);
   }

   if (pCreateInfo->pAllocCb) {
      alloc_callbacks = pCreateInfo->pAllocCb;
      user_data = pCreateInfo->pAllocCb->pUserData;
   }
   instance = alloc_callbacks->pfnAlloc(user_data, sizeof(*instance), 8,
                                        VK_SYSTEM_ALLOC_TYPE_API_OBJECT);
   if (!instance)
      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

   instance->_loader_data.loaderMagic = ICD_LOADER_MAGIC;
   instance->pAllocUserData = alloc_callbacks->pUserData;
   instance->pfnAlloc = alloc_callbacks->pfnAlloc;
   instance->pfnFree = alloc_callbacks->pfnFree;
   instance->apiVersion = pCreateInfo->pAppInfo->apiVersion;
   instance->physicalDeviceCount = 0;

   _mesa_locale_init();

   VG(VALGRIND_CREATE_MEMPOOL(instance, 0, false));

   anv_init_wsi(instance);

   *pInstance = anv_instance_to_handle(instance);

   return VK_SUCCESS;
}

void anv_DestroyInstance(
    VkInstance                                  _instance)
{
   ANV_FROM_HANDLE(anv_instance, instance, _instance);

   anv_physical_device_finish(&instance->physicalDevice);
   anv_finish_wsi(instance);

   VG(VALGRIND_DESTROY_MEMPOOL(instance));

   _mesa_locale_fini();

   instance->pfnFree(instance->pAllocUserData, instance);
}

void *
anv_instance_alloc(struct anv_instance *instance, size_t size,
                   size_t alignment, VkSystemAllocType allocType)
{
   void *mem = instance->pfnAlloc(instance->pAllocUserData,
                                  size, alignment, allocType);
   if (mem) {
      VG(VALGRIND_MEMPOOL_ALLOC(instance, mem, size));
      VG(VALGRIND_MAKE_MEM_UNDEFINED(mem, size));
   }
   return mem;
}

void
anv_instance_free(struct anv_instance *instance, void *mem)
{
   if (mem == NULL)
      return;

   VG(VALGRIND_MEMPOOL_FREE(instance, mem));

   instance->pfnFree(instance->pAllocUserData, mem);
}

VkResult anv_EnumeratePhysicalDevices(
    VkInstance                                  _instance,
    uint32_t*                                   pPhysicalDeviceCount,
    VkPhysicalDevice*                           pPhysicalDevices)
{
   ANV_FROM_HANDLE(anv_instance, instance, _instance);
   VkResult result;

   if (instance->physicalDeviceCount == 0) {
      result = anv_physical_device_init(&instance->physicalDevice,
                                        instance, "/dev/dri/renderD128");
      if (result != VK_SUCCESS)
         return result;

      instance->physicalDeviceCount = 1;
   }

   /* pPhysicalDeviceCount is an out parameter if pPhysicalDevices is NULL;
    * otherwise it's an inout parameter.
    *
    * The Vulkan spec (git aaed022) says:
    *
    *    pPhysicalDeviceCount is a pointer to an unsigned integer variable
    *    that is initialized with the number of devices the application is
    *    prepared to receive handles to. pname:pPhysicalDevices is pointer to
    *    an array of at least this many VkPhysicalDevice handles [...].
    *
    *    Upon success, if pPhysicalDevices is NULL, vkEnumeratePhysicalDevices
    *    overwrites the contents of the variable pointed to by
    *    pPhysicalDeviceCount with the number of physical devices in the
    *    instance; otherwise, vkEnumeratePhysicalDevices overwrites
    *    pPhysicalDeviceCount with the number of physical handles written to
    *    pPhysicalDevices.
    */
   if (!pPhysicalDevices) {
      *pPhysicalDeviceCount = instance->physicalDeviceCount;
   } else if (*pPhysicalDeviceCount >= 1) {
      pPhysicalDevices[0] = anv_physical_device_to_handle(&instance->physicalDevice);
      *pPhysicalDeviceCount = 1;
   } else {
      *pPhysicalDeviceCount = 0;
   }

   return VK_SUCCESS;
}
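
/* A sketch (application-side, not driver code) of the two-call idiom the
 * entry point above implements; 'instance' here is a hypothetical valid
 * VkInstance:
 *
 *    uint32_t count;
 *    vkEnumeratePhysicalDevices(instance, &count, NULL);
 *    VkPhysicalDevice *devices = malloc(count * sizeof(*devices));
 *    vkEnumeratePhysicalDevices(instance, &count, devices);
 *
 * Because this driver currently exposes exactly one physical device,
 * count is always 1 after the first call.
 */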

VkResult anv_GetPhysicalDeviceFeatures(
    VkPhysicalDevice                            physicalDevice,
    VkPhysicalDeviceFeatures*                   pFeatures)
{
   anv_finishme("Get correct values for PhysicalDeviceFeatures");

   *pFeatures = (VkPhysicalDeviceFeatures) {
      .robustBufferAccess = false,
      .fullDrawIndexUint32 = false,
      .imageCubeArray = false,
      .independentBlend = false,
      .geometryShader = true,
      .tessellationShader = false,
      .sampleRateShading = false,
      .dualSourceBlend = true,
      .logicOp = true,
      .multiDrawIndirect = true,
      .depthClip = false,
      .depthBiasClamp = false,
      .fillModeNonSolid = true,
      .depthBounds = false,
      .wideLines = true,
      .largePoints = true,
      .textureCompressionETC2 = true,
      .textureCompressionASTC_LDR = true,
      .textureCompressionBC = true,
      .occlusionQueryNonConservative = false, /* FINISHME */
      .pipelineStatisticsQuery = true,
      .vertexSideEffects = false,
      .tessellationSideEffects = false,
      .geometrySideEffects = false,
      .fragmentSideEffects = false,
      .shaderTessellationPointSize = false,
      .shaderGeometryPointSize = true,
      .shaderImageGatherExtended = true,
      .shaderStorageImageExtendedFormats = false,
      .shaderStorageImageMultisample = false,
      .shaderUniformBufferArrayDynamicIndexing = true,
      .shaderSampledImageArrayDynamicIndexing = false,
      .shaderStorageBufferArrayDynamicIndexing = false,
      .shaderStorageImageArrayDynamicIndexing = false,
      .shaderClipDistance = false,
      .shaderCullDistance = false,
      .shaderFloat64 = false,
      .shaderInt64 = false,
      .shaderInt16 = false,
      .alphaToOne = true,
   };

   return VK_SUCCESS;
}

VkResult anv_GetPhysicalDeviceProperties(
    VkPhysicalDevice                            physicalDevice,
    VkPhysicalDeviceProperties*                 pProperties)
{
   ANV_FROM_HANDLE(anv_physical_device, pdevice, physicalDevice);
   const struct brw_device_info *devinfo = pdevice->info;

   anv_finishme("Get correct values for VkPhysicalDeviceLimits");

   VkPhysicalDeviceLimits limits = {
      .maxImageDimension1D = (1 << 14),
      .maxImageDimension2D = (1 << 14),
      .maxImageDimension3D = (1 << 10),
      .maxImageDimensionCube = (1 << 14),
      .maxImageArrayLayers = (1 << 10),

      /* Broadwell supports 1, 2, 4, and 8 samples. */
      .sampleCounts = 4,

      .maxTexelBufferSize = (1 << 14),
      .maxUniformBufferSize = UINT32_MAX,
      .maxStorageBufferSize = UINT32_MAX,
      .maxPushConstantsSize = MAX_PUSH_CONSTANTS_SIZE,
      .maxMemoryAllocationCount = UINT32_MAX,
      .bufferImageGranularity = 64, /* A cache line */
      .sparseAddressSpaceSize = 0,
      .maxBoundDescriptorSets = MAX_SETS,
      .maxDescriptorSets = UINT32_MAX,
      .maxPerStageDescriptorSamplers = 64,
      .maxPerStageDescriptorUniformBuffers = 64,
      .maxPerStageDescriptorStorageBuffers = 64,
      .maxPerStageDescriptorSampledImages = 64,
      .maxPerStageDescriptorStorageImages = 64,
      .maxDescriptorSetSamplers = 256,
      .maxDescriptorSetUniformBuffers = 256,
      .maxDescriptorSetUniformBuffersDynamic = 256,
      .maxDescriptorSetStorageBuffers = 256,
      .maxDescriptorSetStorageBuffersDynamic = 256,
      .maxDescriptorSetSampledImages = 256,
      .maxDescriptorSetStorageImages = 256,
      .maxVertexInputAttributes = 32,
      .maxVertexInputBindings = 32,
      .maxVertexInputAttributeOffset = 256,
      .maxVertexInputBindingStride = 256,
      .maxVertexOutputComponents = 32,
      .maxTessGenLevel = 0,
      .maxTessPatchSize = 0,
      .maxTessControlPerVertexInputComponents = 0,
      .maxTessControlPerVertexOutputComponents = 0,
      .maxTessControlPerPatchOutputComponents = 0,
      .maxTessControlTotalOutputComponents = 0,
      .maxTessEvaluationInputComponents = 0,
      .maxTessEvaluationOutputComponents = 0,
      .maxGeometryShaderInvocations = 6,
      .maxGeometryInputComponents = 16,
      .maxGeometryOutputComponents = 16,
      .maxGeometryOutputVertices = 16,
      .maxGeometryTotalOutputComponents = 16,
      .maxFragmentInputComponents = 16,
      .maxFragmentOutputBuffers = 8,
      .maxFragmentDualSourceBuffers = 2,
      .maxFragmentCombinedOutputResources = 8,
      .maxComputeSharedMemorySize = 1024,
      .maxComputeWorkGroupCount = {
         16 * devinfo->max_cs_threads,
         16 * devinfo->max_cs_threads,
         16 * devinfo->max_cs_threads,
      },
      .maxComputeWorkGroupInvocations = 16 * devinfo->max_cs_threads,
      .maxComputeWorkGroupSize = {
         16 * devinfo->max_cs_threads,
         16 * devinfo->max_cs_threads,
         16 * devinfo->max_cs_threads,
      },
      .subPixelPrecisionBits = 4 /* FIXME */,
      .subTexelPrecisionBits = 4 /* FIXME */,
      .mipmapPrecisionBits = 4 /* FIXME */,
      .maxDrawIndexedIndexValue = UINT32_MAX,
      .maxDrawIndirectInstanceCount = UINT32_MAX,
      .primitiveRestartForPatches = UINT32_MAX,
      .maxSamplerLodBias = 16,
      .maxSamplerAnisotropy = 16,
      .maxViewports = MAX_VIEWPORTS,
      .maxViewportDimensions = { (1 << 14), (1 << 14) },
      .viewportBoundsRange = { -1.0, 1.0 }, /* FIXME */
      .viewportSubPixelBits = 13, /* We take a float? */
      .minMemoryMapAlignment = 64, /* A cache line */
      .minTexelBufferOffsetAlignment = 1,
      .minUniformBufferOffsetAlignment = 1,
      .minStorageBufferOffsetAlignment = 1,
      .minTexelOffset = 0, /* FIXME */
      .maxTexelOffset = 0, /* FIXME */
      .minTexelGatherOffset = 0, /* FIXME */
      .maxTexelGatherOffset = 0, /* FIXME */
      .minInterpolationOffset = 0, /* FIXME */
      .maxInterpolationOffset = 0, /* FIXME */
      .subPixelInterpolationOffsetBits = 0, /* FIXME */
      .maxFramebufferWidth = (1 << 14),
      .maxFramebufferHeight = (1 << 14),
      .maxFramebufferLayers = (1 << 10),
      .maxFramebufferColorSamples = 8,
      .maxFramebufferDepthSamples = 8,
      .maxFramebufferStencilSamples = 8,
      .maxColorAttachments = MAX_RTS,
      .maxSampledImageColorSamples = 8,
      .maxSampledImageDepthSamples = 8,
      .maxSampledImageIntegerSamples = 1,
      .maxStorageImageSamples = 1,
      .maxSampleMaskWords = 1,
      .timestampFrequency = 1000 * 1000 * 1000 / 80,
      .maxClipDistances = 0 /* FIXME */,
      .maxCullDistances = 0 /* FIXME */,
      .maxCombinedClipAndCullDistances = 0 /* FIXME */,
      .pointSizeRange = { 0.125, 255.875 },
      .lineWidthRange = { 0.0, 7.9921875 },
      .pointSizeGranularity = (1.0 / 8.0),
      .lineWidthGranularity = (1.0 / 128.0),
   };

   *pProperties = (VkPhysicalDeviceProperties) {
      .apiVersion = VK_MAKE_VERSION(0, 170, 2),
      .driverVersion = 1,
      .vendorId = 0x8086,
      .deviceId = pdevice->chipset_id,
      .deviceType = VK_PHYSICAL_DEVICE_TYPE_INTEGRATED_GPU,
      .limits = limits,
      .sparseProperties = {0}, /* Broadwell doesn't do sparse. */
   };

   strcpy(pProperties->deviceName, pdevice->name);
   snprintf((char *)pProperties->pipelineCacheUUID, VK_UUID_LENGTH,
            "anv-%s", MESA_GIT_SHA1 + 4);

   return VK_SUCCESS;
}

VkResult anv_GetPhysicalDeviceQueueFamilyProperties(
    VkPhysicalDevice                            physicalDevice,
    uint32_t*                                   pCount,
    VkQueueFamilyProperties*                    pQueueFamilyProperties)
{
   if (pQueueFamilyProperties == NULL) {
      *pCount = 1;
      return VK_SUCCESS;
   }

   assert(*pCount >= 1);

   *pQueueFamilyProperties = (VkQueueFamilyProperties) {
      .queueFlags = VK_QUEUE_GRAPHICS_BIT |
                    VK_QUEUE_COMPUTE_BIT |
                    VK_QUEUE_DMA_BIT,
      .queueCount = 1,
      .supportsTimestamps = true,
   };

   return VK_SUCCESS;
}

VkResult anv_GetPhysicalDeviceMemoryProperties(
    VkPhysicalDevice                            physicalDevice,
    VkPhysicalDeviceMemoryProperties*           pMemoryProperties)
{
   ANV_FROM_HANDLE(anv_physical_device, physical_device, physicalDevice);
   VkDeviceSize heap_size;

   /* Reserve some wiggle room for the driver by exposing only 75% of the
    * aperture to the heap.
    */
   heap_size = 3 * physical_device->aperture_size / 4;

   /* The property flags below are valid only for llc platforms. */
   pMemoryProperties->memoryTypeCount = 1;
   pMemoryProperties->memoryTypes[0] = (VkMemoryType) {
      .propertyFlags = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT,
      .heapIndex = 0, /* We advertise exactly one heap, so 0 is the only valid index. */
   };

   pMemoryProperties->memoryHeapCount = 1;
   pMemoryProperties->memoryHeaps[0] = (VkMemoryHeap) {
      .size = heap_size,
      .flags = VK_MEMORY_HEAP_HOST_LOCAL_BIT,
   };

   return VK_SUCCESS;
}
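
/* A minimal sketch of the memory-type selection loop an application would
 * run against these properties (names are illustrative, not driver code).
 * With one HOST_VISIBLE type and one heap, it trivially selects index 0:
 *
 *    uint32_t type_index = UINT32_MAX;
 *    for (uint32_t i = 0; i < props.memoryTypeCount; i++) {
 *       if ((reqs.memoryTypeBits & (1u << i)) &&
 *           (props.memoryTypes[i].propertyFlags &
 *            VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT))
 *          type_index = i;
 *    }
 */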

PFN_vkVoidFunction anv_GetInstanceProcAddr(
    VkInstance                                  instance,
    const char*                                 pName)
{
   return anv_lookup_entrypoint(pName);
}

PFN_vkVoidFunction anv_GetDeviceProcAddr(
    VkDevice                                    device,
    const char*                                 pName)
{
   return anv_lookup_entrypoint(pName);
}

static VkResult
anv_queue_init(struct anv_device *device, struct anv_queue *queue)
{
   queue->_loader_data.loaderMagic = ICD_LOADER_MAGIC;
   queue->device = device;
   queue->pool = &device->surface_state_pool;

   return VK_SUCCESS;
}

static void
anv_queue_finish(struct anv_queue *queue)
{
}

static void
anv_device_init_border_colors(struct anv_device *device)
{
   static const VkClearColorValue border_colors[] = {
      [VK_BORDER_COLOR_FLOAT_TRANSPARENT_BLACK] = { .float32 = { 0.0, 0.0, 0.0, 0.0 } },
      [VK_BORDER_COLOR_FLOAT_OPAQUE_BLACK] =      { .float32 = { 0.0, 0.0, 0.0, 1.0 } },
      [VK_BORDER_COLOR_FLOAT_OPAQUE_WHITE] =      { .float32 = { 1.0, 1.0, 1.0, 1.0 } },
      [VK_BORDER_COLOR_INT_TRANSPARENT_BLACK] =   { .uint32 = { 0, 0, 0, 0 } },
      [VK_BORDER_COLOR_INT_OPAQUE_BLACK] =        { .uint32 = { 0, 0, 0, 1 } },
      [VK_BORDER_COLOR_INT_OPAQUE_WHITE] =        { .uint32 = { 1, 1, 1, 1 } },
   };

   device->border_colors =
      anv_state_pool_alloc(&device->dynamic_state_pool,
                           sizeof(border_colors), 32);
   memcpy(device->border_colors.map, border_colors, sizeof(border_colors));
}

VkResult anv_CreateDevice(
    VkPhysicalDevice                            physicalDevice,
    const VkDeviceCreateInfo*                   pCreateInfo,
    VkDevice*                                   pDevice)
{
   ANV_FROM_HANDLE(anv_physical_device, physical_device, physicalDevice);
   struct anv_instance *instance = physical_device->instance;
   struct anv_device *device;

   assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO);

   for (uint32_t i = 0; i < pCreateInfo->extensionCount; i++) {
      bool found = false;
      for (uint32_t j = 0; j < ARRAY_SIZE(device_extensions); j++) {
         if (strcmp(pCreateInfo->ppEnabledExtensionNames[i],
                    device_extensions[j].extName) == 0) {
            found = true;
            break;
         }
      }
      if (!found)
         return vk_error(VK_ERROR_EXTENSION_NOT_PRESENT);
   }

   anv_set_dispatch_gen(physical_device->info->gen);

   device = anv_instance_alloc(instance, sizeof(*device), 8,
                               VK_SYSTEM_ALLOC_TYPE_API_OBJECT);
   if (!device)
      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

   device->_loader_data.loaderMagic = ICD_LOADER_MAGIC;
   device->instance = physical_device->instance;

   /* XXX(chadv): Can we dup() physicalDevice->fd here? */
   device->fd = open(physical_device->path, O_RDWR | O_CLOEXEC);
   if (device->fd == -1)
      goto fail_device;

   device->context_id = anv_gem_create_context(device);
   if (device->context_id == -1)
      goto fail_fd;

   pthread_mutex_init(&device->mutex, NULL);

   anv_bo_pool_init(&device->batch_bo_pool, device, ANV_CMD_BUFFER_BATCH_SIZE);

   anv_block_pool_init(&device->dynamic_state_block_pool, device, 2048);

   anv_state_pool_init(&device->dynamic_state_pool,
                       &device->dynamic_state_block_pool);

   anv_block_pool_init(&device->instruction_block_pool, device, 2048);
   anv_block_pool_init(&device->surface_state_block_pool, device, 4096);

   anv_state_pool_init(&device->surface_state_pool,
                       &device->surface_state_block_pool);

   anv_block_pool_init(&device->scratch_block_pool, device, 0x10000);

   device->info = *physical_device->info;

   anv_queue_init(device, &device->queue);

   anv_device_init_meta(device);

   anv_device_init_border_colors(device);

   *pDevice = anv_device_to_handle(device);

   return VK_SUCCESS;

fail_fd:
   close(device->fd);
fail_device:
   anv_device_free(device, device);

   return vk_error(VK_ERROR_INITIALIZATION_FAILED);
}

void anv_DestroyDevice(
    VkDevice                                    _device)
{
   ANV_FROM_HANDLE(anv_device, device, _device);

   anv_queue_finish(&device->queue);

   anv_device_finish_meta(device);

#ifdef HAVE_VALGRIND
   /* We only need to free these to prevent valgrind errors. The backing
    * BO will go away in a couple of lines so we don't actually leak.
    */
   anv_state_pool_free(&device->dynamic_state_pool, device->border_colors);
#endif

   anv_bo_pool_finish(&device->batch_bo_pool);
   anv_state_pool_finish(&device->dynamic_state_pool);
   anv_block_pool_finish(&device->dynamic_state_block_pool);
   anv_block_pool_finish(&device->instruction_block_pool);
   anv_state_pool_finish(&device->surface_state_pool);
   anv_block_pool_finish(&device->surface_state_block_pool);
   anv_block_pool_finish(&device->scratch_block_pool);

   close(device->fd);

   anv_instance_free(device->instance, device);
}

VkResult anv_EnumerateInstanceExtensionProperties(
    const char*                                 pLayerName,
    uint32_t*                                   pCount,
    VkExtensionProperties*                      pProperties)
{
   if (pProperties == NULL) {
      *pCount = ARRAY_SIZE(global_extensions);
      return VK_SUCCESS;
   }

   assert(*pCount >= ARRAY_SIZE(global_extensions));

   *pCount = ARRAY_SIZE(global_extensions);
   memcpy(pProperties, global_extensions, sizeof(global_extensions));

   return VK_SUCCESS;
}

VkResult anv_EnumerateDeviceExtensionProperties(
    VkPhysicalDevice                            physicalDevice,
    const char*                                 pLayerName,
    uint32_t*                                   pCount,
    VkExtensionProperties*                      pProperties)
{
   if (pProperties == NULL) {
      *pCount = ARRAY_SIZE(device_extensions);
      return VK_SUCCESS;
   }

   assert(*pCount >= ARRAY_SIZE(device_extensions));

   *pCount = ARRAY_SIZE(device_extensions);
   memcpy(pProperties, device_extensions, sizeof(device_extensions));

   return VK_SUCCESS;
}

VkResult anv_EnumerateInstanceLayerProperties(
    uint32_t*                                   pCount,
    VkLayerProperties*                          pProperties)
{
   if (pProperties == NULL) {
      *pCount = 0;
      return VK_SUCCESS;
   }

   /* None supported at this time */
   return vk_error(VK_ERROR_LAYER_NOT_PRESENT);
}

VkResult anv_EnumerateDeviceLayerProperties(
    VkPhysicalDevice                            physicalDevice,
    uint32_t*                                   pCount,
    VkLayerProperties*                          pProperties)
{
   if (pProperties == NULL) {
      *pCount = 0;
      return VK_SUCCESS;
   }

   /* None supported at this time */
   return vk_error(VK_ERROR_LAYER_NOT_PRESENT);
}

VkResult anv_GetDeviceQueue(
    VkDevice                                    _device,
    uint32_t                                    queueNodeIndex,
    uint32_t                                    queueIndex,
    VkQueue*                                    pQueue)
{
   ANV_FROM_HANDLE(anv_device, device, _device);

   assert(queueIndex == 0);

   *pQueue = anv_queue_to_handle(&device->queue);

   return VK_SUCCESS;
}

VkResult anv_QueueSubmit(
    VkQueue                                     _queue,
    uint32_t                                    cmdBufferCount,
    const VkCmdBuffer*                          pCmdBuffers,
    VkFence                                     _fence)
{
   ANV_FROM_HANDLE(anv_queue, queue, _queue);
   ANV_FROM_HANDLE(anv_fence, fence, _fence);
   struct anv_device *device = queue->device;
   int ret;

   for (uint32_t i = 0; i < cmdBufferCount; i++) {
      ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, pCmdBuffers[i]);

      assert(cmd_buffer->level == VK_CMD_BUFFER_LEVEL_PRIMARY);

      ret = anv_gem_execbuffer(device, &cmd_buffer->execbuf2.execbuf);
      if (ret != 0) {
         /* We don't know the real error. */
         return vk_errorf(VK_ERROR_OUT_OF_DEVICE_MEMORY,
                          "execbuf2 failed: %m");
      }

      if (fence) {
         ret = anv_gem_execbuffer(device, &fence->execbuf);
         if (ret != 0) {
            /* We don't know the real error. */
            return vk_errorf(VK_ERROR_OUT_OF_DEVICE_MEMORY,
                             "execbuf2 failed: %m");
         }
      }

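      /* Record where the kernel actually placed each BO so later
       * submissions can present those offsets as presumed locations,
       * letting the kernel skip relocation processing when nothing
       * has moved.
       */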
      for (uint32_t j = 0; j < cmd_buffer->execbuf2.bo_count; j++)
         cmd_buffer->execbuf2.bos[j]->offset = cmd_buffer->execbuf2.objects[j].offset;
   }

   return VK_SUCCESS;
}

VkResult anv_QueueWaitIdle(
    VkQueue                                     _queue)
{
   ANV_FROM_HANDLE(anv_queue, queue, _queue);

   return ANV_CALL(DeviceWaitIdle)(anv_device_to_handle(queue->device));
}

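/* DeviceWaitIdle works by submitting a trivial batch (MI_BATCH_BUFFER_END
 * padded with MI_NOOP) to the render ring and then blocking on the batch
 * BO with an effectively infinite gem wait timeout. Once that batch
 * retires, all work previously submitted on this context has retired too.
 */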
VkResult anv_DeviceWaitIdle(
    VkDevice                                    _device)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   struct anv_state state;
   struct anv_batch batch;
   struct drm_i915_gem_execbuffer2 execbuf;
   struct drm_i915_gem_exec_object2 exec2_objects[1];
   struct anv_bo *bo = NULL;
   VkResult result;
   int64_t timeout;
   int ret;

   state = anv_state_pool_alloc(&device->dynamic_state_pool, 32, 32);
   bo = &device->dynamic_state_pool.block_pool->bo;
   batch.start = batch.next = state.map;
   batch.end = state.map + 32;
   anv_batch_emit(&batch, GEN7_MI_BATCH_BUFFER_END);
   anv_batch_emit(&batch, GEN7_MI_NOOP);

   exec2_objects[0].handle = bo->gem_handle;
   exec2_objects[0].relocation_count = 0;
   exec2_objects[0].relocs_ptr = 0;
   exec2_objects[0].alignment = 0;
   exec2_objects[0].offset = bo->offset;
   exec2_objects[0].flags = 0;
   exec2_objects[0].rsvd1 = 0;
   exec2_objects[0].rsvd2 = 0;

   execbuf.buffers_ptr = (uintptr_t) exec2_objects;
   execbuf.buffer_count = 1;
   execbuf.batch_start_offset = state.offset;
   execbuf.batch_len = batch.next - state.map;
   execbuf.cliprects_ptr = 0;
   execbuf.num_cliprects = 0;
   execbuf.DR1 = 0;
   execbuf.DR4 = 0;

   execbuf.flags =
      I915_EXEC_HANDLE_LUT | I915_EXEC_NO_RELOC | I915_EXEC_RENDER;
   execbuf.rsvd1 = device->context_id;
   execbuf.rsvd2 = 0;

   ret = anv_gem_execbuffer(device, &execbuf);
   if (ret != 0) {
      /* We don't know the real error. */
      result = vk_errorf(VK_ERROR_OUT_OF_DEVICE_MEMORY, "execbuf2 failed: %m");
      goto fail;
   }

   timeout = INT64_MAX;
   ret = anv_gem_wait(device, bo->gem_handle, &timeout);
   if (ret != 0) {
      /* We don't know the real error. */
      result = vk_errorf(VK_ERROR_OUT_OF_DEVICE_MEMORY, "gem wait failed: %m");
      goto fail;
   }

   anv_state_pool_free(&device->dynamic_state_pool, state);

   return VK_SUCCESS;

fail:
   anv_state_pool_free(&device->dynamic_state_pool, state);

   return result;
}

void *
anv_device_alloc(struct anv_device *device,
                 size_t size,
                 size_t alignment,
                 VkSystemAllocType allocType)
{
   return anv_instance_alloc(device->instance, size, alignment, allocType);
}

void
anv_device_free(struct anv_device *device,
                void *mem)
{
   anv_instance_free(device->instance, mem);
}

VkResult
anv_bo_init_new(struct anv_bo *bo, struct anv_device *device, uint64_t size)
{
   bo->gem_handle = anv_gem_create(device, size);
   if (!bo->gem_handle)
      return vk_error(VK_ERROR_OUT_OF_DEVICE_MEMORY);

   bo->map = NULL;
   bo->index = 0;
   bo->offset = 0;
   bo->size = size;

   return VK_SUCCESS;
}

VkResult anv_AllocMemory(
    VkDevice                                    _device,
    const VkMemoryAllocInfo*                    pAllocInfo,
    VkDeviceMemory*                             pMem)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   struct anv_device_memory *mem;
   VkResult result;

   assert(pAllocInfo->sType == VK_STRUCTURE_TYPE_MEMORY_ALLOC_INFO);

   /* We support exactly one memory heap. */
   assert(pAllocInfo->memoryTypeIndex == 0);

   /* FINISHME: Fail if allocation request exceeds heap size. */

   mem = anv_device_alloc(device, sizeof(*mem), 8,
                          VK_SYSTEM_ALLOC_TYPE_API_OBJECT);
   if (mem == NULL)
      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

   result = anv_bo_init_new(&mem->bo, device, pAllocInfo->allocationSize);
   if (result != VK_SUCCESS)
      goto fail;

   *pMem = anv_device_memory_to_handle(mem);

   return VK_SUCCESS;

fail:
   anv_device_free(device, mem);

   return result;
}

void anv_FreeMemory(
    VkDevice                                    _device,
    VkDeviceMemory                              _mem)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   ANV_FROM_HANDLE(anv_device_memory, mem, _mem);

   if (mem->bo.map)
      anv_gem_munmap(mem->bo.map, mem->bo.size);

   if (mem->bo.gem_handle != 0)
      anv_gem_close(device, mem->bo.gem_handle);

   anv_device_free(device, mem);
}

VkResult anv_MapMemory(
    VkDevice                                    _device,
    VkDeviceMemory                              _mem,
    VkDeviceSize                                offset,
    VkDeviceSize                                size,
    VkMemoryMapFlags                            flags,
    void**                                      ppData)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   ANV_FROM_HANDLE(anv_device_memory, mem, _mem);

   /* FIXME: Is this supposed to be thread safe? Since vkUnmapMemory() only
    * takes a VkDeviceMemory pointer, it seems like only one map of the memory
    * at a time is valid. We could just mmap up front and return an offset
    * pointer here, but that may exhaust virtual memory on 32 bit
    * userspace. */

   mem->map = anv_gem_mmap(device, mem->bo.gem_handle, offset, size);
   mem->map_size = size;

   *ppData = mem->map;

   return VK_SUCCESS;
}

void anv_UnmapMemory(
    VkDevice                                    _device,
    VkDeviceMemory                              _mem)
{
   ANV_FROM_HANDLE(anv_device_memory, mem, _mem);

   anv_gem_munmap(mem->map, mem->map_size);
}

VkResult anv_FlushMappedMemoryRanges(
    VkDevice                                    device,
    uint32_t                                    memRangeCount,
    const VkMappedMemoryRange*                  pMemRanges)
{
   /* clflush here for !llc platforms */

   return VK_SUCCESS;
}

VkResult anv_InvalidateMappedMemoryRanges(
    VkDevice                                    device,
    uint32_t                                    memRangeCount,
    const VkMappedMemoryRange*                  pMemRanges)
{
   return anv_FlushMappedMemoryRanges(device, memRangeCount, pMemRanges);
}
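
/* A sketch of the expected host-access pattern against the entry points
 * above (application-side, not driver code; 'mem', 'range', 'data', and
 * 'size' are assumed to exist):
 *
 *    void *ptr;
 *    vkMapMemory(device, mem, 0, size, 0, &ptr);
 *    memcpy(ptr, data, size);
 *    vkFlushMappedMemoryRanges(device, 1, &range);
 *    vkUnmapMemory(device, mem);
 *
 * On the LLC platforms we currently require (see the I915_PARAM_HAS_LLC
 * check at physical-device init), CPU and GPU caches are coherent, so the
 * flush above is a no-op here.
 */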

VkResult anv_GetBufferMemoryRequirements(
    VkDevice                                    device,
    VkBuffer                                    _buffer,
    VkMemoryRequirements*                       pMemoryRequirements)
{
   ANV_FROM_HANDLE(anv_buffer, buffer, _buffer);

   /* The Vulkan spec (git aaed022) says:
    *
    *    memoryTypeBits is a bitfield and contains one bit set for every
    *    supported memory type for the resource. The bit `1<<i` is set if and
    *    only if the memory type `i` in the VkPhysicalDeviceMemoryProperties
    *    structure for the physical device is supported.
    *
    * We support exactly one memory type.
    */
   pMemoryRequirements->memoryTypeBits = 1;

   pMemoryRequirements->size = buffer->size;
   pMemoryRequirements->alignment = 16;

   return VK_SUCCESS;
}

VkResult anv_GetImageMemoryRequirements(
    VkDevice                                    device,
    VkImage                                     _image,
    VkMemoryRequirements*                       pMemoryRequirements)
{
   ANV_FROM_HANDLE(anv_image, image, _image);

   /* The Vulkan spec (git aaed022) says:
    *
    *    memoryTypeBits is a bitfield and contains one bit set for every
    *    supported memory type for the resource. The bit `1<<i` is set if and
    *    only if the memory type `i` in the VkPhysicalDeviceMemoryProperties
    *    structure for the physical device is supported.
    *
    * We support exactly one memory type.
    */
   pMemoryRequirements->memoryTypeBits = 1;

   pMemoryRequirements->size = image->size;
   pMemoryRequirements->alignment = image->alignment;

   return VK_SUCCESS;
}

VkResult anv_GetImageSparseMemoryRequirements(
    VkDevice                                    device,
    VkImage                                     image,
    uint32_t*                                   pNumRequirements,
    VkSparseImageMemoryRequirements*            pSparseMemoryRequirements)
{
   return vk_error(VK_UNSUPPORTED);
}

VkResult anv_GetDeviceMemoryCommitment(
    VkDevice                                    device,
    VkDeviceMemory                              memory,
    VkDeviceSize*                               pCommittedMemoryInBytes)
{
   *pCommittedMemoryInBytes = 0;
   stub_return(VK_SUCCESS);
}

VkResult anv_BindBufferMemory(
    VkDevice                                    device,
    VkBuffer                                    _buffer,
    VkDeviceMemory                              _mem,
    VkDeviceSize                                memOffset)
{
   ANV_FROM_HANDLE(anv_device_memory, mem, _mem);
   ANV_FROM_HANDLE(anv_buffer, buffer, _buffer);

   buffer->bo = &mem->bo;
   buffer->offset = memOffset;

   return VK_SUCCESS;
}

VkResult anv_BindImageMemory(
    VkDevice                                    device,
    VkImage                                     _image,
    VkDeviceMemory                              _mem,
    VkDeviceSize                                memOffset)
{
   ANV_FROM_HANDLE(anv_device_memory, mem, _mem);
   ANV_FROM_HANDLE(anv_image, image, _image);

   image->bo = &mem->bo;
   image->offset = memOffset;

   return VK_SUCCESS;
}

VkResult anv_QueueBindSparseBufferMemory(
    VkQueue                                     queue,
    VkBuffer                                    buffer,
    uint32_t                                    numBindings,
    const VkSparseMemoryBindInfo*               pBindInfo)
{
   stub_return(VK_UNSUPPORTED);
}

VkResult anv_QueueBindSparseImageOpaqueMemory(
    VkQueue                                     queue,
    VkImage                                     image,
    uint32_t                                    numBindings,
    const VkSparseMemoryBindInfo*               pBindInfo)
{
   stub_return(VK_UNSUPPORTED);
}

VkResult anv_QueueBindSparseImageMemory(
    VkQueue                                     queue,
    VkImage                                     image,
    uint32_t                                    numBindings,
    const VkSparseImageMemoryBindInfo*          pBindInfo)
{
   stub_return(VK_UNSUPPORTED);
}

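/* A fence is implemented as a small GEM BO containing just
 * MI_BATCH_BUFFER_END. vkQueueSubmit() submits it as a batch after the
 * command buffers it covers, so fence queries reduce to a gem wait on
 * that BO: GetFenceStatus polls with a zero timeout, while WaitForFences
 * passes the caller's timeout through.
 */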
VkResult anv_CreateFence(
    VkDevice                                    _device,
    const VkFenceCreateInfo*                    pCreateInfo,
    VkFence*                                    pFence)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   struct anv_fence *fence;
   struct anv_batch batch;
   VkResult result;

   const uint32_t fence_size = 128;

   assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_FENCE_CREATE_INFO);

   fence = anv_device_alloc(device, sizeof(*fence), 8,
                            VK_SYSTEM_ALLOC_TYPE_API_OBJECT);
   if (fence == NULL)
      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

   result = anv_bo_init_new(&fence->bo, device, fence_size);
   if (result != VK_SUCCESS)
      goto fail;

   fence->bo.map =
      anv_gem_mmap(device, fence->bo.gem_handle, 0, fence->bo.size);
   batch.next = batch.start = fence->bo.map;
   batch.end = fence->bo.map + fence->bo.size;
   anv_batch_emit(&batch, GEN7_MI_BATCH_BUFFER_END);
   anv_batch_emit(&batch, GEN7_MI_NOOP);

   fence->exec2_objects[0].handle = fence->bo.gem_handle;
   fence->exec2_objects[0].relocation_count = 0;
   fence->exec2_objects[0].relocs_ptr = 0;
   fence->exec2_objects[0].alignment = 0;
   fence->exec2_objects[0].offset = fence->bo.offset;
   fence->exec2_objects[0].flags = 0;
   fence->exec2_objects[0].rsvd1 = 0;
   fence->exec2_objects[0].rsvd2 = 0;

   fence->execbuf.buffers_ptr = (uintptr_t) fence->exec2_objects;
   fence->execbuf.buffer_count = 1;
   fence->execbuf.batch_start_offset = 0;
   fence->execbuf.batch_len = batch.next - fence->bo.map;
   fence->execbuf.cliprects_ptr = 0;
   fence->execbuf.num_cliprects = 0;
   fence->execbuf.DR1 = 0;
   fence->execbuf.DR4 = 0;

   fence->execbuf.flags =
      I915_EXEC_HANDLE_LUT | I915_EXEC_NO_RELOC | I915_EXEC_RENDER;
   fence->execbuf.rsvd1 = device->context_id;
   fence->execbuf.rsvd2 = 0;

   *pFence = anv_fence_to_handle(fence);

   return VK_SUCCESS;

fail:
   anv_device_free(device, fence);

   return result;
}

void anv_DestroyFence(
    VkDevice                                    _device,
    VkFence                                     _fence)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   ANV_FROM_HANDLE(anv_fence, fence, _fence);

   anv_gem_munmap(fence->bo.map, fence->bo.size);
   anv_gem_close(device, fence->bo.gem_handle);
   anv_device_free(device, fence);
}

VkResult anv_ResetFences(
    VkDevice                                    _device,
    uint32_t                                    fenceCount,
    const VkFence*                              pFences)
{
   for (uint32_t i = 0; i < fenceCount; i++) {
      ANV_FROM_HANDLE(anv_fence, fence, pFences[i]);
      fence->ready = false;
   }

   return VK_SUCCESS;
}

VkResult anv_GetFenceStatus(
    VkDevice                                    _device,
    VkFence                                     _fence)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   ANV_FROM_HANDLE(anv_fence, fence, _fence);
   int64_t t = 0;
   int ret;

   if (fence->ready)
      return VK_SUCCESS;

   ret = anv_gem_wait(device, fence->bo.gem_handle, &t);
   if (ret == 0) {
      fence->ready = true;
      return VK_SUCCESS;
   }

   return VK_NOT_READY;
}

VkResult anv_WaitForFences(
    VkDevice                                    _device,
    uint32_t                                    fenceCount,
    const VkFence*                              pFences,
    VkBool32                                    waitAll,
    uint64_t                                    timeout)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   int64_t t = timeout;
   int ret;

   /* FIXME: handle !waitAll */

   for (uint32_t i = 0; i < fenceCount; i++) {
      ANV_FROM_HANDLE(anv_fence, fence, pFences[i]);
      ret = anv_gem_wait(device, fence->bo.gem_handle, &t);
      if (ret == -1 && errno == ETIME) {
         return VK_TIMEOUT;
      } else if (ret == -1) {
         /* We don't know the real error. */
         return vk_errorf(VK_ERROR_OUT_OF_DEVICE_MEMORY,
                          "gem wait failed: %m");
      }
   }

   return VK_SUCCESS;
}

// Queue semaphore functions

VkResult anv_CreateSemaphore(
    VkDevice                                    device,
    const VkSemaphoreCreateInfo*                pCreateInfo,
    VkSemaphore*                                pSemaphore)
{
   pSemaphore->handle = 1;
   stub_return(VK_SUCCESS);
}

void anv_DestroySemaphore(
    VkDevice                                    device,
    VkSemaphore                                 semaphore)
{
   stub();
}

VkResult anv_QueueSignalSemaphore(
    VkQueue                                     queue,
    VkSemaphore                                 semaphore)
{
   stub_return(VK_UNSUPPORTED);
}

VkResult anv_QueueWaitSemaphore(
    VkQueue                                     queue,
    VkSemaphore                                 semaphore)
{
   stub_return(VK_UNSUPPORTED);
}

// Event functions

VkResult anv_CreateEvent(
    VkDevice                                    device,
    const VkEventCreateInfo*                    pCreateInfo,
    VkEvent*                                    pEvent)
{
   stub_return(VK_UNSUPPORTED);
}

void anv_DestroyEvent(
    VkDevice                                    device,
    VkEvent                                     event)
{
   stub();
}

VkResult anv_GetEventStatus(
    VkDevice                                    device,
    VkEvent                                     event)
{
   stub_return(VK_UNSUPPORTED);
}

VkResult anv_SetEvent(
    VkDevice                                    device,
    VkEvent                                     event)
{
   stub_return(VK_UNSUPPORTED);
}

VkResult anv_ResetEvent(
    VkDevice                                    device,
    VkEvent                                     event)
{
   stub_return(VK_UNSUPPORTED);
}

// Buffer functions

VkResult anv_CreateBuffer(
    VkDevice                                    _device,
    const VkBufferCreateInfo*                   pCreateInfo,
    VkBuffer*                                   pBuffer)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   struct anv_buffer *buffer;

   assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO);

   buffer = anv_device_alloc(device, sizeof(*buffer), 8,
                             VK_SYSTEM_ALLOC_TYPE_API_OBJECT);
   if (buffer == NULL)
      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

   buffer->size = pCreateInfo->size;
   buffer->bo = NULL;
   buffer->offset = 0;

   *pBuffer = anv_buffer_to_handle(buffer);

   return VK_SUCCESS;
}

void anv_DestroyBuffer(
    VkDevice                                    _device,
    VkBuffer                                    _buffer)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   ANV_FROM_HANDLE(anv_buffer, buffer, _buffer);

   anv_device_free(device, buffer);
}

void
anv_fill_buffer_surface_state(struct anv_device *device, void *state,
                              const struct anv_format *format,
                              uint32_t offset, uint32_t range)
{
   switch (device->info.gen) {
   case 7:
      gen7_fill_buffer_surface_state(state, format, offset, range);
      break;
   case 8:
      gen8_fill_buffer_surface_state(state, format, offset, range);
      break;
   default:
      unreachable("unsupported gen\n");
   }
}

VkResult
anv_buffer_view_create(
    struct anv_device *                         device,
    const VkBufferViewCreateInfo*               pCreateInfo,
    struct anv_buffer_view **                   bview_out)
{
   ANV_FROM_HANDLE(anv_buffer, buffer, pCreateInfo->buffer);
   struct anv_buffer_view *bview;

   assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_BUFFER_VIEW_CREATE_INFO);

   bview = anv_device_alloc(device, sizeof(*bview), 8,
                            VK_SYSTEM_ALLOC_TYPE_API_OBJECT);
   if (bview == NULL)
      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

   *bview = (struct anv_buffer_view) {
      .bo = buffer->bo,
      .offset = buffer->offset + pCreateInfo->offset,
      .surface_state = anv_state_pool_alloc(&device->surface_state_pool, 64, 64),
      .format = anv_format_for_vk_format(pCreateInfo->format),
      .range = pCreateInfo->range,
   };

   *bview_out = bview;

   return VK_SUCCESS;
}

void anv_DestroyBufferView(
    VkDevice                                    _device,
    VkBufferView                                _bview)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   ANV_FROM_HANDLE(anv_buffer_view, bview, _bview);

   anv_state_pool_free(&device->surface_state_pool, bview->surface_state);
   anv_device_free(device, bview);
}

void anv_DestroySampler(
    VkDevice                                    _device,
    VkSampler                                   _sampler)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   ANV_FROM_HANDLE(anv_sampler, sampler, _sampler);

   anv_device_free(device, sampler);
}

// Descriptor set functions

VkResult anv_CreateDescriptorSetLayout(
    VkDevice                                    _device,
    const VkDescriptorSetLayoutCreateInfo*      pCreateInfo,
    VkDescriptorSetLayout*                      pSetLayout)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   struct anv_descriptor_set_layout *set_layout;
   uint32_t s;

   assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO);

   uint32_t immutable_sampler_count = 0;
   for (uint32_t b = 0; b < pCreateInfo->count; b++) {
      if (pCreateInfo->pBinding[b].pImmutableSamplers)
         immutable_sampler_count += pCreateInfo->pBinding[b].arraySize;
   }

   size_t size = sizeof(struct anv_descriptor_set_layout) +
                 pCreateInfo->count * sizeof(set_layout->binding[0]) +
                 immutable_sampler_count * sizeof(struct anv_sampler *);

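   /* Everything lives in a single allocation: the layout struct itself,
    * then one binding entry per binding, then the flattened array of
    * immutable sampler pointers that those entries index into:
    *
    *    [set_layout][binding[0] .. binding[count-1]][sampler pointers]
    */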
   set_layout = anv_device_alloc(device, size, 8,
                                 VK_SYSTEM_ALLOC_TYPE_API_OBJECT);
   if (!set_layout)
      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

   /* We just allocate all the samplers at the end of the struct */
   struct anv_sampler **samplers =
      (struct anv_sampler **)&set_layout->binding[pCreateInfo->count];

   set_layout->binding_count = pCreateInfo->count;
   set_layout->shader_stages = 0;
   set_layout->size = 0;

   /* Initialize all binding_layout entries to -1 */
   memset(set_layout->binding, -1,
          pCreateInfo->count * sizeof(set_layout->binding[0]));

   /* Initialize all samplers to 0 */
   memset(samplers, 0, immutable_sampler_count * sizeof(*samplers));

   uint32_t sampler_count[VK_SHADER_STAGE_NUM] = { 0, };
   uint32_t surface_count[VK_SHADER_STAGE_NUM] = { 0, };
   uint32_t dynamic_offset_count = 0;

   for (uint32_t b = 0; b < pCreateInfo->count; b++) {
      uint32_t array_size = MAX2(1, pCreateInfo->pBinding[b].arraySize);
      set_layout->binding[b].array_size = array_size;
      set_layout->size += array_size;

      switch (pCreateInfo->pBinding[b].descriptorType) {
      case VK_DESCRIPTOR_TYPE_SAMPLER:
      case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
         for_each_bit(s, pCreateInfo->pBinding[b].stageFlags) {
            set_layout->binding[b].stage[s].sampler_index = sampler_count[s];
            sampler_count[s] += array_size;
         }
         break;
      default:
         break;
      }

      switch (pCreateInfo->pBinding[b].descriptorType) {
      case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
      case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE:
      case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
      case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
      case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
      case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
      case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
      case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
      case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
      case VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT:
         for_each_bit(s, pCreateInfo->pBinding[b].stageFlags) {
            set_layout->binding[b].stage[s].surface_index = surface_count[s];
            surface_count[s] += array_size;
         }
         break;
      default:
         break;
      }

      switch (pCreateInfo->pBinding[b].descriptorType) {
      case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
      case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
         set_layout->binding[b].dynamic_offset_index = dynamic_offset_count;
         dynamic_offset_count += array_size;
         break;
      default:
         break;
      }

      if (pCreateInfo->pBinding[b].pImmutableSamplers) {
         set_layout->binding[b].immutable_samplers = samplers;
         samplers += array_size;

         for (uint32_t i = 0; i < array_size; i++)
            set_layout->binding[b].immutable_samplers[i] =
               anv_sampler_from_handle(pCreateInfo->pBinding[b].pImmutableSamplers[i]);
      } else {
         set_layout->binding[b].immutable_samplers = NULL;
      }

      set_layout->shader_stages |= pCreateInfo->pBinding[b].stageFlags;
   }

   set_layout->dynamic_offset_count = dynamic_offset_count;

   *pSetLayout = anv_descriptor_set_layout_to_handle(set_layout);

   return VK_SUCCESS;
}

void anv_DestroyDescriptorSetLayout(
    VkDevice                                    _device,
    VkDescriptorSetLayout                       _set_layout)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   ANV_FROM_HANDLE(anv_descriptor_set_layout, set_layout, _set_layout);

   anv_device_free(device, set_layout);
}

VkResult anv_CreateDescriptorPool(
    VkDevice                                    device,
    const VkDescriptorPoolCreateInfo*           pCreateInfo,
    VkDescriptorPool*                           pDescriptorPool)
{
   anv_finishme("VkDescriptorPool is a stub");
   pDescriptorPool->handle = 1;
   return VK_SUCCESS;
}

void anv_DestroyDescriptorPool(
    VkDevice                                    _device,
    VkDescriptorPool                            _pool)
{
   anv_finishme("VkDescriptorPool is a stub: free the pool's descriptor sets");
}

VkResult anv_ResetDescriptorPool(
    VkDevice                                    device,
    VkDescriptorPool                            descriptorPool)
{
   anv_finishme("VkDescriptorPool is a stub: free the pool's descriptor sets");
   return VK_SUCCESS;
}

VkResult
anv_descriptor_set_create(struct anv_device *device,
                          const struct anv_descriptor_set_layout *layout,
                          struct anv_descriptor_set **out_set)
{
   struct anv_descriptor_set *set;
   size_t size = sizeof(*set) + layout->size * sizeof(set->descriptors[0]);

   set = anv_device_alloc(device, size, 8, VK_SYSTEM_ALLOC_TYPE_API_OBJECT);
   if (!set)
      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

   /* A descriptor set may not be 100% filled. Clear the set so we can
    * later detect holes in it.
    */
   memset(set, 0, size);

   /* Go through and fill out immutable samplers if we have any */
   struct anv_descriptor *desc = set->descriptors;
   for (uint32_t b = 0; b < layout->binding_count; b++) {
      if (layout->binding[b].immutable_samplers) {
         for (uint32_t i = 0; i < layout->binding[b].array_size; i++)
            desc[i].sampler = layout->binding[b].immutable_samplers[i];
      }
      desc += layout->binding[b].array_size;
   }

   *out_set = set;

   return VK_SUCCESS;
}

void
anv_descriptor_set_destroy(struct anv_device *device,
                           struct anv_descriptor_set *set)
{
   anv_device_free(device, set);
}

VkResult anv_AllocDescriptorSets(
    VkDevice                                    _device,
    VkDescriptorPool                            descriptorPool,
    VkDescriptorSetUsage                        setUsage,
    uint32_t                                    count,
    const VkDescriptorSetLayout*                pSetLayouts,
    VkDescriptorSet*                            pDescriptorSets)
{
   ANV_FROM_HANDLE(anv_device, device, _device);

   VkResult result = VK_SUCCESS;
   struct anv_descriptor_set *set;
   uint32_t i;

   for (i = 0; i < count; i++) {
      ANV_FROM_HANDLE(anv_descriptor_set_layout, layout, pSetLayouts[i]);

      result = anv_descriptor_set_create(device, layout, &set);
      if (result != VK_SUCCESS)
         break;

      pDescriptorSets[i] = anv_descriptor_set_to_handle(set);
   }

   if (result != VK_SUCCESS)
      anv_FreeDescriptorSets(_device, descriptorPool, i, pDescriptorSets);

   return result;
}

VkResult anv_FreeDescriptorSets(
    VkDevice                                    _device,
    VkDescriptorPool                            descriptorPool,
    uint32_t                                    count,
    const VkDescriptorSet*                      pDescriptorSets)
{
   ANV_FROM_HANDLE(anv_device, device, _device);

   for (uint32_t i = 0; i < count; i++) {
      ANV_FROM_HANDLE(anv_descriptor_set, set, pDescriptorSets[i]);

      anv_descriptor_set_destroy(device, set);
   }

   return VK_SUCCESS;
}

void anv_UpdateDescriptorSets(
    VkDevice                                    device,
    uint32_t                                    writeCount,
    const VkWriteDescriptorSet*                 pDescriptorWrites,
    uint32_t                                    copyCount,
    const VkCopyDescriptorSet*                  pDescriptorCopies)
{
   for (uint32_t i = 0; i < writeCount; i++) {
      const VkWriteDescriptorSet *write = &pDescriptorWrites[i];
      ANV_FROM_HANDLE(anv_descriptor_set, set, write->destSet);

      switch (write->descriptorType) {
      case VK_DESCRIPTOR_TYPE_SAMPLER:
         for (uint32_t j = 0; j < write->count; j++) {
            ANV_FROM_HANDLE(anv_sampler, sampler,
                            write->pDescriptors[j].sampler);

            set->descriptors[write->destBinding + j] = (struct anv_descriptor) {
               .type = ANV_DESCRIPTOR_TYPE_SAMPLER,
               .sampler = sampler,
            };
         }
         break;

      case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
         for (uint32_t j = 0; j < write->count; j++) {
            struct anv_descriptor *desc =
               &set->descriptors[write->destBinding + j];
            ANV_FROM_HANDLE(anv_image_view, iview,
                            write->pDescriptors[j].imageView);
            ANV_FROM_HANDLE(anv_sampler, sampler,
                            write->pDescriptors[j].sampler);

            desc->type = ANV_DESCRIPTOR_TYPE_IMAGE_VIEW_AND_SAMPLER;
            desc->image_view = iview;

            /* If this descriptor has an immutable sampler, we don't want
             * to stomp on it.
             */
            if (sampler)
               desc->sampler = sampler;
         }
         break;

      case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE:
      case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
         for (uint32_t j = 0; j < write->count; j++) {
            ANV_FROM_HANDLE(anv_image_view, iview,
                            write->pDescriptors[j].imageView);

            set->descriptors[write->destBinding + j] = (struct anv_descriptor) {
               .type = ANV_DESCRIPTOR_TYPE_IMAGE_VIEW,
               .image_view = iview,
            };
         }
         break;

      case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
      case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
         anv_finishme("texel buffers not implemented");
         break;

      case VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT:
         anv_finishme("input attachments not implemented");
         break;

      case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
      case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
      case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
      case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
         for (uint32_t j = 0; j < write->count; j++) {
            if (write->pDescriptors[j].bufferView.handle) {
               ANV_FROM_HANDLE(anv_buffer_view, bview,
                               write->pDescriptors[j].bufferView);

               set->descriptors[write->destBinding + j] =
                  (struct anv_descriptor) {
                     .type = ANV_DESCRIPTOR_TYPE_BUFFER_VIEW,
                     .buffer_view = bview,
                  };
            } else {
               ANV_FROM_HANDLE(anv_buffer, buffer,
                               write->pDescriptors[j].bufferInfo.buffer);
               assert(buffer);

               set->descriptors[write->destBinding + j] =
                  (struct anv_descriptor) {
                     .type = ANV_DESCRIPTOR_TYPE_BUFFER_AND_OFFSET,
                     .buffer = buffer,
                     .offset = write->pDescriptors[j].bufferInfo.offset,
                     .range = write->pDescriptors[j].bufferInfo.range,
                  };
            }
         }
         break;

      default:
         break;
      }
   }

   for (uint32_t i = 0; i < copyCount; i++) {
      const VkCopyDescriptorSet *copy = &pDescriptorCopies[i];
      ANV_FROM_HANDLE(anv_descriptor_set, src, copy->srcSet);
      ANV_FROM_HANDLE(anv_descriptor_set, dest, copy->destSet);
      for (uint32_t j = 0; j < copy->count; j++) {
         dest->descriptors[copy->destBinding + j] =
            src->descriptors[copy->srcBinding + j];
      }
   }
}
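
/* A sketch of a descriptor write as an application would issue it against
 * the function above (illustrative only; 'set' and 'bview' are assumed to
 * exist, and the struct/field names follow this pre-1.0 API revision as
 * used in the code above):
 *
 *    VkDescriptorInfo info = { .bufferView = bview };
 *    VkWriteDescriptorSet write = {
 *       .destSet = set,
 *       .destBinding = 0,
 *       .count = 1,
 *       .descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER,
 *       .pDescriptors = &info,
 *    };
 *    vkUpdateDescriptorSets(device, 1, &write, 0, NULL);
 */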

VkResult anv_CreateFramebuffer(
    VkDevice                                    _device,
    const VkFramebufferCreateInfo*              pCreateInfo,
    VkFramebuffer*                              pFramebuffer)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   struct anv_framebuffer *framebuffer;

   assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO);

   size_t size = sizeof(*framebuffer) +
                 sizeof(struct anv_image_view *) * pCreateInfo->attachmentCount;
   framebuffer = anv_device_alloc(device, size, 8,
                                  VK_SYSTEM_ALLOC_TYPE_API_OBJECT);
   if (framebuffer == NULL)
      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

   framebuffer->attachment_count = pCreateInfo->attachmentCount;
   for (uint32_t i = 0; i < pCreateInfo->attachmentCount; i++) {
      VkImageView _iview = pCreateInfo->pAttachments[i];
      framebuffer->attachments[i] = anv_image_view_from_handle(_iview);
   }

   framebuffer->width = pCreateInfo->width;
   framebuffer->height = pCreateInfo->height;
   framebuffer->layers = pCreateInfo->layers;

   *pFramebuffer = anv_framebuffer_to_handle(framebuffer);

   return VK_SUCCESS;
}

void anv_DestroyFramebuffer(
    VkDevice                                    _device,
    VkFramebuffer                               _fb)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   ANV_FROM_HANDLE(anv_framebuffer, fb, _fb);

   anv_device_free(device, fb);
}

VkResult anv_CreateRenderPass(
    VkDevice                                    _device,
    const VkRenderPassCreateInfo*               pCreateInfo,
    VkRenderPass*                               pRenderPass)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   struct anv_render_pass *pass;
   size_t size;
   size_t attachments_offset;

   assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO);

   size = sizeof(*pass);
   size += pCreateInfo->subpassCount * sizeof(pass->subpasses[0]);
   attachments_offset = size;
   size += pCreateInfo->attachmentCount * sizeof(pass->attachments[0]);

   pass = anv_device_alloc(device, size, 8,
                           VK_SYSTEM_ALLOC_TYPE_API_OBJECT);
   if (pass == NULL)
      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

   /* Clear the subpasses along with the parent pass. This is required
    * because each array member of anv_subpass must be a valid pointer if
    * not NULL.
    */
   memset(pass, 0, size);
   pass->attachment_count = pCreateInfo->attachmentCount;
   pass->subpass_count = pCreateInfo->subpassCount;
   pass->attachments = (void *) pass + attachments_offset;

   for (uint32_t i = 0; i < pCreateInfo->attachmentCount; i++) {
      struct anv_render_pass_attachment *att = &pass->attachments[i];

      att->format = anv_format_for_vk_format(pCreateInfo->pAttachments[i].format);
      att->samples = pCreateInfo->pAttachments[i].samples;
      att->load_op = pCreateInfo->pAttachments[i].loadOp;
      att->stencil_load_op = pCreateInfo->pAttachments[i].stencilLoadOp;
      // att->store_op = pCreateInfo->pAttachments[i].storeOp;
      // att->stencil_store_op = pCreateInfo->pAttachments[i].stencilStoreOp;

      if (att->load_op == VK_ATTACHMENT_LOAD_OP_CLEAR) {
         if (anv_format_is_color(att->format)) {
            ++pass->num_color_clear_attachments;
         } else if (att->format->depth_format) {
            pass->has_depth_clear_attachment = true;
         }
      } else if (att->stencil_load_op == VK_ATTACHMENT_LOAD_OP_CLEAR) {
         assert(att->format->has_stencil);
         pass->has_stencil_clear_attachment = true;
      }
   }

   for (uint32_t i = 0; i < pCreateInfo->subpassCount; i++) {
      const VkSubpassDescription *desc = &pCreateInfo->pSubpasses[i];
      struct anv_subpass *subpass = &pass->subpasses[i];

      subpass->input_count = desc->inputCount;
      subpass->color_count = desc->colorCount;

      if (desc->inputCount > 0) {
         subpass->input_attachments =
            anv_device_alloc(device, desc->inputCount * sizeof(uint32_t),
                             8, VK_SYSTEM_ALLOC_TYPE_API_OBJECT);

         for (uint32_t j = 0; j < desc->inputCount; j++) {
            subpass->input_attachments[j]
               = desc->pInputAttachments[j].attachment;
         }
      }

      if (desc->colorCount > 0) {
         subpass->color_attachments =
            anv_device_alloc(device, desc->colorCount * sizeof(uint32_t),
                             8, VK_SYSTEM_ALLOC_TYPE_API_OBJECT);

         for (uint32_t j = 0; j < desc->colorCount; j++) {
            subpass->color_attachments[j]
               = desc->pColorAttachments[j].attachment;
         }
      }

      if (desc->pResolveAttachments) {
         subpass->resolve_attachments =
            anv_device_alloc(device, desc->colorCount * sizeof(uint32_t),
                             8, VK_SYSTEM_ALLOC_TYPE_API_OBJECT);

         for (uint32_t j = 0; j < desc->colorCount; j++) {
            subpass->resolve_attachments[j]
               = desc->pResolveAttachments[j].attachment;
         }
      }

      subpass->depth_stencil_attachment = desc->depthStencilAttachment.attachment;
   }

   *pRenderPass = anv_render_pass_to_handle(pass);

   return VK_SUCCESS;
}

void anv_DestroyRenderPass(
    VkDevice                                    _device,
    VkRenderPass                                _pass)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   ANV_FROM_HANDLE(anv_render_pass, pass, _pass);

   for (uint32_t i = 0; i < pass->subpass_count; i++) {
      /* In VkSubpassDescription, each of the attachment arrays may be null.
       * Don't free the null arrays.
       */
      struct anv_subpass *subpass = &pass->subpasses[i];

      anv_device_free(device, subpass->input_attachments);
      anv_device_free(device, subpass->color_attachments);
      anv_device_free(device, subpass->resolve_attachments);
   }

   anv_device_free(device, pass);
}

VkResult anv_GetRenderAreaGranularity(
    VkDevice                                    device,
    VkRenderPass                                renderPass,
    VkExtent2D*                                 pGranularity)
{
   *pGranularity = (VkExtent2D) { 1, 1 };

   return VK_SUCCESS;
}

void vkCmdDbgMarkerBegin(
    VkCmdBuffer                                 cmdBuffer,
    const char*                                 pMarker)
   __attribute__ ((visibility ("default")));

void vkCmdDbgMarkerEnd(
    VkCmdBuffer                                 cmdBuffer)
   __attribute__ ((visibility ("default")));

void vkCmdDbgMarkerBegin(
    VkCmdBuffer                                 cmdBuffer,
    const char*                                 pMarker)
{
}

void vkCmdDbgMarkerEnd(
    VkCmdBuffer                                 cmdBuffer)
{
}