2 * Copyright © 2015 Intel Corporation
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
30 #include "anv_private.h"
31 #include "mesa/main/git_sha1.h"
32 #include "util/strtod.h"
34 struct anv_dispatch_table dtable
;
37 anv_physical_device_init(struct anv_physical_device
*device
,
38 struct anv_instance
*instance
,
44 fd
= open(path
, O_RDWR
| O_CLOEXEC
);
46 return vk_errorf(VK_ERROR_UNAVAILABLE
, "failed to open %s: %m", path
);
48 device
->_loader_data
.loaderMagic
= ICD_LOADER_MAGIC
;
49 device
->instance
= instance
;
52 device
->chipset_id
= anv_gem_get_param(fd
, I915_PARAM_CHIPSET_ID
);
53 if (!device
->chipset_id
) {
54 result
= vk_errorf(VK_ERROR_UNAVAILABLE
, "failed to get chipset id: %m");
58 device
->name
= brw_get_device_name(device
->chipset_id
);
59 device
->info
= brw_get_device_info(device
->chipset_id
, -1);
61 result
= vk_errorf(VK_ERROR_UNAVAILABLE
, "failed to get device info");
65 if (anv_gem_get_aperture(fd
, &device
->aperture_size
) == -1) {
66 result
= vk_errorf(VK_ERROR_UNAVAILABLE
, "failed to get aperture size: %m");
70 if (!anv_gem_get_param(fd
, I915_PARAM_HAS_WAIT_TIMEOUT
)) {
71 result
= vk_errorf(VK_ERROR_UNAVAILABLE
, "kernel missing gem wait");
75 if (!anv_gem_get_param(fd
, I915_PARAM_HAS_EXECBUF2
)) {
76 result
= vk_errorf(VK_ERROR_UNAVAILABLE
, "kernel missing execbuf2");
80 if (!anv_gem_get_param(fd
, I915_PARAM_HAS_LLC
)) {
81 result
= vk_errorf(VK_ERROR_UNAVAILABLE
, "non-llc gpu");
94 static void *default_alloc(
98 VkSystemAllocType allocType
)
103 static void default_free(
110 static const VkAllocCallbacks default_alloc_callbacks
= {
112 .pfnAlloc
= default_alloc
,
113 .pfnFree
= default_free
116 static const VkExtensionProperties global_extensions
[] = {
118 .extName
= "VK_WSI_swapchain",
123 static const VkExtensionProperties device_extensions
[] = {
125 .extName
= "VK_WSI_device_swapchain",
131 VkResult
anv_CreateInstance(
132 const VkInstanceCreateInfo
* pCreateInfo
,
133 VkInstance
* pInstance
)
135 struct anv_instance
*instance
;
136 const VkAllocCallbacks
*alloc_callbacks
= &default_alloc_callbacks
;
137 void *user_data
= NULL
;
139 assert(pCreateInfo
->sType
== VK_STRUCTURE_TYPE_INSTANCE_CREATE_INFO
);
141 for (uint32_t i
= 0; i
< pCreateInfo
->extensionCount
; i
++) {
143 for (uint32_t j
= 0; j
< ARRAY_SIZE(global_extensions
); j
++) {
144 if (strcmp(pCreateInfo
->ppEnabledExtensionNames
[i
],
145 global_extensions
[j
].extName
) == 0) {
151 return vk_error(VK_ERROR_INVALID_EXTENSION
);
154 if (pCreateInfo
->pAllocCb
) {
155 alloc_callbacks
= pCreateInfo
->pAllocCb
;
156 user_data
= pCreateInfo
->pAllocCb
->pUserData
;
158 instance
= alloc_callbacks
->pfnAlloc(user_data
, sizeof(*instance
), 8,
159 VK_SYSTEM_ALLOC_TYPE_API_OBJECT
);
161 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY
);
163 instance
->_loader_data
.loaderMagic
= ICD_LOADER_MAGIC
;
164 instance
->pAllocUserData
= alloc_callbacks
->pUserData
;
165 instance
->pfnAlloc
= alloc_callbacks
->pfnAlloc
;
166 instance
->pfnFree
= alloc_callbacks
->pfnFree
;
167 instance
->apiVersion
= pCreateInfo
->pAppInfo
->apiVersion
;
168 instance
->physicalDeviceCount
= 0;
172 VG(VALGRIND_CREATE_MEMPOOL(instance
, 0, false));
174 anv_init_wsi(instance
);
176 *pInstance
= anv_instance_to_handle(instance
);
181 VkResult
anv_DestroyInstance(
182 VkInstance _instance
)
184 ANV_FROM_HANDLE(anv_instance
, instance
, _instance
);
186 anv_finish_wsi(instance
);
188 VG(VALGRIND_DESTROY_MEMPOOL(instance
));
192 instance
->pfnFree(instance
->pAllocUserData
, instance
);
198 anv_instance_alloc(struct anv_instance
*instance
, size_t size
,
199 size_t alignment
, VkSystemAllocType allocType
)
201 void *mem
= instance
->pfnAlloc(instance
->pAllocUserData
,
202 size
, alignment
, allocType
);
204 VG(VALGRIND_MEMPOOL_ALLOC(instance
, mem
, size
));
205 VG(VALGRIND_MAKE_MEM_UNDEFINED(mem
, size
));
211 anv_instance_free(struct anv_instance
*instance
, void *mem
)
216 VG(VALGRIND_MEMPOOL_FREE(instance
, mem
));
218 instance
->pfnFree(instance
->pAllocUserData
, mem
);
221 VkResult
anv_EnumeratePhysicalDevices(
222 VkInstance _instance
,
223 uint32_t* pPhysicalDeviceCount
,
224 VkPhysicalDevice
* pPhysicalDevices
)
226 ANV_FROM_HANDLE(anv_instance
, instance
, _instance
);
229 if (instance
->physicalDeviceCount
== 0) {
230 result
= anv_physical_device_init(&instance
->physicalDevice
,
231 instance
, "/dev/dri/renderD128");
232 if (result
!= VK_SUCCESS
)
235 instance
->physicalDeviceCount
= 1;
238 /* pPhysicalDeviceCount is an out parameter if pPhysicalDevices is NULL;
239 * otherwise it's an inout parameter.
241 * The Vulkan spec (git aaed022) says:
243 * pPhysicalDeviceCount is a pointer to an unsigned integer variable
244 * that is initialized with the number of devices the application is
245 * prepared to receive handles to. pname:pPhysicalDevices is pointer to
246 * an array of at least this many VkPhysicalDevice handles [...].
248 * Upon success, if pPhysicalDevices is NULL, vkEnumeratePhysicalDevices
249 * overwrites the contents of the variable pointed to by
250 * pPhysicalDeviceCount with the number of physical devices in in the
251 * instance; otherwise, vkEnumeratePhysicalDevices overwrites
252 * pPhysicalDeviceCount with the number of physical handles written to
255 if (!pPhysicalDevices
) {
256 *pPhysicalDeviceCount
= instance
->physicalDeviceCount
;
257 } else if (*pPhysicalDeviceCount
>= 1) {
258 pPhysicalDevices
[0] = anv_physical_device_to_handle(&instance
->physicalDevice
);
259 *pPhysicalDeviceCount
= 1;
261 *pPhysicalDeviceCount
= 0;
267 VkResult
anv_GetPhysicalDeviceFeatures(
268 VkPhysicalDevice physicalDevice
,
269 VkPhysicalDeviceFeatures
* pFeatures
)
271 anv_finishme("Get correct values for PhysicalDeviceFeatures");
273 *pFeatures
= (VkPhysicalDeviceFeatures
) {
274 .robustBufferAccess
= false,
275 .fullDrawIndexUint32
= false,
276 .imageCubeArray
= false,
277 .independentBlend
= false,
278 .geometryShader
= true,
279 .tessellationShader
= false,
280 .sampleRateShading
= false,
281 .dualSourceBlend
= true,
283 .instancedDrawIndirect
= true,
285 .depthBiasClamp
= false,
286 .fillModeNonSolid
= true,
287 .depthBounds
= false,
290 .textureCompressionETC2
= true,
291 .textureCompressionASTC_LDR
= true,
292 .textureCompressionBC
= true,
293 .pipelineStatisticsQuery
= true,
294 .vertexSideEffects
= false,
295 .tessellationSideEffects
= false,
296 .geometrySideEffects
= false,
297 .fragmentSideEffects
= false,
298 .shaderTessellationPointSize
= false,
299 .shaderGeometryPointSize
= true,
300 .shaderTextureGatherExtended
= true,
301 .shaderStorageImageExtendedFormats
= false,
302 .shaderStorageImageMultisample
= false,
303 .shaderStorageBufferArrayConstantIndexing
= false,
304 .shaderStorageImageArrayConstantIndexing
= false,
305 .shaderUniformBufferArrayDynamicIndexing
= true,
306 .shaderSampledImageArrayDynamicIndexing
= false,
307 .shaderStorageBufferArrayDynamicIndexing
= false,
308 .shaderStorageImageArrayDynamicIndexing
= false,
309 .shaderClipDistance
= false,
310 .shaderCullDistance
= false,
311 .shaderFloat64
= false,
312 .shaderInt64
= false,
313 .shaderFloat16
= false,
314 .shaderInt16
= false,
320 VkResult
anv_GetPhysicalDeviceLimits(
321 VkPhysicalDevice physicalDevice
,
322 VkPhysicalDeviceLimits
* pLimits
)
324 ANV_FROM_HANDLE(anv_physical_device
, physical_device
, physicalDevice
);
325 const struct brw_device_info
*devinfo
= physical_device
->info
;
327 anv_finishme("Get correct values for PhysicalDeviceLimits");
329 *pLimits
= (VkPhysicalDeviceLimits
) {
330 .maxImageDimension1D
= (1 << 14),
331 .maxImageDimension2D
= (1 << 14),
332 .maxImageDimension3D
= (1 << 10),
333 .maxImageDimensionCube
= (1 << 14),
334 .maxImageArrayLayers
= (1 << 10),
335 .maxTexelBufferSize
= (1 << 14),
336 .maxUniformBufferSize
= UINT32_MAX
,
337 .maxStorageBufferSize
= UINT32_MAX
,
338 .maxPushConstantsSize
= MAX_PUSH_CONSTANTS_SIZE
,
339 .maxMemoryAllocationCount
= UINT32_MAX
,
340 .bufferImageGranularity
= 64, /* A cache line */
341 .maxBoundDescriptorSets
= MAX_SETS
,
342 .maxDescriptorSets
= UINT32_MAX
,
343 .maxPerStageDescriptorSamplers
= 64,
344 .maxPerStageDescriptorUniformBuffers
= 64,
345 .maxPerStageDescriptorStorageBuffers
= 64,
346 .maxPerStageDescriptorSampledImages
= 64,
347 .maxPerStageDescriptorStorageImages
= 64,
348 .maxDescriptorSetSamplers
= 256,
349 .maxDescriptorSetUniformBuffers
= 256,
350 .maxDescriptorSetStorageBuffers
= 256,
351 .maxDescriptorSetSampledImages
= 256,
352 .maxDescriptorSetStorageImages
= 256,
353 .maxVertexInputAttributes
= 32,
354 .maxVertexInputAttributeOffset
= 256,
355 .maxVertexInputBindingStride
= 256,
356 .maxVertexOutputComponents
= 32,
357 .maxTessGenLevel
= 0,
358 .maxTessPatchSize
= 0,
359 .maxTessControlPerVertexInputComponents
= 0,
360 .maxTessControlPerVertexOutputComponents
= 0,
361 .maxTessControlPerPatchOutputComponents
= 0,
362 .maxTessControlTotalOutputComponents
= 0,
363 .maxTessEvaluationInputComponents
= 0,
364 .maxTessEvaluationOutputComponents
= 0,
365 .maxGeometryShaderInvocations
= 6,
366 .maxGeometryInputComponents
= 16,
367 .maxGeometryOutputComponents
= 16,
368 .maxGeometryOutputVertices
= 16,
369 .maxGeometryTotalOutputComponents
= 16,
370 .maxFragmentInputComponents
= 16,
371 .maxFragmentOutputBuffers
= 8,
372 .maxFragmentDualSourceBuffers
= 2,
373 .maxFragmentCombinedOutputResources
= 8,
374 .maxComputeSharedMemorySize
= 1024,
375 .maxComputeWorkGroupCount
= {
376 16 * devinfo
->max_cs_threads
,
377 16 * devinfo
->max_cs_threads
,
378 16 * devinfo
->max_cs_threads
,
380 .maxComputeWorkGroupInvocations
= 16 * devinfo
->max_cs_threads
,
381 .maxComputeWorkGroupSize
= {
382 16 * devinfo
->max_cs_threads
,
383 16 * devinfo
->max_cs_threads
,
384 16 * devinfo
->max_cs_threads
,
386 .subPixelPrecisionBits
= 4 /* FIXME */,
387 .subTexelPrecisionBits
= 4 /* FIXME */,
388 .mipmapPrecisionBits
= 4 /* FIXME */,
389 .maxDrawIndexedIndexValue
= UINT32_MAX
,
390 .maxDrawIndirectInstanceCount
= UINT32_MAX
,
391 .primitiveRestartForPatches
= UINT32_MAX
,
392 .maxSamplerLodBias
= 16,
393 .maxSamplerAnisotropy
= 16,
395 .maxDynamicViewportStates
= UINT32_MAX
,
396 .maxViewportDimensions
= { (1 << 14), (1 << 14) },
397 .viewportBoundsRange
= { -1.0, 1.0 }, /* FIXME */
398 .viewportSubPixelBits
= 13, /* We take a float? */
399 .minMemoryMapAlignment
= 64, /* A cache line */
400 .minTexelBufferOffsetAlignment
= 1,
401 .minUniformBufferOffsetAlignment
= 1,
402 .minStorageBufferOffsetAlignment
= 1,
403 .minTexelOffset
= 0, /* FIXME */
404 .maxTexelOffset
= 0, /* FIXME */
405 .minTexelGatherOffset
= 0, /* FIXME */
406 .maxTexelGatherOffset
= 0, /* FIXME */
407 .minInterpolationOffset
= 0, /* FIXME */
408 .maxInterpolationOffset
= 0, /* FIXME */
409 .subPixelInterpolationOffsetBits
= 0, /* FIXME */
410 .maxFramebufferWidth
= (1 << 14),
411 .maxFramebufferHeight
= (1 << 14),
412 .maxFramebufferLayers
= (1 << 10),
413 .maxFramebufferColorSamples
= 8,
414 .maxFramebufferDepthSamples
= 8,
415 .maxFramebufferStencilSamples
= 8,
416 .maxColorAttachments
= MAX_RTS
,
417 .maxSampledImageColorSamples
= 8,
418 .maxSampledImageDepthSamples
= 8,
419 .maxSampledImageIntegerSamples
= 1,
420 .maxStorageImageSamples
= 1,
421 .maxSampleMaskWords
= 1,
422 .timestampFrequency
= 1000 * 1000 * 1000 / 80,
423 .maxClipDistances
= 0 /* FIXME */,
424 .maxCullDistances
= 0 /* FIXME */,
425 .maxCombinedClipAndCullDistances
= 0 /* FIXME */,
426 .pointSizeRange
= { 0.125, 255.875 },
427 .lineWidthRange
= { 0.0, 7.9921875 },
428 .pointSizeGranularity
= (1.0 / 8.0),
429 .lineWidthGranularity
= (1.0 / 128.0),
435 VkResult
anv_GetPhysicalDeviceProperties(
436 VkPhysicalDevice physicalDevice
,
437 VkPhysicalDeviceProperties
* pProperties
)
439 ANV_FROM_HANDLE(anv_physical_device
, pdevice
, physicalDevice
);
441 *pProperties
= (VkPhysicalDeviceProperties
) {
442 .apiVersion
= VK_MAKE_VERSION(0, 138, 1),
445 .deviceId
= pdevice
->chipset_id
,
446 .deviceType
= VK_PHYSICAL_DEVICE_TYPE_INTEGRATED_GPU
,
449 strcpy(pProperties
->deviceName
, pdevice
->name
);
450 snprintf((char *)pProperties
->pipelineCacheUUID
, VK_UUID_LENGTH
,
451 "anv-%s", MESA_GIT_SHA1
+ 4);
456 VkResult
anv_GetPhysicalDeviceQueueCount(
457 VkPhysicalDevice physicalDevice
,
465 VkResult
anv_GetPhysicalDeviceQueueProperties(
466 VkPhysicalDevice physicalDevice
,
468 VkPhysicalDeviceQueueProperties
* pQueueProperties
)
472 *pQueueProperties
= (VkPhysicalDeviceQueueProperties
) {
473 .queueFlags
= VK_QUEUE_GRAPHICS_BIT
|
474 VK_QUEUE_COMPUTE_BIT
|
477 .supportsTimestamps
= true,
483 VkResult
anv_GetPhysicalDeviceMemoryProperties(
484 VkPhysicalDevice physicalDevice
,
485 VkPhysicalDeviceMemoryProperties
* pMemoryProperties
)
487 ANV_FROM_HANDLE(anv_physical_device
, physical_device
, physicalDevice
);
488 VkDeviceSize heap_size
;
490 /* Reserve some wiggle room for the driver by exposing only 75% of the
491 * aperture to the heap.
493 heap_size
= 3 * physical_device
->aperture_size
/ 4;
495 /* The property flags below are valid only for llc platforms. */
496 pMemoryProperties
->memoryTypeCount
= 1;
497 pMemoryProperties
->memoryTypes
[0] = (VkMemoryType
) {
498 .propertyFlags
= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT
,
502 pMemoryProperties
->memoryHeapCount
= 1;
503 pMemoryProperties
->memoryHeaps
[0] = (VkMemoryHeap
) {
505 .flags
= VK_MEMORY_HEAP_HOST_LOCAL
,
511 PFN_vkVoidFunction
anv_GetInstanceProcAddr(
515 return anv_lookup_entrypoint(pName
);
518 PFN_vkVoidFunction
anv_GetDeviceProcAddr(
522 return anv_lookup_entrypoint(pName
);
526 anv_queue_init(struct anv_device
*device
, struct anv_queue
*queue
)
528 queue
->_loader_data
.loaderMagic
= ICD_LOADER_MAGIC
;
529 queue
->device
= device
;
530 queue
->pool
= &device
->surface_state_pool
;
532 queue
->completed_serial
= anv_state_pool_alloc(queue
->pool
, 4, 4);
533 if (queue
->completed_serial
.map
== NULL
)
534 return vk_error(VK_ERROR_OUT_OF_DEVICE_MEMORY
);
536 *(uint32_t *)queue
->completed_serial
.map
= 0;
537 queue
->next_serial
= 1;
/* Release per-queue resources.  The backing pool is torn down with the
 * device, so the explicit free only matters for valgrind bookkeeping.
 */
static void
anv_queue_finish(struct anv_queue *queue)
{
#ifdef HAVE_VALGRIND
   /* This gets torn down with the device so we only need to do this if
    * valgrind is present.
    */
   anv_state_pool_free(queue->pool, queue->completed_serial);
#endif
}
554 anv_device_init_border_colors(struct anv_device
*device
)
556 static const VkClearColorValue border_colors
[] = {
557 [VK_BORDER_COLOR_FLOAT_TRANSPARENT_BLACK
] = { .f32
= { 0.0, 0.0, 0.0, 0.0 } },
558 [VK_BORDER_COLOR_FLOAT_OPAQUE_BLACK
] = { .f32
= { 0.0, 0.0, 0.0, 1.0 } },
559 [VK_BORDER_COLOR_FLOAT_OPAQUE_WHITE
] = { .f32
= { 1.0, 1.0, 1.0, 1.0 } },
560 [VK_BORDER_COLOR_INT_TRANSPARENT_BLACK
] = { .u32
= { 0, 0, 0, 0 } },
561 [VK_BORDER_COLOR_INT_OPAQUE_BLACK
] = { .u32
= { 0, 0, 0, 1 } },
562 [VK_BORDER_COLOR_INT_OPAQUE_WHITE
] = { .u32
= { 1, 1, 1, 1 } },
565 device
->border_colors
=
566 anv_state_pool_alloc(&device
->dynamic_state_pool
,
567 sizeof(border_colors
), 32);
568 memcpy(device
->border_colors
.map
, border_colors
, sizeof(border_colors
));
571 VkResult
anv_CreateDevice(
572 VkPhysicalDevice physicalDevice
,
573 const VkDeviceCreateInfo
* pCreateInfo
,
576 ANV_FROM_HANDLE(anv_physical_device
, physical_device
, physicalDevice
);
577 struct anv_instance
*instance
= physical_device
->instance
;
578 struct anv_device
*device
;
580 assert(pCreateInfo
->sType
== VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO
);
582 for (uint32_t i
= 0; i
< pCreateInfo
->extensionCount
; i
++) {
584 for (uint32_t j
= 0; j
< ARRAY_SIZE(device_extensions
); j
++) {
585 if (strcmp(pCreateInfo
->ppEnabledExtensionNames
[i
],
586 device_extensions
[j
].extName
) == 0) {
592 return vk_error(VK_ERROR_INVALID_EXTENSION
);
595 anv_set_dispatch_gen(physical_device
->info
->gen
);
597 device
= anv_instance_alloc(instance
, sizeof(*device
), 8,
598 VK_SYSTEM_ALLOC_TYPE_API_OBJECT
);
600 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY
);
602 device
->_loader_data
.loaderMagic
= ICD_LOADER_MAGIC
;
603 device
->instance
= physical_device
->instance
;
605 /* XXX(chadv): Can we dup() physicalDevice->fd here? */
606 device
->fd
= open(physical_device
->path
, O_RDWR
| O_CLOEXEC
);
607 if (device
->fd
== -1)
610 device
->context_id
= anv_gem_create_context(device
);
611 if (device
->context_id
== -1)
614 pthread_mutex_init(&device
->mutex
, NULL
);
616 anv_bo_pool_init(&device
->batch_bo_pool
, device
, ANV_CMD_BUFFER_BATCH_SIZE
);
618 anv_block_pool_init(&device
->dynamic_state_block_pool
, device
, 2048);
620 anv_state_pool_init(&device
->dynamic_state_pool
,
621 &device
->dynamic_state_block_pool
);
623 anv_block_pool_init(&device
->instruction_block_pool
, device
, 2048);
624 anv_block_pool_init(&device
->surface_state_block_pool
, device
, 4096);
626 anv_state_pool_init(&device
->surface_state_pool
,
627 &device
->surface_state_block_pool
);
629 anv_block_pool_init(&device
->scratch_block_pool
, device
, 0x10000);
631 device
->info
= *physical_device
->info
;
633 device
->compiler
= anv_compiler_create(device
);
635 anv_queue_init(device
, &device
->queue
);
637 anv_device_init_meta(device
);
639 anv_device_init_border_colors(device
);
641 *pDevice
= anv_device_to_handle(device
);
648 anv_device_free(device
, device
);
650 return vk_error(VK_ERROR_UNAVAILABLE
);
653 VkResult
anv_DestroyDevice(
656 ANV_FROM_HANDLE(anv_device
, device
, _device
);
658 anv_compiler_destroy(device
->compiler
);
660 anv_queue_finish(&device
->queue
);
662 anv_device_finish_meta(device
);
665 /* We only need to free these to prevent valgrind errors. The backing
666 * BO will go away in a couple of lines so we don't actually leak.
668 anv_state_pool_free(&device
->dynamic_state_pool
, device
->border_colors
);
671 anv_bo_pool_finish(&device
->batch_bo_pool
);
672 anv_state_pool_finish(&device
->dynamic_state_pool
);
673 anv_block_pool_finish(&device
->dynamic_state_block_pool
);
674 anv_block_pool_finish(&device
->instruction_block_pool
);
675 anv_state_pool_finish(&device
->surface_state_pool
);
676 anv_block_pool_finish(&device
->surface_state_block_pool
);
677 anv_block_pool_finish(&device
->scratch_block_pool
);
681 anv_instance_free(device
->instance
, device
);
686 VkResult
anv_GetGlobalExtensionProperties(
687 const char* pLayerName
,
689 VkExtensionProperties
* pProperties
)
691 if (pProperties
== NULL
) {
692 *pCount
= ARRAY_SIZE(global_extensions
);
696 assert(*pCount
>= ARRAY_SIZE(global_extensions
));
698 *pCount
= ARRAY_SIZE(global_extensions
);
699 memcpy(pProperties
, global_extensions
, sizeof(global_extensions
));
704 VkResult
anv_GetPhysicalDeviceExtensionProperties(
705 VkPhysicalDevice physicalDevice
,
706 const char* pLayerName
,
708 VkExtensionProperties
* pProperties
)
710 if (pProperties
== NULL
) {
711 *pCount
= ARRAY_SIZE(device_extensions
);
715 assert(*pCount
>= ARRAY_SIZE(device_extensions
));
717 *pCount
= ARRAY_SIZE(device_extensions
);
718 memcpy(pProperties
, device_extensions
, sizeof(device_extensions
));
723 VkResult
anv_GetGlobalLayerProperties(
725 VkLayerProperties
* pProperties
)
727 if (pProperties
== NULL
) {
732 /* None supported at this time */
733 return vk_error(VK_ERROR_INVALID_LAYER
);
736 VkResult
anv_GetPhysicalDeviceLayerProperties(
737 VkPhysicalDevice physicalDevice
,
739 VkLayerProperties
* pProperties
)
741 if (pProperties
== NULL
) {
746 /* None supported at this time */
747 return vk_error(VK_ERROR_INVALID_LAYER
);
750 VkResult
anv_GetDeviceQueue(
752 uint32_t queueNodeIndex
,
756 ANV_FROM_HANDLE(anv_device
, device
, _device
);
758 assert(queueIndex
== 0);
760 *pQueue
= anv_queue_to_handle(&device
->queue
);
765 VkResult
anv_QueueSubmit(
767 uint32_t cmdBufferCount
,
768 const VkCmdBuffer
* pCmdBuffers
,
771 ANV_FROM_HANDLE(anv_queue
, queue
, _queue
);
772 ANV_FROM_HANDLE(anv_fence
, fence
, _fence
);
773 struct anv_device
*device
= queue
->device
;
776 for (uint32_t i
= 0; i
< cmdBufferCount
; i
++) {
777 ANV_FROM_HANDLE(anv_cmd_buffer
, cmd_buffer
, pCmdBuffers
[i
]);
779 assert(cmd_buffer
->level
== VK_CMD_BUFFER_LEVEL_PRIMARY
);
781 ret
= anv_gem_execbuffer(device
, &cmd_buffer
->execbuf2
.execbuf
);
783 return vk_errorf(VK_ERROR_UNKNOWN
, "execbuf2 failed: %m");
786 ret
= anv_gem_execbuffer(device
, &fence
->execbuf
);
788 return vk_errorf(VK_ERROR_UNKNOWN
, "execbuf2 failed: %m");
791 for (uint32_t i
= 0; i
< cmd_buffer
->execbuf2
.bo_count
; i
++)
792 cmd_buffer
->execbuf2
.bos
[i
]->offset
= cmd_buffer
->execbuf2
.objects
[i
].offset
;
798 VkResult
anv_QueueWaitIdle(
801 ANV_FROM_HANDLE(anv_queue
, queue
, _queue
);
803 return ANV_CALL(DeviceWaitIdle
)(anv_device_to_handle(queue
->device
));
806 VkResult
anv_DeviceWaitIdle(
809 ANV_FROM_HANDLE(anv_device
, device
, _device
);
810 struct anv_state state
;
811 struct anv_batch batch
;
812 struct drm_i915_gem_execbuffer2 execbuf
;
813 struct drm_i915_gem_exec_object2 exec2_objects
[1];
814 struct anv_bo
*bo
= NULL
;
819 state
= anv_state_pool_alloc(&device
->dynamic_state_pool
, 32, 32);
820 bo
= &device
->dynamic_state_pool
.block_pool
->bo
;
821 batch
.start
= batch
.next
= state
.map
;
822 batch
.end
= state
.map
+ 32;
823 anv_batch_emit(&batch
, GEN7_MI_BATCH_BUFFER_END
);
824 anv_batch_emit(&batch
, GEN7_MI_NOOP
);
826 exec2_objects
[0].handle
= bo
->gem_handle
;
827 exec2_objects
[0].relocation_count
= 0;
828 exec2_objects
[0].relocs_ptr
= 0;
829 exec2_objects
[0].alignment
= 0;
830 exec2_objects
[0].offset
= bo
->offset
;
831 exec2_objects
[0].flags
= 0;
832 exec2_objects
[0].rsvd1
= 0;
833 exec2_objects
[0].rsvd2
= 0;
835 execbuf
.buffers_ptr
= (uintptr_t) exec2_objects
;
836 execbuf
.buffer_count
= 1;
837 execbuf
.batch_start_offset
= state
.offset
;
838 execbuf
.batch_len
= batch
.next
- state
.map
;
839 execbuf
.cliprects_ptr
= 0;
840 execbuf
.num_cliprects
= 0;
845 I915_EXEC_HANDLE_LUT
| I915_EXEC_NO_RELOC
| I915_EXEC_RENDER
;
846 execbuf
.rsvd1
= device
->context_id
;
849 ret
= anv_gem_execbuffer(device
, &execbuf
);
851 result
= vk_errorf(VK_ERROR_UNKNOWN
, "execbuf2 failed: %m");
856 ret
= anv_gem_wait(device
, bo
->gem_handle
, &timeout
);
858 result
= vk_errorf(VK_ERROR_UNKNOWN
, "execbuf2 failed: %m");
862 anv_state_pool_free(&device
->dynamic_state_pool
, state
);
867 anv_state_pool_free(&device
->dynamic_state_pool
, state
);
873 anv_device_alloc(struct anv_device
* device
,
876 VkSystemAllocType allocType
)
878 return anv_instance_alloc(device
->instance
, size
, alignment
, allocType
);
882 anv_device_free(struct anv_device
* device
,
885 anv_instance_free(device
->instance
, mem
);
889 anv_bo_init_new(struct anv_bo
*bo
, struct anv_device
*device
, uint64_t size
)
891 bo
->gem_handle
= anv_gem_create(device
, size
);
893 return vk_error(VK_ERROR_OUT_OF_DEVICE_MEMORY
);
903 VkResult
anv_AllocMemory(
905 const VkMemoryAllocInfo
* pAllocInfo
,
906 VkDeviceMemory
* pMem
)
908 ANV_FROM_HANDLE(anv_device
, device
, _device
);
909 struct anv_device_memory
*mem
;
912 assert(pAllocInfo
->sType
== VK_STRUCTURE_TYPE_MEMORY_ALLOC_INFO
);
914 if (pAllocInfo
->memoryTypeIndex
!= 0) {
915 /* We support exactly one memory heap. */
916 return vk_error(VK_ERROR_INVALID_VALUE
);
919 /* FINISHME: Fail if allocation request exceeds heap size. */
921 mem
= anv_device_alloc(device
, sizeof(*mem
), 8,
922 VK_SYSTEM_ALLOC_TYPE_API_OBJECT
);
924 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY
);
926 result
= anv_bo_init_new(&mem
->bo
, device
, pAllocInfo
->allocationSize
);
927 if (result
!= VK_SUCCESS
)
930 *pMem
= anv_device_memory_to_handle(mem
);
935 anv_device_free(device
, mem
);
940 VkResult
anv_FreeMemory(
944 ANV_FROM_HANDLE(anv_device
, device
, _device
);
945 ANV_FROM_HANDLE(anv_device_memory
, mem
, _mem
);
948 anv_gem_munmap(mem
->bo
.map
, mem
->bo
.size
);
950 if (mem
->bo
.gem_handle
!= 0)
951 anv_gem_close(device
, mem
->bo
.gem_handle
);
953 anv_device_free(device
, mem
);
958 VkResult
anv_MapMemory(
963 VkMemoryMapFlags flags
,
966 ANV_FROM_HANDLE(anv_device
, device
, _device
);
967 ANV_FROM_HANDLE(anv_device_memory
, mem
, _mem
);
969 /* FIXME: Is this supposed to be thread safe? Since vkUnmapMemory() only
970 * takes a VkDeviceMemory pointer, it seems like only one map of the memory
971 * at a time is valid. We could just mmap up front and return an offset
972 * pointer here, but that may exhaust virtual memory on 32 bit
975 mem
->map
= anv_gem_mmap(device
, mem
->bo
.gem_handle
, offset
, size
);
976 mem
->map_size
= size
;
983 VkResult
anv_UnmapMemory(
987 ANV_FROM_HANDLE(anv_device_memory
, mem
, _mem
);
989 anv_gem_munmap(mem
->map
, mem
->map_size
);
994 VkResult
anv_FlushMappedMemoryRanges(
996 uint32_t memRangeCount
,
997 const VkMappedMemoryRange
* pMemRanges
)
999 /* clflush here for !llc platforms */
1004 VkResult
anv_InvalidateMappedMemoryRanges(
1006 uint32_t memRangeCount
,
1007 const VkMappedMemoryRange
* pMemRanges
)
1009 return anv_FlushMappedMemoryRanges(device
, memRangeCount
, pMemRanges
);
1012 VkResult
anv_GetBufferMemoryRequirements(
1015 VkMemoryRequirements
* pMemoryRequirements
)
1017 ANV_FROM_HANDLE(anv_buffer
, buffer
, _buffer
);
1019 /* The Vulkan spec (git aaed022) says:
1021 * memoryTypeBits is a bitfield and contains one bit set for every
1022 * supported memory type for the resource. The bit `1<<i` is set if and
1023 * only if the memory type `i` in the VkPhysicalDeviceMemoryProperties
1024 * structure for the physical device is supported.
1026 * We support exactly one memory type.
1028 pMemoryRequirements
->memoryTypeBits
= 1;
1030 pMemoryRequirements
->size
= buffer
->size
;
1031 pMemoryRequirements
->alignment
= 16;
1036 VkResult
anv_GetImageMemoryRequirements(
1039 VkMemoryRequirements
* pMemoryRequirements
)
1041 ANV_FROM_HANDLE(anv_image
, image
, _image
);
1043 /* The Vulkan spec (git aaed022) says:
1045 * memoryTypeBits is a bitfield and contains one bit set for every
1046 * supported memory type for the resource. The bit `1<<i` is set if and
1047 * only if the memory type `i` in the VkPhysicalDeviceMemoryProperties
1048 * structure for the physical device is supported.
1050 * We support exactly one memory type.
1052 pMemoryRequirements
->memoryTypeBits
= 1;
1054 pMemoryRequirements
->size
= image
->size
;
1055 pMemoryRequirements
->alignment
= image
->alignment
;
1060 VkResult
anv_GetImageSparseMemoryRequirements(
1063 uint32_t* pNumRequirements
,
1064 VkSparseImageMemoryRequirements
* pSparseMemoryRequirements
)
1066 return vk_error(VK_UNSUPPORTED
);
1069 VkResult
anv_GetDeviceMemoryCommitment(
1071 VkDeviceMemory memory
,
1072 VkDeviceSize
* pCommittedMemoryInBytes
)
1074 *pCommittedMemoryInBytes
= 0;
1075 stub_return(VK_SUCCESS
);
1078 VkResult
anv_BindBufferMemory(
1081 VkDeviceMemory _mem
,
1082 VkDeviceSize memOffset
)
1084 ANV_FROM_HANDLE(anv_device_memory
, mem
, _mem
);
1085 ANV_FROM_HANDLE(anv_buffer
, buffer
, _buffer
);
1087 buffer
->bo
= &mem
->bo
;
1088 buffer
->offset
= memOffset
;
1093 VkResult
anv_BindImageMemory(
1096 VkDeviceMemory _mem
,
1097 VkDeviceSize memOffset
)
1099 ANV_FROM_HANDLE(anv_device_memory
, mem
, _mem
);
1100 ANV_FROM_HANDLE(anv_image
, image
, _image
);
1102 image
->bo
= &mem
->bo
;
1103 image
->offset
= memOffset
;
1108 VkResult
anv_QueueBindSparseBufferMemory(
1111 uint32_t numBindings
,
1112 const VkSparseMemoryBindInfo
* pBindInfo
)
1114 stub_return(VK_UNSUPPORTED
);
1117 VkResult
anv_QueueBindSparseImageOpaqueMemory(
1120 uint32_t numBindings
,
1121 const VkSparseMemoryBindInfo
* pBindInfo
)
1123 stub_return(VK_UNSUPPORTED
);
1126 VkResult
anv_QueueBindSparseImageMemory(
1129 uint32_t numBindings
,
1130 const VkSparseImageMemoryBindInfo
* pBindInfo
)
1132 stub_return(VK_UNSUPPORTED
);
1135 VkResult
anv_CreateFence(
1137 const VkFenceCreateInfo
* pCreateInfo
,
1140 ANV_FROM_HANDLE(anv_device
, device
, _device
);
1141 struct anv_fence
*fence
;
1142 struct anv_batch batch
;
1145 const uint32_t fence_size
= 128;
1147 assert(pCreateInfo
->sType
== VK_STRUCTURE_TYPE_FENCE_CREATE_INFO
);
1149 fence
= anv_device_alloc(device
, sizeof(*fence
), 8,
1150 VK_SYSTEM_ALLOC_TYPE_API_OBJECT
);
1152 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY
);
1154 result
= anv_bo_init_new(&fence
->bo
, device
, fence_size
);
1155 if (result
!= VK_SUCCESS
)
1159 anv_gem_mmap(device
, fence
->bo
.gem_handle
, 0, fence
->bo
.size
);
1160 batch
.next
= batch
.start
= fence
->bo
.map
;
1161 batch
.end
= fence
->bo
.map
+ fence
->bo
.size
;
1162 anv_batch_emit(&batch
, GEN7_MI_BATCH_BUFFER_END
);
1163 anv_batch_emit(&batch
, GEN7_MI_NOOP
);
1165 fence
->exec2_objects
[0].handle
= fence
->bo
.gem_handle
;
1166 fence
->exec2_objects
[0].relocation_count
= 0;
1167 fence
->exec2_objects
[0].relocs_ptr
= 0;
1168 fence
->exec2_objects
[0].alignment
= 0;
1169 fence
->exec2_objects
[0].offset
= fence
->bo
.offset
;
1170 fence
->exec2_objects
[0].flags
= 0;
1171 fence
->exec2_objects
[0].rsvd1
= 0;
1172 fence
->exec2_objects
[0].rsvd2
= 0;
1174 fence
->execbuf
.buffers_ptr
= (uintptr_t) fence
->exec2_objects
;
1175 fence
->execbuf
.buffer_count
= 1;
1176 fence
->execbuf
.batch_start_offset
= 0;
1177 fence
->execbuf
.batch_len
= batch
.next
- fence
->bo
.map
;
1178 fence
->execbuf
.cliprects_ptr
= 0;
1179 fence
->execbuf
.num_cliprects
= 0;
1180 fence
->execbuf
.DR1
= 0;
1181 fence
->execbuf
.DR4
= 0;
1183 fence
->execbuf
.flags
=
1184 I915_EXEC_HANDLE_LUT
| I915_EXEC_NO_RELOC
| I915_EXEC_RENDER
;
1185 fence
->execbuf
.rsvd1
= device
->context_id
;
1186 fence
->execbuf
.rsvd2
= 0;
1188 *pFence
= anv_fence_to_handle(fence
);
1193 anv_device_free(device
, fence
);
1198 VkResult
anv_DestroyFence(
1202 ANV_FROM_HANDLE(anv_device
, device
, _device
);
1203 ANV_FROM_HANDLE(anv_fence
, fence
, _fence
);
1205 anv_gem_munmap(fence
->bo
.map
, fence
->bo
.size
);
1206 anv_gem_close(device
, fence
->bo
.gem_handle
);
1207 anv_device_free(device
, fence
);
1212 VkResult
anv_ResetFences(
1214 uint32_t fenceCount
,
1215 const VkFence
* pFences
)
1217 for (uint32_t i
= 0; i
< fenceCount
; i
++) {
1218 ANV_FROM_HANDLE(anv_fence
, fence
, pFences
[i
]);
1219 fence
->ready
= false;
1225 VkResult
anv_GetFenceStatus(
1229 ANV_FROM_HANDLE(anv_device
, device
, _device
);
1230 ANV_FROM_HANDLE(anv_fence
, fence
, _fence
);
1237 ret
= anv_gem_wait(device
, fence
->bo
.gem_handle
, &t
);
1239 fence
->ready
= true;
1243 return VK_NOT_READY
;
1246 VkResult
anv_WaitForFences(
1248 uint32_t fenceCount
,
1249 const VkFence
* pFences
,
1253 ANV_FROM_HANDLE(anv_device
, device
, _device
);
1254 int64_t t
= timeout
;
1257 /* FIXME: handle !waitAll */
1259 for (uint32_t i
= 0; i
< fenceCount
; i
++) {
1260 ANV_FROM_HANDLE(anv_fence
, fence
, pFences
[i
]);
1261 ret
= anv_gem_wait(device
, fence
->bo
.gem_handle
, &t
);
1262 if (ret
== -1 && errno
== ETIME
)
1265 return vk_errorf(VK_ERROR_UNKNOWN
, "gem wait failed: %m");
1271 // Queue semaphore functions
1273 VkResult
anv_CreateSemaphore(
1275 const VkSemaphoreCreateInfo
* pCreateInfo
,
1276 VkSemaphore
* pSemaphore
)
1278 stub_return(VK_UNSUPPORTED
);
1281 VkResult
anv_DestroySemaphore(
1283 VkSemaphore semaphore
)
1285 stub_return(VK_UNSUPPORTED
);
1288 VkResult
anv_QueueSignalSemaphore(
1290 VkSemaphore semaphore
)
1292 stub_return(VK_UNSUPPORTED
);
1295 VkResult
anv_QueueWaitSemaphore(
1297 VkSemaphore semaphore
)
1299 stub_return(VK_UNSUPPORTED
);
1304 VkResult
anv_CreateEvent(
1306 const VkEventCreateInfo
* pCreateInfo
,
1309 stub_return(VK_UNSUPPORTED
);
1312 VkResult
anv_DestroyEvent(
1316 stub_return(VK_UNSUPPORTED
);
1319 VkResult
anv_GetEventStatus(
1323 stub_return(VK_UNSUPPORTED
);
1326 VkResult
anv_SetEvent(
1330 stub_return(VK_UNSUPPORTED
);
1333 VkResult
anv_ResetEvent(
1337 stub_return(VK_UNSUPPORTED
);
1342 VkResult
anv_CreateBuffer(
1344 const VkBufferCreateInfo
* pCreateInfo
,
1347 ANV_FROM_HANDLE(anv_device
, device
, _device
);
1348 struct anv_buffer
*buffer
;
1350 assert(pCreateInfo
->sType
== VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO
);
1352 buffer
= anv_device_alloc(device
, sizeof(*buffer
), 8,
1353 VK_SYSTEM_ALLOC_TYPE_API_OBJECT
);
1355 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY
);
1357 buffer
->size
= pCreateInfo
->size
;
1361 *pBuffer
= anv_buffer_to_handle(buffer
);
1366 VkResult
anv_DestroyBuffer(
1370 ANV_FROM_HANDLE(anv_device
, device
, _device
);
1371 ANV_FROM_HANDLE(anv_buffer
, buffer
, _buffer
);
1373 anv_device_free(device
, buffer
);
1379 anv_fill_buffer_surface_state(struct anv_device
*device
, void *state
,
1380 const struct anv_format
*format
,
1381 uint32_t offset
, uint32_t range
)
1383 switch (device
->info
.gen
) {
1385 gen7_fill_buffer_surface_state(state
, format
, offset
, range
);
1388 gen8_fill_buffer_surface_state(state
, format
, offset
, range
);
1391 unreachable("unsupported gen\n");
1396 anv_buffer_view_create(
1397 struct anv_device
* device
,
1398 const VkBufferViewCreateInfo
* pCreateInfo
,
1399 struct anv_buffer_view
** bview_out
)
1401 ANV_FROM_HANDLE(anv_buffer
, buffer
, pCreateInfo
->buffer
);
1402 struct anv_buffer_view
*bview
;
1404 assert(pCreateInfo
->sType
== VK_STRUCTURE_TYPE_BUFFER_VIEW_CREATE_INFO
);
1406 bview
= anv_device_alloc(device
, sizeof(*bview
), 8,
1407 VK_SYSTEM_ALLOC_TYPE_API_OBJECT
);
1409 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY
);
1411 *bview
= (struct anv_buffer_view
) {
1413 .offset
= buffer
->offset
+ pCreateInfo
->offset
,
1414 .surface_state
= anv_state_pool_alloc(&device
->surface_state_pool
, 64, 64),
1415 .format
= anv_format_for_vk_format(pCreateInfo
->format
),
1416 .range
= pCreateInfo
->range
,
1424 VkResult
anv_DestroyBufferView(
1426 VkBufferView _bview
)
1428 ANV_FROM_HANDLE(anv_device
, device
, _device
);
1429 ANV_FROM_HANDLE(anv_buffer_view
, bview
, _bview
);
1431 anv_state_pool_free(&device
->surface_state_pool
, bview
->surface_state
);
1432 anv_device_free(device
, bview
);
1437 VkResult
anv_DestroySampler(
1441 ANV_FROM_HANDLE(anv_device
, device
, _device
);
1442 ANV_FROM_HANDLE(anv_sampler
, sampler
, _sampler
);
1444 anv_device_free(device
, sampler
);
1449 // Descriptor set functions
1451 VkResult
anv_CreateDescriptorSetLayout(
1453 const VkDescriptorSetLayoutCreateInfo
* pCreateInfo
,
1454 VkDescriptorSetLayout
* pSetLayout
)
1456 ANV_FROM_HANDLE(anv_device
, device
, _device
);
1457 struct anv_descriptor_set_layout
*set_layout
;
1459 assert(pCreateInfo
->sType
== VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO
);
1461 uint32_t sampler_count
[VK_SHADER_STAGE_NUM
] = { 0, };
1462 uint32_t surface_count
[VK_SHADER_STAGE_NUM
] = { 0, };
1463 uint32_t num_dynamic_buffers
= 0;
1465 VkShaderStageFlags stages
= 0;
1468 for (uint32_t i
= 0; i
< pCreateInfo
->count
; i
++) {
1469 switch (pCreateInfo
->pBinding
[i
].descriptorType
) {
1470 case VK_DESCRIPTOR_TYPE_SAMPLER
:
1471 case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER
:
1472 for_each_bit(s
, pCreateInfo
->pBinding
[i
].stageFlags
)
1473 sampler_count
[s
] += pCreateInfo
->pBinding
[i
].arraySize
;
1479 switch (pCreateInfo
->pBinding
[i
].descriptorType
) {
1480 case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER
:
1481 case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE
:
1482 case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE
:
1483 case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER
:
1484 case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER
:
1485 case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER
:
1486 case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER
:
1487 case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC
:
1488 case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC
:
1489 case VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT
:
1490 for_each_bit(s
, pCreateInfo
->pBinding
[i
].stageFlags
)
1491 surface_count
[s
] += pCreateInfo
->pBinding
[i
].arraySize
;
1497 switch (pCreateInfo
->pBinding
[i
].descriptorType
) {
1498 case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC
:
1499 case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC
:
1500 num_dynamic_buffers
+= pCreateInfo
->pBinding
[i
].arraySize
;
1506 stages
|= pCreateInfo
->pBinding
[i
].stageFlags
;
1507 count
+= pCreateInfo
->pBinding
[i
].arraySize
;
1510 uint32_t sampler_total
= 0;
1511 uint32_t surface_total
= 0;
1512 for (uint32_t s
= 0; s
< VK_SHADER_STAGE_NUM
; s
++) {
1513 sampler_total
+= sampler_count
[s
];
1514 surface_total
+= surface_count
[s
];
1517 size_t size
= sizeof(*set_layout
) +
1518 (sampler_total
+ surface_total
) * sizeof(set_layout
->entries
[0]);
1519 set_layout
= anv_device_alloc(device
, size
, 8,
1520 VK_SYSTEM_ALLOC_TYPE_API_OBJECT
);
1522 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY
);
1524 set_layout
->num_dynamic_buffers
= num_dynamic_buffers
;
1525 set_layout
->count
= count
;
1526 set_layout
->shader_stages
= stages
;
1528 struct anv_descriptor_slot
*p
= set_layout
->entries
;
1529 struct anv_descriptor_slot
*sampler
[VK_SHADER_STAGE_NUM
];
1530 struct anv_descriptor_slot
*surface
[VK_SHADER_STAGE_NUM
];
1531 for (uint32_t s
= 0; s
< VK_SHADER_STAGE_NUM
; s
++) {
1532 set_layout
->stage
[s
].surface_count
= surface_count
[s
];
1533 set_layout
->stage
[s
].surface_start
= surface
[s
] = p
;
1534 p
+= surface_count
[s
];
1535 set_layout
->stage
[s
].sampler_count
= sampler_count
[s
];
1536 set_layout
->stage
[s
].sampler_start
= sampler
[s
] = p
;
1537 p
+= sampler_count
[s
];
1540 uint32_t descriptor
= 0;
1541 int8_t dynamic_slot
= 0;
1543 for (uint32_t i
= 0; i
< pCreateInfo
->count
; i
++) {
1544 switch (pCreateInfo
->pBinding
[i
].descriptorType
) {
1545 case VK_DESCRIPTOR_TYPE_SAMPLER
:
1546 case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER
:
1547 for_each_bit(s
, pCreateInfo
->pBinding
[i
].stageFlags
)
1548 for (uint32_t j
= 0; j
< pCreateInfo
->pBinding
[i
].arraySize
; j
++) {
1549 sampler
[s
]->index
= descriptor
+ j
;
1550 sampler
[s
]->dynamic_slot
= -1;
1558 switch (pCreateInfo
->pBinding
[i
].descriptorType
) {
1559 case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC
:
1560 case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC
:
1568 switch (pCreateInfo
->pBinding
[i
].descriptorType
) {
1569 case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER
:
1570 case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE
:
1571 case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE
:
1572 case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER
:
1573 case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER
:
1574 case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER
:
1575 case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER
:
1576 case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC
:
1577 case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC
:
1578 case VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT
:
1579 for_each_bit(s
, pCreateInfo
->pBinding
[i
].stageFlags
)
1580 for (uint32_t j
= 0; j
< pCreateInfo
->pBinding
[i
].arraySize
; j
++) {
1581 surface
[s
]->index
= descriptor
+ j
;
1583 surface
[s
]->dynamic_slot
= dynamic_slot
+ j
;
1585 surface
[s
]->dynamic_slot
= -1;
1594 dynamic_slot
+= pCreateInfo
->pBinding
[i
].arraySize
;
1596 descriptor
+= pCreateInfo
->pBinding
[i
].arraySize
;
1599 *pSetLayout
= anv_descriptor_set_layout_to_handle(set_layout
);
1604 VkResult
anv_DestroyDescriptorSetLayout(
1606 VkDescriptorSetLayout _set_layout
)
1608 ANV_FROM_HANDLE(anv_device
, device
, _device
);
1609 ANV_FROM_HANDLE(anv_descriptor_set_layout
, set_layout
, _set_layout
);
1611 anv_device_free(device
, set_layout
);
1616 VkResult
anv_CreateDescriptorPool(
1618 VkDescriptorPoolUsage poolUsage
,
1620 const VkDescriptorPoolCreateInfo
* pCreateInfo
,
1621 VkDescriptorPool
* pDescriptorPool
)
1623 anv_finishme("VkDescriptorPool is a stub");
1624 pDescriptorPool
->handle
= 1;
1628 VkResult
anv_DestroyDescriptorPool(
1630 VkDescriptorPool _pool
)
1632 anv_finishme("VkDescriptorPool is a stub: free the pool's descriptor sets");
1636 VkResult
anv_ResetDescriptorPool(
1638 VkDescriptorPool descriptorPool
)
1640 anv_finishme("VkDescriptorPool is a stub: free the pool's descriptor sets");
1645 anv_descriptor_set_create(struct anv_device
*device
,
1646 const struct anv_descriptor_set_layout
*layout
,
1647 struct anv_descriptor_set
**out_set
)
1649 struct anv_descriptor_set
*set
;
1650 size_t size
= sizeof(*set
) + layout
->count
* sizeof(set
->descriptors
[0]);
1652 set
= anv_device_alloc(device
, size
, 8, VK_SYSTEM_ALLOC_TYPE_API_OBJECT
);
1654 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY
);
1656 /* A descriptor set may not be 100% filled. Clear the set so we can can
1657 * later detect holes in it.
1659 memset(set
, 0, size
);
1667 anv_descriptor_set_destroy(struct anv_device
*device
,
1668 struct anv_descriptor_set
*set
)
1670 anv_device_free(device
, set
);
1673 VkResult
anv_AllocDescriptorSets(
1675 VkDescriptorPool descriptorPool
,
1676 VkDescriptorSetUsage setUsage
,
1678 const VkDescriptorSetLayout
* pSetLayouts
,
1679 VkDescriptorSet
* pDescriptorSets
,
1682 ANV_FROM_HANDLE(anv_device
, device
, _device
);
1685 struct anv_descriptor_set
*set
;
1687 for (uint32_t i
= 0; i
< count
; i
++) {
1688 ANV_FROM_HANDLE(anv_descriptor_set_layout
, layout
, pSetLayouts
[i
]);
1690 result
= anv_descriptor_set_create(device
, layout
, &set
);
1691 if (result
!= VK_SUCCESS
) {
1696 pDescriptorSets
[i
] = anv_descriptor_set_to_handle(set
);
1704 VkResult
anv_FreeDescriptorSets(
1706 VkDescriptorPool descriptorPool
,
1708 const VkDescriptorSet
* pDescriptorSets
)
1710 ANV_FROM_HANDLE(anv_device
, device
, _device
);
1712 for (uint32_t i
= 0; i
< count
; i
++) {
1713 ANV_FROM_HANDLE(anv_descriptor_set
, set
, pDescriptorSets
[i
]);
1715 anv_descriptor_set_destroy(device
, set
);
1721 VkResult
anv_UpdateDescriptorSets(
1723 uint32_t writeCount
,
1724 const VkWriteDescriptorSet
* pDescriptorWrites
,
1726 const VkCopyDescriptorSet
* pDescriptorCopies
)
1728 for (uint32_t i
= 0; i
< writeCount
; i
++) {
1729 const VkWriteDescriptorSet
*write
= &pDescriptorWrites
[i
];
1730 ANV_FROM_HANDLE(anv_descriptor_set
, set
, write
->destSet
);
1732 switch (write
->descriptorType
) {
1733 case VK_DESCRIPTOR_TYPE_SAMPLER
:
1734 case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER
:
1735 for (uint32_t j
= 0; j
< write
->count
; j
++) {
1736 ANV_FROM_HANDLE(anv_sampler
, sampler
,
1737 write
->pDescriptors
[j
].sampler
);
1739 set
->descriptors
[write
->destBinding
+ j
] = (struct anv_descriptor
) {
1740 .type
= ANV_DESCRIPTOR_TYPE_SAMPLER
,
1745 if (write
->descriptorType
== VK_DESCRIPTOR_TYPE_SAMPLER
)
1750 case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE
:
1751 case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE
:
1752 for (uint32_t j
= 0; j
< write
->count
; j
++) {
1753 ANV_FROM_HANDLE(anv_image_view
, iview
,
1754 write
->pDescriptors
[j
].imageView
);
1756 set
->descriptors
[write
->destBinding
+ j
] = (struct anv_descriptor
) {
1757 .type
= ANV_DESCRIPTOR_TYPE_IMAGE_VIEW
,
1758 .image_view
= iview
,
1763 case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER
:
1764 case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER
:
1765 anv_finishme("texel buffers not implemented");
1768 case VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT
:
1769 anv_finishme("input attachments not implemented");
1772 case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER
:
1773 case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER
:
1774 case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC
:
1775 case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC
:
1776 for (uint32_t j
= 0; j
< write
->count
; j
++) {
1777 ANV_FROM_HANDLE(anv_buffer_view
, bview
,
1778 write
->pDescriptors
[j
].bufferView
);
1780 set
->descriptors
[write
->destBinding
+ j
] = (struct anv_descriptor
) {
1781 .type
= ANV_DESCRIPTOR_TYPE_BUFFER_VIEW
,
1782 .buffer_view
= bview
,
1791 for (uint32_t i
= 0; i
< copyCount
; i
++) {
1792 const VkCopyDescriptorSet
*copy
= &pDescriptorCopies
[i
];
1793 ANV_FROM_HANDLE(anv_descriptor_set
, src
, copy
->destSet
);
1794 ANV_FROM_HANDLE(anv_descriptor_set
, dest
, copy
->destSet
);
1795 for (uint32_t j
= 0; j
< copy
->count
; j
++) {
1796 dest
->descriptors
[copy
->destBinding
+ j
] =
1797 src
->descriptors
[copy
->srcBinding
+ j
];
1804 // State object functions
1806 static inline int64_t
1807 clamp_int64(int64_t x
, int64_t min
, int64_t max
)
1817 VkResult
anv_CreateDynamicViewportState(
1819 const VkDynamicViewportStateCreateInfo
* pCreateInfo
,
1820 VkDynamicViewportState
* pState
)
1822 ANV_FROM_HANDLE(anv_device
, device
, _device
);
1823 struct anv_dynamic_vp_state
*state
;
1825 assert(pCreateInfo
->sType
== VK_STRUCTURE_TYPE_DYNAMIC_VIEWPORT_STATE_CREATE_INFO
);
1827 state
= anv_device_alloc(device
, sizeof(*state
), 8,
1828 VK_SYSTEM_ALLOC_TYPE_API_OBJECT
);
1830 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY
);
1832 unsigned count
= pCreateInfo
->viewportAndScissorCount
;
1833 state
->sf_clip_vp
= anv_state_pool_alloc(&device
->dynamic_state_pool
,
1835 state
->cc_vp
= anv_state_pool_alloc(&device
->dynamic_state_pool
,
1837 state
->scissor
= anv_state_pool_alloc(&device
->dynamic_state_pool
,
1840 for (uint32_t i
= 0; i
< pCreateInfo
->viewportAndScissorCount
; i
++) {
1841 const VkViewport
*vp
= &pCreateInfo
->pViewports
[i
];
1842 const VkRect2D
*s
= &pCreateInfo
->pScissors
[i
];
1844 /* The gen7 state struct has just the matrix and guardband fields, the
1845 * gen8 struct adds the min/max viewport fields. */
1846 struct GEN8_SF_CLIP_VIEWPORT sf_clip_viewport
= {
1847 .ViewportMatrixElementm00
= vp
->width
/ 2,
1848 .ViewportMatrixElementm11
= vp
->height
/ 2,
1849 .ViewportMatrixElementm22
= (vp
->maxDepth
- vp
->minDepth
) / 2,
1850 .ViewportMatrixElementm30
= vp
->originX
+ vp
->width
/ 2,
1851 .ViewportMatrixElementm31
= vp
->originY
+ vp
->height
/ 2,
1852 .ViewportMatrixElementm32
= (vp
->maxDepth
+ vp
->minDepth
) / 2,
1853 .XMinClipGuardband
= -1.0f
,
1854 .XMaxClipGuardband
= 1.0f
,
1855 .YMinClipGuardband
= -1.0f
,
1856 .YMaxClipGuardband
= 1.0f
,
1857 .XMinViewPort
= vp
->originX
,
1858 .XMaxViewPort
= vp
->originX
+ vp
->width
- 1,
1859 .YMinViewPort
= vp
->originY
,
1860 .YMaxViewPort
= vp
->originY
+ vp
->height
- 1,
1863 struct GEN7_CC_VIEWPORT cc_viewport
= {
1864 .MinimumDepth
= vp
->minDepth
,
1865 .MaximumDepth
= vp
->maxDepth
1868 /* Since xmax and ymax are inclusive, we have to have xmax < xmin or
1869 * ymax < ymin for empty clips. In case clip x, y, width height are all
1870 * 0, the clamps below produce 0 for xmin, ymin, xmax, ymax, which isn't
1871 * what we want. Just special case empty clips and produce a canonical
1873 static const struct GEN7_SCISSOR_RECT empty_scissor
= {
1874 .ScissorRectangleYMin
= 1,
1875 .ScissorRectangleXMin
= 1,
1876 .ScissorRectangleYMax
= 0,
1877 .ScissorRectangleXMax
= 0
1880 const int max
= 0xffff;
1881 struct GEN7_SCISSOR_RECT scissor
= {
1882 /* Do this math using int64_t so overflow gets clamped correctly. */
1883 .ScissorRectangleYMin
= clamp_int64(s
->offset
.y
, 0, max
),
1884 .ScissorRectangleXMin
= clamp_int64(s
->offset
.x
, 0, max
),
1885 .ScissorRectangleYMax
= clamp_int64((uint64_t) s
->offset
.y
+ s
->extent
.height
- 1, 0, max
),
1886 .ScissorRectangleXMax
= clamp_int64((uint64_t) s
->offset
.x
+ s
->extent
.width
- 1, 0, max
)
1889 GEN8_SF_CLIP_VIEWPORT_pack(NULL
, state
->sf_clip_vp
.map
+ i
* 64, &sf_clip_viewport
);
1890 GEN7_CC_VIEWPORT_pack(NULL
, state
->cc_vp
.map
+ i
* 32, &cc_viewport
);
1892 if (s
->extent
.width
<= 0 || s
->extent
.height
<= 0) {
1893 GEN7_SCISSOR_RECT_pack(NULL
, state
->scissor
.map
+ i
* 32, &empty_scissor
);
1895 GEN7_SCISSOR_RECT_pack(NULL
, state
->scissor
.map
+ i
* 32, &scissor
);
1899 *pState
= anv_dynamic_vp_state_to_handle(state
);
1904 VkResult
anv_DestroyDynamicViewportState(
1906 VkDynamicViewportState _vp_state
)
1908 ANV_FROM_HANDLE(anv_device
, device
, _device
);
1909 ANV_FROM_HANDLE(anv_dynamic_vp_state
, vp_state
, _vp_state
);
1911 anv_state_pool_free(&device
->dynamic_state_pool
, vp_state
->sf_clip_vp
);
1912 anv_state_pool_free(&device
->dynamic_state_pool
, vp_state
->cc_vp
);
1913 anv_state_pool_free(&device
->dynamic_state_pool
, vp_state
->scissor
);
1915 anv_device_free(device
, vp_state
);
1920 VkResult
anv_DestroyDynamicRasterState(
1922 VkDynamicRasterState _rs_state
)
1924 ANV_FROM_HANDLE(anv_device
, device
, _device
);
1925 ANV_FROM_HANDLE(anv_dynamic_rs_state
, rs_state
, _rs_state
);
1927 anv_device_free(device
, rs_state
);
1932 VkResult
anv_CreateDynamicColorBlendState(
1934 const VkDynamicColorBlendStateCreateInfo
* pCreateInfo
,
1935 VkDynamicColorBlendState
* pState
)
1937 ANV_FROM_HANDLE(anv_device
, device
, _device
);
1938 struct anv_dynamic_cb_state
*state
;
1940 assert(pCreateInfo
->sType
== VK_STRUCTURE_TYPE_DYNAMIC_COLOR_BLEND_STATE_CREATE_INFO
);
1942 state
= anv_device_alloc(device
, sizeof(*state
), 8,
1943 VK_SYSTEM_ALLOC_TYPE_API_OBJECT
);
1945 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY
);
1947 struct GEN7_COLOR_CALC_STATE color_calc_state
= {
1948 .BlendConstantColorRed
= pCreateInfo
->blendConst
[0],
1949 .BlendConstantColorGreen
= pCreateInfo
->blendConst
[1],
1950 .BlendConstantColorBlue
= pCreateInfo
->blendConst
[2],
1951 .BlendConstantColorAlpha
= pCreateInfo
->blendConst
[3]
1954 GEN7_COLOR_CALC_STATE_pack(NULL
, state
->color_calc_state
, &color_calc_state
);
1956 *pState
= anv_dynamic_cb_state_to_handle(state
);
1961 VkResult
anv_DestroyDynamicColorBlendState(
1963 VkDynamicColorBlendState _cb_state
)
1965 ANV_FROM_HANDLE(anv_device
, device
, _device
);
1966 ANV_FROM_HANDLE(anv_dynamic_cb_state
, cb_state
, _cb_state
);
1968 anv_device_free(device
, cb_state
);
1973 VkResult
anv_DestroyDynamicDepthStencilState(
1975 VkDynamicDepthStencilState _ds_state
)
1977 ANV_FROM_HANDLE(anv_device
, device
, _device
);
1978 ANV_FROM_HANDLE(anv_dynamic_ds_state
, ds_state
, _ds_state
);
1980 anv_device_free(device
, ds_state
);
1985 VkResult
anv_CreateFramebuffer(
1987 const VkFramebufferCreateInfo
* pCreateInfo
,
1988 VkFramebuffer
* pFramebuffer
)
1990 ANV_FROM_HANDLE(anv_device
, device
, _device
);
1991 struct anv_framebuffer
*framebuffer
;
1993 assert(pCreateInfo
->sType
== VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO
);
1995 size_t size
= sizeof(*framebuffer
) +
1996 sizeof(struct anv_attachment_view
*) * pCreateInfo
->attachmentCount
;
1997 framebuffer
= anv_device_alloc(device
, size
, 8,
1998 VK_SYSTEM_ALLOC_TYPE_API_OBJECT
);
1999 if (framebuffer
== NULL
)
2000 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY
);
2002 framebuffer
->attachment_count
= pCreateInfo
->attachmentCount
;
2003 for (uint32_t i
= 0; i
< pCreateInfo
->attachmentCount
; i
++) {
2004 ANV_FROM_HANDLE(anv_attachment_view
, aview
,
2005 pCreateInfo
->pAttachments
[i
].view
);
2007 framebuffer
->attachments
[i
] = aview
;
2010 framebuffer
->width
= pCreateInfo
->width
;
2011 framebuffer
->height
= pCreateInfo
->height
;
2012 framebuffer
->layers
= pCreateInfo
->layers
;
2014 anv_CreateDynamicViewportState(anv_device_to_handle(device
),
2015 &(VkDynamicViewportStateCreateInfo
) {
2016 .sType
= VK_STRUCTURE_TYPE_DYNAMIC_VIEWPORT_STATE_CREATE_INFO
,
2017 .viewportAndScissorCount
= 1,
2018 .pViewports
= (VkViewport
[]) {
2022 .width
= pCreateInfo
->width
,
2023 .height
= pCreateInfo
->height
,
2028 .pScissors
= (VkRect2D
[]) {
2030 { pCreateInfo
->width
, pCreateInfo
->height
} },
2033 &framebuffer
->vp_state
);
2035 *pFramebuffer
= anv_framebuffer_to_handle(framebuffer
);
2040 VkResult
anv_DestroyFramebuffer(
2044 ANV_FROM_HANDLE(anv_device
, device
, _device
);
2045 ANV_FROM_HANDLE(anv_framebuffer
, fb
, _fb
);
2047 anv_DestroyDynamicViewportState(anv_device_to_handle(device
),
2049 anv_device_free(device
, fb
);
2054 VkResult
anv_CreateRenderPass(
2056 const VkRenderPassCreateInfo
* pCreateInfo
,
2057 VkRenderPass
* pRenderPass
)
2059 ANV_FROM_HANDLE(anv_device
, device
, _device
);
2060 struct anv_render_pass
*pass
;
2062 size_t attachments_offset
;
2064 assert(pCreateInfo
->sType
== VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO
);
2066 size
= sizeof(*pass
);
2067 size
+= pCreateInfo
->subpassCount
* sizeof(pass
->subpasses
[0]);
2068 attachments_offset
= size
;
2069 size
+= pCreateInfo
->attachmentCount
* sizeof(pass
->attachments
[0]);
2071 pass
= anv_device_alloc(device
, size
, 8,
2072 VK_SYSTEM_ALLOC_TYPE_API_OBJECT
);
2074 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY
);
2076 /* Clear the subpasses along with the parent pass. This required because
2077 * each array member of anv_subpass must be a valid pointer if not NULL.
2079 memset(pass
, 0, size
);
2080 pass
->attachment_count
= pCreateInfo
->attachmentCount
;
2081 pass
->subpass_count
= pCreateInfo
->subpassCount
;
2082 pass
->attachments
= (void *) pass
+ attachments_offset
;
2084 for (uint32_t i
= 0; i
< pCreateInfo
->attachmentCount
; i
++) {
2085 struct anv_render_pass_attachment
*att
= &pass
->attachments
[i
];
2087 att
->format
= anv_format_for_vk_format(pCreateInfo
->pAttachments
[i
].format
);
2088 att
->samples
= pCreateInfo
->pAttachments
[i
].samples
;
2089 att
->load_op
= pCreateInfo
->pAttachments
[i
].loadOp
;
2090 att
->stencil_load_op
= pCreateInfo
->pAttachments
[i
].stencilLoadOp
;
2091 // att->store_op = pCreateInfo->pAttachments[i].storeOp;
2092 // att->stencil_store_op = pCreateInfo->pAttachments[i].stencilStoreOp;
2094 if (att
->load_op
== VK_ATTACHMENT_LOAD_OP_CLEAR
) {
2095 if (anv_format_is_color(att
->format
)) {
2096 ++pass
->num_color_clear_attachments
;
2097 } else if (att
->format
->depth_format
) {
2098 pass
->has_depth_clear_attachment
= true;
2100 } else if (att
->stencil_load_op
== VK_ATTACHMENT_LOAD_OP_CLEAR
) {
2101 assert(att
->format
->has_stencil
);
2102 pass
->has_stencil_clear_attachment
= true;
2106 for (uint32_t i
= 0; i
< pCreateInfo
->subpassCount
; i
++) {
2107 const VkSubpassDescription
*desc
= &pCreateInfo
->pSubpasses
[i
];
2108 struct anv_subpass
*subpass
= &pass
->subpasses
[i
];
2110 subpass
->input_count
= desc
->inputCount
;
2111 subpass
->color_count
= desc
->colorCount
;
2113 if (desc
->inputCount
> 0) {
2114 subpass
->input_attachments
=
2115 anv_device_alloc(device
, desc
->inputCount
* sizeof(uint32_t),
2116 8, VK_SYSTEM_ALLOC_TYPE_API_OBJECT
);
2118 for (uint32_t j
= 0; j
< desc
->inputCount
; j
++) {
2119 subpass
->input_attachments
[j
]
2120 = desc
->pInputAttachments
[j
].attachment
;
2124 if (desc
->colorCount
> 0) {
2125 subpass
->color_attachments
=
2126 anv_device_alloc(device
, desc
->colorCount
* sizeof(uint32_t),
2127 8, VK_SYSTEM_ALLOC_TYPE_API_OBJECT
);
2129 for (uint32_t j
= 0; j
< desc
->colorCount
; j
++) {
2130 subpass
->color_attachments
[j
]
2131 = desc
->pColorAttachments
[j
].attachment
;
2135 if (desc
->pResolveAttachments
) {
2136 subpass
->resolve_attachments
=
2137 anv_device_alloc(device
, desc
->colorCount
* sizeof(uint32_t),
2138 8, VK_SYSTEM_ALLOC_TYPE_API_OBJECT
);
2140 for (uint32_t j
= 0; j
< desc
->colorCount
; j
++) {
2141 subpass
->resolve_attachments
[j
]
2142 = desc
->pResolveAttachments
[j
].attachment
;
2146 subpass
->depth_stencil_attachment
= desc
->depthStencilAttachment
.attachment
;
2149 *pRenderPass
= anv_render_pass_to_handle(pass
);
2154 VkResult
anv_DestroyRenderPass(
2158 ANV_FROM_HANDLE(anv_device
, device
, _device
);
2159 ANV_FROM_HANDLE(anv_render_pass
, pass
, _pass
);
2161 for (uint32_t i
= 0; i
< pass
->subpass_count
; i
++) {
2162 /* In VkSubpassCreateInfo, each of the attachment arrays may be null.
2163 * Don't free the null arrays.
2165 struct anv_subpass
*subpass
= &pass
->subpasses
[i
];
2167 anv_device_free(device
, subpass
->input_attachments
);
2168 anv_device_free(device
, subpass
->color_attachments
);
2169 anv_device_free(device
, subpass
->resolve_attachments
);
2172 anv_device_free(device
, pass
);
2177 VkResult
anv_GetRenderAreaGranularity(
2179 VkRenderPass renderPass
,
2180 VkExtent2D
* pGranularity
)
2182 *pGranularity
= (VkExtent2D
) { 1, 1 };
2187 void vkCmdDbgMarkerBegin(
2188 VkCmdBuffer cmdBuffer
,
2189 const char* pMarker
)
2190 __attribute__ ((visibility ("default")));
2192 void vkCmdDbgMarkerEnd(
2193 VkCmdBuffer cmdBuffer
)
2194 __attribute__ ((visibility ("default")));
2196 void vkCmdDbgMarkerBegin(
2197 VkCmdBuffer cmdBuffer
,
2198 const char* pMarker
)
2202 void vkCmdDbgMarkerEnd(
2203 VkCmdBuffer cmdBuffer
)