2 * Copyright © 2015 Intel Corporation
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
30 #include "anv_private.h"
31 #include "mesa/main/git_sha1.h"
32 #include "util/strtod.h"
35 anv_physical_device_init(struct anv_physical_device
*device
,
36 struct anv_instance
*instance
,
41 fd
= open(path
, O_RDWR
| O_CLOEXEC
);
43 return vk_error(VK_ERROR_UNAVAILABLE
);
45 device
->instance
= instance
;
48 device
->chipset_id
= anv_gem_get_param(fd
, I915_PARAM_CHIPSET_ID
);
49 if (!device
->chipset_id
)
52 device
->name
= brw_get_device_name(device
->chipset_id
);
53 device
->info
= brw_get_device_info(device
->chipset_id
, -1);
57 if (anv_gem_get_aperture(fd
, &device
->aperture_size
) == -1)
60 if (!anv_gem_get_param(fd
, I915_PARAM_HAS_WAIT_TIMEOUT
))
63 if (!anv_gem_get_param(fd
, I915_PARAM_HAS_EXECBUF2
))
66 if (!anv_gem_get_param(fd
, I915_PARAM_HAS_LLC
))
69 if (!anv_gem_get_param(fd
, I915_PARAM_HAS_EXEC_CONSTANTS
))
78 return vk_error(VK_ERROR_UNAVAILABLE
);
81 static void *default_alloc(
85 VkSystemAllocType allocType
)
90 static void default_free(
97 static const VkAllocCallbacks default_alloc_callbacks
= {
99 .pfnAlloc
= default_alloc
,
100 .pfnFree
= default_free
103 VkResult
anv_CreateInstance(
104 const VkInstanceCreateInfo
* pCreateInfo
,
105 VkInstance
* pInstance
)
107 struct anv_instance
*instance
;
108 const VkAllocCallbacks
*alloc_callbacks
= &default_alloc_callbacks
;
109 void *user_data
= NULL
;
111 assert(pCreateInfo
->sType
== VK_STRUCTURE_TYPE_INSTANCE_CREATE_INFO
);
113 if (pCreateInfo
->pAllocCb
) {
114 alloc_callbacks
= pCreateInfo
->pAllocCb
;
115 user_data
= pCreateInfo
->pAllocCb
->pUserData
;
117 instance
= alloc_callbacks
->pfnAlloc(user_data
, sizeof(*instance
), 8,
118 VK_SYSTEM_ALLOC_TYPE_API_OBJECT
);
120 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY
);
122 instance
->pAllocUserData
= alloc_callbacks
->pUserData
;
123 instance
->pfnAlloc
= alloc_callbacks
->pfnAlloc
;
124 instance
->pfnFree
= alloc_callbacks
->pfnFree
;
125 instance
->apiVersion
= pCreateInfo
->pAppInfo
->apiVersion
;
126 instance
->physicalDeviceCount
= 0;
130 VG(VALGRIND_CREATE_MEMPOOL(instance
, 0, false));
132 *pInstance
= anv_instance_to_handle(instance
);
137 VkResult
anv_DestroyInstance(
138 VkInstance _instance
)
140 ANV_FROM_HANDLE(anv_instance
, instance
, _instance
);
142 VG(VALGRIND_DESTROY_MEMPOOL(instance
));
146 instance
->pfnFree(instance
->pAllocUserData
, instance
);
152 anv_instance_alloc(struct anv_instance
*instance
, size_t size
,
153 size_t alignment
, VkSystemAllocType allocType
)
155 void *mem
= instance
->pfnAlloc(instance
->pAllocUserData
,
156 size
, alignment
, allocType
);
158 VALGRIND_MEMPOOL_ALLOC(instance
, mem
, size
);
159 VALGRIND_MAKE_MEM_UNDEFINED(mem
, size
);
165 anv_instance_free(struct anv_instance
*instance
, void *mem
)
170 VALGRIND_MEMPOOL_FREE(instance
, mem
);
172 instance
->pfnFree(instance
->pAllocUserData
, mem
);
175 VkResult
anv_EnumeratePhysicalDevices(
176 VkInstance _instance
,
177 uint32_t* pPhysicalDeviceCount
,
178 VkPhysicalDevice
* pPhysicalDevices
)
180 ANV_FROM_HANDLE(anv_instance
, instance
, _instance
);
183 if (instance
->physicalDeviceCount
== 0) {
184 result
= anv_physical_device_init(&instance
->physicalDevice
,
185 instance
, "/dev/dri/renderD128");
186 if (result
!= VK_SUCCESS
)
189 instance
->physicalDeviceCount
= 1;
192 /* pPhysicalDeviceCount is an out parameter if pPhysicalDevices is NULL;
193 * otherwise it's an inout parameter.
195 * The Vulkan spec (git aaed022) says:
197 * pPhysicalDeviceCount is a pointer to an unsigned integer variable
198 * that is initialized with the number of devices the application is
199 * prepared to receive handles to. pname:pPhysicalDevices is pointer to
200 * an array of at least this many VkPhysicalDevice handles [...].
202 * Upon success, if pPhysicalDevices is NULL, vkEnumeratePhysicalDevices
203 * overwrites the contents of the variable pointed to by
204 * pPhysicalDeviceCount with the number of physical devices in in the
205 * instance; otherwise, vkEnumeratePhysicalDevices overwrites
206 * pPhysicalDeviceCount with the number of physical handles written to
209 if (!pPhysicalDevices
) {
210 *pPhysicalDeviceCount
= instance
->physicalDeviceCount
;
211 } else if (*pPhysicalDeviceCount
>= 1) {
212 pPhysicalDevices
[0] = anv_physical_device_to_handle(&instance
->physicalDevice
);
213 *pPhysicalDeviceCount
= 1;
215 *pPhysicalDeviceCount
= 0;
221 VkResult
anv_GetPhysicalDeviceFeatures(
222 VkPhysicalDevice physicalDevice
,
223 VkPhysicalDeviceFeatures
* pFeatures
)
225 anv_finishme("Get correct values for PhysicalDeviceFeatures");
227 *pFeatures
= (VkPhysicalDeviceFeatures
) {
228 .robustBufferAccess
= false,
229 .fullDrawIndexUint32
= false,
230 .imageCubeArray
= false,
231 .independentBlend
= false,
232 .geometryShader
= true,
233 .tessellationShader
= false,
234 .sampleRateShading
= false,
235 .dualSourceBlend
= true,
237 .instancedDrawIndirect
= true,
239 .depthBiasClamp
= false,
240 .fillModeNonSolid
= true,
241 .depthBounds
= false,
244 .textureCompressionETC2
= true,
245 .textureCompressionASTC_LDR
= true,
246 .textureCompressionBC
= true,
247 .pipelineStatisticsQuery
= true,
248 .vertexSideEffects
= false,
249 .tessellationSideEffects
= false,
250 .geometrySideEffects
= false,
251 .fragmentSideEffects
= false,
252 .shaderTessellationPointSize
= false,
253 .shaderGeometryPointSize
= true,
254 .shaderTextureGatherExtended
= true,
255 .shaderStorageImageExtendedFormats
= false,
256 .shaderStorageImageMultisample
= false,
257 .shaderStorageBufferArrayConstantIndexing
= false,
258 .shaderStorageImageArrayConstantIndexing
= false,
259 .shaderUniformBufferArrayDynamicIndexing
= true,
260 .shaderSampledImageArrayDynamicIndexing
= false,
261 .shaderStorageBufferArrayDynamicIndexing
= false,
262 .shaderStorageImageArrayDynamicIndexing
= false,
263 .shaderClipDistance
= false,
264 .shaderCullDistance
= false,
265 .shaderFloat64
= false,
266 .shaderInt64
= false,
267 .shaderFloat16
= false,
268 .shaderInt16
= false,
274 VkResult
anv_GetPhysicalDeviceLimits(
275 VkPhysicalDevice physicalDevice
,
276 VkPhysicalDeviceLimits
* pLimits
)
278 ANV_FROM_HANDLE(anv_physical_device
, physical_device
, physicalDevice
);
279 const struct brw_device_info
*devinfo
= physical_device
->info
;
281 anv_finishme("Get correct values for PhysicalDeviceLimits");
283 *pLimits
= (VkPhysicalDeviceLimits
) {
284 .maxImageDimension1D
= (1 << 14),
285 .maxImageDimension2D
= (1 << 14),
286 .maxImageDimension3D
= (1 << 10),
287 .maxImageDimensionCube
= (1 << 14),
288 .maxImageArrayLayers
= (1 << 10),
289 .maxTexelBufferSize
= (1 << 14),
290 .maxUniformBufferSize
= UINT32_MAX
,
291 .maxStorageBufferSize
= UINT32_MAX
,
292 .maxPushConstantsSize
= MAX_PUSH_CONSTANTS_SIZE
,
293 .maxMemoryAllocationCount
= UINT32_MAX
,
294 .bufferImageGranularity
= 64, /* A cache line */
295 .maxBoundDescriptorSets
= MAX_SETS
,
296 .maxDescriptorSets
= UINT32_MAX
,
297 .maxPerStageDescriptorSamplers
= 64,
298 .maxPerStageDescriptorUniformBuffers
= 64,
299 .maxPerStageDescriptorStorageBuffers
= 64,
300 .maxPerStageDescriptorSampledImages
= 64,
301 .maxPerStageDescriptorStorageImages
= 64,
302 .maxDescriptorSetSamplers
= 256,
303 .maxDescriptorSetUniformBuffers
= 256,
304 .maxDescriptorSetStorageBuffers
= 256,
305 .maxDescriptorSetSampledImages
= 256,
306 .maxDescriptorSetStorageImages
= 256,
307 .maxVertexInputAttributes
= 32,
308 .maxVertexInputAttributeOffset
= 256,
309 .maxVertexInputBindingStride
= 256,
310 .maxVertexOutputComponents
= 32,
311 .maxTessGenLevel
= 0,
312 .maxTessPatchSize
= 0,
313 .maxTessControlPerVertexInputComponents
= 0,
314 .maxTessControlPerVertexOutputComponents
= 0,
315 .maxTessControlPerPatchOutputComponents
= 0,
316 .maxTessControlTotalOutputComponents
= 0,
317 .maxTessEvaluationInputComponents
= 0,
318 .maxTessEvaluationOutputComponents
= 0,
319 .maxGeometryShaderInvocations
= 6,
320 .maxGeometryInputComponents
= 16,
321 .maxGeometryOutputComponents
= 16,
322 .maxGeometryOutputVertices
= 16,
323 .maxGeometryTotalOutputComponents
= 16,
324 .maxFragmentInputComponents
= 16,
325 .maxFragmentOutputBuffers
= 8,
326 .maxFragmentDualSourceBuffers
= 2,
327 .maxFragmentCombinedOutputResources
= 8,
328 .maxComputeSharedMemorySize
= 1024,
329 .maxComputeWorkGroupCount
= {
330 16 * devinfo
->max_cs_threads
,
331 16 * devinfo
->max_cs_threads
,
332 16 * devinfo
->max_cs_threads
,
334 .maxComputeWorkGroupInvocations
= 16 * devinfo
->max_cs_threads
,
335 .maxComputeWorkGroupSize
= {
336 16 * devinfo
->max_cs_threads
,
337 16 * devinfo
->max_cs_threads
,
338 16 * devinfo
->max_cs_threads
,
340 .subPixelPrecisionBits
= 4 /* FIXME */,
341 .subTexelPrecisionBits
= 4 /* FIXME */,
342 .mipmapPrecisionBits
= 4 /* FIXME */,
343 .maxDrawIndexedIndexValue
= UINT32_MAX
,
344 .maxDrawIndirectInstanceCount
= UINT32_MAX
,
345 .primitiveRestartForPatches
= UINT32_MAX
,
346 .maxSamplerLodBias
= 16,
347 .maxSamplerAnisotropy
= 16,
349 .maxDynamicViewportStates
= UINT32_MAX
,
350 .maxViewportDimensions
= { (1 << 14), (1 << 14) },
351 .viewportBoundsRange
= { -1.0, 1.0 }, /* FIXME */
352 .viewportSubPixelBits
= 13, /* We take a float? */
353 .minMemoryMapAlignment
= 64, /* A cache line */
354 .minTexelBufferOffsetAlignment
= 1,
355 .minUniformBufferOffsetAlignment
= 1,
356 .minStorageBufferOffsetAlignment
= 1,
357 .minTexelOffset
= 0, /* FIXME */
358 .maxTexelOffset
= 0, /* FIXME */
359 .minTexelGatherOffset
= 0, /* FIXME */
360 .maxTexelGatherOffset
= 0, /* FIXME */
361 .minInterpolationOffset
= 0, /* FIXME */
362 .maxInterpolationOffset
= 0, /* FIXME */
363 .subPixelInterpolationOffsetBits
= 0, /* FIXME */
364 .maxFramebufferWidth
= (1 << 14),
365 .maxFramebufferHeight
= (1 << 14),
366 .maxFramebufferLayers
= (1 << 10),
367 .maxFramebufferColorSamples
= 8,
368 .maxFramebufferDepthSamples
= 8,
369 .maxFramebufferStencilSamples
= 8,
370 .maxColorAttachments
= MAX_RTS
,
371 .maxSampledImageColorSamples
= 8,
372 .maxSampledImageDepthSamples
= 8,
373 .maxSampledImageIntegerSamples
= 1,
374 .maxStorageImageSamples
= 1,
375 .maxSampleMaskWords
= 1,
376 .timestampFrequency
= 1000 * 1000 * 1000 / 80,
377 .maxClipDistances
= 0 /* FIXME */,
378 .maxCullDistances
= 0 /* FIXME */,
379 .maxCombinedClipAndCullDistances
= 0 /* FIXME */,
380 .pointSizeRange
= { 0.125, 255.875 },
381 .lineWidthRange
= { 0.0, 7.9921875 },
382 .pointSizeGranularity
= (1.0 / 8.0),
383 .lineWidthGranularity
= (1.0 / 128.0),
389 VkResult
anv_GetPhysicalDeviceProperties(
390 VkPhysicalDevice physicalDevice
,
391 VkPhysicalDeviceProperties
* pProperties
)
393 ANV_FROM_HANDLE(anv_physical_device
, pdevice
, physicalDevice
);
395 *pProperties
= (VkPhysicalDeviceProperties
) {
396 .apiVersion
= VK_MAKE_VERSION(0, 138, 1),
399 .deviceId
= pdevice
->chipset_id
,
400 .deviceType
= VK_PHYSICAL_DEVICE_TYPE_INTEGRATED_GPU
,
403 strcpy(pProperties
->deviceName
, pdevice
->name
);
404 snprintf((char *)pProperties
->pipelineCacheUUID
, VK_UUID_LENGTH
,
405 "anv-%s", MESA_GIT_SHA1
+ 4);
410 VkResult
anv_GetPhysicalDeviceQueueCount(
411 VkPhysicalDevice physicalDevice
,
419 VkResult
anv_GetPhysicalDeviceQueueProperties(
420 VkPhysicalDevice physicalDevice
,
422 VkPhysicalDeviceQueueProperties
* pQueueProperties
)
426 *pQueueProperties
= (VkPhysicalDeviceQueueProperties
) {
427 .queueFlags
= VK_QUEUE_GRAPHICS_BIT
|
428 VK_QUEUE_COMPUTE_BIT
|
431 .supportsTimestamps
= true,
437 VkResult
anv_GetPhysicalDeviceMemoryProperties(
438 VkPhysicalDevice physicalDevice
,
439 VkPhysicalDeviceMemoryProperties
* pMemoryProperties
)
441 ANV_FROM_HANDLE(anv_physical_device
, physical_device
, physicalDevice
);
442 VkDeviceSize heap_size
;
444 /* Reserve some wiggle room for the driver by exposing only 75% of the
445 * aperture to the heap.
447 heap_size
= 3 * physical_device
->aperture_size
/ 4;
449 /* The property flags below are valid only for llc platforms. */
450 pMemoryProperties
->memoryTypeCount
= 1;
451 pMemoryProperties
->memoryTypes
[0] = (VkMemoryType
) {
452 .propertyFlags
= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT
,
456 pMemoryProperties
->memoryHeapCount
= 1;
457 pMemoryProperties
->memoryHeaps
[0] = (VkMemoryHeap
) {
459 .flags
= VK_MEMORY_HEAP_HOST_LOCAL
,
465 PFN_vkVoidFunction
anv_GetInstanceProcAddr(
469 return anv_lookup_entrypoint(pName
);
472 PFN_vkVoidFunction
anv_GetDeviceProcAddr(
476 return anv_lookup_entrypoint(pName
);
480 anv_queue_init(struct anv_device
*device
, struct anv_queue
*queue
)
482 queue
->device
= device
;
483 queue
->pool
= &device
->surface_state_pool
;
485 queue
->completed_serial
= anv_state_pool_alloc(queue
->pool
, 4, 4);
486 if (queue
->completed_serial
.map
== NULL
)
487 return vk_error(VK_ERROR_OUT_OF_DEVICE_MEMORY
);
489 *(uint32_t *)queue
->completed_serial
.map
= 0;
490 queue
->next_serial
= 1;
/* Tear down the queue's completed-serial state.
 *
 * NOTE(review): the HAVE_VALGRIND guard implied by the comment was on
 * dropped lines; reconstructed — confirm against upstream.
 */
void
anv_queue_finish(struct anv_queue *queue)
{
#ifdef HAVE_VALGRIND
   /* This gets torn down with the device so we only need to do this if
    * valgrind is present.
    */
   anv_state_pool_free(queue->pool, queue->completed_serial);
#endif
}
507 anv_device_init_border_colors(struct anv_device
*device
)
509 static const VkClearColorValue border_colors
[] = {
510 [VK_BORDER_COLOR_FLOAT_TRANSPARENT_BLACK
] = { .f32
= { 0.0, 0.0, 0.0, 0.0 } },
511 [VK_BORDER_COLOR_FLOAT_OPAQUE_BLACK
] = { .f32
= { 0.0, 0.0, 0.0, 1.0 } },
512 [VK_BORDER_COLOR_FLOAT_OPAQUE_WHITE
] = { .f32
= { 1.0, 1.0, 1.0, 1.0 } },
513 [VK_BORDER_COLOR_INT_TRANSPARENT_BLACK
] = { .u32
= { 0, 0, 0, 0 } },
514 [VK_BORDER_COLOR_INT_OPAQUE_BLACK
] = { .u32
= { 0, 0, 0, 1 } },
515 [VK_BORDER_COLOR_INT_OPAQUE_WHITE
] = { .u32
= { 1, 1, 1, 1 } },
518 device
->border_colors
=
519 anv_state_pool_alloc(&device
->dynamic_state_pool
,
520 sizeof(border_colors
), 32);
521 memcpy(device
->border_colors
.map
, border_colors
, sizeof(border_colors
));
524 VkResult
anv_CreateDevice(
525 VkPhysicalDevice physicalDevice
,
526 const VkDeviceCreateInfo
* pCreateInfo
,
529 ANV_FROM_HANDLE(anv_physical_device
, physical_device
, physicalDevice
);
530 struct anv_instance
*instance
= physical_device
->instance
;
531 struct anv_device
*device
;
533 assert(pCreateInfo
->sType
== VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO
);
535 switch (physical_device
->info
->gen
) {
537 driver_layer
= &gen7_layer
;
540 driver_layer
= &gen8_layer
;
544 device
= anv_instance_alloc(instance
, sizeof(*device
), 8,
545 VK_SYSTEM_ALLOC_TYPE_API_OBJECT
);
547 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY
);
549 device
->instance
= physical_device
->instance
;
551 /* XXX(chadv): Can we dup() physicalDevice->fd here? */
552 device
->fd
= open(physical_device
->path
, O_RDWR
| O_CLOEXEC
);
553 if (device
->fd
== -1)
556 device
->context_id
= anv_gem_create_context(device
);
557 if (device
->context_id
== -1)
560 anv_bo_pool_init(&device
->batch_bo_pool
, device
, ANV_CMD_BUFFER_BATCH_SIZE
);
562 anv_block_pool_init(&device
->dynamic_state_block_pool
, device
, 2048);
564 anv_state_pool_init(&device
->dynamic_state_pool
,
565 &device
->dynamic_state_block_pool
);
567 anv_block_pool_init(&device
->instruction_block_pool
, device
, 2048);
568 anv_block_pool_init(&device
->surface_state_block_pool
, device
, 2048);
570 anv_state_pool_init(&device
->surface_state_pool
,
571 &device
->surface_state_block_pool
);
573 anv_block_pool_init(&device
->scratch_block_pool
, device
, 0x10000);
575 device
->info
= *physical_device
->info
;
577 device
->compiler
= anv_compiler_create(device
);
579 pthread_mutex_init(&device
->mutex
, NULL
);
581 anv_queue_init(device
, &device
->queue
);
583 anv_device_init_meta(device
);
585 anv_device_init_border_colors(device
);
587 *pDevice
= anv_device_to_handle(device
);
594 anv_device_free(device
, device
);
596 return vk_error(VK_ERROR_UNAVAILABLE
);
599 VkResult
anv_DestroyDevice(
602 ANV_FROM_HANDLE(anv_device
, device
, _device
);
604 anv_compiler_destroy(device
->compiler
);
606 anv_queue_finish(&device
->queue
);
608 anv_device_finish_meta(device
);
611 /* We only need to free these to prevent valgrind errors. The backing
612 * BO will go away in a couple of lines so we don't actually leak.
614 anv_state_pool_free(&device
->dynamic_state_pool
, device
->border_colors
);
617 anv_bo_pool_finish(&device
->batch_bo_pool
);
618 anv_state_pool_finish(&device
->dynamic_state_pool
);
619 anv_block_pool_finish(&device
->dynamic_state_block_pool
);
620 anv_block_pool_finish(&device
->instruction_block_pool
);
621 anv_state_pool_finish(&device
->surface_state_pool
);
622 anv_block_pool_finish(&device
->surface_state_block_pool
);
623 anv_block_pool_finish(&device
->scratch_block_pool
);
627 anv_instance_free(device
->instance
, device
);
632 static const VkExtensionProperties global_extensions
[] = {
634 .extName
= "VK_WSI_LunarG",
639 VkResult
anv_GetGlobalExtensionProperties(
640 const char* pLayerName
,
642 VkExtensionProperties
* pProperties
)
644 if (pProperties
== NULL
) {
645 *pCount
= ARRAY_SIZE(global_extensions
);
649 assert(*pCount
<= ARRAY_SIZE(global_extensions
));
651 *pCount
= ARRAY_SIZE(global_extensions
);
652 memcpy(pProperties
, global_extensions
, sizeof(global_extensions
));
657 VkResult
anv_GetPhysicalDeviceExtensionProperties(
658 VkPhysicalDevice physicalDevice
,
659 const char* pLayerName
,
661 VkExtensionProperties
* pProperties
)
663 if (pProperties
== NULL
) {
668 /* None supported at this time */
669 return vk_error(VK_ERROR_INVALID_EXTENSION
);
672 VkResult
anv_GetGlobalLayerProperties(
674 VkLayerProperties
* pProperties
)
676 if (pProperties
== NULL
) {
681 /* None supported at this time */
682 return vk_error(VK_ERROR_INVALID_LAYER
);
685 VkResult
anv_GetPhysicalDeviceLayerProperties(
686 VkPhysicalDevice physicalDevice
,
688 VkLayerProperties
* pProperties
)
690 if (pProperties
== NULL
) {
695 /* None supported at this time */
696 return vk_error(VK_ERROR_INVALID_LAYER
);
699 VkResult
anv_GetDeviceQueue(
701 uint32_t queueNodeIndex
,
705 ANV_FROM_HANDLE(anv_device
, device
, _device
);
707 assert(queueIndex
== 0);
709 *pQueue
= anv_queue_to_handle(&device
->queue
);
714 VkResult
anv_QueueSubmit(
716 uint32_t cmdBufferCount
,
717 const VkCmdBuffer
* pCmdBuffers
,
720 ANV_FROM_HANDLE(anv_queue
, queue
, _queue
);
721 ANV_FROM_HANDLE(anv_fence
, fence
, _fence
);
722 struct anv_device
*device
= queue
->device
;
725 for (uint32_t i
= 0; i
< cmdBufferCount
; i
++) {
726 ANV_FROM_HANDLE(anv_cmd_buffer
, cmd_buffer
, pCmdBuffers
[i
]);
728 assert(cmd_buffer
->level
== VK_CMD_BUFFER_LEVEL_PRIMARY
);
730 ret
= anv_gem_execbuffer(device
, &cmd_buffer
->execbuf2
.execbuf
);
732 return vk_error(VK_ERROR_UNKNOWN
);
735 ret
= anv_gem_execbuffer(device
, &fence
->execbuf
);
737 return vk_error(VK_ERROR_UNKNOWN
);
740 for (uint32_t i
= 0; i
< cmd_buffer
->execbuf2
.bo_count
; i
++)
741 cmd_buffer
->execbuf2
.bos
[i
]->offset
= cmd_buffer
->execbuf2
.objects
[i
].offset
;
747 VkResult
anv_QueueWaitIdle(
750 ANV_FROM_HANDLE(anv_queue
, queue
, _queue
);
752 return vkDeviceWaitIdle(anv_device_to_handle(queue
->device
));
755 VkResult
anv_DeviceWaitIdle(
758 ANV_FROM_HANDLE(anv_device
, device
, _device
);
759 struct anv_state state
;
760 struct anv_batch batch
;
761 struct drm_i915_gem_execbuffer2 execbuf
;
762 struct drm_i915_gem_exec_object2 exec2_objects
[1];
763 struct anv_bo
*bo
= NULL
;
768 state
= anv_state_pool_alloc(&device
->dynamic_state_pool
, 32, 32);
769 bo
= &device
->dynamic_state_pool
.block_pool
->bo
;
770 batch
.start
= batch
.next
= state
.map
;
771 batch
.end
= state
.map
+ 32;
772 anv_batch_emit(&batch
, GEN7_MI_BATCH_BUFFER_END
);
773 anv_batch_emit(&batch
, GEN7_MI_NOOP
);
775 exec2_objects
[0].handle
= bo
->gem_handle
;
776 exec2_objects
[0].relocation_count
= 0;
777 exec2_objects
[0].relocs_ptr
= 0;
778 exec2_objects
[0].alignment
= 0;
779 exec2_objects
[0].offset
= bo
->offset
;
780 exec2_objects
[0].flags
= 0;
781 exec2_objects
[0].rsvd1
= 0;
782 exec2_objects
[0].rsvd2
= 0;
784 execbuf
.buffers_ptr
= (uintptr_t) exec2_objects
;
785 execbuf
.buffer_count
= 1;
786 execbuf
.batch_start_offset
= state
.offset
;
787 execbuf
.batch_len
= batch
.next
- state
.map
;
788 execbuf
.cliprects_ptr
= 0;
789 execbuf
.num_cliprects
= 0;
794 I915_EXEC_HANDLE_LUT
| I915_EXEC_NO_RELOC
| I915_EXEC_RENDER
;
795 execbuf
.rsvd1
= device
->context_id
;
798 ret
= anv_gem_execbuffer(device
, &execbuf
);
800 result
= vk_error(VK_ERROR_UNKNOWN
);
805 ret
= anv_gem_wait(device
, bo
->gem_handle
, &timeout
);
807 result
= vk_error(VK_ERROR_UNKNOWN
);
811 anv_state_pool_free(&device
->dynamic_state_pool
, state
);
816 anv_state_pool_free(&device
->dynamic_state_pool
, state
);
822 anv_device_alloc(struct anv_device
* device
,
825 VkSystemAllocType allocType
)
827 return anv_instance_alloc(device
->instance
, size
, alignment
, allocType
);
831 anv_device_free(struct anv_device
* device
,
834 anv_instance_free(device
->instance
, mem
);
838 anv_bo_init_new(struct anv_bo
*bo
, struct anv_device
*device
, uint64_t size
)
840 bo
->gem_handle
= anv_gem_create(device
, size
);
842 return vk_error(VK_ERROR_OUT_OF_DEVICE_MEMORY
);
852 VkResult
anv_AllocMemory(
854 const VkMemoryAllocInfo
* pAllocInfo
,
855 VkDeviceMemory
* pMem
)
857 ANV_FROM_HANDLE(anv_device
, device
, _device
);
858 struct anv_device_memory
*mem
;
861 assert(pAllocInfo
->sType
== VK_STRUCTURE_TYPE_MEMORY_ALLOC_INFO
);
863 if (pAllocInfo
->memoryTypeIndex
!= 0) {
864 /* We support exactly one memory heap. */
865 return vk_error(VK_ERROR_INVALID_VALUE
);
868 /* FINISHME: Fail if allocation request exceeds heap size. */
870 mem
= anv_device_alloc(device
, sizeof(*mem
), 8,
871 VK_SYSTEM_ALLOC_TYPE_API_OBJECT
);
873 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY
);
875 result
= anv_bo_init_new(&mem
->bo
, device
, pAllocInfo
->allocationSize
);
876 if (result
!= VK_SUCCESS
)
879 *pMem
= anv_device_memory_to_handle(mem
);
884 anv_device_free(device
, mem
);
889 VkResult
anv_FreeMemory(
893 ANV_FROM_HANDLE(anv_device
, device
, _device
);
894 ANV_FROM_HANDLE(anv_device_memory
, mem
, _mem
);
897 anv_gem_munmap(mem
->bo
.map
, mem
->bo
.size
);
899 if (mem
->bo
.gem_handle
!= 0)
900 anv_gem_close(device
, mem
->bo
.gem_handle
);
902 anv_device_free(device
, mem
);
907 VkResult
anv_MapMemory(
912 VkMemoryMapFlags flags
,
915 ANV_FROM_HANDLE(anv_device
, device
, _device
);
916 ANV_FROM_HANDLE(anv_device_memory
, mem
, _mem
);
918 /* FIXME: Is this supposed to be thread safe? Since vkUnmapMemory() only
919 * takes a VkDeviceMemory pointer, it seems like only one map of the memory
920 * at a time is valid. We could just mmap up front and return an offset
921 * pointer here, but that may exhaust virtual memory on 32 bit
924 mem
->map
= anv_gem_mmap(device
, mem
->bo
.gem_handle
, offset
, size
);
925 mem
->map_size
= size
;
932 VkResult
anv_UnmapMemory(
936 ANV_FROM_HANDLE(anv_device_memory
, mem
, _mem
);
938 anv_gem_munmap(mem
->map
, mem
->map_size
);
943 VkResult
anv_FlushMappedMemoryRanges(
945 uint32_t memRangeCount
,
946 const VkMappedMemoryRange
* pMemRanges
)
948 /* clflush here for !llc platforms */
953 VkResult
anv_InvalidateMappedMemoryRanges(
955 uint32_t memRangeCount
,
956 const VkMappedMemoryRange
* pMemRanges
)
958 return anv_FlushMappedMemoryRanges(device
, memRangeCount
, pMemRanges
);
961 VkResult
anv_GetBufferMemoryRequirements(
964 VkMemoryRequirements
* pMemoryRequirements
)
966 ANV_FROM_HANDLE(anv_buffer
, buffer
, _buffer
);
968 /* The Vulkan spec (git aaed022) says:
970 * memoryTypeBits is a bitfield and contains one bit set for every
971 * supported memory type for the resource. The bit `1<<i` is set if and
972 * only if the memory type `i` in the VkPhysicalDeviceMemoryProperties
973 * structure for the physical device is supported.
975 * We support exactly one memory type.
977 pMemoryRequirements
->memoryTypeBits
= 1;
979 pMemoryRequirements
->size
= buffer
->size
;
980 pMemoryRequirements
->alignment
= 16;
985 VkResult
anv_GetImageMemoryRequirements(
988 VkMemoryRequirements
* pMemoryRequirements
)
990 ANV_FROM_HANDLE(anv_image
, image
, _image
);
992 /* The Vulkan spec (git aaed022) says:
994 * memoryTypeBits is a bitfield and contains one bit set for every
995 * supported memory type for the resource. The bit `1<<i` is set if and
996 * only if the memory type `i` in the VkPhysicalDeviceMemoryProperties
997 * structure for the physical device is supported.
999 * We support exactly one memory type.
1001 pMemoryRequirements
->memoryTypeBits
= 1;
1003 pMemoryRequirements
->size
= image
->size
;
1004 pMemoryRequirements
->alignment
= image
->alignment
;
1009 VkResult
anv_GetImageSparseMemoryRequirements(
1012 uint32_t* pNumRequirements
,
1013 VkSparseImageMemoryRequirements
* pSparseMemoryRequirements
)
1015 return vk_error(VK_UNSUPPORTED
);
1018 VkResult
anv_GetDeviceMemoryCommitment(
1020 VkDeviceMemory memory
,
1021 VkDeviceSize
* pCommittedMemoryInBytes
)
1023 *pCommittedMemoryInBytes
= 0;
1024 stub_return(VK_SUCCESS
);
1027 VkResult
anv_BindBufferMemory(
1030 VkDeviceMemory _mem
,
1031 VkDeviceSize memOffset
)
1033 ANV_FROM_HANDLE(anv_device_memory
, mem
, _mem
);
1034 ANV_FROM_HANDLE(anv_buffer
, buffer
, _buffer
);
1036 buffer
->bo
= &mem
->bo
;
1037 buffer
->offset
= memOffset
;
1042 VkResult
anv_BindImageMemory(
1045 VkDeviceMemory _mem
,
1046 VkDeviceSize memOffset
)
1048 ANV_FROM_HANDLE(anv_device_memory
, mem
, _mem
);
1049 ANV_FROM_HANDLE(anv_image
, image
, _image
);
1051 image
->bo
= &mem
->bo
;
1052 image
->offset
= memOffset
;
1057 VkResult
anv_QueueBindSparseBufferMemory(
1060 uint32_t numBindings
,
1061 const VkSparseMemoryBindInfo
* pBindInfo
)
1063 stub_return(VK_UNSUPPORTED
);
1066 VkResult
anv_QueueBindSparseImageOpaqueMemory(
1069 uint32_t numBindings
,
1070 const VkSparseMemoryBindInfo
* pBindInfo
)
1072 stub_return(VK_UNSUPPORTED
);
1075 VkResult
anv_QueueBindSparseImageMemory(
1078 uint32_t numBindings
,
1079 const VkSparseImageMemoryBindInfo
* pBindInfo
)
1081 stub_return(VK_UNSUPPORTED
);
1084 VkResult
anv_CreateFence(
1086 const VkFenceCreateInfo
* pCreateInfo
,
1089 ANV_FROM_HANDLE(anv_device
, device
, _device
);
1090 struct anv_fence
*fence
;
1091 struct anv_batch batch
;
1094 const uint32_t fence_size
= 128;
1096 assert(pCreateInfo
->sType
== VK_STRUCTURE_TYPE_FENCE_CREATE_INFO
);
1098 fence
= anv_device_alloc(device
, sizeof(*fence
), 8,
1099 VK_SYSTEM_ALLOC_TYPE_API_OBJECT
);
1101 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY
);
1103 result
= anv_bo_init_new(&fence
->bo
, device
, fence_size
);
1104 if (result
!= VK_SUCCESS
)
1108 anv_gem_mmap(device
, fence
->bo
.gem_handle
, 0, fence
->bo
.size
);
1109 batch
.next
= batch
.start
= fence
->bo
.map
;
1110 batch
.end
= fence
->bo
.map
+ fence
->bo
.size
;
1111 anv_batch_emit(&batch
, GEN7_MI_BATCH_BUFFER_END
);
1112 anv_batch_emit(&batch
, GEN7_MI_NOOP
);
1114 fence
->exec2_objects
[0].handle
= fence
->bo
.gem_handle
;
1115 fence
->exec2_objects
[0].relocation_count
= 0;
1116 fence
->exec2_objects
[0].relocs_ptr
= 0;
1117 fence
->exec2_objects
[0].alignment
= 0;
1118 fence
->exec2_objects
[0].offset
= fence
->bo
.offset
;
1119 fence
->exec2_objects
[0].flags
= 0;
1120 fence
->exec2_objects
[0].rsvd1
= 0;
1121 fence
->exec2_objects
[0].rsvd2
= 0;
1123 fence
->execbuf
.buffers_ptr
= (uintptr_t) fence
->exec2_objects
;
1124 fence
->execbuf
.buffer_count
= 1;
1125 fence
->execbuf
.batch_start_offset
= 0;
1126 fence
->execbuf
.batch_len
= batch
.next
- fence
->bo
.map
;
1127 fence
->execbuf
.cliprects_ptr
= 0;
1128 fence
->execbuf
.num_cliprects
= 0;
1129 fence
->execbuf
.DR1
= 0;
1130 fence
->execbuf
.DR4
= 0;
1132 fence
->execbuf
.flags
=
1133 I915_EXEC_HANDLE_LUT
| I915_EXEC_NO_RELOC
| I915_EXEC_RENDER
;
1134 fence
->execbuf
.rsvd1
= device
->context_id
;
1135 fence
->execbuf
.rsvd2
= 0;
1137 *pFence
= anv_fence_to_handle(fence
);
1142 anv_device_free(device
, fence
);
1147 VkResult
anv_DestroyFence(
1151 ANV_FROM_HANDLE(anv_device
, device
, _device
);
1152 ANV_FROM_HANDLE(anv_fence
, fence
, _fence
);
1154 anv_gem_munmap(fence
->bo
.map
, fence
->bo
.size
);
1155 anv_gem_close(device
, fence
->bo
.gem_handle
);
1156 anv_device_free(device
, fence
);
1161 VkResult
anv_ResetFences(
1163 uint32_t fenceCount
,
1164 const VkFence
* pFences
)
1166 for (uint32_t i
= 0; i
< fenceCount
; i
++) {
1167 ANV_FROM_HANDLE(anv_fence
, fence
, pFences
[i
]);
1168 fence
->ready
= false;
1174 VkResult
anv_GetFenceStatus(
1178 ANV_FROM_HANDLE(anv_device
, device
, _device
);
1179 ANV_FROM_HANDLE(anv_fence
, fence
, _fence
);
1186 ret
= anv_gem_wait(device
, fence
->bo
.gem_handle
, &t
);
1188 fence
->ready
= true;
1192 return VK_NOT_READY
;
1195 VkResult
anv_WaitForFences(
1197 uint32_t fenceCount
,
1198 const VkFence
* pFences
,
1202 ANV_FROM_HANDLE(anv_device
, device
, _device
);
1203 int64_t t
= timeout
;
1206 /* FIXME: handle !waitAll */
1208 for (uint32_t i
= 0; i
< fenceCount
; i
++) {
1209 ANV_FROM_HANDLE(anv_fence
, fence
, pFences
[i
]);
1210 ret
= anv_gem_wait(device
, fence
->bo
.gem_handle
, &t
);
1211 if (ret
== -1 && errno
== ETIME
)
1214 return vk_error(VK_ERROR_UNKNOWN
);
1220 // Queue semaphore functions
1222 VkResult
anv_CreateSemaphore(
1224 const VkSemaphoreCreateInfo
* pCreateInfo
,
1225 VkSemaphore
* pSemaphore
)
1227 stub_return(VK_UNSUPPORTED
);
1230 VkResult
anv_DestroySemaphore(
1232 VkSemaphore semaphore
)
1234 stub_return(VK_UNSUPPORTED
);
1237 VkResult
anv_QueueSignalSemaphore(
1239 VkSemaphore semaphore
)
1241 stub_return(VK_UNSUPPORTED
);
1244 VkResult
anv_QueueWaitSemaphore(
1246 VkSemaphore semaphore
)
1248 stub_return(VK_UNSUPPORTED
);
1253 VkResult
anv_CreateEvent(
1255 const VkEventCreateInfo
* pCreateInfo
,
1258 stub_return(VK_UNSUPPORTED
);
1261 VkResult
anv_DestroyEvent(
1265 stub_return(VK_UNSUPPORTED
);
1268 VkResult
anv_GetEventStatus(
1272 stub_return(VK_UNSUPPORTED
);
1275 VkResult
anv_SetEvent(
1279 stub_return(VK_UNSUPPORTED
);
1282 VkResult
anv_ResetEvent(
1286 stub_return(VK_UNSUPPORTED
);
1291 VkResult
anv_CreateBuffer(
1293 const VkBufferCreateInfo
* pCreateInfo
,
1296 ANV_FROM_HANDLE(anv_device
, device
, _device
);
1297 struct anv_buffer
*buffer
;
1299 assert(pCreateInfo
->sType
== VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO
);
1301 buffer
= anv_device_alloc(device
, sizeof(*buffer
), 8,
1302 VK_SYSTEM_ALLOC_TYPE_API_OBJECT
);
1304 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY
);
1306 buffer
->size
= pCreateInfo
->size
;
1310 *pBuffer
= anv_buffer_to_handle(buffer
);
1315 VkResult
anv_DestroyBuffer(
1319 ANV_FROM_HANDLE(anv_device
, device
, _device
);
1320 ANV_FROM_HANDLE(anv_buffer
, buffer
, _buffer
);
1322 anv_device_free(device
, buffer
);
1328 anv_fill_buffer_surface_state(struct anv_device
*device
, void *state
,
1329 const struct anv_format
*format
,
1330 uint32_t offset
, uint32_t range
)
1332 switch (device
->info
.gen
) {
1334 gen7_fill_buffer_surface_state(state
, format
, offset
, range
);
1337 gen8_fill_buffer_surface_state(state
, format
, offset
, range
);
1340 unreachable("unsupported gen\n");
1345 anv_buffer_view_create(
1346 struct anv_device
* device
,
1347 const VkBufferViewCreateInfo
* pCreateInfo
,
1348 struct anv_buffer_view
** view_out
)
1350 ANV_FROM_HANDLE(anv_buffer
, buffer
, pCreateInfo
->buffer
);
1351 struct anv_buffer_view
*view
;
1353 assert(pCreateInfo
->sType
== VK_STRUCTURE_TYPE_BUFFER_VIEW_CREATE_INFO
);
1355 view
= anv_device_alloc(device
, sizeof(*view
), 8,
1356 VK_SYSTEM_ALLOC_TYPE_API_OBJECT
);
1358 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY
);
1360 view
->view
= (struct anv_surface_view
) {
1362 .offset
= buffer
->offset
+ pCreateInfo
->offset
,
1363 .surface_state
= anv_state_pool_alloc(&device
->surface_state_pool
, 64, 64),
1364 .format
= anv_format_for_vk_format(pCreateInfo
->format
),
1365 .range
= pCreateInfo
->range
,
1374 VkResult
anv_CreateBufferView(
1376 const VkBufferViewCreateInfo
* pCreateInfo
,
1377 VkBufferView
* pView
)
1379 return driver_layer
->CreateBufferView(_device
, pCreateInfo
, pView
);
1382 VkResult
anv_DestroyBufferView(
1384 VkBufferView _bview
)
1386 ANV_FROM_HANDLE(anv_device
, device
, _device
);
1387 ANV_FROM_HANDLE(anv_buffer_view
, bview
, _bview
);
1389 anv_surface_view_fini(device
, &bview
->view
);
1390 anv_device_free(device
, bview
);
1395 VkResult
anv_CreateSampler(
1397 const VkSamplerCreateInfo
* pCreateInfo
,
1398 VkSampler
* pSampler
)
1400 return driver_layer
->CreateSampler(_device
, pCreateInfo
, pSampler
);
1403 VkResult
anv_DestroySampler(
1407 ANV_FROM_HANDLE(anv_device
, device
, _device
);
1408 ANV_FROM_HANDLE(anv_sampler
, sampler
, _sampler
);
1410 anv_device_free(device
, sampler
);
1415 // Descriptor set functions
1417 VkResult
anv_CreateDescriptorSetLayout(
1419 const VkDescriptorSetLayoutCreateInfo
* pCreateInfo
,
1420 VkDescriptorSetLayout
* pSetLayout
)
1422 ANV_FROM_HANDLE(anv_device
, device
, _device
);
1423 struct anv_descriptor_set_layout
*set_layout
;
1425 assert(pCreateInfo
->sType
== VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO
);
1427 uint32_t sampler_count
[VK_SHADER_STAGE_NUM
] = { 0, };
1428 uint32_t surface_count
[VK_SHADER_STAGE_NUM
] = { 0, };
1429 uint32_t num_dynamic_buffers
= 0;
1431 uint32_t stages
= 0;
1434 for (uint32_t i
= 0; i
< pCreateInfo
->count
; i
++) {
1435 switch (pCreateInfo
->pBinding
[i
].descriptorType
) {
1436 case VK_DESCRIPTOR_TYPE_SAMPLER
:
1437 case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER
:
1438 for_each_bit(s
, pCreateInfo
->pBinding
[i
].stageFlags
)
1439 sampler_count
[s
] += pCreateInfo
->pBinding
[i
].arraySize
;
1445 switch (pCreateInfo
->pBinding
[i
].descriptorType
) {
1446 case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER
:
1447 case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE
:
1448 case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE
:
1449 case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER
:
1450 case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER
:
1451 case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER
:
1452 case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER
:
1453 case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC
:
1454 case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC
:
1455 case VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT
:
1456 for_each_bit(s
, pCreateInfo
->pBinding
[i
].stageFlags
)
1457 surface_count
[s
] += pCreateInfo
->pBinding
[i
].arraySize
;
1463 switch (pCreateInfo
->pBinding
[i
].descriptorType
) {
1464 case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC
:
1465 case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC
:
1466 num_dynamic_buffers
+= pCreateInfo
->pBinding
[i
].arraySize
;
1472 stages
|= pCreateInfo
->pBinding
[i
].stageFlags
;
1473 count
+= pCreateInfo
->pBinding
[i
].arraySize
;
1476 uint32_t sampler_total
= 0;
1477 uint32_t surface_total
= 0;
1478 for (uint32_t s
= 0; s
< VK_SHADER_STAGE_NUM
; s
++) {
1479 sampler_total
+= sampler_count
[s
];
1480 surface_total
+= surface_count
[s
];
1483 size_t size
= sizeof(*set_layout
) +
1484 (sampler_total
+ surface_total
) * sizeof(set_layout
->entries
[0]);
1485 set_layout
= anv_device_alloc(device
, size
, 8,
1486 VK_SYSTEM_ALLOC_TYPE_API_OBJECT
);
1488 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY
);
1490 set_layout
->num_dynamic_buffers
= num_dynamic_buffers
;
1491 set_layout
->count
= count
;
1492 set_layout
->shader_stages
= stages
;
1494 struct anv_descriptor_slot
*p
= set_layout
->entries
;
1495 struct anv_descriptor_slot
*sampler
[VK_SHADER_STAGE_NUM
];
1496 struct anv_descriptor_slot
*surface
[VK_SHADER_STAGE_NUM
];
1497 for (uint32_t s
= 0; s
< VK_SHADER_STAGE_NUM
; s
++) {
1498 set_layout
->stage
[s
].surface_count
= surface_count
[s
];
1499 set_layout
->stage
[s
].surface_start
= surface
[s
] = p
;
1500 p
+= surface_count
[s
];
1501 set_layout
->stage
[s
].sampler_count
= sampler_count
[s
];
1502 set_layout
->stage
[s
].sampler_start
= sampler
[s
] = p
;
1503 p
+= sampler_count
[s
];
1506 uint32_t descriptor
= 0;
1507 int8_t dynamic_slot
= 0;
1509 for (uint32_t i
= 0; i
< pCreateInfo
->count
; i
++) {
1510 switch (pCreateInfo
->pBinding
[i
].descriptorType
) {
1511 case VK_DESCRIPTOR_TYPE_SAMPLER
:
1512 case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER
:
1513 for_each_bit(s
, pCreateInfo
->pBinding
[i
].stageFlags
)
1514 for (uint32_t j
= 0; j
< pCreateInfo
->pBinding
[i
].arraySize
; j
++) {
1515 sampler
[s
]->index
= descriptor
+ j
;
1516 sampler
[s
]->dynamic_slot
= -1;
1524 switch (pCreateInfo
->pBinding
[i
].descriptorType
) {
1525 case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC
:
1526 case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC
:
1534 switch (pCreateInfo
->pBinding
[i
].descriptorType
) {
1535 case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER
:
1536 case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE
:
1537 case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE
:
1538 case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER
:
1539 case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER
:
1540 case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER
:
1541 case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER
:
1542 case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC
:
1543 case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC
:
1544 case VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT
:
1545 for_each_bit(s
, pCreateInfo
->pBinding
[i
].stageFlags
)
1546 for (uint32_t j
= 0; j
< pCreateInfo
->pBinding
[i
].arraySize
; j
++) {
1547 surface
[s
]->index
= descriptor
+ j
;
1549 surface
[s
]->dynamic_slot
= dynamic_slot
+ j
;
1551 surface
[s
]->dynamic_slot
= -1;
1560 dynamic_slot
+= pCreateInfo
->pBinding
[i
].arraySize
;
1562 descriptor
+= pCreateInfo
->pBinding
[i
].arraySize
;
1565 *pSetLayout
= anv_descriptor_set_layout_to_handle(set_layout
);
1570 VkResult
anv_DestroyDescriptorSetLayout(
1572 VkDescriptorSetLayout _set_layout
)
1574 ANV_FROM_HANDLE(anv_device
, device
, _device
);
1575 ANV_FROM_HANDLE(anv_descriptor_set_layout
, set_layout
, _set_layout
);
1577 anv_device_free(device
, set_layout
);
1582 VkResult
anv_CreateDescriptorPool(
1584 VkDescriptorPoolUsage poolUsage
,
1586 const VkDescriptorPoolCreateInfo
* pCreateInfo
,
1587 VkDescriptorPool
* pDescriptorPool
)
1589 anv_finishme("VkDescriptorPool is a stub");
1590 pDescriptorPool
->handle
= 1;
1594 VkResult
anv_DestroyDescriptorPool(
1596 VkDescriptorPool _pool
)
1598 anv_finishme("VkDescriptorPool is a stub: free the pool's descriptor sets");
1602 VkResult
anv_ResetDescriptorPool(
1604 VkDescriptorPool descriptorPool
)
1606 anv_finishme("VkDescriptorPool is a stub: free the pool's descriptor sets");
1611 anv_descriptor_set_create(struct anv_device
*device
,
1612 const struct anv_descriptor_set_layout
*layout
,
1613 struct anv_descriptor_set
**out_set
)
1615 struct anv_descriptor_set
*set
;
1616 size_t size
= sizeof(*set
) + layout
->count
* sizeof(set
->descriptors
[0]);
1618 set
= anv_device_alloc(device
, size
, 8, VK_SYSTEM_ALLOC_TYPE_API_OBJECT
);
1620 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY
);
1622 /* A descriptor set may not be 100% filled. Clear the set so we can can
1623 * later detect holes in it.
1625 memset(set
, 0, size
);
/* Internal helper: release a descriptor set (single allocation). */
void
anv_descriptor_set_destroy(struct anv_device *device,
                           struct anv_descriptor_set *set)
{
   anv_device_free(device, set);
}
1639 VkResult
anv_AllocDescriptorSets(
1641 VkDescriptorPool descriptorPool
,
1642 VkDescriptorSetUsage setUsage
,
1644 const VkDescriptorSetLayout
* pSetLayouts
,
1645 VkDescriptorSet
* pDescriptorSets
,
1648 ANV_FROM_HANDLE(anv_device
, device
, _device
);
1651 struct anv_descriptor_set
*set
;
1653 for (uint32_t i
= 0; i
< count
; i
++) {
1654 ANV_FROM_HANDLE(anv_descriptor_set_layout
, layout
, pSetLayouts
[i
]);
1656 result
= anv_descriptor_set_create(device
, layout
, &set
);
1657 if (result
!= VK_SUCCESS
) {
1662 pDescriptorSets
[i
] = anv_descriptor_set_to_handle(set
);
1670 VkResult
anv_FreeDescriptorSets(
1672 VkDescriptorPool descriptorPool
,
1674 const VkDescriptorSet
* pDescriptorSets
)
1676 ANV_FROM_HANDLE(anv_device
, device
, _device
);
1678 for (uint32_t i
= 0; i
< count
; i
++) {
1679 ANV_FROM_HANDLE(anv_descriptor_set
, set
, pDescriptorSets
[i
]);
1681 anv_descriptor_set_destroy(device
, set
);
1687 VkResult
anv_UpdateDescriptorSets(
1689 uint32_t writeCount
,
1690 const VkWriteDescriptorSet
* pDescriptorWrites
,
1692 const VkCopyDescriptorSet
* pDescriptorCopies
)
1694 for (uint32_t i
= 0; i
< writeCount
; i
++) {
1695 const VkWriteDescriptorSet
*write
= &pDescriptorWrites
[i
];
1696 ANV_FROM_HANDLE(anv_descriptor_set
, set
, write
->destSet
);
1698 switch (write
->descriptorType
) {
1699 case VK_DESCRIPTOR_TYPE_SAMPLER
:
1700 case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER
:
1701 for (uint32_t j
= 0; j
< write
->count
; j
++) {
1702 set
->descriptors
[write
->destBinding
+ j
].sampler
=
1703 anv_sampler_from_handle(write
->pDescriptors
[j
].sampler
);
1706 if (write
->descriptorType
== VK_DESCRIPTOR_TYPE_SAMPLER
)
1711 case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE
:
1712 case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE
:
1713 for (uint32_t j
= 0; j
< write
->count
; j
++) {
1714 ANV_FROM_HANDLE(anv_image_view
, iview
,
1715 write
->pDescriptors
[j
].imageView
);
1716 set
->descriptors
[write
->destBinding
+ j
].view
= &iview
->view
;
1720 case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER
:
1721 case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER
:
1722 anv_finishme("texel buffers not implemented");
1725 case VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT
:
1726 anv_finishme("input attachments not implemented");
1729 case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER
:
1730 case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER
:
1731 case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC
:
1732 case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC
:
1733 for (uint32_t j
= 0; j
< write
->count
; j
++) {
1734 ANV_FROM_HANDLE(anv_buffer_view
, bview
,
1735 write
->pDescriptors
[j
].bufferView
);
1736 set
->descriptors
[write
->destBinding
+ j
].view
= &bview
->view
;
1744 for (uint32_t i
= 0; i
< copyCount
; i
++) {
1745 const VkCopyDescriptorSet
*copy
= &pDescriptorCopies
[i
];
1746 ANV_FROM_HANDLE(anv_descriptor_set
, src
, copy
->destSet
);
1747 ANV_FROM_HANDLE(anv_descriptor_set
, dest
, copy
->destSet
);
1748 for (uint32_t j
= 0; j
< copy
->count
; j
++) {
1749 dest
->descriptors
[copy
->destBinding
+ j
] =
1750 src
->descriptors
[copy
->srcBinding
+ j
];
1757 // State object functions
/* Clamp x into [min, max].  Done in int64_t so callers can feed it values
 * that overflow 32 bits and still get a correctly clamped result. */
static inline int64_t
clamp_int64(int64_t x, int64_t min, int64_t max)
{
   if (x < min)
      return min;
   else if (x < max)
      return x;
   else
      return max;
}
1770 VkResult
anv_CreateDynamicViewportState(
1772 const VkDynamicViewportStateCreateInfo
* pCreateInfo
,
1773 VkDynamicViewportState
* pState
)
1775 ANV_FROM_HANDLE(anv_device
, device
, _device
);
1776 struct anv_dynamic_vp_state
*state
;
1778 assert(pCreateInfo
->sType
== VK_STRUCTURE_TYPE_DYNAMIC_VIEWPORT_STATE_CREATE_INFO
);
1780 state
= anv_device_alloc(device
, sizeof(*state
), 8,
1781 VK_SYSTEM_ALLOC_TYPE_API_OBJECT
);
1783 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY
);
1785 unsigned count
= pCreateInfo
->viewportAndScissorCount
;
1786 state
->sf_clip_vp
= anv_state_pool_alloc(&device
->dynamic_state_pool
,
1788 state
->cc_vp
= anv_state_pool_alloc(&device
->dynamic_state_pool
,
1790 state
->scissor
= anv_state_pool_alloc(&device
->dynamic_state_pool
,
1793 for (uint32_t i
= 0; i
< pCreateInfo
->viewportAndScissorCount
; i
++) {
1794 const VkViewport
*vp
= &pCreateInfo
->pViewports
[i
];
1795 const VkRect2D
*s
= &pCreateInfo
->pScissors
[i
];
1797 /* The gen7 state struct has just the matrix and guardband fields, the
1798 * gen8 struct adds the min/max viewport fields. */
1799 struct GEN8_SF_CLIP_VIEWPORT sf_clip_viewport
= {
1800 .ViewportMatrixElementm00
= vp
->width
/ 2,
1801 .ViewportMatrixElementm11
= vp
->height
/ 2,
1802 .ViewportMatrixElementm22
= (vp
->maxDepth
- vp
->minDepth
) / 2,
1803 .ViewportMatrixElementm30
= vp
->originX
+ vp
->width
/ 2,
1804 .ViewportMatrixElementm31
= vp
->originY
+ vp
->height
/ 2,
1805 .ViewportMatrixElementm32
= (vp
->maxDepth
+ vp
->minDepth
) / 2,
1806 .XMinClipGuardband
= -1.0f
,
1807 .XMaxClipGuardband
= 1.0f
,
1808 .YMinClipGuardband
= -1.0f
,
1809 .YMaxClipGuardband
= 1.0f
,
1810 .XMinViewPort
= vp
->originX
,
1811 .XMaxViewPort
= vp
->originX
+ vp
->width
- 1,
1812 .YMinViewPort
= vp
->originY
,
1813 .YMaxViewPort
= vp
->originY
+ vp
->height
- 1,
1816 struct GEN7_CC_VIEWPORT cc_viewport
= {
1817 .MinimumDepth
= vp
->minDepth
,
1818 .MaximumDepth
= vp
->maxDepth
1821 /* Since xmax and ymax are inclusive, we have to have xmax < xmin or
1822 * ymax < ymin for empty clips. In case clip x, y, width height are all
1823 * 0, the clamps below produce 0 for xmin, ymin, xmax, ymax, which isn't
1824 * what we want. Just special case empty clips and produce a canonical
1826 static const struct GEN7_SCISSOR_RECT empty_scissor
= {
1827 .ScissorRectangleYMin
= 1,
1828 .ScissorRectangleXMin
= 1,
1829 .ScissorRectangleYMax
= 0,
1830 .ScissorRectangleXMax
= 0
1833 const int max
= 0xffff;
1834 struct GEN7_SCISSOR_RECT scissor
= {
1835 /* Do this math using int64_t so overflow gets clamped correctly. */
1836 .ScissorRectangleYMin
= clamp_int64(s
->offset
.y
, 0, max
),
1837 .ScissorRectangleXMin
= clamp_int64(s
->offset
.x
, 0, max
),
1838 .ScissorRectangleYMax
= clamp_int64((uint64_t) s
->offset
.y
+ s
->extent
.height
- 1, 0, max
),
1839 .ScissorRectangleXMax
= clamp_int64((uint64_t) s
->offset
.x
+ s
->extent
.width
- 1, 0, max
)
1842 GEN8_SF_CLIP_VIEWPORT_pack(NULL
, state
->sf_clip_vp
.map
+ i
* 64, &sf_clip_viewport
);
1843 GEN7_CC_VIEWPORT_pack(NULL
, state
->cc_vp
.map
+ i
* 32, &cc_viewport
);
1845 if (s
->extent
.width
<= 0 || s
->extent
.height
<= 0) {
1846 GEN7_SCISSOR_RECT_pack(NULL
, state
->scissor
.map
+ i
* 32, &empty_scissor
);
1848 GEN7_SCISSOR_RECT_pack(NULL
, state
->scissor
.map
+ i
* 32, &scissor
);
1852 *pState
= anv_dynamic_vp_state_to_handle(state
);
1857 VkResult
anv_DestroyDynamicViewportState(
1859 VkDynamicViewportState _vp_state
)
1861 ANV_FROM_HANDLE(anv_device
, device
, _device
);
1862 ANV_FROM_HANDLE(anv_dynamic_vp_state
, vp_state
, _vp_state
);
1864 anv_state_pool_free(&device
->dynamic_state_pool
, vp_state
->sf_clip_vp
);
1865 anv_state_pool_free(&device
->dynamic_state_pool
, vp_state
->cc_vp
);
1866 anv_state_pool_free(&device
->dynamic_state_pool
, vp_state
->scissor
);
1868 anv_device_free(device
, vp_state
);
1873 VkResult
anv_CreateDynamicRasterState(
1875 const VkDynamicRasterStateCreateInfo
* pCreateInfo
,
1876 VkDynamicRasterState
* pState
)
1878 return driver_layer
->CreateDynamicRasterState(_device
, pCreateInfo
, pState
);
1881 VkResult
anv_DestroyDynamicRasterState(
1883 VkDynamicRasterState _rs_state
)
1885 ANV_FROM_HANDLE(anv_device
, device
, _device
);
1886 ANV_FROM_HANDLE(anv_dynamic_rs_state
, rs_state
, _rs_state
);
1888 anv_device_free(device
, rs_state
);
1893 VkResult
anv_CreateDynamicColorBlendState(
1895 const VkDynamicColorBlendStateCreateInfo
* pCreateInfo
,
1896 VkDynamicColorBlendState
* pState
)
1898 ANV_FROM_HANDLE(anv_device
, device
, _device
);
1899 struct anv_dynamic_cb_state
*state
;
1901 assert(pCreateInfo
->sType
== VK_STRUCTURE_TYPE_DYNAMIC_COLOR_BLEND_STATE_CREATE_INFO
);
1903 state
= anv_device_alloc(device
, sizeof(*state
), 8,
1904 VK_SYSTEM_ALLOC_TYPE_API_OBJECT
);
1906 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY
);
1908 struct GEN7_COLOR_CALC_STATE color_calc_state
= {
1909 .BlendConstantColorRed
= pCreateInfo
->blendConst
[0],
1910 .BlendConstantColorGreen
= pCreateInfo
->blendConst
[1],
1911 .BlendConstantColorBlue
= pCreateInfo
->blendConst
[2],
1912 .BlendConstantColorAlpha
= pCreateInfo
->blendConst
[3]
1915 GEN7_COLOR_CALC_STATE_pack(NULL
, state
->color_calc_state
, &color_calc_state
);
1917 *pState
= anv_dynamic_cb_state_to_handle(state
);
1922 VkResult
anv_DestroyDynamicColorBlendState(
1924 VkDynamicColorBlendState _cb_state
)
1926 ANV_FROM_HANDLE(anv_device
, device
, _device
);
1927 ANV_FROM_HANDLE(anv_dynamic_cb_state
, cb_state
, _cb_state
);
1929 anv_device_free(device
, cb_state
);
1934 VkResult
anv_CreateDynamicDepthStencilState(
1936 const VkDynamicDepthStencilStateCreateInfo
* pCreateInfo
,
1937 VkDynamicDepthStencilState
* pState
)
1939 return driver_layer
->CreateDynamicDepthStencilState(_device
, pCreateInfo
, pState
);
1942 VkResult
anv_DestroyDynamicDepthStencilState(
1944 VkDynamicDepthStencilState _ds_state
)
1946 ANV_FROM_HANDLE(anv_device
, device
, _device
);
1947 ANV_FROM_HANDLE(anv_dynamic_ds_state
, ds_state
, _ds_state
);
1949 anv_device_free(device
, ds_state
);
1954 VkResult
anv_CreateFramebuffer(
1956 const VkFramebufferCreateInfo
* pCreateInfo
,
1957 VkFramebuffer
* pFramebuffer
)
1959 ANV_FROM_HANDLE(anv_device
, device
, _device
);
1960 struct anv_framebuffer
*framebuffer
;
1962 assert(pCreateInfo
->sType
== VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO
);
1964 size_t size
= sizeof(*framebuffer
) +
1965 sizeof(struct anv_attachment_view
*) * pCreateInfo
->attachmentCount
;
1966 framebuffer
= anv_device_alloc(device
, size
, 8,
1967 VK_SYSTEM_ALLOC_TYPE_API_OBJECT
);
1968 if (framebuffer
== NULL
)
1969 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY
);
1971 framebuffer
->attachment_count
= pCreateInfo
->attachmentCount
;
1972 for (uint32_t i
= 0; i
< pCreateInfo
->attachmentCount
; i
++) {
1973 ANV_FROM_HANDLE(anv_attachment_view
, view
,
1974 pCreateInfo
->pAttachments
[i
].view
);
1976 framebuffer
->attachments
[i
] = view
;
1979 framebuffer
->width
= pCreateInfo
->width
;
1980 framebuffer
->height
= pCreateInfo
->height
;
1981 framebuffer
->layers
= pCreateInfo
->layers
;
1983 anv_CreateDynamicViewportState(anv_device_to_handle(device
),
1984 &(VkDynamicViewportStateCreateInfo
) {
1985 .sType
= VK_STRUCTURE_TYPE_DYNAMIC_VIEWPORT_STATE_CREATE_INFO
,
1986 .viewportAndScissorCount
= 1,
1987 .pViewports
= (VkViewport
[]) {
1991 .width
= pCreateInfo
->width
,
1992 .height
= pCreateInfo
->height
,
1997 .pScissors
= (VkRect2D
[]) {
1999 { pCreateInfo
->width
, pCreateInfo
->height
} },
2002 &framebuffer
->vp_state
);
2004 *pFramebuffer
= anv_framebuffer_to_handle(framebuffer
);
2009 VkResult
anv_DestroyFramebuffer(
2013 ANV_FROM_HANDLE(anv_device
, device
, _device
);
2014 ANV_FROM_HANDLE(anv_framebuffer
, fb
, _fb
);
2016 anv_DestroyDynamicViewportState(anv_device_to_handle(device
),
2018 anv_device_free(device
, fb
);
2023 VkResult
anv_CreateRenderPass(
2025 const VkRenderPassCreateInfo
* pCreateInfo
,
2026 VkRenderPass
* pRenderPass
)
2028 ANV_FROM_HANDLE(anv_device
, device
, _device
);
2029 struct anv_render_pass
*pass
;
2031 size_t attachments_offset
;
2033 assert(pCreateInfo
->sType
== VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO
);
2035 size
= sizeof(*pass
);
2036 size
+= pCreateInfo
->subpassCount
* sizeof(pass
->subpasses
[0]);
2037 attachments_offset
= size
;
2038 size
+= pCreateInfo
->attachmentCount
* sizeof(pass
->attachments
[0]);
2040 pass
= anv_device_alloc(device
, size
, 8,
2041 VK_SYSTEM_ALLOC_TYPE_API_OBJECT
);
2043 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY
);
2045 /* Clear the subpasses along with the parent pass. This required because
2046 * each array member of anv_subpass must be a valid pointer if not NULL.
2048 memset(pass
, 0, size
);
2049 pass
->attachment_count
= pCreateInfo
->attachmentCount
;
2050 pass
->subpass_count
= pCreateInfo
->subpassCount
;
2051 pass
->attachments
= (void *) pass
+ attachments_offset
;
2053 for (uint32_t i
= 0; i
< pCreateInfo
->attachmentCount
; i
++) {
2054 struct anv_render_pass_attachment
*att
= &pass
->attachments
[i
];
2056 att
->format
= anv_format_for_vk_format(pCreateInfo
->pAttachments
[i
].format
);
2057 att
->samples
= pCreateInfo
->pAttachments
[i
].samples
;
2058 att
->load_op
= pCreateInfo
->pAttachments
[i
].loadOp
;
2059 att
->stencil_load_op
= pCreateInfo
->pAttachments
[i
].stencilLoadOp
;
2060 // att->store_op = pCreateInfo->pAttachments[i].storeOp;
2061 // att->stencil_store_op = pCreateInfo->pAttachments[i].stencilStoreOp;
2063 if (att
->load_op
== VK_ATTACHMENT_LOAD_OP_CLEAR
) {
2064 if (anv_format_is_color(att
->format
)) {
2065 ++pass
->num_color_clear_attachments
;
2066 } else if (att
->format
->depth_format
) {
2067 pass
->has_depth_clear_attachment
= true;
2069 } else if (att
->stencil_load_op
== VK_ATTACHMENT_LOAD_OP_CLEAR
) {
2070 assert(att
->format
->has_stencil
);
2071 pass
->has_stencil_clear_attachment
= true;
2075 for (uint32_t i
= 0; i
< pCreateInfo
->subpassCount
; i
++) {
2076 const VkSubpassDescription
*desc
= &pCreateInfo
->pSubpasses
[i
];
2077 struct anv_subpass
*subpass
= &pass
->subpasses
[i
];
2079 subpass
->input_count
= desc
->inputCount
;
2080 subpass
->color_count
= desc
->colorCount
;
2082 if (desc
->inputCount
> 0) {
2083 subpass
->input_attachments
=
2084 anv_device_alloc(device
, desc
->inputCount
* sizeof(uint32_t),
2085 8, VK_SYSTEM_ALLOC_TYPE_API_OBJECT
);
2087 for (uint32_t j
= 0; j
< desc
->inputCount
; j
++) {
2088 subpass
->input_attachments
[j
]
2089 = desc
->inputAttachments
[j
].attachment
;
2093 if (desc
->colorCount
> 0) {
2094 subpass
->color_attachments
=
2095 anv_device_alloc(device
, desc
->colorCount
* sizeof(uint32_t),
2096 8, VK_SYSTEM_ALLOC_TYPE_API_OBJECT
);
2098 for (uint32_t j
= 0; j
< desc
->colorCount
; j
++) {
2099 subpass
->color_attachments
[j
]
2100 = desc
->colorAttachments
[j
].attachment
;
2104 if (desc
->resolveAttachments
) {
2105 subpass
->resolve_attachments
=
2106 anv_device_alloc(device
, desc
->colorCount
* sizeof(uint32_t),
2107 8, VK_SYSTEM_ALLOC_TYPE_API_OBJECT
);
2109 for (uint32_t j
= 0; j
< desc
->colorCount
; j
++) {
2110 subpass
->resolve_attachments
[j
]
2111 = desc
->resolveAttachments
[j
].attachment
;
2115 subpass
->depth_stencil_attachment
= desc
->depthStencilAttachment
.attachment
;
2118 *pRenderPass
= anv_render_pass_to_handle(pass
);
2123 VkResult
anv_DestroyRenderPass(
2127 ANV_FROM_HANDLE(anv_device
, device
, _device
);
2128 ANV_FROM_HANDLE(anv_render_pass
, pass
, _pass
);
2130 for (uint32_t i
= 0; i
< pass
->subpass_count
; i
++) {
2131 /* In VkSubpassCreateInfo, each of the attachment arrays may be null.
2132 * Don't free the null arrays.
2134 struct anv_subpass
*subpass
= &pass
->subpasses
[i
];
2136 anv_device_free(device
, subpass
->input_attachments
);
2137 anv_device_free(device
, subpass
->color_attachments
);
2138 anv_device_free(device
, subpass
->resolve_attachments
);
2141 anv_device_free(device
, pass
);
2146 VkResult
anv_GetRenderAreaGranularity(
2148 VkRenderPass renderPass
,
2149 VkExtent2D
* pGranularity
)
2151 *pGranularity
= (VkExtent2D
) { 1, 1 };
2156 void vkCmdDbgMarkerBegin(
2157 VkCmdBuffer cmdBuffer
,
2158 const char* pMarker
)
2159 __attribute__ ((visibility ("default")));
2161 void vkCmdDbgMarkerEnd(
2162 VkCmdBuffer cmdBuffer
)
2163 __attribute__ ((visibility ("default")));
2165 void vkCmdDbgMarkerBegin(
2166 VkCmdBuffer cmdBuffer
,
2167 const char* pMarker
)
2171 void vkCmdDbgMarkerEnd(
2172 VkCmdBuffer cmdBuffer
)