/*
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
#include "anv_private.h"
#include "mesa/main/git_sha1.h"
#include "util/strtod.h"
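/* Probes a DRM render node: opens the given path, reads the PCI chipset id
 * via I915_PARAM_CHIPSET_ID, looks up the brw_device_info for it, and then
 * checks the aperture size and the kernel features this driver depends on
 * (wait-timeout, execbuf2, LLC, exec-constants).  Any missing feature makes
 * the physical device unavailable.
 */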
anv_physical_device_init(struct anv_physical_device *device,
                         struct anv_instance *instance,

   fd = open(path, O_RDWR | O_CLOEXEC);
      return vk_error(VK_ERROR_UNAVAILABLE);

   device->instance = instance;

   device->chipset_id = anv_gem_get_param(fd, I915_PARAM_CHIPSET_ID);
   if (!device->chipset_id)

   device->name = brw_get_device_name(device->chipset_id);
   device->info = brw_get_device_info(device->chipset_id, -1);

   if (anv_gem_get_aperture(fd, &device->aperture_size) == -1)

   if (!anv_gem_get_param(fd, I915_PARAM_HAS_WAIT_TIMEOUT))

   if (!anv_gem_get_param(fd, I915_PARAM_HAS_EXECBUF2))

   if (!anv_gem_get_param(fd, I915_PARAM_HAS_LLC))

   if (!anv_gem_get_param(fd, I915_PARAM_HAS_EXEC_CONSTANTS))

   return vk_error(VK_ERROR_UNAVAILABLE);
static void *default_alloc(
    VkSystemAllocType allocType)

static void default_free(

static const VkAllocCallbacks default_alloc_callbacks = {
   .pfnAlloc = default_alloc,
   .pfnFree = default_free
VkResult anv_CreateInstance(
    const VkInstanceCreateInfo* pCreateInfo,
    VkInstance* pInstance)

   struct anv_instance *instance;
   const VkAllocCallbacks *alloc_callbacks = &default_alloc_callbacks;
   void *user_data = NULL;

   assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_INSTANCE_CREATE_INFO);

   if (pCreateInfo->pAllocCb) {
      alloc_callbacks = pCreateInfo->pAllocCb;
      user_data = pCreateInfo->pAllocCb->pUserData;

   instance = alloc_callbacks->pfnAlloc(user_data, sizeof(*instance), 8,
                                        VK_SYSTEM_ALLOC_TYPE_API_OBJECT);
      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

   instance->pAllocUserData = alloc_callbacks->pUserData;
   instance->pfnAlloc = alloc_callbacks->pfnAlloc;
   instance->pfnFree = alloc_callbacks->pfnFree;
   instance->apiVersion = pCreateInfo->pAppInfo->apiVersion;
   instance->physicalDeviceCount = 0;

   VG(VALGRIND_CREATE_MEMPOOL(instance, 0, false));

   *pInstance = anv_instance_to_handle(instance);
VkResult anv_DestroyInstance(
    VkInstance _instance)

   ANV_FROM_HANDLE(anv_instance, instance, _instance);

   VG(VALGRIND_DESTROY_MEMPOOL(instance));

   instance->pfnFree(instance->pAllocUserData, instance);
anv_instance_alloc(struct anv_instance *instance, size_t size,
                   size_t alignment, VkSystemAllocType allocType)

   void *mem = instance->pfnAlloc(instance->pAllocUserData,
                                  size, alignment, allocType);

   VALGRIND_MEMPOOL_ALLOC(instance, mem, size);
   VALGRIND_MAKE_MEM_UNDEFINED(mem, size);

anv_instance_free(struct anv_instance *instance, void *mem)

   VALGRIND_MEMPOOL_FREE(instance, mem);

   instance->pfnFree(instance->pAllocUserData, mem);
VkResult anv_EnumeratePhysicalDevices(
    VkInstance _instance,
    uint32_t* pPhysicalDeviceCount,
    VkPhysicalDevice* pPhysicalDevices)

   ANV_FROM_HANDLE(anv_instance, instance, _instance);

   if (instance->physicalDeviceCount == 0) {
      result = anv_physical_device_init(&instance->physicalDevice,
                                        instance, "/dev/dri/renderD128");
      if (result != VK_SUCCESS)

      instance->physicalDeviceCount = 1;
   /* pPhysicalDeviceCount is an out parameter if pPhysicalDevices is NULL;
    * otherwise it's an inout parameter.
    *
    * The Vulkan spec (git aaed022) says:
    *
    *    pPhysicalDeviceCount is a pointer to an unsigned integer variable
    *    that is initialized with the number of devices the application is
    *    prepared to receive handles to. pname:pPhysicalDevices is pointer to
    *    an array of at least this many VkPhysicalDevice handles [...].
    *
    *    Upon success, if pPhysicalDevices is NULL, vkEnumeratePhysicalDevices
    *    overwrites the contents of the variable pointed to by
    *    pPhysicalDeviceCount with the number of physical devices in the
    *    instance; otherwise, vkEnumeratePhysicalDevices overwrites
    *    pPhysicalDeviceCount with the number of physical handles written to
    */
   if (!pPhysicalDevices) {
      *pPhysicalDeviceCount = instance->physicalDeviceCount;
   } else if (*pPhysicalDeviceCount >= 1) {
      pPhysicalDevices[0] = anv_physical_device_to_handle(&instance->physicalDevice);
      *pPhysicalDeviceCount = 1;

      *pPhysicalDeviceCount = 0;
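/* The enumeration above follows the usual Vulkan two-call idiom.  A rough,
 * illustrative application-side sketch (generic names, not part of this
 * driver):
 *
 *    uint32_t count = 0;
 *    vkEnumeratePhysicalDevices(instance, &count, NULL);   // query count
 *    VkPhysicalDevice devs[count];
 *    vkEnumeratePhysicalDevices(instance, &count, devs);   // fill handles
 *
 * With this driver the count always comes back as 1.
 */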
VkResult anv_GetPhysicalDeviceFeatures(
    VkPhysicalDevice physicalDevice,
    VkPhysicalDeviceFeatures* pFeatures)

   anv_finishme("Get correct values for PhysicalDeviceFeatures");

   *pFeatures = (VkPhysicalDeviceFeatures) {
      .robustBufferAccess = false,
      .fullDrawIndexUint32 = false,
      .imageCubeArray = false,
      .independentBlend = false,
      .geometryShader = true,
      .tessellationShader = false,
      .sampleRateShading = false,
      .dualSourceBlend = true,
      .instancedDrawIndirect = true,
      .depthBiasClamp = false,
      .fillModeNonSolid = true,
      .depthBounds = false,
      .textureCompressionETC2 = true,
      .textureCompressionASTC_LDR = true,
      .textureCompressionBC = true,
      .pipelineStatisticsQuery = true,
      .vertexSideEffects = false,
      .tessellationSideEffects = false,
      .geometrySideEffects = false,
      .fragmentSideEffects = false,
      .shaderTessellationPointSize = false,
      .shaderGeometryPointSize = true,
      .shaderTextureGatherExtended = true,
      .shaderStorageImageExtendedFormats = false,
      .shaderStorageImageMultisample = false,
      .shaderStorageBufferArrayConstantIndexing = false,
      .shaderStorageImageArrayConstantIndexing = false,
      .shaderUniformBufferArrayDynamicIndexing = true,
      .shaderSampledImageArrayDynamicIndexing = false,
      .shaderStorageBufferArrayDynamicIndexing = false,
      .shaderStorageImageArrayDynamicIndexing = false,
      .shaderClipDistance = false,
      .shaderCullDistance = false,
      .shaderFloat64 = false,
      .shaderInt64 = false,
      .shaderFloat16 = false,
      .shaderInt16 = false,
VkResult anv_GetPhysicalDeviceLimits(
    VkPhysicalDevice physicalDevice,
    VkPhysicalDeviceLimits* pLimits)

   ANV_FROM_HANDLE(anv_physical_device, physical_device, physicalDevice);
   const struct brw_device_info *devinfo = physical_device->info;

   anv_finishme("Get correct values for PhysicalDeviceLimits");

   *pLimits = (VkPhysicalDeviceLimits) {
      .maxImageDimension1D = (1 << 14),
      .maxImageDimension2D = (1 << 14),
      .maxImageDimension3D = (1 << 10),
      .maxImageDimensionCube = (1 << 14),
      .maxImageArrayLayers = (1 << 10),
      .maxTexelBufferSize = (1 << 14),
      .maxUniformBufferSize = UINT32_MAX,
      .maxStorageBufferSize = UINT32_MAX,
      .maxPushConstantsSize = 128,
      .maxMemoryAllocationCount = UINT32_MAX,
      .bufferImageGranularity = 64, /* A cache line */
      .maxBoundDescriptorSets = MAX_SETS,
      .maxDescriptorSets = UINT32_MAX,
      .maxPerStageDescriptorSamplers = 64,
      .maxPerStageDescriptorUniformBuffers = 64,
      .maxPerStageDescriptorStorageBuffers = 64,
      .maxPerStageDescriptorSampledImages = 64,
      .maxPerStageDescriptorStorageImages = 64,
      .maxDescriptorSetSamplers = 256,
      .maxDescriptorSetUniformBuffers = 256,
      .maxDescriptorSetStorageBuffers = 256,
      .maxDescriptorSetSampledImages = 256,
      .maxDescriptorSetStorageImages = 256,
      .maxVertexInputAttributes = 32,
      .maxVertexInputAttributeOffset = 256,
      .maxVertexInputBindingStride = 256,
      .maxVertexOutputComponents = 32,
      .maxTessGenLevel = 0,
      .maxTessPatchSize = 0,
      .maxTessControlPerVertexInputComponents = 0,
      .maxTessControlPerVertexOutputComponents = 0,
      .maxTessControlPerPatchOutputComponents = 0,
      .maxTessControlTotalOutputComponents = 0,
      .maxTessEvaluationInputComponents = 0,
      .maxTessEvaluationOutputComponents = 0,
      .maxGeometryShaderInvocations = 6,
      .maxGeometryInputComponents = 16,
      .maxGeometryOutputComponents = 16,
      .maxGeometryOutputVertices = 16,
      .maxGeometryTotalOutputComponents = 16,
      .maxFragmentInputComponents = 16,
      .maxFragmentOutputBuffers = 8,
      .maxFragmentDualSourceBuffers = 2,
      .maxFragmentCombinedOutputResources = 8,
      .maxComputeSharedMemorySize = 1024,
      .maxComputeWorkGroupCount = {
         16 * devinfo->max_cs_threads,
         16 * devinfo->max_cs_threads,
         16 * devinfo->max_cs_threads,
      .maxComputeWorkGroupInvocations = 16 * devinfo->max_cs_threads,
      .maxComputeWorkGroupSize = {
         16 * devinfo->max_cs_threads,
         16 * devinfo->max_cs_threads,
         16 * devinfo->max_cs_threads,
      .subPixelPrecisionBits = 4 /* FIXME */,
      .subTexelPrecisionBits = 4 /* FIXME */,
      .mipmapPrecisionBits = 4 /* FIXME */,
      .maxDrawIndexedIndexValue = UINT32_MAX,
      .maxDrawIndirectInstanceCount = UINT32_MAX,
      .primitiveRestartForPatches = UINT32_MAX,
      .maxSamplerLodBias = 16,
      .maxSamplerAnisotropy = 16,
      .maxDynamicViewportStates = UINT32_MAX,
      .maxViewportDimensions = { (1 << 14), (1 << 14) },
      .viewportBoundsRange = { -1.0, 1.0 }, /* FIXME */
      .viewportSubPixelBits = 13, /* We take a float? */
      .minMemoryMapAlignment = 64, /* A cache line */
      .minTexelBufferOffsetAlignment = 1,
      .minUniformBufferOffsetAlignment = 1,
      .minStorageBufferOffsetAlignment = 1,
      .minTexelOffset = 0, /* FIXME */
      .maxTexelOffset = 0, /* FIXME */
      .minTexelGatherOffset = 0, /* FIXME */
      .maxTexelGatherOffset = 0, /* FIXME */
      .minInterpolationOffset = 0, /* FIXME */
      .maxInterpolationOffset = 0, /* FIXME */
      .subPixelInterpolationOffsetBits = 0, /* FIXME */
      .maxFramebufferWidth = (1 << 14),
      .maxFramebufferHeight = (1 << 14),
      .maxFramebufferLayers = (1 << 10),
      .maxFramebufferColorSamples = 8,
      .maxFramebufferDepthSamples = 8,
      .maxFramebufferStencilSamples = 8,
      .maxColorAttachments = MAX_RTS,
      .maxSampledImageColorSamples = 8,
      .maxSampledImageDepthSamples = 8,
      .maxSampledImageIntegerSamples = 1,
      .maxStorageImageSamples = 1,
      .maxSampleMaskWords = 1,
      .timestampFrequency = 1000 * 1000 * 1000 / 80,
      .maxClipDistances = 0 /* FIXME */,
      .maxCullDistances = 0 /* FIXME */,
      .maxCombinedClipAndCullDistances = 0 /* FIXME */,
      .pointSizeRange = { 0.125, 255.875 },
      .lineWidthRange = { 0.0, 7.9921875 },
      .pointSizeGranularity = (1.0 / 8.0),
      .lineWidthGranularity = (1.0 / 128.0),
VkResult anv_GetPhysicalDeviceProperties(
    VkPhysicalDevice physicalDevice,
    VkPhysicalDeviceProperties* pProperties)

   ANV_FROM_HANDLE(anv_physical_device, pdevice, physicalDevice);

   *pProperties = (VkPhysicalDeviceProperties) {
      .apiVersion = VK_MAKE_VERSION(0, 138, 1),
      .deviceId = pdevice->chipset_id,
      .deviceType = VK_PHYSICAL_DEVICE_TYPE_INTEGRATED_GPU,

   strcpy(pProperties->deviceName, pdevice->name);
   snprintf((char *)pProperties->pipelineCacheUUID, VK_UUID_LENGTH,
            "anv-%s", MESA_GIT_SHA1 + 4);
VkResult anv_GetPhysicalDeviceQueueCount(
    VkPhysicalDevice physicalDevice,

VkResult anv_GetPhysicalDeviceQueueProperties(
    VkPhysicalDevice physicalDevice,
    VkPhysicalDeviceQueueProperties* pQueueProperties)

   *pQueueProperties = (VkPhysicalDeviceQueueProperties) {
      .queueFlags = VK_QUEUE_GRAPHICS_BIT |
                    VK_QUEUE_COMPUTE_BIT |
      .supportsTimestamps = true,
VkResult anv_GetPhysicalDeviceMemoryProperties(
    VkPhysicalDevice physicalDevice,
    VkPhysicalDeviceMemoryProperties* pMemoryProperties)

   ANV_FROM_HANDLE(anv_physical_device, physical_device, physicalDevice);
   VkDeviceSize heap_size;

   /* Reserve some wiggle room for the driver by exposing only 75% of the
    * aperture to the heap.
    */
   heap_size = 3 * physical_device->aperture_size / 4;

   /* The property flags below are valid only for llc platforms. */
   pMemoryProperties->memoryTypeCount = 1;
   pMemoryProperties->memoryTypes[0] = (VkMemoryType) {
      .propertyFlags = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT,

   pMemoryProperties->memoryHeapCount = 1;
   pMemoryProperties->memoryHeaps[0] = (VkMemoryHeap) {
      .flags = VK_MEMORY_HEAP_HOST_LOCAL,
PFN_vkVoidFunction anv_GetInstanceProcAddr(
   return anv_lookup_entrypoint(pName);

PFN_vkVoidFunction anv_GetDeviceProcAddr(
   return anv_lookup_entrypoint(pName);
anv_queue_init(struct anv_device *device, struct anv_queue *queue)

   queue->device = device;
   queue->pool = &device->surface_state_pool;

   queue->completed_serial = anv_state_pool_alloc(queue->pool, 4, 4);
   if (queue->completed_serial.map == NULL)
      return vk_error(VK_ERROR_OUT_OF_DEVICE_MEMORY);

   *(uint32_t *)queue->completed_serial.map = 0;
   queue->next_serial = 1;

anv_queue_finish(struct anv_queue *queue)

   /* This gets torn down with the device so we only need to do this if
    * valgrind is present.
    */
   anv_state_pool_free(queue->pool, queue->completed_serial);
anv_device_init_border_colors(struct anv_device *device)

   static const VkClearColorValue border_colors[] = {
      [VK_BORDER_COLOR_FLOAT_TRANSPARENT_BLACK] = { .f32 = { 0.0, 0.0, 0.0, 0.0 } },
      [VK_BORDER_COLOR_FLOAT_OPAQUE_BLACK] =      { .f32 = { 0.0, 0.0, 0.0, 1.0 } },
      [VK_BORDER_COLOR_FLOAT_OPAQUE_WHITE] =      { .f32 = { 1.0, 1.0, 1.0, 1.0 } },
      [VK_BORDER_COLOR_INT_TRANSPARENT_BLACK] =   { .u32 = { 0, 0, 0, 0 } },
      [VK_BORDER_COLOR_INT_OPAQUE_BLACK] =        { .u32 = { 0, 0, 0, 1 } },
      [VK_BORDER_COLOR_INT_OPAQUE_WHITE] =        { .u32 = { 1, 1, 1, 1 } },

   device->border_colors =
      anv_state_pool_alloc(&device->dynamic_state_pool,
                           sizeof(border_colors), 32);
   memcpy(device->border_colors.map, border_colors, sizeof(border_colors));
VkResult anv_CreateDevice(
    VkPhysicalDevice physicalDevice,
    const VkDeviceCreateInfo* pCreateInfo,

   ANV_FROM_HANDLE(anv_physical_device, physical_device, physicalDevice);
   struct anv_instance *instance = physical_device->instance;
   struct anv_device *device;

   assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO);

   switch (physical_device->info->gen) {
      driver_layer = &gen7_layer;
      driver_layer = &gen8_layer;

   device = anv_instance_alloc(instance, sizeof(*device), 8,
                               VK_SYSTEM_ALLOC_TYPE_API_OBJECT);
      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

   device->instance = physical_device->instance;

   /* XXX(chadv): Can we dup() physicalDevice->fd here? */
   device->fd = open(physical_device->path, O_RDWR | O_CLOEXEC);
   if (device->fd == -1)

   device->context_id = anv_gem_create_context(device);
   if (device->context_id == -1)

   anv_bo_pool_init(&device->batch_bo_pool, device, ANV_CMD_BUFFER_BATCH_SIZE);

   anv_block_pool_init(&device->dynamic_state_block_pool, device, 2048);

   anv_state_pool_init(&device->dynamic_state_pool,
                       &device->dynamic_state_block_pool);

   anv_block_pool_init(&device->instruction_block_pool, device, 2048);
   anv_block_pool_init(&device->surface_state_block_pool, device, 2048);

   anv_state_pool_init(&device->surface_state_pool,
                       &device->surface_state_block_pool);

   anv_block_pool_init(&device->scratch_block_pool, device, 0x10000);

   device->info = *physical_device->info;

   device->compiler = anv_compiler_create(device);

   pthread_mutex_init(&device->mutex, NULL);

   anv_queue_init(device, &device->queue);

   anv_device_init_meta(device);

   anv_device_init_border_colors(device);

   *pDevice = anv_device_to_handle(device);

   anv_device_free(device, device);

   return vk_error(VK_ERROR_UNAVAILABLE);
VkResult anv_DestroyDevice(

   ANV_FROM_HANDLE(anv_device, device, _device);

   anv_compiler_destroy(device->compiler);

   anv_queue_finish(&device->queue);

   anv_device_finish_meta(device);

   /* We only need to free these to prevent valgrind errors. The backing
    * BO will go away in a couple of lines so we don't actually leak.
    */
   anv_state_pool_free(&device->dynamic_state_pool, device->border_colors);

   anv_bo_pool_finish(&device->batch_bo_pool);
   anv_state_pool_finish(&device->dynamic_state_pool);
   anv_block_pool_finish(&device->dynamic_state_block_pool);
   anv_block_pool_finish(&device->instruction_block_pool);
   anv_state_pool_finish(&device->surface_state_pool);
   anv_block_pool_finish(&device->surface_state_block_pool);
   anv_block_pool_finish(&device->scratch_block_pool);

   anv_instance_free(device->instance, device);
static const VkExtensionProperties global_extensions[] = {
      .extName = "VK_WSI_LunarG",

VkResult anv_GetGlobalExtensionProperties(
    const char* pLayerName,
    VkExtensionProperties* pProperties)

   if (pProperties == NULL) {
      *pCount = ARRAY_SIZE(global_extensions);

   assert(*pCount < ARRAY_SIZE(global_extensions));

   *pCount = ARRAY_SIZE(global_extensions);
   memcpy(pProperties, global_extensions, sizeof(global_extensions));
VkResult anv_GetPhysicalDeviceExtensionProperties(
    VkPhysicalDevice physicalDevice,
    const char* pLayerName,
    VkExtensionProperties* pProperties)

   if (pProperties == NULL) {

   /* None supported at this time */
   return vk_error(VK_ERROR_INVALID_EXTENSION);

VkResult anv_GetGlobalLayerProperties(
    VkLayerProperties* pProperties)

   if (pProperties == NULL) {

   /* None supported at this time */
   return vk_error(VK_ERROR_INVALID_LAYER);

VkResult anv_GetPhysicalDeviceLayerProperties(
    VkPhysicalDevice physicalDevice,
    VkLayerProperties* pProperties)

   if (pProperties == NULL) {

   /* None supported at this time */
   return vk_error(VK_ERROR_INVALID_LAYER);
VkResult anv_GetDeviceQueue(
    uint32_t queueNodeIndex,

   ANV_FROM_HANDLE(anv_device, device, _device);

   assert(queueIndex == 0);

   *pQueue = anv_queue_to_handle(&device->queue);
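/* Each primary command buffer is submitted to the kernel with its own
 * execbuffer2 ioctl; the fence, if any, is submitted as one more tiny batch
 * afterwards.  After submission the BO offsets chosen by the kernel are
 * copied back into the driver-side BO structs, presumably so later
 * submissions can present them as presumed offsets.
 */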
VkResult anv_QueueSubmit(
    uint32_t cmdBufferCount,
    const VkCmdBuffer* pCmdBuffers,

   ANV_FROM_HANDLE(anv_queue, queue, _queue);
   ANV_FROM_HANDLE(anv_fence, fence, _fence);
   struct anv_device *device = queue->device;

   for (uint32_t i = 0; i < cmdBufferCount; i++) {
      ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, pCmdBuffers[i]);

      assert(cmd_buffer->level == VK_CMD_BUFFER_LEVEL_PRIMARY);

      ret = anv_gem_execbuffer(device, &cmd_buffer->execbuf2.execbuf);
         return vk_error(VK_ERROR_UNKNOWN);

      ret = anv_gem_execbuffer(device, &fence->execbuf);
         return vk_error(VK_ERROR_UNKNOWN);

      for (uint32_t i = 0; i < cmd_buffer->execbuf2.bo_count; i++)
         cmd_buffer->execbuf2.bos[i]->offset = cmd_buffer->execbuf2.objects[i].offset;
VkResult anv_QueueWaitIdle(

   ANV_FROM_HANDLE(anv_queue, queue, _queue);

   return vkDeviceWaitIdle(anv_device_to_handle(queue->device));
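/* DeviceWaitIdle works by submitting a throwaway 32-byte batch containing
 * only MI_BATCH_BUFFER_END (plus a NOOP for padding) on the render ring and
 * then blocking in anv_gem_wait() on that batch's BO.  Because execution on
 * the ring is in submission order for this context, the wait returns only
 * after all previously submitted work has drained.
 */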
VkResult anv_DeviceWaitIdle(

   ANV_FROM_HANDLE(anv_device, device, _device);
   struct anv_state state;
   struct anv_batch batch;
   struct drm_i915_gem_execbuffer2 execbuf;
   struct drm_i915_gem_exec_object2 exec2_objects[1];
   struct anv_bo *bo = NULL;

   state = anv_state_pool_alloc(&device->dynamic_state_pool, 32, 32);
   bo = &device->dynamic_state_pool.block_pool->bo;
   batch.start = batch.next = state.map;
   batch.end = state.map + 32;
   anv_batch_emit(&batch, GEN7_MI_BATCH_BUFFER_END);
   anv_batch_emit(&batch, GEN7_MI_NOOP);

   exec2_objects[0].handle = bo->gem_handle;
   exec2_objects[0].relocation_count = 0;
   exec2_objects[0].relocs_ptr = 0;
   exec2_objects[0].alignment = 0;
   exec2_objects[0].offset = bo->offset;
   exec2_objects[0].flags = 0;
   exec2_objects[0].rsvd1 = 0;
   exec2_objects[0].rsvd2 = 0;

   execbuf.buffers_ptr = (uintptr_t) exec2_objects;
   execbuf.buffer_count = 1;
   execbuf.batch_start_offset = state.offset;
   execbuf.batch_len = batch.next - state.map;
   execbuf.cliprects_ptr = 0;
   execbuf.num_cliprects = 0;

   execbuf.flags =
      I915_EXEC_HANDLE_LUT | I915_EXEC_NO_RELOC | I915_EXEC_RENDER;
   execbuf.rsvd1 = device->context_id;

   ret = anv_gem_execbuffer(device, &execbuf);
      result = vk_error(VK_ERROR_UNKNOWN);

   ret = anv_gem_wait(device, bo->gem_handle, &timeout);
      result = vk_error(VK_ERROR_UNKNOWN);

   anv_state_pool_free(&device->dynamic_state_pool, state);

   anv_state_pool_free(&device->dynamic_state_pool, state);
anv_device_alloc(struct anv_device* device,
                 VkSystemAllocType allocType)

   return anv_instance_alloc(device->instance, size, alignment, allocType);

anv_device_free(struct anv_device* device,

   anv_instance_free(device->instance, mem);
anv_bo_init_new(struct anv_bo *bo, struct anv_device *device, uint64_t size)

   bo->gem_handle = anv_gem_create(device, size);
      return vk_error(VK_ERROR_OUT_OF_DEVICE_MEMORY);
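/* A VkDeviceMemory maps 1:1 onto a GEM BO: AllocMemory just creates a BO of
 * the requested size with anv_bo_init_new() and wraps it in an
 * anv_device_memory.  Only memoryTypeIndex 0 exists (see the memory
 * properties above).
 */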
VkResult anv_AllocMemory(
    const VkMemoryAllocInfo* pAllocInfo,
    VkDeviceMemory* pMem)

   ANV_FROM_HANDLE(anv_device, device, _device);
   struct anv_device_memory *mem;

   assert(pAllocInfo->sType == VK_STRUCTURE_TYPE_MEMORY_ALLOC_INFO);

   if (pAllocInfo->memoryTypeIndex != 0) {
      /* We support exactly one memory heap. */
      return vk_error(VK_ERROR_INVALID_VALUE);

   /* FINISHME: Fail if allocation request exceeds heap size. */

   mem = anv_device_alloc(device, sizeof(*mem), 8,
                          VK_SYSTEM_ALLOC_TYPE_API_OBJECT);
      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

   result = anv_bo_init_new(&mem->bo, device, pAllocInfo->allocationSize);
   if (result != VK_SUCCESS)

   *pMem = anv_device_memory_to_handle(mem);

   anv_device_free(device, mem);
VkResult anv_FreeMemory(

   ANV_FROM_HANDLE(anv_device, device, _device);
   ANV_FROM_HANDLE(anv_device_memory, mem, _mem);

   anv_gem_munmap(mem->bo.map, mem->bo.size);

   if (mem->bo.gem_handle != 0)
      anv_gem_close(device, mem->bo.gem_handle);

   anv_device_free(device, mem);
VkResult anv_MapMemory(
    VkMemoryMapFlags flags,

   ANV_FROM_HANDLE(anv_device, device, _device);
   ANV_FROM_HANDLE(anv_device_memory, mem, _mem);

   /* FIXME: Is this supposed to be thread safe? Since vkUnmapMemory() only
    * takes a VkDeviceMemory pointer, it seems like only one map of the memory
    * at a time is valid. We could just mmap up front and return an offset
    * pointer here, but that may exhaust virtual memory on 32 bit
    */
   mem->map = anv_gem_mmap(device, mem->bo.gem_handle, offset, size);
   mem->map_size = size;
VkResult anv_UnmapMemory(

   ANV_FROM_HANDLE(anv_device_memory, mem, _mem);

   anv_gem_munmap(mem->map, mem->map_size);
VkResult anv_FlushMappedMemoryRanges(
    uint32_t memRangeCount,
    const VkMappedMemoryRange* pMemRanges)

   /* clflush here for !llc platforms */

VkResult anv_InvalidateMappedMemoryRanges(
    uint32_t memRangeCount,
    const VkMappedMemoryRange* pMemRanges)

   return anv_FlushMappedMemoryRanges(device, memRangeCount, pMemRanges);
VkResult anv_GetBufferMemoryRequirements(
    VkMemoryRequirements* pMemoryRequirements)

   ANV_FROM_HANDLE(anv_buffer, buffer, _buffer);

   /* The Vulkan spec (git aaed022) says:
    *
    *    memoryTypeBits is a bitfield and contains one bit set for every
    *    supported memory type for the resource. The bit `1<<i` is set if and
    *    only if the memory type `i` in the VkPhysicalDeviceMemoryProperties
    *    structure for the physical device is supported.
    *
    * We support exactly one memory type.
    */
   pMemoryRequirements->memoryTypeBits = 1;

   pMemoryRequirements->size = buffer->size;
   pMemoryRequirements->alignment = 16;
VkResult anv_GetImageMemoryRequirements(
    VkMemoryRequirements* pMemoryRequirements)

   ANV_FROM_HANDLE(anv_image, image, _image);

   /* The Vulkan spec (git aaed022) says:
    *
    *    memoryTypeBits is a bitfield and contains one bit set for every
    *    supported memory type for the resource. The bit `1<<i` is set if and
    *    only if the memory type `i` in the VkPhysicalDeviceMemoryProperties
    *    structure for the physical device is supported.
    *
    * We support exactly one memory type.
    */
   pMemoryRequirements->memoryTypeBits = 1;

   pMemoryRequirements->size = image->size;
   pMemoryRequirements->alignment = image->alignment;
VkResult anv_GetImageSparseMemoryRequirements(
    uint32_t* pNumRequirements,
    VkSparseImageMemoryRequirements* pSparseMemoryRequirements)

   return vk_error(VK_UNSUPPORTED);

VkResult anv_GetDeviceMemoryCommitment(
    VkDeviceMemory memory,
    VkDeviceSize* pCommittedMemoryInBytes)

   *pCommittedMemoryInBytes = 0;
   stub_return(VK_SUCCESS);
VkResult anv_BindBufferMemory(
    VkDeviceMemory _mem,
    VkDeviceSize memOffset)

   ANV_FROM_HANDLE(anv_device_memory, mem, _mem);
   ANV_FROM_HANDLE(anv_buffer, buffer, _buffer);

   buffer->bo = &mem->bo;
   buffer->offset = memOffset;

VkResult anv_BindImageMemory(
    VkDeviceMemory _mem,
    VkDeviceSize memOffset)

   ANV_FROM_HANDLE(anv_device_memory, mem, _mem);
   ANV_FROM_HANDLE(anv_image, image, _image);

   image->bo = &mem->bo;
   image->offset = memOffset;
VkResult anv_QueueBindSparseBufferMemory(
    uint32_t numBindings,
    const VkSparseMemoryBindInfo* pBindInfo)

   stub_return(VK_UNSUPPORTED);

VkResult anv_QueueBindSparseImageOpaqueMemory(
    uint32_t numBindings,
    const VkSparseMemoryBindInfo* pBindInfo)

   stub_return(VK_UNSUPPORTED);

VkResult anv_QueueBindSparseImageMemory(
    uint32_t numBindings,
    const VkSparseImageMemoryBindInfo* pBindInfo)

   stub_return(VK_UNSUPPORTED);
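/* A fence is implemented as a small BO holding a batch that is just
 * MI_BATCH_BUFFER_END.  QueueSubmit() executes this batch after the command
 * buffers, so the fence BO goes busy and then idle once the preceding work
 * completes; GetFenceStatus()/WaitForFences() simply call anv_gem_wait() on
 * the fence BO.
 */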
VkResult anv_CreateFence(
    const VkFenceCreateInfo* pCreateInfo,

   ANV_FROM_HANDLE(anv_device, device, _device);
   struct anv_fence *fence;
   struct anv_batch batch;

   const uint32_t fence_size = 128;

   assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_FENCE_CREATE_INFO);

   fence = anv_device_alloc(device, sizeof(*fence), 8,
                            VK_SYSTEM_ALLOC_TYPE_API_OBJECT);
      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

   result = anv_bo_init_new(&fence->bo, device, fence_size);
   if (result != VK_SUCCESS)

      anv_gem_mmap(device, fence->bo.gem_handle, 0, fence->bo.size);
   batch.next = batch.start = fence->bo.map;
   batch.end = fence->bo.map + fence->bo.size;
   anv_batch_emit(&batch, GEN7_MI_BATCH_BUFFER_END);
   anv_batch_emit(&batch, GEN7_MI_NOOP);

   fence->exec2_objects[0].handle = fence->bo.gem_handle;
   fence->exec2_objects[0].relocation_count = 0;
   fence->exec2_objects[0].relocs_ptr = 0;
   fence->exec2_objects[0].alignment = 0;
   fence->exec2_objects[0].offset = fence->bo.offset;
   fence->exec2_objects[0].flags = 0;
   fence->exec2_objects[0].rsvd1 = 0;
   fence->exec2_objects[0].rsvd2 = 0;

   fence->execbuf.buffers_ptr = (uintptr_t) fence->exec2_objects;
   fence->execbuf.buffer_count = 1;
   fence->execbuf.batch_start_offset = 0;
   fence->execbuf.batch_len = batch.next - fence->bo.map;
   fence->execbuf.cliprects_ptr = 0;
   fence->execbuf.num_cliprects = 0;
   fence->execbuf.DR1 = 0;
   fence->execbuf.DR4 = 0;

   fence->execbuf.flags =
      I915_EXEC_HANDLE_LUT | I915_EXEC_NO_RELOC | I915_EXEC_RENDER;
   fence->execbuf.rsvd1 = device->context_id;
   fence->execbuf.rsvd2 = 0;

   *pFence = anv_fence_to_handle(fence);

   anv_device_free(device, fence);
VkResult anv_DestroyFence(

   ANV_FROM_HANDLE(anv_device, device, _device);
   ANV_FROM_HANDLE(anv_fence, fence, _fence);

   anv_gem_munmap(fence->bo.map, fence->bo.size);
   anv_gem_close(device, fence->bo.gem_handle);
   anv_device_free(device, fence);
VkResult anv_ResetFences(
    uint32_t fenceCount,
    const VkFence* pFences)

   for (uint32_t i = 0; i < fenceCount; i++) {
      ANV_FROM_HANDLE(anv_fence, fence, pFences[i]);
      fence->ready = false;

VkResult anv_GetFenceStatus(

   ANV_FROM_HANDLE(anv_device, device, _device);
   ANV_FROM_HANDLE(anv_fence, fence, _fence);

   ret = anv_gem_wait(device, fence->bo.gem_handle, &t);
      fence->ready = true;

   return VK_NOT_READY;
VkResult anv_WaitForFences(
    uint32_t fenceCount,
    const VkFence* pFences,

   ANV_FROM_HANDLE(anv_device, device, _device);
   int64_t t = timeout;

   /* FIXME: handle !waitAll */

   for (uint32_t i = 0; i < fenceCount; i++) {
      ANV_FROM_HANDLE(anv_fence, fence, pFences[i]);
      ret = anv_gem_wait(device, fence->bo.gem_handle, &t);
      if (ret == -1 && errno == ETIME)

         return vk_error(VK_ERROR_UNKNOWN);
// Queue semaphore functions

VkResult anv_CreateSemaphore(
    const VkSemaphoreCreateInfo* pCreateInfo,
    VkSemaphore* pSemaphore)

   stub_return(VK_UNSUPPORTED);

VkResult anv_DestroySemaphore(
    VkSemaphore semaphore)

   stub_return(VK_UNSUPPORTED);

VkResult anv_QueueSignalSemaphore(
    VkSemaphore semaphore)

   stub_return(VK_UNSUPPORTED);

VkResult anv_QueueWaitSemaphore(
    VkSemaphore semaphore)

   stub_return(VK_UNSUPPORTED);

VkResult anv_CreateEvent(
    const VkEventCreateInfo* pCreateInfo,

   stub_return(VK_UNSUPPORTED);

VkResult anv_DestroyEvent(

   stub_return(VK_UNSUPPORTED);

VkResult anv_GetEventStatus(

   stub_return(VK_UNSUPPORTED);

VkResult anv_SetEvent(

   stub_return(VK_UNSUPPORTED);

VkResult anv_ResetEvent(

   stub_return(VK_UNSUPPORTED);
VkResult anv_CreateBuffer(
    const VkBufferCreateInfo* pCreateInfo,

   ANV_FROM_HANDLE(anv_device, device, _device);
   struct anv_buffer *buffer;

   assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO);

   buffer = anv_device_alloc(device, sizeof(*buffer), 8,
                             VK_SYSTEM_ALLOC_TYPE_API_OBJECT);
      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

   buffer->size = pCreateInfo->size;

   *pBuffer = anv_buffer_to_handle(buffer);

VkResult anv_DestroyBuffer(

   ANV_FROM_HANDLE(anv_device, device, _device);
   ANV_FROM_HANDLE(anv_buffer, buffer, _buffer);

   anv_device_free(device, buffer);
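/* Dispatches on device->info.gen to the per-gen surface-state packing helper;
 * only the gen8 call survives in this listing, and other gens appear to be
 * handled elsewhere or fall into unreachable().
 */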
anv_fill_buffer_surface_state(struct anv_device *device, void *state,
                              const struct anv_format *format,
                              uint32_t offset, uint32_t range)

   switch (device->info.gen) {
      gen8_fill_buffer_surface_state(state, format, offset, range);
      unreachable("unsupported gen\n");
anv_buffer_view_create(
   struct anv_device* device,
   const VkBufferViewCreateInfo* pCreateInfo,
   struct anv_buffer_view** view_out)

   ANV_FROM_HANDLE(anv_buffer, buffer, pCreateInfo->buffer);
   struct anv_buffer_view *view;

   assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_BUFFER_VIEW_CREATE_INFO);

   view = anv_device_alloc(device, sizeof(*view), 8,
                           VK_SYSTEM_ALLOC_TYPE_API_OBJECT);
      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

   view->view = (struct anv_surface_view) {
      .offset = buffer->offset + pCreateInfo->offset,
      .surface_state = anv_state_pool_alloc(&device->surface_state_pool, 64, 64),
      .format = anv_format_for_vk_format(pCreateInfo->format),
      .range = pCreateInfo->range,
VkResult anv_CreateBufferView(
    const VkBufferViewCreateInfo* pCreateInfo,
    VkBufferView* pView)

   return driver_layer->CreateBufferView(_device, pCreateInfo, pView);

VkResult anv_DestroyBufferView(
    VkBufferView _bview)

   ANV_FROM_HANDLE(anv_device, device, _device);
   ANV_FROM_HANDLE(anv_buffer_view, bview, _bview);

   anv_surface_view_fini(device, &bview->view);
   anv_device_free(device, bview);

VkResult anv_CreateSampler(
    const VkSamplerCreateInfo* pCreateInfo,
    VkSampler* pSampler)

   return driver_layer->CreateSampler(_device, pCreateInfo, pSampler);

VkResult anv_DestroySampler(

   ANV_FROM_HANDLE(anv_device, device, _device);
   ANV_FROM_HANDLE(anv_sampler, sampler, _sampler);

   anv_device_free(device, sampler);

// Descriptor set functions
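/* Descriptor set layout creation is a two-pass affair: the first loop over
 * the bindings counts, per shader stage, how many sampler and surface slots
 * the layout needs (plus the number of dynamic buffers); a single allocation
 * then holds the anv_descriptor_set_layout followed by all of the per-stage
 * slot arrays, and a second loop fills in each slot's descriptor index and
 * dynamic-slot assignment.
 */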
VkResult anv_CreateDescriptorSetLayout(
    const VkDescriptorSetLayoutCreateInfo* pCreateInfo,
    VkDescriptorSetLayout* pSetLayout)

   ANV_FROM_HANDLE(anv_device, device, _device);
   struct anv_descriptor_set_layout *set_layout;

   assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO);

   uint32_t sampler_count[VK_SHADER_STAGE_NUM] = { 0, };
   uint32_t surface_count[VK_SHADER_STAGE_NUM] = { 0, };
   uint32_t num_dynamic_buffers = 0;

   uint32_t stages = 0;

   for (uint32_t i = 0; i < pCreateInfo->count; i++) {
      switch (pCreateInfo->pBinding[i].descriptorType) {
      case VK_DESCRIPTOR_TYPE_SAMPLER:
      case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
         for_each_bit(s, pCreateInfo->pBinding[i].stageFlags)
            sampler_count[s] += pCreateInfo->pBinding[i].arraySize;

      switch (pCreateInfo->pBinding[i].descriptorType) {
      case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
      case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE:
      case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
      case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
      case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
      case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
      case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
      case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
      case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
      case VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT:
         for_each_bit(s, pCreateInfo->pBinding[i].stageFlags)
            surface_count[s] += pCreateInfo->pBinding[i].arraySize;

      switch (pCreateInfo->pBinding[i].descriptorType) {
      case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
      case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
         num_dynamic_buffers += pCreateInfo->pBinding[i].arraySize;

      stages |= pCreateInfo->pBinding[i].stageFlags;
      count += pCreateInfo->pBinding[i].arraySize;

   uint32_t sampler_total = 0;
   uint32_t surface_total = 0;
   for (uint32_t s = 0; s < VK_SHADER_STAGE_NUM; s++) {
      sampler_total += sampler_count[s];
      surface_total += surface_count[s];

   size_t size = sizeof(*set_layout) +
      (sampler_total + surface_total) * sizeof(set_layout->entries[0]);
   set_layout = anv_device_alloc(device, size, 8,
                                 VK_SYSTEM_ALLOC_TYPE_API_OBJECT);
      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

   set_layout->num_dynamic_buffers = num_dynamic_buffers;
   set_layout->count = count;
   set_layout->shader_stages = stages;

   struct anv_descriptor_slot *p = set_layout->entries;
   struct anv_descriptor_slot *sampler[VK_SHADER_STAGE_NUM];
   struct anv_descriptor_slot *surface[VK_SHADER_STAGE_NUM];
   for (uint32_t s = 0; s < VK_SHADER_STAGE_NUM; s++) {
      set_layout->stage[s].surface_count = surface_count[s];
      set_layout->stage[s].surface_start = surface[s] = p;
      p += surface_count[s];
      set_layout->stage[s].sampler_count = sampler_count[s];
      set_layout->stage[s].sampler_start = sampler[s] = p;
      p += sampler_count[s];

   uint32_t descriptor = 0;
   int8_t dynamic_slot = 0;

   for (uint32_t i = 0; i < pCreateInfo->count; i++) {
      switch (pCreateInfo->pBinding[i].descriptorType) {
      case VK_DESCRIPTOR_TYPE_SAMPLER:
      case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
         for_each_bit(s, pCreateInfo->pBinding[i].stageFlags)
            for (uint32_t j = 0; j < pCreateInfo->pBinding[i].arraySize; j++) {
               sampler[s]->index = descriptor + j;
               sampler[s]->dynamic_slot = -1;

      switch (pCreateInfo->pBinding[i].descriptorType) {
      case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
      case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:

      switch (pCreateInfo->pBinding[i].descriptorType) {
      case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
      case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE:
      case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
      case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
      case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
      case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
      case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
      case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
      case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
      case VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT:
         for_each_bit(s, pCreateInfo->pBinding[i].stageFlags)
            for (uint32_t j = 0; j < pCreateInfo->pBinding[i].arraySize; j++) {
               surface[s]->index = descriptor + j;
               surface[s]->dynamic_slot = dynamic_slot + j;
               surface[s]->dynamic_slot = -1;

         dynamic_slot += pCreateInfo->pBinding[i].arraySize;

      descriptor += pCreateInfo->pBinding[i].arraySize;

   *pSetLayout = anv_descriptor_set_layout_to_handle(set_layout);
VkResult anv_DestroyDescriptorSetLayout(
    VkDescriptorSetLayout _set_layout)

   ANV_FROM_HANDLE(anv_device, device, _device);
   ANV_FROM_HANDLE(anv_descriptor_set_layout, set_layout, _set_layout);

   anv_device_free(device, set_layout);
VkResult anv_CreateDescriptorPool(
    VkDescriptorPoolUsage poolUsage,
    const VkDescriptorPoolCreateInfo* pCreateInfo,
    VkDescriptorPool* pDescriptorPool)

   anv_finishme("VkDescriptorPool is a stub");
   pDescriptorPool->handle = 1;

VkResult anv_DestroyDescriptorPool(
    VkDescriptorPool _pool)

   anv_finishme("VkDescriptorPool is a stub: free the pool's descriptor sets");

VkResult anv_ResetDescriptorPool(
    VkDescriptorPool descriptorPool)

   anv_finishme("VkDescriptorPool is a stub: free the pool's descriptor sets");
anv_descriptor_set_create(struct anv_device *device,
                          const struct anv_descriptor_set_layout *layout,
                          struct anv_descriptor_set **out_set)

   struct anv_descriptor_set *set;
   size_t size = sizeof(*set) + layout->count * sizeof(set->descriptors[0]);

   set = anv_device_alloc(device, size, 8, VK_SYSTEM_ALLOC_TYPE_API_OBJECT);
      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

   /* A descriptor set may not be 100% filled. Clear the set so we can
    * later detect holes in it.
    */
   memset(set, 0, size);

anv_descriptor_set_destroy(struct anv_device *device,
                           struct anv_descriptor_set *set)

   anv_device_free(device, set);
VkResult anv_AllocDescriptorSets(
    VkDescriptorPool descriptorPool,
    VkDescriptorSetUsage setUsage,
    const VkDescriptorSetLayout* pSetLayouts,
    VkDescriptorSet* pDescriptorSets,

   ANV_FROM_HANDLE(anv_device, device, _device);
   struct anv_descriptor_set *set;

   for (uint32_t i = 0; i < count; i++) {
      ANV_FROM_HANDLE(anv_descriptor_set_layout, layout, pSetLayouts[i]);

      result = anv_descriptor_set_create(device, layout, &set);
      if (result != VK_SUCCESS) {

      pDescriptorSets[i] = anv_descriptor_set_to_handle(set);

VkResult anv_FreeDescriptorSets(
    VkDescriptorPool descriptorPool,
    const VkDescriptorSet* pDescriptorSets)

   ANV_FROM_HANDLE(anv_device, device, _device);

   for (uint32_t i = 0; i < count; i++) {
      ANV_FROM_HANDLE(anv_descriptor_set, set, pDescriptorSets[i]);

      anv_descriptor_set_destroy(device, set);
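/* Descriptor writes are resolved immediately: sampler writes store the
 * anv_sampler pointer, image and buffer writes store the underlying
 * anv_surface_view pointer, and texel buffers / input attachments are still
 * anv_finishme() stubs.  Copies just duplicate the resolved anv_descriptor
 * structs between sets.
 */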
VkResult anv_UpdateDescriptorSets(
    uint32_t writeCount,
    const VkWriteDescriptorSet* pDescriptorWrites,
    const VkCopyDescriptorSet* pDescriptorCopies)

   for (uint32_t i = 0; i < writeCount; i++) {
      const VkWriteDescriptorSet *write = &pDescriptorWrites[i];
      ANV_FROM_HANDLE(anv_descriptor_set, set, write->destSet);

      switch (write->descriptorType) {
      case VK_DESCRIPTOR_TYPE_SAMPLER:
      case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
         for (uint32_t j = 0; j < write->count; j++) {
            set->descriptors[write->destBinding + j].sampler =
               anv_sampler_from_handle(write->pDescriptors[j].sampler);

         if (write->descriptorType == VK_DESCRIPTOR_TYPE_SAMPLER)

      case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE:
      case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
         for (uint32_t j = 0; j < write->count; j++) {
            ANV_FROM_HANDLE(anv_image_view, iview,
                            write->pDescriptors[j].imageView);
            set->descriptors[write->destBinding + j].view = &iview->view;

      case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
      case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
         anv_finishme("texel buffers not implemented");

      case VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT:
         anv_finishme("input attachments not implemented");

      case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
      case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
      case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
      case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
         for (uint32_t j = 0; j < write->count; j++) {
            ANV_FROM_HANDLE(anv_buffer_view, bview,
                            write->pDescriptors[j].bufferView);
            set->descriptors[write->destBinding + j].view = &bview->view;
   for (uint32_t i = 0; i < copyCount; i++) {
      const VkCopyDescriptorSet *copy = &pDescriptorCopies[i];
      ANV_FROM_HANDLE(anv_descriptor_set, src, copy->srcSet);
      ANV_FROM_HANDLE(anv_descriptor_set, dest, copy->destSet);
      for (uint32_t j = 0; j < copy->count; j++) {
         dest->descriptors[copy->destBinding + j] =
            src->descriptors[copy->srcBinding + j];
// State object functions

static inline int64_t
clamp_int64(int64_t x, int64_t min, int64_t max)
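/* clamp_int64() is used below to clamp scissor coordinates, computed in
 * 64-bit to avoid overflow, into the 16-bit range the SCISSOR_RECT fields
 * accept.  Viewport state creation packs one GEN8_SF_CLIP_VIEWPORT,
 * GEN7_CC_VIEWPORT and GEN7_SCISSOR_RECT entry per viewport/scissor pair
 * into dynamic state pool allocations.
 */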
VkResult anv_CreateDynamicViewportState(
    const VkDynamicViewportStateCreateInfo* pCreateInfo,
    VkDynamicViewportState* pState)

   ANV_FROM_HANDLE(anv_device, device, _device);
   struct anv_dynamic_vp_state *state;

   assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_DYNAMIC_VIEWPORT_STATE_CREATE_INFO);

   state = anv_device_alloc(device, sizeof(*state), 8,
                            VK_SYSTEM_ALLOC_TYPE_API_OBJECT);
      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

   unsigned count = pCreateInfo->viewportAndScissorCount;
   state->sf_clip_vp = anv_state_pool_alloc(&device->dynamic_state_pool,
   state->cc_vp = anv_state_pool_alloc(&device->dynamic_state_pool,
   state->scissor = anv_state_pool_alloc(&device->dynamic_state_pool,

   for (uint32_t i = 0; i < pCreateInfo->viewportAndScissorCount; i++) {
      const VkViewport *vp = &pCreateInfo->pViewports[i];
      const VkRect2D *s = &pCreateInfo->pScissors[i];

      /* The gen7 state struct has just the matrix and guardband fields, the
       * gen8 struct adds the min/max viewport fields. */
      struct GEN8_SF_CLIP_VIEWPORT sf_clip_viewport = {
         .ViewportMatrixElementm00 = vp->width / 2,
         .ViewportMatrixElementm11 = vp->height / 2,
         .ViewportMatrixElementm22 = (vp->maxDepth - vp->minDepth) / 2,
         .ViewportMatrixElementm30 = vp->originX + vp->width / 2,
         .ViewportMatrixElementm31 = vp->originY + vp->height / 2,
         .ViewportMatrixElementm32 = (vp->maxDepth + vp->minDepth) / 2,
         .XMinClipGuardband = -1.0f,
         .XMaxClipGuardband = 1.0f,
         .YMinClipGuardband = -1.0f,
         .YMaxClipGuardband = 1.0f,
         .XMinViewPort = vp->originX,
         .XMaxViewPort = vp->originX + vp->width - 1,
         .YMinViewPort = vp->originY,
         .YMaxViewPort = vp->originY + vp->height - 1,

      struct GEN7_CC_VIEWPORT cc_viewport = {
         .MinimumDepth = vp->minDepth,
         .MaximumDepth = vp->maxDepth

      /* Since xmax and ymax are inclusive, we have to have xmax < xmin or
       * ymax < ymin for empty clips. In case clip x, y, width height are all
       * 0, the clamps below produce 0 for xmin, ymin, xmax, ymax, which isn't
       * what we want. Just special case empty clips and produce a canonical
       */
      static const struct GEN7_SCISSOR_RECT empty_scissor = {
         .ScissorRectangleYMin = 1,
         .ScissorRectangleXMin = 1,
         .ScissorRectangleYMax = 0,
         .ScissorRectangleXMax = 0

      const int max = 0xffff;
      struct GEN7_SCISSOR_RECT scissor = {
         /* Do this math using int64_t so overflow gets clamped correctly. */
         .ScissorRectangleYMin = clamp_int64(s->offset.y, 0, max),
         .ScissorRectangleXMin = clamp_int64(s->offset.x, 0, max),
         .ScissorRectangleYMax = clamp_int64((uint64_t) s->offset.y + s->extent.height - 1, 0, max),
         .ScissorRectangleXMax = clamp_int64((uint64_t) s->offset.x + s->extent.width - 1, 0, max)

      GEN8_SF_CLIP_VIEWPORT_pack(NULL, state->sf_clip_vp.map + i * 64, &sf_clip_viewport);
      GEN7_CC_VIEWPORT_pack(NULL, state->cc_vp.map + i * 32, &cc_viewport);

      if (s->extent.width <= 0 || s->extent.height <= 0) {
         GEN7_SCISSOR_RECT_pack(NULL, state->scissor.map + i * 32, &empty_scissor);
         GEN7_SCISSOR_RECT_pack(NULL, state->scissor.map + i * 32, &scissor);

   *pState = anv_dynamic_vp_state_to_handle(state);
VkResult anv_DestroyDynamicViewportState(
    VkDynamicViewportState _vp_state)

   ANV_FROM_HANDLE(anv_device, device, _device);
   ANV_FROM_HANDLE(anv_dynamic_vp_state, vp_state, _vp_state);

   anv_state_pool_free(&device->dynamic_state_pool, vp_state->sf_clip_vp);
   anv_state_pool_free(&device->dynamic_state_pool, vp_state->cc_vp);
   anv_state_pool_free(&device->dynamic_state_pool, vp_state->scissor);

   anv_device_free(device, vp_state);
VkResult anv_CreateDynamicRasterState(
    const VkDynamicRasterStateCreateInfo* pCreateInfo,
    VkDynamicRasterState* pState)

   return driver_layer->CreateDynamicRasterState(_device, pCreateInfo, pState);

VkResult anv_DestroyDynamicRasterState(
    VkDynamicRasterState _rs_state)

   ANV_FROM_HANDLE(anv_device, device, _device);
   ANV_FROM_HANDLE(anv_dynamic_rs_state, rs_state, _rs_state);

   anv_device_free(device, rs_state);
VkResult anv_CreateDynamicColorBlendState(
    const VkDynamicColorBlendStateCreateInfo* pCreateInfo,
    VkDynamicColorBlendState* pState)

   ANV_FROM_HANDLE(anv_device, device, _device);
   struct anv_dynamic_cb_state *state;

   assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_DYNAMIC_COLOR_BLEND_STATE_CREATE_INFO);

   state = anv_device_alloc(device, sizeof(*state), 8,
                            VK_SYSTEM_ALLOC_TYPE_API_OBJECT);
      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

   struct GEN7_COLOR_CALC_STATE color_calc_state = {
      .BlendConstantColorRed = pCreateInfo->blendConst[0],
      .BlendConstantColorGreen = pCreateInfo->blendConst[1],
      .BlendConstantColorBlue = pCreateInfo->blendConst[2],
      .BlendConstantColorAlpha = pCreateInfo->blendConst[3]

   GEN7_COLOR_CALC_STATE_pack(NULL, state->color_calc_state, &color_calc_state);

   *pState = anv_dynamic_cb_state_to_handle(state);

VkResult anv_DestroyDynamicColorBlendState(
    VkDynamicColorBlendState _cb_state)

   ANV_FROM_HANDLE(anv_device, device, _device);
   ANV_FROM_HANDLE(anv_dynamic_cb_state, cb_state, _cb_state);

   anv_device_free(device, cb_state);
VkResult anv_CreateDynamicDepthStencilState(
    const VkDynamicDepthStencilStateCreateInfo* pCreateInfo,
    VkDynamicDepthStencilState* pState)

   return driver_layer->CreateDynamicDepthStencilState(_device, pCreateInfo, pState);

VkResult anv_DestroyDynamicDepthStencilState(
    VkDynamicDepthStencilState _ds_state)

   ANV_FROM_HANDLE(anv_device, device, _device);
   ANV_FROM_HANDLE(anv_dynamic_ds_state, ds_state, _ds_state);

   anv_device_free(device, ds_state);
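/* Besides recording the attachment views and dimensions, framebuffer
 * creation also bakes a one-entry dynamic viewport/scissor state covering
 * the whole framebuffer (framebuffer->vp_state); it is destroyed again in
 * anv_DestroyFramebuffer().
 */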
VkResult anv_CreateFramebuffer(
    const VkFramebufferCreateInfo* pCreateInfo,
    VkFramebuffer* pFramebuffer)

   ANV_FROM_HANDLE(anv_device, device, _device);
   struct anv_framebuffer *framebuffer;

   assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO);

   size_t size = sizeof(*framebuffer) +
                 sizeof(struct anv_attachment_view *) * pCreateInfo->attachmentCount;
   framebuffer = anv_device_alloc(device, size, 8,
                                  VK_SYSTEM_ALLOC_TYPE_API_OBJECT);
   if (framebuffer == NULL)
      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

   framebuffer->attachment_count = pCreateInfo->attachmentCount;
   for (uint32_t i = 0; i < pCreateInfo->attachmentCount; i++) {
      ANV_FROM_HANDLE(anv_attachment_view, view,
                      pCreateInfo->pAttachments[i].view);

      framebuffer->attachments[i] = view;

   framebuffer->width = pCreateInfo->width;
   framebuffer->height = pCreateInfo->height;
   framebuffer->layers = pCreateInfo->layers;

   anv_CreateDynamicViewportState(anv_device_to_handle(device),
      &(VkDynamicViewportStateCreateInfo) {
         .sType = VK_STRUCTURE_TYPE_DYNAMIC_VIEWPORT_STATE_CREATE_INFO,
         .viewportAndScissorCount = 1,
         .pViewports = (VkViewport[]) {
               .width = pCreateInfo->width,
               .height = pCreateInfo->height,
         .pScissors = (VkRect2D[]) {
            { pCreateInfo->width, pCreateInfo->height } },
      &framebuffer->vp_state);

   *pFramebuffer = anv_framebuffer_to_handle(framebuffer);

VkResult anv_DestroyFramebuffer(

   ANV_FROM_HANDLE(anv_device, device, _device);
   ANV_FROM_HANDLE(anv_framebuffer, fb, _fb);

   anv_DestroyDynamicViewportState(anv_device_to_handle(device),
   anv_device_free(device, fb);
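/* The render pass, its subpass array and its attachment array live in one
 * allocation (attachments_offset marks where the attachment array starts),
 * while each subpass's input/color/resolve index arrays are allocated
 * separately and freed one by one in anv_DestroyRenderPass().
 */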
VkResult anv_CreateRenderPass(
    const VkRenderPassCreateInfo* pCreateInfo,
    VkRenderPass* pRenderPass)

   ANV_FROM_HANDLE(anv_device, device, _device);
   struct anv_render_pass *pass;
   size_t attachments_offset;

   assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO);

   size = sizeof(*pass);
   size += pCreateInfo->subpassCount * sizeof(pass->subpasses[0]);
   attachments_offset = size;
   size += pCreateInfo->attachmentCount * sizeof(pass->attachments[0]);

   pass = anv_device_alloc(device, size, 8,
                           VK_SYSTEM_ALLOC_TYPE_API_OBJECT);
      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

   /* Clear the subpasses along with the parent pass. This is required because
    * each array member of anv_subpass must be a valid pointer if not NULL.
    */
   memset(pass, 0, size);
   pass->attachment_count = pCreateInfo->attachmentCount;
   pass->subpass_count = pCreateInfo->subpassCount;
   pass->attachments = (void *) pass + attachments_offset;

   for (uint32_t i = 0; i < pCreateInfo->attachmentCount; i++) {
      struct anv_render_pass_attachment *att = &pass->attachments[i];

      att->format = anv_format_for_vk_format(pCreateInfo->pAttachments[i].format);
      att->samples = pCreateInfo->pAttachments[i].samples;
      att->load_op = pCreateInfo->pAttachments[i].loadOp;
      att->stencil_load_op = pCreateInfo->pAttachments[i].stencilLoadOp;
      // att->store_op = pCreateInfo->pAttachments[i].storeOp;
      // att->stencil_store_op = pCreateInfo->pAttachments[i].stencilStoreOp;

      if (att->load_op == VK_ATTACHMENT_LOAD_OP_CLEAR) {
         if (anv_format_is_color(att->format)) {
            ++pass->num_color_clear_attachments;
         } else if (att->format->depth_format) {
            pass->has_depth_clear_attachment = true;
      } else if (att->stencil_load_op == VK_ATTACHMENT_LOAD_OP_CLEAR) {
         assert(att->format->has_stencil);
         pass->has_stencil_clear_attachment = true;

   for (uint32_t i = 0; i < pCreateInfo->subpassCount; i++) {
      const VkSubpassDescription *desc = &pCreateInfo->pSubpasses[i];
      struct anv_subpass *subpass = &pass->subpasses[i];

      subpass->input_count = desc->inputCount;
      subpass->color_count = desc->colorCount;

      if (desc->inputCount > 0) {
         subpass->input_attachments =
            anv_device_alloc(device, desc->inputCount * sizeof(uint32_t),
                             8, VK_SYSTEM_ALLOC_TYPE_API_OBJECT);

         for (uint32_t j = 0; j < desc->inputCount; j++) {
            subpass->input_attachments[j]
               = desc->inputAttachments[j].attachment;

      if (desc->colorCount > 0) {
         subpass->color_attachments =
            anv_device_alloc(device, desc->colorCount * sizeof(uint32_t),
                             8, VK_SYSTEM_ALLOC_TYPE_API_OBJECT);

         for (uint32_t j = 0; j < desc->colorCount; j++) {
            subpass->color_attachments[j]
               = desc->colorAttachments[j].attachment;

      if (desc->resolveAttachments) {
         subpass->resolve_attachments =
            anv_device_alloc(device, desc->colorCount * sizeof(uint32_t),
                             8, VK_SYSTEM_ALLOC_TYPE_API_OBJECT);

         for (uint32_t j = 0; j < desc->colorCount; j++) {
            subpass->resolve_attachments[j]
               = desc->resolveAttachments[j].attachment;

      subpass->depth_stencil_attachment = desc->depthStencilAttachment.attachment;

   *pRenderPass = anv_render_pass_to_handle(pass);
VkResult anv_DestroyRenderPass(

   ANV_FROM_HANDLE(anv_device, device, _device);
   ANV_FROM_HANDLE(anv_render_pass, pass, _pass);

   for (uint32_t i = 0; i < pass->subpass_count; i++) {
      /* In VkSubpassCreateInfo, each of the attachment arrays may be null.
       * Don't free the null arrays.
       */
      struct anv_subpass *subpass = &pass->subpasses[i];

      anv_device_free(device, subpass->input_attachments);
      anv_device_free(device, subpass->color_attachments);
      anv_device_free(device, subpass->resolve_attachments);

   anv_device_free(device, pass);
VkResult anv_GetRenderAreaGranularity(
    VkRenderPass renderPass,
    VkExtent2D* pGranularity)

   *pGranularity = (VkExtent2D) { 1, 1 };
void vkCmdDbgMarkerBegin(
    VkCmdBuffer cmdBuffer,
    const char* pMarker)
   __attribute__ ((visibility ("default")));

void vkCmdDbgMarkerEnd(
    VkCmdBuffer cmdBuffer)
   __attribute__ ((visibility ("default")));

void vkCmdDbgMarkerBegin(
    VkCmdBuffer cmdBuffer,
    const char* pMarker)

void vkCmdDbgMarkerEnd(
    VkCmdBuffer cmdBuffer)