2 * Copyright © 2015 Intel Corporation
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
30 #include "anv_private.h"
31 #include "mesa/main/git_sha1.h"
32 #include "util/strtod.h"
35 anv_physical_device_init(struct anv_physical_device
*device
,
36 struct anv_instance
*instance
,
42 fd
= open(path
, O_RDWR
| O_CLOEXEC
);
44 return vk_errorf(VK_ERROR_UNAVAILABLE
, "failed to open %s: %m", path
);
46 device
->instance
= instance
;
49 device
->chipset_id
= anv_gem_get_param(fd
, I915_PARAM_CHIPSET_ID
);
50 if (!device
->chipset_id
) {
51 result
= vk_errorf(VK_ERROR_UNAVAILABLE
, "failed to get chipset id: %m");
55 device
->name
= brw_get_device_name(device
->chipset_id
);
56 device
->info
= brw_get_device_info(device
->chipset_id
, -1);
58 result
= vk_errorf(VK_ERROR_UNAVAILABLE
, "failed to get device info");
62 if (anv_gem_get_aperture(fd
, &device
->aperture_size
) == -1) {
63 result
= vk_errorf(VK_ERROR_UNAVAILABLE
, "failed to get aperture size: %m");
67 if (!anv_gem_get_param(fd
, I915_PARAM_HAS_WAIT_TIMEOUT
)) {
68 result
= vk_errorf(VK_ERROR_UNAVAILABLE
, "kernel missing gem wait");
72 if (!anv_gem_get_param(fd
, I915_PARAM_HAS_EXECBUF2
)) {
73 result
= vk_errorf(VK_ERROR_UNAVAILABLE
, "kernel missing execbuf2");
77 if (!anv_gem_get_param(fd
, I915_PARAM_HAS_LLC
)) {
78 result
= vk_errorf(VK_ERROR_UNAVAILABLE
, "non-llc gpu");
91 static void *default_alloc(
95 VkSystemAllocType allocType
)
100 static void default_free(
107 static const VkAllocCallbacks default_alloc_callbacks
= {
109 .pfnAlloc
= default_alloc
,
110 .pfnFree
= default_free
113 VkResult
anv_CreateInstance(
114 const VkInstanceCreateInfo
* pCreateInfo
,
115 VkInstance
* pInstance
)
117 struct anv_instance
*instance
;
118 const VkAllocCallbacks
*alloc_callbacks
= &default_alloc_callbacks
;
119 void *user_data
= NULL
;
121 assert(pCreateInfo
->sType
== VK_STRUCTURE_TYPE_INSTANCE_CREATE_INFO
);
123 if (pCreateInfo
->pAllocCb
) {
124 alloc_callbacks
= pCreateInfo
->pAllocCb
;
125 user_data
= pCreateInfo
->pAllocCb
->pUserData
;
127 instance
= alloc_callbacks
->pfnAlloc(user_data
, sizeof(*instance
), 8,
128 VK_SYSTEM_ALLOC_TYPE_API_OBJECT
);
130 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY
);
132 instance
->pAllocUserData
= alloc_callbacks
->pUserData
;
133 instance
->pfnAlloc
= alloc_callbacks
->pfnAlloc
;
134 instance
->pfnFree
= alloc_callbacks
->pfnFree
;
135 instance
->apiVersion
= pCreateInfo
->pAppInfo
->apiVersion
;
136 instance
->physicalDeviceCount
= 0;
140 VG(VALGRIND_CREATE_MEMPOOL(instance
, 0, false));
142 *pInstance
= anv_instance_to_handle(instance
);
147 VkResult
anv_DestroyInstance(
148 VkInstance _instance
)
150 ANV_FROM_HANDLE(anv_instance
, instance
, _instance
);
152 VG(VALGRIND_DESTROY_MEMPOOL(instance
));
156 instance
->pfnFree(instance
->pAllocUserData
, instance
);
162 anv_instance_alloc(struct anv_instance
*instance
, size_t size
,
163 size_t alignment
, VkSystemAllocType allocType
)
165 void *mem
= instance
->pfnAlloc(instance
->pAllocUserData
,
166 size
, alignment
, allocType
);
168 VALGRIND_MEMPOOL_ALLOC(instance
, mem
, size
);
169 VALGRIND_MAKE_MEM_UNDEFINED(mem
, size
);
175 anv_instance_free(struct anv_instance
*instance
, void *mem
)
180 VALGRIND_MEMPOOL_FREE(instance
, mem
);
182 instance
->pfnFree(instance
->pAllocUserData
, mem
);
185 VkResult
anv_EnumeratePhysicalDevices(
186 VkInstance _instance
,
187 uint32_t* pPhysicalDeviceCount
,
188 VkPhysicalDevice
* pPhysicalDevices
)
190 ANV_FROM_HANDLE(anv_instance
, instance
, _instance
);
193 if (instance
->physicalDeviceCount
== 0) {
194 result
= anv_physical_device_init(&instance
->physicalDevice
,
195 instance
, "/dev/dri/renderD128");
196 if (result
!= VK_SUCCESS
)
199 instance
->physicalDeviceCount
= 1;
202 /* pPhysicalDeviceCount is an out parameter if pPhysicalDevices is NULL;
203 * otherwise it's an inout parameter.
205 * The Vulkan spec (git aaed022) says:
207 * pPhysicalDeviceCount is a pointer to an unsigned integer variable
208 * that is initialized with the number of devices the application is
209 * prepared to receive handles to. pname:pPhysicalDevices is pointer to
210 * an array of at least this many VkPhysicalDevice handles [...].
212 * Upon success, if pPhysicalDevices is NULL, vkEnumeratePhysicalDevices
213 * overwrites the contents of the variable pointed to by
214 * pPhysicalDeviceCount with the number of physical devices in in the
215 * instance; otherwise, vkEnumeratePhysicalDevices overwrites
216 * pPhysicalDeviceCount with the number of physical handles written to
219 if (!pPhysicalDevices
) {
220 *pPhysicalDeviceCount
= instance
->physicalDeviceCount
;
221 } else if (*pPhysicalDeviceCount
>= 1) {
222 pPhysicalDevices
[0] = anv_physical_device_to_handle(&instance
->physicalDevice
);
223 *pPhysicalDeviceCount
= 1;
225 *pPhysicalDeviceCount
= 0;
231 VkResult
anv_GetPhysicalDeviceFeatures(
232 VkPhysicalDevice physicalDevice
,
233 VkPhysicalDeviceFeatures
* pFeatures
)
235 anv_finishme("Get correct values for PhysicalDeviceFeatures");
237 *pFeatures
= (VkPhysicalDeviceFeatures
) {
238 .robustBufferAccess
= false,
239 .fullDrawIndexUint32
= false,
240 .imageCubeArray
= false,
241 .independentBlend
= false,
242 .geometryShader
= true,
243 .tessellationShader
= false,
244 .sampleRateShading
= false,
245 .dualSourceBlend
= true,
247 .instancedDrawIndirect
= true,
249 .depthBiasClamp
= false,
250 .fillModeNonSolid
= true,
251 .depthBounds
= false,
254 .textureCompressionETC2
= true,
255 .textureCompressionASTC_LDR
= true,
256 .textureCompressionBC
= true,
257 .pipelineStatisticsQuery
= true,
258 .vertexSideEffects
= false,
259 .tessellationSideEffects
= false,
260 .geometrySideEffects
= false,
261 .fragmentSideEffects
= false,
262 .shaderTessellationPointSize
= false,
263 .shaderGeometryPointSize
= true,
264 .shaderTextureGatherExtended
= true,
265 .shaderStorageImageExtendedFormats
= false,
266 .shaderStorageImageMultisample
= false,
267 .shaderStorageBufferArrayConstantIndexing
= false,
268 .shaderStorageImageArrayConstantIndexing
= false,
269 .shaderUniformBufferArrayDynamicIndexing
= true,
270 .shaderSampledImageArrayDynamicIndexing
= false,
271 .shaderStorageBufferArrayDynamicIndexing
= false,
272 .shaderStorageImageArrayDynamicIndexing
= false,
273 .shaderClipDistance
= false,
274 .shaderCullDistance
= false,
275 .shaderFloat64
= false,
276 .shaderInt64
= false,
277 .shaderFloat16
= false,
278 .shaderInt16
= false,
284 VkResult
anv_GetPhysicalDeviceLimits(
285 VkPhysicalDevice physicalDevice
,
286 VkPhysicalDeviceLimits
* pLimits
)
288 ANV_FROM_HANDLE(anv_physical_device
, physical_device
, physicalDevice
);
289 const struct brw_device_info
*devinfo
= physical_device
->info
;
291 anv_finishme("Get correct values for PhysicalDeviceLimits");
293 *pLimits
= (VkPhysicalDeviceLimits
) {
294 .maxImageDimension1D
= (1 << 14),
295 .maxImageDimension2D
= (1 << 14),
296 .maxImageDimension3D
= (1 << 10),
297 .maxImageDimensionCube
= (1 << 14),
298 .maxImageArrayLayers
= (1 << 10),
299 .maxTexelBufferSize
= (1 << 14),
300 .maxUniformBufferSize
= UINT32_MAX
,
301 .maxStorageBufferSize
= UINT32_MAX
,
302 .maxPushConstantsSize
= MAX_PUSH_CONSTANTS_SIZE
,
303 .maxMemoryAllocationCount
= UINT32_MAX
,
304 .bufferImageGranularity
= 64, /* A cache line */
305 .maxBoundDescriptorSets
= MAX_SETS
,
306 .maxDescriptorSets
= UINT32_MAX
,
307 .maxPerStageDescriptorSamplers
= 64,
308 .maxPerStageDescriptorUniformBuffers
= 64,
309 .maxPerStageDescriptorStorageBuffers
= 64,
310 .maxPerStageDescriptorSampledImages
= 64,
311 .maxPerStageDescriptorStorageImages
= 64,
312 .maxDescriptorSetSamplers
= 256,
313 .maxDescriptorSetUniformBuffers
= 256,
314 .maxDescriptorSetStorageBuffers
= 256,
315 .maxDescriptorSetSampledImages
= 256,
316 .maxDescriptorSetStorageImages
= 256,
317 .maxVertexInputAttributes
= 32,
318 .maxVertexInputAttributeOffset
= 256,
319 .maxVertexInputBindingStride
= 256,
320 .maxVertexOutputComponents
= 32,
321 .maxTessGenLevel
= 0,
322 .maxTessPatchSize
= 0,
323 .maxTessControlPerVertexInputComponents
= 0,
324 .maxTessControlPerVertexOutputComponents
= 0,
325 .maxTessControlPerPatchOutputComponents
= 0,
326 .maxTessControlTotalOutputComponents
= 0,
327 .maxTessEvaluationInputComponents
= 0,
328 .maxTessEvaluationOutputComponents
= 0,
329 .maxGeometryShaderInvocations
= 6,
330 .maxGeometryInputComponents
= 16,
331 .maxGeometryOutputComponents
= 16,
332 .maxGeometryOutputVertices
= 16,
333 .maxGeometryTotalOutputComponents
= 16,
334 .maxFragmentInputComponents
= 16,
335 .maxFragmentOutputBuffers
= 8,
336 .maxFragmentDualSourceBuffers
= 2,
337 .maxFragmentCombinedOutputResources
= 8,
338 .maxComputeSharedMemorySize
= 1024,
339 .maxComputeWorkGroupCount
= {
340 16 * devinfo
->max_cs_threads
,
341 16 * devinfo
->max_cs_threads
,
342 16 * devinfo
->max_cs_threads
,
344 .maxComputeWorkGroupInvocations
= 16 * devinfo
->max_cs_threads
,
345 .maxComputeWorkGroupSize
= {
346 16 * devinfo
->max_cs_threads
,
347 16 * devinfo
->max_cs_threads
,
348 16 * devinfo
->max_cs_threads
,
350 .subPixelPrecisionBits
= 4 /* FIXME */,
351 .subTexelPrecisionBits
= 4 /* FIXME */,
352 .mipmapPrecisionBits
= 4 /* FIXME */,
353 .maxDrawIndexedIndexValue
= UINT32_MAX
,
354 .maxDrawIndirectInstanceCount
= UINT32_MAX
,
355 .primitiveRestartForPatches
= UINT32_MAX
,
356 .maxSamplerLodBias
= 16,
357 .maxSamplerAnisotropy
= 16,
359 .maxDynamicViewportStates
= UINT32_MAX
,
360 .maxViewportDimensions
= { (1 << 14), (1 << 14) },
361 .viewportBoundsRange
= { -1.0, 1.0 }, /* FIXME */
362 .viewportSubPixelBits
= 13, /* We take a float? */
363 .minMemoryMapAlignment
= 64, /* A cache line */
364 .minTexelBufferOffsetAlignment
= 1,
365 .minUniformBufferOffsetAlignment
= 1,
366 .minStorageBufferOffsetAlignment
= 1,
367 .minTexelOffset
= 0, /* FIXME */
368 .maxTexelOffset
= 0, /* FIXME */
369 .minTexelGatherOffset
= 0, /* FIXME */
370 .maxTexelGatherOffset
= 0, /* FIXME */
371 .minInterpolationOffset
= 0, /* FIXME */
372 .maxInterpolationOffset
= 0, /* FIXME */
373 .subPixelInterpolationOffsetBits
= 0, /* FIXME */
374 .maxFramebufferWidth
= (1 << 14),
375 .maxFramebufferHeight
= (1 << 14),
376 .maxFramebufferLayers
= (1 << 10),
377 .maxFramebufferColorSamples
= 8,
378 .maxFramebufferDepthSamples
= 8,
379 .maxFramebufferStencilSamples
= 8,
380 .maxColorAttachments
= MAX_RTS
,
381 .maxSampledImageColorSamples
= 8,
382 .maxSampledImageDepthSamples
= 8,
383 .maxSampledImageIntegerSamples
= 1,
384 .maxStorageImageSamples
= 1,
385 .maxSampleMaskWords
= 1,
386 .timestampFrequency
= 1000 * 1000 * 1000 / 80,
387 .maxClipDistances
= 0 /* FIXME */,
388 .maxCullDistances
= 0 /* FIXME */,
389 .maxCombinedClipAndCullDistances
= 0 /* FIXME */,
390 .pointSizeRange
= { 0.125, 255.875 },
391 .lineWidthRange
= { 0.0, 7.9921875 },
392 .pointSizeGranularity
= (1.0 / 8.0),
393 .lineWidthGranularity
= (1.0 / 128.0),
399 VkResult
anv_GetPhysicalDeviceProperties(
400 VkPhysicalDevice physicalDevice
,
401 VkPhysicalDeviceProperties
* pProperties
)
403 ANV_FROM_HANDLE(anv_physical_device
, pdevice
, physicalDevice
);
405 *pProperties
= (VkPhysicalDeviceProperties
) {
406 .apiVersion
= VK_MAKE_VERSION(0, 138, 1),
409 .deviceId
= pdevice
->chipset_id
,
410 .deviceType
= VK_PHYSICAL_DEVICE_TYPE_INTEGRATED_GPU
,
413 strcpy(pProperties
->deviceName
, pdevice
->name
);
414 snprintf((char *)pProperties
->pipelineCacheUUID
, VK_UUID_LENGTH
,
415 "anv-%s", MESA_GIT_SHA1
+ 4);
420 VkResult
anv_GetPhysicalDeviceQueueCount(
421 VkPhysicalDevice physicalDevice
,
429 VkResult
anv_GetPhysicalDeviceQueueProperties(
430 VkPhysicalDevice physicalDevice
,
432 VkPhysicalDeviceQueueProperties
* pQueueProperties
)
436 *pQueueProperties
= (VkPhysicalDeviceQueueProperties
) {
437 .queueFlags
= VK_QUEUE_GRAPHICS_BIT
|
438 VK_QUEUE_COMPUTE_BIT
|
441 .supportsTimestamps
= true,
447 VkResult
anv_GetPhysicalDeviceMemoryProperties(
448 VkPhysicalDevice physicalDevice
,
449 VkPhysicalDeviceMemoryProperties
* pMemoryProperties
)
451 ANV_FROM_HANDLE(anv_physical_device
, physical_device
, physicalDevice
);
452 VkDeviceSize heap_size
;
454 /* Reserve some wiggle room for the driver by exposing only 75% of the
455 * aperture to the heap.
457 heap_size
= 3 * physical_device
->aperture_size
/ 4;
459 /* The property flags below are valid only for llc platforms. */
460 pMemoryProperties
->memoryTypeCount
= 1;
461 pMemoryProperties
->memoryTypes
[0] = (VkMemoryType
) {
462 .propertyFlags
= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT
,
466 pMemoryProperties
->memoryHeapCount
= 1;
467 pMemoryProperties
->memoryHeaps
[0] = (VkMemoryHeap
) {
469 .flags
= VK_MEMORY_HEAP_HOST_LOCAL
,
475 PFN_vkVoidFunction
anv_GetInstanceProcAddr(
479 return anv_lookup_entrypoint(pName
);
482 PFN_vkVoidFunction
anv_GetDeviceProcAddr(
486 return anv_lookup_entrypoint(pName
);
490 anv_queue_init(struct anv_device
*device
, struct anv_queue
*queue
)
492 queue
->device
= device
;
493 queue
->pool
= &device
->surface_state_pool
;
495 queue
->completed_serial
= anv_state_pool_alloc(queue
->pool
, 4, 4);
496 if (queue
->completed_serial
.map
== NULL
)
497 return vk_error(VK_ERROR_OUT_OF_DEVICE_MEMORY
);
499 *(uint32_t *)queue
->completed_serial
.map
= 0;
500 queue
->next_serial
= 1;
506 anv_queue_finish(struct anv_queue
*queue
)
509 /* This gets torn down with the device so we only need to do this if
510 * valgrind is present.
512 anv_state_pool_free(queue
->pool
, queue
->completed_serial
);
517 anv_device_init_border_colors(struct anv_device
*device
)
519 static const VkClearColorValue border_colors
[] = {
520 [VK_BORDER_COLOR_FLOAT_TRANSPARENT_BLACK
] = { .f32
= { 0.0, 0.0, 0.0, 0.0 } },
521 [VK_BORDER_COLOR_FLOAT_OPAQUE_BLACK
] = { .f32
= { 0.0, 0.0, 0.0, 1.0 } },
522 [VK_BORDER_COLOR_FLOAT_OPAQUE_WHITE
] = { .f32
= { 1.0, 1.0, 1.0, 1.0 } },
523 [VK_BORDER_COLOR_INT_TRANSPARENT_BLACK
] = { .u32
= { 0, 0, 0, 0 } },
524 [VK_BORDER_COLOR_INT_OPAQUE_BLACK
] = { .u32
= { 0, 0, 0, 1 } },
525 [VK_BORDER_COLOR_INT_OPAQUE_WHITE
] = { .u32
= { 1, 1, 1, 1 } },
528 device
->border_colors
=
529 anv_state_pool_alloc(&device
->dynamic_state_pool
,
530 sizeof(border_colors
), 32);
531 memcpy(device
->border_colors
.map
, border_colors
, sizeof(border_colors
));
534 VkResult
anv_CreateDevice(
535 VkPhysicalDevice physicalDevice
,
536 const VkDeviceCreateInfo
* pCreateInfo
,
539 ANV_FROM_HANDLE(anv_physical_device
, physical_device
, physicalDevice
);
540 struct anv_instance
*instance
= physical_device
->instance
;
541 struct anv_device
*device
;
543 assert(pCreateInfo
->sType
== VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO
);
545 switch (physical_device
->info
->gen
) {
547 driver_layer
= &gen7_layer
;
550 driver_layer
= &gen8_layer
;
554 device
= anv_instance_alloc(instance
, sizeof(*device
), 8,
555 VK_SYSTEM_ALLOC_TYPE_API_OBJECT
);
557 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY
);
559 device
->instance
= physical_device
->instance
;
561 /* XXX(chadv): Can we dup() physicalDevice->fd here? */
562 device
->fd
= open(physical_device
->path
, O_RDWR
| O_CLOEXEC
);
563 if (device
->fd
== -1)
566 device
->context_id
= anv_gem_create_context(device
);
567 if (device
->context_id
== -1)
570 anv_bo_pool_init(&device
->batch_bo_pool
, device
, ANV_CMD_BUFFER_BATCH_SIZE
);
572 anv_block_pool_init(&device
->dynamic_state_block_pool
, device
, 2048);
574 anv_state_pool_init(&device
->dynamic_state_pool
,
575 &device
->dynamic_state_block_pool
);
577 anv_block_pool_init(&device
->instruction_block_pool
, device
, 2048);
578 anv_block_pool_init(&device
->surface_state_block_pool
, device
, 2048);
580 anv_state_pool_init(&device
->surface_state_pool
,
581 &device
->surface_state_block_pool
);
583 anv_block_pool_init(&device
->scratch_block_pool
, device
, 0x10000);
585 device
->info
= *physical_device
->info
;
587 device
->compiler
= anv_compiler_create(device
);
589 pthread_mutex_init(&device
->mutex
, NULL
);
591 anv_queue_init(device
, &device
->queue
);
593 anv_device_init_meta(device
);
595 anv_device_init_border_colors(device
);
597 *pDevice
= anv_device_to_handle(device
);
604 anv_device_free(device
, device
);
606 return vk_error(VK_ERROR_UNAVAILABLE
);
609 VkResult
anv_DestroyDevice(
612 ANV_FROM_HANDLE(anv_device
, device
, _device
);
614 anv_compiler_destroy(device
->compiler
);
616 anv_queue_finish(&device
->queue
);
618 anv_device_finish_meta(device
);
621 /* We only need to free these to prevent valgrind errors. The backing
622 * BO will go away in a couple of lines so we don't actually leak.
624 anv_state_pool_free(&device
->dynamic_state_pool
, device
->border_colors
);
627 anv_bo_pool_finish(&device
->batch_bo_pool
);
628 anv_state_pool_finish(&device
->dynamic_state_pool
);
629 anv_block_pool_finish(&device
->dynamic_state_block_pool
);
630 anv_block_pool_finish(&device
->instruction_block_pool
);
631 anv_state_pool_finish(&device
->surface_state_pool
);
632 anv_block_pool_finish(&device
->surface_state_block_pool
);
633 anv_block_pool_finish(&device
->scratch_block_pool
);
637 anv_instance_free(device
->instance
, device
);
642 static const VkExtensionProperties global_extensions
[] = {
644 .extName
= "VK_WSI_LunarG",
649 VkResult
anv_GetGlobalExtensionProperties(
650 const char* pLayerName
,
652 VkExtensionProperties
* pProperties
)
654 if (pProperties
== NULL
) {
655 *pCount
= ARRAY_SIZE(global_extensions
);
659 assert(*pCount
<= ARRAY_SIZE(global_extensions
));
661 *pCount
= ARRAY_SIZE(global_extensions
);
662 memcpy(pProperties
, global_extensions
, sizeof(global_extensions
));
667 VkResult
anv_GetPhysicalDeviceExtensionProperties(
668 VkPhysicalDevice physicalDevice
,
669 const char* pLayerName
,
671 VkExtensionProperties
* pProperties
)
673 if (pProperties
== NULL
) {
678 /* None supported at this time */
679 return vk_error(VK_ERROR_INVALID_EXTENSION
);
682 VkResult
anv_GetGlobalLayerProperties(
684 VkLayerProperties
* pProperties
)
686 if (pProperties
== NULL
) {
691 /* None supported at this time */
692 return vk_error(VK_ERROR_INVALID_LAYER
);
695 VkResult
anv_GetPhysicalDeviceLayerProperties(
696 VkPhysicalDevice physicalDevice
,
698 VkLayerProperties
* pProperties
)
700 if (pProperties
== NULL
) {
705 /* None supported at this time */
706 return vk_error(VK_ERROR_INVALID_LAYER
);
709 VkResult
anv_GetDeviceQueue(
711 uint32_t queueNodeIndex
,
715 ANV_FROM_HANDLE(anv_device
, device
, _device
);
717 assert(queueIndex
== 0);
719 *pQueue
= anv_queue_to_handle(&device
->queue
);
724 VkResult
anv_QueueSubmit(
726 uint32_t cmdBufferCount
,
727 const VkCmdBuffer
* pCmdBuffers
,
730 ANV_FROM_HANDLE(anv_queue
, queue
, _queue
);
731 ANV_FROM_HANDLE(anv_fence
, fence
, _fence
);
732 struct anv_device
*device
= queue
->device
;
735 for (uint32_t i
= 0; i
< cmdBufferCount
; i
++) {
736 ANV_FROM_HANDLE(anv_cmd_buffer
, cmd_buffer
, pCmdBuffers
[i
]);
738 assert(cmd_buffer
->level
== VK_CMD_BUFFER_LEVEL_PRIMARY
);
740 ret
= anv_gem_execbuffer(device
, &cmd_buffer
->execbuf2
.execbuf
);
742 return vk_errorf(VK_ERROR_UNKNOWN
, "execbuf2 failed: %m");
745 ret
= anv_gem_execbuffer(device
, &fence
->execbuf
);
747 return vk_errorf(VK_ERROR_UNKNOWN
, "execbuf2 failed: %m");
750 for (uint32_t i
= 0; i
< cmd_buffer
->execbuf2
.bo_count
; i
++)
751 cmd_buffer
->execbuf2
.bos
[i
]->offset
= cmd_buffer
->execbuf2
.objects
[i
].offset
;
757 VkResult
anv_QueueWaitIdle(
760 ANV_FROM_HANDLE(anv_queue
, queue
, _queue
);
762 return vkDeviceWaitIdle(anv_device_to_handle(queue
->device
));
765 VkResult
anv_DeviceWaitIdle(
768 ANV_FROM_HANDLE(anv_device
, device
, _device
);
769 struct anv_state state
;
770 struct anv_batch batch
;
771 struct drm_i915_gem_execbuffer2 execbuf
;
772 struct drm_i915_gem_exec_object2 exec2_objects
[1];
773 struct anv_bo
*bo
= NULL
;
778 state
= anv_state_pool_alloc(&device
->dynamic_state_pool
, 32, 32);
779 bo
= &device
->dynamic_state_pool
.block_pool
->bo
;
780 batch
.start
= batch
.next
= state
.map
;
781 batch
.end
= state
.map
+ 32;
782 anv_batch_emit(&batch
, GEN7_MI_BATCH_BUFFER_END
);
783 anv_batch_emit(&batch
, GEN7_MI_NOOP
);
785 exec2_objects
[0].handle
= bo
->gem_handle
;
786 exec2_objects
[0].relocation_count
= 0;
787 exec2_objects
[0].relocs_ptr
= 0;
788 exec2_objects
[0].alignment
= 0;
789 exec2_objects
[0].offset
= bo
->offset
;
790 exec2_objects
[0].flags
= 0;
791 exec2_objects
[0].rsvd1
= 0;
792 exec2_objects
[0].rsvd2
= 0;
794 execbuf
.buffers_ptr
= (uintptr_t) exec2_objects
;
795 execbuf
.buffer_count
= 1;
796 execbuf
.batch_start_offset
= state
.offset
;
797 execbuf
.batch_len
= batch
.next
- state
.map
;
798 execbuf
.cliprects_ptr
= 0;
799 execbuf
.num_cliprects
= 0;
804 I915_EXEC_HANDLE_LUT
| I915_EXEC_NO_RELOC
| I915_EXEC_RENDER
;
805 execbuf
.rsvd1
= device
->context_id
;
808 ret
= anv_gem_execbuffer(device
, &execbuf
);
810 result
= vk_errorf(VK_ERROR_UNKNOWN
, "execbuf2 failed: %m");
815 ret
= anv_gem_wait(device
, bo
->gem_handle
, &timeout
);
817 result
= vk_errorf(VK_ERROR_UNKNOWN
, "execbuf2 failed: %m");
821 anv_state_pool_free(&device
->dynamic_state_pool
, state
);
826 anv_state_pool_free(&device
->dynamic_state_pool
, state
);
832 anv_device_alloc(struct anv_device
* device
,
835 VkSystemAllocType allocType
)
837 return anv_instance_alloc(device
->instance
, size
, alignment
, allocType
);
841 anv_device_free(struct anv_device
* device
,
844 anv_instance_free(device
->instance
, mem
);
848 anv_bo_init_new(struct anv_bo
*bo
, struct anv_device
*device
, uint64_t size
)
850 bo
->gem_handle
= anv_gem_create(device
, size
);
852 return vk_error(VK_ERROR_OUT_OF_DEVICE_MEMORY
);
862 VkResult
anv_AllocMemory(
864 const VkMemoryAllocInfo
* pAllocInfo
,
865 VkDeviceMemory
* pMem
)
867 ANV_FROM_HANDLE(anv_device
, device
, _device
);
868 struct anv_device_memory
*mem
;
871 assert(pAllocInfo
->sType
== VK_STRUCTURE_TYPE_MEMORY_ALLOC_INFO
);
873 if (pAllocInfo
->memoryTypeIndex
!= 0) {
874 /* We support exactly one memory heap. */
875 return vk_error(VK_ERROR_INVALID_VALUE
);
878 /* FINISHME: Fail if allocation request exceeds heap size. */
880 mem
= anv_device_alloc(device
, sizeof(*mem
), 8,
881 VK_SYSTEM_ALLOC_TYPE_API_OBJECT
);
883 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY
);
885 result
= anv_bo_init_new(&mem
->bo
, device
, pAllocInfo
->allocationSize
);
886 if (result
!= VK_SUCCESS
)
889 *pMem
= anv_device_memory_to_handle(mem
);
894 anv_device_free(device
, mem
);
899 VkResult
anv_FreeMemory(
903 ANV_FROM_HANDLE(anv_device
, device
, _device
);
904 ANV_FROM_HANDLE(anv_device_memory
, mem
, _mem
);
907 anv_gem_munmap(mem
->bo
.map
, mem
->bo
.size
);
909 if (mem
->bo
.gem_handle
!= 0)
910 anv_gem_close(device
, mem
->bo
.gem_handle
);
912 anv_device_free(device
, mem
);
917 VkResult
anv_MapMemory(
922 VkMemoryMapFlags flags
,
925 ANV_FROM_HANDLE(anv_device
, device
, _device
);
926 ANV_FROM_HANDLE(anv_device_memory
, mem
, _mem
);
928 /* FIXME: Is this supposed to be thread safe? Since vkUnmapMemory() only
929 * takes a VkDeviceMemory pointer, it seems like only one map of the memory
930 * at a time is valid. We could just mmap up front and return an offset
931 * pointer here, but that may exhaust virtual memory on 32 bit
934 mem
->map
= anv_gem_mmap(device
, mem
->bo
.gem_handle
, offset
, size
);
935 mem
->map_size
= size
;
942 VkResult
anv_UnmapMemory(
946 ANV_FROM_HANDLE(anv_device_memory
, mem
, _mem
);
948 anv_gem_munmap(mem
->map
, mem
->map_size
);
953 VkResult
anv_FlushMappedMemoryRanges(
955 uint32_t memRangeCount
,
956 const VkMappedMemoryRange
* pMemRanges
)
958 /* clflush here for !llc platforms */
963 VkResult
anv_InvalidateMappedMemoryRanges(
965 uint32_t memRangeCount
,
966 const VkMappedMemoryRange
* pMemRanges
)
968 return anv_FlushMappedMemoryRanges(device
, memRangeCount
, pMemRanges
);
971 VkResult
anv_GetBufferMemoryRequirements(
974 VkMemoryRequirements
* pMemoryRequirements
)
976 ANV_FROM_HANDLE(anv_buffer
, buffer
, _buffer
);
978 /* The Vulkan spec (git aaed022) says:
980 * memoryTypeBits is a bitfield and contains one bit set for every
981 * supported memory type for the resource. The bit `1<<i` is set if and
982 * only if the memory type `i` in the VkPhysicalDeviceMemoryProperties
983 * structure for the physical device is supported.
985 * We support exactly one memory type.
987 pMemoryRequirements
->memoryTypeBits
= 1;
989 pMemoryRequirements
->size
= buffer
->size
;
990 pMemoryRequirements
->alignment
= 16;
995 VkResult
anv_GetImageMemoryRequirements(
998 VkMemoryRequirements
* pMemoryRequirements
)
1000 ANV_FROM_HANDLE(anv_image
, image
, _image
);
1002 /* The Vulkan spec (git aaed022) says:
1004 * memoryTypeBits is a bitfield and contains one bit set for every
1005 * supported memory type for the resource. The bit `1<<i` is set if and
1006 * only if the memory type `i` in the VkPhysicalDeviceMemoryProperties
1007 * structure for the physical device is supported.
1009 * We support exactly one memory type.
1011 pMemoryRequirements
->memoryTypeBits
= 1;
1013 pMemoryRequirements
->size
= image
->size
;
1014 pMemoryRequirements
->alignment
= image
->alignment
;
1019 VkResult
anv_GetImageSparseMemoryRequirements(
1022 uint32_t* pNumRequirements
,
1023 VkSparseImageMemoryRequirements
* pSparseMemoryRequirements
)
1025 return vk_error(VK_UNSUPPORTED
);
1028 VkResult
anv_GetDeviceMemoryCommitment(
1030 VkDeviceMemory memory
,
1031 VkDeviceSize
* pCommittedMemoryInBytes
)
1033 *pCommittedMemoryInBytes
= 0;
1034 stub_return(VK_SUCCESS
);
1037 VkResult
anv_BindBufferMemory(
1040 VkDeviceMemory _mem
,
1041 VkDeviceSize memOffset
)
1043 ANV_FROM_HANDLE(anv_device_memory
, mem
, _mem
);
1044 ANV_FROM_HANDLE(anv_buffer
, buffer
, _buffer
);
1046 buffer
->bo
= &mem
->bo
;
1047 buffer
->offset
= memOffset
;
1052 VkResult
anv_BindImageMemory(
1055 VkDeviceMemory _mem
,
1056 VkDeviceSize memOffset
)
1058 ANV_FROM_HANDLE(anv_device_memory
, mem
, _mem
);
1059 ANV_FROM_HANDLE(anv_image
, image
, _image
);
1061 image
->bo
= &mem
->bo
;
1062 image
->offset
= memOffset
;
1067 VkResult
anv_QueueBindSparseBufferMemory(
1070 uint32_t numBindings
,
1071 const VkSparseMemoryBindInfo
* pBindInfo
)
1073 stub_return(VK_UNSUPPORTED
);
1076 VkResult
anv_QueueBindSparseImageOpaqueMemory(
1079 uint32_t numBindings
,
1080 const VkSparseMemoryBindInfo
* pBindInfo
)
1082 stub_return(VK_UNSUPPORTED
);
1085 VkResult
anv_QueueBindSparseImageMemory(
1088 uint32_t numBindings
,
1089 const VkSparseImageMemoryBindInfo
* pBindInfo
)
1091 stub_return(VK_UNSUPPORTED
);
1094 VkResult
anv_CreateFence(
1096 const VkFenceCreateInfo
* pCreateInfo
,
1099 ANV_FROM_HANDLE(anv_device
, device
, _device
);
1100 struct anv_fence
*fence
;
1101 struct anv_batch batch
;
1104 const uint32_t fence_size
= 128;
1106 assert(pCreateInfo
->sType
== VK_STRUCTURE_TYPE_FENCE_CREATE_INFO
);
1108 fence
= anv_device_alloc(device
, sizeof(*fence
), 8,
1109 VK_SYSTEM_ALLOC_TYPE_API_OBJECT
);
1111 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY
);
1113 result
= anv_bo_init_new(&fence
->bo
, device
, fence_size
);
1114 if (result
!= VK_SUCCESS
)
1118 anv_gem_mmap(device
, fence
->bo
.gem_handle
, 0, fence
->bo
.size
);
1119 batch
.next
= batch
.start
= fence
->bo
.map
;
1120 batch
.end
= fence
->bo
.map
+ fence
->bo
.size
;
1121 anv_batch_emit(&batch
, GEN7_MI_BATCH_BUFFER_END
);
1122 anv_batch_emit(&batch
, GEN7_MI_NOOP
);
1124 fence
->exec2_objects
[0].handle
= fence
->bo
.gem_handle
;
1125 fence
->exec2_objects
[0].relocation_count
= 0;
1126 fence
->exec2_objects
[0].relocs_ptr
= 0;
1127 fence
->exec2_objects
[0].alignment
= 0;
1128 fence
->exec2_objects
[0].offset
= fence
->bo
.offset
;
1129 fence
->exec2_objects
[0].flags
= 0;
1130 fence
->exec2_objects
[0].rsvd1
= 0;
1131 fence
->exec2_objects
[0].rsvd2
= 0;
1133 fence
->execbuf
.buffers_ptr
= (uintptr_t) fence
->exec2_objects
;
1134 fence
->execbuf
.buffer_count
= 1;
1135 fence
->execbuf
.batch_start_offset
= 0;
1136 fence
->execbuf
.batch_len
= batch
.next
- fence
->bo
.map
;
1137 fence
->execbuf
.cliprects_ptr
= 0;
1138 fence
->execbuf
.num_cliprects
= 0;
1139 fence
->execbuf
.DR1
= 0;
1140 fence
->execbuf
.DR4
= 0;
1142 fence
->execbuf
.flags
=
1143 I915_EXEC_HANDLE_LUT
| I915_EXEC_NO_RELOC
| I915_EXEC_RENDER
;
1144 fence
->execbuf
.rsvd1
= device
->context_id
;
1145 fence
->execbuf
.rsvd2
= 0;
1147 *pFence
= anv_fence_to_handle(fence
);
1152 anv_device_free(device
, fence
);
1157 VkResult
anv_DestroyFence(
1161 ANV_FROM_HANDLE(anv_device
, device
, _device
);
1162 ANV_FROM_HANDLE(anv_fence
, fence
, _fence
);
1164 anv_gem_munmap(fence
->bo
.map
, fence
->bo
.size
);
1165 anv_gem_close(device
, fence
->bo
.gem_handle
);
1166 anv_device_free(device
, fence
);
1171 VkResult
anv_ResetFences(
1173 uint32_t fenceCount
,
1174 const VkFence
* pFences
)
1176 for (uint32_t i
= 0; i
< fenceCount
; i
++) {
1177 ANV_FROM_HANDLE(anv_fence
, fence
, pFences
[i
]);
1178 fence
->ready
= false;
1184 VkResult
anv_GetFenceStatus(
1188 ANV_FROM_HANDLE(anv_device
, device
, _device
);
1189 ANV_FROM_HANDLE(anv_fence
, fence
, _fence
);
1196 ret
= anv_gem_wait(device
, fence
->bo
.gem_handle
, &t
);
1198 fence
->ready
= true;
1202 return VK_NOT_READY
;
1205 VkResult
anv_WaitForFences(
1207 uint32_t fenceCount
,
1208 const VkFence
* pFences
,
1212 ANV_FROM_HANDLE(anv_device
, device
, _device
);
1213 int64_t t
= timeout
;
1216 /* FIXME: handle !waitAll */
1218 for (uint32_t i
= 0; i
< fenceCount
; i
++) {
1219 ANV_FROM_HANDLE(anv_fence
, fence
, pFences
[i
]);
1220 ret
= anv_gem_wait(device
, fence
->bo
.gem_handle
, &t
);
1221 if (ret
== -1 && errno
== ETIME
)
1224 return vk_errorf(VK_ERROR_UNKNOWN
, "gem wait failed: %m");
1230 // Queue semaphore functions
1232 VkResult
anv_CreateSemaphore(
1234 const VkSemaphoreCreateInfo
* pCreateInfo
,
1235 VkSemaphore
* pSemaphore
)
1237 stub_return(VK_UNSUPPORTED
);
1240 VkResult
anv_DestroySemaphore(
1242 VkSemaphore semaphore
)
1244 stub_return(VK_UNSUPPORTED
);
1247 VkResult
anv_QueueSignalSemaphore(
1249 VkSemaphore semaphore
)
1251 stub_return(VK_UNSUPPORTED
);
1254 VkResult
anv_QueueWaitSemaphore(
1256 VkSemaphore semaphore
)
1258 stub_return(VK_UNSUPPORTED
);
1263 VkResult
anv_CreateEvent(
1265 const VkEventCreateInfo
* pCreateInfo
,
1268 stub_return(VK_UNSUPPORTED
);
1271 VkResult
anv_DestroyEvent(
1275 stub_return(VK_UNSUPPORTED
);
1278 VkResult
anv_GetEventStatus(
1282 stub_return(VK_UNSUPPORTED
);
1285 VkResult
anv_SetEvent(
1289 stub_return(VK_UNSUPPORTED
);
1292 VkResult
anv_ResetEvent(
1296 stub_return(VK_UNSUPPORTED
);
1301 VkResult
anv_CreateBuffer(
1303 const VkBufferCreateInfo
* pCreateInfo
,
1306 ANV_FROM_HANDLE(anv_device
, device
, _device
);
1307 struct anv_buffer
*buffer
;
1309 assert(pCreateInfo
->sType
== VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO
);
1311 buffer
= anv_device_alloc(device
, sizeof(*buffer
), 8,
1312 VK_SYSTEM_ALLOC_TYPE_API_OBJECT
);
1314 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY
);
1316 buffer
->size
= pCreateInfo
->size
;
1320 *pBuffer
= anv_buffer_to_handle(buffer
);
1325 VkResult
anv_DestroyBuffer(
1329 ANV_FROM_HANDLE(anv_device
, device
, _device
);
1330 ANV_FROM_HANDLE(anv_buffer
, buffer
, _buffer
);
1332 anv_device_free(device
, buffer
);
1338 anv_fill_buffer_surface_state(struct anv_device
*device
, void *state
,
1339 const struct anv_format
*format
,
1340 uint32_t offset
, uint32_t range
)
1342 switch (device
->info
.gen
) {
1344 gen7_fill_buffer_surface_state(state
, format
, offset
, range
);
1347 gen8_fill_buffer_surface_state(state
, format
, offset
, range
);
1350 unreachable("unsupported gen\n");
1355 anv_buffer_view_create(
1356 struct anv_device
* device
,
1357 const VkBufferViewCreateInfo
* pCreateInfo
,
1358 struct anv_buffer_view
** view_out
)
1360 ANV_FROM_HANDLE(anv_buffer
, buffer
, pCreateInfo
->buffer
);
1361 struct anv_buffer_view
*view
;
1363 assert(pCreateInfo
->sType
== VK_STRUCTURE_TYPE_BUFFER_VIEW_CREATE_INFO
);
1365 view
= anv_device_alloc(device
, sizeof(*view
), 8,
1366 VK_SYSTEM_ALLOC_TYPE_API_OBJECT
);
1368 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY
);
1370 view
->view
= (struct anv_surface_view
) {
1372 .offset
= buffer
->offset
+ pCreateInfo
->offset
,
1373 .surface_state
= anv_state_pool_alloc(&device
->surface_state_pool
, 64, 64),
1374 .format
= anv_format_for_vk_format(pCreateInfo
->format
),
1375 .range
= pCreateInfo
->range
,
1383 VkResult
anv_DestroyBufferView(
1385 VkBufferView _bview
)
1387 ANV_FROM_HANDLE(anv_device
, device
, _device
);
1388 ANV_FROM_HANDLE(anv_buffer_view
, bview
, _bview
);
1390 anv_surface_view_fini(device
, &bview
->view
);
1391 anv_device_free(device
, bview
);
1396 VkResult
anv_DestroySampler(
1400 ANV_FROM_HANDLE(anv_device
, device
, _device
);
1401 ANV_FROM_HANDLE(anv_sampler
, sampler
, _sampler
);
1403 anv_device_free(device
, sampler
);
1408 // Descriptor set functions
1410 VkResult
anv_CreateDescriptorSetLayout(
1412 const VkDescriptorSetLayoutCreateInfo
* pCreateInfo
,
1413 VkDescriptorSetLayout
* pSetLayout
)
1415 ANV_FROM_HANDLE(anv_device
, device
, _device
);
1416 struct anv_descriptor_set_layout
*set_layout
;
1418 assert(pCreateInfo
->sType
== VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO
);
1420 uint32_t sampler_count
[VK_SHADER_STAGE_NUM
] = { 0, };
1421 uint32_t surface_count
[VK_SHADER_STAGE_NUM
] = { 0, };
1422 uint32_t num_dynamic_buffers
= 0;
1424 uint32_t stages
= 0;
1427 for (uint32_t i
= 0; i
< pCreateInfo
->count
; i
++) {
1428 switch (pCreateInfo
->pBinding
[i
].descriptorType
) {
1429 case VK_DESCRIPTOR_TYPE_SAMPLER
:
1430 case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER
:
1431 for_each_bit(s
, pCreateInfo
->pBinding
[i
].stageFlags
)
1432 sampler_count
[s
] += pCreateInfo
->pBinding
[i
].arraySize
;
1438 switch (pCreateInfo
->pBinding
[i
].descriptorType
) {
1439 case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER
:
1440 case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE
:
1441 case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE
:
1442 case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER
:
1443 case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER
:
1444 case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER
:
1445 case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER
:
1446 case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC
:
1447 case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC
:
1448 case VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT
:
1449 for_each_bit(s
, pCreateInfo
->pBinding
[i
].stageFlags
)
1450 surface_count
[s
] += pCreateInfo
->pBinding
[i
].arraySize
;
1456 switch (pCreateInfo
->pBinding
[i
].descriptorType
) {
1457 case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC
:
1458 case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC
:
1459 num_dynamic_buffers
+= pCreateInfo
->pBinding
[i
].arraySize
;
1465 stages
|= pCreateInfo
->pBinding
[i
].stageFlags
;
1466 count
+= pCreateInfo
->pBinding
[i
].arraySize
;
1469 uint32_t sampler_total
= 0;
1470 uint32_t surface_total
= 0;
1471 for (uint32_t s
= 0; s
< VK_SHADER_STAGE_NUM
; s
++) {
1472 sampler_total
+= sampler_count
[s
];
1473 surface_total
+= surface_count
[s
];
1476 size_t size
= sizeof(*set_layout
) +
1477 (sampler_total
+ surface_total
) * sizeof(set_layout
->entries
[0]);
1478 set_layout
= anv_device_alloc(device
, size
, 8,
1479 VK_SYSTEM_ALLOC_TYPE_API_OBJECT
);
1481 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY
);
1483 set_layout
->num_dynamic_buffers
= num_dynamic_buffers
;
1484 set_layout
->count
= count
;
1485 set_layout
->shader_stages
= stages
;
1487 struct anv_descriptor_slot
*p
= set_layout
->entries
;
1488 struct anv_descriptor_slot
*sampler
[VK_SHADER_STAGE_NUM
];
1489 struct anv_descriptor_slot
*surface
[VK_SHADER_STAGE_NUM
];
1490 for (uint32_t s
= 0; s
< VK_SHADER_STAGE_NUM
; s
++) {
1491 set_layout
->stage
[s
].surface_count
= surface_count
[s
];
1492 set_layout
->stage
[s
].surface_start
= surface
[s
] = p
;
1493 p
+= surface_count
[s
];
1494 set_layout
->stage
[s
].sampler_count
= sampler_count
[s
];
1495 set_layout
->stage
[s
].sampler_start
= sampler
[s
] = p
;
1496 p
+= sampler_count
[s
];
1499 uint32_t descriptor
= 0;
1500 int8_t dynamic_slot
= 0;
1502 for (uint32_t i
= 0; i
< pCreateInfo
->count
; i
++) {
1503 switch (pCreateInfo
->pBinding
[i
].descriptorType
) {
1504 case VK_DESCRIPTOR_TYPE_SAMPLER
:
1505 case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER
:
1506 for_each_bit(s
, pCreateInfo
->pBinding
[i
].stageFlags
)
1507 for (uint32_t j
= 0; j
< pCreateInfo
->pBinding
[i
].arraySize
; j
++) {
1508 sampler
[s
]->index
= descriptor
+ j
;
1509 sampler
[s
]->dynamic_slot
= -1;
1517 switch (pCreateInfo
->pBinding
[i
].descriptorType
) {
1518 case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC
:
1519 case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC
:
1527 switch (pCreateInfo
->pBinding
[i
].descriptorType
) {
1528 case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER
:
1529 case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE
:
1530 case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE
:
1531 case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER
:
1532 case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER
:
1533 case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER
:
1534 case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER
:
1535 case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC
:
1536 case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC
:
1537 case VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT
:
1538 for_each_bit(s
, pCreateInfo
->pBinding
[i
].stageFlags
)
1539 for (uint32_t j
= 0; j
< pCreateInfo
->pBinding
[i
].arraySize
; j
++) {
1540 surface
[s
]->index
= descriptor
+ j
;
1542 surface
[s
]->dynamic_slot
= dynamic_slot
+ j
;
1544 surface
[s
]->dynamic_slot
= -1;
1553 dynamic_slot
+= pCreateInfo
->pBinding
[i
].arraySize
;
1555 descriptor
+= pCreateInfo
->pBinding
[i
].arraySize
;
1558 *pSetLayout
= anv_descriptor_set_layout_to_handle(set_layout
);
1563 VkResult
anv_DestroyDescriptorSetLayout(
1565 VkDescriptorSetLayout _set_layout
)
1567 ANV_FROM_HANDLE(anv_device
, device
, _device
);
1568 ANV_FROM_HANDLE(anv_descriptor_set_layout
, set_layout
, _set_layout
);
1570 anv_device_free(device
, set_layout
);
1575 VkResult
anv_CreateDescriptorPool(
1577 VkDescriptorPoolUsage poolUsage
,
1579 const VkDescriptorPoolCreateInfo
* pCreateInfo
,
1580 VkDescriptorPool
* pDescriptorPool
)
1582 anv_finishme("VkDescriptorPool is a stub");
1583 pDescriptorPool
->handle
= 1;
1587 VkResult
anv_DestroyDescriptorPool(
1589 VkDescriptorPool _pool
)
1591 anv_finishme("VkDescriptorPool is a stub: free the pool's descriptor sets");
1595 VkResult
anv_ResetDescriptorPool(
1597 VkDescriptorPool descriptorPool
)
1599 anv_finishme("VkDescriptorPool is a stub: free the pool's descriptor sets");
1604 anv_descriptor_set_create(struct anv_device
*device
,
1605 const struct anv_descriptor_set_layout
*layout
,
1606 struct anv_descriptor_set
**out_set
)
1608 struct anv_descriptor_set
*set
;
1609 size_t size
= sizeof(*set
) + layout
->count
* sizeof(set
->descriptors
[0]);
1611 set
= anv_device_alloc(device
, size
, 8, VK_SYSTEM_ALLOC_TYPE_API_OBJECT
);
1613 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY
);
1615 /* A descriptor set may not be 100% filled. Clear the set so we can can
1616 * later detect holes in it.
1618 memset(set
, 0, size
);
/* Free a descriptor set created by anv_descriptor_set_create(). */
void
anv_descriptor_set_destroy(struct anv_device *device,
                           struct anv_descriptor_set *set)
{
   anv_device_free(device, set);
}
1632 VkResult
anv_AllocDescriptorSets(
1634 VkDescriptorPool descriptorPool
,
1635 VkDescriptorSetUsage setUsage
,
1637 const VkDescriptorSetLayout
* pSetLayouts
,
1638 VkDescriptorSet
* pDescriptorSets
,
1641 ANV_FROM_HANDLE(anv_device
, device
, _device
);
1644 struct anv_descriptor_set
*set
;
1646 for (uint32_t i
= 0; i
< count
; i
++) {
1647 ANV_FROM_HANDLE(anv_descriptor_set_layout
, layout
, pSetLayouts
[i
]);
1649 result
= anv_descriptor_set_create(device
, layout
, &set
);
1650 if (result
!= VK_SUCCESS
) {
1655 pDescriptorSets
[i
] = anv_descriptor_set_to_handle(set
);
1663 VkResult
anv_FreeDescriptorSets(
1665 VkDescriptorPool descriptorPool
,
1667 const VkDescriptorSet
* pDescriptorSets
)
1669 ANV_FROM_HANDLE(anv_device
, device
, _device
);
1671 for (uint32_t i
= 0; i
< count
; i
++) {
1672 ANV_FROM_HANDLE(anv_descriptor_set
, set
, pDescriptorSets
[i
]);
1674 anv_descriptor_set_destroy(device
, set
);
1680 VkResult
anv_UpdateDescriptorSets(
1682 uint32_t writeCount
,
1683 const VkWriteDescriptorSet
* pDescriptorWrites
,
1685 const VkCopyDescriptorSet
* pDescriptorCopies
)
1687 for (uint32_t i
= 0; i
< writeCount
; i
++) {
1688 const VkWriteDescriptorSet
*write
= &pDescriptorWrites
[i
];
1689 ANV_FROM_HANDLE(anv_descriptor_set
, set
, write
->destSet
);
1691 switch (write
->descriptorType
) {
1692 case VK_DESCRIPTOR_TYPE_SAMPLER
:
1693 case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER
:
1694 for (uint32_t j
= 0; j
< write
->count
; j
++) {
1695 set
->descriptors
[write
->destBinding
+ j
].sampler
=
1696 anv_sampler_from_handle(write
->pDescriptors
[j
].sampler
);
1699 if (write
->descriptorType
== VK_DESCRIPTOR_TYPE_SAMPLER
)
1704 case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE
:
1705 case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE
:
1706 for (uint32_t j
= 0; j
< write
->count
; j
++) {
1707 ANV_FROM_HANDLE(anv_image_view
, iview
,
1708 write
->pDescriptors
[j
].imageView
);
1709 set
->descriptors
[write
->destBinding
+ j
].view
= &iview
->view
;
1713 case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER
:
1714 case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER
:
1715 anv_finishme("texel buffers not implemented");
1718 case VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT
:
1719 anv_finishme("input attachments not implemented");
1722 case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER
:
1723 case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER
:
1724 case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC
:
1725 case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC
:
1726 for (uint32_t j
= 0; j
< write
->count
; j
++) {
1727 ANV_FROM_HANDLE(anv_buffer_view
, bview
,
1728 write
->pDescriptors
[j
].bufferView
);
1729 set
->descriptors
[write
->destBinding
+ j
].view
= &bview
->view
;
1737 for (uint32_t i
= 0; i
< copyCount
; i
++) {
1738 const VkCopyDescriptorSet
*copy
= &pDescriptorCopies
[i
];
1739 ANV_FROM_HANDLE(anv_descriptor_set
, src
, copy
->destSet
);
1740 ANV_FROM_HANDLE(anv_descriptor_set
, dest
, copy
->destSet
);
1741 for (uint32_t j
= 0; j
< copy
->count
; j
++) {
1742 dest
->descriptors
[copy
->destBinding
+ j
] =
1743 src
->descriptors
[copy
->srcBinding
+ j
];
1750 // State object functions
/* Clamp x into [min, max]. Done in int64_t so callers can feed it
 * intermediate values that overflow 32 bits (see the scissor math below).
 */
static inline int64_t
clamp_int64(int64_t x, int64_t min, int64_t max)
{
   if (x < min)
      return min;
   else if (x < max)
      return x;
   else
      return max;
}
1763 VkResult
anv_CreateDynamicViewportState(
1765 const VkDynamicViewportStateCreateInfo
* pCreateInfo
,
1766 VkDynamicViewportState
* pState
)
1768 ANV_FROM_HANDLE(anv_device
, device
, _device
);
1769 struct anv_dynamic_vp_state
*state
;
1771 assert(pCreateInfo
->sType
== VK_STRUCTURE_TYPE_DYNAMIC_VIEWPORT_STATE_CREATE_INFO
);
1773 state
= anv_device_alloc(device
, sizeof(*state
), 8,
1774 VK_SYSTEM_ALLOC_TYPE_API_OBJECT
);
1776 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY
);
1778 unsigned count
= pCreateInfo
->viewportAndScissorCount
;
1779 state
->sf_clip_vp
= anv_state_pool_alloc(&device
->dynamic_state_pool
,
1781 state
->cc_vp
= anv_state_pool_alloc(&device
->dynamic_state_pool
,
1783 state
->scissor
= anv_state_pool_alloc(&device
->dynamic_state_pool
,
1786 for (uint32_t i
= 0; i
< pCreateInfo
->viewportAndScissorCount
; i
++) {
1787 const VkViewport
*vp
= &pCreateInfo
->pViewports
[i
];
1788 const VkRect2D
*s
= &pCreateInfo
->pScissors
[i
];
1790 /* The gen7 state struct has just the matrix and guardband fields, the
1791 * gen8 struct adds the min/max viewport fields. */
1792 struct GEN8_SF_CLIP_VIEWPORT sf_clip_viewport
= {
1793 .ViewportMatrixElementm00
= vp
->width
/ 2,
1794 .ViewportMatrixElementm11
= vp
->height
/ 2,
1795 .ViewportMatrixElementm22
= (vp
->maxDepth
- vp
->minDepth
) / 2,
1796 .ViewportMatrixElementm30
= vp
->originX
+ vp
->width
/ 2,
1797 .ViewportMatrixElementm31
= vp
->originY
+ vp
->height
/ 2,
1798 .ViewportMatrixElementm32
= (vp
->maxDepth
+ vp
->minDepth
) / 2,
1799 .XMinClipGuardband
= -1.0f
,
1800 .XMaxClipGuardband
= 1.0f
,
1801 .YMinClipGuardband
= -1.0f
,
1802 .YMaxClipGuardband
= 1.0f
,
1803 .XMinViewPort
= vp
->originX
,
1804 .XMaxViewPort
= vp
->originX
+ vp
->width
- 1,
1805 .YMinViewPort
= vp
->originY
,
1806 .YMaxViewPort
= vp
->originY
+ vp
->height
- 1,
1809 struct GEN7_CC_VIEWPORT cc_viewport
= {
1810 .MinimumDepth
= vp
->minDepth
,
1811 .MaximumDepth
= vp
->maxDepth
1814 /* Since xmax and ymax are inclusive, we have to have xmax < xmin or
1815 * ymax < ymin for empty clips. In case clip x, y, width height are all
1816 * 0, the clamps below produce 0 for xmin, ymin, xmax, ymax, which isn't
1817 * what we want. Just special case empty clips and produce a canonical
1819 static const struct GEN7_SCISSOR_RECT empty_scissor
= {
1820 .ScissorRectangleYMin
= 1,
1821 .ScissorRectangleXMin
= 1,
1822 .ScissorRectangleYMax
= 0,
1823 .ScissorRectangleXMax
= 0
1826 const int max
= 0xffff;
1827 struct GEN7_SCISSOR_RECT scissor
= {
1828 /* Do this math using int64_t so overflow gets clamped correctly. */
1829 .ScissorRectangleYMin
= clamp_int64(s
->offset
.y
, 0, max
),
1830 .ScissorRectangleXMin
= clamp_int64(s
->offset
.x
, 0, max
),
1831 .ScissorRectangleYMax
= clamp_int64((uint64_t) s
->offset
.y
+ s
->extent
.height
- 1, 0, max
),
1832 .ScissorRectangleXMax
= clamp_int64((uint64_t) s
->offset
.x
+ s
->extent
.width
- 1, 0, max
)
1835 GEN8_SF_CLIP_VIEWPORT_pack(NULL
, state
->sf_clip_vp
.map
+ i
* 64, &sf_clip_viewport
);
1836 GEN7_CC_VIEWPORT_pack(NULL
, state
->cc_vp
.map
+ i
* 32, &cc_viewport
);
1838 if (s
->extent
.width
<= 0 || s
->extent
.height
<= 0) {
1839 GEN7_SCISSOR_RECT_pack(NULL
, state
->scissor
.map
+ i
* 32, &empty_scissor
);
1841 GEN7_SCISSOR_RECT_pack(NULL
, state
->scissor
.map
+ i
* 32, &scissor
);
1845 *pState
= anv_dynamic_vp_state_to_handle(state
);
1850 VkResult
anv_DestroyDynamicViewportState(
1852 VkDynamicViewportState _vp_state
)
1854 ANV_FROM_HANDLE(anv_device
, device
, _device
);
1855 ANV_FROM_HANDLE(anv_dynamic_vp_state
, vp_state
, _vp_state
);
1857 anv_state_pool_free(&device
->dynamic_state_pool
, vp_state
->sf_clip_vp
);
1858 anv_state_pool_free(&device
->dynamic_state_pool
, vp_state
->cc_vp
);
1859 anv_state_pool_free(&device
->dynamic_state_pool
, vp_state
->scissor
);
1861 anv_device_free(device
, vp_state
);
1866 VkResult
anv_DestroyDynamicRasterState(
1868 VkDynamicRasterState _rs_state
)
1870 ANV_FROM_HANDLE(anv_device
, device
, _device
);
1871 ANV_FROM_HANDLE(anv_dynamic_rs_state
, rs_state
, _rs_state
);
1873 anv_device_free(device
, rs_state
);
1878 VkResult
anv_CreateDynamicColorBlendState(
1880 const VkDynamicColorBlendStateCreateInfo
* pCreateInfo
,
1881 VkDynamicColorBlendState
* pState
)
1883 ANV_FROM_HANDLE(anv_device
, device
, _device
);
1884 struct anv_dynamic_cb_state
*state
;
1886 assert(pCreateInfo
->sType
== VK_STRUCTURE_TYPE_DYNAMIC_COLOR_BLEND_STATE_CREATE_INFO
);
1888 state
= anv_device_alloc(device
, sizeof(*state
), 8,
1889 VK_SYSTEM_ALLOC_TYPE_API_OBJECT
);
1891 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY
);
1893 struct GEN7_COLOR_CALC_STATE color_calc_state
= {
1894 .BlendConstantColorRed
= pCreateInfo
->blendConst
[0],
1895 .BlendConstantColorGreen
= pCreateInfo
->blendConst
[1],
1896 .BlendConstantColorBlue
= pCreateInfo
->blendConst
[2],
1897 .BlendConstantColorAlpha
= pCreateInfo
->blendConst
[3]
1900 GEN7_COLOR_CALC_STATE_pack(NULL
, state
->color_calc_state
, &color_calc_state
);
1902 *pState
= anv_dynamic_cb_state_to_handle(state
);
1907 VkResult
anv_DestroyDynamicColorBlendState(
1909 VkDynamicColorBlendState _cb_state
)
1911 ANV_FROM_HANDLE(anv_device
, device
, _device
);
1912 ANV_FROM_HANDLE(anv_dynamic_cb_state
, cb_state
, _cb_state
);
1914 anv_device_free(device
, cb_state
);
1919 VkResult
anv_DestroyDynamicDepthStencilState(
1921 VkDynamicDepthStencilState _ds_state
)
1923 ANV_FROM_HANDLE(anv_device
, device
, _device
);
1924 ANV_FROM_HANDLE(anv_dynamic_ds_state
, ds_state
, _ds_state
);
1926 anv_device_free(device
, ds_state
);
1931 VkResult
anv_CreateFramebuffer(
1933 const VkFramebufferCreateInfo
* pCreateInfo
,
1934 VkFramebuffer
* pFramebuffer
)
1936 ANV_FROM_HANDLE(anv_device
, device
, _device
);
1937 struct anv_framebuffer
*framebuffer
;
1939 assert(pCreateInfo
->sType
== VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO
);
1941 size_t size
= sizeof(*framebuffer
) +
1942 sizeof(struct anv_attachment_view
*) * pCreateInfo
->attachmentCount
;
1943 framebuffer
= anv_device_alloc(device
, size
, 8,
1944 VK_SYSTEM_ALLOC_TYPE_API_OBJECT
);
1945 if (framebuffer
== NULL
)
1946 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY
);
1948 framebuffer
->attachment_count
= pCreateInfo
->attachmentCount
;
1949 for (uint32_t i
= 0; i
< pCreateInfo
->attachmentCount
; i
++) {
1950 ANV_FROM_HANDLE(anv_attachment_view
, view
,
1951 pCreateInfo
->pAttachments
[i
].view
);
1953 framebuffer
->attachments
[i
] = view
;
1956 framebuffer
->width
= pCreateInfo
->width
;
1957 framebuffer
->height
= pCreateInfo
->height
;
1958 framebuffer
->layers
= pCreateInfo
->layers
;
1960 anv_CreateDynamicViewportState(anv_device_to_handle(device
),
1961 &(VkDynamicViewportStateCreateInfo
) {
1962 .sType
= VK_STRUCTURE_TYPE_DYNAMIC_VIEWPORT_STATE_CREATE_INFO
,
1963 .viewportAndScissorCount
= 1,
1964 .pViewports
= (VkViewport
[]) {
1968 .width
= pCreateInfo
->width
,
1969 .height
= pCreateInfo
->height
,
1974 .pScissors
= (VkRect2D
[]) {
1976 { pCreateInfo
->width
, pCreateInfo
->height
} },
1979 &framebuffer
->vp_state
);
1981 *pFramebuffer
= anv_framebuffer_to_handle(framebuffer
);
1986 VkResult
anv_DestroyFramebuffer(
1990 ANV_FROM_HANDLE(anv_device
, device
, _device
);
1991 ANV_FROM_HANDLE(anv_framebuffer
, fb
, _fb
);
1993 anv_DestroyDynamicViewportState(anv_device_to_handle(device
),
1995 anv_device_free(device
, fb
);
2000 VkResult
anv_CreateRenderPass(
2002 const VkRenderPassCreateInfo
* pCreateInfo
,
2003 VkRenderPass
* pRenderPass
)
2005 ANV_FROM_HANDLE(anv_device
, device
, _device
);
2006 struct anv_render_pass
*pass
;
2008 size_t attachments_offset
;
2010 assert(pCreateInfo
->sType
== VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO
);
2012 size
= sizeof(*pass
);
2013 size
+= pCreateInfo
->subpassCount
* sizeof(pass
->subpasses
[0]);
2014 attachments_offset
= size
;
2015 size
+= pCreateInfo
->attachmentCount
* sizeof(pass
->attachments
[0]);
2017 pass
= anv_device_alloc(device
, size
, 8,
2018 VK_SYSTEM_ALLOC_TYPE_API_OBJECT
);
2020 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY
);
2022 /* Clear the subpasses along with the parent pass. This required because
2023 * each array member of anv_subpass must be a valid pointer if not NULL.
2025 memset(pass
, 0, size
);
2026 pass
->attachment_count
= pCreateInfo
->attachmentCount
;
2027 pass
->subpass_count
= pCreateInfo
->subpassCount
;
2028 pass
->attachments
= (void *) pass
+ attachments_offset
;
2030 for (uint32_t i
= 0; i
< pCreateInfo
->attachmentCount
; i
++) {
2031 struct anv_render_pass_attachment
*att
= &pass
->attachments
[i
];
2033 att
->format
= anv_format_for_vk_format(pCreateInfo
->pAttachments
[i
].format
);
2034 att
->samples
= pCreateInfo
->pAttachments
[i
].samples
;
2035 att
->load_op
= pCreateInfo
->pAttachments
[i
].loadOp
;
2036 att
->stencil_load_op
= pCreateInfo
->pAttachments
[i
].stencilLoadOp
;
2037 // att->store_op = pCreateInfo->pAttachments[i].storeOp;
2038 // att->stencil_store_op = pCreateInfo->pAttachments[i].stencilStoreOp;
2040 if (att
->load_op
== VK_ATTACHMENT_LOAD_OP_CLEAR
) {
2041 if (anv_format_is_color(att
->format
)) {
2042 ++pass
->num_color_clear_attachments
;
2043 } else if (att
->format
->depth_format
) {
2044 pass
->has_depth_clear_attachment
= true;
2046 } else if (att
->stencil_load_op
== VK_ATTACHMENT_LOAD_OP_CLEAR
) {
2047 assert(att
->format
->has_stencil
);
2048 pass
->has_stencil_clear_attachment
= true;
2052 for (uint32_t i
= 0; i
< pCreateInfo
->subpassCount
; i
++) {
2053 const VkSubpassDescription
*desc
= &pCreateInfo
->pSubpasses
[i
];
2054 struct anv_subpass
*subpass
= &pass
->subpasses
[i
];
2056 subpass
->input_count
= desc
->inputCount
;
2057 subpass
->color_count
= desc
->colorCount
;
2059 if (desc
->inputCount
> 0) {
2060 subpass
->input_attachments
=
2061 anv_device_alloc(device
, desc
->inputCount
* sizeof(uint32_t),
2062 8, VK_SYSTEM_ALLOC_TYPE_API_OBJECT
);
2064 for (uint32_t j
= 0; j
< desc
->inputCount
; j
++) {
2065 subpass
->input_attachments
[j
]
2066 = desc
->inputAttachments
[j
].attachment
;
2070 if (desc
->colorCount
> 0) {
2071 subpass
->color_attachments
=
2072 anv_device_alloc(device
, desc
->colorCount
* sizeof(uint32_t),
2073 8, VK_SYSTEM_ALLOC_TYPE_API_OBJECT
);
2075 for (uint32_t j
= 0; j
< desc
->colorCount
; j
++) {
2076 subpass
->color_attachments
[j
]
2077 = desc
->colorAttachments
[j
].attachment
;
2081 if (desc
->resolveAttachments
) {
2082 subpass
->resolve_attachments
=
2083 anv_device_alloc(device
, desc
->colorCount
* sizeof(uint32_t),
2084 8, VK_SYSTEM_ALLOC_TYPE_API_OBJECT
);
2086 for (uint32_t j
= 0; j
< desc
->colorCount
; j
++) {
2087 subpass
->resolve_attachments
[j
]
2088 = desc
->resolveAttachments
[j
].attachment
;
2092 subpass
->depth_stencil_attachment
= desc
->depthStencilAttachment
.attachment
;
2095 *pRenderPass
= anv_render_pass_to_handle(pass
);
2100 VkResult
anv_DestroyRenderPass(
2104 ANV_FROM_HANDLE(anv_device
, device
, _device
);
2105 ANV_FROM_HANDLE(anv_render_pass
, pass
, _pass
);
2107 for (uint32_t i
= 0; i
< pass
->subpass_count
; i
++) {
2108 /* In VkSubpassCreateInfo, each of the attachment arrays may be null.
2109 * Don't free the null arrays.
2111 struct anv_subpass
*subpass
= &pass
->subpasses
[i
];
2113 anv_device_free(device
, subpass
->input_attachments
);
2114 anv_device_free(device
, subpass
->color_attachments
);
2115 anv_device_free(device
, subpass
->resolve_attachments
);
2118 anv_device_free(device
, pass
);
2123 VkResult
anv_GetRenderAreaGranularity(
2125 VkRenderPass renderPass
,
2126 VkExtent2D
* pGranularity
)
2128 *pGranularity
= (VkExtent2D
) { 1, 1 };
2133 void vkCmdDbgMarkerBegin(
2134 VkCmdBuffer cmdBuffer
,
2135 const char* pMarker
)
2136 __attribute__ ((visibility ("default")));
2138 void vkCmdDbgMarkerEnd(
2139 VkCmdBuffer cmdBuffer
)
2140 __attribute__ ((visibility ("default")));
2142 void vkCmdDbgMarkerBegin(
2143 VkCmdBuffer cmdBuffer
,
2144 const char* pMarker
)
2148 void vkCmdDbgMarkerEnd(
2149 VkCmdBuffer cmdBuffer
)