/*
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
30 #include "anv_private.h"
31 #include "mesa/main/git_sha1.h"
32 #include "util/strtod.h"
34 struct anv_dispatch_table dtable
;
37 anv_physical_device_init(struct anv_physical_device
*device
,
38 struct anv_instance
*instance
,
44 fd
= open(path
, O_RDWR
| O_CLOEXEC
);
46 return vk_errorf(VK_ERROR_UNAVAILABLE
, "failed to open %s: %m", path
);
48 device
->_loader_data
.loaderMagic
= ICD_LOADER_MAGIC
;
49 device
->instance
= instance
;
52 device
->chipset_id
= anv_gem_get_param(fd
, I915_PARAM_CHIPSET_ID
);
53 if (!device
->chipset_id
) {
54 result
= vk_errorf(VK_ERROR_UNAVAILABLE
, "failed to get chipset id: %m");
58 device
->name
= brw_get_device_name(device
->chipset_id
);
59 device
->info
= brw_get_device_info(device
->chipset_id
, -1);
61 result
= vk_errorf(VK_ERROR_UNAVAILABLE
, "failed to get device info");
65 if (anv_gem_get_aperture(fd
, &device
->aperture_size
) == -1) {
66 result
= vk_errorf(VK_ERROR_UNAVAILABLE
, "failed to get aperture size: %m");
70 if (!anv_gem_get_param(fd
, I915_PARAM_HAS_WAIT_TIMEOUT
)) {
71 result
= vk_errorf(VK_ERROR_UNAVAILABLE
, "kernel missing gem wait");
75 if (!anv_gem_get_param(fd
, I915_PARAM_HAS_EXECBUF2
)) {
76 result
= vk_errorf(VK_ERROR_UNAVAILABLE
, "kernel missing execbuf2");
80 if (!anv_gem_get_param(fd
, I915_PARAM_HAS_LLC
)) {
81 result
= vk_errorf(VK_ERROR_UNAVAILABLE
, "non-llc gpu");
94 static void *default_alloc(
98 VkSystemAllocType allocType
)
103 static void default_free(
110 static const VkAllocCallbacks default_alloc_callbacks
= {
112 .pfnAlloc
= default_alloc
,
113 .pfnFree
= default_free
116 static const VkExtensionProperties global_extensions
[] = {
118 .extName
= "VK_WSI_swapchain",
123 static const VkExtensionProperties device_extensions
[] = {
125 .extName
= "VK_WSI_device_swapchain",
131 VkResult
anv_CreateInstance(
132 const VkInstanceCreateInfo
* pCreateInfo
,
133 VkInstance
* pInstance
)
135 struct anv_instance
*instance
;
136 const VkAllocCallbacks
*alloc_callbacks
= &default_alloc_callbacks
;
137 void *user_data
= NULL
;
139 assert(pCreateInfo
->sType
== VK_STRUCTURE_TYPE_INSTANCE_CREATE_INFO
);
141 for (uint32_t i
= 0; i
< pCreateInfo
->extensionCount
; i
++) {
143 for (uint32_t j
= 0; j
< ARRAY_SIZE(global_extensions
); j
++) {
144 if (strcmp(pCreateInfo
->ppEnabledExtensionNames
[i
],
145 global_extensions
[j
].extName
) == 0) {
151 return vk_error(VK_ERROR_INVALID_EXTENSION
);
154 if (pCreateInfo
->pAllocCb
) {
155 alloc_callbacks
= pCreateInfo
->pAllocCb
;
156 user_data
= pCreateInfo
->pAllocCb
->pUserData
;
158 instance
= alloc_callbacks
->pfnAlloc(user_data
, sizeof(*instance
), 8,
159 VK_SYSTEM_ALLOC_TYPE_API_OBJECT
);
161 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY
);
163 instance
->_loader_data
.loaderMagic
= ICD_LOADER_MAGIC
;
164 instance
->pAllocUserData
= alloc_callbacks
->pUserData
;
165 instance
->pfnAlloc
= alloc_callbacks
->pfnAlloc
;
166 instance
->pfnFree
= alloc_callbacks
->pfnFree
;
167 instance
->apiVersion
= pCreateInfo
->pAppInfo
->apiVersion
;
168 instance
->physicalDeviceCount
= 0;
172 VG(VALGRIND_CREATE_MEMPOOL(instance
, 0, false));
174 anv_init_wsi(instance
);
176 *pInstance
= anv_instance_to_handle(instance
);
181 void anv_DestroyInstance(
182 VkInstance _instance
)
184 ANV_FROM_HANDLE(anv_instance
, instance
, _instance
);
186 anv_finish_wsi(instance
);
188 VG(VALGRIND_DESTROY_MEMPOOL(instance
));
192 instance
->pfnFree(instance
->pAllocUserData
, instance
);
196 anv_instance_alloc(struct anv_instance
*instance
, size_t size
,
197 size_t alignment
, VkSystemAllocType allocType
)
199 void *mem
= instance
->pfnAlloc(instance
->pAllocUserData
,
200 size
, alignment
, allocType
);
202 VG(VALGRIND_MEMPOOL_ALLOC(instance
, mem
, size
));
203 VG(VALGRIND_MAKE_MEM_UNDEFINED(mem
, size
));
209 anv_instance_free(struct anv_instance
*instance
, void *mem
)
214 VG(VALGRIND_MEMPOOL_FREE(instance
, mem
));
216 instance
->pfnFree(instance
->pAllocUserData
, mem
);
219 VkResult
anv_EnumeratePhysicalDevices(
220 VkInstance _instance
,
221 uint32_t* pPhysicalDeviceCount
,
222 VkPhysicalDevice
* pPhysicalDevices
)
224 ANV_FROM_HANDLE(anv_instance
, instance
, _instance
);
227 if (instance
->physicalDeviceCount
== 0) {
228 result
= anv_physical_device_init(&instance
->physicalDevice
,
229 instance
, "/dev/dri/renderD128");
230 if (result
!= VK_SUCCESS
)
233 instance
->physicalDeviceCount
= 1;
236 /* pPhysicalDeviceCount is an out parameter if pPhysicalDevices is NULL;
237 * otherwise it's an inout parameter.
239 * The Vulkan spec (git aaed022) says:
241 * pPhysicalDeviceCount is a pointer to an unsigned integer variable
242 * that is initialized with the number of devices the application is
243 * prepared to receive handles to. pname:pPhysicalDevices is pointer to
244 * an array of at least this many VkPhysicalDevice handles [...].
246 * Upon success, if pPhysicalDevices is NULL, vkEnumeratePhysicalDevices
247 * overwrites the contents of the variable pointed to by
248 * pPhysicalDeviceCount with the number of physical devices in in the
249 * instance; otherwise, vkEnumeratePhysicalDevices overwrites
250 * pPhysicalDeviceCount with the number of physical handles written to
253 if (!pPhysicalDevices
) {
254 *pPhysicalDeviceCount
= instance
->physicalDeviceCount
;
255 } else if (*pPhysicalDeviceCount
>= 1) {
256 pPhysicalDevices
[0] = anv_physical_device_to_handle(&instance
->physicalDevice
);
257 *pPhysicalDeviceCount
= 1;
259 *pPhysicalDeviceCount
= 0;
265 VkResult
anv_GetPhysicalDeviceFeatures(
266 VkPhysicalDevice physicalDevice
,
267 VkPhysicalDeviceFeatures
* pFeatures
)
269 anv_finishme("Get correct values for PhysicalDeviceFeatures");
271 *pFeatures
= (VkPhysicalDeviceFeatures
) {
272 .robustBufferAccess
= false,
273 .fullDrawIndexUint32
= false,
274 .imageCubeArray
= false,
275 .independentBlend
= false,
276 .geometryShader
= true,
277 .tessellationShader
= false,
278 .sampleRateShading
= false,
279 .dualSourceBlend
= true,
281 .instancedDrawIndirect
= true,
283 .depthBiasClamp
= false,
284 .fillModeNonSolid
= true,
285 .depthBounds
= false,
288 .textureCompressionETC2
= true,
289 .textureCompressionASTC_LDR
= true,
290 .textureCompressionBC
= true,
291 .pipelineStatisticsQuery
= true,
292 .vertexSideEffects
= false,
293 .tessellationSideEffects
= false,
294 .geometrySideEffects
= false,
295 .fragmentSideEffects
= false,
296 .shaderTessellationPointSize
= false,
297 .shaderGeometryPointSize
= true,
298 .shaderTextureGatherExtended
= true,
299 .shaderStorageImageExtendedFormats
= false,
300 .shaderStorageImageMultisample
= false,
301 .shaderStorageBufferArrayConstantIndexing
= false,
302 .shaderStorageImageArrayConstantIndexing
= false,
303 .shaderUniformBufferArrayDynamicIndexing
= true,
304 .shaderSampledImageArrayDynamicIndexing
= false,
305 .shaderStorageBufferArrayDynamicIndexing
= false,
306 .shaderStorageImageArrayDynamicIndexing
= false,
307 .shaderClipDistance
= false,
308 .shaderCullDistance
= false,
309 .shaderFloat64
= false,
310 .shaderInt64
= false,
311 .shaderFloat16
= false,
312 .shaderInt16
= false,
318 VkResult
anv_GetPhysicalDeviceLimits(
319 VkPhysicalDevice physicalDevice
,
320 VkPhysicalDeviceLimits
* pLimits
)
322 ANV_FROM_HANDLE(anv_physical_device
, physical_device
, physicalDevice
);
323 const struct brw_device_info
*devinfo
= physical_device
->info
;
325 anv_finishme("Get correct values for PhysicalDeviceLimits");
327 *pLimits
= (VkPhysicalDeviceLimits
) {
328 .maxImageDimension1D
= (1 << 14),
329 .maxImageDimension2D
= (1 << 14),
330 .maxImageDimension3D
= (1 << 10),
331 .maxImageDimensionCube
= (1 << 14),
332 .maxImageArrayLayers
= (1 << 10),
333 .maxTexelBufferSize
= (1 << 14),
334 .maxUniformBufferSize
= UINT32_MAX
,
335 .maxStorageBufferSize
= UINT32_MAX
,
336 .maxPushConstantsSize
= MAX_PUSH_CONSTANTS_SIZE
,
337 .maxMemoryAllocationCount
= UINT32_MAX
,
338 .bufferImageGranularity
= 64, /* A cache line */
339 .maxBoundDescriptorSets
= MAX_SETS
,
340 .maxDescriptorSets
= UINT32_MAX
,
341 .maxPerStageDescriptorSamplers
= 64,
342 .maxPerStageDescriptorUniformBuffers
= 64,
343 .maxPerStageDescriptorStorageBuffers
= 64,
344 .maxPerStageDescriptorSampledImages
= 64,
345 .maxPerStageDescriptorStorageImages
= 64,
346 .maxDescriptorSetSamplers
= 256,
347 .maxDescriptorSetUniformBuffers
= 256,
348 .maxDescriptorSetStorageBuffers
= 256,
349 .maxDescriptorSetSampledImages
= 256,
350 .maxDescriptorSetStorageImages
= 256,
351 .maxVertexInputAttributes
= 32,
352 .maxVertexInputAttributeOffset
= 256,
353 .maxVertexInputBindingStride
= 256,
354 .maxVertexOutputComponents
= 32,
355 .maxTessGenLevel
= 0,
356 .maxTessPatchSize
= 0,
357 .maxTessControlPerVertexInputComponents
= 0,
358 .maxTessControlPerVertexOutputComponents
= 0,
359 .maxTessControlPerPatchOutputComponents
= 0,
360 .maxTessControlTotalOutputComponents
= 0,
361 .maxTessEvaluationInputComponents
= 0,
362 .maxTessEvaluationOutputComponents
= 0,
363 .maxGeometryShaderInvocations
= 6,
364 .maxGeometryInputComponents
= 16,
365 .maxGeometryOutputComponents
= 16,
366 .maxGeometryOutputVertices
= 16,
367 .maxGeometryTotalOutputComponents
= 16,
368 .maxFragmentInputComponents
= 16,
369 .maxFragmentOutputBuffers
= 8,
370 .maxFragmentDualSourceBuffers
= 2,
371 .maxFragmentCombinedOutputResources
= 8,
372 .maxComputeSharedMemorySize
= 1024,
373 .maxComputeWorkGroupCount
= {
374 16 * devinfo
->max_cs_threads
,
375 16 * devinfo
->max_cs_threads
,
376 16 * devinfo
->max_cs_threads
,
378 .maxComputeWorkGroupInvocations
= 16 * devinfo
->max_cs_threads
,
379 .maxComputeWorkGroupSize
= {
380 16 * devinfo
->max_cs_threads
,
381 16 * devinfo
->max_cs_threads
,
382 16 * devinfo
->max_cs_threads
,
384 .subPixelPrecisionBits
= 4 /* FIXME */,
385 .subTexelPrecisionBits
= 4 /* FIXME */,
386 .mipmapPrecisionBits
= 4 /* FIXME */,
387 .maxDrawIndexedIndexValue
= UINT32_MAX
,
388 .maxDrawIndirectInstanceCount
= UINT32_MAX
,
389 .primitiveRestartForPatches
= UINT32_MAX
,
390 .maxSamplerLodBias
= 16,
391 .maxSamplerAnisotropy
= 16,
393 .maxDynamicViewportStates
= UINT32_MAX
,
394 .maxViewportDimensions
= { (1 << 14), (1 << 14) },
395 .viewportBoundsRange
= { -1.0, 1.0 }, /* FIXME */
396 .viewportSubPixelBits
= 13, /* We take a float? */
397 .minMemoryMapAlignment
= 64, /* A cache line */
398 .minTexelBufferOffsetAlignment
= 1,
399 .minUniformBufferOffsetAlignment
= 1,
400 .minStorageBufferOffsetAlignment
= 1,
401 .minTexelOffset
= 0, /* FIXME */
402 .maxTexelOffset
= 0, /* FIXME */
403 .minTexelGatherOffset
= 0, /* FIXME */
404 .maxTexelGatherOffset
= 0, /* FIXME */
405 .minInterpolationOffset
= 0, /* FIXME */
406 .maxInterpolationOffset
= 0, /* FIXME */
407 .subPixelInterpolationOffsetBits
= 0, /* FIXME */
408 .maxFramebufferWidth
= (1 << 14),
409 .maxFramebufferHeight
= (1 << 14),
410 .maxFramebufferLayers
= (1 << 10),
411 .maxFramebufferColorSamples
= 8,
412 .maxFramebufferDepthSamples
= 8,
413 .maxFramebufferStencilSamples
= 8,
414 .maxColorAttachments
= MAX_RTS
,
415 .maxSampledImageColorSamples
= 8,
416 .maxSampledImageDepthSamples
= 8,
417 .maxSampledImageIntegerSamples
= 1,
418 .maxStorageImageSamples
= 1,
419 .maxSampleMaskWords
= 1,
420 .timestampFrequency
= 1000 * 1000 * 1000 / 80,
421 .maxClipDistances
= 0 /* FIXME */,
422 .maxCullDistances
= 0 /* FIXME */,
423 .maxCombinedClipAndCullDistances
= 0 /* FIXME */,
424 .pointSizeRange
= { 0.125, 255.875 },
425 .lineWidthRange
= { 0.0, 7.9921875 },
426 .pointSizeGranularity
= (1.0 / 8.0),
427 .lineWidthGranularity
= (1.0 / 128.0),
433 VkResult
anv_GetPhysicalDeviceProperties(
434 VkPhysicalDevice physicalDevice
,
435 VkPhysicalDeviceProperties
* pProperties
)
437 ANV_FROM_HANDLE(anv_physical_device
, pdevice
, physicalDevice
);
439 *pProperties
= (VkPhysicalDeviceProperties
) {
440 .apiVersion
= VK_MAKE_VERSION(0, 138, 1),
443 .deviceId
= pdevice
->chipset_id
,
444 .deviceType
= VK_PHYSICAL_DEVICE_TYPE_INTEGRATED_GPU
,
447 strcpy(pProperties
->deviceName
, pdevice
->name
);
448 snprintf((char *)pProperties
->pipelineCacheUUID
, VK_UUID_LENGTH
,
449 "anv-%s", MESA_GIT_SHA1
+ 4);
454 VkResult
anv_GetPhysicalDeviceQueueFamilyProperties(
455 VkPhysicalDevice physicalDevice
,
457 VkQueueFamilyProperties
* pQueueFamilyProperties
)
459 if (pQueueFamilyProperties
== NULL
) {
463 assert(*pCount
>= 1);
465 *pQueueFamilyProperties
= (VkQueueFamilyProperties
) {
466 .queueFlags
= VK_QUEUE_GRAPHICS_BIT
|
467 VK_QUEUE_COMPUTE_BIT
|
470 .supportsTimestamps
= true,
476 VkResult
anv_GetPhysicalDeviceMemoryProperties(
477 VkPhysicalDevice physicalDevice
,
478 VkPhysicalDeviceMemoryProperties
* pMemoryProperties
)
480 ANV_FROM_HANDLE(anv_physical_device
, physical_device
, physicalDevice
);
481 VkDeviceSize heap_size
;
483 /* Reserve some wiggle room for the driver by exposing only 75% of the
484 * aperture to the heap.
486 heap_size
= 3 * physical_device
->aperture_size
/ 4;
488 /* The property flags below are valid only for llc platforms. */
489 pMemoryProperties
->memoryTypeCount
= 1;
490 pMemoryProperties
->memoryTypes
[0] = (VkMemoryType
) {
491 .propertyFlags
= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT
,
495 pMemoryProperties
->memoryHeapCount
= 1;
496 pMemoryProperties
->memoryHeaps
[0] = (VkMemoryHeap
) {
498 .flags
= VK_MEMORY_HEAP_HOST_LOCAL
,
504 PFN_vkVoidFunction
anv_GetInstanceProcAddr(
508 return anv_lookup_entrypoint(pName
);
511 PFN_vkVoidFunction
anv_GetDeviceProcAddr(
515 return anv_lookup_entrypoint(pName
);
519 anv_queue_init(struct anv_device
*device
, struct anv_queue
*queue
)
521 queue
->_loader_data
.loaderMagic
= ICD_LOADER_MAGIC
;
522 queue
->device
= device
;
523 queue
->pool
= &device
->surface_state_pool
;
525 queue
->completed_serial
= anv_state_pool_alloc(queue
->pool
, 4, 4);
526 if (queue
->completed_serial
.map
== NULL
)
527 return vk_error(VK_ERROR_OUT_OF_DEVICE_MEMORY
);
529 *(uint32_t *)queue
->completed_serial
.map
= 0;
530 queue
->next_serial
= 1;
/* Release queue resources.  The backing state pool is torn down with the
 * device, so the explicit free exists only to keep valgrind's mempool
 * accounting clean.
 */
static void
anv_queue_finish(struct anv_queue *queue)
{
#ifdef HAVE_VALGRIND
   /* This gets torn down with the device so we only need to do this if
    * valgrind is present.
    */
   anv_state_pool_free(queue->pool, queue->completed_serial);
#endif
}
547 anv_device_init_border_colors(struct anv_device
*device
)
549 static const VkClearColorValue border_colors
[] = {
550 [VK_BORDER_COLOR_FLOAT_TRANSPARENT_BLACK
] = { .float32
= { 0.0, 0.0, 0.0, 0.0 } },
551 [VK_BORDER_COLOR_FLOAT_OPAQUE_BLACK
] = { .float32
= { 0.0, 0.0, 0.0, 1.0 } },
552 [VK_BORDER_COLOR_FLOAT_OPAQUE_WHITE
] = { .float32
= { 1.0, 1.0, 1.0, 1.0 } },
553 [VK_BORDER_COLOR_INT_TRANSPARENT_BLACK
] = { .uint32
= { 0, 0, 0, 0 } },
554 [VK_BORDER_COLOR_INT_OPAQUE_BLACK
] = { .uint32
= { 0, 0, 0, 1 } },
555 [VK_BORDER_COLOR_INT_OPAQUE_WHITE
] = { .uint32
= { 1, 1, 1, 1 } },
558 device
->border_colors
=
559 anv_state_pool_alloc(&device
->dynamic_state_pool
,
560 sizeof(border_colors
), 32);
561 memcpy(device
->border_colors
.map
, border_colors
, sizeof(border_colors
));
564 VkResult
anv_CreateDevice(
565 VkPhysicalDevice physicalDevice
,
566 const VkDeviceCreateInfo
* pCreateInfo
,
569 ANV_FROM_HANDLE(anv_physical_device
, physical_device
, physicalDevice
);
570 struct anv_instance
*instance
= physical_device
->instance
;
571 struct anv_device
*device
;
573 assert(pCreateInfo
->sType
== VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO
);
575 for (uint32_t i
= 0; i
< pCreateInfo
->extensionCount
; i
++) {
577 for (uint32_t j
= 0; j
< ARRAY_SIZE(device_extensions
); j
++) {
578 if (strcmp(pCreateInfo
->ppEnabledExtensionNames
[i
],
579 device_extensions
[j
].extName
) == 0) {
585 return vk_error(VK_ERROR_INVALID_EXTENSION
);
588 anv_set_dispatch_gen(physical_device
->info
->gen
);
590 device
= anv_instance_alloc(instance
, sizeof(*device
), 8,
591 VK_SYSTEM_ALLOC_TYPE_API_OBJECT
);
593 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY
);
595 device
->_loader_data
.loaderMagic
= ICD_LOADER_MAGIC
;
596 device
->instance
= physical_device
->instance
;
598 /* XXX(chadv): Can we dup() physicalDevice->fd here? */
599 device
->fd
= open(physical_device
->path
, O_RDWR
| O_CLOEXEC
);
600 if (device
->fd
== -1)
603 device
->context_id
= anv_gem_create_context(device
);
604 if (device
->context_id
== -1)
607 pthread_mutex_init(&device
->mutex
, NULL
);
609 anv_bo_pool_init(&device
->batch_bo_pool
, device
, ANV_CMD_BUFFER_BATCH_SIZE
);
611 anv_block_pool_init(&device
->dynamic_state_block_pool
, device
, 2048);
613 anv_state_pool_init(&device
->dynamic_state_pool
,
614 &device
->dynamic_state_block_pool
);
616 anv_block_pool_init(&device
->instruction_block_pool
, device
, 2048);
617 anv_block_pool_init(&device
->surface_state_block_pool
, device
, 4096);
619 anv_state_pool_init(&device
->surface_state_pool
,
620 &device
->surface_state_block_pool
);
622 anv_block_pool_init(&device
->scratch_block_pool
, device
, 0x10000);
624 device
->info
= *physical_device
->info
;
626 device
->compiler
= anv_compiler_create(device
);
628 anv_queue_init(device
, &device
->queue
);
630 anv_device_init_meta(device
);
632 anv_device_init_border_colors(device
);
634 *pDevice
= anv_device_to_handle(device
);
641 anv_device_free(device
, device
);
643 return vk_error(VK_ERROR_UNAVAILABLE
);
646 void anv_DestroyDevice(
649 ANV_FROM_HANDLE(anv_device
, device
, _device
);
651 anv_compiler_destroy(device
->compiler
);
653 anv_queue_finish(&device
->queue
);
655 anv_device_finish_meta(device
);
658 /* We only need to free these to prevent valgrind errors. The backing
659 * BO will go away in a couple of lines so we don't actually leak.
661 anv_state_pool_free(&device
->dynamic_state_pool
, device
->border_colors
);
664 anv_bo_pool_finish(&device
->batch_bo_pool
);
665 anv_state_pool_finish(&device
->dynamic_state_pool
);
666 anv_block_pool_finish(&device
->dynamic_state_block_pool
);
667 anv_block_pool_finish(&device
->instruction_block_pool
);
668 anv_state_pool_finish(&device
->surface_state_pool
);
669 anv_block_pool_finish(&device
->surface_state_block_pool
);
670 anv_block_pool_finish(&device
->scratch_block_pool
);
674 anv_instance_free(device
->instance
, device
);
677 VkResult
anv_EnumerateInstanceExtensionProperties(
678 const char* pLayerName
,
680 VkExtensionProperties
* pProperties
)
682 if (pProperties
== NULL
) {
683 *pCount
= ARRAY_SIZE(global_extensions
);
687 assert(*pCount
>= ARRAY_SIZE(global_extensions
));
689 *pCount
= ARRAY_SIZE(global_extensions
);
690 memcpy(pProperties
, global_extensions
, sizeof(global_extensions
));
695 VkResult
anv_EnumerateDeviceExtensionProperties(
696 VkPhysicalDevice physicalDevice
,
697 const char* pLayerName
,
699 VkExtensionProperties
* pProperties
)
701 if (pProperties
== NULL
) {
702 *pCount
= ARRAY_SIZE(device_extensions
);
706 assert(*pCount
>= ARRAY_SIZE(device_extensions
));
708 *pCount
= ARRAY_SIZE(device_extensions
);
709 memcpy(pProperties
, device_extensions
, sizeof(device_extensions
));
714 VkResult
anv_EnumerateInstanceLayerProperties(
716 VkLayerProperties
* pProperties
)
718 if (pProperties
== NULL
) {
723 /* None supported at this time */
724 return vk_error(VK_ERROR_INVALID_LAYER
);
727 VkResult
anv_EnumerateDeviceLayerProperties(
728 VkPhysicalDevice physicalDevice
,
730 VkLayerProperties
* pProperties
)
732 if (pProperties
== NULL
) {
737 /* None supported at this time */
738 return vk_error(VK_ERROR_INVALID_LAYER
);
741 VkResult
anv_GetDeviceQueue(
743 uint32_t queueNodeIndex
,
747 ANV_FROM_HANDLE(anv_device
, device
, _device
);
749 assert(queueIndex
== 0);
751 *pQueue
= anv_queue_to_handle(&device
->queue
);
756 VkResult
anv_QueueSubmit(
758 uint32_t cmdBufferCount
,
759 const VkCmdBuffer
* pCmdBuffers
,
762 ANV_FROM_HANDLE(anv_queue
, queue
, _queue
);
763 ANV_FROM_HANDLE(anv_fence
, fence
, _fence
);
764 struct anv_device
*device
= queue
->device
;
767 for (uint32_t i
= 0; i
< cmdBufferCount
; i
++) {
768 ANV_FROM_HANDLE(anv_cmd_buffer
, cmd_buffer
, pCmdBuffers
[i
]);
770 assert(cmd_buffer
->level
== VK_CMD_BUFFER_LEVEL_PRIMARY
);
772 ret
= anv_gem_execbuffer(device
, &cmd_buffer
->execbuf2
.execbuf
);
774 return vk_errorf(VK_ERROR_UNKNOWN
, "execbuf2 failed: %m");
777 ret
= anv_gem_execbuffer(device
, &fence
->execbuf
);
779 return vk_errorf(VK_ERROR_UNKNOWN
, "execbuf2 failed: %m");
782 for (uint32_t i
= 0; i
< cmd_buffer
->execbuf2
.bo_count
; i
++)
783 cmd_buffer
->execbuf2
.bos
[i
]->offset
= cmd_buffer
->execbuf2
.objects
[i
].offset
;
789 VkResult
anv_QueueWaitIdle(
792 ANV_FROM_HANDLE(anv_queue
, queue
, _queue
);
794 return ANV_CALL(DeviceWaitIdle
)(anv_device_to_handle(queue
->device
));
797 VkResult
anv_DeviceWaitIdle(
800 ANV_FROM_HANDLE(anv_device
, device
, _device
);
801 struct anv_state state
;
802 struct anv_batch batch
;
803 struct drm_i915_gem_execbuffer2 execbuf
;
804 struct drm_i915_gem_exec_object2 exec2_objects
[1];
805 struct anv_bo
*bo
= NULL
;
810 state
= anv_state_pool_alloc(&device
->dynamic_state_pool
, 32, 32);
811 bo
= &device
->dynamic_state_pool
.block_pool
->bo
;
812 batch
.start
= batch
.next
= state
.map
;
813 batch
.end
= state
.map
+ 32;
814 anv_batch_emit(&batch
, GEN7_MI_BATCH_BUFFER_END
);
815 anv_batch_emit(&batch
, GEN7_MI_NOOP
);
817 exec2_objects
[0].handle
= bo
->gem_handle
;
818 exec2_objects
[0].relocation_count
= 0;
819 exec2_objects
[0].relocs_ptr
= 0;
820 exec2_objects
[0].alignment
= 0;
821 exec2_objects
[0].offset
= bo
->offset
;
822 exec2_objects
[0].flags
= 0;
823 exec2_objects
[0].rsvd1
= 0;
824 exec2_objects
[0].rsvd2
= 0;
826 execbuf
.buffers_ptr
= (uintptr_t) exec2_objects
;
827 execbuf
.buffer_count
= 1;
828 execbuf
.batch_start_offset
= state
.offset
;
829 execbuf
.batch_len
= batch
.next
- state
.map
;
830 execbuf
.cliprects_ptr
= 0;
831 execbuf
.num_cliprects
= 0;
836 I915_EXEC_HANDLE_LUT
| I915_EXEC_NO_RELOC
| I915_EXEC_RENDER
;
837 execbuf
.rsvd1
= device
->context_id
;
840 ret
= anv_gem_execbuffer(device
, &execbuf
);
842 result
= vk_errorf(VK_ERROR_UNKNOWN
, "execbuf2 failed: %m");
847 ret
= anv_gem_wait(device
, bo
->gem_handle
, &timeout
);
849 result
= vk_errorf(VK_ERROR_UNKNOWN
, "execbuf2 failed: %m");
853 anv_state_pool_free(&device
->dynamic_state_pool
, state
);
858 anv_state_pool_free(&device
->dynamic_state_pool
, state
);
864 anv_device_alloc(struct anv_device
* device
,
867 VkSystemAllocType allocType
)
869 return anv_instance_alloc(device
->instance
, size
, alignment
, allocType
);
873 anv_device_free(struct anv_device
* device
,
876 anv_instance_free(device
->instance
, mem
);
880 anv_bo_init_new(struct anv_bo
*bo
, struct anv_device
*device
, uint64_t size
)
882 bo
->gem_handle
= anv_gem_create(device
, size
);
884 return vk_error(VK_ERROR_OUT_OF_DEVICE_MEMORY
);
894 VkResult
anv_AllocMemory(
896 const VkMemoryAllocInfo
* pAllocInfo
,
897 VkDeviceMemory
* pMem
)
899 ANV_FROM_HANDLE(anv_device
, device
, _device
);
900 struct anv_device_memory
*mem
;
903 assert(pAllocInfo
->sType
== VK_STRUCTURE_TYPE_MEMORY_ALLOC_INFO
);
905 if (pAllocInfo
->memoryTypeIndex
!= 0) {
906 /* We support exactly one memory heap. */
907 return vk_error(VK_ERROR_INVALID_VALUE
);
910 /* FINISHME: Fail if allocation request exceeds heap size. */
912 mem
= anv_device_alloc(device
, sizeof(*mem
), 8,
913 VK_SYSTEM_ALLOC_TYPE_API_OBJECT
);
915 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY
);
917 result
= anv_bo_init_new(&mem
->bo
, device
, pAllocInfo
->allocationSize
);
918 if (result
!= VK_SUCCESS
)
921 *pMem
= anv_device_memory_to_handle(mem
);
926 anv_device_free(device
, mem
);
935 ANV_FROM_HANDLE(anv_device
, device
, _device
);
936 ANV_FROM_HANDLE(anv_device_memory
, mem
, _mem
);
939 anv_gem_munmap(mem
->bo
.map
, mem
->bo
.size
);
941 if (mem
->bo
.gem_handle
!= 0)
942 anv_gem_close(device
, mem
->bo
.gem_handle
);
944 anv_device_free(device
, mem
);
947 VkResult
anv_MapMemory(
952 VkMemoryMapFlags flags
,
955 ANV_FROM_HANDLE(anv_device
, device
, _device
);
956 ANV_FROM_HANDLE(anv_device_memory
, mem
, _mem
);
958 /* FIXME: Is this supposed to be thread safe? Since vkUnmapMemory() only
959 * takes a VkDeviceMemory pointer, it seems like only one map of the memory
960 * at a time is valid. We could just mmap up front and return an offset
961 * pointer here, but that may exhaust virtual memory on 32 bit
964 mem
->map
= anv_gem_mmap(device
, mem
->bo
.gem_handle
, offset
, size
);
965 mem
->map_size
= size
;
972 void anv_UnmapMemory(
976 ANV_FROM_HANDLE(anv_device_memory
, mem
, _mem
);
978 anv_gem_munmap(mem
->map
, mem
->map_size
);
981 VkResult
anv_FlushMappedMemoryRanges(
983 uint32_t memRangeCount
,
984 const VkMappedMemoryRange
* pMemRanges
)
986 /* clflush here for !llc platforms */
991 VkResult
anv_InvalidateMappedMemoryRanges(
993 uint32_t memRangeCount
,
994 const VkMappedMemoryRange
* pMemRanges
)
996 return anv_FlushMappedMemoryRanges(device
, memRangeCount
, pMemRanges
);
999 VkResult
anv_GetBufferMemoryRequirements(
1002 VkMemoryRequirements
* pMemoryRequirements
)
1004 ANV_FROM_HANDLE(anv_buffer
, buffer
, _buffer
);
1006 /* The Vulkan spec (git aaed022) says:
1008 * memoryTypeBits is a bitfield and contains one bit set for every
1009 * supported memory type for the resource. The bit `1<<i` is set if and
1010 * only if the memory type `i` in the VkPhysicalDeviceMemoryProperties
1011 * structure for the physical device is supported.
1013 * We support exactly one memory type.
1015 pMemoryRequirements
->memoryTypeBits
= 1;
1017 pMemoryRequirements
->size
= buffer
->size
;
1018 pMemoryRequirements
->alignment
= 16;
1023 VkResult
anv_GetImageMemoryRequirements(
1026 VkMemoryRequirements
* pMemoryRequirements
)
1028 ANV_FROM_HANDLE(anv_image
, image
, _image
);
1030 /* The Vulkan spec (git aaed022) says:
1032 * memoryTypeBits is a bitfield and contains one bit set for every
1033 * supported memory type for the resource. The bit `1<<i` is set if and
1034 * only if the memory type `i` in the VkPhysicalDeviceMemoryProperties
1035 * structure for the physical device is supported.
1037 * We support exactly one memory type.
1039 pMemoryRequirements
->memoryTypeBits
= 1;
1041 pMemoryRequirements
->size
= image
->size
;
1042 pMemoryRequirements
->alignment
= image
->alignment
;
1047 VkResult
anv_GetImageSparseMemoryRequirements(
1050 uint32_t* pNumRequirements
,
1051 VkSparseImageMemoryRequirements
* pSparseMemoryRequirements
)
1053 return vk_error(VK_UNSUPPORTED
);
1056 VkResult
anv_GetDeviceMemoryCommitment(
1058 VkDeviceMemory memory
,
1059 VkDeviceSize
* pCommittedMemoryInBytes
)
1061 *pCommittedMemoryInBytes
= 0;
1062 stub_return(VK_SUCCESS
);
1065 VkResult
anv_BindBufferMemory(
1068 VkDeviceMemory _mem
,
1069 VkDeviceSize memOffset
)
1071 ANV_FROM_HANDLE(anv_device_memory
, mem
, _mem
);
1072 ANV_FROM_HANDLE(anv_buffer
, buffer
, _buffer
);
1074 buffer
->bo
= &mem
->bo
;
1075 buffer
->offset
= memOffset
;
1080 VkResult
anv_BindImageMemory(
1083 VkDeviceMemory _mem
,
1084 VkDeviceSize memOffset
)
1086 ANV_FROM_HANDLE(anv_device_memory
, mem
, _mem
);
1087 ANV_FROM_HANDLE(anv_image
, image
, _image
);
1089 image
->bo
= &mem
->bo
;
1090 image
->offset
= memOffset
;
1095 VkResult
anv_QueueBindSparseBufferMemory(
1098 uint32_t numBindings
,
1099 const VkSparseMemoryBindInfo
* pBindInfo
)
1101 stub_return(VK_UNSUPPORTED
);
1104 VkResult
anv_QueueBindSparseImageOpaqueMemory(
1107 uint32_t numBindings
,
1108 const VkSparseMemoryBindInfo
* pBindInfo
)
1110 stub_return(VK_UNSUPPORTED
);
1113 VkResult
anv_QueueBindSparseImageMemory(
1116 uint32_t numBindings
,
1117 const VkSparseImageMemoryBindInfo
* pBindInfo
)
1119 stub_return(VK_UNSUPPORTED
);
1122 VkResult
anv_CreateFence(
1124 const VkFenceCreateInfo
* pCreateInfo
,
1127 ANV_FROM_HANDLE(anv_device
, device
, _device
);
1128 struct anv_fence
*fence
;
1129 struct anv_batch batch
;
1132 const uint32_t fence_size
= 128;
1134 assert(pCreateInfo
->sType
== VK_STRUCTURE_TYPE_FENCE_CREATE_INFO
);
1136 fence
= anv_device_alloc(device
, sizeof(*fence
), 8,
1137 VK_SYSTEM_ALLOC_TYPE_API_OBJECT
);
1139 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY
);
1141 result
= anv_bo_init_new(&fence
->bo
, device
, fence_size
);
1142 if (result
!= VK_SUCCESS
)
1146 anv_gem_mmap(device
, fence
->bo
.gem_handle
, 0, fence
->bo
.size
);
1147 batch
.next
= batch
.start
= fence
->bo
.map
;
1148 batch
.end
= fence
->bo
.map
+ fence
->bo
.size
;
1149 anv_batch_emit(&batch
, GEN7_MI_BATCH_BUFFER_END
);
1150 anv_batch_emit(&batch
, GEN7_MI_NOOP
);
1152 fence
->exec2_objects
[0].handle
= fence
->bo
.gem_handle
;
1153 fence
->exec2_objects
[0].relocation_count
= 0;
1154 fence
->exec2_objects
[0].relocs_ptr
= 0;
1155 fence
->exec2_objects
[0].alignment
= 0;
1156 fence
->exec2_objects
[0].offset
= fence
->bo
.offset
;
1157 fence
->exec2_objects
[0].flags
= 0;
1158 fence
->exec2_objects
[0].rsvd1
= 0;
1159 fence
->exec2_objects
[0].rsvd2
= 0;
1161 fence
->execbuf
.buffers_ptr
= (uintptr_t) fence
->exec2_objects
;
1162 fence
->execbuf
.buffer_count
= 1;
1163 fence
->execbuf
.batch_start_offset
= 0;
1164 fence
->execbuf
.batch_len
= batch
.next
- fence
->bo
.map
;
1165 fence
->execbuf
.cliprects_ptr
= 0;
1166 fence
->execbuf
.num_cliprects
= 0;
1167 fence
->execbuf
.DR1
= 0;
1168 fence
->execbuf
.DR4
= 0;
1170 fence
->execbuf
.flags
=
1171 I915_EXEC_HANDLE_LUT
| I915_EXEC_NO_RELOC
| I915_EXEC_RENDER
;
1172 fence
->execbuf
.rsvd1
= device
->context_id
;
1173 fence
->execbuf
.rsvd2
= 0;
1175 *pFence
= anv_fence_to_handle(fence
);
1180 anv_device_free(device
, fence
);
1185 void anv_DestroyFence(
1189 ANV_FROM_HANDLE(anv_device
, device
, _device
);
1190 ANV_FROM_HANDLE(anv_fence
, fence
, _fence
);
1192 anv_gem_munmap(fence
->bo
.map
, fence
->bo
.size
);
1193 anv_gem_close(device
, fence
->bo
.gem_handle
);
1194 anv_device_free(device
, fence
);
1197 VkResult
anv_ResetFences(
1199 uint32_t fenceCount
,
1200 const VkFence
* pFences
)
1202 for (uint32_t i
= 0; i
< fenceCount
; i
++) {
1203 ANV_FROM_HANDLE(anv_fence
, fence
, pFences
[i
]);
1204 fence
->ready
= false;
1210 VkResult
anv_GetFenceStatus(
1214 ANV_FROM_HANDLE(anv_device
, device
, _device
);
1215 ANV_FROM_HANDLE(anv_fence
, fence
, _fence
);
1222 ret
= anv_gem_wait(device
, fence
->bo
.gem_handle
, &t
);
1224 fence
->ready
= true;
1228 return VK_NOT_READY
;
1231 VkResult
anv_WaitForFences(
1233 uint32_t fenceCount
,
1234 const VkFence
* pFences
,
1238 ANV_FROM_HANDLE(anv_device
, device
, _device
);
1239 int64_t t
= timeout
;
1242 /* FIXME: handle !waitAll */
1244 for (uint32_t i
= 0; i
< fenceCount
; i
++) {
1245 ANV_FROM_HANDLE(anv_fence
, fence
, pFences
[i
]);
1246 ret
= anv_gem_wait(device
, fence
->bo
.gem_handle
, &t
);
1247 if (ret
== -1 && errno
== ETIME
)
1250 return vk_errorf(VK_ERROR_UNKNOWN
, "gem wait failed: %m");
1256 // Queue semaphore functions
1258 VkResult
anv_CreateSemaphore(
1260 const VkSemaphoreCreateInfo
* pCreateInfo
,
1261 VkSemaphore
* pSemaphore
)
1263 stub_return(VK_UNSUPPORTED
);
1266 void anv_DestroySemaphore(
1268 VkSemaphore semaphore
)
1273 VkResult
anv_QueueSignalSemaphore(
1275 VkSemaphore semaphore
)
1277 stub_return(VK_UNSUPPORTED
);
1280 VkResult
anv_QueueWaitSemaphore(
1282 VkSemaphore semaphore
)
1284 stub_return(VK_UNSUPPORTED
);
1289 VkResult
anv_CreateEvent(
1291 const VkEventCreateInfo
* pCreateInfo
,
1294 stub_return(VK_UNSUPPORTED
);
1297 void anv_DestroyEvent(
1304 VkResult
anv_GetEventStatus(
1308 stub_return(VK_UNSUPPORTED
);
1311 VkResult
anv_SetEvent(
1315 stub_return(VK_UNSUPPORTED
);
1318 VkResult
anv_ResetEvent(
1322 stub_return(VK_UNSUPPORTED
);
1327 VkResult
anv_CreateBuffer(
1329 const VkBufferCreateInfo
* pCreateInfo
,
1332 ANV_FROM_HANDLE(anv_device
, device
, _device
);
1333 struct anv_buffer
*buffer
;
1335 assert(pCreateInfo
->sType
== VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO
);
1337 buffer
= anv_device_alloc(device
, sizeof(*buffer
), 8,
1338 VK_SYSTEM_ALLOC_TYPE_API_OBJECT
);
1340 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY
);
1342 buffer
->size
= pCreateInfo
->size
;
1346 *pBuffer
= anv_buffer_to_handle(buffer
);
1351 void anv_DestroyBuffer(
1355 ANV_FROM_HANDLE(anv_device
, device
, _device
);
1356 ANV_FROM_HANDLE(anv_buffer
, buffer
, _buffer
);
1358 anv_device_free(device
, buffer
);
1362 anv_fill_buffer_surface_state(struct anv_device
*device
, void *state
,
1363 const struct anv_format
*format
,
1364 uint32_t offset
, uint32_t range
)
1366 switch (device
->info
.gen
) {
1368 gen7_fill_buffer_surface_state(state
, format
, offset
, range
);
1371 gen8_fill_buffer_surface_state(state
, format
, offset
, range
);
1374 unreachable("unsupported gen\n");
1379 anv_buffer_view_create(
1380 struct anv_device
* device
,
1381 const VkBufferViewCreateInfo
* pCreateInfo
,
1382 struct anv_buffer_view
** bview_out
)
1384 ANV_FROM_HANDLE(anv_buffer
, buffer
, pCreateInfo
->buffer
);
1385 struct anv_buffer_view
*bview
;
1387 assert(pCreateInfo
->sType
== VK_STRUCTURE_TYPE_BUFFER_VIEW_CREATE_INFO
);
1389 bview
= anv_device_alloc(device
, sizeof(*bview
), 8,
1390 VK_SYSTEM_ALLOC_TYPE_API_OBJECT
);
1392 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY
);
1394 *bview
= (struct anv_buffer_view
) {
1396 .offset
= buffer
->offset
+ pCreateInfo
->offset
,
1397 .surface_state
= anv_state_pool_alloc(&device
->surface_state_pool
, 64, 64),
1398 .format
= anv_format_for_vk_format(pCreateInfo
->format
),
1399 .range
= pCreateInfo
->range
,
1407 void anv_DestroyBufferView(
1409 VkBufferView _bview
)
1411 ANV_FROM_HANDLE(anv_device
, device
, _device
);
1412 ANV_FROM_HANDLE(anv_buffer_view
, bview
, _bview
);
1414 anv_state_pool_free(&device
->surface_state_pool
, bview
->surface_state
);
1415 anv_device_free(device
, bview
);
1418 void anv_DestroySampler(
1422 ANV_FROM_HANDLE(anv_device
, device
, _device
);
1423 ANV_FROM_HANDLE(anv_sampler
, sampler
, _sampler
);
1425 anv_device_free(device
, sampler
);
1428 // Descriptor set functions
1430 VkResult
anv_CreateDescriptorSetLayout(
1432 const VkDescriptorSetLayoutCreateInfo
* pCreateInfo
,
1433 VkDescriptorSetLayout
* pSetLayout
)
1435 ANV_FROM_HANDLE(anv_device
, device
, _device
);
1436 struct anv_descriptor_set_layout
*set_layout
;
1438 assert(pCreateInfo
->sType
== VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO
);
1440 uint32_t sampler_count
[VK_SHADER_STAGE_NUM
] = { 0, };
1441 uint32_t surface_count
[VK_SHADER_STAGE_NUM
] = { 0, };
1442 uint32_t num_dynamic_buffers
= 0;
1444 VkShaderStageFlags stages
= 0;
1447 for (uint32_t i
= 0; i
< pCreateInfo
->count
; i
++) {
1448 switch (pCreateInfo
->pBinding
[i
].descriptorType
) {
1449 case VK_DESCRIPTOR_TYPE_SAMPLER
:
1450 case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER
:
1451 for_each_bit(s
, pCreateInfo
->pBinding
[i
].stageFlags
)
1452 sampler_count
[s
] += pCreateInfo
->pBinding
[i
].arraySize
;
1458 switch (pCreateInfo
->pBinding
[i
].descriptorType
) {
1459 case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER
:
1460 case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE
:
1461 case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE
:
1462 case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER
:
1463 case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER
:
1464 case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER
:
1465 case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER
:
1466 case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC
:
1467 case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC
:
1468 case VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT
:
1469 for_each_bit(s
, pCreateInfo
->pBinding
[i
].stageFlags
)
1470 surface_count
[s
] += pCreateInfo
->pBinding
[i
].arraySize
;
1476 switch (pCreateInfo
->pBinding
[i
].descriptorType
) {
1477 case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC
:
1478 case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC
:
1479 num_dynamic_buffers
+= pCreateInfo
->pBinding
[i
].arraySize
;
1485 stages
|= pCreateInfo
->pBinding
[i
].stageFlags
;
1486 count
+= pCreateInfo
->pBinding
[i
].arraySize
;
1489 uint32_t sampler_total
= 0;
1490 uint32_t surface_total
= 0;
1491 for (uint32_t s
= 0; s
< VK_SHADER_STAGE_NUM
; s
++) {
1492 sampler_total
+= sampler_count
[s
];
1493 surface_total
+= surface_count
[s
];
1496 size_t size
= sizeof(*set_layout
) +
1497 (sampler_total
+ surface_total
) * sizeof(set_layout
->entries
[0]);
1498 set_layout
= anv_device_alloc(device
, size
, 8,
1499 VK_SYSTEM_ALLOC_TYPE_API_OBJECT
);
1501 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY
);
1503 set_layout
->num_dynamic_buffers
= num_dynamic_buffers
;
1504 set_layout
->count
= count
;
1505 set_layout
->shader_stages
= stages
;
1507 struct anv_descriptor_slot
*p
= set_layout
->entries
;
1508 struct anv_descriptor_slot
*sampler
[VK_SHADER_STAGE_NUM
];
1509 struct anv_descriptor_slot
*surface
[VK_SHADER_STAGE_NUM
];
1510 for (uint32_t s
= 0; s
< VK_SHADER_STAGE_NUM
; s
++) {
1511 set_layout
->stage
[s
].surface_count
= surface_count
[s
];
1512 set_layout
->stage
[s
].surface_start
= surface
[s
] = p
;
1513 p
+= surface_count
[s
];
1514 set_layout
->stage
[s
].sampler_count
= sampler_count
[s
];
1515 set_layout
->stage
[s
].sampler_start
= sampler
[s
] = p
;
1516 p
+= sampler_count
[s
];
1519 uint32_t descriptor
= 0;
1520 int8_t dynamic_slot
= 0;
1522 for (uint32_t i
= 0; i
< pCreateInfo
->count
; i
++) {
1523 switch (pCreateInfo
->pBinding
[i
].descriptorType
) {
1524 case VK_DESCRIPTOR_TYPE_SAMPLER
:
1525 case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER
:
1526 for_each_bit(s
, pCreateInfo
->pBinding
[i
].stageFlags
)
1527 for (uint32_t j
= 0; j
< pCreateInfo
->pBinding
[i
].arraySize
; j
++) {
1528 sampler
[s
]->index
= descriptor
+ j
;
1529 sampler
[s
]->dynamic_slot
= -1;
1537 switch (pCreateInfo
->pBinding
[i
].descriptorType
) {
1538 case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC
:
1539 case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC
:
1547 switch (pCreateInfo
->pBinding
[i
].descriptorType
) {
1548 case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER
:
1549 case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE
:
1550 case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE
:
1551 case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER
:
1552 case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER
:
1553 case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER
:
1554 case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER
:
1555 case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC
:
1556 case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC
:
1557 case VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT
:
1558 for_each_bit(s
, pCreateInfo
->pBinding
[i
].stageFlags
)
1559 for (uint32_t j
= 0; j
< pCreateInfo
->pBinding
[i
].arraySize
; j
++) {
1560 surface
[s
]->index
= descriptor
+ j
;
1562 surface
[s
]->dynamic_slot
= dynamic_slot
+ j
;
1564 surface
[s
]->dynamic_slot
= -1;
1573 dynamic_slot
+= pCreateInfo
->pBinding
[i
].arraySize
;
1575 descriptor
+= pCreateInfo
->pBinding
[i
].arraySize
;
1578 *pSetLayout
= anv_descriptor_set_layout_to_handle(set_layout
);
1583 void anv_DestroyDescriptorSetLayout(
1585 VkDescriptorSetLayout _set_layout
)
1587 ANV_FROM_HANDLE(anv_device
, device
, _device
);
1588 ANV_FROM_HANDLE(anv_descriptor_set_layout
, set_layout
, _set_layout
);
1590 anv_device_free(device
, set_layout
);
1593 VkResult
anv_CreateDescriptorPool(
1595 VkDescriptorPoolUsage poolUsage
,
1597 const VkDescriptorPoolCreateInfo
* pCreateInfo
,
1598 VkDescriptorPool
* pDescriptorPool
)
1600 anv_finishme("VkDescriptorPool is a stub");
1601 pDescriptorPool
->handle
= 1;
1605 void anv_DestroyDescriptorPool(
1607 VkDescriptorPool _pool
)
1609 anv_finishme("VkDescriptorPool is a stub: free the pool's descriptor sets");
1612 VkResult
anv_ResetDescriptorPool(
1614 VkDescriptorPool descriptorPool
)
1616 anv_finishme("VkDescriptorPool is a stub: free the pool's descriptor sets");
1621 anv_descriptor_set_create(struct anv_device
*device
,
1622 const struct anv_descriptor_set_layout
*layout
,
1623 struct anv_descriptor_set
**out_set
)
1625 struct anv_descriptor_set
*set
;
1626 size_t size
= sizeof(*set
) + layout
->count
* sizeof(set
->descriptors
[0]);
1628 set
= anv_device_alloc(device
, size
, 8, VK_SYSTEM_ALLOC_TYPE_API_OBJECT
);
1630 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY
);
1632 /* A descriptor set may not be 100% filled. Clear the set so we can can
1633 * later detect holes in it.
1635 memset(set
, 0, size
);
/* Free a descriptor set created by anv_descriptor_set_create().  The set
 * and its trailing descriptors array are one allocation.
 */
void
anv_descriptor_set_destroy(struct anv_device *device,
                           struct anv_descriptor_set *set)
{
   anv_device_free(device, set);
}
1649 VkResult
anv_AllocDescriptorSets(
1651 VkDescriptorPool descriptorPool
,
1652 VkDescriptorSetUsage setUsage
,
1654 const VkDescriptorSetLayout
* pSetLayouts
,
1655 VkDescriptorSet
* pDescriptorSets
)
1657 ANV_FROM_HANDLE(anv_device
, device
, _device
);
1659 VkResult result
= VK_SUCCESS
;
1660 struct anv_descriptor_set
*set
;
1663 for (i
= 0; i
< count
; i
++) {
1664 ANV_FROM_HANDLE(anv_descriptor_set_layout
, layout
, pSetLayouts
[i
]);
1666 result
= anv_descriptor_set_create(device
, layout
, &set
);
1667 if (result
!= VK_SUCCESS
)
1670 pDescriptorSets
[i
] = anv_descriptor_set_to_handle(set
);
1673 if (result
!= VK_SUCCESS
)
1674 anv_FreeDescriptorSets(_device
, descriptorPool
, i
, pDescriptorSets
);
1679 VkResult
anv_FreeDescriptorSets(
1681 VkDescriptorPool descriptorPool
,
1683 const VkDescriptorSet
* pDescriptorSets
)
1685 ANV_FROM_HANDLE(anv_device
, device
, _device
);
1687 for (uint32_t i
= 0; i
< count
; i
++) {
1688 ANV_FROM_HANDLE(anv_descriptor_set
, set
, pDescriptorSets
[i
]);
1690 anv_descriptor_set_destroy(device
, set
);
1696 VkResult
anv_UpdateDescriptorSets(
1698 uint32_t writeCount
,
1699 const VkWriteDescriptorSet
* pDescriptorWrites
,
1701 const VkCopyDescriptorSet
* pDescriptorCopies
)
1703 for (uint32_t i
= 0; i
< writeCount
; i
++) {
1704 const VkWriteDescriptorSet
*write
= &pDescriptorWrites
[i
];
1705 ANV_FROM_HANDLE(anv_descriptor_set
, set
, write
->destSet
);
1707 switch (write
->descriptorType
) {
1708 case VK_DESCRIPTOR_TYPE_SAMPLER
:
1709 case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER
:
1710 for (uint32_t j
= 0; j
< write
->count
; j
++) {
1711 ANV_FROM_HANDLE(anv_sampler
, sampler
,
1712 write
->pDescriptors
[j
].sampler
);
1714 set
->descriptors
[write
->destBinding
+ j
] = (struct anv_descriptor
) {
1715 .type
= ANV_DESCRIPTOR_TYPE_SAMPLER
,
1720 if (write
->descriptorType
== VK_DESCRIPTOR_TYPE_SAMPLER
)
1725 case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE
:
1726 case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE
:
1727 for (uint32_t j
= 0; j
< write
->count
; j
++) {
1728 ANV_FROM_HANDLE(anv_image_view
, iview
,
1729 write
->pDescriptors
[j
].imageView
);
1731 set
->descriptors
[write
->destBinding
+ j
] = (struct anv_descriptor
) {
1732 .type
= ANV_DESCRIPTOR_TYPE_IMAGE_VIEW
,
1733 .image_view
= iview
,
1738 case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER
:
1739 case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER
:
1740 anv_finishme("texel buffers not implemented");
1743 case VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT
:
1744 anv_finishme("input attachments not implemented");
1747 case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER
:
1748 case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER
:
1749 case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC
:
1750 case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC
:
1751 for (uint32_t j
= 0; j
< write
->count
; j
++) {
1752 ANV_FROM_HANDLE(anv_buffer_view
, bview
,
1753 write
->pDescriptors
[j
].bufferView
);
1755 set
->descriptors
[write
->destBinding
+ j
] = (struct anv_descriptor
) {
1756 .type
= ANV_DESCRIPTOR_TYPE_BUFFER_VIEW
,
1757 .buffer_view
= bview
,
1766 for (uint32_t i
= 0; i
< copyCount
; i
++) {
1767 const VkCopyDescriptorSet
*copy
= &pDescriptorCopies
[i
];
1768 ANV_FROM_HANDLE(anv_descriptor_set
, src
, copy
->destSet
);
1769 ANV_FROM_HANDLE(anv_descriptor_set
, dest
, copy
->destSet
);
1770 for (uint32_t j
= 0; j
< copy
->count
; j
++) {
1771 dest
->descriptors
[copy
->destBinding
+ j
] =
1772 src
->descriptors
[copy
->srcBinding
+ j
];
1779 // State object functions
/* Clamp x to the inclusive range [min, max].
 *
 * The body of this helper was lost in the extraction; it is restored here
 * to the standard saturating-clamp semantics its callers rely on (scissor
 * rectangle math is done in int64_t so that overflow saturates instead of
 * wrapping, e.g. clamp_int64(s->offset.y, 0, max)).
 */
static inline int64_t
clamp_int64(int64_t x, int64_t min, int64_t max)
{
   if (x < min)
      return min;
   else if (x < max)
      return x;
   else
      return max;
}
1792 VkResult
anv_CreateDynamicViewportState(
1794 const VkDynamicViewportStateCreateInfo
* pCreateInfo
,
1795 VkDynamicViewportState
* pState
)
1797 ANV_FROM_HANDLE(anv_device
, device
, _device
);
1798 struct anv_dynamic_vp_state
*state
;
1800 assert(pCreateInfo
->sType
== VK_STRUCTURE_TYPE_DYNAMIC_VIEWPORT_STATE_CREATE_INFO
);
1802 state
= anv_device_alloc(device
, sizeof(*state
), 8,
1803 VK_SYSTEM_ALLOC_TYPE_API_OBJECT
);
1805 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY
);
1807 unsigned count
= pCreateInfo
->viewportAndScissorCount
;
1808 state
->sf_clip_vp
= anv_state_pool_alloc(&device
->dynamic_state_pool
,
1810 state
->cc_vp
= anv_state_pool_alloc(&device
->dynamic_state_pool
,
1812 state
->scissor
= anv_state_pool_alloc(&device
->dynamic_state_pool
,
1815 for (uint32_t i
= 0; i
< pCreateInfo
->viewportAndScissorCount
; i
++) {
1816 const VkViewport
*vp
= &pCreateInfo
->pViewports
[i
];
1817 const VkRect2D
*s
= &pCreateInfo
->pScissors
[i
];
1819 /* The gen7 state struct has just the matrix and guardband fields, the
1820 * gen8 struct adds the min/max viewport fields. */
1821 struct GEN8_SF_CLIP_VIEWPORT sf_clip_viewport
= {
1822 .ViewportMatrixElementm00
= vp
->width
/ 2,
1823 .ViewportMatrixElementm11
= vp
->height
/ 2,
1824 .ViewportMatrixElementm22
= (vp
->maxDepth
- vp
->minDepth
) / 2,
1825 .ViewportMatrixElementm30
= vp
->originX
+ vp
->width
/ 2,
1826 .ViewportMatrixElementm31
= vp
->originY
+ vp
->height
/ 2,
1827 .ViewportMatrixElementm32
= (vp
->maxDepth
+ vp
->minDepth
) / 2,
1828 .XMinClipGuardband
= -1.0f
,
1829 .XMaxClipGuardband
= 1.0f
,
1830 .YMinClipGuardband
= -1.0f
,
1831 .YMaxClipGuardband
= 1.0f
,
1832 .XMinViewPort
= vp
->originX
,
1833 .XMaxViewPort
= vp
->originX
+ vp
->width
- 1,
1834 .YMinViewPort
= vp
->originY
,
1835 .YMaxViewPort
= vp
->originY
+ vp
->height
- 1,
1838 struct GEN7_CC_VIEWPORT cc_viewport
= {
1839 .MinimumDepth
= vp
->minDepth
,
1840 .MaximumDepth
= vp
->maxDepth
1843 /* Since xmax and ymax are inclusive, we have to have xmax < xmin or
1844 * ymax < ymin for empty clips. In case clip x, y, width height are all
1845 * 0, the clamps below produce 0 for xmin, ymin, xmax, ymax, which isn't
1846 * what we want. Just special case empty clips and produce a canonical
1848 static const struct GEN7_SCISSOR_RECT empty_scissor
= {
1849 .ScissorRectangleYMin
= 1,
1850 .ScissorRectangleXMin
= 1,
1851 .ScissorRectangleYMax
= 0,
1852 .ScissorRectangleXMax
= 0
1855 const int max
= 0xffff;
1856 struct GEN7_SCISSOR_RECT scissor
= {
1857 /* Do this math using int64_t so overflow gets clamped correctly. */
1858 .ScissorRectangleYMin
= clamp_int64(s
->offset
.y
, 0, max
),
1859 .ScissorRectangleXMin
= clamp_int64(s
->offset
.x
, 0, max
),
1860 .ScissorRectangleYMax
= clamp_int64((uint64_t) s
->offset
.y
+ s
->extent
.height
- 1, 0, max
),
1861 .ScissorRectangleXMax
= clamp_int64((uint64_t) s
->offset
.x
+ s
->extent
.width
- 1, 0, max
)
1864 GEN8_SF_CLIP_VIEWPORT_pack(NULL
, state
->sf_clip_vp
.map
+ i
* 64, &sf_clip_viewport
);
1865 GEN7_CC_VIEWPORT_pack(NULL
, state
->cc_vp
.map
+ i
* 32, &cc_viewport
);
1867 if (s
->extent
.width
<= 0 || s
->extent
.height
<= 0) {
1868 GEN7_SCISSOR_RECT_pack(NULL
, state
->scissor
.map
+ i
* 32, &empty_scissor
);
1870 GEN7_SCISSOR_RECT_pack(NULL
, state
->scissor
.map
+ i
* 32, &scissor
);
1874 *pState
= anv_dynamic_vp_state_to_handle(state
);
1879 void anv_DestroyDynamicViewportState(
1881 VkDynamicViewportState _vp_state
)
1883 ANV_FROM_HANDLE(anv_device
, device
, _device
);
1884 ANV_FROM_HANDLE(anv_dynamic_vp_state
, vp_state
, _vp_state
);
1886 anv_state_pool_free(&device
->dynamic_state_pool
, vp_state
->sf_clip_vp
);
1887 anv_state_pool_free(&device
->dynamic_state_pool
, vp_state
->cc_vp
);
1888 anv_state_pool_free(&device
->dynamic_state_pool
, vp_state
->scissor
);
1890 anv_device_free(device
, vp_state
);
1893 void anv_DestroyDynamicRasterState(
1895 VkDynamicRasterState _rs_state
)
1897 ANV_FROM_HANDLE(anv_device
, device
, _device
);
1898 ANV_FROM_HANDLE(anv_dynamic_rs_state
, rs_state
, _rs_state
);
1900 anv_device_free(device
, rs_state
);
1903 VkResult
anv_CreateDynamicColorBlendState(
1905 const VkDynamicColorBlendStateCreateInfo
* pCreateInfo
,
1906 VkDynamicColorBlendState
* pState
)
1908 ANV_FROM_HANDLE(anv_device
, device
, _device
);
1909 struct anv_dynamic_cb_state
*state
;
1911 assert(pCreateInfo
->sType
== VK_STRUCTURE_TYPE_DYNAMIC_COLOR_BLEND_STATE_CREATE_INFO
);
1913 state
= anv_device_alloc(device
, sizeof(*state
), 8,
1914 VK_SYSTEM_ALLOC_TYPE_API_OBJECT
);
1916 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY
);
1918 struct GEN7_COLOR_CALC_STATE color_calc_state
= {
1919 .BlendConstantColorRed
= pCreateInfo
->blendConst
[0],
1920 .BlendConstantColorGreen
= pCreateInfo
->blendConst
[1],
1921 .BlendConstantColorBlue
= pCreateInfo
->blendConst
[2],
1922 .BlendConstantColorAlpha
= pCreateInfo
->blendConst
[3]
1925 GEN7_COLOR_CALC_STATE_pack(NULL
, state
->color_calc_state
, &color_calc_state
);
1927 *pState
= anv_dynamic_cb_state_to_handle(state
);
1932 void anv_DestroyDynamicColorBlendState(
1934 VkDynamicColorBlendState _cb_state
)
1936 ANV_FROM_HANDLE(anv_device
, device
, _device
);
1937 ANV_FROM_HANDLE(anv_dynamic_cb_state
, cb_state
, _cb_state
);
1939 anv_device_free(device
, cb_state
);
1942 void anv_DestroyDynamicDepthStencilState(
1944 VkDynamicDepthStencilState _ds_state
)
1946 ANV_FROM_HANDLE(anv_device
, device
, _device
);
1947 ANV_FROM_HANDLE(anv_dynamic_ds_state
, ds_state
, _ds_state
);
1949 anv_device_free(device
, ds_state
);
1952 VkResult
anv_CreateFramebuffer(
1954 const VkFramebufferCreateInfo
* pCreateInfo
,
1955 VkFramebuffer
* pFramebuffer
)
1957 ANV_FROM_HANDLE(anv_device
, device
, _device
);
1958 struct anv_framebuffer
*framebuffer
;
1960 assert(pCreateInfo
->sType
== VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO
);
1962 size_t size
= sizeof(*framebuffer
) +
1963 sizeof(struct anv_attachment_view
*) * pCreateInfo
->attachmentCount
;
1964 framebuffer
= anv_device_alloc(device
, size
, 8,
1965 VK_SYSTEM_ALLOC_TYPE_API_OBJECT
);
1966 if (framebuffer
== NULL
)
1967 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY
);
1969 framebuffer
->attachment_count
= pCreateInfo
->attachmentCount
;
1970 for (uint32_t i
= 0; i
< pCreateInfo
->attachmentCount
; i
++) {
1971 ANV_FROM_HANDLE(anv_attachment_view
, aview
,
1972 pCreateInfo
->pAttachments
[i
].view
);
1974 framebuffer
->attachments
[i
] = aview
;
1977 framebuffer
->width
= pCreateInfo
->width
;
1978 framebuffer
->height
= pCreateInfo
->height
;
1979 framebuffer
->layers
= pCreateInfo
->layers
;
1981 anv_CreateDynamicViewportState(anv_device_to_handle(device
),
1982 &(VkDynamicViewportStateCreateInfo
) {
1983 .sType
= VK_STRUCTURE_TYPE_DYNAMIC_VIEWPORT_STATE_CREATE_INFO
,
1984 .viewportAndScissorCount
= 1,
1985 .pViewports
= (VkViewport
[]) {
1989 .width
= pCreateInfo
->width
,
1990 .height
= pCreateInfo
->height
,
1995 .pScissors
= (VkRect2D
[]) {
1997 { pCreateInfo
->width
, pCreateInfo
->height
} },
2000 &framebuffer
->vp_state
);
2002 *pFramebuffer
= anv_framebuffer_to_handle(framebuffer
);
2007 void anv_DestroyFramebuffer(
2011 ANV_FROM_HANDLE(anv_device
, device
, _device
);
2012 ANV_FROM_HANDLE(anv_framebuffer
, fb
, _fb
);
2014 anv_DestroyDynamicViewportState(anv_device_to_handle(device
),
2016 anv_device_free(device
, fb
);
2019 VkResult
anv_CreateRenderPass(
2021 const VkRenderPassCreateInfo
* pCreateInfo
,
2022 VkRenderPass
* pRenderPass
)
2024 ANV_FROM_HANDLE(anv_device
, device
, _device
);
2025 struct anv_render_pass
*pass
;
2027 size_t attachments_offset
;
2029 assert(pCreateInfo
->sType
== VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO
);
2031 size
= sizeof(*pass
);
2032 size
+= pCreateInfo
->subpassCount
* sizeof(pass
->subpasses
[0]);
2033 attachments_offset
= size
;
2034 size
+= pCreateInfo
->attachmentCount
* sizeof(pass
->attachments
[0]);
2036 pass
= anv_device_alloc(device
, size
, 8,
2037 VK_SYSTEM_ALLOC_TYPE_API_OBJECT
);
2039 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY
);
2041 /* Clear the subpasses along with the parent pass. This required because
2042 * each array member of anv_subpass must be a valid pointer if not NULL.
2044 memset(pass
, 0, size
);
2045 pass
->attachment_count
= pCreateInfo
->attachmentCount
;
2046 pass
->subpass_count
= pCreateInfo
->subpassCount
;
2047 pass
->attachments
= (void *) pass
+ attachments_offset
;
2049 for (uint32_t i
= 0; i
< pCreateInfo
->attachmentCount
; i
++) {
2050 struct anv_render_pass_attachment
*att
= &pass
->attachments
[i
];
2052 att
->format
= anv_format_for_vk_format(pCreateInfo
->pAttachments
[i
].format
);
2053 att
->samples
= pCreateInfo
->pAttachments
[i
].samples
;
2054 att
->load_op
= pCreateInfo
->pAttachments
[i
].loadOp
;
2055 att
->stencil_load_op
= pCreateInfo
->pAttachments
[i
].stencilLoadOp
;
2056 // att->store_op = pCreateInfo->pAttachments[i].storeOp;
2057 // att->stencil_store_op = pCreateInfo->pAttachments[i].stencilStoreOp;
2059 if (att
->load_op
== VK_ATTACHMENT_LOAD_OP_CLEAR
) {
2060 if (anv_format_is_color(att
->format
)) {
2061 ++pass
->num_color_clear_attachments
;
2062 } else if (att
->format
->depth_format
) {
2063 pass
->has_depth_clear_attachment
= true;
2065 } else if (att
->stencil_load_op
== VK_ATTACHMENT_LOAD_OP_CLEAR
) {
2066 assert(att
->format
->has_stencil
);
2067 pass
->has_stencil_clear_attachment
= true;
2071 for (uint32_t i
= 0; i
< pCreateInfo
->subpassCount
; i
++) {
2072 const VkSubpassDescription
*desc
= &pCreateInfo
->pSubpasses
[i
];
2073 struct anv_subpass
*subpass
= &pass
->subpasses
[i
];
2075 subpass
->input_count
= desc
->inputCount
;
2076 subpass
->color_count
= desc
->colorCount
;
2078 if (desc
->inputCount
> 0) {
2079 subpass
->input_attachments
=
2080 anv_device_alloc(device
, desc
->inputCount
* sizeof(uint32_t),
2081 8, VK_SYSTEM_ALLOC_TYPE_API_OBJECT
);
2083 for (uint32_t j
= 0; j
< desc
->inputCount
; j
++) {
2084 subpass
->input_attachments
[j
]
2085 = desc
->pInputAttachments
[j
].attachment
;
2089 if (desc
->colorCount
> 0) {
2090 subpass
->color_attachments
=
2091 anv_device_alloc(device
, desc
->colorCount
* sizeof(uint32_t),
2092 8, VK_SYSTEM_ALLOC_TYPE_API_OBJECT
);
2094 for (uint32_t j
= 0; j
< desc
->colorCount
; j
++) {
2095 subpass
->color_attachments
[j
]
2096 = desc
->pColorAttachments
[j
].attachment
;
2100 if (desc
->pResolveAttachments
) {
2101 subpass
->resolve_attachments
=
2102 anv_device_alloc(device
, desc
->colorCount
* sizeof(uint32_t),
2103 8, VK_SYSTEM_ALLOC_TYPE_API_OBJECT
);
2105 for (uint32_t j
= 0; j
< desc
->colorCount
; j
++) {
2106 subpass
->resolve_attachments
[j
]
2107 = desc
->pResolveAttachments
[j
].attachment
;
2111 subpass
->depth_stencil_attachment
= desc
->depthStencilAttachment
.attachment
;
2114 *pRenderPass
= anv_render_pass_to_handle(pass
);
2119 void anv_DestroyRenderPass(
2123 ANV_FROM_HANDLE(anv_device
, device
, _device
);
2124 ANV_FROM_HANDLE(anv_render_pass
, pass
, _pass
);
2126 for (uint32_t i
= 0; i
< pass
->subpass_count
; i
++) {
2127 /* In VkSubpassCreateInfo, each of the attachment arrays may be null.
2128 * Don't free the null arrays.
2130 struct anv_subpass
*subpass
= &pass
->subpasses
[i
];
2132 anv_device_free(device
, subpass
->input_attachments
);
2133 anv_device_free(device
, subpass
->color_attachments
);
2134 anv_device_free(device
, subpass
->resolve_attachments
);
2137 anv_device_free(device
, pass
);
2140 VkResult
anv_GetRenderAreaGranularity(
2142 VkRenderPass renderPass
,
2143 VkExtent2D
* pGranularity
)
2145 *pGranularity
= (VkExtent2D
) { 1, 1 };
2150 void vkCmdDbgMarkerBegin(
2151 VkCmdBuffer cmdBuffer
,
2152 const char* pMarker
)
2153 __attribute__ ((visibility ("default")));
2155 void vkCmdDbgMarkerEnd(
2156 VkCmdBuffer cmdBuffer
)
2157 __attribute__ ((visibility ("default")));
2159 void vkCmdDbgMarkerBegin(
2160 VkCmdBuffer cmdBuffer
,
2161 const char* pMarker
)
2165 void vkCmdDbgMarkerEnd(
2166 VkCmdBuffer cmdBuffer
)