2 * Copyright © 2015 Intel Corporation
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
30 #include "util/mesa-sha1.h"
33 #include "anv_private.h"
36 * Descriptor set layouts.
39 static enum anv_descriptor_data
40 anv_descriptor_data_for_type(const struct anv_physical_device
*device
,
41 VkDescriptorType type
)
43 enum anv_descriptor_data data
= 0;
46 case VK_DESCRIPTOR_TYPE_SAMPLER
:
47 data
= ANV_DESCRIPTOR_SAMPLER_STATE
;
50 case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER
:
51 data
= ANV_DESCRIPTOR_SURFACE_STATE
|
52 ANV_DESCRIPTOR_SAMPLER_STATE
;
55 case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE
:
56 case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER
:
57 case VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT
:
58 data
= ANV_DESCRIPTOR_SURFACE_STATE
;
61 case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE
:
62 case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER
:
63 data
= ANV_DESCRIPTOR_SURFACE_STATE
;
64 if (device
->info
.gen
< 9)
65 data
|= ANV_DESCRIPTOR_IMAGE_PARAM
;
68 case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER
:
69 case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER
:
70 data
= ANV_DESCRIPTOR_SURFACE_STATE
|
71 ANV_DESCRIPTOR_BUFFER_VIEW
;
74 case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC
:
75 case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC
:
76 data
= ANV_DESCRIPTOR_SURFACE_STATE
;
79 case VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK_EXT
:
80 data
= ANV_DESCRIPTOR_INLINE_UNIFORM
;
84 unreachable("Unsupported descriptor type");
87 /* On gen8 and above when we have softpin enabled, we also need to push
88 * SSBO address ranges so that we can use A64 messages in the shader.
90 if (device
->has_a64_buffer_access
&&
91 (type
== VK_DESCRIPTOR_TYPE_STORAGE_BUFFER
||
92 type
== VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC
))
93 data
|= ANV_DESCRIPTOR_ADDRESS_RANGE
;
99 anv_descriptor_data_size(enum anv_descriptor_data data
)
103 if (data
& ANV_DESCRIPTOR_IMAGE_PARAM
)
104 size
+= BRW_IMAGE_PARAM_SIZE
* 4;
106 if (data
& ANV_DESCRIPTOR_ADDRESS_RANGE
)
107 size
+= sizeof(struct anv_address_range_descriptor
);
112 /** Returns the size in bytes of each descriptor with the given layout */
114 anv_descriptor_size(const struct anv_descriptor_set_binding_layout
*layout
)
116 if (layout
->data
& ANV_DESCRIPTOR_INLINE_UNIFORM
) {
117 assert(layout
->data
== ANV_DESCRIPTOR_INLINE_UNIFORM
);
118 return layout
->array_size
;
121 return anv_descriptor_data_size(layout
->data
);
124 /** Returns the size in bytes of each descriptor of the given type
126 * This version of the function does not have access to the entire layout so
127 * it may only work on certain descriptor types where the descriptor size is
128 * entirely determined by the descriptor type. Whenever possible, code should
129 * use anv_descriptor_size() instead.
132 anv_descriptor_type_size(const struct anv_physical_device
*pdevice
,
133 VkDescriptorType type
)
135 assert(type
!= VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK_EXT
);
136 return anv_descriptor_data_size(anv_descriptor_data_for_type(pdevice
, type
));
140 anv_descriptor_data_supports_bindless(const struct anv_physical_device
*pdevice
,
141 enum anv_descriptor_data data
,
144 if (data
& ANV_DESCRIPTOR_ADDRESS_RANGE
) {
145 assert(pdevice
->has_a64_buffer_access
);
153 anv_descriptor_supports_bindless(const struct anv_physical_device
*pdevice
,
154 const struct anv_descriptor_set_binding_layout
*binding
,
157 return anv_descriptor_data_supports_bindless(pdevice
, binding
->data
,
162 anv_descriptor_requires_bindless(const struct anv_physical_device
*pdevice
,
163 const struct anv_descriptor_set_binding_layout
*binding
,
166 if (pdevice
->always_use_bindless
)
167 return anv_descriptor_supports_bindless(pdevice
, binding
, sampler
);
172 void anv_GetDescriptorSetLayoutSupport(
174 const VkDescriptorSetLayoutCreateInfo
* pCreateInfo
,
175 VkDescriptorSetLayoutSupport
* pSupport
)
177 ANV_FROM_HANDLE(anv_device
, device
, _device
);
178 const struct anv_physical_device
*pdevice
=
179 &device
->instance
->physicalDevice
;
181 uint32_t surface_count
[MESA_SHADER_STAGES
] = { 0, };
183 for (uint32_t b
= 0; b
< pCreateInfo
->bindingCount
; b
++) {
184 const VkDescriptorSetLayoutBinding
*binding
= &pCreateInfo
->pBindings
[b
];
186 enum anv_descriptor_data desc_data
=
187 anv_descriptor_data_for_type(pdevice
, binding
->descriptorType
);
189 switch (binding
->descriptorType
) {
190 case VK_DESCRIPTOR_TYPE_SAMPLER
:
191 /* There is no real limit on samplers */
194 case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER
:
195 if (anv_descriptor_data_supports_bindless(pdevice
, desc_data
, false))
198 if (binding
->pImmutableSamplers
) {
199 for (uint32_t i
= 0; i
< binding
->descriptorCount
; i
++) {
200 ANV_FROM_HANDLE(anv_sampler
, sampler
,
201 binding
->pImmutableSamplers
[i
]);
202 anv_foreach_stage(s
, binding
->stageFlags
)
203 surface_count
[s
] += sampler
->n_planes
;
206 anv_foreach_stage(s
, binding
->stageFlags
)
207 surface_count
[s
] += binding
->descriptorCount
;
212 if (anv_descriptor_data_supports_bindless(pdevice
, desc_data
, false))
215 anv_foreach_stage(s
, binding
->stageFlags
)
216 surface_count
[s
] += binding
->descriptorCount
;
221 bool supported
= true;
222 for (unsigned s
= 0; s
< MESA_SHADER_STAGES
; s
++) {
223 /* Our maximum binding table size is 240 and we need to reserve 8 for
226 if (surface_count
[s
] >= MAX_BINDING_TABLE_SIZE
- MAX_RTS
)
230 pSupport
->supported
= supported
;
233 VkResult
anv_CreateDescriptorSetLayout(
235 const VkDescriptorSetLayoutCreateInfo
* pCreateInfo
,
236 const VkAllocationCallbacks
* pAllocator
,
237 VkDescriptorSetLayout
* pSetLayout
)
239 ANV_FROM_HANDLE(anv_device
, device
, _device
);
241 assert(pCreateInfo
->sType
== VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO
);
243 uint32_t max_binding
= 0;
244 uint32_t immutable_sampler_count
= 0;
245 for (uint32_t j
= 0; j
< pCreateInfo
->bindingCount
; j
++) {
246 max_binding
= MAX2(max_binding
, pCreateInfo
->pBindings
[j
].binding
);
248 /* From the Vulkan 1.1.97 spec for VkDescriptorSetLayoutBinding:
250 * "If descriptorType specifies a VK_DESCRIPTOR_TYPE_SAMPLER or
251 * VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER type descriptor, then
252 * pImmutableSamplers can be used to initialize a set of immutable
253 * samplers. [...] If descriptorType is not one of these descriptor
254 * types, then pImmutableSamplers is ignored.
256 * We need to be careful here and only parse pImmutableSamplers if we
257 * have one of the right descriptor types.
259 VkDescriptorType desc_type
= pCreateInfo
->pBindings
[j
].descriptorType
;
260 if ((desc_type
== VK_DESCRIPTOR_TYPE_SAMPLER
||
261 desc_type
== VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER
) &&
262 pCreateInfo
->pBindings
[j
].pImmutableSamplers
)
263 immutable_sampler_count
+= pCreateInfo
->pBindings
[j
].descriptorCount
;
266 struct anv_descriptor_set_layout
*set_layout
;
267 struct anv_descriptor_set_binding_layout
*bindings
;
268 struct anv_sampler
**samplers
;
270 /* We need to allocate decriptor set layouts off the device allocator
271 * with DEVICE scope because they are reference counted and may not be
272 * destroyed when vkDestroyDescriptorSetLayout is called.
275 anv_multialloc_add(&ma
, &set_layout
, 1);
276 anv_multialloc_add(&ma
, &bindings
, max_binding
+ 1);
277 anv_multialloc_add(&ma
, &samplers
, immutable_sampler_count
);
279 if (!anv_multialloc_alloc(&ma
, &device
->alloc
,
280 VK_SYSTEM_ALLOCATION_SCOPE_DEVICE
))
281 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY
);
283 memset(set_layout
, 0, sizeof(*set_layout
));
284 set_layout
->ref_cnt
= 1;
285 set_layout
->binding_count
= max_binding
+ 1;
287 for (uint32_t b
= 0; b
<= max_binding
; b
++) {
288 /* Initialize all binding_layout entries to -1 */
289 memset(&set_layout
->binding
[b
], -1, sizeof(set_layout
->binding
[b
]));
291 set_layout
->binding
[b
].data
= 0;
292 set_layout
->binding
[b
].max_plane_count
= 0;
293 set_layout
->binding
[b
].array_size
= 0;
294 set_layout
->binding
[b
].immutable_samplers
= NULL
;
297 /* Initialize all samplers to 0 */
298 memset(samplers
, 0, immutable_sampler_count
* sizeof(*samplers
));
300 uint32_t buffer_view_count
= 0;
301 uint32_t dynamic_offset_count
= 0;
302 uint32_t descriptor_buffer_size
= 0;
304 for (uint32_t j
= 0; j
< pCreateInfo
->bindingCount
; j
++) {
305 const VkDescriptorSetLayoutBinding
*binding
= &pCreateInfo
->pBindings
[j
];
306 uint32_t b
= binding
->binding
;
307 /* We temporarily store the pointer to the binding in the
308 * immutable_samplers pointer. This provides us with a quick-and-dirty
309 * way to sort the bindings by binding number.
311 set_layout
->binding
[b
].immutable_samplers
= (void *)binding
;
314 for (uint32_t b
= 0; b
<= max_binding
; b
++) {
315 const VkDescriptorSetLayoutBinding
*binding
=
316 (void *)set_layout
->binding
[b
].immutable_samplers
;
321 /* We temporarily stashed the pointer to the binding in the
322 * immutable_samplers pointer. Now that we've pulled it back out
323 * again, we reset immutable_samplers to NULL.
325 set_layout
->binding
[b
].immutable_samplers
= NULL
;
327 if (binding
->descriptorCount
== 0)
331 set_layout
->binding
[b
].type
= binding
->descriptorType
;
333 set_layout
->binding
[b
].data
=
334 anv_descriptor_data_for_type(&device
->instance
->physicalDevice
,
335 binding
->descriptorType
);
336 set_layout
->binding
[b
].array_size
= binding
->descriptorCount
;
337 set_layout
->binding
[b
].descriptor_index
= set_layout
->size
;
338 set_layout
->size
+= binding
->descriptorCount
;
340 if (set_layout
->binding
[b
].data
& ANV_DESCRIPTOR_BUFFER_VIEW
) {
341 set_layout
->binding
[b
].buffer_view_index
= buffer_view_count
;
342 buffer_view_count
+= binding
->descriptorCount
;
345 switch (binding
->descriptorType
) {
346 case VK_DESCRIPTOR_TYPE_SAMPLER
:
347 case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER
:
348 set_layout
->binding
[b
].max_plane_count
= 1;
349 if (binding
->pImmutableSamplers
) {
350 set_layout
->binding
[b
].immutable_samplers
= samplers
;
351 samplers
+= binding
->descriptorCount
;
353 for (uint32_t i
= 0; i
< binding
->descriptorCount
; i
++) {
354 ANV_FROM_HANDLE(anv_sampler
, sampler
,
355 binding
->pImmutableSamplers
[i
]);
357 set_layout
->binding
[b
].immutable_samplers
[i
] = sampler
;
358 if (set_layout
->binding
[b
].max_plane_count
< sampler
->n_planes
)
359 set_layout
->binding
[b
].max_plane_count
= sampler
->n_planes
;
364 case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE
:
365 set_layout
->binding
[b
].max_plane_count
= 1;
372 switch (binding
->descriptorType
) {
373 case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC
:
374 case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC
:
375 set_layout
->binding
[b
].dynamic_offset_index
= dynamic_offset_count
;
376 dynamic_offset_count
+= binding
->descriptorCount
;
383 if (binding
->descriptorType
==
384 VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK_EXT
) {
385 /* Inline uniform blocks are specified to use the descriptor array
386 * size as the size in bytes of the block.
388 descriptor_buffer_size
= align_u32(descriptor_buffer_size
, 32);
389 set_layout
->binding
[b
].descriptor_offset
= descriptor_buffer_size
;
390 descriptor_buffer_size
+= binding
->descriptorCount
;
392 set_layout
->binding
[b
].descriptor_offset
= descriptor_buffer_size
;
393 descriptor_buffer_size
+= anv_descriptor_size(&set_layout
->binding
[b
]) *
394 binding
->descriptorCount
;
397 set_layout
->shader_stages
|= binding
->stageFlags
;
400 set_layout
->buffer_view_count
= buffer_view_count
;
401 set_layout
->dynamic_offset_count
= dynamic_offset_count
;
402 set_layout
->descriptor_buffer_size
= descriptor_buffer_size
;
404 *pSetLayout
= anv_descriptor_set_layout_to_handle(set_layout
);
409 void anv_DestroyDescriptorSetLayout(
411 VkDescriptorSetLayout _set_layout
,
412 const VkAllocationCallbacks
* pAllocator
)
414 ANV_FROM_HANDLE(anv_device
, device
, _device
);
415 ANV_FROM_HANDLE(anv_descriptor_set_layout
, set_layout
, _set_layout
);
420 anv_descriptor_set_layout_unref(device
, set_layout
);
/* Hash a single lvalue into the running SHA-1 context. */
#define SHA1_UPDATE_VALUE(ctx, x) _mesa_sha1_update(ctx, &(x), sizeof(x));
426 sha1_update_immutable_sampler(struct mesa_sha1
*ctx
,
427 const struct anv_sampler
*sampler
)
429 if (!sampler
->conversion
)
432 /* The only thing that affects the shader is ycbcr conversion */
433 _mesa_sha1_update(ctx
, sampler
->conversion
,
434 sizeof(*sampler
->conversion
));
438 sha1_update_descriptor_set_binding_layout(struct mesa_sha1
*ctx
,
439 const struct anv_descriptor_set_binding_layout
*layout
)
441 SHA1_UPDATE_VALUE(ctx
, layout
->data
);
442 SHA1_UPDATE_VALUE(ctx
, layout
->max_plane_count
);
443 SHA1_UPDATE_VALUE(ctx
, layout
->array_size
);
444 SHA1_UPDATE_VALUE(ctx
, layout
->descriptor_index
);
445 SHA1_UPDATE_VALUE(ctx
, layout
->dynamic_offset_index
);
446 SHA1_UPDATE_VALUE(ctx
, layout
->buffer_view_index
);
447 SHA1_UPDATE_VALUE(ctx
, layout
->descriptor_offset
);
449 if (layout
->immutable_samplers
) {
450 for (uint16_t i
= 0; i
< layout
->array_size
; i
++)
451 sha1_update_immutable_sampler(ctx
, layout
->immutable_samplers
[i
]);
456 sha1_update_descriptor_set_layout(struct mesa_sha1
*ctx
,
457 const struct anv_descriptor_set_layout
*layout
)
459 SHA1_UPDATE_VALUE(ctx
, layout
->binding_count
);
460 SHA1_UPDATE_VALUE(ctx
, layout
->size
);
461 SHA1_UPDATE_VALUE(ctx
, layout
->shader_stages
);
462 SHA1_UPDATE_VALUE(ctx
, layout
->buffer_view_count
);
463 SHA1_UPDATE_VALUE(ctx
, layout
->dynamic_offset_count
);
464 SHA1_UPDATE_VALUE(ctx
, layout
->descriptor_buffer_size
);
466 for (uint16_t i
= 0; i
< layout
->binding_count
; i
++)
467 sha1_update_descriptor_set_binding_layout(ctx
, &layout
->binding
[i
]);
471 * Pipeline layouts. These have nothing to do with the pipeline. They are
472 * just multiple descriptor set layouts pasted together
475 VkResult
anv_CreatePipelineLayout(
477 const VkPipelineLayoutCreateInfo
* pCreateInfo
,
478 const VkAllocationCallbacks
* pAllocator
,
479 VkPipelineLayout
* pPipelineLayout
)
481 ANV_FROM_HANDLE(anv_device
, device
, _device
);
482 struct anv_pipeline_layout
*layout
;
484 assert(pCreateInfo
->sType
== VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO
);
486 layout
= vk_alloc2(&device
->alloc
, pAllocator
, sizeof(*layout
), 8,
487 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT
);
489 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY
);
491 layout
->num_sets
= pCreateInfo
->setLayoutCount
;
493 unsigned dynamic_offset_count
= 0;
495 for (uint32_t set
= 0; set
< pCreateInfo
->setLayoutCount
; set
++) {
496 ANV_FROM_HANDLE(anv_descriptor_set_layout
, set_layout
,
497 pCreateInfo
->pSetLayouts
[set
]);
498 layout
->set
[set
].layout
= set_layout
;
499 anv_descriptor_set_layout_ref(set_layout
);
501 layout
->set
[set
].dynamic_offset_start
= dynamic_offset_count
;
502 for (uint32_t b
= 0; b
< set_layout
->binding_count
; b
++) {
503 if (set_layout
->binding
[b
].dynamic_offset_index
< 0)
506 dynamic_offset_count
+= set_layout
->binding
[b
].array_size
;
510 struct mesa_sha1 ctx
;
511 _mesa_sha1_init(&ctx
);
512 for (unsigned s
= 0; s
< layout
->num_sets
; s
++) {
513 sha1_update_descriptor_set_layout(&ctx
, layout
->set
[s
].layout
);
514 _mesa_sha1_update(&ctx
, &layout
->set
[s
].dynamic_offset_start
,
515 sizeof(layout
->set
[s
].dynamic_offset_start
));
517 _mesa_sha1_update(&ctx
, &layout
->num_sets
, sizeof(layout
->num_sets
));
518 _mesa_sha1_final(&ctx
, layout
->sha1
);
520 *pPipelineLayout
= anv_pipeline_layout_to_handle(layout
);
525 void anv_DestroyPipelineLayout(
527 VkPipelineLayout _pipelineLayout
,
528 const VkAllocationCallbacks
* pAllocator
)
530 ANV_FROM_HANDLE(anv_device
, device
, _device
);
531 ANV_FROM_HANDLE(anv_pipeline_layout
, pipeline_layout
, _pipelineLayout
);
533 if (!pipeline_layout
)
536 for (uint32_t i
= 0; i
< pipeline_layout
->num_sets
; i
++)
537 anv_descriptor_set_layout_unref(device
, pipeline_layout
->set
[i
].layout
);
539 vk_free2(&device
->alloc
, pAllocator
, pipeline_layout
);
545 * These are implemented using a big pool of memory and a free-list for the
546 * host memory allocations and a state_stream and a free list for the buffer
547 * view surface state. The spec allows us to fail to allocate due to
548 * fragmentation in all cases but two: 1) after pool reset, allocating up
549 * until the pool size with no freeing must succeed and 2) allocating and
550 * freeing only descriptor sets with the same layout. Case 1) is easy enough,
551 * and the free lists lets us recycle blocks for case 2).
/* The vma heap reserves 0 to mean NULL; we have to offset by some amount to
 * ensure we can allocate the entire BO without hitting zero.  The actual
 * amount doesn't matter.
 */
#define POOL_HEAP_OFFSET 64
562 VkResult
anv_CreateDescriptorPool(
564 const VkDescriptorPoolCreateInfo
* pCreateInfo
,
565 const VkAllocationCallbacks
* pAllocator
,
566 VkDescriptorPool
* pDescriptorPool
)
568 ANV_FROM_HANDLE(anv_device
, device
, _device
);
569 struct anv_descriptor_pool
*pool
;
571 const VkDescriptorPoolInlineUniformBlockCreateInfoEXT
*inline_info
=
572 vk_find_struct_const(pCreateInfo
->pNext
,
573 DESCRIPTOR_POOL_INLINE_UNIFORM_BLOCK_CREATE_INFO_EXT
);
575 uint32_t descriptor_count
= 0;
576 uint32_t buffer_view_count
= 0;
577 uint32_t descriptor_bo_size
= 0;
578 for (uint32_t i
= 0; i
< pCreateInfo
->poolSizeCount
; i
++) {
579 enum anv_descriptor_data desc_data
=
580 anv_descriptor_data_for_type(&device
->instance
->physicalDevice
,
581 pCreateInfo
->pPoolSizes
[i
].type
);
583 if (desc_data
& ANV_DESCRIPTOR_BUFFER_VIEW
)
584 buffer_view_count
+= pCreateInfo
->pPoolSizes
[i
].descriptorCount
;
586 unsigned desc_data_size
= anv_descriptor_data_size(desc_data
) *
587 pCreateInfo
->pPoolSizes
[i
].descriptorCount
;
589 if (pCreateInfo
->pPoolSizes
[i
].type
==
590 VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK_EXT
) {
591 /* Inline uniform blocks are specified to use the descriptor array
592 * size as the size in bytes of the block.
595 desc_data_size
+= pCreateInfo
->pPoolSizes
[i
].descriptorCount
;
598 descriptor_bo_size
+= desc_data_size
;
600 descriptor_count
+= pCreateInfo
->pPoolSizes
[i
].descriptorCount
;
602 /* We have to align descriptor buffer allocations to 32B so that we can
603 * push descriptor buffers. This means that each descriptor buffer
604 * allocated may burn up to 32B of extra space to get the right alignment.
605 * (Technically, it's at most 28B because we're always going to start at
606 * least 4B aligned but we're being conservative here.) Allocate enough
607 * extra space that we can chop it into maxSets pieces and align each one
610 descriptor_bo_size
+= 32 * pCreateInfo
->maxSets
;
611 descriptor_bo_size
= ALIGN(descriptor_bo_size
, 4096);
612 /* We align inline uniform blocks to 32B */
614 descriptor_bo_size
+= 32 * inline_info
->maxInlineUniformBlockBindings
;
616 const size_t pool_size
=
617 pCreateInfo
->maxSets
* sizeof(struct anv_descriptor_set
) +
618 descriptor_count
* sizeof(struct anv_descriptor
) +
619 buffer_view_count
* sizeof(struct anv_buffer_view
);
620 const size_t total_size
= sizeof(*pool
) + pool_size
;
622 pool
= vk_alloc2(&device
->alloc
, pAllocator
, total_size
, 8,
623 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT
);
625 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY
);
627 pool
->size
= pool_size
;
629 pool
->free_list
= EMPTY
;
631 if (descriptor_bo_size
> 0) {
632 VkResult result
= anv_bo_init_new(&pool
->bo
, device
, descriptor_bo_size
);
633 if (result
!= VK_SUCCESS
) {
634 vk_free2(&device
->alloc
, pAllocator
, pool
);
638 anv_gem_set_caching(device
, pool
->bo
.gem_handle
, I915_CACHING_CACHED
);
640 pool
->bo
.map
= anv_gem_mmap(device
, pool
->bo
.gem_handle
, 0,
641 descriptor_bo_size
, 0);
642 if (pool
->bo
.map
== NULL
) {
643 anv_gem_close(device
, pool
->bo
.gem_handle
);
644 vk_free2(&device
->alloc
, pAllocator
, pool
);
645 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY
);
648 if (device
->instance
->physicalDevice
.use_softpin
) {
649 pool
->bo
.flags
|= EXEC_OBJECT_PINNED
;
650 anv_vma_alloc(device
, &pool
->bo
);
653 util_vma_heap_init(&pool
->bo_heap
, POOL_HEAP_OFFSET
, descriptor_bo_size
);
658 anv_state_stream_init(&pool
->surface_state_stream
,
659 &device
->surface_state_pool
, 4096);
660 pool
->surface_state_free_list
= NULL
;
662 list_inithead(&pool
->desc_sets
);
664 *pDescriptorPool
= anv_descriptor_pool_to_handle(pool
);
669 void anv_DestroyDescriptorPool(
671 VkDescriptorPool _pool
,
672 const VkAllocationCallbacks
* pAllocator
)
674 ANV_FROM_HANDLE(anv_device
, device
, _device
);
675 ANV_FROM_HANDLE(anv_descriptor_pool
, pool
, _pool
);
681 anv_gem_munmap(pool
->bo
.map
, pool
->bo
.size
);
682 anv_vma_free(device
, &pool
->bo
);
683 anv_gem_close(device
, pool
->bo
.gem_handle
);
685 anv_state_stream_finish(&pool
->surface_state_stream
);
687 list_for_each_entry_safe(struct anv_descriptor_set
, set
,
688 &pool
->desc_sets
, pool_link
) {
689 anv_descriptor_set_destroy(device
, pool
, set
);
692 util_vma_heap_finish(&pool
->bo_heap
);
694 vk_free2(&device
->alloc
, pAllocator
, pool
);
697 VkResult
anv_ResetDescriptorPool(
699 VkDescriptorPool descriptorPool
,
700 VkDescriptorPoolResetFlags flags
)
702 ANV_FROM_HANDLE(anv_device
, device
, _device
);
703 ANV_FROM_HANDLE(anv_descriptor_pool
, pool
, descriptorPool
);
705 list_for_each_entry_safe(struct anv_descriptor_set
, set
,
706 &pool
->desc_sets
, pool_link
) {
707 anv_descriptor_set_destroy(device
, pool
, set
);
711 pool
->free_list
= EMPTY
;
714 util_vma_heap_finish(&pool
->bo_heap
);
715 util_vma_heap_init(&pool
->bo_heap
, POOL_HEAP_OFFSET
, pool
->bo
.size
);
718 anv_state_stream_finish(&pool
->surface_state_stream
);
719 anv_state_stream_init(&pool
->surface_state_stream
,
720 &device
->surface_state_pool
, 4096);
721 pool
->surface_state_free_list
= NULL
;
/* Header overlaid on freed descriptor-set memory inside the pool; links the
 * free blocks together (offsets are relative to pool->data, EMPTY ends the
 * list).  Fields match the uses in anv_descriptor_pool_alloc_set/free_set.
 */
struct pool_free_list_entry {
   uint32_t next;
   uint32_t size;
};
732 anv_descriptor_pool_alloc_set(struct anv_descriptor_pool
*pool
,
734 struct anv_descriptor_set
**set
)
736 if (size
<= pool
->size
- pool
->next
) {
737 *set
= (struct anv_descriptor_set
*) (pool
->data
+ pool
->next
);
741 struct pool_free_list_entry
*entry
;
742 uint32_t *link
= &pool
->free_list
;
743 for (uint32_t f
= pool
->free_list
; f
!= EMPTY
; f
= entry
->next
) {
744 entry
= (struct pool_free_list_entry
*) (pool
->data
+ f
);
745 if (size
<= entry
->size
) {
747 *set
= (struct anv_descriptor_set
*) entry
;
753 if (pool
->free_list
!= EMPTY
) {
754 return vk_error(VK_ERROR_FRAGMENTED_POOL
);
756 return vk_error(VK_ERROR_OUT_OF_POOL_MEMORY
);
762 anv_descriptor_pool_free_set(struct anv_descriptor_pool
*pool
,
763 struct anv_descriptor_set
*set
)
765 /* Put the descriptor set allocation back on the free list. */
766 const uint32_t index
= (char *) set
- pool
->data
;
767 if (index
+ set
->size
== pool
->next
) {
770 struct pool_free_list_entry
*entry
= (struct pool_free_list_entry
*) set
;
771 entry
->next
= pool
->free_list
;
772 entry
->size
= set
->size
;
773 pool
->free_list
= (char *) entry
- pool
->data
;
776 list_del(&set
->pool_link
);
779 struct surface_state_free_list_entry
{
781 struct anv_state state
;
784 static struct anv_state
785 anv_descriptor_pool_alloc_state(struct anv_descriptor_pool
*pool
)
787 struct surface_state_free_list_entry
*entry
=
788 pool
->surface_state_free_list
;
791 struct anv_state state
= entry
->state
;
792 pool
->surface_state_free_list
= entry
->next
;
793 assert(state
.alloc_size
== 64);
796 return anv_state_stream_alloc(&pool
->surface_state_stream
, 64, 64);
801 anv_descriptor_pool_free_state(struct anv_descriptor_pool
*pool
,
802 struct anv_state state
)
804 /* Put the buffer view surface state back on the free list. */
805 struct surface_state_free_list_entry
*entry
= state
.map
;
806 entry
->next
= pool
->surface_state_free_list
;
807 entry
->state
= state
;
808 pool
->surface_state_free_list
= entry
;
812 anv_descriptor_set_layout_size(const struct anv_descriptor_set_layout
*layout
)
815 sizeof(struct anv_descriptor_set
) +
816 layout
->size
* sizeof(struct anv_descriptor
) +
817 layout
->buffer_view_count
* sizeof(struct anv_buffer_view
);
821 anv_descriptor_set_create(struct anv_device
*device
,
822 struct anv_descriptor_pool
*pool
,
823 struct anv_descriptor_set_layout
*layout
,
824 struct anv_descriptor_set
**out_set
)
826 struct anv_descriptor_set
*set
;
827 const size_t size
= anv_descriptor_set_layout_size(layout
);
829 VkResult result
= anv_descriptor_pool_alloc_set(pool
, size
, &set
);
830 if (result
!= VK_SUCCESS
)
833 if (layout
->descriptor_buffer_size
) {
834 /* Align the size to 32 so that alignment gaps don't cause extra holes
835 * in the heap which can lead to bad performance.
837 uint64_t pool_vma_offset
=
838 util_vma_heap_alloc(&pool
->bo_heap
,
839 ALIGN(layout
->descriptor_buffer_size
, 32), 32);
840 if (pool_vma_offset
== 0) {
841 anv_descriptor_pool_free_set(pool
, set
);
842 return vk_error(VK_ERROR_FRAGMENTED_POOL
);
844 assert(pool_vma_offset
>= POOL_HEAP_OFFSET
&&
845 pool_vma_offset
- POOL_HEAP_OFFSET
<= INT32_MAX
);
846 set
->desc_mem
.offset
= pool_vma_offset
- POOL_HEAP_OFFSET
;
847 set
->desc_mem
.alloc_size
= layout
->descriptor_buffer_size
;
848 set
->desc_mem
.map
= pool
->bo
.map
+ set
->desc_mem
.offset
;
850 set
->desc_surface_state
= anv_descriptor_pool_alloc_state(pool
);
851 anv_fill_buffer_surface_state(device
, set
->desc_surface_state
,
852 ISL_FORMAT_R32G32B32A32_FLOAT
,
853 (struct anv_address
) {
855 .offset
= set
->desc_mem
.offset
,
857 layout
->descriptor_buffer_size
, 1);
859 set
->desc_mem
= ANV_STATE_NULL
;
860 set
->desc_surface_state
= ANV_STATE_NULL
;
864 set
->layout
= layout
;
865 anv_descriptor_set_layout_ref(layout
);
869 (struct anv_buffer_view
*) &set
->descriptors
[layout
->size
];
870 set
->buffer_view_count
= layout
->buffer_view_count
;
872 /* By defining the descriptors to be zero now, we can later verify that
873 * a descriptor has not been populated with user data.
875 memset(set
->descriptors
, 0, sizeof(struct anv_descriptor
) * layout
->size
);
877 /* Go through and fill out immutable samplers if we have any */
878 struct anv_descriptor
*desc
= set
->descriptors
;
879 for (uint32_t b
= 0; b
< layout
->binding_count
; b
++) {
880 if (layout
->binding
[b
].immutable_samplers
) {
881 for (uint32_t i
= 0; i
< layout
->binding
[b
].array_size
; i
++) {
882 /* The type will get changed to COMBINED_IMAGE_SAMPLER in
883 * UpdateDescriptorSets if needed. However, if the descriptor
884 * set has an immutable sampler, UpdateDescriptorSets may never
885 * touch it, so we need to make sure it's 100% valid now.
887 desc
[i
] = (struct anv_descriptor
) {
888 .type
= VK_DESCRIPTOR_TYPE_SAMPLER
,
889 .sampler
= layout
->binding
[b
].immutable_samplers
[i
],
893 desc
+= layout
->binding
[b
].array_size
;
896 /* Allocate surface state for the buffer views. */
897 for (uint32_t b
= 0; b
< layout
->buffer_view_count
; b
++) {
898 set
->buffer_views
[b
].surface_state
=
899 anv_descriptor_pool_alloc_state(pool
);
908 anv_descriptor_set_destroy(struct anv_device
*device
,
909 struct anv_descriptor_pool
*pool
,
910 struct anv_descriptor_set
*set
)
912 anv_descriptor_set_layout_unref(device
, set
->layout
);
914 if (set
->desc_mem
.alloc_size
) {
915 util_vma_heap_free(&pool
->bo_heap
,
916 (uint64_t)set
->desc_mem
.offset
+ POOL_HEAP_OFFSET
,
917 set
->desc_mem
.alloc_size
);
918 anv_descriptor_pool_free_state(pool
, set
->desc_surface_state
);
921 for (uint32_t b
= 0; b
< set
->buffer_view_count
; b
++)
922 anv_descriptor_pool_free_state(pool
, set
->buffer_views
[b
].surface_state
);
924 anv_descriptor_pool_free_set(pool
, set
);
927 VkResult
anv_AllocateDescriptorSets(
929 const VkDescriptorSetAllocateInfo
* pAllocateInfo
,
930 VkDescriptorSet
* pDescriptorSets
)
932 ANV_FROM_HANDLE(anv_device
, device
, _device
);
933 ANV_FROM_HANDLE(anv_descriptor_pool
, pool
, pAllocateInfo
->descriptorPool
);
935 VkResult result
= VK_SUCCESS
;
936 struct anv_descriptor_set
*set
;
939 for (i
= 0; i
< pAllocateInfo
->descriptorSetCount
; i
++) {
940 ANV_FROM_HANDLE(anv_descriptor_set_layout
, layout
,
941 pAllocateInfo
->pSetLayouts
[i
]);
943 result
= anv_descriptor_set_create(device
, pool
, layout
, &set
);
944 if (result
!= VK_SUCCESS
)
947 list_addtail(&set
->pool_link
, &pool
->desc_sets
);
949 pDescriptorSets
[i
] = anv_descriptor_set_to_handle(set
);
952 if (result
!= VK_SUCCESS
)
953 anv_FreeDescriptorSets(_device
, pAllocateInfo
->descriptorPool
,
959 VkResult
anv_FreeDescriptorSets(
961 VkDescriptorPool descriptorPool
,
963 const VkDescriptorSet
* pDescriptorSets
)
965 ANV_FROM_HANDLE(anv_device
, device
, _device
);
966 ANV_FROM_HANDLE(anv_descriptor_pool
, pool
, descriptorPool
);
968 for (uint32_t i
= 0; i
< count
; i
++) {
969 ANV_FROM_HANDLE(anv_descriptor_set
, set
, pDescriptorSets
[i
]);
974 anv_descriptor_set_destroy(device
, pool
, set
);
981 anv_descriptor_set_write_image_param(uint32_t *param_desc_map
,
982 const struct brw_image_param
*param
)
984 #define WRITE_PARAM_FIELD(field, FIELD) \
985 for (unsigned i = 0; i < ARRAY_SIZE(param->field); i++) \
986 param_desc_map[BRW_IMAGE_PARAM_##FIELD##_OFFSET + i] = param->field[i]
988 WRITE_PARAM_FIELD(offset
, OFFSET
);
989 WRITE_PARAM_FIELD(size
, SIZE
);
990 WRITE_PARAM_FIELD(stride
, STRIDE
);
991 WRITE_PARAM_FIELD(tiling
, TILING
);
992 WRITE_PARAM_FIELD(swizzling
, SWIZZLING
);
993 WRITE_PARAM_FIELD(size
, SIZE
);
995 #undef WRITE_PARAM_FIELD
999 anv_descriptor_set_write_image_view(struct anv_device
*device
,
1000 struct anv_descriptor_set
*set
,
1001 const VkDescriptorImageInfo
* const info
,
1002 VkDescriptorType type
,
1006 const struct anv_descriptor_set_binding_layout
*bind_layout
=
1007 &set
->layout
->binding
[binding
];
1008 struct anv_descriptor
*desc
=
1009 &set
->descriptors
[bind_layout
->descriptor_index
+ element
];
1010 struct anv_image_view
*image_view
= NULL
;
1011 struct anv_sampler
*sampler
= NULL
;
1013 assert(type
== bind_layout
->type
);
1016 case VK_DESCRIPTOR_TYPE_SAMPLER
:
1017 sampler
= anv_sampler_from_handle(info
->sampler
);
1020 case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER
:
1021 image_view
= anv_image_view_from_handle(info
->imageView
);
1022 sampler
= anv_sampler_from_handle(info
->sampler
);
1025 case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE
:
1026 case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE
:
1027 case VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT
:
1028 image_view
= anv_image_view_from_handle(info
->imageView
);
1032 unreachable("invalid descriptor type");
1035 /* If this descriptor has an immutable sampler, we don't want to stomp on
1038 sampler
= bind_layout
->immutable_samplers
?
1039 bind_layout
->immutable_samplers
[element
] :
1042 *desc
= (struct anv_descriptor
) {
1044 .layout
= info
->imageLayout
,
1045 .image_view
= image_view
,
1049 void *desc_map
= set
->desc_mem
.map
+ bind_layout
->descriptor_offset
+
1050 element
* anv_descriptor_size(bind_layout
);
1052 if (bind_layout
->data
& ANV_DESCRIPTOR_IMAGE_PARAM
) {
1053 /* Storage images can only ever have one plane */
1054 assert(image_view
->n_planes
== 1);
1055 const struct brw_image_param
*image_param
=
1056 &image_view
->planes
[0].storage_image_param
;
1058 anv_descriptor_set_write_image_param(desc_map
, image_param
);
1063 anv_descriptor_set_write_buffer_view(struct anv_device
*device
,
1064 struct anv_descriptor_set
*set
,
1065 VkDescriptorType type
,
1066 struct anv_buffer_view
*buffer_view
,
1070 const struct anv_descriptor_set_binding_layout
*bind_layout
=
1071 &set
->layout
->binding
[binding
];
1072 struct anv_descriptor
*desc
=
1073 &set
->descriptors
[bind_layout
->descriptor_index
+ element
];
1075 assert(type
== bind_layout
->type
);
1077 *desc
= (struct anv_descriptor
) {
1079 .buffer_view
= buffer_view
,
1082 void *desc_map
= set
->desc_mem
.map
+ bind_layout
->descriptor_offset
+
1083 element
* anv_descriptor_size(bind_layout
);
1085 if (bind_layout
->data
& ANV_DESCRIPTOR_IMAGE_PARAM
) {
1086 anv_descriptor_set_write_image_param(desc_map
,
1087 &buffer_view
->storage_image_param
);
1092 anv_descriptor_set_write_buffer(struct anv_device
*device
,
1093 struct anv_descriptor_set
*set
,
1094 struct anv_state_stream
*alloc_stream
,
1095 VkDescriptorType type
,
1096 struct anv_buffer
*buffer
,
1099 VkDeviceSize offset
,
1102 const struct anv_descriptor_set_binding_layout
*bind_layout
=
1103 &set
->layout
->binding
[binding
];
1104 struct anv_descriptor
*desc
=
1105 &set
->descriptors
[bind_layout
->descriptor_index
+ element
];
1107 assert(type
== bind_layout
->type
);
1109 struct anv_address bind_addr
= anv_address_add(buffer
->address
, offset
);
1110 uint64_t bind_range
= anv_buffer_get_range(buffer
, offset
, range
);
1112 if (type
== VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC
||
1113 type
== VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC
) {
1114 *desc
= (struct anv_descriptor
) {
1121 assert(bind_layout
->data
& ANV_DESCRIPTOR_BUFFER_VIEW
);
1122 struct anv_buffer_view
*bview
=
1123 &set
->buffer_views
[bind_layout
->buffer_view_index
+ element
];
1125 bview
->format
= anv_isl_format_for_descriptor_type(type
);
1126 bview
->range
= bind_range
;
1127 bview
->address
= bind_addr
;
1129 /* If we're writing descriptors through a push command, we need to
1130 * allocate the surface state from the command buffer. Otherwise it will
1131 * be allocated by the descriptor pool when calling
1132 * vkAllocateDescriptorSets. */
1134 bview
->surface_state
= anv_state_stream_alloc(alloc_stream
, 64, 64);
1136 anv_fill_buffer_surface_state(device
, bview
->surface_state
,
1137 bview
->format
, bind_addr
, bind_range
, 1);
1139 *desc
= (struct anv_descriptor
) {
1141 .buffer_view
= bview
,
1145 void *desc_map
= set
->desc_mem
.map
+ bind_layout
->descriptor_offset
+
1146 element
* anv_descriptor_size(bind_layout
);
1148 if (bind_layout
->data
& ANV_DESCRIPTOR_ADDRESS_RANGE
) {
1149 struct anv_address_range_descriptor desc
= {
1150 .address
= anv_address_physical(bind_addr
),
1151 .range
= bind_range
,
1153 memcpy(desc_map
, &desc
, sizeof(desc
));
1158 anv_descriptor_set_write_inline_uniform_data(struct anv_device
*device
,
1159 struct anv_descriptor_set
*set
,
1165 const struct anv_descriptor_set_binding_layout
*bind_layout
=
1166 &set
->layout
->binding
[binding
];
1168 assert(bind_layout
->data
& ANV_DESCRIPTOR_INLINE_UNIFORM
);
1170 void *desc_map
= set
->desc_mem
.map
+ bind_layout
->descriptor_offset
;
1172 memcpy(desc_map
+ offset
, data
, size
);
1175 void anv_UpdateDescriptorSets(
1177 uint32_t descriptorWriteCount
,
1178 const VkWriteDescriptorSet
* pDescriptorWrites
,
1179 uint32_t descriptorCopyCount
,
1180 const VkCopyDescriptorSet
* pDescriptorCopies
)
1182 ANV_FROM_HANDLE(anv_device
, device
, _device
);
1184 for (uint32_t i
= 0; i
< descriptorWriteCount
; i
++) {
1185 const VkWriteDescriptorSet
*write
= &pDescriptorWrites
[i
];
1186 ANV_FROM_HANDLE(anv_descriptor_set
, set
, write
->dstSet
);
1188 switch (write
->descriptorType
) {
1189 case VK_DESCRIPTOR_TYPE_SAMPLER
:
1190 case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER
:
1191 case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE
:
1192 case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE
:
1193 case VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT
:
1194 for (uint32_t j
= 0; j
< write
->descriptorCount
; j
++) {
1195 anv_descriptor_set_write_image_view(device
, set
,
1196 write
->pImageInfo
+ j
,
1197 write
->descriptorType
,
1199 write
->dstArrayElement
+ j
);
1203 case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER
:
1204 case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER
:
1205 for (uint32_t j
= 0; j
< write
->descriptorCount
; j
++) {
1206 ANV_FROM_HANDLE(anv_buffer_view
, bview
,
1207 write
->pTexelBufferView
[j
]);
1209 anv_descriptor_set_write_buffer_view(device
, set
,
1210 write
->descriptorType
,
1213 write
->dstArrayElement
+ j
);
1217 case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER
:
1218 case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER
:
1219 case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC
:
1220 case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC
:
1221 for (uint32_t j
= 0; j
< write
->descriptorCount
; j
++) {
1222 assert(write
->pBufferInfo
[j
].buffer
);
1223 ANV_FROM_HANDLE(anv_buffer
, buffer
, write
->pBufferInfo
[j
].buffer
);
1226 anv_descriptor_set_write_buffer(device
, set
,
1228 write
->descriptorType
,
1231 write
->dstArrayElement
+ j
,
1232 write
->pBufferInfo
[j
].offset
,
1233 write
->pBufferInfo
[j
].range
);
1237 case VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK_EXT
: {
1238 const VkWriteDescriptorSetInlineUniformBlockEXT
*inline_write
=
1239 vk_find_struct_const(write
->pNext
,
1240 WRITE_DESCRIPTOR_SET_INLINE_UNIFORM_BLOCK_EXT
);
1241 assert(inline_write
->dataSize
== write
->descriptorCount
);
1242 anv_descriptor_set_write_inline_uniform_data(device
, set
,
1244 inline_write
->pData
,
1245 write
->dstArrayElement
,
1246 inline_write
->dataSize
);
1255 for (uint32_t i
= 0; i
< descriptorCopyCount
; i
++) {
1256 const VkCopyDescriptorSet
*copy
= &pDescriptorCopies
[i
];
1257 ANV_FROM_HANDLE(anv_descriptor_set
, src
, copy
->srcSet
);
1258 ANV_FROM_HANDLE(anv_descriptor_set
, dst
, copy
->dstSet
);
1260 const struct anv_descriptor_set_binding_layout
*src_layout
=
1261 &src
->layout
->binding
[copy
->srcBinding
];
1262 struct anv_descriptor
*src_desc
=
1263 &src
->descriptors
[src_layout
->descriptor_index
];
1264 src_desc
+= copy
->srcArrayElement
;
1266 const struct anv_descriptor_set_binding_layout
*dst_layout
=
1267 &dst
->layout
->binding
[copy
->dstBinding
];
1268 struct anv_descriptor
*dst_desc
=
1269 &dst
->descriptors
[dst_layout
->descriptor_index
];
1270 dst_desc
+= copy
->dstArrayElement
;
1272 for (uint32_t j
= 0; j
< copy
->descriptorCount
; j
++)
1273 dst_desc
[j
] = src_desc
[j
];
1275 if (src_layout
->data
& ANV_DESCRIPTOR_INLINE_UNIFORM
) {
1276 assert(src_layout
->data
== ANV_DESCRIPTOR_INLINE_UNIFORM
);
1277 memcpy(dst
->desc_mem
.map
+ dst_layout
->descriptor_offset
+
1278 copy
->dstArrayElement
,
1279 src
->desc_mem
.map
+ src_layout
->descriptor_offset
+
1280 copy
->srcArrayElement
,
1281 copy
->descriptorCount
);
1283 unsigned desc_size
= anv_descriptor_size(src_layout
);
1284 if (desc_size
> 0) {
1285 assert(desc_size
== anv_descriptor_size(dst_layout
));
1286 memcpy(dst
->desc_mem
.map
+ dst_layout
->descriptor_offset
+
1287 copy
->dstArrayElement
* desc_size
,
1288 src
->desc_mem
.map
+ src_layout
->descriptor_offset
+
1289 copy
->srcArrayElement
* desc_size
,
1290 copy
->descriptorCount
* desc_size
);
/*
 * Descriptor update templates.
 */
1301 anv_descriptor_set_write_template(struct anv_device
*device
,
1302 struct anv_descriptor_set
*set
,
1303 struct anv_state_stream
*alloc_stream
,
1304 const struct anv_descriptor_update_template
*template,
1307 for (uint32_t i
= 0; i
< template->entry_count
; i
++) {
1308 const struct anv_descriptor_template_entry
*entry
=
1309 &template->entries
[i
];
1311 switch (entry
->type
) {
1312 case VK_DESCRIPTOR_TYPE_SAMPLER
:
1313 case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER
:
1314 case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE
:
1315 case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE
:
1316 case VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT
:
1317 for (uint32_t j
= 0; j
< entry
->array_count
; j
++) {
1318 const VkDescriptorImageInfo
*info
=
1319 data
+ entry
->offset
+ j
* entry
->stride
;
1320 anv_descriptor_set_write_image_view(device
, set
,
1323 entry
->array_element
+ j
);
1327 case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER
:
1328 case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER
:
1329 for (uint32_t j
= 0; j
< entry
->array_count
; j
++) {
1330 const VkBufferView
*_bview
=
1331 data
+ entry
->offset
+ j
* entry
->stride
;
1332 ANV_FROM_HANDLE(anv_buffer_view
, bview
, *_bview
);
1334 anv_descriptor_set_write_buffer_view(device
, set
,
1338 entry
->array_element
+ j
);
1342 case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER
:
1343 case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER
:
1344 case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC
:
1345 case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC
:
1346 for (uint32_t j
= 0; j
< entry
->array_count
; j
++) {
1347 const VkDescriptorBufferInfo
*info
=
1348 data
+ entry
->offset
+ j
* entry
->stride
;
1349 ANV_FROM_HANDLE(anv_buffer
, buffer
, info
->buffer
);
1351 anv_descriptor_set_write_buffer(device
, set
,
1356 entry
->array_element
+ j
,
1357 info
->offset
, info
->range
);
1361 case VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK_EXT
:
1362 anv_descriptor_set_write_inline_uniform_data(device
, set
,
1364 data
+ entry
->offset
,
1365 entry
->array_element
,
1366 entry
->array_count
);
1375 VkResult
anv_CreateDescriptorUpdateTemplate(
1377 const VkDescriptorUpdateTemplateCreateInfo
* pCreateInfo
,
1378 const VkAllocationCallbacks
* pAllocator
,
1379 VkDescriptorUpdateTemplate
* pDescriptorUpdateTemplate
)
1381 ANV_FROM_HANDLE(anv_device
, device
, _device
);
1382 struct anv_descriptor_update_template
*template;
1384 size_t size
= sizeof(*template) +
1385 pCreateInfo
->descriptorUpdateEntryCount
* sizeof(template->entries
[0]);
1386 template = vk_alloc2(&device
->alloc
, pAllocator
, size
, 8,
1387 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT
);
1388 if (template == NULL
)
1389 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY
);
1391 template->bind_point
= pCreateInfo
->pipelineBindPoint
;
1393 if (pCreateInfo
->templateType
== VK_DESCRIPTOR_UPDATE_TEMPLATE_TYPE_DESCRIPTOR_SET
)
1394 template->set
= pCreateInfo
->set
;
1396 template->entry_count
= pCreateInfo
->descriptorUpdateEntryCount
;
1397 for (uint32_t i
= 0; i
< template->entry_count
; i
++) {
1398 const VkDescriptorUpdateTemplateEntry
*pEntry
=
1399 &pCreateInfo
->pDescriptorUpdateEntries
[i
];
1401 template->entries
[i
] = (struct anv_descriptor_template_entry
) {
1402 .type
= pEntry
->descriptorType
,
1403 .binding
= pEntry
->dstBinding
,
1404 .array_element
= pEntry
->dstArrayElement
,
1405 .array_count
= pEntry
->descriptorCount
,
1406 .offset
= pEntry
->offset
,
1407 .stride
= pEntry
->stride
,
1411 *pDescriptorUpdateTemplate
=
1412 anv_descriptor_update_template_to_handle(template);
1417 void anv_DestroyDescriptorUpdateTemplate(
1419 VkDescriptorUpdateTemplate descriptorUpdateTemplate
,
1420 const VkAllocationCallbacks
* pAllocator
)
1422 ANV_FROM_HANDLE(anv_device
, device
, _device
);
1423 ANV_FROM_HANDLE(anv_descriptor_update_template
, template,
1424 descriptorUpdateTemplate
);
1426 vk_free2(&device
->alloc
, pAllocator
, template);
1429 void anv_UpdateDescriptorSetWithTemplate(
1431 VkDescriptorSet descriptorSet
,
1432 VkDescriptorUpdateTemplate descriptorUpdateTemplate
,
1435 ANV_FROM_HANDLE(anv_device
, device
, _device
);
1436 ANV_FROM_HANDLE(anv_descriptor_set
, set
, descriptorSet
);
1437 ANV_FROM_HANDLE(anv_descriptor_update_template
, template,
1438 descriptorUpdateTemplate
);
1440 anv_descriptor_set_write_template(device
, set
, NULL
, template, pData
);