/*
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include <assert.h>
#include <stdbool.h>
#include <string.h>

#include "util/mesa-sha1.h"

#include "anv_private.h"

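/*
 * This file implements the descriptor-related objects for the Intel "anv"
 * Vulkan driver: descriptor set layouts, pipeline layouts, descriptor pools,
 * descriptor sets, and descriptor update templates.
 */
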
/*
 * Descriptor set layouts.
 */

void anv_GetDescriptorSetLayoutSupport(
    VkDevice                                    device,
    const VkDescriptorSetLayoutCreateInfo*      pCreateInfo,
    VkDescriptorSetLayoutSupport*               pSupport)
{
   uint32_t surface_count[MESA_SHADER_STAGES] = { 0, };

   for (uint32_t b = 0; b < pCreateInfo->bindingCount; b++) {
      const VkDescriptorSetLayoutBinding *binding = &pCreateInfo->pBindings[b];

      switch (binding->descriptorType) {
      case VK_DESCRIPTOR_TYPE_SAMPLER:
         /* There is no real limit on samplers */
         break;

      case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
         if (binding->pImmutableSamplers) {
            for (uint32_t i = 0; i < binding->descriptorCount; i++) {
               ANV_FROM_HANDLE(anv_sampler, sampler,
                               binding->pImmutableSamplers[i]);
               anv_foreach_stage(s, binding->stageFlags)
                  surface_count[s] += sampler->n_planes;
            }
         }
         break;

      default:
         anv_foreach_stage(s, binding->stageFlags)
            surface_count[s] += binding->descriptorCount;
         break;
      }
   }

   bool supported = true;
   for (unsigned s = 0; s < MESA_SHADER_STAGES; s++) {
      /* Our maximum binding table size is 250 and we need to reserve 8 for
       * render targets. 240 is a nice round number.
       */
      if (surface_count[s] >= 240)
         supported = false;
   }

   pSupport->supported = supported;
}

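/* Descriptor set layout creation below happens in two passes: a first loop
 * over pCreateInfo->pBindings finds the highest binding number and counts
 * the immutable samplers, so a single multialloc can hold the layout, the
 * per-binding table, and the immutable-sampler pointer array; a second pass
 * then fills in the per-binding data in binding-number order.
 */
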
VkResult anv_CreateDescriptorSetLayout(
    VkDevice                                    _device,
    const VkDescriptorSetLayoutCreateInfo*      pCreateInfo,
    const VkAllocationCallbacks*                pAllocator,
    VkDescriptorSetLayout*                      pSetLayout)
{
   ANV_FROM_HANDLE(anv_device, device, _device);

   assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO);

   uint32_t max_binding = 0;
   uint32_t immutable_sampler_count = 0;
   for (uint32_t j = 0; j < pCreateInfo->bindingCount; j++) {
      max_binding = MAX2(max_binding, pCreateInfo->pBindings[j].binding);
      if (pCreateInfo->pBindings[j].pImmutableSamplers)
         immutable_sampler_count += pCreateInfo->pBindings[j].descriptorCount;
   }

   struct anv_descriptor_set_layout *set_layout;
   struct anv_descriptor_set_binding_layout *bindings;
   struct anv_sampler **samplers;

   /* We need to allocate descriptor set layouts off the device allocator
    * with DEVICE scope because they are reference counted and may not be
    * destroyed when vkDestroyDescriptorSetLayout is called.
    */
   ANV_MULTIALLOC(ma);
   anv_multialloc_add(&ma, &set_layout, 1);
   anv_multialloc_add(&ma, &bindings, max_binding + 1);
   anv_multialloc_add(&ma, &samplers, immutable_sampler_count);

   if (!anv_multialloc_alloc(&ma, &device->alloc,
                             VK_SYSTEM_ALLOCATION_SCOPE_DEVICE))
      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

   memset(set_layout, 0, sizeof(*set_layout));
   set_layout->ref_cnt = 1;
   set_layout->binding_count = max_binding + 1;

   for (uint32_t b = 0; b <= max_binding; b++) {
      /* Initialize all binding_layout entries to -1 */
      memset(&set_layout->binding[b], -1, sizeof(set_layout->binding[b]));

      set_layout->binding[b].array_size = 0;
      set_layout->binding[b].immutable_samplers = NULL;
   }

   /* Initialize all samplers to 0 */
   memset(samplers, 0, immutable_sampler_count * sizeof(*samplers));

   uint32_t sampler_count[MESA_SHADER_STAGES] = { 0, };
   uint32_t surface_count[MESA_SHADER_STAGES] = { 0, };
   uint32_t image_count[MESA_SHADER_STAGES] = { 0, };
   uint32_t buffer_count = 0;
   uint32_t dynamic_offset_count = 0;

   for (uint32_t j = 0; j < pCreateInfo->bindingCount; j++) {
      const VkDescriptorSetLayoutBinding *binding = &pCreateInfo->pBindings[j];
      uint32_t b = binding->binding;
      /* We temporarily store the pointer to the binding in the
       * immutable_samplers pointer. This provides us with a quick-and-dirty
       * way to sort the bindings by binding number.
       */
      set_layout->binding[b].immutable_samplers = (void *)binding;
   }

   for (uint32_t b = 0; b <= max_binding; b++) {
      const VkDescriptorSetLayoutBinding *binding =
         (void *)set_layout->binding[b].immutable_samplers;

      if (binding == NULL)
         continue;

      if (binding->descriptorCount == 0)
         continue;

      set_layout->binding[b].type = binding->descriptorType;

      set_layout->binding[b].array_size = binding->descriptorCount;
      set_layout->binding[b].descriptor_index = set_layout->size;
      set_layout->size += binding->descriptorCount;

      switch (binding->descriptorType) {
      case VK_DESCRIPTOR_TYPE_SAMPLER:
      case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
         anv_foreach_stage(s, binding->stageFlags) {
            set_layout->binding[b].stage[s].sampler_index = sampler_count[s];
            sampler_count[s] += binding->descriptorCount;
         }
         break;
      default:
         break;
      }

      switch (binding->descriptorType) {
      case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
      case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
      case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
      case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
         set_layout->binding[b].buffer_index = buffer_count;
         buffer_count += binding->descriptorCount;
         /* fall through */

      case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
      case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE:
      case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
      case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
      case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
      case VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT:
         anv_foreach_stage(s, binding->stageFlags) {
            set_layout->binding[b].stage[s].surface_index = surface_count[s];
            surface_count[s] += binding->descriptorCount;
         }
         break;
      default:
         break;
      }

      switch (binding->descriptorType) {
      case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
      case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
         set_layout->binding[b].dynamic_offset_index = dynamic_offset_count;
         dynamic_offset_count += binding->descriptorCount;
         break;
      default:
         break;
      }

      switch (binding->descriptorType) {
      case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
      case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
         anv_foreach_stage(s, binding->stageFlags) {
            set_layout->binding[b].stage[s].image_index = image_count[s];
            image_count[s] += binding->descriptorCount;
         }
         break;
      default:
         break;
      }

      if (binding->pImmutableSamplers) {
         set_layout->binding[b].immutable_samplers = samplers;
         samplers += binding->descriptorCount;

         for (uint32_t i = 0; i < binding->descriptorCount; i++)
            set_layout->binding[b].immutable_samplers[i] =
               anv_sampler_from_handle(binding->pImmutableSamplers[i]);
      } else {
         set_layout->binding[b].immutable_samplers = NULL;
      }

      set_layout->shader_stages |= binding->stageFlags;
   }

   set_layout->buffer_count = buffer_count;
   set_layout->dynamic_offset_count = dynamic_offset_count;

   *pSetLayout = anv_descriptor_set_layout_to_handle(set_layout);

   return VK_SUCCESS;
}

void anv_DestroyDescriptorSetLayout(
    VkDevice                                    _device,
    VkDescriptorSetLayout                       _set_layout,
    const VkAllocationCallbacks*                pAllocator)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   ANV_FROM_HANDLE(anv_descriptor_set_layout, set_layout, _set_layout);

   if (!set_layout)
      return;

   anv_descriptor_set_layout_unref(device, set_layout);
}

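/* The helpers below hash the parts of a descriptor set layout that matter
 * to users of the layout; anv_CreatePipelineLayout folds them into
 * layout->sha1 so two pipeline layouts with equivalent contents hash the
 * same way.
 */
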
#define SHA1_UPDATE_VALUE(ctx, x) _mesa_sha1_update(ctx, &(x), sizeof(x));

static void
sha1_update_immutable_sampler(struct mesa_sha1 *ctx,
                              const struct anv_sampler *sampler)
{
   if (!sampler->conversion)
      return;

   /* The only thing that affects the shader is ycbcr conversion */
   _mesa_sha1_update(ctx, sampler->conversion,
                     sizeof(*sampler->conversion));
}

static void
sha1_update_descriptor_set_binding_layout(struct mesa_sha1 *ctx,
   const struct anv_descriptor_set_binding_layout *layout)
{
   SHA1_UPDATE_VALUE(ctx, layout->array_size);
   SHA1_UPDATE_VALUE(ctx, layout->descriptor_index);
   SHA1_UPDATE_VALUE(ctx, layout->dynamic_offset_index);
   SHA1_UPDATE_VALUE(ctx, layout->buffer_index);
   _mesa_sha1_update(ctx, layout->stage, sizeof(layout->stage));

   if (layout->immutable_samplers) {
      for (uint16_t i = 0; i < layout->array_size; i++)
         sha1_update_immutable_sampler(ctx, layout->immutable_samplers[i]);
   }
}

static void
sha1_update_descriptor_set_layout(struct mesa_sha1 *ctx,
                                  const struct anv_descriptor_set_layout *layout)
{
   SHA1_UPDATE_VALUE(ctx, layout->binding_count);
   SHA1_UPDATE_VALUE(ctx, layout->size);
   SHA1_UPDATE_VALUE(ctx, layout->shader_stages);
   SHA1_UPDATE_VALUE(ctx, layout->buffer_count);
   SHA1_UPDATE_VALUE(ctx, layout->dynamic_offset_count);

   for (uint16_t i = 0; i < layout->binding_count; i++)
      sha1_update_descriptor_set_binding_layout(ctx, &layout->binding[i]);
}

/*
 * Pipeline layouts. These have nothing to do with the pipeline. They are
 * just multiple descriptor set layouts pasted together.
 */

VkResult anv_CreatePipelineLayout(
    VkDevice                                    _device,
    const VkPipelineLayoutCreateInfo*           pCreateInfo,
    const VkAllocationCallbacks*                pAllocator,
    VkPipelineLayout*                           pPipelineLayout)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   struct anv_pipeline_layout *layout;

   assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO);

   layout = vk_alloc2(&device->alloc, pAllocator, sizeof(*layout), 8,
                      VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (layout == NULL)
      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

   layout->num_sets = pCreateInfo->setLayoutCount;

   unsigned dynamic_offset_count = 0;

   memset(layout->stage, 0, sizeof(layout->stage));
   for (uint32_t set = 0; set < pCreateInfo->setLayoutCount; set++) {
      ANV_FROM_HANDLE(anv_descriptor_set_layout, set_layout,
                      pCreateInfo->pSetLayouts[set]);
      layout->set[set].layout = set_layout;
      anv_descriptor_set_layout_ref(set_layout);

      layout->set[set].dynamic_offset_start = dynamic_offset_count;
      for (uint32_t b = 0; b < set_layout->binding_count; b++) {
         if (set_layout->binding[b].dynamic_offset_index < 0)
            continue;

         dynamic_offset_count += set_layout->binding[b].array_size;
         for (gl_shader_stage s = 0; s < MESA_SHADER_STAGES; s++) {
            if (set_layout->binding[b].stage[s].surface_index >= 0)
               layout->stage[s].has_dynamic_offsets = true;
         }
      }
   }

   struct mesa_sha1 ctx;
   _mesa_sha1_init(&ctx);
   for (unsigned s = 0; s < layout->num_sets; s++) {
      sha1_update_descriptor_set_layout(&ctx, layout->set[s].layout);
      _mesa_sha1_update(&ctx, &layout->set[s].dynamic_offset_start,
                        sizeof(layout->set[s].dynamic_offset_start));
   }
   _mesa_sha1_update(&ctx, &layout->num_sets, sizeof(layout->num_sets));
   for (unsigned s = 0; s < MESA_SHADER_STAGES; s++) {
      _mesa_sha1_update(&ctx, &layout->stage[s].has_dynamic_offsets,
                        sizeof(layout->stage[s].has_dynamic_offsets));
   }
   _mesa_sha1_final(&ctx, layout->sha1);

   *pPipelineLayout = anv_pipeline_layout_to_handle(layout);

   return VK_SUCCESS;
}

void anv_DestroyPipelineLayout(
    VkDevice                                    _device,
    VkPipelineLayout                            _pipelineLayout,
    const VkAllocationCallbacks*                pAllocator)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   ANV_FROM_HANDLE(anv_pipeline_layout, pipeline_layout, _pipelineLayout);

   if (!pipeline_layout)
      return;

   for (uint32_t i = 0; i < pipeline_layout->num_sets; i++)
      anv_descriptor_set_layout_unref(device, pipeline_layout->set[i].layout);

   vk_free2(&device->alloc, pAllocator, pipeline_layout);
}

/*
 * Descriptor pools.
 *
 * These are implemented using a big pool of memory and a free-list for the
 * host memory allocations and a state_stream and a free list for the buffer
 * view surface state. The spec allows us to fail to allocate due to
 * fragmentation in all cases but two: 1) after pool reset, allocating up
 * until the pool size with no freeing must succeed and 2) allocating and
 * freeing only descriptor sets with the same layout. Case 1) is easy enough,
 * and the free lists let us recycle blocks for case 2).
 */

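/* Roughly, the host allocation made in anv_CreateDescriptorPool below is
 * budgeted as follows (a sketch of the sizing logic, not an exact memory
 * layout, since each set packs its descriptors and buffer views right after
 * its own header):
 *
 *    pool->data:
 *       maxSets          x  struct anv_descriptor_set   (set headers)
 *       descriptor_count x  struct anv_descriptor       (descriptor storage)
 *       buffer_count     x  struct anv_buffer_view      (buffer views)
 *
 * Sets are bump-allocated from pool->next and recycled through a free list
 * keyed by byte offset into pool->data.
 */
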
VkResult anv_CreateDescriptorPool(
    VkDevice                                    _device,
    const VkDescriptorPoolCreateInfo*           pCreateInfo,
    const VkAllocationCallbacks*                pAllocator,
    VkDescriptorPool*                           pDescriptorPool)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   struct anv_descriptor_pool *pool;

   uint32_t descriptor_count = 0;
   uint32_t buffer_count = 0;
   for (uint32_t i = 0; i < pCreateInfo->poolSizeCount; i++) {
      switch (pCreateInfo->pPoolSizes[i].type) {
      case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
      case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
      case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
      case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
         buffer_count += pCreateInfo->pPoolSizes[i].descriptorCount;
      default:
         descriptor_count += pCreateInfo->pPoolSizes[i].descriptorCount;
         break;
      }
   }

   const size_t pool_size =
      pCreateInfo->maxSets * sizeof(struct anv_descriptor_set) +
      descriptor_count * sizeof(struct anv_descriptor) +
      buffer_count * sizeof(struct anv_buffer_view);
   const size_t total_size = sizeof(*pool) + pool_size;

   pool = vk_alloc2(&device->alloc, pAllocator, total_size, 8,
                    VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (!pool)
      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

   pool->size = pool_size;
   pool->next = 0;
   pool->free_list = EMPTY;

   anv_state_stream_init(&pool->surface_state_stream,
                         &device->surface_state_pool, 4096);
   pool->surface_state_free_list = NULL;

   *pDescriptorPool = anv_descriptor_pool_to_handle(pool);

   return VK_SUCCESS;
}

void anv_DestroyDescriptorPool(
    VkDevice                                    _device,
    VkDescriptorPool                            _pool,
    const VkAllocationCallbacks*                pAllocator)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   ANV_FROM_HANDLE(anv_descriptor_pool, pool, _pool);

   if (!pool)
      return;

   anv_state_stream_finish(&pool->surface_state_stream);
   vk_free2(&device->alloc, pAllocator, pool);
}

VkResult anv_ResetDescriptorPool(
    VkDevice                                    _device,
    VkDescriptorPool                            descriptorPool,
    VkDescriptorPoolResetFlags                  flags)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   ANV_FROM_HANDLE(anv_descriptor_pool, pool, descriptorPool);

   pool->next = 0;
   pool->free_list = EMPTY;
   anv_state_stream_finish(&pool->surface_state_stream);
   anv_state_stream_init(&pool->surface_state_stream,
                         &device->surface_state_pool, 4096);
   pool->surface_state_free_list = NULL;

   return VK_SUCCESS;
}

struct pool_free_list_entry {
   uint32_t next;
   uint32_t size;
};

size_t
anv_descriptor_set_layout_size(const struct anv_descriptor_set_layout *layout)
{
   return
      sizeof(struct anv_descriptor_set) +
      layout->size * sizeof(struct anv_descriptor) +
      layout->buffer_count * sizeof(struct anv_buffer_view);
}

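/* With multi-planar (YCbCr) immutable samplers a single API descriptor can
 * occupy several hardware binding table entries, one per plane, so the
 * hardware size of a binding is the sum of n_planes rather than just
 * array_size.
 */
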
static uint32_t
anv_descriptor_set_binding_layout_get_hw_size(const struct anv_descriptor_set_binding_layout *binding)
{
   if (!binding->immutable_samplers)
      return binding->array_size;

   uint32_t total_plane_count = 0;
   for (uint32_t i = 0; i < binding->array_size; i++)
      total_plane_count += binding->immutable_samplers[i]->n_planes;

   return total_plane_count;
}

struct surface_state_free_list_entry {
   void *next;
   struct anv_state state;
};

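/* Set allocation strategy: first try to bump-allocate from pool->next; if
 * the tail of the pool is too small, fall back to a first-fit walk of the
 * free list. Buffer view surface states are likewise taken from a small
 * free list of 64-byte states before falling back to the pool's state
 * stream.
 */
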
VkResult
anv_descriptor_set_create(struct anv_device *device,
                          struct anv_descriptor_pool *pool,
                          struct anv_descriptor_set_layout *layout,
                          struct anv_descriptor_set **out_set)
{
   struct anv_descriptor_set *set;
   const size_t size = anv_descriptor_set_layout_size(layout);

   set = NULL;
   if (size <= pool->size - pool->next) {
      set = (struct anv_descriptor_set *) (pool->data + pool->next);
      pool->next += size;
   } else {
      struct pool_free_list_entry *entry;
      uint32_t *link = &pool->free_list;
      for (uint32_t f = pool->free_list; f != EMPTY; f = entry->next) {
         entry = (struct pool_free_list_entry *) (pool->data + f);
         if (size <= entry->size) {
            *link = entry->next;
            set = (struct anv_descriptor_set *) entry;
            break;
         }
         link = &entry->next;
      }
   }

   if (set == NULL) {
      if (pool->free_list != EMPTY) {
         return vk_error(VK_ERROR_FRAGMENTED_POOL);
      } else {
         return vk_error(VK_ERROR_OUT_OF_POOL_MEMORY);
      }
   }

   set->layout = layout;
   anv_descriptor_set_layout_ref(layout);

   set->size = size;
   set->buffer_views =
      (struct anv_buffer_view *) &set->descriptors[layout->size];
   set->buffer_count = layout->buffer_count;

   /* By defining the descriptors to be zero now, we can later verify that
    * a descriptor has not been populated with user data.
    */
   memset(set->descriptors, 0, sizeof(struct anv_descriptor) * layout->size);

   /* Go through and fill out immutable samplers if we have any */
   struct anv_descriptor *desc = set->descriptors;
   for (uint32_t b = 0; b < layout->binding_count; b++) {
      if (layout->binding[b].immutable_samplers) {
         for (uint32_t i = 0; i < layout->binding[b].array_size; i++) {
            /* The type will get changed to COMBINED_IMAGE_SAMPLER in
             * UpdateDescriptorSets if needed. However, if the descriptor
             * set has an immutable sampler, UpdateDescriptorSets may never
             * touch it, so we need to make sure it's 100% valid now.
             */
            desc[i] = (struct anv_descriptor) {
               .type = VK_DESCRIPTOR_TYPE_SAMPLER,
               .sampler = layout->binding[b].immutable_samplers[i],
            };
         }
      }
      desc += layout->binding[b].array_size;
   }

   /* Allocate surface state for the buffer views. */
   for (uint32_t b = 0; b < layout->buffer_count; b++) {
      struct surface_state_free_list_entry *entry =
         pool->surface_state_free_list;
      struct anv_state state;

      if (entry) {
         state = entry->state;
         pool->surface_state_free_list = entry->next;
         assert(state.alloc_size == 64);
      } else {
         state = anv_state_stream_alloc(&pool->surface_state_stream, 64, 64);
      }

      set->buffer_views[b].surface_state = state;
   }

   *out_set = set;

   return VK_SUCCESS;
}

void
anv_descriptor_set_destroy(struct anv_device *device,
                           struct anv_descriptor_pool *pool,
                           struct anv_descriptor_set *set)
{
   anv_descriptor_set_layout_unref(device, set->layout);

   /* Put the buffer view surface state back on the free list. */
   for (uint32_t b = 0; b < set->buffer_count; b++) {
      struct surface_state_free_list_entry *entry =
         set->buffer_views[b].surface_state.map;
      entry->next = pool->surface_state_free_list;
      entry->state = set->buffer_views[b].surface_state;
      pool->surface_state_free_list = entry;
   }

   /* Put the descriptor set allocation back on the free list. */
   const uint32_t index = (char *) set - pool->data;
   if (index + set->size == pool->next) {
      pool->next = index;
   } else {
      struct pool_free_list_entry *entry = (struct pool_free_list_entry *) set;
      entry->next = pool->free_list;
      entry->size = set->size;
      pool->free_list = (char *) entry - pool->data;
   }
}

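/* vkAllocateDescriptorSets allocates the requested sets one at a time; if
 * any allocation fails, the sets created so far are freed again before the
 * error is returned.
 */
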
VkResult anv_AllocateDescriptorSets(
    VkDevice                                    _device,
    const VkDescriptorSetAllocateInfo*          pAllocateInfo,
    VkDescriptorSet*                            pDescriptorSets)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   ANV_FROM_HANDLE(anv_descriptor_pool, pool, pAllocateInfo->descriptorPool);

   VkResult result = VK_SUCCESS;
   struct anv_descriptor_set *set;
   uint32_t i;

   for (i = 0; i < pAllocateInfo->descriptorSetCount; i++) {
      ANV_FROM_HANDLE(anv_descriptor_set_layout, layout,
                      pAllocateInfo->pSetLayouts[i]);

      result = anv_descriptor_set_create(device, pool, layout, &set);
      if (result != VK_SUCCESS)
         break;

      pDescriptorSets[i] = anv_descriptor_set_to_handle(set);
   }

   if (result != VK_SUCCESS)
      anv_FreeDescriptorSets(_device, pAllocateInfo->descriptorPool,
                             i, pDescriptorSets);

   return result;
}

VkResult anv_FreeDescriptorSets(
    VkDevice                                    _device,
    VkDescriptorPool                            descriptorPool,
    uint32_t                                    count,
    const VkDescriptorSet*                      pDescriptorSets)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   ANV_FROM_HANDLE(anv_descriptor_pool, pool, descriptorPool);

   for (uint32_t i = 0; i < count; i++) {
      ANV_FROM_HANDLE(anv_descriptor_set, set, pDescriptorSets[i]);

      if (!set)
         continue;

      anv_descriptor_set_destroy(device, pool, set);
   }

   return VK_SUCCESS;
}

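/* The anv_descriptor_set_write_* helpers below update a single descriptor
 * (and, for buffers, its anv_buffer_view) in place. They are shared by
 * vkUpdateDescriptorSets and by the descriptor update template path; a
 * non-NULL alloc_stream makes the buffer path allocate surface state from
 * that stream instead of using the state set up by the descriptor pool.
 */
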
void
anv_descriptor_set_write_image_view(struct anv_descriptor_set *set,
                                    const struct gen_device_info * const devinfo,
                                    const VkDescriptorImageInfo * const info,
                                    VkDescriptorType type,
                                    uint32_t binding,
                                    uint32_t element)
{
   const struct anv_descriptor_set_binding_layout *bind_layout =
      &set->layout->binding[binding];
   struct anv_descriptor *desc =
      &set->descriptors[bind_layout->descriptor_index + element];
   struct anv_image_view *image_view = NULL;
   struct anv_sampler *sampler = NULL;

   assert(type == bind_layout->type);

   switch (type) {
   case VK_DESCRIPTOR_TYPE_SAMPLER:
      sampler = anv_sampler_from_handle(info->sampler);
      break;

   case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
      image_view = anv_image_view_from_handle(info->imageView);
      sampler = anv_sampler_from_handle(info->sampler);
      break;

   case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE:
   case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
   case VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT:
      image_view = anv_image_view_from_handle(info->imageView);
      break;

   default:
      unreachable("invalid descriptor type");
   }

   /* If this descriptor has an immutable sampler, we don't want to stomp on
    * it.
    */
   sampler = bind_layout->immutable_samplers ?
             bind_layout->immutable_samplers[element] :
             sampler;

   *desc = (struct anv_descriptor) {
      .type = type,
      .layout = info->imageLayout,
      .image_view = image_view,
      .sampler = sampler,
   };
}

void
anv_descriptor_set_write_buffer_view(struct anv_descriptor_set *set,
                                     VkDescriptorType type,
                                     struct anv_buffer_view *buffer_view,
                                     uint32_t binding,
                                     uint32_t element)
{
   const struct anv_descriptor_set_binding_layout *bind_layout =
      &set->layout->binding[binding];
   struct anv_descriptor *desc =
      &set->descriptors[bind_layout->descriptor_index + element];

   assert(type == bind_layout->type);

   *desc = (struct anv_descriptor) {
      .type = type,
      .buffer_view = buffer_view,
   };
}

void
anv_descriptor_set_write_buffer(struct anv_descriptor_set *set,
                                struct anv_device *device,
                                struct anv_state_stream *alloc_stream,
                                VkDescriptorType type,
                                struct anv_buffer *buffer,
                                uint32_t binding,
                                uint32_t element,
                                VkDeviceSize offset,
                                VkDeviceSize range)
{
   const struct anv_descriptor_set_binding_layout *bind_layout =
      &set->layout->binding[binding];
   struct anv_descriptor *desc =
      &set->descriptors[bind_layout->descriptor_index + element];

   assert(type == bind_layout->type);

   if (type == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC ||
       type == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC) {
      *desc = (struct anv_descriptor) {
         .type = type,
         .buffer = buffer,
         .offset = offset,
         .range = range,
      };
   } else {
      struct anv_buffer_view *bview =
         &set->buffer_views[bind_layout->buffer_index + element];

      bview->format = anv_isl_format_for_descriptor_type(type);
      bview->range = anv_buffer_get_range(buffer, offset, range);
      bview->address = anv_address_add(buffer->address, offset);

      /* If we're writing descriptors through a push command, we need to
       * allocate the surface state from the command buffer. Otherwise it will
       * be allocated by the descriptor pool when calling
       * vkAllocateDescriptorSets.
       */
      if (alloc_stream)
         bview->surface_state = anv_state_stream_alloc(alloc_stream, 64, 64);

      anv_fill_buffer_surface_state(device, bview->surface_state,
                                    bview->format,
                                    bview->address, bview->range, 1);

      *desc = (struct anv_descriptor) {
         .type = type,
         .buffer_view = bview,
      };
   }
}

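/* vkUpdateDescriptorSets dispatches on descriptorType: image and sampler
 * writes go through write_image_view, texel buffers through
 * write_buffer_view, and (dynamic) uniform/storage buffers through
 * write_buffer with a NULL alloc_stream. Copies are a plain element-wise
 * copy of the anv_descriptor structs.
 */
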
void anv_UpdateDescriptorSets(
    VkDevice                                    _device,
    uint32_t                                    descriptorWriteCount,
    const VkWriteDescriptorSet*                 pDescriptorWrites,
    uint32_t                                    descriptorCopyCount,
    const VkCopyDescriptorSet*                  pDescriptorCopies)
{
   ANV_FROM_HANDLE(anv_device, device, _device);

   for (uint32_t i = 0; i < descriptorWriteCount; i++) {
      const VkWriteDescriptorSet *write = &pDescriptorWrites[i];
      ANV_FROM_HANDLE(anv_descriptor_set, set, write->dstSet);

      switch (write->descriptorType) {
      case VK_DESCRIPTOR_TYPE_SAMPLER:
      case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
      case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE:
      case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
      case VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT:
         for (uint32_t j = 0; j < write->descriptorCount; j++) {
            anv_descriptor_set_write_image_view(set, &device->info,
                                                write->pImageInfo + j,
                                                write->descriptorType,
                                                write->dstBinding,
                                                write->dstArrayElement + j);
         }
         break;

      case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
      case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
         for (uint32_t j = 0; j < write->descriptorCount; j++) {
            ANV_FROM_HANDLE(anv_buffer_view, bview,
                            write->pTexelBufferView[j]);

            anv_descriptor_set_write_buffer_view(set,
                                                 write->descriptorType,
                                                 bview,
                                                 write->dstBinding,
                                                 write->dstArrayElement + j);
         }
         break;

      case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
      case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
      case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
      case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
         for (uint32_t j = 0; j < write->descriptorCount; j++) {
            assert(write->pBufferInfo[j].buffer);
            ANV_FROM_HANDLE(anv_buffer, buffer, write->pBufferInfo[j].buffer);

            anv_descriptor_set_write_buffer(set,
                                            device,
                                            NULL,
                                            write->descriptorType,
                                            buffer,
                                            write->dstBinding,
                                            write->dstArrayElement + j,
                                            write->pBufferInfo[j].offset,
                                            write->pBufferInfo[j].range);
         }
         break;

      default:
         break;
      }
   }

   for (uint32_t i = 0; i < descriptorCopyCount; i++) {
      const VkCopyDescriptorSet *copy = &pDescriptorCopies[i];
      ANV_FROM_HANDLE(anv_descriptor_set, src, copy->srcSet);
      ANV_FROM_HANDLE(anv_descriptor_set, dst, copy->dstSet);

      const struct anv_descriptor_set_binding_layout *src_layout =
         &src->layout->binding[copy->srcBinding];
      struct anv_descriptor *src_desc =
         &src->descriptors[src_layout->descriptor_index];
      src_desc += copy->srcArrayElement;

      const struct anv_descriptor_set_binding_layout *dst_layout =
         &dst->layout->binding[copy->dstBinding];
      struct anv_descriptor *dst_desc =
         &dst->descriptors[dst_layout->descriptor_index];
      dst_desc += copy->dstArrayElement;

      for (uint32_t j = 0; j < copy->descriptorCount; j++)
         dst_desc[j] = src_desc[j];
   }
}

/*
 * Descriptor update templates.
 */

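/* A template records, per entry, the descriptor type, binding, starting
 * array element, count, and the offset/stride with which the application's
 * raw data blob should be walked, so the same update can be replayed
 * against pData without building VkWriteDescriptorSet structures each time.
 */
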
void
anv_descriptor_set_write_template(struct anv_descriptor_set *set,
                                  struct anv_device *device,
                                  struct anv_state_stream *alloc_stream,
                                  const struct anv_descriptor_update_template *template,
                                  const void *data)
{
   for (uint32_t i = 0; i < template->entry_count; i++) {
      const struct anv_descriptor_template_entry *entry =
         &template->entries[i];

      switch (entry->type) {
      case VK_DESCRIPTOR_TYPE_SAMPLER:
      case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
      case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE:
      case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
      case VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT:
         for (uint32_t j = 0; j < entry->array_count; j++) {
            const VkDescriptorImageInfo *info =
               data + entry->offset + j * entry->stride;
            anv_descriptor_set_write_image_view(set, &device->info,
                                                info, entry->type,
                                                entry->binding,
                                                entry->array_element + j);
         }
         break;

      case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
      case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
         for (uint32_t j = 0; j < entry->array_count; j++) {
            const VkBufferView *_bview =
               data + entry->offset + j * entry->stride;
            ANV_FROM_HANDLE(anv_buffer_view, bview, *_bview);

            anv_descriptor_set_write_buffer_view(set,
                                                 entry->type,
                                                 bview,
                                                 entry->binding,
                                                 entry->array_element + j);
         }
         break;

      case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
      case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
      case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
      case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
         for (uint32_t j = 0; j < entry->array_count; j++) {
            const VkDescriptorBufferInfo *info =
               data + entry->offset + j * entry->stride;
            ANV_FROM_HANDLE(anv_buffer, buffer, info->buffer);

            anv_descriptor_set_write_buffer(set,
                                            device,
                                            alloc_stream,
                                            entry->type,
                                            buffer,
                                            entry->binding,
                                            entry->array_element + j,
                                            info->offset, info->range);
         }
         break;

      default:
         break;
      }
   }
}

VkResult anv_CreateDescriptorUpdateTemplate(
    VkDevice                                    _device,
    const VkDescriptorUpdateTemplateCreateInfo* pCreateInfo,
    const VkAllocationCallbacks*                pAllocator,
    VkDescriptorUpdateTemplate*                 pDescriptorUpdateTemplate)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   struct anv_descriptor_update_template *template;

   size_t size = sizeof(*template) +
      pCreateInfo->descriptorUpdateEntryCount * sizeof(template->entries[0]);
   template = vk_alloc2(&device->alloc, pAllocator, size, 8,
                        VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (template == NULL)
      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

   template->bind_point = pCreateInfo->pipelineBindPoint;

   if (pCreateInfo->templateType == VK_DESCRIPTOR_UPDATE_TEMPLATE_TYPE_DESCRIPTOR_SET)
      template->set = pCreateInfo->set;

   template->entry_count = pCreateInfo->descriptorUpdateEntryCount;
   for (uint32_t i = 0; i < template->entry_count; i++) {
      const VkDescriptorUpdateTemplateEntryKHR *pEntry =
         &pCreateInfo->pDescriptorUpdateEntries[i];

      template->entries[i] = (struct anv_descriptor_template_entry) {
         .type = pEntry->descriptorType,
         .binding = pEntry->dstBinding,
         .array_element = pEntry->dstArrayElement,
         .array_count = pEntry->descriptorCount,
         .offset = pEntry->offset,
         .stride = pEntry->stride,
      };
   }

   *pDescriptorUpdateTemplate =
      anv_descriptor_update_template_to_handle(template);

   return VK_SUCCESS;
}

void anv_DestroyDescriptorUpdateTemplate(
    VkDevice                                    _device,
    VkDescriptorUpdateTemplate                  descriptorUpdateTemplate,
    const VkAllocationCallbacks*                pAllocator)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   ANV_FROM_HANDLE(anv_descriptor_update_template, template,
                   descriptorUpdateTemplate);

   vk_free2(&device->alloc, pAllocator, template);
}

void anv_UpdateDescriptorSetWithTemplate(
    VkDevice                                    _device,
    VkDescriptorSet                             descriptorSet,
    VkDescriptorUpdateTemplate                  descriptorUpdateTemplate,
    const void*                                 pData)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   ANV_FROM_HANDLE(anv_descriptor_set, set, descriptorSet);
   ANV_FROM_HANDLE(anv_descriptor_update_template, template,
                   descriptorUpdateTemplate);

   anv_descriptor_set_write_template(set, device, NULL, template, pData);
}