/*
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
#include <assert.h>
#include <stdbool.h>
#include <string.h>

#include "util/mesa-sha1.h"
#include "vk_util.h"

#include "anv_private.h"

/*
 * Descriptor set layouts.
 */
static enum anv_descriptor_data
anv_descriptor_data_for_type(const struct anv_physical_device *device,
                             VkDescriptorType type)
{
   enum anv_descriptor_data data = 0;

   switch (type) {
   case VK_DESCRIPTOR_TYPE_SAMPLER:
      data = ANV_DESCRIPTOR_SAMPLER_STATE;
      if (device->has_bindless_samplers)
         data |= ANV_DESCRIPTOR_SAMPLED_IMAGE;
      break;

   case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
      data = ANV_DESCRIPTOR_SURFACE_STATE |
             ANV_DESCRIPTOR_SAMPLER_STATE;
      if (device->has_bindless_images || device->has_bindless_samplers)
         data |= ANV_DESCRIPTOR_SAMPLED_IMAGE;
      break;

   case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE:
   case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
      data = ANV_DESCRIPTOR_SURFACE_STATE;
      if (device->has_bindless_images)
         data |= ANV_DESCRIPTOR_SAMPLED_IMAGE;
      break;

   case VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT:
      data = ANV_DESCRIPTOR_SURFACE_STATE;
      break;

   case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
   case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
      data = ANV_DESCRIPTOR_SURFACE_STATE;
      if (device->info.gen < 9)
         data |= ANV_DESCRIPTOR_IMAGE_PARAM;
      if (device->has_bindless_images)
         data |= ANV_DESCRIPTOR_STORAGE_IMAGE;
      break;

   case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
   case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
      data = ANV_DESCRIPTOR_SURFACE_STATE |
             ANV_DESCRIPTOR_BUFFER_VIEW;
      break;

   case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
   case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
      data = ANV_DESCRIPTOR_SURFACE_STATE;
      break;

   case VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK_EXT:
      data = ANV_DESCRIPTOR_INLINE_UNIFORM;
      break;

   default:
      unreachable("Unsupported descriptor type");
   }

   /* On gen8 and above when we have softpin enabled, we also need to push
    * SSBO address ranges so that we can use A64 messages in the shader.
    */
   if (device->has_a64_buffer_access &&
       (type == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER ||
        type == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC))
      data |= ANV_DESCRIPTOR_ADDRESS_RANGE;

   return data;
}
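/* Returns the size of the shader-visible data chunks that descriptors with
 * the given data flags carry in the descriptor buffer; each flag set above
 * contributes one fixed-size struct.
 */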
static unsigned
anv_descriptor_data_size(enum anv_descriptor_data data)
{
   unsigned size = 0;

   if (data & ANV_DESCRIPTOR_SAMPLED_IMAGE)
      size += sizeof(struct anv_sampled_image_descriptor);

   if (data & ANV_DESCRIPTOR_STORAGE_IMAGE)
      size += sizeof(struct anv_storage_image_descriptor);

   if (data & ANV_DESCRIPTOR_IMAGE_PARAM)
      size += BRW_IMAGE_PARAM_SIZE * 4;

   if (data & ANV_DESCRIPTOR_ADDRESS_RANGE)
      size += sizeof(struct anv_address_range_descriptor);

   return size;
}
/** Returns the size in bytes of each descriptor with the given layout */
unsigned
anv_descriptor_size(const struct anv_descriptor_set_binding_layout *layout)
{
   if (layout->data & ANV_DESCRIPTOR_INLINE_UNIFORM) {
      assert(layout->data == ANV_DESCRIPTOR_INLINE_UNIFORM);
      return layout->array_size;
   }

   unsigned size = anv_descriptor_data_size(layout->data);

   /* For multi-planar bindings, we make every descriptor consume the maximum
    * number of planes so we don't have to bother with walking arrays and
    * adding things up every time.  Fortunately, YCbCr samplers aren't all
    * that common and likely won't be in the middle of big arrays.
    */
   if (layout->max_plane_count > 1)
      size *= layout->max_plane_count;

   return size;
}
/** Returns the size in bytes of each descriptor of the given type
 *
 * This version of the function does not have access to the entire layout so
 * it may only work on certain descriptor types where the descriptor size is
 * entirely determined by the descriptor type.  Whenever possible, code should
 * use anv_descriptor_size() instead.
 */
unsigned
anv_descriptor_type_size(const struct anv_physical_device *pdevice,
                         VkDescriptorType type)
{
   assert(type != VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK_EXT &&
          type != VK_DESCRIPTOR_TYPE_SAMPLER &&
          type != VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE &&
          type != VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER);

   return anv_descriptor_data_size(anv_descriptor_data_for_type(pdevice, type));
}
static bool
anv_descriptor_data_supports_bindless(const struct anv_physical_device *pdevice,
                                      enum anv_descriptor_data data,
                                      bool sampler)
{
   if (data & ANV_DESCRIPTOR_ADDRESS_RANGE) {
      assert(pdevice->has_a64_buffer_access);
      return true;
   }

   if (data & ANV_DESCRIPTOR_SAMPLED_IMAGE) {
      assert(pdevice->has_bindless_images || pdevice->has_bindless_samplers);
      return sampler ? pdevice->has_bindless_samplers :
                       pdevice->has_bindless_images;
   }

   if (data & ANV_DESCRIPTOR_STORAGE_IMAGE) {
      assert(pdevice->has_bindless_images);
      return true;
   }

   return false;
}
bool
anv_descriptor_supports_bindless(const struct anv_physical_device *pdevice,
                                 const struct anv_descriptor_set_binding_layout *binding,
                                 bool sampler)
{
   return anv_descriptor_data_supports_bindless(pdevice, binding->data,
                                                sampler);
}
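/* A binding requires bindless handling either when the driver has been asked
 * to always use bindless, or when it carries one of the update-after-bind
 * style flags below, which cannot be serviced by binding tables that are
 * baked at command-record time.
 */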
bool
anv_descriptor_requires_bindless(const struct anv_physical_device *pdevice,
                                 const struct anv_descriptor_set_binding_layout *binding,
                                 bool sampler)
{
   if (pdevice->always_use_bindless)
      return anv_descriptor_supports_bindless(pdevice, binding, sampler);

   static const VkDescriptorBindingFlagBitsEXT flags_requiring_bindless =
      VK_DESCRIPTOR_BINDING_UPDATE_AFTER_BIND_BIT_EXT |
      VK_DESCRIPTOR_BINDING_UPDATE_UNUSED_WHILE_PENDING_BIT_EXT |
      VK_DESCRIPTOR_BINDING_PARTIALLY_BOUND_BIT_EXT;

   return (binding->flags & flags_requiring_bindless) != 0;
}
void anv_GetDescriptorSetLayoutSupport(
    VkDevice                                    _device,
    const VkDescriptorSetLayoutCreateInfo*      pCreateInfo,
    VkDescriptorSetLayoutSupport*               pSupport)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   const struct anv_physical_device *pdevice =
      &device->instance->physicalDevice;

   uint32_t surface_count[MESA_SHADER_STAGES] = { 0, };

   for (uint32_t b = 0; b < pCreateInfo->bindingCount; b++) {
      const VkDescriptorSetLayoutBinding *binding = &pCreateInfo->pBindings[b];

      enum anv_descriptor_data desc_data =
         anv_descriptor_data_for_type(pdevice, binding->descriptorType);

      switch (binding->descriptorType) {
      case VK_DESCRIPTOR_TYPE_SAMPLER:
         /* There is no real limit on samplers */
         break;

      case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
         if (anv_descriptor_data_supports_bindless(pdevice, desc_data, false))
            break;

         if (binding->pImmutableSamplers) {
            for (uint32_t i = 0; i < binding->descriptorCount; i++) {
               ANV_FROM_HANDLE(anv_sampler, sampler,
                               binding->pImmutableSamplers[i]);
               anv_foreach_stage(s, binding->stageFlags)
                  surface_count[s] += sampler->n_planes;
            }
         } else {
            anv_foreach_stage(s, binding->stageFlags)
               surface_count[s] += binding->descriptorCount;
         }
         break;

      default:
         if (anv_descriptor_data_supports_bindless(pdevice, desc_data, false))
            break;

         anv_foreach_stage(s, binding->stageFlags)
            surface_count[s] += binding->descriptorCount;
         break;
      }
   }

   bool supported = true;
   for (unsigned s = 0; s < MESA_SHADER_STAGES; s++) {
      /* Our maximum binding table size is 240 and we need to reserve 8 for
       * render targets.
       */
      if (surface_count[s] >= MAX_BINDING_TABLE_SIZE - MAX_RTS)
         supported = false;
   }

   pSupport->supported = supported;
}
VkResult anv_CreateDescriptorSetLayout(
    VkDevice                                    _device,
    const VkDescriptorSetLayoutCreateInfo*      pCreateInfo,
    const VkAllocationCallbacks*                pAllocator,
    VkDescriptorSetLayout*                      pSetLayout)
{
   ANV_FROM_HANDLE(anv_device, device, _device);

   assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO);

   uint32_t max_binding = 0;
   uint32_t immutable_sampler_count = 0;
   for (uint32_t j = 0; j < pCreateInfo->bindingCount; j++) {
      max_binding = MAX2(max_binding, pCreateInfo->pBindings[j].binding);

      /* From the Vulkan 1.1.97 spec for VkDescriptorSetLayoutBinding:
       *
       *    "If descriptorType specifies a VK_DESCRIPTOR_TYPE_SAMPLER or
       *    VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER type descriptor, then
       *    pImmutableSamplers can be used to initialize a set of immutable
       *    samplers. [...]  If descriptorType is not one of these descriptor
       *    types, then pImmutableSamplers is ignored."
       *
       * We need to be careful here and only parse pImmutableSamplers if we
       * have one of the right descriptor types.
       */
      VkDescriptorType desc_type = pCreateInfo->pBindings[j].descriptorType;
      if ((desc_type == VK_DESCRIPTOR_TYPE_SAMPLER ||
           desc_type == VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER) &&
          pCreateInfo->pBindings[j].pImmutableSamplers)
         immutable_sampler_count += pCreateInfo->pBindings[j].descriptorCount;
   }

   struct anv_descriptor_set_layout *set_layout;
   struct anv_descriptor_set_binding_layout *bindings;
   struct anv_sampler **samplers;

   /* We need to allocate descriptor set layouts off the device allocator
    * with DEVICE scope because they are reference counted and may not be
    * destroyed when vkDestroyDescriptorSetLayout is called.
    */
   ANV_MULTIALLOC(ma);
   anv_multialloc_add(&ma, &set_layout, 1);
   anv_multialloc_add(&ma, &bindings, max_binding + 1);
   anv_multialloc_add(&ma, &samplers, immutable_sampler_count);

   if (!anv_multialloc_alloc(&ma, &device->alloc,
                             VK_SYSTEM_ALLOCATION_SCOPE_DEVICE))
      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

   memset(set_layout, 0, sizeof(*set_layout));
   set_layout->ref_cnt = 1;
   set_layout->binding_count = max_binding + 1;

   for (uint32_t b = 0; b <= max_binding; b++) {
      /* Initialize all binding_layout entries to -1 */
      memset(&set_layout->binding[b], -1, sizeof(set_layout->binding[b]));

      set_layout->binding[b].flags = 0;
      set_layout->binding[b].data = 0;
      set_layout->binding[b].max_plane_count = 0;
      set_layout->binding[b].array_size = 0;
      set_layout->binding[b].immutable_samplers = NULL;
   }

   /* Initialize all samplers to 0 */
   memset(samplers, 0, immutable_sampler_count * sizeof(*samplers));

   uint32_t buffer_view_count = 0;
   uint32_t dynamic_offset_count = 0;
   uint32_t descriptor_buffer_size = 0;

   for (uint32_t j = 0; j < pCreateInfo->bindingCount; j++) {
      const VkDescriptorSetLayoutBinding *binding = &pCreateInfo->pBindings[j];
      uint32_t b = binding->binding;
      /* We temporarily store pCreateInfo->pBindings[] index (plus one) in the
       * immutable_samplers pointer.  This provides us with a quick-and-dirty
       * way to sort the bindings by binding number.
       */
      set_layout->binding[b].immutable_samplers = (void *)(uintptr_t)(j + 1);
   }

   const VkDescriptorSetLayoutBindingFlagsCreateInfoEXT *binding_flags_info =
      vk_find_struct_const(pCreateInfo->pNext,
                           DESCRIPTOR_SET_LAYOUT_BINDING_FLAGS_CREATE_INFO_EXT);

   for (uint32_t b = 0; b <= max_binding; b++) {
      /* We stashed the pCreateInfo->pBindings[] index (plus one) in the
       * immutable_samplers pointer.  Check for NULL (empty binding) and then
       * reset it and compute the index.
       */
      if (set_layout->binding[b].immutable_samplers == NULL)
         continue;

      const uint32_t info_idx =
         (uintptr_t)(void *)set_layout->binding[b].immutable_samplers - 1;
      set_layout->binding[b].immutable_samplers = NULL;

      const VkDescriptorSetLayoutBinding *binding =
         &pCreateInfo->pBindings[info_idx];

      if (binding->descriptorCount == 0)
         continue;

      set_layout->binding[b].type = binding->descriptorType;

      if (binding_flags_info && binding_flags_info->bindingCount > 0) {
         assert(binding_flags_info->bindingCount == pCreateInfo->bindingCount);
         set_layout->binding[b].flags =
            binding_flags_info->pBindingFlags[info_idx];
      }

      set_layout->binding[b].data =
         anv_descriptor_data_for_type(&device->instance->physicalDevice,
                                      binding->descriptorType);
      set_layout->binding[b].array_size = binding->descriptorCount;
      set_layout->binding[b].descriptor_index = set_layout->size;
      set_layout->size += binding->descriptorCount;

      if (set_layout->binding[b].data & ANV_DESCRIPTOR_BUFFER_VIEW) {
         set_layout->binding[b].buffer_view_index = buffer_view_count;
         buffer_view_count += binding->descriptorCount;
      }

      switch (binding->descriptorType) {
      case VK_DESCRIPTOR_TYPE_SAMPLER:
      case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
         set_layout->binding[b].max_plane_count = 1;
         if (binding->pImmutableSamplers) {
            set_layout->binding[b].immutable_samplers = samplers;
            samplers += binding->descriptorCount;

            for (uint32_t i = 0; i < binding->descriptorCount; i++) {
               ANV_FROM_HANDLE(anv_sampler, sampler,
                               binding->pImmutableSamplers[i]);

               set_layout->binding[b].immutable_samplers[i] = sampler;
               if (set_layout->binding[b].max_plane_count < sampler->n_planes)
                  set_layout->binding[b].max_plane_count = sampler->n_planes;
            }
         }
         break;

      case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE:
         set_layout->binding[b].max_plane_count = 1;
         break;

      default:
         break;
      }

      switch (binding->descriptorType) {
      case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
      case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
         set_layout->binding[b].dynamic_offset_index = dynamic_offset_count;
         dynamic_offset_count += binding->descriptorCount;
         break;

      default:
         break;
      }

      if (binding->descriptorType ==
          VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK_EXT) {
         /* Inline uniform blocks are specified to use the descriptor array
          * size as the size in bytes of the block.
          */
         descriptor_buffer_size = align_u32(descriptor_buffer_size, 32);
         set_layout->binding[b].descriptor_offset = descriptor_buffer_size;
         descriptor_buffer_size += binding->descriptorCount;
      } else {
         set_layout->binding[b].descriptor_offset = descriptor_buffer_size;
         descriptor_buffer_size +=
            anv_descriptor_size(&set_layout->binding[b]) *
            binding->descriptorCount;
      }

      set_layout->shader_stages |= binding->stageFlags;
   }

   set_layout->buffer_view_count = buffer_view_count;
   set_layout->dynamic_offset_count = dynamic_offset_count;
   set_layout->descriptor_buffer_size = descriptor_buffer_size;

   *pSetLayout = anv_descriptor_set_layout_to_handle(set_layout);

   return VK_SUCCESS;
}
void anv_DestroyDescriptorSetLayout(
    VkDevice                                    _device,
    VkDescriptorSetLayout                       _set_layout,
    const VkAllocationCallbacks*                pAllocator)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   ANV_FROM_HANDLE(anv_descriptor_set_layout, set_layout, _set_layout);

   if (!set_layout)
      return;

   anv_descriptor_set_layout_unref(device, set_layout);
}
#define SHA1_UPDATE_VALUE(ctx, x) _mesa_sha1_update(ctx, &(x), sizeof(x));
static void
sha1_update_immutable_sampler(struct mesa_sha1 *ctx,
                              const struct anv_sampler *sampler)
{
   if (!sampler->conversion)
      return;

   /* The only thing that affects the shader is ycbcr conversion */
   _mesa_sha1_update(ctx, sampler->conversion,
                     sizeof(*sampler->conversion));
}
static void
sha1_update_descriptor_set_binding_layout(struct mesa_sha1 *ctx,
   const struct anv_descriptor_set_binding_layout *layout)
{
   SHA1_UPDATE_VALUE(ctx, layout->flags);
   SHA1_UPDATE_VALUE(ctx, layout->data);
   SHA1_UPDATE_VALUE(ctx, layout->max_plane_count);
   SHA1_UPDATE_VALUE(ctx, layout->array_size);
   SHA1_UPDATE_VALUE(ctx, layout->descriptor_index);
   SHA1_UPDATE_VALUE(ctx, layout->dynamic_offset_index);
   SHA1_UPDATE_VALUE(ctx, layout->buffer_view_index);
   SHA1_UPDATE_VALUE(ctx, layout->descriptor_offset);

   if (layout->immutable_samplers) {
      for (uint16_t i = 0; i < layout->array_size; i++)
         sha1_update_immutable_sampler(ctx, layout->immutable_samplers[i]);
   }
}
static void
sha1_update_descriptor_set_layout(struct mesa_sha1 *ctx,
                                  const struct anv_descriptor_set_layout *layout)
{
   SHA1_UPDATE_VALUE(ctx, layout->binding_count);
   SHA1_UPDATE_VALUE(ctx, layout->size);
   SHA1_UPDATE_VALUE(ctx, layout->shader_stages);
   SHA1_UPDATE_VALUE(ctx, layout->buffer_view_count);
   SHA1_UPDATE_VALUE(ctx, layout->dynamic_offset_count);
   SHA1_UPDATE_VALUE(ctx, layout->descriptor_buffer_size);

   for (uint16_t i = 0; i < layout->binding_count; i++)
      sha1_update_descriptor_set_binding_layout(ctx, &layout->binding[i]);
}
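/* The SHA1 computed from the helpers above covers every layout field the
 * compiler consumes, so two pipeline layouts that hash identically below
 * should be interchangeable for shader compilation and pipeline caching.
 */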
/*
 * Pipeline layouts.  These have nothing to do with the pipeline.  They are
 * just multiple descriptor set layouts pasted together.
 */
VkResult anv_CreatePipelineLayout(
    VkDevice                                    _device,
    const VkPipelineLayoutCreateInfo*           pCreateInfo,
    const VkAllocationCallbacks*                pAllocator,
    VkPipelineLayout*                           pPipelineLayout)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   struct anv_pipeline_layout *layout;

   assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO);

   layout = vk_alloc2(&device->alloc, pAllocator, sizeof(*layout), 8,
                      VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (layout == NULL)
      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

   layout->num_sets = pCreateInfo->setLayoutCount;

   unsigned dynamic_offset_count = 0;

   for (uint32_t set = 0; set < pCreateInfo->setLayoutCount; set++) {
      ANV_FROM_HANDLE(anv_descriptor_set_layout, set_layout,
                      pCreateInfo->pSetLayouts[set]);
      layout->set[set].layout = set_layout;
      anv_descriptor_set_layout_ref(set_layout);

      layout->set[set].dynamic_offset_start = dynamic_offset_count;
      for (uint32_t b = 0; b < set_layout->binding_count; b++) {
         if (set_layout->binding[b].dynamic_offset_index < 0)
            continue;
         dynamic_offset_count += set_layout->binding[b].array_size;
      }
   }

   struct mesa_sha1 ctx;
   _mesa_sha1_init(&ctx);
   for (unsigned s = 0; s < layout->num_sets; s++) {
      sha1_update_descriptor_set_layout(&ctx, layout->set[s].layout);
      _mesa_sha1_update(&ctx, &layout->set[s].dynamic_offset_start,
                        sizeof(layout->set[s].dynamic_offset_start));
   }
   _mesa_sha1_update(&ctx, &layout->num_sets, sizeof(layout->num_sets));
   _mesa_sha1_final(&ctx, layout->sha1);

   *pPipelineLayout = anv_pipeline_layout_to_handle(layout);

   return VK_SUCCESS;
}
void anv_DestroyPipelineLayout(
    VkDevice                                    _device,
    VkPipelineLayout                            _pipelineLayout,
    const VkAllocationCallbacks*                pAllocator)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   ANV_FROM_HANDLE(anv_pipeline_layout, pipeline_layout, _pipelineLayout);

   if (!pipeline_layout)
      return;

   for (uint32_t i = 0; i < pipeline_layout->num_sets; i++)
      anv_descriptor_set_layout_unref(device, pipeline_layout->set[i].layout);

   vk_free2(&device->alloc, pAllocator, pipeline_layout);
}
/*
 * Descriptor pools.
 *
 * These are implemented using a big pool of memory and a free-list for the
 * host memory allocations and a state_stream and a free list for the buffer
 * view surface state.  The spec allows us to fail to allocate due to
 * fragmentation in all cases but two: 1) after pool reset, allocating up
 * until the pool size with no freeing must succeed and 2) allocating and
 * freeing only descriptor sets with the same layout.  Case 1) is easy enough,
 * and the free lists let us recycle blocks for case 2).
 */

/* The vma heap reserves 0 to mean NULL; we have to offset by some amount to
 * ensure we can allocate the entire BO without hitting zero.  The actual
 * amount doesn't matter.
 */
#define POOL_HEAP_OFFSET 64

#define EMPTY 1
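/* A pool is one host allocation: the anv_descriptor_pool struct followed by
 * pool_size bytes of set storage, carved out by a bump pointer (pool->next)
 * with freed sets threaded onto pool->free_list.  If any descriptor type in
 * the pool needs shader-visible descriptor data, the pool also owns a
 * CPU-mapped BO whose space is managed by a util_vma_heap.
 */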
VkResult anv_CreateDescriptorPool(
    VkDevice                                    _device,
    const VkDescriptorPoolCreateInfo*           pCreateInfo,
    const VkAllocationCallbacks*                pAllocator,
    VkDescriptorPool*                           pDescriptorPool)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   struct anv_descriptor_pool *pool;

   const VkDescriptorPoolInlineUniformBlockCreateInfoEXT *inline_info =
      vk_find_struct_const(pCreateInfo->pNext,
                           DESCRIPTOR_POOL_INLINE_UNIFORM_BLOCK_CREATE_INFO_EXT);

   uint32_t descriptor_count = 0;
   uint32_t buffer_view_count = 0;
   uint32_t descriptor_bo_size = 0;
   for (uint32_t i = 0; i < pCreateInfo->poolSizeCount; i++) {
      enum anv_descriptor_data desc_data =
         anv_descriptor_data_for_type(&device->instance->physicalDevice,
                                      pCreateInfo->pPoolSizes[i].type);

      if (desc_data & ANV_DESCRIPTOR_BUFFER_VIEW)
         buffer_view_count += pCreateInfo->pPoolSizes[i].descriptorCount;

      unsigned desc_data_size = anv_descriptor_data_size(desc_data) *
                                pCreateInfo->pPoolSizes[i].descriptorCount;

      /* Combined image sampler descriptors can take up to 3 slots if they
       * hold a YCbCr image.
       */
      if (pCreateInfo->pPoolSizes[i].type ==
          VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER)
         desc_data_size *= 3;

      if (pCreateInfo->pPoolSizes[i].type ==
          VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK_EXT) {
         /* Inline uniform blocks are specified to use the descriptor array
          * size as the size in bytes of the block.
          */
         desc_data_size += pCreateInfo->pPoolSizes[i].descriptorCount;
      }

      descriptor_bo_size += desc_data_size;

      descriptor_count += pCreateInfo->pPoolSizes[i].descriptorCount;
   }

   /* We have to align descriptor buffer allocations to 32B so that we can
    * push descriptor buffers.  This means that each descriptor buffer
    * allocated may burn up to 32B of extra space to get the right alignment.
    * (Technically, it's at most 28B because we're always going to start at
    * least 4B aligned but we're being conservative here.)  Allocate enough
    * extra space that we can chop it into maxSets pieces and align each one
    * of them to 32B.
    */
   descriptor_bo_size += 32 * pCreateInfo->maxSets;
   descriptor_bo_size = ALIGN(descriptor_bo_size, 4096);
   /* We align inline uniform blocks to 32B */
   if (inline_info)
      descriptor_bo_size += 32 * inline_info->maxInlineUniformBlockBindings;

   const size_t pool_size =
      pCreateInfo->maxSets * sizeof(struct anv_descriptor_set) +
      descriptor_count * sizeof(struct anv_descriptor) +
      buffer_view_count * sizeof(struct anv_buffer_view);
   const size_t total_size = sizeof(*pool) + pool_size;

   pool = vk_alloc2(&device->alloc, pAllocator, total_size, 8,
                    VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (!pool)
      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

   pool->size = pool_size;
   pool->next = 0;
   pool->free_list = EMPTY;

   if (descriptor_bo_size > 0) {
      VkResult result = anv_bo_init_new(&pool->bo, device, descriptor_bo_size);
      if (result != VK_SUCCESS) {
         vk_free2(&device->alloc, pAllocator, pool);
         return result;
      }

      anv_gem_set_caching(device, pool->bo.gem_handle, I915_CACHING_CACHED);

      pool->bo.map = anv_gem_mmap(device, pool->bo.gem_handle, 0,
                                  descriptor_bo_size, 0);
      if (pool->bo.map == NULL) {
         anv_gem_close(device, pool->bo.gem_handle);
         vk_free2(&device->alloc, pAllocator, pool);
         return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
      }

      if (device->instance->physicalDevice.use_softpin) {
         pool->bo.flags |= EXEC_OBJECT_PINNED;
         anv_vma_alloc(device, &pool->bo);
      }

      util_vma_heap_init(&pool->bo_heap, POOL_HEAP_OFFSET, descriptor_bo_size);
   } else {
      pool->bo.size = 0;
   }

   anv_state_stream_init(&pool->surface_state_stream,
                         &device->surface_state_pool, 4096);
   pool->surface_state_free_list = NULL;

   list_inithead(&pool->desc_sets);

   *pDescriptorPool = anv_descriptor_pool_to_handle(pool);

   return VK_SUCCESS;
}
void anv_DestroyDescriptorPool(
    VkDevice                                    _device,
    VkDescriptorPool                            _pool,
    const VkAllocationCallbacks*                pAllocator)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   ANV_FROM_HANDLE(anv_descriptor_pool, pool, _pool);

   if (!pool)
      return;

   list_for_each_entry_safe(struct anv_descriptor_set, set,
                            &pool->desc_sets, pool_link) {
      anv_descriptor_set_destroy(device, pool, set);
   }

   if (pool->bo.size) {
      anv_gem_munmap(pool->bo.map, pool->bo.size);
      anv_vma_free(device, &pool->bo);
      anv_gem_close(device, pool->bo.gem_handle);
      util_vma_heap_finish(&pool->bo_heap);
   }

   anv_state_stream_finish(&pool->surface_state_stream);

   vk_free2(&device->alloc, pAllocator, pool);
}
VkResult anv_ResetDescriptorPool(
    VkDevice                                    _device,
    VkDescriptorPool                            descriptorPool,
    VkDescriptorPoolResetFlags                  flags)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   ANV_FROM_HANDLE(anv_descriptor_pool, pool, descriptorPool);

   list_for_each_entry_safe(struct anv_descriptor_set, set,
                            &pool->desc_sets, pool_link) {
      anv_descriptor_set_destroy(device, pool, set);
   }

   pool->next = 0;
   pool->free_list = EMPTY;

   if (pool->bo.size) {
      util_vma_heap_finish(&pool->bo_heap);
      util_vma_heap_init(&pool->bo_heap, POOL_HEAP_OFFSET, pool->bo.size);
   }

   anv_state_stream_finish(&pool->surface_state_stream);
   anv_state_stream_init(&pool->surface_state_stream,
                         &device->surface_state_pool, 4096);
   pool->surface_state_free_list = NULL;

   return VK_SUCCESS;
}
struct pool_free_list_entry {
   uint32_t next;
   uint32_t size;
};
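/* Set allocation first tries to bump pool->next; once that fails it falls
 * back to a first-fit walk of the free list.  Links are byte offsets into
 * pool->data with EMPTY as the end-of-list sentinel, so offset 0 stays
 * usable.
 */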
static VkResult
anv_descriptor_pool_alloc_set(struct anv_descriptor_pool *pool,
                              uint32_t size,
                              struct anv_descriptor_set **set)
{
   if (size <= pool->size - pool->next) {
      *set = (struct anv_descriptor_set *) (pool->data + pool->next);
      pool->next += size;
      return VK_SUCCESS;
   } else {
      struct pool_free_list_entry *entry;
      uint32_t *link = &pool->free_list;
      for (uint32_t f = pool->free_list; f != EMPTY; f = entry->next) {
         entry = (struct pool_free_list_entry *) (pool->data + f);
         if (size <= entry->size) {
            *link = entry->next;
            *set = (struct anv_descriptor_set *) entry;
            return VK_SUCCESS;
         }
         link = &entry->next;
      }

      if (pool->free_list != EMPTY) {
         return vk_error(VK_ERROR_FRAGMENTED_POOL);
      } else {
         return vk_error(VK_ERROR_OUT_OF_POOL_MEMORY);
      }
   }
}
static void
anv_descriptor_pool_free_set(struct anv_descriptor_pool *pool,
                             struct anv_descriptor_set *set)
{
   /* Put the descriptor set allocation back on the free list. */
   const uint32_t index = (char *) set - pool->data;
   if (index + set->size == pool->next) {
      pool->next = index;
   } else {
      struct pool_free_list_entry *entry = (struct pool_free_list_entry *) set;
      entry->next = pool->free_list;
      entry->size = set->size;
      pool->free_list = (char *) entry - pool->data;
   }
}
struct surface_state_free_list_entry {
   void *next;
   struct anv_state state;
};
static struct anv_state
anv_descriptor_pool_alloc_state(struct anv_descriptor_pool *pool)
{
   struct surface_state_free_list_entry *entry =
      pool->surface_state_free_list;

   if (entry) {
      struct anv_state state = entry->state;
      pool->surface_state_free_list = entry->next;
      assert(state.alloc_size == 64);
      return state;
   } else {
      return anv_state_stream_alloc(&pool->surface_state_stream, 64, 64);
   }
}
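/* All surface states allocated here are exactly 64B (see the assert above),
 * so any recycled free-list entry can satisfy any request without size
 * matching.
 */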
static void
anv_descriptor_pool_free_state(struct anv_descriptor_pool *pool,
                               struct anv_state state)
{
   /* Put the buffer view surface state back on the free list. */
   struct surface_state_free_list_entry *entry = state.map;
   entry->next = pool->surface_state_free_list;
   entry->state = state;
   pool->surface_state_free_list = entry;
}
size_t
anv_descriptor_set_layout_size(const struct anv_descriptor_set_layout *layout)
{
   return
      sizeof(struct anv_descriptor_set) +
      layout->size * sizeof(struct anv_descriptor) +
      layout->buffer_view_count * sizeof(struct anv_buffer_view);
}
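/* A descriptor set is a single block with the layout computed above: the
 * anv_descriptor_set header, then layout->size anv_descriptor entries, then
 * the buffer views; set->buffer_views below points just past
 * set->descriptors[layout->size].
 */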
VkResult
anv_descriptor_set_create(struct anv_device *device,
                          struct anv_descriptor_pool *pool,
                          struct anv_descriptor_set_layout *layout,
                          struct anv_descriptor_set **out_set)
{
   struct anv_descriptor_set *set;
   const size_t size = anv_descriptor_set_layout_size(layout);

   VkResult result = anv_descriptor_pool_alloc_set(pool, size, &set);
   if (result != VK_SUCCESS)
      return result;

   if (layout->descriptor_buffer_size) {
      /* Align the size to 32 so that alignment gaps don't cause extra holes
       * in the heap which can lead to bad performance.
       */
      uint64_t pool_vma_offset =
         util_vma_heap_alloc(&pool->bo_heap,
                             ALIGN(layout->descriptor_buffer_size, 32), 32);
      if (pool_vma_offset == 0) {
         anv_descriptor_pool_free_set(pool, set);
         return vk_error(VK_ERROR_FRAGMENTED_POOL);
      }
      assert(pool_vma_offset >= POOL_HEAP_OFFSET &&
             pool_vma_offset - POOL_HEAP_OFFSET <= INT32_MAX);
      set->desc_mem.offset = pool_vma_offset - POOL_HEAP_OFFSET;
      set->desc_mem.alloc_size = layout->descriptor_buffer_size;
      set->desc_mem.map = pool->bo.map + set->desc_mem.offset;

      set->desc_surface_state = anv_descriptor_pool_alloc_state(pool);
      anv_fill_buffer_surface_state(device, set->desc_surface_state,
                                    ISL_FORMAT_R32G32B32A32_FLOAT,
                                    (struct anv_address) {
                                       .bo = &pool->bo,
                                       .offset = set->desc_mem.offset,
                                    },
                                    layout->descriptor_buffer_size, 1);
   } else {
      set->desc_mem = ANV_STATE_NULL;
      set->desc_surface_state = ANV_STATE_NULL;
   }

   set->layout = layout;
   anv_descriptor_set_layout_ref(layout);

   set->size = size;
   set->buffer_views =
      (struct anv_buffer_view *) &set->descriptors[layout->size];
   set->buffer_view_count = layout->buffer_view_count;

   /* By defining the descriptors to be zero now, we can later verify that
    * a descriptor has not been populated with user data.
    */
   memset(set->descriptors, 0, sizeof(struct anv_descriptor) * layout->size);

   /* Go through and fill out immutable samplers if we have any */
   struct anv_descriptor *desc = set->descriptors;
   for (uint32_t b = 0; b < layout->binding_count; b++) {
      if (layout->binding[b].immutable_samplers) {
         for (uint32_t i = 0; i < layout->binding[b].array_size; i++) {
            /* The type will get changed to COMBINED_IMAGE_SAMPLER in
             * UpdateDescriptorSets if needed.  However, if the descriptor
             * set has an immutable sampler, UpdateDescriptorSets may never
             * touch it, so we need to make sure it's 100% valid now.
             *
             * We don't need to actually provide a sampler because the helper
             * will always write in the immutable sampler regardless of what
             * is in the sampler parameter.
             */
            struct VkDescriptorImageInfo info = { };
            anv_descriptor_set_write_image_view(device, set, &info,
                                                VK_DESCRIPTOR_TYPE_SAMPLER,
                                                b, i);
         }
      }
      desc += layout->binding[b].array_size;
   }

   /* Allocate surface state for the buffer views. */
   for (uint32_t b = 0; b < layout->buffer_view_count; b++) {
      set->buffer_views[b].surface_state =
         anv_descriptor_pool_alloc_state(pool);
   }

   list_addtail(&set->pool_link, &pool->desc_sets);

   *out_set = set;

   return VK_SUCCESS;
}
void
anv_descriptor_set_destroy(struct anv_device *device,
                           struct anv_descriptor_pool *pool,
                           struct anv_descriptor_set *set)
{
   anv_descriptor_set_layout_unref(device, set->layout);

   if (set->desc_mem.alloc_size) {
      util_vma_heap_free(&pool->bo_heap,
                         (uint64_t)set->desc_mem.offset + POOL_HEAP_OFFSET,
                         set->desc_mem.alloc_size);
      anv_descriptor_pool_free_state(pool, set->desc_surface_state);
   }

   for (uint32_t b = 0; b < set->buffer_view_count; b++)
      anv_descriptor_pool_free_state(pool, set->buffer_views[b].surface_state);

   list_del(&set->pool_link);

   anv_descriptor_pool_free_set(pool, set);
}
VkResult anv_AllocateDescriptorSets(
    VkDevice                                    _device,
    const VkDescriptorSetAllocateInfo*          pAllocateInfo,
    VkDescriptorSet*                            pDescriptorSets)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   ANV_FROM_HANDLE(anv_descriptor_pool, pool, pAllocateInfo->descriptorPool);

   VkResult result = VK_SUCCESS;
   struct anv_descriptor_set *set;
   uint32_t i;

   for (i = 0; i < pAllocateInfo->descriptorSetCount; i++) {
      ANV_FROM_HANDLE(anv_descriptor_set_layout, layout,
                      pAllocateInfo->pSetLayouts[i]);

      result = anv_descriptor_set_create(device, pool, layout, &set);
      if (result != VK_SUCCESS)
         break;

      pDescriptorSets[i] = anv_descriptor_set_to_handle(set);
   }

   if (result != VK_SUCCESS)
      anv_FreeDescriptorSets(_device, pAllocateInfo->descriptorPool,
                             i, pDescriptorSets);

   return result;
}
VkResult anv_FreeDescriptorSets(
    VkDevice                                    _device,
    VkDescriptorPool                            descriptorPool,
    uint32_t                                    count,
    const VkDescriptorSet*                      pDescriptorSets)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   ANV_FROM_HANDLE(anv_descriptor_pool, pool, descriptorPool);

   for (uint32_t i = 0; i < count; i++) {
      ANV_FROM_HANDLE(anv_descriptor_set, set, pDescriptorSets[i]);

      if (!set)
         continue;

      anv_descriptor_set_destroy(device, pool, set);
   }

   return VK_SUCCESS;
}
static void
anv_descriptor_set_write_image_param(uint32_t *param_desc_map,
                                     const struct brw_image_param *param)
{
#define WRITE_PARAM_FIELD(field, FIELD) \
   for (unsigned i = 0; i < ARRAY_SIZE(param->field); i++) \
      param_desc_map[BRW_IMAGE_PARAM_##FIELD##_OFFSET + i] = param->field[i]

   WRITE_PARAM_FIELD(offset, OFFSET);
   WRITE_PARAM_FIELD(size, SIZE);
   WRITE_PARAM_FIELD(stride, STRIDE);
   WRITE_PARAM_FIELD(tiling, TILING);
   WRITE_PARAM_FIELD(swizzling, SWIZZLING);

#undef WRITE_PARAM_FIELD
}
static uint32_t
anv_surface_state_to_handle(struct anv_state state)
{
   /* Bits 31:12 of the bindless surface offset in the extended message
    * descriptor is bits 25:6 of the byte-based address.
    */
   assert(state.offset >= 0);
   uint32_t offset = state.offset;
   assert((offset & 0x3f) == 0 && offset < (1 << 26));
   return offset << 6;
}
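/* For example, a surface state at byte offset 0x1000 has address bits 25:6
 * equal to 0x40; placing those at bits 31:12 of the handle gives
 * 0x40 << 12 == 0x1000 << 6 == 0x40000.
 */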
void
anv_descriptor_set_write_image_view(struct anv_device *device,
                                    struct anv_descriptor_set *set,
                                    const VkDescriptorImageInfo * const info,
                                    VkDescriptorType type,
                                    uint32_t binding,
                                    uint32_t element)
{
   const struct anv_descriptor_set_binding_layout *bind_layout =
      &set->layout->binding[binding];
   struct anv_descriptor *desc =
      &set->descriptors[bind_layout->descriptor_index + element];
   struct anv_image_view *image_view = NULL;
   struct anv_sampler *sampler = NULL;

   /* We get called with just VK_DESCRIPTOR_TYPE_SAMPLER as part of descriptor
    * set initialization to set the bindless samplers.
    */
   assert(type == bind_layout->type ||
          type == VK_DESCRIPTOR_TYPE_SAMPLER);

   switch (type) {
   case VK_DESCRIPTOR_TYPE_SAMPLER:
      sampler = anv_sampler_from_handle(info->sampler);
      break;

   case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
      image_view = anv_image_view_from_handle(info->imageView);
      sampler = anv_sampler_from_handle(info->sampler);
      break;

   case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE:
   case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
   case VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT:
      image_view = anv_image_view_from_handle(info->imageView);
      break;

   default:
      unreachable("invalid descriptor type");
   }

   /* If this descriptor has an immutable sampler, we don't want to stomp on
    * it.
    */
   sampler = bind_layout->immutable_samplers ?
             bind_layout->immutable_samplers[element] :
             sampler;

   *desc = (struct anv_descriptor) {
      .type = type,
      .layout = info->imageLayout,
      .image_view = image_view,
      .sampler = sampler,
   };

   void *desc_map = set->desc_mem.map + bind_layout->descriptor_offset +
                    element * anv_descriptor_size(bind_layout);

   if (bind_layout->data & ANV_DESCRIPTOR_SAMPLED_IMAGE) {
      struct anv_sampled_image_descriptor desc_data[3];
      memset(desc_data, 0, sizeof(desc_data));

      if (image_view) {
         for (unsigned p = 0; p < image_view->n_planes; p++) {
            struct anv_surface_state sstate =
               (desc->layout == VK_IMAGE_LAYOUT_GENERAL) ?
               image_view->planes[p].general_sampler_surface_state :
               image_view->planes[p].optimal_sampler_surface_state;
            desc_data[p].image = anv_surface_state_to_handle(sstate.state);
         }
      }

      if (sampler) {
         for (unsigned p = 0; p < sampler->n_planes; p++)
            desc_data[p].sampler = sampler->bindless_state.offset + p * 32;
      }

      /* We may have max_plane_count == 0 if this isn't a sampled image but it
       * can be no more than the size of our array of handles.
       */
      assert(bind_layout->max_plane_count <= ARRAY_SIZE(desc_data));
      memcpy(desc_map, desc_data,
             MAX2(1, bind_layout->max_plane_count) * sizeof(desc_data[0]));
   }

   if (bind_layout->data & ANV_DESCRIPTOR_STORAGE_IMAGE) {
      assert(!(bind_layout->data & ANV_DESCRIPTOR_IMAGE_PARAM));
      assert(image_view->n_planes == 1);
      struct anv_storage_image_descriptor desc_data = {
         .read_write = anv_surface_state_to_handle(
                           image_view->planes[0].storage_surface_state.state),
         .write_only = anv_surface_state_to_handle(
                           image_view->planes[0].writeonly_storage_surface_state.state),
      };
      memcpy(desc_map, &desc_data, sizeof(desc_data));
   }

   if (bind_layout->data & ANV_DESCRIPTOR_IMAGE_PARAM) {
      /* Storage images can only ever have one plane */
      assert(image_view->n_planes == 1);
      const struct brw_image_param *image_param =
         &image_view->planes[0].storage_image_param;

      anv_descriptor_set_write_image_param(desc_map, image_param);
   }
}
void
anv_descriptor_set_write_buffer_view(struct anv_device *device,
                                     struct anv_descriptor_set *set,
                                     VkDescriptorType type,
                                     struct anv_buffer_view *buffer_view,
                                     uint32_t binding,
                                     uint32_t element)
{
   const struct anv_descriptor_set_binding_layout *bind_layout =
      &set->layout->binding[binding];
   struct anv_descriptor *desc =
      &set->descriptors[bind_layout->descriptor_index + element];

   assert(type == bind_layout->type);

   *desc = (struct anv_descriptor) {
      .type = type,
      .buffer_view = buffer_view,
   };

   void *desc_map = set->desc_mem.map + bind_layout->descriptor_offset +
                    element * anv_descriptor_size(bind_layout);

   if (bind_layout->data & ANV_DESCRIPTOR_SAMPLED_IMAGE) {
      struct anv_sampled_image_descriptor desc_data = {
         .image = anv_surface_state_to_handle(buffer_view->surface_state),
      };
      memcpy(desc_map, &desc_data, sizeof(desc_data));
   }

   if (bind_layout->data & ANV_DESCRIPTOR_STORAGE_IMAGE) {
      assert(!(bind_layout->data & ANV_DESCRIPTOR_IMAGE_PARAM));
      struct anv_storage_image_descriptor desc_data = {
         .read_write = anv_surface_state_to_handle(
                           buffer_view->storage_surface_state),
         .write_only = anv_surface_state_to_handle(
                           buffer_view->writeonly_storage_surface_state),
      };
      memcpy(desc_map, &desc_data, sizeof(desc_data));
   }

   if (bind_layout->data & ANV_DESCRIPTOR_IMAGE_PARAM) {
      anv_descriptor_set_write_image_param(desc_map,
                                           &buffer_view->storage_image_param);
   }
}
void
anv_descriptor_set_write_buffer(struct anv_device *device,
                                struct anv_descriptor_set *set,
                                struct anv_state_stream *alloc_stream,
                                VkDescriptorType type,
                                struct anv_buffer *buffer,
                                uint32_t binding,
                                uint32_t element,
                                VkDeviceSize offset,
                                VkDeviceSize range)
{
   const struct anv_descriptor_set_binding_layout *bind_layout =
      &set->layout->binding[binding];
   struct anv_descriptor *desc =
      &set->descriptors[bind_layout->descriptor_index + element];

   assert(type == bind_layout->type);

   struct anv_address bind_addr = anv_address_add(buffer->address, offset);
   uint64_t bind_range = anv_buffer_get_range(buffer, offset, range);

   if (type == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC ||
       type == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC) {
      *desc = (struct anv_descriptor) {
         .type = type,
         .buffer = buffer,
         .offset = offset,
         .range = range,
      };
   } else {
      assert(bind_layout->data & ANV_DESCRIPTOR_BUFFER_VIEW);
      struct anv_buffer_view *bview =
         &set->buffer_views[bind_layout->buffer_view_index + element];

      bview->format = anv_isl_format_for_descriptor_type(type);
      bview->range = bind_range;
      bview->address = bind_addr;

      /* If we're writing descriptors through a push command, we need to
       * allocate the surface state from the command buffer. Otherwise it will
       * be allocated by the descriptor pool when calling
       * vkAllocateDescriptorSets. */
      if (alloc_stream)
         bview->surface_state = anv_state_stream_alloc(alloc_stream, 64, 64);

      anv_fill_buffer_surface_state(device, bview->surface_state,
                                    bview->format, bind_addr, bind_range, 1);

      *desc = (struct anv_descriptor) {
         .type = type,
         .buffer_view = bview,
      };
   }

   void *desc_map = set->desc_mem.map + bind_layout->descriptor_offset +
                    element * anv_descriptor_size(bind_layout);

   if (bind_layout->data & ANV_DESCRIPTOR_ADDRESS_RANGE) {
      struct anv_address_range_descriptor desc = {
         .address = anv_address_physical(bind_addr),
         .range = bind_range,
      };
      memcpy(desc_map, &desc, sizeof(desc));
   }
}
void
anv_descriptor_set_write_inline_uniform_data(struct anv_device *device,
                                             struct anv_descriptor_set *set,
                                             uint32_t binding,
                                             const void *data,
                                             size_t offset,
                                             size_t size)
{
   const struct anv_descriptor_set_binding_layout *bind_layout =
      &set->layout->binding[binding];

   assert(bind_layout->data & ANV_DESCRIPTOR_INLINE_UNIFORM);

   void *desc_map = set->desc_mem.map + bind_layout->descriptor_offset;

   memcpy(desc_map + offset, data, size);
}
void anv_UpdateDescriptorSets(
    VkDevice                                    _device,
    uint32_t                                    descriptorWriteCount,
    const VkWriteDescriptorSet*                 pDescriptorWrites,
    uint32_t                                    descriptorCopyCount,
    const VkCopyDescriptorSet*                  pDescriptorCopies)
{
   ANV_FROM_HANDLE(anv_device, device, _device);

   for (uint32_t i = 0; i < descriptorWriteCount; i++) {
      const VkWriteDescriptorSet *write = &pDescriptorWrites[i];
      ANV_FROM_HANDLE(anv_descriptor_set, set, write->dstSet);

      switch (write->descriptorType) {
      case VK_DESCRIPTOR_TYPE_SAMPLER:
      case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
      case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE:
      case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
      case VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT:
         for (uint32_t j = 0; j < write->descriptorCount; j++) {
            anv_descriptor_set_write_image_view(device, set,
                                                write->pImageInfo + j,
                                                write->descriptorType,
                                                write->dstBinding,
                                                write->dstArrayElement + j);
         }
         break;

      case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
      case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
         for (uint32_t j = 0; j < write->descriptorCount; j++) {
            ANV_FROM_HANDLE(anv_buffer_view, bview,
                            write->pTexelBufferView[j]);

            anv_descriptor_set_write_buffer_view(device, set,
                                                 write->descriptorType,
                                                 bview,
                                                 write->dstBinding,
                                                 write->dstArrayElement + j);
         }
         break;

      case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
      case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
      case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
      case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
         for (uint32_t j = 0; j < write->descriptorCount; j++) {
            assert(write->pBufferInfo[j].buffer);
            ANV_FROM_HANDLE(anv_buffer, buffer, write->pBufferInfo[j].buffer);

            anv_descriptor_set_write_buffer(device, set,
                                            NULL,
                                            write->descriptorType,
                                            buffer,
                                            write->dstBinding,
                                            write->dstArrayElement + j,
                                            write->pBufferInfo[j].offset,
                                            write->pBufferInfo[j].range);
         }
         break;

      case VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK_EXT: {
         const VkWriteDescriptorSetInlineUniformBlockEXT *inline_write =
            vk_find_struct_const(write->pNext,
                                 WRITE_DESCRIPTOR_SET_INLINE_UNIFORM_BLOCK_EXT);
         assert(inline_write->dataSize == write->descriptorCount);
         anv_descriptor_set_write_inline_uniform_data(device, set,
                                                      write->dstBinding,
                                                      inline_write->pData,
                                                      write->dstArrayElement,
                                                      inline_write->dataSize);
         break;
      }

      default:
         break;
      }
   }

   for (uint32_t i = 0; i < descriptorCopyCount; i++) {
      const VkCopyDescriptorSet *copy = &pDescriptorCopies[i];
      ANV_FROM_HANDLE(anv_descriptor_set, src, copy->srcSet);
      ANV_FROM_HANDLE(anv_descriptor_set, dst, copy->dstSet);

      const struct anv_descriptor_set_binding_layout *src_layout =
         &src->layout->binding[copy->srcBinding];
      struct anv_descriptor *src_desc =
         &src->descriptors[src_layout->descriptor_index];
      src_desc += copy->srcArrayElement;

      const struct anv_descriptor_set_binding_layout *dst_layout =
         &dst->layout->binding[copy->dstBinding];
      struct anv_descriptor *dst_desc =
         &dst->descriptors[dst_layout->descriptor_index];
      dst_desc += copy->dstArrayElement;

      for (uint32_t j = 0; j < copy->descriptorCount; j++)
         dst_desc[j] = src_desc[j];

      if (src_layout->data & ANV_DESCRIPTOR_INLINE_UNIFORM) {
         assert(src_layout->data == ANV_DESCRIPTOR_INLINE_UNIFORM);
         memcpy(dst->desc_mem.map + dst_layout->descriptor_offset +
                copy->dstArrayElement,
                src->desc_mem.map + src_layout->descriptor_offset +
                copy->srcArrayElement,
                copy->descriptorCount);
      } else {
         unsigned desc_size = anv_descriptor_size(src_layout);
         if (desc_size > 0) {
            assert(desc_size == anv_descriptor_size(dst_layout));
            memcpy(dst->desc_mem.map + dst_layout->descriptor_offset +
                   copy->dstArrayElement * desc_size,
                   src->desc_mem.map + src_layout->descriptor_offset +
                   copy->srcArrayElement * desc_size,
                   copy->descriptorCount * desc_size);
         }
      }
   }
}
/*
 * Descriptor update templates.
 */
void
anv_descriptor_set_write_template(struct anv_device *device,
                                  struct anv_descriptor_set *set,
                                  struct anv_state_stream *alloc_stream,
                                  const struct anv_descriptor_update_template *template,
                                  const void *data)
{
   for (uint32_t i = 0; i < template->entry_count; i++) {
      const struct anv_descriptor_template_entry *entry =
         &template->entries[i];

      switch (entry->type) {
      case VK_DESCRIPTOR_TYPE_SAMPLER:
      case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
      case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE:
      case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
      case VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT:
         for (uint32_t j = 0; j < entry->array_count; j++) {
            const VkDescriptorImageInfo *info =
               data + entry->offset + j * entry->stride;
            anv_descriptor_set_write_image_view(device, set,
                                                info, entry->type,
                                                entry->binding,
                                                entry->array_element + j);
         }
         break;

      case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
      case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
         for (uint32_t j = 0; j < entry->array_count; j++) {
            const VkBufferView *_bview =
               data + entry->offset + j * entry->stride;
            ANV_FROM_HANDLE(anv_buffer_view, bview, *_bview);

            anv_descriptor_set_write_buffer_view(device, set,
                                                 entry->type,
                                                 bview,
                                                 entry->binding,
                                                 entry->array_element + j);
         }
         break;

      case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
      case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
      case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
      case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
         for (uint32_t j = 0; j < entry->array_count; j++) {
            const VkDescriptorBufferInfo *info =
               data + entry->offset + j * entry->stride;
            ANV_FROM_HANDLE(anv_buffer, buffer, info->buffer);

            anv_descriptor_set_write_buffer(device, set,
                                            alloc_stream,
                                            entry->type,
                                            buffer,
                                            entry->binding,
                                            entry->array_element + j,
                                            info->offset, info->range);
         }
         break;

      case VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK_EXT:
         anv_descriptor_set_write_inline_uniform_data(device, set,
                                                      entry->binding,
                                                      data + entry->offset,
                                                      entry->array_element,
                                                      entry->array_count);
         break;

      default:
         break;
      }
   }
}
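/* Template creation below just snapshots each VkDescriptorUpdateTemplateEntry
 * into an anv_descriptor_template_entry, which is what lets the write path
 * above walk application memory with nothing more than offset + j * stride.
 */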
VkResult anv_CreateDescriptorUpdateTemplate(
    VkDevice                                    _device,
    const VkDescriptorUpdateTemplateCreateInfo* pCreateInfo,
    const VkAllocationCallbacks*                pAllocator,
    VkDescriptorUpdateTemplate*                 pDescriptorUpdateTemplate)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   struct anv_descriptor_update_template *template;

   size_t size = sizeof(*template) +
      pCreateInfo->descriptorUpdateEntryCount * sizeof(template->entries[0]);
   template = vk_alloc2(&device->alloc, pAllocator, size, 8,
                        VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (template == NULL)
      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

   template->bind_point = pCreateInfo->pipelineBindPoint;

   if (pCreateInfo->templateType == VK_DESCRIPTOR_UPDATE_TEMPLATE_TYPE_DESCRIPTOR_SET)
      template->set = pCreateInfo->set;

   template->entry_count = pCreateInfo->descriptorUpdateEntryCount;
   for (uint32_t i = 0; i < template->entry_count; i++) {
      const VkDescriptorUpdateTemplateEntry *pEntry =
         &pCreateInfo->pDescriptorUpdateEntries[i];

      template->entries[i] = (struct anv_descriptor_template_entry) {
         .type = pEntry->descriptorType,
         .binding = pEntry->dstBinding,
         .array_element = pEntry->dstArrayElement,
         .array_count = pEntry->descriptorCount,
         .offset = pEntry->offset,
         .stride = pEntry->stride,
      };
   }

   *pDescriptorUpdateTemplate =
      anv_descriptor_update_template_to_handle(template);

   return VK_SUCCESS;
}
void anv_DestroyDescriptorUpdateTemplate(
    VkDevice                                    _device,
    VkDescriptorUpdateTemplate                  descriptorUpdateTemplate,
    const VkAllocationCallbacks*                pAllocator)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   ANV_FROM_HANDLE(anv_descriptor_update_template, template,
                   descriptorUpdateTemplate);

   vk_free2(&device->alloc, pAllocator, template);
}
void anv_UpdateDescriptorSetWithTemplate(
    VkDevice                                    _device,
    VkDescriptorSet                             descriptorSet,
    VkDescriptorUpdateTemplate                  descriptorUpdateTemplate,
    const void*                                 pData)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   ANV_FROM_HANDLE(anv_descriptor_set, set, descriptorSet);
   ANV_FROM_HANDLE(anv_descriptor_update_template, template,
                   descriptorUpdateTemplate);

   anv_descriptor_set_write_template(device, set, NULL, template, pData);
}