/*
 * Copyright © 2016 Red Hat.
 * Copyright © 2016 Bas Nieuwenhuizen
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include <assert.h>
#include <stdbool.h>
#include <stdlib.h>
#include <string.h>

#include "util/mesa-sha1.h"
#include "radv_private.h"
#include "sid.h"
#include "vk_util.h"

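/* Returns true when a binding's immutable samplers all share the same
 * 16-byte hardware state (and false when there are no immutable samplers).
 * The layout code below uses this to avoid reserving per-descriptor space
 * for sampler words that would all be identical. */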
static bool has_equal_immutable_samplers(const VkSampler *samplers, uint32_t count)
{
	if (!samplers)
		return false;
	for (uint32_t i = 1; i < count; ++i) {
		if (memcmp(radv_sampler_from_handle(samplers[0])->state,
		           radv_sampler_from_handle(samplers[i])->state, 16)) {
			return false;
		}
	}
	return true;
}

static int binding_compare(const void *av, const void *bv)
{
	const VkDescriptorSetLayoutBinding *a = (const VkDescriptorSetLayoutBinding *)av;
	const VkDescriptorSetLayoutBinding *b = (const VkDescriptorSetLayoutBinding *)bv;

	return (a->binding < b->binding) ? -1 : (a->binding > b->binding) ? 1 : 0;
}

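/* Returns a malloc()ed copy of pBindings sorted by binding index, or NULL on
 * allocation failure.  The caller owns the copy and must free() it. */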
static VkDescriptorSetLayoutBinding *
create_sorted_bindings(const VkDescriptorSetLayoutBinding *bindings, unsigned count) {
	VkDescriptorSetLayoutBinding *sorted_bindings = malloc(count * sizeof(VkDescriptorSetLayoutBinding));
	if (!sorted_bindings)
		return NULL;

	memcpy(sorted_bindings, bindings, count * sizeof(VkDescriptorSetLayoutBinding));

	qsort(sorted_bindings, count, sizeof(VkDescriptorSetLayoutBinding), binding_compare);

	return sorted_bindings;
}

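/* A descriptor set layout is one host allocation: the layout struct itself,
 * then one binding entry for every binding index up to the largest one used,
 * then the immutable sampler words (4 dwords per sampler).  The per-descriptor
 * GPU sizes used below are 16 bytes for buffer/texel-buffer descriptors,
 * 64 bytes for image descriptors (main + fmask descriptor) and 96 bytes for
 * combined image+sampler descriptors; e.g. a binding with descriptorCount 8
 * of VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER and no shared immutable
 * samplers contributes 8 * 96 bytes to the set size. */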
VkResult radv_CreateDescriptorSetLayout(
	VkDevice                                    _device,
	const VkDescriptorSetLayoutCreateInfo*      pCreateInfo,
	const VkAllocationCallbacks*                pAllocator,
	VkDescriptorSetLayout*                      pSetLayout)
{
	RADV_FROM_HANDLE(radv_device, device, _device);
	struct radv_descriptor_set_layout *set_layout;

	assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO);
	const VkDescriptorSetLayoutBindingFlagsCreateInfoEXT *variable_flags =
		vk_find_struct_const(pCreateInfo->pNext, DESCRIPTOR_SET_LAYOUT_BINDING_FLAGS_CREATE_INFO_EXT);

	uint32_t max_binding = 0;
	uint32_t immutable_sampler_count = 0;
	for (uint32_t j = 0; j < pCreateInfo->bindingCount; j++) {
		max_binding = MAX2(max_binding, pCreateInfo->pBindings[j].binding);
		if (pCreateInfo->pBindings[j].pImmutableSamplers)
			immutable_sampler_count += pCreateInfo->pBindings[j].descriptorCount;
	}

	uint32_t samplers_offset = sizeof(struct radv_descriptor_set_layout) +
		(max_binding + 1) * sizeof(set_layout->binding[0]);
	size_t size = samplers_offset + immutable_sampler_count * 4 * sizeof(uint32_t);

	set_layout = vk_alloc2(&device->alloc, pAllocator, size, 8,
			       VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
	if (!set_layout)
		return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);

	set_layout->flags = pCreateInfo->flags;

	/* We just allocate all the samplers at the end of the struct */
	uint32_t *samplers = (uint32_t*)&set_layout->binding[max_binding + 1];

	VkDescriptorSetLayoutBinding *bindings = create_sorted_bindings(pCreateInfo->pBindings,
	                                                                pCreateInfo->bindingCount);
	if (!bindings) {
		vk_free2(&device->alloc, pAllocator, set_layout);
		return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
	}

	set_layout->binding_count = max_binding + 1;
	set_layout->shader_stages = 0;
	set_layout->dynamic_shader_stages = 0;
	set_layout->has_immutable_samplers = false;
	set_layout->has_variable_descriptors = false;
	set_layout->size = 0;

	memset(set_layout->binding, 0, size - sizeof(struct radv_descriptor_set_layout));

	uint32_t buffer_count = 0;
	uint32_t dynamic_offset_count = 0;

	for (uint32_t j = 0; j < pCreateInfo->bindingCount; j++) {
		const VkDescriptorSetLayoutBinding *binding = bindings + j;
		uint32_t b = binding->binding;
		uint32_t alignment;
		unsigned binding_buffer_count = 0;

		switch (binding->descriptorType) {
		case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
		case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
			assert(!(pCreateInfo->flags & VK_DESCRIPTOR_SET_LAYOUT_CREATE_PUSH_DESCRIPTOR_BIT_KHR));
			set_layout->binding[b].dynamic_offset_count = 1;
			set_layout->dynamic_shader_stages |= binding->stageFlags;
			set_layout->binding[b].size = 0;
			binding_buffer_count = 1;
			alignment = 1;
			break;
		case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
		case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
		case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
		case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
			set_layout->binding[b].size = 16;
			binding_buffer_count = 1;
			alignment = 16;
			break;
		case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
		case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE:
		case VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT:
			/* main descriptor + fmask descriptor */
			set_layout->binding[b].size = 64;
			binding_buffer_count = 1;
			alignment = 32;
			break;
		case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
			/* main descriptor + fmask descriptor + sampler */
			set_layout->binding[b].size = 96;
			binding_buffer_count = 1;
			alignment = 32;
			break;
		case VK_DESCRIPTOR_TYPE_SAMPLER:
			set_layout->binding[b].size = 16;
			alignment = 16;
			break;
		default:
			unreachable("unknown descriptor type\n");
			break;
		}

		set_layout->size = align(set_layout->size, alignment);
		set_layout->binding[b].type = binding->descriptorType;
		set_layout->binding[b].array_size = binding->descriptorCount;
		set_layout->binding[b].offset = set_layout->size;
		set_layout->binding[b].buffer_offset = buffer_count;
		set_layout->binding[b].dynamic_offset_offset = dynamic_offset_count;

		if (variable_flags && binding->binding < variable_flags->bindingCount &&
		    (variable_flags->pBindingFlags[binding->binding] & VK_DESCRIPTOR_BINDING_VARIABLE_DESCRIPTOR_COUNT_BIT_EXT)) {
			assert(!binding->pImmutableSamplers); /* Terribly ill defined how many samplers are valid */
			assert(binding->binding == max_binding);

			set_layout->has_variable_descriptors = true;
		}

		if (binding->pImmutableSamplers) {
			set_layout->binding[b].immutable_samplers_offset = samplers_offset;
			set_layout->binding[b].immutable_samplers_equal =
				has_equal_immutable_samplers(binding->pImmutableSamplers, binding->descriptorCount);
			set_layout->has_immutable_samplers = true;

			for (uint32_t i = 0; i < binding->descriptorCount; i++)
				memcpy(samplers + 4 * i, &radv_sampler_from_handle(binding->pImmutableSamplers[i])->state, 16);

			/* Don't reserve space for the samplers if they're not accessed. */
			if (set_layout->binding[b].immutable_samplers_equal) {
				if (binding->descriptorType == VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER)
					set_layout->binding[b].size -= 32;
				else if (binding->descriptorType == VK_DESCRIPTOR_TYPE_SAMPLER)
					set_layout->binding[b].size -= 16;
			}

			samplers += 4 * binding->descriptorCount;
			samplers_offset += 4 * sizeof(uint32_t) * binding->descriptorCount;
		}

		set_layout->size += binding->descriptorCount * set_layout->binding[b].size;
		buffer_count += binding->descriptorCount * binding_buffer_count;
		dynamic_offset_count += binding->descriptorCount *
			set_layout->binding[b].dynamic_offset_count;
		set_layout->shader_stages |= binding->stageFlags;
	}

	free(bindings);

	set_layout->buffer_count = buffer_count;
	set_layout->dynamic_offset_count = dynamic_offset_count;

	*pSetLayout = radv_descriptor_set_layout_to_handle(set_layout);

	return VK_SUCCESS;
}

void radv_DestroyDescriptorSetLayout(
	VkDevice                                    _device,
	VkDescriptorSetLayout                       _set_layout,
	const VkAllocationCallbacks*                pAllocator)
{
	RADV_FROM_HANDLE(radv_device, device, _device);
	RADV_FROM_HANDLE(radv_descriptor_set_layout, set_layout, _set_layout);

	if (!set_layout)
		return;

	vk_free2(&device->alloc, pAllocator, set_layout);
}

void radv_GetDescriptorSetLayoutSupport(VkDevice device,
                                        const VkDescriptorSetLayoutCreateInfo* pCreateInfo,
                                        VkDescriptorSetLayoutSupport* pSupport)
{
	VkDescriptorSetLayoutBinding *bindings = create_sorted_bindings(pCreateInfo->pBindings,
	                                                                pCreateInfo->bindingCount);
	if (!bindings) {
		pSupport->supported = false;
		return;
	}

	const VkDescriptorSetLayoutBindingFlagsCreateInfoEXT *variable_flags =
		vk_find_struct_const(pCreateInfo->pNext, DESCRIPTOR_SET_LAYOUT_BINDING_FLAGS_CREATE_INFO_EXT);
	VkDescriptorSetVariableDescriptorCountLayoutSupportEXT *variable_count =
		vk_find_struct((void*)pCreateInfo->pNext, DESCRIPTOR_SET_VARIABLE_DESCRIPTOR_COUNT_LAYOUT_SUPPORT_EXT);
	if (variable_count) {
		variable_count->maxVariableDescriptorCount = 0;
	}

	bool supported = true;
	uint64_t size = 0;
	for (uint32_t i = 0; i < pCreateInfo->bindingCount; i++) {
		const VkDescriptorSetLayoutBinding *binding = bindings + i;

		uint64_t descriptor_size = 0;
		uint64_t descriptor_alignment = 1;
		switch (binding->descriptorType) {
		case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
		case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
			break;
		case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
		case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
		case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
		case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
			descriptor_size = 16;
			descriptor_alignment = 16;
			break;
		case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
		case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE:
		case VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT:
			descriptor_size = 64;
			descriptor_alignment = 32;
			break;
		case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
			if (!has_equal_immutable_samplers(binding->pImmutableSamplers, binding->descriptorCount)) {
				descriptor_size = 64;
			} else {
				descriptor_size = 96;
			}
			descriptor_alignment = 32;
			break;
		case VK_DESCRIPTOR_TYPE_SAMPLER:
			if (!has_equal_immutable_samplers(binding->pImmutableSamplers, binding->descriptorCount)) {
				descriptor_size = 16;
				descriptor_alignment = 16;
			}
			break;
		default:
			unreachable("unknown descriptor type\n");
			break;
		}

		if (size && !align_u64(size, descriptor_alignment)) {
			supported = false;
		}
		size = align_u64(size, descriptor_alignment);

		uint64_t max_count = UINT64_MAX;
		if (descriptor_size)
			max_count = (UINT64_MAX - size) / descriptor_size;

		if (max_count < binding->descriptorCount) {
			supported = false;
		}
		if (variable_flags && binding->binding < variable_flags->bindingCount && variable_count &&
		    (variable_flags->pBindingFlags[binding->binding] & VK_DESCRIPTOR_BINDING_VARIABLE_DESCRIPTOR_COUNT_BIT_EXT)) {
			variable_count->maxVariableDescriptorCount = MIN2(UINT32_MAX, max_count);
		}
		size += binding->descriptorCount * descriptor_size;
	}

	free(bindings);

	pSupport->supported = supported;
}

/*
 * Pipeline layouts.  These have nothing to do with the pipeline.  They are
 * just multiple descriptor set layouts pasted together.
 */
VkResult radv_CreatePipelineLayout(
	VkDevice                                    _device,
	const VkPipelineLayoutCreateInfo*           pCreateInfo,
	const VkAllocationCallbacks*                pAllocator,
	VkPipelineLayout*                           pPipelineLayout)
{
	RADV_FROM_HANDLE(radv_device, device, _device);
	struct radv_pipeline_layout *layout;
	struct mesa_sha1 ctx;

	assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO);

	layout = vk_alloc2(&device->alloc, pAllocator, sizeof(*layout), 8,
			   VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
	if (layout == NULL)
		return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);

	layout->num_sets = pCreateInfo->setLayoutCount;

	unsigned dynamic_offset_count = 0;

	_mesa_sha1_init(&ctx);
	for (uint32_t set = 0; set < pCreateInfo->setLayoutCount; set++) {
		RADV_FROM_HANDLE(radv_descriptor_set_layout, set_layout,
				 pCreateInfo->pSetLayouts[set]);
		layout->set[set].layout = set_layout;

		layout->set[set].dynamic_offset_start = dynamic_offset_count;
		for (uint32_t b = 0; b < set_layout->binding_count; b++) {
			dynamic_offset_count += set_layout->binding[b].array_size * set_layout->binding[b].dynamic_offset_count;
			if (set_layout->binding[b].immutable_samplers_offset)
				_mesa_sha1_update(&ctx, radv_immutable_samplers(set_layout, set_layout->binding + b),
				                  set_layout->binding[b].array_size * 4 * sizeof(uint32_t));
		}
		_mesa_sha1_update(&ctx, set_layout->binding,
				  sizeof(set_layout->binding[0]) * set_layout->binding_count);
	}

	layout->dynamic_offset_count = dynamic_offset_count;
	layout->push_constant_size = 0;

	for (unsigned i = 0; i < pCreateInfo->pushConstantRangeCount; ++i) {
		const VkPushConstantRange *range = pCreateInfo->pPushConstantRanges + i;
		layout->push_constant_size = MAX2(layout->push_constant_size,
						  range->offset + range->size);
	}

	layout->push_constant_size = align(layout->push_constant_size, 16);
	_mesa_sha1_update(&ctx, &layout->push_constant_size,
			  sizeof(layout->push_constant_size));
	_mesa_sha1_final(&ctx, layout->sha1);
	*pPipelineLayout = radv_pipeline_layout_to_handle(layout);

	return VK_SUCCESS;
}

void radv_DestroyPipelineLayout(
	VkDevice                                    _device,
	VkPipelineLayout                            _pipelineLayout,
	const VkAllocationCallbacks*                pAllocator)
{
	RADV_FROM_HANDLE(radv_device, device, _device);
	RADV_FROM_HANDLE(radv_pipeline_layout, pipeline_layout, _pipelineLayout);

	if (!pipeline_layout)
		return;

	vk_free2(&device->alloc, pAllocator, pipeline_layout);
}

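/* Descriptor sets are sub-allocated from the pool's buffer object.  The
 * fast path appends at pool->current_offset; when that does not fit and the
 * pool tracks individual entries (i.e. sets can be freed), a first-fit scan
 * of the gaps between existing entries is used instead. */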
static VkResult
radv_descriptor_set_create(struct radv_device *device,
			   struct radv_descriptor_pool *pool,
			   const struct radv_descriptor_set_layout *layout,
			   const uint32_t *variable_count,
			   struct radv_descriptor_set **out_set)
{
	struct radv_descriptor_set *set;
	unsigned range_offset = sizeof(struct radv_descriptor_set) +
		sizeof(struct radeon_winsys_bo *) * layout->buffer_count;
	unsigned mem_size = range_offset +
		sizeof(struct radv_descriptor_range) * layout->dynamic_offset_count;

	if (pool->host_memory_base) {
		if (pool->host_memory_end - pool->host_memory_ptr < mem_size)
			return vk_error(device->instance, VK_ERROR_OUT_OF_POOL_MEMORY_KHR);

		set = (struct radv_descriptor_set*)pool->host_memory_ptr;
		pool->host_memory_ptr += mem_size;
	} else {
		set = vk_alloc2(&device->alloc, NULL, mem_size, 8,
				VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);

		if (!set)
			return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
	}

	memset(set, 0, mem_size);

	if (layout->dynamic_offset_count) {
		set->dynamic_descriptors = (struct radv_descriptor_range*)((uint8_t*)set + range_offset);
	}

	set->layout = layout;
	uint32_t layout_size = align_u32(layout->size, 32);
	if (layout_size) {
		set->size = layout_size;

		if (!pool->host_memory_base && pool->entry_count == pool->max_entry_count) {
			vk_free2(&device->alloc, NULL, set);
			return vk_error(device->instance, VK_ERROR_OUT_OF_POOL_MEMORY_KHR);
		}

		/* try to allocate linearly first, so that we don't spend
		 * time looking for gaps if the app only allocates &
		 * resets via the pool. */
		if (pool->current_offset + layout_size <= pool->size) {
			set->bo = pool->bo;
			set->mapped_ptr = (uint32_t*)(pool->mapped_ptr + pool->current_offset);
			set->va = radv_buffer_get_va(set->bo) + pool->current_offset;
			if (!pool->host_memory_base) {
				pool->entries[pool->entry_count].offset = pool->current_offset;
				pool->entries[pool->entry_count].size = layout_size;
				pool->entries[pool->entry_count].set = set;
				pool->entry_count++;
			}
			pool->current_offset += layout_size;
		} else if (!pool->host_memory_base) {
			uint64_t offset = 0;
			int index;

			for (index = 0; index < pool->entry_count; ++index) {
				if (pool->entries[index].offset - offset >= layout_size)
					break;
				offset = pool->entries[index].offset + pool->entries[index].size;
			}

			if (pool->size - offset < layout_size) {
				vk_free2(&device->alloc, NULL, set);
				return vk_error(device->instance, VK_ERROR_OUT_OF_POOL_MEMORY_KHR);
			}

			set->bo = pool->bo;
			set->mapped_ptr = (uint32_t*)(pool->mapped_ptr + offset);
			set->va = radv_buffer_get_va(set->bo) + offset;
			memmove(&pool->entries[index + 1], &pool->entries[index],
				sizeof(pool->entries[0]) * (pool->entry_count - index));
			pool->entries[index].offset = offset;
			pool->entries[index].size = layout_size;
			pool->entries[index].set = set;
			pool->entry_count++;
		} else
			return vk_error(device->instance, VK_ERROR_OUT_OF_POOL_MEMORY_KHR);
	}

	if (layout->has_immutable_samplers) {
		for (unsigned i = 0; i < layout->binding_count; ++i) {
			if (!layout->binding[i].immutable_samplers_offset ||
			    layout->binding[i].immutable_samplers_equal)
				continue;

			unsigned offset = layout->binding[i].offset / 4;
			if (layout->binding[i].type == VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER)
				offset += 16;

			const uint32_t *samplers = (const uint32_t*)((const char*)layout + layout->binding[i].immutable_samplers_offset);
			for (unsigned j = 0; j < layout->binding[i].array_size; ++j) {
				memcpy(set->mapped_ptr + offset, samplers + 4 * j, 16);
				offset += layout->binding[i].size / 4;
			}
		}
	}

	*out_set = set;
	return VK_SUCCESS;
}

static void
radv_descriptor_set_destroy(struct radv_device *device,
			    struct radv_descriptor_pool *pool,
			    struct radv_descriptor_set *set,
			    bool free_bo)
{
	assert(!pool->host_memory_base);

	if (free_bo && set->size && !pool->host_memory_base) {
		uint32_t offset = (uint8_t*)set->mapped_ptr - pool->mapped_ptr;
		for (int i = 0; i < pool->entry_count; ++i) {
			if (pool->entries[i].offset == offset) {
				memmove(&pool->entries[i], &pool->entries[i+1],
					sizeof(pool->entries[i]) * (pool->entry_count - i - 1));
				--pool->entry_count;
				break;
			}
		}
	}
	vk_free2(&device->alloc, NULL, set);
}

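/* Pool creation sizes two things up front: the GPU buffer backing the
 * descriptors (32/64/96 bytes per descriptor depending on the pool size
 * type, e.g. 100 sampled-image descriptors reserve 100 * 64 bytes) and,
 * unless VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT is requested, a
 * linear host arena that radv_descriptor_set structs are carved from so that
 * individual sets never need to be freed. */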
VkResult radv_CreateDescriptorPool(
	VkDevice                                    _device,
	const VkDescriptorPoolCreateInfo*           pCreateInfo,
	const VkAllocationCallbacks*                pAllocator,
	VkDescriptorPool*                           pDescriptorPool)
{
	RADV_FROM_HANDLE(radv_device, device, _device);
	struct radv_descriptor_pool *pool;
	int size = sizeof(struct radv_descriptor_pool);
	uint64_t bo_size = 0, bo_count = 0, range_count = 0;

	for (unsigned i = 0; i < pCreateInfo->poolSizeCount; ++i) {
		if (pCreateInfo->pPoolSizes[i].type != VK_DESCRIPTOR_TYPE_SAMPLER)
			bo_count += pCreateInfo->pPoolSizes[i].descriptorCount;

		switch(pCreateInfo->pPoolSizes[i].type) {
		case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
		case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
			range_count += pCreateInfo->pPoolSizes[i].descriptorCount;
			break;
		case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
		case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
		case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
		case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
		case VK_DESCRIPTOR_TYPE_SAMPLER:
			/* 32 as we may need to align for images */
			bo_size += 32 * pCreateInfo->pPoolSizes[i].descriptorCount;
			break;
		case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE:
		case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
		case VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT:
			bo_size += 64 * pCreateInfo->pPoolSizes[i].descriptorCount;
			break;
		case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
			bo_size += 96 * pCreateInfo->pPoolSizes[i].descriptorCount;
			break;
		default:
			unreachable("unknown descriptor type\n");
			break;
		}
	}

	if (!(pCreateInfo->flags & VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT)) {
		uint64_t host_size = pCreateInfo->maxSets * sizeof(struct radv_descriptor_set);
		host_size += sizeof(struct radeon_winsys_bo*) * bo_count;
		host_size += sizeof(struct radv_descriptor_range) * range_count;
		size += host_size;
	} else {
		size += sizeof(struct radv_descriptor_pool_entry) * pCreateInfo->maxSets;
	}

	pool = vk_alloc2(&device->alloc, pAllocator, size, 8,
	                 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
	if (!pool)
		return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);

	memset(pool, 0, sizeof(*pool));

	if (!(pCreateInfo->flags & VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT)) {
		pool->host_memory_base = (uint8_t*)pool + sizeof(struct radv_descriptor_pool);
		pool->host_memory_ptr = pool->host_memory_base;
		pool->host_memory_end = (uint8_t*)pool + size;
	}

	if (bo_size) {
		pool->bo = device->ws->buffer_create(device->ws, bo_size, 32,
						     RADEON_DOMAIN_VRAM,
						     RADEON_FLAG_NO_INTERPROCESS_SHARING |
						     RADEON_FLAG_READ_ONLY |
						     RADEON_FLAG_32BIT);
		pool->mapped_ptr = (uint8_t*)device->ws->buffer_map(pool->bo);
	}
	pool->size = bo_size;
	pool->max_entry_count = pCreateInfo->maxSets;

	*pDescriptorPool = radv_descriptor_pool_to_handle(pool);
	return VK_SUCCESS;
}

void radv_DestroyDescriptorPool(
	VkDevice                                    _device,
	VkDescriptorPool                            _pool,
	const VkAllocationCallbacks*                pAllocator)
{
	RADV_FROM_HANDLE(radv_device, device, _device);
	RADV_FROM_HANDLE(radv_descriptor_pool, pool, _pool);

	if (!pool)
		return;

	if (!pool->host_memory_base) {
		for(int i = 0; i < pool->entry_count; ++i) {
			radv_descriptor_set_destroy(device, pool, pool->entries[i].set, false);
		}
	}

	if (pool->bo)
		device->ws->buffer_destroy(pool->bo);
	vk_free2(&device->alloc, pAllocator, pool);
}

VkResult radv_ResetDescriptorPool(
	VkDevice                                    _device,
	VkDescriptorPool                            descriptorPool,
	VkDescriptorPoolResetFlags                  flags)
{
	RADV_FROM_HANDLE(radv_device, device, _device);
	RADV_FROM_HANDLE(radv_descriptor_pool, pool, descriptorPool);

	if (!pool->host_memory_base) {
		for(int i = 0; i < pool->entry_count; ++i) {
			radv_descriptor_set_destroy(device, pool, pool->entries[i].set, false);
		}
		pool->entry_count = 0;
	}

	pool->current_offset = 0;
	pool->host_memory_ptr = pool->host_memory_base;

	return VK_SUCCESS;
}

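/* Variable-sized bindings (VK_EXT_descriptor_indexing) get their actual
 * counts from VkDescriptorSetVariableDescriptorCountAllocateInfoEXT; sets
 * beyond that struct's descriptorSetCount are treated as a count of zero. */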
VkResult radv_AllocateDescriptorSets(
	VkDevice                                    _device,
	const VkDescriptorSetAllocateInfo*          pAllocateInfo,
	VkDescriptorSet*                            pDescriptorSets)
{
	RADV_FROM_HANDLE(radv_device, device, _device);
	RADV_FROM_HANDLE(radv_descriptor_pool, pool, pAllocateInfo->descriptorPool);

	VkResult result = VK_SUCCESS;
	uint32_t i;
	struct radv_descriptor_set *set = NULL;

	const VkDescriptorSetVariableDescriptorCountAllocateInfoEXT *variable_counts =
		vk_find_struct_const(pAllocateInfo->pNext, DESCRIPTOR_SET_VARIABLE_DESCRIPTOR_COUNT_ALLOCATE_INFO_EXT);
	const uint32_t zero = 0;

	/* allocate a set of buffers for each shader to contain descriptors */
	for (i = 0; i < pAllocateInfo->descriptorSetCount; i++) {
		RADV_FROM_HANDLE(radv_descriptor_set_layout, layout,
				 pAllocateInfo->pSetLayouts[i]);

		const uint32_t *variable_count = NULL;
		if (variable_counts) {
			if (i < variable_counts->descriptorSetCount)
				variable_count = variable_counts->pDescriptorCounts + i;
			else
				variable_count = &zero;
		}

		assert(!(layout->flags & VK_DESCRIPTOR_SET_LAYOUT_CREATE_PUSH_DESCRIPTOR_BIT_KHR));

		result = radv_descriptor_set_create(device, pool, layout, variable_count, &set);
		if (result != VK_SUCCESS)
			break;

		pDescriptorSets[i] = radv_descriptor_set_to_handle(set);
	}

	if (result != VK_SUCCESS)
		radv_FreeDescriptorSets(_device, pAllocateInfo->descriptorPool,
					i, pDescriptorSets);
	return result;
}

VkResult radv_FreeDescriptorSets(
	VkDevice                                    _device,
	VkDescriptorPool                            descriptorPool,
	uint32_t                                    count,
	const VkDescriptorSet*                      pDescriptorSets)
{
	RADV_FROM_HANDLE(radv_device, device, _device);
	RADV_FROM_HANDLE(radv_descriptor_pool, pool, descriptorPool);

	for (uint32_t i = 0; i < count; i++) {
		RADV_FROM_HANDLE(radv_descriptor_set, set, pDescriptorSets[i]);

		if (set && !pool->host_memory_base)
			radv_descriptor_set_destroy(device, pool, set, true);
	}
	return VK_SUCCESS;
}

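/* The write_*_descriptor() helpers below store raw hardware descriptor words
 * into the set's mapped pool memory.  When called with a command buffer (as
 * the push descriptor code does) the referenced BO is added to the command
 * stream directly; otherwise it is recorded in the set's buffer list. */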
static void write_texel_buffer_descriptor(struct radv_device *device,
					  struct radv_cmd_buffer *cmd_buffer,
					  unsigned *dst,
					  struct radeon_winsys_bo **buffer_list,
					  const VkBufferView _buffer_view)
{
	RADV_FROM_HANDLE(radv_buffer_view, buffer_view, _buffer_view);

	memcpy(dst, buffer_view->state, 4 * 4);

	if (cmd_buffer)
		radv_cs_add_buffer(device->ws, cmd_buffer->cs, buffer_view->bo);
	else
		*buffer_list = buffer_view->bo;
}

static void write_buffer_descriptor(struct radv_device *device,
                                    struct radv_cmd_buffer *cmd_buffer,
                                    unsigned *dst,
                                    struct radeon_winsys_bo **buffer_list,
                                    const VkDescriptorBufferInfo *buffer_info)
{
	RADV_FROM_HANDLE(radv_buffer, buffer, buffer_info->buffer);
	uint64_t va = radv_buffer_get_va(buffer->bo);
	uint32_t range = buffer_info->range;

	if (buffer_info->range == VK_WHOLE_SIZE)
		range = buffer->size - buffer_info->offset;

	va += buffer_info->offset + buffer->offset;
	dst[0] = va;
	dst[1] = S_008F04_BASE_ADDRESS_HI(va >> 32);
	dst[2] = range;
	dst[3] = S_008F0C_DST_SEL_X(V_008F0C_SQ_SEL_X) |
		 S_008F0C_DST_SEL_Y(V_008F0C_SQ_SEL_Y) |
		 S_008F0C_DST_SEL_Z(V_008F0C_SQ_SEL_Z) |
		 S_008F0C_DST_SEL_W(V_008F0C_SQ_SEL_W) |
		 S_008F0C_NUM_FORMAT(V_008F0C_BUF_NUM_FORMAT_FLOAT) |
		 S_008F0C_DATA_FORMAT(V_008F0C_BUF_DATA_FORMAT_32);

	if (cmd_buffer)
		radv_cs_add_buffer(device->ws, cmd_buffer->cs, buffer->bo);
	else
		*buffer_list = buffer->bo;
}

static void write_dynamic_buffer_descriptor(struct radv_device *device,
                                            struct radv_descriptor_range *range,
                                            struct radeon_winsys_bo **buffer_list,
                                            const VkDescriptorBufferInfo *buffer_info)
{
	RADV_FROM_HANDLE(radv_buffer, buffer, buffer_info->buffer);
	uint64_t va = radv_buffer_get_va(buffer->bo);
	unsigned size = buffer_info->range;

	if (buffer_info->range == VK_WHOLE_SIZE)
		size = buffer->size - buffer_info->offset;

	va += buffer_info->offset + buffer->offset;
	range->va = va;
	range->size = size;

	*buffer_list = buffer->bo;
}

static void
write_image_descriptor(struct radv_device *device,
		       struct radv_cmd_buffer *cmd_buffer,
		       unsigned *dst,
		       struct radeon_winsys_bo **buffer_list,
		       VkDescriptorType descriptor_type,
		       const VkDescriptorImageInfo *image_info)
{
	RADV_FROM_HANDLE(radv_image_view, iview, image_info->imageView);
	uint32_t *descriptor;

	if (descriptor_type == VK_DESCRIPTOR_TYPE_STORAGE_IMAGE) {
		descriptor = iview->storage_descriptor;
	} else {
		descriptor = iview->descriptor;
	}

	memcpy(dst, descriptor, 16 * 4);

	if (cmd_buffer)
		radv_cs_add_buffer(device->ws, cmd_buffer->cs, iview->bo);
	else
		*buffer_list = iview->bo;
}

static void
write_combined_image_sampler_descriptor(struct radv_device *device,
					struct radv_cmd_buffer *cmd_buffer,
					unsigned *dst,
					struct radeon_winsys_bo **buffer_list,
					VkDescriptorType descriptor_type,
					const VkDescriptorImageInfo *image_info,
					bool has_sampler)
{
	RADV_FROM_HANDLE(radv_sampler, sampler, image_info->sampler);

	write_image_descriptor(device, cmd_buffer, dst, buffer_list, descriptor_type, image_info);
	/* copy over sampler state */
	if (has_sampler)
		memcpy(dst + 16, sampler->state, 16);
}

static void
write_sampler_descriptor(struct radv_device *device,
			 unsigned *dst,
			 const VkDescriptorImageInfo *image_info)
{
	RADV_FROM_HANDLE(radv_sampler, sampler, image_info->sampler);

	memcpy(dst, sampler->state, 16);
}

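/* Shared write/copy path for vkUpdateDescriptorSets and the push descriptor
 * code: when dstSetOverride is not VK_NULL_HANDLE every write is redirected
 * to that set, and with a non-NULL cmd_buffer the immutable samplers also
 * have to be copied in here (see the comment in the write loop below). */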
void radv_update_descriptor_sets(
	struct radv_device*                         device,
	struct radv_cmd_buffer*                     cmd_buffer,
	VkDescriptorSet                             dstSetOverride,
	uint32_t                                    descriptorWriteCount,
	const VkWriteDescriptorSet*                 pDescriptorWrites,
	uint32_t                                    descriptorCopyCount,
	const VkCopyDescriptorSet*                  pDescriptorCopies)
{
	uint32_t i, j;
	for (i = 0; i < descriptorWriteCount; i++) {
		const VkWriteDescriptorSet *writeset = &pDescriptorWrites[i];
		RADV_FROM_HANDLE(radv_descriptor_set, set,
		                 dstSetOverride ? dstSetOverride : writeset->dstSet);
		const struct radv_descriptor_set_binding_layout *binding_layout =
			set->layout->binding + writeset->dstBinding;
		uint32_t *ptr = set->mapped_ptr;
		struct radeon_winsys_bo **buffer_list = set->descriptors;
		/* Immutable samplers are not copied into push descriptors when they are
		 * allocated, so if we are writing push descriptors we have to copy the
		 * immutable samplers into them now.
		 */
		const bool copy_immutable_samplers = cmd_buffer &&
			binding_layout->immutable_samplers_offset && !binding_layout->immutable_samplers_equal;
		const uint32_t *samplers = radv_immutable_samplers(set->layout, binding_layout);

		ptr += binding_layout->offset / 4;
		ptr += binding_layout->size * writeset->dstArrayElement / 4;
		buffer_list += binding_layout->buffer_offset;
		buffer_list += writeset->dstArrayElement;
		for (j = 0; j < writeset->descriptorCount; ++j) {
			switch(writeset->descriptorType) {
			case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
			case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC: {
				unsigned idx = writeset->dstArrayElement + j;
				idx += binding_layout->dynamic_offset_offset;
				assert(!(set->layout->flags & VK_DESCRIPTOR_SET_LAYOUT_CREATE_PUSH_DESCRIPTOR_BIT_KHR));
				write_dynamic_buffer_descriptor(device, set->dynamic_descriptors + idx,
								buffer_list, writeset->pBufferInfo + j);
				break;
			}
			case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
			case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
				write_buffer_descriptor(device, cmd_buffer, ptr, buffer_list,
							writeset->pBufferInfo + j);
				break;
			case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
			case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
				write_texel_buffer_descriptor(device, cmd_buffer, ptr, buffer_list,
							      writeset->pTexelBufferView[j]);
				break;
			case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE:
			case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
			case VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT:
				write_image_descriptor(device, cmd_buffer, ptr, buffer_list,
						       writeset->descriptorType,
						       writeset->pImageInfo + j);
				break;
			case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
				write_combined_image_sampler_descriptor(device, cmd_buffer, ptr, buffer_list,
									writeset->descriptorType,
									writeset->pImageInfo + j,
									!binding_layout->immutable_samplers_offset);
				if (copy_immutable_samplers) {
					const unsigned idx = writeset->dstArrayElement + j;
					memcpy(ptr + 16, samplers + 4 * idx, 16);
				}
				break;
			case VK_DESCRIPTOR_TYPE_SAMPLER:
				if (!binding_layout->immutable_samplers_offset) {
					write_sampler_descriptor(device, ptr,
								 writeset->pImageInfo + j);
				} else if (copy_immutable_samplers) {
					unsigned idx = writeset->dstArrayElement + j;
					memcpy(ptr, samplers + 4 * idx, 16);
				}
				break;
			default:
				unreachable("unimplemented descriptor type");
				break;
			}
			ptr += binding_layout->size / 4;
			++buffer_list;
		}
	}

	for (i = 0; i < descriptorCopyCount; i++) {
		const VkCopyDescriptorSet *copyset = &pDescriptorCopies[i];
		RADV_FROM_HANDLE(radv_descriptor_set, src_set,
		                 copyset->srcSet);
		RADV_FROM_HANDLE(radv_descriptor_set, dst_set,
		                 copyset->dstSet);
		const struct radv_descriptor_set_binding_layout *src_binding_layout =
			src_set->layout->binding + copyset->srcBinding;
		const struct radv_descriptor_set_binding_layout *dst_binding_layout =
			dst_set->layout->binding + copyset->dstBinding;
		uint32_t *src_ptr = src_set->mapped_ptr;
		uint32_t *dst_ptr = dst_set->mapped_ptr;
		struct radeon_winsys_bo **src_buffer_list = src_set->descriptors;
		struct radeon_winsys_bo **dst_buffer_list = dst_set->descriptors;

		src_ptr += src_binding_layout->offset / 4;
		dst_ptr += dst_binding_layout->offset / 4;

		src_ptr += src_binding_layout->size * copyset->srcArrayElement / 4;
		dst_ptr += dst_binding_layout->size * copyset->dstArrayElement / 4;

		src_buffer_list += src_binding_layout->buffer_offset;
		src_buffer_list += copyset->srcArrayElement;

		dst_buffer_list += dst_binding_layout->buffer_offset;
		dst_buffer_list += copyset->dstArrayElement;

		for (j = 0; j < copyset->descriptorCount; ++j) {
			switch (src_binding_layout->type) {
			case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
			case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC: {
				unsigned src_idx = copyset->srcArrayElement + j;
				unsigned dst_idx = copyset->dstArrayElement + j;
				struct radv_descriptor_range *src_range, *dst_range;
				src_idx += src_binding_layout->dynamic_offset_offset;
				dst_idx += dst_binding_layout->dynamic_offset_offset;

				src_range = src_set->dynamic_descriptors + src_idx;
				dst_range = dst_set->dynamic_descriptors + dst_idx;
				*dst_range = *src_range;
				break;
			}
			default:
				memcpy(dst_ptr, src_ptr, src_binding_layout->size);
			}
			src_ptr += src_binding_layout->size / 4;
			dst_ptr += dst_binding_layout->size / 4;
			dst_buffer_list[j] = src_buffer_list[j];
		}
	}
}

void radv_UpdateDescriptorSets(
	VkDevice                                    _device,
	uint32_t                                    descriptorWriteCount,
	const VkWriteDescriptorSet*                 pDescriptorWrites,
	uint32_t                                    descriptorCopyCount,
	const VkCopyDescriptorSet*                  pDescriptorCopies)
{
	RADV_FROM_HANDLE(radv_device, device, _device);

	radv_update_descriptor_sets(device, NULL, VK_NULL_HANDLE, descriptorWriteCount, pDescriptorWrites,
				    descriptorCopyCount, pDescriptorCopies);
}

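/* Update templates precompute, per entry, where the application data lands:
 * an index into dynamic_descriptors for dynamic buffers, or a dword offset
 * into mapped_ptr plus a destination stride otherwise.  This lets
 * radv_update_descriptor_set_with_template() copy descriptors without
 * consulting the set layout again. */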
VkResult radv_CreateDescriptorUpdateTemplate(VkDevice _device,
                                             const VkDescriptorUpdateTemplateCreateInfoKHR *pCreateInfo,
                                             const VkAllocationCallbacks *pAllocator,
                                             VkDescriptorUpdateTemplateKHR *pDescriptorUpdateTemplate)
{
	RADV_FROM_HANDLE(radv_device, device, _device);
	RADV_FROM_HANDLE(radv_descriptor_set_layout, set_layout, pCreateInfo->descriptorSetLayout);
	const uint32_t entry_count = pCreateInfo->descriptorUpdateEntryCount;
	const size_t size = sizeof(struct radv_descriptor_update_template) +
		sizeof(struct radv_descriptor_update_template_entry) * entry_count;
	struct radv_descriptor_update_template *templ;
	uint32_t i;

	templ = vk_alloc2(&device->alloc, pAllocator, size, 8, VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
	if (!templ)
		return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);

	templ->entry_count = entry_count;
	templ->bind_point = pCreateInfo->pipelineBindPoint;

	for (i = 0; i < entry_count; i++) {
		const VkDescriptorUpdateTemplateEntryKHR *entry = &pCreateInfo->pDescriptorUpdateEntries[i];
		const struct radv_descriptor_set_binding_layout *binding_layout =
			set_layout->binding + entry->dstBinding;
		const uint32_t buffer_offset = binding_layout->buffer_offset + entry->dstArrayElement;
		const uint32_t *immutable_samplers = NULL;
		uint32_t dst_offset;
		uint32_t dst_stride;

		/* dst_offset is an offset into dynamic_descriptors when the descriptor
		   is dynamic, and an offset into mapped_ptr otherwise */
		switch (entry->descriptorType) {
		case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
		case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
			assert(pCreateInfo->templateType == VK_DESCRIPTOR_UPDATE_TEMPLATE_TYPE_DESCRIPTOR_SET_KHR);
			dst_offset = binding_layout->dynamic_offset_offset + entry->dstArrayElement;
			dst_stride = 0; /* Not used */
			break;
		default:
			switch (entry->descriptorType) {
			case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
			case VK_DESCRIPTOR_TYPE_SAMPLER:
				/* Immutable samplers are copied into push descriptors when they are pushed */
				if (pCreateInfo->templateType == VK_DESCRIPTOR_UPDATE_TEMPLATE_TYPE_PUSH_DESCRIPTORS_KHR &&
				    binding_layout->immutable_samplers_offset && !binding_layout->immutable_samplers_equal) {
					immutable_samplers = radv_immutable_samplers(set_layout, binding_layout) + entry->dstArrayElement * 4;
				}
				break;
			default:
				break;
			}
			dst_offset = binding_layout->offset / 4 + binding_layout->size * entry->dstArrayElement / 4;
			dst_stride = binding_layout->size / 4;
			break;
		}

		templ->entry[i] = (struct radv_descriptor_update_template_entry) {
			.descriptor_type = entry->descriptorType,
			.descriptor_count = entry->descriptorCount,
			.src_offset = entry->offset,
			.src_stride = entry->stride,
			.dst_offset = dst_offset,
			.dst_stride = dst_stride,
			.buffer_offset = buffer_offset,
			.has_sampler = !binding_layout->immutable_samplers_offset,
			.immutable_samplers = immutable_samplers
		};
	}

	*pDescriptorUpdateTemplate = radv_descriptor_update_template_to_handle(templ);
	return VK_SUCCESS;
}

void radv_DestroyDescriptorUpdateTemplate(VkDevice _device,
                                          VkDescriptorUpdateTemplateKHR descriptorUpdateTemplate,
                                          const VkAllocationCallbacks *pAllocator)
{
	RADV_FROM_HANDLE(radv_device, device, _device);
	RADV_FROM_HANDLE(radv_descriptor_update_template, templ, descriptorUpdateTemplate);

	if (!templ)
		return;

	vk_free2(&device->alloc, pAllocator, templ);
}

void radv_update_descriptor_set_with_template(struct radv_device *device,
                                              struct radv_cmd_buffer *cmd_buffer,
                                              struct radv_descriptor_set *set,
                                              VkDescriptorUpdateTemplateKHR descriptorUpdateTemplate,
                                              const void *pData)
{
	RADV_FROM_HANDLE(radv_descriptor_update_template, templ, descriptorUpdateTemplate);
	uint32_t i;

	for (i = 0; i < templ->entry_count; ++i) {
		struct radeon_winsys_bo **buffer_list = set->descriptors + templ->entry[i].buffer_offset;
		uint32_t *pDst = set->mapped_ptr + templ->entry[i].dst_offset;
		const uint8_t *pSrc = ((const uint8_t *) pData) + templ->entry[i].src_offset;
		uint32_t j;

		for (j = 0; j < templ->entry[i].descriptor_count; ++j) {
			switch (templ->entry[i].descriptor_type) {
			case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
			case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC: {
				const unsigned idx = templ->entry[i].dst_offset + j;
				assert(!(set->layout->flags & VK_DESCRIPTOR_SET_LAYOUT_CREATE_PUSH_DESCRIPTOR_BIT_KHR));
				write_dynamic_buffer_descriptor(device, set->dynamic_descriptors + idx,
								buffer_list, (struct VkDescriptorBufferInfo *) pSrc);
				break;
			}
			case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
			case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
				write_buffer_descriptor(device, cmd_buffer, pDst, buffer_list,
							(struct VkDescriptorBufferInfo *) pSrc);
				break;
			case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
			case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
				write_texel_buffer_descriptor(device, cmd_buffer, pDst, buffer_list,
							      *(VkBufferView *) pSrc);
				break;
			case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE:
			case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
			case VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT:
				write_image_descriptor(device, cmd_buffer, pDst, buffer_list,
						       templ->entry[i].descriptor_type,
						       (struct VkDescriptorImageInfo *) pSrc);
				break;
			case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
				write_combined_image_sampler_descriptor(device, cmd_buffer, pDst, buffer_list,
									templ->entry[i].descriptor_type,
									(struct VkDescriptorImageInfo *) pSrc,
									templ->entry[i].has_sampler);
				if (templ->entry[i].immutable_samplers)
					memcpy(pDst + 16, templ->entry[i].immutable_samplers + 4 * j, 16);
				break;
			case VK_DESCRIPTOR_TYPE_SAMPLER:
				if (templ->entry[i].has_sampler)
					write_sampler_descriptor(device, pDst,
								 (struct VkDescriptorImageInfo *) pSrc);
				else if (templ->entry[i].immutable_samplers)
					memcpy(pDst, templ->entry[i].immutable_samplers + 4 * j, 16);
				break;
			default:
				unreachable("unimplemented descriptor type");
				break;
			}
			pSrc += templ->entry[i].src_stride;
			pDst += templ->entry[i].dst_stride;
			++buffer_list;
		}
	}
}

void radv_UpdateDescriptorSetWithTemplate(VkDevice _device,
                                          VkDescriptorSet descriptorSet,
                                          VkDescriptorUpdateTemplateKHR descriptorUpdateTemplate,
                                          const void *pData)
{
	RADV_FROM_HANDLE(radv_device, device, _device);
	RADV_FROM_HANDLE(radv_descriptor_set, set, descriptorSet);

	radv_update_descriptor_set_with_template(device, NULL, set, descriptorUpdateTemplate, pData);
}

VkResult radv_CreateSamplerYcbcrConversion(VkDevice device,
					   const VkSamplerYcbcrConversionCreateInfo* pCreateInfo,
					   const VkAllocationCallbacks* pAllocator,
					   VkSamplerYcbcrConversion* pYcbcrConversion)
{
	*pYcbcrConversion = VK_NULL_HANDLE;
	return VK_SUCCESS;
}


void radv_DestroySamplerYcbcrConversion(VkDevice device,
					VkSamplerYcbcrConversion ycbcrConversion,
					const VkAllocationCallbacks* pAllocator)
{
	/* Do nothing. */
}