/*
 * Copyright © 2016 Red Hat.
 * Copyright © 2016 Bas Nieuwenhuizen
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
#include <assert.h>
#include <stdbool.h>
#include <string.h>

#include "util/mesa-sha1.h"
#include "radv_private.h"
#include "sid.h"
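
/*
 * Descriptor memory layout used throughout this file, per element, in bytes
 * as written to descriptor memory: buffers, texel buffers and samplers take
 * 16 bytes, images and input attachments take 64 bytes (main descriptor +
 * fmask descriptor), and combined image+samplers take 96 bytes.  As an
 * illustration (assuming no immutable samplers): a set layout with a
 * UNIFORM_BUFFER binding of descriptorCount 3 followed by a
 * COMBINED_IMAGE_SAMPLER binding of descriptorCount 2 takes 3 * 16 = 48
 * bytes, is padded up to the 32-byte alignment of the image binding
 * (offset 64), and then adds 2 * 96 = 192 bytes, for a total
 * set_layout->size of 256 bytes.
 */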
VkResult radv_CreateDescriptorSetLayout(
	VkDevice                                    _device,
	const VkDescriptorSetLayoutCreateInfo*      pCreateInfo,
	const VkAllocationCallbacks*                pAllocator,
	VkDescriptorSetLayout*                      pSetLayout)
{
	RADV_FROM_HANDLE(radv_device, device, _device);
	struct radv_descriptor_set_layout *set_layout;

	assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO);

	uint32_t max_binding = 0;
	uint32_t immutable_sampler_count = 0;
	for (uint32_t j = 0; j < pCreateInfo->bindingCount; j++) {
		max_binding = MAX2(max_binding, pCreateInfo->pBindings[j].binding);
		if (pCreateInfo->pBindings[j].pImmutableSamplers)
			immutable_sampler_count += pCreateInfo->pBindings[j].descriptorCount;
	}

	size_t size = sizeof(struct radv_descriptor_set_layout) +
		      (max_binding + 1) * sizeof(set_layout->binding[0]) +
		      immutable_sampler_count * 4 * sizeof(uint32_t);

	set_layout = vk_alloc2(&device->alloc, pAllocator, size, 8,
			       VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
	if (!set_layout)
		return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

	/* We just allocate all the samplers at the end of the struct */
	uint32_t *samplers = (uint32_t*)&set_layout->binding[max_binding + 1];

	set_layout->binding_count = max_binding + 1;
	set_layout->shader_stages = 0;
	set_layout->dynamic_shader_stages = 0;
	set_layout->size = 0;

	memset(set_layout->binding, 0, size - sizeof(struct radv_descriptor_set_layout));

	uint32_t buffer_count = 0;
	uint32_t dynamic_offset_count = 0;

	for (uint32_t j = 0; j < pCreateInfo->bindingCount; j++) {
		const VkDescriptorSetLayoutBinding *binding = &pCreateInfo->pBindings[j];
		uint32_t b = binding->binding;
		uint32_t alignment;

		switch (binding->descriptorType) {
		case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
		case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
			set_layout->binding[b].dynamic_offset_count = 1;
			set_layout->dynamic_shader_stages |= binding->stageFlags;
			set_layout->binding[b].size = 0;
			set_layout->binding[b].buffer_count = 1;
			alignment = 1;
			break;
		case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
		case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
		case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
		case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
			set_layout->binding[b].size = 16;
			set_layout->binding[b].buffer_count = 1;
			alignment = 16;
			break;
		case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
		case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE:
		case VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT:
			/* main descriptor + fmask descriptor */
			set_layout->binding[b].size = 64;
			set_layout->binding[b].buffer_count = 1;
			alignment = 32;
			break;
		case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
			/* main descriptor + fmask descriptor + sampler */
			set_layout->binding[b].size = 96;
			set_layout->binding[b].buffer_count = 1;
			alignment = 32;
			break;
		case VK_DESCRIPTOR_TYPE_SAMPLER:
			set_layout->binding[b].size = 16;
			alignment = 16;
			break;
		default:
			unreachable("unknown descriptor type\n");
			break;
		}

		set_layout->size = align(set_layout->size, alignment);
		assert(binding->descriptorCount > 0);
		set_layout->binding[b].type = binding->descriptorType;
		set_layout->binding[b].array_size = binding->descriptorCount;
		set_layout->binding[b].offset = set_layout->size;
		set_layout->binding[b].buffer_offset = buffer_count;
		set_layout->binding[b].dynamic_offset_offset = dynamic_offset_count;

		if (binding->pImmutableSamplers) {
			set_layout->binding[b].immutable_samplers = samplers;
			set_layout->binding[b].immutable_samplers_equal = true;
			samplers += 4 * binding->descriptorCount;

			for (uint32_t i = 0; i < binding->descriptorCount; i++)
				memcpy(set_layout->binding[b].immutable_samplers + 4 * i,
				       &radv_sampler_from_handle(binding->pImmutableSamplers[i])->state, 16);
			for (uint32_t i = 1; i < binding->descriptorCount; i++)
				if (memcmp(set_layout->binding[b].immutable_samplers + 4 * i,
					   set_layout->binding[b].immutable_samplers, 16) != 0)
					set_layout->binding[b].immutable_samplers_equal = false;

			/* Don't reserve space for the samplers if they're not accessed. */
			if (set_layout->binding[b].immutable_samplers_equal) {
				if (binding->descriptorType == VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER)
					set_layout->binding[b].size -= 32;
				else if (binding->descriptorType == VK_DESCRIPTOR_TYPE_SAMPLER)
					set_layout->binding[b].size -= 16;
			}
		}

		set_layout->size += binding->descriptorCount * set_layout->binding[b].size;
		buffer_count += binding->descriptorCount * set_layout->binding[b].buffer_count;
		dynamic_offset_count += binding->descriptorCount *
			set_layout->binding[b].dynamic_offset_count;
		set_layout->shader_stages |= binding->stageFlags;
	}

	set_layout->buffer_count = buffer_count;
	set_layout->dynamic_offset_count = dynamic_offset_count;

	*pSetLayout = radv_descriptor_set_layout_to_handle(set_layout);

	return VK_SUCCESS;
}
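
/*
 * The per-binding bookkeeping recorded above (type, offset, array_size,
 * buffer_offset, dynamic_offset_offset) is consumed later in this file by
 * radv_descriptor_set_create() and radv_UpdateDescriptorSets() to locate a
 * descriptor inside the set's mapped memory and its slot in the per-set
 * buffer list.
 */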
void radv_DestroyDescriptorSetLayout(
	VkDevice                                    _device,
	VkDescriptorSetLayout                       _set_layout,
	const VkAllocationCallbacks*                pAllocator)
{
	RADV_FROM_HANDLE(radv_device, device, _device);
	RADV_FROM_HANDLE(radv_descriptor_set_layout, set_layout, _set_layout);

	if (!set_layout)
		return;

	vk_free2(&device->alloc, pAllocator, set_layout);
}
/*
 * Pipeline layouts.  These have nothing to do with the pipeline.  They are
 * just multiple descriptor set layouts pasted together.
 */
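
/*
 * Note: the SHA1 computed below covers every set layout's binding array plus
 * the rounded-up push constant size, so two binding-compatible pipeline
 * layouts hash to the same value; layout->sha1 is presumably used elsewhere
 * in the driver (e.g. pipeline caching) as a cheap layout identity.
 */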
VkResult radv_CreatePipelineLayout(
	VkDevice                                    _device,
	const VkPipelineLayoutCreateInfo*           pCreateInfo,
	const VkAllocationCallbacks*                pAllocator,
	VkPipelineLayout*                           pPipelineLayout)
{
	RADV_FROM_HANDLE(radv_device, device, _device);
	struct radv_pipeline_layout *layout;
	struct mesa_sha1 *ctx;

	assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO);

	layout = vk_alloc2(&device->alloc, pAllocator, sizeof(*layout), 8,
			   VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
	if (layout == NULL)
		return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

	layout->num_sets = pCreateInfo->setLayoutCount;

	unsigned dynamic_offset_count = 0;

	ctx = _mesa_sha1_init();
	for (uint32_t set = 0; set < pCreateInfo->setLayoutCount; set++) {
		RADV_FROM_HANDLE(radv_descriptor_set_layout, set_layout,
				 pCreateInfo->pSetLayouts[set]);
		layout->set[set].layout = set_layout;

		layout->set[set].dynamic_offset_start = dynamic_offset_count;
		for (uint32_t b = 0; b < set_layout->binding_count; b++) {
			dynamic_offset_count += set_layout->binding[b].array_size * set_layout->binding[b].dynamic_offset_count;
		}
		_mesa_sha1_update(ctx, set_layout->binding,
				  sizeof(set_layout->binding[0]) * set_layout->binding_count);
	}

	layout->dynamic_offset_count = dynamic_offset_count;
	layout->push_constant_size = 0;
	for (unsigned i = 0; i < pCreateInfo->pushConstantRangeCount; ++i) {
		const VkPushConstantRange *range = pCreateInfo->pPushConstantRanges + i;
		layout->push_constant_size = MAX2(layout->push_constant_size,
						  range->offset + range->size);
	}

	layout->push_constant_size = align(layout->push_constant_size, 16);
	_mesa_sha1_update(ctx, &layout->push_constant_size,
			  sizeof(layout->push_constant_size));
	_mesa_sha1_final(ctx, layout->sha1);
	*pPipelineLayout = radv_pipeline_layout_to_handle(layout);

	return VK_SUCCESS;
}
void radv_DestroyPipelineLayout(
	VkDevice                                    _device,
	VkPipelineLayout                            _pipelineLayout,
	const VkAllocationCallbacks*                pAllocator)
{
	RADV_FROM_HANDLE(radv_device, device, _device);
	RADV_FROM_HANDLE(radv_pipeline_layout, pipeline_layout, _pipelineLayout);

	if (!pipeline_layout)
		return;

	vk_free2(&device->alloc, pAllocator, pipeline_layout);
}
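
/*
 * Descriptor set allocation.  With a pool (cmd_buffer == NULL) the set is
 * carved out of the pool's VRAM BO: first linearly at pool->current_offset,
 * and failing that by scanning vram_list for a gap between already-allocated
 * sets.  With a cmd_buffer (temporary sets) the memory comes from the command
 * buffer's upload buffer instead and the set is never linked into a pool.
 */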
static VkResult
radv_descriptor_set_create(struct radv_device *device,
			   struct radv_descriptor_pool *pool,
			   struct radv_cmd_buffer *cmd_buffer,
			   const struct radv_descriptor_set_layout *layout,
			   struct radv_descriptor_set **out_set)
{
	struct radv_descriptor_set *set;
	unsigned mem_size = sizeof(struct radv_descriptor_set) +
		sizeof(struct radeon_winsys_bo *) * layout->buffer_count;
	set = vk_alloc2(&device->alloc, NULL, mem_size, 8,
			VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);

	if (!set)
		return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

	memset(set, 0, mem_size);

	if (layout->dynamic_offset_count) {
		unsigned size = sizeof(struct radv_descriptor_range) *
				layout->dynamic_offset_count;
		set->dynamic_descriptors = vk_alloc2(&device->alloc, NULL, size, 8,
						     VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);

		if (!set->dynamic_descriptors) {
			vk_free2(&device->alloc, NULL, set);
			return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
		}
	}

	set->layout = layout;
	if (layout->size) {
		uint32_t layout_size = align_u32(layout->size, 32);
		set->size = layout->size;
		if (!cmd_buffer) {
			/* try to allocate linearly first, so that we don't spend
			 * time looking for gaps if the app only allocates &
			 * resets via the pool. */
			if (pool->current_offset + layout_size <= pool->size) {
				set->bo = pool->bo;
				set->mapped_ptr = (uint32_t*)(pool->mapped_ptr + pool->current_offset);
				set->va = device->ws->buffer_get_va(set->bo) + pool->current_offset;
				pool->current_offset += layout_size;
				list_addtail(&set->vram_list, &pool->vram_list);
			} else {
				uint64_t offset = 0;
				struct list_head *prev = &pool->vram_list;
				struct radv_descriptor_set *cur;
				LIST_FOR_EACH_ENTRY(cur, &pool->vram_list, vram_list) {
					uint64_t start = (uint8_t*)cur->mapped_ptr - pool->mapped_ptr;
					if (start - offset >= layout_size)
						break;

					offset = start + cur->size;
					prev = &cur->vram_list;
				}

				if (pool->size - offset < layout_size) {
					vk_free2(&device->alloc, NULL, set->dynamic_descriptors);
					vk_free2(&device->alloc, NULL, set);
					return vk_error(VK_ERROR_OUT_OF_POOL_MEMORY_KHR);
				}
				set->bo = pool->bo;
				set->mapped_ptr = (uint32_t*)(pool->mapped_ptr + offset);
				set->va = device->ws->buffer_get_va(set->bo) + offset;
				list_add(&set->vram_list, prev);
			}
		} else {
			unsigned bo_offset;
			if (!radv_cmd_buffer_upload_alloc(cmd_buffer, set->size, 32,
							  &bo_offset,
							  (void**)&set->mapped_ptr)) {
				vk_free2(&device->alloc, NULL, set->dynamic_descriptors);
				vk_free2(&device->alloc, NULL, set);
				return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
			}

			set->va = device->ws->buffer_get_va(cmd_buffer->upload.upload_bo);
			set->va += bo_offset;
		}
	}

	for (unsigned i = 0; i < layout->binding_count; ++i) {
		if (!layout->binding[i].immutable_samplers ||
		    layout->binding[i].immutable_samplers_equal)
			continue;

		unsigned offset = layout->binding[i].offset / 4;
		if (layout->binding[i].type == VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER)
			offset += 16;

		for (unsigned j = 0; j < layout->binding[i].array_size; ++j) {
			memcpy(set->mapped_ptr + offset, layout->binding[i].immutable_samplers + 4 * j, 16);
			offset += layout->binding[i].size / 4;
		}
	}

	*out_set = set;
	return VK_SUCCESS;
}
static void
radv_descriptor_set_destroy(struct radv_device *device,
			    struct radv_descriptor_pool *pool,
			    struct radv_descriptor_set *set,
			    bool free_bo)
{
	if (free_bo && set->size)
		list_del(&set->vram_list);
	if (set->dynamic_descriptors)
		vk_free2(&device->alloc, NULL, set->dynamic_descriptors);
	vk_free2(&device->alloc, NULL, set);
}
VkResult
radv_temp_descriptor_set_create(struct radv_device *device,
				struct radv_cmd_buffer *cmd_buffer,
				VkDescriptorSetLayout _layout,
				VkDescriptorSet *_set)
{
	RADV_FROM_HANDLE(radv_descriptor_set_layout, layout, _layout);
	struct radv_descriptor_set *set;
	VkResult ret;

	ret = radv_descriptor_set_create(device, NULL, cmd_buffer, layout, &set);
	*_set = radv_descriptor_set_to_handle(set);
	return ret;
}
void
radv_temp_descriptor_set_destroy(struct radv_device *device,
				 VkDescriptorSet _set)
{
	RADV_FROM_HANDLE(radv_descriptor_set, set, _set);

	radv_descriptor_set_destroy(device, NULL, set, false);
}
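
/*
 * The pool BO size is a worst-case estimate per descriptor type: 32 bytes
 * for buffers and samplers (16-byte descriptors rounded up so a following
 * image descriptor can stay 32-byte aligned), 64 bytes for images and input
 * attachments, 96 bytes for combined image+samplers.  Dynamic uniform and
 * storage buffers take no BO space at all; they live in the CPU-side
 * dynamic_descriptors array of each set.
 */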
VkResult radv_CreateDescriptorPool(
	VkDevice                                    _device,
	const VkDescriptorPoolCreateInfo*           pCreateInfo,
	const VkAllocationCallbacks*                pAllocator,
	VkDescriptorPool*                           pDescriptorPool)
{
	RADV_FROM_HANDLE(radv_device, device, _device);
	struct radv_descriptor_pool *pool;
	int size = sizeof(struct radv_descriptor_pool);
	uint64_t bo_size = 0;
	pool = vk_alloc2(&device->alloc, pAllocator, size, 8,
			 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
	if (!pool)
		return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

	memset(pool, 0, sizeof(*pool));

	for (unsigned i = 0; i < pCreateInfo->poolSizeCount; ++i) {
		switch(pCreateInfo->pPoolSizes[i].type) {
		case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
		case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
			break;
		case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
		case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
		case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
		case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
		case VK_DESCRIPTOR_TYPE_SAMPLER:
			/* 32 as we may need to align for images */
			bo_size += 32 * pCreateInfo->pPoolSizes[i].descriptorCount;
			break;
		case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE:
		case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
		case VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT:
			bo_size += 64 * pCreateInfo->pPoolSizes[i].descriptorCount;
			break;
		case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
			bo_size += 96 * pCreateInfo->pPoolSizes[i].descriptorCount;
			break;
		default:
			unreachable("unknown descriptor type\n");
			break;
		}
	}

	if (bo_size) {
		pool->bo = device->ws->buffer_create(device->ws, bo_size,
						     32, RADEON_DOMAIN_VRAM, 0);
		pool->mapped_ptr = (uint8_t*)device->ws->buffer_map(pool->bo);
	}
	pool->size = bo_size;

	list_inithead(&pool->vram_list);
	*pDescriptorPool = radv_descriptor_pool_to_handle(pool);
	return VK_SUCCESS;
}
void radv_DestroyDescriptorPool(
	VkDevice                                    _device,
	VkDescriptorPool                            _pool,
	const VkAllocationCallbacks*                pAllocator)
{
	RADV_FROM_HANDLE(radv_device, device, _device);
	RADV_FROM_HANDLE(radv_descriptor_pool, pool, _pool);

	if (!pool)
		return;

	list_for_each_entry_safe(struct radv_descriptor_set, set,
				 &pool->vram_list, vram_list) {
		radv_descriptor_set_destroy(device, pool, set, false);
	}

	if (pool->bo)
		device->ws->buffer_destroy(pool->bo);
	vk_free2(&device->alloc, pAllocator, pool);
}
VkResult radv_ResetDescriptorPool(
	VkDevice                                    _device,
	VkDescriptorPool                            descriptorPool,
	VkDescriptorPoolResetFlags                  flags)
{
	RADV_FROM_HANDLE(radv_device, device, _device);
	RADV_FROM_HANDLE(radv_descriptor_pool, pool, descriptorPool);

	list_for_each_entry_safe(struct radv_descriptor_set, set,
				 &pool->vram_list, vram_list) {
		radv_descriptor_set_destroy(device, pool, set, false);
	}

	list_inithead(&pool->vram_list);

	pool->current_offset = 0;

	return VK_SUCCESS;
}
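
/*
 * vkAllocateDescriptorSets / vkFreeDescriptorSets: sets are created straight
 * out of the pool (no cmd_buffer).  If any allocation in the batch fails, the
 * sets created so far are released through radv_FreeDescriptorSets before the
 * error is returned.
 */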
VkResult radv_AllocateDescriptorSets(
	VkDevice                                    _device,
	const VkDescriptorSetAllocateInfo*          pAllocateInfo,
	VkDescriptorSet*                            pDescriptorSets)
{
	RADV_FROM_HANDLE(radv_device, device, _device);
	RADV_FROM_HANDLE(radv_descriptor_pool, pool, pAllocateInfo->descriptorPool);

	VkResult result = VK_SUCCESS;
	uint32_t i;
	struct radv_descriptor_set *set;

	/* allocate a set of buffers for each shader to contain descriptors */
	for (i = 0; i < pAllocateInfo->descriptorSetCount; i++) {
		RADV_FROM_HANDLE(radv_descriptor_set_layout, layout,
				 pAllocateInfo->pSetLayouts[i]);

		result = radv_descriptor_set_create(device, pool, NULL, layout, &set);
		if (result != VK_SUCCESS)
			break;

		pDescriptorSets[i] = radv_descriptor_set_to_handle(set);
	}

	if (result != VK_SUCCESS)
		radv_FreeDescriptorSets(_device, pAllocateInfo->descriptorPool,
					i, pDescriptorSets);

	return result;
}
VkResult radv_FreeDescriptorSets(
	VkDevice                                    _device,
	VkDescriptorPool                            descriptorPool,
	uint32_t                                    count,
	const VkDescriptorSet*                      pDescriptorSets)
{
	RADV_FROM_HANDLE(radv_device, device, _device);
	RADV_FROM_HANDLE(radv_descriptor_pool, pool, descriptorPool);

	for (uint32_t i = 0; i < count; i++) {
		RADV_FROM_HANDLE(radv_descriptor_set, set, pDescriptorSets[i]);

		if (set)
			radv_descriptor_set_destroy(device, pool, set, true);
	}
	return VK_SUCCESS;
}
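
/*
 * The write_*_descriptor helpers below each fill in one hardware descriptor
 * at *dst (4 dwords for buffers, texel buffers and samplers; 16 dwords for
 * images, including the fmask descriptor) and record the backing BO in
 * *buffer_list, which the rest of the driver uses to keep those BOs resident
 * while the set is bound.
 */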
static void write_texel_buffer_descriptor(struct radv_device *device,
					  unsigned *dst,
					  struct radeon_winsys_bo **buffer_list,
					  const VkBufferView _buffer_view)
{
	RADV_FROM_HANDLE(radv_buffer_view, buffer_view, _buffer_view);

	memcpy(dst, buffer_view->state, 4 * 4);
	*buffer_list = buffer_view->bo;
}
static void write_buffer_descriptor(struct radv_device *device,
				    unsigned *dst,
				    struct radeon_winsys_bo **buffer_list,
				    const VkDescriptorBufferInfo *buffer_info)
{
	RADV_FROM_HANDLE(radv_buffer, buffer, buffer_info->buffer);
	uint64_t va = device->ws->buffer_get_va(buffer->bo);
	uint32_t range = buffer_info->range;

	if (buffer_info->range == VK_WHOLE_SIZE)
		range = buffer->size - buffer_info->offset;

	va += buffer_info->offset + buffer->offset;
	dst[0] = va;
	dst[1] = S_008F04_BASE_ADDRESS_HI(va >> 32);
	dst[2] = range;
	dst[3] = S_008F0C_DST_SEL_X(V_008F0C_SQ_SEL_X) |
		 S_008F0C_DST_SEL_Y(V_008F0C_SQ_SEL_Y) |
		 S_008F0C_DST_SEL_Z(V_008F0C_SQ_SEL_Z) |
		 S_008F0C_DST_SEL_W(V_008F0C_SQ_SEL_W) |
		 S_008F0C_NUM_FORMAT(V_008F0C_BUF_NUM_FORMAT_FLOAT) |
		 S_008F0C_DATA_FORMAT(V_008F0C_BUF_DATA_FORMAT_32);

	*buffer_list = buffer->bo;
}
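
/*
 * Dynamic buffers never get a descriptor written into the set's mapped
 * memory here; only the base VA and size are stashed in a CPU-side
 * radv_descriptor_range, presumably so the real descriptor can be emitted
 * once the dynamic offset is known at bind time.
 */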
static void write_dynamic_buffer_descriptor(struct radv_device *device,
					    struct radv_descriptor_range *range,
					    struct radeon_winsys_bo **buffer_list,
					    const VkDescriptorBufferInfo *buffer_info)
{
	RADV_FROM_HANDLE(radv_buffer, buffer, buffer_info->buffer);
	uint64_t va = device->ws->buffer_get_va(buffer->bo);
	unsigned size = buffer_info->range;

	if (buffer_info->range == VK_WHOLE_SIZE)
		size = buffer->size - buffer_info->offset;

	va += buffer_info->offset + buffer->offset;
	range->va = va;
	range->size = size;

	*buffer_list = buffer->bo;
}
static void
write_image_descriptor(struct radv_device *device,
		       unsigned *dst,
		       struct radeon_winsys_bo **buffer_list,
		       const VkDescriptorImageInfo *image_info)
{
	RADV_FROM_HANDLE(radv_image_view, iview, image_info->imageView);
	memcpy(dst, iview->descriptor, 8 * 4);
	memcpy(dst + 8, iview->fmask_descriptor, 8 * 4);
	*buffer_list = iview->bo;
}
static void
write_combined_image_sampler_descriptor(struct radv_device *device,
					unsigned *dst,
					struct radeon_winsys_bo **buffer_list,
					const VkDescriptorImageInfo *image_info,
					bool has_sampler)
{
	RADV_FROM_HANDLE(radv_sampler, sampler, image_info->sampler);

	write_image_descriptor(device, dst, buffer_list, image_info);
	/* copy over sampler state */
	if (has_sampler)
		memcpy(dst + 16, sampler->state, 16);
}
static void
write_sampler_descriptor(struct radv_device *device,
			 unsigned *dst,
			 const VkDescriptorImageInfo *image_info)
{
	RADV_FROM_HANDLE(radv_sampler, sampler, image_info->sampler);

	memcpy(dst, sampler->state, 16);
}
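
/*
 * vkUpdateDescriptorSets: ptr and buffer_list are advanced in units of the
 * binding's per-element size (in dwords) and buffer_count respectively.
 * Illustrative example using the sizes from the layout code above: for a
 * COMBINED_IMAGE_SAMPLER binding at offset 256 with dstArrayElement == 2,
 * ptr starts at mapped_ptr + 256/4 + (96/4) * 2 and advances by another
 * 96/4 = 24 dwords per descriptor written.  Descriptor copies are not
 * implemented yet (see radv_finishme at the end).
 */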
void radv_UpdateDescriptorSets(
	VkDevice                                    _device,
	uint32_t                                    descriptorWriteCount,
	const VkWriteDescriptorSet*                 pDescriptorWrites,
	uint32_t                                    descriptorCopyCount,
	const VkCopyDescriptorSet*                  pDescriptorCopies)
{
	RADV_FROM_HANDLE(radv_device, device, _device);
	uint32_t i, j;
	for (i = 0; i < descriptorWriteCount; i++) {
		const VkWriteDescriptorSet *writeset = &pDescriptorWrites[i];
		RADV_FROM_HANDLE(radv_descriptor_set, set, writeset->dstSet);
		const struct radv_descriptor_set_binding_layout *binding_layout =
			set->layout->binding + writeset->dstBinding;
		uint32_t *ptr = set->mapped_ptr;
		struct radeon_winsys_bo **buffer_list = set->descriptors;

		ptr += binding_layout->offset / 4;
		ptr += binding_layout->size * writeset->dstArrayElement / 4;
		buffer_list += binding_layout->buffer_offset;
		buffer_list += binding_layout->buffer_count * writeset->dstArrayElement;
		for (j = 0; j < writeset->descriptorCount; ++j) {
			switch(writeset->descriptorType) {
			case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
			case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC: {
				unsigned idx = writeset->dstArrayElement + j;
				idx += binding_layout->dynamic_offset_offset;
				write_dynamic_buffer_descriptor(device, set->dynamic_descriptors + idx,
								buffer_list, writeset->pBufferInfo + j);
				break;
			}
			case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
			case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
				write_buffer_descriptor(device, ptr, buffer_list,
							writeset->pBufferInfo + j);
				break;
			case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
			case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
				write_texel_buffer_descriptor(device, ptr, buffer_list,
							      writeset->pTexelBufferView[j]);
				break;
			case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE:
			case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
			case VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT:
				write_image_descriptor(device, ptr, buffer_list,
						       writeset->pImageInfo + j);
				break;
			case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
				write_combined_image_sampler_descriptor(device, ptr, buffer_list,
									writeset->pImageInfo + j,
									!binding_layout->immutable_samplers);
				break;
			case VK_DESCRIPTOR_TYPE_SAMPLER:
				assert(!binding_layout->immutable_samplers);
				write_sampler_descriptor(device, ptr,
							 writeset->pImageInfo + j);
				break;
			default:
				unreachable("unimplemented descriptor type");
				break;
			}
			ptr += binding_layout->size / 4;
			buffer_list += binding_layout->buffer_count;
		}
	}
	if (descriptorCopyCount)
		radv_finishme("copy descriptors");
}