radv: Store the immutable samplers as uint32_t[4].
mesa.git: src/amd/vulkan/radv_descriptor_set.c
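Each immutable sampler is stored directly in the set layout as four dwords (the 16 bytes of hardware sampler state copied out of the radv_sampler at layout-creation time), so the layout does not need to hold on to the VkSampler objects themselves.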
/*
 * Copyright © 2016 Red Hat.
 * Copyright © 2016 Bas Nieuwenhuizen
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
#include <assert.h>
#include <stdbool.h>
#include <string.h>
#include <unistd.h>
#include <fcntl.h>

#include "util/mesa-sha1.h"
#include "radv_private.h"
#include "sid.h"

VkResult radv_CreateDescriptorSetLayout(
        VkDevice                                    _device,
        const VkDescriptorSetLayoutCreateInfo*      pCreateInfo,
        const VkAllocationCallbacks*                pAllocator,
        VkDescriptorSetLayout*                      pSetLayout)
{
        RADV_FROM_HANDLE(radv_device, device, _device);
        struct radv_descriptor_set_layout *set_layout;

        assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO);

        uint32_t max_binding = 0;
        uint32_t immutable_sampler_count = 0;
        for (uint32_t j = 0; j < pCreateInfo->bindingCount; j++) {
                max_binding = MAX2(max_binding, pCreateInfo->pBindings[j].binding);
                if (pCreateInfo->pBindings[j].pImmutableSamplers)
                        immutable_sampler_count += pCreateInfo->pBindings[j].descriptorCount;
        }

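        /* One allocation holds the layout struct, the per-binding table, and
         * four dwords of state for every immutable sampler. */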
        size_t size = sizeof(struct radv_descriptor_set_layout) +
                      (max_binding + 1) * sizeof(set_layout->binding[0]) +
                      immutable_sampler_count * 4 * sizeof(uint32_t);

        set_layout = vk_alloc2(&device->alloc, pAllocator, size, 8,
                               VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
        if (!set_layout)
                return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

        /* We just allocate all the samplers at the end of the struct */
        uint32_t *samplers = (uint32_t*)&set_layout->binding[max_binding + 1];

        set_layout->binding_count = max_binding + 1;
        set_layout->shader_stages = 0;
        set_layout->size = 0;

        memset(set_layout->binding, 0, size - sizeof(struct radv_descriptor_set_layout));

        uint32_t buffer_count = 0;
        uint32_t dynamic_offset_count = 0;

        for (uint32_t j = 0; j < pCreateInfo->bindingCount; j++) {
                const VkDescriptorSetLayoutBinding *binding = &pCreateInfo->pBindings[j];
                uint32_t b = binding->binding;
                uint32_t alignment;

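                /* Sizes below are bytes of descriptor memory per array
                 * element: buffer resources are 4 dwords, images are 8 dwords
                 * plus 8 for the fmask descriptor, and the combined case
                 * reserves another 32 bytes of which the first 16 hold the
                 * sampler words. */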
                switch (binding->descriptorType) {
                case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
                case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
                        set_layout->binding[b].dynamic_offset_count = 1;
                        set_layout->dynamic_shader_stages |= binding->stageFlags;
                        set_layout->binding[b].size = 0;
                        set_layout->binding[b].buffer_count = 1;
                        alignment = 1;
                        break;
                case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
                case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
                case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
                case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
                        set_layout->binding[b].size = 16;
                        set_layout->binding[b].buffer_count = 1;
                        alignment = 16;
                        break;
                case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
                case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE:
                case VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT:
                        /* main descriptor + fmask descriptor */
                        set_layout->binding[b].size = 64;
                        set_layout->binding[b].buffer_count = 1;
                        alignment = 32;
                        break;
                case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
                        /* main descriptor + fmask descriptor + sampler */
                        set_layout->binding[b].size = 96;
                        set_layout->binding[b].buffer_count = 1;
                        alignment = 32;
                        break;
                case VK_DESCRIPTOR_TYPE_SAMPLER:
                        set_layout->binding[b].size = 16;
                        alignment = 16;
                        break;
                default:
                        unreachable("unknown descriptor type\n");
                        break;
                }

                set_layout->size = align(set_layout->size, alignment);
                assert(binding->descriptorCount > 0);
                set_layout->binding[b].type = binding->descriptorType;
                set_layout->binding[b].array_size = binding->descriptorCount;
                set_layout->binding[b].offset = set_layout->size;
                set_layout->binding[b].buffer_offset = buffer_count;
                set_layout->binding[b].dynamic_offset_offset = dynamic_offset_count;

                set_layout->size += binding->descriptorCount * set_layout->binding[b].size;
                buffer_count += binding->descriptorCount * set_layout->binding[b].buffer_count;
                dynamic_offset_count += binding->descriptorCount *
                        set_layout->binding[b].dynamic_offset_count;

                if (binding->pImmutableSamplers) {
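                        /* Reserve four dwords per sampler and copy the
                         * 16-byte sampler state in; the VkSampler handles are
                         * not referenced again after this. */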
                        set_layout->binding[b].immutable_samplers = samplers;
                        samplers += 4 * binding->descriptorCount;

                        for (uint32_t i = 0; i < binding->descriptorCount; i++)
                                memcpy(set_layout->binding[b].immutable_samplers + 4 * i,
                                       &radv_sampler_from_handle(binding->pImmutableSamplers[i])->state, 16);
                } else {
                        set_layout->binding[b].immutable_samplers = NULL;
                }

                set_layout->shader_stages |= binding->stageFlags;
        }

        set_layout->buffer_count = buffer_count;
        set_layout->dynamic_offset_count = dynamic_offset_count;

        *pSetLayout = radv_descriptor_set_layout_to_handle(set_layout);

        return VK_SUCCESS;
}

void radv_DestroyDescriptorSetLayout(
        VkDevice                                    _device,
        VkDescriptorSetLayout                       _set_layout,
        const VkAllocationCallbacks*                pAllocator)
{
        RADV_FROM_HANDLE(radv_device, device, _device);
        RADV_FROM_HANDLE(radv_descriptor_set_layout, set_layout, _set_layout);

        if (!set_layout)
                return;

        vk_free2(&device->alloc, pAllocator, set_layout);
}

/*
 * Pipeline layouts.  These have nothing to do with the pipeline. They are
 * just multiple descriptor set layouts pasted together.
 */

VkResult radv_CreatePipelineLayout(
        VkDevice                                    _device,
        const VkPipelineLayoutCreateInfo*           pCreateInfo,
        const VkAllocationCallbacks*                pAllocator,
        VkPipelineLayout*                           pPipelineLayout)
{
        RADV_FROM_HANDLE(radv_device, device, _device);
        struct radv_pipeline_layout *layout;
        struct mesa_sha1 *ctx;

        assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO);

        layout = vk_alloc2(&device->alloc, pAllocator, sizeof(*layout), 8,
                           VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
        if (layout == NULL)
                return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

        layout->num_sets = pCreateInfo->setLayoutCount;

        unsigned dynamic_offset_count = 0;

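        /* Fingerprint the layout: hash every binding table plus the push
         * constant size so that compatible layouts produce identical SHA-1s. */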
        ctx = _mesa_sha1_init();
        for (uint32_t set = 0; set < pCreateInfo->setLayoutCount; set++) {
                RADV_FROM_HANDLE(radv_descriptor_set_layout, set_layout,
                                 pCreateInfo->pSetLayouts[set]);
                layout->set[set].layout = set_layout;

                layout->set[set].dynamic_offset_start = dynamic_offset_count;
                for (uint32_t b = 0; b < set_layout->binding_count; b++) {
                        dynamic_offset_count += set_layout->binding[b].array_size *
                                                set_layout->binding[b].dynamic_offset_count;
                }
                _mesa_sha1_update(ctx, set_layout->binding,
                                  sizeof(set_layout->binding[0]) * set_layout->binding_count);
        }

        layout->dynamic_offset_count = dynamic_offset_count;
        layout->push_constant_size = 0;
        for (unsigned i = 0; i < pCreateInfo->pushConstantRangeCount; ++i) {
                const VkPushConstantRange *range = pCreateInfo->pPushConstantRanges + i;
                layout->push_constant_size = MAX2(layout->push_constant_size,
                                                  range->offset + range->size);
        }

        layout->push_constant_size = align(layout->push_constant_size, 16);
        _mesa_sha1_update(ctx, &layout->push_constant_size,
                          sizeof(layout->push_constant_size));
        _mesa_sha1_final(ctx, layout->sha1);
        *pPipelineLayout = radv_pipeline_layout_to_handle(layout);

        return VK_SUCCESS;
}

void radv_DestroyPipelineLayout(
        VkDevice                                    _device,
        VkPipelineLayout                            _pipelineLayout,
        const VkAllocationCallbacks*                pAllocator)
{
        RADV_FROM_HANDLE(radv_device, device, _device);
        RADV_FROM_HANDLE(radv_pipeline_layout, pipeline_layout, _pipelineLayout);

        if (!pipeline_layout)
                return;
        vk_free2(&device->alloc, pAllocator, pipeline_layout);
}

#define EMPTY 1

static VkResult
radv_descriptor_set_create(struct radv_device *device,
                           struct radv_descriptor_pool *pool,
                           struct radv_cmd_buffer *cmd_buffer,
                           const struct radv_descriptor_set_layout *layout,
                           struct radv_descriptor_set **out_set)
{
        struct radv_descriptor_set *set;
        unsigned mem_size = sizeof(struct radv_descriptor_set) +
                sizeof(struct radeon_winsys_bo *) * layout->buffer_count;
        set = vk_alloc2(&device->alloc, NULL, mem_size, 8,
                        VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);

        if (!set)
                return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

        memset(set, 0, mem_size);

        if (layout->dynamic_offset_count) {
                unsigned size = sizeof(struct radv_descriptor_range) *
                                layout->dynamic_offset_count;
                set->dynamic_descriptors = vk_alloc2(&device->alloc, NULL, size, 8,
                                                     VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);

                if (!set->dynamic_descriptors) {
                        vk_free2(&device->alloc, NULL, set);
                        return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
                }
        }

        set->layout = layout;
        if (layout->size) {
                uint32_t layout_size = align_u32(layout->size, 32);
                set->size = layout->size;
                if (!cmd_buffer) {
                        /* Try to allocate linearly first, so that we don't
                         * spend time looking for gaps if the app only
                         * allocates & resets via the pool. */
                        if (pool->current_offset + layout_size <= pool->size) {
                                set->bo = pool->bo;
                                set->mapped_ptr = (uint32_t*)(pool->mapped_ptr + pool->current_offset);
                                set->va = device->ws->buffer_get_va(set->bo) + pool->current_offset;
                                pool->current_offset += layout_size;
                                list_addtail(&set->vram_list, &pool->vram_list);
                        } else {
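                                /* The bump allocator is full; walk the
                                 * address-ordered vram_list for the first gap
                                 * big enough to hold this set. */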
                                uint64_t offset = 0;
                                struct list_head *prev = &pool->vram_list;
                                struct radv_descriptor_set *cur;
                                LIST_FOR_EACH_ENTRY(cur, &pool->vram_list, vram_list) {
                                        uint64_t start = (uint8_t*)cur->mapped_ptr - pool->mapped_ptr;
                                        if (start - offset >= layout_size)
                                                break;

                                        offset = start + cur->size;
                                        prev = &cur->vram_list;
                                }

                                if (pool->size - offset < layout_size) {
                                        vk_free2(&device->alloc, NULL, set->dynamic_descriptors);
                                        vk_free2(&device->alloc, NULL, set);
                                        return vk_error(VK_ERROR_OUT_OF_POOL_MEMORY_KHR);
                                }
                                set->bo = pool->bo;
                                set->mapped_ptr = (uint32_t*)(pool->mapped_ptr + offset);
                                set->va = device->ws->buffer_get_va(set->bo) + offset;
                                list_add(&set->vram_list, prev);
                        }
                } else {
                        unsigned bo_offset;
                        if (!radv_cmd_buffer_upload_alloc(cmd_buffer, set->size, 32,
                                                          &bo_offset,
                                                          (void**)&set->mapped_ptr)) {
                                vk_free2(&device->alloc, NULL, set->dynamic_descriptors);
                                vk_free2(&device->alloc, NULL, set);
                                return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
                        }

                        set->va = device->ws->buffer_get_va(cmd_buffer->upload.upload_bo);
                        set->va += bo_offset;
                }
        }

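        /* Pre-fill the immutable sampler words.  Offsets here are in dwords;
         * for combined image+sampler bindings the sampler sits after the
         * 16-dword image+fmask descriptor. */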
        for (unsigned i = 0; i < layout->binding_count; ++i) {
                if (!layout->binding[i].immutable_samplers)
                        continue;

                unsigned offset = layout->binding[i].offset / 4;
                if (layout->binding[i].type == VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER)
                        offset += 16;

                for (unsigned j = 0; j < layout->binding[i].array_size; ++j) {
                        memcpy(set->mapped_ptr + offset,
                               layout->binding[i].immutable_samplers + 4 * j, 16);
                        offset += layout->binding[i].size / 4;
                }
        }
        *out_set = set;
        return VK_SUCCESS;
}

static void
radv_descriptor_set_destroy(struct radv_device *device,
                            struct radv_descriptor_pool *pool,
                            struct radv_descriptor_set *set,
                            bool free_bo)
{
        if (free_bo && set->size)
                list_del(&set->vram_list);
        if (set->dynamic_descriptors)
                vk_free2(&device->alloc, NULL, set->dynamic_descriptors);
        vk_free2(&device->alloc, NULL, set);
}

VkResult
radv_temp_descriptor_set_create(struct radv_device *device,
                                struct radv_cmd_buffer *cmd_buffer,
                                VkDescriptorSetLayout _layout,
                                VkDescriptorSet *_set)
{
        RADV_FROM_HANDLE(radv_descriptor_set_layout, layout, _layout);
        struct radv_descriptor_set *set;
        VkResult ret;

        ret = radv_descriptor_set_create(device, NULL, cmd_buffer, layout, &set);
        if (ret != VK_SUCCESS)
                return ret;

        *_set = radv_descriptor_set_to_handle(set);
        return ret;
}

void
radv_temp_descriptor_set_destroy(struct radv_device *device,
                                 VkDescriptorSet _set)
{
        RADV_FROM_HANDLE(radv_descriptor_set, set, _set);

        radv_descriptor_set_destroy(device, NULL, set, false);
}

VkResult radv_CreateDescriptorPool(
        VkDevice                                    _device,
        const VkDescriptorPoolCreateInfo*           pCreateInfo,
        const VkAllocationCallbacks*                pAllocator,
        VkDescriptorPool*                           pDescriptorPool)
{
        RADV_FROM_HANDLE(radv_device, device, _device);
        struct radv_descriptor_pool *pool;
        int size = sizeof(struct radv_descriptor_pool);
        uint64_t bo_size = 0;
        pool = vk_alloc2(&device->alloc, pAllocator, size, 8,
                         VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
        if (!pool)
                return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

        memset(pool, 0, sizeof(*pool));

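        /* Upper-bound the GPU memory the pool can hand out; the per-type
         * sizes mirror the ones used in radv_CreateDescriptorSetLayout. */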
        for (unsigned i = 0; i < pCreateInfo->poolSizeCount; ++i) {
                switch(pCreateInfo->pPoolSizes[i].type) {
                case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
                case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
                        break;
                case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
                case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
                case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
                case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
                case VK_DESCRIPTOR_TYPE_SAMPLER:
                        /* 32 as we may need to align for images */
                        bo_size += 32 * pCreateInfo->pPoolSizes[i].descriptorCount;
                        break;
                case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE:
                case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
                case VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT:
                        bo_size += 64 * pCreateInfo->pPoolSizes[i].descriptorCount;
                        break;
                case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
                        bo_size += 96 * pCreateInfo->pPoolSizes[i].descriptorCount;
                        break;
                default:
                        unreachable("unknown descriptor type\n");
                        break;
                }
        }

        if (bo_size) {
                pool->bo = device->ws->buffer_create(device->ws, bo_size,
                                                     32, RADEON_DOMAIN_VRAM, 0);
                pool->mapped_ptr = (uint8_t*)device->ws->buffer_map(pool->bo);
        }
        pool->size = bo_size;

        list_inithead(&pool->vram_list);
        *pDescriptorPool = radv_descriptor_pool_to_handle(pool);
        return VK_SUCCESS;
}

void radv_DestroyDescriptorPool(
        VkDevice                                    _device,
        VkDescriptorPool                            _pool,
        const VkAllocationCallbacks*                pAllocator)
{
        RADV_FROM_HANDLE(radv_device, device, _device);
        RADV_FROM_HANDLE(radv_descriptor_pool, pool, _pool);

        if (!pool)
                return;

        list_for_each_entry_safe(struct radv_descriptor_set, set,
                                 &pool->vram_list, vram_list) {
                radv_descriptor_set_destroy(device, pool, set, false);
        }

        if (pool->bo)
                device->ws->buffer_destroy(pool->bo);
        vk_free2(&device->alloc, pAllocator, pool);
}

VkResult radv_ResetDescriptorPool(
        VkDevice                                    _device,
        VkDescriptorPool                            descriptorPool,
        VkDescriptorPoolResetFlags                  flags)
{
        RADV_FROM_HANDLE(radv_device, device, _device);
        RADV_FROM_HANDLE(radv_descriptor_pool, pool, descriptorPool);

        list_for_each_entry_safe(struct radv_descriptor_set, set,
                                 &pool->vram_list, vram_list) {
                radv_descriptor_set_destroy(device, pool, set, false);
        }

        list_inithead(&pool->vram_list);

        pool->current_offset = 0;

        return VK_SUCCESS;
}

VkResult radv_AllocateDescriptorSets(
        VkDevice                                    _device,
        const VkDescriptorSetAllocateInfo*          pAllocateInfo,
        VkDescriptorSet*                            pDescriptorSets)
{
        RADV_FROM_HANDLE(radv_device, device, _device);
        RADV_FROM_HANDLE(radv_descriptor_pool, pool, pAllocateInfo->descriptorPool);

        VkResult result = VK_SUCCESS;
        uint32_t i;
        struct radv_descriptor_set *set;

        /* allocate storage for each descriptor set from the pool */
        for (i = 0; i < pAllocateInfo->descriptorSetCount; i++) {
                RADV_FROM_HANDLE(radv_descriptor_set_layout, layout,
                                 pAllocateInfo->pSetLayouts[i]);

                result = radv_descriptor_set_create(device, pool, NULL, layout, &set);
                if (result != VK_SUCCESS)
                        break;

                pDescriptorSets[i] = radv_descriptor_set_to_handle(set);
        }

        if (result != VK_SUCCESS)
                radv_FreeDescriptorSets(_device, pAllocateInfo->descriptorPool,
                                        i, pDescriptorSets);
        return result;
}

VkResult radv_FreeDescriptorSets(
        VkDevice                                    _device,
        VkDescriptorPool                            descriptorPool,
        uint32_t                                    count,
        const VkDescriptorSet*                      pDescriptorSets)
{
        RADV_FROM_HANDLE(radv_device, device, _device);
        RADV_FROM_HANDLE(radv_descriptor_pool, pool, descriptorPool);

        for (uint32_t i = 0; i < count; i++) {
                RADV_FROM_HANDLE(radv_descriptor_set, set, pDescriptorSets[i]);

                if (set)
                        radv_descriptor_set_destroy(device, pool, set, true);
        }
        return VK_SUCCESS;
}

static void write_texel_buffer_descriptor(struct radv_device *device,
                                          unsigned *dst,
                                          struct radeon_winsys_bo **buffer_list,
                                          const VkBufferView _buffer_view)
{
        RADV_FROM_HANDLE(radv_buffer_view, buffer_view, _buffer_view);

        memcpy(dst, buffer_view->state, 4 * 4);
        *buffer_list = buffer_view->bo;
}

static void write_buffer_descriptor(struct radv_device *device,
                                    unsigned *dst,
                                    struct radeon_winsys_bo **buffer_list,
                                    const VkDescriptorBufferInfo *buffer_info)
{
        RADV_FROM_HANDLE(radv_buffer, buffer, buffer_info->buffer);
        uint64_t va = device->ws->buffer_get_va(buffer->bo);
        uint32_t range = buffer_info->range;

        if (buffer_info->range == VK_WHOLE_SIZE)
                range = buffer->size - buffer_info->offset;

        va += buffer_info->offset + buffer->offset;
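        /* Build the four-dword buffer resource descriptor: low 32 bits of the
         * address, the high address bits, the range in bytes, and a raw
         * 32-bit data-format word. */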
        dst[0] = va;
        dst[1] = S_008F04_BASE_ADDRESS_HI(va >> 32);
        dst[2] = range;
        dst[3] = S_008F0C_DST_SEL_X(V_008F0C_SQ_SEL_X) |
                 S_008F0C_DST_SEL_Y(V_008F0C_SQ_SEL_Y) |
                 S_008F0C_DST_SEL_Z(V_008F0C_SQ_SEL_Z) |
                 S_008F0C_DST_SEL_W(V_008F0C_SQ_SEL_W) |
                 S_008F0C_NUM_FORMAT(V_008F0C_BUF_NUM_FORMAT_FLOAT) |
                 S_008F0C_DATA_FORMAT(V_008F0C_BUF_DATA_FORMAT_32);

        *buffer_list = buffer->bo;
}

static void write_dynamic_buffer_descriptor(struct radv_device *device,
                                            struct radv_descriptor_range *range,
                                            struct radeon_winsys_bo **buffer_list,
                                            const VkDescriptorBufferInfo *buffer_info)
{
        RADV_FROM_HANDLE(radv_buffer, buffer, buffer_info->buffer);
        uint64_t va = device->ws->buffer_get_va(buffer->bo);
        unsigned size = buffer_info->range;

        if (buffer_info->range == VK_WHOLE_SIZE)
                size = buffer->size - buffer_info->offset;

        va += buffer_info->offset + buffer->offset;
        range->va = va;
        range->size = size;

        *buffer_list = buffer->bo;
}

static void
write_image_descriptor(struct radv_device *device,
                       unsigned *dst,
                       struct radeon_winsys_bo **buffer_list,
                       const VkDescriptorImageInfo *image_info)
{
        RADV_FROM_HANDLE(radv_image_view, iview, image_info->imageView);
        memcpy(dst, iview->descriptor, 8 * 4);
        memcpy(dst + 8, iview->fmask_descriptor, 8 * 4);
        *buffer_list = iview->bo;
}

static void
write_combined_image_sampler_descriptor(struct radv_device *device,
                                        unsigned *dst,
                                        struct radeon_winsys_bo **buffer_list,
                                        const VkDescriptorImageInfo *image_info,
                                        bool has_sampler)
{
        RADV_FROM_HANDLE(radv_sampler, sampler, image_info->sampler);

        write_image_descriptor(device, dst, buffer_list, image_info);
        /* copy over sampler state */
        if (has_sampler)
                memcpy(dst + 16, sampler->state, 16);
}

static void
write_sampler_descriptor(struct radv_device *device,
                         unsigned *dst,
                         const VkDescriptorImageInfo *image_info)
{
        RADV_FROM_HANDLE(radv_sampler, sampler, image_info->sampler);

        memcpy(dst, sampler->state, 16);
}

void radv_UpdateDescriptorSets(
        VkDevice                                    _device,
        uint32_t                                    descriptorWriteCount,
        const VkWriteDescriptorSet*                 pDescriptorWrites,
        uint32_t                                    descriptorCopyCount,
        const VkCopyDescriptorSet*                  pDescriptorCopies)
{
        RADV_FROM_HANDLE(radv_device, device, _device);
        uint32_t i, j;
        for (i = 0; i < descriptorWriteCount; i++) {
                const VkWriteDescriptorSet *writeset = &pDescriptorWrites[i];
                RADV_FROM_HANDLE(radv_descriptor_set, set, writeset->dstSet);
                const struct radv_descriptor_set_binding_layout *binding_layout =
                        set->layout->binding + writeset->dstBinding;
                uint32_t *ptr = set->mapped_ptr;
                struct radeon_winsys_bo **buffer_list = set->descriptors;

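                /* Binding offsets and sizes are byte counts while mapped_ptr
                 * is a dword pointer, hence the divisions by four. */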
                ptr += binding_layout->offset / 4;
                ptr += binding_layout->size * writeset->dstArrayElement / 4;
                buffer_list += binding_layout->buffer_offset;
                buffer_list += binding_layout->buffer_count * writeset->dstArrayElement;
                for (j = 0; j < writeset->descriptorCount; ++j) {
                        switch(writeset->descriptorType) {
                        case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
                        case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC: {
                                unsigned idx = writeset->dstArrayElement + j;
                                idx += binding_layout->dynamic_offset_offset;
                                write_dynamic_buffer_descriptor(device, set->dynamic_descriptors + idx,
                                                                buffer_list, writeset->pBufferInfo + j);
                                break;
                        }
                        case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
                        case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
                                write_buffer_descriptor(device, ptr, buffer_list,
                                                        writeset->pBufferInfo + j);
                                break;
                        case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
                        case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
                                write_texel_buffer_descriptor(device, ptr, buffer_list,
                                                              writeset->pTexelBufferView[j]);
                                break;
                        case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE:
                        case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
                        case VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT:
                                write_image_descriptor(device, ptr, buffer_list,
                                                       writeset->pImageInfo + j);
                                break;
                        case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
                                write_combined_image_sampler_descriptor(device, ptr, buffer_list,
                                                                        writeset->pImageInfo + j,
                                                                        !binding_layout->immutable_samplers);
                                break;
                        case VK_DESCRIPTOR_TYPE_SAMPLER:
                                assert(!binding_layout->immutable_samplers);
                                write_sampler_descriptor(device, ptr,
                                                         writeset->pImageInfo + j);
                                break;
                        default:
                                unreachable("unimplemented descriptor type");
                                break;
                        }
                        ptr += binding_layout->size / 4;
                        buffer_list += binding_layout->buffer_count;
                }
        }
        if (descriptorCopyCount)
                radv_finishme("copy descriptors");
}
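
Appended for illustration, not part of the original file: a minimal application-side sketch of the API path that exercises the immutable-sampler copy above. The helper name and the absence of error handling are hypothetical; the Vulkan entry points and structures are the standard ones that reach radv_CreateDescriptorSetLayout in this driver.

#include <vulkan/vulkan.h>

/* Hypothetical helper: create a set layout with one SAMPLER binding whose
 * sampler is baked in at layout-creation time.  `sampler` is assumed to be
 * a previously created VkSampler. */
static VkDescriptorSetLayout
create_layout_with_immutable_sampler(VkDevice device, VkSampler sampler)
{
        /* pImmutableSamplers makes the driver copy the 16-byte sampler
         * state (uint32_t[4] per sampler) into the layout; the VkSampler
         * need not outlive this call for descriptor-write purposes. */
        const VkDescriptorSetLayoutBinding binding = {
                .binding = 0,
                .descriptorType = VK_DESCRIPTOR_TYPE_SAMPLER,
                .descriptorCount = 1,
                .stageFlags = VK_SHADER_STAGE_FRAGMENT_BIT,
                .pImmutableSamplers = &sampler,
        };
        const VkDescriptorSetLayoutCreateInfo info = {
                .sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO,
                .bindingCount = 1,
                .pBindings = &binding,
        };
        VkDescriptorSetLayout layout = VK_NULL_HANDLE;

        /* Dispatches into radv_CreateDescriptorSetLayout above. */
        vkCreateDescriptorSetLayout(device, &info, NULL, &layout);
        return layout;
}

Sets allocated against such a layout then get their sampler dwords pre-filled by radv_descriptor_set_create, and vkUpdateDescriptorSets asserts that no write targets an immutable SAMPLER binding.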