radv: Use different allocator for descriptor set vram.
mesa.git: src/amd/vulkan/radv_descriptor_set.c
/*
 * Copyright © 2016 Red Hat.
 * Copyright © 2016 Bas Nieuwenhuizen
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
#include <assert.h>
#include <stdbool.h>
#include <string.h>
#include <unistd.h>
#include <fcntl.h>

#include "util/mesa-sha1.h"
#include "radv_private.h"
#include "sid.h"

VkResult radv_CreateDescriptorSetLayout(
	VkDevice                                    _device,
	const VkDescriptorSetLayoutCreateInfo*      pCreateInfo,
	const VkAllocationCallbacks*                pAllocator,
	VkDescriptorSetLayout*                      pSetLayout)
{
	RADV_FROM_HANDLE(radv_device, device, _device);
	struct radv_descriptor_set_layout *set_layout;

	assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO);

	uint32_t max_binding = 0;
	uint32_t immutable_sampler_count = 0;
	for (uint32_t j = 0; j < pCreateInfo->bindingCount; j++) {
		max_binding = MAX2(max_binding, pCreateInfo->pBindings[j].binding);
		if (pCreateInfo->pBindings[j].pImmutableSamplers)
			immutable_sampler_count += pCreateInfo->pBindings[j].descriptorCount;
	}

	size_t size = sizeof(struct radv_descriptor_set_layout) +
		      (max_binding + 1) * sizeof(set_layout->binding[0]) +
		      immutable_sampler_count * sizeof(struct radv_sampler *);

	set_layout = vk_alloc2(&device->alloc, pAllocator, size, 8,
			       VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
	if (!set_layout)
		return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

	/* We just allocate all the samplers at the end of the struct */
	struct radv_sampler **samplers =
		(struct radv_sampler **)&set_layout->binding[max_binding + 1];

	set_layout->binding_count = max_binding + 1;
	set_layout->shader_stages = 0;
	set_layout->size = 0;

	memset(set_layout->binding, 0, size - sizeof(struct radv_descriptor_set_layout));

	uint32_t buffer_count = 0;
	uint32_t dynamic_offset_count = 0;

	for (uint32_t j = 0; j < pCreateInfo->bindingCount; j++) {
		const VkDescriptorSetLayoutBinding *binding = &pCreateInfo->pBindings[j];
		uint32_t b = binding->binding;
		uint32_t alignment;

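		/* The sizes below follow the GCN resource descriptor layout:
		 * buffer and sampler descriptors are 4 dwords (16 bytes),
		 * image descriptors are 8 dwords plus 8 more for the FMASK
		 * descriptor (64 bytes total), and combined image+sampler
		 * elements reserve another 32-byte slot for the sampler. */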
		switch (binding->descriptorType) {
		case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
		case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
			set_layout->binding[b].dynamic_offset_count = 1;
			set_layout->dynamic_shader_stages |= binding->stageFlags;
			set_layout->binding[b].size = 0;
			set_layout->binding[b].buffer_count = 1;
			alignment = 1;
			break;
		case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
		case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
		case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
		case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
			set_layout->binding[b].size = 16;
			set_layout->binding[b].buffer_count = 1;
			alignment = 16;
			break;
		case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
		case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE:
		case VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT:
			/* main descriptor + fmask descriptor */
			set_layout->binding[b].size = 64;
			set_layout->binding[b].buffer_count = 1;
			alignment = 32;
			break;
		case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
			/* main descriptor + fmask descriptor + sampler */
			set_layout->binding[b].size = 96;
			set_layout->binding[b].buffer_count = 1;
			alignment = 32;
			break;
		case VK_DESCRIPTOR_TYPE_SAMPLER:
			set_layout->binding[b].size = 16;
			alignment = 16;
			break;
		default:
			unreachable("unknown descriptor type\n");
			break;
		}

		set_layout->size = align(set_layout->size, alignment);
		assert(binding->descriptorCount > 0);
		set_layout->binding[b].type = binding->descriptorType;
		set_layout->binding[b].array_size = binding->descriptorCount;
		set_layout->binding[b].offset = set_layout->size;
		set_layout->binding[b].buffer_offset = buffer_count;
		set_layout->binding[b].dynamic_offset_offset = dynamic_offset_count;

		set_layout->size += binding->descriptorCount * set_layout->binding[b].size;
		buffer_count += binding->descriptorCount * set_layout->binding[b].buffer_count;
		dynamic_offset_count += binding->descriptorCount *
			set_layout->binding[b].dynamic_offset_count;

		if (binding->pImmutableSamplers) {
			set_layout->binding[b].immutable_samplers = samplers;
			samplers += binding->descriptorCount;

			for (uint32_t i = 0; i < binding->descriptorCount; i++)
				set_layout->binding[b].immutable_samplers[i] =
					radv_sampler_from_handle(binding->pImmutableSamplers[i]);
		} else {
			set_layout->binding[b].immutable_samplers = NULL;
		}

		set_layout->shader_stages |= binding->stageFlags;
	}

	set_layout->buffer_count = buffer_count;
	set_layout->dynamic_offset_count = dynamic_offset_count;

	*pSetLayout = radv_descriptor_set_layout_to_handle(set_layout);

	return VK_SUCCESS;
}

void radv_DestroyDescriptorSetLayout(
	VkDevice                                    _device,
	VkDescriptorSetLayout                       _set_layout,
	const VkAllocationCallbacks*                pAllocator)
{
	RADV_FROM_HANDLE(radv_device, device, _device);
	RADV_FROM_HANDLE(radv_descriptor_set_layout, set_layout, _set_layout);

	if (!set_layout)
		return;

	vk_free2(&device->alloc, pAllocator, set_layout);
}

/*
 * Pipeline layouts. These have nothing to do with the pipeline. They are
 * just multiple descriptor set layouts pasted together.
 */

VkResult radv_CreatePipelineLayout(
	VkDevice                                    _device,
	const VkPipelineLayoutCreateInfo*           pCreateInfo,
	const VkAllocationCallbacks*                pAllocator,
	VkPipelineLayout*                           pPipelineLayout)
{
	RADV_FROM_HANDLE(radv_device, device, _device);
	struct radv_pipeline_layout *layout;
	struct mesa_sha1 *ctx;

	assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO);

	layout = vk_alloc2(&device->alloc, pAllocator, sizeof(*layout), 8,
			   VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
	if (layout == NULL)
		return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

	layout->num_sets = pCreateInfo->setLayoutCount;

	unsigned dynamic_offset_count = 0;

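	/* Hash the per-set binding layouts and the push constant size so
	 * the resulting sha1 can be used to key caches on the layout. */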
	ctx = _mesa_sha1_init();
	for (uint32_t set = 0; set < pCreateInfo->setLayoutCount; set++) {
		RADV_FROM_HANDLE(radv_descriptor_set_layout, set_layout,
				 pCreateInfo->pSetLayouts[set]);
		layout->set[set].layout = set_layout;

		layout->set[set].dynamic_offset_start = dynamic_offset_count;
		for (uint32_t b = 0; b < set_layout->binding_count; b++) {
			dynamic_offset_count += set_layout->binding[b].array_size *
				set_layout->binding[b].dynamic_offset_count;
		}
		_mesa_sha1_update(ctx, set_layout->binding,
				  sizeof(set_layout->binding[0]) * set_layout->binding_count);
	}

	layout->dynamic_offset_count = dynamic_offset_count;
	layout->push_constant_size = 0;
	for (unsigned i = 0; i < pCreateInfo->pushConstantRangeCount; ++i) {
		const VkPushConstantRange *range = pCreateInfo->pPushConstantRanges + i;
		layout->push_constant_size = MAX2(layout->push_constant_size,
						  range->offset + range->size);
	}

	layout->push_constant_size = align(layout->push_constant_size, 16);
	_mesa_sha1_update(ctx, &layout->push_constant_size,
			  sizeof(layout->push_constant_size));
	_mesa_sha1_final(ctx, layout->sha1);
	*pPipelineLayout = radv_pipeline_layout_to_handle(layout);

	return VK_SUCCESS;
}

void radv_DestroyPipelineLayout(
	VkDevice                                    _device,
	VkPipelineLayout                            _pipelineLayout,
	const VkAllocationCallbacks*                pAllocator)
{
	RADV_FROM_HANDLE(radv_device, device, _device);
	RADV_FROM_HANDLE(radv_pipeline_layout, pipeline_layout, _pipelineLayout);

	if (!pipeline_layout)
		return;
	vk_free2(&device->alloc, pAllocator, pipeline_layout);
}

#define EMPTY 1

static VkResult
radv_descriptor_set_create(struct radv_device *device,
			   struct radv_descriptor_pool *pool,
			   struct radv_cmd_buffer *cmd_buffer,
			   const struct radv_descriptor_set_layout *layout,
			   struct radv_descriptor_set **out_set)
{
	struct radv_descriptor_set *set;
	unsigned mem_size = sizeof(struct radv_descriptor_set) +
		sizeof(struct radeon_winsys_bo *) * layout->buffer_count;
	set = vk_alloc2(&device->alloc, NULL, mem_size, 8,
			VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);

	if (!set)
		return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

	memset(set, 0, mem_size);

	if (layout->dynamic_offset_count) {
		unsigned size = sizeof(struct radv_descriptor_range) *
			layout->dynamic_offset_count;
		set->dynamic_descriptors = vk_alloc2(&device->alloc, NULL, size, 8,
						     VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);

		if (!set->dynamic_descriptors) {
			vk_free2(&device->alloc, NULL, set);
			return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
		}
	}

	set->layout = layout;
	if (layout->size) {
		uint32_t layout_size = align_u32(layout->size, 32);
		set->size = layout->size;
		if (!cmd_buffer) {
			/* try to allocate linearly first, so that we don't spend
			 * time looking for gaps if the app only allocates &
			 * resets via the pool. */
			if (pool->current_offset + layout_size <= pool->size) {
				set->bo = pool->bo;
				set->mapped_ptr = (uint32_t*)(pool->mapped_ptr + pool->current_offset);
				set->va = device->ws->buffer_get_va(set->bo) + pool->current_offset;
				pool->current_offset += layout_size;
				list_addtail(&set->vram_list, &pool->vram_list);
			} else {
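				/* Linear allocation failed: fall back to a
				 * first-fit scan. vram_list stays sorted by
				 * offset, so walk it until a gap large
				 * enough for this set appears. */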
				uint64_t offset = 0;
				struct list_head *prev = &pool->vram_list;
				struct radv_descriptor_set *cur;
				LIST_FOR_EACH_ENTRY(cur, &pool->vram_list, vram_list) {
					uint64_t start = (uint8_t*)cur->mapped_ptr - pool->mapped_ptr;
					if (start - offset >= layout_size)
						break;

					offset = start + cur->size;
					prev = &cur->vram_list;
				}

				if (pool->size - offset < layout_size) {
					vk_free2(&device->alloc, NULL, set->dynamic_descriptors);
					vk_free2(&device->alloc, NULL, set);
					return vk_error(VK_ERROR_OUT_OF_POOL_MEMORY_KHR);
				}
				set->bo = pool->bo;
				set->mapped_ptr = (uint32_t*)(pool->mapped_ptr + offset);
				set->va = device->ws->buffer_get_va(set->bo) + offset;
				list_add(&set->vram_list, prev);
			}
		} else {
			unsigned bo_offset;
			if (!radv_cmd_buffer_upload_alloc(cmd_buffer, set->size, 32,
							  &bo_offset,
							  (void**)&set->mapped_ptr)) {
				vk_free2(&device->alloc, NULL, set->dynamic_descriptors);
				vk_free2(&device->alloc, NULL, set);
				return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
			}

			set->va = device->ws->buffer_get_va(cmd_buffer->upload.upload_bo);
			set->va += bo_offset;
		}
	}

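	/* Bake immutable samplers straight into the set's descriptor
	 * memory; for combined image+sampler bindings the sampler words
	 * start 16 dwords (64 bytes) into each element, after the image
	 * and FMASK descriptors. */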
	for (unsigned i = 0; i < layout->binding_count; ++i) {
		if (!layout->binding[i].immutable_samplers)
			continue;

		unsigned offset = layout->binding[i].offset / 4;
		if (layout->binding[i].type == VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER)
			offset += 16;

		for (unsigned j = 0; j < layout->binding[i].array_size; ++j) {
			struct radv_sampler *sampler = layout->binding[i].immutable_samplers[j];

			memcpy(set->mapped_ptr + offset, &sampler->state, 16);
			offset += layout->binding[i].size / 4;
		}
	}
	*out_set = set;
	return VK_SUCCESS;
}

static void
radv_descriptor_set_destroy(struct radv_device *device,
			    struct radv_descriptor_pool *pool,
			    struct radv_descriptor_set *set,
			    bool free_bo)
{
	if (free_bo && set->size)
		list_del(&set->vram_list);
	if (set->dynamic_descriptors)
		vk_free2(&device->alloc, NULL, set->dynamic_descriptors);
	vk_free2(&device->alloc, NULL, set);
}

VkResult
radv_temp_descriptor_set_create(struct radv_device *device,
				struct radv_cmd_buffer *cmd_buffer,
				VkDescriptorSetLayout _layout,
				VkDescriptorSet *_set)
{
	RADV_FROM_HANDLE(radv_descriptor_set_layout, layout, _layout);
	struct radv_descriptor_set *set = NULL;
	VkResult ret;

	ret = radv_descriptor_set_create(device, NULL, cmd_buffer, layout, &set);
	*_set = radv_descriptor_set_to_handle(set);
	return ret;
}

void
radv_temp_descriptor_set_destroy(struct radv_device *device,
				 VkDescriptorSet _set)
{
	RADV_FROM_HANDLE(radv_descriptor_set, set, _set);

	radv_descriptor_set_destroy(device, NULL, set, false);
}

VkResult radv_CreateDescriptorPool(
	VkDevice                                    _device,
	const VkDescriptorPoolCreateInfo*           pCreateInfo,
	const VkAllocationCallbacks*                pAllocator,
	VkDescriptorPool*                           pDescriptorPool)
{
	RADV_FROM_HANDLE(radv_device, device, _device);
	struct radv_descriptor_pool *pool;
	int size = sizeof(struct radv_descriptor_pool);
	uint64_t bo_size = 0;
	pool = vk_alloc2(&device->alloc, pAllocator, size, 8,
			 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
	if (!pool)
		return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

	memset(pool, 0, sizeof(*pool));

	for (unsigned i = 0; i < pCreateInfo->poolSizeCount; ++i) {
		switch(pCreateInfo->pPoolSizes[i].type) {
		case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
		case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
			break;
		case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
		case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
		case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
		case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
		case VK_DESCRIPTOR_TYPE_SAMPLER:
			/* 32 as we may need to align for images */
			bo_size += 32 * pCreateInfo->pPoolSizes[i].descriptorCount;
			break;
		case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE:
		case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
		case VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT:
			bo_size += 64 * pCreateInfo->pPoolSizes[i].descriptorCount;
			break;
		case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
			bo_size += 96 * pCreateInfo->pPoolSizes[i].descriptorCount;
			break;
		default:
			unreachable("unknown descriptor type\n");
			break;
		}
	}

	if (bo_size) {
		pool->bo = device->ws->buffer_create(device->ws, bo_size,
						     32, RADEON_DOMAIN_VRAM, 0);
		pool->mapped_ptr = (uint8_t*)device->ws->buffer_map(pool->bo);
	}
	pool->size = bo_size;

	list_inithead(&pool->vram_list);
	*pDescriptorPool = radv_descriptor_pool_to_handle(pool);
	return VK_SUCCESS;
}

void radv_DestroyDescriptorPool(
	VkDevice                                    _device,
	VkDescriptorPool                            _pool,
	const VkAllocationCallbacks*                pAllocator)
{
	RADV_FROM_HANDLE(radv_device, device, _device);
	RADV_FROM_HANDLE(radv_descriptor_pool, pool, _pool);

	if (!pool)
		return;

	list_for_each_entry_safe(struct radv_descriptor_set, set,
				 &pool->vram_list, vram_list) {
		radv_descriptor_set_destroy(device, pool, set, false);
	}

	if (pool->bo)
		device->ws->buffer_destroy(pool->bo);
	vk_free2(&device->alloc, pAllocator, pool);
}

VkResult radv_ResetDescriptorPool(
	VkDevice                                    _device,
	VkDescriptorPool                            descriptorPool,
	VkDescriptorPoolResetFlags                  flags)
{
	RADV_FROM_HANDLE(radv_device, device, _device);
	RADV_FROM_HANDLE(radv_descriptor_pool, pool, descriptorPool);

	list_for_each_entry_safe(struct radv_descriptor_set, set,
				 &pool->vram_list, vram_list) {
		radv_descriptor_set_destroy(device, pool, set, false);
	}

	list_inithead(&pool->vram_list);

	pool->current_offset = 0;

	return VK_SUCCESS;
}

VkResult radv_AllocateDescriptorSets(
	VkDevice                                    _device,
	const VkDescriptorSetAllocateInfo*          pAllocateInfo,
	VkDescriptorSet*                            pDescriptorSets)
{
	RADV_FROM_HANDLE(radv_device, device, _device);
	RADV_FROM_HANDLE(radv_descriptor_pool, pool, pAllocateInfo->descriptorPool);

	VkResult result = VK_SUCCESS;
	uint32_t i;
	struct radv_descriptor_set *set;

	/* allocate one descriptor set from the pool per requested layout */
	for (i = 0; i < pAllocateInfo->descriptorSetCount; i++) {
		RADV_FROM_HANDLE(radv_descriptor_set_layout, layout,
				 pAllocateInfo->pSetLayouts[i]);

		result = radv_descriptor_set_create(device, pool, NULL, layout, &set);
		if (result != VK_SUCCESS)
			break;

		pDescriptorSets[i] = radv_descriptor_set_to_handle(set);
	}

	if (result != VK_SUCCESS)
		radv_FreeDescriptorSets(_device, pAllocateInfo->descriptorPool,
					i, pDescriptorSets);
	return result;
}

VkResult radv_FreeDescriptorSets(
	VkDevice                                    _device,
	VkDescriptorPool                            descriptorPool,
	uint32_t                                    count,
	const VkDescriptorSet*                      pDescriptorSets)
{
	RADV_FROM_HANDLE(radv_device, device, _device);
	RADV_FROM_HANDLE(radv_descriptor_pool, pool, descriptorPool);

	for (uint32_t i = 0; i < count; i++) {
		RADV_FROM_HANDLE(radv_descriptor_set, set, pDescriptorSets[i]);

		if (set)
			radv_descriptor_set_destroy(device, pool, set, true);
	}
	return VK_SUCCESS;
}

static void write_texel_buffer_descriptor(struct radv_device *device,
					  unsigned *dst,
					  struct radeon_winsys_bo **buffer_list,
					  const VkBufferView _buffer_view)
{
	RADV_FROM_HANDLE(radv_buffer_view, buffer_view, _buffer_view);

	memcpy(dst, buffer_view->state, 4 * 4);
	*buffer_list = buffer_view->bo;
}

static void write_buffer_descriptor(struct radv_device *device,
				    unsigned *dst,
				    struct radeon_winsys_bo **buffer_list,
				    const VkDescriptorBufferInfo *buffer_info)
{
	RADV_FROM_HANDLE(radv_buffer, buffer, buffer_info->buffer);
	uint64_t va = device->ws->buffer_get_va(buffer->bo);
	uint32_t range = buffer_info->range;

	if (buffer_info->range == VK_WHOLE_SIZE)
		range = buffer->size - buffer_info->offset;

	va += buffer_info->offset + buffer->offset;
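	/* Build a 4-dword buffer resource descriptor: the base address is
	 * split across dst[0..1], dst[2] holds the range in bytes, and
	 * dst[3] carries the swizzle and number/data format fields. */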
	dst[0] = va;
	dst[1] = S_008F04_BASE_ADDRESS_HI(va >> 32);
	dst[2] = range;
	dst[3] = S_008F0C_DST_SEL_X(V_008F0C_SQ_SEL_X) |
		 S_008F0C_DST_SEL_Y(V_008F0C_SQ_SEL_Y) |
		 S_008F0C_DST_SEL_Z(V_008F0C_SQ_SEL_Z) |
		 S_008F0C_DST_SEL_W(V_008F0C_SQ_SEL_W) |
		 S_008F0C_NUM_FORMAT(V_008F0C_BUF_NUM_FORMAT_FLOAT) |
		 S_008F0C_DATA_FORMAT(V_008F0C_BUF_DATA_FORMAT_32);

	*buffer_list = buffer->bo;
}

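/* Dynamic buffer descriptors are not written into GPU memory here; the base
 * address and size are stashed CPU-side so the final descriptor can be
 * emitted at bind time, once the dynamic offset is known. */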
static void write_dynamic_buffer_descriptor(struct radv_device *device,
					    struct radv_descriptor_range *range,
					    struct radeon_winsys_bo **buffer_list,
					    const VkDescriptorBufferInfo *buffer_info)
{
	RADV_FROM_HANDLE(radv_buffer, buffer, buffer_info->buffer);
	uint64_t va = device->ws->buffer_get_va(buffer->bo);
	unsigned size = buffer_info->range;

	if (buffer_info->range == VK_WHOLE_SIZE)
		size = buffer->size - buffer_info->offset;

	va += buffer_info->offset + buffer->offset;
	range->va = va;
	range->size = size;

	*buffer_list = buffer->bo;
}

static void
write_image_descriptor(struct radv_device *device,
		       unsigned *dst,
		       struct radeon_winsys_bo **buffer_list,
		       const VkDescriptorImageInfo *image_info)
{
	RADV_FROM_HANDLE(radv_image_view, iview, image_info->imageView);
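	/* 8 dwords of image descriptor followed by 8 dwords of FMASK
	 * descriptor, matching the 64-byte element size reserved in the
	 * set layout. */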
	memcpy(dst, iview->descriptor, 8 * 4);
	memcpy(dst + 8, iview->fmask_descriptor, 8 * 4);
	*buffer_list = iview->bo;
}

static void
write_combined_image_sampler_descriptor(struct radv_device *device,
					unsigned *dst,
					struct radeon_winsys_bo **buffer_list,
					const VkDescriptorImageInfo *image_info,
					bool has_sampler)
{
	RADV_FROM_HANDLE(radv_sampler, sampler, image_info->sampler);

	write_image_descriptor(device, dst, buffer_list, image_info);
	/* copy over sampler state */
	if (has_sampler)
		memcpy(dst + 16, sampler->state, 16);
}

static void
write_sampler_descriptor(struct radv_device *device,
			 unsigned *dst,
			 const VkDescriptorImageInfo *image_info)
{
	RADV_FROM_HANDLE(radv_sampler, sampler, image_info->sampler);

	memcpy(dst, sampler->state, 16);
}

void radv_UpdateDescriptorSets(
	VkDevice                                    _device,
	uint32_t                                    descriptorWriteCount,
	const VkWriteDescriptorSet*                 pDescriptorWrites,
	uint32_t                                    descriptorCopyCount,
	const VkCopyDescriptorSet*                  pDescriptorCopies)
{
	RADV_FROM_HANDLE(radv_device, device, _device);
	uint32_t i, j;
	for (i = 0; i < descriptorWriteCount; i++) {
		const VkWriteDescriptorSet *writeset = &pDescriptorWrites[i];
		RADV_FROM_HANDLE(radv_descriptor_set, set, writeset->dstSet);
		const struct radv_descriptor_set_binding_layout *binding_layout =
			set->layout->binding + writeset->dstBinding;
		uint32_t *ptr = set->mapped_ptr;
		struct radeon_winsys_bo **buffer_list = set->descriptors;

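		/* Binding offsets and sizes are byte counts, while
		 * mapped_ptr is a dword pointer, hence the divisions by 4. */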
		ptr += binding_layout->offset / 4;
		ptr += binding_layout->size * writeset->dstArrayElement / 4;
		buffer_list += binding_layout->buffer_offset;
		buffer_list += binding_layout->buffer_count * writeset->dstArrayElement;
		for (j = 0; j < writeset->descriptorCount; ++j) {
			switch(writeset->descriptorType) {
			case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
			case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC: {
				unsigned idx = writeset->dstArrayElement + j;
				idx += binding_layout->dynamic_offset_offset;
				write_dynamic_buffer_descriptor(device, set->dynamic_descriptors + idx,
								buffer_list, writeset->pBufferInfo + j);
				break;
			}
			case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
			case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
				write_buffer_descriptor(device, ptr, buffer_list,
							writeset->pBufferInfo + j);
				break;
			case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
			case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
				write_texel_buffer_descriptor(device, ptr, buffer_list,
							      writeset->pTexelBufferView[j]);
				break;
			case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE:
			case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
			case VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT:
				write_image_descriptor(device, ptr, buffer_list,
						       writeset->pImageInfo + j);
				break;
			case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
				write_combined_image_sampler_descriptor(device, ptr, buffer_list,
									writeset->pImageInfo + j,
									!binding_layout->immutable_samplers);
				break;
			case VK_DESCRIPTOR_TYPE_SAMPLER:
				assert(!binding_layout->immutable_samplers);
				write_sampler_descriptor(device, ptr,
							 writeset->pImageInfo + j);
				break;
			default:
				unreachable("unimplemented descriptor type");
				break;
			}
			ptr += binding_layout->size / 4;
			buffer_list += binding_layout->buffer_count;
		}
	}

	if (descriptorCopyCount)
		radv_finishme("copy descriptors");
}