radv: move to using shared vk_alloc inlines.
[mesa.git] src/amd/vulkan/radv_descriptor_set.c
/*
 * Copyright © 2016 Red Hat.
 * Copyright © 2016 Bas Nieuwenhuizen
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
#include <assert.h>
#include <stdbool.h>
#include <string.h>
#include <unistd.h>
#include <fcntl.h>

#include "util/mesa-sha1.h"
#include "radv_private.h"
#include "sid.h"

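/* Allocation in this file goes through the shared vk_alloc inlines from
 * vk_alloc.h rather than driver-local wrappers. As a rough sketch of the
 * assumed semantics (see vk_alloc.h for the real definitions):
 *
 *    static inline void *
 *    vk_alloc2(const VkAllocationCallbacks *parent_alloc,
 *              const VkAllocationCallbacks *alloc,
 *              size_t size, size_t align,
 *              VkSystemAllocationScope scope)
 *    {
 *       // prefer the caller-supplied allocator, fall back to the parent's
 *       if (alloc == NULL)
 *          alloc = parent_alloc;
 *       return vk_alloc(alloc, size, align, scope);
 *    }
 *
 * vk_free2(parent_alloc, alloc, data) applies the same fallback rule, which
 * is why the creation entrypoints below pass pAllocator while purely
 * internal allocations pass NULL.
 */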
VkResult radv_CreateDescriptorSetLayout(
   VkDevice _device,
   const VkDescriptorSetLayoutCreateInfo* pCreateInfo,
   const VkAllocationCallbacks* pAllocator,
   VkDescriptorSetLayout* pSetLayout)
{
   RADV_FROM_HANDLE(radv_device, device, _device);
   struct radv_descriptor_set_layout *set_layout;

   assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO);

   uint32_t max_binding = 0;
   uint32_t immutable_sampler_count = 0;
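   /* First pass over the bindings: find the highest binding index and count
    * the immutable samplers, so the layout struct, its binding array, and
    * the sampler pointers all fit in a single allocation. */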
   for (uint32_t j = 0; j < pCreateInfo->bindingCount; j++) {
      max_binding = MAX2(max_binding, pCreateInfo->pBindings[j].binding);
      if (pCreateInfo->pBindings[j].pImmutableSamplers)
         immutable_sampler_count += pCreateInfo->pBindings[j].descriptorCount;
   }

   size_t size = sizeof(struct radv_descriptor_set_layout) +
      (max_binding + 1) * sizeof(set_layout->binding[0]) +
      immutable_sampler_count * sizeof(struct radv_sampler *);

   set_layout = vk_alloc2(&device->alloc, pAllocator, size, 8,
                          VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (!set_layout)
      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

   /* We just allocate all the samplers at the end of the struct */
   struct radv_sampler **samplers =
      (struct radv_sampler **)&set_layout->binding[max_binding + 1];

   set_layout->binding_count = max_binding + 1;
   set_layout->shader_stages = 0;
   set_layout->size = 0;

   memset(set_layout->binding, 0, size - sizeof(struct radv_descriptor_set_layout));

   uint32_t buffer_count = 0;
   uint32_t dynamic_offset_count = 0;

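   /* Second pass: give each binding its per-descriptor size and alignment
    * and assign its offsets within the set, accumulating the totals. */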
   for (uint32_t j = 0; j < pCreateInfo->bindingCount; j++) {
      const VkDescriptorSetLayoutBinding *binding = &pCreateInfo->pBindings[j];
      uint32_t b = binding->binding;
      uint32_t alignment;

      switch (binding->descriptorType) {
      case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
      case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
         set_layout->binding[b].dynamic_offset_count = 1;
         set_layout->dynamic_shader_stages |= binding->stageFlags;
         set_layout->binding[b].size = 0;
         set_layout->binding[b].buffer_count = 1;
         alignment = 1;
         break;
      case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
      case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
      case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
      case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
         set_layout->binding[b].size = 16;
         set_layout->binding[b].buffer_count = 1;
         alignment = 16;
         break;
      case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
      case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE:
      case VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT:
         /* main descriptor + fmask descriptor */
         set_layout->binding[b].size = 64;
         set_layout->binding[b].buffer_count = 1;
         alignment = 32;
         break;
      case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
         /* main descriptor + fmask descriptor + sampler */
         set_layout->binding[b].size = 96;
         set_layout->binding[b].buffer_count = 1;
         alignment = 32;
         break;
      case VK_DESCRIPTOR_TYPE_SAMPLER:
         set_layout->binding[b].size = 16;
         alignment = 16;
         break;
      default:
         unreachable("unknown descriptor type\n");
         break;
      }

      set_layout->size = align(set_layout->size, alignment);
      assert(binding->descriptorCount > 0);
      set_layout->binding[b].type = binding->descriptorType;
      set_layout->binding[b].array_size = binding->descriptorCount;
      set_layout->binding[b].offset = set_layout->size;
      set_layout->binding[b].buffer_offset = buffer_count;
      set_layout->binding[b].dynamic_offset_offset = dynamic_offset_count;

      set_layout->size += binding->descriptorCount * set_layout->binding[b].size;
      buffer_count += binding->descriptorCount * set_layout->binding[b].buffer_count;
      dynamic_offset_count += binding->descriptorCount *
         set_layout->binding[b].dynamic_offset_count;

      if (binding->pImmutableSamplers) {
         set_layout->binding[b].immutable_samplers = samplers;
         samplers += binding->descriptorCount;

         for (uint32_t i = 0; i < binding->descriptorCount; i++)
            set_layout->binding[b].immutable_samplers[i] =
               radv_sampler_from_handle(binding->pImmutableSamplers[i]);
      } else {
         set_layout->binding[b].immutable_samplers = NULL;
      }

      set_layout->shader_stages |= binding->stageFlags;
   }

   set_layout->buffer_count = buffer_count;
   set_layout->dynamic_offset_count = dynamic_offset_count;

   *pSetLayout = radv_descriptor_set_layout_to_handle(set_layout);

   return VK_SUCCESS;
}

void radv_DestroyDescriptorSetLayout(
   VkDevice _device,
   VkDescriptorSetLayout _set_layout,
   const VkAllocationCallbacks* pAllocator)
{
   RADV_FROM_HANDLE(radv_device, device, _device);
   RADV_FROM_HANDLE(radv_descriptor_set_layout, set_layout, _set_layout);

   if (!set_layout)
      return;

   vk_free2(&device->alloc, pAllocator, set_layout);
}

/*
 * Pipeline layouts. These have nothing to do with the pipeline. They are
 * just multiple descriptor set layouts pasted together.
 */

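/* Creation below concatenates the set layouts, records where each set's
 * dynamic offsets start, and hashes the binding data plus the push constant
 * size into layout->sha1, presumably for use as a pipeline cache key. */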
VkResult radv_CreatePipelineLayout(
   VkDevice _device,
   const VkPipelineLayoutCreateInfo* pCreateInfo,
   const VkAllocationCallbacks* pAllocator,
   VkPipelineLayout* pPipelineLayout)
{
   RADV_FROM_HANDLE(radv_device, device, _device);
   struct radv_pipeline_layout *layout;
   struct mesa_sha1 *ctx;

   assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO);

   layout = vk_alloc2(&device->alloc, pAllocator, sizeof(*layout), 8,
                      VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (layout == NULL)
      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

   layout->num_sets = pCreateInfo->setLayoutCount;

   unsigned dynamic_offset_count = 0;

   ctx = _mesa_sha1_init();
   for (uint32_t set = 0; set < pCreateInfo->setLayoutCount; set++) {
      RADV_FROM_HANDLE(radv_descriptor_set_layout, set_layout,
                       pCreateInfo->pSetLayouts[set]);
      layout->set[set].layout = set_layout;

      layout->set[set].dynamic_offset_start = dynamic_offset_count;
      for (uint32_t b = 0; b < set_layout->binding_count; b++) {
         dynamic_offset_count += set_layout->binding[b].array_size *
            set_layout->binding[b].dynamic_offset_count;
      }
      _mesa_sha1_update(ctx, set_layout->binding,
                        sizeof(set_layout->binding[0]) * set_layout->binding_count);
   }

   layout->dynamic_offset_count = dynamic_offset_count;
   layout->push_constant_size = 0;
   for (unsigned i = 0; i < pCreateInfo->pushConstantRangeCount; ++i) {
      const VkPushConstantRange *range = pCreateInfo->pPushConstantRanges + i;
      layout->push_constant_size = MAX2(layout->push_constant_size,
                                        range->offset + range->size);
   }

   layout->push_constant_size = align(layout->push_constant_size, 16);
   _mesa_sha1_update(ctx, &layout->push_constant_size,
                     sizeof(layout->push_constant_size));
   _mesa_sha1_final(ctx, layout->sha1);
   *pPipelineLayout = radv_pipeline_layout_to_handle(layout);

   return VK_SUCCESS;
}

void radv_DestroyPipelineLayout(
   VkDevice _device,
   VkPipelineLayout _pipelineLayout,
   const VkAllocationCallbacks* pAllocator)
{
   RADV_FROM_HANDLE(radv_device, device, _device);
   RADV_FROM_HANDLE(radv_pipeline_layout, pipeline_layout, _pipelineLayout);

   if (!pipeline_layout)
      return;
   vk_free2(&device->alloc, pAllocator, pipeline_layout);
}

#define EMPTY 1

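/* Descriptor set backing memory comes from one of two places: a descriptor
 * pool's BO (suballocated with a bump pointer first, then a first-fit free
 * list once the tail is exhausted), or, for temporary sets, the command
 * buffer's upload buffer. The host-side struct carries a trailing array of
 * winsys BO pointers, one per buffer-backed descriptor. */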
static VkResult
radv_descriptor_set_create(struct radv_device *device,
                           struct radv_descriptor_pool *pool,
                           struct radv_cmd_buffer *cmd_buffer,
                           const struct radv_descriptor_set_layout *layout,
                           struct radv_descriptor_set **out_set)
{
   struct radv_descriptor_set *set;
   unsigned mem_size = sizeof(struct radv_descriptor_set) +
      sizeof(struct radeon_winsys_bo *) * layout->buffer_count;
   set = vk_alloc2(&device->alloc, NULL, mem_size, 8,
                   VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);

   if (!set)
      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

   memset(set, 0, mem_size);

   if (layout->dynamic_offset_count) {
      unsigned size = sizeof(struct radv_descriptor_range) *
         layout->dynamic_offset_count;
      set->dynamic_descriptors = vk_alloc2(&device->alloc, NULL, size, 8,
                                           VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);

      if (!set->dynamic_descriptors) {
         vk_free2(&device->alloc, NULL, set);
         return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
      }
   }

   set->layout = layout;
   if (layout->size) {
      uint32_t layout_size = align_u32(layout->size, 32);
      set->size = layout->size;
      if (!cmd_buffer) {
         if (pool->current_offset + layout_size <= pool->size) {
            set->bo = pool->bo;
            set->mapped_ptr = (uint32_t*)(pool->mapped_ptr + pool->current_offset);
            set->va = device->ws->buffer_get_va(set->bo) + pool->current_offset;
            pool->current_offset += layout_size;
         } else {
            int entry = pool->free_list, prev_entry = -1;
            uint32_t offset;
            while (entry >= 0) {
               if (pool->free_nodes[entry].size >= layout_size) {
                  if (prev_entry >= 0)
                     pool->free_nodes[prev_entry].next = pool->free_nodes[entry].next;
                  else
                     pool->free_list = pool->free_nodes[entry].next;
                  break;
               }
               prev_entry = entry;
               entry = pool->free_nodes[entry].next;
            }

            if (entry < 0) {
               /* also release the dynamic descriptor array allocated above */
               vk_free2(&device->alloc, NULL, set->dynamic_descriptors);
               vk_free2(&device->alloc, NULL, set);
               return vk_error(VK_ERROR_OUT_OF_DEVICE_MEMORY);
            }
            offset = pool->free_nodes[entry].offset;
            pool->free_nodes[entry].next = pool->full_list;
            pool->full_list = entry;

            set->bo = pool->bo;
            set->mapped_ptr = (uint32_t*)(pool->mapped_ptr + offset);
            set->va = device->ws->buffer_get_va(set->bo) + offset;
         }
      } else {
         unsigned bo_offset;
         if (!radv_cmd_buffer_upload_alloc(cmd_buffer, set->size, 32,
                                           &bo_offset,
                                           (void**)&set->mapped_ptr)) {
            vk_free2(&device->alloc, NULL, set->dynamic_descriptors);
            vk_free2(&device->alloc, NULL, set);
            return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
         }

         set->va = device->ws->buffer_get_va(cmd_buffer->upload.upload_bo);
         set->va += bo_offset;
      }
   }

   if (pool)
      list_add(&set->descriptor_pool, &pool->descriptor_sets);
   else
      list_inithead(&set->descriptor_pool);

   for (unsigned i = 0; i < layout->binding_count; ++i) {
      if (!layout->binding[i].immutable_samplers)
         continue;

      unsigned offset = layout->binding[i].offset / 4;
      if (layout->binding[i].type == VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER)
         offset += 16;

      for (unsigned j = 0; j < layout->binding[i].array_size; ++j) {
         struct radv_sampler *sampler = layout->binding[i].immutable_samplers[j];

         memcpy(set->mapped_ptr + offset, &sampler->state, 16);
         offset += layout->binding[i].size / 4;
      }
   }
   *out_set = set;
   return VK_SUCCESS;
}

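/* Returning a set to the pool: take one node off the full list, record the
 * freed GPU range in it, and push it onto the free list for reuse. */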
static void
radv_descriptor_set_destroy(struct radv_device *device,
                            struct radv_descriptor_pool *pool,
                            struct radv_descriptor_set *set,
                            bool free_bo)
{
   if (free_bo && set->size) {
      assert(pool->full_list >= 0);
      int next = pool->free_nodes[pool->full_list].next;
      pool->free_nodes[pool->full_list].next = pool->free_list;
      pool->free_nodes[pool->full_list].offset = (uint8_t*)set->mapped_ptr - pool->mapped_ptr;
      pool->free_nodes[pool->full_list].size = align_u32(set->size, 32);
      pool->free_list = pool->full_list;
      pool->full_list = next;
   }
   if (set->dynamic_descriptors)
      vk_free2(&device->alloc, NULL, set->dynamic_descriptors);
   if (!list_empty(&set->descriptor_pool))
      list_del(&set->descriptor_pool);
   vk_free2(&device->alloc, NULL, set);
}

VkResult
radv_temp_descriptor_set_create(struct radv_device *device,
                                struct radv_cmd_buffer *cmd_buffer,
                                VkDescriptorSetLayout _layout,
                                VkDescriptorSet *_set)
{
   RADV_FROM_HANDLE(radv_descriptor_set_layout, layout, _layout);
   /* initialize so a failed create yields a NULL handle, not garbage */
   struct radv_descriptor_set *set = NULL;
   VkResult ret;

   ret = radv_descriptor_set_create(device, NULL, cmd_buffer, layout, &set);
   *_set = radv_descriptor_set_to_handle(set);
   return ret;
}

void
radv_temp_descriptor_set_destroy(struct radv_device *device,
                                 VkDescriptorSet _set)
{
   RADV_FROM_HANDLE(radv_descriptor_set, set, _set);

   radv_descriptor_set_destroy(device, NULL, set, false);
}

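/* Pool creation sizes the backing BO from the declared pool sizes: 32 bytes
 * per buffer/sampler descriptor (padded so images can be aligned), 64 per
 * image, 96 per combined image+sampler. Dynamic descriptors live in host
 * memory only, so they add nothing. Twice maxSets free-list nodes are
 * reserved, apparently as slack for fragmentation in the suballocator. */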
VkResult radv_CreateDescriptorPool(
   VkDevice _device,
   const VkDescriptorPoolCreateInfo* pCreateInfo,
   const VkAllocationCallbacks* pAllocator,
   VkDescriptorPool* pDescriptorPool)
{
   RADV_FROM_HANDLE(radv_device, device, _device);
   struct radv_descriptor_pool *pool;
   unsigned max_sets = pCreateInfo->maxSets * 2;
   int size = sizeof(struct radv_descriptor_pool) +
      max_sets * sizeof(struct radv_descriptor_pool_free_node);
   uint64_t bo_size = 0;
   pool = vk_alloc2(&device->alloc, pAllocator, size, 8,
                    VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (!pool)
      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

   memset(pool, 0, sizeof(*pool));

   pool->free_list = -1;
   pool->full_list = 0;
   pool->free_nodes[max_sets - 1].next = -1;
   pool->max_sets = max_sets;

   for (int i = 0; i + 1 < max_sets; ++i)
      pool->free_nodes[i].next = i + 1;

   for (unsigned i = 0; i < pCreateInfo->poolSizeCount; ++i) {
      switch(pCreateInfo->pPoolSizes[i].type) {
      case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
      case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
         break;
      case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
      case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
      case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
      case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
      case VK_DESCRIPTOR_TYPE_SAMPLER:
         /* 32 as we may need to align for images */
         bo_size += 32 * pCreateInfo->pPoolSizes[i].descriptorCount;
         break;
      case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE:
      case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
      case VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT:
         bo_size += 64 * pCreateInfo->pPoolSizes[i].descriptorCount;
         break;
      case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
         bo_size += 96 * pCreateInfo->pPoolSizes[i].descriptorCount;
         break;
      default:
         unreachable("unknown descriptor type\n");
         break;
      }
   }

   if (bo_size) {
      pool->bo = device->ws->buffer_create(device->ws, bo_size,
                                           32, RADEON_DOMAIN_VRAM, 0);
      pool->mapped_ptr = (uint8_t*)device->ws->buffer_map(pool->bo);
   }
   pool->size = bo_size;

   list_inithead(&pool->descriptor_sets);
   *pDescriptorPool = radv_descriptor_pool_to_handle(pool);
   return VK_SUCCESS;
}

void radv_DestroyDescriptorPool(
   VkDevice _device,
   VkDescriptorPool _pool,
   const VkAllocationCallbacks* pAllocator)
{
   RADV_FROM_HANDLE(radv_device, device, _device);
   RADV_FROM_HANDLE(radv_descriptor_pool, pool, _pool);

   if (!pool)
      return;

   list_for_each_entry_safe(struct radv_descriptor_set, set,
                            &pool->descriptor_sets, descriptor_pool) {
      radv_descriptor_set_destroy(device, pool, set, false);
   }

   if (pool->bo)
      device->ws->buffer_destroy(pool->bo);
   vk_free2(&device->alloc, pAllocator, pool);
}

VkResult radv_ResetDescriptorPool(
   VkDevice _device,
   VkDescriptorPool descriptorPool,
   VkDescriptorPoolResetFlags flags)
{
   RADV_FROM_HANDLE(radv_device, device, _device);
   RADV_FROM_HANDLE(radv_descriptor_pool, pool, descriptorPool);

   list_for_each_entry_safe(struct radv_descriptor_set, set,
                            &pool->descriptor_sets, descriptor_pool) {
      radv_descriptor_set_destroy(device, pool, set, false);
   }

   pool->current_offset = 0;
   pool->free_list = -1;
   pool->full_list = 0;
   pool->free_nodes[pool->max_sets - 1].next = -1;

   for (int i = 0; i + 1 < pool->max_sets; ++i)
      pool->free_nodes[i].next = i + 1;

   return VK_SUCCESS;
}

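/* On partial failure, the sets created so far are handed back to
 * radv_FreeDescriptorSets before the error is returned, matching the
 * all-or-nothing contract of vkAllocateDescriptorSets. */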
VkResult radv_AllocateDescriptorSets(
   VkDevice _device,
   const VkDescriptorSetAllocateInfo* pAllocateInfo,
   VkDescriptorSet* pDescriptorSets)
{
   RADV_FROM_HANDLE(radv_device, device, _device);
   RADV_FROM_HANDLE(radv_descriptor_pool, pool, pAllocateInfo->descriptorPool);

   VkResult result = VK_SUCCESS;
   uint32_t i;
   struct radv_descriptor_set *set;

   /* allocate one descriptor set for each requested layout */
   for (i = 0; i < pAllocateInfo->descriptorSetCount; i++) {
      RADV_FROM_HANDLE(radv_descriptor_set_layout, layout,
                       pAllocateInfo->pSetLayouts[i]);

      result = radv_descriptor_set_create(device, pool, NULL, layout, &set);
      if (result != VK_SUCCESS)
         break;

      pDescriptorSets[i] = radv_descriptor_set_to_handle(set);
   }

   if (result != VK_SUCCESS)
      radv_FreeDescriptorSets(_device, pAllocateInfo->descriptorPool,
                              i, pDescriptorSets);
   return result;
}

VkResult radv_FreeDescriptorSets(
   VkDevice _device,
   VkDescriptorPool descriptorPool,
   uint32_t count,
   const VkDescriptorSet* pDescriptorSets)
{
   RADV_FROM_HANDLE(radv_device, device, _device);
   RADV_FROM_HANDLE(radv_descriptor_pool, pool, descriptorPool);

   for (uint32_t i = 0; i < count; i++) {
      RADV_FROM_HANDLE(radv_descriptor_set, set, pDescriptorSets[i]);

      if (set)
         radv_descriptor_set_destroy(device, pool, set, true);
   }
   return VK_SUCCESS;
}

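/* The write_*_descriptor helpers below emit hardware resource descriptors
 * (SRDs) straight into the set's mapped memory: 4 dwords for a buffer view
 * or sampler, 8 for an image view plus 8 more for its FMASK descriptor.
 * Each helper also records the backing BO so the command buffer can keep it
 * resident. */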
static void write_texel_buffer_descriptor(struct radv_device *device,
                                          unsigned *dst,
                                          struct radeon_winsys_bo **buffer_list,
                                          const VkBufferView _buffer_view)
{
   RADV_FROM_HANDLE(radv_buffer_view, buffer_view, _buffer_view);

   memcpy(dst, buffer_view->state, 4 * 4);
   *buffer_list = buffer_view->bo;
}

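/* Plain buffer descriptors are built in place: dwords 0-1 hold the 64-bit
 * GPU address, dword 2 the range in bytes, and dword 3 an identity XYZW
 * swizzle with a 32-bit float format, encoded via the S_008F0C_* macros
 * from sid.h. */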
static void write_buffer_descriptor(struct radv_device *device,
                                    unsigned *dst,
                                    struct radeon_winsys_bo **buffer_list,
                                    const VkDescriptorBufferInfo *buffer_info)
{
   RADV_FROM_HANDLE(radv_buffer, buffer, buffer_info->buffer);
   uint64_t va = device->ws->buffer_get_va(buffer->bo);
   uint32_t range = buffer_info->range;

   if (buffer_info->range == VK_WHOLE_SIZE)
      range = buffer->size - buffer_info->offset;

   va += buffer_info->offset + buffer->offset;
   dst[0] = va;
   dst[1] = S_008F04_BASE_ADDRESS_HI(va >> 32);
   dst[2] = range;
   dst[3] = S_008F0C_DST_SEL_X(V_008F0C_SQ_SEL_X) |
      S_008F0C_DST_SEL_Y(V_008F0C_SQ_SEL_Y) |
      S_008F0C_DST_SEL_Z(V_008F0C_SQ_SEL_Z) |
      S_008F0C_DST_SEL_W(V_008F0C_SQ_SEL_W) |
      S_008F0C_NUM_FORMAT(V_008F0C_BUF_NUM_FORMAT_FLOAT) |
      S_008F0C_DATA_FORMAT(V_008F0C_BUF_DATA_FORMAT_32);

   *buffer_list = buffer->bo;
}

static void write_dynamic_buffer_descriptor(struct radv_device *device,
                                            struct radv_descriptor_range *range,
                                            struct radeon_winsys_bo **buffer_list,
                                            const VkDescriptorBufferInfo *buffer_info)
{
   RADV_FROM_HANDLE(radv_buffer, buffer, buffer_info->buffer);
   uint64_t va = device->ws->buffer_get_va(buffer->bo);
   unsigned size = buffer_info->range;

   if (buffer_info->range == VK_WHOLE_SIZE)
      size = buffer->size - buffer_info->offset;

   va += buffer_info->offset + buffer->offset;
   range->va = va;
   range->size = size;

   *buffer_list = buffer->bo;
}

static void
write_image_descriptor(struct radv_device *device,
                       unsigned *dst,
                       struct radeon_winsys_bo **buffer_list,
                       const VkDescriptorImageInfo *image_info)
{
   RADV_FROM_HANDLE(radv_image_view, iview, image_info->imageView);
   memcpy(dst, iview->descriptor, 8 * 4);
   memcpy(dst + 8, iview->fmask_descriptor, 8 * 4);
   *buffer_list = iview->bo;
}

static void
write_combined_image_sampler_descriptor(struct radv_device *device,
                                        unsigned *dst,
                                        struct radeon_winsys_bo **buffer_list,
                                        const VkDescriptorImageInfo *image_info,
                                        bool has_sampler)
{
   RADV_FROM_HANDLE(radv_sampler, sampler, image_info->sampler);

   write_image_descriptor(device, dst, buffer_list, image_info);
   /* copy over sampler state */
   if (has_sampler)
      memcpy(dst + 16, sampler->state, 16);
}

static void
write_sampler_descriptor(struct radv_device *device,
                         unsigned *dst,
                         const VkDescriptorImageInfo *image_info)
{
   RADV_FROM_HANDLE(radv_sampler, sampler, image_info->sampler);

   memcpy(dst, sampler->state, 16);
}

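/* radv_UpdateDescriptorSets walks each write: ptr starts at the set's mapped
 * CPU pointer, advanced by the binding's byte offset plus dstArrayElement
 * descriptors, while buffer_list is advanced in parallel to record BO
 * references. A hypothetical caller-side sketch (application code, not part
 * of this file) that would land in the UNIFORM_BUFFER case below:
 *
 *    VkDescriptorBufferInfo info = { buffer, 0, VK_WHOLE_SIZE };
 *    VkWriteDescriptorSet write = {
 *       .sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET,
 *       .dstSet = set,
 *       .dstBinding = 0,
 *       .dstArrayElement = 0,
 *       .descriptorCount = 1,
 *       .descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER,
 *       .pBufferInfo = &info,
 *    };
 *    vkUpdateDescriptorSets(device, 1, &write, 0, NULL);
 */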
void radv_UpdateDescriptorSets(
   VkDevice _device,
   uint32_t descriptorWriteCount,
   const VkWriteDescriptorSet* pDescriptorWrites,
   uint32_t descriptorCopyCount,
   const VkCopyDescriptorSet* pDescriptorCopies)
{
   RADV_FROM_HANDLE(radv_device, device, _device);
   uint32_t i, j;
   for (i = 0; i < descriptorWriteCount; i++) {
      const VkWriteDescriptorSet *writeset = &pDescriptorWrites[i];
      RADV_FROM_HANDLE(radv_descriptor_set, set, writeset->dstSet);
      const struct radv_descriptor_set_binding_layout *binding_layout =
         set->layout->binding + writeset->dstBinding;
      uint32_t *ptr = set->mapped_ptr;
      struct radeon_winsys_bo **buffer_list = set->descriptors;

      ptr += binding_layout->offset / 4;
      ptr += binding_layout->size * writeset->dstArrayElement / 4;
      buffer_list += binding_layout->buffer_offset;
      buffer_list += binding_layout->buffer_count * writeset->dstArrayElement;
      for (j = 0; j < writeset->descriptorCount; ++j) {
         switch(writeset->descriptorType) {
         case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
         case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC: {
            unsigned idx = writeset->dstArrayElement + j;
            idx += binding_layout->dynamic_offset_offset;
            write_dynamic_buffer_descriptor(device, set->dynamic_descriptors + idx,
                                            buffer_list, writeset->pBufferInfo + j);
            break;
         }
         case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
         case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
            write_buffer_descriptor(device, ptr, buffer_list,
                                    writeset->pBufferInfo + j);
            break;
         case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
         case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
            write_texel_buffer_descriptor(device, ptr, buffer_list,
                                          writeset->pTexelBufferView[j]);
            break;
         case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE:
         case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
         case VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT:
            write_image_descriptor(device, ptr, buffer_list,
                                   writeset->pImageInfo + j);
            break;
         case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
            write_combined_image_sampler_descriptor(device, ptr, buffer_list,
                                                    writeset->pImageInfo + j,
                                                    !binding_layout->immutable_samplers);
            break;
         case VK_DESCRIPTOR_TYPE_SAMPLER:
            assert(!binding_layout->immutable_samplers);
            write_sampler_descriptor(device, ptr,
                                     writeset->pImageInfo + j);
            break;
         default:
            unreachable("unimplemented descriptor type");
            break;
         }
         ptr += binding_layout->size / 4;
         buffer_list += binding_layout->buffer_count;
      }
   }
   if (descriptorCopyCount)
      radv_finishme("copy descriptors");
}