radv: Detect if all immutable samplers for a binding are equal.
src/amd/vulkan/radv_descriptor_set.c
/*
 * Copyright © 2016 Red Hat.
 * Copyright © 2016 Bas Nieuwenhuizen
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
#include <assert.h>
#include <stdbool.h>
#include <string.h>
#include <unistd.h>
#include <fcntl.h>

#include "util/mesa-sha1.h"
#include "radv_private.h"
#include "sid.h"

VkResult radv_CreateDescriptorSetLayout(
	VkDevice _device,
	const VkDescriptorSetLayoutCreateInfo* pCreateInfo,
	const VkAllocationCallbacks* pAllocator,
	VkDescriptorSetLayout* pSetLayout)
{
	RADV_FROM_HANDLE(radv_device, device, _device);
	struct radv_descriptor_set_layout *set_layout;

	assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO);

	uint32_t max_binding = 0;
	uint32_t immutable_sampler_count = 0;
	for (uint32_t j = 0; j < pCreateInfo->bindingCount; j++) {
		max_binding = MAX2(max_binding, pCreateInfo->pBindings[j].binding);
		if (pCreateInfo->pBindings[j].pImmutableSamplers)
			immutable_sampler_count += pCreateInfo->pBindings[j].descriptorCount;
	}

	/* Layout header, one binding struct per binding slot, plus 4 dwords
	 * (16 bytes) of state for each immutable sampler. */
	size_t size = sizeof(struct radv_descriptor_set_layout) +
		(max_binding + 1) * sizeof(set_layout->binding[0]) +
		immutable_sampler_count * 4 * sizeof(uint32_t);

	set_layout = vk_alloc2(&device->alloc, pAllocator, size, 8,
			       VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
	if (!set_layout)
		return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

	/* We just allocate all the samplers at the end of the struct */
	uint32_t *samplers = (uint32_t*)&set_layout->binding[max_binding + 1];

	set_layout->binding_count = max_binding + 1;
	set_layout->shader_stages = 0;
	set_layout->size = 0;

	memset(set_layout->binding, 0, size - sizeof(struct radv_descriptor_set_layout));

	uint32_t buffer_count = 0;
	uint32_t dynamic_offset_count = 0;

	for (uint32_t j = 0; j < pCreateInfo->bindingCount; j++) {
		const VkDescriptorSetLayoutBinding *binding = &pCreateInfo->pBindings[j];
		uint32_t b = binding->binding;
		uint32_t alignment;

		switch (binding->descriptorType) {
		case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
		case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
			set_layout->binding[b].dynamic_offset_count = 1;
			set_layout->dynamic_shader_stages |= binding->stageFlags;
			set_layout->binding[b].size = 0;
			set_layout->binding[b].buffer_count = 1;
			alignment = 1;
			break;
		case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
		case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
		case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
		case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
			set_layout->binding[b].size = 16;
			set_layout->binding[b].buffer_count = 1;
			alignment = 16;
			break;
		case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
		case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE:
		case VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT:
			/* main descriptor + fmask descriptor */
			set_layout->binding[b].size = 64;
			set_layout->binding[b].buffer_count = 1;
			alignment = 32;
			break;
		case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
			/* main descriptor + fmask descriptor + sampler */
			set_layout->binding[b].size = 96;
			set_layout->binding[b].buffer_count = 1;
			alignment = 32;
			break;
		case VK_DESCRIPTOR_TYPE_SAMPLER:
			set_layout->binding[b].size = 16;
			alignment = 16;
			break;
		default:
			unreachable("unknown descriptor type\n");
			break;
		}

		set_layout->size = align(set_layout->size, alignment);
		assert(binding->descriptorCount > 0);
		set_layout->binding[b].type = binding->descriptorType;
		set_layout->binding[b].array_size = binding->descriptorCount;
		set_layout->binding[b].offset = set_layout->size;
		set_layout->binding[b].buffer_offset = buffer_count;
		set_layout->binding[b].dynamic_offset_offset = dynamic_offset_count;

		set_layout->size += binding->descriptorCount * set_layout->binding[b].size;
		buffer_count += binding->descriptorCount * set_layout->binding[b].buffer_count;
		dynamic_offset_count += binding->descriptorCount *
			set_layout->binding[b].dynamic_offset_count;

		if (binding->pImmutableSamplers) {
			set_layout->binding[b].immutable_samplers = samplers;
			set_layout->binding[b].immutable_samplers_equal = true;
			samplers += 4 * binding->descriptorCount;

			/* Copy each sampler's 4-dword state into the layout. */
			for (uint32_t i = 0; i < binding->descriptorCount; i++)
				memcpy(set_layout->binding[b].immutable_samplers + 4 * i,
				       &radv_sampler_from_handle(binding->pImmutableSamplers[i])->state, 16);

			/* If any element differs from element 0, the binding's
			 * immutable samplers are not all equal. */
			for (uint32_t i = 1; i < binding->descriptorCount; i++)
				if (memcmp(set_layout->binding[b].immutable_samplers + 4 * i,
					   set_layout->binding[b].immutable_samplers, 16) != 0)
					set_layout->binding[b].immutable_samplers_equal = false;
		} else {
			set_layout->binding[b].immutable_samplers = NULL;
		}

		set_layout->shader_stages |= binding->stageFlags;
	}

	set_layout->buffer_count = buffer_count;
	set_layout->dynamic_offset_count = dynamic_offset_count;

	*pSetLayout = radv_descriptor_set_layout_to_handle(set_layout);

	return VK_SUCCESS;
}
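
/* Illustrative sketch (not part of the original file): one way a consumer
 * could use the immutable_samplers_equal flag computed above. When every
 * element of a binding shares the same sampler state, a compiler pass can
 * treat the sampler as a single constant instead of indexing per element.
 * The helper name is hypothetical.
 */
static inline const uint32_t *
radv_binding_single_immutable_sampler(const struct radv_descriptor_set_layout *layout,
				      unsigned binding)
{
	const struct radv_descriptor_set_binding_layout *b = &layout->binding[binding];

	/* Element 0 is representative: the loop above verified that every
	 * 4-dword sampler word in this binding matches it. */
	if (b->immutable_samplers && b->immutable_samplers_equal)
		return b->immutable_samplers;
	return NULL;
}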

void radv_DestroyDescriptorSetLayout(
	VkDevice _device,
	VkDescriptorSetLayout _set_layout,
	const VkAllocationCallbacks* pAllocator)
{
	RADV_FROM_HANDLE(radv_device, device, _device);
	RADV_FROM_HANDLE(radv_descriptor_set_layout, set_layout, _set_layout);

	if (!set_layout)
		return;

	vk_free2(&device->alloc, pAllocator, set_layout);
}

/*
 * Pipeline layouts. These have nothing to do with the pipeline. They are
 * just multiple descriptor set layouts pasted together.
 */

VkResult radv_CreatePipelineLayout(
	VkDevice _device,
	const VkPipelineLayoutCreateInfo* pCreateInfo,
	const VkAllocationCallbacks* pAllocator,
	VkPipelineLayout* pPipelineLayout)
{
	RADV_FROM_HANDLE(radv_device, device, _device);
	struct radv_pipeline_layout *layout;
	struct mesa_sha1 *ctx;

	assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO);

	layout = vk_alloc2(&device->alloc, pAllocator, sizeof(*layout), 8,
			   VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
	if (layout == NULL)
		return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

	layout->num_sets = pCreateInfo->setLayoutCount;

	unsigned dynamic_offset_count = 0;

	ctx = _mesa_sha1_init();
	for (uint32_t set = 0; set < pCreateInfo->setLayoutCount; set++) {
		RADV_FROM_HANDLE(radv_descriptor_set_layout, set_layout,
				 pCreateInfo->pSetLayouts[set]);
		layout->set[set].layout = set_layout;

		layout->set[set].dynamic_offset_start = dynamic_offset_count;
		for (uint32_t b = 0; b < set_layout->binding_count; b++) {
			dynamic_offset_count += set_layout->binding[b].array_size *
				set_layout->binding[b].dynamic_offset_count;
		}
		_mesa_sha1_update(ctx, set_layout->binding,
				  sizeof(set_layout->binding[0]) * set_layout->binding_count);
	}

	layout->dynamic_offset_count = dynamic_offset_count;
	layout->push_constant_size = 0;
	for (unsigned i = 0; i < pCreateInfo->pushConstantRangeCount; ++i) {
		const VkPushConstantRange *range = pCreateInfo->pPushConstantRanges + i;
		layout->push_constant_size = MAX2(layout->push_constant_size,
						  range->offset + range->size);
	}

	layout->push_constant_size = align(layout->push_constant_size, 16);
	_mesa_sha1_update(ctx, &layout->push_constant_size,
			  sizeof(layout->push_constant_size));
	_mesa_sha1_final(ctx, layout->sha1);
	*pPipelineLayout = radv_pipeline_layout_to_handle(layout);

	return VK_SUCCESS;
}
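
/* Illustrative sketch (hypothetical helper): the SHA-1 finalized above gives
 * a pipeline layout a cheap identity, e.g. as part of a shader-cache key.
 * Two layouts that hash equal have matching binding metadata and push
 * constant size, so comparing the 20-byte digests stands in for a deep
 * structural comparison.
 */
static inline bool
radv_pipeline_layouts_equal(const struct radv_pipeline_layout *a,
			    const struct radv_pipeline_layout *b)
{
	return memcmp(a->sha1, b->sha1, 20) == 0; /* SHA-1 digests are 20 bytes */
}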

void radv_DestroyPipelineLayout(
	VkDevice _device,
	VkPipelineLayout _pipelineLayout,
	const VkAllocationCallbacks* pAllocator)
{
	RADV_FROM_HANDLE(radv_device, device, _device);
	RADV_FROM_HANDLE(radv_pipeline_layout, pipeline_layout, _pipelineLayout);

	if (!pipeline_layout)
		return;
	vk_free2(&device->alloc, pAllocator, pipeline_layout);
}

#define EMPTY 1

static VkResult
radv_descriptor_set_create(struct radv_device *device,
			   struct radv_descriptor_pool *pool,
			   struct radv_cmd_buffer *cmd_buffer,
			   const struct radv_descriptor_set_layout *layout,
			   struct radv_descriptor_set **out_set)
{
	struct radv_descriptor_set *set;
	unsigned mem_size = sizeof(struct radv_descriptor_set) +
		sizeof(struct radeon_winsys_bo *) * layout->buffer_count;
	set = vk_alloc2(&device->alloc, NULL, mem_size, 8,
			VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);

	if (!set)
		return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

	memset(set, 0, mem_size);

	if (layout->dynamic_offset_count) {
		unsigned size = sizeof(struct radv_descriptor_range) *
			layout->dynamic_offset_count;
		set->dynamic_descriptors = vk_alloc2(&device->alloc, NULL, size, 8,
						     VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);

		if (!set->dynamic_descriptors) {
			vk_free2(&device->alloc, NULL, set);
			return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
		}
	}

	set->layout = layout;
	if (layout->size) {
		uint32_t layout_size = align_u32(layout->size, 32);
		set->size = layout->size;
		if (!cmd_buffer) {
			/* try to allocate linearly first, so that we don't spend
			 * time looking for gaps if the app only allocates &
			 * resets via the pool. */
			if (pool->current_offset + layout_size <= pool->size) {
				set->bo = pool->bo;
				set->mapped_ptr = (uint32_t*)(pool->mapped_ptr + pool->current_offset);
				set->va = device->ws->buffer_get_va(set->bo) + pool->current_offset;
				pool->current_offset += layout_size;
				list_addtail(&set->vram_list, &pool->vram_list);
			} else {
				/* First-fit search: walk the sets (kept sorted
				 * by offset) and take the first gap that is
				 * large enough. */
				uint64_t offset = 0;
				struct list_head *prev = &pool->vram_list;
				struct radv_descriptor_set *cur;
				LIST_FOR_EACH_ENTRY(cur, &pool->vram_list, vram_list) {
					uint64_t start = (uint8_t*)cur->mapped_ptr - pool->mapped_ptr;
					if (start - offset >= layout_size)
						break;

					offset = start + cur->size;
					prev = &cur->vram_list;
				}

				if (pool->size - offset < layout_size) {
					vk_free2(&device->alloc, NULL, set->dynamic_descriptors);
					vk_free2(&device->alloc, NULL, set);
					return vk_error(VK_ERROR_OUT_OF_POOL_MEMORY_KHR);
				}
				set->bo = pool->bo;
				set->mapped_ptr = (uint32_t*)(pool->mapped_ptr + offset);
				set->va = device->ws->buffer_get_va(set->bo) + offset;
				list_add(&set->vram_list, prev);
			}
		} else {
			unsigned bo_offset;
			if (!radv_cmd_buffer_upload_alloc(cmd_buffer, set->size, 32,
							  &bo_offset,
							  (void**)&set->mapped_ptr)) {
				vk_free2(&device->alloc, NULL, set->dynamic_descriptors);
				vk_free2(&device->alloc, NULL, set);
				return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
			}

			set->va = device->ws->buffer_get_va(cmd_buffer->upload.upload_bo);
			set->va += bo_offset;
		}
	}

	/* Pre-fill the set with the layout's immutable sampler words. */
	for (unsigned i = 0; i < layout->binding_count; ++i) {
		if (!layout->binding[i].immutable_samplers)
			continue;

		unsigned offset = layout->binding[i].offset / 4;
		/* For combined image+samplers, the sampler lives after the
		 * 64-byte image + fmask descriptors, i.e. at dword 16. */
		if (layout->binding[i].type == VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER)
			offset += 16;

		for (unsigned j = 0; j < layout->binding[i].array_size; ++j) {
			memcpy(set->mapped_ptr + offset, layout->binding[i].immutable_samplers + 4 * j, 16);
			offset += layout->binding[i].size / 4;
		}
	}

	*out_set = set;
	return VK_SUCCESS;
}
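
/* Illustrative sketch (not used by the driver): the first-fit gap search
 * above in standalone form. The vram_list keeps sets sorted by offset, so
 * walking it and measuring the space before each set finds the first hole
 * big enough for `needed` bytes. The helper name is hypothetical.
 */
static inline uint64_t
radv_pool_find_gap(struct radv_descriptor_pool *pool, uint64_t needed,
		   struct list_head **prev_out)
{
	uint64_t offset = 0;
	struct list_head *prev = &pool->vram_list;
	struct radv_descriptor_set *cur;

	LIST_FOR_EACH_ENTRY(cur, &pool->vram_list, vram_list) {
		uint64_t start = (uint8_t*)cur->mapped_ptr - pool->mapped_ptr;
		if (start - offset >= needed)
			break; /* the hole before `cur` fits */
		offset = start + cur->size;
		prev = &cur->vram_list;
	}

	*prev_out = prev;
	return offset; /* caller must check offset + needed <= pool->size */
}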

static void
radv_descriptor_set_destroy(struct radv_device *device,
			    struct radv_descriptor_pool *pool,
			    struct radv_descriptor_set *set,
			    bool free_bo)
{
	if (free_bo && set->size)
		list_del(&set->vram_list);
	if (set->dynamic_descriptors)
		vk_free2(&device->alloc, NULL, set->dynamic_descriptors);
	vk_free2(&device->alloc, NULL, set);
}

VkResult
radv_temp_descriptor_set_create(struct radv_device *device,
				struct radv_cmd_buffer *cmd_buffer,
				VkDescriptorSetLayout _layout,
				VkDescriptorSet *_set)
{
	RADV_FROM_HANDLE(radv_descriptor_set_layout, layout, _layout);
	struct radv_descriptor_set *set;
	VkResult ret;

	ret = radv_descriptor_set_create(device, NULL, cmd_buffer, layout, &set);
	if (ret != VK_SUCCESS)
		return ret; /* don't convert an uninitialized pointer on failure */

	*_set = radv_descriptor_set_to_handle(set);
	return ret;
}

void
radv_temp_descriptor_set_destroy(struct radv_device *device,
				 VkDescriptorSet _set)
{
	RADV_FROM_HANDLE(radv_descriptor_set, set, _set);

	radv_descriptor_set_destroy(device, NULL, set, false);
}

VkResult radv_CreateDescriptorPool(
	VkDevice _device,
	const VkDescriptorPoolCreateInfo* pCreateInfo,
	const VkAllocationCallbacks* pAllocator,
	VkDescriptorPool* pDescriptorPool)
{
	RADV_FROM_HANDLE(radv_device, device, _device);
	struct radv_descriptor_pool *pool;
	int size = sizeof(struct radv_descriptor_pool);
	uint64_t bo_size = 0;
	pool = vk_alloc2(&device->alloc, pAllocator, size, 8,
			 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
	if (!pool)
		return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

	memset(pool, 0, sizeof(*pool));

	for (unsigned i = 0; i < pCreateInfo->poolSizeCount; ++i) {
		switch(pCreateInfo->pPoolSizes[i].type) {
		case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
		case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
			break;
		case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
		case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
		case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
		case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
		case VK_DESCRIPTOR_TYPE_SAMPLER:
			/* 32 as we may need to align for images */
			bo_size += 32 * pCreateInfo->pPoolSizes[i].descriptorCount;
			break;
		case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE:
		case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
		case VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT:
			bo_size += 64 * pCreateInfo->pPoolSizes[i].descriptorCount;
			break;
		case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
			bo_size += 96 * pCreateInfo->pPoolSizes[i].descriptorCount;
			break;
		default:
			unreachable("unknown descriptor type\n");
			break;
		}
	}

	if (bo_size) {
		pool->bo = device->ws->buffer_create(device->ws, bo_size,
						     32, RADEON_DOMAIN_VRAM, 0);
		pool->mapped_ptr = (uint8_t*)device->ws->buffer_map(pool->bo);
	}
	pool->size = bo_size;

	list_inithead(&pool->vram_list);
	*pDescriptorPool = radv_descriptor_pool_to_handle(pool);
	return VK_SUCCESS;
}
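
/* Worked example of the sizing above (illustrative): a pool created with
 * 8 UNIFORM_BUFFER and 4 COMBINED_IMAGE_SAMPLER descriptors reserves
 * bo_size = 8 * 32 + 4 * 96 = 640 bytes of pool VRAM. */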

void radv_DestroyDescriptorPool(
	VkDevice _device,
	VkDescriptorPool _pool,
	const VkAllocationCallbacks* pAllocator)
{
	RADV_FROM_HANDLE(radv_device, device, _device);
	RADV_FROM_HANDLE(radv_descriptor_pool, pool, _pool);

	if (!pool)
		return;

	list_for_each_entry_safe(struct radv_descriptor_set, set,
				 &pool->vram_list, vram_list) {
		radv_descriptor_set_destroy(device, pool, set, false);
	}

	if (pool->bo)
		device->ws->buffer_destroy(pool->bo);
	vk_free2(&device->alloc, pAllocator, pool);
}

VkResult radv_ResetDescriptorPool(
	VkDevice _device,
	VkDescriptorPool descriptorPool,
	VkDescriptorPoolResetFlags flags)
{
	RADV_FROM_HANDLE(radv_device, device, _device);
	RADV_FROM_HANDLE(radv_descriptor_pool, pool, descriptorPool);

	list_for_each_entry_safe(struct radv_descriptor_set, set,
				 &pool->vram_list, vram_list) {
		radv_descriptor_set_destroy(device, pool, set, false);
	}

	list_inithead(&pool->vram_list);

	pool->current_offset = 0;

	return VK_SUCCESS;
}

VkResult radv_AllocateDescriptorSets(
	VkDevice _device,
	const VkDescriptorSetAllocateInfo* pAllocateInfo,
	VkDescriptorSet* pDescriptorSets)
{
	RADV_FROM_HANDLE(radv_device, device, _device);
	RADV_FROM_HANDLE(radv_descriptor_pool, pool, pAllocateInfo->descriptorPool);

	VkResult result = VK_SUCCESS;
	uint32_t i;
	struct radv_descriptor_set *set;

	/* allocate a set of buffers for each shader to contain descriptors */
	for (i = 0; i < pAllocateInfo->descriptorSetCount; i++) {
		RADV_FROM_HANDLE(radv_descriptor_set_layout, layout,
				 pAllocateInfo->pSetLayouts[i]);

		result = radv_descriptor_set_create(device, pool, NULL, layout, &set);
		if (result != VK_SUCCESS)
			break;

		pDescriptorSets[i] = radv_descriptor_set_to_handle(set);
	}

	if (result != VK_SUCCESS)
		radv_FreeDescriptorSets(_device, pAllocateInfo->descriptorPool,
					i, pDescriptorSets);
	return result;
}

VkResult radv_FreeDescriptorSets(
	VkDevice _device,
	VkDescriptorPool descriptorPool,
	uint32_t count,
	const VkDescriptorSet* pDescriptorSets)
{
	RADV_FROM_HANDLE(radv_device, device, _device);
	RADV_FROM_HANDLE(radv_descriptor_pool, pool, descriptorPool);

	for (uint32_t i = 0; i < count; i++) {
		RADV_FROM_HANDLE(radv_descriptor_set, set, pDescriptorSets[i]);

		if (set)
			radv_descriptor_set_destroy(device, pool, set, true);
	}
	return VK_SUCCESS;
}

static void write_texel_buffer_descriptor(struct radv_device *device,
					  unsigned *dst,
					  struct radeon_winsys_bo **buffer_list,
					  const VkBufferView _buffer_view)
{
	RADV_FROM_HANDLE(radv_buffer_view, buffer_view, _buffer_view);

	memcpy(dst, buffer_view->state, 4 * 4);
	*buffer_list = buffer_view->bo;
}

static void write_buffer_descriptor(struct radv_device *device,
				    unsigned *dst,
				    struct radeon_winsys_bo **buffer_list,
				    const VkDescriptorBufferInfo *buffer_info)
{
	RADV_FROM_HANDLE(radv_buffer, buffer, buffer_info->buffer);
	uint64_t va = device->ws->buffer_get_va(buffer->bo);
	uint32_t range = buffer_info->range;

	if (buffer_info->range == VK_WHOLE_SIZE)
		range = buffer->size - buffer_info->offset;

	/* Build a 4-dword buffer resource descriptor: dwords 0-1 hold the
	 * 48-bit base address, dword 2 the range in bytes, dword 3 the
	 * swizzle and format. */
	va += buffer_info->offset + buffer->offset;
	dst[0] = va;
	dst[1] = S_008F04_BASE_ADDRESS_HI(va >> 32);
	dst[2] = range;
	dst[3] = S_008F0C_DST_SEL_X(V_008F0C_SQ_SEL_X) |
		 S_008F0C_DST_SEL_Y(V_008F0C_SQ_SEL_Y) |
		 S_008F0C_DST_SEL_Z(V_008F0C_SQ_SEL_Z) |
		 S_008F0C_DST_SEL_W(V_008F0C_SQ_SEL_W) |
		 S_008F0C_NUM_FORMAT(V_008F0C_BUF_NUM_FORMAT_FLOAT) |
		 S_008F0C_DATA_FORMAT(V_008F0C_BUF_DATA_FORMAT_32);

	*buffer_list = buffer->bo;
}

static void write_dynamic_buffer_descriptor(struct radv_device *device,
					    struct radv_descriptor_range *range,
					    struct radeon_winsys_bo **buffer_list,
					    const VkDescriptorBufferInfo *buffer_info)
{
	RADV_FROM_HANDLE(radv_buffer, buffer, buffer_info->buffer);
	uint64_t va = device->ws->buffer_get_va(buffer->bo);
	unsigned size = buffer_info->range;

	if (buffer_info->range == VK_WHOLE_SIZE)
		size = buffer->size - buffer_info->offset;

	va += buffer_info->offset + buffer->offset;
	range->va = va;
	range->size = size;

	*buffer_list = buffer->bo;
}

static void
write_image_descriptor(struct radv_device *device,
		       unsigned *dst,
		       struct radeon_winsys_bo **buffer_list,
		       const VkDescriptorImageInfo *image_info)
{
	RADV_FROM_HANDLE(radv_image_view, iview, image_info->imageView);
	/* 8-dword image descriptor, followed by the 8-dword fmask descriptor. */
	memcpy(dst, iview->descriptor, 8 * 4);
	memcpy(dst + 8, iview->fmask_descriptor, 8 * 4);
	*buffer_list = iview->bo;
}

static void
write_combined_image_sampler_descriptor(struct radv_device *device,
					unsigned *dst,
					struct radeon_winsys_bo **buffer_list,
					const VkDescriptorImageInfo *image_info,
					bool has_sampler)
{
	RADV_FROM_HANDLE(radv_sampler, sampler, image_info->sampler);

	write_image_descriptor(device, dst, buffer_list, image_info);
	/* copy over sampler state */
	if (has_sampler)
		memcpy(dst + 16, sampler->state, 16);
}

static void
write_sampler_descriptor(struct radv_device *device,
			 unsigned *dst,
			 const VkDescriptorImageInfo *image_info)
{
	RADV_FROM_HANDLE(radv_sampler, sampler, image_info->sampler);

	memcpy(dst, sampler->state, 16);
}

void radv_UpdateDescriptorSets(
	VkDevice _device,
	uint32_t descriptorWriteCount,
	const VkWriteDescriptorSet* pDescriptorWrites,
	uint32_t descriptorCopyCount,
	const VkCopyDescriptorSet* pDescriptorCopies)
{
	RADV_FROM_HANDLE(radv_device, device, _device);
	uint32_t i, j;
	for (i = 0; i < descriptorWriteCount; i++) {
		const VkWriteDescriptorSet *writeset = &pDescriptorWrites[i];
		RADV_FROM_HANDLE(radv_descriptor_set, set, writeset->dstSet);
		const struct radv_descriptor_set_binding_layout *binding_layout =
			set->layout->binding + writeset->dstBinding;
		uint32_t *ptr = set->mapped_ptr;
		struct radeon_winsys_bo **buffer_list = set->descriptors;

		ptr += binding_layout->offset / 4;
		ptr += binding_layout->size * writeset->dstArrayElement / 4;
		buffer_list += binding_layout->buffer_offset;
		buffer_list += binding_layout->buffer_count * writeset->dstArrayElement;
		for (j = 0; j < writeset->descriptorCount; ++j) {
			switch(writeset->descriptorType) {
			case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
			case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC: {
				unsigned idx = writeset->dstArrayElement + j;
				idx += binding_layout->dynamic_offset_offset;
				write_dynamic_buffer_descriptor(device, set->dynamic_descriptors + idx,
								buffer_list, writeset->pBufferInfo + j);
				break;
			}
			case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
			case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
				write_buffer_descriptor(device, ptr, buffer_list,
							writeset->pBufferInfo + j);
				break;
			case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
			case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
				write_texel_buffer_descriptor(device, ptr, buffer_list,
							      writeset->pTexelBufferView[j]);
				break;
			case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE:
			case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
			case VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT:
				write_image_descriptor(device, ptr, buffer_list,
						       writeset->pImageInfo + j);
				break;
			case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
				write_combined_image_sampler_descriptor(device, ptr, buffer_list,
									writeset->pImageInfo + j,
									!binding_layout->immutable_samplers);
				break;
			case VK_DESCRIPTOR_TYPE_SAMPLER:
				assert(!binding_layout->immutable_samplers);
				write_sampler_descriptor(device, ptr,
							 writeset->pImageInfo + j);
				break;
			default:
				unreachable("unimplemented descriptor type");
				break;
			}
			ptr += binding_layout->size / 4;
			buffer_list += binding_layout->buffer_count;
		}
	}

	if (descriptorCopyCount)
		radv_finishme("copy descriptors");
}
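
/* Illustrative app-side usage of the entry point above, kept compiled out
 * since driver code never calls the public API. These are standard Vulkan
 * calls; only the example function name is hypothetical. */
#if 0
static void
example_write_uniform_buffer(VkDevice device, VkDescriptorSet set,
			     VkBuffer buffer)
{
	VkDescriptorBufferInfo buffer_info = {
		.buffer = buffer,
		.offset = 0,
		.range = VK_WHOLE_SIZE, /* resolved against the buffer size above */
	};
	VkWriteDescriptorSet write = {
		.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET,
		.dstSet = set,
		.dstBinding = 0,
		.dstArrayElement = 0,
		.descriptorCount = 1,
		.descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER,
		.pBufferInfo = &buffer_info,
	};

	vkUpdateDescriptorSets(device, 1, &write, 0, NULL);
}
#endif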