radv: Fix descriptor set allocation failure.
[mesa.git] src/amd/vulkan/radv_descriptor_set.c
/*
 * Copyright © 2016 Red Hat.
 * Copyright © 2016 Bas Nieuwenhuizen
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
#include <assert.h>
#include <stdbool.h>
#include <string.h>
#include <unistd.h>
#include <fcntl.h>

#include "util/mesa-sha1.h"
#include "radv_private.h"
#include "sid.h"
#include "vk_format.h"
#include "vk_util.h"

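/* Immutable samplers can be baked into the set layout when every sampler in
 * the binding has identical hardware state (the four 32-bit words compared
 * below); in that case the per-descriptor sampler words can be omitted. */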
static bool has_equal_immutable_samplers(const VkSampler *samplers, uint32_t count)
{
    if (!samplers)
        return false;
    for (uint32_t i = 1; i < count; ++i) {
        if (memcmp(radv_sampler_from_handle(samplers[0])->state,
                   radv_sampler_from_handle(samplers[i])->state, 16)) {
            return false;
        }
    }
    return true;
}

static int binding_compare(const void *av, const void *bv)
{
    const VkDescriptorSetLayoutBinding *a = (const VkDescriptorSetLayoutBinding*)av;
    const VkDescriptorSetLayoutBinding *b = (const VkDescriptorSetLayoutBinding*)bv;

    return (a->binding < b->binding) ? -1 : (a->binding > b->binding) ? 1 : 0;
}

static VkDescriptorSetLayoutBinding *
create_sorted_bindings(const VkDescriptorSetLayoutBinding *bindings, unsigned count) {
    VkDescriptorSetLayoutBinding *sorted_bindings = malloc(count * sizeof(VkDescriptorSetLayoutBinding));
    if (!sorted_bindings)
        return NULL;

    memcpy(sorted_bindings, bindings, count * sizeof(VkDescriptorSetLayoutBinding));

    qsort(sorted_bindings, count, sizeof(VkDescriptorSetLayoutBinding), binding_compare);

    return sorted_bindings;
}

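/* A set layout lives in one allocation: the radv_descriptor_set_layout
 * header, one binding struct per binding index up to the highest used
 * binding, 16 bytes (4 dwords) of state per immutable sampler, and, when
 * YCbCr conversions are present, a per-binding offset table followed by the
 * radv_sampler_ycbcr_conversion structs. */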
VkResult radv_CreateDescriptorSetLayout(
    VkDevice _device,
    const VkDescriptorSetLayoutCreateInfo* pCreateInfo,
    const VkAllocationCallbacks* pAllocator,
    VkDescriptorSetLayout* pSetLayout)
{
    RADV_FROM_HANDLE(radv_device, device, _device);
    struct radv_descriptor_set_layout *set_layout;

    assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO);
    const VkDescriptorSetLayoutBindingFlagsCreateInfoEXT *variable_flags =
        vk_find_struct_const(pCreateInfo->pNext, DESCRIPTOR_SET_LAYOUT_BINDING_FLAGS_CREATE_INFO_EXT);

    uint32_t max_binding = 0;
    uint32_t immutable_sampler_count = 0;
    uint32_t ycbcr_sampler_count = 0;
    for (uint32_t j = 0; j < pCreateInfo->bindingCount; j++) {
        max_binding = MAX2(max_binding, pCreateInfo->pBindings[j].binding);
        if ((pCreateInfo->pBindings[j].descriptorType == VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER ||
             pCreateInfo->pBindings[j].descriptorType == VK_DESCRIPTOR_TYPE_SAMPLER) &&
            pCreateInfo->pBindings[j].pImmutableSamplers) {
            immutable_sampler_count += pCreateInfo->pBindings[j].descriptorCount;

            bool has_ycbcr_sampler = false;
            for (unsigned i = 0; i < pCreateInfo->pBindings[j].descriptorCount; ++i) {
                if (radv_sampler_from_handle(pCreateInfo->pBindings[j].pImmutableSamplers[i])->ycbcr_sampler)
                    has_ycbcr_sampler = true;
            }

            if (has_ycbcr_sampler)
                ycbcr_sampler_count += pCreateInfo->pBindings[j].descriptorCount;
        }
    }

    uint32_t samplers_offset = sizeof(struct radv_descriptor_set_layout) +
        (max_binding + 1) * sizeof(set_layout->binding[0]);
    size_t size = samplers_offset + immutable_sampler_count * 4 * sizeof(uint32_t);
    if (ycbcr_sampler_count > 0) {
        size += ycbcr_sampler_count * sizeof(struct radv_sampler_ycbcr_conversion) + (max_binding + 1) * sizeof(uint32_t);
    }

    set_layout = vk_zalloc2(&device->alloc, pAllocator, size, 8,
                            VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
    if (!set_layout)
        return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);

    set_layout->flags = pCreateInfo->flags;
    set_layout->layout_size = size;

    /* We just allocate all the samplers at the end of the struct */
    uint32_t *samplers = (uint32_t*)&set_layout->binding[max_binding + 1];
    struct radv_sampler_ycbcr_conversion *ycbcr_samplers = NULL;
    uint32_t *ycbcr_sampler_offsets = NULL;

    if (ycbcr_sampler_count > 0) {
        ycbcr_sampler_offsets = samplers + 4 * immutable_sampler_count;
        set_layout->ycbcr_sampler_offsets_offset = (char*)ycbcr_sampler_offsets - (char*)set_layout;
        ycbcr_samplers = (struct radv_sampler_ycbcr_conversion *)(ycbcr_sampler_offsets + max_binding + 1);
    } else
        set_layout->ycbcr_sampler_offsets_offset = 0;

    VkDescriptorSetLayoutBinding *bindings = create_sorted_bindings(pCreateInfo->pBindings,
                                                                    pCreateInfo->bindingCount);
    if (!bindings) {
        vk_free2(&device->alloc, pAllocator, set_layout);
        return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
    }

    set_layout->binding_count = max_binding + 1;
    set_layout->shader_stages = 0;
    set_layout->dynamic_shader_stages = 0;
    set_layout->has_immutable_samplers = false;
    set_layout->size = 0;

    memset(set_layout->binding, 0, size - sizeof(struct radv_descriptor_set_layout));

    uint32_t buffer_count = 0;
    uint32_t dynamic_offset_count = 0;

    for (uint32_t j = 0; j < pCreateInfo->bindingCount; j++) {
        const VkDescriptorSetLayoutBinding *binding = bindings + j;
        uint32_t b = binding->binding;
        uint32_t alignment;
        unsigned binding_buffer_count = 0;
        uint32_t descriptor_count = binding->descriptorCount;
        bool has_ycbcr_sampler = false;

        /* main image + fmask */
        uint32_t max_sampled_image_descriptors = 2;

        if (binding->descriptorType == VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER &&
            binding->pImmutableSamplers) {
            for (unsigned i = 0; i < binding->descriptorCount; ++i) {
                struct radv_sampler_ycbcr_conversion *conversion =
                    radv_sampler_from_handle(binding->pImmutableSamplers[i])->ycbcr_sampler;

                if (conversion) {
                    has_ycbcr_sampler = true;
                    max_sampled_image_descriptors = MAX2(max_sampled_image_descriptors,
                                                         vk_format_get_plane_count(conversion->format));
                }
            }
        }

        switch (binding->descriptorType) {
        case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
        case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
            assert(!(pCreateInfo->flags & VK_DESCRIPTOR_SET_LAYOUT_CREATE_PUSH_DESCRIPTOR_BIT_KHR));
            set_layout->binding[b].dynamic_offset_count = 1;
            set_layout->dynamic_shader_stages |= binding->stageFlags;
            set_layout->binding[b].size = 0;
            binding_buffer_count = 1;
            alignment = 1;
            break;
        case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
        case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
        case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
        case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
            set_layout->binding[b].size = 16;
            binding_buffer_count = 1;
            alignment = 16;
            break;
        case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
        case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE:
        case VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT:
            /* main descriptor + fmask descriptor */
            set_layout->binding[b].size = 64;
            binding_buffer_count = 1;
            alignment = 32;
            break;
        case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
            /* main descriptor + fmask descriptor + sampler */
            set_layout->binding[b].size = 96;
            binding_buffer_count = 1;
            alignment = 32;
            break;
        case VK_DESCRIPTOR_TYPE_SAMPLER:
            set_layout->binding[b].size = 16;
            alignment = 16;
            break;
        case VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK_EXT:
            alignment = 16;
            set_layout->binding[b].size = descriptor_count;
            descriptor_count = 1;
            break;
        default:
            unreachable("unknown descriptor type\n");
            break;
        }

        set_layout->size = align(set_layout->size, alignment);
        set_layout->binding[b].type = binding->descriptorType;
        set_layout->binding[b].array_size = descriptor_count;
        set_layout->binding[b].offset = set_layout->size;
        set_layout->binding[b].buffer_offset = buffer_count;
        set_layout->binding[b].dynamic_offset_offset = dynamic_offset_count;

        if (variable_flags && binding->binding < variable_flags->bindingCount &&
            (variable_flags->pBindingFlags[binding->binding] & VK_DESCRIPTOR_BINDING_VARIABLE_DESCRIPTOR_COUNT_BIT_EXT)) {
            assert(!binding->pImmutableSamplers); /* Terribly ill defined how many samplers are valid */
            assert(binding->binding == max_binding);

            set_layout->has_variable_descriptors = true;
        }

        if ((binding->descriptorType == VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER ||
             binding->descriptorType == VK_DESCRIPTOR_TYPE_SAMPLER) &&
            binding->pImmutableSamplers) {
            set_layout->binding[b].immutable_samplers_offset = samplers_offset;
            set_layout->binding[b].immutable_samplers_equal =
                has_equal_immutable_samplers(binding->pImmutableSamplers, binding->descriptorCount);
            set_layout->has_immutable_samplers = true;

            for (uint32_t i = 0; i < binding->descriptorCount; i++)
                memcpy(samplers + 4 * i, &radv_sampler_from_handle(binding->pImmutableSamplers[i])->state, 16);

            /* Don't reserve space for the samplers if they're not accessed. */
            if (set_layout->binding[b].immutable_samplers_equal) {
                if (binding->descriptorType == VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER &&
                    max_sampled_image_descriptors <= 2)
                    set_layout->binding[b].size -= 32;
                else if (binding->descriptorType == VK_DESCRIPTOR_TYPE_SAMPLER)
                    set_layout->binding[b].size -= 16;
            }
            samplers += 4 * binding->descriptorCount;
            samplers_offset += 4 * sizeof(uint32_t) * binding->descriptorCount;

            if (has_ycbcr_sampler) {
                ycbcr_sampler_offsets[b] = (const char*)ycbcr_samplers - (const char*)set_layout;
                for (uint32_t i = 0; i < binding->descriptorCount; i++) {
                    if (radv_sampler_from_handle(binding->pImmutableSamplers[i])->ycbcr_sampler)
                        ycbcr_samplers[i] = *radv_sampler_from_handle(binding->pImmutableSamplers[i])->ycbcr_sampler;
                    else
                        ycbcr_samplers[i].format = VK_FORMAT_UNDEFINED;
                }
                ycbcr_samplers += binding->descriptorCount;
            }
        }

        set_layout->size += descriptor_count * set_layout->binding[b].size;
        buffer_count += descriptor_count * binding_buffer_count;
        dynamic_offset_count += descriptor_count *
            set_layout->binding[b].dynamic_offset_count;
        set_layout->shader_stages |= binding->stageFlags;
    }

    free(bindings);

    set_layout->buffer_count = buffer_count;
    set_layout->dynamic_offset_count = dynamic_offset_count;

    *pSetLayout = radv_descriptor_set_layout_to_handle(set_layout);

    return VK_SUCCESS;
}

void radv_DestroyDescriptorSetLayout(
    VkDevice _device,
    VkDescriptorSetLayout _set_layout,
    const VkAllocationCallbacks* pAllocator)
{
    RADV_FROM_HANDLE(radv_device, device, _device);
    RADV_FROM_HANDLE(radv_descriptor_set_layout, set_layout, _set_layout);

    if (!set_layout)
        return;

    vk_free2(&device->alloc, pAllocator, set_layout);
}

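/* Mirrors the per-type descriptor sizes and alignments used in
 * radv_CreateDescriptorSetLayout above; reports whether the accumulated set
 * size stays within the 32-bit addressable limit and how large a
 * variable-count binding could be. */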
void radv_GetDescriptorSetLayoutSupport(VkDevice device,
                                        const VkDescriptorSetLayoutCreateInfo* pCreateInfo,
                                        VkDescriptorSetLayoutSupport* pSupport)
{
    VkDescriptorSetLayoutBinding *bindings = create_sorted_bindings(pCreateInfo->pBindings,
                                                                    pCreateInfo->bindingCount);
    if (!bindings) {
        pSupport->supported = false;
        return;
    }

    const VkDescriptorSetLayoutBindingFlagsCreateInfoEXT *variable_flags =
        vk_find_struct_const(pCreateInfo->pNext, DESCRIPTOR_SET_LAYOUT_BINDING_FLAGS_CREATE_INFO_EXT);
    VkDescriptorSetVariableDescriptorCountLayoutSupportEXT *variable_count =
        vk_find_struct((void*)pCreateInfo->pNext, DESCRIPTOR_SET_VARIABLE_DESCRIPTOR_COUNT_LAYOUT_SUPPORT_EXT);
    if (variable_count) {
        variable_count->maxVariableDescriptorCount = 0;
    }

    bool supported = true;
    uint64_t size = 0;
    for (uint32_t i = 0; i < pCreateInfo->bindingCount; i++) {
        const VkDescriptorSetLayoutBinding *binding = bindings + i;

        uint64_t descriptor_size = 0;
        uint64_t descriptor_alignment = 1;
        uint32_t descriptor_count = binding->descriptorCount;
        switch (binding->descriptorType) {
        case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
        case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
            break;
        case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
        case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
        case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
        case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
            descriptor_size = 16;
            descriptor_alignment = 16;
            break;
        case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
        case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE:
        case VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT:
            descriptor_size = 64;
            descriptor_alignment = 32;
            break;
        case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
            /* When the immutable samplers are all equal, the sampler words
             * are not stored (radv_CreateDescriptorSetLayout subtracts 32
             * above), so only 64 bytes are needed; otherwise the full 96. */
            if (!has_equal_immutable_samplers(binding->pImmutableSamplers, descriptor_count)) {
                descriptor_size = 96;
            } else {
                descriptor_size = 64;
            }
            descriptor_alignment = 32;
            break;
        case VK_DESCRIPTOR_TYPE_SAMPLER:
            if (!has_equal_immutable_samplers(binding->pImmutableSamplers, descriptor_count)) {
                descriptor_size = 16;
                descriptor_alignment = 16;
            }
            break;
        case VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK_EXT:
            descriptor_alignment = 16;
            descriptor_size = descriptor_count;
            descriptor_count = 1;
            break;
        default:
            unreachable("unknown descriptor type\n");
            break;
        }

        if (size && !align_u64(size, descriptor_alignment)) {
            supported = false;
        }
        size = align_u64(size, descriptor_alignment);

        uint64_t max_count = INT32_MAX;
        if (binding->descriptorType == VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK_EXT)
            max_count = INT32_MAX - size;
        else if (descriptor_size)
            max_count = (INT32_MAX - size) / descriptor_size;

        if (max_count < descriptor_count) {
            supported = false;
        }
        if (variable_flags && binding->binding < variable_flags->bindingCount && variable_count &&
            (variable_flags->pBindingFlags[binding->binding] & VK_DESCRIPTOR_BINDING_VARIABLE_DESCRIPTOR_COUNT_BIT_EXT)) {
            variable_count->maxVariableDescriptorCount = MIN2(UINT32_MAX, max_count);
        }
        size += descriptor_count * descriptor_size;
    }

    free(bindings);

    pSupport->supported = supported;
}

/*
 * Pipeline layouts. These have nothing to do with the pipeline. They are
 * just multiple descriptor set layouts pasted together.
 */

VkResult radv_CreatePipelineLayout(
    VkDevice _device,
    const VkPipelineLayoutCreateInfo* pCreateInfo,
    const VkAllocationCallbacks* pAllocator,
    VkPipelineLayout* pPipelineLayout)
{
    RADV_FROM_HANDLE(radv_device, device, _device);
    struct radv_pipeline_layout *layout;
    struct mesa_sha1 ctx;

    assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO);

    layout = vk_alloc2(&device->alloc, pAllocator, sizeof(*layout), 8,
                       VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
    if (layout == NULL)
        return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);

    layout->num_sets = pCreateInfo->setLayoutCount;

    unsigned dynamic_offset_count = 0;
    uint16_t dynamic_shader_stages = 0;

    _mesa_sha1_init(&ctx);
    for (uint32_t set = 0; set < pCreateInfo->setLayoutCount; set++) {
        RADV_FROM_HANDLE(radv_descriptor_set_layout, set_layout,
                         pCreateInfo->pSetLayouts[set]);
        layout->set[set].layout = set_layout;

        layout->set[set].dynamic_offset_start = dynamic_offset_count;
        for (uint32_t b = 0; b < set_layout->binding_count; b++) {
            dynamic_offset_count += set_layout->binding[b].array_size * set_layout->binding[b].dynamic_offset_count;
            dynamic_shader_stages |= set_layout->dynamic_shader_stages;
        }
        _mesa_sha1_update(&ctx, set_layout, set_layout->layout_size);
    }

    layout->dynamic_offset_count = dynamic_offset_count;
    layout->dynamic_shader_stages = dynamic_shader_stages;
    layout->push_constant_size = 0;

    for (unsigned i = 0; i < pCreateInfo->pushConstantRangeCount; ++i) {
        const VkPushConstantRange *range = pCreateInfo->pPushConstantRanges + i;
        layout->push_constant_size = MAX2(layout->push_constant_size,
                                          range->offset + range->size);
    }

    layout->push_constant_size = align(layout->push_constant_size, 16);
    _mesa_sha1_update(&ctx, &layout->push_constant_size,
                      sizeof(layout->push_constant_size));
    _mesa_sha1_final(&ctx, layout->sha1);
    *pPipelineLayout = radv_pipeline_layout_to_handle(layout);

    return VK_SUCCESS;
}

void radv_DestroyPipelineLayout(
    VkDevice _device,
    VkPipelineLayout _pipelineLayout,
    const VkAllocationCallbacks* pAllocator)
{
    RADV_FROM_HANDLE(radv_device, device, _device);
    RADV_FROM_HANDLE(radv_pipeline_layout, pipeline_layout, _pipelineLayout);

    if (!pipeline_layout)
        return;
    vk_free2(&device->alloc, pAllocator, pipeline_layout);
}

#define EMPTY 1

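/* Host-side set metadata comes either from the pool's preallocated block
 * (pools created without VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT)
 * or from a heap allocation. GPU memory for the descriptors is carved out of
 * the pool's BO: linearly while the pool is only allocated/reset, and via a
 * first-fit scan of the sorted entry list once sets have been freed. */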
static VkResult
radv_descriptor_set_create(struct radv_device *device,
                           struct radv_descriptor_pool *pool,
                           const struct radv_descriptor_set_layout *layout,
                           const uint32_t *variable_count,
                           struct radv_descriptor_set **out_set)
{
    struct radv_descriptor_set *set;
    uint32_t buffer_count = layout->buffer_count;
    if (variable_count) {
        unsigned stride = 1;
        if (layout->binding[layout->binding_count - 1].type == VK_DESCRIPTOR_TYPE_SAMPLER ||
            layout->binding[layout->binding_count - 1].type == VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK_EXT)
            stride = 0;
        buffer_count = layout->binding[layout->binding_count - 1].buffer_offset +
                       *variable_count * stride;
    }
    unsigned range_offset = sizeof(struct radv_descriptor_set) +
        sizeof(struct radeon_winsys_bo *) * buffer_count;
    unsigned mem_size = range_offset +
        sizeof(struct radv_descriptor_range) * layout->dynamic_offset_count;

    if (pool->host_memory_base) {
        if (pool->host_memory_end - pool->host_memory_ptr < mem_size)
            return vk_error(device->instance, VK_ERROR_OUT_OF_POOL_MEMORY);

        set = (struct radv_descriptor_set*)pool->host_memory_ptr;
        pool->host_memory_ptr += mem_size;
    } else {
        set = vk_alloc2(&device->alloc, NULL, mem_size, 8,
                        VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);

        if (!set)
            return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
    }

    memset(set, 0, mem_size);

    if (layout->dynamic_offset_count) {
        set->dynamic_descriptors = (struct radv_descriptor_range*)((uint8_t*)set + range_offset);
    }

    set->layout = layout;
    uint32_t layout_size = layout->size;
    if (variable_count) {
        assert(layout->has_variable_descriptors);
        uint32_t stride = layout->binding[layout->binding_count - 1].size;
        if (layout->binding[layout->binding_count - 1].type == VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK_EXT)
            stride = 1;

        layout_size = layout->binding[layout->binding_count - 1].offset +
                      *variable_count * stride;
    }
    layout_size = align_u32(layout_size, 32);
    if (layout_size) {
        set->size = layout_size;

        if (!pool->host_memory_base && pool->entry_count == pool->max_entry_count) {
            vk_free2(&device->alloc, NULL, set);
            return vk_error(device->instance, VK_ERROR_OUT_OF_POOL_MEMORY);
        }

        /* try to allocate linearly first, so that we don't spend
         * time looking for gaps if the app only allocates &
         * resets via the pool. */
        if (pool->current_offset + layout_size <= pool->size) {
            set->bo = pool->bo;
            set->mapped_ptr = (uint32_t*)(pool->mapped_ptr + pool->current_offset);
            set->va = radv_buffer_get_va(set->bo) + pool->current_offset;
            if (!pool->host_memory_base) {
                pool->entries[pool->entry_count].offset = pool->current_offset;
                pool->entries[pool->entry_count].size = layout_size;
                pool->entries[pool->entry_count].set = set;
                pool->entry_count++;
            }
            pool->current_offset += layout_size;
        } else if (!pool->host_memory_base) {
            uint64_t offset = 0;
            int index;

            for (index = 0; index < pool->entry_count; ++index) {
                if (pool->entries[index].offset - offset >= layout_size)
                    break;
                offset = pool->entries[index].offset + pool->entries[index].size;
            }

            if (pool->size - offset < layout_size) {
                vk_free2(&device->alloc, NULL, set);
                return vk_error(device->instance, VK_ERROR_OUT_OF_POOL_MEMORY);
            }
            set->bo = pool->bo;
            set->mapped_ptr = (uint32_t*)(pool->mapped_ptr + offset);
            set->va = radv_buffer_get_va(set->bo) + offset;
            memmove(&pool->entries[index + 1], &pool->entries[index],
                    sizeof(pool->entries[0]) * (pool->entry_count - index));
            pool->entries[index].offset = offset;
            pool->entries[index].size = layout_size;
            pool->entries[index].set = set;
            pool->entry_count++;
        } else
            return vk_error(device->instance, VK_ERROR_OUT_OF_POOL_MEMORY);
    }

    if (layout->has_immutable_samplers) {
        for (unsigned i = 0; i < layout->binding_count; ++i) {
            if (!layout->binding[i].immutable_samplers_offset ||
                layout->binding[i].immutable_samplers_equal)
                continue;

            unsigned offset = layout->binding[i].offset / 4;
            if (layout->binding[i].type == VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER)
                offset += radv_combined_image_descriptor_sampler_offset(layout->binding + i) / 4;

            const uint32_t *samplers = (const uint32_t*)((const char*)layout + layout->binding[i].immutable_samplers_offset);
            for (unsigned j = 0; j < layout->binding[i].array_size; ++j) {
                memcpy(set->mapped_ptr + offset, samplers + 4 * j, 16);
                offset += layout->binding[i].size / 4;
            }
        }
    }
    *out_set = set;
    return VK_SUCCESS;
}

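/* Returns a set to the pool; callers guarantee the pool has no preallocated
 * host memory (see the assert). With free_bo, the set's entry is removed
 * from the sorted entry list so its GPU range becomes reusable. */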
static void
radv_descriptor_set_destroy(struct radv_device *device,
                            struct radv_descriptor_pool *pool,
                            struct radv_descriptor_set *set,
                            bool free_bo)
{
    assert(!pool->host_memory_base);

    if (free_bo && set->size && !pool->host_memory_base) {
        uint32_t offset = (uint8_t*)set->mapped_ptr - pool->mapped_ptr;
        for (int i = 0; i < pool->entry_count; ++i) {
            if (pool->entries[i].offset == offset) {
                memmove(&pool->entries[i], &pool->entries[i+1],
                        sizeof(pool->entries[i]) * (pool->entry_count - i - 1));
                --pool->entry_count;
                break;
            }
        }
    }
    vk_free2(&device->alloc, NULL, set);
}

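/* The pool's single BO is sized from the declared pool sizes using the same
 * per-type descriptor sizes as the set layout code, plus worst-case
 * alignment padding (28 bytes) per inline uniform block binding. Pools that
 * cannot free individual sets also preallocate one host block for all
 * per-set metadata. */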
VkResult radv_CreateDescriptorPool(
    VkDevice _device,
    const VkDescriptorPoolCreateInfo* pCreateInfo,
    const VkAllocationCallbacks* pAllocator,
    VkDescriptorPool* pDescriptorPool)
{
    RADV_FROM_HANDLE(radv_device, device, _device);
    struct radv_descriptor_pool *pool;
    uint64_t size = sizeof(struct radv_descriptor_pool);
    uint64_t bo_size = 0, bo_count = 0, range_count = 0;

    vk_foreach_struct(ext, pCreateInfo->pNext) {
        switch (ext->sType) {
        case VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_INLINE_UNIFORM_BLOCK_CREATE_INFO_EXT: {
            const struct VkDescriptorPoolInlineUniformBlockCreateInfoEXT *info =
                (const struct VkDescriptorPoolInlineUniformBlockCreateInfoEXT*)ext;
            /* the sizes are 4 aligned, and we need to align to at
             * most 32, which needs at most 28 bytes extra per
             * binding. */
            bo_size += 28llu * info->maxInlineUniformBlockBindings;
            break;
        }
        default:
            break;
        }
    }

    for (unsigned i = 0; i < pCreateInfo->poolSizeCount; ++i) {
        if (pCreateInfo->pPoolSizes[i].type != VK_DESCRIPTOR_TYPE_SAMPLER)
            bo_count += pCreateInfo->pPoolSizes[i].descriptorCount;

        switch (pCreateInfo->pPoolSizes[i].type) {
        case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
        case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
            range_count += pCreateInfo->pPoolSizes[i].descriptorCount;
            break;
        case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
        case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
        case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
        case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
        case VK_DESCRIPTOR_TYPE_SAMPLER:
            /* 32 as we may need to align for images */
            bo_size += 32 * pCreateInfo->pPoolSizes[i].descriptorCount;
            break;
        case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE:
        case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
        case VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT:
            bo_size += 64 * pCreateInfo->pPoolSizes[i].descriptorCount;
            break;
        case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
            bo_size += 96 * pCreateInfo->pPoolSizes[i].descriptorCount;
            break;
        case VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK_EXT:
            bo_size += pCreateInfo->pPoolSizes[i].descriptorCount;
            break;
        default:
            unreachable("unknown descriptor type\n");
            break;
        }
    }

    if (!(pCreateInfo->flags & VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT)) {
        uint64_t host_size = pCreateInfo->maxSets * sizeof(struct radv_descriptor_set);
        host_size += sizeof(struct radeon_winsys_bo*) * bo_count;
        host_size += sizeof(struct radv_descriptor_range) * range_count;
        size += host_size;
    } else {
        size += sizeof(struct radv_descriptor_pool_entry) * pCreateInfo->maxSets;
    }

    pool = vk_alloc2(&device->alloc, pAllocator, size, 8,
                     VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
    if (!pool)
        return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);

    memset(pool, 0, sizeof(*pool));

    if (!(pCreateInfo->flags & VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT)) {
        pool->host_memory_base = (uint8_t*)pool + sizeof(struct radv_descriptor_pool);
        pool->host_memory_ptr = pool->host_memory_base;
        pool->host_memory_end = (uint8_t*)pool + size;
    }

    if (bo_size) {
        pool->bo = device->ws->buffer_create(device->ws, bo_size, 32,
                                             RADEON_DOMAIN_VRAM,
                                             RADEON_FLAG_NO_INTERPROCESS_SHARING |
                                             RADEON_FLAG_READ_ONLY |
                                             RADEON_FLAG_32BIT,
                                             RADV_BO_PRIORITY_DESCRIPTOR);
        pool->mapped_ptr = (uint8_t*)device->ws->buffer_map(pool->bo);
    }
    pool->size = bo_size;
    pool->max_entry_count = pCreateInfo->maxSets;

    *pDescriptorPool = radv_descriptor_pool_to_handle(pool);
    return VK_SUCCESS;
}

void radv_DestroyDescriptorPool(
    VkDevice _device,
    VkDescriptorPool _pool,
    const VkAllocationCallbacks* pAllocator)
{
    RADV_FROM_HANDLE(radv_device, device, _device);
    RADV_FROM_HANDLE(radv_descriptor_pool, pool, _pool);

    if (!pool)
        return;

    if (!pool->host_memory_base) {
        for (int i = 0; i < pool->entry_count; ++i) {
            radv_descriptor_set_destroy(device, pool, pool->entries[i].set, false);
        }
    }

    if (pool->bo)
        device->ws->buffer_destroy(pool->bo);
    vk_free2(&device->alloc, pAllocator, pool);
}

VkResult radv_ResetDescriptorPool(
    VkDevice _device,
    VkDescriptorPool descriptorPool,
    VkDescriptorPoolResetFlags flags)
{
    RADV_FROM_HANDLE(radv_device, device, _device);
    RADV_FROM_HANDLE(radv_descriptor_pool, pool, descriptorPool);

    if (!pool->host_memory_base) {
        for (int i = 0; i < pool->entry_count; ++i) {
            radv_descriptor_set_destroy(device, pool, pool->entries[i].set, false);
        }
        pool->entry_count = 0;
    }

    pool->current_offset = 0;
    pool->host_memory_ptr = pool->host_memory_base;

    return VK_SUCCESS;
}

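/* On any failure the already-created sets are freed and, as the spec
 * requires, every returned handle is written as VK_NULL_HANDLE. */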
VkResult radv_AllocateDescriptorSets(
    VkDevice _device,
    const VkDescriptorSetAllocateInfo* pAllocateInfo,
    VkDescriptorSet* pDescriptorSets)
{
    RADV_FROM_HANDLE(radv_device, device, _device);
    RADV_FROM_HANDLE(radv_descriptor_pool, pool, pAllocateInfo->descriptorPool);

    VkResult result = VK_SUCCESS;
    uint32_t i;
    struct radv_descriptor_set *set = NULL;

    const VkDescriptorSetVariableDescriptorCountAllocateInfoEXT *variable_counts =
        vk_find_struct_const(pAllocateInfo->pNext, DESCRIPTOR_SET_VARIABLE_DESCRIPTOR_COUNT_ALLOCATE_INFO_EXT);
    const uint32_t zero = 0;

    /* allocate a set of buffers for each shader to contain descriptors */
    for (i = 0; i < pAllocateInfo->descriptorSetCount; i++) {
        RADV_FROM_HANDLE(radv_descriptor_set_layout, layout,
                         pAllocateInfo->pSetLayouts[i]);

        const uint32_t *variable_count = NULL;
        if (variable_counts) {
            if (i < variable_counts->descriptorSetCount)
                variable_count = variable_counts->pDescriptorCounts + i;
            else
                variable_count = &zero;
        }

        assert(!(layout->flags & VK_DESCRIPTOR_SET_LAYOUT_CREATE_PUSH_DESCRIPTOR_BIT_KHR));

        result = radv_descriptor_set_create(device, pool, layout, variable_count, &set);
        if (result != VK_SUCCESS)
            break;

        pDescriptorSets[i] = radv_descriptor_set_to_handle(set);
    }

    if (result != VK_SUCCESS) {
        radv_FreeDescriptorSets(_device, pAllocateInfo->descriptorPool,
                                i, pDescriptorSets);
        for (i = 0; i < pAllocateInfo->descriptorSetCount; i++) {
            pDescriptorSets[i] = VK_NULL_HANDLE;
        }
    }
    return result;
}

VkResult radv_FreeDescriptorSets(
    VkDevice _device,
    VkDescriptorPool descriptorPool,
    uint32_t count,
    const VkDescriptorSet* pDescriptorSets)
{
    RADV_FROM_HANDLE(radv_device, device, _device);
    RADV_FROM_HANDLE(radv_descriptor_pool, pool, descriptorPool);

    for (uint32_t i = 0; i < count; i++) {
        RADV_FROM_HANDLE(radv_descriptor_set, set, pDescriptorSets[i]);

        if (set && !pool->host_memory_base)
            radv_descriptor_set_destroy(device, pool, set, true);
    }
    return VK_SUCCESS;
}

static void write_texel_buffer_descriptor(struct radv_device *device,
                                          struct radv_cmd_buffer *cmd_buffer,
                                          unsigned *dst,
                                          struct radeon_winsys_bo **buffer_list,
                                          const VkBufferView _buffer_view)
{
    RADV_FROM_HANDLE(radv_buffer_view, buffer_view, _buffer_view);

    memcpy(dst, buffer_view->state, 4 * 4);

    if (cmd_buffer)
        radv_cs_add_buffer(device->ws, cmd_buffer->cs, buffer_view->bo);
    else
        *buffer_list = buffer_view->bo;
}

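/* Emits a 4-dword buffer resource descriptor (V#): 64-bit address in
 * dst[0..1], range in dst[2], and the swizzle/format word in dst[3], which
 * is encoded differently on GFX10 than on older generations. */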
static void write_buffer_descriptor(struct radv_device *device,
                                    struct radv_cmd_buffer *cmd_buffer,
                                    unsigned *dst,
                                    struct radeon_winsys_bo **buffer_list,
                                    const VkDescriptorBufferInfo *buffer_info)
{
    RADV_FROM_HANDLE(radv_buffer, buffer, buffer_info->buffer);
    uint64_t va = radv_buffer_get_va(buffer->bo);
    uint32_t range = buffer_info->range;

    if (buffer_info->range == VK_WHOLE_SIZE)
        range = buffer->size - buffer_info->offset;

    va += buffer_info->offset + buffer->offset;
    dst[0] = va;
    dst[1] = S_008F04_BASE_ADDRESS_HI(va >> 32);
    dst[2] = range;
    dst[3] = S_008F0C_DST_SEL_X(V_008F0C_SQ_SEL_X) |
             S_008F0C_DST_SEL_Y(V_008F0C_SQ_SEL_Y) |
             S_008F0C_DST_SEL_Z(V_008F0C_SQ_SEL_Z) |
             S_008F0C_DST_SEL_W(V_008F0C_SQ_SEL_W);

    if (device->physical_device->rad_info.chip_class >= GFX10) {
        dst[3] |= S_008F0C_FORMAT(V_008F0C_IMG_FORMAT_32_FLOAT) |
                  S_008F0C_OOB_SELECT(3) |
                  S_008F0C_RESOURCE_LEVEL(1);
    } else {
        dst[3] |= S_008F0C_NUM_FORMAT(V_008F0C_BUF_NUM_FORMAT_FLOAT) |
                  S_008F0C_DATA_FORMAT(V_008F0C_BUF_DATA_FORMAT_32);
    }

    if (cmd_buffer)
        radv_cs_add_buffer(device->ws, cmd_buffer->cs, buffer->bo);
    else
        *buffer_list = buffer->bo;
}

static void write_block_descriptor(struct radv_device *device,
                                   struct radv_cmd_buffer *cmd_buffer,
                                   void *dst,
                                   const VkWriteDescriptorSet *writeset)
{
    const VkWriteDescriptorSetInlineUniformBlockEXT *inline_ub =
        vk_find_struct_const(writeset->pNext, WRITE_DESCRIPTOR_SET_INLINE_UNIFORM_BLOCK_EXT);

    memcpy(dst, inline_ub->pData, inline_ub->dataSize);
}

static void write_dynamic_buffer_descriptor(struct radv_device *device,
                                            struct radv_descriptor_range *range,
                                            struct radeon_winsys_bo **buffer_list,
                                            const VkDescriptorBufferInfo *buffer_info)
{
    RADV_FROM_HANDLE(radv_buffer, buffer, buffer_info->buffer);
    uint64_t va = radv_buffer_get_va(buffer->bo);
    unsigned size = buffer_info->range;

    if (buffer_info->range == VK_WHOLE_SIZE)
        size = buffer->size - buffer_info->offset;

    va += buffer_info->offset + buffer->offset;
    range->va = va;
    range->size = size;

    *buffer_list = buffer->bo;
}

static void
write_image_descriptor(struct radv_device *device,
                       struct radv_cmd_buffer *cmd_buffer,
                       unsigned size, unsigned *dst,
                       struct radeon_winsys_bo **buffer_list,
                       VkDescriptorType descriptor_type,
                       const VkDescriptorImageInfo *image_info)
{
    RADV_FROM_HANDLE(radv_image_view, iview, image_info->imageView);
    union radv_descriptor *descriptor;

    if (descriptor_type == VK_DESCRIPTOR_TYPE_STORAGE_IMAGE) {
        descriptor = &iview->storage_descriptor;
    } else {
        descriptor = &iview->descriptor;
    }

    memcpy(dst, descriptor, size);

    if (cmd_buffer)
        radv_cs_add_buffer(device->ws, cmd_buffer->cs, iview->bo);
    else
        *buffer_list = iview->bo;
}

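/* A combined image+sampler descriptor stores the image part in the first
 * sampler_offset bytes and the 16 bytes of sampler state right after it;
 * the sampler copy is skipped when the binding carries immutable samplers
 * (has_sampler is false in that case). */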
static void
write_combined_image_sampler_descriptor(struct radv_device *device,
                                        struct radv_cmd_buffer *cmd_buffer,
                                        unsigned sampler_offset,
                                        unsigned *dst,
                                        struct radeon_winsys_bo **buffer_list,
                                        VkDescriptorType descriptor_type,
                                        const VkDescriptorImageInfo *image_info,
                                        bool has_sampler)
{
    RADV_FROM_HANDLE(radv_sampler, sampler, image_info->sampler);

    write_image_descriptor(device, cmd_buffer, sampler_offset, dst, buffer_list,
                           descriptor_type, image_info);
    /* copy over sampler state */
    if (has_sampler) {
        memcpy(dst + sampler_offset / sizeof(*dst), sampler->state, 16);
    }
}

static void
write_sampler_descriptor(struct radv_device *device,
                         unsigned *dst,
                         const VkDescriptorImageInfo *image_info)
{
    RADV_FROM_HANDLE(radv_sampler, sampler, image_info->sampler);

    memcpy(dst, sampler->state, 16);
}

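/* Shared between vkUpdateDescriptorSets and push descriptors: when a
 * cmd_buffer is given, dstSetOverride replaces writeset->dstSet and
 * referenced BOs are added to the command stream rather than to the set's
 * buffer list. */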
void radv_update_descriptor_sets(
    struct radv_device* device,
    struct radv_cmd_buffer* cmd_buffer,
    VkDescriptorSet dstSetOverride,
    uint32_t descriptorWriteCount,
    const VkWriteDescriptorSet* pDescriptorWrites,
    uint32_t descriptorCopyCount,
    const VkCopyDescriptorSet* pDescriptorCopies)
{
    uint32_t i, j;
    for (i = 0; i < descriptorWriteCount; i++) {
        const VkWriteDescriptorSet *writeset = &pDescriptorWrites[i];
        RADV_FROM_HANDLE(radv_descriptor_set, set,
                         dstSetOverride ? dstSetOverride : writeset->dstSet);
        const struct radv_descriptor_set_binding_layout *binding_layout =
            set->layout->binding + writeset->dstBinding;
        uint32_t *ptr = set->mapped_ptr;
        struct radeon_winsys_bo **buffer_list = set->descriptors;
        /* Immutable samplers are not copied into push descriptors when they are
         * allocated, so if we are writing push descriptors we have to copy the
         * immutable samplers into them now.
         */
        const bool copy_immutable_samplers = cmd_buffer &&
            binding_layout->immutable_samplers_offset && !binding_layout->immutable_samplers_equal;
        const uint32_t *samplers = radv_immutable_samplers(set->layout, binding_layout);

        ptr += binding_layout->offset / 4;

        if (writeset->descriptorType == VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK_EXT) {
            write_block_descriptor(device, cmd_buffer, (uint8_t*)ptr + writeset->dstArrayElement, writeset);
            continue;
        }

        ptr += binding_layout->size * writeset->dstArrayElement / 4;
        buffer_list += binding_layout->buffer_offset;
        buffer_list += writeset->dstArrayElement;
        for (j = 0; j < writeset->descriptorCount; ++j) {
            switch (writeset->descriptorType) {
            case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
            case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC: {
                unsigned idx = writeset->dstArrayElement + j;
                idx += binding_layout->dynamic_offset_offset;
                assert(!(set->layout->flags & VK_DESCRIPTOR_SET_LAYOUT_CREATE_PUSH_DESCRIPTOR_BIT_KHR));
                write_dynamic_buffer_descriptor(device, set->dynamic_descriptors + idx,
                                                buffer_list, writeset->pBufferInfo + j);
                break;
            }
            case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
            case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
                write_buffer_descriptor(device, cmd_buffer, ptr, buffer_list,
                                        writeset->pBufferInfo + j);
                break;
            case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
            case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
                write_texel_buffer_descriptor(device, cmd_buffer, ptr, buffer_list,
                                              writeset->pTexelBufferView[j]);
                break;
            case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE:
            case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
            case VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT:
                write_image_descriptor(device, cmd_buffer, 64, ptr, buffer_list,
                                       writeset->descriptorType,
                                       writeset->pImageInfo + j);
                break;
            case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER: {
                unsigned sampler_offset = radv_combined_image_descriptor_sampler_offset(binding_layout);
                write_combined_image_sampler_descriptor(device, cmd_buffer, sampler_offset,
                                                        ptr, buffer_list,
                                                        writeset->descriptorType,
                                                        writeset->pImageInfo + j,
                                                        !binding_layout->immutable_samplers_offset);
                if (copy_immutable_samplers) {
                    const unsigned idx = writeset->dstArrayElement + j;
                    memcpy((char*)ptr + sampler_offset, samplers + 4 * idx, 16);
                }
                break;
            }
            case VK_DESCRIPTOR_TYPE_SAMPLER:
                if (!binding_layout->immutable_samplers_offset) {
                    write_sampler_descriptor(device, ptr,
                                             writeset->pImageInfo + j);
                } else if (copy_immutable_samplers) {
                    unsigned idx = writeset->dstArrayElement + j;
                    memcpy(ptr, samplers + 4 * idx, 16);
                }
                break;
            default:
                unreachable("unimplemented descriptor type");
                break;
            }
            ptr += binding_layout->size / 4;
            ++buffer_list;
        }
    }

    for (i = 0; i < descriptorCopyCount; i++) {
        const VkCopyDescriptorSet *copyset = &pDescriptorCopies[i];
        RADV_FROM_HANDLE(radv_descriptor_set, src_set,
                         copyset->srcSet);
        RADV_FROM_HANDLE(radv_descriptor_set, dst_set,
                         copyset->dstSet);
        const struct radv_descriptor_set_binding_layout *src_binding_layout =
            src_set->layout->binding + copyset->srcBinding;
        const struct radv_descriptor_set_binding_layout *dst_binding_layout =
            dst_set->layout->binding + copyset->dstBinding;
        uint32_t *src_ptr = src_set->mapped_ptr;
        uint32_t *dst_ptr = dst_set->mapped_ptr;
        struct radeon_winsys_bo **src_buffer_list = src_set->descriptors;
        struct radeon_winsys_bo **dst_buffer_list = dst_set->descriptors;

        src_ptr += src_binding_layout->offset / 4;
        dst_ptr += dst_binding_layout->offset / 4;

        src_ptr += src_binding_layout->size * copyset->srcArrayElement / 4;
        dst_ptr += dst_binding_layout->size * copyset->dstArrayElement / 4;

        src_buffer_list += src_binding_layout->buffer_offset;
        src_buffer_list += copyset->srcArrayElement;

        dst_buffer_list += dst_binding_layout->buffer_offset;
        dst_buffer_list += copyset->dstArrayElement;

        for (j = 0; j < copyset->descriptorCount; ++j) {
            switch (src_binding_layout->type) {
            case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
            case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC: {
                unsigned src_idx = copyset->srcArrayElement + j;
                unsigned dst_idx = copyset->dstArrayElement + j;
                struct radv_descriptor_range *src_range, *dst_range;
                src_idx += src_binding_layout->dynamic_offset_offset;
                dst_idx += dst_binding_layout->dynamic_offset_offset;

                src_range = src_set->dynamic_descriptors + src_idx;
                dst_range = dst_set->dynamic_descriptors + dst_idx;
                *dst_range = *src_range;
                break;
            }
            default:
                memcpy(dst_ptr, src_ptr, src_binding_layout->size);
            }
            src_ptr += src_binding_layout->size / 4;
            dst_ptr += dst_binding_layout->size / 4;

            if (src_binding_layout->type != VK_DESCRIPTOR_TYPE_SAMPLER) {
                /* Sampler descriptors don't have a buffer list. */
                dst_buffer_list[j] = src_buffer_list[j];
            }
        }
    }
}

void radv_UpdateDescriptorSets(
    VkDevice _device,
    uint32_t descriptorWriteCount,
    const VkWriteDescriptorSet* pDescriptorWrites,
    uint32_t descriptorCopyCount,
    const VkCopyDescriptorSet* pDescriptorCopies)
{
    RADV_FROM_HANDLE(radv_device, device, _device);

    radv_update_descriptor_sets(device, NULL, VK_NULL_HANDLE, descriptorWriteCount, pDescriptorWrites,
                                descriptorCopyCount, pDescriptorCopies);
}

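/* Precomputes, per template entry, the dword offset and stride into the
 * set's mapped memory (or the index into dynamic_descriptors for dynamic
 * buffers), so that applying the template later is a simple strided copy. */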
VkResult radv_CreateDescriptorUpdateTemplate(VkDevice _device,
                                             const VkDescriptorUpdateTemplateCreateInfo *pCreateInfo,
                                             const VkAllocationCallbacks *pAllocator,
                                             VkDescriptorUpdateTemplate *pDescriptorUpdateTemplate)
{
    RADV_FROM_HANDLE(radv_device, device, _device);
    RADV_FROM_HANDLE(radv_descriptor_set_layout, set_layout, pCreateInfo->descriptorSetLayout);
    const uint32_t entry_count = pCreateInfo->descriptorUpdateEntryCount;
    const size_t size = sizeof(struct radv_descriptor_update_template) +
        sizeof(struct radv_descriptor_update_template_entry) * entry_count;
    struct radv_descriptor_update_template *templ;
    uint32_t i;

    templ = vk_alloc2(&device->alloc, pAllocator, size, 8, VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
    if (!templ)
        return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);

    templ->entry_count = entry_count;
    templ->bind_point = pCreateInfo->pipelineBindPoint;

    for (i = 0; i < entry_count; i++) {
        const VkDescriptorUpdateTemplateEntry *entry = &pCreateInfo->pDescriptorUpdateEntries[i];
        const struct radv_descriptor_set_binding_layout *binding_layout =
            set_layout->binding + entry->dstBinding;
        const uint32_t buffer_offset = binding_layout->buffer_offset + entry->dstArrayElement;
        const uint32_t *immutable_samplers = NULL;
        uint32_t dst_offset;
        uint32_t dst_stride;

        /* dst_offset is an offset into dynamic_descriptors when the descriptor
           is dynamic, and an offset into mapped_ptr otherwise */
        switch (entry->descriptorType) {
        case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
        case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
            assert(pCreateInfo->templateType == VK_DESCRIPTOR_UPDATE_TEMPLATE_TYPE_DESCRIPTOR_SET);
            dst_offset = binding_layout->dynamic_offset_offset + entry->dstArrayElement;
            dst_stride = 0; /* Not used */
            break;
        default:
            switch (entry->descriptorType) {
            case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
            case VK_DESCRIPTOR_TYPE_SAMPLER:
                /* Immutable samplers are copied into push descriptors when they are pushed */
                if (pCreateInfo->templateType == VK_DESCRIPTOR_UPDATE_TEMPLATE_TYPE_PUSH_DESCRIPTORS_KHR &&
                    binding_layout->immutable_samplers_offset && !binding_layout->immutable_samplers_equal) {
                    immutable_samplers = radv_immutable_samplers(set_layout, binding_layout) + entry->dstArrayElement * 4;
                }
                break;
            default:
                break;
            }
            dst_offset = binding_layout->offset / 4;
            if (entry->descriptorType == VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK_EXT)
                dst_offset += entry->dstArrayElement / 4;
            else
                dst_offset += binding_layout->size * entry->dstArrayElement / 4;

            dst_stride = binding_layout->size / 4;
            break;
        }

        templ->entry[i] = (struct radv_descriptor_update_template_entry) {
            .descriptor_type = entry->descriptorType,
            .descriptor_count = entry->descriptorCount,
            .src_offset = entry->offset,
            .src_stride = entry->stride,
            .dst_offset = dst_offset,
            .dst_stride = dst_stride,
            .buffer_offset = buffer_offset,
            .has_sampler = !binding_layout->immutable_samplers_offset,
            .sampler_offset = radv_combined_image_descriptor_sampler_offset(binding_layout),
            .immutable_samplers = immutable_samplers
        };
    }

    *pDescriptorUpdateTemplate = radv_descriptor_update_template_to_handle(templ);
    return VK_SUCCESS;
}

void radv_DestroyDescriptorUpdateTemplate(VkDevice _device,
                                          VkDescriptorUpdateTemplate descriptorUpdateTemplate,
                                          const VkAllocationCallbacks *pAllocator)
{
    RADV_FROM_HANDLE(radv_device, device, _device);
    RADV_FROM_HANDLE(radv_descriptor_update_template, templ, descriptorUpdateTemplate);

    if (!templ)
        return;

    vk_free2(&device->alloc, pAllocator, templ);
}

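/* Applies a template: walks the precomputed entries and writes each
 * descriptor from the application data at src_offset/src_stride to set
 * memory at dst_offset/dst_stride, using the same writers as
 * radv_update_descriptor_sets. */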
void radv_update_descriptor_set_with_template(struct radv_device *device,
                                              struct radv_cmd_buffer *cmd_buffer,
                                              struct radv_descriptor_set *set,
                                              VkDescriptorUpdateTemplate descriptorUpdateTemplate,
                                              const void *pData)
{
    RADV_FROM_HANDLE(radv_descriptor_update_template, templ, descriptorUpdateTemplate);
    uint32_t i;

    for (i = 0; i < templ->entry_count; ++i) {
        struct radeon_winsys_bo **buffer_list = set->descriptors + templ->entry[i].buffer_offset;
        uint32_t *pDst = set->mapped_ptr + templ->entry[i].dst_offset;
        const uint8_t *pSrc = ((const uint8_t *) pData) + templ->entry[i].src_offset;
        uint32_t j;

        if (templ->entry[i].descriptor_type == VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK_EXT) {
            memcpy((uint8_t*)pDst, pSrc, templ->entry[i].descriptor_count);
            continue;
        }

        for (j = 0; j < templ->entry[i].descriptor_count; ++j) {
            switch (templ->entry[i].descriptor_type) {
            case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
            case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC: {
                const unsigned idx = templ->entry[i].dst_offset + j;
                assert(!(set->layout->flags & VK_DESCRIPTOR_SET_LAYOUT_CREATE_PUSH_DESCRIPTOR_BIT_KHR));
                write_dynamic_buffer_descriptor(device, set->dynamic_descriptors + idx,
                                                buffer_list, (struct VkDescriptorBufferInfo *) pSrc);
                break;
            }
            case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
            case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
                write_buffer_descriptor(device, cmd_buffer, pDst, buffer_list,
                                        (struct VkDescriptorBufferInfo *) pSrc);
                break;
            case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
            case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
                write_texel_buffer_descriptor(device, cmd_buffer, pDst, buffer_list,
                                              *(VkBufferView *) pSrc);
                break;
            case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE:
            case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
            case VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT:
                write_image_descriptor(device, cmd_buffer, 64, pDst, buffer_list,
                                       templ->entry[i].descriptor_type,
                                       (struct VkDescriptorImageInfo *) pSrc);
                break;
            case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
                write_combined_image_sampler_descriptor(device, cmd_buffer, templ->entry[i].sampler_offset,
                                                        pDst, buffer_list, templ->entry[i].descriptor_type,
                                                        (struct VkDescriptorImageInfo *) pSrc,
                                                        templ->entry[i].has_sampler);
                if (templ->entry[i].immutable_samplers) {
                    memcpy((char*)pDst + templ->entry[i].sampler_offset, templ->entry[i].immutable_samplers + 4 * j, 16);
                }
                break;
            case VK_DESCRIPTOR_TYPE_SAMPLER:
                if (templ->entry[i].has_sampler)
                    write_sampler_descriptor(device, pDst,
                                             (struct VkDescriptorImageInfo *) pSrc);
                else if (templ->entry[i].immutable_samplers)
                    memcpy(pDst, templ->entry[i].immutable_samplers + 4 * j, 16);
                break;
            default:
                unreachable("unimplemented descriptor type");
                break;
            }
            pSrc += templ->entry[i].src_stride;
            pDst += templ->entry[i].dst_stride;
            ++buffer_list;
        }
    }
}

void radv_UpdateDescriptorSetWithTemplate(VkDevice _device,
                                          VkDescriptorSet descriptorSet,
                                          VkDescriptorUpdateTemplate descriptorUpdateTemplate,
                                          const void *pData)
{
    RADV_FROM_HANDLE(radv_device, device, _device);
    RADV_FROM_HANDLE(radv_descriptor_set, set, descriptorSet);

    radv_update_descriptor_set_with_template(device, NULL, set, descriptorUpdateTemplate, pData);
}

VkResult radv_CreateSamplerYcbcrConversion(VkDevice _device,
                                           const VkSamplerYcbcrConversionCreateInfo* pCreateInfo,
                                           const VkAllocationCallbacks* pAllocator,
                                           VkSamplerYcbcrConversion* pYcbcrConversion)
{
    RADV_FROM_HANDLE(radv_device, device, _device);
    struct radv_sampler_ycbcr_conversion *conversion = NULL;

    conversion = vk_zalloc2(&device->alloc, pAllocator, sizeof(*conversion), 8,
                            VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);

    if (conversion == NULL)
        return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);

    conversion->format = pCreateInfo->format;
    conversion->ycbcr_model = pCreateInfo->ycbcrModel;
    conversion->ycbcr_range = pCreateInfo->ycbcrRange;
    conversion->components = pCreateInfo->components;
    conversion->chroma_offsets[0] = pCreateInfo->xChromaOffset;
    conversion->chroma_offsets[1] = pCreateInfo->yChromaOffset;
    conversion->chroma_filter = pCreateInfo->chromaFilter;

    *pYcbcrConversion = radv_sampler_ycbcr_conversion_to_handle(conversion);
    return VK_SUCCESS;
}

void radv_DestroySamplerYcbcrConversion(VkDevice _device,
                                        VkSamplerYcbcrConversion ycbcrConversion,
                                        const VkAllocationCallbacks* pAllocator)
{
    RADV_FROM_HANDLE(radv_device, device, _device);
    RADV_FROM_HANDLE(radv_sampler_ycbcr_conversion, ycbcr_conversion, ycbcrConversion);

    if (ycbcr_conversion)
        vk_free2(&device->alloc, pAllocator, ycbcr_conversion);
}