radv: use the common base object type for VkDevice
src/amd/vulkan/radv_descriptor_set.c
/*
 * Copyright © 2016 Red Hat.
 * Copyright © 2016 Bas Nieuwenhuizen
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
#include <assert.h>
#include <stdbool.h>
#include <string.h>
#include <unistd.h>
#include <fcntl.h>

#include "util/mesa-sha1.h"
#include "radv_private.h"
#include "sid.h"
#include "vk_format.h"
#include "vk_util.h"

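/* A sampler's hardware state is 4 dwords (16 bytes). A binding's immutable
 * samplers are "equal" when all entries share identical state words; in that
 * case the set layout can avoid reserving per-descriptor space for them.
 */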
static bool has_equal_immutable_samplers(const VkSampler *samplers, uint32_t count)
{
	if (!samplers)
		return false;
	for(uint32_t i = 1; i < count; ++i) {
		if (memcmp(radv_sampler_from_handle(samplers[0])->state,
		           radv_sampler_from_handle(samplers[i])->state, 16)) {
			return false;
		}
	}
	return true;
}

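/* Vulkan allows the pBindings array to list bindings in any order, so build
 * a copy sorted by binding index before computing sizes and offsets.
 */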
static int binding_compare(const void* av, const void *bv)
{
	const VkDescriptorSetLayoutBinding *a = (const VkDescriptorSetLayoutBinding*)av;
	const VkDescriptorSetLayoutBinding *b = (const VkDescriptorSetLayoutBinding*)bv;

	return (a->binding < b->binding) ? -1 : (a->binding > b->binding) ? 1 : 0;
}

static VkDescriptorSetLayoutBinding *
create_sorted_bindings(const VkDescriptorSetLayoutBinding *bindings, unsigned count) {
	VkDescriptorSetLayoutBinding *sorted_bindings = malloc(count * sizeof(VkDescriptorSetLayoutBinding));
	if (!sorted_bindings)
		return NULL;

	memcpy(sorted_bindings, bindings, count * sizeof(VkDescriptorSetLayoutBinding));

	qsort(sorted_bindings, count, sizeof(VkDescriptorSetLayoutBinding), binding_compare);

	return sorted_bindings;
}

VkResult radv_CreateDescriptorSetLayout(
	VkDevice                                    _device,
	const VkDescriptorSetLayoutCreateInfo*      pCreateInfo,
	const VkAllocationCallbacks*                pAllocator,
	VkDescriptorSetLayout*                      pSetLayout)
{
	RADV_FROM_HANDLE(radv_device, device, _device);
	struct radv_descriptor_set_layout *set_layout;

	assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO);
	const VkDescriptorSetLayoutBindingFlagsCreateInfo *variable_flags =
		vk_find_struct_const(pCreateInfo->pNext, DESCRIPTOR_SET_LAYOUT_BINDING_FLAGS_CREATE_INFO);

	uint32_t max_binding = 0;
	uint32_t immutable_sampler_count = 0;
	uint32_t ycbcr_sampler_count = 0;
	for (uint32_t j = 0; j < pCreateInfo->bindingCount; j++) {
		max_binding = MAX2(max_binding, pCreateInfo->pBindings[j].binding);
		if ((pCreateInfo->pBindings[j].descriptorType == VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER ||
		     pCreateInfo->pBindings[j].descriptorType == VK_DESCRIPTOR_TYPE_SAMPLER) &&
		     pCreateInfo->pBindings[j].pImmutableSamplers) {
			immutable_sampler_count += pCreateInfo->pBindings[j].descriptorCount;

			bool has_ycbcr_sampler = false;
			for (unsigned i = 0; i < pCreateInfo->pBindings[j].descriptorCount; ++i) {
				if (radv_sampler_from_handle(pCreateInfo->pBindings[j].pImmutableSamplers[i])->ycbcr_sampler)
					has_ycbcr_sampler = true;
			}

			if (has_ycbcr_sampler)
				ycbcr_sampler_count += pCreateInfo->pBindings[j].descriptorCount;
		}
	}

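	/* Everything lives in a single allocation: the layout struct itself,
	 * the per-binding array, 4 dwords of hardware state per immutable
	 * sampler, and, only when needed, per-binding offsets to trailing
	 * radv_sampler_ycbcr_conversion structs.
	 */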
	uint32_t samplers_offset = sizeof(struct radv_descriptor_set_layout) +
		(max_binding + 1) * sizeof(set_layout->binding[0]);
	size_t size = samplers_offset + immutable_sampler_count * 4 * sizeof(uint32_t);
	if (ycbcr_sampler_count > 0) {
		size += ycbcr_sampler_count * sizeof(struct radv_sampler_ycbcr_conversion) + (max_binding + 1) * sizeof(uint32_t);
	}

	set_layout = vk_zalloc2(&device->vk.alloc, pAllocator, size, 8,
				VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
	if (!set_layout)
		return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);

	set_layout->flags = pCreateInfo->flags;
	set_layout->layout_size = size;

	/* We just allocate all the samplers at the end of the struct */
	uint32_t *samplers = (uint32_t*)&set_layout->binding[max_binding + 1];
	struct radv_sampler_ycbcr_conversion *ycbcr_samplers = NULL;
	uint32_t *ycbcr_sampler_offsets = NULL;

	if (ycbcr_sampler_count > 0) {
		ycbcr_sampler_offsets = samplers + 4 * immutable_sampler_count;
		set_layout->ycbcr_sampler_offsets_offset = (char*)ycbcr_sampler_offsets - (char*)set_layout;
		ycbcr_samplers = (struct radv_sampler_ycbcr_conversion *)(ycbcr_sampler_offsets + max_binding + 1);
	} else
		set_layout->ycbcr_sampler_offsets_offset = 0;

	VkDescriptorSetLayoutBinding *bindings = create_sorted_bindings(pCreateInfo->pBindings,
									pCreateInfo->bindingCount);
	if (!bindings) {
		vk_free2(&device->vk.alloc, pAllocator, set_layout);
		return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
	}

	set_layout->binding_count = max_binding + 1;
	set_layout->shader_stages = 0;
	set_layout->dynamic_shader_stages = 0;
	set_layout->has_immutable_samplers = false;
	set_layout->size = 0;

	memset(set_layout->binding, 0, size - sizeof(struct radv_descriptor_set_layout));

	uint32_t buffer_count = 0;
	uint32_t dynamic_offset_count = 0;

	for (uint32_t j = 0; j < pCreateInfo->bindingCount; j++) {
		const VkDescriptorSetLayoutBinding *binding = bindings + j;
		uint32_t b = binding->binding;
		uint32_t alignment = 0;
		unsigned binding_buffer_count = 0;
		uint32_t descriptor_count = binding->descriptorCount;
		bool has_ycbcr_sampler = false;

		/* main image + fmask */
		uint32_t max_sampled_image_descriptors = 2;

		if (binding->descriptorType == VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER &&
		    binding->pImmutableSamplers) {
			for (unsigned i = 0; i < binding->descriptorCount; ++i) {
				struct radv_sampler_ycbcr_conversion *conversion =
					radv_sampler_from_handle(binding->pImmutableSamplers[i])->ycbcr_sampler;

				if (conversion) {
					has_ycbcr_sampler = true;
					max_sampled_image_descriptors = MAX2(max_sampled_image_descriptors,
									     vk_format_get_plane_count(conversion->format));
				}
			}
		}

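		/* Sizes below are bytes of descriptor memory: 16 for a buffer
		 * descriptor (V#), 64 for an image (main descriptor plus
		 * fmask), 96 when a sampler is appended to a combined image,
		 * and 16 for a bare sampler (S#).
		 */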
		switch (binding->descriptorType) {
		case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
		case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
			assert(!(pCreateInfo->flags & VK_DESCRIPTOR_SET_LAYOUT_CREATE_PUSH_DESCRIPTOR_BIT_KHR));
			set_layout->binding[b].dynamic_offset_count = 1;
			set_layout->dynamic_shader_stages |= binding->stageFlags;
			set_layout->binding[b].size = 0;
			binding_buffer_count = 1;
			alignment = 1;
			break;
		case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
		case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
		case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
		case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
			set_layout->binding[b].size = 16;
			binding_buffer_count = 1;
			alignment = 16;
			break;
		case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
		case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE:
		case VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT:
			/* main descriptor + fmask descriptor */
			set_layout->binding[b].size = 64;
			binding_buffer_count = 1;
			alignment = 32;
			break;
		case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
			/* main descriptor + fmask descriptor + sampler */
			set_layout->binding[b].size = 96;
			binding_buffer_count = 1;
			alignment = 32;
			break;
		case VK_DESCRIPTOR_TYPE_SAMPLER:
			set_layout->binding[b].size = 16;
			alignment = 16;
			break;
		case VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK_EXT:
			alignment = 16;
			set_layout->binding[b].size = descriptor_count;
			descriptor_count = 1;
			break;
		default:
			break;
		}

		set_layout->size = align(set_layout->size, alignment);
		set_layout->binding[b].type = binding->descriptorType;
		set_layout->binding[b].array_size = descriptor_count;
		set_layout->binding[b].offset = set_layout->size;
		set_layout->binding[b].buffer_offset = buffer_count;
		set_layout->binding[b].dynamic_offset_offset = dynamic_offset_count;

		if (variable_flags && binding->binding < variable_flags->bindingCount &&
		    (variable_flags->pBindingFlags[binding->binding] & VK_DESCRIPTOR_BINDING_VARIABLE_DESCRIPTOR_COUNT_BIT_EXT)) {
			assert(!binding->pImmutableSamplers); /* Terribly ill defined how many samplers are valid */
			assert(binding->binding == max_binding);

			set_layout->has_variable_descriptors = true;
		}

		if ((binding->descriptorType == VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER ||
		     binding->descriptorType == VK_DESCRIPTOR_TYPE_SAMPLER) &&
		    binding->pImmutableSamplers) {
			set_layout->binding[b].immutable_samplers_offset = samplers_offset;
			set_layout->binding[b].immutable_samplers_equal =
				has_equal_immutable_samplers(binding->pImmutableSamplers, binding->descriptorCount);
			set_layout->has_immutable_samplers = true;

			for (uint32_t i = 0; i < binding->descriptorCount; i++)
				memcpy(samplers + 4 * i, &radv_sampler_from_handle(binding->pImmutableSamplers[i])->state, 16);

			/* Don't reserve space for the samplers if they're not accessed. */
			if (set_layout->binding[b].immutable_samplers_equal) {
				if (binding->descriptorType == VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER &&
				    max_sampled_image_descriptors <= 2)
					set_layout->binding[b].size -= 32;
				else if (binding->descriptorType == VK_DESCRIPTOR_TYPE_SAMPLER)
					set_layout->binding[b].size -= 16;
			}
			samplers += 4 * binding->descriptorCount;
			samplers_offset += 4 * sizeof(uint32_t) * binding->descriptorCount;

			if (has_ycbcr_sampler) {
				ycbcr_sampler_offsets[b] = (const char*)ycbcr_samplers - (const char*)set_layout;
				for (uint32_t i = 0; i < binding->descriptorCount; i++) {
					if (radv_sampler_from_handle(binding->pImmutableSamplers[i])->ycbcr_sampler)
						ycbcr_samplers[i] = *radv_sampler_from_handle(binding->pImmutableSamplers[i])->ycbcr_sampler;
					else
						ycbcr_samplers[i].format = VK_FORMAT_UNDEFINED;
				}
				ycbcr_samplers += binding->descriptorCount;
			}
		}

		set_layout->size += descriptor_count * set_layout->binding[b].size;
		buffer_count += descriptor_count * binding_buffer_count;
		dynamic_offset_count += descriptor_count *
			set_layout->binding[b].dynamic_offset_count;
		set_layout->shader_stages |= binding->stageFlags;
	}

	free(bindings);

	set_layout->buffer_count = buffer_count;
	set_layout->dynamic_offset_count = dynamic_offset_count;

	*pSetLayout = radv_descriptor_set_layout_to_handle(set_layout);

	return VK_SUCCESS;
}

void radv_DestroyDescriptorSetLayout(
	VkDevice                                    _device,
	VkDescriptorSetLayout                       _set_layout,
	const VkAllocationCallbacks*                pAllocator)
{
	RADV_FROM_HANDLE(radv_device, device, _device);
	RADV_FROM_HANDLE(radv_descriptor_set_layout, set_layout, _set_layout);

	if (!set_layout)
		return;

	vk_free2(&device->vk.alloc, pAllocator, set_layout);
}

void radv_GetDescriptorSetLayoutSupport(VkDevice device,
                                        const VkDescriptorSetLayoutCreateInfo* pCreateInfo,
                                        VkDescriptorSetLayoutSupport* pSupport)
{
	VkDescriptorSetLayoutBinding *bindings = create_sorted_bindings(pCreateInfo->pBindings,
	                                                                pCreateInfo->bindingCount);
	if (!bindings) {
		pSupport->supported = false;
		return;
	}

	const VkDescriptorSetLayoutBindingFlagsCreateInfo *variable_flags =
		vk_find_struct_const(pCreateInfo->pNext, DESCRIPTOR_SET_LAYOUT_BINDING_FLAGS_CREATE_INFO);
	VkDescriptorSetVariableDescriptorCountLayoutSupport *variable_count =
		vk_find_struct((void*)pCreateInfo->pNext, DESCRIPTOR_SET_VARIABLE_DESCRIPTOR_COUNT_LAYOUT_SUPPORT);
	if (variable_count) {
		variable_count->maxVariableDescriptorCount = 0;
	}

	bool supported = true;
	uint64_t size = 0;
	for (uint32_t i = 0; i < pCreateInfo->bindingCount; i++) {
		const VkDescriptorSetLayoutBinding *binding = bindings + i;

		uint64_t descriptor_size = 0;
		uint64_t descriptor_alignment = 1;
		uint32_t descriptor_count = binding->descriptorCount;
		switch (binding->descriptorType) {
		case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
		case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
			break;
		case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
		case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
		case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
		case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
			descriptor_size = 16;
			descriptor_alignment = 16;
			break;
		case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
		case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE:
		case VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT:
			descriptor_size = 64;
			descriptor_alignment = 32;
			break;
		case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
			/* Mirror the size accounting in radv_CreateDescriptorSetLayout:
			 * the sampler part is only stored when the immutable samplers
			 * are not all equal.
			 */
			if (!has_equal_immutable_samplers(binding->pImmutableSamplers, descriptor_count)) {
				descriptor_size = 96;
			} else {
				descriptor_size = 64;
			}
			descriptor_alignment = 32;
			break;
		case VK_DESCRIPTOR_TYPE_SAMPLER:
			if (!has_equal_immutable_samplers(binding->pImmutableSamplers, descriptor_count)) {
				descriptor_size = 16;
				descriptor_alignment = 16;
			}
			break;
		case VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK_EXT:
			descriptor_alignment = 16;
			descriptor_size = descriptor_count;
			descriptor_count = 1;
			break;
		default:
			break;
		}

		if (size && !align_u64(size, descriptor_alignment)) {
			supported = false;
		}
		size = align_u64(size, descriptor_alignment);

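		/* Keep the total size (and any variable-count tail) below
		 * INT32_MAX, matching the 32-bit addressing used for
		 * descriptor sets.
		 */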
		uint64_t max_count = INT32_MAX;
		if (binding->descriptorType == VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK_EXT)
			max_count = INT32_MAX - size;
		else if (descriptor_size)
			max_count = (INT32_MAX - size) / descriptor_size;

		if (max_count < descriptor_count) {
			supported = false;
		}
		if (variable_flags && binding->binding < variable_flags->bindingCount && variable_count &&
		    (variable_flags->pBindingFlags[binding->binding] & VK_DESCRIPTOR_BINDING_VARIABLE_DESCRIPTOR_COUNT_BIT_EXT)) {
			variable_count->maxVariableDescriptorCount = MIN2(UINT32_MAX, max_count);
		}
		size += descriptor_count * descriptor_size;
	}

	free(bindings);

	pSupport->supported = supported;
}

/*
 * Pipeline layouts. These have nothing to do with the pipeline. They are
 * just multiple descriptor set layouts pasted together.
 */

VkResult radv_CreatePipelineLayout(
	VkDevice                                    _device,
	const VkPipelineLayoutCreateInfo*           pCreateInfo,
	const VkAllocationCallbacks*                pAllocator,
	VkPipelineLayout*                           pPipelineLayout)
{
	RADV_FROM_HANDLE(radv_device, device, _device);
	struct radv_pipeline_layout *layout;
	struct mesa_sha1 ctx;

	assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO);

	layout = vk_alloc2(&device->vk.alloc, pAllocator, sizeof(*layout), 8,
			   VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
	if (layout == NULL)
		return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);

	layout->num_sets = pCreateInfo->setLayoutCount;

	unsigned dynamic_offset_count = 0;
	uint16_t dynamic_shader_stages = 0;

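	/* Hash the set layouts and the push constant size into a SHA-1 that
	 * identifies this layout, e.g. as a pipeline cache key.
	 */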
	_mesa_sha1_init(&ctx);
	for (uint32_t set = 0; set < pCreateInfo->setLayoutCount; set++) {
		RADV_FROM_HANDLE(radv_descriptor_set_layout, set_layout,
				 pCreateInfo->pSetLayouts[set]);
		layout->set[set].layout = set_layout;

		layout->set[set].dynamic_offset_start = dynamic_offset_count;
		for (uint32_t b = 0; b < set_layout->binding_count; b++) {
			dynamic_offset_count += set_layout->binding[b].array_size * set_layout->binding[b].dynamic_offset_count;
			dynamic_shader_stages |= set_layout->dynamic_shader_stages;
		}
		_mesa_sha1_update(&ctx, set_layout, set_layout->layout_size);
	}

	layout->dynamic_offset_count = dynamic_offset_count;
	layout->dynamic_shader_stages = dynamic_shader_stages;
	layout->push_constant_size = 0;

	for (unsigned i = 0; i < pCreateInfo->pushConstantRangeCount; ++i) {
		const VkPushConstantRange *range = pCreateInfo->pPushConstantRanges + i;
		layout->push_constant_size = MAX2(layout->push_constant_size,
						  range->offset + range->size);
	}

	layout->push_constant_size = align(layout->push_constant_size, 16);
	_mesa_sha1_update(&ctx, &layout->push_constant_size,
			  sizeof(layout->push_constant_size));
	_mesa_sha1_final(&ctx, layout->sha1);
	*pPipelineLayout = radv_pipeline_layout_to_handle(layout);

	return VK_SUCCESS;
}

void radv_DestroyPipelineLayout(
	VkDevice                                    _device,
	VkPipelineLayout                            _pipelineLayout,
	const VkAllocationCallbacks*                pAllocator)
{
	RADV_FROM_HANDLE(radv_device, device, _device);
	RADV_FROM_HANDLE(radv_pipeline_layout, pipeline_layout, _pipelineLayout);

	if (!pipeline_layout)
		return;
	vk_free2(&device->vk.alloc, pAllocator, pipeline_layout);
}

#define EMPTY 1

static VkResult
radv_descriptor_set_create(struct radv_device *device,
			   struct radv_descriptor_pool *pool,
			   const struct radv_descriptor_set_layout *layout,
			   const uint32_t *variable_count,
			   struct radv_descriptor_set **out_set)
{
	struct radv_descriptor_set *set;
	uint32_t buffer_count = layout->buffer_count;
	if (variable_count) {
		unsigned stride = 1;
		if (layout->binding[layout->binding_count - 1].type == VK_DESCRIPTOR_TYPE_SAMPLER ||
		    layout->binding[layout->binding_count - 1].type == VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK_EXT)
			stride = 0;
		buffer_count = layout->binding[layout->binding_count - 1].buffer_offset +
			       *variable_count * stride;
	}
	unsigned range_offset = sizeof(struct radv_descriptor_set) +
		sizeof(struct radeon_winsys_bo *) * buffer_count;
	unsigned mem_size = range_offset +
		sizeof(struct radv_descriptor_range) * layout->dynamic_offset_count;

	if (pool->host_memory_base) {
		if (pool->host_memory_end - pool->host_memory_ptr < mem_size)
			return vk_error(device->instance, VK_ERROR_OUT_OF_POOL_MEMORY);

		set = (struct radv_descriptor_set*)pool->host_memory_ptr;
		pool->host_memory_ptr += mem_size;
	} else {
		set = vk_alloc2(&device->vk.alloc, NULL, mem_size, 8,
				VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);

		if (!set)
			return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
	}

	memset(set, 0, mem_size);

	if (layout->dynamic_offset_count) {
		set->dynamic_descriptors = (struct radv_descriptor_range*)((uint8_t*)set + range_offset);
	}

	set->layout = layout;
	set->buffer_count = buffer_count;
	uint32_t layout_size = layout->size;
	if (variable_count) {
		assert(layout->has_variable_descriptors);
		uint32_t stride = layout->binding[layout->binding_count - 1].size;
		if (layout->binding[layout->binding_count - 1].type == VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK_EXT)
			stride = 1;

		layout_size = layout->binding[layout->binding_count - 1].offset +
			      *variable_count * stride;
	}
	layout_size = align_u32(layout_size, 32);
	if (layout_size) {
		set->size = layout_size;

		if (!pool->host_memory_base && pool->entry_count == pool->max_entry_count) {
			vk_free2(&device->vk.alloc, NULL, set);
			return vk_error(device->instance, VK_ERROR_OUT_OF_POOL_MEMORY);
		}

		/* try to allocate linearly first, so that we don't spend
		 * time looking for gaps if the app only allocates &
		 * resets via the pool. */
		if (pool->current_offset + layout_size <= pool->size) {
			set->bo = pool->bo;
			set->mapped_ptr = (uint32_t*)(pool->mapped_ptr + pool->current_offset);
			set->va = radv_buffer_get_va(set->bo) + pool->current_offset;
			if (!pool->host_memory_base) {
				pool->entries[pool->entry_count].offset = pool->current_offset;
				pool->entries[pool->entry_count].size = layout_size;
				pool->entries[pool->entry_count].set = set;
				pool->entry_count++;
			}
			pool->current_offset += layout_size;
		} else if (!pool->host_memory_base) {
			uint64_t offset = 0;
			int index;

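			/* pool->entries is kept sorted by offset, so a
			 * first-fit scan finds the lowest gap large enough
			 * for this set.
			 */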
			for (index = 0; index < pool->entry_count; ++index) {
				if (pool->entries[index].offset - offset >= layout_size)
					break;
				offset = pool->entries[index].offset + pool->entries[index].size;
			}

			if (pool->size - offset < layout_size) {
				vk_free2(&device->vk.alloc, NULL, set);
				return vk_error(device->instance, VK_ERROR_OUT_OF_POOL_MEMORY);
			}
			set->bo = pool->bo;
			set->mapped_ptr = (uint32_t*)(pool->mapped_ptr + offset);
			set->va = radv_buffer_get_va(set->bo) + offset;
			memmove(&pool->entries[index + 1], &pool->entries[index],
				sizeof(pool->entries[0]) * (pool->entry_count - index));
			pool->entries[index].offset = offset;
			pool->entries[index].size = layout_size;
			pool->entries[index].set = set;
			pool->entry_count++;
		} else
			return vk_error(device->instance, VK_ERROR_OUT_OF_POOL_MEMORY);
	}

	if (layout->has_immutable_samplers) {
		for (unsigned i = 0; i < layout->binding_count; ++i) {
			if (!layout->binding[i].immutable_samplers_offset ||
			    layout->binding[i].immutable_samplers_equal)
				continue;

			unsigned offset = layout->binding[i].offset / 4;
			if (layout->binding[i].type == VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER)
				offset += radv_combined_image_descriptor_sampler_offset(layout->binding + i) / 4;

			const uint32_t *samplers = (const uint32_t*)((const char*)layout + layout->binding[i].immutable_samplers_offset);
			for (unsigned j = 0; j < layout->binding[i].array_size; ++j) {
				memcpy(set->mapped_ptr + offset, samplers + 4 * j, 16);
				offset += layout->binding[i].size / 4;
			}
		}
	}
	*out_set = set;
	return VK_SUCCESS;
}

static void
radv_descriptor_set_destroy(struct radv_device *device,
			    struct radv_descriptor_pool *pool,
			    struct radv_descriptor_set *set,
			    bool free_bo)
{
	assert(!pool->host_memory_base);

	if (free_bo && set->size && !pool->host_memory_base) {
		uint32_t offset = (uint8_t*)set->mapped_ptr - pool->mapped_ptr;
		for (int i = 0; i < pool->entry_count; ++i) {
			if (pool->entries[i].offset == offset) {
				memmove(&pool->entries[i], &pool->entries[i+1],
					sizeof(pool->entries[i]) * (pool->entry_count - i - 1));
				--pool->entry_count;
				break;
			}
		}
	}
	vk_free2(&device->vk.alloc, NULL, set);
}

VkResult radv_CreateDescriptorPool(
	VkDevice                                    _device,
	const VkDescriptorPoolCreateInfo*           pCreateInfo,
	const VkAllocationCallbacks*                pAllocator,
	VkDescriptorPool*                           pDescriptorPool)
{
	RADV_FROM_HANDLE(radv_device, device, _device);
	struct radv_descriptor_pool *pool;
	uint64_t size = sizeof(struct radv_descriptor_pool);
	uint64_t bo_size = 0, bo_count = 0, range_count = 0;

	vk_foreach_struct(ext, pCreateInfo->pNext) {
		switch (ext->sType) {
		case VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_INLINE_UNIFORM_BLOCK_CREATE_INFO_EXT: {
			const struct VkDescriptorPoolInlineUniformBlockCreateInfoEXT *info =
				(const struct VkDescriptorPoolInlineUniformBlockCreateInfoEXT*)ext;
			/* the sizes are 4 aligned, and we need to align to at
			 * most 32, which needs at most 28 bytes extra per
			 * binding. */
			bo_size += 28llu * info->maxInlineUniformBlockBindings;
			break;
		}
		default:
			break;
		}
	}

	for (unsigned i = 0; i < pCreateInfo->poolSizeCount; ++i) {
		if (pCreateInfo->pPoolSizes[i].type != VK_DESCRIPTOR_TYPE_SAMPLER)
			bo_count += pCreateInfo->pPoolSizes[i].descriptorCount;

		switch(pCreateInfo->pPoolSizes[i].type) {
		case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
		case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
			range_count += pCreateInfo->pPoolSizes[i].descriptorCount;
			break;
		case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
		case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
		case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
		case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
		case VK_DESCRIPTOR_TYPE_SAMPLER:
			/* 32 as we may need to align for images */
			bo_size += 32 * pCreateInfo->pPoolSizes[i].descriptorCount;
			break;
		case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE:
		case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
		case VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT:
			bo_size += 64 * pCreateInfo->pPoolSizes[i].descriptorCount;
			break;
		case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
			bo_size += 96 * pCreateInfo->pPoolSizes[i].descriptorCount;
			break;
		case VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK_EXT:
			bo_size += pCreateInfo->pPoolSizes[i].descriptorCount;
			break;
		default:
			break;
		}
	}

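	/* Without FREE_DESCRIPTOR_SET_BIT sets can only be freed by resetting
	 * the whole pool, so all host-side structures can be suballocated from
	 * one block up front instead of being tracked individually.
	 */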
	if (!(pCreateInfo->flags & VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT)) {
		uint64_t host_size = pCreateInfo->maxSets * sizeof(struct radv_descriptor_set);
		host_size += sizeof(struct radeon_winsys_bo*) * bo_count;
		host_size += sizeof(struct radv_descriptor_range) * range_count;
		size += host_size;
	} else {
		size += sizeof(struct radv_descriptor_pool_entry) * pCreateInfo->maxSets;
	}

	pool = vk_alloc2(&device->vk.alloc, pAllocator, size, 8,
			 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
	if (!pool)
		return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);

	memset(pool, 0, sizeof(*pool));

	if (!(pCreateInfo->flags & VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT)) {
		pool->host_memory_base = (uint8_t*)pool + sizeof(struct radv_descriptor_pool);
		pool->host_memory_ptr = pool->host_memory_base;
		pool->host_memory_end = (uint8_t*)pool + size;
	}

	if (bo_size) {
		pool->bo = device->ws->buffer_create(device->ws, bo_size, 32,
						     RADEON_DOMAIN_VRAM,
						     RADEON_FLAG_NO_INTERPROCESS_SHARING |
						     RADEON_FLAG_READ_ONLY |
						     RADEON_FLAG_32BIT,
						     RADV_BO_PRIORITY_DESCRIPTOR);
		pool->mapped_ptr = (uint8_t*)device->ws->buffer_map(pool->bo);
	}
	pool->size = bo_size;
	pool->max_entry_count = pCreateInfo->maxSets;

	*pDescriptorPool = radv_descriptor_pool_to_handle(pool);
	return VK_SUCCESS;
}

void radv_DestroyDescriptorPool(
	VkDevice                                    _device,
	VkDescriptorPool                            _pool,
	const VkAllocationCallbacks*                pAllocator)
{
	RADV_FROM_HANDLE(radv_device, device, _device);
	RADV_FROM_HANDLE(radv_descriptor_pool, pool, _pool);

	if (!pool)
		return;

	if (!pool->host_memory_base) {
		for(int i = 0; i < pool->entry_count; ++i) {
			radv_descriptor_set_destroy(device, pool, pool->entries[i].set, false);
		}
	}

	if (pool->bo)
		device->ws->buffer_destroy(pool->bo);
	vk_free2(&device->vk.alloc, pAllocator, pool);
}

VkResult radv_ResetDescriptorPool(
	VkDevice                                    _device,
	VkDescriptorPool                            descriptorPool,
	VkDescriptorPoolResetFlags                  flags)
{
	RADV_FROM_HANDLE(radv_device, device, _device);
	RADV_FROM_HANDLE(radv_descriptor_pool, pool, descriptorPool);

	if (!pool->host_memory_base) {
		for(int i = 0; i < pool->entry_count; ++i) {
			radv_descriptor_set_destroy(device, pool, pool->entries[i].set, false);
		}
		pool->entry_count = 0;
	}

	pool->current_offset = 0;
	pool->host_memory_ptr = pool->host_memory_base;

	return VK_SUCCESS;
}

VkResult radv_AllocateDescriptorSets(
	VkDevice                                    _device,
	const VkDescriptorSetAllocateInfo*          pAllocateInfo,
	VkDescriptorSet*                            pDescriptorSets)
{
	RADV_FROM_HANDLE(radv_device, device, _device);
	RADV_FROM_HANDLE(radv_descriptor_pool, pool, pAllocateInfo->descriptorPool);

	VkResult result = VK_SUCCESS;
	uint32_t i;
	struct radv_descriptor_set *set = NULL;

	const VkDescriptorSetVariableDescriptorCountAllocateInfo *variable_counts =
		vk_find_struct_const(pAllocateInfo->pNext, DESCRIPTOR_SET_VARIABLE_DESCRIPTOR_COUNT_ALLOCATE_INFO);
	const uint32_t zero = 0;

	/* allocate a set of buffers for each shader to contain descriptors */
	for (i = 0; i < pAllocateInfo->descriptorSetCount; i++) {
		RADV_FROM_HANDLE(radv_descriptor_set_layout, layout,
				 pAllocateInfo->pSetLayouts[i]);

		const uint32_t *variable_count = NULL;
		if (variable_counts) {
			if (i < variable_counts->descriptorSetCount)
				variable_count = variable_counts->pDescriptorCounts + i;
			else
				variable_count = &zero;
		}

		assert(!(layout->flags & VK_DESCRIPTOR_SET_LAYOUT_CREATE_PUSH_DESCRIPTOR_BIT_KHR));

		result = radv_descriptor_set_create(device, pool, layout, variable_count, &set);
		if (result != VK_SUCCESS)
			break;

		pDescriptorSets[i] = radv_descriptor_set_to_handle(set);
	}

	if (result != VK_SUCCESS) {
		radv_FreeDescriptorSets(_device, pAllocateInfo->descriptorPool,
					i, pDescriptorSets);
		for (i = 0; i < pAllocateInfo->descriptorSetCount; i++) {
			pDescriptorSets[i] = VK_NULL_HANDLE;
		}
	}
	return result;
}

VkResult radv_FreeDescriptorSets(
	VkDevice                                    _device,
	VkDescriptorPool                            descriptorPool,
	uint32_t                                    count,
	const VkDescriptorSet*                      pDescriptorSets)
{
	RADV_FROM_HANDLE(radv_device, device, _device);
	RADV_FROM_HANDLE(radv_descriptor_pool, pool, descriptorPool);

	for (uint32_t i = 0; i < count; i++) {
		RADV_FROM_HANDLE(radv_descriptor_set, set, pDescriptorSets[i]);

		if (set && !pool->host_memory_base)
			radv_descriptor_set_destroy(device, pool, set, true);
	}
	return VK_SUCCESS;
}

static void write_texel_buffer_descriptor(struct radv_device *device,
					  struct radv_cmd_buffer *cmd_buffer,
					  unsigned *dst,
					  struct radeon_winsys_bo **buffer_list,
					  const VkBufferView _buffer_view)
{
	RADV_FROM_HANDLE(radv_buffer_view, buffer_view, _buffer_view);

	if (!buffer_view) {
		memset(dst, 0, 4 * 4);
		return;
	}

	memcpy(dst, buffer_view->state, 4 * 4);

	if (cmd_buffer)
		radv_cs_add_buffer(device->ws, cmd_buffer->cs, buffer_view->bo);
	else
		*buffer_list = buffer_view->bo;
}

static void write_buffer_descriptor(struct radv_device *device,
				    struct radv_cmd_buffer *cmd_buffer,
				    unsigned *dst,
				    struct radeon_winsys_bo **buffer_list,
				    const VkDescriptorBufferInfo *buffer_info)
{
	RADV_FROM_HANDLE(radv_buffer, buffer, buffer_info->buffer);

	if (!buffer) {
		memset(dst, 0, 4 * 4);
		return;
	}

	uint64_t va = radv_buffer_get_va(buffer->bo);
	uint32_t range = buffer_info->range;

	if (buffer_info->range == VK_WHOLE_SIZE)
		range = buffer->size - buffer_info->offset;

	/* robustBufferAccess is relaxed enough to allow this (in combination
	 * with the alignment/size we return from vkGetBufferMemoryRequirements)
	 * and this allows the shader compiler to create more efficient 8/16-bit
	 * buffer accesses. */
	range = align(range, 4);

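	/* Build the four dwords of an untyped buffer descriptor (V#): base
	 * address, the high address bits, the range in bytes, and the word
	 * with the destination swizzle and per-generation format bits.
	 */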
	va += buffer_info->offset + buffer->offset;
	dst[0] = va;
	dst[1] = S_008F04_BASE_ADDRESS_HI(va >> 32);
	dst[2] = range;
	dst[3] = S_008F0C_DST_SEL_X(V_008F0C_SQ_SEL_X) |
		 S_008F0C_DST_SEL_Y(V_008F0C_SQ_SEL_Y) |
		 S_008F0C_DST_SEL_Z(V_008F0C_SQ_SEL_Z) |
		 S_008F0C_DST_SEL_W(V_008F0C_SQ_SEL_W);

	if (device->physical_device->rad_info.chip_class >= GFX10) {
		dst[3] |= S_008F0C_FORMAT(V_008F0C_IMG_FORMAT_32_FLOAT) |
			  S_008F0C_OOB_SELECT(V_008F0C_OOB_SELECT_RAW) |
			  S_008F0C_RESOURCE_LEVEL(1);
	} else {
		dst[3] |= S_008F0C_NUM_FORMAT(V_008F0C_BUF_NUM_FORMAT_FLOAT) |
			  S_008F0C_DATA_FORMAT(V_008F0C_BUF_DATA_FORMAT_32);
	}

	if (cmd_buffer)
		radv_cs_add_buffer(device->ws, cmd_buffer->cs, buffer->bo);
	else
		*buffer_list = buffer->bo;
}

static void write_block_descriptor(struct radv_device *device,
				   struct radv_cmd_buffer *cmd_buffer,
				   void *dst,
				   const VkWriteDescriptorSet *writeset)
{
	const VkWriteDescriptorSetInlineUniformBlockEXT *inline_ub =
		vk_find_struct_const(writeset->pNext, WRITE_DESCRIPTOR_SET_INLINE_UNIFORM_BLOCK_EXT);

	memcpy(dst, inline_ub->pData, inline_ub->dataSize);
}

static void write_dynamic_buffer_descriptor(struct radv_device *device,
					    struct radv_descriptor_range *range,
					    struct radeon_winsys_bo **buffer_list,
					    const VkDescriptorBufferInfo *buffer_info)
{
	RADV_FROM_HANDLE(radv_buffer, buffer, buffer_info->buffer);
	uint64_t va;
	unsigned size;

	if (!buffer)
		return;

	va = radv_buffer_get_va(buffer->bo);
	size = buffer_info->range;

	if (buffer_info->range == VK_WHOLE_SIZE)
		size = buffer->size - buffer_info->offset;

	/* robustBufferAccess is relaxed enough to allow this (in combination
	 * with the alignment/size we return from vkGetBufferMemoryRequirements)
	 * and this allows the shader compiler to create more efficient 8/16-bit
	 * buffer accesses. */
	size = align(size, 4);

	va += buffer_info->offset + buffer->offset;
	range->va = va;
	range->size = size;

	*buffer_list = buffer->bo;
}

static void
write_image_descriptor(struct radv_device *device,
		       struct radv_cmd_buffer *cmd_buffer,
		       unsigned size, unsigned *dst,
		       struct radeon_winsys_bo **buffer_list,
		       VkDescriptorType descriptor_type,
		       const VkDescriptorImageInfo *image_info)
{
	RADV_FROM_HANDLE(radv_image_view, iview, image_info->imageView);
	union radv_descriptor *descriptor;

	if (!iview) {
		memset(dst, 0, size);
		return;
	}

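	/* Image views carry two pre-baked descriptors; the storage variant
	 * differs from the sampled one (e.g. in how metadata compression is
	 * handled), so select by descriptor type.
	 */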
	if (descriptor_type == VK_DESCRIPTOR_TYPE_STORAGE_IMAGE) {
		descriptor = &iview->storage_descriptor;
	} else {
		descriptor = &iview->descriptor;
	}

	memcpy(dst, descriptor, size);

	if (cmd_buffer)
		radv_cs_add_buffer(device->ws, cmd_buffer->cs, iview->bo);
	else
		*buffer_list = iview->bo;
}

static void
write_combined_image_sampler_descriptor(struct radv_device *device,
					struct radv_cmd_buffer *cmd_buffer,
					unsigned sampler_offset,
					unsigned *dst,
					struct radeon_winsys_bo **buffer_list,
					VkDescriptorType descriptor_type,
					const VkDescriptorImageInfo *image_info,
					bool has_sampler)
{
	RADV_FROM_HANDLE(radv_sampler, sampler, image_info->sampler);

	write_image_descriptor(device, cmd_buffer, sampler_offset, dst, buffer_list,
			       descriptor_type, image_info);
	/* copy over sampler state */
	if (has_sampler) {
		memcpy(dst + sampler_offset / sizeof(*dst), sampler->state, 16);
	}
}

static void
write_sampler_descriptor(struct radv_device *device,
			 unsigned *dst,
			 const VkDescriptorImageInfo *image_info)
{
	RADV_FROM_HANDLE(radv_sampler, sampler, image_info->sampler);

	memcpy(dst, sampler->state, 16);
}

void radv_update_descriptor_sets(
	struct radv_device*                         device,
	struct radv_cmd_buffer*                     cmd_buffer,
	VkDescriptorSet                             dstSetOverride,
	uint32_t                                    descriptorWriteCount,
	const VkWriteDescriptorSet*                 pDescriptorWrites,
	uint32_t                                    descriptorCopyCount,
	const VkCopyDescriptorSet*                  pDescriptorCopies)
{
	uint32_t i, j;
	for (i = 0; i < descriptorWriteCount; i++) {
		const VkWriteDescriptorSet *writeset = &pDescriptorWrites[i];
		RADV_FROM_HANDLE(radv_descriptor_set, set,
				 dstSetOverride ? dstSetOverride : writeset->dstSet);
		const struct radv_descriptor_set_binding_layout *binding_layout =
			set->layout->binding + writeset->dstBinding;
		uint32_t *ptr = set->mapped_ptr;
		struct radeon_winsys_bo **buffer_list = set->descriptors;
		/* Immutable samplers are not copied into push descriptors when they are
		 * allocated, so if we are writing push descriptors we have to copy the
		 * immutable samplers into them now.
		 */
		const bool copy_immutable_samplers = cmd_buffer &&
			binding_layout->immutable_samplers_offset && !binding_layout->immutable_samplers_equal;
		const uint32_t *samplers = radv_immutable_samplers(set->layout, binding_layout);

		ptr += binding_layout->offset / 4;

		if (writeset->descriptorType == VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK_EXT) {
			write_block_descriptor(device, cmd_buffer, (uint8_t*)ptr + writeset->dstArrayElement, writeset);
			continue;
		}

		ptr += binding_layout->size * writeset->dstArrayElement / 4;
		buffer_list += binding_layout->buffer_offset;
		buffer_list += writeset->dstArrayElement;
		for (j = 0; j < writeset->descriptorCount; ++j) {
			switch(writeset->descriptorType) {
			case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
			case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC: {
				unsigned idx = writeset->dstArrayElement + j;
				idx += binding_layout->dynamic_offset_offset;
				assert(!(set->layout->flags & VK_DESCRIPTOR_SET_LAYOUT_CREATE_PUSH_DESCRIPTOR_BIT_KHR));
				write_dynamic_buffer_descriptor(device, set->dynamic_descriptors + idx,
								buffer_list, writeset->pBufferInfo + j);
				break;
			}
			case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
			case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
				write_buffer_descriptor(device, cmd_buffer, ptr, buffer_list,
							writeset->pBufferInfo + j);
				break;
			case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
			case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
				write_texel_buffer_descriptor(device, cmd_buffer, ptr, buffer_list,
							      writeset->pTexelBufferView[j]);
				break;
			case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE:
			case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
			case VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT:
				write_image_descriptor(device, cmd_buffer, 64, ptr, buffer_list,
						       writeset->descriptorType,
						       writeset->pImageInfo + j);
				break;
			case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER: {
				unsigned sampler_offset = radv_combined_image_descriptor_sampler_offset(binding_layout);
				write_combined_image_sampler_descriptor(device, cmd_buffer, sampler_offset,
									ptr, buffer_list,
									writeset->descriptorType,
									writeset->pImageInfo + j,
									!binding_layout->immutable_samplers_offset);
				if (copy_immutable_samplers) {
					const unsigned idx = writeset->dstArrayElement + j;
					memcpy((char*)ptr + sampler_offset, samplers + 4 * idx, 16);
				}
				break;
			}
			case VK_DESCRIPTOR_TYPE_SAMPLER:
				if (!binding_layout->immutable_samplers_offset) {
					write_sampler_descriptor(device, ptr,
								 writeset->pImageInfo + j);
				} else if (copy_immutable_samplers) {
					unsigned idx = writeset->dstArrayElement + j;
					memcpy(ptr, samplers + 4 * idx, 16);
				}
				break;
			default:
				break;
			}
			ptr += binding_layout->size / 4;
			++buffer_list;
		}
	}

	for (i = 0; i < descriptorCopyCount; i++) {
		const VkCopyDescriptorSet *copyset = &pDescriptorCopies[i];
		RADV_FROM_HANDLE(radv_descriptor_set, src_set,
				 copyset->srcSet);
		RADV_FROM_HANDLE(radv_descriptor_set, dst_set,
				 copyset->dstSet);
		const struct radv_descriptor_set_binding_layout *src_binding_layout =
			src_set->layout->binding + copyset->srcBinding;
		const struct radv_descriptor_set_binding_layout *dst_binding_layout =
			dst_set->layout->binding + copyset->dstBinding;
		uint32_t *src_ptr = src_set->mapped_ptr;
		uint32_t *dst_ptr = dst_set->mapped_ptr;
		struct radeon_winsys_bo **src_buffer_list = src_set->descriptors;
		struct radeon_winsys_bo **dst_buffer_list = dst_set->descriptors;

		src_ptr += src_binding_layout->offset / 4;
		dst_ptr += dst_binding_layout->offset / 4;

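		/* For inline uniform blocks, srcArrayElement/dstArrayElement
		 * are byte offsets and descriptorCount is a byte count rather
		 * than a number of descriptors.
		 */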
		if (src_binding_layout->type == VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK_EXT) {
			src_ptr += copyset->srcArrayElement / 4;
			dst_ptr += copyset->dstArrayElement / 4;

			memcpy(dst_ptr, src_ptr, copyset->descriptorCount);
			continue;
		}

		src_ptr += src_binding_layout->size * copyset->srcArrayElement / 4;
		dst_ptr += dst_binding_layout->size * copyset->dstArrayElement / 4;

		src_buffer_list += src_binding_layout->buffer_offset;
		src_buffer_list += copyset->srcArrayElement;

		dst_buffer_list += dst_binding_layout->buffer_offset;
		dst_buffer_list += copyset->dstArrayElement;

		for (j = 0; j < copyset->descriptorCount; ++j) {
			switch (src_binding_layout->type) {
			case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
			case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC: {
				unsigned src_idx = copyset->srcArrayElement + j;
				unsigned dst_idx = copyset->dstArrayElement + j;
				struct radv_descriptor_range *src_range, *dst_range;
				src_idx += src_binding_layout->dynamic_offset_offset;
				dst_idx += dst_binding_layout->dynamic_offset_offset;

				src_range = src_set->dynamic_descriptors + src_idx;
				dst_range = dst_set->dynamic_descriptors + dst_idx;
				*dst_range = *src_range;
				break;
			}
			default:
				memcpy(dst_ptr, src_ptr, src_binding_layout->size);
			}
			src_ptr += src_binding_layout->size / 4;
			dst_ptr += dst_binding_layout->size / 4;

			if (src_binding_layout->type != VK_DESCRIPTOR_TYPE_SAMPLER) {
				/* Sampler descriptors don't have a buffer list. */
				dst_buffer_list[j] = src_buffer_list[j];
			}
		}
	}
}

void radv_UpdateDescriptorSets(
	VkDevice                                    _device,
	uint32_t                                    descriptorWriteCount,
	const VkWriteDescriptorSet*                 pDescriptorWrites,
	uint32_t                                    descriptorCopyCount,
	const VkCopyDescriptorSet*                  pDescriptorCopies)
{
	RADV_FROM_HANDLE(radv_device, device, _device);

	radv_update_descriptor_sets(device, NULL, VK_NULL_HANDLE, descriptorWriteCount, pDescriptorWrites,
				    descriptorCopyCount, pDescriptorCopies);
}

VkResult radv_CreateDescriptorUpdateTemplate(VkDevice _device,
                                             const VkDescriptorUpdateTemplateCreateInfo *pCreateInfo,
                                             const VkAllocationCallbacks *pAllocator,
                                             VkDescriptorUpdateTemplate *pDescriptorUpdateTemplate)
{
	RADV_FROM_HANDLE(radv_device, device, _device);
	RADV_FROM_HANDLE(radv_descriptor_set_layout, set_layout, pCreateInfo->descriptorSetLayout);
	const uint32_t entry_count = pCreateInfo->descriptorUpdateEntryCount;
	const size_t size = sizeof(struct radv_descriptor_update_template) +
		sizeof(struct radv_descriptor_update_template_entry) * entry_count;
	struct radv_descriptor_update_template *templ;
	uint32_t i;

	templ = vk_alloc2(&device->vk.alloc, pAllocator, size, 8, VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
	if (!templ)
		return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);

	templ->entry_count = entry_count;

	if (pCreateInfo->templateType == VK_DESCRIPTOR_UPDATE_TEMPLATE_TYPE_PUSH_DESCRIPTORS_KHR) {
		RADV_FROM_HANDLE(radv_pipeline_layout, pipeline_layout, pCreateInfo->pipelineLayout);

		/* descriptorSetLayout should be ignored for push descriptors
		 * and instead it refers to pipelineLayout and set.
		 */
		assert(pCreateInfo->set < MAX_SETS);
		set_layout = pipeline_layout->set[pCreateInfo->set].layout;

		templ->bind_point = pCreateInfo->pipelineBindPoint;
	}

	for (i = 0; i < entry_count; i++) {
		const VkDescriptorUpdateTemplateEntry *entry = &pCreateInfo->pDescriptorUpdateEntries[i];
		const struct radv_descriptor_set_binding_layout *binding_layout =
			set_layout->binding + entry->dstBinding;
		const uint32_t buffer_offset = binding_layout->buffer_offset + entry->dstArrayElement;
		const uint32_t *immutable_samplers = NULL;
		uint32_t dst_offset;
		uint32_t dst_stride;

		/* dst_offset is an offset into dynamic_descriptors when the descriptor
		   is dynamic, and an offset into mapped_ptr otherwise */
		switch (entry->descriptorType) {
		case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
		case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
			assert(pCreateInfo->templateType == VK_DESCRIPTOR_UPDATE_TEMPLATE_TYPE_DESCRIPTOR_SET);
			dst_offset = binding_layout->dynamic_offset_offset + entry->dstArrayElement;
			dst_stride = 0; /* Not used */
			break;
		default:
			switch (entry->descriptorType) {
			case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
			case VK_DESCRIPTOR_TYPE_SAMPLER:
				/* Immutable samplers are copied into push descriptors when they are pushed */
				if (pCreateInfo->templateType == VK_DESCRIPTOR_UPDATE_TEMPLATE_TYPE_PUSH_DESCRIPTORS_KHR &&
				    binding_layout->immutable_samplers_offset && !binding_layout->immutable_samplers_equal) {
					immutable_samplers = radv_immutable_samplers(set_layout, binding_layout) + entry->dstArrayElement * 4;
				}
				break;
			default:
				break;
			}
			dst_offset = binding_layout->offset / 4;
			if (entry->descriptorType == VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK_EXT)
				dst_offset += entry->dstArrayElement / 4;
			else
				dst_offset += binding_layout->size * entry->dstArrayElement / 4;

			dst_stride = binding_layout->size / 4;
			break;
		}

		templ->entry[i] = (struct radv_descriptor_update_template_entry) {
			.descriptor_type = entry->descriptorType,
			.descriptor_count = entry->descriptorCount,
			.src_offset = entry->offset,
			.src_stride = entry->stride,
			.dst_offset = dst_offset,
			.dst_stride = dst_stride,
			.buffer_offset = buffer_offset,
			.has_sampler = !binding_layout->immutable_samplers_offset,
			.sampler_offset = radv_combined_image_descriptor_sampler_offset(binding_layout),
			.immutable_samplers = immutable_samplers
		};
	}

	*pDescriptorUpdateTemplate = radv_descriptor_update_template_to_handle(templ);
	return VK_SUCCESS;
}

void radv_DestroyDescriptorUpdateTemplate(VkDevice _device,
                                          VkDescriptorUpdateTemplate descriptorUpdateTemplate,
                                          const VkAllocationCallbacks *pAllocator)
{
	RADV_FROM_HANDLE(radv_device, device, _device);
	RADV_FROM_HANDLE(radv_descriptor_update_template, templ, descriptorUpdateTemplate);

	if (!templ)
		return;

	vk_free2(&device->vk.alloc, pAllocator, templ);
}

void radv_update_descriptor_set_with_template(struct radv_device *device,
                                              struct radv_cmd_buffer *cmd_buffer,
                                              struct radv_descriptor_set *set,
                                              VkDescriptorUpdateTemplate descriptorUpdateTemplate,
                                              const void *pData)
{
	RADV_FROM_HANDLE(radv_descriptor_update_template, templ, descriptorUpdateTemplate);
	uint32_t i;

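	/* Replay the template: each entry walks descriptor_count elements,
	 * advancing through the application data by src_stride bytes and
	 * through the set's mapped memory by dst_stride dwords.
	 */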
	for (i = 0; i < templ->entry_count; ++i) {
		struct radeon_winsys_bo **buffer_list = set->descriptors + templ->entry[i].buffer_offset;
		uint32_t *pDst = set->mapped_ptr + templ->entry[i].dst_offset;
		const uint8_t *pSrc = ((const uint8_t *) pData) + templ->entry[i].src_offset;
		uint32_t j;

		if (templ->entry[i].descriptor_type == VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK_EXT) {
			memcpy((uint8_t*)pDst, pSrc, templ->entry[i].descriptor_count);
			continue;
		}

		for (j = 0; j < templ->entry[i].descriptor_count; ++j) {
			switch (templ->entry[i].descriptor_type) {
			case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
			case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC: {
				const unsigned idx = templ->entry[i].dst_offset + j;
				assert(!(set->layout->flags & VK_DESCRIPTOR_SET_LAYOUT_CREATE_PUSH_DESCRIPTOR_BIT_KHR));
				write_dynamic_buffer_descriptor(device, set->dynamic_descriptors + idx,
								buffer_list, (struct VkDescriptorBufferInfo *) pSrc);
				break;
			}
			case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
			case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
				write_buffer_descriptor(device, cmd_buffer, pDst, buffer_list,
							(struct VkDescriptorBufferInfo *) pSrc);
				break;
			case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
			case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
				write_texel_buffer_descriptor(device, cmd_buffer, pDst, buffer_list,
							      *(VkBufferView *) pSrc);
				break;
			case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE:
			case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
			case VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT:
				write_image_descriptor(device, cmd_buffer, 64, pDst, buffer_list,
						       templ->entry[i].descriptor_type,
						       (struct VkDescriptorImageInfo *) pSrc);
				break;
			case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
				write_combined_image_sampler_descriptor(device, cmd_buffer, templ->entry[i].sampler_offset,
									pDst, buffer_list, templ->entry[i].descriptor_type,
									(struct VkDescriptorImageInfo *) pSrc,
									templ->entry[i].has_sampler);
				if (templ->entry[i].immutable_samplers) {
					memcpy((char*)pDst + templ->entry[i].sampler_offset, templ->entry[i].immutable_samplers + 4 * j, 16);
				}
				break;
			case VK_DESCRIPTOR_TYPE_SAMPLER:
				if (templ->entry[i].has_sampler)
					write_sampler_descriptor(device, pDst,
								 (struct VkDescriptorImageInfo *) pSrc);
				else if (templ->entry[i].immutable_samplers)
					memcpy(pDst, templ->entry[i].immutable_samplers + 4 * j, 16);
				break;
			default:
				break;
			}
			pSrc += templ->entry[i].src_stride;
			pDst += templ->entry[i].dst_stride;
			++buffer_list;
		}
	}
}

void radv_UpdateDescriptorSetWithTemplate(VkDevice _device,
                                          VkDescriptorSet descriptorSet,
                                          VkDescriptorUpdateTemplate descriptorUpdateTemplate,
                                          const void *pData)
{
	RADV_FROM_HANDLE(radv_device, device, _device);
	RADV_FROM_HANDLE(radv_descriptor_set, set, descriptorSet);

	radv_update_descriptor_set_with_template(device, NULL, set, descriptorUpdateTemplate, pData);
}

VkResult radv_CreateSamplerYcbcrConversion(VkDevice _device,
					   const VkSamplerYcbcrConversionCreateInfo* pCreateInfo,
					   const VkAllocationCallbacks* pAllocator,
					   VkSamplerYcbcrConversion* pYcbcrConversion)
{
	RADV_FROM_HANDLE(radv_device, device, _device);
	struct radv_sampler_ycbcr_conversion *conversion = NULL;

	conversion = vk_zalloc2(&device->vk.alloc, pAllocator, sizeof(*conversion), 8,
				VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);

	if (conversion == NULL)
		return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);

	conversion->format = pCreateInfo->format;
	conversion->ycbcr_model = pCreateInfo->ycbcrModel;
	conversion->ycbcr_range = pCreateInfo->ycbcrRange;
	conversion->components = pCreateInfo->components;
	conversion->chroma_offsets[0] = pCreateInfo->xChromaOffset;
	conversion->chroma_offsets[1] = pCreateInfo->yChromaOffset;
	conversion->chroma_filter = pCreateInfo->chromaFilter;

	*pYcbcrConversion = radv_sampler_ycbcr_conversion_to_handle(conversion);
	return VK_SUCCESS;
}

void radv_DestroySamplerYcbcrConversion(VkDevice _device,
					VkSamplerYcbcrConversion ycbcrConversion,
					const VkAllocationCallbacks* pAllocator)
{
	RADV_FROM_HANDLE(radv_device, device, _device);
	RADV_FROM_HANDLE(radv_sampler_ycbcr_conversion, ycbcr_conversion, ycbcrConversion);

	if (ycbcr_conversion)
		vk_free2(&device->vk.alloc, pAllocator, ycbcr_conversion);
}