radv: drop assert on bindingDescriptorCount > 0
mesa.git: src/amd/vulkan/radv_descriptor_set.c
/*
 * Copyright © 2016 Red Hat.
 * Copyright © 2016 Bas Nieuwenhuizen
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
#include <assert.h>
#include <stdbool.h>
#include <string.h>
#include <unistd.h>
#include <fcntl.h>

#include "util/mesa-sha1.h"
#include "radv_private.h"
#include "sid.h"

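/* Returns true only when every immutable sampler in the binding has
 * identical hardware state; in that case a single copy of the state can be
 * baked into the set layout and no per-descriptor sampler words are needed. */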
static bool has_equal_immutable_samplers(const VkSampler *samplers, uint32_t count)
{
        if (!samplers)
                return false;
        for (uint32_t i = 1; i < count; ++i) {
                if (memcmp(radv_sampler_from_handle(samplers[0])->state,
                           radv_sampler_from_handle(samplers[i])->state, 16)) {
                        return false;
                }
        }
        return true;
}

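/* The layout is allocated as one block: the radv_descriptor_set_layout
 * header, then one binding entry per binding number up to the highest
 * binding used, then 4 dwords of state for every immutable sampler. */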
VkResult radv_CreateDescriptorSetLayout(
        VkDevice                                    _device,
        const VkDescriptorSetLayoutCreateInfo*      pCreateInfo,
        const VkAllocationCallbacks*                pAllocator,
        VkDescriptorSetLayout*                      pSetLayout)
{
        RADV_FROM_HANDLE(radv_device, device, _device);
        struct radv_descriptor_set_layout *set_layout;

        assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO);

        uint32_t max_binding = 0;
        uint32_t immutable_sampler_count = 0;
        for (uint32_t j = 0; j < pCreateInfo->bindingCount; j++) {
                max_binding = MAX2(max_binding, pCreateInfo->pBindings[j].binding);
                if (pCreateInfo->pBindings[j].pImmutableSamplers)
                        immutable_sampler_count += pCreateInfo->pBindings[j].descriptorCount;
        }

        uint32_t samplers_offset = sizeof(struct radv_descriptor_set_layout) +
                (max_binding + 1) * sizeof(set_layout->binding[0]);
        size_t size = samplers_offset + immutable_sampler_count * 4 * sizeof(uint32_t);

        set_layout = vk_alloc2(&device->alloc, pAllocator, size, 8,
                               VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
        if (!set_layout)
                return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

        set_layout->flags = pCreateInfo->flags;

        /* We just allocate all the samplers at the end of the struct */
        uint32_t *samplers = (uint32_t*)&set_layout->binding[max_binding + 1];

        set_layout->binding_count = max_binding + 1;
        set_layout->shader_stages = 0;
        set_layout->dynamic_shader_stages = 0;
        set_layout->has_immutable_samplers = false;
        set_layout->size = 0;

        memset(set_layout->binding, 0, size - sizeof(struct radv_descriptor_set_layout));

        uint32_t buffer_count = 0;
        uint32_t dynamic_offset_count = 0;

        for (uint32_t j = 0; j < pCreateInfo->bindingCount; j++) {
                const VkDescriptorSetLayoutBinding *binding = &pCreateInfo->pBindings[j];
                uint32_t b = binding->binding;
                uint32_t alignment;
                unsigned binding_buffer_count = 0;

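                /* Descriptor sizes below are in bytes as laid out in GPU
                 * memory: a buffer takes one 16-byte V#, an image 64 bytes
                 * (32-byte descriptor plus 32 bytes of fmask), and a combined
                 * image+sampler 96 bytes, the tail holding the 16-byte
                 * sampler state. */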
                switch (binding->descriptorType) {
                case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
                case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
                        assert(!(pCreateInfo->flags & VK_DESCRIPTOR_SET_LAYOUT_CREATE_PUSH_DESCRIPTOR_BIT_KHR));
                        set_layout->binding[b].dynamic_offset_count = 1;
                        set_layout->dynamic_shader_stages |= binding->stageFlags;
                        set_layout->binding[b].size = 0;
                        binding_buffer_count = 1;
                        alignment = 1;
                        break;
                case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
                case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
                case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
                case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
                        set_layout->binding[b].size = 16;
                        binding_buffer_count = 1;
                        alignment = 16;
                        break;
                case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
                case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE:
                case VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT:
                        /* main descriptor + fmask descriptor */
                        set_layout->binding[b].size = 64;
                        binding_buffer_count = 1;
                        alignment = 32;
                        break;
                case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
                        /* main descriptor + fmask descriptor + sampler */
                        set_layout->binding[b].size = 96;
                        binding_buffer_count = 1;
                        alignment = 32;
                        break;
                case VK_DESCRIPTOR_TYPE_SAMPLER:
                        set_layout->binding[b].size = 16;
                        alignment = 16;
                        break;
                default:
                        unreachable("unknown descriptor type\n");
                        break;
                }

                set_layout->size = align(set_layout->size, alignment);
                set_layout->binding[b].type = binding->descriptorType;
                set_layout->binding[b].array_size = binding->descriptorCount;
                set_layout->binding[b].offset = set_layout->size;
                set_layout->binding[b].buffer_offset = buffer_count;
                set_layout->binding[b].dynamic_offset_offset = dynamic_offset_count;

                if (binding->pImmutableSamplers) {
                        set_layout->binding[b].immutable_samplers_offset = samplers_offset;
                        set_layout->binding[b].immutable_samplers_equal =
                                has_equal_immutable_samplers(binding->pImmutableSamplers, binding->descriptorCount);
                        set_layout->has_immutable_samplers = true;

                        for (uint32_t i = 0; i < binding->descriptorCount; i++)
                                memcpy(samplers + 4 * i, &radv_sampler_from_handle(binding->pImmutableSamplers[i])->state, 16);

                        /* Don't reserve space for the samplers if they're not accessed. */
                        if (set_layout->binding[b].immutable_samplers_equal) {
                                if (binding->descriptorType == VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER)
                                        set_layout->binding[b].size -= 32;
                                else if (binding->descriptorType == VK_DESCRIPTOR_TYPE_SAMPLER)
                                        set_layout->binding[b].size -= 16;
                        }
                        samplers += 4 * binding->descriptorCount;
                        samplers_offset += 4 * sizeof(uint32_t) * binding->descriptorCount;
                }

                set_layout->size += binding->descriptorCount * set_layout->binding[b].size;
                buffer_count += binding->descriptorCount * binding_buffer_count;
                dynamic_offset_count += binding->descriptorCount *
                        set_layout->binding[b].dynamic_offset_count;
                set_layout->shader_stages |= binding->stageFlags;
        }

        set_layout->buffer_count = buffer_count;
        set_layout->dynamic_offset_count = dynamic_offset_count;

        *pSetLayout = radv_descriptor_set_layout_to_handle(set_layout);

        return VK_SUCCESS;
}

void radv_DestroyDescriptorSetLayout(
        VkDevice                                    _device,
        VkDescriptorSetLayout                       _set_layout,
        const VkAllocationCallbacks*                pAllocator)
{
        RADV_FROM_HANDLE(radv_device, device, _device);
        RADV_FROM_HANDLE(radv_descriptor_set_layout, set_layout, _set_layout);

        if (!set_layout)
                return;

        vk_free2(&device->alloc, pAllocator, set_layout);
}

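/* vkGetDescriptorSetLayoutSupport: replay the same size/alignment rules as
 * radv_CreateDescriptorSetLayout and report whether the accumulated size
 * overflows; the two switch statements must stay in sync. Bindings with
 * descriptorCount == 0 contribute nothing and are skipped. */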
void radv_GetDescriptorSetLayoutSupport(VkDevice device,
                                        const VkDescriptorSetLayoutCreateInfo* pCreateInfo,
                                        VkDescriptorSetLayoutSupport* pSupport)
{
        bool supported = true;
        uint64_t size = 0;
        for (uint32_t i = 0; i < pCreateInfo->bindingCount; i++) {
                const VkDescriptorSetLayoutBinding *binding = &pCreateInfo->pBindings[i];

                if (binding->descriptorCount == 0)
                        continue;

                uint64_t descriptor_size = 0;
                uint64_t descriptor_alignment = 1;
                switch (binding->descriptorType) {
                case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
                case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
                        break;
                case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
                case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
                case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
                case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
                        descriptor_size = 16;
                        descriptor_alignment = 16;
                        break;
                case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
                case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE:
                case VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT:
                        descriptor_size = 64;
                        descriptor_alignment = 32;
                        break;
                case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
                        /* The sampler slot is dropped when all immutable
                         * samplers are equal, matching the sizing in
                         * radv_CreateDescriptorSetLayout. */
                        if (has_equal_immutable_samplers(binding->pImmutableSamplers, binding->descriptorCount)) {
                                descriptor_size = 64;
                        } else {
                                descriptor_size = 96;
                        }
                        descriptor_alignment = 32;
                        break;
                case VK_DESCRIPTOR_TYPE_SAMPLER:
                        if (!has_equal_immutable_samplers(binding->pImmutableSamplers, binding->descriptorCount)) {
                                descriptor_size = 16;
                                descriptor_alignment = 16;
                        }
                        break;
                default:
                        unreachable("unknown descriptor type\n");
                        break;
                }

                /* align_u64 wraps to 0 on overflow; treat that as unsupported. */
                if (size && !align_u64(size, descriptor_alignment)) {
                        supported = false;
                }
                size = align_u64(size, descriptor_alignment);
                if (descriptor_size && (UINT64_MAX - size) / descriptor_size < binding->descriptorCount) {
                        supported = false;
                }
                size += binding->descriptorCount * descriptor_size;
        }

        pSupport->supported = supported;
}

/*
 * Pipeline layouts.  These have nothing to do with the pipeline.  They are
 * just multiple descriptor set layouts pasted together.
 */

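/* The SHA-1 built here summarizes the whole layout (bindings, immutable
 * sampler state, push-constant size); radv keys its pipeline cache on it. */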
VkResult radv_CreatePipelineLayout(
        VkDevice                                    _device,
        const VkPipelineLayoutCreateInfo*           pCreateInfo,
        const VkAllocationCallbacks*                pAllocator,
        VkPipelineLayout*                           pPipelineLayout)
{
        RADV_FROM_HANDLE(radv_device, device, _device);
        struct radv_pipeline_layout *layout;
        struct mesa_sha1 ctx;

        assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO);

        layout = vk_alloc2(&device->alloc, pAllocator, sizeof(*layout), 8,
                           VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
        if (layout == NULL)
                return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

        layout->num_sets = pCreateInfo->setLayoutCount;

        unsigned dynamic_offset_count = 0;

        _mesa_sha1_init(&ctx);
        for (uint32_t set = 0; set < pCreateInfo->setLayoutCount; set++) {
                RADV_FROM_HANDLE(radv_descriptor_set_layout, set_layout,
                                 pCreateInfo->pSetLayouts[set]);
                layout->set[set].layout = set_layout;

                layout->set[set].dynamic_offset_start = dynamic_offset_count;
                for (uint32_t b = 0; b < set_layout->binding_count; b++) {
                        dynamic_offset_count += set_layout->binding[b].array_size * set_layout->binding[b].dynamic_offset_count;
                        if (set_layout->binding[b].immutable_samplers_offset)
                                _mesa_sha1_update(&ctx, radv_immutable_samplers(set_layout, set_layout->binding + b),
                                                  set_layout->binding[b].array_size * 4 * sizeof(uint32_t));
                }
                _mesa_sha1_update(&ctx, set_layout->binding,
                                  sizeof(set_layout->binding[0]) * set_layout->binding_count);
        }

        layout->dynamic_offset_count = dynamic_offset_count;
        layout->push_constant_size = 0;

        for (unsigned i = 0; i < pCreateInfo->pushConstantRangeCount; ++i) {
                const VkPushConstantRange *range = pCreateInfo->pPushConstantRanges + i;
                layout->push_constant_size = MAX2(layout->push_constant_size,
                                                  range->offset + range->size);
        }

        layout->push_constant_size = align(layout->push_constant_size, 16);
        _mesa_sha1_update(&ctx, &layout->push_constant_size,
                          sizeof(layout->push_constant_size));
        _mesa_sha1_final(&ctx, layout->sha1);
        *pPipelineLayout = radv_pipeline_layout_to_handle(layout);

        return VK_SUCCESS;
}

void radv_DestroyPipelineLayout(
        VkDevice                                    _device,
        VkPipelineLayout                            _pipelineLayout,
        const VkAllocationCallbacks*                pAllocator)
{
        RADV_FROM_HANDLE(radv_device, device, _device);
        RADV_FROM_HANDLE(radv_pipeline_layout, pipeline_layout, _pipelineLayout);

        if (!pipeline_layout)
                return;
        vk_free2(&device->alloc, pAllocator, pipeline_layout);
}

#define EMPTY 1

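/* Sets are carved out of the pool in one of two ways: pools created without
 * FREE_DESCRIPTOR_SET_BIT bump-allocate the host structures from
 * host_memory_base, while freeable pools track each set in the entries
 * array.  GPU memory is allocated linearly first and falls back to a
 * first-fit gap search over the sorted entries. */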
static VkResult
radv_descriptor_set_create(struct radv_device *device,
                           struct radv_descriptor_pool *pool,
                           const struct radv_descriptor_set_layout *layout,
                           struct radv_descriptor_set **out_set)
{
        struct radv_descriptor_set *set;
        unsigned range_offset = sizeof(struct radv_descriptor_set) +
                sizeof(struct radeon_winsys_bo *) * layout->buffer_count;
        unsigned mem_size = range_offset +
                sizeof(struct radv_descriptor_range) * layout->dynamic_offset_count;

        if (pool->host_memory_base) {
                if (pool->host_memory_end - pool->host_memory_ptr < mem_size)
                        return vk_error(VK_ERROR_OUT_OF_POOL_MEMORY_KHR);

                set = (struct radv_descriptor_set*)pool->host_memory_ptr;
                pool->host_memory_ptr += mem_size;
        } else {
                set = vk_alloc2(&device->alloc, NULL, mem_size, 8,
                                VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);

                if (!set)
                        return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
        }

        memset(set, 0, mem_size);

        if (layout->dynamic_offset_count) {
                set->dynamic_descriptors = (struct radv_descriptor_range*)((uint8_t*)set + range_offset);
        }

        set->layout = layout;
        if (layout->size) {
                uint32_t layout_size = align_u32(layout->size, 32);
                set->size = layout->size;

                if (!pool->host_memory_base && pool->entry_count == pool->max_entry_count) {
                        vk_free2(&device->alloc, NULL, set);
                        return vk_error(VK_ERROR_OUT_OF_POOL_MEMORY_KHR);
                }

                /* Try to allocate linearly first, so that we don't spend
                 * time looking for gaps if the app only allocates &
                 * resets via the pool. */
                if (pool->current_offset + layout_size <= pool->size) {
                        set->bo = pool->bo;
                        set->mapped_ptr = (uint32_t*)(pool->mapped_ptr + pool->current_offset);
                        set->va = radv_buffer_get_va(set->bo) + pool->current_offset;
                        if (!pool->host_memory_base) {
                                pool->entries[pool->entry_count].offset = pool->current_offset;
                                pool->entries[pool->entry_count].size = layout_size;
                                pool->entries[pool->entry_count].set = set;
                                pool->entry_count++;
                        }
                        pool->current_offset += layout_size;
                } else if (!pool->host_memory_base) {
                        uint64_t offset = 0;
                        int index;

                        for (index = 0; index < pool->entry_count; ++index) {
                                if (pool->entries[index].offset - offset >= layout_size)
                                        break;
                                offset = pool->entries[index].offset + pool->entries[index].size;
                        }

                        if (pool->size - offset < layout_size) {
                                vk_free2(&device->alloc, NULL, set);
                                return vk_error(VK_ERROR_OUT_OF_POOL_MEMORY_KHR);
                        }
                        set->bo = pool->bo;
                        set->mapped_ptr = (uint32_t*)(pool->mapped_ptr + offset);
                        set->va = radv_buffer_get_va(set->bo) + offset;
                        memmove(&pool->entries[index + 1], &pool->entries[index],
                                sizeof(pool->entries[0]) * (pool->entry_count - index));
                        pool->entries[index].offset = offset;
                        pool->entries[index].size = layout_size;
                        pool->entries[index].set = set;
                        pool->entry_count++;
                } else
                        return vk_error(VK_ERROR_OUT_OF_POOL_MEMORY_KHR);
        }

        if (layout->has_immutable_samplers) {
                for (unsigned i = 0; i < layout->binding_count; ++i) {
                        if (!layout->binding[i].immutable_samplers_offset ||
                            layout->binding[i].immutable_samplers_equal)
                                continue;

                        unsigned offset = layout->binding[i].offset / 4;
                        if (layout->binding[i].type == VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER)
                                offset += 16;

                        const uint32_t *samplers = (const uint32_t*)((const char*)layout + layout->binding[i].immutable_samplers_offset);
                        for (unsigned j = 0; j < layout->binding[i].array_size; ++j) {
                                memcpy(set->mapped_ptr + offset, samplers + 4 * j, 16);
                                offset += layout->binding[i].size / 4;
                        }
                }
        }
        *out_set = set;
        return VK_SUCCESS;
}

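/* Only valid for freeable pools: removes the set's GPU range from the
 * entries list (when free_bo is set) and releases the host allocation. */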
static void
radv_descriptor_set_destroy(struct radv_device *device,
                            struct radv_descriptor_pool *pool,
                            struct radv_descriptor_set *set,
                            bool free_bo)
{
        assert(!pool->host_memory_base);

        if (free_bo && set->size && !pool->host_memory_base) {
                uint32_t offset = (uint8_t*)set->mapped_ptr - pool->mapped_ptr;
                for (int i = 0; i < pool->entry_count; ++i) {
                        if (pool->entries[i].offset == offset) {
                                memmove(&pool->entries[i], &pool->entries[i+1],
                                        sizeof(pool->entries[i]) * (pool->entry_count - i - 1));
                                --pool->entry_count;
                                break;
                        }
                }
        }
        vk_free2(&device->alloc, NULL, set);
}

VkResult radv_CreateDescriptorPool(
        VkDevice                                    _device,
        const VkDescriptorPoolCreateInfo*           pCreateInfo,
        const VkAllocationCallbacks*                pAllocator,
        VkDescriptorPool*                           pDescriptorPool)
{
        RADV_FROM_HANDLE(radv_device, device, _device);
        struct radv_descriptor_pool *pool;
        int size = sizeof(struct radv_descriptor_pool);
        uint64_t bo_size = 0, bo_count = 0, range_count = 0;

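        /* Size the pool's GPU buffer (bo_size), the per-descriptor buffer
         * list (bo_count; samplers reference no buffer), and the dynamic
         * descriptor ranges (range_count) from the requested pool sizes. */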
        for (unsigned i = 0; i < pCreateInfo->poolSizeCount; ++i) {
                if (pCreateInfo->pPoolSizes[i].type != VK_DESCRIPTOR_TYPE_SAMPLER)
                        bo_count += pCreateInfo->pPoolSizes[i].descriptorCount;

                switch (pCreateInfo->pPoolSizes[i].type) {
                case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
                case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
                        range_count += pCreateInfo->pPoolSizes[i].descriptorCount;
                        break;
                case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
                case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
                case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
                case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
                case VK_DESCRIPTOR_TYPE_SAMPLER:
                        /* 32 as we may need to align for images */
                        bo_size += 32 * pCreateInfo->pPoolSizes[i].descriptorCount;
                        break;
                case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE:
                case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
                case VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT:
                        bo_size += 64 * pCreateInfo->pPoolSizes[i].descriptorCount;
                        break;
                case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
                        bo_size += 96 * pCreateInfo->pPoolSizes[i].descriptorCount;
                        break;
                default:
                        unreachable("unknown descriptor type\n");
                        break;
                }
        }

        if (!(pCreateInfo->flags & VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT)) {
                uint64_t host_size = pCreateInfo->maxSets * sizeof(struct radv_descriptor_set);
                host_size += sizeof(struct radeon_winsys_bo*) * bo_count;
                host_size += sizeof(struct radv_descriptor_range) * range_count;
                size += host_size;
        } else {
                size += sizeof(struct radv_descriptor_pool_entry) * pCreateInfo->maxSets;
        }

        pool = vk_alloc2(&device->alloc, pAllocator, size, 8,
                         VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
        if (!pool)
                return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

        memset(pool, 0, sizeof(*pool));

        if (!(pCreateInfo->flags & VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT)) {
                pool->host_memory_base = (uint8_t*)pool + sizeof(struct radv_descriptor_pool);
                pool->host_memory_ptr = pool->host_memory_base;
                pool->host_memory_end = (uint8_t*)pool + size;
        }

        if (bo_size) {
                pool->bo = device->ws->buffer_create(device->ws, bo_size, 32,
                                                     RADEON_DOMAIN_VRAM,
                                                     RADEON_FLAG_NO_INTERPROCESS_SHARING |
                                                     RADEON_FLAG_READ_ONLY);
                pool->mapped_ptr = (uint8_t*)device->ws->buffer_map(pool->bo);
        }
        pool->size = bo_size;
        pool->max_entry_count = pCreateInfo->maxSets;

        *pDescriptorPool = radv_descriptor_pool_to_handle(pool);
        return VK_SUCCESS;
}

void radv_DestroyDescriptorPool(
        VkDevice                                    _device,
        VkDescriptorPool                            _pool,
        const VkAllocationCallbacks*                pAllocator)
{
        RADV_FROM_HANDLE(radv_device, device, _device);
        RADV_FROM_HANDLE(radv_descriptor_pool, pool, _pool);

        if (!pool)
                return;

        if (!pool->host_memory_base) {
                for (int i = 0; i < pool->entry_count; ++i) {
                        radv_descriptor_set_destroy(device, pool, pool->entries[i].set, false);
                }
        }

        if (pool->bo)
                device->ws->buffer_destroy(pool->bo);
        vk_free2(&device->alloc, pAllocator, pool);
}

VkResult radv_ResetDescriptorPool(
        VkDevice                                    _device,
        VkDescriptorPool                            descriptorPool,
        VkDescriptorPoolResetFlags                  flags)
{
        RADV_FROM_HANDLE(radv_device, device, _device);
        RADV_FROM_HANDLE(radv_descriptor_pool, pool, descriptorPool);

        if (!pool->host_memory_base) {
                for (int i = 0; i < pool->entry_count; ++i) {
                        radv_descriptor_set_destroy(device, pool, pool->entries[i].set, false);
                }
                pool->entry_count = 0;
        }

        pool->current_offset = 0;
        pool->host_memory_ptr = pool->host_memory_base;

        return VK_SUCCESS;
}

VkResult radv_AllocateDescriptorSets(
        VkDevice                                    _device,
        const VkDescriptorSetAllocateInfo*          pAllocateInfo,
        VkDescriptorSet*                            pDescriptorSets)
{
        RADV_FROM_HANDLE(radv_device, device, _device);
        RADV_FROM_HANDLE(radv_descriptor_pool, pool, pAllocateInfo->descriptorPool);

        VkResult result = VK_SUCCESS;
        uint32_t i;
        struct radv_descriptor_set *set = NULL;

        /* allocate a set of buffers for each shader to contain descriptors */
        for (i = 0; i < pAllocateInfo->descriptorSetCount; i++) {
                RADV_FROM_HANDLE(radv_descriptor_set_layout, layout,
                                 pAllocateInfo->pSetLayouts[i]);

                assert(!(layout->flags & VK_DESCRIPTOR_SET_LAYOUT_CREATE_PUSH_DESCRIPTOR_BIT_KHR));

                result = radv_descriptor_set_create(device, pool, layout, &set);
                if (result != VK_SUCCESS)
                        break;

                pDescriptorSets[i] = radv_descriptor_set_to_handle(set);
        }

        /* On failure, free whatever was allocated so far. */
        if (result != VK_SUCCESS)
                radv_FreeDescriptorSets(_device, pAllocateInfo->descriptorPool,
                                        i, pDescriptorSets);
        return result;
}

VkResult radv_FreeDescriptorSets(
        VkDevice                                    _device,
        VkDescriptorPool                            descriptorPool,
        uint32_t                                    count,
        const VkDescriptorSet*                      pDescriptorSets)
{
        RADV_FROM_HANDLE(radv_device, device, _device);
        RADV_FROM_HANDLE(radv_descriptor_pool, pool, descriptorPool);

        for (uint32_t i = 0; i < count; i++) {
                RADV_FROM_HANDLE(radv_descriptor_set, set, pDescriptorSets[i]);

                if (set && !pool->host_memory_base)
                        radv_descriptor_set_destroy(device, pool, set, true);
        }
        return VK_SUCCESS;
}

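/* The write_*_descriptor helpers below fill descriptor words directly in
 * the set's mapped GPU memory.  When cmd_buffer is non-NULL (the push
 * descriptor path) the referenced BO is added to the command stream instead
 * of being recorded in the set's buffer list. */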
static void write_texel_buffer_descriptor(struct radv_device *device,
                                          struct radv_cmd_buffer *cmd_buffer,
                                          unsigned *dst,
                                          struct radeon_winsys_bo **buffer_list,
                                          const VkBufferView _buffer_view)
{
        RADV_FROM_HANDLE(radv_buffer_view, buffer_view, _buffer_view);

        memcpy(dst, buffer_view->state, 4 * 4);

        if (cmd_buffer)
                radv_cs_add_buffer(device->ws, cmd_buffer->cs, buffer_view->bo, 7);
        else
                *buffer_list = buffer_view->bo;
}

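/* Builds a 4-dword raw buffer resource descriptor: 48-bit base address,
 * range in bytes, and a final dword that selects .xyzw with a 32-bit
 * float data format. */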
static void write_buffer_descriptor(struct radv_device *device,
                                    struct radv_cmd_buffer *cmd_buffer,
                                    unsigned *dst,
                                    struct radeon_winsys_bo **buffer_list,
                                    const VkDescriptorBufferInfo *buffer_info)
{
        RADV_FROM_HANDLE(radv_buffer, buffer, buffer_info->buffer);
        uint64_t va = radv_buffer_get_va(buffer->bo);
        uint32_t range = buffer_info->range;

        if (buffer_info->range == VK_WHOLE_SIZE)
                range = buffer->size - buffer_info->offset;

        va += buffer_info->offset + buffer->offset;
        dst[0] = va;
        dst[1] = S_008F04_BASE_ADDRESS_HI(va >> 32);
        dst[2] = range;
        dst[3] = S_008F0C_DST_SEL_X(V_008F0C_SQ_SEL_X) |
                 S_008F0C_DST_SEL_Y(V_008F0C_SQ_SEL_Y) |
                 S_008F0C_DST_SEL_Z(V_008F0C_SQ_SEL_Z) |
                 S_008F0C_DST_SEL_W(V_008F0C_SQ_SEL_W) |
                 S_008F0C_NUM_FORMAT(V_008F0C_BUF_NUM_FORMAT_FLOAT) |
                 S_008F0C_DATA_FORMAT(V_008F0C_BUF_DATA_FORMAT_32);

        if (cmd_buffer)
                radv_cs_add_buffer(device->ws, cmd_buffer->cs, buffer->bo, 7);
        else
                *buffer_list = buffer->bo;
}

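/* Dynamic buffer descriptors only record the VA and size here; the actual
 * descriptor words are emitted at bind time, once the dynamic offset from
 * vkCmdBindDescriptorSets is known. */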
static void write_dynamic_buffer_descriptor(struct radv_device *device,
                                            struct radv_descriptor_range *range,
                                            struct radeon_winsys_bo **buffer_list,
                                            const VkDescriptorBufferInfo *buffer_info)
{
        RADV_FROM_HANDLE(radv_buffer, buffer, buffer_info->buffer);
        uint64_t va = radv_buffer_get_va(buffer->bo);
        unsigned size = buffer_info->range;

        if (buffer_info->range == VK_WHOLE_SIZE)
                size = buffer->size - buffer_info->offset;

        va += buffer_info->offset + buffer->offset;
        range->va = va;
        range->size = size;

        *buffer_list = buffer->bo;
}

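/* Image views carry two pre-baked 16-dword descriptors; storage images use
 * the storage variant, everything else the sampled variant. */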
static void
write_image_descriptor(struct radv_device *device,
                       struct radv_cmd_buffer *cmd_buffer,
                       unsigned *dst,
                       struct radeon_winsys_bo **buffer_list,
                       VkDescriptorType descriptor_type,
                       const VkDescriptorImageInfo *image_info)
{
        RADV_FROM_HANDLE(radv_image_view, iview, image_info->imageView);
        uint32_t *descriptor;

        if (descriptor_type == VK_DESCRIPTOR_TYPE_STORAGE_IMAGE) {
                descriptor = iview->storage_descriptor;
        } else {
                descriptor = iview->descriptor;
        }

        memcpy(dst, descriptor, 16 * 4);

        if (cmd_buffer)
                radv_cs_add_buffer(device->ws, cmd_buffer->cs, iview->bo, 7);
        else
                *buffer_list = iview->bo;
}

static void
write_combined_image_sampler_descriptor(struct radv_device *device,
                                        struct radv_cmd_buffer *cmd_buffer,
                                        unsigned *dst,
                                        struct radeon_winsys_bo **buffer_list,
                                        VkDescriptorType descriptor_type,
                                        const VkDescriptorImageInfo *image_info,
                                        bool has_sampler)
{
        RADV_FROM_HANDLE(radv_sampler, sampler, image_info->sampler);

        write_image_descriptor(device, cmd_buffer, dst, buffer_list, descriptor_type, image_info);
        /* copy over sampler state */
        if (has_sampler)
                memcpy(dst + 16, sampler->state, 16);
}

static void
write_sampler_descriptor(struct radv_device *device,
                         unsigned *dst,
                         const VkDescriptorImageInfo *image_info)
{
        RADV_FROM_HANDLE(radv_sampler, sampler, image_info->sampler);

        memcpy(dst, sampler->state, 16);
}

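/* Shared implementation of vkUpdateDescriptorSets and push descriptors:
 * dstSetOverride, when set by the push-descriptor path, redirects every
 * write to that set regardless of writeset->dstSet. */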
void radv_update_descriptor_sets(
        struct radv_device*                         device,
        struct radv_cmd_buffer*                     cmd_buffer,
        VkDescriptorSet                             dstSetOverride,
        uint32_t                                    descriptorWriteCount,
        const VkWriteDescriptorSet*                 pDescriptorWrites,
        uint32_t                                    descriptorCopyCount,
        const VkCopyDescriptorSet*                  pDescriptorCopies)
{
        uint32_t i, j;
        for (i = 0; i < descriptorWriteCount; i++) {
                const VkWriteDescriptorSet *writeset = &pDescriptorWrites[i];
                RADV_FROM_HANDLE(radv_descriptor_set, set,
                                 dstSetOverride ? dstSetOverride : writeset->dstSet);
                const struct radv_descriptor_set_binding_layout *binding_layout =
                        set->layout->binding + writeset->dstBinding;
                uint32_t *ptr = set->mapped_ptr;
                struct radeon_winsys_bo **buffer_list = set->descriptors;
                /* Immutable samplers are not copied into push descriptors when they are
                 * allocated, so if we are writing push descriptors we have to copy the
                 * immutable samplers into them now.
                 */
                const bool copy_immutable_samplers = cmd_buffer &&
                        binding_layout->immutable_samplers_offset && !binding_layout->immutable_samplers_equal;
                const uint32_t *samplers = radv_immutable_samplers(set->layout, binding_layout);

                ptr += binding_layout->offset / 4;
                ptr += binding_layout->size * writeset->dstArrayElement / 4;
                buffer_list += binding_layout->buffer_offset;
                buffer_list += writeset->dstArrayElement;
                for (j = 0; j < writeset->descriptorCount; ++j) {
                        switch (writeset->descriptorType) {
                        case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
                        case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC: {
                                unsigned idx = writeset->dstArrayElement + j;
                                idx += binding_layout->dynamic_offset_offset;
                                assert(!(set->layout->flags & VK_DESCRIPTOR_SET_LAYOUT_CREATE_PUSH_DESCRIPTOR_BIT_KHR));
                                write_dynamic_buffer_descriptor(device, set->dynamic_descriptors + idx,
                                                                buffer_list, writeset->pBufferInfo + j);
                                break;
                        }
                        case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
                        case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
                                write_buffer_descriptor(device, cmd_buffer, ptr, buffer_list,
                                                        writeset->pBufferInfo + j);
                                break;
                        case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
                        case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
                                write_texel_buffer_descriptor(device, cmd_buffer, ptr, buffer_list,
                                                              writeset->pTexelBufferView[j]);
                                break;
                        case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE:
                        case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
                        case VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT:
                                write_image_descriptor(device, cmd_buffer, ptr, buffer_list,
                                                       writeset->descriptorType,
                                                       writeset->pImageInfo + j);
                                break;
                        case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
                                write_combined_image_sampler_descriptor(device, cmd_buffer, ptr, buffer_list,
                                                                        writeset->descriptorType,
                                                                        writeset->pImageInfo + j,
                                                                        !binding_layout->immutable_samplers_offset);
                                if (copy_immutable_samplers) {
                                        const unsigned idx = writeset->dstArrayElement + j;
                                        memcpy(ptr + 16, samplers + 4 * idx, 16);
                                }
                                break;
                        case VK_DESCRIPTOR_TYPE_SAMPLER:
                                if (!binding_layout->immutable_samplers_offset) {
                                        write_sampler_descriptor(device, ptr,
                                                                 writeset->pImageInfo + j);
                                } else if (copy_immutable_samplers) {
                                        unsigned idx = writeset->dstArrayElement + j;
                                        memcpy(ptr, samplers + 4 * idx, 16);
                                }
                                break;
                        default:
                                unreachable("unimplemented descriptor type");
                                break;
                        }
                        ptr += binding_layout->size / 4;
                        ++buffer_list;
                }
        }

        for (i = 0; i < descriptorCopyCount; i++) {
                const VkCopyDescriptorSet *copyset = &pDescriptorCopies[i];
                RADV_FROM_HANDLE(radv_descriptor_set, src_set,
                                 copyset->srcSet);
                RADV_FROM_HANDLE(radv_descriptor_set, dst_set,
                                 copyset->dstSet);
                const struct radv_descriptor_set_binding_layout *src_binding_layout =
                        src_set->layout->binding + copyset->srcBinding;
                const struct radv_descriptor_set_binding_layout *dst_binding_layout =
                        dst_set->layout->binding + copyset->dstBinding;
                uint32_t *src_ptr = src_set->mapped_ptr;
                uint32_t *dst_ptr = dst_set->mapped_ptr;
                struct radeon_winsys_bo **src_buffer_list = src_set->descriptors;
                struct radeon_winsys_bo **dst_buffer_list = dst_set->descriptors;

                src_ptr += src_binding_layout->offset / 4;
                dst_ptr += dst_binding_layout->offset / 4;

                src_ptr += src_binding_layout->size * copyset->srcArrayElement / 4;
                dst_ptr += dst_binding_layout->size * copyset->dstArrayElement / 4;

                src_buffer_list += src_binding_layout->buffer_offset;
                src_buffer_list += copyset->srcArrayElement;

                dst_buffer_list += dst_binding_layout->buffer_offset;
                dst_buffer_list += copyset->dstArrayElement;

                for (j = 0; j < copyset->descriptorCount; ++j) {
                        switch (src_binding_layout->type) {
                        case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
                        case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC: {
                                unsigned src_idx = copyset->srcArrayElement + j;
                                unsigned dst_idx = copyset->dstArrayElement + j;
                                struct radv_descriptor_range *src_range, *dst_range;
                                src_idx += src_binding_layout->dynamic_offset_offset;
                                dst_idx += dst_binding_layout->dynamic_offset_offset;

                                src_range = src_set->dynamic_descriptors + src_idx;
                                dst_range = dst_set->dynamic_descriptors + dst_idx;
                                *dst_range = *src_range;
                                break;
                        }
                        default:
                                memcpy(dst_ptr, src_ptr, src_binding_layout->size);
                        }
                        src_ptr += src_binding_layout->size / 4;
                        dst_ptr += dst_binding_layout->size / 4;
                        /* The buffer lists were already advanced to the first
                         * array element above, so index with j alone. */
                        dst_buffer_list[j] = src_buffer_list[j];
                }
        }
}

void radv_UpdateDescriptorSets(
        VkDevice                                    _device,
        uint32_t                                    descriptorWriteCount,
        const VkWriteDescriptorSet*                 pDescriptorWrites,
        uint32_t                                    descriptorCopyCount,
        const VkCopyDescriptorSet*                  pDescriptorCopies)
{
        RADV_FROM_HANDLE(radv_device, device, _device);

        radv_update_descriptor_sets(device, NULL, VK_NULL_HANDLE, descriptorWriteCount, pDescriptorWrites,
                                    descriptorCopyCount, pDescriptorCopies);
}

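/* Templates pre-resolve each update entry to a destination offset and
 * stride so that applying the template later is a straight copy loop with
 * no layout lookups. */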
VkResult radv_CreateDescriptorUpdateTemplate(VkDevice _device,
                                             const VkDescriptorUpdateTemplateCreateInfoKHR *pCreateInfo,
                                             const VkAllocationCallbacks *pAllocator,
                                             VkDescriptorUpdateTemplateKHR *pDescriptorUpdateTemplate)
{
        RADV_FROM_HANDLE(radv_device, device, _device);
        RADV_FROM_HANDLE(radv_descriptor_set_layout, set_layout, pCreateInfo->descriptorSetLayout);
        const uint32_t entry_count = pCreateInfo->descriptorUpdateEntryCount;
        const size_t size = sizeof(struct radv_descriptor_update_template) +
                sizeof(struct radv_descriptor_update_template_entry) * entry_count;
        struct radv_descriptor_update_template *templ;
        uint32_t i;

        templ = vk_alloc2(&device->alloc, pAllocator, size, 8, VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
        if (!templ)
                return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

        templ->entry_count = entry_count;
        templ->bind_point = pCreateInfo->pipelineBindPoint;

        for (i = 0; i < entry_count; i++) {
                const VkDescriptorUpdateTemplateEntryKHR *entry = &pCreateInfo->pDescriptorUpdateEntries[i];
                const struct radv_descriptor_set_binding_layout *binding_layout =
                        set_layout->binding + entry->dstBinding;
                const uint32_t buffer_offset = binding_layout->buffer_offset + entry->dstArrayElement;
                const uint32_t *immutable_samplers = NULL;
                uint32_t dst_offset;
                uint32_t dst_stride;

                /* dst_offset is an offset into dynamic_descriptors when the descriptor
                 * is dynamic, and an offset into mapped_ptr otherwise. */
                switch (entry->descriptorType) {
                case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
                case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
                        assert(pCreateInfo->templateType == VK_DESCRIPTOR_UPDATE_TEMPLATE_TYPE_DESCRIPTOR_SET_KHR);
                        dst_offset = binding_layout->dynamic_offset_offset + entry->dstArrayElement;
                        dst_stride = 0; /* Not used */
                        break;
                default:
                        switch (entry->descriptorType) {
                        case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
                        case VK_DESCRIPTOR_TYPE_SAMPLER:
                                /* Immutable samplers are copied into push descriptors when they are pushed */
                                if (pCreateInfo->templateType == VK_DESCRIPTOR_UPDATE_TEMPLATE_TYPE_PUSH_DESCRIPTORS_KHR &&
                                    binding_layout->immutable_samplers_offset && !binding_layout->immutable_samplers_equal) {
                                        immutable_samplers = radv_immutable_samplers(set_layout, binding_layout) + entry->dstArrayElement * 4;
                                }
                                break;
                        default:
                                break;
                        }
                        dst_offset = binding_layout->offset / 4 + binding_layout->size * entry->dstArrayElement / 4;
                        dst_stride = binding_layout->size / 4;
                        break;
                }

                templ->entry[i] = (struct radv_descriptor_update_template_entry) {
                        .descriptor_type = entry->descriptorType,
                        .descriptor_count = entry->descriptorCount,
                        .src_offset = entry->offset,
                        .src_stride = entry->stride,
                        .dst_offset = dst_offset,
                        .dst_stride = dst_stride,
                        .buffer_offset = buffer_offset,
                        .has_sampler = !binding_layout->immutable_samplers_offset,
                        .immutable_samplers = immutable_samplers
                };
        }

        *pDescriptorUpdateTemplate = radv_descriptor_update_template_to_handle(templ);
        return VK_SUCCESS;
}

void radv_DestroyDescriptorUpdateTemplate(VkDevice _device,
                                          VkDescriptorUpdateTemplateKHR descriptorUpdateTemplate,
                                          const VkAllocationCallbacks *pAllocator)
{
        RADV_FROM_HANDLE(radv_device, device, _device);
        RADV_FROM_HANDLE(radv_descriptor_update_template, templ, descriptorUpdateTemplate);

        if (!templ)
                return;

        vk_free2(&device->alloc, pAllocator, templ);
}

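/* Applies a template: for every entry, walk descriptorCount elements,
 * reading from pData at src_offset/src_stride and writing descriptor words
 * at the precomputed dst_offset/dst_stride. */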
void radv_update_descriptor_set_with_template(struct radv_device *device,
                                              struct radv_cmd_buffer *cmd_buffer,
                                              struct radv_descriptor_set *set,
                                              VkDescriptorUpdateTemplateKHR descriptorUpdateTemplate,
                                              const void *pData)
{
        RADV_FROM_HANDLE(radv_descriptor_update_template, templ, descriptorUpdateTemplate);
        uint32_t i;

        for (i = 0; i < templ->entry_count; ++i) {
                struct radeon_winsys_bo **buffer_list = set->descriptors + templ->entry[i].buffer_offset;
                uint32_t *pDst = set->mapped_ptr + templ->entry[i].dst_offset;
                const uint8_t *pSrc = ((const uint8_t *) pData) + templ->entry[i].src_offset;
                uint32_t j;

                for (j = 0; j < templ->entry[i].descriptor_count; ++j) {
                        switch (templ->entry[i].descriptor_type) {
                        case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
                        case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC: {
                                const unsigned idx = templ->entry[i].dst_offset + j;
                                assert(!(set->layout->flags & VK_DESCRIPTOR_SET_LAYOUT_CREATE_PUSH_DESCRIPTOR_BIT_KHR));
                                write_dynamic_buffer_descriptor(device, set->dynamic_descriptors + idx,
                                                                buffer_list, (struct VkDescriptorBufferInfo *) pSrc);
                                break;
                        }
                        case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
                        case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
                                write_buffer_descriptor(device, cmd_buffer, pDst, buffer_list,
                                                        (struct VkDescriptorBufferInfo *) pSrc);
                                break;
                        case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
                        case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
                                write_texel_buffer_descriptor(device, cmd_buffer, pDst, buffer_list,
                                                              *(VkBufferView *) pSrc);
                                break;
                        case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE:
                        case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
                        case VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT:
                                write_image_descriptor(device, cmd_buffer, pDst, buffer_list,
                                                       templ->entry[i].descriptor_type,
                                                       (struct VkDescriptorImageInfo *) pSrc);
                                break;
                        case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
                                write_combined_image_sampler_descriptor(device, cmd_buffer, pDst, buffer_list,
                                                                        templ->entry[i].descriptor_type,
                                                                        (struct VkDescriptorImageInfo *) pSrc,
                                                                        templ->entry[i].has_sampler);
                                if (templ->entry[i].immutable_samplers)
                                        memcpy(pDst + 16, templ->entry[i].immutable_samplers + 4 * j, 16);
                                break;
                        case VK_DESCRIPTOR_TYPE_SAMPLER:
                                if (templ->entry[i].has_sampler)
                                        write_sampler_descriptor(device, pDst,
                                                                 (struct VkDescriptorImageInfo *) pSrc);
                                else if (templ->entry[i].immutable_samplers)
                                        memcpy(pDst, templ->entry[i].immutable_samplers + 4 * j, 16);
                                break;
                        default:
                                unreachable("unimplemented descriptor type");
                                break;
                        }
                        pSrc += templ->entry[i].src_stride;
                        pDst += templ->entry[i].dst_stride;
                        ++buffer_list;
                }
        }
}

void radv_UpdateDescriptorSetWithTemplate(VkDevice _device,
                                          VkDescriptorSet descriptorSet,
                                          VkDescriptorUpdateTemplateKHR descriptorUpdateTemplate,
                                          const void *pData)
{
        RADV_FROM_HANDLE(radv_device, device, _device);
        RADV_FROM_HANDLE(radv_descriptor_set, set, descriptorSet);

        radv_update_descriptor_set_with_template(device, NULL, set, descriptorUpdateTemplate, pData);
}

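/* Sampler Y'CbCr conversion is not implemented; creation hands back a null
 * handle so the rest of the driver can ignore it. */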
VkResult radv_CreateSamplerYcbcrConversion(VkDevice device,
                                           const VkSamplerYcbcrConversionCreateInfo* pCreateInfo,
                                           const VkAllocationCallbacks* pAllocator,
                                           VkSamplerYcbcrConversion* pYcbcrConversion)
{
        *pYcbcrConversion = VK_NULL_HANDLE;
        return VK_SUCCESS;
}

void radv_DestroySamplerYcbcrConversion(VkDevice device,
                                        VkSamplerYcbcrConversion ycbcrConversion,
                                        const VkAllocationCallbacks* pAllocator)
{
        /* Do nothing. */
}