immutable_sampler_count += pCreateInfo->pBindings[j].descriptorCount;
}
- size_t size = sizeof(struct radv_descriptor_set_layout) +
- (max_binding + 1) * sizeof(set_layout->binding[0]) +
- immutable_sampler_count * 4 * sizeof(uint32_t);
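+ /* Immutable samplers are now referenced by a byte offset from the start
+ * of the layout instead of a pointer; each sampler is 4 dwords (16 bytes)
+ * of packed state appended after the binding array. */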
+ uint32_t samplers_offset = sizeof(struct radv_descriptor_set_layout) +
+ (max_binding + 1) * sizeof(set_layout->binding[0]);
+ size_t size = samplers_offset + immutable_sampler_count * 4 * sizeof(uint32_t);
set_layout = vk_alloc2(&device->alloc, pAllocator, size, 8,
VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
if (!set_layout)
return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
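+ /* Keep the create flags around: push-descriptor layouts must never reach
+ * the normal set allocation or dynamic-buffer paths (see the asserts in
+ * radv_AllocateDescriptorSets and the dynamic-buffer write below). */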
+ set_layout->flags = pCreateInfo->flags;
+
/* We just allocate all the samplers at the end of the struct */
uint32_t *samplers = (uint32_t*)&set_layout->binding[max_binding + 1];
const VkDescriptorSetLayoutBinding *binding = &pCreateInfo->pBindings[j];
uint32_t b = binding->binding;
uint32_t alignment;
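+ /* buffer_count leaves the per-binding struct: a descriptor now takes at
+ * most one buffer-list slot, so a local counter is enough (see the
+ * buffer_list increments further down). */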
+ unsigned binding_buffer_count = 0;
switch (binding->descriptorType) {
case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
+ assert(!(pCreateInfo->flags & VK_DESCRIPTOR_SET_LAYOUT_CREATE_PUSH_DESCRIPTOR_BIT_KHR));
set_layout->binding[b].dynamic_offset_count = 1;
set_layout->dynamic_shader_stages |= binding->stageFlags;
set_layout->binding[b].size = 0;
- set_layout->binding[b].buffer_count = 1;
+ binding_buffer_count = 1;
alignment = 1;
break;
case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
set_layout->binding[b].size = 16;
- set_layout->binding[b].buffer_count = 1;
+ binding_buffer_count = 1;
alignment = 16;
break;
case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
case VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT:
/* main descriptor + fmask descriptor */
set_layout->binding[b].size = 64;
- set_layout->binding[b].buffer_count = 1;
+ binding_buffer_count = 1;
alignment = 32;
break;
case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
/* main descriptor + fmask descriptor + sampler */
set_layout->binding[b].size = 96;
- set_layout->binding[b].buffer_count = 1;
+ binding_buffer_count = 1;
alignment = 32;
break;
case VK_DESCRIPTOR_TYPE_SAMPLER:
set_layout->binding[b].dynamic_offset_offset = dynamic_offset_count;
if (binding->pImmutableSamplers) {
- set_layout->binding[b].immutable_samplers = samplers;
+ set_layout->binding[b].immutable_samplers_offset = samplers_offset;
set_layout->binding[b].immutable_samplers_equal = true;
- samplers += 4 * binding->descriptorCount;
+
for (uint32_t i = 0; i < binding->descriptorCount; i++)
- memcpy(set_layout->binding[b].immutable_samplers + 4 * i, &radv_sampler_from_handle(binding->pImmutableSamplers[i])->state, 16);
+ memcpy(samplers + 4 * i, &radv_sampler_from_handle(binding->pImmutableSamplers[i])->state, 16);
for (uint32_t i = 1; i < binding->descriptorCount; i++)
- if (memcmp(set_layout->binding[b].immutable_samplers + 4 * i,
- set_layout->binding[b].immutable_samplers, 16) != 0)
+ if (memcmp(samplers + 4 * i, samplers, 16) != 0)
set_layout->binding[b].immutable_samplers_equal = false;
/* Don't reserve space for the samplers if they're not accessed. */
if (set_layout->binding[b].immutable_samplers_equal) {
if (binding->descriptorType == VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER)
set_layout->binding[b].size -= 32;
else if (binding->descriptorType == VK_DESCRIPTOR_TYPE_SAMPLER)
set_layout->binding[b].size -= 16;
}
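+ /* Step past this binding's sampler words in the trailing array. */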
+ samplers += 4 * binding->descriptorCount;
+ samplers_offset += 4 * sizeof(uint32_t) * binding->descriptorCount;
}
set_layout->size += binding->descriptorCount * set_layout->binding[b].size;
- buffer_count += binding->descriptorCount * set_layout->binding[b].buffer_count;
+ buffer_count += binding->descriptorCount * binding_buffer_count;
dynamic_offset_count += binding->descriptorCount *
set_layout->binding[b].dynamic_offset_count;
set_layout->shader_stages |= binding->stageFlags;
layout->set[set].dynamic_offset_start = dynamic_offset_count;
for (uint32_t b = 0; b < set_layout->binding_count; b++) {
dynamic_offset_count += set_layout->binding[b].array_size * set_layout->binding[b].dynamic_offset_count;
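+ /* The binding array only stores sampler offsets now, so hash the sampler
+ * words as well, or two layouts differing only in immutable samplers
+ * would hash the same. */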
+ if (set_layout->binding[b].immutable_samplers_offset)
+ _mesa_sha1_update(&ctx, radv_immutable_samplers(set_layout, set_layout->binding + b),
+ set_layout->binding[b].array_size * 4 * sizeof(uint32_t));
}
_mesa_sha1_update(&ctx, set_layout->binding,
sizeof(set_layout->binding[0]) * set_layout->binding_count);
static VkResult
radv_descriptor_set_create(struct radv_device *device,
struct radv_descriptor_pool *pool,
- struct radv_cmd_buffer *cmd_buffer,
const struct radv_descriptor_set_layout *layout,
struct radv_descriptor_set **out_set)
{
struct radv_descriptor_set *set;
- unsigned mem_size = sizeof(struct radv_descriptor_set) +
+ unsigned range_offset = sizeof(struct radv_descriptor_set) +
sizeof(struct radeon_winsys_bo *) * layout->buffer_count;
- set = vk_alloc2(&device->alloc, NULL, mem_size, 8,
- VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
+ unsigned mem_size = range_offset +
+ sizeof(struct radv_descriptor_range) * layout->dynamic_offset_count;
- if (!set)
- return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
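+ /* Pools created without FREE_DESCRIPTOR_SET_BIT carve each set out of a
+ * single host allocation with a bump pointer; freeable pools still use a
+ * per-set vk_alloc2. */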
+ if (pool->host_memory_base) {
+ if (pool->host_memory_end - pool->host_memory_ptr < mem_size)
+ return vk_error(VK_ERROR_OUT_OF_POOL_MEMORY_KHR);
+
+ set = (struct radv_descriptor_set*)pool->host_memory_ptr;
+ pool->host_memory_ptr += mem_size;
+ } else {
+ set = vk_alloc2(&device->alloc, NULL, mem_size, 8,
+ VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
+
+ if (!set)
+ return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
+ }
memset(set, 0, mem_size);
if (layout->dynamic_offset_count) {
- unsigned size = sizeof(struct radv_descriptor_range) *
- layout->dynamic_offset_count;
- set->dynamic_descriptors = vk_alloc2(&device->alloc, NULL, size, 8,
- VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
-
- if (!set->dynamic_descriptors) {
- vk_free2(&device->alloc, NULL, set);
- return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
- }
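+ /* The dynamic ranges live in the same allocation as the set itself,
+ * right after the buffer list (see range_offset above). */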
+ set->dynamic_descriptors = (struct radv_descriptor_range*)((uint8_t*)set + range_offset);
}
set->layout = layout;
if (layout->size) {
uint32_t layout_size = align_u32(layout->size, 32);
set->size = layout->size;
- if (!cmd_buffer) {
- /* try to allocate linearly first, so that we don't spend
- * time looking for gaps if the app only allocates &
- * resets via the pool. */
- if (pool->current_offset + layout_size <= pool->size) {
- set->bo = pool->bo;
- set->mapped_ptr = (uint32_t*)(pool->mapped_ptr + pool->current_offset);
- set->va = device->ws->buffer_get_va(set->bo) + pool->current_offset;
- pool->current_offset += layout_size;
- list_addtail(&set->vram_list, &pool->vram_list);
- } else {
- uint64_t offset = 0;
- struct list_head *prev = &pool->vram_list;
- struct radv_descriptor_set *cur;
- LIST_FOR_EACH_ENTRY(cur, &pool->vram_list, vram_list) {
- uint64_t start = (uint8_t*)cur->mapped_ptr - pool->mapped_ptr;
- if (start - offset >= layout_size)
- break;
-
- offset = start + cur->size;
- prev = &cur->vram_list;
- }
- if (pool->size - offset < layout_size) {
- vk_free2(&device->alloc, NULL, set->dynamic_descriptors);
- vk_free2(&device->alloc, NULL, set);
- return vk_error(VK_ERROR_OUT_OF_POOL_MEMORY_KHR);
- }
- set->bo = pool->bo;
- set->mapped_ptr = (uint32_t*)(pool->mapped_ptr + offset);
- set->va = device->ws->buffer_get_va(set->bo) + offset;
- list_add(&set->vram_list, prev);
+ /* try to allocate linearly first, so that we don't spend
+ * time looking for gaps if the app only allocates &
+ * resets via the pool. */
+ if (pool->current_offset + layout_size <= pool->size) {
+ set->bo = pool->bo;
+ set->mapped_ptr = (uint32_t*)(pool->mapped_ptr + pool->current_offset);
+ set->va = device->ws->buffer_get_va(set->bo) + pool->current_offset;
+ pool->current_offset += layout_size;
+ list_addtail(&set->vram_list, &pool->vram_list);
+ } else if (!pool->host_memory_base) {
+ uint64_t offset = 0;
+ struct list_head *prev = &pool->vram_list;
+ struct radv_descriptor_set *cur;
+
+ LIST_FOR_EACH_ENTRY(cur, &pool->vram_list, vram_list) {
+ uint64_t start = (uint8_t*)cur->mapped_ptr - pool->mapped_ptr;
+ if (start - offset >= layout_size)
+ break;
+
+ offset = start + cur->size;
+ prev = &cur->vram_list;
}
- } else {
- unsigned bo_offset;
- if (!radv_cmd_buffer_upload_alloc(cmd_buffer, set->size, 32,
- &bo_offset,
- (void**)&set->mapped_ptr)) {
+
+ if (pool->size - offset < layout_size) {
- vk_free2(&device->alloc, NULL, set->dynamic_descriptors);
vk_free2(&device->alloc, NULL, set);
- return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
+ return vk_error(VK_ERROR_OUT_OF_POOL_MEMORY_KHR);
}
-
- set->va = device->ws->buffer_get_va(cmd_buffer->upload.upload_bo);
- set->va += bo_offset;
- }
+ set->bo = pool->bo;
+ set->mapped_ptr = (uint32_t*)(pool->mapped_ptr + offset);
+ set->va = device->ws->buffer_get_va(set->bo) + offset;
+ list_add(&set->vram_list, prev);
+ } else
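+ /* A bump-allocated pool never frees individual sets, so there are no
+ * gaps worth scanning for; a failed linear allocation is final. */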
+ return vk_error(VK_ERROR_OUT_OF_POOL_MEMORY_KHR);
}
for (unsigned i = 0; i < layout->binding_count; ++i) {
- if (!layout->binding[i].immutable_samplers ||
+ if (!layout->binding[i].immutable_samplers_offset ||
layout->binding[i].immutable_samplers_equal)
continue;
if (layout->binding[i].type == VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER)
offset += 16;
+ const uint32_t *samplers = (const uint32_t*)((const char*)layout + layout->binding[i].immutable_samplers_offset);
for (unsigned j = 0; j < layout->binding[i].array_size; ++j) {
- memcpy(set->mapped_ptr + offset, layout->binding[i].immutable_samplers + 4 * j, 16);
+ memcpy(set->mapped_ptr + offset, samplers + 4 * j, 16);
offset += layout->binding[i].size / 4;
}
struct radv_descriptor_set *set,
bool free_bo)
{
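+ /* Only freeable pools may destroy individual sets; bump-allocated pools
+ * reclaim everything at reset/destroy time instead. */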
+ assert(!pool->host_memory_base);
+
if (free_bo && set->size)
list_del(&set->vram_list);
- if (set->dynamic_descriptors)
- vk_free2(&device->alloc, NULL, set->dynamic_descriptors);
vk_free2(&device->alloc, NULL, set);
}
-VkResult
-radv_temp_descriptor_set_create(struct radv_device *device,
- struct radv_cmd_buffer *cmd_buffer,
- VkDescriptorSetLayout _layout,
- VkDescriptorSet *_set)
-{
- RADV_FROM_HANDLE(radv_descriptor_set_layout, layout, _layout);
- struct radv_descriptor_set *set = NULL;
- VkResult ret;
-
- ret = radv_descriptor_set_create(device, NULL, cmd_buffer, layout, &set);
- *_set = radv_descriptor_set_to_handle(set);
- return ret;
-}
-
-void
-radv_temp_descriptor_set_destroy(struct radv_device *device,
- VkDescriptorSet _set)
-{
- RADV_FROM_HANDLE(radv_descriptor_set, set, _set);
-
- radv_descriptor_set_destroy(device, NULL, set, false);
-}
-
VkResult radv_CreateDescriptorPool(
VkDevice _device,
const VkDescriptorPoolCreateInfo* pCreateInfo,
RADV_FROM_HANDLE(radv_device, device, _device);
struct radv_descriptor_pool *pool;
- int size = sizeof(struct radv_descriptor_pool);
+ uint64_t size = sizeof(struct radv_descriptor_pool);
- uint64_t bo_size = 0;
- pool = vk_alloc2(&device->alloc, pAllocator, size, 8,
- VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
- if (!pool)
- return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
+ uint64_t bo_size = 0, bo_count = 0, range_count = 0;
- memset(pool, 0, sizeof(*pool));
for (unsigned i = 0; i < pCreateInfo->poolSizeCount; ++i) {
+ if (pCreateInfo->pPoolSizes[i].type != VK_DESCRIPTOR_TYPE_SAMPLER)
+ bo_count += pCreateInfo->pPoolSizes[i].descriptorCount;
+
switch(pCreateInfo->pPoolSizes[i].type) {
case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
+ range_count += pCreateInfo->pPoolSizes[i].descriptorCount;
break;
case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
}
}
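+ /* Size the trailing host pool for the worst case: one set struct per
+ * set, one winsys BO pointer per non-sampler descriptor and one range
+ * per dynamic buffer descriptor. */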
+ if (!(pCreateInfo->flags & VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT)) {
+ uint64_t host_size = pCreateInfo->maxSets * sizeof(struct radv_descriptor_set);
+ host_size += sizeof(struct radeon_winsys_bo*) * bo_count;
+ host_size += sizeof(struct radv_descriptor_range) * range_count;
+ size += host_size;
+ }
+
+ pool = vk_alloc2(&device->alloc, pAllocator, size, 8,
+ VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
+ if (!pool)
+ return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
+
+ memset(pool, 0, sizeof(*pool));
+
+ if (!(pCreateInfo->flags & VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT)) {
+ pool->host_memory_base = (uint8_t*)pool + sizeof(struct radv_descriptor_pool);
+ pool->host_memory_ptr = pool->host_memory_base;
+ pool->host_memory_end = (uint8_t*)pool + size;
+ }
+
if (bo_size) {
pool->bo = device->ws->buffer_create(device->ws, bo_size,
32, RADEON_DOMAIN_VRAM, 0);
if (!pool)
return;
- list_for_each_entry_safe(struct radv_descriptor_set, set,
- &pool->vram_list, vram_list) {
- radv_descriptor_set_destroy(device, pool, set, false);
+ if (!pool->host_memory_base) {
+ list_for_each_entry_safe(struct radv_descriptor_set, set,
+ &pool->vram_list, vram_list) {
+ radv_descriptor_set_destroy(device, pool, set, false);
+ }
}
if (pool->bo)
RADV_FROM_HANDLE(radv_device, device, _device);
RADV_FROM_HANDLE(radv_descriptor_pool, pool, descriptorPool);
- list_for_each_entry_safe(struct radv_descriptor_set, set,
- &pool->vram_list, vram_list) {
- radv_descriptor_set_destroy(device, pool, set, false);
+ if (!pool->host_memory_base) {
+ list_for_each_entry_safe(struct radv_descriptor_set, set,
+ &pool->vram_list, vram_list) {
+ radv_descriptor_set_destroy(device, pool, set, false);
+ }
}
list_inithead(&pool->vram_list);
pool->current_offset = 0;
+ pool->host_memory_ptr = pool->host_memory_base;
return VK_SUCCESS;
}
RADV_FROM_HANDLE(radv_descriptor_set_layout, layout,
pAllocateInfo->pSetLayouts[i]);
- result = radv_descriptor_set_create(device, pool, NULL, layout, &set);
+ assert(!(layout->flags & VK_DESCRIPTOR_SET_LAYOUT_CREATE_PUSH_DESCRIPTOR_BIT_KHR));
+
+ result = radv_descriptor_set_create(device, pool, layout, &set);
if (result != VK_SUCCESS)
break;
for (uint32_t i = 0; i < count; i++) {
RADV_FROM_HANDLE(radv_descriptor_set, set, pDescriptorSets[i]);
- if (set)
+ if (set && !pool->host_memory_base)
radv_descriptor_set_destroy(device, pool, set, true);
}
return VK_SUCCESS;
set->layout->binding + writeset->dstBinding;
uint32_t *ptr = set->mapped_ptr;
struct radeon_winsys_bo **buffer_list = set->descriptors;
+ /* Immutable samplers are not copied into push descriptors when they are
+ * allocated, so if we are writing push descriptors we have to copy the
+ * immutable samplers into them now.
+ */
+ const bool copy_immutable_samplers = cmd_buffer &&
+ binding_layout->immutable_samplers_offset && !binding_layout->immutable_samplers_equal;
+ const uint32_t *samplers = radv_immutable_samplers(set->layout, binding_layout);
ptr += binding_layout->offset / 4;
ptr += binding_layout->size * writeset->dstArrayElement / 4;
buffer_list += binding_layout->buffer_offset;
- buffer_list += binding_layout->buffer_count * writeset->dstArrayElement;
+ buffer_list += writeset->dstArrayElement;
for (j = 0; j < writeset->descriptorCount; ++j) {
switch(writeset->descriptorType) {
case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC: {
unsigned idx = writeset->dstArrayElement + j;
idx += binding_layout->dynamic_offset_offset;
+ assert(!(set->layout->flags & VK_DESCRIPTOR_SET_LAYOUT_CREATE_PUSH_DESCRIPTOR_BIT_KHR));
write_dynamic_buffer_descriptor(device, set->dynamic_descriptors + idx,
buffer_list, writeset->pBufferInfo + j);
break;
case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
write_combined_image_sampler_descriptor(device, cmd_buffer, ptr, buffer_list,
writeset->pImageInfo + j,
- !binding_layout->immutable_samplers);
+ !binding_layout->immutable_samplers_offset);
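+ /* The sampler dwords sit behind the 64-byte image+fmask part of the
+ * combined descriptor, i.e. at dword 16. */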
+ if (copy_immutable_samplers) {
+ const unsigned idx = writeset->dstArrayElement + j;
+ memcpy(ptr + 16, samplers + 4 * idx, 16);
+ }
break;
case VK_DESCRIPTOR_TYPE_SAMPLER:
- if (!binding_layout->immutable_samplers)
+ if (!binding_layout->immutable_samplers_offset) {
write_sampler_descriptor(device, ptr,
writeset->pImageInfo + j);
+ } else if (copy_immutable_samplers) {
+ unsigned idx = writeset->dstArrayElement + j;
+ memcpy(ptr, samplers + 4 * idx, 16);
+ }
break;
default:
unreachable("unimplemented descriptor type");
break;
}
ptr += binding_layout->size / 4;
- buffer_list += binding_layout->buffer_count;
+ ++buffer_list;
}
}
radv_update_descriptor_sets(device, NULL, VK_NULL_HANDLE, descriptorWriteCount, pDescriptorWrites,
descriptorCopyCount, pDescriptorCopies);
}
+
+VkResult radv_CreateDescriptorUpdateTemplateKHR(VkDevice _device,
+ const VkDescriptorUpdateTemplateCreateInfoKHR *pCreateInfo,
+ const VkAllocationCallbacks *pAllocator,
+ VkDescriptorUpdateTemplateKHR *pDescriptorUpdateTemplate)
+{
+ RADV_FROM_HANDLE(radv_device, device, _device);
+ RADV_FROM_HANDLE(radv_descriptor_set_layout, set_layout, pCreateInfo->descriptorSetLayout);
+ const uint32_t entry_count = pCreateInfo->descriptorUpdateEntryCount;
+ const size_t size = sizeof(struct radv_descriptor_update_template) +
+ sizeof(struct radv_descriptor_update_template_entry) * entry_count;
+ struct radv_descriptor_update_template *templ;
+ uint32_t i;
+
+ templ = vk_alloc2(&device->alloc, pAllocator, size, 8, VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
+ if (!templ)
+ return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
+
+ templ->entry_count = entry_count;
+
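+ /* Resolve every entry against the set layout once at template creation,
+ * so applying the template is a plain copy driven by offsets/strides. */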
+ for (i = 0; i < entry_count; i++) {
+ const VkDescriptorUpdateTemplateEntryKHR *entry = &pCreateInfo->pDescriptorUpdateEntries[i];
+ const struct radv_descriptor_set_binding_layout *binding_layout =
+ set_layout->binding + entry->dstBinding;
+ const uint32_t buffer_offset = binding_layout->buffer_offset + entry->dstArrayElement;
+ const uint32_t *immutable_samplers = NULL;
+ uint32_t dst_offset;
+ uint32_t dst_stride;
+
+ /* dst_offset is an offset into dynamic_descriptors when the descriptor
+ * is dynamic, and an offset into mapped_ptr otherwise. */
+ switch (entry->descriptorType) {
+ case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
+ case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
+ assert(pCreateInfo->templateType == VK_DESCRIPTOR_UPDATE_TEMPLATE_TYPE_DESCRIPTOR_SET_KHR);
+ dst_offset = binding_layout->dynamic_offset_offset + entry->dstArrayElement;
+ dst_stride = 0; /* Not used */
+ break;
+ default:
+ switch (entry->descriptorType) {
+ case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
+ case VK_DESCRIPTOR_TYPE_SAMPLER:
+ /* Immutable samplers are copied into push descriptors when they are pushed */
+ if (pCreateInfo->templateType == VK_DESCRIPTOR_UPDATE_TEMPLATE_TYPE_PUSH_DESCRIPTORS_KHR &&
+ binding_layout->immutable_samplers_offset && !binding_layout->immutable_samplers_equal) {
+ immutable_samplers = radv_immutable_samplers(set_layout, binding_layout) + entry->dstArrayElement * 4;
+ }
+ break;
+ default:
+ break;
+ }
+ dst_offset = binding_layout->offset / 4 + binding_layout->size * entry->dstArrayElement / 4;
+ dst_stride = binding_layout->size / 4;
+ break;
+ }
+
+ templ->entry[i] = (struct radv_descriptor_update_template_entry) {
+ .descriptor_type = entry->descriptorType,
+ .descriptor_count = entry->descriptorCount,
+ .src_offset = entry->offset,
+ .src_stride = entry->stride,
+ .dst_offset = dst_offset,
+ .dst_stride = dst_stride,
+ .buffer_offset = buffer_offset,
+ .has_sampler = !binding_layout->immutable_samplers_offset,
+ .immutable_samplers = immutable_samplers
+ };
+ }
+
+ *pDescriptorUpdateTemplate = radv_descriptor_update_template_to_handle(templ);
+ return VK_SUCCESS;
+}
+
+void radv_DestroyDescriptorUpdateTemplateKHR(VkDevice _device,
+ VkDescriptorUpdateTemplateKHR descriptorUpdateTemplate,
+ const VkAllocationCallbacks *pAllocator)
+{
+ RADV_FROM_HANDLE(radv_device, device, _device);
+ RADV_FROM_HANDLE(radv_descriptor_update_template, templ, descriptorUpdateTemplate);
+
+ if (!templ)
+ return;
+
+ vk_free2(&device->alloc, pAllocator, templ);
+}
+
+void radv_update_descriptor_set_with_template(struct radv_device *device,
+ struct radv_cmd_buffer *cmd_buffer,
+ struct radv_descriptor_set *set,
+ VkDescriptorUpdateTemplateKHR descriptorUpdateTemplate,
+ const void *pData)
+{
+ RADV_FROM_HANDLE(radv_descriptor_update_template, templ, descriptorUpdateTemplate);
+ uint32_t i;
+
+ for (i = 0; i < templ->entry_count; ++i) {
+ struct radeon_winsys_bo **buffer_list = set->descriptors + templ->entry[i].buffer_offset;
+ uint32_t *pDst = set->mapped_ptr + templ->entry[i].dst_offset;
+ const uint8_t *pSrc = ((const uint8_t *) pData) + templ->entry[i].src_offset;
+ uint32_t j;
+
+ for (j = 0; j < templ->entry[i].descriptor_count; ++j) {
+ switch (templ->entry[i].descriptor_type) {
+ case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
+ case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC: {
+ const unsigned idx = templ->entry[i].dst_offset + j;
+ assert(!(set->layout->flags & VK_DESCRIPTOR_SET_LAYOUT_CREATE_PUSH_DESCRIPTOR_BIT_KHR));
+ write_dynamic_buffer_descriptor(device, set->dynamic_descriptors + idx,
+ buffer_list, (struct VkDescriptorBufferInfo *) pSrc);
+ break;
+ }
+ case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
+ case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
+ write_buffer_descriptor(device, cmd_buffer, pDst, buffer_list,
+ (struct VkDescriptorBufferInfo *) pSrc);
+ break;
+ case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
+ case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
+ write_texel_buffer_descriptor(device, cmd_buffer, pDst, buffer_list,
+ *(VkBufferView *) pSrc);
+ break;
+ case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE:
+ case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
+ case VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT:
+ write_image_descriptor(device, cmd_buffer, pDst, buffer_list,
+ (struct VkDescriptorImageInfo *) pSrc);
+ break;
+ case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
+ write_combined_image_sampler_descriptor(device, cmd_buffer, pDst, buffer_list,
+ (struct VkDescriptorImageInfo *) pSrc,
+ templ->entry[i].has_sampler);
+ if (templ->entry[i].immutable_samplers)
+ memcpy(pDst + 16, templ->entry[i].immutable_samplers + 4 * j, 16);
+ break;
+ case VK_DESCRIPTOR_TYPE_SAMPLER:
+ if (templ->entry[i].has_sampler)
+ write_sampler_descriptor(device, pDst,
+ (struct VkDescriptorImageInfo *) pSrc);
+ else if (templ->entry[i].immutable_samplers)
+ memcpy(pDst, templ->entry[i].immutable_samplers + 4 * j, 16);
+ break;
+ default:
+ unreachable("unimplemented descriptor type");
+ break;
+ }
+ pSrc += templ->entry[i].src_stride;
+ pDst += templ->entry[i].dst_stride;
+ ++buffer_list;
+ }
+ }
+}
+
+void radv_UpdateDescriptorSetWithTemplateKHR(VkDevice _device,
+ VkDescriptorSet descriptorSet,
+ VkDescriptorUpdateTemplateKHR descriptorUpdateTemplate,
+ const void *pData)
+{
+ RADV_FROM_HANDLE(radv_device, device, _device);
+ RADV_FROM_HANDLE(radv_descriptor_set, set, descriptorSet);
+
+ radv_update_descriptor_set_with_template(device, NULL, set, descriptorUpdateTemplate, pData);
+}