#include "util/mesa-sha1.h"
#include "radv_private.h"
#include "sid.h"
+#include "vk_util.h"
static bool has_equal_immutable_samplers(const VkSampler *samplers, uint32_t count)
struct radv_descriptor_set_layout *set_layout;
assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO);
+ const VkDescriptorSetLayoutBindingFlagsCreateInfoEXT *variable_flags =
+ vk_find_struct_const(pCreateInfo->pNext, DESCRIPTOR_SET_LAYOUT_BINDING_FLAGS_CREATE_INFO_EXT);
uint32_t max_binding = 0;
uint32_t immutable_sampler_count = 0;
for (uint32_t j = 0; j < pCreateInfo->bindingCount; j++) {
max_binding = MAX2(max_binding, pCreateInfo->pBindings[j].binding);
- if (pCreateInfo->pBindings[j].pImmutableSamplers)
+ if ((pCreateInfo->pBindings[j].descriptorType == VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER ||
+ pCreateInfo->pBindings[j].descriptorType == VK_DESCRIPTOR_TYPE_SAMPLER) &&
+ pCreateInfo->pBindings[j].pImmutableSamplers)
immutable_sampler_count += pCreateInfo->pBindings[j].descriptorCount;
}
set_layout = vk_alloc2(&device->alloc, pAllocator, size, 8,
VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
if (!set_layout)
- return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
+ return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
set_layout->flags = pCreateInfo->flags;
pCreateInfo->bindingCount);
if (!bindings) {
vk_free2(&device->alloc, pAllocator, set_layout);
- return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
+ return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
}
set_layout->binding_count = max_binding + 1;
memset(set_layout->binding, 0, size - sizeof(struct radv_descriptor_set_layout));
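+ /* Number of buffer list entries (winsys BO pointers) the set will need. */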
+ uint32_t buffer_count = 0;
uint32_t dynamic_offset_count = 0;
for (uint32_t j = 0; j < pCreateInfo->bindingCount; j++) {
const VkDescriptorSetLayoutBinding *binding = bindings + j;
uint32_t b = binding->binding;
uint32_t alignment;
+ unsigned binding_buffer_count = 0;
+ uint32_t descriptor_count = binding->descriptorCount;
switch (binding->descriptorType) {
case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
set_layout->binding[b].dynamic_offset_count = 1;
set_layout->dynamic_shader_stages |= binding->stageFlags;
set_layout->binding[b].size = 0;
+ binding_buffer_count = 1;
alignment = 1;
break;
case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
set_layout->binding[b].size = 16;
+ binding_buffer_count = 1;
alignment = 16;
break;
case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
case VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT:
/* main descriptor + fmask descriptor */
set_layout->binding[b].size = 64;
+ binding_buffer_count = 1;
alignment = 32;
break;
case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
/* main descriptor + fmask descriptor + sampler */
set_layout->binding[b].size = 96;
+ binding_buffer_count = 1;
alignment = 32;
break;
case VK_DESCRIPTOR_TYPE_SAMPLER:
set_layout->binding[b].size = 16;
alignment = 16;
break;
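+ /* For inline uniform blocks, descriptorCount is the size of the block
+ * in bytes, so the binding occupies that many bytes and counts as a
+ * single descriptor. */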
+ case VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK_EXT:
+ alignment = 16;
+ set_layout->binding[b].size = descriptor_count;
+ descriptor_count = 1;
+ break;
default:
unreachable("unknown descriptor type\n");
break;
set_layout->size = align(set_layout->size, alignment);
set_layout->binding[b].type = binding->descriptorType;
- set_layout->binding[b].array_size = binding->descriptorCount;
+ set_layout->binding[b].array_size = descriptor_count;
set_layout->binding[b].offset = set_layout->size;
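+ /* First slot of the set's buffer list used by this binding. */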
+ set_layout->binding[b].buffer_offset = buffer_count;
set_layout->binding[b].dynamic_offset_offset = dynamic_offset_count;
- if (binding->pImmutableSamplers) {
+ if (variable_flags && binding->binding < variable_flags->bindingCount &&
+ (variable_flags->pBindingFlags[binding->binding] & VK_DESCRIPTOR_BINDING_VARIABLE_DESCRIPTOR_COUNT_BIT_EXT)) {
+ assert(!binding->pImmutableSamplers); /* Terribly ill-defined how many samplers are valid */
+ assert(binding->binding == max_binding);
+
+ set_layout->has_variable_descriptors = true;
+ }
+
+ if ((binding->descriptorType == VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER ||
+ binding->descriptorType == VK_DESCRIPTOR_TYPE_SAMPLER) &&
+ binding->pImmutableSamplers) {
set_layout->binding[b].immutable_samplers_offset = samplers_offset;
set_layout->binding[b].immutable_samplers_equal =
has_equal_immutable_samplers(binding->pImmutableSamplers, binding->descriptorCount);
samplers_offset += 4 * sizeof(uint32_t) * binding->descriptorCount;
}
- set_layout->size += binding->descriptorCount * set_layout->binding[b].size;
- dynamic_offset_count += binding->descriptorCount *
+ set_layout->size += descriptor_count * set_layout->binding[b].size;
+ buffer_count += descriptor_count * binding_buffer_count;
+ dynamic_offset_count += descriptor_count *
set_layout->binding[b].dynamic_offset_count;
set_layout->shader_stages |= binding->stageFlags;
}
free(bindings);
+ set_layout->buffer_count = buffer_count;
set_layout->dynamic_offset_count = dynamic_offset_count;
*pSetLayout = radv_descriptor_set_layout_to_handle(set_layout);
	return VK_SUCCESS;
}
+ const VkDescriptorSetLayoutBindingFlagsCreateInfoEXT *variable_flags =
+ vk_find_struct_const(pCreateInfo->pNext, DESCRIPTOR_SET_LAYOUT_BINDING_FLAGS_CREATE_INFO_EXT);
+ VkDescriptorSetVariableDescriptorCountLayoutSupportEXT *variable_count =
+ vk_find_struct((void*)pCreateInfo->pNext, DESCRIPTOR_SET_VARIABLE_DESCRIPTOR_COUNT_LAYOUT_SUPPORT_EXT);
+ if (variable_count) {
+ variable_count->maxVariableDescriptorCount = 0;
+ }
+
bool supported = true;
uint64_t size = 0;
for (uint32_t i = 0; i < pCreateInfo->bindingCount; i++) {
uint64_t descriptor_size = 0;
uint64_t descriptor_alignment = 1;
+ uint32_t descriptor_count = binding->descriptorCount;
switch (binding->descriptorType) {
case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
descriptor_alignment = 32;
break;
case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
- if (!has_equal_immutable_samplers(binding->pImmutableSamplers, binding->descriptorCount)) {
+ if (!has_equal_immutable_samplers(binding->pImmutableSamplers, descriptor_count)) {
/* main descriptor + fmask descriptor + sampler */
descriptor_size = 96;
} else {
/* main descriptor + fmask descriptor */
descriptor_size = 64;
}
descriptor_alignment = 32;
break;
case VK_DESCRIPTOR_TYPE_SAMPLER:
- if (!has_equal_immutable_samplers(binding->pImmutableSamplers, binding->descriptorCount)) {
+ if (!has_equal_immutable_samplers(binding->pImmutableSamplers, descriptor_count)) {
descriptor_size = 16;
descriptor_alignment = 16;
}
break;
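+ /* Inline uniform blocks are sized in bytes, not in descriptors. */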
+ case VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK_EXT:
+ descriptor_alignment = 16;
+ descriptor_size = descriptor_count;
+ descriptor_count = 1;
+ break;
default:
unreachable("unknown descriptor type\n");
break;
supported = false;
}
size = align_u64(size, descriptor_alignment);
- if (descriptor_size && (UINT64_MAX - size) / descriptor_size < binding->descriptorCount) {
+
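+ /* Largest count that still keeps the total set size below INT32_MAX,
+ * so descriptor offsets cannot overflow 32-bit arithmetic. */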
+ uint64_t max_count = INT32_MAX;
+ if (binding->descriptorType == VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK_EXT)
+ max_count = INT32_MAX - size;
+ else if (descriptor_size)
+ max_count = (INT32_MAX - size) / descriptor_size;
+
+ if (max_count < descriptor_count) {
supported = false;
}
- size += binding->descriptorCount * descriptor_size;
+ if (variable_flags && binding->binding < variable_flags->bindingCount && variable_count &&
+ (variable_flags->pBindingFlags[binding->binding] & VK_DESCRIPTOR_BINDING_VARIABLE_DESCRIPTOR_COUNT_BIT_EXT)) {
+ variable_count->maxVariableDescriptorCount = MIN2(UINT32_MAX, max_count);
+ }
+ size += descriptor_count * descriptor_size;
}
free(bindings);
/*
* Pipeline layouts. These have nothing to do with the pipeline. They are
- * just muttiple descriptor set layouts pasted together
+ * just multiple descriptor set layouts pasted together.
*/
VkResult radv_CreatePipelineLayout(
layout = vk_alloc2(&device->alloc, pAllocator, sizeof(*layout), 8,
VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
if (layout == NULL)
- return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
+ return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
layout->num_sets = pCreateInfo->setLayoutCount;
unsigned dynamic_offset_count = 0;
+ uint16_t dynamic_shader_stages = 0;
_mesa_sha1_init(&ctx);
layout->set[set].dynamic_offset_start = dynamic_offset_count;
for (uint32_t b = 0; b < set_layout->binding_count; b++) {
dynamic_offset_count += set_layout->binding[b].array_size * set_layout->binding[b].dynamic_offset_count;
+ dynamic_shader_stages |= set_layout->dynamic_shader_stages;
if (set_layout->binding[b].immutable_samplers_offset)
_mesa_sha1_update(&ctx, radv_immutable_samplers(set_layout, set_layout->binding + b),
set_layout->binding[b].array_size * 4 * sizeof(uint32_t));
}
layout->dynamic_offset_count = dynamic_offset_count;
+ layout->dynamic_shader_stages = dynamic_shader_stages;
layout->push_constant_size = 0;
for (unsigned i = 0; i < pCreateInfo->pushConstantRangeCount; ++i) {
radv_descriptor_set_create(struct radv_device *device,
struct radv_descriptor_pool *pool,
const struct radv_descriptor_set_layout *layout,
+ const uint32_t *variable_count,
struct radv_descriptor_set **out_set)
{
struct radv_descriptor_set *set;
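+ /* The set is allocated as a single block: the struct itself, one
+ * winsys BO pointer per buffer descriptor, then the dynamic ranges. */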
- unsigned range_offset = sizeof(struct radv_descriptor_set);
+ unsigned range_offset = sizeof(struct radv_descriptor_set) +
+ sizeof(struct radeon_winsys_bo *) * layout->buffer_count;
unsigned mem_size = range_offset +
sizeof(struct radv_descriptor_range) * layout->dynamic_offset_count;
if (pool->host_memory_base) {
if (pool->host_memory_end - pool->host_memory_ptr < mem_size)
- return vk_error(VK_ERROR_OUT_OF_POOL_MEMORY_KHR);
+ return vk_error(device->instance, VK_ERROR_OUT_OF_POOL_MEMORY);
set = (struct radv_descriptor_set*)pool->host_memory_ptr;
pool->host_memory_ptr += mem_size;
VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
if (!set)
- return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
+ return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
}
memset(set, 0, mem_size);
}
set->layout = layout;
- if (layout->size) {
- uint32_t layout_size = align_u32(layout->size, 32);
- set->size = layout->size;
+ uint32_t layout_size = align_u32(layout->size, 32);
+ if (layout_size) {
+ set->size = layout_size;
if (!pool->host_memory_base && pool->entry_count == pool->max_entry_count) {
vk_free2(&device->alloc, NULL, set);
- return vk_error(VK_ERROR_OUT_OF_POOL_MEMORY_KHR);
+ return vk_error(device->instance, VK_ERROR_OUT_OF_POOL_MEMORY);
}
/* try to allocate linearly first, so that we don't spend
if (pool->size - offset < layout_size) {
vk_free2(&device->alloc, NULL, set);
- return vk_error(VK_ERROR_OUT_OF_POOL_MEMORY_KHR);
+ return vk_error(device->instance, VK_ERROR_OUT_OF_POOL_MEMORY);
}
set->bo = pool->bo;
set->mapped_ptr = (uint32_t*)(pool->mapped_ptr + offset);
pool->entries[index].set = set;
pool->entry_count++;
} else
- return vk_error(VK_ERROR_OUT_OF_POOL_MEMORY_KHR);
+ return vk_error(device->instance, VK_ERROR_OUT_OF_POOL_MEMORY);
}
if (layout->has_immutable_samplers) {
{
RADV_FROM_HANDLE(radv_device, device, _device);
struct radv_descriptor_pool *pool;
- int size = sizeof(struct radv_descriptor_pool);
+ uint64_t size = sizeof(struct radv_descriptor_pool);
uint64_t bo_size = 0, bo_count = 0, range_count = 0;
+ vk_foreach_struct(ext, pCreateInfo->pNext) {
+ switch (ext->sType) {
+ case VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_INLINE_UNIFORM_BLOCK_CREATE_INFO_EXT: {
+ const struct VkDescriptorPoolInlineUniformBlockCreateInfoEXT *info =
+ (const struct VkDescriptorPoolInlineUniformBlockCreateInfoEXT*)ext;
+ /* The sizes are 4-byte aligned, and we need to align them to at
+ * most 32 bytes, which requires at most 28 extra bytes per
+ * binding. */
+ bo_size += 28llu * info->maxInlineUniformBlockBindings;
+ break;
+ }
+ default:
+ break;
+ }
+ }
for (unsigned i = 0; i < pCreateInfo->poolSizeCount; ++i) {
if (pCreateInfo->pPoolSizes[i].type != VK_DESCRIPTOR_TYPE_SAMPLER)
case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
bo_size += 96 * pCreateInfo->pPoolSizes[i].descriptorCount;
break;
+ case VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK_EXT:
+ bo_size += pCreateInfo->pPoolSizes[i].descriptorCount;
+ break;
default:
unreachable("unknown descriptor type\n");
break;
pool = vk_alloc2(&device->alloc, pAllocator, size, 8,
VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
if (!pool)
- return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
+ return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
memset(pool, 0, sizeof(*pool));
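+ /* Descriptor memory is read-only to the GPU; RADEON_FLAG_32BIT keeps
+ * it in the 32-bit address space, as shaders address descriptor sets
+ * with 32-bit pointers. */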
pool->bo = device->ws->buffer_create(device->ws, bo_size, 32,
RADEON_DOMAIN_VRAM,
RADEON_FLAG_NO_INTERPROCESS_SHARING |
- RADEON_FLAG_READ_ONLY);
+ RADEON_FLAG_READ_ONLY |
+ RADEON_FLAG_32BIT,
+ RADV_BO_PRIORITY_DESCRIPTOR);
pool->mapped_ptr = (uint8_t*)device->ws->buffer_map(pool->bo);
}
pool->size = bo_size;
uint32_t i;
struct radv_descriptor_set *set = NULL;
+ const VkDescriptorSetVariableDescriptorCountAllocateInfoEXT *variable_counts =
+ vk_find_struct_const(pAllocateInfo->pNext, DESCRIPTOR_SET_VARIABLE_DESCRIPTOR_COUNT_ALLOCATE_INFO_EXT);
+ const uint32_t zero = 0;
+
/* allocate a set of buffers for each shader to contain descriptors */
for (i = 0; i < pAllocateInfo->descriptorSetCount; i++) {
RADV_FROM_HANDLE(radv_descriptor_set_layout, layout,
pAllocateInfo->pSetLayouts[i]);
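+ /* Sets beyond the supplied descriptorSetCount get a variable count of
+ * zero. */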
+ const uint32_t *variable_count = NULL;
+ if (variable_counts) {
+ if (i < variable_counts->descriptorSetCount)
+ variable_count = variable_counts->pDescriptorCounts + i;
+ else
+ variable_count = &zero;
+ }
+
assert(!(layout->flags & VK_DESCRIPTOR_SET_LAYOUT_CREATE_PUSH_DESCRIPTOR_BIT_KHR));
- result = radv_descriptor_set_create(device, pool, layout, &set);
+ result = radv_descriptor_set_create(device, pool, layout, variable_count, &set);
if (result != VK_SUCCESS)
break;
}
static void write_texel_buffer_descriptor(struct radv_device *device,
+ struct radv_cmd_buffer *cmd_buffer,
unsigned *dst,
+ struct radeon_winsys_bo **buffer_list,
const VkBufferView _buffer_view)
{
RADV_FROM_HANDLE(radv_buffer_view, buffer_view, _buffer_view);
memcpy(dst, buffer_view->state, 4 * 4);
+
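+ /* Push descriptors are written while recording a command buffer, so
+ * track the BO on the command stream; normal sets record it in their
+ * own buffer list instead. */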
+ if (cmd_buffer)
+ radv_cs_add_buffer(device->ws, cmd_buffer->cs, buffer_view->bo);
+ else
+ *buffer_list = buffer_view->bo;
}
static void write_buffer_descriptor(struct radv_device *device,
+ struct radv_cmd_buffer *cmd_buffer,
unsigned *dst,
+ struct radeon_winsys_bo **buffer_list,
const VkDescriptorBufferInfo *buffer_info)
{
RADV_FROM_HANDLE(radv_buffer, buffer, buffer_info->buffer);
S_008F0C_NUM_FORMAT(V_008F0C_BUF_NUM_FORMAT_FLOAT) |
S_008F0C_DATA_FORMAT(V_008F0C_BUF_DATA_FORMAT_32);
+ if (cmd_buffer)
+ radv_cs_add_buffer(device->ws, cmd_buffer->cs, buffer->bo);
+ else
+ *buffer_list = buffer->bo;
+}
+
+static void write_block_descriptor(struct radv_device *device,
+ struct radv_cmd_buffer *cmd_buffer,
+ void *dst,
+ const VkWriteDescriptorSet *writeset)
+{
+ const VkWriteDescriptorSetInlineUniformBlockEXT *inline_ub =
+ vk_find_struct_const(writeset->pNext, WRITE_DESCRIPTOR_SET_INLINE_UNIFORM_BLOCK_EXT);
+
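+ /* Inline uniform data is copied verbatim; dataSize is in bytes. */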
+ memcpy(dst, inline_ub->pData, inline_ub->dataSize);
}
static void write_dynamic_buffer_descriptor(struct radv_device *device,
struct radv_descriptor_range *range,
+ struct radeon_winsys_bo **buffer_list,
const VkDescriptorBufferInfo *buffer_info)
{
RADV_FROM_HANDLE(radv_buffer, buffer, buffer_info->buffer);
va += buffer_info->offset + buffer->offset;
range->va = va;
range->size = size;
+
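+ /* Dynamic descriptors are never written through push descriptors (see
+ * the asserts at the call sites), so a buffer list always exists. */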
+ *buffer_list = buffer->bo;
}
static void
write_image_descriptor(struct radv_device *device,
+ struct radv_cmd_buffer *cmd_buffer,
unsigned *dst,
+ struct radeon_winsys_bo **buffer_list,
VkDescriptorType descriptor_type,
const VkDescriptorImageInfo *image_info)
{
}
memcpy(dst, descriptor, 16 * 4);
+
+ if (cmd_buffer)
+ radv_cs_add_buffer(device->ws, cmd_buffer->cs, iview->bo);
+ else
+ *buffer_list = iview->bo;
}
static void
write_combined_image_sampler_descriptor(struct radv_device *device,
+ struct radv_cmd_buffer *cmd_buffer,
unsigned *dst,
+ struct radeon_winsys_bo **buffer_list,
VkDescriptorType descriptor_type,
const VkDescriptorImageInfo *image_info,
bool has_sampler)
{
RADV_FROM_HANDLE(radv_sampler, sampler, image_info->sampler);
- write_image_descriptor(device, dst, descriptor_type, image_info);
+ write_image_descriptor(device, cmd_buffer, dst, buffer_list, descriptor_type, image_info);
/* copy over sampler state */
if (has_sampler)
memcpy(dst + 16, sampler->state, 16);
const struct radv_descriptor_set_binding_layout *binding_layout =
set->layout->binding + writeset->dstBinding;
uint32_t *ptr = set->mapped_ptr;
-
+ struct radeon_winsys_bo **buffer_list = set->descriptors;
/* Immutable samplers are not copied into push descriptors when they are
* allocated, so if we are writing push descriptors we have to copy the
* immutable samplers into them now.
const uint32_t *samplers = radv_immutable_samplers(set->layout, binding_layout);
ptr += binding_layout->offset / 4;
+
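+ /* For inline uniform blocks, dstArrayElement is a byte offset into the
+ * block rather than an array index. */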
+ if (writeset->descriptorType == VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK_EXT) {
+ write_block_descriptor(device, cmd_buffer, (uint8_t*)ptr + writeset->dstArrayElement, writeset);
+ continue;
+ }
+
ptr += binding_layout->size * writeset->dstArrayElement / 4;
+ buffer_list += binding_layout->buffer_offset;
+ buffer_list += writeset->dstArrayElement;
for (j = 0; j < writeset->descriptorCount; ++j) {
switch(writeset->descriptorType) {
case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
idx += binding_layout->dynamic_offset_offset;
assert(!(set->layout->flags & VK_DESCRIPTOR_SET_LAYOUT_CREATE_PUSH_DESCRIPTOR_BIT_KHR));
write_dynamic_buffer_descriptor(device, set->dynamic_descriptors + idx,
- writeset->pBufferInfo + j);
+ buffer_list, writeset->pBufferInfo + j);
break;
}
case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
- write_buffer_descriptor(device, ptr, writeset->pBufferInfo + j);
+ write_buffer_descriptor(device, cmd_buffer, ptr, buffer_list,
+ writeset->pBufferInfo + j);
break;
case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
- write_texel_buffer_descriptor(device, ptr, writeset->pTexelBufferView[j]);
+ write_texel_buffer_descriptor(device, cmd_buffer, ptr, buffer_list,
+ writeset->pTexelBufferView[j]);
break;
case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE:
case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
case VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT:
- write_image_descriptor(device, ptr, writeset->descriptorType,
+ write_image_descriptor(device, cmd_buffer, ptr, buffer_list,
+ writeset->descriptorType,
writeset->pImageInfo + j);
break;
case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
- write_combined_image_sampler_descriptor(device, ptr,
+ write_combined_image_sampler_descriptor(device, cmd_buffer, ptr, buffer_list,
writeset->descriptorType,
writeset->pImageInfo + j,
!binding_layout->immutable_samplers_offset);
break;
}
ptr += binding_layout->size / 4;
+ ++buffer_list;
}
}
dst_set->layout->binding + copyset->dstBinding;
uint32_t *src_ptr = src_set->mapped_ptr;
uint32_t *dst_ptr = dst_set->mapped_ptr;
+ struct radeon_winsys_bo **src_buffer_list = src_set->descriptors;
+ struct radeon_winsys_bo **dst_buffer_list = dst_set->descriptors;
src_ptr += src_binding_layout->offset / 4;
dst_ptr += dst_binding_layout->offset / 4;
src_ptr += src_binding_layout->size * copyset->srcArrayElement / 4;
dst_ptr += dst_binding_layout->size * copyset->dstArrayElement / 4;
+ src_buffer_list += src_binding_layout->buffer_offset;
+ src_buffer_list += copyset->srcArrayElement;
+
+ dst_buffer_list += dst_binding_layout->buffer_offset;
+ dst_buffer_list += copyset->dstArrayElement;
+
for (j = 0; j < copyset->descriptorCount; ++j) {
switch (src_binding_layout->type) {
case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
}
src_ptr += src_binding_layout->size / 4;
dst_ptr += dst_binding_layout->size / 4;
+
+ if (src_binding_layout->type != VK_DESCRIPTOR_TYPE_SAMPLER) {
+ /* Sampler descriptors don't have a buffer list. */
+ dst_buffer_list[j] = src_buffer_list[j];
+ }
}
}
}
}
VkResult radv_CreateDescriptorUpdateTemplate(VkDevice _device,
- const VkDescriptorUpdateTemplateCreateInfoKHR *pCreateInfo,
+ const VkDescriptorUpdateTemplateCreateInfo *pCreateInfo,
const VkAllocationCallbacks *pAllocator,
- VkDescriptorUpdateTemplateKHR *pDescriptorUpdateTemplate)
+ VkDescriptorUpdateTemplate *pDescriptorUpdateTemplate)
{
RADV_FROM_HANDLE(radv_device, device, _device);
RADV_FROM_HANDLE(radv_descriptor_set_layout, set_layout, pCreateInfo->descriptorSetLayout);
templ = vk_alloc2(&device->alloc, pAllocator, size, 8, VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
if (!templ)
- return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
+ return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
templ->entry_count = entry_count;
templ->bind_point = pCreateInfo->pipelineBindPoint;
for (i = 0; i < entry_count; i++) {
- const VkDescriptorUpdateTemplateEntryKHR *entry = &pCreateInfo->pDescriptorUpdateEntries[i];
+ const VkDescriptorUpdateTemplateEntry *entry = &pCreateInfo->pDescriptorUpdateEntries[i];
const struct radv_descriptor_set_binding_layout *binding_layout =
set_layout->binding + entry->dstBinding;
+ const uint32_t buffer_offset = binding_layout->buffer_offset + entry->dstArrayElement;
const uint32_t *immutable_samplers = NULL;
uint32_t dst_offset;
uint32_t dst_stride;
switch (entry->descriptorType) {
case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
- assert(pCreateInfo->templateType == VK_DESCRIPTOR_UPDATE_TEMPLATE_TYPE_DESCRIPTOR_SET_KHR);
+ assert(pCreateInfo->templateType == VK_DESCRIPTOR_UPDATE_TEMPLATE_TYPE_DESCRIPTOR_SET);
dst_offset = binding_layout->dynamic_offset_offset + entry->dstArrayElement;
dst_stride = 0; /* Not used */
break;
default:
break;
}
- dst_offset = binding_layout->offset / 4 + binding_layout->size * entry->dstArrayElement / 4;
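+ /* Inline uniform blocks index by bytes, every other type by elements
+ * of the binding's size. */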
+ dst_offset = binding_layout->offset / 4;
+ if (entry->descriptorType == VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK_EXT)
+ dst_offset += entry->dstArrayElement / 4;
+ else
+ dst_offset += binding_layout->size * entry->dstArrayElement / 4;
+
dst_stride = binding_layout->size / 4;
break;
}
.src_stride = entry->stride,
.dst_offset = dst_offset,
.dst_stride = dst_stride,
+ .buffer_offset = buffer_offset,
.has_sampler = !binding_layout->immutable_samplers_offset,
.immutable_samplers = immutable_samplers
};
}
void radv_DestroyDescriptorUpdateTemplate(VkDevice _device,
- VkDescriptorUpdateTemplateKHR descriptorUpdateTemplate,
+ VkDescriptorUpdateTemplate descriptorUpdateTemplate,
const VkAllocationCallbacks *pAllocator)
{
RADV_FROM_HANDLE(radv_device, device, _device);
void radv_update_descriptor_set_with_template(struct radv_device *device,
struct radv_cmd_buffer *cmd_buffer,
struct radv_descriptor_set *set,
- VkDescriptorUpdateTemplateKHR descriptorUpdateTemplate,
+ VkDescriptorUpdateTemplate descriptorUpdateTemplate,
const void *pData)
{
RADV_FROM_HANDLE(radv_descriptor_update_template, templ, descriptorUpdateTemplate);
uint32_t i;
for (i = 0; i < templ->entry_count; ++i) {
+ struct radeon_winsys_bo **buffer_list = set->descriptors + templ->entry[i].buffer_offset;
uint32_t *pDst = set->mapped_ptr + templ->entry[i].dst_offset;
const uint8_t *pSrc = ((const uint8_t *) pData) + templ->entry[i].src_offset;
uint32_t j;
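+ /* descriptor_count holds the byte size of the data for inline uniform
+ * blocks. */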
+ if (templ->entry[i].descriptor_type == VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK_EXT) {
+ memcpy((uint8_t*)pDst, pSrc, templ->entry[i].descriptor_count);
+ continue;
+ }
+
for (j = 0; j < templ->entry[i].descriptor_count; ++j) {
switch (templ->entry[i].descriptor_type) {
case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
const unsigned idx = templ->entry[i].dst_offset + j;
assert(!(set->layout->flags & VK_DESCRIPTOR_SET_LAYOUT_CREATE_PUSH_DESCRIPTOR_BIT_KHR));
write_dynamic_buffer_descriptor(device, set->dynamic_descriptors + idx,
- (struct VkDescriptorBufferInfo *) pSrc);
+ buffer_list, (struct VkDescriptorBufferInfo *) pSrc);
break;
}
case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
- write_buffer_descriptor(device, pDst,
+ write_buffer_descriptor(device, cmd_buffer, pDst, buffer_list,
(struct VkDescriptorBufferInfo *) pSrc);
break;
case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
- write_texel_buffer_descriptor(device, pDst,
+ write_texel_buffer_descriptor(device, cmd_buffer, pDst, buffer_list,
*(VkBufferView *) pSrc);
break;
case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE:
case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
case VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT:
- write_image_descriptor(device, pDst,
+ write_image_descriptor(device, cmd_buffer, pDst, buffer_list,
templ->entry[i].descriptor_type,
(struct VkDescriptorImageInfo *) pSrc);
break;
case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
- write_combined_image_sampler_descriptor(device, pDst,
+ write_combined_image_sampler_descriptor(device, cmd_buffer, pDst, buffer_list,
templ->entry[i].descriptor_type,
(struct VkDescriptorImageInfo *) pSrc,
templ->entry[i].has_sampler);
}
pSrc += templ->entry[i].src_stride;
pDst += templ->entry[i].dst_stride;
+ ++buffer_list;
}
}
}
void radv_UpdateDescriptorSetWithTemplate(VkDevice _device,
VkDescriptorSet descriptorSet,
- VkDescriptorUpdateTemplateKHR descriptorUpdateTemplate,
+ VkDescriptorUpdateTemplate descriptorUpdateTemplate,
const void *pData)
{
RADV_FROM_HANDLE(radv_device, device, _device);