unsigned array_size = set_layout->binding[b].array_size;
for (unsigned j = 0; j < array_size; j++) {
+ uint32_t range = 0;
+ if (desc->buffer_view)
+ range = desc->buffer_view->range;
push->dynamic[d].offset = *(offsets++);
- push->dynamic[d].range = (desc++)->range;
+ push->dynamic[d].range = range;
+ desc++;
d++;
}
}
state.offset + dword * 4, bo, offset);
}
-static void
-fill_descriptor_buffer_surface_state(struct anv_device *device, void *state,
- gl_shader_stage stage,
- VkDescriptorType type,
- uint32_t offset, uint32_t range)
+const struct anv_format *
+anv_format_for_descriptor_type(VkDescriptorType type)
{
- VkFormat format;
switch (type) {
case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
- format = VK_FORMAT_R32G32B32A32_SFLOAT;
- break;
+ return anv_format_for_vk_format(VK_FORMAT_R32G32B32A32_SFLOAT);
case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
- format = VK_FORMAT_UNDEFINED;
- break;
+ return anv_format_for_vk_format(VK_FORMAT_UNDEFINED);
default:
unreachable("Invalid descriptor type");
}
-
- anv_fill_buffer_surface_state(device, state,
- anv_format_for_vk_format(format)->surface_format,
- offset, range, 1);
}
VkResult
surface_state =
anv_cmd_buffer_alloc_surface_state(cmd_buffer);
- fill_descriptor_buffer_surface_state(cmd_buffer->device,
- surface_state.map, stage,
- VK_DESCRIPTOR_TYPE_STORAGE_BUFFER,
- bo_offset, 12);
+ const struct anv_format *format =
+ anv_format_for_descriptor_type(VK_DESCRIPTOR_TYPE_STORAGE_BUFFER);
+ anv_fill_buffer_surface_state(cmd_buffer->device, surface_state.map,
+ format->surface_format, bo_offset, 12, 1);
if (!cmd_buffer->device->info.has_llc)
anv_state_clflush(surface_state);
/* Nothing for us to do here */
continue;
- case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
- case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
- case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
- case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC: {
- bo = desc->buffer->bo;
- bo_offset = desc->buffer->offset + desc->offset;
-
- surface_state =
- anv_cmd_buffer_alloc_surface_state(cmd_buffer);
-
- fill_descriptor_buffer_surface_state(cmd_buffer->device,
- surface_state.map,
- stage, desc->type,
- bo_offset, desc->range);
-
- if (!cmd_buffer->device->info.has_llc)
- anv_state_clflush(surface_state);
-
- break;
- }
-
case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE:
case VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT:
break;
}
+ case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
+ case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
+ case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
+ case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
surface_state = desc->buffer_view->surface_state;
bo = desc->buffer_view->bo;
uint32_t sampler_count[MESA_SHADER_STAGES] = { 0, };
uint32_t surface_count[MESA_SHADER_STAGES] = { 0, };
uint32_t image_count[MESA_SHADER_STAGES] = { 0, };
+ uint32_t buffer_count = 0;
uint32_t dynamic_offset_count = 0;
for (uint32_t j = 0; j < pCreateInfo->bindingCount; j++) {
}
switch (binding->descriptorType) {
+ case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
+ case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
+ case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
+ case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
+ set_layout->binding[b].buffer_index = buffer_count;
+ buffer_count += binding->descriptorCount;
+ /* fall through */
+
case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE:
case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
- case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
- case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
- case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
- case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
case VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT:
anv_foreach_stage(s, binding->stageFlags) {
set_layout->binding[b].stage[s].surface_index = surface_count[s];
set_layout->shader_stages |= binding->stageFlags;
}
+ set_layout->buffer_count = buffer_count;
set_layout->dynamic_offset_count = dynamic_offset_count;
*pSetLayout = anv_descriptor_set_layout_to_handle(set_layout);
desc += layout->binding[b].array_size;
}
+ /* XXX: Use the pool */
+ set->buffer_views =
+ anv_alloc(&device->alloc,
+ sizeof(set->buffer_views[0]) * layout->buffer_count, 8,
+ VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
+ if (!set->buffer_views) {
+ anv_free(&device->alloc, set);
+ return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
+ }
+
+ for (uint32_t b = 0; b < layout->buffer_count; b++) {
+ set->buffer_views[b].surface_state =
+ anv_state_pool_alloc(&device->surface_state_pool, 64, 64);
+ }
+
*out_set = set;
return VK_SUCCESS;
anv_descriptor_set_destroy(struct anv_device *device,
struct anv_descriptor_set *set)
{
- anv_free(&device->alloc /* XXX: Use the pool */, set);
+ /* XXX: Use the pool */
+ for (uint32_t b = 0; b < set->layout->buffer_count; b++)
+ anv_state_pool_free(&device->surface_state_pool,
+ set->buffer_views[b].surface_state);
+
+ anv_free(&device->alloc, set->buffer_views);
+ anv_free(&device->alloc, set);
}
VkResult anv_AllocateDescriptorSets(
}
void anv_UpdateDescriptorSets(
- VkDevice device,
+ VkDevice _device,
uint32_t descriptorWriteCount,
const VkWriteDescriptorSet* pDescriptorWrites,
uint32_t descriptorCopyCount,
const VkCopyDescriptorSet* pDescriptorCopies)
{
+ ANV_FROM_HANDLE(anv_device, device, _device);
+
for (uint32_t i = 0; i < descriptorWriteCount; i++) {
const VkWriteDescriptorSet *write = &pDescriptorWrites[i];
ANV_FROM_HANDLE(anv_descriptor_set, set, write->dstSet);
ANV_FROM_HANDLE(anv_buffer, buffer, write->pBufferInfo[j].buffer);
assert(buffer);
- desc[j] = (struct anv_descriptor) {
- .type = write->descriptorType,
- .buffer = buffer,
- .offset = write->pBufferInfo[j].offset,
- .range = write->pBufferInfo[j].range,
- };
+ struct anv_buffer_view *view =
+ &set->buffer_views[bind_layout->buffer_index + j];
+
+ const struct anv_format *format =
+ anv_format_for_descriptor_type(write->descriptorType);
+
+ view->format = format->surface_format;
+ view->bo = buffer->bo;
+ view->offset = buffer->offset + write->pBufferInfo[j].offset;
/* For buffers with dynamic offsets, we use the full possible
* range in the surface state and do the actual range-checking
* in the shader.
*/
if (bind_layout->dynamic_offset_index >= 0)
- desc[j].range = buffer->size - desc[j].offset;
+ view->range = buffer->size - write->pBufferInfo[j].offset;
+ else
+ view->range = write->pBufferInfo[j].range;
+
+ anv_fill_buffer_surface_state(device, view->surface_state.map,
+ view->format,
+ view->offset, view->range, 1);
+
+ if (!device->info.has_llc)
+ anv_state_clflush(view->surface_state);
+
+ desc[j] = (struct anv_descriptor) {
+ .type = write->descriptorType,
+ .buffer_view = view,
+ };
+
}
default:
/* Index into the dynamic state array for a dynamic buffer */
int16_t dynamic_offset_index;
+ /* Index into the descriptor set buffer views */
+ int16_t buffer_index;
+
struct {
/* Index into the binding table for the associated surface */
int16_t surface_index;
/* Shader stages affected by this descriptor set */
uint16_t shader_stages;
+ /* Number of buffers in this descriptor set */
+ uint16_t buffer_count;
+
/* Number of dynamic offsets used by this descriptor set */
uint16_t dynamic_offset_count;
};
struct anv_buffer_view *buffer_view;
-
- struct {
- struct anv_buffer *buffer;
- uint64_t offset;
- uint64_t range;
- };
};
};
struct anv_descriptor_set {
const struct anv_descriptor_set_layout *layout;
+ struct anv_buffer_view *buffer_views;
struct anv_descriptor descriptors[0];
};
struct anv_state storage_surface_state;
};
+const struct anv_format *
+anv_format_for_descriptor_type(VkDescriptorType type);
+
void anv_fill_buffer_surface_state(struct anv_device *device, void *state,
enum isl_format format,
uint32_t offset, uint32_t range,