enabled_images = sctx->images[processor].enabled_mask;
}
- if (processor == PIPE_SHADER_VERTEX) {
+ if (processor == PIPE_SHADER_VERTEX &&
+ sctx->vb_descriptors_buffer &&
+ sctx->vb_descriptors_gpu_list &&
+ sctx->vertex_elements) {
assert(info); /* only CS may not have an info struct */
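+ /* The VB descriptor list no longer lives in a struct si_descriptors, so
+  * build a temporary one on the stack for the generic dump helper. */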
+ struct si_descriptors desc = {};
- si_dump_descriptor_list(sctx->screen, &sctx->vertex_buffers, name,
+ desc.buffer = sctx->vb_descriptors_buffer;
+ desc.list = sctx->vb_descriptors_gpu_list;
+ desc.gpu_list = sctx->vb_descriptors_gpu_list;
+ desc.element_dw_size = 4;
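+ /* Each VB descriptor is 4 dwords (16 bytes), so bytes / 16 = slot count. */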
+ desc.num_active_slots = sctx->vertex_elements->desc_list_byte_size / 16;
+
+ si_dump_descriptor_list(sctx->screen, &desc, name,
" - Vertex buffer", 4, info->num_inputs,
si_identity, log);
}
static void si_vertex_buffers_begin_new_cs(struct si_context *sctx)
{
- struct si_descriptors *desc = &sctx->vertex_buffers;
int count = sctx->vertex_elements ? sctx->vertex_elements->count : 0;
int i;
RADEON_USAGE_READ, RADEON_PRIO_VERTEX_BUFFER);
}
- if (!desc->buffer)
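+ /* No VB descriptor list has been uploaded for this context yet. */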
+ if (!sctx->vb_descriptors_buffer)
return;
radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx,
- desc->buffer, RADEON_USAGE_READ,
- RADEON_PRIO_DESCRIPTORS);
+ sctx->vb_descriptors_buffer, RADEON_USAGE_READ,
+ RADEON_PRIO_DESCRIPTORS);
}
bool si_upload_vertex_buffer_descriptors(struct si_context *sctx)
{
struct si_vertex_elements *velems = sctx->vertex_elements;
- struct si_descriptors *desc = &sctx->vertex_buffers;
unsigned i, count;
unsigned desc_list_byte_size;
unsigned first_vb_use_mask;
* directly through a staging buffer and don't go through
* the fine-grained upload path.
*/
- unsigned buffer_offset = 0;
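+ /* Sub-allocate the list from the const uploader; this returns the backing
+  * buffer, the offset within it, and a CPU mapping in one call. */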
u_upload_alloc(sctx->b.b.const_uploader, 0,
desc_list_byte_size,
si_optimal_tcc_alignment(sctx, desc_list_byte_size),
- &buffer_offset,
- (struct pipe_resource**)&desc->buffer, (void**)&ptr);
- if (!desc->buffer) {
- desc->gpu_address = 0;
+ &sctx->vb_descriptors_offset,
+ (struct pipe_resource**)&sctx->vb_descriptors_buffer,
+ (void**)&ptr);
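+ /* On failure, also clear the stale offset and mapped-list pointer. */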
+ if (!sctx->vb_descriptors_buffer) {
+ sctx->vb_descriptors_offset = 0;
+ sctx->vb_descriptors_gpu_list = NULL;
return false;
}
- desc->gpu_address = desc->buffer->gpu_address + buffer_offset;
- desc->list = ptr;
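+ /* Keep a CPU pointer to the mapped list so the debug code can dump it. */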
+ sctx->vb_descriptors_gpu_list = ptr;
radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx,
- desc->buffer, RADEON_USAGE_READ,
- RADEON_PRIO_DESCRIPTORS);
+ sctx->vb_descriptors_buffer, RADEON_USAGE_READ,
+ RADEON_PRIO_DESCRIPTORS);
assert(count <= SI_MAX_ATTRIBS);
SI_NUM_SHADER_DESCS);
if (shader == PIPE_SHADER_VERTEX)
- sctx->vertex_buffer_pointer_dirty = sctx->vertex_buffers.buffer != NULL;
+ sctx->vertex_buffer_pointer_dirty = sctx->vb_descriptors_buffer != NULL;
si_mark_atom_dirty(sctx, &sctx->shader_pointers.atom);
}
static void si_shader_pointers_begin_new_cs(struct si_context *sctx)
{
sctx->shader_pointers_dirty = u_bit_consecutive(0, SI_NUM_DESCS);
- sctx->vertex_buffer_pointer_dirty = sctx->vertex_buffers.buffer != NULL;
+ sctx->vertex_buffer_pointer_dirty = sctx->vb_descriptors_buffer != NULL;
si_mark_atom_dirty(sctx, &sctx->shader_pointers.atom);
sctx->graphics_bindless_pointer_dirty = sctx->bindless_descriptors.buffer != NULL;
sctx->compute_bindless_pointer_dirty = sctx->bindless_descriptors.buffer != NULL;
~u_bit_consecutive(SI_DESCS_RW_BUFFERS, SI_DESCS_FIRST_COMPUTE);
if (sctx->vertex_buffer_pointer_dirty) {
- si_emit_shader_pointer(sctx, &sctx->vertex_buffers,
- sh_base[PIPE_SHADER_VERTEX]);
+ struct radeon_winsys_cs *cs = sctx->b.gfx.cs;
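+ /* User SGPRs are dword-sized: convert the SGPR index into a byte offset
+  * relative to this shader's user-data register base. */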
+ unsigned sh_offset = sh_base[PIPE_SHADER_VERTEX] +
+ SI_SGPR_VERTEX_BUFFERS * 4;
+
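+ /* Emit one SET_SH_REG packet carrying the 64-bit list address. */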
+ si_emit_shader_pointer_head(cs, sh_offset, 1);
+ si_emit_shader_pointer_body(sctx->screen, cs,
+ sctx->vb_descriptors_buffer->gpu_address +
+ sctx->vb_descriptors_offset);
sctx->vertex_buffer_pointer_dirty = false;
}
RADEON_PRIO_SHADER_RINGS, RADEON_PRIO_CONST_BUFFER);
sctx->descriptors[SI_DESCS_RW_BUFFERS].num_active_slots = SI_NUM_RW_BUFFERS;
- si_init_descriptors(&sctx->vertex_buffers, SI_SGPR_VERTEX_BUFFERS,
- 4, SI_NUM_VERTEX_BUFFERS);
- FREE(sctx->vertex_buffers.list); /* not used */
- sctx->vertex_buffers.list = NULL;
-
/* Initialize an array of 1024 bindless descriptors, when the limit is
* reached, just make it larger and re-upload the whole array.
*/
for (i = 0; i < SI_NUM_DESCS; ++i)
si_release_descriptors(&sctx->descriptors[i]);
- sctx->vertex_buffers.list = NULL; /* points into a mapped buffer */
- si_release_descriptors(&sctx->vertex_buffers);
+ r600_resource_reference(&sctx->vb_descriptors_buffer, NULL);
+ sctx->vb_descriptors_gpu_list = NULL; /* points into a mapped buffer */
+
si_release_bindless_descriptors(sctx);
}
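
Note: the hunks above reference three new si_context members whose declarations
are outside this excerpt (they belong where struct si_context is defined,
presumably si_pipe.h). A sketch of the assumed declarations, with types
inferred from their uses in this diff:

    struct r600_resource *vb_descriptors_buffer;  /* released via r600_resource_reference() */
    unsigned vb_descriptors_offset;               /* out offset from u_upload_alloc() */
    uint32_t *vb_descriptors_gpu_list;            /* CPU pointer into the mapped list */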