u_vbuf_mgr can now override both buffer offsets and strides in addition
to resources. Overriding only the buffer offsets was somewhat hackish and
could cause issues with non-native vertex formats.
void *saved_ve, *fallback_ve;
boolean ve_binding_lock;
- unsigned saved_buffer_offset[PIPE_MAX_ATTRIBS];
-
boolean any_user_vbs;
boolean incompatible_vb_layout;
};
struct u_vbuf_mgr_priv *mgr = (struct u_vbuf_mgr_priv*)mgrb;
unsigned i;
- for (i = 0; i < mgr->b.nr_real_vertex_buffers; i++) {
+ for (i = 0; i < mgr->b.nr_vertex_buffers; i++) {
pipe_resource_reference(&mgr->b.vertex_buffer[i].buffer, NULL);
- pipe_resource_reference(&mgr->b.real_vertex_buffer[i], NULL);
+ }
+ for (i = 0; i < mgr->b.nr_real_vertex_buffers; i++) {
+ pipe_resource_reference(&mgr->b.real_vertex_buffer[i].buffer, NULL);
}
translate_cache_destroy(mgr->translate_cache);
if (mgr->translate_vb_slot != ~0) {
/* Setup the new vertex buffer. */
pipe_resource_reference(
- &mgr->b.real_vertex_buffer[mgr->translate_vb_slot], out_buffer);
- mgr->b.vertex_buffer[mgr->translate_vb_slot].buffer_offset = out_offset;
- mgr->b.vertex_buffer[mgr->translate_vb_slot].stride = key.output_stride;
+ &mgr->b.real_vertex_buffer[mgr->translate_vb_slot].buffer, out_buffer);
+ mgr->b.real_vertex_buffer[mgr->translate_vb_slot].buffer_offset = out_offset;
+ mgr->b.real_vertex_buffer[mgr->translate_vb_slot].stride = key.output_stride;
/* Setup new vertex elements. */
for (i = 0; i < mgr->ve->count; i++) {
mgr->fallback_ve = NULL;
/* Delete the now-unused VBO. */
- pipe_resource_reference(&mgr->b.real_vertex_buffer[mgr->translate_vb_slot],
+ pipe_resource_reference(&mgr->b.real_vertex_buffer[mgr->translate_vb_slot].buffer,
NULL);
mgr->b.nr_real_vertex_buffers = mgr->b.nr_vertex_buffers;
}
const struct pipe_vertex_buffer *vb = &bufs[i];
pipe_resource_reference(&mgr->b.vertex_buffer[i].buffer, vb->buffer);
- pipe_resource_reference(&mgr->b.real_vertex_buffer[i], NULL);
- mgr->saved_buffer_offset[i] = vb->buffer_offset;
+ pipe_resource_reference(&mgr->b.real_vertex_buffer[i].buffer, NULL);
+
+ mgr->b.real_vertex_buffer[i].buffer_offset =
+ mgr->b.vertex_buffer[i].buffer_offset = vb->buffer_offset;
+
+ mgr->b.real_vertex_buffer[i].stride =
+ mgr->b.vertex_buffer[i].stride = vb->stride;
if (!vb->buffer) {
continue;
}
- pipe_resource_reference(&mgr->b.real_vertex_buffer[i], vb->buffer);
+ pipe_resource_reference(&mgr->b.real_vertex_buffer[i].buffer, vb->buffer);
}
- for (; i < mgr->b.nr_real_vertex_buffers; i++) {
+ for (i = count; i < mgr->b.nr_vertex_buffers; i++) {
pipe_resource_reference(&mgr->b.vertex_buffer[i].buffer, NULL);
- pipe_resource_reference(&mgr->b.real_vertex_buffer[i], NULL);
}
-
- memcpy(mgr->b.vertex_buffer, bufs,
- sizeof(struct pipe_vertex_buffer) * count);
+ for (i = count; i < mgr->b.nr_real_vertex_buffers; i++) {
+ pipe_resource_reference(&mgr->b.real_vertex_buffer[i].buffer, NULL);
+ }
mgr->b.nr_vertex_buffers = count;
mgr->b.nr_real_vertex_buffers = count;
u_upload_data(mgr->b.uploader, first, size,
u_vbuf_resource(vb->buffer)->user_ptr + first,
- &vb->buffer_offset,
- &mgr->b.real_vertex_buffer[index],
+ &mgr->b.real_vertex_buffer[index].buffer_offset,
+ &mgr->b.real_vertex_buffer[index].buffer,
&flushed);
- vb->buffer_offset -= first;
+ mgr->b.real_vertex_buffer[index].buffer_offset -= first;
uploaded[index] = TRUE;
if (flushed)
retval |= U_VBUF_UPLOAD_FLUSHED;
} else {
- assert(mgr->b.real_vertex_buffer[index]);
+ assert(mgr->b.real_vertex_buffer[index].buffer);
}
}
void u_vbuf_mgr_draw_end(struct u_vbuf_mgr *mgrb)
{
struct u_vbuf_mgr_priv *mgr = (struct u_vbuf_mgr_priv*)mgrb;
- unsigned i;
-
- /* buffer offsets were modified in u_vbuf_upload_buffers */
- if (mgr->any_user_vbs) {
- for (i = 0; i < mgr->b.nr_vertex_buffers; i++)
- mgr->b.vertex_buffer[i].buffer_offset = mgr->saved_buffer_offset[i];
- }
if (mgr->fallback_ve) {
u_vbuf_translate_end(mgr);
/* Contains only real vertex buffers.
 * Hardware drivers should use real_vertex_buffer[i]
 * instead of vertex_buffer[i]. */
- struct pipe_resource *real_vertex_buffer[PIPE_MAX_ATTRIBS];
+ struct pipe_vertex_buffer real_vertex_buffer[PIPE_MAX_ATTRIBS];
int nr_real_vertex_buffers;
/* Precomputed max_index for hardware vertex buffers. */
void r300_emit_vertex_arrays(struct r300_context* r300, int offset,
boolean indexed, int instance_id)
{
- struct pipe_vertex_buffer *vbuf = r300->vbuf_mgr->vertex_buffer;
- struct pipe_resource **valid_vbuf = r300->vbuf_mgr->real_vertex_buffer;
+ struct pipe_vertex_buffer *vbuf = r300->vbuf_mgr->real_vertex_buffer;
struct pipe_vertex_element *velem = r300->velems->velem;
struct r300_resource *buf;
int i;
}
for (i = 0; i < vertex_array_count; i++) {
- buf = r300_resource(valid_vbuf[velem[i].vertex_buffer_index]);
+ buf = r300_resource(vbuf[velem[i].vertex_buffer_index].buffer);
OUT_CS_RELOC(buf);
}
} else {
}
for (i = 0; i < vertex_array_count; i++) {
- buf = r300_resource(valid_vbuf[velem[i].vertex_buffer_index]);
+ buf = r300_resource(vbuf[velem[i].vertex_buffer_index].buffer);
OUT_CS_RELOC(buf);
}
}
r300_resource(r300->vbo)->domain, 0);
/* ...vertex buffers for HWTCL path... */
if (do_validate_vertex_buffers && r300->vertex_arrays_dirty) {
- struct pipe_resource **buf = r300->vbuf_mgr->real_vertex_buffer;
- struct pipe_resource **last = r300->vbuf_mgr->real_vertex_buffer +
+ struct pipe_vertex_buffer *vbuf = r300->vbuf_mgr->real_vertex_buffer;
+ struct pipe_vertex_buffer *last = r300->vbuf_mgr->real_vertex_buffer +
r300->vbuf_mgr->nr_real_vertex_buffers;
- for (; buf != last; buf++) {
- if (!*buf)
+ struct pipe_resource *buf;
+ for (; vbuf != last; vbuf++) {
+ buf = vbuf->buffer;
+ if (!buf)
continue;
- r300->rws->cs_add_reloc(r300->cs, r300_resource(*buf)->cs_buf,
- r300_resource(*buf)->domain, 0);
+ r300->rws->cs_add_reloc(r300->cs, r300_resource(buf)->cs_buf,
+ r300_resource(buf)->domain, 0);
}
}
/* ...and index buffer for HWTCL path. */
static void r300_split_index_bias(struct r300_context *r300, int index_bias,
int *buffer_offset, int *index_offset)
{
- struct pipe_vertex_buffer *vb, *vbufs = r300->vbuf_mgr->vertex_buffer;
+ struct pipe_vertex_buffer *vb, *vbufs = r300->vbuf_mgr->real_vertex_buffer;
struct pipe_vertex_element *velem = r300->velems->velem;
unsigned i, size;
int max_neg_bias;
vbi = velem->vertex_buffer_index;
if (!checked[vbi]) {
- buf = r300->vbuf_mgr->real_vertex_buffer[vbi];
+ buf = r300->vbuf_mgr->real_vertex_buffer[vbi].buffer;
if (r300_resource(buf)->domain != RADEON_DOMAIN_GTT) {
return FALSE;
velem = &r300->velems->velem[i];
size[i] = r300->velems->format_size[i] / 4;
vbi = velem->vertex_buffer_index;
- vbuf = &r300->vbuf_mgr->vertex_buffer[vbi];
+ vbuf = &r300->vbuf_mgr->real_vertex_buffer[vbi];
stride[i] = vbuf->stride / 4;
/* Map the buffer. */
if (!map[vbi]) {
map[vbi] = (uint32_t*)r300->rws->buffer_map(
- r300_resource(r300->vbuf_mgr->real_vertex_buffer[vbi])->buf,
+ r300_resource(vbuf->buffer)->buf,
r300->cs, PIPE_TRANSFER_READ | PIPE_TRANSFER_UNSYNCHRONIZED);
map[vbi] += (vbuf->buffer_offset / 4) + stride[i] * info->start;
}
vbi = r300->velems->velem[i].vertex_buffer_index;
if (map[vbi]) {
- r300->rws->buffer_unmap(r300_resource(r300->vbuf_mgr->real_vertex_buffer[vbi])->buf);
+ r300->rws->buffer_unmap(r300_resource(r300->vbuf_mgr->real_vertex_buffer[vbi].buffer)->buf);
map[vbi] = NULL;
}
}
/* one resource per vertex element */
unsigned vbuffer_index;
vbuffer_index = rctx->vertex_elements->elements[i].vertex_buffer_index;
- vertex_buffer = &rctx->vbuf_mgr->vertex_buffer[vbuffer_index];
- rbuffer = (struct r600_resource*)rctx->vbuf_mgr->real_vertex_buffer[vbuffer_index];
+ vertex_buffer = &rctx->vbuf_mgr->real_vertex_buffer[vbuffer_index];
+ rbuffer = (struct r600_resource*)vertex_buffer->buffer;
offset = rctx->vertex_elements->vbuffer_offset[i];
} else {
/* bind vertex buffer once */
- vertex_buffer = &rctx->vbuf_mgr->vertex_buffer[i];
- rbuffer = (struct r600_resource*)rctx->vbuf_mgr->real_vertex_buffer[i];
+ vertex_buffer = &rctx->vbuf_mgr->real_vertex_buffer[i];
+ rbuffer = (struct r600_resource*)vertex_buffer->buffer;
offset = 0;
}
if (vertex_buffer == NULL || rbuffer == NULL)