This makes u_vbuf_mgr interpose on pipe->set_index_buffer itself (saving the driver's original callback and forwarding to it), so u_vbuf_mgr calls into the driver instead of every driver having to call u_vbuf_set_index_buffer.
struct translate_cache *translate_cache;
struct cso_cache *cso_cache;
+ /* The index buffer. */
+ struct pipe_index_buffer index_buffer;
+
/* Vertex element state bound by the state tracker. */
void *saved_ve;
/* and its associated helper structure for this module. */
boolean incompatible_vb_layout;
/* Per-buffer flags. */
boolean incompatible_vb[PIPE_MAX_ATTRIBS];
+
+ void (*driver_set_index_buffer)(struct pipe_context *pipe,
+ const struct pipe_index_buffer *);
};
static void u_vbuf_init_format_caps(struct u_vbuf_priv *mgr)
0, PIPE_BIND_VERTEX_BUFFER);
}
+static void u_vbuf_install(struct u_vbuf_priv *mgr);
+
struct u_vbuf *
u_vbuf_create(struct pipe_context *pipe,
unsigned upload_buffer_size,
fetch_alignment == U_VERTEX_FETCH_BYTE_ALIGNED;
u_vbuf_init_format_caps(mgr);
-
+ u_vbuf_install(mgr);
return &mgr->b;
}
struct u_vbuf_priv *mgr = (struct u_vbuf_priv*)mgrb;
unsigned i;
+ assert(mgr->pipe->draw);
+ mgr->pipe->draw = NULL;
+
for (i = 0; i < mgr->b.nr_vertex_buffers; i++) {
pipe_resource_reference(&mgr->b.vertex_buffer[i].buffer, NULL);
}
/* Translate. */
if (unroll_indices) {
- struct pipe_index_buffer *ib = &mgr->b.index_buffer;
+ struct pipe_index_buffer *ib = &mgr->index_buffer;
struct pipe_transfer *transfer = NULL;
unsigned offset = ib->offset + start_index * ib->index_size;
uint8_t *map;
mgr->b.nr_real_vertex_buffers = count;
}
-void u_vbuf_set_index_buffer(struct u_vbuf *mgr,
- const struct pipe_index_buffer *ib)
+/* pipe_context::set_index_buffer hook installed by u_vbuf_install().
+ * Records the bound index buffer in u_vbuf's private state (the buffer
+ * pointer is reference-counted), then forwards the call to the driver's
+ * original implementation saved in driver_set_index_buffer. */
+static void u_vbuf_set_index_buffer(struct pipe_context *pipe,
+ const struct pipe_index_buffer *ib)
{
+ /* u_vbuf_install() stored the u_vbuf_priv pointer in pipe->draw. */
+ struct u_vbuf_priv *mgr = pipe->draw;
+
if (ib && ib->buffer) {
assert(ib->offset % ib->index_size == 0);
pipe_resource_reference(&mgr->index_buffer.buffer, ib->buffer);
} else {
pipe_resource_reference(&mgr->index_buffer.buffer, NULL);
}
+
+ /* Pass the call through to the driver's own set_index_buffer. */
+ mgr->driver_set_index_buffer(pipe, ib);
}
static void
max_index = info->max_index;
index_bounds_valid = true;
} else if (u_vbuf_need_minmax_index(mgr)) {
- u_vbuf_get_minmax_index(mgr->pipe, &mgr->b.index_buffer, info,
+ u_vbuf_get_minmax_index(mgr->pipe, &mgr->index_buffer, info,
&min_index, &max_index);
index_bounds_valid = true;
}
u_vbuf_translate_end(mgr);
}
}
+
+/* Hook u_vbuf into the pipe context: stash the private state in
+ * pipe->draw and interpose on set_index_buffer, keeping the driver's
+ * original callback so u_vbuf_set_index_buffer can forward to it. */
+static void u_vbuf_install(struct u_vbuf_priv *mgr)
+{
+ struct pipe_context *pipe = mgr->pipe;
+ /* Only one u_vbuf instance may be installed per context. */
+ assert(!pipe->draw);
+
+ pipe->draw = mgr;
+ mgr->driver_set_index_buffer = pipe->set_index_buffer;
+ pipe->set_index_buffer = u_vbuf_set_index_buffer;
+ /* NOTE(review): the destroy path clears pipe->draw but does not appear
+ * to restore pipe->set_index_buffer — confirm the hook is unhooked on
+ * destroy, otherwise a later set_index_buffer call dereferences a NULL
+ * pipe->draw. */
+}
struct pipe_vertex_buffer real_vertex_buffer[PIPE_MAX_ATTRIBS];
int nr_real_vertex_buffers;
- /* The index buffer. */
- struct pipe_index_buffer index_buffer;
-
/* This uploader can optionally be used by the driver.
*
* Allowed functions:
unsigned count,
const struct pipe_vertex_buffer *bufs);
-void u_vbuf_set_index_buffer(struct u_vbuf *mgr,
- const struct pipe_index_buffer *ib);
-
enum u_vbuf_return_flags u_vbuf_draw_begin(struct u_vbuf *mgr,
struct pipe_draw_info *info);
void *dsa_decompress_zmask;
struct u_vbuf *vbuf_mgr;
- struct pipe_index_buffer swtcl_index_buffer;
+ struct pipe_index_buffer index_buffer;
struct pipe_vertex_buffer swtcl_vertex_buffer[PIPE_MAX_ATTRIBS];
unsigned swtcl_nr_vertex_buffers;
uint8_t *ptr1;
uint16_t *ptr2;
uint32_t *ptr4;
- unsigned index_size = r300->vbuf_mgr->index_buffer.index_size;
+ unsigned index_size = r300->index_buffer.index_size;
unsigned i, count_dwords = index_size == 4 ? info->count :
(info->count + 1) / 2;
CS_LOCALS(r300);
switch (index_size) {
case 1:
- ptr1 = r300_resource(r300->vbuf_mgr->index_buffer.buffer)->b.user_ptr;
+ ptr1 = r300_resource(r300->index_buffer.buffer)->b.user_ptr;
ptr1 += info->start;
OUT_CS(R300_VAP_VF_CNTL__PRIM_WALK_INDICES | (info->count << 16) |
break;
case 2:
- ptr2 = (uint16_t*)r300_resource(r300->vbuf_mgr->index_buffer.buffer)->b.user_ptr;
+ ptr2 = (uint16_t*)r300_resource(r300->index_buffer.buffer)->b.user_ptr;
ptr2 += info->start;
OUT_CS(R300_VAP_VF_CNTL__PRIM_WALK_INDICES | (info->count << 16) |
break;
case 4:
- ptr4 = (uint32_t*)r300_resource(r300->vbuf_mgr->index_buffer.buffer)->b.user_ptr;
+ ptr4 = (uint32_t*)r300_resource(r300->index_buffer.buffer)->b.user_ptr;
ptr4 += info->start;
OUT_CS(R300_VAP_VF_CNTL__PRIM_WALK_INDICES | (info->count << 16) |
const struct pipe_draw_info *info,
int instance_id)
{
- struct pipe_resource *indexBuffer = r300->vbuf_mgr->index_buffer.buffer;
- unsigned indexSize = r300->vbuf_mgr->index_buffer.index_size;
+ struct pipe_resource *indexBuffer = r300->index_buffer.buffer;
+ unsigned indexSize = r300->index_buffer.index_size;
struct pipe_resource* orgIndexBuffer = indexBuffer;
unsigned start = info->start;
unsigned count = info->count;
struct r300_context* r300 = r300_context(pipe);
struct pipe_draw_info info = *dinfo;
- info.indexed = info.indexed && r300->vbuf_mgr->index_buffer.buffer;
+ info.indexed = info.indexed && r300->index_buffer.buffer;
if (r300->skip_rendering ||
!u_trim_pipe_prim(info.mode, &info.count)) {
}
info.max_index = max_count - 1;
- info.start += r300->vbuf_mgr->index_buffer.offset / r300->vbuf_mgr->index_buffer.index_size;
+ info.start += r300->index_buffer.offset / r300->index_buffer.index_size;
if (info.instance_count <= 1) {
if (info.count <= 8 &&
- r300_resource(r300->vbuf_mgr->index_buffer.buffer)->b.user_ptr) {
+ r300_resource(r300->index_buffer.buffer)->b.user_ptr) {
r300_draw_elements_immediate(r300, &info);
} else {
r300_draw_elements(r300, &info, -1);
struct pipe_transfer *ib_transfer = NULL;
int i;
void *indices = NULL;
- boolean indexed = info->indexed && r300->swtcl_index_buffer.buffer;
+ boolean indexed = info->indexed && r300->index_buffer.buffer;
if (r300->skip_rendering) {
return;
}
if (indexed) {
- indices = pipe_buffer_map(pipe, r300->swtcl_index_buffer.buffer,
+ indices = pipe_buffer_map(pipe, r300->index_buffer.buffer,
PIPE_TRANSFER_READ |
PIPE_TRANSFER_UNSYNCHRONIZED, &ib_transfer);
}
{
struct r300_context* r300 = r300_context(pipe);
- if (r300->screen->caps.has_tcl) {
- u_vbuf_set_index_buffer(r300->vbuf_mgr, ib);
+ if (ib) {
+ pipe_resource_reference(&r300->index_buffer.buffer, ib->buffer);
+ memcpy(&r300->index_buffer, ib, sizeof(*ib));
} else {
- if (ib) {
- pipe_resource_reference(&r300->swtcl_index_buffer.buffer, ib->buffer);
- memcpy(&r300->swtcl_index_buffer, ib, sizeof(*ib));
- } else {
- pipe_resource_reference(&r300->swtcl_index_buffer.buffer, NULL);
- }
+ pipe_resource_reference(&r300->index_buffer.buffer, NULL);
+ }
+
+ if (!r300->screen->caps.has_tcl) {
draw_set_index_buffer(r300->draw, ib);
}
}
bool vertex_buffers_dirty;
boolean dual_src_blend;
unsigned color0_format;
+
+ struct pipe_index_buffer index_buffer;
};
static INLINE void r600_emit_atom(struct r600_context *rctx, struct r600_atom *atom)
FREE(state);
}
-
+/* With u_vbuf no longer tracking the index buffer on r600's behalf,
+ * keep a reference-counted copy of it in the r600 context itself. */
void r600_set_index_buffer(struct pipe_context *ctx,
const struct pipe_index_buffer *ib)
{
struct r600_context *rctx = (struct r600_context *)ctx;
- u_vbuf_set_index_buffer(rctx->vbuf_mgr, ib);
+ if (ib) {
+ /* Take the reference first; the memcpy below rewrites .buffer with
+ * the same pointer, so the refcount stays consistent. */
+ pipe_resource_reference(&rctx->index_buffer.buffer, ib->buffer);
+ memcpy(&rctx->index_buffer, ib, sizeof(*ib));
+ } else {
+ pipe_resource_reference(&rctx->index_buffer.buffer, NULL);
+ }
}
void r600_set_vertex_buffers(struct pipe_context *ctx, unsigned count,
uint8_t *ptr;
if ((!info.count && (info.indexed || !info.count_from_stream_output)) ||
- (info.indexed && !rctx->vbuf_mgr->index_buffer.buffer) ||
+ (info.indexed && !rctx->index_buffer.buffer) ||
!r600_conv_pipe_prim(info.mode, &prim)) {
assert(0);
return;
if (info.indexed) {
/* Initialize the index buffer struct. */
- pipe_resource_reference(&ib.buffer, rctx->vbuf_mgr->index_buffer.buffer);
- ib.index_size = rctx->vbuf_mgr->index_buffer.index_size;
- ib.offset = rctx->vbuf_mgr->index_buffer.offset + info.start * ib.index_size;
+ pipe_resource_reference(&ib.buffer, rctx->index_buffer.buffer);
+ ib.index_size = rctx->index_buffer.index_size;
+ ib.offset = rctx->index_buffer.offset + info.start * ib.index_size;
/* Translate or upload, if needed. */
r600_translate_index_buffer(rctx, &ib, info.count);