Add a user_buffer pointer to pipe_index_buffer so that user index buffers
can be passed straight to drivers, without wrapping them in a pipe_resource
first.

Adapted drivers: i915, llvmpipe, r300, r600, radeonsi, softpipe.
User index buffers have been disabled in nv30, nv50, nvc0 and svga
(PIPE_CAP_USER_INDEX_BUFFERS returns 0 there) to keep those drivers working.
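Every driver hunk below repeats the same idiom: when ib->user_buffer is set,
read from it directly; otherwise map ib->buffer as before and unmap it
afterwards. A minimal sketch of that idiom (the helper name is made up for
illustration and is not part of the patch):

static const void *
map_index_data(struct pipe_context *pipe,
               const struct pipe_index_buffer *ib,
               struct pipe_transfer **transfer)
{
   /* Exactly one of buffer/user_buffer is expected to be set.
    * Assumes the caller initialized *transfer to NULL. */
   if (ib->user_buffer)
      return ib->user_buffer;   /* user memory: nothing to map or unmap */

   return pipe_buffer_map(pipe, ib->buffer,
                          PIPE_TRANSFER_READ |
                          PIPE_TRANSFER_UNSYNCHRONIZED,
                          transfer);
}

The caller then unmaps only if *transfer came back non-NULL, which is exactly
what the if (src_transfer) / if (in_transfer) guards introduced below do.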
/* Ubyte indices. */
void util_shorten_ubyte_elts_to_userptr(struct pipe_context *context,
- struct pipe_resource *elts,
+ struct pipe_index_buffer *ib,
int index_bias,
unsigned start,
unsigned count,
void *out)
{
- struct pipe_transfer *src_transfer;
- unsigned char *in_map;
+ struct pipe_transfer *src_transfer = NULL;
+ const unsigned char *in_map;
unsigned short *out_map = out;
unsigned i;
- in_map = pipe_buffer_map(context, elts,
- PIPE_TRANSFER_READ |
- PIPE_TRANSFER_UNSYNCHRONIZED,
- &src_transfer);
+ if (ib->user_buffer) {
+ in_map = ib->user_buffer;
+ } else {
+ in_map = pipe_buffer_map(context, ib->buffer,
+ PIPE_TRANSFER_READ |
+ PIPE_TRANSFER_UNSYNCHRONIZED,
+ &src_transfer);
+ }
in_map += start;
for (i = 0; i < count; i++) {
   *out_map = (unsigned short)(*in_map + index_bias);
   in_map++;
   out_map++;
}
- pipe_buffer_unmap(context, src_transfer);
+ if (src_transfer)
+ pipe_buffer_unmap(context, src_transfer);
}
void util_shorten_ubyte_elts(struct pipe_context *context,
- struct pipe_resource **elts,
+ struct pipe_index_buffer *ib,
+ struct pipe_resource **out_buf,
int index_bias,
unsigned start,
unsigned count)
out_map = pipe_buffer_map(context, new_elts, PIPE_TRANSFER_WRITE,
&dst_transfer);
- util_shorten_ubyte_elts_to_userptr(context, *elts, index_bias,
+ util_shorten_ubyte_elts_to_userptr(context, ib, index_bias,
start, count, out_map);
pipe_buffer_unmap(context, dst_transfer);
- *elts = new_elts;
+ pipe_resource_reference(out_buf, NULL);
+ *out_buf = new_elts;
}
/* Ushort indices. */
void util_rebuild_ushort_elts_to_userptr(struct pipe_context *context,
- struct pipe_resource *elts,
+ struct pipe_index_buffer *ib,
int index_bias,
unsigned start, unsigned count,
void *out)
{
struct pipe_transfer *in_transfer = NULL;
- unsigned short *in_map;
+ const unsigned short *in_map;
unsigned short *out_map = out;
unsigned i;
- in_map = pipe_buffer_map(context, elts,
- PIPE_TRANSFER_READ |
- PIPE_TRANSFER_UNSYNCHRONIZED,
- &in_transfer);
+ if (ib->user_buffer) {
+ in_map = ib->user_buffer;
+ } else {
+ in_map = pipe_buffer_map(context, ib->buffer,
+ PIPE_TRANSFER_READ |
+ PIPE_TRANSFER_UNSYNCHRONIZED,
+ &in_transfer);
+ }
in_map += start;
for (i = 0; i < count; i++) {
   *out_map = (unsigned short)(*in_map + index_bias);
   in_map++;
   out_map++;
}
- pipe_buffer_unmap(context, in_transfer);
+ if (in_transfer)
+ pipe_buffer_unmap(context, in_transfer);
}
void util_rebuild_ushort_elts(struct pipe_context *context,
- struct pipe_resource **elts,
+ struct pipe_index_buffer *ib,
+ struct pipe_resource **out_buf,
int index_bias,
unsigned start, unsigned count)
{
out_map = pipe_buffer_map(context, new_elts,
PIPE_TRANSFER_WRITE, &out_transfer);
- util_rebuild_ushort_elts_to_userptr(context, *elts, index_bias,
+ util_rebuild_ushort_elts_to_userptr(context, ib, index_bias,
start, count, out_map);
pipe_buffer_unmap(context, out_transfer);
- *elts = new_elts;
+ pipe_resource_reference(out_buf, NULL);
+ *out_buf = new_elts;
}
/* Uint indices. */
void util_rebuild_uint_elts_to_userptr(struct pipe_context *context,
- struct pipe_resource *elts,
+ struct pipe_index_buffer *ib,
int index_bias,
unsigned start, unsigned count,
void *out)
{
struct pipe_transfer *in_transfer = NULL;
- unsigned int *in_map;
+ const unsigned int *in_map;
unsigned int *out_map = out;
unsigned i;
- in_map = pipe_buffer_map(context, elts,
- PIPE_TRANSFER_READ |
- PIPE_TRANSFER_UNSYNCHRONIZED,
- &in_transfer);
+ if (ib->user_buffer) {
+ in_map = ib->user_buffer;
+ } else {
+ in_map = pipe_buffer_map(context, ib->buffer,
+ PIPE_TRANSFER_READ |
+ PIPE_TRANSFER_UNSYNCHRONIZED,
+ &in_transfer);
+ }
in_map += start;
for (i = 0; i < count; i++) {
   *out_map = (unsigned int)(*in_map + index_bias);
   in_map++;
   out_map++;
}
- pipe_buffer_unmap(context, in_transfer);
+ if (in_transfer)
+ pipe_buffer_unmap(context, in_transfer);
}
void util_rebuild_uint_elts(struct pipe_context *context,
- struct pipe_resource **elts,
+ struct pipe_index_buffer *ib,
+ struct pipe_resource **out_buf,
int index_bias,
unsigned start, unsigned count)
{
out_map = pipe_buffer_map(context, new_elts,
PIPE_TRANSFER_WRITE, &out_transfer);
- util_rebuild_uint_elts_to_userptr(context, *elts, index_bias,
+ util_rebuild_uint_elts_to_userptr(context, ib, index_bias,
start, count, out_map);
pipe_buffer_unmap(context, out_transfer);
- *elts = new_elts;
+ pipe_resource_reference(out_buf, NULL);
+ *out_buf = new_elts;
}
struct pipe_context;
struct pipe_resource;
+struct pipe_index_buffer;
void util_shorten_ubyte_elts_to_userptr(struct pipe_context *context,
- struct pipe_resource *elts,
+ struct pipe_index_buffer *ib,
int index_bias,
unsigned start,
unsigned count,
void *out);
void util_shorten_ubyte_elts(struct pipe_context *context,
- struct pipe_resource **elts,
+ struct pipe_index_buffer *ib,
+ struct pipe_resource **out_buf,
int index_bias,
unsigned start,
unsigned count);
void util_rebuild_ushort_elts_to_userptr(struct pipe_context *context,
- struct pipe_resource *elts,
+ struct pipe_index_buffer *ib,
int index_bias,
unsigned start, unsigned count,
void *out);
void util_rebuild_ushort_elts(struct pipe_context *context,
- struct pipe_resource **elts,
+ struct pipe_index_buffer *ib,
+ struct pipe_resource **out_buf,
int index_bias,
unsigned start, unsigned count);
void util_rebuild_uint_elts_to_userptr(struct pipe_context *context,
- struct pipe_resource *elts,
+ struct pipe_index_buffer *ib,
int index_bias,
unsigned start, unsigned count,
void *out);
void util_rebuild_uint_elts(struct pipe_context *context,
- struct pipe_resource **elts,
+ struct pipe_index_buffer *ib,
+ struct pipe_resource **out_buf,
int index_bias,
unsigned start, unsigned count);
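For illustration, a hypothetical caller of the ubyte variant: widening 64
byte indices taken from a user pointer into a caller-provided ushort array
(all names and sizes here are made up for the example; pipe is assumed to be
the current pipe_context):

const unsigned char app_byte_indices[64] = { /* filled by the app */ };
unsigned short out[64];
struct pipe_index_buffer ib;

memset(&ib, 0, sizeof(ib));
ib.index_size = 1;
ib.user_buffer = app_byte_indices;   /* no pipe_resource involved */

/* Widen 64 ubyte indices into 'out', no bias, starting at element 0. */
util_shorten_ubyte_elts_to_userptr(pipe, &ib, 0 /* index_bias */,
                                   0 /* start */, 64 /* count */, out);

Because user_buffer is set, no mapping happens at all; with a real
pipe_resource in ib.buffer the function would map and unmap it instead.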
unsigned offset = ib->offset + start_index * ib->index_size;
uint8_t *map;
- assert(ib->buffer && ib->index_size);
+ assert((ib->buffer || ib->user_buffer) && ib->index_size);
- if (ib->buffer->user_ptr) {
- map = ib->buffer->user_ptr + offset;
+ if (ib->user_buffer) {
+ map = (uint8_t*)ib->user_buffer + offset;
} else {
map = pipe_buffer_map_range(mgr->pipe, ib->buffer, offset,
num_indices * ib->index_size,
{
struct pipe_context *pipe = mgr->pipe;
- if (ib && ib->buffer) {
+ if (ib) {
assert(ib->offset % ib->index_size == 0);
pipe_resource_reference(&mgr->index_buffer.buffer, ib->buffer);
- mgr->index_buffer.offset = ib->offset;
- mgr->index_buffer.index_size = ib->index_size;
+ memcpy(&mgr->index_buffer, ib, sizeof(*ib));
} else {
pipe_resource_reference(&mgr->index_buffer.buffer, NULL);
}
unsigned i;
unsigned restart_index = info->restart_index;
- if (ib->buffer->user_ptr) {
- indices = ib->buffer->user_ptr +
+ if (ib->user_buffer) {
+ indices = (uint8_t*)ib->user_buffer +
ib->offset + info->start * ib->index_size;
} else {
indices = pipe_buffer_map_range(pipe, ib->buffer,
{
struct i915_context *i915 = i915_context(pipe);
struct draw_context *draw = i915->draw;
- void *mapped_indices = NULL;
+ const void *mapped_indices = NULL;
/*
* Map index buffer, if present
*/
- if (info->indexed && i915->index_buffer.buffer)
- mapped_indices = i915_buffer(i915->index_buffer.buffer)->data;
+ if (info->indexed) {
+ mapped_indices = i915->index_buffer.user_buffer;
+ if (!mapped_indices)
+ mapped_indices = i915_buffer(i915->index_buffer.buffer)->data;
+ }
draw_set_mapped_index_buffer(draw, mapped_indices);
if (i915->constants[PIPE_SHADER_VERTEX])
{
struct llvmpipe_context *lp = llvmpipe_context(pipe);
struct draw_context *draw = lp->draw;
- void *mapped_indices = NULL;
+ const void *mapped_indices = NULL;
unsigned i;
if (!llvmpipe_check_render_cond(lp))
}
/* Map index buffer, if present */
- if (info->indexed && lp->index_buffer.buffer)
- mapped_indices = llvmpipe_resource_data(lp->index_buffer.buffer);
+ if (info->indexed) {
+ mapped_indices = lp->index_buffer.user_buffer;
+ if (!mapped_indices)
+ mapped_indices = llvmpipe_resource_data(lp->index_buffer.buffer);
+ }
draw_set_mapped_index_buffer(draw, mapped_indices);
}
if (info->indexed) {
- void *map = pipe_buffer_map(pipe, nv30->idxbuf.buffer,
+ const void *map = nv30->idxbuf.user_buffer;
+ if (!map)
+ map = pipe_buffer_map(pipe, nv30->idxbuf.buffer,
PIPE_TRANSFER_UNSYNCHRONIZED |
PIPE_TRANSFER_READ, &transferi);
draw_set_index_buffer(draw, &nv30->idxbuf);
draw_vbo(draw, info);
draw_flush(draw);
- if (info->indexed)
+ if (info->indexed && transferi)
pipe_buffer_unmap(pipe, transferi);
for (i = 0; i < nv30->num_vtxbufs; i++)
if (transfer[i])
case PIPE_CAP_TGSI_FS_COORD_ORIGIN_LOWER_LEFT:
case PIPE_CAP_TGSI_FS_COORD_PIXEL_CENTER_HALF_INTEGER:
case PIPE_CAP_TGSI_FS_COORD_PIXEL_CENTER_INTEGER:
- case PIPE_CAP_USER_INDEX_BUFFERS:
case PIPE_CAP_USER_CONSTANT_BUFFERS:
return 1;
+ case PIPE_CAP_USER_INDEX_BUFFERS:
case PIPE_CAP_USER_VERTEX_BUFFERS:
return 0;
case PIPE_CAP_CONSTANT_BUFFER_OFFSET_ALIGNMENT:
case PIPE_CAP_TGSI_CAN_COMPACT_VARYINGS:
case PIPE_CAP_TGSI_CAN_COMPACT_CONSTANTS:
case PIPE_CAP_USER_VERTEX_BUFFERS:
- return 0; /* state trackers will know better */
case PIPE_CAP_USER_INDEX_BUFFERS:
+ return 0; /* state trackers will know better */
case PIPE_CAP_USER_CONSTANT_BUFFERS:
return 1;
case PIPE_CAP_CONSTANT_BUFFER_OFFSET_ALIGNMENT:
case PIPE_CAP_TGSI_CAN_COMPACT_VARYINGS:
case PIPE_CAP_TGSI_CAN_COMPACT_CONSTANTS:
case PIPE_CAP_USER_VERTEX_BUFFERS:
- return 0; /* state trackers will know better */
case PIPE_CAP_USER_INDEX_BUFFERS:
+ return 0; /* state trackers will know better */
case PIPE_CAP_USER_CONSTANT_BUFFERS:
return 1;
case PIPE_CAP_CONSTANT_BUFFER_OFFSET_ALIGNMENT:
/* r300_render_translate.c */
void r300_translate_index_buffer(struct r300_context *r300,
- struct pipe_resource **index_buffer,
+ struct pipe_index_buffer *ib,
+ struct pipe_resource **out_index_buffer,
unsigned *index_size, unsigned index_offset,
unsigned *start, unsigned count);
static void r300_draw_elements_immediate(struct r300_context *r300,
const struct pipe_draw_info *info)
{
- uint8_t *ptr1;
- uint16_t *ptr2;
- uint32_t *ptr4;
+ const uint8_t *ptr1;
+ const uint16_t *ptr2;
+ const uint32_t *ptr4;
unsigned index_size = r300->index_buffer.index_size;
unsigned i, count_dwords = index_size == 4 ? info->count :
(info->count + 1) / 2;
switch (index_size) {
case 1:
- ptr1 = r300->index_buffer.buffer->user_ptr;
+ ptr1 = (const uint8_t*)r300->index_buffer.user_buffer;
ptr1 += info->start;
OUT_CS(R300_VAP_VF_CNTL__PRIM_WALK_INDICES | (info->count << 16) |
break;
case 2:
- ptr2 = (uint16_t*)r300->index_buffer.buffer->user_ptr;
+ ptr2 = (const uint16_t*)r300->index_buffer.user_buffer;
ptr2 += info->start;
OUT_CS(R300_VAP_VF_CNTL__PRIM_WALK_INDICES | (info->count << 16) |
break;
case 4:
- ptr4 = (uint32_t*)r300->index_buffer.buffer->user_ptr;
+ ptr4 = (const uint32_t*)r300->index_buffer.user_buffer;
ptr4 += info->start;
OUT_CS(R300_VAP_VF_CNTL__PRIM_WALK_INDICES | (info->count << 16) |
uint16_t indices3[3];
if (info->index_bias && !r300->screen->caps.is_r500) {
- r300_split_index_bias(r300, info->index_bias, &buffer_offset, &index_offset);
+ r300_split_index_bias(r300, info->index_bias, &buffer_offset,
+ &index_offset);
}
- r300_translate_index_buffer(r300, &indexBuffer, &indexSize, index_offset,
- &start, count);
+ r300_translate_index_buffer(r300, &r300->index_buffer, &indexBuffer,
+ &indexSize, index_offset, &start, count);
/* Fallback for misaligned ushort indices. */
- if (indexSize == 2 && (start & 1) &&
- !indexBuffer->user_ptr) {
+ if (indexSize == 2 && (start & 1) && indexBuffer) {
/* If we got here, then orgIndexBuffer == indexBuffer. */
uint16_t *ptr = r300->rws->buffer_map(r300_resource(orgIndexBuffer)->cs_buf,
r300->cs,
}
r300->rws->buffer_unmap(r300_resource(orgIndexBuffer)->cs_buf);
} else {
- if (indexBuffer->user_ptr)
+ if (r300->index_buffer.user_buffer)
r300_upload_index_buffer(r300, &indexBuffer, indexSize,
&start, count,
- indexBuffer->user_ptr);
+ r300->index_buffer.user_buffer);
}
/* 19 dwords for emit_draw_elements. Give up if the function fails. */
struct r300_context* r300 = r300_context(pipe);
struct pipe_draw_info info = *dinfo;
- info.indexed = info.indexed && r300->index_buffer.buffer;
if (r300->skip_rendering ||
!u_trim_pipe_prim(info.mode, &info.count)) {
if (info.instance_count <= 1) {
if (info.count <= 8 &&
- r300->index_buffer.buffer->user_ptr) {
+ r300->index_buffer.user_buffer) {
r300_draw_elements_immediate(r300, &info);
} else {
r300_draw_elements(r300, &info, -1);
struct pipe_transfer *vb_transfer[PIPE_MAX_ATTRIBS];
struct pipe_transfer *ib_transfer = NULL;
int i;
- void *indices = NULL;
- boolean indexed = info->indexed && r300->index_buffer.buffer;
+ const void *indices = NULL;
+ boolean indexed = info->indexed;
if (r300->skip_rendering) {
return;
}
if (indexed) {
- indices = pipe_buffer_map(pipe, r300->index_buffer.buffer,
- PIPE_TRANSFER_READ |
- PIPE_TRANSFER_UNSYNCHRONIZED, &ib_transfer);
+ if (r300->index_buffer.user_buffer) {
+ indices = r300->index_buffer.user_buffer;
+ } else {
+ indices = pipe_buffer_map(pipe, r300->index_buffer.buffer,
+ PIPE_TRANSFER_READ |
+ PIPE_TRANSFER_UNSYNCHRONIZED, &ib_transfer);
+ }
}
draw_set_mapped_index_buffer(r300->draw, indices);
}
if (indexed) {
- pipe_buffer_unmap(pipe, ib_transfer);
+ if (ib_transfer)
+ pipe_buffer_unmap(pipe, ib_transfer);
draw_set_mapped_index_buffer(r300->draw, NULL);
}
}
void r300_translate_index_buffer(struct r300_context *r300,
- struct pipe_resource **index_buffer,
+ struct pipe_index_buffer *ib,
+ struct pipe_resource **out_buffer,
unsigned *index_size, unsigned index_offset,
unsigned *start, unsigned count)
{
- struct pipe_resource *out_buffer = NULL;
unsigned out_offset;
void *ptr;
switch (*index_size) {
case 1:
+ *out_buffer = NULL;
u_upload_alloc(r300->uploader, 0, count * 2,
- &out_offset, &out_buffer, &ptr);
+ &out_offset, out_buffer, &ptr);
util_shorten_ubyte_elts_to_userptr(
- &r300->context, *index_buffer, index_offset,
+ &r300->context, ib, index_offset,
*start, count, ptr);
- *index_buffer = NULL;
- pipe_resource_reference(index_buffer, out_buffer);
*index_size = 2;
*start = out_offset / 2;
break;
case 2:
if (index_offset) {
+ *out_buffer = NULL;
u_upload_alloc(r300->uploader, 0, count * 2,
- &out_offset, &out_buffer, &ptr);
+ &out_offset, out_buffer, &ptr);
- util_rebuild_ushort_elts_to_userptr(&r300->context, *index_buffer,
+ util_rebuild_ushort_elts_to_userptr(&r300->context, ib,
index_offset, *start,
count, ptr);
- *index_buffer = NULL;
- pipe_resource_reference(index_buffer, out_buffer);
*start = out_offset / 2;
}
break;
case 4:
if (index_offset) {
+ *out_buffer = NULL;
u_upload_alloc(r300->uploader, 0, count * 4,
- &out_offset, &out_buffer, &ptr);
+ &out_offset, out_buffer, &ptr);
- util_rebuild_uint_elts_to_userptr(&r300->context, *index_buffer,
+ util_rebuild_uint_elts_to_userptr(&r300->context, ib,
index_offset, *start,
count, ptr);
- *index_buffer = NULL;
- pipe_resource_reference(index_buffer, out_buffer);
*start = out_offset / 4;
}
break;
void r300_upload_index_buffer(struct r300_context *r300,
struct pipe_resource **index_buffer,
unsigned index_size, unsigned *start,
- unsigned count, uint8_t *ptr)
+ unsigned count, const uint8_t *ptr)
{
unsigned index_offset;
void r300_upload_index_buffer(struct r300_context *r300,
struct pipe_resource **index_buffer,
unsigned index_size, unsigned *start,
- unsigned count, uint8_t *ptr);
+ unsigned count, const uint8_t *ptr);
struct pipe_resource *r300_buffer_create(struct pipe_screen *screen,
const struct pipe_resource *templ);
uint8_t *ptr;
if ((!info.count && (info.indexed || !info.count_from_stream_output)) ||
- (info.indexed && !rctx->index_buffer.buffer) ||
!r600_conv_pipe_prim(info.mode, &prim)) {
assert(0);
return;
if (info.indexed) {
/* Initialize the index buffer struct. */
pipe_resource_reference(&ib.buffer, rctx->index_buffer.buffer);
+ ib.user_buffer = rctx->index_buffer.user_buffer;
ib.index_size = rctx->index_buffer.index_size;
ib.offset = rctx->index_buffer.offset + info.start * ib.index_size;
/* Translate or upload, if needed. */
r600_translate_index_buffer(rctx, &ib, info.count);
- ptr = ib.buffer->user_ptr;
- if (ptr) {
+ ptr = (uint8_t*)ib.user_buffer;
+ if (!ib.buffer && ptr) {
u_upload_data(rctx->uploader, 0, info.count * ib.index_size,
ptr, &ib.offset, &ib.buffer);
}
&out_offset, &out_buffer, &ptr);
util_shorten_ubyte_elts_to_userptr(
- &r600->context, ib->buffer, 0, ib->offset, count, ptr);
+ &r600->context, ib, 0, ib->offset, count, ptr);
pipe_resource_reference(&ib->buffer, NULL);
ib->buffer = out_buffer;
void r600_upload_index_buffer(struct r600_context *rctx,
struct pipe_index_buffer *ib, unsigned count)
{
- struct r600_resource *rbuffer = r600_resource(ib->buffer);
-
u_upload_data(rctx->uploader, 0, count * ib->index_size,
- rbuffer->b.b.user_ptr, &ib->offset, &ib->buffer);
+ ib->user_buffer, &ib->offset, &ib->buffer);
}
void r600_upload_const_buffer(struct r600_context *rctx, struct r600_resource **rbuffer,
/* Translate or upload, if needed. */
r600_translate_index_buffer(rctx, &ib, info.count);
- if (ib.buffer->user_ptr) {
+ if (ib.user_buffer) {
r600_upload_index_buffer(rctx, &ib, info.count);
}
*/
#include "util/u_index_modify.h"
-#include "util/u_inlines.h"
#include "util/u_upload_mgr.h"
#include "radeonsi_pipe.h"
&out_offset, &out_buffer, &ptr);
util_shorten_ubyte_elts_to_userptr(
- &r600->context, ib->buffer, 0, ib->offset, count, ptr);
+ &r600->context, ib, 0, ib->offset, count, ptr);
pipe_resource_reference(&ib->buffer, NULL);
ib->buffer = out_buffer;
{
struct softpipe_context *sp = softpipe_context(pipe);
struct draw_context *draw = sp->draw;
- void *mapped_indices = NULL;
+ const void *mapped_indices = NULL;
unsigned i;
if (!softpipe_check_render_cond(sp))
}
/* Map index buffer, if present */
- if (info->indexed && sp->index_buffer.buffer)
- mapped_indices = softpipe_resource(sp->index_buffer.buffer)->data;
+ if (info->indexed) {
+ mapped_indices = sp->index_buffer.user_buffer;
+ if (!mapped_indices)
+ mapped_indices = softpipe_resource(sp->index_buffer.buffer)->data;
+ }
draw_set_mapped_index_buffer(draw, mapped_indices);
case PIPE_CAP_TEXTURE_SWIZZLE:
return 1;
case PIPE_CAP_USER_VERTEX_BUFFERS:
- return 0;
case PIPE_CAP_USER_INDEX_BUFFERS:
+ return 0;
case PIPE_CAP_USER_CONSTANT_BUFFERS:
return 1;
case PIPE_CAP_CONSTANT_BUFFER_OFFSET_ALIGNMENT:
unsigned index_size; /**< size of an index, in bytes */
unsigned offset; /**< offset to start of data in buffer, in bytes */
struct pipe_resource *buffer; /**< the actual buffer */
+ const void *user_buffer; /**< pointer to a user buffer if buffer == NULL */
};
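On the state-tracker side, binding a user index array now means filling in
user_buffer and leaving buffer NULL; the two fields are mutually exclusive.
A sketch of such a binding (assuming indices points at application memory
that stays valid while bound):

struct pipe_index_buffer ib;

memset(&ib, 0, sizeof(ib));
ib.index_size = 2;        /* e.g. GL_UNSIGNED_SHORT */
ib.offset = 0;
ib.buffer = NULL;         /* NULL buffer selects the user_buffer path */
ib.user_buffer = indices; /* not copied; must outlive the binding */

pipe->set_index_buffer(pipe, &ib);

Drivers that return 0 for PIPE_CAP_USER_INDEX_BUFFERS never see this case:
the state tracker (or u_vbuf) uploads the data into a real buffer first.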
const struct _mesa_index_buffer *ib,
struct pipe_index_buffer *ibuffer)
{
- struct pipe_context *pipe = st->pipe;
struct gl_buffer_object *bufobj = ib->obj;
ibuffer->index_size = vbo_sizeof_ib_type(ib->type);
/* get/create the index buffer object */
if (_mesa_is_bufferobj(bufobj)) {
/* indices are in a real VBO */
- struct st_buffer_object *stobj = st_buffer_object(bufobj);
- pipe_resource_reference(&ibuffer->buffer, stobj->buffer);
+ ibuffer->buffer = st_buffer_object(bufobj)->buffer;
ibuffer->offset = pointer_to_offset(ib->ptr);
}
else if (st->indexbuf_uploader) {
}
else {
/* indices are in user space memory */
- ibuffer->buffer =
- pipe_user_buffer_create(pipe->screen, (void *) ib->ptr,
- ib->count * ibuffer->index_size,
- PIPE_BIND_INDEX_BUFFER);
+ ibuffer->user_buffer = ib->ptr;
}
cso_set_index_buffer(st->cso_context, ibuffer);
unsigned num_sub_prims;
assert(info.indexed);
- assert(ibuffer->buffer);
+ assert(ibuffer->buffer || ibuffer->user_buffer);
assert(ib);
- if (!ibuffer->buffer || !ib)
+ if ((!ibuffer->buffer && !ibuffer->user_buffer) || !ib)
return;
info.primitive_restart = FALSE;
cso_draw_vbo(st->cso_context, &info);
}
- pipe_resource_reference(&ibuffer.buffer, NULL);
+ if (ib && st->indexbuf_uploader && !_mesa_is_bufferobj(ib->obj)) {
+ pipe_resource_reference(&ibuffer.buffer, NULL);
+ }
}