unsigned i;
if (ctx->pipe) {
- ctx->pipe->set_index_buffer(ctx->pipe, NULL);
-
ctx->pipe->bind_blend_state( ctx->pipe, NULL );
ctx->pipe->bind_rasterizer_state( ctx->pipe, NULL );
/* drawing */
-void
-cso_set_index_buffer(struct cso_context *cso,
- const struct pipe_index_buffer *ib)
-{
- struct u_vbuf *vbuf = cso->vbuf;
-
- if (vbuf) {
- u_vbuf_set_index_buffer(vbuf, ib);
- } else {
- struct pipe_context *pipe = cso->pipe;
- pipe->set_index_buffer(pipe, ib);
- }
-}
-
void
cso_draw_vbo(struct cso_context *cso,
const struct pipe_draw_info *info)
/* drawing */
-void
-cso_set_index_buffer(struct cso_context *cso,
- const struct pipe_index_buffer *ib);
-
void
cso_draw_vbo(struct cso_context *cso,
const struct pipe_draw_info *info);
/**
* Tell the draw module where vertex indexes/elements are located, and
* their size (in bytes).
- *
- * Note: the caller must apply the pipe_index_buffer::offset value to
- * the address. The draw module doesn't do that.
*/
void
draw_set_indexes(struct draw_context *draw,
info->count = target->internal_offset / vertex_buffer->stride;
/* Stream output draw can not be indexed */
- debug_assert(!info->indexed);
+ debug_assert(!info->index_size);
info->max_index = info->count - 1;
}
}
info = &resolved_info;
assert(info->instance_count > 0);
- if (info->indexed)
+ if (info->index_size)
assert(draw->pt.user.elts);
count = info->count;
draw->pt.user.eltBias = info->index_bias;
draw->pt.user.min_index = info->min_index;
draw->pt.user.max_index = info->max_index;
- draw->pt.user.eltSize = info->indexed ? draw->pt.user.eltSizeIB : 0;
+ draw->pt.user.eltSize = info->index_size ? draw->pt.user.eltSizeIB : 0;
if (0)
debug_printf("draw_vbo(mode=%u start=%u count=%u):\n",
*
* // emulate unsupported primitives:
* if (info->mode needs emulating) {
- * util_primconvert_save_index_buffer(ctx->primconvert, &ctx->indexbuf);
* util_primconvert_save_rasterizer_state(ctx->primconvert, ctx->rasterizer);
* util_primconvert_draw_vbo(ctx->primconvert, info);
* return;
struct primconvert_context
{
struct pipe_context *pipe;
- struct pipe_index_buffer saved_ib;
uint32_t primtypes_mask;
unsigned api_pv;
};
void
util_primconvert_destroy(struct primconvert_context *pc)
{
- util_primconvert_save_index_buffer(pc, NULL);
FREE(pc);
}
-void
-util_primconvert_save_index_buffer(struct primconvert_context *pc,
- const struct pipe_index_buffer *ib)
-{
- if (ib) {
- pipe_resource_reference(&pc->saved_ib.buffer, ib->buffer);
- pc->saved_ib.index_size = ib->index_size;
- pc->saved_ib.offset = ib->offset;
- pc->saved_ib.user_buffer = ib->user_buffer;
- }
- else {
- pipe_resource_reference(&pc->saved_ib.buffer, NULL);
- }
-}
-
void
util_primconvert_save_rasterizer_state(struct primconvert_context *pc,
const struct pipe_rasterizer_state
util_primconvert_draw_vbo(struct primconvert_context *pc,
const struct pipe_draw_info *info)
{
- struct pipe_index_buffer *ib = &pc->saved_ib;
- struct pipe_index_buffer new_ib;
struct pipe_draw_info new_info;
struct pipe_transfer *src_transfer = NULL;
u_translate_func trans_func;
u_generate_func gen_func;
const void *src = NULL;
void *dst;
+ unsigned ib_offset;
- memset(&new_ib, 0, sizeof(new_ib));
util_draw_init_info(&new_info);
- new_info.indexed = true;
new_info.min_index = info->min_index;
new_info.max_index = info->max_index;
new_info.index_bias = info->index_bias;
new_info.instance_count = info->instance_count;
new_info.primitive_restart = info->primitive_restart;
new_info.restart_index = info->restart_index;
- if (info->indexed) {
+ if (info->index_size) {
enum pipe_prim_type mode = 0;
+ unsigned index_size;
u_index_translator(pc->primtypes_mask,
- info->mode, pc->saved_ib.index_size, info->count,
+ info->mode, info->index_size, info->count,
pc->api_pv, pc->api_pv,
info->primitive_restart ? PR_ENABLE : PR_DISABLE,
- &mode, &new_ib.index_size, &new_info.count,
+ &mode, &index_size, &new_info.count,
&trans_func);
new_info.mode = mode;
- src = ib->user_buffer;
+ new_info.index_size = index_size;
+ src = info->has_user_indices ? info->index.user : NULL;
if (!src) {
- src = pipe_buffer_map(pc->pipe, ib->buffer,
+ src = pipe_buffer_map(pc->pipe, info->index.resource,
PIPE_TRANSFER_READ, &src_transfer);
}
- src = (const uint8_t *)src + ib->offset;
}
else {
enum pipe_prim_type mode = 0;
+ unsigned index_size;
u_index_generator(pc->primtypes_mask,
info->mode, info->start, info->count,
pc->api_pv, pc->api_pv,
- &mode, &new_ib.index_size, &new_info.count,
+ &mode, &index_size, &new_info.count,
&gen_func);
new_info.mode = mode;
+ new_info.index_size = index_size;
}
- u_upload_alloc(pc->pipe->stream_uploader, 0, new_ib.index_size * new_info.count, 4,
- &new_ib.offset, &new_ib.buffer, &dst);
+ u_upload_alloc(pc->pipe->stream_uploader, 0, new_info.index_size * new_info.count, 4,
+ &ib_offset, &new_info.index.resource, &dst);
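+ /* u_upload_alloc may place the indices at a nonzero byte offset; pipe_draw_info
+  * no longer carries an index-buffer offset, so express it as a start index
+  * (the 4-byte alignment keeps ib_offset divisible by the 2- or 4-byte index_size). */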
+ new_info.start = ib_offset / new_info.index_size;
- if (info->indexed) {
+ if (info->index_size) {
trans_func(src, info->start, info->count, new_info.count, info->restart_index, dst);
}
else {
u_upload_unmap(pc->pipe->stream_uploader);
- /* bind new index buffer: */
- pc->pipe->set_index_buffer(pc->pipe, &new_ib);
-
/* to the translated draw: */
pc->pipe->draw_vbo(pc->pipe, &new_info);
- /* and then restore saved ib: */
- pc->pipe->set_index_buffer(pc->pipe, ib);
-
- pipe_resource_reference(&new_ib.buffer, NULL);
+ pipe_resource_reference(&new_info.index.resource, NULL);
}
struct primconvert_context *util_primconvert_create(struct pipe_context *pipe,
uint32_t primtypes_mask);
void util_primconvert_destroy(struct primconvert_context *pc);
-void util_primconvert_save_index_buffer(struct primconvert_context *pc,
- const struct pipe_index_buffer *ib);
void util_primconvert_save_rasterizer_state(struct primconvert_context *pc,
const struct pipe_rasterizer_state
*rast);
struct pipe_draw_info info;
struct pipe_transfer *transfer;
uint32_t *params;
- const unsigned num_params = info_in->indexed ? 5 : 4;
+ const unsigned num_params = info_in->index_size ? 5 : 4;
assert(info_in->indirect);
assert(!info_in->count_from_stream_output);
info.count = params[0];
info.instance_count = params[1];
info.start = params[2];
- info.index_bias = info_in->indexed ? params[3] : 0;
- info.start_instance = info_in->indexed ? params[4] : params[3];
+ info.index_bias = info_in->index_size ? params[3] : 0;
+ info.start_instance = info_in->index_size ? params[4] : params[3];
info.indirect = NULL;
pipe_buffer_unmap(pipe, transfer);
}
static inline void
-util_draw_elements(struct pipe_context *pipe, int index_bias,
- enum pipe_prim_type mode,
+util_draw_elements(struct pipe_context *pipe, unsigned index_size,
+ int index_bias, enum pipe_prim_type mode,
uint start,
uint count)
{
struct pipe_draw_info info;
util_draw_init_info(&info);
- info.indexed = TRUE;
+ info.index_size = index_size;
info.mode = mode;
info.start = start;
info.count = count;
static inline void
util_draw_elements_instanced(struct pipe_context *pipe,
+ unsigned index_size,
int index_bias,
enum pipe_prim_type mode,
uint start,
struct pipe_draw_info info;
util_draw_init_info(&info);
- info.indexed = TRUE;
+ info.index_size = index_size;
info.mode = mode;
info.start = start;
info.count = count;
util_dump_constant_buffer(FILE *stream,
const struct pipe_constant_buffer *state);
-void
-util_dump_index_buffer(FILE *stream, const struct pipe_index_buffer *state);
-
void
util_dump_vertex_buffer(FILE *stream,
const struct pipe_vertex_buffer *state);
}
-void
-util_dump_index_buffer(FILE *stream, const struct pipe_index_buffer *state)
-{
- if (!state) {
- util_dump_null(stream);
- return;
- }
-
- util_dump_struct_begin(stream, "pipe_index_buffer");
-
- util_dump_member(stream, uint, state, index_size);
- util_dump_member(stream, uint, state, offset);
- util_dump_member(stream, ptr, state, buffer);
- util_dump_member(stream, ptr, state, user_buffer);
-
- util_dump_struct_end(stream);
-}
-
-
void
util_dump_vertex_buffer(FILE *stream, const struct pipe_vertex_buffer *state)
{
util_dump_struct_begin(stream, "pipe_draw_info");
- util_dump_member(stream, bool, state, indexed);
+ util_dump_member(stream, uint, state, index_size);
+ util_dump_member(stream, uint, state, has_user_indices);
util_dump_member(stream, enum_prim_mode, state, mode);
util_dump_member(stream, uint, state, start);
util_dump_member(stream, bool, state, primitive_restart);
util_dump_member(stream, uint, state, restart_index);
+ util_dump_member(stream, ptr, state, index.resource);
util_dump_member(stream, ptr, state, count_from_stream_output);
if (!state->indirect) {
*dst_count = util_last_bit(enabled_buffers);
}
-
-void
-util_set_index_buffer(struct pipe_index_buffer *dst,
- const struct pipe_index_buffer *src)
-{
- if (src) {
- pipe_resource_reference(&dst->buffer, src->buffer);
- memcpy(dst, src, sizeof(*dst));
- }
- else {
- pipe_resource_reference(&dst->buffer, NULL);
- memset(dst, 0, sizeof(*dst));
- }
-}
-
/**
- * Given a user index buffer, save the structure to "saved", and upload it.
+ * Given a user index buffer, upload it and return the new buffer and offset.
*/
bool
-util_save_and_upload_index_buffer(struct pipe_context *pipe,
- const struct pipe_draw_info *info,
- const struct pipe_index_buffer *ib,
- struct pipe_index_buffer *out_saved)
+util_upload_index_buffer(struct pipe_context *pipe,
+ const struct pipe_draw_info *info,
+ struct pipe_resource **out_buffer,
+ unsigned *out_offset)
{
- struct pipe_index_buffer new_ib = {0};
- unsigned start_offset = info->start * ib->index_size;
+ unsigned start_offset = info->start * info->index_size;
u_upload_data(pipe->stream_uploader, start_offset,
- info->count * ib->index_size, 4,
- (char*)ib->user_buffer + start_offset,
- &new_ib.offset, &new_ib.buffer);
- if (!new_ib.buffer)
- return false;
+ info->count * info->index_size, 4,
+ (char*)info->index.user + start_offset,
+ out_offset, out_buffer);
u_upload_unmap(pipe->stream_uploader);
-
- new_ib.offset -= start_offset;
- new_ib.index_size = ib->index_size;
-
- util_set_index_buffer(out_saved, ib);
- pipe->set_index_buffer(pipe, &new_ib);
- pipe_resource_reference(&new_ib.buffer, NULL);
- return true;
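+ /* Bias the returned offset back by start_offset so the caller can keep using
+  * info->start unchanged; u_upload_data() was asked to place the copy at an
+  * offset of at least start_offset, so the subtraction cannot go negative. */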
+ *out_offset -= start_offset;
+ return *out_buffer != NULL;
}
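Callers no longer save and rebind index-buffer state around the upload; a hedged usage sketch of the new helper, modeled on the etnaviv and freedreno hunks further down in this patch (error handling abbreviated, names illustrative):

    struct pipe_resource *indexbuf = NULL;
    unsigned index_offset = 0;

    if (info->index_size) {
       /* user index arrays must be copied into a real buffer first */
       indexbuf = info->has_user_indices ? NULL : info->index.resource;
       if (info->has_user_indices &&
           !util_upload_index_buffer(pipe, info, &indexbuf, &index_offset))
          return;   /* upload failed */
    }

    /* ... program the hardware with indexbuf/index_offset and emit the draw ... */

    if (info->index_size && indexbuf != info->index.resource)
       pipe_resource_reference(&indexbuf, NULL);   /* drop the uploaded copy */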
struct pipe_query *
const struct pipe_vertex_buffer *src,
unsigned start_slot, unsigned count);
-void util_set_index_buffer(struct pipe_index_buffer *dst,
- const struct pipe_index_buffer *src);
-
-bool util_save_and_upload_index_buffer(struct pipe_context *pipe,
- const struct pipe_draw_info *info,
- const struct pipe_index_buffer *ib,
- struct pipe_index_buffer *out_saved);
+bool util_upload_index_buffer(struct pipe_context *pipe,
+ const struct pipe_draw_info *info,
+ struct pipe_resource **out_buffer,
+ unsigned *out_offset);
struct pipe_query *
util_begin_pipestat_query(struct pipe_context *ctx);
/* Ubyte indices. */
void util_shorten_ubyte_elts_to_userptr(struct pipe_context *context,
- const struct pipe_index_buffer *ib,
+ const struct pipe_draw_info *info,
unsigned add_transfer_flags,
int index_bias,
unsigned start,
unsigned short *out_map = out;
unsigned i;
- if (ib->user_buffer) {
- in_map = ib->user_buffer;
+ if (info->has_user_indices) {
+ in_map = info->index.user;
} else {
- in_map = pipe_buffer_map(context, ib->buffer,
+ in_map = pipe_buffer_map(context, info->index.resource,
PIPE_TRANSFER_READ |
add_transfer_flags,
&src_transfer);
/* Ushort indices. */
void util_rebuild_ushort_elts_to_userptr(struct pipe_context *context,
- const struct pipe_index_buffer *ib,
+ const struct pipe_draw_info *info,
unsigned add_transfer_flags,
int index_bias,
unsigned start, unsigned count,
unsigned short *out_map = out;
unsigned i;
- if (ib->user_buffer) {
- in_map = ib->user_buffer;
+ if (info->has_user_indices) {
+ in_map = info->index.user;
} else {
- in_map = pipe_buffer_map(context, ib->buffer,
+ in_map = pipe_buffer_map(context, info->index.resource,
PIPE_TRANSFER_READ |
add_transfer_flags,
&in_transfer);
/* Uint indices. */
void util_rebuild_uint_elts_to_userptr(struct pipe_context *context,
- const struct pipe_index_buffer *ib,
+ const struct pipe_draw_info *info,
unsigned add_transfer_flags,
int index_bias,
unsigned start, unsigned count,
unsigned int *out_map = out;
unsigned i;
- if (ib->user_buffer) {
- in_map = ib->user_buffer;
+ if (info->has_user_indices) {
+ in_map = info->index.user;
} else {
- in_map = pipe_buffer_map(context, ib->buffer,
+ in_map = pipe_buffer_map(context, info->index.resource,
PIPE_TRANSFER_READ |
add_transfer_flags,
&in_transfer);
struct pipe_context;
struct pipe_resource;
-struct pipe_index_buffer;
void util_shorten_ubyte_elts_to_userptr(struct pipe_context *context,
- const struct pipe_index_buffer *ib,
+ const struct pipe_draw_info *info,
unsigned add_transfer_flags,
int index_bias,
unsigned start,
void *out);
void util_rebuild_ushort_elts_to_userptr(struct pipe_context *context,
- const struct pipe_index_buffer *ib,
+ const struct pipe_draw_info *info,
unsigned add_transfer_flags,
int index_bias,
unsigned start, unsigned count,
void *out);
void util_rebuild_uint_elts_to_userptr(struct pipe_context *context,
- const struct pipe_index_buffer *ib,
+ const struct pipe_draw_info *info,
unsigned add_transfer_flags,
int index_bias,
unsigned start, unsigned count,
*/
enum pipe_error
util_translate_prim_restart_ib(struct pipe_context *context,
- struct pipe_index_buffer *src_buffer,
- struct pipe_resource **dst_buffer,
- unsigned num_indexes,
- unsigned restart_index)
+ const struct pipe_draw_info *info,
+ struct pipe_resource **dst_buffer)
{
struct pipe_screen *screen = context->screen;
struct pipe_transfer *src_transfer = NULL, *dst_transfer = NULL;
void *src_map = NULL, *dst_map = NULL;
- const unsigned src_index_size = src_buffer->index_size;
+ const unsigned src_index_size = info->index_size;
unsigned dst_index_size;
/* 1-byte indexes are converted to 2-byte indexes, 4-byte stays 4-byte */
- dst_index_size = MAX2(2, src_buffer->index_size);
+ dst_index_size = MAX2(2, info->index_size);
assert(dst_index_size == 2 || dst_index_size == 4);
/* no user buffers for now */
- assert(src_buffer->user_buffer == NULL);
+ assert(!info->has_user_indices);
/* Create new index buffer */
*dst_buffer = pipe_buffer_create(screen, PIPE_BIND_INDEX_BUFFER,
PIPE_USAGE_STREAM,
- num_indexes * dst_index_size);
+ info->count * dst_index_size);
if (!*dst_buffer)
goto error;
goto error;
/* Map original / src index buffer */
- src_map = pipe_buffer_map_range(context, src_buffer->buffer,
- src_buffer->offset,
- num_indexes * src_index_size,
+ src_map = pipe_buffer_map_range(context, info->index.resource,
+ info->start * src_index_size,
+ info->count * src_index_size,
PIPE_TRANSFER_READ,
&src_transfer);
if (!src_map)
uint8_t *src = (uint8_t *) src_map;
uint16_t *dst = (uint16_t *) dst_map;
unsigned i;
- for (i = 0; i < num_indexes; i++) {
- dst[i] = (src[i] == restart_index) ? 0xffff : src[i];
+ for (i = 0; i < info->count; i++) {
+ dst[i] = (src[i] == info->restart_index) ? 0xffff : src[i];
}
}
else if (src_index_size == 2 && dst_index_size == 2) {
uint16_t *src = (uint16_t *) src_map;
uint16_t *dst = (uint16_t *) dst_map;
unsigned i;
- for (i = 0; i < num_indexes; i++) {
- dst[i] = (src[i] == restart_index) ? 0xffff : src[i];
+ for (i = 0; i < info->count; i++) {
+ dst[i] = (src[i] == info->restart_index) ? 0xffff : src[i];
}
}
else {
unsigned i;
assert(src_index_size == 4);
assert(dst_index_size == 4);
- for (i = 0; i < num_indexes; i++) {
- dst[i] = (src[i] == restart_index) ? 0xffffffff : src[i];
+ for (i = 0; i < info->count; i++) {
+ dst[i] = (src[i] == info->restart_index) ? 0xffffffff : src[i];
}
}
*/
enum pipe_error
util_draw_vbo_without_prim_restart(struct pipe_context *context,
- const struct pipe_index_buffer *ib,
const struct pipe_draw_info *info)
{
const void *src_map;
struct pipe_transfer *src_transfer = NULL;
unsigned i, start, count;
- assert(info->indexed);
+ assert(info->index_size);
assert(info->primitive_restart);
/* Get pointer to the index data */
- if (ib->buffer) {
+ if (!info->has_user_indices) {
/* map the index buffer (only the range we need to scan) */
- src_map = pipe_buffer_map_range(context, ib->buffer,
- ib->offset + info->start * ib->index_size,
- info->count * ib->index_size,
+ src_map = pipe_buffer_map_range(context, info->index.resource,
+ info->start * info->index_size,
+ info->count * info->index_size,
PIPE_TRANSFER_READ,
&src_transfer);
if (!src_map) {
}
}
else {
- if (!ib->user_buffer) {
+ if (!info->index.user) {
debug_printf("User-space index buffer is null!");
return PIPE_ERROR_BAD_INPUT;
}
- src_map = (const uint8_t *) ib->user_buffer
- + ib->offset
- + info->start * ib->index_size;
+ src_map = (const uint8_t *) info->index.user
+ + info->start * info->index_size;
}
#define SCAN_INDEXES(TYPE) \
} \
}
- start = info->start;
+ start = 0;
count = 0;
- switch (ib->index_size) {
+ switch (info->index_size) {
case 1:
SCAN_INDEXES(uint8_t);
break;
struct pipe_context;
struct pipe_draw_info;
-struct pipe_index_buffer;
+union pipe_index_binding;
struct pipe_resource;
enum pipe_error
util_translate_prim_restart_ib(struct pipe_context *context,
- struct pipe_index_buffer *src_buffer,
- struct pipe_resource **dst_buffer,
- unsigned num_indexes,
- unsigned restart_index);
+ const struct pipe_draw_info *info,
+ struct pipe_resource **dst_buffer);
enum pipe_error
util_draw_vbo_without_prim_restart(struct pipe_context *context,
- const struct pipe_index_buffer *ib,
const struct pipe_draw_info *info);
uint32_t dirty_real_vb_mask; /* which buffers are dirty since the last
call of set_vertex_buffers */
- /* The index buffer. */
- struct pipe_index_buffer index_buffer;
-
/* Vertex elements. */
struct u_vbuf_elements *ve, *ve_saved;
unsigned num_vb = screen->get_shader_param(screen, PIPE_SHADER_VERTEX,
PIPE_SHADER_CAP_MAX_INPUTS);
- mgr->pipe->set_index_buffer(mgr->pipe, NULL);
- pipe_resource_reference(&mgr->index_buffer.buffer, NULL);
-
mgr->pipe->set_vertex_buffers(mgr->pipe, 0, num_vb, NULL);
for (i = 0; i < PIPE_MAX_ATTRIBS; i++)
static enum pipe_error
u_vbuf_translate_buffers(struct u_vbuf *mgr, struct translate_key *key,
+ const struct pipe_draw_info *info,
unsigned vb_mask, unsigned out_vb,
int start_vertex, unsigned num_vertices,
- int start_index, unsigned num_indices, int min_index,
- boolean unroll_indices)
+ int min_index, boolean unroll_indices)
{
struct translate *tr;
struct pipe_transfer *vb_transfer[PIPE_MAX_ATTRIBS] = {0};
/* Translate. */
if (unroll_indices) {
- struct pipe_index_buffer *ib = &mgr->index_buffer;
struct pipe_transfer *transfer = NULL;
- unsigned offset = ib->offset + start_index * ib->index_size;
+ unsigned offset = info->start * info->index_size;
uint8_t *map;
- assert((ib->buffer || ib->user_buffer) && ib->index_size);
-
/* Create and map the output buffer. */
u_upload_alloc(mgr->pipe->stream_uploader, 0,
- key->output_stride * num_indices, 4,
+ key->output_stride * info->count, 4,
&out_offset, &out_buffer,
(void**)&out_map);
if (!out_buffer)
return PIPE_ERROR_OUT_OF_MEMORY;
- if (ib->user_buffer) {
- map = (uint8_t*)ib->user_buffer + offset;
+ if (info->has_user_indices) {
+ map = (uint8_t*)info->index.user + offset;
} else {
- map = pipe_buffer_map_range(mgr->pipe, ib->buffer, offset,
- num_indices * ib->index_size,
+ map = pipe_buffer_map_range(mgr->pipe, info->index.resource, offset,
+ info->count * info->index_size,
PIPE_TRANSFER_READ, &transfer);
}
- switch (ib->index_size) {
+ switch (info->index_size) {
case 4:
- tr->run_elts(tr, (unsigned*)map, num_indices, 0, 0, out_map);
+ tr->run_elts(tr, (unsigned*)map, info->count, 0, 0, out_map);
break;
case 2:
- tr->run_elts16(tr, (uint16_t*)map, num_indices, 0, 0, out_map);
+ tr->run_elts16(tr, (uint16_t*)map, info->count, 0, 0, out_map);
break;
case 1:
- tr->run_elts8(tr, map, num_indices, 0, 0, out_map);
+ tr->run_elts8(tr, map, info->count, 0, 0, out_map);
break;
}
static boolean
u_vbuf_translate_begin(struct u_vbuf *mgr,
+ const struct pipe_draw_info *info,
int start_vertex, unsigned num_vertices,
- int start_instance, unsigned num_instances,
- int start_index, unsigned num_indices, int min_index,
- boolean unroll_indices)
+ int min_index, boolean unroll_indices)
{
unsigned mask[VB_NUM] = {0};
struct translate_key key[VB_NUM];
mgr->ve->used_vb_mask;
int start[VB_NUM] = {
- start_vertex, /* VERTEX */
- start_instance, /* INSTANCE */
- 0 /* CONST */
+ start_vertex, /* VERTEX */
+ info->start_instance, /* INSTANCE */
+ 0 /* CONST */
};
unsigned num[VB_NUM] = {
- num_vertices, /* VERTEX */
- num_instances, /* INSTANCE */
- 1 /* CONST */
+ num_vertices, /* VERTEX */
+ info->instance_count, /* INSTANCE */
+ 1 /* CONST */
};
memset(key, 0, sizeof(key));
for (type = 0; type < VB_NUM; type++) {
if (key[type].nr_elements) {
enum pipe_error err;
- err = u_vbuf_translate_buffers(mgr, &key[type], mask[type],
+ err = u_vbuf_translate_buffers(mgr, &key[type], info, mask[type],
mgr->fallback_vbs[type],
- start[type], num[type],
- start_index, num_indices, min_index,
+ start[type], num[type], min_index,
unroll_indices && type == VB_VERTEX);
if (err != PIPE_OK)
return FALSE;
mgr->dirty_real_vb_mask |= ~mask;
}
-void u_vbuf_set_index_buffer(struct u_vbuf *mgr,
- const struct pipe_index_buffer *ib)
-{
- struct pipe_context *pipe = mgr->pipe;
-
- if (ib) {
- assert(ib->offset % ib->index_size == 0);
- pipe_resource_reference(&mgr->index_buffer.buffer, ib->buffer);
- memcpy(&mgr->index_buffer, ib, sizeof(*ib));
- } else {
- pipe_resource_reference(&mgr->index_buffer.buffer, NULL);
- }
-
- pipe->set_index_buffer(pipe, ib);
-}
-
static enum pipe_error
u_vbuf_upload_buffers(struct u_vbuf *mgr,
int start_vertex, unsigned num_vertices,
}
static void u_vbuf_get_minmax_index(struct pipe_context *pipe,
- struct pipe_index_buffer *ib,
- boolean primitive_restart,
- unsigned restart_index,
- unsigned start, unsigned count,
- int *out_min_index,
- int *out_max_index)
+ const struct pipe_draw_info *info,
+ int *out_min_index, int *out_max_index)
{
struct pipe_transfer *transfer = NULL;
const void *indices;
unsigned i;
- if (ib->user_buffer) {
- indices = (uint8_t*)ib->user_buffer +
- ib->offset + start * ib->index_size;
+ if (info->has_user_indices) {
+ indices = (uint8_t*)info->index.user +
+ info->start * info->index_size;
} else {
- indices = pipe_buffer_map_range(pipe, ib->buffer,
- ib->offset + start * ib->index_size,
- count * ib->index_size,
+ indices = pipe_buffer_map_range(pipe, info->index.resource,
+ info->start * info->index_size,
+ info->count * info->index_size,
PIPE_TRANSFER_READ, &transfer);
}
- switch (ib->index_size) {
+ switch (info->index_size) {
case 4: {
const unsigned *ui_indices = (const unsigned*)indices;
unsigned max_ui = 0;
unsigned min_ui = ~0U;
- if (primitive_restart) {
- for (i = 0; i < count; i++) {
- if (ui_indices[i] != restart_index) {
+ if (info->primitive_restart) {
+ for (i = 0; i < info->count; i++) {
+ if (ui_indices[i] != info->restart_index) {
if (ui_indices[i] > max_ui) max_ui = ui_indices[i];
if (ui_indices[i] < min_ui) min_ui = ui_indices[i];
}
}
}
else {
- for (i = 0; i < count; i++) {
+ for (i = 0; i < info->count; i++) {
if (ui_indices[i] > max_ui) max_ui = ui_indices[i];
if (ui_indices[i] < min_ui) min_ui = ui_indices[i];
}
const unsigned short *us_indices = (const unsigned short*)indices;
unsigned max_us = 0;
unsigned min_us = ~0U;
- if (primitive_restart) {
- for (i = 0; i < count; i++) {
- if (us_indices[i] != restart_index) {
+ if (info->primitive_restart) {
+ for (i = 0; i < info->count; i++) {
+ if (us_indices[i] != info->restart_index) {
if (us_indices[i] > max_us) max_us = us_indices[i];
if (us_indices[i] < min_us) min_us = us_indices[i];
}
}
}
else {
- for (i = 0; i < count; i++) {
+ for (i = 0; i < info->count; i++) {
if (us_indices[i] > max_us) max_us = us_indices[i];
if (us_indices[i] < min_us) min_us = us_indices[i];
}
const unsigned char *ub_indices = (const unsigned char*)indices;
unsigned max_ub = 0;
unsigned min_ub = ~0U;
- if (primitive_restart) {
- for (i = 0; i < count; i++) {
- if (ub_indices[i] != restart_index) {
+ if (info->primitive_restart) {
+ for (i = 0; i < info->count; i++) {
+ if (ub_indices[i] != info->restart_index) {
if (ub_indices[i] > max_ub) max_ub = ub_indices[i];
if (ub_indices[i] < min_ub) min_ub = ub_indices[i];
}
}
}
else {
- for (i = 0; i < count; i++) {
+ for (i = 0; i < info->count; i++) {
if (ub_indices[i] > max_ub) max_ub = ub_indices[i];
if (ub_indices[i] < min_ub) min_ub = ub_indices[i];
}
struct pipe_transfer *transfer = NULL;
int *data;
- if (new_info.indexed) {
+ if (new_info.index_size) {
data = pipe_buffer_map_range(pipe, new_info.indirect->buffer,
new_info.indirect->offset, 20,
PIPE_TRANSFER_READ, &transfer);
new_info.indirect = NULL;
}
- if (new_info.indexed) {
+ if (new_info.index_size) {
/* See if anything needs to be done for per-vertex attribs. */
if (u_vbuf_need_minmax_index(mgr)) {
int max_index;
min_index = new_info.min_index;
max_index = new_info.max_index;
} else {
- u_vbuf_get_minmax_index(mgr->pipe, &mgr->index_buffer,
- new_info.primitive_restart,
- new_info.restart_index, new_info.start,
- new_info.count, &min_index, &max_index);
+ u_vbuf_get_minmax_index(mgr->pipe, &new_info,
+ &min_index, &max_index);
}
assert(min_index <= max_index);
if (unroll_indices ||
incompatible_vb_mask ||
mgr->ve->incompatible_elem_mask) {
- if (!u_vbuf_translate_begin(mgr, start_vertex, num_vertices,
- new_info.start_instance,
- new_info.instance_count, new_info.start,
- new_info.count, min_index, unroll_indices)) {
+ if (!u_vbuf_translate_begin(mgr, &new_info, start_vertex, num_vertices,
+ min_index, unroll_indices)) {
debug_warn_once("u_vbuf_translate_begin() failed");
return;
}
if (unroll_indices) {
- new_info.indexed = FALSE;
+ new_info.index_size = 0;
new_info.index_bias = 0;
new_info.min_index = 0;
new_info.max_index = new_info.count - 1;
void u_vbuf_set_vertex_buffers(struct u_vbuf *mgr,
unsigned start_slot, unsigned count,
const struct pipe_vertex_buffer *bufs);
-void u_vbuf_set_index_buffer(struct u_vbuf *mgr,
- const struct pipe_index_buffer *ib);
void u_vbuf_draw_vbo(struct u_vbuf *mgr, const struct pipe_draw_info *info);
/* Save/restore functionality. */
* ``set_vertex_buffers``
-* ``set_index_buffer``
-
Non-CSO State
^^^^^^^^^^^^^
Every instance with instanceID in the range between ``start_instance`` and
``start_instance``+``instance_count``-1, inclusive, will be drawn.
-If there is an index buffer bound, and ``indexed`` field is true, all vertex
-indices will be looked up in the index buffer.
+If ``index_size`` != 0, all vertex indices will be looked up from the index
+buffer.
In indexed draw, ``min_index`` and ``max_index`` respectively provide a lower
and upper bound of the indices contained in the index buffer inside the range
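For illustration only (not part of this patch): with the fields documented above, a driver sources index data roughly as in the sketch below. `map_index_data` and the local names are hypothetical; the `pipe_draw_info` fields are the ones introduced here.

    static const void *
    map_index_data(struct pipe_context *pipe, const struct pipe_draw_info *info,
                   struct pipe_transfer **transfer)
    {
       const uint8_t *ptr;

       assert(info->index_size);   /* index_size == 0 means a non-indexed draw */

       if (info->has_user_indices) {
          ptr = info->index.user;  /* CPU pointer supplied by the state tracker */
          *transfer = NULL;
       } else {
          ptr = pipe_buffer_map(pipe, info->index.resource,
                                PIPE_TRANSFER_READ, transfer);
       }

       /* there is no separate index-buffer offset any more; 'start' is counted
        * in elements from the beginning of the buffer */
       return ptr + info->start * info->index_size;
    }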
pipe->set_vertex_buffers(pipe, start, num_buffers, buffers);
}
-static void
-dd_context_set_index_buffer(struct pipe_context *_pipe,
- const struct pipe_index_buffer *ib)
-{
- struct dd_context *dctx = dd_context(_pipe);
- struct pipe_context *pipe = dctx->pipe;
-
- safe_memcpy(&dctx->draw_state.index_buffer, ib, sizeof(*ib));
- pipe->set_index_buffer(pipe, ib);
-}
-
static void
dd_context_set_stream_output_targets(struct pipe_context *_pipe,
unsigned num_targets,
CTX_INIT(set_shader_buffers);
CTX_INIT(set_shader_images);
CTX_INIT(set_vertex_buffers);
- CTX_INIT(set_index_buffer);
CTX_INIT(create_stream_output_target);
CTX_INIT(stream_output_target_destroy);
CTX_INIT(set_stream_output_targets);
int sh, i;
DUMP(draw_info, info);
- if (info->indexed) {
- DUMP(index_buffer, &dstate->index_buffer);
- if (dstate->index_buffer.buffer)
- DUMP_M(resource, &dstate->index_buffer, buffer);
- }
if (info->count_from_stream_output)
DUMP_M(stream_output_target, info,
count_from_stream_output);
pipe_so_target_reference(&dst->info.draw_vbo.draw.count_from_stream_output, NULL);
pipe_resource_reference(&dst->info.draw_vbo.indirect.buffer, NULL);
pipe_resource_reference(&dst->info.draw_vbo.indirect.indirect_draw_count, NULL);
+ if (dst->info.draw_vbo.draw.index_size &&
+ !dst->info.draw_vbo.draw.has_user_indices)
+ pipe_resource_reference(&dst->info.draw_vbo.draw.index.resource, NULL);
+ else
+ dst->info.draw_vbo.draw.index.user = NULL;
break;
case CALL_LAUNCH_GRID:
pipe_resource_reference(&dst->info.launch_grid.indirect, NULL);
src->info.draw_vbo.indirect.buffer);
pipe_resource_reference(&dst->info.draw_vbo.indirect.indirect_draw_count,
src->info.draw_vbo.indirect.indirect_draw_count);
+
+ if (dst->info.draw_vbo.draw.index_size &&
+ !dst->info.draw_vbo.draw.has_user_indices)
+ pipe_resource_reference(&dst->info.draw_vbo.draw.index.resource, NULL);
+ else
+ dst->info.draw_vbo.draw.index.user = NULL;
+
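+ /* Take the new reference through dst before the struct copy below; the copy
+  * then stores the same pointer value, keeping the refcount consistent. */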
+ if (src->info.draw_vbo.draw.index_size &&
+ !src->info.draw_vbo.draw.has_user_indices) {
+ pipe_resource_reference(&dst->info.draw_vbo.draw.index.resource,
+ src->info.draw_vbo.draw.index.resource);
+ }
+
dst->info.draw_vbo = src->info.draw_vbo;
if (!src->info.draw_vbo.draw.indirect)
dst->info.draw_vbo.draw.indirect = NULL;
/* Just clear pointers to gallium objects. Don't clear the whole structure,
* because it would kill performance with its size of 130 KB.
*/
- memset(&state->base.index_buffer, 0,
- sizeof(state->base.index_buffer));
memset(state->base.vertex_buffers, 0,
sizeof(state->base.vertex_buffers));
memset(state->base.so_targets, 0,
struct dd_draw_state *dst = &state->base;
unsigned i,j;
- util_set_index_buffer(&dst->index_buffer, NULL);
-
for (i = 0; i < ARRAY_SIZE(dst->vertex_buffers); i++)
pipe_vertex_buffer_unreference(&dst->vertex_buffers[i]);
for (i = 0; i < ARRAY_SIZE(dst->so_targets); i++)
dst->render_cond.query = NULL;
}
- util_set_index_buffer(&dst->index_buffer, &src->index_buffer);
-
for (i = 0; i < ARRAY_SIZE(src->vertex_buffers); i++) {
pipe_vertex_buffer_reference(&dst->vertex_buffers[i],
&src->vertex_buffers[i]);
unsigned mode;
} render_cond;
- struct pipe_index_buffer index_buffer;
struct pipe_vertex_buffer vertex_buffers[PIPE_MAX_ATTRIBS];
unsigned num_so_targets;
* buffer state as dirty
*/
- if (info->indexed) {
+ if (info->index_size) {
uint32_t new_control = ctx->index_buffer.FE_INDEX_STREAM_CONTROL;
if (info->primitive_restart)
if (!(ctx->prim_hwsupport & (1 << info->mode))) {
struct primconvert_context *primconvert = ctx->primconvert;
- util_primconvert_save_index_buffer(primconvert, &ctx->index_buffer.ib);
util_primconvert_save_rasterizer_state(primconvert, ctx->rasterizer);
util_primconvert_draw_vbo(primconvert, info);
return;
}
/* Upload a user index buffer. */
- struct pipe_index_buffer ibuffer_saved = {};
- if (info->indexed && ctx->index_buffer.ib.user_buffer &&
- !util_save_and_upload_index_buffer(pctx, info, &ctx->index_buffer.ib,
- &ibuffer_saved)) {
+ unsigned index_offset = 0;
+ struct pipe_resource *indexbuf = info->has_user_indices ? NULL : info->index.resource;
+ if (info->index_size && info->has_user_indices &&
+ !util_upload_index_buffer(pctx, info, &indexbuf, &index_offset)) {
BUG("Index buffer upload failed.");
return;
}
- if (info->indexed && !ctx->index_buffer.FE_INDEX_STREAM_BASE_ADDR.bo) {
+ if (info->index_size && indexbuf) {
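+ /* index_offset is nonzero only for user index data uploaded above; the draw
+  * command itself still applies info->start on top of this base address. */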
+ ctx->index_buffer.FE_INDEX_STREAM_BASE_ADDR.bo = etna_resource(indexbuf)->bo;
+ ctx->index_buffer.FE_INDEX_STREAM_BASE_ADDR.offset = index_offset;
+ ctx->index_buffer.FE_INDEX_STREAM_BASE_ADDR.flags = ETNA_RELOC_READ;
+ ctx->index_buffer.FE_INDEX_STREAM_CONTROL = translate_index_size(info->index_size);
+ ctx->dirty |= ETNA_DIRTY_INDEX_BUFFER;
+ }
+
+ if (info->index_size && !ctx->index_buffer.FE_INDEX_STREAM_BASE_ADDR.bo) {
BUG("Unsupported or no index buffer");
return;
}
}
/* Mark index buffer as being read */
- resource_read(ctx, ctx->index_buffer.ib.buffer);
+ resource_read(ctx, indexbuf);
/* Mark textures as being read */
for (i = 0; i < PIPE_MAX_SAMPLERS; i++)
/* First, sync state, then emit DRAW_PRIMITIVES or DRAW_INDEXED_PRIMITIVES */
etna_emit_state(ctx);
- if (info->indexed)
+ if (info->index_size)
etna_draw_indexed_primitives(ctx->stream, draw_mode, info->start, prims, info->index_bias);
else
etna_draw_primitives(ctx->stream, draw_mode, info->start, prims);
etna_resource(ctx->framebuffer.cbuf->texture)->seqno++;
if (ctx->framebuffer.zsbuf)
etna_resource(ctx->framebuffer.zsbuf->texture)->seqno++;
- if (info->indexed && ibuffer_saved.user_buffer)
- pctx->set_index_buffer(pctx, &ibuffer_saved);
+ if (info->index_size && indexbuf != info->index.resource)
+ pipe_resource_reference(&indexbuf, NULL);
}
static void
struct etna_shader_variant;
struct etna_index_buffer {
- struct pipe_index_buffer ib;
struct etna_reloc FE_INDEX_STREAM_BASE_ADDR;
uint32_t FE_INDEX_STREAM_CONTROL;
uint32_t FE_PRIMITIVE_RESTART_INDEX;
/*03818*/ EMIT_STATE(GL_MULTI_SAMPLE_CONFIG, val);
}
- if (likely(dirty & (ETNA_DIRTY_INDEX_BUFFER)) &&
- ctx->index_buffer.ib.buffer) {
+ if (likely(dirty & (ETNA_DIRTY_INDEX_BUFFER))) {
/*00644*/ EMIT_STATE_RELOC(FE_INDEX_STREAM_BASE_ADDR, &ctx->index_buffer.FE_INDEX_STREAM_BASE_ADDR);
/*00648*/ EMIT_STATE(FE_INDEX_STREAM_CONTROL, ctx->index_buffer.FE_INDEX_STREAM_CONTROL);
}
ctx->dirty |= ETNA_DIRTY_VERTEX_BUFFERS;
}
-static void
-etna_set_index_buffer(struct pipe_context *pctx, const struct pipe_index_buffer *ib)
-{
- struct etna_context *ctx = etna_context(pctx);
- uint32_t ctrl;
-
- if (ib) {
- pipe_resource_reference(&ctx->index_buffer.ib.buffer, ib->buffer);
- memcpy(&ctx->index_buffer.ib, ib, sizeof(ctx->index_buffer.ib));
- ctrl = translate_index_size(ctx->index_buffer.ib.index_size);
- } else {
- pipe_resource_reference(&ctx->index_buffer.ib.buffer, NULL);
- ctrl = 0;
- }
-
- if (ctx->index_buffer.ib.buffer && ctrl != ETNA_NO_MATCH) {
- ctx->index_buffer.FE_INDEX_STREAM_BASE_ADDR.bo = etna_resource(ctx->index_buffer.ib.buffer)->bo;
- ctx->index_buffer.FE_INDEX_STREAM_BASE_ADDR.offset = ctx->index_buffer.ib.offset;
- ctx->index_buffer.FE_INDEX_STREAM_BASE_ADDR.flags = ETNA_RELOC_READ;
- ctx->index_buffer.FE_INDEX_STREAM_CONTROL = ctrl;
- } else {
- ctx->index_buffer.FE_INDEX_STREAM_BASE_ADDR.bo = NULL;
- ctx->index_buffer.FE_INDEX_STREAM_CONTROL = 0;
- }
-
- ctx->dirty |= ETNA_DIRTY_INDEX_BUFFER;
-}
-
static void
etna_blend_state_bind(struct pipe_context *pctx, void *bs)
{
pctx->set_viewport_states = etna_set_viewport_states;
pctx->set_vertex_buffers = etna_set_vertex_buffers;
- pctx->set_index_buffer = etna_set_index_buffer;
pctx->bind_blend_state = etna_blend_state_bind;
pctx->delete_blend_state = etna_blend_state_delete;
}
static bool
-fd2_draw_vbo(struct fd_context *ctx, const struct pipe_draw_info *info)
+fd2_draw_vbo(struct fd_context *ctx, const struct pipe_draw_info *info,
+ unsigned index_offset)
{
struct fd_ringbuffer *ring = ctx->batch->draw;
OUT_RING(ring, add_sat(info->min_index, info->index_bias)); /* VFD_INDEX_MIN */
OUT_RING(ring, add_sat(info->max_index, info->index_bias)); /* VFD_INDEX_MAX */
OUT_RING(ring, info->start_instance); /* VFD_INSTANCEID_OFFSET */
- OUT_RING(ring, info->indexed ? info->index_bias : info->start); /* VFD_INDEX_OFFSET */
+ OUT_RING(ring, info->index_size ? info->index_bias : info->start); /* VFD_INDEX_OFFSET */
OUT_PKT0(ring, REG_A3XX_PC_RESTART_INDEX, 1);
OUT_RING(ring, info->primitive_restart ? /* PC_RESTART_INDEX */
}
static bool
-fd3_draw_vbo(struct fd_context *ctx, const struct pipe_draw_info *info)
+fd3_draw_vbo(struct fd_context *ctx, const struct pipe_draw_info *info,
+ unsigned index_offset)
{
struct fd3_context *fd3_ctx = fd3_context(ctx);
struct fd3_emit emit = {
val |= A3XX_PC_PRIM_VTX_CNTL_STRIDE_IN_VPC(stride_in_vpc);
}
- if (info->indexed && info->primitive_restart) {
+ if (info->index_size && info->primitive_restart) {
val |= A3XX_PC_PRIM_VTX_CNTL_PRIMITIVE_RESTART;
}
static void
draw_impl(struct fd_context *ctx, struct fd_ringbuffer *ring,
- struct fd4_emit *emit)
+ struct fd4_emit *emit, unsigned index_offset)
{
const struct pipe_draw_info *info = emit->info;
enum pc_di_primtype primtype = ctx->primtypes[info->mode];
fd4_emit_vertex_bufs(ring, emit);
OUT_PKT0(ring, REG_A4XX_VFD_INDEX_OFFSET, 2);
- OUT_RING(ring, info->indexed ? info->index_bias : info->start); /* VFD_INDEX_OFFSET */
+ OUT_RING(ring, info->index_size ? info->index_bias : info->start); /* VFD_INDEX_OFFSET */
OUT_RING(ring, info->start_instance); /* ??? UNKNOWN_2209 */
OUT_PKT0(ring, REG_A4XX_PC_RESTART_INDEX, 1);
fd4_draw_emit(ctx->batch, ring, primtype,
emit->key.binning_pass ? IGNORE_VISIBILITY : USE_VISIBILITY,
- info);
+ info, index_offset);
}
/* fixup dirty shader state in case some "unrelated" (from the state-
}
static bool
-fd4_draw_vbo(struct fd_context *ctx, const struct pipe_draw_info *info)
+fd4_draw_vbo(struct fd_context *ctx, const struct pipe_draw_info *info,
+ unsigned index_offset)
{
struct fd4_context *fd4_ctx = fd4_context(ctx);
struct fd4_emit emit = {
OUT_RING(ring, A4XX_RB_RENDER_CONTROL_DISABLE_COLOR_PIPE);
}
- draw_impl(ctx, ctx->batch->draw, &emit);
+ draw_impl(ctx, ctx->batch->draw, &emit, index_offset);
if (ctx->rasterizer->rasterizer_discard) {
fd_wfi(ctx->batch, ring);
emit.dirty = dirty & ~(FD_DIRTY_BLEND);
emit.vp = NULL; /* we changed key so need to refetch vp */
emit.fp = NULL;
- draw_impl(ctx, ctx->batch->binning, &emit);
+ draw_impl(ctx, ctx->batch->binning, &emit, index_offset);
fd_context_all_clean(ctx);
fd4_draw_emit(struct fd_batch *batch, struct fd_ringbuffer *ring,
enum pc_di_primtype primtype,
enum pc_di_vis_cull_mode vismode,
- const struct pipe_draw_info *info)
+ const struct pipe_draw_info *info,
+ unsigned index_offset)
{
struct pipe_resource *idx_buffer = NULL;
enum a4xx_index_size idx_type;
enum pc_di_src_sel src_sel;
uint32_t idx_size, idx_offset;
- if (info->indexed) {
- struct pipe_index_buffer *idx = &batch->ctx->indexbuf;
+ if (info->index_size) {
+ assert(!info->has_user_indices);
- assert(!idx->user_buffer);
-
- idx_buffer = idx->buffer;
- idx_type = fd4_size2indextype(idx->index_size);
- idx_size = idx->index_size * info->count;
- idx_offset = idx->offset + (info->start * idx->index_size);
+ idx_buffer = info->index.resource;
+ idx_type = fd4_size2indextype(info->index_size);
+ idx_size = info->index_size * info->count;
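+ /* index_offset is the byte offset of a user index array uploaded by
+  * fd_draw_vbo() (0 when drawing straight from a resource); add the draw's
+  * own start offset on top. */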
+ idx_offset = index_offset + info->start * info->index_size;
src_sel = DI_SRC_SEL_DMA;
} else {
idx_buffer = NULL;
fd4_rasterizer_stateobj(ctx->rasterizer);
uint32_t val = rast->pc_prim_vtx_cntl;
- if (info->indexed && info->primitive_restart)
+ if (info->index_size && info->primitive_restart)
val |= A4XX_PC_PRIM_VTX_CNTL_PRIMITIVE_RESTART;
val |= COND(vp->writes_psize, A4XX_PC_PRIM_VTX_CNTL_PSIZE);
static void
draw_impl(struct fd_context *ctx, struct fd_ringbuffer *ring,
- struct fd5_emit *emit)
+ struct fd5_emit *emit, unsigned index_offset)
{
const struct pipe_draw_info *info = emit->info;
enum pc_di_primtype primtype = ctx->primtypes[info->mode];
fd5_emit_vertex_bufs(ring, emit);
OUT_PKT4(ring, REG_A5XX_VFD_INDEX_OFFSET, 2);
- OUT_RING(ring, info->indexed ? info->index_bias : info->start); /* VFD_INDEX_OFFSET */
+ OUT_RING(ring, info->index_size ? info->index_bias : info->start); /* VFD_INDEX_OFFSET */
OUT_RING(ring, info->start_instance); /* ??? UNKNOWN_2209 */
OUT_PKT4(ring, REG_A5XX_PC_RESTART_INDEX, 1);
fd5_emit_render_cntl(ctx, false);
fd5_draw_emit(ctx->batch, ring, primtype,
emit->key.binning_pass ? IGNORE_VISIBILITY : USE_VISIBILITY,
- info);
+ info, index_offset);
}
/* fixup dirty shader state in case some "unrelated" (from the state-
}
static bool
-fd5_draw_vbo(struct fd_context *ctx, const struct pipe_draw_info *info)
+fd5_draw_vbo(struct fd_context *ctx, const struct pipe_draw_info *info,
+ unsigned index_offset)
{
struct fd5_context *fd5_ctx = fd5_context(ctx);
struct fd5_emit emit = {
emit.key.binning_pass = false;
emit.dirty = dirty;
- draw_impl(ctx, ctx->batch->draw, &emit);
+ draw_impl(ctx, ctx->batch->draw, &emit, index_offset);
// /* and now binning pass: */
// emit.key.binning_pass = true;
fd5_draw_emit(struct fd_batch *batch, struct fd_ringbuffer *ring,
enum pc_di_primtype primtype,
enum pc_di_vis_cull_mode vismode,
- const struct pipe_draw_info *info)
+ const struct pipe_draw_info *info,
+ unsigned index_offset)
{
struct pipe_resource *idx_buffer = NULL;
enum a4xx_index_size idx_type;
enum pc_di_src_sel src_sel;
uint32_t idx_size, idx_offset;
- if (info->indexed) {
- struct pipe_index_buffer *idx = &batch->ctx->indexbuf;
+ if (info->index_size) {
+ assert(!info->has_user_indices);
- assert(!idx->user_buffer);
-
- idx_buffer = idx->buffer;
- idx_type = fd4_size2indextype(idx->index_size);
- idx_size = idx->index_size * info->count;
- idx_offset = idx->offset + (info->start * idx->index_size);
+ idx_buffer = info->index.resource;
+ idx_type = fd4_size2indextype(info->index_size);
+ idx_size = info->index_size * info->count;
+ idx_offset = index_offset + info->start * info->index_size;
src_sel = DI_SRC_SEL_DMA;
} else {
idx_buffer = NULL;
FD_DIRTY_VIEWPORT = BIT(8),
FD_DIRTY_VTXSTATE = BIT(9),
FD_DIRTY_VTXBUF = BIT(10),
- FD_DIRTY_INDEXBUF = BIT(11),
+
FD_DIRTY_SCISSOR = BIT(12),
FD_DIRTY_STREAMOUT = BIT(13),
FD_DIRTY_UCP = BIT(14),
struct pipe_viewport_state viewport;
struct fd_constbuf_stateobj constbuf[PIPE_SHADER_TYPES];
struct fd_shaderbuf_stateobj shaderbuf[PIPE_SHADER_TYPES];
- struct pipe_index_buffer indexbuf;
struct fd_streamout_stateobj streamout;
struct pipe_clip_state ucp;
void (*emit_sysmem_fini)(struct fd_batch *batch);
/* draw: */
- bool (*draw_vbo)(struct fd_context *ctx, const struct pipe_draw_info *info);
+ bool (*draw_vbo)(struct fd_context *ctx, const struct pipe_draw_info *info,
+ unsigned index_offset);
void (*clear)(struct fd_context *ctx, unsigned buffers,
const union pipe_color_union *color, double depth, unsigned stencil);
if (!fd_supported_prim(ctx, info->mode)) {
if (ctx->streamout.num_targets > 0)
debug_error("stream-out with emulated prims");
- util_primconvert_save_index_buffer(ctx->primconvert, &ctx->indexbuf);
util_primconvert_save_rasterizer_state(ctx->primconvert, ctx->rasterizer);
util_primconvert_draw_vbo(ctx->primconvert, info);
return;
}
/* Upload a user index buffer. */
- struct pipe_index_buffer ibuffer_saved = {};
- if (info->indexed && ctx->indexbuf.user_buffer &&
- !util_save_and_upload_index_buffer(pctx, info, &ctx->indexbuf,
- &ibuffer_saved)) {
+ struct pipe_resource *indexbuf = info->has_user_indices ? NULL : info->index.resource;
+ unsigned index_offset = 0;
+ if (info->index_size && info->has_user_indices &&
+ !util_upload_index_buffer(pctx, info, &indexbuf, &index_offset)) {
return;
}
}
/* Mark index buffer as being read */
- resource_read(batch, ctx->indexbuf.buffer);
+ resource_read(batch, indexbuf);
/* Mark textures as being read */
foreach_bit(i, ctx->tex[PIPE_SHADER_VERTEX].valid_textures)
util_format_short_name(pipe_surface_format(pfb->cbufs[0])),
util_format_short_name(pipe_surface_format(pfb->zsbuf)));
- if (ctx->draw_vbo(ctx, info))
+ if (ctx->draw_vbo(ctx, info, index_offset))
batch->needs_flush = true;
for (i = 0; i < ctx->streamout.num_targets; i++)
fd_context_all_dirty(ctx);
fd_batch_check_size(batch);
-
- if (info->indexed && ibuffer_saved.user_buffer)
- pctx->set_index_buffer(pctx, &ibuffer_saved);
+ if (info->index_size && indexbuf != info->index.resource)
+ pipe_resource_reference(&indexbuf, NULL);
}
/* Generic clear implementation (partially) using u_blitter: */
.max_index = 1,
.instance_count = 1,
};
- ctx->draw_vbo(ctx, &info);
+ ctx->draw_vbo(ctx, &info, 0);
util_blitter_restore_constant_buffer_state(blitter);
util_blitter_restore_vertex_states(blitter);
enum pc_di_src_sel src_sel;
uint32_t idx_size, idx_offset;
- if (info->indexed) {
- struct pipe_index_buffer *idx = &batch->ctx->indexbuf;
+ if (info->index_size) {
+ assert(!info->has_user_indices);
- assert(!idx->user_buffer);
-
- idx_buffer = idx->buffer;
- idx_type = size2indextype(idx->index_size);
- idx_size = idx->index_size * info->count;
- idx_offset = idx->offset + (info->start * idx->index_size);
+ idx_buffer = info->index.resource;
+ idx_type = size2indextype(info->index_size);
+ idx_size = info->index_size * info->count;
+ idx_offset = info->start * info->index_size;
src_sel = DI_SRC_SEL_DMA;
} else {
idx_buffer = NULL;
ctx->dirty |= FD_DIRTY_VTXBUF;
}
- /* Index buffer */
- if (ctx->indexbuf.buffer == prsc)
- ctx->dirty |= FD_DIRTY_INDEXBUF;
-
/* per-shader-stage resources: */
for (unsigned stage = 0; stage < PIPE_SHADER_TYPES; stage++) {
/* Constbufs.. note that constbuf[0] is normal uniforms emitted in
ctx->dirty |= FD_DIRTY_VTXBUF;
}
-static void
-fd_set_index_buffer(struct pipe_context *pctx,
- const struct pipe_index_buffer *ib)
-{
- struct fd_context *ctx = fd_context(pctx);
-
- if (ib) {
- pipe_resource_reference(&ctx->indexbuf.buffer, ib->buffer);
- ctx->indexbuf.index_size = ib->index_size;
- ctx->indexbuf.offset = ib->offset;
- ctx->indexbuf.user_buffer = ib->user_buffer;
- } else {
- pipe_resource_reference(&ctx->indexbuf.buffer, NULL);
- }
-
- ctx->dirty |= FD_DIRTY_INDEXBUF;
-}
-
static void
fd_blend_state_bind(struct pipe_context *pctx, void *hwcso)
{
pctx->set_viewport_states = fd_set_viewport_states;
pctx->set_vertex_buffers = fd_set_vertex_buffers;
- pctx->set_index_buffer = fd_set_index_buffer;
pctx->bind_blend_state = fd_blend_state_bind;
pctx->delete_blend_state = fd_blend_state_delete;
uint32_t offset = v->constbase.driver_param;
if (v->constlen > offset) {
uint32_t vertex_params[IR3_DP_VS_COUNT] = {
- [IR3_DP_VTXID_BASE] = info->indexed ?
+ [IR3_DP_VTXID_BASE] = info->index_size ?
info->index_bias : info->start,
[IR3_DP_VTXCNT_MAX] = max_tf_vtx(ctx, v),
};
/*
* Map index buffer, if present
*/
- if (info->indexed) {
- mapped_indices = i915->index_buffer.user_buffer;
+ if (info->index_size) {
+ mapped_indices = info->has_user_indices ? info->index.user : NULL;
if (!mapped_indices)
- mapped_indices = i915_buffer(i915->index_buffer.buffer)->data;
+ mapped_indices = i915_buffer(info->index.resource)->data;
draw_set_indexes(draw,
- (ubyte *) mapped_indices + i915->index_buffer.offset,
- i915->index_buffer.index_size, ~0);
+ (ubyte *) mapped_indices,
+ info->index_size, ~0);
}
if (i915->constants[PIPE_SHADER_VERTEX])
struct pipe_sampler_view *fragment_sampler_views[PIPE_MAX_SAMPLERS];
struct pipe_sampler_view *vertex_sampler_views[PIPE_MAX_SAMPLERS];
struct pipe_viewport_state viewport;
- struct pipe_index_buffer index_buffer;
unsigned dirty;
FREE( velems );
}
-static void i915_set_index_buffer(struct pipe_context *pipe,
- const struct pipe_index_buffer *ib)
-{
- struct i915_context *i915 = i915_context(pipe);
-
- if (ib)
- memcpy(&i915->index_buffer, ib, sizeof(i915->index_buffer));
- else
- memset(&i915->index_buffer, 0, sizeof(i915->index_buffer));
-}
-
static void
i915_set_sample_mask(struct pipe_context *pipe,
unsigned sample_mask)
i915->base.sampler_view_destroy = i915_sampler_view_destroy;
i915->base.set_viewport_states = i915_set_viewport_states;
i915->base.set_vertex_buffers = i915_set_vertex_buffers;
- i915->base.set_index_buffer = i915_set_index_buffer;
}
struct pipe_viewport_state viewports[PIPE_MAX_VIEWPORTS];
struct pipe_vertex_buffer vertex_buffer[PIPE_MAX_ATTRIBS];
- struct pipe_index_buffer index_buffer;
unsigned num_samplers[PIPE_SHADER_TYPES];
unsigned num_sampler_views[PIPE_SHADER_TYPES];
}
/* Map index buffer, if present */
- if (info->indexed) {
+ if (info->index_size) {
unsigned available_space = ~0;
- mapped_indices = lp->index_buffer.user_buffer;
+ mapped_indices = info->has_user_indices ? info->index.user : NULL;
if (!mapped_indices) {
- mapped_indices = llvmpipe_resource_data(lp->index_buffer.buffer);
- if (lp->index_buffer.buffer->width0 > lp->index_buffer.offset)
- available_space =
- (lp->index_buffer.buffer->width0 - lp->index_buffer.offset);
- else
- available_space = 0;
+ mapped_indices = llvmpipe_resource_data(info->index.resource);
+ available_space = info->index.resource->width0;
}
draw_set_indexes(draw,
- (ubyte *) mapped_indices + lp->index_buffer.offset,
- lp->index_buffer.index_size, available_space);
+ (ubyte *) mapped_indices,
+ info->index_size, available_space);
}
for (i = 0; i < lp->num_so_targets; i++) {
}
-static void
-llvmpipe_set_index_buffer(struct pipe_context *pipe,
- const struct pipe_index_buffer *ib)
-{
- struct llvmpipe_context *llvmpipe = llvmpipe_context(pipe);
-
- if (ib)
- memcpy(&llvmpipe->index_buffer, ib, sizeof(llvmpipe->index_buffer));
- else
- memset(&llvmpipe->index_buffer, 0, sizeof(llvmpipe->index_buffer));
-}
-
void
llvmpipe_init_vertex_funcs(struct llvmpipe_context *llvmpipe)
{
llvmpipe->pipe.delete_vertex_elements_state = llvmpipe_delete_vertex_elements_state;
llvmpipe->pipe.set_vertex_buffers = llvmpipe_set_vertex_buffers;
- llvmpipe->pipe.set_index_buffer = llvmpipe_set_index_buffer;
}
FREE(state);
}
-static void noop_set_index_buffer(struct pipe_context *ctx,
- const struct pipe_index_buffer *ib)
-{
-}
-
static void noop_set_vertex_buffers(struct pipe_context *ctx,
unsigned start_slot, unsigned count,
const struct pipe_vertex_buffer *buffers)
ctx->set_scissor_states = noop_set_scissor_states;
ctx->set_stencil_ref = noop_set_stencil_ref;
ctx->set_vertex_buffers = noop_set_vertex_buffers;
- ctx->set_index_buffer = noop_set_index_buffer;
ctx->set_viewport_states = noop_set_viewport_states;
ctx->sampler_view_destroy = noop_sampler_view_destroy;
ctx->surface_destroy = noop_surface_destroy;
}
}
}
- if (res->bind & PIPE_BIND_INDEX_BUFFER) {
- if (nv30->idxbuf.buffer == res) {
- nouveau_bufctx_reset(nv30->bufctx, BUFCTX_IDXBUF);
- if (!--ref)
- return ref;
- }
- }
if (res->bind & PIPE_BIND_SAMPLER_VIEW) {
for (i = 0; i < nv30->fragprog.num_textures; ++i) {
struct pipe_vertex_buffer vtxbuf[PIPE_MAX_ATTRIBS];
unsigned num_vtxbufs;
- struct pipe_index_buffer idxbuf;
uint32_t vbo_fifo;
uint32_t vbo_user;
unsigned vbo_min_index;
draw_set_mapped_vertex_buffer(draw, i, map, ~0);
}
- if (info->indexed) {
- const void *map = nv30->idxbuf.user_buffer;
+ if (info->index_size) {
+ const void *map = info->has_user_indices ? info->index.user : NULL;
if (!map)
- map = pipe_buffer_map(pipe, nv30->idxbuf.buffer,
+ map = pipe_buffer_map(pipe, info->index.resource,
PIPE_TRANSFER_UNSYNCHRONIZED |
PIPE_TRANSFER_READ, &transferi);
draw_set_indexes(draw,
- (ubyte *) map + nv30->idxbuf.offset,
- nv30->idxbuf.index_size, ~0);
+ (ubyte *) map,
+ info->index_size, ~0);
} else {
draw_set_indexes(draw, NULL, 0, 0);
}
draw_vbo(draw, info);
draw_flush(draw);
- if (info->indexed && transferi)
+ if (info->index_size && transferi)
pipe_buffer_unmap(pipe, transferi);
for (i = 0; i < nv30->num_vtxbufs; i++)
if (transfer[i])
{
struct push_context ctx;
unsigned i, index_size;
- bool apply_bias = info->indexed && info->index_bias;
+ bool apply_bias = info->index_size && info->index_bias;
ctx.push = nv30->base.pushbuf;
ctx.translate = nv30->vertex->translate;
ctx.translate->set_buffer(ctx.translate, i, data, vb->stride, ~0);
}
- if (info->indexed) {
- if (nv30->idxbuf.buffer)
+ if (info->index_size) {
+ if (!info->has_user_indices)
ctx.idxbuf = nouveau_resource_map_offset(&nv30->base,
- nv04_resource(nv30->idxbuf.buffer), nv30->idxbuf.offset,
+ nv04_resource(info->index.resource), info->start * info->index_size,
NOUVEAU_BO_RD);
else
- ctx.idxbuf = nv30->idxbuf.user_buffer;
+ ctx.idxbuf = info->index.user;
if (!ctx.idxbuf) {
nv30_state_release(nv30);
return;
}
- index_size = nv30->idxbuf.index_size;
+ index_size = info->index_size;
ctx.primitive_restart = info->primitive_restart;
ctx.restart_index = info->restart_index;
} else {
BEGIN_NV04(ctx.push, NV30_3D(VERTEX_BEGIN_END), 1);
PUSH_DATA (ctx.push, NV30_3D_VERTEX_BEGIN_END_STOP);
- if (info->indexed)
- nouveau_resource_unmap(nv04_resource(nv30->idxbuf.buffer));
+ if (info->index_size && !info->has_user_indices)
+ nouveau_resource_unmap(nv04_resource(info->index.resource));
for (i = 0; i < nv30->num_vtxbufs; ++i) {
if (nv30->vtxbuf[i].buffer.resource) {
if (nv30->vtxbuf[i].buffer.resource->flags & PIPE_RESOURCE_FLAG_MAP_PERSISTENT)
nv30->base.vbo_dirty = true;
}
-
- if (nv30->idxbuf.buffer &&
- nv30->idxbuf.buffer->flags & PIPE_RESOURCE_FLAG_MAP_PERSISTENT)
- nv30->base.vbo_dirty = true;
}
}
nv30->dirty |= NV30_NEW_ARRAYS;
}
-static void
-nv30_set_index_buffer(struct pipe_context *pipe,
- const struct pipe_index_buffer *ib)
-{
- struct nv30_context *nv30 = nv30_context(pipe);
-
- if (ib) {
- pipe_resource_reference(&nv30->idxbuf.buffer, ib->buffer);
- nv30->idxbuf.index_size = ib->index_size;
- nv30->idxbuf.offset = ib->offset;
- nv30->idxbuf.user_buffer = ib->user_buffer;
- } else {
- pipe_resource_reference(&nv30->idxbuf.buffer, NULL);
- nv30->idxbuf.user_buffer = NULL;
- }
-}
-
void
nv30_state_init(struct pipe_context *pipe)
{
pipe->set_viewport_states = nv30_set_viewport_states;
pipe->set_vertex_buffers = nv30_set_vertex_buffers;
- pipe->set_index_buffer = nv30_set_index_buffer;
}
static void
nv30_draw_elements(struct nv30_context *nv30, bool shorten,
+ const struct pipe_draw_info *info,
unsigned mode, unsigned start, unsigned count,
- unsigned instance_count, int32_t index_bias)
+ unsigned instance_count, int32_t index_bias,
+ unsigned index_size)
{
- const unsigned index_size = nv30->idxbuf.index_size;
struct nouveau_pushbuf *push = nv30->base.pushbuf;
struct nouveau_object *eng3d = nv30->screen->eng3d;
unsigned prim = nv30_prim_gl(mode);
}
if (eng3d->oclass == NV40_3D_CLASS && index_size > 1 &&
- nv30->idxbuf.buffer) {
- struct nv04_resource *res = nv04_resource(nv30->idxbuf.buffer);
- unsigned offset = nv30->idxbuf.offset;
+ !info->has_user_indices) {
+ struct nv04_resource *res = nv04_resource(info->index.resource);
+ unsigned offset = 0;
assert(nouveau_resource_mapped_by_gpu(&res->base));
PUSH_RESET(push, BUFCTX_IDXBUF);
} else {
const void *data;
- if (nv30->idxbuf.buffer)
+ if (!info->has_user_indices)
data = nouveau_resource_map_offset(&nv30->base,
- nv04_resource(nv30->idxbuf.buffer),
- nv30->idxbuf.offset, NOUVEAU_BO_RD);
+ nv04_resource(info->index.resource),
+ start * index_size, NOUVEAU_BO_RD);
else
- data = nv30->idxbuf.user_buffer;
+ data = info->index.user;
if (!data)
return;
* if index count is larger and we expect repeated vertices, suggest upload.
*/
nv30->vbo_push_hint = /* the 64 is heuristic */
- !(info->indexed &&
+ !(info->index_size &&
((info->max_index - info->min_index + 64) < info->count));
nv30->vbo_min_index = info->min_index;
nv30->base.vbo_dirty = true;
}
- if (!nv30->base.vbo_dirty && nv30->idxbuf.buffer &&
- nv30->idxbuf.buffer->flags & PIPE_RESOURCE_FLAG_MAP_COHERENT)
+ if (!nv30->base.vbo_dirty && info->index_size && !info->has_user_indices &&
+ info->index.resource->flags & PIPE_RESOURCE_FLAG_MAP_COHERENT)
nv30->base.vbo_dirty = true;
if (nv30->base.vbo_dirty) {
nv30->base.vbo_dirty = false;
}
- if (!info->indexed) {
+ if (!info->index_size) {
nv30_draw_arrays(nv30,
info->mode, info->start, info->count,
info->instance_count);
shorten = false;
}
- nv30_draw_elements(nv30, shorten,
+ nv30_draw_elements(nv30, shorten, info,
info->mode, info->start, info->count,
- info->instance_count, info->index_bias);
+ info->instance_count, info->index_bias, info->index_size);
}
nv30_state_release(nv30);
nv50->base.vbo_dirty = true;
}
- if (nv50->idxbuf.buffer &&
- nv50->idxbuf.buffer->flags & PIPE_RESOURCE_FLAG_MAP_PERSISTENT)
- nv50->base.vbo_dirty = true;
-
for (s = 0; s < 3 && !nv50->cb_dirty; ++s) {
uint32_t valid = nv50->constbuf_valid[s];
for (i = 0; i < nv50->num_vtxbufs; ++i)
pipe_resource_reference(&nv50->vtxbuf[i].buffer.resource, NULL);
- pipe_resource_reference(&nv50->idxbuf.buffer, NULL);
-
for (s = 0; s < 3; ++s) {
assert(nv50->num_textures[s] <= PIPE_MAX_SAMPLERS);
for (i = 0; i < nv50->num_textures[s]; ++i)
}
}
- if (nv50->idxbuf.buffer == res) {
- /* Just rebind to the bufctx as there is no separate dirty bit */
- nouveau_bufctx_reset(nv50->bufctx_3d, NV50_BIND_3D_INDEX);
- BCTX_REFN(nv50->bufctx_3d, 3D_INDEX, nv04_resource(res), RD);
- if (!--ref)
- return ref;
- }
-
for (s = 0; s < 3; ++s) {
assert(nv50->num_textures[s] <= PIPE_MAX_SAMPLERS);
for (i = 0; i < nv50->num_textures[s]; ++i) {
struct pipe_vertex_buffer vtxbuf[PIPE_MAX_ATTRIBS];
unsigned num_vtxbufs;
uint32_t vtxbufs_coherent;
- struct pipe_index_buffer idxbuf;
uint32_t vbo_fifo; /* bitmask of vertex elements to be pushed to FIFO */
uint32_t vbo_user; /* bitmask of vertex buffers pointing to user memory */
uint32_t vbo_constant; /* bitmask of user buffers with stride 0 */
unsigned i, index_size;
unsigned inst_count = info->instance_count;
unsigned vert_count = info->count;
- bool apply_bias = info->indexed && info->index_bias;
+ bool apply_bias = info->index_size && info->index_bias;
ctx.push = nv50->base.pushbuf;
ctx.translate = nv50->vertex->translate;
ctx.translate->set_buffer(ctx.translate, i, data, vb->stride, ~0);
}
- if (info->indexed) {
- if (nv50->idxbuf.buffer) {
+ if (info->index_size) {
+ if (!info->has_user_indices) {
ctx.idxbuf = nouveau_resource_map_offset(&nv50->base,
- nv04_resource(nv50->idxbuf.buffer), nv50->idxbuf.offset,
+ nv04_resource(info->index.resource), info->start * info->index_size,
NOUVEAU_BO_RD);
} else {
- ctx.idxbuf = nv50->idxbuf.user_buffer;
+ ctx.idxbuf = info->index.user;
}
if (!ctx.idxbuf)
return;
- index_size = nv50->idxbuf.index_size;
+ index_size = info->index_size;
ctx.primitive_restart = info->primitive_restart;
ctx.restart_index = info->restart_index;
} else {
}
}
-static void
-nv50_set_index_buffer(struct pipe_context *pipe,
- const struct pipe_index_buffer *ib)
-{
- struct nv50_context *nv50 = nv50_context(pipe);
-
- if (nv50->idxbuf.buffer)
- nouveau_bufctx_reset(nv50->bufctx_3d, NV50_BIND_3D_INDEX);
-
- if (ib) {
- pipe_resource_reference(&nv50->idxbuf.buffer, ib->buffer);
- nv50->idxbuf.index_size = ib->index_size;
- if (ib->buffer) {
- nv50->idxbuf.offset = ib->offset;
- BCTX_REFN(nv50->bufctx_3d, 3D_INDEX, nv04_resource(ib->buffer), RD);
- } else {
- nv50->idxbuf.user_buffer = ib->user_buffer;
- }
- } else {
- pipe_resource_reference(&nv50->idxbuf.buffer, NULL);
- }
-}
-
static void
nv50_vertex_state_bind(struct pipe_context *pipe, void *hwcso)
{
pipe->bind_vertex_elements_state = nv50_vertex_state_bind;
pipe->set_vertex_buffers = nv50_set_vertex_buffers;
- pipe->set_index_buffer = nv50_set_index_buffer;
pipe->create_stream_output_target = nv50_so_target_create;
pipe->stream_output_target_destroy = nv50_so_target_destroy;
static void
nv50_draw_elements(struct nv50_context *nv50, bool shorten,
+ const struct pipe_draw_info *info,
unsigned mode, unsigned start, unsigned count,
- unsigned instance_count, int32_t index_bias)
+ unsigned instance_count, int32_t index_bias,
+ unsigned index_size)
{
struct nouveau_pushbuf *push = nv50->base.pushbuf;
unsigned prim;
- const unsigned index_size = nv50->idxbuf.index_size;
prim = nv50_prim_gl(mode);
nv50->state.index_bias = index_bias;
}
- if (nv50->idxbuf.buffer) {
- struct nv04_resource *buf = nv04_resource(nv50->idxbuf.buffer);
+ if (!info->has_user_indices) {
+ struct nv04_resource *buf = nv04_resource(info->index.resource);
unsigned pb_start;
unsigned pb_bytes;
- const unsigned base = (buf->offset + nv50->idxbuf.offset) & ~3;
+ const unsigned base = buf->offset & ~3;
- start += ((buf->offset + nv50->idxbuf.offset) & 3) >> (index_size >> 1);
+ start += (buf->offset & 3) >> (index_size >> 1);
- assert(nouveau_resource_mapped_by_gpu(nv50->idxbuf.buffer));
+ assert(nouveau_resource_mapped_by_gpu(info->index.resource));
/* This shouldn't have to be here. The going theory is that the buffer
* is being filled in by PGRAPH, and it's not done yet by the time it
prim |= NV50_3D_VERTEX_BEGIN_GL_INSTANCE_NEXT;
}
} else {
- const void *data = nv50->idxbuf.user_buffer;
+ const void *data = info->index.user;
while (instance_count--) {
BEGIN_NV04(push, NV50_3D(VERTEX_BEGIN_GL), 1);
bool tex_dirty = false;
int s;
+ if (info->index_size && !info->has_user_indices) {
+ nouveau_bufctx_reset(nv50->bufctx_3d, NV50_BIND_3D_INDEX);
+ BCTX_REFN(nv50->bufctx_3d, 3D_INDEX, nv04_resource(info->index.resource), RD);
+ }
+
/* NOTE: caller must ensure that (min_index + index_bias) is >= 0 */
nv50->vb_elt_first = info->min_index + info->index_bias;
nv50->vb_elt_limit = info->max_index - info->min_index;
* if index count is larger and we expect repeated vertices, suggest upload.
*/
nv50->vbo_push_hint = /* the 64 is heuristic */
- !(info->indexed && ((nv50->vb_elt_limit + 64) < info->count));
+ !(info->index_size && ((nv50->vb_elt_limit + 64) < info->count));
if (nv50->vbo_user && !(nv50->dirty_3d & (NV50_NEW_3D_ARRAYS | NV50_NEW_3D_VERTEX))) {
if (!!nv50->vbo_fifo != nv50->vbo_push_hint)
nv50->base.vbo_dirty = false;
}
- if (info->indexed) {
+ if (info->index_size) {
bool shorten = info->max_index <= 65535;
if (info->primitive_restart != nv50->state.prim_restart) {
shorten = false;
}
- nv50_draw_elements(nv50, shorten,
+ nv50_draw_elements(nv50, shorten, info,
info->mode, info->start, info->count,
- info->instance_count, info->index_bias);
+ info->instance_count, info->index_bias, info->index_size);
} else
if (unlikely(info->count_from_stream_output)) {
nva0_draw_stream_output(nv50, info);
nvc0->base.vbo_dirty = true;
}
- if (nvc0->idxbuf.buffer &&
- nvc0->idxbuf.buffer->flags & PIPE_RESOURCE_FLAG_MAP_PERSISTENT)
- nvc0->base.vbo_dirty = true;
-
for (s = 0; s < 5 && !nvc0->cb_dirty; ++s) {
uint32_t valid = nvc0->constbuf_valid[s];
for (i = 0; i < nvc0->num_vtxbufs; ++i)
pipe_vertex_buffer_unreference(&nvc0->vtxbuf[i]);
- pipe_resource_reference(&nvc0->idxbuf.buffer, NULL);
-
for (s = 0; s < 6; ++s) {
for (i = 0; i < nvc0->num_textures[s]; ++i)
pipe_sampler_view_reference(&nvc0->textures[s][i], NULL);
}
}
- if (nvc0->idxbuf.buffer == res) {
- nvc0->dirty_3d |= NVC0_NEW_3D_IDXBUF;
- nouveau_bufctx_reset(nvc0->bufctx_3d, NVC0_BIND_3D_IDX);
- if (!--ref)
- return ref;
- }
-
for (s = 0; s < 6; ++s) {
for (i = 0; i < nvc0->num_textures[s]; ++i) {
if (nvc0->textures[s][i] &&
#define NVC0_NEW_3D_TEXTURES (1 << 19)
#define NVC0_NEW_3D_SAMPLERS (1 << 20)
#define NVC0_NEW_3D_TFB_TARGETS (1 << 21)
-#define NVC0_NEW_3D_IDXBUF (1 << 22)
+
#define NVC0_NEW_3D_SURFACES (1 << 23)
#define NVC0_NEW_3D_MIN_SAMPLES (1 << 24)
#define NVC0_NEW_3D_TESSFACTOR (1 << 25)
struct pipe_vertex_buffer vtxbuf[PIPE_MAX_ATTRIBS];
unsigned num_vtxbufs;
uint32_t vtxbufs_coherent;
- struct pipe_index_buffer idxbuf;
uint32_t constant_vbos;
uint32_t vbo_user; /* bitmask of vertex buffers pointing to user memory */
uint32_t vb_elt_first; /* from pipe_draw_info, for vertex upload */
}
}
-static void
-nvc0_set_index_buffer(struct pipe_context *pipe,
- const struct pipe_index_buffer *ib)
-{
- struct nvc0_context *nvc0 = nvc0_context(pipe);
-
- if (nvc0->idxbuf.buffer)
- nouveau_bufctx_reset(nvc0->bufctx_3d, NVC0_BIND_3D_IDX);
-
- if (ib) {
- pipe_resource_reference(&nvc0->idxbuf.buffer, ib->buffer);
- nvc0->idxbuf.index_size = ib->index_size;
- if (ib->buffer) {
- nvc0->idxbuf.offset = ib->offset;
- nvc0->dirty_3d |= NVC0_NEW_3D_IDXBUF;
- } else {
- nvc0->idxbuf.user_buffer = ib->user_buffer;
- nvc0->dirty_3d &= ~NVC0_NEW_3D_IDXBUF;
- }
- } else {
- nvc0->dirty_3d &= ~NVC0_NEW_3D_IDXBUF;
- pipe_resource_reference(&nvc0->idxbuf.buffer, NULL);
- }
-}
-
static void
nvc0_vertex_state_bind(struct pipe_context *pipe, void *hwcso)
{
pipe->bind_vertex_elements_state = nvc0_vertex_state_bind;
pipe->set_vertex_buffers = nvc0_set_vertex_buffers;
- pipe->set_index_buffer = nvc0_set_index_buffer;
pipe->create_stream_output_target = nvc0_so_target_create;
pipe->stream_output_target_destroy = nvc0_so_target_destroy;
if (!ctx_to->vertex)
ctx_to->dirty_3d &= ~(NVC0_NEW_3D_VERTEX | NVC0_NEW_3D_ARRAYS);
- if (!ctx_to->idxbuf.buffer)
- ctx_to->dirty_3d &= ~NVC0_NEW_3D_IDXBUF;
if (!ctx_to->vertprog)
ctx_to->dirty_3d &= ~NVC0_NEW_3D_VERTPROG;
{ nvc0_vertex_arrays_validate, NVC0_NEW_3D_VERTEX | NVC0_NEW_3D_ARRAYS },
{ nvc0_validate_surfaces, NVC0_NEW_3D_SURFACES },
{ nvc0_validate_buffers, NVC0_NEW_3D_BUFFERS },
- { nvc0_idxbuf_validate, NVC0_NEW_3D_IDXBUF },
{ nvc0_tfb_validate, NVC0_NEW_3D_TFB_TARGETS | NVC0_NEW_3D_GMTYPROG },
{ nvc0_layer_validate, NVC0_NEW_3D_VERTPROG |
NVC0_NEW_3D_TEVLPROG |
nvc0_validate_vertex_buffers(nvc0);
}
-void
-nvc0_idxbuf_validate(struct nvc0_context *nvc0)
-{
- struct nouveau_pushbuf *push = nvc0->base.pushbuf;
- struct nv04_resource *buf = nv04_resource(nvc0->idxbuf.buffer);
-
- assert(buf);
- assert(nouveau_resource_mapped_by_gpu(&buf->base));
-
- PUSH_SPACE(push, 6);
- BEGIN_NVC0(push, NVC0_3D(INDEX_ARRAY_START_HIGH), 5);
- PUSH_DATAh(push, buf->address + nvc0->idxbuf.offset);
- PUSH_DATA (push, buf->address + nvc0->idxbuf.offset);
- PUSH_DATAh(push, buf->address + buf->base.width0 - 1);
- PUSH_DATA (push, buf->address + buf->base.width0 - 1);
- PUSH_DATA (push, nvc0->idxbuf.index_size >> 1);
-
- BCTX_REFN(nvc0->bufctx_3d, 3D_IDX, buf, RD);
-}
-
#define NVC0_PRIM_GL_CASE(n) \
case PIPE_PRIM_##n: return NVC0_3D_VERTEX_BEGIN_GL_PRIMITIVE_##n
unsigned prim;
if (nvc0->state.index_bias) {
- /* index_bias is implied 0 if !info->indexed (really ?) */
+ /* index_bias is implied 0 if !info->index_size (really ?) */
/* TODO: can we deactivate it for the VERTEX_BUFFER_FIRST command ? */
PUSH_SPACE(push, 2);
IMMED_NVC0(push, NVC0_3D(VB_ELEMENT_BASE), 0);
static void
nvc0_draw_elements(struct nvc0_context *nvc0, bool shorten,
+ const struct pipe_draw_info *info,
unsigned mode, unsigned start, unsigned count,
- unsigned instance_count, int32_t index_bias)
+ unsigned instance_count, int32_t index_bias,
+ unsigned index_size)
{
struct nouveau_pushbuf *push = nvc0->base.pushbuf;
unsigned prim;
- const unsigned index_size = nvc0->idxbuf.index_size;
prim = nvc0_prim_gl(mode);
nvc0->state.index_bias = index_bias;
}
- if (nvc0->idxbuf.buffer) {
+ if (!info->has_user_indices) {
PUSH_SPACE(push, 1);
IMMED_NVC0(push, NVC0_3D(VERTEX_BEGIN_GL), prim);
do {
} while (instance_count);
IMMED_NVC0(push, NVC0_3D(VERTEX_END_GL), 0);
} else {
- const void *data = nvc0->idxbuf.user_buffer;
+ const void *data = info->index.user;
while (instance_count--) {
PUSH_SPACE(push, 2);
BEGIN_NVC0(push, NVC0_3D(CB_POS), 1);
PUSH_DATA (push, NVC0_CB_AUX_DRAW_INFO);
- if (info->indexed) {
- assert(nvc0->idxbuf.buffer);
- assert(nouveau_resource_mapped_by_gpu(nvc0->idxbuf.buffer));
+ if (info->index_size) {
+ assert(!info->has_user_indices);
+ assert(nouveau_resource_mapped_by_gpu(info->index.resource));
size = 5;
if (buf_count)
macro = NVC0_3D_MACRO_DRAW_ELEMENTS_INDIRECT_COUNT;
macro = NVC0_3D_MACRO_DRAW_ELEMENTS_INDIRECT;
} else {
if (nvc0->state.index_bias) {
- /* index_bias is implied 0 if !info->indexed (really ?) */
+ /* index_bias is implied 0 if !info->index_size (really ?) */
IMMED_NVC0(push, NVC0_3D(VB_ELEMENT_BASE), 0);
IMMED_NVC0(push, NVC0_3D(VERTEX_ID_BASE), 0);
nvc0->state.index_bias = 0;
struct nvc0_screen *screen = nvc0->screen;
int s;
+ if (info->index_size)
+ nouveau_bufctx_reset(nvc0->bufctx_3d, NVC0_BIND_3D_IDX);
+
/* NOTE: caller must ensure that (min_index + index_bias) is >= 0 */
nvc0->vb_elt_first = info->min_index + info->index_bias;
nvc0->vb_elt_limit = info->max_index - info->min_index;
* if index count is larger and we expect repeated vertices, suggest upload.
*/
nvc0->vbo_push_hint =
- !info->indirect && info->indexed &&
+ !info->indirect && info->index_size &&
(nvc0->vb_elt_limit >= (info->count * 2));
/* Check whether we want to switch vertex-submission mode. */
IMMED_NVC0(push, NVC0_3D(PATCH_VERTICES), nvc0->state.patch_vertices);
}
+ if (info->index_size && !info->has_user_indices) {
+ struct nv04_resource *buf = nv04_resource(info->index.resource);
+
+ assert(buf);
+ assert(nouveau_resource_mapped_by_gpu(&buf->base));
+
+ PUSH_SPACE(push, 6);
+ BEGIN_NVC0(push, NVC0_3D(INDEX_ARRAY_START_HIGH), 5);
+ PUSH_DATAh(push, buf->address);
+ PUSH_DATA (push, buf->address);
+ PUSH_DATAh(push, buf->address + buf->base.width0 - 1);
+ PUSH_DATA (push, buf->address + buf->base.width0 - 1);
+ PUSH_DATA (push, info->index_size >> 1);
+
+ BCTX_REFN(nvc0->bufctx_3d, 3D_IDX, buf, RD);
+ }
+
nvc0_state_validate_3d(nvc0, ~0);
if (nvc0->vertprog->vp.need_draw_parameters && !info->indirect) {
nvc0->base.vbo_dirty |= !!nvc0->vtxbufs_coherent;
- if (!nvc0->base.vbo_dirty && nvc0->idxbuf.buffer &&
- nvc0->idxbuf.buffer->flags & PIPE_RESOURCE_FLAG_MAP_COHERENT)
+ if (!nvc0->base.vbo_dirty && info->index_size && !info->has_user_indices &&
+ info->index.resource->flags & PIPE_RESOURCE_FLAG_MAP_COHERENT)
nvc0->base.vbo_dirty = true;
nvc0_update_prim_restart(nvc0, info->primitive_restart, info->restart_index);
if (unlikely(info->count_from_stream_output)) {
nvc0_draw_stream_output(nvc0, info);
} else
- if (info->indexed) {
+ if (info->index_size) {
bool shorten = info->max_index <= 65535;
if (info->primitive_restart && info->restart_index > 65535)
shorten = false;
- nvc0_draw_elements(nvc0, shorten,
+ nvc0_draw_elements(nvc0, shorten, info,
info->mode, info->start, info->count,
- info->instance_count, info->index_bias);
+ info->instance_count, info->index_bias, info->index_size);
} else {
nvc0_draw_arrays(nvc0,
info->mode, info->start, info->count,
}
static inline void
-nvc0_push_map_idxbuf(struct push_context *ctx, struct nvc0_context *nvc0)
+nvc0_push_map_idxbuf(struct push_context *ctx, struct nvc0_context *nvc0,
+ const struct pipe_draw_info *info,
+ unsigned offset)
{
- if (nvc0->idxbuf.buffer) {
- struct nv04_resource *buf = nv04_resource(nvc0->idxbuf.buffer);
+ if (!info->has_user_indices) {
+ struct nv04_resource *buf = nv04_resource(info->index.resource);
ctx->idxbuf = nouveau_resource_map_offset(&nvc0->base,
- buf, nvc0->idxbuf.offset, NOUVEAU_BO_RD);
+ buf, offset, NOUVEAU_BO_RD);
} else {
- ctx->idxbuf = nvc0->idxbuf.user_buffer;
+ ctx->idxbuf = info->index.user;
}
}
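
The nvc0 helper above is one instance of a pattern that recurs across the push paths in this patch: an indexed draw is detected via info->index_size, user indices come from info->index.user, and GPU-resident indices are mapped from info->index.resource, with the former pipe_index_buffer::offset now expressed as info->start * info->index_size. A minimal, driver-agnostic sketch of that resolution (the helper name is hypothetical and not part of the patch):

   static const void *
   map_draw_indices(struct pipe_context *pipe,
                    const struct pipe_draw_info *info,
                    struct pipe_transfer **transfer)
   {
      if (!info->index_size)
         return NULL;                  /* non-indexed draw */

      if (info->has_user_indices)
         return info->index.user;      /* indices live in user memory */

      /* Map the buffer for CPU reads; the start offset is folded into the
       * mapping, matching what the nouveau push paths do above. */
      return pipe_buffer_map_range(pipe, info->index.resource,
                                   info->start * info->index_size,
                                   info->count * info->index_size,
                                   PIPE_TRANSFER_READ, transfer);
   }
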
*/
BEGIN_NVC0(ctx.push, NVC0_3D(PRIM_RESTART_ENABLE), 2);
PUSH_DATA (ctx.push, 1);
- PUSH_DATA (ctx.push, info->indexed ? 0xffffffff : info->restart_index);
+ PUSH_DATA (ctx.push, info->index_size ? 0xffffffff : info->restart_index);
} else
if (nvc0->state.prim_restart) {
IMMED_NVC0(ctx.push, NVC0_3D(PRIM_RESTART_ENABLE), 0);
}
nvc0->state.prim_restart = info->primitive_restart;
- if (info->indexed) {
- nvc0_push_map_idxbuf(&ctx, nvc0);
- index_size = nvc0->idxbuf.index_size;
+ if (info->index_size) {
+ nvc0_push_map_idxbuf(&ctx, nvc0, info, info->start * info->index_size);
+ index_size = info->index_size;
} else {
if (unlikely(info->count_from_stream_output)) {
struct pipe_context *pipe = &nvc0->base.pipe;
IMMED_NVC0(ctx.push, NVC0_3D(VERTEX_ARRAY_FETCH(1)), 0);
}
- if (info->indexed)
- nouveau_resource_unmap(nv04_resource(nvc0->idxbuf.buffer));
+ if (info->index_size && !info->has_user_indices)
+ nouveau_resource_unmap(nv04_resource(info->index.resource));
for (i = 0; i < nvc0->num_vtxbufs; ++i)
nouveau_resource_unmap(nv04_resource(nvc0->vtxbuf[i].buffer.resource));
uint64_t va;
uint32_t *data;
uint32_t format;
- unsigned index_size = nvc0->idxbuf.index_size;
+ unsigned index_size = info->index_size;
unsigned i;
unsigned a = nvc0->vertex->num_elements;
bo);
nouveau_pushbuf_validate(push);
- if (info->indexed) {
+ if (info->index_size) {
if (!info->index_bias) {
memcpy(data, ctx->idxbuf, info->count * index_size);
} else {
- switch (nvc0->idxbuf.index_size) {
+ switch (info->index_size) {
case 1:
copy_indices_u8(data, ctx->idxbuf, info->index_bias, info->count);
break;
void *dsa_decompress_zmask;
- struct pipe_index_buffer index_buffer;
struct pipe_vertex_buffer vertex_buffer[PIPE_MAX_ATTRIBS];
unsigned nr_vertex_buffers;
struct u_upload_mgr *uploader;
/* r300_render_translate.c */
void r300_translate_index_buffer(struct r300_context *r300,
- struct pipe_index_buffer *ib,
+ const struct pipe_draw_info *info,
struct pipe_resource **out_index_buffer,
unsigned *index_size, unsigned index_offset,
unsigned *start, unsigned count);
const uint8_t *ptr1;
const uint16_t *ptr2;
const uint32_t *ptr4;
- unsigned index_size = r300->index_buffer.index_size;
+ unsigned index_size = info->index_size;
unsigned i, count_dwords = index_size == 4 ? info->count :
(info->count + 1) / 2;
CS_LOCALS(r300);
switch (index_size) {
case 1:
- ptr1 = (uint8_t*)r300->index_buffer.user_buffer;
+ ptr1 = (uint8_t*)info->index.user;
ptr1 += info->start;
OUT_CS(R300_VAP_VF_CNTL__PRIM_WALK_INDICES | (info->count << 16) |
break;
case 2:
- ptr2 = (uint16_t*)r300->index_buffer.user_buffer;
+ ptr2 = (uint16_t*)info->index.user;
ptr2 += info->start;
OUT_CS(R300_VAP_VF_CNTL__PRIM_WALK_INDICES | (info->count << 16) |
break;
case 4:
- ptr4 = (uint32_t*)r300->index_buffer.user_buffer;
+ ptr4 = (uint32_t*)info->index.user;
ptr4 += info->start;
OUT_CS(R300_VAP_VF_CNTL__PRIM_WALK_INDICES | (info->count << 16) |
const struct pipe_draw_info *info,
int instance_id)
{
- struct pipe_resource *indexBuffer = r300->index_buffer.buffer;
- unsigned indexSize = r300->index_buffer.index_size;
+ struct pipe_resource *indexBuffer =
+ info->has_user_indices ? NULL : info->index.resource;
+ unsigned indexSize = info->index_size;
struct pipe_resource* orgIndexBuffer = indexBuffer;
unsigned start = info->start;
unsigned count = info->count;
&index_offset);
}
- r300_translate_index_buffer(r300, &r300->index_buffer, &indexBuffer,
+ r300_translate_index_buffer(r300, info, &indexBuffer,
&indexSize, index_offset, &start, count);
/* Fallback for misaligned ushort indices. */
count, (uint8_t*)ptr);
}
} else {
- if (r300->index_buffer.user_buffer)
+ if (info->has_user_indices)
r300_upload_index_buffer(r300, &indexBuffer, indexSize,
&start, count,
- r300->index_buffer.user_buffer);
+ info->index.user);
}
/* 19 dwords for emit_draw_elements. Give up if the function fails. */
r300_update_derived_state(r300);
/* Draw. */
- if (info.indexed) {
+ if (info.index_size) {
unsigned max_count = r300_max_vertex_count(r300);
if (!max_count) {
}
info.max_index = max_count - 1;
- info.start += r300->index_buffer.offset / r300->index_buffer.index_size;
if (info.instance_count <= 1) {
- if (info.count <= 8 &&
- r300->index_buffer.user_buffer) {
+ if (info.count <= 8 && info.has_user_indices) {
r300_draw_elements_immediate(r300, &info);
} else {
r300_draw_elements(r300, &info, -1);
if (!u_trim_pipe_prim(info->mode, (unsigned*)&info->count))
return;
+ if (info->index_size) {
+ draw_set_indexes(r300->draw,
+ info->has_user_indices ?
+ info->index.user :
+ r300_resource(info->index.resource)->malloced_buffer,
+ info->index_size, ~0);
+ }
+
r300_update_derived_state(r300);
draw_vbo(r300->draw, info);
void r300_translate_index_buffer(struct r300_context *r300,
- struct pipe_index_buffer *ib,
+ const struct pipe_draw_info *info,
struct pipe_resource **out_buffer,
unsigned *index_size, unsigned index_offset,
unsigned *start, unsigned count)
&out_offset, out_buffer, &ptr);
util_shorten_ubyte_elts_to_userptr(
- &r300->context, ib, PIPE_TRANSFER_UNSYNCHRONIZED, index_offset,
+ &r300->context, info, PIPE_TRANSFER_UNSYNCHRONIZED, index_offset,
*start, count, ptr);
*index_size = 2;
u_upload_alloc(r300->uploader, 0, count * 2, 4,
&out_offset, out_buffer, &ptr);
- util_rebuild_ushort_elts_to_userptr(&r300->context, ib,
+ util_rebuild_ushort_elts_to_userptr(&r300->context, info,
PIPE_TRANSFER_UNSYNCHRONIZED,
index_offset, *start,
count, ptr);
u_upload_alloc(r300->uploader, 0, count * 4, 4,
&out_offset, out_buffer, &ptr);
- util_rebuild_uint_elts_to_userptr(&r300->context, ib,
+ util_rebuild_uint_elts_to_userptr(&r300->context, info,
PIPE_TRANSFER_UNSYNCHRONIZED,
index_offset, *start,
count, ptr);
}
}
-static void r300_set_index_buffer_hwtcl(struct pipe_context* pipe,
- const struct pipe_index_buffer *ib)
-{
- struct r300_context* r300 = r300_context(pipe);
-
- if (ib) {
- pipe_resource_reference(&r300->index_buffer.buffer, ib->buffer);
- memcpy(&r300->index_buffer, ib, sizeof(*ib));
- } else {
- pipe_resource_reference(&r300->index_buffer.buffer, NULL);
- }
-}
-
-static void r300_set_index_buffer_swtcl(struct pipe_context* pipe,
- const struct pipe_index_buffer *ib)
-{
- struct r300_context* r300 = r300_context(pipe);
-
- if (ib) {
- const void *buf = NULL;
- if (ib->user_buffer) {
- buf = ib->user_buffer;
- } else if (ib->buffer) {
- buf = r300_resource(ib->buffer)->malloced_buffer;
- }
- draw_set_indexes(r300->draw,
- (const ubyte *) buf + ib->offset,
- ib->index_size, ~0);
- }
-}
-
/* Initialize the PSC tables. */
static void r300_vertex_psc(struct r300_vertex_element_state *velems)
{
if (r300->screen->caps.has_tcl) {
r300->context.set_vertex_buffers = r300_set_vertex_buffers_hwtcl;
- r300->context.set_index_buffer = r300_set_index_buffer_hwtcl;
} else {
r300->context.set_vertex_buffers = r300_set_vertex_buffers_swtcl;
- r300->context.set_index_buffer = r300_set_index_buffer_swtcl;
}
r300->context.create_vertex_elements_state = r300_create_vertex_elements_state;
* the GPU addresses are updated. */
struct list_head texture_buffers;
- /* Index buffer. */
- struct pipe_index_buffer index_buffer;
-
/* Last draw state (-1 = unset). */
enum pipe_prim_type last_primitive_type; /* Last primitive type used in draw_vbo. */
enum pipe_prim_type current_rast_prim; /* primitive type after TES, GS */
FREE(shader);
}
-static void r600_set_index_buffer(struct pipe_context *ctx,
- const struct pipe_index_buffer *ib)
-{
- struct r600_context *rctx = (struct r600_context *)ctx;
-
- if (ib) {
- pipe_resource_reference(&rctx->index_buffer.buffer, ib->buffer);
- memcpy(&rctx->index_buffer, ib, sizeof(*ib));
- r600_context_add_resource_size(ctx, ib->buffer);
- } else {
- pipe_resource_reference(&rctx->index_buffer.buffer, NULL);
- }
-}
-
void r600_vertex_buffers_dirty(struct r600_context *rctx)
{
if (rctx->vertex_buffer_state.dirty_mask) {
static void r600_draw_vbo(struct pipe_context *ctx, const struct pipe_draw_info *info)
{
struct r600_context *rctx = (struct r600_context *)ctx;
- struct pipe_index_buffer ib = {};
+ struct pipe_resource *indexbuf = info->has_user_indices ? NULL : info->index.resource;
struct radeon_winsys_cs *cs = rctx->b.gfx.cs;
bool render_cond_bit = rctx->b.render_cond && !rctx->b.render_cond_force_off;
+ bool has_user_indices = info->has_user_indices;
uint64_t mask;
- unsigned num_patches, dirty_tex_counter;
+ unsigned num_patches, dirty_tex_counter, index_offset = 0;
+ unsigned index_size = info->index_size;
int index_bias;
- if (!info->indirect && !info->count && (info->indexed || !info->count_from_stream_output)) {
+ if (!info->indirect && !info->count && (index_size || !info->count_from_stream_output)) {
return;
}
: (rctx->tes_shader)? rctx->tes_shader->info.properties[TGSI_PROPERTY_TES_PRIM_MODE]
: info->mode;
- if (info->indexed) {
- /* Initialize the index buffer struct. */
- pipe_resource_reference(&ib.buffer, rctx->index_buffer.buffer);
- ib.user_buffer = rctx->index_buffer.user_buffer;
- ib.index_size = rctx->index_buffer.index_size;
- ib.offset = rctx->index_buffer.offset;
- if (!info->indirect) {
- ib.offset += info->start * ib.index_size;
- }
+ if (index_size) {
+ index_offset += info->start * index_size;
/* Translate 8-bit indices to 16-bit. */
- if (unlikely(ib.index_size == 1)) {
+ if (unlikely(index_size == 1)) {
struct pipe_resource *out_buffer = NULL;
unsigned out_offset;
void *ptr;
PIPE_TRANSFER_READ);
if (data) {
data += info->indirect->offset / sizeof(unsigned);
- start = data[2] * ib.index_size;
+ start = data[2] * index_size;
count = data[0];
}
else {
u_upload_alloc(ctx->stream_uploader, start, count * 2,
256, &out_offset, &out_buffer, &ptr);
- if (unlikely(!ptr)) {
- pipe_resource_reference(&ib.buffer, NULL);
+ if (unlikely(!ptr))
return;
- }
util_shorten_ubyte_elts_to_userptr(
- &rctx->b.b, &ib, 0, 0, ib.offset + start, count, ptr);
+ &rctx->b.b, info, 0, 0, index_offset, count, ptr);
- pipe_resource_reference(&ib.buffer, NULL);
- ib.user_buffer = NULL;
- ib.buffer = out_buffer;
- ib.offset = out_offset;
- ib.index_size = 2;
+ indexbuf = out_buffer;
+ index_offset = out_offset;
+ index_size = 2;
+ has_user_indices = false;
}
/* Upload the index buffer.
* and the indices are emitted via PKT3_DRAW_INDEX_IMMD.
* Indirect draws never use immediate indices.
* Note: Instanced rendering in combination with immediate indices hangs. */
- if (ib.user_buffer && (R600_BIG_ENDIAN || info->indirect ||
+ if (has_user_indices && (R600_BIG_ENDIAN || info->indirect ||
info->instance_count > 1 ||
- info->count*ib.index_size > 20)) {
+ info->count*index_size > 20)) {
+ indexbuf = NULL;
u_upload_data(ctx->stream_uploader, 0,
- info->count * ib.index_size, 256,
- ib.user_buffer, &ib.offset, &ib.buffer);
- ib.user_buffer = NULL;
+ info->count * index_size, 256,
+ info->index.user, &index_offset, &indexbuf);
+ has_user_indices = false;
}
index_bias = info->index_bias;
} else {
evergreen_setup_tess_constants(rctx, info, &num_patches);
/* Emit states. */
- r600_need_cs_space(rctx, ib.user_buffer ? 5 : 0, TRUE);
+ r600_need_cs_space(rctx, has_user_indices ? 5 : 0, TRUE);
r600_flush_emit(rctx);
mask = rctx->dirty_atoms;
RADEON_PRIO_DRAW_INDIRECT));
}
- if (info->indexed) {
+ if (index_size) {
radeon_emit(cs, PKT3(PKT3_INDEX_TYPE, 0, 0));
- radeon_emit(cs, ib.index_size == 4 ?
+ radeon_emit(cs, index_size == 4 ?
(VGT_INDEX_32 | (R600_BIG_ENDIAN ? VGT_DMA_SWAP_32_BIT : 0)) :
(VGT_INDEX_16 | (R600_BIG_ENDIAN ? VGT_DMA_SWAP_16_BIT : 0)));
- if (ib.user_buffer) {
- unsigned size_bytes = info->count*ib.index_size;
+ if (has_user_indices) {
+ unsigned size_bytes = info->count*index_size;
unsigned size_dw = align(size_bytes, 4) / 4;
radeon_emit(cs, PKT3(PKT3_DRAW_INDEX_IMMD, 1 + size_dw, render_cond_bit));
radeon_emit(cs, info->count);
radeon_emit(cs, V_0287F0_DI_SRC_SEL_IMMEDIATE);
- radeon_emit_array(cs, ib.user_buffer, size_dw);
+ radeon_emit_array(cs, info->index.user, size_dw);
} else {
- uint64_t va = r600_resource(ib.buffer)->gpu_address + ib.offset;
+ uint64_t va = r600_resource(indexbuf)->gpu_address + index_offset;
if (likely(!info->indirect)) {
radeon_emit(cs, PKT3(PKT3_DRAW_INDEX, 3, render_cond_bit));
radeon_emit(cs, V_0287F0_DI_SRC_SEL_DMA);
radeon_emit(cs, PKT3(PKT3_NOP, 0, 0));
radeon_emit(cs, radeon_add_to_buffer_list(&rctx->b, &rctx->b.gfx,
- (struct r600_resource*)ib.buffer,
+ (struct r600_resource*)indexbuf,
RADEON_USAGE_READ,
RADEON_PRIO_INDEX_BUFFER));
}
else {
- uint32_t max_size = (ib.buffer->width0 - ib.offset) / ib.index_size;
+ uint32_t max_size = (indexbuf->width0 - index_offset) / index_size;
radeon_emit(cs, PKT3(EG_PKT3_INDEX_BASE, 1, 0));
radeon_emit(cs, va);
radeon_emit(cs, PKT3(PKT3_NOP, 0, 0));
radeon_emit(cs, radeon_add_to_buffer_list(&rctx->b, &rctx->b.gfx,
- (struct r600_resource*)ib.buffer,
+ (struct r600_resource*)indexbuf,
RADEON_USAGE_READ,
RADEON_PRIO_INDEX_BUFFER));
rctx->framebuffer.do_update_surf_dirtiness = false;
}
- pipe_resource_reference(&ib.buffer, NULL);
+ if (index_size && indexbuf != info->index.resource)
+ pipe_resource_reference(&indexbuf, NULL);
rctx->b.num_draw_calls++;
}
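
r600 above and radeonsi below follow the same lifetime rule after this change: indexbuf initially aliases info->index.resource, a translated (8-bit to 16-bit) or uploaded user buffer replaces it with a temporary allocation, and only that temporary is released once the draw has been emitted. Condensed, with the draw itself elided:

   struct pipe_resource *indexbuf =
      info->has_user_indices ? NULL : info->index.resource;

   /* ... translate or upload into a fresh indexbuf if needed, then draw ... */

   if (index_size && indexbuf != info->index.resource)
      pipe_resource_reference(&indexbuf, NULL);   /* drop only the temporary */
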
rctx->b.b.set_sample_mask = r600_set_sample_mask;
rctx->b.b.set_stencil_ref = r600_set_pipe_stencil_ref;
rctx->b.b.set_vertex_buffers = r600_set_vertex_buffers;
- rctx->b.b.set_index_buffer = r600_set_index_buffer;
rctx->b.b.set_sampler_views = r600_set_sampler_views;
rctx->b.b.sampler_view_destroy = r600_sampler_view_destroy;
rctx->b.b.texture_barrier = r600_texture_barrier;
/* Vertex and index buffers. */
bool vertex_buffers_dirty;
bool vertex_buffer_pointer_dirty;
- struct pipe_index_buffer index_buffer;
struct pipe_vertex_buffer vertex_buffer[SI_NUM_VERTEX_BUFFERS];
/* MSAA config state. */
sctx->vertex_buffers_dirty = true;
}
-static void si_set_index_buffer(struct pipe_context *ctx,
- const struct pipe_index_buffer *ib)
-{
- struct si_context *sctx = (struct si_context *)ctx;
-
- if (ib) {
- struct pipe_resource *buf = ib->buffer;
-
- pipe_resource_reference(&sctx->index_buffer.buffer, buf);
- memcpy(&sctx->index_buffer, ib, sizeof(*ib));
- r600_context_add_resource_size(ctx, buf);
- if (buf)
- r600_resource(buf)->bind_history |= PIPE_BIND_INDEX_BUFFER;
- } else {
- pipe_resource_reference(&sctx->index_buffer.buffer, NULL);
- }
-}
-
/*
* Misc
*/
sctx->b.b.bind_vertex_elements_state = si_bind_vertex_elements;
sctx->b.b.delete_vertex_elements_state = si_delete_vertex_element;
sctx->b.b.set_vertex_buffers = si_set_vertex_buffers;
- sctx->b.b.set_index_buffer = si_set_index_buffer;
sctx->b.b.texture_barrier = si_texture_barrier;
sctx->b.b.memory_barrier = si_memory_barrier;
const struct pipe_draw_info *info)
{
sctx->current_vs_state &= C_VS_STATE_INDEXED;
- sctx->current_vs_state |= S_VS_STATE_INDEXED(!!info->indexed);
+ sctx->current_vs_state |= S_VS_STATE_INDEXED(!!info->index_size);
if (sctx->current_vs_state != sctx->last_vs_state) {
struct radeon_winsys_cs *cs = sctx->b.gfx.cs;
static void si_emit_draw_packets(struct si_context *sctx,
const struct pipe_draw_info *info,
- const struct pipe_index_buffer *ib)
+ struct pipe_resource *indexbuf,
+ unsigned index_size,
+ unsigned index_offset)
{
struct pipe_draw_indirect_info *indirect = info->indirect;
struct radeon_winsys_cs *cs = sctx->b.gfx.cs;
}
/* draw packet */
- if (info->indexed) {
- if (ib->index_size != sctx->last_index_size) {
+ if (index_size) {
+ if (index_size != sctx->last_index_size) {
unsigned index_type;
/* index type */
- switch (ib->index_size) {
+ switch (index_size) {
case 1:
index_type = V_028A7C_VGT_INDEX_8;
break;
radeon_emit(cs, index_type);
}
- sctx->last_index_size = ib->index_size;
+ sctx->last_index_size = index_size;
}
- index_max_size = (ib->buffer->width0 - ib->offset) /
- ib->index_size;
- index_va = r600_resource(ib->buffer)->gpu_address + ib->offset;
+ index_max_size = (indexbuf->width0 - index_offset) /
+ index_size;
+ index_va = r600_resource(indexbuf)->gpu_address + index_offset;
radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx,
- (struct r600_resource *)ib->buffer,
+ (struct r600_resource *)indexbuf,
RADEON_USAGE_READ, RADEON_PRIO_INDEX_BUFFER);
} else {
/* On CI and later, non-indexed draws overwrite VGT_INDEX_TYPE,
(struct r600_resource *)indirect->buffer,
RADEON_USAGE_READ, RADEON_PRIO_DRAW_INDIRECT);
- unsigned di_src_sel = info->indexed ? V_0287F0_DI_SRC_SEL_DMA
+ unsigned di_src_sel = index_size ? V_0287F0_DI_SRC_SEL_DMA
: V_0287F0_DI_SRC_SEL_AUTO_INDEX;
assert(indirect->offset % 4 == 0);
- if (info->indexed) {
+ if (index_size) {
radeon_emit(cs, PKT3(PKT3_INDEX_BASE, 1, 0));
radeon_emit(cs, index_va);
radeon_emit(cs, index_va >> 32);
}
if (!sctx->screen->has_draw_indirect_multi) {
- radeon_emit(cs, PKT3(info->indexed ? PKT3_DRAW_INDEX_INDIRECT
+ radeon_emit(cs, PKT3(index_size ? PKT3_DRAW_INDEX_INDIRECT
: PKT3_DRAW_INDIRECT,
3, render_cond_bit));
radeon_emit(cs, indirect->offset);
count_va = params_buf->gpu_address + indirect->indirect_draw_count_offset;
}
- radeon_emit(cs, PKT3(info->indexed ? PKT3_DRAW_INDEX_INDIRECT_MULTI :
+ radeon_emit(cs, PKT3(index_size ? PKT3_DRAW_INDEX_INDIRECT_MULTI :
PKT3_DRAW_INDIRECT_MULTI,
8, render_cond_bit));
radeon_emit(cs, indirect->offset);
radeon_emit(cs, info->instance_count);
/* Base vertex and start instance. */
- base_vertex = info->indexed ? info->index_bias : info->start;
+ base_vertex = index_size ? info->index_bias : info->start;
if (base_vertex != sctx->last_base_vertex ||
sctx->last_base_vertex == SI_BASE_VERTEX_UNKNOWN ||
sctx->last_sh_base_reg = sh_base_reg;
}
- if (info->indexed) {
- index_va += info->start * ib->index_size;
+ if (index_size) {
+ index_va += info->start * index_size;
radeon_emit(cs, PKT3(PKT3_DRAW_INDEX_2, 4, render_cond_bit));
radeon_emit(cs, index_max_size);
{
struct si_context *sctx = (struct si_context *)ctx;
struct si_state_rasterizer *rs = sctx->queued.named.rasterizer;
- const struct pipe_index_buffer *ib = &sctx->index_buffer;
- struct pipe_index_buffer ib_tmp; /* for index buffer uploads only */
+ struct pipe_resource *indexbuf = info->index.resource;
unsigned mask, dirty_tex_counter;
enum pipe_prim_type rast_prim;
unsigned num_patches = 0;
+ unsigned index_size = info->index_size;
+ unsigned index_offset = info->indirect ? info->start * index_size : 0;
if (likely(!info->indirect)) {
/* SI-CI treat instance_count==0 as instance_count==1. There is
/* Handle count == 0. */
if (unlikely(!info->count &&
- (info->indexed || !info->count_from_stream_output)))
+ (index_size || !info->count_from_stream_output)))
return;
}
if (!si_upload_graphics_shader_descriptors(sctx))
return;
- ib_tmp.buffer = NULL;
-
- if (info->indexed) {
+ if (index_size) {
/* Translate or upload, if needed. */
/* 8-bit indices are supported on VI. */
- if (sctx->b.chip_class <= CIK && ib->index_size == 1) {
- unsigned start, count, start_offset, size;
+ if (sctx->b.chip_class <= CIK && index_size == 1) {
+ unsigned start, count, start_offset, size, offset;
void *ptr;
si_get_draw_start_count(sctx, info, &start, &count);
start_offset = start * 2;
size = count * 2;
+ indexbuf = NULL;
u_upload_alloc(ctx->stream_uploader, start_offset,
size,
si_optimal_tcc_alignment(sctx, size),
- &ib_tmp.offset, &ib_tmp.buffer, &ptr);
- if (!ib_tmp.buffer)
+ &offset, &indexbuf, &ptr);
+ if (!indexbuf)
return;
- util_shorten_ubyte_elts_to_userptr(&sctx->b.b, ib, 0, 0,
- ib->offset + start,
+ util_shorten_ubyte_elts_to_userptr(&sctx->b.b, info, 0, 0,
+ index_offset + start,
count, ptr);
/* info->start will be added by the drawing code */
- ib_tmp.offset -= start_offset;
- ib_tmp.index_size = 2;
- ib = &ib_tmp;
- } else if (ib->user_buffer && !ib->buffer) {
+ index_offset = offset - start_offset;
+ index_size = 2;
+ } else if (info->has_user_indices) {
unsigned start_offset;
assert(!info->indirect);
- start_offset = info->start * ib->index_size;
+ start_offset = info->start * index_size;
+ indexbuf = NULL;
u_upload_data(ctx->stream_uploader, start_offset,
- info->count * ib->index_size,
+ info->count * index_size,
sctx->screen->b.info.tcc_cache_line_size,
- (char*)ib->user_buffer + start_offset,
- &ib_tmp.offset, &ib_tmp.buffer);
- if (!ib_tmp.buffer)
+ (char*)info->index.user + start_offset,
+ &index_offset, &indexbuf);
+ if (!indexbuf)
return;
/* info->start will be added by the drawing code */
- ib_tmp.offset -= start_offset;
- ib_tmp.index_size = ib->index_size;
- ib = &ib_tmp;
+ index_offset -= start_offset;
} else if (sctx->b.chip_class <= CIK &&
- r600_resource(ib->buffer)->TC_L2_dirty) {
+ r600_resource(indexbuf)->TC_L2_dirty) {
/* VI reads index buffers through TC L2, so it doesn't
* need this. */
sctx->b.flags |= SI_CONTEXT_WRITEBACK_GLOBAL_L2;
- r600_resource(ib->buffer)->TC_L2_dirty = false;
+ r600_resource(indexbuf)->TC_L2_dirty = false;
}
}
si_emit_draw_registers(sctx, info, num_patches);
si_ce_pre_draw_synchronization(sctx);
- si_emit_draw_packets(sctx, info, ib);
+ si_emit_draw_packets(sctx, info, indexbuf, index_size, index_offset);
si_ce_post_draw_synchronization(sctx);
if (sctx->trace_buf)
sctx->framebuffer.do_update_surf_dirtiness = false;
}
- pipe_resource_reference(&ib_tmp.buffer, NULL);
sctx->b.num_draw_calls++;
if (info->primitive_restart)
sctx->b.num_prim_restart_calls++;
if (G_0286E8_WAVESIZE(sctx->spi_tmpring_size))
sctx->b.num_spill_draw_calls++;
+ if (index_size && indexbuf != info->index.resource)
+ pipe_resource_reference(&indexbuf, NULL);
}
void si_trace_emit(struct si_context *sctx)
mtx_unlock(&rb_pipe->call_mutex);
}
-static void
-rbug_set_index_buffer(struct pipe_context *_pipe,
- const struct pipe_index_buffer *_ib)
-{
- struct rbug_context *rb_pipe = rbug_context(_pipe);
- struct pipe_context *pipe = rb_pipe->pipe;
- struct pipe_index_buffer unwrapped_ib, *ib = NULL;
-
- if (_ib) {
- unwrapped_ib = *_ib;
- unwrapped_ib.buffer = rbug_resource_unwrap(_ib->buffer);
- ib = &unwrapped_ib;
- }
-
- mtx_lock(&rb_pipe->call_mutex);
- pipe->set_index_buffer(pipe, ib);
- mtx_unlock(&rb_pipe->call_mutex);
-}
-
static void
rbug_set_sample_mask(struct pipe_context *_pipe,
unsigned sample_mask)
rb_pipe->base.set_viewport_states = rbug_set_viewport_states;
rb_pipe->base.set_sampler_views = rbug_set_sampler_views;
rb_pipe->base.set_vertex_buffers = rbug_set_vertex_buffers;
- rb_pipe->base.set_index_buffer = rbug_set_index_buffer;
rb_pipe->base.set_sample_mask = rbug_set_sample_mask;
rb_pipe->base.create_stream_output_target = rbug_create_stream_output_target;
rb_pipe->base.stream_output_target_destroy = rbug_stream_output_target_destroy;
struct pipe_shader_buffer buffers[PIPE_SHADER_TYPES][PIPE_MAX_SHADER_BUFFERS];
struct pipe_viewport_state viewports[PIPE_MAX_VIEWPORTS];
struct pipe_vertex_buffer vertex_buffer[PIPE_MAX_ATTRIBS];
- struct pipe_index_buffer index_buffer;
struct pipe_resource *mapped_vs_tex[PIPE_MAX_SHADER_SAMPLER_VIEWS];
struct pipe_resource *mapped_gs_tex[PIPE_MAX_SHADER_SAMPLER_VIEWS];
}
/* Map index buffer, if present */
- if (info->indexed) {
+ if (info->index_size) {
unsigned available_space = ~0;
- mapped_indices = sp->index_buffer.user_buffer;
+ mapped_indices = info->has_user_indices ? info->index.user : NULL;
if (!mapped_indices) {
- mapped_indices = softpipe_resource_data(sp->index_buffer.buffer);
- if (sp->index_buffer.buffer->width0 > sp->index_buffer.offset)
- available_space =
- (sp->index_buffer.buffer->width0 - sp->index_buffer.offset);
- else
- available_space = 0;
+ mapped_indices = softpipe_resource_data(info->index.resource);
+ available_space = info->index.resource->width0;
}
draw_set_indexes(draw,
- (ubyte *) mapped_indices + sp->index_buffer.offset,
- sp->index_buffer.index_size, available_space);
+ (ubyte *) mapped_indices,
+ info->index_size, available_space);
}
}
-static void
-softpipe_set_index_buffer(struct pipe_context *pipe,
- const struct pipe_index_buffer *ib)
-{
- struct softpipe_context *softpipe = softpipe_context(pipe);
-
- if (ib)
- memcpy(&softpipe->index_buffer, ib, sizeof(softpipe->index_buffer));
- else
- memset(&softpipe->index_buffer, 0, sizeof(softpipe->index_buffer));
-}
-
-
void
softpipe_init_vertex_funcs(struct pipe_context *pipe)
{
pipe->delete_vertex_elements_state = softpipe_delete_vertex_elements_state;
pipe->set_vertex_buffers = softpipe_set_vertex_buffers;
- pipe->set_index_buffer = softpipe_set_index_buffer;
}
struct svga_geometry_shader *gs; /* derived GS */
struct pipe_vertex_buffer vb[PIPE_MAX_ATTRIBS];
- struct pipe_index_buffer ib;
/** Constant buffers for each shader.
* The size should probably always match with that of
* svga_shader_emitter_v10.num_shader_consts.
need_fallback_prim_restart(const struct svga_context *svga,
const struct pipe_draw_info *info)
{
- if (info->primitive_restart && info->indexed) {
+ if (info->primitive_restart && info->index_size) {
if (!svga_have_vgpu10(svga))
return TRUE;
else if (!svga->state.sw.need_swtnl) {
- if (svga->curr.ib.index_size == 1)
+ if (info->index_size == 1)
return TRUE; /* no device support for 1-byte indexes */
- else if (svga->curr.ib.index_size == 2)
+ else if (info->index_size == 2)
return info->restart_index != 0xffff;
else
return info->restart_index != 0xffffffff;
unsigned count = info->count;
enum pipe_error ret = 0;
boolean needed_swtnl;
+ struct pipe_resource *indexbuf =
+ info->has_user_indices ? NULL : info->index.resource;
SVGA_STATS_TIME_PUSH(svga_sws(svga), SVGA_STATS_TIME_DRAWVBO);
goto done;
/* Upload a user index buffer. */
- struct pipe_index_buffer ibuffer_saved = {0};
- if (info->indexed && svga->curr.ib.user_buffer &&
- !util_save_and_upload_index_buffer(pipe, info, &svga->curr.ib,
- &ibuffer_saved)) {
- return;
+ unsigned index_offset = 0;
+ if (info->index_size && info->has_user_indices &&
+ !util_upload_index_buffer(pipe, info, &indexbuf, &index_offset)) {
+ goto done;
}
/*
if (need_fallback_prim_restart(svga, info)) {
enum pipe_error r;
- r = util_draw_vbo_without_prim_restart(pipe, &svga->curr.ib, info);
+ r = util_draw_vbo_without_prim_restart(pipe, info);
assert(r == PIPE_OK);
(void) r;
goto done;
/* Avoid leaking the previous hwtnl bias to swtnl */
svga_hwtnl_set_index_bias( svga->hwtnl, 0 );
- ret = svga_swtnl_draw_vbo( svga, info );
+ ret = svga_swtnl_draw_vbo(svga, info, indexbuf, index_offset);
}
else {
- if (info->indexed && svga->curr.ib.buffer) {
+ if (info->index_size && indexbuf) {
unsigned offset;
- assert(svga->curr.ib.offset % svga->curr.ib.index_size == 0);
- offset = svga->curr.ib.offset / svga->curr.ib.index_size;
+ assert(index_offset % info->index_size == 0);
+ offset = index_offset / info->index_size;
ret = retry_draw_range_elements( svga,
- svga->curr.ib.buffer,
- svga->curr.ib.index_size,
+ indexbuf,
+ info->index_size,
info->index_bias,
info->min_index,
info->max_index,
}
done:
- if (info->indexed && ibuffer_saved.user_buffer)
- pipe->set_index_buffer(pipe, &ibuffer_saved);
-
+ if (info->index_size && info->index.resource != indexbuf)
+ pipe_resource_reference(&indexbuf, NULL);
SVGA_STATS_TIME_POP(svga_sws(svga));
}
}
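
The svga hunk above relies on util_upload_index_buffer(), the helper this series uses to copy user-space indices into a real pipe_resource when a driver cannot consume them directly. Going only by how it is called here (it returns false on allocation failure and yields a buffer plus a byte offset), the usage pattern looks roughly like this sketch:

   struct pipe_resource *indexbuf =
      info->has_user_indices ? NULL : info->index.resource;
   unsigned index_offset = 0;

   if (info->index_size && info->has_user_indices) {
      /* Copy info->index.user into a temporary resource. */
      if (!util_upload_index_buffer(pipe, info, &indexbuf, &index_offset))
         return;   /* allocation failed; skip the draw */
   }

   /* ... emit the draw from indexbuf at index_offset ... */

   if (info->index_size && indexbuf != info->index.resource)
      pipe_resource_reference(&indexbuf, NULL);
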
-static void svga_set_index_buffer(struct pipe_context *pipe,
- const struct pipe_index_buffer *ib)
-{
- struct svga_context *svga = svga_context(pipe);
-
- util_set_index_buffer(&svga->curr.ib, ib);
-}
-
-
/**
* Does the given vertex attrib format need range adjustment in the VS?
* Range adjustment scales and biases values from [0,1] to [-1,1].
void svga_init_vertex_functions( struct svga_context *svga )
{
svga->pipe.set_vertex_buffers = svga_set_vertex_buffers;
- svga->pipe.set_index_buffer = svga_set_index_buffer;
svga->pipe.create_vertex_elements_state = svga_create_vertex_elements_state;
svga->pipe.bind_vertex_elements_state = svga_bind_vertex_elements_state;
svga->pipe.delete_vertex_elements_state = svga_delete_vertex_elements_state;
enum pipe_error
svga_swtnl_draw_vbo(struct svga_context *svga,
- const struct pipe_draw_info *info);
+ const struct pipe_draw_info *info,
+ struct pipe_resource *indexbuf,
+ unsigned index_offset);
#endif
enum pipe_error
svga_swtnl_draw_vbo(struct svga_context *svga,
- const struct pipe_draw_info *info)
+ const struct pipe_draw_info *info,
+ struct pipe_resource *indexbuf,
+ unsigned index_offset)
{
struct pipe_transfer *vb_transfer[PIPE_MAX_ATTRIBS] = { 0 };
struct pipe_transfer *ib_transfer = NULL;
/* Map index buffer, if present */
map = NULL;
- if (info->indexed && svga->curr.ib.buffer) {
- map = pipe_buffer_map(&svga->pipe, svga->curr.ib.buffer,
+ if (info->index_size && indexbuf) {
+ map = pipe_buffer_map(&svga->pipe, indexbuf,
PIPE_TRANSFER_READ,
&ib_transfer);
+ map = (ubyte *) map + index_offset;
draw_set_indexes(draw,
- (const ubyte *) map + svga->curr.ib.offset,
- svga->curr.ib.index_size, ~0);
+ (const ubyte *) map,
+ info->index_size, ~0);
}
/* Map constant buffers */
struct pipe_viewport_state viewport;
struct pipe_vertex_buffer vertex_buffer[PIPE_MAX_ATTRIBS];
- struct pipe_index_buffer index_buffer;
struct blitter_context *blitter;
feState.bEnableCutIndex = info->primitive_restart;
SwrSetFrontendState(ctx->swrContext, &feState);
- if (info->indexed)
+ if (info->index_size)
SwrDrawIndexedInstanced(ctx->swrContext,
swr_convert_prim_topology(info->mode),
info->count,
}
-static void
-swr_set_index_buffer(struct pipe_context *pipe,
- const struct pipe_index_buffer *ib)
-{
- struct swr_context *ctx = swr_context(pipe);
-
- if (ib)
- memcpy(&ctx->index_buffer, ib, sizeof(ctx->index_buffer));
- else
- memset(&ctx->index_buffer, 0, sizeof(ctx->index_buffer));
-
- ctx->dirty |= SWR_NEW_VERTEX;
-}
-
static void
swr_set_polygon_stipple(struct pipe_context *pipe,
const struct pipe_poly_stipple *stipple)
}
/* VBO index buffer */
- if (p_draw_info && p_draw_info->indexed) {
- struct pipe_index_buffer *ib = &ctx->index_buffer;
- if (!ib->user_buffer)
- swr_resource_read(ib->buffer);
+ if (p_draw_info && p_draw_info->index_size) {
+ if (!p_draw_info->has_user_indices)
+ swr_resource_read(p_draw_info->index.resource);
}
/* transform feedback buffers */
/* Set vertex & index buffers */
/* (using draw info if called by swr_draw_vbo) */
- if (ctx->dirty & SWR_NEW_VERTEX) {
+ /* TODO: This is always true, because the index buffer comes from
+ * pipe_draw_info.
+ */
+ if (1 || ctx->dirty & SWR_NEW_VERTEX) {
uint32_t scratch_total;
uint8_t *scratch = NULL;
/* index buffer, if required (info passed in by swr_draw_vbo) */
SWR_FORMAT index_type = R32_UINT; /* Default for non-indexed draws */
- if (info.indexed) {
+ if (info.index_size) {
const uint8_t *p_data;
uint32_t size, pitch;
- struct pipe_index_buffer *ib = &ctx->index_buffer;
- pitch = ib->index_size ? ib->index_size : sizeof(uint32_t);
+ pitch = p_draw_info->index_size ? p_draw_info->index_size : sizeof(uint32_t);
index_type = swr_convert_index_type(pitch);
- if (!ib->user_buffer) {
+ if (!info.has_user_indices) {
/* VBO
* size is based on buffer->width0 rather than info.count
* to prevent having to validate VBO on each draw */
- size = ib->buffer->width0;
- p_data = swr_resource_data(ib->buffer) + ib->offset;
+ size = info.index.resource->width0;
+ p_data = swr_resource_data(info.index.resource);
} else {
/* Client buffer
* client memory is one-time use, re-trigger SWR_NEW_VERTEX to
size = AlignUp(size, 4);
/* Copy indices to scratch space */
- const void *ptr = ib->user_buffer;
+ const void *ptr = info.index.user;
ptr = swr_copy_to_scratch_space(
ctx, &ctx->scratch->index_buffer, ptr, size);
p_data = (const uint8_t *)ptr;
}
SWR_INDEX_BUFFER_STATE swrIndexBuffer;
- swrIndexBuffer.format = swr_convert_index_type(ib->index_size);
+ swrIndexBuffer.format = swr_convert_index_type(p_draw_info->index_size);
swrIndexBuffer.pIndices = p_data;
swrIndexBuffer.size = size;
pipe->delete_vertex_elements_state = swr_delete_vertex_elements_state;
pipe->set_vertex_buffers = swr_set_vertex_buffers;
- pipe->set_index_buffer = swr_set_index_buffer;
pipe->set_polygon_stipple = swr_set_polygon_stipple;
pipe->set_clip_state = swr_set_clip_state;
}
-static void
-trace_context_set_index_buffer(struct pipe_context *_pipe,
- const struct pipe_index_buffer *ib)
-{
- struct trace_context *tr_ctx = trace_context(_pipe);
- struct pipe_context *pipe = tr_ctx->pipe;
-
- trace_dump_call_begin("pipe_context", "set_index_buffer");
-
- trace_dump_arg(ptr, pipe);
- trace_dump_arg(index_buffer, ib);
-
- pipe->set_index_buffer(pipe, ib);
-
- trace_dump_call_end();
-}
-
-
static struct pipe_stream_output_target *
trace_context_create_stream_output_target(struct pipe_context *_pipe,
struct pipe_resource *res,
TR_CTX_INIT(create_surface);
TR_CTX_INIT(surface_destroy);
TR_CTX_INIT(set_vertex_buffers);
- TR_CTX_INIT(set_index_buffer);
TR_CTX_INIT(create_stream_output_target);
TR_CTX_INIT(stream_output_target_destroy);
TR_CTX_INIT(set_stream_output_targets);
}
-void trace_dump_index_buffer(const struct pipe_index_buffer *state)
-{
- if (!trace_dumping_enabled_locked())
- return;
-
- if (!state) {
- trace_dump_null();
- return;
- }
-
- trace_dump_struct_begin("pipe_index_buffer");
-
- trace_dump_member(uint, state, index_size);
- trace_dump_member(uint, state, offset);
- trace_dump_member(ptr, state, buffer);
- trace_dump_member(ptr, state, user_buffer);
-
- trace_dump_struct_end();
-}
-
-
void trace_dump_vertex_element(const struct pipe_vertex_element *state)
{
if (!trace_dumping_enabled_locked())
trace_dump_struct_begin("pipe_draw_info");
- trace_dump_member(bool, state, indexed);
+ trace_dump_member(uint, state, index_size);
+ trace_dump_member(uint, state, has_user_indices);
trace_dump_member(uint, state, mode);
trace_dump_member(uint, state, start);
trace_dump_member(bool, state, primitive_restart);
trace_dump_member(uint, state, restart_index);
+ trace_dump_member(ptr, state, index.resource);
trace_dump_member(ptr, state, count_from_stream_output);
if (!state->indirect) {
void trace_dump_vertex_buffer(const struct pipe_vertex_buffer *state);
-void trace_dump_index_buffer(const struct pipe_index_buffer *state);
-
void trace_dump_vertex_element(const struct pipe_vertex_element *state);
void trace_dump_constant_buffer(const struct pipe_constant_buffer *state);
#define VC4_DIRTY_CONSTBUF (1 << 13)
#define VC4_DIRTY_VTXSTATE (1 << 14)
#define VC4_DIRTY_VTXBUF (1 << 15)
-#define VC4_DIRTY_INDEXBUF (1 << 16)
+
#define VC4_DIRTY_SCISSOR (1 << 17)
#define VC4_DIRTY_FLAT_SHADE_FLAGS (1 << 18)
#define VC4_DIRTY_PRIM_MODE (1 << 19)
struct pipe_viewport_state viewport;
struct vc4_constbuf_stateobj constbuf[PIPE_SHADER_TYPES];
struct vc4_vertexbuf_stateobj vertexbuf;
- struct pipe_index_buffer indexbuf;
/** @} */
};
return;
if (info->mode >= PIPE_PRIM_QUADS) {
- util_primconvert_save_index_buffer(vc4->primconvert, &vc4->indexbuf);
util_primconvert_save_rasterizer_state(vc4->primconvert, &vc4->rasterizer->base);
util_primconvert_draw_vbo(vc4->primconvert, info);
perf_debug("Fallback conversion for %d %s vertices\n",
* definitions, up to but not including QUADS.
*/
struct vc4_cl_out *bcl = cl_start(&job->bcl);
- if (info->indexed) {
- uint32_t offset = vc4->indexbuf.offset;
- uint32_t index_size = vc4->indexbuf.index_size;
+ if (info->index_size) {
+ uint32_t index_size = info->index_size;
+ uint32_t offset = info->start * index_size;
struct pipe_resource *prsc;
- if (vc4->indexbuf.index_size == 4) {
- prsc = vc4_get_shadow_index_buffer(pctx, &vc4->indexbuf,
+ if (info->index_size == 4) {
+ prsc = vc4_get_shadow_index_buffer(pctx, info,
+ offset,
info->count, &offset);
index_size = 2;
} else {
- if (vc4->indexbuf.user_buffer) {
+ if (info->has_user_indices) {
prsc = NULL;
u_upload_data(vc4->uploader, 0,
info->count * index_size, 4,
- vc4->indexbuf.user_buffer,
+ info->index.user,
&offset, &prsc);
} else {
- prsc = vc4->indexbuf.buffer;
+ prsc = info->index.resource;
}
}
struct vc4_resource *rsc = vc4_resource(prsc);
cl_u32(&bcl, vc4->max_index);
job->draw_calls_queued++;
- if (vc4->indexbuf.index_size == 4 || vc4->indexbuf.user_buffer)
+ if (info->index_size == 4 || info->has_user_indices)
pipe_resource_reference(&prsc, NULL);
} else {
uint32_t count = info->count;
*/
struct pipe_resource *
vc4_get_shadow_index_buffer(struct pipe_context *pctx,
- const struct pipe_index_buffer *ib,
+ const struct pipe_draw_info *info,
+ uint32_t offset,
uint32_t count,
uint32_t *shadow_offset)
{
struct vc4_context *vc4 = vc4_context(pctx);
- struct vc4_resource *orig = vc4_resource(ib->buffer);
+ struct vc4_resource *orig = vc4_resource(info->index.resource);
perf_debug("Fallback conversion for %d uint indices\n", count);
void *data;
struct pipe_transfer *src_transfer = NULL;
const uint32_t *src;
- if (ib->user_buffer) {
- src = ib->user_buffer;
+ if (info->has_user_indices) {
+ src = info->index.user;
} else {
src = pipe_buffer_map_range(pctx, &orig->base.b,
- ib->offset,
+ offset,
count * 4,
PIPE_TRANSFER_READ, &src_transfer);
}
void vc4_update_shadow_baselevel_texture(struct pipe_context *pctx,
struct pipe_sampler_view *view);
struct pipe_resource *vc4_get_shadow_index_buffer(struct pipe_context *pctx,
- const struct pipe_index_buffer *ib,
+ const struct pipe_draw_info *info,
+ uint32_t offset,
uint32_t count,
- uint32_t *offset);
+ uint32_t *shadow_offset);
void vc4_dump_surface(struct pipe_surface *psurf);
#endif /* VC4_RESOURCE_H */
vc4->dirty |= VC4_DIRTY_VTXBUF;
}
-static void
-vc4_set_index_buffer(struct pipe_context *pctx,
- const struct pipe_index_buffer *ib)
-{
- struct vc4_context *vc4 = vc4_context(pctx);
-
- if (ib) {
- pipe_resource_reference(&vc4->indexbuf.buffer, ib->buffer);
- vc4->indexbuf.index_size = ib->index_size;
- vc4->indexbuf.offset = ib->offset;
- vc4->indexbuf.user_buffer = ib->user_buffer;
- } else {
- pipe_resource_reference(&vc4->indexbuf.buffer, NULL);
- }
-
- vc4->dirty |= VC4_DIRTY_INDEXBUF;
-}
-
static void
vc4_blend_state_bind(struct pipe_context *pctx, void *hwcso)
{
pctx->set_viewport_states = vc4_set_viewport_states;
pctx->set_vertex_buffers = vc4_set_vertex_buffers;
- pctx->set_index_buffer = vc4_set_index_buffer;
pctx->create_blend_state = vc4_create_blend_state;
pctx->bind_blend_state = vc4_blend_state_bind;
}
}
-static void virgl_attach_res_index_buffer(struct virgl_context *vctx)
+static void virgl_attach_res_index_buffer(struct virgl_context *vctx,
+ struct virgl_indexbuf *ib)
{
struct virgl_winsys *vws = virgl_screen(vctx->base.screen)->vws;
struct virgl_resource *res;
- res = virgl_resource(vctx->index_buffer.buffer);
+ res = virgl_resource(ib->buffer);
if (res)
vws->emit_res(vws, vctx->cbuf, res->hw_res, FALSE);
}
virgl_attach_res_sampler_views(vctx, shader_type);
virgl_attach_res_uniform_buffers(vctx, shader_type);
}
- virgl_attach_res_index_buffer(vctx);
virgl_attach_res_vertex_buffers(vctx);
virgl_attach_res_so_targets(vctx);
}
virgl_encoder_set_blend_color(vctx, color);
}
-static void virgl_set_index_buffer(struct pipe_context *ctx,
- const struct pipe_index_buffer *ib)
-{
- struct virgl_context *vctx = virgl_context(ctx);
-
- if (ib) {
- pipe_resource_reference(&vctx->index_buffer.buffer, ib->buffer);
- memcpy(&vctx->index_buffer, ib, sizeof(*ib));
- } else {
- pipe_resource_reference(&vctx->index_buffer.buffer, NULL);
- }
-}
-
static void virgl_hw_set_index_buffer(struct pipe_context *ctx,
- struct pipe_index_buffer *ib)
+ struct virgl_indexbuf *ib)
{
struct virgl_context *vctx = virgl_context(ctx);
virgl_encoder_set_index_buffer(vctx, ib);
- virgl_attach_res_index_buffer(vctx);
+ virgl_attach_res_index_buffer(vctx, ib);
}
static void virgl_set_constant_buffer(struct pipe_context *ctx,
{
struct virgl_context *vctx = virgl_context(ctx);
struct virgl_screen *rs = virgl_screen(ctx->screen);
- struct pipe_index_buffer ib = {};
+ struct virgl_indexbuf ib = {};
struct pipe_draw_info info = *dinfo;
if (!dinfo->count_from_stream_output && !dinfo->indirect &&
return;
if (!(rs->caps.caps.v1.prim_mask & (1 << dinfo->mode))) {
- util_primconvert_save_index_buffer(vctx->primconvert, &vctx->index_buffer);
util_primconvert_draw_vbo(vctx->primconvert, dinfo);
return;
}
- if (info.indexed) {
- pipe_resource_reference(&ib.buffer, vctx->index_buffer.buffer);
- ib.user_buffer = vctx->index_buffer.user_buffer;
- ib.index_size = vctx->index_buffer.index_size;
- ib.offset = vctx->index_buffer.offset + info.start * ib.index_size;
+ if (info.index_size) {
+ pipe_resource_reference(&ib.buffer, info.has_user_indices ? NULL : info.index.resource);
+ ib.user_buffer = info.has_user_indices ? info.index.user : NULL;
+ ib.index_size = dinfo->index_size;
+ ib.offset = info.start * ib.index_size;
if (ib.user_buffer) {
u_upload_data(vctx->uploader, 0, info.count * ib.index_size, 256,
vctx->num_draws++;
virgl_hw_set_vertex_buffers(ctx);
- if (info.indexed)
+ if (info.index_size)
virgl_hw_set_index_buffer(ctx, &ib);
virgl_encoder_draw_vbo(vctx, &info);
vctx->base.bind_vertex_elements_state = virgl_bind_vertex_elements_state;
vctx->base.delete_vertex_elements_state = virgl_delete_vertex_elements_state;
vctx->base.set_vertex_buffers = virgl_set_vertex_buffers;
- vctx->base.set_index_buffer = virgl_set_index_buffer;
vctx->base.set_constant_buffer = virgl_set_constant_buffer;
vctx->base.create_vs_state = virgl_create_vs_state;
struct slab_child_pool texture_transfer_pool;
- struct pipe_index_buffer index_buffer;
struct u_upload_mgr *uploader;
struct pipe_vertex_buffer vertex_buffer[PIPE_MAX_ATTRIBS];
}
int virgl_encoder_set_index_buffer(struct virgl_context *ctx,
- const struct pipe_index_buffer *ib)
+ const struct virgl_indexbuf *ib)
{
int length = VIRGL_SET_INDEX_BUFFER_SIZE(ib);
struct virgl_resource *res = NULL;
virgl_encoder_write_res(ctx, res);
if (ib) {
virgl_encoder_write_dword(ctx->cbuf, ib->index_size);
- virgl_encoder_write_dword(ctx->cbuf, ib->offset);
+ virgl_encoder_write_dword(ctx->cbuf, 0);
}
return 0;
}
virgl_encoder_write_dword(ctx->cbuf, info->start);
virgl_encoder_write_dword(ctx->cbuf, info->count);
virgl_encoder_write_dword(ctx->cbuf, info->mode);
- virgl_encoder_write_dword(ctx->cbuf, info->indexed);
+ virgl_encoder_write_dword(ctx->cbuf, !!info->index_size);
virgl_encoder_write_dword(ctx->cbuf, info->instance_count);
virgl_encoder_write_dword(ctx->cbuf, info->index_bias);
virgl_encoder_write_dword(ctx->cbuf, info->start_instance);
uint32_t handle;
};
+struct virgl_indexbuf {
+ unsigned offset;
+ unsigned index_size; /**< size of an index, in bytes */
+ struct pipe_resource *buffer; /**< the actual buffer */
+ const void *user_buffer; /**< pointer to a user buffer if buffer == NULL */
+};
+
static inline struct virgl_surface *virgl_surface(struct pipe_surface *surf)
{
return (struct virgl_surface *)surf;
uint32_t *handles);
int virgl_encoder_set_index_buffer(struct virgl_context *ctx,
- const struct pipe_index_buffer *ib);
+ const struct virgl_indexbuf *ib);
uint32_t virgl_object_assign_handle(void);
struct pipe_fence_handle;
struct pipe_framebuffer_state;
struct pipe_image_view;
-struct pipe_index_buffer;
struct pipe_query;
struct pipe_poly_stipple;
struct pipe_rasterizer_state;
unsigned num_buffers,
const struct pipe_vertex_buffer * );
- void (*set_index_buffer)( struct pipe_context *pipe,
- const struct pipe_index_buffer * );
-
/*@}*/
/**
};
-/**
- * An index buffer. When an index buffer is bound, all indices to vertices
- * will be looked up in the buffer.
- */
-struct pipe_index_buffer
-{
- unsigned index_size; /**< size of an index, in bytes */
- unsigned offset; /**< offset to start of data in buffer, in bytes */
- struct pipe_resource *buffer; /**< the actual buffer */
- const void *user_buffer; /**< pointer to a user buffer if buffer == NULL */
-};
-
-
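
With pipe_index_buffer and set_index_buffer gone, callers describe indices directly in pipe_draw_info (fields added below). A sketch of an indexed draw under the new interface; index_buf, first_index and num_indices are hypothetical placeholders and the remaining draw state is elided:

   struct pipe_draw_info info;
   util_draw_init_info(&info);
   info.index_size = 2;                  /* was pipe_index_buffer::index_size */
   info.has_user_indices = false;
   info.index.resource = index_buf;      /* was pipe_index_buffer::buffer */
   info.start = first_index;             /* old byte offset becomes start * index_size */
   info.count = num_indices;
   pipe->draw_vbo(pipe, &info);
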
struct pipe_draw_indirect_info
{
unsigned offset; /**< must be 4 byte aligned */
/* Indirect draw parameters resource is laid out as follows:
*
- * if indexed is TRUE:
+ * if using indexed drawing:
* struct {
* uint32_t count;
* uint32_t instance_count;
*/
struct pipe_draw_info
{
- boolean indexed; /**< use index buffer */
+ ubyte index_size; /**< if 0, the draw is not indexed. */
enum pipe_prim_type mode:8; /**< the mode of the primitive */
- boolean primitive_restart;
+ unsigned primitive_restart:1;
+ unsigned has_user_indices:1; /**< if true, use index.user */
ubyte vertices_per_patch; /**< the number of vertices per patch */
- unsigned start; /**< the index of the first vertex */
+ /**
+ * Direct draws: start is the index of the first vertex
+ * Non-indexed indirect draws: not used
+ * Indexed indirect draws: start is added to the indirect start.
+ */
+ unsigned start;
unsigned count; /**< number of vertices */
unsigned start_instance; /**< first instance id */
/* Pointers must be at the end for an optimal structure layout on 64-bit. */
+ /**
+ * Index data for this draw. When index_size is non-zero, all vertex
+ * indices for the draw are read from this buffer; there is no separately
+ * bound index buffer any more.
+ *
+ * If has_user_indices, use index.user, else use index.resource.
+ */
+ union {
+ struct pipe_resource *resource; /**< real buffer */
+ const void *user; /**< pointer to a user buffer */
+ } index;
+
struct pipe_draw_indirect_info *indirect; /**< Indirect draw. */
/**
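
Taken together, index_size, has_user_indices, the index union and the new meaning of start replace the previously bound index-buffer state. A sketch of how a caller fills the structure under the new contract; the function name and the res/user_indices/byte_offset parameters are placeholders, not part of the patch (needs pipe/p_context.h, pipe/p_state.h and util/u_draw.h):

   static void
   draw_indexed_example(struct pipe_context *pipe, struct pipe_resource *res,
                        const void *user_indices, unsigned byte_offset)
   {
      struct pipe_draw_info info;

      util_draw_init_info(&info);
      info.mode = PIPE_PRIM_TRIANGLES;
      info.index_size = 2;                 /* 16-bit indices; 0 means non-indexed */
      info.count = 36;

      if (user_indices) {
         info.has_user_indices = true;
         info.index.user = user_indices;   /* CPU pointer, no pipe_resource involved */
         info.start = 0;                   /* offset into that array, in index units */
      } else {
         info.index.resource = res;        /* buffer object holding the indices */
         info.start = byte_offset / info.index_size; /* byte offset folded into start */
      }
      pipe->draw_vbo(pipe, &info);
   }
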
UINT VertexStreamZeroStride )
{
struct pipe_vertex_buffer vbuf;
- struct pipe_index_buffer ibuf;
DBG("iface %p, PrimitiveType %u, MinVertexIndex %u, NumVertices %u "
"PrimitiveCount %u, pIndexData %p, IndexDataFormat %u "
vbuf.is_user_buffer = true;
vbuf.buffer.user = pVertexStreamZeroData;
- ibuf.index_size = (IndexDataFormat == D3DFMT_INDEX16) ? 2 : 4;
- ibuf.offset = 0;
- ibuf.buffer = NULL;
- ibuf.user_buffer = pIndexData;
+ unsigned index_size = (IndexDataFormat == D3DFMT_INDEX16) ? 2 : 4;
+ struct pipe_resource *ibuf = NULL;
if (!This->driver_caps.user_vbufs) {
const unsigned base = MinVertexIndex * VertexStreamZeroStride;
/* Won't be used: */
vbuf.buffer_offset -= base;
}
+
+ unsigned index_offset = 0;
if (This->csmt_active) {
u_upload_data(This->pipe_secondary->stream_uploader,
0,
- (prim_count_to_vertex_count(PrimitiveType, PrimitiveCount)) * ibuf.index_size,
+ (prim_count_to_vertex_count(PrimitiveType, PrimitiveCount)) * index_size,
4,
- ibuf.user_buffer,
- &ibuf.offset,
- &ibuf.buffer);
+ pIndexData,
+ &index_offset,
+ &ibuf);
u_upload_unmap(This->pipe_secondary->stream_uploader);
- ibuf.user_buffer = NULL;
}
NineBeforeDraw(This);
NumVertices,
PrimitiveCount,
&vbuf,
- &ibuf);
+ ibuf,
+ ibuf ? NULL : (void*)pIndexData,
+ index_offset,
+ index_size);
NineAfterDraw(This);
pipe_vertex_buffer_unreference(&vbuf);
- pipe_resource_reference(&ibuf.buffer, NULL);
+ pipe_resource_reference(&ibuf, NULL);
NineDevice9_PauseRecording(This);
NineDevice9_SetIndices(This, NULL);
draw.count_from_stream_output = NULL;
draw.indirect = NULL;
draw.instance_count = 1;
- draw.indexed = FALSE;
+ draw.index_size = 0;
draw.start = 0;
draw.index_bias = 0;
draw.min_index = 0;
if (FAILED(hr))
return hr;
- This->buffer.buffer = NULL;
- This->buffer.offset = 0;
+ This->buffer = NULL;
+ This->offset = 0;
switch (pDesc->Format) {
- case D3DFMT_INDEX16: This->buffer.index_size = 2; break;
- case D3DFMT_INDEX32: This->buffer.index_size = 4; break;
+ case D3DFMT_INDEX16: This->index_size = 2; break;
+ case D3DFMT_INDEX32: This->index_size = 4; break;
default:
user_assert(!"Invalid index format.", D3DERR_INVALIDCALL);
break;
}
- This->buffer.user_buffer = NULL;
pDesc->Type = D3DRTYPE_INDEXBUFFER;
This->desc = *pDesc;
NineBuffer9_dtor(&This->base);
}
-const struct pipe_index_buffer *
+struct pipe_resource *
NineIndexBuffer9_GetBuffer( struct NineIndexBuffer9 *This )
{
/* The resource may change */
- This->buffer.buffer = NineBuffer9_GetResource(&This->base, &This->buffer.offset);
- return &This->buffer;
+ This->buffer = NineBuffer9_GetResource(&This->base, &This->offset);
+ return This->buffer;
}
HRESULT NINE_WINAPI
struct pipe_screen;
struct pipe_context;
-struct pipe_index_buffer;
struct pipe_transfer;
struct NineDevice9;
struct NineBuffer9 base;
/* g3d stuff */
- struct pipe_index_buffer buffer;
+ struct pipe_resource *buffer;
+ unsigned offset;
+ unsigned index_size;
D3DINDEXBUFFER_DESC desc;
};
/*** Nine private ***/
-const struct pipe_index_buffer *
+struct pipe_resource *
NineIndexBuffer9_GetBuffer( struct NineIndexBuffer9 *This );
/*** Direct3D public ***/
cso_set_rasterizer(context->cso, &context->pipe_data.rast);
}
-static inline void
-commit_index_buffer(struct NineDevice9 *device)
-{
- struct nine_context *context = &device->context;
- struct pipe_context *pipe = context->pipe;
- if (context->idxbuf.buffer)
- pipe->set_index_buffer(pipe, &context->idxbuf);
- else
- pipe->set_index_buffer(pipe, NULL);
-}
-
static inline void
commit_vs_constants(struct NineDevice9 *device)
{
update_viewport(device);
if (group & (NINE_STATE_VDECL | NINE_STATE_VS | NINE_STATE_STREAMFREQ))
update_vertex_elements(device);
- if (group & NINE_STATE_IDXBUF)
- commit_index_buffer(device);
}
if (likely(group & (NINE_STATE_FREQUENT | NINE_STATE_VS | NINE_STATE_PS | NINE_STATE_SWVP))) {
{
struct nine_context *context = &device->context;
- context->idxbuf.index_size = IndexSize;
- context->idxbuf.offset = OffsetInBytes;
- pipe_resource_reference(&context->idxbuf.buffer, res);
- context->idxbuf.user_buffer = NULL;
+ context->index_size = IndexSize;
+ context->index_offset = OffsetInBytes;
+ pipe_resource_reference(&context->idxbuf, res);
context->changed.group |= NINE_STATE_IDXBUF;
}
nine_context_set_indices(struct NineDevice9 *device,
struct NineIndexBuffer9 *idxbuf)
{
- const struct pipe_index_buffer *pipe_idxbuf;
struct pipe_resource *res = NULL;
UINT IndexSize = 0;
UINT OffsetInBytes = 0;
if (idxbuf) {
- pipe_idxbuf = NineIndexBuffer9_GetBuffer(idxbuf);
- IndexSize = pipe_idxbuf->index_size;
- res = pipe_idxbuf->buffer;
- OffsetInBytes = pipe_idxbuf->offset;
+ res = NineIndexBuffer9_GetBuffer(idxbuf);
+ IndexSize = idxbuf->index_size;
+ OffsetInBytes = idxbuf->offset;
}
nine_context_set_indices_apply(device, res, IndexSize, OffsetInBytes);
nine_update_state(device);
init_draw_info(&info, device, PrimitiveType, PrimitiveCount);
- info.indexed = FALSE;
+ info.index_size = 0;
info.start = StartVertex;
info.index_bias = 0;
info.min_index = info.start;
nine_update_state(device);
init_draw_info(&info, device, PrimitiveType, PrimitiveCount);
- info.indexed = TRUE;
- info.start = StartIndex;
+ info.index_size = context->index_size;
+ info.start = context->index_offset / context->index_size + StartIndex;
info.index_bias = BaseVertexIndex;
/* These don't include index bias: */
info.min_index = MinVertexIndex;
info.max_index = MinVertexIndex + NumVertices - 1;
+ info.index.resource = context->idxbuf;
context->pipe->draw_vbo(context->pipe, &info);
}
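
Worked example with hypothetical values: a 16-bit index buffer bound with OffsetInBytes = 120 and a DrawIndexedPrimitive call with StartIndex = 6 now yield info.start = 120 / 2 + 6 = 66; the byte offset that used to live in the bound index-buffer state is folded into the start index instead.
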
nine_update_state(device);
init_draw_info(&info, device, PrimitiveType, PrimitiveCount);
- info.indexed = FALSE;
+ info.index_size = 0;
info.start = 0;
info.index_bias = 0;
info.min_index = 0;
ARG_VAL(UINT, NumVertices),
ARG_VAL(UINT, PrimitiveCount),
ARG_BIND_VBUF(struct pipe_vertex_buffer, vbuf),
- ARG_BIND_IBUF(struct pipe_index_buffer, ibuf))
+ ARG_BIND_RES(struct pipe_resource, ibuf),
+ ARG_VAL(void *, user_ibuf),
+ ARG_VAL(UINT, index_offset),
+ ARG_VAL(UINT, index_size))
{
struct nine_context *context = &device->context;
struct pipe_draw_info info;
nine_update_state(device);
init_draw_info(&info, device, PrimitiveType, PrimitiveCount);
- info.indexed = TRUE;
- info.start = 0;
+ info.index_size = index_size;
+ info.start = index_offset / info.index_size;
info.index_bias = 0;
info.min_index = MinVertexIndex;
info.max_index = MinVertexIndex + NumVertices - 1;
+ info.has_user_indices = ibuf == NULL;
+ if (ibuf)
+ info.index.resource = ibuf;
+ else
+ info.index.user = user_ibuf;
+
context->pipe->set_vertex_buffers(context->pipe, 0, 1, vbuf);
- context->pipe->set_index_buffer(context->pipe, ibuf);
context->pipe->draw_vbo(context->pipe, &info);
}
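
Note that the division assumes index_offset is a multiple of index_size. For the user-pointer path that goes through u_upload_data() in DrawIndexedPrimitiveUP earlier in this patch, the 4-byte alignment requested there guarantees this for both 2- and 4-byte indices.
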
cso_set_sampler_views(cso, PIPE_SHADER_FRAGMENT, 0, NULL);
pipe->set_vertex_buffers(pipe, 0, device->caps.MaxStreams, NULL);
- pipe->set_index_buffer(pipe, NULL);
for (i = 0; i < ARRAY_SIZE(context->rt); ++i)
nine_bind(&context->rt[i], NULL);
nine_bind(&context->vdecl, NULL);
for (i = 0; i < PIPE_MAX_ATTRIBS; ++i)
pipe_vertex_buffer_unreference(&context->vtxbuf[i]);
- pipe_resource_reference(&context->idxbuf.buffer, NULL);
+ pipe_resource_reference(&context->idxbuf, NULL);
for (i = 0; i < NINE_MAX_SAMPLERS; ++i) {
context->texture[i].enabled = FALSE;
uint32_t stream_instancedata_mask; /* derived from stream_freq */
uint32_t stream_usage_mask; /* derived from VS and vdecl */
- struct pipe_index_buffer idxbuf;
+ struct pipe_resource *idxbuf;
+ unsigned index_offset;
+ unsigned index_size;
struct pipe_clip_state clip;
UINT NumVertices,
UINT PrimitiveCount,
struct pipe_vertex_buffer *vbuf,
- struct pipe_index_buffer *ibuf);
+ struct pipe_resource *ibuf,
+ void *user_ibuf,
+ unsigned index_offset,
+ unsigned index_size);
void
nine_context_resource_copy_region(struct NineDevice9 *device,
}
-/**
- * Basically, translate Mesa's index buffer information into
- * a pipe_index_buffer object.
- */
-static void
-setup_index_buffer(struct st_context *st,
- const struct _mesa_index_buffer *ib)
-{
- struct pipe_index_buffer ibuffer;
- struct gl_buffer_object *bufobj = ib->obj;
-
- ibuffer.index_size = ib->index_size;
-
- /* get/create the index buffer object */
- if (_mesa_is_bufferobj(bufobj)) {
- /* indices are in a real VBO */
- ibuffer.buffer = st_buffer_object(bufobj)->buffer;
- ibuffer.offset = pointer_to_offset(ib->ptr);
- ibuffer.user_buffer = NULL;
- }
- else {
- /* indices are in user space memory */
- ibuffer.buffer = NULL;
- ibuffer.offset = 0;
- ibuffer.user_buffer = ib->ptr;
- }
-
- cso_set_index_buffer(st->cso_context, &ibuffer);
-}
-
-
/**
* Set the restart index.
*/
static void
-setup_primitive_restart(struct gl_context *ctx, struct pipe_draw_info *info,
- unsigned index_size)
+setup_primitive_restart(struct gl_context *ctx, struct pipe_draw_info *info)
{
if (ctx->Array._PrimitiveRestart) {
+ unsigned index_size = info->index_size;
+
info->restart_index =
_mesa_primitive_restart_index(ctx, index_size);
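
As a reminder of the values involved (per the GL spec, not spelled out in the patch): with GL_PRIMITIVE_RESTART_FIXED_INDEX the restart index is derived from the index type, 0xFF / 0xFFFF / 0xFFFFFFFF for 1-, 2- and 4-byte indices respectively, while plain GL_PRIMITIVE_RESTART uses the value set with glPrimitiveRestartIndex(). Passing info->index_size lets the helper resolve the fixed-index case without a separate parameter.
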
struct pipe_draw_info info;
const struct gl_vertex_array **arrays = ctx->Array._DrawArrays;
unsigned i;
+ unsigned start = 0;
/* Mesa core state should have been validated already */
assert(ctx->NewState == 0x0);
util_draw_init_info(&info);
if (ib) {
+ struct gl_buffer_object *bufobj = ib->obj;
+
/* Get index bounds for user buffers. */
if (!index_bounds_valid)
if (!all_varyings_in_vbos(arrays))
vbo_get_minmax_indices(ctx, prims, ib, &min_index, &max_index,
nr_prims);
- setup_index_buffer(st, ib);
-
- info.indexed = TRUE;
+ info.index_size = ib->index_size;
info.min_index = min_index;
info.max_index = max_index;
- setup_primitive_restart(ctx, &info, ib->index_size);
+ if (_mesa_is_bufferobj(bufobj)) {
+ /* indices are in a real VBO */
+ info.index.resource = st_buffer_object(bufobj)->buffer;
+ start = pointer_to_offset(ib->ptr) / info.index_size;
+ } else {
+ /* indices are in user space memory */
+ info.has_user_indices = true;
+ info.index.user = ib->ptr;
+ }
+
+ setup_primitive_restart(ctx, &info);
}
else {
/* Transform feedback drawing is always non-indexed. */
/* do actual drawing */
for (i = 0; i < nr_prims; i++) {
info.mode = translate_prim(ctx, prims[i].mode);
- info.start = prims[i].start;
+ info.start = start + prims[i].start;
info.count = prims[i].count;
info.start_instance = prims[i].base_instance;
info.instance_count = prims[i].num_instances;
}
if (ST_DEBUG & DEBUG_DRAW) {
- debug_printf("st/draw: mode %s start %u count %u indexed %d\n",
+ debug_printf("st/draw: mode %s start %u count %u index_size %d\n",
u_prim_name(info.mode),
info.start,
info.count,
- info.indexed);
+ info.index_size);
}
/* Don't call u_trim_pipe_prim. Drivers should do it if they need it. */
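
For a rough example of the new start computation: glDrawElements(GL_TRIANGLES, 6, GL_UNSIGNED_SHORT, (const void *)100) with an element array buffer bound makes ib->ptr a 100-byte offset, so start = 100 / 2 = 50 and each primitive is drawn with info.start = 50 + prims[i].start. With a user-space index array the pointer goes into info.index.user and start stays 0.
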
memset(&indirect, 0, sizeof(indirect));
util_draw_init_info(&info);
+ info.start = 0; /* index offset / index size */
if (ib) {
- setup_index_buffer(st, ib);
+ struct gl_buffer_object *bufobj = ib->obj;
+
+ /* indices are always in a real VBO */
+ assert(_mesa_is_bufferobj(bufobj));
- info.indexed = TRUE;
+ info.index_size = ib->index_size;
+ info.index.resource = st_buffer_object(bufobj)->buffer;
+ info.start = pointer_to_offset(ib->ptr) / info.index_size;
/* Primitive restart is not handled by the VBO module in this case. */
- setup_primitive_restart(ctx, &info, ib->index_size);
+ setup_primitive_restart(ctx, &info);
}
info.mode = translate_prim(ctx, mode);
indirect.offset = indirect_offset;
if (ST_DEBUG & DEBUG_DRAW) {
- debug_printf("st/draw indirect: mode %s drawcount %d indexed %d\n",
+ debug_printf("st/draw indirect: mode %s drawcount %d index_size %d\n",
u_prim_name(info.mode),
draw_count,
- info.indexed);
+ info.index_size);
}
if (!st->has_multi_draw_indirect) {
const struct pipe_shader_state *vs;
struct pipe_vertex_buffer vbuffers[PIPE_MAX_SHADER_INPUTS];
struct pipe_vertex_element velements[PIPE_MAX_ATTRIBS];
- struct pipe_index_buffer ibuffer;
struct pipe_transfer *vb_transfer[PIPE_MAX_ATTRIBS] = {NULL};
struct pipe_transfer *ib_transfer = NULL;
const struct gl_vertex_array **arrays = ctx->Array._DrawArrays;
draw_set_vertex_buffers(draw, 0, vp->num_inputs, vbuffers);
draw_set_vertex_elements(draw, vp->num_inputs, velements);
- memset(&ibuffer, 0, sizeof(ibuffer));
+ unsigned start = 0;
+
if (ib) {
struct gl_buffer_object *bufobj = ib->obj;
+ unsigned index_size = ib->index_size;
- ibuffer.index_size = ib->index_size;
- if (ibuffer.index_size == 0)
+ if (index_size == 0)
goto out_unref_vertex;
if (bufobj && bufobj->Name) {
struct st_buffer_object *stobj = st_buffer_object(bufobj);
- pipe_resource_reference(&ibuffer.buffer, stobj->buffer);
- ibuffer.offset = pointer_to_offset(ib->ptr);
-
+ start = pointer_to_offset(ib->ptr) / index_size;
mapped_indices = pipe_buffer_map(pipe, stobj->buffer,
PIPE_TRANSFER_READ, &ib_transfer);
}
else {
- /* skip setting ibuffer.buffer as the draw module does not use it */
mapped_indices = ib->ptr;
}
draw_set_indexes(draw,
- (ubyte *) mapped_indices + ibuffer.offset,
- ibuffer.index_size, ~0);
+ (ubyte *) mapped_indices,
+ index_size, ~0);
}
/* set the constant buffer */
/* draw here */
for (i = 0; i < nr_prims; i++) {
- draw_arrays(draw, prims[i].mode, prims[i].start, prims[i].count);
+ draw_arrays(draw, prims[i].mode, start + prims[i].start, prims[i].count);
}
draw_set_indexes(draw, NULL, 0, 0);
if (ib_transfer)
pipe_buffer_unmap(pipe, ib_transfer);
- pipe_resource_reference(&ibuffer.buffer, NULL);
}
out_unref_vertex:
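
The same folding happens on the software path: the mapped index pointer is handed to the draw module unadjusted and the byte offset only shows up in the draw start. A tiny sketch with hypothetical numbers (ib->ptr equal to a 64-byte offset, 16-bit indices):

   draw_set_indexes(draw, (ubyte *) mapped_indices, 2 /* index_size */, ~0);
   /* start = 64 / 2 = 32, so the offset is applied purely through draw_arrays() */
   draw_arrays(draw, PIPE_PRIM_TRIANGLES, 32 + prims[0].start, prims[0].count);
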