This avoids repeated translations of the enum.
Reviewed-by: Ilia Mirkin <imirkin@alum.mit.edu>
Reviewed-by: Kenneth Graunke <kenneth@whitecape.org>
/* Updates are signaled by BRW_NEW_INDEX_BUFFER. */
struct brw_bo *bo;
uint32_t size;
- GLuint type;
+ unsigned index_size;
/* Offset to index buffer index to use in CMD_3D_PRIM so that we can
* avoid re-uploading the IB packet over and over if we're actually
const struct gl_vertex_array *glarray);
static inline unsigned
brw_get_index_type(unsigned index_size)
{
   /* index_size is the size in bytes of a single index: 1 (GL_UNSIGNED_BYTE),
    * 2 (GL_UNSIGNED_SHORT), or 4 (GL_UNSIGNED_INT).  Guard the domain
    * explicitly — the old GLenum-based version asserted this, and the
    * bit trick below is only meaningful for these three sizes.
    */
   assert(index_size == 1 || index_size == 2 || index_size == 4);

   /* The hw needs 0x00000000, 0x00000100, and 0x00000200 for ubyte, ushort,
    * and uint, respectively.  index_size >> 1 maps 1, 2, 4 to 0, 1, 2
    * (i.e. log2 of the size), which is then shifted into bits 9:8 of the
    * INDEX_BUFFER packet dword.
    */
   return (index_size >> 1) << 8;
}
void brw_prepare_vertices(struct brw_context *brw);
if (index_buffer == NULL)
return;
- ib_type_size = _mesa_sizeof_type(index_buffer->type);
+ ib_type_size = index_buffer->index_size;
ib_size = index_buffer->count ? ib_type_size * index_buffer->count :
index_buffer->obj->Size;
bufferobj = index_buffer->obj;
if (brw->ib.bo != old_bo)
brw->ctx.NewDriverState |= BRW_NEW_INDEX_BUFFER;
- if (index_buffer->type != brw->ib.type) {
- brw->ib.type = index_buffer->type;
+ if (index_buffer->index_size != brw->ib.index_size) {
+ brw->ib.index_size = index_buffer->index_size;
brw->ctx.NewDriverState |= BRW_NEW_INDEX_BUFFER;
}
}
BEGIN_BATCH(3);
OUT_BATCH(CMD_INDEX_BUFFER << 16 |
cut_index_setting |
- brw_get_index_type(index_buffer->type) |
+ brw_get_index_type(index_buffer->index_size) |
1);
OUT_RELOC(brw->ib.bo,
I915_GEM_DOMAIN_VERTEX, 0,
bool cut_index_will_work;
- switch (ib->type) {
- case GL_UNSIGNED_BYTE:
+ switch (ib->index_size) {
+ case 1:
cut_index_will_work = ctx->Array.RestartIndex == 0xff;
break;
- case GL_UNSIGNED_SHORT:
+ case 2:
cut_index_will_work = ctx->Array.RestartIndex == 0xffff;
break;
- case GL_UNSIGNED_INT:
+ case 4:
cut_index_will_work = ctx->Array.RestartIndex == 0xffffffff;
break;
default:
/* BRW_NEW_INDEX_BUFFER */
unsigned cut_index;
if (brw->ib.ib) {
- cut_index = _mesa_primitive_restart_index(ctx, brw->ib.type);
+ cut_index = _mesa_primitive_restart_index(ctx, brw->ib.index_size);
} else {
/* There's no index buffer, but primitive restart may still apply
* to glDrawArrays and such. FIXED_INDEX mode only applies to drawing
BEGIN_BATCH(5);
OUT_BATCH(CMD_INDEX_BUFFER << 16 | (5 - 2));
- OUT_BATCH(brw_get_index_type(index_buffer->type) | mocs_wb);
+ OUT_BATCH(brw_get_index_type(index_buffer->index_size) | mocs_wb);
OUT_RELOC64(brw->ib.bo, I915_GEM_DOMAIN_VERTEX, 0, 0);
OUT_BATCH(brw->ib.size);
ADVANCE_BATCH();
*/
brw->ctx.NewDriverState |= BRW_NEW_BLORP;
brw->no_depth_or_stencil = false;
- brw->ib.type = -1;
+ brw->ib.index_size = -1;
if (params->dst.enabled)
brw_render_cache_set_add_bo(brw, params->dst.addr.buffer);
brw->ctx.NewDriverState |= BRW_NEW_BATCH;
- brw->ib.type = -1;
+ brw->ib.index_size = -1;
/* We need to periodically reap the shader time results, because rollover
* happens every few seconds. We also want to see results every once in a
unsigned max_out;
if (ib) {
- switch (ib->type) {
- case GL_UNSIGNED_INT:
+ switch (ib->index_size) {
+ case 4:
max_out = MAX_OUT_I32;
break;
- case GL_UNSIGNED_SHORT:
+ case 2:
max_out = MAX_OUT_I16;
break;
- case GL_UNSIGNED_BYTE:
+ case 1:
max_out = MAX_OUT_I16;
break;
GLboolean imm = (render->mode == IMM);
int i, attr;
- if (ib)
- nouveau_init_array(&render->ib, 0, 0, ib->count, ib->type,
+ if (ib) {
+ GLenum ib_type;
+
+ if (ib->index_size == 4)
+ ib_type = GL_UNSIGNED_INT;
+ else if (ib->index_size == 2)
+ ib_type = GL_UNSIGNED_SHORT;
+ else
+ ib_type = GL_UNSIGNED_BYTE;
+
+ nouveau_init_array(&render->ib, 0, 0, ib->count, ib_type,
ib->obj, ib->ptr, GL_TRUE, ctx);
+ }
FOR_EACH_BOUND_ATTR(render, i, attr) {
const struct gl_vertex_array *array = arrays[attr];
unsigned
-_mesa_primitive_restart_index(const struct gl_context *ctx, GLenum ib_type)
+_mesa_primitive_restart_index(const struct gl_context *ctx,
+ unsigned index_size)
{
/* From the OpenGL 4.3 core specification, page 302:
* "If both PRIMITIVE_RESTART and PRIMITIVE_RESTART_FIXED_INDEX are
* is used."
*/
if (ctx->Array.PrimitiveRestartFixedIndex) {
- switch (ib_type) {
- case GL_UNSIGNED_BYTE:
+ switch (index_size) {
+ case 1:
return 0xff;
- case GL_UNSIGNED_SHORT:
+ case 2:
return 0xffff;
- case GL_UNSIGNED_INT:
+ case 4:
return 0xffffffff;
default:
- assert(!"_mesa_primitive_restart_index: Invalid index buffer type.");
+ assert(!"_mesa_primitive_restart_index: Invalid index size.");
}
}
_mesa_VertexAttribDivisor(GLuint index, GLuint divisor);
extern unsigned
-_mesa_primitive_restart_index(const struct gl_context *ctx, GLenum ib_type);
+_mesa_primitive_restart_index(const struct gl_context *ctx,
+ unsigned index_size);
extern void GLAPIENTRY
_mesa_BindVertexBuffer(GLuint bindingIndex, GLuint buffer, GLintptr offset,
struct pipe_index_buffer ibuffer;
struct gl_buffer_object *bufobj = ib->obj;
- ibuffer.index_size = vbo_sizeof_ib_type(ib->type);
+ ibuffer.index_size = ib->index_size;
/* get/create the index buffer object */
if (_mesa_is_bufferobj(bufobj)) {
* Set the restart index.
*/
static void
-setup_primitive_restart(struct gl_context *ctx,
- const struct _mesa_index_buffer *ib,
- struct pipe_draw_info *info)
+setup_primitive_restart(struct gl_context *ctx, struct pipe_draw_info *info,
+ unsigned index_size)
{
if (ctx->Array._PrimitiveRestart) {
- info->restart_index = _mesa_primitive_restart_index(ctx, ib->type);
+ info->restart_index =
+ _mesa_primitive_restart_index(ctx, index_size);
/* Enable primitive restart only when the restart index can have an
* effect. This is required for correctness in radeonsi VI support.
* Other hardware may also benefit from taking a faster, non-restart path
* when possible.
*/
- if ((ib->type == GL_UNSIGNED_INT) ||
- (ib->type == GL_UNSIGNED_SHORT && info->restart_index <= 0xffff) ||
- (ib->type == GL_UNSIGNED_BYTE && info->restart_index <= 0xff))
+ if (index_size == 4 || info->restart_index < (1 << (index_size * 8)))
info->primitive_restart = true;
}
}
/* The VBO module handles restart for the non-indexed GLDrawArrays
* so we only set these fields for indexed drawing:
*/
- setup_primitive_restart(ctx, ib, &info);
+ setup_primitive_restart(ctx, &info, ib->index_size);
}
else {
/* Transform feedback drawing is always non-indexed. */
info.indexed = TRUE;
/* Primitive restart is not handled by the VBO module in this case. */
- setup_primitive_restart(ctx, ib, &info);
+ setup_primitive_restart(ctx, &info, ib->index_size);
}
info.mode = translate_prim(ctx, mode);
if (ib) {
struct gl_buffer_object *bufobj = ib->obj;
- ibuffer.index_size = vbo_sizeof_ib_type(ib->type);
+ ibuffer.index_size = ib->index_size;
if (ibuffer.index_size == 0)
goto out_unref_vertex;
bo[*nr_bo] = ib->obj;
(*nr_bo)++;
ptr = ctx->Driver.MapBufferRange(ctx, (GLsizeiptr) ib->ptr,
- ib->count * vbo_sizeof_ib_type(ib->type),
+ ib->count * ib->index_size,
GL_MAP_READ_BIT, ib->obj,
MAP_INTERNAL);
assert(ib->obj->Mappings[MAP_INTERNAL].Pointer);
ptr = ADD_POINTERS(ib->obj->Mappings[MAP_INTERNAL].Pointer, ib->ptr);
}
- if (ib->type == GL_UNSIGNED_INT && VB->Primitive[0].basevertex == 0) {
+ if (ib->index_size == 4 && VB->Primitive[0].basevertex == 0) {
VB->Elts = (GLuint *) ptr;
}
else {
GLuint *elts = (GLuint *)get_space(ctx, ib->count * sizeof(GLuint));
VB->Elts = elts;
- if (ib->type == GL_UNSIGNED_INT) {
+ if (ib->index_size == 4) {
const GLuint *in = (GLuint *)ptr;
for (i = 0; i < ib->count; i++)
*elts++ = (GLuint)(*in++) + VB->Primitive[0].basevertex;
}
- else if (ib->type == GL_UNSIGNED_SHORT) {
+ else if (ib->index_size == 2) {
const GLushort *in = (GLushort *)ptr;
for (i = 0; i < ib->count; i++)
*elts++ = (GLuint)(*in++) + VB->Primitive[0].basevertex;
*/
struct _mesa_index_buffer {
GLuint count;
- GLenum type;
+ unsigned index_size;
struct gl_buffer_object *obj;
const void *ptr;
};
vbo_bind_arrays(ctx);
ib.count = count;
- ib.type = type;
+ ib.index_size = vbo_sizeof_ib_type(type);
ib.obj = ctx->Array.VAO->IndexBufferObj;
ib.ptr = indices;
if (!fallback) {
ib.count = (max_index_ptr - min_index_ptr) / index_type_size;
- ib.type = type;
+ ib.index_size = vbo_sizeof_ib_type(type);
ib.obj = ctx->Array.VAO->IndexBufferObj;
ib.ptr = (void *) min_index_ptr;
if (count[i] == 0)
continue;
ib.count = count[i];
- ib.type = type;
+ ib.index_size = vbo_sizeof_ib_type(type);
ib.obj = ctx->Array.VAO->IndexBufferObj;
ib.ptr = indices[i];
vbo_bind_arrays(ctx);
ib.count = 0; /* unknown */
- ib.type = type;
+ ib.index_size = vbo_sizeof_ib_type(type);
ib.obj = ctx->Array.VAO->IndexBufferObj;
ib.ptr = NULL;
/* NOTE: IndexBufferObj is guaranteed to be a VBO. */
ib.count = 0; /* unknown */
- ib.type = type;
+ ib.index_size = vbo_sizeof_ib_type(type);
ib.obj = ctx->Array.VAO->IndexBufferObj;
ib.ptr = NULL;
/* NOTE: IndexBufferObj is guaranteed to be a VBO. */
ib.count = 0; /* unknown */
- ib.type = type;
+ ib.index_size = vbo_sizeof_ib_type(type);
ib.obj = ctx->Array.VAO->IndexBufferObj;
ib.ptr = NULL;
struct minmax_cache_key {
GLintptr offset;
GLuint count;
- GLenum type;
+ unsigned index_size;
};
vbo_minmax_cache_key_equal(const struct minmax_cache_key *a,
const struct minmax_cache_key *b)
{
- return (a->offset == b->offset) && (a->count == b->count) && (a->type == b->type);
+ return (a->offset == b->offset) && (a->count == b->count) &&
+ (a->index_size == b->index_size);
}
static GLboolean
vbo_get_minmax_cached(struct gl_buffer_object *bufferObj,
- GLenum type, GLintptr offset, GLuint count,
+ unsigned index_size, GLintptr offset, GLuint count,
GLuint *min_index, GLuint *max_index)
{
GLboolean found = GL_FALSE;
goto out_invalidate;
}
- key.type = type;
+ key.index_size = index_size;
key.offset = offset;
key.count = count;
hash = vbo_minmax_cache_hash(&key);
static void
vbo_minmax_cache_store(struct gl_context *ctx,
struct gl_buffer_object *bufferObj,
- GLenum type, GLintptr offset, GLuint count,
+ unsigned index_size, GLintptr offset, GLuint count,
GLuint min, GLuint max)
{
struct minmax_cache_entry *entry;
entry->key.offset = offset;
entry->key.count = count;
- entry->key.type = type;
+ entry->key.index_size = index_size;
entry->min = min;
entry->max = max;
hash = vbo_minmax_cache_hash(&entry->key);
const GLuint count)
{
const GLboolean restart = ctx->Array._PrimitiveRestart;
- const GLuint restartIndex = _mesa_primitive_restart_index(ctx, ib->type);
- const int index_size = vbo_sizeof_ib_type(ib->type);
+ const GLuint restartIndex =
+ _mesa_primitive_restart_index(ctx, ib->index_size);
const char *indices;
GLuint i;
- indices = (char *) ib->ptr + prim->start * index_size;
+ indices = (char *) ib->ptr + prim->start * ib->index_size;
if (_mesa_is_bufferobj(ib->obj)) {
- GLsizeiptr size = MIN2(count * index_size, ib->obj->Size);
+ GLsizeiptr size = MIN2(count * ib->index_size, ib->obj->Size);
- if (vbo_get_minmax_cached(ib->obj, ib->type, (GLintptr) indices, count,
- min_index, max_index))
+ if (vbo_get_minmax_cached(ib->obj, ib->index_size, (GLintptr) indices,
+ count, min_index, max_index))
return;
indices = ctx->Driver.MapBufferRange(ctx, (GLintptr) indices, size,
MAP_INTERNAL);
}
- switch (ib->type) {
- case GL_UNSIGNED_INT: {
+ switch (ib->index_size) {
+ case 4: {
const GLuint *ui_indices = (const GLuint *)indices;
GLuint max_ui = 0;
GLuint min_ui = ~0U;
*max_index = max_ui;
break;
}
- case GL_UNSIGNED_SHORT: {
+ case 2: {
const GLushort *us_indices = (const GLushort *)indices;
GLuint max_us = 0;
GLuint min_us = ~0U;
*max_index = max_us;
break;
}
- case GL_UNSIGNED_BYTE: {
+ case 1: {
const GLubyte *ub_indices = (const GLubyte *)indices;
GLuint max_ub = 0;
GLuint min_ub = ~0U;
}
if (_mesa_is_bufferobj(ib->obj)) {
- vbo_minmax_cache_store(ctx, ib->obj, ib->type, prim->start, count,
+ vbo_minmax_cache_store(ctx, ib->obj, ib->index_size, prim->start, count,
*min_index, *max_index);
ctx->Driver.UnmapBuffer(ctx, ib->obj, MAP_INTERNAL);
}
GLuint sub_prim_num;
GLuint end_index;
GLuint sub_end_index;
- GLuint restart_index = _mesa_primitive_restart_index(ctx, ib->type);
+ GLuint restart_index = _mesa_primitive_restart_index(ctx, ib->index_size);
struct _mesa_prim temp_prim;
struct vbo_context *vbo = vbo_context(ctx);
vbo_draw_func draw_prims_func = vbo->draw_prims;
ptr = ADD_POINTERS(ib->obj->Mappings[MAP_INTERNAL].Pointer, ib->ptr);
- sub_prims = find_sub_primitives(ptr, vbo_sizeof_ib_type(ib->type),
+ sub_prims = find_sub_primitives(ptr, ib->index_size,
0, ib->count, restart_index,
&num_sub_prims);
/* Some users might prefer it if we translated elements to
* GLuints here. Others wouldn't...
*/
- switch (ib->type) {
- case GL_UNSIGNED_INT:
+ switch (ib->index_size) {
+ case 4:
tmp_indices = rebase_GLuint( ptr, ib->count, min_index );
break;
- case GL_UNSIGNED_SHORT:
+ case 2:
tmp_indices = rebase_GLushort( ptr, ib->count, min_index );
break;
- case GL_UNSIGNED_BYTE:
+ case 1:
tmp_indices = rebase_GLubyte( ptr, ib->count, min_index );
break;
}
tmp_ib.obj = ctx->Shared->NullBufferObj;
tmp_ib.ptr = tmp_indices;
tmp_ib.count = ib->count;
- tmp_ib.type = ib->type;
+ tmp_ib.index_size = ib->index_size;
ib = &tmp_ib;
}
ADD_POINTERS(copy->ib->obj->Mappings[MAP_INTERNAL].Pointer,
copy->ib->ptr);
- switch (copy->ib->type) {
- case GL_UNSIGNED_BYTE:
+ switch (copy->ib->index_size) {
+ case 1:
copy->translated_elt_buf = malloc(sizeof(GLuint) * copy->ib->count);
copy->srcelt = copy->translated_elt_buf;
copy->translated_elt_buf[i] = ((const GLubyte *)srcptr)[i];
break;
- case GL_UNSIGNED_SHORT:
+ case 2:
copy->translated_elt_buf = malloc(sizeof(GLuint) * copy->ib->count);
copy->srcelt = copy->translated_elt_buf;
copy->translated_elt_buf[i] = ((const GLushort *)srcptr)[i];
break;
- case GL_UNSIGNED_INT:
+ case 4:
copy->translated_elt_buf = NULL;
copy->srcelt = (const GLuint *)srcptr;
break;
* list:
*/
copy->dstib.count = 0; /* duplicates dstelt_nr */
- copy->dstib.type = GL_UNSIGNED_INT;
+ copy->dstib.index_size = 4;
copy->dstib.obj = ctx->Shared->NullBufferObj;
copy->dstib.ptr = copy->dstelt;
}
ib.count = split->max_index - split->min_index + 1;
ib.ptr = (const void *)((const char *)ib.ptr +
- split->min_index * _mesa_sizeof_type(ib.type));
+ split->min_index * ib.index_size);
/* Rebase the primitives to save index buffer entries. */
for (i = 0; i < split->dstprim_nr; i++)
elts[j] = prim->start + j;
ib.count = count;
- ib.type = GL_UNSIGNED_INT;
+ ib.index_size = 4;
ib.obj = split->ctx->Shared->NullBufferObj;
ib.ptr = elts;