case GL_UNSIGNED_INT: return uint_types_scale[size];
case GL_UNSIGNED_SHORT: return ushort_types_scale[size];
case GL_UNSIGNED_BYTE: return ubyte_types_scale[size];
+ /* This produces GL_FIXED inputs as values between INT32_MIN and
+ * INT32_MAX, which will be scaled down by 1/65536 by the VS.
+ */
+ case GL_FIXED: return int_types_scale[size];
default: assert(0); return 0;
      }
}
}
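As an aside, the 1/65536 scale the comment above refers to is just the usual 16.16 fixed-point decode; a minimal sketch (the helper name is illustrative, not part of the patch):

   #include <stdint.h>

   /* Recover a float from a 16.16 GL_FIXED value: the attribute is
    * uploaded as a raw int32 and the VS applies this 1/65536 scale.
    */
   static float gl_fixed_to_float(int32_t fixed)
   {
      return (float)fixed * (1.0f / 65536.0f);
   }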
case GL_UNSIGNED_INT: return sizeof(GLuint);
case GL_UNSIGNED_SHORT: return sizeof(GLushort);
case GL_UNSIGNED_BYTE: return sizeof(GLubyte);
- default: return 0;
- }
+ case GL_FIXED: return sizeof(GLuint);
+ default: assert(0); return 0;
+ }
}
static GLuint get_index_type(GLenum type)
{
switch (type) {
case GL_UNSIGNED_BYTE: return BRW_INDEX_BYTE;
{
struct gl_context *ctx = &brw->intel.ctx;
struct intel_context *intel = intel_context(ctx);
+ /* CACHE_NEW_VS_PROG */
GLbitfield vs_inputs = brw->vs.prog_data->inputs_read;
const unsigned char *ptr = NULL;
GLuint interleaved = 0, total_size = 0;
/* Accumulate the list of enabled arrays. */
brw->vb.nr_enabled = 0;
while (vs_inputs) {
- GLuint i = _mesa_ffsll(vs_inputs) - 1;
+ GLuint i = ffs(vs_inputs) - 1;
struct brw_vertex_element *input = &brw->vb.inputs[i];
vs_inputs &= ~(1 << i);
- brw->vb.enabled[brw->vb.nr_enabled++] = input;
+ if (input->glarray->Size && get_size(input->glarray->Type))
+ brw->vb.enabled[brw->vb.nr_enabled++] = input;
}
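The loop above is the standard lowest-set-bit walk over a 32-bit mask; as a standalone sketch of the idiom (ffs() is the POSIX routine returning the 1-based index of the lowest set bit, or 0 when no bit is set):

   #include <strings.h>
   #include <stdint.h>

   /* Visit each set bit of a mask, lowest first, clearing bits as
    * they are consumed -- the same shape as the vs_inputs walk above.
    */
   static void for_each_set_bit(uint32_t mask, void (*fn)(unsigned bit))
   {
      while (mask) {
         unsigned i = ffs(mask) - 1;
         mask &= ~(1u << i);
         fn(i);
      }
   }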
if (brw->vb.nr_enabled == 0)
struct brw_vertex_buffer *buffer = &brw->vb.buffers[j];
/* Named buffer object: Just reference its contents directly. */
- buffer->bo = intel_bufferobj_source(intel, intel_buffer,
+ buffer->bo = intel_bufferobj_source(intel,
+ intel_buffer, type_size,
&buffer->offset);
drm_intel_bo_reference(buffer->bo);
buffer->offset += (uintptr_t)glarray->Ptr;
}
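Note the extra type_size argument threaded into intel_bufferobj_source() here (and into the index-buffer path below); the likely intent is that the offset it hands back comes aligned to the element size, so the later divisions into element counts stay exact.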
/* can we simply extend the current vb? */
- if (0 && j == brw->vb.nr_current_buffers) {
+ if (j == brw->vb.nr_current_buffers) {
int delta = 0;
for (i = 0; i < j; i++) {
int d;
break;
d = brw->vb.buffers[i].offset - brw->vb.current_buffers[i].offset;
- if (delta == 0)
+ if (d < 0)
+ break;
+ if (i == 0)
delta = d / brw->vb.current_buffers[i].stride;
if (delta * brw->vb.current_buffers[i].stride != d)
break;
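Restated as a self-contained predicate (the struct and names below are hypothetical stand-ins for brw->vb.current_buffers[]): the current packets can be reused only if every buffer kept its handle and stride and slid forward by the same non-negative whole number of vertices, which then becomes the start-vertex bias.

   #include <stdbool.h>
   #include <stdint.h>

   struct vb_snapshot {       /* hypothetical mirror of current_buffers[i] */
      uint32_t handle;
      uint32_t offset;
      uint32_t stride;
   };

   /* Strides are assumed non-zero, as in the loop above. */
   static bool vb_can_reuse(const struct vb_snapshot *old,
                            const struct vb_snapshot *cur,
                            int count, int *delta_out)
   {
      int delta = 0;

      for (int i = 0; i < count; i++) {
         int d = cur[i].offset - old[i].offset;

         if (old[i].handle != cur[i].handle ||
             old[i].stride != cur[i].stride)
            return false;
         if (d < 0)
            return false;
         if (i == 0)
            delta = d / (int)old[i].stride;
         if (delta * (int)old[i].stride != d)
            return false;
      }

      *delta_out = delta;
      return true;
   }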
*/
if (brw->vb.nr_enabled == 0) {
BEGIN_BATCH(3);
- OUT_BATCH((CMD_VERTEX_ELEMENT << 16) | 1);
+ OUT_BATCH((_3DSTATE_VERTEX_ELEMENTS << 16) | 1);
if (intel->gen >= 6) {
OUT_BATCH((0 << GEN6_VE0_INDEX_SHIFT) |
GEN6_VE0_VALID |
if (brw->vb.nr_buffers) {
BEGIN_BATCH(1 + 4*brw->vb.nr_buffers);
- OUT_BATCH((CMD_VERTEX_BUFFER << 16) | (4*brw->vb.nr_buffers - 1));
+ OUT_BATCH((_3DSTATE_VERTEX_BUFFERS << 16) | (4*brw->vb.nr_buffers - 1));
for (i = 0; i < brw->vb.nr_buffers; i++) {
struct brw_vertex_buffer *buffer = &brw->vb.buffers[i];
uint32_t dw0;
dw0 = BRW_VB0_ACCESS_VERTEXDATA | (i << BRW_VB0_INDEX_SHIFT);
}
+ if (intel->gen >= 7)
+ dw0 |= GEN7_VB0_ADDRESS_MODIFYENABLE;
+
OUT_BATCH(dw0 | (buffer->stride << BRW_VB0_PITCH_SHIFT));
OUT_RELOC(buffer->bo, I915_GEM_DOMAIN_VERTEX, 0, buffer->offset);
if (intel->gen >= 5) {
OUT_RELOC(buffer->bo, I915_GEM_DOMAIN_VERTEX, 0, buffer->bo->size - 1);
} else
- OUT_BATCH(buffer->bo->size / buffer->stride);
+ OUT_BATCH(0);
OUT_BATCH(0); /* Instance data step rate */
brw->vb.current_buffers[i].handle = buffer->bo->handle;
}
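For reference, the per-buffer payload the loop emits, paraphrased from the code above rather than from the hardware documentation:

   #include <stdint.h>

   /* Four dwords per buffer after the _3DSTATE_VERTEX_BUFFERS header. */
   struct vb_packet_entry {
      uint32_t dw0;         /* access mode, buffer index, pitch; gen7
                             * also sets GEN7_VB0_ADDRESS_MODIFYENABLE */
      uint32_t start;       /* reloc: address of the first vertex byte */
      uint32_t end_or_zero; /* gen5+: reloc to the last valid byte;
                             * 0 on older gens */
      uint32_t step_rate;   /* instance data step rate, 0 here */
   };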
BEGIN_BATCH(1 + brw->vb.nr_enabled * 2);
- OUT_BATCH((CMD_VERTEX_ELEMENT << 16) | (2*brw->vb.nr_enabled - 1));
+ OUT_BATCH((_3DSTATE_VERTEX_ELEMENTS << 16) | (2*brw->vb.nr_enabled - 1));
for (i = 0; i < brw->vb.nr_enabled; i++) {
struct brw_vertex_element *input = brw->vb.enabled[i];
uint32_t format = get_surface_type(input->glarray->Type,
.dirty = {
.mesa = 0,
.brw = BRW_NEW_BATCH | BRW_NEW_VERTICES,
- .cache = 0,
+ .cache = CACHE_NEW_VS_PROG,
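+      /* prog_data->inputs_read is consulted in prepare(), hence the
+       * CACHE_NEW_VS_PROG dependency noted above.
+       */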
},
.prepare = brw_prepare_vertices,
.emit = brw_emit_vertices,
intel_upload_data(&brw->intel, index_buffer->ptr, ib_size, ib_type_size,
&bo, &offset);
brw->ib.start_vertex_offset = offset / ib_type_size;
- offset = 0;
} else {
offset = (GLuint) (unsigned long) index_buffer->ptr;
intel_upload_data(&brw->intel, map, ib_size, ib_type_size,
&bo, &offset);
brw->ib.start_vertex_offset = offset / ib_type_size;
- offset = 0;
ctx->Driver.UnmapBuffer(ctx, GL_ELEMENT_ARRAY_BUFFER_ARB, bufferobj);
} else {
*/
brw->ib.start_vertex_offset = offset / ib_type_size;
- bo = intel_bufferobj_source(intel, intel_buffer_object(bufferobj),
+ bo = intel_bufferobj_source(intel,
+ intel_buffer_object(bufferobj),
+ ib_type_size,
&offset);
drm_intel_bo_reference(bo);
+
+ brw->ib.start_vertex_offset += offset / ib_type_size;
}
}
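A sketch of the offset bookkeeping in this branch, assuming intel_bufferobj_source() returns (through its last argument) the byte offset of the buffer's storage within the bo it hands back: the IB packet can then always point at offset 0, with both byte offsets folded into the primitive's start vertex.

   /* Hypothetical helper; both offsets are assumed to be multiples of
    * index_size (see the type_size alignment argument above).
    */
   static unsigned ib_start_vertex(unsigned app_byte_offset,
                                   unsigned src_byte_offset,
                                   unsigned index_size)
   {
      return app_byte_offset / index_size + src_byte_offset / index_size;
   }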
- if (brw->ib.bo != bo || brw->ib.offset != offset) {
+ if (brw->ib.bo != bo) {
drm_intel_bo_unreference(brw->ib.bo);
brw->ib.bo = bo;
- brw->ib.offset = offset;
brw_add_validated_bo(brw, brw->ib.bo);
brw->state.dirty.brw |= BRW_NEW_INDEX_BUFFER;
} else {
drm_intel_bo_unreference(bo);
}
+
+ if (index_buffer->type != brw->ib.type) {
+ brw->ib.type = index_buffer->type;
+ brw->state.dirty.brw |= BRW_NEW_INDEX_BUFFER;
+ }
}
const struct brw_tracked_state brw_indices = {
1);
OUT_RELOC(brw->ib.bo,
I915_GEM_DOMAIN_VERTEX, 0,
- brw->ib.offset);
+ 0);
OUT_RELOC(brw->ib.bo,
I915_GEM_DOMAIN_VERTEX, 0,
brw->ib.bo->size - 1);
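With every byte offset folded into start_vertex_offset, the INDEX_BUFFER relocations always target the start and end of the bo itself, which is what allows brw->ib.offset (and the offset comparison when deciding whether to flag BRW_NEW_INDEX_BUFFER) to be dropped above.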