case GL_UNSIGNED_INT: return uint_types_scale[size];
case GL_UNSIGNED_SHORT: return ushort_types_scale[size];
case GL_UNSIGNED_BYTE: return ubyte_types_scale[size];
+ /* This produces GL_FIXED inputs as values between INT32_MIN and
+ * INT32_MAX, which will be scaled down by 1/65536 by the VS.
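+ * For example, 1.5 in 16.16 fixed point is 0x00018000 (98304); the VF
+ * fetches it as 98304.0f and the VS multiplies by 1.0f/65536 to get 1.5f.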
+ */
+ case GL_FIXED: return int_types_scale[size];
default: assert(0); return 0;
}
}
case GL_UNSIGNED_INT: return sizeof(GLuint);
case GL_UNSIGNED_SHORT: return sizeof(GLushort);
case GL_UNSIGNED_BYTE: return sizeof(GLubyte);
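+ /* GL_FIXED is a 32-bit (16.16) two's-complement fixed-point type. */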
+ case GL_FIXED: return sizeof(GLuint);
default: assert(0); return 0;
}
}
{
struct gl_context *ctx = &brw->intel.ctx;
struct intel_context *intel = intel_context(ctx);
+ /* CACHE_NEW_VS_PROG */
GLbitfield vs_inputs = brw->vs.prog_data->inputs_read;
const unsigned char *ptr = NULL;
GLuint interleaved = 0, total_size = 0;
/* Accumulate the list of enabled arrays. */
brw->vb.nr_enabled = 0;
while (vs_inputs) {
- GLuint i = _mesa_ffsll(vs_inputs) - 1;
+ GLuint i = ffs(vs_inputs) - 1;
struct brw_vertex_element *input = &brw->vb.inputs[i];
vs_inputs &= ~(1 << i);
break;
d = brw->vb.buffers[i].offset - brw->vb.current_buffers[i].offset;
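+ /* A negative offset delta cannot be applied as a start-vertex bias, so
+ * give up on reusing the previously emitted vertex buffer state.
+ */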
+ if (d < 0)
+ break;
if (i == 0)
delta = d / brw->vb.current_buffers[i].stride;
if (delta * brw->vb.current_buffers[i].stride != d)
*/
if (brw->vb.nr_enabled == 0) {
BEGIN_BATCH(3);
- OUT_BATCH((CMD_VERTEX_ELEMENT << 16) | 1);
+ OUT_BATCH((_3DSTATE_VERTEX_ELEMENTS << 16) | 1);
if (intel->gen >= 6) {
OUT_BATCH((0 << GEN6_VE0_INDEX_SHIFT) |
GEN6_VE0_VALID |
if (brw->vb.nr_buffers) {
BEGIN_BATCH(1 + 4*brw->vb.nr_buffers);
- OUT_BATCH((CMD_VERTEX_BUFFER << 16) | (4*brw->vb.nr_buffers - 1));
+ OUT_BATCH((_3DSTATE_VERTEX_BUFFERS << 16) | (4*brw->vb.nr_buffers - 1));
for (i = 0; i < brw->vb.nr_buffers; i++) {
struct brw_vertex_buffer *buffer = &brw->vb.buffers[i];
uint32_t dw0;
dw0 = BRW_VB0_ACCESS_VERTEXDATA | (i << BRW_VB0_INDEX_SHIFT);
}
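+ /* On Gen7+, the buffer address fields are only honored when the
+ * address-modify-enable bit is set.
+ */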
+ if (intel->gen >= 7)
+ dw0 |= GEN7_VB0_ADDRESS_MODIFYENABLE;
+
OUT_BATCH(dw0 | (buffer->stride << BRW_VB0_PITCH_SHIFT));
OUT_RELOC(buffer->bo, I915_GEM_DOMAIN_VERTEX, 0, buffer->offset);
if (intel->gen >= 5) {
OUT_RELOC(buffer->bo, I915_GEM_DOMAIN_VERTEX, 0, buffer->bo->size - 1);
} else
- OUT_BATCH(buffer->bo->size / buffer->stride);
+ OUT_BATCH(0);
OUT_BATCH(0); /* Instance data step rate */
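+ /* Record what was emitted so the next draw can detect unchanged
+ * vertex buffer state and skip re-emitting these packets.
+ */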
brw->vb.current_buffers[i].handle = buffer->bo->handle;
}
BEGIN_BATCH(1 + brw->vb.nr_enabled * 2);
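+ /* Each enabled element takes two DWords; the DWord Length field in the
+ * header excludes the first two DWords of the packet, hence 2*n - 1.
+ */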
- OUT_BATCH((CMD_VERTEX_ELEMENT << 16) | (2*brw->vb.nr_enabled - 1));
+ OUT_BATCH((_3DSTATE_VERTEX_ELEMENTS << 16) | (2*brw->vb.nr_enabled - 1));
for (i = 0; i < brw->vb.nr_enabled; i++) {
struct brw_vertex_element *input = brw->vb.enabled[i];
uint32_t format = get_surface_type(input->glarray->Type,
.dirty = {
.mesa = 0,
.brw = BRW_NEW_BATCH | BRW_NEW_VERTICES,
- .cache = 0,
+ .cache = CACHE_NEW_VS_PROG,
},
.prepare = brw_prepare_vertices,
.emit = brw_emit_vertices,
* rebase it into a temporary.
*/
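+ /* For example, GL_UNSIGNED_SHORT indices at an odd byte offset must be
+ * rebased, since (sizeof(GLushort) - 1) & offset is nonzero.
+ */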
if ((get_size(index_buffer->type) - 1) & offset) {
- GLubyte *map = ctx->Driver.MapBuffer(ctx,
- GL_ELEMENT_ARRAY_BUFFER_ARB,
- GL_DYNAMIC_DRAW_ARB,
- bufferobj);
- map += offset;
+ GLubyte *map = ctx->Driver.MapBufferRange(ctx,
+ offset,
+ ib_size,
+ GL_MAP_READ_BIT,
+ bufferobj);
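+ /* Copy the indices into the streaming upload buffer, which hands back
+ * a bo and an offset aligned to the index size.
+ */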
intel_upload_data(&brw->intel, map, ib_size, ib_type_size,
&bo, &offset);
brw->ib.start_vertex_offset = offset / ib_type_size;
- ctx->Driver.UnmapBuffer(ctx, GL_ELEMENT_ARRAY_BUFFER_ARB, bufferobj);
+ ctx->Driver.UnmapBuffer(ctx, bufferobj);
} else {
/* Use CMD_3D_PRIM's start_vertex_offset to avoid re-uploading
* the index buffer state when we're just moving the start index