*
**************************************************************************/
#include "main/glheader.h"
#include "main/bufferobj.h"
#include "main/context.h"
#include "main/enums.h"
+#include "main/macros.h"
#include "brw_draw.h"
#include "brw_defines.h"
BRW_SURFACEFORMAT_R16G16B16A16_FLOAT
};
+static GLuint uint_types_direct[5] = {
+ 0,
+ BRW_SURFACEFORMAT_R32_UINT,
+ BRW_SURFACEFORMAT_R32G32_UINT,
+ BRW_SURFACEFORMAT_R32G32B32_UINT,
+ BRW_SURFACEFORMAT_R32G32B32A32_UINT
+};
+
static GLuint uint_types_norm[5] = {
0,
BRW_SURFACEFORMAT_R32_UNORM,
BRW_SURFACEFORMAT_R32G32B32A32_USCALED
};
+static GLuint int_types_direct[5] = {
+ 0,
+ BRW_SURFACEFORMAT_R32_SINT,
+ BRW_SURFACEFORMAT_R32G32_SINT,
+ BRW_SURFACEFORMAT_R32G32B32_SINT,
+ BRW_SURFACEFORMAT_R32G32B32A32_SINT
+};
+
static GLuint int_types_norm[5] = {
0,
BRW_SURFACEFORMAT_R32_SNORM,
BRW_SURFACEFORMAT_R32G32B32A32_SSCALED
};
+static GLuint ushort_types_direct[5] = {
+ 0,
+ BRW_SURFACEFORMAT_R16_UINT,
+ BRW_SURFACEFORMAT_R16G16_UINT,
+ BRW_SURFACEFORMAT_R16G16B16A16_UINT, /* no 3-component UINT format; reuse the 4-component one */
+ BRW_SURFACEFORMAT_R16G16B16A16_UINT
+};
+
static GLuint ushort_types_norm[5] = {
0,
BRW_SURFACEFORMAT_R16_UNORM,
BRW_SURFACEFORMAT_R16G16B16A16_USCALED
};
+static GLuint short_types_direct[5] = {
+ 0,
+ BRW_SURFACEFORMAT_R16_SINT,
+ BRW_SURFACEFORMAT_R16G16_SINT,
+ BRW_SURFACEFORMAT_R16G16B16A16_SINT, /* no 3-component SINT format; reuse the 4-component one */
+ BRW_SURFACEFORMAT_R16G16B16A16_SINT
+};
+
static GLuint short_types_norm[5] = {
0,
BRW_SURFACEFORMAT_R16_SNORM,
BRW_SURFACEFORMAT_R16G16B16A16_SSCALED
};
+static GLuint ubyte_types_direct[5] = {
+ 0,
+ BRW_SURFACEFORMAT_R8_UINT,
+ BRW_SURFACEFORMAT_R8G8_UINT,
+ BRW_SURFACEFORMAT_R8G8B8A8_UINT, /* no 3-component UINT format; reuse the 4-component one */
+ BRW_SURFACEFORMAT_R8G8B8A8_UINT
+};
+
static GLuint ubyte_types_norm[5] = {
0,
BRW_SURFACEFORMAT_R8_UNORM,
BRW_SURFACEFORMAT_R8G8B8A8_USCALED
};
+static GLuint byte_types_direct[5] = {
+ 0,
+ BRW_SURFACEFORMAT_R8_SINT,
+ BRW_SURFACEFORMAT_R8G8_SINT,
+ BRW_SURFACEFORMAT_R8G8B8A8_SINT, /* no 3-component SINT format; reuse the 4-component one */
+ BRW_SURFACEFORMAT_R8G8B8A8_SINT
+};
+
static GLuint byte_types_norm[5] = {
0,
BRW_SURFACEFORMAT_R8_SNORM,
* Format will be GL_RGBA or possibly GL_BGRA for GLubyte[4] color arrays.
*/
static GLuint get_surface_type( GLenum type, GLuint size,
- GLenum format, GLboolean normalized )
+ GLenum format, bool normalized, bool integer )
{
if (unlikely(INTEL_DEBUG & DEBUG_VERTS))
printf("type %s size %d normalized %d\n",
_mesa_lookup_enum_by_nr(type), size, normalized);
- if (normalized) {
+ if (integer) {
+ assert(format == GL_RGBA); /* sanity check */
+ switch (type) {
+ case GL_INT: return int_types_direct[size];
+ case GL_SHORT: return short_types_direct[size];
+ case GL_BYTE: return byte_types_direct[size];
+ case GL_UNSIGNED_INT: return uint_types_direct[size];
+ case GL_UNSIGNED_SHORT: return ushort_types_direct[size];
+ case GL_UNSIGNED_BYTE: return ubyte_types_direct[size];
+ default: assert(0); return 0;
+ }
+ } else if (normalized) {
switch (type) {
case GL_DOUBLE: return double_types[size];
case GL_FLOAT: return float_types[size];
case GL_UNSIGNED_INT: return uint_types_scale[size];
case GL_UNSIGNED_SHORT: return ushort_types_scale[size];
case GL_UNSIGNED_BYTE: return ubyte_types_scale[size];
+ /* This produces GL_FIXED inputs as values between INT32_MIN and
+ * INT32_MAX, which will be scaled down by 1/65536 by the VS.
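+ * For example, the GL_FIXED encoding of 1.0 is 0x00010000 (65536),
+ * which the VS multiplies by 1/65536 to recover 1.0f.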
+ */
+ case GL_FIXED: return int_types_scale[size];
default: assert(0); return 0;
- }
+ }
}
}
case GL_UNSIGNED_INT: return sizeof(GLuint);
case GL_UNSIGNED_SHORT: return sizeof(GLushort);
case GL_UNSIGNED_BYTE: return sizeof(GLubyte);
- default: return 0;
- }
+ case GL_FIXED: return sizeof(GLfixed); /* 16.16 fixed point in 32 bits */
+ default: assert(0); return 0;
+ }
}
static GLuint get_index_type(GLenum type)
{
switch (type) {
case GL_UNSIGNED_BYTE: return BRW_INDEX_BYTE;
}
}
-static void wrap_buffers( struct brw_context *brw,
- GLuint size )
+static void
+copy_array_to_vbo_array(struct brw_context *brw,
+ struct brw_vertex_element *element,
+ int min, int max,
+ struct brw_vertex_buffer *buffer,
+ GLuint dst_stride)
{
- if (size < BRW_UPLOAD_INIT_SIZE)
- size = BRW_UPLOAD_INIT_SIZE;
-
- brw->vb.upload.offset = 0;
-
- if (brw->vb.upload.bo != NULL)
- drm_intel_bo_unreference(brw->vb.upload.bo);
- brw->vb.upload.bo = drm_intel_bo_alloc(brw->intel.bufmgr, "temporary VBO",
- size, 1);
-}
+ if (min == -1) {
+ /* If we don't have computed min/max bounds, then this must be a use of
+ * the current attribute, which has a 0 stride. Otherwise, we wouldn't
+ * know what data to upload.
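+ * (For example, a constant glVertexAttrib4f() current value is uploaded
+ * once and then fetched with stride 0 by every vertex.)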
+ */
+ assert(element->glarray->StrideB == 0);
-static void get_space( struct brw_context *brw,
- GLuint size,
- drm_intel_bo **bo_return,
- GLuint *offset_return )
-{
- size = ALIGN(size, 64);
+ intel_upload_data(&brw->intel, element->glarray->Ptr,
+ element->element_size,
+ element->element_size,
+ &buffer->bo, &buffer->offset);
- if (brw->vb.upload.bo == NULL ||
- brw->vb.upload.offset + size > brw->vb.upload.bo->size) {
- wrap_buffers(brw, size);
+ buffer->stride = 0;
+ return;
}
- assert(*bo_return == NULL);
- drm_intel_bo_reference(brw->vb.upload.bo);
- *bo_return = brw->vb.upload.bo;
- *offset_return = brw->vb.upload.offset;
- brw->vb.upload.offset += size;
-}
-
-static void
-copy_array_to_vbo_array( struct brw_context *brw,
- struct brw_vertex_element *element,
- GLuint dst_stride)
-{
- GLuint size = element->count * dst_stride;
+ int src_stride = element->glarray->StrideB;
+ const unsigned char *src = element->glarray->Ptr + min * src_stride;
+ int count = max - min + 1;
+ GLuint size = count * dst_stride;
- get_space(brw, size, &element->bo, &element->offset);
-
- if (element->glarray->StrideB == 0) {
- assert(element->count == 1);
- element->stride = 0;
+ if (dst_stride == src_stride) {
+ intel_upload_data(&brw->intel, src, size, dst_stride,
+ &buffer->bo, &buffer->offset);
} else {
- element->stride = dst_stride;
- }
+ char * const map = intel_upload_map(&brw->intel, size, dst_stride);
+ char *dst = map;
- if (dst_stride == element->glarray->StrideB) {
- drm_intel_gem_bo_map_gtt(element->bo);
- memcpy((char *)element->bo->virtual + element->offset,
- element->glarray->Ptr, size);
- drm_intel_gem_bo_unmap_gtt(element->bo);
- } else {
- char *dest;
- const unsigned char *src = element->glarray->Ptr;
- int i;
-
- drm_intel_gem_bo_map_gtt(element->bo);
- dest = element->bo->virtual;
- dest += element->offset;
-
- for (i = 0; i < element->count; i++) {
- memcpy(dest, src, dst_stride);
- src += element->glarray->StrideB;
- dest += dst_stride;
+ while (count--) {
+ memcpy(dst, src, dst_stride);
+ src += src_stride;
+ dst += dst_stride;
}
-
- drm_intel_gem_bo_unmap_gtt(element->bo);
+ intel_upload_unmap(&brw->intel, map, size, dst_stride,
+ &buffer->bo, &buffer->offset);
}
+ buffer->stride = dst_stride;
}
static void brw_prepare_vertices(struct brw_context *brw)
{
struct gl_context *ctx = &brw->intel.ctx;
struct intel_context *intel = intel_context(ctx);
- GLbitfield vs_inputs = brw->vs.prog_data->inputs_read;
- GLuint i;
+ /* CACHE_NEW_VS_PROG */
+ GLbitfield64 vs_inputs = brw->vs.prog_data->inputs_read;
const unsigned char *ptr = NULL;
- GLuint interleave = 0;
+ GLuint interleaved = 0, total_size = 0;
unsigned int min_index = brw->vb.min_index;
unsigned int max_index = brw->vb.max_index;
+ int delta, i, j;
struct brw_vertex_element *upload[VERT_ATTRIB_MAX];
GLuint nr_uploads = 0;
/* Accumulate the list of enabled arrays. */
brw->vb.nr_enabled = 0;
while (vs_inputs) {
GLuint i = _mesa_ffsll(vs_inputs) - 1;
struct brw_vertex_element *input = &brw->vb.inputs[i];
- vs_inputs &= ~(1 << i);
- brw->vb.enabled[brw->vb.nr_enabled++] = input;
+ vs_inputs &= ~BITFIELD64_BIT(i);
+ if (input->glarray->Size && get_size(input->glarray->Type))
+ brw->vb.enabled[brw->vb.nr_enabled++] = input;
}
- /* XXX: In the rare cases where this happens we fallback all
- * the way to software rasterization, although a tnl fallback
- * would be sufficient. I don't know of *any* real world
- * cases with > 17 vertex attributes enabled, so it probably
- * isn't an issue at this point.
- */
- if (brw->vb.nr_enabled >= BRW_VEP_MAX) {
- intel->Fallback = GL_TRUE; /* boolean, not bitfield */
+ if (brw->vb.nr_enabled == 0)
return;
- }
- for (i = 0; i < brw->vb.nr_enabled; i++) {
+ if (brw->vb.nr_buffers)
+ goto prepare;
+
+ for (i = j = 0; i < brw->vb.nr_enabled; i++) {
struct brw_vertex_element *input = brw->vb.enabled[i];
+ const struct gl_client_array *glarray = input->glarray;
+ int type_size = get_size(glarray->Type);
- input->element_size = get_size(input->glarray->Type) * input->glarray->Size;
+ input->element_size = type_size * glarray->Size;
- if (_mesa_is_bufferobj(input->glarray->BufferObj)) {
+ if (_mesa_is_bufferobj(glarray->BufferObj)) {
struct intel_buffer_object *intel_buffer =
- intel_buffer_object(input->glarray->BufferObj);
- GLuint offset;
-
- /* Named buffer object: Just reference its contents directly. */
- drm_intel_bo_unreference(input->bo);
- input->bo = intel_bufferobj_source(intel, intel_buffer, &offset);
- drm_intel_bo_reference(input->bo);
- input->offset = offset + (unsigned long)input->glarray->Ptr;
- input->stride = input->glarray->StrideB;
- input->count = input->glarray->_MaxElement;
+ intel_buffer_object(glarray->BufferObj);
+ int k;
+
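+ /* Scan the arrays already seen: two arrays interleaved in the same
+ * VBO (same stride, pointers within one stride of each other) can
+ * share a single vertex buffer slot.
+ */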
+ for (k = 0; k < i; k++) {
+ const struct gl_client_array *other = brw->vb.enabled[k]->glarray;
+ if (glarray->BufferObj == other->BufferObj &&
+ glarray->StrideB == other->StrideB &&
+ (uintptr_t)(glarray->Ptr - other->Ptr) < glarray->StrideB)
+ {
+ input->buffer = brw->vb.enabled[k]->buffer;
+ input->offset = glarray->Ptr - other->Ptr;
+ break;
+ }
+ }
+ if (k == i) {
+ struct brw_vertex_buffer *buffer = &brw->vb.buffers[j];
+
+ /* Named buffer object: Just reference its contents directly. */
+ buffer->bo = intel_bufferobj_source(intel,
+ intel_buffer, type_size,
+ &buffer->offset);
+ drm_intel_bo_reference(buffer->bo);
+ buffer->offset += (uintptr_t)glarray->Ptr;
+ buffer->stride = glarray->StrideB;
+
+ input->buffer = j++;
+ input->offset = 0;
+ }
/* This is a common place to reach if the user mistakenly supplies
* a pointer in place of a VBO offset. If we just let it go through,
* we may end up dereferencing memory far beyond the buffer, so it is
* probably a service to the poor programmer to assert here rather
* than trying to just not render.
*/
- assert(input->offset < input->bo->size);
+ assert(input->offset < brw->vb.buffers[input->buffer].bo->size);
} else {
- input->count = input->glarray->StrideB ? max_index + 1 : 1;
- if (input->bo != NULL) {
- /* Already-uploaded vertex data is present from a previous
- * prepare_vertices, but we had to re-validate state due to
- * check_aperture failing and a new batch being produced.
- */
- continue;
- }
-
/* Queue the buffer object up to be uploaded in the next pass,
* when we've decided if we're doing interleaved or not.
*/
- if (input->attrib == VERT_ATTRIB_POS) {
+ if (nr_uploads == 0) {
/* Position array not properly enabled:
*/
- if (input->glarray->StrideB == 0) {
- intel->Fallback = GL_TRUE; /* boolean, not bitfield */
+ if (input->attrib == VERT_ATTRIB_POS && glarray->StrideB == 0) {
+ intel->Fallback = true; /* boolean, not bitfield */
return;
}
- interleave = input->glarray->StrideB;
- ptr = input->glarray->Ptr;
+ interleaved = glarray->StrideB;
+ ptr = glarray->Ptr;
+ }
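+ /* An array only remains part of the interleaved group if it shares
+ * the first array's stride and lies within one stride of its pointer.
+ */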
+ else if (interleaved != glarray->StrideB ||
+ (uintptr_t)(glarray->Ptr - ptr) > interleaved)
+ {
+ interleaved = 0;
}
- else if (interleave != input->glarray->StrideB ||
- (const unsigned char *)input->glarray->Ptr - ptr < 0 ||
- (const unsigned char *)input->glarray->Ptr - ptr > interleave)
+ else if ((uintptr_t)(glarray->Ptr - ptr) & (type_size - 1))
{
- interleave = 0;
+ /* enforce natural alignment (for doubles) */
+ interleaved = 0;
}
upload[nr_uploads++] = input;
+ total_size = ALIGN(total_size, type_size);
+ total_size += input->element_size;
}
}
+ /* If we need to upload all the arrays, then we can trim those arrays to
+ * only the used elements [min_index, max_index] so long as we adjust all
+ * the values used in the 3DPRIMITIVE i.e. by setting the vertex bias.
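+ * For example, with min_index == 100 and max_index == 150 we upload
+ * only 51 elements of each array and set start_vertex_bias = -100, so
+ * vertex 100 of the draw fetches element 0 of the trimmed buffers.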
+ */
+ brw->vb.start_vertex_bias = 0;
+ delta = min_index;
+ if (nr_uploads == brw->vb.nr_enabled) {
+ brw->vb.start_vertex_bias = -delta;
+ delta = 0;
+ }
+ if (delta && !brw->intel.intelScreen->relaxed_relocations)
+ min_index = delta = 0;
+
/* Handle any arrays to be uploaded. */
- if (nr_uploads > 1 && interleave && interleave <= 256) {
- /* All uploads are interleaved, so upload the arrays together as
- * interleaved. First, upload the contents and set up upload[0].
- */
- copy_array_to_vbo_array(brw, upload[0], interleave);
-
- for (i = 1; i < nr_uploads; i++) {
- /* Then, just point upload[i] at upload[0]'s buffer. */
- upload[i]->stride = interleave;
- upload[i]->offset = upload[0]->offset +
- ((const unsigned char *)upload[i]->glarray->Ptr - ptr);
- upload[i]->bo = upload[0]->bo;
- drm_intel_bo_reference(upload[i]->bo);
+ if (nr_uploads > 1) {
+ if (interleaved && interleaved <= 2*total_size) {
+ struct brw_vertex_buffer *buffer = &brw->vb.buffers[j];
+ /* All uploads are interleaved, so upload the arrays together as
+ * interleaved. First, upload the contents and set up upload[0].
+ */
+ copy_array_to_vbo_array(brw, upload[0], min_index, max_index,
+ buffer, interleaved);
+ buffer->offset -= delta * interleaved;
+
+ for (i = 0; i < nr_uploads; i++) {
+ /* Then, just point upload[i] at upload[0]'s buffer. */
+ upload[i]->offset =
+ ((const unsigned char *)upload[i]->glarray->Ptr - ptr);
+ upload[i]->buffer = j;
+ }
+ j++;
+
+ nr_uploads = 0;
}
- }
- else {
- /* Upload non-interleaved arrays */
- for (i = 0; i < nr_uploads; i++) {
- copy_array_to_vbo_array(brw, upload[i], upload[i]->element_size);
+ else if (total_size < 2048) {
+ /* Upload non-interleaved arrays into a single interleaved array */
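+ /* Each source array becomes one field of an interleaved "struct" with
+ * stride total_size, each field aligned to its component type, so a
+ * single upload buffer serves all of the leftover arrays.
+ */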
+ struct brw_vertex_buffer *buffer;
+ int count = MAX2(max_index - min_index + 1, 1);
+ int offset;
+ char *map;
+
+ map = intel_upload_map(&brw->intel, total_size * count, total_size);
+ for (i = offset = 0; i < nr_uploads; i++) {
+ const unsigned char *src = upload[i]->glarray->Ptr;
+ int size = upload[i]->element_size;
+ int stride = upload[i]->glarray->StrideB;
+ char *dst;
+ int n;
+
+ offset = ALIGN(offset, get_size(upload[i]->glarray->Type));
+ dst = map + offset;
+ src += min_index * stride;
+
+ for (n = 0; n < count; n++) {
+ memcpy(dst, src, size);
+ src += stride;
+ dst += total_size;
+ }
+
+ upload[i]->offset = offset;
+ upload[i]->buffer = j;
+
+ offset += size;
+ }
+ assert(offset == total_size);
+ buffer = &brw->vb.buffers[j++];
+ intel_upload_unmap(&brw->intel, map, offset * count, offset,
+ &buffer->bo, &buffer->offset);
+ buffer->stride = offset;
+ buffer->offset -= delta * offset;
+
+ nr_uploads = 0;
}
}
+ /* Upload non-interleaved arrays */
+ for (i = 0; i < nr_uploads; i++) {
+ struct brw_vertex_buffer *buffer = &brw->vb.buffers[j];
+ copy_array_to_vbo_array(brw, upload[i], min_index, max_index,
+ buffer, upload[i]->element_size);
+ buffer->offset -= delta * buffer->stride;
+ upload[i]->buffer = j++;
+ upload[i]->offset = 0;
+ }
- brw_prepare_query_begin(brw);
-
- for (i = 0; i < brw->vb.nr_enabled; i++) {
- struct brw_vertex_element *input = brw->vb.enabled[i];
+ /* can we simply extend the current vb? */
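+ /* If each new buffer matches the one already programmed, except for a
+ * constant offset that is the same whole number of vertices in all of
+ * them, we can keep the current vertex buffer state and just adjust
+ * start_vertex_bias.
+ */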
+ if (j == brw->vb.nr_current_buffers) {
+ int delta = 0;
+ for (i = 0; i < j; i++) {
+ int d;
+
+ if (brw->vb.current_buffers[i].handle != brw->vb.buffers[i].bo->handle ||
+ brw->vb.current_buffers[i].stride != brw->vb.buffers[i].stride)
+ break;
+
+ d = brw->vb.buffers[i].offset - brw->vb.current_buffers[i].offset;
+ if (d < 0)
+ break;
+ if (i == 0)
+ delta = d / brw->vb.current_buffers[i].stride;
+ if (delta * brw->vb.current_buffers[i].stride != d)
+ break;
+ }
- brw_add_validated_bo(brw, input->bo);
+ if (i == j) {
+ brw->vb.start_vertex_bias += delta;
+ while (--j >= 0)
+ drm_intel_bo_unreference(brw->vb.buffers[j].bo);
+ j = 0;
+ }
}
+
+ brw->vb.nr_buffers = j;
+
+prepare:
+ brw_prepare_query_begin(brw);
}
static void brw_emit_vertices(struct brw_context *brw)
{
struct gl_context *ctx = &brw->intel.ctx;
struct intel_context *intel = intel_context(ctx);
- GLuint i;
+ GLuint i, nr_elements;
+
+ brw_prepare_vertices(brw);
brw_emit_query_begin(brw);
*/
if (brw->vb.nr_enabled == 0) {
BEGIN_BATCH(3);
- OUT_BATCH((CMD_VERTEX_ELEMENT << 16) | 1);
+ OUT_BATCH((_3DSTATE_VERTEX_ELEMENTS << 16) | 1);
if (intel->gen >= 6) {
OUT_BATCH((0 << GEN6_VE0_INDEX_SHIFT) |
GEN6_VE0_VALID |
(BRW_VE1_COMPONENT_STORE_0 << BRW_VE1_COMPONENT_1_SHIFT) |
(BRW_VE1_COMPONENT_STORE_0 << BRW_VE1_COMPONENT_2_SHIFT) |
(BRW_VE1_COMPONENT_STORE_1_FLT << BRW_VE1_COMPONENT_3_SHIFT));
- ADVANCE_BATCH();
+ CACHED_BATCH();
return;
}
/* Now emit VB and VEP state packets.
- *
- * This still defines a hardware VB for each input, even if they
- * are interleaved or from the same VBO. TBD if this makes a
- * performance difference.
*/
- BEGIN_BATCH(1 + brw->vb.nr_enabled * 4);
- OUT_BATCH((CMD_VERTEX_BUFFER << 16) |
- ((1 + brw->vb.nr_enabled * 4) - 2));
-
- for (i = 0; i < brw->vb.nr_enabled; i++) {
- struct brw_vertex_element *input = brw->vb.enabled[i];
- uint32_t dw0;
+ if (brw->vb.nr_buffers) {
if (intel->gen >= 6) {
- dw0 = GEN6_VB0_ACCESS_VERTEXDATA |
- (i << GEN6_VB0_INDEX_SHIFT);
+ assert(brw->vb.nr_buffers <= 33);
} else {
- dw0 = BRW_VB0_ACCESS_VERTEXDATA |
- (i << BRW_VB0_INDEX_SHIFT);
+ assert(brw->vb.nr_buffers <= 17);
+ }
+
+ BEGIN_BATCH(1 + 4*brw->vb.nr_buffers);
+ OUT_BATCH((_3DSTATE_VERTEX_BUFFERS << 16) | (4*brw->vb.nr_buffers - 1));
+ for (i = 0; i < brw->vb.nr_buffers; i++) {
+ struct brw_vertex_buffer *buffer = &brw->vb.buffers[i];
+ uint32_t dw0;
+
+ if (intel->gen >= 6) {
+ dw0 = GEN6_VB0_ACCESS_VERTEXDATA | (i << GEN6_VB0_INDEX_SHIFT);
+ } else {
+ dw0 = BRW_VB0_ACCESS_VERTEXDATA | (i << BRW_VB0_INDEX_SHIFT);
+ }
+
+ if (intel->gen >= 7)
+ dw0 |= GEN7_VB0_ADDRESS_MODIFYENABLE;
+
+ OUT_BATCH(dw0 | (buffer->stride << BRW_VB0_PITCH_SHIFT));
+ OUT_RELOC(buffer->bo, I915_GEM_DOMAIN_VERTEX, 0, buffer->offset);
+ if (intel->gen >= 5) {
+ OUT_RELOC(buffer->bo, I915_GEM_DOMAIN_VERTEX, 0, buffer->bo->size - 1);
+ } else
+ OUT_BATCH(0);
+ OUT_BATCH(0); /* Instance data step rate */
+
+ brw->vb.current_buffers[i].handle = buffer->bo->handle;
+ brw->vb.current_buffers[i].offset = buffer->offset;
+ brw->vb.current_buffers[i].stride = buffer->stride;
}
+ brw->vb.nr_current_buffers = i;
+ ADVANCE_BATCH();
+ }
- OUT_BATCH(dw0 |
- (input->stride << BRW_VB0_PITCH_SHIFT));
- OUT_RELOC(input->bo,
- I915_GEM_DOMAIN_VERTEX, 0,
- input->offset);
- if (intel->gen >= 5) {
- OUT_RELOC(input->bo,
- I915_GEM_DOMAIN_VERTEX, 0,
- input->bo->size - 1);
- } else
- OUT_BATCH(input->stride ? input->count : 0);
- OUT_BATCH(0); /* Instance data step rate */
+ nr_elements = brw->vb.nr_enabled + brw->vs.prog_data->uses_vertexid;
+
+ /* The hardware allows one more VERTEX_ELEMENTS than VERTEX_BUFFERS, presumably
+ * for VertexID/InstanceID.
+ */
+ if (intel->gen >= 6) {
+ assert(nr_elements <= 34);
+ } else {
+ assert(nr_elements <= 18);
}
- ADVANCE_BATCH();
- BEGIN_BATCH(1 + brw->vb.nr_enabled * 2);
- OUT_BATCH((CMD_VERTEX_ELEMENT << 16) | ((1 + brw->vb.nr_enabled * 2) - 2));
+ BEGIN_BATCH(1 + nr_elements * 2);
+ OUT_BATCH((_3DSTATE_VERTEX_ELEMENTS << 16) | (2 * nr_elements - 1));
for (i = 0; i < brw->vb.nr_enabled; i++) {
struct brw_vertex_element *input = brw->vb.enabled[i];
uint32_t format = get_surface_type(input->glarray->Type,
input->glarray->Size,
input->glarray->Format,
- input->glarray->Normalized);
+ input->glarray->Normalized,
+ input->glarray->Integer);
uint32_t comp0 = BRW_VE1_COMPONENT_STORE_SRC;
uint32_t comp1 = BRW_VE1_COMPONENT_STORE_SRC;
uint32_t comp2 = BRW_VE1_COMPONENT_STORE_SRC;
case 0: comp0 = BRW_VE1_COMPONENT_STORE_0;
case 1: comp1 = BRW_VE1_COMPONENT_STORE_0;
case 2: comp2 = BRW_VE1_COMPONENT_STORE_0;
- case 3: comp3 = BRW_VE1_COMPONENT_STORE_1_FLT;
+ case 3: comp3 = input->glarray->Integer ? BRW_VE1_COMPONENT_STORE_1_INT
+ : BRW_VE1_COMPONENT_STORE_1_FLT;
break;
}
if (intel->gen >= 6) {
- OUT_BATCH((i << GEN6_VE0_INDEX_SHIFT) |
+ OUT_BATCH((input->buffer << GEN6_VE0_INDEX_SHIFT) |
GEN6_VE0_VALID |
(format << BRW_VE0_FORMAT_SHIFT) |
- (0 << BRW_VE0_SRC_OFFSET_SHIFT));
+ (input->offset << BRW_VE0_SRC_OFFSET_SHIFT));
} else {
- OUT_BATCH((i << BRW_VE0_INDEX_SHIFT) |
+ OUT_BATCH((input->buffer << BRW_VE0_INDEX_SHIFT) |
BRW_VE0_VALID |
(format << BRW_VE0_FORMAT_SHIFT) |
- (0 << BRW_VE0_SRC_OFFSET_SHIFT));
+ (input->offset << BRW_VE0_SRC_OFFSET_SHIFT));
}
if (intel->gen >= 5)
(comp3 << BRW_VE1_COMPONENT_3_SHIFT) |
((i * 4) << BRW_VE1_DST_OFFSET_SHIFT));
}
- ADVANCE_BATCH();
+
+ if (brw->vs.prog_data->uses_vertexid) {
+ uint32_t dw0 = 0, dw1 = 0;
+
+ dw1 = ((BRW_VE1_COMPONENT_STORE_VID << BRW_VE1_COMPONENT_0_SHIFT) |
+ (BRW_VE1_COMPONENT_STORE_IID << BRW_VE1_COMPONENT_1_SHIFT) |
+ (BRW_VE1_COMPONENT_STORE_PID << BRW_VE1_COMPONENT_2_SHIFT) |
+ (BRW_VE1_COMPONENT_STORE_0 << BRW_VE1_COMPONENT_3_SHIFT));
+
+ if (intel->gen >= 6) {
+ dw0 |= GEN6_VE0_VALID;
+ } else {
+ dw0 |= BRW_VE0_VALID;
+ dw1 |= (i * 4) << BRW_VE1_DST_OFFSET_SHIFT;
+ }
+
+ /* Note that for gl_VertexID, gl_InstanceID, and gl_PrimitiveID values,
+ * the format is ignored and the value is always int.
+ */
+
+ OUT_BATCH(dw0);
+ OUT_BATCH(dw1);
+ }
+
+ CACHED_BATCH();
}
const struct brw_tracked_state brw_vertices = {
.dirty = {
.mesa = 0,
.brw = BRW_NEW_BATCH | BRW_NEW_VERTICES,
- .cache = 0,
+ .cache = CACHE_NEW_VS_PROG,
},
- .prepare = brw_prepare_vertices,
.emit = brw_emit_vertices,
};
-static void brw_prepare_indices(struct brw_context *brw)
+static void brw_upload_indices(struct brw_context *brw)
{
struct gl_context *ctx = &brw->intel.ctx;
struct intel_context *intel = &brw->intel;
ib_type_size = get_size(index_buffer->type);
ib_size = ib_type_size * index_buffer->count;
- bufferobj = index_buffer->obj;;
+ bufferobj = index_buffer->obj;
/* Turn into a proper VBO:
*/
if (!_mesa_is_bufferobj(bufferobj)) {
- brw->ib.start_vertex_offset = 0;
/* Get new bufferobj, offset:
*/
- get_space(brw, ib_size, &bo, &offset);
-
- /* Straight upload
- */
- drm_intel_gem_bo_map_gtt(bo);
- memcpy((char *)bo->virtual + offset, index_buffer->ptr, ib_size);
- drm_intel_gem_bo_unmap_gtt(bo);
+ intel_upload_data(&brw->intel, index_buffer->ptr, ib_size, ib_type_size,
+ &bo, &offset);
+ brw->ib.start_vertex_offset = offset / ib_type_size;
} else {
offset = (GLuint) (unsigned long) index_buffer->ptr;
- brw->ib.start_vertex_offset = 0;
/* If the index buffer isn't aligned to its element size, we have to
* rebase it into a temporary.
*/
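+ /* For example, GL_UNSIGNED_INT indices bound at byte offset 2 would
+ * otherwise require misaligned 4-byte fetches.
+ */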
if ((get_size(index_buffer->type) - 1) & offset) {
- GLubyte *map = ctx->Driver.MapBuffer(ctx,
- GL_ELEMENT_ARRAY_BUFFER_ARB,
- GL_DYNAMIC_DRAW_ARB,
- bufferobj);
- map += offset;
+ GLubyte *map = ctx->Driver.MapBufferRange(ctx,
+ offset,
+ ib_size,
+ GL_MAP_READ_BIT,
+ bufferobj);
- get_space(brw, ib_size, &bo, &offset);
+ intel_upload_data(&brw->intel, map, ib_size, ib_type_size,
+ &bo, &offset);
+ brw->ib.start_vertex_offset = offset / ib_type_size;
- drm_intel_bo_subdata(bo, offset, ib_size, map);
-
- ctx->Driver.UnmapBuffer(ctx, GL_ELEMENT_ARRAY_BUFFER_ARB, bufferobj);
+ ctx->Driver.UnmapBuffer(ctx, bufferobj);
} else {
/* Use CMD_3D_PRIM's start_vertex_offset to avoid re-uploading
* the index buffer state when we're just moving the start index
*/
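+ /* For example, GLuint indices at byte offset 1024 into the VBO yield
+ * a start_vertex_offset of 256 indices from the buffer's base.
+ */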
brw->ib.start_vertex_offset = offset / ib_type_size;
- bo = intel_bufferobj_source(intel, intel_buffer_object(bufferobj),
+ bo = intel_bufferobj_source(intel,
+ intel_buffer_object(bufferobj),
+ ib_type_size,
&offset);
drm_intel_bo_reference(bo);
- ib_size = bo->size;
+ brw->ib.start_vertex_offset += offset / ib_type_size;
}
}
- if (brw->ib.bo != bo ||
- brw->ib.offset != offset ||
- brw->ib.size != ib_size)
- {
+ if (brw->ib.bo != bo) {
drm_intel_bo_unreference(brw->ib.bo);
brw->ib.bo = bo;
- brw->ib.offset = offset;
- brw->ib.size = ib_size;
brw->state.dirty.brw |= BRW_NEW_INDEX_BUFFER;
} else {
drm_intel_bo_unreference(bo);
}
- brw_add_validated_bo(brw, brw->ib.bo);
+ if (index_buffer->type != brw->ib.type) {
+ brw->ib.type = index_buffer->type;
+ brw->state.dirty.brw |= BRW_NEW_INDEX_BUFFER;
+ }
}
const struct brw_tracked_state brw_indices = {
.brw = BRW_NEW_INDICES,
.cache = 0,
},
- .prepare = brw_prepare_indices,
+ .emit = brw_upload_indices,
};
static void brw_emit_index_buffer(struct brw_context *brw)
1);
OUT_RELOC(brw->ib.bo,
I915_GEM_DOMAIN_VERTEX, 0,
- brw->ib.offset);
+ 0);
OUT_RELOC(brw->ib.bo,
I915_GEM_DOMAIN_VERTEX, 0,
- brw->ib.offset + brw->ib.size - 1);
+ brw->ib.bo->size - 1);
ADVANCE_BATCH();
}