#include <stdlib.h>
-#include "glheader.h"
-#include "context.h"
-#include "state.h"
-#include "api_validate.h"
-#include "enums.h"
+#include "main/glheader.h"
+#include "main/context.h"
+#include "main/state.h"
+#include "main/api_validate.h"
+#include "main/enums.h"
#include "brw_draw.h"
#include "brw_defines.h"
#include "brw_state.h"
#include "brw_fallback.h"
-#include "intel_ioctl.h"
#include "intel_batchbuffer.h"
#include "intel_buffer_objects.h"
#include "intel_tex.h"
};
-static GLuint get_surface_type( GLenum type, GLuint size, GLboolean normalized )
+/**
+ * Given vertex array type/size/format/normalized info, return
+ * the appropriate hardware surface type.
+ * Format will be GL_RGBA or possibly GL_BGRA for GLubyte[4] color arrays.
+ */
+static GLuint get_surface_type( GLenum type, GLuint size,
+ GLenum format, GLboolean normalized )
{
if (INTEL_DEBUG & DEBUG_VERTS)
_mesa_printf("type %s size %d normalized %d\n",
case GL_BYTE: return byte_types_norm[size];
case GL_UNSIGNED_INT: return uint_types_norm[size];
case GL_UNSIGNED_SHORT: return ushort_types_norm[size];
- case GL_UNSIGNED_BYTE: return ubyte_types_norm[size];
+ case GL_UNSIGNED_BYTE:
+ if (format == GL_BGRA) {
+ /* See GL_EXT_vertex_array_bgra */
+ assert(size == 4);
+ return BRW_SURFACEFORMAT_B8G8R8A8_UNORM;
+ }
+ else {
+ return ubyte_types_norm[size];
+ }
default: assert(0); return 0;
}
}
else {
+ assert(format == GL_RGBA); /* sanity check */
switch (type) {
case GL_DOUBLE: return double_types[size];
case GL_FLOAT: return float_types[size];
}
}
-static void copy_strided_array( GLubyte *dest,
- const GLubyte *src,
- GLuint size,
- GLuint stride,
- GLuint count )
-{
- if (size == stride)
- memcpy(dest, src, count * size);
- else {
- GLuint i;
-
- for (i = 0; i < count; i++) {
- memcpy(dest, src, size);
- src += stride;
- dest += size;
- }
- }
-}
-
static void wrap_buffers( struct brw_context *brw,
GLuint size )
{
if (brw->vb.upload.bo != NULL)
dri_bo_unreference(brw->vb.upload.bo);
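+   /* Under GEM the kernel manages buffer placement and caching itself,
+    * so the old DRM_BO_FLAG_* hints are no longer passed to the allocator.
+    */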
brw->vb.upload.bo = dri_bo_alloc(brw->intel.bufmgr, "temporary VBO",
- size, 1,
- DRM_BO_FLAG_MEM_LOCAL |
- DRM_BO_FLAG_CACHED |
- DRM_BO_FLAG_CACHED_MAPPED);
+ size, 1);
   /* Set the internal VBO to no-backing-store.  We only use it as a
    * temporary within brw_try_draw_prims while the lock is held.
wrap_buffers(brw, size);
}
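+   /* A non-NULL *bo_return here would leak a reference. */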
+ assert(*bo_return == NULL);
dri_bo_reference(brw->vb.upload.bo);
*bo_return = brw->vb.upload.bo;
*offset_return = brw->vb.upload.offset;
-
brw->vb.upload.offset += size;
}
struct brw_vertex_element *element,
GLuint dst_stride)
{
+ struct intel_context *intel = &brw->intel;
GLuint size = element->count * dst_stride;
get_space(brw, size, &element->bo, &element->offset);
element->stride = dst_stride;
}
- dri_bo_map(element->bo, GL_TRUE);
- copy_strided_array((unsigned char *)element->bo->virtual + element->offset,
- element->glarray->Ptr,
- dst_stride,
- element->glarray->StrideB,
- element->count);
- dri_bo_unmap(element->bo);
+ if (dst_stride == element->glarray->StrideB) {
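+      /* Tightly packed source: one memcpy suffices.  When the kernel
+       * handles fencing (GEM), map through the GTT for write-combined
+       * access; otherwise fall back to dri_bo_subdata().
+       */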
+ if (intel->intelScreen->kernel_exec_fencing) {
+ drm_intel_gem_bo_map_gtt(element->bo);
+ memcpy((char *)element->bo->virtual + element->offset,
+ element->glarray->Ptr, size);
+ drm_intel_gem_bo_unmap_gtt(element->bo);
+ } else {
+ dri_bo_subdata(element->bo,
+ element->offset,
+ size,
+ element->glarray->Ptr);
+ }
+ } else {
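+      /* Strided source: repack elements tightly into the upload buffer. */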
+ char *dest;
+ const unsigned char *src = element->glarray->Ptr;
+ int i;
+
+ if (intel->intelScreen->kernel_exec_fencing) {
+ drm_intel_gem_bo_map_gtt(element->bo);
+ dest = element->bo->virtual;
+ dest += element->offset;
+
+ for (i = 0; i < element->count; i++) {
+ memcpy(dest, src, dst_stride);
+ src += element->glarray->StrideB;
+ dest += dst_stride;
+ }
+
+ drm_intel_gem_bo_unmap_gtt(element->bo);
+ } else {
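+         /* Without a GTT mapping, repack into a temporary buffer and
+          * upload it with a single subdata call.
+          */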
+ void *data;
+
+ data = _mesa_malloc(dst_stride * element->count);
+ dest = data;
+ for (i = 0; i < element->count; i++) {
+ memcpy(dest, src, dst_stride);
+ src += element->glarray->StrideB;
+ dest += dst_stride;
+ }
+
+ dri_bo_subdata(element->bo,
+ element->offset,
+ size,
+ data);
+
+ _mesa_free(data);
+ }
+ }
}
-int brw_prepare_vertices( struct brw_context *brw,
- GLuint min_index,
- GLuint max_index )
+static void brw_prepare_vertices(struct brw_context *brw)
{
GLcontext *ctx = &brw->intel.ctx;
struct intel_context *intel = intel_context(ctx);
- GLuint tmp = brw->vs.prog_data->inputs_read;
+ GLbitfield vs_inputs = brw->vs.prog_data->inputs_read;
GLuint i;
const unsigned char *ptr = NULL;
GLuint interleave = 0;
- int ret;
+ unsigned int min_index = brw->vb.min_index;
+ unsigned int max_index = brw->vb.max_index;
struct brw_vertex_element *enabled[VERT_ATTRIB_MAX];
GLuint nr_enabled = 0;
_mesa_printf("%s %d..%d\n", __FUNCTION__, min_index, max_index);
/* Accumulate the list of enabled arrays. */
- while (tmp) {
- GLuint i = _mesa_ffsll(tmp)-1;
+ while (vs_inputs) {
+ GLuint i = _mesa_ffsll(vs_inputs) - 1;
struct brw_vertex_element *input = &brw->vb.inputs[i];
- tmp &= ~(1<<i);
+ vs_inputs &= ~(1 << i);
enabled[nr_enabled++] = input;
}
* cases with > 17 vertex attributes enabled, so it probably
* isn't an issue at this point.
*/
- if (nr_enabled >= BRW_VEP_MAX)
- return -1;
+ if (nr_enabled >= BRW_VEP_MAX) {
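+      /* A state atom has no error return, so flag a software fallback
+       * instead.
+       */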
+ intel->Fallback = 1;
+ return;
+ }
for (i = 0; i < nr_enabled; i++) {
struct brw_vertex_element *input = enabled[i];
intel_buffer_object(input->glarray->BufferObj);
/* Named buffer object: Just reference its contents directly. */
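+      /* Drop any reference taken by a previous prepare pass. */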
+ dri_bo_unreference(input->bo);
input->bo = intel_bufferobj_buffer(intel, intel_buffer,
INTEL_READ);
dri_bo_reference(input->bo);
input->offset = (unsigned long)input->glarray->Ptr;
input->stride = input->glarray->StrideB;
} else {
+ if (input->bo != NULL) {
+ /* Already-uploaded vertex data is present from a previous
+ * prepare_vertices, but we had to re-validate state due to
+ * check_aperture failing and a new batch being produced.
+ */
+ continue;
+ }
+
/* Queue the buffer object up to be uploaded in the next pass,
* when we've decided if we're doing interleaved or not.
*/
if (i == 0) {
/* Position array not properly enabled:
*/
- if (input->glarray->StrideB == 0)
- return -1;
+ if (input->glarray->StrideB == 0) {
+ intel->Fallback = 1;
+ return;
+ }
interleave = input->glarray->StrideB;
ptr = input->glarray->Ptr;
else {
/* Upload non-interleaved arrays */
for (i = 0; i < nr_uploads; i++) {
- copy_array_to_vbo_array(brw, upload[i], upload[i]->element_size);
+ copy_array_to_vbo_array(brw, upload[i], upload[i]->element_size);
}
}
- if (brw->vb.upload.bo) {
- ret = dri_bufmgr_check_aperture_space(brw->vb.upload.bo);
- if (ret)
- return 1;
- }
+ brw_prepare_query_begin(brw);
+
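+   /* Register every input BO with the validation list; aperture space for
+    * the whole set is checked once at state-validation time instead of here.
+    */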
+ for (i = 0; i < nr_enabled; i++) {
+ struct brw_vertex_element *input = enabled[i];
- return 0;
+ brw_add_validated_bo(brw, input->bo);
+ }
}
-void brw_emit_vertices( struct brw_context *brw,
- GLuint min_index,
- GLuint max_index )
+static void brw_emit_vertices(struct brw_context *brw)
{
GLcontext *ctx = &brw->intel.ctx;
struct intel_context *intel = intel_context(ctx);
- GLuint tmp = brw->vs.prog_data->inputs_read;
+ GLbitfield vs_inputs = brw->vs.prog_data->inputs_read;
struct brw_vertex_element *enabled[VERT_ATTRIB_MAX];
GLuint i;
GLuint nr_enabled = 0;
/* Accumulate the list of enabled arrays. */
- while (tmp) {
- i = _mesa_ffsll(tmp)-1;
+ while (vs_inputs) {
+ i = _mesa_ffsll(vs_inputs) - 1;
struct brw_vertex_element *input = &brw->vb.inputs[i];
- tmp &= ~(1<<i);
+ vs_inputs &= ~(1 << i);
enabled[nr_enabled++] = input;
}
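+   /* Record the start of any active occlusion query in this batch. */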
+ brw_emit_query_begin(brw);
/* Now emit VB and VEP state packets.
*
BRW_VB0_ACCESS_VERTEXDATA |
(input->stride << BRW_VB0_PITCH_SHIFT));
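+      /* GEM relocations name (read domains, write domain) rather than
+       * the old TTM DRM_BO_FLAG_* bits.
+       */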
OUT_RELOC(input->bo,
- DRM_BO_FLAG_MEM_TT | DRM_BO_FLAG_READ,
+ I915_GEM_DOMAIN_VERTEX, 0,
input->offset);
- OUT_BATCH(max_index);
+ OUT_BATCH(brw->vb.max_index);
OUT_BATCH(0); /* Instance data step rate */
-
- /* Unreference the buffer so it can get freed, now that we won't
- * touch it any more.
- */
- dri_bo_unreference(input->bo);
- input->bo = NULL;
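+      /* The reference on input->bo is kept: the same upload may be
+       * re-emitted when BRW_NEW_BATCH forces this atom to run again.
+       */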
}
ADVANCE_BATCH();
struct brw_vertex_element *input = enabled[i];
uint32_t format = get_surface_type(input->glarray->Type,
input->glarray->Size,
+ input->glarray->Format,
input->glarray->Normalized);
uint32_t comp0 = BRW_VE1_COMPONENT_STORE_SRC;
uint32_t comp1 = BRW_VE1_COMPONENT_STORE_SRC;
ADVANCE_BATCH();
}
-int brw_prepare_indices( struct brw_context *brw,
- const struct _mesa_index_buffer *index_buffer,
- dri_bo **bo_return,
- GLuint *offset_return)
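+/* Vertex upload now runs as a tracked state atom, re-run whenever the
+ * batch or the vertex arrays change.
+ */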
+const struct brw_tracked_state brw_vertices = {
+ .dirty = {
+ .mesa = 0,
+ .brw = BRW_NEW_BATCH | BRW_NEW_VERTICES,
+ .cache = 0,
+ },
+ .prepare = brw_prepare_vertices,
+ .emit = brw_emit_vertices,
+};
+
+static void brw_prepare_indices(struct brw_context *brw)
{
GLcontext *ctx = &brw->intel.ctx;
struct intel_context *intel = &brw->intel;
- GLuint ib_size = get_size(index_buffer->type) * index_buffer->count;
- dri_bo *bo;
- struct gl_buffer_object *bufferobj = index_buffer->obj;
- GLuint offset = (GLuint)index_buffer->ptr;
- int ret;
+ const struct _mesa_index_buffer *index_buffer = brw->ib.ib;
+ GLuint ib_size;
+ dri_bo *bo = NULL;
+ struct gl_buffer_object *bufferobj;
+ GLuint offset;
+
+ if (index_buffer == NULL)
+ return;
+
+ ib_size = get_size(index_buffer->type) * index_buffer->count;
+   bufferobj = index_buffer->obj;
/* Turn into a proper VBO:
*/
/* Straight upload
*/
- dri_bo_subdata(bo, offset, ib_size, index_buffer->ptr);
+ if (intel->intelScreen->kernel_exec_fencing) {
+ drm_intel_gem_bo_map_gtt(bo);
+ memcpy((char *)bo->virtual + offset, index_buffer->ptr, ib_size);
+ drm_intel_gem_bo_unmap_gtt(bo);
+ } else {
+ dri_bo_subdata(bo, offset, ib_size, index_buffer->ptr);
+ }
} else {
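+      /* The pointer is really an offset into the VBO; cast via unsigned
+       * long so 64-bit builds don't warn about pointer truncation.
+       */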
+ offset = (GLuint) (unsigned long) index_buffer->ptr;
+
/* If the index buffer isn't aligned to its element size, we have to
* rebase it into a temporary.
*/
}
}
- *bo_return = bo;
- *offset_return = offset;
- ret = dri_bufmgr_check_aperture_space(bo);
- return ret;
+ dri_bo_unreference(brw->ib.bo);
+ brw->ib.bo = bo;
+ brw->ib.offset = offset;
+
+ brw_add_validated_bo(brw, brw->ib.bo);
}
-void brw_emit_indices(struct brw_context *brw,
- const struct _mesa_index_buffer *index_buffer,
- dri_bo *bo,
- GLuint offset)
+static void brw_emit_indices(struct brw_context *brw)
{
struct intel_context *intel = &brw->intel;
- GLuint ib_size = get_size(index_buffer->type) * index_buffer->count;
+ const struct _mesa_index_buffer *index_buffer = brw->ib.ib;
+ GLuint ib_size;
+
+ if (index_buffer == NULL)
+ return;
+
+ ib_size = get_size(index_buffer->type) * index_buffer->count;
+
/* Emit the indexbuffer packet:
*/
{
BEGIN_BATCH(4, IGNORE_CLIPRECTS);
OUT_BATCH( ib.header.dword );
- OUT_RELOC( bo, DRM_BO_FLAG_MEM_TT | DRM_BO_FLAG_READ, offset);
- OUT_RELOC( bo, DRM_BO_FLAG_MEM_TT | DRM_BO_FLAG_READ,
- offset + ib_size);
+ OUT_RELOC(brw->ib.bo,
+ I915_GEM_DOMAIN_VERTEX, 0,
+ brw->ib.offset);
+ OUT_RELOC(brw->ib.bo,
+ I915_GEM_DOMAIN_VERTEX, 0,
+ brw->ib.offset + ib_size);
OUT_BATCH( 0 );
ADVANCE_BATCH();
-
- dri_bo_unreference(bo);
}
}
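+/* Index-buffer setup likewise becomes a tracked state atom. */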
+const struct brw_tracked_state brw_indices = {
+ .dirty = {
+ .mesa = 0,
+ .brw = BRW_NEW_BATCH | BRW_NEW_INDICES,
+ .cache = 0,
+ },
+ .prepare = brw_prepare_indices,
+ .emit = brw_emit_indices,
+};