dri_bo_fake *bo_fake = (dri_bo_fake *)bo;
dri_bufmgr_fake *bufmgr_fake= (dri_bufmgr_fake *)bo->bufmgr;
struct block *block = (struct block *)calloc(sizeof *block, 1);
- unsigned int align_log2 = _mesa_ffs(bo_fake->alignment);
+ unsigned int align_log2 = _mesa_ffs(bo_fake->alignment) - 1;
GLuint sz;
if (!block)
static void free_block(dri_bufmgr_fake *bufmgr_fake, struct block *block)
{
dri_bo_fake *bo_fake;
- DBG("free block %p\n", block);
+ DBG("free block %p %08x %d %d\n", block, block->mem->ofs, block->on_hardware, block->fenced);
if (!block)
return;
bo_fake = (dri_bo_fake *)block->bo;
- if (bo_fake->card_dirty == GL_TRUE) {
- memcpy(bo_fake->backing_store, block->virtual, block->bo->size);
- bo_fake->card_dirty = GL_FALSE;
- bo_fake->dirty = GL_TRUE;
+ if (!(bo_fake->flags & BM_NO_BACKING_STORE) && (bo_fake->card_dirty == 1)) {
+ memcpy(bo_fake->backing_store, block->virtual, block->bo->size);
+ bo_fake->card_dirty = 0;
+ bo_fake->dirty = 1;
}
if (block->on_hardware) {
/* Blocks are ordered by fence, so if one fails, all from
* here will fail also:
*/
+ DBG("fence not passed: offset %x sz %x %d %d \n",
+ block->mem->ofs, block->mem->size, block->fence, bufmgr_fake->last_fence);
break;
}
}
struct block *block, *tmp;
foreach_s (block, tmp, &bufmgr_fake->on_hardware) {
- DBG("Fence block %p (sz 0x%x buf %p) with fence %d\n", block,
- block->mem->size, block->bo, fence);
+ DBG("Fence block %p (sz 0x%x ofs %x buf %p) with fence %d\n", block,
+ block->mem->size, block->mem->ofs, block->bo, fence);
block->fence = fence;
block->on_hardware = 0;
free_block(bufmgr_fake, block);
bo_fake->block = NULL;
bo_fake->validated = GL_FALSE;
- bo_fake->dirty = GL_TRUE;
- block->bo->offset = -1;
+ if (!(bo_fake->flags & BM_NO_BACKING_STORE))
+ bo_fake->dirty = 1;
}
}
*/
dri_bufmgr_fake_wait_idle(bufmgr_fake);
- /* we may never have mapped this BO so it might not have any backing store */
- /* if this happens it should be rare, but 0 the card memory in any case */
+ /* we may never have mapped this BO, so it might not have any backing
+ * store; if this happens it should be rare, but zero the card memory
+ * in any case */
if (bo_fake->backing_store)
- memcpy(bo_fake->block->virtual, bo_fake->backing_store, bo->size);
+ memcpy(bo_fake->block->virtual, bo_fake->backing_store, bo->size);
else
- memset(bo_fake->block->virtual, 0, bo->size);
+ memset(bo_fake->block->virtual, 0, bo->size);
bo_fake->dirty = 0;
}
+ bo_fake->block->fenced = 0;
bo_fake->block->on_hardware = 1;
move_to_tail(&bufmgr_fake->on_hardware, bo_fake->block);
struct fake_buffer_reloc *r;
dri_bo_fake *reloc_fake = (dri_bo_fake *)reloc_buf;
dri_bo_fake *target_fake = (dri_bo_fake *)target_buf;
- int ret, i;
+ int i;
assert(reloc_buf);
assert(target_buf);
- if (!target_fake->is_static && !target_fake->size_accounted) {
- ret = dri_fake_check_aperture_space(target_buf);
- if (ret)
- return ret;
- }
+ assert(target_fake->is_static || target_fake->size_accounted);
if (reloc_fake->relocs == NULL) {
reloc_fake->relocs = malloc(sizeof(struct fake_buffer_reloc) *
/* Validate the target buffer if that hasn't been done. */
if (!target_fake->validated) {
- ret = dri_fake_reloc_and_validate_buffer(r->target_buf);
- if (ret != 0) {
- if (bo->virtual != NULL)
- dri_bo_unmap(bo);
- return ret;
- }
+ ret = dri_fake_reloc_and_validate_buffer(r->target_buf);
+ if (ret != 0) {
+ if (bo->virtual != NULL)
+ dri_bo_unmap(bo);
+ return ret;
+ }
}
/* Calculate the value of the relocation entry. */
if (bo_fake->validate_flags & DRM_BO_FLAG_WRITE) {
if (!(bo_fake->flags & (BM_NO_BACKING_STORE|BM_PINNED))) {
if (bo_fake->backing_store == 0)
- alloc_backing_store(bo);
+ alloc_backing_store(bo);
- bo_fake->card_dirty = GL_TRUE;
+ bo_fake->card_dirty = 1;
}
bufmgr_fake->performed_rendering = GL_TRUE;
}
ret = dri_fake_reloc_and_validate_buffer(batch_buf);
if (bufmgr_fake->fail == 1) {
if (retry_count == 0) {
- retry_count++;
- dri_fake_kick_all(bufmgr_fake);
- bufmgr_fake->fail = 0;
- goto restart;
- }
+ retry_count++;
+ dri_fake_kick_all(bufmgr_fake);
+ bufmgr_fake->fail = 0;
+ goto restart;
+ } else /* dump out the memory here */
+ mmDumpMemInfo(bufmgr_fake->heap);
}
+
assert(ret == 0);
*count_p = 0; /* junk */
return 0;
if (bufmgr_fake->current_total_size + sz > bufmgr_fake->size) {
- DBG("check_space: bo %d %d overflowed bufmgr\n", bo_fake->id, sz);
+ DBG("check_space: %s bo %d %d overflowed bufmgr size %d\n", bo_fake->name, bo_fake->id, sz, bufmgr_fake->size);
return -1;
}
bufmgr_fake->current_total_size += sz;
bo_fake->size_accounted = 1;
- DBG("check_space: bo %d %d %d\n", bo_fake->id, bo->size, bufmgr_fake->current_total_size);
+ DBG("drm_check_space: buf %d, %s %d %d\n", bo_fake->id, bo_fake->name, bo->size, bufmgr_fake->current_total_size);
return 0;
}
#include "macros.h"
#include "enums.h"
-static void upload_cc_vp( struct brw_context *brw )
+static int upload_cc_vp( struct brw_context *brw )
{
struct brw_cc_viewport ccv;
dri_bo_unreference(brw->cc.vp_bo);
brw->cc.vp_bo = brw_cache_data( &brw->cache, BRW_CC_VP, &ccv, NULL, 0 );
+ return dri_bufmgr_check_aperture_space(brw->cc.vp_bo);
}
const struct brw_tracked_state brw_cc_vp = {
.brw = BRW_NEW_CONTEXT,
.cache = 0
},
- .update = upload_cc_vp
+ .prepare = upload_cc_vp
};
struct brw_cc_unit_key {
return bo;
}
-static void upload_cc_unit( struct brw_context *brw )
+static int prepare_cc_unit( struct brw_context *brw )
{
struct brw_cc_unit_key key;
if (brw->cc.state_bo == NULL)
brw->cc.state_bo = cc_unit_create_from_key(brw, &key);
+ return dri_bufmgr_check_aperture_space(brw->cc.state_bo);
}
const struct brw_tracked_state brw_cc_unit = {
.brw = 0,
.cache = CACHE_NEW_CC_VP
},
- .update = upload_cc_unit,
+ .prepare = prepare_cc_unit,
};
/* Calculate interpolants for triangle and line rasterization.
*/
-static void upload_clip_prog( struct brw_context *brw )
+static int upload_clip_prog( struct brw_context *brw )
{
GLcontext *ctx = &brw->intel.ctx;
struct brw_clip_prog_key key;
&brw->clip.prog_data);
if (brw->clip.prog_bo == NULL)
compile_clip_prog( brw, &key );
+
+ return dri_bufmgr_check_aperture_space(brw->clip.prog_bo);
}
.brw = (BRW_NEW_REDUCED_PRIMITIVE),
.cache = CACHE_NEW_VS_PROG
},
- .update = upload_clip_prog
+ .prepare = upload_clip_prog
};
return bo;
}
-static void upload_clip_unit( struct brw_context *brw )
+static int upload_clip_unit( struct brw_context *brw )
{
struct brw_clip_unit_key key;
+ int ret = 0;
clip_unit_populate_key(brw, &key);
if (brw->clip.state_bo == NULL) {
brw->clip.state_bo = clip_unit_create_from_key(brw, &key);
}
+
+ ret = dri_bufmgr_check_aperture_space(brw->clip.state_bo);
+ return ret;
}
const struct brw_tracked_state brw_clip_unit = {
BRW_NEW_URB_FENCE),
.cache = CACHE_NEW_CLIP_PROG
},
- .update = upload_clip_unit,
+ .prepare = upload_clip_unit,
};
*/
struct brw_tracked_state {
struct brw_state_flags dirty;
- void (*update)( struct brw_context *brw );
+ int (*prepare)( struct brw_context *brw );
+ void (*emit)( struct brw_context *brw );
};
/* Flags for brw->state.cache.
/*======================================================================
* brw_state.c
*/
-void brw_validate_state( struct brw_context *brw );
+int brw_validate_state( struct brw_context *brw );
void brw_init_state( struct brw_context *brw );
void brw_destroy_state( struct brw_context *brw );
/* Partition the CURBE between the various users of constant values:
*/
-static void calculate_curbe_offsets( struct brw_context *brw )
+static int calculate_curbe_offsets( struct brw_context *brw )
{
/* CACHE_NEW_WM_PROG */
GLuint nr_fp_regs = (brw->wm.prog_data->nr_params + 15) / 16;
brw->state.dirty.brw |= BRW_NEW_CURBE_OFFSETS;
}
+ return 0;
}
.brw = BRW_NEW_VERTEX_PROGRAM,
.cache = CACHE_NEW_WM_PROG
},
- .update = calculate_curbe_offsets
+ .prepare = calculate_curbe_offsets
};
* cache mechanism, but maybe would benefit from a comparison against
* the current uploaded set of constants.
*/
-static void upload_constant_buffer(struct brw_context *brw)
+static int prepare_constant_buffer(struct brw_context *brw)
{
- struct intel_context *intel = &brw->intel;
GLcontext *ctx = &brw->intel.ctx;
struct brw_vertex_program *vp = (struct brw_vertex_program *)brw->vertex_program;
struct brw_fragment_program *fp = (struct brw_fragment_program *)brw->fragment_program;
brw->curbe.tracked_state.dirty.mesa |= fp->param_state;
if (sz == 0) {
- BEGIN_BATCH(2, IGNORE_CLIPRECTS);
- OUT_BATCH((CMD_CONST_BUFFER << 16) | (2 - 2));
- OUT_BATCH(0);
- ADVANCE_BATCH();
if (brw->curbe.last_buf) {
free(brw->curbe.last_buf);
brw->curbe.last_bufsz = 0;
}
- return;
+ return 0;
}
buf = (GLfloat *)malloc(bufsz);
dri_bo_subdata(brw->curbe.curbe_bo, brw->curbe.curbe_offset, bufsz, buf);
}
+
/* Because this provokes an action (ie copy the constants into the
* URB), it shouldn't be shortcircuited if identical to the
* previous time - because eg. the urb destination may have
* flushes as necessary when doublebuffering of CURBEs isn't
* possible.
*/
+
+ /* check aperture space for this bo */
+ return dri_bufmgr_check_aperture_space(brw->curbe.curbe_bo);
+}
+
+
+static void emit_constant_buffer(struct brw_context *brw)
+{
+ struct intel_context *intel = &brw->intel;
+ GLuint sz = brw->curbe.total_size;
+
BEGIN_BATCH(2, IGNORE_CLIPRECTS);
- OUT_BATCH((CMD_CONST_BUFFER << 16) | (1 << 8) | (2 - 2));
- OUT_RELOC(brw->curbe.curbe_bo, DRM_BO_FLAG_MEM_TT | DRM_BO_FLAG_READ,
- (sz - 1) + brw->curbe.curbe_offset);
+ if (sz == 0) {
+ OUT_BATCH((CMD_CONST_BUFFER << 16) | (2 - 2));
+ OUT_BATCH(0);
+ } else {
+ OUT_BATCH((CMD_CONST_BUFFER << 16) | (1 << 8) | (2 - 2));
+ OUT_RELOC(brw->curbe.curbe_bo, DRM_BO_FLAG_MEM_TT | DRM_BO_FLAG_READ,
+ (sz - 1) + brw->curbe.curbe_offset);
+ }
ADVANCE_BATCH();
}
BRW_NEW_BATCH),
.cache = (CACHE_NEW_WM_PROG)
},
- .update = upload_constant_buffer,
+ .prepare = prepare_constant_buffer,
+ .emit = emit_constant_buffer,
};
* programs be immune to the active primitive (ie. cope with all
* possibilities). That may not be realistic however.
*/
-static GLuint brw_set_prim(struct brw_context *brw, GLenum prim)
+static GLuint brw_set_prim(struct brw_context *brw, GLenum prim, GLboolean *need_flush)
{
+ int ret;
if (INTEL_DEBUG & DEBUG_PRIMS)
_mesa_printf("PRIM: %s\n", _mesa_lookup_enum_by_nr(prim));
brw->state.dirty.brw |= BRW_NEW_REDUCED_PRIMITIVE;
}
- brw_validate_state(brw);
+ ret = brw_validate_state(brw);
+ if (ret)
+ *need_flush = GL_TRUE;
}
return hw_prim[prim];
{
struct brw_3d_primitive prim_packet;
+ GLboolean need_flush = GL_FALSE;
if (INTEL_DEBUG & DEBUG_PRIMS)
_mesa_printf("PRIM: %s %d %d\n", _mesa_lookup_enum_by_nr(prim->mode),
prim_packet.header.opcode = CMD_3D_PRIM;
prim_packet.header.length = sizeof(prim_packet)/4 - 2;
prim_packet.header.pad = 0;
- prim_packet.header.topology = brw_set_prim(brw, prim->mode);
+ prim_packet.header.topology = brw_set_prim(brw, prim->mode, &need_flush);
prim_packet.header.indexed = prim->indexed;
prim_packet.verts_per_instance = trim(prim->mode, prim->count);
intel_batchbuffer_data( brw->intel.batch, &prim_packet,
sizeof(prim_packet), LOOP_CLIPRECTS);
}
+
+ assert(need_flush == GL_FALSE);
}
static void brw_merge_inputs( struct brw_context *brw,
struct intel_context *intel = intel_context(ctx);
struct brw_context *brw = brw_context(ctx);
GLboolean retval = GL_FALSE;
- GLuint i;
-
+ GLuint i, ret;
+ GLuint ib_offset;
+ dri_bo *ib_bo;
+ GLboolean force_flush = GL_FALSE;
if (ctx->NewState)
_mesa_update_state( ctx );
* an upper bound of how much we might emit in a single
* brw_try_draw_prims().
*/
+ flush:
+ if (force_flush)
+ brw->no_batch_wrap = GL_FALSE;
+
if (intel->batch->ptr - intel->batch->map > intel->batch->size * 3 / 4
/* brw_emit_prim may change the cliprect_mode to LOOP_CLIPRECTS */
- || intel->batch->cliprect_mode != LOOP_CLIPRECTS)
+ || intel->batch->cliprect_mode != LOOP_CLIPRECTS || (force_flush == GL_TRUE))
intel_batchbuffer_flush(intel->batch);
+ force_flush = GL_FALSE;
brw->no_batch_wrap = GL_TRUE;
/* Set the first primitive early, ahead of validate_state:
*/
- brw_set_prim(brw, prim[0].mode);
+ brw_set_prim(brw, prim[0].mode, &force_flush);
/* XXX: Need to separate validate and upload of state.
*/
- brw_validate_state( brw );
+ ret = brw_validate_state( brw );
+ if (ret) {
+ force_flush = GL_TRUE;
+ goto flush;
+ }
+
+ /* need to account for index buffer and vertex buffer */
+ if (ib) {
+ ret = brw_prepare_indices( brw, ib , &ib_bo, &ib_offset);
+ if (ret) {
+ force_flush = GL_TRUE;
+ goto flush;
+ }
+ }
+
+ ret = brw_prepare_vertices( brw, min_index, max_index);
+ if (ret < 0)
+ goto out;
+
+ if (ret > 0) {
+ force_flush = GL_TRUE;
+ goto flush;
+ }
+
+
/* Various fallback checks:
*/
/* Upload index, vertex data:
*/
if (ib)
- brw_upload_indices( brw, ib );
+ brw_emit_indices( brw, ib, ib_bo, ib_offset);
- if (!brw_upload_vertices( brw, min_index, max_index)) {
- goto out;
- }
+ brw_emit_vertices( brw, min_index, max_index);
for (i = 0; i < nr_prims; i++) {
brw_emit_prim(brw, &prim[i]);
#include "mtypes.h" /* for GLcontext... */
#include "vbo/vbo.h"
+#include "dri_bufmgr.h"
struct brw_context;
/* brw_draw_upload.c
*/
-void brw_upload_indices( struct brw_context *brw,
- const struct _mesa_index_buffer *index_buffer);
+int brw_prepare_indices( struct brw_context *brw,
+ const struct _mesa_index_buffer *index_buffer,
+ dri_bo **bo_return,
+ GLuint *offset_return);
-GLboolean brw_upload_vertices( struct brw_context *brw,
+void brw_emit_indices( struct brw_context *brw,
+ const struct _mesa_index_buffer *index_buffer,
+ dri_bo *bo,
+ GLuint offset);
+
+int brw_prepare_vertices( struct brw_context *brw,
+ GLuint min_index,
+ GLuint max_index );
+
+void brw_emit_vertices( struct brw_context *brw,
GLuint min_index,
GLuint max_index );
* Set the internal VBOs to no-backing-store. We only use them as a
* temporary within a brw_try_draw_prims while the lock is held.
*/
- if (!brw->intel.ttm)
- dri_bo_fake_disable_backing_store(brw->vb.upload.bo, NULL, NULL);
+ /* Don't disable backing store here: if the fake bufmgr has to
+ reorganize memory, it needs somewhere to push this data. */
+// if (!brw->intel.ttm)
+// dri_bo_fake_disable_backing_store(brw->vb.upload.bo, NULL, NULL);
}
static void get_space( struct brw_context *brw,
dri_bo_unmap(element->bo);
}
-GLboolean brw_upload_vertices( struct brw_context *brw,
+int brw_prepare_vertices( struct brw_context *brw,
GLuint min_index,
GLuint max_index )
{
GLuint i;
const unsigned char *ptr = NULL;
GLuint interleave = 0;
+ int ret;
struct brw_vertex_element *enabled[VERT_ATTRIB_MAX];
GLuint nr_enabled = 0;
* isn't an issue at this point.
*/
if (nr_enabled >= BRW_VEP_MAX)
- return GL_FALSE;
+ return -1;
for (i = 0; i < nr_enabled; i++) {
struct brw_vertex_element *input = enabled[i];
/* Position array not properly enabled:
*/
if (input->glarray->StrideB == 0)
- return GL_FALSE;
+ return -1;
interleave = input->glarray->StrideB;
ptr = input->glarray->Ptr;
}
}
+ ret = dri_bufmgr_check_aperture_space(brw->vb.upload.bo);
+ if (ret)
+ return 1;
+
+ return 0;
+}
+
+void brw_emit_vertices( struct brw_context *brw,
+ GLuint min_index,
+ GLuint max_index )
+{
+ GLcontext *ctx = &brw->intel.ctx;
+ struct intel_context *intel = intel_context(ctx);
+ GLuint tmp = brw->vs.prog_data->inputs_read;
+ struct brw_vertex_element *enabled[VERT_ATTRIB_MAX];
+ GLuint i;
+ GLuint nr_enabled = 0;
+
+ /* Accumulate the list of enabled arrays. */
+ while (tmp) {
+ i = _mesa_ffsll(tmp)-1;
+ struct brw_vertex_element *input = &brw->vb.inputs[i];
+
+ tmp &= ~(1<<i);
+ enabled[nr_enabled++] = input;
+ }
+
+
/* Now emit VB and VEP state packets.
*
* This still defines a hardware VB for each input, even if they
((i * 4) << BRW_VE1_DST_OFFSET_SHIFT));
}
ADVANCE_BATCH();
-
- return GL_TRUE;
}
-void brw_upload_indices( struct brw_context *brw,
- const struct _mesa_index_buffer *index_buffer )
+int brw_prepare_indices( struct brw_context *brw,
+ const struct _mesa_index_buffer *index_buffer,
+ dri_bo **bo_return,
+ GLuint *offset_return)
{
GLcontext *ctx = &brw->intel.ctx;
struct intel_context *intel = &brw->intel;
dri_bo *bo;
struct gl_buffer_object *bufferobj = index_buffer->obj;
GLuint offset = (GLuint)index_buffer->ptr;
+ int ret;
/* Turn into a proper VBO:
*/
}
}
+ *bo_return = bo;
+ *offset_return = offset;
+ ret = dri_bufmgr_check_aperture_space(bo);
+ return ret;
+}
+
+void brw_emit_indices(struct brw_context *brw,
+ const struct _mesa_index_buffer *index_buffer,
+ dri_bo *bo,
+ GLuint offset)
+{
+ struct intel_context *intel = &brw->intel;
+ GLuint ib_size = get_size(index_buffer->type) * index_buffer->count;
/* Emit the indexbuffer packet:
*/
{
dri_bo_unreference(bo);
}
}
+
return GL_FALSE;
}
-static void check_fallback(struct brw_context *brw)
+static int check_fallback(struct brw_context *brw)
{
brw->intel.Fallback = do_check_fallback(brw);
+ return 0;
}
const struct brw_tracked_state brw_check_fallback = {
.brw = BRW_NEW_METAOPS,
.cache = 0
},
- .update = check_fallback
+ .prepare = check_fallback
};
/* Calculate interpolants for triangle and line rasterization.
*/
-static void upload_gs_prog( struct brw_context *brw )
+static int prepare_gs_prog( struct brw_context *brw )
{
struct brw_gs_prog_key key;
if (brw->gs.prog_bo == NULL)
compile_gs_prog( brw, &key );
}
+
+ return dri_bufmgr_check_aperture_space(brw->gs.prog_bo);
}
.brw = BRW_NEW_PRIMITIVE,
.cache = CACHE_NEW_VS_PROG
},
- .update = upload_gs_prog
+ .prepare = prepare_gs_prog
};
return bo;
}
-static void upload_gs_unit( struct brw_context *brw )
+static int prepare_gs_unit( struct brw_context *brw )
{
struct brw_gs_unit_key key;
if (brw->gs.state_bo == NULL) {
brw->gs.state_bo = gs_unit_create_from_key(brw, &key);
}
+ return dri_bufmgr_check_aperture_space(brw->gs.state_bo);
}
const struct brw_tracked_state brw_gs_unit = {
BRW_NEW_URB_FENCE),
.cache = CACHE_NEW_GS_PROG
},
- .update = upload_gs_unit,
+ .prepare = prepare_gs_unit,
};
.brw = 0,
.cache = 0
},
- .update = upload_blend_constant_color
+ .emit = upload_blend_constant_color
};
/**
.brw = BRW_NEW_BATCH,
.cache = CACHE_NEW_SURF_BIND,
},
- .update = upload_binding_table_pointers,
+ .emit = upload_binding_table_pointers,
};
CACHE_NEW_WM_UNIT |
CACHE_NEW_CC_UNIT)
},
- .update = upload_pipelined_state_pointers
+ .emit = upload_pipelined_state_pointers
};
#endif
CACHE_NEW_WM_UNIT |
CACHE_NEW_CC_UNIT)
},
- .update = upload_psp_urb_cbs,
+ .emit = upload_psp_urb_cbs,
};
/**
* We have to do this per state validation as we need to emit the relocation
* in the batch buffer.
*/
-static void upload_depthbuffer(struct brw_context *brw)
+
+static int prepare_depthbuffer(struct brw_context *brw)
+{
+ struct intel_region *region = brw->state.depth_region;
+
+ if (!region || !region->buffer)
+ return 0;
+ return dri_bufmgr_check_aperture_space(region->buffer);
+}
+
+static void emit_depthbuffer(struct brw_context *brw)
{
struct intel_context *intel = &brw->intel;
struct intel_region *region = brw->state.depth_region;
.brw = BRW_NEW_DEPTH_BUFFER | BRW_NEW_BATCH,
.cache = 0,
},
- .update = upload_depthbuffer,
+ .prepare = prepare_depthbuffer,
+ .emit = emit_depthbuffer,
};
.brw = 0,
.cache = 0
},
- .update = upload_polygon_stipple
+ .emit = upload_polygon_stipple
};
.brw = 0,
.cache = 0
},
- .update = upload_polygon_stipple_offset
+ .emit = upload_polygon_stipple_offset
};
/**********************************************************************
.brw = BRW_NEW_CONTEXT,
.cache = 0
},
- .update = upload_aa_line_parameters
+ .emit = upload_aa_line_parameters
};
/***********************************************************************
.brw = 0,
.cache = 0
},
- .update = upload_line_stipple
+ .emit = upload_line_stipple
};
.brw = BRW_NEW_BATCH,
.cache = 0
},
- .update = upload_pipe_control
+ .emit = upload_pipe_control
};
.brw = BRW_NEW_CONTEXT,
.cache = 0
},
- .update = upload_invarient_state
+ .emit = upload_invarient_state
};
/**
.brw = BRW_NEW_CONTEXT,
.cache = 0,
},
- .update = upload_state_base_address
+ .emit = upload_state_base_address
};
/* Calculate interpolants for triangle and line rasterization.
*/
-static void upload_sf_prog( struct brw_context *brw )
+static int upload_sf_prog( struct brw_context *brw )
{
struct brw_sf_prog_key key;
&brw->sf.prog_data);
if (brw->sf.prog_bo == NULL)
compile_sf_prog( brw, &key );
+ return dri_bufmgr_check_aperture_space(brw->sf.prog_bo);
}
.brw = (BRW_NEW_REDUCED_PRIMITIVE),
.cache = CACHE_NEW_VS_PROG
},
- .update = upload_sf_prog
+ .prepare = upload_sf_prog
};
#include "macros.h"
#include "intel_fbo.h"
-static void upload_sf_vp(struct brw_context *brw)
+static int upload_sf_vp(struct brw_context *brw)
{
GLcontext *ctx = &brw->intel.ctx;
const GLfloat depth_scale = 1.0F / ctx->DrawBuffer->_DepthMaxF;
dri_bo_unreference(brw->sf.vp_bo);
brw->sf.vp_bo = brw_cache_data( &brw->cache, BRW_SF_VP, &sfv, NULL, 0 );
+
+ return dri_bufmgr_check_aperture_space(brw->sf.vp_bo);
}
const struct brw_tracked_state brw_sf_vp = {
.brw = BRW_NEW_METAOPS,
.cache = 0
},
- .update = upload_sf_vp
+ .prepare = upload_sf_vp
};
struct brw_sf_unit_key {
return bo;
}
-static void upload_sf_unit( struct brw_context *brw )
+static int upload_sf_unit( struct brw_context *brw )
{
struct brw_sf_unit_key key;
dri_bo *reloc_bufs[2];
+ int ret = 0;
sf_unit_populate_key(brw, &key);
if (brw->sf.state_bo == NULL) {
brw->sf.state_bo = sf_unit_create_from_key(brw, &key, reloc_bufs);
}
+
+ if (reloc_bufs[0])
+ ret |= dri_bufmgr_check_aperture_space(reloc_bufs[0]);
+
+ if (reloc_bufs[1])
+ ret |= dri_bufmgr_check_aperture_space(reloc_bufs[1]);
+
+ ret |= dri_bufmgr_check_aperture_space(brw->sf.state_bo);
+ return ret;
}
const struct brw_tracked_state brw_sf_unit = {
.cache = (CACHE_NEW_SF_VP |
CACHE_NEW_SF_PROG)
},
- .update = upload_sf_unit,
+ .prepare = upload_sf_unit,
};
/***********************************************************************
* Emit all state:
*/
-void brw_validate_state( struct brw_context *brw )
+int brw_validate_state( struct brw_context *brw )
{
struct brw_state_flags *state = &brw->state.dirty;
- GLuint i;
+ GLuint i, ret, count;
state->mesa |= brw->intel.NewGLState;
brw->intel.NewGLState = 0;
if (state->mesa == 0 &&
state->cache == 0 &&
state->brw == 0)
- return;
+ return 0;
if (brw->state.dirty.brw & BRW_NEW_CONTEXT)
brw_clear_batch_cache_flush(brw);
brw->intel.Fallback = 0;
+ count = 0;
+
+ /* do prepare stage for all atoms */
+ for (i = 0; i < Elements(atoms); i++) {
+ const struct brw_tracked_state *atom = brw->state.atoms[i];
+
+ if (brw->intel.Fallback)
+ break;
+
+ if (check_state(state, &atom->dirty)) {
+ if (atom->prepare) {
+ ret = atom->prepare(brw);
+ if (ret)
+ return ret;
+ }
+ }
+ }
+
+ if (brw->intel.Fallback)
+ return 0;
+
if (INTEL_DEBUG) {
/* Debug version which enforces various sanity checks on the
* state flags which are generated and checked to help ensure
assert(atom->dirty.mesa ||
atom->dirty.brw ||
atom->dirty.cache);
- assert(atom->update);
if (brw->intel.Fallback)
break;
if (check_state(state, &atom->dirty)) {
- atom->update( brw );
-
-/* emit_foo(brw); */
+ if (atom->emit)
+ atom->emit( brw );
}
accumulate_state(&examined, &atom->dirty);
if (brw->intel.Fallback)
break;
- if (check_state(state, &atom->dirty))
- atom->update( brw );
+ if (check_state(state, &atom->dirty)) {
+ if (atom->emit)
+ atom->emit( brw );
+ }
}
}
if (!brw->intel.Fallback)
memset(state, 0, sizeof(*state));
+ return 0;
}
/* Most minimal update, forces re-emit of URB fence packet after GS
* unit turned on/off.
*/
-static void recalculate_urb_fence( struct brw_context *brw )
+static int recalculate_urb_fence( struct brw_context *brw )
{
GLuint csize = brw->curbe.total_size;
GLuint vsize = brw->vs.prog_data->urb_entry_size;
brw->state.dirty.brw |= BRW_NEW_URB_FENCE;
}
+ return 0;
}
.cache = (CACHE_NEW_VS_PROG |
CACHE_NEW_SF_PROG)
},
- .update = recalculate_urb_fence
+ .prepare = recalculate_urb_fence
};
}
-static void brw_upload_vs_prog( struct brw_context *brw )
+static int brw_upload_vs_prog( struct brw_context *brw )
{
struct brw_vs_prog_key key;
struct brw_vertex_program *vp =
&brw->vs.prog_data);
if (brw->vs.prog_bo == NULL)
do_vs_prog(brw, vp, &key);
+ return dri_bufmgr_check_aperture_space(brw->vs.prog_bo);
}
.brw = BRW_NEW_VERTEX_PROGRAM | BRW_NEW_METAOPS,
.cache = 0
},
- .update = brw_upload_vs_prog
+ .prepare = brw_upload_vs_prog
};
/* Calculate sizes of vertex program outputs. Size is the largest
* component index which might vary from [0,0,0,1]
*/
-static void calc_wm_input_sizes( struct brw_context *brw )
+static int calc_wm_input_sizes( struct brw_context *brw )
{
/* BRW_NEW_VERTEX_PROGRAM */
struct brw_vertex_program *vp =
memcpy(brw->wm.input_size_masks, t.size_masks, sizeof(t.size_masks));
brw->state.dirty.brw |= BRW_NEW_WM_INPUT_DIMENSIONS;
}
+ return 0;
}
const struct brw_tracked_state brw_wm_input_sizes = {
.brw = BRW_NEW_VERTEX_PROGRAM | BRW_NEW_INPUT_DIMENSIONS,
.cache = 0
},
- .update = calc_wm_input_sizes
+ .prepare = calc_wm_input_sizes
};
return bo;
}
-static void upload_vs_unit( struct brw_context *brw )
+static int prepare_vs_unit( struct brw_context *brw )
{
struct brw_vs_unit_key key;
if (brw->vs.state_bo == NULL) {
brw->vs.state_bo = vs_unit_create_from_key(brw, &key);
}
+ return dri_bufmgr_check_aperture_space(brw->vs.state_bo);
}
const struct brw_tracked_state brw_vs_unit = {
BRW_NEW_URB_FENCE),
.cache = CACHE_NEW_VS_PROG
},
- .update = upload_vs_unit,
+ .prepare = prepare_vs_unit,
};
return hash;
}
-static void update_tnl_program( struct brw_context *brw )
+static int prepare_tnl_program( struct brw_context *brw )
{
GLcontext *ctx = &brw->intel.ctx;
struct state_key key;
/* _NEW_PROGRAM */
if (brw->attribs.VertexProgram->_Current)
- return;
+ return 0;
/* Grab all the relevant state and put it in a single structure:
*/
if (old != brw->tnl_program)
brw->state.dirty.brw |= BRW_NEW_TNL_PROGRAM;
+ return 0;
}
/* Note: See brw_draw.c - the vertex program must not rely on
BRW_NEW_INPUT_VARYING),
.cache = 0
},
- .update = update_tnl_program
+ .prepare = prepare_tnl_program
};
-static void update_active_vertprog( struct brw_context *brw )
+static int prepare_active_vertprog( struct brw_context *brw )
{
const struct gl_vertex_program *prev = brw->vertex_program;
if (brw->vertex_program != prev)
brw->state.dirty.brw |= BRW_NEW_VERTEX_PROGRAM;
+
+ return 0;
}
.brw = BRW_NEW_TNL_PROGRAM,
.cache = 0
},
- .update = update_active_vertprog
+ .prepare = prepare_active_vertprog
};
}
-static void brw_upload_wm_prog( struct brw_context *brw )
+static int brw_prepare_wm_prog( struct brw_context *brw )
{
struct brw_wm_prog_key key;
struct brw_fragment_program *fp = (struct brw_fragment_program *)
&brw->wm.prog_data);
if (brw->wm.prog_bo == NULL)
do_wm_prog(brw, fp, &key);
+
+ return dri_bufmgr_check_aperture_space(brw->wm.prog_bo);
}
BRW_NEW_REDUCED_PRIMITIVE),
.cache = 0
},
- .update = brw_upload_wm_prog
+ .prepare = brw_prepare_wm_prog
};
* complicates various things. However, this is still too confusing -
* FIXME: simplify all the different new texture state flags.
*/
-static void upload_wm_samplers( struct brw_context *brw )
+static int upload_wm_samplers( struct brw_context *brw )
{
struct wm_sampler_key key;
int i;
+ int ret = 0;
brw_wm_sampler_populate_key(brw, &key);
dri_bo_unreference(brw->wm.sampler_bo);
brw->wm.sampler_bo = NULL;
if (brw->wm.sampler_count == 0)
- return;
+ return 0;
brw->wm.sampler_bo = brw_search_cache(&brw->cache, BRW_SAMPLER,
&key, sizeof(key),
if (!brw->attribs.Texture->Unit[i]._ReallyEnabled)
continue;
+ ret |= dri_bufmgr_check_aperture_space(brw->wm.sdc_bo[i]);
dri_emit_reloc(brw->wm.sampler_bo,
DRM_BO_FLAG_MEM_TT | DRM_BO_FLAG_READ,
0,
brw->wm.sdc_bo[i]);
}
}
+
+ ret |= dri_bufmgr_check_aperture_space(brw->wm.sampler_bo);
+ return ret;
+
}
const struct brw_tracked_state brw_wm_samplers = {
.brw = 0,
.cache = 0
},
- .update = upload_wm_samplers,
+ .prepare = upload_wm_samplers,
};
}
-static void upload_wm_unit( struct brw_context *brw )
+static int upload_wm_unit( struct brw_context *brw )
{
struct intel_context *intel = &brw->intel;
struct brw_wm_unit_key key;
dri_bo *reloc_bufs[3];
-
+ int ret = 0, i;
wm_unit_populate_key(brw, &key);
/* Allocate the necessary scratch space if we haven't already. Don't
if (brw->wm.state_bo == NULL) {
brw->wm.state_bo = wm_unit_create_from_key(brw, &key, reloc_bufs);
}
+
+ for (i = 0; i < 3; i++)
+ if (reloc_bufs[i])
+ ret |= dri_bufmgr_check_aperture_space(reloc_bufs[i]);
+ ret |= dri_bufmgr_check_aperture_space(brw->wm.state_bo);
+ return ret;
}
const struct brw_tracked_state brw_wm_unit = {
CACHE_NEW_WM_PROG |
CACHE_NEW_SAMPLER)
},
- .update = upload_wm_unit,
+ .prepare = upload_wm_unit,
};
return bo;
}
-static void
+static int
brw_update_texture_surface( GLcontext *ctx, GLuint unit )
{
struct brw_context *brw = brw_context(ctx);
struct intel_texture_object *intelObj = intel_texture_object(tObj);
struct gl_texture_image *firstImage = tObj->Image[0][intelObj->firstLevel];
struct brw_wm_surface_key key;
+ int ret = 0;
memset(&key, 0, sizeof(key));
key.target = tObj->Target;
key.depth = firstImage->Depth;
key.tiled = intelObj->mt->region->tiled;
+ ret |= dri_bufmgr_check_aperture_space(key.bo);
+
dri_bo_unreference(brw->wm.surf_bo[unit + MAX_DRAW_BUFFERS]);
brw->wm.surf_bo[unit + MAX_DRAW_BUFFERS] = brw_search_cache(&brw->cache, BRW_SS_SURFACE,
&key, sizeof(key),
&key.bo, 1,
NULL);
- if (brw->wm.surf_bo[unit + MAX_DRAW_BUFFERS] == NULL)
+ if (brw->wm.surf_bo[unit + MAX_DRAW_BUFFERS] == NULL) {
brw->wm.surf_bo[unit + MAX_DRAW_BUFFERS] = brw_create_texture_surface(brw, &key);
+ }
+
+ ret |= dri_bufmgr_check_aperture_space(brw->wm.surf_bo[unit + MAX_DRAW_BUFFERS]);
+ return ret;
}
/**
* While it is only used for the front/back buffer currently, it should be
* usable for further buffers when doing ARB_draw_buffer support.
*/
-static void
+static int
brw_update_region_surface(struct brw_context *brw, struct intel_region *region,
unsigned int unit, GLboolean cached)
{
dri_bo *region_bo = NULL;
-
+ int ret = 0;
struct {
unsigned int surface_type;
unsigned int surface_format;
key.width = region->pitch; /* XXX: not really! */
key.height = region->height;
key.cpp = region->cpp;
+
+ ret |= dri_bufmgr_check_aperture_space(region->buffer);
} else {
key.surface_type = BRW_SURFACE_NULL;
key.surface_format = BRW_SURFACEFORMAT_B8G8R8A8_UNORM;
region_bo);
}
}
+
+ ret |= dri_bufmgr_check_aperture_space(brw->wm.surf_bo[unit]);
+
+ return ret;
}
return bind_bo;
}
-static void upload_wm_surfaces(struct brw_context *brw )
+static int prepare_wm_surfaces(struct brw_context *brw )
{
GLcontext *ctx = &brw->intel.ctx;
struct intel_context *intel = &brw->intel;
- GLuint i;
+ GLuint i, ret;
+
if (brw->state.nr_draw_regions > 1) {
- for (i = 0; i < brw->state.nr_draw_regions; i++)
- brw_update_region_surface(brw, brw->state.draw_regions[i], i,
- GL_FALSE);
- }else
- brw_update_region_surface(brw, brw->state.draw_regions[0], 0, GL_TRUE);
+ for (i = 0; i < brw->state.nr_draw_regions; i++) {
+ ret = brw_update_region_surface(brw, brw->state.draw_regions[i], i,
+ GL_FALSE);
+ if (ret)
+ return ret;
+ }
+ }else {
+ ret = brw_update_region_surface(brw, brw->state.draw_regions[0], 0, GL_TRUE);
+ if (ret)
+ return ret;
+ }
brw->wm.nr_surfaces = MAX_DRAW_BUFFERS;
struct gl_texture_unit *texUnit = &brw->attribs.Texture->Unit[i];
/* _NEW_TEXTURE, BRW_NEW_TEXDATA */
- if(texUnit->_ReallyEnabled &&
- texUnit->_Current == intel->frame_buffer_texobj)
- {
- dri_bo_unreference(brw->wm.surf_bo[i+MAX_DRAW_BUFFERS]);
- brw->wm.surf_bo[i+MAX_DRAW_BUFFERS] = brw->wm.surf_bo[0];
- dri_bo_reference(brw->wm.surf_bo[i+MAX_DRAW_BUFFERS]);
- brw->wm.nr_surfaces = i + MAX_DRAW_BUFFERS + 1;
- } else if (texUnit->_ReallyEnabled) {
- brw_update_texture_surface(ctx, i);
- brw->wm.nr_surfaces = i + MAX_DRAW_BUFFERS + 1;
+ if(texUnit->_ReallyEnabled) {
+ if (texUnit->_Current == intel->frame_buffer_texobj) {
+ dri_bo_unreference(brw->wm.surf_bo[i+MAX_DRAW_BUFFERS]);
+ brw->wm.surf_bo[i+MAX_DRAW_BUFFERS] = brw->wm.surf_bo[0];
+ dri_bo_reference(brw->wm.surf_bo[i+MAX_DRAW_BUFFERS]);
+ brw->wm.nr_surfaces = i + MAX_DRAW_BUFFERS + 1;
+ } else {
+ ret = brw_update_texture_surface(ctx, i);
+ brw->wm.nr_surfaces = i + MAX_DRAW_BUFFERS + 1;
+
+ if (ret)
+ return ret;
+ }
} else {
- dri_bo_unreference(brw->wm.surf_bo[i+MAX_DRAW_BUFFERS]);
- brw->wm.surf_bo[i+MAX_DRAW_BUFFERS] = NULL;
+ dri_bo_unreference(brw->wm.surf_bo[i+MAX_DRAW_BUFFERS]);
+ brw->wm.surf_bo[i+MAX_DRAW_BUFFERS] = NULL;
}
+
}
dri_bo_unreference(brw->wm.bind_bo);
brw->wm.bind_bo = brw_wm_get_binding_table(brw);
+
+ return dri_bufmgr_check_aperture_space(brw->wm.bind_bo);
}
+
const struct brw_tracked_state brw_wm_surfaces = {
.dirty = {
.mesa = _NEW_COLOR | _NEW_TEXTURE | _NEW_BUFFERS,
.brw = BRW_NEW_CONTEXT,
.cache = 0
},
- .update = upload_wm_surfaces,
+ .prepare = prepare_wm_surfaces,
};
GLuint CMD, BR13;
int dst_y2 = dst_y + h;
int dst_x2 = dst_x + w;
+ int ret;
BATCH_LOCALS;
+ again:
+ ret = dri_bufmgr_check_aperture_space(dst_buffer);
+ ret |= dri_bufmgr_check_aperture_space(src_buffer);
+ if (ret) {
+ intel_batchbuffer_flush(intel->batch);
+ goto again;
+ }
DBG("%s src:buf(%p)/%d+%d %d,%d dst:buf(%p)/%d+%d %d,%d sz:%dx%d\n",
__FUNCTION__,