/*
 * NOTE(review): corrupted fragment. This anonymous struct appears to be the
 * brw_context vtbl (table of per-generation function pointers), but as
 * written it cannot be valid C: it contains an unrelated executable
 * statement (drm_intel_gem_context_destroy), the update_texture_surface
 * declaration is cut off mid-parameter-list, and the finish_batch line
 * still carries a diff "-" marker. Reconstruct from the pristine source /
 * re-apply the original patch rather than editing this text.
 */
struct
{
void (*destroy) (struct brw_context * brw);
- void (*finish_batch) (struct brw_context * brw);
void (*new_batch) (struct brw_context * brw);
void (*update_texture_surface)(struct gl_context *ctx,
drm_intel_gem_context_destroy(brw->hw_ctx);
}
/*
 * NOTE(review): removed hunk — every line below carries a unified-diff "-"
 * prefix. This is the old brw_finish_batch that was dispatched through
 * brw->vtbl.finish_batch; the patch replaces it with a directly-called
 * static function added later in this same diff. It unmaps and releases
 * the CURBE buffer object and emits query-end state before a batch is
 * flushed. Left byte-identical here: this text is patch input, not source,
 * and must not be hand-edited.
 */
-/**
- * called from intel_batchbuffer_flush and children before sending a
- * batchbuffer off.
- *
- * Note that ALL state emitted here must fit in the reserved space
- * at the end of a batchbuffer. If you add more GPU state, increase
- * the BATCH_RESERVED macro.
- */
-static void
-brw_finish_batch(struct brw_context *brw)
-{
- brw_emit_query_end(brw);
-
- if (brw->curbe.curbe_bo) {
- drm_intel_gem_bo_unmap_gtt(brw->curbe.curbe_bo);
- drm_intel_bo_unreference(brw->curbe.curbe_bo);
- brw->curbe.curbe_bo = NULL;
- }
-}
-
-
/**
 * Fill in the brw_context vtbl function pointers.
 *
 * NOTE(review): the previous comment here ("called from
 * intelFlushBatchLocked") described a different function and was stale —
 * brwInitVtbl only assigns vtbl entries; it is not a flush path.
 */
void brwInitVtbl( struct brw_context *brw )
{
brw->vtbl.new_batch = brw_new_batch;
/* NOTE(review): diff "-" marker below — the patch removes the
 * finish_batch hook (brw_finish_batch is called directly instead). */
- brw->vtbl.finish_batch = brw_finish_batch;
brw->vtbl.destroy = brw_destroy_context;
/* The entries installed above assume gen4+ hardware. */
assert(brw->gen >= 4);
}
/* NOTE(review): stray extra closing brace — almost certainly a
 * patch-mangling artifact; verify against the pristine file. */
}
/*
 * NOTE(review): added hunk — every line below carries a unified-diff "+"
 * prefix. This re-introduces brw_finish_batch as a plain static function
 * (the vtbl.finish_batch indirection is removed elsewhere in this patch);
 * the body is unchanged: emit query-end state, then unmap/unreference the
 * CURBE buffer object. Patch text — do not hand-edit.
 */
+/**
+ * Called from intel_batchbuffer_flush before emitting MI_BATCHBUFFER_END and
+ * sending it off.
+ *
+ * This function can emit state (say, to preserve registers that aren't saved
+ * between batches). All of this state MUST fit in the reserved space at the
+ * end of the batchbuffer. If you add more GPU state, increase the reserved
+ * space by updating the BATCH_RESERVED macro.
+ */
+static void
+brw_finish_batch(struct brw_context *brw)
+{
+ brw_emit_query_end(brw);
+
+ if (brw->curbe.curbe_bo) {
+ drm_intel_gem_bo_unmap_gtt(brw->curbe.curbe_bo);
+ drm_intel_bo_unreference(brw->curbe.curbe_bo);
+ brw->curbe.curbe_bo = NULL;
+ }
+}
+
/* TODO: Push this whole function into bufmgr.
 */
/*
 * NOTE(review): truncated/garbled fragment. "static int" is immediately
 * followed by mid-body statements — the function name, parameter list, and
 * most of the body are missing from this view. Judging from the comments on
 * brw_finish_batch above, this is presumably the tail of
 * intel_batchbuffer_flush (TODO confirm): it drops the reserved-space
 * accounting to 0, swaps the vtbl finish_batch dispatch ("-" lines) for a
 * direct brw_finish_batch call ("+" line), then terminates the batch with
 * MI_BATCH_BUFFER_END. Patch text — reconstruct from the pristine source.
 */
static int
brw->batch.reserved_space = 0;
- if (brw->vtbl.finish_batch)
- brw->vtbl.finish_batch(brw);
+ brw_finish_batch(brw);
/* Mark the end of the buffer. */
intel_batchbuffer_emit_dword(brw, MI_BATCH_BUFFER_END);