   brw->emit_state_always = 0;
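+   /* Be conservative: assume the workaround flush is needed from the start. */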
+   intel->batch.need_workaround_flush = true;
+
   ctx->VertexProgram._MaintainTnlProgram = GL_TRUE;
   ctx->FragmentProgram._MaintainTexEnvProgram = GL_TRUE;
   OUT_BATCH(base_vertex_location);
   ADVANCE_BATCH();
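+   /* A primitive was emitted, so the gen6 workaround flush will be needed again. */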
+   intel->batch.need_workaround_flush = true;
+
   if (intel->always_flush_cache) {
      intel_batchbuffer_emit_mi_flush(intel);
   }
    */
   brw->state.dirty.brw |= BRW_NEW_CONTEXT | BRW_NEW_BATCH;
+   /* Assume that the last command before the start of our batch was a
+    * primitive, for safety.
+    */
+   intel->batch.need_workaround_flush = true;
+
   brw->vb.nr_current_buffers = 0;
   /* Mark that the current program cache BO has been used by the GPU.
static void
intel_emit_post_sync_nonzero_flush(struct intel_context *intel)
{
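+   /* Skip the flush if no primitive has been emitted since the last one. */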
+   if (!intel->batch.need_workaround_flush)
+      return;
+
   BEGIN_BATCH(4);
   OUT_BATCH(_3DSTATE_PIPE_CONTROL);
   OUT_BATCH(PIPE_CONTROL_WRITE_IMMEDIATE);
   OUT_RELOC(intel->batch.workaround_bo,
             I915_GEM_DOMAIN_GTT, I915_GEM_DOMAIN_GTT, 0);
   OUT_BATCH(0); /* write data */
   ADVANCE_BATCH();
+
+   intel->batch.need_workaround_flush = false;
}
/* Emit a pipelined flush to either flush render and texture cache for
   drm_intel_bo *last_bo;
   /** BO for post-sync nonzero writes for gen6 workaround. */
   drm_intel_bo *workaround_bo;
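+   /** Set when a post-sync nonzero workaround flush is still required (gen6). */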
+   bool need_workaround_flush;
+
   struct cached_batch_item *cached_items;
   uint16_t emit, total;