#define R600_CONTEXT_WAIT_CP_DMA_IDLE (1 << 18)
#define R600_CONTEXT_VGT_FLUSH (1 << 19)
#define R600_CONTEXT_VGT_STREAMOUT_SYNC (1 << 20)
+#define R600_CONTEXT_CS_PARTIAL_FLUSH (1 << 21)
/* other flags */
#define R600_CONTEXT_FLAG_COMPUTE (1u << 31)
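The new flag takes the next free event bit, staying clear of the high marker bit. A file-scope sanity check one could add is sketched below (hypothetical, not part of this patch):

    /* Hypothetical compile-time check, not in this patch: the new event
     * flag must not overlap the R600_CONTEXT_FLAG_COMPUTE marker bit. */
    typedef char cs_partial_flush_bit_check[
        (R600_CONTEXT_CS_PARTIAL_FLUSH & R600_CONTEXT_FLAG_COMPUTE) ? -1 : 1];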
si_pm4_cmd_add(pm4, 1); /* DISPATCH_INITIATOR */
si_pm4_cmd_end(pm4, false);
- si_pm4_cmd_begin(pm4, PKT3_EVENT_WRITE);
- si_pm4_cmd_add(pm4, EVENT_TYPE(V_028A90_CS_PARTIAL_FLUSH | EVENT_INDEX(0x4)));
- si_pm4_cmd_end(pm4, false);
-
si_pm4_emit(sctx, pm4);
si_pm4_free_state(sctx, pm4, ~0);
- sctx->b.flags |= R600_CONTEXT_INV_TEX_CACHE |
+ sctx->b.flags |= R600_CONTEXT_CS_PARTIAL_FLUSH |
+ R600_CONTEXT_INV_TEX_CACHE |
R600_CONTEXT_INV_SHADER_CACHE |
R600_CONTEXT_INV_CONST_CACHE |
R600_CONTEXT_FLAG_COMPUTE;
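The EVENT_WRITE removed from the per-dispatch PM4 state above is not lost: setting R600_CONTEXT_CS_PARTIAL_FLUSH here defers it to the shared cache-flush atom, which batches it with the cache invalidations requested alongside. In raw-packet terms the request still produces the same two dwords, as in this sketch mirroring the new branch below:

    /* Sketch: what the flag turns into once the flush atom runs. */
    radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
    radeon_emit(cs, EVENT_TYPE(V_028A90_CS_PARTIAL_FLUSH) | EVENT_INDEX(4));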
radeon_emit(cs, EVENT_TYPE(V_028A90_VS_PARTIAL_FLUSH) | EVENT_INDEX(4));
}
+ if (sctx->flags & R600_CONTEXT_CS_PARTIAL_FLUSH) {
+ radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0) | compute);
+ radeon_emit(cs, EVENT_TYPE(V_028A90_CS_PARTIAL_FLUSH) | EVENT_INDEX(4));
+ }
+
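For reference, the event dword packs the event id in the low bits and the event index at bit 8, with index 4 selecting the *_PARTIAL_FLUSH encoding; a sketch assuming the usual r600d.h/sid.h field macros:

    /* Assumed field layout (common r600d.h/sid.h pattern, not shown here):
     *   EVENT_TYPE(x)  = (x) << 0    event id
     *   EVENT_INDEX(x) = (x) << 8    event index; 4 = *_PARTIAL_FLUSH */
    unsigned ev = EVENT_TYPE(V_028A90_CS_PARTIAL_FLUSH) | EVENT_INDEX(4);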
if (sctx->flags & R600_CONTEXT_VGT_FLUSH) {
	radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0) | compute);
	radeon_emit(cs, EVENT_TYPE(V_028A90_VGT_FLUSH) | EVENT_INDEX(0));
}

sctx->flags = 0;
}
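Each PKT3 header in this function ORs in "compute" so the event targets the compute queue when the flush was requested with R600_CONTEXT_FLAG_COMPUTE; presumably it is the shader-type bit of the header, along these lines (a sketch, assuming the PKT3_SHADER_TYPE_S macro):

    /* Sketch (assumed definition earlier in si_emit_cache_flush):
     * 'compute' is the shader-type bit of the PKT3 header. */
    uint32_t compute =
        PKT3_SHADER_TYPE_S(!!(sctx->flags & R600_CONTEXT_FLAG_COMPUTE));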
-const struct r600_atom si_atom_cache_flush = { si_emit_cache_flush, 19 }; /* number of CS dwords */
+const struct r600_atom si_atom_cache_flush = { si_emit_cache_flush, 21 }; /* number of CS dwords */
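The bump from 19 to 21 covers the worst case: the new CS_PARTIAL_FLUSH branch emits one more EVENT_WRITE, and each EVENT_WRITE costs two dwords (one PKT3 header plus one event dword), so 19 + 2 = 21.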
static void si_get_draw_start_count(struct si_context *sctx,
const struct pipe_draw_info *info,