r600_flush_emit(ctx);
ctx->b.flags = 0;
+ if (ctx->b.chip_class >= CAYMAN) {
+ ctx->skip_surface_sync_on_next_cs_flush = true;
+ }
+
#if 0
COMPUTE_DBG(ctx->screen, "cdw: %i\n", cs->cdw);
for (i = 0; i < cs->cdw; i++) {
	COMPUTE_DBG(ctx->screen, "%4i : 0x%08X\n", i, cs->buf[i]);
}
#endif

		S_0085F0_SMX_ACTION_ENA(1);
}
- if (cp_coher_cntl) {
+ if (cp_coher_cntl && !rctx->skip_surface_sync_on_next_cs_flush) {
cs->buf[cs->cdw++] = PKT3(PKT3_SURFACE_SYNC, 3, 0);
cs->buf[cs->cdw++] = cp_coher_cntl; /* CP_COHER_CNTL */
cs->buf[cs->cdw++] = 0xffffffff; /* CP_COHER_SIZE */
cs->buf[cs->cdw++] = 0; /* CP_COHER_BASE */
cs->buf[cs->cdw++] = 0x0000000A; /* POLL_INTERVAL */
}

/* Flush the CS. */
ctx->b.ws->cs_flush(ctx->b.rings.gfx.cs, flags, ctx->screen->cs_count++);
+
+ ctx->skip_surface_sync_on_next_cs_flush = false;
}
void r600_begin_new_cs(struct r600_context *ctx)

void *sb_context;
struct r600_isa *isa;
+
+ /* Work-around for flushing problems with compute shaders on Cayman:
+ * Emitting a SURFACE_SYNC packet with any of the CB*_DEST_BASE_ENA
+ * or DB_DEST_BASE_ENA bits set after dispatching a compute shader
+ * hangs the GPU.
+ *
+ * Setting this to true will prevent r600_flush_emit() from emitting
+ * a SURFACE_SYNC packet. This field will be cleared by
+ * r600_context_flush() after flushing the command stream. */
+ boolean skip_surface_sync_on_next_cs_flush;
};
static INLINE void r600_emit_command_buffer(struct radeon_winsys_cs *cs,
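To make the intended lifecycle of the new flag easier to follow across the hunks above (armed after a compute dispatch on Cayman, checked before emitting SURFACE_SYNC, cleared once the command stream has been flushed), here is a minimal standalone C sketch. The fake_r600_context type and the helper functions below are simplified stand-ins invented for illustration; they are not the actual r600g structures or entry points.

#include <stdbool.h>
#include <stdio.h>

/* Simplified stand-ins for the driver state; illustrative only,
 * not the real r600g types. */
enum chip_class { R600, R700, EVERGREEN, CAYMAN };

struct fake_r600_context {
	enum chip_class chip_class;
	bool skip_surface_sync_on_next_cs_flush;
};

/* Mirrors the r600_flush_emit() change: SURFACE_SYNC is skipped while
 * the flag is armed. */
static void flush_emit(struct fake_r600_context *rctx, unsigned cp_coher_cntl)
{
	if (cp_coher_cntl && !rctx->skip_surface_sync_on_next_cs_flush)
		printf("emit SURFACE_SYNC, cp_coher_cntl=0x%08x\n", cp_coher_cntl);
	else
		printf("SURFACE_SYNC suppressed\n");
}

/* Mirrors the compute dispatch change: arm the flag on Cayman after a
 * compute shader has been dispatched. */
static void compute_dispatch(struct fake_r600_context *ctx)
{
	printf("dispatch compute shader\n");
	if (ctx->chip_class >= CAYMAN)
		ctx->skip_surface_sync_on_next_cs_flush = true;
}

/* Mirrors the r600_context_flush() change: clear the flag once the
 * command stream has actually been flushed. */
static void context_flush(struct fake_r600_context *ctx, unsigned cp_coher_cntl)
{
	flush_emit(ctx, cp_coher_cntl);
	printf("flush command stream\n");
	ctx->skip_surface_sync_on_next_cs_flush = false;
}

int main(void)
{
	struct fake_r600_context ctx = { CAYMAN, false };

	compute_dispatch(&ctx);
	context_flush(&ctx, 0x1); /* suppressed: the dispatch armed the flag */
	context_flush(&ctx, 0x1); /* emitted: the previous flush cleared the flag */
	return 0;
}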