ctx->b.flags = 0;
if (ctx->b.chip_class >= CAYMAN) {
- ctx->skip_surface_sync_on_next_cs_flush = true;
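+ /* CS_PARTIAL_FLUSH waits for the dispatched compute work to finish
+ * before the packets that follow are processed. */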
+ cs->buf[cs->cdw++] = PKT3(PKT3_EVENT_WRITE, 0, 0);
+ cs->buf[cs->cdw++] = EVENT_TYPE(EVENT_TYPE_CS_PARTIAL_FLUSH) | EVENT_INDEX(4);
+ /* DEALLOC_STATE prevents the GPU from hanging when a
+ * SURFACE_SYNC packet is emitted some time after a DISPATCH_DIRECT
+ * with any of the CB*_DEST_BASE_ENA or DB_DEST_BASE_ENA bits set.
+ */
+ cs->buf[cs->cdw++] = PKT3C(PKT3_DEALLOC_STATE, 0, 0);
+ cs->buf[cs->cdw++] = 0;
}
#if 0
#define R600_TEXEL_PITCH_ALIGNMENT_MASK 0x7
#define PKT3_NOP 0x10
+#define PKT3_DEALLOC_STATE 0x14
#define PKT3_DISPATCH_DIRECT 0x15
#define PKT3_DISPATCH_INDIRECT 0x16
#define PKT3_INDIRECT_BUFFER_END 0x17
S_0085F0_SMX_ACTION_ENA(1);
}
- if (cp_coher_cntl && !rctx->skip_surface_sync_on_next_cs_flush) {
+ if (cp_coher_cntl) {
cs->buf[cs->cdw++] = PKT3(PKT3_SURFACE_SYNC, 3, 0);
cs->buf[cs->cdw++] = cp_coher_cntl; /* CP_COHER_CNTL */
cs->buf[cs->cdw++] = 0xffffffff; /* CP_COHER_SIZE */
/* Flush the CS. */
ctx->b.ws->cs_flush(ctx->b.rings.gfx.cs, flags, ctx->screen->cs_count++);
-
- ctx->skip_surface_sync_on_next_cs_flush = false;
}
void r600_begin_new_cs(struct r600_context *ctx)
void *sb_context;
struct r600_isa *isa;
-
- /* Work-around for flushing problems with compute shaders on Cayman:
- * Emitting a SURFACE_SYNC packet with any of the CB*_DEST_BASE_ENA
- * or DB_DEST_BASE_ENA bits set after dispatching a compute shader
- * hangs the GPU.
- *
- * Setting this to true will prevent r600_flush_emit() from emitting
- * a SURFACE_SYNC packet. This field will be cleared by
- * r600_context_flush() after flushing the command stream. */
- boolean skip_surface_sync_on_next_cs_flush;
};
static INLINE void r600_emit_command_buffer(struct radeon_winsys_cs *cs,