+/**
+ * Write an EOP (end-of-pipe) event: EVENT_WRITE_EOP on older graphics
+ * rings, RELEASE_MEM on GFX9+ and on GFX7+ compute rings.
+ *
+ * \param event EVENT_TYPE_*
+ * \param event_flags Optional cache flush flags (TC)
+ * \param dst_sel MEM or TC_L2
+ * \param int_sel NONE or SEND_DATA_AFTER_WR_CONFIRM
+ * \param data_sel DISCARD, VALUE_32BIT, TIMESTAMP, or GDS
+ * \param buf Buffer
+ * \param va GPU address
+ * \param new_fence Fence value to write for this event.
+ * \param query_type PIPE_QUERY_* that triggered this event, or
+ *        PIPE_QUERY_GPU_FINISHED for plain fences (occlusion query types
+ *        skip the GFX9 bug workaround below).
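+ *
+ * A hedged usage sketch (not taken from a real caller; "fence_buf",
+ * "fence_va", and "fence_value" are hypothetical caller state): write a
+ * 32-bit fence value after bottom-of-pipe and confirm the write:
+ *
+ *   si_cp_release_mem(ctx, cs, V_028A90_BOTTOM_OF_PIPE_TS, 0,
+ *                     EOP_DST_SEL_MEM, EOP_INT_SEL_SEND_DATA_AFTER_WR_CONFIRM,
+ *                     EOP_DATA_SEL_VALUE_32BIT, fence_buf, fence_va,
+ *                     fence_value, PIPE_QUERY_GPU_FINISHED);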
+ */
+void si_cp_release_mem(struct si_context *ctx, struct radeon_cmdbuf *cs,
+ unsigned event, unsigned event_flags,
+ unsigned dst_sel, unsigned int_sel, unsigned data_sel,
+ struct si_resource *buf, uint64_t va,
+ uint32_t new_fence, unsigned query_type)
+{
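+ /* EVENT_INDEX is 6 for CS_DONE/PS_DONE and 5 for all other EOP events. */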
+ unsigned op = EVENT_TYPE(event) |
+ EVENT_INDEX(event == V_028A90_CS_DONE ||
+ event == V_028A90_PS_DONE ? 6 : 5) |
+ event_flags;
+ unsigned sel = EOP_DST_SEL(dst_sel) |
+ EOP_INT_SEL(int_sel) |
+ EOP_DATA_SEL(data_sel);
+ bool compute_ib = !ctx->has_graphics ||
+ cs == ctx->prim_discard_compute_cs;
+
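+ /* GFX9+ uses RELEASE_MEM for all EOP events; compute IBs use it from
+ * GFX7 on, since EVENT_WRITE_EOP is a graphics-ring packet. */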
+ if (ctx->chip_class >= GFX9 ||
+ (compute_ib && ctx->chip_class >= GFX7)) {
+ /* A ZPASS_DONE or PIXEL_STAT_DUMP_EVENT (of the DB occlusion
+ * counters) must immediately precede every timestamp event to
+ * prevent a GPU hang on GFX9.
+ *
+ * Occlusion queries don't need to do it here, because they
+ * always do ZPASS_DONE before the timestamp.
+ */
+ if (ctx->chip_class == GFX9 && !compute_ib &&
+ query_type != PIPE_QUERY_OCCLUSION_COUNTER &&
+ query_type != PIPE_QUERY_OCCLUSION_PREDICATE &&
+ query_type != PIPE_QUERY_OCCLUSION_PREDICATE_CONSERVATIVE) {
+ struct si_resource *scratch = ctx->eop_bug_scratch;
+
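+ /* ZPASS_DONE writes 16 bytes per render backend, which the scratch
+ * buffer must be large enough to hold. */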
+ assert(16 * ctx->screen->info.num_render_backends <=
+ scratch->b.b.width0);
+ radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
+ radeon_emit(cs, EVENT_TYPE(EVENT_TYPE_ZPASS_DONE) | EVENT_INDEX(1));
+ radeon_emit(cs, scratch->gpu_address);
+ radeon_emit(cs, scratch->gpu_address >> 32);
+
+ radeon_add_to_buffer_list(ctx, ctx->gfx_cs, scratch,
+ RADEON_USAGE_WRITE, RADEON_PRIO_QUERY);
+ }
+
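+ /* The RELEASE_MEM payload is one dword longer on GFX9 (the trailing
+ * unused dword emitted below), hence the 6-vs-5 count. */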
+ radeon_emit(cs, PKT3(PKT3_RELEASE_MEM, ctx->chip_class >= GFX9 ? 6 : 5, 0));
+ radeon_emit(cs, op);
+ radeon_emit(cs, sel);
+ radeon_emit(cs, va); /* address lo */
+ radeon_emit(cs, va >> 32); /* address hi */
+ radeon_emit(cs, new_fence); /* immediate data lo */
+ radeon_emit(cs, 0); /* immediate data hi */
+ if (ctx->chip_class >= GFX9)
+ radeon_emit(cs, 0); /* unused */
+ } else {
+ if (ctx->chip_class == GFX7 ||
+ ctx->chip_class == GFX8) {
+ struct si_resource *scratch = ctx->eop_bug_scratch;
+ uint64_t scratch_va = scratch->gpu_address; /* don't shadow the "va" parameter */
+
+ /* Two EOP events are required to make all engines go idle
+ * (and to execute optional cache flushes) before the timestamp
+ * is written.
+ */
+ radeon_emit(cs, PKT3(PKT3_EVENT_WRITE_EOP, 4, 0));
+ radeon_emit(cs, op);
+ radeon_emit(cs, scratch_va);
+ radeon_emit(cs, ((scratch_va >> 32) & 0xffff) | sel);
+ radeon_emit(cs, 0); /* immediate data */
+ radeon_emit(cs, 0); /* unused */
+
+ radeon_add_to_buffer_list(ctx, ctx->gfx_cs, scratch,
+ RADEON_USAGE_WRITE, RADEON_PRIO_QUERY);
+ }
+
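+ /* The second EOP event writes the actual fence value. */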
+ radeon_emit(cs, PKT3(PKT3_EVENT_WRITE_EOP, 4, 0));
+ radeon_emit(cs, op);
+ radeon_emit(cs, va);
+ radeon_emit(cs, ((va >> 32) & 0xffff) | sel);
+ radeon_emit(cs, new_fence); /* immediate data */
+ radeon_emit(cs, 0); /* unused */
+ }
+
+ if (buf) {
+ radeon_add_to_buffer_list(ctx, ctx->gfx_cs, buf, RADEON_USAGE_WRITE,
+ RADEON_PRIO_QUERY);
+ }
+}
+
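+/* Number of dwords to reserve for one fence write emitted by
+ * si_cp_release_mem. GFX7 and GFX8 need twice as many, because the bug
+ * workaround above emits a second EOP event. */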
+unsigned si_cp_write_fence_dwords(struct si_screen *screen)
+{
+ unsigned dwords = 6;
+
+ if (screen->info.chip_class == GFX7 ||
+ screen->info.chip_class == GFX8)
+ dwords *= 2;
+
+ return dwords;
+}
+
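+/**
+ * Make the CP wait until the memory location at \p va satisfies the
+ * comparison, i.e. until ((*va & mask) OP ref) becomes true.
+ *
+ * \param ref reference value
+ * \param mask mask applied to the value read from memory
+ * \param flags the comparison function, e.g. WAIT_REG_MEM_EQUAL
+ *
+ * A hedged usage sketch ("fence_va" and "value" are hypothetical):
+ * block until a 32-bit fence at fence_va equals value:
+ *
+ *   si_cp_wait_mem(ctx, cs, fence_va, value, 0xffffffff,
+ *                  WAIT_REG_MEM_EQUAL);
+ */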
+void si_cp_wait_mem(struct si_context *ctx, struct radeon_cmdbuf *cs,
+ uint64_t va, uint32_t ref, uint32_t mask, unsigned flags)
+{
+ radeon_emit(cs, PKT3(PKT3_WAIT_REG_MEM, 5, 0));
+ radeon_emit(cs, WAIT_REG_MEM_MEM_SPACE(1) | flags);
+ radeon_emit(cs, va);
+ radeon_emit(cs, va >> 32);
+ radeon_emit(cs, ref); /* reference value */
+ radeon_emit(cs, mask); /* mask */
+ radeon_emit(cs, 4); /* poll interval */
+}
+
+static void si_add_fence_dependency(struct si_context *sctx,