- struct radeon_cmdbuf *cs = sctx->gfx_cs;
- uint32_t flags = sctx->flags;
-
- if (!sctx->has_graphics) {
- /* Only process compute flags. */
- flags &= SI_CONTEXT_INV_ICACHE |
- SI_CONTEXT_INV_SCACHE |
- SI_CONTEXT_INV_VCACHE |
- SI_CONTEXT_INV_L2 |
- SI_CONTEXT_WB_L2 |
- SI_CONTEXT_INV_L2_METADATA |
- SI_CONTEXT_CS_PARTIAL_FLUSH;
- }
-
- uint32_t cp_coher_cntl = 0;
- const uint32_t flush_cb_db = flags & (SI_CONTEXT_FLUSH_AND_INV_CB |
- SI_CONTEXT_FLUSH_AND_INV_DB);
- const bool is_barrier = flush_cb_db ||
- /* INV_ICACHE == beginning of gfx IB. Checking
- * INV_ICACHE fixes corruption for DeusExMD with
- * compute-based culling, but I don't know why.
- */
- flags & (SI_CONTEXT_INV_ICACHE |
- SI_CONTEXT_PS_PARTIAL_FLUSH |
- SI_CONTEXT_VS_PARTIAL_FLUSH) ||
- (flags & SI_CONTEXT_CS_PARTIAL_FLUSH &&
- sctx->compute_is_busy);
-
- assert(sctx->chip_class <= GFX9);
-
- if (flags & SI_CONTEXT_FLUSH_AND_INV_CB)
- sctx->num_cb_cache_flushes++;
- if (flags & SI_CONTEXT_FLUSH_AND_INV_DB)
- sctx->num_db_cache_flushes++;
-
- /* GFX6 has a bug that it always flushes ICACHE and KCACHE if either
- * bit is set. An alternative way is to write SQC_CACHES, but that
- * doesn't seem to work reliably. Since the bug doesn't affect
- * correctness (it only does more work than necessary) and
- * the performance impact is likely negligible, there is no plan
- * to add a workaround for it.
- */
-
- if (flags & SI_CONTEXT_INV_ICACHE)
- cp_coher_cntl |= S_0085F0_SH_ICACHE_ACTION_ENA(1);
- if (flags & SI_CONTEXT_INV_SCACHE)
- cp_coher_cntl |= S_0085F0_SH_KCACHE_ACTION_ENA(1);
-
- if (sctx->chip_class <= GFX8) {
- if (flags & SI_CONTEXT_FLUSH_AND_INV_CB) {
- cp_coher_cntl |= S_0085F0_CB_ACTION_ENA(1) |
- S_0085F0_CB0_DEST_BASE_ENA(1) |
- S_0085F0_CB1_DEST_BASE_ENA(1) |
- S_0085F0_CB2_DEST_BASE_ENA(1) |
- S_0085F0_CB3_DEST_BASE_ENA(1) |
- S_0085F0_CB4_DEST_BASE_ENA(1) |
- S_0085F0_CB5_DEST_BASE_ENA(1) |
- S_0085F0_CB6_DEST_BASE_ENA(1) |
- S_0085F0_CB7_DEST_BASE_ENA(1);
-
- /* Necessary for DCC */
- if (sctx->chip_class == GFX8)
- si_cp_release_mem(sctx, cs,
- V_028A90_FLUSH_AND_INV_CB_DATA_TS,
- 0, EOP_DST_SEL_MEM, EOP_INT_SEL_NONE,
- EOP_DATA_SEL_DISCARD, NULL,
- 0, 0, SI_NOT_QUERY);
- }
- if (flags & SI_CONTEXT_FLUSH_AND_INV_DB)
- cp_coher_cntl |= S_0085F0_DB_ACTION_ENA(1) |
- S_0085F0_DB_DEST_BASE_ENA(1);
- }
-
- if (flags & SI_CONTEXT_FLUSH_AND_INV_CB) {
- /* Flush CMASK/FMASK/DCC. SURFACE_SYNC will wait for idle. */
- radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
- radeon_emit(cs, EVENT_TYPE(V_028A90_FLUSH_AND_INV_CB_META) | EVENT_INDEX(0));
- }
- if (flags & (SI_CONTEXT_FLUSH_AND_INV_DB |
- SI_CONTEXT_FLUSH_AND_INV_DB_META)) {
- /* Flush HTILE. SURFACE_SYNC will wait for idle. */
- radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
- radeon_emit(cs, EVENT_TYPE(V_028A90_FLUSH_AND_INV_DB_META) | EVENT_INDEX(0));
- }
-
- /* Wait for shader engines to go idle.
- * VS and PS waits are unnecessary if SURFACE_SYNC is going to wait
- * for everything including CB/DB cache flushes.
- */
- if (!flush_cb_db) {
- if (flags & SI_CONTEXT_PS_PARTIAL_FLUSH) {
- radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
- radeon_emit(cs, EVENT_TYPE(V_028A90_PS_PARTIAL_FLUSH) | EVENT_INDEX(4));
- /* Only count explicit shader flushes, not implicit ones
- * done by SURFACE_SYNC.
- */
- sctx->num_vs_flushes++;
- sctx->num_ps_flushes++;
- } else if (flags & SI_CONTEXT_VS_PARTIAL_FLUSH) {
- radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
- radeon_emit(cs, EVENT_TYPE(V_028A90_VS_PARTIAL_FLUSH) | EVENT_INDEX(4));
- sctx->num_vs_flushes++;
- }
- }
-
- if (flags & SI_CONTEXT_CS_PARTIAL_FLUSH &&
- sctx->compute_is_busy) {
- radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
- radeon_emit(cs, EVENT_TYPE(V_028A90_CS_PARTIAL_FLUSH) | EVENT_INDEX(4));
- sctx->num_cs_flushes++;
- sctx->compute_is_busy = false;
- }
-
- /* VGT state synchronization. */
- if (flags & SI_CONTEXT_VGT_FLUSH) {
- radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
- radeon_emit(cs, EVENT_TYPE(V_028A90_VGT_FLUSH) | EVENT_INDEX(0));
- }
- if (flags & SI_CONTEXT_VGT_STREAMOUT_SYNC) {
- radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
- radeon_emit(cs, EVENT_TYPE(V_028A90_VGT_STREAMOUT_SYNC) | EVENT_INDEX(0));
- }
-
- /* GFX9: Wait for idle if we're flushing CB or DB. ACQUIRE_MEM doesn't
- * wait for idle on GFX9. We have to use a TS event.
- */
- if (sctx->chip_class >= GFX9 && flush_cb_db) {
- uint64_t va;
- unsigned tc_flags, cb_db_event;
-
- /* Set the CB/DB flush event. */
- switch (flush_cb_db) {
- case SI_CONTEXT_FLUSH_AND_INV_CB:
- cb_db_event = V_028A90_FLUSH_AND_INV_CB_DATA_TS;
- break;
- case SI_CONTEXT_FLUSH_AND_INV_DB:
- cb_db_event = V_028A90_FLUSH_AND_INV_DB_DATA_TS;
- break;
- default:
- /* both CB & DB */
- cb_db_event = V_028A90_CACHE_FLUSH_AND_INV_TS_EVENT;
- }
-
- /* These are the only allowed combinations. If you need to
- * do multiple operations at once, do them separately.
- * All operations that invalidate L2 also seem to invalidate
- * metadata. Volatile (VOL) and WC flushes are not listed here.
- *
- * TC    | TC_WB         = writeback & invalidate L2 & L1
- * TC    | TC_WB | TC_NC = writeback & invalidate L2 for MTYPE == NC
- *         TC_WB | TC_NC = writeback L2 for MTYPE == NC
- * TC            | TC_NC = invalidate L2 for MTYPE == NC
- * TC    | TC_MD         = writeback & invalidate L2 metadata (DCC, etc.)
- * TCL1                  = invalidate L1
- */
- tc_flags = 0;
-
- if (flags & SI_CONTEXT_INV_L2_METADATA) {
- tc_flags = EVENT_TC_ACTION_ENA |
- EVENT_TC_MD_ACTION_ENA;
- }
-
- /* Ideally flush TC together with CB/DB. */
- if (flags & SI_CONTEXT_INV_L2) {
- /* Writeback and invalidate everything in L2 & L1. */
- tc_flags = EVENT_TC_ACTION_ENA |
- EVENT_TC_WB_ACTION_ENA;
-
- /* Clear the flags. */
- flags &= ~(SI_CONTEXT_INV_L2 |
- SI_CONTEXT_WB_L2 |
- SI_CONTEXT_INV_VCACHE);
- sctx->num_L2_invalidates++;
- }
-
- /* Do the flush (enqueue the event and wait for it). */
- va = sctx->wait_mem_scratch->gpu_address;
- sctx->wait_mem_number++;
-
- si_cp_release_mem(sctx, cs, cb_db_event, tc_flags,
- EOP_DST_SEL_MEM,
- EOP_INT_SEL_SEND_DATA_AFTER_WR_CONFIRM,
- EOP_DATA_SEL_VALUE_32BIT,
- sctx->wait_mem_scratch, va,
- sctx->wait_mem_number, SI_NOT_QUERY);
- si_cp_wait_mem(sctx, cs, va, sctx->wait_mem_number, 0xffffffff,
- WAIT_REG_MEM_EQUAL);
- }
-
- /* Make sure ME is idle (it executes most packets) before continuing.
- * This prevents read-after-write hazards between PFP and ME.
- */
- if (sctx->has_graphics &&
- (cp_coher_cntl ||
- (flags & (SI_CONTEXT_CS_PARTIAL_FLUSH |
- SI_CONTEXT_INV_VCACHE |
- SI_CONTEXT_INV_L2 |
- SI_CONTEXT_WB_L2)))) {
- radeon_emit(cs, PKT3(PKT3_PFP_SYNC_ME, 0, 0));
- radeon_emit(cs, 0);
- }
-
- /* GFX6-GFX8 only:
- * When one of the CP_COHER_CNTL.DEST_BASE flags is set, SURFACE_SYNC
- * waits for idle, so it should be last. SURFACE_SYNC is done in PFP.
- *
- * cp_coher_cntl should contain all necessary flags except TC flags
- * at this point.
- *
- * GFX6-GFX7 don't support L2 write-back.
- */
- if (flags & SI_CONTEXT_INV_L2 ||
- (sctx->chip_class <= GFX7 &&
- (flags & SI_CONTEXT_WB_L2))) {
- /* Invalidate L1 & L2. (L1 is always invalidated on GFX6)
- * WB must be set on GFX8+ when TC_ACTION is set.
- */
- si_emit_surface_sync(sctx, sctx->gfx_cs, cp_coher_cntl |
- S_0085F0_TC_ACTION_ENA(1) |
- S_0085F0_TCL1_ACTION_ENA(1) |
- S_0301F0_TC_WB_ACTION_ENA(sctx->chip_class >= GFX8));
- cp_coher_cntl = 0;
- sctx->num_L2_invalidates++;
- } else {
- /* L1 invalidation and L2 writeback must be done separately,
- * because both operations can't be done together.
- */
- if (flags & SI_CONTEXT_WB_L2) {
- /* WB = write-back
- * NC = apply to non-coherent MTYPEs
- * (i.e. MTYPE <= 1, which is what we use everywhere)
- *
- * WB doesn't work without NC.
- */
- si_emit_surface_sync(sctx, sctx->gfx_cs, cp_coher_cntl |
- S_0301F0_TC_WB_ACTION_ENA(1) |
- S_0301F0_TC_NC_ACTION_ENA(1));
- cp_coher_cntl = 0;
- sctx->num_L2_writebacks++;
- }
- if (flags & SI_CONTEXT_INV_VCACHE) {
- /* Invalidate per-CU VMEM L1. */
- si_emit_surface_sync(sctx, sctx->gfx_cs, cp_coher_cntl |
- S_0085F0_TCL1_ACTION_ENA(1));
- cp_coher_cntl = 0;
- }
- }
-
- /* If TC flushes haven't cleared this... */
- if (cp_coher_cntl)
- si_emit_surface_sync(sctx, sctx->gfx_cs, cp_coher_cntl);
-
- if (is_barrier)
- si_prim_discard_signal_next_compute_ib_start(sctx);
-
- if (flags & SI_CONTEXT_START_PIPELINE_STATS) {
- radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
- radeon_emit(cs, EVENT_TYPE(V_028A90_PIPELINESTAT_START) |
- EVENT_INDEX(0));
- } else if (flags & SI_CONTEXT_STOP_PIPELINE_STATS) {
- radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
- radeon_emit(cs, EVENT_TYPE(V_028A90_PIPELINESTAT_STOP) |
- EVENT_INDEX(0));
- }
-
- sctx->flags = 0;
+ struct radeon_cmdbuf *cs = sctx->gfx_cs;
+ uint32_t flags = sctx->flags;
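+ /* Work on a local copy; sctx->flags is cleared at the end of this
+ * function once everything has been emitted. */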
+
+ if (!sctx->has_graphics) {
+ /* Only process compute flags. */
+ flags &= SI_CONTEXT_INV_ICACHE | SI_CONTEXT_INV_SCACHE | SI_CONTEXT_INV_VCACHE |
+ SI_CONTEXT_INV_L2 | SI_CONTEXT_WB_L2 | SI_CONTEXT_INV_L2_METADATA |
+ SI_CONTEXT_CS_PARTIAL_FLUSH;
+ }
+
+ uint32_t cp_coher_cntl = 0;
+ const uint32_t flush_cb_db = flags & (SI_CONTEXT_FLUSH_AND_INV_CB | SI_CONTEXT_FLUSH_AND_INV_DB);
+ const bool is_barrier =
+ flush_cb_db ||
+ /* INV_ICACHE == beginning of gfx IB. Checking
+ * INV_ICACHE fixes corruption for DeusExMD with
+ * compute-based culling, but I don't know why.
+ */
+ flags & (SI_CONTEXT_INV_ICACHE | SI_CONTEXT_PS_PARTIAL_FLUSH | SI_CONTEXT_VS_PARTIAL_FLUSH) ||
+ (flags & SI_CONTEXT_CS_PARTIAL_FLUSH && sctx->compute_is_busy);
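+ /* is_barrier is checked at the bottom of this function to decide
+ * whether to signal the start of the next prim discard compute IB. */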
+
+ assert(sctx->chip_class <= GFX9);
+
+ if (flags & SI_CONTEXT_FLUSH_AND_INV_CB)
+ sctx->num_cb_cache_flushes++;
+ if (flags & SI_CONTEXT_FLUSH_AND_INV_DB)
+ sctx->num_db_cache_flushes++;
+
+ /* GFX6 has a bug where it always flushes ICACHE and KCACHE if either
+ * bit is set. An alternative would be to write SQC_CACHES, but that
+ * doesn't seem to work reliably. Since the bug doesn't affect
+ * correctness (it only does more work than necessary) and the
+ * performance impact is likely negligible, there is no plan to add
+ * a workaround for it.
+ */
+
+ if (flags & SI_CONTEXT_INV_ICACHE)
+ cp_coher_cntl |= S_0085F0_SH_ICACHE_ACTION_ENA(1);
+ if (flags & SI_CONTEXT_INV_SCACHE)
+ cp_coher_cntl |= S_0085F0_SH_KCACHE_ACTION_ENA(1);
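+ /* These bits only accumulate in cp_coher_cntl; the actual cache
+ * operation happens when a SURFACE_SYNC packet is emitted near the
+ * end of this function. */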
+
+ if (sctx->chip_class <= GFX8) {
+ if (flags & SI_CONTEXT_FLUSH_AND_INV_CB) {
+ cp_coher_cntl |= S_0085F0_CB_ACTION_ENA(1) | S_0085F0_CB0_DEST_BASE_ENA(1) |
+ S_0085F0_CB1_DEST_BASE_ENA(1) | S_0085F0_CB2_DEST_BASE_ENA(1) |
+ S_0085F0_CB3_DEST_BASE_ENA(1) | S_0085F0_CB4_DEST_BASE_ENA(1) |
+ S_0085F0_CB5_DEST_BASE_ENA(1) | S_0085F0_CB6_DEST_BASE_ENA(1) |
+ S_0085F0_CB7_DEST_BASE_ENA(1);
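+ /* The DEST_BASE_ENA bits also make the SURFACE_SYNC emitted at the
+ * end of this function wait for idle (see the GFX6-GFX8 comment
+ * below). */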
+
+ /* Necessary for DCC */
+ if (sctx->chip_class == GFX8)
+ si_cp_release_mem(sctx, cs, V_028A90_FLUSH_AND_INV_CB_DATA_TS, 0, EOP_DST_SEL_MEM,
+ EOP_INT_SEL_NONE, EOP_DATA_SEL_DISCARD, NULL, 0, 0, SI_NOT_QUERY);
+ }
+ if (flags & SI_CONTEXT_FLUSH_AND_INV_DB)
+ cp_coher_cntl |= S_0085F0_DB_ACTION_ENA(1) | S_0085F0_DB_DEST_BASE_ENA(1);
+ }
+
+ if (flags & SI_CONTEXT_FLUSH_AND_INV_CB) {
+ /* Flush CMASK/FMASK/DCC. SURFACE_SYNC will wait for idle. */
+ radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
+ radeon_emit(cs, EVENT_TYPE(V_028A90_FLUSH_AND_INV_CB_META) | EVENT_INDEX(0));
+ }
+ if (flags & (SI_CONTEXT_FLUSH_AND_INV_DB | SI_CONTEXT_FLUSH_AND_INV_DB_META)) {
+ /* Flush HTILE. SURFACE_SYNC will wait for idle. */
+ radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
+ radeon_emit(cs, EVENT_TYPE(V_028A90_FLUSH_AND_INV_DB_META) | EVENT_INDEX(0));
+ }
+
+ /* Wait for shader engines to go idle.
+ * VS and PS waits are unnecessary if SURFACE_SYNC is going to wait
+ * for everything including CB/DB cache flushes.
+ */
+ if (!flush_cb_db) {
+ if (flags & SI_CONTEXT_PS_PARTIAL_FLUSH) {
+ radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
+ radeon_emit(cs, EVENT_TYPE(V_028A90_PS_PARTIAL_FLUSH) | EVENT_INDEX(4));
+ /* Only count explicit shader flushes, not implicit ones
+ * done by SURFACE_SYNC.
+ */
+ sctx->num_vs_flushes++;
+ sctx->num_ps_flushes++;
+ } else if (flags & SI_CONTEXT_VS_PARTIAL_FLUSH) {
+ radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
+ radeon_emit(cs, EVENT_TYPE(V_028A90_VS_PARTIAL_FLUSH) | EVENT_INDEX(4));
+ sctx->num_vs_flushes++;
+ }
+ }
+
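+ /* Flush compute only if compute work has actually been submitted;
+ * compute_is_busy is cleared below so that the flush isn't repeated. */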
+ if (flags & SI_CONTEXT_CS_PARTIAL_FLUSH && sctx->compute_is_busy) {
+ radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
+ radeon_emit(cs, EVENT_TYPE(V_028A90_CS_PARTIAL_FLUSH) | EVENT_INDEX(4));
+ sctx->num_cs_flushes++;
+ sctx->compute_is_busy = false;
+ }
+
+ /* VGT state synchronization. */
+ if (flags & SI_CONTEXT_VGT_FLUSH) {
+ radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
+ radeon_emit(cs, EVENT_TYPE(V_028A90_VGT_FLUSH) | EVENT_INDEX(0));
+ }
+ if (flags & SI_CONTEXT_VGT_STREAMOUT_SYNC) {
+ radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
+ radeon_emit(cs, EVENT_TYPE(V_028A90_VGT_STREAMOUT_SYNC) | EVENT_INDEX(0));
+ }
+
+ /* GFX9: Wait for idle if we're flushing CB or DB. ACQUIRE_MEM doesn't
+ * wait for idle on GFX9. We have to use a TS event.
+ */
+ if (sctx->chip_class == GFX9 && flush_cb_db) {
+ uint64_t va;
+ unsigned tc_flags, cb_db_event;
+
+ /* Set the CB/DB flush event. */
+ switch (flush_cb_db) {
+ case SI_CONTEXT_FLUSH_AND_INV_CB:
+ cb_db_event = V_028A90_FLUSH_AND_INV_CB_DATA_TS;
+ break;
+ case SI_CONTEXT_FLUSH_AND_INV_DB:
+ cb_db_event = V_028A90_FLUSH_AND_INV_DB_DATA_TS;
+ break;
+ default:
+ /* both CB & DB */
+ cb_db_event = V_028A90_CACHE_FLUSH_AND_INV_TS_EVENT;
+ }
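+ /* All three are TS (end-of-pipe timestamp) events, which is what the
+ * RELEASE_MEM below uses. */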
+
+ /* These are the only allowed combinations. If you need to
+ * do multiple operations at once, do them separately.
+ * All operations that invalidate L2 also seem to invalidate
+ * metadata. Volatile (VOL) and WC flushes are not listed here.
+ *
+ * TC    | TC_WB         = writeback & invalidate L2 & L1
+ * TC    | TC_WB | TC_NC = writeback & invalidate L2 for MTYPE == NC
+ *         TC_WB | TC_NC = writeback L2 for MTYPE == NC
+ * TC            | TC_NC = invalidate L2 for MTYPE == NC
+ * TC    | TC_MD         = writeback & invalidate L2 metadata (DCC, etc.)
+ * TCL1                  = invalidate L1
+ */
+ tc_flags = 0;
+
+ if (flags & SI_CONTEXT_INV_L2_METADATA) {
+ tc_flags = EVENT_TC_ACTION_ENA | EVENT_TC_MD_ACTION_ENA;
+ }
+
+ /* Ideally flush TC together with CB/DB. */
+ if (flags & SI_CONTEXT_INV_L2) {
+ /* Writeback and invalidate everything in L2 & L1. */
+ tc_flags = EVENT_TC_ACTION_ENA | EVENT_TC_WB_ACTION_ENA;
+
+ /* Clear the flags, because the TS event emitted just below already
+ * performs these operations (writeback & invalidate L2 & L1), so the
+ * standalone SURFACE_SYNC paths further down can be skipped. */
+ flags &= ~(SI_CONTEXT_INV_L2 | SI_CONTEXT_WB_L2 | SI_CONTEXT_INV_VCACHE);
+ sctx->num_L2_invalidates++;
+ }
+
+ /* Do the flush (enqueue the event and wait for it). */
+ va = sctx->wait_mem_scratch->gpu_address;
+ sctx->wait_mem_number++;
+
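+ /* RELEASE_MEM writes the incremented counter to the scratch buffer
+ * once the TS event (and the requested TC actions) has completed;
+ * WAIT_REG_MEM then stalls the CP until that value is visible in
+ * memory. */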
+ si_cp_release_mem(sctx, cs, cb_db_event, tc_flags, EOP_DST_SEL_MEM,
+ EOP_INT_SEL_SEND_DATA_AFTER_WR_CONFIRM, EOP_DATA_SEL_VALUE_32BIT,
+ sctx->wait_mem_scratch, va, sctx->wait_mem_number, SI_NOT_QUERY);
+ si_cp_wait_mem(sctx, cs, va, sctx->wait_mem_number, 0xffffffff, WAIT_REG_MEM_EQUAL);
+ }
+
+ /* Make sure ME is idle (it executes most packets) before continuing.
+ * This prevents read-after-write hazards between PFP and ME.
+ */
+ if (sctx->has_graphics &&
+ (cp_coher_cntl || (flags & (SI_CONTEXT_CS_PARTIAL_FLUSH | SI_CONTEXT_INV_VCACHE |
+ SI_CONTEXT_INV_L2 | SI_CONTEXT_WB_L2)))) {
+ radeon_emit(cs, PKT3(PKT3_PFP_SYNC_ME, 0, 0));
+ radeon_emit(cs, 0);
+ }
+
+ /* GFX6-GFX8 only:
+ * When one of the CP_COHER_CNTL.DEST_BASE flags is set, SURFACE_SYNC
+ * waits for idle, so it should be last. SURFACE_SYNC is done in PFP.
+ *
+ * cp_coher_cntl should contain all necessary flags except TC flags
+ * at this point.
+ *
+ * GFX6-GFX7 don't support L2 write-back.
+ */
+ if (flags & SI_CONTEXT_INV_L2 || (sctx->chip_class <= GFX7 && (flags & SI_CONTEXT_WB_L2))) {
+ /* Invalidate L1 & L2. (L1 is always invalidated on GFX6)
+ * WB must be set on GFX8+ when TC_ACTION is set.
+ */
+ si_emit_surface_sync(sctx, sctx->gfx_cs,
+ cp_coher_cntl | S_0085F0_TC_ACTION_ENA(1) | S_0085F0_TCL1_ACTION_ENA(1) |
+ S_0301F0_TC_WB_ACTION_ENA(sctx->chip_class >= GFX8));
+ cp_coher_cntl = 0;
+ sctx->num_L2_invalidates++;
+ } else {
+ /* L1 invalidation and L2 writeback must be emitted as separate
+ * SURFACE_SYNC packets, because the two operations can't be
+ * combined in one.
+ */
+ if (flags & SI_CONTEXT_WB_L2) {
+ /* WB = write-back
+ * NC = apply to non-coherent MTYPEs
+ * (i.e. MTYPE <= 1, which is what we use everywhere)
+ *
+ * WB doesn't work without NC.
+ */
+ si_emit_surface_sync(
+ sctx, sctx->gfx_cs,
+ cp_coher_cntl | S_0301F0_TC_WB_ACTION_ENA(1) | S_0301F0_TC_NC_ACTION_ENA(1));
+ cp_coher_cntl = 0;
+ sctx->num_L2_writebacks++;
+ }
+ if (flags & SI_CONTEXT_INV_VCACHE) {
+ /* Invalidate per-CU VMEM L1. */
+ si_emit_surface_sync(sctx, sctx->gfx_cs, cp_coher_cntl | S_0085F0_TCL1_ACTION_ENA(1));
+ cp_coher_cntl = 0;
+ }
+ }
+
+ /* If none of the TC flushes above cleared cp_coher_cntl, flush the
+ * remaining caches now. */
+ if (cp_coher_cntl)
+ si_emit_surface_sync(sctx, sctx->gfx_cs, cp_coher_cntl);
+
+ if (is_barrier)
+ si_prim_discard_signal_next_compute_ib_start(sctx);
+
+ if (flags & SI_CONTEXT_START_PIPELINE_STATS) {
+ radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
+ radeon_emit(cs, EVENT_TYPE(V_028A90_PIPELINESTAT_START) | EVENT_INDEX(0));
+ } else if (flags & SI_CONTEXT_STOP_PIPELINE_STATS) {
+ radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
+ radeon_emit(cs, EVENT_TYPE(V_028A90_PIPELINESTAT_STOP) | EVENT_INDEX(0));
+ }
+
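+ /* Everything requested by sctx->flags has been emitted, so the
+ * flags can be reset. */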
+ sctx->flags = 0;