radeonsi: clean up compute flush
author    Bas Nieuwenhuizen <bas@basnieuwenhuizen.nl>
Sat, 2 Apr 2016 09:37:06 +0000 (11:37 +0200)
committer Bas Nieuwenhuizen <bas@basnieuwenhuizen.nl>
Tue, 19 Apr 2016 16:10:32 +0000 (18:10 +0200)
Signed-off-by: Bas Nieuwenhuizen <bas@basnieuwenhuizen.nl>
Reviewed-by: Marek Olšák <marek.olsak@amd.com>
Reviewed-by: Nicolai Hähnle <nicolai.haehnle@amd.com>
src/gallium/drivers/radeonsi/si_pipe.h
src/gallium/drivers/radeonsi/si_state_draw.c

src/gallium/drivers/radeonsi/si_pipe.h
index c77353fba3204b97025553eef7aa68a1ac6ea122..a28c7d70c5680f75a65072e88624d26126eaa664 100644
@@ -63,9 +63,6 @@
 #define SI_CONTEXT_CS_PARTIAL_FLUSH    (R600_CONTEXT_PRIVATE_FLAG << 10)
 #define SI_CONTEXT_VGT_FLUSH           (R600_CONTEXT_PRIVATE_FLAG << 11)
 #define SI_CONTEXT_VGT_STREAMOUT_SYNC  (R600_CONTEXT_PRIVATE_FLAG << 12)
-/* Compute only. */
-#define SI_CONTEXT_FLUSH_WITH_INV_L2   (R600_CONTEXT_PRIVATE_FLAG << 13) /* TODO: merge with TC? */
-#define SI_CONTEXT_FLAG_COMPUTE                (R600_CONTEXT_PRIVATE_FLAG << 14)
 
 #define SI_CONTEXT_FLUSH_AND_INV_FRAMEBUFFER (SI_CONTEXT_FLUSH_AND_INV_CB | \
                                              SI_CONTEXT_FLUSH_AND_INV_CB_META | \
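
The two defines removed above are the compute-only flush flags. SI_CONTEXT_FLAG_COMPUTE did not request any cache operation by itself; si_emit_cache_flush() only translated it into the shader-type bit of every PKT3 header it emitted. A minimal sketch of the old path, using only the packet macros already visible in this patch:

	/* Old behaviour: derive the PKT3 shader-type bit from the context flag
	 * (nonzero marks the packet as targeting the compute shader type)... */
	uint32_t compute =
		PKT3_SHADER_TYPE_S(!!(sctx->flags & SI_CONTEXT_FLAG_COMPUTE));

	/* ...and OR it into each event-write header. */
	radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0) | compute);
	radeon_emit(cs, EVENT_TYPE(V_028A90_VGT_FLUSH) | EVENT_INDEX(0));

With the flag gone, the si_state_draw.c hunks below always emit the plain graphics PKT3 header.
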
src/gallium/drivers/radeonsi/si_state_draw.c
index 232f7a50e4c6d0d5220288486565ca8a5bc05178..15d58d412d3c2cd3a8a6e5c30dd148691c6fb863 100644
@@ -607,8 +607,6 @@ void si_emit_cache_flush(struct si_context *si_ctx, struct r600_atom *atom)
        struct r600_common_context *sctx = &si_ctx->b;
        struct radeon_winsys_cs *cs = sctx->gfx.cs;
        uint32_t cp_coher_cntl = 0;
-       uint32_t compute =
-               PKT3_SHADER_TYPE_S(!!(sctx->flags & SI_CONTEXT_FLAG_COMPUTE));
 
        /* SI has a bug that it always flushes ICACHE and KCACHE if either
         * bit is set. An alternative way is to write SQC_CACHES, but that
@@ -645,7 +643,7 @@ void si_emit_cache_flush(struct si_context *si_ctx, struct r600_atom *atom)
 
                /* Necessary for DCC */
                if (sctx->chip_class >= VI) {
-                       radeon_emit(cs, PKT3(PKT3_EVENT_WRITE_EOP, 4, 0) | compute);
+                       radeon_emit(cs, PKT3(PKT3_EVENT_WRITE_EOP, 4, 0));
                        radeon_emit(cs, EVENT_TYPE(V_028A90_FLUSH_AND_INV_CB_DATA_TS) |
                                        EVENT_INDEX(5));
                        radeon_emit(cs, 0);
@@ -660,22 +658,17 @@ void si_emit_cache_flush(struct si_context *si_ctx, struct r600_atom *atom)
        }
 
        if (sctx->flags & SI_CONTEXT_FLUSH_AND_INV_CB_META) {
-               radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0) | compute);
+               radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
                radeon_emit(cs, EVENT_TYPE(V_028A90_FLUSH_AND_INV_CB_META) | EVENT_INDEX(0));
                /* needed for wait for idle in SURFACE_SYNC */
                assert(sctx->flags & SI_CONTEXT_FLUSH_AND_INV_CB);
        }
        if (sctx->flags & SI_CONTEXT_FLUSH_AND_INV_DB_META) {
-               radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0) | compute);
+               radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
                radeon_emit(cs, EVENT_TYPE(V_028A90_FLUSH_AND_INV_DB_META) | EVENT_INDEX(0));
                /* needed for wait for idle in SURFACE_SYNC */
                assert(sctx->flags & SI_CONTEXT_FLUSH_AND_INV_DB);
        }
-       if (sctx->flags & SI_CONTEXT_FLUSH_WITH_INV_L2) {
-               radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0) | compute);
-               radeon_emit(cs, EVENT_TYPE(EVENT_TYPE_CACHE_FLUSH) | EVENT_INDEX(7) |
-                               EVENT_WRITE_INV_L2);
-        }
 
        /* Wait for shader engines to go idle.
         * VS and PS waits are unnecessary if SURFACE_SYNC is going to wait
@@ -684,25 +677,25 @@ void si_emit_cache_flush(struct si_context *si_ctx, struct r600_atom *atom)
        if (!(sctx->flags & (SI_CONTEXT_FLUSH_AND_INV_CB |
                             SI_CONTEXT_FLUSH_AND_INV_DB))) {
                if (sctx->flags & SI_CONTEXT_PS_PARTIAL_FLUSH) {
-                       radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0) | compute);
+                       radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
                        radeon_emit(cs, EVENT_TYPE(V_028A90_PS_PARTIAL_FLUSH) | EVENT_INDEX(4));
                } else if (sctx->flags & SI_CONTEXT_VS_PARTIAL_FLUSH) {
-                       radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0) | compute);
+                       radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
                        radeon_emit(cs, EVENT_TYPE(V_028A90_VS_PARTIAL_FLUSH) | EVENT_INDEX(4));
                }
        }
        if (sctx->flags & SI_CONTEXT_CS_PARTIAL_FLUSH) {
-               radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0) | compute);
+               radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
                radeon_emit(cs, EVENT_TYPE(V_028A90_CS_PARTIAL_FLUSH | EVENT_INDEX(4)));
        }
 
        /* VGT state synchronization. */
        if (sctx->flags & SI_CONTEXT_VGT_FLUSH) {
-               radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0) | compute);
+               radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
                radeon_emit(cs, EVENT_TYPE(V_028A90_VGT_FLUSH) | EVENT_INDEX(0));
        }
        if (sctx->flags & SI_CONTEXT_VGT_STREAMOUT_SYNC) {
-               radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0) | compute);
+               radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
                radeon_emit(cs, EVENT_TYPE(V_028A90_VGT_STREAMOUT_SYNC) | EVENT_INDEX(0));
        }