r600g: split INVAL_READ_CACHES into vertex, tex, and const cache flags
authorMarek Olšák <maraeo@gmail.com>
Sun, 30 Jun 2013 15:01:24 +0000 (17:01 +0200)
committerMarek Olšák <maraeo@gmail.com>
Mon, 8 Jul 2013 18:25:18 +0000 (20:25 +0200)
Also, flushing any cache in evergreen_emit_cs_shader seems to be superfluous:
we don't flush caches when changing the other shaders either.

Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
src/gallium/drivers/r600/evergreen_compute.c
src/gallium/drivers/r600/evergreen_hw_context.c
src/gallium/drivers/r600/r600_hw_context.c
src/gallium/drivers/r600/r600_pipe.h
src/gallium/drivers/r600/r600_state_common.c

index fd8d788564a52e126b0ed6bb6991b713504fc086..f76fc9cc25e92ee43ff3acb1e06724bfaf082fa7 100644 (file)
@@ -159,7 +159,7 @@ static void evergreen_cs_set_vertex_buffer(
 
        /* The vertex instructions in the compute shaders use the texture cache,
         * so we need to invalidate it. */
-       rctx->flags |= R600_CONTEXT_INVAL_READ_CACHES;
+       rctx->flags |= R600_CONTEXT_INV_VERTEX_CACHE;
        state->enabled_mask |= 1 << vb_index;
        state->dirty_mask |= 1 << vb_index;
        state->atom.dirty = true;
@@ -470,7 +470,9 @@ static void compute_emit_cs(struct r600_context *ctx, const uint *block_layout,
 
        /* XXX evergreen_flush_emit() hardcodes the CP_COHER_SIZE to 0xffffffff
         */
-       ctx->flags |= R600_CONTEXT_INVAL_READ_CACHES;
+       ctx->flags |= R600_CONTEXT_INV_CONST_CACHE |
+                     R600_CONTEXT_INV_VERTEX_CACHE |
+                     R600_CONTEXT_INV_TEX_CACHE;
        r600_flush_emit(ctx);
 
 #if 0
@@ -519,8 +521,6 @@ void evergreen_emit_cs_shader(
        r600_write_value(cs, PKT3C(PKT3_NOP, 0, 0));
        r600_write_value(cs, r600_context_bo_reloc(rctx, &rctx->rings.gfx,
                                                        kernel->code_bo, RADEON_USAGE_READ));
-
-       rctx->flags |= R600_CONTEXT_INVAL_READ_CACHES;
 }
 
 static void evergreen_launch_grid(
index 3c669cfaa4e3a40c8e5056c9aee95cd1a571d3f8..075ab17a557be5aadd8d082d923d24a85496cc33 100644 (file)
@@ -123,7 +123,9 @@ void evergreen_cp_dma_clear_buffer(struct r600_context *rctx,
 
        /* We flush the caches, because we might read from or write
         * to resources which are bound right now. */
-       rctx->flags |= R600_CONTEXT_INVAL_READ_CACHES |
+       rctx->flags |= R600_CONTEXT_INV_CONST_CACHE |
+                      R600_CONTEXT_INV_VERTEX_CACHE |
+                      R600_CONTEXT_INV_TEX_CACHE |
                       R600_CONTEXT_FLUSH_AND_INV |
                       R600_CONTEXT_FLUSH_AND_INV_CB |
                       R600_CONTEXT_FLUSH_AND_INV_DB |
@@ -168,7 +170,9 @@ void evergreen_cp_dma_clear_buffer(struct r600_context *rctx,
        }
 
        /* Invalidate the read caches. */
-       rctx->flags |= R600_CONTEXT_INVAL_READ_CACHES;
+       rctx->flags |= R600_CONTEXT_INV_CONST_CACHE |
+                      R600_CONTEXT_INV_VERTEX_CACHE |
+                      R600_CONTEXT_INV_TEX_CACHE;
 
        util_range_add(&r600_resource(dst)->valid_buffer_range, offset,
                       offset + size);
index 120d0fd354abe25989a8fc805ebf35e854220aee..ff36573ab5eaec7ec6e8c73a09ae716701432024 100644 (file)
@@ -220,6 +220,14 @@ void r600_flush_emit(struct r600_context *rctx)
            (rctx->flags & R600_CONTEXT_FLUSH_AND_INV_DB_META)) {
                cs->buf[cs->cdw++] = PKT3(PKT3_EVENT_WRITE, 0, 0);
                cs->buf[cs->cdw++] = EVENT_TYPE(EVENT_TYPE_FLUSH_AND_INV_DB_META) | EVENT_INDEX(0);
+
+               /* Set FULL_CACHE_ENA for DB META flushes on r7xx and later.
+                *
+                * This hack predates use of FLUSH_AND_INV_DB_META, so it's
+                * unclear whether it's still needed or even whether it has
+                * any effect.
+                */
+               cp_coher_cntl |= S_0085F0_FULL_CACHE_ENA(1);
        }
 
        if (rctx->flags & R600_CONTEXT_FLUSH_AND_INV) {
@@ -227,11 +235,15 @@ void r600_flush_emit(struct r600_context *rctx)
                cs->buf[cs->cdw++] = EVENT_TYPE(EVENT_TYPE_CACHE_FLUSH_AND_INV_EVENT) | EVENT_INDEX(0);
        }
 
-       if (rctx->flags & R600_CONTEXT_INVAL_READ_CACHES) {
-               cp_coher_cntl |= S_0085F0_VC_ACTION_ENA(1) |
-                               S_0085F0_TC_ACTION_ENA(1) |
-                               S_0085F0_SH_ACTION_ENA(1) |
-                               S_0085F0_FULL_CACHE_ENA(1);
+       if (rctx->flags & R600_CONTEXT_INV_CONST_CACHE) {
+               cp_coher_cntl |= S_0085F0_SH_ACTION_ENA(1);
+       }
+       if (rctx->flags & R600_CONTEXT_INV_VERTEX_CACHE) {
+               cp_coher_cntl |= rctx->has_vertex_cache ? S_0085F0_VC_ACTION_ENA(1)
+                                                       : S_0085F0_TC_ACTION_ENA(1);
+       }
+       if (rctx->flags & R600_CONTEXT_INV_TEX_CACHE) {
+               cp_coher_cntl |= S_0085F0_TC_ACTION_ENA(1);
        }
 
        if (rctx->flags & R600_CONTEXT_FLUSH_AND_INV_DB) {
@@ -616,7 +628,9 @@ void r600_cp_dma_copy_buffer(struct r600_context *rctx,
 
        /* We flush the caches, because we might read from or write
         * to resources which are bound right now. */
-       rctx->flags |= R600_CONTEXT_INVAL_READ_CACHES |
+       rctx->flags |= R600_CONTEXT_INV_CONST_CACHE |
+                      R600_CONTEXT_INV_VERTEX_CACHE |
+                      R600_CONTEXT_INV_TEX_CACHE |
                       R600_CONTEXT_FLUSH_AND_INV |
                       R600_CONTEXT_FLUSH_AND_INV_CB |
                       R600_CONTEXT_FLUSH_AND_INV_DB |
@@ -666,7 +680,9 @@ void r600_cp_dma_copy_buffer(struct r600_context *rctx,
        }
 
        /* Invalidate the read caches. */
-       rctx->flags |= R600_CONTEXT_INVAL_READ_CACHES;
+       rctx->flags |= R600_CONTEXT_INV_CONST_CACHE |
+                      R600_CONTEXT_INV_VERTEX_CACHE |
+                      R600_CONTEXT_INV_TEX_CACHE;
 
        util_range_add(&r600_resource(dst)->valid_buffer_range, dst_offset,
                       dst_offset + size);
index 0a244a8aaccd3c99fb63377742a99602af7ad38b..66ea258647de6d637e2c91f5ffdb56699c0e0eaf 100644 (file)
 #define R600_ERR(fmt, args...) \
        fprintf(stderr, "EE %s:%d %s - "fmt, __FILE__, __LINE__, __func__, ##args)
 
-#define R600_CONTEXT_INVAL_READ_CACHES         (1 << 0)
-#define R600_CONTEXT_STREAMOUT_FLUSH           (1 << 1)
-#define R600_CONTEXT_WAIT_3D_IDLE              (1 << 2)
-#define R600_CONTEXT_WAIT_CP_DMA_IDLE          (1 << 3)
-#define R600_CONTEXT_FLUSH_AND_INV             (1 << 4)
-#define R600_CONTEXT_FLUSH_AND_INV_CB_META     (1 << 5)
-#define R600_CONTEXT_PS_PARTIAL_FLUSH          (1 << 6)
-#define R600_CONTEXT_FLUSH_AND_INV_DB_META      (1 << 7)
-#define R600_CONTEXT_FLUSH_AND_INV_DB          (1 << 8)
-#define R600_CONTEXT_FLUSH_AND_INV_CB          (1 << 9)
+/* read caches */
+#define R600_CONTEXT_INV_VERTEX_CACHE          (1 << 0)
+#define R600_CONTEXT_INV_TEX_CACHE             (1 << 1)
+#define R600_CONTEXT_INV_CONST_CACHE           (1 << 2)
+/* read-write caches */
+#define R600_CONTEXT_STREAMOUT_FLUSH           (1 << 8)
+#define R600_CONTEXT_FLUSH_AND_INV             (1 << 9)
+#define R600_CONTEXT_FLUSH_AND_INV_CB_META     (1 << 10)
+#define R600_CONTEXT_FLUSH_AND_INV_DB_META     (1 << 11)
+#define R600_CONTEXT_FLUSH_AND_INV_DB          (1 << 12)
+#define R600_CONTEXT_FLUSH_AND_INV_CB          (1 << 13)
+/* engine synchronization */
+#define R600_CONTEXT_PS_PARTIAL_FLUSH          (1 << 16)
+#define R600_CONTEXT_WAIT_3D_IDLE              (1 << 17)
+#define R600_CONTEXT_WAIT_CP_DMA_IDLE          (1 << 18)
 
 #define R600_QUERY_DRAW_CALLS          (PIPE_QUERY_DRIVER_SPECIFIC + 0)
 #define R600_QUERY_REQUESTED_VRAM      (PIPE_QUERY_DRIVER_SPECIFIC + 1)
index d05675eef5d576afd44e8f4b995d0d7f648ae3ab..ea5a4e7627c564cb34db48edfdde064fe23f05a8 100644 (file)
@@ -89,7 +89,7 @@ static void r600_texture_barrier(struct pipe_context *ctx)
 {
        struct r600_context *rctx = (struct r600_context *)ctx;
 
-       rctx->flags |= R600_CONTEXT_INVAL_READ_CACHES |
+       rctx->flags |= R600_CONTEXT_INV_TEX_CACHE |
                       R600_CONTEXT_FLUSH_AND_INV_CB |
                       R600_CONTEXT_FLUSH_AND_INV |
                       R600_CONTEXT_WAIT_3D_IDLE;
@@ -500,7 +500,7 @@ static void r600_set_index_buffer(struct pipe_context *ctx,
 void r600_vertex_buffers_dirty(struct r600_context *rctx)
 {
        if (rctx->vertex_buffer_state.dirty_mask) {
-               rctx->flags |= R600_CONTEXT_INVAL_READ_CACHES;
+               rctx->flags |= R600_CONTEXT_INV_VERTEX_CACHE;
                rctx->vertex_buffer_state.atom.num_dw = (rctx->chip_class >= EVERGREEN ? 12 : 11) *
                                               util_bitcount(rctx->vertex_buffer_state.dirty_mask);
                rctx->vertex_buffer_state.atom.dirty = true;
@@ -557,7 +557,7 @@ void r600_sampler_views_dirty(struct r600_context *rctx,
                              struct r600_samplerview_state *state)
 {
        if (state->dirty_mask) {
-               rctx->flags |= R600_CONTEXT_INVAL_READ_CACHES;
+               rctx->flags |= R600_CONTEXT_INV_TEX_CACHE;
                state->atom.num_dw = (rctx->chip_class >= EVERGREEN ? 14 : 13) *
                                     util_bitcount(state->dirty_mask);
                state->atom.dirty = true;
@@ -912,7 +912,7 @@ static void r600_delete_vs_state(struct pipe_context *ctx, void *state)
 void r600_constant_buffers_dirty(struct r600_context *rctx, struct r600_constbuf_state *state)
 {
        if (state->dirty_mask) {
-               rctx->flags |= R600_CONTEXT_INVAL_READ_CACHES;
+               rctx->flags |= R600_CONTEXT_INV_CONST_CACHE;
                state->atom.num_dw = rctx->chip_class >= EVERGREEN ? util_bitcount(state->dirty_mask)*20
                                                                   : util_bitcount(state->dirty_mask)*19;
                state->atom.dirty = true;