radeonsi: import r600_streamout from drivers/radeon
author Marek Olšák <marek.olsak@amd.com>
Sat, 7 Oct 2017 20:54:31 +0000 (22:54 +0200)
committer Marek Olšák <marek.olsak@amd.com>
Mon, 9 Oct 2017 14:26:55 +0000 (16:26 +0200)
Reviewed-by: Nicolai Hähnle <nicolai.haehnle@amd.com>
16 files changed:
src/gallium/drivers/radeon/Makefile.sources
src/gallium/drivers/radeon/r600_pipe_common.c
src/gallium/drivers/radeon/r600_pipe_common.h
src/gallium/drivers/radeon/r600_query.c
src/gallium/drivers/radeon/r600_streamout.c [deleted file]
src/gallium/drivers/radeonsi/Makefile.sources
src/gallium/drivers/radeonsi/si_blit.c
src/gallium/drivers/radeonsi/si_descriptors.c
src/gallium/drivers/radeonsi/si_hw_context.c
src/gallium/drivers/radeonsi/si_pipe.c
src/gallium/drivers/radeonsi/si_pipe.h
src/gallium/drivers/radeonsi/si_state.c
src/gallium/drivers/radeonsi/si_state.h
src/gallium/drivers/radeonsi/si_state_draw.c
src/gallium/drivers/radeonsi/si_state_shaders.c
src/gallium/drivers/radeonsi/si_state_streamout.c [new file with mode: 0644]
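
The pattern of the move, as a minimal before/after sketch drawn from the si_state_draw.c hunk further down (sctx is a struct si_context *): the streamout state leaves the shared r600_common_context, becomes the struct si_streamout member of si_context, and the r600_-prefixed types and helpers gain si_ names.

  /* Before: the state lived in the shared r600_common_context,
   * reached through sctx->b and an r600_ helper. */
  if (r600_get_strmout_en(&sctx->b))
          sctx->b.flags |= SI_CONTEXT_VGT_STREAMOUT_SYNC;

  /* After: the same check uses the si_context member and helper
   * (struct si_streamout and si_get_strmout_en() in si_pipe.h). */
  if (si_get_strmout_en(sctx))
          sctx->b.flags |= SI_CONTEXT_VGT_STREAMOUT_SYNC;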

diff --git a/src/gallium/drivers/radeon/Makefile.sources b/src/gallium/drivers/radeon/Makefile.sources
index 5d38bb36b44b895e7b7778c3f798b3ea15a3d6a7..c32ebea95741c4fb60089292edc1635cc49af99c 100644 (file)
@@ -8,7 +8,6 @@ C_SOURCES := \
        r600_pipe_common.h \
        r600_query.c \
        r600_query.h \
-       r600_streamout.c \
        r600_test_dma.c \
        r600_texture.c \
        radeon_uvd.c \
diff --git a/src/gallium/drivers/radeon/r600_pipe_common.c b/src/gallium/drivers/radeon/r600_pipe_common.c
index da615802262972c0e6c609f638757873ce4e1815..a6008a17d9c47f6d56d2b3b93093f7feae1db4d6 100644 (file)
@@ -296,21 +296,10 @@ void si_preflush_suspend_features(struct r600_common_context *ctx)
        /* suspend queries */
        if (!LIST_IS_EMPTY(&ctx->active_queries))
                si_suspend_queries(ctx);
-
-       ctx->streamout.suspended = false;
-       if (ctx->streamout.begin_emitted) {
-               si_emit_streamout_end(ctx);
-               ctx->streamout.suspended = true;
-       }
 }
 
 void si_postflush_resume_features(struct r600_common_context *ctx)
 {
-       if (ctx->streamout.suspended) {
-               ctx->streamout.append_bitmask = ctx->streamout.enabled_mask;
-               si_streamout_buffers_dirty(ctx);
-       }
-
        /* resume queries */
        if (!LIST_IS_EMPTY(&ctx->active_queries))
                si_resume_queries(ctx);
@@ -647,7 +636,6 @@ bool si_common_context_init(struct r600_common_context *rctx,
        rctx->b.set_device_reset_callback = r600_set_device_reset_callback;
 
        si_init_context_texture_functions(rctx);
-       si_streamout_init(rctx);
        si_init_query_functions(rctx);
        si_init_msaa(&rctx->b);
 
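
The suspend/resume handling deleted from these two common helpers is not dropped; it moves into radeonsi's flush path (see the si_hw_context.c hunks below). Condensed, with the field and function names used in this commit, the relocated flow is:

  /* si_context_gfx_flush(): stop streamout before the command stream is flushed. */
  ctx->streamout.suspended = false;
  if (ctx->streamout.begin_emitted) {
          si_emit_streamout_end(ctx);
          ctx->streamout.suspended = true;
  }

  /* si_begin_new_cs(): re-arm the suspended targets on the next command stream. */
  if (ctx->streamout.suspended) {
          ctx->streamout.append_bitmask = ctx->streamout.enabled_mask;
          si_streamout_buffers_dirty(ctx);
  }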
diff --git a/src/gallium/drivers/radeon/r600_pipe_common.h b/src/gallium/drivers/radeon/r600_pipe_common.h
index e3cb1cfb106b7b30f21e87c93869e2f4743bc6e0..b620e6bfff50345bfd2dbccae3c5ab0f580961da 100644 (file)
@@ -497,43 +497,6 @@ struct r600_atom {
        unsigned short          id;
 };
 
-struct r600_so_target {
-       struct pipe_stream_output_target b;
-
-       /* The buffer where BUFFER_FILLED_SIZE is stored. */
-       struct r600_resource    *buf_filled_size;
-       unsigned                buf_filled_size_offset;
-       bool                    buf_filled_size_valid;
-
-       unsigned                stride_in_dw;
-};
-
-struct r600_streamout {
-       struct r600_atom                begin_atom;
-       bool                            begin_emitted;
-
-       unsigned                        enabled_mask;
-       unsigned                        num_targets;
-       struct r600_so_target           *targets[PIPE_MAX_SO_BUFFERS];
-
-       unsigned                        append_bitmask;
-       bool                            suspended;
-
-       /* External state which comes from the vertex shader;
-        * it must be set explicitly when binding a shader. */
-       uint16_t                        *stride_in_dw;
-       unsigned                        enabled_stream_buffers_mask; /* stream0 buffers0-3 in 4 LSB */
-
-       /* The state of VGT_STRMOUT_BUFFER_(CONFIG|EN). */
-       unsigned                        hw_enabled_mask;
-
-       /* The state of VGT_STRMOUT_(CONFIG|EN). */
-       struct r600_atom                enable_atom;
-       bool                            streamout_enabled;
-       bool                            prims_gen_query_enabled;
-       int                             num_prims_gen_queries;
-};
-
 struct r600_ring {
        struct radeon_winsys_cs         *cs;
        void (*flush)(void *ctx, unsigned flags,
@@ -578,9 +541,6 @@ struct r600_common_context {
        uint64_t                        vram;
        uint64_t                        gtt;
 
-       /* States. */
-       struct r600_streamout           streamout;
-
        /* Additional context states. */
        unsigned flags; /* flush flags */
 
@@ -790,17 +750,6 @@ void si_init_query_functions(struct r600_common_context *rctx);
 void si_suspend_queries(struct r600_common_context *ctx);
 void si_resume_queries(struct r600_common_context *ctx);
 
-/* r600_streamout.c */
-void si_streamout_buffers_dirty(struct r600_common_context *rctx);
-void si_common_set_streamout_targets(struct pipe_context *ctx,
-                                    unsigned num_targets,
-                                    struct pipe_stream_output_target **targets,
-                                    const unsigned *offset);
-void si_emit_streamout_end(struct r600_common_context *rctx);
-void si_update_prims_generated_query_state(struct r600_common_context *rctx,
-                                          unsigned type, int diff);
-void si_streamout_init(struct r600_common_context *rctx);
-
 /* r600_test_dma.c */
 void si_test_dma(struct r600_common_screen *rscreen);
 
@@ -900,12 +849,6 @@ r600_context_add_resource_size(struct pipe_context *ctx, struct pipe_resource *r
        }
 }
 
-static inline bool r600_get_strmout_en(struct r600_common_context *rctx)
-{
-       return rctx->streamout.streamout_enabled ||
-              rctx->streamout.prims_gen_query_enabled;
-}
-
 #define     SQ_TEX_XY_FILTER_POINT                         0x00
 #define     SQ_TEX_XY_FILTER_BILINEAR                      0x01
 #define     SQ_TEX_XY_FILTER_ANISO_POINT                   0x02
diff --git a/src/gallium/drivers/radeon/r600_query.c b/src/gallium/drivers/radeon/r600_query.c
index adf3522ebb867e99c447792f4c3d64f4518d0c58..3abfe1ebdb8d903ae803c5a4d55264e10bc6ad0b 100644 (file)
 #include "os/os_time.h"
 #include "tgsi/tgsi_text.h"
 
+/* TODO: remove this: */
+void si_update_prims_generated_query_state(struct r600_common_context *rctx,
+                                          unsigned type, int diff);
+
 #define R600_MAX_STREAMS 4
 
 struct r600_hw_query_params {
diff --git a/src/gallium/drivers/radeon/r600_streamout.c b/src/gallium/drivers/radeon/r600_streamout.c
deleted file mode 100644 (file)
index 5c14b1b..0000000
--- a/src/gallium/drivers/radeon/r600_streamout.c
+++ /dev/null
@@ -1,310 +0,0 @@
-/*
- * Copyright 2013 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- *
- * Authors: Marek Olšák <maraeo@gmail.com>
- *
- */
-
-#include "r600_pipe_common.h"
-#include "r600_cs.h"
-
-#include "util/u_memory.h"
-
-static void r600_set_streamout_enable(struct r600_common_context *rctx, bool enable);
-
-static struct pipe_stream_output_target *
-r600_create_so_target(struct pipe_context *ctx,
-                     struct pipe_resource *buffer,
-                     unsigned buffer_offset,
-                     unsigned buffer_size)
-{
-       struct r600_common_context *rctx = (struct r600_common_context *)ctx;
-       struct r600_so_target *t;
-       struct r600_resource *rbuffer = (struct r600_resource*)buffer;
-
-       t = CALLOC_STRUCT(r600_so_target);
-       if (!t) {
-               return NULL;
-       }
-
-       u_suballocator_alloc(rctx->allocator_zeroed_memory, 4, 4,
-                            &t->buf_filled_size_offset,
-                            (struct pipe_resource**)&t->buf_filled_size);
-       if (!t->buf_filled_size) {
-               FREE(t);
-               return NULL;
-       }
-
-       t->b.reference.count = 1;
-       t->b.context = ctx;
-       pipe_resource_reference(&t->b.buffer, buffer);
-       t->b.buffer_offset = buffer_offset;
-       t->b.buffer_size = buffer_size;
-
-       util_range_add(&rbuffer->valid_buffer_range, buffer_offset,
-                      buffer_offset + buffer_size);
-       return &t->b;
-}
-
-static void r600_so_target_destroy(struct pipe_context *ctx,
-                                  struct pipe_stream_output_target *target)
-{
-       struct r600_so_target *t = (struct r600_so_target*)target;
-       pipe_resource_reference(&t->b.buffer, NULL);
-       r600_resource_reference(&t->buf_filled_size, NULL);
-       FREE(t);
-}
-
-void si_streamout_buffers_dirty(struct r600_common_context *rctx)
-{
-       if (!rctx->streamout.enabled_mask)
-               return;
-
-       rctx->set_atom_dirty(rctx, &rctx->streamout.begin_atom, true);
-       r600_set_streamout_enable(rctx, true);
-}
-
-void si_common_set_streamout_targets(struct pipe_context *ctx,
-                                    unsigned num_targets,
-                                    struct pipe_stream_output_target **targets,
-                                    const unsigned *offsets)
-{
-       struct r600_common_context *rctx = (struct r600_common_context *)ctx;
-       unsigned i;
-        unsigned enabled_mask = 0, append_bitmask = 0;
-
-       /* Stop streamout. */
-       if (rctx->streamout.num_targets && rctx->streamout.begin_emitted) {
-               si_emit_streamout_end(rctx);
-       }
-
-       /* Set the new targets. */
-       for (i = 0; i < num_targets; i++) {
-               pipe_so_target_reference((struct pipe_stream_output_target**)&rctx->streamout.targets[i], targets[i]);
-               if (!targets[i])
-                       continue;
-
-               r600_context_add_resource_size(ctx, targets[i]->buffer);
-               enabled_mask |= 1 << i;
-               if (offsets[i] == ((unsigned)-1))
-                       append_bitmask |= 1 << i;
-       }
-       for (; i < rctx->streamout.num_targets; i++) {
-               pipe_so_target_reference((struct pipe_stream_output_target**)&rctx->streamout.targets[i], NULL);
-       }
-
-       rctx->streamout.enabled_mask = enabled_mask;
-
-       rctx->streamout.num_targets = num_targets;
-       rctx->streamout.append_bitmask = append_bitmask;
-
-       if (num_targets) {
-               si_streamout_buffers_dirty(rctx);
-       } else {
-               rctx->set_atom_dirty(rctx, &rctx->streamout.begin_atom, false);
-               r600_set_streamout_enable(rctx, false);
-       }
-}
-
-static void r600_flush_vgt_streamout(struct r600_common_context *rctx)
-{
-       struct radeon_winsys_cs *cs = rctx->gfx.cs;
-       unsigned reg_strmout_cntl;
-
-       /* The register is at different places on different ASICs. */
-       if (rctx->chip_class >= CIK) {
-               reg_strmout_cntl = R_0300FC_CP_STRMOUT_CNTL;
-               radeon_set_uconfig_reg(cs, reg_strmout_cntl, 0);
-       } else {
-               reg_strmout_cntl = R_0084FC_CP_STRMOUT_CNTL;
-               radeon_set_config_reg(cs, reg_strmout_cntl, 0);
-       }
-
-       radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
-       radeon_emit(cs, EVENT_TYPE(EVENT_TYPE_SO_VGTSTREAMOUT_FLUSH) | EVENT_INDEX(0));
-
-       radeon_emit(cs, PKT3(PKT3_WAIT_REG_MEM, 5, 0));
-       radeon_emit(cs, WAIT_REG_MEM_EQUAL); /* wait until the register is equal to the reference value */
-       radeon_emit(cs, reg_strmout_cntl >> 2);  /* register */
-       radeon_emit(cs, 0);
-       radeon_emit(cs, S_008490_OFFSET_UPDATE_DONE(1)); /* reference value */
-       radeon_emit(cs, S_008490_OFFSET_UPDATE_DONE(1)); /* mask */
-       radeon_emit(cs, 4); /* poll interval */
-}
-
-static void r600_emit_streamout_begin(struct r600_common_context *rctx, struct r600_atom *atom)
-{
-       struct radeon_winsys_cs *cs = rctx->gfx.cs;
-       struct r600_so_target **t = rctx->streamout.targets;
-       uint16_t *stride_in_dw = rctx->streamout.stride_in_dw;
-       unsigned i;
-
-       r600_flush_vgt_streamout(rctx);
-
-       for (i = 0; i < rctx->streamout.num_targets; i++) {
-               if (!t[i])
-                       continue;
-
-               t[i]->stride_in_dw = stride_in_dw[i];
-
-               /* SI binds streamout buffers as shader resources.
-                * VGT only counts primitives and tells the shader
-                * through SGPRs what to do. */
-               radeon_set_context_reg_seq(cs, R_028AD0_VGT_STRMOUT_BUFFER_SIZE_0 + 16*i, 2);
-               radeon_emit(cs, (t[i]->b.buffer_offset +
-                                t[i]->b.buffer_size) >> 2);    /* BUFFER_SIZE (in DW) */
-               radeon_emit(cs, stride_in_dw[i]);               /* VTX_STRIDE (in DW) */
-
-               if (rctx->streamout.append_bitmask & (1 << i) && t[i]->buf_filled_size_valid) {
-                       uint64_t va = t[i]->buf_filled_size->gpu_address +
-                                     t[i]->buf_filled_size_offset;
-
-                       /* Append. */
-                       radeon_emit(cs, PKT3(PKT3_STRMOUT_BUFFER_UPDATE, 4, 0));
-                       radeon_emit(cs, STRMOUT_SELECT_BUFFER(i) |
-                                   STRMOUT_OFFSET_SOURCE(STRMOUT_OFFSET_FROM_MEM)); /* control */
-                       radeon_emit(cs, 0); /* unused */
-                       radeon_emit(cs, 0); /* unused */
-                       radeon_emit(cs, va); /* src address lo */
-                       radeon_emit(cs, va >> 32); /* src address hi */
-
-                       r600_emit_reloc(rctx,  &rctx->gfx, t[i]->buf_filled_size,
-                                       RADEON_USAGE_READ, RADEON_PRIO_SO_FILLED_SIZE);
-               } else {
-                       /* Start from the beginning. */
-                       radeon_emit(cs, PKT3(PKT3_STRMOUT_BUFFER_UPDATE, 4, 0));
-                       radeon_emit(cs, STRMOUT_SELECT_BUFFER(i) |
-                                   STRMOUT_OFFSET_SOURCE(STRMOUT_OFFSET_FROM_PACKET)); /* control */
-                       radeon_emit(cs, 0); /* unused */
-                       radeon_emit(cs, 0); /* unused */
-                       radeon_emit(cs, t[i]->b.buffer_offset >> 2); /* buffer offset in DW */
-                       radeon_emit(cs, 0); /* unused */
-               }
-       }
-
-       rctx->streamout.begin_emitted = true;
-}
-
-void si_emit_streamout_end(struct r600_common_context *rctx)
-{
-       struct radeon_winsys_cs *cs = rctx->gfx.cs;
-       struct r600_so_target **t = rctx->streamout.targets;
-       unsigned i;
-       uint64_t va;
-
-       r600_flush_vgt_streamout(rctx);
-
-       for (i = 0; i < rctx->streamout.num_targets; i++) {
-               if (!t[i])
-                       continue;
-
-               va = t[i]->buf_filled_size->gpu_address + t[i]->buf_filled_size_offset;
-               radeon_emit(cs, PKT3(PKT3_STRMOUT_BUFFER_UPDATE, 4, 0));
-               radeon_emit(cs, STRMOUT_SELECT_BUFFER(i) |
-                           STRMOUT_OFFSET_SOURCE(STRMOUT_OFFSET_NONE) |
-                           STRMOUT_STORE_BUFFER_FILLED_SIZE); /* control */
-               radeon_emit(cs, va);     /* dst address lo */
-               radeon_emit(cs, va >> 32); /* dst address hi */
-               radeon_emit(cs, 0); /* unused */
-               radeon_emit(cs, 0); /* unused */
-
-               r600_emit_reloc(rctx,  &rctx->gfx, t[i]->buf_filled_size,
-                               RADEON_USAGE_WRITE, RADEON_PRIO_SO_FILLED_SIZE);
-
-               /* Zero the buffer size. The counters (primitives generated,
-                * primitives emitted) may be enabled even if there is no
-                * buffer bound. This ensures that the primitives-emitted query
-                * won't increment. */
-               radeon_set_context_reg(cs, R_028AD0_VGT_STRMOUT_BUFFER_SIZE_0 + 16*i, 0);
-
-               t[i]->buf_filled_size_valid = true;
-       }
-
-       rctx->streamout.begin_emitted = false;
-       rctx->flags |= R600_CONTEXT_STREAMOUT_FLUSH;
-}
-
-/* STREAMOUT CONFIG DERIVED STATE
- *
- * Streamout must be enabled for the PRIMITIVES_GENERATED query to work.
- * The buffer mask is an independent state, so no writes occur if there
- * are no buffers bound.
- */
-
-static void r600_emit_streamout_enable(struct r600_common_context *rctx,
-                                      struct r600_atom *atom)
-{
-       radeon_set_context_reg_seq(rctx->gfx.cs, R_028B94_VGT_STRMOUT_CONFIG, 2);
-       radeon_emit(rctx->gfx.cs,
-                   S_028B94_STREAMOUT_0_EN(r600_get_strmout_en(rctx)) |
-                   S_028B94_RAST_STREAM(0) |
-                   S_028B94_STREAMOUT_1_EN(r600_get_strmout_en(rctx)) |
-                   S_028B94_STREAMOUT_2_EN(r600_get_strmout_en(rctx)) |
-                   S_028B94_STREAMOUT_3_EN(r600_get_strmout_en(rctx)));
-       radeon_emit(rctx->gfx.cs,
-                   rctx->streamout.hw_enabled_mask &
-                   rctx->streamout.enabled_stream_buffers_mask);
-}
-
-static void r600_set_streamout_enable(struct r600_common_context *rctx, bool enable)
-{
-       bool old_strmout_en = r600_get_strmout_en(rctx);
-       unsigned old_hw_enabled_mask = rctx->streamout.hw_enabled_mask;
-
-       rctx->streamout.streamout_enabled = enable;
-
-       rctx->streamout.hw_enabled_mask = rctx->streamout.enabled_mask |
-                                         (rctx->streamout.enabled_mask << 4) |
-                                         (rctx->streamout.enabled_mask << 8) |
-                                         (rctx->streamout.enabled_mask << 12);
-
-       if ((old_strmout_en != r600_get_strmout_en(rctx)) ||
-            (old_hw_enabled_mask != rctx->streamout.hw_enabled_mask)) {
-               rctx->set_atom_dirty(rctx, &rctx->streamout.enable_atom, true);
-       }
-}
-
-void si_update_prims_generated_query_state(struct r600_common_context *rctx,
-                                          unsigned type, int diff)
-{
-       if (type == PIPE_QUERY_PRIMITIVES_GENERATED) {
-               bool old_strmout_en = r600_get_strmout_en(rctx);
-
-               rctx->streamout.num_prims_gen_queries += diff;
-               assert(rctx->streamout.num_prims_gen_queries >= 0);
-
-               rctx->streamout.prims_gen_query_enabled =
-                       rctx->streamout.num_prims_gen_queries != 0;
-
-               if (old_strmout_en != r600_get_strmout_en(rctx)) {
-                       rctx->set_atom_dirty(rctx, &rctx->streamout.enable_atom, true);
-               }
-       }
-}
-
-void si_streamout_init(struct r600_common_context *rctx)
-{
-       rctx->b.create_stream_output_target = r600_create_so_target;
-       rctx->b.stream_output_target_destroy = r600_so_target_destroy;
-       rctx->streamout.begin_atom.emit = r600_emit_streamout_begin;
-       rctx->streamout.enable_atom.emit = r600_emit_streamout_enable;
-}
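
For reference, a worked example (values chosen for illustration) of how the masks in r600_set_streamout_enable() and r600_emit_streamout_enable() above combine into VGT_STRMOUT_BUFFER_CONFIG; the same logic reappears unchanged in si_state_streamout.c below.

  /* Two targets bound -> enabled_mask = 0x3; the enable helper
   * replicates it across the four per-stream nibbles. */
  unsigned enabled_mask    = 0x3;
  unsigned hw_enabled_mask = enabled_mask |
                             (enabled_mask << 4) |
                             (enabled_mask << 8) |
                             (enabled_mask << 12);        /* = 0x3333 */

  /* The bound vertex shader writes only stream 0 to buffers 0-1
   * (stream-0 buffers occupy the 4 LSBs). */
  unsigned enabled_stream_buffers_mask = 0x3;

  /* The emit function programs the intersection, so only the
   * stream/buffer slots actually written stay enabled. */
  unsigned vgt_strmout_buffer_config =
          hw_enabled_mask & enabled_stream_buffers_mask;  /* = 0x3 */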
diff --git a/src/gallium/drivers/radeonsi/Makefile.sources b/src/gallium/drivers/radeonsi/Makefile.sources
index ed3e52046caa0ca1a65fdbf64e569b7477953207..63cd7a30978e89be178894e35d96e648cdc337c5 100644 (file)
@@ -30,6 +30,7 @@ C_SOURCES := \
        si_state_binning.c \
        si_state_draw.c \
        si_state_shaders.c \
+       si_state_streamout.c \
        si_state_viewport.c \
        si_state.h \
        si_uvd.c
diff --git a/src/gallium/drivers/radeonsi/si_blit.c b/src/gallium/drivers/radeonsi/si_blit.c
index 4806e7c9415ad7c2aea7feb045f596184a210e4b..03aa4f7737f1290c13ad43e6c82c9c9dd9e9a39c 100644 (file)
@@ -58,8 +58,8 @@ static void si_blitter_begin(struct pipe_context *ctx, enum si_blitter_op op)
        util_blitter_save_tessctrl_shader(sctx->blitter, sctx->tcs_shader.cso);
        util_blitter_save_tesseval_shader(sctx->blitter, sctx->tes_shader.cso);
        util_blitter_save_geometry_shader(sctx->blitter, sctx->gs_shader.cso);
-       util_blitter_save_so_targets(sctx->blitter, sctx->b.streamout.num_targets,
-                                    (struct pipe_stream_output_target**)sctx->b.streamout.targets);
+       util_blitter_save_so_targets(sctx->blitter, sctx->streamout.num_targets,
+                                    (struct pipe_stream_output_target**)sctx->streamout.targets);
        util_blitter_save_rasterizer(sctx->blitter, sctx->queued.named.rasterizer);
 
        if (op & SI_SAVE_FRAGMENT_STATE) {
diff --git a/src/gallium/drivers/radeonsi/si_descriptors.c b/src/gallium/drivers/radeonsi/si_descriptors.c
index dee8e7138ff49f8de592ce03c8c66ddf570e76b0..dd1f1e91b81dbdfe0effebfd2662de3f6cd188ad 100644 (file)
@@ -1373,11 +1373,11 @@ static void si_set_streamout_targets(struct pipe_context *ctx,
        struct si_context *sctx = (struct si_context *)ctx;
        struct si_buffer_resources *buffers = &sctx->rw_buffers;
        struct si_descriptors *descs = &sctx->descriptors[SI_DESCS_RW_BUFFERS];
-       unsigned old_num_targets = sctx->b.streamout.num_targets;
+       unsigned old_num_targets = sctx->streamout.num_targets;
        unsigned i, bufidx;
 
        /* We are going to unbind the buffers. Mark which caches need to be flushed. */
-       if (sctx->b.streamout.num_targets && sctx->b.streamout.begin_emitted) {
+       if (sctx->streamout.num_targets && sctx->streamout.begin_emitted) {
                /* Since streamout uses vector writes which go through TC L2
                 * and most other clients can use TC L2 as well, we don't need
                 * to flush it.
@@ -1387,9 +1387,9 @@ static void si_set_streamout_targets(struct pipe_context *ctx,
                 * cases. Thus, flag the TC L2 dirtiness in the resource and
                 * handle it at draw call time.
                 */
-               for (i = 0; i < sctx->b.streamout.num_targets; i++)
-                       if (sctx->b.streamout.targets[i])
-                               r600_resource(sctx->b.streamout.targets[i]->b.buffer)->TC_L2_dirty = true;
+               for (i = 0; i < sctx->streamout.num_targets; i++)
+                       if (sctx->streamout.targets[i])
+                               r600_resource(sctx->streamout.targets[i]->b.buffer)->TC_L2_dirty = true;
 
                /* Invalidate the scalar cache in case a streamout buffer is
                 * going to be used as a constant buffer.
@@ -1650,11 +1650,11 @@ static void si_rebind_buffer(struct pipe_context *ctx, struct pipe_resource *buf
                                                            true);
 
                        /* Update the streamout state. */
-                       if (sctx->b.streamout.begin_emitted)
-                               si_emit_streamout_end(&sctx->b);
-                       sctx->b.streamout.append_bitmask =
-                                       sctx->b.streamout.enabled_mask;
-                       si_streamout_buffers_dirty(&sctx->b);
+                       if (sctx->streamout.begin_emitted)
+                               si_emit_streamout_end(sctx);
+                       sctx->streamout.append_bitmask =
+                                       sctx->streamout.enabled_mask;
+                       si_streamout_buffers_dirty(sctx);
                }
        }
 
diff --git a/src/gallium/drivers/radeonsi/si_hw_context.c b/src/gallium/drivers/radeonsi/si_hw_context.c
index 72da54e5b4e2d61523e8d75f56ba9c1cdd939cc8..317b50c8aa888792787dd0eef7aa35f5dcb99ef7 100644 (file)
@@ -100,6 +100,12 @@ void si_context_gfx_flush(void *context, unsigned flags,
 
        si_preflush_suspend_features(&ctx->b);
 
+       ctx->streamout.suspended = false;
+       if (ctx->streamout.begin_emitted) {
+               si_emit_streamout_end(ctx);
+               ctx->streamout.suspended = true;
+       }
+
        ctx->b.flags |= SI_CONTEXT_CS_PARTIAL_FLUSH |
                        SI_CONTEXT_PS_PARTIAL_FLUSH;
 
@@ -243,7 +249,7 @@ void si_begin_new_cs(struct si_context *ctx)
                si_mark_atom_dirty(ctx, &ctx->dpbb_state);
        si_mark_atom_dirty(ctx, &ctx->stencil_ref.atom);
        si_mark_atom_dirty(ctx, &ctx->spi_map);
-       si_mark_atom_dirty(ctx, &ctx->b.streamout.enable_atom);
+       si_mark_atom_dirty(ctx, &ctx->streamout.enable_atom);
        si_mark_atom_dirty(ctx, &ctx->b.render_cond_atom);
        si_all_descriptors_begin_new_cs(ctx);
        si_all_resident_buffers_begin_new_cs(ctx);
@@ -260,6 +266,11 @@ void si_begin_new_cs(struct si_context *ctx)
                                               &ctx->scratch_buffer->b.b);
        }
 
+       if (ctx->streamout.suspended) {
+               ctx->streamout.append_bitmask = ctx->streamout.enabled_mask;
+               si_streamout_buffers_dirty(ctx);
+       }
+
        si_postflush_resume_features(&ctx->b);
 
        assert(!ctx->b.gfx.cs->prev_dw);
diff --git a/src/gallium/drivers/radeonsi/si_pipe.c b/src/gallium/drivers/radeonsi/si_pipe.c
index d0b90e732ad78fe33c92fa8931c5d81e32dfea4e..b9840ad8e31d1ef45f8003afc96c35c63490aa4b 100644 (file)
@@ -205,6 +205,7 @@ static struct pipe_context *si_create_context(struct pipe_screen *screen,
        si_init_compute_functions(sctx);
        si_init_cp_dma_functions(sctx);
        si_init_debug_functions(sctx);
+       si_init_streamout_functions(sctx);
 
        if (sscreen->b.info.has_hw_decode) {
                sctx->b.b.create_video_codec = si_uvd_create_decoder;
diff --git a/src/gallium/drivers/radeonsi/si_pipe.h b/src/gallium/drivers/radeonsi/si_pipe.h
index cf36100dc9f01678906f8497dbf941e9ba61c726..4e54b7ef16032a8c0ea60e4821f60aa7b840ea8b 100644 (file)
@@ -255,6 +255,43 @@ struct si_sample_mask {
        uint16_t                sample_mask;
 };
 
+struct si_streamout_target {
+       struct pipe_stream_output_target b;
+
+       /* The buffer where BUFFER_FILLED_SIZE is stored. */
+       struct r600_resource    *buf_filled_size;
+       unsigned                buf_filled_size_offset;
+       bool                    buf_filled_size_valid;
+
+       unsigned                stride_in_dw;
+};
+
+struct si_streamout {
+       struct r600_atom                begin_atom;
+       bool                            begin_emitted;
+
+       unsigned                        enabled_mask;
+       unsigned                        num_targets;
+       struct si_streamout_target      *targets[PIPE_MAX_SO_BUFFERS];
+
+       unsigned                        append_bitmask;
+       bool                            suspended;
+
+       /* External state which comes from the vertex shader;
+        * it must be set explicitly when binding a shader. */
+       uint16_t                        *stride_in_dw;
+       unsigned                        enabled_stream_buffers_mask; /* stream0 buffers0-3 in 4 LSB */
+
+       /* The state of VGT_STRMOUT_BUFFER_(CONFIG|EN). */
+       unsigned                        hw_enabled_mask;
+
+       /* The state of VGT_STRMOUT_(CONFIG|EN). */
+       struct r600_atom                enable_atom;
+       bool                            streamout_enabled;
+       bool                            prims_gen_query_enabled;
+       int                             num_prims_gen_queries;
+};
+
 /* A shader state consists of the shader selector, which is a constant state
  * object shared by multiple contexts and shouldn't be modified, and
  * the current shader variant selected for this context.
@@ -359,6 +396,7 @@ struct si_context {
        struct si_stencil_ref           stencil_ref;
        struct r600_atom                spi_map;
        struct si_scissors              scissors;
+       struct si_streamout             streamout;
        struct si_viewports             viewports;
 
        /* Precomputed states. */
@@ -644,6 +682,12 @@ static inline struct si_shader* si_get_vs_state(struct si_context *sctx)
        return vs->current ? vs->current : NULL;
 }
 
+static inline bool si_get_strmout_en(struct si_context *sctx)
+{
+       return sctx->streamout.streamout_enabled ||
+              sctx->streamout.prims_gen_query_enabled;
+}
+
 static inline unsigned
 si_optimal_tcc_alignment(struct si_context *sctx, unsigned upload_size)
 {
diff --git a/src/gallium/drivers/radeonsi/si_state.c b/src/gallium/drivers/radeonsi/si_state.c
index 99c3ca36886d493736fbd98b0b13b51b3ae52c69..82f3962a6cb5b3473e4297295139a44c9ccd5f72 100644 (file)
@@ -4407,8 +4407,8 @@ static void si_init_config(struct si_context *sctx);
 void si_init_state_functions(struct si_context *sctx)
 {
        si_init_external_atom(sctx, &sctx->b.render_cond_atom, &sctx->atoms.s.render_cond);
-       si_init_external_atom(sctx, &sctx->b.streamout.begin_atom, &sctx->atoms.s.streamout_begin);
-       si_init_external_atom(sctx, &sctx->b.streamout.enable_atom, &sctx->atoms.s.streamout_enable);
+       si_init_external_atom(sctx, &sctx->streamout.begin_atom, &sctx->atoms.s.streamout_begin);
+       si_init_external_atom(sctx, &sctx->streamout.enable_atom, &sctx->atoms.s.streamout_enable);
        si_init_external_atom(sctx, &sctx->scissors.atom, &sctx->atoms.s.scissors);
        si_init_external_atom(sctx, &sctx->viewports.atom, &sctx->atoms.s.viewports);
 
diff --git a/src/gallium/drivers/radeonsi/si_state.h b/src/gallium/drivers/radeonsi/si_state.h
index 03e2a174d21d6d41a2f7306559d70c348dbf5819..9d29878e3097b20e2dad00f529d5f83946668585 100644 (file)
@@ -423,6 +423,17 @@ void si_draw_rectangle(struct blitter_context *blitter,
                       const union blitter_attrib *attrib);
 void si_trace_emit(struct si_context *sctx);
 
+/* si_state_streamout.c */
+void si_streamout_buffers_dirty(struct si_context *sctx);
+void si_common_set_streamout_targets(struct pipe_context *ctx,
+                                    unsigned num_targets,
+                                    struct pipe_stream_output_target **targets,
+                                    const unsigned *offset);
+void si_emit_streamout_end(struct si_context *sctx);
+void si_update_prims_generated_query_state(struct si_context *sctx,
+                                          unsigned type, int diff);
+void si_init_streamout_functions(struct si_context *sctx);
+
 
 static inline unsigned
 si_tile_mode_index(struct r600_texture *rtex, unsigned level, bool stencil)
diff --git a/src/gallium/drivers/radeonsi/si_state_draw.c b/src/gallium/drivers/radeonsi/si_state_draw.c
index 6eab4cb47d949c07d68864c6c66c27cbd9d29986..9468fde52364b38b420351ebfd7d9b256399dd64 100644 (file)
@@ -652,8 +652,8 @@ static void si_emit_draw_packets(struct si_context *sctx,
        uint64_t index_va = 0;
 
        if (info->count_from_stream_output) {
-               struct r600_so_target *t =
-                       (struct r600_so_target*)info->count_from_stream_output;
+               struct si_streamout_target *t =
+                       (struct si_streamout_target*)info->count_from_stream_output;
                uint64_t va = t->buf_filled_size->gpu_address +
                              t->buf_filled_size_offset;
 
@@ -1486,7 +1486,7 @@ void si_draw_vbo(struct pipe_context *ctx, const struct pipe_draw_info *info)
        if ((sctx->b.family == CHIP_HAWAII ||
             sctx->b.family == CHIP_TONGA ||
             sctx->b.family == CHIP_FIJI) &&
-           r600_get_strmout_en(&sctx->b)) {
+           si_get_strmout_en(sctx)) {
                sctx->b.flags |= SI_CONTEXT_VGT_STREAMOUT_SYNC;
        }
 
diff --git a/src/gallium/drivers/radeonsi/si_state_shaders.c b/src/gallium/drivers/radeonsi/si_state_shaders.c
index dbaa2dcd5cb7b11804d5a4625b08762a3fe8d27b..9340328a72ab3bcf764e484bc6e4dc7b35384ce2 100644 (file)
@@ -2252,9 +2252,9 @@ static void si_update_streamout_state(struct si_context *sctx)
        if (!shader_with_so)
                return;
 
-       sctx->b.streamout.enabled_stream_buffers_mask =
+       sctx->streamout.enabled_stream_buffers_mask =
                shader_with_so->enabled_streamout_buffer_mask;
-       sctx->b.streamout.stride_in_dw = shader_with_so->so.stride;
+       sctx->streamout.stride_in_dw = shader_with_so->so.stride;
 }
 
 static void si_update_clip_regs(struct si_context *sctx,
diff --git a/src/gallium/drivers/radeonsi/si_state_streamout.c b/src/gallium/drivers/radeonsi/si_state_streamout.c
new file mode 100644 (file)
index 0000000..42a83d4
--- /dev/null
+++ b/src/gallium/drivers/radeonsi/si_state_streamout.c
@@ -0,0 +1,312 @@
+/*
+ * Copyright 2013 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * Authors: Marek Olšák <maraeo@gmail.com>
+ *
+ */
+
+#include "si_pipe.h"
+#include "si_state.h"
+#include "radeon/r600_cs.h"
+
+#include "util/u_memory.h"
+
+static void si_set_streamout_enable(struct si_context *sctx, bool enable);
+
+static struct pipe_stream_output_target *
+si_create_so_target(struct pipe_context *ctx,
+                   struct pipe_resource *buffer,
+                   unsigned buffer_offset,
+                   unsigned buffer_size)
+{
+       struct si_context *sctx = (struct si_context *)ctx;
+       struct si_streamout_target *t;
+       struct r600_resource *rbuffer = (struct r600_resource*)buffer;
+
+       t = CALLOC_STRUCT(si_streamout_target);
+       if (!t) {
+               return NULL;
+       }
+
+       u_suballocator_alloc(sctx->b.allocator_zeroed_memory, 4, 4,
+                            &t->buf_filled_size_offset,
+                            (struct pipe_resource**)&t->buf_filled_size);
+       if (!t->buf_filled_size) {
+               FREE(t);
+               return NULL;
+       }
+
+       t->b.reference.count = 1;
+       t->b.context = ctx;
+       pipe_resource_reference(&t->b.buffer, buffer);
+       t->b.buffer_offset = buffer_offset;
+       t->b.buffer_size = buffer_size;
+
+       util_range_add(&rbuffer->valid_buffer_range, buffer_offset,
+                      buffer_offset + buffer_size);
+       return &t->b;
+}
+
+static void si_so_target_destroy(struct pipe_context *ctx,
+                                struct pipe_stream_output_target *target)
+{
+       struct si_streamout_target *t = (struct si_streamout_target*)target;
+       pipe_resource_reference(&t->b.buffer, NULL);
+       r600_resource_reference(&t->buf_filled_size, NULL);
+       FREE(t);
+}
+
+void si_streamout_buffers_dirty(struct si_context *sctx)
+{
+       if (!sctx->streamout.enabled_mask)
+               return;
+
+       si_mark_atom_dirty(sctx, &sctx->streamout.begin_atom);
+       si_set_streamout_enable(sctx, true);
+}
+
+void si_common_set_streamout_targets(struct pipe_context *ctx,
+                                    unsigned num_targets,
+                                    struct pipe_stream_output_target **targets,
+                                    const unsigned *offsets)
+{
+       struct si_context *sctx = (struct si_context *)ctx;
+       unsigned i;
+        unsigned enabled_mask = 0, append_bitmask = 0;
+
+       /* Stop streamout. */
+       if (sctx->streamout.num_targets && sctx->streamout.begin_emitted) {
+               si_emit_streamout_end(sctx);
+       }
+
+       /* Set the new targets. */
+       for (i = 0; i < num_targets; i++) {
+               pipe_so_target_reference((struct pipe_stream_output_target**)&sctx->streamout.targets[i], targets[i]);
+               if (!targets[i])
+                       continue;
+
+               r600_context_add_resource_size(ctx, targets[i]->buffer);
+               enabled_mask |= 1 << i;
+               if (offsets[i] == ((unsigned)-1))
+                       append_bitmask |= 1 << i;
+       }
+       for (; i < sctx->streamout.num_targets; i++) {
+               pipe_so_target_reference((struct pipe_stream_output_target**)&sctx->streamout.targets[i], NULL);
+       }
+
+       sctx->streamout.enabled_mask = enabled_mask;
+
+       sctx->streamout.num_targets = num_targets;
+       sctx->streamout.append_bitmask = append_bitmask;
+
+       if (num_targets) {
+               si_streamout_buffers_dirty(sctx);
+       } else {
+               si_set_atom_dirty(sctx, &sctx->streamout.begin_atom, false);
+               si_set_streamout_enable(sctx, false);
+       }
+}
+
+static void si_flush_vgt_streamout(struct si_context *sctx)
+{
+       struct radeon_winsys_cs *cs = sctx->b.gfx.cs;
+       unsigned reg_strmout_cntl;
+
+       /* The register is at different places on different ASICs. */
+       if (sctx->b.chip_class >= CIK) {
+               reg_strmout_cntl = R_0300FC_CP_STRMOUT_CNTL;
+               radeon_set_uconfig_reg(cs, reg_strmout_cntl, 0);
+       } else {
+               reg_strmout_cntl = R_0084FC_CP_STRMOUT_CNTL;
+               radeon_set_config_reg(cs, reg_strmout_cntl, 0);
+       }
+
+       radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
+       radeon_emit(cs, EVENT_TYPE(EVENT_TYPE_SO_VGTSTREAMOUT_FLUSH) | EVENT_INDEX(0));
+
+       radeon_emit(cs, PKT3(PKT3_WAIT_REG_MEM, 5, 0));
+       radeon_emit(cs, WAIT_REG_MEM_EQUAL); /* wait until the register is equal to the reference value */
+       radeon_emit(cs, reg_strmout_cntl >> 2);  /* register */
+       radeon_emit(cs, 0);
+       radeon_emit(cs, S_008490_OFFSET_UPDATE_DONE(1)); /* reference value */
+       radeon_emit(cs, S_008490_OFFSET_UPDATE_DONE(1)); /* mask */
+       radeon_emit(cs, 4); /* poll interval */
+}
+
+static void si_emit_streamout_begin(struct r600_common_context *rctx, struct r600_atom *atom)
+{
+       struct si_context *sctx = (struct si_context*)rctx;
+       struct radeon_winsys_cs *cs = sctx->b.gfx.cs;
+       struct si_streamout_target **t = sctx->streamout.targets;
+       uint16_t *stride_in_dw = sctx->streamout.stride_in_dw;
+       unsigned i;
+
+       si_flush_vgt_streamout(sctx);
+
+       for (i = 0; i < sctx->streamout.num_targets; i++) {
+               if (!t[i])
+                       continue;
+
+               t[i]->stride_in_dw = stride_in_dw[i];
+
+               /* SI binds streamout buffers as shader resources.
+                * VGT only counts primitives and tells the shader
+                * through SGPRs what to do. */
+               radeon_set_context_reg_seq(cs, R_028AD0_VGT_STRMOUT_BUFFER_SIZE_0 + 16*i, 2);
+               radeon_emit(cs, (t[i]->b.buffer_offset +
+                                t[i]->b.buffer_size) >> 2);    /* BUFFER_SIZE (in DW) */
+               radeon_emit(cs, stride_in_dw[i]);               /* VTX_STRIDE (in DW) */
+
+               if (sctx->streamout.append_bitmask & (1 << i) && t[i]->buf_filled_size_valid) {
+                       uint64_t va = t[i]->buf_filled_size->gpu_address +
+                                     t[i]->buf_filled_size_offset;
+
+                       /* Append. */
+                       radeon_emit(cs, PKT3(PKT3_STRMOUT_BUFFER_UPDATE, 4, 0));
+                       radeon_emit(cs, STRMOUT_SELECT_BUFFER(i) |
+                                   STRMOUT_OFFSET_SOURCE(STRMOUT_OFFSET_FROM_MEM)); /* control */
+                       radeon_emit(cs, 0); /* unused */
+                       radeon_emit(cs, 0); /* unused */
+                       radeon_emit(cs, va); /* src address lo */
+                       radeon_emit(cs, va >> 32); /* src address hi */
+
+                       r600_emit_reloc(&sctx->b,  &sctx->b.gfx, t[i]->buf_filled_size,
+                                       RADEON_USAGE_READ, RADEON_PRIO_SO_FILLED_SIZE);
+               } else {
+                       /* Start from the beginning. */
+                       radeon_emit(cs, PKT3(PKT3_STRMOUT_BUFFER_UPDATE, 4, 0));
+                       radeon_emit(cs, STRMOUT_SELECT_BUFFER(i) |
+                                   STRMOUT_OFFSET_SOURCE(STRMOUT_OFFSET_FROM_PACKET)); /* control */
+                       radeon_emit(cs, 0); /* unused */
+                       radeon_emit(cs, 0); /* unused */
+                       radeon_emit(cs, t[i]->b.buffer_offset >> 2); /* buffer offset in DW */
+                       radeon_emit(cs, 0); /* unused */
+               }
+       }
+
+       sctx->streamout.begin_emitted = true;
+}
+
+void si_emit_streamout_end(struct si_context *sctx)
+{
+       struct radeon_winsys_cs *cs = sctx->b.gfx.cs;
+       struct si_streamout_target **t = sctx->streamout.targets;
+       unsigned i;
+       uint64_t va;
+
+       si_flush_vgt_streamout(sctx);
+
+       for (i = 0; i < sctx->streamout.num_targets; i++) {
+               if (!t[i])
+                       continue;
+
+               va = t[i]->buf_filled_size->gpu_address + t[i]->buf_filled_size_offset;
+               radeon_emit(cs, PKT3(PKT3_STRMOUT_BUFFER_UPDATE, 4, 0));
+               radeon_emit(cs, STRMOUT_SELECT_BUFFER(i) |
+                           STRMOUT_OFFSET_SOURCE(STRMOUT_OFFSET_NONE) |
+                           STRMOUT_STORE_BUFFER_FILLED_SIZE); /* control */
+               radeon_emit(cs, va);     /* dst address lo */
+               radeon_emit(cs, va >> 32); /* dst address hi */
+               radeon_emit(cs, 0); /* unused */
+               radeon_emit(cs, 0); /* unused */
+
+               r600_emit_reloc(&sctx->b,  &sctx->b.gfx, t[i]->buf_filled_size,
+                               RADEON_USAGE_WRITE, RADEON_PRIO_SO_FILLED_SIZE);
+
+               /* Zero the buffer size. The counters (primitives generated,
+                * primitives emitted) may be enabled even if there is no
+                * buffer bound. This ensures that the primitives-emitted query
+                * won't increment. */
+               radeon_set_context_reg(cs, R_028AD0_VGT_STRMOUT_BUFFER_SIZE_0 + 16*i, 0);
+
+               t[i]->buf_filled_size_valid = true;
+       }
+
+       sctx->streamout.begin_emitted = false;
+       sctx->b.flags |= R600_CONTEXT_STREAMOUT_FLUSH;
+}
+
+/* STREAMOUT CONFIG DERIVED STATE
+ *
+ * Streamout must be enabled for the PRIMITIVES_GENERATED query to work.
+ * The buffer mask is an independent state, so no writes occur if there
+ * are no buffers bound.
+ */
+
+static void si_emit_streamout_enable(struct r600_common_context *rctx,
+                                    struct r600_atom *atom)
+{
+       struct si_context *sctx = (struct si_context*)rctx;
+
+       radeon_set_context_reg_seq(sctx->b.gfx.cs, R_028B94_VGT_STRMOUT_CONFIG, 2);
+       radeon_emit(sctx->b.gfx.cs,
+                   S_028B94_STREAMOUT_0_EN(si_get_strmout_en(sctx)) |
+                   S_028B94_RAST_STREAM(0) |
+                   S_028B94_STREAMOUT_1_EN(si_get_strmout_en(sctx)) |
+                   S_028B94_STREAMOUT_2_EN(si_get_strmout_en(sctx)) |
+                   S_028B94_STREAMOUT_3_EN(si_get_strmout_en(sctx)));
+       radeon_emit(sctx->b.gfx.cs,
+                   sctx->streamout.hw_enabled_mask &
+                   sctx->streamout.enabled_stream_buffers_mask);
+}
+
+static void si_set_streamout_enable(struct si_context *sctx, bool enable)
+{
+       bool old_strmout_en = si_get_strmout_en(sctx);
+       unsigned old_hw_enabled_mask = sctx->streamout.hw_enabled_mask;
+
+       sctx->streamout.streamout_enabled = enable;
+
+       sctx->streamout.hw_enabled_mask = sctx->streamout.enabled_mask |
+                                         (sctx->streamout.enabled_mask << 4) |
+                                         (sctx->streamout.enabled_mask << 8) |
+                                         (sctx->streamout.enabled_mask << 12);
+
+       if ((old_strmout_en != si_get_strmout_en(sctx)) ||
+            (old_hw_enabled_mask != sctx->streamout.hw_enabled_mask))
+               si_mark_atom_dirty(sctx, &sctx->streamout.enable_atom);
+}
+
+void si_update_prims_generated_query_state(struct si_context *sctx,
+                                          unsigned type, int diff)
+{
+       if (type == PIPE_QUERY_PRIMITIVES_GENERATED) {
+               bool old_strmout_en = si_get_strmout_en(sctx);
+
+               sctx->streamout.num_prims_gen_queries += diff;
+               assert(sctx->streamout.num_prims_gen_queries >= 0);
+
+               sctx->streamout.prims_gen_query_enabled =
+                       sctx->streamout.num_prims_gen_queries != 0;
+
+               if (old_strmout_en != si_get_strmout_en(sctx))
+                       si_mark_atom_dirty(sctx, &sctx->streamout.enable_atom);
+       }
+}
+
+void si_init_streamout_functions(struct si_context *sctx)
+{
+       sctx->b.b.create_stream_output_target = si_create_so_target;
+       sctx->b.b.stream_output_target_destroy = si_so_target_destroy;
+       sctx->streamout.begin_atom.emit = si_emit_streamout_begin;
+       sctx->streamout.enable_atom.emit = si_emit_streamout_enable;
+}
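
For context, a rough sketch of how a Gallium frontend drives the streamout entry points implemented above (si_init_streamout_functions() plus si_set_streamout_targets() in si_descriptors.c). The wrapper function name, `buf` and `size` are illustrative assumptions, not part of this commit; the pipe_context hooks and pipe_so_target_reference() are the standard Gallium interfaces.

  #include "pipe/p_context.h"
  #include "pipe/p_state.h"
  #include "util/u_inlines.h"

  /* Hypothetical caller: captures vertex shader outputs into `buf`. */
  static void example_capture_transform_feedback(struct pipe_context *pipe,
                                                 struct pipe_resource *buf,
                                                 unsigned size)
  {
          /* Reaches si_create_so_target(), which suballocates the
           * BUFFER_FILLED_SIZE slot. */
          struct pipe_stream_output_target *tgt =
                  pipe->create_stream_output_target(pipe, buf, 0, size);

          /* Offset 0 starts at the beginning of the buffer; (unsigned)-1
           * would append at the saved BUFFER_FILLED_SIZE instead
           * (see append_bitmask above). */
          unsigned offsets[1] = { 0 };
          pipe->set_stream_output_targets(pipe, 1, &tgt, offsets);

          /* ... draw calls now stream outputs into `buf` ... */

          /* Unbind: si_emit_streamout_end() stores BUFFER_FILLED_SIZE so a
           * later append or count_from_stream_output draw can reuse it. */
          pipe->set_stream_output_targets(pipe, 0, NULL, NULL);

          /* Drop the frontend's reference; si_so_target_destroy() runs once
           * the refcount reaches zero. */
          pipe_so_target_reference(&tgt, NULL);
  }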