r600_pipe_common.h \
r600_query.c \
r600_query.h \
- r600_streamout.c \
r600_test_dma.c \
r600_texture.c \
radeon_uvd.c \
/* suspend queries */
if (!LIST_IS_EMPTY(&ctx->active_queries))
si_suspend_queries(ctx);
-
- ctx->streamout.suspended = false;
- if (ctx->streamout.begin_emitted) {
- si_emit_streamout_end(ctx);
- ctx->streamout.suspended = true;
- }
}
void si_postflush_resume_features(struct r600_common_context *ctx)
{
- if (ctx->streamout.suspended) {
- ctx->streamout.append_bitmask = ctx->streamout.enabled_mask;
- si_streamout_buffers_dirty(ctx);
- }
-
/* resume queries */
if (!LIST_IS_EMPTY(&ctx->active_queries))
si_resume_queries(ctx);
rctx->b.set_device_reset_callback = r600_set_device_reset_callback;
si_init_context_texture_functions(rctx);
- si_streamout_init(rctx);
si_init_query_functions(rctx);
si_init_msaa(&rctx->b);
unsigned short id;
};
-struct r600_so_target {
- struct pipe_stream_output_target b;
-
- /* The buffer where BUFFER_FILLED_SIZE is stored. */
- struct r600_resource *buf_filled_size;
- unsigned buf_filled_size_offset;
- bool buf_filled_size_valid;
-
- unsigned stride_in_dw;
-};
-
-struct r600_streamout {
- struct r600_atom begin_atom;
- bool begin_emitted;
-
- unsigned enabled_mask;
- unsigned num_targets;
- struct r600_so_target *targets[PIPE_MAX_SO_BUFFERS];
-
- unsigned append_bitmask;
- bool suspended;
-
- /* External state which comes from the vertex shader,
- * it must be set explicitly when binding a shader. */
- uint16_t *stride_in_dw;
- unsigned enabled_stream_buffers_mask; /* stream0 buffers0-3 in 4 LSB */
-
- /* The state of VGT_STRMOUT_BUFFER_(CONFIG|EN). */
- unsigned hw_enabled_mask;
-
- /* The state of VGT_STRMOUT_(CONFIG|EN). */
- struct r600_atom enable_atom;
- bool streamout_enabled;
- bool prims_gen_query_enabled;
- int num_prims_gen_queries;
-};
-
struct r600_ring {
struct radeon_winsys_cs *cs;
void (*flush)(void *ctx, unsigned flags,
uint64_t vram;
uint64_t gtt;
- /* States. */
- struct r600_streamout streamout;
-
/* Additional context states. */
unsigned flags; /* flush flags */
void si_suspend_queries(struct r600_common_context *ctx);
void si_resume_queries(struct r600_common_context *ctx);
-/* r600_streamout.c */
-void si_streamout_buffers_dirty(struct r600_common_context *rctx);
-void si_common_set_streamout_targets(struct pipe_context *ctx,
- unsigned num_targets,
- struct pipe_stream_output_target **targets,
- const unsigned *offset);
-void si_emit_streamout_end(struct r600_common_context *rctx);
-void si_update_prims_generated_query_state(struct r600_common_context *rctx,
- unsigned type, int diff);
-void si_streamout_init(struct r600_common_context *rctx);
-
/* r600_test_dma.c */
void si_test_dma(struct r600_common_screen *rscreen);
}
}
-static inline bool r600_get_strmout_en(struct r600_common_context *rctx)
-{
- return rctx->streamout.streamout_enabled ||
- rctx->streamout.prims_gen_query_enabled;
-}
-
#define SQ_TEX_XY_FILTER_POINT 0x00
#define SQ_TEX_XY_FILTER_BILINEAR 0x01
#define SQ_TEX_XY_FILTER_ANISO_POINT 0x02
#include "os/os_time.h"
#include "tgsi/tgsi_text.h"
+/* TODO: remove this: */
+void si_update_prims_generated_query_state(struct r600_common_context *rctx,
+ unsigned type, int diff);
+
#define R600_MAX_STREAMS 4
struct r600_hw_query_params {
+++ /dev/null
-/*
- * Copyright 2013 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- *
- * Authors: Marek Olšák <maraeo@gmail.com>
- *
- */
-
-#include "r600_pipe_common.h"
-#include "r600_cs.h"
-
-#include "util/u_memory.h"
-
-static void r600_set_streamout_enable(struct r600_common_context *rctx, bool enable);
-
-static struct pipe_stream_output_target *
-r600_create_so_target(struct pipe_context *ctx,
- struct pipe_resource *buffer,
- unsigned buffer_offset,
- unsigned buffer_size)
-{
- struct r600_common_context *rctx = (struct r600_common_context *)ctx;
- struct r600_so_target *t;
- struct r600_resource *rbuffer = (struct r600_resource*)buffer;
-
- t = CALLOC_STRUCT(r600_so_target);
- if (!t) {
- return NULL;
- }
-
- u_suballocator_alloc(rctx->allocator_zeroed_memory, 4, 4,
- &t->buf_filled_size_offset,
- (struct pipe_resource**)&t->buf_filled_size);
- if (!t->buf_filled_size) {
- FREE(t);
- return NULL;
- }
-
- t->b.reference.count = 1;
- t->b.context = ctx;
- pipe_resource_reference(&t->b.buffer, buffer);
- t->b.buffer_offset = buffer_offset;
- t->b.buffer_size = buffer_size;
-
- util_range_add(&rbuffer->valid_buffer_range, buffer_offset,
- buffer_offset + buffer_size);
- return &t->b;
-}
-
-static void r600_so_target_destroy(struct pipe_context *ctx,
- struct pipe_stream_output_target *target)
-{
- struct r600_so_target *t = (struct r600_so_target*)target;
- pipe_resource_reference(&t->b.buffer, NULL);
- r600_resource_reference(&t->buf_filled_size, NULL);
- FREE(t);
-}
-
-void si_streamout_buffers_dirty(struct r600_common_context *rctx)
-{
- if (!rctx->streamout.enabled_mask)
- return;
-
- rctx->set_atom_dirty(rctx, &rctx->streamout.begin_atom, true);
- r600_set_streamout_enable(rctx, true);
-}
-
-void si_common_set_streamout_targets(struct pipe_context *ctx,
- unsigned num_targets,
- struct pipe_stream_output_target **targets,
- const unsigned *offsets)
-{
- struct r600_common_context *rctx = (struct r600_common_context *)ctx;
- unsigned i;
- unsigned enabled_mask = 0, append_bitmask = 0;
-
- /* Stop streamout. */
- if (rctx->streamout.num_targets && rctx->streamout.begin_emitted) {
- si_emit_streamout_end(rctx);
- }
-
- /* Set the new targets. */
- for (i = 0; i < num_targets; i++) {
- pipe_so_target_reference((struct pipe_stream_output_target**)&rctx->streamout.targets[i], targets[i]);
- if (!targets[i])
- continue;
-
- r600_context_add_resource_size(ctx, targets[i]->buffer);
- enabled_mask |= 1 << i;
- if (offsets[i] == ((unsigned)-1))
- append_bitmask |= 1 << i;
- }
- for (; i < rctx->streamout.num_targets; i++) {
- pipe_so_target_reference((struct pipe_stream_output_target**)&rctx->streamout.targets[i], NULL);
- }
-
- rctx->streamout.enabled_mask = enabled_mask;
-
- rctx->streamout.num_targets = num_targets;
- rctx->streamout.append_bitmask = append_bitmask;
-
- if (num_targets) {
- si_streamout_buffers_dirty(rctx);
- } else {
- rctx->set_atom_dirty(rctx, &rctx->streamout.begin_atom, false);
- r600_set_streamout_enable(rctx, false);
- }
-}
-
-static void r600_flush_vgt_streamout(struct r600_common_context *rctx)
-{
- struct radeon_winsys_cs *cs = rctx->gfx.cs;
- unsigned reg_strmout_cntl;
-
- /* The register is at different places on different ASICs. */
- if (rctx->chip_class >= CIK) {
- reg_strmout_cntl = R_0300FC_CP_STRMOUT_CNTL;
- radeon_set_uconfig_reg(cs, reg_strmout_cntl, 0);
- } else {
- reg_strmout_cntl = R_0084FC_CP_STRMOUT_CNTL;
- radeon_set_config_reg(cs, reg_strmout_cntl, 0);
- }
-
- radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
- radeon_emit(cs, EVENT_TYPE(EVENT_TYPE_SO_VGTSTREAMOUT_FLUSH) | EVENT_INDEX(0));
-
- radeon_emit(cs, PKT3(PKT3_WAIT_REG_MEM, 5, 0));
- radeon_emit(cs, WAIT_REG_MEM_EQUAL); /* wait until the register is equal to the reference value */
- radeon_emit(cs, reg_strmout_cntl >> 2); /* register */
- radeon_emit(cs, 0);
- radeon_emit(cs, S_008490_OFFSET_UPDATE_DONE(1)); /* reference value */
- radeon_emit(cs, S_008490_OFFSET_UPDATE_DONE(1)); /* mask */
- radeon_emit(cs, 4); /* poll interval */
-}
-
-static void r600_emit_streamout_begin(struct r600_common_context *rctx, struct r600_atom *atom)
-{
- struct radeon_winsys_cs *cs = rctx->gfx.cs;
- struct r600_so_target **t = rctx->streamout.targets;
- uint16_t *stride_in_dw = rctx->streamout.stride_in_dw;
- unsigned i;
-
- r600_flush_vgt_streamout(rctx);
-
- for (i = 0; i < rctx->streamout.num_targets; i++) {
- if (!t[i])
- continue;
-
- t[i]->stride_in_dw = stride_in_dw[i];
-
- /* SI binds streamout buffers as shader resources.
- * VGT only counts primitives and tells the shader
- * through SGPRs what to do. */
- radeon_set_context_reg_seq(cs, R_028AD0_VGT_STRMOUT_BUFFER_SIZE_0 + 16*i, 2);
- radeon_emit(cs, (t[i]->b.buffer_offset +
- t[i]->b.buffer_size) >> 2); /* BUFFER_SIZE (in DW) */
- radeon_emit(cs, stride_in_dw[i]); /* VTX_STRIDE (in DW) */
-
- if (rctx->streamout.append_bitmask & (1 << i) && t[i]->buf_filled_size_valid) {
- uint64_t va = t[i]->buf_filled_size->gpu_address +
- t[i]->buf_filled_size_offset;
-
- /* Append. */
- radeon_emit(cs, PKT3(PKT3_STRMOUT_BUFFER_UPDATE, 4, 0));
- radeon_emit(cs, STRMOUT_SELECT_BUFFER(i) |
- STRMOUT_OFFSET_SOURCE(STRMOUT_OFFSET_FROM_MEM)); /* control */
- radeon_emit(cs, 0); /* unused */
- radeon_emit(cs, 0); /* unused */
- radeon_emit(cs, va); /* src address lo */
- radeon_emit(cs, va >> 32); /* src address hi */
-
- r600_emit_reloc(rctx, &rctx->gfx, t[i]->buf_filled_size,
- RADEON_USAGE_READ, RADEON_PRIO_SO_FILLED_SIZE);
- } else {
- /* Start from the beginning. */
- radeon_emit(cs, PKT3(PKT3_STRMOUT_BUFFER_UPDATE, 4, 0));
- radeon_emit(cs, STRMOUT_SELECT_BUFFER(i) |
- STRMOUT_OFFSET_SOURCE(STRMOUT_OFFSET_FROM_PACKET)); /* control */
- radeon_emit(cs, 0); /* unused */
- radeon_emit(cs, 0); /* unused */
- radeon_emit(cs, t[i]->b.buffer_offset >> 2); /* buffer offset in DW */
- radeon_emit(cs, 0); /* unused */
- }
- }
-
- rctx->streamout.begin_emitted = true;
-}
-
-void si_emit_streamout_end(struct r600_common_context *rctx)
-{
- struct radeon_winsys_cs *cs = rctx->gfx.cs;
- struct r600_so_target **t = rctx->streamout.targets;
- unsigned i;
- uint64_t va;
-
- r600_flush_vgt_streamout(rctx);
-
- for (i = 0; i < rctx->streamout.num_targets; i++) {
- if (!t[i])
- continue;
-
- va = t[i]->buf_filled_size->gpu_address + t[i]->buf_filled_size_offset;
- radeon_emit(cs, PKT3(PKT3_STRMOUT_BUFFER_UPDATE, 4, 0));
- radeon_emit(cs, STRMOUT_SELECT_BUFFER(i) |
- STRMOUT_OFFSET_SOURCE(STRMOUT_OFFSET_NONE) |
- STRMOUT_STORE_BUFFER_FILLED_SIZE); /* control */
- radeon_emit(cs, va); /* dst address lo */
- radeon_emit(cs, va >> 32); /* dst address hi */
- radeon_emit(cs, 0); /* unused */
- radeon_emit(cs, 0); /* unused */
-
- r600_emit_reloc(rctx, &rctx->gfx, t[i]->buf_filled_size,
- RADEON_USAGE_WRITE, RADEON_PRIO_SO_FILLED_SIZE);
-
- /* Zero the buffer size. The counters (primitives generated,
- * primitives emitted) may be enabled even if there is not
- * buffer bound. This ensures that the primitives-emitted query
- * won't increment. */
- radeon_set_context_reg(cs, R_028AD0_VGT_STRMOUT_BUFFER_SIZE_0 + 16*i, 0);
-
- t[i]->buf_filled_size_valid = true;
- }
-
- rctx->streamout.begin_emitted = false;
- rctx->flags |= R600_CONTEXT_STREAMOUT_FLUSH;
-}
-
-/* STREAMOUT CONFIG DERIVED STATE
- *
- * Streamout must be enabled for the PRIMITIVES_GENERATED query to work.
- * The buffer mask is an independent state, so no writes occur if there
- * are no buffers bound.
- */
-
-static void r600_emit_streamout_enable(struct r600_common_context *rctx,
- struct r600_atom *atom)
-{
- radeon_set_context_reg_seq(rctx->gfx.cs, R_028B94_VGT_STRMOUT_CONFIG, 2);
- radeon_emit(rctx->gfx.cs,
- S_028B94_STREAMOUT_0_EN(r600_get_strmout_en(rctx)) |
- S_028B94_RAST_STREAM(0) |
- S_028B94_STREAMOUT_1_EN(r600_get_strmout_en(rctx)) |
- S_028B94_STREAMOUT_2_EN(r600_get_strmout_en(rctx)) |
- S_028B94_STREAMOUT_3_EN(r600_get_strmout_en(rctx)));
- radeon_emit(rctx->gfx.cs,
- rctx->streamout.hw_enabled_mask &
- rctx->streamout.enabled_stream_buffers_mask);
-}
-
-static void r600_set_streamout_enable(struct r600_common_context *rctx, bool enable)
-{
- bool old_strmout_en = r600_get_strmout_en(rctx);
- unsigned old_hw_enabled_mask = rctx->streamout.hw_enabled_mask;
-
- rctx->streamout.streamout_enabled = enable;
-
- rctx->streamout.hw_enabled_mask = rctx->streamout.enabled_mask |
- (rctx->streamout.enabled_mask << 4) |
- (rctx->streamout.enabled_mask << 8) |
- (rctx->streamout.enabled_mask << 12);
-
- if ((old_strmout_en != r600_get_strmout_en(rctx)) ||
- (old_hw_enabled_mask != rctx->streamout.hw_enabled_mask)) {
- rctx->set_atom_dirty(rctx, &rctx->streamout.enable_atom, true);
- }
-}
-
-void si_update_prims_generated_query_state(struct r600_common_context *rctx,
- unsigned type, int diff)
-{
- if (type == PIPE_QUERY_PRIMITIVES_GENERATED) {
- bool old_strmout_en = r600_get_strmout_en(rctx);
-
- rctx->streamout.num_prims_gen_queries += diff;
- assert(rctx->streamout.num_prims_gen_queries >= 0);
-
- rctx->streamout.prims_gen_query_enabled =
- rctx->streamout.num_prims_gen_queries != 0;
-
- if (old_strmout_en != r600_get_strmout_en(rctx)) {
- rctx->set_atom_dirty(rctx, &rctx->streamout.enable_atom, true);
- }
- }
-}
-
-void si_streamout_init(struct r600_common_context *rctx)
-{
- rctx->b.create_stream_output_target = r600_create_so_target;
- rctx->b.stream_output_target_destroy = r600_so_target_destroy;
- rctx->streamout.begin_atom.emit = r600_emit_streamout_begin;
- rctx->streamout.enable_atom.emit = r600_emit_streamout_enable;
-}
si_state_binning.c \
si_state_draw.c \
si_state_shaders.c \
+ si_state_streamout.c \
si_state_viewport.c \
si_state.h \
si_uvd.c
util_blitter_save_tessctrl_shader(sctx->blitter, sctx->tcs_shader.cso);
util_blitter_save_tesseval_shader(sctx->blitter, sctx->tes_shader.cso);
util_blitter_save_geometry_shader(sctx->blitter, sctx->gs_shader.cso);
- util_blitter_save_so_targets(sctx->blitter, sctx->b.streamout.num_targets,
- (struct pipe_stream_output_target**)sctx->b.streamout.targets);
+ util_blitter_save_so_targets(sctx->blitter, sctx->streamout.num_targets,
+ (struct pipe_stream_output_target**)sctx->streamout.targets);
util_blitter_save_rasterizer(sctx->blitter, sctx->queued.named.rasterizer);
if (op & SI_SAVE_FRAGMENT_STATE) {
struct si_context *sctx = (struct si_context *)ctx;
struct si_buffer_resources *buffers = &sctx->rw_buffers;
struct si_descriptors *descs = &sctx->descriptors[SI_DESCS_RW_BUFFERS];
- unsigned old_num_targets = sctx->b.streamout.num_targets;
+ unsigned old_num_targets = sctx->streamout.num_targets;
unsigned i, bufidx;
/* We are going to unbind the buffers. Mark which caches need to be flushed. */
- if (sctx->b.streamout.num_targets && sctx->b.streamout.begin_emitted) {
+ if (sctx->streamout.num_targets && sctx->streamout.begin_emitted) {
/* Since streamout uses vector writes which go through TC L2
* and most other clients can use TC L2 as well, we don't need
* to flush it.
* cases. Thus, flag the TC L2 dirtiness in the resource and
* handle it at draw call time.
*/
- for (i = 0; i < sctx->b.streamout.num_targets; i++)
- if (sctx->b.streamout.targets[i])
- r600_resource(sctx->b.streamout.targets[i]->b.buffer)->TC_L2_dirty = true;
+ for (i = 0; i < sctx->streamout.num_targets; i++)
+ if (sctx->streamout.targets[i])
+ r600_resource(sctx->streamout.targets[i]->b.buffer)->TC_L2_dirty = true;
/* Invalidate the scalar cache in case a streamout buffer is
* going to be used as a constant buffer.
true);
/* Update the streamout state. */
- if (sctx->b.streamout.begin_emitted)
- si_emit_streamout_end(&sctx->b);
- sctx->b.streamout.append_bitmask =
- sctx->b.streamout.enabled_mask;
- si_streamout_buffers_dirty(&sctx->b);
+ if (sctx->streamout.begin_emitted)
+ si_emit_streamout_end(sctx);
+ sctx->streamout.append_bitmask =
+ sctx->streamout.enabled_mask;
+ si_streamout_buffers_dirty(sctx);
}
}
si_preflush_suspend_features(&ctx->b);
+ ctx->streamout.suspended = false;
+ if (ctx->streamout.begin_emitted) {
+ si_emit_streamout_end(ctx);
+ ctx->streamout.suspended = true;
+ }
+
ctx->b.flags |= SI_CONTEXT_CS_PARTIAL_FLUSH |
SI_CONTEXT_PS_PARTIAL_FLUSH;
si_mark_atom_dirty(ctx, &ctx->dpbb_state);
si_mark_atom_dirty(ctx, &ctx->stencil_ref.atom);
si_mark_atom_dirty(ctx, &ctx->spi_map);
- si_mark_atom_dirty(ctx, &ctx->b.streamout.enable_atom);
+ si_mark_atom_dirty(ctx, &ctx->streamout.enable_atom);
si_mark_atom_dirty(ctx, &ctx->b.render_cond_atom);
si_all_descriptors_begin_new_cs(ctx);
si_all_resident_buffers_begin_new_cs(ctx);
&ctx->scratch_buffer->b.b);
}
+ if (ctx->streamout.suspended) {
+ ctx->streamout.append_bitmask = ctx->streamout.enabled_mask;
+ si_streamout_buffers_dirty(ctx);
+ }
+
si_postflush_resume_features(&ctx->b);
assert(!ctx->b.gfx.cs->prev_dw);
si_init_compute_functions(sctx);
si_init_cp_dma_functions(sctx);
si_init_debug_functions(sctx);
+ si_init_streamout_functions(sctx);
if (sscreen->b.info.has_hw_decode) {
sctx->b.b.create_video_codec = si_uvd_create_decoder;
uint16_t sample_mask;
};
+struct si_streamout_target {
+ struct pipe_stream_output_target b;
+
+ /* The buffer where BUFFER_FILLED_SIZE is stored. */
+ struct r600_resource *buf_filled_size;
+ unsigned buf_filled_size_offset;
+ bool buf_filled_size_valid;
+
+ unsigned stride_in_dw;
+};
+
+struct si_streamout {
+ struct r600_atom begin_atom;
+ bool begin_emitted;
+
+ unsigned enabled_mask;
+ unsigned num_targets;
+ struct si_streamout_target *targets[PIPE_MAX_SO_BUFFERS];
+
+ unsigned append_bitmask;
+ bool suspended;
+
+	/* External state which comes from the vertex shader;
+ * it must be set explicitly when binding a shader. */
+ uint16_t *stride_in_dw;
+ unsigned enabled_stream_buffers_mask; /* stream0 buffers0-3 in 4 LSB */
+
+ /* The state of VGT_STRMOUT_BUFFER_(CONFIG|EN). */
+ unsigned hw_enabled_mask;
+
+ /* The state of VGT_STRMOUT_(CONFIG|EN). */
+ struct r600_atom enable_atom;
+ bool streamout_enabled;
+ bool prims_gen_query_enabled;
+ int num_prims_gen_queries;
+};
+
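For reference, the three masks in struct si_streamout above combine as follows: enabled_mask tracks which of the four buffer slots have a target bound, hw_enabled_mask replicates that 4-bit mask into the four per-stream fields of VGT_STRMOUT_BUFFER_CONFIG, and enabled_stream_buffers_mask (set when a shader with streamout is bound) selects which buffers are actually written. A minimal standalone sketch of that packing, mirroring si_set_streamout_enable and si_emit_streamout_enable further below; the function and variable names here are illustrative, not driver API:

#include <stdio.h>

/* Illustrative only: replicate a 4-bit buffer mask into the four
 * per-stream fields, as si_set_streamout_enable does. */
static unsigned pack_hw_enabled_mask(unsigned enabled_mask)
{
	return enabled_mask |
	       (enabled_mask << 4) |
	       (enabled_mask << 8) |
	       (enabled_mask << 12);
}

int main(void)
{
	unsigned enabled_mask = 0x3;                /* buffers 0-1 bound */
	unsigned enabled_stream_buffers_mask = 0x1; /* shader writes buffer 0 on stream 0 */
	unsigned hw = pack_hw_enabled_mask(enabled_mask);

	/* This AND is the value si_emit_streamout_enable writes as the
	 * second dword (VGT_STRMOUT_BUFFER_CONFIG). */
	printf("0x%x\n", hw & enabled_stream_buffers_mask); /* prints 0x1 */
	return 0;
}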
/* A shader state consists of the shader selector, which is a constant state
* object shared by multiple contexts and shouldn't be modified, and
* the current shader variant selected for this context.
struct si_stencil_ref stencil_ref;
struct r600_atom spi_map;
struct si_scissors scissors;
+ struct si_streamout streamout;
struct si_viewports viewports;
/* Precomputed states. */
return vs->current ? vs->current : NULL;
}
+static inline bool si_get_strmout_en(struct si_context *sctx)
+{
+ return sctx->streamout.streamout_enabled ||
+ sctx->streamout.prims_gen_query_enabled;
+}
+
static inline unsigned
si_optimal_tcc_alignment(struct si_context *sctx, unsigned upload_size)
{
void si_init_state_functions(struct si_context *sctx)
{
si_init_external_atom(sctx, &sctx->b.render_cond_atom, &sctx->atoms.s.render_cond);
- si_init_external_atom(sctx, &sctx->b.streamout.begin_atom, &sctx->atoms.s.streamout_begin);
- si_init_external_atom(sctx, &sctx->b.streamout.enable_atom, &sctx->atoms.s.streamout_enable);
+ si_init_external_atom(sctx, &sctx->streamout.begin_atom, &sctx->atoms.s.streamout_begin);
+ si_init_external_atom(sctx, &sctx->streamout.enable_atom, &sctx->atoms.s.streamout_enable);
si_init_external_atom(sctx, &sctx->scissors.atom, &sctx->atoms.s.scissors);
si_init_external_atom(sctx, &sctx->viewports.atom, &sctx->atoms.s.viewports);
const union blitter_attrib *attrib);
void si_trace_emit(struct si_context *sctx);
+/* si_state_streamout.c */
+void si_streamout_buffers_dirty(struct si_context *sctx);
+void si_common_set_streamout_targets(struct pipe_context *ctx,
+ unsigned num_targets,
+ struct pipe_stream_output_target **targets,
+ const unsigned *offset);
+void si_emit_streamout_end(struct si_context *sctx);
+void si_update_prims_generated_query_state(struct si_context *sctx,
+ unsigned type, int diff);
+void si_init_streamout_functions(struct si_context *sctx);
+
static inline unsigned
si_tile_mode_index(struct r600_texture *rtex, unsigned level, bool stencil)
uint64_t index_va = 0;
if (info->count_from_stream_output) {
- struct r600_so_target *t =
- (struct r600_so_target*)info->count_from_stream_output;
+ struct si_streamout_target *t =
+ (struct si_streamout_target*)info->count_from_stream_output;
uint64_t va = t->buf_filled_size->gpu_address +
t->buf_filled_size_offset;
if ((sctx->b.family == CHIP_HAWAII ||
sctx->b.family == CHIP_TONGA ||
sctx->b.family == CHIP_FIJI) &&
- r600_get_strmout_en(&sctx->b)) {
+ si_get_strmout_en(sctx)) {
sctx->b.flags |= SI_CONTEXT_VGT_STREAMOUT_SYNC;
}
if (!shader_with_so)
return;
- sctx->b.streamout.enabled_stream_buffers_mask =
+ sctx->streamout.enabled_stream_buffers_mask =
shader_with_so->enabled_streamout_buffer_mask;
- sctx->b.streamout.stride_in_dw = shader_with_so->so.stride;
+ sctx->streamout.stride_in_dw = shader_with_so->so.stride;
}
static void si_update_clip_regs(struct si_context *sctx,
--- /dev/null
+/*
+ * Copyright 2013 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * Authors: Marek Olšák <maraeo@gmail.com>
+ *
+ */
+
+#include "si_pipe.h"
+#include "si_state.h"
+#include "radeon/r600_cs.h"
+
+#include "util/u_memory.h"
+
+static void si_set_streamout_enable(struct si_context *sctx, bool enable);
+
+static struct pipe_stream_output_target *
+si_create_so_target(struct pipe_context *ctx,
+ struct pipe_resource *buffer,
+ unsigned buffer_offset,
+ unsigned buffer_size)
+{
+ struct si_context *sctx = (struct si_context *)ctx;
+ struct si_streamout_target *t;
+ struct r600_resource *rbuffer = (struct r600_resource*)buffer;
+
+ t = CALLOC_STRUCT(si_streamout_target);
+ if (!t) {
+ return NULL;
+ }
+
+ u_suballocator_alloc(sctx->b.allocator_zeroed_memory, 4, 4,
+ &t->buf_filled_size_offset,
+ (struct pipe_resource**)&t->buf_filled_size);
+ if (!t->buf_filled_size) {
+ FREE(t);
+ return NULL;
+ }
+
+ t->b.reference.count = 1;
+ t->b.context = ctx;
+ pipe_resource_reference(&t->b.buffer, buffer);
+ t->b.buffer_offset = buffer_offset;
+ t->b.buffer_size = buffer_size;
+
+ util_range_add(&rbuffer->valid_buffer_range, buffer_offset,
+ buffer_offset + buffer_size);
+ return &t->b;
+}
+
+static void si_so_target_destroy(struct pipe_context *ctx,
+ struct pipe_stream_output_target *target)
+{
+ struct si_streamout_target *t = (struct si_streamout_target*)target;
+ pipe_resource_reference(&t->b.buffer, NULL);
+ r600_resource_reference(&t->buf_filled_size, NULL);
+ FREE(t);
+}
+
+void si_streamout_buffers_dirty(struct si_context *sctx)
+{
+ if (!sctx->streamout.enabled_mask)
+ return;
+
+ si_mark_atom_dirty(sctx, &sctx->streamout.begin_atom);
+ si_set_streamout_enable(sctx, true);
+}
+
+void si_common_set_streamout_targets(struct pipe_context *ctx,
+ unsigned num_targets,
+ struct pipe_stream_output_target **targets,
+ const unsigned *offsets)
+{
+ struct si_context *sctx = (struct si_context *)ctx;
+ unsigned i;
+ unsigned enabled_mask = 0, append_bitmask = 0;
+
+ /* Stop streamout. */
+ if (sctx->streamout.num_targets && sctx->streamout.begin_emitted) {
+ si_emit_streamout_end(sctx);
+ }
+
+ /* Set the new targets. */
+ for (i = 0; i < num_targets; i++) {
+ pipe_so_target_reference((struct pipe_stream_output_target**)&sctx->streamout.targets[i], targets[i]);
+ if (!targets[i])
+ continue;
+
+ r600_context_add_resource_size(ctx, targets[i]->buffer);
+ enabled_mask |= 1 << i;
+ if (offsets[i] == ((unsigned)-1))
+ append_bitmask |= 1 << i;
+ }
+ for (; i < sctx->streamout.num_targets; i++) {
+ pipe_so_target_reference((struct pipe_stream_output_target**)&sctx->streamout.targets[i], NULL);
+ }
+
+ sctx->streamout.enabled_mask = enabled_mask;
+
+ sctx->streamout.num_targets = num_targets;
+ sctx->streamout.append_bitmask = append_bitmask;
+
+ if (num_targets) {
+ si_streamout_buffers_dirty(sctx);
+ } else {
+ si_set_atom_dirty(sctx, &sctx->streamout.begin_atom, false);
+ si_set_streamout_enable(sctx, false);
+ }
+}
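A standalone sketch of the mask computation in si_common_set_streamout_targets above: every bound slot sets a bit in enabled_mask, and a slot whose offset is the sentinel (unsigned)-1 also sets a bit in append_bitmask, meaning "resume from the saved BUFFER_FILLED_SIZE" rather than from the start of the buffer. The helper and variable names below are illustrative only:

#include <stdbool.h>
#include <stdio.h>

/* Illustrative model of the target-binding loop above. */
static void compute_so_masks(const bool *bound, const unsigned *offsets,
			     unsigned num_targets,
			     unsigned *enabled_mask, unsigned *append_bitmask)
{
	*enabled_mask = 0;
	*append_bitmask = 0;

	for (unsigned i = 0; i < num_targets; i++) {
		if (!bound[i])
			continue;
		*enabled_mask |= 1u << i;
		if (offsets[i] == (unsigned)-1) /* append to the saved filled size */
			*append_bitmask |= 1u << i;
	}
}

int main(void)
{
	bool bound[4] = { true, true, false, false };
	unsigned offsets[4] = { 0, (unsigned)-1, 0, 0 };
	unsigned enabled, append;

	compute_so_masks(bound, offsets, 4, &enabled, &append);
	printf("enabled 0x%x append 0x%x\n", enabled, append); /* 0x3, 0x2 */
	return 0;
}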
+
+static void si_flush_vgt_streamout(struct si_context *sctx)
+{
+ struct radeon_winsys_cs *cs = sctx->b.gfx.cs;
+ unsigned reg_strmout_cntl;
+
+ /* The register is at different places on different ASICs. */
+ if (sctx->b.chip_class >= CIK) {
+ reg_strmout_cntl = R_0300FC_CP_STRMOUT_CNTL;
+ radeon_set_uconfig_reg(cs, reg_strmout_cntl, 0);
+ } else {
+ reg_strmout_cntl = R_0084FC_CP_STRMOUT_CNTL;
+ radeon_set_config_reg(cs, reg_strmout_cntl, 0);
+ }
+
+ radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
+ radeon_emit(cs, EVENT_TYPE(EVENT_TYPE_SO_VGTSTREAMOUT_FLUSH) | EVENT_INDEX(0));
+
+ radeon_emit(cs, PKT3(PKT3_WAIT_REG_MEM, 5, 0));
+ radeon_emit(cs, WAIT_REG_MEM_EQUAL); /* wait until the register is equal to the reference value */
+ radeon_emit(cs, reg_strmout_cntl >> 2); /* register */
+ radeon_emit(cs, 0);
+ radeon_emit(cs, S_008490_OFFSET_UPDATE_DONE(1)); /* reference value */
+ radeon_emit(cs, S_008490_OFFSET_UPDATE_DONE(1)); /* mask */
+ radeon_emit(cs, 4); /* poll interval */
+}
+
+static void si_emit_streamout_begin(struct r600_common_context *rctx, struct r600_atom *atom)
+{
+ struct si_context *sctx = (struct si_context*)rctx;
+ struct radeon_winsys_cs *cs = sctx->b.gfx.cs;
+ struct si_streamout_target **t = sctx->streamout.targets;
+ uint16_t *stride_in_dw = sctx->streamout.stride_in_dw;
+ unsigned i;
+
+ si_flush_vgt_streamout(sctx);
+
+ for (i = 0; i < sctx->streamout.num_targets; i++) {
+ if (!t[i])
+ continue;
+
+ t[i]->stride_in_dw = stride_in_dw[i];
+
+ /* SI binds streamout buffers as shader resources.
+ * VGT only counts primitives and tells the shader
+ * through SGPRs what to do. */
+ radeon_set_context_reg_seq(cs, R_028AD0_VGT_STRMOUT_BUFFER_SIZE_0 + 16*i, 2);
+ radeon_emit(cs, (t[i]->b.buffer_offset +
+ t[i]->b.buffer_size) >> 2); /* BUFFER_SIZE (in DW) */
+ radeon_emit(cs, stride_in_dw[i]); /* VTX_STRIDE (in DW) */
+
+ if (sctx->streamout.append_bitmask & (1 << i) && t[i]->buf_filled_size_valid) {
+ uint64_t va = t[i]->buf_filled_size->gpu_address +
+ t[i]->buf_filled_size_offset;
+
+ /* Append. */
+ radeon_emit(cs, PKT3(PKT3_STRMOUT_BUFFER_UPDATE, 4, 0));
+ radeon_emit(cs, STRMOUT_SELECT_BUFFER(i) |
+ STRMOUT_OFFSET_SOURCE(STRMOUT_OFFSET_FROM_MEM)); /* control */
+ radeon_emit(cs, 0); /* unused */
+ radeon_emit(cs, 0); /* unused */
+ radeon_emit(cs, va); /* src address lo */
+ radeon_emit(cs, va >> 32); /* src address hi */
+
+ r600_emit_reloc(&sctx->b, &sctx->b.gfx, t[i]->buf_filled_size,
+ RADEON_USAGE_READ, RADEON_PRIO_SO_FILLED_SIZE);
+ } else {
+ /* Start from the beginning. */
+ radeon_emit(cs, PKT3(PKT3_STRMOUT_BUFFER_UPDATE, 4, 0));
+ radeon_emit(cs, STRMOUT_SELECT_BUFFER(i) |
+ STRMOUT_OFFSET_SOURCE(STRMOUT_OFFSET_FROM_PACKET)); /* control */
+ radeon_emit(cs, 0); /* unused */
+ radeon_emit(cs, 0); /* unused */
+ radeon_emit(cs, t[i]->b.buffer_offset >> 2); /* buffer offset in DW */
+ radeon_emit(cs, 0); /* unused */
+ }
+ }
+
+ sctx->streamout.begin_emitted = true;
+}
+
+void si_emit_streamout_end(struct si_context *sctx)
+{
+ struct radeon_winsys_cs *cs = sctx->b.gfx.cs;
+ struct si_streamout_target **t = sctx->streamout.targets;
+ unsigned i;
+ uint64_t va;
+
+ si_flush_vgt_streamout(sctx);
+
+ for (i = 0; i < sctx->streamout.num_targets; i++) {
+ if (!t[i])
+ continue;
+
+ va = t[i]->buf_filled_size->gpu_address + t[i]->buf_filled_size_offset;
+ radeon_emit(cs, PKT3(PKT3_STRMOUT_BUFFER_UPDATE, 4, 0));
+ radeon_emit(cs, STRMOUT_SELECT_BUFFER(i) |
+ STRMOUT_OFFSET_SOURCE(STRMOUT_OFFSET_NONE) |
+ STRMOUT_STORE_BUFFER_FILLED_SIZE); /* control */
+ radeon_emit(cs, va); /* dst address lo */
+ radeon_emit(cs, va >> 32); /* dst address hi */
+ radeon_emit(cs, 0); /* unused */
+ radeon_emit(cs, 0); /* unused */
+
+ r600_emit_reloc(&sctx->b, &sctx->b.gfx, t[i]->buf_filled_size,
+ RADEON_USAGE_WRITE, RADEON_PRIO_SO_FILLED_SIZE);
+
+ /* Zero the buffer size. The counters (primitives generated,
+	 * primitives emitted) may be enabled even if there is no
+ * buffer bound. This ensures that the primitives-emitted query
+ * won't increment. */
+ radeon_set_context_reg(cs, R_028AD0_VGT_STRMOUT_BUFFER_SIZE_0 + 16*i, 0);
+
+ t[i]->buf_filled_size_valid = true;
+ }
+
+ sctx->streamout.begin_emitted = false;
+ sctx->b.flags |= R600_CONTEXT_STREAMOUT_FLUSH;
+}
+
+/* STREAMOUT CONFIG DERIVED STATE
+ *
+ * Streamout must be enabled for the PRIMITIVES_GENERATED query to work.
+ * The buffer mask is an independent state, so no writes occur if there
+ * are no buffers bound.
+ */
+
+static void si_emit_streamout_enable(struct r600_common_context *rctx,
+ struct r600_atom *atom)
+{
+ struct si_context *sctx = (struct si_context*)rctx;
+
+ radeon_set_context_reg_seq(sctx->b.gfx.cs, R_028B94_VGT_STRMOUT_CONFIG, 2);
+ radeon_emit(sctx->b.gfx.cs,
+ S_028B94_STREAMOUT_0_EN(si_get_strmout_en(sctx)) |
+ S_028B94_RAST_STREAM(0) |
+ S_028B94_STREAMOUT_1_EN(si_get_strmout_en(sctx)) |
+ S_028B94_STREAMOUT_2_EN(si_get_strmout_en(sctx)) |
+ S_028B94_STREAMOUT_3_EN(si_get_strmout_en(sctx)));
+ radeon_emit(sctx->b.gfx.cs,
+ sctx->streamout.hw_enabled_mask &
+ sctx->streamout.enabled_stream_buffers_mask);
+}
+
+static void si_set_streamout_enable(struct si_context *sctx, bool enable)
+{
+ bool old_strmout_en = si_get_strmout_en(sctx);
+ unsigned old_hw_enabled_mask = sctx->streamout.hw_enabled_mask;
+
+ sctx->streamout.streamout_enabled = enable;
+
+ sctx->streamout.hw_enabled_mask = sctx->streamout.enabled_mask |
+ (sctx->streamout.enabled_mask << 4) |
+ (sctx->streamout.enabled_mask << 8) |
+ (sctx->streamout.enabled_mask << 12);
+
+ if ((old_strmout_en != si_get_strmout_en(sctx)) ||
+ (old_hw_enabled_mask != sctx->streamout.hw_enabled_mask))
+ si_mark_atom_dirty(sctx, &sctx->streamout.enable_atom);
+}
+
+void si_update_prims_generated_query_state(struct si_context *sctx,
+ unsigned type, int diff)
+{
+ if (type == PIPE_QUERY_PRIMITIVES_GENERATED) {
+ bool old_strmout_en = si_get_strmout_en(sctx);
+
+ sctx->streamout.num_prims_gen_queries += diff;
+ assert(sctx->streamout.num_prims_gen_queries >= 0);
+
+ sctx->streamout.prims_gen_query_enabled =
+ sctx->streamout.num_prims_gen_queries != 0;
+
+ if (old_strmout_en != si_get_strmout_en(sctx))
+ si_mark_atom_dirty(sctx, &sctx->streamout.enable_atom);
+ }
+}
+
+void si_init_streamout_functions(struct si_context *sctx)
+{
+ sctx->b.b.create_stream_output_target = si_create_so_target;
+ sctx->b.b.stream_output_target_destroy = si_so_target_destroy;
+ sctx->streamout.begin_atom.emit = si_emit_streamout_begin;
+ sctx->streamout.enable_atom.emit = si_emit_streamout_enable;
+}
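To summarize the BUFFER_FILLED_SIZE round trip implemented in this file: si_emit_streamout_end stores the filled size for each target and marks it valid, and the next si_emit_streamout_begin resumes from that saved value (STRMOUT_OFFSET_FROM_MEM) only when the target's append bit is set and the saved size is valid; otherwise it restarts from buffer_offset (STRMOUT_OFFSET_FROM_PACKET). A minimal host-side model of that decision, with illustrative types and names:

#include <stdbool.h>
#include <stdio.h>

enum so_offset_source { OFFSET_FROM_PACKET, OFFSET_FROM_MEM };

struct so_target_model {
	bool filled_size_valid;   /* set by the "end" step */
	unsigned buffer_offset;   /* in bytes */
};

/* Mirrors the branch in si_emit_streamout_begin. */
static enum so_offset_source
begin_offset_source(const struct so_target_model *t, bool append_requested)
{
	if (append_requested && t->filled_size_valid)
		return OFFSET_FROM_MEM;     /* resume where the last draw stopped */
	return OFFSET_FROM_PACKET;          /* start at buffer_offset >> 2 dwords */
}

int main(void)
{
	struct so_target_model t = { .filled_size_valid = false, .buffer_offset = 256 };

	printf("%d\n", begin_offset_source(&t, true));  /* 0: from packet, nothing saved yet */
	t.filled_size_valid = true;                     /* as si_emit_streamout_end would do */
	printf("%d\n", begin_offset_source(&t, true));  /* 1: append from memory */
	return 0;
}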