/*
* Copyright 2013 Advanced Micro Devices, Inc.
+ * All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* SOFTWARE.
*/
-#include "si_pipe.h"
-#include "si_state.h"
-#include "sid.h"
-#include "radeon/r600_cs.h"
+#include "si_build_pm4.h"
#include "util/u_memory.h"
+#include "util/u_suballoc.h"
static void si_set_streamout_enable(struct si_context *sctx, bool enable);

static struct pipe_stream_output_target *
si_create_so_target(struct pipe_context *ctx,
		    struct pipe_resource *buffer,
		    unsigned buffer_offset,
		    unsigned buffer_size)
{
struct si_context *sctx = (struct si_context *)ctx;
struct si_streamout_target *t;
- struct r600_resource *rbuffer = (struct r600_resource*)buffer;
+ struct si_resource *buf = si_resource(buffer);
t = CALLOC_STRUCT(si_streamout_target);
if (!t) {
return NULL;
}
- u_suballocator_alloc(sctx->b.allocator_zeroed_memory, 4, 4,
+ unsigned buf_filled_size_size = sctx->screen->use_ngg_streamout ? 8 : 4;
+ u_suballocator_alloc(sctx->allocator_zeroed_memory, buf_filled_size_size, 4,
&t->buf_filled_size_offset,
(struct pipe_resource**)&t->buf_filled_size);
	if (!t->buf_filled_size) {
		FREE(t);
		return NULL;
	}

t->b.buffer_offset = buffer_offset;
t->b.buffer_size = buffer_size;
- util_range_add(&rbuffer->valid_buffer_range, buffer_offset,
+ util_range_add(&buf->valid_buffer_range, buffer_offset,
buffer_offset + buffer_size);
return &t->b;
}

static void si_so_target_destroy(struct pipe_context *ctx,
				 struct pipe_stream_output_target *target)
{
struct si_streamout_target *t = (struct si_streamout_target*)target;
pipe_resource_reference(&t->b.buffer, NULL);
- r600_resource_reference(&t->buf_filled_size, NULL);
+ si_resource_reference(&t->buf_filled_size, NULL);
FREE(t);
}

static void si_streamout_buffers_dirty(struct si_context *sctx)
{
	if (!sctx->streamout.enabled_mask)
return;
- si_mark_atom_dirty(sctx, &sctx->streamout.begin_atom);
+ si_mark_atom_dirty(sctx, &sctx->atoms.s.streamout_begin);
si_set_streamout_enable(sctx, true);
}

static void si_set_streamout_targets(struct pipe_context *ctx,
				     unsigned num_targets,
				     struct pipe_stream_output_target **targets,
				     const unsigned *offsets)
{
struct si_context *sctx = (struct si_context *)ctx;
- struct si_buffer_resources *buffers = &sctx->rw_buffers;
- struct si_descriptors *descs = &sctx->descriptors[SI_DESCS_RW_BUFFERS];
unsigned old_num_targets = sctx->streamout.num_targets;
- unsigned i, bufidx;
+ unsigned i;
+ bool wait_now = false;
/* We are going to unbind the buffers. Mark which caches need to be flushed. */
if (sctx->streamout.num_targets && sctx->streamout.begin_emitted) {
		/* Since streamout uses vector writes which go through TC L2
		 * and most other clients can use TC L2 as well, we don't need
		 * to flush it.
		 *
		 * The only cases which require flushing it are VGT DMA index
- * fetching (on <= CIK) and indirect draw data, which are rare
+ * fetching (on <= GFX7) and indirect draw data, which are rare
* cases. Thus, flag the TC L2 dirtiness in the resource and
* handle it at draw call time.
*/
for (i = 0; i < sctx->streamout.num_targets; i++)
if (sctx->streamout.targets[i])
- r600_resource(sctx->streamout.targets[i]->b.buffer)->TC_L2_dirty = true;
+ si_resource(sctx->streamout.targets[i]->b.buffer)->TC_L2_dirty = true;
/* Invalidate the scalar cache in case a streamout buffer is
* going to be used as a constant buffer.
*
- * Invalidate TC L1, because streamout bypasses it (done by
- * setting GLC=1 in the store instruction), but it can contain
- * outdated data of streamout buffers.
+ * Invalidate vL1, because streamout bypasses it (done by
+ * setting GLC=1 in the store instruction), but vL1 in other
+ * CUs can contain outdated data of streamout buffers.
*
* VS_PARTIAL_FLUSH is required if the buffers are going to be
* used as an input immediately.
*/
- sctx->b.flags |= SI_CONTEXT_INV_SMEM_L1 |
- SI_CONTEXT_INV_VMEM_L1 |
- SI_CONTEXT_VS_PARTIAL_FLUSH;
+ sctx->flags |= SI_CONTEXT_INV_SCACHE |
+ SI_CONTEXT_INV_VCACHE;
+
+ /* The BUFFER_FILLED_SIZE is written using a PS_DONE event. */
+ if (sctx->screen->use_ngg_streamout) {
+ sctx->flags |= SI_CONTEXT_PS_PARTIAL_FLUSH;
+
+ /* Wait now. This is needed to make sure that GDS is not
+ * busy at the end of IBs.
+ *
+ * Also, the next streamout operation will overwrite GDS,
+ * so we need to make sure that it's idle.
+ */
+ wait_now = true;
+ } else {
+ sctx->flags |= SI_CONTEXT_VS_PARTIAL_FLUSH;
+ }
}
/* All readers of the streamout targets need to be finished before we can
* start writing to the targets.
*/
- if (num_targets)
- sctx->b.flags |= SI_CONTEXT_PS_PARTIAL_FLUSH |
- SI_CONTEXT_CS_PARTIAL_FLUSH;
+ if (num_targets) {
+ if (sctx->screen->use_ngg_streamout)
+ si_allocate_gds(sctx);
+
+ sctx->flags |= SI_CONTEXT_PS_PARTIAL_FLUSH |
+ SI_CONTEXT_CS_PARTIAL_FLUSH;
+ }
	/* Streamout buffers must be bound in 2 places:
	 * 1) in VGT by setting the VGT_STRMOUT registers
	 * 2) as shader resources
	 */
if (!targets[i])
continue;
- si_context_add_resource_size(ctx, targets[i]->buffer);
+ si_context_add_resource_size(sctx, targets[i]->buffer);
enabled_mask |= 1 << i;
if (offsets[i] == ((unsigned)-1))
if (num_targets) {
si_streamout_buffers_dirty(sctx);
} else {
- si_set_atom_dirty(sctx, &sctx->streamout.begin_atom, false);
+ si_set_atom_dirty(sctx, &sctx->atoms.s.streamout_begin, false);
si_set_streamout_enable(sctx, false);
}
	/* Set the shader resources. */
for (i = 0; i < num_targets; i++) {
- bufidx = SI_VS_STREAMOUT_BUF0 + i;
-
if (targets[i]) {
- struct pipe_resource *buffer = targets[i]->buffer;
- uint64_t va = r600_resource(buffer)->gpu_address;
-
- /* Set the descriptor.
- *
- * On VI, the format must be non-INVALID, otherwise
- * the buffer will be considered not bound and store
- * instructions will be no-ops.
- */
- uint32_t *desc = descs->list + bufidx*4;
- desc[0] = va;
- desc[1] = S_008F04_BASE_ADDRESS_HI(va >> 32);
- desc[2] = 0xffffffff;
- desc[3] = S_008F0C_DST_SEL_X(V_008F0C_SQ_SEL_X) |
- S_008F0C_DST_SEL_Y(V_008F0C_SQ_SEL_Y) |
- S_008F0C_DST_SEL_Z(V_008F0C_SQ_SEL_Z) |
- S_008F0C_DST_SEL_W(V_008F0C_SQ_SEL_W) |
- S_008F0C_DATA_FORMAT(V_008F0C_BUF_DATA_FORMAT_32);
-
- /* Set the resource. */
- pipe_resource_reference(&buffers->buffers[bufidx],
- buffer);
- radeon_add_to_gfx_buffer_list_check_mem(sctx,
- (struct r600_resource*)buffer,
- buffers->shader_usage,
- RADEON_PRIO_SHADER_RW_BUFFER,
- true);
- r600_resource(buffer)->bind_history |= PIPE_BIND_STREAM_OUTPUT;
-
- buffers->enabled_mask |= 1u << bufidx;
+ struct pipe_shader_buffer sbuf;
+ sbuf.buffer = targets[i]->buffer;
+
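+			/* NGG streamout binds the exact range being written.
+			 * The legacy path binds the range from the start of
+			 * the buffer, because the write offsets it uses are
+			 * relative to offset 0.
+			 */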
+ if (sctx->screen->use_ngg_streamout) {
+ sbuf.buffer_offset = targets[i]->buffer_offset;
+ sbuf.buffer_size = targets[i]->buffer_size;
+ } else {
+ sbuf.buffer_offset = 0;
+ sbuf.buffer_size = targets[i]->buffer_offset +
+ targets[i]->buffer_size;
+ }
+
+ si_set_rw_shader_buffer(sctx, SI_VS_STREAMOUT_BUF0 + i, &sbuf);
+ si_resource(targets[i]->buffer)->bind_history |= PIPE_BIND_STREAM_OUTPUT;
} else {
- /* Clear the descriptor and unset the resource. */
- memset(descs->list + bufidx*4, 0,
- sizeof(uint32_t) * 4);
- pipe_resource_reference(&buffers->buffers[bufidx],
- NULL);
- buffers->enabled_mask &= ~(1u << bufidx);
+ si_set_rw_shader_buffer(sctx, SI_VS_STREAMOUT_BUF0 + i, NULL);
+ }
+ }
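+
+	/* Unbind the slots used by the previous call but not by this one. */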
+ for (; i < old_num_targets; i++)
+ si_set_rw_shader_buffer(sctx, SI_VS_STREAMOUT_BUF0 + i, NULL);
+
+ if (wait_now)
+ sctx->emit_cache_flush(sctx);
+}
+
+static void gfx10_emit_streamout_begin(struct si_context *sctx)
+{
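+	/* Prime GDS with the saved BUFFER_FILLED_SIZE of each target, or with
+	 * zero when not appending, before streamout begins.
+	 */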
+ struct si_streamout_target **t = sctx->streamout.targets;
+ struct radeon_cmdbuf *cs = sctx->gfx_cs;
+ unsigned last_target = 0;
+
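+	/* Find the last bound target; only its DMA_DATA packet sets CP_SYNC
+	 * and waits for the write confirmation (see the second loop).
+	 */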
+ for (unsigned i = 0; i < sctx->streamout.num_targets; i++) {
+ if (t[i])
+ last_target = i;
+ }
+
+ for (unsigned i = 0; i < sctx->streamout.num_targets; i++) {
+ if (!t[i])
+ continue;
+
+ t[i]->stride_in_dw = sctx->streamout.stride_in_dw[i];
+
+ bool append = sctx->streamout.append_bitmask & (1 << i);
+ uint64_t va = 0;
+
+ if (append) {
+ radeon_add_to_buffer_list(sctx, sctx->gfx_cs,
+ t[i]->buf_filled_size,
+ RADEON_USAGE_READ,
+ RADEON_PRIO_SO_FILLED_SIZE);
+
+ va = t[i]->buf_filled_size->gpu_address +
+ t[i]->buf_filled_size_offset;
}
+
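+		/* Copy 4 bytes of the filled size (or an immediate 0) into
+		 * GDS dword i; gfx10_emit_streamout_end reads it back from
+		 * there when streamout ends.
+		 */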
+ radeon_emit(cs, PKT3(PKT3_DMA_DATA, 5, 0));
+ radeon_emit(cs, S_411_SRC_SEL(append ? V_411_SRC_ADDR_TC_L2 : V_411_DATA) |
+ S_411_DST_SEL(V_411_GDS) |
+ S_411_CP_SYNC(i == last_target));
+ radeon_emit(cs, va);
+ radeon_emit(cs, va >> 32);
+ radeon_emit(cs, 4 * i); /* destination in GDS */
+ radeon_emit(cs, 0);
+ radeon_emit(cs, S_414_BYTE_COUNT_GFX9(4) |
+ S_414_DISABLE_WR_CONFIRM_GFX9(i != last_target));
}
- for (; i < old_num_targets; i++) {
- bufidx = SI_VS_STREAMOUT_BUF0 + i;
- /* Clear the descriptor and unset the resource. */
- memset(descs->list + bufidx*4, 0, sizeof(uint32_t) * 4);
- pipe_resource_reference(&buffers->buffers[bufidx], NULL);
- buffers->enabled_mask &= ~(1u << bufidx);
+
+ sctx->streamout.begin_emitted = true;
+}
+
+static void gfx10_emit_streamout_end(struct si_context *sctx)
+{
+ struct si_streamout_target **t = sctx->streamout.targets;
+
+ for (unsigned i = 0; i < sctx->streamout.num_targets; i++) {
+ if (!t[i])
+ continue;
+
+ uint64_t va = t[i]->buf_filled_size->gpu_address + t[i]->buf_filled_size_offset;
+
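+		/* Read the final filled size back from GDS dword i into
+		 * buf_filled_size with an end-of-pipe PS_DONE event.
+		 */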
+ si_cp_release_mem(sctx, sctx->gfx_cs, V_028A90_PS_DONE, 0,
+ EOP_DST_SEL_TC_L2,
+ EOP_INT_SEL_SEND_DATA_AFTER_WR_CONFIRM,
+ EOP_DATA_SEL_GDS,
+ t[i]->buf_filled_size, va,
+ EOP_DATA_GDS(i, 1), 0);
+
+ t[i]->buf_filled_size_valid = true;
}
- sctx->descriptors_dirty |= 1u << SI_DESCS_RW_BUFFERS;
+ sctx->streamout.begin_emitted = false;
}
static void si_flush_vgt_streamout(struct si_context *sctx)
{
- struct radeon_winsys_cs *cs = sctx->b.gfx_cs;
+ struct radeon_cmdbuf *cs = sctx->gfx_cs;
unsigned reg_strmout_cntl;
/* The register is at different places on different ASICs. */
- if (sctx->b.chip_class >= CIK) {
+ if (sctx->chip_class >= GFX7) {
reg_strmout_cntl = R_0300FC_CP_STRMOUT_CNTL;
radeon_set_uconfig_reg(cs, reg_strmout_cntl, 0);
} else {
radeon_emit(cs, 4); /* poll interval */
}
-static void si_emit_streamout_begin(struct r600_common_context *rctx, struct r600_atom *atom)
+static void si_emit_streamout_begin(struct si_context *sctx)
{
- struct si_context *sctx = (struct si_context*)rctx;
- struct radeon_winsys_cs *cs = sctx->b.gfx_cs;
+ struct radeon_cmdbuf *cs = sctx->gfx_cs;
struct si_streamout_target **t = sctx->streamout.targets;
uint16_t *stride_in_dw = sctx->streamout.stride_in_dw;
unsigned i;
t[i]->stride_in_dw = stride_in_dw[i];
- /* SI binds streamout buffers as shader resources.
+ /* AMD GCN binds streamout buffers as shader resources.
* VGT only counts primitives and tells the shader
* through SGPRs what to do. */
radeon_set_context_reg_seq(cs, R_028AD0_VGT_STRMOUT_BUFFER_SIZE_0 + 16*i, 2);
radeon_emit(cs, va); /* src address lo */
radeon_emit(cs, va >> 32); /* src address hi */
- radeon_add_to_buffer_list(&sctx->b, sctx->b.gfx_cs,
+ radeon_add_to_buffer_list(sctx, sctx->gfx_cs,
t[i]->buf_filled_size,
RADEON_USAGE_READ,
RADEON_PRIO_SO_FILLED_SIZE);
void si_emit_streamout_end(struct si_context *sctx)
{
- struct radeon_winsys_cs *cs = sctx->b.gfx_cs;
+ if (sctx->screen->use_ngg_streamout) {
+ gfx10_emit_streamout_end(sctx);
+ return;
+ }
+
+ struct radeon_cmdbuf *cs = sctx->gfx_cs;
struct si_streamout_target **t = sctx->streamout.targets;
unsigned i;
uint64_t va;
radeon_emit(cs, 0); /* unused */
radeon_emit(cs, 0); /* unused */
- radeon_add_to_buffer_list(&sctx->b, sctx->b.gfx_cs,
+ radeon_add_to_buffer_list(sctx, sctx->gfx_cs,
t[i]->buf_filled_size,
RADEON_USAGE_WRITE,
RADEON_PRIO_SO_FILLED_SIZE);
		/* Zero the buffer size. The counters (primitives generated,
		 * primitives emitted) may be enabled even if there is no
		 * buffer bound. This ensures that the primitives-emitted query
		 * won't increment. */
radeon_set_context_reg(cs, R_028AD0_VGT_STRMOUT_BUFFER_SIZE_0 + 16*i, 0);
+ sctx->context_roll = true;
t[i]->buf_filled_size_valid = true;
}
 * The buffer mask is an independent state, so no writes occur if there
 * are no buffers bound.
 */
-static void si_emit_streamout_enable(struct r600_common_context *rctx,
- struct r600_atom *atom)
+static void si_emit_streamout_enable(struct si_context *sctx)
{
- struct si_context *sctx = (struct si_context*)rctx;
+ assert(!sctx->screen->use_ngg_streamout);
- radeon_set_context_reg_seq(sctx->b.gfx_cs, R_028B94_VGT_STRMOUT_CONFIG, 2);
- radeon_emit(sctx->b.gfx_cs,
+ radeon_set_context_reg_seq(sctx->gfx_cs, R_028B94_VGT_STRMOUT_CONFIG, 2);
+ radeon_emit(sctx->gfx_cs,
S_028B94_STREAMOUT_0_EN(si_get_strmout_en(sctx)) |
S_028B94_RAST_STREAM(0) |
S_028B94_STREAMOUT_1_EN(si_get_strmout_en(sctx)) |
S_028B94_STREAMOUT_2_EN(si_get_strmout_en(sctx)) |
S_028B94_STREAMOUT_3_EN(si_get_strmout_en(sctx)));
- radeon_emit(sctx->b.gfx_cs,
+ radeon_emit(sctx->gfx_cs,
sctx->streamout.hw_enabled_mask &
sctx->streamout.enabled_stream_buffers_mask);
}
	sctx->streamout.hw_enabled_mask = sctx->streamout.enabled_mask |
					  (sctx->streamout.enabled_mask << 4) |
					  (sctx->streamout.enabled_mask << 8) |
					  (sctx->streamout.enabled_mask << 12);
- if ((old_strmout_en != si_get_strmout_en(sctx)) ||
- (old_hw_enabled_mask != sctx->streamout.hw_enabled_mask))
- si_mark_atom_dirty(sctx, &sctx->streamout.enable_atom);
+ if (!sctx->screen->use_ngg_streamout &&
+ ((old_strmout_en != si_get_strmout_en(sctx)) ||
+ (old_hw_enabled_mask != sctx->streamout.hw_enabled_mask)))
+ si_mark_atom_dirty(sctx, &sctx->atoms.s.streamout_enable);
}
void si_update_prims_generated_query_state(struct si_context *sctx,
unsigned type, int diff)
{
- if (type == PIPE_QUERY_PRIMITIVES_GENERATED) {
+ if (!sctx->screen->use_ngg_streamout &&
+ type == PIPE_QUERY_PRIMITIVES_GENERATED) {
bool old_strmout_en = si_get_strmout_en(sctx);
sctx->streamout.num_prims_gen_queries += diff;
		sctx->streamout.prims_gen_query_enabled =
			sctx->streamout.num_prims_gen_queries != 0;
if (old_strmout_en != si_get_strmout_en(sctx))
- si_mark_atom_dirty(sctx, &sctx->streamout.enable_atom);
+ si_mark_atom_dirty(sctx, &sctx->atoms.s.streamout_enable);
+
+ if (si_update_ngg(sctx)) {
+ si_shader_change_notify(sctx);
+ sctx->do_update_shaders = true;
+ }
}
}
void si_init_streamout_functions(struct si_context *sctx)
{
- sctx->b.b.create_stream_output_target = si_create_so_target;
- sctx->b.b.stream_output_target_destroy = si_so_target_destroy;
- sctx->b.b.set_stream_output_targets = si_set_streamout_targets;
- sctx->streamout.begin_atom.emit = si_emit_streamout_begin;
- sctx->streamout.enable_atom.emit = si_emit_streamout_enable;
+ sctx->b.create_stream_output_target = si_create_so_target;
+ sctx->b.stream_output_target_destroy = si_so_target_destroy;
+ sctx->b.set_stream_output_targets = si_set_streamout_targets;
+
+ if (sctx->screen->use_ngg_streamout) {
+ sctx->atoms.s.streamout_begin.emit = gfx10_emit_streamout_begin;
+ } else {
+ sctx->atoms.s.streamout_begin.emit = si_emit_streamout_begin;
+ sctx->atoms.s.streamout_enable.emit = si_emit_streamout_enable;
+ }
}