*/
#include "si_pipe.h"
+#include "si_build_pm4.h"
+#include "sid.h"
#include "util/os_time.h"
+#include "util/u_upload_mgr.h"
void si_need_gfx_cs_space(struct si_context *ctx)
{
 struct radeon_cmdbuf *cs = ctx->gfx_cs;

 ctx->gtt = 0;
 ctx->vram = 0;
- /* If the IB is sufficiently large, don't count the space needed
- * and just flush if there is not enough space left.
- *
- * Also reserve space for stopping queries at the end of IB, because
- * the number of active queries is mostly unlimited.
- */
- unsigned need_dwords = 2048 + ctx->num_cs_dw_queries_suspend;
- if (!ctx->ws->cs_check_space(cs, need_dwords))
+ unsigned need_dwords = si_get_minimum_num_gfx_cs_dwords(ctx);
+ if (!ctx->ws->cs_check_space(cs, need_dwords, false))
si_flush_gfx_cs(ctx, RADEON_FLUSH_ASYNC_START_NEXT_GFX_IB_NOW, NULL);
}
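+/* Drop the references held by the pending SDMA upload list. */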
+void si_unref_sdma_uploads(struct si_context *sctx)
+{
+ for (unsigned i = 0; i < sctx->num_sdma_uploads; i++) {
+ si_resource_reference(&sctx->sdma_uploads[i].dst, NULL);
+ si_resource_reference(&sctx->sdma_uploads[i].src, NULL);
+ }
+ sctx->num_sdma_uploads = 0;
+}
+
void si_flush_gfx_cs(struct si_context *ctx, unsigned flags,
struct pipe_fence_handle **fence)
{
struct radeon_cmdbuf *cs = ctx->gfx_cs;
struct radeon_winsys *ws = ctx->ws;
+ const unsigned wait_ps_cs = SI_CONTEXT_PS_PARTIAL_FLUSH |
+ SI_CONTEXT_CS_PARTIAL_FLUSH;
unsigned wait_flags = 0;
if (ctx->gfx_flush_in_progress)
return;
if (!ctx->screen->info.kernel_flushes_tc_l2_after_ib) {
- wait_flags |= SI_CONTEXT_PS_PARTIAL_FLUSH |
- SI_CONTEXT_CS_PARTIAL_FLUSH |
- SI_CONTEXT_INV_GLOBAL_L2;
- } else if (ctx->chip_class == SI) {
+ wait_flags |= wait_ps_cs |
+ SI_CONTEXT_INV_L2;
+ } else if (ctx->chip_class == GFX6) {
/* The kernel flushes L2 before shaders are finished. */
- wait_flags |= SI_CONTEXT_PS_PARTIAL_FLUSH |
- SI_CONTEXT_CS_PARTIAL_FLUSH;
+ wait_flags |= wait_ps_cs;
} else if (!(flags & RADEON_FLUSH_START_NEXT_GFX_IB_NOW)) {
- wait_flags |= SI_CONTEXT_PS_PARTIAL_FLUSH |
- SI_CONTEXT_CS_PARTIAL_FLUSH;
+ wait_flags |= wait_ps_cs;
}
/* Drop this flush if it's a no-op. */
if (!radeon_emitted(cs, ctx->initial_gfx_cs_size) &&
 (!wait_flags || !ctx->gfx_last_ib_is_busy))
 return;

if (ctx->screen->debug_flags & DBG(CHECK_VM))
 flags &= ~PIPE_FLUSH_ASYNC;
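+ /* Guard against recursive flushes (see the early-out at the top). */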
+ ctx->gfx_flush_in_progress = true;
+
/* If the state tracker is flushing the GFX IB, si_flush_from_st is
* responsible for flushing the DMA IB and merging the fences from both.
- * This code is only needed when the driver flushes the GFX IB
- * internally, and it never asks for a fence handle.
+ * If the driver flushes the GFX IB internally, then it will never
+ * ask for a fence handle.
*/
- if (radeon_emitted(ctx->dma_cs, 0)) {
- assert(fence == NULL); /* internal flushes only */
- si_flush_dma_cs(ctx, flags, NULL);
+ assert(!radeon_emitted(ctx->dma_cs, 0) || fence == NULL);
+
+ /* Update the sdma_uploads list by flushing the uploader. */
+ u_upload_unmap(ctx->b.const_uploader);
+
+ /* Execute SDMA uploads. */
+ ctx->sdma_uploads_in_progress = true;
+ for (unsigned i = 0; i < ctx->num_sdma_uploads; i++) {
+ struct si_sdma_upload *up = &ctx->sdma_uploads[i];
+ struct pipe_box box;
+
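+ /* Offsets and sizes must be dword-aligned for the copy. */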
+ assert(up->src_offset % 4 == 0 && up->dst_offset % 4 == 0 &&
+ up->size % 4 == 0);
+
+ u_box_1d(up->src_offset, up->size, &box);
+ ctx->dma_copy(&ctx->b, &up->dst->b.b, 0, up->dst_offset, 0, 0,
+ &up->src->b.b, 0, &box);
}
+ ctx->sdma_uploads_in_progress = false;
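+ /* The winsys holds its own references for buffers in the SDMA IB,
+ * so the upload list's references can be dropped now. */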
+ si_unref_sdma_uploads(ctx);
- ctx->gfx_flush_in_progress = true;
+ /* Flush SDMA (preamble IB). */
+ if (radeon_emitted(ctx->dma_cs, 0))
+ si_flush_dma_cs(ctx, flags, NULL);
- if (!LIST_IS_EMPTY(&ctx->active_queries))
- si_suspend_queries(ctx);
+ if (radeon_emitted(ctx->prim_discard_compute_cs, 0)) {
+ struct radeon_cmdbuf *compute_cs = ctx->prim_discard_compute_cs;
+ si_compute_signal_gfx(ctx);
+
+ /* Make sure compute shaders are idle before leaving the IB, so that
+ * the next IB doesn't overwrite GDS that might be in use. */
+ radeon_emit(compute_cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
+ radeon_emit(compute_cs, EVENT_TYPE(V_028A90_CS_PARTIAL_FLUSH) |
+ EVENT_INDEX(4));
+
+ /* Save the GDS prim restart counter if needed. */
+ if (ctx->preserve_prim_restart_gds_at_flush) {
+ si_cp_copy_data(ctx, compute_cs,
+ COPY_DATA_DST_MEM, ctx->wait_mem_scratch, 4,
+ COPY_DATA_GDS, NULL, 4);
+ }
+ }
- ctx->streamout.suspended = false;
- if (ctx->streamout.begin_emitted) {
- si_emit_streamout_end(ctx);
- ctx->streamout.suspended = true;
+ if (ctx->has_graphics) {
+ if (!LIST_IS_EMPTY(&ctx->active_queries))
+ si_suspend_queries(ctx);
+
+ ctx->streamout.suspended = false;
+ if (ctx->streamout.begin_emitted) {
+ si_emit_streamout_end(ctx);
+ ctx->streamout.suspended = true;
+
+ /* Since streamout uses GDS on gfx10, we need to make
+ * GDS idle when we leave the IB, otherwise another
+ * process might overwrite it while our shaders are busy.
+ */
+ if (ctx->chip_class >= GFX10)
+ wait_flags |= SI_CONTEXT_PS_PARTIAL_FLUSH;
+ }
}
/* Make sure CP DMA is idle at the end of IBs after L2 prefetches
* because the kernel doesn't wait for it. */
- if (ctx->chip_class >= CIK)
+ if (ctx->chip_class >= GFX7)
si_cp_dma_wait_for_idle(ctx);
/* Wait for draw calls to finish if needed. */
if (wait_flags) {
ctx->flags |= wait_flags;
- si_emit_cache_flush(ctx);
+ ctx->emit_cache_flush(ctx);
}
- ctx->gfx_last_ib_is_busy = wait_flags == 0;
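+ /* The IB is only considered idle if we waited for both PS and CS
+ * work to complete. */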
+ ctx->gfx_last_ib_is_busy = (wait_flags & wait_ps_cs) != wait_ps_cs;
if (ctx->current_saved_cs) {
si_trace_emit(ctx);
si_log_hw_flush(ctx);
}
+ if (si_compute_prim_discard_enabled(ctx)) {
+ /* The compute IB can start after the previous gfx IB starts. */
+ if (radeon_emitted(ctx->prim_discard_compute_cs, 0) &&
+ ctx->last_gfx_fence) {
+ ctx->ws->cs_add_fence_dependency(ctx->gfx_cs,
+ ctx->last_gfx_fence,
+ RADEON_DEPENDENCY_PARALLEL_COMPUTE_ONLY |
+ RADEON_DEPENDENCY_START_FENCE);
+ }
+
+ /* Remember the last execution barrier. It's in the IB.
+ * It will signal the start of the next compute IB.
+ */
+ if (flags & RADEON_FLUSH_START_NEXT_GFX_IB_NOW &&
+ ctx->last_pkt3_write_data) {
+ *ctx->last_pkt3_write_data = PKT3(PKT3_WRITE_DATA, 3, 0);
+ ctx->last_pkt3_write_data = NULL;
+
+ si_resource_reference(&ctx->last_ib_barrier_buf, ctx->barrier_buf);
+ ctx->last_ib_barrier_buf_offset = ctx->barrier_buf_offset;
+ si_resource_reference(&ctx->barrier_buf, NULL);
+
+ ws->fence_reference(&ctx->last_ib_barrier_fence, NULL);
+ }
+ }
+
/* Flush the CS. */
ws->cs_flush(cs, flags, &ctx->last_gfx_fence);
if (fence)
 ws->fence_reference(fence, ctx->last_gfx_fence);

ctx->num_gfx_cs_flushes++;
+ if (si_compute_prim_discard_enabled(ctx)) {
+ /* Remember the last execution barrier, which is the last fence
+ * in this case.
+ */
+ if (!(flags & RADEON_FLUSH_START_NEXT_GFX_IB_NOW)) {
+ ctx->last_pkt3_write_data = NULL;
+ si_resource_reference(&ctx->last_ib_barrier_buf, NULL);
+ ws->fence_reference(&ctx->last_ib_barrier_fence, ctx->last_gfx_fence);
+ }
+ }
+
/* Check VM faults if needed. */
if (ctx->screen->debug_flags & DBG(CHECK_VM)) {
/* Use a conservative timeout of 800ms, after which we won't wait any
 * longer and will assume the GPU is hung. */
ctx->ws->fence_wait(ctx->ws, ctx->last_gfx_fence, 800 * 1000 * 1000);
RADEON_USAGE_READWRITE, RADEON_PRIO_TRACE);
}
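+/* Add the GDS and OA allocations to the gfx IB's buffer list, so the
+ * kernel keeps them allocated while the IB executes. */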
+static void si_add_gds_to_buffer_list(struct si_context *sctx)
+{
+ if (sctx->gds) {
+ sctx->ws->cs_add_buffer(sctx->gfx_cs, sctx->gds,
+ RADEON_USAGE_READWRITE, 0, 0);
+ if (sctx->gds_oa) {
+ sctx->ws->cs_add_buffer(sctx->gfx_cs, sctx->gds_oa,
+ RADEON_USAGE_READWRITE, 0, 0);
+ }
+ }
+}
+
+void si_allocate_gds(struct si_context *sctx)
+{
+ struct radeon_winsys *ws = sctx->ws;
+
+ if (sctx->gds)
+ return;
+
+ assert(sctx->chip_class >= GFX10); /* for gfx10 streamout */
+
+ /* 4 streamout GDS counters.
+ * We need 256B (64 dw) of GDS, otherwise streamout hangs.
+ */
+ sctx->gds = ws->buffer_create(ws, 256, 4, RADEON_DOMAIN_GDS, 0);
+ sctx->gds_oa = ws->buffer_create(ws, 4, 1, RADEON_DOMAIN_OA, 0);
+
+ assert(sctx->gds && sctx->gds_oa);
+ si_add_gds_to_buffer_list(sctx);
+}
+
void si_begin_new_gfx_cs(struct si_context *ctx)
{
if (ctx->is_debug)
si_begin_gfx_cs_debug(ctx);
+ si_add_gds_to_buffer_list(ctx);
+
/* Always invalidate caches at the beginning of IBs, because external
* users (e.g. BO evictions and SDMA/UVD/VCE IBs) can modify our
* buffers.
* TODO: Do we also need to invalidate CB & DB caches?
*/
ctx->flags |= SI_CONTEXT_INV_ICACHE |
- SI_CONTEXT_INV_SMEM_L1 |
- SI_CONTEXT_INV_VMEM_L1 |
- SI_CONTEXT_INV_GLOBAL_L2 |
+ SI_CONTEXT_INV_SCACHE |
+ SI_CONTEXT_INV_VCACHE |
+ SI_CONTEXT_INV_L2 |
SI_CONTEXT_START_PIPELINE_STATS;
+ ctx->cs_shader_state.initialized = false;
+ si_all_descriptors_begin_new_cs(ctx);
+
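+ /* Compute-only contexts have no graphics state to re-emit. */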
+ if (!ctx->has_graphics) {
+ ctx->initial_gfx_cs_size = ctx->gfx_cs->current.cdw;
+ return;
+ }
+
/* Set all valid groups as dirty so they get re-emitted on the
 * next draw command.
 */
si_mark_atom_dirty(ctx, &ctx->atoms.s.dpbb_state);
si_mark_atom_dirty(ctx, &ctx->atoms.s.stencil_ref);
si_mark_atom_dirty(ctx, &ctx->atoms.s.spi_map);
- si_mark_atom_dirty(ctx, &ctx->atoms.s.streamout_enable);
+ if (ctx->chip_class < GFX10)
+ si_mark_atom_dirty(ctx, &ctx->atoms.s.streamout_enable);
si_mark_atom_dirty(ctx, &ctx->atoms.s.render_cond);
/* CLEAR_STATE disables all window rectangles. */
if (!has_clear_state || ctx->num_window_rectangles > 0)
si_mark_atom_dirty(ctx, &ctx->atoms.s.window_rectangles);
- si_all_descriptors_begin_new_cs(ctx);
- si_all_resident_buffers_begin_new_cs(ctx);
- ctx->scissors.dirty_mask = (1 << SI_MAX_VIEWPORTS) - 1;
- ctx->viewports.dirty_mask = (1 << SI_MAX_VIEWPORTS) - 1;
- ctx->viewports.depth_range_dirty_mask = (1 << SI_MAX_VIEWPORTS) - 1;
si_mark_atom_dirty(ctx, &ctx->atoms.s.guardband);
si_mark_atom_dirty(ctx, &ctx->atoms.s.scissors);
si_mark_atom_dirty(ctx, &ctx->atoms.s.viewports);
ctx->last_prim = -1;
ctx->last_multi_vgt_param = -1;
ctx->last_rast_prim = -1;
+ ctx->last_flatshade_first = -1;
ctx->last_sc_line_stipple = ~0;
ctx->last_vs_state = ~0;
ctx->last_ls = NULL;
ctx->last_tes_sh_base = -1;
ctx->last_num_tcs_input_cp = -1;
ctx->last_ls_hs_config = -1; /* impossible value */
+ ctx->last_binning_enabled = -1;
- ctx->cs_shader_state.initialized = false;
+ ctx->prim_discard_compute_ib_initialized = false;
+
+ /* Compute-based primitive discard:
+ * The index ring is divided into 2 halves. Switch between the halves
+ * in the same fashion as double buffering.
+ */
+ if (ctx->index_ring_base)
+ ctx->index_ring_base = 0;
+ else
+ ctx->index_ring_base = ctx->index_ring_size_per_ib;
+
+ ctx->index_ring_offset = 0;
if (has_clear_state) {
ctx->tracked_regs.reg_value[SI_TRACKED_DB_RENDER_CONTROL] = 0x00000000;
ctx->tracked_regs.reg_value[SI_TRACKED_VGT_GSVS_RING_OFFSET_1] = 0x00000000;
ctx->tracked_regs.reg_value[SI_TRACKED_VGT_GSVS_RING_OFFSET_2] = 0x00000000;
ctx->tracked_regs.reg_value[SI_TRACKED_VGT_GSVS_RING_OFFSET_3] = 0x00000000;
- ctx->tracked_regs.reg_value[SI_TRACKED_VGT_GS_OUT_PRIM_TYPE] = 0x00000000;
ctx->tracked_regs.reg_value[SI_TRACKED_VGT_GSVS_RING_ITEMSIZE] = 0x00000000;
ctx->tracked_regs.reg_value[SI_TRACKED_VGT_GS_MAX_VERT_OUT] = 0x00000000;
ctx->tracked_regs.reg_value[SI_TRACKED_VGT_GS_VERT_ITEMSIZE] = 0x00000000;
ctx->tracked_regs.reg_value[SI_TRACKED_VGT_PRIMITIVEID_EN] = 0x00000000;
ctx->tracked_regs.reg_value[SI_TRACKED_VGT_REUSE_OFF] = 0x00000000;
ctx->tracked_regs.reg_value[SI_TRACKED_SPI_VS_OUT_CONFIG] = 0x00000000;
+ ctx->tracked_regs.reg_value[SI_TRACKED_GE_MAX_OUTPUT_PER_SUBGROUP] = 0x00000000;
+ ctx->tracked_regs.reg_value[SI_TRACKED_GE_NGG_SUBGRP_CNTL] = 0x00000000;
+ ctx->tracked_regs.reg_value[SI_TRACKED_SPI_SHADER_IDX_FORMAT] = 0x00000000;
ctx->tracked_regs.reg_value[SI_TRACKED_SPI_SHADER_POS_FORMAT] = 0x00000000;
ctx->tracked_regs.reg_value[SI_TRACKED_PA_CL_VTE_CNTL] = 0x00000000;
+ ctx->tracked_regs.reg_value[SI_TRACKED_PA_CL_NGG_CNTL] = 0x00000000;
ctx->tracked_regs.reg_value[SI_TRACKED_SPI_PS_INPUT_ENA] = 0x00000000;
ctx->tracked_regs.reg_value[SI_TRACKED_SPI_PS_INPUT_ADDR] = 0x00000000;
ctx->tracked_regs.reg_value[SI_TRACKED_SPI_BARYC_CNTL] = 0x00000000;
ctx->tracked_regs.reg_value[SI_TRACKED_SPI_SHADER_COL_FORMAT] = 0x00000000;
ctx->tracked_regs.reg_value[SI_TRACKED_CB_SHADER_MASK] = 0xffffffff;
ctx->tracked_regs.reg_value[SI_TRACKED_VGT_TF_PARAM] = 0x00000000;
- ctx->tracked_regs.reg_value[SI_TRACKED_VGT_VERTEX_REUSE_BLOCK_CNTL] = 0x0000001e; /* From VI */
+ ctx->tracked_regs.reg_value[SI_TRACKED_VGT_VERTEX_REUSE_BLOCK_CNTL] = 0x0000001e; /* From GFX8 */
/* Set all saved registers state to saved. */
ctx->tracked_regs.reg_saved = 0xffffffffffffffff;