{
struct radeon_cmdbuf *cs = ctx->gfx_cs;
struct radeon_winsys *ws = ctx->ws;
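+ /* The PS and CS partial flushes wait for all outstanding graphics and
+ * compute shader work; all three cases below need both.
+ */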
+ const unsigned wait_ps_cs = SI_CONTEXT_PS_PARTIAL_FLUSH |
+ SI_CONTEXT_CS_PARTIAL_FLUSH;
unsigned wait_flags = 0;
if (ctx->gfx_flush_in_progress)
return;
if (!ctx->screen->info.kernel_flushes_tc_l2_after_ib) {
- wait_flags |= SI_CONTEXT_PS_PARTIAL_FLUSH |
- SI_CONTEXT_CS_PARTIAL_FLUSH |
+ wait_flags |= wait_ps_cs |
SI_CONTEXT_INV_L2;
} else if (ctx->chip_class == GFX6) {
/* The kernel flushes L2 before shaders are finished. */
- wait_flags |= SI_CONTEXT_PS_PARTIAL_FLUSH |
- SI_CONTEXT_CS_PARTIAL_FLUSH;
+ wait_flags |= wait_ps_cs;
} else if (!(flags & RADEON_FLUSH_START_NEXT_GFX_IB_NOW)) {
- wait_flags |= SI_CONTEXT_PS_PARTIAL_FLUSH |
- SI_CONTEXT_CS_PARTIAL_FLUSH;
+ wait_flags |= wait_ps_cs;
}
/* Drop this flush if it's a no-op. */
if (ctx->streamout.begin_emitted) {
si_emit_streamout_end(ctx);
ctx->streamout.suspended = true;
+
+ /* Since streamout uses GDS on gfx10, we need to make
+ * GDS idle when we leave the IB; otherwise another
+ * process might overwrite it while our shaders are busy.
+ */
+ if (ctx->chip_class >= GFX10)
+ wait_flags |= SI_CONTEXT_PS_PARTIAL_FLUSH;
}
}
ctx->flags |= wait_flags;
ctx->emit_cache_flush(ctx);
}
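+ /* The previous IB can only be considered idle if both the PS and CS waits
+ * were emitted above; a partial wait (e.g. the gfx10 streamout GDS flush)
+ * still leaves it busy.
+ */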
- ctx->gfx_last_ib_is_busy = wait_flags == 0;
+ ctx->gfx_last_ib_is_busy = (wait_flags & wait_ps_cs) != wait_ps_cs;
if (ctx->current_saved_cs) {
si_trace_emit(ctx);
radeon_add_to_buffer_list(ctx, ctx->gfx_cs, ctx->current_saved_cs->trace_buf,
RADEON_USAGE_READWRITE, RADEON_PRIO_TRACE);
}
-void si_begin_new_gfx_cs(struct si_context *ctx)
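+/* Make the GDS allocations part of the IB's buffer list, so the kernel
+ * knows this submission uses them.
+ */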
+static void si_add_gds_to_buffer_list(struct si_context *sctx)
{
- if (ctx->is_debug)
- si_begin_gfx_cs_debug(ctx);
-
- if (ctx->gds) {
- ctx->ws->cs_add_buffer(ctx->gfx_cs, ctx->gds,
+ if (sctx->gds) {
+ sctx->ws->cs_add_buffer(sctx->gfx_cs, sctx->gds,
RADEON_USAGE_READWRITE, 0, 0);
- if (ctx->gds_oa) {
- ctx->ws->cs_add_buffer(ctx->gfx_cs, ctx->gds_oa,
+ if (sctx->gds_oa) {
+ sctx->ws->cs_add_buffer(sctx->gfx_cs, sctx->gds_oa,
RADEON_USAGE_READWRITE, 0, 0);
}
}
+}
+
+void si_allocate_gds(struct si_context *sctx)
+{
+ struct radeon_winsys *ws = sctx->ws;
+
+ if (sctx->gds)
+ return;
+
+ assert(sctx->chip_class >= GFX10); /* for gfx10 streamout */
+
+ /* 4 streamout GDS counters.
+ * We need 256B (64 dw) of GDS; otherwise streamout hangs.
+ */
+ sctx->gds = ws->buffer_create(ws, 256, 4, RADEON_DOMAIN_GDS, 0);
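+ /* Ordered-append (OA) space is also needed for gfx10 streamout; the size
+ * here is assumed to correspond to one OA unit per streamout buffer.
+ */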
+ sctx->gds_oa = ws->buffer_create(ws, 4, 1, RADEON_DOMAIN_OA, 0);
+
+ assert(sctx->gds && sctx->gds_oa);
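+ /* Add them to the current IB; si_begin_new_gfx_cs re-adds them for every
+ * subsequent IB.
+ */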
+ si_add_gds_to_buffer_list(sctx);
+}
+
+void si_begin_new_gfx_cs(struct si_context *ctx)
+{
+ if (ctx->is_debug)
+ si_begin_gfx_cs_debug(ctx);
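+ /* A new IB starts with an empty buffer list, so the GDS buffers have to
+ * be re-added here.
+ */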
+ si_add_gds_to_buffer_list(ctx);
/* Always invalidate caches at the beginning of IBs, because external
* users (e.g. BO evictions and SDMA/UVD/VCE IBs) can modify our
ctx->last_prim = -1;
ctx->last_multi_vgt_param = -1;
ctx->last_rast_prim = -1;
+ ctx->last_flatshade_first = -1;
ctx->last_sc_line_stipple = ~0;
ctx->last_vs_state = ~0;
ctx->last_ls = NULL;
ctx->last_tes_sh_base = -1;
ctx->last_num_tcs_input_cp = -1;
ctx->last_ls_hs_config = -1; /* impossible value */
+ ctx->last_binning_enabled = -1;
ctx->prim_discard_compute_ib_initialized = false;
ctx->tracked_regs.reg_value[SI_TRACKED_VGT_PRIMITIVEID_EN] = 0x00000000;
ctx->tracked_regs.reg_value[SI_TRACKED_VGT_REUSE_OFF] = 0x00000000;
ctx->tracked_regs.reg_value[SI_TRACKED_SPI_VS_OUT_CONFIG] = 0x00000000;
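+ /* gfx10 NGG registers */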
+ ctx->tracked_regs.reg_value[SI_TRACKED_GE_MAX_OUTPUT_PER_SUBGROUP] = 0x00000000;
+ ctx->tracked_regs.reg_value[SI_TRACKED_GE_NGG_SUBGRP_CNTL] = 0x00000000;
+ ctx->tracked_regs.reg_value[SI_TRACKED_SPI_SHADER_IDX_FORMAT] = 0x00000000;
ctx->tracked_regs.reg_value[SI_TRACKED_SPI_SHADER_POS_FORMAT] = 0x00000000;
ctx->tracked_regs.reg_value[SI_TRACKED_PA_CL_VTE_CNTL] = 0x00000000;
+ ctx->tracked_regs.reg_value[SI_TRACKED_PA_CL_NGG_CNTL] = 0x00000000;
ctx->tracked_regs.reg_value[SI_TRACKED_SPI_PS_INPUT_ENA] = 0x00000000;
ctx->tracked_regs.reg_value[SI_TRACKED_SPI_PS_INPUT_ADDR] = 0x00000000;
ctx->tracked_regs.reg_value[SI_TRACKED_SPI_BARYC_CNTL] = 0x00000000;