*/
#include "si_build_pm4.h"
-#include "gfx9d.h"
+#include "sid.h"
#include "util/u_index_modify.h"
#include "util/u_log.h"
/* When distributed tessellation is unsupported, switch between SEs
* at a higher frequency to compensate for it.
*/
- if (!sctx->screen->has_distributed_tess && sctx->screen->info.max_se > 1)
+ if (!sctx->screen->info.has_distributed_tess && sctx->screen->info.max_se > 1)
*num_patches = MIN2(*num_patches, 16); /* recommended */
/* Make sure that vector lanes are reasonably occupied. It probably
* doesn't matter much, because this is LS-HS, and TES is likely to
* occupy significantly more CUs.
*/
unsigned temp_verts_per_tg = *num_patches * max_verts_per_patch;
- if (temp_verts_per_tg > 64 && temp_verts_per_tg % 64 < 48)
- *num_patches = (temp_verts_per_tg & ~63) / max_verts_per_patch;
+ unsigned wave_size = sctx->screen->ge_wave_size;
+
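+ /* Example with assumed values: wave_size = 64 and max_verts_per_patch = 4
+ * give temp_verts_per_tg = 65 * 4 = 260; the remainder 260 % 64 = 4 is
+ * below 48, so the count is rounded down to 260 & ~63 = 256 lanes,
+ * i.e. *num_patches = 256 / 4 = 64.
+ */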
+ if (temp_verts_per_tg > wave_size && temp_verts_per_tg % wave_size < wave_size*3/4)
+ *num_patches = (temp_verts_per_tg & ~(wave_size - 1)) / max_verts_per_patch;
if (sctx->chip_class == GFX6) {
/* GFX6 bug workaround, related to power management. Limit LS-HS
* threadgroups to only one wave.
*/
- unsigned one_wave = 64 / max_verts_per_patch;
+ unsigned one_wave = wave_size / max_verts_per_patch;
*num_patches = MIN2(*num_patches, one_wave);
}
C_VS_STATE_LS_OUT_VERTEX_SIZE;
sctx->current_vs_state |= tcs_in_layout;
+ /* We should be able to support in-shader LDS use with LLVM >= 9
+ * by just adding the lds_sizes together, but it has never
+ * been tested. */
+ assert(ls_current->config.lds_size == 0);
+
if (sctx->chip_class >= GFX9) {
- unsigned hs_rsrc2 = ls_current->config.rsrc2 |
- S_00B42C_LDS_SIZE(lds_size);
+ unsigned hs_rsrc2 = ls_current->config.rsrc2;
+
+ if (sctx->chip_class >= GFX10)
+ hs_rsrc2 |= S_00B42C_LDS_SIZE_GFX10(lds_size);
+ else
+ hs_rsrc2 |= S_00B42C_LDS_SIZE_GFX9(lds_size);
radeon_set_sh_reg(cs, R_00B42C_SPI_SHADER_PGM_RSRC2_HS, hs_rsrc2);
partial_vs_wave = true;
/* Needed for 028B6C_DISTRIBUTION_MODE != 0. (implies >= GFX8) */
- if (sscreen->has_distributed_tess) {
+ if (sscreen->info.has_distributed_tess) {
if (key->u.uses_gs) {
if (sscreen->info.chip_class == GFX8)
partial_es_wave = true;
}
}
+static bool si_is_line_stipple_enabled(struct si_context *sctx)
+{
+ struct si_state_rasterizer *rs = sctx->queued.named.rasterizer;
+
+ return rs->line_stipple_enable &&
+ sctx->current_rast_prim != PIPE_PRIM_POINTS &&
+ (rs->polygon_mode_is_lines ||
+ util_prim_is_lines(sctx->current_rast_prim));
+}
+
static unsigned si_get_ia_multi_vgt_param(struct si_context *sctx,
const struct pipe_draw_info *info,
enum pipe_prim_type prim,
si_num_prims_for_vertices(info, prim) < primgroup_size));
key.u.primitive_restart = primitive_restart;
key.u.count_from_stream_output = info->count_from_stream_output != NULL;
+ key.u.line_stipple_enabled = si_is_line_stipple_enabled(sctx);
ia_multi_vgt_param = sctx->ia_multi_vgt_param[key.index] |
S_028AA8_PRIMGROUP_SIZE(primgroup_size - 1);
return ia_multi_vgt_param;
}
+static unsigned si_conv_prim_to_gs_out(unsigned mode)
+{
+ static const int prim_conv[] = {
+ [PIPE_PRIM_POINTS] = V_028A6C_OUTPRIM_TYPE_POINTLIST,
+ [PIPE_PRIM_LINES] = V_028A6C_OUTPRIM_TYPE_LINESTRIP,
+ [PIPE_PRIM_LINE_LOOP] = V_028A6C_OUTPRIM_TYPE_LINESTRIP,
+ [PIPE_PRIM_LINE_STRIP] = V_028A6C_OUTPRIM_TYPE_LINESTRIP,
+ [PIPE_PRIM_TRIANGLES] = V_028A6C_OUTPRIM_TYPE_TRISTRIP,
+ [PIPE_PRIM_TRIANGLE_STRIP] = V_028A6C_OUTPRIM_TYPE_TRISTRIP,
+ [PIPE_PRIM_TRIANGLE_FAN] = V_028A6C_OUTPRIM_TYPE_TRISTRIP,
+ [PIPE_PRIM_QUADS] = V_028A6C_OUTPRIM_TYPE_TRISTRIP,
+ [PIPE_PRIM_QUAD_STRIP] = V_028A6C_OUTPRIM_TYPE_TRISTRIP,
+ [PIPE_PRIM_POLYGON] = V_028A6C_OUTPRIM_TYPE_TRISTRIP,
+ [PIPE_PRIM_LINES_ADJACENCY] = V_028A6C_OUTPRIM_TYPE_LINESTRIP,
+ [PIPE_PRIM_LINE_STRIP_ADJACENCY] = V_028A6C_OUTPRIM_TYPE_LINESTRIP,
+ [PIPE_PRIM_TRIANGLES_ADJACENCY] = V_028A6C_OUTPRIM_TYPE_TRISTRIP,
+ [PIPE_PRIM_TRIANGLE_STRIP_ADJACENCY] = V_028A6C_OUTPRIM_TYPE_TRISTRIP,
+ [PIPE_PRIM_PATCHES] = V_028A6C_OUTPRIM_TYPE_POINTLIST,
+ [SI_PRIM_RECTANGLE_LIST] = V_028A6C_VGT_OUT_RECT_V0,
+ };
+ assert(mode < ARRAY_SIZE(prim_conv));
+
+ return prim_conv[mode];
+}
+
/* rast_prim is the primitive type after GS. */
static void si_emit_rasterizer_prim_state(struct si_context *sctx)
{
struct radeon_cmdbuf *cs = sctx->gfx_cs;
enum pipe_prim_type rast_prim = sctx->current_rast_prim;
struct si_state_rasterizer *rs = sctx->queued.named.rasterizer;
+ unsigned initial_cdw = cs->current.cdw;
- /* Skip this if not rendering lines. */
- if (!util_prim_is_lines(rast_prim))
- return;
+ if (unlikely(si_is_line_stipple_enabled(sctx))) {
+ /* For lines, reset the stipple pattern at each primitive. Otherwise,
+ * reset the stipple pattern at each packet (line strips, line loops).
+ */
+ unsigned value = rs->pa_sc_line_stipple |
+ S_028A0C_AUTO_RESET_CNTL(rast_prim == PIPE_PRIM_LINES ? 1 : 2);
- if (rast_prim == sctx->last_rast_prim &&
- rs->pa_sc_line_stipple == sctx->last_sc_line_stipple)
- return;
+ radeon_opt_set_context_reg(sctx, R_028A0C_PA_SC_LINE_STIPPLE,
+ SI_TRACKED_PA_SC_LINE_STIPPLE, value);
+ }
- /* For lines, reset the stipple pattern at each primitive. Otherwise,
- * reset the stipple pattern at each packet (line strips, line loops).
- */
- radeon_set_context_reg(cs, R_028A0C_PA_SC_LINE_STIPPLE,
- rs->pa_sc_line_stipple |
- S_028A0C_AUTO_RESET_CNTL(rast_prim == PIPE_PRIM_LINES ? 1 : 2));
+ unsigned gs_out_prim = si_conv_prim_to_gs_out(rast_prim);
+ if (unlikely(gs_out_prim != sctx->last_gs_out_prim &&
+ (sctx->ngg || sctx->gs_shader.cso))) {
+ radeon_set_context_reg(cs, R_028A6C_VGT_GS_OUT_PRIM_TYPE, gs_out_prim);
+ sctx->last_gs_out_prim = gs_out_prim;
+ }
+
+ if (initial_cdw != cs->current.cdw)
+ sctx->context_roll = true;
+
+ if (sctx->ngg) {
+ unsigned vtx_index = rs->flatshade_first ? 0 : gs_out_prim;
- sctx->last_rast_prim = rast_prim;
- sctx->last_sc_line_stipple = rs->pa_sc_line_stipple;
- sctx->context_roll = true;
+ sctx->current_vs_state &= C_VS_STATE_OUTPRIM &
+ C_VS_STATE_PROVOKING_VTX_INDEX;
+ sctx->current_vs_state |= S_VS_STATE_OUTPRIM(gs_out_prim) |
+ S_VS_STATE_PROVOKING_VTX_INDEX(vtx_index);
+ }
}
static void si_emit_vs_state(struct si_context *sctx,
if (sctx->current_vs_state != sctx->last_vs_state) {
struct radeon_cmdbuf *cs = sctx->gfx_cs;
- /* For the API vertex shader (VS_STATE_INDEXED). */
+ /* For the API vertex shader (VS_STATE_INDEXED, LS_OUT_*). */
radeon_set_sh_reg(cs,
sctx->shader_pointers.sh_base[PIPE_SHADER_VERTEX] +
SI_SGPR_VS_STATE_BITS * 4,
sctx->current_vs_state);
- /* For vertex color clamping, which is done in the last stage
- * before the rasterizer. */
- if (sctx->gs_shader.cso || sctx->tes_shader.cso) {
- /* GS copy shader or TES if GS is missing. */
+ /* Set CLAMP_VERTEX_COLOR and OUTPRIM in the last stage
+ * before the rasterizer.
+ *
+ * For TES or the GS copy shader without NGG:
+ */
+ if (sctx->shader_pointers.sh_base[PIPE_SHADER_VERTEX] !=
+ R_00B130_SPI_SHADER_USER_DATA_VS_0) {
radeon_set_sh_reg(cs,
R_00B130_SPI_SHADER_USER_DATA_VS_0 +
SI_SGPR_VS_STATE_BITS * 4,
sctx->current_vs_state);
}
+ /* For NGG: */
+ if (sctx->screen->use_ngg &&
+ sctx->shader_pointers.sh_base[PIPE_SHADER_VERTEX] !=
+ R_00B230_SPI_SHADER_USER_DATA_GS_0) {
+ radeon_set_sh_reg(cs,
+ R_00B230_SPI_SHADER_USER_DATA_GS_0 +
+ SI_SGPR_VS_STATE_BITS * 4,
+ sctx->current_vs_state);
+ }
+
sctx->last_vs_state = sctx->current_vs_state;
}
}
sctx->last_restart_index == SI_RESTART_INDEX_UNKNOWN);
}
-static void si_emit_draw_registers(struct si_context *sctx,
- const struct pipe_draw_info *info,
- enum pipe_prim_type prim,
- unsigned num_patches,
- unsigned instance_count,
- bool primitive_restart)
+static void si_emit_ia_multi_vgt_param(struct si_context *sctx,
+ const struct pipe_draw_info *info,
+ enum pipe_prim_type prim,
+ unsigned num_patches,
+ unsigned instance_count,
+ bool primitive_restart)
{
struct radeon_cmdbuf *cs = sctx->gfx_cs;
- unsigned vgt_prim = si_conv_pipe_prim(prim);
unsigned ia_multi_vgt_param;
ia_multi_vgt_param = si_get_ia_multi_vgt_param(sctx, info, prim, num_patches,
/* Draw state. */
if (ia_multi_vgt_param != sctx->last_multi_vgt_param) {
- if (sctx->chip_class >= GFX9)
+ if (sctx->chip_class == GFX9)
radeon_set_uconfig_reg_idx(cs, sctx->screen,
R_030960_IA_MULTI_VGT_PARAM, 4,
ia_multi_vgt_param);
sctx->last_multi_vgt_param = ia_multi_vgt_param;
}
+}
+
+/* GFX10 removed IA_MULTI_VGT_PARAM in exchange for GE_CNTL.
+ * We overload last_multi_vgt_param.
+ */
+static void gfx10_emit_ge_cntl(struct si_context *sctx, unsigned num_patches)
+{
+ union si_vgt_param_key key = sctx->ia_multi_vgt_param_key;
+ unsigned ge_cntl;
+
+ if (sctx->ngg) {
+ if (sctx->tes_shader.cso) {
+ ge_cntl = S_03096C_PRIM_GRP_SIZE(num_patches) |
+ S_03096C_VERT_GRP_SIZE(256) | /* 256 = disable vertex grouping */
+ S_03096C_BREAK_WAVE_AT_EOI(key.u.tess_uses_prim_id);
+ } else {
+ ge_cntl = si_get_vs_state(sctx)->ge_cntl;
+ }
+ } else {
+ unsigned primgroup_size;
+ unsigned vertgroup_size = 256; /* 256 = disable vertex grouping */
+
+ if (sctx->tes_shader.cso) {
+ primgroup_size = num_patches; /* must be a multiple of NUM_PATCHES */
+ } else if (sctx->gs_shader.cso) {
+ unsigned vgt_gs_onchip_cntl = sctx->gs_shader.current->ctx_reg.gs.vgt_gs_onchip_cntl;
+ primgroup_size = G_028A44_GS_PRIMS_PER_SUBGRP(vgt_gs_onchip_cntl);
+ } else {
+ primgroup_size = 128; /* recommended without a GS and tess */
+ }
+
+ ge_cntl = S_03096C_PRIM_GRP_SIZE(primgroup_size) |
+ S_03096C_VERT_GRP_SIZE(vertgroup_size) |
+ S_03096C_BREAK_WAVE_AT_EOI(key.u.uses_tess && key.u.tess_uses_prim_id);
+ }
+
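+ /* Assumption, not stated in this change: routing the whole packet to a
+ * single PA presumably keeps the line-stipple pattern advancing
+ * consistently instead of being split across PAs.
+ */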
+ ge_cntl |= S_03096C_PACKET_TO_ONE_PA(si_is_line_stipple_enabled(sctx));
+
+ if (ge_cntl != sctx->last_multi_vgt_param) {
+ radeon_set_uconfig_reg(sctx->gfx_cs, R_03096C_GE_CNTL, ge_cntl);
+ sctx->last_multi_vgt_param = ge_cntl;
+ }
+}
+
+static void si_emit_draw_registers(struct si_context *sctx,
+ const struct pipe_draw_info *info,
+ enum pipe_prim_type prim,
+ unsigned num_patches,
+ unsigned instance_count,
+ bool primitive_restart)
+{
+ struct radeon_cmdbuf *cs = sctx->gfx_cs;
+ unsigned vgt_prim = si_conv_pipe_prim(prim);
+
+ if (sctx->chip_class >= GFX10)
+ gfx10_emit_ge_cntl(sctx, num_patches);
+ else
+ si_emit_ia_multi_vgt_param(sctx, info, prim, num_patches,
+ instance_count, primitive_restart);
+
if (vgt_prim != sctx->last_prim) {
- if (sctx->chip_class >= GFX7)
+ if (sctx->chip_class >= GFX10)
+ radeon_set_uconfig_reg(cs, R_030908_VGT_PRIMITIVE_TYPE, vgt_prim);
+ else if (sctx->chip_class >= GFX7)
radeon_set_uconfig_reg_idx(cs, sctx->screen,
R_030908_VGT_PRIMITIVE_TYPE, 1, vgt_prim);
else
if (original_index_size) {
index_max_size = (indexbuf->width0 - index_offset) /
original_index_size;
+ /* Skip draw calls with 0-sized index buffers.
+ * They cause a hang on some chips, like Navi10-14.
+ */
+ if (!index_max_size)
+ return;
+
index_va = si_resource(indexbuf)->gpu_address + index_offset;
radeon_add_to_buffer_list(sctx, sctx->gfx_cs,
}
}
-static void si_emit_surface_sync(struct si_context *sctx,
- unsigned cp_coher_cntl)
+void si_emit_surface_sync(struct si_context *sctx, struct radeon_cmdbuf *cs,
+ unsigned cp_coher_cntl)
{
- struct radeon_cmdbuf *cs = sctx->gfx_cs;
+ bool compute_ib = !sctx->has_graphics ||
+ cs == sctx->prim_discard_compute_cs;
+
+ assert(sctx->chip_class <= GFX9);
- if (sctx->chip_class >= GFX9 || !sctx->has_graphics) {
+ if (sctx->chip_class == GFX9 || compute_ib) {
/* Flush caches and wait for the caches to assert idle. */
radeon_emit(cs, PKT3(PKT3_ACQUIRE_MEM, 5, 0));
radeon_emit(cs, cp_coher_cntl); /* CP_COHER_CNTL */
/* ACQUIRE_MEM has an implicit context roll if the current context
* is busy. */
- if (sctx->has_graphics)
+ if (!compute_ib)
sctx->context_roll = true;
}
*sctx->last_pkt3_write_data = PKT3(PKT3_NOP, 3, 0);
}
+void gfx10_emit_cache_flush(struct si_context *ctx)
+{
+ struct radeon_cmdbuf *cs = ctx->gfx_cs;
+ uint32_t gcr_cntl = 0;
+ unsigned cb_db_event = 0;
+ unsigned flags = ctx->flags;
+
+ if (!ctx->has_graphics) {
+ /* Only process compute flags. */
+ flags &= SI_CONTEXT_INV_ICACHE |
+ SI_CONTEXT_INV_SCACHE |
+ SI_CONTEXT_INV_VCACHE |
+ SI_CONTEXT_INV_L2 |
+ SI_CONTEXT_WB_L2 |
+ SI_CONTEXT_INV_L2_METADATA |
+ SI_CONTEXT_CS_PARTIAL_FLUSH;
+ }
+
+ /* We don't need these. */
+ assert(!(flags & (SI_CONTEXT_VGT_STREAMOUT_SYNC |
+ SI_CONTEXT_FLUSH_AND_INV_DB_META)));
+
+ if (flags & SI_CONTEXT_VGT_FLUSH) {
+ radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
+ radeon_emit(cs, EVENT_TYPE(V_028A90_VGT_FLUSH) | EVENT_INDEX(0));
+ }
+
+ if (flags & SI_CONTEXT_FLUSH_AND_INV_CB)
+ ctx->num_cb_cache_flushes++;
+ if (flags & SI_CONTEXT_FLUSH_AND_INV_DB)
+ ctx->num_db_cache_flushes++;
+
+ if (flags & SI_CONTEXT_INV_ICACHE)
+ gcr_cntl |= S_586_GLI_INV(V_586_GLI_ALL);
+ if (flags & SI_CONTEXT_INV_SCACHE) {
+ /* TODO: When writing to the SMEM L1 cache, we need to set SEQ
+ * to FORWARD when both L1 and L2 are written out (WB or INV).
+ */
+ gcr_cntl |= S_586_GL1_INV(1) | S_586_GLK_INV(1);
+ }
+ if (flags & SI_CONTEXT_INV_VCACHE)
+ gcr_cntl |= S_586_GL1_INV(1) | S_586_GLV_INV(1);
+
+ /* The L2 cache ops are:
+ * - INV: - invalidate lines that reflect memory (were loaded from memory)
+ * - don't touch lines that were overwritten (were stored by gfx clients)
+ * - WB: - don't touch lines that reflect memory
+ * - write back lines that were overwritten
+ * - WB | INV: - invalidate lines that reflect memory
+ * - write back lines that were overwritten
+ *
+ * GLM doesn't support WB alone. If WB is set, INV must be set too.
+ */
+ if (flags & SI_CONTEXT_INV_L2) {
+ /* Writeback and invalidate everything in L2. */
+ gcr_cntl |= S_586_GL2_INV(1) | S_586_GL2_WB(1) |
+ S_586_GLM_INV(1) | S_586_GLM_WB(1);
+ ctx->num_L2_invalidates++;
+ } else if (flags & SI_CONTEXT_WB_L2) {
+ gcr_cntl |= S_586_GL2_WB(1) |
+ S_586_GLM_WB(1) | S_586_GLM_INV(1);
+ } else if (flags & SI_CONTEXT_INV_L2_METADATA) {
+ gcr_cntl |= S_586_GLM_INV(1) | S_586_GLM_WB(1);
+ }
+
+ if (flags & (SI_CONTEXT_FLUSH_AND_INV_CB | SI_CONTEXT_FLUSH_AND_INV_DB)) {
+ if (flags & SI_CONTEXT_FLUSH_AND_INV_CB) {
+ /* Flush CMASK/FMASK/DCC. Will wait for idle later. */
+ radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
+ radeon_emit(cs, EVENT_TYPE(V_028A90_FLUSH_AND_INV_CB_META) |
+ EVENT_INDEX(0));
+ }
+ if (flags & SI_CONTEXT_FLUSH_AND_INV_DB) {
+ /* Flush HTILE. Will wait for idle later. */
+ radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
+ radeon_emit(cs, EVENT_TYPE(V_028A90_FLUSH_AND_INV_DB_META) |
+ EVENT_INDEX(0));
+ }
+
+ /* First flush CB/DB, then L1/L2. */
+ gcr_cntl |= S_586_SEQ(V_586_SEQ_FORWARD);
+
+ if ((flags & (SI_CONTEXT_FLUSH_AND_INV_CB | SI_CONTEXT_FLUSH_AND_INV_DB)) ==
+ (SI_CONTEXT_FLUSH_AND_INV_CB | SI_CONTEXT_FLUSH_AND_INV_DB)) {
+ cb_db_event = V_028A90_CACHE_FLUSH_AND_INV_TS_EVENT;
+ } else if (flags & SI_CONTEXT_FLUSH_AND_INV_CB) {
+ cb_db_event = V_028A90_FLUSH_AND_INV_CB_DATA_TS;
+ } else if (flags & SI_CONTEXT_FLUSH_AND_INV_DB) {
+ cb_db_event = V_028A90_FLUSH_AND_INV_DB_DATA_TS;
+ } else {
+ assert(0);
+ }
+ } else {
+ /* Wait for graphics shaders to go idle if requested. */
+ if (flags & SI_CONTEXT_PS_PARTIAL_FLUSH) {
+ radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
+ radeon_emit(cs, EVENT_TYPE(V_028A90_PS_PARTIAL_FLUSH) | EVENT_INDEX(4));
+ /* Only count explicit shader flushes, not implicit ones. */
+ ctx->num_vs_flushes++;
+ ctx->num_ps_flushes++;
+ } else if (flags & SI_CONTEXT_VS_PARTIAL_FLUSH) {
+ radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
+ radeon_emit(cs, EVENT_TYPE(V_028A90_VS_PARTIAL_FLUSH) | EVENT_INDEX(4));
+ ctx->num_vs_flushes++;
+ }
+ }
+
+ if (flags & SI_CONTEXT_CS_PARTIAL_FLUSH && ctx->compute_is_busy) {
+ radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
+ radeon_emit(cs, EVENT_TYPE(V_028A90_CS_PARTIAL_FLUSH) | EVENT_INDEX(4));
+ ctx->num_cs_flushes++;
+ ctx->compute_is_busy = false;
+ }
+
+ if (cb_db_event) {
+ /* CB/DB flush and invalidate (or possibly just a wait for a
+ * meta flush) via RELEASE_MEM.
+ *
+ * Combine this with other cache flushes when possible; this
+ * requires affected shaders to be idle, so do it after the
+ * CS_PARTIAL_FLUSH above (VS/PS partial flushes are always
+ * implied).
+ */
+ uint64_t va;
+
+ /* Do the flush (enqueue the event and wait for it). */
+ va = ctx->wait_mem_scratch->gpu_address;
+ ctx->wait_mem_number++;
+
+ /* Get GCR_CNTL fields, because the encoding is different in RELEASE_MEM. */
+ unsigned glm_wb = G_586_GLM_WB(gcr_cntl);
+ unsigned glm_inv = G_586_GLM_INV(gcr_cntl);
+ unsigned glv_inv = G_586_GLV_INV(gcr_cntl);
+ unsigned gl1_inv = G_586_GL1_INV(gcr_cntl);
+ assert(G_586_GL2_US(gcr_cntl) == 0);
+ assert(G_586_GL2_RANGE(gcr_cntl) == 0);
+ assert(G_586_GL2_DISCARD(gcr_cntl) == 0);
+ unsigned gl2_inv = G_586_GL2_INV(gcr_cntl);
+ unsigned gl2_wb = G_586_GL2_WB(gcr_cntl);
+ unsigned gcr_seq = G_586_SEQ(gcr_cntl);
+
+ gcr_cntl &= C_586_GLM_WB &
+ C_586_GLM_INV &
+ C_586_GLV_INV &
+ C_586_GL1_INV &
+ C_586_GL2_INV &
+ C_586_GL2_WB; /* keep SEQ */
+
+ si_cp_release_mem(ctx, cs, cb_db_event,
+ S_490_GLM_WB(glm_wb) |
+ S_490_GLM_INV(glm_inv) |
+ S_490_GLV_INV(glv_inv) |
+ S_490_GL1_INV(gl1_inv) |
+ S_490_GL2_INV(gl2_inv) |
+ S_490_GL2_WB(gl2_wb) |
+ S_490_SEQ(gcr_seq),
+ EOP_DST_SEL_MEM,
+ EOP_INT_SEL_SEND_DATA_AFTER_WR_CONFIRM,
+ EOP_DATA_SEL_VALUE_32BIT,
+ ctx->wait_mem_scratch, va,
+ ctx->wait_mem_number, SI_NOT_QUERY);
+ si_cp_wait_mem(ctx, ctx->gfx_cs, va, ctx->wait_mem_number, 0xffffffff,
+ WAIT_REG_MEM_EQUAL);
+ }
+
+ /* Ignore fields that only modify the behavior of other fields. */
+ if (gcr_cntl & C_586_GL1_RANGE & C_586_GL2_RANGE & C_586_SEQ) {
+ /* Flush caches and wait for the caches to assert idle.
+ * The cache flush is executed in the ME, but the PFP waits
+ * for completion.
+ */
+ radeon_emit(cs, PKT3(PKT3_ACQUIRE_MEM, 6, 0));
+ radeon_emit(cs, 0); /* CP_COHER_CNTL */
+ radeon_emit(cs, 0xffffffff); /* CP_COHER_SIZE */
+ radeon_emit(cs, 0xffffff); /* CP_COHER_SIZE_HI */
+ radeon_emit(cs, 0); /* CP_COHER_BASE */
+ radeon_emit(cs, 0); /* CP_COHER_BASE_HI */
+ radeon_emit(cs, 0x0000000A); /* POLL_INTERVAL */
+ radeon_emit(cs, gcr_cntl); /* GCR_CNTL */
+ } else if (cb_db_event ||
+ (flags & (SI_CONTEXT_VS_PARTIAL_FLUSH |
+ SI_CONTEXT_PS_PARTIAL_FLUSH |
+ SI_CONTEXT_CS_PARTIAL_FLUSH))) {
+ /* We need to ensure that PFP waits as well. */
+ radeon_emit(cs, PKT3(PKT3_PFP_SYNC_ME, 0, 0));
+ radeon_emit(cs, 0);
+ }
+
+ if (flags & SI_CONTEXT_START_PIPELINE_STATS) {
+ radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
+ radeon_emit(cs, EVENT_TYPE(V_028A90_PIPELINESTAT_START) |
+ EVENT_INDEX(0));
+ } else if (flags & SI_CONTEXT_STOP_PIPELINE_STATS) {
+ radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
+ radeon_emit(cs, EVENT_TYPE(V_028A90_PIPELINESTAT_STOP) |
+ EVENT_INDEX(0));
+ }
+
+ ctx->flags = 0;
+}
+
void si_emit_cache_flush(struct si_context *sctx)
{
struct radeon_cmdbuf *cs = sctx->gfx_cs;
if (!sctx->has_graphics) {
/* Only process compute flags. */
flags &= SI_CONTEXT_INV_ICACHE |
- SI_CONTEXT_INV_SMEM_L1 |
- SI_CONTEXT_INV_VMEM_L1 |
- SI_CONTEXT_INV_GLOBAL_L2 |
- SI_CONTEXT_WRITEBACK_GLOBAL_L2 |
+ SI_CONTEXT_INV_SCACHE |
+ SI_CONTEXT_INV_VCACHE |
+ SI_CONTEXT_INV_L2 |
+ SI_CONTEXT_WB_L2 |
SI_CONTEXT_INV_L2_METADATA |
SI_CONTEXT_CS_PARTIAL_FLUSH;
}
(flags & SI_CONTEXT_CS_PARTIAL_FLUSH &&
sctx->compute_is_busy);
+ assert(sctx->chip_class <= GFX9);
+
if (flags & SI_CONTEXT_FLUSH_AND_INV_CB)
sctx->num_cb_cache_flushes++;
if (flags & SI_CONTEXT_FLUSH_AND_INV_DB)
if (flags & SI_CONTEXT_INV_ICACHE)
cp_coher_cntl |= S_0085F0_SH_ICACHE_ACTION_ENA(1);
- if (flags & SI_CONTEXT_INV_SMEM_L1)
+ if (flags & SI_CONTEXT_INV_SCACHE)
cp_coher_cntl |= S_0085F0_SH_KCACHE_ACTION_ENA(1);
if (sctx->chip_class <= GFX8) {
/* GFX9: Wait for idle if we're flushing CB or DB. ACQUIRE_MEM doesn't
* wait for idle on GFX9. We have to use a TS event.
*/
- if (sctx->chip_class >= GFX9 && flush_cb_db) {
+ if (sctx->chip_class == GFX9 && flush_cb_db) {
uint64_t va;
unsigned tc_flags, cb_db_event;
}
/* Ideally flush TC together with CB/DB. */
- if (flags & SI_CONTEXT_INV_GLOBAL_L2) {
+ if (flags & SI_CONTEXT_INV_L2) {
/* Writeback and invalidate everything in L2 & L1. */
tc_flags = EVENT_TC_ACTION_ENA |
EVENT_TC_WB_ACTION_ENA;
/* Clear the flags. */
- flags &= ~(SI_CONTEXT_INV_GLOBAL_L2 |
- SI_CONTEXT_WRITEBACK_GLOBAL_L2 |
- SI_CONTEXT_INV_VMEM_L1);
+ flags &= ~(SI_CONTEXT_INV_L2 |
+ SI_CONTEXT_WB_L2 |
+ SI_CONTEXT_INV_VCACHE);
sctx->num_L2_invalidates++;
}
if (sctx->has_graphics &&
(cp_coher_cntl ||
(flags & (SI_CONTEXT_CS_PARTIAL_FLUSH |
- SI_CONTEXT_INV_VMEM_L1 |
- SI_CONTEXT_INV_GLOBAL_L2 |
- SI_CONTEXT_WRITEBACK_GLOBAL_L2)))) {
+ SI_CONTEXT_INV_VCACHE |
+ SI_CONTEXT_INV_L2 |
+ SI_CONTEXT_WB_L2)))) {
radeon_emit(cs, PKT3(PKT3_PFP_SYNC_ME, 0, 0));
radeon_emit(cs, 0);
}
*
* GFX6-GFX7 don't support L2 write-back.
*/
- if (flags & SI_CONTEXT_INV_GLOBAL_L2 ||
+ if (flags & SI_CONTEXT_INV_L2 ||
(sctx->chip_class <= GFX7 &&
- (flags & SI_CONTEXT_WRITEBACK_GLOBAL_L2))) {
+ (flags & SI_CONTEXT_WB_L2))) {
/* Invalidate L1 & L2. (L1 is always invalidated on GFX6)
* WB must be set on GFX8+ when TC_ACTION is set.
*/
- si_emit_surface_sync(sctx, cp_coher_cntl |
+ si_emit_surface_sync(sctx, sctx->gfx_cs, cp_coher_cntl |
S_0085F0_TC_ACTION_ENA(1) |
S_0085F0_TCL1_ACTION_ENA(1) |
S_0301F0_TC_WB_ACTION_ENA(sctx->chip_class >= GFX8));
/* L1 invalidation and L2 writeback must be done separately,
* because both operations can't be done together.
*/
- if (flags & SI_CONTEXT_WRITEBACK_GLOBAL_L2) {
+ if (flags & SI_CONTEXT_WB_L2) {
/* WB = write-back
* NC = apply to non-coherent MTYPEs
* (i.e. MTYPE <= 1, which is what we use everywhere)
*
* WB doesn't work without NC.
*/
- si_emit_surface_sync(sctx, cp_coher_cntl |
+ si_emit_surface_sync(sctx, sctx->gfx_cs, cp_coher_cntl |
S_0301F0_TC_WB_ACTION_ENA(1) |
S_0301F0_TC_NC_ACTION_ENA(1));
cp_coher_cntl = 0;
sctx->num_L2_writebacks++;
}
- if (flags & SI_CONTEXT_INV_VMEM_L1) {
+ if (flags & SI_CONTEXT_INV_VCACHE) {
/* Invalidate per-CU VMEM L1. */
- si_emit_surface_sync(sctx, cp_coher_cntl |
+ si_emit_surface_sync(sctx, sctx->gfx_cs, cp_coher_cntl |
S_0085F0_TCL1_ACTION_ENA(1));
cp_coher_cntl = 0;
}
/* If TC flushes haven't cleared this... */
if (cp_coher_cntl)
- si_emit_surface_sync(sctx, cp_coher_cntl);
+ si_emit_surface_sync(sctx, sctx->gfx_cs, cp_coher_cntl);
if (is_barrier)
si_prim_discard_signal_next_compute_ib_start(sctx);
if (indexbuf &&
ws->cs_is_buffer_referenced(cs, si_resource(indexbuf)->buf,
RADEON_USAGE_WRITE))
- return false;
+ goto has_write_reference;
/* Vertex buffers. */
struct si_vertex_elements *velems = sctx->vertex_elements;
if (ws->cs_is_buffer_referenced(cs, si_resource(res)->buf,
RADEON_USAGE_WRITE))
- return false;
+ goto has_write_reference;
}
/* Constant and shader buffers. */
if (ws->cs_is_buffer_referenced(cs, si_resource(res)->buf,
RADEON_USAGE_WRITE))
- return false;
+ goto has_write_reference;
}
/* Samplers. */
if (ws->cs_is_buffer_referenced(cs,
si_resource(view->texture)->buf,
RADEON_USAGE_WRITE))
- return false;
+ goto has_write_reference;
}
}
if (ws->cs_is_buffer_referenced(cs, si_resource(res)->buf,
RADEON_USAGE_WRITE))
- return false;
+ goto has_write_reference;
}
}
return true;
+
+has_write_reference:
+ /* If the current gfx IB has enough packets, flush it to remove write
+ * references to buffers.
+ */
+ if (cs->prev_dw + cs->current.cdw > 2048) {
+ si_flush_gfx_cs(sctx, RADEON_FLUSH_ASYNC_START_NEXT_GFX_IB_NOW, NULL);
+ assert(si_all_vs_resources_read_only(sctx, indexbuf));
+ return true;
+ }
+ return false;
}
static ALWAYS_INLINE bool pd_msg(const char *s)
struct si_context *sctx = (struct si_context *)ctx;
struct si_state_rasterizer *rs = sctx->queued.named.rasterizer;
struct pipe_resource *indexbuf = info->index.resource;
- unsigned dirty_tex_counter;
+ unsigned dirty_tex_counter, dirty_buf_counter;
enum pipe_prim_type rast_prim, prim = info->mode;
unsigned index_size = info->index_size;
unsigned index_offset = info->indirect ? info->start * index_size : 0;
unsigned instance_count = info->instance_count;
- bool primitive_restart = info->primitive_restart;
+ bool primitive_restart = info->primitive_restart &&
+ (!sctx->screen->options.prim_restart_tri_strips_only ||
+ (prim != PIPE_PRIM_TRIANGLE_STRIP &&
+ prim != PIPE_PRIM_TRIANGLE_STRIP_ADJACENCY));
if (likely(!info->indirect)) {
/* GFX6-GFX7 treat instance_count==0 as instance_count==1. There is
return;
}
- if (unlikely(!sctx->vs_shader.cso ||
- !rs ||
+ struct si_shader_selector *vs = sctx->vs_shader.cso;
+ if (unlikely(!vs ||
+ sctx->num_vertex_elements < vs->num_vs_inputs ||
(!sctx->ps_shader.cso && !rs->rasterizer_discard) ||
(!!sctx->tes_shader.cso != (prim == PIPE_PRIM_PATCHES)))) {
assert(0);
si_update_all_texture_descriptors(sctx);
}
+ dirty_buf_counter = p_atomic_read(&sctx->screen->dirty_buf_counter);
+ if (unlikely(dirty_buf_counter != sctx->last_dirty_buf_counter)) {
+ sctx->last_dirty_buf_counter = dirty_buf_counter;
+ /* Rebind all buffers unconditionally. */
+ si_rebind_buffer(sctx, NULL);
+ }
+
si_decompress_textures(sctx, u_bit_consecutive(0, SI_NUM_GRAPHICS_SHADERS));
/* Set the rasterization primitive type.
* This must be done after si_decompress_textures, which can call
* draw_vbo recursively, and before si_update_shaders, which uses
* current_rast_prim for this draw_vbo call. */
- if (sctx->gs_shader.cso)
- rast_prim = sctx->gs_shader.cso->gs_output_prim;
- else if (sctx->tes_shader.cso) {
- if (sctx->tes_shader.cso->info.properties[TGSI_PROPERTY_TES_POINT_MODE])
- rast_prim = PIPE_PRIM_POINTS;
- else
- rast_prim = sctx->tes_shader.cso->info.properties[TGSI_PROPERTY_TES_PRIM_MODE];
- } else
+ if (sctx->gs_shader.cso) {
+ /* Only possibilities: POINTS, LINE_STRIP, TRIANGLES */
+ rast_prim = sctx->gs_shader.cso->rast_prim;
+ } else if (sctx->tes_shader.cso) {
+ /* Only possibilities: POINTS, LINE_STRIP, TRIANGLES */
+ rast_prim = sctx->tes_shader.cso->rast_prim;
+ } else if (util_rast_prim_is_triangles(prim)) {
+ rast_prim = PIPE_PRIM_TRIANGLES;
+ } else {
+ /* Only possibilities: POINTS, LINE*, RECTANGLES */
rast_prim = prim;
+ }
if (rast_prim != sctx->current_rast_prim) {
if (util_prim_is_points_or_lines(sctx->current_rast_prim) !=
}
if (sctx->tes_shader.cso &&
- sctx->screen->has_ls_vgpr_init_bug) {
+ sctx->screen->info.has_ls_vgpr_init_bug) {
/* Determine whether the LS VGPR fix should be applied.
*
* It is only required when num input CPs > num output CPs,
}
}
- if (sctx->gs_shader.cso) {
+ if (sctx->chip_class <= GFX9 && sctx->gs_shader.cso) {
/* Determine whether the GS triangle strip adjacency fix should
* be applied. Rotate every other triangle if
* - triangle strips with adjacency are fed to the GS and
bool gs_tri_strip_adj_fix =
!sctx->tes_shader.cso &&
prim == PIPE_PRIM_TRIANGLE_STRIP_ADJACENCY &&
- !info->primitive_restart;
+ !primitive_restart;
if (gs_tri_strip_adj_fix != sctx->gs_tri_strip_adj_fix) {
sctx->gs_tri_strip_adj_fix = gs_tri_strip_adj_fix;
si_resource(indexbuf)->TC_L2_dirty) {
/* GFX8 reads index buffers through TC L2, so it doesn't
* need this. */
- sctx->flags |= SI_CONTEXT_WRITEBACK_GLOBAL_L2;
+ sctx->flags |= SI_CONTEXT_WB_L2;
si_resource(indexbuf)->TC_L2_dirty = false;
}
}
/* Indirect buffers use TC L2 on GFX9, but not older hw. */
if (sctx->chip_class <= GFX8) {
if (si_resource(indirect->buffer)->TC_L2_dirty) {
- sctx->flags |= SI_CONTEXT_WRITEBACK_GLOBAL_L2;
+ sctx->flags |= SI_CONTEXT_WB_L2;
si_resource(indirect->buffer)->TC_L2_dirty = false;
}
if (indirect->indirect_draw_count &&
si_resource(indirect->indirect_draw_count)->TC_L2_dirty) {
- sctx->flags |= SI_CONTEXT_WRITEBACK_GLOBAL_L2;
+ sctx->flags |= SI_CONTEXT_WB_L2;
si_resource(indirect->indirect_draw_count)->TC_L2_dirty = false;
}
}
} else {
- direct_count = info->count * instance_count;
+ /* Multiply by 3 for strips and fans to get an approximate vertex
+ * count as triangles. */
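+ /* (Example: a strip with N vertices has N - 2 triangles, which is
+ * roughly 3 * N vertices when expanded to a triangle list, hence
+ * the factor of 3.)
+ */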
+ direct_count = info->count * instance_count *
+ (prim == PIPE_PRIM_TRIANGLES ? 1 : 3);
}
/* Determine if we can use the primitive discard compute shader. */
if (si_compute_prim_discard_enabled(sctx) &&
- /* Multiply by 3 for strips and fans to get the vertex count as triangles. */
- direct_count * (prim == PIPE_PRIM_TRIANGLES ? 1 : 3) >
- sctx->prim_discard_vertex_count_threshold &&
+ (direct_count > sctx->prim_discard_vertex_count_threshold ?
+ (sctx->compute_num_verts_rejected += direct_count, true) : /* Add, then return true. */
+ (sctx->compute_num_verts_ineligible += direct_count, false)) && /* Add, then return false. */
(!info->count_from_stream_output || pd_msg("draw_opaque")) &&
(primitive_restart ?
/* Supported prim types with primitive restart: */
(!sctx->tes_shader.cso || pd_msg("uses tess")) &&
(!sctx->gs_shader.cso || pd_msg("uses GS")) &&
(!sctx->ps_shader.cso->info.uses_primid || pd_msg("PS uses PrimID")) &&
+ !rs->polygon_mode_enabled &&
#if SI_PRIM_DISCARD_DEBUG /* same as cso->prim_discard_cs_allowed */
(!sctx->vs_shader.cso->info.uses_bindless_images || pd_msg("uses bindless images")) &&
(!sctx->vs_shader.cso->info.uses_bindless_samplers || pd_msg("uses bindless samplers")) &&
/* Check that all buffers are used for read only, because compute
* dispatches can run ahead. */
(si_all_vs_resources_read_only(sctx, index_size ? indexbuf : NULL) || pd_msg("write reference"))) {
- switch (si_prepare_prim_discard_or_split_draw(sctx, info)) {
+ switch (si_prepare_prim_discard_or_split_draw(sctx, info, primitive_restart)) {
case SI_PRIM_DISCARD_ENABLED:
original_index_size = index_size;
prim_discard_cs_instancing = instance_count > 1;
index_size = 4;
instance_count = 1;
primitive_restart = false;
+ sctx->compute_num_verts_rejected -= direct_count;
+ sctx->compute_num_verts_accepted += direct_count;
break;
case SI_PRIM_DISCARD_DISABLED:
break;
case SI_PRIM_DISCARD_DRAW_SPLIT:
+ sctx->compute_num_verts_rejected -= direct_count;
goto return_cleanup;
}
}
* written (i.e. the GPU rolls the context), PA_SC_VPORT_SCISSOR
* registers must be written too.
*/
- bool has_gfx9_scissor_bug = sctx->screen->has_gfx9_scissor_bug;
unsigned masked_atoms = 0;
- if (has_gfx9_scissor_bug) {
+ if (sctx->screen->info.has_gfx9_scissor_bug) {
masked_atoms |= si_get_atom_bit(sctx, &sctx->atoms.s.scissors);
if (info->count_from_stream_output ||
/* Emit all states except possibly render condition. */
si_emit_all_states(sctx, info, prim, instance_count,
primitive_restart, masked_atoms);
- si_emit_cache_flush(sctx);
+ sctx->emit_cache_flush(sctx);
/* <-- CUs are idle here. */
if (si_is_atom_dirty(sctx, &sctx->atoms.s.render_cond))
sctx->atoms.s.render_cond.emit(sctx);
- if (has_gfx9_scissor_bug &&
+ if (sctx->screen->info.has_gfx9_scissor_bug &&
(sctx->context_roll ||
si_is_atom_dirty(sctx, &sctx->atoms.s.scissors)))
sctx->atoms.s.scissors.emit(sctx);
* states, and draw at the end.
*/
if (sctx->flags)
- si_emit_cache_flush(sctx);
+ sctx->emit_cache_flush(sctx);
/* Only prefetch the API VS and VBO descriptors. */
if (sctx->chip_class >= GFX7 && sctx->prefetch_L2_mask)
si_emit_all_states(sctx, info, prim, instance_count,
primitive_restart, masked_atoms);
- if (has_gfx9_scissor_bug &&
+ if (sctx->screen->info.has_gfx9_scissor_bug &&
(sctx->context_roll ||
si_is_atom_dirty(sctx, &sctx->atoms.s.scissors)))
sctx->atoms.s.scissors.emit(sctx);
cik_emit_prefetch_L2(sctx, false);
}
+ /* Mark the displayable dcc buffer as dirty in order to update
+ * it on the next call to si_flush_resource. */
+ if (sctx->screen->info.use_display_dcc_with_retile_blit) {
+ /* Don't use si_update_fb_dirtiness_after_rendering because it'll
+ * cause unnecessary texture decompressions on each draw. */
+ unsigned displayable_dcc_cb_mask = sctx->framebuffer.displayable_dcc_cb_mask;
+ while (displayable_dcc_cb_mask) {
+ unsigned i = u_bit_scan(&displayable_dcc_cb_mask);
+ struct pipe_surface *surf = sctx->framebuffer.state.cbufs[i];
+ struct si_texture *tex = (struct si_texture*) surf->texture;
+ tex->displayable_dcc_dirty = true;
+ }
+ }
+
/* Clear the context roll flag after the draw call. */
sctx->context_roll = false;
/* Don't set per-stage shader pointers for VS. */
sctx->shader_pointers_dirty &= ~SI_DESCS_SHADER_MASK(VERTEX);
sctx->vertex_buffer_pointer_dirty = false;
+ sctx->vertex_buffer_user_sgprs_dirty = false;
si_draw_vbo(pipe, &info);
}