ctx->tracked_regs.reg_value[SI_TRACKED_VGT_VERTEX_REUSE_BLOCK_CNTL] = 0x0000001e; /* From GFX8 */
/* Set all cleared context registers to saved. */
- ctx->tracked_regs.reg_saved = 0xffffffffffffffff;
+ ctx->tracked_regs.reg_saved = ~(1ull << SI_TRACKED_GE_PC_ALLOC); /* GE_PC_ALLOC is a uconfig reg, not reset by CLEAR_STATE */
ctx->last_gs_out_prim = 0; /* cleared by CLEAR_STATE */
} else {
- /* Set all saved registers state to unknown. */
+ /* Set all register values to unknown. */
ctx->tracked_regs.reg_saved = 0;
ctx->last_gs_out_prim = -1; /* unknown */
}
S_00B0C0_SOFT_GROUPING_EN(1) |
S_00B0C0_NUMBER_OF_REQUESTS_PER_CU(4 - 1));
si_pm4_set_reg(pm4, R_00B1C0_SPI_SHADER_REQ_CTRL_VS, 0);
-
- if (sctx->family == CHIP_NAVI10 ||
- sctx->family == CHIP_NAVI12 ||
- sctx->family == CHIP_NAVI14) {
- /* SQ_NON_EVENT must be emitted before GE_PC_ALLOC is written. */
- si_pm4_cmd_begin(pm4, PKT3_EVENT_WRITE);
- si_pm4_cmd_add(pm4, EVENT_TYPE(V_028A90_SQ_NON_EVENT) | EVENT_INDEX(0));
- si_pm4_cmd_end(pm4, false);
- }
- /* TODO: For culling, replace 128 with 256. */
- si_pm4_set_reg(pm4, R_030980_GE_PC_ALLOC,
- S_030980_OVERSUB_EN(1) |
- S_030980_NUM_PC_LINES(sscreen->info.pc_lines / 4 - 1));
}
if (sctx->chip_class >= GFX8) {
}
}
+static void gfx10_emit_ge_pc_alloc(struct si_context *sctx, unsigned value)
+{
+ enum si_tracked_reg reg = SI_TRACKED_GE_PC_ALLOC;
+
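+ /* Only emit the register if its value is unknown or has changed. */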
+ if (((sctx->tracked_regs.reg_saved >> reg) & 0x1) != 0x1 ||
+ sctx->tracked_regs.reg_value[reg] != value) {
+ struct radeon_cmdbuf *cs = sctx->gfx_cs;
+
+ if (sctx->family == CHIP_NAVI10 ||
+ sctx->family == CHIP_NAVI12 ||
+ sctx->family == CHIP_NAVI14) {
+ /* SQ_NON_EVENT must be emitted before GE_PC_ALLOC is written. */
+ radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
+ radeon_emit(cs, EVENT_TYPE(V_028A90_SQ_NON_EVENT) | EVENT_INDEX(0));
+ }
+
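+ /* GE_PC_ALLOC is a uconfig register, so writing it doesn't cause a context roll. */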
+ radeon_set_uconfig_reg(cs, R_030980_GE_PC_ALLOC, value);
+
+ sctx->tracked_regs.reg_saved |= 0x1ull << reg;
+ sctx->tracked_regs.reg_value[reg] = value;
+ }
+}
+
/* Common tail code for NGG primitive shaders. */
static void gfx10_emit_shader_ngg_tail(struct si_context *sctx,
struct si_shader *shader,
if (initial_cdw != sctx->gfx_cs->current.cdw)
sctx->context_roll = true;
+
+ /* GE_PC_ALLOC is not a context register, so it doesn't cause a context roll. */
+ gfx10_emit_ge_pc_alloc(sctx, shader->ctx_reg.ngg.ge_pc_alloc);
}
static void gfx10_emit_shader_ngg_notess_nogs(struct si_context *sctx)
shader->ctx_reg.ngg.pa_cl_ngg_cntl =
S_028838_INDEX_BUF_EDGE_FLAG_ENA(gs_type == PIPE_SHADER_VERTEX);
shader->pa_cl_vs_out_cntl = si_get_vs_out_cntl(gs_sel, true);
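+ /* Use a quarter of the parameter cache lines with oversubscription enabled. */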
+ shader->ctx_reg.ngg.ge_pc_alloc = S_030980_OVERSUB_EN(1) |
+ S_030980_NUM_PC_LINES(sscreen->info.pc_lines / 4 - 1);
shader->ge_cntl =
S_03096C_PRIM_GRP_SIZE(shader->ngg.max_gsprims) |
if (initial_cdw != sctx->gfx_cs->current.cdw)
sctx->context_roll = true;
+
+ /* GE_PC_ALLOC is not a context register, so it doesn't cause a context roll. */
+ if (sctx->chip_class >= GFX10)
+ gfx10_emit_ge_pc_alloc(sctx, shader->ctx_reg.vs.ge_pc_alloc);
}
/**
S_02870C_POS3_EXPORT_FORMAT(shader->info.nr_pos_exports > 3 ?
V_02870C_SPI_SHADER_4COMP :
V_02870C_SPI_SHADER_NONE);
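+ /* Same parameter cache allocation as the NGG path; only emitted on GFX10+. */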
+ shader->ctx_reg.vs.ge_pc_alloc = S_030980_OVERSUB_EN(1) |
+ S_030980_NUM_PC_LINES(sscreen->info.pc_lines / 4 - 1);
shader->pa_cl_vs_out_cntl = si_get_vs_out_cntl(shader->selector, false);
oc_lds_en = shader->selector->type == PIPE_SHADER_TESS_EVAL ? 1 : 0;