ctx->tracked_regs.reg_value[SI_TRACKED_SPI_SHADER_COL_FORMAT] = 0x00000000;
ctx->tracked_regs.reg_value[SI_TRACKED_CB_SHADER_MASK] = 0xffffffff;
ctx->tracked_regs.reg_value[SI_TRACKED_VGT_TF_PARAM] = 0x00000000;
+ ctx->tracked_regs.reg_value[SI_TRACKED_VGT_VERTEX_REUSE_BLOCK_CNTL] = 0x0000001e; /* From VI; 0x1e = reuse depth 30 */
/* Mark all tracked register values above as saved. */
ctx->tracked_regs.reg_saved = 0xffffffffffffffff;
PIPE_TESS_SPACING_FRACTIONAL_ODD)
vtx_reuse_depth = 14;
- si_pm4_set_reg(pm4, R_028C58_VGT_VERTEX_REUSE_BLOCK_CNTL,
- vtx_reuse_depth);
+ assert(pm4->shader);
+ pm4->shader->vgt_vertex_reuse_block_cntl = vtx_reuse_depth;
}
}
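
For orientation: the 0x0000001e seeded into tracked_regs at the top of this patch is reuse depth 30, the default this function programs, while fractional-odd tessellation drops it to 14. A minimal standalone check, assuming the usual sid.h-style layout with VTX_REUSE_DEPTH in bits [7:0] (the macro below is a hand-written stand-in, not copied from the real sid.h):

#include <assert.h>
#include <stdint.h>

/* Assumed field layout; sid.h has the authoritative definition. */
#define S_028C58_VTX_REUSE_DEPTH(x) (((uint32_t)(x) & 0xff) << 0)

int main(void)
{
   assert(S_028C58_VTX_REUSE_DEPTH(30) == 0x0000001e); /* tracked default */
   assert(S_028C58_VTX_REUSE_DEPTH(14) == 0x0000000e); /* fractional-odd */
   return 0;
}
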
SI_TRACKED_VGT_TF_PARAM,
shader->vgt_tf_param);
+ if (shader->vgt_vertex_reuse_block_cntl)
+ radeon_opt_set_context_reg(sctx, R_028C58_VGT_VERTEX_REUSE_BLOCK_CNTL,
+ SI_TRACKED_VGT_VERTEX_REUSE_BLOCK_CNTL,
+ shader->vgt_vertex_reuse_block_cntl);
}
static void si_shader_es(struct si_screen *sscreen, struct si_shader *shader)
radeon_opt_set_context_reg(sctx, R_028B6C_VGT_TF_PARAM,
SI_TRACKED_VGT_TF_PARAM,
shader->vgt_tf_param);
+ if (shader->vgt_vertex_reuse_block_cntl)
+ radeon_opt_set_context_reg(sctx, R_028C58_VGT_VERTEX_REUSE_BLOCK_CNTL,
+ SI_TRACKED_VGT_VERTEX_REUSE_BLOCK_CNTL,
+ shader->vgt_vertex_reuse_block_cntl);
}
}
radeon_opt_set_context_reg(sctx, R_028B6C_VGT_TF_PARAM,
SI_TRACKED_VGT_TF_PARAM,
shader->vgt_tf_param);
+
+ if (shader->vgt_vertex_reuse_block_cntl)
+ radeon_opt_set_context_reg(sctx, R_028C58_VGT_VERTEX_REUSE_BLOCK_CNTL,
+ SI_TRACKED_VGT_VERTEX_REUSE_BLOCK_CNTL,
+ shader->vgt_vertex_reuse_block_cntl);
}
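
All three hunks above route the write through radeon_opt_set_context_reg, which consults the tracked_regs shadow initialized at the top of this patch and skips redundant SET_CONTEXT_REG writes. A self-contained model of that skip logic, using illustrative names rather than the real Mesa API (the actual helper also handles packet emission and batching):

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define NUM_TRACKED_REGS 64

struct tracked_regs_model {
   uint64_t reg_saved;                /* bit i set => reg_value[i] is known */
   uint32_t reg_value[NUM_TRACKED_REGS];
};

/* Returns true when a SET_CONTEXT_REG packet would actually be emitted. */
static bool opt_set_context_reg(struct tracked_regs_model *t,
                                unsigned reg, uint32_t value)
{
   assert(reg < NUM_TRACKED_REGS);
   if ((t->reg_saved & (1ull << reg)) && t->reg_value[reg] == value)
      return false;                   /* redundant write eliminated */
   t->reg_saved |= 1ull << reg;
   t->reg_value[reg] = value;
   return true;
}

int main(void)
{
   enum { TRACKED_VTX_REUSE = 0 };    /* illustrative index */
   struct tracked_regs_model t = { .reg_saved = ~0ull };

   t.reg_value[TRACKED_VTX_REUSE] = 0x1e; /* seeded default, as in the patch */

   /* Binding a shader with the Polaris default (30 == 0x1e) emits nothing;
    * switching to fractional-odd tessellation (14) does emit. */
   printf("%d\n", opt_set_context_reg(&t, TRACKED_VTX_REUSE, 30)); /* 0 */
   printf("%d\n", opt_set_context_reg(&t, TRACKED_VTX_REUSE, 14)); /* 1 */
   return 0;
}

This is also why the if (shader->vgt_vertex_reuse_block_cntl) guard suffices: shaders for which polaris_set_vgt_vertex_reuse never set the field leave the tracked value untouched rather than clobbering it with 0.
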
/**