/* SHADER STATES */
static void si_set_tesseval_regs(struct si_screen *sscreen,
- struct si_shader_selector *tes,
+ const struct si_shader_selector *tes,
struct si_pm4_state *pm4)
{
- struct tgsi_shader_info *info = &tes->info;
+ const struct tgsi_shader_info *info = &tes->info;
unsigned tes_prim_mode = info->properties[TGSI_PROPERTY_TES_PRIM_MODE];
unsigned tes_spacing = info->properties[TGSI_PROPERTY_TES_SPACING];
bool tes_vertex_order_cw = info->properties[TGSI_PROPERTY_TES_VERTEX_ORDER_CW];
} else
distribution_mode = V_028B6C_DISTRIBUTION_MODE_NO_DIST;
- si_pm4_set_reg(pm4, R_028B6C_VGT_TF_PARAM,
- S_028B6C_TYPE(type) |
- S_028B6C_PARTITIONING(partitioning) |
- S_028B6C_TOPOLOGY(topology) |
- S_028B6C_DISTRIBUTION_MODE(distribution_mode));
+ assert(pm4->shader);
+ pm4->shader->vgt_tf_param = S_028B6C_TYPE(type) |
+ S_028B6C_PARTITIONING(partitioning) |
+ S_028B6C_TOPOLOGY(topology) |
+ S_028B6C_DISTRIBUTION_MODE(distribution_mode);
}
/* Polaris needs different VTX_REUSE_DEPTH settings depending on
PIPE_TESS_SPACING_FRACTIONAL_ODD)
vtx_reuse_depth = 14;
- si_pm4_set_reg(pm4, R_028C58_VGT_VERTEX_REUSE_BLOCK_CNTL,
- vtx_reuse_depth);
+ assert(pm4->shader);
+ pm4->shader->vgt_vertex_reuse_block_cntl = vtx_reuse_depth;
}
}
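/* Illustration (annotation, not part of the patch): the recurring change in
 * this patch is that register values formerly baked into the pm4 command
 * stream via si_pm4_set_reg() are stored in shader-state fields and written
 * later by per-stage atom.emit callbacks through radeon_opt_set_context_reg(),
 * which skips redundant writes. A rough sketch of that helper, assuming the
 * tracked_regs table caches the last value written per tracked register and
 * radeon_set_context_reg() is the unconditional writer:
 *
 *   if (sctx->tracked_regs.reg_value[reg] != value) {
 *       radeon_set_context_reg(sctx->gfx_cs, offset, value);
 *       sctx->tracked_regs.reg_value[reg] = value;
 *   }
 */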
static unsigned si_get_num_vs_user_sgprs(unsigned num_always_on_user_sgprs)
{
/* Add the pointer to VBO descriptors. */
- if (HAVE_32BIT_POINTERS) {
- return num_always_on_user_sgprs + 1;
- } else {
- assert(num_always_on_user_sgprs % 2 == 0);
- return num_always_on_user_sgprs + 2;
- }
+ return num_always_on_user_sgprs + 1;
}
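/* Annotation: with 32-bit GPU pointers this is now always a single extra
 * user SGPR. The removed branch covered the 64-bit-pointer build, where the
 * VBO descriptor pointer consumed two SGPRs and had to start at an even
 * SGPR index (hence the old alignment assert). */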
static void si_shader_ls(struct si_screen *sscreen, struct si_shader *shader)
unsigned vgpr_comp_cnt;
uint64_t va;
- assert(sscreen->info.chip_class <= VI);
+ assert(sscreen->info.chip_class <= GFX8);
pm4 = si_get_shader_pm4_state(shader);
if (!pm4)
S_00B428_FLOAT_MODE(shader->config.float_mode) |
S_00B428_LS_VGPR_COMP_CNT(ls_vgpr_comp_cnt));
- if (sscreen->info.chip_class <= VI) {
+ if (sscreen->info.chip_class <= GFX8) {
si_pm4_set_reg(pm4, R_00B42C_SPI_SHADER_PGM_RSRC2_HS,
shader->config.rsrc2);
}
static void si_emit_shader_es(struct si_context *sctx)
{
struct si_shader *shader = sctx->queued.named.es->shader;
+ unsigned initial_cdw = sctx->gfx_cs->current.cdw;
if (!shader)
return;
radeon_opt_set_context_reg(sctx, R_028AAC_VGT_ESGS_RING_ITEMSIZE,
SI_TRACKED_VGT_ESGS_RING_ITEMSIZE,
shader->selector->esgs_itemsize / 4);
+
+ if (shader->selector->type == PIPE_SHADER_TESS_EVAL)
+ radeon_opt_set_context_reg(sctx, R_028B6C_VGT_TF_PARAM,
+ SI_TRACKED_VGT_TF_PARAM,
+ shader->vgt_tf_param);
+
+ if (shader->vgt_vertex_reuse_block_cntl)
+ radeon_opt_set_context_reg(sctx, R_028C58_VGT_VERTEX_REUSE_BLOCK_CNTL,
+ SI_TRACKED_VGT_VERTEX_REUSE_BLOCK_CNTL,
+ shader->vgt_vertex_reuse_block_cntl);
+
+ if (initial_cdw != sctx->gfx_cs->current.cdw)
+ sctx->context_roll = true;
}
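/* Annotation: because radeon_opt_set_context_reg() only appends packets when
 * a value actually changed, comparing the command stream's dword count (cdw)
 * before and after the writes is a cheap way to detect whether anything was
 * emitted and a context roll must be flagged:
 *
 *   unsigned initial_cdw = sctx->gfx_cs->current.cdw;
 *   ...optional register writes...
 *   if (initial_cdw != sctx->gfx_cs->current.cdw)
 *       sctx->context_roll = true;
 */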
static void si_shader_es(struct si_screen *sscreen, struct si_shader *shader)
uint64_t va;
unsigned oc_lds_en;
- assert(sscreen->info.chip_class <= VI);
+ assert(sscreen->info.chip_class <= GFX8);
pm4 = si_get_shader_pm4_state(shader);
if (!pm4)
assert(out->max_prims_per_subgroup <= max_out_prims);
}
+static void si_emit_shader_gs(struct si_context *sctx)
+{
+ struct si_shader *shader = sctx->queued.named.gs->shader;
+ unsigned initial_cdw = sctx->gfx_cs->current.cdw;
+
+ if (!shader)
+ return;
+
+ /* R_028A60_VGT_GSVS_RING_OFFSET_1, R_028A64_VGT_GSVS_RING_OFFSET_2
+ * R_028A68_VGT_GSVS_RING_OFFSET_3, R_028A6C_VGT_GS_OUT_PRIM_TYPE */
+ radeon_opt_set_context_reg4(sctx, R_028A60_VGT_GSVS_RING_OFFSET_1,
+ SI_TRACKED_VGT_GSVS_RING_OFFSET_1,
+ shader->ctx_reg.gs.vgt_gsvs_ring_offset_1,
+ shader->ctx_reg.gs.vgt_gsvs_ring_offset_2,
+ shader->ctx_reg.gs.vgt_gsvs_ring_offset_3,
+ shader->ctx_reg.gs.vgt_gs_out_prim_type);
+
+ /* R_028AB0_VGT_GSVS_RING_ITEMSIZE */
+ radeon_opt_set_context_reg(sctx, R_028AB0_VGT_GSVS_RING_ITEMSIZE,
+ SI_TRACKED_VGT_GSVS_RING_ITEMSIZE,
+ shader->ctx_reg.gs.vgt_gsvs_ring_itemsize);
+
+ /* R_028B38_VGT_GS_MAX_VERT_OUT */
+ radeon_opt_set_context_reg(sctx, R_028B38_VGT_GS_MAX_VERT_OUT,
+ SI_TRACKED_VGT_GS_MAX_VERT_OUT,
+ shader->ctx_reg.gs.vgt_gs_max_vert_out);
+
+ /* R_028B5C_VGT_GS_VERT_ITEMSIZE, R_028B60_VGT_GS_VERT_ITEMSIZE_1
+ * R_028B64_VGT_GS_VERT_ITEMSIZE_2, R_028B68_VGT_GS_VERT_ITEMSIZE_3 */
+ radeon_opt_set_context_reg4(sctx, R_028B5C_VGT_GS_VERT_ITEMSIZE,
+ SI_TRACKED_VGT_GS_VERT_ITEMSIZE,
+ shader->ctx_reg.gs.vgt_gs_vert_itemsize,
+ shader->ctx_reg.gs.vgt_gs_vert_itemsize_1,
+ shader->ctx_reg.gs.vgt_gs_vert_itemsize_2,
+ shader->ctx_reg.gs.vgt_gs_vert_itemsize_3);
+
+ /* R_028B90_VGT_GS_INSTANCE_CNT */
+ radeon_opt_set_context_reg(sctx, R_028B90_VGT_GS_INSTANCE_CNT,
+ SI_TRACKED_VGT_GS_INSTANCE_CNT,
+ shader->ctx_reg.gs.vgt_gs_instance_cnt);
+
+ if (sctx->chip_class >= GFX9) {
+ /* R_028A44_VGT_GS_ONCHIP_CNTL */
+ radeon_opt_set_context_reg(sctx, R_028A44_VGT_GS_ONCHIP_CNTL,
+ SI_TRACKED_VGT_GS_ONCHIP_CNTL,
+ shader->ctx_reg.gs.vgt_gs_onchip_cntl);
+ /* R_028A94_VGT_GS_MAX_PRIMS_PER_SUBGROUP */
+ radeon_opt_set_context_reg(sctx, R_028A94_VGT_GS_MAX_PRIMS_PER_SUBGROUP,
+ SI_TRACKED_VGT_GS_MAX_PRIMS_PER_SUBGROUP,
+ shader->ctx_reg.gs.vgt_gs_max_prims_per_subgroup);
+ /* R_028AAC_VGT_ESGS_RING_ITEMSIZE */
+ radeon_opt_set_context_reg(sctx, R_028AAC_VGT_ESGS_RING_ITEMSIZE,
+ SI_TRACKED_VGT_ESGS_RING_ITEMSIZE,
+ shader->ctx_reg.gs.vgt_esgs_ring_itemsize);
+
+ if (shader->key.part.gs.es->type == PIPE_SHADER_TESS_EVAL)
+ radeon_opt_set_context_reg(sctx, R_028B6C_VGT_TF_PARAM,
+ SI_TRACKED_VGT_TF_PARAM,
+ shader->vgt_tf_param);
+ if (shader->vgt_vertex_reuse_block_cntl)
+ radeon_opt_set_context_reg(sctx, R_028C58_VGT_VERTEX_REUSE_BLOCK_CNTL,
+ SI_TRACKED_VGT_VERTEX_REUSE_BLOCK_CNTL,
+ shader->vgt_vertex_reuse_block_cntl);
+ }
+
+ if (initial_cdw != sctx->gfx_cs->current.cdw)
+ sctx->context_roll = true;
+}
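/* Annotation: the _reg2/_reg4 variants presumably cover a run of adjacent
 * context registers with one SET_CONTEXT_REG packet when any value in the
 * run changed; the groups used above are consecutive dwords:
 *
 *   R_028A60_VGT_GSVS_RING_OFFSET_1  0x28A60
 *   R_028A64_VGT_GSVS_RING_OFFSET_2  0x28A64
 *   R_028A68_VGT_GSVS_RING_OFFSET_3  0x28A68
 *   R_028A6C_VGT_GS_OUT_PRIM_TYPE    0x28A6C
 */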
+
static void si_shader_gs(struct si_screen *sscreen, struct si_shader *shader)
{
struct si_shader_selector *sel = shader->selector;
if (!pm4)
return;
+ pm4->atom.emit = si_emit_shader_gs;
+
offset = num_components[0] * sel->gs_max_out_vertices;
- si_pm4_set_reg(pm4, R_028A60_VGT_GSVS_RING_OFFSET_1, offset);
+ shader->ctx_reg.gs.vgt_gsvs_ring_offset_1 = offset;
+
if (max_stream >= 1)
offset += num_components[1] * sel->gs_max_out_vertices;
- si_pm4_set_reg(pm4, R_028A64_VGT_GSVS_RING_OFFSET_2, offset);
+ shader->ctx_reg.gs.vgt_gsvs_ring_offset_2 = offset;
+
if (max_stream >= 2)
offset += num_components[2] * sel->gs_max_out_vertices;
- si_pm4_set_reg(pm4, R_028A68_VGT_GSVS_RING_OFFSET_3, offset);
- si_pm4_set_reg(pm4, R_028A6C_VGT_GS_OUT_PRIM_TYPE,
- si_conv_prim_to_gs_out(sel->gs_output_prim));
+ shader->ctx_reg.gs.vgt_gsvs_ring_offset_3 = offset;
+
+ shader->ctx_reg.gs.vgt_gs_out_prim_type =
+ si_conv_prim_to_gs_out(sel->gs_output_prim);
+
if (max_stream >= 3)
offset += num_components[3] * sel->gs_max_out_vertices;
- si_pm4_set_reg(pm4, R_028AB0_VGT_GSVS_RING_ITEMSIZE, offset);
+ shader->ctx_reg.gs.vgt_gsvs_ring_itemsize = offset;
/* The GSVS_RING_ITEMSIZE register takes 15 bits */
assert(offset < (1 << 15));
- si_pm4_set_reg(pm4, R_028B38_VGT_GS_MAX_VERT_OUT, sel->gs_max_out_vertices);
+ shader->ctx_reg.gs.vgt_gs_max_vert_out = sel->gs_max_out_vertices;
- si_pm4_set_reg(pm4, R_028B5C_VGT_GS_VERT_ITEMSIZE, num_components[0]);
- si_pm4_set_reg(pm4, R_028B60_VGT_GS_VERT_ITEMSIZE_1, (max_stream >= 1) ? num_components[1] : 0);
- si_pm4_set_reg(pm4, R_028B64_VGT_GS_VERT_ITEMSIZE_2, (max_stream >= 2) ? num_components[2] : 0);
- si_pm4_set_reg(pm4, R_028B68_VGT_GS_VERT_ITEMSIZE_3, (max_stream >= 3) ? num_components[3] : 0);
+ shader->ctx_reg.gs.vgt_gs_vert_itemsize = num_components[0];
+ shader->ctx_reg.gs.vgt_gs_vert_itemsize_1 = (max_stream >= 1) ? num_components[1] : 0;
+ shader->ctx_reg.gs.vgt_gs_vert_itemsize_2 = (max_stream >= 2) ? num_components[2] : 0;
+ shader->ctx_reg.gs.vgt_gs_vert_itemsize_3 = (max_stream >= 3) ? num_components[3] : 0;
- si_pm4_set_reg(pm4, R_028B90_VGT_GS_INSTANCE_CNT,
- S_028B90_CNT(MIN2(gs_num_invocations, 127)) |
- S_028B90_ENABLE(gs_num_invocations > 0));
+ shader->ctx_reg.gs.vgt_gs_instance_cnt = S_028B90_CNT(MIN2(gs_num_invocations, 127)) |
+ S_028B90_ENABLE(gs_num_invocations > 0);
va = shader->bo->gpu_address;
si_pm4_add_bo(pm4, shader->bo, RADEON_USAGE_READ, RADEON_PRIO_SHADER_BINARY);
S_00B22C_LDS_SIZE(gs_info.lds_size) |
S_00B22C_SCRATCH_EN(shader->config.scratch_bytes_per_wave > 0));
- si_pm4_set_reg(pm4, R_028A44_VGT_GS_ONCHIP_CNTL,
- S_028A44_ES_VERTS_PER_SUBGRP(gs_info.es_verts_per_subgroup) |
- S_028A44_GS_PRIMS_PER_SUBGRP(gs_info.gs_prims_per_subgroup) |
- S_028A44_GS_INST_PRIMS_IN_SUBGRP(gs_info.gs_inst_prims_in_subgroup));
- si_pm4_set_reg(pm4, R_028A94_VGT_GS_MAX_PRIMS_PER_SUBGROUP,
- S_028A94_MAX_PRIMS_PER_SUBGROUP(gs_info.max_prims_per_subgroup));
- si_pm4_set_reg(pm4, R_028AAC_VGT_ESGS_RING_ITEMSIZE,
- shader->key.part.gs.es->esgs_itemsize / 4);
+ shader->ctx_reg.gs.vgt_gs_onchip_cntl =
+ S_028A44_ES_VERTS_PER_SUBGRP(gs_info.es_verts_per_subgroup) |
+ S_028A44_GS_PRIMS_PER_SUBGRP(gs_info.gs_prims_per_subgroup) |
+ S_028A44_GS_INST_PRIMS_IN_SUBGRP(gs_info.gs_inst_prims_in_subgroup);
+ shader->ctx_reg.gs.vgt_gs_max_prims_per_subgroup =
+ S_028A94_MAX_PRIMS_PER_SUBGROUP(gs_info.max_prims_per_subgroup);
+ shader->ctx_reg.gs.vgt_esgs_ring_itemsize =
+ shader->key.part.gs.es->esgs_itemsize / 4;
if (es_type == PIPE_SHADER_TESS_EVAL)
si_set_tesseval_regs(sscreen, shader->key.part.gs.es, pm4);
}
}
+static void si_emit_shader_vs(struct si_context *sctx)
+{
+ struct si_shader *shader = sctx->queued.named.vs->shader;
+ unsigned initial_cdw = sctx->gfx_cs->current.cdw;
+
+ if (!shader)
+ return;
+
+ radeon_opt_set_context_reg(sctx, R_028A40_VGT_GS_MODE,
+ SI_TRACKED_VGT_GS_MODE,
+ shader->ctx_reg.vs.vgt_gs_mode);
+ radeon_opt_set_context_reg(sctx, R_028A84_VGT_PRIMITIVEID_EN,
+ SI_TRACKED_VGT_PRIMITIVEID_EN,
+ shader->ctx_reg.vs.vgt_primitiveid_en);
+
+ if (sctx->chip_class <= GFX8) {
+ radeon_opt_set_context_reg(sctx, R_028AB4_VGT_REUSE_OFF,
+ SI_TRACKED_VGT_REUSE_OFF,
+ shader->ctx_reg.vs.vgt_reuse_off);
+ }
+
+ radeon_opt_set_context_reg(sctx, R_0286C4_SPI_VS_OUT_CONFIG,
+ SI_TRACKED_SPI_VS_OUT_CONFIG,
+ shader->ctx_reg.vs.spi_vs_out_config);
+
+ radeon_opt_set_context_reg(sctx, R_02870C_SPI_SHADER_POS_FORMAT,
+ SI_TRACKED_SPI_SHADER_POS_FORMAT,
+ shader->ctx_reg.vs.spi_shader_pos_format);
+
+ radeon_opt_set_context_reg(sctx, R_028818_PA_CL_VTE_CNTL,
+ SI_TRACKED_PA_CL_VTE_CNTL,
+ shader->ctx_reg.vs.pa_cl_vte_cntl);
+
+ if (shader->selector->type == PIPE_SHADER_TESS_EVAL)
+ radeon_opt_set_context_reg(sctx, R_028B6C_VGT_TF_PARAM,
+ SI_TRACKED_VGT_TF_PARAM,
+ shader->vgt_tf_param);
+
+ if (shader->vgt_vertex_reuse_block_cntl)
+ radeon_opt_set_context_reg(sctx, R_028C58_VGT_VERTEX_REUSE_BLOCK_CNTL,
+ SI_TRACKED_VGT_VERTEX_REUSE_BLOCK_CNTL,
+ shader->vgt_vertex_reuse_block_cntl);
+
+ if (initial_cdw != sctx->gfx_cs->current.cdw)
+ sctx->context_roll = true;
+}
+
/**
* Compute the state for \p shader, which will run as a vertex shader on the
* hardware.
{
const struct tgsi_shader_info *info = &shader->selector->info;
struct si_pm4_state *pm4;
- unsigned num_user_sgprs;
- unsigned nparams, vgpr_comp_cnt;
+ unsigned num_user_sgprs, vgpr_comp_cnt;
uint64_t va;
- unsigned oc_lds_en;
+ unsigned nparams, oc_lds_en;
unsigned window_space =
- info->properties[TGSI_PROPERTY_VS_WINDOW_SPACE_POSITION];
+ info->properties[TGSI_PROPERTY_VS_WINDOW_SPACE_POSITION];
bool enable_prim_id = shader->key.mono.u.vs_export_prim_id || info->uses_primid;
pm4 = si_get_shader_pm4_state(shader);
if (!pm4)
return;
+ pm4->atom.emit = si_emit_shader_vs;
+
/* We always write VGT_GS_MODE in the VS state, because every switch
* between different shader pipelines involving a different GS or no
* GS at all involves a switch of the VS (different GS use different
if (enable_prim_id)
mode = V_028A40_GS_SCENARIO_A;
- si_pm4_set_reg(pm4, R_028A40_VGT_GS_MODE, S_028A40_MODE(mode));
- si_pm4_set_reg(pm4, R_028A84_VGT_PRIMITIVEID_EN, enable_prim_id);
+ shader->ctx_reg.vs.vgt_gs_mode = S_028A40_MODE(mode);
+ shader->ctx_reg.vs.vgt_primitiveid_en = enable_prim_id;
} else {
- si_pm4_set_reg(pm4, R_028A40_VGT_GS_MODE,
- ac_vgt_gs_mode(gs->gs_max_out_vertices,
- sscreen->info.chip_class));
- si_pm4_set_reg(pm4, R_028A84_VGT_PRIMITIVEID_EN, 0);
+ shader->ctx_reg.vs.vgt_gs_mode = ac_vgt_gs_mode(gs->gs_max_out_vertices,
+ sscreen->info.chip_class);
+ shader->ctx_reg.vs.vgt_primitiveid_en = 0;
}
- if (sscreen->info.chip_class <= VI) {
+ if (sscreen->info.chip_class <= GFX8) {
/* Reuse needs to be set off if we write oViewport. */
- si_pm4_set_reg(pm4, R_028AB4_VGT_REUSE_OFF,
- S_028AB4_REUSE_OFF(info->writes_viewport_index));
+ shader->ctx_reg.vs.vgt_reuse_off =
+ S_028AB4_REUSE_OFF(info->writes_viewport_index);
}
va = shader->bo->gpu_address;
/* VS is required to export at least one param. */
nparams = MAX2(shader->info.nr_param_exports, 1);
- si_pm4_set_reg(pm4, R_0286C4_SPI_VS_OUT_CONFIG,
- S_0286C4_VS_EXPORT_COUNT(nparams - 1));
-
- si_pm4_set_reg(pm4, R_02870C_SPI_SHADER_POS_FORMAT,
- S_02870C_POS0_EXPORT_FORMAT(V_02870C_SPI_SHADER_4COMP) |
- S_02870C_POS1_EXPORT_FORMAT(shader->info.nr_pos_exports > 1 ?
- V_02870C_SPI_SHADER_4COMP :
- V_02870C_SPI_SHADER_NONE) |
- S_02870C_POS2_EXPORT_FORMAT(shader->info.nr_pos_exports > 2 ?
- V_02870C_SPI_SHADER_4COMP :
- V_02870C_SPI_SHADER_NONE) |
- S_02870C_POS3_EXPORT_FORMAT(shader->info.nr_pos_exports > 3 ?
- V_02870C_SPI_SHADER_4COMP :
- V_02870C_SPI_SHADER_NONE));
+ shader->ctx_reg.vs.spi_vs_out_config = S_0286C4_VS_EXPORT_COUNT(nparams - 1);
+
+ shader->ctx_reg.vs.spi_shader_pos_format =
+ S_02870C_POS0_EXPORT_FORMAT(V_02870C_SPI_SHADER_4COMP) |
+ S_02870C_POS1_EXPORT_FORMAT(shader->info.nr_pos_exports > 1 ?
+ V_02870C_SPI_SHADER_4COMP :
+ V_02870C_SPI_SHADER_NONE) |
+ S_02870C_POS2_EXPORT_FORMAT(shader->info.nr_pos_exports > 2 ?
+ V_02870C_SPI_SHADER_4COMP :
+ V_02870C_SPI_SHADER_NONE) |
+ S_02870C_POS3_EXPORT_FORMAT(shader->info.nr_pos_exports > 3 ?
+ V_02870C_SPI_SHADER_4COMP :
+ V_02870C_SPI_SHADER_NONE);
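/* Worked example: with shader->info.nr_pos_exports == 2, the expression
 * above yields 4COMP for POS0 and POS1 and NONE for POS2 and POS3:
 *
 *   spi_shader_pos_format =
 *       S_02870C_POS0_EXPORT_FORMAT(V_02870C_SPI_SHADER_4COMP) |
 *       S_02870C_POS1_EXPORT_FORMAT(V_02870C_SPI_SHADER_4COMP) |
 *       S_02870C_POS2_EXPORT_FORMAT(V_02870C_SPI_SHADER_NONE) |
 *       S_02870C_POS3_EXPORT_FORMAT(V_02870C_SPI_SHADER_NONE);
 */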
oc_lds_en = shader->selector->type == PIPE_SHADER_TESS_EVAL ? 1 : 0;
S_00B12C_SO_BASE3_EN(!!shader->selector->so.stride[3]) |
S_00B12C_SO_EN(!!shader->selector->so.num_outputs) |
S_00B12C_SCRATCH_EN(shader->config.scratch_bytes_per_wave > 0));
+
if (window_space)
- si_pm4_set_reg(pm4, R_028818_PA_CL_VTE_CNTL,
- S_028818_VTX_XY_FMT(1) | S_028818_VTX_Z_FMT(1));
+ shader->ctx_reg.vs.pa_cl_vte_cntl =
+ S_028818_VTX_XY_FMT(1) | S_028818_VTX_Z_FMT(1);
else
- si_pm4_set_reg(pm4, R_028818_PA_CL_VTE_CNTL,
- S_028818_VTX_W0_FMT(1) |
- S_028818_VPORT_X_SCALE_ENA(1) | S_028818_VPORT_X_OFFSET_ENA(1) |
- S_028818_VPORT_Y_SCALE_ENA(1) | S_028818_VPORT_Y_OFFSET_ENA(1) |
- S_028818_VPORT_Z_SCALE_ENA(1) | S_028818_VPORT_Z_OFFSET_ENA(1));
+ shader->ctx_reg.vs.pa_cl_vte_cntl =
+ S_028818_VTX_W0_FMT(1) |
+ S_028818_VPORT_X_SCALE_ENA(1) | S_028818_VPORT_X_OFFSET_ENA(1) |
+ S_028818_VPORT_Y_SCALE_ENA(1) | S_028818_VPORT_Y_OFFSET_ENA(1) |
+ S_028818_VPORT_Z_SCALE_ENA(1) | S_028818_VPORT_Z_OFFSET_ENA(1);
if (shader->selector->type == PIPE_SHADER_TESS_EVAL)
si_set_tesseval_regs(sscreen, shader->selector, pm4);
return value;
}
+static void si_emit_shader_ps(struct si_context *sctx)
+{
+ struct si_shader *shader = sctx->queued.named.ps->shader;
+ unsigned initial_cdw = sctx->gfx_cs->current.cdw;
+
+ if (!shader)
+ return;
+
+ /* R_0286CC_SPI_PS_INPUT_ENA, R_0286D0_SPI_PS_INPUT_ADDR */
+ radeon_opt_set_context_reg2(sctx, R_0286CC_SPI_PS_INPUT_ENA,
+ SI_TRACKED_SPI_PS_INPUT_ENA,
+ shader->ctx_reg.ps.spi_ps_input_ena,
+ shader->ctx_reg.ps.spi_ps_input_addr);
+
+ radeon_opt_set_context_reg(sctx, R_0286E0_SPI_BARYC_CNTL,
+ SI_TRACKED_SPI_BARYC_CNTL,
+ shader->ctx_reg.ps.spi_baryc_cntl);
+ radeon_opt_set_context_reg(sctx, R_0286D8_SPI_PS_IN_CONTROL,
+ SI_TRACKED_SPI_PS_IN_CONTROL,
+ shader->ctx_reg.ps.spi_ps_in_control);
+
+ /* R_028710_SPI_SHADER_Z_FORMAT, R_028714_SPI_SHADER_COL_FORMAT */
+ radeon_opt_set_context_reg2(sctx, R_028710_SPI_SHADER_Z_FORMAT,
+ SI_TRACKED_SPI_SHADER_Z_FORMAT,
+ shader->ctx_reg.ps.spi_shader_z_format,
+ shader->ctx_reg.ps.spi_shader_col_format);
+
+ radeon_opt_set_context_reg(sctx, R_02823C_CB_SHADER_MASK,
+ SI_TRACKED_CB_SHADER_MASK,
+ shader->ctx_reg.ps.cb_shader_mask);
+
+ if (initial_cdw != sctx->gfx_cs->current.cdw)
+ sctx->context_roll = true;
+}
+
static void si_shader_ps(struct si_shader *shader)
{
struct tgsi_shader_info *info = &shader->selector->info;
if (!pm4)
return;
+ pm4->atom.emit = si_emit_shader_ps;
+
/* SPI_BARYC_CNTL.POS_FLOAT_LOCATION
* Possible values:
* 0 -> Position = pixel center
!info->writes_z && !info->writes_stencil && !info->writes_samplemask)
spi_shader_col_format = V_028714_SPI_SHADER_32_R;
- si_pm4_set_reg(pm4, R_0286CC_SPI_PS_INPUT_ENA, input_ena);
- si_pm4_set_reg(pm4, R_0286D0_SPI_PS_INPUT_ADDR,
- shader->config.spi_ps_input_addr);
+ shader->ctx_reg.ps.spi_ps_input_ena = input_ena;
+ shader->ctx_reg.ps.spi_ps_input_addr = shader->config.spi_ps_input_addr;
/* Set interpolation controls. */
spi_ps_in_control = S_0286D8_NUM_INTERP(si_get_ps_num_interp(shader));
- /* Set registers. */
- si_pm4_set_reg(pm4, R_0286E0_SPI_BARYC_CNTL, spi_baryc_cntl);
- si_pm4_set_reg(pm4, R_0286D8_SPI_PS_IN_CONTROL, spi_ps_in_control);
-
- si_pm4_set_reg(pm4, R_028710_SPI_SHADER_Z_FORMAT,
- ac_get_spi_shader_z_format(info->writes_z,
- info->writes_stencil,
- info->writes_samplemask));
-
- si_pm4_set_reg(pm4, R_028714_SPI_SHADER_COL_FORMAT, spi_shader_col_format);
- si_pm4_set_reg(pm4, R_02823C_CB_SHADER_MASK, cb_shader_mask);
+ shader->ctx_reg.ps.spi_baryc_cntl = spi_baryc_cntl;
+ shader->ctx_reg.ps.spi_ps_in_control = spi_ps_in_control;
+ shader->ctx_reg.ps.spi_shader_z_format =
+ ac_get_spi_shader_z_format(info->writes_z,
+ info->writes_stencil,
+ info->writes_samplemask);
+ shader->ctx_reg.ps.spi_shader_col_format = spi_shader_col_format;
+ shader->ctx_reg.ps.cb_shader_mask = cb_shader_mask;
va = shader->bo->gpu_address;
si_pm4_add_bo(pm4, shader->bo, RADEON_USAGE_READ, RADEON_PRIO_SHADER_BINARY);
vs->info.properties[TGSI_PROPERTY_VS_BLIT_SGPRS])
return;
- prolog_key->instance_divisor_is_one =
- sctx->vertex_elements->instance_divisor_is_one;
- prolog_key->instance_divisor_is_fetched =
- sctx->vertex_elements->instance_divisor_is_fetched;
+ struct si_vertex_elements *elts = sctx->vertex_elements;
+
+ prolog_key->instance_divisor_is_one = elts->instance_divisor_is_one;
+ prolog_key->instance_divisor_is_fetched = elts->instance_divisor_is_fetched;
/* Prefer a monolithic shader to allow scheduling divisions around
* VBO loads. */
if (prolog_key->instance_divisor_is_fetched)
key->opt.prefer_mono = 1;
- unsigned count = MIN2(vs->info.num_inputs,
- sctx->vertex_elements->count);
- memcpy(key->mono.vs_fix_fetch, sctx->vertex_elements->fix_fetch, count);
+ unsigned count = MIN2(vs->info.num_inputs, elts->count);
+ unsigned count_mask = (1 << count) - 1;
+ unsigned fix = elts->fix_fetch_always & count_mask;
+ unsigned opencode = elts->fix_fetch_opencode & count_mask;
+
+ if (sctx->vertex_buffer_unaligned & elts->vb_alignment_check_mask) {
+ uint32_t mask = elts->fix_fetch_unaligned & count_mask;
+ while (mask) {
+ unsigned i = u_bit_scan(&mask);
+ unsigned log_hw_load_size = 1 + ((elts->hw_load_is_dword >> i) & 1);
+ unsigned vbidx = elts->vertex_buffer_index[i];
+ struct pipe_vertex_buffer *vb = &sctx->vertex_buffer[vbidx];
+ unsigned align_mask = (1 << log_hw_load_size) - 1;
+ if (vb->buffer_offset & align_mask ||
+ vb->stride & align_mask) {
+ fix |= 1 << i;
+ opencode |= 1 << i;
+ }
+ }
+ }
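/* Worked example (illustrative values): if element i uses a dword-sized
 * hardware load, log_hw_load_size = 2 and align_mask = 3. A vertex buffer
 * bound with buffer_offset = 6 then fails the check (6 & 3 != 0), so bit i
 * is set in both `fix` and `opencode` and the fetch is open-coded in the
 * shader instead of taking the aligned hardware fast path. */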
+
+ while (fix) {
+ unsigned i = u_bit_scan(&fix);
+ key->mono.vs_fix_fetch[i].bits = elts->fix_fetch[i];
+ }
+ key->mono.vs_fetch_opencode = opencode;
}
static void si_shader_selector_key_hw_vs(struct si_context *sctx,
blend && blend->alpha_to_coverage)
key->part.ps.epilog.spi_shader_col_format |= V_028710_SPI_SHADER_32_AR;
- /* On SI and CIK except Hawaii, the CB doesn't clamp outputs
+ /* On GFX6 and GFX7 except Hawaii, the CB doesn't clamp outputs
* to the range supported by the type if a channel has less
* than 16 bits and the export format is 16_ABGR.
*/
- if (sctx->chip_class <= CIK && sctx->family != CHIP_HAWAII) {
+ if (sctx->chip_class <= GFX7 && sctx->family != CHIP_HAWAII) {
key->part.ps.epilog.color_is_int8 = sctx->framebuffer.color_is_int8;
key->part.ps.epilog.color_is_int10 = sctx->framebuffer.color_is_int10;
}
key->part.ps.epilog.alpha_func = si_get_alpha_test_func(sctx);
/* ps_uses_fbfetch is true only if the color buffer is bound. */
- if (sctx->ps_uses_fbfetch) {
+ if (sctx->ps_uses_fbfetch && !sctx->blitter->running) {
struct pipe_surface *cb0 = sctx->framebuffer.state.cbufs[0];
struct pipe_resource *tex = cb0->texture;
return true;
}
-/* Select the hw shader variant depending on the current state. */
-static int si_shader_select_with_key(struct si_screen *sscreen,
- struct si_shader_ctx_state *state,
- struct si_compiler_ctx_state *compiler_state,
- struct si_shader_key *key,
- int thread_index)
+/**
+ * Select a shader variant according to the shader key.
+ *
+ * \param optimized_or_none If the key describes an optimized shader variant and
+ * the compilation isn't finished, don't select any
+ * shader and return an error.
+ */
+int si_shader_select_with_key(struct si_screen *sscreen,
+ struct si_shader_ctx_state *state,
+ struct si_compiler_ctx_state *compiler_state,
+ struct si_shader_key *key,
+ int thread_index,
+ bool optimized_or_none)
{
struct si_shader_selector *sel = state->cso;
struct si_shader_selector *previous_stage_sel = NULL;
memcmp(&current->key, key, sizeof(*key)) == 0)) {
if (unlikely(!util_queue_fence_is_signalled(&current->ready))) {
if (current->is_optimized) {
+ if (optimized_or_none)
+ return -1;
+
memset(&key->opt, 0, sizeof(key->opt));
goto current_not_ready;
}
* shader so as not to cause a stall due to compilation.
*/
if (iter->is_optimized) {
+ if (optimized_or_none)
+ return -1;
memset(&key->opt, 0, sizeof(key->opt));
goto again;
}
util_queue_fence_wait(&previous_stage_sel->ready);
}
- /* Compile the main shader part if it doesn't exist. This can happen
- * if the initial guess was wrong. */
bool is_pure_monolithic =
sscreen->use_monolithic_shaders ||
memcmp(&key->mono, &zeroed.mono, sizeof(key->mono)) != 0;
+ /* Compile the main shader part if it doesn't exist. This can happen
+ * if the initial guess was wrong.
+ */
if (!is_pure_monolithic) {
bool ok;
memcmp(&key->opt, &zeroed.opt, sizeof(key->opt)) != 0;
/* If it's an optimized shader, compile it asynchronously. */
- if (shader->is_optimized &&
- !is_pure_monolithic &&
- thread_index < 0) {
+ if (shader->is_optimized && thread_index < 0) {
/* Compile it asynchronously. */
util_queue_add_job(&sscreen->shader_compiler_queue_low_priority,
shader, &shader->ready,
/* Use the default (unoptimized) shader for now. */
memset(&key->opt, 0, sizeof(key->opt));
mtx_unlock(&sel->mutex);
+
+ if (sscreen->options.sync_compile)
+ util_queue_fence_wait(&shader->ready);
+
+ if (optimized_or_none)
+ return -1;
goto again;
}
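/* Usage sketch (hypothetical caller, not in this patch): a hot path that
 * must never stall on compilation can probe for the optimized variant and
 * fall back when it is not ready yet:
 *
 *   if (si_shader_select_with_key(sscreen, state, compiler_state,
 *                                 &key, -1, true) != 0) {
 *       ...optimized variant still compiling; keep the current one...
 *   }
 */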
si_shader_selector_key(ctx, state->cso, &key);
return si_shader_select_with_key(sctx->screen, state, compiler_state,
- &key, -1);
+ &key, -1, false);
}
static void si_parse_next_shader_property(const struct tgsi_shader_info *info,
assert(thread_index < ARRAY_SIZE(sscreen->compiler));
compiler = &sscreen->compiler[thread_index];
+ if (sel->nir)
+ si_lower_nir(sel);
+
/* Compile the main shader part for use with a prolog and/or epilog.
* If this fails, the driver will try to compile a monolithic shader
* on demand.
util_queue_fence_init(ready_fence);
struct util_async_debug_callback async_debug;
- bool wait =
+ bool debug =
(sctx->debug.debug_message && !sctx->debug.async) ||
sctx->is_debug ||
si_can_dump_shader(sctx->screen, processor);
- if (wait) {
+ if (debug) {
u_async_debug_init(&async_debug);
compiler_ctx_state->debug = async_debug.base;
}
util_queue_add_job(&sctx->screen->shader_compiler_queue, job,
ready_fence, execute, NULL);
- if (wait) {
+ if (debug) {
util_queue_fence_wait(ready_fence);
u_async_debug_drain(&async_debug, &sctx->debug);
u_async_debug_cleanup(&async_debug);
}
+
+ if (sctx->screen->options.sync_compile)
+ util_queue_fence_wait(ready_fence);
}
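/* Annotation: the flow above submits the compile job asynchronously and only
 * blocks when debug output must be captured (or sync_compile is set), so
 * messages gathered by u_async_debug can be forwarded to the context:
 *
 *   u_async_debug_init(&async_debug);            capture messages
 *   util_queue_add_job(queue, job, fence, ...);  compile off-thread
 *   util_queue_fence_wait(fence);                block until finished
 *   u_async_debug_drain(&async_debug, &sctx->debug);
 *   u_async_debug_cleanup(&async_debug);
 */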
/* Return descriptor slot usage masks from the given shader info. */
sel->nir = state->ir.nir;
+ si_nir_opts(sel->nir);
si_nir_scan_shader(sel->nir, &sel->info);
- si_nir_scan_tess_ctrl(sel->nir, &sel->info, &sel->tcs_info);
-
- si_lower_nir(sel);
+ si_nir_scan_tess_ctrl(sel->nir, &sel->tcs_info);
}
sel->type = sel->info.processor;
switch (shader->selector->type) {
case PIPE_SHADER_VERTEX:
if (shader->key.as_ls) {
- assert(sctx->chip_class <= VI);
+ assert(sctx->chip_class <= GFX8);
si_pm4_delete_state(sctx, ls, shader->pm4);
} else if (shader->key.as_es) {
- assert(sctx->chip_class <= VI);
+ assert(sctx->chip_class <= GFX8);
si_pm4_delete_state(sctx, es, shader->pm4);
} else {
si_pm4_delete_state(sctx, vs, shader->pm4);
break;
case PIPE_SHADER_TESS_EVAL:
if (shader->key.as_es) {
- assert(sctx->chip_class <= VI);
+ assert(sctx->chip_class <= GFX8);
si_pm4_delete_state(sctx, es, shader->pm4);
} else {
si_pm4_delete_state(sctx, vs, shader->pm4);
/* R_028644_SPI_PS_INPUT_CNTL_0 */
/* Dota 2: Only ~16% of SPI map updates set different values. */
/* Talos: Only ~9% of SPI map updates set different values. */
+ unsigned initial_cdw = sctx->gfx_cs->current.cdw;
radeon_opt_set_context_regn(sctx, R_028644_SPI_PS_INPUT_CNTL_0,
spi_ps_input_cntl,
sctx->tracked_regs.spi_ps_input_cntl, num_interp);
+
+ if (initial_cdw != sctx->gfx_cs->current.cdw)
+ sctx->context_roll = true;
}
/**
unsigned num_se = sctx->screen->info.max_se;
unsigned wave_size = 64;
unsigned max_gs_waves = 32 * num_se; /* max 32 per SE on GCN */
- /* On SI-CI, the value comes from VGT_GS_VERTEX_REUSE = 16.
- * On VI+, the value comes from VGT_VERTEX_REUSE_BLOCK_CNTL = 30 (+2).
+ /* On GFX6-GFX7, the value comes from VGT_GS_VERTEX_REUSE = 16.
+ * On GFX8+, the value comes from VGT_VERTEX_REUSE_BLOCK_CNTL = 30 (+2).
*/
- unsigned gs_vertex_reuse = (sctx->chip_class >= VI ? 32 : 16) * num_se;
+ unsigned gs_vertex_reuse = (sctx->chip_class >= GFX8 ? 32 : 16) * num_se;
unsigned alignment = 256 * num_se;
/* The maximum size is 63.999 MB per SE. */
unsigned max_size = ((unsigned)(63.999 * 1024 * 1024) & ~255) * num_se;
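/* Arithmetic check: (unsigned)(63.999 * 1024 * 1024) truncates to 67107815,
 * and & ~255 rounds that down to 67107584, i.e. 64 MiB minus 1280 bytes per
 * SE, preserving 256-byte alignment. */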
*
* GFX9 doesn't have the ESGS ring.
*/
- bool update_esgs = sctx->chip_class <= VI &&
+ bool update_esgs = sctx->chip_class <= GFX8 &&
esgs_ring_size &&
(!sctx->esgs_ring ||
sctx->esgs_ring->width0 < esgs_ring_size);
if (!pm4)
return false;
- if (sctx->chip_class >= CIK) {
+ if (sctx->chip_class >= GFX7) {
if (sctx->esgs_ring) {
- assert(sctx->chip_class <= VI);
+ assert(sctx->chip_class <= GFX8);
si_pm4_set_reg(pm4, R_030900_VGT_ESGS_RING_SIZE,
sctx->esgs_ring->width0 / 256);
}
/* Set ring bindings. */
if (sctx->esgs_ring) {
- assert(sctx->chip_class <= VI);
+ assert(sctx->chip_class <= GFX8);
si_set_ring_buffer(sctx, SI_ES_RING_ESGS,
sctx->esgs_ring, 0, sctx->esgs_ring->width0,
true, true, 4, 64, 0);
/* Update the shader state to use the new shader bo. */
si_shader_init_pm4_state(sctx->screen, shader);
- r600_resource_reference(&shader->scratch_bo, sctx->scratch_buffer);
+ si_resource_reference(&shader->scratch_bo, sctx->scratch_buffer);
si_shader_unlock(shader);
return 1;
if (scratch_needed_size > 0) {
if (scratch_needed_size > current_scratch_buffer_size) {
/* Create a bigger scratch buffer */
- r600_resource_reference(&sctx->scratch_buffer, NULL);
+ si_resource_reference(&sctx->scratch_buffer, NULL);
sctx->scratch_buffer =
si_aligned_buffer_create(&sctx->screen->b,
si_init_config_add_vgt_flush(sctx);
- si_pm4_add_bo(sctx->init_config, r600_resource(sctx->tess_rings),
+ si_pm4_add_bo(sctx->init_config, si_resource(sctx->tess_rings),
RADEON_USAGE_READWRITE, RADEON_PRIO_SHADER_RINGS);
- uint64_t factor_va = r600_resource(sctx->tess_rings)->gpu_address +
+ uint64_t factor_va = si_resource(sctx->tess_rings)->gpu_address +
sctx->screen->tess_offchip_ring_size;
/* Append these registers to the init config state. */
- if (sctx->chip_class >= CIK) {
+ if (sctx->chip_class >= GFX7) {
si_pm4_set_reg(sctx->init_config, R_030938_VGT_TF_RING_SIZE,
S_030938_SIZE(sctx->screen->tess_factor_ring_size / 4));
si_pm4_set_reg(sctx->init_config, R_030940_VGT_TF_MEMORY_BASE,
}
/* VS as LS */
- if (sctx->chip_class <= VI) {
+ if (sctx->chip_class <= GFX8) {
r = si_shader_select(ctx, &sctx->vs_shader,
&compiler_state);
if (r)
if (sctx->gs_shader.cso) {
/* TES as ES */
- if (sctx->chip_class <= VI) {
+ if (sctx->chip_class <= GFX8) {
r = si_shader_select(ctx, &sctx->tes_shader,
&compiler_state);
if (r)
si_pm4_bind_state(sctx, vs, sctx->tes_shader.current->pm4);
}
} else if (sctx->gs_shader.cso) {
- if (sctx->chip_class <= VI) {
+ if (sctx->chip_class <= GFX8) {
/* VS as ES */
r = si_shader_select(ctx, &sctx->vs_shader,
&compiler_state);
return false;
} else {
si_pm4_bind_state(sctx, gs, NULL);
- if (sctx->chip_class <= VI)
+ if (sctx->chip_class <= GFX8)
si_pm4_bind_state(sctx, es, NULL);
}
sctx->smoothing_enabled = sctx->ps_shader.current->key.part.ps.epilog.poly_line_smoothing;
si_mark_atom_dirty(sctx, &sctx->atoms.s.msaa_config);
- if (sctx->chip_class == SI)
+ if (sctx->chip_class == GFX6)
si_mark_atom_dirty(sctx, &sctx->atoms.s.db_render_state);
if (sctx->framebuffer.nr_samples <= 1)
return false;
}
- if (sctx->chip_class >= CIK) {
+ if (sctx->chip_class >= GFX7) {
if (si_pm4_state_enabled_and_changed(sctx, ls))
sctx->prefetch_L2_mask |= SI_PREFETCH_LS;
else if (!sctx->queued.named.ls)