/* SHADER STATES */
static void si_set_tesseval_regs(struct si_screen *sscreen,
- struct si_shader_selector *tes,
+ const struct si_shader_selector *tes,
struct si_pm4_state *pm4)
{
- struct tgsi_shader_info *info = &tes->info;
+ const struct tgsi_shader_info *info = &tes->info;
unsigned tes_prim_mode = info->properties[TGSI_PROPERTY_TES_PRIM_MODE];
unsigned tes_spacing = info->properties[TGSI_PROPERTY_TES_SPACING];
bool tes_vertex_order_cw = info->properties[TGSI_PROPERTY_TES_VERTEX_ORDER_CW];
} else
distribution_mode = V_028B6C_DISTRIBUTION_MODE_NO_DIST;
- si_pm4_set_reg(pm4, R_028B6C_VGT_TF_PARAM,
- S_028B6C_TYPE(type) |
- S_028B6C_PARTITIONING(partitioning) |
- S_028B6C_TOPOLOGY(topology) |
- S_028B6C_DISTRIBUTION_MODE(distribution_mode));
+ assert(pm4->shader);
+ pm4->shader->vgt_tf_param = S_028B6C_TYPE(type) |
+ S_028B6C_PARTITIONING(partitioning) |
+ S_028B6C_TOPOLOGY(topology) |
+ S_028B6C_DISTRIBUTION_MODE(distribution_mode);
}
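
The S_028B6C_* helpers above come from Mesa's generated register headers: each one masks a field value to its width and shifts it to its offset, so OR-ing them composes the final VGT_TF_PARAM word that the emit callbacks below write at draw time. A minimal sketch of the pattern, with widths and shifts assumed for illustration rather than copied from sid.h:

	/* Illustrative only -- the authoritative definitions are generated
	 * into sid.h. S_<reg>_<FIELD>() packs a field, and the V_<reg>_*
	 * constants (e.g. V_028B6C_DISTRIBUTION_MODE_NO_DIST) are the
	 * values fed into it. Widths/offsets below are assumptions. */
	#define S_028B6C_TYPE(x)              (((unsigned)(x) & 0x3) << 0)
	#define S_028B6C_PARTITIONING(x)      (((unsigned)(x) & 0x7) << 2)
	#define S_028B6C_TOPOLOGY(x)          (((unsigned)(x) & 0x7) << 5)
	#define S_028B6C_DISTRIBUTION_MODE(x) (((unsigned)(x) & 0x3) << 17)
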
/* Polaris needs different VTX_REUSE_DEPTH settings depending on
PIPE_TESS_SPACING_FRACTIONAL_ODD)
vtx_reuse_depth = 14;
- si_pm4_set_reg(pm4, R_028C58_VGT_VERTEX_REUSE_BLOCK_CNTL,
- vtx_reuse_depth);
+ assert(pm4->shader);
+ pm4->shader->vgt_vertex_reuse_block_cntl = vtx_reuse_depth;
}
}
static unsigned si_get_num_vs_user_sgprs(unsigned num_always_on_user_sgprs)
{
/* Add the pointer to VBO descriptors. */
- if (HAVE_32BIT_POINTERS) {
- return num_always_on_user_sgprs + 1;
- } else {
- assert(num_always_on_user_sgprs % 2 == 0);
- return num_always_on_user_sgprs + 2;
- }
+ return num_always_on_user_sgprs + 1;
}
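
The collapsed branch encodes that descriptor pointers are now always 32-bit: the pointer to the VBO descriptors costs exactly one user SGPR, whereas the removed path modeled 64-bit pointers, which occupy an SGPR pair and must start on an even SGPR (hence the alignment assert). A worked example with an assumed always-on count:

	/* Illustrative: with 4 always-on user SGPRs, the function now
	 * returns 5 (one SGPR for the 32-bit descriptor pointer); the
	 * removed 64-bit path asserted the count was even and returned
	 * 6 (an aligned SGPR pair). */
	unsigned n = si_get_num_vs_user_sgprs(4); /* n == 5 */
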
static void si_shader_ls(struct si_screen *sscreen, struct si_shader *shader)
static void si_emit_shader_es(struct si_context *sctx)
{
struct si_shader *shader = sctx->queued.named.es->shader;
+ unsigned initial_cdw = sctx->gfx_cs->current.cdw;
+
if (!shader)
return;
radeon_opt_set_context_reg(sctx, R_028AAC_VGT_ESGS_RING_ITEMSIZE,
SI_TRACKED_VGT_ESGS_RING_ITEMSIZE,
shader->selector->esgs_itemsize / 4);
+
+ if (shader->selector->type == PIPE_SHADER_TESS_EVAL)
+ radeon_opt_set_context_reg(sctx, R_028B6C_VGT_TF_PARAM,
+ SI_TRACKED_VGT_TF_PARAM,
+ shader->vgt_tf_param);
+
+ if (shader->vgt_vertex_reuse_block_cntl)
+ radeon_opt_set_context_reg(sctx, R_028C58_VGT_VERTEX_REUSE_BLOCK_CNTL,
+ SI_TRACKED_VGT_VERTEX_REUSE_BLOCK_CNTL,
+ shader->vgt_vertex_reuse_block_cntl);
+
+ if (initial_cdw != sctx->gfx_cs->current.cdw)
+ sctx->context_roll = true;
}
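
The initial_cdw bookkeeping relies on radeon_opt_set_context_reg filtering redundant writes against shadowed register state: the command-stream write pointer (cdw) advances only when a packet was actually emitted, and any real context-register write is precisely what constitutes a context roll. A simplified sketch of the helper's assumed shape (the real one lives in the driver's PM4 emission helpers):

	/* Sketch, shape assumed: emit SET_CONTEXT_REG only if the register
	 * was never saved or its shadowed value differs. cs->current.cdw
	 * then moves exactly when something was written, which the
	 * initial_cdw comparison above turns into sctx->context_roll. */
	static inline void radeon_opt_set_context_reg(struct si_context *sctx,
						      unsigned offset,
						      enum si_tracked_reg reg,
						      unsigned value)
	{
		struct radeon_cmdbuf *cs = sctx->gfx_cs;

		if (!(sctx->tracked_regs.reg_saved & (1ull << reg)) ||
		    sctx->tracked_regs.reg_value[reg] != value) {
			radeon_set_context_reg(cs, offset, value);
			sctx->tracked_regs.reg_saved |= 1ull << reg;
			sctx->tracked_regs.reg_value[reg] = value;
		}
	}
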
static void si_shader_es(struct si_screen *sscreen, struct si_shader *shader)
static void si_emit_shader_gs(struct si_context *sctx)
{
struct si_shader *shader = sctx->queued.named.gs->shader;
+ unsigned initial_cdw = sctx->gfx_cs->current.cdw;
+
if (!shader)
return;
radeon_opt_set_context_reg(sctx, R_028AAC_VGT_ESGS_RING_ITEMSIZE,
SI_TRACKED_VGT_ESGS_RING_ITEMSIZE,
shader->ctx_reg.gs.vgt_esgs_ring_itemsize);
+
+ if (shader->key.part.gs.es->type == PIPE_SHADER_TESS_EVAL)
+ radeon_opt_set_context_reg(sctx, R_028B6C_VGT_TF_PARAM,
+ SI_TRACKED_VGT_TF_PARAM,
+ shader->vgt_tf_param);
+ if (shader->vgt_vertex_reuse_block_cntl)
+ radeon_opt_set_context_reg(sctx, R_028C58_VGT_VERTEX_REUSE_BLOCK_CNTL,
+ SI_TRACKED_VGT_VERTEX_REUSE_BLOCK_CNTL,
+ shader->vgt_vertex_reuse_block_cntl);
}
+
+ if (initial_cdw != sctx->gfx_cs->current.cdw)
+ sctx->context_roll = true;
}
static void si_shader_gs(struct si_screen *sscreen, struct si_shader *shader)
}
}
+static void si_emit_shader_vs(struct si_context *sctx)
+{
+ struct si_shader *shader = sctx->queued.named.vs->shader;
+ unsigned initial_cdw = sctx->gfx_cs->current.cdw;
+
+ if (!shader)
+ return;
+
+ radeon_opt_set_context_reg(sctx, R_028A40_VGT_GS_MODE,
+ SI_TRACKED_VGT_GS_MODE,
+ shader->ctx_reg.vs.vgt_gs_mode);
+ radeon_opt_set_context_reg(sctx, R_028A84_VGT_PRIMITIVEID_EN,
+ SI_TRACKED_VGT_PRIMITIVEID_EN,
+ shader->ctx_reg.vs.vgt_primitiveid_en);
+
+ if (sctx->chip_class <= VI) {
+ radeon_opt_set_context_reg(sctx, R_028AB4_VGT_REUSE_OFF,
+ SI_TRACKED_VGT_REUSE_OFF,
+ shader->ctx_reg.vs.vgt_reuse_off);
+ }
+
+ radeon_opt_set_context_reg(sctx, R_0286C4_SPI_VS_OUT_CONFIG,
+ SI_TRACKED_SPI_VS_OUT_CONFIG,
+ shader->ctx_reg.vs.spi_vs_out_config);
+
+ radeon_opt_set_context_reg(sctx, R_02870C_SPI_SHADER_POS_FORMAT,
+ SI_TRACKED_SPI_SHADER_POS_FORMAT,
+ shader->ctx_reg.vs.spi_shader_pos_format);
+
+ radeon_opt_set_context_reg(sctx, R_028818_PA_CL_VTE_CNTL,
+ SI_TRACKED_PA_CL_VTE_CNTL,
+ shader->ctx_reg.vs.pa_cl_vte_cntl);
+
+ if (shader->selector->type == PIPE_SHADER_TESS_EVAL)
+ radeon_opt_set_context_reg(sctx, R_028B6C_VGT_TF_PARAM,
+ SI_TRACKED_VGT_TF_PARAM,
+ shader->vgt_tf_param);
+
+ if (shader->vgt_vertex_reuse_block_cntl)
+ radeon_opt_set_context_reg(sctx, R_028C58_VGT_VERTEX_REUSE_BLOCK_CNTL,
+ SI_TRACKED_VGT_VERTEX_REUSE_BLOCK_CNTL,
+ shader->vgt_vertex_reuse_block_cntl);
+
+ if (initial_cdw != sctx->gfx_cs->current.cdw)
+ sctx->context_roll = true;
+}
+
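Taken together, these emit callbacks replace register writes that used to be baked into the shader variant's PM4 state and replayed verbatim on every bind. A rough before/after for one register, restating the two paths this patch switches between:

	/* Old: frozen into the pm4 buffer when the variant is built.
	 *	si_pm4_set_reg(pm4, R_028818_PA_CL_VTE_CNTL, vte_cntl);
	 *
	 * New: stored on the shader, emitted at draw time via the atom,
	 * with redundancy filtering against sctx->tracked_regs:
	 *	shader->ctx_reg.vs.pa_cl_vte_cntl = vte_cntl;   (build time)
	 *	pm4->atom.emit = si_emit_shader_vs;             (registered once)
	 *	radeon_opt_set_context_reg(sctx, R_028818_PA_CL_VTE_CNTL,
	 *				   SI_TRACKED_PA_CL_VTE_CNTL,
	 *				   shader->ctx_reg.vs.pa_cl_vte_cntl);
	 */
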
/**
* Compute the state for \p shader, which will run as a vertex shader on the
* hardware.
{
const struct tgsi_shader_info *info = &shader->selector->info;
struct si_pm4_state *pm4;
- unsigned num_user_sgprs;
- unsigned nparams, vgpr_comp_cnt;
+ unsigned num_user_sgprs, vgpr_comp_cnt;
uint64_t va;
- unsigned oc_lds_en;
+ unsigned nparams, oc_lds_en;
unsigned window_space =
- info->properties[TGSI_PROPERTY_VS_WINDOW_SPACE_POSITION];
+ info->properties[TGSI_PROPERTY_VS_WINDOW_SPACE_POSITION];
bool enable_prim_id = shader->key.mono.u.vs_export_prim_id || info->uses_primid;
pm4 = si_get_shader_pm4_state(shader);
if (!pm4)
return;
+ pm4->atom.emit = si_emit_shader_vs;
+
/* We always write VGT_GS_MODE in the VS state, because every switch
* between different shader pipelines involving a different GS or no
* GS at all involves a switch of the VS (different GS use different
if (enable_prim_id)
mode = V_028A40_GS_SCENARIO_A;
- si_pm4_set_reg(pm4, R_028A40_VGT_GS_MODE, S_028A40_MODE(mode));
- si_pm4_set_reg(pm4, R_028A84_VGT_PRIMITIVEID_EN, enable_prim_id);
+ shader->ctx_reg.vs.vgt_gs_mode = S_028A40_MODE(mode);
+ shader->ctx_reg.vs.vgt_primitiveid_en = enable_prim_id;
} else {
- si_pm4_set_reg(pm4, R_028A40_VGT_GS_MODE,
- ac_vgt_gs_mode(gs->gs_max_out_vertices,
- sscreen->info.chip_class));
- si_pm4_set_reg(pm4, R_028A84_VGT_PRIMITIVEID_EN, 0);
+ shader->ctx_reg.vs.vgt_gs_mode = ac_vgt_gs_mode(gs->gs_max_out_vertices,
+ sscreen->info.chip_class);
+ shader->ctx_reg.vs.vgt_primitiveid_en = 0;
}
if (sscreen->info.chip_class <= VI) {
/* Reuse needs to be set off if we write oViewport. */
- si_pm4_set_reg(pm4, R_028AB4_VGT_REUSE_OFF,
- S_028AB4_REUSE_OFF(info->writes_viewport_index));
+ shader->ctx_reg.vs.vgt_reuse_off =
+ S_028AB4_REUSE_OFF(info->writes_viewport_index);
}
va = shader->bo->gpu_address;
/* VS is required to export at least one param. */
nparams = MAX2(shader->info.nr_param_exports, 1);
- si_pm4_set_reg(pm4, R_0286C4_SPI_VS_OUT_CONFIG,
- S_0286C4_VS_EXPORT_COUNT(nparams - 1));
-
- si_pm4_set_reg(pm4, R_02870C_SPI_SHADER_POS_FORMAT,
- S_02870C_POS0_EXPORT_FORMAT(V_02870C_SPI_SHADER_4COMP) |
- S_02870C_POS1_EXPORT_FORMAT(shader->info.nr_pos_exports > 1 ?
- V_02870C_SPI_SHADER_4COMP :
- V_02870C_SPI_SHADER_NONE) |
- S_02870C_POS2_EXPORT_FORMAT(shader->info.nr_pos_exports > 2 ?
- V_02870C_SPI_SHADER_4COMP :
- V_02870C_SPI_SHADER_NONE) |
- S_02870C_POS3_EXPORT_FORMAT(shader->info.nr_pos_exports > 3 ?
- V_02870C_SPI_SHADER_4COMP :
- V_02870C_SPI_SHADER_NONE));
+ shader->ctx_reg.vs.spi_vs_out_config = S_0286C4_VS_EXPORT_COUNT(nparams - 1);
+
+ shader->ctx_reg.vs.spi_shader_pos_format =
+ S_02870C_POS0_EXPORT_FORMAT(V_02870C_SPI_SHADER_4COMP) |
+ S_02870C_POS1_EXPORT_FORMAT(shader->info.nr_pos_exports > 1 ?
+ V_02870C_SPI_SHADER_4COMP :
+ V_02870C_SPI_SHADER_NONE) |
+ S_02870C_POS2_EXPORT_FORMAT(shader->info.nr_pos_exports > 2 ?
+ V_02870C_SPI_SHADER_4COMP :
+ V_02870C_SPI_SHADER_NONE) |
+ S_02870C_POS3_EXPORT_FORMAT(shader->info.nr_pos_exports > 3 ?
+ V_02870C_SPI_SHADER_4COMP :
+ V_02870C_SPI_SHADER_NONE);
oc_lds_en = shader->selector->type == PIPE_SHADER_TESS_EVAL ? 1 : 0;
S_00B12C_SO_BASE3_EN(!!shader->selector->so.stride[3]) |
S_00B12C_SO_EN(!!shader->selector->so.num_outputs) |
S_00B12C_SCRATCH_EN(shader->config.scratch_bytes_per_wave > 0));
+
if (window_space)
- si_pm4_set_reg(pm4, R_028818_PA_CL_VTE_CNTL,
- S_028818_VTX_XY_FMT(1) | S_028818_VTX_Z_FMT(1));
+ shader->ctx_reg.vs.pa_cl_vte_cntl =
+ S_028818_VTX_XY_FMT(1) | S_028818_VTX_Z_FMT(1);
else
- si_pm4_set_reg(pm4, R_028818_PA_CL_VTE_CNTL,
- S_028818_VTX_W0_FMT(1) |
- S_028818_VPORT_X_SCALE_ENA(1) | S_028818_VPORT_X_OFFSET_ENA(1) |
- S_028818_VPORT_Y_SCALE_ENA(1) | S_028818_VPORT_Y_OFFSET_ENA(1) |
- S_028818_VPORT_Z_SCALE_ENA(1) | S_028818_VPORT_Z_OFFSET_ENA(1));
+ shader->ctx_reg.vs.pa_cl_vte_cntl =
+ S_028818_VTX_W0_FMT(1) |
+ S_028818_VPORT_X_SCALE_ENA(1) | S_028818_VPORT_X_OFFSET_ENA(1) |
+ S_028818_VPORT_Y_SCALE_ENA(1) | S_028818_VPORT_Y_OFFSET_ENA(1) |
+ S_028818_VPORT_Z_SCALE_ENA(1) | S_028818_VPORT_Z_OFFSET_ENA(1);
if (shader->selector->type == PIPE_SHADER_TESS_EVAL)
si_set_tesseval_regs(sscreen, shader->selector, pm4);
return value;
}
+static void si_emit_shader_ps(struct si_context *sctx)
+{
+ struct si_shader *shader = sctx->queued.named.ps->shader;
+ unsigned initial_cdw = sctx->gfx_cs->current.cdw;
+
+ if (!shader)
+ return;
+
+ /* R_0286CC_SPI_PS_INPUT_ENA, R_0286D0_SPI_PS_INPUT_ADDR */
+ radeon_opt_set_context_reg2(sctx, R_0286CC_SPI_PS_INPUT_ENA,
+ SI_TRACKED_SPI_PS_INPUT_ENA,
+ shader->ctx_reg.ps.spi_ps_input_ena,
+ shader->ctx_reg.ps.spi_ps_input_addr);
+
+ radeon_opt_set_context_reg(sctx, R_0286E0_SPI_BARYC_CNTL,
+ SI_TRACKED_SPI_BARYC_CNTL,
+ shader->ctx_reg.ps.spi_baryc_cntl);
+ radeon_opt_set_context_reg(sctx, R_0286D8_SPI_PS_IN_CONTROL,
+ SI_TRACKED_SPI_PS_IN_CONTROL,
+ shader->ctx_reg.ps.spi_ps_in_control);
+
+ /* R_028710_SPI_SHADER_Z_FORMAT, R_028714_SPI_SHADER_COL_FORMAT */
+ radeon_opt_set_context_reg2(sctx, R_028710_SPI_SHADER_Z_FORMAT,
+ SI_TRACKED_SPI_SHADER_Z_FORMAT,
+ shader->ctx_reg.ps.spi_shader_z_format,
+ shader->ctx_reg.ps.spi_shader_col_format);
+
+ radeon_opt_set_context_reg(sctx, R_02823C_CB_SHADER_MASK,
+ SI_TRACKED_CB_SHADER_MASK,
+ shader->ctx_reg.ps.cb_shader_mask);
+
+ if (initial_cdw != sctx->gfx_cs->current.cdw)
+ sctx->context_roll = true;
+}
+
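The reg2 variant leans on SET_CONTEXT_REG packets being able to load a run of consecutive registers: SPI_PS_INPUT_ENA (0x286CC) and SPI_PS_INPUT_ADDR (0x286D0) are adjacent, as are SPI_SHADER_Z_FORMAT and SPI_SHADER_COL_FORMAT, so each pair rides in a single packet whenever either value changed. A sketch under the same assumptions as the single-register version above:

	/* Assumed shape: if either shadowed slot is missing or stale,
	 * write both consecutive registers with one packet. */
	static inline void radeon_opt_set_context_reg2(struct si_context *sctx,
						       unsigned offset,
						       enum si_tracked_reg reg,
						       unsigned v1, unsigned v2)
	{
		uint64_t mask = 3ull << reg;

		if ((sctx->tracked_regs.reg_saved & mask) != mask ||
		    sctx->tracked_regs.reg_value[reg] != v1 ||
		    sctx->tracked_regs.reg_value[reg + 1] != v2) {
			radeon_set_context_reg_seq(sctx->gfx_cs, offset, 2);
			radeon_emit(sctx->gfx_cs, v1);
			radeon_emit(sctx->gfx_cs, v2);
			sctx->tracked_regs.reg_saved |= mask;
			sctx->tracked_regs.reg_value[reg] = v1;
			sctx->tracked_regs.reg_value[reg + 1] = v2;
		}
	}
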
static void si_shader_ps(struct si_shader *shader)
{
struct tgsi_shader_info *info = &shader->selector->info;
if (!pm4)
return;
+ pm4->atom.emit = si_emit_shader_ps;
+
/* SPI_BARYC_CNTL.POS_FLOAT_LOCATION
* Possible values:
* 0 -> Position = pixel center
!info->writes_z && !info->writes_stencil && !info->writes_samplemask)
spi_shader_col_format = V_028714_SPI_SHADER_32_R;
- si_pm4_set_reg(pm4, R_0286CC_SPI_PS_INPUT_ENA, input_ena);
- si_pm4_set_reg(pm4, R_0286D0_SPI_PS_INPUT_ADDR,
- shader->config.spi_ps_input_addr);
+ shader->ctx_reg.ps.spi_ps_input_ena = input_ena;
+ shader->ctx_reg.ps.spi_ps_input_addr = shader->config.spi_ps_input_addr;
/* Set interpolation controls. */
spi_ps_in_control = S_0286D8_NUM_INTERP(si_get_ps_num_interp(shader));
- /* Set registers. */
- si_pm4_set_reg(pm4, R_0286E0_SPI_BARYC_CNTL, spi_baryc_cntl);
- si_pm4_set_reg(pm4, R_0286D8_SPI_PS_IN_CONTROL, spi_ps_in_control);
-
- si_pm4_set_reg(pm4, R_028710_SPI_SHADER_Z_FORMAT,
- ac_get_spi_shader_z_format(info->writes_z,
- info->writes_stencil,
- info->writes_samplemask));
-
- si_pm4_set_reg(pm4, R_028714_SPI_SHADER_COL_FORMAT, spi_shader_col_format);
- si_pm4_set_reg(pm4, R_02823C_CB_SHADER_MASK, cb_shader_mask);
+ shader->ctx_reg.ps.spi_baryc_cntl = spi_baryc_cntl;
+ shader->ctx_reg.ps.spi_ps_in_control = spi_ps_in_control;
+ shader->ctx_reg.ps.spi_shader_z_format =
+ ac_get_spi_shader_z_format(info->writes_z,
+ info->writes_stencil,
+ info->writes_samplemask);
+ shader->ctx_reg.ps.spi_shader_col_format = spi_shader_col_format;
+ shader->ctx_reg.ps.cb_shader_mask = cb_shader_mask;
va = shader->bo->gpu_address;
si_pm4_add_bo(pm4, shader->bo, RADEON_USAGE_READ, RADEON_PRIO_SHADER_BINARY);
vs->info.properties[TGSI_PROPERTY_VS_BLIT_SGPRS])
return;
- prolog_key->instance_divisor_is_one =
- sctx->vertex_elements->instance_divisor_is_one;
- prolog_key->instance_divisor_is_fetched =
- sctx->vertex_elements->instance_divisor_is_fetched;
+ struct si_vertex_elements *elts = sctx->vertex_elements;
+
+ prolog_key->instance_divisor_is_one = elts->instance_divisor_is_one;
+ prolog_key->instance_divisor_is_fetched = elts->instance_divisor_is_fetched;
/* Prefer a monolithic shader to allow scheduling divisions around
* VBO loads. */
if (prolog_key->instance_divisor_is_fetched)
key->opt.prefer_mono = 1;
- unsigned count = MIN2(vs->info.num_inputs,
- sctx->vertex_elements->count);
- memcpy(key->mono.vs_fix_fetch, sctx->vertex_elements->fix_fetch, count);
+ unsigned count = MIN2(vs->info.num_inputs, elts->count);
+ memcpy(key->mono.vs_fix_fetch, elts->fix_fetch, count);
}
static void si_shader_selector_key_hw_vs(struct si_context *sctx,
key->part.ps.epilog.alpha_func = si_get_alpha_test_func(sctx);
/* ps_uses_fbfetch is true only if the color buffer is bound. */
- if (sctx->ps_uses_fbfetch) {
+ if (sctx->ps_uses_fbfetch && !sctx->blitter->running) {
struct pipe_surface *cb0 = sctx->framebuffer.state.cbufs[0];
struct pipe_resource *tex = cb0->texture;
/* Use the default (unoptimized) shader for now. */
memset(&key->opt, 0, sizeof(key->opt));
mtx_unlock(&sel->mutex);
+
+ if (sscreen->options.sync_compile)
+ util_queue_fence_wait(&shader->ready);
+
goto again;
}
assert(thread_index < ARRAY_SIZE(sscreen->compiler));
compiler = &sscreen->compiler[thread_index];
+ if (sel->nir)
+ si_lower_nir(sel);
+
/* Compile the main shader part for use with a prolog and/or epilog.
* If this fails, the driver will try to compile a monolithic shader
* on demand.
util_queue_fence_init(ready_fence);
struct util_async_debug_callback async_debug;
- bool wait =
+ bool debug =
(sctx->debug.debug_message && !sctx->debug.async) ||
sctx->is_debug ||
si_can_dump_shader(sctx->screen, processor);
- if (wait) {
+ if (debug) {
u_async_debug_init(&async_debug);
compiler_ctx_state->debug = async_debug.base;
}
util_queue_add_job(&sctx->screen->shader_compiler_queue, job,
ready_fence, execute, NULL);
- if (wait) {
+ if (debug) {
util_queue_fence_wait(ready_fence);
u_async_debug_drain(&async_debug, &sctx->debug);
u_async_debug_cleanup(&async_debug);
}
+
+ if (sctx->screen->options.sync_compile)
+ util_queue_fence_wait(ready_fence);
}
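
When sync_compile is set, the same fence that util_queue_add_job hands to the worker is waited on immediately, degrading the async path to a synchronous compile (handy for debugging compiler issues at a predictable point). Consumers elsewhere synchronize the same way, as the monolithic-fallback hunk above already does with shader->ready; a minimal usage sketch:

	/* Sketch: a thread that needs the finished binary blocks on the
	 * fence passed to util_queue_add_job; the queue signals it once
	 * execute() returns, and the wait is a no-op if already signaled. */
	static void wait_for_shader(struct si_shader *shader)
	{
		util_queue_fence_wait(&shader->ready);
		/* shader->bo and shader->config are now safe to read */
	}
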
/* Return descriptor slot usage masks from the given shader info. */
sel->nir = state->ir.nir;
+ si_nir_opts(sel->nir);
si_nir_scan_shader(sel->nir, &sel->info);
- si_nir_scan_tess_ctrl(sel->nir, &sel->info, &sel->tcs_info);
-
- si_lower_nir(sel);
+ si_nir_scan_tess_ctrl(sel->nir, &sel->tcs_info);
}
sel->type = sel->info.processor;
/* R_028644_SPI_PS_INPUT_CNTL_0 */
/* Dota 2: Only ~16% of SPI map updates set different values. */
/* Talos: Only ~9% of SPI map updates set different values. */
+ unsigned initial_cdw = sctx->gfx_cs->current.cdw;
radeon_opt_set_context_regn(sctx, R_028644_SPI_PS_INPUT_CNTL_0,
spi_ps_input_cntl,
sctx->tracked_regs.spi_ps_input_cntl, num_interp);
+
+ if (initial_cdw != sctx->gfx_cs->current.cdw)
+ sctx->context_roll = true;
}
/**
/* Update the shader state to use the new shader bo. */
si_shader_init_pm4_state(sctx->screen, shader);
- r600_resource_reference(&shader->scratch_bo, sctx->scratch_buffer);
+ si_resource_reference(&shader->scratch_bo, sctx->scratch_buffer);
si_shader_unlock(shader);
return 1;
if (scratch_needed_size > 0) {
if (scratch_needed_size > current_scratch_buffer_size) {
/* Create a bigger scratch buffer */
- r600_resource_reference(&sctx->scratch_buffer, NULL);
+ si_resource_reference(&sctx->scratch_buffer, NULL);
sctx->scratch_buffer =
si_aligned_buffer_create(&sctx->screen->b,
si_init_config_add_vgt_flush(sctx);
- si_pm4_add_bo(sctx->init_config, r600_resource(sctx->tess_rings),
+ si_pm4_add_bo(sctx->init_config, si_resource(sctx->tess_rings),
RADEON_USAGE_READWRITE, RADEON_PRIO_SHADER_RINGS);
- uint64_t factor_va = r600_resource(sctx->tess_rings)->gpu_address +
+ uint64_t factor_va = si_resource(sctx->tess_rings)->gpu_address +
sctx->screen->tess_offchip_ring_size;
/* Append these registers to the init config state. */