break;
/* fall through */
default:
- if (shader->key.opt.hw_vs.kill_outputs &
+ if (shader->key.opt.kill_outputs &
(1ull << si_shader_io_get_unique_index(semantic_name, semantic_index)))
export_param = false;
}
target = V_008DFC_SQ_EXP_POS;
break;
case TGSI_SEMANTIC_CLIPDIST:
- if (shader->key.opt.hw_vs.clip_disable) {
+ if (shader->key.opt.clip_disable) {
semantic_name = TGSI_SEMANTIC_GENERIC;
goto handle_semantic;
}
target = V_008DFC_SQ_EXP_POS + 2 + semantic_index;
break;
case TGSI_SEMANTIC_CLIPVERTEX:
- if (shader->key.opt.hw_vs.clip_disable)
+ if (shader->key.opt.clip_disable)
continue;
si_llvm_emit_clipvertex(bld_base, pos_args, outputs[i].values);
continue;
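For context on the kill_outputs test at the top of this section: the key is a bitmask indexed by si_shader_io_get_unique_index(), and a set bit means the linked consumer never reads that output, so its PARAM export is skipped. A minimal standalone sketch of that test follows; the helper name and index are stand-ins, not the driver's code.

#include <stdbool.h>
#include <stdint.h>

/* Sketch only: unique_index stands in for whatever
 * si_shader_io_get_unique_index(semantic_name, semantic_index) returns. */
static bool keep_param_export(uint64_t kill_outputs, unsigned unique_index)
{
	/* A set bit in kill_outputs means the next stage never reads
	 * this output, so its PARAM export can be dropped. */
	return !(kill_outputs & (1ull << unique_index));
}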
processor == PIPE_SHADER_TESS_EVAL ||
processor == PIPE_SHADER_VERTEX) &&
!key->as_es && !key->as_ls) {
- fprintf(f, " opt.hw_vs.kill_outputs = 0x%"PRIx64"\n", key->opt.hw_vs.kill_outputs);
- fprintf(f, " opt.hw_vs.clip_disable = %u\n", key->opt.hw_vs.clip_disable);
+ fprintf(f, " opt.kill_outputs = 0x%"PRIx64"\n", key->opt.kill_outputs);
+ fprintf(f, " opt.clip_disable = %u\n", key->opt.clip_disable);
}
}
/* Optimization flags for asynchronous compilation only. */
struct {
- struct {
- uint64_t kill_outputs; /* "get_unique_index" bits */
- unsigned clip_disable:1;
- } hw_vs; /* HW VS (it can be VS, TES, GS) */
+ /* For HW VS (it can be VS, TES, GS) */
+ uint64_t kill_outputs; /* "get_unique_index" bits */
+ unsigned clip_disable:1;
/* For shaders where monolithic variants have better code.
*
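For reference, a minimal sketch of how this part of the key reads once the hunk above is applied; the member names come from the diff, but the surrounding fields of si_shader_key are elided and the struct is renamed to make clear it is not the real definition.

/* Sketch only: other members of the real si_shader_key (and of opt) are omitted. */
struct si_shader_key_sketch {
	/* Optimization flags for asynchronous compilation only. */
	struct {
		/* For HW VS (it can be VS, TES, GS) */
		uint64_t kill_outputs; /* "get_unique_index" bits */
		unsigned clip_disable:1;
		/* ... monolithic-only flags follow in the real struct ... */
	} opt;
};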
unsigned culldist_mask = vs_sel->culldist_mask;
unsigned total_mask;
- if (vs->key.opt.hw_vs.clip_disable) {
+ if (vs->key.opt.clip_disable) {
assert(!info->culldist_writemask);
clipdist_mask = 0;
culldist_mask = 0;
{
struct si_shader_selector *ps = sctx->ps_shader.cso;
- key->opt.hw_vs.clip_disable =
+ key->opt.clip_disable =
sctx->queued.named.rasterizer->clip_plane_enable == 0 &&
(vs->info.clipdist_writemask ||
vs->info.writes_clipvertex) &&
uint64_t linked = outputs_written & inputs_read;
- key->opt.hw_vs.kill_outputs = ~linked & outputs_written;
+ key->opt.kill_outputs = ~linked & outputs_written;
}
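The key computation above kills every output that the linked consumer never reads. A worked example of the same bit arithmetic, with hypothetical masks:

/* Hypothetical masks, for illustration only. */
uint64_t outputs_written = 0xf;  /* producer writes unique indices 0-3 */
uint64_t inputs_read     = 0x5;  /* consumer reads indices 0 and 2 */
uint64_t linked          = outputs_written & inputs_read;  /* 0x5 */
uint64_t kill_outputs    = ~linked & outputs_written;      /* 0xa: kill indices 1 and 3 */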
/* Compute the key for the hw shader variant */
old_hw_vs->culldist_mask != next_hw_vs->culldist_mask ||
!old_hw_vs_variant ||
!next_hw_vs_variant ||
- old_hw_vs_variant->key.opt.hw_vs.clip_disable !=
- next_hw_vs_variant->key.opt.hw_vs.clip_disable))
+ old_hw_vs_variant->key.opt.clip_disable !=
+ next_hw_vs_variant->key.opt.clip_disable))
si_mark_atom_dirty(sctx, &sctx->clip_regs);
}
struct si_compiler_ctx_state compiler_state;
struct si_state_rasterizer *rs = sctx->queued.named.rasterizer;
struct si_shader *old_vs = si_get_vs_state(sctx);
- bool old_clip_disable = old_vs ? old_vs->key.opt.hw_vs.clip_disable : false;
+ bool old_clip_disable = old_vs ? old_vs->key.opt.clip_disable : false;
struct si_shader *old_ps = sctx->ps_shader.current;
unsigned old_spi_shader_col_format =
old_ps ? old_ps->key.part.ps.epilog.spi_shader_col_format : 0;
si_update_vgt_shader_config(sctx);
- if (old_clip_disable != si_get_vs_state(sctx)->key.opt.hw_vs.clip_disable)
+ if (old_clip_disable != si_get_vs_state(sctx)->key.opt.clip_disable)
si_mark_atom_dirty(sctx, &sctx->clip_regs);
if (sctx->ps_shader.cso) {