This moves the per-stage shader atoms into an array of hw stages.

Note this drops the 23 (num_dw) from the vertex shader atom; that
value is calculated internally when shaders are bound, so it is not
required here.
Signed-off-by: Dave Airlie <airlied@redhat.com>
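---
For context on the new indices: the diff below assumes a hw-stage enum
along these lines (a sketch inferred from the identifiers used in this
patch; the authoritative definition lives in r600_pipe.h). The
R600_NUM_ATOMS bump from 43 to 45 matches replacing four named shader
atoms with EG_NUM_HW_STAGES (six) array entries, presumably leaving
room for the evergreen-only stages:

    enum r600_hw_stage {
        R600_HW_STAGE_PS,
        R600_HW_STAGE_VS,
        R600_HW_STAGE_GS,
        R600_HW_STAGE_ES,
        R600_NUM_HW_STAGES,                        /* stages common to r600 */
        EG_NUM_HW_STAGES = R600_NUM_HW_STAGES + 2  /* + evergreen-only stages */
    };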
void evergreen_init_state_functions(struct r600_context *rctx)
{
unsigned id = 1;
-
+ unsigned i;
/* !!!
* To avoid GPU lockup, registers must be emitted in a specific order
* (no kidding ...). The order below is important and has been
r600_add_atom(rctx, &rctx->b.render_cond_atom, id++);
r600_add_atom(rctx, &rctx->b.streamout.begin_atom, id++);
r600_add_atom(rctx, &rctx->b.streamout.enable_atom, id++);
- r600_init_atom(rctx, &rctx->vertex_shader.atom, id++, r600_emit_shader, 23);
- r600_init_atom(rctx, &rctx->pixel_shader.atom, id++, r600_emit_shader, 0);
- r600_init_atom(rctx, &rctx->geometry_shader.atom, id++, r600_emit_shader, 0);
- r600_init_atom(rctx, &rctx->export_shader.atom, id++, r600_emit_shader, 0);
+ for (i = 0; i < EG_NUM_HW_STAGES; i++)
+ r600_init_atom(rctx, &rctx->hw_shader_stages[i].atom, id++, r600_emit_shader, 0);
r600_init_atom(rctx, &rctx->shader_stages.atom, id++, evergreen_emit_shader_stages, 6);
r600_init_atom(rctx, &rctx->gs_rings.atom, id++, evergreen_emit_gs_rings, 26);
r600_mark_atom_dirty(ctx, &ctx->db_misc_state.atom);
r600_mark_atom_dirty(ctx, &ctx->db_state.atom);
r600_mark_atom_dirty(ctx, &ctx->framebuffer.atom);
- r600_mark_atom_dirty(ctx, &ctx->pixel_shader.atom);
+ r600_mark_atom_dirty(ctx, &ctx->hw_shader_stages[R600_HW_STAGE_PS].atom);
r600_mark_atom_dirty(ctx, &ctx->poly_offset_state.atom);
r600_mark_atom_dirty(ctx, &ctx->vgt_state.atom);
r600_mark_atom_dirty(ctx, &ctx->sample_mask.atom);
}
r600_mark_atom_dirty(ctx, &ctx->stencil_ref.atom);
r600_mark_atom_dirty(ctx, &ctx->vertex_fetch_shader.atom);
- r600_mark_atom_dirty(ctx, &ctx->export_shader.atom);
+ r600_mark_atom_dirty(ctx, &ctx->hw_shader_stages[R600_HW_STAGE_ES].atom);
r600_mark_atom_dirty(ctx, &ctx->shader_stages.atom);
if (ctx->gs_shader) {
- r600_mark_atom_dirty(ctx, &ctx->geometry_shader.atom);
+ r600_mark_atom_dirty(ctx, &ctx->hw_shader_stages[R600_HW_STAGE_GS].atom);
r600_mark_atom_dirty(ctx, &ctx->gs_rings.atom);
}
- r600_mark_atom_dirty(ctx, &ctx->vertex_shader.atom);
+ r600_mark_atom_dirty(ctx, &ctx->hw_shader_stages[R600_HW_STAGE_VS].atom);
r600_mark_atom_dirty(ctx, &ctx->b.streamout.enable_atom);
r600_mark_atom_dirty(ctx, &ctx->b.render_cond_atom);
#include "tgsi/tgsi_scan.h"
-#define R600_NUM_ATOMS 43
+#define R600_NUM_ATOMS 45
#define R600_MAX_VIEWPORTS 16
struct r600_viewport_state viewport;
/* Shaders and shader resources. */
struct r600_cso_state vertex_fetch_shader;
- struct r600_shader_state vertex_shader;
- struct r600_shader_state pixel_shader;
- struct r600_shader_state geometry_shader;
- struct r600_shader_state export_shader;
+ struct r600_shader_state hw_shader_stages[EG_NUM_HW_STAGES];
struct r600_cs_shader_state cs_shader_state;
struct r600_shader_stages_state shader_stages;
struct r600_gs_rings_state gs_rings;
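The element type of the new array is the same r600_shader_state that
backed the old named fields, pairing an atom with the currently bound
shader; roughly (a sketch, see r600_pipe.h for the actual definition):

    struct r600_shader_state {
        struct r600_atom atom;
        struct r600_pipe_shader *shader;
    };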
void r600_init_state_functions(struct r600_context *rctx)
{
unsigned id = 1;
-
+ unsigned i;
/* !!!
* To avoid GPU lockup, registers must be emitted in a specific order
* (no kidding ...). The order below is important and has been
r600_add_atom(rctx, &rctx->b.render_cond_atom, id++);
r600_add_atom(rctx, &rctx->b.streamout.begin_atom, id++);
r600_add_atom(rctx, &rctx->b.streamout.enable_atom, id++);
- r600_init_atom(rctx, &rctx->vertex_shader.atom, id++, r600_emit_shader, 23);
- r600_init_atom(rctx, &rctx->pixel_shader.atom, id++, r600_emit_shader, 0);
- r600_init_atom(rctx, &rctx->geometry_shader.atom, id++, r600_emit_shader, 0);
- r600_init_atom(rctx, &rctx->export_shader.atom, id++, r600_emit_shader, 0);
+ for (i = 0; i < R600_NUM_HW_STAGES; i++)
+ r600_init_atom(rctx, &rctx->hw_shader_stages[i].atom, id++, r600_emit_shader, 0);
r600_init_atom(rctx, &rctx->shader_stages.atom, id++, r600_emit_shader_stages, 0);
r600_init_atom(rctx, &rctx->gs_rings.atom, id++, r600_emit_gs_rings, 0);
}
/* gs_shader provides GS and VS (copy shader) */
- if (unlikely(rctx->geometry_shader.shader != rctx->gs_shader->current)) {
- update_shader_atom(ctx, &rctx->geometry_shader, rctx->gs_shader->current);
- update_shader_atom(ctx, &rctx->vertex_shader, rctx->gs_shader->current->gs_copy_shader);
+ if (unlikely(rctx->hw_shader_stages[R600_HW_STAGE_GS].shader != rctx->gs_shader->current)) {
+ update_shader_atom(ctx, &rctx->hw_shader_stages[R600_HW_STAGE_GS], rctx->gs_shader->current);
+ update_shader_atom(ctx, &rctx->hw_shader_stages[R600_HW_STAGE_VS], rctx->gs_shader->current->gs_copy_shader);
/* Update clip misc state. */
r600_update_clip_state(rctx, rctx->gs_shader->current->gs_copy_shader);
rctx->b.streamout.enabled_stream_buffers_mask = rctx->gs_shader->current->gs_copy_shader->enabled_stream_buffers_mask;
return false;
/* vs_shader is used as ES */
- if (unlikely(vs_dirty || rctx->export_shader.shader != rctx->vs_shader->current)) {
- update_shader_atom(ctx, &rctx->export_shader, rctx->vs_shader->current);
+ if (unlikely(vs_dirty || rctx->hw_shader_stages[R600_HW_STAGE_ES].shader != rctx->vs_shader->current)) {
+ update_shader_atom(ctx, &rctx->hw_shader_stages[R600_HW_STAGE_ES], rctx->vs_shader->current);
}
} else {
- if (unlikely(rctx->geometry_shader.shader)) {
- update_shader_atom(ctx, &rctx->geometry_shader, NULL);
- update_shader_atom(ctx, &rctx->export_shader, NULL);
+ if (unlikely(rctx->hw_shader_stages[R600_HW_STAGE_GS].shader)) {
+ update_shader_atom(ctx, &rctx->hw_shader_stages[R600_HW_STAGE_GS], NULL);
+ update_shader_atom(ctx, &rctx->hw_shader_stages[R600_HW_STAGE_ES], NULL);
rctx->shader_stages.geom_enable = false;
r600_mark_atom_dirty(rctx, &rctx->shader_stages.atom);
}
if (unlikely(!rctx->vs_shader->current))
return false;
- if (unlikely(vs_dirty || rctx->vertex_shader.shader != rctx->vs_shader->current)) {
- update_shader_atom(ctx, &rctx->vertex_shader, rctx->vs_shader->current);
+ if (unlikely(vs_dirty || rctx->hw_shader_stages[R600_HW_STAGE_VS].shader != rctx->vs_shader->current)) {
+ update_shader_atom(ctx, &rctx->hw_shader_stages[R600_HW_STAGE_VS], rctx->vs_shader->current);
/* Update clip misc state. */
r600_update_clip_state(rctx, rctx->vs_shader->current);
}
}
-
- if (unlikely(ps_dirty || rctx->pixel_shader.shader != rctx->ps_shader->current ||
+ if (unlikely(ps_dirty || rctx->hw_shader_stages[R600_HW_STAGE_PS].shader != rctx->ps_shader->current ||
rctx->rasterizer->sprite_coord_enable != rctx->ps_shader->current->sprite_coord_enable ||
rctx->rasterizer->flatshade != rctx->ps_shader->current->flatshade)) {
}
r600_mark_atom_dirty(rctx, &rctx->shader_stages.atom);
- update_shader_atom(ctx, &rctx->pixel_shader, rctx->ps_shader->current);
+ update_shader_atom(ctx, &rctx->hw_shader_stages[R600_HW_STAGE_PS], rctx->ps_shader->current);
}
if (rctx->b.chip_class >= EVERGREEN) {
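On the dropped 23: update_shader_atom(), called throughout the hunk
above, recomputes an atom's num_dw from the bound shader every time a
stage changes, so whatever num_dw r600_init_atom() seeds is overwritten
before the atom is ever emitted. A simplified sketch of that existing
helper (field names assumed; see r600_state_common.c for the real body):

    static void update_shader_atom(struct pipe_context *ctx,
                                   struct r600_shader_state *state,
                                   struct r600_pipe_shader *shader)
    {
        struct r600_context *rctx = (struct r600_context *)ctx;

        state->shader = shader;
        if (shader) {
            /* size the atom from the shader's prebuilt command buffer */
            state->atom.num_dw = shader->command_buffer.num_dw;
            r600_mark_atom_dirty(rctx, &state->atom);
        } else {
            state->atom.num_dw = 0;
        }
    }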