#include "st_atifs_to_tgsi.h"
#include "st_nir.h"
#include "st_shader_cache.h"
+#include "st_util.h"
#include "cso_cache/cso_context.h"
switch (prog->info.stage) {
case MESA_SHADER_VERTEX:
- states = &((struct st_vertex_program*)prog)->affected_states;
+ states = &((struct st_program*)prog)->affected_states;
*states = ST_NEW_VS_STATE |
ST_NEW_RASTERIZER |
break;
case MESA_SHADER_TESS_CTRL:
- states = &(st_common_program(prog))->affected_states;
+ states = &(st_program(prog))->affected_states;
*states = ST_NEW_TCS_STATE;
break;
case MESA_SHADER_TESS_EVAL:
- states = &(st_common_program(prog))->affected_states;
+ states = &(st_program(prog))->affected_states;
*states = ST_NEW_TES_STATE |
ST_NEW_RASTERIZER;
break;
case MESA_SHADER_GEOMETRY:
- states = &(st_common_program(prog))->affected_states;
+ states = &(st_program(prog))->affected_states;
*states = ST_NEW_GS_STATE |
ST_NEW_RASTERIZER;
break;
case MESA_SHADER_FRAGMENT:
- states = &((struct st_fragment_program*)prog)->affected_states;
+ states = &((struct st_program*)prog)->affected_states;
/* gl_FragCoord and glDrawPixels always use constants. */
*states = ST_NEW_FS_STATE |
break;
case MESA_SHADER_COMPUTE:
- states = &((struct st_common_program*)prog)->affected_states;
+ states = &((struct st_program*)prog)->affected_states;
*states = ST_NEW_CS_STATE;
if (vpv->draw_shader)
draw_delete_vertex_shader( st->draw, vpv->draw_shader );
- delete_ir(&vpv->tgsi);
+ if (vpv->tokens)
+ ureg_free_tokens(vpv->tokens);
free( vpv );
}
*/
+/**
+ * Free all variants of a vertex program, then free its cached IR.
+ *
+ * NOTE(review): with the st_program unification the parameter becomes the
+ * generic struct st_program, the variant list moves from 'variants' to the
+ * stage-specific 'vp_variants', and the cached IR field is renamed
+ * 'tgsi' -> 'state'.
+ */
void
st_release_vp_variants( struct st_context *st,
- struct st_vertex_program *stvp )
+ struct st_program *stvp )
{
struct st_vp_variant *vpv;
+ /* Walk the singly-linked variant list; grab 'next' before the node is
+ * destroyed by delete_vp_variant().
+ */
- for (vpv = stvp->variants; vpv; ) {
+ for (vpv = stvp->vp_variants; vpv; ) {
struct st_vp_variant *next = vpv->next;
delete_vp_variant(st, vpv);
vpv = next;
}
- stvp->variants = NULL;
+ stvp->vp_variants = NULL;
+ /* Release the program's own IR (tokens/NIR) held in the renamed field. */
- delete_ir(&stvp->tgsi);
+ delete_ir(&stvp->state);
}
* Free all variants of a fragment program.
*/
+/**
+ * Free all variants of a fragment program, then free its cached IR.
+ *
+ * NOTE(review): mirrors st_release_vp_variants — generic st_program
+ * parameter, 'variants' -> 'fp_variants', 'tgsi' -> 'state'.
+ */
void
-st_release_fp_variants(struct st_context *st, struct st_fragment_program *stfp)
+st_release_fp_variants(struct st_context *st, struct st_program *stfp)
{
struct st_fp_variant *fpv;
+ /* Save 'next' before delete_fp_variant() frees the current node. */
- for (fpv = stfp->variants; fpv; ) {
+ for (fpv = stfp->fp_variants; fpv; ) {
struct st_fp_variant *next = fpv->next;
delete_fp_variant(st, fpv);
fpv = next;
}
- stfp->variants = NULL;
+ stfp->fp_variants = NULL;
+ /* Release the program's own IR (tokens/NIR). */
- delete_ir(&stfp->tgsi);
+ delete_ir(&stfp->state);
}
* the variant from the linked list.
*/
static void
-delete_basic_variant(struct st_context *st, struct st_basic_variant *v,
- GLenum target)
+delete_common_variant(struct st_context *st, struct st_common_variant *v,
+ GLenum target)
{
if (v->driver_shader) {
if (st->has_shareable_shaders || v->key.st == st) {
* Free all basic program variants.
*/
+/**
+ * Free all variants of a TCS/TES/GS/CS ("common") program, then free its
+ * cached IR.
+ *
+ * NOTE(review): renamed from st_release_basic_variants; the variant type is
+ * now st_common_variant and the per-variant destructor is
+ * delete_common_variant. The list head keeps the name 'variants'.
+ */
void
-st_release_basic_variants(struct st_context *st, struct st_common_program *p)
+st_release_common_variants(struct st_context *st, struct st_program *p)
{
- struct st_basic_variant *v;
+ struct st_common_variant *v;
for (v = p->variants; v; ) {
+ /* Capture 'next' before the node is destroyed. */
- struct st_basic_variant *next = v->next;
- delete_basic_variant(st, v, p->Base.Target);
+ struct st_common_variant *next = v->next;
+ delete_common_variant(st, v, p->Base.Target);
v = next;
}
p->variants = NULL;
+ /* Release the program's own IR (tokens/NIR). */
- delete_ir(&p->tgsi);
+ delete_ir(&p->state);
}
+/**
+ * Run the NIR lowering passes that must happen exactly once per program,
+ * before any per-variant cloning/lowering takes place.
+ *
+ * NOTE(review): the VERTEX/GEOMETRY stages (and any driver that asks for
+ * lower_all_io_to_temps/elements) get all I/O arrays lowered to elements;
+ * FRAGMENT lowers only inputs (second argument 'true' — presumably
+ * "inputs only"; confirm against the nir_lower_io_arrays_to_elements API).
+ */
+void
+st_finalize_nir_before_variants(struct nir_shader *nir)
+{
+ NIR_PASS_V(nir, nir_opt_access);
+
+ /* Copies must be split/lowered before the I/O array lowering below. */
+ NIR_PASS_V(nir, nir_split_var_copies);
+ NIR_PASS_V(nir, nir_lower_var_copies);
+ if (nir->options->lower_all_io_to_temps ||
+ nir->options->lower_all_io_to_elements ||
+ nir->info.stage == MESA_SHADER_VERTEX ||
+ nir->info.stage == MESA_SHADER_GEOMETRY) {
+ NIR_PASS_V(nir, nir_lower_io_arrays_to_elements_no_indirects, false);
+ } else if (nir->info.stage == MESA_SHADER_FRAGMENT) {
+ NIR_PASS_V(nir, nir_lower_io_arrays_to_elements_no_indirects, true);
+ }
+
+ /* Fix VS input locations once, so every variant sees the same layout. */
+ st_nir_assign_vs_in_locations(nir);
+}
/**
* Translate ARB (asm) program to NIR
st_translate_prog_to_nir(struct st_context *st, struct gl_program *prog,
gl_shader_stage stage)
{
+ struct pipe_screen *screen = st->pipe->screen;
const struct gl_shader_compiler_options *options =
&st->ctx->Const.ShaderCompilerOptions[stage];
NIR_PASS_V(nir, nir_lower_regs_to_ssa); /* turn registers into SSA */
nir_validate_shader(nir, "after st/ptn lower_regs_to_ssa");
- NIR_PASS_V(nir, st_nir_lower_wpos_ytransform, prog, st->pipe->screen);
+ NIR_PASS_V(nir, st_nir_lower_wpos_ytransform, prog, screen);
NIR_PASS_V(nir, nir_lower_system_values);
/* Optimise NIR */
NIR_PASS_V(nir, nir_opt_constant_folding);
st_nir_opts(nir);
- nir_validate_shader(nir, "after st/ptn NIR opts");
+ st_finalize_nir_before_variants(nir);
+
+ if (st->allow_st_finalize_nir_twice)
+ st_finalize_nir(st, prog, NULL, nir, true);
+
+ nir_validate_shader(nir, "after st/glsl finalize_nir");
return nir;
}
+/**
+ * Reset the vertex-program input bookkeeping prior to (re)translation:
+ * zero the input count and mark every input_to_index slot invalid (~0).
+ */
void
-st_prepare_vertex_program(struct st_vertex_program *stvp)
+st_prepare_vertex_program(struct st_program *stvp)
{
stvp->num_inputs = 0;
memset(stvp->input_to_index, ~0, sizeof(stvp->input_to_index));
}
/* Translate stream output info. */
- struct pipe_stream_output_info *so_info = NULL;
- if (prog->info.stage == MESA_SHADER_VERTEX)
- so_info = &((struct st_vertex_program*)prog)->tgsi.stream_output;
- else
- so_info = &((struct st_common_program*)prog)->tgsi.stream_output;
+ struct pipe_stream_output_info *so_info =
+ &((struct st_program*)prog)->state.stream_output;
for (unsigned i = 0; i < info->NumOutputs; i++) {
so_info->output[i].register_index =
*/
bool
st_translate_vertex_program(struct st_context *st,
- struct st_vertex_program *stvp)
+ struct st_program *stvp)
{
struct ureg_program *ureg;
enum pipe_error error;
st_prepare_vertex_program(stvp);
- /* Get semantic names and indices. */
- for (attr = 0; attr < VARYING_SLOT_MAX; attr++) {
- if (stvp->Base.info.outputs_written & BITFIELD64_BIT(attr)) {
- unsigned slot = num_outputs++;
- unsigned semantic_name, semantic_index;
- tgsi_get_gl_varying_semantic(attr, st->needs_texcoord_semantic,
- &semantic_name, &semantic_index);
- output_semantic_name[slot] = semantic_name;
- output_semantic_index[slot] = semantic_index;
- }
- }
- /* pre-setup potentially unused edgeflag output */
- output_semantic_name[num_outputs] = TGSI_SEMANTIC_EDGEFLAG;
- output_semantic_index[num_outputs] = 0;
-
/* ARB_vp: */
if (!stvp->glsl_to_tgsi) {
_mesa_remove_output_reads(&stvp->Base, PROGRAM_OUTPUT);
/* No samplers are allowed in ARB_vp. */
}
+ /* Get semantic names and indices. */
+ for (attr = 0; attr < VARYING_SLOT_MAX; attr++) {
+ if (stvp->Base.info.outputs_written & BITFIELD64_BIT(attr)) {
+ unsigned slot = num_outputs++;
+ unsigned semantic_name, semantic_index;
+ tgsi_get_gl_varying_semantic(attr, st->needs_texcoord_semantic,
+ &semantic_name, &semantic_index);
+ output_semantic_name[slot] = semantic_name;
+ output_semantic_index[slot] = semantic_index;
+ }
+ }
+ /* pre-setup potentially unused edgeflag output */
+ output_semantic_name[num_outputs] = TGSI_SEMANTIC_EDGEFLAG;
+ output_semantic_index[num_outputs] = 0;
+
ureg = ureg_create_with_screen(PIPE_SHADER_VERTEX, st->pipe->screen);
if (ureg == NULL)
return false;
return false;
}
- stvp->tgsi.tokens = ureg_get_tokens(ureg, &stvp->num_tgsi_tokens);
+ stvp->state.tokens = ureg_get_tokens(ureg, NULL);
ureg_destroy(ureg);
if (stvp->glsl_to_tgsi) {
st_store_ir_in_disk_cache(st, &stvp->Base, false);
}
- bool use_nir = PIPE_SHADER_IR_NIR ==
- st->pipe->screen->get_shader_param(st->pipe->screen, PIPE_SHADER_VERTEX,
- PIPE_SHADER_CAP_PREFERRED_IR);
+ /* Translate to NIR.
+ *
+ * This must be done after the translation to TGSI is done, because
+ * we'll pass the NIR shader to the driver and the TGSI version to
+ * the draw module for the select/feedback/rasterpos code.
+ */
+ if (st->pipe->screen->get_shader_param(st->pipe->screen,
+ PIPE_SHADER_VERTEX,
+ PIPE_SHADER_CAP_PREFERRED_IR)) {
+ assert(!stvp->glsl_to_tgsi);
- if (use_nir) {
nir_shader *nir =
st_translate_prog_to_nir(st, &stvp->Base, MESA_SHADER_VERTEX);
- if (stvp->tgsi.ir.nir)
- ralloc_free(stvp->tgsi.ir.nir);
- stvp->tgsi.type = PIPE_SHADER_IR_NIR;
- stvp->tgsi.ir.nir = nir;
+ if (stvp->state.ir.nir)
+ ralloc_free(stvp->state.ir.nir);
+ stvp->state.type = PIPE_SHADER_IR_NIR;
+ stvp->state.ir.nir = nir;
stvp->Base.nir = nir;
return true;
}
- return stvp->tgsi.tokens != NULL;
+ return stvp->state.tokens != NULL;
}
static const gl_state_index16 depth_range_state[STATE_LENGTH] =
static struct st_vp_variant *
st_create_vp_variant(struct st_context *st,
- struct st_vertex_program *stvp,
- const struct st_vp_variant_key *key)
+ struct st_program *stvp,
+ const struct st_common_variant_key *key)
{
struct st_vp_variant *vpv = CALLOC_STRUCT(st_vp_variant);
struct pipe_context *pipe = st->pipe;
+ struct pipe_screen *screen = pipe->screen;
+ struct pipe_shader_state state = {0};
+
+ static const gl_state_index16 point_size_state[STATE_LENGTH] =
+ { STATE_INTERNAL, STATE_POINT_SIZE_CLAMPED, 0 };
struct gl_program_parameter_list *params = stvp->Base.Parameters;
vpv->key = *key;
- vpv->tgsi.stream_output = stvp->tgsi.stream_output;
vpv->num_inputs = stvp->num_inputs;
- /* When generating a NIR program, we usually don't have TGSI tokens.
- * However, we do create them for ARB_vertex_program / fixed-function VS
- * programs which we may need to use with the draw module for legacy
- * feedback/select emulation. If they exist, copy them.
- */
- if (stvp->tgsi.tokens)
- vpv->tgsi.tokens = tgsi_dup_tokens(stvp->tgsi.tokens);
-
- if (stvp->tgsi.type == PIPE_SHADER_IR_NIR) {
- vpv->tgsi.type = PIPE_SHADER_IR_NIR;
- vpv->tgsi.ir.nir = nir_shader_clone(NULL, stvp->tgsi.ir.nir);
- if (key->clamp_color)
- NIR_PASS_V(vpv->tgsi.ir.nir, nir_lower_clamp_color_outputs);
+ state.stream_output = stvp->state.stream_output;
+
+ if (stvp->state.type == PIPE_SHADER_IR_NIR) {
+ bool finalize = false;
+
+ state.type = PIPE_SHADER_IR_NIR;
+ state.ir.nir = nir_shader_clone(NULL, stvp->state.ir.nir);
+ if (key->clamp_color) {
+ NIR_PASS_V(state.ir.nir, nir_lower_clamp_color_outputs);
+ finalize = true;
+ }
if (key->passthrough_edgeflags) {
- NIR_PASS_V(vpv->tgsi.ir.nir, nir_lower_passthrough_edgeflags);
+ NIR_PASS_V(state.ir.nir, nir_lower_passthrough_edgeflags);
vpv->num_inputs++;
+ finalize = true;
}
- st_finalize_nir(st, &stvp->Base, stvp->shader_program,
- vpv->tgsi.ir.nir);
+ if (key->lower_point_size) {
+ _mesa_add_state_reference(params, point_size_state);
+ NIR_PASS_V(state.ir.nir, nir_lower_point_size_mov,
+ point_size_state);
+ finalize = true;
+ }
+
+ if (key->lower_ucp) {
+ bool can_compact = screen->get_param(screen,
+ PIPE_CAP_NIR_COMPACT_ARRAYS);
+
+ bool use_eye = st->ctx->_Shader->CurrentProgram[MESA_SHADER_VERTEX] != NULL;
+ gl_state_index16 clipplane_state[MAX_CLIP_PLANES][STATE_LENGTH];
+ for (int i = 0; i < MAX_CLIP_PLANES; ++i) {
+ if (use_eye) {
+ clipplane_state[i][0] = STATE_CLIPPLANE;
+ clipplane_state[i][1] = i;
+ } else {
+ clipplane_state[i][0] = STATE_INTERNAL;
+ clipplane_state[i][1] = STATE_CLIP_INTERNAL;
+ clipplane_state[i][2] = i;
+ }
+ _mesa_add_state_reference(params, clipplane_state[i]);
+ }
+
+ NIR_PASS_V(state.ir.nir, nir_lower_clip_vs, key->lower_ucp,
+ true, can_compact, clipplane_state);
+ NIR_PASS_V(state.ir.nir, nir_lower_io_to_temporaries,
+ nir_shader_get_entrypoint(state.ir.nir), true, false);
+ NIR_PASS_V(state.ir.nir, nir_lower_global_vars_to_local);
+ finalize = true;
+ }
+
+ if (finalize || !st->allow_st_finalize_nir_twice) {
+ st_finalize_nir(st, &stvp->Base, stvp->shader_program, state.ir.nir,
+ true);
+
+ /* Some of the lowering above may have introduced new varyings */
+ nir_shader_gather_info(state.ir.nir,
+ nir_shader_get_entrypoint(state.ir.nir));
+ }
+
+ if (ST_DEBUG & DEBUG_PRINT_IR)
+ nir_print_shader(state.ir.nir, stderr);
+
+ vpv->driver_shader = pipe->create_vs_state(pipe, &state);
+
+ /* When generating a NIR program, we usually don't have TGSI tokens.
+ * However, we do create them for ARB_vertex_program / fixed-function VS
+ * programs which we may need to use with the draw module for legacy
+ * feedback/select emulation. If they exist, copy them.
+ *
+ * TODO: Lowering for shader variants is not applied to TGSI when
+ * generating a NIR shader.
+ */
+ if (stvp->state.tokens)
+ vpv->tokens = tgsi_dup_tokens(stvp->state.tokens);
- vpv->driver_shader = pipe->create_vs_state(pipe, &vpv->tgsi);
- /* driver takes ownership of IR: */
- vpv->tgsi.ir.nir = NULL;
return vpv;
}
+ state.type = PIPE_SHADER_IR_TGSI;
+ state.tokens = tgsi_dup_tokens(stvp->state.tokens);
+
/* Emulate features. */
if (key->clamp_color || key->passthrough_edgeflags) {
const struct tgsi_token *tokens;
(key->clamp_color ? TGSI_EMU_CLAMP_COLOR_OUTPUTS : 0) |
(key->passthrough_edgeflags ? TGSI_EMU_PASSTHROUGH_EDGEFLAG : 0);
- tokens = tgsi_emulate(vpv->tgsi.tokens, flags);
+ tokens = tgsi_emulate(state.tokens, flags);
if (tokens) {
- tgsi_free_tokens(vpv->tgsi.tokens);
- vpv->tgsi.tokens = tokens;
+ tgsi_free_tokens(state.tokens);
+ state.tokens = tokens;
if (key->passthrough_edgeflags)
vpv->num_inputs++;
_mesa_add_state_reference(params, depth_range_state);
const struct tgsi_token *tokens;
- tokens = st_tgsi_lower_depth_clamp(vpv->tgsi.tokens, depth_range_const,
+ tokens = st_tgsi_lower_depth_clamp(state.tokens, depth_range_const,
key->clip_negative_one_to_one);
- if (tokens != vpv->tgsi.tokens)
- tgsi_free_tokens(vpv->tgsi.tokens);
- vpv->tgsi.tokens = tokens;
+ if (tokens != state.tokens)
+ tgsi_free_tokens(state.tokens);
+ state.tokens = tokens;
}
- if (ST_DEBUG & DEBUG_TGSI) {
- tgsi_dump(vpv->tgsi.tokens, 0);
- debug_printf("\n");
- }
+ if (ST_DEBUG & DEBUG_PRINT_IR)
+ tgsi_dump(state.tokens, 0);
- vpv->driver_shader = pipe->create_vs_state(pipe, &vpv->tgsi);
+ vpv->driver_shader = pipe->create_vs_state(pipe, &state);
+ /* Save this for selection/feedback/rasterpos. */
+ vpv->tokens = state.tokens;
return vpv;
}
*/
struct st_vp_variant *
st_get_vp_variant(struct st_context *st,
- struct st_vertex_program *stvp,
- const struct st_vp_variant_key *key)
+ struct st_program *stvp,
+ const struct st_common_variant_key *key)
{
struct st_vp_variant *vpv;
/* Search for existing variant */
- for (vpv = stvp->variants; vpv; vpv = vpv->next) {
+ for (vpv = stvp->vp_variants; vpv; vpv = vpv->next) {
if (memcmp(&vpv->key, key, sizeof(*key)) == 0) {
break;
}
}
/* insert into list */
- vpv->next = stvp->variants;
- stvp->variants = vpv;
+ vpv->next = stvp->vp_variants;
+ stvp->vp_variants = vpv;
}
}
*/
bool
st_translate_fragment_program(struct st_context *st,
- struct st_fragment_program *stfp)
+ struct st_program *stfp)
{
/* Non-GLSL programs: */
if (!stfp->glsl_to_tgsi) {
nir_shader *nir =
st_translate_prog_to_nir(st, &stfp->Base, MESA_SHADER_FRAGMENT);
- if (stfp->tgsi.ir.nir)
- ralloc_free(stfp->tgsi.ir.nir);
- stfp->tgsi.type = PIPE_SHADER_IR_NIR;
- stfp->tgsi.ir.nir = nir;
+ if (stfp->state.ir.nir)
+ ralloc_free(stfp->state.ir.nir);
+ stfp->state.type = PIPE_SHADER_IR_NIR;
+ stfp->state.ir.nir = nir;
stfp->Base.nir = nir;
return true;
}
fs_output_semantic_name,
fs_output_semantic_index);
- stfp->tgsi.tokens = ureg_get_tokens(ureg, &stfp->num_tgsi_tokens);
+ stfp->state.tokens = ureg_get_tokens(ureg, NULL);
ureg_destroy(ureg);
if (stfp->glsl_to_tgsi) {
st_store_ir_in_disk_cache(st, &stfp->Base, false);
}
- return stfp->tgsi.tokens != NULL;
+ return stfp->state.tokens != NULL;
}
static struct st_fp_variant *
st_create_fp_variant(struct st_context *st,
- struct st_fragment_program *stfp,
+ struct st_program *stfp,
const struct st_fp_variant_key *key)
{
struct pipe_context *pipe = st->pipe;
struct st_fp_variant *variant = CALLOC_STRUCT(st_fp_variant);
- struct pipe_shader_state tgsi = {0};
+ struct pipe_shader_state state = {0};
struct gl_program_parameter_list *params = stfp->Base.Parameters;
static const gl_state_index16 texcoord_state[STATE_LENGTH] =
{ STATE_INTERNAL, STATE_CURRENT_ATTRIB, VERT_ATTRIB_TEX0 };
{ STATE_INTERNAL, STATE_PT_SCALE };
static const gl_state_index16 bias_state[STATE_LENGTH] =
{ STATE_INTERNAL, STATE_PT_BIAS };
+ static const gl_state_index16 alpha_ref_state[STATE_LENGTH] =
+ { STATE_INTERNAL, STATE_ALPHA_REF };
if (!variant)
return NULL;
- if (stfp->tgsi.type == PIPE_SHADER_IR_NIR) {
- tgsi.type = PIPE_SHADER_IR_NIR;
- tgsi.ir.nir = nir_shader_clone(NULL, stfp->tgsi.ir.nir);
+ if (stfp->state.type == PIPE_SHADER_IR_NIR) {
+ bool finalize = false;
- if (key->clamp_color)
- NIR_PASS_V(tgsi.ir.nir, nir_lower_clamp_color_outputs);
+ state.type = PIPE_SHADER_IR_NIR;
+ state.ir.nir = nir_shader_clone(NULL, stfp->state.ir.nir);
+
+ if (key->clamp_color) {
+ NIR_PASS_V(state.ir.nir, nir_lower_clamp_color_outputs);
+ finalize = true;
+ }
+
+ if (key->lower_flatshade) {
+ NIR_PASS_V(state.ir.nir, nir_lower_flatshade);
+ finalize = true;
+ }
+
+ if (key->lower_alpha_func != COMPARE_FUNC_NEVER) {
+ _mesa_add_state_reference(params, alpha_ref_state);
+ NIR_PASS_V(state.ir.nir, nir_lower_alpha_test, key->lower_alpha_func,
+ false, alpha_ref_state);
+ finalize = true;
+ }
+
+ if (key->lower_two_sided_color) {
+ NIR_PASS_V(state.ir.nir, nir_lower_two_sided_color);
+ finalize = true;
+ }
if (key->persample_shading) {
- nir_shader *shader = tgsi.ir.nir;
+ nir_shader *shader = state.ir.nir;
nir_foreach_variable(var, &shader->inputs)
var->data.sample = true;
+ finalize = true;
}
assert(!(key->bitmap && key->drawpixels));
options.sampler = variant->bitmap_sampler;
options.swizzle_xxxx = st->bitmap.tex_format == PIPE_FORMAT_R8_UNORM;
- NIR_PASS_V(tgsi.ir.nir, nir_lower_bitmap, &options);
+ NIR_PASS_V(state.ir.nir, nir_lower_bitmap, &options);
+ finalize = true;
}
/* glDrawPixels (color only) */
memcpy(options.texcoord_state_tokens, texcoord_state,
sizeof(options.texcoord_state_tokens));
- NIR_PASS_V(tgsi.ir.nir, nir_lower_drawpixels, &options);
+ NIR_PASS_V(state.ir.nir, nir_lower_drawpixels, &options);
+ finalize = true;
}
if (unlikely(key->external.lower_nv12 || key->external.lower_iyuv ||
options.lower_yx_xuxv_external = key->external.lower_yx_xuxv;
options.lower_ayuv_external = key->external.lower_ayuv;
options.lower_xyuv_external = key->external.lower_xyuv;
- NIR_PASS_V(tgsi.ir.nir, nir_lower_tex, &options);
+ NIR_PASS_V(state.ir.nir, nir_lower_tex, &options);
+ finalize = true;
}
- st_finalize_nir(st, &stfp->Base, stfp->shader_program, tgsi.ir.nir);
+ if (finalize || !st->allow_st_finalize_nir_twice) {
+ st_finalize_nir(st, &stfp->Base, stfp->shader_program, state.ir.nir,
+ false);
+ }
+ /* This pass needs to happen *after* nir_lower_sampler */
if (unlikely(key->external.lower_nv12 || key->external.lower_iyuv ||
key->external.lower_xy_uxvx || key->external.lower_yx_xuxv)) {
- /* This pass needs to happen *after* nir_lower_sampler */
- NIR_PASS_V(tgsi.ir.nir, st_nir_lower_tex_src_plane,
+ NIR_PASS_V(state.ir.nir, st_nir_lower_tex_src_plane,
~stfp->Base.SamplersUsed,
key->external.lower_nv12 || key->external.lower_xy_uxvx ||
key->external.lower_yx_xuxv,
key->external.lower_iyuv);
+ finalize = true;
+ }
+
+ if (finalize || !st->allow_st_finalize_nir_twice) {
+ /* Some of the lowering above may have introduced new varyings */
+ nir_shader_gather_info(state.ir.nir,
+ nir_shader_get_entrypoint(state.ir.nir));
+
+ struct pipe_screen *screen = pipe->screen;
+ if (screen->finalize_nir)
+ screen->finalize_nir(screen, state.ir.nir, false);
}
- /* Some of the lowering above may have introduced new varyings */
- nir_shader_gather_info(tgsi.ir.nir,
- nir_shader_get_entrypoint(tgsi.ir.nir));
+ if (ST_DEBUG & DEBUG_PRINT_IR)
+ nir_print_shader(state.ir.nir, stderr);
- variant->driver_shader = pipe->create_fs_state(pipe, &tgsi);
+ variant->driver_shader = pipe->create_fs_state(pipe, &state);
variant->key = *key;
return variant;
}
- tgsi.tokens = stfp->tgsi.tokens;
+ state.tokens = stfp->state.tokens;
assert(!(key->bitmap && key->drawpixels));
/* Fix texture targets and add fog for ATI_fs */
if (stfp->ati_fs) {
- const struct tgsi_token *tokens = st_fixup_atifs(tgsi.tokens, key);
+ const struct tgsi_token *tokens = st_fixup_atifs(state.tokens, key);
if (tokens)
- tgsi.tokens = tokens;
+ state.tokens = tokens;
else
fprintf(stderr, "mesa: cannot post-process ATI_fs\n");
}
(key->clamp_color ? TGSI_EMU_CLAMP_COLOR_OUTPUTS : 0) |
(key->persample_shading ? TGSI_EMU_FORCE_PERSAMPLE_INTERP : 0);
- tokens = tgsi_emulate(tgsi.tokens, flags);
+ tokens = tgsi_emulate(state.tokens, flags);
if (tokens) {
- if (tgsi.tokens != stfp->tgsi.tokens)
- tgsi_free_tokens(tgsi.tokens);
- tgsi.tokens = tokens;
+ if (state.tokens != stfp->state.tokens)
+ tgsi_free_tokens(state.tokens);
+ state.tokens = tokens;
} else
fprintf(stderr, "mesa: cannot emulate deprecated features\n");
}
variant->bitmap_sampler = ffs(~stfp->Base.SamplersUsed) - 1;
- tokens = st_get_bitmap_shader(tgsi.tokens,
+ tokens = st_get_bitmap_shader(state.tokens,
st->internal_target,
variant->bitmap_sampler,
st->needs_texcoord_semantic,
PIPE_FORMAT_R8_UNORM);
if (tokens) {
- if (tgsi.tokens != stfp->tgsi.tokens)
- tgsi_free_tokens(tgsi.tokens);
- tgsi.tokens = tokens;
+ if (state.tokens != stfp->state.tokens)
+ tgsi_free_tokens(state.tokens);
+ state.tokens = tokens;
} else
fprintf(stderr, "mesa: cannot create a shader for glBitmap\n");
}
texcoord_const = _mesa_add_state_reference(params, texcoord_state);
- tokens = st_get_drawpix_shader(tgsi.tokens,
+ tokens = st_get_drawpix_shader(state.tokens,
st->needs_texcoord_semantic,
key->scaleAndBias, scale_const,
bias_const, key->pixelMaps,
texcoord_const, st->internal_target);
if (tokens) {
- if (tgsi.tokens != stfp->tgsi.tokens)
- tgsi_free_tokens(tgsi.tokens);
- tgsi.tokens = tokens;
+ if (state.tokens != stfp->state.tokens)
+ tgsi_free_tokens(state.tokens);
+ state.tokens = tokens;
} else
fprintf(stderr, "mesa: cannot create a shader for glDrawPixels\n");
}
/* samplers inserted would conflict, but this should be unpossible: */
assert(!(key->bitmap || key->drawpixels));
- tokens = st_tgsi_lower_yuv(tgsi.tokens,
+ tokens = st_tgsi_lower_yuv(state.tokens,
~stfp->Base.SamplersUsed,
key->external.lower_nv12 ||
key->external.lower_xy_uxvx ||
key->external.lower_yx_xuxv,
key->external.lower_iyuv);
if (tokens) {
- if (tgsi.tokens != stfp->tgsi.tokens)
- tgsi_free_tokens(tgsi.tokens);
- tgsi.tokens = tokens;
+ if (state.tokens != stfp->state.tokens)
+ tgsi_free_tokens(state.tokens);
+ state.tokens = tokens;
} else {
fprintf(stderr, "mesa: cannot create a shader for samplerExternalOES\n");
}
unsigned depth_range_const = _mesa_add_state_reference(params, depth_range_state);
const struct tgsi_token *tokens;
- tokens = st_tgsi_lower_depth_clamp_fs(tgsi.tokens, depth_range_const);
- if (tgsi.tokens != stfp->tgsi.tokens)
- tgsi_free_tokens(tgsi.tokens);
- tgsi.tokens = tokens;
+ tokens = st_tgsi_lower_depth_clamp_fs(state.tokens, depth_range_const);
+ if (state.tokens != stfp->state.tokens)
+ tgsi_free_tokens(state.tokens);
+ state.tokens = tokens;
}
- if (ST_DEBUG & DEBUG_TGSI) {
- tgsi_dump(tgsi.tokens, 0);
- debug_printf("\n");
- }
+ if (ST_DEBUG & DEBUG_PRINT_IR)
+ tgsi_dump(state.tokens, 0);
/* fill in variant */
- variant->driver_shader = pipe->create_fs_state(pipe, &tgsi);
+ variant->driver_shader = pipe->create_fs_state(pipe, &state);
variant->key = *key;
- if (tgsi.tokens != stfp->tgsi.tokens)
- tgsi_free_tokens(tgsi.tokens);
+ if (state.tokens != stfp->state.tokens)
+ tgsi_free_tokens(state.tokens);
return variant;
}
*/
struct st_fp_variant *
st_get_fp_variant(struct st_context *st,
- struct st_fragment_program *stfp,
+ struct st_program *stfp,
const struct st_fp_variant_key *key)
{
struct st_fp_variant *fpv;
/* Search for existing variant */
- for (fpv = stfp->variants; fpv; fpv = fpv->next) {
+ for (fpv = stfp->fp_variants; fpv; fpv = fpv->next) {
if (memcmp(&fpv->key, key, sizeof(*key)) == 0) {
break;
}
* st_update_fp can take a fast path when
* shader_has_one_variant is set.
*/
- if (!stfp->variants) {
- stfp->variants = fpv;
+ if (!stfp->fp_variants) {
+ stfp->fp_variants = fpv;
} else {
/* insert into list after the first one */
- fpv->next = stfp->variants->next;
- stfp->variants->next = fpv;
+ fpv->next = stfp->fp_variants->next;
+ stfp->fp_variants->next = fpv;
}
} else {
/* insert into list */
- fpv->next = stfp->variants;
- stfp->variants = fpv;
+ fpv->next = stfp->fp_variants;
+ stfp->fp_variants = fpv;
}
}
}
*/
bool
st_translate_common_program(struct st_context *st,
- struct st_common_program *stcp)
+ struct st_program *stcp)
{
struct gl_program *prog = &stcp->Base;
enum pipe_shader_type stage =
memset(inputSlotToAttr, 0, sizeof(inputSlotToAttr));
memset(inputMapping, 0, sizeof(inputMapping));
memset(outputMapping, 0, sizeof(outputMapping));
- memset(&stcp->tgsi, 0, sizeof(stcp->tgsi));
+ memset(&stcp->state, 0, sizeof(stcp->state));
if (prog->info.clip_distance_array_size)
ureg_property(ureg, TGSI_PROPERTY_NUM_CLIPDIST_ENABLED,
output_semantic_name,
output_semantic_index);
- stcp->tgsi.tokens = ureg_get_tokens(ureg, &stcp->num_tgsi_tokens);
+ stcp->state.tokens = ureg_get_tokens(ureg, NULL);
ureg_destroy(ureg);
st_store_ir_in_disk_cache(st, prog, false);
- if ((ST_DEBUG & DEBUG_TGSI) && (ST_DEBUG & DEBUG_MESA)) {
+ if (ST_DEBUG & DEBUG_PRINT_IR && ST_DEBUG & DEBUG_MESA)
_mesa_print_program(prog);
- debug_printf("\n");
- }
-
- if (ST_DEBUG & DEBUG_TGSI) {
- tgsi_dump(stcp->tgsi.tokens, 0);
- debug_printf("\n");
- }
free_glsl_to_tgsi_visitor(stcp->glsl_to_tgsi);
stcp->glsl_to_tgsi = NULL;
/**
* Get/create a basic program variant.
*/
-struct st_basic_variant *
-st_get_basic_variant(struct st_context *st,
- struct st_common_program *prog,
- const struct st_basic_variant_key *key)
+struct st_common_variant *
+st_get_common_variant(struct st_context *st,
+ struct st_program *prog,
+ const struct st_common_variant_key *key)
{
struct pipe_context *pipe = st->pipe;
- struct st_basic_variant *v;
- struct pipe_shader_state tgsi = {0};
+ struct st_common_variant *v;
+ struct pipe_shader_state state = {0};
/* Search for existing variant */
for (v = prog->variants; v; v = v->next) {
if (!v) {
/* create new */
- v = CALLOC_STRUCT(st_basic_variant);
+ v = CALLOC_STRUCT(st_common_variant);
if (v) {
+ if (prog->state.type == PIPE_SHADER_IR_NIR) {
+ bool finalize = false;
+
+ state.type = PIPE_SHADER_IR_NIR;
+ state.ir.nir = nir_shader_clone(NULL, prog->state.ir.nir);
- if (prog->tgsi.type == PIPE_SHADER_IR_NIR) {
- tgsi.type = PIPE_SHADER_IR_NIR;
- tgsi.ir.nir = nir_shader_clone(NULL, prog->tgsi.ir.nir);
+ if (key->clamp_color) {
+ NIR_PASS_V(state.ir.nir, nir_lower_clamp_color_outputs);
+ finalize = true;
+ }
- if (key->clamp_color)
- NIR_PASS_V(tgsi.ir.nir, nir_lower_clamp_color_outputs);
+ state.stream_output = prog->state.stream_output;
- tgsi.stream_output = prog->tgsi.stream_output;
+ if (finalize || !st->allow_st_finalize_nir_twice) {
+ st_finalize_nir(st, &prog->Base, prog->shader_program,
+ state.ir.nir, true);
+ }
- st_finalize_nir(st, &prog->Base, prog->shader_program,
- tgsi.ir.nir);
- } else {
+ if (ST_DEBUG & DEBUG_PRINT_IR)
+ nir_print_shader(state.ir.nir, stderr);
+ } else {
if (key->lower_depth_clamp) {
struct gl_program_parameter_list *params = prog->Base.Parameters;
const struct tgsi_token *tokens;
tokens =
- st_tgsi_lower_depth_clamp(prog->tgsi.tokens,
+ st_tgsi_lower_depth_clamp(prog->state.tokens,
depth_range_const,
key->clip_negative_one_to_one);
- if (tokens != prog->tgsi.tokens)
- tgsi_free_tokens(prog->tgsi.tokens);
+ if (tokens != prog->state.tokens)
+ tgsi_free_tokens(prog->state.tokens);
- prog->tgsi.tokens = tokens;
- prog->num_tgsi_tokens = tgsi_num_tokens(tokens);
+ prog->state.tokens = tokens;
}
- tgsi = prog->tgsi;
+ state = prog->state;
+
+ if (ST_DEBUG & DEBUG_PRINT_IR)
+ tgsi_dump(state.tokens, 0);
}
/* fill in new variant */
switch (prog->Base.info.stage) {
case MESA_SHADER_TESS_CTRL:
- v->driver_shader = pipe->create_tcs_state(pipe, &tgsi);
+ v->driver_shader = pipe->create_tcs_state(pipe, &state);
break;
case MESA_SHADER_TESS_EVAL:
- v->driver_shader = pipe->create_tes_state(pipe, &tgsi);
+ v->driver_shader = pipe->create_tes_state(pipe, &state);
break;
case MESA_SHADER_GEOMETRY:
- v->driver_shader = pipe->create_gs_state(pipe, &tgsi);
+ v->driver_shader = pipe->create_gs_state(pipe, &state);
break;
case MESA_SHADER_COMPUTE: {
struct pipe_compute_state cs = {0};
- cs.ir_type = tgsi.type;
+ cs.ir_type = state.type;
cs.req_local_mem = prog->Base.info.cs.shared_size;
- if (tgsi.type == PIPE_SHADER_IR_NIR)
- cs.prog = tgsi.ir.nir;
+ if (state.type == PIPE_SHADER_IR_NIR)
+ cs.prog = state.ir.nir;
else
- cs.prog = tgsi.tokens;
+ cs.prog = state.tokens;
v->driver_shader = pipe->create_compute_state(pipe, &cs);
break;
switch (target->Target) {
case GL_VERTEX_PROGRAM_ARB:
{
- struct st_vertex_program *stvp = (struct st_vertex_program *) target;
- struct st_vp_variant *vpv, **prevPtr = &stvp->variants;
+ struct st_program *stvp = (struct st_program *) target;
+ struct st_vp_variant *vpv, **prevPtr = &stvp->vp_variants;
- for (vpv = stvp->variants; vpv; ) {
+ for (vpv = stvp->vp_variants; vpv; ) {
struct st_vp_variant *next = vpv->next;
if (vpv->key.st == st) {
/* unlink from list */
break;
case GL_FRAGMENT_PROGRAM_ARB:
{
- struct st_fragment_program *stfp =
- (struct st_fragment_program *) target;
- struct st_fp_variant *fpv, **prevPtr = &stfp->variants;
+ struct st_program *stfp =
+ (struct st_program *) target;
+ struct st_fp_variant *fpv, **prevPtr = &stfp->fp_variants;
- for (fpv = stfp->variants; fpv; ) {
+ for (fpv = stfp->fp_variants; fpv; ) {
struct st_fp_variant *next = fpv->next;
if (fpv->key.st == st) {
/* unlink from list */
case GL_TESS_EVALUATION_PROGRAM_NV:
case GL_COMPUTE_PROGRAM_NV:
{
- struct st_common_program *p = st_common_program(target);
- struct st_basic_variant *v, **prevPtr = &p->variants;
+ struct st_program *p = st_program(target);
+ struct st_common_variant *v, **prevPtr = &p->variants;
for (v = p->variants; v; ) {
- struct st_basic_variant *next = v->next;
+ struct st_common_variant *next = v->next;
if (v->key.st == st) {
/* unlink from list */
*prevPtr = next;
/* destroy this variant */
- delete_basic_variant(st, v, target->Target);
+ delete_common_variant(st, v, target->Target);
}
else {
prevPtr = &v->next;
}
-/**
- * For debugging, print/dump the current vertex program.
- */
-void
-st_print_current_vertex_program(void)
-{
- GET_CURRENT_CONTEXT(ctx);
-
- if (ctx->VertexProgram._Current) {
- struct st_vertex_program *stvp =
- (struct st_vertex_program *) ctx->VertexProgram._Current;
- struct st_vp_variant *stv;
-
- debug_printf("Vertex program %u\n", stvp->Base.Id);
-
- for (stv = stvp->variants; stv; stv = stv->next) {
- debug_printf("variant %p\n", stv);
- tgsi_dump(stv->tgsi.tokens, 0);
- }
- }
-}
-
-
/**
* Compile one shader variant.
*/
-void
+static void
st_precompile_shader_variant(struct st_context *st,
struct gl_program *prog)
{
switch (prog->Target) {
case GL_VERTEX_PROGRAM_ARB: {
- struct st_vertex_program *p = (struct st_vertex_program *)prog;
- struct st_vp_variant_key key;
+ struct st_program *p = (struct st_program *)prog;
+ struct st_common_variant_key key;
memset(&key, 0, sizeof(key));
}
case GL_FRAGMENT_PROGRAM_ARB: {
- struct st_fragment_program *p = (struct st_fragment_program *)prog;
+ struct st_program *p = (struct st_program *)prog;
struct st_fp_variant_key key;
memset(&key, 0, sizeof(key));
case GL_TESS_EVALUATION_PROGRAM_NV:
case GL_GEOMETRY_PROGRAM_NV:
case GL_COMPUTE_PROGRAM_NV: {
- struct st_common_program *p = st_common_program(prog);
- struct st_basic_variant_key key;
+ struct st_program *p = st_program(prog);
+ struct st_common_variant_key key;
memset(&key, 0, sizeof(key));
key.st = st->has_shareable_shaders ? NULL : st;
- st_get_basic_variant(st, p, &key);
+ st_get_common_variant(st, p, &key);
break;
}
assert(0);
}
}
+
+/**
+ * Finalize a gl_program after (re)translation.
+ *
+ * If the program is currently bound for its stage, mark the context state
+ * that depends on it dirty so it is re-validated on the next draw.
+ * With ST_DEBUG=precompile, or when the stage is known to have a single
+ * variant, build the Gallium shader now instead of on first use.
+ */
+void
+st_finalize_program(struct st_context *st, struct gl_program *prog)
+{
+ if (st->current_program[prog->info.stage] == prog) {
+ if (prog->info.stage == MESA_SHADER_VERTEX)
+ st->dirty |= ST_NEW_VERTEX_PROGRAM(st, (struct st_program *)prog);
+ else
+ st->dirty |= ((struct st_program *)prog)->affected_states;
+ }
+
+ /* Create Gallium shaders now instead of on demand. */
+ if (ST_DEBUG & DEBUG_PRECOMPILE ||
+ st->shader_has_one_variant[prog->info.stage])
+ st_precompile_shader_variant(st, prog);
+}