#include "program/programopt.h"
#include "compiler/nir/nir.h"
+#include "compiler/nir/nir_serialize.h"
+#include "draw/draw_context.h"
#include "pipe/p_context.h"
#include "pipe/p_defines.h"
#include "st_atifs_to_tgsi.h"
#include "st_nir.h"
#include "st_shader_cache.h"
+#include "st_util.h"
#include "cso_cache/cso_context.h"
switch (prog->info.stage) {
case MESA_SHADER_VERTEX:
- states = &((struct st_vertex_program*)prog)->affected_states;
+ states = &((struct st_program*)prog)->affected_states;
*states = ST_NEW_VS_STATE |
ST_NEW_RASTERIZER |
break;
case MESA_SHADER_TESS_CTRL:
- states = &(st_common_program(prog))->affected_states;
+ states = &(st_program(prog))->affected_states;
*states = ST_NEW_TCS_STATE;
break;
case MESA_SHADER_TESS_EVAL:
- states = &(st_common_program(prog))->affected_states;
+ states = &(st_program(prog))->affected_states;
*states = ST_NEW_TES_STATE |
ST_NEW_RASTERIZER;
break;
case MESA_SHADER_GEOMETRY:
- states = &(st_common_program(prog))->affected_states;
+ states = &(st_program(prog))->affected_states;
*states = ST_NEW_GS_STATE |
ST_NEW_RASTERIZER;
break;
case MESA_SHADER_FRAGMENT:
- states = &((struct st_common_program*)prog)->affected_states;
+ states = &((struct st_program*)prog)->affected_states;
/* gl_FragCoord and glDrawPixels always use constants. */
*states = ST_NEW_FS_STATE |
break;
case MESA_SHADER_COMPUTE:
- states = &((struct st_common_program*)prog)->affected_states;
+ states = &((struct st_program*)prog)->affected_states;
*states = ST_NEW_CS_STATE;
}
}
-static void
-delete_ir(struct pipe_shader_state *ir)
-{
- if (ir->tokens) {
- ureg_free_tokens(ir->tokens);
- ir->tokens = NULL;
- }
-
- /* Note: Any setup of ->ir.nir that has had pipe->create_*_state called on
- * it has resulted in the driver taking ownership of the NIR. Those
- * callers should be NULLing out the nir field in any pipe_shader_state
- * that might have this called in order to indicate that.
- *
- * GLSL IR and ARB programs will have set gl_program->nir to the same
- * shader as ir->ir.nir, so it will be freed by _mesa_delete_program().
- */
-}
-
-/**
- * Delete a vertex program variant. Note the caller must unlink
- * the variant from the linked list.
- */
-static void
-delete_vp_variant(struct st_context *st, struct st_vp_variant *vpv)
-{
- if (vpv->driver_shader) {
- if (st->has_shareable_shaders || vpv->key.st == st) {
- cso_delete_vertex_shader(st->cso_context, vpv->driver_shader);
- } else {
- st_save_zombie_shader(vpv->key.st, PIPE_SHADER_VERTEX,
- vpv->driver_shader);
- }
- }
-
- if (vpv->draw_shader)
- draw_delete_vertex_shader( st->draw, vpv->draw_shader );
-
- if (vpv->tokens)
- ureg_free_tokens(vpv->tokens);
-
- free( vpv );
-}
-
-
-
-/**
- * Clean out any old compilations:
- */
-void
-st_release_vp_variants( struct st_context *st,
- struct st_vertex_program *stvp )
-{
- struct st_vp_variant *vpv;
-
- for (vpv = stvp->variants; vpv; ) {
- struct st_vp_variant *next = vpv->next;
- delete_vp_variant(st, vpv);
- vpv = next;
- }
-
- stvp->variants = NULL;
-
- delete_ir(&stvp->state);
-}
-
-
/**
- * Delete a fragment program variant. Note the caller must unlink
- * the variant from the linked list.
+ * Delete a shader variant. Note the caller must unlink the variant from
+ * the linked list.
*/
static void
-delete_fp_variant(struct st_context *st, struct st_fp_variant *fpv)
-{
- if (fpv->driver_shader) {
- if (st->has_shareable_shaders || fpv->key.st == st) {
- cso_delete_fragment_shader(st->cso_context, fpv->driver_shader);
- } else {
- st_save_zombie_shader(fpv->key.st, PIPE_SHADER_FRAGMENT,
- fpv->driver_shader);
- }
- }
-
- free(fpv);
-}
-
-
-/**
- * Free all variants of a fragment program.
- */
-void
-st_release_fp_variants(struct st_context *st, struct st_common_program *stfp)
-{
- struct st_fp_variant *fpv;
-
- for (fpv = stfp->fp_variants; fpv; ) {
- struct st_fp_variant *next = fpv->next;
- delete_fp_variant(st, fpv);
- fpv = next;
- }
-
- stfp->fp_variants = NULL;
-
- delete_ir(&stfp->state);
-}
-
-
-/**
- * Delete a basic program variant. Note the caller must unlink
- * the variant from the linked list.
- */
-static void
-delete_basic_variant(struct st_context *st, struct st_common_variant *v,
- GLenum target)
+delete_variant(struct st_context *st, struct st_variant *v, GLenum target)
{
if (v->driver_shader) {
- if (st->has_shareable_shaders || v->key.st == st) {
+ if (target == GL_VERTEX_PROGRAM_ARB &&
+ ((struct st_common_variant*)v)->key.is_draw_shader) {
+ /* Draw shader. */
+ draw_delete_vertex_shader(st->draw, v->driver_shader);
+ } else if (st->has_shareable_shaders || v->st == st) {
/* The shader's context matches the calling context, or we
* don't care.
*/
switch (target) {
+ case GL_VERTEX_PROGRAM_ARB:
+ st->pipe->delete_vs_state(st->pipe, v->driver_shader);
+ break;
case GL_TESS_CONTROL_PROGRAM_NV:
- cso_delete_tessctrl_shader(st->cso_context, v->driver_shader);
+ st->pipe->delete_tcs_state(st->pipe, v->driver_shader);
break;
case GL_TESS_EVALUATION_PROGRAM_NV:
- cso_delete_tesseval_shader(st->cso_context, v->driver_shader);
+ st->pipe->delete_tes_state(st->pipe, v->driver_shader);
break;
case GL_GEOMETRY_PROGRAM_NV:
- cso_delete_geometry_shader(st->cso_context, v->driver_shader);
+ st->pipe->delete_gs_state(st->pipe, v->driver_shader);
+ break;
+ case GL_FRAGMENT_PROGRAM_ARB:
+ st->pipe->delete_fs_state(st->pipe, v->driver_shader);
break;
case GL_COMPUTE_PROGRAM_NV:
- cso_delete_compute_shader(st->cso_context, v->driver_shader);
+ st->pipe->delete_compute_state(st->pipe, v->driver_shader);
break;
default:
unreachable("bad shader type in delete_basic_variant");
/* We can't delete a shader with a context different from the one
* that created it. Add it to the creating context's zombie list.
*/
- enum pipe_shader_type type;
- switch (target) {
- case GL_TESS_CONTROL_PROGRAM_NV:
- type = PIPE_SHADER_TESS_CTRL;
- break;
- case GL_TESS_EVALUATION_PROGRAM_NV:
- type = PIPE_SHADER_TESS_EVAL;
- break;
- case GL_GEOMETRY_PROGRAM_NV:
- type = PIPE_SHADER_GEOMETRY;
- break;
- default:
- unreachable("");
- }
- st_save_zombie_shader(v->key.st, type, v->driver_shader);
+ enum pipe_shader_type type =
+ pipe_shader_type_from_mesa(_mesa_program_enum_to_shader_stage(target));
+
+ st_save_zombie_shader(v->st, type, v->driver_shader);
}
}
free(v);
}
+/**
+ * Unbind a program from the driver by setting a NULL shader handle in
+ * cso_context for the program's stage, and mark the matching ST_NEW_*
+ * state dirty so st/mesa re-binds the currently active program on the
+ * next state validation.  Used before deleting variants that might
+ * still be bound in the driver.
+ */
+static void
+st_unbind_program(struct st_context *st, struct st_program *p)
+{
+ /* Unbind the shader in cso_context and re-bind in st/mesa. */
+ switch (p->Base.info.stage) {
+ case MESA_SHADER_VERTEX:
+ cso_set_vertex_shader_handle(st->cso_context, NULL);
+ st->dirty |= ST_NEW_VS_STATE;
+ break;
+ case MESA_SHADER_TESS_CTRL:
+ cso_set_tessctrl_shader_handle(st->cso_context, NULL);
+ st->dirty |= ST_NEW_TCS_STATE;
+ break;
+ case MESA_SHADER_TESS_EVAL:
+ cso_set_tesseval_shader_handle(st->cso_context, NULL);
+ st->dirty |= ST_NEW_TES_STATE;
+ break;
+ case MESA_SHADER_GEOMETRY:
+ cso_set_geometry_shader_handle(st->cso_context, NULL);
+ st->dirty |= ST_NEW_GS_STATE;
+ break;
+ case MESA_SHADER_FRAGMENT:
+ cso_set_fragment_shader_handle(st->cso_context, NULL);
+ st->dirty |= ST_NEW_FS_STATE;
+ break;
+ case MESA_SHADER_COMPUTE:
+ cso_set_compute_shader_handle(st->cso_context, NULL);
+ st->dirty |= ST_NEW_CS_STATE;
+ break;
+ default:
+ unreachable("invalid shader type");
+ }
+}
/**
* Free all basic program variants.
*/
void
-st_release_common_variants(struct st_context *st, struct st_common_program *p)
+st_release_variants(struct st_context *st, struct st_program *p)
{
- struct st_common_variant *v;
+ struct st_variant *v;
+
+ /* If we are releasing shaders, re-bind them, because we don't
+ * know which shaders are bound in the driver.
+ */
+ if (p->variants)
+ st_unbind_program(st, p);
for (v = p->variants; v; ) {
- struct st_common_variant *next = v->next;
- delete_basic_variant(st, v, p->Base.Target);
+ struct st_variant *next = v->next;
+ delete_variant(st, v, p->Base.Target);
v = next;
}
p->variants = NULL;
- delete_ir(&p->state);
+
+ if (p->state.tokens) {
+ ureg_free_tokens(p->state.tokens);
+ p->state.tokens = NULL;
+ }
+
+ /* Note: Any setup of ->ir.nir that has had pipe->create_*_state called on
+ * it has resulted in the driver taking ownership of the NIR. Those
+ * callers should be NULLing out the nir field in any pipe_shader_state
+ * that might have this called in order to indicate that.
+ *
+ * GLSL IR and ARB programs will have set gl_program->nir to the same
+ * shader as ir->ir.nir, so it will be freed by _mesa_delete_program().
+ */
}
+/**
+ * Run NIR passes that should happen once per gl_program, before any
+ * shader variants are generated from it: access-qualifier optimization,
+ * var-copy splitting/lowering, lowering of I/O arrays to elements where
+ * required by the stage or the backend options, and VS input location
+ * assignment.  Per-variant lowering is applied later, on copies of this
+ * shader.
+ */
+void
+st_finalize_nir_before_variants(struct nir_shader *nir)
+{
+ NIR_PASS_V(nir, nir_opt_access);
+
+ NIR_PASS_V(nir, nir_split_var_copies);
+ NIR_PASS_V(nir, nir_lower_var_copies);
+ /* VS and GS (and backends that request it) take the no-indirect path;
+  * FS additionally lowers interpolation qualifiers (the 'true' arg). */
+ if (nir->options->lower_all_io_to_temps ||
+ nir->options->lower_all_io_to_elements ||
+ nir->info.stage == MESA_SHADER_VERTEX ||
+ nir->info.stage == MESA_SHADER_GEOMETRY) {
+ NIR_PASS_V(nir, nir_lower_io_arrays_to_elements_no_indirects, false);
+ } else if (nir->info.stage == MESA_SHADER_FRAGMENT) {
+ NIR_PASS_V(nir, nir_lower_io_arrays_to_elements_no_indirects, true);
+ }
+
+ st_nir_assign_vs_in_locations(nir);
+}
/**
* Translate ARB (asm) program to NIR
st_translate_prog_to_nir(struct st_context *st, struct gl_program *prog,
gl_shader_stage stage)
{
+ struct pipe_screen *screen = st->pipe->screen;
const struct gl_shader_compiler_options *options =
&st->ctx->Const.ShaderCompilerOptions[stage];
NIR_PASS_V(nir, nir_lower_regs_to_ssa); /* turn registers into SSA */
nir_validate_shader(nir, "after st/ptn lower_regs_to_ssa");
- NIR_PASS_V(nir, st_nir_lower_wpos_ytransform, prog, st->pipe->screen);
+ NIR_PASS_V(nir, st_nir_lower_wpos_ytransform, prog, screen);
NIR_PASS_V(nir, nir_lower_system_values);
/* Optimise NIR */
NIR_PASS_V(nir, nir_opt_constant_folding);
st_nir_opts(nir);
- nir_validate_shader(nir, "after st/ptn NIR opts");
+ st_finalize_nir_before_variants(nir);
+
+ if (st->allow_st_finalize_nir_twice)
+ st_finalize_nir(st, prog, NULL, nir, true);
+
+ nir_validate_shader(nir, "after st/glsl finalize_nir");
return nir;
}
void
-st_prepare_vertex_program(struct st_vertex_program *stvp)
+st_prepare_vertex_program(struct st_program *stp)
{
+ struct st_vertex_program *stvp = (struct st_vertex_program *)stp;
+
stvp->num_inputs = 0;
memset(stvp->input_to_index, ~0, sizeof(stvp->input_to_index));
memset(stvp->result_to_output, ~0, sizeof(stvp->result_to_output));
* and TGSI generic input indexes, plus input attrib semantic info.
*/
for (unsigned attr = 0; attr < VERT_ATTRIB_MAX; attr++) {
- if ((stvp->Base.info.inputs_read & BITFIELD64_BIT(attr)) != 0) {
+ if ((stp->Base.info.inputs_read & BITFIELD64_BIT(attr)) != 0) {
stvp->input_to_index[attr] = stvp->num_inputs;
stvp->index_to_input[stvp->num_inputs] = attr;
stvp->num_inputs++;
- if ((stvp->Base.DualSlotInputs & BITFIELD64_BIT(attr)) != 0) {
+ if ((stp->Base.DualSlotInputs & BITFIELD64_BIT(attr)) != 0) {
/* add placeholder for second part of a double attribute */
stvp->index_to_input[stvp->num_inputs] = ST_DOUBLE_ATTRIB_PLACEHOLDER;
stvp->num_inputs++;
/* Compute mapping of vertex program outputs to slots. */
unsigned num_outputs = 0;
for (unsigned attr = 0; attr < VARYING_SLOT_MAX; attr++) {
- if (stvp->Base.info.outputs_written & BITFIELD64_BIT(attr))
+ if (stp->Base.info.outputs_written & BITFIELD64_BIT(attr))
stvp->result_to_output[attr] = num_outputs++;
}
/* pre-setup potentially unused edgeflag output */
}
/* Translate stream output info. */
- struct pipe_stream_output_info *so_info = NULL;
- if (prog->info.stage == MESA_SHADER_VERTEX)
- so_info = &((struct st_vertex_program*)prog)->state.stream_output;
- else
- so_info = &((struct st_common_program*)prog)->state.stream_output;
+ struct pipe_stream_output_info *so_info =
+ &((struct st_program*)prog)->state.stream_output;
for (unsigned i = 0; i < info->NumOutputs; i++) {
so_info->output[i].register_index =
*/
bool
st_translate_vertex_program(struct st_context *st,
- struct st_vertex_program *stvp)
+ struct st_program *stp)
{
struct ureg_program *ureg;
enum pipe_error error;
ubyte output_semantic_name[VARYING_SLOT_MAX] = {0};
ubyte output_semantic_index[VARYING_SLOT_MAX] = {0};
- if (stvp->Base.arb.IsPositionInvariant)
- _mesa_insert_mvp_code(st->ctx, &stvp->Base);
+ if (stp->Base.arb.IsPositionInvariant)
+ _mesa_insert_mvp_code(st->ctx, &stp->Base);
- st_prepare_vertex_program(stvp);
+ st_prepare_vertex_program(stp);
/* ARB_vp: */
- if (!stvp->glsl_to_tgsi) {
- _mesa_remove_output_reads(&stvp->Base, PROGRAM_OUTPUT);
+ if (!stp->glsl_to_tgsi) {
+ _mesa_remove_output_reads(&stp->Base, PROGRAM_OUTPUT);
/* This determines which states will be updated when the assembly
* shader is bound.
*/
- stvp->affected_states = ST_NEW_VS_STATE |
+ stp->affected_states = ST_NEW_VS_STATE |
ST_NEW_RASTERIZER |
ST_NEW_VERTEX_ARRAYS;
- if (stvp->Base.Parameters->NumParameters)
- stvp->affected_states |= ST_NEW_VS_CONSTANTS;
+ if (stp->Base.Parameters->NumParameters)
+ stp->affected_states |= ST_NEW_VS_CONSTANTS;
+
+ /* Translate to NIR if preferred. */
+ if (st->pipe->screen->get_shader_param(st->pipe->screen,
+ PIPE_SHADER_VERTEX,
+ PIPE_SHADER_CAP_PREFERRED_IR)) {
+ assert(!stp->glsl_to_tgsi);
+
+ if (stp->Base.nir)
+ ralloc_free(stp->Base.nir);
+
+ if (stp->serialized_nir) {
+ free(stp->serialized_nir);
+ stp->serialized_nir = NULL;
+ }
- /* No samplers are allowed in ARB_vp. */
+ stp->state.type = PIPE_SHADER_IR_NIR;
+ stp->Base.nir = st_translate_prog_to_nir(st, &stp->Base,
+ MESA_SHADER_VERTEX);
+ /* For st_draw_feedback, we need to generate TGSI too if draw doesn't
+ * use LLVM.
+ */
+ if (draw_has_llvm())
+ return true;
+ }
}
/* Get semantic names and indices. */
for (attr = 0; attr < VARYING_SLOT_MAX; attr++) {
- if (stvp->Base.info.outputs_written & BITFIELD64_BIT(attr)) {
+ if (stp->Base.info.outputs_written & BITFIELD64_BIT(attr)) {
unsigned slot = num_outputs++;
unsigned semantic_name, semantic_index;
tgsi_get_gl_varying_semantic(attr, st->needs_texcoord_semantic,
if (ureg == NULL)
return false;
- if (stvp->Base.info.clip_distance_array_size)
+ if (stp->Base.info.clip_distance_array_size)
ureg_property(ureg, TGSI_PROPERTY_NUM_CLIPDIST_ENABLED,
- stvp->Base.info.clip_distance_array_size);
- if (stvp->Base.info.cull_distance_array_size)
+ stp->Base.info.clip_distance_array_size);
+ if (stp->Base.info.cull_distance_array_size)
ureg_property(ureg, TGSI_PROPERTY_NUM_CULLDIST_ENABLED,
- stvp->Base.info.cull_distance_array_size);
+ stp->Base.info.cull_distance_array_size);
if (ST_DEBUG & DEBUG_MESA) {
- _mesa_print_program(&stvp->Base);
- _mesa_print_program_parameters(st->ctx, &stvp->Base);
+ _mesa_print_program(&stp->Base);
+ _mesa_print_program_parameters(st->ctx, &stp->Base);
debug_printf("\n");
}
- if (stvp->glsl_to_tgsi) {
+ struct st_vertex_program *stvp = (struct st_vertex_program *)stp;
+
+ if (stp->glsl_to_tgsi) {
error = st_translate_program(st->ctx,
PIPE_SHADER_VERTEX,
ureg,
- stvp->glsl_to_tgsi,
- &stvp->Base,
+ stp->glsl_to_tgsi,
+ &stp->Base,
/* inputs */
stvp->num_inputs,
stvp->input_to_index,
output_semantic_name,
output_semantic_index);
- st_translate_stream_output_info(&stvp->Base);
+ st_translate_stream_output_info(&stp->Base);
- free_glsl_to_tgsi_visitor(stvp->glsl_to_tgsi);
+ free_glsl_to_tgsi_visitor(stp->glsl_to_tgsi);
} else
error = st_translate_mesa_program(st->ctx,
PIPE_SHADER_VERTEX,
ureg,
- &stvp->Base,
+ &stp->Base,
/* inputs */
stvp->num_inputs,
stvp->input_to_index,
if (error) {
debug_printf("%s: failed to translate Mesa program:\n", __func__);
- _mesa_print_program(&stvp->Base);
+ _mesa_print_program(&stp->Base);
debug_assert(0);
return false;
}
- stvp->state.tokens = ureg_get_tokens(ureg, NULL);
+ stp->state.tokens = ureg_get_tokens(ureg, NULL);
ureg_destroy(ureg);
- if (stvp->glsl_to_tgsi) {
- stvp->glsl_to_tgsi = NULL;
- st_store_ir_in_disk_cache(st, &stvp->Base, false);
+ if (stp->glsl_to_tgsi) {
+ stp->glsl_to_tgsi = NULL;
+ st_store_ir_in_disk_cache(st, &stp->Base, false);
}
- /* Translate to NIR.
- *
- * This must be done after the translation to TGSI is done, because
- * we'll pass the NIR shader to the driver and the TGSI version to
- * the draw module for the select/feedback/rasterpos code.
- */
- if (st->pipe->screen->get_shader_param(st->pipe->screen,
- PIPE_SHADER_VERTEX,
- PIPE_SHADER_CAP_PREFERRED_IR)) {
- assert(!stvp->glsl_to_tgsi);
-
- nir_shader *nir =
- st_translate_prog_to_nir(st, &stvp->Base, MESA_SHADER_VERTEX);
-
- if (stvp->state.ir.nir)
- ralloc_free(stvp->state.ir.nir);
- stvp->state.type = PIPE_SHADER_IR_NIR;
- stvp->state.ir.nir = nir;
- stvp->Base.nir = nir;
- return true;
+ return stp->state.tokens != NULL;
+}
+
+/**
+ * Return a nir_shader to build a variant of stp from.  The first call
+ * transfers ownership of stp->Base.nir to the caller (no cloning);
+ * subsequent calls deserialize a fresh copy from stp->serialized_nir,
+ * so each variant gets its own shader while only the serialized blob is
+ * kept in memory.
+ */
+static struct nir_shader *
+get_nir_shader(struct st_context *st, struct st_program *stp)
+{
+ if (stp->Base.nir) {
+ nir_shader *nir = stp->Base.nir;
+
+ /* The first shader variant takes ownership of NIR, so that there is
+ * no cloning. Additional shader variants are always generated from
+ * serialized NIR to save memory.
+ */
+ stp->Base.nir = NULL;
+ /* The serialized form must already exist, or later variants fail. */
+ assert(stp->serialized_nir && stp->serialized_nir_size);
+ return nir;
}
- return stvp->state.tokens != NULL;
+ struct blob_reader blob_reader;
+ const struct nir_shader_compiler_options *options =
+ st->ctx->Const.ShaderCompilerOptions[stp->Base.info.stage].NirOptions;
+
+ blob_reader_init(&blob_reader, stp->serialized_nir, stp->serialized_nir_size);
+ return nir_deserialize(NULL, options, &blob_reader);
}
static const gl_state_index16 depth_range_state[STATE_LENGTH] =
{ STATE_DEPTH_RANGE };
-static struct st_vp_variant *
+static struct st_common_variant *
st_create_vp_variant(struct st_context *st,
- struct st_vertex_program *stvp,
+ struct st_program *stvp,
const struct st_common_variant_key *key)
{
- struct st_vp_variant *vpv = CALLOC_STRUCT(st_vp_variant);
+ struct st_common_variant *vpv = CALLOC_STRUCT(st_common_variant);
struct pipe_context *pipe = st->pipe;
+ struct pipe_screen *screen = pipe->screen;
struct pipe_shader_state state = {0};
static const gl_state_index16 point_size_state[STATE_LENGTH] =
struct gl_program_parameter_list *params = stvp->Base.Parameters;
vpv->key = *key;
- vpv->num_inputs = stvp->num_inputs;
state.stream_output = stvp->state.stream_output;
- if (stvp->state.type == PIPE_SHADER_IR_NIR) {
+ if (stvp->state.type == PIPE_SHADER_IR_NIR &&
+ (!key->is_draw_shader || draw_has_llvm())) {
+ bool finalize = false;
+
state.type = PIPE_SHADER_IR_NIR;
- state.ir.nir = nir_shader_clone(NULL, stvp->state.ir.nir);
- if (key->clamp_color)
+ state.ir.nir = get_nir_shader(st, stvp);
+ if (key->clamp_color) {
NIR_PASS_V(state.ir.nir, nir_lower_clamp_color_outputs);
+ finalize = true;
+ }
if (key->passthrough_edgeflags) {
NIR_PASS_V(state.ir.nir, nir_lower_passthrough_edgeflags);
- vpv->num_inputs++;
+ finalize = true;
}
if (key->lower_point_size) {
_mesa_add_state_reference(params, point_size_state);
NIR_PASS_V(state.ir.nir, nir_lower_point_size_mov,
point_size_state);
+ finalize = true;
}
if (key->lower_ucp) {
- struct pipe_screen *screen = pipe->screen;
bool can_compact = screen->get_param(screen,
PIPE_CAP_NIR_COMPACT_ARRAYS);
true, can_compact, clipplane_state);
NIR_PASS_V(state.ir.nir, nir_lower_io_to_temporaries,
nir_shader_get_entrypoint(state.ir.nir), true, false);
+ NIR_PASS_V(state.ir.nir, nir_lower_global_vars_to_local);
+ finalize = true;
}
- st_finalize_nir(st, &stvp->Base, stvp->shader_program,
- state.ir.nir);
+ if (finalize || !st->allow_st_finalize_nir_twice) {
+ st_finalize_nir(st, &stvp->Base, stvp->shader_program, state.ir.nir,
+ true);
+
+ /* Some of the lowering above may have introduced new varyings */
+ nir_shader_gather_info(state.ir.nir,
+ nir_shader_get_entrypoint(state.ir.nir));
+ }
- vpv->driver_shader = pipe->create_vs_state(pipe, &state);
+ if (ST_DEBUG & DEBUG_PRINT_IR)
+ nir_print_shader(state.ir.nir, stderr);
- /* When generating a NIR program, we usually don't have TGSI tokens.
- * However, we do create them for ARB_vertex_program / fixed-function VS
- * programs which we may need to use with the draw module for legacy
- * feedback/select emulation. If they exist, copy them.
- *
- * TODO: Lowering for shader variants is not applied to TGSI when
- * generating a NIR shader.
- */
- if (stvp->state.tokens)
- vpv->tokens = tgsi_dup_tokens(stvp->state.tokens);
+ if (key->is_draw_shader)
+ vpv->base.driver_shader = draw_create_vertex_shader(st->draw, &state);
+ else
+ vpv->base.driver_shader = pipe->create_vs_state(pipe, &state);
return vpv;
}
if (tokens) {
tgsi_free_tokens(state.tokens);
state.tokens = tokens;
-
- if (key->passthrough_edgeflags)
- vpv->num_inputs++;
- } else
+ } else {
fprintf(stderr, "mesa: cannot emulate deprecated features\n");
+ }
}
if (key->lower_depth_clamp) {
state.tokens = tokens;
}
- if (ST_DEBUG & DEBUG_TGSI) {
+ if (ST_DEBUG & DEBUG_PRINT_IR)
tgsi_dump(state.tokens, 0);
- debug_printf("\n");
+
+ if (key->is_draw_shader)
+ vpv->base.driver_shader = draw_create_vertex_shader(st->draw, &state);
+ else
+ vpv->base.driver_shader = pipe->create_vs_state(pipe, &state);
+
+ if (state.tokens) {
+ tgsi_free_tokens(state.tokens);
}
- vpv->driver_shader = pipe->create_vs_state(pipe, &state);
- /* Save this for selection/feedback/rasterpos. */
- vpv->tokens = state.tokens;
return vpv;
}
/**
* Find/create a vertex program variant.
*/
-struct st_vp_variant *
+struct st_common_variant *
st_get_vp_variant(struct st_context *st,
- struct st_vertex_program *stvp,
+ struct st_program *stp,
const struct st_common_variant_key *key)
{
- struct st_vp_variant *vpv;
+ struct st_vertex_program *stvp = (struct st_vertex_program *)stp;
+ struct st_common_variant *vpv;
/* Search for existing variant */
- for (vpv = stvp->variants; vpv; vpv = vpv->next) {
+ for (vpv = st_common_variant(stp->variants); vpv;
+ vpv = st_common_variant(vpv->base.next)) {
if (memcmp(&vpv->key, key, sizeof(*key)) == 0) {
break;
}
if (!vpv) {
/* create now */
- vpv = st_create_vp_variant(st, stvp, key);
+ vpv = st_create_vp_variant(st, stp, key);
if (vpv) {
- for (unsigned index = 0; index < vpv->num_inputs; ++index) {
- unsigned attr = stvp->index_to_input[index];
- if (attr == ST_DOUBLE_ATTRIB_PLACEHOLDER)
- continue;
- vpv->vert_attrib_mask |= 1u << attr;
- }
+ vpv->base.st = key->st;
+
+ unsigned num_inputs = stvp->num_inputs + key->passthrough_edgeflags;
+ for (unsigned index = 0; index < num_inputs; ++index) {
+ unsigned attr = stvp->index_to_input[index];
+ if (attr == ST_DOUBLE_ATTRIB_PLACEHOLDER)
+ continue;
+ vpv->vert_attrib_mask |= 1u << attr;
+ }
/* insert into list */
- vpv->next = stvp->variants;
- stvp->variants = vpv;
+ vpv->base.next = stp->variants;
+ stp->variants = &vpv->base;
}
}
*/
bool
st_translate_fragment_program(struct st_context *st,
- struct st_common_program *stfp)
+ struct st_program *stfp)
{
/* Non-GLSL programs: */
if (!stfp->glsl_to_tgsi) {
nir_shader *nir =
st_translate_prog_to_nir(st, &stfp->Base, MESA_SHADER_FRAGMENT);
- if (stfp->state.ir.nir)
- ralloc_free(stfp->state.ir.nir);
+ if (stfp->Base.nir)
+ ralloc_free(stfp->Base.nir);
+ if (stfp->serialized_nir) {
+ free(stfp->serialized_nir);
+ stfp->serialized_nir = NULL;
+ }
stfp->state.type = PIPE_SHADER_IR_NIR;
- stfp->state.ir.nir = nir;
stfp->Base.nir = nir;
return true;
}
static struct st_fp_variant *
st_create_fp_variant(struct st_context *st,
- struct st_common_program *stfp,
+ struct st_program *stfp,
const struct st_fp_variant_key *key)
{
struct pipe_context *pipe = st->pipe;
return NULL;
if (stfp->state.type == PIPE_SHADER_IR_NIR) {
+ bool finalize = false;
+
state.type = PIPE_SHADER_IR_NIR;
- state.ir.nir = nir_shader_clone(NULL, stfp->state.ir.nir);
+ state.ir.nir = get_nir_shader(st, stfp);
- if (key->clamp_color)
+ if (key->clamp_color) {
NIR_PASS_V(state.ir.nir, nir_lower_clamp_color_outputs);
+ finalize = true;
+ }
- if (key->lower_flatshade)
+ if (key->lower_flatshade) {
NIR_PASS_V(state.ir.nir, nir_lower_flatshade);
+ finalize = true;
+ }
if (key->lower_alpha_func != COMPARE_FUNC_NEVER) {
_mesa_add_state_reference(params, alpha_ref_state);
NIR_PASS_V(state.ir.nir, nir_lower_alpha_test, key->lower_alpha_func,
false, alpha_ref_state);
+ finalize = true;
}
- if (key->lower_two_sided_color)
+ if (key->lower_two_sided_color) {
NIR_PASS_V(state.ir.nir, nir_lower_two_sided_color);
+ finalize = true;
+ }
if (key->persample_shading) {
nir_shader *shader = state.ir.nir;
nir_foreach_variable(var, &shader->inputs)
var->data.sample = true;
+ finalize = true;
}
assert(!(key->bitmap && key->drawpixels));
options.swizzle_xxxx = st->bitmap.tex_format == PIPE_FORMAT_R8_UNORM;
NIR_PASS_V(state.ir.nir, nir_lower_bitmap, &options);
+ finalize = true;
}
/* glDrawPixels (color only) */
sizeof(options.texcoord_state_tokens));
NIR_PASS_V(state.ir.nir, nir_lower_drawpixels, &options);
+ finalize = true;
}
if (unlikely(key->external.lower_nv12 || key->external.lower_iyuv ||
key->external.lower_xy_uxvx || key->external.lower_yx_xuxv ||
key->external.lower_ayuv || key->external.lower_xyuv)) {
+
+ st_nir_lower_samplers(pipe->screen, state.ir.nir,
+ stfp->shader_program, &stfp->Base);
+
nir_lower_tex_options options = {0};
options.lower_y_uv_external = key->external.lower_nv12;
options.lower_y_u_v_external = key->external.lower_iyuv;
options.lower_ayuv_external = key->external.lower_ayuv;
options.lower_xyuv_external = key->external.lower_xyuv;
NIR_PASS_V(state.ir.nir, nir_lower_tex, &options);
+ finalize = true;
}
- st_finalize_nir(st, &stfp->Base, stfp->shader_program, state.ir.nir);
+ if (finalize || !st->allow_st_finalize_nir_twice) {
+ st_finalize_nir(st, &stfp->Base, stfp->shader_program, state.ir.nir,
+ false);
+ }
+ /* This pass needs to happen *after* nir_lower_sampler */
if (unlikely(key->external.lower_nv12 || key->external.lower_iyuv ||
- key->external.lower_xy_uxvx || key->external.lower_yx_xuxv)) {
- /* This pass needs to happen *after* nir_lower_sampler */
+ key->external.lower_xy_uxvx || key->external.lower_yx_xuxv ||
+ key->external.lower_ayuv || key->external.lower_xyuv)) {
NIR_PASS_V(state.ir.nir, st_nir_lower_tex_src_plane,
~stfp->Base.SamplersUsed,
key->external.lower_nv12 || key->external.lower_xy_uxvx ||
key->external.lower_yx_xuxv,
key->external.lower_iyuv);
+ finalize = true;
}
- /* Some of the lowering above may have introduced new varyings */
- nir_shader_gather_info(state.ir.nir,
- nir_shader_get_entrypoint(state.ir.nir));
+ if (finalize || !st->allow_st_finalize_nir_twice) {
+ /* Some of the lowering above may have introduced new varyings */
+ nir_shader_gather_info(state.ir.nir,
+ nir_shader_get_entrypoint(state.ir.nir));
- variant->driver_shader = pipe->create_fs_state(pipe, &state);
+ struct pipe_screen *screen = pipe->screen;
+ if (screen->finalize_nir)
+ screen->finalize_nir(screen, state.ir.nir, false);
+ }
+
+ if (ST_DEBUG & DEBUG_PRINT_IR)
+ nir_print_shader(state.ir.nir, stderr);
+
+ variant->base.driver_shader = pipe->create_fs_state(pipe, &state);
variant->key = *key;
return variant;
state.tokens = tokens;
}
- if (ST_DEBUG & DEBUG_TGSI) {
+ if (ST_DEBUG & DEBUG_PRINT_IR)
tgsi_dump(state.tokens, 0);
- debug_printf("\n");
- }
/* fill in variant */
- variant->driver_shader = pipe->create_fs_state(pipe, &state);
+ variant->base.driver_shader = pipe->create_fs_state(pipe, &state);
variant->key = *key;
if (state.tokens != stfp->state.tokens)
*/
struct st_fp_variant *
st_get_fp_variant(struct st_context *st,
- struct st_common_program *stfp,
+ struct st_program *stfp,
const struct st_fp_variant_key *key)
{
struct st_fp_variant *fpv;
/* Search for existing variant */
- for (fpv = stfp->fp_variants; fpv; fpv = fpv->next) {
+ for (fpv = st_fp_variant(stfp->variants); fpv;
+ fpv = st_fp_variant(fpv->base.next)) {
if (memcmp(&fpv->key, key, sizeof(*key)) == 0) {
break;
}
/* create new */
fpv = st_create_fp_variant(st, stfp, key);
if (fpv) {
+ fpv->base.st = key->st;
+
if (key->bitmap || key->drawpixels) {
/* Regular variants should always come before the
* bitmap & drawpixels variants, (unless there
* st_update_fp can take a fast path when
* shader_has_one_variant is set.
*/
- if (!stfp->fp_variants) {
- stfp->fp_variants = fpv;
+ if (!stfp->variants) {
+ stfp->variants = &fpv->base;
} else {
/* insert into list after the first one */
- fpv->next = stfp->fp_variants->next;
- stfp->fp_variants->next = fpv;
+ fpv->base.next = stfp->variants->next;
+ stfp->variants->next = &fpv->base;
}
} else {
/* insert into list */
- fpv->next = stfp->fp_variants;
- stfp->fp_variants = fpv;
+ fpv->base.next = stfp->variants;
+ stfp->variants = &fpv->base;
}
}
}
*/
bool
st_translate_common_program(struct st_context *st,
- struct st_common_program *stcp)
+ struct st_program *stp)
{
- struct gl_program *prog = &stcp->Base;
+ struct gl_program *prog = &stp->Base;
enum pipe_shader_type stage =
- pipe_shader_type_from_mesa(stcp->Base.info.stage);
+ pipe_shader_type_from_mesa(stp->Base.info.stage);
struct ureg_program *ureg = ureg_create_with_screen(stage, st->pipe->screen);
if (ureg == NULL)
switch (stage) {
case PIPE_SHADER_TESS_CTRL:
ureg_property(ureg, TGSI_PROPERTY_TCS_VERTICES_OUT,
- stcp->Base.info.tess.tcs_vertices_out);
+ stp->Base.info.tess.tcs_vertices_out);
break;
case PIPE_SHADER_TESS_EVAL:
- if (stcp->Base.info.tess.primitive_mode == GL_ISOLINES)
+ if (stp->Base.info.tess.primitive_mode == GL_ISOLINES)
ureg_property(ureg, TGSI_PROPERTY_TES_PRIM_MODE, GL_LINES);
else
ureg_property(ureg, TGSI_PROPERTY_TES_PRIM_MODE,
- stcp->Base.info.tess.primitive_mode);
+ stp->Base.info.tess.primitive_mode);
STATIC_ASSERT((TESS_SPACING_EQUAL + 1) % 3 == PIPE_TESS_SPACING_EQUAL);
STATIC_ASSERT((TESS_SPACING_FRACTIONAL_ODD + 1) % 3 ==
PIPE_TESS_SPACING_FRACTIONAL_EVEN);
ureg_property(ureg, TGSI_PROPERTY_TES_SPACING,
- (stcp->Base.info.tess.spacing + 1) % 3);
+ (stp->Base.info.tess.spacing + 1) % 3);
ureg_property(ureg, TGSI_PROPERTY_TES_VERTEX_ORDER_CW,
- !stcp->Base.info.tess.ccw);
+ !stp->Base.info.tess.ccw);
ureg_property(ureg, TGSI_PROPERTY_TES_POINT_MODE,
- stcp->Base.info.tess.point_mode);
+ stp->Base.info.tess.point_mode);
break;
case PIPE_SHADER_GEOMETRY:
ureg_property(ureg, TGSI_PROPERTY_GS_INPUT_PRIM,
- stcp->Base.info.gs.input_primitive);
+ stp->Base.info.gs.input_primitive);
ureg_property(ureg, TGSI_PROPERTY_GS_OUTPUT_PRIM,
- stcp->Base.info.gs.output_primitive);
+ stp->Base.info.gs.output_primitive);
ureg_property(ureg, TGSI_PROPERTY_GS_MAX_OUTPUT_VERTICES,
- stcp->Base.info.gs.vertices_out);
+ stp->Base.info.gs.vertices_out);
ureg_property(ureg, TGSI_PROPERTY_GS_INVOCATIONS,
- stcp->Base.info.gs.invocations);
+ stp->Base.info.gs.invocations);
break;
default:
memset(inputSlotToAttr, 0, sizeof(inputSlotToAttr));
memset(inputMapping, 0, sizeof(inputMapping));
memset(outputMapping, 0, sizeof(outputMapping));
- memset(&stcp->state, 0, sizeof(stcp->state));
+ memset(&stp->state, 0, sizeof(stp->state));
if (prog->info.clip_distance_array_size)
ureg_property(ureg, TGSI_PROPERTY_NUM_CLIPDIST_ENABLED,
st_translate_program(st->ctx,
stage,
ureg,
- stcp->glsl_to_tgsi,
+ stp->glsl_to_tgsi,
prog,
/* inputs */
num_inputs,
output_semantic_name,
output_semantic_index);
- stcp->state.tokens = ureg_get_tokens(ureg, NULL);
+ stp->state.tokens = ureg_get_tokens(ureg, NULL);
ureg_destroy(ureg);
st_store_ir_in_disk_cache(st, prog, false);
- if ((ST_DEBUG & DEBUG_TGSI) && (ST_DEBUG & DEBUG_MESA)) {
+ if (ST_DEBUG & DEBUG_PRINT_IR && ST_DEBUG & DEBUG_MESA)
_mesa_print_program(prog);
- debug_printf("\n");
- }
-
- if (ST_DEBUG & DEBUG_TGSI) {
- tgsi_dump(stcp->state.tokens, 0);
- debug_printf("\n");
- }
- free_glsl_to_tgsi_visitor(stcp->glsl_to_tgsi);
- stcp->glsl_to_tgsi = NULL;
+ free_glsl_to_tgsi_visitor(stp->glsl_to_tgsi);
+ stp->glsl_to_tgsi = NULL;
return true;
}
/**
* Get/create a basic program variant.
*/
-struct st_common_variant *
+struct st_variant *
st_get_common_variant(struct st_context *st,
- struct st_common_program *prog,
+ struct st_program *prog,
const struct st_common_variant_key *key)
{
struct pipe_context *pipe = st->pipe;
- struct st_common_variant *v;
+ struct st_variant *v;
struct pipe_shader_state state = {0};
/* Search for existing variant */
for (v = prog->variants; v; v = v->next) {
- if (memcmp(&v->key, key, sizeof(*key)) == 0) {
+ if (memcmp(&st_common_variant(v)->key, key, sizeof(*key)) == 0)
break;
- }
}
if (!v) {
/* create new */
- v = CALLOC_STRUCT(st_common_variant);
+ v = (struct st_variant*)CALLOC_STRUCT(st_common_variant);
if (v) {
-
if (prog->state.type == PIPE_SHADER_IR_NIR) {
+ bool finalize = false;
+
state.type = PIPE_SHADER_IR_NIR;
- state.ir.nir = nir_shader_clone(NULL, prog->state.ir.nir);
+ state.ir.nir = get_nir_shader(st, prog);
- if (key->clamp_color)
+ if (key->clamp_color) {
NIR_PASS_V(state.ir.nir, nir_lower_clamp_color_outputs);
+ finalize = true;
+ }
state.stream_output = prog->state.stream_output;
- st_finalize_nir(st, &prog->Base, prog->shader_program,
- state.ir.nir);
- } else {
+ if (finalize || !st->allow_st_finalize_nir_twice) {
+ st_finalize_nir(st, &prog->Base, prog->shader_program,
+ state.ir.nir, true);
+ }
+
+ if (ST_DEBUG & DEBUG_PRINT_IR)
+ nir_print_shader(state.ir.nir, stderr);
+ } else {
if (key->lower_depth_clamp) {
struct gl_program_parameter_list *params = prog->Base.Parameters;
prog->state.tokens = tokens;
}
- state = prog->state;
+ state = prog->state;
+
+ if (ST_DEBUG & DEBUG_PRINT_IR)
+ tgsi_dump(state.tokens, 0);
}
/* fill in new variant */
switch (prog->Base.info.stage) {
return NULL;
}
- v->key = *key;
+ st_common_variant(v)->key = *key;
+ v->st = key->st;
/* insert into list */
v->next = prog->variants;
if (!target || target == &_mesa_DummyProgram)
return;
- switch (target->Target) {
- case GL_VERTEX_PROGRAM_ARB:
- {
- struct st_vertex_program *stvp = (struct st_vertex_program *) target;
- struct st_vp_variant *vpv, **prevPtr = &stvp->variants;
-
- for (vpv = stvp->variants; vpv; ) {
- struct st_vp_variant *next = vpv->next;
- if (vpv->key.st == st) {
- /* unlink from list */
- *prevPtr = next;
- /* destroy this variant */
- delete_vp_variant(st, vpv);
- }
- else {
- prevPtr = &vpv->next;
- }
- vpv = next;
- }
- }
- break;
- case GL_FRAGMENT_PROGRAM_ARB:
- {
- struct st_common_program *stfp =
- (struct st_common_program *) target;
- struct st_fp_variant *fpv, **prevPtr = &stfp->fp_variants;
-
- for (fpv = stfp->fp_variants; fpv; ) {
- struct st_fp_variant *next = fpv->next;
- if (fpv->key.st == st) {
- /* unlink from list */
- *prevPtr = next;
- /* destroy this variant */
- delete_fp_variant(st, fpv);
- }
- else {
- prevPtr = &fpv->next;
- }
- fpv = next;
+ struct st_program *p = st_program(target);
+ struct st_variant *v, **prevPtr = &p->variants;
+ bool unbound = false;
+
+ for (v = p->variants; v; ) {
+ struct st_variant *next = v->next;
+ if (v->st == st) {
+ if (!unbound) {
+ st_unbind_program(st, p);
+ unbound = true;
}
+
+ /* unlink from list */
+ *prevPtr = next;
+ /* destroy this variant */
+ delete_variant(st, v, target->Target);
}
- break;
- case GL_GEOMETRY_PROGRAM_NV:
- case GL_TESS_CONTROL_PROGRAM_NV:
- case GL_TESS_EVALUATION_PROGRAM_NV:
- case GL_COMPUTE_PROGRAM_NV:
- {
- struct st_common_program *p = st_common_program(target);
- struct st_common_variant *v, **prevPtr = &p->variants;
-
- for (v = p->variants; v; ) {
- struct st_common_variant *next = v->next;
- if (v->key.st == st) {
- /* unlink from list */
- *prevPtr = next;
- /* destroy this variant */
- delete_basic_variant(st, v, target->Target);
- }
- else {
- prevPtr = &v->next;
- }
- v = next;
- }
+ else {
+ prevPtr = &v->next;
}
- break;
- default:
- _mesa_problem(NULL, "Unexpected program target 0x%x in "
- "destroy_program_variants_cb()", target->Target);
+ v = next;
}
}
}
-/**
- * For debugging, print/dump the current vertex program.
- */
-void
-st_print_current_vertex_program(void)
-{
- GET_CURRENT_CONTEXT(ctx);
-
- if (ctx->VertexProgram._Current) {
- struct st_vertex_program *stvp =
- (struct st_vertex_program *) ctx->VertexProgram._Current;
- struct st_vp_variant *stv;
-
- debug_printf("Vertex program %u\n", stvp->Base.Id);
-
- for (stv = stvp->variants; stv; stv = stv->next) {
- debug_printf("variant %p\n", stv);
- tgsi_dump(stv->tokens, 0);
- }
- }
-}
-
-
/**
* Compile one shader variant.
*/
-void
+static void
st_precompile_shader_variant(struct st_context *st,
struct gl_program *prog)
{
switch (prog->Target) {
case GL_VERTEX_PROGRAM_ARB: {
- struct st_vertex_program *p = (struct st_vertex_program *)prog;
+ struct st_program *p = (struct st_program *)prog;
struct st_common_variant_key key;
memset(&key, 0, sizeof(key));
}
case GL_FRAGMENT_PROGRAM_ARB: {
- struct st_common_program *p = (struct st_common_program *)prog;
+ struct st_program *p = (struct st_program *)prog;
struct st_fp_variant_key key;
memset(&key, 0, sizeof(key));
case GL_TESS_EVALUATION_PROGRAM_NV:
case GL_GEOMETRY_PROGRAM_NV:
case GL_COMPUTE_PROGRAM_NV: {
- struct st_common_program *p = st_common_program(prog);
+ struct st_program *p = st_program(prog);
struct st_common_variant_key key;
memset(&key, 0, sizeof(key));
assert(0);
}
}
+
+/**
+ * Cache a serialized copy of the program's NIR in stp->serialized_nir.
+ *
+ * No-op if a serialized copy already exists; otherwise serializes
+ * stp->Base.nir into a heap buffer owned by stp (via blob_finish_get_buffer)
+ * and records its size in stp->serialized_nir_size.
+ *
+ * NOTE(review): nir_serialize is called with its boolean argument false —
+ * presumably the non-stripped form is wanted so the NIR can be fully
+ * reconstructed later; confirm against nir_serialize's signature.
+ */
+void
+st_serialize_nir(struct st_program *stp)
+{
+ if (!stp->serialized_nir) {
+ struct blob blob;
+ size_t size;
+
+ blob_init(&blob);
+ nir_serialize(&blob, stp->Base.nir, false);
+ /* Takes ownership of the blob's buffer; freed with stp. */
+ blob_finish_get_buffer(&blob, &stp->serialized_nir, &size);
+ stp->serialized_nir_size = size;
+ }
+}
+
+/**
+ * Post-translation finalization of a gl_program:
+ * - if the program is currently bound, flag the affected state atoms dirty
+ * so the next draw re-validates with the new translation;
+ * - compact and (if needed) serialize its NIR;
+ * - optionally precompile a shader variant up front.
+ */
+void
+st_finalize_program(struct st_context *st, struct gl_program *prog)
+{
+ if (st->current_program[prog->info.stage] == prog) {
+ /* Vertex programs have per-program dirty state (ST_NEW_VERTEX_PROGRAM);
+ * all other stages track it in affected_states.
+ */
+ if (prog->info.stage == MESA_SHADER_VERTEX)
+ st->dirty |= ST_NEW_VERTEX_PROGRAM(st, (struct st_program *)prog);
+ else
+ st->dirty |= ((struct st_program *)prog)->affected_states;
+ }
+
+ if (prog->nir) {
+ /* Reclaim ralloc memory left over from compilation passes. */
+ nir_sweep(prog->nir);
+
+ /* This is only needed for ARB_vp/fp programs and when the disk cache
+ * is disabled. If the disk cache is enabled, GLSL programs are
+ * serialized in write_nir_to_cache.
+ */
+ st_serialize_nir(st_program(prog));
+ }
+
+ /* Create Gallium shaders now instead of on demand. */
+ if (ST_DEBUG & DEBUG_PRECOMPILE ||
+ st->shader_has_one_variant[prog->info.stage])
+ st_precompile_shader_variant(st, prog);
+}