#include "program/prog_to_nir.h"
#include "program/programopt.h"
-#include "compiler/nir/nir.h"
+#include "compiler/nir/nir_serialize.h"
#include "pipe/p_context.h"
#include "pipe/p_defines.h"
#include "st_atifs_to_tgsi.h"
#include "st_nir.h"
#include "st_shader_cache.h"
+#include "st_util.h"
#include "cso_cache/cso_context.h"
switch (prog->info.stage) {
case MESA_SHADER_VERTEX:
- states = &((struct st_vertex_program*)prog)->affected_states;
+ states = &((struct st_program*)prog)->affected_states;
*states = ST_NEW_VS_STATE |
ST_NEW_RASTERIZER |
break;
case MESA_SHADER_TESS_CTRL:
- states = &(st_common_program(prog))->affected_states;
+ states = &(st_program(prog))->affected_states;
*states = ST_NEW_TCS_STATE;
break;
case MESA_SHADER_TESS_EVAL:
- states = &(st_common_program(prog))->affected_states;
+ states = &(st_program(prog))->affected_states;
*states = ST_NEW_TES_STATE |
ST_NEW_RASTERIZER;
break;
case MESA_SHADER_GEOMETRY:
- states = &(st_common_program(prog))->affected_states;
+ states = &(st_program(prog))->affected_states;
*states = ST_NEW_GS_STATE |
ST_NEW_RASTERIZER;
break;
case MESA_SHADER_FRAGMENT:
- states = &((struct st_fragment_program*)prog)->affected_states;
+ states = &((struct st_program*)prog)->affected_states;
/* gl_FragCoord and glDrawPixels always use constants. */
*states = ST_NEW_FS_STATE |
break;
case MESA_SHADER_COMPUTE:
- states = &((struct st_common_program*)prog)->affected_states;
+ states = &((struct st_program*)prog)->affected_states;
*states = ST_NEW_CS_STATE;
if (vpv->draw_shader)
draw_delete_vertex_shader( st->draw, vpv->draw_shader );
- delete_ir(&vpv->state);
+ if (vpv->tokens)
+ ureg_free_tokens(vpv->tokens);
free( vpv );
}
*/
void
st_release_vp_variants( struct st_context *st,
-                        struct st_vertex_program *stvp )
+                        struct st_program *stvp )
{
struct st_vp_variant *vpv;
+   /* Walk the singly-linked variant list; cache ->next before the node
+    * is freed by delete_vp_variant().
+    */
-   for (vpv = stvp->variants; vpv; ) {
+   for (vpv = stvp->vp_variants; vpv; ) {
struct st_vp_variant *next = vpv->next;
delete_vp_variant(st, vpv);
vpv = next;
}
-   stvp->variants = NULL;
+   stvp->vp_variants = NULL;
delete_ir(&stvp->state);
}
* Free all variants of a fragment program.
*/
void
-st_release_fp_variants(struct st_context *st, struct st_fragment_program *stfp)
+st_release_fp_variants(struct st_context *st, struct st_program *stfp)
{
struct st_fp_variant *fpv;
+   /* Same pattern as st_release_vp_variants: grab ->next before each
+    * variant node is destroyed.
+    */
-   for (fpv = stfp->variants; fpv; ) {
+   for (fpv = stfp->fp_variants; fpv; ) {
struct st_fp_variant *next = fpv->next;
delete_fp_variant(st, fpv);
fpv = next;
}
-   stfp->variants = NULL;
+   stfp->fp_variants = NULL;
delete_ir(&stfp->state);
}
* the variant from the linked list.
*/
static void
-delete_basic_variant(struct st_context *st, struct st_common_variant *v,
- GLenum target)
+delete_common_variant(struct st_context *st, struct st_common_variant *v,
+ GLenum target)
{
if (v->driver_shader) {
if (st->has_shareable_shaders || v->key.st == st) {
* Free all basic program variants.
*/
void
-st_release_common_variants(struct st_context *st, struct st_common_program *p)
+st_release_common_variants(struct st_context *st, struct st_program *p)
{
struct st_common_variant *v;
for (v = p->variants; v; ) {
struct st_common_variant *next = v->next;
-      delete_basic_variant(st, v, p->Base.Target);
+      delete_common_variant(st, v, p->Base.Target);
v = next;
}
+   /* NOTE(review): unlike the VP/FP release paths above, p->variants is
+    * not visibly reset to NULL here — confirm this hunk isn't simply
+    * eliding that context line.
+    */
delete_ir(&p->state);
}
+/* Per-program NIR lowering intended to run before shader variants are
+ * created (per its name): optimize access flags, split and lower variable
+ * copies, lower IO arrays to elements where the stage or driver options
+ * require it, then assign VS input locations.
+ */
+void
+st_finalize_nir_before_variants(struct nir_shader *nir)
+{
+   NIR_PASS_V(nir, nir_opt_access);
+
+   NIR_PASS_V(nir, nir_split_var_copies);
+   NIR_PASS_V(nir, nir_lower_var_copies);
+   /* VS and GS always get array-of-elements lowering; FS takes the other
+    * variant of the pass (the boolean's exact meaning is defined by
+    * nir_lower_io_arrays_to_elements_no_indirects — not shown here).
+    */
+   if (nir->options->lower_all_io_to_temps ||
+       nir->options->lower_all_io_to_elements ||
+       nir->info.stage == MESA_SHADER_VERTEX ||
+       nir->info.stage == MESA_SHADER_GEOMETRY) {
+      NIR_PASS_V(nir, nir_lower_io_arrays_to_elements_no_indirects, false);
+   } else if (nir->info.stage == MESA_SHADER_FRAGMENT) {
+      NIR_PASS_V(nir, nir_lower_io_arrays_to_elements_no_indirects, true);
+   }
+
+   st_nir_assign_vs_in_locations(nir);
+}
/**
* Translate ARB (asm) program to NIR
st_translate_prog_to_nir(struct st_context *st, struct gl_program *prog,
gl_shader_stage stage)
{
+ struct pipe_screen *screen = st->pipe->screen;
const struct gl_shader_compiler_options *options =
&st->ctx->Const.ShaderCompilerOptions[stage];
NIR_PASS_V(nir, nir_lower_regs_to_ssa); /* turn registers into SSA */
nir_validate_shader(nir, "after st/ptn lower_regs_to_ssa");
- NIR_PASS_V(nir, st_nir_lower_wpos_ytransform, prog, st->pipe->screen);
+ NIR_PASS_V(nir, st_nir_lower_wpos_ytransform, prog, screen);
NIR_PASS_V(nir, nir_lower_system_values);
/* Optimise NIR */
NIR_PASS_V(nir, nir_opt_constant_folding);
st_nir_opts(nir);
- nir_validate_shader(nir, "after st/ptn NIR opts");
+ st_finalize_nir_before_variants(nir);
+
+ if (st->allow_st_finalize_nir_twice)
+ st_finalize_nir(st, prog, NULL, nir, true);
+
+ nir_validate_shader(nir, "after st/glsl finalize_nir");
return nir;
}
void
-st_prepare_vertex_program(struct st_vertex_program *stvp)
+st_prepare_vertex_program(struct st_program *stp)
{
+ struct st_vertex_program *stvp = (struct st_vertex_program *)stp;
+
stvp->num_inputs = 0;
memset(stvp->input_to_index, ~0, sizeof(stvp->input_to_index));
memset(stvp->result_to_output, ~0, sizeof(stvp->result_to_output));
* and TGSI generic input indexes, plus input attrib semantic info.
*/
for (unsigned attr = 0; attr < VERT_ATTRIB_MAX; attr++) {
- if ((stvp->Base.info.inputs_read & BITFIELD64_BIT(attr)) != 0) {
+ if ((stp->Base.info.inputs_read & BITFIELD64_BIT(attr)) != 0) {
stvp->input_to_index[attr] = stvp->num_inputs;
stvp->index_to_input[stvp->num_inputs] = attr;
stvp->num_inputs++;
- if ((stvp->Base.DualSlotInputs & BITFIELD64_BIT(attr)) != 0) {
+ if ((stp->Base.DualSlotInputs & BITFIELD64_BIT(attr)) != 0) {
/* add placeholder for second part of a double attribute */
stvp->index_to_input[stvp->num_inputs] = ST_DOUBLE_ATTRIB_PLACEHOLDER;
stvp->num_inputs++;
/* Compute mapping of vertex program outputs to slots. */
unsigned num_outputs = 0;
for (unsigned attr = 0; attr < VARYING_SLOT_MAX; attr++) {
- if (stvp->Base.info.outputs_written & BITFIELD64_BIT(attr))
+ if (stp->Base.info.outputs_written & BITFIELD64_BIT(attr))
stvp->result_to_output[attr] = num_outputs++;
}
/* pre-setup potentially unused edgeflag output */
}
/* Translate stream output info. */
- struct pipe_stream_output_info *so_info = NULL;
- if (prog->info.stage == MESA_SHADER_VERTEX)
- so_info = &((struct st_vertex_program*)prog)->state.stream_output;
- else
- so_info = &((struct st_common_program*)prog)->state.stream_output;
+ struct pipe_stream_output_info *so_info =
+ &((struct st_program*)prog)->state.stream_output;
for (unsigned i = 0; i < info->NumOutputs; i++) {
so_info->output[i].register_index =
*/
bool
st_translate_vertex_program(struct st_context *st,
- struct st_vertex_program *stvp)
+ struct st_program *stp)
{
struct ureg_program *ureg;
enum pipe_error error;
ubyte output_semantic_name[VARYING_SLOT_MAX] = {0};
ubyte output_semantic_index[VARYING_SLOT_MAX] = {0};
- if (stvp->Base.arb.IsPositionInvariant)
- _mesa_insert_mvp_code(st->ctx, &stvp->Base);
+ if (stp->Base.arb.IsPositionInvariant)
+ _mesa_insert_mvp_code(st->ctx, &stp->Base);
- st_prepare_vertex_program(stvp);
+ st_prepare_vertex_program(stp);
/* ARB_vp: */
- if (!stvp->glsl_to_tgsi) {
- _mesa_remove_output_reads(&stvp->Base, PROGRAM_OUTPUT);
+ if (!stp->glsl_to_tgsi) {
+ _mesa_remove_output_reads(&stp->Base, PROGRAM_OUTPUT);
/* This determines which states will be updated when the assembly
* shader is bound.
*/
- stvp->affected_states = ST_NEW_VS_STATE |
+ stp->affected_states = ST_NEW_VS_STATE |
ST_NEW_RASTERIZER |
ST_NEW_VERTEX_ARRAYS;
- if (stvp->Base.Parameters->NumParameters)
- stvp->affected_states |= ST_NEW_VS_CONSTANTS;
+ if (stp->Base.Parameters->NumParameters)
+ stp->affected_states |= ST_NEW_VS_CONSTANTS;
/* No samplers are allowed in ARB_vp. */
}
/* Get semantic names and indices. */
for (attr = 0; attr < VARYING_SLOT_MAX; attr++) {
- if (stvp->Base.info.outputs_written & BITFIELD64_BIT(attr)) {
+ if (stp->Base.info.outputs_written & BITFIELD64_BIT(attr)) {
unsigned slot = num_outputs++;
unsigned semantic_name, semantic_index;
tgsi_get_gl_varying_semantic(attr, st->needs_texcoord_semantic,
if (ureg == NULL)
return false;
- if (stvp->Base.info.clip_distance_array_size)
+ if (stp->Base.info.clip_distance_array_size)
ureg_property(ureg, TGSI_PROPERTY_NUM_CLIPDIST_ENABLED,
- stvp->Base.info.clip_distance_array_size);
- if (stvp->Base.info.cull_distance_array_size)
+ stp->Base.info.clip_distance_array_size);
+ if (stp->Base.info.cull_distance_array_size)
ureg_property(ureg, TGSI_PROPERTY_NUM_CULLDIST_ENABLED,
- stvp->Base.info.cull_distance_array_size);
+ stp->Base.info.cull_distance_array_size);
if (ST_DEBUG & DEBUG_MESA) {
- _mesa_print_program(&stvp->Base);
- _mesa_print_program_parameters(st->ctx, &stvp->Base);
+ _mesa_print_program(&stp->Base);
+ _mesa_print_program_parameters(st->ctx, &stp->Base);
debug_printf("\n");
}
- if (stvp->glsl_to_tgsi) {
+ struct st_vertex_program *stvp = (struct st_vertex_program *)stp;
+
+ if (stp->glsl_to_tgsi) {
error = st_translate_program(st->ctx,
PIPE_SHADER_VERTEX,
ureg,
- stvp->glsl_to_tgsi,
- &stvp->Base,
+ stp->glsl_to_tgsi,
+ &stp->Base,
/* inputs */
stvp->num_inputs,
stvp->input_to_index,
output_semantic_name,
output_semantic_index);
- st_translate_stream_output_info(&stvp->Base);
+ st_translate_stream_output_info(&stp->Base);
- free_glsl_to_tgsi_visitor(stvp->glsl_to_tgsi);
+ free_glsl_to_tgsi_visitor(stp->glsl_to_tgsi);
} else
error = st_translate_mesa_program(st->ctx,
PIPE_SHADER_VERTEX,
ureg,
- &stvp->Base,
+ &stp->Base,
/* inputs */
stvp->num_inputs,
stvp->input_to_index,
if (error) {
debug_printf("%s: failed to translate Mesa program:\n", __func__);
- _mesa_print_program(&stvp->Base);
+ _mesa_print_program(&stp->Base);
debug_assert(0);
return false;
}
- stvp->state.tokens = ureg_get_tokens(ureg, NULL);
+ stp->state.tokens = ureg_get_tokens(ureg, NULL);
ureg_destroy(ureg);
- if (stvp->glsl_to_tgsi) {
- stvp->glsl_to_tgsi = NULL;
- st_store_ir_in_disk_cache(st, &stvp->Base, false);
+ if (stp->glsl_to_tgsi) {
+ stp->glsl_to_tgsi = NULL;
+ st_store_ir_in_disk_cache(st, &stp->Base, false);
}
/* Translate to NIR.
if (st->pipe->screen->get_shader_param(st->pipe->screen,
PIPE_SHADER_VERTEX,
PIPE_SHADER_CAP_PREFERRED_IR)) {
- assert(!stvp->glsl_to_tgsi);
+ assert(!stp->glsl_to_tgsi);
nir_shader *nir =
- st_translate_prog_to_nir(st, &stvp->Base, MESA_SHADER_VERTEX);
+ st_translate_prog_to_nir(st, &stp->Base, MESA_SHADER_VERTEX);
- if (stvp->state.ir.nir)
- ralloc_free(stvp->state.ir.nir);
- stvp->state.type = PIPE_SHADER_IR_NIR;
- stvp->state.ir.nir = nir;
- stvp->Base.nir = nir;
+ if (stp->state.ir.nir)
+ ralloc_free(stp->state.ir.nir);
+ stp->state.type = PIPE_SHADER_IR_NIR;
+ stp->state.ir.nir = nir;
+ stp->Base.nir = nir;
return true;
}
- return stvp->state.tokens != NULL;
+ return stp->state.tokens != NULL;
+}
+
+/* Return a NIR shader the caller owns and may freely mutate for a shader
+ * variant: either a clone of the program's in-memory NIR or, when that
+ * has been freed, a fresh copy deserialized from the stored nir_binary
+ * blob (see st_finalize_program, which serializes it).
+ */
+static struct nir_shader *
+get_nir_shader(struct st_context *st, struct st_program *stp)
+{
+   if (stp->state.ir.nir)
+      return nir_shader_clone(NULL, stp->state.ir.nir);
+
+   struct blob_reader blob_reader;
+   const struct nir_shader_compiler_options *options =
+      st->ctx->Const.ShaderCompilerOptions[stp->Base.info.stage].NirOptions;
+
+   blob_reader_init(&blob_reader, stp->nir_binary, stp->nir_size);
+   return nir_deserialize(NULL, options, &blob_reader);
+}
static const gl_state_index16 depth_range_state[STATE_LENGTH] =
static struct st_vp_variant *
st_create_vp_variant(struct st_context *st,
- struct st_vertex_program *stvp,
+ struct st_program *stvp,
const struct st_common_variant_key *key)
{
struct st_vp_variant *vpv = CALLOC_STRUCT(st_vp_variant);
struct pipe_context *pipe = st->pipe;
+ struct pipe_screen *screen = pipe->screen;
+ struct pipe_shader_state state = {0};
static const gl_state_index16 point_size_state[STATE_LENGTH] =
{ STATE_INTERNAL, STATE_POINT_SIZE_CLAMPED, 0 };
struct gl_program_parameter_list *params = stvp->Base.Parameters;
vpv->key = *key;
- vpv->state.stream_output = stvp->state.stream_output;
- vpv->num_inputs = stvp->num_inputs;
+ vpv->num_inputs = ((struct st_vertex_program*)stvp)->num_inputs;
- /* When generating a NIR program, we usually don't have TGSI tokens.
- * However, we do create them for ARB_vertex_program / fixed-function VS
- * programs which we may need to use with the draw module for legacy
- * feedback/select emulation. If they exist, copy them.
- */
- if (stvp->state.tokens)
- vpv->state.tokens = tgsi_dup_tokens(stvp->state.tokens);
+ state.stream_output = stvp->state.stream_output;
if (stvp->state.type == PIPE_SHADER_IR_NIR) {
- vpv->state.type = PIPE_SHADER_IR_NIR;
- vpv->state.ir.nir = nir_shader_clone(NULL, stvp->state.ir.nir);
- if (key->clamp_color)
- NIR_PASS_V(vpv->state.ir.nir, nir_lower_clamp_color_outputs);
+ bool finalize = false;
+
+ state.type = PIPE_SHADER_IR_NIR;
+ state.ir.nir = get_nir_shader(st, stvp);
+ if (key->clamp_color) {
+ NIR_PASS_V(state.ir.nir, nir_lower_clamp_color_outputs);
+ finalize = true;
+ }
if (key->passthrough_edgeflags) {
- NIR_PASS_V(vpv->state.ir.nir, nir_lower_passthrough_edgeflags);
+ NIR_PASS_V(state.ir.nir, nir_lower_passthrough_edgeflags);
vpv->num_inputs++;
+ finalize = true;
}
if (key->lower_point_size) {
_mesa_add_state_reference(params, point_size_state);
- NIR_PASS_V(vpv->state.ir.nir, nir_lower_point_size_mov,
+ NIR_PASS_V(state.ir.nir, nir_lower_point_size_mov,
point_size_state);
+ finalize = true;
}
if (key->lower_ucp) {
- struct pipe_screen *screen = pipe->screen;
bool can_compact = screen->get_param(screen,
PIPE_CAP_NIR_COMPACT_ARRAYS);
_mesa_add_state_reference(params, clipplane_state[i]);
}
- NIR_PASS_V(vpv->state.ir.nir, nir_lower_clip_vs, key->lower_ucp,
+ NIR_PASS_V(state.ir.nir, nir_lower_clip_vs, key->lower_ucp,
true, can_compact, clipplane_state);
- NIR_PASS_V(vpv->state.ir.nir, nir_lower_io_to_temporaries,
- nir_shader_get_entrypoint(vpv->state.ir.nir), true, false);
+ NIR_PASS_V(state.ir.nir, nir_lower_io_to_temporaries,
+ nir_shader_get_entrypoint(state.ir.nir), true, false);
+ NIR_PASS_V(state.ir.nir, nir_lower_global_vars_to_local);
+ finalize = true;
}
- st_finalize_nir(st, &stvp->Base, stvp->shader_program,
- vpv->state.ir.nir);
+ if (finalize || !st->allow_st_finalize_nir_twice) {
+ st_finalize_nir(st, &stvp->Base, stvp->shader_program, state.ir.nir,
+ true);
+
+ /* Some of the lowering above may have introduced new varyings */
+ nir_shader_gather_info(state.ir.nir,
+ nir_shader_get_entrypoint(state.ir.nir));
+ }
+
+ if (ST_DEBUG & DEBUG_PRINT_IR)
+ nir_print_shader(state.ir.nir, stderr);
+
+ vpv->driver_shader = pipe->create_vs_state(pipe, &state);
+
+ /* When generating a NIR program, we usually don't have TGSI tokens.
+ * However, we do create them for ARB_vertex_program / fixed-function VS
+ * programs which we may need to use with the draw module for legacy
+ * feedback/select emulation. If they exist, copy them.
+ *
+ * TODO: Lowering for shader variants is not applied to TGSI when
+ * generating a NIR shader.
+ */
+ if (stvp->state.tokens)
+ vpv->tokens = tgsi_dup_tokens(stvp->state.tokens);
- vpv->driver_shader = pipe->create_vs_state(pipe, &vpv->state);
- /* driver takes ownership of IR: */
- vpv->state.ir.nir = NULL;
return vpv;
}
+ state.type = PIPE_SHADER_IR_TGSI;
+ state.tokens = tgsi_dup_tokens(stvp->state.tokens);
+
/* Emulate features. */
if (key->clamp_color || key->passthrough_edgeflags) {
const struct tgsi_token *tokens;
(key->clamp_color ? TGSI_EMU_CLAMP_COLOR_OUTPUTS : 0) |
(key->passthrough_edgeflags ? TGSI_EMU_PASSTHROUGH_EDGEFLAG : 0);
- tokens = tgsi_emulate(vpv->state.tokens, flags);
+ tokens = tgsi_emulate(state.tokens, flags);
if (tokens) {
- tgsi_free_tokens(vpv->state.tokens);
- vpv->state.tokens = tokens;
+ tgsi_free_tokens(state.tokens);
+ state.tokens = tokens;
if (key->passthrough_edgeflags)
vpv->num_inputs++;
_mesa_add_state_reference(params, depth_range_state);
const struct tgsi_token *tokens;
- tokens = st_tgsi_lower_depth_clamp(vpv->state.tokens, depth_range_const,
+ tokens = st_tgsi_lower_depth_clamp(state.tokens, depth_range_const,
key->clip_negative_one_to_one);
- if (tokens != vpv->state.tokens)
- tgsi_free_tokens(vpv->state.tokens);
- vpv->state.tokens = tokens;
+ if (tokens != state.tokens)
+ tgsi_free_tokens(state.tokens);
+ state.tokens = tokens;
}
- if (ST_DEBUG & DEBUG_TGSI) {
- tgsi_dump(vpv->state.tokens, 0);
- debug_printf("\n");
- }
+ if (ST_DEBUG & DEBUG_PRINT_IR)
+ tgsi_dump(state.tokens, 0);
- vpv->driver_shader = pipe->create_vs_state(pipe, &vpv->state);
+ vpv->driver_shader = pipe->create_vs_state(pipe, &state);
+ /* Save this for selection/feedback/rasterpos. */
+ vpv->tokens = state.tokens;
return vpv;
}
*/
struct st_vp_variant *
st_get_vp_variant(struct st_context *st,
- struct st_vertex_program *stvp,
+ struct st_program *stp,
const struct st_common_variant_key *key)
{
+ struct st_vertex_program *stvp = (struct st_vertex_program *)stp;
struct st_vp_variant *vpv;
/* Search for existing variant */
- for (vpv = stvp->variants; vpv; vpv = vpv->next) {
+ for (vpv = stp->vp_variants; vpv; vpv = vpv->next) {
if (memcmp(&vpv->key, key, sizeof(*key)) == 0) {
break;
}
if (!vpv) {
/* create now */
- vpv = st_create_vp_variant(st, stvp, key);
+ vpv = st_create_vp_variant(st, stp, key);
if (vpv) {
for (unsigned index = 0; index < vpv->num_inputs; ++index) {
unsigned attr = stvp->index_to_input[index];
}
/* insert into list */
- vpv->next = stvp->variants;
- stvp->variants = vpv;
+ vpv->next = stp->vp_variants;
+ stp->vp_variants = vpv;
}
}
*/
bool
st_translate_fragment_program(struct st_context *st,
- struct st_fragment_program *stfp)
+ struct st_program *stfp)
{
/* Non-GLSL programs: */
if (!stfp->glsl_to_tgsi) {
static struct st_fp_variant *
st_create_fp_variant(struct st_context *st,
- struct st_fragment_program *stfp,
+ struct st_program *stfp,
const struct st_fp_variant_key *key)
{
struct pipe_context *pipe = st->pipe;
struct st_fp_variant *variant = CALLOC_STRUCT(st_fp_variant);
- struct pipe_shader_state tgsi = {0};
+ struct pipe_shader_state state = {0};
struct gl_program_parameter_list *params = stfp->Base.Parameters;
static const gl_state_index16 texcoord_state[STATE_LENGTH] =
{ STATE_INTERNAL, STATE_CURRENT_ATTRIB, VERT_ATTRIB_TEX0 };
return NULL;
if (stfp->state.type == PIPE_SHADER_IR_NIR) {
- tgsi.type = PIPE_SHADER_IR_NIR;
- tgsi.ir.nir = nir_shader_clone(NULL, stfp->state.ir.nir);
+ bool finalize = false;
- if (key->clamp_color)
- NIR_PASS_V(tgsi.ir.nir, nir_lower_clamp_color_outputs);
+ state.type = PIPE_SHADER_IR_NIR;
+ state.ir.nir = get_nir_shader(st, stfp);
- if (key->lower_flatshade)
- NIR_PASS_V(tgsi.ir.nir, nir_lower_flatshade);
+ if (key->clamp_color) {
+ NIR_PASS_V(state.ir.nir, nir_lower_clamp_color_outputs);
+ finalize = true;
+ }
+
+ if (key->lower_flatshade) {
+ NIR_PASS_V(state.ir.nir, nir_lower_flatshade);
+ finalize = true;
+ }
if (key->lower_alpha_func != COMPARE_FUNC_NEVER) {
_mesa_add_state_reference(params, alpha_ref_state);
- NIR_PASS_V(tgsi.ir.nir, nir_lower_alpha_test, key->lower_alpha_func,
+ NIR_PASS_V(state.ir.nir, nir_lower_alpha_test, key->lower_alpha_func,
false, alpha_ref_state);
+ finalize = true;
}
- if (key->lower_two_sided_color)
- NIR_PASS_V(tgsi.ir.nir, nir_lower_two_sided_color);
+ if (key->lower_two_sided_color) {
+ NIR_PASS_V(state.ir.nir, nir_lower_two_sided_color);
+ finalize = true;
+ }
if (key->persample_shading) {
- nir_shader *shader = tgsi.ir.nir;
+ nir_shader *shader = state.ir.nir;
nir_foreach_variable(var, &shader->inputs)
var->data.sample = true;
+ finalize = true;
}
assert(!(key->bitmap && key->drawpixels));
options.sampler = variant->bitmap_sampler;
options.swizzle_xxxx = st->bitmap.tex_format == PIPE_FORMAT_R8_UNORM;
- NIR_PASS_V(tgsi.ir.nir, nir_lower_bitmap, &options);
+ NIR_PASS_V(state.ir.nir, nir_lower_bitmap, &options);
+ finalize = true;
}
/* glDrawPixels (color only) */
memcpy(options.texcoord_state_tokens, texcoord_state,
sizeof(options.texcoord_state_tokens));
- NIR_PASS_V(tgsi.ir.nir, nir_lower_drawpixels, &options);
+ NIR_PASS_V(state.ir.nir, nir_lower_drawpixels, &options);
+ finalize = true;
}
if (unlikely(key->external.lower_nv12 || key->external.lower_iyuv ||
options.lower_yx_xuxv_external = key->external.lower_yx_xuxv;
options.lower_ayuv_external = key->external.lower_ayuv;
options.lower_xyuv_external = key->external.lower_xyuv;
- NIR_PASS_V(tgsi.ir.nir, nir_lower_tex, &options);
+ NIR_PASS_V(state.ir.nir, nir_lower_tex, &options);
+ finalize = true;
}
- st_finalize_nir(st, &stfp->Base, stfp->shader_program, tgsi.ir.nir);
+ if (finalize || !st->allow_st_finalize_nir_twice) {
+ st_finalize_nir(st, &stfp->Base, stfp->shader_program, state.ir.nir,
+ false);
+ }
+ /* This pass needs to happen *after* nir_lower_sampler */
if (unlikely(key->external.lower_nv12 || key->external.lower_iyuv ||
key->external.lower_xy_uxvx || key->external.lower_yx_xuxv)) {
- /* This pass needs to happen *after* nir_lower_sampler */
- NIR_PASS_V(tgsi.ir.nir, st_nir_lower_tex_src_plane,
+ NIR_PASS_V(state.ir.nir, st_nir_lower_tex_src_plane,
~stfp->Base.SamplersUsed,
key->external.lower_nv12 || key->external.lower_xy_uxvx ||
key->external.lower_yx_xuxv,
key->external.lower_iyuv);
+ finalize = true;
}
- /* Some of the lowering above may have introduced new varyings */
- nir_shader_gather_info(tgsi.ir.nir,
- nir_shader_get_entrypoint(tgsi.ir.nir));
+ if (finalize || !st->allow_st_finalize_nir_twice) {
+ /* Some of the lowering above may have introduced new varyings */
+ nir_shader_gather_info(state.ir.nir,
+ nir_shader_get_entrypoint(state.ir.nir));
- variant->driver_shader = pipe->create_fs_state(pipe, &tgsi);
+ struct pipe_screen *screen = pipe->screen;
+ if (screen->finalize_nir)
+ screen->finalize_nir(screen, state.ir.nir, false);
+ }
+
+ if (ST_DEBUG & DEBUG_PRINT_IR)
+ nir_print_shader(state.ir.nir, stderr);
+
+ variant->driver_shader = pipe->create_fs_state(pipe, &state);
variant->key = *key;
return variant;
}
- tgsi.tokens = stfp->state.tokens;
+ state.tokens = stfp->state.tokens;
assert(!(key->bitmap && key->drawpixels));
/* Fix texture targets and add fog for ATI_fs */
if (stfp->ati_fs) {
- const struct tgsi_token *tokens = st_fixup_atifs(tgsi.tokens, key);
+ const struct tgsi_token *tokens = st_fixup_atifs(state.tokens, key);
if (tokens)
- tgsi.tokens = tokens;
+ state.tokens = tokens;
else
fprintf(stderr, "mesa: cannot post-process ATI_fs\n");
}
(key->clamp_color ? TGSI_EMU_CLAMP_COLOR_OUTPUTS : 0) |
(key->persample_shading ? TGSI_EMU_FORCE_PERSAMPLE_INTERP : 0);
- tokens = tgsi_emulate(tgsi.tokens, flags);
+ tokens = tgsi_emulate(state.tokens, flags);
if (tokens) {
- if (tgsi.tokens != stfp->state.tokens)
- tgsi_free_tokens(tgsi.tokens);
- tgsi.tokens = tokens;
+ if (state.tokens != stfp->state.tokens)
+ tgsi_free_tokens(state.tokens);
+ state.tokens = tokens;
} else
fprintf(stderr, "mesa: cannot emulate deprecated features\n");
}
variant->bitmap_sampler = ffs(~stfp->Base.SamplersUsed) - 1;
- tokens = st_get_bitmap_shader(tgsi.tokens,
+ tokens = st_get_bitmap_shader(state.tokens,
st->internal_target,
variant->bitmap_sampler,
st->needs_texcoord_semantic,
PIPE_FORMAT_R8_UNORM);
if (tokens) {
- if (tgsi.tokens != stfp->state.tokens)
- tgsi_free_tokens(tgsi.tokens);
- tgsi.tokens = tokens;
+ if (state.tokens != stfp->state.tokens)
+ tgsi_free_tokens(state.tokens);
+ state.tokens = tokens;
} else
fprintf(stderr, "mesa: cannot create a shader for glBitmap\n");
}
texcoord_const = _mesa_add_state_reference(params, texcoord_state);
- tokens = st_get_drawpix_shader(tgsi.tokens,
+ tokens = st_get_drawpix_shader(state.tokens,
st->needs_texcoord_semantic,
key->scaleAndBias, scale_const,
bias_const, key->pixelMaps,
texcoord_const, st->internal_target);
if (tokens) {
- if (tgsi.tokens != stfp->state.tokens)
- tgsi_free_tokens(tgsi.tokens);
- tgsi.tokens = tokens;
+ if (state.tokens != stfp->state.tokens)
+ tgsi_free_tokens(state.tokens);
+ state.tokens = tokens;
} else
fprintf(stderr, "mesa: cannot create a shader for glDrawPixels\n");
}
/* samplers inserted would conflict, but this should be unpossible: */
assert(!(key->bitmap || key->drawpixels));
- tokens = st_tgsi_lower_yuv(tgsi.tokens,
+ tokens = st_tgsi_lower_yuv(state.tokens,
~stfp->Base.SamplersUsed,
key->external.lower_nv12 ||
key->external.lower_xy_uxvx ||
key->external.lower_yx_xuxv,
key->external.lower_iyuv);
if (tokens) {
- if (tgsi.tokens != stfp->state.tokens)
- tgsi_free_tokens(tgsi.tokens);
- tgsi.tokens = tokens;
+ if (state.tokens != stfp->state.tokens)
+ tgsi_free_tokens(state.tokens);
+ state.tokens = tokens;
} else {
fprintf(stderr, "mesa: cannot create a shader for samplerExternalOES\n");
}
unsigned depth_range_const = _mesa_add_state_reference(params, depth_range_state);
const struct tgsi_token *tokens;
- tokens = st_tgsi_lower_depth_clamp_fs(tgsi.tokens, depth_range_const);
- if (tgsi.tokens != stfp->state.tokens)
- tgsi_free_tokens(tgsi.tokens);
- tgsi.tokens = tokens;
+ tokens = st_tgsi_lower_depth_clamp_fs(state.tokens, depth_range_const);
+ if (state.tokens != stfp->state.tokens)
+ tgsi_free_tokens(state.tokens);
+ state.tokens = tokens;
}
- if (ST_DEBUG & DEBUG_TGSI) {
- tgsi_dump(tgsi.tokens, 0);
- debug_printf("\n");
- }
+ if (ST_DEBUG & DEBUG_PRINT_IR)
+ tgsi_dump(state.tokens, 0);
/* fill in variant */
- variant->driver_shader = pipe->create_fs_state(pipe, &tgsi);
+ variant->driver_shader = pipe->create_fs_state(pipe, &state);
variant->key = *key;
- if (tgsi.tokens != stfp->state.tokens)
- tgsi_free_tokens(tgsi.tokens);
+ if (state.tokens != stfp->state.tokens)
+ tgsi_free_tokens(state.tokens);
return variant;
}
*/
struct st_fp_variant *
st_get_fp_variant(struct st_context *st,
- struct st_fragment_program *stfp,
+ struct st_program *stfp,
const struct st_fp_variant_key *key)
{
struct st_fp_variant *fpv;
/* Search for existing variant */
- for (fpv = stfp->variants; fpv; fpv = fpv->next) {
+ for (fpv = stfp->fp_variants; fpv; fpv = fpv->next) {
if (memcmp(&fpv->key, key, sizeof(*key)) == 0) {
break;
}
* st_update_fp can take a fast path when
* shader_has_one_variant is set.
*/
- if (!stfp->variants) {
- stfp->variants = fpv;
+ if (!stfp->fp_variants) {
+ stfp->fp_variants = fpv;
} else {
/* insert into list after the first one */
- fpv->next = stfp->variants->next;
- stfp->variants->next = fpv;
+ fpv->next = stfp->fp_variants->next;
+ stfp->fp_variants->next = fpv;
}
} else {
/* insert into list */
- fpv->next = stfp->variants;
- stfp->variants = fpv;
+ fpv->next = stfp->fp_variants;
+ stfp->fp_variants = fpv;
}
}
}
*/
bool
st_translate_common_program(struct st_context *st,
- struct st_common_program *stcp)
+ struct st_program *stp)
{
- struct gl_program *prog = &stcp->Base;
+ struct gl_program *prog = &stp->Base;
enum pipe_shader_type stage =
- pipe_shader_type_from_mesa(stcp->Base.info.stage);
+ pipe_shader_type_from_mesa(stp->Base.info.stage);
struct ureg_program *ureg = ureg_create_with_screen(stage, st->pipe->screen);
if (ureg == NULL)
switch (stage) {
case PIPE_SHADER_TESS_CTRL:
ureg_property(ureg, TGSI_PROPERTY_TCS_VERTICES_OUT,
- stcp->Base.info.tess.tcs_vertices_out);
+ stp->Base.info.tess.tcs_vertices_out);
break;
case PIPE_SHADER_TESS_EVAL:
- if (stcp->Base.info.tess.primitive_mode == GL_ISOLINES)
+ if (stp->Base.info.tess.primitive_mode == GL_ISOLINES)
ureg_property(ureg, TGSI_PROPERTY_TES_PRIM_MODE, GL_LINES);
else
ureg_property(ureg, TGSI_PROPERTY_TES_PRIM_MODE,
- stcp->Base.info.tess.primitive_mode);
+ stp->Base.info.tess.primitive_mode);
STATIC_ASSERT((TESS_SPACING_EQUAL + 1) % 3 == PIPE_TESS_SPACING_EQUAL);
STATIC_ASSERT((TESS_SPACING_FRACTIONAL_ODD + 1) % 3 ==
PIPE_TESS_SPACING_FRACTIONAL_EVEN);
ureg_property(ureg, TGSI_PROPERTY_TES_SPACING,
- (stcp->Base.info.tess.spacing + 1) % 3);
+ (stp->Base.info.tess.spacing + 1) % 3);
ureg_property(ureg, TGSI_PROPERTY_TES_VERTEX_ORDER_CW,
- !stcp->Base.info.tess.ccw);
+ !stp->Base.info.tess.ccw);
ureg_property(ureg, TGSI_PROPERTY_TES_POINT_MODE,
- stcp->Base.info.tess.point_mode);
+ stp->Base.info.tess.point_mode);
break;
case PIPE_SHADER_GEOMETRY:
ureg_property(ureg, TGSI_PROPERTY_GS_INPUT_PRIM,
- stcp->Base.info.gs.input_primitive);
+ stp->Base.info.gs.input_primitive);
ureg_property(ureg, TGSI_PROPERTY_GS_OUTPUT_PRIM,
- stcp->Base.info.gs.output_primitive);
+ stp->Base.info.gs.output_primitive);
ureg_property(ureg, TGSI_PROPERTY_GS_MAX_OUTPUT_VERTICES,
- stcp->Base.info.gs.vertices_out);
+ stp->Base.info.gs.vertices_out);
ureg_property(ureg, TGSI_PROPERTY_GS_INVOCATIONS,
- stcp->Base.info.gs.invocations);
+ stp->Base.info.gs.invocations);
break;
default:
memset(inputSlotToAttr, 0, sizeof(inputSlotToAttr));
memset(inputMapping, 0, sizeof(inputMapping));
memset(outputMapping, 0, sizeof(outputMapping));
- memset(&stcp->state, 0, sizeof(stcp->state));
+ memset(&stp->state, 0, sizeof(stp->state));
if (prog->info.clip_distance_array_size)
ureg_property(ureg, TGSI_PROPERTY_NUM_CLIPDIST_ENABLED,
st_translate_program(st->ctx,
stage,
ureg,
- stcp->glsl_to_tgsi,
+ stp->glsl_to_tgsi,
prog,
/* inputs */
num_inputs,
output_semantic_name,
output_semantic_index);
- stcp->state.tokens = ureg_get_tokens(ureg, NULL);
+ stp->state.tokens = ureg_get_tokens(ureg, NULL);
ureg_destroy(ureg);
st_store_ir_in_disk_cache(st, prog, false);
- if ((ST_DEBUG & DEBUG_TGSI) && (ST_DEBUG & DEBUG_MESA)) {
+ if (ST_DEBUG & DEBUG_PRINT_IR && ST_DEBUG & DEBUG_MESA)
_mesa_print_program(prog);
- debug_printf("\n");
- }
-
- if (ST_DEBUG & DEBUG_TGSI) {
- tgsi_dump(stcp->state.tokens, 0);
- debug_printf("\n");
- }
- free_glsl_to_tgsi_visitor(stcp->glsl_to_tgsi);
- stcp->glsl_to_tgsi = NULL;
+ free_glsl_to_tgsi_visitor(stp->glsl_to_tgsi);
+ stp->glsl_to_tgsi = NULL;
return true;
}
*/
struct st_common_variant *
st_get_common_variant(struct st_context *st,
- struct st_common_program *prog,
+ struct st_program *prog,
const struct st_common_variant_key *key)
{
struct pipe_context *pipe = st->pipe;
struct st_common_variant *v;
- struct pipe_shader_state tgsi = {0};
+ struct pipe_shader_state state = {0};
/* Search for existing variant */
for (v = prog->variants; v; v = v->next) {
/* create new */
v = CALLOC_STRUCT(st_common_variant);
if (v) {
-
if (prog->state.type == PIPE_SHADER_IR_NIR) {
- tgsi.type = PIPE_SHADER_IR_NIR;
- tgsi.ir.nir = nir_shader_clone(NULL, prog->state.ir.nir);
+ bool finalize = false;
+
+ state.type = PIPE_SHADER_IR_NIR;
+ state.ir.nir = get_nir_shader(st, prog);
+
+ if (key->clamp_color) {
+ NIR_PASS_V(state.ir.nir, nir_lower_clamp_color_outputs);
+ finalize = true;
+ }
- if (key->clamp_color)
- NIR_PASS_V(tgsi.ir.nir, nir_lower_clamp_color_outputs);
+ state.stream_output = prog->state.stream_output;
- tgsi.stream_output = prog->state.stream_output;
+ if (finalize || !st->allow_st_finalize_nir_twice) {
+ st_finalize_nir(st, &prog->Base, prog->shader_program,
+ state.ir.nir, true);
+ }
- st_finalize_nir(st, &prog->Base, prog->shader_program,
- tgsi.ir.nir);
- } else {
+ if (ST_DEBUG & DEBUG_PRINT_IR)
+ nir_print_shader(state.ir.nir, stderr);
+ } else {
if (key->lower_depth_clamp) {
struct gl_program_parameter_list *params = prog->Base.Parameters;
prog->state.tokens = tokens;
}
- tgsi = prog->state;
+ state = prog->state;
+
+ if (ST_DEBUG & DEBUG_PRINT_IR)
+ tgsi_dump(state.tokens, 0);
}
/* fill in new variant */
switch (prog->Base.info.stage) {
case MESA_SHADER_TESS_CTRL:
- v->driver_shader = pipe->create_tcs_state(pipe, &tgsi);
+ v->driver_shader = pipe->create_tcs_state(pipe, &state);
break;
case MESA_SHADER_TESS_EVAL:
- v->driver_shader = pipe->create_tes_state(pipe, &tgsi);
+ v->driver_shader = pipe->create_tes_state(pipe, &state);
break;
case MESA_SHADER_GEOMETRY:
- v->driver_shader = pipe->create_gs_state(pipe, &tgsi);
+ v->driver_shader = pipe->create_gs_state(pipe, &state);
break;
case MESA_SHADER_COMPUTE: {
struct pipe_compute_state cs = {0};
- cs.ir_type = tgsi.type;
+ cs.ir_type = state.type;
cs.req_local_mem = prog->Base.info.cs.shared_size;
- if (tgsi.type == PIPE_SHADER_IR_NIR)
- cs.prog = tgsi.ir.nir;
+ if (state.type == PIPE_SHADER_IR_NIR)
+ cs.prog = state.ir.nir;
else
- cs.prog = tgsi.tokens;
+ cs.prog = state.tokens;
v->driver_shader = pipe->create_compute_state(pipe, &cs);
break;
switch (target->Target) {
case GL_VERTEX_PROGRAM_ARB:
{
- struct st_vertex_program *stvp = (struct st_vertex_program *) target;
- struct st_vp_variant *vpv, **prevPtr = &stvp->variants;
+ struct st_program *stvp = (struct st_program *) target;
+ struct st_vp_variant *vpv, **prevPtr = &stvp->vp_variants;
- for (vpv = stvp->variants; vpv; ) {
+ for (vpv = stvp->vp_variants; vpv; ) {
struct st_vp_variant *next = vpv->next;
if (vpv->key.st == st) {
/* unlink from list */
break;
case GL_FRAGMENT_PROGRAM_ARB:
{
- struct st_fragment_program *stfp =
- (struct st_fragment_program *) target;
- struct st_fp_variant *fpv, **prevPtr = &stfp->variants;
+ struct st_program *stfp =
+ (struct st_program *) target;
+ struct st_fp_variant *fpv, **prevPtr = &stfp->fp_variants;
- for (fpv = stfp->variants; fpv; ) {
+ for (fpv = stfp->fp_variants; fpv; ) {
struct st_fp_variant *next = fpv->next;
if (fpv->key.st == st) {
/* unlink from list */
case GL_TESS_EVALUATION_PROGRAM_NV:
case GL_COMPUTE_PROGRAM_NV:
{
- struct st_common_program *p = st_common_program(target);
+ struct st_program *p = st_program(target);
struct st_common_variant *v, **prevPtr = &p->variants;
for (v = p->variants; v; ) {
/* unlink from list */
*prevPtr = next;
/* destroy this variant */
- delete_basic_variant(st, v, target->Target);
+ delete_common_variant(st, v, target->Target);
}
else {
prevPtr = &v->next;
}
-/**
- * For debugging, print/dump the current vertex program.
- */
-void
-st_print_current_vertex_program(void)
-{
- GET_CURRENT_CONTEXT(ctx);
-
- if (ctx->VertexProgram._Current) {
- struct st_vertex_program *stvp =
- (struct st_vertex_program *) ctx->VertexProgram._Current;
- struct st_vp_variant *stv;
-
- debug_printf("Vertex program %u\n", stvp->Base.Id);
-
- for (stv = stvp->variants; stv; stv = stv->next) {
- debug_printf("variant %p\n", stv);
- tgsi_dump(stv->state.tokens, 0);
- }
- }
-}
-
-
/**
* Compile one shader variant.
*/
-void
+static void
st_precompile_shader_variant(struct st_context *st,
struct gl_program *prog)
{
switch (prog->Target) {
case GL_VERTEX_PROGRAM_ARB: {
- struct st_vertex_program *p = (struct st_vertex_program *)prog;
+ struct st_program *p = (struct st_program *)prog;
struct st_common_variant_key key;
memset(&key, 0, sizeof(key));
}
case GL_FRAGMENT_PROGRAM_ARB: {
- struct st_fragment_program *p = (struct st_fragment_program *)prog;
+ struct st_program *p = (struct st_program *)prog;
struct st_fp_variant_key key;
memset(&key, 0, sizeof(key));
case GL_TESS_EVALUATION_PROGRAM_NV:
case GL_GEOMETRY_PROGRAM_NV:
case GL_COMPUTE_PROGRAM_NV: {
- struct st_common_program *p = st_common_program(prog);
+ struct st_program *p = st_program(prog);
struct st_common_variant_key key;
memset(&key, 0, sizeof(key));
assert(0);
}
}
+
+
+/* Finish setting up a freshly (re)translated program: mark dependent
+ * state dirty if the program is currently bound, optionally precompile a
+ * variant, and replace the in-memory NIR with a serialized copy so that
+ * additional variants can be rebuilt from the blob (get_nir_shader)
+ * while the live IR memory is released.
+ */
+void
+st_finalize_program(struct st_context *st, struct gl_program *prog)
+{
+   struct st_program *stp = (struct st_program *)prog;
+
+   /* If this program is bound, force revalidation of everything it
+    * affects on the next draw.
+    */
+   if (st->current_program[prog->info.stage] == prog) {
+      if (prog->info.stage == MESA_SHADER_VERTEX)
+         st->dirty |= ST_NEW_VERTEX_PROGRAM(st, stp);
+      else
+         st->dirty |= stp->affected_states;
+   }
+
+   if (prog->nir)
+      nir_sweep(prog->nir);
+
+   /* Create Gallium shaders now instead of on demand. */
+   if (ST_DEBUG & DEBUG_PRECOMPILE ||
+       st->shader_has_one_variant[prog->info.stage])
+      st_precompile_shader_variant(st, prog);
+
+   /* Additional shader variants are always generated from serialized NIR
+    * to save memory.
+    */
+   if (prog->nir) {
+      /* Serialize NIR. */
+      struct blob blob;
+      blob_init(&blob);
+      nir_serialize(&blob, prog->nir, false);
+
+      /* Don't free the only remaining copy of the IR unless the blob
+       * copy actually materialized: on malloc failure keep prog->nir
+       * alive — get_nir_shader still handles the in-memory case.
+       */
+      stp->nir_binary = malloc(blob.size);
+      if (stp->nir_binary) {
+         memcpy(stp->nir_binary, blob.data, blob.size);
+         stp->nir_size = blob.size;
+
+         /* Free NIR; variants deserialize from nir_binary instead. */
+         assert(stp->state.ir.nir == prog->nir);
+         ralloc_free(prog->nir);
+         prog->nir = NULL;
+         stp->state.ir.nir = NULL;
+      }
+      blob_finish(&blob);
+   }
+}