X-Git-Url: https://git.libre-soc.org/?a=blobdiff_plain;f=src%2Fmesa%2Fstate_tracker%2Fst_glsl_to_nir.cpp;h=4a772e654210a48b96e1b4ac89392cd5607ae293;hb=a9ac01b96f265af10fe4de58699c75ec6e0b6238;hp=d5309e49fab448a7f33ae2d39c2916729b9facd9;hpb=937523971f42f37b40badb962e575ecd8258b2d5;p=mesa.git

diff --git a/src/mesa/state_tracker/st_glsl_to_nir.cpp b/src/mesa/state_tracker/st_glsl_to_nir.cpp
index d5309e49fab..4a772e65421 100644
--- a/src/mesa/state_tracker/st_glsl_to_nir.cpp
+++ b/src/mesa/state_tracker/st_glsl_to_nir.cpp
@@ -35,18 +35,23 @@
 #include "main/errors.h"
 #include "main/shaderapi.h"
 #include "main/uniforms.h"
-#include "util/string_to_uint_map.h"
 #include "st_context.h"
 #include "st_program.h"
-#include "st_glsl_types.h"
 
 #include "compiler/nir/nir.h"
 #include "compiler/glsl_types.h"
 #include "compiler/glsl/glsl_to_nir.h"
 #include "compiler/glsl/ir.h"
+#include "compiler/glsl/string_to_uint_map.h"
 
+static int
+type_size(const struct glsl_type *type)
+{
+   return type->count_attribute_slots(false);
+}
+
 /* Depending on PIPE_CAP_TGSI_TEXCOORD (st->needs_texcoord_semantic) we
  * may need to fix up varying slots so the glsl->nir path is aligned
  * with the anything->tgsi->nir path.
@@ -92,6 +97,9 @@ st_nir_assign_vs_in_locations(struct gl_program *prog, nir_shader *nir)
       }
    }
 
+   /* bit of a hack, mirroring st_translate_vertex_program */
+   input_to_index[VERT_ATTRIB_EDGEFLAG] = num_inputs;
+
    nir->num_inputs = 0;
    nir_foreach_variable_safe(var, &nir->inputs) {
       attr = var->data.location;
@@ -113,6 +121,34 @@ st_nir_assign_vs_in_locations(struct gl_program *prog, nir_shader *nir)
    }
 }
 
+static void
+st_nir_assign_var_locations(struct exec_list *var_list, unsigned *size)
+{
+   unsigned location = 0;
+   unsigned assigned_locations[VARYING_SLOT_MAX];
+   uint64_t processed_locs = 0;
+
+   nir_foreach_variable(var, var_list) {
+      /* Because component packing allows varyings to share the same location
+       * we may have already have processed this location.
+       */
+      if (var->data.location >= VARYING_SLOT_VAR0 &&
+          processed_locs & ((uint64_t)1 << var->data.location)) {
+         var->data.driver_location = assigned_locations[var->data.location];
+         *size += type_size(var->type);
+         continue;
+      }
+
+      assigned_locations[var->data.location] = location;
+      var->data.driver_location = location;
+      location += type_size(var->type);
+
+      processed_locs |= ((uint64_t)1 << var->data.location);
+   }
+
+   *size += location;
+}
+
 static int
 st_nir_lookup_parameter_index(const struct gl_program_parameter_list *params,
                               const char *name)
@@ -168,6 +204,7 @@ st_nir_assign_uniform_locations(struct gl_program *prog,
 {
    int max = 0;
    int shaderidx = 0;
+   int imageidx = 0;
 
    nir_foreach_variable(uniform, uniform_list) {
       int loc;
@@ -180,11 +217,15 @@ st_nir_assign_uniform_locations(struct gl_program *prog,
           uniform->interface_type != NULL)
          continue;
 
-      if (uniform->type->is_sampler()) {
-         unsigned val;
+      if (uniform->type->is_sampler() || uniform->type->is_image()) {
+         unsigned val = 0;
          bool found = shader_program->UniformHash->get(val, uniform->name);
-         loc = shaderidx++;
+         if (uniform->type->is_sampler())
+            loc = shaderidx++;
+         else
+            loc = imageidx++;
         assert(found);
+         (void) found; /* silence unused var warning */
 
         /* this ensure that nir_lower_samplers looks at the correct
          * shader_program->UniformStorage[location]:
@@ -201,26 +242,23 @@ st_nir_assign_uniform_locations(struct gl_program *prog,
 
       uniform->data.driver_location = loc;
 
-      max = MAX2(max, loc + st_glsl_type_size(uniform->type));
+      max = MAX2(max, loc + type_size(uniform->type));
    }
 
    *size = max;
 }
 
-extern "C" {
-
-/* First half of converting glsl_to_nir.. this leaves things in a pre-
+/* First third of converting glsl_to_nir.. this leaves things in a pre-
  * nir_lower_io state, so that shader variants can more easily insert/
  * replace variables, etc.
  */
-nir_shader *
+static nir_shader *
 st_glsl_to_nir(struct st_context *st, struct gl_program *prog,
                struct gl_shader_program *shader_program,
                gl_shader_stage stage)
 {
    struct pipe_screen *pscreen = st->pipe->screen;
-   enum pipe_shader_type ptarget = st_shader_stage_to_ptarget(stage);
+   enum pipe_shader_type ptarget = pipe_shader_type_from_mesa(stage);
    const nir_shader_compiler_options *options;
-   nir_shader *nir;
 
    assert(pscreen->get_compiler_options); /* drivers using NIR must implement this */
@@ -231,8 +269,48 @@ st_glsl_to_nir(struct st_context *st, struct gl_program *prog,
    if (prog->nir)
       return prog->nir;
 
-   nir = glsl_to_nir(shader_program, stage, options);
-   prog->nir = nir;
+   return glsl_to_nir(shader_program, stage, options);
+}
+
+/* Second third of converting glsl_to_nir. This creates uniforms, gathers
+ * info on varyings, etc after NIR link time opts have been applied.
+ */
+static void
+st_glsl_to_nir_post_opts(struct st_context *st, struct gl_program *prog,
+                         struct gl_shader_program *shader_program)
+{
+   nir_shader *nir = prog->nir;
+
+   /* Make a pass over the IR to add state references for any built-in
+    * uniforms that are used. This has to be done now (during linking).
+    * Code generation doesn't happen until the first time this shader is
+    * used for rendering. Waiting until then to generate the parameters is
+    * too late. At that point, the values for the built-in uniforms won't
+    * get sent to the shader.
+    */
+   nir_foreach_variable(var, &nir->uniforms) {
+      if (strncmp(var->name, "gl_", 3) == 0) {
+         const nir_state_slot *const slots = var->state_slots;
+         assert(var->state_slots != NULL);
+
+         for (unsigned int i = 0; i < var->num_state_slots; i++) {
+            _mesa_add_state_reference(prog->Parameters,
+                                      (gl_state_index *)slots[i].tokens);
+         }
+      }
+   }
+
+   /* Avoid reallocation of the program parameter list, because the uniform
+    * storage is only associated with the original parameter list.
+    * This should be enough for Bitmap and DrawPixels constants.
+    */
+   _mesa_reserve_parameter_storage(prog->Parameters, 8);
+
+   /* This has to be done last. Any operation the can cause
+    * prog->ParameterValues to get reallocated (e.g., anything that adds a
+    * program constant) has to happen before creating this linkage.
+    */
+   _mesa_associate_uniform_storage(st->ctx, shader_program, prog, true);
 
    NIR_PASS_V(nir, nir_lower_io_to_temporaries,
               nir_shader_get_entrypoint(nir),
@@ -240,14 +318,13 @@ st_glsl_to_nir(struct st_context *st, struct gl_program *prog,
    NIR_PASS_V(nir, nir_lower_global_vars_to_local);
    NIR_PASS_V(nir, nir_split_var_copies);
    NIR_PASS_V(nir, nir_lower_var_copies);
-   NIR_PASS_V(nir, st_nir_lower_builtin);
 
    /* fragment shaders may need : */
-   if (stage == MESA_SHADER_FRAGMENT) {
+   if (prog->info.stage == MESA_SHADER_FRAGMENT) {
       static const gl_state_index wposTransformState[STATE_LENGTH] = {
          STATE_INTERNAL, STATE_FB_WPOS_Y_TRANSFORM
       };
-      nir_lower_wpos_ytransform_options wpos_options = {0};
+      nir_lower_wpos_ytransform_options wpos_options = { { 0 } };
       struct pipe_screen *pscreen = st->pipe->screen;
 
       memcpy(wpos_options.state_tokens, wposTransformState,
@@ -267,16 +344,24 @@ st_glsl_to_nir(struct st_context *st, struct gl_program *prog,
       }
    }
 
+   NIR_PASS_V(nir, nir_lower_system_values);
+
+   nir_shader_gather_info(nir, nir_shader_get_entrypoint(nir));
+   prog->info = nir->info;
+
+   st_set_prog_affected_state_flags(prog);
+
+   NIR_PASS_V(nir, st_nir_lower_builtin);
+   NIR_PASS_V(nir, nir_lower_atomics, shader_program);
+
    if (st->ctx->_Shader->Flags & GLSL_DUMP) {
       _mesa_log("\n");
       _mesa_log("NIR IR for linked %s program %d:\n",
-                _mesa_shader_stage_to_string(stage),
+                _mesa_shader_stage_to_string(prog->info.stage),
                 shader_program->Name);
       nir_print_shader(nir, _mesa_get_log_file());
       _mesa_log("\n\n");
    }
-
-   return nir;
 }
 
 /* TODO any better helper somewhere to sort a list? */
@@ -305,67 +390,54 @@ sort_varyings(struct exec_list *var_list)
    exec_list_move_nodes_to(&new_list, var_list);
 }
 
-/* Second half of preparing nir from glsl, which happens after shader
- * variant lowering.
- */
-void
-st_finalize_nir(struct st_context *st, struct gl_program *prog, nir_shader *nir)
+static void
+set_st_program(struct gl_program *prog,
+               struct gl_shader_program *shader_program,
+               nir_shader *nir)
 {
-   NIR_PASS_V(nir, nir_split_var_copies);
-   NIR_PASS_V(nir, nir_lower_var_copies);
-   NIR_PASS_V(nir, nir_lower_io_types);
-
-   if (nir->stage == MESA_SHADER_VERTEX) {
-      /* Needs special handling so drvloc matches the vbo state: */
-      st_nir_assign_vs_in_locations(prog, nir);
-      /* Re-lower global vars, to deal with any dead VS inputs. */
-      NIR_PASS_V(nir, nir_lower_global_vars_to_local);
-
-      sort_varyings(&nir->outputs);
-      nir_assign_var_locations(&nir->outputs,
-                               &nir->num_outputs,
-                               st_glsl_type_size);
-      st_nir_fixup_varying_slots(st, &nir->outputs);
-   } else if (nir->stage == MESA_SHADER_FRAGMENT) {
-      sort_varyings(&nir->inputs);
-      nir_assign_var_locations(&nir->inputs,
-                               &nir->num_inputs,
-                               st_glsl_type_size);
-      st_nir_fixup_varying_slots(st, &nir->inputs);
-      nir_assign_var_locations(&nir->outputs,
-                               &nir->num_outputs,
-                               st_glsl_type_size);
-   } else {
-      unreachable("invalid shader type for tgsi bypass\n");
-   }
+   struct st_vertex_program *stvp;
+   struct st_common_program *stp;
+   struct st_fragment_program *stfp;
+   struct st_compute_program *stcp;
 
-   struct gl_shader_program *shader_program;
-   switch (nir->stage) {
+   switch (prog->info.stage) {
    case MESA_SHADER_VERTEX:
-      shader_program = ((struct st_vertex_program *)prog)->shader_program;
+      stvp = (struct st_vertex_program *)prog;
+      stvp->shader_program = shader_program;
+      stvp->tgsi.type = PIPE_SHADER_IR_NIR;
+      stvp->tgsi.ir.nir = nir;
+      break;
+   case MESA_SHADER_GEOMETRY:
+   case MESA_SHADER_TESS_CTRL:
+   case MESA_SHADER_TESS_EVAL:
+      stp = (struct st_common_program *)prog;
+      stp->shader_program = shader_program;
+      stp->tgsi.type = PIPE_SHADER_IR_NIR;
+      stp->tgsi.ir.nir = nir;
       break;
    case MESA_SHADER_FRAGMENT:
-      shader_program = ((struct st_fragment_program *)prog)->shader_program;
+      stfp = (struct st_fragment_program *)prog;
+      stfp->shader_program = shader_program;
+      stfp->tgsi.type = PIPE_SHADER_IR_NIR;
+      stfp->tgsi.ir.nir = nir;
+      break;
+   case MESA_SHADER_COMPUTE:
+      stcp = (struct st_compute_program *)prog;
+      stcp->shader_program = shader_program;
+      stcp->tgsi.ir_type = PIPE_SHADER_IR_NIR;
+      stcp->tgsi.prog = nir_shader_clone(NULL, nir);
       break;
    default:
-      assert(!"should not be reached");
-      return;
+      unreachable("unknown shader stage");
    }
-
-   st_nir_assign_uniform_locations(prog, shader_program,
-                                   &nir->uniforms, &nir->num_uniforms);
-
-   NIR_PASS_V(nir, nir_lower_system_values);
-   NIR_PASS_V(nir, nir_lower_io, nir_var_all, st_glsl_type_size,
-              (nir_lower_io_options)0);
-   NIR_PASS_V(nir, nir_lower_samplers, shader_program);
 }
 
-struct gl_program *
+static void
 st_nir_get_mesa_program(struct gl_context *ctx,
                         struct gl_shader_program *shader_program,
                         struct gl_linked_shader *shader)
 {
+   struct st_context *st = st_context(ctx);
    struct gl_program *prog;
 
    validate_ir_tree(shader->ir);
@@ -374,35 +446,10 @@ st_nir_get_mesa_program(struct gl_context *ctx,
 
    prog->Parameters = _mesa_new_parameter_list();
 
-   do_set_program_inouts(shader->ir, prog, shader->Stage);
-
    _mesa_copy_linked_program_data(shader_program, shader);
-   _mesa_generate_parameters_list_for_uniforms(shader_program, shader,
+   _mesa_generate_parameters_list_for_uniforms(ctx, shader_program, shader,
                                                prog->Parameters);
 
-   /* Make a pass over the IR to add state references for any built-in
-    * uniforms that are used. This has to be done now (during linking).
-    * Code generation doesn't happen until the first time this shader is
-    * used for rendering. Waiting until then to generate the parameters is
-    * too late. At that point, the values for the built-in uniforms won't
-    * get sent to the shader.
-    */
-   foreach_in_list(ir_instruction, node, shader->ir) {
-      ir_variable *var = node->as_variable();
-
-      if ((var == NULL) || (var->data.mode != ir_var_uniform) ||
-          (strncmp(var->name, "gl_", 3) != 0))
-         continue;
-
-      const ir_state_slot *const slots = var->get_state_slots();
-      assert(slots != NULL);
-
-      for (unsigned int i = 0; i < var->get_num_state_slots(); i++) {
-         _mesa_add_state_reference(prog->Parameters,
-                                   (gl_state_index *) slots[i].tokens);
-      }
-   }
-
    if (ctx->_Shader->Flags & GLSL_DUMP) {
       _mesa_log("\n");
       _mesa_log("GLSL IR for linked %s program %d:\n",
@@ -412,40 +459,96 @@ st_nir_get_mesa_program(struct gl_context *ctx,
       _mesa_log("\n\n");
    }
 
-   prog->ShadowSamplers = shader->shadow_samplers;
-   prog->ExternalSamplersUsed = gl_external_samplers(shader);
+   prog->ExternalSamplersUsed = gl_external_samplers(prog);
    _mesa_update_shader_textures_used(shader_program, prog);
 
-   /* Avoid reallocation of the program parameter list, because the uniform
-    * storage is only associated with the original parameter list.
-    * This should be enough for Bitmap and DrawPixels constants.
-    */
-   _mesa_reserve_parameter_storage(prog->Parameters, 8);
+   nir_shader *nir = st_glsl_to_nir(st, prog, shader_program, shader->Stage);
 
-   /* This has to be done last. Any operation the can cause
-    * prog->ParameterValues to get reallocated (e.g., anything that adds a
-    * program constant) has to happen before creating this linkage.
-    */
-   _mesa_associate_uniform_storage(ctx, shader_program, prog->Parameters);
+   set_st_program(prog, shader_program, nir);
+   prog->nir = nir;
+}
 
-   struct st_vertex_program *stvp;
-   struct st_fragment_program *stfp;
+extern "C" {
 
-   switch (shader->Stage) {
-   case MESA_SHADER_VERTEX:
-      stvp = (struct st_vertex_program *)prog;
-      stvp->shader_program = shader_program;
-      break;
-   case MESA_SHADER_FRAGMENT:
-      stfp = (struct st_fragment_program *)prog;
-      stfp->shader_program = shader_program;
-      break;
-   default:
-      assert(!"should not be reached");
-      return NULL;
+bool
+st_link_nir(struct gl_context *ctx,
+            struct gl_shader_program *shader_program)
+{
+   struct st_context *st = st_context(ctx);
+
+   for (unsigned i = 0; i < MESA_SHADER_STAGES; i++) {
+      struct gl_linked_shader *shader = shader_program->_LinkedShaders[i];
+      if (shader == NULL)
+         continue;
+
+      st_nir_get_mesa_program(ctx, shader_program, shader);
+   }
+
+   for (unsigned i = 0; i < MESA_SHADER_STAGES; i++) {
+      struct gl_linked_shader *shader = shader_program->_LinkedShaders[i];
+      if (shader == NULL)
+         continue;
+
+      st_glsl_to_nir_post_opts(st, shader->Program, shader_program);
+
+      assert(shader->Program);
+      if (!ctx->Driver.ProgramStringNotify(ctx,
+                                           _mesa_shader_stage_to_program(i),
+                                           shader->Program)) {
+         _mesa_reference_program(ctx, &shader->Program, NULL);
+         return false;
+      }
+   }
+
+   return true;
+}
+
+/* Last third of preparing nir from glsl, which happens after shader
+ * variant lowering.
+ */
+void
+st_finalize_nir(struct st_context *st, struct gl_program *prog,
+                struct gl_shader_program *shader_program, nir_shader *nir)
+{
+   struct pipe_screen *screen = st->pipe->screen;
+
+   NIR_PASS_V(nir, nir_split_var_copies);
+   NIR_PASS_V(nir, nir_lower_var_copies);
+   NIR_PASS_V(nir, nir_lower_io_types);
+
+   if (nir->info.stage == MESA_SHADER_VERTEX) {
+      /* Needs special handling so drvloc matches the vbo state: */
+      st_nir_assign_vs_in_locations(prog, nir);
+      /* Re-lower global vars, to deal with any dead VS inputs. */
+      NIR_PASS_V(nir, nir_lower_global_vars_to_local);
+
+      sort_varyings(&nir->outputs);
+      st_nir_assign_var_locations(&nir->outputs,
+                                  &nir->num_outputs);
+      st_nir_fixup_varying_slots(st, &nir->outputs);
+   } else if (nir->info.stage == MESA_SHADER_FRAGMENT) {
+      sort_varyings(&nir->inputs);
+      st_nir_assign_var_locations(&nir->inputs,
+                                  &nir->num_inputs);
+      st_nir_fixup_varying_slots(st, &nir->inputs);
+      st_nir_assign_var_locations(&nir->outputs,
+                                  &nir->num_outputs);
+   } else if (nir->info.stage == MESA_SHADER_COMPUTE) {
+      /* TODO? */
+   } else {
+      unreachable("invalid shader type for tgsi bypass\n");
    }
 
-   return prog;
+   NIR_PASS_V(nir, nir_lower_atomics_to_ssbo,
+              st->ctx->Const.Program[nir->info.stage].MaxAtomicBuffers);
+
+   st_nir_assign_uniform_locations(prog, shader_program,
+                                   &nir->uniforms, &nir->num_uniforms);
+
+   if (screen->get_param(screen, PIPE_CAP_NIR_SAMPLERS_AS_DEREF))
+      NIR_PASS_V(nir, nir_lower_samplers_as_deref, shader_program);
+   else
+      NIR_PASS_V(nir, nir_lower_samplers, shader_program);
 }
 
 } /* extern "C" */
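
For reference (not part of the patch): the sketch below is an illustrative, standalone rendering of the bookkeeping used by the new st_nir_assign_var_locations() added above, where component-packed varyings that land in the same location reuse one driver slot tracked through a 64-bit mask. The fake_var struct, the FAKE_VARYING_SLOT_* constants and the per-variable slot count are hypothetical stand-ins for the NIR variable types (the slot count plays the role of type_size(var->type)); it is a simplified model of the pass, not Mesa/NIR API.

/* Build with e.g.: cc -std=c99 -Wall assign_locations_sketch.c */
#include <stdint.h>
#include <stdio.h>

#define FAKE_VARYING_SLOT_MAX  64
#define FAKE_VARYING_SLOT_VAR0 32

struct fake_var {
   unsigned location;        /* shader-assigned slot, like var->data.location */
   unsigned slots;           /* stand-in for type_size(var->type) */
   unsigned driver_location; /* filled in by the pass */
};

static void
assign_var_locations(struct fake_var *vars, unsigned count, unsigned *size)
{
   unsigned location = 0;
   unsigned assigned_locations[FAKE_VARYING_SLOT_MAX];
   uint64_t processed_locs = 0;

   for (unsigned i = 0; i < count; i++) {
      struct fake_var *var = &vars[i];

      /* Component packing may place several variables in one location, so a
       * generic varying location seen before just reuses its driver slot. */
      if (var->location >= FAKE_VARYING_SLOT_VAR0 &&
          (processed_locs & ((uint64_t)1 << var->location))) {
         var->driver_location = assigned_locations[var->location];
         *size += var->slots;
         continue;
      }

      assigned_locations[var->location] = location;
      var->driver_location = location;
      location += var->slots;

      processed_locs |= (uint64_t)1 << var->location;
   }

   *size += location;
}

int
main(void)
{
   /* Two varyings packed into VAR0+1, plus a two-slot varying in VAR0+2. */
   struct fake_var vars[] = {
      { FAKE_VARYING_SLOT_VAR0 + 1, 1, 0 },
      { FAKE_VARYING_SLOT_VAR0 + 1, 1, 0 },
      { FAKE_VARYING_SLOT_VAR0 + 2, 2, 0 },
   };
   const unsigned count = sizeof(vars) / sizeof(vars[0]);
   unsigned size = 0;

   assign_var_locations(vars, count, &size);

   for (unsigned i = 0; i < count; i++)
      printf("var %u -> driver_location %u\n", i, vars[i].driver_location);
   printf("accumulated size %u\n", size);
   return 0;
}

With this input the second variable reuses driver location 0 rather than advancing the counter, which is the behaviour the patch relies on when packed varyings share a slot.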