st/mesa: merge st_fragment_program into st_common_program
[mesa.git] / src / mesa / state_tracker / st_glsl_to_nir.cpp
index 39203435515d968d8130a6f0498fb18c06bd0aa0..43459d46b5797fffb9f55b4ffe5056913b99d45b 100644 (file)
@@ -41,6 +41,7 @@
 #include "st_context.h"
 #include "st_glsl_types.h"
 #include "st_program.h"
+#include "st_shader_cache.h"
 
 #include "compiler/nir/nir.h"
 #include "compiler/glsl_types.h"
@@ -113,15 +114,59 @@ st_nir_assign_vs_in_locations(nir_shader *nir)
 static int
 st_nir_lookup_parameter_index(struct gl_program *prog, nir_variable *var)
 {
+   struct gl_program_parameter_list *params = prog->Parameters;
+
    /* Lookup the first parameter that the uniform storage that match the
     * variable location.
     */
-   for (unsigned i = 0; i < prog->Parameters->NumParameters; i++) {
-      int index = prog->Parameters->Parameters[i].MainUniformStorageIndex;
+   for (unsigned i = 0; i < params->NumParameters; i++) {
+      int index = params->Parameters[i].MainUniformStorageIndex;
       if (index == var->data.location)
          return i;
    }
 
+   /* TODO: Handle this fallback for SPIR-V.  We need this for GLSL e.g. in
+    * dEQP-GLES2.functional.uniform_api.random.3
+    */
+
+   /* Is there a better way to do this?  If we have something like:
+    *
+    *    struct S {
+    *           float f;
+    *           vec4 v;
+    *    };
+    *    uniform S color;
+    *
+    * Then what we get in prog->Parameters looks like:
+    *
+    *    0: Name=color.f, Type=6, DataType=1406, Size=1
+    *    1: Name=color.v, Type=6, DataType=8b52, Size=4
+    *
+    * So the name doesn't match up and _mesa_lookup_parameter_index()
+    * fails.  In this case just find the first matching "color.*".
+    *
+    * Note for arrays you could end up w/ color[n].f, for example.
+    *
+    * glsl_to_tgsi works slightly differently in this regard.  It is
+    * emitting something more low level, so it just translates the
+    * params list 1:1 to CONST[] regs.  Going from GLSL IR to TGSI,
+    * it just calculates the additional offset of struct field members
+    * in glsl_to_tgsi_visitor::visit(ir_dereference_record *ir) or
+    * glsl_to_tgsi_visitor::visit(ir_dereference_array *ir).  It never
+    * needs to work backwards to get base var loc from the param-list
+    * which already has them separated out.
+    */
+   if (!prog->sh.data->spirv) {
+      int namelen = strlen(var->name);
+      for (unsigned i = 0; i < params->NumParameters; i++) {
+         struct gl_program_parameter *p = &params->Parameters[i];
+         if ((strncmp(p->Name, var->name, namelen) == 0) &&
+             ((p->Name[namelen] == '.') || (p->Name[namelen] == '['))) {
+            return i;
+         }
+      }
+   }
+
    return -1;
 }
 
@@ -189,7 +234,7 @@ st_nir_assign_uniform_locations(struct gl_context *ctx,
 }
 
 void
-st_nir_opts(nir_shader *nir, bool scalar)
+st_nir_opts(nir_shader *nir)
 {
    bool progress;
    unsigned lower_flrp =
@@ -215,7 +260,7 @@ st_nir_opts(nir_shader *nir, bool scalar)
       NIR_PASS(progress, nir, nir_opt_copy_prop_vars);
       NIR_PASS(progress, nir, nir_opt_dead_write_vars);
 
-      if (scalar) {
+      if (nir->options->lower_to_scalar) {
          NIR_PASS_V(nir, nir_lower_alu_to_scalar, NULL, NULL);
          NIR_PASS_V(nir, nir_lower_phis_to_scalar);
       }
@@ -257,7 +302,7 @@ st_nir_opts(nir_shader *nir, bool scalar)
          lower_flrp = 0;
       }
 
-      NIR_PASS(progress, nir, gl_nir_opt_access);
+      NIR_PASS(progress, nir, nir_opt_access);
 
       NIR_PASS(progress, nir, nir_opt_undef);
       NIR_PASS(progress, nir, nir_opt_conditional_discard);
@@ -290,12 +335,7 @@ st_nir_preprocess(struct st_context *st, struct gl_program *prog,
 {
    const nir_shader_compiler_options *options =
       st->ctx->Const.ShaderCompilerOptions[prog->info.stage].NirOptions;
-   enum pipe_shader_type type = pipe_shader_type_from_mesa(stage);
-   struct pipe_screen *screen = st->pipe->screen;
-   bool is_scalar = screen->get_shader_param(screen, type, PIPE_SHADER_CAP_SCALAR_ISA);
    assert(options);
-   bool lower_64bit =
-      options->lower_int64_options || options->lower_doubles_options;
    nir_shader *nir = prog->nir;
 
    /* Set the next shader stage hint for VS and TES. */
@@ -339,13 +379,12 @@ st_nir_preprocess(struct st_context *st, struct gl_program *prog,
    NIR_PASS_V(nir, nir_split_var_copies);
    NIR_PASS_V(nir, nir_lower_var_copies);
 
-   if (is_scalar) {
+   if (options->lower_to_scalar) {
      NIR_PASS_V(nir, nir_lower_alu_to_scalar, NULL, NULL);
    }
 
    /* before buffers and vars_to_ssa */
    NIR_PASS_V(nir, gl_nir_lower_bindless_images);
-   st_nir_opts(nir, is_scalar);
 
    /* TODO: Change GLSL to not lower shared memory. */
    if (prog->nir->info.stage == MESA_SHADER_COMPUTE &&
@@ -359,21 +398,6 @@ st_nir_preprocess(struct st_context *st, struct gl_program *prog,
    NIR_PASS_V(nir, gl_nir_lower_buffers, shader_program);
    /* Do a round of constant folding to clean up address calculations */
    NIR_PASS_V(nir, nir_opt_constant_folding);
-
-   if (lower_64bit) {
-      bool lowered_64bit_ops = false;
-      if (options->lower_doubles_options) {
-         NIR_PASS(lowered_64bit_ops, nir, nir_lower_doubles,
-                  st->ctx->SoftFP64, options->lower_doubles_options);
-      }
-      if (options->lower_int64_options) {
-         NIR_PASS(lowered_64bit_ops, nir, nir_lower_int64,
-                  options->lower_int64_options);
-      }
-
-      if (lowered_64bit_ops)
-         st_nir_opts(nir, is_scalar);
-   }
 }
 
 /* Second third of converting glsl_to_nir. This creates uniforms, gathers
@@ -442,6 +466,23 @@ st_glsl_to_nir_post_opts(struct st_context *st, struct gl_program *prog,
    NIR_PASS_V(nir, gl_nir_lower_atomics, shader_program, true);
    NIR_PASS_V(nir, nir_opt_intrinsics);
 
+   /* Lower 64-bit ops. */
+   if (nir->options->lower_int64_options ||
+       nir->options->lower_doubles_options) {
+      bool lowered_64bit_ops = false;
+      if (nir->options->lower_doubles_options) {
+         NIR_PASS(lowered_64bit_ops, nir, nir_lower_doubles,
+                  st->ctx->SoftFP64, nir->options->lower_doubles_options);
+      }
+      if (nir->options->lower_int64_options) {
+         NIR_PASS(lowered_64bit_ops, nir, nir_lower_int64,
+                  nir->options->lower_int64_options);
+      }
+
+      if (lowered_64bit_ops)
+         st_nir_opts(nir);
+   }
+
    nir_variable_mode mask = nir_var_function_temp;
    nir_remove_dead_variables(nir, mask);
 
@@ -462,35 +503,23 @@ set_st_program(struct gl_program *prog,
 {
    struct st_vertex_program *stvp;
    struct st_common_program *stp;
-   struct st_fragment_program *stfp;
-   struct st_compute_program *stcp;
 
    switch (prog->info.stage) {
    case MESA_SHADER_VERTEX:
       stvp = (struct st_vertex_program *)prog;
       stvp->shader_program = shader_program;
-      stvp->tgsi.type = PIPE_SHADER_IR_NIR;
-      stvp->tgsi.ir.nir = nir;
+      stvp->state.type = PIPE_SHADER_IR_NIR;
+      stvp->state.ir.nir = nir;
       break;
    case MESA_SHADER_GEOMETRY:
    case MESA_SHADER_TESS_CTRL:
    case MESA_SHADER_TESS_EVAL:
+   case MESA_SHADER_COMPUTE:
+   case MESA_SHADER_FRAGMENT:
       stp = (struct st_common_program *)prog;
       stp->shader_program = shader_program;
-      stp->tgsi.type = PIPE_SHADER_IR_NIR;
-      stp->tgsi.ir.nir = nir;
-      break;
-   case MESA_SHADER_FRAGMENT:
-      stfp = (struct st_fragment_program *)prog;
-      stfp->shader_program = shader_program;
-      stfp->tgsi.type = PIPE_SHADER_IR_NIR;
-      stfp->tgsi.ir.nir = nir;
-      break;
-   case MESA_SHADER_COMPUTE:
-      stcp = (struct st_compute_program *)prog;
-      stcp->shader_program = shader_program;
-      stcp->tgsi.ir_type = PIPE_SHADER_IR_NIR;
-      stcp->tgsi.prog = nir;
+      stp->state.type = PIPE_SHADER_IR_NIR;
+      stp->state.ir.nir = nir;
       break;
    default:
       unreachable("unknown shader stage");
@@ -520,20 +549,20 @@ st_nir_vectorize_io(nir_shader *producer, nir_shader *consumer)
 }
 
 static void
-st_nir_link_shaders(nir_shader **producer, nir_shader **consumer, bool scalar)
+st_nir_link_shaders(nir_shader **producer, nir_shader **consumer)
 {
-   if (scalar) {
+   if ((*producer)->options->lower_to_scalar) {
       NIR_PASS_V(*producer, nir_lower_io_to_scalar_early, nir_var_shader_out);
       NIR_PASS_V(*consumer, nir_lower_io_to_scalar_early, nir_var_shader_in);
    }
 
    nir_lower_io_arrays_to_elements(*producer, *consumer);
 
-   st_nir_opts(*producer, scalar);
-   st_nir_opts(*consumer, scalar);
+   st_nir_opts(*producer);
+   st_nir_opts(*consumer);
 
    if (nir_link_opt_varyings(*producer, *consumer))
-      st_nir_opts(*consumer, scalar);
+      st_nir_opts(*consumer);
 
    NIR_PASS_V(*producer, nir_remove_dead_variables, nir_var_shader_out);
    NIR_PASS_V(*consumer, nir_remove_dead_variables, nir_var_shader_in);
@@ -542,8 +571,8 @@ st_nir_link_shaders(nir_shader **producer, nir_shader **consumer, bool scalar)
       NIR_PASS_V(*producer, nir_lower_global_vars_to_local);
       NIR_PASS_V(*consumer, nir_lower_global_vars_to_local);
 
-      st_nir_opts(*producer, scalar);
-      st_nir_opts(*consumer, scalar);
+      st_nir_opts(*producer);
+      st_nir_opts(*consumer);
 
       /* Optimizations can cause varyings to become unused.
        * nir_compact_varyings() depends on all dead varyings being removed so
@@ -617,7 +646,7 @@ st_link_nir(struct gl_context *ctx,
 {
    struct st_context *st = st_context(ctx);
    struct pipe_screen *screen = st->pipe->screen;
-   bool is_scalar[MESA_SHADER_STAGES];
+   unsigned num_linked_shaders = 0;
 
    unsigned last_stage = 0;
    for (unsigned i = 0; i < MESA_SHADER_STAGES; i++) {
@@ -625,20 +654,16 @@ st_link_nir(struct gl_context *ctx,
       if (shader == NULL)
          continue;
 
-      /* Determine scalar property of each shader stage */
-      enum pipe_shader_type type = pipe_shader_type_from_mesa(shader->Stage);
-      is_scalar[i] = screen->get_shader_param(screen, type,
-                                              PIPE_SHADER_CAP_SCALAR_ISA);
+      num_linked_shaders++;
 
+      const nir_shader_compiler_options *options =
+         st->ctx->Const.ShaderCompilerOptions[shader->Stage].NirOptions;
       struct gl_program *prog = shader->Program;
       _mesa_copy_linked_program_data(shader_program, shader);
 
       assert(!prog->nir);
 
       if (shader_program->data->spirv) {
-         const nir_shader_compiler_options *options =
-            st->ctx->Const.ShaderCompilerOptions[shader->Stage].NirOptions;
-
          prog->Parameters = _mesa_new_parameter_list();
          /* Parameters will be filled during NIR linking. */
 
@@ -667,10 +692,6 @@ st_link_nir(struct gl_context *ctx,
          prog->ExternalSamplersUsed = gl_external_samplers(prog);
          _mesa_update_shader_textures_used(shader_program, prog);
 
-         const nir_shader_compiler_options *options =
-            st->ctx->Const.ShaderCompilerOptions[prog->info.stage].NirOptions;
-         assert(options);
-
          prog->nir = glsl_to_nir(st->ctx, shader_program, shader->Stage, options);
          set_st_program(prog, shader_program, prog->nir);
          st_nir_preprocess(st, prog, shader_program, shader->Stage);
@@ -678,7 +699,7 @@ st_link_nir(struct gl_context *ctx,
 
       last_stage = i;
 
-      if (is_scalar[i]) {
+      if (options->lower_to_scalar) {
          NIR_PASS_V(shader->Program->nir, nir_lower_load_const_to_scalar);
       }
    }
@@ -687,15 +708,12 @@ st_link_nir(struct gl_context *ctx,
     * st_nir_preprocess.
     */
    if (shader_program->data->spirv) {
-      if (!gl_nir_link_uniform_blocks(ctx, shader_program))
-         return GL_FALSE;
-
-      if (!gl_nir_link_uniforms(ctx, shader_program, /* fill_parameters */ true))
+      static const gl_nir_linker_options opts = {
+         true /* fill_parameters */
+      };
+      if (!gl_nir_link(ctx, shader_program, &opts))
          return GL_FALSE;
 
-      gl_nir_link_assign_atomic_counter_resources(ctx, shader_program);
-      gl_nir_link_assign_xfb_resources(ctx, shader_program);
-
       nir_build_program_resource_list(ctx, shader_program);
 
       for (unsigned i = 0; i < MESA_SHADER_STAGES; i++) {
@@ -723,8 +741,7 @@ st_link_nir(struct gl_context *ctx,
          continue;
 
       st_nir_link_shaders(&shader->Program->nir,
-                          &shader_program->_LinkedShaders[next]->Program->nir,
-                          is_scalar[i]);
+                          &shader_program->_LinkedShaders[next]->Program->nir);
       next = i;
    }
 
@@ -736,6 +753,12 @@ st_link_nir(struct gl_context *ctx,
 
       nir_shader *nir = shader->Program->nir;
 
+      /* Linked shaders are optimized in st_nir_link_shaders. Separate shaders
+       * and shaders with a fixed-func VS or FS are optimized here.
+       */
+      if (num_linked_shaders == 1)
+         st_nir_opts(nir);
+
       NIR_PASS_V(nir, st_nir_lower_wpos_ytransform, shader->Program,
                  st->pipe->screen);
 
@@ -780,17 +803,29 @@ st_link_nir(struct gl_context *ctx,
       if (shader == NULL)
          continue;
 
-      st_glsl_to_nir_post_opts(st, shader->Program, shader_program);
+      struct gl_program *prog = shader->Program;
+      st_glsl_to_nir_post_opts(st, prog, shader_program);
+
+      /* Initialize st_vertex_program members. */
+      if (i == MESA_SHADER_VERTEX)
+         st_prepare_vertex_program(st_vertex_program(prog));
+
+      /* Get pipe_stream_output_info. */
+      if (i == MESA_SHADER_VERTEX ||
+          i == MESA_SHADER_TESS_EVAL ||
+          i == MESA_SHADER_GEOMETRY)
+         st_translate_stream_output_info(prog);
+
+      st_store_ir_in_disk_cache(st, prog, true);
 
-      assert(shader->Program);
       if (!ctx->Driver.ProgramStringNotify(ctx,
                                            _mesa_shader_stage_to_program(i),
-                                           shader->Program)) {
+                                           prog)) {
          _mesa_reference_program(ctx, &shader->Program, NULL);
          return false;
       }
 
-      nir_sweep(shader->Program->nir);
+      nir_sweep(prog->nir);
 
       /* The GLSL IR won't be needed anymore. */
       ralloc_free(shader->ir);