st/mesa: Fix inverted polygon stipple condition
diff --git a/src/mesa/state_tracker/st_glsl_to_nir.cpp b/src/mesa/state_tracker/st_glsl_to_nir.cpp
index f609efbff959fdeac2469e2b8ee50277e355309d..d658a648b15d0332505cd4f15eb94a9c1cde0290 100644
--- a/src/mesa/state_tracker/st_glsl_to_nir.cpp
+++ b/src/mesa/state_tracker/st_glsl_to_nir.cpp
 #include "program/ir_to_mesa.h"
 #include "main/mtypes.h"
 #include "main/errors.h"
+#include "main/glspirv.h"
 #include "main/shaderapi.h"
 #include "main/uniforms.h"
 
+#include "main/shaderobj.h"
 #include "st_context.h"
 #include "st_glsl_types.h"
 #include "st_program.h"
 #include "compiler/glsl_types.h"
 #include "compiler/glsl/glsl_to_nir.h"
 #include "compiler/glsl/gl_nir.h"
+#include "compiler/glsl/gl_nir_linker.h"
 #include "compiler/glsl/ir.h"
 #include "compiler/glsl/ir_optimization.h"
 #include "compiler/glsl/string_to_uint_map.h"
 
-
 static int
 type_size(const struct glsl_type *type)
 {
@@ -81,21 +83,20 @@ st_nir_fixup_varying_slots(struct st_context *st, struct exec_list *var_list)
  * on varying-slot w/ the VS outputs)
  */
 static void
-st_nir_assign_vs_in_locations(struct gl_program *prog, nir_shader *nir)
+st_nir_assign_vs_in_locations(nir_shader *nir)
 {
-   nir->num_inputs = 0;
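+   /* Start from the number of inputs the shader actually reads; the edge
+    * flag, when present, is given the next location after them below.
+    */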
+   nir->num_inputs = util_bitcount64(nir->info.inputs_read);
    nir_foreach_variable_safe(var, &nir->inputs) {
       /* NIR already assigns dual-slot inputs to two locations so all we have
        * to do is compact everything down.
        */
       if (var->data.location == VERT_ATTRIB_EDGEFLAG) {
          /* bit of a hack, mirroring st_translate_vertex_program */
-         var->data.driver_location = util_bitcount64(nir->info.inputs_read);
+         var->data.driver_location = nir->num_inputs++;
       } else if (nir->info.inputs_read & BITFIELD64_BIT(var->data.location)) {
          var->data.driver_location =
             util_bitcount64(nir->info.inputs_read &
                               BITFIELD64_MASK(var->data.location));
-         nir->num_inputs++;
       } else {
          /* Move unused input variables to the globals list (with no
           * initialization), to avoid confusing drivers looking through the
@@ -103,97 +104,29 @@ st_nir_assign_vs_in_locations(struct gl_program *prog, nir_shader *nir)
           * set.
           */
          exec_node_remove(&var->node);
-         var->data.mode = nir_var_global;
+         var->data.mode = nir_var_shader_temp;
          exec_list_push_tail(&nir->globals, &var->node);
       }
    }
 }
 
-static void
-st_nir_assign_var_locations(struct exec_list *var_list, unsigned *size,
-                            gl_shader_stage stage)
+static int
+st_nir_lookup_parameter_index(struct gl_program *prog, nir_variable *var)
 {
-   unsigned location = 0;
-   unsigned assigned_locations[VARYING_SLOT_TESS_MAX];
-   uint64_t processed_locs[2] = {0};
-
-   const int base = stage == MESA_SHADER_FRAGMENT ?
-      (int) FRAG_RESULT_DATA0 : (int) VARYING_SLOT_VAR0;
-
-   int UNUSED last_loc = 0;
-   nir_foreach_variable(var, var_list) {
-
-      const struct glsl_type *type = var->type;
-      if (nir_is_per_vertex_io(var, stage)) {
-         assert(glsl_type_is_array(type));
-         type = glsl_get_array_element(type);
-      }
-
-      unsigned var_size = type_size(type);
-
-      /* Builtins don't allow component packing so we only need to worry about
-       * user defined varyings sharing the same location.
-       */
-      bool processed = false;
-      if (var->data.location >= base) {
-         unsigned glsl_location = var->data.location - base;
-
-         for (unsigned i = 0; i < var_size; i++) {
-            if (processed_locs[var->data.index] &
-                ((uint64_t)1 << (glsl_location + i)))
-               processed = true;
-            else
-               processed_locs[var->data.index] |=
-                  ((uint64_t)1 << (glsl_location + i));
-         }
-      }
+   struct gl_program_parameter_list *params = prog->Parameters;
 
-      /* Because component packing allows varyings to share the same location
-       * we may already have processed this location.
-       */
-      if (processed) {
-         unsigned driver_location = assigned_locations[var->data.location];
-         var->data.driver_location = driver_location;
-         *size += type_size(type);
-
-         /* An array may be packed such that it crosses multiple other arrays
-          * or variables, so we need to make sure we have allocated the elements
-          * consecutively if the previously processed var was shorter than
-          * the current array we are processing.
-          *
-          * NOTE: The code below assumes the var list is ordered in ascending
-          * location order.
-          */
-         assert(last_loc <= var->data.location);
-         last_loc = var->data.location;
-         unsigned last_slot_location = driver_location + var_size;
-         if (last_slot_location > location) {
-            unsigned num_unallocated_slots = last_slot_location - location;
-            unsigned first_unallocated_slot = var_size - num_unallocated_slots;
-            for (unsigned i = first_unallocated_slot; i < num_unallocated_slots; i++) {
-               assigned_locations[var->data.location + i] = location;
-               location++;
-            }
-         }
-         continue;
-      }
-
-      for (unsigned i = 0; i < var_size; i++) {
-         assigned_locations[var->data.location + i] = location + i;
-      }
-
-      var->data.driver_location = location;
-      location += var_size;
+   /* Look up the first parameter whose uniform storage index matches the
+    * variable's location.
+    */
+   for (unsigned i = 0; i < params->NumParameters; i++) {
+      int index = params->Parameters[i].MainUniformStorageIndex;
+      if (index == var->data.location)
+         return i;
    }
 
-   *size += location;
-}
-
-static int
-st_nir_lookup_parameter_index(const struct gl_program_parameter_list *params,
-                              const char *name)
-{
-   int loc = _mesa_lookup_parameter_index(params, name);
+   /* TODO: Handle this fallback for SPIR-V.  We need this for GLSL e.g. in
+    * dEQP-GLES2.functional.uniform_api.random.3
+    */
 
    /* is there a better way to do this?  If we have something like:
     *
@@ -222,28 +155,25 @@ st_nir_lookup_parameter_index(const struct gl_program_parameter_list *params,
     * needs to work backwards to get base var loc from the param-list
     * which already has them separated out.
     */
-   if (loc < 0) {
-      int namelen = strlen(name);
+   if (!prog->sh.data->spirv) {
+      int namelen = strlen(var->name);
       for (unsigned i = 0; i < params->NumParameters; i++) {
          struct gl_program_parameter *p = &params->Parameters[i];
-         if ((strncmp(p->Name, name, namelen) == 0) &&
+         if ((strncmp(p->Name, var->name, namelen) == 0) &&
              ((p->Name[namelen] == '.') || (p->Name[namelen] == '['))) {
-            loc = i;
-            break;
+            return i;
          }
       }
    }
 
-   return loc;
+   return -1;
 }
 
 static void
 st_nir_assign_uniform_locations(struct gl_context *ctx,
                                 struct gl_program *prog,
-                                struct gl_shader_program *shader_program,
-                                struct exec_list *uniform_list, unsigned *size)
+                                struct exec_list *uniform_list)
 {
-   int max = 0;
    int shaderidx = 0;
    int imageidx = 0;
 
@@ -254,8 +184,7 @@ st_nir_assign_uniform_locations(struct gl_context *ctx,
       * UBOs have their own address spaces, so don't count them towards the
        * number of global uniforms
        */
-      if ((uniform->data.mode == nir_var_uniform || uniform->data.mode == nir_var_shader_storage) &&
-          uniform->interface_type != NULL)
+      if (uniform->data.mode == nir_var_mem_ubo || uniform->data.mode == nir_var_mem_ssbo)
          continue;
 
       const struct glsl_type *type = glsl_without_array(uniform->type);
@@ -267,14 +196,14 @@ st_nir_assign_uniform_locations(struct gl_context *ctx,
             loc = imageidx;
             imageidx += type_size(uniform->type);
          }
-      } else if (strncmp(uniform->name, "gl_", 3) == 0) {
+      } else if (uniform->state_slots) {
          const gl_state_index16 *const stateTokens = uniform->state_slots[0].tokens;
         /* This state reference has already been set up by ir_to_mesa, but we'll
           * get the same index back here.
           */
 
          unsigned comps;
-         if (glsl_type_is_struct(type)) {
+         if (glsl_type_is_struct_or_ifc(type)) {
             comps = 4;
          } else {
             comps = glsl_get_vector_elements(type);
@@ -288,31 +217,50 @@ st_nir_assign_uniform_locations(struct gl_context *ctx,
             loc = _mesa_add_state_reference(prog->Parameters, stateTokens);
          }
       } else {
-         loc = st_nir_lookup_parameter_index(prog->Parameters, uniform->name);
+         loc = st_nir_lookup_parameter_index(prog, uniform);
 
-         if (ctx->Const.PackedDriverUniformStorage) {
+         /* We need to check that loc is not -1 before accessing the array.
+          * It can be negative, for example, when we have a struct that
+          * contains only opaque types.
+          */
+         if (loc >= 0 && ctx->Const.PackedDriverUniformStorage) {
             loc = prog->Parameters->ParameterValueOffset[loc];
          }
       }
 
       uniform->data.driver_location = loc;
-
-      max = MAX2(max, loc + type_size(uniform->type));
    }
-   *size = max;
 }
 
 void
 st_nir_opts(nir_shader *nir, bool scalar)
 {
    bool progress;
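+   /* Bit sizes of flrp that the backend wants lowered; cleared after the
+    * one-time nir_lower_flrp pass in the loop below.
+    */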
+   unsigned lower_flrp =
+      (nir->options->lower_flrp16 ? 16 : 0) |
+      (nir->options->lower_flrp32 ? 32 : 0) |
+      (nir->options->lower_flrp64 ? 64 : 0);
+
    do {
       progress = false;
 
       NIR_PASS_V(nir, nir_lower_vars_to_ssa);
+
+      /* Linking deals with unused inputs/outputs, but here we can remove
+       * things local to the shader in the hopes that we can clean up other
+       * things. This pass will also remove variables with only stores, so we
+       * might be able to make progress after it.
+       */
+      NIR_PASS(progress, nir, nir_remove_dead_variables,
+               (nir_variable_mode)(nir_var_function_temp |
+                                   nir_var_shader_temp |
+                                   nir_var_mem_shared));
+
+      NIR_PASS(progress, nir, nir_opt_copy_prop_vars);
+      NIR_PASS(progress, nir, nir_opt_dead_write_vars);
 
       if (scalar) {
-         NIR_PASS_V(nir, nir_lower_alu_to_scalar);
+         NIR_PASS_V(nir, nir_lower_alu_to_scalar, NULL, NULL);
          NIR_PASS_V(nir, nir_lower_phis_to_scalar);
       }
 
@@ -326,14 +274,35 @@ st_nir_opts(nir_shader *nir, bool scalar)
          NIR_PASS(progress, nir, nir_copy_prop);
          NIR_PASS(progress, nir, nir_opt_dce);
       }
-      NIR_PASS(progress, nir, nir_opt_if);
+      NIR_PASS(progress, nir, nir_opt_if, false);
       NIR_PASS(progress, nir, nir_opt_dead_cf);
       NIR_PASS(progress, nir, nir_opt_cse);
-      NIR_PASS(progress, nir, nir_opt_peephole_select, 8);
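+      /* A limit of 8 instructions per flattened block; the two bools
+       * additionally permit indirect loads and expensive ALU ops in the
+       * selected blocks.
+       */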
+      NIR_PASS(progress, nir, nir_opt_peephole_select, 8, true, true);
 
       NIR_PASS(progress, nir, nir_opt_algebraic);
       NIR_PASS(progress, nir, nir_opt_constant_folding);
 
+      if (lower_flrp != 0) {
+         bool lower_flrp_progress = false;
+
+         NIR_PASS(lower_flrp_progress, nir, nir_lower_flrp,
+                  lower_flrp,
+                  false /* always_precise */,
+                  nir->options->lower_ffma);
+         if (lower_flrp_progress) {
+            NIR_PASS(progress, nir,
+                     nir_opt_constant_folding);
+            progress = true;
+         }
+
+         /* Nothing should rematerialize any flrps, so we only need to do this
+          * lowering once.
+          */
+         lower_flrp = 0;
+      }
+
+      NIR_PASS(progress, nir, gl_nir_opt_access);
+
       NIR_PASS(progress, nir, nir_opt_undef);
       NIR_PASS(progress, nir, nir_opt_conditional_discard);
       if (nir->options->max_unroll_iterations) {
@@ -342,14 +311,26 @@ st_nir_opts(nir_shader *nir, bool scalar)
    } while (progress);
 }
 
+static void
+shared_type_info(const struct glsl_type *type, unsigned *size, unsigned *align)
+{
+   assert(glsl_type_is_vector_or_scalar(type));
+
+   uint32_t comp_size = glsl_type_is_boolean(type)
+      ? 4 : glsl_get_bit_size(type) / 8;
+   unsigned length = glsl_get_vector_elements(type);
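+   /* std430-like packing: a boolean takes four bytes and a vec3 is
+    * aligned like a vec4.
+    */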
+   *size = comp_size * length;
+   *align = comp_size * (length == 3 ? 4 : length);
+}
+
 /* First third of converting glsl_to_nir.  This leaves things in a pre-
  * nir_lower_io state, so that shader variants can more easily insert/
  * replace variables, etc.
  */
-static nir_shader *
-st_glsl_to_nir(struct st_context *st, struct gl_program *prog,
-               struct gl_shader_program *shader_program,
-               gl_shader_stage stage)
+static void
+st_nir_preprocess(struct st_context *st, struct gl_program *prog,
+                  struct gl_shader_program *shader_program,
+                  gl_shader_stage stage)
 {
    const nir_shader_compiler_options *options =
       st->ctx->Const.ShaderCompilerOptions[prog->info.stage].NirOptions;
@@ -357,11 +338,9 @@ st_glsl_to_nir(struct st_context *st, struct gl_program *prog,
    struct pipe_screen *screen = st->pipe->screen;
    bool is_scalar = screen->get_shader_param(screen, type, PIPE_SHADER_CAP_SCALAR_ISA);
    assert(options);
-
-   if (prog->nir)
-      return prog->nir;
-
-   nir_shader *nir = glsl_to_nir(shader_program, stage, options);
+   bool lower_64bit =
+      options->lower_int64_options || options->lower_doubles_options;
+   nir_shader *nir = prog->nir;
 
    /* Set the next shader stage hint for VS and TES. */
    if (!nir->info.separate_shader &&
@@ -378,6 +357,12 @@ st_glsl_to_nir(struct st_context *st, struct gl_program *prog,
       nir->info.next_stage = MESA_SHADER_FRAGMENT;
    }
 
+   nir_shader_gather_info(nir, nir_shader_get_entrypoint(nir));
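+   /* Build the software fp64 helper library once per context; it is linked
+    * into shaders that need full fp64 lowering.
+    */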
+   if (!st->ctx->SoftFP64 && nir->info.uses_64bit &&
+       (options->lower_doubles_options & nir_lower_fp64_full_software) != 0) {
+      st->ctx->SoftFP64 = glsl_float64_funcs_to_nir(st->ctx, options);
+   }
+
    nir_variable_mode mask =
       (nir_variable_mode) (nir_var_shader_in | nir_var_shader_out);
    nir_remove_dead_variables(nir, mask);
@@ -398,9 +383,41 @@ st_glsl_to_nir(struct st_context *st, struct gl_program *prog,
    NIR_PASS_V(nir, nir_split_var_copies);
    NIR_PASS_V(nir, nir_lower_var_copies);
 
+   if (is_scalar) {
+      NIR_PASS_V(nir, nir_lower_alu_to_scalar, NULL, NULL);
+   }
+
+   /* before buffers and vars_to_ssa */
+   NIR_PASS_V(nir, gl_nir_lower_bindless_images);
    st_nir_opts(nir, is_scalar);
 
-   return nir;
+   /* TODO: Change GLSL to not lower shared memory. */
+   if (prog->nir->info.stage == MESA_SHADER_COMPUTE &&
+       shader_program->data->spirv) {
+      NIR_PASS_V(prog->nir, nir_lower_vars_to_explicit_types,
+                 nir_var_mem_shared, shared_type_info);
+      NIR_PASS_V(prog->nir, nir_lower_explicit_io,
+                 nir_var_mem_shared, nir_address_format_32bit_offset);
+   }
+
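+   /* Lower UBO and SSBO variable dereferences to explicit block-index and
+    * offset intrinsics.
+    */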
+   NIR_PASS_V(nir, gl_nir_lower_buffers, shader_program);
+   /* Do a round of constant folding to clean up address calculations */
+   NIR_PASS_V(nir, nir_opt_constant_folding);
+
+   if (lower_64bit) {
+      bool lowered_64bit_ops = false;
+      if (options->lower_doubles_options) {
+         NIR_PASS(lowered_64bit_ops, nir, nir_lower_doubles,
+                  st->ctx->SoftFP64, options->lower_doubles_options);
+      }
+      if (options->lower_int64_options) {
+         NIR_PASS(lowered_64bit_ops, nir, nir_lower_int64,
+                  options->lower_int64_options);
+      }
+
+      if (lowered_64bit_ops)
+         st_nir_opts(nir, is_scalar);
+   }
 }
 
 /* Second third of converting glsl_to_nir. This creates uniforms, gathers
@@ -420,14 +437,12 @@ st_glsl_to_nir_post_opts(struct st_context *st, struct gl_program *prog,
     * get sent to the shader.
     */
    nir_foreach_variable(var, &nir->uniforms) {
-      if (strncmp(var->name, "gl_", 3) == 0) {
-         const nir_state_slot *const slots = var->state_slots;
-         assert(var->state_slots != NULL);
-
+      const nir_state_slot *const slots = var->state_slots;
+      if (slots != NULL) {
          const struct glsl_type *type = glsl_without_array(var->type);
          for (unsigned int i = 0; i < var->num_state_slots; i++) {
             unsigned comps;
-            if (glsl_type_is_struct(type)) {
+            if (glsl_type_is_struct_or_ifc(type)) {
               /* Builtin structs require special handling; for now we just
                 * make all members vec4. See st_nir_lower_builtin.
                 */
@@ -458,12 +473,21 @@ st_glsl_to_nir_post_opts(struct st_context *st, struct gl_program *prog,
     * prog->ParameterValues to get reallocated (e.g., anything that adds a
     * program constant) has to happen before creating this linkage.
     */
-   _mesa_associate_uniform_storage(st->ctx, shader_program, prog, true);
+   _mesa_associate_uniform_storage(st->ctx, shader_program, prog);
 
    st_set_prog_affected_state_flags(prog);
 
-   NIR_PASS_V(nir, st_nir_lower_builtin);
+   /* None of the builtins being lowered here can be produced by SPIR-V.  See
+    * _mesa_builtin_uniform_desc.
+    */
+   if (!shader_program->data->spirv)
+      NIR_PASS_V(nir, st_nir_lower_builtin);
+
    NIR_PASS_V(nir, gl_nir_lower_atomics, shader_program, true);
+   NIR_PASS_V(nir, nir_opt_intrinsics);
+
+   nir_variable_mode mask = nir_var_function_temp;
+   nir_remove_dead_variables(nir, mask);
 
    if (st->ctx->_Shader->Flags & GLSL_DUMP) {
       _mesa_log("\n");
@@ -475,32 +499,6 @@ st_glsl_to_nir_post_opts(struct st_context *st, struct gl_program *prog,
    }
 }
 
-/* TODO any better helper somewhere to sort a list? */
-
-static void
-insert_sorted(struct exec_list *var_list, nir_variable *new_var)
-{
-   nir_foreach_variable(var, var_list) {
-      if (var->data.location > new_var->data.location) {
-         exec_node_insert_node_before(&var->node, &new_var->node);
-         return;
-      }
-   }
-   exec_list_push_tail(var_list, &new_var->node);
-}
-
-static void
-sort_varyings(struct exec_list *var_list)
-{
-   struct exec_list new_list;
-   exec_list_make_empty(&new_list);
-   nir_foreach_variable_safe(var, var_list) {
-      exec_node_remove(&var->node);
-      insert_sorted(&new_list, var);
-   }
-   exec_list_move_nodes_to(&new_list, var_list);
-}
-
 static void
 set_st_program(struct gl_program *prog,
                struct gl_shader_program *shader_program,
@@ -544,51 +542,43 @@ set_st_program(struct gl_program *prog,
 }
 
 static void
-st_nir_get_mesa_program(struct gl_context *ctx,
-                        struct gl_shader_program *shader_program,
-                        struct gl_linked_shader *shader)
+st_nir_vectorize_io(nir_shader *producer, nir_shader *consumer)
 {
-   struct st_context *st = st_context(ctx);
-   struct pipe_screen *pscreen = ctx->st->pipe->screen;
-   struct gl_program *prog;
-
-   validate_ir_tree(shader->ir);
-
-   prog = shader->Program;
-
-   prog->Parameters = _mesa_new_parameter_list();
-
-   _mesa_copy_linked_program_data(shader_program, shader);
-   _mesa_generate_parameters_list_for_uniforms(ctx, shader_program, shader,
-                                               prog->Parameters);
-
-   /* Remove reads from output registers. */
-   if (!pscreen->get_param(pscreen, PIPE_CAP_TGSI_CAN_READ_OUTPUTS))
-      lower_output_reads(shader->Stage, shader->ir);
-
-   if (ctx->_Shader->Flags & GLSL_DUMP) {
-      _mesa_log("\n");
-      _mesa_log("GLSL IR for linked %s program %d:\n",
-             _mesa_shader_stage_to_string(shader->Stage),
-             shader_program->Name);
-      _mesa_print_ir(_mesa_get_log_file(), shader->ir, NULL);
-      _mesa_log("\n\n");
+   NIR_PASS_V(producer, nir_lower_io_to_vector, nir_var_shader_out);
+   NIR_PASS_V(producer, nir_opt_combine_stores, nir_var_shader_out);
+   NIR_PASS_V(consumer, nir_lower_io_to_vector, nir_var_shader_in);
+
+   if (producer->info.stage != MESA_SHADER_TESS_CTRL) {
+      /* Calling lower_io_to_vector creates output variable writes with
+       * write-masks.  We only support these for TCS outputs, so for other
+       * stages, we need to call nir_lower_io_to_temporaries to get rid of
+       * them.  This, in turn, creates temporary variables and extra
+       * copy_deref intrinsics that we need to clean up.
+       */
+      NIR_PASS_V(producer, nir_lower_io_to_temporaries,
+                 nir_shader_get_entrypoint(producer), true, false);
+      NIR_PASS_V(producer, nir_lower_global_vars_to_local);
+      NIR_PASS_V(producer, nir_split_var_copies);
+      NIR_PASS_V(producer, nir_lower_var_copies);
    }
-
-   prog->ExternalSamplersUsed = gl_external_samplers(prog);
-   _mesa_update_shader_textures_used(shader_program, prog);
-
-   nir_shader *nir = st_glsl_to_nir(st, prog, shader_program, shader->Stage);
-
-   set_st_program(prog, shader_program, nir);
-   prog->nir = nir;
 }
 
 static void
 st_nir_link_shaders(nir_shader **producer, nir_shader **consumer, bool scalar)
 {
+   if (scalar) {
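+      /* Split vector varyings into scalars so the linker can drop unused
+       * components before packing.
+       */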
+      NIR_PASS_V(*producer, nir_lower_io_to_scalar_early, nir_var_shader_out);
+      NIR_PASS_V(*consumer, nir_lower_io_to_scalar_early, nir_var_shader_in);
+   }
+
    nir_lower_io_arrays_to_elements(*producer, *consumer);
 
+   st_nir_opts(*producer, scalar);
+   st_nir_opts(*consumer, scalar);
+
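+   /* nir_link_opt_varyings can move constant and uniform stores from the
+    * producer into the consumer, so re-run the optimizations there when it
+    * makes progress.
+    */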
+   if (nir_link_opt_varyings(*producer, *consumer))
+      st_nir_opts(*consumer, scalar);
+
    NIR_PASS_V(*producer, nir_remove_dead_variables, nir_var_shader_out);
    NIR_PASS_V(*consumer, nir_remove_dead_variables, nir_var_shader_in);
 
@@ -596,24 +586,36 @@ st_nir_link_shaders(nir_shader **producer, nir_shader **consumer, bool scalar)
       NIR_PASS_V(*producer, nir_lower_global_vars_to_local);
       NIR_PASS_V(*consumer, nir_lower_global_vars_to_local);
 
-      /* The backend might not be able to handle indirects on
-       * temporaries so we need to lower indirects on any of the
-       * varyings we have demoted here.
-       *
-       * TODO: radeonsi shouldn't need to do this, however LLVM isn't
-       * currently smart enough to handle indirects without causing excess
-       * spilling causing the gpu to hang.
-       *
-       * See the following thread for more details of the problem:
-       * https://lists.freedesktop.org/archives/mesa-dev/2017-July/162106.html
-       */
-      nir_variable_mode indirect_mask = nir_var_local;
-
-      NIR_PASS_V(*producer, nir_lower_indirect_derefs, indirect_mask);
-      NIR_PASS_V(*consumer, nir_lower_indirect_derefs, indirect_mask);
-
       st_nir_opts(*producer, scalar);
       st_nir_opts(*consumer, scalar);
+
+      /* Optimizations can cause varyings to become unused.
+       * nir_compact_varyings() depends on all dead varyings being removed so
+       * we need to call nir_remove_dead_variables() again here.
+       */
+      NIR_PASS_V(*producer, nir_remove_dead_variables, nir_var_shader_out);
+      NIR_PASS_V(*consumer, nir_remove_dead_variables, nir_var_shader_in);
+   }
+}
+
+static void
+st_lower_patch_vertices_in(struct gl_shader_program *shader_prog)
+{
+   struct gl_linked_shader *linked_tcs =
+      shader_prog->_LinkedShaders[MESA_SHADER_TESS_CTRL];
+   struct gl_linked_shader *linked_tes =
+      shader_prog->_LinkedShaders[MESA_SHADER_TESS_EVAL];
+
+   /* If we have a TCS and TES linked together, lower TES patch vertices. */
+   if (linked_tcs && linked_tes) {
+      nir_shader *tcs_nir = linked_tcs->Program->nir;
+      nir_shader *tes_nir = linked_tes->Program->nir;
+
+      /* The TES input vertex count is the TCS output vertex count, so
+       * lower TES gl_PatchVerticesIn to a constant.
+       */
+      uint32_t tes_patch_verts = tcs_nir->info.tess.tcs_vertices_out;
+      NIR_PASS_V(tes_nir, nir_lower_patch_vertices, tes_patch_verts, NULL);
    }
 }
 
@@ -661,49 +663,93 @@ st_link_nir(struct gl_context *ctx,
    struct pipe_screen *screen = st->pipe->screen;
    bool is_scalar[MESA_SHADER_STAGES];
 
-   /* Determine scalar property of each shader stage */
+   unsigned last_stage = 0;
    for (unsigned i = 0; i < MESA_SHADER_STAGES; i++) {
       struct gl_linked_shader *shader = shader_program->_LinkedShaders[i];
-      enum pipe_shader_type type;
-
       if (shader == NULL)
          continue;
 
-      type = pipe_shader_type_from_mesa(shader->Stage);
-      is_scalar[i] = screen->get_shader_param(screen, type, PIPE_SHADER_CAP_SCALAR_ISA);
-   }
+      /* Determine scalar property of each shader stage */
+      enum pipe_shader_type type = pipe_shader_type_from_mesa(shader->Stage);
+      is_scalar[i] = screen->get_shader_param(screen, type,
+                                              PIPE_SHADER_CAP_SCALAR_ISA);
 
-   /* Determine first and last stage. */
-   unsigned first = MESA_SHADER_STAGES;
-   unsigned last = 0;
-   for (unsigned i = 0; i < MESA_SHADER_STAGES; i++) {
-      if (!shader_program->_LinkedShaders[i])
-         continue;
-      if (first == MESA_SHADER_STAGES)
-         first = i;
-      last = i;
-   }
+      struct gl_program *prog = shader->Program;
+      _mesa_copy_linked_program_data(shader_program, shader);
 
-   for (unsigned i = 0; i < MESA_SHADER_STAGES; i++) {
-      struct gl_linked_shader *shader = shader_program->_LinkedShaders[i];
-      if (shader == NULL)
-         continue;
+      assert(!prog->nir);
+
+      if (shader_program->data->spirv) {
+         const nir_shader_compiler_options *options =
+            st->ctx->Const.ShaderCompilerOptions[shader->Stage].NirOptions;
 
-      st_nir_get_mesa_program(ctx, shader_program, shader);
+         prog->Parameters = _mesa_new_parameter_list();
+         /* Parameters will be filled during NIR linking. */
 
-      nir_variable_mode mask = (nir_variable_mode) 0;
-      if (i != first)
-         mask = (nir_variable_mode)(mask | nir_var_shader_in);
+         prog->nir = _mesa_spirv_to_nir(ctx, shader_program, shader->Stage, options);
+         set_st_program(prog, shader_program, prog->nir);
+      } else {
+         validate_ir_tree(shader->ir);
+
+         prog->Parameters = _mesa_new_parameter_list();
+         _mesa_generate_parameters_list_for_uniforms(ctx, shader_program, shader,
+                                                     prog->Parameters);
+
+         /* Remove reads from output registers. */
+         if (!screen->get_param(screen, PIPE_CAP_TGSI_CAN_READ_OUTPUTS))
+            lower_output_reads(shader->Stage, shader->ir);
+
+         if (ctx->_Shader->Flags & GLSL_DUMP) {
+            _mesa_log("\n");
+            _mesa_log("GLSL IR for linked %s program %d:\n",
+                      _mesa_shader_stage_to_string(shader->Stage),
+                      shader_program->Name);
+            _mesa_print_ir(_mesa_get_log_file(), shader->ir, NULL);
+            _mesa_log("\n\n");
+         }
 
-      if (i != last)
-         mask = (nir_variable_mode)(mask | nir_var_shader_out);
+         prog->ExternalSamplersUsed = gl_external_samplers(prog);
+         _mesa_update_shader_textures_used(shader_program, prog);
 
-      nir_shader *nir = shader->Program->nir;
+         const nir_shader_compiler_options *options =
+            st->ctx->Const.ShaderCompilerOptions[prog->info.stage].NirOptions;
+         assert(options);
 
-      if (is_scalar[i])
-         NIR_PASS_V(nir, nir_lower_io_to_scalar_early, mask);
+         prog->nir = glsl_to_nir(st->ctx, shader_program, shader->Stage, options);
+         set_st_program(prog, shader_program, prog->nir);
+         st_nir_preprocess(st, prog, shader_program, shader->Stage);
+      }
 
-      st_nir_opts(nir, is_scalar[i]);
+      last_stage = i;
+
+      if (is_scalar[i]) {
+         NIR_PASS_V(shader->Program->nir, nir_lower_load_const_to_scalar);
+      }
+   }
+
+   /* For SPIR-V, we have to perform the NIR linking before applying
+    * st_nir_preprocess.
+    */
+   if (shader_program->data->spirv) {
+      static const gl_nir_linker_options opts = {
+         true /* fill_parameters */
+      };
+      if (!gl_nir_link(ctx, shader_program, &opts))
+         return GL_FALSE;
+
+      nir_build_program_resource_list(ctx, shader_program);
+
+      for (unsigned i = 0; i < MESA_SHADER_STAGES; i++) {
+         struct gl_linked_shader *shader = shader_program->_LinkedShaders[i];
+         if (shader == NULL)
+            continue;
+
+         struct gl_program *prog = shader->Program;
+         prog->ExternalSamplersUsed = gl_external_samplers(prog);
+         _mesa_update_shader_textures_used(shader_program, prog);
+
+         st_nir_preprocess(st, prog, shader_program, shader->Stage);
+      }
    }
 
    /* Linking the stages in the opposite order (from fragment to vertex)
@@ -711,7 +757,7 @@ st_link_nir(struct gl_context *ctx,
     * are eliminated if they are (transitively) not used in a later
     * stage.
     */
-   int next = last;
+   int next = last_stage;
    for (int i = next - 1; i >= 0; i--) {
       struct gl_linked_shader *shader = shader_program->_LinkedShaders[i];
       if (shader == NULL)
@@ -735,6 +781,7 @@ st_link_nir(struct gl_context *ctx,
                  st->pipe->screen);
 
       NIR_PASS_V(nir, nir_lower_system_values);
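+      /* Combine the gl_ClipDistance/gl_CullDistance float arrays into the
+       * compact combined arrays later passes expect.
+       */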
+      NIR_PASS_V(nir, nir_lower_clip_cull_distance_arrays);
 
       nir_shader_gather_info(nir, nir_shader_get_entrypoint(nir));
       shader->Program->info = nir->info;
@@ -756,13 +803,19 @@ st_link_nir(struct gl_context *ctx,
           * the pipe_stream_output->output_register field is based on the
           * pre-compacted driver_locations.
           */
-         if (!prev_shader->sh.LinkedTransformFeedback)
+         if (!(prev_shader->sh.LinkedTransformFeedback &&
+               prev_shader->sh.LinkedTransformFeedback->NumVarying > 0))
             nir_compact_varyings(shader_program->_LinkedShaders[prev]->Program->nir,
                               nir, ctx->API != API_OPENGL_COMPAT);
+
+         if (ctx->Const.ShaderCompilerOptions[i].NirOptions->vectorize_io)
+            st_nir_vectorize_io(prev_shader->nir, nir);
       }
       prev = i;
    }
 
+   st_lower_patch_vertices_in(shader_program);
+
    for (unsigned i = 0; i < MESA_SHADER_STAGES; i++) {
       struct gl_linked_shader *shader = shader_program->_LinkedShaders[i];
       if (shader == NULL)
@@ -779,88 +832,114 @@ st_link_nir(struct gl_context *ctx,
       }
 
       nir_sweep(shader->Program->nir);
+
+      /* The GLSL IR won't be needed anymore. */
+      ralloc_free(shader->ir);
+      shader->ir = NULL;
    }
 
    return true;
 }
 
-/* Last third of preparing nir from glsl, which happens after shader
- * variant lowering.
- */
 void
-st_finalize_nir(struct st_context *st, struct gl_program *prog,
-                struct gl_shader_program *shader_program, nir_shader *nir)
+st_nir_assign_varying_locations(struct st_context *st, nir_shader *nir)
 {
-   struct pipe_screen *screen = st->pipe->screen;
-   const nir_shader_compiler_options *options =
-      st->ctx->Const.ShaderCompilerOptions[prog->info.stage].NirOptions;
-
-   NIR_PASS_V(nir, nir_split_var_copies);
-   NIR_PASS_V(nir, nir_lower_var_copies);
-   if (options->lower_all_io_to_temps ||
-       nir->info.stage == MESA_SHADER_VERTEX ||
-       nir->info.stage == MESA_SHADER_GEOMETRY) {
-      NIR_PASS_V(nir, nir_lower_io_arrays_to_elements_no_indirects, false);
-   } else if (nir->info.stage == MESA_SHADER_FRAGMENT) {
-      NIR_PASS_V(nir, nir_lower_io_arrays_to_elements_no_indirects, true);
-   }
-
    if (nir->info.stage == MESA_SHADER_VERTEX) {
       /* Needs special handling so drvloc matches the vbo state: */
-      st_nir_assign_vs_in_locations(prog, nir);
+      st_nir_assign_vs_in_locations(nir);
       /* Re-lower global vars, to deal with any dead VS inputs. */
       NIR_PASS_V(nir, nir_lower_global_vars_to_local);
 
-      sort_varyings(&nir->outputs);
-      st_nir_assign_var_locations(&nir->outputs,
+      nir_assign_io_var_locations(&nir->outputs,
                                   &nir->num_outputs,
                                   nir->info.stage);
       st_nir_fixup_varying_slots(st, &nir->outputs);
    } else if (nir->info.stage == MESA_SHADER_GEOMETRY ||
               nir->info.stage == MESA_SHADER_TESS_CTRL ||
               nir->info.stage == MESA_SHADER_TESS_EVAL) {
-      sort_varyings(&nir->inputs);
-      st_nir_assign_var_locations(&nir->inputs,
+      nir_assign_io_var_locations(&nir->inputs,
                                   &nir->num_inputs,
                                   nir->info.stage);
       st_nir_fixup_varying_slots(st, &nir->inputs);
 
-      sort_varyings(&nir->outputs);
-      st_nir_assign_var_locations(&nir->outputs,
+      nir_assign_io_var_locations(&nir->outputs,
                                   &nir->num_outputs,
                                   nir->info.stage);
       st_nir_fixup_varying_slots(st, &nir->outputs);
    } else if (nir->info.stage == MESA_SHADER_FRAGMENT) {
-      sort_varyings(&nir->inputs);
-      st_nir_assign_var_locations(&nir->inputs,
+      nir_assign_io_var_locations(&nir->inputs,
                                   &nir->num_inputs,
                                   nir->info.stage);
       st_nir_fixup_varying_slots(st, &nir->inputs);
-      st_nir_assign_var_locations(&nir->outputs,
+      nir_assign_io_var_locations(&nir->outputs,
                                   &nir->num_outputs,
                                   nir->info.stage);
    } else if (nir->info.stage == MESA_SHADER_COMPUTE) {
        /* TODO? */
    } else {
-      unreachable("invalid shader type for tgsi bypass\n");
+      unreachable("invalid shader type");
+   }
+}
+
+void
+st_nir_lower_samplers(struct pipe_screen *screen, nir_shader *nir,
+                      struct gl_shader_program *shader_program,
+                      struct gl_program *prog)
+{
+   if (screen->get_param(screen, PIPE_CAP_NIR_SAMPLERS_AS_DEREF))
+      NIR_PASS_V(nir, gl_nir_lower_samplers_as_deref, shader_program);
+   else
+      NIR_PASS_V(nir, gl_nir_lower_samplers, shader_program);
+
+   if (prog) {
+      prog->info.textures_used = nir->info.textures_used;
+      prog->info.textures_used_by_txf = nir->info.textures_used_by_txf;
+   }
+}
+
+/* Last third of preparing nir from glsl, which happens after shader
+ * variant lowering.
+ */
+void
+st_finalize_nir(struct st_context *st, struct gl_program *prog,
+                struct gl_shader_program *shader_program, nir_shader *nir)
+{
+   struct pipe_screen *screen = st->pipe->screen;
+   const nir_shader_compiler_options *options =
+      st->ctx->Const.ShaderCompilerOptions[prog->info.stage].NirOptions;
+
+   NIR_PASS_V(nir, nir_split_var_copies);
+   NIR_PASS_V(nir, nir_lower_var_copies);
+   if (options->lower_all_io_to_temps ||
+       options->lower_all_io_to_elements ||
+       nir->info.stage == MESA_SHADER_VERTEX ||
+       nir->info.stage == MESA_SHADER_GEOMETRY) {
+      NIR_PASS_V(nir, nir_lower_io_arrays_to_elements_no_indirects, false);
+   } else if (nir->info.stage == MESA_SHADER_FRAGMENT) {
+      NIR_PASS_V(nir, nir_lower_io_arrays_to_elements_no_indirects, true);
    }
 
+   st_nir_assign_varying_locations(st, nir);
+
    NIR_PASS_V(nir, nir_lower_atomics_to_ssbo,
          st->ctx->Const.Program[nir->info.stage].MaxAtomicBuffers);
 
-   st_nir_assign_uniform_locations(st->ctx, prog, shader_program,
-                                   &nir->uniforms, &nir->num_uniforms);
+   st_nir_assign_uniform_locations(st->ctx, prog,
+                                   &nir->uniforms);
+
+   /* Set num_uniforms in units of vec4 slots. */
+   nir->num_uniforms = DIV_ROUND_UP(prog->Parameters->NumParameterValues, 4);
 
    if (st->ctx->Const.PackedDriverUniformStorage) {
       NIR_PASS_V(nir, nir_lower_io, nir_var_uniform, st_glsl_type_dword_size,
                  (nir_lower_io_options)0);
-      NIR_PASS_V(nir, st_nir_lower_uniforms_to_ubo);
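+      /* Uniform offsets were assigned in dwords above, so the multiplier of
+       * 4 converts them to the byte offsets that UBO loads use.
+       */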
+      NIR_PASS_V(nir, nir_lower_uniforms_to_ubo, 4);
+   } else {
+      NIR_PASS_V(nir, nir_lower_io, nir_var_uniform, st_glsl_uniforms_type_size,
+                 (nir_lower_io_options)0);
    }
 
-   if (screen->get_param(screen, PIPE_CAP_NIR_SAMPLERS_AS_DEREF))
-      NIR_PASS_V(nir, gl_nir_lower_samplers_as_deref, shader_program);
-   else
-      NIR_PASS_V(nir, gl_nir_lower_samplers, shader_program);
+   st_nir_lower_samplers(screen, nir, shader_program, prog);
 }
 
 } /* extern "C" */