gallium + mesa/st: Add PIPE_CAP_NIR_ATOMICS_AS_DEREF and use it
diff --git a/src/mesa/state_tracker/st_glsl_to_nir.cpp b/src/mesa/state_tracker/st_glsl_to_nir.cpp
index 9a4e030413bf93a0841ebcadb48f7caab6f64c2e..99bbbef79db16f969cc4953cf6cef328fa3fa2fb 100644
--- a/src/mesa/state_tracker/st_glsl_to_nir.cpp
+++ b/src/mesa/state_tracker/st_glsl_to_nir.cpp
 #include "program/prog_statevars.h"
 #include "program/prog_parameter.h"
 #include "program/ir_to_mesa.h"
+#include "main/context.h"
 #include "main/mtypes.h"
 #include "main/errors.h"
+#include "main/glspirv.h"
 #include "main/shaderapi.h"
 #include "main/uniforms.h"
 
 #include "main/shaderobj.h"
 #include "st_context.h"
-#include "st_glsl_types.h"
 #include "st_program.h"
+#include "st_shader_cache.h"
 
 #include "compiler/nir/nir.h"
 #include "compiler/glsl_types.h"
 #include "compiler/glsl/glsl_to_nir.h"
 #include "compiler/glsl/gl_nir.h"
+#include "compiler/glsl/gl_nir_linker.h"
 #include "compiler/glsl/ir.h"
 #include "compiler/glsl/ir_optimization.h"
 #include "compiler/glsl/string_to_uint_map.h"
@@ -60,14 +63,17 @@ type_size(const struct glsl_type *type)
  * with the anything->tgsi->nir path.
  */
 static void
-st_nir_fixup_varying_slots(struct st_context *st, struct exec_list *var_list)
+st_nir_fixup_varying_slots(struct st_context *st, nir_shader *shader,
+                           nir_variable_mode mode)
 {
    if (st->needs_texcoord_semantic)
       return;
 
-   nir_foreach_variable(var, var_list) {
+   nir_foreach_variable_with_modes(var, shader, mode) {
       if (var->data.location >= VARYING_SLOT_VAR0) {
          var->data.location += 9;
+      } else if (var->data.location == VARYING_SLOT_PNTC) {
+         var->data.location = VARYING_SLOT_VAR8;
       } else if ((var->data.location >= VARYING_SLOT_TEX0) &&
                (var->data.location <= VARYING_SLOT_TEX7)) {
          var->data.location += VARYING_SLOT_VAR0 - VARYING_SLOT_TEX0;
@@ -75,125 +81,74 @@ st_nir_fixup_varying_slots(struct st_context *st, struct exec_list *var_list)
    }
 }
 
+static void
+st_shader_gather_info(nir_shader *nir, struct gl_program *prog)
+{
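+   /* Re-derive nir->info from the current IR before mirroring it into the
+    * gl_program below.
+    */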
+   nir_shader_gather_info(nir, nir_shader_get_entrypoint(nir));
+
+   /* Copy the info we just generated back into the gl_program */
+   const char *prog_name = prog->info.name;
+   const char *prog_label = prog->info.label;
+   prog->info = nir->info;
+   prog->info.name = prog_name;
+   prog->info.label = prog_label;
+}
+
 /* input location assignment for VS inputs must be handled specially, so
  * that it is aligned w/ st's vbo state.
  * (This isn't the case with, for ex, FS inputs, which only need to agree
  * on varying-slot w/ the VS outputs)
  */
-static void
-st_nir_assign_vs_in_locations(nir_shader *nir)
+void
+st_nir_assign_vs_in_locations(struct nir_shader *nir)
 {
-   nir->num_inputs = 0;
-   nir_foreach_variable_safe(var, &nir->inputs) {
+   if (nir->info.stage != MESA_SHADER_VERTEX)
+      return;
+
+   bool removed_inputs = false;
+
+   nir->num_inputs = util_bitcount64(nir->info.inputs_read);
+   nir_foreach_shader_in_variable_safe(var, nir) {
       /* NIR already assigns dual-slot inputs to two locations so all we have
        * to do is compact everything down.
        */
-      if (var->data.location == VERT_ATTRIB_EDGEFLAG) {
-         /* bit of a hack, mirroring st_translate_vertex_program */
-         var->data.driver_location = util_bitcount64(nir->info.inputs_read);
-      } else if (nir->info.inputs_read & BITFIELD64_BIT(var->data.location)) {
+      if (nir->info.inputs_read & BITFIELD64_BIT(var->data.location)) {
          var->data.driver_location =
             util_bitcount64(nir->info.inputs_read &
                               BITFIELD64_MASK(var->data.location));
-         nir->num_inputs++;
       } else {
-         /* Move unused input variables to the globals list (with no
+         /* Convert unused input variables to shader_temp (with no
           * initialization), to avoid confusing drivers looking through the
           * inputs array and expecting to find inputs with a driver_location
           * set.
           */
-         exec_node_remove(&var->node);
          var->data.mode = nir_var_shader_temp;
-         exec_list_push_tail(&nir->globals, &var->node);
+         removed_inputs = true;
       }
    }
+
+   /* Re-lower global vars, to deal with any dead VS inputs. */
+   if (removed_inputs)
+      NIR_PASS_V(nir, nir_lower_global_vars_to_local);
 }
 
-static void
-st_nir_assign_var_locations(struct exec_list *var_list, unsigned *size,
-                            gl_shader_stage stage)
+static int
+st_nir_lookup_parameter_index(struct gl_program *prog, nir_variable *var)
 {
-   unsigned location = 0;
-   unsigned assigned_locations[VARYING_SLOT_TESS_MAX];
-   uint64_t processed_locs[2] = {0};
-
-   const int base = stage == MESA_SHADER_FRAGMENT ?
-      (int) FRAG_RESULT_DATA0 : (int) VARYING_SLOT_VAR0;
-
-   int UNUSED last_loc = 0;
-   nir_foreach_variable(var, var_list) {
-
-      const struct glsl_type *type = var->type;
-      if (nir_is_per_vertex_io(var, stage)) {
-         assert(glsl_type_is_array(type));
-         type = glsl_get_array_element(type);
-      }
-
-      unsigned var_size = type_size(type);
-
-      /* Builtins don't allow component packing so we only need to worry about
-       * user defined varyings sharing the same location.
-       */
-      bool processed = false;
-      if (var->data.location >= base) {
-         unsigned glsl_location = var->data.location - base;
-
-         for (unsigned i = 0; i < var_size; i++) {
-            if (processed_locs[var->data.index] &
-                ((uint64_t)1 << (glsl_location + i)))
-               processed = true;
-            else
-               processed_locs[var->data.index] |=
-                  ((uint64_t)1 << (glsl_location + i));
-         }
-      }
+   struct gl_program_parameter_list *params = prog->Parameters;
 
-      /* Because component packing allows varyings to share the same location
-       * we may have already have processed this location.
-       */
-      if (processed) {
-         unsigned driver_location = assigned_locations[var->data.location];
-         var->data.driver_location = driver_location;
-         *size += type_size(type);
-
-         /* An array may be packed such that is crosses multiple other arrays
-          * or variables, we need to make sure we have allocated the elements
-          * consecutively if the previously proccessed var was shorter than
-          * the current array we are processing.
-          *
-          * NOTE: The code below assumes the var list is ordered in ascending
-          * location order.
-          */
-         assert(last_loc <= var->data.location);
-         last_loc = var->data.location;
-         unsigned last_slot_location = driver_location + var_size;
-         if (last_slot_location > location) {
-            unsigned num_unallocated_slots = last_slot_location - location;
-            unsigned first_unallocated_slot = var_size - num_unallocated_slots;
-            for (unsigned i = first_unallocated_slot; i < num_unallocated_slots; i++) {
-               assigned_locations[var->data.location + i] = location;
-               location++;
-            }
-         }
-         continue;
-      }
-
-      for (unsigned i = 0; i < var_size; i++) {
-         assigned_locations[var->data.location + i] = location + i;
-      }
-
-      var->data.driver_location = location;
-      location += var_size;
+   /* Look up the first parameter whose main uniform storage index matches
+    * the variable's location.
+    */
+   for (unsigned i = 0; i < params->NumParameters; i++) {
+      int index = params->Parameters[i].MainUniformStorageIndex;
+      if (index == var->data.location)
+         return i;
    }
 
-   *size += location;
-}
-
-static int
-st_nir_lookup_parameter_index(const struct gl_program_parameter_list *params,
-                              const char *name)
-{
-   int loc = _mesa_lookup_parameter_index(params, name);
+   /* TODO: Handle this fallback for SPIR-V.  We need this for GLSL e.g. in
+    * dEQP-GLES2.functional.uniform_api.random.3
+    */
 
    /* is there a better way to do this?  If we have something like:
     *
@@ -222,39 +177,31 @@ st_nir_lookup_parameter_index(const struct gl_program_parameter_list *params,
     * needs to work backwards to get base var loc from the param-list
     * which already has them separated out.
     */
-   if (loc < 0) {
-      int namelen = strlen(name);
+   if (!prog->sh.data->spirv) {
+      int namelen = strlen(var->name);
       for (unsigned i = 0; i < params->NumParameters; i++) {
          struct gl_program_parameter *p = &params->Parameters[i];
-         if ((strncmp(p->Name, name, namelen) == 0) &&
+         if ((strncmp(p->Name, var->name, namelen) == 0) &&
              ((p->Name[namelen] == '.') || (p->Name[namelen] == '['))) {
-            loc = i;
-            break;
+            return i;
          }
       }
    }
 
-   return loc;
+   return -1;
 }
 
 static void
 st_nir_assign_uniform_locations(struct gl_context *ctx,
                                 struct gl_program *prog,
-                                struct exec_list *uniform_list)
+                                nir_shader *nir)
 {
    int shaderidx = 0;
    int imageidx = 0;
 
-   nir_foreach_variable(uniform, uniform_list) {
+   nir_foreach_uniform_variable(uniform, nir) {
       int loc;
 
-      /*
-       * UBO's have their own address spaces, so don't count them towards the
-       * number of global uniforms
-       */
-      if (uniform->data.mode == nir_var_mem_ubo || uniform->data.mode == nir_var_mem_ssbo)
-         continue;
-
       const struct glsl_type *type = glsl_without_array(uniform->type);
       if (!uniform->data.bindless && (type->is_sampler() || type->is_image())) {
          if (type->is_sampler()) {
@@ -264,7 +211,7 @@ st_nir_assign_uniform_locations(struct gl_context *ctx,
             loc = imageidx;
             imageidx += type_size(uniform->type);
          }
-      } else if (strncmp(uniform->name, "gl_", 3) == 0) {
+      } else if (uniform->state_slots) {
          const gl_state_index16 *const stateTokens = uniform->state_slots[0].tokens;
          /* This state reference has already been setup by ir_to_mesa, but we'll
           * get the same index back here.
@@ -285,7 +232,7 @@ st_nir_assign_uniform_locations(struct gl_context *ctx,
             loc = _mesa_add_state_reference(prog->Parameters, stateTokens);
          }
       } else {
-         loc = st_nir_lookup_parameter_index(prog->Parameters, uniform->name);
+         loc = st_nir_lookup_parameter_index(prog, uniform);
 
          /* We need to check that loc is not -1 here before accessing the
           * array. It can be negative for example when we have a struct that
@@ -301,16 +248,31 @@ st_nir_assign_uniform_locations(struct gl_context *ctx,
 }
 
 void
-st_nir_opts(nir_shader *nir, bool scalar)
+st_nir_opts(nir_shader *nir)
 {
    bool progress;
+
    do {
       progress = false;
 
       NIR_PASS_V(nir, nir_lower_vars_to_ssa);
+
+      /* Linking deals with unused inputs/outputs, but here we can remove
+       * things local to the shader in the hope that we can clean up other
+       * things. This pass will also remove variables with only stores, so we
+       * might be able to make progress after it.
+       */
+      NIR_PASS(progress, nir, nir_remove_dead_variables,
+               (nir_variable_mode)(nir_var_function_temp |
+                                   nir_var_shader_temp |
+                                   nir_var_mem_shared),
+               NULL);
 
-      if (scalar) {
-         NIR_PASS_V(nir, nir_lower_alu_to_scalar);
+      NIR_PASS(progress, nir, nir_opt_copy_prop_vars);
+      NIR_PASS(progress, nir, nir_opt_dead_write_vars);
+
+      if (nir->options->lower_to_scalar) {
+         NIR_PASS_V(nir, nir_lower_alu_to_scalar, NULL, NULL);
          NIR_PASS_V(nir, nir_lower_phis_to_scalar);
       }
 
@@ -324,7 +286,7 @@ st_nir_opts(nir_shader *nir, bool scalar)
          NIR_PASS(progress, nir, nir_copy_prop);
          NIR_PASS(progress, nir, nir_opt_dce);
       }
-      NIR_PASS(progress, nir, nir_opt_if);
+      NIR_PASS(progress, nir, nir_opt_if, false);
       NIR_PASS(progress, nir, nir_opt_dead_cf);
       NIR_PASS(progress, nir, nir_opt_cse);
       NIR_PASS(progress, nir, nir_opt_peephole_select, 8, true, true);
@@ -332,6 +294,32 @@ st_nir_opts(nir_shader *nir, bool scalar)
       NIR_PASS(progress, nir, nir_opt_algebraic);
       NIR_PASS(progress, nir, nir_opt_constant_folding);
 
+      if (!nir->info.flrp_lowered) {
+         unsigned lower_flrp =
+            (nir->options->lower_flrp16 ? 16 : 0) |
+            (nir->options->lower_flrp32 ? 32 : 0) |
+            (nir->options->lower_flrp64 ? 64 : 0);
+
+         if (lower_flrp) {
+            bool lower_flrp_progress = false;
+
+            NIR_PASS(lower_flrp_progress, nir, nir_lower_flrp,
+                     lower_flrp,
+                     false /* always_precise */,
+                     nir->options->lower_ffma);
+            if (lower_flrp_progress) {
+               NIR_PASS(progress, nir,
+                        nir_opt_constant_folding);
+               progress = true;
+            }
+         }
+
+         /* Nothing should rematerialize any flrps, so we only need to do this
+          * lowering once.
+          */
+         nir->info.flrp_lowered = true;
+      }
+
       NIR_PASS(progress, nir, nir_opt_undef);
       NIR_PASS(progress, nir, nir_opt_conditional_discard);
       if (nir->options->max_unroll_iterations) {
@@ -340,28 +328,32 @@ st_nir_opts(nir_shader *nir, bool scalar)
    } while (progress);
 }
 
+static void
+shared_type_info(const struct glsl_type *type, unsigned *size, unsigned *align)
+{
+   assert(glsl_type_is_vector_or_scalar(type));
+
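+   /* Shared variables use component-sized elements, with vec3 padded to the
+    * alignment of a vec4.
+    */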
+   uint32_t comp_size = glsl_type_is_boolean(type)
+      ? 4 : glsl_get_bit_size(type) / 8;
+   unsigned length = glsl_get_vector_elements(type);
+   *size = comp_size * length;
+   *align = comp_size * (length == 3 ? 4 : length);
+}
+
 /* First third of converting glsl_to_nir.. this leaves things in a pre-
  * nir_lower_io state, so that shader variants can more easily insert/
  * replace variables, etc.
  */
-static nir_shader *
-st_glsl_to_nir(struct st_context *st, struct gl_program *prog,
-               struct gl_shader_program *shader_program,
-               gl_shader_stage stage)
+static void
+st_nir_preprocess(struct st_context *st, struct gl_program *prog,
+                  struct gl_shader_program *shader_program,
+                  gl_shader_stage stage)
 {
+   struct pipe_screen *screen = st->pipe->screen;
    const nir_shader_compiler_options *options =
       st->ctx->Const.ShaderCompilerOptions[prog->info.stage].NirOptions;
-   enum pipe_shader_type type = pipe_shader_type_from_mesa(stage);
-   struct pipe_screen *screen = st->pipe->screen;
-   bool is_scalar = screen->get_shader_param(screen, type, PIPE_SHADER_CAP_SCALAR_ISA);
    assert(options);
-   bool lower_64bit =
-      options->lower_int64_options || options->lower_doubles_options;
-
-   if (prog->nir)
-      return prog->nir;
-
-   nir_shader *nir = glsl_to_nir(st->ctx, shader_program, stage, options);
+   nir_shader *nir = prog->nir;
 
    /* Set the next shader stage hint for VS and TES. */
    if (!nir->info.separate_shader &&
@@ -379,16 +371,21 @@ st_glsl_to_nir(struct st_context *st, struct gl_program *prog,
    }
 
    nir_shader_gather_info(nir, nir_shader_get_entrypoint(nir));
-   nir_shader *softfp64 = NULL;
-   if (nir->info.uses_64bit &&
+   if (!st->ctx->SoftFP64 && nir->info.uses_64bit &&
        (options->lower_doubles_options & nir_lower_fp64_full_software) != 0) {
-      softfp64 = glsl_float64_funcs_to_nir(st->ctx, options);
-      ralloc_steal(ralloc_parent(nir), softfp64);
+      st->ctx->SoftFP64 = glsl_float64_funcs_to_nir(st->ctx, options);
    }
 
-   nir_variable_mode mask =
-      (nir_variable_mode) (nir_var_shader_in | nir_var_shader_out);
-   nir_remove_dead_variables(nir, mask);
+   /* ES has strict SSO validation rules for shader IO matching so we can't
+    * remove dead IO until the resource list has been built. For ES separate
+    * shaders we defer the removal until later. This will potentially make
+    * the IO lowering calls below do a little extra work but should otherwise
+    * have no impact.
+    */
+   if (!_mesa_is_gles(st->ctx) || !nir->info.separate_shader) {
+      nir_variable_mode mask =
+         (nir_variable_mode) (nir_var_shader_in | nir_var_shader_out);
+      nir_remove_dead_variables(nir, mask, NULL);
+   }
 
    if (options->lower_all_io_to_temps ||
        nir->info.stage == MESA_SHADER_VERTEX ||
@@ -396,7 +393,8 @@ st_glsl_to_nir(struct st_context *st, struct gl_program *prog,
       NIR_PASS_V(nir, nir_lower_io_to_temporaries,
                  nir_shader_get_entrypoint(nir),
                  true, true);
-   } else if (nir->info.stage == MESA_SHADER_FRAGMENT) {
+   } else if (nir->info.stage == MESA_SHADER_FRAGMENT ||
+              !screen->get_param(screen, PIPE_CAP_TGSI_CAN_READ_OUTPUTS)) {
       NIR_PASS_V(nir, nir_lower_io_to_temporaries,
                  nir_shader_get_entrypoint(nir),
                  true, false);
@@ -406,41 +404,24 @@ st_glsl_to_nir(struct st_context *st, struct gl_program *prog,
    NIR_PASS_V(nir, nir_split_var_copies);
    NIR_PASS_V(nir, nir_lower_var_copies);
 
-   if (is_scalar) {
-     NIR_PASS_V(nir, nir_lower_alu_to_scalar);
+   if (options->lower_to_scalar) {
+     NIR_PASS_V(nir, nir_lower_alu_to_scalar, NULL, NULL);
    }
 
-   st_nir_opts(nir, is_scalar);
-
-   NIR_PASS_V(nir, gl_nir_lower_buffers, shader_program);
-   /* Do a round of constant folding to clean up address calculations */
-   NIR_PASS_V(nir, nir_opt_constant_folding);
-
-   if (lower_64bit) {
-      bool lowered_64bit_ops = false;
-      bool progress = false;
-
-      NIR_PASS_V(nir, nir_opt_algebraic);
-
-      do {
-         progress = false;
-         if (options->lower_int64_options) {
-            NIR_PASS(progress, nir, nir_lower_int64,
-                     options->lower_int64_options);
-         }
-         if (options->lower_doubles_options) {
-            NIR_PASS(progress, nir, nir_lower_doubles,
-                     softfp64, options->lower_doubles_options);
-         }
-         NIR_PASS(progress, nir, nir_opt_algebraic);
-         lowered_64bit_ops |= progress;
-      } while (progress);
+   /* before buffers and vars_to_ssa */
+   NIR_PASS_V(nir, gl_nir_lower_images, true);
 
-      if (lowered_64bit_ops)
-         st_nir_opts(nir, is_scalar);
+   /* TODO: Change GLSL to not lower shared memory. */
+   if (prog->nir->info.stage == MESA_SHADER_COMPUTE &&
+       shader_program->data->spirv) {
+      NIR_PASS_V(prog->nir, nir_lower_vars_to_explicit_types,
+                 nir_var_mem_shared, shared_type_info);
+      NIR_PASS_V(prog->nir, nir_lower_explicit_io,
+                 nir_var_mem_shared, nir_address_format_32bit_offset);
    }
 
-   return nir;
+   /* Do a round of constant folding to clean up address calculations */
+   NIR_PASS_V(nir, nir_opt_constant_folding);
 }
 
 /* Second third of converting glsl_to_nir. This creates uniforms, gathers
@@ -451,6 +432,7 @@ st_glsl_to_nir_post_opts(struct st_context *st, struct gl_program *prog,
                          struct gl_shader_program *shader_program)
 {
    nir_shader *nir = prog->nir;
+   struct pipe_screen *screen = st->pipe->screen;
 
    /* Make a pass over the IR to add state references for any built-in
     * uniforms that are used.  This has to be done now (during linking).
@@ -459,19 +441,14 @@ st_glsl_to_nir_post_opts(struct st_context *st, struct gl_program *prog,
     * too late.  At that point, the values for the built-in uniforms won't
     * get sent to the shader.
     */
-   nir_foreach_variable(var, &nir->uniforms) {
-      if (strncmp(var->name, "gl_", 3) == 0) {
-         const nir_state_slot *const slots = var->state_slots;
-         assert(var->state_slots != NULL);
-
+   nir_foreach_uniform_variable(var, nir) {
+      const nir_state_slot *const slots = var->state_slots;
+      if (slots != NULL) {
          const struct glsl_type *type = glsl_without_array(var->type);
          for (unsigned int i = 0; i < var->num_state_slots; i++) {
             unsigned comps;
             if (glsl_type_is_struct_or_ifc(type)) {
-               /* Builtin struct require specical handling for now we just
-                * make all members vec4. See st_nir_lower_builtin.
-                */
-               comps = 4;
+               comps = _mesa_program_state_value_size(slots[i].tokens);
             } else {
                comps = glsl_get_vector_elements(type);
             }
@@ -498,183 +475,116 @@ st_glsl_to_nir_post_opts(struct st_context *st, struct gl_program *prog,
     * prog->ParameterValues to get reallocated (e.g., anything that adds a
     * program constant) has to happen before creating this linkage.
     */
-   _mesa_associate_uniform_storage(st->ctx, shader_program, prog, true);
+   _mesa_associate_uniform_storage(st->ctx, shader_program, prog);
 
    st_set_prog_affected_state_flags(prog);
 
-   NIR_PASS_V(nir, st_nir_lower_builtin);
-   NIR_PASS_V(nir, gl_nir_lower_atomics, shader_program, true);
-
-   nir_variable_mode mask = nir_var_function_temp;
-   nir_remove_dead_variables(nir, mask);
+   /* None of the builtins being lowered here can be produced by SPIR-V.  See
+    * _mesa_builtin_uniform_desc. Also drivers that support packed uniform
+    * storage don't need to lower builtins.
+    */
+   if (!shader_program->data->spirv &&
+       !st->ctx->Const.PackedDriverUniformStorage)
+      NIR_PASS_V(nir, st_nir_lower_builtin);
 
-   if (st->ctx->_Shader->Flags & GLSL_DUMP) {
-      _mesa_log("\n");
-      _mesa_log("NIR IR for linked %s program %d:\n",
-             _mesa_shader_stage_to_string(prog->info.stage),
-             shader_program->Name);
-      nir_print_shader(nir, _mesa_get_log_file());
-      _mesa_log("\n\n");
-   }
-}
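+   /* Drivers that consume atomic counter derefs directly advertise
+    * PIPE_CAP_NIR_ATOMICS_AS_DEREF and skip this GL atomic counter lowering.
+    */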
+   if (!screen->get_param(screen, PIPE_CAP_NIR_ATOMICS_AS_DEREF))
+      NIR_PASS_V(nir, gl_nir_lower_atomics, shader_program, true);
 
-/* TODO any better helper somewhere to sort a list? */
+   NIR_PASS_V(nir, nir_opt_intrinsics);
 
-static void
-insert_sorted(struct exec_list *var_list, nir_variable *new_var)
-{
-   nir_foreach_variable(var, var_list) {
-      if (var->data.location > new_var->data.location) {
-         exec_node_insert_node_before(&var->node, &new_var->node);
-         return;
+   /* Lower 64-bit ops. */
+   if (nir->options->lower_int64_options ||
+       nir->options->lower_doubles_options) {
+      bool lowered_64bit_ops = false;
+      if (nir->options->lower_doubles_options) {
+         NIR_PASS(lowered_64bit_ops, nir, nir_lower_doubles,
+                  st->ctx->SoftFP64, nir->options->lower_doubles_options);
       }
-   }
-   exec_list_push_tail(var_list, &new_var->node);
-}
-
-static void
-sort_varyings(struct exec_list *var_list)
-{
-   struct exec_list new_list;
-   exec_list_make_empty(&new_list);
-   nir_foreach_variable_safe(var, var_list) {
-      exec_node_remove(&var->node);
-      insert_sorted(&new_list, var);
-   }
-   exec_list_move_nodes_to(&new_list, var_list);
-}
+      if (nir->options->lower_int64_options)
+         NIR_PASS(lowered_64bit_ops, nir, nir_lower_int64);
 
-static void
-set_st_program(struct gl_program *prog,
-               struct gl_shader_program *shader_program,
-               nir_shader *nir)
-{
-   struct st_vertex_program *stvp;
-   struct st_common_program *stp;
-   struct st_fragment_program *stfp;
-   struct st_compute_program *stcp;
-
-   switch (prog->info.stage) {
-   case MESA_SHADER_VERTEX:
-      stvp = (struct st_vertex_program *)prog;
-      stvp->shader_program = shader_program;
-      stvp->tgsi.type = PIPE_SHADER_IR_NIR;
-      stvp->tgsi.ir.nir = nir;
-      break;
-   case MESA_SHADER_GEOMETRY:
-   case MESA_SHADER_TESS_CTRL:
-   case MESA_SHADER_TESS_EVAL:
-      stp = (struct st_common_program *)prog;
-      stp->shader_program = shader_program;
-      stp->tgsi.type = PIPE_SHADER_IR_NIR;
-      stp->tgsi.ir.nir = nir;
-      break;
-   case MESA_SHADER_FRAGMENT:
-      stfp = (struct st_fragment_program *)prog;
-      stfp->shader_program = shader_program;
-      stfp->tgsi.type = PIPE_SHADER_IR_NIR;
-      stfp->tgsi.ir.nir = nir;
-      break;
-   case MESA_SHADER_COMPUTE:
-      stcp = (struct st_compute_program *)prog;
-      stcp->shader_program = shader_program;
-      stcp->tgsi.ir_type = PIPE_SHADER_IR_NIR;
-      stcp->tgsi.prog = nir;
-      break;
-   default:
-      unreachable("unknown shader stage");
+      if (lowered_64bit_ops)
+         st_nir_opts(nir);
    }
-}
-
-static void
-st_nir_get_mesa_program(struct gl_context *ctx,
-                        struct gl_shader_program *shader_program,
-                        struct gl_linked_shader *shader)
-{
-   struct st_context *st = st_context(ctx);
-   struct pipe_screen *pscreen = ctx->st->pipe->screen;
-   struct gl_program *prog;
-
-   validate_ir_tree(shader->ir);
 
-   prog = shader->Program;
+   nir_variable_mode mask = (nir_variable_mode)
+      (nir_var_shader_in | nir_var_shader_out | nir_var_function_temp);
+   nir_remove_dead_variables(nir, mask, NULL);
 
-   prog->Parameters = _mesa_new_parameter_list();
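+   /* Without hardware atomic counters, and when the driver doesn't take
+    * atomic derefs either, lower atomic counters to SSBO operations.
+    */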
+   if (!st->has_hw_atomics && !screen->get_param(screen, PIPE_CAP_NIR_ATOMICS_AS_DEREF))
+      NIR_PASS_V(nir, nir_lower_atomics_to_ssbo);
 
-   _mesa_copy_linked_program_data(shader_program, shader);
-   _mesa_generate_parameters_list_for_uniforms(ctx, shader_program, shader,
-                                               prog->Parameters);
+   st_finalize_nir_before_variants(nir);
 
-   /* Remove reads from output registers. */
-   if (!pscreen->get_param(pscreen, PIPE_CAP_TGSI_CAN_READ_OUTPUTS))
-      lower_output_reads(shader->Stage, shader->ir);
+   if (st->allow_st_finalize_nir_twice)
+      st_finalize_nir(st, prog, shader_program, nir, true);
 
-   if (ctx->_Shader->Flags & GLSL_DUMP) {
+   if (st->ctx->_Shader->Flags & GLSL_DUMP) {
       _mesa_log("\n");
-      _mesa_log("GLSL IR for linked %s program %d:\n",
-             _mesa_shader_stage_to_string(shader->Stage),
+      _mesa_log("NIR IR for linked %s program %d:\n",
+             _mesa_shader_stage_to_string(prog->info.stage),
              shader_program->Name);
-      _mesa_print_ir(_mesa_get_log_file(), shader->ir, NULL);
+      nir_print_shader(nir, _mesa_get_log_file());
       _mesa_log("\n\n");
    }
-
-   prog->ExternalSamplersUsed = gl_external_samplers(prog);
-   _mesa_update_shader_textures_used(shader_program, prog);
-
-   nir_shader *nir = st_glsl_to_nir(st, prog, shader_program, shader->Stage);
-
-   set_st_program(prog, shader_program, nir);
-   prog->nir = nir;
 }
 
 static void
-st_nir_link_shaders(nir_shader **producer, nir_shader **consumer, bool scalar)
+st_nir_vectorize_io(nir_shader *producer, nir_shader *consumer)
 {
-   if (scalar) {
-      NIR_PASS_V(*producer, nir_lower_io_to_scalar_early, nir_var_shader_out);
-      NIR_PASS_V(*consumer, nir_lower_io_to_scalar_early, nir_var_shader_in);
+   NIR_PASS_V(producer, nir_lower_io_to_vector, nir_var_shader_out);
+   NIR_PASS_V(producer, nir_opt_combine_stores, nir_var_shader_out);
+   NIR_PASS_V(consumer, nir_lower_io_to_vector, nir_var_shader_in);
+
+   if (producer->info.stage != MESA_SHADER_TESS_CTRL) {
+      /* Calling lower_io_to_vector creates output variable writes with
+       * write-masks.  We only support these for TCS outputs, so for other
+       * stages, we need to call nir_lower_io_to_temporaries to get rid of
+       * them.  This, in turn, creates temporary variables and extra
+       * copy_deref intrinsics that we need to clean up.
+       */
+      NIR_PASS_V(producer, nir_lower_io_to_temporaries,
+                 nir_shader_get_entrypoint(producer), true, false);
+      NIR_PASS_V(producer, nir_lower_global_vars_to_local);
+      NIR_PASS_V(producer, nir_split_var_copies);
+      NIR_PASS_V(producer, nir_lower_var_copies);
    }
+}
 
-   nir_lower_io_arrays_to_elements(*producer, *consumer);
-
-   st_nir_opts(*producer, scalar);
-   st_nir_opts(*consumer, scalar);
+static void
+st_nir_link_shaders(nir_shader *producer, nir_shader *consumer)
+{
+   if (producer->options->lower_to_scalar) {
+      NIR_PASS_V(producer, nir_lower_io_to_scalar_early, nir_var_shader_out);
+      NIR_PASS_V(consumer, nir_lower_io_to_scalar_early, nir_var_shader_in);
+   }
 
-   if (nir_link_opt_varyings(*producer, *consumer))
-      st_nir_opts(*consumer, scalar);
+   nir_lower_io_arrays_to_elements(producer, consumer);
 
-   NIR_PASS_V(*producer, nir_remove_dead_variables, nir_var_shader_out);
-   NIR_PASS_V(*consumer, nir_remove_dead_variables, nir_var_shader_in);
+   st_nir_opts(producer);
+   st_nir_opts(consumer);
 
-   if (nir_remove_unused_varyings(*producer, *consumer)) {
-      NIR_PASS_V(*producer, nir_lower_global_vars_to_local);
-      NIR_PASS_V(*consumer, nir_lower_global_vars_to_local);
+   if (nir_link_opt_varyings(producer, consumer))
+      st_nir_opts(consumer);
 
-      /* The backend might not be able to handle indirects on
-       * temporaries so we need to lower indirects on any of the
-       * varyings we have demoted here.
-       *
-       * TODO: radeonsi shouldn't need to do this, however LLVM isn't
-       * currently smart enough to handle indirects without causing excess
-       * spilling causing the gpu to hang.
-       *
-       * See the following thread for more details of the problem:
-       * https://lists.freedesktop.org/archives/mesa-dev/2017-July/162106.html
-       */
-      nir_variable_mode indirect_mask = nir_var_function_temp;
+   NIR_PASS_V(producer, nir_remove_dead_variables, nir_var_shader_out, NULL);
+   NIR_PASS_V(consumer, nir_remove_dead_variables, nir_var_shader_in, NULL);
 
-      NIR_PASS_V(*producer, nir_lower_indirect_derefs, indirect_mask);
-      NIR_PASS_V(*consumer, nir_lower_indirect_derefs, indirect_mask);
+   if (nir_remove_unused_varyings(producer, consumer)) {
+      NIR_PASS_V(producer, nir_lower_global_vars_to_local);
+      NIR_PASS_V(consumer, nir_lower_global_vars_to_local);
 
-      st_nir_opts(*producer, scalar);
-      st_nir_opts(*consumer, scalar);
+      st_nir_opts(producer);
+      st_nir_opts(consumer);
 
-      /* Lowering indirects can cause varying to become unused.
+      /* Optimizations can cause varyings to become unused.
        * nir_compact_varyings() depends on all dead varyings being removed so
        * we need to call nir_remove_dead_variables() again here.
        */
-      NIR_PASS_V(*producer, nir_remove_dead_variables, nir_var_shader_out);
-      NIR_PASS_V(*consumer, nir_remove_dead_variables, nir_var_shader_in);
+      NIR_PASS_V(producer, nir_remove_dead_variables, nir_var_shader_out,
+                 NULL);
+      NIR_PASS_V(consumer, nir_remove_dead_variables, nir_var_shader_in,
+                 NULL);
    }
 }
 
@@ -740,62 +650,131 @@ st_link_nir(struct gl_context *ctx,
             struct gl_shader_program *shader_program)
 {
    struct st_context *st = st_context(ctx);
-   struct pipe_screen *screen = st->pipe->screen;
-   bool is_scalar[MESA_SHADER_STAGES];
+   struct gl_linked_shader *linked_shader[MESA_SHADER_STAGES];
+   unsigned num_shaders = 0;
 
-   unsigned last_stage = 0;
    for (unsigned i = 0; i < MESA_SHADER_STAGES; i++) {
-      struct gl_linked_shader *shader = shader_program->_LinkedShaders[i];
-      if (shader == NULL)
-         continue;
+      if (shader_program->_LinkedShaders[i])
+         linked_shader[num_shaders++] = shader_program->_LinkedShaders[i];
+   }
+
+   for (unsigned i = 0; i < num_shaders; i++) {
+      struct gl_linked_shader *shader = linked_shader[i];
+      const nir_shader_compiler_options *options =
+         st->ctx->Const.ShaderCompilerOptions[shader->Stage].NirOptions;
+      struct gl_program *prog = shader->Program;
+      struct st_program *stp = (struct st_program *)prog;
+
+      _mesa_copy_linked_program_data(shader_program, shader);
 
-      /* Determine scalar property of each shader stage */
-      enum pipe_shader_type type = pipe_shader_type_from_mesa(shader->Stage);
-      is_scalar[i] = screen->get_shader_param(screen, type,
-                                              PIPE_SHADER_CAP_SCALAR_ISA);
+      assert(!prog->nir);
+      stp->shader_program = shader_program;
+      stp->state.type = PIPE_SHADER_IR_NIR;
+
+      /* Parameters will be filled during NIR linking. */
+      prog->Parameters = _mesa_new_parameter_list();
+
+      if (shader_program->data->spirv) {
+         prog->nir = _mesa_spirv_to_nir(ctx, shader_program, shader->Stage, options);
+      } else {
+         validate_ir_tree(shader->ir);
+
+         if (ctx->_Shader->Flags & GLSL_DUMP) {
+            _mesa_log("\n");
+            _mesa_log("GLSL IR for linked %s program %d:\n",
+                      _mesa_shader_stage_to_string(shader->Stage),
+                      shader_program->Name);
+            _mesa_print_ir(_mesa_get_log_file(), shader->ir, NULL);
+            _mesa_log("\n\n");
+         }
 
-      st_nir_get_mesa_program(ctx, shader_program, shader);
-      last_stage = i;
+         prog->nir = glsl_to_nir(st->ctx, shader_program, shader->Stage, options);
+         st_nir_preprocess(st, prog, shader_program, shader->Stage);
+      }
 
-      if (is_scalar[i]) {
+      if (options->lower_to_scalar) {
          NIR_PASS_V(shader->Program->nir, nir_lower_load_const_to_scalar);
       }
    }
 
+   st_lower_patch_vertices_in(shader_program);
+
+   /* For SPIR-V, we have to perform the NIR linking before applying
+    * st_nir_preprocess.
+    */
+   if (shader_program->data->spirv) {
+      static const gl_nir_linker_options opts = {
+         true /* fill_parameters */
+      };
+      if (!gl_nir_link_spirv(ctx, shader_program, &opts))
+         return GL_FALSE;
+
+      nir_build_program_resource_list(ctx, shader_program, true);
+
+      for (unsigned i = 0; i < num_shaders; i++) {
+         struct gl_linked_shader *shader = linked_shader[i];
+         struct gl_program *prog = shader->Program;
+
+         prog->ExternalSamplersUsed = gl_external_samplers(prog);
+         _mesa_update_shader_textures_used(shader_program, prog);
+         st_nir_preprocess(st, prog, shader_program, shader->Stage);
+      }
+   }
+
    /* Linking the stages in the opposite order (from fragment to vertex)
     * ensures that inter-shader outputs written to in an earlier stage
     * are eliminated if they are (transitively) not used in a later
     * stage.
     */
-   int next = last_stage;
-   for (int i = next - 1; i >= 0; i--) {
-      struct gl_linked_shader *shader = shader_program->_LinkedShaders[i];
-      if (shader == NULL)
-         continue;
-
-      st_nir_link_shaders(&shader->Program->nir,
-                          &shader_program->_LinkedShaders[next]->Program->nir,
-                          is_scalar[i]);
-      next = i;
+   for (int i = num_shaders - 2; i >= 0; i--) {
+      st_nir_link_shaders(linked_shader[i]->Program->nir,
+                          linked_shader[i + 1]->Program->nir);
    }
+   /* Linking shaders also optimizes them. Separate shaders, compute shaders
+    * and shaders with a fixed-func VS or FS that don't need linking are
+    * optimized here.
+    */
+   if (num_shaders == 1)
+      st_nir_opts(linked_shader[0]->Program->nir);
 
-   int prev = -1;
-   for (unsigned i = 0; i < MESA_SHADER_STAGES; i++) {
-      struct gl_linked_shader *shader = shader_program->_LinkedShaders[i];
-      if (shader == NULL)
-         continue;
+   if (!shader_program->data->spirv) {
+      if (!gl_nir_link_glsl(ctx, shader_program))
+         return GL_FALSE;
+
+      for (unsigned i = 0; i < num_shaders; i++) {
+         struct gl_program *prog = linked_shader[i]->Program;
+         prog->ExternalSamplersUsed = gl_external_samplers(prog);
+         _mesa_update_shader_textures_used(shader_program, prog);
+      }
+
+      nir_build_program_resource_list(ctx, shader_program, false);
+   }
 
+   for (unsigned i = 0; i < num_shaders; i++) {
+      struct gl_linked_shader *shader = linked_shader[i];
       nir_shader *nir = shader->Program->nir;
 
+      /* This needs to run after the initial pass of nir_lower_vars_to_ssa, so
+       * that the buffer indices are constants in nir where they were
+       * constants in GLSL. */
+      NIR_PASS_V(nir, gl_nir_lower_buffers, shader_program);
+
+      /* Remap the locations to slots so those requiring two slots will occupy
+       * two locations. For instance, if the IR has a dvec3 attr0 in
+       * location 0 and a vec4 attr1 in location 1, then in NIR attr0 will
+       * use locations/slots 0 and 1, and attr1 will use location/slot 2.
+       */
+      if (nir->info.stage == MESA_SHADER_VERTEX && !shader_program->data->spirv)
+         nir_remap_dual_slot_attributes(nir, &shader->Program->DualSlotInputs);
+
       NIR_PASS_V(nir, st_nir_lower_wpos_ytransform, shader->Program,
                  st->pipe->screen);
 
       NIR_PASS_V(nir, nir_lower_system_values);
       NIR_PASS_V(nir, nir_lower_clip_cull_distance_arrays);
 
-      nir_shader_gather_info(nir, nir_shader_get_entrypoint(nir));
-      shader->Program->info = nir->info;
-      if (i == MESA_SHADER_VERTEX) {
+      st_shader_gather_info(nir, shader->Program);
+      if (shader->Stage == MESA_SHADER_VERTEX) {
          /* NIR expands dual-slot inputs out to two locations.  We need to
           * compact things back down GL-style single-slot inputs to avoid
           * confusing the state tracker.
@@ -805,9 +784,8 @@ st_link_nir(struct gl_context *ctx,
                                              shader->Program->DualSlotInputs);
       }
 
-      if (prev != -1) {
-         struct gl_program *prev_shader =
-            shader_program->_LinkedShaders[prev]->Program;
+      if (i >= 1) {
+         struct gl_program *prev_shader = linked_shader[i - 1]->Program;
 
          /* We can't use nir_compact_varyings with transform feedback, since
           * the pipe_stream_output->output_register field is based on the
@@ -815,30 +793,53 @@ st_link_nir(struct gl_context *ctx,
           */
          if (!(prev_shader->sh.LinkedTransformFeedback &&
                prev_shader->sh.LinkedTransformFeedback->NumVarying > 0))
-            nir_compact_varyings(shader_program->_LinkedShaders[prev]->Program->nir,
-                              nir, ctx->API != API_OPENGL_COMPAT);
+            nir_compact_varyings(prev_shader->nir,
+                                 nir, ctx->API != API_OPENGL_COMPAT);
+
+         if (ctx->Const.ShaderCompilerOptions[shader->Stage].NirOptions->vectorize_io)
+            st_nir_vectorize_io(prev_shader->nir, nir);
       }
-      prev = i;
    }
 
-   st_lower_patch_vertices_in(shader_program);
+   struct shader_info *prev_info = NULL;
 
-   for (unsigned i = 0; i < MESA_SHADER_STAGES; i++) {
-      struct gl_linked_shader *shader = shader_program->_LinkedShaders[i];
-      if (shader == NULL)
-         continue;
-
-      st_glsl_to_nir_post_opts(st, shader->Program, shader_program);
-
-      assert(shader->Program);
-      if (!ctx->Driver.ProgramStringNotify(ctx,
-                                           _mesa_shader_stage_to_program(i),
-                                           shader->Program)) {
-         _mesa_reference_program(ctx, &shader->Program, NULL);
-         return false;
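+   /* When a stage's compiler options request unify_interfaces, make each
+    * adjacent pair of stages report matching input/output masks (the
+    * tess-level built-ins are not propagated).
+    */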
+   for (unsigned i = 0; i < num_shaders; i++) {
+      struct gl_linked_shader *shader = linked_shader[i];
+      struct shader_info *info = &shader->Program->nir->info;
+
+      if (prev_info &&
+          ctx->Const.ShaderCompilerOptions[shader->Stage].NirOptions->unify_interfaces) {
+         prev_info->outputs_written |= info->inputs_read &
+            ~(VARYING_BIT_TESS_LEVEL_INNER | VARYING_BIT_TESS_LEVEL_OUTER);
+         info->inputs_read |= prev_info->outputs_written &
+            ~(VARYING_BIT_TESS_LEVEL_INNER | VARYING_BIT_TESS_LEVEL_OUTER);
+
+         prev_info->patch_outputs_written |= info->patch_inputs_read;
+         info->patch_inputs_read |= prev_info->patch_outputs_written;
       }
+      prev_info = info;
+   }
+
+   for (unsigned i = 0; i < num_shaders; i++) {
+      struct gl_linked_shader *shader = linked_shader[i];
+      struct gl_program *prog = shader->Program;
+      struct st_program *stp = st_program(prog);
+      st_glsl_to_nir_post_opts(st, prog, shader_program);
+
+      /* Initialize st_vertex_program members. */
+      if (shader->Stage == MESA_SHADER_VERTEX)
+         st_prepare_vertex_program(stp);
+
+      /* Get pipe_stream_output_info. */
+      if (shader->Stage == MESA_SHADER_VERTEX ||
+          shader->Stage == MESA_SHADER_TESS_EVAL ||
+          shader->Stage == MESA_SHADER_GEOMETRY)
+         st_translate_stream_output_info(prog);
 
-      nir_sweep(shader->Program->nir);
+      st_store_ir_in_disk_cache(st, prog, true);
+
+      st_release_variants(st, stp);
+      st_finalize_program(st, prog);
 
       /* The GLSL IR won't be needed anymore. */
       ralloc_free(shader->ir);
@@ -852,37 +853,28 @@ void
 st_nir_assign_varying_locations(struct st_context *st, nir_shader *nir)
 {
    if (nir->info.stage == MESA_SHADER_VERTEX) {
-      /* Needs special handling so drvloc matches the vbo state: */
-      st_nir_assign_vs_in_locations(nir);
-      /* Re-lower global vars, to deal with any dead VS inputs. */
-      NIR_PASS_V(nir, nir_lower_global_vars_to_local);
-
-      sort_varyings(&nir->outputs);
-      st_nir_assign_var_locations(&nir->outputs,
+      nir_assign_io_var_locations(nir, nir_var_shader_out,
                                   &nir->num_outputs,
                                   nir->info.stage);
-      st_nir_fixup_varying_slots(st, &nir->outputs);
+      st_nir_fixup_varying_slots(st, nir, nir_var_shader_out);
    } else if (nir->info.stage == MESA_SHADER_GEOMETRY ||
               nir->info.stage == MESA_SHADER_TESS_CTRL ||
               nir->info.stage == MESA_SHADER_TESS_EVAL) {
-      sort_varyings(&nir->inputs);
-      st_nir_assign_var_locations(&nir->inputs,
+      nir_assign_io_var_locations(nir, nir_var_shader_in,
                                   &nir->num_inputs,
                                   nir->info.stage);
-      st_nir_fixup_varying_slots(st, &nir->inputs);
+      st_nir_fixup_varying_slots(st, nir, nir_var_shader_in);
 
-      sort_varyings(&nir->outputs);
-      st_nir_assign_var_locations(&nir->outputs,
+      nir_assign_io_var_locations(nir, nir_var_shader_out,
                                   &nir->num_outputs,
                                   nir->info.stage);
-      st_nir_fixup_varying_slots(st, &nir->outputs);
+      st_nir_fixup_varying_slots(st, nir, nir_var_shader_out);
    } else if (nir->info.stage == MESA_SHADER_FRAGMENT) {
-      sort_varyings(&nir->inputs);
-      st_nir_assign_var_locations(&nir->inputs,
+      nir_assign_io_var_locations(nir, nir_var_shader_in,
                                   &nir->num_inputs,
                                   nir->info.stage);
-      st_nir_fixup_varying_slots(st, &nir->inputs);
-      st_nir_assign_var_locations(&nir->outputs,
+      st_nir_fixup_varying_slots(st, nir, nir_var_shader_in);
+      nir_assign_io_var_locations(nir, nir_var_shader_out,
                                   &nir->num_outputs,
                                   nir->info.stage);
    } else if (nir->info.stage == MESA_SHADER_COMPUTE) {
@@ -905,6 +897,34 @@ st_nir_lower_samplers(struct pipe_screen *screen, nir_shader *nir,
    if (prog) {
       prog->info.textures_used = nir->info.textures_used;
       prog->info.textures_used_by_txf = nir->info.textures_used_by_txf;
+      prog->info.images_used = nir->info.images_used;
+   }
+}
+
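+/* nir_lower_io type-size callback for packed driver uniform storage:
+ * uniforms are counted in dword slots.
+ */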
+static int
+st_packed_uniforms_type_size(const struct glsl_type *type, bool bindless)
+{
+   return glsl_count_dword_slots(type, bindless);
+}
+
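+/* nir_lower_io type-size callback for the traditional layout: uniforms are
+ * counted in vec4 slots.
+ */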
+static int
+st_unpacked_uniforms_type_size(const struct glsl_type *type, bool bindless)
+{
+   return glsl_count_vec4_slots(type, false, bindless);
+}
+
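+/* Lower uniform variable access to offset-based intrinsics. With packed
+ * driver uniform storage the offsets are in dwords and default uniforms are
+ * then turned into UBO loads; otherwise the classic vec4 slot layout is kept.
+ */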
+void
+st_nir_lower_uniforms(struct st_context *st, nir_shader *nir)
+{
+   if (st->ctx->Const.PackedDriverUniformStorage) {
+      NIR_PASS_V(nir, nir_lower_io, nir_var_uniform,
+                 st_packed_uniforms_type_size,
+                 (nir_lower_io_options)0);
+      NIR_PASS_V(nir, nir_lower_uniforms_to_ubo, 4);
+   } else {
+      NIR_PASS_V(nir, nir_lower_io, nir_var_uniform,
+                 st_unpacked_uniforms_type_size,
+                 (nir_lower_io_options)0);
    }
 }
 
@@ -913,41 +933,27 @@ st_nir_lower_samplers(struct pipe_screen *screen, nir_shader *nir,
  */
 void
 st_finalize_nir(struct st_context *st, struct gl_program *prog,
-                struct gl_shader_program *shader_program, nir_shader *nir)
+                struct gl_shader_program *shader_program,
+                nir_shader *nir, bool finalize_by_driver)
 {
    struct pipe_screen *screen = st->pipe->screen;
-   const nir_shader_compiler_options *options =
-      st->ctx->Const.ShaderCompilerOptions[prog->info.stage].NirOptions;
 
    NIR_PASS_V(nir, nir_split_var_copies);
    NIR_PASS_V(nir, nir_lower_var_copies);
-   if (options->lower_all_io_to_temps ||
-       options->lower_all_io_to_elements ||
-       nir->info.stage == MESA_SHADER_VERTEX ||
-       nir->info.stage == MESA_SHADER_GEOMETRY) {
-      NIR_PASS_V(nir, nir_lower_io_arrays_to_elements_no_indirects, false);
-   } else if (nir->info.stage == MESA_SHADER_FRAGMENT) {
-      NIR_PASS_V(nir, nir_lower_io_arrays_to_elements_no_indirects, true);
-   }
 
    st_nir_assign_varying_locations(st, nir);
-
-   NIR_PASS_V(nir, nir_lower_atomics_to_ssbo,
-         st->ctx->Const.Program[nir->info.stage].MaxAtomicBuffers);
-
-   st_nir_assign_uniform_locations(st->ctx, prog,
-                                   &nir->uniforms);
+   st_nir_assign_uniform_locations(st->ctx, prog, nir);
 
    /* Set num_uniforms in number of attribute slots (vec4s) */
    nir->num_uniforms = DIV_ROUND_UP(prog->Parameters->NumParameterValues, 4);
 
-   if (st->ctx->Const.PackedDriverUniformStorage) {
-      NIR_PASS_V(nir, nir_lower_io, nir_var_uniform, st_glsl_type_dword_size,
-                 (nir_lower_io_options)0);
-      NIR_PASS_V(nir, nir_lower_uniforms_to_ubo, 4);
-   }
-
+   st_nir_lower_uniforms(st, nir);
    st_nir_lower_samplers(screen, nir, shader_program, prog);
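+   /* Drivers that can't consume NIR image derefs directly get image access
+    * lowered here.
+    */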
+   if (!screen->get_param(screen, PIPE_CAP_NIR_IMAGES_AS_DEREF))
+      NIR_PASS_V(nir, gl_nir_lower_images, false);
+
+   if (finalize_by_driver && screen->finalize_nir)
+      screen->finalize_nir(screen, nir, false);
 }
 
 } /* extern "C" */