nir: move nir_shader_info to a common compiler header
diff --git a/src/mesa/drivers/dri/i965/brw_nir.c b/src/mesa/drivers/dri/i965/brw_nir.c
index ed836bf36bc1863240db2e1ab7e4a356186fb1e4..04dbf01f6d356229b8c0fda9bcff2bfa4dab02f2 100644
--- a/src/mesa/drivers/dri/i965/brw_nir.c
+++ b/src/mesa/drivers/dri/i965/brw_nir.c
 
 #include "brw_nir.h"
 #include "brw_shader.h"
-#include "compiler/nir/glsl_to_nir.h"
+#include "compiler/glsl_types.h"
 #include "compiler/nir/nir_builder.h"
-#include "program/prog_to_nir.h"
 
 static bool
 is_input(nir_intrinsic_instr *intrin)
 {
    return intrin->intrinsic == nir_intrinsic_load_input ||
-          intrin->intrinsic == nir_intrinsic_load_per_vertex_input;
+          intrin->intrinsic == nir_intrinsic_load_per_vertex_input ||
+          intrin->intrinsic == nir_intrinsic_load_interpolated_input;
 }
 
 static bool
@@ -54,30 +54,24 @@ is_output(nir_intrinsic_instr *intrin)
  * we don't know what part of a compound variable is accessed, we allocate
  * storage for the entire thing.
  */
-struct add_const_offset_to_base_params {
-   nir_builder b;
-   nir_variable_mode mode;
-};
 
 static bool
-add_const_offset_to_base_block(nir_block *block, void *closure)
+add_const_offset_to_base_block(nir_block *block, nir_builder *b,
+                               nir_variable_mode mode)
 {
-   struct add_const_offset_to_base_params *params = closure;
-   nir_builder *b = &params->b;
-
-   nir_foreach_instr_safe(block, instr) {
+   nir_foreach_instr_safe(instr, block) {
       if (instr->type != nir_instr_type_intrinsic)
          continue;
 
       nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
 
-      if ((params->mode == nir_var_shader_in && is_input(intrin)) ||
-          (params->mode == nir_var_shader_out && is_output(intrin))) {
+      if ((mode == nir_var_shader_in && is_input(intrin)) ||
+          (mode == nir_var_shader_out && is_output(intrin))) {
          nir_src *offset = nir_get_io_offset_src(intrin);
          nir_const_value *const_offset = nir_src_as_const_value(*offset);
 
          if (const_offset) {
-            intrin->const_index[0] += const_offset->u[0];
+            intrin->const_index[0] += const_offset->u32[0];
             b->cursor = nir_before_instr(&intrin->instr);
             nir_instr_rewrite_src(&intrin->instr, offset,
                                   nir_src_for_ssa(nir_imm_int(b, 0)));
@@ -90,22 +84,21 @@ add_const_offset_to_base_block(nir_block *block, void *closure)
 static void
 add_const_offset_to_base(nir_shader *nir, nir_variable_mode mode)
 {
-   struct add_const_offset_to_base_params params = { .mode = mode };
-
-   nir_foreach_function(nir, f) {
+   nir_foreach_function(f, nir) {
       if (f->impl) {
-         nir_builder_init(&params.b, f->impl);
-         nir_foreach_block(f->impl, add_const_offset_to_base_block, &params);
+         nir_builder b;
+         nir_builder_init(&b, f->impl);
+         nir_foreach_block(block, f->impl) {
+            add_const_offset_to_base_block(block, &b, mode);
+         }
       }
    }
 }
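
Illustration of the transform (hypothetical values; at this point in history
the base lives in const_index[0]):

   /* before: load_input with base 1 and a constant offset of 2      */
   /*   const_index[0] == 1, offset source == nir_imm_int(b, 2)      */
   /* after: the constant is folded into the base, offset reset to 0 */
   /*   const_index[0] == 3, offset source == nir_imm_int(b, 0)      */

A non-constant offset is left untouched, which is why storage must then be
allocated for the entire compound variable.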
 
 static bool
-remap_vs_attrs(nir_block *block, void *closure)
+remap_vs_attrs(nir_block *block, shader_info *nir_info)
 {
-   GLbitfield64 inputs_read = *((GLbitfield64 *) closure);
-
-   nir_foreach_instr(block, instr) {
+   nir_foreach_instr(instr, block) {
       if (instr->type != nir_instr_type_intrinsic)
          continue;
 
@@ -118,20 +111,20 @@ remap_vs_attrs(nir_block *block, void *closure)
           * before it and counting the bits.
           */
          int attr = intrin->const_index[0];
-         int slot = _mesa_bitcount_64(inputs_read & BITFIELD64_MASK(attr));
-
-         intrin->const_index[0] = 4 * slot;
+         int slot = _mesa_bitcount_64(nir_info->inputs_read &
+                                      BITFIELD64_MASK(attr));
+         int dslot = _mesa_bitcount_64(nir_info->double_inputs_read &
+                                       BITFIELD64_MASK(attr));
+         intrin->const_index[0] = 4 * (slot + dslot);
       }
    }
    return true;
 }
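
A worked example of the bit-counting above, with made-up input masks: if
inputs_read has only VERT_ATTRIB_POS (bit 0) and VERT_ATTRIB_GENERIC0
(bit 16) set, and double_inputs_read is 0, then for an access to
attribute 16:

   slot  = _mesa_bitcount_64(inputs_read & BITFIELD64_MASK(16));  /* == 1 */
   dslot = 0;
   intrin->const_index[0] = 4 * (slot + dslot);                   /* == 4 */

so the attribute lands in the second vec4-sized slot, and every
double-precision input at a lower VERT_ATTRIB_* index would shift it by one
additional slot.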
 
 static bool
-remap_inputs_with_vue_map(nir_block *block, void *closure)
+remap_inputs_with_vue_map(nir_block *block, const struct brw_vue_map *vue_map)
 {
-   const struct brw_vue_map *vue_map = closure;
-
-   nir_foreach_instr(block, instr) {
+   nir_foreach_instr(instr, block) {
       if (instr->type != nir_instr_type_intrinsic)
          continue;
 
@@ -147,27 +140,21 @@ remap_inputs_with_vue_map(nir_block *block, void *closure)
    return true;
 }
 
-struct remap_patch_urb_offsets_state {
-   nir_builder b;
-   struct brw_vue_map vue_map;
-};
-
 static bool
-remap_patch_urb_offsets(nir_block *block, void *closure)
+remap_patch_urb_offsets(nir_block *block, nir_builder *b,
+                        const struct brw_vue_map *vue_map)
 {
-   struct remap_patch_urb_offsets_state *state = closure;
-
-   nir_foreach_instr_safe(block, instr) {
+   nir_foreach_instr_safe(instr, block) {
       if (instr->type != nir_instr_type_intrinsic)
          continue;
 
       nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
 
-      gl_shader_stage stage = state->b.shader->stage;
+      gl_shader_stage stage = b->shader->stage;
 
       if ((stage == MESA_SHADER_TESS_CTRL && is_output(intrin)) ||
           (stage == MESA_SHADER_TESS_EVAL && is_input(intrin))) {
-         int vue_slot = state->vue_map.varying_to_slot[intrin->const_index[0]];
+         int vue_slot = vue_map->varying_to_slot[intrin->const_index[0]];
          assert(vue_slot != -1);
          intrin->const_index[0] = vue_slot;
 
@@ -175,23 +162,23 @@ remap_patch_urb_offsets(nir_block *block, void *closure)
          if (vertex) {
             nir_const_value *const_vertex = nir_src_as_const_value(*vertex);
             if (const_vertex) {
-               intrin->const_index[0] += const_vertex->u[0] *
-                                         state->vue_map.num_per_vertex_slots;
+               intrin->const_index[0] += const_vertex->u32[0] *
+                                         vue_map->num_per_vertex_slots;
             } else {
-               state->b.cursor = nir_before_instr(&intrin->instr);
+               b->cursor = nir_before_instr(&intrin->instr);
 
                /* Multiply by the number of per-vertex slots. */
                nir_ssa_def *vertex_offset =
-                  nir_imul(&state->b,
-                           nir_ssa_for_src(&state->b, *vertex, 1),
-                           nir_imm_int(&state->b,
-                                       state->vue_map.num_per_vertex_slots));
+                  nir_imul(b,
+                           nir_ssa_for_src(b, *vertex, 1),
+                           nir_imm_int(b,
+                                       vue_map->num_per_vertex_slots));
 
                /* Add it to the existing offset */
                nir_src *offset = nir_get_io_offset_src(intrin);
                nir_ssa_def *total_offset =
-                  nir_iadd(&state->b, vertex_offset,
-                           nir_ssa_for_src(&state->b, *offset, 1));
+                  nir_iadd(b, vertex_offset,
+                           nir_ssa_for_src(b, *offset, 1));
 
                nir_instr_rewrite_src(&intrin->instr, offset,
                                      nir_src_for_ssa(total_offset));
@@ -202,9 +189,8 @@ remap_patch_urb_offsets(nir_block *block, void *closure)
    return true;
 }
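
For a constant vertex index the remap above reduces to a multiply-add; a
sketch with invented numbers:

   /* vue_slot == 8, num_per_vertex_slots == 4, vertex index == 2 */
   intrin->const_index[0] = 8 + 2 * 4;   /* == 16 */

For a non-constant vertex index the same value is built at run time as
total_offset = vertex * num_per_vertex_slots + offset, via the
nir_imul/nir_iadd sequence emitted through the builder.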
 
-static void
+void
 brw_nir_lower_vs_inputs(nir_shader *nir,
-                        const struct brw_device_info *devinfo,
                         bool is_scalar,
                         bool use_legacy_snorm_formula,
                         const uint8_t *vs_attrib_wa_flags)
@@ -214,11 +200,11 @@ brw_nir_lower_vs_inputs(nir_shader *nir,
       var->data.driver_location = var->data.location;
    }
 
-   /* Now use nir_lower_io to walk dereference chains.  Attribute arrays
-    * are loaded as one vec4 per element (or matrix column), so we use
-    * type_size_vec4 here.
+   /* Now use nir_lower_io to walk dereference chains.  Attribute arrays are
+    * loaded as one vec4 per element (or matrix column), or as one dvec4 when
+    * the element type is double-precision.
     */
-   nir_lower_io(nir, nir_var_shader_in, type_size_vec4);
+   nir_lower_io(nir, nir_var_shader_in, type_size_vs_input, 0);
 
    /* This pass needs actual constants */
    nir_opt_constant_folding(nir);
@@ -229,237 +215,168 @@ brw_nir_lower_vs_inputs(nir_shader *nir,
                                        vs_attrib_wa_flags);
 
    if (is_scalar) {
-      /* Finally, translate VERT_ATTRIB_* values into the actual registers.
-       *
-       * Note that we can use nir->info.inputs_read instead of
-       * key->inputs_read since the two are identical aside from Gen4-5
-       * edge flag differences.
-       */
-      GLbitfield64 inputs_read = nir->info.inputs_read;
+      /* Finally, translate VERT_ATTRIB_* values into the actual registers. */
 
-      nir_foreach_function(nir, function) {
+      nir_foreach_function(function, nir) {
          if (function->impl) {
-            nir_foreach_block(function->impl, remap_vs_attrs, &inputs_read);
+            nir_foreach_block(block, function->impl) {
+               remap_vs_attrs(block, &nir->info);
+            }
          }
       }
    }
 }
 
-static void
-brw_nir_lower_vue_inputs(nir_shader *nir,
-                         const struct brw_device_info *devinfo,
-                         bool is_scalar)
+void
+brw_nir_lower_vue_inputs(nir_shader *nir, bool is_scalar,
+                         const struct brw_vue_map *vue_map)
 {
-   if (!is_scalar && nir->stage == MESA_SHADER_GEOMETRY) {
-      foreach_list_typed(nir_variable, var, node, &nir->inputs) {
-         var->data.driver_location = var->data.location;
-      }
-      nir_lower_io(nir, nir_var_shader_in, type_size_vec4);
-   } else {
-      /* The GLSL linker will have already matched up GS inputs and
-       * the outputs of prior stages.  The driver does extend VS outputs
-       * in some cases, but only for legacy OpenGL or Gen4-5 hardware,
-       * neither of which offer geometry shader support.  So we can
-       * safely ignore that.
-       *
-       * For SSO pipelines, we use a fixed VUE map layout based on variable
-       * locations, so we can rely on rendezvous-by-location to make this
-       * work.
-       *
-       * However, we need to ignore VARYING_SLOT_PRIMITIVE_ID, as it's not
-       * written by previous stages and shows up via payload magic.
-       */
-      struct brw_vue_map input_vue_map;
-      GLbitfield64 inputs_read =
-         nir->info.inputs_read & ~VARYING_BIT_PRIMITIVE_ID;
-      brw_compute_vue_map(devinfo, &input_vue_map, inputs_read,
-                          nir->info.separate_shader ||
-                          nir->stage == MESA_SHADER_TESS_CTRL);
-
-      foreach_list_typed(nir_variable, var, node, &nir->inputs) {
-         var->data.driver_location = var->data.location;
-      }
+   foreach_list_typed(nir_variable, var, node, &nir->inputs) {
+      var->data.driver_location = var->data.location;
+   }
 
-      /* Inputs are stored in vec4 slots, so use type_size_vec4(). */
-      nir_lower_io(nir, nir_var_shader_in, type_size_vec4);
+   /* Inputs are stored in vec4 slots, so use type_size_vec4(). */
+   nir_lower_io(nir, nir_var_shader_in, type_size_vec4, 0);
 
+   if (is_scalar || nir->stage != MESA_SHADER_GEOMETRY) {
       /* This pass needs actual constants */
       nir_opt_constant_folding(nir);
 
       add_const_offset_to_base(nir, nir_var_shader_in);
 
-      nir_foreach_function(nir, function) {
+      nir_foreach_function(function, nir) {
          if (function->impl) {
-            nir_foreach_block(function->impl, remap_inputs_with_vue_map,
-                              &input_vue_map);
+            nir_foreach_block(block, function->impl) {
+               remap_inputs_with_vue_map(block, vue_map);
+            }
          }
       }
    }
 }
 
-static void
-brw_nir_lower_tes_inputs(nir_shader *nir)
+void
+brw_nir_lower_tes_inputs(nir_shader *nir, const struct brw_vue_map *vue_map)
 {
-   struct remap_patch_urb_offsets_state state;
-   brw_compute_tess_vue_map(&state.vue_map,
-                            nir->info.inputs_read & ~VARYING_BIT_PRIMITIVE_ID,
-                            nir->info.patch_inputs_read);
-
    foreach_list_typed(nir_variable, var, node, &nir->inputs) {
       var->data.driver_location = var->data.location;
    }
 
-   nir_lower_io(nir, nir_var_shader_in, type_size_vec4);
+   nir_lower_io(nir, nir_var_shader_in, type_size_vec4, 0);
 
    /* This pass needs actual constants */
    nir_opt_constant_folding(nir);
 
    add_const_offset_to_base(nir, nir_var_shader_in);
 
-   nir_foreach_function(nir, function) {
+   nir_foreach_function(function, nir) {
       if (function->impl) {
-         nir_builder_init(&state.b, function->impl);
-         nir_foreach_block(function->impl, remap_patch_urb_offsets, &state);
+         nir_builder b;
+         nir_builder_init(&b, function->impl);
+         nir_foreach_block(block, function->impl) {
+            remap_patch_urb_offsets(block, &b, vue_map);
+         }
       }
    }
 }
 
-static void
-brw_nir_lower_fs_inputs(nir_shader *nir)
+void
+brw_nir_lower_fs_inputs(nir_shader *nir,
+                        const struct gen_device_info *devinfo,
+                        const struct brw_wm_prog_key *key)
 {
-   nir_assign_var_locations(&nir->inputs, &nir->num_inputs, type_size_scalar);
-   nir_lower_io(nir, nir_var_shader_in, type_size_scalar);
-}
+   foreach_list_typed(nir_variable, var, node, &nir->inputs) {
+      var->data.driver_location = var->data.location;
 
-static void
-brw_nir_lower_inputs(nir_shader *nir,
-                     const struct brw_device_info *devinfo,
-                     bool is_scalar,
-                     bool use_legacy_snorm_formula,
-                     const uint8_t *vs_attrib_wa_flags)
-{
-   switch (nir->stage) {
-   case MESA_SHADER_VERTEX:
-      brw_nir_lower_vs_inputs(nir, devinfo, is_scalar, use_legacy_snorm_formula,
-                              vs_attrib_wa_flags);
-      break;
-   case MESA_SHADER_TESS_CTRL:
-   case MESA_SHADER_GEOMETRY:
-      brw_nir_lower_vue_inputs(nir, devinfo, is_scalar);
-      break;
-   case MESA_SHADER_TESS_EVAL:
-      brw_nir_lower_tes_inputs(nir);
-      break;
-   case MESA_SHADER_FRAGMENT:
-      assert(is_scalar);
-      brw_nir_lower_fs_inputs(nir);
-      break;
-   case MESA_SHADER_COMPUTE:
-      /* Compute shaders have no inputs. */
-      assert(exec_list_is_empty(&nir->inputs));
-      break;
-   default:
-      unreachable("unsupported shader stage");
+      /* Apply default interpolation mode.
+       *
+       * Everything defaults to smooth except for the legacy GL color
+       * built-in variables, which might be flat depending on API state.
+       */
+      if (var->data.interpolation == INTERP_MODE_NONE) {
+         const bool flat = key->flat_shade &&
+            (var->data.location == VARYING_SLOT_COL0 ||
+             var->data.location == VARYING_SLOT_COL1);
+
+         var->data.interpolation = flat ? INTERP_MODE_FLAT
+                                        : INTERP_MODE_SMOOTH;
+      }
+
+      /* On Ironlake and below, there is only one interpolation mode.
+       * Centroid interpolation doesn't mean anything on this hardware --
+       * there is no multisampling.
+       */
+      if (devinfo->gen < 6) {
+         var->data.centroid = false;
+         var->data.sample = false;
+      }
    }
+
+   nir_lower_io_options lower_io_options = 0;
+   if (key->persample_interp)
+      lower_io_options |= nir_lower_io_force_sample_interpolation;
+
+   nir_lower_io(nir, nir_var_shader_in, type_size_vec4, lower_io_options);
+
+   /* This pass needs actual constants */
+   nir_opt_constant_folding(nir);
+
+   add_const_offset_to_base(nir, nir_var_shader_in);
 }
 
-static void
+void
 brw_nir_lower_vue_outputs(nir_shader *nir,
                           bool is_scalar)
 {
-   if (is_scalar) {
-      nir_assign_var_locations(&nir->outputs, &nir->num_outputs,
-                               type_size_vec4_times_4);
-      nir_lower_io(nir, nir_var_shader_out, type_size_vec4_times_4);
-   } else {
-      nir_foreach_variable(var, &nir->outputs)
-         var->data.driver_location = var->data.location;
-      nir_lower_io(nir, nir_var_shader_out, type_size_vec4);
+   nir_foreach_variable(var, &nir->outputs) {
+      var->data.driver_location = var->data.location;
    }
+
+   nir_lower_io(nir, nir_var_shader_out, type_size_vec4, 0);
 }
 
-static void
-brw_nir_lower_tcs_outputs(nir_shader *nir)
+void
+brw_nir_lower_tcs_outputs(nir_shader *nir, const struct brw_vue_map *vue_map)
 {
-   struct remap_patch_urb_offsets_state state;
-   brw_compute_tess_vue_map(&state.vue_map, nir->info.outputs_written,
-                            nir->info.patch_outputs_written);
-
    nir_foreach_variable(var, &nir->outputs) {
       var->data.driver_location = var->data.location;
    }
 
-   nir_lower_io(nir, nir_var_shader_out, type_size_vec4);
+   nir_lower_io(nir, nir_var_shader_out, type_size_vec4, 0);
 
    /* This pass needs actual constants */
    nir_opt_constant_folding(nir);
 
    add_const_offset_to_base(nir, nir_var_shader_out);
 
-   nir_foreach_function(nir, function) {
+   nir_foreach_function(function, nir) {
       if (function->impl) {
-         nir_builder_init(&state.b, function->impl);
-         nir_foreach_block(function->impl, remap_patch_urb_offsets, &state);
+         nir_builder b;
+         nir_builder_init(&b, function->impl);
+         nir_foreach_block(block, function->impl) {
+            remap_patch_urb_offsets(block, &b, vue_map);
+         }
       }
    }
 }
 
-static void
+void
 brw_nir_lower_fs_outputs(nir_shader *nir)
 {
-   nir_assign_var_locations(&nir->outputs, &nir->num_outputs,
-                            type_size_scalar);
-   nir_lower_io(nir, nir_var_shader_out, type_size_scalar);
-}
-
-static void
-brw_nir_lower_outputs(nir_shader *nir, bool is_scalar)
-{
-   switch (nir->stage) {
-   case MESA_SHADER_VERTEX:
-   case MESA_SHADER_TESS_EVAL:
-   case MESA_SHADER_GEOMETRY:
-      brw_nir_lower_vue_outputs(nir, is_scalar);
-      break;
-   case MESA_SHADER_TESS_CTRL:
-      brw_nir_lower_tcs_outputs(nir);
-      break;
-   case MESA_SHADER_FRAGMENT:
-      brw_nir_lower_fs_outputs(nir);
-      break;
-   case MESA_SHADER_COMPUTE:
-      /* Compute shaders have no outputs. */
-      assert(exec_list_is_empty(&nir->outputs));
-      break;
-   default:
-      unreachable("unsupported shader stage");
+   nir_foreach_variable(var, &nir->outputs) {
+      var->data.driver_location =
+         SET_FIELD(var->data.index, BRW_NIR_FRAG_OUTPUT_INDEX) |
+         SET_FIELD(var->data.location, BRW_NIR_FRAG_OUTPUT_LOCATION);
    }
-}
-
-static int
-type_size_scalar_bytes(const struct glsl_type *type)
-{
-   return type_size_scalar(type) * 4;
-}
 
-static int
-type_size_vec4_bytes(const struct glsl_type *type)
-{
-   return type_size_vec4(type) * 16;
+   nir_lower_io(nir, nir_var_shader_out, type_size_dvec4, 0);
 }
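
For reference, a hypothetical dual-source blend output (location
FRAG_RESULT_COLOR, index 1) packs as:

   var->data.driver_location =
      SET_FIELD(1, BRW_NIR_FRAG_OUTPUT_INDEX) |
      SET_FIELD(FRAG_RESULT_COLOR, BRW_NIR_FRAG_OUTPUT_LOCATION);

and the scalar backend presumably recovers both halves with GET_FIELD()
when setting up the framebuffer writes.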
 
-static void
-brw_nir_lower_uniforms(nir_shader *nir, bool is_scalar)
+void
+brw_nir_lower_cs_shared(nir_shader *nir)
 {
-   if (is_scalar) {
-      nir_assign_var_locations(&nir->uniforms, &nir->num_uniforms,
-                               type_size_scalar_bytes);
-      nir_lower_io(nir, nir_var_uniform, type_size_scalar_bytes);
-   } else {
-      nir_assign_var_locations(&nir->uniforms, &nir->num_uniforms,
-                               type_size_vec4_bytes);
-      nir_lower_io(nir, nir_var_uniform, type_size_vec4_bytes);
-   }
+   nir_assign_var_locations(&nir->shared, &nir->num_shared, 0,
+                            type_size_scalar_bytes);
+   nir_lower_io(nir, nir_var_shared, type_size_scalar_bytes, 0);
 }
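
type_size_scalar_bytes() here is the byte-granular helper whose local
definition is removed above (it evidently now lives in a shared header);
it is equivalent to:

   static int
   type_size_scalar_bytes(const struct glsl_type *type)
   {
      return type_size_scalar(type) * 4;
   }

so e.g. a hypothetical "shared float data[64]" occupies 256 bytes, and
nir_lower_io produces byte offsets into the shared-local segment.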
 
 #define OPT(pass, ...) ({                                  \
@@ -481,24 +398,34 @@ nir_optimize(nir_shader *nir, bool is_scalar)
       OPT_V(nir_lower_vars_to_ssa);
 
       if (is_scalar) {
-         OPT_V(nir_lower_alu_to_scalar);
+         OPT(nir_lower_alu_to_scalar);
       }
 
       OPT(nir_copy_prop);
 
       if (is_scalar) {
-         OPT_V(nir_lower_phis_to_scalar);
+         OPT(nir_lower_phis_to_scalar);
       }
 
       OPT(nir_copy_prop);
       OPT(nir_opt_dce);
       OPT(nir_opt_cse);
-      OPT(nir_opt_peephole_select);
+      OPT(nir_opt_peephole_select, 0);
       OPT(nir_opt_algebraic);
       OPT(nir_opt_constant_folding);
       OPT(nir_opt_dead_cf);
       OPT(nir_opt_remove_phis);
       OPT(nir_opt_undef);
+      OPT_V(nir_lower_doubles, nir_lower_drcp |
+                               nir_lower_dsqrt |
+                               nir_lower_drsq |
+                               nir_lower_dtrunc |
+                               nir_lower_dfloor |
+                               nir_lower_dceil |
+                               nir_lower_dfract |
+                               nir_lower_dround_even |
+                               nir_lower_dmod);
+      OPT_V(nir_lower_double_pack);
    } while (progress);
 
    return nir;
@@ -514,16 +441,23 @@ nir_optimize(nir_shader *nir, bool is_scalar)
  * is_scalar = true to scalarize everything prior to code gen.
  */
 nir_shader *
-brw_preprocess_nir(nir_shader *nir, bool is_scalar)
+brw_preprocess_nir(const struct brw_compiler *compiler, nir_shader *nir)
 {
    bool progress; /* Written by OPT and OPT_V */
    (void)progress;
 
+   const bool is_scalar = compiler->scalar_stage[nir->stage];
+
    if (nir->stage == MESA_SHADER_GEOMETRY)
       OPT(nir_lower_gs_intrinsics);
 
+   if (compiler->precise_trig)
+      OPT(brw_nir_apply_trig_workarounds);
+
    static const nir_lower_tex_options tex_options = {
       .lower_txp = ~0,
+      .lower_txf_offset = true,
+      .lower_rect_offset = true,
    };
 
    OPT(nir_lower_tex, &tex_options);
@@ -545,25 +479,7 @@ brw_preprocess_nir(nir_shader *nir, bool is_scalar)
    /* Get rid of split copies */
    nir = nir_optimize(nir, is_scalar);
 
-   OPT(nir_remove_dead_variables);
-
-   return nir;
-}
-
-/** Lower input and output loads and stores for i965. */
-nir_shader *
-brw_nir_lower_io(nir_shader *nir,
-                 const struct brw_device_info *devinfo,
-                 bool is_scalar,
-                 bool use_legacy_snorm_formula,
-                 const uint8_t *vs_attrib_wa_flags)
-{
-   bool progress; /* Written by OPT and OPT_V */
-   (void)progress;
-
-   OPT_V(brw_nir_lower_inputs, devinfo, is_scalar,
-         use_legacy_snorm_formula, vs_attrib_wa_flags);
-   OPT_V(brw_nir_lower_outputs, is_scalar);
+   OPT(nir_remove_dead_variables, nir_var_local);
 
    return nir;
 }
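
A sketch of caller-side usage after the signature change (caller names
assumed, not taken from this diff):

   nir_shader *nir = ...;   /* e.g. from glsl_to_nir() */
   nir = brw_preprocess_nir(compiler, nir);
   ...
   nir = brw_postprocess_nir(nir, devinfo,
                             compiler->scalar_stage[nir->stage]);

The scalar-vs-vec4 decision now comes from compiler->scalar_stage[] rather
than an is_scalar parameter threaded through by every caller.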
@@ -577,7 +493,7 @@ brw_nir_lower_io(nir_shader *nir,
  */
 nir_shader *
 brw_postprocess_nir(nir_shader *nir,
-                    const struct brw_device_info *devinfo,
+                    const struct gen_device_info *devinfo,
                     bool is_scalar)
 {
    bool debug_enabled =
@@ -603,7 +519,7 @@ brw_postprocess_nir(nir_shader *nir,
 
    if (unlikely(debug_enabled)) {
       /* Re-index SSA defs so we print more sensible numbers. */
-      nir_foreach_function(nir, function) {
+      nir_foreach_function(function, nir) {
          if (function->impl)
             nir_index_ssa_defs(function->impl);
       }
@@ -639,46 +555,9 @@ brw_postprocess_nir(nir_shader *nir,
    return nir;
 }
 
-nir_shader *
-brw_create_nir(struct brw_context *brw,
-               const struct gl_shader_program *shader_prog,
-               const struct gl_program *prog,
-               gl_shader_stage stage,
-               bool is_scalar)
-{
-   struct gl_context *ctx = &brw->ctx;
-   const nir_shader_compiler_options *options =
-      ctx->Const.ShaderCompilerOptions[stage].NirOptions;
-   bool progress;
-   nir_shader *nir;
-
-   /* First, lower the GLSL IR or Mesa IR to NIR */
-   if (shader_prog) {
-      nir = glsl_to_nir(shader_prog, stage, options);
-   } else {
-      nir = prog_to_nir(prog, options);
-      OPT_V(nir_convert_to_ssa); /* turn registers into SSA */
-   }
-   nir_validate_shader(nir);
-
-   (void)progress;
-
-   nir = brw_preprocess_nir(nir, is_scalar);
-
-   OPT(nir_lower_system_values);
-   OPT_V(brw_nir_lower_uniforms, is_scalar);
-
-   if (shader_prog) {
-      OPT_V(nir_lower_samplers, shader_prog);
-      OPT_V(nir_lower_atomics, shader_prog);
-   }
-
-   return nir;
-}
-
 nir_shader *
 brw_nir_apply_sampler_key(nir_shader *nir,
-                          const struct brw_device_info *devinfo,
+                          const struct gen_device_info *devinfo,
                           const struct brw_sampler_prog_key_data *key_tex,
                           bool is_scalar)
 {
@@ -705,6 +584,10 @@ brw_nir_apply_sampler_key(nir_shader *nir,
          tex_options.swizzles[s][c] = GET_SWZ(key_tex->swizzles[s], c);
    }
 
+   tex_options.lower_y_uv_external = key_tex->y_uv_image_mask;
+   tex_options.lower_y_u_v_external = key_tex->y_u_v_image_mask;
+   tex_options.lower_yx_xuxv_external = key_tex->yx_xuxv_image_mask;
+
    if (nir_lower_tex(nir, &tex_options)) {
       nir_validate_shader(nir);
       nir = nir_optimize(nir, is_scalar);
@@ -718,12 +601,24 @@ brw_type_for_nir_type(nir_alu_type type)
 {
    switch (type) {
    case nir_type_uint:
+   case nir_type_uint32:
       return BRW_REGISTER_TYPE_UD;
    case nir_type_bool:
    case nir_type_int:
+   case nir_type_bool32:
+   case nir_type_int32:
       return BRW_REGISTER_TYPE_D;
    case nir_type_float:
+   case nir_type_float32:
       return BRW_REGISTER_TYPE_F;
+   case nir_type_float64:
+      return BRW_REGISTER_TYPE_DF;
+   case nir_type_int64:
+   case nir_type_uint64:
+      /* TODO: 64-bit integers should currently appear only in moves, so
+       * returning DF is OK for now, but this needs fixing once we add
+       * real 64-bit integer support.
+       */
+      return BRW_REGISTER_TYPE_DF;
    default:
       unreachable("unknown type");
    }
@@ -739,12 +634,18 @@ brw_glsl_base_type_for_nir_type(nir_alu_type type)
 {
    switch (type) {
    case nir_type_float:
+   case nir_type_float32:
       return GLSL_TYPE_FLOAT;
 
+   case nir_type_float64:
+      return GLSL_TYPE_DOUBLE;
+
    case nir_type_int:
+   case nir_type_int32:
       return GLSL_TYPE_INT;
 
    case nir_type_uint:
+   case nir_type_uint32:
       return GLSL_TYPE_UINT;
 
    default: