i965: Port INTEL_PRECISE_TRIG=1 to NIR.
src/mesa/drivers/dri/i965/brw_nir.c
index cc9430c83e61fb7dcee6dab22983fb1fc5d5e0e7..932979a77199f8b2539c31e371867297db5afd9a 100644
 
 #include "brw_nir.h"
 #include "brw_shader.h"
-#include "glsl/glsl_parser_extras.h"
-#include "glsl/nir/glsl_to_nir.h"
+#include "compiler/nir/glsl_to_nir.h"
+#include "compiler/nir/nir_builder.h"
 #include "program/prog_to_nir.h"
 
-static void
-brw_nir_lower_inputs(nir_shader *nir,
-                     const struct gl_program *prog,
-                     bool is_scalar)
+static bool
+is_input(nir_intrinsic_instr *intrin)
+{
+   return intrin->intrinsic == nir_intrinsic_load_input ||
+          intrin->intrinsic == nir_intrinsic_load_per_vertex_input;
+}
+
+static bool
+is_output(nir_intrinsic_instr *intrin)
+{
+   return intrin->intrinsic == nir_intrinsic_load_output ||
+          intrin->intrinsic == nir_intrinsic_load_per_vertex_output ||
+          intrin->intrinsic == nir_intrinsic_store_output ||
+          intrin->intrinsic == nir_intrinsic_store_per_vertex_output;
+}
+
+/**
+ * In many cases, we just add the base and offset together, so there's no
+ * reason to keep them separate.  Sometimes, combining them is essential:
+ * if a shader only accesses part of a compound variable (such as a matrix
+ * or array), the variable's base may not actually exist in the VUE map.
+ *
+ * This pass adds constant offsets to instr->const_index[0], and resets
+ * the offset source to 0.  Non-constant offsets remain unchanged - since
+ * we don't know what part of a compound variable is accessed, we allocate
+ * storage for the entire thing.
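+ *
+ * For example (hypothetical values): a load_input with const_index[0] = 8
+ * and a constant offset source of 2 ends up with const_index[0] = 10 and
+ * an offset source of 0.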
+ */
+struct add_const_offset_to_base_params {
+   nir_builder b;
+   nir_variable_mode mode;
+};
+
+static bool
+add_const_offset_to_base_block(nir_block *block, void *closure)
 {
-   nir_assign_var_locations(&nir->inputs, &nir->num_inputs,
-                            is_scalar ? type_size_scalar : type_size_vec4);
+   struct add_const_offset_to_base_params *params = closure;
+   nir_builder *b = &params->b;
+
+   nir_foreach_instr_safe(block, instr) {
+      if (instr->type != nir_instr_type_intrinsic)
+         continue;
+
+      nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
+
+      if ((params->mode == nir_var_shader_in && is_input(intrin)) ||
+          (params->mode == nir_var_shader_out && is_output(intrin))) {
+         nir_src *offset = nir_get_io_offset_src(intrin);
+         nir_const_value *const_offset = nir_src_as_const_value(*offset);
+
+         if (const_offset) {
+            intrin->const_index[0] += const_offset->u32[0];
+            b->cursor = nir_before_instr(&intrin->instr);
+            nir_instr_rewrite_src(&intrin->instr, offset,
+                                  nir_src_for_ssa(nir_imm_int(b, 0)));
+         }
+      }
+   }
+   return true;
 }
 
 static void
-brw_nir_lower_outputs(nir_shader *nir, bool is_scalar)
+add_const_offset_to_base(nir_shader *nir, nir_variable_mode mode)
+{
+   struct add_const_offset_to_base_params params = { .mode = mode };
+
+   nir_foreach_function(nir, f) {
+      if (f->impl) {
+         nir_builder_init(&params.b, f->impl);
+         nir_foreach_block(f->impl, add_const_offset_to_base_block, &params);
+      }
+   }
+}
+
+static bool
+remap_vs_attrs(nir_block *block, void *closure)
+{
+   GLbitfield64 inputs_read = *((GLbitfield64 *) closure);
+
+   nir_foreach_instr(block, instr) {
+      if (instr->type != nir_instr_type_intrinsic)
+         continue;
+
+      nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
+
+      if (intrin->intrinsic == nir_intrinsic_load_input) {
+         /* Attributes come in a contiguous block, ordered by their
+          * gl_vert_attrib value.  That means we can compute the slot
+          * number for an attribute by masking out the enabled attributes
+          * before it and counting the bits.
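+          *
+          * For example (hypothetical values): if inputs_read enables
+          * attributes 0, 3, and 4, then attribute 4 lands in slot 2 and
+          * its base becomes 4 * 2 = 8.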
+          */
+         int attr = intrin->const_index[0];
+         int slot = _mesa_bitcount_64(inputs_read & BITFIELD64_MASK(attr));
+
+         intrin->const_index[0] = 4 * slot;
+      }
+   }
+   return true;
+}
+
+static bool
+remap_inputs_with_vue_map(nir_block *block, void *closure)
+{
+   const struct brw_vue_map *vue_map = closure;
+
+   nir_foreach_instr(block, instr) {
+      if (instr->type != nir_instr_type_intrinsic)
+         continue;
+
+      nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
+
+      if (intrin->intrinsic == nir_intrinsic_load_input ||
+          intrin->intrinsic == nir_intrinsic_load_per_vertex_input) {
+         int vue_slot = vue_map->varying_to_slot[intrin->const_index[0]];
+         assert(vue_slot != -1);
+         intrin->const_index[0] = vue_slot;
+      }
+   }
+   return true;
+}
+
+struct remap_patch_urb_offsets_state {
+   nir_builder b;
+   const struct brw_vue_map *vue_map;
+};
+
+static bool
+remap_patch_urb_offsets(nir_block *block, void *closure)
+{
+   struct remap_patch_urb_offsets_state *state = closure;
+
+   nir_foreach_instr_safe(block, instr) {
+      if (instr->type != nir_instr_type_intrinsic)
+         continue;
+
+      nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
+
+      gl_shader_stage stage = state->b.shader->stage;
+
+      if ((stage == MESA_SHADER_TESS_CTRL && is_output(intrin)) ||
+          (stage == MESA_SHADER_TESS_EVAL && is_input(intrin))) {
+         int vue_slot = state->vue_map->varying_to_slot[intrin->const_index[0]];
+         assert(vue_slot != -1);
+         intrin->const_index[0] = vue_slot;
+
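+         /* Per-vertex accesses also need the vertex index folded in: each
+          * vertex occupies num_per_vertex_slots consecutive slots, so the
+          * index is scaled and added to the base (constant case) or to the
+          * offset source (dynamic case).
+          */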
+         nir_src *vertex = nir_get_io_vertex_index_src(intrin);
+         if (vertex) {
+            nir_const_value *const_vertex = nir_src_as_const_value(*vertex);
+            if (const_vertex) {
+               intrin->const_index[0] += const_vertex->u32[0] *
+                                         state->vue_map->num_per_vertex_slots;
+            } else {
+               state->b.cursor = nir_before_instr(&intrin->instr);
+
+               /* Multiply by the number of per-vertex slots. */
+               nir_ssa_def *vertex_offset =
+                  nir_imul(&state->b,
+                           nir_ssa_for_src(&state->b, *vertex, 1),
+                           nir_imm_int(&state->b,
+                                       state->vue_map->num_per_vertex_slots));
+
+               /* Add it to the existing offset */
+               nir_src *offset = nir_get_io_offset_src(intrin);
+               nir_ssa_def *total_offset =
+                  nir_iadd(&state->b, vertex_offset,
+                           nir_ssa_for_src(&state->b, *offset, 1));
+
+               nir_instr_rewrite_src(&intrin->instr, offset,
+                                     nir_src_for_ssa(total_offset));
+            }
+         }
+      }
+   }
+   return true;
+}
+
+void
+brw_nir_lower_vs_inputs(nir_shader *nir,
+                        const struct brw_device_info *devinfo,
+                        bool is_scalar,
+                        bool use_legacy_snorm_formula,
+                        const uint8_t *vs_attrib_wa_flags)
+{
+   /* Start with the location of the variable's base. */
+   foreach_list_typed(nir_variable, var, node, &nir->inputs) {
+      var->data.driver_location = var->data.location;
+   }
+
+   /* Now use nir_lower_io to walk dereference chains.  Attribute arrays
+    * are loaded as one vec4 per element (or matrix column), so we use
+    * type_size_vec4 here.
+    */
+   nir_lower_io(nir, nir_var_shader_in, type_size_vec4);
+
+   /* This pass needs actual constants */
+   nir_opt_constant_folding(nir);
+
+   add_const_offset_to_base(nir, nir_var_shader_in);
+
+   brw_nir_apply_attribute_workarounds(nir, use_legacy_snorm_formula,
+                                       vs_attrib_wa_flags);
+
+   if (is_scalar) {
+      /* Finally, translate VERT_ATTRIB_* values into the actual registers.
+       *
+       * Note that we can use nir->info.inputs_read instead of
+       * key->inputs_read since the two are identical aside from Gen4-5
+       * edge flag differences.
+       */
+      GLbitfield64 inputs_read = nir->info.inputs_read;
+
+      nir_foreach_function(nir, function) {
+         if (function->impl) {
+            nir_foreach_block(function->impl, remap_vs_attrs, &inputs_read);
+         }
+      }
+   }
+}
+
+void
+brw_nir_lower_vue_inputs(nir_shader *nir, bool is_scalar,
+                         const struct brw_vue_map *vue_map)
+{
+   foreach_list_typed(nir_variable, var, node, &nir->inputs) {
+      var->data.driver_location = var->data.location;
+   }
+
+   /* Inputs are stored in vec4 slots, so use type_size_vec4(). */
+   nir_lower_io(nir, nir_var_shader_in, type_size_vec4);
+
+   if (is_scalar || nir->stage != MESA_SHADER_GEOMETRY) {
+      /* This pass needs actual constants */
+      nir_opt_constant_folding(nir);
+
+      add_const_offset_to_base(nir, nir_var_shader_in);
+
+      nir_foreach_function(nir, function) {
+         if (function->impl) {
+            nir_foreach_block(function->impl, remap_inputs_with_vue_map,
+                              (void *) vue_map);
+         }
+      }
+   }
+}
+
+void
+brw_nir_lower_tes_inputs(nir_shader *nir, const struct brw_vue_map *vue_map)
+{
+   struct remap_patch_urb_offsets_state state;
+   state.vue_map = vue_map;
+
+   foreach_list_typed(nir_variable, var, node, &nir->inputs) {
+      var->data.driver_location = var->data.location;
+   }
+
+   nir_lower_io(nir, nir_var_shader_in, type_size_vec4);
+
+   /* This pass needs actual constants */
+   nir_opt_constant_folding(nir);
+
+   add_const_offset_to_base(nir, nir_var_shader_in);
+
+   nir_foreach_function(nir, function) {
+      if (function->impl) {
+         nir_builder_init(&state.b, function->impl);
+         nir_foreach_block(function->impl, remap_patch_urb_offsets, &state);
+      }
+   }
+}
+
+void
+brw_nir_lower_fs_inputs(nir_shader *nir)
+{
+   nir_assign_var_locations(&nir->inputs, &nir->num_inputs, type_size_scalar);
+   nir_lower_io(nir, nir_var_shader_in, type_size_scalar);
+}
+
+void
+brw_nir_lower_vue_outputs(nir_shader *nir,
+                          bool is_scalar)
 {
    if (is_scalar) {
-      nir_assign_var_locations(&nir->outputs, &nir->num_outputs, type_size_scalar);
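+      /* In scalar mode, outputs still occupy whole vec4 slots in the VUE,
+       * so size them as vec4s but hand out locations in scalar components
+       * (four per slot).
+       */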
+      nir_assign_var_locations(&nir->outputs, &nir->num_outputs,
+                               type_size_vec4_times_4);
+      nir_lower_io(nir, nir_var_shader_out, type_size_vec4_times_4);
    } else {
-      foreach_list_typed(nir_variable, var, node, &nir->outputs)
+      nir_foreach_variable(var, &nir->outputs)
          var->data.driver_location = var->data.location;
+      nir_lower_io(nir, nir_var_shader_out, type_size_vec4);
+   }
+}
+
+void
+brw_nir_lower_tcs_outputs(nir_shader *nir, const struct brw_vue_map *vue_map)
+{
+   struct remap_patch_urb_offsets_state state;
+   state.vue_map = vue_map;
+
+   nir_foreach_variable(var, &nir->outputs) {
+      var->data.driver_location = var->data.location;
+   }
+
+   nir_lower_io(nir, nir_var_shader_out, type_size_vec4);
+
+   /* This pass needs actual constants */
+   nir_opt_constant_folding(nir);
+
+   add_const_offset_to_base(nir, nir_var_shader_out);
+
+   nir_foreach_function(nir, function) {
+      if (function->impl) {
+         nir_builder_init(&state.b, function->impl);
+         nir_foreach_block(function->impl, remap_patch_urb_offsets, &state);
+      }
    }
 }
 
+void
+brw_nir_lower_fs_outputs(nir_shader *nir)
+{
+   nir_assign_var_locations(&nir->outputs, &nir->num_outputs,
+                            type_size_scalar);
+   nir_lower_io(nir, nir_var_shader_out, type_size_scalar);
+}
+
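+/* Uniform and shared-variable offsets are expressed in bytes, so scale the
+ * counts from type_size_scalar()/type_size_vec4() by the size of a 32-bit
+ * component (4 bytes) and of a vec4 slot (16 bytes), respectively.
+ */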
+static int
+type_size_scalar_bytes(const struct glsl_type *type)
+{
+   return type_size_scalar(type) * 4;
+}
+
+static int
+type_size_vec4_bytes(const struct glsl_type *type)
+{
+   return type_size_vec4(type) * 16;
+}
+
 static void
+brw_nir_lower_uniforms(nir_shader *nir, bool is_scalar)
+{
+   if (is_scalar) {
+      nir_assign_var_locations(&nir->uniforms, &nir->num_uniforms,
+                               type_size_scalar_bytes);
+      nir_lower_io(nir, nir_var_uniform, type_size_scalar_bytes);
+   } else {
+      nir_assign_var_locations(&nir->uniforms, &nir->num_uniforms,
+                               type_size_vec4_bytes);
+      nir_lower_io(nir, nir_var_uniform, type_size_vec4_bytes);
+   }
+}
+
+void
+brw_nir_lower_cs_shared(nir_shader *nir)
+{
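+   /* Compute shader shared ("local") memory is byte-addressed, so use
+    * byte-based type sizes here.
+    */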
+   nir_assign_var_locations(&nir->shared, &nir->num_shared,
+                            type_size_scalar_bytes);
+   nir_lower_io(nir, nir_var_shared, type_size_scalar_bytes);
+}
+
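+/* OPT() runs a pass via NIR_PASS, folds its result into the local
+ * 'progress' flag, and evaluates to whether that particular pass made
+ * progress.  OPT_V() runs a pass via NIR_PASS_V for passes that don't
+ * report progress.
+ */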
+#define OPT(pass, ...) ({                                  \
+   bool this_progress = false;                             \
+   NIR_PASS(this_progress, nir, pass, ##__VA_ARGS__);      \
+   if (this_progress)                                      \
+      progress = true;                                     \
+   this_progress;                                          \
+})
+
+#define OPT_V(pass, ...) NIR_PASS_V(nir, pass, ##__VA_ARGS__)
+
+static nir_shader *
 nir_optimize(nir_shader *nir, bool is_scalar)
 {
    bool progress;
    do {
       progress = false;
-      nir_lower_vars_to_ssa(nir);
-      nir_validate_shader(nir);
+      OPT_V(nir_lower_vars_to_ssa);
 
       if (is_scalar) {
-         nir_lower_alu_to_scalar(nir);
-         nir_validate_shader(nir);
+         OPT_V(nir_lower_alu_to_scalar);
       }
 
-      progress |= nir_copy_prop(nir);
-      nir_validate_shader(nir);
+      OPT(nir_copy_prop);
 
       if (is_scalar) {
-         nir_lower_phis_to_scalar(nir);
-         nir_validate_shader(nir);
+         OPT_V(nir_lower_phis_to_scalar);
       }
 
-      progress |= nir_copy_prop(nir);
-      nir_validate_shader(nir);
-      progress |= nir_opt_dce(nir);
-      nir_validate_shader(nir);
-      progress |= nir_opt_cse(nir);
-      nir_validate_shader(nir);
-      progress |= nir_opt_peephole_select(nir);
-      nir_validate_shader(nir);
-      progress |= nir_opt_algebraic(nir);
-      nir_validate_shader(nir);
-      progress |= nir_opt_constant_folding(nir);
-      nir_validate_shader(nir);
-      progress |= nir_opt_dead_cf(nir);
-      nir_validate_shader(nir);
-      progress |= nir_opt_remove_phis(nir);
-      nir_validate_shader(nir);
-      progress |= nir_opt_undef(nir);
-      nir_validate_shader(nir);
+      OPT(nir_copy_prop);
+      OPT(nir_opt_dce);
+      OPT(nir_opt_cse);
+      OPT(nir_opt_peephole_select);
+      OPT(nir_opt_algebraic);
+      OPT(nir_opt_constant_folding);
+      OPT(nir_opt_dead_cf);
+      OPT(nir_opt_remove_phis);
+      OPT(nir_opt_undef);
    } while (progress);
+
+   return nir;
 }
 
+/* Does some simple lowering and runs the standard suite of optimizations
+ *
+ * This is intended to be called more-or-less directly after you get the
+ * shader out of GLSL or some other source.  While it is geared towards i965,
+ * it is not at all generator-specific except for the is_scalar flag.  Even
+ * there, it is safe to call with is_scalar = false for a shader that is
+ * intended for the FS backend as long as nir_optimize is called again with
+ * is_scalar = true to scalarize everything prior to code gen.
+ */
 nir_shader *
-brw_create_nir(struct brw_context *brw,
-               const struct gl_shader_program *shader_prog,
-               const struct gl_program *prog,
-               gl_shader_stage stage,
-               bool is_scalar)
+brw_preprocess_nir(const struct brw_compiler *compiler, nir_shader *nir)
 {
-   struct gl_context *ctx = &brw->ctx;
-   const nir_shader_compiler_options *options =
-      ctx->Const.ShaderCompilerOptions[stage].NirOptions;
+   bool progress; /* Written by OPT and OPT_V */
+   (void)progress;
+
+   const bool is_scalar = compiler->scalar_stage[nir->stage];
+
+   if (nir->stage == MESA_SHADER_GEOMETRY)
+      OPT(nir_lower_gs_intrinsics);
+
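+   /* INTEL_PRECISE_TRIG: apply the NIR-based workarounds for the limited
+    * precision of the hardware SIN/COS instructions.
+    */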
+   if (compiler->precise_trig)
+      OPT(brw_nir_apply_trig_workarounds);
+
    static const nir_lower_tex_options tex_options = {
       .lower_txp = ~0,
    };
-   bool debug_enabled = INTEL_DEBUG & intel_debug_flag_for_shader_stage(stage);
-   nir_shader *nir;
-
-   /* First, lower the GLSL IR or Mesa IR to NIR */
-   if (shader_prog) {
-      nir = glsl_to_nir(shader_prog, stage, options);
-   } else {
-      nir = prog_to_nir(prog, options);
-      nir_convert_to_ssa(nir); /* turn registers into SSA */
-   }
-   nir_validate_shader(nir);
-
-   if (stage == MESA_SHADER_GEOMETRY) {
-      nir_lower_gs_intrinsics(nir);
-      nir_validate_shader(nir);
-   }
 
-   nir_lower_global_vars_to_local(nir);
-   nir_validate_shader(nir);
+   OPT(nir_lower_tex, &tex_options);
+   OPT(nir_normalize_cubemap_coords);
 
-   nir_lower_tex(nir, &tex_options);
-   nir_validate_shader(nir);
+   OPT(nir_lower_global_vars_to_local);
 
-   nir_normalize_cubemap_coords(nir);
-   nir_validate_shader(nir);
+   OPT(nir_split_var_copies);
 
-   nir_split_var_copies(nir);
-   nir_validate_shader(nir);
+   nir = nir_optimize(nir, is_scalar);
 
-   nir_optimize(nir, is_scalar);
+   if (is_scalar) {
+      OPT_V(nir_lower_load_const_to_scalar);
+   }
 
    /* Lower a bunch of stuff */
-   nir_lower_var_copies(nir);
-   nir_validate_shader(nir);
+   OPT_V(nir_lower_var_copies);
 
    /* Get rid of split copies */
-   nir_optimize(nir, is_scalar);
-
-   brw_nir_lower_inputs(nir, prog, is_scalar);
-   brw_nir_lower_outputs(nir, is_scalar);
-   nir_assign_var_locations(&nir->uniforms,
-                            &nir->num_uniforms,
-                            is_scalar ? type_size_scalar : type_size_vec4);
-   nir_lower_io(nir, -1, is_scalar ? type_size_scalar : type_size_vec4);
-   nir_validate_shader(nir);
+   nir = nir_optimize(nir, is_scalar);
 
-   nir_remove_dead_variables(nir);
-   nir_validate_shader(nir);
+   OPT(nir_remove_dead_variables);
 
-   if (shader_prog) {
-      nir_lower_samplers(nir, shader_prog);
-      nir_validate_shader(nir);
-   }
+   return nir;
+}
 
-   nir_lower_system_values(nir);
-   nir_validate_shader(nir);
+/* Prepare the given shader for codegen
+ *
+ * This function is intended to be called right before going into the actual
+ * backend and is highly backend-specific.  Also, once this function has been
+ * called on a shader, it will no longer be in SSA form so most optimizations
+ * will not work.
+ */
+nir_shader *
+brw_postprocess_nir(nir_shader *nir,
+                    const struct brw_device_info *devinfo,
+                    bool is_scalar)
+{
+   bool debug_enabled =
+      (INTEL_DEBUG & intel_debug_flag_for_shader_stage(nir->stage));
 
-   nir_lower_atomics(nir);
-   nir_validate_shader(nir);
+   bool progress; /* Written by OPT and OPT_V */
+   (void)progress;
 
-   nir_optimize(nir, is_scalar);
+   nir = nir_optimize(nir, is_scalar);
 
-   if (brw->gen >= 6) {
+   if (devinfo->gen >= 6) {
       /* Try and fuse multiply-adds */
-      nir_opt_peephole_ffma(nir);
-      nir_validate_shader(nir);
+      OPT(brw_nir_opt_peephole_ffma);
    }
 
-   nir_opt_algebraic_late(nir);
-   nir_validate_shader(nir);
+   OPT(nir_opt_algebraic_late);
 
-   nir_lower_locals_to_regs(nir);
-   nir_validate_shader(nir);
+   OPT(nir_lower_locals_to_regs);
 
-   nir_lower_to_source_mods(nir);
-   nir_validate_shader(nir);
-   nir_copy_prop(nir);
-   nir_validate_shader(nir);
-   nir_opt_dce(nir);
-   nir_validate_shader(nir);
+   OPT_V(nir_lower_to_source_mods);
+   OPT(nir_copy_prop);
+   OPT(nir_opt_dce);
 
    if (unlikely(debug_enabled)) {
       /* Re-index SSA defs so we print more sensible numbers. */
-      nir_foreach_overload(nir, overload) {
-         if (overload->impl)
-            nir_index_ssa_defs(overload->impl);
+      nir_foreach_function(nir, function) {
+         if (function->impl)
+            nir_index_ssa_defs(function->impl);
       }
 
       fprintf(stderr, "NIR (SSA form) for %s shader:\n",
-              _mesa_shader_stage_to_string(stage));
+              _mesa_shader_stage_to_string(nir->stage));
       nir_print_shader(nir, stderr);
    }
 
-   nir_convert_from_ssa(nir, true);
-   nir_validate_shader(nir);
+   OPT_V(nir_convert_from_ssa, true);
 
    if (!is_scalar) {
-      nir_move_vec_src_uses_to_dest(nir);
-      nir_validate_shader(nir);
-
-      nir_lower_vec_to_movs(nir);
-      nir_validate_shader(nir);
+      OPT_V(nir_move_vec_src_uses_to_dest);
+      OPT(nir_lower_vec_to_movs);
    }
 
    /* This is the last pass we run before we start emitting stuff.  It
@@ -212,31 +535,117 @@ brw_create_nir(struct brw_context *brw,
     * run it last because it stashes data in instr->pass_flags and we don't
     * want that to be squashed by other NIR passes.
     */
-   if (brw->gen <= 5)
+   if (devinfo->gen <= 5)
       brw_nir_analyze_boolean_resolves(nir);
 
    nir_sweep(nir);
 
    if (unlikely(debug_enabled)) {
       fprintf(stderr, "NIR (final form) for %s shader:\n",
-              _mesa_shader_stage_to_string(stage));
+              _mesa_shader_stage_to_string(nir->stage));
       nir_print_shader(nir, stderr);
    }
 
    return nir;
 }
 
+nir_shader *
+brw_create_nir(struct brw_context *brw,
+               const struct gl_shader_program *shader_prog,
+               const struct gl_program *prog,
+               gl_shader_stage stage,
+               bool is_scalar)
+{
+   struct gl_context *ctx = &brw->ctx;
+   const nir_shader_compiler_options *options =
+      ctx->Const.ShaderCompilerOptions[stage].NirOptions;
+   bool progress;
+   nir_shader *nir;
+
+   /* First, lower the GLSL IR or Mesa IR to NIR */
+   if (shader_prog) {
+      nir = glsl_to_nir(shader_prog, stage, options);
+   } else {
+      nir = prog_to_nir(prog, options);
+      OPT_V(nir_convert_to_ssa); /* turn registers into SSA */
+   }
+   nir_validate_shader(nir);
+
+   (void)progress;
+
+   nir = brw_preprocess_nir(brw->intelScreen->compiler, nir);
+
+   OPT(nir_lower_system_values);
+   OPT_V(brw_nir_lower_uniforms, is_scalar);
+
+   if (shader_prog) {
+      OPT_V(nir_lower_samplers, shader_prog);
+      OPT_V(nir_lower_atomics, shader_prog);
+   }
+
+   return nir;
+}
+
+nir_shader *
+brw_nir_apply_sampler_key(nir_shader *nir,
+                          const struct brw_device_info *devinfo,
+                          const struct brw_sampler_prog_key_data *key_tex,
+                          bool is_scalar)
+{
+   nir_lower_tex_options tex_options = { 0 };
+
+   /* Iron Lake and prior require lowering of all rectangle textures */
+   if (devinfo->gen < 6)
+      tex_options.lower_rect = true;
+
+   /* Prior to Broadwell, our hardware can't actually do GL_CLAMP */
+   if (devinfo->gen < 8) {
+      tex_options.saturate_s = key_tex->gl_clamp_mask[0];
+      tex_options.saturate_t = key_tex->gl_clamp_mask[1];
+      tex_options.saturate_r = key_tex->gl_clamp_mask[2];
+   }
+
+   /* Prior to Haswell, we have to fake texture swizzle */
+   for (unsigned s = 0; s < MAX_SAMPLERS; s++) {
+      if (key_tex->swizzles[s] == SWIZZLE_NOOP)
+         continue;
+
+      tex_options.swizzle_result |= (1 << s);
+      for (unsigned c = 0; c < 4; c++)
+         tex_options.swizzles[s][c] = GET_SWZ(key_tex->swizzles[s], c);
+   }
+
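+   /* Only bother re-validating and re-optimizing if the lowering pass
+    * actually changed anything.
+    */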
+   if (nir_lower_tex(nir, &tex_options)) {
+      nir_validate_shader(nir);
+      nir = nir_optimize(nir, is_scalar);
+   }
+
+   return nir;
+}
+
 enum brw_reg_type
 brw_type_for_nir_type(nir_alu_type type)
 {
    switch (type) {
-   case nir_type_unsigned:
+   case nir_type_uint:
+   case nir_type_uint32:
       return BRW_REGISTER_TYPE_UD;
    case nir_type_bool:
    case nir_type_int:
+   case nir_type_bool32:
+   case nir_type_int32:
       return BRW_REGISTER_TYPE_D;
    case nir_type_float:
+   case nir_type_float32:
       return BRW_REGISTER_TYPE_F;
+   case nir_type_float64:
+      return BRW_REGISTER_TYPE_DF;
+   case nir_type_int64:
+   case nir_type_uint64:
+      /* TODO: we should only see these in moves, so this is OK for now, but
+       * once we add actual 64-bit integer support we should fix this.
+       */
+      return BRW_REGISTER_TYPE_DF;
    default:
       unreachable("unknown type");
    }
@@ -252,12 +661,18 @@ brw_glsl_base_type_for_nir_type(nir_alu_type type)
 {
    switch (type) {
    case nir_type_float:
+   case nir_type_float32:
       return GLSL_TYPE_FLOAT;
 
+   case nir_type_float64:
+      return GLSL_TYPE_DOUBLE;
+
    case nir_type_int:
+   case nir_type_int32:
       return GLSL_TYPE_INT;
 
-   case nir_type_unsigned:
+   case nir_type_uint:
+   case nir_type_uint32:
       return GLSL_TYPE_UINT;
 
    default: