intel/vec4: Try to emit a VF source in try_immediate_source
[mesa.git] / src / intel / compiler / brw_nir.c
index d9642c7d4904f2b6947478cfe5dbaa18de1bd741..2d46b87de6cda837fa050fbfb1e73abc2011d5c8 100644 (file)
 
 #include "brw_nir.h"
 #include "brw_shader.h"
-#include "common/gen_debug.h"
+#include "dev/gen_debug.h"
 #include "compiler/glsl_types.h"
 #include "compiler/nir/nir_builder.h"
-
-static bool
-is_input(nir_intrinsic_instr *intrin)
-{
-   return intrin->intrinsic == nir_intrinsic_load_input ||
-          intrin->intrinsic == nir_intrinsic_load_per_vertex_input ||
-          intrin->intrinsic == nir_intrinsic_load_interpolated_input;
-}
-
-static bool
-is_output(nir_intrinsic_instr *intrin)
-{
-   return intrin->intrinsic == nir_intrinsic_load_output ||
-          intrin->intrinsic == nir_intrinsic_load_per_vertex_output ||
-          intrin->intrinsic == nir_intrinsic_store_output ||
-          intrin->intrinsic == nir_intrinsic_store_per_vertex_output;
-}
-
-/**
- * In many cases, we just add the base and offset together, so there's no
- * reason to keep them separate.  Sometimes, combining them is essential:
- * if a shader only accesses part of a compound variable (such as a matrix
- * or array), the variable's base may not actually exist in the VUE map.
- *
- * This pass adds constant offsets to instr->const_index[0], and resets
- * the offset source to 0.  Non-constant offsets remain unchanged - since
- * we don't know what part of a compound variable is accessed, we allocate
- * storage for the entire thing.
- */
-
-static bool
-add_const_offset_to_base_block(nir_block *block, nir_builder *b,
-                               nir_variable_mode mode)
-{
-   nir_foreach_instr_safe(instr, block) {
-      if (instr->type != nir_instr_type_intrinsic)
-         continue;
-
-      nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
-
-      if ((mode == nir_var_shader_in && is_input(intrin)) ||
-          (mode == nir_var_shader_out && is_output(intrin))) {
-         nir_src *offset = nir_get_io_offset_src(intrin);
-         nir_const_value *const_offset = nir_src_as_const_value(*offset);
-
-         if (const_offset) {
-            intrin->const_index[0] += const_offset->u32[0];
-            b->cursor = nir_before_instr(&intrin->instr);
-            nir_instr_rewrite_src(&intrin->instr, offset,
-                                  nir_src_for_ssa(nir_imm_int(b, 0)));
-         }
-      }
-   }
-   return true;
-}
-
-static void
-add_const_offset_to_base(nir_shader *nir, nir_variable_mode mode)
-{
-   nir_foreach_function(f, nir) {
-      if (f->impl) {
-         nir_builder b;
-         nir_builder_init(&b, f->impl);
-         nir_foreach_block(block, f->impl) {
-            add_const_offset_to_base_block(block, &b, mode);
-         }
-      }
-   }
-}
+#include "util/u_math.h"
 
 static bool
 remap_tess_levels(nir_builder *b, nir_intrinsic_instr *intr,
@@ -151,6 +83,24 @@ remap_tess_levels(nir_builder *b, nir_intrinsic_instr *intr,
    return true;
 }
 
+static bool
+is_input(nir_intrinsic_instr *intrin)
+{
+   return intrin->intrinsic == nir_intrinsic_load_input ||
+          intrin->intrinsic == nir_intrinsic_load_per_vertex_input ||
+          intrin->intrinsic == nir_intrinsic_load_interpolated_input;
+}
+
+static bool
+is_output(nir_intrinsic_instr *intrin)
+{
+   return intrin->intrinsic == nir_intrinsic_load_output ||
+          intrin->intrinsic == nir_intrinsic_load_per_vertex_output ||
+          intrin->intrinsic == nir_intrinsic_store_output ||
+          intrin->intrinsic == nir_intrinsic_store_per_vertex_output;
+}
+
+
 static bool
 remap_patch_urb_offsets(nir_block *block, nir_builder *b,
                         const struct brw_vue_map *vue_map,
@@ -180,9 +130,8 @@ remap_patch_urb_offsets(nir_block *block, nir_builder *b,
 
          nir_src *vertex = nir_get_io_vertex_index_src(intrin);
          if (vertex) {
-            nir_const_value *const_vertex = nir_src_as_const_value(*vertex);
-            if (const_vertex) {
-               intrin->const_index[0] += const_vertex->u32[0] *
+            if (nir_src_is_const(*vertex)) {
+               intrin->const_index[0] += nir_src_as_uint(*vertex) *
                                          vue_map->num_per_vertex_slots;
             } else {
                b->cursor = nir_before_instr(&intrin->instr);
@@ -227,7 +176,7 @@ brw_nir_lower_vs_inputs(nir_shader *nir,
    /* This pass needs actual constants */
    nir_opt_constant_folding(nir);
 
-   add_const_offset_to_base(nir, nir_var_shader_in);
+   nir_io_add_const_offset_to_base(nir, nir_var_shader_in);
 
    brw_nir_apply_attribute_workarounds(nir, vs_attrib_wa_flags);
 
@@ -243,7 +192,7 @@ brw_nir_lower_vs_inputs(nir_shader *nir,
        BITFIELD64_BIT(SYSTEM_VALUE_VERTEX_ID_ZERO_BASE) |
        BITFIELD64_BIT(SYSTEM_VALUE_INSTANCE_ID));
 
-   const unsigned num_inputs = _mesa_bitcount_64(nir->info.inputs_read);
+   const unsigned num_inputs = util_bitcount64(nir->info.inputs_read);
 
    nir_foreach_function(function, nir) {
       if (!function->impl)
@@ -322,7 +271,7 @@ brw_nir_lower_vs_inputs(nir_shader *nir,
                 * before it and counting the bits.
                 */
                int attr = nir_intrinsic_base(intrin);
-               int slot = _mesa_bitcount_64(nir->info.inputs_read &
+               int slot = util_bitcount64(nir->info.inputs_read &
                                             BITFIELD64_MASK(attr));
                nir_intrinsic_set_base(intrin, slot);
                break;
@@ -350,7 +299,7 @@ brw_nir_lower_vue_inputs(nir_shader *nir,
    /* This pass needs actual constants */
    nir_opt_constant_folding(nir);
 
-   add_const_offset_to_base(nir, nir_var_shader_in);
+   nir_io_add_const_offset_to_base(nir, nir_var_shader_in);
 
    nir_foreach_function(function, nir) {
       if (!function->impl)
@@ -401,7 +350,7 @@ brw_nir_lower_tes_inputs(nir_shader *nir, const struct brw_vue_map *vue_map)
    /* This pass needs actual constants */
    nir_opt_constant_folding(nir);
 
-   add_const_offset_to_base(nir, nir_var_shader_in);
+   nir_io_add_const_offset_to_base(nir, nir_var_shader_in);
 
    nir_foreach_function(function, nir) {
       if (function->impl) {
@@ -452,16 +401,17 @@ brw_nir_lower_fs_inputs(nir_shader *nir,
       lower_io_options |= nir_lower_io_force_sample_interpolation;
 
    nir_lower_io(nir, nir_var_shader_in, type_size_vec4, lower_io_options);
+   if (devinfo->gen >= 11)
+      nir_lower_interpolation(nir, ~0);
 
    /* This pass needs actual constants */
    nir_opt_constant_folding(nir);
 
-   add_const_offset_to_base(nir, nir_var_shader_in);
+   nir_io_add_const_offset_to_base(nir, nir_var_shader_in);
 }
 
 void
-brw_nir_lower_vue_outputs(nir_shader *nir,
-                          bool is_scalar)
+brw_nir_lower_vue_outputs(nir_shader *nir)
 {
    nir_foreach_variable(var, &nir->outputs) {
       var->data.driver_location = var->data.location;
@@ -483,7 +433,7 @@ brw_nir_lower_tcs_outputs(nir_shader *nir, const struct brw_vue_map *vue_map,
    /* This pass needs actual constants */
    nir_opt_constant_folding(nir);
 
-   add_const_offset_to_base(nir, nir_var_shader_out);
+   nir_io_add_const_offset_to_base(nir, nir_var_shader_out);
 
    nir_foreach_function(function, nir) {
       if (function->impl) {
@@ -527,26 +477,43 @@ brw_nir_no_indirect_mask(const struct brw_compiler *compiler,
    if (compiler->glsl_compiler_options[stage].EmitNoIndirectOutput)
       indirect_mask |= nir_var_shader_out;
    if (compiler->glsl_compiler_options[stage].EmitNoIndirectTemp)
-      indirect_mask |= nir_var_local;
+      indirect_mask |= nir_var_function_temp;
 
    return indirect_mask;
 }
 
-nir_shader *
+void
 brw_nir_optimize(nir_shader *nir, const struct brw_compiler *compiler,
-                 bool is_scalar)
+                 bool is_scalar, bool allow_copies)
 {
    nir_variable_mode indirect_mask =
       brw_nir_no_indirect_mask(compiler, nir->info.stage);
 
    bool progress;
+   unsigned lower_flrp =
+      (nir->options->lower_flrp16 ? 16 : 0) |
+      (nir->options->lower_flrp32 ? 32 : 0) |
+      (nir->options->lower_flrp64 ? 64 : 0);
+
    do {
       progress = false;
+      OPT(nir_split_array_vars, nir_var_function_temp);
+      OPT(nir_shrink_vec_array_vars, nir_var_function_temp);
+      OPT(nir_opt_deref);
       OPT(nir_lower_vars_to_ssa);
-      /* OPT(nir_opt_copy_prop_vars); */
+      if (allow_copies) {
+         /* Only run this pass in the first call to brw_nir_optimize.  Later
+          * calls assume that we've lowered away any copy_deref instructions
+          * and we don't want to introduce any more.
+          */
+         OPT(nir_opt_find_array_copies);
+      }
+      OPT(nir_opt_copy_prop_vars);
+      OPT(nir_opt_dead_write_vars);
+      OPT(nir_opt_combine_stores, nir_var_all);
 
       if (is_scalar) {
-         OPT(nir_lower_alu_to_scalar);
+         OPT(nir_lower_alu_to_scalar, NULL);
       }
 
       OPT(nir_copy_prop);
@@ -558,10 +525,54 @@ brw_nir_optimize(nir_shader *nir, const struct brw_compiler *compiler,
       OPT(nir_copy_prop);
       OPT(nir_opt_dce);
       OPT(nir_opt_cse);
-      OPT(nir_opt_peephole_select, 0);
+      OPT(nir_opt_combine_stores, nir_var_all);
+
+      /* Passing 0 to the peephole select pass causes it to convert
+       * if-statements that contain only move instructions in the branches
+       * regardless of the count.
+       *
+       * Passing 1 to the peephole select pass causes it to convert
+       * if-statements that contain at most a single ALU instruction (total)
+       * in both branches.  Before Gen6, some math instructions were
+       * prohibitively expensive and the results of compare operations need an
+       * extra resolve step.  For these reasons, this pass is more harmful
+       * than good on those platforms.
+       *
+       * For indirect loads of uniforms (push constants), we assume that array
+       * indices will nearly always be in bounds and the cost of the load is
+       * low.  Therefore there shouldn't be a performance benefit to avoid it.
+       * However, in vec4 tessellation shaders, these loads operate by
+       * actually pulling from memory.
+       */
+      const bool is_vec4_tessellation = !is_scalar &&
+         (nir->info.stage == MESA_SHADER_TESS_CTRL ||
+          nir->info.stage == MESA_SHADER_TESS_EVAL);
+      OPT(nir_opt_peephole_select, 0, !is_vec4_tessellation, false);
+      OPT(nir_opt_peephole_select, 1, !is_vec4_tessellation,
+          compiler->devinfo->gen >= 6);
+
       OPT(nir_opt_intrinsics);
+      OPT(nir_opt_idiv_const, 32);
       OPT(nir_opt_algebraic);
       OPT(nir_opt_constant_folding);
+
+      if (lower_flrp != 0) {
+         /* To match the old behavior, set always_precise only for scalar
+          * shader stages.
+          */
+         if (OPT(nir_lower_flrp,
+                 lower_flrp,
+                 false /* always_precise */,
+                 compiler->devinfo->gen >= 6)) {
+            OPT(nir_opt_constant_folding);
+         }
+
+         /* Nothing should rematerialize any flrps, so we only need to do this
+          * lowering once.
+          */
+         lower_flrp = 0;
+      }
+
       OPT(nir_opt_dead_cf);
       if (OPT(nir_opt_trivial_continues)) {
          /* If nir_opt_trivial_continues makes progress, then we need to clean
@@ -571,41 +582,51 @@ brw_nir_optimize(nir_shader *nir, const struct brw_compiler *compiler,
          OPT(nir_copy_prop);
          OPT(nir_opt_dce);
       }
-      OPT(nir_opt_if);
+      OPT(nir_opt_if, false);
       if (nir->options->max_unroll_iterations != 0) {
          OPT(nir_opt_loop_unroll, indirect_mask);
       }
       OPT(nir_opt_remove_phis);
       OPT(nir_opt_undef);
-      OPT(nir_lower_doubles, nir_lower_drcp |
-                             nir_lower_dsqrt |
-                             nir_lower_drsq |
-                             nir_lower_dtrunc |
-                             nir_lower_dfloor |
-                             nir_lower_dceil |
-                             nir_lower_dfract |
-                             nir_lower_dround_even |
-                             nir_lower_dmod);
       OPT(nir_lower_pack);
    } while (progress);
 
-   return nir;
+   /* Work around an unused local sampler variable in Gfxbench which would
+    * otherwise trigger an assert in the opt_large_constants pass.
+    */
+   OPT(nir_remove_dead_variables, nir_var_function_temp);
 }
 
 static unsigned
-lower_bit_size_callback(const nir_alu_instr *alu, void *data)
+lower_bit_size_callback(const nir_alu_instr *alu, UNUSED void *data)
 {
    assert(alu->dest.dest.is_ssa);
-   if (alu->dest.dest.ssa.bit_size != 16)
+   if (alu->dest.dest.ssa.bit_size >= 32)
       return 0;
 
+   const struct brw_compiler *compiler = (const struct brw_compiler *) data;
+
    switch (alu->op) {
    case nir_op_idiv:
    case nir_op_imod:
    case nir_op_irem:
    case nir_op_udiv:
    case nir_op_umod:
+   case nir_op_fceil:
+   case nir_op_ffloor:
+   case nir_op_ffract:
+   case nir_op_fround_even:
+   case nir_op_ftrunc:
       return 32;
+   case nir_op_frcp:
+   case nir_op_frsq:
+   case nir_op_fsqrt:
+   case nir_op_fpow:
+   case nir_op_fexp2:
+   case nir_op_flog2:
+   case nir_op_fsin:
+   case nir_op_fcos:
+      return compiler->devinfo->gen < 9 ? 32 : 0;
    default:
       return 0;
    }
@@ -620,14 +641,19 @@ lower_bit_size_callback(const nir_alu_instr *alu, void *data)
  * intended for the FS backend as long as nir_optimize is called again with
  * is_scalar = true to scalarize everything prior to code gen.
  */
-nir_shader *
-brw_preprocess_nir(const struct brw_compiler *compiler, nir_shader *nir)
+void
+brw_preprocess_nir(const struct brw_compiler *compiler, nir_shader *nir,
+                   const nir_shader *softfp64)
 {
    const struct gen_device_info *devinfo = compiler->devinfo;
    UNUSED bool progress; /* Written by OPT */
 
    const bool is_scalar = compiler->scalar_stage[nir->info.stage];
 
+   if (is_scalar) {
+      OPT(nir_lower_alu_to_scalar, NULL);
+   }
+
    if (nir->info.stage == MESA_SHADER_GEOMETRY)
       OPT(nir_lower_gs_intrinsics);
 
@@ -640,7 +666,12 @@ brw_preprocess_nir(const struct brw_compiler *compiler, nir_shader *nir)
       .lower_txp = ~0,
       .lower_txf_offset = true,
       .lower_rect_offset = true,
+      .lower_tex_without_implicit_lod = true,
       .lower_txd_cube_map = true,
+      .lower_txb_shadow_clamp = true,
+      .lower_txd_shadow_clamp = true,
+      .lower_txd_offset_clamp = true,
+      .lower_tg4_offsets = true,
    };
 
    OPT(nir_lower_tex, &tex_options);
@@ -649,22 +680,31 @@ brw_preprocess_nir(const struct brw_compiler *compiler, nir_shader *nir)
    OPT(nir_lower_global_vars_to_local);
 
    OPT(nir_split_var_copies);
+   OPT(nir_split_struct_vars, nir_var_function_temp);
 
-   /* Run opt_algebraic before int64 lowering so we can hopefully get rid
-    * of some int64 instructions.
-    */
-   OPT(nir_opt_algebraic);
+   brw_nir_optimize(nir, compiler, is_scalar, true);
 
-   /* Lower int64 instructions before nir_optimize so that loop unrolling
-    * sees their actual cost.
-    */
-   nir_lower_int64(nir, nir_lower_imul64 |
-                        nir_lower_isign64 |
-                        nir_lower_divmod64);
+   bool lowered_64bit_ops = false;
+   do {
+      progress = false;
 
-   nir = brw_nir_optimize(nir, compiler, is_scalar);
+      OPT(nir_lower_int64, nir->options->lower_int64_options);
+      OPT(nir_lower_doubles, softfp64, nir->options->lower_doubles_options);
 
-   nir_lower_bit_size(nir, lower_bit_size_callback, NULL);
+      /* Necessary to lower add -> sub and div -> mul/rcp */
+      OPT(nir_opt_algebraic);
+
+      lowered_64bit_ops |= progress;
+   } while (progress);
+
+   /* This needs to be run after the first optimization pass but before we
+    * lower indirect derefs away
+    */
+   if (compiler->supports_shader_constants) {
+      OPT(nir_opt_large_constants, NULL, 32);
+   }
+
+   OPT(nir_lower_bit_size, lower_bit_size_callback, (void *)compiler);
 
    if (is_scalar) {
       OPT(nir_lower_load_const_to_scalar);
@@ -689,43 +729,80 @@ brw_preprocess_nir(const struct brw_compiler *compiler, nir_shader *nir)
 
    nir_variable_mode indirect_mask =
       brw_nir_no_indirect_mask(compiler, nir->info.stage);
-   nir_lower_indirect_derefs(nir, indirect_mask);
+   OPT(nir_lower_indirect_derefs, indirect_mask);
+
+   /* Lower array derefs of vectors for SSBO and UBO loads.  For both UBOs and
+    * SSBOs, our back-end is capable of loading an entire vec4 at a time and
+    * we would like to take advantage of that whenever possible regardless of
+    * whether or not the app gives us full loads.  This should allow the
+    * optimizer to combine UBO and SSBO load operations and save us some send
+    * messages.
+    */
+   OPT(nir_lower_array_deref_of_vec,
+       nir_var_mem_ubo | nir_var_mem_ssbo,
+       nir_lower_direct_array_deref_of_vec_load);
 
    /* Get rid of split copies */
-   nir = brw_nir_optimize(nir, compiler, is_scalar);
-
-   OPT(nir_remove_dead_variables, nir_var_local);
-
-   return nir;
+   brw_nir_optimize(nir, compiler, is_scalar, false);
 }
 
 void
 brw_nir_link_shaders(const struct brw_compiler *compiler,
-                     nir_shader **producer, nir_shader **consumer)
+                     nir_shader *producer, nir_shader *consumer)
 {
-   NIR_PASS_V(*producer, nir_remove_dead_variables, nir_var_shader_out);
-   NIR_PASS_V(*consumer, nir_remove_dead_variables, nir_var_shader_in);
+   nir_lower_io_arrays_to_elements(producer, consumer);
+   nir_validate_shader(producer, "after nir_lower_io_arrays_to_elements");
+   nir_validate_shader(consumer, "after nir_lower_io_arrays_to_elements");
+
+   const bool p_is_scalar = compiler->scalar_stage[producer->info.stage];
+   const bool c_is_scalar = compiler->scalar_stage[consumer->info.stage];
+
+   if (p_is_scalar && c_is_scalar) {
+      NIR_PASS_V(producer, nir_lower_io_to_scalar_early, nir_var_shader_out);
+      NIR_PASS_V(consumer, nir_lower_io_to_scalar_early, nir_var_shader_in);
+      brw_nir_optimize(producer, compiler, p_is_scalar, false);
+      brw_nir_optimize(consumer, compiler, c_is_scalar, false);
+   }
+
+   if (nir_link_opt_varyings(producer, consumer))
+      brw_nir_optimize(consumer, compiler, c_is_scalar, false);
 
-   if (nir_remove_unused_varyings(*producer, *consumer)) {
-      NIR_PASS_V(*producer, nir_lower_global_vars_to_local);
-      NIR_PASS_V(*consumer, nir_lower_global_vars_to_local);
+   NIR_PASS_V(producer, nir_remove_dead_variables, nir_var_shader_out);
+   NIR_PASS_V(consumer, nir_remove_dead_variables, nir_var_shader_in);
+
+   if (nir_remove_unused_varyings(producer, consumer)) {
+      NIR_PASS_V(producer, nir_lower_global_vars_to_local);
+      NIR_PASS_V(consumer, nir_lower_global_vars_to_local);
 
       /* The backend might not be able to handle indirects on
        * temporaries so we need to lower indirects on any of the
        * varyings we have demoted here.
        */
-      NIR_PASS_V(*producer, nir_lower_indirect_derefs,
-                 brw_nir_no_indirect_mask(compiler, (*producer)->info.stage));
-      NIR_PASS_V(*consumer, nir_lower_indirect_derefs,
-                 brw_nir_no_indirect_mask(compiler, (*consumer)->info.stage));
+      NIR_PASS_V(producer, nir_lower_indirect_derefs,
+                 brw_nir_no_indirect_mask(compiler, producer->info.stage));
+      NIR_PASS_V(consumer, nir_lower_indirect_derefs,
+                 brw_nir_no_indirect_mask(compiler, consumer->info.stage));
+
+      brw_nir_optimize(producer, compiler, p_is_scalar, false);
+      brw_nir_optimize(consumer, compiler, c_is_scalar, false);
+   }
 
-      const bool p_is_scalar =
-         compiler->scalar_stage[(*producer)->info.stage];
-      *producer = brw_nir_optimize(*producer, compiler, p_is_scalar);
+   NIR_PASS_V(producer, nir_lower_io_to_vector, nir_var_shader_out);
+   NIR_PASS_V(producer, nir_opt_combine_stores, nir_var_shader_out);
+   NIR_PASS_V(consumer, nir_lower_io_to_vector, nir_var_shader_in);
 
-      const bool c_is_scalar =
-         compiler->scalar_stage[(*producer)->info.stage];
-      *consumer = brw_nir_optimize(*consumer, compiler, c_is_scalar);
+   if (producer->info.stage != MESA_SHADER_TESS_CTRL) {
+      /* Calling lower_io_to_vector creates output variable writes with
+       * write-masks.  On non-TCS outputs, the back-end can't handle it and we
+       * need to call nir_lower_io_to_temporaries to get rid of them.  This,
+       * in turn, creates temporary variables and extra copy_deref intrinsics
+       * that we need to clean up.
+       */
+      NIR_PASS_V(producer, nir_lower_io_to_temporaries,
+                 nir_shader_get_entrypoint(producer), true, false);
+      NIR_PASS_V(producer, nir_lower_global_vars_to_local);
+      NIR_PASS_V(producer, nir_split_var_copies);
+      NIR_PASS_V(producer, nir_lower_var_copies);
    }
 }
 
@@ -736,7 +813,7 @@ brw_nir_link_shaders(const struct brw_compiler *compiler,
  * called on a shader, it will no longer be in SSA form so most optimizations
  * will not work.
  */
-nir_shader *
+void
 brw_postprocess_nir(nir_shader *nir, const struct brw_compiler *compiler,
                     bool is_scalar)
 {
@@ -746,27 +823,68 @@ brw_postprocess_nir(nir_shader *nir, const struct brw_compiler *compiler,
 
    UNUSED bool progress; /* Written by OPT */
 
+   OPT(brw_nir_lower_mem_access_bit_sizes);
+   OPT(nir_lower_int64, nir->options->lower_int64_options);
 
    do {
       progress = false;
       OPT(nir_opt_algebraic_before_ffma);
    } while (progress);
 
-   nir = brw_nir_optimize(nir, compiler, is_scalar);
+   brw_nir_optimize(nir, compiler, is_scalar, false);
 
    if (devinfo->gen >= 6) {
       /* Try and fuse multiply-adds */
       OPT(brw_nir_opt_peephole_ffma);
    }
 
-   OPT(nir_opt_algebraic_late);
+   if (OPT(nir_opt_comparison_pre)) {
+      OPT(nir_copy_prop);
+      OPT(nir_opt_dce);
+      OPT(nir_opt_cse);
+
+      /* Do the select peephole again.  nir_opt_comparison_pre (combined with
+       * the other optimization passes) will have removed at least one
+       * instruction from one of the branches of the if-statement, so now it
+       * might be under the threshold of conversion to bcsel.
+       *
+       * See brw_nir_optimize for the explanation of is_vec4_tessellation.
+       */
+      const bool is_vec4_tessellation = !is_scalar &&
+         (nir->info.stage == MESA_SHADER_TESS_CTRL ||
+          nir->info.stage == MESA_SHADER_TESS_EVAL);
+      OPT(nir_opt_peephole_select, 0, is_vec4_tessellation, false);
+      OPT(nir_opt_peephole_select, 1, is_vec4_tessellation,
+          compiler->devinfo->gen >= 6);
+   }
+
+   do {
+      progress = false;
+      if (OPT(nir_opt_algebraic_late)) {
+         /* At this late stage, anything that makes more constants will wreak
+          * havoc on the vec4 backend.  The handling of constants in the vec4
+          * backend is not good.
+          */
+         if (is_scalar) {
+            OPT(nir_opt_constant_folding);
+            OPT(nir_copy_prop);
+         }
+         OPT(nir_opt_dce);
+         OPT(nir_opt_cse);
+      }
+   } while (progress);
+
 
-   OPT(nir_lower_to_source_mods);
+   OPT(brw_nir_lower_conversions);
+
+   if (is_scalar)
+      OPT(nir_lower_alu_to_scalar, NULL);
+   OPT(nir_lower_to_source_mods, nir_lower_all_source_mods);
    OPT(nir_copy_prop);
    OPT(nir_opt_dce);
    OPT(nir_opt_move_comparisons);
 
-   OPT(nir_lower_deref_instrs, nir_lower_load_store_derefs);
+   OPT(nir_lower_bool_to_int32);
 
    OPT(nir_lower_locals_to_regs);
 
@@ -789,6 +907,11 @@ brw_postprocess_nir(nir_shader *nir, const struct brw_compiler *compiler,
       OPT(nir_lower_vec_to_movs);
    }
 
+   OPT(nir_opt_dce);
+
+   if (OPT(nir_opt_rematerialize_compares))
+      OPT(nir_opt_dce);
+
    /* This is the last pass we run before we start emitting stuff.  It
     * determines when we need to insert boolean resolves on Gen <= 5.  We
     * run it last because it stashes data in instr->pass_flags and we don't
@@ -804,18 +927,19 @@ brw_postprocess_nir(nir_shader *nir, const struct brw_compiler *compiler,
               _mesa_shader_stage_to_string(nir->info.stage));
       nir_print_shader(nir, stderr);
    }
-
-   return nir;
 }
 
-nir_shader *
+void
 brw_nir_apply_sampler_key(nir_shader *nir,
                           const struct brw_compiler *compiler,
                           const struct brw_sampler_prog_key_data *key_tex,
                           bool is_scalar)
 {
    const struct gen_device_info *devinfo = compiler->devinfo;
-   nir_lower_tex_options tex_options = { 0 };
+   nir_lower_tex_options tex_options = {
+      .lower_txd_clamp_bindless_sampler = true,
+      .lower_txd_clamp_if_sampler_index_not_lt_16 = true,
+   };
 
    /* Iron Lake and prior require lowering of all rectangle textures */
    if (devinfo->gen < 6)
@@ -845,13 +969,17 @@ brw_nir_apply_sampler_key(nir_shader *nir,
    tex_options.lower_y_u_v_external = key_tex->y_u_v_image_mask;
    tex_options.lower_yx_xuxv_external = key_tex->yx_xuxv_image_mask;
    tex_options.lower_xy_uxvx_external = key_tex->xy_uxvx_image_mask;
+   tex_options.lower_ayuv_external = key_tex->ayuv_image_mask;
+   tex_options.lower_xyuv_external = key_tex->xyuv_image_mask;
+
+   /* Set up the array of scaling factors for each texture. */
+   memcpy(&tex_options.scale_factors, &key_tex->scale_factors,
+          sizeof(tex_options.scale_factors));
 
    if (nir_lower_tex(nir, &tex_options)) {
-      nir_validate_shader(nir);
-      nir = brw_nir_optimize(nir, compiler, is_scalar);
+      nir_validate_shader(nir, "after nir_lower_tex");
+      brw_nir_optimize(nir, compiler, is_scalar, false);
    }
-
-   return nir;
 }
 
 enum brw_reg_type
@@ -881,6 +1009,10 @@ brw_type_for_nir_type(const struct gen_device_info *devinfo, nir_alu_type type)
       return BRW_REGISTER_TYPE_W;
    case nir_type_uint16:
       return BRW_REGISTER_TYPE_UW;
+   case nir_type_int8:
+      return BRW_REGISTER_TYPE_B;
+   case nir_type_uint8:
+      return BRW_REGISTER_TYPE_UB;
    default:
       unreachable("unknown type");
    }
@@ -923,3 +1055,83 @@ brw_glsl_base_type_for_nir_type(nir_alu_type type)
       unreachable("bad type");
    }
 }
+
+nir_shader *
+brw_nir_create_passthrough_tcs(void *mem_ctx, const struct brw_compiler *compiler,
+                               const nir_shader_compiler_options *options,
+                               const struct brw_tcs_prog_key *key)
+{
+   nir_builder b;
+   nir_builder_init_simple_shader(&b, mem_ctx, MESA_SHADER_TESS_CTRL,
+                                  options);
+   nir_shader *nir = b.shader;
+   nir_variable *var;
+   nir_intrinsic_instr *load;
+   nir_intrinsic_instr *store;
+   nir_ssa_def *zero = nir_imm_int(&b, 0);
+   nir_ssa_def *invoc_id = nir_load_invocation_id(&b);
+
+   nir->info.inputs_read = key->outputs_written &
+      ~(VARYING_BIT_TESS_LEVEL_INNER | VARYING_BIT_TESS_LEVEL_OUTER);
+   nir->info.outputs_written = key->outputs_written;
+   nir->info.tess.tcs_vertices_out = key->input_vertices;
+   nir->info.name = ralloc_strdup(nir, "passthrough");
+   nir->num_uniforms = 8 * sizeof(uint32_t);
+
+   var = nir_variable_create(nir, nir_var_uniform, glsl_vec4_type(), "hdr_0");
+   var->data.location = 0;
+   var = nir_variable_create(nir, nir_var_uniform, glsl_vec4_type(), "hdr_1");
+   var->data.location = 1;
+
+   /* Write the patch URB header. */
+   for (int i = 0; i <= 1; i++) {
+      load = nir_intrinsic_instr_create(nir, nir_intrinsic_load_uniform);
+      load->num_components = 4;
+      load->src[0] = nir_src_for_ssa(zero);
+      nir_ssa_dest_init(&load->instr, &load->dest, 4, 32, NULL);
+      nir_intrinsic_set_base(load, i * 4 * sizeof(uint32_t));
+      nir_builder_instr_insert(&b, &load->instr);
+
+      store = nir_intrinsic_instr_create(nir, nir_intrinsic_store_output);
+      store->num_components = 4;
+      store->src[0] = nir_src_for_ssa(&load->dest.ssa);
+      store->src[1] = nir_src_for_ssa(zero);
+      nir_intrinsic_set_base(store, VARYING_SLOT_TESS_LEVEL_INNER - i);
+      nir_intrinsic_set_write_mask(store, WRITEMASK_XYZW);
+      nir_builder_instr_insert(&b, &store->instr);
+   }
+
+   /* Copy inputs to outputs. */
+   uint64_t varyings = nir->info.inputs_read;
+
+   while (varyings != 0) {
+      const int varying = ffsll(varyings) - 1;
+
+      load = nir_intrinsic_instr_create(nir,
+                                        nir_intrinsic_load_per_vertex_input);
+      load->num_components = 4;
+      load->src[0] = nir_src_for_ssa(invoc_id);
+      load->src[1] = nir_src_for_ssa(zero);
+      nir_ssa_dest_init(&load->instr, &load->dest, 4, 32, NULL);
+      nir_intrinsic_set_base(load, varying);
+      nir_builder_instr_insert(&b, &load->instr);
+
+      store = nir_intrinsic_instr_create(nir,
+                                         nir_intrinsic_store_per_vertex_output);
+      store->num_components = 4;
+      store->src[0] = nir_src_for_ssa(&load->dest.ssa);
+      store->src[1] = nir_src_for_ssa(invoc_id);
+      store->src[2] = nir_src_for_ssa(zero);
+      nir_intrinsic_set_base(store, varying);
+      nir_intrinsic_set_write_mask(store, WRITEMASK_XYZW);
+      nir_builder_instr_insert(&b, &store->instr);
+
+      varyings &= ~BITFIELD64_BIT(varying);
+   }
+
+   nir_validate_shader(nir, "in brw_nir_create_passthrough_tcs");
+
+   brw_preprocess_nir(compiler, nir, NULL);
+
+   return nir;
+}