radeonsi: rename num_memory_instructions -> num_memory_stores
diff --git a/src/gallium/drivers/radeonsi/si_shader_nir.c b/src/gallium/drivers/radeonsi/si_shader_nir.c
index 6b8bea1c11cf48cb16176d5367e0697e429fbd9e..c95e949c6af6d5a2841d996c400bbc206b4a0ac7 100644
@@ -45,122 +45,185 @@ static const nir_deref_instr *tex_get_texture_deref(nir_tex_instr *instr)
    return NULL;
 }
 
-static nir_variable *intrinsic_get_var(nir_intrinsic_instr *instr)
+static void scan_io_usage(struct si_shader_info *info, nir_intrinsic_instr *intr,
+                          bool is_input)
 {
-   return nir_deref_instr_get_variable(nir_src_as_deref(instr->src[0]));
-}
+   unsigned interp = INTERP_MODE_FLAT; /* load_input uses flat shading */
 
-static void gather_usage_helper(const nir_deref_instr **deref_ptr, unsigned location, uint8_t mask,
-                                uint8_t *usage_mask)
-{
-   for (; *deref_ptr; deref_ptr++) {
-      const nir_deref_instr *deref = *deref_ptr;
-      switch (deref->deref_type) {
-      case nir_deref_type_array: {
-         unsigned elem_size = glsl_count_attribute_slots(deref->type, false);
-         if (nir_src_is_const(deref->arr.index)) {
-            location += elem_size * nir_src_as_uint(deref->arr.index);
-         } else {
-            unsigned array_elems = glsl_get_length(deref_ptr[-1]->type);
-            for (unsigned i = 0; i < array_elems; i++) {
-               gather_usage_helper(deref_ptr + 1, location + elem_size * i, mask, usage_mask);
-            }
-            return;
-         }
-         break;
-      }
-      case nir_deref_type_struct: {
-         const struct glsl_type *parent_type = deref_ptr[-1]->type;
-         unsigned index = deref->strct.index;
-         for (unsigned i = 0; i < index; i++) {
-            const struct glsl_type *ft = glsl_get_struct_field(parent_type, i);
-            location += glsl_count_attribute_slots(ft, false);
-         }
-         break;
-      }
-      default:
-         unreachable("Unhandled deref type in gather_components_used_helper");
+   if (intr->intrinsic == nir_intrinsic_load_interpolated_input) {
+      nir_intrinsic_instr *baryc = nir_instr_as_intrinsic(intr->src[0].ssa->parent_instr);
+
+      if (baryc) {
+         if (nir_intrinsic_infos[baryc->intrinsic].index_map[NIR_INTRINSIC_INTERP_MODE] > 0)
+            interp = nir_intrinsic_interp_mode(baryc);
+         else
+            unreachable("unknown barycentric intrinsic");
+      } else {
+         unreachable("unknown barycentric expression");
       }
    }
 
-   usage_mask[location] |= mask & 0xf;
-   if (mask & 0xf0)
-      usage_mask[location + 1] |= (mask >> 4) & 0xf;
-}
+   unsigned mask, bit_size;
+   bool dual_slot, is_output_load;
 
-static void gather_usage(const nir_deref_instr *deref, uint8_t mask, uint8_t *usage_mask)
-{
-   nir_deref_path path;
-   nir_deref_path_init(&path, (nir_deref_instr *)deref, NULL);
+   if (nir_intrinsic_infos[intr->intrinsic].index_map[NIR_INTRINSIC_WRMASK] > 0) {
+      mask = nir_intrinsic_write_mask(intr); /* store */
+      bit_size = nir_src_bit_size(intr->src[0]);
+      dual_slot = bit_size == 64 && nir_src_num_components(intr->src[0]) >= 3;
+      is_output_load = false;
+   } else {
+      mask = nir_ssa_def_components_read(&intr->dest.ssa); /* load */
+      bit_size = intr->dest.ssa.bit_size;
+      dual_slot = bit_size == 64 && intr->dest.ssa.num_components >= 3;
+      is_output_load = !is_input;
+   }
 
-   unsigned location_frac = path.path[0]->var->data.location_frac;
-   if (glsl_type_is_64bit(deref->type)) {
-      uint8_t new_mask = 0;
+   /* Convert the 64-bit component mask to a 32-bit component mask. */
+   if (bit_size == 64) {
+      unsigned new_mask = 0;
       for (unsigned i = 0; i < 4; i++) {
          if (mask & (1 << i))
             new_mask |= 0x3 << (2 * i);
       }
-      mask = new_mask << location_frac;
-   } else {
-      mask <<= location_frac;
-      mask &= 0xf;
+      mask = new_mask;
    }
 
-   gather_usage_helper((const nir_deref_instr **)&path.path[1],
-                       path.path[0]->var->data.driver_location, mask, usage_mask);
+   /* Convert the 16-bit component mask to a 32-bit component mask. */
+   if (bit_size == 16) {
+      unsigned new_mask = 0;
+      for (unsigned i = 0; i < 4; i++) {
+         if (mask & (1 << i))
+            new_mask |= 0x1 << (i / 2);
+      }
+      mask = new_mask;
+   }
 
-   nir_deref_path_finish(&path);
-}
+   mask <<= nir_intrinsic_component(intr);
 
-static void gather_intrinsic_load_deref_input_info(const nir_shader *nir,
-                                                   const nir_intrinsic_instr *instr,
-                                                   const nir_deref_instr *deref,
-                                                   struct si_shader_info *info)
-{
-   switch (nir->info.stage) {
-   case MESA_SHADER_VERTEX:
-      gather_usage(deref, nir_ssa_def_components_read(&instr->dest.ssa), info->input_usage_mask);
-   default:;
-   }
-}
+   nir_src offset = *nir_get_io_offset_src(intr);
+   bool indirect = !nir_src_is_const(offset);
+   if (!indirect)
+      assert(nir_src_as_uint(offset) == 0);
 
-static void gather_intrinsic_load_deref_output_info(const nir_shader *nir,
-                                                    const nir_intrinsic_instr *instr,
-                                                    nir_variable *var, struct si_shader_info *info)
-{
-   assert(var && var->data.mode == nir_var_shader_out);
-
-   switch (nir->info.stage) {
-   case MESA_SHADER_TESS_CTRL:
-      if (var->data.location == VARYING_SLOT_TESS_LEVEL_INNER ||
-          var->data.location == VARYING_SLOT_TESS_LEVEL_OUTER)
-         info->reads_tessfactor_outputs = true;
-      else if (var->data.patch)
-         info->reads_perpatch_outputs = true;
-      else
-         info->reads_pervertex_outputs = true;
-      break;
-
-   case MESA_SHADER_FRAGMENT:
-      if (var->data.fb_fetch_output)
-         info->uses_fbfetch = true;
-      break;
-   default:;
+   unsigned semantic = 0;
+   /* VS inputs don't have semantics. */
+   if (info->stage != MESA_SHADER_VERTEX || !is_input)
+      semantic = nir_intrinsic_io_semantics(intr).location;
+
+   if (info->stage == MESA_SHADER_FRAGMENT && !is_input) {
+      /* Never use FRAG_RESULT_COLOR directly. */
+      if (semantic == FRAG_RESULT_COLOR) {
+         semantic = FRAG_RESULT_DATA0;
+         info->color0_writes_all_cbufs = true;
+      }
+      semantic += nir_intrinsic_io_semantics(intr).dual_source_blend_index;
    }
-}
 
-static void gather_intrinsic_store_deref_output_info(const nir_shader *nir,
-                                                     const nir_intrinsic_instr *instr,
-                                                     const nir_deref_instr *deref,
-                                                     struct si_shader_info *info)
-{
-   switch (nir->info.stage) {
-   case MESA_SHADER_VERTEX:    /* needed by LS, ES */
-   case MESA_SHADER_TESS_EVAL: /* needed by ES */
-   case MESA_SHADER_GEOMETRY:
-      gather_usage(deref, nir_intrinsic_write_mask(instr), info->output_usagemask);
-      break;
-   default:;
+   unsigned driver_location = nir_intrinsic_base(intr);
+   unsigned num_slots = indirect ? nir_intrinsic_io_semantics(intr).num_slots : (1 + dual_slot);
+
+   if (is_input) {
+      assert(driver_location + num_slots <= ARRAY_SIZE(info->input_usage_mask));
+
+      for (unsigned i = 0; i < num_slots; i++) {
+         unsigned loc = driver_location + i;
+         unsigned slot_mask = (dual_slot && i % 2 ? mask >> 4 : mask) & 0xf;
+
+         info->input_semantic[loc] = semantic + i;
+         info->input_interpolate[loc] = interp;
+
+         if (slot_mask) {
+            info->input_usage_mask[loc] |= slot_mask;
+            info->num_inputs = MAX2(info->num_inputs, loc + 1);
+
+            if (semantic == VARYING_SLOT_PRIMITIVE_ID)
+               info->uses_primid = true;
+         }
+      }
+   } else {
+      /* Outputs. */
+      assert(driver_location + num_slots <= ARRAY_SIZE(info->output_usagemask));
+      assert(semantic + num_slots < ARRAY_SIZE(info->output_semantic_to_slot));
+
+      for (unsigned i = 0; i < num_slots; i++) {
+         unsigned loc = driver_location + i;
+         unsigned slot_mask = (dual_slot && i % 2 ? mask >> 4 : mask) & 0xf;
+
+         info->output_semantic[loc] = semantic + i;
+         info->output_semantic_to_slot[semantic + i] = loc;
+
+         if (is_output_load) {
+            /* Output loads have only a few things that we need to track. */
+            info->output_readmask[loc] |= slot_mask;
+
+            if (info->stage == MESA_SHADER_FRAGMENT &&
+                nir_intrinsic_io_semantics(intr).fb_fetch_output)
+               info->uses_fbfetch = true;
+         } else if (slot_mask) {
+            /* Output stores. */
+            if (info->stage == MESA_SHADER_GEOMETRY) {
+               unsigned gs_streams = (uint32_t)nir_intrinsic_io_semantics(intr).gs_streams <<
+                                     (nir_intrinsic_component(intr) * 2);
+               unsigned new_mask = slot_mask & ~info->output_usagemask[loc];
+
+               for (unsigned i = 0; i < 4; i++) {
+                  unsigned stream = (gs_streams >> (i * 2)) & 0x3;
+
+                  if (new_mask & (1 << i)) {
+                     info->output_streams[loc] |= stream << (i * 2);
+                     info->num_stream_output_components[stream]++;
+                  }
+               }
+            }
+
+            info->output_usagemask[loc] |= slot_mask;
+            info->num_outputs = MAX2(info->num_outputs, loc + 1);
+
+            if (info->stage == MESA_SHADER_FRAGMENT) {
+               switch (semantic) {
+               case FRAG_RESULT_DEPTH:
+                  info->writes_z = true;
+                  break;
+               case FRAG_RESULT_STENCIL:
+                  info->writes_stencil = true;
+                  break;
+               case FRAG_RESULT_SAMPLE_MASK:
+                  info->writes_samplemask = true;
+                  break;
+               default:
+                  if (semantic >= FRAG_RESULT_DATA0 && semantic <= FRAG_RESULT_DATA7) {
+                     unsigned index = semantic - FRAG_RESULT_DATA0;
+                     info->colors_written |= 1 << (index + i);
+                  }
+                  break;
+               }
+            } else {
+               switch (semantic) {
+               case VARYING_SLOT_PRIMITIVE_ID:
+                  info->writes_primid = true;
+                  break;
+               case VARYING_SLOT_VIEWPORT:
+                  info->writes_viewport_index = true;
+                  break;
+               case VARYING_SLOT_LAYER:
+                  info->writes_layer = true;
+                  break;
+               case VARYING_SLOT_PSIZ:
+                  info->writes_psize = true;
+                  break;
+               case VARYING_SLOT_CLIP_VERTEX:
+                  info->writes_clipvertex = true;
+                  break;
+               case VARYING_SLOT_EDGE:
+                  info->writes_edgeflag = true;
+                  break;
+               case VARYING_SLOT_POS:
+                  info->writes_position = true;
+                  break;
+               }
+            }
+         }
+      }
    }
 }
 
@@ -187,9 +250,7 @@ static void scan_instruction(const struct nir_shader *nir, struct si_shader_info
       const nir_deref_instr *deref = tex_get_texture_deref(tex);
       nir_variable *var = deref ? nir_deref_instr_get_variable(deref) : NULL;
 
-      if (!var) {
-         info->samplers_declared |= u_bit_consecutive(tex->sampler_index, 1);
-      } else {
+      if (var) {
          if (deref->mode != nir_var_uniform || var->data.bindless)
             info->uses_bindless_samplers = true;
       }
@@ -226,7 +287,7 @@ static void scan_instruction(const struct nir_shader *nir, struct si_shader_info
          break;
       case nir_intrinsic_load_local_group_size:
          /* The block size is translated to IMM with a fixed block size. */
-         if (info->properties[TGSI_PROPERTY_CS_FIXED_BLOCK_WIDTH] == 0)
+         if (info->base.cs.local_size[0] == 0)
             info->uses_block_size = true;
          break;
       case nir_intrinsic_load_local_invocation_id:
@@ -242,15 +303,6 @@ static void scan_instruction(const struct nir_shader *nir, struct si_shader_info
          }
          break;
       }
-      case nir_intrinsic_load_vertex_id:
-         info->uses_vertexid = 1;
-         break;
-      case nir_intrinsic_load_vertex_id_zero_base:
-         info->uses_vertexid_nobase = 1;
-         break;
-      case nir_intrinsic_load_base_vertex:
-         info->uses_basevertex = 1;
-         break;
       case nir_intrinsic_load_draw_id:
          info->uses_drawid = 1;
          break;
@@ -272,11 +324,11 @@ static void scan_instruction(const struct nir_shader *nir, struct si_shader_info
       case nir_intrinsic_bindless_image_store:
          info->uses_bindless_images = true;
          info->writes_memory = true;
-         info->num_memory_instructions++; /* we only care about stores */
+         info->num_memory_stores++;
          break;
       case nir_intrinsic_image_deref_store:
          info->writes_memory = true;
-         info->num_memory_instructions++; /* we only care about stores */
+         info->num_memory_stores++;
          break;
       case nir_intrinsic_bindless_image_atomic_add:
       case nir_intrinsic_bindless_image_atomic_imin:
@@ -290,7 +342,7 @@ static void scan_instruction(const struct nir_shader *nir, struct si_shader_info
       case nir_intrinsic_bindless_image_atomic_comp_swap:
          info->uses_bindless_images = true;
          info->writes_memory = true;
-         info->num_memory_instructions++; /* we only care about stores */
+         info->num_memory_stores++;
          break;
       case nir_intrinsic_image_deref_atomic_add:
       case nir_intrinsic_image_deref_atomic_imin:
@@ -305,7 +357,7 @@ static void scan_instruction(const struct nir_shader *nir, struct si_shader_info
       case nir_intrinsic_image_deref_atomic_inc_wrap:
       case nir_intrinsic_image_deref_atomic_dec_wrap:
          info->writes_memory = true;
-         info->num_memory_instructions++; /* we only care about stores */
+         info->num_memory_stores++;
          break;
       case nir_intrinsic_store_ssbo:
       case nir_intrinsic_ssbo_atomic_add:
@@ -319,7 +371,7 @@ static void scan_instruction(const struct nir_shader *nir, struct si_shader_info
       case nir_intrinsic_ssbo_atomic_exchange:
       case nir_intrinsic_ssbo_atomic_comp_swap:
          info->writes_memory = true;
-         info->num_memory_instructions++; /* we only care about stores */
+         info->num_memory_stores++;
          break;
       case nir_intrinsic_load_color0:
       case nir_intrinsic_load_color1: {
@@ -345,9 +397,6 @@ static void scan_instruction(const struct nir_shader *nir, struct si_shader_info
                info->uses_linear_centroid = true;
             else
                info->uses_linear_center = true;
-
-            if (intr->intrinsic == nir_intrinsic_load_barycentric_at_sample)
-               info->uses_linear_opcode_interp_sample = true;
          } else {
             if (intr->intrinsic == nir_intrinsic_load_barycentric_sample)
                info->uses_persp_sample = true;
@@ -355,37 +404,29 @@ static void scan_instruction(const struct nir_shader *nir, struct si_shader_info
                info->uses_persp_centroid = true;
             else
                info->uses_persp_center = true;
-
-            if (intr->intrinsic == nir_intrinsic_load_barycentric_at_sample)
-               info->uses_persp_opcode_interp_sample = true;
          }
+         if (intr->intrinsic == nir_intrinsic_load_barycentric_at_sample)
+            info->uses_interp_at_sample = true;
          break;
       }
-      case nir_intrinsic_load_deref: {
-         nir_variable *var = intrinsic_get_var(intr);
-         nir_variable_mode mode = var->data.mode;
-
-         if (mode == nir_var_shader_in) {
-            /* PS inputs use the interpolated load intrinsics. */
-            assert(nir->info.stage != MESA_SHADER_FRAGMENT);
-            gather_intrinsic_load_deref_input_info(nir, intr, nir_src_as_deref(intr->src[0]), info);
-         } else if (mode == nir_var_shader_out) {
-            gather_intrinsic_load_deref_output_info(nir, intr, var, info);
-         }
-         break;
-      }
-      case nir_intrinsic_store_deref: {
-         nir_variable *var = intrinsic_get_var(intr);
-
-         if (var->data.mode == nir_var_shader_out)
-            gather_intrinsic_store_deref_output_info(nir, intr, nir_src_as_deref(intr->src[0]),
-                                                     info);
-         break;
-      }
+      case nir_intrinsic_load_input:
+      case nir_intrinsic_load_per_vertex_input:
+      case nir_intrinsic_load_input_vertex:
+      case nir_intrinsic_load_interpolated_input:
+         scan_io_usage(info, intr, true);
+         break;
+      case nir_intrinsic_load_output:
+      case nir_intrinsic_load_per_vertex_output:
+      case nir_intrinsic_store_output:
+      case nir_intrinsic_store_per_vertex_output:
+         scan_io_usage(info, intr, false);
+         break;
+      case nir_intrinsic_load_deref:
+      case nir_intrinsic_store_deref:
       case nir_intrinsic_interp_deref_at_centroid:
       case nir_intrinsic_interp_deref_at_sample:
       case nir_intrinsic_interp_deref_at_offset:
-         unreachable("interp opcodes should have been lowered");
+         unreachable("these opcodes should have been lowered");
          break;
       default:
          break;
@@ -393,378 +434,115 @@ static void scan_instruction(const struct nir_shader *nir, struct si_shader_info
    }
 }
 
-static void scan_output_slot(const nir_variable *var, unsigned var_idx, unsigned component,
-                             unsigned num_components, struct si_shader_info *info)
-{
-   assert(component + num_components <= 4);
-   assert(component < 4);
-
-   unsigned semantic_name, semantic_index;
-
-   unsigned location = var->data.location + var_idx;
-   unsigned drv_location = var->data.driver_location + var_idx;
-
-   if (info->processor == PIPE_SHADER_FRAGMENT) {
-      tgsi_get_gl_frag_result_semantic(location, &semantic_name, &semantic_index);
-
-      /* Adjust for dual source blending */
-      if (var->data.index > 0) {
-         semantic_index++;
-      }
-   } else {
-      tgsi_get_gl_varying_semantic(location, true, &semantic_name, &semantic_index);
-   }
-
-   ubyte usagemask = ((1 << num_components) - 1) << component;
-
-   unsigned gs_out_streams;
-   if (var->data.stream & NIR_STREAM_PACKED) {
-      gs_out_streams = var->data.stream & ~NIR_STREAM_PACKED;
-   } else {
-      assert(var->data.stream < 4);
-      gs_out_streams = 0;
-      for (unsigned j = 0; j < num_components; ++j)
-         gs_out_streams |= var->data.stream << (2 * (component + j));
-   }
-
-   unsigned streamx = gs_out_streams & 3;
-   unsigned streamy = (gs_out_streams >> 2) & 3;
-   unsigned streamz = (gs_out_streams >> 4) & 3;
-   unsigned streamw = (gs_out_streams >> 6) & 3;
-
-   if (usagemask & TGSI_WRITEMASK_X) {
-      info->output_streams[drv_location] |= streamx;
-      info->num_stream_output_components[streamx]++;
-   }
-   if (usagemask & TGSI_WRITEMASK_Y) {
-      info->output_streams[drv_location] |= streamy << 2;
-      info->num_stream_output_components[streamy]++;
-   }
-   if (usagemask & TGSI_WRITEMASK_Z) {
-      info->output_streams[drv_location] |= streamz << 4;
-      info->num_stream_output_components[streamz]++;
-   }
-   if (usagemask & TGSI_WRITEMASK_W) {
-      info->output_streams[drv_location] |= streamw << 6;
-      info->num_stream_output_components[streamw]++;
-   }
-
-   info->output_semantic_name[drv_location] = semantic_name;
-   info->output_semantic_index[drv_location] = semantic_index;
-
-   switch (semantic_name) {
-   case TGSI_SEMANTIC_PRIMID:
-      info->writes_primid = true;
-      break;
-   case TGSI_SEMANTIC_VIEWPORT_INDEX:
-      info->writes_viewport_index = true;
-      break;
-   case TGSI_SEMANTIC_LAYER:
-      info->writes_layer = true;
-      break;
-   case TGSI_SEMANTIC_PSIZE:
-      info->writes_psize = true;
-      break;
-   case TGSI_SEMANTIC_CLIPVERTEX:
-      info->writes_clipvertex = true;
-      break;
-   case TGSI_SEMANTIC_COLOR:
-      info->colors_written |= 1 << semantic_index;
-      break;
-   case TGSI_SEMANTIC_STENCIL:
-      info->writes_stencil = true;
-      break;
-   case TGSI_SEMANTIC_SAMPLEMASK:
-      info->writes_samplemask = true;
-      break;
-   case TGSI_SEMANTIC_EDGEFLAG:
-      info->writes_edgeflag = true;
-      break;
-   case TGSI_SEMANTIC_POSITION:
-      if (info->processor == PIPE_SHADER_FRAGMENT)
-         info->writes_z = true;
-      else
-         info->writes_position = true;
-      break;
-   }
-}
-
-static void scan_output_helper(const nir_variable *var, unsigned location,
-                               const struct glsl_type *type, struct si_shader_info *info)
-{
-   if (glsl_type_is_struct(type) || glsl_type_is_interface(type)) {
-      for (unsigned i = 0; i < glsl_get_length(type); i++) {
-         const struct glsl_type *ft = glsl_get_struct_field(type, i);
-         scan_output_helper(var, location, ft, info);
-         location += glsl_count_attribute_slots(ft, false);
-      }
-   } else if (glsl_type_is_array_or_matrix(type)) {
-      const struct glsl_type *elem_type = glsl_get_array_element(type);
-      unsigned num_elems = glsl_get_length(type);
-      if (var->data.compact) {
-         assert(glsl_type_is_scalar(elem_type));
-         assert(glsl_get_bit_size(elem_type) == 32);
-         unsigned component = var->data.location_frac;
-         scan_output_slot(var, location, component, MIN2(num_elems, 4 - component), info);
-         if (component + num_elems > 4) {
-            scan_output_slot(var, location + 1, 0, component + num_elems - 4, info);
-         }
-
-      } else {
-         unsigned elem_count = glsl_count_attribute_slots(elem_type, false);
-         for (unsigned i = 0; i < num_elems; i++) {
-            scan_output_helper(var, location, elem_type, info);
-            location += elem_count;
-         }
-      }
-   } else if (glsl_type_is_dual_slot(type)) {
-      unsigned component = var->data.location_frac;
-      scan_output_slot(var, location, component, 4 - component, info);
-      scan_output_slot(var, location + 1, 0, component + 2 * glsl_get_components(type) - 4, info);
-   } else {
-      unsigned component = var->data.location_frac;
-      assert(glsl_type_is_vector_or_scalar(type));
-      unsigned num_components = glsl_get_components(type);
-      if (glsl_type_is_64bit(type))
-         num_components *= 2;
-      scan_output_slot(var, location, component, num_components, info);
-   }
-}
-
 void si_nir_scan_shader(const struct nir_shader *nir, struct si_shader_info *info)
 {
    nir_function *func;
-   unsigned i;
 
-   info->processor = pipe_shader_type_from_mesa(nir->info.stage);
-
-   info->properties[TGSI_PROPERTY_NEXT_SHADER] = pipe_shader_type_from_mesa(nir->info.next_stage);
-
-   if (nir->info.stage == MESA_SHADER_VERTEX) {
-      info->properties[TGSI_PROPERTY_VS_WINDOW_SPACE_POSITION] = nir->info.vs.window_space_position;
-      info->properties[TGSI_PROPERTY_VS_BLIT_SGPRS_AMD] = nir->info.vs.blit_sgprs_amd;
-   }
-
-   if (nir->info.stage == MESA_SHADER_TESS_CTRL) {
-      info->properties[TGSI_PROPERTY_TCS_VERTICES_OUT] = nir->info.tess.tcs_vertices_out;
-   }
+   info->base = nir->info;
+   info->stage = nir->info.stage;
 
    if (nir->info.stage == MESA_SHADER_TESS_EVAL) {
-      if (nir->info.tess.primitive_mode == GL_ISOLINES)
-         info->properties[TGSI_PROPERTY_TES_PRIM_MODE] = PIPE_PRIM_LINES;
-      else
-         info->properties[TGSI_PROPERTY_TES_PRIM_MODE] = nir->info.tess.primitive_mode;
-
-      STATIC_ASSERT((TESS_SPACING_EQUAL + 1) % 3 == PIPE_TESS_SPACING_EQUAL);
-      STATIC_ASSERT((TESS_SPACING_FRACTIONAL_ODD + 1) % 3 == PIPE_TESS_SPACING_FRACTIONAL_ODD);
-      STATIC_ASSERT((TESS_SPACING_FRACTIONAL_EVEN + 1) % 3 == PIPE_TESS_SPACING_FRACTIONAL_EVEN);
-
-      info->properties[TGSI_PROPERTY_TES_SPACING] = (nir->info.tess.spacing + 1) % 3;
-      info->properties[TGSI_PROPERTY_TES_VERTEX_ORDER_CW] = !nir->info.tess.ccw;
-      info->properties[TGSI_PROPERTY_TES_POINT_MODE] = nir->info.tess.point_mode;
-   }
-
-   if (nir->info.stage == MESA_SHADER_GEOMETRY) {
-      info->properties[TGSI_PROPERTY_GS_INPUT_PRIM] = nir->info.gs.input_primitive;
-      info->properties[TGSI_PROPERTY_GS_OUTPUT_PRIM] = nir->info.gs.output_primitive;
-      info->properties[TGSI_PROPERTY_GS_MAX_OUTPUT_VERTICES] = nir->info.gs.vertices_out;
-      info->properties[TGSI_PROPERTY_GS_INVOCATIONS] = nir->info.gs.invocations;
+      if (info->base.tess.primitive_mode == GL_ISOLINES)
+         info->base.tess.primitive_mode = GL_LINES;
    }
 
    if (nir->info.stage == MESA_SHADER_FRAGMENT) {
-      info->properties[TGSI_PROPERTY_FS_EARLY_DEPTH_STENCIL] =
-         nir->info.fs.early_fragment_tests | nir->info.fs.post_depth_coverage;
-      info->properties[TGSI_PROPERTY_FS_POST_DEPTH_COVERAGE] = nir->info.fs.post_depth_coverage;
-
-      if (nir->info.fs.pixel_center_integer) {
-         info->properties[TGSI_PROPERTY_FS_COORD_PIXEL_CENTER] = TGSI_FS_COORD_PIXEL_CENTER_INTEGER;
+      /* post_depth_coverage implies early_fragment_tests */
+      info->base.fs.early_fragment_tests |= info->base.fs.post_depth_coverage;
+
+      info->color_interpolate[0] = nir->info.fs.color0_interp;
+      info->color_interpolate[1] = nir->info.fs.color1_interp;
+      for (unsigned i = 0; i < 2; i++) {
+         if (info->color_interpolate[i] == INTERP_MODE_NONE)
+            info->color_interpolate[i] = INTERP_MODE_COLOR;
       }
 
-      if (nir->info.fs.depth_layout != FRAG_DEPTH_LAYOUT_NONE) {
-         switch (nir->info.fs.depth_layout) {
-         case FRAG_DEPTH_LAYOUT_ANY:
-            info->properties[TGSI_PROPERTY_FS_DEPTH_LAYOUT] = TGSI_FS_DEPTH_LAYOUT_ANY;
-            break;
-         case FRAG_DEPTH_LAYOUT_GREATER:
-            info->properties[TGSI_PROPERTY_FS_DEPTH_LAYOUT] = TGSI_FS_DEPTH_LAYOUT_GREATER;
-            break;
-         case FRAG_DEPTH_LAYOUT_LESS:
-            info->properties[TGSI_PROPERTY_FS_DEPTH_LAYOUT] = TGSI_FS_DEPTH_LAYOUT_LESS;
-            break;
-         case FRAG_DEPTH_LAYOUT_UNCHANGED:
-            info->properties[TGSI_PROPERTY_FS_DEPTH_LAYOUT] = TGSI_FS_DEPTH_LAYOUT_UNCHANGED;
-            break;
-         default:
-            unreachable("Unknow depth layout");
-         }
-      }
+      info->color_interpolate_loc[0] = nir->info.fs.color0_sample ? TGSI_INTERPOLATE_LOC_SAMPLE :
+                                       nir->info.fs.color0_centroid ? TGSI_INTERPOLATE_LOC_CENTROID :
+                                                                      TGSI_INTERPOLATE_LOC_CENTER;
+      info->color_interpolate_loc[1] = nir->info.fs.color1_sample ? TGSI_INTERPOLATE_LOC_SAMPLE :
+                                       nir->info.fs.color1_centroid ? TGSI_INTERPOLATE_LOC_CENTROID :
+                                                                      TGSI_INTERPOLATE_LOC_CENTER;
    }
 
-   if (gl_shader_stage_is_compute(nir->info.stage)) {
-      info->properties[TGSI_PROPERTY_CS_FIXED_BLOCK_WIDTH] = nir->info.cs.local_size[0];
-      info->properties[TGSI_PROPERTY_CS_FIXED_BLOCK_HEIGHT] = nir->info.cs.local_size[1];
-      info->properties[TGSI_PROPERTY_CS_FIXED_BLOCK_DEPTH] = nir->info.cs.local_size[2];
-      info->properties[TGSI_PROPERTY_CS_USER_DATA_COMPONENTS_AMD] =
-         nir->info.cs.user_data_components_amd;
-   }
-
-   i = 0;
-   uint64_t processed_inputs = 0;
-   nir_foreach_variable (variable, &nir->inputs) {
-      unsigned semantic_name, semantic_index;
-
-      const struct glsl_type *type = variable->type;
-      if (nir_is_per_vertex_io(variable, nir->info.stage)) {
-         assert(glsl_type_is_array(type));
-         type = glsl_get_array_element(type);
-      }
-
-      unsigned attrib_count =
-         glsl_count_attribute_slots(type, nir->info.stage == MESA_SHADER_VERTEX);
-
-      i = variable->data.driver_location;
-
-      /* Vertex shader inputs don't have semantics. The state
-       * tracker has already mapped them to attributes via
-       * variable->data.driver_location.
-       */
-      if (nir->info.stage == MESA_SHADER_VERTEX)
-         continue;
-
-      for (unsigned j = 0; j < attrib_count; j++, i++) {
-
-         if (processed_inputs & ((uint64_t)1 << i))
-            continue;
-
-         processed_inputs |= ((uint64_t)1 << i);
-
-         tgsi_get_gl_varying_semantic(variable->data.location + j, true, &semantic_name,
-                                      &semantic_index);
-
-         info->input_semantic_name[i] = semantic_name;
-         info->input_semantic_index[i] = semantic_index;
-
-         if (semantic_name == TGSI_SEMANTIC_PRIMID)
-            info->uses_primid = true;
-
-         if (semantic_name == TGSI_SEMANTIC_COLOR) {
-            /* We only need this for color inputs. */
-            if (variable->data.sample)
-               info->input_interpolate_loc[i] = TGSI_INTERPOLATE_LOC_SAMPLE;
-            else if (variable->data.centroid)
-               info->input_interpolate_loc[i] = TGSI_INTERPOLATE_LOC_CENTROID;
-            else
-               info->input_interpolate_loc[i] = TGSI_INTERPOLATE_LOC_CENTER;
-         }
-
-         enum glsl_base_type base_type = glsl_get_base_type(glsl_without_array(variable->type));
-
-         switch (variable->data.interpolation) {
-         case INTERP_MODE_NONE:
-            if (glsl_base_type_is_integer(base_type)) {
-               info->input_interpolate[i] = TGSI_INTERPOLATE_CONSTANT;
-               break;
-            }
-
-            if (semantic_name == TGSI_SEMANTIC_COLOR) {
-               info->input_interpolate[i] = TGSI_INTERPOLATE_COLOR;
-               break;
-            }
-            /* fall-through */
-
-         case INTERP_MODE_SMOOTH:
-            assert(!glsl_base_type_is_integer(base_type));
-
-            info->input_interpolate[i] = TGSI_INTERPOLATE_PERSPECTIVE;
-            break;
-
-         case INTERP_MODE_NOPERSPECTIVE:
-            assert(!glsl_base_type_is_integer(base_type));
-
-            info->input_interpolate[i] = TGSI_INTERPOLATE_LINEAR;
-            break;
-
-         case INTERP_MODE_FLAT:
-            info->input_interpolate[i] = TGSI_INTERPOLATE_CONSTANT;
-            break;
-         }
-      }
-   }
-
-   nir_foreach_variable (variable, &nir->outputs) {
-      const struct glsl_type *type = variable->type;
-      if (nir_is_per_vertex_io(variable, nir->info.stage)) {
-         assert(glsl_type_is_array(type));
-         type = glsl_get_array_element(type);
-      }
-
-      ASSERTED unsigned attrib_count = glsl_count_attribute_slots(type, false);
-      scan_output_helper(variable, 0, type, info);
-
-      unsigned loc = variable->data.location;
-      if (nir->info.stage == MESA_SHADER_FRAGMENT && loc == FRAG_RESULT_COLOR &&
-          nir->info.outputs_written & (1ull << loc)) {
-         assert(attrib_count == 1);
-         info->properties[TGSI_PROPERTY_FS_COLOR0_WRITES_ALL_CBUFS] = true;
-      }
-   }
-
-   info->num_inputs = nir->num_inputs;
-   info->num_outputs = nir->num_outputs;
-
    info->constbuf0_num_slots = nir->num_uniforms;
-   info->shader_buffers_declared = u_bit_consecutive(0, nir->info.num_ssbos);
-   info->const_buffers_declared = u_bit_consecutive(0, nir->info.num_ubos);
-   info->images_declared = u_bit_consecutive(0, nir->info.num_images);
-   info->msaa_images_declared = nir->info.msaa_images;
-   info->image_buffers = nir->info.image_buffers;
-   info->samplers_declared = nir->info.textures_used;
-
-   info->num_written_clipdistance = nir->info.clip_distance_array_size;
-   info->num_written_culldistance = nir->info.cull_distance_array_size;
-   info->clipdist_writemask = u_bit_consecutive(0, info->num_written_clipdistance);
-   info->culldist_writemask = u_bit_consecutive(0, info->num_written_culldistance);
-
-   if (info->processor == PIPE_SHADER_FRAGMENT)
-      info->uses_kill = nir->info.fs.uses_discard;
 
    if (nir->info.stage == MESA_SHADER_TESS_CTRL) {
       info->tessfactors_are_def_in_all_invocs = ac_are_tessfactors_def_in_all_invocs(nir);
    }
 
+   memset(info->output_semantic_to_slot, -1, sizeof(info->output_semantic_to_slot));
+
    func = (struct nir_function *)exec_list_get_head_const(&nir->functions);
    nir_foreach_block (block, func->impl) {
       nir_foreach_instr (instr, block)
          scan_instruction(nir, info, instr);
    }
+
+   /* Add color inputs to the list of inputs. */
+   if (nir->info.stage == MESA_SHADER_FRAGMENT) {
+      for (unsigned i = 0; i < 2; i++) {
+         if ((info->colors_read >> (i * 4)) & 0xf) {
+            info->input_semantic[info->num_inputs] = VARYING_SLOT_COL0 + i;
+            info->input_interpolate[info->num_inputs] = info->color_interpolate[i];
+            info->input_usage_mask[info->num_inputs] = info->colors_read >> (i * 4);
+            info->num_inputs++;
+         }
+      }
+   }
+
+   /* Trim output read masks based on write masks. */
+   for (unsigned i = 0; i < info->num_outputs; i++)
+      info->output_readmask[i] &= info->output_usagemask[i];
 }
 
-static void si_nir_opts(struct nir_shader *nir)
+static void si_nir_opts(struct nir_shader *nir, bool first)
 {
    bool progress;
 
+   NIR_PASS_V(nir, nir_lower_vars_to_ssa);
+   NIR_PASS_V(nir, nir_lower_alu_to_scalar, NULL, NULL);
+   NIR_PASS_V(nir, nir_lower_phis_to_scalar);
+
    do {
       progress = false;
+      bool lower_alu_to_scalar = false;
+      bool lower_phis_to_scalar = false;
 
-      NIR_PASS_V(nir, nir_lower_vars_to_ssa);
+      if (first) {
+         bool opt_find_array_copies = false;
 
-      NIR_PASS(progress, nir, nir_opt_copy_prop_vars);
-      NIR_PASS(progress, nir, nir_opt_dead_write_vars);
+         NIR_PASS(progress, nir, nir_split_array_vars, nir_var_function_temp);
+         NIR_PASS(lower_alu_to_scalar, nir, nir_shrink_vec_array_vars, nir_var_function_temp);
+         NIR_PASS(opt_find_array_copies, nir, nir_opt_find_array_copies);
+         NIR_PASS(progress, nir, nir_opt_copy_prop_vars);
 
-      NIR_PASS_V(nir, nir_lower_alu_to_scalar, NULL, NULL);
-      NIR_PASS_V(nir, nir_lower_phis_to_scalar);
+         /* Call nir_lower_var_copies() to remove any copies introduced
+          * by nir_opt_find_array_copies().
+          */
+         if (opt_find_array_copies)
+            NIR_PASS(progress, nir, nir_lower_var_copies);
+         progress |= opt_find_array_copies;
+      } else {
+         NIR_PASS(progress, nir, nir_opt_copy_prop_vars);
+      }
 
+      NIR_PASS(progress, nir, nir_opt_dead_write_vars);
+
+      NIR_PASS(lower_alu_to_scalar, nir, nir_opt_trivial_continues);
       /* (Constant) copy propagation is needed for txf with offsets. */
       NIR_PASS(progress, nir, nir_copy_prop);
       NIR_PASS(progress, nir, nir_opt_remove_phis);
       NIR_PASS(progress, nir, nir_opt_dce);
-      if (nir_opt_trivial_continues(nir)) {
-         progress = true;
-         NIR_PASS(progress, nir, nir_copy_prop);
-         NIR_PASS(progress, nir, nir_opt_dce);
-      }
-      NIR_PASS(progress, nir, nir_opt_if, true);
+      NIR_PASS(lower_phis_to_scalar, nir, nir_opt_if, true);
       NIR_PASS(progress, nir, nir_opt_dead_cf);
+
+      if (lower_alu_to_scalar)
+         NIR_PASS_V(nir, nir_lower_alu_to_scalar, NULL, NULL);
+      if (lower_phis_to_scalar)
+         NIR_PASS_V(nir, nir_lower_phis_to_scalar);
+      progress |= lower_alu_to_scalar | lower_phis_to_scalar;
+
       NIR_PASS(progress, nir, nir_opt_cse);
       NIR_PASS(progress, nir, nir_opt_peephole_select, 8, true, true);
 
@@ -779,8 +557,7 @@ static void si_nir_opts(struct nir_shader *nir)
          assert(lower_flrp);
          bool lower_flrp_progress = false;
 
-         NIR_PASS(lower_flrp_progress, nir, nir_lower_flrp, lower_flrp, false /* always_precise */,
-                  nir->options->lower_ffma);
+         NIR_PASS(lower_flrp_progress, nir, nir_lower_flrp, lower_flrp, false /* always_precise */);
          if (lower_flrp_progress) {
             NIR_PASS(progress, nir, nir_opt_constant_folding);
             progress = true;
@@ -832,8 +609,14 @@ static void si_nir_lower_color(nir_shader *nir)
 
          if (var->data.location == VARYING_SLOT_COL0) {
             def = nir_load_color0(&b);
+            nir->info.fs.color0_interp = var->data.interpolation;
+            nir->info.fs.color0_sample = var->data.sample;
+            nir->info.fs.color0_centroid = var->data.centroid;
          } else if (var->data.location == VARYING_SLOT_COL1) {
             def = nir_load_color1(&b);
+            nir->info.fs.color1_interp = var->data.interpolation;
+            nir->info.fs.color1_sample = var->data.sample;
+            nir->info.fs.color1_centroid = var->data.centroid;
          } else {
             continue;
          }
@@ -844,42 +627,65 @@ static void si_nir_lower_color(nir_shader *nir)
    }
 }
 
-static void si_nir_lower_ps_inputs(struct nir_shader *nir)
+static void si_lower_io(struct nir_shader *nir)
 {
-   if (nir->info.stage != MESA_SHADER_FRAGMENT)
-      return;
+   /* HW supports indirect indexing for: | Enabled in driver
+    * -------------------------------------------------------
+    * VS inputs                          | No
+    * TCS inputs                         | Yes
+    * TES inputs                         | Yes
+    * GS inputs                          | No
+    * -------------------------------------------------------
+    * VS outputs before TCS              | No
+    * VS outputs before GS               | No
+    * TCS outputs                        | Yes
+    * TES outputs before GS              | No
+    */
+   bool has_indirect_inputs = nir->info.stage == MESA_SHADER_TESS_CTRL ||
+                              nir->info.stage == MESA_SHADER_TESS_EVAL;
+   bool has_indirect_outputs = nir->info.stage == MESA_SHADER_TESS_CTRL;
 
-   NIR_PASS_V(nir, nir_lower_io_to_temporaries, nir_shader_get_entrypoint(nir), false, true);
+   if (!has_indirect_inputs || !has_indirect_outputs) {
+      NIR_PASS_V(nir, nir_lower_io_to_temporaries, nir_shader_get_entrypoint(nir),
+                 !has_indirect_outputs, !has_indirect_inputs);
 
-   /* Since we're doing nir_lower_io_to_temporaries late, we need
-    * to lower all the copy_deref's introduced by
-    * lower_io_to_temporaries before calling nir_lower_io.
-    */
-   NIR_PASS_V(nir, nir_split_var_copies);
-   NIR_PASS_V(nir, nir_lower_var_copies);
-   NIR_PASS_V(nir, nir_lower_global_vars_to_local);
+      /* Since we're doing nir_lower_io_to_temporaries late, we need
+       * to lower all the copy_deref's introduced by
+       * lower_io_to_temporaries before calling nir_lower_io.
+       */
+      NIR_PASS_V(nir, nir_split_var_copies);
+      NIR_PASS_V(nir, nir_lower_var_copies);
+      NIR_PASS_V(nir, nir_lower_global_vars_to_local);
+   }
+
+   if (nir->info.stage == MESA_SHADER_FRAGMENT)
+      si_nir_lower_color(nir);
 
-   si_nir_lower_color(nir);
-   NIR_PASS_V(nir, nir_lower_io, nir_var_shader_in, type_size_vec4, 0);
+   NIR_PASS_V(nir, nir_lower_io, nir_var_shader_out | nir_var_shader_in,
+              type_size_vec4, 0);
+   nir->info.io_lowered = true;
 
    /* This pass needs actual constants */
    NIR_PASS_V(nir, nir_opt_constant_folding);
    NIR_PASS_V(nir, nir_io_add_const_offset_to_base, nir_var_shader_in);
-}
+   NIR_PASS_V(nir, nir_io_add_const_offset_to_base, nir_var_shader_out);
 
-void si_nir_adjust_driver_locations(struct nir_shader *nir)
-{
-       /* Adjust the driver location of inputs and outputs. the gallium frontend
-    * interprets them as slots, while the ac/nir backend interprets them
-    * as individual components.
+   /* Remove dead derefs, so that nir_validate doesn't fail. */
+   NIR_PASS_V(nir, nir_opt_dce);
+
+   /* Remove input and output nir_variables, because we don't need them
+    * anymore. Also remove uniforms, because those should have been lowered
+    * to UBOs already.
     */
-   if (nir->info.stage != MESA_SHADER_FRAGMENT) {
-      nir_foreach_variable (variable, &nir->inputs)
-         variable->data.driver_location *= 4;
-   }
+   unsigned modes = nir_var_shader_in | nir_var_shader_out | nir_var_uniform;
+   nir_foreach_variable_with_modes_safe(var, nir, modes) {
+      if (var->data.mode == nir_var_uniform &&
+          (glsl_type_get_image_count(var->type) ||
+           glsl_type_get_sampler_count(var->type)))
+         continue;
 
-   nir_foreach_variable (variable, &nir->outputs)
-      variable->data.driver_location *= 4;
+      exec_node_remove(&var->node);
+   }
 }
 
 /**
@@ -916,7 +722,7 @@ static void si_lower_nir(struct si_screen *sscreen, struct nir_shader *nir)
    NIR_PASS_V(nir, nir_lower_var_copies);
    NIR_PASS_V(nir, nir_lower_pack);
    NIR_PASS_V(nir, nir_opt_access);
-   si_nir_opts(nir);
+   si_nir_opts(nir, true);
 
    /* Lower large variables that are always constant with load_constant
     * intrinsics, which get turned into PC-relative loads from a data
@@ -926,15 +732,17 @@ static void si_lower_nir(struct si_screen *sscreen, struct nir_shader *nir)
     */
    bool changed = false;
    if (!nir->constant_data) {
+      /* The pass crashes if there are dead temps of lowered IO interface types. */
+      NIR_PASS_V(nir, nir_remove_dead_variables, nir_var_function_temp, NULL);
       NIR_PASS(changed, nir, nir_opt_large_constants, glsl_get_natural_size_align_bytes, 16);
    }
 
    changed |= ac_lower_indirect_derefs(nir, sscreen->info.chip_class);
    if (changed)
-      si_nir_opts(nir);
+      si_nir_opts(nir, false);
 
    NIR_PASS_V(nir, nir_lower_bool_to_int32);
-   NIR_PASS_V(nir, nir_remove_dead_variables, nir_var_function_temp);
+   NIR_PASS_V(nir, nir_remove_dead_variables, nir_var_function_temp, NULL);
 
    if (sscreen->debug_flags & DBG(FS_CORRECT_DERIVS_AFTER_KILL))
       NIR_PASS_V(nir, nir_lower_discard_to_demote);
@@ -946,6 +754,6 @@ void si_finalize_nir(struct pipe_screen *screen, void *nirptr, bool optimize)
    struct nir_shader *nir = (struct nir_shader *)nirptr;
 
    nir_shader_gather_info(nir, nir_shader_get_entrypoint(nir));
-   si_nir_lower_ps_inputs(nir);
+   si_lower_io(nir);
    si_lower_nir(sscreen, nir);
 }