aco: Don't declare 'Block' as class, but define as struct.
diff --git a/src/amd/compiler/aco_instruction_selection_setup.cpp b/src/amd/compiler/aco_instruction_selection_setup.cpp
index bf4b34c6e3e9dd5ac05d4e9b3efd68a34d42c1a9..90a92232343f706c3c8f3742ac65b13d96af4fd7 100644
--- a/src/amd/compiler/aco_instruction_selection_setup.cpp
+++ b/src/amd/compiler/aco_instruction_selection_setup.cpp
@@ -57,7 +57,6 @@ struct isel_context {
    nir_shader *shader;
    uint32_t constant_data_offset;
    Block *block;
-   bool *divergent_vals;
    std::unique_ptr<Temp[]> allocated;
    std::unordered_map<unsigned, std::array<Temp,NIR_MAX_VEC_COMPONENTS>> allocated_vec;
    Stage stage; /* Stage */
@@ -91,10 +90,6 @@ struct isel_context {
    /* GS inputs */
    Temp gs_wave_id;
 
-   /* gathered information */
-   uint64_t input_masks[MESA_SHADER_COMPUTE];
-   uint64_t output_masks[MESA_SHADER_COMPUTE];
-
    /* VS output information */
    bool export_clip_dists;
    unsigned num_clip_distances;
@@ -156,7 +151,7 @@ unsigned get_interp_input(nir_intrinsic_op intrin, enum glsl_interp_mode interp)
  * block instead. This is so that we can use any SGPR live-out of the side
  * without the branch without creating a linear phi in the invert or merge block. */
 bool
-sanitize_if(nir_function_impl *impl, bool *divergent, nir_if *nif)
+sanitize_if(nir_function_impl *impl, nir_if *nif)
 {
    //TODO: skip this if the condition is uniform and there are no divergent breaks/continues?
 
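With divergence now stored per SSA def (see the init_context() hunk below),
sanitize_if() no longer needs the `divergent` array threaded through as a
parameter. If the TODO above is ever implemented, the condition's uniformity
can be read straight off the NIR source; a hypothetical sketch of that
early-out, not part of this patch:

    /* Hypothetical: skip sanitizing when the branch condition is uniform.
     * nir_src_is_divergent() reads the divergence bit on the SSA def. */
    if (!nir_src_is_divergent(nif->condition))
       return false;
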
@@ -201,7 +196,7 @@ sanitize_if(nir_function_impl *impl, bool *divergent, nir_if *nif)
 }
 
 bool
-sanitize_cf_list(nir_function_impl *impl, bool *divergent, struct exec_list *cf_list)
+sanitize_cf_list(nir_function_impl *impl, struct exec_list *cf_list)
 {
    bool progress = false;
    foreach_list_typed(nir_cf_node, cf_node, node, cf_list) {
@@ -210,14 +205,14 @@ sanitize_cf_list(nir_function_impl *impl, bool *divergent, struct exec_list *cf_
          break;
       case nir_cf_node_if: {
          nir_if *nif = nir_cf_node_as_if(cf_node);
-         progress |= sanitize_cf_list(impl, divergent, &nif->then_list);
-         progress |= sanitize_cf_list(impl, divergent, &nif->else_list);
-         progress |= sanitize_if(impl, divergent, nif);
+         progress |= sanitize_cf_list(impl, &nif->then_list);
+         progress |= sanitize_cf_list(impl, &nif->else_list);
+         progress |= sanitize_if(impl, nif);
          break;
       }
       case nir_cf_node_loop: {
          nir_loop *loop = nir_cf_node_as_loop(cf_node);
-         progress |= sanitize_cf_list(impl, divergent, &loop->body);
+         progress |= sanitize_cf_list(impl, &loop->body);
          break;
       }
       case nir_cf_node_function:
@@ -242,11 +237,11 @@ void init_context(isel_context *ctx, nir_shader *shader)
    unsigned lane_mask_size = ctx->program->lane_mask.size();
 
    ctx->shader = shader;
-   ctx->divergent_vals = nir_divergence_analysis(shader, nir_divergence_view_index_uniform);
+   nir_divergence_analysis(shader, nir_divergence_view_index_uniform);
 
    /* sanitize control flow */
    nir_metadata_require(impl, nir_metadata_dominance);
-   sanitize_cf_list(impl, ctx->divergent_vals, &impl->body);
+   sanitize_cf_list(impl, &impl->body);
    nir_metadata_preserve(impl, (nir_metadata)~nir_metadata_block_index);
 
    /* we'll need this for isel */
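nir_divergence_analysis() now records its result directly on each nir_ssa_def
(the `divergent` bit), so the pass is run purely for that side effect and the
context no longer keeps a separately allocated bool array. A minimal sketch of
the new query pattern; the loop body is illustrative, not taken from the patch:

    nir_divergence_analysis(shader, nir_divergence_view_index_uniform);

    nir_foreach_block(block, impl) {
       nir_foreach_instr(instr, block) {
          if (instr->type != nir_instr_type_alu)
             continue;
          nir_alu_instr *alu = nir_instr_as_alu(instr);
          /* Replaces: ctx->divergent_vals[alu->dest.dest.ssa.index] */
          bool divergent = nir_dest_is_divergent(alu->dest.dest);
          (void)divergent;
       }
    }
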
@@ -336,10 +331,10 @@ void init_context(isel_context *ctx, nir_shader *shader)
                   case nir_op_b2f16:
                   case nir_op_b2f32:
                   case nir_op_mov:
-                     type = ctx->divergent_vals[alu_instr->dest.dest.ssa.index] ? RegType::vgpr : RegType::sgpr;
+                     type = nir_dest_is_divergent(alu_instr->dest.dest) ? RegType::vgpr : RegType::sgpr;
                      break;
                   case nir_op_bcsel:
-                     type = ctx->divergent_vals[alu_instr->dest.dest.ssa.index] ? RegType::vgpr : RegType::sgpr;
+                     type = nir_dest_is_divergent(alu_instr->dest.dest) ? RegType::vgpr : RegType::sgpr;
                      /* fallthrough */
                   default:
                      for (unsigned i = 0; i < nir_op_infos[alu_instr->op].num_inputs; i++) {
@@ -469,7 +464,7 @@ void init_context(isel_context *ctx, nir_shader *shader)
                   case nir_intrinsic_load_global:
                   case nir_intrinsic_vulkan_resource_index:
                   case nir_intrinsic_load_shared:
-                     type = ctx->divergent_vals[intrinsic->dest.ssa.index] ? RegType::vgpr : RegType::sgpr;
+                     type = nir_dest_is_divergent(intrinsic->dest) ? RegType::vgpr : RegType::sgpr;
                      break;
                   case nir_intrinsic_load_view_index:
                      type = ctx->stage == fragment_fs ? RegType::vgpr : RegType::sgpr;
@@ -528,9 +523,10 @@ void init_context(isel_context *ctx, nir_shader *shader)
 
                if (tex->dest.ssa.bit_size == 64)
                   size *= 2;
-               if (tex->op == nir_texop_texture_samples)
-                  assert(!ctx->divergent_vals[tex->dest.ssa.index]);
-               if (ctx->divergent_vals[tex->dest.ssa.index])
+               if (tex->op == nir_texop_texture_samples) {
+                  assert(!tex->dest.ssa.divergent);
+               }
+               if (nir_dest_is_divergent(tex->dest))
                   allocated[tex->dest.ssa.index] = Temp(0, RegClass(RegType::vgpr, size));
                else
                   allocated[tex->dest.ssa.index] = Temp(0, RegClass(RegType::sgpr, size));
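The init_context() hunks all follow the same rule: compute the result size in
32-bit dwords, then pick the register file from the divergence bit left on the
destination by the analysis. A condensed sketch; in the real function `size`
also accounts for per-opcode special cases:

    unsigned size = tex->dest.ssa.num_components;
    if (tex->dest.ssa.bit_size == 64)
       size *= 2; /* RegClass sizes are in dwords */
    RegType type = nir_dest_is_divergent(tex->dest) ? RegType::vgpr : RegType::sgpr;
    allocated[tex->dest.ssa.index] = Temp(0, RegClass(type, size));
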
@@ -562,7 +558,7 @@ void init_context(isel_context *ctx, nir_shader *shader)
                   break;
                }
 
-               if (ctx->divergent_vals[phi->dest.ssa.index]) {
+               if (nir_dest_is_divergent(phi->dest)) {
                   type = RegType::vgpr;
                } else {
                   type = RegType::sgpr;
@@ -744,7 +740,7 @@ setup_vs_output_info(isel_context *ctx, nir_shader *nir,
    if (outinfo->writes_pointsize || outinfo->writes_viewport_index || outinfo->writes_layer)
       pos_written |= 1 << 1;
 
-   uint64_t mask = ctx->output_masks[nir->info.stage];
+   uint64_t mask = nir->info.outputs_written;
    while (mask) {
       int idx = u_bit_scan64(&mask);
       if (idx >= VARYING_SLOT_VAR0 || idx == VARYING_SLOT_LAYER ||
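nir->info.outputs_written is the mask NIR's info gathering already maintains,
one bit per gl_varying_slot, which is what makes the hand-rolled
ctx->output_masks redundant. For reference, u_bit_scan64() clears the lowest
set bit of the mask and returns its index, so the loop visits each written
slot exactly once in increasing order:

    uint64_t mask = nir->info.outputs_written;
    while (mask) {
       int idx = u_bit_scan64(&mask); /* lowest remaining written slot */
       /* ... handle gl_varying_slot idx ... */
    }
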
@@ -789,17 +785,8 @@ setup_vs_variables(isel_context *ctx, nir_shader *nir)
    }
    nir_foreach_variable(variable, &nir->outputs)
    {
-      if (ctx->stage == vertex_geometry_gs)
-         variable->data.driver_location = util_bitcount64(ctx->output_masks[nir->info.stage] & ((1ull << variable->data.location) - 1ull)) * 4;
-      else if (ctx->stage == vertex_es ||
-               ctx->stage == vertex_ls ||
-               ctx->stage == vertex_tess_control_hs)
-         // TODO: make this more compact
-         variable->data.driver_location = shader_io_get_unique_index((gl_varying_slot) variable->data.location) * 4;
-      else if (ctx->stage == vertex_vs || ctx->stage == ngg_vertex_gs)
+      if (ctx->stage == vertex_vs || ctx->stage == ngg_vertex_gs)
          variable->data.driver_location = variable->data.location * 4;
-      else
-         unreachable("Unsupported VS stage");
 
       assert(variable->data.location >= 0 && variable->data.location <= UINT8_MAX);
       ctx->output_drv_loc_to_var_slot[MESA_SHADER_VERTEX][variable->data.driver_location / 4] = variable->data.location;
@@ -809,16 +796,8 @@ setup_vs_variables(isel_context *ctx, nir_shader *nir)
       radv_vs_output_info *outinfo = &ctx->program->info->vs.outinfo;
       setup_vs_output_info(ctx, nir, outinfo->export_prim_id,
                            ctx->options->key.vs_common_out.export_clip_dists, outinfo);
-   } else if (ctx->stage == vertex_geometry_gs || ctx->stage == vertex_es) {
-      /* TODO: radv_nir_shader_info_pass() already sets this but it's larger
-       * than it needs to be in order to set it better, we have to improve
-       * radv_nir_shader_info_pass() because gfx9_get_gs_info() uses
-       * esgs_itemsize and has to be done before compilation
-       */
-      /* radv_es_output_info *outinfo = &ctx->program->info->vs.es_info;
-      outinfo->esgs_itemsize = util_bitcount64(ctx->output_masks[nir->info.stage]) * 16u; */
    } else if (ctx->stage == vertex_ls) {
-      ctx->tcs_num_inputs = util_last_bit64(ctx->args->shader_info->vs.ls_outputs_written);
+      ctx->tcs_num_inputs = ctx->program->info->vs.num_linked_outputs;
    }
 
    if (ctx->stage == ngg_vertex_gs && ctx->args->options->key.vs_common_out.export_prim_id) {
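Note the semantic shift for vertex_ls: util_last_bit64() of a written-mask
yields the highest written slot index plus one (holes included), whereas
num_linked_outputs is the compact count of slots the NIR linker actually
matched against the TCS. Illustrative values:

    uint64_t written = (1ull << 0) | (1ull << 31); /* only slots 0 and 31 */
    unsigned old_count = util_last_bit64(written); /* == 32, gaps counted */
    /* num_linked_outputs would be 2 here: linked IO is compacted to
     * consecutive driver locations, so gaps no longer inflate the count. */
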
@@ -831,18 +810,8 @@ setup_vs_variables(isel_context *ctx, nir_shader *nir)
 
 void setup_gs_variables(isel_context *ctx, nir_shader *nir)
 {
-   if (ctx->stage == vertex_geometry_gs || ctx->stage == tess_eval_geometry_gs) {
-      nir_foreach_variable(variable, &nir->inputs) {
-         variable->data.driver_location = util_bitcount64(ctx->input_masks[nir->info.stage] & ((1ull << variable->data.location) - 1ull)) * 4;
-      }
-   } else if (ctx->stage == geometry_gs) {
-      //TODO: make this more compact
-      nir_foreach_variable(variable, &nir->inputs) {
-         variable->data.driver_location = shader_io_get_unique_index((gl_varying_slot)variable->data.location) * 4;
-      }
-   } else {
-      unreachable("Unsupported GS stage.");
-   }
+   if (ctx->stage == vertex_geometry_gs || ctx->stage == tess_eval_geometry_gs)
+      ctx->program->config->lds_size = ctx->program->info->gs_ring_info.lds_size; /* Already in units of the alloc granularity */
 
    nir_foreach_variable(variable, &nir->outputs) {
       variable->data.driver_location = variable->data.location * 4;
@@ -866,22 +835,15 @@ setup_tcs_info(isel_context *ctx, nir_shader *nir)
       ctx->stage == vertex_tess_control_hs &&
       ctx->args->options->key.tcs.input_vertices == nir->info.tess.tcs_vertices_out;
 
-   if (ctx->stage == tess_control_hs) {
-      ctx->tcs_num_inputs = ctx->args->options->key.tcs.num_inputs;
-   } else if (ctx->stage == vertex_tess_control_hs) {
-      ctx->tcs_num_inputs = util_last_bit64(ctx->args->shader_info->vs.ls_outputs_written);
-
-      if (ctx->tcs_in_out_eq) {
-         ctx->tcs_temp_only_inputs = ~nir->info.tess.tcs_cross_invocation_inputs_read &
-                                     ~nir->info.inputs_read_indirectly &
-                                     nir->info.inputs_read;
-      }
-   } else {
-      unreachable("Unsupported TCS shader stage");
+   if (ctx->tcs_in_out_eq) {
+      ctx->tcs_temp_only_inputs = ~nir->info.tess.tcs_cross_invocation_inputs_read &
+                                    ~nir->info.inputs_read_indirectly &
+                                    nir->info.inputs_read;
    }
 
-   ctx->tcs_num_outputs = util_last_bit64(ctx->args->shader_info->tcs.outputs_written);
-   ctx->tcs_num_patch_outputs = util_last_bit64(ctx->args->shader_info->tcs.patch_outputs_written);
+   ctx->tcs_num_inputs = ctx->program->info->tcs.num_linked_inputs;
+   ctx->tcs_num_outputs = ctx->program->info->tcs.num_linked_outputs;
+   ctx->tcs_num_patch_outputs = ctx->program->info->tcs.num_linked_patch_outputs;
 
    ctx->tcs_num_patches = get_tcs_num_patches(
                              ctx->args->options->key.tcs.input_vertices,
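The tcs_in_out_eq fast path applies when a patch has exactly as many TCS
invocations as input vertices; an input read only by the matching invocation
(neither cross-invocation nor indirectly indexed) can then stay in temporaries
instead of round-tripping through LDS. Sketch of the mask logic, one comment
per term, where `key` stands in for ctx->args->options->key.tcs:

    bool in_out_eq = key.input_vertices == nir->info.tess.tcs_vertices_out;
    uint64_t temp_only = 0;
    if (in_out_eq) {
       temp_only = nir->info.inputs_read &                            /* read at all        */
                   ~nir->info.tess.tcs_cross_invocation_inputs_read & /* same invocation    */
                   ~nir->info.inputs_read_indirectly;                 /* no indirect access */
    }
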
@@ -909,43 +871,30 @@ setup_tcs_info(isel_context *ctx, nir_shader *nir)
 void
 setup_tcs_variables(isel_context *ctx, nir_shader *nir)
 {
-   nir_foreach_variable(variable, &nir->inputs) {
-      variable->data.driver_location = shader_io_get_unique_index((gl_varying_slot) variable->data.location) * 4;
-   }
-
    nir_foreach_variable(variable, &nir->outputs) {
-      variable->data.driver_location = shader_io_get_unique_index((gl_varying_slot) variable->data.location) * 4;
       assert(variable->data.location >= 0 && variable->data.location <= UINT8_MAX);
 
+      if (variable->data.location == VARYING_SLOT_TESS_LEVEL_OUTER)
+         ctx->tcs_tess_lvl_out_loc = variable->data.driver_location * 4u;
+      else if (variable->data.location == VARYING_SLOT_TESS_LEVEL_INNER)
+         ctx->tcs_tess_lvl_in_loc = variable->data.driver_location * 4u;
+
       if (variable->data.patch)
          ctx->output_tcs_patch_drv_loc_to_var_slot[variable->data.driver_location / 4] = variable->data.location;
       else
          ctx->output_drv_loc_to_var_slot[MESA_SHADER_TESS_CTRL][variable->data.driver_location / 4] = variable->data.location;
    }
-
-   ctx->tcs_tess_lvl_out_loc = shader_io_get_unique_index(VARYING_SLOT_TESS_LEVEL_OUTER) * 16u;
-   ctx->tcs_tess_lvl_in_loc = shader_io_get_unique_index(VARYING_SLOT_TESS_LEVEL_INNER) * 16u;
 }
 
 void
 setup_tes_variables(isel_context *ctx, nir_shader *nir)
 {
    ctx->tcs_num_patches = ctx->args->options->key.tes.num_patches;
-   ctx->tcs_num_outputs = ctx->args->options->key.tes.tcs_num_outputs;
-
-   nir_foreach_variable(variable, &nir->inputs) {
-      variable->data.driver_location = shader_io_get_unique_index((gl_varying_slot) variable->data.location) * 4;
-   }
+   ctx->tcs_num_outputs = ctx->program->info->tes.num_linked_inputs;
 
    nir_foreach_variable(variable, &nir->outputs) {
       if (ctx->stage == tess_eval_vs || ctx->stage == ngg_tess_eval_gs)
          variable->data.driver_location = variable->data.location * 4;
-      else if (ctx->stage == tess_eval_es)
-         variable->data.driver_location = shader_io_get_unique_index((gl_varying_slot) variable->data.location) * 4;
-      else if (ctx->stage == tess_eval_geometry_gs)
-         variable->data.driver_location = util_bitcount64(ctx->output_masks[nir->info.stage] & ((1ull << variable->data.location) - 1ull)) * 4;
-      else
-         unreachable("Unsupported TES shader stage");
    }
 
    if (ctx->stage == tess_eval_vs || ctx->stage == ngg_tess_eval_gs) {
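The unit bookkeeping around driver_location is easy to misread: it is assigned
in components (hence the `location * 4` above), so multiplying by 4 again
yields a byte offset. A worked example under that convention, inferred from
the surrounding assignments (16 bytes per vec4 slot):

    constexpr unsigned slot = 2;                          /* gl_varying_slot index */
    constexpr unsigned driver_location = slot * 4;        /* components (dwords)   */
    constexpr unsigned byte_offset = driver_location * 4; /* bytes                 */
    static_assert(byte_offset == 32, "two 16-byte vec4 slots precede slot 2");
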
@@ -993,50 +942,6 @@ setup_variables(isel_context *ctx, nir_shader *nir)
    }
 }
 
-void
-get_io_masks(isel_context *ctx, unsigned shader_count, struct nir_shader *const *shaders)
-{
-   for (unsigned i = 0; i < shader_count; i++) {
-      nir_shader *nir = shaders[i];
-      if (nir->info.stage == MESA_SHADER_COMPUTE)
-         continue;
-
-      uint64_t output_mask = 0;
-      nir_foreach_variable(variable, &nir->outputs) {
-         const glsl_type *type = variable->type;
-         if (nir_is_per_vertex_io(variable, nir->info.stage))
-            type = type->fields.array;
-         unsigned slots = type->count_attribute_slots(false);
-         if (variable->data.compact) {
-            unsigned component_count = variable->data.location_frac + type->length;
-            slots = (component_count + 3) / 4;
-         }
-         output_mask |= ((1ull << slots) - 1) << variable->data.location;
-      }
-
-      uint64_t input_mask = 0;
-      nir_foreach_variable(variable, &nir->inputs) {
-         const glsl_type *type = variable->type;
-         if (nir_is_per_vertex_io(variable, nir->info.stage))
-            type = type->fields.array;
-         unsigned slots = type->count_attribute_slots(false);
-         if (variable->data.compact) {
-            unsigned component_count = variable->data.location_frac + type->length;
-            slots = (component_count + 3) / 4;
-         }
-         input_mask |= ((1ull << slots) - 1) << variable->data.location;
-      }
-
-      ctx->output_masks[nir->info.stage] |= output_mask;
-      if (i + 1 < shader_count)
-         ctx->input_masks[shaders[i + 1]->info.stage] |= output_mask;
-
-      ctx->input_masks[nir->info.stage] |= input_mask;
-      if (i)
-         ctx->output_masks[shaders[i - 1]->info.stage] |= input_mask;
-   }
-}
-
 unsigned
 lower_bit_size_callback(const nir_alu_instr *alu, void *_)
 {
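get_io_masks() rebuilt per-stage IO masks from the variable lists by hand. The
same masks are already produced by NIR's info gathering before instruction
selection runs, so the consumers above read nir->info directly. A minimal
sketch, assuming the entrypoint is set up as usual:

    nir_shader_gather_info(nir, nir_shader_get_entrypoint(nir));
    uint64_t inputs  = nir->info.inputs_read;     /* one bit per slot */
    uint64_t outputs = nir->info.outputs_written;
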
@@ -1089,11 +994,20 @@ setup_nir(isel_context *ctx, nir_shader *nir)
 
    bool lower_to_scalar = false;
    bool lower_pack = false;
+   nir_variable_mode robust_modes = (nir_variable_mode)0;
+
+   if (ctx->options->robust_buffer_access) {
+      robust_modes = (nir_variable_mode)(nir_var_mem_ubo |
+                                         nir_var_mem_ssbo |
+                                         nir_var_mem_global |
+                                         nir_var_mem_push_const);
+   }
+
    if (nir_opt_load_store_vectorize(nir,
                                     (nir_variable_mode)(nir_var_mem_ssbo | nir_var_mem_ubo |
                                                         nir_var_mem_push_const | nir_var_mem_shared |
                                                         nir_var_mem_global),
-                                    mem_vectorize_callback)) {
+                                    mem_vectorize_callback, robust_modes)) {
       lower_to_scalar = true;
       lower_pack = true;
    }
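The vectorizer's new robust_modes parameter keeps it from merging accesses in
a way that changes out-of-bounds behavior: with robustBufferAccess, fusing an
in-bounds load with a neighboring out-of-bounds one widens the access so that
its robust handling could clobber the in-bounds half too. A self-contained
model of that hazard; robust_load_dword is a hypothetical stand-in for an SSBO
load that returns zero when out of bounds:

    #include <cstdint>
    #include <cstring>
    #include <vector>

    /* One permitted robustBufferAccess result: OOB reads return zero. */
    static uint32_t robust_load_dword(const std::vector<uint8_t> &buf, size_t off)
    {
       if (off + 4 > buf.size())
          return 0;
       uint32_t v;
       std::memcpy(&v, buf.data() + off, 4);
       return v;
    }

    static void example(const std::vector<uint8_t> &buf /* 8 bytes long */)
    {
       uint32_t a = robust_load_dword(buf, 4); /* in-bounds dword     */
       uint32_t b = robust_load_dword(buf, 8); /* out-of-bounds, == 0 */
       /* Fused into one 8-byte load at offset 4, the access would be partially
        * out-of-bounds; robust handling could then zero it entirely, losing
        * the valid dword - hence no merging for robust modes. */
       (void)a; (void)b;
    }
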
@@ -1320,8 +1234,6 @@ setup_isel_context(Program* program,
    program->vgpr_limit = get_addr_vgpr_from_waves(program, program->min_waves);
    program->sgpr_limit = get_addr_sgpr_from_waves(program, program->min_waves);
 
-   get_io_masks(&ctx, shader_count, shaders);
-
    unsigned scratch_size = 0;
    if (program->stage == gs_copy_vs) {
       assert(shader_count == 1);