aco: set constant_data_offset correctly in the case of merged shaders
[mesa.git] / src/amd/compiler/aco_instruction_selection_setup.cpp
index f39f8476b80e5ab13dc402f95755840ef21c3442..4f76289be45adc0eb8e7240ee5dffe364d115d56 100644
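With merged shaders the program accumulates constant data from more than one NIR shader, so each shader's offset into program->constant_data has to be recorded in init_context(), which runs right before that shader's instructions are selected, rather than in setup_nir(), where a later shader's call would overwrite ctx->constant_data_offset before the first shader uses it. Below is a rough standalone sketch of the align/append/record pattern the hunk moves into init_context(); the names ShaderBlob and append_constant_data are illustrative only and not Mesa API.

#include <cstddef>
#include <cstdint>
#include <vector>

struct ShaderBlob {                 /* stand-in for one NIR shader's constant data */
   const uint8_t *constant_data;
   size_t constant_data_size;
};

/* Append one shader's constants to the program-wide buffer, 4-byte aligned,
 * and return the offset where they start. The patch records this offset
 * per shader in init_context() so it matches the shader being compiled. */
static size_t append_constant_data(std::vector<uint8_t> &program_data,
                                   const ShaderBlob &s)
{
   while (program_data.size() % 4u)
      program_data.push_back(0);
   size_t offset = program_data.size();
   program_data.insert(program_data.end(),
                       s.constant_data, s.constant_data + s.constant_data_size);
   return offset;
}

The patch keeps the same three steps (pad to 4 bytes, record the buffer size as the offset, append the bytes); only where they run changes.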
@@ -327,8 +327,8 @@ void fill_desc_set_info(isel_context *ctx, nir_function_impl *impl)
    }
    ctx->buffer_resource_flags = std::vector<uint8_t>(resource_flag_count);
 
-   nir_foreach_variable(var, &impl->function->shader->uniforms) {
-      if (var->data.mode == nir_var_mem_ssbo && (var->data.access & ACCESS_RESTRICT)) {
+   nir_foreach_variable_with_modes(var, impl->function->shader, nir_var_mem_ssbo) {
+      if (var->data.access & ACCESS_RESTRICT) {
          uint32_t offset = ctx->resource_flag_offsets[var->data.descriptor_set];
          ctx->buffer_resource_flags[offset + var->data.binding] |= buffer_is_restrict;
       }
@@ -367,7 +367,8 @@ void fill_desc_set_info(isel_context *ctx, nir_function_impl *impl)
             res = intrin->src[0].ssa;
             break;
          case nir_intrinsic_store_ssbo:
-            if (nir_src_is_divergent(intrin->src[2]) || ctx->program->chip_class < GFX8 ||
+            if (nir_src_is_divergent(intrin->src[2]) ||
+                ctx->program->chip_class < GFX8 || ctx->program->chip_class >= GFX10_3 ||
                 (intrin->src[0].ssa->bit_size < 32 && !can_subdword_ssbo_store_use_smem(intrin)))
                flags |= glc ? has_glc_vmem_store : has_nonglc_vmem_store;
             res = intrin->src[1].ssa;
@@ -935,6 +936,14 @@ void init_context(isel_context *ctx, nir_shader *shader)
 
    ctx->allocated.reset(allocated.release());
    ctx->cf_info.nir_to_aco.reset(nir_to_aco.release());
+
+   /* align and copy constant data */
+   while (ctx->program->constant_data.size() % 4u)
+      ctx->program->constant_data.push_back(0);
+   ctx->constant_data_offset = ctx->program->constant_data.size();
+   ctx->program->constant_data.insert(ctx->program->constant_data.end(),
+                                      (uint8_t*)shader->constant_data,
+                                      (uint8_t*)shader->constant_data + shader->constant_data_size);
 }
 
 Pseudo_instruction *add_startpgm(struct isel_context *ctx)
@@ -1303,16 +1312,6 @@ lower_bit_size_callback(const nir_alu_instr *alu, void *_)
 void
 setup_nir(isel_context *ctx, nir_shader *nir)
 {
-   Program *program = ctx->program;
-
-   /* align and copy constant data */
-   while (program->constant_data.size() % 4u)
-      program->constant_data.push_back(0);
-   ctx->constant_data_offset = program->constant_data.size();
-   program->constant_data.insert(program->constant_data.end(),
-                                 (uint8_t*)nir->constant_data,
-                                 (uint8_t*)nir->constant_data + nir->constant_data_size);
-
    /* the variable setup has to be done before lower_io / CSE */
    setup_variables(ctx, nir);
 
@@ -1344,13 +1343,15 @@ setup_nir(isel_context *ctx, nir_shader *nir)
    if (nir->info.stage != MESA_SHADER_COMPUTE)
       nir_lower_io(nir, (nir_variable_mode)(nir_var_shader_in | nir_var_shader_out), type_size, (nir_lower_io_options)0);
 
+   lower_to_scalar |= nir_opt_shrink_vectors(nir);
+
    if (lower_to_scalar)
       nir_lower_alu_to_scalar(nir, NULL, NULL);
    if (lower_pack)
       nir_lower_pack(nir);
 
    /* lower ALU operations */
-   nir_lower_int64(nir, nir->options->lower_int64_options);
+   nir_lower_int64(nir);
 
    if (nir_lower_bit_size(nir, lower_bit_size_callback, NULL))
       nir_copy_prop(nir); /* allow nir_opt_idiv_const() to optimize lowered divisions */
@@ -1385,7 +1386,6 @@ setup_nir(isel_context *ctx, nir_shader *nir)
 
    /* cleanup passes */
    nir_lower_load_const_to_scalar(nir);
-   nir_opt_shrink_load(nir);
    nir_move_options move_opts = (nir_move_options)(
       nir_move_const_undef | nir_move_load_ubo | nir_move_load_input |
       nir_move_comparisons | nir_move_copies);