nir: use enum operator helper for nir_variable_mode and nir_metadata
[mesa.git] / src / amd / compiler / aco_instruction_selection_setup.cpp
index f39f8476b80e5ab13dc402f95755840ef21c3442..73bc1e151dfbe93725ac9c5260b13f7b54583fe3 100644 (file)
@@ -205,8 +205,7 @@ sanitize_if(nir_function_impl *impl, nir_if *nif)
     * correct because of the specific type of transformation we did. Block
     * indices are not valid except for block_0's, which is all we care about for
     * nir_block_is_unreachable(). */
-   impl->valid_metadata =
-      (nir_metadata)(impl->valid_metadata | nir_metadata_dominance | nir_metadata_block_index);
+   impl->valid_metadata = impl->valid_metadata | nir_metadata_dominance | nir_metadata_block_index;
 
    return true;
 }
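
This hunk works because of the C++ enum operator helper named in the commit
title, assumed here to be mesa's MESA_DEFINE_CPP_ENUM_BITFIELD_OPERATORS macro
from util/macros.h. A minimal sketch of the overloads such a macro provides
for nir_metadata (illustrative, not mesa's exact expansion):

   static inline nir_metadata
   operator|(nir_metadata a, nir_metadata b)
   {
      return (nir_metadata)((unsigned)a | (unsigned)b);
   }

   static inline nir_metadata
   operator~(nir_metadata a)
   {
      return (nir_metadata)(~(unsigned)a);
   }

With these in scope, impl->valid_metadata | nir_metadata_dominance |
nir_metadata_block_index already has type nir_metadata, so the C-style cast
can be dropped here and in the analogous nir_metadata and nir_variable_mode
hunks below.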
@@ -327,8 +326,8 @@ void fill_desc_set_info(isel_context *ctx, nir_function_impl *impl)
    }
    ctx->buffer_resource_flags = std::vector<uint8_t>(resource_flag_count);
 
-   nir_foreach_variable(var, &impl->function->shader->uniforms) {
-      if (var->data.mode == nir_var_mem_ssbo && (var->data.access & ACCESS_RESTRICT)) {
+   nir_foreach_variable_with_modes(var, impl->function->shader, nir_var_mem_ssbo) {
+      if (var->data.access & ACCESS_RESTRICT) {
          uint32_t offset = ctx->resource_flag_offsets[var->data.descriptor_set];
          ctx->buffer_resource_flags[offset + var->data.binding] |= buffer_is_restrict;
       }
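
nir_foreach_variable_with_modes(var, shader, modes) iterates the shader's
variable list and skips variables whose var->data.mode is not in the given
mask, which is why the explicit mode test disappears from the loop body. A
rough open-coded equivalent (a sketch, not the actual macro expansion in
nir.h):

   nir_foreach_variable_in_shader(var, impl->function->shader) {
      if (!(var->data.mode & nir_var_mem_ssbo))
         continue;
      if (var->data.access & ACCESS_RESTRICT) {
         uint32_t offset = ctx->resource_flag_offsets[var->data.descriptor_set];
         ctx->buffer_resource_flags[offset + var->data.binding] |= buffer_is_restrict;
      }
   }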
@@ -339,7 +338,7 @@ void fill_desc_set_info(isel_context *ctx, nir_function_impl *impl)
          if (instr->type != nir_instr_type_intrinsic)
             continue;
          nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
-         if (!(nir_intrinsic_infos[intrin->intrinsic].index_map[NIR_INTRINSIC_ACCESS]))
+         if (!nir_intrinsic_has_access(intrin))
             continue;
 
          nir_ssa_def *res = NULL;
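
nir_intrinsic_has_access() is one of the generated per-index helpers and
wraps the same index_map lookup the old code open-coded. Roughly (a sketch of
the generated form, not copied from the generated header):

   static inline bool
   nir_intrinsic_has_access(const nir_intrinsic_instr *instr)
   {
      const nir_intrinsic_info *info = &nir_intrinsic_infos[instr->intrinsic];
      return info->index_map[NIR_INTRINSIC_ACCESS] > 0;
   }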
@@ -367,7 +366,8 @@ void fill_desc_set_info(isel_context *ctx, nir_function_impl *impl)
             res = intrin->src[0].ssa;
             break;
          case nir_intrinsic_store_ssbo:
-            if (nir_src_is_divergent(intrin->src[2]) || ctx->program->chip_class < GFX8 ||
+            if (nir_src_is_divergent(intrin->src[2]) ||
+                ctx->program->chip_class < GFX8 || ctx->program->chip_class >= GFX10_3 ||
                 (intrin->src[0].ssa->bit_size < 32 && !can_subdword_ssbo_store_use_smem(intrin)))
                flags |= glc ? has_glc_vmem_store : has_nonglc_vmem_store;
             res = intrin->src[1].ssa;
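
The added chip_class >= GFX10_3 clause routes every SSBO store on GFX10.3+
through VMEM, presumably because scalar buffer stores are not usable there.
De Morgan's negation of the whole test gives the SMEM-store eligibility,
written out as a hypothetical helper for readability:

   /* Hypothetical predicate; equivalent to the negation of the test above. */
   static bool
   can_store_ssbo_via_smem(isel_context *ctx, nir_intrinsic_instr *intrin)
   {
      return !nir_src_is_divergent(intrin->src[2]) &&
             ctx->program->chip_class >= GFX8 &&
             ctx->program->chip_class < GFX10_3 &&
             (intrin->src[0].ssa->bit_size >= 32 ||
              can_subdword_ssbo_store_use_smem(intrin));
   }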
@@ -567,7 +567,7 @@ void init_context(isel_context *ctx, nir_shader *shader)
    /* sanitize control flow */
    nir_metadata_require(impl, nir_metadata_dominance);
    sanitize_cf_list(impl, &impl->body);
-   nir_metadata_preserve(impl, (nir_metadata)~nir_metadata_block_index);
+   nir_metadata_preserve(impl, ~nir_metadata_block_index);
 
    /* we'll need this for isel */
    nir_metadata_require(impl, nir_metadata_block_index);
@@ -599,9 +599,6 @@ void init_context(isel_context *ctx, nir_shader *shader)
                   case nir_op_fsub:
                   case nir_op_fmax:
                   case nir_op_fmin:
-                  case nir_op_fmax3:
-                  case nir_op_fmin3:
-                  case nir_op_fmed3:
                   case nir_op_fneg:
                   case nir_op_fabs:
                   case nir_op_fsat:
@@ -935,6 +932,14 @@ void init_context(isel_context *ctx, nir_shader *shader)
 
    ctx->allocated.reset(allocated.release());
    ctx->cf_info.nir_to_aco.reset(nir_to_aco.release());
+
+   /* align and copy constant data */
+   while (ctx->program->constant_data.size() % 4u)
+      ctx->program->constant_data.push_back(0);
+   ctx->constant_data_offset = ctx->program->constant_data.size();
+   ctx->program->constant_data.insert(ctx->program->constant_data.end(),
+                                      (uint8_t*)shader->constant_data,
+                                      (uint8_t*)shader->constant_data + shader->constant_data_size);
 }
 
 Pseudo_instruction *add_startpgm(struct isel_context *ctx)
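
The while loop in the block moved here pads program->constant_data with zero
bytes until its size is a multiple of 4, so constant_data_offset is always
dword-aligned before the shader's constant data is appended. The same
alignment can be computed arithmetically; a self-contained sketch with a
hypothetical helper name:

   #include <cstdint>
   #include <vector>

   /* Round data.size() up to a multiple of alignment (a power of two) by
    * appending zero bytes; returns the aligned offset. */
   static size_t
   align_constant_data(std::vector<uint8_t> &data, size_t alignment)
   {
      size_t offset = (data.size() + alignment - 1) & ~(alignment - 1);
      data.resize(offset, 0);
      return offset;
   }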
@@ -1303,16 +1308,6 @@ lower_bit_size_callback(const nir_alu_instr *alu, void *_)
 void
 setup_nir(isel_context *ctx, nir_shader *nir)
 {
-   Program *program = ctx->program;
-
-   /* align and copy constant data */
-   while (program->constant_data.size() % 4u)
-      program->constant_data.push_back(0);
-   ctx->constant_data_offset = program->constant_data.size();
-   program->constant_data.insert(program->constant_data.end(),
-                                 (uint8_t*)nir->constant_data,
-                                 (uint8_t*)nir->constant_data + nir->constant_data_size);
-
    /* the variable setup has to be done before lower_io / CSE */
    setup_variables(ctx, nir);
 
@@ -1327,22 +1322,24 @@ setup_nir(isel_context *ctx, nir_shader *nir)
    nir_variable_mode robust_modes = (nir_variable_mode)0;
 
    if (ctx->options->robust_buffer_access) {
-      robust_modes = (nir_variable_mode)(nir_var_mem_ubo |
-                                         nir_var_mem_ssbo |
-                                         nir_var_mem_global |
-                                         nir_var_mem_push_const);
+      robust_modes = nir_var_mem_ubo |
+                     nir_var_mem_ssbo |
+                     nir_var_mem_global |
+                     nir_var_mem_push_const;
    }
 
    if (nir_opt_load_store_vectorize(nir,
-                                    (nir_variable_mode)(nir_var_mem_ssbo | nir_var_mem_ubo |
-                                                        nir_var_mem_push_const | nir_var_mem_shared |
-                                                        nir_var_mem_global),
+                                    nir_var_mem_ssbo | nir_var_mem_ubo |
+                                    nir_var_mem_push_const | nir_var_mem_shared |
+                                    nir_var_mem_global,
                                     mem_vectorize_callback, robust_modes)) {
       lower_to_scalar = true;
       lower_pack = true;
    }
    if (nir->info.stage != MESA_SHADER_COMPUTE)
-      nir_lower_io(nir, (nir_variable_mode)(nir_var_shader_in | nir_var_shader_out), type_size, (nir_lower_io_options)0);
+      nir_lower_io(nir, nir_var_shader_in | nir_var_shader_out, type_size, (nir_lower_io_options)0);
+
+   lower_to_scalar |= nir_opt_shrink_vectors(nir);
 
    if (lower_to_scalar)
       nir_lower_alu_to_scalar(nir, NULL, NULL);
@@ -1350,7 +1347,7 @@ setup_nir(isel_context *ctx, nir_shader *nir)
       nir_lower_pack(nir);
 
    /* lower ALU operations */
-   nir_lower_int64(nir, nir->options->lower_int64_options);
+   nir_lower_int64(nir);
 
    if (nir_lower_bit_size(nir, lower_bit_size_callback, NULL))
       nir_copy_prop(nir); /* allow nir_opt_idiv_const() to optimize lowered divisions */
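
The options argument to nir_lower_int64() goes away because the pass now
reads the lowering mask from the shader itself. Conceptually (a sketch of the
new entry point; lower_int64_impl is a hypothetical internal name):

   bool
   nir_lower_int64(nir_shader *shader)
   {
      /* The mask now comes from the shader's compiler options. */
      nir_lower_int64_options options = shader->options->lower_int64_options;
      return lower_int64_impl(shader, options);
   }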
@@ -1385,7 +1382,6 @@ setup_nir(isel_context *ctx, nir_shader *nir)
 
    /* cleanup passes */
    nir_lower_load_const_to_scalar(nir);
-   nir_opt_shrink_load(nir);
    nir_move_options move_opts = (nir_move_options)(
       nir_move_const_undef | nir_move_load_ubo | nir_move_load_input |
       nir_move_comparisons | nir_move_copies);
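
The nir_opt_shrink_load() call removed here is subsumed by the
nir_opt_shrink_vectors() call added earlier in setup_nir() (see the -1327
hunk above): the newer pass shrinks vector values generally rather than just
loads, and its boolean result is ORed into lower_to_scalar so that
nir_lower_alu_to_scalar() runs when the pass makes progress.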