* correct because of the specific type of transformation we did. Block
* indices are not valid except for block_0's, which is all we care about for
* nir_block_is_unreachable(). */
- impl->valid_metadata =
- (nir_metadata)(impl->valid_metadata | nir_metadata_dominance | nir_metadata_block_index);
+ impl->valid_metadata =
+    impl->valid_metadata | nir_metadata_dominance | nir_metadata_block_index;
return true;
}
if (instr->type != nir_instr_type_intrinsic)
continue;
nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
- if (!(nir_intrinsic_infos[intrin->intrinsic].index_map[NIR_INTRINSIC_ACCESS]))
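+ /* nir_intrinsic_has_access() is the generated helper that performs the
+  * same index_map[NIR_INTRINSIC_ACCESS] lookup */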
+ if (!nir_intrinsic_has_access(intrin))
continue;
nir_ssa_def *res = NULL;
/* sanitize control flow */
nir_metadata_require(impl, nir_metadata_dominance);
sanitize_cf_list(impl, &impl->body);
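+ /* sanitization may rewrite the CF graph; only the block indices are
+  * dropped here, which asserts that the remaining metadata (dominance
+  * in particular) is still valid */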
- nir_metadata_preserve(impl, (nir_metadata)~nir_metadata_block_index);
+ nir_metadata_preserve(impl, ~nir_metadata_block_index);
/* we'll need this for isel */
nir_metadata_require(impl, nir_metadata_block_index);
case nir_op_fsub:
case nir_op_fmax:
case nir_op_fmin:
- case nir_op_fmax3:
- case nir_op_fmin3:
- case nir_op_fmed3:
case nir_op_fneg:
case nir_op_fabs:
case nir_op_fsat:
ctx->allocated.reset(allocated.release());
ctx->cf_info.nir_to_aco.reset(nir_to_aco.release());
+
+ /* align and copy constant data */
+ while (ctx->program->constant_data.size() % 4u)
+ ctx->program->constant_data.push_back(0);
+ ctx->constant_data_offset = ctx->program->constant_data.size();
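+ /* constant_data_offset records where this shader's data begins in the
+  * program-level buffer; the zero padding above keeps it dword-aligned */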
+ ctx->program->constant_data.insert(ctx->program->constant_data.end(),
+ (uint8_t*)shader->constant_data,
+ (uint8_t*)shader->constant_data + shader->constant_data_size);
}
Pseudo_instruction *add_startpgm(struct isel_context *ctx)
void
setup_nir(isel_context *ctx, nir_shader *nir)
{
- Program *program = ctx->program;
-
- /* align and copy constant data */
- while (program->constant_data.size() % 4u)
- program->constant_data.push_back(0);
- ctx->constant_data_offset = program->constant_data.size();
- program->constant_data.insert(program->constant_data.end(),
- (uint8_t*)nir->constant_data,
- (uint8_t*)nir->constant_data + nir->constant_data_size);
-
/* the variable setup has to be done before lower_io / CSE */
setup_variables(ctx, nir);
nir_variable_mode robust_modes = (nir_variable_mode)0;
if (ctx->options->robust_buffer_access) {
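+ /* these modes must keep their per-access bounds guarantees, so the
+  * vectorizer has to treat them conservatively */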
- robust_modes = (nir_variable_mode)(nir_var_mem_ubo |
- nir_var_mem_ssbo |
- nir_var_mem_global |
- nir_var_mem_push_const);
+ robust_modes = nir_var_mem_ubo |
+ nir_var_mem_ssbo |
+ nir_var_mem_global |
+ nir_var_mem_push_const;
}
if (nir_opt_load_store_vectorize(nir,
- (nir_variable_mode)(nir_var_mem_ssbo | nir_var_mem_ubo |
- nir_var_mem_push_const | nir_var_mem_shared |
- nir_var_mem_global),
+ nir_var_mem_ssbo | nir_var_mem_ubo |
+ nir_var_mem_push_const | nir_var_mem_shared |
+ nir_var_mem_global,
mem_vectorize_callback, robust_modes)) {
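+ /* vectorization can introduce vector ALU and pack ops, which are
+  * scalarized/lowered again later in setup_nir */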
lower_to_scalar = true;
lower_pack = true;
}
if (nir->info.stage != MESA_SHADER_COMPUTE)
- nir_lower_io(nir, (nir_variable_mode)(nir_var_shader_in | nir_var_shader_out), type_size, (nir_lower_io_options)0);
+ nir_lower_io(nir, nir_var_shader_in | nir_var_shader_out, type_size, (nir_lower_io_options)0);
lower_to_scalar |= nir_opt_shrink_vectors(nir);