nir_ssa_dest_init(&alu->instr, &alu->dest.dest, num_components,
reg->bit_size, reg->name);
- nir_op vecN_op;
- switch (reg->num_components) {
- case 2: vecN_op = nir_op_vec2; break;
- case 3: vecN_op = nir_op_vec3; break;
- case 4: vecN_op = nir_op_vec4; break;
- default: unreachable("not reached");
- }
+ nir_op vecN_op = nir_op_vec(reg->num_components);
nir_alu_instr *vec = nir_alu_instr_create(state->shader, vecN_op);
nir_metadata_dominance);
nir_index_local_regs(impl);
+ void *dead_ctx = ralloc_context(NULL);
struct regs_to_ssa_state state;
state.shader = impl->function->shader;
- state.values = malloc(impl->reg_alloc * sizeof(*state.values));
+ state.values = ralloc_array(dead_ctx, struct nir_phi_builder_value *,
+ impl->reg_alloc);
struct nir_phi_builder *phi_build = nir_phi_builder_create(impl);
const unsigned block_set_words = BITSET_WORDS(impl->num_blocks);
- NIR_VLA(BITSET_WORD, defs, block_set_words);
+ BITSET_WORD *defs = ralloc_array(dead_ctx, BITSET_WORD, block_set_words);
nir_foreach_register(reg, &impl->registers) {
if (reg->num_array_elems != 0) {
nir_foreach_block(block, impl) {
nir_foreach_instr(instr, block) {
- if (instr->type == nir_instr_type_alu) {
+ switch (instr->type) {
+ case nir_instr_type_alu:
rewrite_alu_instr(nir_instr_as_alu(instr), &state);
- } else {
+ break;
+
+ case nir_instr_type_phi:
+ /* We rewrite sources as a separate pass */
+ nir_foreach_dest(instr, rewrite_dest, &state);
+ break;
+
+ default:
nir_foreach_src(instr, rewrite_src, &state);
nir_foreach_dest(instr, rewrite_dest, &state);
}
nir_if *following_if = nir_block_get_following_if(block);
if (following_if)
rewrite_if_condition(following_if, &state);
+
+ /* Handle phi sources that source from this block. We have to do this
+ * as a separate pass because the phi builder assumes that uses and
+ * defs are processed in an order that respects dominance. When we have
+ * loops, a phi source may be a back-edge so we have to handle it as if
+ * it were one of the last instructions in the predecessor block.
+ */
+ for (unsigned i = 0; i < ARRAY_SIZE(block->successors); i++) {
+ if (block->successors[i] == NULL)
+ continue;
+
+ nir_foreach_instr(instr, block->successors[i]) {
+ if (instr->type != nir_instr_type_phi)
+ break;
+
+ nir_phi_instr *phi = nir_instr_as_phi(instr);
+ nir_foreach_phi_src(phi_src, phi) {
+ if (phi_src->pred == block)
+ rewrite_src(&phi_src->src, &state);
+ }
+ }
+ }
}
nir_phi_builder_finish(phi_build);
nir_foreach_register_safe(reg, &impl->registers) {
if (state.values[reg->index]) {
- assert(list_empty(&reg->uses));
- assert(list_empty(&reg->if_uses));
- assert(list_empty(&reg->defs));
+ assert(list_is_empty(&reg->uses));
+ assert(list_is_empty(&reg->if_uses));
+ assert(list_is_empty(&reg->defs));
exec_node_remove(&reg->node);
}
}
- free(state.values);
+ ralloc_free(dead_ctx);
nir_metadata_preserve(impl, nir_metadata_block_index |
nir_metadata_dominance);
bool
nir_lower_regs_to_ssa(nir_shader *shader)
{
- assert(exec_list_is_empty(&shader->registers));
bool progress = false;
nir_foreach_function(function, shader) {