util_dynarray_fini(&last_read[i]);
util_dynarray_fini(&last_write[i]);
}
+
+ free(last_read);
+ free(last_write);
}
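These two frees close a leak in the dependency-graph teardown:
util_dynarray_fini() releases each per-node array's storage, but the
arrays of dynarrays themselves were heap-allocated and never freed. A
minimal sketch of the intended pairing, assuming the arrays are
calloc'd over node_count (the allocation site is not in this hunk):

        struct util_dynarray *last_read =
                calloc(node_count, sizeof(struct util_dynarray));
        struct util_dynarray *last_write =
                calloc(node_count, sizeof(struct util_dynarray));

        /* ... record read/write dependencies per node ... */

        for (unsigned i = 0; i < node_count; ++i) {
                util_dynarray_fini(&last_read[i]);  /* per-node storage */
                util_dynarray_fini(&last_write[i]);
        }

        free(last_read);   /* the arrays themselves: the added lines */
        free(last_write);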
/* Does the mask cover more than a scalar? */
if (!ins->has_constants)
return true;
- if (ins->alu.reg_mode == midgard_reg_mode_16) {
+ if (ins->alu.reg_mode != midgard_reg_mode_32) {
/* TODO: 16-bit constant combining */
if (pred->constant_count)
return false;
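The widened test matters because only midgard_reg_mode_16 was caught
before: 8-bit and 64-bit constants would fall through toward the 32-bit
combining path. A hedged sketch of the resulting control flow (the
function shape is assumed; field names are as in the hunk):

        static bool
        mir_adjust_constants_sketch(midgard_instruction *ins,
                                    struct midgard_predicate *pred)
        {
                /* No constants attached: always safe */
                if (!ins->has_constants)
                        return true;

                /* Combining is only implemented for 32-bit mode, so any
                 * other width refuses to join a bundle that already
                 * carries constants */
                if (ins->alu.reg_mode != midgard_reg_mode_32)
                        return pred->constant_count == 0;

                /* (32-bit combining continues in the real function) */
                return true;
        }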
mir_foreach_instr_in_block_scheduled_rev(block, ins) {
list_add(&ins->link, &block->instructions);
}
+
+ free(instructions); /* Allocated by flatten_mir() */
+ free(worklist);
}
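Same leak pattern as the dependency graph above: flatten_mir() returns
a heap array (per the added comment) and the scheduler's worklist is
also heap-allocated, and neither was released when the block finished
scheduling. A sketch of the lifetime, with the allocation shapes
assumed rather than taken from this hunk:

        unsigned len = 0;
        midgard_instruction **instructions = flatten_mir(block, &len);
        BITSET_WORD *worklist = calloc(BITSET_WORDS(len), sizeof(BITSET_WORD));

        /* ... pull instructions off the worklist into bundles ... */

        mir_foreach_instr_in_block_scheduled_rev(block, ins) {
                list_add(&ins->link, &block->instructions);
        }

        free(instructions);
        free(worklist);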
/* When we're 'squeezing down' the values in the IR, we maintain a hash
 * as such */
/* For register spilling - to thread local storage */
.arg_1 = 0xEA,
.arg_2 = 0x1E,
-
- /* Splattered across, TODO combine logically */
- .varying_parameters = (byte & 0x1FF) << 1,
- .address = (byte >> 9)
},
/* If we spill an unspill, RA goes into an infinite loop */
.no_spill = true
};
+ ins.constants[0] = byte;
+
if (is_store) {
/* r0 = r26, r1 = r27 */
assert(srcdest == SSA_FIXED_REGISTER(26) || srcdest == SSA_FIXED_REGISTER(27));
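The deleted initializers split the spill slot's byte offset across two
load/store bitfields (the low nine bits into varying_parameters, the
rest into address), which is what the removed TODO wanted combined.
The replacement carries the whole offset as an inline constant
instead, restated from the hunk itself:

        /* Removed: offset scattered across bitfields
         *     .varying_parameters = (byte & 0x1FF) << 1,
         *     .address            = (byte >> 9)
         * Added: one full-width offset, applied when the load/store
         * instruction is packed */
        ins.constants[0] = byte;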
mir_foreach_instr_global(ctx, ins) {
ins->hint = false;
}
+
+ free(cost);
}
void
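Another allocation without a matching free: the per-node spill-cost
array used to choose a spill candidate was leaked once the hints were
cleared. Sketch of the pairing, assuming cost is calloc'd over
ctx->temp_count (only free(cost) appears in the hunk):

        unsigned *cost = calloc(ctx->temp_count, sizeof(*cost));

        /* ... accumulate costs, pick the best spill node ... */

        /* Clear hints so the next RA round starts fresh */
        mir_foreach_instr_global(ctx, ins) {
                ins->hint = false;
        }

        free(cost);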
mir_squeeze_index(ctx);
mir_invalidate_liveness(ctx);
- l = NULL;
+ if (l) {
+ lcra_free(l);
+ l = NULL;
+ }
+
l = allocate_registers(ctx, &spilled);
} while(spilled && ((iter_count--) > 0));
ctx->tls_size = spill_count * 16;
install_registers(ctx, l);
+
+ lcra_free(l);
}
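Taken together, the last two hunks fix the LCRA state's lifetime
across the spill/retry loop: the bare l = NULL dropped the previous
iteration's state without freeing it, and the final state survived
past install_registers(). A sketch of the fixed loop, with the spill
call and the watchdog initialization assumed from the surrounding
function rather than shown here:

        struct lcra_state *l = NULL;
        bool spilled = false;
        int iter_count = 1000;   /* watchdog against spill/RA livelock */

        do {
                if (spilled)
                        mir_spill_register(ctx, l, &spill_count);

                mir_squeeze_index(ctx);
                mir_invalidate_liveness(ctx);

                /* Free last iteration's state before reallocating */
                if (l) {
                        lcra_free(l);
                        l = NULL;
                }

                l = allocate_registers(ctx, &spilled);
        } while (spilled && ((iter_count--) > 0));

        ctx->tls_size = spill_count * 16;
        install_registers(ctx, l);

        lcra_free(l);   /* the final state no longer leaks */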