bool invert;
/* Hint for the register allocator not to spill the destination written
- * from this instruction (because it is a spill/unspill node itself) */
+ * from this instruction (because it is a spill/unspill node itself).
+ * Bitmask of register classes: a set bit (1 << REG_CLASS_*) blocks
+ * spilling of the destination while that class is being spilled */
- bool no_spill;
+ unsigned no_spill;
/* Generic hint for intra-pass use */
bool hint;
return ins;
}
+/* Broad types of register classes so we can handle special
+ * registers */
+
+#define REG_CLASS_WORK 0
+#define REG_CLASS_LDST 1
+#define REG_CLASS_TEXR 3
+#define REG_CLASS_TEXW 4
+
/* Like a move, but to thread local storage! */
static inline midgard_instruction
},
/* If we spill an unspill, RA goes into an infinite loop */
- .no_spill = true
+ .no_spill = (1 << REG_CLASS_WORK)
};
ins.constants[0] = byte;
void schedule_program(compiler_context *ctx);
-/* Broad types of register classes so we can handle special
- * registers */
-
-#define REG_CLASS_WORK 0
-#define REG_CLASS_LDST 1
-#define REG_CLASS_TEXR 3
-#define REG_CLASS_TEXW 4
-
void mir_ra(compiler_context *ctx);
void mir_squeeze_index(compiler_context *ctx);
void mir_lower_special_reads(compiler_context *ctx);
/* We can't spill a previously spilled value or an unspill */
mir_foreach_instr_global(ctx, ins) {
- if (ins->no_spill) {
+ if (ins->no_spill & (1 << l->spill_class)) {
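+ /* A negative cost excludes this node from LCRA's spill candidates */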
lcra_set_node_spill_cost(l, ins->dest, -1);
mir_foreach_src(ins, s)
if (is_special_w) {
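+ /* Special writes are spilled with a plain move instead of a TLS store;
+ * the move must not itself be spilled while this class is being spilled */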
st = v_mov(spill_node, spill_slot);
- st.no_spill = true;
+ st.no_spill |= (1 << spill_class);
} else {
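+ /* Otherwise rewrite the def to a fresh node and store that node to the
+ * TLS slot; the rewritten def is likewise protected for this class */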
ins->dest = spill_index++;
- ins->no_spill = true;
+ ins->no_spill |= (1 << spill_class);
st = v_load_store_scratch(ins->dest, spill_slot, true, ins->mask);
}
if (is_special) {
/* Move */
st = v_mov(spill_node, index);
- st.no_spill = true;
+ st.no_spill |= (1 << spill_class);
} else {
/* TLS load */
st = v_load_store_scratch(index, spill_slot, false, 0xF);