return IS_ALU(bundle->tag);
}
+/* Registers and SSA values are distinguished in the backend by the bottom-most bit */
+
+#define IS_REG (1)
+
+static inline unsigned
+make_compiler_temp(compiler_context *ctx)
+{
+ return (ctx->func->impl->ssa_alloc + ctx->temp_alloc++) << 1;
+}
+
/* MIR manipulation */
void mir_rewrite_index(compiler_context *ctx, unsigned old, unsigned new);
#define SSA_UNUSED_1 -2
#define SSA_FIXED_SHIFT 24
-#define SSA_FIXED_REGISTER(reg) ((1 + reg) << SSA_FIXED_SHIFT)
-#define SSA_REG_FROM_FIXED(reg) ((reg >> SSA_FIXED_SHIFT) - 1)
+#define SSA_FIXED_REGISTER(reg) (((1 + (reg)) << SSA_FIXED_SHIFT) | 1)
+#define SSA_REG_FROM_FIXED(reg) ((((reg) & ~1) >> SSA_FIXED_SHIFT) - 1)
#define SSA_FIXED_MINIMUM SSA_FIXED_REGISTER(0)
/* Swizzle support */
nir_dest_index(compiler_context *ctx, nir_dest *dst)
{
if (dst->is_ssa)
- return dst->ssa.index;
+ return (dst->ssa.index << 1) | 0;
else {
assert(!dst->reg.indirect);
- return ctx->func->impl->ssa_alloc + dst->reg.reg->index;
+ return (dst->reg.reg->index << 1) | IS_REG;
}
}
float *v = rzalloc_array(NULL, float, 4);
nir_const_load_to_arr(v, instr, f32);
- _mesa_hash_table_u64_insert(ctx->ssa_constants, def.index + 1, v);
+
+ /* Key is the SSA index shifted into tagged form; +1 so index 0 does not map to key 0 */
+ _mesa_hash_table_u64_insert(ctx->ssa_constants, (def.index << 1) + 1, v);
}
/* Normally constants are embedded implicitly, but for I/O and such we have to
nir_src_index(compiler_context *ctx, nir_src *src)
{
if (src->is_ssa)
- return src->ssa->index;
+ return (src->ssa->index << 1) | 0;
else {
assert(!src->reg.indirect);
- return ctx->func->impl->ssa_alloc + src->reg.reg->index;
+ return (src->reg.reg->index << 1) | IS_REG;
}
}
/* TODO: Registers? */
unsigned src = ins->ssa_args.src1;
- if (src >= ctx->func->impl->ssa_alloc) continue;
+ if (src & IS_REG) continue;
assert(!mir_has_multiple_writes(ctx, src));
/* There might be a source modifier, too */
if (to >= SSA_FIXED_MINIMUM) continue;
if (from >= SSA_FIXED_MINIMUM) continue;
- if (to >= ctx->func->impl->ssa_alloc) continue;
- if (from >= ctx->func->impl->ssa_alloc) continue;
+ if (to & IS_REG) continue;
+ if (from & IS_REG) continue;
/* Constant propagation is not handled here, either */
if (ins->ssa_args.inline_constant) continue;
unsigned frcp = ins->ssa_args.src1;
unsigned to = ins->ssa_args.dest;
- if (frcp >= ctx->func->impl->ssa_alloc) continue;
- if (to >= ctx->func->impl->ssa_alloc) continue;
+ if (frcp & IS_REG) continue;
+ if (to & IS_REG) continue;
bool frcp_found = false;
unsigned frcp_component = 0;
unsigned vary = ins->ssa_args.src0;
unsigned to = ins->ssa_args.dest;
- if (vary >= ctx->func->impl->ssa_alloc) continue;
- if (to >= ctx->func->impl->ssa_alloc) continue;
+ if (vary & IS_REG) continue;
+ if (to & IS_REG) continue;
if (!mir_single_use(ctx, vary)) continue;
/* Check for a varying source. If we find it, we rewrite */
/* All of r0 has to be written out along with
* the branch writeout */
- unsigned node_count = ctx->func->impl->ssa_alloc + ctx->func->impl->reg_alloc;
-
- if (ains->writeout && !can_writeout_fragment(ctx, scheduled, index, node_count)) {
+ if (ains->writeout && !can_writeout_fragment(ctx, scheduled, index, ctx->temp_count)) {
/* We only work on full moves
* at the beginning. We could
* probably do better */
/* We do need the move for safety for a non-SSA dest, or if
* we're being fed into a special class */
- bool needs_move = ins->ssa_args.dest >= ctx->func->impl->ssa_alloc;
+ bool needs_move = ins->ssa_args.dest & IS_REG;
needs_move |= mir_special_index(ctx, ins->ssa_args.dest);
if (needs_move) {