} midgard_branch;
/* Instruction arguments represented as block-local SSA indices, rather than
- * registers. Negative values mean unused. */
+ * registers. ~0 means unused. */
typedef struct {
- int src[3];
- int dest;
+ unsigned src[3];
+ unsigned dest;
bool inline_constant;
} ssa_args;
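/* A minimal sketch (hypothetical helper, not part of this change) of the
 * resulting convention: an argument slot is unused iff it equals ~0. The
 * literal ~0 has type int, but the usual arithmetic conversions promote it
 * to UINT_MAX when compared against an unsigned operand, so the test below
 * is well-defined. */

static inline bool
mir_arg_unused(unsigned arg)
{
        return arg == ~0;
}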
.type = TAG_ALU_4,
.mask = 0xF,
.ssa_args = {
- .src = { SSA_UNUSED_1, src, -1 },
+ .src = { SSA_UNUSED, src, SSA_UNUSED },
.dest = dest,
},
.alu = {
#define REGISTER_TEXTURE_BASE 28
#define REGISTER_SELECT 31
-/* SSA helper aliases to mimic the registers. UNUSED_0 encoded as an inline
- * constant. UNUSED_1 encoded as REGISTER_UNUSED */
-
-#define SSA_UNUSED_0 0
-#define SSA_UNUSED_1 -2
+/* SSA helper aliases to mimic the registers. */
+#define SSA_UNUSED (~0)
#define SSA_FIXED_SHIFT 24
#define SSA_FIXED_REGISTER(reg) (((1 + (reg)) << SSA_FIXED_SHIFT) | 1)
#define SSA_REG_FROM_FIXED(reg) ((((reg) & ~1) >> SSA_FIXED_SHIFT) - 1)
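/* Worked example of the fixed-register encoding (computed by hand, not
 * asserted anywhere in this patch): the low bit tags an index as a fixed
 * register, and the round-trip recovers the register number:
 *
 *   SSA_FIXED_REGISTER(2)          == ((2 + 1) << 24) | 1 == 0x03000001
 *   SSA_REG_FROM_FIXED(0x03000001) == (0x03000000 >> 24) - 1 == 2
 *
 * A compile-time check one could add:
 *
 *   _Static_assert(SSA_REG_FROM_FIXED(SSA_FIXED_REGISTER(2)) == 2,
 *                  "fixed-register encoding round-trips");
 */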
.type = TAG_LOAD_STORE_4, \
.mask = 0xF, \
.ssa_args = { \
- .dest = -1, \
- .src = { -1, -1, -1 }, \
+ .dest = ~0, \
+ .src = { ~0, ~0, ~0 }, \
}, \
.load_store = { \
.op = midgard_op_##name, \
.compact_branch = true,
.br_compact = compact,
.ssa_args = {
- .dest = -1,
- .src = { -1, -1, -1 },
+ .dest = ~0,
+ .src = { ~0, ~0, ~0 },
}
};
.invert_conditional = invert
},
.ssa_args = {
- .dest = -1,
- .src = { -1, -1, -1 },
+ .dest = ~0,
+ .src = { ~0, ~0, ~0 },
}
};
case nir_intrinsic_store_ssbo:
return midgard_sysval_for_ssbo(instr);
default:
- return -1;
+ return ~0;
}
}
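/* Caller-side sketch (hypothetical, but matching the new convention): a
 * non-sysval intrinsic now reports ~0 rather than a negative index, so
 * callers filter with an equality test instead of a sign check:
 *
 *   unsigned sysval = ...; // result of the switch above
 *   if (sysval == ~0)
 *           return;
 */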
.mask = 1 << COMPONENT_W,
.ssa_args = {
- .src = { condition, condition, -1 },
+ .src = { condition, condition, ~0 },
.dest = SSA_FIXED_REGISTER(31),
},
.precede_break = true,
.mask = mask_of(nr_comp),
.ssa_args = {
- .src = { condition, condition, -1 },
+ .src = { condition, condition, ~0 },
.dest = SSA_FIXED_REGISTER(31),
},
.alu = {
* needs it, or else we may segfault. */
unsigned src0 = nir_alu_src_index(ctx, &instr->src[0]);
- unsigned src1 = nr_inputs == 2 ? nir_alu_src_index(ctx, &instr->src[1]) : SSA_UNUSED_0;
+ unsigned src1 = nr_inputs == 2 ? nir_alu_src_index(ctx, &instr->src[1]) : ~0;
/* Rather than use the instruction generation helpers, we do it
* ourselves here to avoid the mess */
.type = TAG_ALU_4,
.ssa_args = {
.src = {
- quirk_flipped_r24 ? SSA_UNUSED_1 : src0,
- quirk_flipped_r24 ? src0 : src1,
- -1
+ quirk_flipped_r24 ? ~0 : src0,
+ quirk_flipped_r24 ? src0 : src1,
+ ~0
},
.dest = dest,
}
midgard_instruction rt_move = {
.ssa_args = {
- .dest = -1
+ .dest = ~0
}
};
if (rt != 0) {
/* We'll write to r1.z */
- rt_move = v_mov(-1, blank_alu_src, SSA_FIXED_REGISTER(1));
+ rt_move = v_mov(~0, blank_alu_src, SSA_FIXED_REGISTER(1));
rt_move.mask = 1 << COMPONENT_Z;
rt_move.unit = UNIT_SADD;
case nir_intrinsic_load_viewport_scale:
case nir_intrinsic_load_viewport_offset:
case nir_intrinsic_load_num_work_groups:
- emit_sysval_read(ctx, &instr->instr, -1, 3);
+ emit_sysval_read(ctx, &instr->instr, ~0, 3);
break;
case nir_intrinsic_load_work_group_id:
.mask = 0xF,
.ssa_args = {
.dest = nir_dest_index(ctx, &instr->dest),
- .src = { -1, -1, -1 },
+ .src = { ~0, ~0, ~0 },
},
.texture = {
.op = midgard_texop,
emit_texop_native(ctx, instr, TEXTURE_OP_TEXEL_FETCH);
break;
case nir_texop_txs:
- emit_sysval_read(ctx, &instr->instr, -1, 4);
+ emit_sysval_read(ctx, &instr->instr, ~0, 4);
break;
default:
unreachable("Unhanlded texture op");
/* Get rid of the embedded constant */
ins->has_constants = false;
- ins->ssa_args.src[1] = -1;
+ ins->ssa_args.src[1] = ~0;
ins->ssa_args.inline_constant = true;
ins->inline_constant = scaled_constant;
}
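/* Once the constant is folded into the ALU word as an inline constant, the
 * second source no longer reads the embedded-constant slot, so it is marked
 * unused (~0) rather than left pointing at a value nothing consumes. */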
emit_fragment_epilogue(compiler_context *ctx)
{
/* Just emit the last chunk with the branch */
- EMIT(alu_br_compact_cond, midgard_jmp_writeout_op_writeout, TAG_ALU_4, -1, midgard_condition_always);
+ EMIT(alu_br_compact_cond, midgard_jmp_writeout_op_writeout, TAG_ALU_4, ~0, midgard_condition_always);
}
static midgard_block *
this_block->is_scheduled = false;
++ctx->block_count;
- ctx->texture_index[0] = -1;
- ctx->texture_index[1] = -1;
+ ctx->texture_index[0] = ~0;
+ ctx->texture_index[1] = ~0;
/* Set up current block */
list_inithead(&this_block->instructions);
.mask = mask_of(nr_components),
.ssa_args = {
.dest = nir_dest_index(ctx, &instr->dest.dest),
- .src = { nir_alu_src_index(ctx, &instr->src[0]), -1, -1 },
+ .src = { nir_alu_src_index(ctx, &instr->src[0]), ~0, ~0 },
},
.texture = {
.op = mir_derivative_op(instr->op),
unsigned iA = ins->ssa_args.dest;
unsigned iB = ins->ssa_args.src[1];
- if ((iA < 0) || (iB < 0)) continue;
+ if ((iA == ~0) || (iB == ~0)) continue;
unsigned A = iA >= SSA_FIXED_MINIMUM ?
SSA_REG_FROM_FIXED(iA) :
.type = TAG_ALU_4,
.mask = ins->mask,
.ssa_args = {
- .src = { temp, -1, -1 },
+ .src = { temp, ~0, ~0 },
.dest = ins->ssa_args.dest,
.inline_constant = true
},
.mask = ins->mask,
.ssa_args = {
.dest = to,
- .src = { frcp_from, -1, -1 },
+ .src = { frcp_from, ~0, ~0 },
},
.load_store = {
.op = frcp_component == COMPONENT_W ?
static void
mir_print_index(int source)
{
- if (source < 0) {
+ if (source == ~0) {
printf("_");
return;
}
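/* Note that `source` is still an int here, so the comparison is effectively
 * against -1; the unsigned ~0 sentinel stored in ssa_args converts to -1
 * when passed through the int parameter (implementation-defined, but -1 on
 * two's-complement targets), so the test still matches. Sketch:
 *
 *   unsigned stored = ~0;   // sentinel as stored in ssa_args
 *   int source = stored;    // -1 in practice
 *   assert(source == ~0);   // ~0 is the int -1 here
 */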
* register corresponds to */
static struct phys_reg
-index_to_reg(compiler_context *ctx, struct ra_graph *g, int reg)
+index_to_reg(compiler_context *ctx, struct ra_graph *g, unsigned reg)
{
/* Check for special cases */
- if (reg >= SSA_FIXED_MINIMUM)
+ if (reg == ~0)
+ return default_phys_reg(REGISTER_UNUSED);
+ else if (reg >= SSA_FIXED_MINIMUM)
return default_phys_reg(SSA_REG_FROM_FIXED(reg));
- else if ((reg < 0) || !g)
+ else if (!g)
return default_phys_reg(REGISTER_UNUSED);
/* Special cases aside, we pick the underlying register */
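/* Ordering matters in the dispatch above: viewed as unsigned, ~0 compares
 * greater than SSA_FIXED_MINIMUM, so the unused-sentinel test must run
 * before the fixed-register range test or SSA_REG_FROM_FIXED would decode
 * garbage from the sentinel. Illustrative check (not in the patch):
 *
 *   _Static_assert((unsigned) ~0 >= SSA_FIXED_MINIMUM, "test ~0 first");
 */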
set_class(unsigned *classes, unsigned node, unsigned class)
{
/* Check that we're even a node */
- if ((node < 0) || (node >= SSA_FIXED_MINIMUM))
+ if (node >= SSA_FIXED_MINIMUM)
return;
/* First 4 are work, next 4 are load/store.. */
static void
force_vec4(unsigned *classes, unsigned node)
{
- if ((node < 0) || (node >= SSA_FIXED_MINIMUM))
+ if (node >= SSA_FIXED_MINIMUM)
return;
/* Force vec4 = 3 */
check_read_class(unsigned *classes, unsigned tag, unsigned node)
{
/* Non-nodes are implicitly ok */
- if ((node < 0) || (node >= SSA_FIXED_MINIMUM))
+ if (node >= SSA_FIXED_MINIMUM)
return true;
unsigned current_class = classes[node] >> 2;
check_write_class(unsigned *classes, unsigned tag, unsigned node)
{
/* Non-nodes are implicitly ok */
- if ((node < 0) || (node >= SSA_FIXED_MINIMUM))
+ if (node >= SSA_FIXED_MINIMUM)
return true;
unsigned current_class = classes[node] >> 2;
static void
mark_node_class (unsigned *bitfield, unsigned node)
{
- if ((node >= 0) && (node < SSA_FIXED_MINIMUM))
+ if (node < SSA_FIXED_MINIMUM)
BITSET_SET(bitfield, node);
}
static void
liveness_gen(uint8_t *live, unsigned node, unsigned max, unsigned mask)
{
- if ((node < 0) || (node >= max))
+ if (node >= max)
return;
live[node] |= mask;
static void
liveness_kill(uint8_t *live, unsigned node, unsigned max, unsigned mask)
{
- if ((node < 0) || (node >= max))
+ if (node >= max)
return;
live[node] &= ~mask;
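/* The same simplification recurs in set_class, force_vec4, the read/write
 * class checks, and these liveness helpers: once nodes are unsigned, one
 * upper-bound comparison rejects both out-of-range nodes and the ~0
 * sentinel, since ~0 converts to UINT_MAX. Illustrative check, assuming
 * <limits.h> (not part of the patch):
 *
 *   _Static_assert((unsigned) ~0 == UINT_MAX, "~0 is the largest node");
 */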
unsigned dest = ins->ssa_args.dest;
- if (dest >= 0 && dest < ctx->temp_count) {
+ if (dest < ctx->temp_count) {
for (unsigned i = 0; i < ctx->temp_count; ++i)
if (live[i])
ra_add_node_interference(g, dest, i);
unsigned *found_class = calloc(sizeof(unsigned), ctx->temp_count);
mir_foreach_instr_global(ctx, ins) {
- if (ins->ssa_args.dest < 0) continue;
if (ins->ssa_args.dest >= SSA_FIXED_MINIMUM) continue;
/* 0 for x, 1 for xy, 2 for xyz, 3 for xyzw */
compose_writemask(ins->mask, dest);
/* If there is a register LOD/bias, use it */
- if (args.src[1] > -1) {
+ if (args.src[1] != ~0) {
midgard_tex_register_select sel = {
.select = lod.reg,
.full = 1,
unsigned src1 = ins->ssa_args.src[1];
if (!mir_is_written_before(ctx, bundle[0], src0))
- src0 = -1;
+ src0 = ~0;
if (!mir_is_written_before(ctx, bundle[0], src1))
- src1 = -1;
+ src1 = ~0;
- if ((src0 > 0) && (src0 < node_count))
+ if (src0 < node_count)
BITSET_SET(dependencies, src0);
- if ((src1 > 0) && (src1 < node_count))
+ if (src1 < node_count)
BITSET_SET(dependencies, src1);
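/* Note the old guard `src0 > 0` also excluded index 0, which was the
 * SSA_UNUSED_0 alias; with that alias gone, 0 is an ordinary node, so
 * `src0 < node_count` both admits node 0 and rejects the ~0 sentinel in a
 * single comparison. */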
/* Requirement 2 */
bool deps = false;
for (unsigned s = 0; s < ARRAY_SIZE(ins->ssa_args.src); ++s)
- deps |= (c->ssa_args.src[s] != -1);
+ deps |= (c->ssa_args.src[s] != ~0);
if (deps)
continue;
static unsigned
find_or_allocate_temp(compiler_context *ctx, unsigned hash)
{
- if ((hash < 0) || (hash >= SSA_FIXED_MINIMUM))
+ if (hash >= SSA_FIXED_MINIMUM)
return hash;
unsigned temp = (uintptr_t) _mesa_hash_table_u64_search(
.type = TAG_LOAD_STORE_4,
.mask = mask,
.ssa_args = {
- .dest = -1,
- .src = { -1, -1, -1 },
+ .dest = ~0,
+ .src = { ~0, ~0, ~0 },
},
.load_store = {
.op = is_store ? midgard_op_st_int4 : midgard_op_ld_int4,
bool
mir_is_written_before(compiler_context *ctx, midgard_instruction *ins, unsigned node)
{
- if ((node < 0) || (node >= SSA_FIXED_MINIMUM))
+ if (node >= SSA_FIXED_MINIMUM)
return true;
mir_foreach_instr_global(ctx, q) {