Fold the block-local ssa_args struct (the src[]/dest indices and its inline_constant flag) directly into midgard_instruction, renaming the flag to has_inline_constant so it no longer shares a name with the 16-bit inline_constant payload. This is purely mechanical refactoring to simplify MIR; there is no functional change.
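For reference, a minimal before/after sketch of the instruction struct (field names are taken from the diff below; every other midgard_instruction field, and the headers they live in, are elided):

    /* Before: sources and destination lived in a nested wrapper whose
     * inline_constant flag shared a name with the 16-bit constant payload. */
    typedef struct {
            unsigned src[3];    /* ~0 means unused */
            unsigned dest;
            bool inline_constant;
    } ssa_args;

    typedef struct midgard_instruction {
            /* ... */
            ssa_args ssa_args;
            uint16_t inline_constant;
            /* ... */
    } midgard_instruction;

    /* After: the wrapper is gone, so accesses read ins->src[i], ins->dest
     * and ins->has_inline_constant directly; the flag is renamed to avoid
     * clashing with the existing 16-bit inline_constant field. */
    typedef struct midgard_instruction {
            /* ... */
            unsigned src[3];    /* ~0 means unused */
            unsigned dest;
            uint16_t inline_constant;   /* the constant payload itself */
            bool has_inline_constant;   /* whether the payload is in use */
            /* ... */
    } midgard_instruction;

The diff that follows is then the mechanical rewrite of every ins->ssa_args.X access to ins->X.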
Signed-off-by: Alyssa Rosenzweig <alyssa.rosenzweig@collabora.com>
};
} midgard_branch;
-/* Instruction arguments represented as block-local SSA indices, rather than
- * registers. ~0 means unused. */
-
-typedef struct {
- unsigned src[3];
- unsigned dest;
-
- bool inline_constant;
-} ssa_args;
-
/* Generic in-memory data type representing a single logical instruction, rather
 * than a single instruction group. This is the preferred form for code gen.
 * Multiple midgard_instructions will later be combined during scheduling,
unsigned type; /* ALU, load/store, texture */
- /* If the register allocator has not run yet... */
- ssa_args ssa_args;
+ /* Instruction arguments represented as block-local SSA
+ * indices, rather than registers. ~0 means unused. */
+ unsigned src[3];
+ unsigned dest;
/* Special fields for an ALU instruction */
midgard_reg_info registers;
uint32_t constants[4];
uint16_t inline_constant;
bool has_blend_constant;
+ bool has_inline_constant;
bool compact_branch;
bool writeout;
v = (struct midgard_block *) (_entry_##v ? _entry_##v->key : NULL))
#define mir_foreach_src(ins, v) \
- for (unsigned v = 0; v < ARRAY_SIZE(ins->ssa_args.src); ++v)
+ for (unsigned v = 0; v < ARRAY_SIZE(ins->src); ++v)
static inline midgard_instruction *
mir_last_in_block(struct midgard_block *block)
midgard_instruction ins = {
.type = TAG_ALU_4,
.mask = 0xF,
- .ssa_args = {
- .src = { SSA_UNUSED, src, SSA_UNUSED },
- .dest = dest,
- },
+ .src = { SSA_UNUSED, src, SSA_UNUSED },
+ .dest = dest,
.alu = {
.op = midgard_alu_op_imov,
.reg_mode = midgard_reg_mode_32,
static inline bool
mir_has_arg(midgard_instruction *ins, unsigned arg)
{
- for (unsigned i = 0; i < ARRAY_SIZE(ins->ssa_args.src); ++i) {
- if (ins->ssa_args.src[i] == arg)
+ for (unsigned i = 0; i < ARRAY_SIZE(ins->src); ++i) {
+ if (ins->src[i] == arg)
return true;
}
midgard_instruction i = { \
.type = TAG_LOAD_STORE_4, \
.mask = 0xF, \
- .ssa_args = { \
- .dest = ~0, \
- .src = { ~0, ~0, ~0 }, \
- }, \
+ .dest = ~0, \
+ .src = { ~0, ~0, ~0 }, \
.load_store = { \
.op = midgard_op_##name, \
.swizzle = SWIZZLE_XYZW, \
}; \
\
if (store) \
- i.ssa_args.src[0] = ssa; \
+ i.src[0] = ssa; \
else \
- i.ssa_args.dest = ssa; \
+ i.dest = ssa; \
\
return i; \
}
.prepacked_branch = true,
.compact_branch = true,
.br_compact = compact,
- .ssa_args = {
- .dest = ~0,
- .src = { ~0, ~0, ~0 },
- }
+ .dest = ~0,
+ .src = { ~0, ~0, ~0 },
};
if (op == midgard_jmp_writeout_op_writeout)
.conditional = conditional,
.invert_conditional = invert
},
- .ssa_args = {
- .dest = ~0,
- .src = { ~0, ~0, ~0 },
- }
+ .dest = ~0,
+ .src = { ~0, ~0, ~0 },
};
return ins;
.precede_break = true,
.unit = for_branch ? UNIT_SMUL : UNIT_SADD,
.mask = 1 << COMPONENT_W,
-
- .ssa_args = {
- .src = { condition, condition, ~0 },
- .dest = SSA_FIXED_REGISTER(31),
- },
+ .src = { condition, condition, ~0 },
+ .dest = SSA_FIXED_REGISTER(31),
.alu = {
.op = midgard_alu_op_iand,
.type = TAG_ALU_4,
.precede_break = true,
.mask = mask_of(nr_comp),
- .ssa_args = {
- .src = { condition, condition, ~0 },
- .dest = SSA_FIXED_REGISTER(31),
- },
+ .src = { condition, condition, ~0 },
+ .dest = SSA_FIXED_REGISTER(31),
.alu = {
.op = midgard_alu_op_iand,
.outmod = midgard_outmod_int_wrap,
midgard_instruction ins = {
.type = TAG_ALU_4,
- .ssa_args = {
- .src = {
- quirk_flipped_r24 ? ~0 : src0,
- quirk_flipped_r24 ? src0 : src1,
- ~0
- },
- .dest = dest,
- }
+ .src = {
+ quirk_flipped_r24 ? ~0 : src0,
+ quirk_flipped_r24 ? src0 : src1,
+ ~0
+ },
+ .dest = dest,
};
nir_alu_src *nirmods[2] = { NULL };
* inline, since we're 32-bit, not 16-bit like the inline
* constants) */
- ins.ssa_args.inline_constant = false;
- ins.ssa_args.src[1] = SSA_FIXED_REGISTER(REGISTER_CONSTANT);
+ ins.has_inline_constant = false;
+ ins.src[1] = SSA_FIXED_REGISTER(REGISTER_CONSTANT);
ins.has_constants = true;
if (instr->op == nir_op_b2f32) {
ins.alu.src2 = vector_alu_srco_unsigned(blank_alu_src_xxxx);
} else if (nr_inputs == 1 && !quirk_flipped_r24) {
/* Lots of instructions need a 0 plonked in */
- ins.ssa_args.inline_constant = false;
- ins.ssa_args.src[1] = SSA_FIXED_REGISTER(REGISTER_CONSTANT);
+ ins.has_inline_constant = false;
+ ins.src[1] = SSA_FIXED_REGISTER(REGISTER_CONSTANT);
ins.has_constants = true;
ins.constants[0] = 0;
ins.alu.src2 = vector_alu_srco_unsigned(blank_alu_src_xxxx);
ins.mask = mir_mask_for_intr(instr, true);
if (indirect_offset) {
- ins.ssa_args.src[1] = nir_src_index(ctx, indirect_offset);
+ ins.src[1] = nir_src_index(ctx, indirect_offset);
ins.load_store.arg_2 = 0x80;
} else {
ins.load_store.arg_2 = 0x1E;
* arg_2 = the offset.
*/
- ins.ssa_args.src[is_read ? 0 : 1] = addr;
+ ins.src[is_read ? 0 : 1] = addr;
/* TODO: What is this? It looks superficially like a shift << 5, but
 * arg_1 doesn't take a shift. Should it be E0 or A0? */
/* We also need to emit the indirect offset */
if (indirect_offset)
- ins.ssa_args.src[is_read ? 1 : 2] = nir_src_index(ctx, indirect_offset);
+ ins.src[is_read ? 1 : 2] = nir_src_index(ctx, indirect_offset);
else
ins.load_store.arg_2 = 0x7E;
ins.load_store.varying_parameters = u;
if (indirect_offset)
- ins.ssa_args.src[1] = nir_src_index(ctx, indirect_offset);
+ ins.src[1] = nir_src_index(ctx, indirect_offset);
else
ins.load_store.arg_2 = 0x1E;
/* If we're doing MRT, we need to specify the render target */
midgard_instruction rt_move = {
- .ssa_args = {
- .dest = ~0
- }
+ .dest = ~0
};
if (rt != 0) {
rt_move.unit = UNIT_SADD;
/* r1.z = (rt * 0x100) */
- rt_move.ssa_args.inline_constant = true;
+ rt_move.has_inline_constant = true;
rt_move.inline_constant = (rt * 0x100);
/* r1 */
v_alu_br_compact_cond(midgard_jmp_writeout_op_writeout, TAG_ALU_4, offset, midgard_condition_always);
/* Add dependencies */
- ins.ssa_args.src[0] = move.ssa_args.dest;
- ins.ssa_args.src[1] = rt_move.ssa_args.dest;
+ ins.src[0] = move.dest;
+ ins.src[1] = rt_move.dest;
/* Emit the branch */
emit_mir_instruction(ctx, ins);
midgard_instruction ins = {
.type = TAG_TEXTURE_4,
.mask = 0xF,
- .ssa_args = {
- .dest = nir_dest_index(ctx, &instr->dest),
- .src = { ~0, ~0, ~0 },
- },
+ .dest = nir_dest_index(ctx, &instr->dest),
+ .src = { ~0, ~0, ~0 },
.texture = {
.op = midgard_texop,
.format = midgard_tex_format(instr->sampler_dim),
unsigned temp = make_compiler_temp(ctx);
midgard_instruction ld = m_ld_cubemap_coords(temp, 0);
- ld.ssa_args.src[0] = index;
+ ld.src[0] = index;
ld.mask = 0x3; /* xy */
ld.load_store.arg_1 = 0x20;
ld.load_store.swizzle = alu_src.swizzle;
emit_mir_instruction(ctx, ld);
- ins.ssa_args.src[0] = temp;
+ ins.src[0] = temp;
ins.texture.in_reg_swizzle = SWIZZLE_XYXX;
} else {
- ins.ssa_args.src[0] = index;
+ ins.src[0] = index;
}
if (instr->sampler_dim == GLSL_SAMPLER_DIM_2D) {
break;
ins.texture.lod_register = true;
- ins.ssa_args.src[1] = index;
+ ins.src[1] = index;
emit_explicit_constant(ctx, index, index);
break;
/* ALU instructions can inline or embed constants, which decreases register
* pressure and saves space. */
-#define CONDITIONAL_ATTACH(src) { \
- void *entry = _mesa_hash_table_u64_search(ctx->ssa_constants, alu->ssa_args.src + 1); \
+#define CONDITIONAL_ATTACH(idx) { \
+ void *entry = _mesa_hash_table_u64_search(ctx->ssa_constants, alu->src[idx] + 1); \
\
if (entry) { \
- attach_constants(ctx, alu, entry, alu->ssa_args.src + 1); \
- alu->ssa_args.src = SSA_FIXED_REGISTER(REGISTER_CONSTANT); \
+ attach_constants(ctx, alu, entry, alu->src[idx] + 1); \
+ alu->src[idx] = SSA_FIXED_REGISTER(REGISTER_CONSTANT); \
} \
}
/* If there is already a constant here, we can do nothing */
if (alu->has_constants) continue;
- CONDITIONAL_ATTACH(src[0]);
+ CONDITIONAL_ATTACH(0);
if (!alu->has_constants) {
- CONDITIONAL_ATTACH(src[1])
+ CONDITIONAL_ATTACH(1)
} else if (!alu->inline_constant) {
/* Corner case: _two_ vec4 constants, for instance with a
* csel. For this case, we can only use a constant
* to the destination register.
*/
- void *entry = _mesa_hash_table_u64_search(ctx->ssa_constants, alu->ssa_args.src[1] + 1);
- unsigned scratch = alu->ssa_args.dest;
+ void *entry = _mesa_hash_table_u64_search(ctx->ssa_constants, alu->src[1] + 1);
+ unsigned scratch = alu->dest;
if (entry) {
midgard_instruction ins = v_mov(SSA_FIXED_REGISTER(REGISTER_CONSTANT), blank_alu_src, scratch);
- attach_constants(ctx, &ins, entry, alu->ssa_args.src[1] + 1);
+ attach_constants(ctx, &ins, entry, alu->src[1] + 1);
/* Force a break XXX Defer r31 writes */
ins.unit = UNIT_VLUT;
/* Set the source */
- alu->ssa_args.src[1] = scratch;
+ alu->src[1] = scratch;
/* Inject us -before- the last instruction which set r31 */
mir_insert_instruction_before(mir_prev_op(alu), ins);
{
mir_foreach_instr(ctx, ins) {
if (!ins->has_constants) continue;
-
- if (ins->ssa_args.inline_constant) continue;
+ if (ins->has_inline_constant) continue;
/* Blend constants must not be inlined by definition */
if (ins->has_blend_constant) continue;
int op = ins->alu.op;
- if (ins->ssa_args.src[0] == SSA_FIXED_REGISTER(REGISTER_CONSTANT)) {
+ if (ins->src[0] == SSA_FIXED_REGISTER(REGISTER_CONSTANT)) {
bool flip = alu_opcode_props[op].props & OP_COMMUTES;
switch (op) {
if (flip) {
/* Flip the SSA numbers */
- ins->ssa_args.src[0] = ins->ssa_args.src[1];
- ins->ssa_args.src[1] = SSA_FIXED_REGISTER(REGISTER_CONSTANT);
+ ins->src[0] = ins->src[1];
+ ins->src[1] = SSA_FIXED_REGISTER(REGISTER_CONSTANT);
/* And flip the modifiers */
}
}
- if (ins->ssa_args.src[1] == SSA_FIXED_REGISTER(REGISTER_CONSTANT)) {
+ if (ins->src[1] == SSA_FIXED_REGISTER(REGISTER_CONSTANT)) {
/* Extract the source information */
midgard_vector_alu_src *src;
/* Get rid of the embedded constant */
ins->has_constants = false;
- ins->ssa_args.src[1] = ~0;
- ins->ssa_args.inline_constant = true;
+ ins->src[1] = ~0;
+ ins->has_inline_constant = true;
ins->inline_constant = scaled_constant;
}
}
if (ins->alu.outmod != midgard_outmod_pos) continue;
/* TODO: Registers? */
- unsigned src = ins->ssa_args.src[1];
+ unsigned src = ins->src[1];
if (src & IS_REG) continue;
assert(!mir_has_multiple_writes(ctx, src));
/* Backpropagate the modifier */
mir_foreach_instr_in_block_from_rev(block, v, mir_prev_op(ins)) {
if (v->type != TAG_ALU_4) continue;
- if (v->ssa_args.dest != src) continue;
+ if (v->dest != src) continue;
/* Can we even take a float outmod? */
if (midgard_is_integer_out_op(v->alu.op)) continue;
midgard_instruction ins = {
.type = TAG_TEXTURE_4,
.mask = mask_of(nr_components),
- .ssa_args = {
- .dest = nir_dest_index(ctx, &instr->dest.dest),
- .src = { nir_alu_src_index(ctx, &instr->src[0]), ~0, ~0 },
- },
+ .dest = nir_dest_index(ctx, &instr->dest.dest),
+ .src = { nir_alu_src_index(ctx, &instr->src[0]), ~0, ~0 },
.texture = {
.op = mir_derivative_op(instr->op),
.format = MALI_TEX_2D,
* rewrite to use a register */
unsigned new = make_compiler_temp_reg(ctx);
- mir_rewrite_index(ctx, ins->ssa_args.dest, new);
+ mir_rewrite_index(ctx, ins->dest, new);
}
}
{
bool is_int = midgard_is_integer_op(v.op);
bool is_full = v.reg_mode == midgard_reg_mode_32;
- bool is_inline_constant = ins->ssa_args.inline_constant;
+ bool is_inline_constant = ins->has_inline_constant;
unsigned comp = component_from_mask(ins->mask);
/* Inline constant is passed along rather than trying to extract it
* from v */
- if (ins->ssa_args.inline_constant) {
+ if (ins->has_inline_constant) {
uint16_t imm = 0;
int lower_11 = ins->inline_constant & ((1 << 12) - 1);
imm |= (lower_11 >> 9) & 3;
/* If written-before-use, we're gone */
- if (ins->ssa_args.dest == src)
+ if (ins->dest == src)
overwritten_mask |= ins->mask;
}
unsigned write_count = 0;
mir_foreach_instr_global(ctx, ins) {
- if (ins->ssa_args.dest == dest)
+ if (ins->dest == dest)
write_count++;
}
if (ins->type != TAG_ALU_4) continue;
if (!OP_IS_MOVE(ins->alu.op)) continue;
- unsigned from = ins->ssa_args.src[1];
- unsigned to = ins->ssa_args.dest;
+ unsigned from = ins->src[1];
+ unsigned to = ins->dest;
/* We only work on pure SSA */
if (from & IS_REG) continue;
/* Constant propagation is not handled here, either */
- if (ins->ssa_args.inline_constant) continue;
+ if (ins->has_inline_constant) continue;
if (ins->has_constants) continue;
/* Modifier propagation is not handled here */
OP_IS_STORE(q->load_store.op) ? 1 : 0;
mir_foreach_src(q, s) {
- if ((s >= start) && q->ssa_args.src[s] == to) {
+ if ((s >= start) && q->src[s] == to) {
skip = true;
break;
}
if (ins->type != TAG_ALU_4) continue;
if (ins->compact_branch) continue;
- if (ins->ssa_args.dest >= SSA_FIXED_MINIMUM) continue;
- if (mir_is_live_after(ctx, block, ins, ins->ssa_args.dest)) continue;
+ if (ins->dest >= SSA_FIXED_MINIMUM) continue;
+ if (mir_is_live_after(ctx, block, ins, ins->dest)) continue;
mir_remove_instruction(ins);
progress = true;
mir_foreach_instr_in_block_from(block, q, mir_next_op(ins)) {
/* Check if used */
- if (mir_has_arg(q, ins->ssa_args.dest))
+ if (mir_has_arg(q, ins->dest))
break;
/* Check if overwritten */
- if (q->ssa_args.dest == ins->ssa_args.dest) {
+ if (q->dest == ins->dest) {
/* Special case to vec4; component tracking is
* harder */
if (ins->dont_eliminate) continue;
/* Check we're to the same place post-RA */
- unsigned iA = ins->ssa_args.dest;
- unsigned iB = ins->ssa_args.src[1];
+ unsigned iA = ins->dest;
+ unsigned iB = ins->src[1];
if ((iA == ~0) || (iB == ~0)) continue;
if (ins->mask != 0xF) continue;
/* We do need to rewrite to facilitate pipelining/scheduling */
- mir_rewrite_index(ctx, ins->ssa_args.src[1], ins->ssa_args.dest);
+ mir_rewrite_index(ctx, ins->src[1], ins->dest);
/* We're good to go */
mir_remove_instruction(ins);
mir_foreach_instr_in_block(block, ins) {
if (ins->type != TAG_ALU_4) continue;
if (ins->alu.op != midgard_alu_op_imov) continue;
- if (ins->ssa_args.inline_constant) continue;
+ if (ins->has_inline_constant) continue;
if (!ins->has_constants) continue;
if (mir_nontrivial_source2_mod_simple(ins)) continue;
if (mir_nontrivial_outmod(ins)) continue;
midgard_instruction not = {
.type = TAG_ALU_4,
.mask = ins->mask,
- .ssa_args = {
- .src = { temp, ~0, ~0 },
- .dest = ins->ssa_args.dest,
- .inline_constant = true
- },
+ .src = { temp, ~0, ~0 },
+ .dest = ins->dest,
+ .has_inline_constant = true,
.alu = {
.op = midgard_alu_op_inor,
/* TODO: i16 */
},
};
- ins->ssa_args.dest = temp;
+ ins->dest = temp;
ins->invert = false;
mir_insert_instruction_before(mir_next_op(ins), not);
}
if (ins->alu.op != midgard_alu_op_imov) continue;
if (!ins->invert) continue;
if (mir_nontrivial_source2_mod_simple(ins)) continue;
- if (ins->ssa_args.src[1] & IS_REG) continue;
+ if (ins->src[1] & IS_REG) continue;
/* Is it beneficial to propagate? */
- if (!mir_single_use(ctx, ins->ssa_args.src[1])) continue;
+ if (!mir_single_use(ctx, ins->src[1])) continue;
/* We found an imov.not, propagate the invert back */
mir_foreach_instr_in_block_from_rev(block, v, mir_prev_op(ins)) {
- if (v->ssa_args.dest != ins->ssa_args.src[1]) continue;
+ if (v->dest != ins->src[1]) continue;
if (v->type != TAG_ALU_4) break;
v->invert = !v->invert;
/* Strips and returns the invert off a node */
mir_foreach_instr_global(ctx, ins) {
if (ins->compact_branch) continue;
- if (ins->ssa_args.dest != node) continue;
+ if (ins->dest != node) continue;
bool status = ins->invert;
ins->invert = false;
if (!mir_is_bitwise(ins)) continue;
if (ins->invert) continue;
- if (ins->ssa_args.src[0] & IS_REG) continue;
- if (ins->ssa_args.src[1] & IS_REG) continue;
- if (!mir_single_use(ctx, ins->ssa_args.src[0])) continue;
- if (!ins->ssa_args.inline_constant && !mir_single_use(ctx, ins->ssa_args.src[1])) continue;
+ if (ins->src[0] & IS_REG) continue;
+ if (ins->src[1] & IS_REG) continue;
+ if (!mir_single_use(ctx, ins->src[0])) continue;
+ if (!ins->has_inline_constant && !mir_single_use(ctx, ins->src[1])) continue;
- bool not_a = mir_strip_inverted(ctx, ins->ssa_args.src[0]);
+ bool not_a = mir_strip_inverted(ctx, ins->src[0]);
bool not_b =
- ins->ssa_args.inline_constant ? false :
- mir_strip_inverted(ctx, ins->ssa_args.src[1]);
+ ins->has_inline_constant ? false :
+ mir_strip_inverted(ctx, ins->src[1]);
/* Edge case: if src0 == src1, it will already have been stripped */
- if ((ins->ssa_args.src[0] == ins->ssa_args.src[1]) && !ins->ssa_args.inline_constant)
+ if ((ins->src[0] == ins->src[1]) && !ins->has_inline_constant)
not_b = not_a;
progress |= (not_a || not_b);
if (both) {
ins->alu.op = mir_demorgan_op(ins->alu.op);
- } else if (right || (left && !ins->ssa_args.inline_constant)) {
+ } else if (right || (left && !ins->has_inline_constant)) {
if (left) {
/* Commute */
- unsigned temp = ins->ssa_args.src[0];
- ins->ssa_args.src[0] = ins->ssa_args.src[1];
- ins->ssa_args.src[1] = temp;
+ unsigned temp = ins->src[0];
+ ins->src[0] = ins->src[1];
+ ins->src[1] = temp;
}
ins->alu.op = mir_notright_op(ins->alu.op);
- } else if (left && ins->ssa_args.inline_constant) {
+ } else if (left && ins->has_inline_constant) {
/* Some special transformations:
*
* ~A & c = ~(~(~A) | (~c)) = ~(A | ~c) = inor(A, ~c)
if (src2.swizzle != SWIZZLE_XXXX) continue;
/* Awesome, we're the right form. Now check where src2 is from */
- unsigned frcp = ins->ssa_args.src[1];
- unsigned to = ins->ssa_args.dest;
+ unsigned frcp = ins->src[1];
+ unsigned to = ins->dest;
if (frcp & IS_REG) continue;
if (to & IS_REG) continue;
unsigned frcp_from = 0;
mir_foreach_instr_in_block_safe(block, sub) {
- if (sub->ssa_args.dest != frcp) continue;
+ if (sub->dest != frcp) continue;
midgard_vector_alu_src s =
vector_alu_from_unsigned(sub->alu.src1);
frcp_component = s.swizzle & 3;
- frcp_from = sub->ssa_args.src[0];
+ frcp_from = sub->src[0];
frcp_found =
(sub->type == TAG_ALU_4) &&
if (mir_use_count(ctx, frcp_from) > 2) continue;
mir_foreach_instr_in_block_safe(block, v) {
- if (v->ssa_args.dest != frcp_from) continue;
+ if (v->dest != frcp_from) continue;
if (v->type != TAG_LOAD_STORE_4) break;
if (!OP_IS_LOAD_VARY_F(v->load_store.op)) break;
midgard_instruction accel = {
.type = TAG_LOAD_STORE_4,
.mask = ins->mask,
- .ssa_args = {
- .dest = to,
- .src = { frcp_from, ~0, ~0 },
- },
+ .dest = to,
+ .src = { frcp_from, ~0, ~0 },
.load_store = {
.op = frcp_component == COMPONENT_W ?
midgard_op_ldst_perspective_division_w :
if (ins->type != TAG_LOAD_STORE_4) continue;
if (!OP_IS_PROJECTION(ins->load_store.op)) continue;
- unsigned vary = ins->ssa_args.src[0];
- unsigned to = ins->ssa_args.dest;
+ unsigned vary = ins->src[0];
+ unsigned to = ins->dest;
if (vary & IS_REG) continue;
if (to & IS_REG) continue;
bool rewritten = false;
mir_foreach_instr_in_block_safe(block, v) {
- if (v->ssa_args.dest != vary) continue;
+ if (v->dest != vary) continue;
if (v->type != TAG_LOAD_STORE_4) break;
if (!OP_IS_LOAD_VARY_F(v->load_store.op)) break;
v->load_store.varying_parameters = param;
/* Use the new destination */
- v->ssa_args.dest = to;
+ v->dest = to;
rewritten = true;
break;
if (ins->invert)
printf(".not");
- ssa_args *args = &ins->ssa_args;
-
printf(" ");
- mir_print_index(args->dest);
+ mir_print_index(ins->dest);
if (ins->mask != 0xF)
mir_print_mask(ins->mask);
printf(", ");
- mir_print_index(args->src[0]);
+ mir_print_index(ins->src[0]);
printf(", ");
- if (args->inline_constant)
+ if (ins->has_inline_constant)
printf("#%d", ins->inline_constant);
else
- mir_print_index(args->src[1]);
+ mir_print_index(ins->src[1]);
printf(", ");
- mir_print_index(args->src[2]);
+ mir_print_index(ins->src[2]);
if (ins->has_constants) {
uint32_t *uc = ins->constants;
mir_foreach_instr_global(ctx, ins) {
switch (ins->type) {
case TAG_ALU_4:
- mark_node_class(aluw, ins->ssa_args.dest);
- mark_node_class(alur, ins->ssa_args.src[0]);
- mark_node_class(alur, ins->ssa_args.src[1]);
+ mark_node_class(aluw, ins->dest);
+ mark_node_class(alur, ins->src[0]);
+ mark_node_class(alur, ins->src[1]);
break;
case TAG_LOAD_STORE_4:
- mark_node_class(ldst, ins->ssa_args.src[0]);
- mark_node_class(ldst, ins->ssa_args.src[1]);
- mark_node_class(ldst, ins->ssa_args.src[2]);
+ mark_node_class(ldst, ins->src[0]);
+ mark_node_class(ldst, ins->src[1]);
+ mark_node_class(ldst, ins->src[2]);
break;
case TAG_TEXTURE_4:
- mark_node_class(texr, ins->ssa_args.src[0]);
- mark_node_class(texr, ins->ssa_args.src[1]);
- mark_node_class(texr, ins->ssa_args.src[2]);
- mark_node_class(texw, ins->ssa_args.dest);
+ mark_node_class(texr, ins->src[0]);
+ mark_node_class(texr, ins->src[1]);
+ mark_node_class(texr, ins->src[2]);
+ mark_node_class(texw, ins->dest);
break;
}
}
continue;
if (hazard_write) {
- if (pre_use->ssa_args.dest != i)
+ if (pre_use->dest != i)
continue;
} else {
if (!mir_has_arg(pre_use, i))
{
/* live_in[s] = GEN[s] + (live_out[s] - KILL[s]) */
- liveness_kill(live, ins->ssa_args.dest, max, ins->mask);
+ liveness_kill(live, ins->dest, max, ins->mask);
mir_foreach_src(ins, src) {
- unsigned node = ins->ssa_args.src[src];
+ unsigned node = ins->src[src];
unsigned mask = mir_mask_of_read_components(ins, node);
liveness_gen(live, node, max, mask);
/* Mark all registers live after the instruction as
* interfering with the destination */
- unsigned dest = ins->ssa_args.dest;
+ unsigned dest = ins->dest;
if (dest < ctx->temp_count) {
for (unsigned i = 0; i < ctx->temp_count; ++i)
unsigned *found_class = calloc(sizeof(unsigned), ctx->temp_count);
mir_foreach_instr_global(ctx, ins) {
- if (ins->ssa_args.dest >= SSA_FIXED_MINIMUM) continue;
+ if (ins->dest >= SSA_FIXED_MINIMUM) continue;
/* 0 for x, 1 for xy, 2 for xyz, 3 for xyzw */
int class = util_logbase2(ins->mask);
/* Use the largest class if there's ambiguity, this
* handles partial writes */
- int dest = ins->ssa_args.dest;
+ int dest = ins->dest;
found_class[dest] = MAX2(found_class[dest], class);
}
if (ins->type == TAG_LOAD_STORE_4) {
bool force_vec4_only = OP_IS_VEC4_ONLY(ins->load_store.op);
- set_class(found_class, ins->ssa_args.src[0], REG_CLASS_LDST);
- set_class(found_class, ins->ssa_args.src[1], REG_CLASS_LDST);
- set_class(found_class, ins->ssa_args.src[2], REG_CLASS_LDST);
+ set_class(found_class, ins->src[0], REG_CLASS_LDST);
+ set_class(found_class, ins->src[1], REG_CLASS_LDST);
+ set_class(found_class, ins->src[2], REG_CLASS_LDST);
if (force_vec4_only) {
- force_vec4(found_class, ins->ssa_args.dest);
- force_vec4(found_class, ins->ssa_args.src[0]);
- force_vec4(found_class, ins->ssa_args.src[1]);
- force_vec4(found_class, ins->ssa_args.src[2]);
+ force_vec4(found_class, ins->dest);
+ force_vec4(found_class, ins->src[0]);
+ force_vec4(found_class, ins->src[1]);
+ force_vec4(found_class, ins->src[2]);
}
} else if (ins->type == TAG_TEXTURE_4) {
- set_class(found_class, ins->ssa_args.dest, REG_CLASS_TEXW);
- set_class(found_class, ins->ssa_args.src[0], REG_CLASS_TEXR);
- set_class(found_class, ins->ssa_args.src[1], REG_CLASS_TEXR);
- set_class(found_class, ins->ssa_args.src[2], REG_CLASS_TEXR);
+ set_class(found_class, ins->dest, REG_CLASS_TEXW);
+ set_class(found_class, ins->src[0], REG_CLASS_TEXR);
+ set_class(found_class, ins->src[1], REG_CLASS_TEXR);
+ set_class(found_class, ins->src[2], REG_CLASS_TEXR);
}
}
/* Check that the semantics of the class are respected */
mir_foreach_instr_global(ctx, ins) {
- assert(check_write_class(found_class, ins->type, ins->ssa_args.dest));
- assert(check_read_class(found_class, ins->type, ins->ssa_args.src[0]));
- assert(check_read_class(found_class, ins->type, ins->ssa_args.src[1]));
- assert(check_read_class(found_class, ins->type, ins->ssa_args.src[2]));
+ assert(check_write_class(found_class, ins->type, ins->dest));
+ assert(check_read_class(found_class, ins->type, ins->src[0]));
+ assert(check_read_class(found_class, ins->type, ins->src[1]));
+ assert(check_read_class(found_class, ins->type, ins->src[2]));
}
for (unsigned i = 0; i < ctx->temp_count; ++i) {
struct ra_graph *g,
midgard_instruction *ins)
{
- ssa_args args = ins->ssa_args;
-
switch (ins->type) {
case TAG_ALU_4: {
- struct phys_reg src1 = index_to_reg(ctx, g, args.src[0]);
- struct phys_reg src2 = index_to_reg(ctx, g, args.src[1]);
- struct phys_reg dest = index_to_reg(ctx, g, args.dest);
+ struct phys_reg src1 = index_to_reg(ctx, g, ins->src[0]);
+ struct phys_reg src2 = index_to_reg(ctx, g, ins->src[1]);
+ struct phys_reg dest = index_to_reg(ctx, g, ins->dest);
unsigned uncomposed_mask = ins->mask;
ins->mask = compose_writemask(uncomposed_mask, dest);
ins->registers.src1_reg = src1.reg;
- ins->registers.src2_imm = args.inline_constant;
+ ins->registers.src2_imm = ins->has_inline_constant;
- if (args.inline_constant) {
+ if (ins->has_inline_constant) {
/* Encode inline 16-bit constant. See disassembler for
* where the algorithm is from */
bool encodes_src = OP_IS_STORE(ins->load_store.op);
if (encodes_src) {
- struct phys_reg src = index_to_reg(ctx, g, args.src[0]);
+ struct phys_reg src = index_to_reg(ctx, g, ins->src[0]);
assert(src.reg == 26 || src.reg == 27);
ins->load_store.reg = src.reg - 26;
new_swizzle, src.mask,
default_phys_reg(0), src);
} else {
- struct phys_reg src = index_to_reg(ctx, g, args.dest);
+ struct phys_reg src = index_to_reg(ctx, g, ins->dest);
ins->load_store.reg = src.reg;
/* We also follow up by actual arguments */
int src2 =
- encodes_src ? args.src[1] : args.src[0];
+ encodes_src ? ins->src[1] : ins->src[0];
int src3 =
- encodes_src ? args.src[2] : args.src[1];
+ encodes_src ? ins->src[2] : ins->src[1];
if (src2 >= 0) {
struct phys_reg src = index_to_reg(ctx, g, src2);
case TAG_TEXTURE_4: {
/* Grab RA results */
- struct phys_reg dest = index_to_reg(ctx, g, args.dest);
- struct phys_reg coord = index_to_reg(ctx, g, args.src[0]);
- struct phys_reg lod = index_to_reg(ctx, g, args.src[1]);
+ struct phys_reg dest = index_to_reg(ctx, g, ins->dest);
+ struct phys_reg coord = index_to_reg(ctx, g, ins->src[0]);
+ struct phys_reg lod = index_to_reg(ctx, g, ins->src[1]);
assert(dest.reg == 28 || dest.reg == 29);
assert(coord.reg == 28 || coord.reg == 29);
compose_writemask(ins->mask, dest);
/* If there is a register LOD/bias, use it */
- if (args.src[1] != ~0) {
+ if (ins->src[1] != ~0) {
midgard_tex_register_select sel = {
.select = lod.reg,
.full = 1,
unsigned pipeline_count)
{
midgard_instruction *ins = bundle->instructions[i];
- unsigned dest = ins->ssa_args.dest;
+ unsigned dest = ins->dest;
/* We could be pipelining a register, so we need to make sure that all
* of the components read in this bundle are written in this bundle,
* and that no components are written before this bundle */
- unsigned node = ins->ssa_args.dest;
+ unsigned node = ins->dest;
unsigned read_mask = 0;
/* Analyze the bundle for a read mask */
/* Now analyze for a write mask */
for (unsigned i = 0; i < bundle->instruction_count; ++i) {
midgard_instruction *q = bundle->instructions[i];
- if (q->ssa_args.dest != node) continue;
+ if (q->dest != node) continue;
/* Remove the written mask from the read requirements */
read_mask &= ~q->mask;
midgard_instruction *end = bundle->instructions[
bundle->instruction_count - 1];
- if (mir_is_live_after(ctx, block, end, ins->ssa_args.dest))
+ if (mir_is_live_after(ctx, block, end, ins->dest))
return false;
/* We're only live in this bundle -- pipeline! */
/* Each instruction reads some registers and writes to a register. See
* where the first writes */
- int source = first->ssa_args.dest;
+ int source = first->dest;
int source_mask = first->mask;
/* As long as the second doesn't read from the first, we're okay */
- for (unsigned i = 0; i < ARRAY_SIZE(second->ssa_args.src); ++i) {
- if (second->ssa_args.src[i] != source)
+ for (unsigned i = 0; i < ARRAY_SIZE(second->src); ++i) {
+ if (second->src[i] != source)
continue;
if (first->type != TAG_ALU_4)
/* Otherwise, it's safe in that regard. Another data hazard is both
* writing to the same place, of course */
- if (second->ssa_args.dest == source) {
+ if (second->dest == source) {
/* ...but only if the components overlap */
if (second->mask & source_mask)
for (unsigned i = 0; i < count; ++i) {
midgard_instruction *ins = bundle[i];
- if (ins->ssa_args.dest != SSA_FIXED_REGISTER(0))
+ if (ins->dest != SSA_FIXED_REGISTER(0))
continue;
/* Record written out mask */
* we're writeout at the very end of the shader. So check if
* they were written before us. */
- unsigned src0 = ins->ssa_args.src[0];
- unsigned src1 = ins->ssa_args.src[1];
+ unsigned src0 = ins->src[0];
+ unsigned src1 = ins->src[1];
if (!mir_is_written_before(ctx, bundle[0], src0))
src0 = ~0;
/* Requirement 3 */
for (unsigned i = 0; i < count; ++i) {
- unsigned dest = bundle[i]->ssa_args.dest;
+ unsigned dest = bundle[i]->dest;
if (dest < node_count && BITSET_TEST(dependencies, dest))
return false;
unsigned swizzle = SWIZZLE_FROM_ARRAY(indices);
unsigned r_constant = SSA_FIXED_REGISTER(REGISTER_CONSTANT);
- if (ains->ssa_args.src[0] == r_constant)
+ if (ains->src[0] == r_constant)
ains->alu.src1 = vector_alu_apply_swizzle(ains->alu.src1, swizzle);
- if (ains->ssa_args.src[1] == r_constant)
+ if (ains->src[1] == r_constant)
ains->alu.src2 = vector_alu_apply_swizzle(ains->alu.src2, swizzle);
bundle.has_embedded_constants = true;
bool deps = false;
- for (unsigned s = 0; s < ARRAY_SIZE(ins->ssa_args.src); ++s)
- deps |= (c->ssa_args.src[s] != ~0);
+ for (unsigned s = 0; s < ARRAY_SIZE(ins->src); ++s)
+ deps |= (c->src[s] != ~0);
if (deps)
continue;
ctx->hash_to_temp = _mesa_hash_table_u64_create(NULL);
mir_foreach_instr_global(ctx, ins) {
- ins->ssa_args.dest = find_or_allocate_temp(ctx, ins->ssa_args.dest);
+ ins->dest = find_or_allocate_temp(ctx, ins->dest);
- for (unsigned i = 0; i < ARRAY_SIZE(ins->ssa_args.src); ++i)
- ins->ssa_args.src[i] = find_or_allocate_temp(ctx, ins->ssa_args.src[i]);
+ for (unsigned i = 0; i < ARRAY_SIZE(ins->src); ++i)
+ ins->src[i] = find_or_allocate_temp(ctx, ins->src[i]);
}
}
midgard_instruction ins = {
.type = TAG_LOAD_STORE_4,
.mask = mask,
- .ssa_args = {
- .dest = ~0,
- .src = { ~0, ~0, ~0 },
- },
+ .dest = ~0,
+ .src = { ~0, ~0, ~0 },
.load_store = {
.op = is_store ? midgard_op_st_int4 : midgard_op_ld_int4,
.swizzle = SWIZZLE_XYZW,
if (is_store) {
/* r0 = r26, r1 = r27 */
assert(srcdest == SSA_FIXED_REGISTER(26) || srcdest == SSA_FIXED_REGISTER(27));
- ins.ssa_args.src[0] = srcdest;
+ ins.src[0] = srcdest;
} else {
- ins.ssa_args.dest = srcdest;
+ ins.dest = srcdest;
}
return ins;
mir_foreach_instr_global(ctx, ins) {
if (ins->no_spill &&
- ins->ssa_args.dest >= 0 &&
- ins->ssa_args.dest < ctx->temp_count)
- ra_set_node_spill_cost(g, ins->ssa_args.dest, -1.0);
+ ins->dest >= 0 &&
+ ins->dest < ctx->temp_count)
+ ra_set_node_spill_cost(g, ins->dest, -1.0);
}
int spill_node = ra_get_best_spill_node(g);
spill_slot = spill_index++;
mir_foreach_instr_global_safe(ctx, ins) {
- if (ins->ssa_args.dest != spill_node) continue;
+ if (ins->dest != spill_node) continue;
midgard_instruction st;
st = v_mov(spill_node, blank_alu_src, spill_slot);
st.no_spill = true;
} else {
- ins->ssa_args.dest = SSA_FIXED_REGISTER(26);
- st = v_load_store_scratch(ins->ssa_args.dest, spill_slot, true, ins->mask);
+ ins->dest = SSA_FIXED_REGISTER(26);
+ st = v_load_store_scratch(ins->dest, spill_slot, true, ins->mask);
}
/* Hint: don't rewrite this node */
void mir_rewrite_index_src_single(midgard_instruction *ins, unsigned old, unsigned new)
{
- for (unsigned i = 0; i < ARRAY_SIZE(ins->ssa_args.src); ++i) {
- if (ins->ssa_args.src[i] == old)
- ins->ssa_args.src[i] = new;
+ for (unsigned i = 0; i < ARRAY_SIZE(ins->src); ++i) {
+ if (ins->src[i] == old)
+ ins->src[i] = new;
}
}
void mir_rewrite_index_dst_single(midgard_instruction *ins, unsigned old, unsigned new)
{
- if (ins->ssa_args.dest == old)
- ins->ssa_args.dest = new;
+ if (ins->dest == old)
+ ins->dest = new;
}
static unsigned
static void
mir_rewrite_index_src_single_swizzle(midgard_instruction *ins, unsigned old, unsigned new, unsigned swizzle)
{
- for (unsigned i = 0; i < ARRAY_SIZE(ins->ssa_args.src); ++i) {
- if (ins->ssa_args.src[i] != old) continue;
+ for (unsigned i = 0; i < ARRAY_SIZE(ins->src); ++i) {
+ if (ins->src[i] != old) continue;
- ins->ssa_args.src[i] = new;
+ ins->src[i] = new;
mir_set_swizzle(ins, i,
pan_compose_swizzle(mir_get_swizzle(ins, i), swizzle));
if (ins->type != tag)
continue;
- if (ins->ssa_args.dest == old)
- ins->ssa_args.dest = new;
+ if (ins->dest == old)
+ ins->dest = new;
}
}
if (q == ins)
break;
- if (q->ssa_args.dest == node)
+ if (q->dest == node)
return true;
}
unsigned mask = 0;
for (unsigned i = 0; i < mir_source_count(ins); ++i) {
- if (ins->ssa_args.src[i] != node) continue;
+ if (ins->src[i] != node) continue;
unsigned swizzle = mir_get_swizzle(ins, i);
unsigned m = mir_mask_of_read_components_single(swizzle, ins->mask);
/* We do need the move for safety for a non-SSA dest, or if
* we're being fed into a special class */
- bool needs_move = ins->ssa_args.dest & IS_REG;
- needs_move |= mir_special_index(ctx, ins->ssa_args.dest);
+ bool needs_move = ins->dest & IS_REG;
+ needs_move |= mir_special_index(ctx, ins->dest);
/* Ensure this is a contiguous X-bound mask. It should be since
* we haven't done RA and per-component masked UBO reads don't
unsigned nr_components = util_bitcount(ins->mask);
if (needs_move) {
- midgard_instruction mov = v_mov(promoted, blank_alu_src, ins->ssa_args.dest);
+ midgard_instruction mov = v_mov(promoted, blank_alu_src, ins->dest);
mov.mask = ins->mask;
mir_insert_instruction_before(ins, mov);
} else {
- mir_rewrite_index_src_swizzle(ctx, ins->ssa_args.dest,
+ mir_rewrite_index_src_swizzle(ctx, ins->dest,
promoted, swizzle_of(nr_components));
}