* registers. Negative values mean unused. */
typedef struct {
- int src0;
- int src1;
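+ /* Up to three sources per instruction; ALU ops use at most two,
+ * while load/store and texture ops may use all three */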
+ int src[3];
int dest;
- /* src1 is -not- SSA but instead a 16-bit inline constant to be smudged
- * in. Only valid for ALU ops. */
+ /* When set, src[1] is ignored in favour of a 16-bit inline constant
+ * folded into the instruction. Only valid for ALU ops. */
bool inline_constant;
} ssa_args;
.type = TAG_ALU_4,
.mask = 0xF,
.ssa_args = {
- .src0 = SSA_UNUSED_1,
- .src1 = src,
+ .src = { SSA_UNUSED_1, src, -1 },
.dest = dest,
},
.alu = {
static inline bool
mir_has_arg(midgard_instruction *ins, unsigned arg)
{
- if (ins->ssa_args.src0 == arg)
- return true;
-
- if (ins->ssa_args.src1 == arg && !ins->ssa_args.inline_constant)
- return true;
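+ /* No explicit inline_constant check is needed: attaching an inline
+ * constant resets src[1] to -1, so the loop cannot match it */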
+ for (unsigned i = 0; i < ARRAY_SIZE(ins->ssa_args.src); ++i) {
+ if (ins->ssa_args.src[i] == arg)
+ return true;
+ }
return false;
}
#define EMIT(op, ...) emit_mir_instruction(ctx, v_##op(__VA_ARGS__));
-#define M_LOAD_STORE(name, rname, uname) \
+#define M_LOAD_STORE(name, store) \
static midgard_instruction m_##name(unsigned ssa, unsigned address) { \
midgard_instruction i = { \
.type = TAG_LOAD_STORE_4, \
.mask = 0xF, \
.ssa_args = { \
- .rname = ssa, \
- .uname = -1, \
- .src1 = -1 \
+ .dest = -1, \
+ .src = { -1, -1, -1 }, \
}, \
.load_store = { \
.op = midgard_op_##name, \
.address = address \
} \
}; \
+ \
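+ /* Stores read their value from src[0]; loads write theirs to dest */ \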
+ if (store) \
+ i.ssa_args.src[0] = ssa; \
+ else \
+ i.ssa_args.dest = ssa; \
\
return i; \
}
-#define M_LOAD(name) M_LOAD_STORE(name, dest, src0)
-#define M_STORE(name) M_LOAD_STORE(name, src0, dest)
+#define M_LOAD(name) M_LOAD_STORE(name, false)
+#define M_STORE(name) M_LOAD_STORE(name, true)
/* Inputs a NIR ALU source, with modifiers attached if necessary, and outputs
* the corresponding Midgard source */
.br_compact = compact,
.ssa_args = {
.dest = -1,
- .src0 = -1,
- .src1 = -1,
+ .src = { -1, -1, -1 },
}
};
},
.ssa_args = {
.dest = -1,
- .src0 = -1,
- .src1 = -1
+ .src = { -1, -1, -1 },
}
};
.mask = 1 << COMPONENT_W,
.ssa_args = {
- .src0 = condition,
- .src1 = condition,
+ .src = { condition, condition, -1 },
.dest = SSA_FIXED_REGISTER(31),
},
.precede_break = true,
.mask = mask_of(nr_comp),
.ssa_args = {
- .src0 = condition,
- .src1 = condition,
+ .src = { condition, condition, -1 },
.dest = SSA_FIXED_REGISTER(31),
},
.alu = {
midgard_instruction ins = {
.type = TAG_ALU_4,
.ssa_args = {
- .src0 = quirk_flipped_r24 ? SSA_UNUSED_1 : src0,
- .src1 = quirk_flipped_r24 ? src0 : src1,
+ .src = {
+ quirk_flipped_r24 ? SSA_UNUSED_1 : src0,
+ quirk_flipped_r24 ? src0 : src1,
+ -1
+ },
.dest = dest,
}
};
* constants) */
ins.ssa_args.inline_constant = false;
- ins.ssa_args.src1 = SSA_FIXED_REGISTER(REGISTER_CONSTANT);
+ ins.ssa_args.src[1] = SSA_FIXED_REGISTER(REGISTER_CONSTANT);
ins.has_constants = true;
if (instr->op == nir_op_b2f32) {
} else if (nr_inputs == 1 && !quirk_flipped_r24) {
/* Lots of instructions need a 0 plonked in */
ins.ssa_args.inline_constant = false;
- ins.ssa_args.src1 = SSA_FIXED_REGISTER(REGISTER_CONSTANT);
+ ins.ssa_args.src[1] = SSA_FIXED_REGISTER(REGISTER_CONSTANT);
ins.has_constants = true;
ins.constants[0] = 0.0f;
ins.alu.src2 = vector_alu_srco_unsigned(blank_alu_src_xxxx);
ins.load_store.address = offset >> 3;
if (indirect_offset) {
- ins.ssa_args.src1 = nir_src_index(ctx, indirect_offset);
+ ins.ssa_args.src[1] = nir_src_index(ctx, indirect_offset);
ins.load_store.arg_2 = 0x80;
} else {
ins.load_store.arg_2 = 0x1E;
ins.load_store.varying_parameters = u;
if (indirect_offset)
- ins.ssa_args.src1 = nir_src_index(ctx, indirect_offset);
+ ins.ssa_args.src[1] = nir_src_index(ctx, indirect_offset);
else
ins.load_store.arg_2 = 0x1E;
.mask = 0xF,
.ssa_args = {
.dest = nir_dest_index(ctx, &instr->dest),
- .src0 = -1,
- .src1 = -1,
+ .src = { -1, -1, -1 },
},
.texture = {
.op = midgard_texop,
unsigned temp = make_compiler_temp(ctx);
midgard_instruction st = m_st_cubemap_coords(temp, 0);
- st.ssa_args.src0 = index;
+ st.ssa_args.src[0] = index;
st.mask = 0x3; /* xy */
st.load_store.arg_1 = 0x20;
st.load_store.swizzle = alu_src.swizzle;
emit_mir_instruction(ctx, st);
- ins.ssa_args.src0 = temp;
+ ins.ssa_args.src[0] = temp;
} else {
- ins.ssa_args.src0 = index;
+ ins.ssa_args.src[0] = index;
}
if (instr->sampler_dim == GLSL_SAMPLER_DIM_2D) {
break;
ins.texture.lod_register = true;
- ins.ssa_args.src1 = index;
+ ins.ssa_args.src[1] = index;
emit_explicit_constant(ctx, index, index);
break;
/* If there is already a constant here, we can do nothing */
if (alu->has_constants) continue;
- CONDITIONAL_ATTACH(src0);
+ CONDITIONAL_ATTACH(src[0]);
if (!alu->has_constants) {
- CONDITIONAL_ATTACH(src1)
+ CONDITIONAL_ATTACH(src[1]);
} else if (!alu->inline_constant) {
/* Corner case: _two_ vec4 constants, for instance with a
 * csel. For this case, we can only use a constant
 * register for one; the other is lowered to an explicit
 * move staged through the destination register.
 */
- void *entry = _mesa_hash_table_u64_search(ctx->ssa_constants, alu->ssa_args.src1 + 1);
+ void *entry = _mesa_hash_table_u64_search(ctx->ssa_constants, alu->ssa_args.src[1] + 1);
unsigned scratch = alu->ssa_args.dest;
if (entry) {
midgard_instruction ins = v_mov(SSA_FIXED_REGISTER(REGISTER_CONSTANT), blank_alu_src, scratch);
- attach_constants(ctx, &ins, entry, alu->ssa_args.src1 + 1);
+ attach_constants(ctx, &ins, entry, alu->ssa_args.src[1] + 1);
/* Force a break XXX Defer r31 writes */
ins.unit = UNIT_VLUT;
/* Set the source */
- alu->ssa_args.src1 = scratch;
+ alu->ssa_args.src[1] = scratch;
/* Inject us -before- the last instruction which set r31 */
mir_insert_instruction_before(mir_prev_op(alu), ins);
int op = ins->alu.op;
- if (ins->ssa_args.src0 == SSA_FIXED_REGISTER(REGISTER_CONSTANT)) {
+ if (ins->ssa_args.src[0] == SSA_FIXED_REGISTER(REGISTER_CONSTANT)) {
bool flip = alu_opcode_props[op].props & OP_COMMUTES;
switch (op) {
if (flip) {
/* Flip the SSA numbers */
- ins->ssa_args.src0 = ins->ssa_args.src1;
- ins->ssa_args.src1 = SSA_FIXED_REGISTER(REGISTER_CONSTANT);
+ ins->ssa_args.src[0] = ins->ssa_args.src[1];
+ ins->ssa_args.src[1] = SSA_FIXED_REGISTER(REGISTER_CONSTANT);
/* And flip the modifiers */
}
}
- if (ins->ssa_args.src1 == SSA_FIXED_REGISTER(REGISTER_CONSTANT)) {
+ if (ins->ssa_args.src[1] == SSA_FIXED_REGISTER(REGISTER_CONSTANT)) {
/* Extract the source information */
midgard_vector_alu_src *src;
/* Get rid of the embedded constant */
ins->has_constants = false;
- ins->ssa_args.src1 = -1;
+ ins->ssa_args.src[1] = -1;
ins->ssa_args.inline_constant = true;
ins->inline_constant = scaled_constant;
}
if (ins->alu.outmod != midgard_outmod_pos) continue;
/* TODO: Registers? */
- unsigned src = ins->ssa_args.src1;
+ unsigned src = ins->ssa_args.src[1];
if (src & IS_REG) continue;
assert(!mir_has_multiple_writes(ctx, src));
.mask = mask_of(nr_components),
.ssa_args = {
.dest = nir_dest_index(ctx, &instr->dest.dest),
- .src0 = nir_alu_src_index(ctx, &instr->src[0]),
- .src1 = -1,
+ .src = { nir_alu_src_index(ctx, &instr->src[0]), -1, -1 },
},
.texture = {
.op = mir_derivative_op(instr->op),
#include "compiler.h"
-static bool
-midgard_is_live_in_instr(midgard_instruction *ins, int src)
-{
- if (ins->ssa_args.src0 == src)
- return true;
-
- if (ins->ssa_args.src1 == src)
- return true;
-
- return false;
-}
-
/* Determine if a variable is live in the successors of a block */
static bool
is_live_after_successors(compiler_context *ctx, midgard_block *bl, int src)
bool block_done = false;
mir_foreach_instr_in_block(succ, ins) {
- if (midgard_is_live_in_instr(ins, src))
+ if (mir_has_arg(ins, src))
return true;
/* If written-before-use, we're gone */
/* Check the rest of the block for liveness */
mir_foreach_instr_in_block_from(block, ins, mir_next_op(start)) {
- if (midgard_is_live_in_instr(ins, src))
+ if (mir_has_arg(ins, src))
return true;
}
if (ins->type != TAG_ALU_4) continue;
if (!OP_IS_MOVE(ins->alu.op)) continue;
- unsigned from = ins->ssa_args.src1;
+ unsigned from = ins->ssa_args.src[1];
unsigned to = ins->ssa_args.dest;
/* We only work on pure SSA */
bool skip = false;
mir_foreach_instr_global(ctx, q) {
- if (q->ssa_args.src1 != to) continue;
+ if (q->ssa_args.src[1] != to) continue;
if (q->type == TAG_TEXTURE_4) skip = true;
}
/* Check we're to the same place post-RA */
- unsigned iA = ins->ssa_args.dest;
- unsigned iB = ins->ssa_args.src1;
+ int iA = ins->ssa_args.dest;
+ int iB = ins->ssa_args.src[1];
if ((iA < 0) || (iB < 0)) continue;
/* We do want to rewrite to keep the graph sane for pipeline
* register creation (TODO: is this the best approach?) */
- mir_rewrite_index_dst(ctx, ins->ssa_args.src1, ins->ssa_args.dest);
+ mir_rewrite_index_dst(ctx, ins->ssa_args.src[1], ins->ssa_args.dest);
/* We're good to go */
mir_remove_instruction(ins);
.type = TAG_ALU_4,
.mask = ins->mask,
.ssa_args = {
- .src0 = temp,
- .src1 = -1,
+ .src = { temp, -1, -1 },
.dest = ins->ssa_args.dest,
.inline_constant = true
},
if (ins->alu.op != midgard_alu_op_imov) continue;
if (!ins->invert) continue;
if (mir_nontrivial_source2_mod_simple(ins)) continue;
- if (ins->ssa_args.src1 & IS_REG) continue;
+ if (ins->ssa_args.src[1] & IS_REG) continue;
/* Is it beneficial to propagate? */
- if (!mir_single_use(ctx, ins->ssa_args.src1)) continue;
+ if (!mir_single_use(ctx, ins->ssa_args.src[1])) continue;
/* We found an imov.not, propagate the invert back */
mir_foreach_instr_in_block_from_rev(block, v, mir_prev_op(ins)) {
- if (v->ssa_args.dest != ins->ssa_args.src1) continue;
+ if (v->ssa_args.dest != ins->ssa_args.src[1]) continue;
if (v->type != TAG_ALU_4) break;
v->invert = !v->invert;
if (!mir_is_bitwise(ins)) continue;
if (ins->invert) continue;
- if (ins->ssa_args.src0 & IS_REG) continue;
- if (ins->ssa_args.src1 & IS_REG) continue;
- if (!mir_single_use(ctx, ins->ssa_args.src0)) continue;
- if (!ins->ssa_args.inline_constant && !mir_single_use(ctx, ins->ssa_args.src1)) continue;
+ if (ins->ssa_args.src[0] & IS_REG) continue;
+ if (ins->ssa_args.src[1] & IS_REG) continue;
+ if (!mir_single_use(ctx, ins->ssa_args.src[0])) continue;
+ if (!ins->ssa_args.inline_constant && !mir_single_use(ctx, ins->ssa_args.src[1])) continue;
- bool not_a = mir_strip_inverted(ctx, ins->ssa_args.src0);
+ bool not_a = mir_strip_inverted(ctx, ins->ssa_args.src[0]);
bool not_b =
ins->ssa_args.inline_constant ? false :
- mir_strip_inverted(ctx, ins->ssa_args.src1);
+ mir_strip_inverted(ctx, ins->ssa_args.src[1]);
- /* Edge case: if src0 == src1, it'll've been stripped */
+ /* Edge case: if src[0] == src[1], the invert will already have been stripped */
- if ((ins->ssa_args.src0 == ins->ssa_args.src1) && !ins->ssa_args.inline_constant)
+ if ((ins->ssa_args.src[0] == ins->ssa_args.src[1]) && !ins->ssa_args.inline_constant)
not_b = not_a;
progress |= (not_a || not_b);
} else if (right || (left && !ins->ssa_args.inline_constant)) {
if (left) {
/* Commute */
- unsigned temp = ins->ssa_args.src0;
- ins->ssa_args.src0 = ins->ssa_args.src1;
- ins->ssa_args.src1 = temp;
+ unsigned temp = ins->ssa_args.src[0];
+ ins->ssa_args.src[0] = ins->ssa_args.src[1];
+ ins->ssa_args.src[1] = temp;
}
ins->alu.op = mir_notright_op(ins->alu.op);
if (src2.swizzle != SWIZZLE_XXXX) continue;
/* Awesome, we're the right form. Now check where src2 is from */
- unsigned frcp = ins->ssa_args.src1;
+ unsigned frcp = ins->ssa_args.src[1];
unsigned to = ins->ssa_args.dest;
if (frcp & IS_REG) continue;
vector_alu_from_unsigned(sub->alu.src1);
frcp_component = s.swizzle & 3;
- frcp_from = sub->ssa_args.src0;
+ frcp_from = sub->ssa_args.src[0];
frcp_found =
(sub->type == TAG_ALU_4) &&
.mask = ins->mask,
.ssa_args = {
.dest = to,
- .src0 = frcp_from,
- .src1 = -1
+ .src = { frcp_from, -1, -1 },
},
.load_store = {
.op = frcp_component == COMPONENT_W ?
if (ins->type != TAG_LOAD_STORE_4) continue;
if (!OP_IS_PROJECTION(ins->load_store.op)) continue;
- unsigned vary = ins->ssa_args.src0;
+ unsigned vary = ins->ssa_args.src[0];
unsigned to = ins->ssa_args.dest;
if (vary & IS_REG) continue;
printf(" %d, ", args->dest);
- mir_print_source(args->src0);
+ mir_print_source(args->src[0]);
printf(", ");
if (args->inline_constant)
printf("#%d", ins->inline_constant);
else
- mir_print_source(args->src1);
+ mir_print_source(args->src[1]);
+
+ printf(", ");
+ mir_print_source(args->src[2]);
if (ins->has_constants)
printf(" <%f, %f, %f, %f>", ins->constants[0], ins->constants[1], ins->constants[2], ins->constants[3]);
switch (ins->type) {
case TAG_ALU_4:
mark_node_class(aluw, ins->ssa_args.dest);
- mark_node_class(alur, ins->ssa_args.src0);
- mark_node_class(alur, ins->ssa_args.src1);
-
+ mark_node_class(alur, ins->ssa_args.src[0]);
+ mark_node_class(alur, ins->ssa_args.src[1]);
break;
+
case TAG_LOAD_STORE_4:
- mark_node_class(ldst, ins->ssa_args.src0);
- mark_node_class(ldst, ins->ssa_args.src1);
+ mark_node_class(ldst, ins->ssa_args.src[0]);
+ mark_node_class(ldst, ins->ssa_args.src[1]);
+ mark_node_class(ldst, ins->ssa_args.src[2]);
break;
+
case TAG_TEXTURE_4:
- mark_node_class(texr, ins->ssa_args.src0);
- mark_node_class(texr, ins->ssa_args.src1);
+ mark_node_class(texr, ins->ssa_args.src[0]);
+ mark_node_class(texr, ins->ssa_args.src[1]);
+ mark_node_class(texr, ins->ssa_args.src[2]);
mark_node_class(texw, ins->ssa_args.dest);
break;
}
if (ins->type == TAG_LOAD_STORE_4) {
bool force_vec4_only = OP_IS_VEC4_ONLY(ins->load_store.op);
- set_class(found_class, ins->ssa_args.src0, REG_CLASS_LDST);
- set_class(found_class, ins->ssa_args.src1, REG_CLASS_LDST);
+ set_class(found_class, ins->ssa_args.src[0], REG_CLASS_LDST);
+ set_class(found_class, ins->ssa_args.src[1], REG_CLASS_LDST);
+ set_class(found_class, ins->ssa_args.src[2], REG_CLASS_LDST);
if (force_vec4_only) {
force_vec4(found_class, ins->ssa_args.dest);
- force_vec4(found_class, ins->ssa_args.src0);
- force_vec4(found_class, ins->ssa_args.src1);
+ force_vec4(found_class, ins->ssa_args.src[0]);
+ force_vec4(found_class, ins->ssa_args.src[1]);
+ force_vec4(found_class, ins->ssa_args.src[2]);
}
} else if (ins->type == TAG_TEXTURE_4) {
set_class(found_class, ins->ssa_args.dest, REG_CLASS_TEXW);
- set_class(found_class, ins->ssa_args.src0, REG_CLASS_TEXR);
- set_class(found_class, ins->ssa_args.src1, REG_CLASS_TEXR);
+ set_class(found_class, ins->ssa_args.src[0], REG_CLASS_TEXR);
+ set_class(found_class, ins->ssa_args.src[1], REG_CLASS_TEXR);
+ set_class(found_class, ins->ssa_args.src[2], REG_CLASS_TEXR);
}
}
/* Check that the semantics of the class are respected */
mir_foreach_instr_global(ctx, ins) {
assert(check_write_class(found_class, ins->type, ins->ssa_args.dest));
- assert(check_read_class(found_class, ins->type, ins->ssa_args.src0));
- assert(check_read_class(found_class, ins->type, ins->ssa_args.src1));
+ assert(check_read_class(found_class, ins->type, ins->ssa_args.src[0]));
+ assert(check_read_class(found_class, ins->type, ins->ssa_args.src[1]));
+ assert(check_read_class(found_class, ins->type, ins->ssa_args.src[2]));
}
for (unsigned i = 0; i < ctx->temp_count; ++i) {
* */
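+ /* XXX: src[2] is not tracked here yet */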
int sources[2] = {
- ins->ssa_args.src0, ins->ssa_args.src1
+ ins->ssa_args.src[0], ins->ssa_args.src[1]
};
for (int src = 0; src < 2; ++src) {
switch (ins->type) {
case TAG_ALU_4: {
- struct phys_reg src1 = index_to_reg(ctx, g, args.src0);
- struct phys_reg src2 = index_to_reg(ctx, g, args.src1);
+ struct phys_reg src1 = index_to_reg(ctx, g, args.src[0]);
+ struct phys_reg src2 = index_to_reg(ctx, g, args.src[1]);
struct phys_reg dest = index_to_reg(ctx, g, args.dest);
unsigned uncomposed_mask = ins->mask;
}
case TAG_LOAD_STORE_4: {
- bool fixed = args.src0 >= SSA_FIXED_MINIMUM;
+ bool fixed = args.src[0] >= SSA_FIXED_MINIMUM;
/* Which physical register we read off depends on
* whether we are loading or storing -- think about the
ins->load_store.op != midgard_op_st_cubemap_coords;
if (OP_IS_STORE_R26(ins->load_store.op) && fixed) {
- ins->load_store.reg = SSA_REG_FROM_FIXED(args.src0);
+ ins->load_store.reg = SSA_REG_FROM_FIXED(args.src[0]);
} else if (OP_IS_STORE_VARY(ins->load_store.op)) {
- struct phys_reg src = index_to_reg(ctx, g, args.src0);
+ struct phys_reg src = index_to_reg(ctx, g, args.src[0]);
assert(src.reg == 26 || src.reg == 27);
ins->load_store.reg = src.reg - 26;
/* TODO: swizzle/mask */
} else {
unsigned r = encodes_src ?
- args.src0 : args.dest;
+ args.src[0] : args.dest;
struct phys_reg src = index_to_reg(ctx, g, r);
/* We also follow up by actual arguments */
int src2 =
- encodes_src ? args.src1 : args.src0;
+ encodes_src ? args.src[1] : args.src[0];
int src3 =
- encodes_src ? -1 : args.src1;
+ encodes_src ? -1 : args.src[1];
if (src2 >= 0) {
struct phys_reg src = index_to_reg(ctx, g, src2);
case TAG_TEXTURE_4: {
/* Grab RA results */
struct phys_reg dest = index_to_reg(ctx, g, args.dest);
- struct phys_reg coord = index_to_reg(ctx, g, args.src0);
- struct phys_reg lod = index_to_reg(ctx, g, args.src1);
+ struct phys_reg coord = index_to_reg(ctx, g, args.src[0]);
+ struct phys_reg lod = index_to_reg(ctx, g, args.src[1]);
assert(dest.reg == 28 || dest.reg == 29);
assert(coord.reg == 28 || coord.reg == 29);
compose_writemask(ins->mask, dest);
/* If there is a register LOD/bias, use it */
- if (args.src1 > -1) {
+ if (args.src[1] > -1) {
midgard_tex_register_select sel = {
.select = lod.reg,
.full = 1,
int source_mask = first->mask;
/* As long as the second doesn't read from the first, we're okay */
- if (second->ssa_args.src0 == source) {
+ if (second->ssa_args.src[0] == source) {
if (first->type == TAG_ALU_4) {
/* Figure out which components we just read from */
}
- if (second->ssa_args.src1 == source)
+ if (second->ssa_args.src[1] == source)
return false;
/* Otherwise, it's safe in that regard. Another data hazard is both
* we're writeout at the very end of the shader. So check if
* they were written before us. */
- unsigned src0 = ins->ssa_args.src0;
- unsigned src1 = ins->ssa_args.src1;
+ unsigned src0 = ins->ssa_args.src[0];
+ unsigned src1 = ins->ssa_args.src[1];
if (!mir_is_written_before(ctx, bundle[0], src0))
src0 = -1;
unsigned swizzle = SWIZZLE_FROM_ARRAY(indices);
unsigned r_constant = SSA_FIXED_REGISTER(REGISTER_CONSTANT);
- if (ains->ssa_args.src0 == r_constant)
+ if (ains->ssa_args.src[0] == r_constant)
ains->alu.src1 = vector_alu_apply_swizzle(ains->alu.src1, swizzle);
- if (ains->ssa_args.src1 == r_constant)
+ if (ains->ssa_args.src[1] == r_constant)
ains->alu.src2 = vector_alu_apply_swizzle(ains->alu.src2, swizzle);
bundle.has_embedded_constants = true;
mir_foreach_instr_global(ctx, ins) {
ins->ssa_args.dest = find_or_allocate_temp(ctx, ins->ssa_args.dest);
- ins->ssa_args.src0 = find_or_allocate_temp(ctx, ins->ssa_args.src0);
- ins->ssa_args.src1 = find_or_allocate_temp(ctx, ins->ssa_args.src1);
+
+ for (unsigned i = 0; i < ARRAY_SIZE(ins->ssa_args.src); ++i)
+ ins->ssa_args.src[i] = find_or_allocate_temp(ctx, ins->ssa_args.src[i]);
}
}
.mask = mask,
.ssa_args = {
.dest = -1,
- .src0 = -1,
- .src1 = -1
+ .src = { -1, -1, -1 },
},
.load_store = {
.op = is_store ? midgard_op_st_int4 : midgard_op_ld_int4,
if (is_store) {
/* r0 = r26, r1 = r27 */
assert(srcdest == SSA_FIXED_REGISTER(26) || srcdest == SSA_FIXED_REGISTER(27));
- ins.ssa_args.src0 = (srcdest == SSA_FIXED_REGISTER(27)) ? SSA_FIXED_REGISTER(1) : SSA_FIXED_REGISTER(0);
+ ins.ssa_args.src[0] = (srcdest == SSA_FIXED_REGISTER(27)) ? SSA_FIXED_REGISTER(1) : SSA_FIXED_REGISTER(0);
} else {
ins.ssa_args.dest = srcdest;
}
void mir_rewrite_index_src_single(midgard_instruction *ins, unsigned old, unsigned new)
{
- if (ins->ssa_args.src0 == old)
- ins->ssa_args.src0 = new;
-
- if (ins->ssa_args.src1 == old)
- ins->ssa_args.src1 = new;
+ for (unsigned i = 0; i < ARRAY_SIZE(ins->ssa_args.src); ++i) {
+ if (ins->ssa_args.src[i] == old)
+ ins->ssa_args.src[i] = new;
+ }
}
static unsigned
static void
mir_rewrite_index_src_single_swizzle(midgard_instruction *ins, unsigned old, unsigned new, unsigned swizzle)
{
- if (ins->ssa_args.src0 == old) {
- ins->ssa_args.src0 = new;
-
- mir_set_swizzle(ins, 0,
- pan_compose_swizzle(mir_get_swizzle(ins, 0), swizzle));
- }
-
- if (ins->ssa_args.src1 == old) {
- ins->ssa_args.src1 = new;
-
- mir_set_swizzle(ins, 1,
- pan_compose_swizzle(mir_get_swizzle(ins, 1), swizzle));
+ for (unsigned i = 0; i < ARRAY_SIZE(ins->ssa_args.src); ++i) {
+ if (ins->ssa_args.src[i] != old) continue;
+
+ ins->ssa_args.src[i] = new;
+
+ mir_set_swizzle(ins, i,
+ pan_compose_swizzle(mir_get_swizzle(ins, i), swizzle));
}
}
unsigned mask = 0;
- if (ins->ssa_args.src0 == node)
+ if (ins->ssa_args.src[0] == node)
mask |= mir_mask_of_read_components_single(ins->alu.src1, ins->mask);
- if (ins->ssa_args.src1 == node)
+ if (ins->ssa_args.src[1] == node)
mask |= mir_mask_of_read_components_single(ins->alu.src2, ins->mask);
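+
+ /* ALU ops never use the third source slot */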
+ assert(ins->ssa_args.src[2] == -1);
+
return mask;
}