We don't need blank_alu_src in practice, so this is some more cleanup.
Signed-off-by: Alyssa Rosenzweig <alyssa.rosenzweig@collabora.com>
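
For context, a minimal standalone sketch of why dropping the argument is
safe (illustrative types only, not the real Mesa definitions): blank_alu_src
was the all-zero struct, and C designated initializers zero-initialize any
member that is left out, so omitting the field yields the same bit pattern
that vector_alu_srco_unsigned(blank_alu_src) packed. Every v_mov call site
below passed blank_alu_src anyway.

#include <assert.h>

/* Illustrative stand-in for midgard_vector_alu_src; not the real layout */
typedef struct { unsigned mod; unsigned swizzle; } alu_src;

/* Stand-in for vector_alu_srco_unsigned: a plain bit-packing */
static unsigned pack(alu_src s) { return (s.mod << 16) | s.swizzle; }

int main(void)
{
        alu_src blank = {0};            /* the old blank_alu_src */
        alu_src partial = { .mod = 0 }; /* .swizzle omitted: implicitly zero */

        assert(pack(blank) == pack(partial)); /* both pack to 0 */
        return 0;
}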
void mir_flip(midgard_instruction *ins);
void mir_compute_temp_count(compiler_context *ctx);
-/* MIR goodies */
-
-static const midgard_vector_alu_src blank_alu_src = {};
-
-static const midgard_scalar_alu_src blank_scalar_alu_src = {
- .full = true
-};
-
/* 'Intrinsic' move for aliasing */
static inline midgard_instruction
-v_mov(unsigned src, midgard_vector_alu_src mod, unsigned dest)
+v_mov(unsigned src, unsigned dest)
{
midgard_instruction ins = {
.type = TAG_ALU_4,
.op = midgard_alu_op_imov,
.reg_mode = midgard_reg_mode_32,
.dest_override = midgard_dest_override_none,
- .outmod = midgard_outmod_int_wrap,
- .src1 = vector_alu_srco_unsigned(blank_alu_src),
- .src2 = vector_alu_srco_unsigned(mod)
+ .outmod = midgard_outmod_int_wrap
},
};
vector_alu_modifiers(nir_alu_src *src, bool is_int, unsigned broadcast_count,
bool half, bool sext)
{
- if (!src) return blank_alu_src;
-
/* Figure out how many components there are so we can adjust.
* Specifically we want to broadcast the last channel so things like
* ball2/3 work.
*/
- if (broadcast_count) {
+ if (broadcast_count && src) {
uint8_t last_component = src->swizzle[broadcast_count - 1];
for (unsigned c = broadcast_count; c < NIR_MAX_VEC_COMPONENTS; ++c) {
        src->swizzle[c] = last_component;
}
}
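/* e.g. for ball3, broadcast_count = 3: an .xyzw swizzle becomes .xyzz,
 * so every component past the third reads the last meaningful channel */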
/* These should have been lowered away */
- assert(!(src->abs || src->negate));
+ if (src)
+ assert(!(src->abs || src->negate));
} else {
- alu_src.mod = (src->abs << 0) | (src->negate << 1);
+ if (src)
+ alu_src.mod = (src->abs << 0) | (src->negate << 1);
}
return alu_src;
void *constant_value = _mesa_hash_table_u64_search(ctx->ssa_constants, node + 1);
if (constant_value) {
- midgard_instruction ins = v_mov(SSA_FIXED_REGISTER(REGISTER_CONSTANT), blank_alu_src, to);
+ midgard_instruction ins = v_mov(SSA_FIXED_REGISTER(REGISTER_CONSTANT), to);
attach_constants(ctx, &ins, constant_value, node + 1);
emit_mir_instruction(ctx, ins);
}
if (rt != 0) {
/* We'll write to r1.z */
- rt_move = v_mov(~0, blank_alu_src, SSA_FIXED_REGISTER(1));
+ rt_move = v_mov(~0, SSA_FIXED_REGISTER(1));
rt_move.mask = 1 << COMPONENT_Z;
rt_move.unit = UNIT_SADD;
/* For blend shaders, load the input color, which is
* preloaded to r0 */
- midgard_instruction move = v_mov(SSA_FIXED_REGISTER(0), blank_alu_src, reg);
+ midgard_instruction move = v_mov(SSA_FIXED_REGISTER(0), reg);
emit_mir_instruction(ctx, move);
schedule_barrier(ctx);
} else if (ctx->stage == MESA_SHADER_VERTEX) {
/* Blend constants are embedded directly in the shader and
* patched in, so we use some magic routing */
- midgard_instruction ins = v_mov(SSA_FIXED_REGISTER(REGISTER_CONSTANT), blank_alu_src, reg);
+ midgard_instruction ins = v_mov(SSA_FIXED_REGISTER(REGISTER_CONSTANT), reg);
ins.has_constants = true;
ins.has_blend_constant = true;
emit_mir_instruction(ctx, ins);
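/* has_blend_constant tags the inline constant slot so the actual blend
 * color can be patched into the binary later, per the routing above */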
index = make_compiler_temp(ctx);
/* mov index, old_index */
- midgard_instruction mov = v_mov(old_index, blank_alu_src, index);
+ midgard_instruction mov = v_mov(old_index, index);
mov.mask = 0x3;
emit_mir_instruction(ctx, mov);
/* mov index.zw, #0 */
- mov = v_mov(SSA_FIXED_REGISTER(REGISTER_CONSTANT),
- blank_alu_src, index);
+ mov = v_mov(SSA_FIXED_REGISTER(REGISTER_CONSTANT), index);
mov.has_constants = true;
mov.mask = (1 << COMPONENT_Z) | (1 << COMPONENT_W);
emit_mir_instruction(ctx, mov);
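/* Net effect of the two moves: index = (old_index.x, old_index.y, 0, 0);
 * the embedded constants are left zeroed, clearing .zw as intended */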
unsigned scratch = alu->dest;
if (entry) {
- midgard_instruction ins = v_mov(SSA_FIXED_REGISTER(REGISTER_CONSTANT), blank_alu_src, scratch);
+ midgard_instruction ins = v_mov(SSA_FIXED_REGISTER(REGISTER_CONSTANT), scratch);
attach_constants(ctx, &ins, entry, alu->src[1] + 1);
/* Set the source */
/* TODO: i16 */
.reg_mode = midgard_reg_mode_32,
.dest_override = midgard_dest_override_none,
- .outmod = midgard_outmod_int_wrap,
- .src1 = vector_alu_srco_unsigned(blank_alu_src),
- .src2 = vector_alu_srco_unsigned(blank_alu_src)
+ .outmod = midgard_outmod_int_wrap
},
};
unsigned idx = spill_idx++;
midgard_instruction m = hazard_write ?
- v_mov(idx, blank_alu_src, i) :
- v_mov(i, blank_alu_src, idx);
+ v_mov(idx, i) : v_mov(i, idx);
/* Insert move before each read/write, depending on the
* hazard we're trying to account for */
mir_rewrite_index_dst_single(pre_use, i, idx);
} else {
idx = spill_idx++;
- m = v_mov(i, blank_alu_src, idx);
+ m = v_mov(i, idx);
m.mask = mir_from_bytemask(mir_bytemask_of_read_components(pre_use, i), midgard_reg_mode_32);
mir_insert_instruction_before(ctx, pre_use, m);
mir_rewrite_index_src_single(pre_use, i, idx);
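/* i.e. a write hazard becomes "pre_use writes idx, then idx is copied
 * to i"; a read hazard becomes "i is copied to idx, pre_use reads idx" */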
/* Otherwise, we insert a move */
- midgard_instruction mov = v_mov(cond, blank_alu_src, cond);
+ midgard_instruction mov = v_mov(cond, cond);
mov.mask = vector ? 0xF : 0x1;
memcpy(mov.swizzle[1], swizzle, sizeof(mov.swizzle[1]));
/* Finally, add a move if necessary */
if (bad_writeout || writeout_mask != 0xF) {
unsigned temp = (branch->src[0] == ~0) ? SSA_FIXED_REGISTER(0) : make_compiler_temp(ctx);
- midgard_instruction mov = v_mov(src, blank_alu_src, temp);
+ midgard_instruction mov = v_mov(src, temp);
vmul = mem_dup(&mov, sizeof(midgard_instruction));
vmul->unit = UNIT_VMUL;
vmul->mask = 0xF ^ writeout_mask;
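/* The VMUL copy fills exactly the components the shader didn't write
 * (0xF ^ writeout_mask), so the writeout sees a fully populated color */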
midgard_instruction st;
if (is_special_w) {
- st = v_mov(spill_node, blank_alu_src, spill_slot);
+ st = v_mov(spill_node, spill_slot);
st.no_spill = true;
} else {
ins->dest = SSA_FIXED_REGISTER(26);
if (is_special) {
/* Move */
- st = v_mov(spill_node, blank_alu_src, consecutive_index);
+ st = v_mov(spill_node, consecutive_index);
st.no_spill = true;
} else {
/* TLS load */
needs_move |= mir_special_index(ctx, ins->dest);
if (needs_move) {
- midgard_instruction mov = v_mov(promoted, blank_alu_src, ins->dest);
+ midgard_instruction mov = v_mov(promoted, ins->dest);
mov.mask = ins->mask;
mir_insert_instruction_before(ctx, ins, mov);
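/* e.g. when the destination is a special index, the promoted uniform
 * can't be consumed directly, so route it through this copy instead */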
} else {