/* I.e. (1 << alu_bit) */
int unit;
+ /* When emitting a bundle, should this instruction have a break forced
+ * before it? Used for r31 writes, which are valid only within a single
+ * bundle and *need* to happen as early as possible... this is a hack;
+ * TODO: remove when we have a real scheduler */
+ bool precede_break;
+
bool has_constants;
float constants[4];
uint16_t inline_constant;
return alu_src;
}
-static bool
-mir_nontrivial_mod(midgard_vector_alu_src src, bool is_int, unsigned mask)
-{
- /* abs or neg */
- if (!is_int && src.mod) return true;
-
- /* swizzle */
- for (unsigned c = 0; c < 4; ++c) {
- if (!(mask & (1 << c))) continue;
- if (((src.swizzle >> (2*c)) & 3) != c) return true;
- }
-
- return false;
-}
-
/* 'Intrinsic' move for misc aliasing uses independent of actual NIR ALU code */
static midgard_instruction
},
.alu = {
.op = midgard_alu_op_fmov,
- .reg_mode = midgard_reg_mode_full,
+ .reg_mode = midgard_reg_mode_32,
.dest_override = midgard_dest_override_none,
.mask = 0xFF,
.src1 = vector_alu_srco_unsigned(zero_alu_src),
* don't support half-floats -- this requires changes in other parts of the
* compiler -- therefore the 16-bit versions are commented out. */
-//M_LOAD(load_attr_16);
-M_LOAD(load_attr_32);
-//M_LOAD(load_vary_16);
-M_LOAD(load_vary_32);
-//M_LOAD(load_uniform_16);
-M_LOAD(load_uniform_32);
-M_LOAD(load_color_buffer_8);
-//M_STORE(store_vary_16);
-M_STORE(store_vary_32);
-M_STORE(store_cubemap_coords);
+//M_LOAD(ld_attr_16);
+M_LOAD(ld_attr_32);
+//M_LOAD(ld_vary_16);
+M_LOAD(ld_vary_32);
+//M_LOAD(ld_uniform_16);
+M_LOAD(ld_uniform_32);
+M_LOAD(ld_color_buffer_8);
+//M_STORE(st_vary_16);
+M_STORE(st_vary_32);
+M_STORE(st_cubemap_coords);
static midgard_instruction
v_alu_br_compact_cond(midgard_jmp_writeout_op op, unsigned tag, signed offset, unsigned cond)
optimise_nir(nir_shader *nir)
{
bool progress;
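+ /* Collect which flrp bit sizes the target asks us to lower, so we can
+ * run nir_lower_flrp exactly once below */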
+ unsigned lower_flrp =
+ (nir->options->lower_flrp16 ? 16 : 0) |
+ (nir->options->lower_flrp32 ? 32 : 0) |
+ (nir->options->lower_flrp64 ? 64 : 0);
NIR_PASS(progress, nir, nir_lower_regs_to_ssa);
NIR_PASS(progress, nir, midgard_nir_lower_fdot2);
NIR_PASS(progress, nir, nir_opt_peephole_select, 64, false, true);
NIR_PASS(progress, nir, nir_opt_algebraic);
NIR_PASS(progress, nir, nir_opt_constant_folding);
+
+ if (lower_flrp != 0) {
+ bool lower_flrp_progress = false;
+ NIR_PASS(lower_flrp_progress,
+ nir,
+ nir_lower_flrp,
+ lower_flrp,
+ false /* always_precise */,
+ nir->options->lower_ffma);
+ if (lower_flrp_progress) {
+ NIR_PASS(progress, nir,
+ nir_opt_constant_folding);
+ progress = true;
+ }
+
+ /* Nothing should rematerialize any flrps, so we only
+ * need to do this lowering once.
+ */
+ lower_flrp = 0;
+ }
+
NIR_PASS(progress, nir, nir_opt_undef);
NIR_PASS(progress, nir, nir_opt_loop_unroll,
nir_var_shader_in |
} while (progress);
NIR_PASS(progress, nir, nir_opt_algebraic_late);
+
+ /* We implement booleans as 32-bit 0/~0 */
+ NIR_PASS(progress, nir, nir_lower_bool_to_int32);
+
+ /* Now that booleans are lowered, we can run our late opts */
NIR_PASS(progress, nir, midgard_nir_lower_algebraic_late);
/* Lower mods for float ops only. Integer ops don't support modifiers
NIR_PASS(progress, nir, nir_copy_prop);
NIR_PASS(progress, nir, nir_opt_dce);
- /* We implement booleans as 32-bit 0/~0 */
- NIR_PASS(progress, nir, nir_lower_bool_to_int32);
-
/* Take us out of SSA */
NIR_PASS(progress, nir, nir_lower_locals_to_regs);
NIR_PASS(progress, nir, nir_convert_from_ssa, true);
{
nir_ssa_def def = instr->def;
- float *v = ralloc_array(NULL, float, 4);
+ float *v = rzalloc_array(NULL, float, 4);
nir_const_load_to_arr(v, instr, f32);
_mesa_hash_table_u64_insert(ctx->ssa_constants, def.index + 1, v);
}
return nir_src_index(ctx, &src->src);
}
-/* Midgard puts conditionals in r31.w; move an arbitrary source (the output of
- * a conditional test) into that register */
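+/* Returns true if the swizzle selects more than one distinct component across
+ * the first nr_components lanes, i.e. the condition is not a single scalar.
+ * Used below to pick between the scalar and vector csel paths */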
+static bool
+nir_is_non_scalar_swizzle(nir_alu_src *src, unsigned nr_components)
+{
+ unsigned comp = src->swizzle[0];
+
+ for (unsigned c = 1; c < nr_components; ++c) {
+ if (src->swizzle[c] != comp)
+ return true;
+ }
+
+ return false;
+}
+
+/* Midgard puts scalar conditionals in r31.w; move an arbitrary source (the
+ * output of a conditional test) into that register */
static void
emit_condition(compiler_context *ctx, nir_src *src, bool for_branch, unsigned component)
midgard_instruction ins = {
.type = TAG_ALU_4,
- .unit = for_branch ? UNIT_SMUL : UNIT_SADD, /* TODO: DEDUCE THIS */
+
+ /* We need to set the conditional as close to its use as possible */
+ .precede_break = true,
+ .unit = for_branch ? UNIT_SMUL : UNIT_SADD,
+
.ssa_args = {
.src0 = condition,
.src1 = condition,
.dest = SSA_FIXED_REGISTER(31),
},
.alu = {
.op = midgard_alu_op_iand,
- .reg_mode = midgard_reg_mode_full,
+ .reg_mode = midgard_reg_mode_32,
.dest_override = midgard_dest_override_none,
.mask = (0x3 << 6), /* w */
.src1 = vector_alu_srco_unsigned(alu_src),
emit_mir_instruction(ctx, ins);
}
+/* Or, for mixed conditions (with csel_v), here's a vector version using all of
+ * r31 instead */
+
+static void
+emit_condition_mixed(compiler_context *ctx, nir_alu_src *src, unsigned nr_comp)
+{
+ int condition = nir_src_index(ctx, &src->src);
+
+ /* Source to swizzle the desired component into w */
+
+ const midgard_vector_alu_src alu_src = {
+ .swizzle = SWIZZLE_FROM_ARRAY(src->swizzle),
+ };
+
+ /* There is no boolean move instruction. Instead, we simulate a move by
+ * ANDing the condition with itself to get it into r31 */
+
+ midgard_instruction ins = {
+ .type = TAG_ALU_4,
+ .precede_break = true,
+ .ssa_args = {
+ .src0 = condition,
+ .src1 = condition,
+ .dest = SSA_FIXED_REGISTER(31),
+ },
+ .alu = {
+ .op = midgard_alu_op_iand,
+ .reg_mode = midgard_reg_mode_32,
+ .dest_override = midgard_dest_override_none,
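+ /* Write one lane per condition component; expand_writemask widens
+ * the per-component bits into the ALU's mask encoding */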
+ .mask = expand_writemask((1 << nr_comp) - 1),
+ .src1 = vector_alu_srco_unsigned(alu_src),
+ .src2 = vector_alu_srco_unsigned(alu_src)
+ },
+ };
+
+ emit_mir_instruction(ctx, ins);
+}
+
/* Likewise, indirect offsets are put in r27.w. TODO: Allow componentwise
* pinning to eliminate this move in all known cases */
},
.alu = {
.op = midgard_alu_op_imov,
- .reg_mode = midgard_reg_mode_full,
+ .reg_mode = midgard_reg_mode_32,
.dest_override = midgard_dest_override_none,
.mask = (0x3 << 6), /* w */
.src1 = vector_alu_srco_unsigned(zero_alu_src),
case nir_op_##nir: \
op = midgard_alu_op_##_op; \
break;
-
static bool
nir_is_fzero_constant(nir_src src)
{
ALU_CASE(isub, isub);
ALU_CASE(imul, imul);
ALU_CASE(iabs, iabs);
-
- /* XXX: Use fmov, not imov, since imov was causing major
- * issues with texture precision? XXX research */
ALU_CASE(imov, imov);
ALU_CASE(feq32, feq);
ALU_CASE(iand, iand);
ALU_CASE(ior, ior);
ALU_CASE(ixor, ixor);
- ALU_CASE(inot, inot);
+ ALU_CASE(inot, inand);
ALU_CASE(ishl, ishl);
ALU_CASE(ishr, iasr);
ALU_CASE(ushr, ilsr);
}
case nir_op_b32csel: {
- op = midgard_alu_op_fcsel;
+ /* Midgard features both fcsel and icsel, depending on
+ * the type of the arguments/output. However, as long
+ * as we're careful we can _always_ use icsel and
+ * _never_ need fcsel, since the latter does additional
+ * floating-point-specific processing whereas the
+ * former just moves bits on the wire. It's not obvious
+ * why these are separate opcodes, save for the ability
+ * to do things like sat/pos/abs/neg for free */
+
+ bool mixed = nir_is_non_scalar_swizzle(&instr->src[0], nr_components);
+ op = mixed ? midgard_alu_op_icsel_v : midgard_alu_op_icsel;
/* csel works as a two-arg in Midgard, since the condition is hardcoded in r31.w */
nr_inputs = 2;
- /* Figure out which component the condition is in */
-
- unsigned comp = instr->src[0].swizzle[0];
+ /* Emit the condition into r31 */
- /* Make sure NIR isn't throwing a mixed condition at us */
-
- for (unsigned c = 1; c < nr_components; ++c)
- assert(instr->src[0].swizzle[c] == comp);
-
- /* Emit the condition into r31.w */
- emit_condition(ctx, &instr->src[0].src, false, comp);
+ if (mixed)
+ emit_condition_mixed(ctx, &instr->src[0], nr_components);
+ else
+ emit_condition(ctx, &instr->src[0].src, false, instr->src[0].swizzle[0]);
/* The condition is the first argument; move the other
* arguments up one to be a binary instruction for
return;
}
- /* Midgard can perform certain modifiers on output ofa n ALU op */
+ /* Midgard can perform certain modifiers on output of an ALU op */
midgard_outmod outmod =
+ midgard_is_integer_out_op(op) ? midgard_outmod_int :
instr->dest.saturate ? midgard_outmod_sat : midgard_outmod_none;
/* fmax(a, 0.0) can turn into a .pos modifier as an optimization */
midgard_vector_alu alu = {
.op = op,
- .reg_mode = midgard_reg_mode_full,
+ .reg_mode = midgard_reg_mode_32,
.dest_override = midgard_dest_override_none,
.outmod = outmod,
ins.has_constants = true;
ins.constants[0] = 0.0f;
ins.alu.src2 = vector_alu_srco_unsigned(blank_alu_src_xxxx);
+ } else if (instr->op == nir_op_inot) {
+ /* ~b = ~(b & b), so duplicate the source */
+ ins.ssa_args.src1 = ins.ssa_args.src0;
+ ins.alu.src2 = ins.alu.src1;
}
if ((opcode_props & UNITS_ALL) == UNIT_VLUT) {
* higher-indexed uniforms, at a performance cost. More
* generally, we're emitting a UBO read instruction. */
- midgard_instruction ins = m_load_uniform_32(dest, offset);
+ midgard_instruction ins = m_ld_uniform_32(dest, offset);
/* TODO: Don't split */
ins.load_store.varying_parameters = (offset & 7) << 7;
/* XXX: Half-floats? */
/* TODO: swizzle, mask */
- midgard_instruction ins = m_load_vary_32(reg, offset);
+ midgard_instruction ins = m_ld_vary_32(reg, offset);
midgard_varying_parameter p = {
.is_varying = 1,
} else if (out->data.location == VARYING_SLOT_COL1) {
/* Destination color must be read from framebuffer */
- midgard_instruction ins = m_load_color_buffer_8(reg, 0);
+ midgard_instruction ins = m_ld_color_buffer_8(reg, 0);
ins.load_store.swizzle = 0; /* xxxx */
/* Read each component sequentially */
},
.alu = {
.op = midgard_alu_op_u2f,
- .reg_mode = midgard_reg_mode_half,
+ .reg_mode = midgard_reg_mode_16,
.dest_override = midgard_dest_override_none,
.mask = 0xF,
.src1 = vector_alu_srco_unsigned(alu_src),
},
.alu = {
.op = midgard_alu_op_fmul,
- .reg_mode = midgard_reg_mode_full,
+ .reg_mode = midgard_reg_mode_32,
.dest_override = midgard_dest_override_none,
.outmod = midgard_outmod_sat,
.mask = 0xFF,
assert(0);
}
} else if (ctx->stage == MESA_SHADER_VERTEX) {
- midgard_instruction ins = m_load_attr_32(reg, offset);
+ midgard_instruction ins = m_ld_attr_32(reg, offset);
ins.load_store.unknown = 0x1E1E; /* XXX: What is this? */
ins.load_store.mask = (1 << instr->num_components) - 1;
emit_mir_instruction(ctx, ins);
attach_constants(ctx, &ins, constant_value, reg + 1);
emit_mir_instruction(ctx, ins);
- midgard_instruction st = m_store_vary_32(SSA_FIXED_REGISTER(0), offset);
+ midgard_instruction st = m_st_vary_32(SSA_FIXED_REGISTER(0), offset);
st.load_store.unknown = 0x1E9E; /* XXX: What is this? */
emit_mir_instruction(ctx, st);
} else {
midgard_instruction move = v_fmov(index, alu_src, SSA_FIXED_REGISTER(27));
emit_mir_instruction(ctx, move);
- midgard_instruction st = m_store_cubemap_coords(reg, 0);
+ midgard_instruction st = m_st_cubemap_coords(reg, 0);
st.load_store.unknown = 0x24; /* XXX: What is this? */
st.load_store.mask = 0x3; /* xy? */
st.load_store.swizzle = alu_src.swizzle;
emit_mir_instruction(ctx, ins);
}
- //midgard_pin_output(ctx, index, REGISTER_TEXTURE_BASE + in_reg);
-
break;
}
if (reg >= 0) {
assert(reg < maxreg);
+ assert(g);
int r = ra_get_node_reg(g, reg);
ctx->work_registers = MAX2(ctx->work_registers, r);
return r;
return succ;
}
+/* Once registers have been decided via register allocation
+ * (allocate_registers), we need to rewrite the MIR to use registers instead of
+ * SSA */
+
static void
+install_registers(compiler_context *ctx, struct ra_graph *g)
+{
+ mir_foreach_block(ctx, block) {
+ mir_foreach_instr_in_block(block, ins) {
+ if (ins->compact_branch) continue;
+
+ ssa_args args = ins->ssa_args;
+
+ switch (ins->type) {
+ case TAG_ALU_4:
+ ins->registers.src1_reg = dealias_register(ctx, g, args.src0, ctx->temp_count);
+
+ ins->registers.src2_imm = args.inline_constant;
+
+ if (args.inline_constant) {
+ /* Encode inline 16-bit constant as a vector by default */
+
+ ins->registers.src2_reg = ins->inline_constant >> 11;
+
+ int lower_11 = ins->inline_constant & ((1 << 12) - 1);
+
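+ /* Reassemble the remaining bits into the source field: the top three
+ * bits of lower_11 land in bits 0-2 and the low byte is shifted up by
+ * three, per the shifts below */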
+ uint16_t imm = ((lower_11 >> 8) & 0x7) | ((lower_11 & 0xFF) << 3);
+ ins->alu.src2 = imm << 2;
+ } else {
+ ins->registers.src2_reg = dealias_register(ctx, g, args.src1, ctx->temp_count);
+ }
+
+ ins->registers.out_reg = dealias_register(ctx, g, args.dest, ctx->temp_count);
+
+ break;
+
+ case TAG_LOAD_STORE_4: {
+ if (OP_IS_STORE_VARY(ins->load_store.op)) {
+ /* TODO: use ssa_args for st_vary */
+ ins->load_store.reg = 0;
+ } else {
+ bool has_dest = args.dest >= 0;
+ int ssa_arg = has_dest ? args.dest : args.src0;
+
+ ins->load_store.reg = dealias_register(ctx, g, ssa_arg, ctx->temp_count);
+ }
+
+ break;
+ }
+
+ default:
+ break;
+ }
+ }
+ }
+}
+
+/* This routine performs the actual register allocation. It should be followed
+ * by install_registers */
+
+static struct ra_graph *
allocate_registers(compiler_context *ctx)
{
/* First, initialize the RA */
print_mir_block(block);
}
+ /* With no temporaries there is no register allocation to do */
+
+ if (!ctx->temp_count)
+ return NULL;
+
/* Let's actually do register allocation */
int nodes = ctx->temp_count;
struct ra_graph *g = ra_alloc_interference_graph(regs, nodes);
mir_foreach_instr_in_block(block, ins) {
if (ins->compact_branch) continue;
+ /* Dest is < 0 for st_vary instructions, which break
+ * the usual SSA conventions. Liveness analysis doesn't
+ * make sense on these instructions, so skip them to
+ * avoid memory corruption */
+
+ if (ins->ssa_args.dest < 0) continue;
+
if (ins->ssa_args.dest < SSA_FIXED_MINIMUM) {
/* If this destination is not yet live, it is now since we just wrote it */
free(live_start);
free(live_end);
- mir_foreach_block(ctx, block) {
- mir_foreach_instr_in_block(block, ins) {
- if (ins->compact_branch) continue;
-
- ssa_args args = ins->ssa_args;
-
- switch (ins->type) {
- case TAG_ALU_4:
- ins->registers.src1_reg = dealias_register(ctx, g, args.src0, nodes);
-
- ins->registers.src2_imm = args.inline_constant;
-
- if (args.inline_constant) {
- /* Encode inline 16-bit constant as a vector by default */
-
- ins->registers.src2_reg = ins->inline_constant >> 11;
-
- int lower_11 = ins->inline_constant & ((1 << 12) - 1);
-
- uint16_t imm = ((lower_11 >> 8) & 0x7) | ((lower_11 & 0xFF) << 3);
- ins->alu.src2 = imm << 2;
- } else {
- ins->registers.src2_reg = dealias_register(ctx, g, args.src1, nodes);
- }
-
- ins->registers.out_reg = dealias_register(ctx, g, args.dest, nodes);
-
- break;
-
- case TAG_LOAD_STORE_4: {
- if (OP_IS_STORE_VARY(ins->load_store.op)) {
- /* TODO: use ssa_args for store_vary */
- ins->load_store.reg = 0;
- } else {
- bool has_dest = args.dest >= 0;
- int ssa_arg = has_dest ? args.dest : args.src0;
-
- ins->load_store.reg = dealias_register(ctx, g, ssa_arg, nodes);
- }
-
- break;
- }
-
- default:
- break;
- }
- }
- }
+ return g;
}
/* Midgard IR only knows vector ALU types, but we sometimes need to actually
/* Ensure that the chain can continue */
if (ains->type != TAG_ALU_4) break;
+ /* If there's already something in the bundle and this
+ * instruction requires a break before it (precede_break),
+ * end the bundle here */
+ if (ains->precede_break && index) break;
+
/* According to the presentation "The ARM
* Mali-T880 Mobile GPU" from HotChips 27,
* there are two pipeline stages. Branching
static void
schedule_program(compiler_context *ctx)
{
- allocate_registers(ctx);
+ /* We run RA prior to scheduling */
+ struct ra_graph *g = allocate_registers(ctx);
+ install_registers(ctx, g);
mir_foreach_block(ctx, block) {
schedule_block(ctx, block);
/* Scale constant appropriately, if we can legally */
uint16_t scaled_constant = 0;
- /* XXX: Check legality */
if (midgard_is_integer_op(op)) {
- /* TODO: Inline integer */
- continue;
-
unsigned int *iconstants = (unsigned int *) ins->constants;
scaled_constant = (uint16_t) iconstants[component];
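+ /* Only inline if the integer survives truncation to 16 bits */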
if (scaled_constant != iconstants[component])
continue;
} else {
- scaled_constant = _mesa_float_to_half((float) ins->constants[component]);
+ float original = (float) ins->constants[component];
+ scaled_constant = _mesa_float_to_half(original);
+
+ /* Check for loss of precision. If this is
+ * mediump, we don't care, but for a highp
+ * shader, we need to pay attention. NIR
+ * doesn't yet tell us which mode we're in!
+ * Practically this prevents most constants
+ * from being inlined, sadly. */
+
+ float fp32 = _mesa_half_to_float(scaled_constant);
+
+ if (fp32 != original)
+ continue;
}
/* We don't know how to handle these with a constant */
return progress;
}
+static bool
+mir_nontrivial_mod(midgard_vector_alu_src src, bool is_int, unsigned mask)
+{
+ /* abs or neg */
+ if (!is_int && src.mod) return true;
+
+ /* swizzle */
+ for (unsigned c = 0; c < 4; ++c) {
+ if (!(mask & (1 << c))) continue;
+ if (((src.swizzle >> (2*c)) & 3) != c) return true;
+ }
+
+ return false;
+}
+
static bool
midgard_opt_copy_prop(compiler_context *ctx, midgard_block *block)
{
if (to >= SSA_FIXED_MINIMUM) continue;
if (from >= SSA_FIXED_MINIMUM) continue;
+ if (to >= ctx->func->impl->ssa_alloc) continue;
+ if (from >= ctx->func->impl->ssa_alloc) continue;
+
+ /* Constant propagation is not handled here, either */
+ if (ins->ssa_args.inline_constant) continue;
+ if (ins->has_constants) continue;
/* Also, if the move has side effects, we're helpless */
bool is_int = midgard_is_integer_op(ins->alu.op);
if (mir_nontrivial_mod(src, is_int, mask)) continue;
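+ /* An output modifier on the move would be lost if we propagated it away */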
+ if (ins->alu.outmod != midgard_outmod_none) continue;
mir_foreach_instr_in_block_from(block, v, mir_next_op(ins)) {
if (v->ssa_args.src0 == to) {
return progress;
}
+static bool
+midgard_opt_copy_prop_tex(compiler_context *ctx, midgard_block *block)
+{
+ bool progress = false;
+
+ mir_foreach_instr_in_block_safe(block, ins) {
+ if (ins->type != TAG_ALU_4) continue;
+ if (!OP_IS_MOVE(ins->alu.op)) continue;
+
+ unsigned from = ins->ssa_args.src1;
+ unsigned to = ins->ssa_args.dest;
+
+ /* Make sure it's simple enough for us to handle */
+
+ if (from >= SSA_FIXED_MINIMUM) continue;
+ if (from >= ctx->func->impl->ssa_alloc) continue;
+ if (to < SSA_FIXED_REGISTER(REGISTER_TEXTURE_BASE)) continue;
+ if (to > SSA_FIXED_REGISTER(REGISTER_TEXTURE_BASE + 1)) continue;
+
+ bool eliminated = false;
+
+ mir_foreach_instr_in_block_from_rev(block, v, mir_prev_op(ins)) {
+ /* The texture registers are not SSA, so be careful.
+ * Conservatively, stop the backwards walk as soon as we
+ * hit anything that is not an ALU op, since e.g. a texture
+ * op could clobber the register even without an obvious write */
+
+ if (v->type != TAG_ALU_4)
+ break;
+
+ if (v->ssa_args.dest == from) {
+ /* We don't want to track partial writes ... */
+ if (v->alu.mask == 0xF) {
+ v->ssa_args.dest = to;
+ eliminated = true;
+ }
+
+ break;
+ }
+ }
+
+ if (eliminated)
+ mir_remove_instruction(ins);
+
+ progress |= eliminated;
+ }
+
+ return progress;
+}
+
/* The following passes reorder MIR instructions to enable better scheduling */
static void
midgard_instruction mov = v_fmov(idx, blank_alu_src, SSA_FIXED_REGISTER(REGISTER_VARYING_BASE + high_varying_register));
- midgard_instruction st = m_store_vary_32(SSA_FIXED_REGISTER(high_varying_register), varying);
+ midgard_instruction st = m_st_vary_32(SSA_FIXED_REGISTER(high_varying_register), varying);
st.load_store.unknown = 0x1E9E; /* XXX: What is this? */
mir_insert_instruction_before(mir_next_op(ins), st);
},
.alu = {
.op = midgard_alu_op_fmul,
- .reg_mode = midgard_reg_mode_full,
+ .reg_mode = midgard_reg_mode_32,
.dest_override = midgard_dest_override_lower,
.mask = 0xFF,
.src1 = vector_alu_srco_unsigned(blank_alu_src),
},
.alu = {
.op = midgard_alu_op_f2u8,
- .reg_mode = midgard_reg_mode_half,
+ .reg_mode = midgard_reg_mode_16,
.dest_override = midgard_dest_override_lower,
.outmod = midgard_outmod_pos,
.mask = 0xF,
},
.alu = {
.op = midgard_alu_op_imov,
- .reg_mode = midgard_reg_mode_quarter,
+ .reg_mode = midgard_reg_mode_8,
.dest_override = midgard_dest_override_none,
.mask = 0xFF,
.src1 = vector_alu_srco_unsigned(blank_alu_src),
mir_foreach_block(ctx, block) {
progress |= midgard_opt_copy_prop(ctx, block);
+ progress |= midgard_opt_copy_prop_tex(ctx, block);
progress |= midgard_opt_dead_code_eliminate(ctx, block);
}
} while (progress);