X-Git-Url: https://git.libre-soc.org/?a=blobdiff_plain;f=src%2Fgallium%2Fdrivers%2Fpanfrost%2Fmidgard%2Fmidgard_compile.c;h=34b0678cf984ab634658d22c9c4016d529cd7748;hb=0cfa54801e6beac8bd1f1e7152cc02af6a636ea0;hp=cb3b4689c8200e9cad46db1e2de087dc6b08b80d;hpb=f42e5be9105c0a7a6358e51ca2af6fd5a31c6e7e;p=mesa.git

diff --git a/src/gallium/drivers/panfrost/midgard/midgard_compile.c b/src/gallium/drivers/panfrost/midgard/midgard_compile.c
index cb3b4689c82..34b0678cf98 100644
--- a/src/gallium/drivers/panfrost/midgard/midgard_compile.c
+++ b/src/gallium/drivers/panfrost/midgard/midgard_compile.c
@@ -88,6 +88,7 @@ midgard_block_add_successor(midgard_block *block, midgard_block *successor)
         static midgard_instruction m_##name(unsigned ssa, unsigned address) { \
                 midgard_instruction i = { \
                         .type = TAG_LOAD_STORE_4, \
+                        .mask = 0xF, \
                         .ssa_args = { \
                                 .rname = ssa, \
                                 .uname = -1, \
@@ -95,7 +96,6 @@ midgard_block_add_successor(midgard_block *block, midgard_block *successor)
                 }, \
                 .load_store = { \
                         .op = midgard_op_##name, \
-                        .mask = 0xF, \
                         .swizzle = SWIZZLE_XYZW, \
                         .address = address \
                 } \
@@ -111,7 +111,8 @@ midgard_block_add_successor(midgard_block *block, midgard_block *successor)
  * the corresponding Midgard source */
 
 static midgard_vector_alu_src
-vector_alu_modifiers(nir_alu_src *src, bool is_int, unsigned broadcast_count)
+vector_alu_modifiers(nir_alu_src *src, bool is_int, unsigned broadcast_count,
+                     bool half, bool sext)
 {
         if (!src) return blank_alu_src;
 
@@ -131,14 +132,21 @@ vector_alu_modifiers(nir_alu_src *src, bool is_int, unsigned broadcast_count)
         midgard_vector_alu_src alu_src = {
                 .rep_low = 0,
                 .rep_high = 0,
-                .half = 0, /* TODO */
+                .half = half,
                 .swizzle = SWIZZLE_FROM_ARRAY(src->swizzle)
         };
 
         if (is_int) {
-                /* TODO: sign-extend/zero-extend */
                 alu_src.mod = midgard_int_normal;
 
+                /* Sign/zero-extend if needed */
+
+                if (half) {
+                        alu_src.mod = sext ?
+                                      midgard_int_sign_extend
+                                      : midgard_int_zero_extend;
+                }
+
                 /* These should have been lowered away */
                 assert(!(src->abs || src->negate));
         } else {
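
The new half/sext parameters cooperate to pick the integer source modifier: a full-width source stays midgard_int_normal, while a half-width source must say how it widens to the register mode. A minimal standalone sketch of that selection, with the enum stubbed in locally so it compiles on its own (the real encodings live in midgard.h; the values below are illustrative, not the hardware's):

    #include <stdbool.h>

    /* Stand-in for the source-modifier enum in midgard.h */
    typedef enum {
            midgard_int_zero_extend,
            midgard_int_normal,
            midgard_int_shift,
            midgard_int_sign_extend
    } midgard_int_mod;

    /* Mirrors the vector_alu_modifiers() change: full-size integer
     * sources are "normal"; half-size sources carry an explicit
     * widening rule */
    static midgard_int_mod
    pick_int_mod(bool half, bool sext)
    {
            if (!half)
                    return midgard_int_normal;

            return sext ? midgard_int_sign_extend
                        : midgard_int_zero_extend;
    }
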
@@ -588,6 +596,7 @@ emit_condition(compiler_context *ctx, nir_src *src, bool for_branch, unsigned co
                 /* We need to set the conditional as close as possible */
                 .precede_break = true,
                 .unit = for_branch ? UNIT_SMUL : UNIT_SADD,
+                .mask = 1 << COMPONENT_W,
 
                 .ssa_args = {
                         .src0 = condition,
@@ -600,7 +609,6 @@ emit_condition(compiler_context *ctx, nir_src *src, bool for_branch, unsigned co
                         .outmod = midgard_outmod_int_wrap,
                         .reg_mode = midgard_reg_mode_32,
                         .dest_override = midgard_dest_override_none,
-                        .mask = (0x3 << 6), /* w */
                         .src1 = vector_alu_srco_unsigned(alu_src),
                         .src2 = vector_alu_srco_unsigned(alu_src)
                 },
@@ -629,6 +637,7 @@ emit_condition_mixed(compiler_context *ctx, nir_alu_src *src, unsigned nr_comp)
         midgard_instruction ins = {
                 .type = TAG_ALU_4,
                 .precede_break = true,
+                .mask = mask_of(nr_comp),
                 .ssa_args = {
                         .src0 = condition,
                         .src1 = condition,
@@ -639,7 +648,6 @@ emit_condition_mixed(compiler_context *ctx, nir_alu_src *src, unsigned nr_comp)
                         .outmod = midgard_outmod_int_wrap,
                         .reg_mode = midgard_reg_mode_32,
                         .dest_override = midgard_dest_override_none,
-                        .mask = expand_writemask(mask_of(nr_comp)),
                         .src1 = vector_alu_srco_unsigned(alu_src),
                         .src2 = vector_alu_srco_unsigned(alu_src)
                 },
@@ -660,6 +668,7 @@ emit_indirect_offset(compiler_context *ctx, nir_src *src)
 
         midgard_instruction ins = {
                 .type = TAG_ALU_4,
+                .mask = 1 << COMPONENT_W,
                 .ssa_args = {
                         .src0 = SSA_UNUSED_1,
                         .src1 = offset,
@@ -670,7 +679,6 @@ emit_indirect_offset(compiler_context *ctx, nir_src *src)
                         .outmod = midgard_outmod_int_wrap,
                         .reg_mode = midgard_reg_mode_32,
                         .dest_override = midgard_dest_override_none,
-                        .mask = (0x3 << 6), /* w */
                         .src1 = vector_alu_srco_unsigned(zero_alu_src),
                         .src2 = vector_alu_srco_unsigned(blank_alu_src_xxxx)
                 },
@@ -682,12 +690,14 @@ emit_indirect_offset(compiler_context *ctx, nir_src *src)
 #define ALU_CASE(nir, _op) \
         case nir_op_##nir: \
                 op = midgard_alu_op_##_op; \
+                assert(src_bitsize == dst_bitsize); \
                 break;
 
 #define ALU_CASE_BCAST(nir, _op, count) \
         case nir_op_##nir: \
                 op = midgard_alu_op_##_op; \
                 broadcast_swizzle = count; \
+                assert(src_bitsize == dst_bitsize); \
                 break;
 
 static bool
 nir_is_fzero_constant(nir_src src)
@@ -703,9 +713,8 @@ nir_is_fzero_constant(nir_src src)
         return true;
 }
 
-/* Analyze the sizes of inputs/outputs to determine which reg mode to run an
- * ALU instruction in. Right now, we just consider the size of the first
- * argument. This will fail for upconverting, for instance (TODO) */
+/* Analyze the sizes of the inputs to determine which reg mode. Ops needing
+ * special treatment override this anyway. */
 
 static midgard_reg_mode
 reg_mode_for_nir(nir_alu_instr *instr)
@@ -751,12 +760,24 @@ emit_alu(compiler_context *ctx, nir_alu_instr *instr)
 
         unsigned broadcast_swizzle = 0;
 
+        /* What register mode should we operate in? */
+        midgard_reg_mode reg_mode =
+                reg_mode_for_nir(instr);
+
         /* Do we need a destination override? Used for inline
          * type conversion */
 
         midgard_dest_override dest_override =
                 midgard_dest_override_none;
 
+        /* Should we use a smaller respective source and sign-extend? */
+
+        bool half_1 = false, sext_1 = false;
+        bool half_2 = false, sext_2 = false;
+
+        unsigned src_bitsize = nir_src_bit_size(instr->src[0].src);
+        unsigned dst_bitsize = nir_dest_bit_size(instr->dest.dest);
+
         switch (instr->op) {
                 ALU_CASE(fadd, fadd);
                 ALU_CASE(fmul, fmul);
@@ -820,6 +841,11 @@ emit_alu(compiler_context *ctx, nir_alu_instr *instr)
                 ALU_CASE(i2f32, i2f_rtz);
                 ALU_CASE(u2f32, u2f_rtz);
 
+                ALU_CASE(f2i16, f2i_rtz);
+                ALU_CASE(f2u16, f2u_rtz);
+                ALU_CASE(i2f16, i2f_rtz);
+                ALU_CASE(u2f16, u2f_rtz);
+
                 ALU_CASE(fsin, fsin);
                 ALU_CASE(fcos, fcos);
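
The new src_bitsize/dst_bitsize locals drive both the ALU_CASE asserts (plain ops must not convert implicitly) and the initial register mode. reg_mode_for_nir() itself is outside this diff; judging by how it is used here, it presumably reduces to a bit-size-to-mode mapping along these lines (a sketch, not the actual Mesa helper):

    #include <assert.h>

    /* Stand-in for the enum in midgard.h. The declaration order mirrors
     * increasing width, which is what lets the conversion cases below
     * step up one size with a bare reg_mode++ */
    typedef enum {
            midgard_reg_mode_8,
            midgard_reg_mode_16,
            midgard_reg_mode_32,
            midgard_reg_mode_64
    } midgard_reg_mode;

    static midgard_reg_mode
    reg_mode_for_bitsize(unsigned bitsize)
    {
            switch (bitsize) {
            case 8:  return midgard_reg_mode_8;
            case 16: return midgard_reg_mode_16;
            case 32: return midgard_reg_mode_32;
            case 64: return midgard_reg_mode_64;
            default: assert(0); return midgard_reg_mode_32;
            }
    }
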
@@ -856,17 +882,54 @@ emit_alu(compiler_context *ctx, nir_alu_instr *instr)
         /* For size conversion, we use a move. Ideally though we would squash
          * these ops together; maybe that has to happen after in NIR as part of
          * propagation...? An earlier algebraic pass ensured we step down by
-         * only / exactly one size */
+         * only / exactly one size. If stepping down, we use a dest override to
+         * reduce the size; if stepping up, we use a larger-sized move with a
+         * half source and a sign/zero-extension modifier */
+
+        case nir_op_i2i8:
+        case nir_op_i2i16:
+        case nir_op_i2i32:
+                /* If we end up upscaling, we'll need a sign-extend on the
+                 * operand (the second argument) */
+
+                sext_2 = true;
         case nir_op_u2u8:
         case nir_op_u2u16:
-        case nir_op_i2i8:
-        case nir_op_i2i16: {
+        case nir_op_u2u32: {
                 op = midgard_alu_op_imov;
+
+                if (dst_bitsize == (src_bitsize * 2)) {
+                        /* Converting up */
+                        half_2 = true;
+
+                        /* Use a greater register mode */
+                        reg_mode++;
+                } else if (src_bitsize == (dst_bitsize * 2)) {
+                        /* Converting down */
+                        dest_override = midgard_dest_override_lower;
+                }
+
+                break;
+        }
+
+        case nir_op_f2f16: {
+                assert(src_bitsize == 32);
+
+                op = midgard_alu_op_fmov;
                 dest_override = midgard_dest_override_lower;
                 break;
         }
 
+        case nir_op_f2f32: {
+                assert(src_bitsize == 16);
+
+                op = midgard_alu_op_fmov;
+                half_2 = true;
+                reg_mode++;
+                break;
+        }
+
         /* For greater-or-equal, we lower to less-or-equal and flip the
          * arguments */
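
The conversion cases reduce to a size relation between source and destination. Since the earlier algebraic pass guarantees conversions step by exactly one size, only the 2x relations can occur; a self-contained sketch of the decision (hypothetical struct and function names, for illustration only):

    #include <assert.h>
    #include <stdbool.h>

    /* Sketch of the up/down-conversion decision for imov/fmov size casts */
    struct conv_config {
            bool half_src;      /* read the source at half the register width */
            bool step_up_mode;  /* bump reg_mode to the destination's size */
            bool override_down; /* write only the lower half of each lane */
    };

    static struct conv_config
    classify_conversion(unsigned src_bits, unsigned dst_bits)
    {
            struct conv_config c = { false, false, false };

            if (dst_bits == src_bits * 2) {
                    /* Stepping up: run at the wider mode, mark the source
                     * as half-width (sign/zero-extended for integers) */
                    c.half_src = true;
                    c.step_up_mode = true;
            } else if (src_bits == dst_bits * 2) {
                    /* Stepping down: run at the source's mode and use a
                     * dest override to shrink the result */
                    c.override_down = true;
            } else {
                    assert(src_bits == dst_bits);
            }

            return c;
    }
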
@@ -999,23 +1062,22 @@ emit_alu(compiler_context *ctx, nir_alu_instr *instr)
 
         bool is_int = midgard_is_integer_op(op);
 
+        ins.mask = mask_of(nr_components);
+
         midgard_vector_alu alu = {
                 .op = op,
-                .reg_mode = reg_mode_for_nir(instr),
+                .reg_mode = reg_mode,
                 .dest_override = dest_override,
                 .outmod = outmod,
 
-                /* Writemask only valid for non-SSA NIR */
-                .mask = expand_writemask(mask_of(nr_components)),
-
-                .src1 = vector_alu_srco_unsigned(vector_alu_modifiers(nirmods[0], is_int, broadcast_swizzle)),
-                .src2 = vector_alu_srco_unsigned(vector_alu_modifiers(nirmods[1], is_int, broadcast_swizzle)),
+                .src1 = vector_alu_srco_unsigned(vector_alu_modifiers(nirmods[0], is_int, broadcast_swizzle, half_1, sext_1)),
+                .src2 = vector_alu_srco_unsigned(vector_alu_modifiers(nirmods[1], is_int, broadcast_swizzle, half_2, sext_2)),
         };
 
         /* Apply writemask if non-SSA, keeping in mind that we can't write to components that don't exist */
 
         if (!is_ssa)
-                alu.mask &= expand_writemask(instr->dest.write_mask);
+                ins.mask &= instr->dest.write_mask;
 
         ins.alu = alu;
@@ -1060,21 +1122,22 @@ emit_alu(compiler_context *ctx, nir_alu_instr *instr)
                 uint8_t original_swizzle[4];
                 memcpy(original_swizzle, nirmods[0]->swizzle, sizeof(nirmods[0]->swizzle));
+                unsigned orig_mask = ins.mask;
 
                 for (int i = 0; i < nr_components; ++i) {
                         /* Mask the associated component, dropping the
                          * instruction if needed */
 
-                        ins.alu.mask = (0x3) << (2 * i);
-                        ins.alu.mask &= alu.mask;
+                        ins.mask = 1 << i;
+                        ins.mask &= orig_mask;
 
-                        if (!ins.alu.mask)
+                        if (!ins.mask)
                                 continue;
 
                         for (int j = 0; j < 4; ++j)
                                 nirmods[0]->swizzle[j] = original_swizzle[i]; /* Pull from the correct component */
 
-                        ins.alu.src1 = vector_alu_srco_unsigned(vector_alu_modifiers(nirmods[0], is_int, broadcast_swizzle));
+                        ins.alu.src1 = vector_alu_srco_unsigned(vector_alu_modifiers(nirmods[0], is_int, broadcast_swizzle, half_1, false));
                         emit_mir_instruction(ctx, ins);
                 }
         } else {
@@ -1140,7 +1203,7 @@ emit_varying_read(
         /* TODO: swizzle, mask */
 
         midgard_instruction ins = m_ld_vary_32(dest, offset);
-        ins.load_store.mask = mask_of(nr_comp);
+        ins.mask = mask_of(nr_comp);
         ins.load_store.swizzle = SWIZZLE_XYZW >> (2 * component);
 
         midgard_varying_parameter p = {
@@ -1197,79 +1260,6 @@ emit_sysval_read(compiler_context *ctx, nir_instr *instr)
         emit_ubo_read(ctx, dest, uniform, NULL, 0);
 }
 
-/* Reads RGBA8888 value from the tilebuffer and converts to a RGBA32F register,
- * using scalar ops functional on earlier Midgard generations. Newer Midgard
- * generations have faster vectorized reads. This operation is for blend
- * shaders in particular; reading the tilebuffer from the fragment shader
- * remains an open problem. */
-
-static void
-emit_fb_read_blend_scalar(compiler_context *ctx, unsigned reg)
-{
-        midgard_instruction ins = m_ld_color_buffer_8(reg, 0);
-        ins.load_store.swizzle = 0; /* xxxx */
-
-        /* Read each component sequentially */
-
-        for (unsigned c = 0; c < 4; ++c) {
-                ins.load_store.mask = (1 << c);
-                ins.load_store.unknown = c;
-                emit_mir_instruction(ctx, ins);
-        }
-
-        /* vadd.u2f hr2, zext(hr2), #0 */
-
-        midgard_vector_alu_src alu_src = blank_alu_src;
-        alu_src.mod = midgard_int_zero_extend;
-        alu_src.half = true;
-
-        midgard_instruction u2f = {
-                .type = TAG_ALU_4,
-                .ssa_args = {
-                        .src0 = reg,
-                        .src1 = SSA_UNUSED_0,
-                        .dest = reg,
-                        .inline_constant = true
-                },
-                .alu = {
-                        .op = midgard_alu_op_u2f_rtz,
-                        .reg_mode = midgard_reg_mode_16,
-                        .dest_override = midgard_dest_override_none,
-                        .mask = 0xF,
-                        .src1 = vector_alu_srco_unsigned(alu_src),
-                        .src2 = vector_alu_srco_unsigned(blank_alu_src),
-                }
-        };
-
-        emit_mir_instruction(ctx, u2f);
-
-        /* vmul.fmul.sat r1, hr2, #0.00392151 */
-
-        alu_src.mod = 0;
-
-        midgard_instruction fmul = {
-                .type = TAG_ALU_4,
-                .inline_constant = _mesa_float_to_half(1.0 / 255.0),
-                .ssa_args = {
-                        .src0 = reg,
-                        .dest = reg,
-                        .src1 = SSA_UNUSED_0,
-                        .inline_constant = true
-                },
-                .alu = {
-                        .op = midgard_alu_op_fmul,
-                        .reg_mode = midgard_reg_mode_32,
-                        .dest_override = midgard_dest_override_none,
-                        .outmod = midgard_outmod_sat,
-                        .mask = 0xFF,
-                        .src1 = vector_alu_srco_unsigned(alu_src),
-                        .src2 = vector_alu_srco_unsigned(blank_alu_src),
-                }
-        };
-
-        emit_mir_instruction(ctx, fmul);
-}
-
 static void
 emit_intrinsic(compiler_context *ctx, nir_intrinsic_instr *instr)
 {
@@ -1352,7 +1342,7 @@ emit_intrinsic(compiler_context *ctx, nir_intrinsic_instr *instr)
                 } else if (ctx->stage == MESA_SHADER_VERTEX) {
                         midgard_instruction ins = m_ld_attr_32(reg, offset);
                         ins.load_store.unknown = 0x1E1E; /* XXX: What is this? */
-                        ins.load_store.mask = mask_of(nr_comp);
+                        ins.mask = mask_of(nr_comp);
 
                         /* Use the type appropriate load */
                         switch (t) {
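
From this point on, ALU, load/store, and texture words all share the single per-instruction ins.mask, one bit per component, typically built with mask_of(). Assuming the usual helper definition (a sketch; the real one lives in the Midgard compiler headers):

    /* Presumed shape of the mask_of() helper: a dense writemask with one
     * bit per component, replacing the ALU word's packed
     * 2-bits-per-component encoding that expand_writemask() and
     * squeeze_writemask() used to translate back and forth */
    static inline unsigned
    mask_of(unsigned nr_comp)
    {
            return (1 << nr_comp) - 1;
    }

    /* e.g. mask_of(3) == 0x7: write x, y and z, leave w untouched */
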
@@ -1380,18 +1370,14 @@ emit_intrinsic(compiler_context *ctx, nir_intrinsic_instr *instr)
                 break;
         }
 
-        case nir_intrinsic_load_output:
-                assert(nir_src_is_const(instr->src[0]));
-                reg = nir_dest_index(ctx, &instr->dest);
+        /* Reads 128-bit value raw off the tilebuffer during blending, tasty */
 
-                if (ctx->is_blend) {
-                        /* TODO: MRT */
-                        emit_fb_read_blend_scalar(ctx, reg);
-                } else {
-                        DBG("Unknown output load\n");
-                        assert(0);
-                }
+        case nir_intrinsic_load_raw_output_pan:
+                reg = nir_dest_index(ctx, &instr->dest);
+                assert(ctx->is_blend);
 
+                midgard_instruction ins = m_ld_color_buffer_8(reg, 0);
+                emit_mir_instruction(ctx, ins);
                 break;
 
         case nir_intrinsic_load_blend_const_color_rgba: {
@@ -1459,6 +1445,17 @@ emit_intrinsic(compiler_context *ctx, nir_intrinsic_instr *instr)
 
                 break;
 
+        /* Special case of store_output for lowered blend shaders */
+        case nir_intrinsic_store_raw_output_pan:
+                assert(ctx->stage == MESA_SHADER_FRAGMENT);
+                reg = nir_src_index(ctx, &instr->src[0]);
+
+                midgard_instruction move = v_mov(reg, blank_alu_src, SSA_FIXED_REGISTER(0));
+                emit_mir_instruction(ctx, move);
+                ctx->fragment_output = reg;
+
+                break;
+
         case nir_intrinsic_load_alpha_ref_float:
                 assert(instr->dest.is_ssa);
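
With load_raw_output_pan/store_raw_output_pan, the backend now just moves raw tilebuffer values and leaves format conversion to the NIR-level blend lowering; the deleted emit_fb_read_blend_scalar() above and emit_blend_epilogue() below did that conversion by hand with a 1/255 scale (the #0.00392151 half-float constant). For reference, the conversion being relocated amounts to (plain C, illustrative only):

    #include <stdint.h>

    /* Reference semantics of the conversion that used to live in the
     * backend: RGBA8888 unorm <-> float, with the clamp standing in
     * for the saturate output modifier */
    static float
    unorm8_to_float(uint8_t u)
    {
            return u * (1.0f / 255.0f); /* ~0.00392157, stored as the
                                         * half-float 0.00392151 */
    }

    static uint8_t
    float_to_unorm8(float f)
    {
            if (f < 0.0f) f = 0.0f; /* .outmod = midgard_outmod_sat */
            if (f > 1.0f) f = 1.0f;

            return (uint8_t)(f * 255.0f + 0.5f);
    }
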
@@ -1577,6 +1574,7 @@ emit_texop_native(compiler_context *ctx, nir_tex_instr *instr,
         /* No helper to build texture words -- we do it all here */
         midgard_instruction ins = {
                 .type = TAG_TEXTURE_4,
+                .mask = 0xF,
                 .texture = {
                         .op = midgard_texop,
                         .format = midgard_tex_format(instr->sampler_dim),
@@ -1585,7 +1583,6 @@ emit_texop_native(compiler_context *ctx, nir_tex_instr *instr,
 
                         /* TODO: Regalloc it in */
                         .swizzle = SWIZZLE_XYZW,
-                        .mask = 0xF,
 
                         /* TODO: half */
                         .in_reg_full = 1,
@@ -1619,7 +1616,7 @@ emit_texop_native(compiler_context *ctx, nir_tex_instr *instr,
 
                                 midgard_instruction st = m_st_cubemap_coords(reg, 0);
                                 st.load_store.unknown = 0x24; /* XXX: What is this? */
-                                st.load_store.mask = 0x3; /* xy */
+                                st.mask = 0x3; /* xy */
                                 st.load_store.swizzle = alu_src.swizzle;
                                 emit_mir_instruction(ctx, st);
 
@@ -1628,7 +1625,7 @@ emit_texop_native(compiler_context *ctx, nir_tex_instr *instr,
                                 ins.texture.in_reg_swizzle = alu_src.swizzle = swizzle_of(nr_comp);
 
                                 midgard_instruction mov = v_mov(index, alu_src, reg);
-                                mov.alu.mask = expand_writemask(mask_of(nr_comp));
+                                mov.mask = mask_of(nr_comp);
                                 emit_mir_instruction(ctx, mov);
 
                                 if (midgard_texop == TEXTURE_OP_TEXEL_FETCH) {
@@ -1645,7 +1642,7 @@ emit_texop_native(compiler_context *ctx, nir_tex_instr *instr,
                                         zero.ssa_args.inline_constant = true;
                                         zero.ssa_args.src1 = SSA_FIXED_REGISTER(REGISTER_CONSTANT);
                                         zero.has_constants = true;
-                                        zero.alu.mask = ~mov.alu.mask;
+                                        zero.mask = ~mov.mask;
                                         emit_mir_instruction(ctx, zero);
 
                                         ins.texture.in_reg_swizzle = SWIZZLE_XYZZ;
@@ -1676,7 +1673,7 @@ emit_texop_native(compiler_context *ctx, nir_tex_instr *instr,
                         alu_src.swizzle = SWIZZLE_XXXX;
 
                         midgard_instruction mov = v_mov(index, alu_src, reg);
-                        mov.alu.mask = expand_writemask(1 << COMPONENT_W);
+                        mov.mask = 1 << COMPONENT_W;
                         emit_mir_instruction(ctx, mov);
 
                         ins.texture.lod_register = true;
@@ -1883,6 +1880,13 @@ embedded_to_inline_constant(compiler_context *ctx)
                 /* Blend constants must not be inlined by definition */
                 if (ins->has_blend_constant) continue;
 
+                /* We can inline 32-bit (sometimes) or 16-bit (usually) */
+                bool is_16 = ins->alu.reg_mode == midgard_reg_mode_16;
+                bool is_32 = ins->alu.reg_mode == midgard_reg_mode_32;
+
+                if (!(is_16 || is_32))
+                        continue;
+
                 /* src1 cannot be an inline constant due to encoding
                  * restrictions. So, if possible we try to flip the arguments
                  * in that case */
@@ -1933,7 +1937,7 @@ embedded_to_inline_constant(compiler_context *ctx)
                         /* Scale constant appropriately, if we can legally */
                         uint16_t scaled_constant = 0;
 
-                        if (midgard_is_integer_op(op)) {
+                        if (midgard_is_integer_op(op) || is_16) {
                                 unsigned int *iconstants = (unsigned int *) ins->constants;
                                 scaled_constant = (uint16_t) iconstants[component];
 
@@ -1972,7 +1976,7 @@ embedded_to_inline_constant(compiler_context *ctx)
                         uint32_t value = cons[component];
 
                         bool is_vector = false;
-                        unsigned mask = effective_writemask(&ins->alu);
+                        unsigned mask = effective_writemask(&ins->alu, ins->mask);
 
                         for (int c = 1; c < 4; ++c) {
                                 /* We only care if this component is actually used */
@@ -2082,6 +2086,12 @@ mir_nontrivial_mod(midgard_vector_alu_src src, bool is_int, unsigned mask)
         /* abs or neg */
         if (!is_int && src.mod) return true;
 
+        /* Other int mods don't matter in isolation */
+        if (is_int && src.mod == midgard_int_shift) return true;
+
+        /* size-conversion */
+        if (src.half) return true;
+
         /* swizzle */
         for (unsigned c = 0; c < 4; ++c) {
                 if (!(mask & (1 << c))) continue;
@@ -2094,13 +2104,12 @@ mir_nontrivial_mod(midgard_vector_alu_src src, bool is_int, unsigned mask)
 static bool
 mir_nontrivial_source2_mod(midgard_instruction *ins)
 {
-        unsigned mask = squeeze_writemask(ins->alu.mask);
         bool is_int = midgard_is_integer_op(ins->alu.op);
 
         midgard_vector_alu_src src2 =
                 vector_alu_from_unsigned(ins->alu.src2);
 
-        return mir_nontrivial_mod(src2, is_int, mask);
+        return mir_nontrivial_mod(src2, is_int, ins->mask);
 }
 
 static bool
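
Inline constants occupy a 16-bit immediate slot in the ALU word, which explains the new gate in embedded_to_inline_constant(): 16-bit mode can embed the raw low bits directly (hence "|| is_16" joining the integer path), 32-bit mode only works when the value survives the squeeze to a 16-bit integer or half-float, and the remaining modes have no legal encoding. A condensed sketch of that policy (hypothetical helper name, enum stubbed as in the earlier sketch):

    #include <stdbool.h>

    typedef enum { /* stand-in for midgard.h, as above */
            midgard_reg_mode_8, midgard_reg_mode_16,
            midgard_reg_mode_32, midgard_reg_mode_64
    } midgard_reg_mode;

    /* Only these two modes may carry an inline constant at all; the
     * per-value representability check happens separately */
    static bool
    can_inline_constant(midgard_reg_mode mode)
    {
            return mode == midgard_reg_mode_16 ||
                   mode == midgard_reg_mode_32;
    }
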
@@ -2216,55 +2225,6 @@ midgard_opt_pos_propagate(compiler_context *ctx, midgard_block *block)
         return progress;
 }
 
-static bool
-midgard_opt_copy_prop_tex(compiler_context *ctx, midgard_block *block)
-{
-        bool progress = false;
-
-        mir_foreach_instr_in_block_safe(block, ins) {
-                if (ins->type != TAG_ALU_4) continue;
-                if (!OP_IS_MOVE(ins->alu.op)) continue;
-
-                unsigned from = ins->ssa_args.src1;
-                unsigned to = ins->ssa_args.dest;
-
-                /* Make sure it's simple enough for us to handle */
-
-                if (from >= SSA_FIXED_MINIMUM) continue;
-                if (from >= ctx->func->impl->ssa_alloc) continue;
-                if (to < SSA_FIXED_REGISTER(REGISTER_TEXTURE_BASE)) continue;
-                if (to > SSA_FIXED_REGISTER(REGISTER_TEXTURE_BASE + 1)) continue;
-
-                bool eliminated = false;
-
-                mir_foreach_instr_in_block_from_rev(block, v, mir_prev_op(ins)) {
-                        /* The texture registers are not SSA so be careful.
-                         * Conservatively, just stop if we hit a texture op
-                         * (even if it may not write) to where we are */
-
-                        if (v->type != TAG_ALU_4)
-                                break;
-
-                        if (v->ssa_args.dest == from) {
-                                /* We don't want to track partial writes ... */
-                                if (v->alu.mask == 0xF) {
-                                        v->ssa_args.dest = to;
-                                        eliminated = true;
-                                }
-
-                                break;
-                        }
-                }
-
-                if (eliminated)
-                        mir_remove_instruction(ins);
-
-                progress |= eliminated;
-        }
-
-        return progress;
-}
-
 /* The following passes reorder MIR instructions to enable better scheduling */
 
 static void
@@ -2364,64 +2324,6 @@ emit_fragment_epilogue(compiler_context *ctx)
         EMIT(alu_br_compact_cond, midgard_jmp_writeout_op_writeout, TAG_ALU_4, -1, midgard_condition_always);
 }
 
-/* For the blend epilogue, we need to convert the blended fragment vec4 (stored
- * in r0) to a RGBA8888 value by scaling and type converting. We then output it
- * with the int8 analogue to the fragment epilogue */
-
-static void
-emit_blend_epilogue(compiler_context *ctx)
-{
-        /* fmov hr48, [...], r0 */
-
-        midgard_instruction scale = {
-                .type = TAG_ALU_4,
-                .unit = UNIT_VMUL,
-                .ssa_args = {
-                        .src0 = SSA_FIXED_REGISTER(24),
-                        .src1 = SSA_FIXED_REGISTER(0),
-                        .dest = SSA_FIXED_REGISTER(24),
-                },
-                .alu = {
-                        .op = midgard_alu_op_fmov,
-                        .reg_mode = midgard_reg_mode_32,
-                        .dest_override = midgard_dest_override_lower,
-                        .mask = 0xFF,
-                        .src1 = vector_alu_srco_unsigned(blank_alu_src),
-                        .src2 = vector_alu_srco_unsigned(blank_alu_src),
-                }
-        };
-
-        emit_mir_instruction(ctx, scale);
-
-        /* vadd.f2u_rte qr0, hr48, #0 */
-
-        midgard_vector_alu_src alu_src = blank_alu_src;
-        alu_src.half = true;
-
-        midgard_instruction f2u_rte = {
-                .type = TAG_ALU_4,
-                .ssa_args = {
-                        .src0 = SSA_FIXED_REGISTER(24),
-                        .src1 = SSA_UNUSED_0,
-                        .dest = SSA_FIXED_REGISTER(0),
-                        .inline_constant = true
-                },
-                .alu = {
-                        .op = midgard_alu_op_f2u_rte,
-                        .reg_mode = midgard_reg_mode_16,
-                        .dest_override = midgard_dest_override_lower,
-                        .mask = 0xF,
-                        .src1 = vector_alu_srco_unsigned(alu_src),
-                        .src2 = vector_alu_srco_unsigned(blank_alu_src),
-                }
-        };
-
-        emit_mir_instruction(ctx, f2u_rte);
-
-        EMIT(alu_br_compact_cond, midgard_jmp_writeout_op_writeout, TAG_ALU_4, 0, midgard_condition_always);
-        EMIT(alu_br_compact_cond, midgard_jmp_writeout_op_writeout, TAG_ALU_4, -1, midgard_condition_always);
-}
-
 static midgard_block *
 emit_block(compiler_context *ctx, nir_block *block)
 {
@@ -2458,10 +2360,7 @@ emit_block(compiler_context *ctx, nir_block *block)
         /* Append fragment shader epilogue (value writeout) */
         if (ctx->stage == MESA_SHADER_FRAGMENT) {
                 if (block == nir_impl_last_block(ctx->func->impl)) {
-                        if (ctx->is_blend)
-                                emit_blend_epilogue(ctx);
-                        else
-                                emit_fragment_epilogue(ctx);
+                        emit_fragment_epilogue(ctx);
                 }
         }
@@ -2751,7 +2650,6 @@ midgard_compile_shader_nir(nir_shader *nir, midgard_program *program, bool is_bl
                 mir_foreach_block(ctx, block) {
                         progress |= midgard_opt_pos_propagate(ctx, block);
                         progress |= midgard_opt_copy_prop(ctx, block);
-                        progress |= midgard_opt_copy_prop_tex(ctx, block);
                         progress |= midgard_opt_dead_code_eliminate(ctx, block);
                 }
         } while (progress);