so->base = *cso;
+ so->lrz_write = true; /* unless blend enabled for any MRT */
+
for (i = 0; i < ARRAY_SIZE(so->rb_mrt); i++) {
	const struct pipe_rt_blend_state *rt =
			cso->independent_blend_enable ? &cso->rt[i] : &cso->rt[0];

	if (rt->blend_enable) {
		so->rb_mrt[i].control |=
				A6XX_RB_MRT_CONTROL_BLEND |
				A6XX_RB_MRT_CONTROL_BLEND2;
mrt_blend |= (1 << i);
+ so->lrz_write = false;
}
if (reads_dest) {
// so->rb_mrt[i].control |= A6XX_RB_MRT_CONTROL_READ_DEST_ENABLE;
mrt_blend |= (1 << i);
+ so->lrz_write = false;
}
}
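
For context, the hunk above appears to be against the blend CSO constructor (fd6_blend.c in the a6xx driver), while the hunks below touch build_lrz() in fd6_emit.c. The new lrz_write flag simply records whether any MRT blends or reads its destination. A minimal standalone sketch of that rule, assuming gallium's pipe_blend_state layout; lrz_write_allowed() is a hypothetical helper, and reads_dest is taken as a parameter here although the driver derives it earlier in the same function:

#include <stdbool.h>
#include "pipe/p_state.h"	/* struct pipe_blend_state, pipe_rt_blend_state */

/* Hypothetical helper (not part of the patch): true only if no render
 * target blends or reads its destination, i.e. the condition under which
 * the hunk above leaves so->lrz_write set. */
static bool
lrz_write_allowed(const struct pipe_blend_state *cso, bool reads_dest)
{
	if (reads_dest)
		return false;

	for (unsigned i = 0; i < PIPE_MAX_COLOR_BUFS; i++) {
		const struct pipe_rt_blend_state *rt =
				cso->independent_blend_enable ? &cso->rt[i] : &cso->rt[0];
		if (rt->blend_enable)
			return false;
	}
	return true;
}
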
static struct fd_ringbuffer *
build_lrz(struct fd6_emit *emit, bool binning_pass)
{
+ struct fd6_blend_stateobj *blend = fd6_blend_stateobj(emit->ctx->blend);
struct fd6_zsa_stateobj *zsa = fd6_zsa_stateobj(emit->ctx->zsa);
struct pipe_framebuffer_state *pfb = &emit->ctx->batch->framebuffer;
struct fd_resource *rsc = fd_resource(pfb->zsbuf->texture);
if (emit->no_lrz_write || !rsc->lrz || !rsc->lrz_valid) {
gras_lrz_cntl = 0;
rb_lrz_cntl = 0;
- } else if (binning_pass && zsa->lrz_write) {
+ } else if (binning_pass && blend->lrz_write && zsa->lrz_write) {
gras_lrz_cntl |= A6XX_GRAS_LRZ_CNTL_LRZ_WRITE;
}
fd6_emit_add_group(emit, zsa->stateobj, FD6_GROUP_ZSA, 0x7);
}
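
Read together with the zsa state object, the write-enable decision in build_lrz() collapses to one predicate. A condensed sketch using only fields that appear in the hunk above (the declarations of gras_lrz_cntl/rb_lrz_cntl are elided in this excerpt); lrz_write_this_pass() is a hypothetical helper, not driver code:

/* LRZ is written only during the binning pass, only if both the blend and
 * zsa state objects permit it, and only while the LRZ buffer exists and is
 * still valid. */
static bool
lrz_write_this_pass(const struct fd6_emit *emit, bool binning_pass,
		const struct fd6_blend_stateobj *blend,
		const struct fd6_zsa_stateobj *zsa,
		const struct fd_resource *rsc)
{
	if (emit->no_lrz_write || !rsc->lrz || !rsc->lrz_valid)
		return false;
	return binning_pass && blend->lrz_write && zsa->lrz_write;
}
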
- if ((dirty & (FD_DIRTY_ZSA | FD_DIRTY_PROG)) && pfb->zsbuf) {
+ if ((dirty & (FD_DIRTY_ZSA | FD_DIRTY_BLEND | FD_DIRTY_PROG)) && pfb->zsbuf) {
struct fd_ringbuffer *state;
state = build_lrz(emit, false);
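
Because build_lrz() now also reads the blend state object, the LRZ state group has to be rebuilt whenever the bound blend CSO changes; adding FD_DIRTY_BLEND to the dirty mask above triggers that re-emit, so the write enable in GRAS_LRZ_CNTL cannot go stale when blending is toggled.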