panfrost/midgard: Don't try to "alias" texture registers
[mesa.git] / src / gallium / drivers / panfrost / pan_context.c
index 871b168040c0751df4ead40cd0a6a0abaa912a28..69d877277be14475e3e071c8b84dc56093627756 100644 (file)
@@ -99,13 +99,13 @@ panfrost_emit_sfbd(struct panfrost_context *ctx, unsigned vertex_count)
                 .unknown2 = 0x1f,
                 .format = 0x30000000,
                 .clear_flags = 0x1000,
-                .unknown_address_0 = ctx->scratchpad.gpu,
-                .tiler_polygon_list = ctx->tiler_polygon_list.gpu,
-                .tiler_polygon_list_body = ctx->tiler_polygon_list.gpu + 40960,
+                .unknown_address_0 = ctx->scratchpad.bo->gpu,
+                .tiler_polygon_list = ctx->tiler_polygon_list.bo->gpu,
+                .tiler_polygon_list_body = ctx->tiler_polygon_list.bo->gpu + 40960,
                 .tiler_hierarchy_mask = 0xF0,
                 .tiler_flags = 0x0,
-                .tiler_heap_free = ctx->tiler_heap.gpu,
-                .tiler_heap_end = ctx->tiler_heap.gpu + ctx->tiler_heap.size,
+                .tiler_heap_free = ctx->tiler_heap.bo->gpu,
+                .tiler_heap_end = ctx->tiler_heap.bo->gpu + ctx->tiler_heap.bo->size,
         };
 
         panfrost_set_framebuffer_resolution(&framebuffer, ctx->pipe_framebuffer.width, ctx->pipe_framebuffer.height);
@@ -133,7 +133,7 @@ panfrost_emit_mfbd(struct panfrost_context *ctx, unsigned vertex_count)
 
                 .unknown2 = 0x1f,
 
-                .scratchpad = ctx->scratchpad.gpu,
+                .scratchpad = ctx->scratchpad.bo->gpu,
         };
 
         framebuffer.tiler_hierarchy_mask =
@@ -152,22 +152,22 @@ panfrost_emit_mfbd(struct panfrost_context *ctx, unsigned vertex_count)
         unsigned total_size = header_size + body_size;
 
         if (framebuffer.tiler_hierarchy_mask) {
-               assert(ctx->tiler_polygon_list.size >= total_size);
+               assert(ctx->tiler_polygon_list.bo->size >= total_size);
 
                 /* Specify allocated tiler structures */
-                framebuffer.tiler_polygon_list = ctx->tiler_polygon_list.gpu;
+                framebuffer.tiler_polygon_list = ctx->tiler_polygon_list.bo->gpu;
 
                 /* Allow the entire tiler heap */
-                framebuffer.tiler_heap_start = ctx->tiler_heap.gpu;
+                framebuffer.tiler_heap_start = ctx->tiler_heap.bo->gpu;
                 framebuffer.tiler_heap_end =
-                        ctx->tiler_heap.gpu + ctx->tiler_heap.size;
+                        ctx->tiler_heap.bo->gpu + ctx->tiler_heap.bo->size;
         } else {
                 /* The tiler is disabled, so don't allow the tiler heap */
-                framebuffer.tiler_heap_start = ctx->tiler_heap.gpu;
+                framebuffer.tiler_heap_start = ctx->tiler_heap.bo->gpu;
                 framebuffer.tiler_heap_end = framebuffer.tiler_heap_start;
 
                 /* Use a dummy polygon list */
-                framebuffer.tiler_polygon_list = ctx->tiler_dummy.gpu;
+                framebuffer.tiler_polygon_list = ctx->tiler_dummy.bo->gpu;
 
                 /* Also, set a "tiler disabled?" flag? */
                 framebuffer.tiler_hierarchy_mask |= 0x1000;
@@ -190,7 +190,7 @@ bool
 panfrost_is_scanout(struct panfrost_context *ctx)
 {
         /* If there is no color buffer, it's an FBO */
-        if (!ctx->pipe_framebuffer.nr_cbufs)
+        if (ctx->pipe_framebuffer.nr_cbufs != 1)
                 return false;
 
         /* If we're too early that no framebuffer was sent, it's scanout */
@@ -529,15 +529,15 @@ panfrost_emit_varyings(
                 unsigned stride,
                 unsigned count)
 {
-        mali_ptr varying_address = ctx->varying_mem.gpu + ctx->varying_height;
+        mali_ptr varying_address = ctx->varying_mem.bo->gpu + ctx->varying_height;
 
         /* Fill out the descriptor */
         slot->elements = varying_address | MALI_ATTR_LINEAR;
         slot->stride = stride;
         slot->size = stride * count;
 
-        ctx->varying_height += ALIGN(slot->size, 64);
-        assert(ctx->varying_height < ctx->varying_mem.size);
+        ctx->varying_height += ALIGN_POT(slot->size, 64);
+        assert(ctx->varying_height < ctx->varying_mem.bo->size);
 
         return varying_address;
 }
@@ -552,7 +552,7 @@ panfrost_emit_point_coord(union mali_attr *slot)
 static void
 panfrost_emit_varying_descriptor(
                 struct panfrost_context *ctx,
-                unsigned invocation_count)
+                unsigned vertex_count)
 {
         /* Load the shaders */
 
@@ -638,19 +638,19 @@ panfrost_emit_varying_descriptor(
         unsigned idx = 0;
 
         panfrost_emit_varyings(ctx, &varyings[idx++], num_gen_varyings * 16,
-                               invocation_count);
+                               vertex_count);
 
         /* fp32 vec4 gl_Position */
         ctx->payload_tiler.postfix.position_varying =
                 panfrost_emit_varyings(ctx, &varyings[idx++],
-                                sizeof(float) * 4, invocation_count);
+                                sizeof(float) * 4, vertex_count);
 
 
         if (vs->writes_point_size || fs->reads_point_coord) {
                 /* fp16 vec1 gl_PointSize */
                 ctx->payload_tiler.primitive_size.pointer =
                         panfrost_emit_varyings(ctx, &varyings[idx++],
-                                        2, invocation_count);
+                                        2, vertex_count);
         }
 
         if (fs->reads_point_coord) {
@@ -663,7 +663,7 @@ panfrost_emit_varying_descriptor(
         ctx->payload_tiler.postfix.varyings = varyings_p;
 }
 
-static mali_ptr
+mali_ptr
 panfrost_vertex_buffer_address(struct panfrost_context *ctx, unsigned i)
 {
         struct pipe_vertex_buffer *buf = &ctx->vertex_buffers[i];
@@ -672,48 +672,6 @@ panfrost_vertex_buffer_address(struct panfrost_context *ctx, unsigned i)
         return rsrc->bo->gpu + buf->buffer_offset;
 }
 
-/* Emits attributes and varying descriptors, which should be called every draw,
- * excepting some obscure circumstances */
-
-static void
-panfrost_emit_vertex_data(struct panfrost_context *ctx, struct panfrost_job *job)
-{
-        /* Staged mali_attr, and index into them. i =/= k, depending on the
-         * vertex buffer mask */
-        union mali_attr attrs[PIPE_MAX_ATTRIBS];
-        unsigned k = 0;
-
-        unsigned invocation_count = MALI_NEGATIVE(ctx->payload_tiler.prefix.invocation_count);
-
-        for (int i = 0; i < ARRAY_SIZE(ctx->vertex_buffers); ++i) {
-                if (!(ctx->vb_mask & (1 << i))) continue;
-
-                struct pipe_vertex_buffer *buf = &ctx->vertex_buffers[i];
-                struct panfrost_resource *rsrc = (struct panfrost_resource *) (buf->buffer.resource);
-
-                if (!rsrc) continue;
-
-                /* Align to 64 bytes by masking off the lower bits. This
-                 * will be adjusted back when we fixup the src_offset in
-                 * mali_attr_meta */
-
-                mali_ptr addr = panfrost_vertex_buffer_address(ctx, i) & ~63;
-
-                /* Offset vertex count by draw_start to make sure we upload enough */
-                attrs[k].stride = buf->stride;
-                attrs[k].size = rsrc->base.width0;
-
-                panfrost_job_add_bo(job, rsrc->bo);
-                attrs[k].elements = addr | MALI_ATTR_LINEAR;
-
-                ++k;
-        }
-
-        ctx->payload_vertex.postfix.attributes = panfrost_upload_transient(ctx, attrs, k * sizeof(union mali_attr));
-
-        panfrost_emit_varying_descriptor(ctx, invocation_count);
-}
-
 static bool
 panfrost_writes_point_size(struct panfrost_context *ctx)
 {
@@ -759,12 +717,24 @@ panfrost_stage_attributes(struct panfrost_context *ctx)
          * QED.
          */
 
+        unsigned start = ctx->payload_vertex.draw_start;
+
         for (unsigned i = 0; i < so->num_elements; ++i) {
                 unsigned vbi = so->pipe[i].vertex_buffer_index;
+                struct pipe_vertex_buffer *buf = &ctx->vertex_buffers[vbi];
                 mali_ptr addr = panfrost_vertex_buffer_address(ctx, vbi);
 
                 /* Adjust by the masked off bits of the offset */
                 target[i].src_offset += (addr & 63);
+
+                /* Also, somewhat obscurely, per-instance data needs to be
+                 * offset in response to a delayed start in an indexed draw */
+
+                if (so->pipe[i].instance_divisor && ctx->instance_count > 1 && start) {
+                        target[i].src_offset -= buf->stride * start;
+                }
         }
 
         ctx->payload_vertex.postfix.attribute_meta = transfer.gpu;
@@ -827,10 +797,14 @@ panfrost_upload_tex(
         unsigned last_layer = pview->u.tex.last_layer;
 
         /* Lower-bit is set when sampling from colour AFBC */
-        bool is_afbc = rsrc->bo->layout == PAN_AFBC;
+        bool is_afbc = rsrc->layout == PAN_AFBC;
         bool is_zs = rsrc->base.bind & PIPE_BIND_DEPTH_STENCIL;
         unsigned afbc_bit = (is_afbc && !is_zs) ? 1 : 0;
 
+        /* Add the BO to the job so it's retained until the job is done. */
+        struct panfrost_job *job = panfrost_get_job_for_fbo(ctx);
+        panfrost_job_add_bo(job, rsrc->bo);
+
         /* Inject the addresses in, interleaving mip levels, cube faces, and
          * strides in that order */
 
@@ -844,7 +818,7 @@ panfrost_upload_tex(
 
                         if (has_manual_stride) {
                                 view->hw.payload[idx++] =
-                                        rsrc->bo->slices[l].stride;
+                                        rsrc->slices[l].stride;
                         }
                 }
         }
@@ -1028,7 +1002,11 @@ panfrost_emit_for_draw(struct panfrost_context *ctx, bool with_vertex_data)
         struct panfrost_job *job = panfrost_get_job_for_fbo(ctx);
 
         if (with_vertex_data) {
-                panfrost_emit_vertex_data(ctx, job);
+                panfrost_emit_vertex_data(job);
+
+                /* Varyings emitted for -all- geometry */
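+                /* padded_count is the per-instance vertex count after
+                 * hardware padding, so padded_count * instance_count should
+                 * cover every invocation across the instanced draw */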
+                unsigned total_count = ctx->padded_count * ctx->instance_count;
+                panfrost_emit_varying_descriptor(ctx, total_count);
         }
 
         bool msaa = ctx->rasterizer->base.multisample;
@@ -1093,9 +1071,14 @@ panfrost_emit_for_draw(struct panfrost_context *ctx, bool with_vertex_data)
                 COPY(midgard1.unknown2);
 
 #undef COPY
+
+                /* Get blending setup */
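+                /* (render target 0; per-RT blending for MRT presumably
+                 * needs this per colour buffer) */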
+                struct panfrost_blend_final blend =
+                        panfrost_get_blend_for_context(ctx, 0);
+
                 /* If there is a blend shader, work registers are shared */
 
-                if (ctx->blend->has_blend_shader)
+                if (blend.is_shader)
                         ctx->fragment_shader_core.midgard1.work_count = /*MAX2(ctx->fragment_shader_core.midgard1.work_count, ctx->blend->blend_work_count)*/16;
 
                 /* Set late due to depending on render state */
@@ -1134,18 +1117,19 @@ panfrost_emit_for_draw(struct panfrost_context *ctx, bool with_vertex_data)
                /* Check if we're using the default blend descriptor (fast path) */
 
                bool no_blending =
-                       !ctx->blend->has_blend_shader &&
-                       (ctx->blend->equation.rgb_mode == 0x122) &&
-                       (ctx->blend->equation.alpha_mode == 0x122) &&
-                       (ctx->blend->equation.color_mask == 0xf);
+                       !blend.is_shader &&
+                       (blend.equation.equation->rgb_mode == 0x122) &&
+                       (blend.equation.equation->alpha_mode == 0x122) &&
+                       (blend.equation.equation->color_mask == 0xf);
 
                 /* Even on MFBD, the shader descriptor gets blend shaders. It's
                  * *also* copied to the blend_meta appended (by convention),
                  * but this is the field actually read by the hardware. (Or
                  * maybe both are read...?) */
 
-                if (ctx->blend->has_blend_shader) {
-                        ctx->fragment_shader_core.blend.shader = ctx->blend->blend_shader;
+                if (blend.is_shader) {
+                        ctx->fragment_shader_core.blend.shader =
+                                blend.shader.gpu;
                 } else {
                         ctx->fragment_shader_core.blend.shader = 0;
                 }
@@ -1156,9 +1140,11 @@ panfrost_emit_for_draw(struct panfrost_context *ctx, bool with_vertex_data)
                          * additionally need to signal CAN_DISCARD for nontrivial blend
                          * modes (so we're able to read back the destination buffer) */
 
-                        if (!ctx->blend->has_blend_shader) {
-                                ctx->fragment_shader_core.blend.equation = ctx->blend->equation;
-                                ctx->fragment_shader_core.blend.constant = ctx->blend->constant;
+                        if (!blend.is_shader) {
+                                ctx->fragment_shader_core.blend.equation =
+                                        *blend.equation.equation;
+                                ctx->fragment_shader_core.blend.constant =
+                                        blend.equation.constant;
                         }
 
                         if (!no_blending) {
@@ -1177,13 +1163,13 @@ panfrost_emit_for_draw(struct panfrost_context *ctx, bool with_vertex_data)
 
                         unsigned blend_count = 0x200;
 
-                        if (ctx->blend->has_blend_shader) {
+                        if (blend.is_shader) {
                                 /* For a blend shader, the bottom nibble corresponds to
                                  * the number of work registers used, which signals the
                                  * -existence- of a blend shader */
 
-                                assert(ctx->blend->blend_work_count >= 2);
-                                blend_count |= MIN2(ctx->blend->blend_work_count, 3);
+                                assert(blend.shader.work_count >= 2);
+                                blend_count |= MIN2(blend.shader.work_count, 3);
                         } else {
                                 /* Otherwise, the bottom bit simply specifies if
                                  * blending (anything other than REPLACE) is enabled */
@@ -1213,13 +1199,13 @@ panfrost_emit_for_draw(struct panfrost_context *ctx, bool with_vertex_data)
                                  * native Midgard ops for helping here, but
                                  * they're not well-understood yet. */
 
-                                assert(!(is_srgb && ctx->blend->has_blend_shader));
+                                assert(!(is_srgb && blend.is_shader));
 
-                                if (ctx->blend->has_blend_shader) {
-                                        rts[i].blend.shader = ctx->blend->blend_shader;
+                                if (blend.is_shader) {
+                                        rts[i].blend.shader = blend.shader.gpu;
                                 } else {
-                                        rts[i].blend.equation = ctx->blend->equation;
-                                        rts[i].blend.constant = ctx->blend->constant;
+                                        rts[i].blend.equation = *blend.equation.equation;
+                                        rts[i].blend.constant = blend.equation.constant;
                                 }
                         }
 
@@ -1314,7 +1300,7 @@ panfrost_emit_for_draw(struct panfrost_context *ctx, bool with_vertex_data)
                         mali_ptr gpu = panfrost_map_constant_buffer_gpu(ctx, buf, ubo);
 
                         unsigned bytes_per_field = 16;
-                        unsigned aligned = ALIGN(sz, bytes_per_field);
+                        unsigned aligned = ALIGN_POT(sz, bytes_per_field);
                         unsigned fields = aligned / bytes_per_field;
 
                         ubos[ubo].size = MALI_POSITIVE(fields);
@@ -1340,10 +1326,10 @@ panfrost_emit_for_draw(struct panfrost_context *ctx, bool with_vertex_data)
                  * should work, but in practice causes issues when we're not
                  * explicitly trying to scissor */
 
-                .clip_minx = -inff,
-                .clip_miny = -inff,
-                .clip_maxx = inff,
-                .clip_maxy = inff,
+                .clip_minx = -INFINITY,
+                .clip_miny = -INFINITY,
+                .clip_maxx = INFINITY,
+                .clip_maxy = INFINITY,
 
                 .clip_minz = 0.0,
                 .clip_maxz = 1.0,
@@ -1481,7 +1467,7 @@ panfrost_draw_wallpaper(struct pipe_context *pipe)
 {
        struct panfrost_context *ctx = pan_context(pipe);
 
-       /* Nothing to reload? */
+       /* Nothing to reload? TODO: MRT wallpapers */
        if (ctx->pipe_framebuffer.cbufs[0] == NULL)
                return;
 
@@ -1491,7 +1477,7 @@ panfrost_draw_wallpaper(struct pipe_context *pipe)
         struct panfrost_resource *rsrc = pan_resource(surf->texture);
         unsigned level = surf->u.tex.level;
 
-        if (!rsrc->bo->slices[level].initialized)
+        if (!rsrc->slices[level].initialized)
                 return;
 
         /* Save the batch */
@@ -1580,9 +1566,11 @@ panfrost_get_index_buffer_mapped(struct panfrost_context *ctx, const struct pipe
         struct panfrost_resource *rsrc = (struct panfrost_resource *) (info->index.resource);
 
         off_t offset = info->start * info->index_size;
+        struct panfrost_job *batch = panfrost_get_job_for_fbo(ctx);
 
         if (!info->has_user_indices) {
                 /* Only resources can be directly mapped */
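+                /* Track the index BO on the batch so it's retained until
+                 * the job is done */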
+                panfrost_job_add_bo(batch, rsrc->bo);
                 return rsrc->bo->gpu + offset;
         } else {
                 /* Otherwise, we need to upload to transient memory */
@@ -1657,6 +1645,7 @@ panfrost_draw_vbo(
         ctx->payload_tiler.prefix.draw_mode = g2m_draw_mode(mode);
 
         ctx->vertex_count = info->count;
+        ctx->instance_count = info->instance_count;
 
         /* For non-indexed draws, they're the same */
         unsigned vertex_count = ctx->vertex_count;
@@ -1673,9 +1662,20 @@ panfrost_draw_vbo(
 
         /* For higher amounts of vertices (greater than what fits in a 16-bit
          * short), the other value is needed, otherwise there will be bizarre
-         * rendering artefacts. It's not clear what these values mean yet. */
+         * rendering artefacts. It's not clear what these values mean yet. This
+         * change is also needed for instancing and sometimes for points (perhaps
+         * related to dynamically setting gl_PointSize) */
+
+        bool is_points = mode == PIPE_PRIM_POINTS;
+        bool many_verts = ctx->vertex_count > 0xFFFF;
+        bool instanced = ctx->instance_count > 1;
 
-        draw_flags |= (mode == PIPE_PRIM_POINTS || ctx->vertex_count > 65535) ? 0x3000 : 0x18000;
+        draw_flags |= (is_points || many_verts || instanced) ? 0x3000 : 0x18000;
+
+        /* This doesn't make much sense */
+        if (mode == PIPE_PRIM_LINE_STRIP) {
+                draw_flags |= 0x800;
+        }
 
         if (info->index_size) {
                 /* Calculate the min/max index used so we can figure out how
@@ -1721,11 +1721,42 @@ panfrost_draw_vbo(
         panfrost_pack_work_groups_fused(
                         &ctx->payload_vertex.prefix,
                         &ctx->payload_tiler.prefix,
-                        1, vertex_count, 1,
+                        1, vertex_count, info->instance_count,
                         1, 1, 1);
 
         ctx->payload_tiler.prefix.unknown_draw = draw_flags;
 
+        /* Encode the padded vertex count */
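+        /* The hardware appears to want this as an odd factor times a power
+         * of two (e.g. 24 = 3 << 3), split across the instance_shift and
+         * instance_odd fields of both payloads; panfrost_padded_vertex_count
+         * chooses the split and pan_expand_shift_odd recombines it for
+         * ctx->padded_count */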
+
+        if (info->instance_count > 1) {
+                /* Triangles have non-even vertex counts so they change how
+                 * padding works internally */
+
+                bool is_triangle =
+                        mode == PIPE_PRIM_TRIANGLES ||
+                        mode == PIPE_PRIM_TRIANGLE_STRIP ||
+                        mode == PIPE_PRIM_TRIANGLE_FAN;
+
+                struct pan_shift_odd so =
+                        panfrost_padded_vertex_count(vertex_count, !is_triangle);
+
+                ctx->payload_vertex.instance_shift = so.shift;
+                ctx->payload_tiler.instance_shift = so.shift;
+
+                ctx->payload_vertex.instance_odd = so.odd;
+                ctx->payload_tiler.instance_odd = so.odd;
+
+                ctx->padded_count = pan_expand_shift_odd(so);
+        } else {
+                ctx->padded_count = ctx->vertex_count;
+
+                /* Reset instancing state */
+                ctx->payload_vertex.instance_shift = 0;
+                ctx->payload_vertex.instance_odd = 0;
+                ctx->payload_tiler.instance_shift = 0;
+                ctx->payload_tiler.instance_odd = 0;
+        }
+
         /* Fire off the draw itself */
         panfrost_queue_draw(ctx);
 }
@@ -1807,7 +1838,7 @@ panfrost_create_vertex_elements_state(
         panfrost_allocate_chunk(pan_context(pctx), 0, HEAP_DESCRIPTOR);
 
         for (int i = 0; i < num_elements; ++i) {
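+                /* Attribute records are presumably now emitted one per
+                 * vertex element rather than per vertex buffer, so index by
+                 * the element slot */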
-                so->hw[i].index = elements[i].vertex_buffer_index;
+                so->hw[i].index = i;
 
                 enum pipe_format fmt = elements[i].src_format;
                 const struct util_format_description *desc = util_format_description(fmt);
@@ -2180,7 +2211,7 @@ panfrost_create_sampler_view(
 
         unsigned usage2_layout = 0x10;
 
-        switch (prsrc->bo->layout) {
+        switch (prsrc->layout) {
                 case PAN_AFBC:
                         usage2_layout |= 0x8 | 0x4;
                         break;
@@ -2203,9 +2234,9 @@ panfrost_create_sampler_view(
         unsigned first_level = template->u.tex.first_level;
         unsigned last_level = template->u.tex.last_level;
 
-        if (prsrc->bo->layout == PAN_LINEAR) {
+        if (prsrc->layout == PAN_LINEAR) {
                 for (unsigned l = first_level; l <= last_level; ++l) {
-                        unsigned actual_stride = prsrc->bo->slices[l].stride;
+                        unsigned actual_stride = prsrc->slices[l].stride;
                         unsigned width = u_minify(texture->width0, l);
                         unsigned comp_stride = width * bytes_per_pixel;
 
@@ -2310,23 +2341,27 @@ panfrost_set_framebuffer_state(struct pipe_context *pctx,
         ctx->pipe_framebuffer.width = fb->width;
         ctx->pipe_framebuffer.height = fb->height;
 
+        struct pipe_surface *zb = fb->zsbuf;
+        bool needs_reattach = false;
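+        /* Track whether any colour or Z/S attachment actually changed, so
+         * the framebuffer descriptor below is re-emitted and re-attached at
+         * most once */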
+
         for (int i = 0; i < PIPE_MAX_COLOR_BUFS; i++) {
                 struct pipe_surface *cb = i < fb->nr_cbufs ? fb->cbufs[i] : NULL;
 
                 /* check if changing cbuf */
                 if (ctx->pipe_framebuffer.cbufs[i] == cb) continue;
 
-                if (cb && (i != 0)) {
-                        DBG("XXX: Multiple render targets not supported before t7xx!\n");
-                        assert(0);
-                }
-
                 /* assign new */
                 pipe_surface_reference(&ctx->pipe_framebuffer.cbufs[i], cb);
 
-                if (!cb)
-                        continue;
+                needs_reattach |= (cb != NULL);
+        }
+
+        if (ctx->pipe_framebuffer.zsbuf != zb) {
+                pipe_surface_reference(&ctx->pipe_framebuffer.zsbuf, zb);
+                needs_reattach |= (zb != NULL);
+        }
 
+        if (needs_reattach) {
                 if (ctx->require_sfbd)
                         ctx->vt_framebuffer_sfbd = panfrost_emit_sfbd(ctx, ~0);
                 else
@@ -2334,100 +2369,6 @@ panfrost_set_framebuffer_state(struct pipe_context *pctx,
 
                 panfrost_attach_vt_framebuffer(ctx);
         }
-
-        {
-                struct pipe_surface *zb = fb->zsbuf;
-
-                if (ctx->pipe_framebuffer.zsbuf != zb) {
-                        pipe_surface_reference(&ctx->pipe_framebuffer.zsbuf, zb);
-
-                        if (zb) {
-                                if (ctx->require_sfbd)
-                                        ctx->vt_framebuffer_sfbd = panfrost_emit_sfbd(ctx, ~0);
-                                else
-                                        ctx->vt_framebuffer_mfbd = panfrost_emit_mfbd(ctx, ~0);
-
-                                panfrost_attach_vt_framebuffer(ctx);
-                        }
-                }
-        }
-}
-
-static void *
-panfrost_create_blend_state(struct pipe_context *pipe,
-                            const struct pipe_blend_state *blend)
-{
-        struct panfrost_context *ctx = pan_context(pipe);
-        struct panfrost_blend_state *so = rzalloc(ctx, struct panfrost_blend_state);
-        so->base = *blend;
-
-        /* TODO: The following features are not yet implemented */
-        assert(!blend->logicop_enable);
-        assert(!blend->alpha_to_coverage);
-        assert(!blend->alpha_to_one);
-
-        /* Compile the blend state, first as fixed-function if we can */
-
-        if (panfrost_make_fixed_blend_mode(&blend->rt[0], so, blend->rt[0].colormask, &ctx->blend_color))
-                return so;
-
-        /* If we can't, compile a blend shader instead */
-
-        panfrost_make_blend_shader(ctx, so, &ctx->blend_color);
-
-        return so;
-}
-
-static void
-panfrost_bind_blend_state(struct pipe_context *pipe,
-                          void *cso)
-{
-        struct panfrost_context *ctx = pan_context(pipe);
-        struct pipe_blend_state *blend = (struct pipe_blend_state *) cso;
-        struct panfrost_blend_state *pblend = (struct panfrost_blend_state *) cso;
-        ctx->blend = pblend;
-
-        if (!blend)
-                return;
-
-        SET_BIT(ctx->fragment_shader_core.unknown2_4, MALI_NO_DITHER, !blend->dither);
-
-        /* TODO: Attach color */
-
-        /* Shader itself is not dirty, but the shader core is */
-        ctx->dirty |= PAN_DIRTY_FS;
-}
-
-static void
-panfrost_delete_blend_state(struct pipe_context *pipe,
-                            void *blend)
-{
-        struct panfrost_blend_state *so = (struct panfrost_blend_state *) blend;
-
-        if (so->has_blend_shader) {
-                DBG("Deleting blend state leak blend shaders bytecode\n");
-        }
-
-        ralloc_free(blend);
-}
-
-static void
-panfrost_set_blend_color(struct pipe_context *pipe,
-                         const struct pipe_blend_color *blend_color)
-{
-        struct panfrost_context *ctx = pan_context(pipe);
-
-        /* If blend_color is we're unbinding, so ctx->blend_color is now undefined -> nothing to do */
-
-        if (blend_color) {
-                ctx->blend_color = *blend_color;
-
-                /* The blend mode depends on the blend constant color, due to the
-                 * fixed/programmable split. So, we're forced to regenerate the blend
-                 * equation */
-
-                /* TODO: Attach color */
-        }
 }
 
 static void *
@@ -2780,12 +2721,6 @@ panfrost_create_context(struct pipe_screen *screen, void *priv, unsigned flags)
         gallium->delete_sampler_state = panfrost_generic_cso_delete;
         gallium->bind_sampler_states = panfrost_bind_sampler_states;
 
-        gallium->create_blend_state = panfrost_create_blend_state;
-        gallium->bind_blend_state   = panfrost_bind_blend_state;
-        gallium->delete_blend_state = panfrost_delete_blend_state;
-
-        gallium->set_blend_color = panfrost_set_blend_color;
-
         gallium->create_depth_stencil_alpha_state = panfrost_create_depth_stencil_state;
         gallium->bind_depth_stencil_alpha_state   = panfrost_bind_depth_stencil_state;
         gallium->delete_depth_stencil_alpha_state = panfrost_delete_depth_stencil_state;
@@ -2809,6 +2744,7 @@ panfrost_create_context(struct pipe_screen *screen, void *priv, unsigned flags)
         gallium->set_stream_output_targets = panfrost_set_stream_output_targets;
 
         panfrost_resource_context_init(gallium);
+        panfrost_blend_context_init(gallium);
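+        /* (create/bind/delete_blend_state and set_blend_color presumably
+         * get registered in there now, replacing the hooks removed above) */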
 
         panfrost_drm_init_context(ctx);