panfrost: DRY between shader stage setup
[mesa.git] / src / gallium / drivers / panfrost / pan_context.c
index d242c3f90ceab9b57825bc7d7a92460166ad9f81..de6dd38c5566b8d77ed38bc22e952f0dcb172e13 100644 (file)
@@ -26,7 +26,6 @@
 #include <errno.h>
 
 #include "pan_context.h"
-#include "pan_swizzle.h"
 #include "pan_format.h"
 
 #include "util/macros.h"
 #include "util/u_inlines.h"
 #include "util/u_upload_mgr.h"
 #include "util/u_memory.h"
+#include "util/u_vbuf.h"
 #include "util/half_float.h"
+#include "util/u_helpers.h"
+#include "util/u_format.h"
 #include "indices/u_primconvert.h"
 #include "tgsi/tgsi_parse.h"
+#include "util/u_math.h"
 
 #include "pan_screen.h"
 #include "pan_blending.h"
 #include "pan_blend_shaders.h"
 #include "pan_util.h"
-#include "pan_wallpaper.h"
-
-static int performance_counter_number = 0;
-extern const char *pan_counters_base;
+#include "pan_tiler.h"
 
 /* Do not actually send anything to the GPU; merely generate the cmdstream as fast as possible. Disables framebuffer writes */
 //#define DRY_RUN
 
-/* AFBC is enabled on a per-resource basis (AFBC enabling is theoretically
- * independent between color buffers and depth/stencil). To enable, we allocate
- * the AFBC metadata buffer and mark that it is enabled. We do -not- actually
- * edit the fragment job here. This routine should be called ONCE per
- * AFBC-compressed buffer, rather than on every frame. */
-
-static void
-panfrost_enable_afbc(struct panfrost_context *ctx, struct panfrost_resource *rsrc, bool ds)
+static enum mali_job_type
+panfrost_job_type_for_pipe(enum pipe_shader_type type)
 {
-        if (ctx->require_sfbd) {
-                DBG("AFBC not supported yet on SFBD\n");
-                assert(0);
-        }
+        switch (type) {
+                case PIPE_SHADER_VERTEX:
+                        return JOB_TYPE_VERTEX;
 
-        struct pipe_context *gallium = (struct pipe_context *) ctx;
-        struct panfrost_screen *screen = pan_screen(gallium->screen);
-       /* AFBC metadata is 16 bytes per tile */
-        int tile_w = (rsrc->base.width0 + (MALI_TILE_LENGTH - 1)) >> MALI_TILE_SHIFT;
-        int tile_h = (rsrc->base.height0 + (MALI_TILE_LENGTH - 1)) >> MALI_TILE_SHIFT;
-        int bytes_per_pixel = util_format_get_blocksize(rsrc->base.format);
-        int stride = bytes_per_pixel * ALIGN(rsrc->base.width0, 16);
-
-        stride *= 2;  /* TODO: Should this be carried over? */
-        int main_size = stride * rsrc->base.height0;
-        rsrc->bo->afbc_metadata_size = tile_w * tile_h * 16;
+                case PIPE_SHADER_FRAGMENT:
+                        /* Note: JOB_TYPE_FRAGMENT is different.
+                         * JOB_TYPE_FRAGMENT actually executes the
+                         * fragment shader, but JOB_TYPE_TILER is how you
+                         * specify it */
+                        return JOB_TYPE_TILER;
 
-        /* Allocate the AFBC slab itself, large enough to hold the above */
-        screen->driver->allocate_slab(screen, &rsrc->bo->afbc_slab,
-                               (rsrc->bo->afbc_metadata_size + main_size + 4095) / 4096,
-                               true, 0, 0, 0);
+                case PIPE_SHADER_GEOMETRY:
+                        return JOB_TYPE_GEOMETRY;
 
-        rsrc->bo->layout = PAN_AFBC;
+                case PIPE_SHADER_COMPUTE:
+                        return JOB_TYPE_COMPUTE;
 
-        /* Compressed textured reads use a tagged pointer to the metadata */
-
-        rsrc->bo->gpu = rsrc->bo->afbc_slab.gpu | (ds ? 0 : 1);
-        rsrc->bo->cpu = rsrc->bo->afbc_slab.cpu;
+                default:
+                        unreachable("Unsupported shader stage");
+        }
 }
 
 static void
@@ -100,7 +86,7 @@ panfrost_enable_checksum(struct panfrost_context *ctx, struct panfrost_resource
         /* 8 byte checksum per tile */
         rsrc->bo->checksum_stride = tile_w * 8;
         int pages = (((rsrc->bo->checksum_stride * tile_h) + 4095) / 4096);
-        screen->driver->allocate_slab(screen, &rsrc->bo->checksum_slab, pages, false, 0, 0, 0);
+        panfrost_drm_allocate_slab(screen, &rsrc->bo->checksum_slab, pages, false, 0, 0, 0);
 
         rsrc->bo->has_checksum = true;
 }
@@ -118,20 +104,21 @@ panfrost_set_framebuffer_resolution(struct mali_single_framebuffer *fb, int w, i
          * The formula itself was discovered mostly by manual bruteforce and
          * aggressive algebraic simplification. */
 
-        fb->resolution_check = ((w + h) / 3) << 4;
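+        /* e.g. a 1920x1080 framebuffer gives
+         * ((1920 + 1080) / 3) << 4 = 1000 << 4 = 0x3e80 */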
+        fb->tiler_resolution_check = ((w + h) / 3) << 4;
 }
 
 struct mali_single_framebuffer
-panfrost_emit_sfbd(struct panfrost_context *ctx)
+panfrost_emit_sfbd(struct panfrost_context *ctx, unsigned vertex_count)
 {
         struct mali_single_framebuffer framebuffer = {
                 .unknown2 = 0x1f,
                 .format = 0x30000000,
                 .clear_flags = 0x1000,
                 .unknown_address_0 = ctx->scratchpad.gpu,
-                .unknown_address_1 = ctx->misc_0.gpu,
-                .unknown_address_2 = ctx->misc_0.gpu + 40960,
-                .tiler_flags = 0xf0,
+                .tiler_polygon_list = ctx->tiler_polygon_list.gpu,
+                .tiler_polygon_list_body = ctx->tiler_polygon_list.gpu + 40960,
+                .tiler_hierarchy_mask = 0xF0,
+                .tiler_flags = 0x0,
                 .tiler_heap_free = ctx->tiler_heap.gpu,
                 .tiler_heap_end = ctx->tiler_heap.gpu + ctx->tiler_heap.size,
         };
@@ -142,36 +129,16 @@ panfrost_emit_sfbd(struct panfrost_context *ctx)
 }
 
 struct bifrost_framebuffer
-panfrost_emit_mfbd(struct panfrost_context *ctx)
+panfrost_emit_mfbd(struct panfrost_context *ctx, unsigned vertex_count)
 {
+        unsigned width = ctx->pipe_framebuffer.width;
+        unsigned height = ctx->pipe_framebuffer.height;
+
         struct bifrost_framebuffer framebuffer = {
-                /* It is not yet clear what tiler_meta means or how it's
-                 * calculated, but we can tell the lower 32-bits are a
-                 * (monotonically increasing?) function of tile count and
-                 * geometry complexity; I suspect it defines a memory size of
-                 * some kind? for the tiler. It's really unclear at the
-                 * moment... but to add to the confusion, the hardware is happy
-                 * enough to accept a zero in this field, so we don't even have
-                 * to worry about it right now.
-                 *
-                 * The byte (just after the 32-bit mark) is much more
-                 * interesting. The higher nibble I've only ever seen as 0xF,
-                 * but the lower one I've seen as 0x0 or 0xF, and it's not
-                 * obvious what the difference is. But what -is- obvious is
-                 * that when the lower nibble is zero, performance is severely
-                 * degraded compared to when the lower nibble is set.
-                 * Evidently, that nibble enables some sort of fast path,
-                 * perhaps relating to caching or tile flush? Regardless, at
-                 * this point there's no clear reason not to set it, aside from
-                 * substantially increased memory requirements (of the misc_0
-                 * buffer) */
-
-                .tiler_meta = ((uint64_t) 0xff << 32) | 0x0,
-
-                .width1 = MALI_POSITIVE(ctx->pipe_framebuffer.width),
-                .height1 = MALI_POSITIVE(ctx->pipe_framebuffer.height),
-                .width2 = MALI_POSITIVE(ctx->pipe_framebuffer.width),
-                .height2 = MALI_POSITIVE(ctx->pipe_framebuffer.height),
+                .width1 = MALI_POSITIVE(width),
+                .height1 = MALI_POSITIVE(height),
+                .width2 = MALI_POSITIVE(width),
+                .height2 = MALI_POSITIVE(height),
 
                 .unk1 = 0x1080,
 
@@ -181,27 +148,53 @@ panfrost_emit_mfbd(struct panfrost_context *ctx)
 
                 .unknown2 = 0x1f,
 
-                /* Corresponds to unknown_address_X of SFBD */
                 .scratchpad = ctx->scratchpad.gpu,
-                .tiler_scratch_start  = ctx->misc_0.gpu,
-
-                /* The constant added here is, like the lower word of
-                 * tiler_meta, (loosely) another product of framebuffer size
-                 * and geometry complexity. It must be sufficiently large for
-                 * the tiler_meta fast path to work; if it's too small, there
-                 * will be DATA_INVALID_FAULTs. Conversely, it must be less
-                 * than the total size of misc_0, or else there's no room. It's
-                 * possible this constant configures a partition between two
-                 * parts of misc_0? We haven't investigated the functionality,
-                 * as these buffers are internally used by the hardware
-                 * (presumably by the tiler) but not seemingly touched by the driver
-                 */
+        };
+
+        framebuffer.tiler_hierarchy_mask =
+                panfrost_choose_hierarchy_mask(width, height, vertex_count);
+
+        /* Compute the polygon header size and use that to offset the body */
+
+        unsigned header_size = panfrost_tiler_header_size(
+                        width, height, framebuffer.tiler_hierarchy_mask);
+
+        unsigned body_size = panfrost_tiler_body_size(
+                        width, height, framebuffer.tiler_hierarchy_mask);
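+        /* The polygon list is laid out as [header | body]: the body starts
+         * header_size bytes in, and the backing buffer must be large enough
+         * for the sum of the two (asserted below) */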
+
+        /* Sanity check */
+
+        unsigned total_size = header_size + body_size;
+
+        if (framebuffer.tiler_hierarchy_mask) {
+                assert(ctx->tiler_polygon_list.size >= total_size);
+
+                /* Specify allocated tiler structures */
+                framebuffer.tiler_polygon_list = ctx->tiler_polygon_list.gpu;
+
+                /* Allow the entire tiler heap */
+                framebuffer.tiler_heap_start = ctx->tiler_heap.gpu;
+                framebuffer.tiler_heap_end =
+                        ctx->tiler_heap.gpu + ctx->tiler_heap.size;
+        } else {
+                /* The tiler is disabled, so don't allow the tiler heap */
+                framebuffer.tiler_heap_start = ctx->tiler_heap.gpu;
+                framebuffer.tiler_heap_end = framebuffer.tiler_heap_start;
+
+                /* Use a dummy polygon list */
+                framebuffer.tiler_polygon_list = ctx->tiler_dummy.gpu;
+
+                /* Also set what appears to be a "tiler disabled" flag */
+                framebuffer.tiler_hierarchy_mask |= 0x1000;
+        }
+
+        framebuffer.tiler_polygon_list_body =
+                framebuffer.tiler_polygon_list + header_size;
+
+        framebuffer.tiler_polygon_list_size = total_size;
 
-                .tiler_scratch_middle = ctx->misc_0.gpu + 0xf0000,
 
-                .tiler_heap_start = ctx->tiler_heap.gpu,
-                .tiler_heap_end = ctx->tiler_heap.gpu + ctx->tiler_heap.size,
-        };
 
         return framebuffer;
 }
@@ -224,13 +217,6 @@ panfrost_is_scanout(struct panfrost_context *ctx)
                ctx->pipe_framebuffer.cbufs[0]->texture->bind & PIPE_BIND_SHARED;
 }
 
-/* Maps float 0.0-1.0 to int 0x00-0xFF */
-static uint8_t
-normalised_float_to_u8(float f)
-{
-        return (uint8_t) (int) (f * 255.0f);
-}
-
 static void
 panfrost_clear(
         struct pipe_context *pipe,
@@ -241,57 +227,13 @@ panfrost_clear(
         struct panfrost_context *ctx = pan_context(pipe);
         struct panfrost_job *job = panfrost_get_job_for_fbo(ctx);
 
-        if (buffers & PIPE_CLEAR_COLOR) {
-                /* Alpha clear only meaningful without alpha channel, TODO less ad hoc */
-                bool has_alpha = util_format_has_alpha(ctx->pipe_framebuffer.cbufs[0]->format);
-                float clear_alpha = has_alpha ? color->f[3] : 1.0f;
-
-                uint32_t packed_color =
-                        (normalised_float_to_u8(clear_alpha) << 24) |
-                        (normalised_float_to_u8(color->f[2]) << 16) |
-                        (normalised_float_to_u8(color->f[1]) <<  8) |
-                        (normalised_float_to_u8(color->f[0]) <<  0);
-
-                job->clear_color = packed_color;
-
-        }
-
-        if (buffers & PIPE_CLEAR_DEPTH) {
-                job->clear_depth = depth;
-        }
-
-        if (buffers & PIPE_CLEAR_STENCIL) {
-                job->clear_stencil = stencil;
-        }
-
-        job->clear |= buffers;
+        panfrost_job_clear(ctx, job, buffers, color, depth, stencil);
 }
 
 static mali_ptr
 panfrost_attach_vt_mfbd(struct panfrost_context *ctx)
 {
-        /* MFBD needs a sequential semi-render target upload, but what exactly this is, is beyond me for now */
-        struct bifrost_render_target rts_list[] = {
-                {
-                        .chunknown = {
-                                .unk = 0x30005,
-                        },
-                        .framebuffer = ctx->misc_0.gpu,
-                        .zero2 = 0x3,
-                },
-        };
-
-        /* Allocate memory for the three components */
-        int size = 1024 + sizeof(ctx->vt_framebuffer_mfbd) + sizeof(rts_list);
-        struct panfrost_transfer transfer = panfrost_allocate_transient(ctx, size);
-
-        /* Opaque 1024-block */
-        rts_list[0].chunknown.pointer = transfer.gpu;
-
-        memcpy(transfer.cpu + 1024, &ctx->vt_framebuffer_mfbd, sizeof(ctx->vt_framebuffer_mfbd));
-        memcpy(transfer.cpu + 1024 + sizeof(ctx->vt_framebuffer_mfbd), rts_list, sizeof(rts_list));
-
-        return (transfer.gpu + 1024) | MALI_MFBD;
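+        /* Note the framebuffer descriptor kind (SFBD/MFBD) is tagged into
+         * the low bits of the GPU pointer */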
+        return panfrost_upload_transient(ctx, &ctx->vt_framebuffer_mfbd, sizeof(ctx->vt_framebuffer_mfbd)) | MALI_MFBD;
 }
 
 static mali_ptr
@@ -325,9 +267,9 @@ panfrost_invalidate_frame(struct panfrost_context *ctx)
                 ctx->cmdstream_i = 0;
 
         if (ctx->require_sfbd)
-                ctx->vt_framebuffer_sfbd = panfrost_emit_sfbd(ctx);
+                ctx->vt_framebuffer_sfbd = panfrost_emit_sfbd(ctx, ~0);
         else
-                ctx->vt_framebuffer_mfbd = panfrost_emit_mfbd(ctx);
+                ctx->vt_framebuffer_mfbd = panfrost_emit_mfbd(ctx, ~0);
 
         /* Reset varyings allocated */
         ctx->varying_height = 0;
@@ -400,8 +342,7 @@ translate_tex_wrap(enum pipe_tex_wrap w)
                 return MALI_WRAP_MIRRORED_REPEAT;
 
         default:
-                assert(0);
-                return 0;
+                unreachable("Invalid wrap");
         }
 }
 
@@ -416,8 +357,7 @@ translate_tex_filter(enum pipe_tex_filter f)
                 return MALI_LINEAR;
 
         default:
-                assert(0);
-                return 0;
+                unreachable("Invalid filter");
         }
 }
 
@@ -454,10 +394,10 @@ panfrost_translate_compare_func(enum pipe_compare_func in)
 
         case PIPE_FUNC_ALWAYS:
                 return MALI_FUNC_ALWAYS;
-        }
 
-        assert (0);
-        return 0; /* Unreachable */
+        default:
+                unreachable("Invalid func");
+        }
 }
 
 static unsigned
@@ -487,10 +427,10 @@ panfrost_translate_alt_compare_func(enum pipe_compare_func in)
 
         case PIPE_FUNC_ALWAYS:
                 return MALI_ALT_FUNC_ALWAYS;
-        }
 
-        assert (0);
-        return 0; /* Unreachable */
+        default:
+                unreachable("Invalid alt func");
+        }
 }
 
 static unsigned
@@ -520,10 +460,10 @@ panfrost_translate_stencil_op(enum pipe_stencil_op in)
 
         case PIPE_STENCIL_OP_INVERT:
                 return MALI_STENCIL_INVERT;
-        }
 
-        assert (0);
-        return 0; /* Unreachable */
+        default:
+                unreachable("Invalid stencil op");
+        }
 }
 
 static void
@@ -581,32 +521,15 @@ panfrost_default_shader_backend(struct panfrost_context *ctx)
  * vertex jobs. */
 
 struct panfrost_transfer
-panfrost_vertex_tiler_job(struct panfrost_context *ctx, bool is_tiler, bool is_elided_tiler)
+panfrost_vertex_tiler_job(struct panfrost_context *ctx, bool is_tiler)
 {
-        /* Each draw call corresponds to two jobs, and we want to offset to leave room for the set-value job */
-        int draw_job_index = 1 + (2 * ctx->draw_count);
-
         struct mali_job_descriptor_header job = {
                 .job_type = is_tiler ? JOB_TYPE_TILER : JOB_TYPE_VERTEX,
-                .job_index = draw_job_index + (is_tiler ? 1 : 0),
 #ifdef __LP64__
                 .job_descriptor_size = 1,
 #endif
         };
 
-        /* Only non-elided tiler jobs have dependencies which are known at this point */
-
-        if (is_tiler && !is_elided_tiler) {
-                /* Tiler jobs depend on vertex jobs */
-
-                job.job_dependency_index_1 = draw_job_index;
-
-                /* Tiler jobs also depend on the previous tiler job */
-
-                if (ctx->draw_count)
-                        job.job_dependency_index_2 = draw_job_index - 1;
-        }
-
         struct midgard_payload_vertex_tiler *payload = is_tiler ? &ctx->payload_tiler : &ctx->payload_vertex;
 
         /* There's some padding hacks on 32-bit */
@@ -617,36 +540,12 @@ panfrost_vertex_tiler_job(struct panfrost_context *ctx, bool is_tiler, bool is_e
         int offset = 4;
 #endif
         struct panfrost_transfer transfer = panfrost_allocate_transient(ctx, sizeof(job) + sizeof(*payload));
+
         memcpy(transfer.cpu, &job, sizeof(job));
         memcpy(transfer.cpu + sizeof(job) - offset, payload, sizeof(*payload));
         return transfer;
 }
 
-/* Generates a set value job. It's unclear what exactly this does, why it's
- * necessary, and when to call it. */
-
-static void
-panfrost_set_value_job(struct panfrost_context *ctx)
-{
-        struct mali_job_descriptor_header job = {
-                .job_type = JOB_TYPE_SET_VALUE,
-                .job_descriptor_size = 1,
-                .job_index = 1 + (2 * ctx->draw_count),
-        };
-
-        struct mali_payload_set_value payload = {
-                .out = ctx->misc_0.gpu,
-                .unknown = 0x3,
-        };
-
-        struct panfrost_transfer transfer = panfrost_allocate_transient(ctx, sizeof(job) + sizeof(payload));
-        memcpy(transfer.cpu, &job, sizeof(job));
-        memcpy(transfer.cpu + sizeof(job), &payload, sizeof(payload));
-        
-        ctx->u_set_value_job = (struct mali_job_descriptor_header *) transfer.cpu;
-        ctx->set_value_job = transfer.gpu;
-}
-
 static mali_ptr
 panfrost_emit_varyings(
                 struct panfrost_context *ctx,
@@ -683,6 +582,7 @@ panfrost_emit_varying_descriptor(
 
         struct panfrost_shader_state *vs = &ctx->vs->variants[ctx->vs->active_variant];
         struct panfrost_shader_state *fs = &ctx->fs->variants[ctx->fs->active_variant];
+        unsigned int num_gen_varyings = 0;
 
         /* Allocate the varying descriptor */
 
@@ -692,6 +592,65 @@ panfrost_emit_varying_descriptor(
         struct panfrost_transfer trans = panfrost_allocate_transient(ctx,
                         vs_size + fs_size);
 
+        /*
+         * Assign ->src_offset now that we know about all the general purpose
+         * varyings that will be used by the fragment and vertex shaders.
+         */
+        for (unsigned i = 0; i < vs->tripipe->varying_count; i++) {
+                /*
+                 * General purpose varyings have ->index set to 0, skip other
+                 * entries.
+                 */
+                if (vs->varyings[i].index)
+                        continue;
+
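+                /* Each general purpose varying occupies a 16-byte (vec4)
+                 * slot, so varying number n lands at byte offset 16n */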
+                vs->varyings[i].src_offset = 16 * (num_gen_varyings++);
+        }
+
+        for (unsigned i = 0; i < fs->tripipe->varying_count; i++) {
+                unsigned j;
+
+                /* If we have a point sprite replacement, handle that here. We
+                 * have to translate the location first. TODO: flip y in the
+                 * shader. We're already keying ... just time crunch ... */
+
+                unsigned loc = fs->varyings_loc[i];
+                unsigned pnt_loc =
+                        (loc >= VARYING_SLOT_VAR0) ? (loc - VARYING_SLOT_VAR0) :
+                        (loc == VARYING_SLOT_PNTC) ? 8 :
+                        ~0;
+
+                if (~pnt_loc && fs->point_sprite_mask & (1 << pnt_loc)) {
+                        /* gl_PointCoord index by convention */
+                        fs->varyings[i].index = 3;
+                        fs->reads_point_coord = true;
+
+                        /* Swizzle out the z/w to 0/1 */
+                        fs->varyings[i].format = MALI_RG16F;
+                        fs->varyings[i].swizzle =
+                                panfrost_get_default_swizzle(2);
+
+                        continue;
+                }
+
+                if (fs->varyings[i].index)
+                        continue;
+
+                /*
+                 * Reuse the src_offset of the matching VS general-purpose
+                 * varying if one exists; otherwise allocate a new slot.
+                 */
+                for (j = 0; j < vs->tripipe->varying_count; j++) {
+                        if (fs->varyings_loc[i] == vs->varyings_loc[j])
+                                break;
+                }
+
+                if (j < vs->tripipe->varying_count)
+                        fs->varyings[i].src_offset = vs->varyings[j].src_offset;
+                else
+                        fs->varyings[i].src_offset = 16 * (num_gen_varyings++);
+        }
+
         memcpy(trans.cpu, vs->varyings, vs_size);
         memcpy(trans.cpu + vs_size, fs->varyings, fs_size);
 
@@ -702,11 +661,8 @@ panfrost_emit_varying_descriptor(
         union mali_attr varyings[PIPE_MAX_ATTRIBS];
         unsigned idx = 0;
 
-        /* General varyings -- use the VS's, since those are more likely to be
-         * accurate on desktop */
-
-        panfrost_emit_varyings(ctx, &varyings[idx++],
-                        vs->general_varying_stride, invocation_count);
+        panfrost_emit_varyings(ctx, &varyings[idx++], num_gen_varyings * 16,
+                               invocation_count);
 
         /* fp32 vec4 gl_Position */
         ctx->payload_tiler.postfix.position_varying =
@@ -731,59 +687,367 @@ panfrost_emit_varying_descriptor(
         ctx->payload_tiler.postfix.varyings = varyings_p;
 }
 
+static mali_ptr
+panfrost_vertex_buffer_address(struct panfrost_context *ctx, unsigned i)
+{
+        struct pipe_vertex_buffer *buf = &ctx->vertex_buffers[i];
+        struct panfrost_resource *rsrc = (struct panfrost_resource *) (buf->buffer.resource);
+
+        return rsrc->bo->gpu + buf->buffer_offset;
+}
+
 /* Emits attributes and varying descriptors, which should be called every draw,
  * excepting some obscure circumstances */
 
 static void
-panfrost_emit_vertex_data(struct panfrost_context *ctx)
+panfrost_emit_vertex_data(struct panfrost_context *ctx, struct panfrost_job *job)
 {
-        /* TODO: Only update the dirtied buffers */
+        /* Staged mali_attr, and index into them. Note that i != k in
+         * general, depending on the vertex buffer mask */
         union mali_attr attrs[PIPE_MAX_ATTRIBS];
+        unsigned k = 0;
 
         unsigned invocation_count = MALI_NEGATIVE(ctx->payload_tiler.prefix.invocation_count);
 
-        for (int i = 0; i < ctx->vertex_buffer_count; ++i) {
+        for (int i = 0; i < ARRAY_SIZE(ctx->vertex_buffers); ++i) {
+                if (!(ctx->vb_mask & (1 << i))) continue;
+
                 struct pipe_vertex_buffer *buf = &ctx->vertex_buffers[i];
                 struct panfrost_resource *rsrc = (struct panfrost_resource *) (buf->buffer.resource);
 
-                /* Let's figure out the layout of the attributes in memory so
-                 * we can be smart about size computation. The idea is to
-                 * figure out the maximum src_offset, which tells us the latest
-                 * spot a vertex could start. Meanwhile, we figure out the size
-                 * of the attribute memory (assuming interleaved
-                 * representation) and tack on the max src_offset for a
-                 * reasonably good upper bound on the size.
-                 *
-                 * Proving correctness is left as an exercise to the reader.
-                 */
+                if (!rsrc) continue;
 
-                unsigned max_src_offset = 0;
+                /* Align to 64 bytes by masking off the lower bits. This
+                 * will be adjusted back when we fixup the src_offset in
+                 * mali_attr_meta */
 
-                for (unsigned j = 0; j < ctx->vertex->num_elements; ++j) {
-                        if (ctx->vertex->pipe[j].vertex_buffer_index != i) continue;
-                        max_src_offset = MAX2(max_src_offset, ctx->vertex->pipe[j].src_offset);
-                }
+                mali_ptr addr = panfrost_vertex_buffer_address(ctx, i) & ~63;
 
                 /* Offset vertex count by draw_start to make sure we upload enough */
-                attrs[i].stride = buf->stride;
-                attrs[i].size = buf->stride * (ctx->payload_vertex.draw_start + invocation_count) + max_src_offset;
+                attrs[k].stride = buf->stride;
+                attrs[k].size = rsrc->base.width0;
 
-                /* Vertex elements are -already- GPU-visible, at
-                 * rsrc->gpu. However, attribute buffers must be 64 aligned. If
-                 * it is not, for now we have to duplicate the buffer. */
+                panfrost_job_add_bo(job, rsrc->bo);
+                attrs[k].elements = addr | MALI_ATTR_LINEAR;
 
-                mali_ptr effective_address = (rsrc->bo->gpu + buf->buffer_offset);
+                ++k;
+        }
 
-                if (effective_address & 0x3F) {
-                        attrs[i].elements = panfrost_upload_transient(ctx, rsrc->bo->cpu + buf->buffer_offset, attrs[i].size) | 1;
-                } else {
-                        attrs[i].elements = effective_address | 1;
+        ctx->payload_vertex.postfix.attributes = panfrost_upload_transient(ctx, attrs, k * sizeof(union mali_attr));
+
+        panfrost_emit_varying_descriptor(ctx, invocation_count);
+}
+
+static bool
+panfrost_writes_point_size(struct panfrost_context *ctx)
+{
+        assert(ctx->vs);
+        struct panfrost_shader_state *vs = &ctx->vs->variants[ctx->vs->active_variant];
+
+        return vs->writes_point_size && ctx->payload_tiler.prefix.draw_mode == MALI_POINTS;
+}
+
+/* Stage the attribute descriptors so we can adjust src_offset
+ * to let BOs align nicely */
+
+static void
+panfrost_stage_attributes(struct panfrost_context *ctx)
+{
+        struct panfrost_vertex_state *so = ctx->vertex;
+
+        size_t sz = sizeof(struct mali_attr_meta) * so->num_elements;
+        struct panfrost_transfer transfer = panfrost_allocate_transient(ctx, sz);
+        struct mali_attr_meta *target = (struct mali_attr_meta *) transfer.cpu;
+
+        /* Copy as-is for the first pass */
+        memcpy(target, so->hw, sz);
+
+        /* Fixup offsets for the second pass. Recall that the hardware
+         * calculates attribute addresses as:
+         *
+         *      addr = base + (stride * vtx) + src_offset;
+         *
+         * However, on Mali, base must be aligned to 64 bytes, so we
+         * instead let:
+         *
+         *      base' = base & ~63 = base - (base & 63)
+         * 
+         * To compensate when using base' (see emit_vertex_data), we have
+         * to adjust src_offset by the masked off piece:
+         *
+         *      addr' = base' + (stride * vtx) + (src_offset + (base & 63))
+         *            = base - (base & 63) + (stride * vtx) + src_offset + (base & 63)
+         *            = base + (stride * vtx) + src_offset
+         *            = addr;
+         *
+         * QED.
+         */
+
+        for (unsigned i = 0; i < so->num_elements; ++i) {
+                unsigned vbi = so->pipe[i].vertex_buffer_index;
+                mali_ptr addr = panfrost_vertex_buffer_address(ctx, vbi);
+
+                /* Adjust by the masked off bits of the offset */
+                target[i].src_offset += (addr & 63);
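+                /* e.g. a buffer at 0x10025 is emitted with base 0x10000,
+                 * and src_offset grows by 0x25 to compensate */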
+        }
+
+        ctx->payload_vertex.postfix.attribute_meta = transfer.gpu;
+}
+
+static void
+panfrost_upload_sampler_descriptors(struct panfrost_context *ctx)
+{
+        size_t desc_size = sizeof(struct mali_sampler_descriptor);
+
+        for (int t = 0; t <= PIPE_SHADER_FRAGMENT; ++t) {
+                mali_ptr upload = 0;
+
+                if (ctx->sampler_count[t] && ctx->sampler_view_count[t]) {
+                        size_t transfer_size = desc_size * ctx->sampler_count[t];
+
+                        struct panfrost_transfer transfer =
+                                panfrost_allocate_transient(ctx, transfer_size);
+
+                        struct mali_sampler_descriptor *desc =
+                                (struct mali_sampler_descriptor *) transfer.cpu;
+
+                        for (int i = 0; i < ctx->sampler_count[t]; ++i)
+                                desc[i] = ctx->samplers[t][i]->hw;
+
+                        upload = transfer.gpu;
                 }
+
+                if (t == PIPE_SHADER_FRAGMENT)
+                        ctx->payload_tiler.postfix.sampler_descriptor = upload;
+                else if (t == PIPE_SHADER_VERTEX)
+                        ctx->payload_vertex.postfix.sampler_descriptor = upload;
+                else
+                        assert(0);
         }
+}
 
-        ctx->payload_vertex.postfix.attributes = panfrost_upload_transient(ctx, attrs, ctx->vertex_buffer_count * sizeof(union mali_attr));
+/* Computes the address to a texture at a particular slice */
 
-        panfrost_emit_varying_descriptor(ctx, invocation_count);
+static mali_ptr
+panfrost_get_texture_address(
+                struct panfrost_resource *rsrc,
+                unsigned level, unsigned face)
+{
+        unsigned level_offset = rsrc->bo->slices[level].offset;
+        unsigned face_offset = face * rsrc->bo->cubemap_stride;
+
+        return rsrc->bo->gpu + level_offset + face_offset;
+}
+
+static mali_ptr
+panfrost_upload_tex(
+                struct panfrost_context *ctx,
+                struct panfrost_sampler_view *view)
+{
+        if (!view)
+                return (mali_ptr) NULL;
+
+        struct pipe_sampler_view *pview = &view->base;
+        struct panfrost_resource *rsrc = pan_resource(pview->texture);
+
+        /* Do we interleave an explicit stride with every element? */
+
+        bool has_manual_stride =
+                view->hw.format.usage2 & MALI_TEX_MANUAL_STRIDE;
+
+        /* For easy access */
+
+        assert(pview->target != PIPE_BUFFER);
+        unsigned first_level = pview->u.tex.first_level;
+        unsigned last_level = pview->u.tex.last_level;
+
+        /* Inject the addresses in, interleaving mip levels, cube faces, and
+         * strides in that order */
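+        /* e.g. two levels and two faces with manual strides produce:
+         *   [l0f0, l0f0 stride, l0f1, l0f1 stride,
+         *    l1f0, l1f0 stride, l1f1, l1f1 stride] */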
+
+        unsigned idx = 0;
+
+        for (unsigned l = first_level; l <= last_level; ++l) {
+                for (unsigned f = 0; f < pview->texture->array_size; ++f) {
+                        view->hw.payload[idx++] =
+                                panfrost_get_texture_address(rsrc, l, f);
+
+                        if (has_manual_stride) {
+                                view->hw.payload[idx++] =
+                                        rsrc->bo->slices[l].stride;
+                        }
+                }
+        }
+
+        return panfrost_upload_transient(ctx, &view->hw,
+                        sizeof(struct mali_texture_descriptor));
+}
+
+static void
+panfrost_upload_texture_descriptors(struct panfrost_context *ctx)
+{
+        for (int t = 0; t <= PIPE_SHADER_FRAGMENT; ++t) {
+                mali_ptr trampoline = 0;
+
+                if (ctx->sampler_view_count[t]) {
+                        uint64_t trampolines[PIPE_MAX_SHADER_SAMPLER_VIEWS];
+
+                        for (int i = 0; i < ctx->sampler_view_count[t]; ++i)
+                                trampolines[i] =
+                                        panfrost_upload_tex(ctx, ctx->sampler_views[t][i]);
+
+                        trampoline = panfrost_upload_transient(ctx, trampolines, sizeof(uint64_t) * ctx->sampler_view_count[t]);
+                }
+
+                if (t == PIPE_SHADER_FRAGMENT)
+                        ctx->payload_tiler.postfix.texture_trampoline = trampoline;
+                else if (t == PIPE_SHADER_VERTEX)
+                        ctx->payload_vertex.postfix.texture_trampoline = trampoline;
+                else
+                        assert(0);
+        }
+}
+
+struct sysval_uniform {
+        union {
+                float f[4];
+                int32_t i[4];
+                uint32_t u[4];
+        };
+};
+
+static void panfrost_upload_viewport_scale_sysval(struct panfrost_context *ctx,
+                                                  struct sysval_uniform *uniform)
+{
+        const struct pipe_viewport_state *vp = &ctx->pipe_viewport;
+
+        uniform->f[0] = vp->scale[0];
+        uniform->f[1] = vp->scale[1];
+        uniform->f[2] = vp->scale[2];
+}
+
+static void panfrost_upload_viewport_offset_sysval(struct panfrost_context *ctx,
+                                                   struct sysval_uniform *uniform)
+{
+        const struct pipe_viewport_state *vp = &ctx->pipe_viewport;
+
+        uniform->f[0] = vp->translate[0];
+        uniform->f[1] = vp->translate[1];
+        uniform->f[2] = vp->translate[2];
+}
+
+static void panfrost_upload_txs_sysval(struct panfrost_context *ctx,
+                                       enum pipe_shader_type st,
+                                       unsigned int sysvalid,
+                                       struct sysval_uniform *uniform)
+{
+        unsigned texidx = PAN_SYSVAL_ID_TO_TXS_TEX_IDX(sysvalid);
+        unsigned dim = PAN_SYSVAL_ID_TO_TXS_DIM(sysvalid);
+        bool is_array = PAN_SYSVAL_ID_TO_TXS_IS_ARRAY(sysvalid);
+        struct pipe_sampler_view *tex = &ctx->sampler_views[st][texidx]->base;
+
+        assert(dim);
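+        /* Pack (width, height, depth, layers) into i[], sized by dim; e.g.
+         * a 2D array texture fills i[0..2] with (width, height, layers) */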
+        uniform->i[0] = u_minify(tex->texture->width0, tex->u.tex.first_level);
+
+        if (dim > 1)
+                uniform->i[1] = u_minify(tex->texture->height0,
+                                         tex->u.tex.first_level);
+
+        if (dim > 2)
+                uniform->i[2] = u_minify(tex->texture->depth0,
+                                         tex->u.tex.first_level);
+
+        if (is_array)
+                uniform->i[dim] = tex->texture->array_size;
+}
+
+static void panfrost_upload_sysvals(struct panfrost_context *ctx, void *buf,
+                                    struct panfrost_shader_state *ss,
+                                    enum pipe_shader_type st)
+{
+        struct sysval_uniform *uniforms = (void *)buf;
+
+        for (unsigned i = 0; i < ss->sysval_count; ++i) {
+                int sysval = ss->sysval[i];
+
+                switch (PAN_SYSVAL_TYPE(sysval)) {
+                case PAN_SYSVAL_VIEWPORT_SCALE:
+                        panfrost_upload_viewport_scale_sysval(ctx, &uniforms[i]);
+                        break;
+                case PAN_SYSVAL_VIEWPORT_OFFSET:
+                        panfrost_upload_viewport_offset_sysval(ctx, &uniforms[i]);
+                        break;
+                case PAN_SYSVAL_TEXTURE_SIZE:
+                        panfrost_upload_txs_sysval(ctx, st, PAN_SYSVAL_ID(sysval),
+                                                   &uniforms[i]);
+                        break;
+                default:
+                        unreachable("Invalid sysval");
+                }
+        }
+}
+
+static const void *
+panfrost_map_constant_buffer_cpu(struct panfrost_constant_buffer *buf, unsigned index)
+{
+        struct pipe_constant_buffer *cb = &buf->cb[index];
+        struct panfrost_resource *rsrc = pan_resource(cb->buffer);
+
+        if (rsrc)
+                return rsrc->bo->cpu;
+        else if (cb->user_buffer)
+                return cb->user_buffer;
+        else
+                unreachable("No constant buffer");
+}
+
+static mali_ptr
+panfrost_map_constant_buffer_gpu(
+                struct panfrost_context *ctx,
+                struct panfrost_constant_buffer *buf,
+                unsigned index)
+{
+        struct pipe_constant_buffer *cb = &buf->cb[index];
+        struct panfrost_resource *rsrc = pan_resource(cb->buffer);
+
+        if (rsrc)
+                return rsrc->bo->gpu;
+        else if (cb->user_buffer)
+                return panfrost_upload_transient(ctx, cb->user_buffer, cb->buffer_size);
+        else
+                unreachable("No constant buffer");
+}
+
+/* Compute number of UBOs active (more specifically, compute the highest UBO
+ * number addressable -- if there are gaps, include them in the count anyway).
+ * We always include UBO #0 in the count, since we *need* uniforms enabled for
+ * sysvals. */
+
+static unsigned
+panfrost_ubo_count(struct panfrost_context *ctx, enum pipe_shader_type stage)
+{
+        unsigned mask = ctx->constant_buffer[stage].enabled_mask | 1;
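+        /* e.g. enabled_mask = 0b1100 gives mask = 0b1101, whose leading
+         * zero count is 28, for a count of 4 (UBOs 0-3, gap included) */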
+        return 32 - __builtin_clz(mask);
+}
+
+/* Fixes up a shader state with current state, returning a GPU address to the
+ * patched shader */
+
+static mali_ptr
+panfrost_patch_shader_state(
+                struct panfrost_context *ctx,
+                struct panfrost_shader_state *ss,
+                enum pipe_shader_type stage)
+{
+        ss->tripipe->texture_count = ctx->sampler_view_count[stage];
+        ss->tripipe->sampler_count = ctx->sampler_count[stage];
+
+        ss->tripipe->midgard1.flags = 0x220;
+
+        unsigned ubo_count = panfrost_ubo_count(ctx, stage);
+        ss->tripipe->midgard1.uniform_buffer_count = ubo_count;
+
+        return ss->tripipe_gpu;
 }
 
 /* Go through dirty flags and actualise them in the cmdstream. */
@@ -794,7 +1058,7 @@ panfrost_emit_for_draw(struct panfrost_context *ctx, bool with_vertex_data)
         struct panfrost_job *job = panfrost_get_job_for_fbo(ctx);
 
         if (with_vertex_data) {
-                panfrost_emit_vertex_data(ctx);
+                panfrost_emit_vertex_data(ctx, job);
         }
 
         bool msaa = ctx->rasterizer->base.multisample;
@@ -807,13 +1071,7 @@ panfrost_emit_for_draw(struct panfrost_context *ctx, bool with_vertex_data)
                 SET_BIT(ctx->fragment_shader_core.unknown2_4, MALI_NO_MSAA, !msaa);
         }
 
-        /* Enable job requirements at draw-time */
-
-        if (msaa)
-                job->requirements |= PAN_REQ_MSAA;
-
-        if (ctx->depth_stencil->depth.writemask)
-                job->requirements |= PAN_REQ_DEPTH_WRITE;
+        panfrost_job_set_requirements(ctx, job);
 
         if (ctx->occlusion_query) {
                 ctx->payload_tiler.gl_enables |= MALI_OCCLUSION_QUERY | MALI_OCCLUSION_PRECISE;
@@ -825,34 +1083,19 @@ panfrost_emit_for_draw(struct panfrost_context *ctx, bool with_vertex_data)
 
                 struct panfrost_shader_state *vs = &ctx->vs->variants[ctx->vs->active_variant];
 
-                /* Late shader descriptor assignments */
-
-                vs->tripipe->texture_count = ctx->sampler_view_count[PIPE_SHADER_VERTEX];
-                vs->tripipe->sampler_count = ctx->sampler_count[PIPE_SHADER_VERTEX];
-
-                /* Who knows */
-                vs->tripipe->midgard1.unknown1 = 0x2201;
-
-                ctx->payload_vertex.postfix._shader_upper = vs->tripipe_gpu >> 4;
+                ctx->payload_vertex.postfix._shader_upper =
+                        panfrost_patch_shader_state(ctx, vs, PIPE_SHADER_VERTEX) >> 4;
         }
 
         if (ctx->dirty & (PAN_DIRTY_RASTERIZER | PAN_DIRTY_VS)) {
                 /* Check if we need to link the gl_PointSize varying */
-                assert(ctx->vs);
-                struct panfrost_shader_state *vs = &ctx->vs->variants[ctx->vs->active_variant];
-
-                bool needs_gl_point_size = vs->writes_point_size && ctx->payload_tiler.prefix.draw_mode == MALI_POINTS;
-
-                if (!needs_gl_point_size) {
+                if (!panfrost_writes_point_size(ctx)) {
                         /* If the size is constant, write it out. Otherwise,
                          * don't touch primitive_size (since we would clobber
                          * the pointer there) */
 
                         ctx->payload_tiler.primitive_size.constant = ctx->rasterizer->base.line_width;
                 }
-
-                /* Set the flag for varying (pointer) point size if the shader needs that */
-                SET_BIT(ctx->payload_tiler.prefix.unknown_draw, MALI_DRAW_VARYING_SIZE, needs_gl_point_size);
         }
 
         /* TODO: Maybe dirty track FS, maybe not. For now, it's transient. */
@@ -863,13 +1106,20 @@ panfrost_emit_for_draw(struct panfrost_context *ctx, bool with_vertex_data)
                 assert(ctx->fs);
                 struct panfrost_shader_state *variant = &ctx->fs->variants[ctx->fs->active_variant];
 
+                panfrost_patch_shader_state(ctx, variant, PIPE_SHADER_FRAGMENT);
+
 #define COPY(name) ctx->fragment_shader_core.name = variant->tripipe->name
 
                 COPY(shader);
                 COPY(attribute_count);
                 COPY(varying_count);
+                COPY(texture_count);
+                COPY(sampler_count);
                 COPY(midgard1.uniform_count);
+                COPY(midgard1.uniform_buffer_count);
                 COPY(midgard1.work_count);
+                COPY(midgard1.flags);
                 COPY(midgard1.unknown2);
 
 #undef COPY
@@ -879,12 +1129,21 @@ panfrost_emit_for_draw(struct panfrost_context *ctx, bool with_vertex_data)
                         ctx->fragment_shader_core.midgard1.work_count = /*MAX2(ctx->fragment_shader_core.midgard1.work_count, ctx->blend->blend_work_count)*/16;
 
                 /* Set late due to depending on render state */
-                /* The one at the end seems to mean "1 UBO" */
-                ctx->fragment_shader_core.midgard1.unknown1 = MALI_NO_ALPHA_TO_COVERAGE | 0x200 | 0x2201;
+                unsigned flags = ctx->fragment_shader_core.midgard1.flags;
 
-                /* Assign texture/sample count right before upload */
-                ctx->fragment_shader_core.texture_count = ctx->sampler_view_count[PIPE_SHADER_FRAGMENT];
-                ctx->fragment_shader_core.sampler_count = ctx->sampler_count[PIPE_SHADER_FRAGMENT];
+                /* Depending on whether it's legal in the given shader, we
+                 * try to enable early-z testing (or forward-pixel kill?) */
+
+                if (!variant->can_discard)
+                        flags |= MALI_EARLY_Z;
+
+                /* Any time texturing is used, derivatives are implicitly
+                 * calculated, so we need to enable helper invocations */
+
+                if (ctx->sampler_view_count[PIPE_SHADER_FRAGMENT])
+                        flags |= MALI_HELPER_INVOCATIONS;
+
+                ctx->fragment_shader_core.midgard1.flags = flags;
 
                 /* Assign the stencil refs late */
                 ctx->fragment_shader_core.stencil_front.ref = ctx->stencil_ref.ref_value[0];
@@ -899,9 +1158,7 @@ panfrost_emit_for_draw(struct panfrost_context *ctx, bool with_vertex_data)
 
                 if (variant->can_discard) {
                         ctx->fragment_shader_core.unknown2_3 |= MALI_CAN_DISCARD;
-                        ctx->fragment_shader_core.midgard1.unknown1 &= ~MALI_NO_ALPHA_TO_COVERAGE;
-                        ctx->fragment_shader_core.midgard1.unknown1 |= 0x4000;
-                        ctx->fragment_shader_core.midgard1.unknown1 = 0x4200;
+                        ctx->fragment_shader_core.midgard1.flags |= 0x400;
                 }
 
                /* Check if we're using the default blend descriptor (fast path) */
@@ -912,16 +1169,26 @@ panfrost_emit_for_draw(struct panfrost_context *ctx, bool with_vertex_data)
                        (ctx->blend->equation.alpha_mode == 0x122) &&
                        (ctx->blend->equation.color_mask == 0xf);
 
+                /* Even on MFBD, the shader descriptor gets blend shaders. It's
+                 * *also* copied to the blend_meta appended (by convention),
+                 * but this is the field actually read by the hardware. (Or
+                 * maybe both are read...?) */
+
+                if (ctx->blend->has_blend_shader) {
+                        ctx->fragment_shader_core.blend.shader = ctx->blend->blend_shader;
+                } else {
+                        ctx->fragment_shader_core.blend.shader = 0;
+                }
+
                 if (ctx->require_sfbd) {
                         /* When only a single render target platform is used, the blend
                          * information is inside the shader meta itself. We
                          * additionally need to signal CAN_DISCARD for nontrivial blend
                          * modes (so we're able to read back the destination buffer) */
 
-                        if (ctx->blend->has_blend_shader) {
-                                ctx->fragment_shader_core.blend_shader = ctx->blend->blend_shader;
-                        } else {
-                                memcpy(&ctx->fragment_shader_core.blend_equation, &ctx->blend->equation, sizeof(ctx->blend->equation));
+                        if (!ctx->blend->has_blend_shader) {
+                                ctx->fragment_shader_core.blend.equation = ctx->blend->equation;
+                                ctx->fragment_shader_core.blend.constant = ctx->blend->constant;
                         }
 
                         if (!no_blending) {
@@ -929,7 +1196,7 @@ panfrost_emit_for_draw(struct panfrost_context *ctx, bool with_vertex_data)
                         }
                 }
 
-                size_t size = sizeof(struct mali_shader_meta) + sizeof(struct mali_blend_meta);
+                size_t size = sizeof(struct mali_shader_meta) + sizeof(struct midgard_blend_rt);
                 struct panfrost_transfer transfer = panfrost_allocate_transient(ctx, size);
                 memcpy(transfer.cpu, &ctx->fragment_shader_core, sizeof(struct mali_shader_meta));
 
@@ -938,7 +1205,7 @@ panfrost_emit_for_draw(struct panfrost_context *ctx, bool with_vertex_data)
                 if (!ctx->require_sfbd) {
                         /* Additional blend descriptor tacked on for jobs using MFBD */
 
-                        unsigned blend_count = 0;
+                        unsigned blend_count = 0x200;
 
                         if (ctx->blend->has_blend_shader) {
                                 /* For a blend shader, the bottom nibble corresponds to
@@ -956,164 +1223,139 @@ panfrost_emit_for_draw(struct panfrost_context *ctx, bool with_vertex_data)
                                         blend_count |= 0x1;
                         }
 
-                        /* Second blend equation is always a simple replace */
+                        struct midgard_blend_rt rts[4];
 
-                        uint64_t replace_magic = 0xf0122122;
-                        struct mali_blend_equation replace_mode;
-                        memcpy(&replace_mode, &replace_magic, sizeof(replace_mode));
+                        /* TODO: MRT */
 
-                        struct mali_blend_meta blend_meta[] = {
-                                {
-                                        .unk1 = 0x200 | blend_count,
-                                        .blend_equation_1 = ctx->blend->equation,
-                                        .blend_equation_2 = replace_mode
-                                },
-                        };
+                        for (unsigned i = 0; i < 1; ++i) {
+                                bool is_srgb =
+                                        (ctx->pipe_framebuffer.nr_cbufs > i) &&
+                                        util_format_is_srgb(ctx->pipe_framebuffer.cbufs[i]->format);
 
-                        if (ctx->blend->has_blend_shader)
-                                memcpy(&blend_meta[0].blend_equation_1, &ctx->blend->blend_shader, sizeof(ctx->blend->blend_shader));
-
-                        memcpy(transfer.cpu + sizeof(struct mali_shader_meta), blend_meta, sizeof(blend_meta));
-                }
-        }
-
-        if (ctx->dirty & PAN_DIRTY_VERTEX) {
-                ctx->payload_vertex.postfix.attribute_meta = ctx->vertex->descriptor_ptr;
-        }
+                                rts[i].flags = blend_count;
 
-        if (ctx->dirty & PAN_DIRTY_SAMPLERS) {
-                /* Upload samplers back to back, no padding */
+                                if (is_srgb)
+                                        rts[i].flags |= MALI_BLEND_SRGB;
 
-                for (int t = 0; t <= PIPE_SHADER_FRAGMENT; ++t) {
-                        if (!ctx->sampler_count[t]) continue;
+                                /* TODO: sRGB in blend shaders is currently
+                                 * unimplemented. Contact me (Alyssa) if you're
+                                 * interested in working on this. We have
+                                 * native Midgard ops for helping here, but
+                                 * they're not well-understood yet. */
 
-                        struct panfrost_transfer transfer = panfrost_allocate_transient(ctx, sizeof(struct mali_sampler_descriptor) * ctx->sampler_count[t]);
-                        struct mali_sampler_descriptor *desc = (struct mali_sampler_descriptor *) transfer.cpu;
+                                assert(!(is_srgb && ctx->blend->has_blend_shader));
 
-                        for (int i = 0; i < ctx->sampler_count[t]; ++i) {
-                                desc[i] = ctx->samplers[t][i]->hw;
+                                if (ctx->blend->has_blend_shader) {
+                                        rts[i].blend.shader = ctx->blend->blend_shader;
+                                } else {
+                                        rts[i].blend.equation = ctx->blend->equation;
+                                        rts[i].blend.constant = ctx->blend->constant;
+                                }
                         }
 
-                        if (t == PIPE_SHADER_FRAGMENT)
-                                ctx->payload_tiler.postfix.sampler_descriptor = transfer.gpu;
-                        else if (t == PIPE_SHADER_VERTEX)
-                                ctx->payload_vertex.postfix.sampler_descriptor = transfer.gpu;
-                        else
-                                assert(0);
+                        memcpy(transfer.cpu + sizeof(struct mali_shader_meta), rts, sizeof(rts[0]) * 1);
                 }
         }
 
-        if (ctx->dirty & PAN_DIRTY_TEXTURES) {
-                for (int t = 0; t <= PIPE_SHADER_FRAGMENT; ++t) {
-                        /* Shortcircuit */
-                        if (!ctx->sampler_view_count[t]) continue;
-
-                        uint64_t trampolines[PIPE_MAX_SHADER_SAMPLER_VIEWS];
+        /* We stage to transient, so always dirty.. */
+        panfrost_stage_attributes(ctx);
 
-                        for (int i = 0; i < ctx->sampler_view_count[t]; ++i) {
-                                if (!ctx->sampler_views[t][i])
-                                        continue;
+        if (ctx->dirty & PAN_DIRTY_SAMPLERS)
+                panfrost_upload_sampler_descriptors(ctx);
 
-                                struct pipe_resource *tex_rsrc = ctx->sampler_views[t][i]->base.texture;
-                                struct panfrost_resource *rsrc = (struct panfrost_resource *) tex_rsrc;
+        if (ctx->dirty & PAN_DIRTY_TEXTURES)
+                panfrost_upload_texture_descriptors(ctx);
 
-                                /* Inject the address in. */
-                                for (int l = 0; l <= tex_rsrc->last_level; ++l) {
-                                        ctx->sampler_views[t][i]->hw.swizzled_bitmaps[l] =
-                                                rsrc->bo->gpu + rsrc->bo->slices[l].offset;
-                                }
+        const struct pipe_viewport_state *vp = &ctx->pipe_viewport;
 
-                                trampolines[i] = panfrost_upload_transient(ctx, &ctx->sampler_views[t][i]->hw, sizeof(struct mali_texture_descriptor));
-                        }
+        for (int i = 0; i <= PIPE_SHADER_FRAGMENT; ++i) {
+                struct panfrost_constant_buffer *buf = &ctx->constant_buffer[i];
 
-                        mali_ptr trampoline = panfrost_upload_transient(ctx, trampolines, sizeof(uint64_t) * ctx->sampler_view_count[t]);
+                struct panfrost_shader_state *vs = &ctx->vs->variants[ctx->vs->active_variant];
+                struct panfrost_shader_state *fs = &ctx->fs->variants[ctx->fs->active_variant];
+                struct panfrost_shader_state *ss = (i == PIPE_SHADER_FRAGMENT) ? fs : vs;
 
-                        if (t == PIPE_SHADER_FRAGMENT)
-                                ctx->payload_tiler.postfix.texture_trampoline = trampoline;
-                        else if (t == PIPE_SHADER_VERTEX)
-                                ctx->payload_vertex.postfix.texture_trampoline = trampoline;
-                        else
-                                assert(0);
-                }
-        }
+                /* Uniforms are implicitly UBO #0 */
+                bool has_uniforms = buf->enabled_mask & (1 << 0);
 
-        /* Generate the viewport vector of the form: <width/2, height/2, centerx, centery> */
-        const struct pipe_viewport_state *vp = &ctx->pipe_viewport;
+                /* Allocate room for the sysvals and the uniforms */
+                size_t sys_size = sizeof(float) * 4 * ss->sysval_count;
+                size_t uniform_size = has_uniforms ? (buf->cb[0].buffer_size) : 0;
+                size_t size = sys_size + uniform_size;
+                struct panfrost_transfer transfer = panfrost_allocate_transient(ctx, size);
 
-        /* For flipped-Y buffers (signaled by negative scale), the translate is
-         * flipped as well */
+                /* Upload sysvals requested by the shader */
+                panfrost_upload_sysvals(ctx, transfer.cpu, ss, i);
 
-        bool invert_y = vp->scale[1] < 0.0;
-        float translate_y = vp->translate[1];
+                /* Upload uniforms */
+                if (has_uniforms) {
+                        const void *cpu = panfrost_map_constant_buffer_cpu(buf, 0);
+                        memcpy(transfer.cpu + sys_size, cpu, uniform_size);
+                }
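+                /* The transient buffer is thus laid out as
+                 * [sysvals | user uniforms] */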
 
-        if (invert_y)
-                translate_y = ctx->pipe_framebuffer.height - translate_y;
+                int uniform_count = 0;
 
-        float viewport_vec4[] = {
-                vp->scale[0],
-                fabsf(vp->scale[1]),
+                struct mali_vertex_tiler_postfix *postfix;
 
-                vp->translate[0],
-                translate_y
-        };
+                switch (i) {
+                case PIPE_SHADER_VERTEX:
+                        uniform_count = ctx->vs->variants[ctx->vs->active_variant].uniform_count;
+                        postfix = &ctx->payload_vertex.postfix;
+                        break;
 
-        for (int i = 0; i < PIPE_SHADER_TYPES; ++i) {
-                struct panfrost_constant_buffer *buf = &ctx->constant_buffer[i];
+                case PIPE_SHADER_FRAGMENT:
+                        uniform_count = ctx->fs->variants[ctx->fs->active_variant].uniform_count;
+                        postfix = &ctx->payload_tiler.postfix;
+                        break;
 
-                if (i == PIPE_SHADER_VERTEX || i == PIPE_SHADER_FRAGMENT) {
-                        /* It doesn't matter if we don't use all the memory;
-                         * we'd need a dummy UBO anyway. Compute the max */
+                default:
+                        unreachable("Invalid shader stage");
+                }
 
-                        size_t size = sizeof(viewport_vec4) + buf->size;
-                        struct panfrost_transfer transfer = panfrost_allocate_transient(ctx, size);
+                /* Next up, attach UBOs. UBO #0 is the uniforms we just
+                 * uploaded */
 
-                        /* Keep track how much we've uploaded */
-                        off_t offset = 0;
+                unsigned ubo_count = panfrost_ubo_count(ctx, i);
+                assert(ubo_count >= 1);
 
-                        if (i == PIPE_SHADER_VERTEX) {
-                                /* Upload viewport */
-                                memcpy(transfer.cpu + offset, viewport_vec4, sizeof(viewport_vec4));
-                                offset += sizeof(viewport_vec4);
-                        }
+                size_t sz = sizeof(struct mali_uniform_buffer_meta) * ubo_count;
+                struct mali_uniform_buffer_meta *ubos = calloc(sz, 1);
 
-                        /* Upload uniforms */
-                        memcpy(transfer.cpu + offset, buf->buffer, buf->size);
+                /* Upload uniforms as a UBO */
+                ubos[0].size = MALI_POSITIVE((2 + uniform_count));
+                ubos[0].ptr = transfer.gpu >> 2;
 
-                        int uniform_count = 0;
+                /* The rest are honest-to-goodness UBOs */
 
-                        struct mali_vertex_tiler_postfix *postfix;
+                for (unsigned ubo = 1; ubo < ubo_count; ++ubo) {
+                        size_t sz = buf->cb[ubo].buffer_size;
 
-                        switch (i) {
-                        case PIPE_SHADER_VERTEX:
-                                uniform_count = ctx->vs->variants[ctx->vs->active_variant].uniform_count;
-                                postfix = &ctx->payload_vertex.postfix;
-                                break;
+                        bool enabled = buf->enabled_mask & (1 << ubo);
+                        bool empty = sz == 0;
 
-                        case PIPE_SHADER_FRAGMENT:
-                                uniform_count = ctx->fs->variants[ctx->fs->active_variant].uniform_count;
-                                postfix = &ctx->payload_tiler.postfix;
-                                break;
+                        if (!enabled || empty) {
+                                /* Stub out disabled UBOs to catch accesses */
 
-                        default:
-                                DBG("Unknown shader stage %d in uniform upload\n", i);
-                                assert(0);
+                                ubos[ubo].size = 0;
+                                ubos[ubo].ptr = 0xDEAD0000;
+                                continue;
                         }
 
-                        /* Also attach the same buffer as a UBO for extended access */
-
-                        struct mali_uniform_buffer_meta uniform_buffers[] = {
-                                {
-                                        .size = MALI_POSITIVE((2 + uniform_count)),
-                                        .ptr = transfer.gpu >> 2,
-                                },
-                        };
+                        mali_ptr gpu = panfrost_map_constant_buffer_gpu(ctx, buf, ubo);
 
-                        mali_ptr ubufs = panfrost_upload_transient(ctx, uniform_buffers, sizeof(uniform_buffers));
-                        postfix->uniforms = transfer.gpu;
-                        postfix->uniform_buffers = ubufs;
+                        unsigned bytes_per_field = 16;
+                        unsigned aligned = ALIGN(sz, bytes_per_field);
+                        unsigned fields = aligned / bytes_per_field;
 
-                        buf->dirty = 0;
+                        ubos[ubo].size = MALI_POSITIVE(fields);
+                        ubos[ubo].ptr = gpu >> 2;
                 }
+
+                mali_ptr ubufs = panfrost_upload_transient(ctx, ubos, sz);
+                free(ubos);
+                postfix->uniforms = transfer.gpu;
+                postfix->uniform_buffers = ubufs;
+
+                buf->dirty_mask = 0;
         }
 
         /* TODO: Upload the viewport somewhere more appropriate */
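
Aside: a minimal sketch of the UBO descriptor encoding used in the hunk
above, assuming MALI_POSITIVE(x) expands to ((x) - 1) and that ALIGN()
rounds up to the given multiple, consistent with their uses in this file;
sketch_encode_ubo is illustrative, not a driver function:

        static void
        sketch_encode_ubo(struct mali_uniform_buffer_meta *ubo,
                          mali_ptr gpu, size_t bytes)
        {
                /* Sizes are counted in 16-byte fields, rounded up */
                unsigned fields = ALIGN(bytes, 16) / 16;

                /* ...then biased down by one via MALI_POSITIVE, so a
                 * zero-field UBO is unrepresentable; disabled UBOs are
                 * stubbed with a poison pointer instead */
                ubo->size = MALI_POSITIVE(fields);

                /* Base addresses are stored shifted right by two bits */
                ubo->ptr = gpu >> 2;
        }
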
@@ -1138,27 +1380,67 @@ panfrost_emit_for_draw(struct panfrost_context *ctx, bool with_vertex_data)
         };
 
         /* Always scissor to the viewport by default. */
-        view.viewport0[0] = (int) (vp->translate[0] - vp->scale[0]);
-        view.viewport1[0] = MALI_POSITIVE((int) (vp->translate[0] + vp->scale[0]));
+        int minx = (int) (vp->translate[0] - vp->scale[0]);
+        int maxx = (int) (vp->translate[0] + vp->scale[0]);
 
-        view.viewport0[1] = (int) (translate_y - fabs(vp->scale[1]));
-        view.viewport1[1] = MALI_POSITIVE((int) (translate_y + fabs(vp->scale[1])));
+        int miny = (int) (vp->translate[1] - vp->scale[1]);
+        int maxy = (int) (vp->translate[1] + vp->scale[1]);
+
+        /* Apply the scissor test */
 
         if (ss && ctx->rasterizer && ctx->rasterizer->base.scissor) {
-                /* Invert scissor if needed */
-                unsigned miny = invert_y ?
-                        ctx->pipe_framebuffer.height - ss->maxy : ss->miny;
-
-                unsigned maxy = invert_y ?
-                        ctx->pipe_framebuffer.height - ss->miny : ss->maxy;
-
-                /* Set the actual scissor */
-                view.viewport0[0] = ss->minx;
-                view.viewport0[1] = miny;
-                view.viewport1[0] = MALI_POSITIVE(ss->maxx);
-                view.viewport1[1] = MALI_POSITIVE(maxy);
+                minx = ss->minx;
+                maxx = ss->maxx;
+                miny = ss->miny;
+                maxy = ss->maxy;
         } 
 
+        /* Hardware needs the min/max to be strictly ordered, so flip if we
+         * need to. The viewport transformation in the vertex shader will
+         * handle the negatives if we don't */
+
+        if (miny > maxy) {
+                int temp = miny;
+                miny = maxy;
+                maxy = temp;
+        }
+
+        if (minx > maxx) {
+                int temp = minx;
+                minx = maxx;
+                maxx = temp;
+        }
+
+        /* Clamp everything positive, just in case */
+
+        maxx = MAX2(0, maxx);
+        maxy = MAX2(0, maxy);
+        minx = MAX2(0, minx);
+        miny = MAX2(0, miny);
+
+        /* Clamp to the framebuffer size as a last check */
+
+        minx = MIN2(ctx->pipe_framebuffer.width, minx);
+        maxx = MIN2(ctx->pipe_framebuffer.width, maxx);
+
+        miny = MIN2(ctx->pipe_framebuffer.height, miny);
+        maxy = MIN2(ctx->pipe_framebuffer.height, maxy);
+
+        /* Update the job, unless we're doing wallpapering, whose lack of
+         * scissor we can ignore: if we "miss" a tile of wallpaper, it'll
+         * just... be faster :) */
+
+        if (!ctx->wallpaper_batch)
+                panfrost_job_union_scissor(job, minx, miny, maxx, maxy);
+
+        /* Upload */
+
+        view.viewport0[0] = minx;
+        view.viewport1[0] = MALI_POSITIVE(maxx);
+
+        view.viewport0[1] = miny;
+        view.viewport1[1] = MALI_POSITIVE(maxy);
+
         ctx->payload_tiler.postfix.viewport =
                 panfrost_upload_transient(ctx,
                                 &view,
@@ -1172,119 +1454,82 @@ panfrost_emit_for_draw(struct panfrost_context *ctx, bool with_vertex_data)
 static void
 panfrost_queue_draw(struct panfrost_context *ctx)
 {
-        /* TODO: Expand the array? */
-        if (ctx->draw_count >= MAX_DRAW_CALLS) {
-                DBG("Job buffer overflow, ignoring draw\n");
-                assert(0);
-        }
-
         /* Handle dirty flags now */
         panfrost_emit_for_draw(ctx, true);
 
-        struct panfrost_transfer vertex = panfrost_vertex_tiler_job(ctx, false, false);
-        struct panfrost_transfer tiler = panfrost_vertex_tiler_job(ctx, true, false);
+        /* If rasterizer discard is enabled, only submit the vertex job */
 
-        ctx->u_vertex_jobs[ctx->vertex_job_count] = (struct mali_job_descriptor_header *) vertex.cpu;
-        ctx->vertex_jobs[ctx->vertex_job_count++] = vertex.gpu;
+        bool rasterizer_discard = ctx->rasterizer
+                && ctx->rasterizer->base.rasterizer_discard;
 
-        ctx->u_tiler_jobs[ctx->tiler_job_count] = (struct mali_job_descriptor_header *) tiler.cpu;
-        ctx->tiler_jobs[ctx->tiler_job_count++] = tiler.gpu;
+        struct panfrost_transfer vertex = panfrost_vertex_tiler_job(ctx, false);
+        struct panfrost_transfer tiler;
 
-        ctx->draw_count++;
-}
+        if (!rasterizer_discard)
+                tiler = panfrost_vertex_tiler_job(ctx, true);
 
-/* At the end of the frame, the vertex and tiler jobs are linked together and
- * then the fragment job is plonked at the end. Set value job is first for
- * unknown reasons. */
+        struct panfrost_job *batch = panfrost_get_job_for_fbo(ctx);
 
-static void
-panfrost_link_job_pair(struct mali_job_descriptor_header *first, mali_ptr next)
-{
-        if (first->job_descriptor_size)
-                first->next_job_64 = (u64) (uintptr_t) next;
+        if (rasterizer_discard)
+                panfrost_scoreboard_queue_vertex_job(batch, vertex, FALSE);
+        else if (ctx->wallpaper_batch)
+                panfrost_scoreboard_queue_fused_job_prepend(batch, vertex, tiler);
         else
-                first->next_job_32 = (u32) (uintptr_t) next;
-}
-
-static void
-panfrost_link_jobs(struct panfrost_context *ctx)
-{
-        if (ctx->draw_count) {
-                /* Generate the set_value_job */
-                panfrost_set_value_job(ctx);
-
-                /* Have the first vertex job depend on the set value job */
-                ctx->u_vertex_jobs[0]->job_dependency_index_1 = ctx->u_set_value_job->job_index;
-
-                /* SV -> V */
-                panfrost_link_job_pair(ctx->u_set_value_job, ctx->vertex_jobs[0]);
-        }
-
-        /* V -> V/T ; T -> T/null */
-        for (int i = 0; i < ctx->vertex_job_count; ++i) {
-                bool isLast = (i + 1) == ctx->vertex_job_count;
-
-                panfrost_link_job_pair(ctx->u_vertex_jobs[i], isLast ? ctx->tiler_jobs[0] : ctx->vertex_jobs[i + 1]);
-        }
-
-        /* T -> T/null */
-        for (int i = 0; i < ctx->tiler_job_count; ++i) {
-                bool isLast = (i + 1) == ctx->tiler_job_count;
-                panfrost_link_job_pair(ctx->u_tiler_jobs[i], isLast ? 0 : ctx->tiler_jobs[i + 1]);
-        }
+                panfrost_scoreboard_queue_fused_job(batch, vertex, tiler);
 }
 
 /* The entire frame is in memory -- send it off to the kernel! */
 
 static void
 panfrost_submit_frame(struct panfrost_context *ctx, bool flush_immediate,
-                     struct pipe_fence_handle **fence)
+                     struct pipe_fence_handle **fence,
+                     struct panfrost_job *job)
 {
         struct pipe_context *gallium = (struct pipe_context *) ctx;
         struct panfrost_screen *screen = pan_screen(gallium->screen);
 
-        /* Edge case if screen is cleared and nothing else */
-        bool has_draws = ctx->draw_count > 0;
-
-        /* Workaround a bizarre lockup (a hardware errata?) */
-        if (!has_draws)
-                flush_immediate = true;
-
-        /* A number of jobs are batched -- this must be linked and cleared */
-        panfrost_link_jobs(ctx);
-
-        ctx->draw_count = 0;
-        ctx->vertex_job_count = 0;
-        ctx->tiler_job_count = 0;
-
 #ifndef DRY_RUN
         
-        bool is_scanout = panfrost_is_scanout(ctx);
-        int fragment_id = screen->driver->submit_vs_fs_job(ctx, has_draws, is_scanout);
+        panfrost_job_submit(ctx, job);
 
         /* If visual, we can stall a frame */
 
         if (!flush_immediate)
-                screen->driver->force_flush_fragment(ctx, fence);
+                panfrost_drm_force_flush_fragment(ctx, fence);
 
-        screen->last_fragment_id = fragment_id;
         screen->last_fragment_flushed = false;
+        screen->last_job = job;
 
         /* If readback, flush now (hurts the pipelined performance) */
         if (flush_immediate)
-                screen->driver->force_flush_fragment(ctx, fence);
+                panfrost_drm_force_flush_fragment(ctx, fence);
+#endif
+}
 
-        if (screen->driver->dump_counters && pan_counters_base) {
-                screen->driver->dump_counters(screen);
+static void
+panfrost_draw_wallpaper(struct pipe_context *pipe)
+{
+        struct panfrost_context *ctx = pan_context(pipe);
 
-                char filename[128];
-                snprintf(filename, sizeof(filename), "%s/frame%d.mdgprf", pan_counters_base, ++performance_counter_number);
-                FILE *fp = fopen(filename, "wb");
-                fwrite(screen->perf_counters.cpu,  4096, sizeof(uint32_t), fp);
-                fclose(fp);
-        }
+        /* Nothing to reload? */
+        if (ctx->pipe_framebuffer.cbufs[0] == NULL)
+                return;
 
-#endif
+        /* Check if the buffer has any content on it worth preserving */
+
+        struct pipe_surface *surf = ctx->pipe_framebuffer.cbufs[0];
+        struct panfrost_resource *rsrc = pan_resource(surf->texture);
+        unsigned level = surf->u.tex.level;
+
+        if (!rsrc->bo->slices[level].initialized)
+                return;
+
+        /* Save the batch */
+        struct panfrost_job *batch = panfrost_get_job_for_fbo(ctx);
+
+        ctx->wallpaper_batch = batch;
+        panfrost_blit_wallpaper(ctx);
+        ctx->wallpaper_batch = NULL;
 }
 
 void
@@ -1297,13 +1542,18 @@ panfrost_flush(
         struct panfrost_job *job = panfrost_get_job_for_fbo(ctx);
 
         /* Nothing to do! */
-        if (!ctx->draw_count && !job->clear) return;
+        if (!job->last_job.gpu && !job->clear) return;
 
-        /* Whether to stall the pipeline for immediately correct results */
-        bool flush_immediate = flags & PIPE_FLUSH_END_OF_FRAME;
+        if (!job->clear)
+                panfrost_draw_wallpaper(&ctx->base);
+
+        /* Whether to stall the pipeline for immediately correct results. Since
+         * pipelined rendering is quite broken right now (to be fixed by the
+         * panfrost_job refactor), just take the perf hit for correctness */
+        bool flush_immediate = /*flags & PIPE_FLUSH_END_OF_FRAME*/true;
 
         /* Submit the frame itself */
-        panfrost_submit_frame(ctx, flush_immediate, fence);
+        panfrost_submit_frame(ctx, flush_immediate, fence, job);
 
         /* Prepare for the next frame */
         panfrost_invalidate_frame(ctx);
@@ -1327,9 +1577,7 @@ g2m_draw_mode(enum pipe_prim_type mode)
                 DEFINE_CASE(POLYGON);
 
         default:
-                DBG("Illegal draw mode %d\n", mode);
-                assert(0);
-                return MALI_LINE_LOOP;
+                unreachable("Invalid draw mode");
         }
 }
 
@@ -1349,20 +1597,7 @@ panfrost_translate_index_size(unsigned size)
                 return MALI_DRAW_INDEXED_UINT32;
 
         default:
-                DBG("Unknown index size %d\n", size);
-                assert(0);
-                return 0;
-        }
-}
-
-static const uint8_t *
-panfrost_get_index_buffer_raw(const struct pipe_draw_info *info)
-{
-        if (info->has_user_indices) {
-                return (const uint8_t *) info->index.user;
-        } else {
-                struct panfrost_resource *rsrc = (struct panfrost_resource *) (info->index.resource);
-                return (const uint8_t *) rsrc->bo->cpu;
+                unreachable("Invalid index size");
         }
 }
 
@@ -1381,17 +1616,23 @@ panfrost_get_index_buffer_mapped(struct panfrost_context *ctx, const struct pipe
                 return rsrc->bo->gpu + offset;
         } else {
                 /* Otherwise, we need to upload to transient memory */
-                const uint8_t *ibuf8 = panfrost_get_index_buffer_raw(info);
+                const uint8_t *ibuf8 = (const uint8_t *) info->index.user;
                 return panfrost_upload_transient(ctx, ibuf8 + offset, info->count * info->index_size);
         }
 }
 
-#define CALCULATE_MIN_MAX_INDEX(T, buffer, start, count) \
-        for (unsigned _idx = (start); _idx < (start + count); ++_idx) { \
-                T idx = buffer[_idx]; \
-                if (idx > max_index) max_index = idx; \
-                if (idx < min_index) min_index = idx; \
-        }
+static bool
+panfrost_scissor_culls_everything(struct panfrost_context *ctx)
+{
+        const struct pipe_scissor_state *ss = &ctx->scissor;
+
+        /* Check if we're scissoring at all */
+
+        if (!(ss && ctx->rasterizer && ctx->rasterizer->base.scissor))
+                return false;
+
+        return (ss->minx == ss->maxx) && (ss->miny == ss->maxy);
+}
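
Aside: a quick illustration of what this check catches, with hypothetical
state (a scissor whose min and max coincide encloses zero area):

        struct pipe_scissor_state ss = {
                .minx = 64, .maxx = 64,
                .miny = 32, .maxy = 32,
        };

        /* With this scissor bound and rasterizer scissoring enabled,
         * panfrost_scissor_culls_everything() returns true and the
         * draw below is dropped before any descriptors are built */
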
 
 static void
 panfrost_draw_vbo(
@@ -1400,6 +1641,13 @@ panfrost_draw_vbo(
 {
         struct panfrost_context *ctx = pan_context(pipe);
 
+        /* First of all, check the scissor to see if anything would be drawn
+         * at all. If nothing would be, drop the draw (mostly a conformance
+         * issue; well-behaved apps shouldn't hit this) */
+
+        if (panfrost_scissor_culls_everything(ctx))
+                return;
+
         ctx->payload_vertex.draw_start = info->start;
         ctx->payload_tiler.draw_start = info->start;
 
@@ -1434,39 +1682,33 @@ panfrost_draw_vbo(
         /* For non-indexed draws, they're the same */
         unsigned invocation_count = ctx->vertex_count;
 
+        unsigned draw_flags = 0;
+
+        /* The draw flags control how the primitive size is interpreted */
+
+        if (panfrost_writes_point_size(ctx))
+                draw_flags |= MALI_DRAW_VARYING_SIZE;
+
         /* For higher vertex counts (greater than what fits in a 16-bit
          * short), the other value is needed; otherwise there will be bizarre
          * rendering artefacts. It's not clear what these values mean yet. */
 
-        ctx->payload_tiler.prefix.unknown_draw &= ~(0x3000 | 0x18000);
-        ctx->payload_tiler.prefix.unknown_draw |= (mode == PIPE_PRIM_POINTS || ctx->vertex_count > 65535) ? 0x3000 : 0x18000;
+        draw_flags |= (mode == PIPE_PRIM_POINTS || ctx->vertex_count > 65535) ? 0x3000 : 0x18000;
 
         if (info->index_size) {
                 /* Calculate the min/max index used so we can figure out how
                  * many times to invoke the vertex shader */
 
-                const uint8_t *ibuf8 = panfrost_get_index_buffer_raw(info);
-
-                int min_index = INT_MAX;
-                int max_index = 0;
+                /* Fetch / calculate index bounds */
+                unsigned min_index = 0, max_index = 0;
 
-                if (info->index_size == 1) {
-                        CALCULATE_MIN_MAX_INDEX(uint8_t, ibuf8, info->start, info->count);
-                } else if (info->index_size == 2) {
-                        const uint16_t *ibuf16 = (const uint16_t *) ibuf8;
-                        CALCULATE_MIN_MAX_INDEX(uint16_t, ibuf16, info->start, info->count);
-                } else if (info->index_size == 4) {
-                        const uint32_t *ibuf32 = (const uint32_t *) ibuf8;
-                        CALCULATE_MIN_MAX_INDEX(uint32_t, ibuf32, info->start, info->count);
+                if (info->max_index == ~0u) {
+                        u_vbuf_get_minmax_index(pipe, info, &min_index, &max_index);
                 } else {
-                        assert(0);
+                        min_index = info->min_index;
+                        max_index = info->max_index;
                 }
 
-                /* Make sure we didn't go crazy */
-                assert(min_index < INT_MAX);
-                assert(max_index > 0);
-                assert(max_index > min_index);
-
                 /* Use the corresponding values */
                 invocation_count = max_index - min_index + 1;
                 ctx->payload_vertex.draw_start = min_index;
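
Aside: u_vbuf_get_minmax_index() replaces the hand-rolled
CALCULATE_MIN_MAX_INDEX scan deleted above and computes the same bounds. A
minimal sketch for 16-bit indices (illustrative; not the actual u_vbuf
implementation):

        static void
        sketch_minmax_u16(const uint16_t *indices, unsigned start,
                          unsigned count, unsigned *min, unsigned *max)
        {
                unsigned lo = ~0u, hi = 0;

                for (unsigned i = start; i < start + count; ++i) {
                        lo = MIN2(lo, indices[i]);
                        hi = MAX2(hi, indices[i]);
                }

                *min = lo;
                *max = hi;
        }

As the surrounding code shows, the vertex shader then runs only over the
live range: invocation_count = max - min + 1, with draw_start biased to min.
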
@@ -1477,9 +1719,8 @@ panfrost_draw_vbo(
 
                 //assert(!info->restart_index); /* TODO: Research */
                 assert(!info->index_bias);
-                //assert(!info->min_index); /* TODO: Use value */
 
-                ctx->payload_tiler.prefix.unknown_draw |= panfrost_translate_index_size(info->index_size);
+                draw_flags |= panfrost_translate_index_size(info->index_size);
                 ctx->payload_tiler.prefix.indices = panfrost_get_index_buffer_mapped(ctx, info);
         } else {
                 /* Index count == vertex count, if no indexing is applied, as
                  * far as the hardware is concerned */
                 ctx->payload_tiler.prefix.index_count = MALI_POSITIVE(ctx->vertex_count);
 
                 /* Reverse index state */
-                ctx->payload_tiler.prefix.unknown_draw &= ~MALI_DRAW_INDEXED_UINT32;
                 ctx->payload_tiler.prefix.indices = (uintptr_t) NULL;
         }
 
         ctx->payload_vertex.prefix.invocation_count = MALI_POSITIVE(invocation_count);
         ctx->payload_tiler.prefix.invocation_count = MALI_POSITIVE(invocation_count);
+        ctx->payload_tiler.prefix.unknown_draw = draw_flags;
 
         /* Fire off the draw itself */
         panfrost_queue_draw(ctx);
@@ -1521,8 +1762,8 @@ panfrost_create_rasterizer_state(
         /* Bitmask, unknown meaning of the start value */
         so->tiler_gl_enables = ctx->is_t6xx ? 0x105 : 0x7;
 
-        so->tiler_gl_enables |= MALI_FRONT_FACE(
-                                        cso->front_ccw ? MALI_CCW : MALI_CW);
+        if (cso->front_ccw)
+                so->tiler_gl_enables |= MALI_FRONT_CCW_TOP;
 
         if (cso->cull_face & PIPE_FACE_FRONT)
                 so->tiler_gl_enables |= MALI_CULL_FACE_FRONT;
@@ -1546,6 +1787,14 @@ panfrost_bind_rasterizer_state(
 
         ctx->rasterizer = hwcso;
         ctx->dirty |= PAN_DIRTY_RASTERIZER;
+
+        /* Point sprites are emulated */
+
+        struct panfrost_shader_state *variant =
+                ctx->fs ? &ctx->fs->variants[ctx->fs->active_variant] : NULL;
+
+        if (ctx->rasterizer->base.sprite_coord_enable || (variant && variant->point_sprite_mask))
+                ctx->base.bind_fs_state(&ctx->base, ctx->fs);
 }
 
 static void *
@@ -1554,17 +1803,19 @@ panfrost_create_vertex_elements_state(
         unsigned num_elements,
         const struct pipe_vertex_element *elements)
 {
-        struct panfrost_context *ctx = pan_context(pctx);
         struct panfrost_vertex_state *so = CALLOC_STRUCT(panfrost_vertex_state);
 
         so->num_elements = num_elements;
         memcpy(so->pipe, elements, sizeof(*elements) * num_elements);
 
-        struct panfrost_transfer transfer = panfrost_allocate_chunk(ctx, sizeof(struct mali_attr_meta) * num_elements, HEAP_DESCRIPTOR);
-        so->hw = (struct mali_attr_meta *) transfer.cpu;
-        so->descriptor_ptr = transfer.gpu;
-
-        /* Allocate memory for the descriptor state */
+        /* XXX: What the cornball? This is totally, 100%, unapologetically
+         * nonsense. And yet it somehow fixes a regression in -bshadow
+         * (previously, we allocated the descriptor here; a newer commit
+         * removed that allocation, and then memory corruption led to
+         * shader_meta getting overwritten in bad ways, and the whole test
+         * case fell apart). TODO: LOOK INTO PLEASE XXX XXX BAD XXX XXX XXX
+         */
+        panfrost_allocate_chunk(pan_context(pctx), 0, HEAP_DESCRIPTOR);
 
         for (int i = 0; i < num_elements; ++i) {
                 so->hw[i].index = elements[i].vertex_buffer_index;
@@ -1594,15 +1845,6 @@ panfrost_bind_vertex_elements_state(
         ctx->dirty |= PAN_DIRTY_VERTEX;
 }
 
-static void
-panfrost_delete_vertex_elements_state(struct pipe_context *pctx, void *hwcso)
-{
-        struct panfrost_vertex_state *so = (struct panfrost_vertex_state *) hwcso;
-        unsigned bytes = sizeof(struct mali_attr_meta) * so->num_elements;
-        DBG("Vertex elements delete leaks descriptor (%d bytes)\n", bytes);
-        free(hwcso);
-}
-
 static void *
 panfrost_create_shader_state(
         struct pipe_context *pctx,
@@ -1630,9 +1872,6 @@ panfrost_delete_shader_state(
                 DBG("Deleting TGSI shader leaks duplicated tokens\n");
         }
 
-        unsigned leak = cso->variant_count * sizeof(struct mali_shader_meta);
-        DBG("Deleting shader state leaks descriptors (%d bytes), and shader bytecode\n", leak);
-
         free(so);
 }
 
@@ -1691,11 +1930,17 @@ panfrost_bind_sampler_states(
 }
 
 static bool
-panfrost_variant_matches(struct panfrost_context *ctx, struct panfrost_shader_state *variant)
+panfrost_variant_matches(
+                struct panfrost_context *ctx,
+                struct panfrost_shader_state *variant,
+                enum pipe_shader_type type)
 {
+        struct pipe_rasterizer_state *rasterizer = &ctx->rasterizer->base;
         struct pipe_alpha_state *alpha = &ctx->depth_stencil->alpha;
 
-        if (alpha->enabled || variant->alpha_state.enabled) {
+        bool is_fragment = (type == PIPE_SHADER_FRAGMENT);
+
+        if (is_fragment && (alpha->enabled || variant->alpha_state.enabled)) {
                 /* Make sure enable state is at least the same */
                 if (alpha->enabled != variant->alpha_state.enabled) {
                         return false;
@@ -1709,91 +1954,113 @@ panfrost_variant_matches(struct panfrost_context *ctx, struct panfrost_shader_st
                         return false;
                 }
         }
+
+        if (is_fragment && rasterizer && (rasterizer->sprite_coord_enable |
+                                variant->point_sprite_mask)) {
+                /* Ensure the same varyings are turned to point sprites */
+                if (rasterizer->sprite_coord_enable != variant->point_sprite_mask)
+                        return false;
+
+                /* Ensure the orientation is correct */
+                bool upper_left =
+                        rasterizer->sprite_coord_mode ==
+                        PIPE_SPRITE_COORD_UPPER_LEFT;
+
+                if (variant->point_sprite_upper_left != upper_left)
+                        return false;
+        }
+
         /* Otherwise, we're good to go */
         return true;
 }
 
 static void
-panfrost_bind_fs_state(
+panfrost_bind_shader_state(
         struct pipe_context *pctx,
-        void *hwcso)
+        void *hwcso,
+        enum pipe_shader_type type)
 {
         struct panfrost_context *ctx = pan_context(pctx);
 
-        ctx->fs = hwcso;
+        if (type == PIPE_SHADER_FRAGMENT) {
+                ctx->fs = hwcso;
+                ctx->dirty |= PAN_DIRTY_FS;
+        } else {
+                assert(type == PIPE_SHADER_VERTEX);
+                ctx->vs = hwcso;
+                ctx->dirty |= PAN_DIRTY_VS;
+        }
 
-        if (hwcso) {
-                /* Match the appropriate variant */
+        if (!hwcso) return;
 
-                signed variant = -1;
+        /* Match the appropriate variant */
 
-                struct panfrost_shader_variants *variants = (struct panfrost_shader_variants *) hwcso;
+        signed variant = -1;
+        struct panfrost_shader_variants *variants = (struct panfrost_shader_variants *) hwcso;
 
-                for (unsigned i = 0; i < variants->variant_count; ++i) {
-                        if (panfrost_variant_matches(ctx, &variants->variants[i])) {
-                                variant = i;
-                                break;
-                        }
+        for (unsigned i = 0; i < variants->variant_count; ++i) {
+                if (panfrost_variant_matches(ctx, &variants->variants[i], type)) {
+                        variant = i;
+                        break;
                 }
+        }
 
-                if (variant == -1) {
-                        /* No variant matched, so create a new one */
-                        variant = variants->variant_count++;
-                        assert(variants->variant_count < MAX_SHADER_VARIANTS);
+        if (variant == -1) {
+                /* No variant matched, so create a new one */
+                variant = variants->variant_count++;
+                assert(variants->variant_count < MAX_SHADER_VARIANTS);
 
-                        variants->variants[variant].base = hwcso;
-                        variants->variants[variant].alpha_state = ctx->depth_stencil->alpha;
+                struct panfrost_shader_state *v =
+                        &variants->variants[variant];
 
-                        /* Allocate the mapped descriptor ahead-of-time. TODO: Use for FS as well as VS */
-                        struct panfrost_context *ctx = pan_context(pctx);
-                        struct panfrost_transfer transfer = panfrost_allocate_chunk(ctx, sizeof(struct mali_shader_meta), HEAP_DESCRIPTOR);
+                v->base = hwcso;
 
-                        variants->variants[variant].tripipe = (struct mali_shader_meta *) transfer.cpu;
-                        variants->variants[variant].tripipe_gpu = transfer.gpu;
+                if (type == PIPE_SHADER_FRAGMENT) {
+                        v->alpha_state = ctx->depth_stencil->alpha;
 
+                        if (ctx->rasterizer) {
+                                v->point_sprite_mask = ctx->rasterizer->base.sprite_coord_enable;
+                                v->point_sprite_upper_left =
+                                        ctx->rasterizer->base.sprite_coord_mode ==
+                                        PIPE_SPRITE_COORD_UPPER_LEFT;
+                        }
                 }
 
-                /* Select this variant */
-                variants->active_variant = variant;
+                /* Allocate the mapped descriptor ahead-of-time. */
+                struct panfrost_context *ctx = pan_context(pctx);
+                struct panfrost_transfer transfer = panfrost_allocate_chunk(ctx, sizeof(struct mali_shader_meta), HEAP_DESCRIPTOR);
 
-                struct panfrost_shader_state *shader_state = &variants->variants[variant];
-                assert(panfrost_variant_matches(ctx, shader_state));
+                variants->variants[variant].tripipe = (struct mali_shader_meta *) transfer.cpu;
+                variants->variants[variant].tripipe_gpu = transfer.gpu;
 
-                /* Now we have a variant selected, so compile and go */
-
-                if (!shader_state->compiled) {
-                        panfrost_shader_compile(ctx, shader_state->tripipe, NULL, JOB_TYPE_TILER, shader_state);
-                        shader_state->compiled = true;
-                }
         }
 
-        ctx->dirty |= PAN_DIRTY_FS;
-}
-
-static void
-panfrost_bind_vs_state(
-        struct pipe_context *pctx,
-        void *hwcso)
-{
-        struct panfrost_context *ctx = pan_context(pctx);
+        /* Select this variant */
+        variants->active_variant = variant;
 
-        ctx->vs = hwcso;
+        struct panfrost_shader_state *shader_state = &variants->variants[variant];
+        assert(panfrost_variant_matches(ctx, shader_state, type));
 
-        if (hwcso) {
-                if (!ctx->vs->variants[0].compiled) {
-                        ctx->vs->variants[0].base = hwcso;
+        /* We finally have a variant, so compile it */
 
-                        /* TODO DRY from above */
-                        struct panfrost_transfer transfer = panfrost_allocate_chunk(ctx, sizeof(struct mali_shader_meta), HEAP_DESCRIPTOR);
-                        ctx->vs->variants[0].tripipe = (struct mali_shader_meta *) transfer.cpu;
-                        ctx->vs->variants[0].tripipe_gpu = transfer.gpu;
+        if (!shader_state->compiled) {
+                panfrost_shader_compile(ctx, shader_state->tripipe, NULL,
+                                panfrost_job_type_for_pipe(type), shader_state);
 
-                        panfrost_shader_compile(ctx, ctx->vs->variants[0].tripipe, NULL, JOB_TYPE_VERTEX, &ctx->vs->variants[0]);
-                        ctx->vs->variants[0].compiled = true;
-                }
+                shader_state->compiled = true;
         }
+}
 
-        ctx->dirty |= PAN_DIRTY_VS;
+static void
+panfrost_bind_vs_state(struct pipe_context *pctx, void *hwcso)
+{
+        panfrost_bind_shader_state(pctx, hwcso, PIPE_SHADER_VERTEX);
+}
+
+static void
+panfrost_bind_fs_state(struct pipe_context *pctx, void *hwcso)
+{
+        panfrost_bind_shader_state(pctx, hwcso, PIPE_SHADER_FRAGMENT);
 }
 
 static void
@@ -1804,22 +2071,8 @@ panfrost_set_vertex_buffers(
         const struct pipe_vertex_buffer *buffers)
 {
         struct panfrost_context *ctx = pan_context(pctx);
-        assert(num_buffers <= PIPE_MAX_ATTRIBS);
-
-        /* XXX: Dirty tracking? etc */
-        if (buffers) {
-                size_t sz = sizeof(buffers[0]) * num_buffers;
-                ctx->vertex_buffers = malloc(sz);
-                ctx->vertex_buffer_count = num_buffers;
-                memcpy(ctx->vertex_buffers, buffers, sz);
-        } else {
-                if (ctx->vertex_buffers) {
-                        free(ctx->vertex_buffers);
-                        ctx->vertex_buffers = NULL;
-                }
 
-                ctx->vertex_buffer_count = 0;
-        }
+        util_set_vertex_buffers_mask(ctx->vertex_buffers, &ctx->vb_mask, buffers, start_slot, num_buffers);
 }
 
 static void
@@ -1831,43 +2084,18 @@ panfrost_set_constant_buffer(
         struct panfrost_context *ctx = pan_context(pctx);
         struct panfrost_constant_buffer *pbuf = &ctx->constant_buffer[shader];
 
-        size_t sz = buf ? buf->buffer_size : 0;
-
-        /* Free previous buffer */
-
-        pbuf->dirty = true;
-        pbuf->size = sz;
-
-        if (pbuf->buffer) {
-                free(pbuf->buffer);
-                pbuf->buffer = NULL;
-        }
+        util_copy_constant_buffer(&pbuf->cb[index], buf);
 
-        /* If unbinding, we're done */
+        unsigned mask = (1 << index);
 
-        if (!buf)
-                return;
-
-        /* Multiple constant buffers not yet supported */
-        assert(index == 0);
-
-        const uint8_t *cpu;
-
-        struct panfrost_resource *rsrc = (struct panfrost_resource *) (buf->buffer);
-
-        if (rsrc) {
-                cpu = rsrc->bo->cpu;
-        } else if (buf->user_buffer) {
-                cpu = buf->user_buffer;
-        } else {
-                DBG("No constant buffer?\n");
+        if (unlikely(!buf)) {
+                pbuf->enabled_mask &= ~mask;
+                pbuf->dirty_mask &= ~mask;
                 return;
         }
 
-        /* Copy the constant buffer into the driver context for later upload */
-
-        pbuf->buffer = malloc(sz);
-        memcpy(pbuf->buffer, cpu + buf->buffer_offset, sz);
+        pbuf->enabled_mask |= mask;
+        pbuf->dirty_mask |= mask;
 }
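
Aside: the mask bookkeeping above replaces the old single-buffer copy (and
its assert(index == 0)). A hypothetical bind/unbind sequence, assuming a
pipe_context *pipe and a filled-in struct pipe_constant_buffer cb:

        /* Bind UBO slot 2 for the fragment stage */
        pipe->set_constant_buffer(pipe, PIPE_SHADER_FRAGMENT, 2, &cb);
        /* -> enabled_mask |= (1 << 2), dirty_mask |= (1 << 2) */

        /* Unbind it again */
        pipe->set_constant_buffer(pipe, PIPE_SHADER_FRAGMENT, 2, NULL);
        /* -> both masks &= ~(1 << 2) */
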
 
 static void
@@ -1882,18 +2110,45 @@ panfrost_set_stencil_ref(
         ctx->dirty |= PAN_DIRTY_FS;
 }
 
+static enum mali_texture_type
+panfrost_translate_texture_type(enum pipe_texture_target t)
+{
+        switch (t) {
+                case PIPE_BUFFER:
+                case PIPE_TEXTURE_1D:
+                case PIPE_TEXTURE_1D_ARRAY:
+                        return MALI_TEX_1D;
+
+                case PIPE_TEXTURE_2D:
+                case PIPE_TEXTURE_2D_ARRAY:
+                case PIPE_TEXTURE_RECT:
+                        return MALI_TEX_2D;
+
+                case PIPE_TEXTURE_3D:
+                        return MALI_TEX_3D;
+
+                case PIPE_TEXTURE_CUBE:
+                case PIPE_TEXTURE_CUBE_ARRAY:
+                        return MALI_TEX_CUBE;
+
+                default:
+                        unreachable("Unknown target");
+        }
+}
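
Aside: this mapping replaces the old boolean is_not_cubemap field (removed
below) with a proper type enum. Illustrative spot checks:

        assert(panfrost_translate_texture_type(PIPE_TEXTURE_RECT) == MALI_TEX_2D);
        assert(panfrost_translate_texture_type(PIPE_TEXTURE_CUBE) == MALI_TEX_CUBE);
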
+
 static struct pipe_sampler_view *
 panfrost_create_sampler_view(
         struct pipe_context *pctx,
         struct pipe_resource *texture,
         const struct pipe_sampler_view *template)
 {
-        struct panfrost_sampler_view *so = CALLOC_STRUCT(panfrost_sampler_view);
+        struct panfrost_sampler_view *so = rzalloc(pctx, struct panfrost_sampler_view);
         int bytes_per_pixel = util_format_get_blocksize(texture->format);
 
         pipe_reference(NULL, &texture->reference);
 
         struct panfrost_resource *prsrc = (struct panfrost_resource *) texture;
+        assert(prsrc->bo);
 
         so->base = *template;
         so->base.texture = texture;
@@ -1904,9 +2159,6 @@ panfrost_create_sampler_view(
          * (data) itself. So, we serialise the descriptor here and cache it for
          * later. */
 
-        /* TODO: Other types of textures */
-        assert(template->target == PIPE_TEXTURE_2D);
-
         /* Make sure it's something with which we're familiar */
         assert(bytes_per_pixel >= 1 && bytes_per_pixel <= 4);
 
@@ -1941,18 +2193,50 @@ panfrost_create_sampler_view(
                         break;
         }
 
+        /* Check if we need to set a custom stride by computing the "expected"
+         * stride and comparing it to what the BO actually wants. Only applies
+         * to linear textures, since tiled/compressed textures have strict
+         * alignment requirements for their strides as it is */
+
+        unsigned first_level = template->u.tex.first_level;
+        unsigned last_level = template->u.tex.last_level;
+
+        if (prsrc->bo->layout == PAN_LINEAR) {
+                for (unsigned l = first_level; l <= last_level; ++l) {
+                        unsigned actual_stride = prsrc->bo->slices[l].stride;
+                        unsigned width = u_minify(texture->width0, l);
+                        unsigned comp_stride = width * bytes_per_pixel;
+
+                        if (comp_stride != actual_stride) {
+                                usage2_layout |= MALI_TEX_MANUAL_STRIDE;
+                                break;
+                        }
+                }
+        }
+
+        /* In the hardware, array_size refers specifically to array textures,
+         * whereas in Gallium, it also covers cubemaps */
+
+        unsigned array_size = texture->array_size;
+
+        if (texture->target == PIPE_TEXTURE_CUBE) {
+                /* TODO: Cubemap arrays */
+                assert(array_size == 6);
+        }
+
         struct mali_texture_descriptor texture_descriptor = {
-                .width = MALI_POSITIVE(texture->width0),
-                .height = MALI_POSITIVE(texture->height0),
-                .depth = MALI_POSITIVE(texture->depth0),
+                .width = MALI_POSITIVE(u_minify(texture->width0, first_level)),
+                .height = MALI_POSITIVE(u_minify(texture->height0, first_level)),
+                .depth = MALI_POSITIVE(u_minify(texture->depth0, first_level)),
+                .array_size = MALI_POSITIVE(array_size),
 
                 /* TODO: Decode */
                 .format = {
                         .swizzle = panfrost_translate_swizzle_4(desc->swizzle),
                         .format = format,
 
-                        .usage1 = 0x0,
-                        .is_not_cubemap = 1,
+                        .srgb = desc->colorspace == UTIL_FORMAT_COLORSPACE_SRGB,
+                        .type = panfrost_translate_texture_type(texture->target),
 
                         .usage2 = usage2_layout
                 },
@@ -1960,14 +2244,7 @@ panfrost_create_sampler_view(
                 .swizzle = panfrost_translate_swizzle_4(user_swizzle)
         };
 
-        /* TODO: Other base levels require adjusting dimensions / level numbers / etc */
-        assert (template->u.tex.first_level == 0);
-
-        /* Disable mipmapping for now to avoid regressions while automipmapping
-         * is being implemented. TODO: Remove me once automipmaps work */
-
-        //texture_descriptor.nr_mipmap_levels = template->u.tex.last_level - template->u.tex.first_level;
-        texture_descriptor.nr_mipmap_levels = 0;
+        //texture_descriptor.nr_mipmap_levels = last_level - first_level;
 
         so->hw = texture_descriptor;
 
@@ -1985,7 +2262,13 @@ panfrost_set_sampler_views(
 
         assert(start_slot == 0);
 
-        ctx->sampler_view_count[shader] = num_views;
+        unsigned new_nr = 0;
+        for (unsigned i = 0; i < num_views; ++i) {
+                if (views[i])
+                        new_nr = i + 1;
+        }
+
+        ctx->sampler_view_count[shader] = new_nr;
         memcpy(ctx->sampler_views[shader], views, num_views * sizeof (void *));
 
         ctx->dirty |= PAN_DIRTY_TEXTURES;
@@ -1994,13 +2277,10 @@ panfrost_set_sampler_views(
 static void
 panfrost_sampler_view_destroy(
         struct pipe_context *pctx,
-        struct pipe_sampler_view *views)
+        struct pipe_sampler_view *view)
 {
-        //struct panfrost_context *ctx = pan_context(pctx);
-
-        /* TODO */
-
-        free(views);
+        pipe_resource_reference(&view->texture, NULL);
+        ralloc_free(view);
 }
 
 static void
@@ -2009,10 +2289,16 @@ panfrost_set_framebuffer_state(struct pipe_context *pctx,
 {
         struct panfrost_context *ctx = pan_context(pctx);
 
-        /* Flush when switching away from an FBO */
+        /* Flush when switching framebuffers, but not if the framebuffer
+         * state is being restored by u_blitter
+         */
 
-        if (!panfrost_is_scanout(ctx)) {
-                panfrost_flush(pctx, NULL, 0);
+        struct panfrost_job *job = panfrost_get_job_for_fbo(ctx);
+        bool is_scanout = panfrost_is_scanout(ctx);
+        bool has_draws = job->last_job.gpu;
+
+        if (!ctx->blitter->running && (!is_scanout || has_draws)) {
+                panfrost_flush(pctx, NULL, PIPE_FLUSH_END_OF_FRAME);
         }
 
         ctx->pipe_framebuffer.nr_cbufs = fb->nr_cbufs;
@@ -2039,27 +2325,23 @@ panfrost_set_framebuffer_state(struct pipe_context *pctx,
                         continue;
 
                 if (ctx->require_sfbd)
-                        ctx->vt_framebuffer_sfbd = panfrost_emit_sfbd(ctx);
+                        ctx->vt_framebuffer_sfbd = panfrost_emit_sfbd(ctx, ~0);
                 else
-                        ctx->vt_framebuffer_mfbd = panfrost_emit_mfbd(ctx);
+                        ctx->vt_framebuffer_mfbd = panfrost_emit_mfbd(ctx, ~0);
 
                 panfrost_attach_vt_framebuffer(ctx);
 
                 struct panfrost_resource *tex = ((struct panfrost_resource *) ctx->pipe_framebuffer.cbufs[i]->texture);
-                bool is_scanout = panfrost_is_scanout(ctx);
+                enum pipe_format format = ctx->pipe_framebuffer.cbufs[i]->format;
 
-                if (!is_scanout && tex->bo->layout != PAN_AFBC) {
-                        /* The blob is aggressive about enabling AFBC. As such,
-                         * it's pretty much necessary to use it here, since we
-                         * have no traces of non-compressed FBO. */
+                bool can_afbc = panfrost_format_supports_afbc(format);
+                bool is_scanout = panfrost_is_scanout(ctx);
 
+                if (!is_scanout && tex->bo->layout != PAN_AFBC && can_afbc)
                         panfrost_enable_afbc(ctx, tex, false);
-                }
 
-                if (!is_scanout && !tex->bo->has_checksum) {
-                        /* Enable transaction elimination if we can */
+                if (!is_scanout && !tex->bo->has_checksum)
                         panfrost_enable_checksum(ctx, tex);
-                }
         }
 
         {
@@ -2069,16 +2351,19 @@ panfrost_set_framebuffer_state(struct pipe_context *pctx,
                         pipe_surface_reference(&ctx->pipe_framebuffer.zsbuf, zb);
 
                         if (zb) {
-                                /* FBO has depth */
-
                                 if (ctx->require_sfbd)
-                                        ctx->vt_framebuffer_sfbd = panfrost_emit_sfbd(ctx);
+                                        ctx->vt_framebuffer_sfbd = panfrost_emit_sfbd(ctx, ~0);
                                 else
-                                        ctx->vt_framebuffer_mfbd = panfrost_emit_mfbd(ctx);
+                                        ctx->vt_framebuffer_mfbd = panfrost_emit_mfbd(ctx, ~0);
 
                                 panfrost_attach_vt_framebuffer(ctx);
 
-                                /* Keep the depth FBO linear */
+                                struct panfrost_resource *tex = pan_resource(zb->texture);
+                                bool can_afbc = panfrost_format_supports_afbc(zb->format);
+                                bool is_scanout = panfrost_is_scanout(ctx);
+
+                                if (!is_scanout && tex->bo->layout != PAN_AFBC && can_afbc)
+                                        panfrost_enable_afbc(ctx, tex, true);
                         }
                 }
         }
@@ -2089,7 +2374,7 @@ panfrost_create_blend_state(struct pipe_context *pipe,
                             const struct pipe_blend_state *blend)
 {
         struct panfrost_context *ctx = pan_context(pipe);
-        struct panfrost_blend_state *so = CALLOC_STRUCT(panfrost_blend_state);
+        struct panfrost_blend_state *so = rzalloc(ctx, struct panfrost_blend_state);
         so->base = *blend;
 
         /* TODO: The following features are not yet implemented */
@@ -2099,7 +2384,7 @@ panfrost_create_blend_state(struct pipe_context *pipe,
 
         /* Compile the blend state, first as fixed-function if we can */
 
-        if (panfrost_make_fixed_blend_mode(&blend->rt[0], &so->equation, blend->rt[0].colormask, &ctx->blend_color))
+        if (panfrost_make_fixed_blend_mode(&blend->rt[0], so, blend->rt[0].colormask, &ctx->blend_color))
                 return so;
 
         /* If we can't, compile a blend shader instead */
@@ -2139,7 +2424,7 @@ panfrost_delete_blend_state(struct pipe_context *pipe,
                 DBG("Deleting blend state leak blend shaders bytecode\n");
         }
 
-        free(blend);
+        ralloc_free(blend);
 }
 
 static void
@@ -2241,15 +2526,6 @@ panfrost_set_viewport_states(struct pipe_context *pipe,
         assert(num_viewports == 1);
 
         ctx->pipe_viewport = *viewports;
-
-#if 0
-        /* TODO: What if not centered? */
-        float w = abs(viewports->scale[0]) * 2.0;
-        float h = abs(viewports->scale[1]) * 2.0;
-
-        ctx->viewport.viewport1[0] = MALI_POSITIVE((int) w);
-        ctx->viewport.viewport1[1] = MALI_POSITIVE((int) h);
-#endif
 }
 
 static void
@@ -2289,11 +2565,20 @@ panfrost_destroy(struct pipe_context *pipe)
         if (panfrost->blitter)
                 util_blitter_destroy(panfrost->blitter);
 
-        screen->driver->free_slab(screen, &panfrost->scratchpad);
-        screen->driver->free_slab(screen, &panfrost->varying_mem);
-        screen->driver->free_slab(screen, &panfrost->shaders);
-        screen->driver->free_slab(screen, &panfrost->tiler_heap);
-        screen->driver->free_slab(screen, &panfrost->misc_0);
+        panfrost_drm_free_slab(screen, &panfrost->scratchpad);
+        panfrost_drm_free_slab(screen, &panfrost->varying_mem);
+        panfrost_drm_free_slab(screen, &panfrost->shaders);
+        panfrost_drm_free_slab(screen, &panfrost->tiler_heap);
+        panfrost_drm_free_slab(screen, &panfrost->tiler_polygon_list);
+        panfrost_drm_free_slab(screen, &panfrost->tiler_dummy);
+
+        for (int i = 0; i < ARRAY_SIZE(panfrost->transient_pools); ++i) {
+                struct panfrost_memory_entry *entry;
+                entry = panfrost->transient_pools[i].entries[0];
+                pb_slab_free(&screen->slabs, (struct pb_slab_entry *)entry);
+        }
+
+        ralloc_free(pipe);
 }
 
 static struct pipe_query *
@@ -2301,7 +2586,7 @@ panfrost_create_query(struct pipe_context *pipe,
                      unsigned type,
                      unsigned index)
 {
-        struct panfrost_query *q = CALLOC_STRUCT(panfrost_query);
+        struct panfrost_query *q = rzalloc(pipe, struct panfrost_query);
 
         q->type = type;
         q->index = index;
@@ -2312,7 +2597,7 @@ panfrost_create_query(struct pipe_context *pipe,
 static void
 panfrost_destroy_query(struct pipe_context *pipe, struct pipe_query *q)
 {
-        FREE(q);
+        ralloc_free(q);
 }
 
 static boolean
@@ -2396,7 +2681,7 @@ panfrost_create_stream_output_target(struct pipe_context *pctx,
 {
         struct pipe_stream_output_target *target;
 
-        target = CALLOC_STRUCT(pipe_stream_output_target);
+        target = rzalloc(pctx, struct pipe_stream_output_target);
 
         if (!target)
                 return NULL;
@@ -2416,7 +2701,7 @@ panfrost_stream_output_target_destroy(struct pipe_context *pctx,
                                  struct pipe_stream_output_target *target)
 {
         pipe_resource_reference(&target->buffer, NULL);
-        free(target);
+        ralloc_free(target);
 }
 
 static void
@@ -2444,12 +2729,12 @@ panfrost_setup_hardware(struct panfrost_context *ctx)
                 ctx->transient_pools[i].entries[0] = (struct panfrost_memory_entry *) pb_slab_alloc(&screen->slabs, entry_size, HEAP_TRANSIENT);
         }
 
-        screen->driver->allocate_slab(screen, &ctx->scratchpad, 64, false, 0, 0, 0);
-        screen->driver->allocate_slab(screen, &ctx->varying_mem, 16384, false, PAN_ALLOCATE_INVISIBLE | PAN_ALLOCATE_COHERENT_LOCAL, 0, 0);
-        screen->driver->allocate_slab(screen, &ctx->shaders, 4096, true, PAN_ALLOCATE_EXECUTE, 0, 0);
-        screen->driver->allocate_slab(screen, &ctx->tiler_heap, 32768, false, PAN_ALLOCATE_INVISIBLE | PAN_ALLOCATE_GROWABLE, 1, 128);
-        screen->driver->allocate_slab(screen, &ctx->misc_0, 128*128, false, PAN_ALLOCATE_INVISIBLE | PAN_ALLOCATE_GROWABLE, 1, 128);
-
+        panfrost_drm_allocate_slab(screen, &ctx->scratchpad, 64, false, 0, 0, 0);
+        panfrost_drm_allocate_slab(screen, &ctx->varying_mem, 16384, false, PAN_ALLOCATE_INVISIBLE | PAN_ALLOCATE_COHERENT_LOCAL, 0, 0);
+        panfrost_drm_allocate_slab(screen, &ctx->shaders, 4096, true, PAN_ALLOCATE_EXECUTE, 0, 0);
+        panfrost_drm_allocate_slab(screen, &ctx->tiler_heap, 32768, false, PAN_ALLOCATE_INVISIBLE | PAN_ALLOCATE_GROWABLE, 1, 128);
+        panfrost_drm_allocate_slab(screen, &ctx->tiler_polygon_list, 128*128, false, PAN_ALLOCATE_INVISIBLE | PAN_ALLOCATE_GROWABLE, 1, 128);
+        panfrost_drm_allocate_slab(screen, &ctx->tiler_dummy, 1, false, PAN_ALLOCATE_INVISIBLE, 0, 0);
 }
 
 /* New context creation, which also does hardware initialisation since I don't
@@ -2458,13 +2743,13 @@ panfrost_setup_hardware(struct panfrost_context *ctx)
 struct pipe_context *
 panfrost_create_context(struct pipe_screen *screen, void *priv, unsigned flags)
 {
-        struct panfrost_context *ctx = CALLOC_STRUCT(panfrost_context);
+        struct panfrost_context *ctx = rzalloc(screen, struct panfrost_context);
         struct panfrost_screen *pscreen = pan_screen(screen);
         memset(ctx, 0, sizeof(*ctx));
         struct pipe_context *gallium = (struct pipe_context *) ctx;
         unsigned gpu_id;
 
-        gpu_id = pscreen->driver->query_gpu_version(pscreen);
+        gpu_id = panfrost_drm_query_gpu_version(pscreen);
 
         ctx->is_t6xx = gpu_id <= 0x0750; /* For now, this flag means T760 or less */
         ctx->require_sfbd = gpu_id < 0x0750; /* T760 is the first to support MFBD */
@@ -2494,7 +2779,7 @@ panfrost_create_context(struct pipe_screen *screen, void *priv, unsigned flags)
 
         gallium->create_vertex_elements_state = panfrost_create_vertex_elements_state;
         gallium->bind_vertex_elements_state = panfrost_bind_vertex_elements_state;
-        gallium->delete_vertex_elements_state = panfrost_delete_vertex_elements_state;
+        gallium->delete_vertex_elements_state = panfrost_generic_cso_delete;
 
         gallium->create_fs_state = panfrost_create_shader_state;
         gallium->delete_fs_state = panfrost_delete_shader_state;
@@ -2538,7 +2823,7 @@ panfrost_create_context(struct pipe_screen *screen, void *priv, unsigned flags)
 
         panfrost_resource_context_init(gallium);
 
-        pscreen->driver->init_context(ctx);
+        panfrost_drm_init_context(ctx);
 
         panfrost_setup_hardware(ctx);
 
@@ -2562,7 +2847,6 @@ panfrost_create_context(struct pipe_screen *screen, void *priv, unsigned flags)
         panfrost_emit_tiler_payload(ctx);
         panfrost_invalidate_frame(ctx);
         panfrost_default_shader_backend(ctx);
-        panfrost_generate_space_filler_indices();
 
         return gallium;
 }