panfrost/midgard: Include loop count for shader-db
[mesa.git] / src / gallium / drivers / panfrost / midgard / midgard_compile.c
index dd86e8bceeeb622a7eabf6cf0ddf77fa5e1a027e..1e09e46218c5100252e7e1017f57e3c08a6b6b1f 100644 (file)
@@ -36,6 +36,7 @@
 #include "main/imports.h"
 #include "compiler/nir/nir_builder.h"
 #include "util/half_float.h"
+#include "util/u_math.h"
 #include "util/u_debug.h"
 #include "util/u_dynarray.h"
 #include "util/list.h"
 static const struct debug_named_value debug_options[] = {
        {"msgs",      MIDGARD_DBG_MSGS,         "Print debug messages"},
        {"shaders",   MIDGARD_DBG_SHADERS,      "Dump shaders in NIR and MIR"},
+        {"shaderdb",  MIDGARD_DBG_SHADERDB,     "Prints shader-db statistics"},
        DEBUG_NAMED_VALUE_END
 };
 
 DEBUG_GET_ONCE_FLAGS_OPTION(midgard_debug, "MIDGARD_MESA_DEBUG", debug_options, 0)
 
+unsigned SHADER_DB_COUNT = 0;
+
 int midgard_debug = 0;
 
 #define DBG(fmt, ...) \
@@ -82,12 +86,12 @@ midgard_block_add_successor(midgard_block *block, midgard_block *successor)
  * driver seems to do it that way */
 
 #define EMIT(op, ...) emit_mir_instruction(ctx, v_##op(__VA_ARGS__));
-#define SWIZZLE_XYZW SWIZZLE(COMPONENT_X, COMPONENT_Y, COMPONENT_Z, COMPONENT_W)
 
 #define M_LOAD_STORE(name, rname, uname) \
        static midgard_instruction m_##name(unsigned ssa, unsigned address) { \
                midgard_instruction i = { \
                        .type = TAG_LOAD_STORE_4, \
+                        .mask = 0xF, \
                        .ssa_args = { \
                                .rname = ssa, \
                                .uname = -1, \
@@ -95,7 +99,6 @@ midgard_block_add_successor(midgard_block *block, midgard_block *successor)
                        }, \
                        .load_store = { \
                                .op = midgard_op_##name, \
-                               .mask = 0xF, \
                                .swizzle = SWIZZLE_XYZW, \
                                .address = address \
                        } \
@@ -111,21 +114,42 @@ midgard_block_add_successor(midgard_block *block, midgard_block *successor)
  * the corresponding Midgard source */
 
 static midgard_vector_alu_src
-vector_alu_modifiers(nir_alu_src *src, bool is_int)
+vector_alu_modifiers(nir_alu_src *src, bool is_int, unsigned broadcast_count,
+                bool half, bool sext)
 {
         if (!src) return blank_alu_src;
 
+        /* Adjust the swizzle for the number of meaningful components:
+         * broadcast the last valid channel into the remaining lanes so
+         * things like ball2/3 work.
+         */
+
+        if (broadcast_count) {
+                uint8_t last_component = src->swizzle[broadcast_count - 1];
+
+                for (unsigned c = broadcast_count; c < NIR_MAX_VEC_COMPONENTS; ++c) {
+                        src->swizzle[c] = last_component;
+                }
+        }
+
         midgard_vector_alu_src alu_src = {
                 .rep_low = 0,
                 .rep_high = 0,
-                .half = 0, /* TODO */
+                .half = half,
                 .swizzle = SWIZZLE_FROM_ARRAY(src->swizzle)
         };
 
         if (is_int) {
-                /* TODO: sign-extend/zero-extend */
                 alu_src.mod = midgard_int_normal;
 
+                /* Sign/zero-extend if needed */
+
+                if (half) {
+                        alu_src.mod = sext ?
+                                  midgard_int_sign_extend
+                                : midgard_int_zero_extend;
+                }
+
                 /* These should have been lowered away */
                 assert(!(src->abs || src->negate));
         } else {
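
A quick worked example of the broadcast logic above (a sketch, not part of
the patch): for nir_op_b32all_fequal3 the caller passes broadcast_count = 3,
so the unused fourth lane gets a copy of the last meaningful channel and the
always-vec4 fball_eq compares real data instead of garbage:

        static void
        broadcast_tail(uint8_t swizzle[4], unsigned broadcast_count)
        {
                uint8_t last = swizzle[broadcast_count - 1];

                for (unsigned c = broadcast_count; c < 4; ++c)
                        swizzle[c] = last;   /* {X,Y,Z,W}, count 3 -> {X,Y,Z,Z} */
        }
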
@@ -269,16 +293,59 @@ midgard_nir_sysval_for_intrinsic(nir_intrinsic_instr *instr)
         }
 }
 
-static void
-midgard_nir_assign_sysval_body(compiler_context *ctx, nir_instr *instr)
+static unsigned
+nir_dest_index(compiler_context *ctx, nir_dest *dst)
 {
+        if (dst->is_ssa)
+                return dst->ssa.index;
+        else {
+                assert(!dst->reg.indirect);
+                return ctx->func->impl->ssa_alloc + dst->reg.reg->index;
+        }
+}
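
The helper above keeps SSA values and NIR registers in a single index space:
SSA destinations keep their own index, while register r maps to
ssa_alloc + r, so the two ranges cannot collide. For example, with
impl->ssa_alloc == 40, register #3 becomes index 43.
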
+
+static int sysval_for_instr(compiler_context *ctx, nir_instr *instr,
+                            unsigned *dest)
+{
+        nir_intrinsic_instr *intr;
+        nir_dest *dst = NULL;
+        nir_tex_instr *tex;
         int sysval = -1;
 
-        if (instr->type == nir_instr_type_intrinsic) {
-                nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);
+        switch (instr->type) {
+        case nir_instr_type_intrinsic:
+                intr = nir_instr_as_intrinsic(instr);
                 sysval = midgard_nir_sysval_for_intrinsic(intr);
+                dst = &intr->dest;
+                break;
+        case nir_instr_type_tex:
+                tex = nir_instr_as_tex(instr);
+                if (tex->op != nir_texop_txs)
+                        break;
+
+                sysval = PAN_SYSVAL(TEXTURE_SIZE,
+                                   PAN_TXS_SYSVAL_ID(tex->texture_index,
+                                                     nir_tex_instr_dest_size(tex) -
+                                                     (tex->is_array ? 1 : 0),
+                                                     tex->is_array));
+                dst  = &tex->dest;
+                break;
+        default:
+                break;
         }
 
+        if (dest && dst)
+                *dest = nir_dest_index(ctx, dst);
+
+        return sysval;
+}
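
Illustrative sketch of the txs path above (the exact bit layout of
PAN_SYSVAL() and PAN_TXS_SYSVAL_ID() lives in the compiler headers, so treat
the packing details as an assumption): a txs on a 2D array texture at index 5
returns three components, the array axis is subtracted back out of the
dimension count, and the array flag is carried separately:

        /* nir_tex_instr_dest_size(tex) == 3, tex->is_array == true */
        unsigned dims = 3 - 1;                         /* 2D part only */
        int sysval = PAN_SYSVAL(TEXTURE_SIZE,
                                PAN_TXS_SYSVAL_ID(5, dims, true));
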
+
+static void
+midgard_nir_assign_sysval_body(compiler_context *ctx, nir_instr *instr)
+{
+        int sysval;
+
+        sysval = sysval_for_instr(ctx, instr, NULL);
         if (sysval < 0)
                 return;
 
@@ -340,6 +407,8 @@ midgard_nir_lower_fdot2(nir_shader *shader)
         return progress;
 }
 
+/* Flushes undefined values to zero */
+
 static void
 optimise_nir(nir_shader *nir)
 {
@@ -353,11 +422,17 @@ optimise_nir(nir_shader *nir)
         NIR_PASS(progress, nir, midgard_nir_lower_fdot2);
         NIR_PASS(progress, nir, nir_lower_idiv);
 
-        nir_lower_tex_options lower_tex_options = {
-                .lower_rect = true
+        nir_lower_tex_options lower_tex_1st_pass_options = {
+                .lower_rect = true,
+                .lower_txp = ~0
         };
 
-        NIR_PASS(progress, nir, nir_lower_tex, &lower_tex_options);
+        nir_lower_tex_options lower_tex_2nd_pass_options = {
+                .lower_txs_lod = true,
+        };
+
+        NIR_PASS(progress, nir, nir_lower_tex, &lower_tex_1st_pass_options);
+        NIR_PASS(progress, nir, nir_lower_tex, &lower_tex_2nd_pass_options);
 
         do {
                 progress = false;
@@ -394,13 +469,14 @@ optimise_nir(nir_shader *nir)
                 }
 
                 NIR_PASS(progress, nir, nir_opt_undef);
+                NIR_PASS(progress, nir, nir_undef_to_zero);
+
                 NIR_PASS(progress, nir, nir_opt_loop_unroll,
                          nir_var_shader_in |
                          nir_var_shader_out |
                          nir_var_function_temp);
 
-                /* TODO: Enable vectorize when merged upstream */
-                // NIR_PASS(progress, nir, nir_opt_vectorize);
+                NIR_PASS(progress, nir, nir_opt_vectorize);
         } while (progress);
 
         /* Must be run at the end to prevent creation of fsin/fcos ops */
@@ -485,17 +561,6 @@ nir_src_index(compiler_context *ctx, nir_src *src)
         }
 }
 
-static unsigned
-nir_dest_index(compiler_context *ctx, nir_dest *dst)
-{
-        if (dst->is_ssa)
-                return dst->ssa.index;
-        else {
-                assert(!dst->reg.indirect);
-                return ctx->func->impl->ssa_alloc + dst->reg.reg->index;
-        }
-}
-
 static unsigned
 nir_alu_src_index(compiler_context *ctx, nir_alu_src *src)
 {
@@ -538,19 +603,19 @@ emit_condition(compiler_context *ctx, nir_src *src, bool for_branch, unsigned co
                 /* We need to set the conditional as close as possible */
                 .precede_break = true,
                 .unit = for_branch ? UNIT_SMUL : UNIT_SADD,
+                .mask = 1 << COMPONENT_W,
 
                 .ssa_args = {
-
                         .src0 = condition,
                         .src1 = condition,
                         .dest = SSA_FIXED_REGISTER(31),
                 },
+
                 .alu = {
                         .op = midgard_alu_op_iand,
-                        .outmod = midgard_outmod_int,
+                        .outmod = midgard_outmod_int_wrap,
                         .reg_mode = midgard_reg_mode_32,
                         .dest_override = midgard_dest_override_none,
-                        .mask = (0x3 << 6), /* w */
                         .src1 = vector_alu_srco_unsigned(alu_src),
                         .src2 = vector_alu_srco_unsigned(alu_src)
                 },
@@ -579,6 +644,7 @@ emit_condition_mixed(compiler_context *ctx, nir_alu_src *src, unsigned nr_comp)
         midgard_instruction ins = {
                 .type = TAG_ALU_4,
                 .precede_break = true,
+                .mask = mask_of(nr_comp),
                 .ssa_args = {
                         .src0 = condition,
                         .src1 = condition,
@@ -586,10 +652,9 @@ emit_condition_mixed(compiler_context *ctx, nir_alu_src *src, unsigned nr_comp)
                 },
                 .alu = {
                         .op = midgard_alu_op_iand,
-                        .outmod = midgard_outmod_int,
+                        .outmod = midgard_outmod_int_wrap,
                         .reg_mode = midgard_reg_mode_32,
                         .dest_override = midgard_dest_override_none,
-                        .mask = expand_writemask((1 << nr_comp) - 1),
                         .src1 = vector_alu_srco_unsigned(alu_src),
                         .src2 = vector_alu_srco_unsigned(alu_src)
                 },
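
The mask changes through these hunks follow one pattern: the new MIR-level
ins.mask is one bit per component (NIR style; mask_of(n) expands to
(1 << n) - 1), while the hardware ALU word uses one bit per 16-bit lane, so
for 32-bit ops each component widens to two adjacent bits at pack time. A
sketch of the expansion the deleted inline expand_writemask() calls used to
perform (the real version lives in the packing path):

        static unsigned
        expand_writemask_32(unsigned mask)
        {
                unsigned expanded = 0;

                for (unsigned c = 0; c < 4; ++c)
                        if (mask & (1 << c))
                                expanded |= 0x3 << (2 * c);

                return expanded;   /* 1 << COMPONENT_W  ->  0x3 << 6, i.e. w */
        }
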
@@ -610,6 +675,7 @@ emit_indirect_offset(compiler_context *ctx, nir_src *src)
 
         midgard_instruction ins = {
                 .type = TAG_ALU_4,
+                .mask = 1 << COMPONENT_W,
                 .ssa_args = {
                         .src0 = SSA_UNUSED_1,
                         .src1 = offset,
@@ -617,10 +683,9 @@ emit_indirect_offset(compiler_context *ctx, nir_src *src)
                 },
                 .alu = {
                         .op = midgard_alu_op_imov,
-                        .outmod = midgard_outmod_int,
+                        .outmod = midgard_outmod_int_wrap,
                         .reg_mode = midgard_reg_mode_32,
                         .dest_override = midgard_dest_override_none,
-                        .mask = (0x3 << 6), /* w */
                         .src1 = vector_alu_srco_unsigned(zero_alu_src),
                         .src2 = vector_alu_srco_unsigned(blank_alu_src_xxxx)
                 },
@@ -632,7 +697,15 @@ emit_indirect_offset(compiler_context *ctx, nir_src *src)
 #define ALU_CASE(nir, _op) \
        case nir_op_##nir: \
                op = midgard_alu_op_##_op; \
+                assert(src_bitsize == dst_bitsize); \
                break;
+
+#define ALU_CASE_BCAST(nir, _op, count) \
+        case nir_op_##nir: \
+                op = midgard_alu_op_##_op; \
+                broadcast_swizzle = count; \
+                assert(src_bitsize == dst_bitsize); \
+                break;
 static bool
 nir_is_fzero_constant(nir_src src)
 {
@@ -647,13 +720,35 @@ nir_is_fzero_constant(nir_src src)
         return true;
 }
 
+/* Analyze the sizes of the inputs to determine which reg mode. Ops needing
+ * special treatment override this anyway. */
+
+static midgard_reg_mode
+reg_mode_for_nir(nir_alu_instr *instr)
+{
+        unsigned src_bitsize = nir_src_bit_size(instr->src[0].src);
+
+        switch (src_bitsize) {
+                case 8:
+                        return midgard_reg_mode_8;
+                case 16:
+                        return midgard_reg_mode_16;
+                case 32:
+                        return midgard_reg_mode_32;
+                case 64:
+                        return midgard_reg_mode_64;
+                default:
+                        unreachable("Invalid bit size");
+        }
+}
+
 static void
 emit_alu(compiler_context *ctx, nir_alu_instr *instr)
 {
         bool is_ssa = instr->dest.dest.is_ssa;
 
         unsigned dest = nir_dest_index(ctx, &instr->dest.dest);
-        unsigned nr_components = is_ssa ? instr->dest.dest.ssa.num_components : instr->dest.dest.reg.reg->num_components;
+        unsigned nr_components = nir_dest_num_components(instr->dest.dest);
         unsigned nr_inputs = nir_op_infos[instr->op].num_inputs;
 
         /* Most Midgard ALU ops have a 1:1 correspondance to NIR ops; these are
@@ -665,6 +760,31 @@ emit_alu(compiler_context *ctx, nir_alu_instr *instr)
 
         unsigned op;
 
+        /* Number of components valid to check for the instruction (the rest
+         * will be forced to the last), or 0 to use as-is. Relevant as
+         * ball-type instructions have a channel count in NIR but are all vec4
+         * in Midgard */
+
+        unsigned broadcast_swizzle = 0;
+
+        /* What register mode should we operate in? */
+        midgard_reg_mode reg_mode =
+                reg_mode_for_nir(instr);
+
+        /* Do we need a destination override? Used for inline
+         * type conversion */
+
+        midgard_dest_override dest_override =
+                midgard_dest_override_none;
+
+        /* Should we read the respective source at half size and
+         * sign/zero-extend it? */
+
+        bool half_1 = false, sext_1 = false;
+        bool half_2 = false, sext_2 = false;
+
+        unsigned src_bitsize = nir_src_bit_size(instr->src[0].src);
+        unsigned dst_bitsize = nir_dest_bit_size(instr->dest.dest);
+
         switch (instr->op) {
                 ALU_CASE(fadd, fadd);
                 ALU_CASE(fmul, fmul);
@@ -723,36 +843,42 @@ emit_alu(compiler_context *ctx, nir_alu_instr *instr)
                 ALU_CASE(fexp2, fexp2);
                 ALU_CASE(flog2, flog2);
 
-                ALU_CASE(f2i32, f2i);
-                ALU_CASE(f2u32, f2u);
-                ALU_CASE(i2f32, i2f);
-                ALU_CASE(u2f32, u2f);
+                ALU_CASE(f2i32, f2i_rtz);
+                ALU_CASE(f2u32, f2u_rtz);
+                ALU_CASE(i2f32, i2f_rtz);
+                ALU_CASE(u2f32, u2f_rtz);
+
+                ALU_CASE(f2i16, f2i_rtz);
+                ALU_CASE(f2u16, f2u_rtz);
+                ALU_CASE(i2f16, i2f_rtz);
+                ALU_CASE(u2f16, u2f_rtz);
 
                 ALU_CASE(fsin, fsin);
                 ALU_CASE(fcos, fcos);
 
+                /* Second op implicit #0 */
+                ALU_CASE(inot, inor);
                 ALU_CASE(iand, iand);
                 ALU_CASE(ior, ior);
                 ALU_CASE(ixor, ixor);
-                ALU_CASE(inot, inand);
                 ALU_CASE(ishl, ishl);
                 ALU_CASE(ishr, iasr);
                 ALU_CASE(ushr, ilsr);
 
-                ALU_CASE(b32all_fequal2, fball_eq);
-                ALU_CASE(b32all_fequal3, fball_eq);
+                ALU_CASE_BCAST(b32all_fequal2, fball_eq, 2);
+                ALU_CASE_BCAST(b32all_fequal3, fball_eq, 3);
                 ALU_CASE(b32all_fequal4, fball_eq);
 
-                ALU_CASE(b32any_fnequal2, fbany_neq);
-                ALU_CASE(b32any_fnequal3, fbany_neq);
+                ALU_CASE_BCAST(b32any_fnequal2, fbany_neq, 2);
+                ALU_CASE_BCAST(b32any_fnequal3, fbany_neq, 3);
                 ALU_CASE(b32any_fnequal4, fbany_neq);
 
-                ALU_CASE(b32all_iequal2, iball_eq);
-                ALU_CASE(b32all_iequal3, iball_eq);
+                ALU_CASE_BCAST(b32all_iequal2, iball_eq, 2);
+                ALU_CASE_BCAST(b32all_iequal3, iball_eq, 3);
                 ALU_CASE(b32all_iequal4, iball_eq);
 
-                ALU_CASE(b32any_inequal2, ibany_neq);
-                ALU_CASE(b32any_inequal3, ibany_neq);
+                ALU_CASE_BCAST(b32any_inequal2, ibany_neq, 2);
+                ALU_CASE_BCAST(b32any_inequal3, ibany_neq, 3);
                 ALU_CASE(b32any_inequal4, ibany_neq);
 
                 /* Source mods will be shoved in later */
@@ -760,6 +886,57 @@ emit_alu(compiler_context *ctx, nir_alu_instr *instr)
                 ALU_CASE(fneg, fmov);
                 ALU_CASE(fsat, fmov);
 
+        /* For size conversion, we use a move. Ideally though we would squash
+         * these ops together; maybe that has to happen later in NIR as part of
+         * propagation...? An earlier algebraic pass ensured we step up or down
+         * by exactly one size. If stepping down, we use a dest override to
+         * reduce the size; if stepping up, we use a larger-sized move with a
+         * half source and a sign/zero-extension modifier */
+
+        case nir_op_i2i8:
+        case nir_op_i2i16:
+        case nir_op_i2i32:
+                /* If we end up upscaling, we'll need a sign-extend on the
+                 * operand (the second argument); note the deliberate
+                 * fall-through to the unsigned cases below */
+
+                sext_2 = true;
+        case nir_op_u2u8:
+        case nir_op_u2u16:
+        case nir_op_u2u32: {
+                op = midgard_alu_op_imov;
+
+                if (dst_bitsize == (src_bitsize * 2)) {
+                        /* Converting up */
+                        half_2 = true;
+
+                        /* Use a greater register mode */
+                        reg_mode++;
+                } else if (src_bitsize == (dst_bitsize * 2)) {
+                        /* Converting down */
+                        dest_override = midgard_dest_override_lower;
+                }
+
+                break;
+        }
+
+        case nir_op_f2f16: {
+                assert(src_bitsize == 32);
+
+                op = midgard_alu_op_fmov;
+                dest_override = midgard_dest_override_lower;
+                break;
+        }
+
+        case nir_op_f2f32: {
+                assert(src_bitsize == 16);
+
+                op = midgard_alu_op_fmov;
+                half_2 = true;
+                reg_mode++;
+                break;
+        }
+
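
A sketch of how the conversion cases combine, assuming the midgard_reg_mode
enum orders 8 < 16 < 32 < 64 (which is what reg_mode++ relies on): a u2u32 of
a 16-bit source steps up exactly one size, so the source is marked half-width
and the op runs in 32-bit mode with zero-extension; i2i32 takes the same path
but sign-extends thanks to the fall-through above.

        /* Hypothetical 16-bit -> 32-bit widen, per the logic above:
         * dst_bitsize == 2 * src_bitsize */
        op = midgard_alu_op_imov;
        half_2 = true;       /* read src2 at half width */
        reg_mode++;          /* midgard_reg_mode_16 -> midgard_reg_mode_32 */

        /* vector_alu_modifiers(..., half_2, sext_2) then selects
         * midgard_int_zero_extend (u2u) or midgard_int_sign_extend (i2i) */
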
         /* For greater-or-equal, we lower to less-or-equal and flip the
          * arguments */
 
@@ -820,12 +997,14 @@ emit_alu(compiler_context *ctx, nir_alu_instr *instr)
         }
 
         /* Midgard can perform certain modifiers on output of an ALU op */
-        midgard_outmod outmod =
-                midgard_is_integer_out_op(op) ? midgard_outmod_int :
-                instr->dest.saturate ? midgard_outmod_sat : midgard_outmod_none;
+        unsigned outmod;
 
-        if (instr->op == nir_op_fsat)
-                outmod = midgard_outmod_sat;
+        if (midgard_is_integer_out_op(op)) {
+                outmod = midgard_outmod_int_wrap;
+        } else {
+                bool sat = instr->dest.saturate || instr->op == nir_op_fsat;
+                outmod = sat ? midgard_outmod_sat : midgard_outmod_none;
+        }
 
         /* fmax(a, 0.0) can turn into a .pos modifier as an optimization */
 
@@ -890,23 +1069,22 @@ emit_alu(compiler_context *ctx, nir_alu_instr *instr)
 
         bool is_int = midgard_is_integer_op(op);
 
+        ins.mask = mask_of(nr_components);
+
         midgard_vector_alu alu = {
                 .op = op,
-                .reg_mode = midgard_reg_mode_32,
-                .dest_override = midgard_dest_override_none,
+                .reg_mode = reg_mode,
+                .dest_override = dest_override,
                 .outmod = outmod,
 
-                /* Writemask only valid for non-SSA NIR */
-                .mask = expand_writemask((1 << nr_components) - 1),
-
-                .src1 = vector_alu_srco_unsigned(vector_alu_modifiers(nirmods[0], is_int)),
-                .src2 = vector_alu_srco_unsigned(vector_alu_modifiers(nirmods[1], is_int)),
+                .src1 = vector_alu_srco_unsigned(vector_alu_modifiers(nirmods[0], is_int, broadcast_swizzle, half_1, sext_1)),
+                .src2 = vector_alu_srco_unsigned(vector_alu_modifiers(nirmods[1], is_int, broadcast_swizzle, half_2, sext_2)),
         };
 
         /* Apply writemask if non-SSA, keeping in mind that we can't write to components that don't exist */
 
         if (!is_ssa)
-                alu.mask &= expand_writemask(instr->dest.write_mask);
+                ins.mask &= instr->dest.write_mask;
 
         ins.alu = alu;
 
@@ -951,14 +1129,22 @@ emit_alu(compiler_context *ctx, nir_alu_instr *instr)
 
                 uint8_t original_swizzle[4];
                 memcpy(original_swizzle, nirmods[0]->swizzle, sizeof(nirmods[0]->swizzle));
+                unsigned orig_mask = ins.mask;
 
                 for (int i = 0; i < nr_components; ++i) {
-                        ins.alu.mask = (0x3) << (2 * i); /* Mask the associated component */
+                        /* Mask the associated component, dropping the
+                         * instruction if needed */
+
+                        ins.mask = 1 << i;
+                        ins.mask &= orig_mask;
+
+                        if (!ins.mask)
+                                continue;
 
                         for (int j = 0; j < 4; ++j)
                                 nirmods[0]->swizzle[j] = original_swizzle[i]; /* Pull from the correct component */
 
-                        ins.alu.src1 = vector_alu_srco_unsigned(vector_alu_modifiers(nirmods[0], is_int));
+                        ins.alu.src1 = vector_alu_srco_unsigned(vector_alu_modifiers(nirmods[0], is_int, broadcast_swizzle, half_1, false));
                         emit_mir_instruction(ctx, ins);
                 }
         } else {
@@ -968,12 +1154,20 @@ emit_alu(compiler_context *ctx, nir_alu_instr *instr)
 
 #undef ALU_CASE
 
+/* Uniforms and UBOs use a shared code path, as uniforms are just (slightly
+ * optimized) versions of UBO #0 */
+
 static void
-emit_uniform_read(compiler_context *ctx, unsigned dest, unsigned offset, nir_src *indirect_offset)
+emit_ubo_read(
+                compiler_context *ctx,
+                unsigned dest,
+                unsigned offset,
+                nir_src *indirect_offset,
+                unsigned index)
 {
         /* TODO: half-floats */
 
-        if (!indirect_offset && offset < ctx->uniform_cutoff) {
+        if (!indirect_offset && offset < ctx->uniform_cutoff && index == 0) {
                 /* Fast path: For the first 16 uniforms, direct accesses are
                  * 0-cycle, since they're just a register fetch in the usual
                  * case.  So, we alias the registers while we're still in
@@ -994,109 +1188,89 @@ emit_uniform_read(compiler_context *ctx, unsigned dest, unsigned offset, nir_src
 
                 if (indirect_offset) {
                         emit_indirect_offset(ctx, indirect_offset);
-                        ins.load_store.unknown = 0x8700; /* xxx: what is this? */
+                        ins.load_store.unknown = 0x8700 | index; /* xxx: what is this? */
                 } else {
-                        ins.load_store.unknown = 0x1E00; /* xxx: what is this? */
+                        ins.load_store.unknown = 0x1E00 | index; /* xxx: what is this? */
                 }
 
+                /* TODO respect index */
+
                 emit_mir_instruction(ctx, ins);
         }
 }
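
Usage sketch for the shared helper (names as in this patch): index 0 means
the uniform buffer itself, and UBO binding i is passed as i + 1, exactly as
the load_ubo path further down does:

        /* Plain uniform at uniform-space offset `off`, never indirect */
        emit_ubo_read(ctx, dest, ctx->sysval_count + off, NULL, 0);

        /* UBO binding `b`, byte offset `byte_off` (16-byte aligned) */
        emit_ubo_read(ctx, dest, byte_off / 16, NULL, b + 1);
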
 
 static void
-emit_sysval_read(compiler_context *ctx, nir_intrinsic_instr *instr)
+emit_varying_read(
+                compiler_context *ctx,
+                unsigned dest, unsigned offset,
+                unsigned nr_comp, unsigned component,
+                nir_src *indirect_offset, nir_alu_type type)
 {
-        /* First, pull out the destination */
-        unsigned dest = nir_dest_index(ctx, &instr->dest);
-
-        /* Now, figure out which uniform this is */
-        int sysval = midgard_nir_sysval_for_intrinsic(instr);
-        void *val = _mesa_hash_table_u64_search(ctx->sysval_to_id, sysval);
-
-        /* Sysvals are prefix uniforms */
-        unsigned uniform = ((uintptr_t) val) - 1;
-
-        /* Emit the read itself -- this is never indirect */
-        emit_uniform_read(ctx, dest, uniform, NULL);
-}
+        /* XXX: Half-floats? */
+        /* TODO: swizzle, mask */
 
-/* Reads RGBA8888 value from the tilebuffer and converts to a RGBA32F register,
- * using scalar ops functional on earlier Midgard generations. Newer Midgard
- * generations have faster vectorized reads. This operation is for blend
- * shaders in particular; reading the tilebuffer from the fragment shader
- * remains an open problem. */
+        midgard_instruction ins = m_ld_vary_32(dest, offset);
+        ins.mask = mask_of(nr_comp);
+        ins.load_store.swizzle = SWIZZLE_XYZW >> (2 * component);
 
-static void
-emit_fb_read_blend_scalar(compiler_context *ctx, unsigned reg)
-{
-        midgard_instruction ins = m_ld_color_buffer_8(reg, 0);
-        ins.load_store.swizzle = 0; /* xxxx */
+        midgard_varying_parameter p = {
+                .is_varying = 1,
+                .interpolation = midgard_interp_default,
+                .flat = /*var->data.interpolation == INTERP_MODE_FLAT*/ 0
+        };
 
-        /* Read each component sequentially */
+        unsigned u;
+        memcpy(&u, &p, sizeof(p));
+        ins.load_store.varying_parameters = u;
 
-        for (unsigned c = 0; c < 4; ++c) {
-                ins.load_store.mask = (1 << c);
-                ins.load_store.unknown = c;
-                emit_mir_instruction(ctx, ins);
+        if (indirect_offset) {
+                /* We need to add in the dynamic index, moved to r27.w */
+                emit_indirect_offset(ctx, indirect_offset);
+                ins.load_store.unknown = 0x79e; /* xxx: what is this? */
+        } else {
+                /* Just a direct load */
+                ins.load_store.unknown = 0x1e9e; /* xxx: what is this? */
         }
 
-        /* vadd.u2f hr2, zext(hr2), #0 */
-
-        midgard_vector_alu_src alu_src = blank_alu_src;
-        alu_src.mod = midgard_int_zero_extend;
-        alu_src.half = true;
-
-        midgard_instruction u2f = {
-                .type = TAG_ALU_4,
-                .ssa_args = {
-                        .src0 = reg,
-                        .src1 = SSA_UNUSED_0,
-                        .dest = reg,
-                        .inline_constant = true
-                },
-                .alu = {
-                        .op = midgard_alu_op_u2f,
-                        .reg_mode = midgard_reg_mode_16,
-                        .dest_override = midgard_dest_override_none,
-                        .mask = 0xF,
-                        .src1 = vector_alu_srco_unsigned(alu_src),
-                        .src2 = vector_alu_srco_unsigned(blank_alu_src),
-                }
-        };
-
-        emit_mir_instruction(ctx, u2f);
+        /* Use the type appropriate load */
+        switch (type) {
+                case nir_type_uint:
+                case nir_type_bool:
+                        ins.load_store.op = midgard_op_ld_vary_32u;
+                        break;
+                case nir_type_int:
+                        ins.load_store.op = midgard_op_ld_vary_32i;
+                        break;
+                case nir_type_float:
+                        ins.load_store.op = midgard_op_ld_vary_32;
+                        break;
+                default:
+                        unreachable("Attempted to load unknown type");
+                        break;
+        }
 
-        /* vmul.fmul.sat r1, hr2, #0.00392151 */
+        emit_mir_instruction(ctx, ins);
+}
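
The component handling above leans on Mesa's 2-bit-per-lane swizzle encoding
(an assumption worth flagging: X=0 through W=3, packed low to high, so
SWIZZLE_XYZW == 0xE4). Shifting right by 2 * component slides the read
window down:

        unsigned swz = SWIZZLE_XYZW >> (2 * 1);   /* component == 1 */
        /* swz == SWIZZLE(Y, Z, W, X): dest.x reads the varying's .y,
         * dest.y reads .z, dest.z reads .w */
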
 
-        alu_src.mod = 0;
+static void
+emit_sysval_read(compiler_context *ctx, nir_instr *instr)
+{
+        unsigned dest;
+        /* Figure out which uniform this is */
+        int sysval = sysval_for_instr(ctx, instr, &dest);
+        void *val = _mesa_hash_table_u64_search(ctx->sysval_to_id, sysval);
 
-        midgard_instruction fmul = {
-                .type = TAG_ALU_4,
-                .inline_constant = _mesa_float_to_half(1.0 / 255.0),
-                .ssa_args = {
-                        .src0 = reg,
-                        .dest = reg,
-                        .src1 = SSA_UNUSED_0,
-                        .inline_constant = true
-                },
-                .alu = {
-                        .op = midgard_alu_op_fmul,
-                        .reg_mode = midgard_reg_mode_32,
-                        .dest_override = midgard_dest_override_none,
-                        .outmod = midgard_outmod_sat,
-                        .mask = 0xFF,
-                        .src1 = vector_alu_srco_unsigned(alu_src),
-                        .src2 = vector_alu_srco_unsigned(blank_alu_src),
-                }
-        };
+        /* Sysvals are prefix uniforms */
+        unsigned uniform = ((uintptr_t) val) - 1;
 
-        emit_mir_instruction(ctx, fmul);
+        /* Emit the read itself -- this is never indirect */
+        emit_ubo_read(ctx, dest, uniform, NULL, 0);
 }
 
 static void
 emit_intrinsic(compiler_context *ctx, nir_intrinsic_instr *instr)
 {
-        unsigned offset, reg;
+        unsigned offset = 0, reg;
 
         switch (instr->intrinsic) {
         case nir_intrinsic_discard_if:
@@ -1115,58 +1289,85 @@ emit_intrinsic(compiler_context *ctx, nir_intrinsic_instr *instr)
         }
 
         case nir_intrinsic_load_uniform:
-        case nir_intrinsic_load_input:
-                offset = nir_intrinsic_base(instr);
+        case nir_intrinsic_load_ubo:
+        case nir_intrinsic_load_input: {
+                bool is_uniform = instr->intrinsic == nir_intrinsic_load_uniform;
+                bool is_ubo = instr->intrinsic == nir_intrinsic_load_ubo;
+
+                /* Get the base type of the intrinsic */
+                /* TODO: Infer type? Does it matter? */
+                nir_alu_type t =
+                        is_ubo ? nir_type_uint : nir_intrinsic_type(instr);
+                t = nir_alu_type_get_base_type(t);
+
+                if (!is_ubo) {
+                        offset = nir_intrinsic_base(instr);
+                }
 
                 unsigned nr_comp = nir_intrinsic_dest_components(instr);
-                bool direct = nir_src_is_const(instr->src[0]);
 
-                if (direct) {
-                        offset += nir_src_as_uint(instr->src[0]);
-                }
+                nir_src *src_offset = nir_get_io_offset_src(instr);
 
-                reg = nir_dest_index(ctx, &instr->dest);
+                bool direct = nir_src_is_const(*src_offset);
 
-                if (instr->intrinsic == nir_intrinsic_load_uniform && !ctx->is_blend) {
-                        emit_uniform_read(ctx, reg, ctx->sysval_count + offset, !direct ? &instr->src[0] : NULL);
-                } else if (ctx->stage == MESA_SHADER_FRAGMENT && !ctx->is_blend) {
-                        /* XXX: Half-floats? */
-                        /* TODO: swizzle, mask */
+                if (direct)
+                        offset += nir_src_as_uint(*src_offset);
 
-                        midgard_instruction ins = m_ld_vary_32(reg, offset);
-                        ins.load_store.mask = (1 << nr_comp) - 1;
+                /* We may need to apply a fractional offset */
+                int component = instr->intrinsic == nir_intrinsic_load_input ?
+                        nir_intrinsic_component(instr) : 0;
+                reg = nir_dest_index(ctx, &instr->dest);
 
-                        midgard_varying_parameter p = {
-                                .is_varying = 1,
-                                .interpolation = midgard_interp_default,
-                                .flat = /*var->data.interpolation == INTERP_MODE_FLAT*/ 0
-                        };
+                if (is_uniform && !ctx->is_blend) {
+                        emit_ubo_read(ctx, reg, ctx->sysval_count + offset, !direct ? &instr->src[0] : NULL, 0);
+                } else if (is_ubo) {
+                        nir_src index = instr->src[0];
 
-                        unsigned u;
-                        memcpy(&u, &p, sizeof(p));
-                        ins.load_store.varying_parameters = u;
+                        /* We don't yet support indirect UBOs. For indirect
+                         * block numbers (if that's even possible), we don't
+                         * know enough about the hardware yet. For indirect
+                         * sources, we know what we need, but NIR first needs
+                         * support for lowering them correctly with respect to
+                         * 128-bit reads */
 
-                        if (direct) {
-                                /* We have the offset totally ready */
-                                ins.load_store.unknown = 0x1e9e; /* xxx: what is this? */
-                        } else {
-                                /* We have it partially ready, but we need to
-                                 * add in the dynamic index, moved to r27.w */
-                                emit_indirect_offset(ctx, &instr->src[0]);
-                                ins.load_store.unknown = 0x79e; /* xxx: what is this? */
-                        }
+                        assert(nir_src_is_const(index));
+                        assert(nir_src_is_const(*src_offset));
 
-                        emit_mir_instruction(ctx, ins);
+                        /* TODO: Alignment */
+                        assert((offset & 0xF) == 0);
+
+                        uint32_t uindex = nir_src_as_uint(index) + 1;
+                        emit_ubo_read(ctx, reg, offset / 16, NULL, uindex);
+                } else if (ctx->stage == MESA_SHADER_FRAGMENT && !ctx->is_blend) {
+                        emit_varying_read(ctx, reg, offset, nr_comp, component, !direct ? &instr->src[0] : NULL, t);
                 } else if (ctx->is_blend) {
                         /* For blend shaders, load the input color, which is
                          * preloaded to r0 */
 
-                        midgard_instruction move = v_fmov(reg, blank_alu_src, SSA_FIXED_REGISTER(0));
+                        midgard_instruction move = v_mov(reg, blank_alu_src, SSA_FIXED_REGISTER(0));
                         emit_mir_instruction(ctx, move);
                 }  else if (ctx->stage == MESA_SHADER_VERTEX) {
                         midgard_instruction ins = m_ld_attr_32(reg, offset);
                         ins.load_store.unknown = 0x1E1E; /* XXX: What is this? */
-                        ins.load_store.mask = (1 << nr_comp) - 1;
+                        ins.mask = mask_of(nr_comp);
+
+                        /* Use the type appropriate load */
+                        switch (t) {
+                                case nir_type_uint:
+                                case nir_type_bool:
+                                        ins.load_store.op = midgard_op_ld_attr_32u;
+                                        break;
+                                case nir_type_int:
+                                        ins.load_store.op = midgard_op_ld_attr_32i;
+                                        break;
+                                case nir_type_float:
+                                        ins.load_store.op = midgard_op_ld_attr_32;
+                                        break;
+                                default:
+                                        unreachable("Attempted to load unknown type");
+                                        break;
+                        }
+
                         emit_mir_instruction(ctx, ins);
                 } else {
                         DBG("Unknown load\n");
@@ -1174,19 +1375,16 @@ emit_intrinsic(compiler_context *ctx, nir_intrinsic_instr *instr)
                 }
 
                 break;
+       }
 
-        case nir_intrinsic_load_output:
-                assert(nir_src_is_const(instr->src[0]));
-                reg = nir_dest_index(ctx, &instr->dest);
+        /* Reads 128-bit value raw off the tilebuffer during blending, tasty */
 
-                if (ctx->is_blend) {
-                        /* TODO: MRT */
-                        emit_fb_read_blend_scalar(ctx, reg);
-                } else {
-                        DBG("Unknown output load\n");
-                        assert(0);
-                }
+        case nir_intrinsic_load_raw_output_pan:
+                reg = nir_dest_index(ctx, &instr->dest);
+                assert(ctx->is_blend);
 
+                midgard_instruction ins = m_ld_color_buffer_8(reg, 0);
+                emit_mir_instruction(ctx, ins);
                 break;
 
         case nir_intrinsic_load_blend_const_color_rgba: {
@@ -1196,7 +1394,7 @@ emit_intrinsic(compiler_context *ctx, nir_intrinsic_instr *instr)
                 /* Blend constants are embedded directly in the shader and
                  * patched in, so we use some magic routing */
 
-                midgard_instruction ins = v_fmov(SSA_FIXED_REGISTER(REGISTER_CONSTANT), blank_alu_src, reg);
+                midgard_instruction ins = v_mov(SSA_FIXED_REGISTER(REGISTER_CONSTANT), blank_alu_src, reg);
                 ins.has_constants = true;
                 ins.has_blend_constant = true;
                 emit_mir_instruction(ctx, ins);
@@ -1217,7 +1415,7 @@ emit_intrinsic(compiler_context *ctx, nir_intrinsic_instr *instr)
                          * framebuffer writeout dance. TODO: Defer
                          * writes */
 
-                        midgard_instruction move = v_fmov(reg, blank_alu_src, SSA_FIXED_REGISTER(0));
+                        midgard_instruction move = v_mov(reg, blank_alu_src, SSA_FIXED_REGISTER(0));
                         emit_mir_instruction(ctx, move);
 
                         /* Save the index we're writing to for later reference
@@ -1226,44 +1424,27 @@ emit_intrinsic(compiler_context *ctx, nir_intrinsic_instr *instr)
                         ctx->fragment_output = reg;
                 } else if (ctx->stage == MESA_SHADER_VERTEX) {
                         /* Varyings are written into one of two special
-                         * varying register, r26 or r27. The register itself is selected as the register
-                         * in the st_vary instruction, minus the base of 26. E.g. write into r27 and then call st_vary(1)
-                         *
-                         * Normally emitting fmov's is frowned upon,
-                         * but due to unique constraints of
-                         * REGISTER_VARYING, fmov emission + a
-                         * dedicated cleanup pass is the only way to
-                         * guarantee correctness when considering some
-                         * (common) edge cases XXX: FIXME */
-
-                        /* If this varying corresponds to a constant (why?!),
-                         * emit that now since it won't get picked up by
-                         * hoisting (since there is no corresponding move
-                         * emitted otherwise) */
-
-                        void *constant_value = _mesa_hash_table_u64_search(ctx->ssa_constants, reg + 1);
-
-                        if (constant_value) {
-                                /* Special case: emit the varying write
-                                 * directly to r26 (looks funny in asm but it's
-                                 * fine) and emit the store _now_. Possibly
-                                 * slightly slower, but this is a really stupid
-                                 * special case anyway (why on earth would you
-                                 * have a constant varying? Your own fault for
-                                 * slightly worse perf :P) */
-
-                                midgard_instruction ins = v_fmov(SSA_FIXED_REGISTER(REGISTER_CONSTANT), blank_alu_src, SSA_FIXED_REGISTER(26));
-                                attach_constants(ctx, &ins, constant_value, reg + 1);
-                                emit_mir_instruction(ctx, ins);
-
-                                midgard_instruction st = m_st_vary_32(SSA_FIXED_REGISTER(0), offset);
-                                st.load_store.unknown = 0x1E9E; /* XXX: What is this? */
-                                emit_mir_instruction(ctx, st);
-                        } else {
-                                /* Do not emit the varying yet -- instead, just mark down that we need to later */
+                         * varying register, r26 or r27. The register itself is
+                         * selected as the register in the st_vary instruction,
+                         * minus the base of 26. E.g. write into r27 and then
+                         * call st_vary(1) */
 
-                                _mesa_hash_table_u64_insert(ctx->ssa_varyings, reg + 1, (void *) ((uintptr_t) (offset + 1)));
-                        }
+                        midgard_instruction ins = v_mov(reg, blank_alu_src, SSA_FIXED_REGISTER(26));
+                        emit_mir_instruction(ctx, ins);
+
+                        /* We should have been vectorized, though we don't
+                         * currently check that st_vary is emitted only once
+                         * per slot (this is relevant, since there's not a mask
+                         * parameter available on the store [set to 0 by the
+                         * blob]). We do respect the component by adjusting the
+                         * swizzle. */
+
+                        unsigned component = nir_intrinsic_component(instr);
+
+                        midgard_instruction st = m_st_vary_32(SSA_FIXED_REGISTER(0), offset);
+                        st.load_store.unknown = 0x1E9E; /* XXX: What is this? */
+                        st.load_store.swizzle = SWIZZLE_XYZW << (2*component);
+                        emit_mir_instruction(ctx, st);
                 } else {
                         DBG("Unknown store\n");
                         assert(0);
@@ -1271,6 +1452,17 @@ emit_intrinsic(compiler_context *ctx, nir_intrinsic_instr *instr)
 
                 break;
 
+        /* Special case of store_output for lowered blend shaders */
+        case nir_intrinsic_store_raw_output_pan:
+                assert (ctx->stage == MESA_SHADER_FRAGMENT);
+                reg = nir_src_index(ctx, &instr->src[0]);
+
+                midgard_instruction move = v_mov(reg, blank_alu_src, SSA_FIXED_REGISTER(0));
+                emit_mir_instruction(ctx, move);
+                ctx->fragment_output = reg;
+
+                break;
+
         case nir_intrinsic_load_alpha_ref_float:
                 assert(instr->dest.is_ssa);
 
@@ -1283,7 +1475,7 @@ emit_intrinsic(compiler_context *ctx, nir_intrinsic_instr *instr)
 
         case nir_intrinsic_load_viewport_scale:
         case nir_intrinsic_load_viewport_offset:
-                emit_sysval_read(ctx, instr);
+                emit_sysval_read(ctx, &instr->instr);
                 break;
 
         default:
@@ -1297,15 +1489,19 @@ static unsigned
 midgard_tex_format(enum glsl_sampler_dim dim)
 {
         switch (dim) {
+        case GLSL_SAMPLER_DIM_1D:
+        case GLSL_SAMPLER_DIM_BUF:
+                return MALI_TEX_1D;
+
         case GLSL_SAMPLER_DIM_2D:
         case GLSL_SAMPLER_DIM_EXTERNAL:
-                return TEXTURE_2D;
+                return MALI_TEX_2D;
 
         case GLSL_SAMPLER_DIM_3D:
-                return TEXTURE_3D;
+                return MALI_TEX_3D;
 
         case GLSL_SAMPLER_DIM_CUBE:
-                return TEXTURE_CUBE;
+                return MALI_TEX_CUBE;
 
         default:
                 DBG("Unknown sampler dim type\n");
@@ -1314,13 +1510,61 @@ midgard_tex_format(enum glsl_sampler_dim dim)
         }
 }
 
+/* Tries to attach an explicit LOD / bias as a constant. Returns whether this
+ * was successful */
+
+static bool
+pan_attach_constant_bias(
+                compiler_context *ctx,
+                nir_src lod,
+                midgard_texture_word *word)
+{
+        /* To attach as constant, it has to *be* constant */
+
+        if (!nir_src_is_const(lod))
+                return false;
+
+        float f = nir_src_as_float(lod);
+
+        /* Break into fixed-point */
+        signed lod_int = f;
+        float lod_frac = f - lod_int;
+
+        /* Carry over negative fractions */
+        if (lod_frac < 0.0) {
+                lod_int--;
+                lod_frac += 1.0;
+        }
+
+        /* Encode */
+        word->bias = float_to_ubyte(lod_frac);
+        word->bias_int = lod_int;
+
+        return true;
+}
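
A worked example of the fixed-point split above: an explicit LOD of -1.25
truncates toward zero to lod_int = -1 with lod_frac = -0.25; the negative
fraction carries, leaving lod_int = -2 and lod_frac = 0.75 (and indeed
-2 + 0.75 == -1.25), so the word gets bias_int = -2 and
bias = float_to_ubyte(0.75).
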
+
+static enum mali_sampler_type
+midgard_sampler_type(nir_alu_type t)
+{
+        switch (nir_alu_type_get_base_type(t)) {
+                case nir_type_float:
+                        return MALI_SAMPLER_FLOAT;
+                case nir_type_int:
+                        return MALI_SAMPLER_SIGNED;
+                case nir_type_uint:
+                        return MALI_SAMPLER_UNSIGNED;
+                default:
+                        unreachable("Unknown sampler type");
+        }
+}
+
 static void
-emit_tex(compiler_context *ctx, nir_tex_instr *instr)
+emit_texop_native(compiler_context *ctx, nir_tex_instr *instr,
+                 unsigned midgard_texop)
 {
         /* TODO */
         //assert (!instr->sampler);
         //assert (!instr->texture_array_size);
-        assert (instr->op == nir_texop_tex);
 
         /* Allocate registers via a round robin scheme to alternate between the two registers */
         int reg = ctx->texture_op_count & 1;
@@ -1334,106 +1578,178 @@ emit_tex(compiler_context *ctx, nir_tex_instr *instr)
         int texture_index = instr->texture_index;
         int sampler_index = texture_index;
 
-        for (unsigned i = 0; i < instr->num_srcs; ++i) {
-                switch (instr->src[i].src_type) {
-                case nir_tex_src_coord: {
-                        int index = nir_src_index(ctx, &instr->src[i].src);
+        /* No helper to build texture words -- we do it all here */
+        midgard_instruction ins = {
+                .type = TAG_TEXTURE_4,
+                .mask = 0xF,
+                .texture = {
+                        .op = midgard_texop,
+                        .format = midgard_tex_format(instr->sampler_dim),
+                        .texture_handle = texture_index,
+                        .sampler_handle = sampler_index,
 
-                        midgard_vector_alu_src alu_src = blank_alu_src;
+                        /* TODO: Regalloc it in */
+                        .swizzle = SWIZZLE_XYZW,
 
-                        int reg = SSA_FIXED_REGISTER(REGISTER_TEXTURE_BASE + in_reg);
+                        /* TODO: half */
+                        .in_reg_full = 1,
+                        .out_full = 1,
+
+                        .sampler_type = midgard_sampler_type(instr->dest_type),
+                }
+        };
+
+        for (unsigned i = 0; i < instr->num_srcs; ++i) {
+                int reg = SSA_FIXED_REGISTER(REGISTER_TEXTURE_BASE + in_reg);
+                int index = nir_src_index(ctx, &instr->src[i].src);
+                int nr_comp = nir_src_num_components(instr->src[i].src);
+                midgard_vector_alu_src alu_src = blank_alu_src;
 
+                switch (instr->src[i].src_type) {
+                case nir_tex_src_coord: {
                         if (instr->sampler_dim == GLSL_SAMPLER_DIM_CUBE) {
+                                /* texelFetch is undefined on samplerCube */
+                                assert(midgard_texop != TEXTURE_OP_TEXEL_FETCH);
+
                                 /* For cubemaps, we need to load coords into
                                  * special r27, and then use a special ld/st op
-                                 * to copy into the texture register */
+                                 * to select the face and copy the xy into the
+                                 * texture register */
 
                                 alu_src.swizzle = SWIZZLE(COMPONENT_X, COMPONENT_Y, COMPONENT_Z, COMPONENT_X);
 
-                                midgard_instruction move = v_fmov(index, alu_src, SSA_FIXED_REGISTER(27));
+                                midgard_instruction move = v_mov(index, alu_src, SSA_FIXED_REGISTER(27));
                                 emit_mir_instruction(ctx, move);
 
                                 midgard_instruction st = m_st_cubemap_coords(reg, 0);
                                 st.load_store.unknown = 0x24; /* XXX: What is this? */
-                                st.load_store.mask = 0x3; /* xy? */
+                                st.mask = 0x3; /* xy */
                                 st.load_store.swizzle = alu_src.swizzle;
                                 emit_mir_instruction(ctx, st);
 
+                                ins.texture.in_reg_swizzle = swizzle_of(2);
                         } else {
-                                alu_src.swizzle = SWIZZLE(COMPONENT_X, COMPONENT_Y, COMPONENT_X, COMPONENT_X);
-
-                                midgard_instruction ins = v_fmov(index, alu_src, reg);
-                                emit_mir_instruction(ctx, ins);
+                                ins.texture.in_reg_swizzle = alu_src.swizzle = swizzle_of(nr_comp);
+
+                                midgard_instruction mov = v_mov(index, alu_src, reg);
+                                mov.mask = mask_of(nr_comp);
+                                emit_mir_instruction(ctx, mov);
+
+                                if (midgard_texop == TEXTURE_OP_TEXEL_FETCH) {
+                                        /* Texel fetch opcodes care about the
+                                         * values of z and w, so we actually
+                                         * need to spill into a second register
+                                         * for a texel fetch with register bias
+                                         * (for non-2D). TODO: Implement that
+                                         */
+
+                                        assert(instr->sampler_dim == GLSL_SAMPLER_DIM_2D);
+
+                                        midgard_instruction zero = v_mov(index, alu_src, reg);
+                                        zero.ssa_args.inline_constant = true;
+                                        zero.ssa_args.src1 = SSA_FIXED_REGISTER(REGISTER_CONSTANT);
+                                        zero.has_constants = true;
+                                        zero.mask = ~mov.mask;
+                                        emit_mir_instruction(ctx, zero);
+
+                                        ins.texture.in_reg_swizzle = SWIZZLE_XYZZ;
+                                } else {
+                                        /* Non-texel fetch doesn't need that
+                                         * nonsense. However we do use the Z
+                                         * for array indexing */
+                                        bool is_3d = instr->sampler_dim == GLSL_SAMPLER_DIM_3D;
+                                        ins.texture.in_reg_swizzle = is_3d ? SWIZZLE_XYZZ : SWIZZLE_XYXZ;
+                                }
                         }
 
                         break;
                 }
 
-                default: {
-                        DBG("Unknown source type\n");
-                        //assert(0);
-                        break;
-                }
-                }
-        }
+                case nir_tex_src_bias:
+                case nir_tex_src_lod: {
+                        /* Try as a constant if we can */
 
-        /* No helper to build texture words -- we do it all here */
-        midgard_instruction ins = {
-                .type = TAG_TEXTURE_4,
-                .texture = {
-                        .op = TEXTURE_OP_NORMAL,
-                        .format = midgard_tex_format(instr->sampler_dim),
-                        .texture_handle = texture_index,
-                        .sampler_handle = sampler_index,
+                        bool is_txf = midgard_texop == TEXTURE_OP_TEXEL_FETCH;
+                        if (!is_txf && pan_attach_constant_bias(ctx, instr->src[i].src, &ins.texture))
+                                break;
 
-                        /* TODO: Don't force xyzw */
-                        .swizzle = SWIZZLE(COMPONENT_X, COMPONENT_Y, COMPONENT_Z, COMPONENT_W),
-                        .mask = 0xF,
+                        /* Otherwise we use a register. To keep RA simple, we
+                         * put the bias/LOD into the w component of the input
+                         * source, which is otherwise in xy */
 
-                        /* TODO: half */
-                        //.in_reg_full = 1,
-                        .out_full = 1,
+                        alu_src.swizzle = SWIZZLE_XXXX;
 
-                        .filter = 1,
+                        midgard_instruction mov = v_mov(index, alu_src, reg);
+                        mov.mask = 1 << COMPONENT_W;
+                        emit_mir_instruction(ctx, mov);
 
-                        /* Always 1 */
-                        .unknown7 = 1,
+                        ins.texture.lod_register = true;
+
+                        midgard_tex_register_select sel = {
+                                .select = in_reg,
+                                .full = 1,
+
+                                /* w */
+                                .component_lo = 1,
+                                .component_hi = 1
+                        };
+
+                        uint8_t packed;
+                        memcpy(&packed, &sel, sizeof(packed));
+                        ins.texture.bias = packed;
+
+                        break;
+                }
 
-                        /* Assume we can continue; hint it out later */
-                        .cont = 1,
+                default:
+                        unreachable("Unknown texture source type\n");
                 }
-        };
+        }
 
         /* Set registers to read and write from the same place */
         ins.texture.in_reg_select = in_reg;
         ins.texture.out_reg_select = out_reg;
 
-        /* TODO: Dynamic swizzle input selection, half-swizzles? */
-        if (instr->sampler_dim == GLSL_SAMPLER_DIM_3D) {
-                ins.texture.in_reg_swizzle_right = COMPONENT_X;
-                ins.texture.in_reg_swizzle_left = COMPONENT_Y;
-                //ins.texture.in_reg_swizzle_third = COMPONENT_Z;
-        } else {
-                ins.texture.in_reg_swizzle_left = COMPONENT_X;
-                ins.texture.in_reg_swizzle_right = COMPONENT_Y;
-                //ins.texture.in_reg_swizzle_third = COMPONENT_X;
-        }
-
         emit_mir_instruction(ctx, ins);
 
-        /* Simultaneously alias the destination and emit a move for it. The move will be eliminated if possible */
-
         int o_reg = REGISTER_TEXTURE_BASE + out_reg, o_index = nir_dest_index(ctx, &instr->dest);
-        alias_ssa(ctx, o_index, SSA_FIXED_REGISTER(o_reg));
-        ctx->texture_index[reg] = o_index;
-
-        midgard_instruction ins2 = v_fmov(SSA_FIXED_REGISTER(o_reg), blank_alu_src, o_index);
+        midgard_instruction ins2 = v_mov(SSA_FIXED_REGISTER(o_reg), blank_alu_src, o_index);
         emit_mir_instruction(ctx, ins2);
 
         /* Used for .cont and .last hinting */
         ctx->texture_op_count++;
 }
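
Sketch of the round-robin scheme used at the top of emit_texop_native:
texture_op_count alternates the in/out register select between the two
texture registers, so back-to-back texture ops need not serialize on one
register (selection logic only; the base register number comes from
REGISTER_TEXTURE_BASE):

        int reg = ctx->texture_op_count & 1;   /* 0, 1, 0, 1, ... */
        int in_reg = reg, out_reg = reg;
        /* later: ins.texture.in_reg_select = in_reg;
         *        ins.texture.out_reg_select = out_reg; */
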
 
+static void
+emit_tex(compiler_context *ctx, nir_tex_instr *instr)
+{
+        /* Fixup op, since only textureLod is permitted in VS but NIR can give
+         * generic tex in some cases (which confuses the hardware) */
+
+        bool is_vertex = ctx->stage == MESA_SHADER_VERTEX;
+
+        if (is_vertex && instr->op == nir_texop_tex)
+                instr->op = nir_texop_txl;
+
+        switch (instr->op) {
+        case nir_texop_tex:
+        case nir_texop_txb:
+                emit_texop_native(ctx, instr, TEXTURE_OP_NORMAL);
+                break;
+        case nir_texop_txl:
+                emit_texop_native(ctx, instr, TEXTURE_OP_LOD);
+                break;
+        case nir_texop_txf:
+                emit_texop_native(ctx, instr, TEXTURE_OP_TEXEL_FETCH);
+                break;
+        case nir_texop_txs:
+                emit_sysval_read(ctx, &instr->instr);
+                break;
+        default:
+                unreachable("Unhandled texture op");
+        }
+}
+
 static void
 emit_jump(compiler_context *ctx, nir_jump_instr *instr)
 {
@@ -1534,7 +1850,7 @@ inline_alu_constants(compiler_context *ctx)
                         unsigned scratch = alu->ssa_args.dest;
 
                         if (entry) {
-                                midgard_instruction ins = v_fmov(SSA_FIXED_REGISTER(REGISTER_CONSTANT), blank_alu_src, scratch);
+                                midgard_instruction ins = v_mov(SSA_FIXED_REGISTER(REGISTER_CONSTANT), blank_alu_src, scratch);
                                 attach_constants(ctx, &ins, entry, alu->ssa_args.src1 + 1);
 
                                 /* Force a break XXX Defer r31 writes */
@@ -1566,6 +1882,13 @@ embedded_to_inline_constant(compiler_context *ctx)
                 /* Blend constants must not be inlined by definition */
                 if (ins->has_blend_constant) continue;
 
+                /* We can inline 32-bit (sometimes) or 16-bit (usually) */
+                bool is_16 = ins->alu.reg_mode == midgard_reg_mode_16;
+                bool is_32 = ins->alu.reg_mode == midgard_reg_mode_32;
+
+                if (!(is_16 || is_32))
+                        continue;
+
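+                /* ("Sometimes", since the inline constant slot is only 16
+                 * bits wide -- see scaled_constant below -- so a 32-bit
+                 * value must survive the down-conversion losslessly) */
+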
                 /* src1 cannot be an inline constant due to encoding
                  * restrictions. So, if possible we try to flip the arguments
                  * in that case */
@@ -1616,7 +1939,7 @@ embedded_to_inline_constant(compiler_context *ctx)
                         /* Scale constant appropriately, if we can legally */
                         uint16_t scaled_constant = 0;
 
-                        if (midgard_is_integer_op(op)) {
+                        if (midgard_is_integer_op(op) || is_16) {
                                 unsigned int *iconstants = (unsigned int *) ins->constants;
                                 scaled_constant = (uint16_t) iconstants[component];
 
@@ -1655,7 +1978,7 @@ embedded_to_inline_constant(compiler_context *ctx)
                         uint32_t value = cons[component];
 
                         bool is_vector = false;
-                        unsigned mask = effective_writemask(&ins->alu);
+                        unsigned mask = effective_writemask(&ins->alu, ins->mask);
 
                         for (int c = 1; c < 4; ++c) {
                                 /* We only care if this component is actually used */
@@ -1688,6 +2011,10 @@ embedded_to_inline_constant(compiler_context *ctx)
 static void
 map_ssa_to_alias(compiler_context *ctx, int *ref)
 {
+        /* A negative value deliberately marks an unused source */
+        if (*ref < 0)
+                return;
+
         unsigned int alias = (uintptr_t) _mesa_hash_table_u64_search(ctx->ssa_to_alias, *ref + 1);
 
         if (alias) {
@@ -1726,12 +2053,47 @@ midgard_opt_dead_code_eliminate(compiler_context *ctx, midgard_block *block)
         return progress;
 }
 
+/* Dead code elimination for branches at the end of a block - only one branch
+ * per block is legal semantically */
+
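+/* Sketch of the (hypothetical) shape this cleans up -- a block left with
+ * two branch-unit writes by nested control flow:
+ *
+ *    <conditional branch to A>
+ *    <unconditional branch to B>  <- dead once the first executes; culled
+ */
+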
+static void
+midgard_opt_cull_dead_branch(compiler_context *ctx, midgard_block *block)
+{
+        bool branched = false;
+
+        mir_foreach_instr_in_block_safe(block, ins) {
+                if (!midgard_is_branch_unit(ins->unit)) continue;
+
+                /* We ignore prepacked branches since the fragment epilogue is
+                 * just generally special */
+                if (ins->prepacked_branch) continue;
+
+                /* Discards are similarly special and may not correspond to the
+                 * end of a block */
+
+                if (ins->branch.target_type == TARGET_DISCARD) continue;
+
+                if (branched) {
+                        /* We already branched, so this is dead */
+                        mir_remove_instruction(ins);
+                }
+
+                branched = true;
+        }
+}
+
 static bool
 mir_nontrivial_mod(midgard_vector_alu_src src, bool is_int, unsigned mask)
 {
         /* abs or neg */
         if (!is_int && src.mod) return true;
 
+        /* Of the int mods, only a shift changes the value in isolation */
+        if (is_int && src.mod == midgard_int_shift) return true;
+
+        /* size-conversion */
+        if (src.half) return true;
+
         /* swizzle */
         for (unsigned c = 0; c < 4; ++c) {
                 if (!(mask & (1 << c))) continue;
@@ -1744,13 +2106,28 @@ mir_nontrivial_mod(midgard_vector_alu_src src, bool is_int, unsigned mask)
 static bool
 mir_nontrivial_source2_mod(midgard_instruction *ins)
 {
-        unsigned mask = squeeze_writemask(ins->alu.mask);
         bool is_int = midgard_is_integer_op(ins->alu.op);
 
         midgard_vector_alu_src src2 =
                 vector_alu_from_unsigned(ins->alu.src2);
 
-        return mir_nontrivial_mod(src2, is_int, mask);
+        return mir_nontrivial_mod(src2, is_int, ins->mask);
+}
+
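+/* An outmod is trivial when it leaves the result bit-exact:
+ * midgard_outmod_int_wrap for integer ops and midgard_outmod_none for
+ * float ops. Anything else, or a dest_override, blocks copy propagation. */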
+static bool
+mir_nontrivial_outmod(midgard_instruction *ins)
+{
+        bool is_int = midgard_is_integer_op(ins->alu.op);
+        unsigned mod = ins->alu.outmod;
+
+        /* Type conversion is a sort of outmod */
+        if (ins->alu.dest_override != midgard_dest_override_none)
+                return true;
+
+        if (is_int)
+                return mod != midgard_outmod_int_wrap;
+        else
+                return mod != midgard_outmod_none;
 }
 
 static bool
@@ -1777,7 +2154,7 @@ midgard_opt_copy_prop(compiler_context *ctx, midgard_block *block)
                 if (ins->has_constants) continue;
 
                 if (mir_nontrivial_source2_mod(ins)) continue;
-                if (ins->alu.outmod != midgard_outmod_none) continue;
+                if (mir_nontrivial_outmod(ins)) continue;
 
                 /* We're clear -- rewrite */
                 mir_rewrite_index_src(ctx, to, from);
@@ -1792,7 +2169,7 @@ midgard_opt_copy_prop(compiler_context *ctx, midgard_block *block)
  * the move can be propagated away entirely */
 
 static bool
-mir_compose_outmod(midgard_outmod *outmod, midgard_outmod comp)
+mir_compose_float_outmod(midgard_outmod_float *outmod, midgard_outmod_float comp)
 {
         /* Nothing to do */
         if (comp == midgard_outmod_none)
@@ -1820,6 +2197,7 @@ midgard_opt_pos_propagate(compiler_context *ctx, midgard_block *block)
                 /* TODO: Registers? */
                 unsigned src = ins->ssa_args.src1;
                 if (src >= ctx->func->impl->ssa_alloc) continue;
+                assert(!mir_has_multiple_writes(ctx, src));
 
                 /* There might be a source modifier, too */
                 if (mir_nontrivial_source2_mod(ins)) continue;
@@ -1829,8 +2207,11 @@ midgard_opt_pos_propagate(compiler_context *ctx, midgard_block *block)
                         if (v->type != TAG_ALU_4) continue;
                         if (v->ssa_args.dest != src) continue;
 
-                        midgard_outmod temp = v->alu.outmod;
-                        progress |= mir_compose_outmod(&temp, ins->alu.outmod);
+                        /* Can we even take a float outmod? */
+                        if (midgard_is_integer_out_op(v->alu.op)) continue;
+
+                        midgard_outmod_float temp = v->alu.outmod;
+                        progress |= mir_compose_float_outmod(&temp, ins->alu.outmod);
 
                         /* Throw in the towel.. */
                         if (!progress) break;
@@ -1846,55 +2227,6 @@ midgard_opt_pos_propagate(compiler_context *ctx, midgard_block *block)
         return progress;
 }
 
-static bool
-midgard_opt_copy_prop_tex(compiler_context *ctx, midgard_block *block)
-{
-        bool progress = false;
-
-        mir_foreach_instr_in_block_safe(block, ins) {
-                if (ins->type != TAG_ALU_4) continue;
-                if (!OP_IS_MOVE(ins->alu.op)) continue;
-
-                unsigned from = ins->ssa_args.src1;
-                unsigned to = ins->ssa_args.dest;
-
-                /* Make sure it's simple enough for us to handle */
-
-                if (from >= SSA_FIXED_MINIMUM) continue;
-                if (from >= ctx->func->impl->ssa_alloc) continue;
-                if (to < SSA_FIXED_REGISTER(REGISTER_TEXTURE_BASE)) continue;
-                if (to > SSA_FIXED_REGISTER(REGISTER_TEXTURE_BASE + 1)) continue;
-
-                bool eliminated = false;
-
-                mir_foreach_instr_in_block_from_rev(block, v, mir_prev_op(ins)) {
-                        /* The texture registers are not SSA so be careful.
-                         * Conservatively, just stop if we hit a texture op
-                         * (even if it may not write) to where we are */
-
-                        if (v->type != TAG_ALU_4)
-                                break;
-
-                        if (v->ssa_args.dest == from) {
-                                /* We don't want to track partial writes ... */
-                                if (v->alu.mask == 0xF) {
-                                        v->ssa_args.dest = to;
-                                        eliminated = true;
-                                }
-
-                                break;
-                        }
-                }
-
-                if (eliminated)
-                        mir_remove_instruction(ins);
-
-                progress |= eliminated;
-        }
-
-        return progress;
-}
-
 /* The following passes reorder MIR instructions to enable better scheduling */
 
 static void
@@ -1946,40 +2278,6 @@ midgard_pair_load_store(compiler_context *ctx, midgard_block *block)
         }
 }
 
-/* Emit varying stores late */
-
-static void
-midgard_emit_store(compiler_context *ctx, midgard_block *block) {
-        /* Iterate in reverse to get the final write, rather than the first */
-
-        mir_foreach_instr_in_block_safe_rev(block, ins) {
-                /* Check if what we just wrote needs a store */
-                int idx = ins->ssa_args.dest;
-                uintptr_t varying = ((uintptr_t) _mesa_hash_table_u64_search(ctx->ssa_varyings, idx + 1));
-
-                if (!varying) continue;
-
-                varying -= 1;
-
-                /* We need to store to the appropriate varying, so emit the
-                 * move/store */
-
-                /* TODO: Integrate with special purpose RA (and scheduler?) */
-                bool high_varying_register = false;
-
-                midgard_instruction mov = v_fmov(idx, blank_alu_src, SSA_FIXED_REGISTER(REGISTER_VARYING_BASE + high_varying_register));
-
-                midgard_instruction st = m_st_vary_32(SSA_FIXED_REGISTER(high_varying_register), varying);
-                st.load_store.unknown = 0x1E9E; /* XXX: What is this? */
-
-                mir_insert_instruction_before(mir_next_op(ins), st);
-                mir_insert_instruction_before(mir_next_op(ins), mov);
-
-                /* We no longer need to store this varying */
-                _mesa_hash_table_u64_remove(ctx->ssa_varyings, idx + 1);
-        }
-}
-
 /* If there are leftovers after the below pass, emit actual fmov
  * instructions for the slow-but-correct path */
 
@@ -1991,7 +2289,7 @@ emit_leftover_move(compiler_context *ctx)
                 int mapped = base;
 
                 map_ssa_to_alias(ctx, &mapped);
-                EMIT(fmov, mapped, blank_alu_src, base);
+                EMIT(mov, mapped, blank_alu_src, base);
         }
 }
 
@@ -2015,7 +2313,7 @@ emit_fragment_epilogue(compiler_context *ctx)
         void *constant_value = _mesa_hash_table_u64_search(ctx->ssa_constants, ctx->fragment_output + 1);
 
         if (constant_value) {
-                midgard_instruction ins = v_fmov(SSA_FIXED_REGISTER(REGISTER_CONSTANT), blank_alu_src, SSA_FIXED_REGISTER(0));
+                midgard_instruction ins = v_mov(SSA_FIXED_REGISTER(REGISTER_CONSTANT), blank_alu_src, SSA_FIXED_REGISTER(0));
                 attach_constants(ctx, &ins, constant_value, ctx->fragment_output + 1);
                 emit_mir_instruction(ctx, ins);
         }
@@ -2028,92 +2326,6 @@ emit_fragment_epilogue(compiler_context *ctx)
         EMIT(alu_br_compact_cond, midgard_jmp_writeout_op_writeout, TAG_ALU_4, -1, midgard_condition_always);
 }
 
-/* For the blend epilogue, we need to convert the blended fragment vec4 (stored
- * in r0) to a RGBA8888 value by scaling and type converting. We then output it
- * with the int8 analogue to the fragment epilogue */
-
-static void
-emit_blend_epilogue(compiler_context *ctx)
-{
-        /* vmul.fmul.none.fulllow hr48, r0, #255 */
-
-        midgard_instruction scale = {
-                .type = TAG_ALU_4,
-                .unit = UNIT_VMUL,
-                .inline_constant = _mesa_float_to_half(255.0),
-                .ssa_args = {
-                        .src0 = SSA_FIXED_REGISTER(0),
-                        .src1 = SSA_UNUSED_0,
-                        .dest = SSA_FIXED_REGISTER(24),
-                        .inline_constant = true
-                },
-                .alu = {
-                        .op = midgard_alu_op_fmul,
-                        .reg_mode = midgard_reg_mode_32,
-                        .dest_override = midgard_dest_override_lower,
-                        .mask = 0xFF,
-                        .src1 = vector_alu_srco_unsigned(blank_alu_src),
-                        .src2 = vector_alu_srco_unsigned(blank_alu_src),
-                }
-        };
-
-        emit_mir_instruction(ctx, scale);
-
-        /* vadd.f2u8.pos.low hr0, hr48, #0 */
-
-        midgard_vector_alu_src alu_src = blank_alu_src;
-        alu_src.half = true;
-
-        midgard_instruction f2u8 = {
-                .type = TAG_ALU_4,
-                .ssa_args = {
-                        .src0 = SSA_FIXED_REGISTER(24),
-                        .src1 = SSA_UNUSED_0,
-                        .dest = SSA_FIXED_REGISTER(0),
-                        .inline_constant = true
-                },
-                .alu = {
-                        .op = midgard_alu_op_f2u8,
-                        .reg_mode = midgard_reg_mode_16,
-                        .dest_override = midgard_dest_override_lower,
-                        .outmod = midgard_outmod_pos,
-                        .mask = 0xF,
-                        .src1 = vector_alu_srco_unsigned(alu_src),
-                        .src2 = vector_alu_srco_unsigned(blank_alu_src),
-                }
-        };
-
-        emit_mir_instruction(ctx, f2u8);
-
-        /* vmul.imov.quarter r0, r0, r0 */
-
-        midgard_instruction imov_8 = {
-                .type = TAG_ALU_4,
-                .ssa_args = {
-                        .src0 = SSA_UNUSED_1,
-                        .src1 = SSA_FIXED_REGISTER(0),
-                        .dest = SSA_FIXED_REGISTER(0),
-                },
-                .alu = {
-                        .op = midgard_alu_op_imov,
-                        .reg_mode = midgard_reg_mode_8,
-                        .dest_override = midgard_dest_override_none,
-                        .outmod = midgard_outmod_int,
-                        .mask = 0xFF,
-                        .src1 = vector_alu_srco_unsigned(blank_alu_src),
-                        .src2 = vector_alu_srco_unsigned(blank_alu_src),
-                }
-        };
-
-        /* Emit branch epilogue with the 8-bit move as the source */
-
-        emit_mir_instruction(ctx, imov_8);
-        EMIT(alu_br_compact_cond, midgard_jmp_writeout_op_writeout, TAG_ALU_4, 0, midgard_condition_always);
-
-        emit_mir_instruction(ctx, imov_8);
-        EMIT(alu_br_compact_cond, midgard_jmp_writeout_op_writeout, TAG_ALU_4, -1, midgard_condition_always);
-}
-
 static midgard_block *
 emit_block(compiler_context *ctx, nir_block *block)
 {
@@ -2145,16 +2357,12 @@ emit_block(compiler_context *ctx, nir_block *block)
         /* Perform heavylifting for aliasing */
         actualise_ssa_to_alias(ctx);
 
-        midgard_emit_store(ctx, this_block);
         midgard_pair_load_store(ctx, this_block);
 
         /* Append fragment shader epilogue (value writeout) */
         if (ctx->stage == MESA_SHADER_FRAGMENT) {
                 if (block == nir_impl_last_block(ctx->func->impl)) {
-                        if (ctx->is_blend)
-                                emit_blend_epilogue(ctx);
-                        else
-                                emit_fragment_epilogue(ctx);
+                        emit_fragment_epilogue(ctx);
                 }
         }
 
@@ -2271,6 +2479,9 @@ emit_loop(struct compiler_context *ctx, nir_loop *nloop)
         /* Now that we've finished emitting the loop, free up the depth again
          * so we play nice with recursion amid nested loops */
         --ctx->current_loop_depth;
+
+        /* Bump the loop count, reported via shader-db */
+        ++ctx->loop_count;
 }
 
 static midgard_block *
@@ -2345,7 +2556,7 @@ midgard_compile_shader_nir(nir_shader *nir, midgard_program *program, bool is_bl
                 .stage = nir->info.stage,
 
                 .is_blend = is_blend,
-                .blend_constant_offset = -1,
+                .blend_constant_offset = 0,
 
                 .alpha_ref = program->alpha_ref
         };
@@ -2358,7 +2569,6 @@ midgard_compile_shader_nir(nir_shader *nir, midgard_program *program, bool is_bl
         /* Initialize at a global (not block) level hash tables */
 
         ctx->ssa_constants = _mesa_hash_table_u64_create(NULL);
-        ctx->ssa_varyings = _mesa_hash_table_u64_create(NULL);
         ctx->ssa_to_alias = _mesa_hash_table_u64_create(NULL);
         ctx->hash_to_temp = _mesa_hash_table_u64_create(NULL);
         ctx->sysval_to_id = _mesa_hash_table_u64_create(NULL);
@@ -2369,16 +2579,22 @@ midgard_compile_shader_nir(nir_shader *nir, midgard_program *program, bool is_bl
         struct exec_list *varyings =
                 ctx->stage == MESA_SHADER_VERTEX ? &nir->outputs : &nir->inputs;
 
+        unsigned max_varying = 0;
         nir_foreach_variable(var, varyings) {
                 unsigned loc = var->data.driver_location;
                 unsigned sz = glsl_type_size(var->type, FALSE);
 
                 for (int c = 0; c < sz; ++c) {
-                        program->varyings[loc + c] = var->data.location;
+                        program->varyings[loc + c] = var->data.location + c;
+                        max_varying = MAX2(max_varying, loc + c);
                 }
         }
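+        /* (Multi-slot varyings -- matrices, arrays -- now record one
+         * location per occupied slot, with max_varying tracking the highest
+         * slot in use for the count below) */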
 
-        /* Lower gl_Position pre-optimisation */
+        /* Lower gl_Position pre-optimisation, but after lowering vars to ssa
+         * (so we don't accidentally duplicate the epilogue since mesa/st has
+         * messed with our I/O quite a bit already) */
+
+        NIR_PASS_V(nir, nir_lower_vars_to_ssa);
 
         if (ctx->stage == MESA_SHADER_VERTEX)
                 NIR_PASS_V(nir, nir_lower_viewport_transform);
@@ -2411,7 +2627,7 @@ midgard_compile_shader_nir(nir_shader *nir, midgard_program *program, bool is_bl
         memcpy(program->sysvals, ctx->sysvals, sizeof(ctx->sysvals[0]) * ctx->sysval_count);
 
         program->attribute_count = (ctx->stage == MESA_SHADER_VERTEX) ? nir->num_inputs : 0;
-        program->varying_count = (ctx->stage == MESA_SHADER_VERTEX) ? nir->num_outputs : ((ctx->stage == MESA_SHADER_FRAGMENT) ? nir->num_inputs : 0);
+        program->varying_count = max_varying + 1; /* Highest index to count (fencepost) */
 
         nir_foreach_function(func, nir) {
                 if (!func->impl)
@@ -2439,11 +2655,17 @@ midgard_compile_shader_nir(nir_shader *nir, midgard_program *program, bool is_bl
                 mir_foreach_block(ctx, block) {
                         progress |= midgard_opt_pos_propagate(ctx, block);
                         progress |= midgard_opt_copy_prop(ctx, block);
-                        progress |= midgard_opt_copy_prop_tex(ctx, block);
                         progress |= midgard_opt_dead_code_eliminate(ctx, block);
                 }
         } while (progress);
 
+        /* Nested control-flow can result in dead branches at the end of the
+         * block. This messes with our analysis and is just dead code, so cull
+         * them */
+        mir_foreach_block(ctx, block) {
+                midgard_opt_cull_dead_branch(ctx, block);
+        }
+
         /* Schedule! */
         schedule_program(ctx);
 
@@ -2599,7 +2821,7 @@ midgard_compile_shader_nir(nir_shader *nir, midgard_program *program, bool is_bl
          * last is an ALU, then it's also 1... */
 
         mir_foreach_block(ctx, block) {
-                util_dynarray_foreach(&block->bundles, midgard_bundle, bundle) {
+                mir_foreach_bundle_in_block(block, bundle) {
                         int lookahead = 1;
 
                         if (current_bundle + 1 < bundle_count) {
@@ -2636,5 +2858,42 @@ midgard_compile_shader_nir(nir_shader *nir, midgard_program *program, bool is_bl
        if (midgard_debug & MIDGARD_DBG_SHADERS)
                disassemble_midgard(program->compiled.data, program->compiled.size);
 
+        if (midgard_debug & MIDGARD_DBG_SHADERDB) {
+                unsigned nr_bundles = 0, nr_ins = 0;
+
+                /* Count instructions and bundles */
+
+                mir_foreach_instr_global(ctx, ins) {
+                        nr_ins++;
+                }
+
+                mir_foreach_block(ctx, block) {
+                        nr_bundles += util_dynarray_num_elements(
+                                        &block->bundles, midgard_bundle);
+                }
+
+                /* Calculate the thread count. The hardware runs fewer
+                 * threads as the work register count crosses certain
+                 * cutoffs */
+
+                unsigned nr_registers = program->work_register_count;
+
+                unsigned nr_threads =
+                        (nr_registers <= 4) ? 4 :
+                        (nr_registers <= 8) ? 2 :
+                                              1;
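+
+                /* (For instance, a shader needing 6 work registers would
+                 * report 2 threads under these assumed cutoffs) */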
+
+                /* Dump stats */
+
+                fprintf(stderr, "shader%u - %s shader: "
+                                "%u inst, %u bundles, "
+                                "%u registers, %u threads, %u loops\n",
+                                SHADER_DB_COUNT++,
+                                gl_shader_stage_name(ctx->stage),
+                                nr_ins, nr_bundles,
+                                nr_registers, nr_threads,
+                                ctx->loop_count);
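+
+                /* The printed line looks like (illustrative values):
+                 *   shader0 - MESA_SHADER_FRAGMENT shader: 12 inst,
+                 *   5 bundles, 4 registers, 4 threads, 0 loops */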
+        }
+
         return 0;
 }