panfrost/midgard: Use fancy iterator
[mesa.git] / src / gallium / drivers / panfrost / midgard / midgard_compile.c
index d297e505c1c56547822b76d3bd4cd5992d889a4e..f29f938215a14d070ed5a1310106b04e20cda1ac 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2018 Alyssa Rosenzweig <alyssa@rosenzweig.io>
+ * Copyright (C) 2018-2019 Alyssa Rosenzweig <alyssa@rosenzweig.io>
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -36,7 +36,6 @@
 #include "main/imports.h"
 #include "compiler/nir/nir_builder.h"
 #include "util/half_float.h"
-#include "util/register_allocate.h"
 #include "util/u_debug.h"
 #include "util/u_dynarray.h"
 #include "util/list.h"
@@ -45,7 +44,9 @@
 #include "midgard.h"
 #include "midgard_nir.h"
 #include "midgard_compile.h"
+#include "midgard_ops.h"
 #include "helpers.h"
+#include "compiler.h"
 
 #include "disassemble.h"
 
@@ -64,125 +65,11 @@ int midgard_debug = 0;
                        fprintf(stderr, "%s:%d: "fmt, \
                                __FUNCTION__, __LINE__, ##__VA_ARGS__); } while (0)
 
-/* Instruction arguments represented as block-local SSA indices, rather than
- * registers. Negative values mean unused. */
-
-typedef struct {
-        int src0;
-        int src1;
-        int dest;
-
-        /* src1 is -not- SSA but instead a 16-bit inline constant to be smudged
-         * in. Only valid for ALU ops. */
-        bool inline_constant;
-} ssa_args;
-
-/* Forward declare so midgard_branch can reference */
-struct midgard_block;
-
-/* Target types. Defaults to TARGET_GOTO (the type corresponding directly to
- * the hardware), hence why that must be zero. TARGET_DISCARD signals this
- * instruction is actually a discard op. */
-
-#define TARGET_GOTO 0
-#define TARGET_BREAK 1
-#define TARGET_CONTINUE 2
-#define TARGET_DISCARD 3
-
-typedef struct midgard_branch {
-        /* If conditional, the condition is specified in r31.w */
-        bool conditional;
-
-        /* For conditionals, if this is true, we branch on FALSE. If false, we branch on TRUE. */
-        bool invert_conditional;
-
-        /* Branch targets: the start of a block, the start of a loop (continue), the end of a loop (break). Value is one of TARGET_ */
-        unsigned target_type;
-
-        /* The actual target */
-        union {
-                int target_block;
-                int target_break;
-                int target_continue;
-        };
-} midgard_branch;
-
-/* Generic in-memory data type representing a single logical instruction, rather
- * than a single instruction group. This is the preferred form for code gen.
- * Multiple midgard_instructions will later be combined during scheduling,
- * though this is not represented in this structure. Its format bridges
- * the low-level binary representation with the higher level semantic meaning.
- *
- * Notably, it allows registers to be specified as block local SSA, for code
- * emitted before the register allocation pass.
- */
-
-typedef struct midgard_instruction {
-        /* Must be first for casting */
-        struct list_head link;
-
-        unsigned type; /* ALU, load/store, texture */
-
-        /* If the register allocator has not run yet... */
-        ssa_args ssa_args;
-
-        /* Special fields for an ALU instruction */
-        midgard_reg_info registers;
-
-        /* I.e. (1 << alu_bit) */
-        int unit;
-
-        bool has_constants;
-        float constants[4];
-        uint16_t inline_constant;
-        bool has_blend_constant;
-
-        bool compact_branch;
-        bool writeout;
-        bool prepacked_branch;
-
-        union {
-                midgard_load_store_word load_store;
-                midgard_vector_alu alu;
-                midgard_texture_word texture;
-                midgard_branch_extended branch_extended;
-                uint16_t br_compact;
-
-                /* General branch, rather than packed br_compact. Higher level
-                 * than the other components */
-                midgard_branch branch;
-        };
-} midgard_instruction;
-
-typedef struct midgard_block {
-        /* Link to next block. Must be first for mir_get_block */
-        struct list_head link;
-
-        /* List of midgard_instructions emitted for the current block */
-        struct list_head instructions;
-
-        bool is_scheduled;
-
-        /* List of midgard_bundles emitted (after the scheduler has run) */
-        struct util_dynarray bundles;
-
-        /* Number of quadwords _actually_ emitted, as determined after scheduling */
-        unsigned quadword_count;
-
-        /* Successors: always one forward (the block after us), maybe
-         * one backwards (for a backward branch). No need for a second
-         * forward, since graph traversal would get there eventually
-         * anyway */
-        struct midgard_block *successors[2];
-        unsigned nr_successors;
-
-        /* The successor pointers form a graph, and in the case of
-         * complex control flow, this graph has cycles. To aid
-         * traversal during liveness analysis, we have a visited?
-         * boolean for passes to use as they see fit, provided they
-         * clean up later */
-        bool visited;
-} midgard_block;
+static bool
+midgard_is_branch_unit(unsigned unit)
+{
+        return (unit == ALU_ENAB_BRANCH) || (unit == ALU_ENAB_BR_COMPACT);
+}
 
 static void
 midgard_block_add_successor(midgard_block *block, midgard_block *successor)
@@ -220,39 +107,6 @@ midgard_block_add_successor(midgard_block *block, midgard_block *successor)
 #define M_LOAD(name) M_LOAD_STORE(name, dest, src0)
 #define M_STORE(name) M_LOAD_STORE(name, src0, dest)
 
-const midgard_vector_alu_src blank_alu_src = {
-        .swizzle = SWIZZLE(COMPONENT_X, COMPONENT_Y, COMPONENT_Z, COMPONENT_W),
-};
-
-const midgard_vector_alu_src blank_alu_src_xxxx = {
-        .swizzle = SWIZZLE(COMPONENT_X, COMPONENT_X, COMPONENT_X, COMPONENT_X),
-};
-
-const midgard_scalar_alu_src blank_scalar_alu_src = {
-        .full = true
-};
-
-/* Used for encoding the unused source of 1-op instructions */
-const midgard_vector_alu_src zero_alu_src = { 0 };
-
-/* Coerce structs to integer */
-
-static unsigned
-vector_alu_srco_unsigned(midgard_vector_alu_src src)
-{
-        unsigned u;
-        memcpy(&u, &src, sizeof(src));
-        return u;
-}
-
-static midgard_vector_alu_src
-vector_alu_from_unsigned(unsigned u)
-{
-        midgard_vector_alu_src s;
-        memcpy(&s, &u, sizeof(s));
-        return s;
-}
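The memcpy dance above is the portable C idiom for type-punning a bitfield struct without violating strict aliasing, and the two helpers are exact inverses. A quick sketch of the round trip, using only names defined in this file:

        midgard_vector_alu_src s = blank_alu_src;
        unsigned bits = vector_alu_srco_unsigned(s);
        midgard_vector_alu_src back = vector_alu_from_unsigned(bits);
        assert(memcmp(&back, &s, sizeof(s)) == 0);  /* lossless both ways */

(The helpers are removed here but still used throughout the file below, so they have presumably moved into one of the newly included headers.)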
-
 /* Inputs a NIR ALU source, with modifiers attached if necessary, and outputs
  * the corresponding Midgard source */
 
@@ -281,61 +135,21 @@ vector_alu_modifiers(nir_alu_src *src, bool is_int)
         return alu_src;
 }
 
-static bool
-mir_nontrivial_mod(midgard_vector_alu_src src, bool is_int, unsigned mask)
-{
-        /* abs or neg */
-        if (!is_int && src.mod) return true;
-
-        /* swizzle */
-        for (unsigned c = 0; c < 4; ++c) {
-                if (!(mask & (1 << c))) continue;
-                if (((src.swizzle >> (2*c)) & 3) != c) return true;
-        }
-
-        return false;
-}
-
-/* 'Intrinsic' move for misc aliasing uses independent of actual NIR ALU code */
-
-static midgard_instruction
-v_fmov(unsigned src, midgard_vector_alu_src mod, unsigned dest)
-{
-        midgard_instruction ins = {
-                .type = TAG_ALU_4,
-                .ssa_args = {
-                        .src0 = SSA_UNUSED_1,
-                        .src1 = src,
-                        .dest = dest,
-                },
-                .alu = {
-                        .op = midgard_alu_op_fmov,
-                        .reg_mode = midgard_reg_mode_full,
-                        .dest_override = midgard_dest_override_none,
-                        .mask = 0xFF,
-                        .src1 = vector_alu_srco_unsigned(zero_alu_src),
-                        .src2 = vector_alu_srco_unsigned(mod)
-                },
-        };
-
-        return ins;
-}
-
 /* load/store instructions have both 32-bit and 16-bit variants, depending on
  * whether we are using vectors composed of highp or mediump. At the moment, we
  * don't support half-floats -- this requires changes in other parts of the
  * compiler -- therefore the 16-bit versions are commented out. */
 
-//M_LOAD(load_attr_16);
-M_LOAD(load_attr_32);
-//M_LOAD(load_vary_16);
-M_LOAD(load_vary_32);
-//M_LOAD(load_uniform_16);
-M_LOAD(load_uniform_32);
-M_LOAD(load_color_buffer_8);
-//M_STORE(store_vary_16);
-M_STORE(store_vary_32);
-M_STORE(store_cubemap_coords);
+//M_LOAD(ld_attr_16);
+M_LOAD(ld_attr_32);
+//M_LOAD(ld_vary_16);
+M_LOAD(ld_vary_32);
+//M_LOAD(ld_uniform_16);
+M_LOAD(ld_uniform_32);
+M_LOAD(ld_color_buffer_8);
+//M_STORE(st_vary_16);
+M_STORE(st_vary_32);
+M_STORE(st_cubemap_coords);
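As an aside for readers unfamiliar with these helpers: M_LOAD and M_STORE are constructor-generating macros (M_LOAD_STORE is defined earlier in this file), and the rename here keeps the generated helper names in sync with the ld_*/st_* opcode naming, presumably now shared via midgard_ops.h. A minimal sketch of what one expansion plausibly produces, with the opcode identifier and field layout assumed rather than taken from the macro:

        static midgard_instruction
        m_ld_attr_32(unsigned dest, unsigned src0)
        {
                /* hypothetical expansion of M_LOAD(ld_attr_32) */
                midgard_instruction i = {
                        .type = TAG_LOAD_STORE_4,
                        .ssa_args = { .dest = dest, .src0 = src0 },
                        .load_store = { .op = midgard_op_ld_attr_32 },
                };
                return i;
        }

Usage appears further down in the diff, e.g. m_ld_uniform_32(dest, offset).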
 
 static midgard_instruction
 v_alu_br_compact_cond(midgard_jmp_writeout_op op, unsigned tag, signed offset, unsigned cond)
@@ -407,278 +221,11 @@ midgard_create_branch_extended( midgard_condition cond,
         return branch;
 }
 
-typedef struct midgard_bundle {
-        /* Tag for the overall bundle */
-        int tag;
-
-        /* Instructions contained by the bundle */
-        int instruction_count;
-        midgard_instruction instructions[5];
-
-        /* Bundle-wide ALU configuration */
-        int padding;
-        int control;
-        bool has_embedded_constants;
-        float constants[4];
-        bool has_blend_constant;
-
-        uint16_t register_words[8];
-        int register_words_count;
-
-        uint64_t body_words[8];
-        size_t body_size[8];
-        int body_words_count;
-} midgard_bundle;
-
-typedef struct compiler_context {
-        nir_shader *nir;
-        gl_shader_stage stage;
-
-        /* Is internally a blend shader? Depends on stage == FRAGMENT */
-        bool is_blend;
-
-        /* Tracking for blend constant patching */
-        int blend_constant_number;
-        int blend_constant_offset;
-
-        /* Current NIR function */
-        nir_function *func;
-
-        /* Unordered list of midgard_blocks */
-        int block_count;
-        struct list_head blocks;
-
-        midgard_block *initial_block;
-        midgard_block *previous_source_block;
-        midgard_block *final_block;
-
-        /* List of midgard_instructions emitted for the current block */
-        midgard_block *current_block;
-
-        /* The current "depth" of the loop, for disambiguating breaks/continues
-         * when using nested loops */
-        int current_loop_depth;
-
-        /* Constants which have been loaded, for later inlining */
-        struct hash_table_u64 *ssa_constants;
-
-        /* SSA indices to be outputted to corresponding varying offset */
-        struct hash_table_u64 *ssa_varyings;
-
-        /* SSA values / registers which have been aliased. Naively, these
-         * demand a fmov output; instead, we alias them in a later pass to
-         * avoid the wasted op.
-         *
-         * A note on encoding: to avoid dynamic memory management here, rather
-         * than mapping to a pointer, we map to the source index; the key
-         * itself is just the destination index. */
-
-        struct hash_table_u64 *ssa_to_alias;
-        struct set *leftover_ssa_to_alias;
-
-        /* Actual SSA-to-register for RA */
-        struct hash_table_u64 *ssa_to_register;
-
-        /* Mapping of hashes computed from NIR indices to the sequential temp indices ultimately used in MIR */
-        struct hash_table_u64 *hash_to_temp;
-        int temp_count;
-        int max_hash;
-
-        /* Just the count of the max register used. Higher count => higher
-         * register pressure */
-        int work_registers;
-
-        /* Used for cont/last hinting. Increase when a tex op is added.
-         * Decrease when a tex op is removed. */
-        int texture_op_count;
-
-        /* Mapping of texture register -> SSA index for unaliasing */
-        int texture_index[2];
-
-        /* If any path hits a discard instruction */
-        bool can_discard;
-
-        /* The number of uniforms allowable for the fast path */
-        int uniform_cutoff;
-
-        /* Count of instructions emitted from NIR overall, across all blocks */
-        int instruction_count;
-
-        /* Alpha ref value passed in */
-        float alpha_ref;
-
-        /* The index corresponding to the fragment output */
-        unsigned fragment_output;
-
-        /* The mapping of sysvals to uniforms, the count, and the off-by-one inverse */
-        unsigned sysvals[MAX_SYSVAL_COUNT];
-        unsigned sysval_count;
-        struct hash_table_u64 *sysval_to_id;
-} compiler_context;
-
-/* Append instruction to end of current block */
-
-static midgard_instruction *
-mir_upload_ins(struct midgard_instruction ins)
-{
-        midgard_instruction *heap = malloc(sizeof(ins));
-        memcpy(heap, &ins, sizeof(ins));
-        return heap;
-}
-
-static void
-emit_mir_instruction(struct compiler_context *ctx, struct midgard_instruction ins)
-{
-        list_addtail(&(mir_upload_ins(ins))->link, &ctx->current_block->instructions);
-}
-
-static void
-mir_insert_instruction_before(struct midgard_instruction *tag, struct midgard_instruction ins)
-{
-        list_addtail(&(mir_upload_ins(ins))->link, &tag->link);
-}
-
-static void
-mir_remove_instruction(struct midgard_instruction *ins)
-{
-        list_del(&ins->link);
-}
-
-static midgard_instruction*
-mir_prev_op(struct midgard_instruction *ins)
-{
-        return list_last_entry(&(ins->link), midgard_instruction, link);
-}
-
-static midgard_instruction*
-mir_next_op(struct midgard_instruction *ins)
-{
-        return list_first_entry(&(ins->link), midgard_instruction, link);
-}
-
-#define mir_foreach_block(ctx, v) list_for_each_entry(struct midgard_block, v, &ctx->blocks, link) 
-#define mir_foreach_block_from(ctx, from, v) list_for_each_entry_from(struct midgard_block, v, from, &ctx->blocks, link)
-
-#define mir_foreach_instr(ctx, v) list_for_each_entry(struct midgard_instruction, v, &ctx->current_block->instructions, link) 
-#define mir_foreach_instr_safe(ctx, v) list_for_each_entry_safe(struct midgard_instruction, v, &ctx->current_block->instructions, link) 
-#define mir_foreach_instr_in_block(block, v) list_for_each_entry(struct midgard_instruction, v, &block->instructions, link) 
-#define mir_foreach_instr_in_block_safe(block, v) list_for_each_entry_safe(struct midgard_instruction, v, &block->instructions, link) 
-#define mir_foreach_instr_in_block_safe_rev(block, v) list_for_each_entry_safe_rev(struct midgard_instruction, v, &block->instructions, link) 
-#define mir_foreach_instr_in_block_from(block, v, from) list_for_each_entry_from(struct midgard_instruction, v, from, &block->instructions, link) 
-#define mir_foreach_instr_in_block_from_rev(block, v, from) list_for_each_entry_from_rev(struct midgard_instruction, v, from, &block->instructions, link) 
-
-
-static midgard_instruction *
-mir_last_in_block(struct midgard_block *block)
-{
-        return list_last_entry(&block->instructions, struct midgard_instruction, link);
-}
-
-static midgard_block *
-mir_get_block(compiler_context *ctx, int idx)
-{
-        struct list_head *lst = &ctx->blocks;
-
-        while ((idx--) + 1)
-                lst = lst->next;
-
-        return (struct midgard_block *) lst;
-}
-
-/* Pretty printer for internal Midgard IR */
-
-static void
-print_mir_source(int source)
-{
-        if (source >= SSA_FIXED_MINIMUM) {
-                /* Specific register */
-                int reg = SSA_REG_FROM_FIXED(source);
-
-                /* TODO: Moving threshold */
-                if (reg > 16 && reg < 24)
-                        printf("u%d", 23 - reg);
-                else
-                        printf("r%d", reg);
-        } else {
-                printf("%d", source);
-        }
-}
-
-static void
-print_mir_instruction(midgard_instruction *ins)
-{
-        printf("\t");
-
-        switch (ins->type) {
-        case TAG_ALU_4: {
-                midgard_alu_op op = ins->alu.op;
-                const char *name = alu_opcode_names[op];
-
-                if (ins->unit)
-                        printf("%d.", ins->unit);
-
-                printf("%s", name ? name : "??");
-                break;
-        }
-
-        case TAG_LOAD_STORE_4: {
-                midgard_load_store_op op = ins->load_store.op;
-                const char *name = load_store_opcode_names[op];
-
-                assert(name);
-                printf("%s", name);
-                break;
-        }
-
-        case TAG_TEXTURE_4: {
-                printf("texture");
-                break;
-        }
-
-        default:
-                assert(0);
-        }
-
-        ssa_args *args = &ins->ssa_args;
-
-        printf(" %d, ", args->dest);
-
-        print_mir_source(args->src0);
-        printf(", ");
-
-        if (args->inline_constant)
-                printf("#%d", ins->inline_constant);
-        else
-                print_mir_source(args->src1);
-
-        if (ins->has_constants)
-                printf(" <%f, %f, %f, %f>", ins->constants[0], ins->constants[1], ins->constants[2], ins->constants[3]);
-
-        printf("\n");
-}
-
-static void
-print_mir_block(midgard_block *block)
-{
-        printf("{\n");
-
-        mir_foreach_instr_in_block(block, ins) {
-                print_mir_instruction(ins);
-        }
-
-        printf("}\n");
-}
-
 static void
 attach_constants(compiler_context *ctx, midgard_instruction *ins, void *constants, int name)
 {
         ins->has_constants = true;
         memcpy(&ins->constants, constants, 16);
-
-        /* If this is the special blend constant, mark this instruction */
-
-        if (ctx->is_blend && ctx->blend_constant_number == name)
-                ins->has_blend_constant = true;
 }
 
 static int
@@ -797,9 +344,14 @@ static void
 optimise_nir(nir_shader *nir)
 {
         bool progress;
+        unsigned lower_flrp =
+                (nir->options->lower_flrp16 ? 16 : 0) |
+                (nir->options->lower_flrp32 ? 32 : 0) |
+                (nir->options->lower_flrp64 ? 64 : 0);
 
         NIR_PASS(progress, nir, nir_lower_regs_to_ssa);
         NIR_PASS(progress, nir, midgard_nir_lower_fdot2);
+        NIR_PASS(progress, nir, nir_lower_idiv);
 
         nir_lower_tex_options lower_tex_options = {
                 .lower_rect = true
@@ -820,6 +372,27 @@ optimise_nir(nir_shader *nir)
                 NIR_PASS(progress, nir, nir_opt_peephole_select, 64, false, true);
                 NIR_PASS(progress, nir, nir_opt_algebraic);
                 NIR_PASS(progress, nir, nir_opt_constant_folding);
+
+                if (lower_flrp != 0) {
+                        bool lower_flrp_progress = false;
+                        NIR_PASS(lower_flrp_progress,
+                                 nir,
+                                 nir_lower_flrp,
+                                 lower_flrp,
+                                 false /* always_precise */,
+                                 nir->options->lower_ffma);
+                        if (lower_flrp_progress) {
+                                NIR_PASS(progress, nir,
+                                         nir_opt_constant_folding);
+                                progress = true;
+                        }
+
+                        /* Nothing should rematerialize any flrps, so we only
+                         * need to do this lowering once.
+                         */
+                        lower_flrp = 0;
+                }
+
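For reference, the operation nir_lower_flrp decomposes is plain linear interpolation; modulo the precision options passed above, a scalar model is:

        float flrp(float a, float b, float t)
        {
                return a * (1.0f - t) + b * t;  /* may become ffma forms instead */
        }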
                 NIR_PASS(progress, nir, nir_opt_undef);
                 NIR_PASS(progress, nir, nir_opt_loop_unroll,
                          nir_var_shader_in |
@@ -843,6 +416,11 @@ optimise_nir(nir_shader *nir)
         } while (progress);
 
         NIR_PASS(progress, nir, nir_opt_algebraic_late);
+
+        /* We implement booleans as 32-bit 0/~0 */
+        NIR_PASS(progress, nir, nir_lower_bool_to_int32);
+
+        /* Now that booleans are lowered, we can run our late opts */
         NIR_PASS(progress, nir, midgard_nir_lower_algebraic_late);
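With the 0/~0 convention, a true boolean is all ones and a false one all zeros, so plain bitwise ops double as logical ops; this is also what lets the iand-with-itself trick in emit_condition below act as a boolean move. A sketch of the invariant:

        uint32_t b_true  = ~0u;  /* 32-bit NIR "true" */
        uint32_t b_false =  0u;  /* 32-bit NIR "false" */
        /* For either value, (b & b) == b, so iand(b, b) is a move */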
 
         /* Lower mods for float ops only. Integer ops don't support modifiers
@@ -853,9 +431,6 @@ optimise_nir(nir_shader *nir)
         NIR_PASS(progress, nir, nir_copy_prop);
         NIR_PASS(progress, nir, nir_opt_dce);
 
-        /* We implement booleans as 32-bit 0/~0 */
-        NIR_PASS(progress, nir, nir_lower_bool_to_int32);
-
         /* Take us out of SSA */
         NIR_PASS(progress, nir, nir_lower_locals_to_regs);
         NIR_PASS(progress, nir, nir_convert_from_ssa, true);
@@ -887,18 +462,6 @@ unalias_ssa(compiler_context *ctx, int dest)
         /* TODO: Remove from leftover or no? */
 }
 
-static void
-midgard_pin_output(compiler_context *ctx, int index, int reg)
-{
-        _mesa_hash_table_u64_insert(ctx->ssa_to_register, index + 1, (void *) ((uintptr_t) reg + 1));
-}
-
-static bool
-midgard_is_pinned(compiler_context *ctx, int index)
-{
-        return _mesa_hash_table_u64_search(ctx->ssa_to_register, index + 1) != NULL;
-}
-
 /* Do not actually emit a load; instead, cache the constant for inlining */
 
 static void
@@ -906,77 +469,11 @@ emit_load_const(compiler_context *ctx, nir_load_const_instr *instr)
 {
         nir_ssa_def def = instr->def;
 
-        float *v = ralloc_array(NULL, float, 4);
+        float *v = rzalloc_array(NULL, float, 4);
         nir_const_load_to_arr(v, instr, f32);
         _mesa_hash_table_u64_insert(ctx->ssa_constants, def.index + 1, v);
 }
 
-/* Duplicate bits to convert sane 4-bit writemask to obscure 8-bit format (or
- * do the inverse) */
-
-static unsigned
-expand_writemask(unsigned mask)
-{
-        unsigned o = 0;
-
-        for (int i = 0; i < 4; ++i)
-                if (mask & (1 << i))
-                        o |= (3 << (2 * i));
-
-        return o;
-}
-
-static unsigned
-squeeze_writemask(unsigned mask)
-{
-        unsigned o = 0;
-
-        for (int i = 0; i < 4; ++i)
-                if (mask & (3 << (2 * i)))
-                        o |= (1 << i);
-
-        return o;
-
-}
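A quick sanity check of the pair: each set bit of the 4-bit mask becomes a two-bit lane and vice versa, so the two functions are inverses on well-formed masks:

        assert(expand_writemask(0x5) == 0x33);   /* x and z -> bit pairs */
        assert(squeeze_writemask(0x33) == 0x5);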
-
-/* Determines effective writemask, taking quirks and expansion into account */
-static unsigned
-effective_writemask(midgard_vector_alu *alu)
-{
-        /* Channel count is off-by-one to fit in two bits (0 channels makes
-         * no sense) */
-
-        unsigned channel_count = GET_CHANNEL_COUNT(alu_opcode_props[alu->op]);
-
-        /* If there is a fixed channel count, construct the appropriate mask */
-
-        if (channel_count)
-                return (1 << channel_count) - 1;
-
-        /* Otherwise, just squeeze the existing mask */
-        return squeeze_writemask(alu->mask);
-}
-
-static unsigned
-find_or_allocate_temp(compiler_context *ctx, unsigned hash)
-{
-        if ((hash < 0) || (hash >= SSA_FIXED_MINIMUM))
-                return hash;
-
-        unsigned temp = (uintptr_t) _mesa_hash_table_u64_search(ctx->hash_to_temp, hash + 1);
-
-        if (temp)
-                return temp - 1;
-
-        /* If no temp is found, allocate one */
-        temp = ctx->temp_count++;
-        ctx->max_hash = MAX2(ctx->max_hash, hash);
-
-        _mesa_hash_table_u64_insert(ctx->hash_to_temp, hash + 1, (void *) ((uintptr_t) temp + 1));
-
-        return temp;
-}
-
 static unsigned
 nir_src_index(compiler_context *ctx, nir_src *src)
 {
@@ -1005,8 +502,21 @@ nir_alu_src_index(compiler_context *ctx, nir_alu_src *src)
         return nir_src_index(ctx, &src->src);
 }
 
-/* Midgard puts conditionals in r31.w; move an arbitrary source (the output of
- * a conditional test) into that register */
+static bool
+nir_is_non_scalar_swizzle(nir_alu_src *src, unsigned nr_components)
+{
+        unsigned comp = src->swizzle[0];
+
+        for (unsigned c = 1; c < nr_components; ++c) {
+                if (src->swizzle[c] != comp)
+                        return true;
+        }
+
+        return false;
+}
+
+/* Midgard puts scalar conditionals in r31.w; move an arbitrary source (the
+ * output of a conditional test) into that register */
 
 static void
 emit_condition(compiler_context *ctx, nir_src *src, bool for_branch, unsigned component)
@@ -1024,15 +534,21 @@ emit_condition(compiler_context *ctx, nir_src *src, bool for_branch, unsigned co
 
         midgard_instruction ins = {
                 .type = TAG_ALU_4,
-                .unit = for_branch ? UNIT_SMUL : UNIT_SADD, /* TODO: DEDUCE THIS */
+
+                /* We need to set the conditional as close to the branch as possible */
+                .precede_break = true,
+                .unit = for_branch ? UNIT_SMUL : UNIT_SADD,
+
                 .ssa_args = {
                         .src0 = condition,
                         .src1 = condition,
                         .dest = SSA_FIXED_REGISTER(31),
                 },
                 .alu = {
                         .op = midgard_alu_op_iand,
-                        .reg_mode = midgard_reg_mode_full,
+                        .outmod = midgard_outmod_int,
+                        .reg_mode = midgard_reg_mode_32,
                         .dest_override = midgard_dest_override_none,
                         .mask = (0x3 << 6), /* w */
                         .src1 = vector_alu_srco_unsigned(alu_src),
@@ -1043,8 +559,49 @@ emit_condition(compiler_context *ctx, nir_src *src, bool for_branch, unsigned co
         emit_mir_instruction(ctx, ins);
 }
 
-/* Likewise, indirect offsets are put in r27.w. TODO: Allow componentwise
- * pinning to eliminate this move in all known cases */
+/* Or, for mixed conditions (with csel_v), here's a vector version using all of
+ * r31 instead */
+
+static void
+emit_condition_mixed(compiler_context *ctx, nir_alu_src *src, unsigned nr_comp)
+{
+        int condition = nir_src_index(ctx, &src->src);
+
+        /* Source to swizzle the desired component into w */
+
+        const midgard_vector_alu_src alu_src = {
+                .swizzle = SWIZZLE_FROM_ARRAY(src->swizzle),
+        };
+
+        /* There is no boolean move instruction. Instead, we simulate a move by
+         * ANDing the condition with itself to get it into r31.w */
+
+        midgard_instruction ins = {
+                .type = TAG_ALU_4,
+                .precede_break = true,
+                .ssa_args = {
+                        .src0 = condition,
+                        .src1 = condition,
+                        .dest = SSA_FIXED_REGISTER(31),
+                },
+                .alu = {
+                        .op = midgard_alu_op_iand,
+                        .outmod = midgard_outmod_int,
+                        .reg_mode = midgard_reg_mode_32,
+                        .dest_override = midgard_dest_override_none,
+                        .mask = expand_writemask((1 << nr_comp) - 1),
+                        .src1 = vector_alu_srco_unsigned(alu_src),
+                        .src2 = vector_alu_srco_unsigned(alu_src)
+                },
+        };
+
+        emit_mir_instruction(ctx, ins);
+}
+
+/* Likewise, indirect offsets are put in r27.w. TODO: Allow componentwise
+ * pinning to eliminate this move in all known cases */
 
 static void
 emit_indirect_offset(compiler_context *ctx, nir_src *src)
@@ -1060,7 +617,8 @@ emit_indirect_offset(compiler_context *ctx, nir_src *src)
                 },
                 .alu = {
                         .op = midgard_alu_op_imov,
-                        .reg_mode = midgard_reg_mode_full,
+                        .outmod = midgard_outmod_int,
+                        .reg_mode = midgard_reg_mode_32,
                         .dest_override = midgard_dest_override_none,
                         .mask = (0x3 << 6), /* w */
                         .src1 = vector_alu_srco_unsigned(zero_alu_src),
@@ -1075,6 +633,19 @@ emit_indirect_offset(compiler_context *ctx, nir_src *src)
        case nir_op_##nir: \
                op = midgard_alu_op_##_op; \
                break;
+static bool
+nir_is_fzero_constant(nir_src src)
+{
+        if (!nir_src_is_const(src))
+                return false;
+
+        for (unsigned c = 0; c < nir_src_num_components(src); ++c) {
+                if (nir_src_comp_as_float(src, c) != 0.0)
+                        return false;
+        }
+
+        return true;
+}
 
 static void
 emit_alu(compiler_context *ctx, nir_alu_instr *instr)
@@ -1103,7 +674,6 @@ emit_alu(compiler_context *ctx, nir_alu_instr *instr)
                 ALU_CASE(imax, imax);
                 ALU_CASE(umin, umin);
                 ALU_CASE(umax, umax);
-                ALU_CASE(fmov, fmov);
                 ALU_CASE(ffloor, ffloor);
                 ALU_CASE(fround_even, froundeven);
                 ALU_CASE(ftrunc, ftrunc);
@@ -1113,11 +683,11 @@ emit_alu(compiler_context *ctx, nir_alu_instr *instr)
                 ALU_CASE(iadd, iadd);
                 ALU_CASE(isub, isub);
                 ALU_CASE(imul, imul);
-                ALU_CASE(iabs, iabs);
 
-                /* XXX: Use fmov, not imov, since imov was causing major
-                 * issues with texture precision? XXX research */
-                ALU_CASE(imov, imov);
+                /* Zero shoved as second-arg */
+                ALU_CASE(iabs, iabsdiff);
+
+                ALU_CASE(mov, imov);
 
                 ALU_CASE(feq32, feq);
                 ALU_CASE(fne32, fne);
@@ -1164,7 +734,7 @@ emit_alu(compiler_context *ctx, nir_alu_instr *instr)
                 ALU_CASE(iand, iand);
                 ALU_CASE(ior, ior);
                 ALU_CASE(ixor, ixor);
-                ALU_CASE(inot, inot);
+                ALU_CASE(inot, inand);
                 ALU_CASE(ishl, ishl);
                 ALU_CASE(ishr, iasr);
                 ALU_CASE(ushr, ilsr);
@@ -1185,6 +755,11 @@ emit_alu(compiler_context *ctx, nir_alu_instr *instr)
                 ALU_CASE(b32any_inequal3, ibany_neq);
                 ALU_CASE(b32any_inequal4, ibany_neq);
 
+                /* Source mods will be shoved in later */
+                ALU_CASE(fabs, fmov);
+                ALU_CASE(fneg, fmov);
+                ALU_CASE(fsat, fmov);
+
         /* For greater-or-equal, we lower to less-or-equal and flip the
          * arguments */
 
@@ -1208,22 +783,27 @@ emit_alu(compiler_context *ctx, nir_alu_instr *instr)
         }
 
         case nir_op_b32csel: {
-                op = midgard_alu_op_fcsel;
+                /* Midgard features both fcsel and icsel, depending on
+                 * the type of the arguments/output. However, as long
+                 * as we're careful we can _always_ use icsel and
+                 * _never_ need fcsel, since the latter does additional
+                 * floating-point-specific processing whereas the
+                 * former just moves bits on the wire. It's not obvious
+                 * why these are separate opcodes, save for the ability
+                 * to do things like sat/pos/abs/neg for free */
+
+                bool mixed = nir_is_non_scalar_swizzle(&instr->src[0], nr_components);
+                op = mixed ? midgard_alu_op_icsel_v : midgard_alu_op_icsel;
 
                 /* csel works as a two-arg in Midgard, since the condition is hardcoded in r31.w */
                 nr_inputs = 2;
 
-                /* Figure out which component the condition is in */
-
-                unsigned comp = instr->src[0].swizzle[0];
-
-                /* Make sure NIR isn't throwing a mixed condition at us */
-
-                for (unsigned c = 1; c < nr_components; ++c)
-                        assert(instr->src[0].swizzle[c] == comp);
+                /* Emit the condition into r31 */
 
-                /* Emit the condition into r31.w */
-                emit_condition(ctx, &instr->src[0].src, false, comp);
+                if (mixed)
+                        emit_condition_mixed(ctx, &instr->src[0], nr_components);
+                else
+                        emit_condition(ctx, &instr->src[0].src, false, instr->src[0].swizzle[0]);
 
                 /* The condition is the first argument; move the other
                  * arguments up one to be a binary instruction for
@@ -1239,13 +819,33 @@ emit_alu(compiler_context *ctx, nir_alu_instr *instr)
                 return;
         }
 
+        /* Midgard can perform certain modifiers on the output of an ALU op */
+        midgard_outmod outmod =
+                midgard_is_integer_out_op(op) ? midgard_outmod_int :
+                instr->dest.saturate ? midgard_outmod_sat : midgard_outmod_none;
+
+        if (instr->op == nir_op_fsat)
+                outmod = midgard_outmod_sat;
+
+        /* fmax(a, 0.0) can turn into a .pos modifier as an optimization */
+
+        if (instr->op == nir_op_fmax) {
+                if (nir_is_fzero_constant(instr->src[0].src)) {
+                        op = midgard_alu_op_fmov;
+                        nr_inputs = 1;
+                        outmod = midgard_outmod_pos;
+                        instr->src[0] = instr->src[1];
+                } else if (nir_is_fzero_constant(instr->src[1].src)) {
+                        op = midgard_alu_op_fmov;
+                        nr_inputs = 1;
+                        outmod = midgard_outmod_pos;
+                }
+        }
+
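This rewrite is justified by the semantics of the .pos output modifier, which (as the code above relies on) clamps negative results to zero; modeled in C:

        float outmod_pos(float x)
        {
                return x < 0.0f ? 0.0f : x;  /* hence fmax(a, 0.0) == fmov.pos(a) */
        }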
         /* Fetch unit, quirks, etc information */
-        unsigned opcode_props = alu_opcode_props[op];
+        unsigned opcode_props = alu_opcode_props[op].props;
         bool quirk_flipped_r24 = opcode_props & QUIRK_FLIPPED_R24;
 
-        /* Initialise fields common between scalar/vector instructions */
-        midgard_outmod outmod = instr->dest.saturate ? midgard_outmod_sat : midgard_outmod_none;
-
         /* src0 will always exist afaik, but src1 will not for 1-argument
          * instructions. The latter can only be fetched if the instruction
          * needs it, or else we may segfault. */
@@ -1276,11 +876,23 @@ emit_alu(compiler_context *ctx, nir_alu_instr *instr)
                 assert(0);
         }
 
+        /* These were lowered to a move, so apply the corresponding mod */
+
+        if (instr->op == nir_op_fneg || instr->op == nir_op_fabs) {
+                nir_alu_src *s = nirmods[quirk_flipped_r24];
+
+                if (instr->op == nir_op_fneg)
+                        s->negate = !s->negate;
+
+                if (instr->op == nir_op_fabs)
+                        s->abs = !s->abs;
+        }
+
         bool is_int = midgard_is_integer_op(op);
 
         midgard_vector_alu alu = {
                 .op = op,
-                .reg_mode = midgard_reg_mode_full,
+                .reg_mode = midgard_reg_mode_32,
                 .dest_override = midgard_dest_override_none,
                 .outmod = outmod,
 
@@ -1319,12 +931,17 @@ emit_alu(compiler_context *ctx, nir_alu_instr *instr)
                 }
 
                 ins.alu.src2 = vector_alu_srco_unsigned(blank_alu_src_xxxx);
-        } else if (instr->op == nir_op_f2b32 || instr->op == nir_op_i2b32) {
+        } else if (nr_inputs == 1 && !quirk_flipped_r24) {
+                /* Lots of instructions need a 0 plonked in */
                 ins.ssa_args.inline_constant = false;
                 ins.ssa_args.src1 = SSA_FIXED_REGISTER(REGISTER_CONSTANT);
                 ins.has_constants = true;
                 ins.constants[0] = 0.0f;
                 ins.alu.src2 = vector_alu_srco_unsigned(blank_alu_src_xxxx);
+        } else if (instr->op == nir_op_inot) {
+                /* ~b = ~(b & b), so duplicate the source */
+                ins.ssa_args.src1 = ins.ssa_args.src0;
+                ins.alu.src2 = ins.alu.src1;
         }
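Mapping inot to inand with a duplicated source is just NAND(b, b) == NOT(b):

        /* ~(b & b) == ~b for any b */
        assert(~(0xCAFEu & 0xCAFEu) == ~0xCAFEu);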
 
         if ((opcode_props & UNITS_ALL) == UNIT_VLUT) {
@@ -1369,7 +986,7 @@ emit_uniform_read(compiler_context *ctx, unsigned dest, unsigned offset, nir_src
                  * higher-indexed uniforms, at a performance cost. More
                  * generally, we're emitting a UBO read instruction. */
 
-                midgard_instruction ins = m_load_uniform_32(dest, offset);
+                midgard_instruction ins = m_ld_uniform_32(dest, offset);
 
                 /* TODO: Don't split */
                 ins.load_store.varying_parameters = (offset & 7) << 7;
@@ -1403,6 +1020,79 @@ emit_sysval_read(compiler_context *ctx, nir_intrinsic_instr *instr)
         emit_uniform_read(ctx, dest, uniform, NULL);
 }
 
+/* Reads an RGBA8888 value from the tilebuffer and converts it to an RGBA32F
+ * register, using scalar ops that work on earlier Midgard generations. Newer
+ * Midgard generations have faster vectorized reads. This operation is for
+ * blend shaders in particular; reading the tilebuffer from the fragment
+ * shader remains an open problem. */
+
+static void
+emit_fb_read_blend_scalar(compiler_context *ctx, unsigned reg)
+{
+        midgard_instruction ins = m_ld_color_buffer_8(reg, 0);
+        ins.load_store.swizzle = 0; /* xxxx */
+
+        /* Read each component sequentially */
+
+        for (unsigned c = 0; c < 4; ++c) {
+                ins.load_store.mask = (1 << c);
+                ins.load_store.unknown = c;
+                emit_mir_instruction(ctx, ins);
+        }
+
+        /* vadd.u2f hr2, zext(hr2), #0 */
+
+        midgard_vector_alu_src alu_src = blank_alu_src;
+        alu_src.mod = midgard_int_zero_extend;
+        alu_src.half = true;
+
+        midgard_instruction u2f = {
+                .type = TAG_ALU_4,
+                .ssa_args = {
+                        .src0 = reg,
+                        .src1 = SSA_UNUSED_0,
+                        .dest = reg,
+                        .inline_constant = true
+                },
+                .alu = {
+                        .op = midgard_alu_op_u2f,
+                        .reg_mode = midgard_reg_mode_16,
+                        .dest_override = midgard_dest_override_none,
+                        .mask = 0xF,
+                        .src1 = vector_alu_srco_unsigned(alu_src),
+                        .src2 = vector_alu_srco_unsigned(blank_alu_src),
+                }
+        };
+
+        emit_mir_instruction(ctx, u2f);
+
+        /* vmul.fmul.sat r1, hr2, #0.00392151 */
+
+        alu_src.mod = 0;
+
+        midgard_instruction fmul = {
+                .type = TAG_ALU_4,
+                .inline_constant = _mesa_float_to_half(1.0 / 255.0),
+                .ssa_args = {
+                        .src0 = reg,
+                        .dest = reg,
+                        .src1 = SSA_UNUSED_0,
+                        .inline_constant = true
+                },
+                .alu = {
+                        .op = midgard_alu_op_fmul,
+                        .reg_mode = midgard_reg_mode_32,
+                        .dest_override = midgard_dest_override_none,
+                        .outmod = midgard_outmod_sat,
+                        .mask = 0xFF,
+                        .src1 = vector_alu_srco_unsigned(alu_src),
+                        .src2 = vector_alu_srco_unsigned(blank_alu_src),
+                }
+        };
+
+        emit_mir_instruction(ctx, fmul);
+}
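Taken together, the u2f and saturated multiply implement a per-channel unorm8-to-float conversion; the scalar math being approximated (the inline multiplier is the half-float rounding of 1/255) is:

        float unorm8_to_float(uint8_t v)
        {
                return v * (1.0f / 255.0f);  /* then clamped by .sat */
        }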
+
 static void
 emit_intrinsic(compiler_context *ctx, nir_intrinsic_instr *instr)
 {
@@ -1428,12 +1118,16 @@ emit_intrinsic(compiler_context *ctx, nir_intrinsic_instr *instr)
         case nir_intrinsic_load_input:
                 offset = nir_intrinsic_base(instr);
 
+                unsigned nr_comp = nir_intrinsic_dest_components(instr);
                 bool direct = nir_src_is_const(instr->src[0]);
 
                 if (direct) {
                         offset += nir_src_as_uint(instr->src[0]);
                 }
 
+                /* We may need to apply a fractional offset */
+                int component = instr->intrinsic == nir_intrinsic_load_input ?
+                        nir_intrinsic_component(instr) : 0;
                 reg = nir_dest_index(ctx, &instr->dest);
 
                 if (instr->intrinsic == nir_intrinsic_load_uniform && !ctx->is_blend) {
@@ -1442,7 +1136,9 @@ emit_intrinsic(compiler_context *ctx, nir_intrinsic_instr *instr)
                         /* XXX: Half-floats? */
                         /* TODO: swizzle, mask */
 
-                        midgard_instruction ins = m_load_vary_32(reg, offset);
+                        midgard_instruction ins = m_ld_vary_32(reg, offset);
+                        ins.load_store.mask = (1 << nr_comp) - 1;
+                        ins.load_store.swizzle = SWIZZLE_XYZW >> (2 * component);
 
                         midgard_varying_parameter p = {
                                 .is_varying = 1,
@@ -1464,109 +1160,17 @@ emit_intrinsic(compiler_context *ctx, nir_intrinsic_instr *instr)
                                 ins.load_store.unknown = 0x79e; /* xxx: what is this? */
                         }
 
-                        emit_mir_instruction(ctx, ins);
-                } else if (ctx->is_blend && instr->intrinsic == nir_intrinsic_load_uniform) {
-                        /* Constant encoded as a pinned constant */
-
-                        midgard_instruction ins = v_fmov(SSA_FIXED_REGISTER(REGISTER_CONSTANT), blank_alu_src, reg);
-                        ins.has_constants = true;
-                        ins.has_blend_constant = true;
                         emit_mir_instruction(ctx, ins);
                 } else if (ctx->is_blend) {
-                        /* For blend shaders, a load might be
-                         * translated various ways depending on what
-                         * we're loading. Figure out how this is used */
-
-                        nir_variable *out = NULL;
-
-                        nir_foreach_variable(var, &ctx->nir->inputs) {
-                                int drvloc = var->data.driver_location;
-
-                                if (nir_intrinsic_base(instr) == drvloc) {
-                                        out = var;
-                                        break;
-                                }
-                        }
-
-                        assert(out);
-
-                        if (out->data.location == VARYING_SLOT_COL0) {
-                                /* Source color preloaded to r0 */
-
-                                midgard_pin_output(ctx, reg, 0);
-                        } else if (out->data.location == VARYING_SLOT_COL1) {
-                                /* Destination color must be read from framebuffer */
-
-                                midgard_instruction ins = m_load_color_buffer_8(reg, 0);
-                                ins.load_store.swizzle = 0; /* xxxx */
-
-                                /* Read each component sequentially */
-
-                                for (int c = 0; c < 4; ++c) {
-                                        ins.load_store.mask = (1 << c);
-                                        ins.load_store.unknown = c;
-                                        emit_mir_instruction(ctx, ins);
-                                }
-
-                                /* vadd.u2f hr2, zext(hr2), #0 */
-
-                                midgard_vector_alu_src alu_src = blank_alu_src;
-                                alu_src.mod = midgard_int_zero_extend;
-                                alu_src.half = true;
-
-                                midgard_instruction u2f = {
-                                        .type = TAG_ALU_4,
-                                        .ssa_args = {
-                                                .src0 = reg,
-                                                .src1 = SSA_UNUSED_0,
-                                                .dest = reg,
-                                                .inline_constant = true
-                                        },
-                                        .alu = {
-                                                .op = midgard_alu_op_u2f,
-                                                .reg_mode = midgard_reg_mode_half,
-                                                .dest_override = midgard_dest_override_none,
-                                                .mask = 0xF,
-                                                .src1 = vector_alu_srco_unsigned(alu_src),
-                                                .src2 = vector_alu_srco_unsigned(blank_alu_src),
-                                        }
-                                };
-
-                                emit_mir_instruction(ctx, u2f);
-
-                                /* vmul.fmul.sat r1, hr2, #0.00392151 */
-
-                                alu_src.mod = 0;
-
-                                midgard_instruction fmul = {
-                                        .type = TAG_ALU_4,
-                                        .inline_constant = _mesa_float_to_half(1.0 / 255.0),
-                                        .ssa_args = {
-                                                .src0 = reg,
-                                                .dest = reg,
-                                                .src1 = SSA_UNUSED_0,
-                                                .inline_constant = true
-                                        },
-                                        .alu = {
-                                                .op = midgard_alu_op_fmul,
-                                                .reg_mode = midgard_reg_mode_full,
-                                                .dest_override = midgard_dest_override_none,
-                                                .outmod = midgard_outmod_sat,
-                                                .mask = 0xFF,
-                                                .src1 = vector_alu_srco_unsigned(alu_src),
-                                                .src2 = vector_alu_srco_unsigned(blank_alu_src),
-                                        }
-                                };
+                        /* For blend shaders, load the input color, which is
+                         * preloaded to r0 */
 
-                                emit_mir_instruction(ctx, fmul);
-                        } else {
-                                DBG("Unknown input in blend shader\n");
-                                assert(0);
-                        }
-                } else if (ctx->stage == MESA_SHADER_VERTEX) {
-                        midgard_instruction ins = m_load_attr_32(reg, offset);
+                        midgard_instruction move = v_fmov(reg, blank_alu_src, SSA_FIXED_REGISTER(0));
+                        emit_mir_instruction(ctx, move);
+                } else if (ctx->stage == MESA_SHADER_VERTEX) {
+                        midgard_instruction ins = m_ld_attr_32(reg, offset);
                         ins.load_store.unknown = 0x1E1E; /* XXX: What is this? */
-                        ins.load_store.mask = (1 << instr->num_components) - 1;
+                        ins.load_store.mask = (1 << nr_comp) - 1;
                         emit_mir_instruction(ctx, ins);
                 } else {
                         DBG("Unknown load\n");
@@ -1575,6 +1179,34 @@ emit_intrinsic(compiler_context *ctx, nir_intrinsic_instr *instr)
 
                 break;
 
+        case nir_intrinsic_load_output:
+                assert(nir_src_is_const(instr->src[0]));
+                reg = nir_dest_index(ctx, &instr->dest);
+
+                if (ctx->is_blend) {
+                        /* TODO: MRT */
+                        emit_fb_read_blend_scalar(ctx, reg);
+                } else {
+                        DBG("Unknown output load\n");
+                        assert(0);
+                }
+
+                break;
+
+        case nir_intrinsic_load_blend_const_color_rgba: {
+                assert(ctx->is_blend);
+                reg = nir_dest_index(ctx, &instr->dest);
+
+                /* Blend constants are embedded directly in the shader and
+                 * patched in, so we use some magic routing */
+
+                midgard_instruction ins = v_fmov(SSA_FIXED_REGISTER(REGISTER_CONSTANT), blank_alu_src, reg);
+                ins.has_constants = true;
+                ins.has_blend_constant = true;
+                emit_mir_instruction(ctx, ins);
+                break;
+        }
+
         case nir_intrinsic_store_output:
                 assert(nir_src_is_const(instr->src[1]) && "no indirect outputs");
 
@@ -1589,7 +1221,8 @@ emit_intrinsic(compiler_context *ctx, nir_intrinsic_instr *instr)
                          * framebuffer writeout dance. TODO: Defer
                          * writes */
 
-                        midgard_pin_output(ctx, reg, 0);
+                        midgard_instruction move = v_fmov(reg, blank_alu_src, SSA_FIXED_REGISTER(0));
+                        emit_mir_instruction(ctx, move);
 
                         /* Save the index we're writing to for later reference
                          * in the epilogue */
@@ -1597,44 +1230,23 @@ emit_intrinsic(compiler_context *ctx, nir_intrinsic_instr *instr)
                         ctx->fragment_output = reg;
                 } else if (ctx->stage == MESA_SHADER_VERTEX) {
                         /* Varyings are written into one of two special
-                         * varying register, r26 or r27. The register itself is selected as the register
-                         * in the st_vary instruction, minus the base of 26. E.g. write into r27 and then call st_vary(1)
-                         *
-                         * Normally emitting fmov's is frowned upon,
-                         * but due to unique constraints of
-                         * REGISTER_VARYING, fmov emission + a
-                         * dedicated cleanup pass is the only way to
-                         * guarantee correctness when considering some
-                         * (common) edge cases XXX: FIXME */
-
-                        /* If this varying corresponds to a constant (why?!),
-                         * emit that now since it won't get picked up by
-                         * hoisting (since there is no corresponding move
-                         * emitted otherwise) */
-
-                        void *constant_value = _mesa_hash_table_u64_search(ctx->ssa_constants, reg + 1);
-
-                        if (constant_value) {
-                                /* Special case: emit the varying write
-                                 * directly to r26 (looks funny in asm but it's
-                                 * fine) and emit the store _now_. Possibly
-                                 * slightly slower, but this is a really stupid
-                                 * special case anyway (why on earth would you
-                                 * have a constant varying? Your own fault for
-                                 * slightly worse perf :P) */
-
-                                midgard_instruction ins = v_fmov(SSA_FIXED_REGISTER(REGISTER_CONSTANT), blank_alu_src, SSA_FIXED_REGISTER(26));
-                                attach_constants(ctx, &ins, constant_value, reg + 1);
-                                emit_mir_instruction(ctx, ins);
+                         * varying register, r26 or r27. The register itself is
+                         * selected as the register in the st_vary instruction,
+                         * minus the base of 26. E.g. write into r27 and then
+                         * call st_vary(1) */
 
-                                midgard_instruction st = m_store_vary_32(SSA_FIXED_REGISTER(0), offset);
-                                st.load_store.unknown = 0x1E9E; /* XXX: What is this? */
-                                emit_mir_instruction(ctx, st);
-                        } else {
-                                /* Do not emit the varying yet -- instead, just mark down that we need to later */
+                        midgard_instruction ins = v_fmov(reg, blank_alu_src, SSA_FIXED_REGISTER(26));
+                        emit_mir_instruction(ctx, ins);
 
-                                _mesa_hash_table_u64_insert(ctx->ssa_varyings, reg + 1, (void *) ((uintptr_t) (offset + 1)));
-                        }
+                        /* We should have been vectorized. That also lets us
+                         * ignore the mask, because the mask component on
+                         * st_vary is (as far as I can tell) ignored [the blob
+                         * sets it to zero] */
+                        assert(nir_intrinsic_component(instr) == 0);
+
+                        midgard_instruction st = m_st_vary_32(SSA_FIXED_REGISTER(0), offset);
+                        st.load_store.unknown = 0x1E9E; /* XXX: What is this? */
+                        emit_mir_instruction(ctx, st);
                 } else {
                         DBG("Unknown store\n");
                         assert(0);
@@ -1724,7 +1336,7 @@ emit_tex(compiler_context *ctx, nir_tex_instr *instr)
                                 midgard_instruction move = v_fmov(index, alu_src, SSA_FIXED_REGISTER(27));
                                 emit_mir_instruction(ctx, move);
 
-                                midgard_instruction st = m_store_cubemap_coords(reg, 0);
+                                midgard_instruction st = m_st_cubemap_coords(reg, 0);
                                 st.load_store.unknown = 0x24; /* XXX: What is this? */
                                 st.load_store.mask = 0x3; /* xy? */
                                 st.load_store.swizzle = alu_src.swizzle;
@@ -1737,1123 +1349,125 @@ emit_tex(compiler_context *ctx, nir_tex_instr *instr)
                                 emit_mir_instruction(ctx, ins);
                         }
 
-                        //midgard_pin_output(ctx, index, REGISTER_TEXTURE_BASE + in_reg);
-
                         break;
                 }
 
                 default: {
                         DBG("Unknown source type\n");
                         //assert(0);
-                        break;
-                }
-                }
-        }
-
-        /* No helper to build texture words -- we do it all here */
-        midgard_instruction ins = {
-                .type = TAG_TEXTURE_4,
-                .texture = {
-                        .op = TEXTURE_OP_NORMAL,
-                        .format = midgard_tex_format(instr->sampler_dim),
-                        .texture_handle = texture_index,
-                        .sampler_handle = sampler_index,
-
-                        /* TODO: Don't force xyzw */
-                        .swizzle = SWIZZLE(COMPONENT_X, COMPONENT_Y, COMPONENT_Z, COMPONENT_W),
-                        .mask = 0xF,
-
-                        /* TODO: half */
-                        //.in_reg_full = 1,
-                        .out_full = 1,
-
-                        .filter = 1,
-
-                        /* Always 1 */
-                        .unknown7 = 1,
-
-                        /* Assume we can continue; hint it out later */
-                        .cont = 1,
-                }
-        };
-
-        /* Set registers to read and write from the same place */
-        ins.texture.in_reg_select = in_reg;
-        ins.texture.out_reg_select = out_reg;
-
-        /* TODO: Dynamic swizzle input selection, half-swizzles? */
-        if (instr->sampler_dim == GLSL_SAMPLER_DIM_3D) {
-                ins.texture.in_reg_swizzle_right = COMPONENT_X;
-                ins.texture.in_reg_swizzle_left = COMPONENT_Y;
-                //ins.texture.in_reg_swizzle_third = COMPONENT_Z;
-        } else {
-                ins.texture.in_reg_swizzle_left = COMPONENT_X;
-                ins.texture.in_reg_swizzle_right = COMPONENT_Y;
-                //ins.texture.in_reg_swizzle_third = COMPONENT_X;
-        }
-
-        emit_mir_instruction(ctx, ins);
-
-        /* Simultaneously alias the destination and emit a move for it. The move will be eliminated if possible */
-
-        int o_reg = REGISTER_TEXTURE_BASE + out_reg, o_index = nir_dest_index(ctx, &instr->dest);
-        alias_ssa(ctx, o_index, SSA_FIXED_REGISTER(o_reg));
-        ctx->texture_index[reg] = o_index;
-
-        midgard_instruction ins2 = v_fmov(SSA_FIXED_REGISTER(o_reg), blank_alu_src, o_index);
-        emit_mir_instruction(ctx, ins2);
-
-        /* Used for .cont and .last hinting */
-        ctx->texture_op_count++;
-}
-
-static void
-emit_jump(compiler_context *ctx, nir_jump_instr *instr)
-{
-        switch (instr->type) {
-                case nir_jump_break: {
-                        /* Emit a branch out of the loop */
-                        struct midgard_instruction br = v_branch(false, false);
-                        br.branch.target_type = TARGET_BREAK;
-                        br.branch.target_break = ctx->current_loop_depth;
-                        emit_mir_instruction(ctx, br);
-
-                        DBG("break..\n");
-                        break;
-                }
-
-                default:
-                        DBG("Unknown jump type %d\n", instr->type);
-                        break;
-        }
-}
-
-static void
-emit_instr(compiler_context *ctx, struct nir_instr *instr)
-{
-        switch (instr->type) {
-        case nir_instr_type_load_const:
-                emit_load_const(ctx, nir_instr_as_load_const(instr));
-                break;
-
-        case nir_instr_type_intrinsic:
-                emit_intrinsic(ctx, nir_instr_as_intrinsic(instr));
-                break;
-
-        case nir_instr_type_alu:
-                emit_alu(ctx, nir_instr_as_alu(instr));
-                break;
-
-        case nir_instr_type_tex:
-                emit_tex(ctx, nir_instr_as_tex(instr));
-                break;
-
-        case nir_instr_type_jump:
-                emit_jump(ctx, nir_instr_as_jump(instr));
-                break;
-
-        case nir_instr_type_ssa_undef:
-                /* Spurious */
-                break;
-
-        default:
-                DBG("Unhandled instruction type\n");
-                break;
-        }
-}
-
-/* Determine the actual hardware register from the index, based on the RA results or special values */
-
-static int
-dealias_register(compiler_context *ctx, struct ra_graph *g, int reg, int maxreg)
-{
-        if (reg >= SSA_FIXED_MINIMUM)
-                return SSA_REG_FROM_FIXED(reg);
-
-        if (reg >= 0) {
-                assert(reg < maxreg);
-                int r = ra_get_node_reg(g, reg);
-                ctx->work_registers = MAX2(ctx->work_registers, r);
-                return r;
-        }
-
-        switch (reg) {
-        /* fmov style unused */
-        case SSA_UNUSED_0:
-                return REGISTER_UNUSED;
-
-        /* lut style unused */
-        case SSA_UNUSED_1:
-                return REGISTER_UNUSED;
-
-        default:
-                DBG("Unknown SSA register alias %d\n", reg);
-                assert(0);
-                return 31;
-        }
-}
-
-static unsigned int
-midgard_ra_select_callback(struct ra_graph *g, BITSET_WORD *regs, void *data)
-{
-        /* Choose the first available register to minimise reported register pressure */
-
-        for (int i = 0; i < 16; ++i) {
-                if (BITSET_TEST(regs, i)) {
-                        return i;
-                }
-        }
-
-        assert(0);
-        return 0;
-}
-
-static bool
-midgard_is_live_in_instr(midgard_instruction *ins, int src)
-{
-        if (ins->ssa_args.src0 == src) return true;
-        if (ins->ssa_args.src1 == src) return true;
-
-        return false;
-}
-
-/* Determine if a variable is live in the successors of a block */
-static bool
-is_live_after_successors(compiler_context *ctx, midgard_block *bl, int src)
-{
-        for (unsigned i = 0; i < bl->nr_successors; ++i) {
-                midgard_block *succ = bl->successors[i];
-
-                        /* If we already visited, the value we're seeking
-                         * isn't down this path (or we would have short
-                         * circuited) */
-
-                if (succ->visited) continue;
-
-                /* Otherwise (it's visited *now*), check the block */
-
-                succ->visited = true;
-
-                mir_foreach_instr_in_block(succ, ins) {
-                        if (midgard_is_live_in_instr(ins, src))
-                                return true;
-                }
-
-                /* ...and also, check *its* successors */
-                if (is_live_after_successors(ctx, succ, src))
-                        return true;
-
-        }
-
-        /* Welp. We're really not live. */
-
-        return false;
-}
-
-static bool
-is_live_after(compiler_context *ctx, midgard_block *block, midgard_instruction *start, int src)
-{
-        /* Check the rest of the block for liveness */
-
-        mir_foreach_instr_in_block_from(block, ins, mir_next_op(start)) {
-                if (midgard_is_live_in_instr(ins, src))
-                        return true;
-        }
-
-        /* Check the rest of the blocks for liveness recursively */
-
-        bool succ = is_live_after_successors(ctx, block, src);
-
-        mir_foreach_block(ctx, block) {
-                block->visited = false;
-        }
-
-        return succ;
-}
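Down in allocate_registers, these liveness queries get condensed into half-open [start, end) index ranges, and two nodes interfere exactly when their ranges overlap. A minimal sketch of that overlap predicate, mirroring the condition used there (the helper name is ours, not the driver's):

#include <stdbool.h>

/* Half-open ranges [s0, e0) and [s1, e1) overlap unless one ends before
 * the other begins -- the same test allocate_registers applies before
 * calling ra_add_node_interference() */
static bool
ranges_overlap(int s0, int e0, int s1, int e1)
{
        return !(s0 >= e1 || s1 >= e0);
}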
-
-static void
-allocate_registers(compiler_context *ctx)
-{
-        /* First, initialize the RA */
-        struct ra_regs *regs = ra_alloc_reg_set(NULL, 32, true);
-
-        /* Create a primary (general purpose) class, as well as special purpose
-         * pipeline register classes */
-
-        int primary_class = ra_alloc_reg_class(regs);
-        int varying_class  = ra_alloc_reg_class(regs);
-
-        /* Add the full set of work registers */
-        int work_count = 16 - MAX2((ctx->uniform_cutoff - 8), 0);
-        for (int i = 0; i < work_count; ++i)
-                ra_class_add_reg(regs, primary_class, i);
-
-        /* Add special registers */
-        ra_class_add_reg(regs, varying_class, REGISTER_VARYING_BASE);
-        ra_class_add_reg(regs, varying_class, REGISTER_VARYING_BASE + 1);
-
-        /* We're done setting up */
-        ra_set_finalize(regs, NULL);
-
-        /* Transform the MIR into squeezed index form */
-        mir_foreach_block(ctx, block) {
-                mir_foreach_instr_in_block(block, ins) {
-                        if (ins->compact_branch) continue;
-
-                        ins->ssa_args.src0 = find_or_allocate_temp(ctx, ins->ssa_args.src0);
-                        ins->ssa_args.src1 = find_or_allocate_temp(ctx, ins->ssa_args.src1);
-                        ins->ssa_args.dest = find_or_allocate_temp(ctx, ins->ssa_args.dest);
-                }
-                if (midgard_debug & MIDGARD_DBG_SHADERS)
-                        print_mir_block(block);
-        }
-
-        /* Let's actually do register allocation */
-        int nodes = ctx->temp_count;
-        struct ra_graph *g = ra_alloc_interference_graph(regs, nodes);
-
-        /* Set everything to the work register class, unless it has somewhere
-         * special to go */
-
-        mir_foreach_block(ctx, block) {
-                mir_foreach_instr_in_block(block, ins) {
-                        if (ins->compact_branch) continue;
-
-                        if (ins->ssa_args.dest < 0) continue;
-
-                        if (ins->ssa_args.dest >= SSA_FIXED_MINIMUM) continue;
-
-                        int class = primary_class;
-
-                        ra_set_node_class(g, ins->ssa_args.dest, class);
-                }
-        }
-
-        for (int index = 0; index <= ctx->max_hash; ++index) {
-                unsigned temp = (uintptr_t) _mesa_hash_table_u64_search(ctx->ssa_to_register, index + 1);
-
-                if (temp) {
-                        unsigned reg = temp - 1;
-                        int t = find_or_allocate_temp(ctx, index);
-                        ra_set_node_reg(g, t, reg);
-                }
-        }
-
-        /* Determine liveness */
-
-        int *live_start = malloc(nodes * sizeof(int));
-        int *live_end = malloc(nodes * sizeof(int));
-
-        /* Initialize as non-existent */
-
-        for (int i = 0; i < nodes; ++i) {
-                live_start[i] = live_end[i] = -1;
-        }
-
-        /* Instruction counter, used as the timestamp for live ranges */
-        int d = 0;
-
-        mir_foreach_block(ctx, block) {
-                mir_foreach_instr_in_block(block, ins) {
-                        if (ins->compact_branch) continue;
-
-                        if (ins->ssa_args.dest < SSA_FIXED_MINIMUM) {
-                                /* If this destination is not yet live, it is now since we just wrote it */
-
-                                int dest = ins->ssa_args.dest;
-
-                                if (live_start[dest] == -1)
-                                        live_start[dest] = d;
-                        }
-
-                        /* Since we just used a source, the source might be
-                         * dead now. Scan the rest of the block for
-                         * invocations, and if there are none, the source
-                         * dies */
-
-                        int sources[2] = { ins->ssa_args.src0, ins->ssa_args.src1 };
-
-                        for (int src = 0; src < 2; ++src) {
-                                int s = sources[src];
-
-                                if (s < 0) continue;
-
-                                if (s >= SSA_FIXED_MINIMUM) continue;
-
-                                if (!is_live_after(ctx, block, ins, s)) {
-                                        live_end[s] = d;
-                                }
-                        }
-
-                        ++d;
-                }
-        }
-
-        /* If a node still hasn't been killed, kill it now */
-
-        for (int i = 0; i < nodes; ++i) {
-                /* live_start == -1 most likely indicates a pinned output */
-
-                if (live_end[i] == -1)
-                        live_end[i] = d;
-        }
-
-        /* Setup interference between nodes that are live at the same time */
-
-        for (int i = 0; i < nodes; ++i) {
-                for (int j = i + 1; j < nodes; ++j) {
-                        if (!(live_start[i] >= live_end[j] || live_start[j] >= live_end[i]))
-                                ra_add_node_interference(g, i, j);
-                }
-        }
-
-        ra_set_select_reg_callback(g, midgard_ra_select_callback, NULL);
-
-        if (!ra_allocate(g)) {
-                DBG("Error allocating registers\n");
-                assert(0);
-        }
-
-        /* Cleanup */
-        free(live_start);
-        free(live_end);
-
-        mir_foreach_block(ctx, block) {
-                mir_foreach_instr_in_block(block, ins) {
-                        if (ins->compact_branch) continue;
-
-                        ssa_args args = ins->ssa_args;
-
-                        switch (ins->type) {
-                        case TAG_ALU_4:
-                                ins->registers.src1_reg = dealias_register(ctx, g, args.src0, nodes);
-
-                                ins->registers.src2_imm = args.inline_constant;
-
-                                if (args.inline_constant) {
-                                        /* Encode inline 16-bit constant as a vector by default */
-
-                                        ins->registers.src2_reg = ins->inline_constant >> 11;
-
-                                        int lower_11 = ins->inline_constant & ((1 << 12) - 1);
-
-                                        uint16_t imm = ((lower_11 >> 8) & 0x7) | ((lower_11 & 0xFF) << 3);
-                                        ins->alu.src2 = imm << 2;
-                                } else {
-                                        ins->registers.src2_reg = dealias_register(ctx, g, args.src1, nodes);
-                                }
-
-                                ins->registers.out_reg = dealias_register(ctx, g, args.dest, nodes);
-
-                                break;
-
-                        case TAG_LOAD_STORE_4: {
-                                if (OP_IS_STORE_VARY(ins->load_store.op)) {
-                                        /* TODO: use ssa_args for store_vary */
-                                        ins->load_store.reg = 0;
-                                } else {
-                                        bool has_dest = args.dest >= 0;
-                                        int ssa_arg = has_dest ? args.dest : args.src0;
-
-                                        ins->load_store.reg = dealias_register(ctx, g, ssa_arg, nodes);
-                                }
-
-                                break;
-                        }
-
-                        default:
-                                break;
-                        }
-                }
-        }
-}
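For reference, the inline-constant packing in the ALU case above reads more easily as a standalone helper. This is a sketch only -- the function name is ours, and the shifts and masks are copied verbatim from the code above (note the 12-bit mask despite the lower_11 name, kept as in the source):

#include <stdint.h>

static void
pack_inline_constant(uint16_t constant, unsigned *src2_reg, unsigned *src2)
{
        /* Top 5 bits select the "register"; the rest is smudged into src2 */
        *src2_reg = constant >> 11;

        int lower_11 = constant & ((1 << 12) - 1);
        uint16_t imm = ((lower_11 >> 8) & 0x7) | ((lower_11 & 0xFF) << 3);

        *src2 = imm << 2;
}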
-
-/* Midgard IR only knows vector ALU types, but we sometimes need to actually
- * use scalar ALU instructions, for functional or performance reasons. To do
- * this, we just demote vector ALU payloads to scalar. */
-
-static int
-component_from_mask(unsigned mask)
-{
-        for (int c = 0; c < 4; ++c) {
-                if (mask & (3 << (2 * c)))
-                        return c;
-        }
-
-        assert(0);
-        return 0;
-}
-
-static bool
-is_single_component_mask(unsigned mask)
-{
-        int components = 0;
-
-        for (int c = 0; c < 4; ++c)
-                if (mask & (3 << (2 * c)))
-                        components++;
-
-        return components == 1;
-}
-
-/* Create a mask of accessed components from a swizzle to figure out vector
- * dependencies */
-
-static unsigned
-swizzle_to_access_mask(unsigned swizzle)
-{
-        unsigned component_mask = 0;
-
-        for (int i = 0; i < 4; ++i) {
-                unsigned c = (swizzle >> (2 * i)) & 3;
-                component_mask |= (1 << c);
-        }
-
-        return component_mask;
-}
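A few worked values may help; the identity swizzle .xyzw packs as 0xE4 (components 3, 2, 1, 0, two bits each). Assuming the function above:

#include <assert.h>

static void
swizzle_mask_examples(void)
{
        assert(swizzle_to_access_mask(0xE4) == 0xF); /* .xyzw touches all  */
        assert(swizzle_to_access_mask(0x00) == 0x1); /* .xxxx touches only X */
        assert(swizzle_to_access_mask(0x55) == 0x2); /* .yyyy touches only Y */
}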
-
-static unsigned
-vector_to_scalar_source(unsigned u, bool is_int)
-{
-        midgard_vector_alu_src v;
-        memcpy(&v, &u, sizeof(v));
-
-        /* TODO: Integers */
-
-        midgard_scalar_alu_src s = {
-                .full = !v.half,
-                .component = (v.swizzle & 3) << 1
-        };
-
-        if (is_int) {
-                /* TODO */
-        } else {
-                s.abs = v.mod & MIDGARD_FLOAT_MOD_ABS;
-                s.negate = v.mod & MIDGARD_FLOAT_MOD_NEG;
-        }
-
-        unsigned o;
-        memcpy(&o, &s, sizeof(s));
-
-        return o & ((1 << 6) - 1);
-}
-
-static midgard_scalar_alu
-vector_to_scalar_alu(midgard_vector_alu v, midgard_instruction *ins)
-{
-        bool is_int = midgard_is_integer_op(v.op);
-
-        /* The output component is from the mask */
-        midgard_scalar_alu s = {
-                .op = v.op,
-                .src1 = vector_to_scalar_source(v.src1, is_int),
-                .src2 = vector_to_scalar_source(v.src2, is_int),
-                .unknown = 0,
-                .outmod = v.outmod,
-                .output_full = 1, /* TODO: Half */
-                .output_component = component_from_mask(v.mask) << 1,
-        };
-
-        /* Inline constant is passed along rather than trying to extract it
-         * from v */
-
-        if (ins->ssa_args.inline_constant) {
-                uint16_t imm = 0;
-                int lower_11 = ins->inline_constant & ((1 << 12) - 1);
-                imm |= (lower_11 >> 9) & 3;
-                imm |= (lower_11 >> 6) & 4;
-                imm |= (lower_11 >> 2) & 0x38;
-                imm |= (lower_11 & 63) << 6;
-
-                s.src2 = imm;
-        }
-
-        return s;
-}
-
-/* Midgard prefetches instruction types, so during emission we need to look
- * ahead too. Unless this is the last instruction, in which case we return 1.
- * Or if this is the second to last and the last is an ALU, then it's also 1... */
-
-#define IS_ALU(tag) (tag == TAG_ALU_4 || tag == TAG_ALU_8 ||  \
-                    tag == TAG_ALU_12 || tag == TAG_ALU_16)
-
-#define EMIT_AND_COUNT(type, val) util_dynarray_append(emission, type, val); \
-                                 bytes_emitted += sizeof(type)
-
-static void
-emit_binary_vector_instruction(midgard_instruction *ains,
-                               uint16_t *register_words, int *register_words_count,
-                               uint64_t *body_words, size_t *body_size, int *body_words_count,
-                               size_t *bytes_emitted)
-{
-        memcpy(&register_words[(*register_words_count)++], &ains->registers, sizeof(ains->registers));
-        *bytes_emitted += sizeof(midgard_reg_info);
-
-        body_size[*body_words_count] = sizeof(midgard_vector_alu);
-        memcpy(&body_words[(*body_words_count)++], &ains->alu, sizeof(ains->alu));
-        *bytes_emitted += sizeof(midgard_vector_alu);
-}
-
-/* Checks for an SSA data hazard between two adjacent instructions, keeping in
- * mind that we are a vector architecture and we can write to different
- * components simultaneously */
-
-static bool
-can_run_concurrent_ssa(midgard_instruction *first, midgard_instruction *second)
-{
-        /* Each instruction reads some registers and writes to a register. See
-         * where the first writes */
-
-        /* Figure out where exactly we wrote to */
-        int source = first->ssa_args.dest;
-        int source_mask = first->type == TAG_ALU_4 ? squeeze_writemask(first->alu.mask) : 0xF;
-
-        /* As long as the second doesn't read from the first, we're okay */
-        if (second->ssa_args.src0 == source) {
-                if (first->type == TAG_ALU_4) {
-                        /* Figure out which components we just read from */
-
-                        int q = second->alu.src1;
-                        midgard_vector_alu_src *m = (midgard_vector_alu_src *) &q;
-
-                        /* Check if there are components in common, and fail if so */
-                        if (swizzle_to_access_mask(m->swizzle) & source_mask)
-                                return false;
-                } else
-                        return false;
-
-        }
-
-        if (second->ssa_args.src1 == source)
-                return false;
-
-        /* Otherwise, it's safe in that regard. Another data hazard is both
-         * writing to the same place, of course */
-
-        if (second->ssa_args.dest == source) {
-                /* ...but only if the components overlap */
-                int dest_mask = second->type == TAG_ALU_4 ? squeeze_writemask(second->alu.mask) : 0xF;
-
-                if (dest_mask & source_mask)
-                        return false;
-        }
-
-        /* ...That's it */
-        return true;
-}
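Concretely (worked masks ours): if the first instruction writes only X and the second reads the value with a .yyyy swizzle, the masks are disjoint and the pair can be bundled, while a .xxxx read would collide:

/*   source_mask (X written)         = 0x1
 *   swizzle_to_access_mask(.yyyy)   = 0x2   -> 0x1 & 0x2 == 0, no hazard
 *   swizzle_to_access_mask(.xxxx)   = 0x1   -> 0x1 & 0x1 != 0, hazard   */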
-
-static bool
-midgard_has_hazard(
-                midgard_instruction **segment, unsigned segment_size,
-                midgard_instruction *ains)
-{
-        for (int s = 0; s < segment_size; ++s)
-                if (!can_run_concurrent_ssa(segment[s], ains))
-                        return true;
-
-        return false;
-}
-
-/* Schedules, but does not emit, a single basic block. After scheduling, the
- * final tag and size of the block are known, which are necessary for branching
- * */
-
-static midgard_bundle
-schedule_bundle(compiler_context *ctx, midgard_block *block, midgard_instruction *ins, int *skip)
-{
-        int instructions_emitted = 0, instructions_consumed = -1;
-        midgard_bundle bundle = { 0 };
-
-        uint8_t tag = ins->type;
-
-        /* Default to the instruction's tag */
-        bundle.tag = tag;
-
-        switch (ins->type) {
-        case TAG_ALU_4: {
-                uint32_t control = 0;
-                size_t bytes_emitted = sizeof(control);
-
-                /* TODO: Constant combining */
-                int index = 0, last_unit = 0;
-
-                /* Previous instructions, for the purpose of parallelism */
-                midgard_instruction *segment[4] = {0};
-                int segment_size = 0;
-
-                instructions_emitted = -1;
-                midgard_instruction *pins = ins;
-
-                for (;;) {
-                        midgard_instruction *ains = pins;
-
-                        /* Advance instruction pointer */
-                        if (index) {
-                                ains = mir_next_op(pins);
-                                pins = ains;
-                        }
-
-                        /* Out-of-work condition */
-                        if ((struct list_head *) ains == &block->instructions)
-                                break;
-
-                        /* Ensure that the chain can continue */
-                        if (ains->type != TAG_ALU_4) break;
-
-                        /* According to the presentation "The ARM
-                         * Mali-T880 Mobile GPU" from HotChips 27,
-                         * there are two pipeline stages. Branching
-                         * position determined experimentally. Lines
-                         * are executed in parallel:
-                         *
-                         * [ VMUL ] [ SADD ]
-                         * [ VADD ] [ SMUL ] [ LUT ] [ BRANCH ]
-                         *
-                         * Verify that there are no ordering dependencies here.
-                         *
-                         * TODO: Allow for parallelism!!!
-                         */
-
-                        /* Pick a unit for it if it doesn't force a particular unit */
-
-                        int unit = ains->unit;
-
-                        if (!unit) {
-                                int op = ains->alu.op;
-                                int units = alu_opcode_props[op];
-
-                                /* TODO: Promotion of scalars to vectors */
-                                int vector = ((!is_single_component_mask(ains->alu.mask)) || ((units & UNITS_SCALAR) == 0)) && (units & UNITS_ANY_VECTOR);
-
-                                if (!vector)
-                                        assert(units & UNITS_SCALAR);
-
-                                if (vector) {
-                                        if (last_unit >= UNIT_VADD) {
-                                                if (units & UNIT_VLUT)
-                                                        unit = UNIT_VLUT;
-                                                else
-                                                        break;
-                                        } else {
-                                                if ((units & UNIT_VMUL) && !(control & UNIT_VMUL))
-                                                        unit = UNIT_VMUL;
-                                                else if ((units & UNIT_VADD) && !(control & UNIT_VADD))
-                                                        unit = UNIT_VADD;
-                                                else if (units & UNIT_VLUT)
-                                                        unit = UNIT_VLUT;
-                                                else
-                                                        break;
-                                        }
-                                } else {
-                                        if (last_unit >= UNIT_VADD) {
-                                                if ((units & UNIT_SMUL) && !(control & UNIT_SMUL))
-                                                        unit = UNIT_SMUL;
-                                                else if (units & UNIT_VLUT)
-                                                        unit = UNIT_VLUT;
-                                                else
-                                                        break;
-                                        } else {
-                                                if ((units & UNIT_SADD) && !(control & UNIT_SADD) && !midgard_has_hazard(segment, segment_size, ains))
-                                                        unit = UNIT_SADD;
-                                                else if (units & UNIT_SMUL)
-                                                        unit = ((units & UNIT_VMUL) && !(control & UNIT_VMUL)) ? UNIT_VMUL : UNIT_SMUL;
-                                                else if ((units & UNIT_VADD) && !(control & UNIT_VADD))
-                                                        unit = UNIT_VADD;
-                                                else
-                                                        break;
-                                        }
-                                }
-
-                                assert(unit & units);
-                        }
-
-                        /* Late unit check, this time for encoding (not parallelism) */
-                        if (unit <= last_unit) break;
-
-                        /* Clear the segment */
-                        if (last_unit < UNIT_VADD && unit >= UNIT_VADD)
-                                segment_size = 0;
-
-                        if (midgard_has_hazard(segment, segment_size, ains))
-                                break;
-
-                        /* We're good to go -- emit the instruction */
-                        ains->unit = unit;
-
-                        segment[segment_size++] = ains;
-
-                        /* Only one set of embedded constants per
-                         * bundle is possible; if we have more, we must
-                         * break the chain early, unfortunately */
-
-                        if (ains->has_constants) {
-                                if (bundle.has_embedded_constants) {
-                                        /* ...but if there are already
-                                         * constants but these are the
-                                         * *same* constants, we let it
-                                         * through */
-
-                                        if (memcmp(bundle.constants, ains->constants, sizeof(bundle.constants)))
-                                                break;
-                                } else {
-                                        bundle.has_embedded_constants = true;
-                                        memcpy(bundle.constants, ains->constants, sizeof(bundle.constants));
-
-                                        /* If this is a blend shader special constant, track it for patching */
-                                        if (ains->has_blend_constant)
-                                                bundle.has_blend_constant = true;
-                                }
-                        }
-
-                        if (ains->unit & UNITS_ANY_VECTOR) {
-                                emit_binary_vector_instruction(ains, bundle.register_words,
-                                                               &bundle.register_words_count, bundle.body_words,
-                                                               bundle.body_size, &bundle.body_words_count, &bytes_emitted);
-                        } else if (ains->compact_branch) {
-                                /* All of r0 has to be written out
-                                 * along with the branch writeout.
-                                 * (slow!) */
-
-                                if (ains->writeout) {
-                                        if (index == 0) {
-                                                midgard_instruction ins = v_fmov(0, blank_alu_src, SSA_FIXED_REGISTER(0));
-                                                ins.unit = UNIT_VMUL;
-
-                                                control |= ins.unit;
-
-                                                emit_binary_vector_instruction(&ins, bundle.register_words,
-                                                                               &bundle.register_words_count, bundle.body_words,
-                                                                               bundle.body_size, &bundle.body_words_count, &bytes_emitted);
-                                        } else {
-                                                /* Analyse the group to see if r0 is written in full, on-time, without hanging dependencies */
-                                                bool written_late = false;
-                                                bool components[4] = { 0 };
-                                                uint16_t register_dep_mask = 0;
-                                                uint16_t written_mask = 0;
-
-                                                midgard_instruction *qins = ins;
-                                                for (int t = 0; t < index; ++t) {
-                                                        if (qins->registers.out_reg != 0) {
-                                                                /* Mark down writes */
-
-                                                                written_mask |= (1 << qins->registers.out_reg);
-                                                        } else {
-                                                                /* Mark down the register dependencies for errata check */
-
-                                                                if (qins->registers.src1_reg < 16)
-                                                                        register_dep_mask |= (1 << qins->registers.src1_reg);
-
-                                                                if (qins->registers.src2_reg < 16)
-                                                                        register_dep_mask |= (1 << qins->registers.src2_reg);
-
-                                                                int mask = qins->alu.mask;
-
-                                                                for (int c = 0; c < 4; ++c)
-                                                                        if (mask & (0x3 << (2 * c)))
-                                                                                components[c] = true;
-
-                                                                /* ..but if the writeout is too late, we have to break up anyway... for some reason */
-
-                                                                if (qins->unit == UNIT_VLUT)
-                                                                        written_late = true;
-                                                        }
-
-                                                        /* Advance instruction pointer */
-                                                        qins = mir_next_op(qins);
-                                                }
-
-                                                /* ERRATA (?): In a bundle ending in a fragment writeout, the register dependencies of r0 cannot be written within this bundle (discovered in -bshading:shading=phong) */
-                                                if (register_dep_mask & written_mask) {
-                                                        DBG("ERRATA WORKAROUND: Breakup for writeout dependency masks %X vs %X (common %X)\n", register_dep_mask, written_mask, register_dep_mask & written_mask);
-                                                        break;
-                                                }
-
-                                                if (written_late)
-                                                        break;
-
-                                                /* If even a single component is not written, break it up (conservative check). */
-                                                bool breakup = false;
-
-                                                for (int c = 0; c < 4; ++c)
-                                                        if (!components[c])
-                                                                breakup = true;
-
-                                                if (breakup)
-                                                        break;
-
-                                                /* Otherwise, we're free to proceed */
-                                        }
-                                }
-
-                                if (ains->unit == ALU_ENAB_BRANCH) {
-                                        bundle.body_size[bundle.body_words_count] = sizeof(midgard_branch_extended);
-                                        memcpy(&bundle.body_words[bundle.body_words_count++], &ains->branch_extended, sizeof(midgard_branch_extended));
-                                        bytes_emitted += sizeof(midgard_branch_extended);
-                                } else {
-                                        bundle.body_size[bundle.body_words_count] = sizeof(ains->br_compact);
-                                        memcpy(&bundle.body_words[bundle.body_words_count++], &ains->br_compact, sizeof(ains->br_compact));
-                                        bytes_emitted += sizeof(ains->br_compact);
-                                }
-                        } else {
-                                memcpy(&bundle.register_words[bundle.register_words_count++], &ains->registers, sizeof(ains->registers));
-                                bytes_emitted += sizeof(midgard_reg_info);
-
-                                bundle.body_size[bundle.body_words_count] = sizeof(midgard_scalar_alu);
-                                bundle.body_words_count++;
-                                bytes_emitted += sizeof(midgard_scalar_alu);
-                        }
-
-                        /* Defer marking until after writing to allow for break */
-                        control |= ains->unit;
-                        last_unit = ains->unit;
-                        ++instructions_emitted;
-                        ++index;
-                }
-
-                /* Bubble up the number of instructions for skipping */
-                instructions_consumed = index - 1;
-
-                int padding = 0;
-
-                /* Pad ALU op to nearest word */
-
-                if (bytes_emitted & 15) {
-                        padding = 16 - (bytes_emitted & 15);
-                        bytes_emitted += padding;
-                }
-
-                /* Constants must always be quadwords */
-                if (bundle.has_embedded_constants)
-                        bytes_emitted += 16;
-
-                /* Size ALU instruction for tag */
-                bundle.tag = (TAG_ALU_4) + (bytes_emitted / 16) - 1;
-                bundle.padding = padding;
-                bundle.control = bundle.tag | control;
-
-                break;
-        }
-
-        case TAG_LOAD_STORE_4: {
-                /* Load/store instructions have two words at once. If we
-                 * only have one queued up, we need to NOP pad. Otherwise,
-                 * we store both in succession to save space and cycles --
-                 * letting them go in parallel -- and skip the next one.
-                 * The usefulness of this optimisation is greatly
-                 * dependent on the quality of the instruction scheduler.
-                 */
-
-                midgard_instruction *next_op = mir_next_op(ins);
-
-                if ((struct list_head *) next_op != &block->instructions && next_op->type == TAG_LOAD_STORE_4) {
-                        /* As the two operate concurrently, make sure
-                         * they are not dependent */
-
-                        if (can_run_concurrent_ssa(ins, next_op) || true) {
-                                /* Skip ahead, since it's redundant with the pair */
-                                instructions_consumed = 1 + (instructions_emitted++);
-                        }
-                }
-
-                break;
-        }
-
-        default:
-                /* Texture ops default to single-op-per-bundle scheduling */
-                break;
-        }
-
-        /* Copy the instructions into the bundle */
-        bundle.instruction_count = instructions_emitted + 1;
-
-        int used_idx = 0;
-
-        midgard_instruction *uins = ins;
-        for (int i = 0; used_idx < bundle.instruction_count; ++i) {
-                bundle.instructions[used_idx++] = *uins;
-                uins = mir_next_op(uins);
+                        break;
+                }
+                }
         }
 
-        *skip = (instructions_consumed == -1) ? instructions_emitted : instructions_consumed;
-
-        return bundle;
-}
+        /* No helper to build texture words -- we do it all here */
+        midgard_instruction ins = {
+                .type = TAG_TEXTURE_4,
+                .texture = {
+                        .op = TEXTURE_OP_NORMAL,
+                        .format = midgard_tex_format(instr->sampler_dim),
+                        .texture_handle = texture_index,
+                        .sampler_handle = sampler_index,
 
-static int
-quadword_size(int tag)
-{
-        switch (tag) {
-        case TAG_ALU_4:
-                return 1;
+                        /* TODO: Don't force xyzw */
+                        .swizzle = SWIZZLE(COMPONENT_X, COMPONENT_Y, COMPONENT_Z, COMPONENT_W),
+                        .mask = 0xF,
 
-        case TAG_ALU_8:
-                return 2;
+                        /* TODO: half */
+                        //.in_reg_full = 1,
+                        .out_full = 1,
 
-        case TAG_ALU_12:
-                return 3;
+                        .filter = 1,
 
-        case TAG_ALU_16:
-                return 4;
+                        /* Always 1 */
+                        .unknown7 = 1,
 
-        case TAG_LOAD_STORE_4:
-                return 1;
+                        /* Assume we can continue; hint it out later */
+                        .cont = 1,
+                }
+        };
 
-        case TAG_TEXTURE_4:
-                return 1;
+        /* Set registers to read and write from the same place */
+        ins.texture.in_reg_select = in_reg;
+        ins.texture.out_reg_select = out_reg;
 
-        default:
-                assert(0);
-                return 0;
+        /* TODO: Dynamic swizzle input selection, half-swizzles? */
+        if (instr->sampler_dim == GLSL_SAMPLER_DIM_3D) {
+                ins.texture.in_reg_swizzle_right = COMPONENT_X;
+                ins.texture.in_reg_swizzle_left = COMPONENT_Y;
+                //ins.texture.in_reg_swizzle_third = COMPONENT_Z;
+        } else {
+                ins.texture.in_reg_swizzle_left = COMPONENT_X;
+                ins.texture.in_reg_swizzle_right = COMPONENT_Y;
+                //ins.texture.in_reg_swizzle_third = COMPONENT_X;
         }
-}
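quadword_size() is just the inverse of the sizing computed at the end of the ALU case in schedule_bundle(). A worked example (numbers ours):

/*   bytes_emitted = 24 -> padded to 32 bytes -> 32 / 16 = 2 quadwords
 *   bundle.tag    = TAG_ALU_4 + 2 - 1 = TAG_ALU_8
 *   quadword_size(TAG_ALU_8) == 2                                     */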
-
-/* Schedule a single block by iterating its instruction to create bundles.
- * While we go, tally about the bundle sizes to compute the block size. */
-
-static void
-schedule_block(compiler_context *ctx, midgard_block *block)
-{
-        util_dynarray_init(&block->bundles, NULL);
 
-        block->quadword_count = 0;
-
-        mir_foreach_instr_in_block(block, ins) {
-                int skip;
-                midgard_bundle bundle = schedule_bundle(ctx, block, ins, &skip);
-                util_dynarray_append(&block->bundles, midgard_bundle, bundle);
+        emit_mir_instruction(ctx, ins);
 
-                if (bundle.has_blend_constant) {
-                        /* TODO: Multiblock? */
-                        int quadwords_within_block = block->quadword_count + quadword_size(bundle.tag) - 1;
-                        ctx->blend_constant_offset = quadwords_within_block * 0x10;
-                }
+        /* Simultaneously alias the destination and emit a move for it. The move will be eliminated if possible */
 
-                while(skip--)
-                        ins = mir_next_op(ins);
+        int o_reg = REGISTER_TEXTURE_BASE + out_reg, o_index = nir_dest_index(ctx, &instr->dest);
+        alias_ssa(ctx, o_index, SSA_FIXED_REGISTER(o_reg));
+        ctx->texture_index[reg] = o_index;
 
-                block->quadword_count += quadword_size(bundle.tag);
-        }
+        midgard_instruction ins2 = v_fmov(SSA_FIXED_REGISTER(o_reg), blank_alu_src, o_index);
+        emit_mir_instruction(ctx, ins2);
 
-        block->is_scheduled = true;
+        /* Used for .cont and .last hinting */
+        ctx->texture_op_count++;
 }
 
 static void
-schedule_program(compiler_context *ctx)
+emit_jump(compiler_context *ctx, nir_jump_instr *instr)
 {
-        allocate_registers(ctx);
+        switch (instr->type) {
+                case nir_jump_break: {
+                        /* Emit a branch out of the loop */
+                        struct midgard_instruction br = v_branch(false, false);
+                        br.branch.target_type = TARGET_BREAK;
+                        br.branch.target_break = ctx->current_loop_depth;
+                        emit_mir_instruction(ctx, br);
 
-        mir_foreach_block(ctx, block) {
-                schedule_block(ctx, block);
+                        DBG("break..\n");
+                        break;
+                }
+
+                default:
+                        DBG("Unknown jump type %d\n", instr->type);
+                        break;
         }
 }
 
-/* After everything is scheduled, emit whole bundles at a time */
-
 static void
-emit_binary_bundle(compiler_context *ctx, midgard_bundle *bundle, struct util_dynarray *emission, int next_tag)
+emit_instr(compiler_context *ctx, struct nir_instr *instr)
 {
-        int lookahead = next_tag << 4;
-
-        switch (bundle->tag) {
-        case TAG_ALU_4:
-        case TAG_ALU_8:
-        case TAG_ALU_12:
-        case TAG_ALU_16: {
-                /* Actually emit each component */
-                util_dynarray_append(emission, uint32_t, bundle->control | lookahead);
-
-                for (int i = 0; i < bundle->register_words_count; ++i)
-                        util_dynarray_append(emission, uint16_t, bundle->register_words[i]);
-
-                /* Emit body words based on the instructions bundled */
-                for (int i = 0; i < bundle->instruction_count; ++i) {
-                        midgard_instruction *ins = &bundle->instructions[i];
-
-                        if (ins->unit & UNITS_ANY_VECTOR) {
-                                memcpy(util_dynarray_grow(emission, sizeof(midgard_vector_alu)), &ins->alu, sizeof(midgard_vector_alu));
-                        } else if (ins->compact_branch) {
-                                /* Dummy move, XXX DRY */
-                                if ((i == 0) && ins->writeout) {
-                                        midgard_instruction ins = v_fmov(0, blank_alu_src, SSA_FIXED_REGISTER(0));
-                                        memcpy(util_dynarray_grow(emission, sizeof(midgard_vector_alu)), &ins.alu, sizeof(midgard_vector_alu));
-                                }
-
-                                if (ins->unit == ALU_ENAB_BR_COMPACT) {
-                                        memcpy(util_dynarray_grow(emission, sizeof(ins->br_compact)), &ins->br_compact, sizeof(ins->br_compact));
-                                } else {
-                                        memcpy(util_dynarray_grow(emission, sizeof(ins->branch_extended)), &ins->branch_extended, sizeof(ins->branch_extended));
-                                }
-                        } else {
-                                /* Scalar */
-                                midgard_scalar_alu scalarised = vector_to_scalar_alu(ins->alu, ins);
-                                memcpy(util_dynarray_grow(emission, sizeof(scalarised)), &scalarised, sizeof(scalarised));
-                        }
-                }
-
-                /* Emit padding (all zero) */
-                memset(util_dynarray_grow(emission, bundle->padding), 0, bundle->padding);
-
-                /* Tack on constants */
-
-                if (bundle->has_embedded_constants) {
-                        util_dynarray_append(emission, float, bundle->constants[0]);
-                        util_dynarray_append(emission, float, bundle->constants[1]);
-                        util_dynarray_append(emission, float, bundle->constants[2]);
-                        util_dynarray_append(emission, float, bundle->constants[3]);
-                }
-
+        switch (instr->type) {
+        case nir_instr_type_load_const:
+                emit_load_const(ctx, nir_instr_as_load_const(instr));
                 break;
-        }
-
-        case TAG_LOAD_STORE_4: {
-                /* One or two composing instructions */
-
-                uint64_t current64, next64 = LDST_NOP;
-
-                memcpy(&current64, &bundle->instructions[0].load_store, sizeof(current64));
-
-                if (bundle->instruction_count == 2)
-                        memcpy(&next64, &bundle->instructions[1].load_store, sizeof(next64));
-
-                midgard_load_store instruction = {
-                        .type = bundle->tag,
-                        .next_type = next_tag,
-                        .word1 = current64,
-                        .word2 = next64
-                };
-
-                util_dynarray_append(emission, midgard_load_store, instruction);
 
+        case nir_instr_type_intrinsic:
+                emit_intrinsic(ctx, nir_instr_as_intrinsic(instr));
                 break;
-        }
-
-        case TAG_TEXTURE_4: {
-                /* Texture instructions are easy, since there is neither
-                 * pipelining nor VLIW to worry about. We may need to set
-                 * the .last flag */
 
-                midgard_instruction *ins = &bundle->instructions[0];
-
-                ins->texture.type = TAG_TEXTURE_4;
-                ins->texture.next_type = next_tag;
+        case nir_instr_type_alu:
+                emit_alu(ctx, nir_instr_as_alu(instr));
+                break;
 
-                ctx->texture_op_count--;
+        case nir_instr_type_tex:
+                emit_tex(ctx, nir_instr_as_tex(instr));
+                break;
 
-                if (!ctx->texture_op_count) {
-                        ins->texture.cont = 0;
-                        ins->texture.last = 1;
-                }
+        case nir_instr_type_jump:
+                emit_jump(ctx, nir_instr_as_jump(instr));
+                break;
 
-                util_dynarray_append(emission, midgard_texture_word, ins->texture);
+        case nir_instr_type_ssa_undef:
+                /* Spurious */
                 break;
-        }
 
         default:
-                DBG("Unknown midgard instruction type\n");
-                assert(0);
+                DBG("Unhandled instruction type\n");
                 break;
         }
 }
@@ -2942,34 +1556,21 @@ embedded_to_inline_constant(compiler_context *ctx)
                 int op = ins->alu.op;
 
                 if (ins->ssa_args.src0 == SSA_FIXED_REGISTER(REGISTER_CONSTANT)) {
-                        /* Flip based on op. Fallthrough intentional */
-
                         switch (op) {
-                        /* These ops require an operational change to flip their arguments TODO */
+                        /* These ops require an operational change to flip
+                         * their arguments TODO */
                         case midgard_alu_op_flt:
                         case midgard_alu_op_fle:
                         case midgard_alu_op_ilt:
                         case midgard_alu_op_ile:
                         case midgard_alu_op_fcsel:
                         case midgard_alu_op_icsel:
-                        case midgard_alu_op_isub:
-                                DBG("Missed non-commutative flip (%s)\n", alu_opcode_names[op]);
+                                DBG("Missed non-commutative flip (%s)\n", alu_opcode_props[op].name);
+                        default:
                                 break;
+                        }
 
-                        /* These ops are commutative and Just Flip */
-                        case midgard_alu_op_fne:
-                        case midgard_alu_op_fadd:
-                        case midgard_alu_op_fmul:
-                        case midgard_alu_op_fmin:
-                        case midgard_alu_op_fmax:
-                        case midgard_alu_op_iadd:
-                        case midgard_alu_op_imul:
-                        case midgard_alu_op_feq:
-                        case midgard_alu_op_ieq:
-                        case midgard_alu_op_ine:
-                        case midgard_alu_op_iand:
-                        case midgard_alu_op_ior:
-                        case midgard_alu_op_ixor:
+                        if (alu_opcode_props[op].props & OP_COMMUTES) {
                                 /* Flip the SSA numbers */
                                 ins->ssa_args.src0 = ins->ssa_args.src1;
                                 ins->ssa_args.src1 = SSA_FIXED_REGISTER(REGISTER_CONSTANT);
@@ -2981,9 +1582,6 @@ embedded_to_inline_constant(compiler_context *ctx)
                                 src_temp = ins->alu.src2;
                                 ins->alu.src2 = ins->alu.src1;
                                 ins->alu.src1 = src_temp;
-
-                        default:
-                                break;
                         }
                 }
 
@@ -3001,11 +1599,7 @@ embedded_to_inline_constant(compiler_context *ctx)
                         /* Scale constant appropriately, if we can legally */
                         uint16_t scaled_constant = 0;
 
-                        /* XXX: Check legality */
                         if (midgard_is_integer_op(op)) {
-                                /* TODO: Inline integer */
-                                continue;
-
                                 unsigned int *iconstants = (unsigned int *) ins->constants;
                                 scaled_constant = (uint16_t) iconstants[component];
 
@@ -3013,7 +1607,20 @@ embedded_to_inline_constant(compiler_context *ctx)
                                 if (scaled_constant != iconstants[component])
                                         continue;
                         } else {
-                                scaled_constant = _mesa_float_to_half((float) ins->constants[component]);
+                                float original = (float) ins->constants[component];
+                                scaled_constant = _mesa_float_to_half(original);
+
+                                /* Check for loss of precision. If this is
+                                 * mediump, we don't care, but for a highp
+                                 * shader, we need to pay attention. NIR
+                                 * doesn't yet tell us which mode we're in!
+                                 * Practically this prevents most constants
+                                 * from being inlined, sadly. */
+
+                                float fp32 = _mesa_half_to_float(scaled_constant);
+
+                                if (fp32 != original)
+                                        continue;
                         }
 
                         /* We don't know how to handle these with a constant */
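The precision check added above is just an exact fp32 -> fp16 -> fp32 round trip. As a standalone sketch (helper name ours; the converters are the existing ones from util/half_float.h):

#include <stdbool.h>
#include "util/half_float.h"

/* True iff f can be inlined as a half-precision constant without
 * perturbing a highp result */
static bool
representable_as_half(float f)
{
        return _mesa_half_to_float(_mesa_float_to_half(f)) == f;
}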
@@ -3064,6 +1671,10 @@ embedded_to_inline_constant(compiler_context *ctx)
 static void
 map_ssa_to_alias(compiler_context *ctx, int *ref)
 {
+        /* The sign is used quite deliberately to mark unused sources */
+        if (*ref < 0)
+                return;
+
         unsigned int alias = (uintptr_t) _mesa_hash_table_u64_search(ctx->ssa_to_alias, *ref + 1);
 
         if (alias) {
@@ -3093,8 +1704,7 @@ midgard_opt_dead_code_eliminate(compiler_context *ctx, midgard_block *block)
                 if (ins->compact_branch) continue;
 
                 if (ins->ssa_args.dest >= SSA_FIXED_MINIMUM) continue;
-                if (midgard_is_pinned(ctx, ins->ssa_args.dest)) continue;
-                if (is_live_after(ctx, block, ins, ins->ssa_args.dest)) continue;
+                if (mir_is_live_after(ctx, block, ins, ins->ssa_args.dest)) continue;
 
                 mir_remove_instruction(ins);
                 progress = true;
@@ -3103,6 +1713,57 @@ midgard_opt_dead_code_eliminate(compiler_context *ctx, midgard_block *block)
         return progress;
 }
 
+/* Dead code elimination for branches at the end of a block -- only one branch
+ * per block is legal semantically */
+
+static void
+midgard_opt_cull_dead_branch(compiler_context *ctx, midgard_block *block)
+{
+        bool branched = false;
+
+        mir_foreach_instr_in_block_safe(block, ins) {
+                if (!midgard_is_branch_unit(ins->unit)) continue;
+
+                /* We ignore prepacked branches since the fragment epilogue is
+                 * just generally special */
+                if (ins->prepacked_branch) continue;
+
+                if (branched) {
+                        /* We already branched, so this is dead */
+                        mir_remove_instruction(ins);
+                }
+
+                branched = true;
+        }
+}
+
+static bool
+mir_nontrivial_mod(midgard_vector_alu_src src, bool is_int, unsigned mask)
+{
+        /* abs or neg */
+        if (!is_int && src.mod) return true;
+
+        /* swizzle */
+        for (unsigned c = 0; c < 4; ++c) {
+                if (!(mask & (1 << c))) continue;
+                if (((src.swizzle >> (2*c)) & 3) != c) return true;
+        }
+
+        return false;
+}
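For example (worked values ours), with mask 0x3 (X and Y alive):

/*   swizzle .xyzw (0xE4): X reads X, Y reads Y -> trivial, safe to fold
 *   swizzle .yxzw (0xE1): X reads Y            -> nontrivial, keep the move
 *   any float abs/neg modifier                 -> nontrivial regardless   */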
+
+static bool
+mir_nontrivial_source2_mod(midgard_instruction *ins)
+{
+        unsigned mask = squeeze_writemask(ins->alu.mask);
+        bool is_int = midgard_is_integer_op(ins->alu.op);
+
+        midgard_vector_alu_src src2 =
+                vector_alu_from_unsigned(ins->alu.src2);
+
+        return mir_nontrivial_mod(src2, is_int, mask);
+}
+
 static bool
 midgard_opt_copy_prop(compiler_context *ctx, midgard_block *block)
 {
@@ -3119,27 +1780,127 @@ midgard_opt_copy_prop(compiler_context *ctx, midgard_block *block)
 
                 if (to >= SSA_FIXED_MINIMUM) continue;
                 if (from >= SSA_FIXED_MINIMUM) continue;
+                if (to >= ctx->func->impl->ssa_alloc) continue;
+                if (from >= ctx->func->impl->ssa_alloc) continue;
+
+                /* Constant propagation is not handled here, either */
+                if (ins->ssa_args.inline_constant) continue;
+                if (ins->has_constants) continue;
 
-                /* Also, if the move has side effects, we're helpless */
+                if (mir_nontrivial_source2_mod(ins)) continue;
+                if (ins->alu.outmod != midgard_outmod_none) continue;
 
-                midgard_vector_alu_src src =
-                        vector_alu_from_unsigned(ins->alu.src2);
-                unsigned mask = squeeze_writemask(ins->alu.mask);
-                bool is_int = midgard_is_integer_op(ins->alu.op);
+                /* We're clear -- rewrite */
+                mir_rewrite_index_src(ctx, to, from);
+                mir_remove_instruction(ins);
+                progress |= true;
+        }
 
-                if (mir_nontrivial_mod(src, is_int, mask)) continue;
+        return progress;
+}
 
-                mir_foreach_instr_in_block_from(block, v, mir_next_op(ins)) {
-                        if (v->ssa_args.src0 == to) {
-                                v->ssa_args.src0 = from;
-                                progress = true;
-                        }
+/* fmov.pos is an idiom for fpos. Propoagate the .pos up to the source, so then
+ * the move can be propagated away entirely */
 
-                        if (v->ssa_args.src1 == to && !v->ssa_args.inline_constant) {
-                                v->ssa_args.src1 = from;
-                                progress = true;
+static bool
+mir_compose_outmod(midgard_outmod *outmod, midgard_outmod comp)
+{
+        /* Nothing to do */
+        if (comp == midgard_outmod_none)
+                return true;
+
+        if (*outmod == midgard_outmod_none) {
+                *outmod = comp;
+                return true;
+        }
+
+        /* TODO: Compose rules */
+        return false;
+}
+
+static bool
+midgard_opt_pos_propagate(compiler_context *ctx, midgard_block *block)
+{
+        bool progress = false;
+
+        mir_foreach_instr_in_block_safe(block, ins) {
+                if (ins->type != TAG_ALU_4) continue;
+                if (ins->alu.op != midgard_alu_op_fmov) continue;
+                if (ins->alu.outmod != midgard_outmod_pos) continue;
+
+                /* TODO: Registers? */
+                unsigned src = ins->ssa_args.src1;
+                if (src >= ctx->func->impl->ssa_alloc) continue;
+
+                /* There might be a source modifier, too */
+                if (mir_nontrivial_source2_mod(ins)) continue;
+
+                /* Backpropagate the modifier */
+                mir_foreach_instr_in_block_from_rev(block, v, mir_prev_op(ins)) {
+                        if (v->type != TAG_ALU_4) continue;
+                        if (v->ssa_args.dest != src) continue;
+
+                        midgard_outmod temp = v->alu.outmod;
+                        progress |= mir_compose_outmod(&temp, ins->alu.outmod);
+
+                        /* Throw in the towel... */
+                        if (!progress) break;
+
+                        /* Otherwise, transfer the modifier */
+                        v->alu.outmod = temp;
+                        ins->alu.outmod = midgard_outmod_none;
+
+                        break;
+                }
+        }
+
+        return progress;
+}
+
+static bool
+midgard_opt_copy_prop_tex(compiler_context *ctx, midgard_block *block)
+{
+        bool progress = false;
+
+        mir_foreach_instr_in_block_safe(block, ins) {
+                if (ins->type != TAG_ALU_4) continue;
+                if (!OP_IS_MOVE(ins->alu.op)) continue;
+
+                unsigned from = ins->ssa_args.src1;
+                unsigned to = ins->ssa_args.dest;
+
+                /* Make sure it's simple enough for us to handle */
+
+                if (from >= SSA_FIXED_MINIMUM) continue;
+                if (from >= ctx->func->impl->ssa_alloc) continue;
+                if (to < SSA_FIXED_REGISTER(REGISTER_TEXTURE_BASE)) continue;
+                if (to > SSA_FIXED_REGISTER(REGISTER_TEXTURE_BASE + 1)) continue;
+
+                bool eliminated = false;
+
+                mir_foreach_instr_in_block_from_rev(block, v, mir_prev_op(ins)) {
+                        /* The texture registers are not SSA, so be careful:
+                         * conservatively, stop at the first non-ALU
+                         * instruction (such as a texture op, even one that
+                         * might not write to where we are) */
+
+                        if (v->type != TAG_ALU_4)
+                                break;
+
+                        if (v->ssa_args.dest == from) {
+                                /* We don't want to track partial writes ... */
+                                if (v->alu.mask == 0xF) {
+                                        v->ssa_args.dest = to;
+                                        eliminated = true;
+                                }
+
+                                break;
                         }
                 }
+
+                if (eliminated)
+                        mir_remove_instruction(ins);
+
+                progress |= eliminated;
         }
 
         return progress;
@@ -3196,40 +1957,6 @@ midgard_pair_load_store(compiler_context *ctx, midgard_block *block)
         }
 }
 
-/* Emit varying stores late */
-
-static void
-midgard_emit_store(compiler_context *ctx, midgard_block *block) {
-        /* Iterate in reverse to get the final write, rather than the first */
-
-        mir_foreach_instr_in_block_safe_rev(block, ins) {
-                /* Check if what we just wrote needs a store */
-                int idx = ins->ssa_args.dest;
-                uintptr_t varying = ((uintptr_t) _mesa_hash_table_u64_search(ctx->ssa_varyings, idx + 1));
-
-                if (!varying) continue;
-
-                varying -= 1;
-
-                /* We need to store to the appropriate varying, so emit the
-                 * move/store */
-
-                /* TODO: Integrate with special purpose RA (and scheduler?) */
-                bool high_varying_register = false;
-
-                midgard_instruction mov = v_fmov(idx, blank_alu_src, SSA_FIXED_REGISTER(REGISTER_VARYING_BASE + high_varying_register));
-
-                midgard_instruction st = m_store_vary_32(SSA_FIXED_REGISTER(high_varying_register), varying);
-                st.load_store.unknown = 0x1E9E; /* XXX: What is this? */
-
-                mir_insert_instruction_before(mir_next_op(ins), st);
-                mir_insert_instruction_before(mir_next_op(ins), mov);
-
-                /* We no longer need to store this varying */
-                _mesa_hash_table_u64_remove(ctx->ssa_varyings, idx + 1);
-        }
-}
-
 /* If there are leftovers after the below pass, emit actual fmov
  * instructions for the slow-but-correct path */
 
@@ -3299,7 +2026,7 @@ emit_blend_epilogue(compiler_context *ctx)
                 },
                 .alu = {
                         .op = midgard_alu_op_fmul,
-                        .reg_mode = midgard_reg_mode_full,
+                        .reg_mode = midgard_reg_mode_32,
                         .dest_override = midgard_dest_override_lower,
                         .mask = 0xFF,
                         .src1 = vector_alu_srco_unsigned(blank_alu_src),
@@ -3324,7 +2051,7 @@ emit_blend_epilogue(compiler_context *ctx)
                 },
                 .alu = {
                         .op = midgard_alu_op_f2u8,
-                        .reg_mode = midgard_reg_mode_half,
+                        .reg_mode = midgard_reg_mode_16,
                         .dest_override = midgard_dest_override_lower,
                         .outmod = midgard_outmod_pos,
                         .mask = 0xF,
@@ -3346,8 +2073,9 @@ emit_blend_epilogue(compiler_context *ctx)
                 },
                 .alu = {
                         .op = midgard_alu_op_imov,
-                        .reg_mode = midgard_reg_mode_quarter,
+                        .reg_mode = midgard_reg_mode_8,
                         .dest_override = midgard_dest_override_none,
+                        .outmod = midgard_outmod_int,
                         .mask = 0xFF,
                         .src1 = vector_alu_srco_unsigned(blank_alu_src),
                         .src2 = vector_alu_srco_unsigned(blank_alu_src),
@@ -3394,7 +2122,6 @@ emit_block(compiler_context *ctx, nir_block *block)
         /* Perform heavy lifting for aliasing */
         actualise_ssa_to_alias(ctx);
 
-        midgard_emit_store(ctx, this_block);
         midgard_pair_load_store(ctx, this_block);
 
         /* Append fragment shader epilogue (value writeout) */
@@ -3604,18 +2331,10 @@ midgard_compile_shader_nir(nir_shader *nir, midgard_program *program, bool is_bl
         /* TODO: Decide this at runtime */
         ctx->uniform_cutoff = 8;
 
-        /* Assign var locations early, so the epilogue can use them if necessary */
-
-        nir_assign_var_locations(&nir->outputs, &nir->num_outputs, glsl_type_size);
-        nir_assign_var_locations(&nir->inputs, &nir->num_inputs, glsl_type_size);
-        nir_assign_var_locations(&nir->uniforms, &nir->num_uniforms, glsl_type_size);
-
         /* Initialize at a global (not block) level hash tables */
 
         ctx->ssa_constants = _mesa_hash_table_u64_create(NULL);
-        ctx->ssa_varyings = _mesa_hash_table_u64_create(NULL);
         ctx->ssa_to_alias = _mesa_hash_table_u64_create(NULL);
-        ctx->ssa_to_register = _mesa_hash_table_u64_create(NULL);
         ctx->hash_to_temp = _mesa_hash_table_u64_create(NULL);
         ctx->sysval_to_id = _mesa_hash_table_u64_create(NULL);
         ctx->leftover_ssa_to_alias = _mesa_set_create(NULL, _mesa_hash_pointer, _mesa_key_pointer_equal);
@@ -3625,16 +2344,22 @@ midgard_compile_shader_nir(nir_shader *nir, midgard_program *program, bool is_bl
         struct exec_list *varyings =
                 ctx->stage == MESA_SHADER_VERTEX ? &nir->outputs : &nir->inputs;
 
+        unsigned max_varying = 0;
         nir_foreach_variable(var, varyings) {
                 unsigned loc = var->data.driver_location;
                 unsigned sz = glsl_type_size(var->type, FALSE);
 
-                for (int c = 0; c < sz; ++c) {
-                        program->varyings[loc + c] = var->data.location;
+                for (int c = loc; c < (loc + sz); ++c) {
+                        program->varyings[c] = var->data.location;
+                        max_varying = MAX2(max_varying, c);
                 }
         }
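+        /* e.g. (assuming glsl_type_size counts vec4 slots) a mat4 varying
+         * at driver_location 1 fills varyings[1..4] and raises max_varying
+         * to 4, making varying_count below 5 */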
 
-        /* Lower gl_Position pre-optimisation */
+        /* Lower gl_Position pre-optimisation, but after lowering vars to ssa
+         * (so we don't accidentally duplicate the epilogue since mesa/st has
+         * messed with our I/O quite a bit already) */
+
+        NIR_PASS_V(nir, nir_lower_vars_to_ssa);
 
         if (ctx->stage == MESA_SHADER_VERTEX)
                 NIR_PASS_V(nir, nir_lower_viewport_transform);
@@ -3667,7 +2392,7 @@ midgard_compile_shader_nir(nir_shader *nir, midgard_program *program, bool is_bl
         memcpy(program->sysvals, ctx->sysvals, sizeof(ctx->sysvals[0]) * ctx->sysval_count);
 
         program->attribute_count = (ctx->stage == MESA_SHADER_VERTEX) ? nir->num_inputs : 0;
-        program->varying_count = (ctx->stage == MESA_SHADER_VERTEX) ? nir->num_outputs : ((ctx->stage == MESA_SHADER_FRAGMENT) ? nir->num_inputs : 0);
+        program->varying_count = max_varying + 1; /* Fencepost: highest index + 1 */
 
         nir_foreach_function(func, nir) {
                 if (!func->impl)
@@ -3685,11 +2410,26 @@ midgard_compile_shader_nir(nir_shader *nir, midgard_program *program, bool is_bl
 
         util_dynarray_init(compiled, NULL);
 
-        /* Peephole optimizations */
+        /* MIR-level optimizations */
+
+        bool progress = false;
+
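+        /* Iterate the passes to a fixed point: each can expose work for the
+         * others, e.g. a propagated .pos leaves behind a plain fmov for
+         * copy-prop, which in turn leaves dead definitions for DCE */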
+        do {
+                progress = false;
+
+                mir_foreach_block(ctx, block) {
+                        progress |= midgard_opt_pos_propagate(ctx, block);
+                        progress |= midgard_opt_copy_prop(ctx, block);
+                        progress |= midgard_opt_copy_prop_tex(ctx, block);
+                        progress |= midgard_opt_dead_code_eliminate(ctx, block);
+                }
+        } while (progress);
 
+        /* Nested control-flow can result in dead branches at the end of the
+         * block. This messes with our analysis and is just dead code, so cull
+         * them */
         mir_foreach_block(ctx, block) {
-                midgard_opt_copy_prop(ctx, block);
-                midgard_opt_dead_code_eliminate(ctx, block);
+                midgard_opt_cull_dead_branch(ctx, block);
         }
 
         /* Schedule! */
@@ -3703,7 +2443,7 @@ midgard_compile_shader_nir(nir_shader *nir, midgard_program *program, bool is_bl
         mir_foreach_block(ctx, block) {
                 util_dynarray_foreach(&block->bundles, midgard_bundle, bundle) {
                         for (int c = 0; c < bundle->instruction_count; ++c) {
-                                midgard_instruction *ins = &bundle->instructions[c];
+                                midgard_instruction *ins = bundle->instructions[c];
 
                                 if (!midgard_is_branch_unit(ins->unit)) continue;
 
@@ -3718,10 +2458,13 @@ midgard_compile_shader_nir(nir_shader *nir, midgard_program *program, bool is_bl
                                 /* Determine the block we're jumping to */
                                 int target_number = ins->branch.target_block;
 
-                                /* Report the destination tag. Discards don't need this */
+                                /* Report the destination tag */
                                 int dest_tag = is_discard ? 0 : midgard_get_first_tag_from_block(ctx, target_number);
 
-                                /* Count up the number of quadwords we're jumping over. That is, the number of quadwords in each of the blocks between (br_block_idx, target_number) */
+                                /* Count up the number of quadwords we're
+                                 * jumping over, i.e. the quadwords in the
+                                 * blocks between br_block_idx and
+                                 * target_number (exclusive) */
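+
+                                /* e.g. a forward branch from block 2 to
+                                 * block 5 would sum the quadword counts of
+                                 * blocks 3 and 4; backward branches are
+                                 * assumed to accumulate negatively */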
+
                                 int quadword_offset = 0;
 
                                 if (is_discard) {
@@ -3838,8 +2581,13 @@ midgard_compile_shader_nir(nir_shader *nir, midgard_program *program, bool is_bl
 
         int current_bundle = 0;
 
+        /* Midgard prefetches instruction types, so during emission we
+         * need to look ahead to the next bundle's tag. Exceptions: the
+         * last instruction reports a lookahead of 1, and so does the
+         * second-to-last when the final bundle is an ALU... */
+
         mir_foreach_block(ctx, block) {
-                util_dynarray_foreach(&block->bundles, midgard_bundle, bundle) {
+                mir_foreach_bundle_in_block(block, bundle) {
                         int lookahead = 1;
 
                         if (current_bundle + 1 < bundle_count) {