turnip: fix vertex_id
[mesa.git] / src/freedreno/ir3/ir3_compiler_nir.c
index 03d8a2c7ba93ba88edecf1f8b0ad36f7147938c0..0e5491151d3e0683959347f5e0aa43ee2268ff85 100644
@@ -31,6 +31,7 @@
 #include "util/u_math.h"
 
 #include "ir3_compiler.h"
+#include "ir3_image.h"
 #include "ir3_shader.h"
 #include "ir3_nir.h"
 
@@ -82,19 +83,19 @@ create_input(struct ir3_context *ctx, unsigned n)
 }
 
 static struct ir3_instruction *
-create_frag_input(struct ir3_context *ctx, bool use_ldlv)
+create_frag_input(struct ir3_context *ctx, bool use_ldlv, unsigned n)
 {
        struct ir3_block *block = ctx->block;
        struct ir3_instruction *instr;
-       /* actual inloc is assigned and fixed up later: */
-       struct ir3_instruction *inloc = create_immed(block, 0);
+       /* packed inloc is fixed up later: */
+       struct ir3_instruction *inloc = create_immed(block, n);
 
        if (use_ldlv) {
                instr = ir3_LDLV(block, inloc, 0, create_immed(block, 1), 0);
                instr->cat6.type = TYPE_U32;
                instr->cat6.iim_val = 1;
        } else {
-               instr = ir3_BARY_F(block, inloc, 0, ctx->frag_vcoord, 0);
+               instr = ir3_BARY_F(block, inloc, 0, ctx->ij_pixel, 0);
                instr->regs[2]->wrmask = 0x3;
        }
 
@@ -106,7 +107,8 @@ create_driver_param(struct ir3_context *ctx, enum ir3_driver_param dp)
 {
        /* first four vec4 sysval's reserved for UBOs: */
        /* NOTE: dp is in scalar, but there can be >4 dp components: */
-       unsigned n = ctx->so->constbase.driver_param;
+       struct ir3_const_state *const_state = &ctx->so->shader->const_state;
+       unsigned n = const_state->offsets.driver_param;
        unsigned r = regid(n + dp / 4, dp % 4);
        return create_uniform(ctx->block, r);
 }
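
The regid(n + dp / 4, dp % 4) arithmetic above packs a scalar driver-param index into a (vec4 const register, component) pair. A minimal standalone sketch of that packing, assuming ir3's usual regid encoding of register number in the upper bits and component in the low two bits (the REGID macro and the offset value here are illustrative, not driver code):

    #include <assert.h>
    #include <stdio.h>

    /* stand-in for ir3's regid(): register number in bits 2+, component in bits 0-1 */
    #define REGID(num, comp) (((num) << 2) | (comp))

    int main(void)
    {
        unsigned base = 4;   /* const_state->offsets.driver_param, in vec4 units (example) */
        unsigned dp   = 6;   /* scalar driver param: 3rd component of the 2nd vec4 */
        unsigned r    = REGID(base + dp / 4, dp % 4);
        assert((r >> 2) == 5 && (r & 3) == 2);      /* lands in c5.z */
        printf("driver param %u -> c%u.%c\n", dp, r >> 2, "xyzw"[r & 3]);
        return 0;
    }
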
@@ -292,6 +294,8 @@ emit_alu(struct ir3_context *ctx, nir_alu_instr *alu)
        unsigned bs[info->num_inputs];     /* bit size */
        struct ir3_block *b = ctx->block;
        unsigned dst_sz, wrmask;
+       type_t dst_type = nir_dest_bit_size(alu->dest.dest) < 32 ?
+                       TYPE_U16 : TYPE_U32;
 
        if (alu->dest.dest.is_ssa) {
                dst_sz = alu->dest.dest.ssa.num_components;
@@ -319,31 +323,30 @@ emit_alu(struct ir3_context *ctx, nir_alu_instr *alu)
 
                        src[i] = ir3_get_src(ctx, &asrc->src)[asrc->swizzle[0]];
                        if (!src[i])
-                               src[i] = create_immed(ctx->block, 0);
-                       dst[i] = ir3_MOV(b, src[i], TYPE_U32);
+                               src[i] = create_immed_typed(ctx->block, 0, dst_type);
+                       dst[i] = ir3_MOV(b, src[i], dst_type);
                }
 
-               put_dst(ctx, &alu->dest.dest);
+               ir3_put_dst(ctx, &alu->dest.dest);
                return;
        }
 
 	/* We also get mov's with more than one component, so handle
 	 * those specially:
 	 */
-       if ((alu->op == nir_op_imov) || (alu->op == nir_op_fmov)) {
-               type_t type = (alu->op == nir_op_imov) ? TYPE_U32 : TYPE_F32;
+       if (alu->op == nir_op_mov) {
                nir_alu_src *asrc = &alu->src[0];
                struct ir3_instruction *const *src0 = ir3_get_src(ctx, &asrc->src);
 
                for (unsigned i = 0; i < dst_sz; i++) {
                        if (wrmask & (1 << i)) {
-                               dst[i] = ir3_MOV(b, src0[asrc->swizzle[i]], type);
+                               dst[i] = ir3_MOV(b, src0[asrc->swizzle[i]], dst_type);
                        } else {
                                dst[i] = NULL;
                        }
                }
 
-               put_dst(ctx, &alu->dest.dest);
+               ir3_put_dst(ctx, &alu->dest.dest);
                return;
        }
 
@@ -390,6 +393,8 @@ emit_alu(struct ir3_context *ctx, nir_alu_instr *alu)
                dst[0] = ir3_n2b(b, dst[0]);
                break;
        case nir_op_b2f16:
+               dst[0] = ir3_COV(b, ir3_b2n(b, src[0]), TYPE_U32, TYPE_F16);
+               break;
        case nir_op_b2f32:
                dst[0] = ir3_COV(b, ir3_b2n(b, src[0]), TYPE_U32, TYPE_F32);
                break;
@@ -428,7 +433,7 @@ emit_alu(struct ir3_context *ctx, nir_alu_instr *alu)
                                (list_length(&alu->src[0].src.ssa->uses) == 1) &&
                                ((opc_cat(src[0]->opc) == 2) || (opc_cat(src[0]->opc) == 3))) {
                        src[0]->flags |= IR3_INSTR_SAT;
-                       dst[0] = ir3_MOV(b, src[0], TYPE_U32);
+                       dst[0] = ir3_MOV(b, src[0], dst_type);
                } else {
                        /* otherwise generate a max.f that saturates.. blob does
                         * similar (generating a cat2 mov using max.f)
@@ -537,16 +542,11 @@ emit_alu(struct ir3_context *ctx, nir_alu_instr *alu)
        case nir_op_umin:
                dst[0] = ir3_MIN_U(b, src[0], 0, src[1], 0);
                break;
-       case nir_op_imul:
-               /*
-                * dst = (al * bl) + (ah * bl << 16) + (al * bh << 16)
-                *   mull.u tmp0, a, b           ; mul low, i.e. al * bl
-                *   madsh.m16 tmp1, a, b, tmp0  ; mul-add shift high mix, i.e. ah * bl << 16
-                *   madsh.m16 dst, b, a, tmp1   ; i.e. al * bh << 16
-                */
-               dst[0] = ir3_MADSH_M16(b, src[1], 0, src[0], 0,
-                                       ir3_MADSH_M16(b, src[0], 0, src[1], 0,
-                                               ir3_MULL_U(b, src[0], 0, src[1], 0), 0), 0);
+       case nir_op_umul_low:
+               dst[0] = ir3_MULL_U(b, src[0], 0, src[1], 0);
+               break;
+       case nir_op_imadsh_mix16:
+               dst[0] = ir3_MADSH_M16(b, src[0], 0, src[1], 0, src[2], 0);
                break;
        case nir_op_ineg:
                dst[0] = ir3_ABSNEG_S(b, src[0], IR3_REG_SNEG);
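
The comment deleted above is still the best description of what the hardware sequence does; the lowering simply moves into NIR, which now emits umul_low and imadsh_mix16 directly. A standalone check of the underlying identity, a * b == (al*bl) + (ah*bl << 16) + (al*bh << 16) mod 2^32, with plain C stand-ins for mull.u and madsh.m16 (not driver code):

    #include <assert.h>
    #include <stdint.h>

    static uint32_t mull_u(uint32_t a, uint32_t b)                 /* al * bl */
    {
        return (a & 0xffff) * (b & 0xffff);
    }

    static uint32_t madsh_m16(uint32_t a, uint32_t b, uint32_t c)  /* (ah * bl << 16) + c */
    {
        return (((a >> 16) * (b & 0xffff)) << 16) + c;
    }

    int main(void)
    {
        uint32_t a = 0xdeadbeef, b = 0x12345678;
        uint32_t tmp0 = mull_u(a, b);                    /* mull.u    tmp0, a, b       */
        uint32_t tmp1 = madsh_m16(a, b, tmp0);           /* madsh.m16 tmp1, a, b, tmp0 */
        uint32_t dst  = madsh_m16(b, a, tmp1);           /* madsh.m16 dst,  b, a, tmp1 */
        assert(dst == a * b);                /* ah*bh contributes only above bit 32 */
        return 0;
    }
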
@@ -615,9 +615,30 @@ emit_alu(struct ir3_context *ctx, nir_alu_instr *alu)
                dst[0] = ir3_SEL_B32(b, src[1], 0, cond, 0, src[2], 0);
                break;
        }
-       case nir_op_bit_count:
-               dst[0] = ir3_CBITS_B(b, src[0], 0);
+       case nir_op_bit_count: {
+               // TODO, we need to do this 16b at a time on a5xx+a6xx.. need to
+               // double check on earlier gen's.  Once half-precision support is
+               // in place, this should probably move to a NIR lowering pass:
+               struct ir3_instruction *hi, *lo;
+
+               hi = ir3_COV(b, ir3_SHR_B(b, src[0], 0, create_immed(b, 16), 0),
+                               TYPE_U32, TYPE_U16);
+               lo = ir3_COV(b, src[0], TYPE_U32, TYPE_U16);
+
+               hi = ir3_CBITS_B(b, hi, 0);
+               lo = ir3_CBITS_B(b, lo, 0);
+
+               // TODO maybe the builders should default to making dst half-precision
+               // if the src's were half precision, to make this less awkward.. otoh
+               // we should probably just do this lowering in NIR.
+               hi->regs[0]->flags |= IR3_REG_HALF;
+               lo->regs[0]->flags |= IR3_REG_HALF;
+
+               dst[0] = ir3_ADD_S(b, hi, 0, lo, 0);
+               dst[0]->regs[0]->flags |= IR3_REG_HALF;
+               dst[0] = ir3_COV(b, dst[0], TYPE_U16, TYPE_U32);
                break;
+       }
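
The split works because popcount distributes over disjoint halves: popcount(x) == popcount(x & 0xffff) + popcount(x >> 16). A quick standalone sketch of the same split (cbits16 is a stand-in for a 16-bit cbits.b, not a real builder):

    #include <assert.h>
    #include <stdint.h>

    static unsigned cbits16(uint16_t v)       /* stand-in for a half-width cbits.b */
    {
        unsigned n = 0;
        for (; v; v >>= 1)
            n += v & 1;
        return n;
    }

    int main(void)
    {
        uint32_t x  = 0xf00f0ff0;
        unsigned hi = cbits16(x >> 16);       /* shr.b 16, cov.u32u16, cbits.b */
        unsigned lo = cbits16(x & 0xffff);    /* cov.u32u16, cbits.b           */
        assert(hi + lo == 16);                /* add.s, then cov.u16u32        */
        return 0;
    }
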
        case nir_op_ifind_msb: {
                struct ir3_instruction *cmp;
                dst[0] = ir3_CLZ_S(b, src[0], 0);
@@ -648,7 +669,7 @@ emit_alu(struct ir3_context *ctx, nir_alu_instr *alu)
                break;
        }
 
-       put_dst(ctx, &alu->dest.dest);
+       ir3_put_dst(ctx, &alu->dest.dest);
 }
 
 /* handles direct/indirect UBO reads: */
@@ -658,10 +679,12 @@ emit_intrinsic_load_ubo(struct ir3_context *ctx, nir_intrinsic_instr *intr,
 {
        struct ir3_block *b = ctx->block;
        struct ir3_instruction *base_lo, *base_hi, *addr, *src0, *src1;
-       nir_const_value *const_offset;
-       /* UBO addresses are the first driver params: */
-       unsigned ubo = regid(ctx->so->constbase.ubo, 0);
-       const unsigned ptrsz = ir3_pointer_size(ctx);
+       /* UBO addresses are the first driver params, but subtract 2 here to
+        * account for nir_lower_uniforms_to_ubo rebasing the UBOs such that UBO 0
+        * is the uniforms: */
+       struct ir3_const_state *const_state = &ctx->so->shader->const_state;
+       unsigned ubo = regid(const_state->offsets.ubo, 0) - 2;
+       const unsigned ptrsz = ir3_pointer_size(ctx->compiler);
 
        int off = 0;
 
@@ -672,16 +695,22 @@ emit_intrinsic_load_ubo(struct ir3_context *ctx, nir_intrinsic_instr *intr,
                base_lo = create_uniform(b, ubo + (src0->regs[1]->iim_val * ptrsz));
                base_hi = create_uniform(b, ubo + (src0->regs[1]->iim_val * ptrsz) + 1);
        } else {
-               base_lo = create_uniform_indirect(b, ubo, ir3_get_addr(ctx, src0, 4));
-               base_hi = create_uniform_indirect(b, ubo + 1, ir3_get_addr(ctx, src0, 4));
+               base_lo = create_uniform_indirect(b, ubo, ir3_get_addr(ctx, src0, ptrsz));
+               base_hi = create_uniform_indirect(b, ubo + 1, ir3_get_addr(ctx, src0, ptrsz));
+
+               /* NOTE: since relative addressing is used, make sure constlen is
+                * at least big enough to cover all the UBO addresses, since the
+                * assembler won't know what the max address reg is.
+                */
+               ctx->so->constlen = MAX2(ctx->so->constlen,
+                       const_state->offsets.ubo + (ctx->s->info.num_ubos * ptrsz));
        }
 
        /* note: on 32bit gpu's base_hi is ignored and DCE'd */
        addr = base_lo;
 
-       const_offset = nir_src_as_const_value(intr->src[1]);
-       if (const_offset) {
-               off += const_offset->u32[0];
+       if (nir_src_is_const(intr->src[1])) {
+               off += nir_src_as_uint(intr->src[1]);
        } else {
                /* For load_ubo_indirect, second src is indirect offset: */
                src1 = ir3_get_src(ctx, &intr->src[1])[0];
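
The "subtract 2" above is easiest to see with concrete numbers. A sketch of the resulting const-file layout, assuming a 64-bit GPU (ptrsz == 2) and an illustrative offsets.ubo value; after nir_lower_uniforms_to_ubo rebases everything, NIR's UBO index 1 is the application's UBO 0, and its address pair lands exactly at regid(offsets.ubo, 0):

    #include <assert.h>

    int main(void)
    {
        const unsigned ptrsz = 2;            /* two consts per 64-bit UBO address  */
        unsigned ubo_vec4 = 8;               /* const_state->offsets.ubo (example) */
        unsigned ubo = (ubo_vec4 << 2) - 2;  /* regid(offsets.ubo, 0) - 2          */

        /* NIR UBO 1 == app UBO 0 after the rebase: */
        assert(ubo + 1 * ptrsz == (ubo_vec4 << 2));
        return 0;
    }
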
@@ -723,195 +752,22 @@ emit_intrinsic_load_ubo(struct ir3_context *ctx, nir_intrinsic_instr *intr,
        }
 }
 
-/* src[] = { buffer_index, offset }. No const_index */
-static void
-emit_intrinsic_load_ssbo(struct ir3_context *ctx, nir_intrinsic_instr *intr,
-               struct ir3_instruction **dst)
-{
-       struct ir3_block *b = ctx->block;
-       struct ir3_instruction *ldgb, *src0, *src1, *offset;
-       nir_const_value *const_offset;
-
-       /* can this be non-const buffer_index?  how do we handle that? */
-       const_offset = nir_src_as_const_value(intr->src[0]);
-       compile_assert(ctx, const_offset);
-
-       offset = ir3_get_src(ctx, &intr->src[1])[0];
-
-       /* src0 is uvec2(offset*4, 0), src1 is offset.. nir already *= 4: */
-       src0 = ir3_create_collect(ctx, (struct ir3_instruction*[]){
-               offset,
-               create_immed(b, 0),
-       }, 2);
-       src1 = ir3_SHR_B(b, offset, 0, create_immed(b, 2), 0);
-
-       ldgb = ir3_LDGB(b, create_immed(b, const_offset->u32[0]), 0,
-                       src0, 0, src1, 0);
-       ldgb->regs[0]->wrmask = MASK(intr->num_components);
-       ldgb->cat6.iim_val = intr->num_components;
-       ldgb->cat6.d = 4;
-       ldgb->cat6.type = TYPE_U32;
-       ldgb->barrier_class = IR3_BARRIER_BUFFER_R;
-       ldgb->barrier_conflict = IR3_BARRIER_BUFFER_W;
-
-       ir3_split_dest(b, dst, ldgb, 0, intr->num_components);
-}
-
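
The removed path (and the _ir3 intrinsics that replace it, see below) carry the same offset in two encodings: a uvec2 64-bit byte offset and a scalar dword offset. A standalone sketch of that pairing (values illustrative):

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
        uint32_t byte_off  = 7 * 4;            /* nir has already multiplied by 4   */
        uint32_t dword_off = byte_off >> 2;    /* the extra shr.b the hw wants      */
        uint32_t src0[2]   = { byte_off, 0 };  /* uvec2(offset, 0): 64b byte offset */
        assert(src0[1] == 0 && dword_off == 7);
        return 0;
    }
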
-/* src[] = { value, block_index, offset }. const_index[] = { write_mask } */
-static void
-emit_intrinsic_store_ssbo(struct ir3_context *ctx, nir_intrinsic_instr *intr)
-{
-       struct ir3_block *b = ctx->block;
-       struct ir3_instruction *stgb, *src0, *src1, *src2, *offset;
-       nir_const_value *const_offset;
-       /* TODO handle wrmask properly, see _store_shared().. but I think
-        * it is more a PITA than that, since blob ends up loading the
-        * masked components and writing them back out.
-        */
-       unsigned wrmask = intr->const_index[0];
-       unsigned ncomp = ffs(~wrmask) - 1;
-
-       /* can this be non-const buffer_index?  how do we handle that? */
-       const_offset = nir_src_as_const_value(intr->src[1]);
-       compile_assert(ctx, const_offset);
-
-       offset = ir3_get_src(ctx, &intr->src[2])[0];
-
-       /* src0 is value, src1 is offset, src2 is uvec2(offset*4, 0)..
-        * nir already *= 4:
-        */
-       src0 = ir3_create_collect(ctx, ir3_get_src(ctx, &intr->src[0]), ncomp);
-       src1 = ir3_SHR_B(b, offset, 0, create_immed(b, 2), 0);
-       src2 = ir3_create_collect(ctx, (struct ir3_instruction*[]){
-               offset,
-               create_immed(b, 0),
-       }, 2);
-
-       stgb = ir3_STGB(b, create_immed(b, const_offset->u32[0]), 0,
-                       src0, 0, src1, 0, src2, 0);
-       stgb->cat6.iim_val = ncomp;
-       stgb->cat6.d = 4;
-       stgb->cat6.type = TYPE_U32;
-       stgb->barrier_class = IR3_BARRIER_BUFFER_W;
-       stgb->barrier_conflict = IR3_BARRIER_BUFFER_R | IR3_BARRIER_BUFFER_W;
-
-       array_insert(b, b->keeps, stgb);
-}
-
 /* src[] = { block_index } */
 static void
 emit_intrinsic_ssbo_size(struct ir3_context *ctx, nir_intrinsic_instr *intr,
                struct ir3_instruction **dst)
 {
        /* SSBO size stored as a const starting at ssbo_sizes: */
-       unsigned blk_idx = nir_src_as_const_value(intr->src[0])->u32[0];
-       unsigned idx = regid(ctx->so->constbase.ssbo_sizes, 0) +
-               ctx->so->const_layout.ssbo_size.off[blk_idx];
+       struct ir3_const_state *const_state = &ctx->so->shader->const_state;
+       unsigned blk_idx = nir_src_as_uint(intr->src[0]);
+       unsigned idx = regid(const_state->offsets.ssbo_sizes, 0) +
+               const_state->ssbo_size.off[blk_idx];
 
-       debug_assert(ctx->so->const_layout.ssbo_size.mask & (1 << blk_idx));
+       debug_assert(const_state->ssbo_size.mask & (1 << blk_idx));
 
        dst[0] = create_uniform(ctx->block, idx);
 }
 
-/*
- * SSBO atomic intrinsics
- *
- * All of the SSBO atomic memory operations read a value from memory,
- * compute a new value using one of the operations below, write the new
- * value to memory, and return the original value read.
- *
- * All operations take 3 sources except CompSwap that takes 4. These
- * sources represent:
- *
- * 0: The SSBO buffer index.
- * 1: The offset into the SSBO buffer of the variable that the atomic
- *    operation will operate on.
- * 2: The data parameter to the atomic function (i.e. the value to add
- *    in ssbo_atomic_add, etc).
- * 3: For CompSwap only: the second data parameter.
- */
-static struct ir3_instruction *
-emit_intrinsic_atomic_ssbo(struct ir3_context *ctx, nir_intrinsic_instr *intr)
-{
-       struct ir3_block *b = ctx->block;
-       struct ir3_instruction *atomic, *ssbo, *src0, *src1, *src2, *offset;
-       nir_const_value *const_offset;
-       type_t type = TYPE_U32;
-
-       /* can this be non-const buffer_index?  how do we handle that? */
-       const_offset = nir_src_as_const_value(intr->src[0]);
-       compile_assert(ctx, const_offset);
-       ssbo = create_immed(b, const_offset->u32[0]);
-
-       offset = ir3_get_src(ctx, &intr->src[1])[0];
-
-       /* src0 is data (or uvec2(data, compare))
-        * src1 is offset
-        * src2 is uvec2(offset*4, 0) (appears to be 64b byte offset)
-        *
-        * Note that nir already multiplies the offset by four
-        */
-       src0 = ir3_get_src(ctx, &intr->src[2])[0];
-       src1 = ir3_SHR_B(b, offset, 0, create_immed(b, 2), 0);
-       src2 = ir3_create_collect(ctx, (struct ir3_instruction*[]){
-               offset,
-               create_immed(b, 0),
-       }, 2);
-
-       switch (intr->intrinsic) {
-       case nir_intrinsic_ssbo_atomic_add:
-               atomic = ir3_ATOMIC_ADD_G(b, ssbo, 0, src0, 0, src1, 0, src2, 0);
-               break;
-       case nir_intrinsic_ssbo_atomic_imin:
-               atomic = ir3_ATOMIC_MIN_G(b, ssbo, 0, src0, 0, src1, 0, src2, 0);
-               type = TYPE_S32;
-               break;
-       case nir_intrinsic_ssbo_atomic_umin:
-               atomic = ir3_ATOMIC_MIN_G(b, ssbo, 0, src0, 0, src1, 0, src2, 0);
-               break;
-       case nir_intrinsic_ssbo_atomic_imax:
-               atomic = ir3_ATOMIC_MAX_G(b, ssbo, 0, src0, 0, src1, 0, src2, 0);
-               type = TYPE_S32;
-               break;
-       case nir_intrinsic_ssbo_atomic_umax:
-               atomic = ir3_ATOMIC_MAX_G(b, ssbo, 0, src0, 0, src1, 0, src2, 0);
-               break;
-       case nir_intrinsic_ssbo_atomic_and:
-               atomic = ir3_ATOMIC_AND_G(b, ssbo, 0, src0, 0, src1, 0, src2, 0);
-               break;
-       case nir_intrinsic_ssbo_atomic_or:
-               atomic = ir3_ATOMIC_OR_G(b, ssbo, 0, src0, 0, src1, 0, src2, 0);
-               break;
-       case nir_intrinsic_ssbo_atomic_xor:
-               atomic = ir3_ATOMIC_XOR_G(b, ssbo, 0, src0, 0, src1, 0, src2, 0);
-               break;
-       case nir_intrinsic_ssbo_atomic_exchange:
-               atomic = ir3_ATOMIC_XCHG_G(b, ssbo, 0, src0, 0, src1, 0, src2, 0);
-               break;
-       case nir_intrinsic_ssbo_atomic_comp_swap:
-               /* for cmpxchg, src0 is [ui]vec2(data, compare): */
-               src0 = ir3_create_collect(ctx, (struct ir3_instruction*[]){
-                       ir3_get_src(ctx, &intr->src[3])[0],
-                       src0,
-               }, 2);
-               atomic = ir3_ATOMIC_CMPXCHG_G(b, ssbo, 0, src0, 0, src1, 0, src2, 0);
-               break;
-       default:
-               unreachable("boo");
-       }
-
-       atomic->cat6.iim_val = 1;
-       atomic->cat6.d = 4;
-       atomic->cat6.type = type;
-       atomic->barrier_class = IR3_BARRIER_BUFFER_W;
-       atomic->barrier_conflict = IR3_BARRIER_BUFFER_R | IR3_BARRIER_BUFFER_W;
-
-       /* even if nothing consume the result, we can't DCE the instruction: */
-       array_insert(b, b->keeps, atomic);
-
-       return atomic;
-}
-
 /* src[] = { offset }. const_index[] = { base } */
 static void
 emit_intrinsic_load_shared(struct ir3_context *ctx, nir_intrinsic_instr *intr,
@@ -1058,145 +914,22 @@ emit_intrinsic_atomic_shared(struct ir3_context *ctx, nir_intrinsic_instr *intr)
        return atomic;
 }
 
-/* Images get mapped into SSBO/image state (for store/atomic) and texture
- * state block (for load).  To simplify things, invert the image id and
- * map it from end of state block, ie. image 0 becomes num-1, image 1
- * becomes num-2, etc.  This potentially avoids needing to re-emit texture
- * state when switching shaders.
- *
- * TODO is max # of samplers and SSBOs the same.  This shouldn't be hard-
- * coded.  Also, since all the gl shader stages (ie. everything but CS)
- * share the same SSBO/image state block, this might require some more
- * logic if we supported images in anything other than FS..
+/* TODO handle actual indirect/dynamic case.. which is going to be weird
+ * to handle with the image_mapping table..
  */
-static unsigned
-get_image_slot(struct ir3_context *ctx, nir_deref_instr *deref)
-{
-       unsigned int loc = 0;
-       unsigned inner_size = 1;
-
-       while (deref->deref_type != nir_deref_type_var) {
-               assert(deref->deref_type == nir_deref_type_array);
-               nir_const_value *const_index = nir_src_as_const_value(deref->arr.index);
-               assert(const_index);
-
-               /* Go to the next instruction */
-               deref = nir_deref_instr_parent(deref);
-
-               assert(glsl_type_is_array(deref->type));
-               const unsigned array_len = glsl_get_length(deref->type);
-               loc += MIN2(const_index->u32[0], array_len - 1) * inner_size;
-
-               /* Update the inner size */
-               inner_size *= array_len;
-       }
-
-       loc += deref->var->data.driver_location;
-
-       /* TODO figure out real limit per generation, and don't hardcode: */
-       const unsigned max_samplers = 16;
-       return max_samplers - loc - 1;
-}
-
-/* see tex_info() for equiv logic for texture instructions.. it would be
- * nice if this could be better unified..
- */
-static unsigned
-get_image_coords(const nir_variable *var, unsigned *flagsp)
-{
-       const struct glsl_type *type = glsl_without_array(var->type);
-       unsigned coords, flags = 0;
-
-       switch (glsl_get_sampler_dim(type)) {
-       case GLSL_SAMPLER_DIM_1D:
-       case GLSL_SAMPLER_DIM_BUF:
-               coords = 1;
-               break;
-       case GLSL_SAMPLER_DIM_2D:
-       case GLSL_SAMPLER_DIM_RECT:
-       case GLSL_SAMPLER_DIM_EXTERNAL:
-       case GLSL_SAMPLER_DIM_MS:
-               coords = 2;
-               break;
-       case GLSL_SAMPLER_DIM_3D:
-       case GLSL_SAMPLER_DIM_CUBE:
-               flags |= IR3_INSTR_3D;
-               coords = 3;
-               break;
-       default:
-               unreachable("bad sampler dim");
-               return 0;
-       }
-
-       if (glsl_sampler_type_is_array(type)) {
-               /* note: unlike tex_info(), adjust # of coords to include array idx: */
-               coords++;
-               flags |= IR3_INSTR_A;
-       }
-
-       if (flagsp)
-               *flagsp = flags;
-
-       return coords;
-}
-
-static type_t
-get_image_type(const nir_variable *var)
-{
-       switch (glsl_get_sampler_result_type(glsl_without_array(var->type))) {
-       case GLSL_TYPE_UINT:
-               return TYPE_U32;
-       case GLSL_TYPE_INT:
-               return TYPE_S32;
-       case GLSL_TYPE_FLOAT:
-               return TYPE_F32;
-       default:
-               unreachable("bad sampler type.");
-               return 0;
-       }
-}
-
 static struct ir3_instruction *
-get_image_offset(struct ir3_context *ctx, const nir_variable *var,
-               struct ir3_instruction * const *coords, bool byteoff)
+get_image_samp_tex_src(struct ir3_context *ctx, nir_intrinsic_instr *intr)
 {
-       struct ir3_block *b = ctx->block;
-       struct ir3_instruction *offset;
-       unsigned ncoords = get_image_coords(var, NULL);
-
-       /* to calculate the byte offset (yes, uggg) we need (up to) three
-        * const values to know the bytes per pixel, and y and z stride:
-        */
-       unsigned cb = regid(ctx->so->constbase.image_dims, 0) +
-               ctx->so->const_layout.image_dims.off[var->data.driver_location];
-
-       debug_assert(ctx->so->const_layout.image_dims.mask &
-                       (1 << var->data.driver_location));
-
-       /* offset = coords.x * bytes_per_pixel: */
-       offset = ir3_MUL_S(b, coords[0], 0, create_uniform(b, cb + 0), 0);
-       if (ncoords > 1) {
-               /* offset += coords.y * y_pitch: */
-               offset = ir3_MAD_S24(b, create_uniform(b, cb + 1), 0,
-                               coords[1], 0, offset, 0);
-       }
-       if (ncoords > 2) {
-               /* offset += coords.z * z_pitch: */
-               offset = ir3_MAD_S24(b, create_uniform(b, cb + 2), 0,
-                               coords[2], 0, offset, 0);
-       }
+       unsigned slot = ir3_get_image_slot(nir_src_as_deref(intr->src[0]));
+       unsigned tex_idx = ir3_image_to_tex(&ctx->so->image_mapping, slot);
+       struct ir3_instruction *texture, *sampler;
 
-       if (!byteoff) {
-               /* Some cases, like atomics, seem to use dword offset instead
-                * of byte offsets.. blob just puts an extra shr.b in there
-                * in those cases:
-                */
-               offset = ir3_SHR_B(b, offset, 0, create_immed(b, 2), 0);
-       }
+       texture = create_immed_typed(ctx->block, tex_idx, TYPE_U16);
+       sampler = create_immed_typed(ctx->block, tex_idx, TYPE_U16);
 
        return ir3_create_collect(ctx, (struct ir3_instruction*[]){
-               offset,
-               create_immed(b, 0),
+               sampler,
+               texture,
        }, 2);
 }
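
get_image_offset(), deleted above, open-coded a linear address from the per-image image_dims consts: bytes per pixel, y pitch, z pitch. A standalone rendering of that math (all pitch values illustrative, not taken from real hardware state):

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
        /* image_dims consts: { bytes per pixel, y pitch, z pitch } (example values) */
        uint32_t cpp = 4, y_pitch = 256, z_pitch = 16384;
        uint32_t x = 3, y = 2, z = 1;

        uint32_t off = x * cpp;          /* mul.s coords.x, bytes_per_pixel */
        off += y * y_pitch;              /* mad.s24                         */
        off += z * z_pitch;              /* mad.s24                         */
        assert(off == 12 + 512 + 16384);

        uint32_t dword_off = off >> 2;   /* atomics take dword, not byte, offsets */
        assert(dword_off == off / 4);
        return 0;
    }
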
 
@@ -1207,12 +940,12 @@ emit_intrinsic_load_image(struct ir3_context *ctx, nir_intrinsic_instr *intr,
 {
        struct ir3_block *b = ctx->block;
        const nir_variable *var = nir_intrinsic_get_var(intr, 0);
+       struct ir3_instruction *samp_tex = get_image_samp_tex_src(ctx, intr);
        struct ir3_instruction *sam;
        struct ir3_instruction * const *src0 = ir3_get_src(ctx, &intr->src[1]);
        struct ir3_instruction *coords[4];
-       unsigned flags, ncoords = get_image_coords(var, &flags);
-       unsigned tex_idx = get_image_slot(ctx, nir_src_as_deref(intr->src[0]));
-       type_t type = get_image_type(var);
+       unsigned flags, ncoords = ir3_get_image_coords(var, &flags);
+       type_t type = ir3_get_image_type(var);
 
        /* hmm, this seems a bit odd, but it is what blob does and (at least
         * a5xx) just faults on bogus addresses otherwise:
@@ -1229,7 +962,7 @@ emit_intrinsic_load_image(struct ir3_context *ctx, nir_intrinsic_instr *intr,
                coords[ncoords++] = create_immed(b, 0);
 
        sam = ir3_SAM(b, OPC_ISAM, type, 0b1111, flags,
-                       tex_idx, tex_idx, ir3_create_collect(ctx, coords, ncoords), NULL);
+                       samp_tex, ir3_create_collect(ctx, coords, ncoords), NULL);
 
        sam->barrier_class = IR3_BARRIER_IMAGE_R;
        sam->barrier_conflict = IR3_BARRIER_IMAGE_W;
@@ -1237,130 +970,19 @@ emit_intrinsic_load_image(struct ir3_context *ctx, nir_intrinsic_instr *intr,
        ir3_split_dest(b, dst, sam, 0, 4);
 }
 
-/* Returns the number of components for the different image formats
- * supported by the GLES 3.1 spec, plus those added by the
- * GL_NV_image_formats extension.
- */
-static unsigned
-get_num_components_for_glformat(GLuint format)
-{
-       switch (format) {
-       case GL_R32F:
-       case GL_R32I:
-       case GL_R32UI:
-       case GL_R16F:
-       case GL_R16I:
-       case GL_R16UI:
-       case GL_R16:
-       case GL_R16_SNORM:
-       case GL_R8I:
-       case GL_R8UI:
-       case GL_R8:
-       case GL_R8_SNORM:
-               return 1;
-
-       case GL_RG32F:
-       case GL_RG32I:
-       case GL_RG32UI:
-       case GL_RG16F:
-       case GL_RG16I:
-       case GL_RG16UI:
-       case GL_RG16:
-       case GL_RG16_SNORM:
-       case GL_RG8I:
-       case GL_RG8UI:
-       case GL_RG8:
-       case GL_RG8_SNORM:
-               return 2;
-
-       case GL_R11F_G11F_B10F:
-               return 3;
-
-       case GL_RGBA32F:
-       case GL_RGBA32I:
-       case GL_RGBA32UI:
-       case GL_RGBA16F:
-       case GL_RGBA16I:
-       case GL_RGBA16UI:
-       case GL_RGBA16:
-       case GL_RGBA16_SNORM:
-       case GL_RGBA8I:
-       case GL_RGBA8UI:
-       case GL_RGBA8:
-       case GL_RGBA8_SNORM:
-       case GL_RGB10_A2UI:
-       case GL_RGB10_A2:
-               return 4;
-
-       case GL_NONE:
-               /* Omitting the image format qualifier is allowed on desktop GL
-                * profiles. Assuming 4 components is always safe.
-                */
-               return 4;
-
-       default:
-               /* Return 4 components also for all other formats we don't know
-                * about. The format should have been validated already by
-                * the higher level API, but drop a debug message just in case.
-                */
-               debug_printf("Unhandled GL format %u while emitting imageStore()\n",
-                                        format);
-               return 4;
-       }
-}
-
-/* src[] = { deref, coord, sample_index, value }. const_index[] = {} */
-static void
-emit_intrinsic_store_image(struct ir3_context *ctx, nir_intrinsic_instr *intr)
-{
-       struct ir3_block *b = ctx->block;
-       const nir_variable *var = nir_intrinsic_get_var(intr, 0);
-       struct ir3_instruction *stib, *offset;
-       struct ir3_instruction * const *value = ir3_get_src(ctx, &intr->src[3]);
-       struct ir3_instruction * const *coords = ir3_get_src(ctx, &intr->src[1]);
-       unsigned ncoords = get_image_coords(var, NULL);
-       unsigned tex_idx = get_image_slot(ctx, nir_src_as_deref(intr->src[0]));
-       unsigned ncomp = get_num_components_for_glformat(var->data.image.format);
-
-       /* src0 is value
-        * src1 is coords
-        * src2 is 64b byte offset
-        */
-
-       offset = get_image_offset(ctx, var, coords, true);
-
-       /* NOTE: stib seems to take byte offset, but stgb.typed can be used
-        * too and takes a dword offset.. not quite sure yet why blob uses
-        * one over the other in various cases.
-        */
-
-       stib = ir3_STIB(b, create_immed(b, tex_idx), 0,
-                       ir3_create_collect(ctx, value, ncomp), 0,
-                       ir3_create_collect(ctx, coords, ncoords), 0,
-                       offset, 0);
-       stib->cat6.iim_val = ncomp;
-       stib->cat6.d = ncoords;
-       stib->cat6.type = get_image_type(var);
-       stib->cat6.typed = true;
-       stib->barrier_class = IR3_BARRIER_IMAGE_W;
-       stib->barrier_conflict = IR3_BARRIER_IMAGE_R | IR3_BARRIER_IMAGE_W;
-
-       array_insert(b, b->keeps, stib);
-}
-
 static void
 emit_intrinsic_image_size(struct ir3_context *ctx, nir_intrinsic_instr *intr,
                struct ir3_instruction **dst)
 {
        struct ir3_block *b = ctx->block;
        const nir_variable *var = nir_intrinsic_get_var(intr, 0);
-       unsigned tex_idx = get_image_slot(ctx, nir_src_as_deref(intr->src[0]));
+       struct ir3_instruction *samp_tex = get_image_samp_tex_src(ctx, intr);
        struct ir3_instruction *sam, *lod;
-       unsigned flags, ncoords = get_image_coords(var, &flags);
+       unsigned flags, ncoords = ir3_get_image_coords(var, &flags);
 
        lod = create_immed(b, 0);
        sam = ir3_SAM(b, OPC_GETSIZE, TYPE_U32, 0b1111, flags,
-                       tex_idx, tex_idx, lod, NULL);
+                       samp_tex, lod, NULL);
 
        /* Array size actually ends up in .w rather than .z. This doesn't
         * matter for miplevel 0, but for higher mips the value in z is
@@ -1392,8 +1014,9 @@ emit_intrinsic_image_size(struct ir3_context *ctx, nir_intrinsic_instr *intr,
                 * bytes-per-pixel should have been emitted in 2nd slot of
                 * image_dims. See ir3_shader::emit_image_dims().
                 */
-               unsigned cb = regid(ctx->so->constbase.image_dims, 0) +
-                       ctx->so->const_layout.image_dims.off[var->data.driver_location];
+               struct ir3_const_state *const_state = &ctx->so->shader->const_state;
+               unsigned cb = regid(const_state->offsets.image_dims, 0) +
+                       const_state->image_dims.off[var->data.driver_location];
                struct ir3_instruction *aux = create_uniform(b, cb + 1);
 
                tmp[0] = ir3_SHR_B(b, tmp[0], 0, aux, 0);
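
A sketch of that divide-down, assuming (as the right-shift implies) the second image_dims slot holds log2 of the pixel size rather than the raw byte count (values illustrative):

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
        uint32_t size_bytes = 256;        /* getsize .x for a buffer image         */
        uint32_t aux        = 2;          /* image_dims slot 1: log2(4B per texel) */
        uint32_t n_texels   = size_bytes >> aux;   /* the shr.b above              */
        assert(n_texels == 64);
        return 0;
    }
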
@@ -1411,73 +1034,6 @@ emit_intrinsic_image_size(struct ir3_context *ctx, nir_intrinsic_instr *intr,
        }
 }
 
-/* src[] = { deref, coord, sample_index, value, compare }. const_index[] = {} */
-static struct ir3_instruction *
-emit_intrinsic_atomic_image(struct ir3_context *ctx, nir_intrinsic_instr *intr)
-{
-       struct ir3_block *b = ctx->block;
-       const nir_variable *var = nir_intrinsic_get_var(intr, 0);
-       struct ir3_instruction *atomic, *image, *src0, *src1, *src2;
-       struct ir3_instruction * const *coords = ir3_get_src(ctx, &intr->src[1]);
-       unsigned ncoords = get_image_coords(var, NULL);
-
-       image = create_immed(b, get_image_slot(ctx, nir_src_as_deref(intr->src[0])));
-
-       /* src0 is value (or uvec2(value, compare))
-        * src1 is coords
-        * src2 is 64b byte offset
-        */
-       src0 = ir3_get_src(ctx, &intr->src[3])[0];
-       src1 = ir3_create_collect(ctx, coords, ncoords);
-       src2 = get_image_offset(ctx, var, coords, false);
-
-       switch (intr->intrinsic) {
-       case nir_intrinsic_image_deref_atomic_add:
-               atomic = ir3_ATOMIC_ADD_G(b, image, 0, src0, 0, src1, 0, src2, 0);
-               break;
-       case nir_intrinsic_image_deref_atomic_min:
-               atomic = ir3_ATOMIC_MIN_G(b, image, 0, src0, 0, src1, 0, src2, 0);
-               break;
-       case nir_intrinsic_image_deref_atomic_max:
-               atomic = ir3_ATOMIC_MAX_G(b, image, 0, src0, 0, src1, 0, src2, 0);
-               break;
-       case nir_intrinsic_image_deref_atomic_and:
-               atomic = ir3_ATOMIC_AND_G(b, image, 0, src0, 0, src1, 0, src2, 0);
-               break;
-       case nir_intrinsic_image_deref_atomic_or:
-               atomic = ir3_ATOMIC_OR_G(b, image, 0, src0, 0, src1, 0, src2, 0);
-               break;
-       case nir_intrinsic_image_deref_atomic_xor:
-               atomic = ir3_ATOMIC_XOR_G(b, image, 0, src0, 0, src1, 0, src2, 0);
-               break;
-       case nir_intrinsic_image_deref_atomic_exchange:
-               atomic = ir3_ATOMIC_XCHG_G(b, image, 0, src0, 0, src1, 0, src2, 0);
-               break;
-       case nir_intrinsic_image_deref_atomic_comp_swap:
-               /* for cmpxchg, src0 is [ui]vec2(data, compare): */
-               src0 = ir3_create_collect(ctx, (struct ir3_instruction*[]){
-                       ir3_get_src(ctx, &intr->src[4])[0],
-                       src0,
-               }, 2);
-               atomic = ir3_ATOMIC_CMPXCHG_G(b, image, 0, src0, 0, src1, 0, src2, 0);
-               break;
-       default:
-               unreachable("boo");
-       }
-
-       atomic->cat6.iim_val = 1;
-       atomic->cat6.d = ncoords;
-       atomic->cat6.type = get_image_type(var);
-       atomic->cat6.typed = true;
-       atomic->barrier_class = IR3_BARRIER_IMAGE_W;
-       atomic->barrier_conflict = IR3_BARRIER_IMAGE_R | IR3_BARRIER_IMAGE_W;
-
-       /* even if nothing consume the result, we can't DCE the instruction: */
-       array_insert(b, b->keeps, atomic);
-
-       return atomic;
-}
-
 static void
 emit_intrinsic_barrier(struct ir3_context *ctx, nir_intrinsic_instr *intr)
 {
@@ -1497,6 +1053,7 @@ emit_intrinsic_barrier(struct ir3_context *ctx, nir_intrinsic_instr *intr)
                barrier->cat7.g = true;
                barrier->cat7.r = true;
                barrier->cat7.w = true;
+               barrier->cat7.l = true;
                barrier->barrier_class = IR3_BARRIER_IMAGE_W |
                                IR3_BARRIER_BUFFER_W;
                barrier->barrier_conflict =
@@ -1580,6 +1137,95 @@ static void add_sysval_input(struct ir3_context *ctx, gl_system_value slot,
        add_sysval_input_compmask(ctx, slot, 0x1, instr);
 }
 
+static struct ir3_instruction *
+get_barycentric_centroid(struct ir3_context *ctx)
+{
+       if (!ctx->ij_centroid) {
+               struct ir3_instruction *xy[2];
+               struct ir3_instruction *ij;
+
+               ij = create_input_compmask(ctx, 0, 0x3);
+               ir3_split_dest(ctx->block, xy, ij, 0, 2);
+
+               ctx->ij_centroid = ir3_create_collect(ctx, xy, 2);
+
+               add_sysval_input_compmask(ctx,
+                               SYSTEM_VALUE_BARYCENTRIC_CENTROID,
+                               0x3, ij);
+       }
+
+       return ctx->ij_centroid;
+}
+
+static struct ir3_instruction *
+get_barycentric_sample(struct ir3_context *ctx)
+{
+       if (!ctx->ij_sample) {
+               struct ir3_instruction *xy[2];
+               struct ir3_instruction *ij;
+
+               ij = create_input_compmask(ctx, 0, 0x3);
+               ir3_split_dest(ctx->block, xy, ij, 0, 2);
+
+               ctx->ij_sample = ir3_create_collect(ctx, xy, 2);
+
+               add_sysval_input_compmask(ctx,
+                               SYSTEM_VALUE_BARYCENTRIC_SAMPLE,
+                               0x3, ij);
+       }
+
+       return ctx->ij_sample;
+}
+
+static struct ir3_instruction *
+get_barycentric_pixel(struct ir3_context *ctx)
+{
+       /* TODO when tgsi_to_nir supports "new-style" FS inputs switch
+        * this to create ij_pixel only on demand:
+        */
+       return ctx->ij_pixel;
+}
+
+static struct ir3_instruction *
+get_frag_coord(struct ir3_context *ctx)
+{
+       if (!ctx->frag_coord) {
+               struct ir3_block *b = ctx->block;
+               struct ir3_instruction *xyzw[4];
+               struct ir3_instruction *hw_frag_coord;
+
+               hw_frag_coord = create_input_compmask(ctx, 0, 0xf);
+               ir3_split_dest(ctx->block, xyzw, hw_frag_coord, 0, 4);
+
+               /* for frag_coord.xy, we get unsigned values.. we need
+                * to subtract (integer) 8 and divide by 16 (right-
+                * shift by 4) then convert to float:
+                *
+                *    sub.s tmp, src, 8
+                *    shr.b tmp, tmp, 4
+                *    mov.u32f32 dst, tmp
+                *
+                */
+               for (int i = 0; i < 2; i++) {
+                       xyzw[i] = ir3_SUB_S(b, xyzw[i], 0,
+                                       create_immed(b, 8), 0);
+                       xyzw[i] = ir3_SHR_B(b, xyzw[i], 0,
+                                       create_immed(b, 4), 0);
+                       xyzw[i] = ir3_COV(b, xyzw[i], TYPE_U32, TYPE_F32);
+               }
+
+               ctx->frag_coord = ir3_create_collect(ctx, xyzw, 4);
+
+               add_sysval_input_compmask(ctx,
+                               SYSTEM_VALUE_FRAG_COORD,
+                               0xf, hw_frag_coord);
+
+               ctx->so->frag_coord = true;
+       }
+
+       return ctx->frag_coord;
+}
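
The unpack described in the comment, as standalone arithmetic; this assumes the hardware hands x/y in 1/16-pixel fixed point with the sample sitting at the pixel center (offset 8), which is what subtract-8-then-shift-4 implies:

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
        uint32_t hw_x = 16 * 5 + 8;        /* pixel 5, center, in .4 fixed point */
        uint32_t px   = (hw_x - 8) >> 4;   /* sub.s 8; shr.b 4                   */
        float    fx   = (float)px;         /* cov.u32f32                         */
        assert(px == 5 && fx == 5.0f);
        return 0;
    }
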
+
 static void
 emit_intrinsic(struct ir3_context *ctx, nir_intrinsic_instr *intr)
 {
@@ -1587,7 +1233,6 @@ emit_intrinsic(struct ir3_context *ctx, nir_intrinsic_instr *intr)
        struct ir3_instruction **dst;
        struct ir3_instruction * const *src;
        struct ir3_block *b = ctx->block;
-       nir_const_value *const_offset;
        int idx, comp;
 
        if (info->has_dest) {
@@ -1600,40 +1245,101 @@ emit_intrinsic(struct ir3_context *ctx, nir_intrinsic_instr *intr)
        switch (intr->intrinsic) {
        case nir_intrinsic_load_uniform:
                idx = nir_intrinsic_base(intr);
-               const_offset = nir_src_as_const_value(intr->src[0]);
-               if (const_offset) {
-                       idx += const_offset->u32[0];
+               if (nir_src_is_const(intr->src[0])) {
+                       idx += nir_src_as_uint(intr->src[0]);
                        for (int i = 0; i < intr->num_components; i++) {
-                               unsigned n = idx * 4 + i;
-                               dst[i] = create_uniform(b, n);
+                               dst[i] = create_uniform_typed(b, idx + i,
+                                       nir_dest_bit_size(intr->dest) < 32 ? TYPE_F16 : TYPE_F32);
                        }
                } else {
                        src = ir3_get_src(ctx, &intr->src[0]);
                        for (int i = 0; i < intr->num_components; i++) {
-                               int n = idx * 4 + i;
-                               dst[i] = create_uniform_indirect(b, n,
-                                               ir3_get_addr(ctx, src[0], 4));
+                               dst[i] = create_uniform_indirect(b, idx + i,
+                                               ir3_get_addr(ctx, src[0], 1));
                        }
                        /* NOTE: if relative addressing is used, we set
                         * constlen in the compiler (to worst-case value)
                         * since we don't know in the assembler what the max
                         * addr reg value can be:
                         */
-                       ctx->so->constlen = ctx->s->num_uniforms;
+                       ctx->so->constlen = MAX2(ctx->so->constlen,
+                                       ctx->so->shader->ubo_state.size / 16);
                }
                break;
        case nir_intrinsic_load_ubo:
                emit_intrinsic_load_ubo(ctx, intr, dst);
                break;
+       case nir_intrinsic_load_frag_coord:
+               ir3_split_dest(b, dst, get_frag_coord(ctx), 0, 4);
+               break;
+       case nir_intrinsic_load_sample_pos_from_id: {
+               /* NOTE: blob seems to always use TYPE_F16 and then cov.f16f32,
+                * but that doesn't seem necessary.
+                */
+               struct ir3_instruction *offset =
+                       ir3_RGETPOS(b, ir3_get_src(ctx, &intr->src[0])[0], 0);
+               offset->regs[0]->wrmask = 0x3;
+               offset->cat5.type = TYPE_F32;
+
+               ir3_split_dest(b, dst, offset, 0, 2);
+
+               break;
+       }
+       case nir_intrinsic_load_size_ir3:
+               if (!ctx->ij_size) {
+                       ctx->ij_size = create_input(ctx, 0);
+
+                       add_sysval_input(ctx, SYSTEM_VALUE_BARYCENTRIC_SIZE,
+                                       ctx->ij_size);
+               }
+               dst[0] = ctx->ij_size;
+               break;
+       case nir_intrinsic_load_barycentric_centroid:
+               ir3_split_dest(b, dst, get_barycentric_centroid(ctx), 0, 2);
+               break;
+       case nir_intrinsic_load_barycentric_sample:
+               if (ctx->so->key.msaa) {
+                       ir3_split_dest(b, dst, get_barycentric_sample(ctx), 0, 2);
+               } else {
+                       ir3_split_dest(b, dst, get_barycentric_pixel(ctx), 0, 2);
+               }
+               break;
+       case nir_intrinsic_load_barycentric_pixel:
+               ir3_split_dest(b, dst, get_barycentric_pixel(ctx), 0, 2);
+               break;
+       case nir_intrinsic_load_interpolated_input:
+               idx = nir_intrinsic_base(intr);
+               comp = nir_intrinsic_component(intr);
+               src = ir3_get_src(ctx, &intr->src[0]);
+               if (nir_src_is_const(intr->src[1])) {
+                       struct ir3_instruction *coord = ir3_create_collect(ctx, src, 2);
+                       idx += nir_src_as_uint(intr->src[1]);
+                       for (int i = 0; i < intr->num_components; i++) {
+                               unsigned inloc = idx * 4 + i + comp;
+                               if (ctx->so->inputs[idx].bary &&
+                                               !ctx->so->inputs[idx].use_ldlv) {
+                                       dst[i] = ir3_BARY_F(b, create_immed(b, inloc), 0, coord, 0);
+                               } else {
+                                       /* for non-varyings use the pre-setup input, since
+                                        * that is easier than mapping things back to a
+                                        * nir_variable to figure out what it is.
+                                        */
+                                       dst[i] = ctx->ir->inputs[inloc];
+                               }
+                       }
+               } else {
+                       ir3_context_error(ctx, "unhandled");
+               }
+               break;
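
The packed inloc here is the same idx * 4 + component scalar-slot numbering that create_frag_input() now receives up front (see the first hunk). A worked example (base and component values illustrative):

    #include <assert.h>

    int main(void)
    {
        unsigned idx = 2, comp = 1;        /* nir_intrinsic_base() / _component() */
        for (unsigned i = 0; i < 2; i++) {
            unsigned inloc = idx * 4 + i + comp;
            assert(inloc == 9 + i);        /* bary.f reads scalar slots 9 and 10  */
        }
        return 0;
    }
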
        case nir_intrinsic_load_input:
                idx = nir_intrinsic_base(intr);
                comp = nir_intrinsic_component(intr);
-               const_offset = nir_src_as_const_value(intr->src[0]);
-               if (const_offset) {
-                       idx += const_offset->u32[0];
+               if (nir_src_is_const(intr->src[0])) {
+                       idx += nir_src_as_uint(intr->src[0]);
                        for (int i = 0; i < intr->num_components; i++) {
                                unsigned n = idx * 4 + i + comp;
                                dst[i] = ctx->ir->inputs[n];
+                               compile_assert(ctx, ctx->ir->inputs[n]);
                        }
                } else {
                        src = ir3_get_src(ctx, &intr->src[0]);
@@ -1647,26 +1353,36 @@ emit_intrinsic(struct ir3_context *ctx, nir_intrinsic_instr *intr)
                        }
                }
                break;
-       case nir_intrinsic_load_ssbo:
-               emit_intrinsic_load_ssbo(ctx, intr, dst);
+       /* All SSBO intrinsics should have been lowered by 'lower_io_offsets'
+        * pass and replaced by an ir3-specific version that adds the
+        * dword-offset in the last source.
+        */
+       case nir_intrinsic_load_ssbo_ir3:
+               ctx->funcs->emit_intrinsic_load_ssbo(ctx, intr, dst);
                break;
-       case nir_intrinsic_store_ssbo:
-               emit_intrinsic_store_ssbo(ctx, intr);
+       case nir_intrinsic_store_ssbo_ir3:
+               if ((ctx->so->type == MESA_SHADER_FRAGMENT) &&
+                               !ctx->s->info.fs.early_fragment_tests)
+                       ctx->so->no_earlyz = true;
+               ctx->funcs->emit_intrinsic_store_ssbo(ctx, intr);
                break;
        case nir_intrinsic_get_buffer_size:
                emit_intrinsic_ssbo_size(ctx, intr, dst);
                break;
-       case nir_intrinsic_ssbo_atomic_add:
-       case nir_intrinsic_ssbo_atomic_imin:
-       case nir_intrinsic_ssbo_atomic_umin:
-       case nir_intrinsic_ssbo_atomic_imax:
-       case nir_intrinsic_ssbo_atomic_umax:
-       case nir_intrinsic_ssbo_atomic_and:
-       case nir_intrinsic_ssbo_atomic_or:
-       case nir_intrinsic_ssbo_atomic_xor:
-       case nir_intrinsic_ssbo_atomic_exchange:
-       case nir_intrinsic_ssbo_atomic_comp_swap:
-               dst[0] = emit_intrinsic_atomic_ssbo(ctx, intr);
+       case nir_intrinsic_ssbo_atomic_add_ir3:
+       case nir_intrinsic_ssbo_atomic_imin_ir3:
+       case nir_intrinsic_ssbo_atomic_umin_ir3:
+       case nir_intrinsic_ssbo_atomic_imax_ir3:
+       case nir_intrinsic_ssbo_atomic_umax_ir3:
+       case nir_intrinsic_ssbo_atomic_and_ir3:
+       case nir_intrinsic_ssbo_atomic_or_ir3:
+       case nir_intrinsic_ssbo_atomic_xor_ir3:
+       case nir_intrinsic_ssbo_atomic_exchange_ir3:
+       case nir_intrinsic_ssbo_atomic_comp_swap_ir3:
+               if ((ctx->so->type == MESA_SHADER_FRAGMENT) &&
+                               !ctx->s->info.fs.early_fragment_tests)
+                       ctx->so->no_earlyz = true;
+               dst[0] = ctx->funcs->emit_intrinsic_atomic_ssbo(ctx, intr);
                break;
        case nir_intrinsic_load_shared:
                emit_intrinsic_load_shared(ctx, intr, dst);
@@ -1690,20 +1406,28 @@ emit_intrinsic(struct ir3_context *ctx, nir_intrinsic_instr *intr)
                emit_intrinsic_load_image(ctx, intr, dst);
                break;
        case nir_intrinsic_image_deref_store:
-               emit_intrinsic_store_image(ctx, intr);
+               if ((ctx->so->type == MESA_SHADER_FRAGMENT) &&
+                               !ctx->s->info.fs.early_fragment_tests)
+                       ctx->so->no_earlyz = true;
+               ctx->funcs->emit_intrinsic_store_image(ctx, intr);
                break;
        case nir_intrinsic_image_deref_size:
                emit_intrinsic_image_size(ctx, intr, dst);
                break;
        case nir_intrinsic_image_deref_atomic_add:
-       case nir_intrinsic_image_deref_atomic_min:
-       case nir_intrinsic_image_deref_atomic_max:
+       case nir_intrinsic_image_deref_atomic_imin:
+       case nir_intrinsic_image_deref_atomic_umin:
+       case nir_intrinsic_image_deref_atomic_imax:
+       case nir_intrinsic_image_deref_atomic_umax:
        case nir_intrinsic_image_deref_atomic_and:
        case nir_intrinsic_image_deref_atomic_or:
        case nir_intrinsic_image_deref_atomic_xor:
        case nir_intrinsic_image_deref_atomic_exchange:
        case nir_intrinsic_image_deref_atomic_comp_swap:
-               dst[0] = emit_intrinsic_atomic_image(ctx, intr);
+               if ((ctx->so->type == MESA_SHADER_FRAGMENT) &&
+                               !ctx->s->info.fs.early_fragment_tests)
+                       ctx->so->no_earlyz = true;
+               dst[0] = ctx->funcs->emit_intrinsic_atomic_image(ctx, intr);
                break;
        case nir_intrinsic_barrier:
        case nir_intrinsic_memory_barrier:
@@ -1719,9 +1443,8 @@ emit_intrinsic(struct ir3_context *ctx, nir_intrinsic_instr *intr)
        case nir_intrinsic_store_output:
                idx = nir_intrinsic_base(intr);
                comp = nir_intrinsic_component(intr);
-               const_offset = nir_src_as_const_value(intr->src[1]);
-               compile_assert(ctx, const_offset != NULL);
-               idx += const_offset->u32[0];
+               compile_assert(ctx, nir_src_is_const(intr->src[1]));
+               idx += nir_src_as_uint(intr->src[1]);
 
                src = ir3_get_src(ctx, &intr->src[0]);
                for (int i = 0; i < intr->num_components; i++) {
@@ -1756,6 +1479,8 @@ emit_intrinsic(struct ir3_context *ctx, nir_intrinsic_instr *intr)
                dst[0] = ctx->instance_id;
                break;
        case nir_intrinsic_load_sample_id:
+               ctx->so->per_samp = true;
+               /* fall-thru */
        case nir_intrinsic_load_sample_id_no_per_sample:
                if (!ctx->samp_id) {
                        ctx->samp_id = create_input(ctx, 0);
@@ -1844,7 +1569,7 @@ emit_intrinsic(struct ir3_context *ctx, nir_intrinsic_instr *intr)
                array_insert(ctx->ir, ctx->ir->predicates, kill);
 
                array_insert(b, b->keeps, kill);
-               ctx->so->has_kill = true;
+               ctx->so->no_earlyz = true;
 
                break;
        }
@@ -1855,7 +1580,7 @@ emit_intrinsic(struct ir3_context *ctx, nir_intrinsic_instr *intr)
        }
 
        if (info->has_dest)
-               put_dst(ctx, &intr->dest);
+               ir3_put_dst(ctx, &intr->dest);
 }
 
 static void
@@ -1863,10 +1588,19 @@ emit_load_const(struct ir3_context *ctx, nir_load_const_instr *instr)
 {
        struct ir3_instruction **dst = ir3_get_dst_ssa(ctx, &instr->def,
                        instr->def.num_components);
-       type_t type = (instr->def.bit_size < 32) ? TYPE_U16 : TYPE_U32;
 
-       for (int i = 0; i < instr->def.num_components; i++)
-               dst[i] = create_immed_typed(ctx->block, instr->value.u32[i], type);
+       if (instr->def.bit_size < 32) {
+               for (int i = 0; i < instr->def.num_components; i++)
+                       dst[i] = create_immed_typed(ctx->block,
+                                                                               instr->value[i].u16,
+                                                                               TYPE_U16);
+       } else {
+               for (int i = 0; i < instr->def.num_components; i++)
+                       dst[i] = create_immed_typed(ctx->block,
+                                                                               instr->value[i].u32,
+                                                                               TYPE_U32);
+       }
 }
 
 static void
@@ -1926,6 +1660,43 @@ tex_info(nir_tex_instr *tex, unsigned *flagsp, unsigned *coordsp)
        *coordsp = coords;
 }
 
+/* Gets the sampler/texture idx as a hvec2, which could either be dynamic
+ * or immediate (in which case it will get lowered later to a non-.s2en
+ * version of the tex instruction, which encodes tex/samp as immediates).
+ */
+static struct ir3_instruction *
+get_tex_samp_tex_src(struct ir3_context *ctx, nir_tex_instr *tex)
+{
+       int texture_idx = nir_tex_instr_src_index(tex, nir_tex_src_texture_offset);
+       int sampler_idx = nir_tex_instr_src_index(tex, nir_tex_src_sampler_offset);
+       struct ir3_instruction *texture, *sampler;
+
+       if (texture_idx >= 0) {
+               texture = ir3_get_src(ctx, &tex->src[texture_idx].src)[0];
+               texture = ir3_COV(ctx->block, texture, TYPE_U32, TYPE_U16);
+       } else {
+               /* TODO what to do for dynamic case? I guess we only need the
+                * max index for astc srgb workaround so maybe not a problem
+                * to worry about if we don't enable indirect samplers for
+                * a4xx?
+                */
+               ctx->max_texture_index = MAX2(ctx->max_texture_index, tex->texture_index);
+               texture = create_immed_typed(ctx->block, tex->texture_index, TYPE_U16);
+       }
+
+       if (sampler_idx >= 0) {
+               sampler = ir3_get_src(ctx, &tex->src[sampler_idx].src)[0];
+               sampler = ir3_COV(ctx->block, sampler, TYPE_U32, TYPE_U16);
+       } else {
+               sampler = create_immed_typed(ctx->block, tex->sampler_index, TYPE_U16);
+       }
+
+       return ir3_create_collect(ctx, (struct ir3_instruction*[]){
+               sampler,
+               texture,
+       }, 2);
+}
+
 static void
 emit_tex(struct ir3_context *ctx, nir_tex_instr *tex)
 {
@@ -1934,16 +1705,17 @@ emit_tex(struct ir3_context *ctx, nir_tex_instr *tex)
        struct ir3_instruction * const *coord, * const *off, * const *ddx, * const *ddy;
        struct ir3_instruction *lod, *compare, *proj, *sample_index;
        bool has_bias = false, has_lod = false, has_proj = false, has_off = false;
-       unsigned i, coords, flags;
+       unsigned i, coords, flags, ncomp;
        unsigned nsrc0 = 0, nsrc1 = 0;
        type_t type;
        opc_t opc = 0;
 
+       ncomp = nir_dest_num_components(tex->dest);
+
        coord = off = ddx = ddy = NULL;
        lod = proj = compare = sample_index = NULL;
 
-       /* TODO: might just be one component for gathers? */
-       dst = ir3_get_dst(ctx, &tex->dest, 4);
+       dst = ir3_get_dst(ctx, &tex->dest, ncomp);
 
        for (unsigned i = 0; i < tex->num_srcs; i++) {
                switch (tex->src[i].src_type) {
@@ -1978,6 +1750,10 @@ emit_tex(struct ir3_context *ctx, nir_tex_instr *tex)
                case nir_tex_src_ms_index:
                        sample_index = ir3_get_src(ctx, &tex->src[i].src)[0];
                        break;
+               case nir_tex_src_texture_offset:
+               case nir_tex_src_sampler_offset:
+                       /* handled in get_tex_samp_tex_src() */
+                       break;
                default:
                        ir3_context_error(ctx, "Unhandled NIR tex src type: %d\n",
                                        tex->src[i].src_type);
@@ -2004,12 +1780,9 @@ emit_tex(struct ir3_context *ctx, nir_tex_instr *tex)
                case 3:              opc = OPC_GATHER4A; break;
                }
                break;
+       case nir_texop_txf_ms_fb:
        case nir_texop_txf_ms:   opc = OPC_ISAMM;    break;
-       case nir_texop_txs:
-       case nir_texop_query_levels:
-       case nir_texop_texture_samples:
-       case nir_texop_samples_identical:
-       case nir_texop_txf_ms_mcs:
+       default:
                ir3_context_error(ctx, "Unhandled NIR tex type: %d\n", tex->op);
                return;
        }
@@ -2033,27 +1806,6 @@ emit_tex(struct ir3_context *ctx, nir_tex_instr *tex)
 
        nsrc0 = i;
 
-       /* NOTE a3xx (and possibly a4xx?) might be different, using isaml
-        * with scaled x coord according to requested sample:
-        */
-       if (tex->op == nir_texop_txf_ms) {
-               if (ctx->compiler->txf_ms_with_isaml) {
-                       /* the samples are laid out in x dimension as
-                        *     0 1 2 3
-                        * x_ms = (x << ms) + sample_index;
-                        */
-                       struct ir3_instruction *ms;
-                       ms = create_immed(b, (ctx->samples >> (2 * tex->texture_index)) & 3);
-
-                       src0[0] = ir3_SHL_B(b, src0[0], 0, ms, 0);
-                       src0[0] = ir3_ADD_U(b, src0[0], 0, sample_index, 0);
-
-                       opc = OPC_ISAML;
-               } else {
-                       src0[nsrc0++] = sample_index;
-               }
-       }
-
        /* scale up integer coords for TXF based on the LOD */
        if (ctx->compiler->unminify_coords && (opc == OPC_ISAML)) {
                assert(has_lod);
@@ -2064,9 +1816,12 @@ emit_tex(struct ir3_context *ctx, nir_tex_instr *tex)
        if (coords == 1) {
                /* hw doesn't do 1d, so we treat it as 2d with
                 * height of 1, and patch up the y coord.
-                * TODO: y coord should be (int)0 in some cases..
                 */
-               src0[nsrc0++] = create_immed(b, fui(0.5));
+               if (is_isam(opc)) {
+                       src0[nsrc0++] = create_immed(b, 0);
+               } else {
+                       src0[nsrc0++] = create_immed(b, fui(0.5));
+               }
        }
 
        if (tex->is_shadow && tex->op != nir_texop_lod)
@@ -2076,7 +1831,7 @@ emit_tex(struct ir3_context *ctx, nir_tex_instr *tex)
                struct ir3_instruction *idx = coord[coords];
 
                /* the array coord for cube arrays needs 0.5 added to it */
-               if (ctx->compiler->array_index_add_half && (opc != OPC_ISAML))
+               if (ctx->compiler->array_index_add_half && !is_isam(opc))
                        idx = ir3_ADD_F(b, idx, 0, create_immed(b, fui(0.5)), 0);
 
                src0[nsrc0++] = idx;
@@ -2101,6 +1856,27 @@ emit_tex(struct ir3_context *ctx, nir_tex_instr *tex)
                        src0[nsrc0++] = create_immed(b, fui(0.0));
        }
 
+       /* NOTE: a3xx (and possibly a4xx?) might be different, using isaml
+        * with the x coord scaled according to the requested sample:
+        */
+       if (opc == OPC_ISAMM) {
+               if (ctx->compiler->txf_ms_with_isaml) {
+                       /* the samples are laid out in x dimension as
+                        *     0 1 2 3
+                        * x_ms = (x << ms) + sample_index;
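+                        *
+                        * ('ms' here is log2 of the sample count, packed two
+                        * bits per texture in ctx->samples, so eg. at 4x MSAA
+                        * sample 3 of texel x=5 fetches (5 << 2) + 3 = 23)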
+                        */
+                       struct ir3_instruction *ms;
+                       ms = create_immed(b, (ctx->samples >> (2 * tex->texture_index)) & 3);
+
+                       src0[0] = ir3_SHL_B(b, src0[0], 0, ms, 0);
+                       src0[0] = ir3_ADD_U(b, src0[0], 0, sample_index, 0);
+
+                       opc = OPC_ISAML;
+               } else {
+                       src0[nsrc0++] = sample_index;
+               }
+       }
+
        /*
         * second argument (if applicable):
         *  - offsets
@@ -2142,17 +1918,31 @@ emit_tex(struct ir3_context *ctx, nir_tex_instr *tex)
        if (opc == OPC_GETLOD)
                type = TYPE_U32;
 
-       unsigned tex_idx = tex->texture_index;
+       struct ir3_instruction *samp_tex;
+
+       if (tex->op == nir_texop_txf_ms_fb) {
+               /* only expect a single txf_ms_fb per shader: */
+               compile_assert(ctx, !ctx->so->fb_read);
+               compile_assert(ctx, ctx->so->type == MESA_SHADER_FRAGMENT);
+
+               ctx->so->fb_read = true;
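+
+       /* claim the next unused sampler/texture slot (num_samp) for the
+        * fb read; both u16 elements of the samp_tex collect point at it:
+        */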
+               samp_tex = ir3_create_collect(ctx, (struct ir3_instruction*[]){
+                       create_immed_typed(ctx->block, ctx->so->num_samp, TYPE_U16),
+                       create_immed_typed(ctx->block, ctx->so->num_samp, TYPE_U16),
+               }, 2);
 
-       ctx->max_texture_index = MAX2(ctx->max_texture_index, tex_idx);
+               ctx->so->num_samp++;
+       } else {
+               samp_tex = get_tex_samp_tex_src(ctx, tex);
+       }
 
        struct ir3_instruction *col0 = ir3_create_collect(ctx, src0, nsrc0);
        struct ir3_instruction *col1 = ir3_create_collect(ctx, src1, nsrc1);
 
-       sam = ir3_SAM(b, opc, type, 0b1111, flags,
-                       tex_idx, tex_idx, col0, col1);
+       sam = ir3_SAM(b, opc, type, MASK(ncomp), flags,
+                       samp_tex, col0, col1);
 
-       if ((ctx->astc_srgb & (1 << tex_idx)) && !nir_tex_instr_is_query(tex)) {
+       if ((ctx->astc_srgb & (1 << tex->texture_index)) && !nir_tex_instr_is_query(tex)) {
                /* only need first 3 components: */
                sam->regs[0]->wrmask = 0x7;
                ir3_split_dest(b, dst, sam, 0, 3);
@@ -2161,7 +1951,7 @@ emit_tex(struct ir3_context *ctx, nir_tex_instr *tex)
                 * texture state:
                 */
                sam = ir3_SAM(b, opc, type, 0b1000, flags,
-                               tex_idx, tex_idx, col0, col1);
+                               samp_tex, col0, col1);
 
                array_insert(ctx->ir, ctx->ir->astc_srgb, sam);
 
@@ -2169,7 +1959,7 @@ emit_tex(struct ir3_context *ctx, nir_tex_instr *tex)
                ir3_split_dest(b, &dst[3], sam, 3, 1);
        } else {
                /* normal (non-workaround) case: */
-               ir3_split_dest(b, dst, sam, 0, 4);
+               ir3_split_dest(b, dst, sam, 0, ncomp);
        }
 
        /* GETLOD returns results in 4.8 fixed point */
@@ -2183,7 +1973,7 @@ emit_tex(struct ir3_context *ctx, nir_tex_instr *tex)
                }
        }
 
-       put_dst(ctx, &tex->dest);
+       ir3_put_dst(ctx, &tex->dest);
 }
 
 static void
@@ -2195,7 +1985,7 @@ emit_tex_query_levels(struct ir3_context *ctx, nir_tex_instr *tex)
        dst = ir3_get_dst(ctx, &tex->dest, 1);
 
        sam = ir3_SAM(b, OPC_GETINFO, TYPE_U32, 0b0100, 0,
-                       tex->texture_index, tex->texture_index, NULL, NULL);
+                       get_tex_samp_tex_src(ctx, tex), NULL, NULL);
 
        /* even though there is only one component, since it ends
         * up in .z rather than .x, we need a split_dest()
@@ -2208,7 +1998,7 @@ emit_tex_query_levels(struct ir3_context *ctx, nir_tex_instr *tex)
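+       /* some generations report (levels - 1) from getinfo, so fix up: */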
        if (ctx->compiler->levels_add_one)
                dst[0] = ir3_ADD_U(b, dst[0], 0, create_immed(b, 1), 0);
 
-       put_dst(ctx, &tex->dest);
+       ir3_put_dst(ctx, &tex->dest);
 }
 
 static void
@@ -2235,7 +2025,7 @@ emit_tex_txs(struct ir3_context *ctx, nir_tex_instr *tex)
        lod = ir3_get_src(ctx, &tex->src[0].src)[0];
 
        sam = ir3_SAM(b, OPC_GETSIZE, TYPE_U32, 0b1111, flags,
-                       tex->texture_index, tex->texture_index, lod, NULL);
+                       get_tex_samp_tex_src(ctx, tex), lod, NULL);
 
        ir3_split_dest(b, dst, sam, 0, 4);
 
@@ -2252,7 +2042,7 @@ emit_tex_txs(struct ir3_context *ctx, nir_tex_instr *tex)
                }
        }
 
-       put_dst(ctx, &tex->dest);
+       ir3_put_dst(ctx, &tex->dest);
 }
 
 static void
@@ -2328,7 +2118,6 @@ get_block(struct ir3_context *ctx, const nir_block *nblock)
 {
        struct ir3_block *block;
        struct hash_entry *hentry;
-       unsigned i;
 
        hentry = _mesa_hash_table_search(ctx->block_ht, nblock);
        if (hentry)
@@ -2338,12 +2127,9 @@ get_block(struct ir3_context *ctx, const nir_block *nblock)
        block->nblock = nblock;
        _mesa_hash_table_insert(ctx->block_ht, nblock, block);
 
-       block->predecessors_count = nblock->predecessors->entries;
-       block->predecessors = ralloc_array_size(block,
-               sizeof(block->predecessors[0]), block->predecessors_count);
-       i = 0;
+       block->predecessors = _mesa_pointer_set_create(block);
        set_foreach(nblock->predecessors, sentry) {
-               block->predecessors[i++] = get_block(ctx, sentry->key);
+               _mesa_set_add(block->predecessors, get_block(ctx, sentry->key));
        }
 
        return block;
@@ -2397,6 +2183,7 @@ static void
 emit_loop(struct ir3_context *ctx, nir_loop *nloop)
 {
        emit_cf_list(ctx, &nloop->body);
+       ctx->so->loops++;
 }
 
 static void
@@ -2459,7 +2246,6 @@ emit_cf_list(struct ir3_context *ctx, struct exec_list *list)
 static void
 emit_stream_out(struct ir3_context *ctx)
 {
-       struct ir3_shader_variant *v = ctx->so;
        struct ir3 *ir = ctx->ir;
        struct ir3_stream_output_info *strmout =
                        &ctx->so->shader->stream_output;
@@ -2517,10 +2303,11 @@ emit_stream_out(struct ir3_context *ctx)
         * stripped out in the backend.
         */
        for (unsigned i = 0; i < IR3_MAX_SO_BUFFERS; i++) {
+               struct ir3_const_state *const_state = &ctx->so->shader->const_state;
                unsigned stride = strmout->stride[i];
                struct ir3_instruction *base, *off;
 
-               base = create_uniform(ctx->block, regid(v->constbase.tfbo, i));
+               base = create_uniform(ctx->block, regid(const_state->offsets.tfbo, i));
 
                /* 24-bit should be enough: */
                off = ir3_MUL_U(ctx->block, vtxcnt, 0,
@@ -2589,46 +2376,6 @@ emit_function(struct ir3_context *ctx, nir_function_impl *impl)
        ir3_END(ctx->block);
 }
 
-static struct ir3_instruction *
-create_frag_coord(struct ir3_context *ctx, unsigned comp)
-{
-       struct ir3_block *block = ctx->block;
-       struct ir3_instruction *instr;
-
-       if (!ctx->frag_coord) {
-               ctx->frag_coord = create_input_compmask(ctx, 0, 0xf);
-               /* defer add_sysval_input() until after all inputs created */
-       }
-
-       ir3_split_dest(block, &instr, ctx->frag_coord, comp, 1);
-
-       switch (comp) {
-       case 0: /* .x */
-       case 1: /* .y */
-               /* for frag_coord, we get unsigned values.. we need
-                * to subtract (integer) 8 and divide by 16 (right-
-                * shift by 4) then convert to float:
-                *
-                *    sub.s tmp, src, 8
-                *    shr.b tmp, tmp, 4
-                *    mov.u32f32 dst, tmp
-                *
-                */
-               instr = ir3_SUB_S(block, instr, 0,
-                               create_immed(block, 8), 0);
-               instr = ir3_SHR_B(block, instr, 0,
-                               create_immed(block, 4), 0);
-               instr = ir3_COV(block, instr, TYPE_U32, TYPE_F32);
-
-               return instr;
-       case 2: /* .z */
-       case 3: /* .w */
-       default:
-               /* seems that we can use these as-is: */
-               return instr;
-       }
-}
-
 static void
 setup_input(struct ir3_context *ctx, nir_variable *in)
 {
@@ -2649,16 +2396,21 @@ setup_input(struct ir3_context *ctx, nir_variable *in)
        so->inputs[n].compmask = (1 << (ncomp + frac)) - 1;
        so->inputs_count = MAX2(so->inputs_count, n + 1);
        so->inputs[n].interpolate = in->data.interpolation;
+       so->inputs[n].ncomp = ncomp;
 
        if (ctx->so->type == MESA_SHADER_FRAGMENT) {
+
+               /* if any varyings have the 'sample' qualifier, that triggers us
+                * to run in per-sample mode:
+                */
+               so->per_samp |= in->data.sample;
+
                for (int i = 0; i < ncomp; i++) {
                        struct ir3_instruction *instr = NULL;
                        unsigned idx = (n * 4) + i + frac;
 
                        if (slot == VARYING_SLOT_POS) {
-                               so->inputs[n].bary = false;
-                               so->frag_coord = true;
-                               instr = create_frag_coord(ctx, i);
+                               ir3_context_error(ctx, "fragcoord should be a sysval!\n");
                        } else if (slot == VARYING_SLOT_PNTC) {
                                /* see for example st_nir_fixup_varying_slots().. this is
                                 * maybe a bit mesa/st specific.  But we need things to line
@@ -2670,10 +2422,8 @@ setup_input(struct ir3_context *ctx, nir_variable *in)
                                 */
                                so->inputs[n].slot = VARYING_SLOT_VAR8;
                                so->inputs[n].bary = true;
-                               instr = create_frag_input(ctx, false);
+                               instr = create_frag_input(ctx, false, idx);
                        } else {
-                               bool use_ldlv = false;
-
                                /* detect the special case for front/back colors where
                                 * we need to do flat vs smooth shading depending on
                                 * rast state:
@@ -2694,12 +2444,12 @@ setup_input(struct ir3_context *ctx, nir_variable *in)
                                if (ctx->compiler->flat_bypass) {
                                        if ((so->inputs[n].interpolate == INTERP_MODE_FLAT) ||
                                                        (so->inputs[n].rasterflat && ctx->so->key.rasterflat))
-                                               use_ldlv = true;
+                                               so->inputs[n].use_ldlv = true;
                                }
 
                                so->inputs[n].bary = true;
 
-                               instr = create_frag_input(ctx, use_ldlv);
+                               instr = create_frag_input(ctx, so->inputs[n].use_ldlv, idx);
                        }
 
                        compile_assert(ctx, idx < ctx->ir->ninputs);
@@ -2721,6 +2471,92 @@ setup_input(struct ir3_context *ctx, nir_variable *in)
        }
 }
 
+/* Initially we assign non-packed inlocs for varyings, as we don't really
+ * know up-front which components will be unused.  After all the compilation
+ * stages we scan the shader to see which components are actually used, and
+ * re-pack the inlocs to eliminate unneeded varyings.
+ */
+static void
+pack_inlocs(struct ir3_context *ctx)
+{
+       struct ir3_shader_variant *so = ctx->so;
+       uint8_t used_components[so->inputs_count];
+
+       memset(used_components, 0, sizeof(used_components));
+
+       /*
+        * First Step: scan shader to find which bary.f/ldlv remain:
+        */
+
+       list_for_each_entry (struct ir3_block, block, &ctx->ir->block_list, node) {
+               list_for_each_entry (struct ir3_instruction, instr, &block->instr_list, node) {
+                       if (is_input(instr)) {
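+                               /* setup_input() assigns provisional inlocs as
+                                * (n * 4) + component, so recover both here:
+                                */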
+                               unsigned inloc = instr->regs[1]->iim_val;
+                               unsigned i = inloc / 4;
+                               unsigned j = inloc % 4;
+
+                               compile_assert(ctx, instr->regs[1]->flags & IR3_REG_IMMED);
+                               compile_assert(ctx, i < so->inputs_count);
+
+                               used_components[i] |= 1 << j;
+                       }
+               }
+       }
+
+       /*
+        * Second Step: reassign varying inloc/slots:
+        */
+
+       unsigned actual_in = 0;
+       unsigned inloc = 0;
+
+       for (unsigned i = 0; i < so->inputs_count; i++) {
+               unsigned compmask = 0, maxcomp = 0;
+
+               so->inputs[i].ncomp = 0;
+               so->inputs[i].inloc = inloc;
+               so->inputs[i].bary = false;
+
+               for (unsigned j = 0; j < 4; j++) {
+                       if (!(used_components[i] & (1 << j)))
+                               continue;
+
+                       compmask |= (1 << j);
+                       actual_in++;
+                       so->inputs[i].ncomp++;
+                       maxcomp = j + 1;
+
+                       /* at this point, since used_components[i] mask is only
+                        * considering varyings (ie. not sysvals) we know this
+                        * is a varying:
+                        */
+                       so->inputs[i].bary = true;
+               }
+
+               if (so->inputs[i].bary) {
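+                       /* inloc space stays contiguous per varying, so claim all
+                        * components up to the highest used one:
+                        */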
+                       so->varying_in++;
+                       so->inputs[i].compmask = (1 << maxcomp) - 1;
+                       inloc += maxcomp;
+               }
+       }
+
+       /*
+        * Third Step: reassign packed inlocs:
+        */
+
+       list_for_each_entry (struct ir3_block, block, &ctx->ir->block_list, node) {
+               list_for_each_entry (struct ir3_instruction, instr, &block->instr_list, node) {
+                       if (is_input(instr)) {
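+                               /* rewrite the provisional (n * 4) + c inloc to the
+                                * packed one assigned above:
+                                */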
+                               unsigned inloc = instr->regs[1]->iim_val;
+                               unsigned i = inloc / 4;
+                               unsigned j = inloc % 4;
+
+                               instr->regs[1]->iim_val = so->inputs[i].inloc + j;
+                       }
+               }
+       }
+}
+
 static void
 setup_output(struct ir3_context *ctx, nir_variable *out)
 {
@@ -2740,6 +2576,9 @@ setup_output(struct ir3_context *ctx, nir_variable *out)
                case FRAG_RESULT_COLOR:
                        so->color0_mrt = 1;
                        break;
+               case FRAG_RESULT_SAMPLE_MASK:
+                       so->writes_smask = true;
+                       break;
                default:
                        if (slot >= FRAG_RESULT_DATA0)
                                break;
@@ -2833,7 +2672,7 @@ emit_instructions(struct ir3_context *ctx)
         */
        ninputs += max_sysvals[ctx->so->type];
 
-       ctx->ir = ir3_create(ctx->compiler, ninputs, noutputs);
+       ctx->ir = ir3_create(ctx->compiler, ctx->so->type, ninputs, noutputs);
 
        /* Create inputs in first block: */
        ctx->block = get_block(ctx, nir_start_block(fxn));
@@ -2844,6 +2683,12 @@ emit_instructions(struct ir3_context *ctx)
 
        /* for fragment shader, the vcoord input register is used as the
         * base for bary.f varying fetch instrs:
+        *
+        * TODO defer creating ctx->ij_pixel and corresponding sysvals
+        * until emit_intrinsic when we know they are actually needed.
+        * For now, we defer creating ctx->ij_centroid, etc, since we
+        * only need ij_pixel for "old style" varying inputs (ie.
+        * tgsi_to_nir)
         */
        struct ir3_instruction *vcoord = NULL;
        if (ctx->so->type == MESA_SHADER_FRAGMENT) {
@@ -2852,7 +2697,7 @@ emit_instructions(struct ir3_context *ctx)
                vcoord = create_input_compmask(ctx, 0, 0x3);
                ir3_split_dest(ctx->block, xy, vcoord, 0, 2);
 
-               ctx->frag_vcoord = ir3_create_collect(ctx, xy, 2);
+               ctx->ij_pixel = ir3_create_collect(ctx, xy, 2);
        }
 
        /* Setup inputs: */
@@ -2864,23 +2709,23 @@ emit_instructions(struct ir3_context *ctx)
         * because sysvals need to be appended after varyings:
         */
        if (vcoord) {
-               add_sysval_input_compmask(ctx, SYSTEM_VALUE_VARYING_COORD,
+               add_sysval_input_compmask(ctx, SYSTEM_VALUE_BARYCENTRIC_PIXEL,
                                0x3, vcoord);
        }
 
-       if (ctx->frag_coord) {
-               add_sysval_input_compmask(ctx, SYSTEM_VALUE_FRAG_COORD,
-                               0xf, ctx->frag_coord);
-       }
-
        /* Setup outputs: */
        nir_foreach_variable(var, &ctx->s->outputs) {
                setup_output(ctx, var);
        }
 
-       /* Setup registers (which should only be arrays): */
-       nir_foreach_register(reg, &ctx->s->registers) {
-               ir3_declare_array(ctx, reg);
+       /* Find # of samplers: */
+       nir_foreach_variable(var, &ctx->s->uniforms) {
+               ctx->so->num_samp += glsl_type_get_sampler_count(var->type);
+               /* just assume that we'll be reading from images; if an image
+                * is write-only we wouldn't need to count it, but there isn't
+                * an obvious way to know that here:
+                */
+               ctx->so->num_samp += glsl_type_get_image_count(var->type);
        }
 
        /* NOTE: need to do something more clever when we support >1 fxn */
@@ -2981,7 +2826,7 @@ ir3_compile_shader_nir(struct ir3_compiler *compiler,
        struct ir3_context *ctx;
        struct ir3 *ir;
        struct ir3_instruction **inputs;
-       unsigned i, actual_in, inloc;
+       unsigned i;
        int ret = 0, max_bary;
 
        assert(!so->ir);
@@ -3059,6 +2904,32 @@ ir3_compile_shader_nir(struct ir3_compiler *compiler,
        if (so->binning_pass && (ctx->compiler->gpu_id >= 600))
                fixup_binning_pass(ctx);
 
+       /* for a6xx+, binning and draw pass VS use the same VBO state, so we
+        * need to make sure not to remove any inputs that are used by
+        * the nonbinning VS.
+        */
+       if (ctx->compiler->gpu_id >= 600 && so->binning_pass) {
+               debug_assert(so->type == MESA_SHADER_VERTEX);
+               for (int i = 0; i < ir->ninputs; i++) {
+                       struct ir3_instruction *in = ir->inputs[i];
+
+                       if (!in)
+                               continue;
+
+                       unsigned n = i / 4;
+                       unsigned c = i % 4;
+
+                       debug_assert(n < so->nonbinning->inputs_count);
+
+                       if (so->nonbinning->inputs[n].sysval)
+                               continue;
+
+                       /* be sure to keep inputs, even if only the nonbinning
+                        * (draw) VS uses them:
+                        */
+                       if (so->nonbinning->inputs[n].compmask & (1 << c))
+                               array_insert(in->block, in->block->keeps, in);
+               }
+       }
+
        /* Insert mov if there's same instruction for each output.
         * eg. dEQP-GLES31.functional.shaders.opaque_type_indexing.sampler.const_expression.vertex.sampler2dshadow
         */
@@ -3097,18 +2968,25 @@ ir3_compile_shader_nir(struct ir3_compiler *compiler,
                ir3_print(ir);
        }
 
+       /* do Sethi–Ullman numbering before scheduling: */
+       ir3_sun(ir);
+
        ret = ir3_sched(ir);
        if (ret) {
                DBG("SCHED failed!");
                goto out;
        }
 
+       if (compiler->gpu_id >= 600) {
+               ir3_a6xx_fixup_atomic_dests(ir, so);
+       }
+
        if (ir3_shader_debug & IR3_DBG_OPTMSGS) {
                printf("AFTER SCHED:\n");
                ir3_print(ir);
        }
 
-       ret = ir3_ra(ir, so->type, so->frag_coord, so->frag_face);
+       ret = ir3_ra(so);
        if (ret) {
                DBG("RA failed!");
                goto out;
@@ -3119,6 +2997,9 @@ ir3_compile_shader_nir(struct ir3_compiler *compiler,
                ir3_print(ir);
        }
 
+       if (so->type == MESA_SHADER_FRAGMENT)
+               pack_inlocs(ctx);
+
        /* fixup input/outputs: */
        for (i = 0; i < so->outputs_count; i++) {
                /* sometimes we get outputs that don't write the .x coord, like:
@@ -3133,41 +3014,34 @@ ir3_compile_shader_nir(struct ir3_compiler *compiler,
                        struct ir3_instruction *instr = ir->outputs[(i*4) + j];
                        if (instr) {
                                so->outputs[i].regid = instr->regs[0]->num;
+                               so->outputs[i].half  = !!(instr->regs[0]->flags & IR3_REG_HALF);
                                break;
                        }
                }
        }
 
        /* Note that some or all channels of an input may be unused: */
-       actual_in = 0;
-       inloc = 0;
        for (i = 0; i < so->inputs_count; i++) {
-               unsigned j, reg = regid(63,0), compmask = 0, maxcomp = 0;
-               so->inputs[i].ncomp = 0;
-               so->inputs[i].inloc = inloc;
+               unsigned j, reg = regid(63,0);
+               bool half = false;
                for (j = 0; j < 4; j++) {
                        struct ir3_instruction *in = inputs[(i*4) + j];
-                       if (in && !(in->flags & IR3_INSTR_UNUSED)) {
-                               compmask |= (1 << j);
-                               reg = in->regs[0]->num - j;
-                               actual_in++;
-                               so->inputs[i].ncomp++;
-                               if ((so->type == MESA_SHADER_FRAGMENT) && so->inputs[i].bary) {
-                                       /* assign inloc: */
-                                       assert(in->regs[1]->flags & IR3_REG_IMMED);
-                                       in->regs[1]->iim_val = inloc + j;
-                                       maxcomp = j + 1;
-                               }
+
+                       if (!in)
+                               continue;
+
+                       if (in->flags & IR3_INSTR_UNUSED)
+                               continue;
+
+                       reg = in->regs[0]->num - j;
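+
+                       /* track (and sanity check) half-precision; an input's
+                        * components aren't expected to mix half and full regs:
+                        */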
+                       if (half) {
+                               compile_assert(ctx, in->regs[0]->flags & IR3_REG_HALF);
+                       } else {
+                               half = !!(in->regs[0]->flags & IR3_REG_HALF);
                        }
                }
-               if ((so->type == MESA_SHADER_FRAGMENT) && compmask && so->inputs[i].bary) {
-                       so->varying_in++;
-                       so->inputs[i].compmask = (1 << maxcomp) - 1;
-                       inloc += maxcomp;
-               } else if (!so->inputs[i].sysval) {
-                       so->inputs[i].compmask = compmask;
-               }
                so->inputs[i].regid = reg;
+               so->inputs[i].half  = half;
        }
 
        if (ctx->astc_srgb)
@@ -3176,7 +3050,7 @@ ir3_compile_shader_nir(struct ir3_compiler *compiler,
        /* We need to do legalize after (for frag shader's) the "bary.f"
         * offsets (inloc) have been assigned.
         */
-       ir3_legalize(ir, &so->num_samp, &so->has_ssbo, &max_bary);
+       ir3_legalize(ir, &so->has_ssbo, &so->need_pixlod, &max_bary);
 
        if (ir3_shader_debug & IR3_DBG_OPTMSGS) {
                printf("AFTER LEGALIZE:\n");
@@ -3186,11 +3060,11 @@ ir3_compile_shader_nir(struct ir3_compiler *compiler,
        so->branchstack = ctx->max_stack;
 
        /* For FS, total_in is the number of varying components fetched via bary.f: */
-       if (so->type == MESA_SHADER_VERTEX)
-               so->total_in = actual_in;
-       else
+       if (so->type == MESA_SHADER_FRAGMENT)
                so->total_in = max_bary + 1;
 
+       so->max_sun = ir->max_sun;
+
 out:
        if (ret) {
                if (so->ir)