freedreno/ir3: don't DCE ij_pix if used for pre-fs-texture-fetch
[mesa.git] / src / freedreno / ir3 / ir3_compiler_nir.c
index afab76ab8c8c491c3c436e66d7e98d503072b809..39bef63a7803d6547eaabbf1a0ef637832fa4da3 100644 (file)
@@ -68,7 +68,7 @@ create_input_compmask(struct ir3_context *ctx, unsigned n, unsigned compmask)
        struct ir3_instruction *in;
 
        in = ir3_instr_create(ctx->in_block, OPC_META_INPUT);
-       in->inout.block = ctx->in_block;
+       in->input.sysval = ~0;
        ir3_reg_create(in, n, 0);
 
        in->regs[0]->wrmask = compmask;
@@ -83,19 +83,19 @@ create_input(struct ir3_context *ctx, unsigned n)
 }
 
 static struct ir3_instruction *
-create_frag_input(struct ir3_context *ctx, bool use_ldlv)
+create_frag_input(struct ir3_context *ctx, bool use_ldlv, unsigned n)
 {
        struct ir3_block *block = ctx->block;
        struct ir3_instruction *instr;
-       /* actual inloc is assigned and fixed up later: */
-       struct ir3_instruction *inloc = create_immed(block, 0);
+       /* packed inloc is fixed up later: */
+       struct ir3_instruction *inloc = create_immed(block, n);
 
        if (use_ldlv) {
                instr = ir3_LDLV(block, inloc, 0, create_immed(block, 1), 0);
                instr->cat6.type = TYPE_U32;
                instr->cat6.iim_val = 1;
        } else {
-               instr = ir3_BARY_F(block, inloc, 0, ctx->frag_vcoord, 0);
+               instr = ir3_BARY_F(block, inloc, 0, ctx->ij_pixel, 0);
                instr->regs[2]->wrmask = 0x3;
        }
 
@@ -107,7 +107,8 @@ create_driver_param(struct ir3_context *ctx, enum ir3_driver_param dp)
 {
        /* first four vec4 sysval's reserved for UBOs: */
        /* NOTE: dp is in scalar, but there can be >4 dp components: */
-       unsigned n = ctx->so->constbase.driver_param;
+       struct ir3_const_state *const_state = &ctx->so->shader->const_state;
+       unsigned n = const_state->offsets.driver_param;
        unsigned r = regid(n + dp / 4, dp % 4);
        return create_uniform(ctx->block, r);
 }
@@ -293,6 +294,8 @@ emit_alu(struct ir3_context *ctx, nir_alu_instr *alu)
        unsigned bs[info->num_inputs];     /* bit size */
        struct ir3_block *b = ctx->block;
        unsigned dst_sz, wrmask;
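+       /* dst_type is used for the mov's below so that sub-32b results
+        * stay in half registers:
+        */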
+       type_t dst_type = nir_dest_bit_size(alu->dest.dest) < 32 ?
+                       TYPE_U16 : TYPE_U32;
 
        if (alu->dest.dest.is_ssa) {
                dst_sz = alu->dest.dest.ssa.num_components;
@@ -320,8 +323,8 @@ emit_alu(struct ir3_context *ctx, nir_alu_instr *alu)
 
                        src[i] = ir3_get_src(ctx, &asrc->src)[asrc->swizzle[0]];
                        if (!src[i])
-                               src[i] = create_immed(ctx->block, 0);
-                       dst[i] = ir3_MOV(b, src[i], TYPE_U32);
+                               src[i] = create_immed_typed(ctx->block, 0, dst_type);
+                       dst[i] = ir3_MOV(b, src[i], dst_type);
                }
 
                ir3_put_dst(ctx, &alu->dest.dest);
@@ -331,14 +334,13 @@ emit_alu(struct ir3_context *ctx, nir_alu_instr *alu)
        /* We also get mov's with more than one component, so handle
         * those specially:
         */
-       if ((alu->op == nir_op_imov) || (alu->op == nir_op_fmov)) {
-               type_t type = (alu->op == nir_op_imov) ? TYPE_U32 : TYPE_F32;
+       if (alu->op == nir_op_mov) {
                nir_alu_src *asrc = &alu->src[0];
                struct ir3_instruction *const *src0 = ir3_get_src(ctx, &asrc->src);
 
                for (unsigned i = 0; i < dst_sz; i++) {
                        if (wrmask & (1 << i)) {
-                               dst[i] = ir3_MOV(b, src0[asrc->swizzle[i]], type);
+                               dst[i] = ir3_MOV(b, src0[asrc->swizzle[i]], dst_type);
                        } else {
                                dst[i] = NULL;
                        }
@@ -385,12 +387,19 @@ emit_alu(struct ir3_context *ctx, nir_alu_instr *alu)
        case nir_op_u2u8:
                dst[0] = create_cov(ctx, src[0], bs[0], alu->op);
                break;
+       case nir_op_fquantize2f16:
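+               /* quantize by round-tripping the value through f16: */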
+               dst[0] = create_cov(ctx,
+                                                       create_cov(ctx, src[0], 32, nir_op_f2f16),
+                                                       16, nir_op_f2f32);
+               break;
        case nir_op_f2b32:
                dst[0] = ir3_CMPS_F(b, src[0], 0, create_immed(b, fui(0.0)), 0);
                dst[0]->cat2.condition = IR3_COND_NE;
                dst[0] = ir3_n2b(b, dst[0]);
                break;
        case nir_op_b2f16:
+               dst[0] = ir3_COV(b, ir3_b2n(b, src[0]), TYPE_U32, TYPE_F16);
+               break;
        case nir_op_b2f32:
                dst[0] = ir3_COV(b, ir3_b2n(b, src[0]), TYPE_U32, TYPE_F32);
                break;
@@ -429,7 +438,7 @@ emit_alu(struct ir3_context *ctx, nir_alu_instr *alu)
                                (list_length(&alu->src[0].src.ssa->uses) == 1) &&
                                ((opc_cat(src[0]->opc) == 2) || (opc_cat(src[0]->opc) == 3))) {
                        src[0]->flags |= IR3_INSTR_SAT;
-                       dst[0] = ir3_MOV(b, src[0], TYPE_U32);
+                       dst[0] = ir3_MOV(b, src[0], dst_type);
                } else {
                        /* otherwise generate a max.f that saturates.. blob does
                         * similar (generating a cat2 mov using max.f)
@@ -451,10 +460,12 @@ emit_alu(struct ir3_context *ctx, nir_alu_instr *alu)
                dst[0] = ir3_MAD_F32(b, src[0], 0, src[1], 0, src[2], 0);
                break;
        case nir_op_fddx:
+       case nir_op_fddx_coarse:
                dst[0] = ir3_DSX(b, src[0], 0);
                dst[0]->cat5.type = TYPE_F32;
                break;
        case nir_op_fddy:
+       case nir_op_fddy_coarse:
                dst[0] = ir3_DSY(b, src[0], 0);
                dst[0]->cat5.type = TYPE_F32;
                break;
@@ -538,16 +549,11 @@ emit_alu(struct ir3_context *ctx, nir_alu_instr *alu)
        case nir_op_umin:
                dst[0] = ir3_MIN_U(b, src[0], 0, src[1], 0);
                break;
-       case nir_op_imul:
-               /*
-                * dst = (al * bl) + (ah * bl << 16) + (al * bh << 16)
-                *   mull.u tmp0, a, b           ; mul low, i.e. al * bl
-                *   madsh.m16 tmp1, a, b, tmp0  ; mul-add shift high mix, i.e. ah * bl << 16
-                *   madsh.m16 dst, b, a, tmp1   ; i.e. al * bh << 16
-                */
-               dst[0] = ir3_MADSH_M16(b, src[1], 0, src[0], 0,
-                                       ir3_MADSH_M16(b, src[0], 0, src[1], 0,
-                                               ir3_MULL_U(b, src[0], 0, src[1], 0), 0), 0);
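+       /* note: 32b imul is now lowered in NIR to an equivalent
+        * umul_low + imadsh_mix16 sequence (the open-coded expansion
+        * removed above):
+        */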
+       case nir_op_umul_low:
+               dst[0] = ir3_MULL_U(b, src[0], 0, src[1], 0);
+               break;
+       case nir_op_imadsh_mix16:
+               dst[0] = ir3_MADSH_M16(b, src[0], 0, src[1], 0, src[2], 0);
                break;
        case nir_op_ineg:
                dst[0] = ir3_ABSNEG_S(b, src[0], IR3_REG_SNEG);
@@ -680,12 +686,12 @@ emit_intrinsic_load_ubo(struct ir3_context *ctx, nir_intrinsic_instr *intr,
 {
        struct ir3_block *b = ctx->block;
        struct ir3_instruction *base_lo, *base_hi, *addr, *src0, *src1;
-       nir_const_value *const_offset;
        /* UBO addresses are the first driver params, but subtract 2 here to
         * account for nir_lower_uniforms_to_ubo rebasing the UBOs such that UBO 0
         * is the uniforms: */
-       unsigned ubo = regid(ctx->so->constbase.ubo, 0) - 2;
-       const unsigned ptrsz = ir3_pointer_size(ctx);
+       struct ir3_const_state *const_state = &ctx->so->shader->const_state;
+       unsigned ubo = regid(const_state->offsets.ubo, 0) - 2;
+       const unsigned ptrsz = ir3_pointer_size(ctx->compiler);
 
        int off = 0;
 
@@ -698,14 +704,20 @@ emit_intrinsic_load_ubo(struct ir3_context *ctx, nir_intrinsic_instr *intr,
        } else {
                base_lo = create_uniform_indirect(b, ubo, ir3_get_addr(ctx, src0, ptrsz));
                base_hi = create_uniform_indirect(b, ubo + 1, ir3_get_addr(ctx, src0, ptrsz));
+
+               /* NOTE: since relative addressing is used, make sure constlen is
+                * at least big enough to cover all the UBO addresses, since the
+                * assembler won't know what the max address reg is.
+                */
+               ctx->so->constlen = MAX2(ctx->so->constlen,
+                       const_state->offsets.ubo + (ctx->s->info.num_ubos * ptrsz));
        }
 
        /* note: on 32bit gpu's base_hi is ignored and DCE'd */
        addr = base_lo;
 
-       const_offset = nir_src_as_const_value(intr->src[1]);
-       if (const_offset) {
-               off += const_offset->u32[0];
+       if (nir_src_is_const(intr->src[1])) {
+               off += nir_src_as_uint(intr->src[1]);
        } else {
                /* For load_ubo_indirect, second src is indirect offset: */
                src1 = ir3_get_src(ctx, &intr->src[1])[0];
@@ -740,9 +752,9 @@ emit_intrinsic_load_ubo(struct ir3_context *ctx, nir_intrinsic_instr *intr,
 
        for (int i = 0; i < intr->num_components; i++) {
                struct ir3_instruction *load =
-                               ir3_LDG(b, addr, 0, create_immed(b, 1), 0);
+                       ir3_LDG(b, addr, 0, create_immed(b, 1), 0, /* num components */
+                                       create_immed(b, off + i * 4), 0);
                load->cat6.type = TYPE_U32;
-               load->cat6.src_offset = off + i * 4;     /* byte offset */
                dst[i] = load;
        }
 }
@@ -753,11 +765,12 @@ emit_intrinsic_ssbo_size(struct ir3_context *ctx, nir_intrinsic_instr *intr,
                struct ir3_instruction **dst)
 {
        /* SSBO size stored as a const starting at ssbo_sizes: */
-       unsigned blk_idx = nir_src_as_const_value(intr->src[0])->u32[0];
-       unsigned idx = regid(ctx->so->constbase.ssbo_sizes, 0) +
-               ctx->so->const_layout.ssbo_size.off[blk_idx];
+       struct ir3_const_state *const_state = &ctx->so->shader->const_state;
+       unsigned blk_idx = nir_src_as_uint(intr->src[0]);
+       unsigned idx = regid(const_state->offsets.ssbo_sizes, 0) +
+               const_state->ssbo_size.off[blk_idx];
 
-       debug_assert(ctx->so->const_layout.ssbo_size.mask & (1 << blk_idx));
+       debug_assert(const_state->ssbo_size.mask & (1 << blk_idx));
 
        dst[0] = create_uniform(ctx->block, idx);
 }
@@ -774,8 +787,10 @@ emit_intrinsic_load_shared(struct ir3_context *ctx, nir_intrinsic_instr *intr,
        offset = ir3_get_src(ctx, &intr->src[0])[0];
        base   = nir_intrinsic_base(intr);
 
-       ldl = ir3_LDL(b, offset, 0, create_immed(b, intr->num_components), 0);
-       ldl->cat6.src_offset = base;
+       ldl = ir3_LDL(b, offset, 0,
+                       create_immed(b, intr->num_components), 0,
+                       create_immed(b, base), 0);
+
        ldl->cat6.type = utype_dst(intr->dest);
        ldl->regs[0]->wrmask = MASK(intr->num_components);
 
@@ -828,6 +843,75 @@ emit_intrinsic_store_shared(struct ir3_context *ctx, nir_intrinsic_instr *intr)
        }
 }
 
+/* src[] = { offset }. const_index[] = { base } */
+static void
+emit_intrinsic_load_shared_ir3(struct ir3_context *ctx, nir_intrinsic_instr *intr,
+               struct ir3_instruction **dst)
+{
+       struct ir3_block *b = ctx->block;
+       struct ir3_instruction *load, *offset;
+       unsigned base;
+
+       offset = ir3_get_src(ctx, &intr->src[0])[0];
+       base   = nir_intrinsic_base(intr);
+
+       load = ir3_LDLW(b, offset, 0,
+                       create_immed(b, intr->num_components), 0,
+                       create_immed(b, base), 0);
+
+       load->cat6.type = utype_dst(intr->dest);
+       load->regs[0]->wrmask = MASK(intr->num_components);
+
+       load->barrier_class = IR3_BARRIER_SHARED_R;
+       load->barrier_conflict = IR3_BARRIER_SHARED_W;
+
+       ir3_split_dest(b, dst, load, 0, intr->num_components);
+}
+
+/* src[] = { value, offset }. const_index[] = { base, write_mask } */
+static void
+emit_intrinsic_store_shared_ir3(struct ir3_context *ctx, nir_intrinsic_instr *intr)
+{
+       struct ir3_block *b = ctx->block;
+       struct ir3_instruction *store, *offset;
+       struct ir3_instruction * const *value;
+       unsigned base, wrmask;
+
+       value  = ir3_get_src(ctx, &intr->src[0]);
+       offset = ir3_get_src(ctx, &intr->src[1])[0];
+
+       base   = nir_intrinsic_base(intr);
+       wrmask = nir_intrinsic_write_mask(intr);
+
+       /* Combine groups of consecutive enabled channels in one write
+        * message. We use ffs to find the first enabled channel and then ffs on
+        * the bit-inverse, down-shifted writemask to determine the length of
+        * the block of enabled bits.
+        *
+        * (trick stolen from i965's fs_visitor::nir_emit_cs_intrinsic())
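+        *
+        * e.g. wrmask=0b1101 emits one store for component {x} and then a
+        * second store for {z,w}.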
+        */
+       while (wrmask) {
+               unsigned first_component = ffs(wrmask) - 1;
+               unsigned length = ffs(~(wrmask >> first_component)) - 1;
+
+               store = ir3_STLW(b, offset, 0,
+                       ir3_create_collect(ctx, &value[first_component], length), 0,
+                       create_immed(b, length), 0);
+
+               store->cat6.dst_offset = first_component + base;
+               store->cat6.type = utype_src(intr->src[0]);
+               store->barrier_class = IR3_BARRIER_SHARED_W;
+               store->barrier_conflict = IR3_BARRIER_SHARED_R | IR3_BARRIER_SHARED_W;
+
+               array_insert(b, b->keeps, store);
+
+               /* Clear the bits in the writemask that we just wrote, then try
+                * again to see if more channels are left.
+                */
+               wrmask &= (15 << (first_component + length));
+       }
+}
+
 /*
  * CS shared variable atomic intrinsics
  *
@@ -1008,8 +1092,9 @@ emit_intrinsic_image_size(struct ir3_context *ctx, nir_intrinsic_instr *intr,
                 * bytes-per-pixel should have been emitted in 2nd slot of
                 * image_dims. See ir3_shader::emit_image_dims().
                 */
-               unsigned cb = regid(ctx->so->constbase.image_dims, 0) +
-                       ctx->so->const_layout.image_dims.off[var->data.driver_location];
+               struct ir3_const_state *const_state = &ctx->so->shader->const_state;
+               unsigned cb = regid(const_state->offsets.image_dims, 0) +
+                       const_state->image_dims.off[var->data.driver_location];
                struct ir3_instruction *aux = create_uniform(b, cb + 1);
 
                tmp[0] = ir3_SHR_B(b, tmp[0], 0, aux, 0);
@@ -1046,6 +1131,7 @@ emit_intrinsic_barrier(struct ir3_context *ctx, nir_intrinsic_instr *intr)
                barrier->cat7.g = true;
                barrier->cat7.r = true;
                barrier->cat7.w = true;
+               barrier->cat7.l = true;
                barrier->barrier_class = IR3_BARRIER_IMAGE_W |
                                IR3_BARRIER_BUFFER_W;
                barrier->barrier_conflict =
@@ -1112,6 +1198,9 @@ static void add_sysval_input_compmask(struct ir3_context *ctx,
        unsigned r = regid(so->inputs_count, 0);
        unsigned n = so->inputs_count++;
 
+       assert(instr->opc == OPC_META_INPUT);
+       instr->input.sysval = slot;
+
        so->inputs[n].sysval = true;
        so->inputs[n].slot = slot;
        so->inputs[n].compmask = compmask;
@@ -1129,6 +1218,95 @@ static void add_sysval_input(struct ir3_context *ctx, gl_system_value slot,
        add_sysval_input_compmask(ctx, slot, 0x1, instr);
 }
 
+static struct ir3_instruction *
+get_barycentric_centroid(struct ir3_context *ctx)
+{
+       if (!ctx->ij_centroid) {
+               struct ir3_instruction *xy[2];
+               struct ir3_instruction *ij;
+
+               ij = create_input_compmask(ctx, 0, 0x3);
+               ir3_split_dest(ctx->block, xy, ij, 0, 2);
+
+               ctx->ij_centroid = ir3_create_collect(ctx, xy, 2);
+
+               add_sysval_input_compmask(ctx,
+                               SYSTEM_VALUE_BARYCENTRIC_CENTROID,
+                               0x3, ij);
+       }
+
+       return ctx->ij_centroid;
+}
+
+static struct ir3_instruction *
+get_barycentric_sample(struct ir3_context *ctx)
+{
+       if (!ctx->ij_sample) {
+               struct ir3_instruction *xy[2];
+               struct ir3_instruction *ij;
+
+               ij = create_input_compmask(ctx, 0, 0x3);
+               ir3_split_dest(ctx->block, xy, ij, 0, 2);
+
+               ctx->ij_sample = ir3_create_collect(ctx, xy, 2);
+
+               add_sysval_input_compmask(ctx,
+                               SYSTEM_VALUE_BARYCENTRIC_SAMPLE,
+                               0x3, ij);
+       }
+
+       return ctx->ij_sample;
+}
+
+static struct ir3_instruction  *
+get_barycentric_pixel(struct ir3_context *ctx)
+{
+       /* TODO when tgsi_to_nir supports "new-style" FS inputs switch
+        * this to create ij_pixel only on demand:
+        */
+       return ctx->ij_pixel;
+}
+
+static struct ir3_instruction *
+get_frag_coord(struct ir3_context *ctx)
+{
+       if (!ctx->frag_coord) {
+               struct ir3_block *b = ctx->block;
+               struct ir3_instruction *xyzw[4];
+               struct ir3_instruction *hw_frag_coord;
+
+               hw_frag_coord = create_input_compmask(ctx, 0, 0xf);
+               ir3_split_dest(ctx->block, xyzw, hw_frag_coord, 0, 4);
+
+               /* for frag_coord.xy, we get unsigned values.. we need
+                * to subtract (integer) 8 and divide by 16 (right-
+                * shift by 4) then convert to float:
+                *
+                *    sub.s tmp, src, 8
+                *    shr.b tmp, tmp, 4
+                *    mov.u32f32 dst, tmp
+                *
+                */
+               for (int i = 0; i < 2; i++) {
+                       xyzw[i] = ir3_SUB_S(b, xyzw[i], 0,
+                                       create_immed(b, 8), 0);
+                       xyzw[i] = ir3_SHR_B(b, xyzw[i], 0,
+                                       create_immed(b, 4), 0);
+                       xyzw[i] = ir3_COV(b, xyzw[i], TYPE_U32, TYPE_F32);
+               }
+
+               ctx->frag_coord = ir3_create_collect(ctx, xyzw, 4);
+
+               add_sysval_input_compmask(ctx,
+                               SYSTEM_VALUE_FRAG_COORD,
+                               0xf, hw_frag_coord);
+
+               ctx->so->frag_coord = true;
+       }
+
+       return ctx->frag_coord;
+}
+
 static void
 emit_intrinsic(struct ir3_context *ctx, nir_intrinsic_instr *intr)
 {
@@ -1136,7 +1314,6 @@ emit_intrinsic(struct ir3_context *ctx, nir_intrinsic_instr *intr)
        struct ir3_instruction **dst;
        struct ir3_instruction * const *src;
        struct ir3_block *b = ctx->block;
-       nir_const_value *const_offset;
        int idx, comp;
 
        if (info->has_dest) {
@@ -1146,14 +1323,17 @@ emit_intrinsic(struct ir3_context *ctx, nir_intrinsic_instr *intr)
                dst = NULL;
        }
 
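+       /* const_state offsets are in vec4 units; multiply by 4 to get the
+        * scalar const-file index expected by create_uniform():
+        */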
+       const unsigned primitive_param = ctx->so->shader->const_state.offsets.primitive_param * 4;
+       const unsigned primitive_map = ctx->so->shader->const_state.offsets.primitive_map * 4;
+
        switch (intr->intrinsic) {
        case nir_intrinsic_load_uniform:
                idx = nir_intrinsic_base(intr);
-               const_offset = nir_src_as_const_value(intr->src[0]);
-               if (const_offset) {
-                       idx += const_offset->u32[0];
+               if (nir_src_is_const(intr->src[0])) {
+                       idx += nir_src_as_uint(intr->src[0]);
                        for (int i = 0; i < intr->num_components; i++) {
-                               dst[i] = create_uniform(b, idx + i);
+                               dst[i] = create_uniform_typed(b, idx + i,
+                                       nir_dest_bit_size(intr->dest) < 32 ? TYPE_F16 : TYPE_F32);
                        }
                } else {
                        src = ir3_get_src(ctx, &intr->src[0]);
@@ -1166,21 +1346,104 @@ emit_intrinsic(struct ir3_context *ctx, nir_intrinsic_instr *intr)
                         * since we don't know in the assembler what the max
                         * addr reg value can be:
                         */
-                       ctx->so->constlen = ctx->s->num_uniforms;
+                       ctx->so->constlen = MAX2(ctx->so->constlen,
+                                       ctx->so->shader->ubo_state.size / 16);
                }
                break;
+
+       case nir_intrinsic_load_vs_primitive_stride_ir3:
+               dst[0] = create_uniform(b, primitive_param + 0);
+               break;
+       case nir_intrinsic_load_vs_vertex_stride_ir3:
+               dst[0] = create_uniform(b, primitive_param + 1);
+               break;
+       case nir_intrinsic_load_primitive_location_ir3:
+               idx = nir_intrinsic_driver_location(intr);
+               dst[0] = create_uniform(b, primitive_map + idx);
+               break;
+
+       case nir_intrinsic_load_gs_header_ir3:
+               dst[0] = ctx->gs_header;
+               break;
+
+       case nir_intrinsic_load_primitive_id:
+               dst[0] = ctx->primitive_id;
+               break;
+
        case nir_intrinsic_load_ubo:
                emit_intrinsic_load_ubo(ctx, intr, dst);
                break;
+       case nir_intrinsic_load_frag_coord:
+               ir3_split_dest(b, dst, get_frag_coord(ctx), 0, 4);
+               break;
+       case nir_intrinsic_load_sample_pos_from_id: {
+               /* NOTE: blob seems to always use TYPE_F16 and then cov.f16f32,
+                * but that doesn't seem necessary.
+                */
+               struct ir3_instruction *offset =
+                       ir3_RGETPOS(b, ir3_get_src(ctx, &intr->src[0])[0], 0);
+               offset->regs[0]->wrmask = 0x3;
+               offset->cat5.type = TYPE_F32;
+
+               ir3_split_dest(b, dst, offset, 0, 2);
+
+               break;
+       }
+       case nir_intrinsic_load_size_ir3:
+               if (!ctx->ij_size) {
+                       ctx->ij_size = create_input(ctx, 0);
+
+                       add_sysval_input(ctx, SYSTEM_VALUE_BARYCENTRIC_SIZE,
+                                       ctx->ij_size);
+               }
+               dst[0] = ctx->ij_size;
+               break;
+       case nir_intrinsic_load_barycentric_centroid:
+               ir3_split_dest(b, dst, get_barycentric_centroid(ctx), 0, 2);
+               break;
+       case nir_intrinsic_load_barycentric_sample:
+               if (ctx->so->key.msaa) {
+                       ir3_split_dest(b, dst, get_barycentric_sample(ctx), 0, 2);
+               } else {
+                       ir3_split_dest(b, dst, get_barycentric_pixel(ctx), 0, 2);
+               }
+               break;
+       case nir_intrinsic_load_barycentric_pixel:
+               ir3_split_dest(b, dst, get_barycentric_pixel(ctx), 0, 2);
+               break;
+       case nir_intrinsic_load_interpolated_input:
+               idx = nir_intrinsic_base(intr);
+               comp = nir_intrinsic_component(intr);
+               src = ir3_get_src(ctx, &intr->src[0]);
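+               /* src[0] is the barycentric ij pair to interpolate with; the
+                * inlocs written here are provisional and get repacked in
+                * pack_inlocs():
+                */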
+               if (nir_src_is_const(intr->src[1])) {
+                       struct ir3_instruction *coord = ir3_create_collect(ctx, src, 2);
+                       idx += nir_src_as_uint(intr->src[1]);
+                       for (int i = 0; i < intr->num_components; i++) {
+                               unsigned inloc = idx * 4 + i + comp;
+                               if (ctx->so->inputs[idx].bary &&
+                                               !ctx->so->inputs[idx].use_ldlv) {
+                                       dst[i] = ir3_BARY_F(b, create_immed(b, inloc), 0, coord, 0);
+                               } else {
+                                       /* for non-varyings use the pre-setup input, since
+                                        * that is easier than mapping things back to a
+                                        * nir_variable to figure out what it is.
+                                        */
+                                       dst[i] = ctx->ir->inputs[inloc];
+                               }
+                       }
+               } else {
+                       ir3_context_error(ctx, "unhandled");
+               }
+               break;
        case nir_intrinsic_load_input:
                idx = nir_intrinsic_base(intr);
                comp = nir_intrinsic_component(intr);
-               const_offset = nir_src_as_const_value(intr->src[0]);
-               if (const_offset) {
-                       idx += const_offset->u32[0];
+               if (nir_src_is_const(intr->src[0])) {
+                       idx += nir_src_as_uint(intr->src[0]);
                        for (int i = 0; i < intr->num_components; i++) {
                                unsigned n = idx * 4 + i + comp;
                                dst[i] = ctx->ir->inputs[n];
+                               compile_assert(ctx, ctx->ir->inputs[n]);
                        }
                } else {
                        src = ir3_get_src(ctx, &intr->src[0]);
@@ -1256,8 +1519,10 @@ emit_intrinsic(struct ir3_context *ctx, nir_intrinsic_instr *intr)
                emit_intrinsic_image_size(ctx, intr, dst);
                break;
        case nir_intrinsic_image_deref_atomic_add:
-       case nir_intrinsic_image_deref_atomic_min:
-       case nir_intrinsic_image_deref_atomic_max:
+       case nir_intrinsic_image_deref_atomic_imin:
+       case nir_intrinsic_image_deref_atomic_umin:
+       case nir_intrinsic_image_deref_atomic_imax:
+       case nir_intrinsic_image_deref_atomic_umax:
        case nir_intrinsic_image_deref_atomic_and:
        case nir_intrinsic_image_deref_atomic_or:
        case nir_intrinsic_image_deref_atomic_xor:
@@ -1282,9 +1547,8 @@ emit_intrinsic(struct ir3_context *ctx, nir_intrinsic_instr *intr)
        case nir_intrinsic_store_output:
                idx = nir_intrinsic_base(intr);
                comp = nir_intrinsic_component(intr);
-               const_offset = nir_src_as_const_value(intr->src[1]);
-               compile_assert(ctx, const_offset != NULL);
-               idx += const_offset->u32[0];
+               compile_assert(ctx, nir_src_is_const(intr->src[1]));
+               idx += nir_src_as_uint(intr->src[1]);
 
                src = ir3_get_src(ctx, &intr->src[0]);
                for (int i = 0; i < intr->num_components; i++) {
@@ -1319,6 +1583,8 @@ emit_intrinsic(struct ir3_context *ctx, nir_intrinsic_instr *intr)
                dst[0] = ctx->instance_id;
                break;
        case nir_intrinsic_load_sample_id:
+               ctx->so->per_samp = true;
+               /* fall-thru */
        case nir_intrinsic_load_sample_id_no_per_sample:
                if (!ctx->samp_id) {
                        ctx->samp_id = create_input(ctx, 0);
@@ -1411,6 +1677,12 @@ emit_intrinsic(struct ir3_context *ctx, nir_intrinsic_instr *intr)
 
                break;
        }
+       case nir_intrinsic_load_shared_ir3:
+               emit_intrinsic_load_shared_ir3(ctx, intr, dst);
+               break;
+       case nir_intrinsic_store_shared_ir3:
+               emit_intrinsic_store_shared_ir3(ctx, intr);
+               break;
        default:
                ir3_context_error(ctx, "Unhandled intrinsic type: %s\n",
                                nir_intrinsic_infos[intr->intrinsic].name);
@@ -1426,10 +1698,19 @@ emit_load_const(struct ir3_context *ctx, nir_load_const_instr *instr)
 {
        struct ir3_instruction **dst = ir3_get_dst_ssa(ctx, &instr->def,
                        instr->def.num_components);
-       type_t type = (instr->def.bit_size < 32) ? TYPE_U16 : TYPE_U32;
 
-       for (int i = 0; i < instr->def.num_components; i++)
-               dst[i] = create_immed_typed(ctx->block, instr->value.u32[i], type);
+       if (instr->def.bit_size < 32) {
+               for (int i = 0; i < instr->def.num_components; i++)
+                       dst[i] = create_immed_typed(ctx->block,
+                                                                               instr->value[i].u16,
+                                                                               TYPE_U16);
+       } else {
+               for (int i = 0; i < instr->def.num_components; i++)
+                       dst[i] = create_immed_typed(ctx->block,
+                                                                               instr->value[i].u32,
+                                                                               TYPE_U32);
+       }
+
 }
 
 static void
@@ -1609,12 +1890,9 @@ emit_tex(struct ir3_context *ctx, nir_tex_instr *tex)
                case 3:              opc = OPC_GATHER4A; break;
                }
                break;
+       case nir_texop_txf_ms_fb:
        case nir_texop_txf_ms:   opc = OPC_ISAMM;    break;
-       case nir_texop_txs:
-       case nir_texop_query_levels:
-       case nir_texop_texture_samples:
-       case nir_texop_samples_identical:
-       case nir_texop_txf_ms_mcs:
+       default:
                ir3_context_error(ctx, "Unhandled NIR tex type: %d\n", tex->op);
                return;
        }
@@ -1691,7 +1969,7 @@ emit_tex(struct ir3_context *ctx, nir_tex_instr *tex)
        /* NOTE a3xx (and possibly a4xx?) might be different, using isaml
         * with scaled x coord according to requested sample:
         */
-       if (tex->op == nir_texop_txf_ms) {
+       if (opc == OPC_ISAMM) {
                if (ctx->compiler->txf_ms_with_isaml) {
                        /* the samples are laid out in x dimension as
                         *     0 1 2 3
@@ -1748,9 +2026,26 @@ emit_tex(struct ir3_context *ctx, nir_tex_instr *tex)
        }
 
        if (opc == OPC_GETLOD)
-               type = TYPE_U32;
+               type = TYPE_S32;
+
+       struct ir3_instruction *samp_tex;
+
+       if (tex->op == nir_texop_txf_ms_fb) {
+               /* only expect a single txf_ms_fb per shader: */
+               compile_assert(ctx, !ctx->so->fb_read);
+               compile_assert(ctx, ctx->so->type == MESA_SHADER_FRAGMENT);
+
+               ctx->so->fb_read = true;
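+
+               /* use an extra samp/tex slot, one past the shader's own
+                * samplers, for the fb read (presumably bound there by the
+                * driver):
+                */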
+               samp_tex = ir3_create_collect(ctx, (struct ir3_instruction*[]){
+                       create_immed_typed(ctx->block, ctx->so->num_samp, TYPE_U16),
+                       create_immed_typed(ctx->block, ctx->so->num_samp, TYPE_U16),
+               }, 2);
+
+               ctx->so->num_samp++;
+       } else {
+               samp_tex = get_tex_samp_tex_src(ctx, tex);
+       }
 
-       struct ir3_instruction *samp_tex = get_tex_samp_tex_src(ctx, tex);
        struct ir3_instruction *col0 = ir3_create_collect(ctx, src0, nsrc0);
        struct ir3_instruction *col1 = ir3_create_collect(ctx, src1, nsrc1);
 
@@ -1783,7 +2078,7 @@ emit_tex(struct ir3_context *ctx, nir_tex_instr *tex)
 
                compile_assert(ctx, tex->dest_type == nir_type_float);
                for (i = 0; i < 2; i++) {
-                       dst[i] = ir3_MUL_F(b, ir3_COV(b, dst[i], TYPE_U32, TYPE_F32), 0,
+                       dst[i] = ir3_MUL_F(b, ir3_COV(b, dst[i], TYPE_S32, TYPE_F32), 0,
                                                           factor, 0);
                }
        }
@@ -1792,20 +2087,21 @@ emit_tex(struct ir3_context *ctx, nir_tex_instr *tex)
 }
 
 static void
-emit_tex_query_levels(struct ir3_context *ctx, nir_tex_instr *tex)
+emit_tex_info(struct ir3_context *ctx, nir_tex_instr *tex, unsigned idx)
 {
        struct ir3_block *b = ctx->block;
        struct ir3_instruction **dst, *sam;
 
        dst = ir3_get_dst(ctx, &tex->dest, 1);
 
-       sam = ir3_SAM(b, OPC_GETINFO, TYPE_U32, 0b0100, 0,
+       sam = ir3_SAM(b, OPC_GETINFO, TYPE_U32, 1 << idx, 0,
                        get_tex_samp_tex_src(ctx, tex), NULL, NULL);
 
        /* even though there is only one component, since it ends
-        * up in .z rather than .x, we need a split_dest()
+        * up in .y/.z/.w rather than .x, we need a split_dest()
         */
-       ir3_split_dest(b, dst, sam, 0, 3);
+       if (idx)
+               ir3_split_dest(b, dst, sam, 0, idx + 1);
 
        /* The # of levels comes from getinfo.z. We need to add 1 to it, since
         * the value in TEX_CONST_0 is zero-based.
@@ -1906,7 +2202,10 @@ emit_instr(struct ir3_context *ctx, nir_instr *instr)
                        emit_tex_txs(ctx, tex);
                        break;
                case nir_texop_query_levels:
-                       emit_tex_query_levels(ctx, tex);
+                       emit_tex_info(ctx, tex, 2);
+                       break;
+               case nir_texop_texture_samples:
+                       emit_tex_info(ctx, tex, 3);
                        break;
                default:
                        emit_tex(ctx, tex);
@@ -1933,7 +2232,6 @@ get_block(struct ir3_context *ctx, const nir_block *nblock)
 {
        struct ir3_block *block;
        struct hash_entry *hentry;
-       unsigned i;
 
        hentry = _mesa_hash_table_search(ctx->block_ht, nblock);
        if (hentry)
@@ -1943,12 +2241,9 @@ get_block(struct ir3_context *ctx, const nir_block *nblock)
        block->nblock = nblock;
        _mesa_hash_table_insert(ctx->block_ht, nblock, block);
 
-       block->predecessors_count = nblock->predecessors->entries;
-       block->predecessors = ralloc_array_size(block,
-               sizeof(block->predecessors[0]), block->predecessors_count);
-       i = 0;
+       block->predecessors = _mesa_pointer_set_create(block);
        set_foreach(nblock->predecessors, sentry) {
-               block->predecessors[i++] = get_block(ctx, sentry->key);
+               _mesa_set_add(block->predecessors, get_block(ctx, sentry->key));
        }
 
        return block;
@@ -2002,6 +2297,7 @@ static void
 emit_loop(struct ir3_context *ctx, nir_loop *nloop)
 {
        emit_cf_list(ctx, &nloop->body);
+       ctx->so->loops++;
 }
 
 static void
@@ -2064,7 +2360,6 @@ emit_cf_list(struct ir3_context *ctx, struct exec_list *list)
 static void
 emit_stream_out(struct ir3_context *ctx)
 {
-       struct ir3_shader_variant *v = ctx->so;
        struct ir3 *ir = ctx->ir;
        struct ir3_stream_output_info *strmout =
                        &ctx->so->shader->stream_output;
@@ -2122,10 +2417,11 @@ emit_stream_out(struct ir3_context *ctx)
         * stripped out in the backend.
         */
        for (unsigned i = 0; i < IR3_MAX_SO_BUFFERS; i++) {
+               struct ir3_const_state *const_state = &ctx->so->shader->const_state;
                unsigned stride = strmout->stride[i];
                struct ir3_instruction *base, *off;
 
-               base = create_uniform(ctx->block, regid(v->constbase.tfbo, i));
+               base = create_uniform(ctx->block, regid(const_state->offsets.tfbo, i));
 
                /* 24-bit should be enough: */
                off = ir3_MUL_U(ctx->block, vtxcnt, 0,
@@ -2191,46 +2487,23 @@ emit_function(struct ir3_context *ctx, nir_function_impl *impl)
                emit_stream_out(ctx);
        }
 
-       ir3_END(ctx->block);
-}
-
-static struct ir3_instruction *
-create_frag_coord(struct ir3_context *ctx, unsigned comp)
-{
-       struct ir3_block *block = ctx->block;
-       struct ir3_instruction *instr;
-
-       if (!ctx->frag_coord) {
-               ctx->frag_coord = create_input_compmask(ctx, 0, 0xf);
-               /* defer add_sysval_input() until after all inputs created */
-       }
-
-       ir3_split_dest(block, &instr, ctx->frag_coord, comp, 1);
-
-       switch (comp) {
-       case 0: /* .x */
-       case 1: /* .y */
-               /* for frag_coord, we get unsigned values.. we need
-                * to subtract (integer) 8 and divide by 16 (right-
-                * shift by 4) then convert to float:
-                *
-                *    sub.s tmp, src, 8
-                *    shr.b tmp, tmp, 4
-                *    mov.u32f32 dst, tmp
-                *
-                */
-               instr = ir3_SUB_S(block, instr, 0,
-                               create_immed(block, 8), 0);
-               instr = ir3_SHR_B(block, instr, 0,
-                               create_immed(block, 4), 0);
-               instr = ir3_COV(block, instr, TYPE_U32, TYPE_F32);
-
-               return instr;
-       case 2: /* .z */
-       case 3: /* .w */
-       default:
-               /* seems that we can use these as-is: */
-               return instr;
+       /* Vertex shaders in a tessellation or geometry pipeline treat END as
+        * a NOP, and instead have an epilogue that writes the VS outputs to
+        * local storage, to be read by the HS.  The epilogue then resets the
+        * execution mask (chmask) and chains to the next shader (chsh).
+        */
+       if (ctx->so->type == MESA_SHADER_VERTEX && ctx->so->key.has_gs) {
+               struct ir3_instruction *chmask =
+                       ir3_CHMASK(ctx->block);
+               chmask->barrier_class = IR3_BARRIER_EVERYTHING;
+               chmask->barrier_conflict = IR3_BARRIER_EVERYTHING;
+
+               struct ir3_instruction *chsh =
+                       ir3_CHSH(ctx->block);
+               chsh->barrier_class = IR3_BARRIER_EVERYTHING;
+               chsh->barrier_conflict = IR3_BARRIER_EVERYTHING;
+       } else {
+               ir3_END(ctx->block);
        }
 }
 
@@ -2243,6 +2516,12 @@ setup_input(struct ir3_context *ctx, nir_variable *in)
        unsigned frac = in->data.location_frac;
        unsigned slot = in->data.location;
 
+       /* Inputs are loaded using ldlw or ldg for these stages. */
+       if (ctx->so->type == MESA_SHADER_TESS_CTRL ||
+                       ctx->so->type == MESA_SHADER_TESS_EVAL ||
+                       ctx->so->type == MESA_SHADER_GEOMETRY)
+               return;
+
        /* skip unread inputs, we could end up with (for example), unsplit
         * matrix/etc inputs in the case they are not read, so just silently
         * skip these.
@@ -2256,14 +2535,18 @@ setup_input(struct ir3_context *ctx, nir_variable *in)
        so->inputs[n].interpolate = in->data.interpolation;
 
        if (ctx->so->type == MESA_SHADER_FRAGMENT) {
+
+               /* if any varyings have the 'sample' qualifier, that triggers us
+                * to run in per-sample mode:
+                */
+               so->per_samp |= in->data.sample;
+
                for (int i = 0; i < ncomp; i++) {
                        struct ir3_instruction *instr = NULL;
                        unsigned idx = (n * 4) + i + frac;
 
                        if (slot == VARYING_SLOT_POS) {
-                               so->inputs[n].bary = false;
-                               so->frag_coord = true;
-                               instr = create_frag_coord(ctx, i);
+                               ir3_context_error(ctx, "fragcoord should be a sysval!\n");
                        } else if (slot == VARYING_SLOT_PNTC) {
                                /* see for example st_nir_fixup_varying_slots().. this is
                                 * maybe a bit mesa/st specific.  But we need things to line
@@ -2275,10 +2558,8 @@ setup_input(struct ir3_context *ctx, nir_variable *in)
                                 */
                                so->inputs[n].slot = VARYING_SLOT_VAR8;
                                so->inputs[n].bary = true;
-                               instr = create_frag_input(ctx, false);
+                               instr = create_frag_input(ctx, false, idx);
                        } else {
-                               bool use_ldlv = false;
-
                                /* detect the special case for front/back colors where
                                 * we need to do flat vs smooth shading depending on
                                 * rast state:
@@ -2299,12 +2580,12 @@ setup_input(struct ir3_context *ctx, nir_variable *in)
                                if (ctx->compiler->flat_bypass) {
                                        if ((so->inputs[n].interpolate == INTERP_MODE_FLAT) ||
                                                        (so->inputs[n].rasterflat && ctx->so->key.rasterflat))
-                                               use_ldlv = true;
+                                               so->inputs[n].use_ldlv = true;
                                }
 
                                so->inputs[n].bary = true;
 
-                               instr = create_frag_input(ctx, use_ldlv);
+                               instr = create_frag_input(ctx, so->inputs[n].use_ldlv, idx);
                        }
 
                        compile_assert(ctx, idx < ctx->ir->ninputs);
@@ -2326,6 +2607,90 @@ setup_input(struct ir3_context *ctx, nir_variable *in)
        }
 }
 
+/* Initially we assign non-packed inloc's for varyings, as we don't really
+ * know up-front which components will be unused.  After all the compilation
+ * stages we scan the shader to see which components are actually used, and
+ * re-pack the inlocs to eliminate unneeded varyings.
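+ *
+ * E.g. if only VAR0.x and VAR1.xy remain after DCE, VAR0 is packed to
+ * inloc 0 and VAR1 to inlocs 1..2.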
+ */
+static void
+pack_inlocs(struct ir3_context *ctx)
+{
+       struct ir3_shader_variant *so = ctx->so;
+       uint8_t used_components[so->inputs_count];
+
+       memset(used_components, 0, sizeof(used_components));
+
+       /*
+        * First Step: scan shader to find which bary.f/ldlv remain:
+        */
+
+       list_for_each_entry (struct ir3_block, block, &ctx->ir->block_list, node) {
+               list_for_each_entry (struct ir3_instruction, instr, &block->instr_list, node) {
+                       if (is_input(instr)) {
+                               unsigned inloc = instr->regs[1]->iim_val;
+                               unsigned i = inloc / 4;
+                               unsigned j = inloc % 4;
+
+                               compile_assert(ctx, instr->regs[1]->flags & IR3_REG_IMMED);
+                               compile_assert(ctx, i < so->inputs_count);
+
+                               used_components[i] |= 1 << j;
+                       }
+               }
+       }
+
+       /*
+        * Second Step: reassign varying inloc/slots:
+        */
+
+       unsigned actual_in = 0;
+       unsigned inloc = 0;
+
+       for (unsigned i = 0; i < so->inputs_count; i++) {
+               unsigned compmask = 0, maxcomp = 0;
+
+               so->inputs[i].inloc = inloc;
+               so->inputs[i].bary = false;
+
+               for (unsigned j = 0; j < 4; j++) {
+                       if (!(used_components[i] & (1 << j)))
+                               continue;
+
+                       compmask |= (1 << j);
+                       actual_in++;
+                       maxcomp = j + 1;
+
+                       /* at this point, since used_components[i] mask is only
+                        * considering varyings (ie. not sysvals) we know this
+                        * is a varying:
+                        */
+                       so->inputs[i].bary = true;
+               }
+
+               if (so->inputs[i].bary) {
+                       so->varying_in++;
+                       so->inputs[i].compmask = (1 << maxcomp) - 1;
+                       inloc += maxcomp;
+               }
+       }
+
+       /*
+        * Third Step: reassign packed inloc's:
+        */
+
+       list_for_each_entry (struct ir3_block, block, &ctx->ir->block_list, node) {
+               list_for_each_entry (struct ir3_instruction, instr, &block->instr_list, node) {
+                       if (is_input(instr)) {
+                               unsigned inloc = instr->regs[1]->iim_val;
+                               unsigned i = inloc / 4;
+                               unsigned j = inloc % 4;
+
+                               instr->regs[1]->iim_val = so->inputs[i].inloc + j;
+                       }
+               }
+       }
+}
+
 static void
 setup_output(struct ir3_context *ctx, nir_variable *out)
 {
@@ -2345,13 +2710,17 @@ setup_output(struct ir3_context *ctx, nir_variable *out)
                case FRAG_RESULT_COLOR:
                        so->color0_mrt = 1;
                        break;
+               case FRAG_RESULT_SAMPLE_MASK:
+                       so->writes_smask = true;
+                       break;
                default:
                        if (slot >= FRAG_RESULT_DATA0)
                                break;
                        ir3_context_error(ctx, "unknown FS output name: %s\n",
                                        gl_frag_result_name(slot));
                }
-       } else if (ctx->so->type == MESA_SHADER_VERTEX) {
+       } else if (ctx->so->type == MESA_SHADER_VERTEX ||
+                       ctx->so->type == MESA_SHADER_GEOMETRY) {
                switch (slot) {
                case VARYING_SLOT_POS:
                        so->writes_pos = true;
@@ -2359,6 +2728,11 @@ setup_output(struct ir3_context *ctx, nir_variable *out)
                case VARYING_SLOT_PSIZ:
                        so->writes_psize = true;
                        break;
+               case VARYING_SLOT_PRIMITIVE_ID:
+               case VARYING_SLOT_LAYER:
+               case VARYING_SLOT_GS_VERTEX_FLAGS_IR3:
+                       debug_assert(ctx->so->type == MESA_SHADER_GEOMETRY);
+                       /* fall through */
                case VARYING_SLOT_COL0:
                case VARYING_SLOT_COL1:
                case VARYING_SLOT_BFC0:
@@ -2373,7 +2747,8 @@ setup_output(struct ir3_context *ctx, nir_variable *out)
                                break;
                        if ((VARYING_SLOT_TEX0 <= slot) && (slot <= VARYING_SLOT_TEX7))
                                break;
-                       ir3_context_error(ctx, "unknown VS output name: %s\n",
+                       ir3_context_error(ctx, "unknown %s shader output name: %s\n",
+                                       _mesa_shader_stage_to_string(ctx->so->type),
                                        gl_varying_slot_name(slot));
                }
        } else {
@@ -2419,8 +2794,9 @@ max_drvloc(struct exec_list *vars)
 }
 
 static const unsigned max_sysvals[] = {
-       [MESA_SHADER_FRAGMENT] = 24,  // TODO
        [MESA_SHADER_VERTEX]  = 16,
+       [MESA_SHADER_GEOMETRY] = 16,
+       [MESA_SHADER_FRAGMENT] = 24,  // TODO
        [MESA_SHADER_COMPUTE] = 16, // TODO how many do we actually need?
        [MESA_SHADER_KERNEL]  = 16, // TODO how many do we actually need?
 };
@@ -2437,6 +2813,8 @@ emit_instructions(struct ir3_context *ctx)
        /* we need to leave room for sysvals:
         */
        ninputs += max_sysvals[ctx->so->type];
+       if (ctx->so->type == MESA_SHADER_VERTEX)
+               noutputs += 8; /* gs or tess header + primitive_id */
 
        ctx->ir = ir3_create(ctx->compiler, ctx->so->type, ninputs, noutputs);
 
@@ -2447,8 +2825,22 @@ emit_instructions(struct ir3_context *ctx)
 
        ninputs -= max_sysvals[ctx->so->type];
 
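+       /* When chained into a geometry pipeline, VS and GS both receive the
+        * gs header and primitive id as sysval inputs; these are pre-colored
+        * to fixed registers at RA time (see below):
+        */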
+       if (ctx->so->key.has_gs) {
+               if (ctx->so->type == MESA_SHADER_VERTEX ||
+                       ctx->so->type == MESA_SHADER_GEOMETRY) {
+                       ctx->gs_header = create_input(ctx, 0);
+                       ctx->primitive_id = create_input(ctx, 0);
+               }
+       }
+
        /* for fragment shader, the vcoord input register is used as the
         * base for bary.f varying fetch instrs:
+        *
+        * TODO defer creating ctx->ij_pixel and corresponding sysvals
+        * until emit_intrinsic when we know they are actually needed.
+        * For now, we defer creating ctx->ij_centroid, etc, since we
+        * only need ij_pixel for "old style" varying inputs (ie.
+        * tgsi_to_nir)
         */
        struct ir3_instruction *vcoord = NULL;
        if (ctx->so->type == MESA_SHADER_FRAGMENT) {
@@ -2457,7 +2849,7 @@ emit_instructions(struct ir3_context *ctx)
                vcoord = create_input_compmask(ctx, 0, 0x3);
                ir3_split_dest(ctx->block, xy, vcoord, 0, 2);
 
-               ctx->frag_vcoord = ir3_create_collect(ctx, xy, 2);
+               ctx->ij_pixel = ir3_create_collect(ctx, xy, 2);
        }
 
        /* Setup inputs: */
@@ -2469,20 +2861,44 @@ emit_instructions(struct ir3_context *ctx)
         * because sysvals need to be appended after varyings:
         */
        if (vcoord) {
-               add_sysval_input_compmask(ctx, SYSTEM_VALUE_VARYING_COORD,
+               add_sysval_input_compmask(ctx, SYSTEM_VALUE_BARYCENTRIC_PIXEL,
                                0x3, vcoord);
        }
 
-       if (ctx->frag_coord) {
-               add_sysval_input_compmask(ctx, SYSTEM_VALUE_FRAG_COORD,
-                               0xf, ctx->frag_coord);
-       }
+       if (ctx->primitive_id)
+               add_sysval_input(ctx, SYSTEM_VALUE_PRIMITIVE_ID, ctx->primitive_id);
+       if (ctx->gs_header)
+               add_sysval_input(ctx, SYSTEM_VALUE_GS_HEADER_IR3, ctx->gs_header);
 
        /* Setup outputs: */
        nir_foreach_variable(var, &ctx->s->outputs) {
                setup_output(ctx, var);
        }
 
+       /* Set up the gs header as an output of the vertex shader so that
+        * register allocation won't clobber it before the tess ctrl shader
+        * reads it. */
+       if (ctx->so->type == MESA_SHADER_VERTEX) {
+               struct ir3_shader_variant *so = ctx->so;
+               if (ctx->primitive_id) {
+                       unsigned n = so->outputs_count++;
+                       so->outputs[n].slot = VARYING_SLOT_PRIMITIVE_ID;
+                       so->outputs[n].regid = regid(n, 0);
+                       ctx->ir->outputs[n * 4] = ctx->primitive_id;
+
+                       compile_assert(ctx, n * 4 < ctx->ir->noutputs);
+               }
+
+               if (ctx->gs_header) {
+                       unsigned n = so->outputs_count++;
+                       so->outputs[n].slot = VARYING_SLOT_GS_HEADER_IR3;
+                       so->outputs[n].regid = regid(n, 0);
+                       ctx->ir->outputs[n * 4] = ctx->gs_header;
+
+                       compile_assert(ctx, n * 4 < ctx->ir->noutputs);
+               }
+
+       }
+
        /* Find # of samplers: */
        nir_foreach_variable(var, &ctx->s->uniforms) {
                ctx->so->num_samp += glsl_type_get_sampler_count(var->type);
@@ -2493,11 +2909,6 @@ emit_instructions(struct ir3_context *ctx)
                ctx->so->num_samp += glsl_type_get_image_count(var->type);
        }
 
-       /* Setup registers (which should only be arrays): */
-       nir_foreach_register(reg, &ctx->s->registers) {
-               ir3_declare_array(ctx, reg);
-       }
-
        /* NOTE: need to do something more clever when we support >1 fxn */
        nir_foreach_register(reg, &fxn->registers) {
                ir3_declare_array(ctx, reg);
@@ -2596,7 +3007,7 @@ ir3_compile_shader_nir(struct ir3_compiler *compiler,
        struct ir3_context *ctx;
        struct ir3 *ir;
        struct ir3_instruction **inputs;
-       unsigned i, actual_in, inloc;
+       unsigned i;
        int ret = 0, max_bary;
 
        assert(!so->ir);
@@ -2674,6 +3085,32 @@ ir3_compile_shader_nir(struct ir3_compiler *compiler,
        if (so->binning_pass && (ctx->compiler->gpu_id >= 600))
                fixup_binning_pass(ctx);
 
+       /* for a6xx+, binning and draw pass VS use same VBO state, so we
+        * need to make sure not to remove any inputs that are used by
+        * the nonbinning VS.
+        */
+       if (ctx->compiler->gpu_id >= 600 && so->binning_pass) {
+               debug_assert(so->type == MESA_SHADER_VERTEX);
+               for (int i = 0; i < ir->ninputs; i++) {
+                       struct ir3_instruction *in = ir->inputs[i];
+
+                       if (!in)
+                               continue;
+
+                       unsigned n = i / 4;
+                       unsigned c = i % 4;
+
+                       debug_assert(n < so->nonbinning->inputs_count);
+
+                       if (so->nonbinning->inputs[n].sysval)
+                               continue;
+
+                       /* be sure to keep inputs, even if only used in VS */
+                       if (so->nonbinning->inputs[n].compmask & (1 << c))
+                               array_insert(in->block, in->block->keeps, in);
+               }
+       }
+
        /* Insert mov if there's same instruction for each output.
         * eg. dEQP-GLES31.functional.shaders.opaque_type_indexing.sampler.const_expression.vertex.sampler2dshadow
         */
@@ -2705,7 +3142,7 @@ ir3_compile_shader_nir(struct ir3_compiler *compiler,
                ir3_print(ir);
        }
 
-       ir3_depth(ir);
+       ir3_depth(ir, so);
 
        if (ir3_shader_debug & IR3_DBG_OPTMSGS) {
                printf("AFTER DEPTH:\n");
@@ -2730,7 +3167,44 @@ ir3_compile_shader_nir(struct ir3_compiler *compiler,
                ir3_print(ir);
        }
 
-       ret = ir3_ra(ir, so->type, so->frag_coord, so->frag_face);
+       /* Pre-assign VS inputs on a6xx+ binning pass shader, to align
+        * with draw pass VS, so binning and draw pass can both use the
+        * same VBO state.
+        *
+        * Note that VS inputs are expected to be full precision.
+        */
+       bool pre_assign_inputs = (ir->compiler->gpu_id >= 600) &&
+                       (ir->type == MESA_SHADER_VERTEX) &&
+                       so->binning_pass;
+
+       if (pre_assign_inputs) {
+               for (unsigned i = 0; i < ir->ninputs; i++) {
+                       struct ir3_instruction *instr = ir->inputs[i];
+
+                       if (!instr)
+                               continue;
+
+                       unsigned n = i / 4;
+                       unsigned c = i % 4;
+                       unsigned regid = so->nonbinning->inputs[n].regid + c;
+
+                       instr->regs[0]->num = regid;
+               }
+
+               ret = ir3_ra(so, ir->inputs, ir->ninputs);
+       } else if (ctx->gs_header) {
+               /* We need to have these values in the same registers between VS and GS
+                * since the VS chains to GS and doesn't get the sysvals redelivered.
+                */
+
+               ctx->gs_header->regs[0]->num = 0;
+               ctx->primitive_id->regs[0]->num = 1;
+               struct ir3_instruction *precolor[] = { ctx->gs_header, ctx->primitive_id };
+               ret = ir3_ra(so, precolor, ARRAY_SIZE(precolor));
+       } else {
+               ret = ir3_ra(so, NULL, 0);
+       }
+
        if (ret) {
                DBG("RA failed!");
                goto out;
@@ -2741,6 +3215,9 @@ ir3_compile_shader_nir(struct ir3_compiler *compiler,
                ir3_print(ir);
        }
 
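+       /* for FS, repack the varying inlocs now that we know which
+        * components actually survived DCE (see pack_inlocs()):
+        */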
+       if (so->type == MESA_SHADER_FRAGMENT)
+               pack_inlocs(ctx);
+
        /* fixup input/outputs: */
        for (i = 0; i < so->outputs_count; i++) {
                /* sometimes we get outputs that don't write the .x coord, like:
@@ -2755,41 +3232,34 @@ ir3_compile_shader_nir(struct ir3_compiler *compiler,
                        struct ir3_instruction *instr = ir->outputs[(i*4) + j];
                        if (instr) {
                                so->outputs[i].regid = instr->regs[0]->num;
+                               so->outputs[i].half  = !!(instr->regs[0]->flags & IR3_REG_HALF);
                                break;
                        }
                }
        }
 
        /* Note that some or all channels of an input may be unused: */
-       actual_in = 0;
-       inloc = 0;
        for (i = 0; i < so->inputs_count; i++) {
-               unsigned j, reg = regid(63,0), compmask = 0, maxcomp = 0;
-               so->inputs[i].ncomp = 0;
-               so->inputs[i].inloc = inloc;
+               unsigned j, reg = regid(63,0);
+               bool half = false;
                for (j = 0; j < 4; j++) {
                        struct ir3_instruction *in = inputs[(i*4) + j];
-                       if (in && !(in->flags & IR3_INSTR_UNUSED)) {
-                               compmask |= (1 << j);
-                               reg = in->regs[0]->num - j;
-                               actual_in++;
-                               so->inputs[i].ncomp++;
-                               if ((so->type == MESA_SHADER_FRAGMENT) && so->inputs[i].bary) {
-                                       /* assign inloc: */
-                                       assert(in->regs[1]->flags & IR3_REG_IMMED);
-                                       in->regs[1]->iim_val = inloc + j;
-                                       maxcomp = j + 1;
-                               }
+
+                       if (!in)
+                               continue;
+
+                       if (in->flags & IR3_INSTR_UNUSED)
+                               continue;
+
+                       reg = in->regs[0]->num - j;
+                       if (half) {
+                               compile_assert(ctx, in->regs[0]->flags & IR3_REG_HALF);
+                       } else {
+                               half = !!(in->regs[0]->flags & IR3_REG_HALF);
                        }
                }
-               if ((so->type == MESA_SHADER_FRAGMENT) && compmask && so->inputs[i].bary) {
-                       so->varying_in++;
-                       so->inputs[i].compmask = (1 << maxcomp) - 1;
-                       inloc += maxcomp;
-               } else if (!so->inputs[i].sysval) {
-                       so->inputs[i].compmask = compmask;
-               }
                so->inputs[i].regid = reg;
+               so->inputs[i].half  = half;
        }
 
        if (ctx->astc_srgb)
@@ -2805,12 +3275,23 @@ ir3_compile_shader_nir(struct ir3_compiler *compiler,
                ir3_print(ir);
        }
 
+       /* Set (ss)(sy) on the first instruction of each block in TCS and
+        * GEOMETRY shaders, since we don't know what we might have to wait
+        * on when coming in from VS chsh.
+        */
+       if (so->type == MESA_SHADER_TESS_CTRL ||
+               so->type == MESA_SHADER_GEOMETRY) {
+               list_for_each_entry (struct ir3_block, block, &ir->block_list, node) {
+                       list_for_each_entry (struct ir3_instruction, instr, &block->instr_list, node) {
+                               instr->flags |= IR3_INSTR_SS | IR3_INSTR_SY;
+                               break;
+                       }
+               }
+       }
+
        so->branchstack = ctx->max_stack;
 
        /* Note: for FS, total_in counts the varying components fetched via bary.f: */
-       if (so->type == MESA_SHADER_VERTEX)
-               so->total_in = actual_in;
-       else
+       if (so->type == MESA_SHADER_FRAGMENT)
                so->total_in = max_bary + 1;
 
        so->max_sun = ir->max_sun;