nir: Rename nir_intrinsic_barrier to control_barrier
[mesa.git] src/freedreno/ir3/ir3_compiler_nir.c
index 2341c359324e9178181c9043f1a9103f0473b428..f1ba10456ed78ad6592c982c9f898bdca6cba3ea 100644
@@ -51,9 +51,8 @@ create_indirect_load(struct ir3_context *ctx, unsigned arrsz, int n,
        mov = ir3_instr_create(block, OPC_MOV);
        mov->cat1.src_type = TYPE_U32;
        mov->cat1.dst_type = TYPE_U32;
-       ir3_reg_create(mov, 0, 0);
-       src = ir3_reg_create(mov, 0, IR3_REG_SSA | IR3_REG_RELATIV);
-       src->instr = collect;
+       __ssa_dst(mov);
+       src = __ssa_src(mov, collect, IR3_REG_RELATIV);
        src->size  = arrsz;
        src->array.offset = n;
 
@@ -63,25 +62,19 @@ create_indirect_load(struct ir3_context *ctx, unsigned arrsz, int n,
 }
 
 static struct ir3_instruction *
-create_input_compmask(struct ir3_context *ctx, unsigned n, unsigned compmask)
+create_input(struct ir3_context *ctx, unsigned compmask)
 {
        struct ir3_instruction *in;
 
        in = ir3_instr_create(ctx->in_block, OPC_META_INPUT);
        in->input.sysval = ~0;
-       ir3_reg_create(in, n, 0);
+       __ssa_dst(in)->wrmask = compmask;
 
-       in->regs[0]->wrmask = compmask;
+       array_insert(ctx->ir, ctx->ir->inputs, in);
 
        return in;
 }
 
-static struct ir3_instruction *
-create_input(struct ir3_context *ctx, unsigned n)
-{
-       return create_input_compmask(ctx, n, 0x1);
-}
-
 static struct ir3_instruction *
 create_frag_input(struct ir3_context *ctx, bool use_ldlv, unsigned n)
 {
@@ -392,10 +385,15 @@ emit_alu(struct ir3_context *ctx, nir_alu_instr *alu)
                                                        create_cov(ctx, src[0], 32, nir_op_f2f16),
                                                        16, nir_op_f2f32);
                break;
+       case nir_op_f2b16: {
+               struct ir3_instruction *zero = create_immed_typed(b, 0, TYPE_F16);
+               dst[0] = ir3_CMPS_F(b, src[0], 0, zero, 0);
+               dst[0]->cat2.condition = IR3_COND_NE;
+               break;
+       }
        case nir_op_f2b32:
                dst[0] = ir3_CMPS_F(b, src[0], 0, create_immed(b, fui(0.0)), 0);
                dst[0]->cat2.condition = IR3_COND_NE;
-               dst[0] = ir3_n2b(b, dst[0]);
                break;
        case nir_op_b2f16:
                dst[0] = ir3_COV(b, ir3_b2n(b, src[0]), TYPE_U32, TYPE_F16);
@@ -408,10 +406,15 @@ emit_alu(struct ir3_context *ctx, nir_alu_instr *alu)
        case nir_op_b2i32:
                dst[0] = ir3_b2n(b, src[0]);
                break;
+       case nir_op_i2b16: {
+               struct ir3_instruction *zero = create_immed_typed(b, 0, TYPE_S16);
+               dst[0] = ir3_CMPS_S(b, src[0], 0, zero, 0);
+               dst[0]->cat2.condition = IR3_COND_NE;
+               break;
+       }
        case nir_op_i2b32:
                dst[0] = ir3_CMPS_S(b, src[0], 0, create_immed(b, 0), 0);
                dst[0]->cat2.condition = IR3_COND_NE;
-               dst[0] = ir3_n2b(b, dst[0]);
                break;
 
        case nir_op_fneg:
@@ -470,25 +473,25 @@ emit_alu(struct ir3_context *ctx, nir_alu_instr *alu)
                dst[0]->cat5.type = TYPE_F32;
                break;
+       case nir_op_flt16:
        case nir_op_flt32:
                dst[0] = ir3_CMPS_F(b, src[0], 0, src[1], 0);
                dst[0]->cat2.condition = IR3_COND_LT;
-               dst[0] = ir3_n2b(b, dst[0]);
                break;
+       case nir_op_fge16:
        case nir_op_fge32:
                dst[0] = ir3_CMPS_F(b, src[0], 0, src[1], 0);
                dst[0]->cat2.condition = IR3_COND_GE;
-               dst[0] = ir3_n2b(b, dst[0]);
                break;
+       case nir_op_feq16:
        case nir_op_feq32:
                dst[0] = ir3_CMPS_F(b, src[0], 0, src[1], 0);
                dst[0]->cat2.condition = IR3_COND_EQ;
-               dst[0] = ir3_n2b(b, dst[0]);
                break;
+       case nir_op_fne16:
        case nir_op_fne32:
                dst[0] = ir3_CMPS_F(b, src[0], 0, src[1], 0);
                dst[0]->cat2.condition = IR3_COND_NE;
-               dst[0] = ir3_n2b(b, dst[0]);
                break;
        case nir_op_fceil:
                dst[0] = ir3_CEIL_F(b, src[0], 0);
@@ -585,47 +588,57 @@ emit_alu(struct ir3_context *ctx, nir_alu_instr *alu)
        case nir_op_ushr:
                dst[0] = ir3_SHR_B(b, src[0], 0, src[1], 0);
                break;
+       case nir_op_ilt16:
        case nir_op_ilt32:
                dst[0] = ir3_CMPS_S(b, src[0], 0, src[1], 0);
                dst[0]->cat2.condition = IR3_COND_LT;
-               dst[0] = ir3_n2b(b, dst[0]);
                break;
+       case nir_op_ige16:
        case nir_op_ige32:
                dst[0] = ir3_CMPS_S(b, src[0], 0, src[1], 0);
                dst[0]->cat2.condition = IR3_COND_GE;
-               dst[0] = ir3_n2b(b, dst[0]);
                break;
+       case nir_op_ieq16:
        case nir_op_ieq32:
                dst[0] = ir3_CMPS_S(b, src[0], 0, src[1], 0);
                dst[0]->cat2.condition = IR3_COND_EQ;
-               dst[0] = ir3_n2b(b, dst[0]);
                break;
+       case nir_op_ine16:
        case nir_op_ine32:
                dst[0] = ir3_CMPS_S(b, src[0], 0, src[1], 0);
                dst[0]->cat2.condition = IR3_COND_NE;
-               dst[0] = ir3_n2b(b, dst[0]);
                break;
+       case nir_op_ult16:
        case nir_op_ult32:
                dst[0] = ir3_CMPS_U(b, src[0], 0, src[1], 0);
                dst[0]->cat2.condition = IR3_COND_LT;
-               dst[0] = ir3_n2b(b, dst[0]);
                break;
+       case nir_op_uge16:
        case nir_op_uge32:
                dst[0] = ir3_CMPS_U(b, src[0], 0, src[1], 0);
                dst[0]->cat2.condition = IR3_COND_GE;
-               dst[0] = ir3_n2b(b, dst[0]);
                break;
 
+       case nir_op_b16csel:
        case nir_op_b32csel: {
                struct ir3_instruction *cond = ir3_b2n(b, src[0]);
+
+               if ((src[0]->regs[0]->flags & IR3_REG_HALF))
+                       cond->regs[0]->flags |= IR3_REG_HALF;
+
                compile_assert(ctx, bs[1] == bs[2]);
-               /* the boolean condition is 32b even if src[1] and src[2] are
-                * half-precision, but sel.b16 wants all three src's to be the
-                * same type.
+               /* Make sure the boolean condition has the same bit size as the other
+                * two arguments, adding a conversion if necessary.
                 */
-               if (bs[1] < 32)
+               if (bs[1] < bs[0])
                        cond = ir3_COV(b, cond, TYPE_U32, TYPE_U16);
-               dst[0] = ir3_SEL_B32(b, src[1], 0, cond, 0, src[2], 0);
+               else if (bs[1] > bs[0])
+                       cond = ir3_COV(b, cond, TYPE_U16, TYPE_U32);
+
+               if (bs[1] > 16)
+                       dst[0] = ir3_SEL_B32(b, src[1], 0, cond, 0, src[2], 0);
+               else
+                       dst[0] = ir3_SEL_B16(b, src[1], 0, cond, 0, src[2], 0);
                break;
        }
        case nir_op_bit_count: {
@@ -682,6 +695,21 @@ emit_alu(struct ir3_context *ctx, nir_alu_instr *alu)
                break;
        }
 
+       if (nir_alu_type_get_base_type(info->output_type) == nir_type_bool) {
+               assert(dst_sz == 1);
+
+               if (nir_dest_bit_size(alu->dest.dest) < 32)
+                       dst[0]->regs[0]->flags |= IR3_REG_HALF;
+
+               dst[0] = ir3_n2b(b, dst[0]);
+       }
+
+       if (nir_dest_bit_size(alu->dest.dest) < 32) {
+               for (unsigned i = 0; i < dst_sz; i++) {
+                       dst[i]->regs[0]->flags |= IR3_REG_HALF;
+               }
+       }
+
        ir3_put_dst(ctx, &alu->dest.dest);
 }
 
@@ -1063,9 +1091,11 @@ emit_intrinsic_image_size(struct ir3_context *ctx, nir_intrinsic_instr *intr,
        struct ir3_instruction *samp_tex = get_image_samp_tex_src(ctx, intr);
        struct ir3_instruction *sam, *lod;
        unsigned flags, ncoords = ir3_get_image_coords(var, &flags);
+       type_t dst_type = nir_dest_bit_size(intr->dest) < 32 ?
+                       TYPE_U16 : TYPE_U32;
 
        lod = create_immed(b, 0);
-       sam = ir3_SAM(b, OPC_GETSIZE, TYPE_U32, 0b1111, flags,
+       sam = ir3_SAM(b, OPC_GETSIZE, dst_type, 0b1111, flags,
                        samp_tex, lod, NULL);
 
        /* Array size actually ends up in .w rather than .z. This doesn't
@@ -1125,7 +1155,7 @@ emit_intrinsic_barrier(struct ir3_context *ctx, nir_intrinsic_instr *intr)
        struct ir3_instruction *barrier;
 
        switch (intr->intrinsic) {
-       case nir_intrinsic_barrier:
+       case nir_intrinsic_control_barrier:
                barrier = ir3_BAR(b);
                barrier->cat7.g = true;
                barrier->cat7.l = true;
@@ -1201,27 +1231,27 @@ static void add_sysval_input_compmask(struct ir3_context *ctx,
                struct ir3_instruction *instr)
 {
        struct ir3_shader_variant *so = ctx->so;
-       unsigned r = regid(so->inputs_count, 0);
        unsigned n = so->inputs_count++;
 
        assert(instr->opc == OPC_META_INPUT);
+       instr->input.inidx = n;
        instr->input.sysval = slot;
 
        so->inputs[n].sysval = true;
        so->inputs[n].slot = slot;
        so->inputs[n].compmask = compmask;
-       so->inputs[n].regid = r;
        so->inputs[n].interpolate = INTERP_MODE_FLAT;
        so->total_in++;
-
-       ctx->ir->ninputs = MAX2(ctx->ir->ninputs, r + 1);
-       ctx->ir->inputs[r] = instr;
 }
 
-static void add_sysval_input(struct ir3_context *ctx, gl_system_value slot,
-               struct ir3_instruction *instr)
+static struct ir3_instruction *
+create_sysval_input(struct ir3_context *ctx, gl_system_value slot,
+               unsigned compmask)
 {
-       add_sysval_input_compmask(ctx, slot, 0x1, instr);
+       assert(compmask);
+       struct ir3_instruction *sysval = create_input(ctx, compmask);
+       add_sysval_input_compmask(ctx, slot, compmask, sysval);
+       return sysval;
 }
 
 static struct ir3_instruction *
@@ -1231,14 +1261,10 @@ get_barycentric_centroid(struct ir3_context *ctx)
                struct ir3_instruction *xy[2];
                struct ir3_instruction *ij;
 
-               ij = create_input_compmask(ctx, 0, 0x3);
+               ij = create_sysval_input(ctx, SYSTEM_VALUE_BARYCENTRIC_CENTROID, 0x3);
                ir3_split_dest(ctx->block, xy, ij, 0, 2);
 
                ctx->ij_centroid = ir3_create_collect(ctx, xy, 2);
-
-               add_sysval_input_compmask(ctx,
-                               SYSTEM_VALUE_BARYCENTRIC_CENTROID,
-                               0x3, ij);
        }
 
        return ctx->ij_centroid;
@@ -1251,14 +1277,10 @@ get_barycentric_sample(struct ir3_context *ctx)
                struct ir3_instruction *xy[2];
                struct ir3_instruction *ij;
 
-               ij = create_input_compmask(ctx, 0, 0x3);
+               ij = create_sysval_input(ctx, SYSTEM_VALUE_BARYCENTRIC_SAMPLE, 0x3);
                ir3_split_dest(ctx->block, xy, ij, 0, 2);
 
                ctx->ij_sample = ir3_create_collect(ctx, xy, 2);
-
-               add_sysval_input_compmask(ctx,
-                               SYSTEM_VALUE_BARYCENTRIC_SAMPLE,
-                               0x3, ij);
        }
 
        return ctx->ij_sample;
@@ -1281,7 +1303,7 @@ get_frag_coord(struct ir3_context *ctx)
                struct ir3_instruction *xyzw[4];
                struct ir3_instruction *hw_frag_coord;
 
-               hw_frag_coord = create_input_compmask(ctx, 0, 0xf);
+               hw_frag_coord = create_sysval_input(ctx, SYSTEM_VALUE_FRAG_COORD, 0xf);
                ir3_split_dest(ctx->block, xyzw, hw_frag_coord, 0, 4);
 
                /* for frag_coord.xy, we get unsigned values.. we need
@@ -1302,11 +1324,6 @@ get_frag_coord(struct ir3_context *ctx)
                }
 
                ctx->frag_coord = ir3_create_collect(ctx, xyzw, 4);
-
-               add_sysval_input_compmask(ctx,
-                               SYSTEM_VALUE_FRAG_COORD,
-                               0xf, hw_frag_coord);
-
                ctx->so->frag_coord = true;
        }
 
@@ -1396,9 +1413,8 @@ emit_intrinsic(struct ir3_context *ctx, nir_intrinsic_instr *intr)
 
        case nir_intrinsic_load_tess_coord:
                if (!ctx->tess_coord) {
-                       ctx->tess_coord = create_input_compmask(ctx, 0, 0x3);
-                       add_sysval_input_compmask(ctx, SYSTEM_VALUE_TESS_COORD,
-                                                                         0x3, ctx->tess_coord);
+                       ctx->tess_coord =
+                               create_sysval_input(ctx, SYSTEM_VALUE_TESS_COORD, 0x3);
                }
                ir3_split_dest(b, dst, ctx->tess_coord, 0, 2);
 
@@ -1485,10 +1501,8 @@ emit_intrinsic(struct ir3_context *ctx, nir_intrinsic_instr *intr)
        }
        case nir_intrinsic_load_size_ir3:
                if (!ctx->ij_size) {
-                       ctx->ij_size = create_input(ctx, 0);
-
-                       add_sysval_input(ctx, SYSTEM_VALUE_BARYCENTRIC_SIZE,
-                                       ctx->ij_size);
+                       ctx->ij_size =
+                               create_sysval_input(ctx, SYSTEM_VALUE_BARYCENTRIC_SIZE, 0x1);
                }
                dst[0] = ctx->ij_size;
                break;
@@ -1522,7 +1536,7 @@ emit_intrinsic(struct ir3_context *ctx, nir_intrinsic_instr *intr)
                                         * that is easier than mapping things back to a
                                         * nir_variable to figure out what it is.
                                         */
-                                       dst[i] = ctx->ir->inputs[inloc];
+                                       dst[i] = ctx->inputs[inloc];
                                }
                        }
                } else {
@@ -1536,17 +1550,17 @@ emit_intrinsic(struct ir3_context *ctx, nir_intrinsic_instr *intr)
                        idx += nir_src_as_uint(intr->src[0]);
                        for (int i = 0; i < intr->num_components; i++) {
                                unsigned n = idx * 4 + i + comp;
-                               dst[i] = ctx->ir->inputs[n];
-                               compile_assert(ctx, ctx->ir->inputs[n]);
+                               dst[i] = ctx->inputs[n];
+                               compile_assert(ctx, ctx->inputs[n]);
                        }
                } else {
                        src = ir3_get_src(ctx, &intr->src[0]);
                        struct ir3_instruction *collect =
-                                       ir3_create_collect(ctx, ctx->ir->inputs, ctx->ir->ninputs);
+                                       ir3_create_collect(ctx, ctx->ir->inputs, ctx->ninputs);
                        struct ir3_instruction *addr = ir3_get_addr(ctx, src[0], 4);
                        for (int i = 0; i < intr->num_components; i++) {
                                unsigned n = idx * 4 + i + comp;
-                               dst[i] = create_indirect_load(ctx, ctx->ir->ninputs,
+                               dst[i] = create_indirect_load(ctx, ctx->ninputs,
                                                n, addr, collect);
                        }
                }
@@ -1627,7 +1641,7 @@ emit_intrinsic(struct ir3_context *ctx, nir_intrinsic_instr *intr)
                        ctx->so->no_earlyz = true;
                dst[0] = ctx->funcs->emit_intrinsic_atomic_image(ctx, intr);
                break;
-       case nir_intrinsic_barrier:
+       case nir_intrinsic_control_barrier:
        case nir_intrinsic_memory_barrier:
        case nir_intrinsic_group_memory_barrier:
        case nir_intrinsic_memory_barrier_atomic_counter:
@@ -1647,32 +1661,34 @@ emit_intrinsic(struct ir3_context *ctx, nir_intrinsic_instr *intr)
                src = ir3_get_src(ctx, &intr->src[0]);
                for (int i = 0; i < intr->num_components; i++) {
                        unsigned n = idx * 4 + i + comp;
-                       ctx->ir->outputs[n] = src[i];
+                       ctx->outputs[n] = src[i];
                }
                break;
        case nir_intrinsic_load_base_vertex:
        case nir_intrinsic_load_first_vertex:
                if (!ctx->basevertex) {
                        ctx->basevertex = create_driver_param(ctx, IR3_DP_VTXID_BASE);
-                       add_sysval_input(ctx, SYSTEM_VALUE_FIRST_VERTEX, ctx->basevertex);
                }
                dst[0] = ctx->basevertex;
                break;
+       case nir_intrinsic_load_base_instance:
+               if (!ctx->base_instance) {
+                       ctx->base_instance = create_driver_param(ctx, IR3_DP_INSTID_BASE);
+               }
+               dst[0] = ctx->base_instance;
+               break;
        case nir_intrinsic_load_vertex_id_zero_base:
        case nir_intrinsic_load_vertex_id:
                if (!ctx->vertex_id) {
                        gl_system_value sv = (intr->intrinsic == nir_intrinsic_load_vertex_id) ?
                                SYSTEM_VALUE_VERTEX_ID : SYSTEM_VALUE_VERTEX_ID_ZERO_BASE;
-                       ctx->vertex_id = create_input(ctx, 0);
-                       add_sysval_input(ctx, sv, ctx->vertex_id);
+                       ctx->vertex_id = create_sysval_input(ctx, sv, 0x1);
                }
                dst[0] = ctx->vertex_id;
                break;
        case nir_intrinsic_load_instance_id:
                if (!ctx->instance_id) {
-                       ctx->instance_id = create_input(ctx, 0);
-                       add_sysval_input(ctx, SYSTEM_VALUE_INSTANCE_ID,
-                                       ctx->instance_id);
+                       ctx->instance_id = create_sysval_input(ctx, SYSTEM_VALUE_INSTANCE_ID, 0x1);
                }
                dst[0] = ctx->instance_id;
                break;
@@ -1681,18 +1697,14 @@ emit_intrinsic(struct ir3_context *ctx, nir_intrinsic_instr *intr)
                /* fall-thru */
        case nir_intrinsic_load_sample_id_no_per_sample:
                if (!ctx->samp_id) {
-                       ctx->samp_id = create_input(ctx, 0);
+                       ctx->samp_id = create_sysval_input(ctx, SYSTEM_VALUE_SAMPLE_ID, 0x1);
                        ctx->samp_id->regs[0]->flags |= IR3_REG_HALF;
-                       add_sysval_input(ctx, SYSTEM_VALUE_SAMPLE_ID,
-                                       ctx->samp_id);
                }
                dst[0] = ir3_COV(b, ctx->samp_id, TYPE_U16, TYPE_U32);
                break;
        case nir_intrinsic_load_sample_mask_in:
                if (!ctx->samp_mask_in) {
-                       ctx->samp_mask_in = create_input(ctx, 0);
-                       add_sysval_input(ctx, SYSTEM_VALUE_SAMPLE_MASK_IN,
-                                       ctx->samp_mask_in);
+                       ctx->samp_mask_in = create_sysval_input(ctx, SYSTEM_VALUE_SAMPLE_MASK_IN, 0x1);
                }
                dst[0] = ctx->samp_mask_in;
                break;
@@ -1706,8 +1718,7 @@ emit_intrinsic(struct ir3_context *ctx, nir_intrinsic_instr *intr)
        case nir_intrinsic_load_front_face:
                if (!ctx->frag_face) {
                        ctx->so->frag_face = true;
-                       ctx->frag_face = create_input(ctx, 0);
-                       add_sysval_input(ctx, SYSTEM_VALUE_FRONT_FACE, ctx->frag_face);
+                       ctx->frag_face = create_sysval_input(ctx, SYSTEM_VALUE_FRONT_FACE, 0x1);
                        ctx->frag_face->regs[0]->flags |= IR3_REG_HALF;
                }
                /* for fragface, we get -1 for back and 0 for front. However this is
@@ -1718,17 +1729,15 @@ emit_intrinsic(struct ir3_context *ctx, nir_intrinsic_instr *intr)
                break;
        case nir_intrinsic_load_local_invocation_id:
                if (!ctx->local_invocation_id) {
-                       ctx->local_invocation_id = create_input_compmask(ctx, 0, 0x7);
-                       add_sysval_input_compmask(ctx, SYSTEM_VALUE_LOCAL_INVOCATION_ID,
-                                       0x7, ctx->local_invocation_id);
+                       ctx->local_invocation_id =
+                               create_sysval_input(ctx, SYSTEM_VALUE_LOCAL_INVOCATION_ID, 0x7);
                }
                ir3_split_dest(b, dst, ctx->local_invocation_id, 0, 3);
                break;
        case nir_intrinsic_load_work_group_id:
                if (!ctx->work_group_id) {
-                       ctx->work_group_id = create_input_compmask(ctx, 0, 0x7);
-                       add_sysval_input_compmask(ctx, SYSTEM_VALUE_WORK_GROUP_ID,
-                                       0x7, ctx->work_group_id);
+                       ctx->work_group_id =
+                               create_sysval_input(ctx, SYSTEM_VALUE_WORK_GROUP_ID, 0x7);
                        ctx->work_group_id->regs[0]->flags |= IR3_REG_HIGH;
                }
                ir3_split_dest(b, dst, ctx->work_group_id, 0, 3);
@@ -1762,6 +1771,7 @@ emit_intrinsic(struct ir3_context *ctx, nir_intrinsic_instr *intr)
 
                /* condition always goes in predicate register: */
                cond->regs[0]->num = regid(REG_P0, 0);
+               cond->regs[0]->flags &= ~IR3_REG_SSA;
 
                kill = ir3_KILL(b, cond, 0);
                array_insert(ctx->ir, ctx->ir->predicates, kill);
@@ -1849,6 +1859,30 @@ emit_undef(struct ir3_context *ctx, nir_ssa_undef_instr *undef)
  * texture fetch/sample instructions:
  */
 
+static type_t
+get_tex_dest_type(nir_tex_instr *tex)
+{
+       type_t type;
+
+       switch (nir_alu_type_get_base_type(tex->dest_type)) {
+       case nir_type_invalid:
+       case nir_type_float:
+               type = nir_dest_bit_size(tex->dest) < 32 ? TYPE_F16 : TYPE_F32;
+               break;
+       case nir_type_int:
+               type = nir_dest_bit_size(tex->dest) < 32 ? TYPE_S16 : TYPE_S32;
+               break;
+       case nir_type_uint:
+       case nir_type_bool:
+               type = nir_dest_bit_size(tex->dest) < 32 ? TYPE_U16 : TYPE_U32;
+               break;
+       default:
+               unreachable("bad dest_type");
+       }
+
+       return type;
+}
+
 static void
 tex_info(nir_tex_instr *tex, unsigned *flagsp, unsigned *coordsp)
 {
@@ -1867,6 +1901,8 @@ tex_info(nir_tex_instr *tex, unsigned *flagsp, unsigned *coordsp)
        case GLSL_SAMPLER_DIM_RECT:
        case GLSL_SAMPLER_DIM_EXTERNAL:
        case GLSL_SAMPLER_DIM_MS:
+       case GLSL_SAMPLER_DIM_SUBPASS:
+       case GLSL_SAMPLER_DIM_SUBPASS_MS:
                coords = 2;
                break;
        case GLSL_SAMPLER_DIM_3D:
@@ -2145,21 +2181,7 @@ emit_tex(struct ir3_context *ctx, nir_tex_instr *tex)
                        src1[nsrc1++] = lod;
        }
 
-       switch (tex->dest_type) {
-       case nir_type_invalid:
-       case nir_type_float:
-               type = TYPE_F32;
-               break;
-       case nir_type_int:
-               type = TYPE_S32;
-               break;
-       case nir_type_uint:
-       case nir_type_bool:
-               type = TYPE_U32;
-               break;
-       default:
-               unreachable("bad dest_type");
-       }
+       type = get_tex_dest_type(tex);
 
        if (opc == OPC_GETLOD)
                type = TYPE_S32;
@@ -2191,7 +2213,7 @@ emit_tex(struct ir3_context *ctx, nir_tex_instr *tex)
                compile_assert(ctx, tex->src[idx].src.is_ssa);
 
                sam = ir3_META_TEX_PREFETCH(b);
-               ir3_reg_create(sam, 0, 0)->wrmask = MASK(ncomp);   /* dst */
+               __ssa_dst(sam)->wrmask = MASK(ncomp);   /* dst */
                sam->prefetch.input_offset =
                                ir3_nir_coord_offset(tex->src[idx].src.ssa);
                sam->prefetch.tex  = tex->texture_index;
@@ -2242,10 +2264,11 @@ emit_tex_info(struct ir3_context *ctx, nir_tex_instr *tex, unsigned idx)
 {
        struct ir3_block *b = ctx->block;
        struct ir3_instruction **dst, *sam;
+       type_t dst_type = get_tex_dest_type(tex);
 
        dst = ir3_get_dst(ctx, &tex->dest, 1);
 
-       sam = ir3_SAM(b, OPC_GETINFO, TYPE_U32, 1 << idx, 0,
+       sam = ir3_SAM(b, OPC_GETINFO, dst_type, 1 << idx, 0,
                        get_tex_samp_tex_src(ctx, tex), NULL, NULL);
 
        /* even though there is only one component, since it ends
@@ -2270,6 +2293,7 @@ emit_tex_txs(struct ir3_context *ctx, nir_tex_instr *tex)
        struct ir3_instruction **dst, *sam;
        struct ir3_instruction *lod;
        unsigned flags, coords;
+       type_t dst_type = get_tex_dest_type(tex);
 
        tex_info(tex, &flags, &coords);
 
@@ -2286,7 +2310,7 @@ emit_tex_txs(struct ir3_context *ctx, nir_tex_instr *tex)
 
        lod = ir3_get_src(ctx, &tex->src[0].src)[0];
 
-       sam = ir3_SAM(b, OPC_GETSIZE, TYPE_U32, 0b1111, flags,
+       sam = ir3_SAM(b, OPC_GETSIZE, dst_type, 0b1111, flags,
                        get_tex_samp_tex_src(ctx, tex), lod, NULL);
 
        ir3_split_dest(b, dst, sam, 0, 4);
@@ -2522,9 +2546,7 @@ emit_stream_out(struct ir3_context *ctx)
         * so that it is seen as live over the entire duration
         * of the shader:
         */
-       vtxcnt = create_input(ctx, 0);
-       add_sysval_input(ctx, SYSTEM_VALUE_VERTEX_CNT, vtxcnt);
-
+       vtxcnt = create_sysval_input(ctx, SYSTEM_VALUE_VERTEX_CNT, 0x1);
        maxvtxcnt = create_driver_param(ctx, IR3_DP_VTXCNT_MAX);
 
        /* at this point, we are at the original 'end' block,
@@ -2550,6 +2572,7 @@ emit_stream_out(struct ir3_context *ctx)
        /* setup 'if (vtxcnt < maxvtxcnt)' condition: */
        cond = ir3_CMPS_S(ctx->block, vtxcnt, 0, maxvtxcnt, 0);
        cond->regs[0]->num = regid(REG_P0, 0);
+       cond->regs[0]->flags &= ~IR3_REG_SSA;
        cond->cat2.condition = IR3_COND_LT;
 
        /* condition goes on previous block to the conditional,
@@ -2741,15 +2764,27 @@ setup_input(struct ir3_context *ctx, nir_variable *in)
                                instr = create_frag_input(ctx, so->inputs[n].use_ldlv, idx);
                        }
 
-                       compile_assert(ctx, idx < ctx->ir->ninputs);
+                       compile_assert(ctx, idx < ctx->ninputs);
 
-                       ctx->ir->inputs[idx] = instr;
+                       ctx->inputs[idx] = instr;
                }
        } else if (ctx->so->type == MESA_SHADER_VERTEX) {
+               /* We shouldn't have a fractional location for a VS input; that only
+                * shows up with varying packing.
+                */
+               assert(frac == 0);
+
+               struct ir3_instruction *input = create_input(ctx, (1 << ncomp) - 1);
+               struct ir3_instruction *components[ncomp];
+
+               input->input.inidx = n;
+
+               ir3_split_dest(ctx->block, components, input, 0, ncomp);
+
                for (int i = 0; i < ncomp; i++) {
                        unsigned idx = (n * 4) + i + frac;
-                       compile_assert(ctx, idx < ctx->ir->ninputs);
-                       ctx->ir->inputs[idx] = create_input(ctx, idx);
+                       compile_assert(ctx, idx < ctx->ninputs);
+                       ctx->inputs[idx] = components[i];
                }
        } else {
                ir3_context_error(ctx, "unknown shader type: %d\n", ctx->so->type);
@@ -2777,8 +2812,8 @@ pack_inlocs(struct ir3_context *ctx)
         * First Step: scan shader to find which bary.f/ldlv remain:
         */
 
-       list_for_each_entry (struct ir3_block, block, &ctx->ir->block_list, node) {
-               list_for_each_entry (struct ir3_instruction, instr, &block->instr_list, node) {
+       foreach_block (block, &ctx->ir->block_list) {
+               foreach_instr (instr, &block->instr_list) {
                        if (is_input(instr)) {
                                unsigned inloc = instr->regs[1]->iim_val;
                                unsigned i = inloc / 4;
@@ -2841,14 +2876,18 @@ pack_inlocs(struct ir3_context *ctx)
         * Third Step: reassign packed inloc's:
         */
 
-       list_for_each_entry (struct ir3_block, block, &ctx->ir->block_list, node) {
-               list_for_each_entry (struct ir3_instruction, instr, &block->instr_list, node) {
+       foreach_block (block, &ctx->ir->block_list) {
+               foreach_instr (instr, &block->instr_list) {
                        if (is_input(instr)) {
                                unsigned inloc = instr->regs[1]->iim_val;
                                unsigned i = inloc / 4;
                                unsigned j = inloc % 4;
 
                                instr->regs[1]->iim_val = so->inputs[i].inloc + j;
+                       } else if (instr->opc == OPC_META_TEX_PREFETCH) {
+                               unsigned i = instr->prefetch.input_offset / 4;
+                               unsigned j = instr->prefetch.input_offset % 4;
+                               instr->prefetch.input_offset = so->inputs[i].inloc + j;
                        }
                }
        }
@@ -2883,6 +2922,7 @@ setup_output(struct ir3_context *ctx, nir_variable *out)
                                        gl_frag_result_name(slot));
                }
        } else if (ctx->so->type == MESA_SHADER_VERTEX ||
+                       ctx->so->type == MESA_SHADER_TESS_EVAL ||
                        ctx->so->type == MESA_SHADER_GEOMETRY) {
                switch (slot) {
                case VARYING_SLOT_POS:
@@ -2914,6 +2954,9 @@ setup_output(struct ir3_context *ctx, nir_variable *out)
                                        _mesa_shader_stage_to_string(ctx->so->type),
                                        gl_varying_slot_name(slot));
                }
+       } else if (ctx->so->type == MESA_SHADER_TESS_CTRL) {
+               /* output lowered to buffer writes. */
+               return;
        } else {
                ir3_context_error(ctx, "unknown shader type: %d\n", ctx->so->type);
        }
@@ -2926,8 +2969,8 @@ setup_output(struct ir3_context *ctx, nir_variable *out)
 
        for (int i = 0; i < ncomp; i++) {
                unsigned idx = (n * 4) + i + frac;
-               compile_assert(ctx, idx < ctx->ir->noutputs);
-               ctx->ir->outputs[idx] = create_immed(ctx->block, fui(0.0));
+               compile_assert(ctx, idx < ctx->noutputs);
+               ctx->outputs[idx] = create_immed(ctx->block, fui(0.0));
        }
 
        /* if varying packing doesn't happen, we could end up in a situation
@@ -2940,8 +2983,8 @@ setup_output(struct ir3_context *ctx, nir_variable *out)
         */
        for (int i = 0; i < frac; i++) {
                unsigned idx = (n * 4) + i;
-               if (!ctx->ir->outputs[idx]) {
-                       ctx->ir->outputs[idx] = create_immed(ctx->block, fui(0.0));
+               if (!ctx->outputs[idx]) {
+                       ctx->outputs[idx] = create_immed(ctx->block, fui(0.0));
                }
        }
 }
@@ -2956,46 +2999,24 @@ max_drvloc(struct exec_list *vars)
        return drvloc;
 }
 
-static const unsigned max_sysvals[] = {
-       [MESA_SHADER_VERTEX]  = 16,
-       [MESA_SHADER_GEOMETRY] = 16,
-       [MESA_SHADER_FRAGMENT] = 24,  // TODO
-       [MESA_SHADER_COMPUTE] = 16, // TODO how many do we actually need?
-       [MESA_SHADER_KERNEL]  = 16, // TODO how many do we actually need?
-};
-
 static void
 emit_instructions(struct ir3_context *ctx)
 {
-       unsigned ninputs, noutputs;
        nir_function_impl *fxn = nir_shader_get_entrypoint(ctx->s);
 
-       ninputs  = (max_drvloc(&ctx->s->inputs) + 1) * 4;
-       noutputs = (max_drvloc(&ctx->s->outputs) + 1) * 4;
+       ctx->ninputs  = (max_drvloc(&ctx->s->inputs) + 1) * 4;
+       ctx->noutputs = (max_drvloc(&ctx->s->outputs) + 1) * 4;
 
-       /* we need to leave room for sysvals:
-        */
-       ninputs += max_sysvals[ctx->so->type];
-       if (ctx->so->type == MESA_SHADER_VERTEX)
-               noutputs += 8; /* gs or tess header + primitive_id */
+       ctx->inputs  = rzalloc_array(ctx, struct ir3_instruction *, ctx->ninputs);
+       ctx->outputs = rzalloc_array(ctx, struct ir3_instruction *, ctx->noutputs);
 
-       ctx->ir = ir3_create(ctx->compiler, ctx->so->type, ninputs, noutputs);
+       ctx->ir = ir3_create(ctx->compiler, ctx->so->type);
 
        /* Create inputs in first block: */
        ctx->block = get_block(ctx, nir_start_block(fxn));
        ctx->in_block = ctx->block;
        list_addtail(&ctx->block->node, &ctx->ir->block_list);
 
-       ninputs -= max_sysvals[ctx->so->type];
-
-       if (ctx->so->key.has_gs) {
-               if (ctx->so->type == MESA_SHADER_VERTEX ||
-                       ctx->so->type == MESA_SHADER_GEOMETRY) {
-                       ctx->gs_header = create_input(ctx, 0);
-                       ctx->primitive_id = create_input(ctx, 0);
-               }
-       }
-
        /* for fragment shader, the vcoord input register is used as the
         * base for bary.f varying fetch instrs:
         *
@@ -3009,7 +3030,7 @@ emit_instructions(struct ir3_context *ctx)
        if (ctx->so->type == MESA_SHADER_FRAGMENT) {
                struct ir3_instruction *xy[2];
 
-               vcoord = create_input_compmask(ctx, 0, 0x3);
+               vcoord = create_input(ctx, 0x3);
                ir3_split_dest(ctx->block, xy, vcoord, 0, 2);
 
                ctx->ij_pixel = ir3_create_collect(ctx, xy, 2);
@@ -3028,40 +3049,46 @@ emit_instructions(struct ir3_context *ctx)
                                0x3, vcoord);
        }
 
-       if (ctx->primitive_id)
-               add_sysval_input(ctx, SYSTEM_VALUE_PRIMITIVE_ID, ctx->primitive_id);
-       if (ctx->gs_header)
-               add_sysval_input(ctx, SYSTEM_VALUE_GS_HEADER_IR3, ctx->gs_header);
+
+       /* Tessellation shaders always need the primitive ID for indexing the
+        * BO. Geometry shaders don't always need it, but when they do it has to
+        * be delivered and unclobbered in the VS. To make things easy, we always
+        * make room for it in VS/DS.
+        */
+       bool has_tess = ctx->so->key.tessellation != IR3_TESS_NONE;
+       bool has_gs = ctx->so->key.has_gs;
+       switch (ctx->so->type) {
+       case MESA_SHADER_VERTEX:
+               if (has_tess) {
+                       ctx->tcs_header = create_sysval_input(ctx, SYSTEM_VALUE_TCS_HEADER_IR3, 0x1);
+                       ctx->primitive_id = create_sysval_input(ctx, SYSTEM_VALUE_PRIMITIVE_ID, 0x1);
+               } else if (has_gs) {
+                       ctx->gs_header = create_sysval_input(ctx, SYSTEM_VALUE_GS_HEADER_IR3, 0x1);
+                       ctx->primitive_id = create_sysval_input(ctx, SYSTEM_VALUE_PRIMITIVE_ID, 0x1);
+               }
+               break;
+       case MESA_SHADER_TESS_CTRL:
+               ctx->tcs_header = create_sysval_input(ctx, SYSTEM_VALUE_TCS_HEADER_IR3, 0x1);
+               ctx->primitive_id = create_sysval_input(ctx, SYSTEM_VALUE_PRIMITIVE_ID, 0x1);
+               break;
+       case MESA_SHADER_TESS_EVAL:
+               if (has_gs)
+                       ctx->gs_header = create_sysval_input(ctx, SYSTEM_VALUE_GS_HEADER_IR3, 0x1);
+               ctx->primitive_id = create_sysval_input(ctx, SYSTEM_VALUE_PRIMITIVE_ID, 0x1);
+               break;
+       case MESA_SHADER_GEOMETRY:
+               ctx->gs_header = create_sysval_input(ctx, SYSTEM_VALUE_GS_HEADER_IR3, 0x1);
+               ctx->primitive_id = create_sysval_input(ctx, SYSTEM_VALUE_PRIMITIVE_ID, 0x1);
+               break;
+       default:
+               break;
+       }
 
        /* Setup outputs: */
        nir_foreach_variable(var, &ctx->s->outputs) {
                setup_output(ctx, var);
        }
 
-       /* Set up the gs header as an output for the vertex shader so it won't
-        * clobber it for the tess ctrl shader. */
-       if (ctx->so->type == MESA_SHADER_VERTEX) {
-               struct ir3_shader_variant *so = ctx->so;
-               if (ctx->primitive_id) {
-                       unsigned n = so->outputs_count++;
-                       so->outputs[n].slot = VARYING_SLOT_PRIMITIVE_ID;
-                       so->outputs[n].regid = regid(n, 0);
-                       ctx->ir->outputs[n * 4] = ctx->primitive_id;
-
-                       compile_assert(ctx, n * 4 < ctx->ir->noutputs);
-               }
-
-               if (ctx->gs_header) {
-                       unsigned n = so->outputs_count++;
-                       so->outputs[n].slot = VARYING_SLOT_GS_HEADER_IR3;
-                       so->outputs[n].regid = regid(n, 0);
-                       ctx->ir->outputs[n * 4] = ctx->gs_header;
-
-                       compile_assert(ctx, n * 4 < ctx->ir->noutputs);
-               }
-
-       }
-
        /* Find # of samplers: */
        nir_foreach_variable(var, &ctx->s->uniforms) {
                ctx->so->num_samp += glsl_type_get_sampler_count(var->type);
@@ -3081,28 +3108,6 @@ emit_instructions(struct ir3_context *ctx)
        emit_function(ctx, fxn);
 }
 
-/* from NIR perspective, we actually have varying inputs.  But the varying
- * inputs, from an IR standpoint, are just bary.f/ldlv instructions.  The
- * only actual inputs are the sysvals.
- */
-static void
-fixup_frag_inputs(struct ir3_context *ctx)
-{
-       struct ir3_shader_variant *so = ctx->so;
-       struct ir3 *ir = ctx->ir;
-       unsigned i = 0;
-
-       /* sysvals should appear at the end of the inputs, drop everything else: */
-       while ((i < so->inputs_count) && !so->inputs[i].sysval)
-               i++;
-
-       /* at IR level, inputs are always blocks of 4 scalars: */
-       i *= 4;
-
-       ir->inputs = &ir->inputs[i];
-       ir->ninputs -= i;
-}
-
 /* Fixup tex sampler state for astc/srgb workaround instructions.  We
  * need to assign the tex state indexes for these after we know the
  * max tex index.
@@ -3144,23 +3149,44 @@ fixup_binning_pass(struct ir3_context *ctx)
        struct ir3 *ir = ctx->ir;
        unsigned i, j;
 
+       /* first pass, remove unused outputs from the IR level outputs: */
+       for (i = 0, j = 0; i < ir->outputs_count; i++) {
+               struct ir3_instruction *out = ir->outputs[i];
+               assert(out->opc == OPC_META_COLLECT);
+               unsigned outidx = out->collect.outidx;
+               unsigned slot = so->outputs[outidx].slot;
+
+               /* throw away everything but first position/psize */
+               if ((slot == VARYING_SLOT_POS) || (slot == VARYING_SLOT_PSIZ)) {
+                       ir->outputs[j] = ir->outputs[i];
+                       j++;
+               }
+       }
+       ir->outputs_count = j;
+
+       /* second pass, clean up the unused slots in the ir3_shader_variant::outputs
+        * table:
+        */
        for (i = 0, j = 0; i < so->outputs_count; i++) {
                unsigned slot = so->outputs[i].slot;
 
                /* throw away everything but first position/psize */
                if ((slot == VARYING_SLOT_POS) || (slot == VARYING_SLOT_PSIZ)) {
-                       if (i != j) {
-                               so->outputs[j] = so->outputs[i];
-                               ir->outputs[(j*4)+0] = ir->outputs[(i*4)+0];
-                               ir->outputs[(j*4)+1] = ir->outputs[(i*4)+1];
-                               ir->outputs[(j*4)+2] = ir->outputs[(i*4)+2];
-                               ir->outputs[(j*4)+3] = ir->outputs[(i*4)+3];
+                       so->outputs[j] = so->outputs[i];
+
+                       /* fixup outidx to point to new output table entry: */
+                       struct ir3_instruction *out;
+                       foreach_output(out, ir) {
+                               if (out->collect.outidx == i) {
+                                       out->collect.outidx = j;
+                                       break;
+                               }
                        }
+
                        j++;
                }
        }
        so->outputs_count = j;
-       ir->noutputs = j * 4;
 }
 
 static void
@@ -3169,9 +3195,8 @@ collect_tex_prefetches(struct ir3_context *ctx, struct ir3 *ir)
        unsigned idx = 0;
 
        /* Collect sampling instructions eligible for pre-dispatch. */
-       list_for_each_entry(struct ir3_block, block, &ir->block_list, node) {
-               list_for_each_entry_safe(struct ir3_instruction, instr,
-                               &block->instr_list, node) {
+       foreach_block (block, &ir->block_list) {
+               foreach_instr_safe (instr, &block->instr_list) {
                        if (instr->opc == OPC_META_TEX_PREFETCH) {
                                assert(idx < ARRAY_SIZE(ctx->so->sampler_prefetch));
                                struct ir3_sampler_prefetch *fetch =
@@ -3189,7 +3214,7 @@ collect_tex_prefetches(struct ir3_context *ctx, struct ir3 *ir)
                                        MAX2(ctx->so->total_in, instr->prefetch.input_offset + 2);
 
                                /* Disable half precision until supported. */
-                               fetch->half_precision = 0x0;
+                               fetch->half_precision = !!(instr->regs[0]->flags & IR3_REG_HALF);
 
                                /* Remove the prefetch placeholder instruction: */
                                list_delinit(&instr->node);
@@ -3204,8 +3229,6 @@ ir3_compile_shader_nir(struct ir3_compiler *compiler,
 {
        struct ir3_context *ctx;
        struct ir3 *ir;
-       struct ir3_instruction **inputs;
-       unsigned i;
        int ret = 0, max_bary;
 
        assert(!so->ir);
@@ -3227,52 +3250,88 @@ ir3_compile_shader_nir(struct ir3_compiler *compiler,
 
        ir = so->ir = ctx->ir;
 
-       /* keep track of the inputs from TGSI perspective.. */
-       inputs = ir->inputs;
+       assert((ctx->noutputs % 4) == 0);
 
-       /* but fixup actual inputs for frag shader: */
-       if (so->type == MESA_SHADER_FRAGMENT)
-               fixup_frag_inputs(ctx);
+       /* Setup IR level outputs, which are "collects" that gather
+        * the scalar components of outputs.
+        */
+       for (unsigned i = 0; i < ctx->noutputs; i += 4) {
+               unsigned ncomp = 0;
+               /* figure out the # of components written:
+                *
+                * TODO do we need to handle holes, ie. if .x and .z
+                * components written, but .y component not written?
+                */
+               for (unsigned j = 0; j < 4; j++) {
+                       if (!ctx->outputs[i + j])
+                               break;
+                       ncomp++;
+               }
 
-       /* at this point, for binning pass, throw away unneeded outputs: */
-       if (so->binning_pass && (ctx->compiler->gpu_id < 600))
-               fixup_binning_pass(ctx);
+               /* Note that in some stages, like TCS, store_output is
+                * lowered to memory writes, so no components of the output
+                * are "written" from the PoV of traditional store-output
+                * instructions:
+                */
+               if (!ncomp)
+                       continue;
 
-       /* if we want half-precision outputs, mark the output registers
-        * as half:
-        */
-       if (so->key.half_precision) {
-               for (i = 0; i < ir->noutputs; i++) {
-                       struct ir3_instruction *out = ir->outputs[i];
+               struct ir3_instruction *out =
+                       ir3_create_collect(ctx, &ctx->outputs[i], ncomp);
 
-                       if (!out)
-                               continue;
+               int outidx = i / 4;
+               assert(outidx < so->outputs_count);
 
-                       /* if frag shader writes z, that needs to be full precision: */
-                       if (so->outputs[i/4].slot == FRAG_RESULT_DEPTH)
-                               continue;
+               /* stash index into so->outputs[] so we can map the
+                * output back to slot/etc later:
+                */
+               out->collect.outidx = outidx;
 
-                       out->regs[0]->flags |= IR3_REG_HALF;
-                       /* output could be a fanout (ie. texture fetch output)
-                        * in which case we need to propagate the half-reg flag
-                        * up to the definer so that RA sees it:
-                        */
-                       if (out->opc == OPC_META_FO) {
-                               out = out->regs[1]->instr;
-                               out->regs[0]->flags |= IR3_REG_HALF;
-                       }
+               array_insert(ir, ir->outputs, out);
+       }
 
-                       if (out->opc == OPC_MOV) {
-                               out->cat1.dst_type = half_type(out->cat1.dst_type);
-                       }
+       /* Set up the gs header as an output for the vertex shader so it won't
+        * clobber it for the tess ctrl shader.
+        *
+        * TODO this could probably be done more cleanly in a nir pass.
+        */
+       if (ctx->so->type == MESA_SHADER_VERTEX ||
+                       (ctx->so->key.has_gs && ctx->so->type == MESA_SHADER_TESS_EVAL)) {
+               if (ctx->primitive_id) {
+                       unsigned n = so->outputs_count++;
+                       so->outputs[n].slot = VARYING_SLOT_PRIMITIVE_ID;
+
+                       struct ir3_instruction *out =
+                               ir3_create_collect(ctx, &ctx->primitive_id, 1);
+                       out->collect.outidx = n;
+                       array_insert(ir, ir->outputs, out);
+               }
+
+               if (ctx->gs_header) {
+                       unsigned n = so->outputs_count++;
+                       so->outputs[n].slot = VARYING_SLOT_GS_HEADER_IR3;
+                       struct ir3_instruction *out =
+                               ir3_create_collect(ctx, &ctx->gs_header, 1);
+                       out->collect.outidx = n;
+                       array_insert(ir, ir->outputs, out);
                }
-       }
 
-       if (ir3_shader_debug & IR3_DBG_OPTMSGS) {
-               printf("BEFORE CP:\n");
-               ir3_print(ir);
+               if (ctx->tcs_header) {
+                       unsigned n = so->outputs_count++;
+                       so->outputs[n].slot = VARYING_SLOT_TCS_HEADER_IR3;
+                       struct ir3_instruction *out =
+                               ir3_create_collect(ctx, &ctx->tcs_header, 1);
+                       out->collect.outidx = n;
+                       array_insert(ir, ir->outputs, out);
+               }
        }
 
+       /* at this point, for binning pass, throw away unneeded outputs: */
+       if (so->binning_pass && (ctx->compiler->gpu_id < 600))
+               fixup_binning_pass(ctx);
+
+       ir3_debug_print(ir, "BEFORE CP");
+
        ir3_cp(ir, so);
 
        /* at this point, for binning pass, throw away unneeded outputs:
@@ -3287,10 +3346,10 @@ ir3_compile_shader_nir(struct ir3_compiler *compiler,
         * need to make sure not to remove any inputs that are used by
         * the nonbinning VS.
         */
-       if (ctx->compiler->gpu_id >= 600 && so->binning_pass) {
-               debug_assert(so->type == MESA_SHADER_VERTEX);
-               for (int i = 0; i < ir->ninputs; i++) {
-                       struct ir3_instruction *in = ir->inputs[i];
+       if (ctx->compiler->gpu_id >= 600 && so->binning_pass &&
+                       so->type == MESA_SHADER_VERTEX) {
+               for (int i = 0; i < ctx->ninputs; i++) {
+                       struct ir3_instruction *in = ctx->inputs[i];
 
                        if (!in)
                                continue;
@@ -3309,24 +3368,7 @@ ir3_compile_shader_nir(struct ir3_compiler *compiler,
                }
        }
 
-       /* Insert mov if there's same instruction for each output.
-        * eg. dEQP-GLES31.functional.shaders.opaque_type_indexing.sampler.const_expression.vertex.sampler2dshadow
-        */
-       for (int i = ir->noutputs - 1; i >= 0; i--) {
-               if (!ir->outputs[i])
-                       continue;
-               for (unsigned j = 0; j < i; j++) {
-                       if (ir->outputs[i] == ir->outputs[j]) {
-                               ir->outputs[i] =
-                                       ir3_MOV(ir->outputs[i]->block, ir->outputs[i], TYPE_F32);
-                       }
-               }
-       }
-
-       if (ir3_shader_debug & IR3_DBG_OPTMSGS) {
-               printf("BEFORE GROUPING:\n");
-               ir3_print(ir);
-       }
+       ir3_debug_print(ir, "BEFORE GROUPING");
 
        ir3_sched_add_deps(ir);
 
@@ -3335,17 +3377,11 @@ ir3_compile_shader_nir(struct ir3_compiler *compiler,
         */
        ir3_group(ir);
 
-       if (ir3_shader_debug & IR3_DBG_OPTMSGS) {
-               printf("AFTER GROUPING:\n");
-               ir3_print(ir);
-       }
+       ir3_debug_print(ir, "AFTER GROUPING");
 
        ir3_depth(ir, so);
 
-       if (ir3_shader_debug & IR3_DBG_OPTMSGS) {
-               printf("AFTER DEPTH:\n");
-               ir3_print(ir);
-       }
+       ir3_debug_print(ir, "AFTER DEPTH");
 
        /* do Sethi–Ullman numbering before scheduling: */
        ir3_sun(ir);
@@ -3360,10 +3396,7 @@ ir3_compile_shader_nir(struct ir3_compiler *compiler,
                ir3_a6xx_fixup_atomic_dests(ir, so);
        }
 
-       if (ir3_shader_debug & IR3_DBG_OPTMSGS) {
-               printf("AFTER SCHED:\n");
-               ir3_print(ir);
-       }
+       ir3_debug_print(ir, "AFTER SCHED");
 
        /* Pre-assign VS inputs on a6xx+ binning pass shader, to align
         * with draw pass VS, so binning and draw pass can both use the
@@ -3376,8 +3409,8 @@ ir3_compile_shader_nir(struct ir3_compiler *compiler,
                        so->binning_pass;
 
        if (pre_assign_inputs) {
-               for (unsigned i = 0; i < ir->ninputs; i++) {
-                       struct ir3_instruction *instr = ir->inputs[i];
+               for (unsigned i = 0; i < ctx->ninputs; i++) {
+                       struct ir3_instruction *instr = ctx->inputs[i];
 
                        if (!instr)
                                continue;
@@ -3389,27 +3422,32 @@ ir3_compile_shader_nir(struct ir3_compiler *compiler,
                        instr->regs[0]->num = regid;
                }
 
-               ret = ir3_ra(so, ir->inputs, ir->ninputs);
+               ret = ir3_ra(so, ctx->inputs, ctx->ninputs);
+       } else if (ctx->tcs_header) {
+               /* We need to have these values in the same registers between VS and TCS
+                * since the VS chains to TCS and doesn't get the sysvals redelivered.
+                */
+
+               ctx->tcs_header->regs[0]->num = regid(0, 0);
+               ctx->primitive_id->regs[0]->num = regid(0, 1);
+               struct ir3_instruction *precolor[] = { ctx->tcs_header, ctx->primitive_id };
+               ret = ir3_ra(so, precolor, ARRAY_SIZE(precolor));
        } else if (ctx->gs_header) {
-               /* We need to have these values in the same registers between VS and GS
-                * since the VS chains to GS and doesn't get the sysvals redelivered.
+               /* We need to have these values in the same registers between producer
+                * (VS or DS) and GS since the producer chains to GS and doesn't get
+                * the sysvals redelivered.
                 */
 
-               ctx->gs_header->regs[0]->num = 0;
-               ctx->primitive_id->regs[0]->num = 1;
+               ctx->gs_header->regs[0]->num = regid(0, 0);
+               ctx->primitive_id->regs[0]->num = regid(0, 1);
                struct ir3_instruction *precolor[] = { ctx->gs_header, ctx->primitive_id };
                ret = ir3_ra(so, precolor, ARRAY_SIZE(precolor));
        } else if (so->num_sampler_prefetch) {
                assert(so->type == MESA_SHADER_FRAGMENT);
-               struct ir3_instruction *precolor[2];
+               struct ir3_instruction *instr, *precolor[2];
                int idx = 0;
 
-               for (unsigned i = 0; i < ir->ninputs; i++) {
-                       struct ir3_instruction *instr = ctx->ir->inputs[i];
-
-                       if (!instr)
-                               continue;
-
+               foreach_input(instr, ir) {
                        if (instr->input.sysval != SYSTEM_VALUE_BARYCENTRIC_PIXEL)
                                continue;
 
@@ -3430,56 +3468,52 @@ ir3_compile_shader_nir(struct ir3_compiler *compiler,
                goto out;
        }
 
-       if (ir3_shader_debug & IR3_DBG_OPTMSGS) {
-               printf("AFTER RA:\n");
-               ir3_print(ir);
-       }
+       ir3_debug_print(ir, "AFTER RA");
 
        if (so->type == MESA_SHADER_FRAGMENT)
                pack_inlocs(ctx);
 
-       /* fixup input/outputs: */
-       for (i = 0; i < so->outputs_count; i++) {
-               /* sometimes we get outputs that don't write the .x coord, like:
-                *
-                *   decl_var shader_out INTERP_MODE_NONE float Color (VARYING_SLOT_VAR9.z, 1, 0)
-                *
-                * Presumably the result of varying packing and then eliminating
-                * some unneeded varyings?  Just skip head to the first valid
-                * component of the output.
-                */
-               for (unsigned j = 0; j < 4; j++) {
-                       struct ir3_instruction *instr = ir->outputs[(i*4) + j];
-                       if (instr) {
-                               so->outputs[i].regid = instr->regs[0]->num;
-                               so->outputs[i].half  = !!(instr->regs[0]->flags & IR3_REG_HALF);
-                               break;
-                       }
-               }
-       }
+       /*
+        * Fixup inputs/outputs to point to the actual registers assigned:
+        *
+        * 1) initialize to r63.x (invalid/unused)
+        * 2) iterate IR level inputs/outputs and update the variant's
+        *    inputs/outputs table based on the assigned registers for
+        *    the remaining inputs/outputs.
+        */
 
-       /* Note that some or all channels of an input may be unused: */
-       for (i = 0; i < so->inputs_count; i++) {
-               unsigned j, reg = regid(63,0);
-               bool half = false;
-               for (j = 0; j < 4; j++) {
-                       struct ir3_instruction *in = inputs[(i*4) + j];
+       for (unsigned i = 0; i < so->inputs_count; i++)
+               so->inputs[i].regid = INVALID_REG;
+       for (unsigned i = 0; i < so->outputs_count; i++)
+               so->outputs[i].regid = INVALID_REG;
 
-                       if (!in)
-                               continue;
+       struct ir3_instruction *out;
+       foreach_output(out, ir) {
+               assert(out->opc == OPC_META_COLLECT);
+               unsigned outidx = out->collect.outidx;
 
-                       if (in->flags & IR3_INSTR_UNUSED)
-                               continue;
+               so->outputs[outidx].regid = out->regs[0]->num;
+               so->outputs[outidx].half  = !!(out->regs[0]->flags & IR3_REG_HALF);
+       }
 
-                       reg = in->regs[0]->num - j;
-                       if (half) {
-                               compile_assert(ctx, in->regs[0]->flags & IR3_REG_HALF);
-                       } else {
-                               half = !!(in->regs[0]->flags & IR3_REG_HALF);
+       struct ir3_instruction *in;
+       foreach_input(in, ir) {
+               assert(in->opc == OPC_META_INPUT);
+               unsigned inidx = in->input.inidx;
+
+               if (pre_assign_inputs && !so->inputs[inidx].sysval) {
+                       if (VALIDREG(so->nonbinning->inputs[inidx].regid)) {
+                               compile_assert(ctx, in->regs[0]->num ==
+                                               so->nonbinning->inputs[inidx].regid);
+                               compile_assert(ctx, !!(in->regs[0]->flags & IR3_REG_HALF) ==
+                                               so->nonbinning->inputs[inidx].half);
                        }
+                       so->inputs[inidx].regid = so->nonbinning->inputs[inidx].regid;
+                       so->inputs[inidx].half  = so->nonbinning->inputs[inidx].half;
+               } else {
+                       so->inputs[inidx].regid = in->regs[0]->num;
+                       so->inputs[inidx].half  = !!(in->regs[0]->flags & IR3_REG_HALF);
                }
-               so->inputs[i].regid = reg;
-               so->inputs[i].half  = half;
        }
 
        if (ctx->astc_srgb)
@@ -3490,18 +3524,15 @@ ir3_compile_shader_nir(struct ir3_compiler *compiler,
         */
        ir3_legalize(ir, &so->has_ssbo, &so->need_pixlod, &max_bary);
 
-       if (ir3_shader_debug & IR3_DBG_OPTMSGS) {
-               printf("AFTER LEGALIZE:\n");
-               ir3_print(ir);
-       }
+       ir3_debug_print(ir, "AFTER LEGALIZE");
 
        /* Set (ss)(sy) on first TCS and GEOMETRY instructions, since we don't
         * know what we might have to wait on when coming in from VS chsh.
         */
        if (so->type == MESA_SHADER_TESS_CTRL ||
                so->type == MESA_SHADER_GEOMETRY ) {
-               list_for_each_entry (struct ir3_block, block, &ir->block_list, node) {
-                       list_for_each_entry (struct ir3_instruction, instr, &block->instr_list, node) {
+               foreach_block (block, &ir->block_list) {
+                       foreach_instr (instr, &block->instr_list) {
                                instr->flags |= IR3_INSTR_SS | IR3_INSTR_SY;
                                break;
                        }