freedreno/ir3: add generic get_barycentric()
diff --git a/src/freedreno/ir3/ir3_compiler_nir.c b/src/freedreno/ir3/ir3_compiler_nir.c
index 53fcadc5933f01608ff1bcb7aaa06f9914b47773..096611a297504f0eb3c577727ace91730e6cf5df 100644
--- a/src/freedreno/ir3/ir3_compiler_nir.c
+++ b/src/freedreno/ir3/ir3_compiler_nir.c
 #include "ir3.h"
 #include "ir3_context.h"
 
+void
+ir3_handle_bindless_cat6(struct ir3_instruction *instr, nir_src rsrc)
+{
+       nir_intrinsic_instr *intrin = ir3_bindless_resource(rsrc);
+       if (!intrin)
+               return;
+
+       instr->flags |= IR3_INSTR_B;
+       instr->cat6.base = nir_intrinsic_desc_set(intrin);
+}
 
 static struct ir3_instruction *
 create_indirect_load(struct ir3_context *ctx, unsigned arrsz, int n,
@@ -88,7 +98,7 @@ create_frag_input(struct ir3_context *ctx, bool use_ldlv, unsigned n)
                instr->cat6.type = TYPE_U32;
                instr->cat6.iim_val = 1;
        } else {
-               instr = ir3_BARY_F(block, inloc, 0, ctx->ij_pixel, 0);
+               instr = ir3_BARY_F(block, inloc, 0, ctx->ij[IJ_PERSP_PIXEL], 0);
                instr->regs[2]->wrmask = 0x3;
        }
 
@@ -100,7 +110,7 @@ create_driver_param(struct ir3_context *ctx, enum ir3_driver_param dp)
 {
        /* first four vec4 sysval's reserved for UBOs: */
        /* NOTE: dp is in scalar, but there can be >4 dp components: */
-       struct ir3_const_state *const_state = &ctx->so->shader->const_state;
+       struct ir3_const_state *const_state = ir3_const_state(ctx->so);
        unsigned n = const_state->offsets.driver_param;
        unsigned r = regid(n + dp / 4, dp % 4);
        return create_uniform(ctx->block, r);
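
The regid() arithmetic above is easiest to follow with concrete numbers. A
stand-alone sketch, not mesa code: the offset and dp values are made up, and
regid() is re-implemented locally with the usual (vec4 num, component) packing.

    #include <assert.h>

    /* local stand-in for ir3's regid(): vec4 register number + component */
    static unsigned regid(unsigned num, unsigned comp)
    {
        return ((num & 0x3f) << 2) | (comp & 0x3);
    }

    int main(void)
    {
        unsigned n  = 4;  /* const_state->offsets.driver_param, in vec4 units */
        unsigned dp = 9;  /* scalar driver-param index (some IR3_DP_* value)  */

        /* scalar index 9 lands in vec4 slot 4 + 9/4 = 6, component 9 % 4 = 1,
         * i.e. the .y component of const c6 */
        assert(regid(n + dp / 4, dp % 4) == regid(6, 1));
        return 0;
    }
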
@@ -747,12 +757,9 @@ emit_intrinsic_load_ubo_ldc(struct ir3_context *ctx, nir_intrinsic_instr *intr,
        ldc->cat6.d = nir_intrinsic_base(intr);
        ldc->cat6.type = TYPE_U32;
 
-       nir_intrinsic_instr *bindless = ir3_bindless_resource(intr->src[0]);
-       if (bindless) {
-               ldc->flags |= IR3_INSTR_B;
-               ldc->cat6.base = nir_intrinsic_desc_set(bindless);
+       ir3_handle_bindless_cat6(ldc, intr->src[0]);
+       if (ldc->flags & IR3_INSTR_B)
                ctx->so->bindless_ubo = true;
-       }
 
        ir3_split_dest(b, dst, ldc, 0, ncomp);
 }
@@ -765,7 +772,7 @@ emit_intrinsic_load_ubo(struct ir3_context *ctx, nir_intrinsic_instr *intr,
 {
        struct ir3_block *b = ctx->block;
        struct ir3_instruction *base_lo, *base_hi, *addr, *src0, *src1;
-       struct ir3_const_state *const_state = &ctx->so->shader->const_state;
+       const struct ir3_const_state *const_state = ir3_const_state(ctx->so);
        unsigned ubo = regid(const_state->offsets.ubo, 0);
        const unsigned ptrsz = ir3_pointer_size(ctx->compiler);
 
@@ -841,7 +848,7 @@ emit_intrinsic_ssbo_size(struct ir3_context *ctx, nir_intrinsic_instr *intr,
                struct ir3_instruction **dst)
 {
        /* SSBO size stored as a const starting at ssbo_sizes: */
-       struct ir3_const_state *const_state = &ctx->so->shader->const_state;
+       const struct ir3_const_state *const_state = ir3_const_state(ctx->so);
        unsigned blk_idx = nir_src_as_uint(intr->src[0]);
        unsigned idx = regid(const_state->offsets.ssbo_sizes, 0) +
                const_state->ssbo_size.off[blk_idx];
@@ -1168,8 +1175,9 @@ emit_intrinsic_load_image(struct ir3_context *ctx, nir_intrinsic_instr *intr,
        ir3_split_dest(b, dst, sam, 0, 4);
 }
 
-static void
-emit_intrinsic_image_size(struct ir3_context *ctx, nir_intrinsic_instr *intr,
+/* A4xx version of image_size, see ir3_a6xx.c for newer resinfo version. */
+void
+emit_intrinsic_image_size_tex(struct ir3_context *ctx, nir_intrinsic_instr *intr,
                struct ir3_instruction **dst)
 {
        struct ir3_block *b = ctx->block;
@@ -1211,7 +1219,8 @@ emit_intrinsic_image_size(struct ir3_context *ctx, nir_intrinsic_instr *intr,
                 * bytes-per-pixel should have been emitted in 2nd slot of
                 * image_dims. See ir3_shader::emit_image_dims().
                 */
-               struct ir3_const_state *const_state = &ctx->so->shader->const_state;
+               const struct ir3_const_state *const_state =
+                               ir3_const_state(ctx->so);
                unsigned cb = regid(const_state->offsets.image_dims, 0) +
                        const_state->image_dims.off[nir_src_as_uint(intr->src[0])];
                struct ir3_instruction *aux = create_uniform(b, cb + 1);
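
The const indexing here is plain base-plus-offset arithmetic; a minimal sketch
with illustrative numbers only (the real layout comes from
ir3_shader::emit_image_dims(), which this does not reproduce):

    #include <assert.h>

    static unsigned regid(unsigned num, unsigned comp)
    {
        return ((num & 0x3f) << 2) | (comp & 0x3);
    }

    int main(void)
    {
        unsigned image_dims_base = 8;   /* offsets.image_dims, vec4 units    */
        unsigned off[] = { 0, 3 };      /* per-image scalar offsets, made up */
        unsigned img = 1;               /* nir_src_as_uint(intr->src[0])     */

        unsigned cb = regid(image_dims_base, 0) + off[img];
        assert(cb + 1 == 32 + 3 + 1);   /* 2nd slot of this image's entry    */
        return 0;
    }
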
@@ -1337,44 +1346,47 @@ create_sysval_input(struct ir3_context *ctx, gl_system_value slot,
 }
 
 static struct ir3_instruction *
-get_barycentric_centroid(struct ir3_context *ctx)
+get_barycentric(struct ir3_context *ctx, enum ir3_bary bary)
 {
-       if (!ctx->ij_centroid) {
+       static const gl_system_value sysval_base = SYSTEM_VALUE_BARYCENTRIC_PERSP_PIXEL;
+
+       STATIC_ASSERT(sysval_base + IJ_PERSP_PIXEL == SYSTEM_VALUE_BARYCENTRIC_PERSP_PIXEL);
+       STATIC_ASSERT(sysval_base + IJ_PERSP_SAMPLE == SYSTEM_VALUE_BARYCENTRIC_PERSP_SAMPLE);
+       STATIC_ASSERT(sysval_base + IJ_PERSP_CENTROID == SYSTEM_VALUE_BARYCENTRIC_PERSP_CENTROID);
+       STATIC_ASSERT(sysval_base + IJ_PERSP_SIZE == SYSTEM_VALUE_BARYCENTRIC_PERSP_SIZE);
+       STATIC_ASSERT(sysval_base + IJ_LINEAR_PIXEL == SYSTEM_VALUE_BARYCENTRIC_LINEAR_PIXEL);
+       STATIC_ASSERT(sysval_base + IJ_LINEAR_CENTROID == SYSTEM_VALUE_BARYCENTRIC_LINEAR_CENTROID);
+       STATIC_ASSERT(sysval_base + IJ_LINEAR_SAMPLE == SYSTEM_VALUE_BARYCENTRIC_LINEAR_SAMPLE);
+
+       if (!ctx->ij[bary]) {
                struct ir3_instruction *xy[2];
                struct ir3_instruction *ij;
 
-               ij = create_sysval_input(ctx, SYSTEM_VALUE_BARYCENTRIC_PERSP_CENTROID, 0x3);
+               ij = create_sysval_input(ctx, sysval_base + bary, 0x3);
                ir3_split_dest(ctx->block, xy, ij, 0, 2);
 
-               ctx->ij_centroid = ir3_create_collect(ctx, xy, 2);
+               ctx->ij[bary] = ir3_create_collect(ctx, xy, 2);
        }
 
-       return ctx->ij_centroid;
+       return ctx->ij[bary];
 }
 
 static struct ir3_instruction *
-get_barycentric_sample(struct ir3_context *ctx)
+get_barycentric_centroid(struct ir3_context *ctx)
 {
-       if (!ctx->ij_sample) {
-               struct ir3_instruction *xy[2];
-               struct ir3_instruction *ij;
-
-               ij = create_sysval_input(ctx, SYSTEM_VALUE_BARYCENTRIC_PERSP_SAMPLE, 0x3);
-               ir3_split_dest(ctx->block, xy, ij, 0, 2);
-
-               ctx->ij_sample = ir3_create_collect(ctx, xy, 2);
-       }
+       return get_barycentric(ctx, IJ_PERSP_CENTROID);
+}
 
-       return ctx->ij_sample;
+static struct ir3_instruction *
+get_barycentric_sample(struct ir3_context *ctx)
+{
+       return get_barycentric(ctx, IJ_PERSP_SAMPLE);
 }
 
 static struct ir3_instruction  *
 get_barycentric_pixel(struct ir3_context *ctx)
 {
-       /* TODO when tgsi_to_nir supports "new-style" FS inputs switch
-        * this to create ij_pixel only on demand:
-        */
-       return ctx->ij_pixel;
+       return get_barycentric(ctx, IJ_PERSP_PIXEL);
 }
 
 static struct ir3_instruction *
@@ -1418,30 +1430,31 @@ emit_intrinsic(struct ir3_context *ctx, nir_intrinsic_instr *intr)
        struct ir3_instruction **dst;
        struct ir3_instruction * const *src;
        struct ir3_block *b = ctx->block;
+       unsigned dest_components = nir_intrinsic_dest_components(intr);
        int idx, comp;
 
        if (info->has_dest) {
-               unsigned n = nir_intrinsic_dest_components(intr);
-               dst = ir3_get_dst(ctx, &intr->dest, n);
+               dst = ir3_get_dst(ctx, &intr->dest, dest_components);
        } else {
                dst = NULL;
        }
 
-       const unsigned primitive_param = ctx->so->shader->const_state.offsets.primitive_param * 4;
-       const unsigned primitive_map = ctx->so->shader->const_state.offsets.primitive_map * 4;
+       const struct ir3_const_state *const_state = ir3_const_state(ctx->so);
+       const unsigned primitive_param = const_state->offsets.primitive_param * 4;
+       const unsigned primitive_map = const_state->offsets.primitive_map * 4;
 
        switch (intr->intrinsic) {
        case nir_intrinsic_load_uniform:
                idx = nir_intrinsic_base(intr);
                if (nir_src_is_const(intr->src[0])) {
                        idx += nir_src_as_uint(intr->src[0]);
-                       for (int i = 0; i < intr->num_components; i++) {
+                       for (int i = 0; i < dest_components; i++) {
                                dst[i] = create_uniform_typed(b, idx + i,
                                        nir_dest_bit_size(intr->dest) == 16 ? TYPE_F16 : TYPE_F32);
                        }
                } else {
                        src = ir3_get_src(ctx, &intr->src[0]);
-                       for (int i = 0; i < intr->num_components; i++) {
+                       for (int i = 0; i < dest_components; i++) {
                                dst[i] = create_uniform_indirect(b, idx + i,
                                                ir3_get_addr0(ctx, src[0], 1));
                        }
@@ -1451,7 +1464,7 @@ emit_intrinsic(struct ir3_context *ctx, nir_intrinsic_instr *intr)
                         * addr reg value can be:
                         */
                        ctx->so->constlen = MAX2(ctx->so->constlen,
-                                       ctx->so->shader->ubo_state.size / 16);
+                                       const_state->ubo_state.size / 16);
                }
                break;
 
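
A quick sanity check on the constlen update above, assuming ubo_state.size is
in bytes and constlen counts vec4 slots (the sizes are illustrative only):

    #include <assert.h>

    int main(void)
    {
        unsigned ubo_size_bytes = 4096;        /* uploaded UBO range (made up) */
        unsigned constlen = 64;                /* whatever was needed so far   */

        unsigned needed = ubo_size_bytes / 16; /* 16 bytes per vec4 const      */
        if (needed > constlen)
            constlen = needed;                 /* the MAX2() in the code above */

        assert(constlen == 256);
        return 0;
    }
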
@@ -1514,6 +1527,7 @@ emit_intrinsic(struct ir3_context *ctx, nir_intrinsic_instr *intr)
 
        case nir_intrinsic_store_global_ir3: {
                struct ir3_instruction *value, *addr, *offset;
+               unsigned ncomp = nir_intrinsic_src_components(intr, 0);
 
                addr = ir3_create_collect(ctx, (struct ir3_instruction*[]){
                                ir3_get_src(ctx, &intr->src[1])[0],
@@ -1522,12 +1536,11 @@ emit_intrinsic(struct ir3_context *ctx, nir_intrinsic_instr *intr)
 
                offset = ir3_get_src(ctx, &intr->src[2])[0];
 
-               value = ir3_create_collect(ctx, ir3_get_src(ctx, &intr->src[0]),
-                                                                  intr->num_components);
+               value = ir3_create_collect(ctx, ir3_get_src(ctx, &intr->src[0]), ncomp);
 
                struct ir3_instruction *stg =
                        ir3_STG_G(ctx->block, addr, 0, value, 0,
-                                         create_immed(ctx->block, intr->num_components), 0, offset, 0);
+                                         create_immed(ctx->block, ncomp), 0, offset, 0);
                stg->cat6.type = TYPE_U32;
                stg->cat6.iim_val = 1;
 
@@ -1549,15 +1562,15 @@ emit_intrinsic(struct ir3_context *ctx, nir_intrinsic_instr *intr)
                offset = ir3_get_src(ctx, &intr->src[1])[0];
 
                struct ir3_instruction *load =
-                       ir3_LDG(b, addr, 0, create_immed(ctx->block, intr->num_components),
+                       ir3_LDG(b, addr, 0, create_immed(ctx->block, dest_components),
                                        0, offset, 0);
                load->cat6.type = TYPE_U32;
-               load->regs[0]->wrmask = MASK(intr->num_components);
+               load->regs[0]->wrmask = MASK(dest_components);
 
                load->barrier_class = IR3_BARRIER_BUFFER_R;
                load->barrier_conflict = IR3_BARRIER_BUFFER_W;
 
-               ir3_split_dest(b, dst, load, 0, intr->num_components);
+               ir3_split_dest(b, dst, load, 0, dest_components);
                break;
        }
 
@@ -1584,11 +1597,11 @@ emit_intrinsic(struct ir3_context *ctx, nir_intrinsic_instr *intr)
                break;
        }
        case nir_intrinsic_load_size_ir3:
-               if (!ctx->ij_size) {
-                       ctx->ij_size =
+               if (!ctx->ij[IJ_PERSP_SIZE]) {
+                       ctx->ij[IJ_PERSP_SIZE] =
                                create_sysval_input(ctx, SYSTEM_VALUE_BARYCENTRIC_PERSP_SIZE, 0x1);
                }
-               dst[0] = ctx->ij_size;
+               dst[0] = ctx->ij[IJ_PERSP_SIZE];
                break;
        case nir_intrinsic_load_barycentric_centroid:
                ir3_split_dest(b, dst, get_barycentric_centroid(ctx), 0, 2);
@@ -1610,7 +1623,7 @@ emit_intrinsic(struct ir3_context *ctx, nir_intrinsic_instr *intr)
                if (nir_src_is_const(intr->src[1])) {
                        struct ir3_instruction *coord = ir3_create_collect(ctx, src, 2);
                        idx += nir_src_as_uint(intr->src[1]);
-                       for (int i = 0; i < intr->num_components; i++) {
+                       for (int i = 0; i < dest_components; i++) {
                                unsigned inloc = idx * 4 + i + comp;
                                if (ctx->so->inputs[idx].bary &&
                                                !ctx->so->inputs[idx].use_ldlv) {
@@ -1633,7 +1646,7 @@ emit_intrinsic(struct ir3_context *ctx, nir_intrinsic_instr *intr)
                comp = nir_intrinsic_component(intr);
                if (nir_src_is_const(intr->src[0])) {
                        idx += nir_src_as_uint(intr->src[0]);
-                       for (int i = 0; i < intr->num_components; i++) {
+                       for (int i = 0; i < dest_components; i++) {
                                unsigned n = idx * 4 + i + comp;
                                dst[i] = ctx->inputs[n];
                                compile_assert(ctx, ctx->inputs[n]);
@@ -1643,7 +1656,7 @@ emit_intrinsic(struct ir3_context *ctx, nir_intrinsic_instr *intr)
                        struct ir3_instruction *collect =
                                        ir3_create_collect(ctx, ctx->ir->inputs, ctx->ninputs);
                        struct ir3_instruction *addr = ir3_get_addr0(ctx, src[0], 4);
-                       for (int i = 0; i < intr->num_components; i++) {
+                       for (int i = 0; i < dest_components; i++) {
                                unsigned n = idx * 4 + i + comp;
                                dst[i] = create_indirect_load(ctx, ctx->ninputs,
                                                n, addr, collect);
@@ -1719,7 +1732,7 @@ emit_intrinsic(struct ir3_context *ctx, nir_intrinsic_instr *intr)
                break;
        case nir_intrinsic_image_size:
        case nir_intrinsic_bindless_image_size:
-               emit_intrinsic_image_size(ctx, intr, dst);
+               ctx->funcs->emit_intrinsic_image_size(ctx, intr, dst);
                break;
        case nir_intrinsic_image_atomic_add:
        case nir_intrinsic_bindless_image_atomic_add:
@@ -1763,7 +1776,7 @@ emit_intrinsic(struct ir3_context *ctx, nir_intrinsic_instr *intr)
                idx += nir_src_as_uint(intr->src[1]);
 
                src = ir3_get_src(ctx, &intr->src[0]);
-               for (int i = 0; i < intr->num_components; i++) {
+               for (int i = 0; i < nir_intrinsic_src_components(intr, 0); i++) {
                        unsigned n = idx * 4 + i + comp;
                        ctx->outputs[n] = src[i];
                }
@@ -1775,6 +1788,12 @@ emit_intrinsic(struct ir3_context *ctx, nir_intrinsic_instr *intr)
                }
                dst[0] = ctx->basevertex;
                break;
+       case nir_intrinsic_load_draw_id:
+               if (!ctx->draw_id) {
+                       ctx->draw_id = create_driver_param(ctx, IR3_DP_DRAWID);
+               }
+               dst[0] = ctx->draw_id;
+               break;
        case nir_intrinsic_load_base_instance:
                if (!ctx->base_instance) {
                        ctx->base_instance = create_driver_param(ctx, IR3_DP_INSTID_BASE);
@@ -1814,7 +1833,7 @@ emit_intrinsic(struct ir3_context *ctx, nir_intrinsic_instr *intr)
                break;
        case nir_intrinsic_load_user_clip_plane:
                idx = nir_intrinsic_ucp_id(intr);
-               for (int i = 0; i < intr->num_components; i++) {
+               for (int i = 0; i < dest_components; i++) {
                        unsigned n = idx * 4 + i;
                        dst[i] = create_driver_param(ctx, IR3_DP_UCP0_X + n);
                }
@@ -1849,12 +1868,12 @@ emit_intrinsic(struct ir3_context *ctx, nir_intrinsic_instr *intr)
                ir3_split_dest(b, dst, ctx->work_group_id, 0, 3);
                break;
        case nir_intrinsic_load_num_work_groups:
-               for (int i = 0; i < intr->num_components; i++) {
+               for (int i = 0; i < dest_components; i++) {
                        dst[i] = create_driver_param(ctx, IR3_DP_NUM_WORK_GROUPS_X + i);
                }
                break;
        case nir_intrinsic_load_local_group_size:
-               for (int i = 0; i < intr->num_components; i++) {
+               for (int i = 0; i < dest_components; i++) {
                        dst[i] = create_driver_param(ctx, IR3_DP_LOCAL_GROUP_SIZE_X + i);
                }
                break;
@@ -1884,7 +1903,7 @@ emit_intrinsic(struct ir3_context *ctx, nir_intrinsic_instr *intr)
                array_insert(ctx->ir, ctx->ir->predicates, kill);
 
                array_insert(b, b->keeps, kill);
-               ctx->so->no_earlyz = true;
+               ctx->so->has_kill = true;
 
                break;
        }
@@ -2797,7 +2816,8 @@ emit_stream_out(struct ir3_context *ctx)
         * stripped out in the backend.
         */
        for (unsigned i = 0; i < IR3_MAX_SO_BUFFERS; i++) {
-               struct ir3_const_state *const_state = &ctx->so->shader->const_state;
+               const struct ir3_const_state *const_state =
+                               ir3_const_state(ctx->so);
                unsigned stride = strmout->stride[i];
                struct ir3_instruction *base, *off;
 
@@ -2963,7 +2983,7 @@ setup_input(struct ir3_context *ctx, nir_variable *in)
                        ctx->inputs[idx] = instr;
                }
        } else if (ctx->so->type == MESA_SHADER_VERTEX) {
-               struct ir3_instruction *input = NULL, *in;
+               struct ir3_instruction *input = NULL;
                struct ir3_instruction *components[4];
                unsigned mask = (1 << (ncomp + frac)) - 1;
 
@@ -2978,6 +2998,20 @@ setup_input(struct ir3_context *ctx, nir_variable *in)
                        input = create_input(ctx, mask);
                        input->input.inidx = n;
                } else {
+                       /* For aliased inputs, just append to the wrmask.. ie. if we
+                        * first see a vec2 index at slot N, and then later a vec4,
+                        * the wrmask of the resulting overlapped vec2 and vec4 is 0xf
+                        *
+                        * If the new input that aliases a previously processed input
+                        * sets no new bits, then just bail as there is nothing to see
+                        * here.
+                        *
+                        * Note that we don't expect to get an input w/ frac!=0, if we
+                        * did we'd have to adjust ncomp and frac to cover the entire
+                        * merged input.
+                        */
+                       if (!(mask & ~input->regs[0]->wrmask))
+                               return;
                        input->regs[0]->wrmask |= mask;
                }
 
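
The wrmask handling described in the aliasing comment above is plain bit
arithmetic; a stand-alone sketch with hypothetical masks, and frac == 0 as the
comment assumes:

    #include <assert.h>

    int main(void)
    {
        unsigned wrmask = 0x3;     /* slot first seen as a vec2: .xy           */
        unsigned mask   = 0xf;     /* same slot later aliased by a vec4: .xyzw */

        if (mask & ~wrmask)        /* new components -> widen the merged input */
            wrmask |= mask;
        assert(wrmask == 0xf);

        mask = 0x3;                /* a further vec2 alias adds no new bits... */
        assert(!(mask & ~wrmask)); /* ...so setup_input() bails out early      */
        return 0;
    }
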
@@ -2986,6 +3020,25 @@ setup_input(struct ir3_context *ctx, nir_variable *in)
                for (int i = 0; i < ncomp; i++) {
                        unsigned idx = (n * 4) + i + frac;
                        compile_assert(ctx, idx < ctx->ninputs);
+
+                       /* With aliased inputs, since we add to the wrmask above, we
+                        * can end up with stale meta:split instructions in the inputs
+                        * table.  This is basically harmless, since eventually they
+                        * will get swept away by DCE, but the mismatched wrmask (since
+                        * they would be using the previous wrmask before we OR'd in
+                        * more bits) angers ir3_validate.  So just preemptively clean
+                        * them up.  See:
+                        *
+                        * dEQP-GLES2.functional.attribute_location.bind_aliasing.cond_vec2
+                        *
+                        * Note however that split_dest() will return the src if it is
+                        * scalar, so the previous ctx->inputs[idx] could be the input
+                        * itself (which we don't want to remove)
+                        */
+                       if (ctx->inputs[idx] && (ctx->inputs[idx] != input)) {
+                               list_del(&ctx->inputs[idx]->node);
+                       }
+
                        ctx->inputs[idx] = components[i];
                }
        } else {
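
The ctx->inputs[idx] != input guard matters because ir3_split_dest() hands back
the source itself for single-component values; a toy model of that bookkeeping
(plain C, not ir3 data structures):

    #include <assert.h>
    #include <stddef.h>

    struct toy_instr { int dummy; };

    /* unlink only stale split instructions, never the shared input itself */
    static int should_unlink(const struct toy_instr *entry,
                             const struct toy_instr *input)
    {
        return entry != NULL && entry != input;
    }

    int main(void)
    {
        struct toy_instr input = {0}, old_split = {0};

        assert(!should_unlink(NULL, &input));      /* slot not written yet       */
        assert(!should_unlink(&input, &input));    /* scalar: entry is the input */
        assert(should_unlink(&old_split, &input)); /* stale meta:split: remove   */
        return 0;
    }
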
@@ -3204,7 +3257,7 @@ emit_instructions(struct ir3_context *ctx)
        ctx->inputs  = rzalloc_array(ctx, struct ir3_instruction *, ctx->ninputs);
        ctx->outputs = rzalloc_array(ctx, struct ir3_instruction *, ctx->noutputs);
 
-       ctx->ir = ir3_create(ctx->compiler, ctx->so->type);
+       ctx->ir = ir3_create(ctx->compiler, ctx->so);
 
        /* Create inputs in first block: */
        ctx->block = get_block(ctx, nir_start_block(fxn));
@@ -3220,7 +3273,7 @@ emit_instructions(struct ir3_context *ctx)
         * tgsi_to_nir)
         */
        if (ctx->so->type == MESA_SHADER_FRAGMENT) {
-               ctx->ij_pixel = create_input(ctx, 0x3);
+               ctx->ij[IJ_PERSP_PIXEL] = create_input(ctx, 0x3);
        }
 
        /* Setup inputs: */
@@ -3231,9 +3284,9 @@ emit_instructions(struct ir3_context *ctx)
        /* Defer add_sysval_input() stuff until after setup_inputs(),
         * because sysvals need to be appended after varyings:
         */
-       if (ctx->ij_pixel) {
+       if (ctx->ij[IJ_PERSP_PIXEL]) {
                add_sysval_input_compmask(ctx, SYSTEM_VALUE_BARYCENTRIC_PERSP_PIXEL,
-                               0x3, ctx->ij_pixel);
+                               0x3, ctx->ij[IJ_PERSP_PIXEL]);
        }
 
 
@@ -3276,15 +3329,11 @@ emit_instructions(struct ir3_context *ctx)
                setup_output(ctx, var);
        }
 
-       /* Find # of samplers: */
-       nir_foreach_variable (var, &ctx->s->uniforms) {
-               ctx->so->num_samp += glsl_type_get_sampler_count(var->type);
-               /* just assume that we'll be reading from images.. if it
-                * is write-only we don't have to count it, but not sure
-                * if there is a good way to know?
-                */
-               ctx->so->num_samp += glsl_type_get_image_count(var->type);
-       }
+       /* Find # of samplers. Just assume that we'll be reading from images.. if
+        * it is write-only we don't have to count it, but after lowering derefs
+        * is too late to compact indices for that.
+        */
+       ctx->so->num_samp = util_last_bit(ctx->s->info.textures_used) + ctx->s->info.num_images;
 
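
The sampler count now comes straight from NIR's usage info; a small sketch of
the util_last_bit() half, using a local fallback since the real helper lives in
util (the bitmask and image count are illustrative):

    #include <assert.h>

    /* local stand-in for util_last_bit(): one plus the index of the MSB set */
    static unsigned last_bit(unsigned v)
    {
        unsigned n = 0;
        while (v) {
            n++;
            v >>= 1;
        }
        return n;
    }

    int main(void)
    {
        unsigned textures_used = 0xb;  /* tex slots 0, 1 and 3 referenced */
        unsigned num_images = 2;

        /* holes still count: highest used slot is 3, so 4 sampler slots  */
        assert(last_bit(textures_used) + num_images == 6);
        return 0;
    }
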
        /* NOTE: need to do something more clever when we support >1 fxn */
        nir_foreach_register (reg, &fxn->registers) {
@@ -3362,7 +3411,6 @@ fixup_binning_pass(struct ir3_context *ctx)
                        so->outputs[j] = so->outputs[i];
 
                        /* fixup outidx to point to new output table entry: */
-                       struct ir3_instruction *out;
                        foreach_output (out, ir) {
                                if (out->collect.outidx == i) {
                                        out->collect.outidx = j;
@@ -3406,10 +3454,16 @@ collect_tex_prefetches(struct ir3_context *ctx, struct ir3 *ir)
                                fetch->dst = instr->regs[0]->num;
                                fetch->src = instr->prefetch.input_offset;
 
+                               /* These are the limits on a5xx/a6xx, we might need to
+                                * revisit if SP_FS_PREFETCH[n] changes on later gens:
+                                */
+                               assert(fetch->dst <= 0x3f);
+                               assert(fetch->tex_id <= 0x1f);
+                               assert(fetch->samp_id <= 0xf);
+
                                ctx->so->total_in =
                                        MAX2(ctx->so->total_in, instr->prefetch.input_offset + 2);
 
-                               /* Disable half precision until supported. */
                                fetch->half_precision = !!(instr->regs[0]->flags & IR3_REG_HALF);
 
                                /* Remove the prefetch placeholder instruction: */
@@ -3426,6 +3480,7 @@ ir3_compile_shader_nir(struct ir3_compiler *compiler,
        struct ir3_context *ctx;
        struct ir3 *ir;
        int ret = 0, max_bary;
+       bool progress;
 
        assert(!so->ir);
 
@@ -3522,23 +3577,6 @@ ir3_compile_shader_nir(struct ir3_compiler *compiler,
                }
        }
 
-       /* at this point, for binning pass, throw away unneeded outputs: */
-       if (so->binning_pass && (ctx->compiler->gpu_id < 600))
-               fixup_binning_pass(ctx);
-
-       ir3_debug_print(ir, "AFTER: nir->ir3");
-
-       IR3_PASS(ir, ir3_cf);
-       IR3_PASS(ir, ir3_cp, so);
-
-       /* at this point, for binning pass, throw away unneeded outputs:
-        * Note that for a6xx and later, we do this after ir3_cp to ensure
-        * that the uniform/constant layout for BS and VS matches, so that
-        * we can re-use same VS_CONST state group.
-        */
-       if (so->binning_pass && (ctx->compiler->gpu_id >= 600))
-               fixup_binning_pass(ctx);
-
        /* for a6xx+, binning and draw pass VS use same VBO state, so we
         * need to make sure not to remove any inputs that are used by
         * the nonbinning VS.
@@ -3565,6 +3603,31 @@ ir3_compile_shader_nir(struct ir3_compiler *compiler,
                }
        }
 
+       /* at this point, for binning pass, throw away unneeded outputs: */
+       if (so->binning_pass && (ctx->compiler->gpu_id < 600))
+               fixup_binning_pass(ctx);
+
+       ir3_debug_print(ir, "AFTER: nir->ir3");
+       ir3_validate(ir);
+
+       do {
+               progress = false;
+
+               progress |= IR3_PASS(ir, ir3_cf);
+               progress |= IR3_PASS(ir, ir3_cp, so);
+               progress |= IR3_PASS(ir, ir3_dce, so);
+       } while (progress);
+
+       /* at this point, for binning pass, throw away unneeded outputs:
+        * Note that for a6xx and later, we do this after ir3_cp to ensure
+        * that the uniform/constant layout for BS and VS matches, so that
+        * we can re-use same VS_CONST state group.
+        */
+       if (so->binning_pass && (ctx->compiler->gpu_id >= 600)) {
+               fixup_binning_pass(ctx);
+               /* cleanup the result of removing unneeded outputs: */
+               while (IR3_PASS(ir, ir3_dce, so)) {}
+       }
 
        IR3_PASS(ir, ir3_sched_add_deps);
 
@@ -3573,7 +3636,8 @@ ir3_compile_shader_nir(struct ir3_compiler *compiler,
         */
        IR3_PASS(ir, ir3_group);
 
-       IR3_PASS(ir, ir3_dce, so);
+       /* At this point, all the dead code should be long gone: */
+       assert(!IR3_PASS(ir, ir3_dce, so));
 
        ret = ir3_sched(ir);
        if (ret) {
@@ -3583,6 +3647,11 @@ ir3_compile_shader_nir(struct ir3_compiler *compiler,
 
        ir3_debug_print(ir, "AFTER: ir3_sched");
 
+       if (IR3_PASS(ir, ir3_cp_postsched)) {
+               /* cleanup the result of removing unneeded mov's: */
+               while (IR3_PASS(ir, ir3_dce, so)) {}
+       }
+
        /* Pre-assign VS inputs on a6xx+ binning pass shader, to align
         * with draw pass VS, so binning and draw pass can both use the
         * same VBO state.
@@ -3629,7 +3698,7 @@ ir3_compile_shader_nir(struct ir3_compiler *compiler,
                ret = ir3_ra(so, precolor, ARRAY_SIZE(precolor));
        } else if (so->num_sampler_prefetch) {
                assert(so->type == MESA_SHADER_FRAGMENT);
-               struct ir3_instruction *instr, *precolor[2];
+               struct ir3_instruction *precolor[2];
                int idx = 0;
 
                foreach_input (instr, ir) {
@@ -3653,7 +3722,7 @@ ir3_compile_shader_nir(struct ir3_compiler *compiler,
                goto out;
        }
 
-       IR3_PASS(ir, ir3_postsched);
+       IR3_PASS(ir, ir3_postsched, so);
 
        if (compiler->gpu_id >= 600) {
                IR3_PASS(ir, ir3_a6xx_fixup_atomic_dests, so);
@@ -3676,7 +3745,6 @@ ir3_compile_shader_nir(struct ir3_compiler *compiler,
        for (unsigned i = 0; i < so->outputs_count; i++)
                so->outputs[i].regid = INVALID_REG;
 
-       struct ir3_instruction *out;
        foreach_output (out, ir) {
                assert(out->opc == OPC_META_COLLECT);
                unsigned outidx = out->collect.outidx;
@@ -3685,7 +3753,6 @@ ir3_compile_shader_nir(struct ir3_compiler *compiler,
                so->outputs[outidx].half  = !!(out->regs[0]->flags & IR3_REG_HALF);
        }
 
-       struct ir3_instruction *in;
        foreach_input (in, ir) {
                assert(in->opc == OPC_META_INPUT);
                unsigned inidx = in->input.inidx;