panfrost: Implement instanced rendering
src/gallium/drivers/panfrost/midgard/midgard_compile.c
index 47742a5a7a949e47de4a70dae1a48526385db609..5559aa4445491129a5ab6fbdfa9182dca06a7def 100644
@@ -36,6 +36,7 @@
 #include "main/imports.h"
 #include "compiler/nir/nir_builder.h"
 #include "util/half_float.h"
+#include "util/u_math.h"
 #include "util/u_debug.h"
 #include "util/u_dynarray.h"
 #include "util/list.h"
@@ -82,35 +83,6 @@ midgard_block_add_successor(midgard_block *block, midgard_block *successor)
  * driver seems to do it that way */
 
 #define EMIT(op, ...) emit_mir_instruction(ctx, v_##op(__VA_ARGS__));
-#define SWIZZLE_XXXX SWIZZLE(COMPONENT_X, COMPONENT_X, COMPONENT_X, COMPONENT_X)
-#define SWIZZLE_XYXX SWIZZLE(COMPONENT_X, COMPONENT_Y, COMPONENT_X, COMPONENT_X)
-#define SWIZZLE_XYZX SWIZZLE(COMPONENT_X, COMPONENT_Y, COMPONENT_Z, COMPONENT_X)
-#define SWIZZLE_XYZW SWIZZLE(COMPONENT_X, COMPONENT_Y, COMPONENT_Z, COMPONENT_W)
-#define SWIZZLE_XYXZ SWIZZLE(COMPONENT_X, COMPONENT_Y, COMPONENT_X, COMPONENT_Z)
-#define SWIZZLE_WWWW SWIZZLE(COMPONENT_W, COMPONENT_W, COMPONENT_W, COMPONENT_W)
-
-static inline unsigned
-swizzle_of(unsigned comp)
-{
-        switch (comp) {
-                case 1:
-                        return SWIZZLE_XXXX;
-                case 2:
-                        return SWIZZLE_XYXX;
-                case 3:
-                        return SWIZZLE_XYZX;
-                case 4:
-                        return SWIZZLE_XYZW;
-                default:
-                        unreachable("Invalid component count");
-        }
-}
-
-static inline unsigned
-mask_of(unsigned nr_comp)
-{
-        return (1 << nr_comp) - 1;
-}
 
 #define M_LOAD_STORE(name, rname, uname) \
        static midgard_instruction m_##name(unsigned ssa, unsigned address) { \
@@ -139,10 +111,23 @@ mask_of(unsigned nr_comp)
  * the corresponding Midgard source */
 
 static midgard_vector_alu_src
-vector_alu_modifiers(nir_alu_src *src, bool is_int)
+vector_alu_modifiers(nir_alu_src *src, bool is_int, unsigned broadcast_count)
 {
         if (!src) return blank_alu_src;
 
+        /* If the op checks fewer components than the hardware's vec4, pad
+         * the swizzle by broadcasting the last valid channel into the
+         * remaining lanes so things like ball2/3 work.
+         */
+
+        if (broadcast_count) {
+                uint8_t last_component = src->swizzle[broadcast_count - 1];
+
+                for (unsigned c = broadcast_count; c < NIR_MAX_VEC_COMPONENTS; ++c) {
+                        src->swizzle[c] = last_component;
+                }
+        }
+
         midgard_vector_alu_src alu_src = {
                 .rep_low = 0,
                 .rep_high = 0,
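For reference, a minimal standalone sketch of the broadcast applied above, trimmed to four lanes (the real loop runs to NIR_MAX_VEC_COMPONENTS): the last valid channel of, say, a b32all_fequal3 source is copied into the remaining lanes, so the hardware's vec4 fball_eq ends up comparing x, y, z, z.

#include <stdio.h>

int main(void)
{
        unsigned char swizzle[4] = { 0, 1, 2, 0 };   /* .xyz plus a don't-care lane */
        unsigned broadcast_count = 3;                /* e.g. b32all_fequal3 */

        unsigned char last = swizzle[broadcast_count - 1];
        for (unsigned c = broadcast_count; c < 4; ++c)
                swizzle[c] = last;

        for (unsigned c = 0; c < 4; ++c)
                putchar("xyzw"[swizzle[c]]);
        putchar('\n');                               /* prints "xyzz" */
        return 0;
}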
@@ -297,16 +282,59 @@ midgard_nir_sysval_for_intrinsic(nir_intrinsic_instr *instr)
         }
 }
 
-static void
-midgard_nir_assign_sysval_body(compiler_context *ctx, nir_instr *instr)
+static unsigned
+nir_dest_index(compiler_context *ctx, nir_dest *dst)
+{
+        if (dst->is_ssa)
+                return dst->ssa.index;
+        else {
+                assert(!dst->reg.indirect);
+                return ctx->func->impl->ssa_alloc + dst->reg.reg->index;
+        }
+}
+
+static int sysval_for_instr(compiler_context *ctx, nir_instr *instr,
+                            unsigned *dest)
 {
+        nir_intrinsic_instr *intr;
+        nir_dest *dst = NULL;
+        nir_tex_instr *tex;
         int sysval = -1;
 
-        if (instr->type == nir_instr_type_intrinsic) {
-                nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);
+        switch (instr->type) {
+        case nir_instr_type_intrinsic:
+                intr = nir_instr_as_intrinsic(instr);
                 sysval = midgard_nir_sysval_for_intrinsic(intr);
+                dst = &intr->dest;
+                break;
+        case nir_instr_type_tex:
+                tex = nir_instr_as_tex(instr);
+                if (tex->op != nir_texop_txs)
+                        break;
+
+                sysval = PAN_SYSVAL(TEXTURE_SIZE,
+                                   PAN_TXS_SYSVAL_ID(tex->texture_index,
+                                                     nir_tex_instr_dest_size(tex) -
+                                                     (tex->is_array ? 1 : 0),
+                                                     tex->is_array));
+                dst = &tex->dest;
+                break;
+        default:
+                break;
         }
 
+        if (dest && dst)
+                *dest = nir_dest_index(ctx, dst);
+
+        return sysval;
+}
+
+static void
+midgard_nir_assign_sysval_body(compiler_context *ctx, nir_instr *instr)
+{
+        int sysval;
+
+        sysval = sysval_for_instr(ctx, instr, NULL);
         if (sysval < 0)
                 return;
 
@@ -381,12 +409,17 @@ optimise_nir(nir_shader *nir)
         NIR_PASS(progress, nir, midgard_nir_lower_fdot2);
         NIR_PASS(progress, nir, nir_lower_idiv);
 
-        nir_lower_tex_options lower_tex_options = {
+        nir_lower_tex_options lower_tex_1st_pass_options = {
                 .lower_rect = true,
                 .lower_txp = ~0
         };
 
-        NIR_PASS(progress, nir, nir_lower_tex, &lower_tex_options);
+        nir_lower_tex_options lower_tex_2nd_pass_options = {
+                .lower_txs_lod = true,
+        };
+
+        NIR_PASS(progress, nir, nir_lower_tex, &lower_tex_1st_pass_options);
+        NIR_PASS(progress, nir, nir_lower_tex, &lower_tex_2nd_pass_options);
 
         do {
                 progress = false;
@@ -428,8 +461,7 @@ optimise_nir(nir_shader *nir)
                          nir_var_shader_out |
                          nir_var_function_temp);
 
-                /* TODO: Enable vectorize when merged upstream */
-                // NIR_PASS(progress, nir, nir_opt_vectorize);
+                NIR_PASS(progress, nir, nir_opt_vectorize);
         } while (progress);
 
         /* Must be run at the end to prevent creation of fsin/fcos ops */
@@ -514,17 +546,6 @@ nir_src_index(compiler_context *ctx, nir_src *src)
         }
 }
 
-static unsigned
-nir_dest_index(compiler_context *ctx, nir_dest *dst)
-{
-        if (dst->is_ssa)
-                return dst->ssa.index;
-        else {
-                assert(!dst->reg.indirect);
-                return ctx->func->impl->ssa_alloc + dst->reg.reg->index;
-        }
-}
-
 static unsigned
 nir_alu_src_index(compiler_context *ctx, nir_alu_src *src)
 {
@@ -662,6 +683,12 @@ emit_indirect_offset(compiler_context *ctx, nir_src *src)
        case nir_op_##nir: \
                op = midgard_alu_op_##_op; \
                break;
+
+#define ALU_CASE_BCAST(nir, _op, count) \
+        case nir_op_##nir: \
+                op = midgard_alu_op_##_op; \
+                broadcast_swizzle = count; \
+                break;
 static bool
 nir_is_fzero_constant(nir_src src)
 {
@@ -694,6 +721,13 @@ emit_alu(compiler_context *ctx, nir_alu_instr *instr)
 
         unsigned op;
 
+        /* Number of components valid to check for the instruction (the rest
+         * will be forced to the last), or 0 to use as-is. Relevant as
+         * ball-type instructions have a channel count in NIR but are all vec4
+         * in Midgard */
+
+        unsigned broadcast_swizzle = 0;
+
         switch (instr->op) {
                 ALU_CASE(fadd, fadd);
                 ALU_CASE(fmul, fmul);
@@ -769,20 +803,20 @@ emit_alu(compiler_context *ctx, nir_alu_instr *instr)
                 ALU_CASE(ishr, iasr);
                 ALU_CASE(ushr, ilsr);
 
-                ALU_CASE(b32all_fequal2, fball_eq);
-                ALU_CASE(b32all_fequal3, fball_eq);
+                ALU_CASE_BCAST(b32all_fequal2, fball_eq, 2);
+                ALU_CASE_BCAST(b32all_fequal3, fball_eq, 3);
                 ALU_CASE(b32all_fequal4, fball_eq);
 
-                ALU_CASE(b32any_fnequal2, fbany_neq);
-                ALU_CASE(b32any_fnequal3, fbany_neq);
+                ALU_CASE_BCAST(b32any_fnequal2, fbany_neq, 2);
+                ALU_CASE_BCAST(b32any_fnequal3, fbany_neq, 3);
                 ALU_CASE(b32any_fnequal4, fbany_neq);
 
-                ALU_CASE(b32all_iequal2, iball_eq);
-                ALU_CASE(b32all_iequal3, iball_eq);
+                ALU_CASE_BCAST(b32all_iequal2, iball_eq, 2);
+                ALU_CASE_BCAST(b32all_iequal3, iball_eq, 3);
                 ALU_CASE(b32all_iequal4, iball_eq);
 
-                ALU_CASE(b32any_inequal2, ibany_neq);
-                ALU_CASE(b32any_inequal3, ibany_neq);
+                ALU_CASE_BCAST(b32any_inequal2, ibany_neq, 2);
+                ALU_CASE_BCAST(b32any_inequal3, ibany_neq, 3);
                 ALU_CASE(b32any_inequal4, ibany_neq);
 
                 /* Source mods will be shoved in later */
@@ -931,8 +965,8 @@ emit_alu(compiler_context *ctx, nir_alu_instr *instr)
                 /* Writemask only valid for non-SSA NIR */
                 .mask = expand_writemask(mask_of(nr_components)),
 
-                .src1 = vector_alu_srco_unsigned(vector_alu_modifiers(nirmods[0], is_int)),
-                .src2 = vector_alu_srco_unsigned(vector_alu_modifiers(nirmods[1], is_int)),
+                .src1 = vector_alu_srco_unsigned(vector_alu_modifiers(nirmods[0], is_int, broadcast_swizzle)),
+                .src2 = vector_alu_srco_unsigned(vector_alu_modifiers(nirmods[1], is_int, broadcast_swizzle)),
         };
 
         /* Apply writemask if non-SSA, keeping in mind that we can't write to components that don't exist */
@@ -997,7 +1031,7 @@ emit_alu(compiler_context *ctx, nir_alu_instr *instr)
                         for (int j = 0; j < 4; ++j)
                                 nirmods[0]->swizzle[j] = original_swizzle[i]; /* Pull from the correct component */
 
-                        ins.alu.src1 = vector_alu_srco_unsigned(vector_alu_modifiers(nirmods[0], is_int));
+                        ins.alu.src1 = vector_alu_srco_unsigned(vector_alu_modifiers(nirmods[0], is_int, broadcast_swizzle));
                         emit_mir_instruction(ctx, ins);
                 }
         } else {
@@ -1007,12 +1041,20 @@ emit_alu(compiler_context *ctx, nir_alu_instr *instr)
 
 #undef ALU_CASE
 
+/* Uniforms and UBOs use a shared code path, as uniforms are just (slightly
+ * optimized) versions of UBO #0 */
+
 static void
-emit_uniform_read(compiler_context *ctx, unsigned dest, unsigned offset, nir_src *indirect_offset)
+emit_ubo_read(
+                compiler_context *ctx,
+                unsigned dest,
+                unsigned offset,
+                nir_src *indirect_offset,
+                unsigned index)
 {
         /* TODO: half-floats */
 
-        if (!indirect_offset && offset < ctx->uniform_cutoff) {
+        if (!indirect_offset && offset < ctx->uniform_cutoff && index == 0) {
                 /* Fast path: For the first 16 uniforms, direct accesses are
                  * 0-cycle, since they're just a register fetch in the usual
                  * case.  So, we alias the registers while we're still in
@@ -1033,11 +1075,13 @@ emit_uniform_read(compiler_context *ctx, unsigned dest, unsigned offset, nir_src
 
                 if (indirect_offset) {
                         emit_indirect_offset(ctx, indirect_offset);
-                        ins.load_store.unknown = 0x8700; /* xxx: what is this? */
+                        ins.load_store.unknown = 0x8700 | index; /* xxx: what is this? */
                 } else {
-                        ins.load_store.unknown = 0x1E00; /* xxx: what is this? */
+                        ins.load_store.unknown = 0x1E00 | index; /* xxx: what is this? */
                 }
 
+                /* TODO respect index */
+
                 emit_mir_instruction(ctx, ins);
         }
 }
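As a pointer to how this is used by the load_ubo path later in the change: uniforms keep index 0, an application UBO block N is addressed as hardware index N + 1, and constant offsets are passed in 16-byte (vec4) slots. A minimal sketch with made-up inputs:

#include <assert.h>
#include <stdio.h>

int main(void)
{
        unsigned nir_block   = 2;    /* UBO index taken from the NIR intrinsic (hypothetical) */
        unsigned byte_offset = 48;   /* constant source offset, in bytes */

        assert((byte_offset & 0xF) == 0);      /* only vec4-aligned reads are handled so far */

        unsigned hw_index = nir_block + 1;     /* index 0 stays reserved for uniforms */
        unsigned slot     = byte_offset / 16;  /* 16-byte (vec4) units */

        printf("emit_ubo_read(..., offset=%u, index=%u)\n", slot, hw_index);
        return 0;
}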
@@ -1047,7 +1091,7 @@ emit_varying_read(
                 compiler_context *ctx,
                 unsigned dest, unsigned offset,
                 unsigned nr_comp, unsigned component,
-                nir_src *indirect_offset)
+                nir_src *indirect_offset, nir_alu_type type)
 {
         /* XXX: Half-floats? */
         /* TODO: swizzle, mask */
@@ -1075,24 +1119,39 @@ emit_varying_read(
                 ins.load_store.unknown = 0x1e9e; /* xxx: what is this? */
         }
 
+        /* Use the type appropriate load */
+        switch (type) {
+                case nir_type_uint:
+                case nir_type_bool:
+                        ins.load_store.op = midgard_op_ld_vary_32u;
+                        break;
+                case nir_type_int:
+                        ins.load_store.op = midgard_op_ld_vary_32i;
+                        break;
+                case nir_type_float:
+                        ins.load_store.op = midgard_op_ld_vary_32;
+                        break;
+                default:
+                        unreachable("Attempted to load unknown type");
+                        break;
+        }
+
         emit_mir_instruction(ctx, ins);
 }
 
 static void
-emit_sysval_read(compiler_context *ctx, nir_intrinsic_instr *instr)
+emit_sysval_read(compiler_context *ctx, nir_instr *instr)
 {
-        /* First, pull out the destination */
-        unsigned dest = nir_dest_index(ctx, &instr->dest);
-
-        /* Now, figure out which uniform this is */
-        int sysval = midgard_nir_sysval_for_intrinsic(instr);
+        unsigned dest;
+        /* Figure out which uniform this is */
+        int sysval = sysval_for_instr(ctx, instr, &dest);
         void *val = _mesa_hash_table_u64_search(ctx->sysval_to_id, sysval);
 
         /* Sysvals are prefix uniforms */
         unsigned uniform = ((uintptr_t) val) - 1;
 
         /* Emit the read itself -- this is never indirect */
-        emit_uniform_read(ctx, dest, uniform, NULL);
+        emit_ubo_read(ctx, dest, uniform, NULL, 0);
 }
 
 /* Reads RGBA8888 value from the tilebuffer and converts to a RGBA32F register,
@@ -1171,7 +1230,7 @@ emit_fb_read_blend_scalar(compiler_context *ctx, unsigned reg)
 static void
 emit_intrinsic(compiler_context *ctx, nir_intrinsic_instr *instr)
 {
-        unsigned offset, reg;
+        unsigned offset = 0, reg;
 
         switch (instr->intrinsic) {
         case nir_intrinsic_discard_if:
@@ -1190,25 +1249,57 @@ emit_intrinsic(compiler_context *ctx, nir_intrinsic_instr *instr)
         }
 
         case nir_intrinsic_load_uniform:
-        case nir_intrinsic_load_input:
-                offset = nir_intrinsic_base(instr);
+        case nir_intrinsic_load_ubo:
+        case nir_intrinsic_load_input: {
+                bool is_uniform = instr->intrinsic == nir_intrinsic_load_uniform;
+                bool is_ubo = instr->intrinsic == nir_intrinsic_load_ubo;
+
+                /* Get the base type of the intrinsic */
+                /* TODO: Infer type? Does it matter? */
+                nir_alu_type t =
+                        is_ubo ? nir_type_uint : nir_intrinsic_type(instr);
+                t = nir_alu_type_get_base_type(t);
+
+                if (!is_ubo) {
+                        offset = nir_intrinsic_base(instr);
+                }
 
                 unsigned nr_comp = nir_intrinsic_dest_components(instr);
-                bool direct = nir_src_is_const(instr->src[0]);
 
-                if (direct) {
-                        offset += nir_src_as_uint(instr->src[0]);
-                }
+                nir_src *src_offset = nir_get_io_offset_src(instr);
+
+                bool direct = nir_src_is_const(*src_offset);
+
+                if (direct)
+                        offset += nir_src_as_uint(*src_offset);
 
                 /* We may need to apply a fractional offset */
                 int component = instr->intrinsic == nir_intrinsic_load_input ?
                         nir_intrinsic_component(instr) : 0;
                 reg = nir_dest_index(ctx, &instr->dest);
 
-                if (instr->intrinsic == nir_intrinsic_load_uniform && !ctx->is_blend) {
-                        emit_uniform_read(ctx, reg, ctx->sysval_count + offset, !direct ? &instr->src[0] : NULL);
+                if (is_uniform && !ctx->is_blend) {
+                        emit_ubo_read(ctx, reg, ctx->sysval_count + offset, !direct ? &instr->src[0] : NULL, 0);
+                } else if (is_ubo) {
+                        nir_src index = instr->src[0];
+
+                        /* We don't yet support indirect UBOs. For indirect
+                         * block numbers (if that's even possible), we don't
+                         * know enough about the hardware yet. For indirect
+                         * sources, we know what we need, but we still need
+                         * NIR support for lowering correctly with respect to
+                         * 128-bit reads */
+
+                        assert(nir_src_is_const(index));
+                        assert(nir_src_is_const(*src_offset));
+
+                        /* TODO: Alignment */
+                        assert((offset & 0xF) == 0);
+
+                        uint32_t uindex = nir_src_as_uint(index) + 1;
+                        emit_ubo_read(ctx, reg, offset / 16, NULL, uindex);
                 } else if (ctx->stage == MESA_SHADER_FRAGMENT && !ctx->is_blend) {
-                        emit_varying_read(ctx, reg, offset, nr_comp, component, !direct ? &instr->src[0] : NULL);
+                        emit_varying_read(ctx, reg, offset, nr_comp, component, !direct ? &instr->src[0] : NULL, t);
                 } else if (ctx->is_blend) {
                         /* For blend shaders, load the input color, which is
                          * preloaded to r0 */
@@ -1219,6 +1310,24 @@ emit_intrinsic(compiler_context *ctx, nir_intrinsic_instr *instr)
                         midgard_instruction ins = m_ld_attr_32(reg, offset);
                         ins.load_store.unknown = 0x1E1E; /* XXX: What is this? */
                         ins.load_store.mask = mask_of(nr_comp);
+
+                        /* Use the type appropriate load */
+                        switch (t) {
+                                case nir_type_uint:
+                                case nir_type_bool:
+                                        ins.load_store.op = midgard_op_ld_attr_32u;
+                                        break;
+                                case nir_type_int:
+                                        ins.load_store.op = midgard_op_ld_attr_32i;
+                                        break;
+                                case nir_type_float:
+                                        ins.load_store.op = midgard_op_ld_attr_32;
+                                        break;
+                                default:
+                                        unreachable("Attempted to load unknown type");
+                                        break;
+                        }
+
                         emit_mir_instruction(ctx, ins);
                 } else {
                         DBG("Unknown load\n");
@@ -1226,6 +1335,7 @@ emit_intrinsic(compiler_context *ctx, nir_intrinsic_instr *instr)
                 }
 
                 break;
+        }
 
         case nir_intrinsic_load_output:
                 assert(nir_src_is_const(instr->src[0]));
@@ -1286,14 +1396,18 @@ emit_intrinsic(compiler_context *ctx, nir_intrinsic_instr *instr)
                         midgard_instruction ins = v_mov(reg, blank_alu_src, SSA_FIXED_REGISTER(26));
                         emit_mir_instruction(ctx, ins);
 
-                        /* We should have been vectorized. That also lets us
-                         * ignore the mask. because the mask component on
-                         * st_vary is (as far as I can tell) ignored [the blob
-                         * sets it to zero] */
-                        assert(nir_intrinsic_component(instr) == 0);
+                        /* We should have been vectorized, though we don't
+                         * currently check that st_vary is emitted only once
+                         * per slot (this is relevant, since there's not a mask
+                         * parameter available on the store [set to 0 by the
+                         * blob]). We do respect the component by adjusting the
+                         * swizzle. */
+
+                        unsigned component = nir_intrinsic_component(instr);
 
                         midgard_instruction st = m_st_vary_32(SSA_FIXED_REGISTER(0), offset);
                         st.load_store.unknown = 0x1E9E; /* XXX: What is this? */
+                        st.load_store.swizzle = SWIZZLE_XYZW << (2*component);
                         emit_mir_instruction(ctx, st);
                 } else {
                         DBG("Unknown store\n");
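A small worked example of the swizzle adjustment above, assuming the driver's 2-bits-per-lane SWIZZLE encoding (COMPONENT_X = 0 through COMPONENT_W = 3) and an 8-bit swizzle field on the store: with component = 1, lane n of st_vary reads source lane n - 1, so the data lands one slot higher.

#include <stdio.h>

#define SWIZZLE(a, b, c, d) ((a) | ((b) << 2) | ((c) << 4) | ((d) << 6))
#define SWIZZLE_XYZW SWIZZLE(0, 1, 2, 3)

int main(void)
{
        unsigned component = 1;                        /* varying starts at .y */
        unsigned swizzle = (SWIZZLE_XYZW << (2 * component)) & 0xFF;

        for (unsigned lane = 0; lane < 4; ++lane)
                putchar("xyzw"[(swizzle >> (2 * lane)) & 3]);
        putchar('\n');                                 /* prints "xxyz" */
        return 0;
}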
@@ -1314,7 +1428,7 @@ emit_intrinsic(compiler_context *ctx, nir_intrinsic_instr *instr)
 
         case nir_intrinsic_load_viewport_scale:
         case nir_intrinsic_load_viewport_offset:
-                emit_sysval_read(ctx, instr);
+                emit_sysval_read(ctx, &instr->instr);
                 break;
 
         default:
@@ -1328,15 +1442,19 @@ static unsigned
 midgard_tex_format(enum glsl_sampler_dim dim)
 {
         switch (dim) {
+        case GLSL_SAMPLER_DIM_1D:
+        case GLSL_SAMPLER_DIM_BUF:
+                return MALI_TEX_1D;
+
         case GLSL_SAMPLER_DIM_2D:
         case GLSL_SAMPLER_DIM_EXTERNAL:
-                return TEXTURE_2D;
+                return MALI_TEX_2D;
 
         case GLSL_SAMPLER_DIM_3D:
-                return TEXTURE_3D;
+                return MALI_TEX_3D;
 
         case GLSL_SAMPLER_DIM_CUBE:
-                return TEXTURE_CUBE;
+                return MALI_TEX_CUBE;
 
         default:
                 DBG("Unknown sampler dim type\n");
@@ -1345,22 +1463,57 @@ midgard_tex_format(enum glsl_sampler_dim dim)
         }
 }
 
-static unsigned
-midgard_tex_op(nir_texop op)
+/* Tries to attach an explicit LOD / bias as a constant. Returns whether this
+ * was successful */
+
+static bool
+pan_attach_constant_bias(
+                compiler_context *ctx,
+                nir_src lod,
+                midgard_texture_word *word)
+{
+        /* To attach as constant, it has to *be* constant */
+
+        if (!nir_src_is_const(lod))
+                return false;
+
+        float f = nir_src_as_float(lod);
+
+        /* Break into fixed-point */
+        signed lod_int = f;
+        float lod_frac = f - lod_int;
+
+        /* Carry over negative fractions */
+        if (lod_frac < 0.0) {
+                lod_int--;
+                lod_frac += 1.0;
+        }
+
+        /* Encode */
+        word->bias = float_to_ubyte(lod_frac);
+        word->bias_int = lod_int;
+
+        return true;
+}
+
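A worked example of the fixed-point split done by pan_attach_constant_bias(), using -1.25 as the constant LOD; float_to_ubyte() from util/u_math.h is approximated here by a plain scale-and-round, which is close enough to show the encoding.

#include <stdio.h>

int main(void)
{
        float f = -1.25f;                    /* constant LOD / bias from NIR */

        signed lod_int = f;                  /* truncates toward zero: -1 */
        float lod_frac = f - lod_int;        /* -0.25 */

        if (lod_frac < 0.0) {                /* carry the negative fraction */
                lod_int--;                   /* -2 */
                lod_frac += 1.0;             /* 0.75, so -2 + 0.75 == -1.25 */
        }

        printf("bias_int=%d bias=%u\n", lod_int,
               (unsigned) (lod_frac * 255.0f + 0.5f));   /* bias_int=-2 bias=191 */
        return 0;
}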
+static enum mali_sampler_type
+midgard_sampler_type(nir_alu_type t)
 {
-        switch (op) {
-                case nir_texop_tex:
-                case nir_texop_txb:
-                        return TEXTURE_OP_NORMAL;
-                case nir_texop_txl:
-                        return TEXTURE_OP_LOD;
+        switch (nir_alu_type_get_base_type(t)) {
+                case nir_type_float:
+                        return MALI_SAMPLER_FLOAT;
+                case nir_type_int:
+                        return MALI_SAMPLER_SIGNED;
+                case nir_type_uint:
+                        return MALI_SAMPLER_UNSIGNED;
                 default:
-                        unreachable("Unhanlded texture op");
+                        unreachable("Unknown sampler type");
         }
 }
 
 static void
-emit_tex(compiler_context *ctx, nir_tex_instr *instr)
+emit_texop_native(compiler_context *ctx, nir_tex_instr *instr,
+                 unsigned midgard_texop)
 {
         /* TODO */
         //assert (!instr->sampler);
@@ -1378,7 +1531,26 @@ emit_tex(compiler_context *ctx, nir_tex_instr *instr)
         int texture_index = instr->texture_index;
         int sampler_index = texture_index;
 
-        unsigned position_swizzle = 0;
+        /* No helper to build texture words -- we do it all here */
+        midgard_instruction ins = {
+                .type = TAG_TEXTURE_4,
+                .texture = {
+                        .op = midgard_texop,
+                        .format = midgard_tex_format(instr->sampler_dim),
+                        .texture_handle = texture_index,
+                        .sampler_handle = sampler_index,
+
+                        /* TODO: Regalloc it in */
+                        .swizzle = SWIZZLE_XYZW,
+                        .mask = 0xF,
+
+                        /* TODO: half */
+                        .in_reg_full = 1,
+                        .out_full = 1,
+
+                        .sampler_type = midgard_sampler_type(instr->dest_type),
+                }
+        };
 
         for (unsigned i = 0; i < instr->num_srcs; ++i) {
                 int reg = SSA_FIXED_REGISTER(REGISTER_TEXTURE_BASE + in_reg);
@@ -1389,6 +1561,9 @@ emit_tex(compiler_context *ctx, nir_tex_instr *instr)
                 switch (instr->src[i].src_type) {
                 case nir_tex_src_coord: {
                         if (instr->sampler_dim == GLSL_SAMPLER_DIM_CUBE) {
+                                /* texelFetch is undefined on samplerCube */
+                                assert(midgard_texop != TEXTURE_OP_TEXEL_FETCH);
+
                                 /* For cubemaps, we need to load coords into
                                  * special r27, and then use a special ld/st op
                                  * to select the face and copy the xy into the
@@ -1405,23 +1580,39 @@ emit_tex(compiler_context *ctx, nir_tex_instr *instr)
                                 st.load_store.swizzle = alu_src.swizzle;
                                 emit_mir_instruction(ctx, st);
 
-                                position_swizzle = swizzle_of(2);
+                                ins.texture.in_reg_swizzle = swizzle_of(2);
                         } else {
-                                position_swizzle = alu_src.swizzle = swizzle_of(nr_comp);
-
-                                midgard_instruction ins = v_mov(index, alu_src, reg);
-                                ins.alu.mask = expand_writemask(mask_of(nr_comp));
-                                emit_mir_instruction(ctx, ins);
-
-                                /* To the hardware, z is depth, w is array
-                                 * layer. To NIR, z is array layer for a 2D
-                                 * array */
-
-                                bool has_array = instr->texture_array_size > 0;
-                                bool is_2d = instr->sampler_dim == GLSL_SAMPLER_DIM_2D;
-
-                                if (is_2d && has_array)
-                                        position_swizzle = SWIZZLE_XYXZ;
+                                ins.texture.in_reg_swizzle = alu_src.swizzle = swizzle_of(nr_comp);
+
+                                midgard_instruction mov = v_mov(index, alu_src, reg);
+                                mov.alu.mask = expand_writemask(mask_of(nr_comp));
+                                emit_mir_instruction(ctx, mov);
+
+                                if (midgard_texop == TEXTURE_OP_TEXEL_FETCH) {
+                                        /* Texel fetch opcodes care about the
+                                         * values of z and w, so we actually
+                                         * need to spill into a second register
+                                         * for a texel fetch with register bias
+                                         * (for non-2D). TODO: Implement that
+                                         */
+
+                                        assert(instr->sampler_dim == GLSL_SAMPLER_DIM_2D);
+
+                                        midgard_instruction zero = v_mov(index, alu_src, reg);
+                                        zero.ssa_args.inline_constant = true;
+                                        zero.ssa_args.src1 = SSA_FIXED_REGISTER(REGISTER_CONSTANT);
+                                        zero.has_constants = true;
+                                        zero.alu.mask = ~mov.alu.mask;
+                                        emit_mir_instruction(ctx, zero);
+
+                                        ins.texture.in_reg_swizzle = SWIZZLE_XYZZ;
+                                } else {
+                                        /* Non-texel fetch doesn't need that
+                                         * nonsense. However we do use the Z
+                                         * for array indexing */
+                                        bool is_3d = instr->sampler_dim == GLSL_SAMPLER_DIM_3D;
+                                        ins.texture.in_reg_swizzle = is_3d ? SWIZZLE_XYZZ : SWIZZLE_XYXZ;
+                                }
                         }
 
                         break;
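To make the coordinate swizzles above concrete: NIR supplies (s, t, layer) for a 2D array lookup, while the hardware, per the comment the old code carried, reads depth from .z and the array layer from .w, so SWIZZLE_XYXZ routes the layer into .w. A minimal sketch, assuming the same 2-bits-per-lane encoding:

#include <stdio.h>

#define SWIZZLE(a, b, c, d) ((a) | ((b) << 2) | ((c) << 4) | ((d) << 6))

int main(void)
{
        const char *nir_coord[4] = { "s", "t", "layer", "-" };
        unsigned swz = SWIZZLE(0, 1, 0, 2);              /* SWIZZLE_XYXZ */

        for (unsigned lane = 0; lane < 4; ++lane)        /* hardware sees: s t s layer */
                printf("%s%c", nir_coord[(swz >> (2 * lane)) & 3],
                       lane == 3 ? '\n' : ' ');
        return 0;
}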
@@ -1429,14 +1620,37 @@ emit_tex(compiler_context *ctx, nir_tex_instr *instr)
 
                 case nir_tex_src_bias:
                 case nir_tex_src_lod: {
-                        /* To keep RA simple, we put the bias/LOD into the w
-                         * component of the input source, which is otherwise in xy */
+                        /* Try as a constant if we can */
+
+                        bool is_txf = midgard_texop == TEXTURE_OP_TEXEL_FETCH;
+                        if (!is_txf && pan_attach_constant_bias(ctx, instr->src[i].src, &ins.texture))
+                                break;
+
+                        /* Otherwise we use a register. To keep RA simple, we
+                         * put the bias/LOD into the w component of the input
+                         * source, which is otherwise in xy */
 
                         alu_src.swizzle = SWIZZLE_XXXX;
 
-                        midgard_instruction ins = v_mov(index, alu_src, reg);
-                        ins.alu.mask = expand_writemask(1 << COMPONENT_W);
-                        emit_mir_instruction(ctx, ins);
+                        midgard_instruction mov = v_mov(index, alu_src, reg);
+                        mov.alu.mask = expand_writemask(1 << COMPONENT_W);
+                        emit_mir_instruction(ctx, mov);
+
+                        ins.texture.lod_register = true;
+
+                        midgard_tex_register_select sel = {
+                                .select = in_reg,
+                                .full = 1,
+
+                                /* w */
+                                .component_lo = 1,
+                                .component_hi = 1
+                        };
+
+                        uint8_t packed;
+                        memcpy(&packed, &sel, sizeof(packed));
+                        ins.texture.bias = packed;
+
                         break;
                };
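The register-select byte above is produced by memcpy'ing a bit-field struct into a uint8_t, which keeps the reinterpretation well defined under strict aliasing. A standalone sketch with a stand-in layout (the real field order is whatever midgard_tex_register_select declares, not reproduced here):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct register_select {            /* hypothetical stand-in for midgard_tex_register_select */
        uint8_t select       : 2;
        uint8_t full         : 1;
        uint8_t component_lo : 2;
        uint8_t component_hi : 2;
        uint8_t padding      : 1;
};

int main(void)
{
        struct register_select sel = {
                .select = 1, .full = 1, .component_lo = 1, .component_hi = 1
        };

        uint8_t packed;
        memcpy(&packed, &sel, sizeof(packed));   /* reinterpret the bit-fields as one byte */
        printf("0x%02x\n", (unsigned) packed);
        return 0;
}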
 
@@ -1445,53 +1659,10 @@ emit_tex(compiler_context *ctx, nir_tex_instr *instr)
                 }
         }
 
-        /* No helper to build texture words -- we do it all here */
-        midgard_instruction ins = {
-                .type = TAG_TEXTURE_4,
-                .texture = {
-                        .op = midgard_tex_op(instr->op),
-                        .format = midgard_tex_format(instr->sampler_dim),
-                        .texture_handle = texture_index,
-                        .sampler_handle = sampler_index,
-
-                        /* TODO: Regalloc it in */
-                        .swizzle = SWIZZLE_XYZW,
-                        .mask = 0xF,
-
-                        /* TODO: half */
-                        .in_reg_full = 1,
-                        .in_reg_swizzle = position_swizzle,
-                        .out_full = 1,
-
-                        /* Always 1 */
-                        .unknown7 = 1,
-                }
-        };
-
         /* Set registers to read and write from the same place */
         ins.texture.in_reg_select = in_reg;
         ins.texture.out_reg_select = out_reg;
 
-        /* Setup bias/LOD if necessary. Only register mode support right now.
-         * TODO: Immediate mode for performance gains */
-
-        if (instr->op == nir_texop_txb || instr->op == nir_texop_txl) {
-                ins.texture.lod_register = true;
-
-                midgard_tex_register_select sel = {
-                        .select = in_reg,
-                        .full = 1,
-
-                        /* w */
-                        .component_lo = 1,
-                        .component_hi = 1
-                };
-
-                uint8_t packed;
-                memcpy(&packed, &sel, sizeof(packed));
-                ins.texture.bias = packed;
-        }
-
         emit_mir_instruction(ctx, ins);
 
         /* Simultaneously alias the destination and emit a move for it. The move will be eliminated if possible */
@@ -1507,6 +1678,36 @@ emit_tex(compiler_context *ctx, nir_tex_instr *instr)
         ctx->texture_op_count++;
 }
 
+static void
+emit_tex(compiler_context *ctx, nir_tex_instr *instr)
+{
+        /* Fixup op, since only textureLod is permitted in VS but NIR can give
+         * generic tex in some cases (which confuses the hardware) */
+
+        bool is_vertex = ctx->stage == MESA_SHADER_VERTEX;
+
+        if (is_vertex && instr->op == nir_texop_tex)
+                instr->op = nir_texop_txl;
+
+        switch (instr->op) {
+        case nir_texop_tex:
+        case nir_texop_txb:
+                emit_texop_native(ctx, instr, TEXTURE_OP_NORMAL);
+                break;
+        case nir_texop_txl:
+                emit_texop_native(ctx, instr, TEXTURE_OP_LOD);
+                break;
+        case nir_texop_txf:
+                emit_texop_native(ctx, instr, TEXTURE_OP_TEXEL_FETCH);
+                break;
+        case nir_texop_txs:
+                emit_sysval_read(ctx, &instr->instr);
+                break;
+        default:
+                unreachable("Unhandled texture op");
+        }
+}
+
 static void
 emit_jump(compiler_context *ctx, nir_jump_instr *instr)
 {