pan/mdg: eliminate references to ins->alu.reg_mode
diff --git a/src/panfrost/midgard/midgard_compile.c b/src/panfrost/midgard/midgard_compile.c
index 5d5ffe50ce67eae555e89c6a5a332427e9049237..71191083794e58ca1383a0beb64404682f017cff 100644
--- a/src/panfrost/midgard/midgard_compile.c
+++ b/src/panfrost/midgard/midgard_compile.c
@@ -137,6 +137,7 @@ M_LOAD(ld_int4, nir_type_uint32);
 M_STORE(st_int4, nir_type_uint32);
 M_LOAD(ld_color_buffer_32u, nir_type_uint32);
 M_LOAD(ld_color_buffer_as_fp16, nir_type_float16);
+M_LOAD(ld_color_buffer_as_fp32, nir_type_float32);
 M_STORE(st_vary_32, nir_type_uint32);
 M_LOAD(ld_cubemap_coords, nir_type_uint32);
 M_LOAD(ld_compute_id, nir_type_uint32);
@@ -256,9 +257,9 @@ midgard_nir_lower_fdot2(nir_shader *shader)
 }
 
 static const nir_variable *
-search_var(struct exec_list *vars, unsigned driver_loc)
+search_var(nir_shader *nir, nir_variable_mode mode, unsigned driver_loc)
 {
-        nir_foreach_variable(var, vars) {
+        nir_foreach_variable_with_modes(var, nir, mode) {
                 if (var->data.driver_location == driver_loc)
                         return var;
         }
@@ -278,7 +279,7 @@ midgard_nir_lower_zs_store(nir_shader *nir)
 
         nir_variable *z_var = NULL, *s_var = NULL;
 
-        nir_foreach_variable(var, &nir->outputs) {
+        nir_foreach_shader_out_variable(var, nir) {
                 if (var->data.location == FRAG_RESULT_DEPTH)
                         z_var = var;
                 else if (var->data.location == FRAG_RESULT_STENCIL)
@@ -329,13 +330,16 @@ midgard_nir_lower_zs_store(nir_shader *nir)
                                 if (intr->intrinsic != nir_intrinsic_store_output)
                                         continue;
 
-                                const nir_variable *var = search_var(&nir->outputs, nir_intrinsic_base(intr));
+                                const nir_variable *var = search_var(nir, nir_var_shader_out, nir_intrinsic_base(intr));
                                 assert(var);
 
                                 if (var->data.location != FRAG_RESULT_COLOR &&
                                     var->data.location < FRAG_RESULT_DATA0)
                                         continue;
 
+                                if (var->data.index)
+                                        continue;
+
                                 assert(nir_src_is_const(intr->src[1]) && "no indirect outputs");
 
                                 nir_builder b;
@@ -401,7 +405,12 @@ midgard_nir_lower_zs_store(nir_shader *nir)
 
                         combined_store->num_components = 4;
 
-                        nir_intrinsic_set_base(combined_store, 0);
+                        unsigned base;
+                        if (z_store)
+                                base = nir_intrinsic_base(z_store);
+                        else
+                                base = nir_intrinsic_base(s_store);
+                        nir_intrinsic_set_base(combined_store, base);
 
                         unsigned writeout = 0;
                         if (z_store)
@@ -439,6 +448,50 @@ midgard_nir_lower_zs_store(nir_shader *nir)
         return progress;
 }
 
+/* Real writeout stores, which break execution, need to be moved to after
+ * dual-source stores, which are just standard register writes. */
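+/* For example (illustrative, not part of the original patch), a block might
+ * end with:
+ *
+ *    store_output  <- var->data.index == 0: real writeout, breaks execution
+ *    store_output  <- var->data.index == 1: dual-source, plain write to r2
+ *
+ * Nothing after a writeout executes, so the first store must be moved below
+ * the second. */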
+static bool
+midgard_nir_reorder_writeout(nir_shader *nir)
+{
+        bool progress = false;
+
+        nir_foreach_function(function, nir) {
+                if (!function->impl) continue;
+
+                nir_foreach_block(block, function->impl) {
+                        nir_instr *last_writeout = NULL;
+
+                        nir_foreach_instr_reverse_safe(instr, block) {
+                                if (instr->type != nir_instr_type_intrinsic)
+                                        continue;
+
+                                nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);
+                                if (intr->intrinsic != nir_intrinsic_store_output)
+                                        continue;
+
+                                const nir_variable *var = search_var(nir, nir_var_shader_out, nir_intrinsic_base(intr));
+
+                                if (var->data.index) {
+                                        if (!last_writeout)
+                                                last_writeout = instr;
+                                        continue;
+                                }
+
+                                if (!last_writeout)
+                                        continue;
+
+                                /* This is a real store, so move it to after dual-source stores */
+                                exec_node_remove(&instr->node);
+                                exec_node_insert_after(&last_writeout->node, &instr->node);
+
+                                progress = true;
+                        }
+                }
+        }
+
+        return progress;
+}
+
 /* Flushes undefined values to zero */
 
 static void
@@ -653,60 +706,6 @@ nir_is_non_scalar_swizzle(nir_alu_src *src, unsigned nr_components)
                op = midgard_alu_op_##_op; \
                 ALU_CHECK_CMP(sext); \
                  break;
-       
-/* Analyze the sizes of the dest and inputs to determine reg mode. */
-
-static midgard_reg_mode
-reg_mode_for_nir(nir_alu_instr *instr)
-{
-        unsigned src_bitsize = nir_src_bit_size(instr->src[0].src);
-        unsigned dst_bitsize = nir_dest_bit_size(instr->dest.dest);
-        unsigned max_bitsize = MAX2(src_bitsize, dst_bitsize);
-
-        /* We don't have fp16 LUTs, so we'll want to emit code like:
-         *
-         *      vlut.fsinr hr0, hr0
-         *
-         * where both input and output are 16-bit but the operation is carried
-         * out in 32-bit
-         */
-
-        switch (instr->op) {
-        case nir_op_fsqrt:
-        case nir_op_frcp:
-        case nir_op_frsq:
-        case nir_op_fsin:
-        case nir_op_fcos:
-        case nir_op_fexp2:
-        case nir_op_flog2:
-                max_bitsize = MAX2(max_bitsize, 32);
-                break;
-
-        /* These get lowered to moves */
-        case nir_op_pack_32_4x8:
-                max_bitsize = 8;
-                break;
-        case nir_op_pack_32_2x16:
-                max_bitsize = 16;
-                break;
-        default:
-                break;
-        }
-
-
-        switch (max_bitsize) {
-                /* Use 16 pipe for 8 since we don't support vec16 yet */
-        case 8:
-        case 16:
-                return midgard_reg_mode_16;
-        case 32:
-                return midgard_reg_mode_32;
-        case 64:
-                return midgard_reg_mode_64;
-        default:
-                unreachable("Invalid bit size");
-        }
-}
 
 /* Compare mir_lower_invert */
 static bool
@@ -859,10 +858,6 @@ emit_alu(compiler_context *ctx, nir_alu_instr *instr)
 
         unsigned broadcast_swizzle = 0;
 
-        /* What register mode should we operate in? */
-        midgard_reg_mode reg_mode =
-                reg_mode_for_nir(instr);
-
         /* Should we swap arguments? */
         bool flip_src12 = false;
 
@@ -1095,7 +1090,7 @@ emit_alu(compiler_context *ctx, nir_alu_instr *instr)
          * fsat alone.
          */
 
-        if (!is_int && !(opcode_props & OP_TYPE_CONVERT)) {
+        if (!midgard_is_integer_out_op(op)) {
                 bool fpos = mir_accept_dest_mod(ctx, &dest, nir_op_fclamp_pos);
                 bool fsat = mir_accept_dest_mod(ctx, &dest, nir_op_fsat);
                 bool ssat = mir_accept_dest_mod(ctx, &dest, nir_op_fsat_signed);
@@ -1167,8 +1162,6 @@ emit_alu(compiler_context *ctx, nir_alu_instr *instr)
         ins.mask = mask_of(nr_components);
 
         midgard_vector_alu alu = {
-                .op = op,
-                .reg_mode = reg_mode,
                 .outmod = outmod,
         };
 
@@ -1181,6 +1174,8 @@ emit_alu(compiler_context *ctx, nir_alu_instr *instr)
 
         ins.alu = alu;
 
+        ins.op = op;
+
         /* Late fixup for emulated instructions */
 
         if (instr->op == nir_op_b2f32 || instr->op == nir_op_b2i32) {
@@ -1213,7 +1208,7 @@ emit_alu(compiler_context *ctx, nir_alu_instr *instr)
                 /* Lots of instructions need a 0 plonked in */
                 ins.has_inline_constant = false;
                 ins.src[1] = SSA_FIXED_REGISTER(REGISTER_CONSTANT);
-                ins.src_types[1] = nir_type_uint32;
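+                /* the constant zero now takes src0's type, presumably so a
+                 * 16-bit op gets a matching 16-bit zero instead of a forced
+                 * uint32 */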
+                ins.src_types[1] = ins.src_types[0];
                 ins.has_constants = true;
                 ins.constants.u32[0] = 0;
 
@@ -1582,6 +1577,22 @@ emit_vertex_builtin(compiler_context *ctx, nir_intrinsic_instr *instr)
         emit_attr_read(ctx, reg, vertex_builtin_arg(instr->intrinsic), 1, nir_type_int);
 }
 
+static void
+emit_msaa_builtin(compiler_context *ctx, nir_intrinsic_instr *instr)
+{
+        unsigned reg = nir_dest_index(&instr->dest);
+
+        midgard_instruction ld = m_ld_color_buffer_32u(reg, 0);
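+        /* The sample ID is fetched through the colour-buffer load path. The
+         * address (97) and arg_2 (0x1E) written below read as magic values
+         * from the hardware encoding; they are undocumented here, so treat
+         * them as given. */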
+        ld.load_store.op = midgard_op_ld_color_buffer_32u_old;
+        ld.load_store.address = 97;
+        ld.load_store.arg_2 = 0x1E;
+
+        for (int i = 0; i < 4; ++i)
+                ld.swizzle[0][i] = COMPONENT_X;
+
+        emit_mir_instruction(ctx, ld);
+}
+
 static void
 emit_control_barrier(compiler_context *ctx)
 {
@@ -1614,6 +1625,32 @@ mir_get_branch_cond(nir_src *src, bool *invert)
         return nir_src_index(NULL, &alu.src);
 }
 
+static uint8_t
+output_load_rt_addr(compiler_context *ctx, nir_intrinsic_instr *instr)
+{
+        if (ctx->is_blend)
+                return ctx->blend_rt;
+
+        const nir_variable *var;
+        var = search_var(ctx->nir, nir_var_shader_out, nir_intrinsic_base(instr));
+        assert(var);
+
+        unsigned loc = var->data.location;
+
+        if (loc == FRAG_RESULT_COLOR)
+                loc = FRAG_RESULT_DATA0;
+
+        if (loc >= FRAG_RESULT_DATA0)
+                return loc - FRAG_RESULT_DATA0;
+
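+        /* Depth and stencil are not colour RTs; 0x1F/0x1E look like the
+         * special addresses reserved for them (the same 0x1E appears in the
+         * old-blend paths elsewhere in this file). */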
+        if (loc == FRAG_RESULT_DEPTH)
+                return 0x1F;
+        if (loc == FRAG_RESULT_STENCIL)
+                return 0x1E;
+
+        unreachable("Invalid RT to load from");
+}
+
 static void
 emit_intrinsic(compiler_context *ctx, nir_intrinsic_instr *instr)
 {
@@ -1694,13 +1731,15 @@ emit_intrinsic(compiler_context *ctx, nir_intrinsic_instr *instr)
                 } else if (ctx->stage == MESA_SHADER_FRAGMENT && !ctx->is_blend) {
                         emit_varying_read(ctx, reg, offset, nr_comp, component, indirect_offset, t | nir_dest_bit_size(instr->dest), is_flat);
                 } else if (ctx->is_blend) {
-                        /* ctx->blend_input will be precoloured to r0, where
+                        /* ctx->blend_input will be precoloured to r0/r2, where
                          * the input is preloaded */
 
-                        if (ctx->blend_input == ~0)
-                                ctx->blend_input = reg;
+                        unsigned *input = offset ? &ctx->blend_src1 : &ctx->blend_input;
+
+                        if (*input == ~0)
+                                *input = reg;
                         else
-                                emit_mir_instruction(ctx, v_mov(ctx->blend_input, reg));
+                                emit_mir_instruction(ctx, v_mov(*input, reg));
                 } else if (ctx->stage == MESA_SHADER_VERTEX) {
                         emit_attr_read(ctx, reg, offset, nr_comp, t);
                 } else {
@@ -1726,6 +1765,16 @@ emit_intrinsic(compiler_context *ctx, nir_intrinsic_instr *instr)
 
                 midgard_instruction ld = m_ld_color_buffer_32u(reg, 0);
 
+                ld.load_store.arg_2 = output_load_rt_addr(ctx, instr);
+
+                if (nir_src_is_const(instr->src[0])) {
+                        ld.load_store.arg_1 = nir_src_as_uint(instr->src[0]);
+                } else {
+                        ld.load_store.varying_parameters = 2;
+                        ld.src[1] = nir_src_index(ctx, &instr->src[0]);
+                        ld.src_types[1] = nir_type_int32;
+                }
+
                 if (ctx->quirks & MIDGARD_OLD_BLEND) {
                         ld.load_store.op = midgard_op_ld_color_buffer_32u_old;
                         ld.load_store.address = 16;
@@ -1739,13 +1788,24 @@ emit_intrinsic(compiler_context *ctx, nir_intrinsic_instr *instr)
         case nir_intrinsic_load_output: {
                 reg = nir_dest_index(&instr->dest);
 
-                midgard_instruction ld = m_ld_color_buffer_as_fp16(reg, 0);
+                unsigned bits = nir_dest_bit_size(instr->dest);
+
+                midgard_instruction ld;
+                if (bits == 16)
+                        ld = m_ld_color_buffer_as_fp16(reg, 0);
+                else
+                        ld = m_ld_color_buffer_as_fp32(reg, 0);
+
+                ld.load_store.arg_2 = output_load_rt_addr(ctx, instr);
 
                 for (unsigned c = 4; c < 16; ++c)
                         ld.swizzle[0][c] = 0;
 
                 if (ctx->quirks & MIDGARD_OLD_BLEND) {
-                        ld.load_store.op = midgard_op_ld_color_buffer_as_fp16_old;
+                        if (bits == 16)
+                                ld.load_store.op = midgard_op_ld_color_buffer_as_fp16_old;
+                        else
+                                ld.load_store.op = midgard_op_ld_color_buffer_as_fp32_old;
                         ld.load_store.address = 1;
                         ld.load_store.arg_2 = 0x1E;
                 }
@@ -1781,11 +1841,30 @@ emit_intrinsic(compiler_context *ctx, nir_intrinsic_instr *instr)
                                 nir_intrinsic_store_combined_output_pan;
 
                         const nir_variable *var;
-                        enum midgard_rt_id rt;
-
-                        var = search_var(&ctx->nir->outputs,
+                        var = search_var(ctx->nir, nir_var_shader_out,
                                          nir_intrinsic_base(instr));
                         assert(var);
+
+                        /* Dual-source blend writeout is done by leaving the
+                         * value in r2 for the blend shader to use. */
+                        if (var->data.index) {
+                                if (instr->src[0].is_ssa) {
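+                                        /* copy to a fresh temp, presumably so
+                                         * RA can place blend_src1 in r2 without
+                                         * constraining the original SSA
+                                         * definition */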
+                                        emit_explicit_constant(ctx, reg, reg);
+
+                                        unsigned out = make_compiler_temp(ctx);
+
+                                        midgard_instruction ins = v_mov(reg, out);
+                                        emit_mir_instruction(ctx, ins);
+
+                                        ctx->blend_src1 = out;
+                                } else {
+                                        ctx->blend_src1 = reg;
+                                }
+
+                                break;
+                        }
+
+                        enum midgard_rt_id rt;
                         if (var->data.location == FRAG_RESULT_COLOR)
                                 rt = MIDGARD_COLOR_RT0;
                         else if (var->data.location >= FRAG_RESULT_DATA0)
@@ -1905,6 +1984,10 @@ emit_intrinsic(compiler_context *ctx, nir_intrinsic_instr *instr)
                 emit_vertex_builtin(ctx, instr);
                 break;
 
+        case nir_intrinsic_load_sample_id:
+                emit_msaa_builtin(ctx, instr);
+                break;
+
         case nir_intrinsic_memory_barrier_buffer:
         case nir_intrinsic_memory_barrier_shared:
                 break;
@@ -2301,6 +2384,61 @@ inline_alu_constants(compiler_context *ctx, midgard_block *block)
         }
 }
 
+unsigned
+max_bitsize_for_alu(midgard_instruction *ins)
+{
+        unsigned max_bitsize = 0;
+        for (int i = 0; i < MIR_SRC_COUNT; i++) {
+                if (ins->src[i] == ~0) continue;
+                unsigned src_bitsize = nir_alu_type_get_type_size(ins->src_types[i]);
+                max_bitsize = MAX2(src_bitsize, max_bitsize);
+        }
+        unsigned dst_bitsize = nir_alu_type_get_type_size(ins->dest_type);
+        max_bitsize = MAX2(dst_bitsize, max_bitsize);
+
+        /* We don't have fp16 LUTs, so we'll want to emit code like:
+         *
+         *      vlut.fsinr hr0, hr0
+         *
+         * where both input and output are 16-bit but the operation is carried
+         * out in 32-bit
+         */
+
+        switch (ins->op) {
+        case midgard_alu_op_fsqrt:
+        case midgard_alu_op_frcp:
+        case midgard_alu_op_frsqrt:
+        case midgard_alu_op_fsin:
+        case midgard_alu_op_fcos:
+        case midgard_alu_op_fexp2:
+        case midgard_alu_op_flog2:
+                max_bitsize = MAX2(max_bitsize, 32);
+                break;
+
+        default:
+                break;
+        }
+
+        return max_bitsize;
+}
+
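+/* Together with max_bitsize_for_alu() above, this replaces the removed
+ * reg_mode_for_nir(): the reg mode is now computed on demand from the
+ * instruction's src/dest types (presumably at pack time) rather than stored
+ * in ins->alu.reg_mode. */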
+midgard_reg_mode
+reg_mode_for_bitsize(unsigned bitsize)
+{
+        switch (bitsize) {
+                /* use 16 pipe for 8 since we don't support vec16 yet */
+        case 8:
+        case 16:
+                return midgard_reg_mode_16;
+        case 32:
+                return midgard_reg_mode_32;
+        case 64:
+                return midgard_reg_mode_64;
+        default:
+                unreachable("invalid bit size");
+        }
+}
+
 /* Midgard supports two types of constants, embedded constants (128-bit) and
  * inline constants (16-bit). Sometimes, especially with scalar ops, embedded
  * constants can be demoted to inline constants, for space savings and
@@ -2316,9 +2454,11 @@ embedded_to_inline_constant(compiler_context *ctx, midgard_block *block)
                 /* Blend constants must not be inlined by definition */
                 if (ins->has_blend_constant) continue;
 
+                unsigned max_bitsize = max_bitsize_for_alu(ins);
+
                 /* We can inline 32-bit (sometimes) or 16-bit (usually) */
-                bool is_16 = ins->alu.reg_mode == midgard_reg_mode_16;
-                bool is_32 = ins->alu.reg_mode == midgard_reg_mode_32;
+                bool is_16 = max_bitsize == 16;
+                bool is_32 = max_bitsize == 32;
 
                 if (!(is_16 || is_32))
                         continue;
@@ -2327,7 +2467,7 @@ embedded_to_inline_constant(compiler_context *ctx, midgard_block *block)
                  * restrictions. So, if possible we try to flip the arguments
                  * in that case */
 
-                int op = ins->alu.op;
+                int op = ins->op;
 
                 if (ins->src[0] == SSA_FIXED_REGISTER(REGISTER_CONSTANT) &&
                                 alu_opcode_props[op].props & OP_COMMUTES) {
@@ -2379,7 +2519,7 @@ embedded_to_inline_constant(compiler_context *ctx, midgard_block *block)
                         uint32_t value = is_16 ? cons->u16[component] : cons->u32[component];
 
                         bool is_vector = false;
-                        unsigned mask = effective_writemask(&ins->alu, ins->mask);
+                        unsigned mask = effective_writemask(ins->op, ins->mask);
 
                         for (unsigned c = 0; c < MIR_VEC_COMPONENTS; ++c) {
                                 /* We only care if this component is actually used */
@@ -2439,8 +2579,8 @@ midgard_legalize_invert(compiler_context *ctx, midgard_block *block)
         mir_foreach_instr_in_block(block, ins) {
                 if (ins->type != TAG_ALU_4) continue;
 
-                if (ins->alu.op != midgard_alu_op_iand &&
-                    ins->alu.op != midgard_alu_op_ior) continue;
+                if (ins->op != midgard_alu_op_iand &&
+                    ins->op != midgard_alu_op_ior) continue;
 
                 if (ins->src_invert[1] || !ins->src_invert[0]) continue;
 
@@ -2717,7 +2857,7 @@ mir_add_writeout_loops(compiler_context *ctx)
 }
 
 int
-midgard_compile_shader_nir(nir_shader *nir, panfrost_program *program, bool is_blend, unsigned blend_rt, unsigned gpu_id, bool shaderdb)
+midgard_compile_shader_nir(nir_shader *nir, panfrost_program *program, bool is_blend, unsigned blend_rt, unsigned gpu_id, bool shaderdb, bool silent)
 {
         struct util_dynarray *compiled = &program->compiled;
 
@@ -2732,6 +2872,7 @@ midgard_compile_shader_nir(nir_shader *nir, panfrost_program *program, bool is_b
         ctx->alpha_ref = program->alpha_ref;
         ctx->blend_rt = MIDGARD_COLOR_RT0 + blend_rt;
         ctx->blend_input = ~0;
+        ctx->blend_src1 = ~0;
         ctx->quirks = midgard_get_quirks(gpu_id);
 
         /* Start off with a safe cutoff, allowing usage of all 16 work
@@ -2776,7 +2917,9 @@ midgard_compile_shader_nir(nir_shader *nir, panfrost_program *program, bool is_b
 
         optimise_nir(nir, ctx->quirks, is_blend);
 
-        if (midgard_debug & MIDGARD_DBG_SHADERS) {
+        NIR_PASS_V(nir, midgard_nir_reorder_writeout);
+
+        if ((midgard_debug & MIDGARD_DBG_SHADERS) && !silent) {
                 nir_print_shader(nir, stdout);
         }
 
@@ -3028,10 +3171,10 @@ midgard_compile_shader_nir(nir_shader *nir, panfrost_program *program, bool is_b
         program->blend_patch_offset = ctx->blend_constant_offset;
         program->tls_size = ctx->tls_size;
 
-        if (midgard_debug & MIDGARD_DBG_SHADERS)
+        if ((midgard_debug & MIDGARD_DBG_SHADERS) && !silent)
                 disassemble_midgard(stdout, program->compiled.data, program->compiled.size, gpu_id, ctx->stage);
 
-        if (midgard_debug & MIDGARD_DBG_SHADERDB || shaderdb) {
+        if ((midgard_debug & MIDGARD_DBG_SHADERDB || shaderdb) && !silent) {
                 unsigned nr_bundles = 0, nr_ins = 0;
 
                 /* Count instructions and bundles */