panfrost/midgard: Include loop count for shader-db
[mesa.git] / src / gallium / drivers / panfrost / midgard / midgard_compile.c
index 21197efa499ff844202bcd8e9a365809463d39c5..1e09e46218c5100252e7e1017f57e3c08a6b6b1f 100644
 static const struct debug_named_value debug_options[] = {
        {"msgs",      MIDGARD_DBG_MSGS,         "Print debug messages"},
        {"shaders",   MIDGARD_DBG_SHADERS,      "Dump shaders in NIR and MIR"},
+        {"shaderdb",  MIDGARD_DBG_SHADERDB,     "Prints shader-db statistics"},
        DEBUG_NAMED_VALUE_END
 };
 
 DEBUG_GET_ONCE_FLAGS_OPTION(midgard_debug, "MIDGARD_MESA_DEBUG", debug_options, 0)
 
+unsigned SHADER_DB_COUNT = 0;
+
 int midgard_debug = 0;
 
 #define DBG(fmt, ...) \
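The new "shaderdb" option needs a matching bit in midgard_compile.h. A sketch of the assumed companion change, following the existing power-of-two flag pattern (the exact value is a guess):

        /* Assumed addition to midgard_compile.h; MSGS and SHADERS already exist */
        #define MIDGARD_DBG_MSGS     0x0001
        #define MIDGARD_DBG_SHADERS  0x0002
        #define MIDGARD_DBG_SHADERDB 0x0004

With that in place, setting MIDGARD_MESA_DEBUG=shaderdb (the environment variable wired up by DEBUG_GET_ONCE_FLAGS_OPTION above) enables the statistics dump added at the end of this patch.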
@@ -88,6 +91,7 @@ midgard_block_add_successor(midgard_block *block, midgard_block *successor)
        static midgard_instruction m_##name(unsigned ssa, unsigned address) { \
                midgard_instruction i = { \
                        .type = TAG_LOAD_STORE_4, \
+                        .mask = 0xF, \
                        .ssa_args = { \
                                .rname = ssa, \
                                .uname = -1, \
@@ -95,7 +99,6 @@ midgard_block_add_successor(midgard_block *block, midgard_block *successor)
                        }, \
                        .load_store = { \
                                .op = midgard_op_##name, \
-                               .mask = 0xF, \
                                .swizzle = SWIZZLE_XYZW, \
                                .address = address \
                        } \
@@ -404,6 +407,8 @@ midgard_nir_lower_fdot2(nir_shader *shader)
         return progress;
 }
 
+/* Flushes undefined values to zero */
+
 static void
 optimise_nir(nir_shader *nir)
 {
@@ -464,6 +469,8 @@ optimise_nir(nir_shader *nir)
                 }
 
                 NIR_PASS(progress, nir, nir_opt_undef);
+                NIR_PASS(progress, nir, nir_undef_to_zero);
+
                 NIR_PASS(progress, nir, nir_opt_loop_unroll,
                          nir_var_shader_in |
                          nir_var_shader_out |
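The "Flushes undefined values to zero" comment above refers to the nir_undef_to_zero pass registered here; the pass itself lives in its own file. A minimal sketch of the idea, assuming the NIR builder helpers of this era (nir_imm_zero, nir_ssa_def_rewrite_uses):

        #include "nir_builder.h"

        /* Per-block worker; the real pass wraps this in the usual
         * nir_foreach_function / nir_foreach_block walk plus metadata
         * bookkeeping. Helper names assume the contemporary NIR API. */
        static bool
        undef_to_zero_block(nir_builder *b, nir_block *block)
        {
                bool progress = false;

                nir_foreach_instr_safe(instr, block) {
                        if (instr->type != nir_instr_type_ssa_undef)
                                continue;

                        nir_ssa_undef_instr *undef = nir_instr_as_ssa_undef(instr);

                        /* Materialize a zero of matching width, swap it in */
                        b->cursor = nir_before_instr(instr);
                        nir_ssa_def *zero = nir_imm_zero(b, undef->def.num_components,
                                                         undef->def.bit_size);
                        nir_ssa_def_rewrite_uses(&undef->def, nir_src_for_ssa(zero));

                        nir_instr_remove(instr);
                        progress = true;
                }

                return progress;
        }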
@@ -596,6 +603,7 @@ emit_condition(compiler_context *ctx, nir_src *src, bool for_branch, unsigned co
                 /* We need to set the conditional as close as possible */
                 .precede_break = true,
                 .unit = for_branch ? UNIT_SMUL : UNIT_SADD,
+                .mask = 1 << COMPONENT_W,
 
                 .ssa_args = {
                         .src0 = condition,
@@ -608,7 +616,6 @@ emit_condition(compiler_context *ctx, nir_src *src, bool for_branch, unsigned co
                         .outmod = midgard_outmod_int_wrap,
                         .reg_mode = midgard_reg_mode_32,
                         .dest_override = midgard_dest_override_none,
-                        .mask = (0x3 << 6), /* w */
                         .src1 = vector_alu_srco_unsigned(alu_src),
                         .src2 = vector_alu_srco_unsigned(alu_src)
                 },
@@ -637,6 +644,7 @@ emit_condition_mixed(compiler_context *ctx, nir_alu_src *src, unsigned nr_comp)
         midgard_instruction ins = {
                 .type = TAG_ALU_4,
                 .precede_break = true,
+                .mask = mask_of(nr_comp),
                 .ssa_args = {
                         .src0 = condition,
                         .src1 = condition,
@@ -647,7 +655,6 @@ emit_condition_mixed(compiler_context *ctx, nir_alu_src *src, unsigned nr_comp)
                         .outmod = midgard_outmod_int_wrap,
                         .reg_mode = midgard_reg_mode_32,
                         .dest_override = midgard_dest_override_none,
-                        .mask = expand_writemask(mask_of(nr_comp)),
                         .src1 = vector_alu_srco_unsigned(alu_src),
                         .src2 = vector_alu_srco_unsigned(alu_src)
                 },
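This is the recurring theme of the diff: the 4-bit NIR-style writemask now lives on midgard_instruction itself, and expansion into the hardware encoding is deferred to pack time. Sketches of the two helpers involved, reconstructed from their usage here (one bit per component going in; two bits per component in 32-bit register mode coming out, so 1 << COMPONENT_W corresponds to the old 0x3 << 6):

        /* Writemask covering the first nr_comp components, as used above */
        #define mask_of(nr_comp) ((1 << (nr_comp)) - 1)

        /* Sketch of the expansion now done when packing 32-bit mode:
         * each mask bit widens to two bits in the hardware field */
        static unsigned
        expand_writemask_32(unsigned mask)
        {
                unsigned expanded = 0;

                for (unsigned c = 0; c < 4; ++c)
                        if (mask & (1 << c))
                                expanded |= (0x3 << (2 * c));

                return expanded;
        }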
@@ -668,6 +675,7 @@ emit_indirect_offset(compiler_context *ctx, nir_src *src)
 
         midgard_instruction ins = {
                 .type = TAG_ALU_4,
+                .mask = 1 << COMPONENT_W,
                 .ssa_args = {
                         .src0 = SSA_UNUSED_1,
                         .src1 = offset,
@@ -678,7 +686,6 @@ emit_indirect_offset(compiler_context *ctx, nir_src *src)
                         .outmod = midgard_outmod_int_wrap,
                         .reg_mode = midgard_reg_mode_32,
                         .dest_override = midgard_dest_override_none,
-                        .mask = (0x3 << 6), /* w */
                         .src1 = vector_alu_srco_unsigned(zero_alu_src),
                         .src2 = vector_alu_srco_unsigned(blank_alu_src_xxxx)
                 },
@@ -1062,15 +1069,14 @@ emit_alu(compiler_context *ctx, nir_alu_instr *instr)
 
         bool is_int = midgard_is_integer_op(op);
 
+        ins.mask = mask_of(nr_components);
+
         midgard_vector_alu alu = {
                 .op = op,
                 .reg_mode = reg_mode,
                 .dest_override = dest_override,
                 .outmod = outmod,
 
-                /* Writemask only valid for non-SSA NIR */
-                .mask = expand_writemask(mask_of(nr_components)),
-
                 .src1 = vector_alu_srco_unsigned(vector_alu_modifiers(nirmods[0], is_int, broadcast_swizzle, half_1, sext_1)),
                 .src2 = vector_alu_srco_unsigned(vector_alu_modifiers(nirmods[1], is_int, broadcast_swizzle, half_2, sext_2)),
         };
@@ -1078,7 +1084,7 @@ emit_alu(compiler_context *ctx, nir_alu_instr *instr)
         /* Apply writemask if non-SSA, keeping in mind that we can't write to components that don't exist */
 
         if (!is_ssa)
-                alu.mask &= expand_writemask(instr->dest.write_mask);
+                ins.mask &= instr->dest.write_mask;
 
         ins.alu = alu;
 
@@ -1123,15 +1129,16 @@ emit_alu(compiler_context *ctx, nir_alu_instr *instr)
 
                 uint8_t original_swizzle[4];
                 memcpy(original_swizzle, nirmods[0]->swizzle, sizeof(nirmods[0]->swizzle));
+                unsigned orig_mask = ins.mask;
 
                 for (int i = 0; i < nr_components; ++i) {
                         /* Mask the associated component, dropping the
                          * instruction if needed */
 
-                        ins.alu.mask = (0x3) << (2 * i);
-                        ins.alu.mask &= alu.mask;
+                        ins.mask = 1 << i;
+                        ins.mask &= orig_mask;
 
-                        if (!ins.alu.mask)
+                        if (!ins.mask)
                                 continue;
 
                         for (int j = 0; j < 4; ++j)
@@ -1203,7 +1210,7 @@ emit_varying_read(
         /* TODO: swizzle, mask */
 
         midgard_instruction ins = m_ld_vary_32(dest, offset);
-        ins.load_store.mask = mask_of(nr_comp);
+        ins.mask = mask_of(nr_comp);
         ins.load_store.swizzle = SWIZZLE_XYZW >> (2 * component);
 
         midgard_varying_parameter p = {
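The swizzle shift above leans on the packing: four 2-bit selectors with X in the low bits. Assuming the usual helper layout:

        /* Reconstructed packing (low bits hold the first selector) */
        #define SWIZZLE(A, B, C, D) \
                (((A) << 0) | ((B) << 2) | ((C) << 4) | ((D) << 6))
        #define SWIZZLE_XYZW \
                SWIZZLE(COMPONENT_X, COMPONENT_Y, COMPONENT_Z, COMPONENT_W)

        /* component = 1: 0xE4 >> 2 = 0x39, i.e. selectors Y, Z, W, X.
         * The wrapped X in the top slot is harmless because the mask
         * set above limits the write to nr_comp components. */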
@@ -1342,7 +1349,7 @@ emit_intrinsic(compiler_context *ctx, nir_intrinsic_instr *instr)
                 }  else if (ctx->stage == MESA_SHADER_VERTEX) {
                         midgard_instruction ins = m_ld_attr_32(reg, offset);
                         ins.load_store.unknown = 0x1E1E; /* XXX: What is this? */
-                        ins.load_store.mask = mask_of(nr_comp);
+                        ins.mask = mask_of(nr_comp);
 
                         /* Use the type appropriate load */
                         switch (t) {
@@ -1574,6 +1581,7 @@ emit_texop_native(compiler_context *ctx, nir_tex_instr *instr,
         /* No helper to build texture words -- we do it all here */
         midgard_instruction ins = {
                 .type = TAG_TEXTURE_4,
+                .mask = 0xF,
                 .texture = {
                         .op = midgard_texop,
                         .format = midgard_tex_format(instr->sampler_dim),
@@ -1582,7 +1590,6 @@ emit_texop_native(compiler_context *ctx, nir_tex_instr *instr,
 
                         /* TODO: Regalloc it in */
                         .swizzle = SWIZZLE_XYZW,
-                        .mask = 0xF,
 
                         /* TODO: half */
                         .in_reg_full = 1,
@@ -1616,7 +1623,7 @@ emit_texop_native(compiler_context *ctx, nir_tex_instr *instr,
 
                                 midgard_instruction st = m_st_cubemap_coords(reg, 0);
                                 st.load_store.unknown = 0x24; /* XXX: What is this? */
-                                st.load_store.mask = 0x3; /* xy */
+                                st.mask = 0x3; /* xy */
                                 st.load_store.swizzle = alu_src.swizzle;
                                 emit_mir_instruction(ctx, st);
 
@@ -1625,7 +1632,7 @@ emit_texop_native(compiler_context *ctx, nir_tex_instr *instr,
                                 ins.texture.in_reg_swizzle = alu_src.swizzle = swizzle_of(nr_comp);
 
                                 midgard_instruction mov = v_mov(index, alu_src, reg);
-                                mov.alu.mask = expand_writemask(mask_of(nr_comp));
+                                mov.mask = mask_of(nr_comp);
                                 emit_mir_instruction(ctx, mov);
 
                                 if (midgard_texop == TEXTURE_OP_TEXEL_FETCH) {
@@ -1642,7 +1649,7 @@ emit_texop_native(compiler_context *ctx, nir_tex_instr *instr,
                                         zero.ssa_args.inline_constant = true;
                                         zero.ssa_args.src1 = SSA_FIXED_REGISTER(REGISTER_CONSTANT);
                                         zero.has_constants = true;
-                                        zero.alu.mask = ~mov.alu.mask;
+                                        zero.mask = ~mov.mask;
                                         emit_mir_instruction(ctx, zero);
 
                                         ins.texture.in_reg_swizzle = SWIZZLE_XYZZ;
@@ -1673,7 +1680,7 @@ emit_texop_native(compiler_context *ctx, nir_tex_instr *instr,
                         alu_src.swizzle = SWIZZLE_XXXX;
 
                         midgard_instruction mov = v_mov(index, alu_src, reg);
-                        mov.alu.mask = expand_writemask(1 << COMPONENT_W);
+                        mov.mask = 1 << COMPONENT_W;
                         emit_mir_instruction(ctx, mov);
 
                         ins.texture.lod_register = true;
@@ -1705,12 +1712,7 @@ emit_texop_native(compiler_context *ctx, nir_tex_instr *instr,
 
         emit_mir_instruction(ctx, ins);
 
-        /* Simultaneously alias the destination and emit a move for it. The move will be eliminated if possible */
-
         int o_reg = REGISTER_TEXTURE_BASE + out_reg, o_index = nir_dest_index(ctx, &instr->dest);
-        alias_ssa(ctx, o_index, SSA_FIXED_REGISTER(o_reg));
-        ctx->texture_index[reg] = o_index;
-
         midgard_instruction ins2 = v_mov(SSA_FIXED_REGISTER(o_reg), blank_alu_src, o_index);
         emit_mir_instruction(ctx, ins2);
 
@@ -1880,6 +1882,13 @@ embedded_to_inline_constant(compiler_context *ctx)
                 /* Blend constants must not be inlined by definition */
                 if (ins->has_blend_constant) continue;
 
+                /* We can inline 32-bit (sometimes) or 16-bit (usually) */
+                bool is_16 = ins->alu.reg_mode == midgard_reg_mode_16;
+                bool is_32 = ins->alu.reg_mode == midgard_reg_mode_32;
+
+                if (!(is_16 || is_32))
+                        continue;
+
                 /* src1 cannot be an inline constant due to encoding
                  * restrictions. So, if possible we try to flip the arguments
                  * in that case */
@@ -1930,7 +1939,7 @@ embedded_to_inline_constant(compiler_context *ctx)
                         /* Scale constant appropriately, if we can legally */
                         uint16_t scaled_constant = 0;
 
-                        if (midgard_is_integer_op(op)) {
+                        if (midgard_is_integer_op(op) || is_16) {
                                 unsigned int *iconstants = (unsigned int *) ins->constants;
                                 scaled_constant = (uint16_t) iconstants[component];
 
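For 32-bit float ops (the else branch that would follow this hunk), the constant has to survive a round trip through half precision, since the embedded field is only 16 bits wide. A sketch, assuming Mesa's half-float helpers from util/half_float.h:

        /* Float path: inline only if fp16 is lossless for this value */
        float original = ((float *) ins->constants)[component];
        uint16_t half = _mesa_float_to_half(original);

        if (_mesa_half_to_float(half) != original)
                continue; /* lossy: leave the constant in the embedded slot */

        scaled_constant = half;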
@@ -1969,7 +1978,7 @@ embedded_to_inline_constant(compiler_context *ctx)
                         uint32_t value = cons[component];
 
                         bool is_vector = false;
-                        unsigned mask = effective_writemask(&ins->alu);
+                        unsigned mask = effective_writemask(&ins->alu, ins->mask);
 
                         for (int c = 1; c < 4; ++c) {
                                 /* We only care if this component is actually used */
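effective_writemask now takes the instruction-level mask as a parameter instead of digging it out of the packed ALU word. A plausible shape for the updated helper, reconstructed from the call site (channel-count metadata names are approximate):

        /* Ops with a fixed channel count (dot products and friends)
         * ignore the encoded mask; everything else writes ins->mask */
        static unsigned
        effective_writemask(midgard_vector_alu *alu, unsigned existing_mask)
        {
                unsigned channel_count =
                        GET_CHANNEL_COUNT(alu_opcode_props[alu->op].props);

                if (channel_count)
                        return (1 << channel_count) - 1;

                return existing_mask;
        }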
@@ -2097,13 +2106,12 @@ mir_nontrivial_mod(midgard_vector_alu_src src, bool is_int, unsigned mask)
 static bool
 mir_nontrivial_source2_mod(midgard_instruction *ins)
 {
-        unsigned mask = squeeze_writemask(ins->alu.mask);
         bool is_int = midgard_is_integer_op(ins->alu.op);
 
         midgard_vector_alu_src src2 =
                 vector_alu_from_unsigned(ins->alu.src2);
 
-        return mir_nontrivial_mod(src2, is_int, mask);
+        return mir_nontrivial_mod(src2, is_int, ins->mask);
 }
 
 static bool
@@ -2219,55 +2227,6 @@ midgard_opt_pos_propagate(compiler_context *ctx, midgard_block *block)
         return progress;
 }
 
-static bool
-midgard_opt_copy_prop_tex(compiler_context *ctx, midgard_block *block)
-{
-        bool progress = false;
-
-        mir_foreach_instr_in_block_safe(block, ins) {
-                if (ins->type != TAG_ALU_4) continue;
-                if (!OP_IS_MOVE(ins->alu.op)) continue;
-
-                unsigned from = ins->ssa_args.src1;
-                unsigned to = ins->ssa_args.dest;
-
-                /* Make sure it's simple enough for us to handle */
-
-                if (from >= SSA_FIXED_MINIMUM) continue;
-                if (from >= ctx->func->impl->ssa_alloc) continue;
-                if (to < SSA_FIXED_REGISTER(REGISTER_TEXTURE_BASE)) continue;
-                if (to > SSA_FIXED_REGISTER(REGISTER_TEXTURE_BASE + 1)) continue;
-
-                bool eliminated = false;
-
-                mir_foreach_instr_in_block_from_rev(block, v, mir_prev_op(ins)) {
-                        /* The texture registers are not SSA so be careful.
-                         * Conservatively, just stop if we hit a texture op
-                         * (even if it may not write) to where we are */
-
-                        if (v->type != TAG_ALU_4)
-                                break;
-
-                        if (v->ssa_args.dest == from) {
-                                /* We don't want to track partial writes ... */
-                                if (v->alu.mask == 0xF) {
-                                        v->ssa_args.dest = to;
-                                        eliminated = true;
-                                }
-
-                                break;
-                        }
-                }
-
-                if (eliminated)
-                        mir_remove_instruction(ins);
-
-                progress |= eliminated;
-        }
-
-        return progress;
-}
-
 /* The following passes reorder MIR instructions to enable better scheduling */
 
 static void
@@ -2520,6 +2479,9 @@ emit_loop(struct compiler_context *ctx, nir_loop *nloop)
         /* Now that we've finished emitting the loop, free up the depth again
          * so we play nice with recursion amid nested loops */
         --ctx->current_loop_depth;
+
+        /* Count this loop for the shader-db stats dumped at the end */
+        ++ctx->loop_count;
 }
 
 static midgard_block *
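ctx->loop_count is new in this patch, so compiler_context presumably gains a counter alongside the existing current_loop_depth field; an assumed sketch of that companion change:

        /* Assumed addition to compiler_context (zero-initialized with
         * the rest of the struct): */
        unsigned loop_count; /* total nir_loops emitted, for shader-db */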
@@ -2594,7 +2556,7 @@ midgard_compile_shader_nir(nir_shader *nir, midgard_program *program, bool is_bl
                 .stage = nir->info.stage,
 
                 .is_blend = is_blend,
-                .blend_constant_offset = -1,
+                .blend_constant_offset = 0,
 
                 .alpha_ref = program->alpha_ref
         };
@@ -2693,7 +2655,6 @@ midgard_compile_shader_nir(nir_shader *nir, midgard_program *program, bool is_bl
                 mir_foreach_block(ctx, block) {
                         progress |= midgard_opt_pos_propagate(ctx, block);
                         progress |= midgard_opt_copy_prop(ctx, block);
-                        progress |= midgard_opt_copy_prop_tex(ctx, block);
                         progress |= midgard_opt_dead_code_eliminate(ctx, block);
                 }
         } while (progress);
@@ -2897,5 +2858,42 @@ midgard_compile_shader_nir(nir_shader *nir, midgard_program *program, bool is_bl
        if (midgard_debug & MIDGARD_DBG_SHADERS)
                disassemble_midgard(program->compiled.data, program->compiled.size);
 
+        if (midgard_debug & MIDGARD_DBG_SHADERDB) {
+                unsigned nr_bundles = 0, nr_ins = 0;
+
+                /* Count instructions and bundles */
+
+                mir_foreach_instr_global(ctx, ins) {
+                        nr_ins++;
+                }
+
+                mir_foreach_block(ctx, block) {
+                        nr_bundles += util_dynarray_num_elements(
+                                        &block->bundles, midgard_bundle);
+                }
+
+                /* Calculate thread count. Register pressure sets hard
+                 * cutoffs: up to 4 work registers runs 4 threads, up
+                 * to 8 runs 2, and anything beyond runs 1 */
+
+                unsigned nr_registers = program->work_register_count;
+
+                unsigned nr_threads =
+                        (nr_registers <= 4) ? 4 :
+                        (nr_registers <= 8) ? 2 :
+                                              1;
+
+                /* Dump stats */
+
+                fprintf(stderr, "shader%d - %s shader: "
+                                "%u inst, %u bundles, "
+                                "%u registers, %u threads, %u loops\n",
+                                SHADER_DB_COUNT++,
+                                gl_shader_stage_name(ctx->stage),
+                                nr_ins, nr_bundles,
+                                nr_registers, nr_threads,
+                                ctx->loop_count);
+        }
+
         return 0;
 }
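For reference, one line is printed per compiled shader; with hypothetical numbers, the format string above produces something like:

        shader5 - MESA_SHADER_FRAGMENT shader: 42 inst, 20 bundles, 6 registers, 2 threads, 0 loops

gl_shader_stage_name() yields the MESA_SHADER_* spelling, and the thread count here follows the register cutoffs (6 work registers falls in the "at most 8" bucket, hence 2 threads).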