pan/bi: Add a bunch of ALU ops
[mesa.git] / src / panfrost / bifrost / bifrost_compile.c
index a22d6420117185767af88c187245d09e824c50bc..b02089e69861c30b694b53436fe85f9268e2c376 100644 (file)
@@ -60,31 +60,49 @@ emit_jump(bi_context *ctx, nir_jump_instr *instr)
         bi_block_add_successor(ctx->current_block, branch->branch.target);
 }
 
-static void
-bi_emit_ld_vary(bi_context *ctx, nir_intrinsic_instr *instr)
+/* Gets a bytemask for a complete vecN write */
+static unsigned
+bi_mask_for_channels_32(unsigned channels)
 {
-        bi_instruction ins = {
-                .type = BI_LOAD_VAR,
-                .load_vary = {
-                        .load = {
-                                .location = nir_intrinsic_base(instr),
-                                .channels = instr->num_components,
-                        },
-                        .interp_mode = BIFROST_INTERP_DEFAULT, /* TODO */
-                        .reuse = false, /* TODO */
-                        .flat = instr->intrinsic != nir_intrinsic_load_interpolated_input
-                },
-                .dest = bir_dest_index(&instr->dest),
-                .dest_type = nir_type_float | nir_dest_bit_size(instr->dest),
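+        /* Each 32-bit channel occupies 4 bytes of the bytemask, so e.g.
+         * channels = 2 (a vec2) yields (1 << 8) - 1 = 0xff */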
+        return (1 << (4 * channels)) - 1;
+}
+
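+/* Builds a skeleton load instruction, shared by attribute, varying,
+ * uniform, and varying-address loads; callers override the class-specific
+ * fields afterwards */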
+static bi_instruction
+bi_load(enum bi_class T, nir_intrinsic_instr *instr)
+{
+        bi_instruction load = {
+                .type = T,
+                .writemask = bi_mask_for_channels_32(instr->num_components),
+                .src = { BIR_INDEX_CONSTANT },
+                .constant = { .u64 = nir_intrinsic_base(instr) },
         };
 
+        const nir_intrinsic_info *info = &nir_intrinsic_infos[instr->intrinsic];
+
+        if (info->has_dest)
+                load.dest = bir_dest_index(&instr->dest);
+
+        if (info->has_dest && info->index_map[NIR_INTRINSIC_TYPE] > 0)
+                load.dest_type = nir_intrinsic_type(instr);
+
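+        /* A constant offset folds into the base; an indirect offset instead
+         * occupies the first source slot */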
         nir_src *offset = nir_get_io_offset_src(instr);
 
         if (nir_src_is_const(*offset))
-                ins.load_vary.load.location += nir_src_as_uint(*offset);
+                load.constant.u64 += nir_src_as_uint(*offset);
         else
-                ins.src[0] = bir_src_index(offset);
+                load.src[0] = bir_src_index(offset);
+
+        return load;
+}
 
+static void
+bi_emit_ld_vary(bi_context *ctx, nir_intrinsic_instr *instr)
+{
+        bi_instruction ins = bi_load(BI_LOAD_VAR, instr);
+        ins.load_vary.interp_mode = BIFROST_INTERP_DEFAULT; /* TODO */
+        ins.load_vary.reuse = false; /* TODO */
+        ins.load_vary.flat = instr->intrinsic != nir_intrinsic_load_interpolated_input;
+        ins.dest_type = nir_type_float | nir_dest_bit_size(instr->dest);
         bi_emit(ctx, ins);
 }
 
@@ -106,6 +124,9 @@ bi_emit_frag_out(bi_context *ctx, nir_intrinsic_instr *instr)
                 .blend_location = nir_intrinsic_base(instr),
                 .src = {
                         bir_src_index(&instr->src[0])
+                },
+                .swizzle = {
+                        { 0, 1, 2, 3 }
                 }
         };
 
@@ -113,31 +134,35 @@ bi_emit_frag_out(bi_context *ctx, nir_intrinsic_instr *instr)
         bi_schedule_barrier(ctx);
 }
 
-static struct bi_load
-bi_direct_load_for_instr(nir_intrinsic_instr *instr)
+static void
+bi_emit_st_vary(bi_context *ctx, nir_intrinsic_instr *instr)
 {
-        nir_src *offset = nir_get_io_offset_src(instr);
-        assert(nir_src_is_const(*offset)); /* no indirects */
+        bi_instruction address = bi_load(BI_LOAD_VAR_ADDRESS, instr);
+        address.dest = bi_make_temp(ctx);
+        address.dest_type = nir_type_uint64;
+        address.writemask = (1 << 8) - 1; /* 64-bit address = all 8 bytes */
 
-        struct bi_load load = {
-                .location = nir_intrinsic_base(instr) + nir_src_as_uint(*offset),
-                .channels = instr->num_components
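+        /* The store reads the computed varying address from src[0] and the
+         * value to write from src[1] */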
+        bi_instruction st = {
+                .type = BI_STORE_VAR,
+                .src = {
+                        address.dest,
+                        bir_src_index(&instr->src[0])
+                },
+                .swizzle = {
+                        { 0, 1, 2, 3 }
+                }
         };
 
-        return load;
+        bi_emit(ctx, address);
+        bi_emit(ctx, st);
 }
 
 static void
-bi_emit_ld_attr(bi_context *ctx, nir_intrinsic_instr *instr)
+bi_emit_ld_uniform(bi_context *ctx, nir_intrinsic_instr *instr)
 {
-        bi_instruction load = {
-                .type = BI_LOAD_ATTR,
-                .load = bi_direct_load_for_instr(instr),
-                .dest = bir_dest_index(&instr->dest),
-                .dest_type = nir_intrinsic_type(instr)
-        };
-
-        bi_emit(ctx, load);
+        bi_instruction ld = bi_load(BI_LOAD_UNIFORM, instr);
+        ld.src[1] = BIR_INDEX_ZERO; /* TODO: UBO index */
+        bi_emit(ctx, ld);
 }
 
 static void
@@ -153,7 +178,7 @@ emit_intrinsic(bi_context *ctx, nir_intrinsic_instr *instr)
                 if (ctx->stage == MESA_SHADER_FRAGMENT)
                         bi_emit_ld_vary(ctx, instr);
                 else if (ctx->stage == MESA_SHADER_VERTEX)
-                        bi_emit_ld_attr(ctx, instr);
+                        bi_emit(ctx, bi_load(BI_LOAD_ATTR, instr));
                 else {
                         unreachable("Unsupported shader stage");
                 }
@@ -162,35 +187,192 @@ emit_intrinsic(bi_context *ctx, nir_intrinsic_instr *instr)
         case nir_intrinsic_store_output:
                 if (ctx->stage == MESA_SHADER_FRAGMENT)
                         bi_emit_frag_out(ctx, instr);
-                else {
-                        /* TODO */
-                }
+                else if (ctx->stage == MESA_SHADER_VERTEX)
+                        bi_emit_st_vary(ctx, instr);
+                else
+                        unreachable("Unsupported shader stage");
+                break;
+
+        case nir_intrinsic_load_uniform:
+                bi_emit_ld_uniform(ctx, instr);
                 break;
+
         default:
                 /* todo */
                 break;
         }
 }
 
+static void
+emit_load_const(bi_context *ctx, nir_load_const_instr *instr)
+{
+        /* nir_lower_load_const_to_scalar guarantees scalar constants here */
+        assert(instr->def.num_components == 1);
+
+        bi_instruction move = {
+                .type = BI_MOV,
+                .dest = bir_ssa_index(&instr->def),
+                .dest_type = instr->def.bit_size | nir_type_uint,
+                .writemask = (1 << (instr->def.bit_size / 8)) - 1,
+                .src = {
+                        BIR_INDEX_CONSTANT
+                },
+                .constant = {
+                        .u64 = nir_const_value_as_uint(instr->value[0], instr->def.bit_size)
+                }
+        };
+
+        bi_emit(ctx, move);
+}
+
+static enum bi_class
+bi_class_for_nir_alu(nir_op op)
+{
+        switch (op) {
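+        /* The class alone does not carry a type; emit_alu derives
+         * src_types and dest_type from nir_op_infos */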
+        case nir_op_iadd:
+        case nir_op_fadd:
+                return BI_ADD;
+
+        case nir_op_i2i8:
+        case nir_op_i2i16:
+        case nir_op_i2i32:
+        case nir_op_i2i64:
+        case nir_op_u2u8:
+        case nir_op_u2u16:
+        case nir_op_u2u32:
+        case nir_op_u2u64:
+        case nir_op_f2i16:
+        case nir_op_f2i32:
+        case nir_op_f2i64:
+        case nir_op_f2u16:
+        case nir_op_f2u32:
+        case nir_op_f2u64:
+        case nir_op_i2f16:
+        case nir_op_i2f32:
+        case nir_op_i2f64:
+        case nir_op_u2f16:
+        case nir_op_u2f32:
+        case nir_op_u2f64:
+                return BI_CONVERT;
+
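+        /* fmul maps onto FMA with a zero addend; see the fixup at the end
+         * of emit_alu */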
+        case nir_op_fmul:
+                return BI_FMA;
+
+        case nir_op_imin:
+        case nir_op_imax:
+        case nir_op_umin:
+        case nir_op_umax:
+        case nir_op_fmin:
+        case nir_op_fmax:
+                return BI_MINMAX;
+
+        case nir_op_fsat:
+        case nir_op_mov:
+                return BI_MOV;
+
+        default:
+                unreachable("Unknown ALU op");
+        }
+}
+
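+/* Emits an ALU op: picks the bi_class, builds a bytewise writemask, copies
+ * sources (inlining constants where they fit), and applies any op-specific
+ * fixups */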
+static void
+emit_alu(bi_context *ctx, nir_alu_instr *instr)
+{
+        /* Assume it's something we can handle normally */
+        bi_instruction alu = {
+                .type = bi_class_for_nir_alu(instr->op),
+                .dest = bir_dest_index(&instr->dest.dest),
+                .dest_type = nir_op_infos[instr->op].output_type
+                        | nir_dest_bit_size(instr->dest.dest),
+        };
+
+        if (instr->dest.dest.is_ssa) {
+                /* Construct a writemask */
+                unsigned bits_per_comp = instr->dest.dest.ssa.bit_size;
+                unsigned comps = instr->dest.dest.ssa.num_components;
+                assert(comps == 1);
+                unsigned bits = bits_per_comp * comps;
+                unsigned bytes = MAX2(bits / 8, 1);
+                alu.writemask = (1 << bytes) - 1;
+        } else {
+                unsigned comp_mask = instr->dest.write_mask;
+
+                alu.writemask = pan_to_bytemask(nir_dest_bit_size(instr->dest.dest),
+                                comp_mask);
+        }
+
+        /* We inline constants as we go. This tracks how many constants have
+         * been inlined, since we're limited to 64-bits of constants per
+         * instruction */
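+        /* e.g. two 32-bit constants a and b can share one instruction,
+         * packed as constant.u64 = a | ((uint64_t) b << 32), with each
+         * source reading its half via BIR_INDEX_CONSTANT | shift */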
+
+        unsigned dest_bits = nir_dest_bit_size(instr->dest.dest);
+        unsigned constants_left = (64 / dest_bits);
+        unsigned constant_shift = 0;
+
+        /* Copy sources */
+
+        unsigned num_inputs = nir_op_infos[instr->op].num_inputs;
+        assert(num_inputs <= ARRAY_SIZE(alu.src));
+
+        for (unsigned i = 0; i < num_inputs; ++i) {
+                unsigned bits = nir_src_bit_size(instr->src[i].src);
+                alu.src_types[i] = nir_op_infos[instr->op].input_types[i]
+                        | bits;
+
+                /* Try to inline a constant; requiring the source size to
+                 * match the destination keeps the shift bookkeeping simple */
+                if (nir_src_is_const(instr->src[i].src) && constants_left && (dest_bits == bits)) {
+                        alu.constant.u64 |=
+                                (nir_src_as_uint(instr->src[i].src)) << constant_shift;
+
+                        alu.src[i] = BIR_INDEX_CONSTANT | constant_shift;
+                        --constants_left;
+                        constant_shift += dest_bits;
+                        continue;
+                }
+
+                alu.src[i] = bir_src_index(&instr->src[i].src);
+
+                /* Sources are scalarized by this point, so only the first
+                 * swizzle component matters */
+                alu.swizzle[i][0] = instr->src[i].swizzle[0];
+        }
+
+        /* Op-specific fixup */
+        switch (instr->op) {
+        case nir_op_fmul:
+                alu.src[2] = BIR_INDEX_ZERO; /* FMA: a * b + 0 */
+                break;
+        case nir_op_fsat:
+                alu.outmod = BIFROST_SAT; /* saturated MOV */
+                break;
+        case nir_op_fmax:
+        case nir_op_imax:
+        case nir_op_umax:
+                alu.op.minmax = BI_MINMAX_MAX; /* MINMAX */
+                break;
+        default:
+                break;
+        }
+
+        bi_emit(ctx, alu);
+}
+
 static void
 emit_instr(bi_context *ctx, struct nir_instr *instr)
 {
         switch (instr->type) {
-#if 0
         case nir_instr_type_load_const:
                 emit_load_const(ctx, nir_instr_as_load_const(instr));
                 break;
-#endif
 
         case nir_instr_type_intrinsic:
                 emit_intrinsic(ctx, nir_instr_as_intrinsic(instr));
                 break;
 
-#if 0
         case nir_instr_type_alu:
                 emit_alu(ctx, nir_instr_as_alu(instr));
                 break;
 
+#if 0
         case nir_instr_type_tex:
                 emit_tex(ctx, nir_instr_as_tex(instr));
                 break;
@@ -445,6 +627,8 @@ bi_optimize_nir(nir_shader *nir)
         };
 
         NIR_PASS(progress, nir, nir_lower_tex, &lower_tex_options);
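+
+        /* Lower ALU ops and constants to scalar ourselves, since e.g.
+         * viewport transformation lowering produces vector ops */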
+        NIR_PASS(progress, nir, nir_lower_alu_to_scalar, NULL, NULL);
+        NIR_PASS(progress, nir, nir_lower_load_const_to_scalar);
 
         do {
                 progress = false;
@@ -489,10 +673,20 @@ bi_optimize_nir(nir_shader *nir)
         } while (progress);
 
         NIR_PASS(progress, nir, nir_opt_algebraic_late);
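+
+        /* Scalarize again, in case the late optimizations introduced new
+         * vector ops */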
+        NIR_PASS(progress, nir, nir_lower_alu_to_scalar, NULL, NULL);
+        NIR_PASS(progress, nir, nir_lower_load_const_to_scalar);
 
         /* Take us out of SSA */
         NIR_PASS(progress, nir, nir_lower_locals_to_regs);
         NIR_PASS(progress, nir, nir_convert_from_ssa, true);
+
+        /* Bifrost is primarily a scalar architecture, but there is enough
+         * vector behaviour that we use a vector IR; lower vecN writes to
+         * moves here rather than layering scalar hacks on top of the vector
+         * hacks */
+
+        NIR_PASS(progress, nir, nir_move_vec_src_uses_to_dest);
+        NIR_PASS(progress, nir, nir_lower_vec_to_movs);
+        NIR_PASS(progress, nir, nir_opt_dce);
 }
 
 void
@@ -522,10 +716,6 @@ bifrost_compile_shader_nir(nir_shader *nir, bifrost_program *program, unsigned p
         NIR_PASS_V(nir, nir_lower_io, nir_var_all, glsl_type_size, 0);
         NIR_PASS_V(nir, nir_lower_ssbo);
 
-        /* We have to lower ALU to scalar ourselves since viewport
-         * transformations produce vector ops */
-        NIR_PASS_V(nir, nir_lower_alu_to_scalar, NULL, NULL);
-
         bi_optimize_nir(nir);
         nir_print_shader(nir, stdout);
 
@@ -539,6 +729,7 @@ bifrost_compile_shader_nir(nir_shader *nir, bifrost_program *program, unsigned p
         }
 
         bi_print_shader(ctx, stdout);
+        bi_schedule(ctx);
 
         ralloc_free(ctx);
 }