util: Add a mapping from VkFormat to PIPE_FORMAT.
diff --git a/src/panfrost/midgard/midgard_emit.c b/src/panfrost/midgard/midgard_emit.c
index 1996e1b1ef6e555c2c731a7816bf9ca375ed16cd..63dcb41ad4141ca94b899dbb2fcf89fbb345e5e2 100644
--- a/src/panfrost/midgard/midgard_emit.c
+++ b/src/panfrost/midgard/midgard_emit.c
@@ -42,16 +42,13 @@ component_from_mask(unsigned mask)
 
 static unsigned
 vector_to_scalar_source(unsigned u, bool is_int, bool is_full,
-                unsigned masked_component)
+                unsigned component)
 {
         midgard_vector_alu_src v;
         memcpy(&v, &u, sizeof(v));
 
         /* TODO: Integers */
 
-        unsigned component = (v.swizzle >> (2*masked_component)) & 3;
-        bool upper = false; /* TODO */
-
         midgard_scalar_alu_src s = { 0 };
 
         if (is_full) {
@@ -70,7 +67,7 @@ vector_to_scalar_source(unsigned u, bool is_int, bool is_full,
         if (s.full)
                 s.component = component << 1;
         else
-                s.component = component + (upper << 2);
+                s.component = component;
 
         if (is_int) {
                 /* TODO */
@@ -90,15 +87,15 @@ vector_to_scalar_alu(midgard_vector_alu v, midgard_instruction *ins)
 {
         bool is_int = midgard_is_integer_op(v.op);
         bool is_full = v.reg_mode == midgard_reg_mode_32;
-        bool is_inline_constant = ins->ssa_args.inline_constant;
+        bool is_inline_constant = ins->has_inline_constant;
 
         unsigned comp = component_from_mask(ins->mask);
 
         /* The output component is from the mask */
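+        /* e.g. a write mask of 0x4 names component 2 as the output, so each
+         * source reads its swizzle[src][2] entry below */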
         midgard_scalar_alu s = {
                 .op = v.op,
-                .src1 = vector_to_scalar_source(v.src1, is_int, is_full, comp),
-                .src2 = !is_inline_constant ? vector_to_scalar_source(v.src2, is_int, is_full, comp) : 0,
+                .src1 = vector_to_scalar_source(v.src1, is_int, is_full, ins->swizzle[0][comp]),
+                .src2 = !is_inline_constant ? vector_to_scalar_source(v.src2, is_int, is_full, ins->swizzle[1][comp]) : 0,
                 .unknown = 0,
                 .outmod = v.outmod,
                 .output_full = is_full,
@@ -114,7 +111,7 @@ vector_to_scalar_alu(midgard_vector_alu v, midgard_instruction *ins)
         /* Inline constant is passed along rather than trying to extract it
          * from v */
 
-        if (ins->ssa_args.inline_constant) {
+        if (ins->has_inline_constant) {
                 uint16_t imm = 0;
                 int lower_11 = ins->inline_constant & ((1 << 12) - 1);
                 imm |= (lower_11 >> 9) & 3;
@@ -128,6 +125,168 @@ vector_to_scalar_alu(midgard_vector_alu v, midgard_instruction *ins)
         return s;
 }
 
+/* 64-bit swizzles are super easy since there are 2 components of 2 components
+ * in an 8-bit field ... lots of duplication to go around!
+ *
+ * Swizzles of 32-bit vectors accessed from 64-bit instructions are a little
+ * funny -- pack them *as if* they were native 64-bit, using rep_* flags to
+ * flag upper. For instance, xy would become 64-bit XY but that's just xyzw
+ * native. Likewise, zz would become 64-bit XX with rep* so it would be xyxy
+ * with rep. Pretty nifty, huh? */
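+/* For example, a native 64-bit .yx swizzle packs element 0 as the {Z, W} pair
+ * (0xE) and element 1 as the {X, Y} pair (0x4), giving 0x4E -- zwxy in 32-bit
+ * component terms. The zz example above packs as 0x44 (xyxy), with
+ * rep_low/rep_high set by the caller. */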
+
+static unsigned
+mir_pack_swizzle_64(unsigned *swizzle, unsigned max_component)
+{
+        unsigned packed = 0;
+
+        for (unsigned i = 0; i < 2; ++i) {
+                assert(swizzle[i] <= max_component);
+
+                unsigned a = swizzle[i] & 1 ?
+                        (COMPONENT_W << 2) | COMPONENT_Z :
+                        (COMPONENT_Y << 2) | COMPONENT_X;
+
+                packed |= a << (i * 4);
+        }
+
+        return packed;
+}
+
+static void
+mir_pack_swizzle_alu(midgard_instruction *ins)
+{
+        midgard_vector_alu_src src[] = {
+                vector_alu_from_unsigned(ins->alu.src1),
+                vector_alu_from_unsigned(ins->alu.src2)
+        };
+
+        for (unsigned i = 0; i < 2; ++i) {
+                unsigned packed = 0;
+
+                if (ins->alu.reg_mode == midgard_reg_mode_64) {
+                        midgard_reg_mode mode = mir_srcsize(ins, i);
+                        unsigned components = 16 / mir_bytes_for_mode(mode);
+
+                        packed = mir_pack_swizzle_64(ins->swizzle[i], components);
+
+                        if (mode == midgard_reg_mode_32) {
+                                src[i].rep_low |= (ins->swizzle[i][0] >= COMPONENT_Z);
+                                src[i].rep_high |= (ins->swizzle[i][1] >= COMPONENT_Z);
+                        } else if (mode < midgard_reg_mode_32) {
+                                unreachable("Cannot encode 8/16 swizzle in 64-bit");
+                        }
+                } else {
+                        /* For 32-bit, swizzle packing is stupid-simple. For 16-bit,
+                         * the strategy is to check whether we are accessing the
+                         * upper or the lower half of the source. All enabled
+                         * components must sit on the same half, since the ISA can
+                         * only address one half at a time; mixed accesses should
+                         * have been lowered away earlier. TODO: 8-bit packing.
+                         * TODO: vec8 */
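+                        /* For example, a 16-bit source swizzled .yyyy out of
+                         * its upper half (components 5/5/5/5) packs as 0x55,
+                         * with rep_high set to flag the upper half. */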
+
+                        unsigned first = ins->mask ? ffs(ins->mask) - 1 : 0;
+                        bool upper = ins->swizzle[i][first] > 3;
+
+                        if (upper && ins->mask)
+                                assert(mir_srcsize(ins, i) <= midgard_reg_mode_16);
+
+                        for (unsigned c = 0; c < 4; ++c) {
+                                unsigned v = ins->swizzle[i][c];
+
+                                bool t_upper = v > 3;
+
+                                /* Ensure we're doing something sane */
+
+                                if (ins->mask & (1 << c)) {
+                                        assert(t_upper == upper);
+                                        assert(v <= 7);
+                                }
+
+                                /* Use the non upper part */
+                                v &= 0x3;
+
+                                packed |= v << (2 * c);
+                        }
+
+                        src[i].rep_high = upper;
+                }
+
+                src[i].swizzle = packed;
+        }
+
+        ins->alu.src1 = vector_alu_srco_unsigned(src[0]);
+
+        if (!ins->has_inline_constant)
+                ins->alu.src2 = vector_alu_srco_unsigned(src[1]);
+}
+
+static void
+mir_pack_swizzle_ldst(midgard_instruction *ins)
+{
+        /* TODO: non-32-bit, non-vec4 */
+        for (unsigned c = 0; c < 4; ++c) {
+                unsigned v = ins->swizzle[0][c];
+
+                /* Check vec4 */
+                assert(v <= 3);
+
+                ins->load_store.swizzle |= v << (2 * c);
+        }
+
+        /* TODO: arg_1/2 */
+}
+
+static void
+mir_pack_swizzle_tex(midgard_instruction *ins)
+{
+        for (unsigned i = 0; i < 2; ++i) {
+                unsigned packed = 0;
+
+                for (unsigned c = 0; c < 4; ++c) {
+                        unsigned v = ins->swizzle[i][c];
+
+                        /* Check vec4 */
+                        assert(v <= 3);
+
+                        packed |= v << (2 * c);
+                }
+
+                if (i == 0)
+                        ins->texture.swizzle = packed;
+                else
+                        ins->texture.in_reg_swizzle = packed;
+        }
+
+        /* TODO: bias component */
+}
+
+/* Load/store masks are 4-bit, with a 32-bit vec4 as the natural width. A
+ * 64-bit vec2 mask has each bit duplicated into a pair, while a 16-bit vec8
+ * mask must already come in duplicated pairs, which collapse back down to
+ * single bits. TODO: 8-bit?
+ */
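+/* For example, a 64-bit mask of 0x3 packs to 0xF (each bit widened to a
+ * pair), while a 16-bit mask of 0x33 (components 0-1 and 4-5) collapses to
+ * 0x5; the assert below requires each pair to be all-on or all-off. */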
+
+static void
+mir_pack_ldst_mask(midgard_instruction *ins)
+{
+        midgard_reg_mode mode = mir_typesize(ins);
+        unsigned packed = ins->mask;
+
+        if (mode == midgard_reg_mode_64) {
+                packed = ((ins->mask & 0x2) ? (0x8 | 0x4) : 0) |
+                         ((ins->mask & 0x1) ? (0x2 | 0x1) : 0);
+        } else if (mode == midgard_reg_mode_16) {
+                packed = 0;
+
+                for (unsigned i = 0; i < 4; ++i) {
+                        /* Make sure we're duplicated */
+                        bool u = (ins->mask & (1 << (2*i + 0))) != 0;
+                        bool v = (ins->mask & (1 << (2*i + 1))) != 0;
+                        assert(u == v);
+
+                        packed |= (u << i);
+                }
+        }
+
+        ins->load_store.mask = packed;
+}
+
 static void
 emit_alu_bundle(compiler_context *ctx,
                 midgard_bundle *bundle,
@@ -162,11 +321,14 @@ emit_alu_bundle(compiler_context *ctx,
                 midgard_scalar_alu scalarized;
 
                 if (ins->unit & UNITS_ANY_VECTOR) {
-                        if (ins->alu.reg_mode == midgard_reg_mode_32)
-                                ins->alu.mask = expand_writemask_32(ins->mask);
+                        if (ins->alu.reg_mode == midgard_reg_mode_64)
+                                ins->alu.mask = expand_writemask(ins->mask, 2);
+                        else if (ins->alu.reg_mode == midgard_reg_mode_32)
+                                ins->alu.mask = expand_writemask(ins->mask, 4);
                         else
                                 ins->alu.mask = ins->mask;
 
+                        mir_pack_swizzle_alu(ins);
                         size = sizeof(midgard_vector_alu);
                         source = &ins->alu;
                 } else if (ins->unit == ALU_ENAB_BR_COMPACT) {
@@ -223,8 +385,17 @@ emit_binary_bundle(compiler_context *ctx,
                 /* Copy masks */
 
                 for (unsigned i = 0; i < bundle->instruction_count; ++i) {
-                        bundle->instructions[i]->load_store.mask =
-                                bundle->instructions[i]->mask;
+                        mir_pack_ldst_mask(bundle->instructions[i]);
+
+                        mir_pack_swizzle_ldst(bundle->instructions[i]);
+
+                        /* Apply a constant offset */
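+                        /* e.g. an offset of 200 (0b11001000) contributes its
+                         * low seven bits (0x48, shifted left by 3) to
+                         * varying_parameters and the remainder (1) to address */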
+                        unsigned offset = bundle->instructions[i]->constants[0];
+
+                        if (offset) {
+                                bundle->instructions[i]->load_store.varying_parameters |= (offset & 0x7F) << 3;
+                                bundle->instructions[i]->load_store.address |= (offset >> 7);
+                        }
                 }
 
                 memcpy(&current64, &bundle->instructions[0]->load_store, sizeof(current64));
@@ -255,10 +426,11 @@ emit_binary_bundle(compiler_context *ctx,
                 ins->texture.type = bundle->tag;
                 ins->texture.next_type = next_tag;
                 ins->texture.mask = ins->mask;
+                mir_pack_swizzle_tex(ins);
 
                 ctx->texture_op_count--;
 
-                if (mir_op_computes_derivatives(ins->texture.op)) {
+                if (mir_op_computes_derivatives(ctx->stage, ins->texture.op)) {
                         bool continues = ctx->texture_op_count > 0;
 
                         /* Control flow complicates helper invocation