pan/midgard: Represent ld/st offset unpacked
author Alyssa Rosenzweig <alyssa.rosenzweig@collabora.com>
Fri, 15 Nov 2019 19:19:34 +0000 (14:19 -0500)
committer Alyssa Rosenzweig <alyssa.rosenzweig@collabora.com>
Mon, 18 Nov 2019 03:19:31 +0000 (22:19 -0500)
This simplifies manipulation of the offsets dramatically, fixing some
UBO-access-related bugs.

Signed-off-by: Alyssa Rosenzweig <alyssa.rosenzweig@collabora.com>
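
As a rough sketch of the new scheme (the struct and helpers below are
illustrative stand-ins, not the real midgard_load_store definition; the bit
layout is taken from the midgard_emit.c hunk): the byte offset now stays
unpacked in ins->constants[0] through the compiler passes, and only
emit_binary_bundle() folds it into the packed load/store word.

/* Illustrative only -- hypothetical stand-ins, not the Mesa structs. */
#include <stdint.h>

struct ldst_word {
        uint32_t varying_parameters;
        uint32_t address;
};

/* Fold an unpacked byte offset into the packed encoding, as the
 * emit_binary_bundle() hunk below does: the low 7 bits land in
 * varying_parameters (shifted up by 3), the remaining bits in address. */
static void
pack_ldst_offset(struct ldst_word *w, unsigned offset)
{
        w->varying_parameters |= (offset & 0x7F) << 3;
        w->address |= offset >> 7;
}

/* Inverse, recovering the byte offset from an already-packed word. */
static unsigned
unpack_ldst_offset(const struct ldst_word *w)
{
        return (w->address << 7) | ((w->varying_parameters >> 3) & 0x7F);
}

Keeping the offset unpacked until emission is what lets passes such as
midgard_promote_uniforms read it straight from ins->constants[0] instead of
reassembling it from two hardware fields (and re-applying a per-op shift via
the now-removed mir_ubo_shift()).
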
src/panfrost/midgard/compiler.h
src/panfrost/midgard/midgard_compile.c
src/panfrost/midgard/midgard_emit.c
src/panfrost/midgard/midgard_schedule.c
src/panfrost/midgard/mir.c
src/panfrost/midgard/mir_promote_uniforms.c

diff --git a/src/panfrost/midgard/compiler.h b/src/panfrost/midgard/compiler.h
index 3c1730143e2a40c09e087e5b7950d3915fb21884..7e3f998ab4f88a292a13e6f67d7d1b3031283dec 100644
@@ -517,7 +517,6 @@ bool mir_special_index(compiler_context *ctx, unsigned idx);
 unsigned mir_use_count(compiler_context *ctx, unsigned value);
 bool mir_is_written_before(compiler_context *ctx, midgard_instruction *ins, unsigned node);
 uint16_t mir_bytemask_of_read_components(midgard_instruction *ins, unsigned node);
-unsigned mir_ubo_shift(midgard_load_store_op op);
 midgard_reg_mode mir_typesize(midgard_instruction *ins);
 midgard_reg_mode mir_srcsize(midgard_instruction *ins, unsigned i);
 unsigned mir_bytes_for_mode(midgard_reg_mode mode);
diff --git a/src/panfrost/midgard/midgard_compile.c b/src/panfrost/midgard/midgard_compile.c
index a187beaab7c9647fbb1ba60a7852720ccf011195..da7d995acd065bb7d61882b1c47e5aab74a9f3d1 100644
@@ -1141,11 +1141,8 @@ emit_ubo_read(
 {
         /* TODO: half-floats */
 
-        midgard_instruction ins = m_ld_ubo_int4(dest, offset);
-
-        /* TODO: Don't split */
-        ins.load_store.varying_parameters = (offset & 0x7F) << 3;
-        ins.load_store.address = offset >> 7;
+        midgard_instruction ins = m_ld_ubo_int4(dest, 0);
+        ins.constants[0] = offset;
         mir_set_intr_mask(instr, &ins, true);
 
         if (indirect_offset) {
diff --git a/src/panfrost/midgard/midgard_emit.c b/src/panfrost/midgard/midgard_emit.c
index 9d03bbc1a098c166b5b1c35ffda3b7a73af1c2e4..7559a34dcfb08bd40d795dab00d5fe676c697dce 100644
@@ -388,6 +388,14 @@ emit_binary_bundle(compiler_context *ctx,
                         mir_pack_ldst_mask(bundle->instructions[i]);
 
                         mir_pack_swizzle_ldst(bundle->instructions[i]);
+
+                        /* Apply a constant offset */
+                        unsigned offset = bundle->instructions[i]->constants[0];
+
+                        if (offset) {
+                                bundle->instructions[i]->load_store.varying_parameters |= (offset & 0x7F) << 3;
+                                bundle->instructions[i]->load_store.address |= (offset >> 7);
+                        }
                 }
 
                 memcpy(&current64, &bundle->instructions[0]->load_store, sizeof(current64));
diff --git a/src/panfrost/midgard/midgard_schedule.c b/src/panfrost/midgard/midgard_schedule.c
index 418589a6192a8a863aab36065383d3e23d32e25e..addd65306d6bbb148826a5684c1f6b4d69890f98 100644
@@ -1169,16 +1169,14 @@ v_load_store_scratch(
                         /* For register spilling - to thread local storage */
                         .arg_1 = 0xEA,
                         .arg_2 = 0x1E,
-
-                        /* Splattered across, TODO combine logically */
-                        .varying_parameters = (byte & 0x1FF) << 1,
-                        .address = (byte >> 9)
                 },
 
                 /* If we spill an unspill, RA goes into an infinite loop */
                 .no_spill = true
         };
 
+        ins.constants[0] = byte;
+
        if (is_store) {
                 /* r0 = r26, r1 = r27 */
                 assert(srcdest == SSA_FIXED_REGISTER(26) || srcdest == SSA_FIXED_REGISTER(27));
diff --git a/src/panfrost/midgard/mir.c b/src/panfrost/midgard/mir.c
index 8d928458a54e4d79097e52183a8df6b3a7a6ea18..3f4d53c578114e8c089bfb9ad9f4d0fc901bfd11 100644
@@ -478,25 +478,6 @@ mir_bytemask_of_read_components(midgard_instruction *ins, unsigned node)
         return mask;
 }
 
-unsigned
-mir_ubo_shift(midgard_load_store_op op)
-{
-        switch (op) {
-        case midgard_op_ld_ubo_char:
-                return 0;
-        case midgard_op_ld_ubo_char2:
-                return 1;
-        case midgard_op_ld_ubo_char4:
-                return 2;
-        case midgard_op_ld_ubo_short4:
-                return 3;
-        case midgard_op_ld_ubo_int4:
-                return 4;
-        default:
-                unreachable("Invalid op");
-        }
-}
-
 /* Register allocation occurs after instruction scheduling, which is fine until
  * we start needing to spill registers and therefore insert instructions into
  * an already-scheduled program. We don't have to be terribly efficient about
diff --git a/src/panfrost/midgard/mir_promote_uniforms.c b/src/panfrost/midgard/mir_promote_uniforms.c
index 8d887a615fb26ded5bcb7bed4afee640ae2f92c5..d7b3cce36d2a9d002f8698716e140f100d8ff053 100644
  * program so we allow that many registers through at minimum, to prevent
  * spilling. If we spill anyway, I mean, it's a lose-lose at that point. */
 
-static unsigned
-mir_ubo_offset(midgard_instruction *ins)
-{
-        assert(ins->type == TAG_LOAD_STORE_4);
-        assert(OP_IS_UBO_READ(ins->load_store.op));
-
-        /* Grab the offset as the hw understands it */
-        unsigned lo = ins->load_store.varying_parameters >> 7;
-        unsigned hi = ins->load_store.address;
-        unsigned raw = ((hi << 3) | lo);
-
-        /* Account for the op's shift */
-        unsigned shift = mir_ubo_shift(ins->load_store.op);
-        return (raw << shift);
-}
-
 void
 midgard_promote_uniforms(compiler_context *ctx, unsigned promoted_count)
 {
@@ -59,8 +43,8 @@ midgard_promote_uniforms(compiler_context *ctx, unsigned promoted_count)
                 if (ins->type != TAG_LOAD_STORE_4) continue;
                 if (!OP_IS_UBO_READ(ins->load_store.op)) continue;
 
-                /* Get the offset. TODO: can we promote unaligned access? */
-                unsigned off = mir_ubo_offset(ins);
+                /* TODO: promote unaligned access via swizzle? */
+                unsigned off = ins->constants[0];
                 if (off & 0xF) continue;
 
                 unsigned address = off / 16;
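
For a concrete feel of the promotion check in the hunk above (simple
arithmetic on the unpacked byte offset, not code from the commit):

#include <assert.h>

int main(void)
{
        /* A 48-byte offset is 16-byte aligned, so it can be promoted;
         * it maps to vec4 uniform slot 48 / 16 = 3. */
        assert((48 & 0xF) == 0 && 48 / 16 == 3);

        /* A 20-byte offset fails the (off & 0xF) check and is skipped. */
        assert((20 & 0xF) != 0);
        return 0;
}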