unsigned mir_use_count(compiler_context *ctx, unsigned value);
bool mir_is_written_before(compiler_context *ctx, midgard_instruction *ins, unsigned node);
uint16_t mir_bytemask_of_read_components(midgard_instruction *ins, unsigned node);
-unsigned mir_ubo_shift(midgard_load_store_op op);
midgard_reg_mode mir_typesize(midgard_instruction *ins);
midgard_reg_mode mir_srcsize(midgard_instruction *ins, unsigned i);
unsigned mir_bytes_for_mode(midgard_reg_mode mode);
{
/* TODO: half-floats */
- midgard_instruction ins = m_ld_ubo_int4(dest, offset);
-
- /* TODO: Don't split */
- ins.load_store.varying_parameters = (offset & 0x7F) << 3;
- ins.load_store.address = offset >> 7;
+ midgard_instruction ins = m_ld_ubo_int4(dest, 0);
+ ins.constants[0] = offset;
mir_set_intr_mask(instr, &ins, true);
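/* Illustrative note: the offset now rides along in ins.constants[0] instead
 * of being scattered into load_store.varying_parameters/address at emit time.
 * Any pass that runs before packing can read it back untouched, e.g.:
 *
 *    unsigned off = ins.constants[0];
 *
 * and the hardware field split happens exactly once, in the bundle-packing
 * hunk below. */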
if (indirect_offset) {
mir_pack_ldst_mask(bundle->instructions[i]);
mir_pack_swizzle_ldst(bundle->instructions[i]);
+
+ /* Apply a constant offset */
+ unsigned offset = bundle->instructions[i]->constants[0];
+
+ if (offset) {
+ bundle->instructions[i]->load_store.varying_parameters |= (offset & 0x7F) << 3;
+ bundle->instructions[i]->load_store.address |= (offset >> 7);
+ }
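/* Sketch of the split applied above (illustrative, not authoritative): the
 * low seven bits of the stored offset land in varying_parameters at bit 3,
 * and the remaining bits land in address. Inverting those two lines, with
 * "ldst" standing in for &ins->load_store and assuming both fields were zero
 * before the OR, recovers the value parked in constants[0]:
 *
 *    unsigned lo  = (ldst->varying_parameters >> 3) & 0x7F;
 *    unsigned hi  = ldst->address;
 *    unsigned off = (hi << 7) | lo;
 */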
}
memcpy(&current64, &bundle->instructions[0]->load_store, sizeof(current64));
/* For register spilling - to thread local storage */
.arg_1 = 0xEA,
.arg_2 = 0x1E,
-
- /* Splattered across, TODO combine logically */
- .varying_parameters = (byte & 0x1FF) << 1,
- .address = (byte >> 9)
},
/* If we spill an unspill, RA goes into an infinite loop */
.no_spill = true
};
+ ins.constants[0] = byte;
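/* Same scheme as the UBO path: the spill slot's raw offset is parked in
 * constants[0] and merged into the load/store words during bundle packing,
 * replacing the hand-split encoding deleted just above. */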
+
if (is_store) {
/* r0 = r26, r1 = r27 */
assert(srcdest == SSA_FIXED_REGISTER(26) || srcdest == SSA_FIXED_REGISTER(27));
return mask;
}
-unsigned
-mir_ubo_shift(midgard_load_store_op op)
-{
- switch (op) {
- case midgard_op_ld_ubo_char:
- return 0;
- case midgard_op_ld_ubo_char2:
- return 1;
- case midgard_op_ld_ubo_char4:
- return 2;
- case midgard_op_ld_ubo_short4:
- return 3;
- case midgard_op_ld_ubo_int4:
- return 4;
- default:
- unreachable("Invalid op");
- }
-}
-
/* Register allocation occurs after instruction scheduling, which is fine until
* we start needing to spill registers and therefore insert instructions into
* an already-scheduled program. We don't have to be terribly efficient about
* program so we allow that many registers through at minimum, to prevent
* spilling. If we spill anyway, I mean, it's a lose-lose at that point. */
-static unsigned
-mir_ubo_offset(midgard_instruction *ins)
-{
- assert(ins->type == TAG_LOAD_STORE_4);
- assert(OP_IS_UBO_READ(ins->load_store.op));
-
- /* Grab the offset as the hw understands it */
- unsigned lo = ins->load_store.varying_parameters >> 7;
- unsigned hi = ins->load_store.address;
- unsigned raw = ((hi << 3) | lo);
-
- /* Account for the op's shift */
- unsigned shift = mir_ubo_shift(ins->load_store.op);
- return (raw << shift);
-}
-
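/* With the offset kept verbatim in constants[0] until pack time, there is
 * nothing left to decode: mir_ubo_shift() and mir_ubo_offset() existed only
 * to reconstruct a value that had already been scattered into the hardware
 * fields, so the promotion pass below now reads constants[0] directly. */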
void
midgard_promote_uniforms(compiler_context *ctx, unsigned promoted_count)
{
if (ins->type != TAG_LOAD_STORE_4) continue;
if (!OP_IS_UBO_READ(ins->load_store.op)) continue;
- /* Get the offset. TODO: can we promote unaligned access? */
- unsigned off = mir_ubo_offset(ins);
+ /* TODO: promote unaligned access via swizzle? */
+ unsigned off = ins->constants[0];
if (off & 0xF) continue;
unsigned address = off / 16;
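/* Worked example (illustrative): constants[0] is treated as a byte offset
 * here. A read at offset 48 passes the (off & 0xF) alignment check and maps
 * to vec4 slot address = 48 / 16 = 3; a read at offset 20 fails the check
 * (20 & 0xF == 4) and is skipped, hence the TODO about promoting unaligned
 * access via a swizzle. */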