static unsigned
vector_to_scalar_source(unsigned u, bool is_int, bool is_full,
- unsigned masked_component)
+ unsigned component)
{
midgard_vector_alu_src v;
memcpy(&v, &u, sizeof(v));
/* TODO: Integers */
- unsigned component = (v.swizzle >> (2*masked_component)) & 3;
- bool upper = false; /* TODO */
-
midgard_scalar_alu_src s = { 0 };
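/* The 3-bit component field seemingly indexes 16-bit lanes: a full
 * 32-bit component c starts at lane 2c (hence the shift below), while
 * 16-bit components index lanes directly */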
if (is_full) {
if (s.full)
s.component = component << 1;
else
- s.component = component + (upper << 2);
+ s.component = component;
if (is_int) {
/* TODO */
{
bool is_int = midgard_is_integer_op(v.op);
bool is_full = v.reg_mode == midgard_reg_mode_32;
- bool is_inline_constant = ins->ssa_args.inline_constant;
+ bool is_inline_constant = ins->has_inline_constant;
unsigned comp = component_from_mask(ins->mask);
/* The output component is from the mask */
midgard_scalar_alu s = {
.op = v.op,
- .src1 = vector_to_scalar_source(v.src1, is_int, is_full, comp),
- .src2 = !is_inline_constant ? vector_to_scalar_source(v.src2, is_int, is_full, comp) : 0,
+ .src1 = vector_to_scalar_source(v.src1, is_int, is_full, ins->swizzle[0][comp]),
+ .src2 = !is_inline_constant ? vector_to_scalar_source(v.src2, is_int, is_full, ins->swizzle[1][comp]) : 0,
.unknown = 0,
.outmod = v.outmod,
.output_full = is_full,
/* The inline constant is passed along rather than extracted
 * from v */
- if (ins->ssa_args.inline_constant) {
+ if (ins->has_inline_constant) {
uint16_t imm = 0;
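/* The inline constant is stored scrambled in the encoding; despite the
 * name, the mask below grabs 12 bits, which the shifts that follow
 * rearrange into the hardware's bit order */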
int lower_11 = ins->inline_constant & ((1 << 12) - 1);
imm |= (lower_11 >> 9) & 3;
return s;
}
+/* 64-bit swizzles are super easy since there are 2 components of 2 components
+ * in an 8-bit field ... lots of duplication to go around!
+ *
+ * Swizzles of 32-bit vectors accessed from 64-bit instructions are a little
+ * funny -- pack them *as if* they were native 64-bit, using rep_* flags to
+ * flag upper. For instance, xy would become 64-bit XY but that's just xyzw
+ * native. Likewise, zz would become 64-bit XX with rep* so it would be xyxy
+ * with rep. Pretty nifty, huh? */
+
+static unsigned
+mir_pack_swizzle_64(unsigned *swizzle, unsigned components)
+{
+ unsigned packed = 0;
+
+ for (unsigned i = 0; i < 2; ++i) {
+ assert(swizzle[i] < components);
+
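+ /* Each 64-bit component occupies two adjacent 32-bit slots, so
+ * selecting the low component packs XY and the high component
+ * packs ZW */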
+ unsigned a = (swizzle[i] & 1) ?
+ (COMPONENT_W << 2) | COMPONENT_Z :
+ (COMPONENT_Y << 2) | COMPONENT_X;
+
+ packed |= a << (i * 4);
+ }
+
+ return packed;
+}
+
+static void
+mir_pack_mask_alu(midgard_instruction *ins)
+{
+ unsigned effective = ins->mask;
+
+ /* If we have a destination override, we need to figure out whether to
+ * override to the lower or upper half, shifting the effective mask in
+ * the latter case, so AAAA.... becomes ....AAAA */
+
+ unsigned upper_shift = mir_upper_override(ins);
+
+ if (upper_shift) {
+ effective >>= upper_shift;
+ ins->alu.dest_override = midgard_dest_override_upper;
+ }
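+ /* e.g. with an upper shift of 4, a mask of 0xF0 shifts down to
+ * 0x0F before being expanded below */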
+
+ if (ins->alu.reg_mode == midgard_reg_mode_32)
+ ins->alu.mask = expand_writemask(effective, 4);
+ else if (ins->alu.reg_mode == midgard_reg_mode_64)
+ ins->alu.mask = expand_writemask(effective, 2);
+ else
+ ins->alu.mask = effective;
+}
+
+static void
+mir_pack_swizzle_alu(midgard_instruction *ins)
+{
+ midgard_vector_alu_src src[] = {
+ vector_alu_from_unsigned(ins->alu.src1),
+ vector_alu_from_unsigned(ins->alu.src2)
+ };
+
+ for (unsigned i = 0; i < 2; ++i) {
+ unsigned packed = 0;
+
+ if (ins->alu.reg_mode == midgard_reg_mode_64) {
+ midgard_reg_mode mode = mir_srcsize(ins, i);
+ unsigned components = 16 / mir_bytes_for_mode(mode);
+
+ packed = mir_pack_swizzle_64(ins->swizzle[i], components);
+
+ if (mode == midgard_reg_mode_32) {
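+ /* Check, for each 32-bit pair, whether the source reads
+ * from the upper 64 bits; the rep_low flag encodes the
+ * selection per the packing scheme above */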
+ bool lo = ins->swizzle[i][0] >= COMPONENT_Z;
+ bool hi = ins->swizzle[i][1] >= COMPONENT_Z;
+ unsigned mask = mir_bytemask(ins);
+
+ if (mask & 0xFF) {
+ /* We can't mix halves... */
+ if (mask & 0xFF00)
+ assert(lo == hi);
+
+ src[i].rep_low |= lo;
+ } else {
+ src[i].rep_low |= hi;
+ }
+ } else if (mode < midgard_reg_mode_32) {
+ unreachable("Cannot encode 8/16 swizzle in 64-bit");
+ }
+ } else {
+ /* For 32-bit, swizzle packing is stupid-simple. For 16-bit,
+ * the strategy is to check whether the nibble we're on is
+ * upper or lower. We need all components to be on the same
+ * "side"; that much is enforced by the ISA and should have
+ * been lowered. TODO: 8-bit packing. TODO: vec8 */
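+ /* For instance, a 16-bit source swizzled from the upper half,
+ * e.g. components (5, 5, 5, 5), packs as yyyy with rep_high
+ * set below */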
+
+ unsigned first = ins->mask ? ffs(ins->mask) - 1 : 0;
+ bool upper = ins->swizzle[i][first] > 3;
+
+ if (upper && ins->mask)
+ assert(mir_srcsize(ins, i) <= midgard_reg_mode_16);
+
+ for (unsigned c = 0; c < 4; ++c) {
+ unsigned v = ins->swizzle[i][c];
+
+ bool t_upper = v > 3;
+
+ /* Ensure we're doing something sane */
+
+ if (ins->mask & (1 << c)) {
+ assert(t_upper == upper);
+ assert(v <= 7);
+ }
+
+ /* Use the non-upper part */
+ v &= 0x3;
+
+ packed |= v << (2 * c);
+ }
+
+ src[i].rep_high = upper;
+ }
+
+ src[i].swizzle = packed;
+ }
+
+ ins->alu.src1 = vector_alu_srco_unsigned(src[0]);
+
+ if (!ins->has_inline_constant)
+ ins->alu.src2 = vector_alu_srco_unsigned(src[1]);
+}
+
+static void
+mir_pack_swizzle_ldst(midgard_instruction *ins)
+{
+ /* TODO: non-32-bit, non-vec4 */
+ for (unsigned c = 0; c < 4; ++c) {
+ unsigned v = ins->swizzle[0][c];
+
+ /* Check vec4 */
+ assert(v <= 3);
+
+ ins->load_store.swizzle |= v << (2 * c);
+ }
+
+ /* TODO: arg_1/2 */
+}
+
+static void
+mir_pack_swizzle_tex(midgard_instruction *ins)
+{
+ for (unsigned i = 0; i < 2; ++i) {
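+ /* swizzle[0] becomes the output (texture) swizzle and
+ * swizzle[1] the input register swizzle, per the writeback
+ * at the end of the loop */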
+ unsigned packed = 0;
+
+ for (unsigned c = 0; c < 4; ++c) {
+ unsigned v = ins->swizzle[i][c];
+
+ /* Check vec4 */
+ assert(v <= 3);
+
+ packed |= v << (2 * c);
+ }
+
+ if (i == 0)
+ ins->texture.swizzle = packed;
+ else
+ ins->texture.in_reg_swizzle = packed;
+ }
+
+ /* TODO: bias component */
+}
+
+/* Load/store masks are 4 bits wide; load/store ops pack for that. vec4 is
+ * the natural mask width; vec8 is constrained to be in pairs, and vec2 is
+ * duplicated. TODO: 8-bit?
+ */
+
+static void
+mir_pack_ldst_mask(midgard_instruction *ins)
+{
+ midgard_reg_mode mode = mir_typesize(ins);
+ unsigned packed = ins->mask;
+
+ if (mode == midgard_reg_mode_64) {
+ packed = ((ins->mask & 0x2) ? (0x8 | 0x4) : 0) |
+ ((ins->mask & 0x1) ? (0x2 | 0x1) : 0);
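+ /* e.g. a 64-bit .x mask (0x1) covers two 32-bit slots, packing
+ * as 0x3 */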
+ } else if (mode == midgard_reg_mode_16) {
+ packed = 0;
+
+ for (unsigned i = 0; i < 4; ++i) {
+ /* Make sure the mask is duplicated across each pair of lanes */
+ bool u = (ins->mask & (1 << (2*i + 0))) != 0;
+ bool v = (ins->mask & (1 << (2*i + 1))) != 0;
+ assert(u == v);
+
+ packed |= (u << i);
+ }
+ }
+
+ ins->load_store.mask = packed;
+}
+
static void
emit_alu_bundle(compiler_context *ctx,
midgard_bundle *bundle,
midgard_instruction *ins = bundle->instructions[i];
/* Check if this instruction has registers */
- if (ins->compact_branch || ins->prepacked_branch) continue;
+ if (ins->compact_branch) continue;
/* Otherwise, just emit the registers */
uint16_t reg_word = 0;
midgard_scalar_alu scalarized;
if (ins->unit & UNITS_ANY_VECTOR) {
- if (ins->alu.reg_mode == midgard_reg_mode_32)
- ins->alu.mask = expand_writemask_32(ins->mask);
- else
- ins->alu.mask = ins->mask;
-
+ mir_pack_mask_alu(ins);
+ mir_pack_swizzle_alu(ins);
size = sizeof(midgard_vector_alu);
source = &ins->alu;
} else if (ins->unit == ALU_ENAB_BR_COMPACT) {
source = &scalarized;
}
- memcpy(util_dynarray_grow_bytes(emission, 1, size), source, size);
+ memcpy(util_dynarray_grow_bytes(emission, size, 1), source, size);
}
/* Emit padding (all zero) */
- memset(util_dynarray_grow_bytes(emission, 1, bundle->padding), 0, bundle->padding);
+ memset(util_dynarray_grow_bytes(emission, bundle->padding, 1), 0, bundle->padding);
/* Tack on constants */
- if (bundle->has_embedded_constants) {
- util_dynarray_append(emission, float, bundle->constants[0]);
- util_dynarray_append(emission, float, bundle->constants[1]);
- util_dynarray_append(emission, float, bundle->constants[2]);
- util_dynarray_append(emission, float, bundle->constants[3]);
- }
+ if (bundle->has_embedded_constants)
+ util_dynarray_append(emission, midgard_constants, bundle->constants);
+}
+
+/* Shift applied to the immediate used as an offset. Probably this is papering
+ * over some other semantic distinction as well, but it unifies things in the
+ * compiler so I don't mind. */
+
+static unsigned
+mir_ldst_imm_shift(midgard_load_store_op op)
+{
+ if (OP_IS_UBO_READ(op))
+ return 3;
+ else
+ return 1;
}
/* After everything is scheduled, emit whole bundles at a time */
case TAG_ALU_8:
case TAG_ALU_12:
case TAG_ALU_16:
+ case TAG_ALU_4 + 4:
+ case TAG_ALU_8 + 4:
+ case TAG_ALU_12 + 4:
+ case TAG_ALU_16 + 4:
emit_alu_bundle(ctx, bundle, emission, lookahead);
break;
/* Pack masks, swizzles, and constant offsets */
for (unsigned i = 0; i < bundle->instruction_count; ++i) {
- bundle->instructions[i]->load_store.mask =
- bundle->instructions[i]->mask;
+ mir_pack_ldst_mask(bundle->instructions[i]);
+
+ mir_pack_swizzle_ldst(bundle->instructions[i]);
+
+ /* Apply a constant offset */
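+ /* The low bits of the offset pack into varying_parameters
+ * above the op-dependent shift; the high bits spill into the
+ * address field */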
+ unsigned offset = bundle->instructions[i]->constants.u32[0];
+
+ if (offset) {
+ unsigned shift = mir_ldst_imm_shift(bundle->instructions[i]->load_store.op);
+ unsigned upper_shift = 10 - shift;
+
+ bundle->instructions[i]->load_store.varying_parameters |= (offset & ((1 << upper_shift) - 1)) << shift;
+ bundle->instructions[i]->load_store.address |= (offset >> upper_shift);
+ }
}
memcpy(&current64, &bundle->instructions[0]->load_store, sizeof(current64));
}
case TAG_TEXTURE_4:
- case TAG_TEXTURE_4_VTX: {
+ case TAG_TEXTURE_4_VTX:
+ case TAG_TEXTURE_4_BARRIER: {
/* Texture instructions are easy, since there is neither pipelining
 * nor VLIW to worry about. We may need to set .cont/.last
 * flags. */
ins->texture.type = bundle->tag;
ins->texture.next_type = next_tag;
ins->texture.mask = ins->mask;
+ mir_pack_swizzle_tex(ins);
ctx->texture_op_count--;
- if (mir_op_computes_derivatives(ins->texture.op)) {
+ if (mir_op_computes_derivatives(ctx->stage, ins->texture.op)) {
bool continues = ctx->texture_op_count > 0;
+
+ /* Control flow complicates helper invocation
+ * lifespans, so for now just keep helper threads
+ * around indefinitely with loops. TODO: Proper
+ * analysis */
+ continues |= ctx->loop_count > 0;
+
ins->texture.cont = continues;
ins->texture.last = !continues;
} else {