if (!ctx->temp_count)
return NULL;
- struct lcra_state *l = lcra_alloc_equations(ctx->temp_count, 1, 8, 16, 5);
+ struct lcra_state *l = lcra_alloc_equations(ctx->temp_count, 5);
/* Starts of classes, in bytes */
l->class_start[REG_CLASS_WORK] = 16 * 0;
unsigned *found_class = calloc(sizeof(unsigned), ctx->temp_count);
unsigned *min_alignment = calloc(sizeof(unsigned), ctx->temp_count);
+ unsigned *min_bound = calloc(sizeof(unsigned), ctx->temp_count);
mir_foreach_instr_global(ctx, ins) {
/* Swizzles of 32-bit sources on 64-bit instructions need to be
* aligned */
if (ins->dest >= SSA_FIXED_MINIMUM) continue;
+ unsigned size = nir_alu_type_get_type_size(ins->dest_type);
+
/* 0 for x, 1 for xy, 2 for xyz, 3 for xyzw */
- int class = util_logbase2(ins->mask);
+ int comps1 = util_logbase2(ins->mask);
+
+ int bytes = (comps1 + 1) * (size / 8);
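+ /* e.g. a 32-bit vec3 write: mask = 0x7, comps1 = 2, bytes = 3 * 4 = 12 */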
/* Use the largest class if there's ambiguity, this
* handles partial writes */
int dest = ins->dest;
- found_class[dest] = MAX2(found_class[dest], class);
+ found_class[dest] = MAX2(found_class[dest], bytes);
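+ /* e.g. a .x write (4 bytes at 32-bit) combined with a .xyzw write
+ * (16 bytes) sizes the temp at the full 16 bytes */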
- /* XXX: Ensure swizzles align the right way with more LCRA constraints? */
- if (ins->type == TAG_ALU_4 && ins->alu.reg_mode != midgard_reg_mode_32)
- min_alignment[dest] = 3; /* (1 << 3) = 8 */
+ min_alignment[dest] =
+ (size == 16) ? 1 : /* (1 << 1) = 2-byte */
+ (size == 32) ? 2 : /* (1 << 2) = 4-byte */
+ (size == 64) ? 3 : /* (1 << 3) = 8-byte */
+ 3; /* 8-bit TODO: fall back to 8-byte for now */
- if (ins->type == TAG_LOAD_STORE_4 && ins->load_64)
- min_alignment[dest] = 3;
+ /* We can't cross xy/zw boundaries. TODO: vec8 can */
+ if (size == 16)
+ min_bound[dest] = 8;
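+ /* e.g. a 16-bit vec4 is 8 bytes and must land entirely in the
+ * low or high half of the 16-byte register */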
/* We don't have a swizzle for the conditional and we don't
* want to muck with the conditional itself, so just force
* alignment for now */
- if (ins->type == TAG_ALU_4 && OP_IS_CSEL_V(ins->alu.op))
+ if (ins->type == TAG_ALU_4 && OP_IS_CSEL_V(ins->alu.op)) {
min_alignment[dest] = 4; /* (1 << 4) = 16-byte = vec4 */
+ /* LCRA assumes bound >= alignment */
+ min_bound[dest] = 16;
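+ /* i.e. the (1 << 4) = 16-byte alignment forces a 16-byte bound */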
+ }
+
+ /* Since ld/st swizzles and masks are 32-bit only, we need them
+ * aligned to enable final packing */
+ if (ins->type == TAG_LOAD_STORE_4)
+ min_alignment[dest] = MAX2(min_alignment[dest], 2);
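+ /* e.g. 16-bit dests are raised from 2-byte to 4-byte alignment,
+ * while 64-bit dests keep their stricter 8-byte alignment */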
}
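+ /* Feed the gathered constraints to LCRA. Alignment defaults to
+ * (1 << 2) = 4 bytes with a full 16-byte register as the bound;
+ * the range restriction now takes the true size in bytes rather
+ * than assuming 32-bit components */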
for (unsigned i = 0; i < ctx->temp_count; ++i) {
- lcra_set_alignment(l, i, min_alignment[i] ? min_alignment[i] : 2);
- lcra_restrict_range(l, i, (found_class[i] + 1) * 4);
+ lcra_set_alignment(l, i, min_alignment[i] ? min_alignment[i] : 2,
+ min_bound[i] ? min_bound[i] : 16);
+ lcra_restrict_range(l, i, found_class[i]);
}
free(found_class);
free(min_alignment);
+ free(min_bound);
/* Next, we'll determine semantic class. We default to zero (work).
* But, if we're used with a special operation, that will force us to a
* particular class */
ins->alu.src2 = imm << 2;
} else {
- midgard_vector_alu_src mod2 =
- vector_alu_from_unsigned(ins->alu.src2);
offset_swizzle(ins->swizzle[1], src2.offset, src2.size, dest.size, dest_offset);
- ins->alu.src2 = vector_alu_srco_unsigned(mod2);
ins->registers.src2_reg = src2.reg;
}
struct phys_reg dst = index_to_reg(ctx, l, ins->dest, dest_size);
ins->load_store.reg = dst.reg;
- offset_swizzle(ins->swizzle[0], 0, 4, dst.size, dst.offset);
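+ /* ld/st swizzles address 32-bit components regardless of the
+ * destination type, so the swizzle source size is fixed at 4 */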
+ offset_swizzle(ins->swizzle[0], 0, 4, 4, dst.offset);
mir_set_bytemask(ins, mir_bytemask(ins) << dst.offset);
}
struct phys_reg offset = index_to_reg(ctx, l, ins->src[3], src_size[3]);
/* First, install the texture coordinate */
- ins->texture.in_reg_full = 1;
- ins->texture.in_reg_upper = 0;
ins->texture.in_reg_select = coord.reg & 1;
offset_swizzle(ins->swizzle[1], coord.offset, coord.size, dest.size, 0);
/* Next, install the destination */
- ins->texture.out_full = 1;
- ins->texture.out_upper = 0;
ins->texture.out_reg_select = dest.reg & 1;
- offset_swizzle(ins->swizzle[0], 0, 4, dest.size, dest.offset);
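+ /* 16-bit destinations are bounded to an 8-byte half (min_bound
+ * above), so the swizzle offset is taken relative to that half;
+ * the bytemask shift below still uses the full offset */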
+ offset_swizzle(ins->swizzle[0], 0, 4, dest.size,
+ dest_size == 2 ? dest.offset % 8 :
+ dest.offset);
mir_set_bytemask(ins, mir_bytemask(ins) << dest.offset);
/* If there is a register LOD/bias, use it */