unsigned nr_dependencies;
BITSET_WORD *dependents;
+ /* Opcode; interpret together with `type` (e.g. a midgard_alu_op when type is TAG_ALU_4) */
+ unsigned op;
+
union {
midgard_load_store_word load_store;
midgard_vector_alu alu;
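As an aside, a minimal sketch (not part of the patch) of the access pattern the new field implies: `op` is only meaningful for the tag stored in `type`, so readers check the tag before interpreting it. The helper name below is made up for illustration; `TAG_ALU_4`, `midgard_alu_op_imov` and `midgard_alu_op_fmov` come from the code in this series.

static inline bool
mir_is_alu_mov(midgard_instruction *ins)
{
        /* `op` holds a midgard_alu_op only when this is an ALU instruction */
        return ins->type == TAG_ALU_4 &&
               (ins->op == midgard_alu_op_imov || ins->op == midgard_alu_op_fmov);
}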
.swizzle = SWIZZLE_IDENTITY,
.dest = dest,
.dest_type = nir_type_uint32,
+ .op = midgard_alu_op_imov,
.alu = {
- .op = midgard_alu_op_imov,
.reg_mode = midgard_reg_mode_32,
.outmod = midgard_outmod_int_wrap
},
ins.mask = mask_of(nr_components);
midgard_vector_alu alu = {
- .op = op,
.reg_mode = reg_mode,
.outmod = outmod,
};
ins.alu = alu;
+ ins.op = op;
+
/* Late fixup for emulated instructions */
if (instr->op == nir_op_b2f32 || instr->op == nir_op_b2i32) {
* restrictions. So, if possible, we try to flip the arguments
* in that case */
- int op = ins->alu.op;
+ int op = ins->op;
if (ins->src[0] == SSA_FIXED_REGISTER(REGISTER_CONSTANT) &&
alu_opcode_props[op].props & OP_COMMUTES) {
uint32_t value = is_16 ? cons->u16[component] : cons->u32[component];
bool is_vector = false;
- unsigned mask = effective_writemask(ins->alu.op, ins->mask);
+ unsigned mask = effective_writemask(ins->op, ins->mask);
for (unsigned c = 0; c < MIR_VEC_COMPONENTS; ++c) {
/* We only care if this component is actually used */
mir_foreach_instr_in_block(block, ins) {
if (ins->type != TAG_ALU_4) continue;
- if (ins->alu.op != midgard_alu_op_iand &&
- ins->alu.op != midgard_alu_op_ior) continue;
+ if (ins->op != midgard_alu_op_iand &&
+ ins->op != midgard_alu_op_ior) continue;
if (ins->src_invert[1] || !ins->src_invert[0]) continue;
static unsigned
mir_pack_mod(midgard_instruction *ins, unsigned i, bool scalar)
{
- bool integer = midgard_is_integer_op(ins->alu.op);
+ bool integer = midgard_is_integer_op(ins->op);
unsigned base_size = (8 << ins->alu.reg_mode);
unsigned sz = nir_alu_type_get_type_size(ins->src_types[i]);
bool half = (sz == (base_size >> 1));
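As a worked example of the half-size check above (assuming `midgard_reg_mode_32` encodes as 2, which the `8 << reg_mode` expression implies):

/* reg_mode_32: base_size = 8 << 2 = 32 bits.
 * A 16-bit source type gives sz = 16 == (base_size >> 1), so half = true;
 * a 32-bit source gives sz == base_size, so half = false. */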
static void
mir_pack_vector_srcs(midgard_instruction *ins)
{
- bool channeled = GET_CHANNEL_COUNT(alu_opcode_props[ins->alu.op].props);
+ bool channeled = GET_CHANNEL_COUNT(alu_opcode_props[ins->op].props);
midgard_reg_mode mode = ins->alu.reg_mode;
unsigned base_size = (8 << mode);
ins->src_invert[2]
};
- switch (ins->alu.op) {
+ switch (ins->op) {
case midgard_alu_op_iand:
/* a & ~b = iandnot(a, b) */
/* ~a & ~b = ~(a | b) = inor(a, b) */
if (inv[0] && inv[1])
- ins->alu.op = midgard_alu_op_inor;
+ ins->op = midgard_alu_op_inor;
else if (inv[1])
- ins->alu.op = midgard_alu_op_iandnot;
+ ins->op = midgard_alu_op_iandnot;
break;
case midgard_alu_op_ior:
/* ~a | ~b = ~(a & b) = inand(a, b) */
if (inv[0] && inv[1])
- ins->alu.op = midgard_alu_op_inand;
+ ins->op = midgard_alu_op_inand;
else if (inv[1])
- ins->alu.op = midgard_alu_op_iornot;
+ ins->op = midgard_alu_op_iornot;
break;
/* ~a ^ ~b = a ^ b */
if (inv[0] ^ inv[1])
- ins->alu.op = midgard_alu_op_inxor;
+ ins->op = midgard_alu_op_inxor;
break;
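The rewrites above rely on the De Morgan identities quoted in the comments; as a standalone sanity check (plain C, independent of the compiler, exhaustive over 8-bit values):

#include <assert.h>
#include <stdint.h>

int main(void)
{
        for (unsigned a = 0; a < 256; ++a) {
                for (unsigned b = 0; b < 256; ++b) {
                        uint8_t x = a, y = b;
                        assert((uint8_t)(~x & ~y) == (uint8_t)~(x | y)); /* -> inor  */
                        assert((uint8_t)(~x | ~y) == (uint8_t)~(x & y)); /* -> inand */
                        assert((uint8_t)(~x ^ ~y) == (uint8_t)(x ^ y));  /* -> inxor */
                }
        }

        return 0;
}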
static void
mir_lower_roundmode(midgard_instruction *ins)
{
- if (alu_opcode_props[ins->alu.op].props & MIDGARD_ROUNDS) {
+ if (alu_opcode_props[ins->op].props & MIDGARD_ROUNDS) {
assert(ins->roundmode <= 0x3);
- ins->alu.op += ins->roundmode;
+ ins->op += ins->roundmode;
}
}
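A note on the `ins->op += ins->roundmode` trick: it assumes every `MIDGARD_ROUNDS` opcode lays out its four rounding variants at consecutive encodings, indexed by the 2-bit roundmode (hence the `<= 0x3` assert). The concrete ordering below is an assumption for illustration only, not taken from the ISA headers:

/* Assumed layout (illustrative):
 *   base_op + 0 -> round to nearest even
 *   base_op + 1 -> round toward zero
 *   base_op + 2 -> round toward negative infinity
 *   base_op + 3 -> round toward positive infinity
 * so adding roundmode (0..3) selects the variant directly. */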
unsigned size = 0;
void *source = NULL;
+ /* Staging copy of the ALU word, so ins->op can be patched in just before packing */
+ midgard_vector_alu source_alu;
+
/* In case we demote to a scalar */
midgard_scalar_alu scalarized;
mir_pack_mask_alu(ins);
mir_pack_vector_srcs(ins);
size = sizeof(midgard_vector_alu);
- source = &ins->alu;
+ source_alu = ins->alu;
+ source_alu.op = ins->op;
+ source = &source_alu;
} else if (ins->unit == ALU_ENAB_BR_COMPACT) {
size = sizeof(midgard_branch_cond);
source = &ins->br_compact;
source = &ins->branch_extended;
} else {
size = sizeof(midgard_scalar_alu);
- scalarized = vector_to_scalar_alu(ins->alu, ins);
+ source_alu = ins->alu;
+ source_alu.op = ins->op;
+ scalarized = vector_to_scalar_alu(source_alu, ins);
source = &scalarized;
}
mir_foreach_instr_in_block_safe(block, ins) {
if (ins->type != TAG_ALU_4) continue;
- if (!OP_IS_MOVE(ins->alu.op)) continue;
+ if (!OP_IS_MOVE(ins->op)) continue;
if (ins->is_pack) continue;
unsigned from = ins->src[1];
mir_foreach_instr_in_block_safe(block, ins) {
if (ins->type != TAG_ALU_4) continue;
- if (!OP_IS_MOVE(ins->alu.op)) continue;
+ if (!OP_IS_MOVE(ins->op)) continue;
if (ins->is_pack) continue;
unsigned from = ins->src[1];
mir_foreach_instr_in_block_safe(block, ins) {
if (ins->type != TAG_ALU_4) continue;
if (ins->compact_branch) continue;
- if (!OP_IS_MOVE(ins->alu.op)) continue;
+ if (!OP_IS_MOVE(ins->op)) continue;
/* Check if it's overwritten in this block before being read */
bool overwritten = false;
mir_foreach_instr_in_block_safe(block, ins) {
/* First search for fmul */
if (ins->type != TAG_ALU_4) continue;
- if (ins->alu.op != midgard_alu_op_fmul) continue;
+ if (ins->op != midgard_alu_op_fmul) continue;
/* TODO: Flip */
frcp_found =
(sub->type == TAG_ALU_4) &&
- (sub->alu.op == midgard_alu_op_frcp);
+ (sub->op == midgard_alu_op_frcp);
break;
}
src = vector_alu_from_unsigned(ins->alu.src2);
unsigned *swizzle = ins->swizzle[src_idx];
- unsigned comp_mask = effective_writemask(ins->alu.op, ins->mask);
+ unsigned comp_mask = effective_writemask(ins->op, ins->mask);
unsigned num_comp = util_bitcount(comp_mask);
unsigned max_comp = mir_components_for_type(ins->dest_type);
bool first = true;
mir_print_constant_component(stdout, &ins->constants,
swizzle[comp], ins->alu.reg_mode,
- src.half, src.mod, ins->alu.op);
+ src.half, src.mod, ins->op);
}
if (num_comp > 1)
switch (ins->type) {
case TAG_ALU_4: {
- midgard_alu_op op = ins->alu.op;
+ midgard_alu_op op = ins->op;
const char *name = alu_opcode_props[op].name;
if (ins->unit)
* want to muck with the conditional itself, so just force
* alignment for now */
- if (ins->type == TAG_ALU_4 && OP_IS_CSEL_V(ins->alu.op)) {
+ if (ins->type == TAG_ALU_4 && OP_IS_CSEL_V(ins->op)) {
min_alignment[dest] = 4; /* 1 << 4 = 16 bytes = vec4 */
/* LCRA assumes bound >= alignment */
mir_set_bytemask(ins, mir_bytemask(ins) << dest.offset);
unsigned dest_offset =
- GET_CHANNEL_COUNT(alu_opcode_props[ins->alu.op].props) ? 0 :
+ GET_CHANNEL_COUNT(alu_opcode_props[ins->op].props) ? 0 :
dest.offset;
offset_swizzle(ins->swizzle[0], src1.offset, src1.shift, dest.shift, dest_offset);
static bool
mir_is_add_2(midgard_instruction *ins)
{
- if (ins->alu.op != midgard_alu_op_fadd)
+ if (ins->op != midgard_alu_op_fadd)
return false;
if (ins->src[0] != ins->src[1])
{
/* FADD x, x = FMUL x, #2 */
if (mir_is_add_2(ins) && (unit & (UNITS_MUL | UNIT_VLUT))) {
- ins->alu.op = midgard_alu_op_fmul;
+ ins->op = midgard_alu_op_fmul;
ins->src[1] = ~0;
ins->src_abs[1] = false;
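The rewrite above uses the identity x + x == 2 * x: an add whose two sources are the same value can run as a multiply by 2 instead, which lets the scheduler place it on a multiplier or the VLUT unit when the adders are busy. Presumably the constant 2.0 is attached as an inline constant in lines not shown in this excerpt.

/* Identity used: fadd r, r  ==  fmul r, #2.0 (src[1] is cleared above;
 * presumably the inline-constant slot then carries the 2.0). */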
static bool
mir_has_unit(midgard_instruction *ins, unsigned unit)
{
- if (alu_opcode_props[ins->alu.op].props & unit)
+ if (alu_opcode_props[ins->op].props & unit)
return true;
/* FADD x, x can run on any adder or any multiplier */
BITSET_FOREACH_SET(i, worklist, count) {
bool is_move = alu &&
- (instructions[i]->alu.op == midgard_alu_op_imov ||
- instructions[i]->alu.op == midgard_alu_op_fmov);
+ (instructions[i]->op == midgard_alu_op_imov ||
+ instructions[i]->op == midgard_alu_op_fmov);
if ((max_active - i) >= max_distance)
continue;
if (ldst && mir_pipeline_count(instructions[i]) + predicate->pipeline_count > 2)
continue;
- bool conditional = alu && !branch && OP_IS_CSEL(instructions[i]->alu.op);
+ bool conditional = alu && !branch && OP_IS_CSEL(instructions[i]->op);
conditional |= (branch && instructions[i]->branch.conditional);
if (conditional && no_cond)
return ~0;
/* If it would itself require a condition, that's recursive */
- if (OP_IS_CSEL(instructions[i]->alu.op))
+ if (OP_IS_CSEL(instructions[i]->op))
return ~0;
/* We'll need to rewrite to .w but that doesn't work for vector
* ops that don't replicate (ball/bany), so bail there */
- if (GET_CHANNEL_COUNT(alu_opcode_props[instructions[i]->alu.op].props))
+ if (GET_CHANNEL_COUNT(alu_opcode_props[instructions[i]->op].props))
return ~0;
/* Ensure it will fit with constants */
unsigned condition_index = branch ? 0 : 2;
/* csel_v is vector; otherwise, conditions are scalar */
- bool vector = !branch && OP_IS_CSEL_V(last->alu.op);
+ bool vector = !branch && OP_IS_CSEL_V(last->op);
/* Grab the conditional instruction */
*vadd = v_mov(~0, make_compiler_temp(ctx));
if (!ctx->is_blend) {
- vadd->alu.op = midgard_alu_op_iadd;
+ vadd->op = midgard_alu_op_iadd;
vadd->src[0] = SSA_FIXED_REGISTER(31);
vadd->src_types[0] = nir_type_uint32;
mir_update_worklist(worklist, len, instructions, vadd);
mir_update_worklist(worklist, len, instructions, smul);
- bool vadd_csel = vadd && OP_IS_CSEL(vadd->alu.op);
- bool smul_csel = smul && OP_IS_CSEL(smul->alu.op);
+ bool vadd_csel = vadd && OP_IS_CSEL(vadd->op);
+ bool smul_csel = smul && OP_IS_CSEL(smul->op);
if (vadd_csel || smul_csel) {
midgard_instruction *ins = vadd_csel ? vadd : smul;
bool
mir_nontrivial_mod(midgard_instruction *ins, unsigned i, bool check_swizzle)
{
- bool is_int = midgard_is_integer_op(ins->alu.op);
+ bool is_int = midgard_is_integer_op(ins->op);
if (is_int) {
if (ins->src_shift[i]) return true;
bool
mir_nontrivial_outmod(midgard_instruction *ins)
{
- bool is_int = midgard_is_integer_op(ins->alu.op);
+ bool is_int = midgard_is_integer_op(ins->op);
unsigned mod = ins->alu.outmod;
if (ins->dest_type != ins->src_types[1])
/* Handle dot products and things */
if (ins->type == TAG_ALU_4 && !ins->compact_branch) {
- unsigned props = alu_opcode_props[ins->alu.op].props;
+ unsigned props = alu_opcode_props[ins->op].props;
unsigned channel_override = GET_CHANNEL_COUNT(props);
};
if (bundle.tag == TAG_ALU_4) {
- assert(OP_IS_MOVE(u->alu.op));
+ assert(OP_IS_MOVE(u->op));
u->unit = UNIT_VMUL;
size_t bytes_emitted = sizeof(uint32_t) + sizeof(midgard_reg_info) + sizeof(midgard_vector_alu);