pan_block_add_successor(&ctx->current_block->base, &branch->branch.target->base);
}
-/* Gets a bytemask for a complete vecN write */
-static unsigned
-bi_mask_for_channels_32(unsigned i)
-{
- return (1 << (4 * i)) - 1;
-}
-
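+/* A bytemask like 0xFFF for a 32-bit vec3 becomes vector_channels = 3;
+ * compare bi_mask_for_channels_32(3) = (1 << 12) - 1 = 0xFFF above. Scalar
+ * 32-bit writes appear to need no explicit count (their old
+ * .writemask = 0xF lines are dropped outright), suggesting zero reads as
+ * "scalar" in the new field. */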
static bi_instruction
bi_load(enum bi_class T, nir_intrinsic_instr *instr)
{
bi_instruction load = {
.type = T,
- .writemask = bi_mask_for_channels_32(instr->num_components),
+ .vector_channels = instr->num_components,
.src = { BIR_INDEX_CONSTANT },
.src_types = { nir_type_uint32 },
.constant = { .u64 = nir_intrinsic_base(instr) },
},
.dest = BIR_INDEX_REGISTER | 60 /* TODO: RA */,
.dest_type = nir_type_uint32,
- .writemask = 0xF
};
bi_emit(ctx, ins);
},
.dest = BIR_INDEX_REGISTER | 48 /* Looks like magic */,
.dest_type = nir_type_uint32,
- .writemask = 0xF
+ .vector_channels = 4
};
assert(blend.blend_location < 8);
bi_instruction address = bi_load_with_r61(BI_LOAD_VAR_ADDRESS, instr);
address.dest = bi_make_temp(ctx);
address.dest_type = nir_type_uint32;
- address.writemask = (1 << 12) - 1;
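+ /* (1 << 12) - 1 = 0xFFF masked 12 bytes, i.e. three 32-bit words, so the
+ * address load writes exactly 3 channels */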
+ address.vector_channels = 3;
unsigned nr = nir_intrinsic_src_components(instr, 0);
assert(nir_intrinsic_write_mask(instr) == ((1 << nr) - 1));
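+ /* The mask is asserted dense (channels 0..nr-1, no holes), so a plain
+ * channel count loses no information */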
{ 0 },
{ 0 }, { 1 }, { 2 }
},
- .store_channels = nr,
+ .vector_channels = nr,
};
for (unsigned i = 0; i < nr; ++i)
bi_instruction load = {
.type = BI_LOAD_UNIFORM,
- .writemask = (1 << (nr_components * 4)) - 1,
+ .vector_channels = nr_components,
.src = { BIR_INDEX_CONSTANT, BIR_INDEX_ZERO },
.src_types = { nir_type_uint32, nir_type_uint32 },
.constant = { (uniform * 16) + offset },
.type = BI_MOV,
.dest = bir_ssa_index(&instr->def),
.dest_type = instr->def.bit_size | nir_type_uint,
- .writemask = (1 << (instr->def.bit_size / 8)) - 1,
.src = {
BIR_INDEX_CONSTANT
},
case nir_op_isub:
return BI_ISUB;
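+ /* iand/ior/ixor all share the BI_BITWISE class; the specific operation
+ * is selected later via alu.op.bitwise */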
+ case nir_op_iand:
+ case nir_op_ior:
+ case nir_op_ixor:
+ return BI_BITWISE;
+
BI_CASE_CMP(nir_op_flt)
BI_CASE_CMP(nir_op_fge)
BI_CASE_CMP(nir_op_feq)
static void
bi_copy_src(bi_instruction *alu, nir_alu_instr *instr, unsigned i, unsigned to,
- unsigned *constants_left, unsigned *constant_shift)
+ unsigned *constants_left, unsigned *constant_shift, unsigned comps)
{
unsigned bits = nir_src_bit_size(instr->src[i].src);
unsigned dest_bits = nir_dest_bit_size(instr->dest.dest);
alu->src[to] = bir_src_index(&instr->src[i].src);
- /* We assert scalarization above */
- alu->swizzle[to][0] = instr->src[i].swizzle[0];
+ /* Copy the swizzle for all vectored components, replicating the last
+ * component to fill out undersized vectors */
+
+ unsigned vec = alu->type == BI_COMBINE ? 1 :
+ MAX2(1, 32 / dest_bits);
+
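+ /* e.g. a 16-bit destination gives vec = 2, so a scalar source
+ * (comps = 1) replicates its single swizzle entry into both half-word
+ * lanes */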
+ for (unsigned j = 0; j < vec; ++j)
+ alu->swizzle[to][j] = instr->src[i].swizzle[MIN2(j, comps - 1)];
}
static void
bi_fuse_csel_cond(bi_instruction *csel, nir_alu_src cond,
- unsigned *constants_left, unsigned *constant_shift)
+ unsigned *constants_left, unsigned *constant_shift, unsigned comps)
{
/* Bail for vector weirdness */
if (cond.swizzle[0] != 0)
return;
/* We found one, let's fuse it in */
- csel->csel_cond = bcond;
- bi_copy_src(csel, alu, 0, 0, constants_left, constant_shift);
- bi_copy_src(csel, alu, 1, 1, constants_left, constant_shift);
+ csel->cond = bcond;
+ bi_copy_src(csel, alu, 0, 0, constants_left, constant_shift, comps);
+ bi_copy_src(csel, alu, 1, 1, constants_left, constant_shift, comps);
}
static void
/* TODO: Implement lowering of special functions for older Bifrost */
assert((alu.type != BI_SPECIAL) || !(ctx->quirks & BIFROST_NO_FAST_OP));
- if (instr->dest.dest.is_ssa) {
- /* Construct a writemask */
- unsigned bits_per_comp = instr->dest.dest.ssa.bit_size;
- unsigned comps = instr->dest.dest.ssa.num_components;
+ unsigned comps = nir_dest_num_components(instr->dest.dest);
- if (alu.type != BI_COMBINE)
- assert(comps == 1);
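+ /* Vectorized ALU writes must fit in one 32-bit word (a vec2 of 16-bit,
+ * a vec4 of 8-bit), matching the replication width used in bi_copy_src */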
+ if (alu.type != BI_COMBINE)
+ assert(comps <= MAX2(1, 32 / nir_dest_bit_size(instr->dest.dest)));
- unsigned bits = bits_per_comp * comps;
- unsigned bytes = bits / 8;
- alu.writemask = (1 << bytes) - 1;
- } else {
- unsigned comp_mask = instr->dest.write_mask;
-
- alu.writemask = pan_to_bytemask(nir_dest_bit_size(instr->dest.dest),
- comp_mask);
+ if (!instr->dest.dest.is_ssa) {
+ for (unsigned i = 0; i < comps; ++i)
+ assert(instr->dest.write_mask & (1 << i));
}
/* We inline constants as we go. This tracks how many constants have
if (i && alu.type == BI_CSEL)
f++;
- bi_copy_src(&alu, instr, i, i + f, &constants_left, &constant_shift);
+ bi_copy_src(&alu, instr, i, i + f, &constants_left, &constant_shift, comps);
}
/* Op-specific fixup */
BI_CASE_CMP(nir_op_ieq)
BI_CASE_CMP(nir_op_fne)
BI_CASE_CMP(nir_op_ine)
- alu.op.compare = bi_cond_for_nir(instr->op, false);
+ alu.cond = bi_cond_for_nir(instr->op, false);
break;
case nir_op_fround_even:
- alu.op.round = BI_ROUND_MODE;
alu.roundmode = BIFROST_RTE;
break;
case nir_op_fceil:
- alu.op.round = BI_ROUND_MODE;
alu.roundmode = BIFROST_RTP;
break;
case nir_op_ffloor:
- alu.op.round = BI_ROUND_MODE;
alu.roundmode = BIFROST_RTN;
break;
case nir_op_ftrunc:
- alu.op.round = BI_ROUND_MODE;
alu.roundmode = BIFROST_RTZ;
break;
+ case nir_op_iand:
+ alu.op.bitwise = BI_BITWISE_AND;
+ break;
+ case nir_op_ior:
+ alu.op.bitwise = BI_BITWISE_OR;
+ break;
+ case nir_op_ixor:
+ alu.op.bitwise = BI_BITWISE_XOR;
+ break;
default:
break;
}
if (alu.type == BI_CSEL) {
/* Default to csel3 */
- alu.csel_cond = BI_COND_NE;
+ alu.cond = BI_COND_NE;
alu.src[1] = BIR_INDEX_ZERO;
alu.src_types[1] = alu.src_types[0];
bi_fuse_csel_cond(&alu, instr->src[0],
- &constants_left, &constant_shift);
+ &constants_left, &constant_shift, comps);
+ } else if (alu.type == BI_BITWISE) {
+ /* Implicit shift argument... at some point we should fold */
+ alu.src[2] = BIR_INDEX_ZERO;
+ alu.src_types[2] = alu.src_types[1];
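+ /* With the shift tied to zero, BI_BITWISE reduces to a plain two-source
+ * and/or/xor; presumably the hardware op fuses a shift we do not use yet */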
}
bi_emit(ctx, alu);
.dest = bir_dest_index(&instr->dest),
.dest_type = instr->dest_type,
.src_types = { nir_type_float32, nir_type_float32 },
- .writemask = instr->dest_type == nir_type_float32 ?
- 0xFFFF : 0xFF,
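+ /* Both old masks described a vec4: 0xFFFF is 16 bytes of 32-bit texels
+ * and 0xFF is 8 bytes of 16-bit texels, so the channel count is 4 either
+ * way */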
+ .vector_channels = 4
};
for (unsigned i = 0; i < instr->num_srcs; ++i) {