{
/* live_in[s] = GEN[s] + (live_out[s] - KILL[s]) */
- pan_liveness_kill(live, ins->dest, max, ins->writemask);
+ pan_liveness_kill(live, ins->dest, max, bi_writemask(ins));
bi_foreach_src(ins, src) {
unsigned node = ins->src[src];
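A minimal standalone sketch of the dataflow rule quoted above, using plain bytemasks (the helper name and widths are illustrative, not the real pan_liveness interface):

#include <stdint.h>

/* live_in = GEN | (live_out & ~KILL), evaluated bottom-up per instruction */
static inline uint16_t
liveness_step(uint16_t live_out, uint16_t gen, uint16_t kill)
{
        return gen | (live_out & ~kill);
}

/* e.g. an instruction that fully overwrites a node (kill = 0xF) without
 * reading it (gen = 0) leaves it dead above it:
 * liveness_step(0xF, 0, 0xF) == 0 */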
* component c is `x`, we are accessing v.x, and each of the succeeding
* components y, z... up to the last component of the vector are accessed
* sequentially, then we may perform the same rewrite. If this is not the case,
- * rewriting would require a swizzle or writemask (TODO), so we fallback on a
+ * rewriting would require more complex vector features, so we fall back on a
* move.
*
* Otherwise, if the source is not SSA, we also fall back on a move. We could
*/
static void
-bi_insert_combine_mov(bi_context *ctx, bi_instruction *parent, unsigned comp, unsigned R)
+bi_combine_mov32(bi_context *ctx, bi_instruction *parent, unsigned comp, unsigned R)
{
- unsigned bits = nir_alu_type_get_type_size(parent->dest_type);
- unsigned bytes = bits / 8;
-
bi_instruction move = {
.type = BI_MOV,
.dest = R,
- .dest_type = parent->dest_type,
- .writemask = ((1 << bytes) - 1) << (bytes * comp),
+ .dest_type = nir_type_uint32,
+ .dest_offset = comp,
.src = { parent->src[comp] },
- .src_types = { parent->dest_type },
+ .src_types = { nir_type_uint32 },
.swizzle = { { parent->swizzle[comp][0] } }
};
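For intuition, with dest_offset a 32-bit combine decomposes into one move per channel; roughly, in the printer's syntax introduced below (register numbers hypothetical):

/* combine.u32 t9, t1, t2, t3   lowers to
 *
 *     mov.u32 r4, t1        (dest_offset = 0)
 *     mov.u32 r4+1, t2
 *     mov.u32 r4+2, t3
 *
 * where r4 is the temporary R from bi_make_temp_reg() and the +n suffix is
 * dest_offset, folded into the register number at RA time. */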
* bookkeeping. */
static bi_instruction *
-bi_get_parent(bi_context *ctx, unsigned idx, unsigned mask)
+bi_get_parent(bi_context *ctx, unsigned idx)
{
bi_foreach_instr_global(ctx, ins) {
if (ins->dest == idx)
- if ((ins->writemask & mask) == mask)
- return ins;
+ return ins;
}
return NULL;
}
}
-/* Shifts the writemask of an instruction by a specified byte count,
- * rotating the sources to compensate. Returns true if successful, and
- * returns false if not (nondestructive in this case). */
-
-static bool
-bi_shift_mask(bi_instruction *ins, unsigned shift)
-{
- /* No op and handles the funny cases */
- if (!shift)
- return true;
-
- unsigned sz = nir_alu_type_get_type_size(ins->dest_type);
- unsigned bytes = sz / 8;
-
- /* If things are misaligned, we bail. Check if shift % bytes is
- * nonzero. Note bytes is a power-of-two. */
- if (shift & (bytes - 1))
- return false;
-
- /* Ensure there are no funny types */
- bi_foreach_src(ins, s) {
- if (ins->src[s] && nir_alu_type_get_type_size(ins->src_types[s]) != sz)
- return false;
- }
-
- /* Shift swizzle so old i'th component is accessed by new (i + j)'th
- * component where j is component shift */
- unsigned component_shift = shift / bytes;
-
- /* Sanity check to avoid memory corruption */
- if (component_shift >= sizeof(ins->swizzle[0]))
- return false;
-
- /* Otherwise, shift is divisible by bytes, and all relevant src types
- * are the same size as the dest type. */
- ins->writemask <<= shift;
-
- bi_foreach_src(ins, s) {
- if (!ins->src[s]) continue;
-
- size_t overlap = sizeof(ins->swizzle[s]) - component_shift;
- memmove(ins->swizzle[s] + component_shift, ins->swizzle[s], overlap);
- }
-
- return true;
-}
-
/* Checks if we have a nicely aligned vector prefix */
static bool
-bi_is_aligned_vec(bi_instruction *combine, unsigned s, bi_instruction *parent,
+bi_is_aligned_vec32(bi_instruction *combine, unsigned s, bi_instruction *io,
unsigned *count)
{
/* We only support prefixes */
if (s != 0)
return false;
- /* Is it a contiguous write? */
- unsigned writes = util_bitcount(parent->writemask);
- if (parent->writemask != ((1 << writes) - 1))
+ if (!(bi_class_props[io->type] & BI_VECTOR))
+ return false;
+
+ if (nir_alu_type_get_type_size(combine->dest_type) != 32)
return false;
- /* Okay - how many components? */
- unsigned bytes = nir_alu_type_get_type_size(parent->dest_type) / 8;
- unsigned components = writes / bytes;
+ if (nir_alu_type_get_type_size(io->dest_type) != 32)
+ return false;
+
+ unsigned components = io->vector_channels;
/* Are we contiguous like that? */
for (unsigned i = 0; i < components; ++i) {
- if (combine->src[i] != parent->dest)
+ if (combine->src[i] != io->dest)
return false;
if (combine->swizzle[i][0] != i)
return false;
}
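The shape this check accepts is a 32-bit BI_VECTOR op feeding the leading channels of the combine in order; a sketch of a matching case (hypothetical IR):

/* io:      ld_var.v3 t0, ...               (vector_channels = 3, 32-bit)
 * combine: combine.u32 t9, t0.x, t0.y, t0.z, t1.x
 *
 * Channels 0..2 read io->dest with identity swizzles, so the prefix is
 * aligned and io could write the combined register directly; any other
 * source or swizzle breaks contiguity and the check fails. */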
+#if 0
/* Tries to lower a given source of a combine to an appropriate rewrite,
* returning true if successful, and false with no changes otherwise. */
unsigned pbytes = nir_alu_type_get_type_size(parent->dest_type) / 8;
if (pbytes != bytes) return false;
- bool scalar = (parent->writemask == ((1 << bytes) - 1));
+ bool scalar = parent->vector_channels == 0;
if (!(scalar || bi_is_aligned_vec(ins, s, parent, vec_count))) return false;
if (!bi_shift_mask(parent, bytes * s)) return false;
parent->dest = R;
return true;
}
+#endif
void
bi_lower_combine(bi_context *ctx, bi_block *block)
bi_foreach_instr_in_block_safe(block, ins) {
if (ins->type != BI_COMBINE) continue;
- /* The vector itself can't be shifted */
- assert(ins->writemask & 0x1);
-
unsigned R = bi_make_temp_reg(ctx);
bi_foreach_src(ins, s) {
if (!ins->src[s])
continue;
+#if 0
unsigned vec_count = 0;
if (bi_lower_combine_src(ctx, ins, s, R, &vec_count)) {
} else {
bi_insert_combine_mov(ctx, ins, s, R);
}
+#endif
+ bi_combine_mov32(ctx, ins, s, R);
}
BIFROST_ADD_OP_LD_VAR_32 :
BIFROST_ADD_OP_LD_VAR_16;
- unsigned cmask = bi_from_bytemask(ins->writemask, size / 8);
- unsigned channels = util_bitcount(cmask);
- assert(cmask == ((1 << channels) - 1));
-
unsigned packed_addr = 0;
if (ins->src[0] & BIR_INDEX_CONSTANT) {
assert(ins->dest & BIR_INDEX_REGISTER);
clause->data_register = ins->dest & ~BIR_INDEX_REGISTER;
+ unsigned channels = ins->vector_channels;
assert(channels >= 1 && channels <= 4);
struct bifrost_ld_var pack = {
static unsigned
bi_pack_add_ld_ubo(bi_clause *clause, bi_instruction *ins, struct bi_registers *regs)
{
- unsigned components = bi_load32_components(ins);
+ assert(ins->vector_channels >= 1 && ins->vector_channels <= 4);
const unsigned ops[4] = {
BIFROST_ADD_OP_LD_UBO_1,
};
bi_write_data_register(clause, ins);
- return bi_pack_add_2src(ins, regs, ops[components - 1]);
+ return bi_pack_add_2src(ins, regs, ops[ins->vector_channels - 1]);
}
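The ops table and the hardware channel fields both use the count-minus-one convention; assuming MALI_POSITIVE is the usual Panfrost biased-count macro:

/* MALI_POSITIVE(n) == n - 1, so vector_channels = 3 indexes ops[2]
 * (LD_UBO_3) and would pack a hardware channel field as 2. */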
static enum bifrost_ldst_type
static unsigned
bi_pack_add_ld_attr(bi_clause *clause, bi_instruction *ins, struct bi_registers *regs)
{
+ assert(ins->vector_channels >= 1 && ins->vector_channels <= 4);
+
struct bifrost_ld_attr pack = {
.src0 = bi_get_src(ins, regs, 1, false),
.src1 = bi_get_src(ins, regs, 2, false),
.location = bi_get_immediate(ins, 0),
- .channels = MALI_POSITIVE(bi_load32_components(ins)),
+ .channels = MALI_POSITIVE(ins->vector_channels),
.type = bi_pack_ldst_type(ins->dest_type),
.op = BIFROST_ADD_OP_LD_ATTR
};
static unsigned
bi_pack_add_st_vary(bi_clause *clause, bi_instruction *ins, struct bi_registers *regs)
{
- assert(ins->store_channels >= 1 && ins->store_channels <= 4);
+ assert(ins->vector_channels >= 1 && ins->vector_channels <= 4);
struct bifrost_st_vary pack = {
.src0 = bi_get_src(ins, regs, 1, false),
.src1 = bi_get_src(ins, regs, 2, false),
.src2 = bi_get_src(ins, regs, 3, false),
- .channels = MALI_POSITIVE(ins->store_channels),
+ .channels = MALI_POSITIVE(ins->vector_channels),
.op = BIFROST_ADD_OP_ST_VAR
};
static void
bi_print_swizzle(bi_instruction *ins, unsigned src, FILE *fp)
{
- unsigned size = nir_alu_type_get_type_size(ins->dest_type);
- unsigned count = (size == 64) ? 1 : (32 / size);
-
fprintf(fp, ".");
- for (unsigned u = 0; u < count; ++u) {
+ for (unsigned u = 0; u < bi_get_component_count(ins, src); ++u) {
assert(ins->swizzle[src][u] < 4);
fputc("xyzw"[ins->swizzle[src][u]], fp);
}
fprintf(fp, ".%s", bi_cond_name(branch->cond));
}
-static void
-bi_print_writemask(bi_instruction *ins, FILE *fp)
-{
- unsigned bits_per_comp = nir_alu_type_get_type_size(ins->dest_type);
- assert(bits_per_comp);
- unsigned bytes_per_comp = bits_per_comp / 8;
- unsigned comps = 16 / bytes_per_comp;
- unsigned smask = (1 << bytes_per_comp) - 1;
- fprintf(fp, ".");
-
- for (unsigned i = 0; i < comps; ++i) {
- unsigned masked = (ins->writemask >> (i * bytes_per_comp)) & smask;
- if (!masked)
- continue;
-
- assert(masked == smask);
- assert(i < 4);
- fputc("xyzw"[i], fp);
- }
-}
-
void
bi_print_instruction(bi_instruction *ins, FILE *fp)
{
fprintf(fp, ".%s", bi_cond_name(ins->csel_cond));
else if (ins->type == BI_BLEND)
fprintf(fp, ".loc%u", ins->blend_location);
- else if (ins->type == BI_STORE || ins->type == BI_STORE_VAR)
- fprintf(fp, ".v%u", ins->store_channels);
else if (ins->type == BI_TEX)
fprintf(fp, ".%s", bi_tex_op_name(ins->op.texture));
+ if (ins->vector_channels)
+ fprintf(fp, ".v%u", ins->vector_channels);
+
if (ins->dest)
bi_print_alu_type(ins->dest_type, fp);
bool succ = bi_print_dest_index(fp, ins, ins->dest);
assert(succ);
- if (ins->dest)
- bi_print_writemask(ins, fp);
+ if (ins->dest_offset)
+ fprintf(fp, "+%u", ins->dest_offset);
fprintf(fp, ", ");
*/
#include "compiler.h"
+#include "bi_print.h"
#include "panfrost/util/lcra.h"
#include "util/u_memory.h"
if (ins->dest && (ins->dest < l->node_count)) {
for (unsigned i = 1; i < l->node_count; ++i) {
if (live[i])
- lcra_add_node_interference(l, ins->dest, ins->writemask, i, live[i]);
+ lcra_add_node_interference(l, ins->dest, bi_writemask(ins), i, live[i]);
}
}
if (solution < 0)
return index;
- solution += offset;
-
assert((solution & 0x3) == 0);
unsigned reg = solution / 4;
+ reg += offset;
+
return BIR_INDEX_REGISTER | reg;
}
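The reordering tracks a unit change: the old offset was in bytes and added to the byte solution before the divide, while dest_offset counts 32-bit words and is added to the register number after it. A worked case (values illustrative):

/* solution = 8, one word of offset:
 *   old (byte offset):  (8 + 4) / 4 = r3
 *   new (word offset):   8 / 4 + 1  = r3 */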
/* TODO: Do we do anything here? */
} else {
/* Use the swizzle as component select */
- nir_alu_type T = ins->src_types[src];
- unsigned size = nir_alu_type_get_type_size(T);
- assert(size <= 32); /* TODO: 64-bit */
- unsigned comps_per_reg = 32 / size;
unsigned components = bi_get_component_count(ins, src);
for (unsigned i = 0; i < components; ++i) {
- /* If we're not writing the component, who cares? */
- if (!bi_writes_component(ins, i))
- continue;
-
- unsigned off = ins->swizzle[src][i] / comps_per_reg;
- off *= 4; /* 32-bit registers */
+ unsigned off = ins->swizzle[src][i] / components;
/* We can't cross register boundaries in a swizzle */
if (i == 0)
else
assert(off == offset);
- ins->swizzle[src][i] %= comps_per_reg;
+ ins->swizzle[src][i] %= components;
}
}
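Concretely, for a 16-bit source two components fit per 32-bit register, so a swizzle reaching into the second register is rebased (a sketch per the arithmetic above):

/* 16-bit src, components = 2:
 *   swizzle {2, 3} -> off = 2 / 2 = 1 register, swizzle %= 2 -> {0, 1}
 * i.e. read the following register with an identity swizzle; mixing in a
 * lane from a different register would trip the off == offset assert. */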
if (ins->dest >= l->node_count)
return;
- bool vector = (bi_class_props[ins->type] & BI_VECTOR);
- unsigned offset = 0;
-
- if (!vector) {
- /* Look at the writemask to get an offset, specifically the
- * trailing zeros */
-
- unsigned tz = __builtin_ctz(ins->writemask);
-
- /* Recall writemask is one bit per byte, so tz is in eytes */
- unsigned regs = tz / 4;
- offset = regs * 4;
-
- /* Adjust writemask to compensate */
- ins->writemask >>= offset;
- }
-
- ins->dest = bi_reg_from_index(l, ins->dest, offset);
-
+ ins->dest = bi_reg_from_index(l, ins->dest, ins->dest_offset);
}
static void
.op = { .mscale = true },
.dest = bi_make_temp(ctx),
.dest_type = nir_type_float32,
- .writemask = 0xF,
.src = {
bir_src_index(&instr->src[0].src),
BIR_INDEX_CONSTANT | 0,
.type = BI_CONVERT,
.dest = bi_make_temp(ctx),
.dest_type = nir_type_int32,
- .writemask = 0xF,
.src = { mscale.dest },
.src_types = { nir_type_float32 },
.roundmode = BIFROST_RTE
.op = { .special = BI_SPECIAL_EXP2_LOW },
.dest = bir_dest_index(&instr->dest.dest),
.dest_type = nir_type_float32,
- .writemask = 0xF,
.src = { f2i.dest, mscale.src[0] },
.src_types = { nir_type_int32, nir_type_float32 },
};
.op = { .frexp = BI_FREXPE_LOG },
.dest = bi_make_temp(ctx),
.dest_type = nir_type_int32,
- .writemask = 0xF,
.src = { bir_src_index(&instr->src[0].src) },
.src_types = { nir_type_float32 }
};
.type = BI_CONVERT,
.dest = bi_make_temp(ctx),
.dest_type = nir_type_float32,
- .writemask = 0xF,
.src = { frexpe.dest },
.src_types = { nir_type_int32 },
.roundmode = BIFROST_RTZ
.op = { .reduce = BI_REDUCE_ADD_FREXPM },
.dest = bi_make_temp(ctx),
.dest_type = nir_type_float32,
- .writemask = 0xF,
.src = {
BIR_INDEX_CONSTANT,
bir_src_index(&instr->src[0].src),
.op = { .table = BI_TABLE_LOG2_U_OVER_U_1_LOW },
.dest = bi_make_temp(ctx),
.dest_type = nir_type_float32,
- .writemask = 0xF,
.src = { bir_src_index(&instr->src[0].src) },
.src_types = { nir_type_float32 },
};
.type = BI_FMA,
.dest = bir_dest_index(&instr->dest.dest),
.dest_type = nir_type_float32,
- .writemask = 0xF,
.src = {
help.dest,
x_minus_1.dest,
pan_block_add_successor(&ctx->current_block->base, &branch->branch.target->base);
}
-/* Gets a bytemask for a complete vecN write */
-static unsigned
-bi_mask_for_channels_32(unsigned i)
-{
- return (1 << (4 * i)) - 1;
-}
-
static bi_instruction
bi_load(enum bi_class T, nir_intrinsic_instr *instr)
{
bi_instruction load = {
.type = T,
- .writemask = bi_mask_for_channels_32(instr->num_components),
+ .vector_channels = instr->num_components,
.src = { BIR_INDEX_CONSTANT },
.src_types = { nir_type_uint32 },
.constant = { .u64 = nir_intrinsic_base(instr) },
},
.dest = BIR_INDEX_REGISTER | 60 /* TODO: RA */,
.dest_type = nir_type_uint32,
- .writemask = 0xF
};
bi_emit(ctx, ins);
},
.dest = BIR_INDEX_REGISTER | 48 /* Looks like magic */,
.dest_type = nir_type_uint32,
- .writemask = 0xF
+ .vector_channels = 4
};
assert(blend.blend_location < 8);
bi_instruction address = bi_load_with_r61(BI_LOAD_VAR_ADDRESS, instr);
address.dest = bi_make_temp(ctx);
address.dest_type = nir_type_uint32;
- address.writemask = (1 << 12) - 1;
+ address.vector_channels = 3;
unsigned nr = nir_intrinsic_src_components(instr, 0);
assert(nir_intrinsic_write_mask(instr) == ((1 << nr) - 1));
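The assert encodes the assumption that stores are never sparse:

/* e.g. a vec3 store: nr = 3, required write_mask == (1 << 3) - 1 == 0b111 */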
{ 0 },
{ 0 }, { 1 }, { 2 }
},
- .store_channels = nr,
+ .vector_channels = nr,
};
for (unsigned i = 0; i < nr; ++i)
bi_instruction load = {
.type = BI_LOAD_UNIFORM,
- .writemask = (1 << (nr_components * 4)) - 1,
+ .vector_channels = nr_components,
.src = { BIR_INDEX_CONSTANT, BIR_INDEX_ZERO },
.src_types = { nir_type_uint32, nir_type_uint32 },
.constant = { (uniform * 16) + offset },
.type = BI_MOV,
.dest = bir_ssa_index(&instr->def),
.dest_type = instr->def.bit_size | nir_type_uint,
- .writemask = (1 << (instr->def.bit_size / 8)) - 1,
.src = {
BIR_INDEX_CONSTANT
},
static void
bi_copy_src(bi_instruction *alu, nir_alu_instr *instr, unsigned i, unsigned to,
- unsigned *constants_left, unsigned *constant_shift)
+ unsigned *constants_left, unsigned *constant_shift, unsigned comps)
{
unsigned bits = nir_src_bit_size(instr->src[i].src);
unsigned dest_bits = nir_dest_bit_size(instr->dest.dest);
alu->src[to] = bir_src_index(&instr->src[i].src);
- /* We assert scalarization above */
- alu->swizzle[to][0] = instr->src[i].swizzle[0];
+ /* Copy swizzle for all vectored components, replicating last component
+ * to fill undersized */
+
+ unsigned vec = alu->type == BI_COMBINE ? 1 :
+ MAX2(1, 32 / dest_bits);
+
+ for (unsigned j = 0; j < vec; ++j)
+ alu->swizzle[to][j] = instr->src[i].swizzle[MIN2(j, comps - 1)];
}
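An example of the replication rule (values illustrative): a 16-bit destination gives vec = 2 swizzle lanes, so an undersized NIR source is padded by repeating its last component:

/* comps = 1, swizzle = {x}:   alu->swizzle[to] = {x, x}
 * comps = 2, swizzle = {y, x}: copied through as {y, x}
 * BI_COMBINE keeps vec = 1, since its channels are split out later. */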
static void
bi_fuse_csel_cond(bi_instruction *csel, nir_alu_src cond,
- unsigned *constants_left, unsigned *constant_shift)
+ unsigned *constants_left, unsigned *constant_shift, unsigned comps)
{
/* Bail for vector weirdness */
if (cond.swizzle[0] != 0)
/* We found one, let's fuse it in */
csel->csel_cond = bcond;
- bi_copy_src(csel, alu, 0, 0, constants_left, constant_shift);
- bi_copy_src(csel, alu, 1, 1, constants_left, constant_shift);
+ bi_copy_src(csel, alu, 0, 0, constants_left, constant_shift, comps);
+ bi_copy_src(csel, alu, 1, 1, constants_left, constant_shift, comps);
}
static void
/* TODO: Implement lowering of special functions for older Bifrost */
assert((alu.type != BI_SPECIAL) || !(ctx->quirks & BIFROST_NO_FAST_OP));
- if (instr->dest.dest.is_ssa) {
- /* Construct a writemask */
- unsigned bits_per_comp = instr->dest.dest.ssa.bit_size;
- unsigned comps = instr->dest.dest.ssa.num_components;
-
- if (alu.type != BI_COMBINE)
- assert(comps == 1);
+ unsigned comps = nir_dest_num_components(instr->dest.dest);
- unsigned bits = bits_per_comp * comps;
- unsigned bytes = bits / 8;
- alu.writemask = (1 << bytes) - 1;
- } else {
- unsigned comp_mask = instr->dest.write_mask;
+ if (alu.type != BI_COMBINE)
+ assert(comps <= MAX2(1, 32 / nir_dest_bit_size(instr->dest.dest)));
- alu.writemask = pan_to_bytemask(nir_dest_bit_size(instr->dest.dest),
- comp_mask);
+ if (!instr->dest.dest.is_ssa) {
+ for (unsigned i = 0; i < comps; ++i)
+ assert(instr->dest.write_mask & (1 << i));
}
/* We inline constants as we go. This tracks how many constants have
if (i && alu.type == BI_CSEL)
f++;
- bi_copy_src(&alu, instr, i, i + f, &constants_left, &constant_shift);
+ bi_copy_src(&alu, instr, i, i + f, &constants_left, &constant_shift, comps);
}
/* Op-specific fixup */
alu.src_types[1] = alu.src_types[0];
bi_fuse_csel_cond(&alu, instr->src[0],
- &constants_left, &constant_shift);
+ &constants_left, &constant_shift, comps);
}
bi_emit(ctx, alu);
.dest = bir_dest_index(&instr->dest),
.dest_type = instr->dest_type,
.src_types = { nir_type_float32, nir_type_float32 },
- .writemask = instr->dest_type == nir_type_float32 ?
- 0xFFFF : 0xFF,
+ .vector_channels = 4
};
for (unsigned i = 0; i < instr->num_srcs; ++i) {
}
unsigned
-bi_get_component_count(bi_instruction *ins, unsigned src)
+bi_get_component_count(bi_instruction *ins, signed src)
{
if (bi_class_props[ins->type] & BI_VECTOR) {
- return (src == 0) ? 4 : 1;
+ assert(ins->vector_channels);
+ return (src <= 0) ? ins->vector_channels : 1;
} else {
- /* Stores imply VECTOR */
- assert(ins->dest_type);
- unsigned bytes = nir_alu_type_get_type_size(ins->dest_type);
- return 32 / bytes;
+ unsigned bytes = nir_alu_type_get_type_size(src < 0 ? ins->dest_type : ins->src_types[src]);
+ return MAX2(32 / bytes, 1);
}
}
-unsigned
-bi_load32_components(bi_instruction *ins)
-{
- unsigned mask = bi_from_bytemask(ins->writemask, 4);
- unsigned count = util_bitcount(mask);
- assert(mask == ((1 << count) - 1));
- assert(count >= 1 && count <= 4);
- return count;
-}
-
uint16_t
bi_bytemask_of_read_components(bi_instruction *ins, unsigned node)
{
bool
bi_writes_component(bi_instruction *ins, unsigned comp)
{
- /* TODO: Do we want something less coarse? */
- if (bi_class_props[ins->type] & BI_VECTOR)
- return true;
+ return comp < bi_get_component_count(ins, -1);
+}
+
+unsigned
+bi_writemask(bi_instruction *ins)
+{
nir_alu_type T = ins->dest_type;
unsigned size = nir_alu_type_get_type_size(T);
- return ins->writemask & (0xF << (comp * (size / 8)));
+ unsigned bytes_per_comp = size / 8;
+ unsigned components = bi_get_component_count(ins, -1);
+ unsigned bytes = bytes_per_comp * components;
+ unsigned mask = (1 << bytes) - 1;
+ unsigned shift = ins->dest_offset * 4; /* 32-bit words */
+ return (mask << shift);
}
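A worked case of the equivalent bytemask (a sketch; values follow the function above):

/* scalar 32-bit op (components = 1, bytes_per_comp = 4) at dest_offset 1:
 *   mask  = (1 << 4) - 1 = 0x000F
 *   shift = 1 * 4        = 4 bytes
 *   bi_writemask() == 0x00F0   -- the second 32-bit word, matching what
 *   the old explicit writemask would have stored. */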
* the end of a clause. Implies ADD */
#define BI_SCHED_HI_LATENCY (1 << 7)
-/* Intrinsic is vectorized and should read 4 components in the first source
- * regardless of writemask */
+/* Intrinsic is vectorized and acts with `vector_channels` components */
#define BI_VECTOR (1 << 8)
/* Use a data register for src0/dest respectively, bypassing the usual
unsigned dest;
unsigned src[BIR_SRC_COUNT];
+ /* 32-bit word offset for destination, added to the register number in
+ * RA when lowering combines */
+ unsigned dest_offset;
+
/* If one of the sources has BIR_INDEX_CONSTANT */
union {
uint64_t u64;
/* Round mode (requires BI_ROUNDMODE) */
enum bifrost_roundmode roundmode;
- /* Writemask (bit for each affected byte). This is quite restricted --
- * ALU ops can only write to a single channel (exception: <32 in which
- * you can write to 32/N contiguous aligned channels). Load/store can
- * only write to all channels at once, in a sense. But it's still
- * better to use this generic form than have synthetic ops flying
- * about, since we're not essentially vector for RA purposes. */
- uint16_t writemask;
-
/* Destination type. Usually the type of the instruction
* itself, but if sources and destination have different
* types, the type of the destination wins (so f2i would be
* selection, so we don't have to special case extraction. */
uint8_t swizzle[BIR_SRC_COUNT][NIR_MAX_VEC_COMPONENTS];
+ /* For VECTOR ops, how many channels are written? */
+ unsigned vector_channels;
+
/* A class-specific op from which the actual opcode can be derived
* (along with the above information) */
/* For BLEND -- the location 0-7 */
unsigned blend_location;
-
- /* For STORE, STORE_VAR -- channel count */
- unsigned store_channels;
};
} bi_instruction;
bool bi_is_src_swizzled(bi_instruction *ins, unsigned s);
bool bi_has_arg(bi_instruction *ins, unsigned arg);
uint16_t bi_from_bytemask(uint16_t bytemask, unsigned bytes);
-unsigned bi_get_component_count(bi_instruction *ins, unsigned s);
-unsigned bi_load32_components(bi_instruction *ins);
+unsigned bi_get_component_count(bi_instruction *ins, signed s);
uint16_t bi_bytemask_of_read_components(bi_instruction *ins, unsigned node);
uint64_t bi_get_immediate(bi_instruction *ins, unsigned index);
bool bi_writes_component(bi_instruction *ins, unsigned comp);
+unsigned bi_writemask(bi_instruction *ins);
/* BIR passes */
},
.dest = BIR_INDEX_REGISTER | 0,
.dest_type = nir_type_uint32,
- .writemask = 0xFFFF
+ .vector_channels = 4,
};
bi_instruction ldva = {
.type = BI_LOAD_VAR_ADDRESS,
- .writemask = (1 << 12) - 1,
+ .vector_channels = 3,
.dest = BIR_INDEX_REGISTER | 32,
.dest_type = nir_type_uint32,
.src = {
nir_type_uint32,
nir_type_uint32, nir_type_uint32, nir_type_uint32,
},
- .store_channels = 4
+ .vector_channels = 4
};
bi_context *ctx = rzalloc(NULL, bi_context);
bi_instruction ins = {
.type = BI_CONVERT,
.dest = BIR_INDEX_REGISTER | 0,
- .writemask = 0xF,
.src = { BIR_INDEX_REGISTER | 0 }
};