/* We currently only handle SSA */
if (!src) return false;
- if (src & (BIR_SPECIAL | BIR_IS_REG)) return false;
+ if (src & (BIR_SPECIAL | PAN_IS_REG)) return false;
/* We are SSA. Look up the generating instruction. */
unsigned bytes = nir_alu_type_get_type_size(ins->dest_type) / 8;
fprintf(fp, "_");
else if (index & BIR_INDEX_REGISTER)
fprintf(fp, "br%u", index & ~BIR_INDEX_REGISTER);
- else if (index & BIR_IS_REG)
+ else if (index & PAN_IS_REG)
fprintf(fp, "r%u", index >> 1);
else if (!(index & BIR_SPECIAL))
fprintf(fp, "%u", (index >> 1) - 1);
.dest = bi_make_temp(ctx),
.dest_type = nir_type_float32,
.src = {
- bir_src_index(&instr->src[0].src),
+ pan_src_index(&instr->src[0].src),
BIR_INDEX_CONSTANT | 0,
BIR_INDEX_ZERO,
BIR_INDEX_CONSTANT | 32,
bi_instruction fexp = {
.type = BI_SPECIAL,
.op = { .special = BI_SPECIAL_EXP2_LOW },
- .dest = bir_dest_index(&instr->dest.dest),
+ .dest = pan_dest_index(&instr->dest.dest),
.dest_type = nir_type_float32,
.src = { f2i.dest, mscale.src[0] },
.src_types = { nir_type_int32, nir_type_float32 },
.op = { .frexp = BI_FREXPE_LOG },
.dest = bi_make_temp(ctx),
.dest_type = nir_type_int32,
- .src = { bir_src_index(&instr->src[0].src) },
+ .src = { pan_src_index(&instr->src[0].src) },
.src_types = { nir_type_float32 }
};
.dest_type = nir_type_float32,
.src = {
BIR_INDEX_CONSTANT,
- bir_src_index(&instr->src[0].src),
+ pan_src_index(&instr->src[0].src),
},
.src_types = { nir_type_float32, nir_type_float32 },
.constant = {
.op = { .table = BI_TABLE_LOG2_U_OVER_U_1_LOW },
.dest = bi_make_temp(ctx),
.dest_type = nir_type_float32,
- .src = { bir_src_index(&instr->src[0].src) },
+ .src = { pan_src_index(&instr->src[0].src) },
.src_types = { nir_type_float32 },
};
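Before the FMA below: the table lookup above supplies log2(x)/(x - 1), so multiplying it by (x - 1) reconstructs log2(x) while staying well-behaved as x approaches 1; x here is presumably the frexp mantissa and M its exponent. A minimal plain-C reference under that assumption (example_* is not in the tree):

#include <math.h>

/* Illustrative reference only: what the frexp/table/FMA sequence approximates.
 * With x = m * 2^e, log2(x) = e + log2(m), and the table provides
 * log2(m)/(m - 1) so that an FMA of (table, m - 1, e) yields the result. */
static float
example_log2_reference(float x)
{
        int e;
        float m = frexpf(x, &e);   /* x = m * 2^e; the HW mantissa range may differ */
        return (float)e + log2f(m);
}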
/* FMA log2(x)/(x - 1), (x - 1), M */
bi_instruction fma = {
.type = BI_FMA,
- .dest = bir_dest_index(&instr->dest.dest),
+ .dest = pan_dest_index(&instr->dest.dest),
.dest_type = nir_type_float32,
.src = {
help.dest,
const nir_intrinsic_info *info = &nir_intrinsic_infos[instr->intrinsic];
if (info->has_dest)
- load.dest = bir_dest_index(&instr->dest);
+ load.dest = pan_dest_index(&instr->dest);
if (info->has_dest && info->index_map[NIR_INTRINSIC_TYPE] > 0)
load.dest_type = nir_intrinsic_type(instr);
if (nir_src_is_const(*offset))
load.constant.u64 += nir_src_as_uint(*offset);
else
- load.src[0] = bir_src_index(offset);
+ load.src[0] = pan_src_index(offset);
return load;
}
.type = BI_ATEST,
.src = {
BIR_INDEX_REGISTER | 60 /* TODO: RA */,
- bir_src_index(&instr->src[0])
+ pan_src_index(&instr->src[0])
},
.src_types = {
nir_type_uint32,
.type = BI_BLEND,
.blend_location = nir_intrinsic_base(instr),
.src = {
- bir_src_index(&instr->src[0]),
+ pan_src_index(&instr->src[0]),
BIR_INDEX_REGISTER | 60 /* Can this be arbitrary? */,
},
.src_types = {
bi_instruction st = {
.type = BI_STORE_VAR,
.src = {
- bir_src_index(&instr->src[0]),
+ pan_src_index(&instr->src[0]),
address.dest, address.dest, address.dest,
},
.src_types = {
.src = { BIR_INDEX_CONSTANT, BIR_INDEX_ZERO },
.src_types = { nir_type_uint32, nir_type_uint32 },
.constant = { (uniform * 16) + offset },
- .dest = bir_dest_index(&nir_dest),
+ .dest = pan_dest_index(&nir_dest),
.dest_type = nir_type_uint32, /* TODO */
};
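The 16-byte stride above suggests these uniforms are addressed in vec4-sized slots; a purely illustrative helper under that assumption (example_* is not in the tree):

#include <stdint.h>

/* Illustrative only: assuming 16-byte (vec4) slots, uniform slot 3 at byte
 * offset 8 within the slot gives constant.u64 == 3 * 16 + 8 == 56. */
static inline uint64_t
example_uniform_byte_address(unsigned slot, unsigned byte_offset)
{
        return (uint64_t)slot * 16 + byte_offset;
}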
bi_instruction move = {
.type = BI_MOV,
- .dest = bir_ssa_index(&instr->def),
+ .dest = pan_ssa_index(&instr->def),
.dest_type = instr->def.bit_size | nir_type_uint,
.src = {
BIR_INDEX_CONSTANT
return;
}
- alu->src[to] = bir_src_index(&instr->src[i].src);
+ alu->src[to] = pan_src_index(&instr->src[i].src);
/* Copy the swizzle for all vectored components, replicating the last
 * component to fill out undersized vectors */
/* Otherwise, assume it's something we can handle normally */
bi_instruction alu = {
.type = bi_class_for_nir_alu(instr->op),
- .dest = bir_dest_index(&instr->dest.dest),
+ .dest = pan_dest_index(&instr->dest.dest),
.dest_type = nir_op_infos[instr->op].output_type
| nir_dest_bit_size(instr->dest.dest),
};
bi_instruction tex = {
.type = BI_TEX,
.op = { .texture = BI_TEX_COMPACT },
- .dest = bir_dest_index(&instr->dest),
+ .dest = pan_dest_index(&instr->dest),
.dest_type = instr->dest_type,
.src_types = { nir_type_float32, nir_type_float32 },
.vector_channels = 4
};
for (unsigned i = 0; i < instr->num_srcs; ++i) {
- int index = bir_src_index(&instr->src[i].src);
+ int index = pan_src_index(&instr->src[i].src);
assert (instr->src[i].src_type == nir_tex_src_coord);
tex.src[0] = index;
bi_set_branch_cond(bi_instruction *branch, nir_src *cond, bool invert)
{
/* TODO: Try to unwrap instead of always bailing */
- branch->src[0] = bir_src_index(cond);
+ branch->src[0] = pan_src_index(cond);
branch->src[1] = BIR_INDEX_ZERO;
branch->src_types[0] = branch->src_types[1] = nir_type_uint16;
branch->branch.cond = invert ? BI_COND_EQ : BI_COND_NE;
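In scalar terms, the condition this sets up behaves as below (an illustrative sketch; example_* is not in the tree):

#include <stdbool.h>
#include <stdint.h>

/* Illustrative sketch: the branch compares cond against zero as a uint16 and
 * takes the branch on NE, or on EQ when the sense is inverted. */
static bool
example_branch_taken(uint16_t cond, bool invert)
{
        return invert ? (cond == 0) : (cond != 0);
}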
struct list_head link; /* Must be first */
enum bi_class type;
- /* Indices, see bir_ssa_index etc. Note zero is special cased
+ /* Indices, see pan_ssa_index etc. Note zero is special cased
* to "no argument" */
unsigned dest;
unsigned src[BIR_SRC_COUNT];
list_del(&ins->link);
}
-/* So we can distinguish between SSA/reg/sentinel quickly */
-#define BIR_NO_ARG (0)
-#define BIR_IS_REG (1)
-
/* If high bits are set, instead of SSA/registers, we have specials indexed by
* the low bits if necessary.
*
static inline unsigned
bi_make_temp_reg(bi_context *ctx)
{
- return ((ctx->impl->reg_alloc + ctx->temp_alloc++) << 1) | BIR_IS_REG;
-}
-
-static inline unsigned
-bir_ssa_index(nir_ssa_def *ssa)
-{
- /* Off-by-one ensures BIR_NO_ARG is skipped */
- return ((ssa->index + 1) << 1) | 0;
-}
-
-static inline unsigned
-bir_src_index(nir_src *src)
-{
- if (src->is_ssa)
- return bir_ssa_index(src->ssa);
- else {
- assert(!src->reg.indirect);
- return (src->reg.reg->index << 1) | BIR_IS_REG;
- }
-}
-
-static inline unsigned
-bir_dest_index(nir_dest *dst)
-{
- if (dst->is_ssa)
- return bir_ssa_index(&dst->ssa);
- else {
- assert(!dst->reg.indirect);
- return (dst->reg.reg->index << 1) | BIR_IS_REG;
- }
+ return ((ctx->impl->reg_alloc + ctx->temp_alloc++) << 1) | PAN_IS_REG;
}
/* Iterators for Bifrost IR */
return pan_ssa_index(src->ssa);
else {
assert(!src->reg.indirect);
- return (src->reg.reg->index << 1) | BIR_IS_REG;
+ return (src->reg.reg->index << 1) | PAN_IS_REG;
}
}
return pan_ssa_index(&dst->ssa);
else {
assert(!dst->reg.indirect);
- return (dst->reg.reg->index << 1) | BIR_IS_REG;
+ return (dst->reg.reg->index << 1) | PAN_IS_REG;
}
}
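Putting the last few hunks together, the shared helpers presumably read roughly as below after this patch; this is a reconstruction from the removed bir_* bodies and the pan_* tails above, with the header placement and the value of PAN_IS_REG assumed rather than shown in the diff.

#include <assert.h>
#include "nir.h"

/* Assumed to keep the old BIR_IS_REG value: the low bit tags NIR registers. */
#define PAN_IS_REG (1)

static inline unsigned
pan_ssa_index(nir_ssa_def *ssa)
{
        /* Off-by-one ensures index 0 still means "no argument" */
        return ((ssa->index + 1) << 1) | 0;
}

static inline unsigned
pan_src_index(nir_src *src)
{
        if (src->is_ssa)
                return pan_ssa_index(src->ssa);
        else {
                assert(!src->reg.indirect);
                return (src->reg.reg->index << 1) | PAN_IS_REG;
        }
}

static inline unsigned
pan_dest_index(nir_dest *dst)
{
        if (dst->is_ssa)
                return pan_ssa_index(&dst->ssa);
        else {
                assert(!dst->reg.indirect);
                return (dst->reg.reg->index << 1) | PAN_IS_REG;
        }
}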