#include "main/mtypes.h"
#include "compiler/glsl/glsl_to_nir.h"
#include "compiler/nir_types.h"
-#include "main/imports.h"
#include "compiler/nir/nir_builder.h"
#include "disassemble.h"
pan_block_add_successor(&ctx->current_block->base, &branch->branch.target->base);
}
-/* Gets a bytemask for a complete vecN write */
-static unsigned
-bi_mask_for_channels_32(unsigned i)
-{
- return (1 << (4 * i)) - 1;
-}
-
static bi_instruction
bi_load(enum bi_class T, nir_intrinsic_instr *instr)
{
bi_instruction load = {
.type = T,
- .writemask = bi_mask_for_channels_32(instr->num_components),
+ .vector_channels = instr->num_components,
.src = { BIR_INDEX_CONSTANT },
+ .src_types = { nir_type_uint32 },
.constant = { .u64 = nir_intrinsic_base(instr) },
};
},
.src_types = {
nir_type_uint32,
- nir_type_float32
+ nir_intrinsic_type(instr)
},
.swizzle = {
{ 0 },
},
.dest = BIR_INDEX_REGISTER | 60 /* TODO: RA */,
.dest_type = nir_type_uint32,
- .writemask = 0xF
};
bi_emit(ctx, ins);
.type = BI_BLEND,
.blend_location = nir_intrinsic_base(instr),
.src = {
+ bir_src_index(&instr->src[0]),
BIR_INDEX_REGISTER | 60 /* Can this be arbitrary? */,
- bir_src_index(&instr->src[0])
},
.src_types = {
- nir_type_uint32,
- nir_type_float32,
+ nir_intrinsic_type(instr),
+ nir_type_uint32
},
.swizzle = {
- { 0 },
- { 0, 1, 2, 3 }
+ { 0, 1, 2, 3 },
+ { 0 }
},
.dest = BIR_INDEX_REGISTER | 48 /* Looks like magic */,
.dest_type = nir_type_uint32,
- .writemask = 0xF
+ .vector_channels = 4
};
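+
+ /* Record the blend input type per render target; it's written back via
+ * program->blend_types (hooked up below) for the driver to consume */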
+ assert(blend.blend_location < 8);
+ assert(ctx->blend_types);
+ ctx->blend_types[blend.blend_location] = blend.src_types[0];
+
bi_emit(ctx, blend);
bi_schedule_barrier(ctx);
}
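+/* Builds a bi_load whose extra sources point at r61/r62, the fixed registers
+ * (pending RA, per the TODO) holding the vertex and instance IDs used for
+ * per-vertex addressing */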
+static bi_instruction
+bi_load_with_r61(enum bi_class T, nir_intrinsic_instr *instr)
+{
+ bi_instruction ld = bi_load(T, instr);
+ ld.src[1] = BIR_INDEX_REGISTER | 61; /* TODO: RA */
+ ld.src[2] = BIR_INDEX_REGISTER | 62;
+ ld.src[3] = 0;
+ ld.src_types[1] = nir_type_uint32;
+ ld.src_types[2] = nir_type_uint32;
+ ld.src_types[3] = nir_intrinsic_type(instr);
+ return ld;
+}
+
static void
bi_emit_st_vary(bi_context *ctx, nir_intrinsic_instr *instr)
{
- bi_instruction address = bi_load(BI_LOAD_VAR_ADDRESS, instr);
- address.src[1] = BIR_INDEX_REGISTER | 61; /* TODO: RA */
- address.src[2] = BIR_INDEX_REGISTER | 62;
- address.src[3] = 0;
- address.src_types[1] = nir_type_uint32;
- address.src_types[2] = nir_type_uint32;
- address.src_types[3] = nir_intrinsic_type(instr);
+ bi_instruction address = bi_load_with_r61(BI_LOAD_VAR_ADDRESS, instr);
address.dest = bi_make_temp(ctx);
- address.dest_type = nir_type_uint64;
- address.writemask = (1 << 8) - 1;
+ address.dest_type = nir_type_uint32;
+ address.vector_channels = 3;
+
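+ /* Expect a dense write mask covering all components from 0 */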
+ unsigned nr = nir_intrinsic_src_components(instr, 0);
+ assert(nir_intrinsic_write_mask(instr) == ((1 << nr) - 1));
bi_instruction st = {
.type = BI_STORE_VAR,
.src = {
- address.dest,
- bir_src_index(&instr->src[0])
+ bir_src_index(&instr->src[0]),
+ address.dest, address.dest, address.dest,
},
.src_types = {
- nir_type_uint64,
- nir_type_uint32
+ nir_type_uint32,
+ nir_type_uint32, nir_type_uint32, nir_type_uint32,
},
.swizzle = {
{ 0 },
- { 0, 1, 2, 3 }
- }
+ { 0 }, { 1 }, { 2 }
+ },
+ .vector_channels = nr,
};
+ for (unsigned i = 0; i < nr; ++i)
+ st.swizzle[0][i] = i;
+
bi_emit(ctx, address);
bi_emit(ctx, st);
}
bi_instruction load = {
.type = BI_LOAD_UNIFORM,
- .writemask = (1 << (nr_components * 4)) - 1,
+ .vector_channels = nr_components,
.src = { BIR_INDEX_CONSTANT, BIR_INDEX_ZERO },
+ .src_types = { nir_type_uint32, nir_type_uint32 },
.constant = { (uniform * 16) + offset },
.dest = bir_dest_index(&nir_dest),
.dest_type = nir_type_uint32, /* TODO */
if (ctx->stage == MESA_SHADER_FRAGMENT)
bi_emit_ld_vary(ctx, instr);
else if (ctx->stage == MESA_SHADER_VERTEX)
- bi_emit(ctx, bi_load(BI_LOAD_ATTR, instr));
+ bi_emit(ctx, bi_load_with_r61(BI_LOAD_ATTR, instr));
else {
unreachable("Unsupported shader stage");
}
.type = BI_MOV,
.dest = bir_ssa_index(&instr->def),
.dest_type = instr->def.bit_size | nir_type_uint,
- .writemask = (1 << (instr->def.bit_size / 8)) - 1,
.src = {
BIR_INDEX_CONSTANT
},
+ .src_types = {
+ instr->def.bit_size | nir_type_uint,
+ },
.constant = {
.u64 = nir_const_value_as_uint(instr->value[0], instr->def.bit_size)
}
bi_emit(ctx, move);
}
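+/* Expands to the case labels for each sized (8/16/32-bit) variant of a NIR
+ * opcode */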
+#define BI_CASE_CMP(op) \
+ case op##8: \
+ case op##16: \
+ case op##32: \
+
static enum bi_class
bi_class_for_nir_alu(nir_op op)
{
case nir_op_isub:
return BI_ISUB;
- case nir_op_flt:
- case nir_op_fge:
- case nir_op_feq:
- case nir_op_fne:
- case nir_op_ilt:
- case nir_op_ige:
- case nir_op_ieq:
- case nir_op_ine:
+ case nir_op_iand:
+ case nir_op_ior:
+ case nir_op_ixor:
+ return BI_BITWISE;
+
+ BI_CASE_CMP(nir_op_flt)
+ BI_CASE_CMP(nir_op_fge)
+ BI_CASE_CMP(nir_op_feq)
+ BI_CASE_CMP(nir_op_fne)
+ BI_CASE_CMP(nir_op_ilt)
+ BI_CASE_CMP(nir_op_ige)
+ BI_CASE_CMP(nir_op_ieq)
+ BI_CASE_CMP(nir_op_ine)
return BI_CMP;
- case nir_op_bcsel:
+ case nir_op_b8csel:
+ case nir_op_b16csel:
+ case nir_op_b32csel:
return BI_CSEL;
case nir_op_i2i8:
case nir_op_u2f16:
case nir_op_u2f32:
case nir_op_u2f64:
+ case nir_op_f2f16:
+ case nir_op_f2f32:
+ case nir_op_f2f64:
+ case nir_op_f2fmp:
return BI_CONVERT;
+ case nir_op_vec2:
+ case nir_op_vec3:
+ case nir_op_vec4:
+ return BI_COMBINE;
+
+ case nir_op_vec8:
+ case nir_op_vec16:
+ unreachable("should've been lowered");
+
case nir_op_ffma:
case nir_op_fmul:
return BI_FMA;
case nir_op_mov:
return BI_MOV;
+ case nir_op_fround_even:
+ case nir_op_fceil:
+ case nir_op_ffloor:
+ case nir_op_ftrunc:
+ return BI_ROUND;
+
case nir_op_frcp:
case nir_op_frsq:
- case nir_op_fsin:
- case nir_op_fcos:
return BI_SPECIAL;
default:
}
}
+/* Gets a bi_cond for a given NIR comparison opcode. In soft mode, it returns
+ * BI_COND_ALWAYS as a sentinel when no condition matches (useful for
+ * optimizations). Otherwise, an unmatched opcode is a fatal error (as used
+ * for primary code generation). */
+
static enum bi_cond
-bi_cond_for_nir(nir_op op)
+bi_cond_for_nir(nir_op op, bool soft)
{
switch (op) {
- case nir_op_flt:
- case nir_op_ilt:
+ BI_CASE_CMP(nir_op_flt)
+ BI_CASE_CMP(nir_op_ilt)
return BI_COND_LT;
- case nir_op_fge:
- case nir_op_ige:
+
+ BI_CASE_CMP(nir_op_fge)
+ BI_CASE_CMP(nir_op_ige)
return BI_COND_GE;
- case nir_op_feq:
- case nir_op_ieq:
+
+ BI_CASE_CMP(nir_op_feq)
+ BI_CASE_CMP(nir_op_ieq)
return BI_COND_EQ;
- case nir_op_fne:
- case nir_op_ine:
+
+ BI_CASE_CMP(nir_op_fne)
+ BI_CASE_CMP(nir_op_ine)
return BI_COND_NE;
default:
- unreachable("Invalid compare");
+ if (soft)
+ return BI_COND_ALWAYS;
+ else
+ unreachable("Invalid compare");
}
}
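+/* Copies NIR source i of instr into bi source slot `to`: sets the source
+ * type, inlines small constants into the 64-bit embedded immediate where a
+ * slot remains (reusing duplicate values), and otherwise copies the index
+ * and swizzle */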
+static void
+bi_copy_src(bi_instruction *alu, nir_alu_instr *instr, unsigned i, unsigned to,
+ unsigned *constants_left, unsigned *constant_shift, unsigned comps)
+{
+ unsigned bits = nir_src_bit_size(instr->src[i].src);
+ unsigned dest_bits = nir_dest_bit_size(instr->dest.dest);
+
+ alu->src_types[to] = nir_op_infos[instr->op].input_types[i]
+ | bits;
+
+ /* Try to inline a constant */
+ if (nir_src_is_const(instr->src[i].src) && *constants_left && (dest_bits == bits)) {
+ uint64_t mask = (1ull << dest_bits) - 1;
+ uint64_t cons = nir_src_as_uint(instr->src[i].src);
+
+ /* Try to reuse a constant */
+ for (unsigned i = 0; i < (*constant_shift); i += dest_bits) {
+ if (((alu->constant.u64 >> i) & mask) == cons) {
+ alu->src[to] = BIR_INDEX_CONSTANT | i;
+ return;
+ }
+ }
+
+ alu->constant.u64 |= cons << *constant_shift;
+ alu->src[to] = BIR_INDEX_CONSTANT | (*constant_shift);
+ --(*constants_left);
+ (*constant_shift) += MAX2(dest_bits, 32); /* lo/hi */
+ return;
+ }
+
+ alu->src[to] = bir_src_index(&instr->src[i].src);
+
+ /* Copy the swizzle for all vectored components, replicating the last
+ * component to fill out undersized sources */
+
+ unsigned vec = alu->type == BI_COMBINE ? 1 :
+ MAX2(1, 32 / dest_bits);
+
+ for (unsigned j = 0; j < vec; ++j)
+ alu->swizzle[to][j] = instr->src[i].swizzle[MIN2(j, comps - 1)];
+}
+
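+/* Tries to fuse the comparison feeding a csel's condition into the csel
+ * itself, turning csel(cmp(x, y) != 0, ...) into csel(x cmp y, ...); bails
+ * on swizzled or non-SSA conditions */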
+static void
+bi_fuse_csel_cond(bi_instruction *csel, nir_alu_src cond,
+ unsigned *constants_left, unsigned *constant_shift, unsigned comps)
+{
+ /* Bail for vector weirdness */
+ if (cond.swizzle[0] != 0)
+ return;
+
+ if (!cond.src.is_ssa)
+ return;
+
+ nir_ssa_def *def = cond.src.ssa;
+ nir_instr *parent = def->parent_instr;
+
+ if (parent->type != nir_instr_type_alu)
+ return;
+
+ nir_alu_instr *alu = nir_instr_as_alu(parent);
+
+ /* Try to match a condition */
+ enum bi_cond bcond = bi_cond_for_nir(alu->op, true);
+
+ if (bcond == BI_COND_ALWAYS)
+ return;
+
+ /* We found one, let's fuse it in */
+ csel->cond = bcond;
+ bi_copy_src(csel, alu, 0, 0, constants_left, constant_shift, comps);
+ bi_copy_src(csel, alu, 1, 1, constants_left, constant_shift, comps);
+}
+
static void
emit_alu(bi_context *ctx, nir_alu_instr *instr)
{
- /* Assume it's something we can handle normally */
+ /* Try some special functions */
+ switch (instr->op) {
+ case nir_op_fexp2:
+ bi_emit_fexp2(ctx, instr);
+ return;
+ case nir_op_flog2:
+ bi_emit_flog2(ctx, instr);
+ return;
+ default:
+ break;
+ }
+
+ /* Otherwise, assume it's something we can handle normally */
bi_instruction alu = {
.type = bi_class_for_nir_alu(instr->op),
.dest = bir_dest_index(&instr->dest.dest),
/* TODO: Implement lowering of special functions for older Bifrost */
assert((alu.type != BI_SPECIAL) || !(ctx->quirks & BIFROST_NO_FAST_OP));
- if (instr->dest.dest.is_ssa) {
- /* Construct a writemask */
- unsigned bits_per_comp = instr->dest.dest.ssa.bit_size;
- unsigned comps = instr->dest.dest.ssa.num_components;
- assert(comps == 1);
- unsigned bits = bits_per_comp * comps;
- unsigned bytes = MAX2(bits / 8, 1);
- alu.writemask = (1 << bytes) - 1;
- } else {
- unsigned comp_mask = instr->dest.write_mask;
+ unsigned comps = nir_dest_num_components(instr->dest.dest);
- alu.writemask = pan_to_bytemask(nir_dest_bit_size(instr->dest.dest),
- comp_mask);
+ if (alu.type != BI_COMBINE)
+ assert(comps <= MAX2(1, 32 / nir_dest_bit_size(instr->dest.dest)));
+
+ if (!instr->dest.dest.is_ssa) {
+ for (unsigned i = 0; i < comps; ++i)
+ assert(instr->dest.write_mask & (1 << i));
}
/* We inline constants as we go. This tracks how many constants have
unsigned constants_left = (64 / dest_bits);
unsigned constant_shift = 0;
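+
+ /* Combines are lowered to per-channel moves later (bi_lower_combine), so
+ * don't inline constants into them here */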
+ if (alu.type == BI_COMBINE)
+ constants_left = 0;
+
/* Copy sources */
unsigned num_inputs = nir_op_infos[instr->op].num_inputs;
assert(num_inputs <= ARRAY_SIZE(alu.src));
for (unsigned i = 0; i < num_inputs; ++i) {
- unsigned bits = nir_src_bit_size(instr->src[i].src);
- alu.src_types[i] = nir_op_infos[instr->op].input_types[i]
- | bits;
-
- /* Try to inline a constant */
- if (nir_src_is_const(instr->src[i].src) && constants_left && (dest_bits == bits)) {
- alu.constant.u64 |=
- (nir_src_as_uint(instr->src[i].src)) << constant_shift;
-
- alu.src[i] = BIR_INDEX_CONSTANT | constant_shift;
- --constants_left;
- constant_shift += dest_bits;
- continue;
- }
+ unsigned f = 0;
- alu.src[i] = bir_src_index(&instr->src[i].src);
+ if (i && alu.type == BI_CSEL)
+ f++;
- /* We assert scalarization above */
- alu.swizzle[i][0] = instr->src[i].swizzle[0];
+ bi_copy_src(&alu, instr, i, i + f, &constants_left, &constant_shift, comps);
}
/* Op-specific fixup */
switch (instr->op) {
case nir_op_fmul:
alu.src[2] = BIR_INDEX_ZERO; /* FMA */
+ alu.src_types[2] = alu.src_types[1];
break;
case nir_op_fsat:
alu.outmod = BIFROST_SAT; /* FMOV */
case nir_op_frsq:
alu.op.special = BI_SPECIAL_FRSQ;
break;
- case nir_op_fsin:
- alu.op.special = BI_SPECIAL_FSIN;
+ BI_CASE_CMP(nir_op_flt)
+ BI_CASE_CMP(nir_op_ilt)
+ BI_CASE_CMP(nir_op_fge)
+ BI_CASE_CMP(nir_op_ige)
+ BI_CASE_CMP(nir_op_feq)
+ BI_CASE_CMP(nir_op_ieq)
+ BI_CASE_CMP(nir_op_fne)
+ BI_CASE_CMP(nir_op_ine)
+ alu.cond = bi_cond_for_nir(instr->op, false);
break;
- case nir_op_fcos:
- alu.op.special = BI_SPECIAL_FCOS;
+ case nir_op_fround_even:
+ alu.roundmode = BIFROST_RTE;
break;
- case nir_op_flt:
- case nir_op_ilt:
- case nir_op_fge:
- case nir_op_ige:
- case nir_op_feq:
- case nir_op_ieq:
- case nir_op_fne:
- case nir_op_ine:
- alu.op.compare = bi_cond_for_nir(instr->op);
+ case nir_op_fceil:
+ alu.roundmode = BIFROST_RTP;
+ break;
+ case nir_op_ffloor:
+ alu.roundmode = BIFROST_RTN;
+ break;
+ case nir_op_ftrunc:
+ alu.roundmode = BIFROST_RTZ;
+ break;
+ case nir_op_iand:
+ alu.op.bitwise = BI_BITWISE_AND;
+ break;
+ case nir_op_ior:
+ alu.op.bitwise = BI_BITWISE_OR;
+ break;
+ case nir_op_ixor:
+ alu.op.bitwise = BI_BITWISE_XOR;
break;
default:
break;
}
+ if (alu.type == BI_CSEL) {
+ /* Default to csel3 */
+ alu.cond = BI_COND_NE;
+ alu.src[1] = BIR_INDEX_ZERO;
+ alu.src_types[1] = alu.src_types[0];
+
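+ /* The "!= 0" condition above is replaced by a real comparison when one
+ * feeds the select */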
+ bi_fuse_csel_cond(&alu, instr->src[0],
+ &constants_left, &constant_shift, comps);
+ } else if (alu.type == BI_BITWISE) {
+ /* Implicit shift argument... at some point we should fold */
+ alu.src[2] = BIR_INDEX_ZERO;
+ alu.src_types[2] = alu.src_types[1];
+ }
+
bi_emit(ctx, alu);
}
+/* TEX_COMPACT instructions assume normal 2D f32 operation but are more
+ * space-efficient and have simpler RA/scheduling requirements */
+
+static void
+emit_tex_compact(bi_context *ctx, nir_tex_instr *instr)
+{
+ /* TODO: Pipe through indices */
+ assert(instr->texture_index == 0);
+ assert(instr->sampler_index == 0);
+
+ bi_instruction tex = {
+ .type = BI_TEX,
+ .op = { .texture = BI_TEX_COMPACT },
+ .dest = bir_dest_index(&instr->dest),
+ .dest_type = instr->dest_type,
+ .src_types = { nir_type_float32, nir_type_float32 },
+ .vector_channels = 4
+ };
+
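+ /* The 2D coordinate is split into two scalar sources: x (component 0)
+ * in src[0] and y (component 1) in src[1] */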
+ for (unsigned i = 0; i < instr->num_srcs; ++i) {
+ int index = bir_src_index(&instr->src[i].src);
+ assert(instr->src[i].src_type == nir_tex_src_coord);
+
+ tex.src[0] = index;
+ tex.src[1] = index;
+ tex.swizzle[0][0] = 0;
+ tex.swizzle[1][0] = 1;
+ }
+
+ bi_emit(ctx, tex);
+}
+
+static void
+emit_tex_full(bi_context *ctx, nir_tex_instr *instr)
+{
+ unreachable("stub");
+}
+
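+/* Takes the compact path for a plain nir_texop_tex on a 2D (or external)
+ * image with a 16-/32-bit float destination and no shadow comparison;
+ * everything else falls back to the full path (currently a stub) */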
+static void
+emit_tex(bi_context *ctx, nir_tex_instr *instr)
+{
+ nir_alu_type base = nir_alu_type_get_base_type(instr->dest_type);
+ unsigned sz = nir_dest_bit_size(instr->dest);
+ instr->dest_type = base | sz;
+
+ bool is_normal = instr->op == nir_texop_tex;
+ bool is_2d = instr->sampler_dim == GLSL_SAMPLER_DIM_2D ||
+ instr->sampler_dim == GLSL_SAMPLER_DIM_EXTERNAL;
+ bool is_f = base == nir_type_float && (sz == 16 || sz == 32);
+
+ bool is_compact = is_normal && is_2d && is_f && !instr->is_shadow;
+
+ if (is_compact)
+ emit_tex_compact(ctx, instr);
+ else
+ emit_tex_full(ctx, instr);
+}
+
static void
emit_instr(bi_context *ctx, struct nir_instr *instr)
{
emit_alu(ctx, nir_instr_as_alu(instr));
break;
-#if 0
case nir_instr_type_tex:
emit_tex(ctx, nir_instr_as_tex(instr));
break;
-#endif
case nir_instr_type_jump:
emit_jump(ctx, nir_instr_as_jump(instr));
break;
default:
- //unreachable("Unhandled instruction type");
+ unreachable("Unhandled instruction type");
break;
}
}
} while (progress);
NIR_PASS(progress, nir, nir_opt_algebraic_late);
+ NIR_PASS(progress, nir, nir_lower_bool_to_int32);
NIR_PASS(progress, nir, bifrost_nir_lower_algebraic_late);
NIR_PASS(progress, nir, nir_lower_alu_to_scalar, NULL, NULL);
NIR_PASS(progress, nir, nir_lower_load_const_to_scalar);
/* Take us out of SSA */
NIR_PASS(progress, nir, nir_lower_locals_to_regs);
- NIR_PASS(progress, nir, nir_convert_from_ssa, true);
-
- /* We're a primary scalar architecture but there's enough vector that
- * we use a vector IR so let's not also deal with scalar hacks on top
- * of the vector hacks */
-
NIR_PASS(progress, nir, nir_move_vec_src_uses_to_dest);
- NIR_PASS(progress, nir, nir_lower_vec_to_movs);
- NIR_PASS(progress, nir, nir_opt_dce);
-}
-
-static void
-bi_insert_mov32(bi_context *ctx, bi_instruction *parent, unsigned comp)
-{
- bi_instruction move = {
- .type = BI_MOV,
- .dest = parent->dest,
- .dest_type = nir_type_uint32,
- .writemask = (0xF << (4 * comp)),
- .src = { parent->src[0] },
- .src_types = { nir_type_uint32 },
- .swizzle = { { comp } }
- };
-
- bi_emit_before(ctx, parent, move);
-}
-
-static void
-bi_lower_mov(bi_context *ctx, bi_block *block)
-{
- bi_foreach_instr_in_block_safe(block, ins) {
- if (ins->type != BI_MOV) continue;
- if (util_bitcount(ins->writemask) <= 4) continue;
-
- for (unsigned i = 0; i < 4; ++i) {
- unsigned quad = (ins->writemask >> (4 * i)) & 0xF;
-
- if (quad == 0)
- continue;
- else if (quad == 0xF)
- bi_insert_mov32(ctx, ins, i);
- else
- unreachable("TODO: Lowering <32bit moves");
- }
-
- bi_remove_instruction(ins);
- }
+ NIR_PASS(progress, nir, nir_convert_from_ssa, true);
}
void
panfrost_nir_assign_sysvals(&ctx->sysvals, nir);
program->sysval_count = ctx->sysvals.sysval_count;
memcpy(program->sysvals, ctx->sysvals.sysvals, sizeof(ctx->sysvals.sysvals[0]) * ctx->sysvals.sysval_count);
+ ctx->blend_types = program->blend_types;
nir_foreach_function(func, nir) {
if (!func->impl)
bi_foreach_block(ctx, _block) {
bi_block *block = (bi_block *) _block;
- bi_lower_mov(ctx, block);
+ bi_lower_combine(ctx, block);
}
bool progress = false;