#include "bifrost_compile.h"
#include "compiler.h"
#include "bi_quirks.h"
+#include "bi_print.h"
+
+static bi_block *emit_cf_list(bi_context *ctx, struct exec_list *list);
+static bi_instruction *bi_emit_branch(bi_context *ctx);
+static void bi_block_add_successor(bi_block *block, bi_block *successor);
+static void bi_schedule_barrier(bi_context *ctx);
+
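+/* break/continue lower to unconditional branches targeting the break/continue
+ * blocks recorded on the context when the enclosing loop was entered */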
+static void
+emit_jump(bi_context *ctx, nir_jump_instr *instr)
+{
+ bi_instruction *branch = bi_emit_branch(ctx);
+
+ switch (instr->type) {
+ case nir_jump_break:
+ branch->branch.target = ctx->break_block;
+ break;
+ case nir_jump_continue:
+ branch->branch.target = ctx->continue_block;
+ break;
+ default:
+ unreachable("Unhandled jump type");
+ }
+
+ bi_block_add_successor(ctx->current_block, branch->branch.target);
+}
+
+/* Gets a bytemask for a complete vecN write */
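+/* e.g. a 3-component write of 32-bit values spans 12 bytes, giving the mask
+ * (1 << 12) - 1 = 0xFFF */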
+static unsigned
+bi_mask_for_channels_32(unsigned i)
+{
+ return (1 << (4 * i)) - 1;
+}
+
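+/* Builds a skeleton load of class T. The intrinsic base lives in the inline
+ * constant; the I/O offset is either folded into that constant (if it is
+ * itself constant) or referenced indirectly via src[0] */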
+static bi_instruction
+bi_load(enum bi_class T, nir_intrinsic_instr *instr)
+{
+ bi_instruction load = {
+ .type = T,
+ .writemask = bi_mask_for_channels_32(instr->num_components),
+ .src = { BIR_INDEX_CONSTANT },
+ .constant = { .u64 = nir_intrinsic_base(instr) },
+ };
+
+ const nir_intrinsic_info *info = &nir_intrinsic_infos[instr->intrinsic];
+
+ if (info->has_dest)
+ load.dest = bir_dest_index(&instr->dest);
+
+ if (info->has_dest && info->index_map[NIR_INTRINSIC_TYPE] > 0)
+ load.dest_type = nir_intrinsic_type(instr);
+
+ nir_src *offset = nir_get_io_offset_src(instr);
+
+ if (nir_src_is_const(*offset))
+ load.constant.u64 += nir_src_as_uint(*offset);
+ else
+ load.src[0] = bir_src_index(offset);
+
+ return load;
+}
+
+static void
+bi_emit_ld_vary(bi_context *ctx, nir_intrinsic_instr *instr)
+{
+ bi_instruction ins = bi_load(BI_LOAD_VAR, instr);
+ ins.load_vary.interp_mode = BIFROST_INTERP_DEFAULT; /* TODO */
+ ins.load_vary.reuse = false; /* TODO */
+ ins.load_vary.flat = instr->intrinsic != nir_intrinsic_load_interpolated_input;
+ ins.dest_type = nir_type_float | nir_dest_bit_size(instr->dest);
+ bi_emit(ctx, ins);
+}
+
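+/* Fragment colour writes are emitted as an ATEST (at most once per shader)
+ * followed by a BLEND per store_output, each with a scheduling barrier
+ * after it */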
+static void
+bi_emit_frag_out(bi_context *ctx, nir_intrinsic_instr *instr)
+{
+ if (!ctx->emitted_atest) {
+ bi_instruction ins = {
+ .type = BI_ATEST
+ };
+
+ bi_emit(ctx, ins);
+ bi_schedule_barrier(ctx);
+ ctx->emitted_atest = true;
+ }
+
+ bi_instruction blend = {
+ .type = BI_BLEND,
+ .blend_location = nir_intrinsic_base(instr),
+ .src = {
+ bir_src_index(&instr->src[0])
+ },
+ .swizzle = {
+ { 0, 1, 2, 3 }
+ }
+ };
+
+ bi_emit(ctx, blend);
+ bi_schedule_barrier(ctx);
+}
+
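+/* Vertex varying stores are indirect: LOAD_VAR_ADDRESS computes a 64-bit
+ * destination address into a temporary, which STORE_VAR then consumes
+ * alongside the value to write */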
+static void
+bi_emit_st_vary(bi_context *ctx, nir_intrinsic_instr *instr)
+{
+ bi_instruction address = bi_load(BI_LOAD_VAR_ADDRESS, instr);
+ address.dest = bi_make_temp(ctx);
+ address.dest_type = nir_type_uint64;
+ address.writemask = (1 << 8) - 1;
+
+ bi_instruction st = {
+ .type = BI_STORE_VAR,
+ .src = {
+ address.dest,
+ bir_src_index(&instr->src[0])
+ },
+ .swizzle = {
+ { 0, 1, 2, 3 }
+ }
+ };
+
+ bi_emit(ctx, address);
+ bi_emit(ctx, st);
+}
+
+static void
+bi_emit_ld_uniform(bi_context *ctx, nir_intrinsic_instr *instr)
+{
+ bi_instruction ld = bi_load(BI_LOAD_UNIFORM, instr);
+ ld.src[1] = BIR_INDEX_ZERO; /* TODO: UBO index */
+ bi_emit(ctx, ld);
+}
+
+static void
+emit_intrinsic(bi_context *ctx, nir_intrinsic_instr *instr)
+{
+ switch (instr->intrinsic) {
+ case nir_intrinsic_load_barycentric_pixel:
+ /* stub */
+ break;
+ case nir_intrinsic_load_interpolated_input:
+ case nir_intrinsic_load_input:
+ if (ctx->stage == MESA_SHADER_FRAGMENT)
+ bi_emit_ld_vary(ctx, instr);
+ else if (ctx->stage == MESA_SHADER_VERTEX)
+ bi_emit(ctx, bi_load(BI_LOAD_ATTR, instr));
+ else
+ unreachable("Unsupported shader stage");
+ break;
+
+ case nir_intrinsic_store_output:
+ if (ctx->stage == MESA_SHADER_FRAGMENT)
+ bi_emit_frag_out(ctx, instr);
+ else if (ctx->stage == MESA_SHADER_VERTEX)
+ bi_emit_st_vary(ctx, instr);
+ else
+ unreachable("Unsupported shader stage");
+ break;
+
+ case nir_intrinsic_load_uniform:
+ bi_emit_ld_uniform(ctx, instr);
+ break;
+
+ default:
+ /* todo */
+ break;
+ }
+}
+
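+/* Load-const becomes a move of an inline constant, typed as an unsigned
+ * integer of the def's bit size */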
+static void
+emit_load_const(bi_context *ctx, nir_load_const_instr *instr)
+{
+ /* Make sure we've been lowered */
+ assert(instr->def.num_components == 1);
+
+ bi_instruction move = {
+ .type = BI_MOV,
+ .dest = bir_ssa_index(&instr->def),
+ .dest_type = instr->def.bit_size | nir_type_uint,
+ .writemask = (1 << (instr->def.bit_size / 8)) - 1,
+ .src = {
+ BIR_INDEX_CONSTANT
+ },
+ .constant = {
+ .u64 = nir_const_value_as_uint(instr->value[0], instr->def.bit_size)
+ }
+ };
+
+ bi_emit(ctx, move);
+}
+
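+/* Maps NIR ALU ops onto coarse Bifrost instruction classes; the op-specific
+ * switch at the end of emit_alu fills in the remaining details */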
+static enum bi_class
+bi_class_for_nir_alu(nir_op op)
+{
+ switch (op) {
+ case nir_op_iadd:
+ case nir_op_fadd:
+ return BI_ADD;
+
+ case nir_op_i2i8:
+ case nir_op_i2i16:
+ case nir_op_i2i32:
+ case nir_op_i2i64:
+ case nir_op_u2u8:
+ case nir_op_u2u16:
+ case nir_op_u2u32:
+ case nir_op_u2u64:
+ case nir_op_f2i16:
+ case nir_op_f2i32:
+ case nir_op_f2i64:
+ case nir_op_f2u16:
+ case nir_op_f2u32:
+ case nir_op_f2u64:
+ case nir_op_i2f16:
+ case nir_op_i2f32:
+ case nir_op_i2f64:
+ case nir_op_u2f16:
+ case nir_op_u2f32:
+ case nir_op_u2f64:
+ return BI_CONVERT;
+
+ case nir_op_fmul:
+ return BI_FMA;
+
+ case nir_op_imin:
+ case nir_op_imax:
+ case nir_op_umin:
+ case nir_op_umax:
+ case nir_op_fmin:
+ case nir_op_fmax:
+ return BI_MINMAX;
+
+ case nir_op_fsat:
+ case nir_op_mov:
+ return BI_MOV;
+
+ default:
+ unreachable("Unknown ALU op");
+ }
+}
+
+static void
+emit_alu(bi_context *ctx, nir_alu_instr *instr)
+{
+ /* Assume it's something we can handle normally */
+ bi_instruction alu = {
+ .type = bi_class_for_nir_alu(instr->op),
+ .dest = bir_dest_index(&instr->dest.dest),
+ .dest_type = nir_op_infos[instr->op].output_type
+ | nir_dest_bit_size(instr->dest.dest),
+ };
+
+ if (instr->dest.dest.is_ssa) {
+ /* Construct a writemask */
+ unsigned bits_per_comp = instr->dest.dest.ssa.bit_size;
+ unsigned comps = instr->dest.dest.ssa.num_components;
+ assert(comps == 1);
+ unsigned bits = bits_per_comp * comps;
+ unsigned bytes = MAX2(bits / 8, 1);
+ alu.writemask = (1 << bytes) - 1;
+ } else {
+ unsigned comp_mask = instr->dest.write_mask;
+
+ alu.writemask = pan_to_bytemask(nir_dest_bit_size(instr->dest.dest),
+ comp_mask);
+ }
+
+ /* We inline constants as we go. This tracks how many constants have
+ * been inlined, since we're limited to 64-bits of constants per
+ * instruction. constant_shift is the bit position of the next free
+ * slot in that immediate, and is also encoded into the source index
+ * (BIR_INDEX_CONSTANT | shift) */
+
+ unsigned dest_bits = nir_dest_bit_size(instr->dest.dest);
+ unsigned constants_left = (64 / dest_bits);
+ unsigned constant_shift = 0;
+
+ /* Copy sources */
+
+ unsigned num_inputs = nir_op_infos[instr->op].num_inputs;
+ assert(num_inputs <= ARRAY_SIZE(alu.src));
+
+ for (unsigned i = 0; i < num_inputs; ++i) {
+ unsigned bits = nir_src_bit_size(instr->src[i].src);
+ alu.src_types[i] = nir_op_infos[instr->op].input_types[i]
+ | bits;
+
+ /* Try to inline a constant */
+ if (nir_src_is_const(instr->src[i].src) && constants_left && (dest_bits == bits)) {
+ alu.constant.u64 |=
+ (nir_src_as_uint(instr->src[i].src)) << constant_shift;
+
+ alu.src[i] = BIR_INDEX_CONSTANT | constant_shift;
+ --constants_left;
+ constant_shift += dest_bits;
+ continue;
+ }
+
+ alu.src[i] = bir_src_index(&instr->src[i].src);
+
+ /* We assert scalarization above */
+ alu.swizzle[i][0] = instr->src[i].swizzle[0];
+ }
+
+ /* Op-specific fixup */
+ switch (instr->op) {
+ case nir_op_fmul:
+ alu.src[2] = BIR_INDEX_ZERO; /* FMA */
+ break;
+ case nir_op_fsat:
+ alu.outmod = BIFROST_SAT; /* MOV */
+ break;
+ case nir_op_fmax:
+ case nir_op_imax:
+ case nir_op_umax:
+ alu.op.minmax = BI_MINMAX_MAX; /* MINMAX */
+ break;
+ default:
+ break;
+ }
+
+ bi_emit(ctx, alu);
+}
+
+static void
+emit_instr(bi_context *ctx, struct nir_instr *instr)
+{
+ switch (instr->type) {
+ case nir_instr_type_load_const:
+ emit_load_const(ctx, nir_instr_as_load_const(instr));
+ break;
+
+ case nir_instr_type_intrinsic:
+ emit_intrinsic(ctx, nir_instr_as_intrinsic(instr));
+ break;
+
+ case nir_instr_type_alu:
+ emit_alu(ctx, nir_instr_as_alu(instr));
+ break;
+
+#if 0
+ case nir_instr_type_tex:
+ emit_tex(ctx, nir_instr_as_tex(instr));
+ break;
+#endif
+
+ case nir_instr_type_jump:
+ emit_jump(ctx, nir_instr_as_jump(instr));
+ break;
+
+ case nir_instr_type_ssa_undef:
+ /* Spurious */
+ break;
+
+ default:
+ //unreachable("Unhandled instruction type");
+ break;
+ }
+}
+
+static bi_block *
+create_empty_block(bi_context *ctx)
+{
+ bi_block *blk = rzalloc(ctx, bi_block);
+
+ blk->predecessors = _mesa_set_create(blk,
+ _mesa_hash_pointer,
+ _mesa_key_pointer_equal);
+
+ blk->name = ctx->block_name_count++;
+
+ return blk;
+}
+
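+/* Adds a CFG edge, ignoring duplicates and keeping the successor's
+ * predecessor set in sync */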
+static void
+bi_block_add_successor(bi_block *block, bi_block *successor)
+{
+ assert(block);
+ assert(successor);
+
+ for (unsigned i = 0; i < ARRAY_SIZE(block->successors); ++i) {
+ if (block->successors[i]) {
+ if (block->successors[i] == successor)
+ return;
+ else
+ continue;
+ }
+
+ block->successors[i] = successor;
+ _mesa_set_add(successor->predecessors, block);
+ return;
+ }
+
+ unreachable("Too many successors");
+}
+
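+/* Ends the current block and starts a fresh successor, so later instructions
+ * cannot be scheduled across the barrier; a pending after_block is preserved */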
+static void
+bi_schedule_barrier(bi_context *ctx)
+{
+ bi_block *temp = ctx->after_block;
+ ctx->after_block = create_empty_block(ctx);
+ list_addtail(&ctx->after_block->link, &ctx->blocks);
+ list_inithead(&ctx->after_block->instructions);
+ bi_block_add_successor(ctx->current_block, ctx->after_block);
+ ctx->current_block = ctx->after_block;
+ ctx->after_block = temp;
+}
+
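+/* Emits a NIR block, reusing a pending after_block as the target when control
+ * flow has queued one up */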
+static bi_block *
+emit_block(bi_context *ctx, nir_block *block)
+{
+ if (ctx->after_block) {
+ ctx->current_block = ctx->after_block;
+ ctx->after_block = NULL;
+ } else {
+ ctx->current_block = create_empty_block(ctx);
+ }
+
+ list_addtail(&ctx->current_block->link, &ctx->blocks);
+ list_inithead(&ctx->current_block->instructions);
+
+ nir_foreach_instr(instr, block) {
+ emit_instr(ctx, instr);
+ ++ctx->instruction_count;
+ }
+
+ return ctx->current_block;
+}
+
+/* Appends an unconditional branch to the current block, returning a pointer
+ * so the caller can fill in the details (condition, target) */
+
+static bi_instruction *
+bi_emit_branch(bi_context *ctx)
+{
+ bi_instruction branch = {
+ .type = BI_BRANCH,
+ .branch = {
+ .cond = BI_COND_ALWAYS
+ }
+ };
+
+ return bi_emit(ctx, branch);
+}
+
+/* Sets a condition for a branch by examining the NIR condition. If we're
+ * familiar with the condition, we unwrap it to fold it into the branch
+ * instruction. Otherwise, we consume the condition directly. We
+ * generally use 1-bit booleans, which allows us to use small types for
+ * the conditions.
+ */
+
+static void
+bi_set_branch_cond(bi_instruction *branch, nir_src *cond, bool invert)
+{
+ /* TODO: Try to unwrap instead of always bailing */
+ branch->src[0] = bir_src_index(cond);
+ branch->src[1] = BIR_INDEX_ZERO;
+ branch->src_types[0] = branch->src_types[1] = nir_type_uint16;
+ branch->branch.cond = invert ? BI_COND_EQ : BI_COND_NE;
+}
+
+static void
+emit_if(bi_context *ctx, nir_if *nif)
+{
+ bi_block *before_block = ctx->current_block;
+
+ /* Speculatively emit the branch, but we can't fill it in until later */
+ bi_instruction *then_branch = bi_emit_branch(ctx);
+ bi_set_branch_cond(then_branch, &nif->condition, true);
+
+ /* Emit the two subblocks. */
+ bi_block *then_block = emit_cf_list(ctx, &nif->then_list);
+ bi_block *end_then_block = ctx->current_block;
+
+ /* Emit a jump from the end of the then block to the end of the else */
+ bi_instruction *then_exit = bi_emit_branch(ctx);
+
+ /* Emit second block, and check if it's empty */
+
+ int count_in = ctx->instruction_count;
+ bi_block *else_block = emit_cf_list(ctx, &nif->else_list);
+ bi_block *end_else_block = ctx->current_block;
+ ctx->after_block = create_empty_block(ctx);
+
+ /* Now that we have the subblocks emitted, fix up the branches */
+
+ assert(then_block);
+ assert(else_block);
+
+ if (ctx->instruction_count == count_in) {
+ /* The else block is empty, so don't emit an exit jump */
+ bi_remove_instruction(then_exit);
+ then_branch->branch.target = ctx->after_block;
+ } else {
+ then_branch->branch.target = else_block;
+ then_exit->branch.target = ctx->after_block;
+ bi_block_add_successor(end_then_block, then_exit->branch.target);
+ }
+
+ /* Wire up the successors */
+
+ bi_block_add_successor(before_block, then_branch->branch.target); /* then_branch */
+
+ bi_block_add_successor(before_block, then_block); /* fallthrough */
+ bi_block_add_successor(end_else_block, ctx->after_block); /* fallthrough */
+}
+
+static void
+emit_loop(bi_context *ctx, nir_loop *nloop)
+{
+ /* Remember where we are */
+ bi_block *start_block = ctx->current_block;
+
+ bi_block *saved_break = ctx->break_block;
+ bi_block *saved_continue = ctx->continue_block;
+
+ ctx->continue_block = create_empty_block(ctx);
+ ctx->break_block = create_empty_block(ctx);
+ ctx->after_block = ctx->continue_block;
+
+ /* Emit the body itself */
+ emit_cf_list(ctx, &nloop->body);
+
+ /* Branch back to the top of the loop */
+ bi_instruction *br_back = bi_emit_branch(ctx);
+ br_back->branch.target = ctx->continue_block;
+ bi_block_add_successor(start_block, ctx->continue_block);
+ bi_block_add_successor(ctx->current_block, ctx->continue_block);
+
+ ctx->after_block = ctx->break_block;
+
+ /* Pop off */
+ ctx->break_block = saved_break;
+ ctx->continue_block = saved_continue;
+ ++ctx->loop_count;
+}
+
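+/* Emits a NIR control flow list, returning the first bi_block emitted so
+ * callers (e.g. emit_if) can point branches at it */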
+static bi_block *
+emit_cf_list(bi_context *ctx, struct exec_list *list)
+{
+ bi_block *start_block = NULL;
+
+ foreach_list_typed(nir_cf_node, node, node, list) {
+ switch (node->type) {
+ case nir_cf_node_block: {
+ bi_block *block = emit_block(ctx, nir_cf_node_as_block(node));
+
+ if (!start_block)
+ start_block = block;
+
+ break;
+ }
+
+ case nir_cf_node_if:
+ emit_if(ctx, nir_cf_node_as_if(node));
+ break;
+
+ case nir_cf_node_loop:
+ emit_loop(ctx, nir_cf_node_as_loop(node));
+ break;
+
+ default:
+ unreachable("Unknown control flow");
+ }
+ }
+
+ return start_block;
+}
static int
glsl_type_size(const struct glsl_type *type, bool bindless)
};
NIR_PASS(progress, nir, nir_lower_tex, &lower_tex_options);
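+ /* The Bifrost IR treats ALU ops and constants as scalar (vecs are
+ * lowered to moves below), so scalarise them early */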
+ NIR_PASS(progress, nir, nir_lower_alu_to_scalar, NULL, NULL);
+ NIR_PASS(progress, nir, nir_lower_load_const_to_scalar);
do {
progress = false;
} while (progress);
NIR_PASS(progress, nir, nir_opt_algebraic_late);
+ NIR_PASS(progress, nir, nir_lower_alu_to_scalar, NULL, NULL);
+ NIR_PASS(progress, nir, nir_lower_load_const_to_scalar);
/* Take us out of SSA */
NIR_PASS(progress, nir, nir_lower_locals_to_regs);
NIR_PASS(progress, nir, nir_convert_from_ssa, true);
+
+ /* We're primarily a scalar architecture, but there's enough vector
+ * behaviour that we use a vector IR, so let's not also deal with
+ * scalar hacks on top of the vector hacks */
+
+ NIR_PASS(progress, nir, nir_move_vec_src_uses_to_dest);
+ NIR_PASS(progress, nir, nir_lower_vec_to_movs);
+ NIR_PASS(progress, nir, nir_opt_dce);
}
void
ctx->nir = nir;
ctx->stage = nir->info.stage;
ctx->quirks = bifrost_get_quirks(product_id);
+ list_inithead(&ctx->blocks);
/* Lower gl_Position pre-optimisation, but after lowering vars to ssa
* (so we don't accidentally duplicate the epilogue since mesa/st has
NIR_PASS_V(nir, nir_lower_io, nir_var_all, glsl_type_size, 0);
NIR_PASS_V(nir, nir_lower_ssbo);
- /* We have to lower ALU to scalar ourselves since viewport
- * transformations produce vector ops */
- NIR_PASS_V(nir, nir_lower_alu_to_scalar, NULL, NULL);
-
bi_optimize_nir(nir);
nir_print_shader(nir, stdout);
+ nir_foreach_function(func, nir) {
+ if (!func->impl)
+ continue;
+
+ ctx->impl = func->impl;
+ emit_cf_list(ctx, &func->impl->body);
+ break; /* TODO: Multi-function shaders */
+ }
+
bi_print_shader(ctx, stdout);
+ bi_schedule(ctx);
ralloc_free(ctx);
}