#include "compiler/glsl/glsl_to_nir.h"
#include "compiler/nir_types.h"
#include "compiler/nir/nir_builder.h"
+#include "util/u_debug.h"
#include "disassemble.h"
#include "bifrost_compile.h"
#include "bi_quirks.h"
#include "bi_print.h"
+static const struct debug_named_value debug_options[] = {
+ {"msgs", BIFROST_DBG_MSGS, "Print debug messages"},
+ {"shaders", BIFROST_DBG_SHADERS, "Dump shaders in NIR and MIR"},
+ DEBUG_NAMED_VALUE_END
+};
+
+DEBUG_GET_ONCE_FLAGS_OPTION(bifrost_debug, "BIFROST_MESA_DEBUG", debug_options, 0)
+
+int bifrost_debug = 0;
+
+#define DBG(fmt, ...) \
+ do { if (bifrost_debug & BIFROST_DBG_MSGS) \
+ fprintf(stderr, "%s:%d: "fmt, \
+ __FUNCTION__, __LINE__, ##__VA_ARGS__); } while (0)
+
static bi_block *emit_cf_list(bi_context *ctx, struct exec_list *list);
static bi_instruction *bi_emit_branch(bi_context *ctx);
-static void bi_schedule_barrier(bi_context *ctx);
static void
emit_jump(bi_context *ctx, nir_jump_instr *instr)
switch (instr->type) {
case nir_jump_break:
- branch->branch.target = ctx->break_block;
+ branch->branch_target = ctx->break_block;
break;
case nir_jump_continue:
- branch->branch.target = ctx->continue_block;
+ branch->branch_target = ctx->continue_block;
break;
default:
unreachable("Unhandled jump type");
}
- pan_block_add_successor(&ctx->current_block->base, &branch->branch.target->base);
+ pan_block_add_successor(&ctx->current_block->base, &branch->branch_target->base);
}
static bi_instruction
};
bi_emit(ctx, ins);
- bi_schedule_barrier(ctx);
ctx->emitted_atest = true;
}
.vector_channels = 4
};
- assert(blend.blend_location < 8);
+ assert(blend.blend_location < BIFROST_MAX_RENDER_TARGET_COUNT);
assert(ctx->blend_types);
+ assert(blend.src_types[0]);
ctx->blend_types[blend.blend_location] = blend.src_types[0];
bi_emit(ctx, blend);
- bi_schedule_barrier(ctx);
}
static bi_instruction
bi_emit(ctx, load);
}
+/* gl_FragCoord.xy = u16_to_f32(R59.xy) + 0.5
+ * gl_FragCoord.z = ld_vary(fragz)
+ * gl_FragCoord.w = ld_vary(fragw)
+ */
+
+static void
+bi_emit_ld_frag_coord(bi_context *ctx, nir_intrinsic_instr *instr)
+{
+ /* Future-proofing for mediump fragcoord at some point */
+ nir_alu_type T = nir_type_float32;
+
+ /* First, sketch a combine */
+ bi_instruction combine = {
+ .type = BI_COMBINE,
+ .dest_type = nir_type_uint32,
+ .dest = pan_dest_index(&instr->dest),
+ .src_types = { T, T, T, T },
+ };
+
+ /* Second, handle xy */
+ for (unsigned i = 0; i < 2; ++i) {
+ bi_instruction conv = {
+ .type = BI_CONVERT,
+ .dest_type = T,
+ .dest = bi_make_temp(ctx),
+ .src = {
+ /* TODO: RA XXX */
+ BIR_INDEX_REGISTER | 59
+ },
+ .src_types = { nir_type_uint16 },
+ .swizzle = { { i } }
+ };
+
+ bi_instruction add = {
+ .type = BI_ADD,
+ .dest_type = T,
+ .dest = bi_make_temp(ctx),
+ .src = { conv.dest, BIR_INDEX_CONSTANT },
+ .src_types = { T, T },
+ };
+
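+ /* Bias by 0.5 toward the pixel center; the float bit pattern is stored in the inline constant */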
+ float half = 0.5;
+ memcpy(&add.constant.u32, &half, sizeof(float));
+
+ bi_emit(ctx, conv);
+ bi_emit(ctx, add);
+
+ combine.src[i] = add.dest;
+ }
+
+ /* Third, zw */
+ for (unsigned i = 0; i < 2; ++i) {
+ bi_instruction load = {
+ .type = BI_LOAD_VAR,
+ .load_vary = {
+ .interp_mode = BIFROST_INTERP_DEFAULT,
+ .reuse = false,
+ .flat = true
+ },
+ .vector_channels = 1,
+ .dest_type = nir_type_float32,
+ .dest = bi_make_temp(ctx),
+ .src = { BIR_INDEX_CONSTANT, BIR_INDEX_ZERO },
+ .src_types = { nir_type_uint32, nir_type_uint32 },
+ .constant = {
+ .u32 = (i == 0) ? BIFROST_FRAGZ : BIFROST_FRAGW
+ }
+ };
+
+ bi_emit(ctx, load);
+
+ combine.src[i + 2] = load.dest;
+ }
+
+ /* Finally, emit the combine */
+ bi_emit(ctx, combine);
+}
+
+static void
+bi_emit_discard(bi_context *ctx, nir_intrinsic_instr *instr)
+{
+ /* Goofy lowering */
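+ /* An unconditional discard becomes a conditional DISCARD whose
+ * 0 == 0 condition is always true */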
+ bi_instruction discard = {
+ .type = BI_DISCARD,
+ .cond = BI_COND_EQ,
+ .src_types = { nir_type_uint32, nir_type_uint32 },
+ .src = { BIR_INDEX_ZERO, BIR_INDEX_ZERO },
+ };
+
+ bi_emit(ctx, discard);
+}
+
+static void
+bi_fuse_cond(bi_instruction *csel, nir_alu_src cond,
+ unsigned *constants_left, unsigned *constant_shift,
+ unsigned comps, bool float_only);
+
+static void
+bi_emit_discard_if(bi_context *ctx, nir_intrinsic_instr *instr)
+{
+ nir_src cond = instr->src[0];
+ nir_alu_type T = nir_type_uint | nir_src_bit_size(cond);
+
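+ /* Discard fires when the condition is non-zero (NE comparison against zero) */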
+ bi_instruction discard = {
+ .type = BI_DISCARD,
+ .cond = BI_COND_NE,
+ .src_types = { T, T },
+ .src = {
+ pan_src_index(&cond),
+ BIR_INDEX_ZERO
+ },
+ };
+
+ /* Try to fuse in the condition */
+ unsigned constants_left = 1, constant_shift = 0;
+
+ /* Scalar so no swizzle */
+ nir_alu_src wrap = {
+ .src = instr->src[0]
+ };
+
+ /* May or may not succeed but we're optimistic */
+ bi_fuse_cond(&discard, wrap, &constants_left, &constant_shift, 1, true);
+
+ bi_emit(ctx, discard);
+}
+
static void
emit_intrinsic(bi_context *ctx, nir_intrinsic_instr *instr)
{
bi_emit_ld_uniform(ctx, instr);
break;
+ case nir_intrinsic_load_frag_coord:
+ bi_emit_ld_frag_coord(ctx, instr);
+ break;
+
+ case nir_intrinsic_discard:
+ bi_emit_discard(ctx, instr);
+ break;
+
+ case nir_intrinsic_discard_if:
+ bi_emit_discard_if(ctx, instr);
+ break;
+
case nir_intrinsic_load_ssbo_address:
bi_emit_sysval(ctx, &instr->instr, 1, 0);
break;
break;
default:
- /* todo */
+ unreachable("Unknown intrinsic");
break;
}
}
bi_class_for_nir_alu(nir_op op)
{
switch (op) {
- case nir_op_iadd:
case nir_op_fadd:
case nir_op_fsub:
return BI_ADD;
+
+ case nir_op_iadd:
case nir_op_isub:
- return BI_ISUB;
+ return BI_IMATH;
case nir_op_iand:
case nir_op_ior:
}
static void
-bi_fuse_csel_cond(bi_instruction *csel, nir_alu_src cond,
- unsigned *constants_left, unsigned *constant_shift, unsigned comps)
+bi_fuse_cond(bi_instruction *csel, nir_alu_src cond,
+ unsigned *constants_left, unsigned *constant_shift,
+ unsigned comps, bool float_only)
{
/* Bail for vector weirdness */
if (cond.swizzle[0] != 0)
if (bcond == BI_COND_ALWAYS)
return;
+ /* Some instructions can't compare ints */
+ if (float_only) {
+ nir_alu_type T = nir_op_infos[alu->op].input_types[0];
+ T = nir_alu_type_get_base_type(T);
+
+ if (T != nir_type_float)
+ return;
+ }
+
/* We found one, let's fuse it in */
csel->cond = bcond;
bi_copy_src(csel, alu, 0, 0, constants_left, constant_shift, comps);
case nir_op_fsub:
alu.src_neg[1] = true; /* FADD */
break;
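+ /* Integer add/sub share the IMATH class; op.imath selects the operation */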
+ case nir_op_iadd:
+ alu.op.imath = BI_IMATH_ADD;
+ break;
+ case nir_op_isub:
+ alu.op.imath = BI_IMATH_SUB;
+ break;
case nir_op_fmax:
case nir_op_imax:
case nir_op_umax:
alu.src[1] = BIR_INDEX_ZERO;
alu.src_types[1] = alu.src_types[0];
- bi_fuse_csel_cond(&alu, instr->src[0],
- &constants_left, &constant_shift, comps);
+ /* TODO: Reenable cond fusing when we can split up registers
+ * when scheduling */
+#if 0
+ bi_fuse_cond(&alu, instr->src[0],
+ &constants_left, &constant_shift, comps, false);
+#endif
} else if (alu.type == BI_BITWISE) {
/* Implicit shift argument... at some point we should fold */
alu.src[2] = BIR_INDEX_ZERO;
static void
emit_tex_compact(bi_context *ctx, nir_tex_instr *instr)
{
- /* TODO: Pipe through indices */
- assert(instr->texture_index == 0);
- assert(instr->sampler_index == 0);
-
bi_instruction tex = {
.type = BI_TEX,
.op = { .texture = BI_TEX_COMPACT },
+ .texture = {
+ .texture_index = instr->texture_index,
+ .sampler_index = instr->sampler_index,
+ },
.dest = pan_dest_index(&instr->dest),
.dest_type = instr->dest_type,
.src_types = { nir_type_float32, nir_type_float32 },
for (unsigned i = 0; i < instr->num_srcs; ++i) {
int index = pan_src_index(&instr->src[i].src);
+
+ /* The LOD was checked ahead-of-time in bi_is_normal_tex, so skip it here */
+ if (instr->src[i].src_type == nir_tex_src_lod)
+ continue;
+
assert (instr->src[i].src_type == nir_tex_src_coord);
tex.src[0] = index;
unreachable("stub");
}
+/* Normal texture ops are tex for fragment shaders and txl with a constant
+ * lod of 0 for vertex shaders. Anything else needs a full texture op. */
+
+static bool
+bi_is_normal_tex(gl_shader_stage stage, nir_tex_instr *instr)
+{
+ if (stage == MESA_SHADER_FRAGMENT)
+ return instr->op == nir_texop_tex;
+
+ if (instr->op != nir_texop_txl)
+ return false;
+
+ for (unsigned i = 0; i < instr->num_srcs; ++i) {
+ if (instr->src[i].src_type != nir_tex_src_lod)
+ continue;
+
+ nir_src src = instr->src[i].src;
+
+ /* The LOD must be a constant zero to qualify as normal */
+ if (!nir_src_is_const(src))
+ return false;
+
+ if (nir_src_as_uint(src) != 0)
+ return false;
+ }
+
+ return true;
+}
+
static void
emit_tex(bi_context *ctx, nir_tex_instr *instr)
{
unsigned sz = nir_dest_bit_size(instr->dest);
instr->dest_type = base | sz;
- bool is_normal = instr->op == nir_texop_tex;
+ bool is_normal = bi_is_normal_tex(ctx->stage, instr);
bool is_2d = instr->sampler_dim == GLSL_SAMPLER_DIM_2D ||
instr->sampler_dim == GLSL_SAMPLER_DIM_EXTERNAL;
bool is_f = base == nir_type_float && (sz == 16 || sz == 32);
return blk;
}
-static void
-bi_schedule_barrier(bi_context *ctx)
-{
- bi_block *temp = ctx->after_block;
- ctx->after_block = create_empty_block(ctx);
- list_addtail(&ctx->after_block->base.link, &ctx->blocks);
- list_inithead(&ctx->after_block->base.instructions);
- pan_block_add_successor(&ctx->current_block->base, &ctx->after_block->base);
- ctx->current_block = ctx->after_block;
- ctx->after_block = temp;
-}
-
static bi_block *
emit_block(bi_context *ctx, nir_block *block)
{
{
bi_instruction branch = {
.type = BI_BRANCH,
- .branch = {
- .cond = BI_COND_ALWAYS
- }
+ .cond = BI_COND_ALWAYS
};
return bi_emit(ctx, branch);
/* TODO: Try to unwrap instead of always bailing */
branch->src[0] = pan_src_index(cond);
branch->src[1] = BIR_INDEX_ZERO;
- branch->src_types[0] = branch->src_types[1] = nir_type_uint16;
- branch->branch.cond = invert ? BI_COND_EQ : BI_COND_NE;
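+ /* The comparison is sized to the NIR condition's bit width */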
+ branch->src_types[0] = branch->src_types[1] = nir_type_uint |
+ nir_src_bit_size(*cond);
+ branch->cond = invert ? BI_COND_EQ : BI_COND_NE;
}
static void
if (ctx->instruction_count == count_in) {
/* The else block is empty, so don't emit an exit jump */
bi_remove_instruction(then_exit);
- then_branch->branch.target = ctx->after_block;
+ then_branch->branch_target = ctx->after_block;
+ pan_block_add_successor(&end_then_block->base, &ctx->after_block->base); /* fallthrough */
} else {
- then_branch->branch.target = else_block;
- then_exit->branch.target = ctx->after_block;
- pan_block_add_successor(&end_then_block->base, &then_exit->branch.target->base);
+ then_branch->branch_target = else_block;
+ then_exit->branch_target = ctx->after_block;
+ pan_block_add_successor(&end_then_block->base, &then_exit->branch_target->base);
+ pan_block_add_successor(&end_else_block->base, &ctx->after_block->base); /* fallthrough */
}
- /* Wire up the successors */
-
- pan_block_add_successor(&before_block->base, &then_branch->branch.target->base); /* then_branch */
-
+ pan_block_add_successor(&before_block->base, &then_branch->branch_target->base); /* then_branch */
pan_block_add_successor(&before_block->base, &then_block->base); /* fallthrough */
- pan_block_add_successor(&end_else_block->base, &ctx->after_block->base); /* fallthrough */
}
static void
/* Branch back to loop back */
bi_instruction *br_back = bi_emit_branch(ctx);
- br_back->branch.target = ctx->continue_block;
+ br_back->branch_target = ctx->continue_block;
pan_block_add_successor(&start_block->base, &ctx->continue_block->base);
pan_block_add_successor(&ctx->current_block->base, &ctx->continue_block->base);
void
bifrost_compile_shader_nir(nir_shader *nir, panfrost_program *program, unsigned product_id)
{
+ bifrost_debug = debug_get_option_bifrost_debug();
+
bi_context *ctx = rzalloc(NULL, bi_context);
ctx->nir = nir;
ctx->stage = nir->info.stage;
NIR_PASS_V(nir, nir_lower_vars_to_ssa);
NIR_PASS_V(nir, nir_lower_io, nir_var_all, glsl_type_size, 0);
NIR_PASS_V(nir, nir_lower_ssbo);
+ NIR_PASS_V(nir, nir_lower_mediump_outputs);
bi_optimize_nir(nir);
- nir_print_shader(nir, stdout);
+
+ if (bifrost_debug & BIFROST_DBG_SHADERS)
+ nir_print_shader(nir, stdout);
panfrost_nir_assign_sysvals(&ctx->sysvals, nir);
program->sysval_count = ctx->sysvals.sysval_count;
}
} while(progress);
- bi_print_shader(ctx, stdout);
+ if (bifrost_debug & BIFROST_DBG_SHADERS)
+ bi_print_shader(ctx, stdout);
bi_schedule(ctx);
bi_register_allocate(ctx);
- bi_print_shader(ctx, stdout);
+ if (bifrost_debug & BIFROST_DBG_SHADERS)
+ bi_print_shader(ctx, stdout);
bi_pack(ctx, &program->compiled);
- disassemble_bifrost(stdout, program->compiled.data, program->compiled.size, true);
+
+ if (bifrost_debug & BIFROST_DBG_SHADERS)
+ disassemble_bifrost(stdout, program->compiled.data, program->compiled.size, true);
ralloc_free(ctx);
}