/*
- * Copyright (C) 2019 Ryan Houdek <Sonicadvance1@gmail.com>
+ * Copyright (C) 2020 Collabora Ltd.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
+ *
+ * Authors (Collabora):
+ * Alyssa Rosenzweig <alyssa.rosenzweig@collabora.com>
*/
+#include "main/mtypes.h"
+#include "compiler/glsl/glsl_to_nir.h"
+#include "compiler/nir_types.h"
+#include "util/imports.h"
#include "compiler/nir/nir_builder.h"
-#include "bifrost_compile.h"
-#include "bifrost_opts.h"
-#include "bifrost_sched.h"
-#include "compiler_defines.h"
+
#include "disassemble.h"
-#include "bifrost_print.h"
+#include "bifrost_compile.h"
+#include "bifrost_nir.h"
+#include "compiler.h"
+#include "bi_quirks.h"
+#include "bi_print.h"
-#define BI_DEBUG
-
-static int
-glsl_type_size(const struct glsl_type *type, bool bindless)
-{
- return glsl_count_attribute_slots(type, false);
-}
+static bi_block *emit_cf_list(bi_context *ctx, struct exec_list *list);
+static bi_instruction *bi_emit_branch(bi_context *ctx);
+static void bi_schedule_barrier(bi_context *ctx);
static void
-optimize_nir(nir_shader *nir)
+emit_jump(bi_context *ctx, nir_jump_instr *instr)
{
- bool progress;
+ bi_instruction *branch = bi_emit_branch(ctx);
- NIR_PASS_V(nir, nir_lower_io, nir_var_all, glsl_type_size, 0);
- NIR_PASS(progress, nir, nir_lower_regs_to_ssa);
+ switch (instr->type) {
+ case nir_jump_break:
+ branch->branch.target = ctx->break_block;
+ break;
+ case nir_jump_continue:
+ branch->branch.target = ctx->continue_block;
+ break;
+ default:
+ unreachable("Unhandled jump type");
+ }
- do {
- progress = false;
+ pan_block_add_successor(&ctx->current_block->base, &branch->branch.target->base);
+}
- NIR_PASS(progress, nir, nir_lower_io, nir_var_all, glsl_type_size, 0);
+/* Gets a bytemask for a complete vecN write */
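+/* (e.g. a 32-bit vec3 yields (1 << 12) - 1 = 0xfff, i.e. twelve bytes) */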
+static unsigned
+bi_mask_for_channels_32(unsigned i)
+{
+ return (1 << (4 * i)) - 1;
+}
- NIR_PASS(progress, nir, nir_lower_var_copies);
- NIR_PASS(progress, nir, nir_lower_vars_to_ssa);
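+/* Builds a partially-filled load of class T from an I/O intrinsic; callers
+ * patch in any class-specific sources afterwards */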
+static bi_instruction
+bi_load(enum bi_class T, nir_intrinsic_instr *instr)
+{
+ bi_instruction load = {
+ .type = T,
+ .writemask = bi_mask_for_channels_32(instr->num_components),
+ .src = { BIR_INDEX_CONSTANT },
+ .constant = { .u64 = nir_intrinsic_base(instr) },
+ };
- NIR_PASS(progress, nir, nir_copy_prop);
- NIR_PASS(progress, nir, nir_opt_constant_folding);
+ const nir_intrinsic_info *info = &nir_intrinsic_infos[instr->intrinsic];
- NIR_PASS(progress, nir, nir_lower_vars_to_ssa);
- NIR_PASS(progress, nir, nir_lower_alu_to_scalar, NULL, NULL);
- NIR_PASS(progress, nir, nir_opt_if, true);
+ if (info->has_dest)
+ load.dest = bir_dest_index(&instr->dest);
- } while (progress);
+ if (info->has_dest && info->index_map[NIR_INTRINSIC_TYPE] > 0)
+ load.dest_type = nir_intrinsic_type(instr);
- NIR_PASS(progress, nir, nir_copy_prop);
- NIR_PASS(progress, nir, nir_opt_dce);
-}
+ nir_src *offset = nir_get_io_offset_src(instr);
-static unsigned
-nir_src_index(compiler_context *ctx, nir_src *src)
-{
- if (src->is_ssa)
- return src->ssa->index;
+ if (nir_src_is_const(*offset))
+ load.constant.u64 += nir_src_as_uint(*offset);
else
- return ctx->func->impl->ssa_alloc + src->reg.reg->index;
+ load.src[0] = bir_src_index(offset);
+
+ return load;
}
-static unsigned
-nir_dest_index(compiler_context *ctx, nir_dest *dst)
+static void
+bi_emit_ld_vary(bi_context *ctx, nir_intrinsic_instr *instr)
{
- if (dst->is_ssa)
- return dst->ssa.index;
- else
- return ctx->func->impl->ssa_alloc + dst->reg.reg->index;
+ bi_instruction ins = bi_load(BI_LOAD_VAR, instr);
+ ins.load_vary.interp_mode = BIFROST_INTERP_DEFAULT; /* TODO */
+ ins.load_vary.reuse = false; /* TODO */
+ ins.load_vary.flat = instr->intrinsic != nir_intrinsic_load_interpolated_input;
+ ins.dest_type = nir_type_float | nir_dest_bit_size(instr->dest);
+
+ if (nir_src_is_const(*nir_get_io_offset_src(instr))) {
+ /* Zero it out for direct */
+ ins.src[1] = BIR_INDEX_ZERO;
+ } else {
+ /* R61 contains sample mask state; TODO: model in RA */
+ ins.src[1] = BIR_INDEX_REGISTER | 61;
+ }
+
+ bi_emit(ctx, ins);
}
-static unsigned
-nir_alu_src_index(compiler_context *ctx, nir_alu_src *src)
+static void
+bi_emit_frag_out(bi_context *ctx, nir_intrinsic_instr *instr)
{
- return nir_src_index(ctx, &src->src);
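+ /* ATEST is emitted at most once, lazily, before the first blend */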
+ if (!ctx->emitted_atest) {
+ bi_instruction ins = {
+ .type = BI_ATEST,
+ .src = {
+ BIR_INDEX_REGISTER | 60 /* TODO: RA */,
+ bir_src_index(&instr->src[0])
+ },
+ .src_types = {
+ nir_type_uint32,
+ nir_type_float32
+ },
+ .swizzle = {
+ { 0 },
+ { 3, 0 } /* swizzle out the alpha */
+ },
+ .dest = BIR_INDEX_REGISTER | 60 /* TODO: RA */,
+ .dest_type = nir_type_uint32,
+ .writemask = 0xF
+ };
+
+ bi_emit(ctx, ins);
+ bi_schedule_barrier(ctx);
+ ctx->emitted_atest = true;
+ }
+
+ bi_instruction blend = {
+ .type = BI_BLEND,
+ .blend_location = nir_intrinsic_base(instr),
+ .src = {
+ bir_src_index(&instr->src[0]),
+ BIR_INDEX_REGISTER | 60 /* Can this be arbitrary? */,
+ },
+ .src_types = {
+ nir_type_float32,
+ nir_type_uint32
+ },
+ .swizzle = {
+ { 0, 1, 2, 3 },
+ { 0 }
+ },
+ .dest = BIR_INDEX_REGISTER | 48 /* Looks like magic */,
+ .dest_type = nir_type_uint32,
+ .writemask = 0xF
+ };
+
+ bi_emit(ctx, blend);
+ bi_schedule_barrier(ctx);
}
-struct bifrost_instruction *
-mir_alloc_ins(struct bifrost_instruction instr)
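+/* R61/R62 carry fixed-function state at shader entry (e.g. what varying
+ * interpolation needs), so wire them in as sources until RA can model them */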
+static bi_instruction
+bi_load_with_r61(enum bi_class T, nir_intrinsic_instr *instr)
{
- struct bifrost_instruction *heap_ins = malloc(sizeof(instr));
- memcpy(heap_ins, &instr, sizeof(instr));
- return heap_ins;
+ bi_instruction ld = bi_load(T, instr);
+ ld.src[1] = BIR_INDEX_REGISTER | 61; /* TODO: RA */
+ ld.src[2] = BIR_INDEX_REGISTER | 62;
+ ld.src[3] = 0;
+ ld.src_types[1] = nir_type_uint32;
+ ld.src_types[2] = nir_type_uint32;
+ ld.src_types[3] = nir_intrinsic_type(instr);
+ return ld;
}
static void
-emit_mir_instruction(struct compiler_context *ctx, struct bifrost_instruction instr)
+bi_emit_st_vary(bi_context *ctx, nir_intrinsic_instr *instr)
{
- list_addtail(&(mir_alloc_ins(instr))->link, &ctx->current_block->instructions);
+ bi_instruction address = bi_load_with_r61(BI_LOAD_VAR_ADDRESS, instr);
+ address.dest = bi_make_temp(ctx);
+ address.dest_type = nir_type_uint32;
+ address.writemask = (1 << 12) - 1;
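+ /* 0xfff = 12 bytes: the computed address is a 32-bit vec3 */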
+
+ bi_instruction st = {
+ .type = BI_STORE_VAR,
+ .src = {
+ bir_src_index(&instr->src[0]),
+ address.dest, address.dest, address.dest,
+ },
+ .src_types = {
+ nir_type_uint32,
+ nir_type_uint32, nir_type_uint32, nir_type_uint32,
+ },
+ .swizzle = {
+ { 0, 1, 2, 3 },
+ { 0 }, { 1 }, { 2 }
+ },
+ .store_channels = 4, /* TODO: WRITEMASK */
+ };
+
+ bi_emit(ctx, address);
+ bi_emit(ctx, st);
}
static void
-bifrost_block_add_successor(bifrost_block *block, bifrost_block *successor)
+bi_emit_ld_uniform(bi_context *ctx, nir_intrinsic_instr *instr)
{
- assert(block->num_successors < ARRAY_SIZE(block->successors));
- block->successors[block->num_successors++] = successor;
+ bi_instruction ld = bi_load(BI_LOAD_UNIFORM, instr);
+ ld.src[1] = BIR_INDEX_ZERO; /* TODO: UBO index */
+
+ /* TODO: Indirect access, since we need to multiply by the element
+ * size. I believe we can get this lowering automatically via
+ * nir_lower_io (as mul instructions) with the proper options, but this
+ * is TODO */
+ assert(ld.src[0] & BIR_INDEX_CONSTANT);
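+ /* Uniforms follow the sysval block; each slot is a 16-byte vec4 */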
+ ld.constant.u64 += ctx->sysvals.sysval_count;
+ ld.constant.u64 *= 16;
+
+ bi_emit(ctx, ld);
}
static void
-emit_load_const(struct compiler_context *ctx, nir_load_const_instr *instr)
+bi_emit_sysval(bi_context *ctx, nir_instr *instr,
+ unsigned nr_components, unsigned offset)
{
- nir_ssa_def def = instr->def;
+ nir_dest nir_dest;
- float *v = ralloc_array(NULL, float, 1);
- nir_const_value_to_array(v, instr->value, instr->def.num_components, f32);
- _mesa_hash_table_u64_insert(ctx->ssa_constants, def.index + 1, v);
-}
+ /* Figure out which uniform this is */
+ int sysval = panfrost_sysval_for_instr(instr, &nir_dest);
+ void *val = _mesa_hash_table_u64_search(ctx->sysvals.sysval_to_id, sysval);
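+ /* Ids are stored biased by one, so a missing entry (NULL) is unambiguous */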
-static uint32_t
-alloc_mir_temp(struct compiler_context *ctx)
-{
- return SSA_TEMP_VALUE(ctx->mir_temp++);
-}
+ /* Sysvals are prefix uniforms */
+ unsigned uniform = ((uintptr_t) val) - 1;
-static uint32_t
-emit_ld_vary_addr_constant(struct compiler_context *ctx, uint32_t location)
-{
- // LD_VAR_ADDR.f32 {R0, T1}, R61, R62, location:1, R12
- // ...
- // ST_VAR.v4 T1, R12, R13, R14, R4
-
- // R61-R62 is filled with information needed for varying interpolation
- // This loads a vec3 with the information that ST_VAR needs to work
-
- uint32_t mir_temp_location = alloc_mir_temp(ctx);
- // This instruction loads a vec3 starting from the initial register
- struct bifrost_instruction instr = {
- .op = op_ld_var_addr,
- .dest_components = 3,
- .ssa_args = {
- .dest = mir_temp_location,
- .src0 = SSA_FIXED_REGISTER(61),
- .src1 = SSA_FIXED_REGISTER(62),
- .src2 = SSA_INVALID_VALUE,
- .src3 = SSA_INVALID_VALUE,
- },
- .literal_args[0] = location,
+ /* Emit the read itself -- this is never indirect */
+
+ bi_instruction load = {
+ .type = BI_LOAD_UNIFORM,
+ .writemask = (1 << (nr_components * 4)) - 1,
+ .src = { BIR_INDEX_CONSTANT, BIR_INDEX_ZERO },
+ .constant = { (uniform * 16) + offset },
+ .dest = bir_dest_index(&nir_dest),
+ .dest_type = nir_type_uint32, /* TODO */
};
- emit_mir_instruction(ctx, instr);
- return mir_temp_location;
+ bi_emit(ctx, load);
}
-// XXX: Doesn't support duplicated values in the components!
-// RA WILL fail!
static void
-emit_create_vector(struct compiler_context *ctx, unsigned dest, unsigned num_comps, uint32_t *comps)
+emit_intrinsic(bi_context *ctx, nir_intrinsic_instr *instr)
{
- assert(num_comps <= 4 && "Can't make a vector larger than 4 components");
-
- // This instruction loads a vec3 starting from the initial register
- struct bifrost_instruction instr = {
- .op = op_create_vector,
- .dest_components = num_comps,
- .ssa_args = {
- .dest = dest,
- }
- };
- uint32_t *srcs[4] = {
- &instr.ssa_args.src0,
- &instr.ssa_args.src1,
- &instr.ssa_args.src2,
- &instr.ssa_args.src3,
- };
+ switch (instr->intrinsic) {
+ case nir_intrinsic_load_barycentric_pixel:
+ /* stub */
+ break;
+ case nir_intrinsic_load_interpolated_input:
+ case nir_intrinsic_load_input:
+ if (ctx->stage == MESA_SHADER_FRAGMENT)
+ bi_emit_ld_vary(ctx, instr);
+ else if (ctx->stage == MESA_SHADER_VERTEX)
+ bi_emit(ctx, bi_load_with_r61(BI_LOAD_ATTR, instr));
+ else
+ unreachable("Unsupported shader stage");
+ break;
- for (unsigned i = 0; i < 4; ++i) {
- if (i < num_comps)
- *srcs[i] = comps[i];
+ case nir_intrinsic_store_output:
+ if (ctx->stage == MESA_SHADER_FRAGMENT)
+ bi_emit_frag_out(ctx, instr);
+ else if (ctx->stage == MESA_SHADER_VERTEX)
+ bi_emit_st_vary(ctx, instr);
else
- *srcs[i] = SSA_INVALID_VALUE;
+ unreachable("Unsupported shader stage");
+ break;
+
+ case nir_intrinsic_load_uniform:
+ bi_emit_ld_uniform(ctx, instr);
+ break;
+
+ case nir_intrinsic_load_ssbo_address:
+ bi_emit_sysval(ctx, &instr->instr, 1, 0);
+ break;
+
+ case nir_intrinsic_get_buffer_size:
+ bi_emit_sysval(ctx, &instr->instr, 1, 8);
+ break;
+
+ case nir_intrinsic_load_viewport_scale:
+ case nir_intrinsic_load_viewport_offset:
+ case nir_intrinsic_load_num_work_groups:
+ case nir_intrinsic_load_sampler_lod_parameters_pan:
+ bi_emit_sysval(ctx, &instr->instr, 3, 0);
+ break;
+
+ default:
+ /* todo */
+ break;
}
- emit_mir_instruction(ctx, instr);
}
-static uint32_t
-emit_extract_vector_element(struct compiler_context *ctx, unsigned ssa_vector, unsigned element)
+static void
+emit_load_const(bi_context *ctx, nir_load_const_instr *instr)
{
- uint32_t mir_temp_location = alloc_mir_temp(ctx);
- // This instruction loads a vec3 starting from the initial register
- struct bifrost_instruction instr = {
- .op = op_extract_element,
- .dest_components = 1,
- .ssa_args = {
- .dest = mir_temp_location,
- .src0 = ssa_vector,
- .src1 = SSA_INVALID_VALUE,
- .src2 = SSA_INVALID_VALUE,
- .src3 = SSA_INVALID_VALUE,
+ /* Make sure we've been lowered */
+ assert(instr->def.num_components == 1);
+
+ bi_instruction move = {
+ .type = BI_MOV,
+ .dest = bir_ssa_index(&instr->def),
+ .dest_type = instr->def.bit_size | nir_type_uint,
+ .writemask = (1 << (instr->def.bit_size / 8)) - 1,
+ .src = {
+ BIR_INDEX_CONSTANT
+ },
+ .src_types = {
+ instr->def.bit_size | nir_type_uint,
},
- .literal_args[0] = element,
+ .constant = {
+ .u64 = nir_const_value_as_uint(instr->value[0], instr->def.bit_size)
+ }
};
- emit_mir_instruction(ctx, instr);
- return mir_temp_location;
+ bi_emit(ctx, move);
}
-static uint32_t
-emit_movi(struct compiler_context *ctx, uint32_t literal)
+
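+/* Expands a sized comparison opcode into its 8/16/32-bit case labels */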
+#define BI_CASE_CMP(op) \
+ case op##8: \
+ case op##16: \
+ case op##32: \
+
+static enum bi_class
+bi_class_for_nir_alu(nir_op op)
{
- uint32_t mir_temp_location = alloc_mir_temp(ctx);
- // This instruction loads a vec3 starting from the initial register
- struct bifrost_instruction instr = {
- .op = op_movi,
- .dest_components = 1,
- .ssa_args = {
- .dest = mir_temp_location,
- .src0 = SSA_INVALID_VALUE,
- .src1 = SSA_INVALID_VALUE,
- .src2 = SSA_INVALID_VALUE,
- .src3 = SSA_INVALID_VALUE,
- },
- .literal_args[0] = literal,
- };
- emit_mir_instruction(ctx, instr);
+ switch (op) {
+ case nir_op_iadd:
+ case nir_op_fadd:
+ case nir_op_fsub:
+ return BI_ADD;
+ case nir_op_isub:
+ return BI_ISUB;
+
+ BI_CASE_CMP(nir_op_flt)
+ BI_CASE_CMP(nir_op_fge)
+ BI_CASE_CMP(nir_op_feq)
+ BI_CASE_CMP(nir_op_fne)
+ BI_CASE_CMP(nir_op_ilt)
+ BI_CASE_CMP(nir_op_ige)
+ BI_CASE_CMP(nir_op_ieq)
+ BI_CASE_CMP(nir_op_ine)
+ return BI_CMP;
+
+ case nir_op_b8csel:
+ case nir_op_b16csel:
+ case nir_op_b32csel:
+ return BI_CSEL;
+
+ case nir_op_i2i8:
+ case nir_op_i2i16:
+ case nir_op_i2i32:
+ case nir_op_i2i64:
+ case nir_op_u2u8:
+ case nir_op_u2u16:
+ case nir_op_u2u32:
+ case nir_op_u2u64:
+ case nir_op_f2i16:
+ case nir_op_f2i32:
+ case nir_op_f2i64:
+ case nir_op_f2u16:
+ case nir_op_f2u32:
+ case nir_op_f2u64:
+ case nir_op_i2f16:
+ case nir_op_i2f32:
+ case nir_op_i2f64:
+ case nir_op_u2f16:
+ case nir_op_u2f32:
+ case nir_op_u2f64:
+ case nir_op_f2f16:
+ case nir_op_f2f32:
+ case nir_op_f2f64:
+ case nir_op_f2fmp:
+ return BI_CONVERT;
+
+ case nir_op_vec2:
+ case nir_op_vec3:
+ case nir_op_vec4:
+ return BI_COMBINE;
+
+ case nir_op_vec8:
+ case nir_op_vec16:
+ unreachable("should've been lowered");
+
+ case nir_op_ffma:
+ case nir_op_fmul:
+ return BI_FMA;
+
+ case nir_op_imin:
+ case nir_op_imax:
+ case nir_op_umin:
+ case nir_op_umax:
+ case nir_op_fmin:
+ case nir_op_fmax:
+ return BI_MINMAX;
+
+ case nir_op_fsat:
+ case nir_op_fneg:
+ case nir_op_fabs:
+ return BI_FMOV;
+ case nir_op_mov:
+ return BI_MOV;
+
+ case nir_op_fround_even:
+ case nir_op_fceil:
+ case nir_op_ffloor:
+ case nir_op_ftrunc:
+ return BI_ROUND;
+
+ case nir_op_frcp:
+ case nir_op_frsq:
+ return BI_SPECIAL;
- return mir_temp_location;
+ default:
+ unreachable("Unknown ALU op");
+ }
}
-static unsigned
-nir_alu_src_index_scalar(compiler_context *ctx, nir_alu_instr *nir_instr, unsigned src)
-{
- // NIR uses a combination of single channels plus swizzles to determine which component is pulled out of a source
- for (unsigned c = 0; c < NIR_MAX_VEC_COMPONENTS; c++) {
- if (!nir_alu_instr_channel_used(nir_instr, src, c))
- continue;
- // Pull the swizzle from this element that is active and use it as the source
- unsigned element = nir_instr->src[src].swizzle[c];
+/* Gets a bi_cond for a given NIR comparison opcode. In soft mode, it will
+ * return BI_COND_ALWAYS as a sentinel if it fails to find one (when used for
+ * optimizations). Otherwise it will bail (when used for primary code
+ * generation). */
- // Create an op that extracts an element from a vector
- return emit_extract_vector_element(ctx, nir_alu_src_index(ctx, &nir_instr->src[src]), element);
+static enum bi_cond
+bi_cond_for_nir(nir_op op, bool soft)
+{
+ switch (op) {
+ BI_CASE_CMP(nir_op_flt)
+ BI_CASE_CMP(nir_op_ilt)
+ return BI_COND_LT;
+
+ BI_CASE_CMP(nir_op_fge)
+ BI_CASE_CMP(nir_op_ige)
+ return BI_COND_GE;
+
+ BI_CASE_CMP(nir_op_feq)
+ BI_CASE_CMP(nir_op_ieq)
+ return BI_COND_EQ;
+
+ BI_CASE_CMP(nir_op_fne)
+ BI_CASE_CMP(nir_op_ine)
+ return BI_COND_NE;
+ default:
+ if (soft)
+ return BI_COND_ALWAYS;
+ else
+ unreachable("Invalid compare");
}
- assert(0);
- return 0;
}
static void
-emit_intrinsic(struct compiler_context *ctx, nir_intrinsic_instr *nir_instr)
+bi_copy_src(bi_instruction *alu, nir_alu_instr *instr, unsigned i, unsigned to,
+ unsigned *constants_left, unsigned *constant_shift)
{
- nir_const_value *const_offset;
- unsigned offset, reg;
-
- switch (nir_instr->intrinsic) {
- case nir_intrinsic_load_ubo: {
- nir_const_value *location = nir_src_as_const_value(nir_instr->src[0]);
- const_offset = nir_src_as_const_value(nir_instr->src[1]);
- assert (location && "no indirect ubo selection");
- assert (const_offset && "no indirect inputs");
-
- enum bifrost_ir_ops op;
-
- // load_ubo <UBO binding>, <byte offset>
- // ld_ubo <byte offset>, <UBO binding>
- switch (nir_dest_num_components(nir_instr->dest)) {
- case 1:
- op = op_ld_ubo_v1;
- break;
- case 2:
- op = op_ld_ubo_v2;
- break;
- case 3:
- op = op_ld_ubo_v3;
- break;
- case 4:
- op = op_ld_ubo_v4;
- break;
- default:
- assert(0);
- break;
- }
+ unsigned bits = nir_src_bit_size(instr->src[i].src);
+ unsigned dest_bits = nir_dest_bit_size(instr->dest.dest);
- reg = nir_dest_index(ctx, &nir_instr->dest);
- struct bifrost_instruction instr = {
- .op = op,
- .dest_components = nir_dest_num_components(nir_instr->dest),
- .ssa_args = {
- .dest = reg,
- .src0 = SSA_INVALID_VALUE,
- .src1 = SSA_INVALID_VALUE,
- .src2 = SSA_INVALID_VALUE,
- .src3 = SSA_INVALID_VALUE,
- },
- .literal_args[0] = nir_src_as_uint(nir_instr->src[1]),
- .literal_args[1] = nir_src_as_uint(nir_instr->src[0]),
- };
+ alu->src_types[to] = nir_op_infos[instr->op].input_types[i]
+ | bits;
- emit_mir_instruction(ctx, instr);
- break;
- }
- case nir_intrinsic_store_ssbo: {
- nir_const_value *location = nir_src_as_const_value(nir_instr->src[1]);
- const_offset = nir_src_as_const_value(nir_instr->src[2]);
- assert (location && "no indirect ubo selection");
- assert (const_offset && "no indirect inputs");
-
- // store_ssbo <Value>, <binding>, <offset>
- // store_vN <Addr>, <Value>
- reg = nir_src_index(ctx, &nir_instr->src[0]);
-
- enum bifrost_ir_ops op;
- switch (nir_src_num_components(nir_instr->src[0])) {
- case 1:
- op = op_store_v1;
- break;
- case 2:
- op = op_store_v2;
- break;
- case 3:
- op = op_store_v3;
- break;
- case 4:
- op = op_store_v4;
- break;
- default:
- assert(0);
- break;
- }
+ /* Try to inline a constant */
+ if (nir_src_is_const(instr->src[i].src) && *constants_left && (dest_bits == bits)) {
+ alu->constant.u64 |=
+ (nir_src_as_uint(instr->src[i].src)) << *constant_shift;
- struct bifrost_instruction instr = {
- .op = op,
- .dest_components = 0,
- .ssa_args = {
- .dest = SSA_INVALID_VALUE,
- .src0 = reg,
- .src1 = SSA_INVALID_VALUE,
- .src2 = SSA_INVALID_VALUE,
- .src3 = SSA_INVALID_VALUE,
- },
- .literal_args[0] = nir_src_as_uint(nir_instr->src[2]),
- };
- emit_mir_instruction(ctx, instr);
- break;
+ alu->src[to] = BIR_INDEX_CONSTANT | (*constant_shift);
+ --(*constants_left);
+ (*constant_shift) += dest_bits;
+ return;
}
- case nir_intrinsic_load_uniform:
- offset = nir_intrinsic_base(nir_instr);
- if (nir_src_is_const(nir_instr->src[0])) {
- offset += nir_src_as_uint(nir_instr->src[0]);
- } else {
- assert(0 && "Can't handle indirect load_uniform");
- }
+ alu->src[to] = bir_src_index(&instr->src[i].src);
- reg = nir_dest_index(ctx, &nir_instr->dest);
-
- unsigned num_components = nir_dest_num_components(nir_instr->dest);
- if (num_components == 1) {
- struct bifrost_instruction instr = {
- .op = op_mov,
- .dest_components = 1,
- .ssa_args = {
- .dest = reg,
- .src0 = SSA_FIXED_UREGISTER(offset),
- .src1 = SSA_INVALID_VALUE,
- .src2 = SSA_INVALID_VALUE,
- .src3 = SSA_INVALID_VALUE,
- },
- };
- emit_mir_instruction(ctx, instr);
- } else {
- uint32_t comps[4];
-
- for (unsigned i = 0; i < nir_dest_num_components(nir_instr->dest); ++i) {
- uint32_t temp_dest = alloc_mir_temp(ctx);
- comps[i] = temp_dest;
- struct bifrost_instruction instr = {
- .op = op_mov,
- .dest_components = 1,
- .ssa_args = {
- .dest = temp_dest,
- .src0 = SSA_FIXED_UREGISTER(offset + (i * 4)),
- .src1 = SSA_INVALID_VALUE,
- .src2 = SSA_INVALID_VALUE,
- .src3 = SSA_INVALID_VALUE,
- },
- };
- emit_mir_instruction(ctx, instr);
- }
+ /* We assert scalarization above */
+ alu->swizzle[to][0] = instr->src[i].swizzle[0];
+}
- emit_create_vector(ctx, reg, num_components, comps);
- }
- break;
+static void
+bi_fuse_csel_cond(bi_instruction *csel, nir_alu_src cond,
+ unsigned *constants_left, unsigned *constant_shift)
+{
+ /* Bail for vector weirdness */
+ if (cond.swizzle[0] != 0)
+ return;
- case nir_intrinsic_load_input: {
- const_offset = nir_src_as_const_value(nir_instr->src[0]);
- assert (const_offset && "no indirect inputs");
+ if (!cond.src.is_ssa)
+ return;
- offset = nir_intrinsic_base(nir_instr) + nir_src_as_uint(nir_instr->src[0]);
+ nir_ssa_def *def = cond.src.ssa;
+ nir_instr *parent = def->parent_instr;
- reg = nir_dest_index(ctx, &nir_instr->dest);
+ if (parent->type != nir_instr_type_alu)
+ return;
- enum bifrost_ir_ops op;
- switch (nir_dest_num_components(nir_instr->dest)) {
- case 1:
- op = op_ld_attr_v1;
- break;
- case 2:
- op = op_ld_attr_v2;
- break;
- case 3:
- op = op_ld_attr_v3;
- break;
- case 4:
- op = op_ld_attr_v4;
- break;
- default:
- assert(0);
- break;
- }
+ nir_alu_instr *alu = nir_instr_as_alu(parent);
- struct bifrost_instruction instr = {
- .op = op,
- .dest_components = nir_dest_num_components(nir_instr->dest),
- .ssa_args = {
- .dest = reg,
- .src0 = offset,
- .src1 = SSA_INVALID_VALUE,
- .src2 = SSA_INVALID_VALUE,
- .src3 = SSA_INVALID_VALUE,
- }
- };
+ /* Try to match a condition */
+ enum bi_cond bcond = bi_cond_for_nir(alu->op, true);
- emit_mir_instruction(ctx, instr);
- break;
- }
- case nir_intrinsic_store_output: {
- const_offset = nir_src_as_const_value(nir_instr->src[1]);
- assert(const_offset && "no indirect outputs");
-
- offset = nir_intrinsic_base(nir_instr);
- if (ctx->stage == MESA_SHADER_FRAGMENT) {
- int comp = nir_intrinsic_component(nir_instr);
- offset += comp;
- // XXX: Once we support more than colour output then this will need to change
- void *entry = _mesa_hash_table_u64_search(ctx->outputs_nir_to_bi, offset + FRAG_RESULT_DATA0 + 1);
-
- if (!entry) {
- printf("WARNING: skipping fragment output\n");
- break;
- }
+ if (bcond == BI_COND_ALWAYS)
+ return;
- offset = (uintptr_t) (entry) - 1;
- reg = nir_src_index(ctx, &nir_instr->src[0]);
-
- enum bifrost_ir_ops op;
- switch (nir_src_num_components(nir_instr->src[0])) {
- case 1:
- op = op_store_v1;
- break;
- case 2:
- op = op_store_v2;
- break;
- case 3:
- op = op_store_v3;
- break;
- case 4:
- op = op_store_v4;
- break;
- default:
- assert(0);
- break;
- }
+ /* We found one, let's fuse it in */
+ csel->csel_cond = bcond;
+ bi_copy_src(csel, alu, 0, 0, constants_left, constant_shift);
+ bi_copy_src(csel, alu, 1, 1, constants_left, constant_shift);
+}
- // XXX: All offsets aren't vec4 aligned. Will need to adjust this in the future
- // XXX: This needs to offset correctly in to memory so the blend step can pick it up
- uint32_t movi = emit_movi(ctx, offset * 16);
- uint32_t movi2 = emit_movi(ctx, 0);
-
- uint32_t comps[2] = {
- movi, movi2,
- };
- uint32_t offset_val = alloc_mir_temp(ctx);
- emit_create_vector(ctx, offset_val, 2, comps);
-
- struct bifrost_instruction instr = {
- .op = op,
- .dest_components = 0,
- .ssa_args = {
- .dest = SSA_INVALID_VALUE,
- .src0 = offset_val,
- .src1 = reg,
- .src2 = SSA_INVALID_VALUE,
- .src3 = SSA_INVALID_VALUE,
- }
- };
- emit_mir_instruction(ctx, instr);
- } else if (ctx->stage == MESA_SHADER_VERTEX) {
- int comp = nir_intrinsic_component(nir_instr);
- offset += comp;
- void *entry = _mesa_hash_table_u64_search(ctx->varying_nir_to_bi, offset + 2);
-
- if (!entry) {
- printf("WARNING: skipping varying\n");
- break;
- }
+static void
+emit_alu(bi_context *ctx, nir_alu_instr *instr)
+{
+ /* Assume it's something we can handle normally */
+ bi_instruction alu = {
+ .type = bi_class_for_nir_alu(instr->op),
+ .dest = bir_dest_index(&instr->dest.dest),
+ .dest_type = nir_op_infos[instr->op].output_type
+ | nir_dest_bit_size(instr->dest.dest),
+ };
- offset = (uintptr_t) (entry) - 1;
-
- reg = nir_src_index(ctx, &nir_instr->src[0]);
- // LD_VAR_ADDR.f32 {R0, T1}, R61, R62, location:1, R12
- // ...
- // ST_VAR.v4 T1, R12, R13, R14, R4
-
- offset = emit_ld_vary_addr_constant(ctx, offset);
- enum bifrost_ir_ops op;
- switch (nir_src_num_components(nir_instr->src[0])) {
- case 1:
- op = op_st_vary_v1;
- break;
- case 2:
- op = op_st_vary_v2;
- break;
- case 3:
- op = op_st_vary_v3;
- break;
- case 4:
- op = op_st_vary_v4;
- break;
- default:
- assert(0);
- break;
- }
+ /* TODO: Implement lowering of special functions for older Bifrost */
+ assert((alu.type != BI_SPECIAL) || !(ctx->quirks & BIFROST_NO_FAST_OP));
- struct bifrost_instruction instr = {
- .op = op,
- .dest_components = 0,
- .ssa_args = {
- .dest = SSA_INVALID_VALUE,
- .src0 = offset,
- .src1 = reg,
- .src2 = SSA_INVALID_VALUE,
- .src3 = SSA_INVALID_VALUE,
- }
- };
- emit_mir_instruction(ctx, instr);
- } else {
- assert(0 && "Unknown store_output stage");
- }
- break;
- }
- default:
- printf ("Unhandled intrinsic %s\n", nir_intrinsic_infos[nir_instr->intrinsic].name);
- break;
+ if (instr->dest.dest.is_ssa) {
+ /* Construct a writemask */
+ unsigned bits_per_comp = instr->dest.dest.ssa.bit_size;
+ unsigned comps = instr->dest.dest.ssa.num_components;
+
+ if (alu.type != BI_COMBINE)
+ assert(comps == 1);
+
+ unsigned bits = bits_per_comp * comps;
+ unsigned bytes = bits / 8;
+ alu.writemask = (1 << bytes) - 1;
+ } else {
+ unsigned comp_mask = instr->dest.write_mask;
+
+ alu.writemask = pan_to_bytemask(nir_dest_bit_size(instr->dest.dest),
+ comp_mask);
}
-}
-#define ALU_CASE(arguments, nir, name) \
- case nir_op_##nir: \
- argument_count = arguments; \
- op = op_##name; \
- break
-#define ALU_CASE_MOD(arguments, nir, name, modifiers) \
- case nir_op_##nir: \
- argument_count = arguments; \
- op = op_##name; \
- src_modifiers = modifiers; \
- break
+ /* We inline constants as we go. This tracks how many constants have
+ * been inlined, since we're limited to 64-bits of constants per
+ * instruction */
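+ /* (e.g. with a 32-bit destination, two constants fit, at shifts 0 and 32) */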
-static void
-emit_alu(struct compiler_context *ctx, nir_alu_instr *nir_instr)
-{
- unsigned dest = nir_dest_index(ctx, &nir_instr->dest.dest);
- unsigned op = ~0U, argument_count;
- unsigned src_modifiers = 0;
-
- switch (nir_instr->op) {
- ALU_CASE(2, fmul, fmul_f32);
- ALU_CASE(2, fadd, fadd_f32);
- ALU_CASE_MOD(2, fsub, fadd_f32, SOURCE_MODIFIER(1, SRC_MOD_NEG));
- ALU_CASE(1, ftrunc, trunc);
- ALU_CASE(1, fceil, ceil);
- ALU_CASE(1, ffloor, floor);
- ALU_CASE(1, fround_even, roundeven);
- ALU_CASE(1, frcp, frcp_fast_f32);
- ALU_CASE(2, fmax, max_f32);
- ALU_CASE(2, fmin, min_f32);
- ALU_CASE(2, iadd, add_i32);
- ALU_CASE(2, isub, sub_i32);
- ALU_CASE(2, imul, mul_i32);
- ALU_CASE(2, iand, and_i32);
- ALU_CASE(2, ior, or_i32);
- ALU_CASE(2, ixor, xor_i32);
- ALU_CASE(2, ishl, lshift_i32);
- ALU_CASE(2, ushr, rshift_i32);
- ALU_CASE(2, ishr, arshift_i32);
- case nir_op_ineg: {
- unsigned src0 = nir_alu_src_index_scalar(ctx, nir_instr, 0);
- printf("ineg 0x%08x\n", src0);
- struct bifrost_instruction instr = {
- .op = op_sub_i32,
- .dest_components = 1,
- .ssa_args = {
- .dest = dest,
- .src0 = SSA_FIXED_CONST_0,
- .src1 = src0,
- .src2 = SSA_INVALID_VALUE,
- .src3 = SSA_INVALID_VALUE,
- },
- };
+ unsigned dest_bits = nir_dest_bit_size(instr->dest.dest);
+ unsigned constants_left = (64 / dest_bits);
+ unsigned constant_shift = 0;
- emit_mir_instruction(ctx, instr);
- return;
+ if (alu.type == BI_COMBINE)
+ constants_left = 0;
+
+ /* Copy sources */
+
+ unsigned num_inputs = nir_op_infos[instr->op].num_inputs;
+ assert(num_inputs <= ARRAY_SIZE(alu.src));
+ for (unsigned i = 0; i < num_inputs; ++i) {
+ unsigned f = 0;
+
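+ /* CSEL reserves src[1] for the comparison's second operand, so
+ * shift the remaining NIR sources up by one */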
+ if (i && alu.type == BI_CSEL)
+ f++;
+
+ bi_copy_src(&alu, instr, i, i + f, &constants_left, &constant_shift);
}
- case nir_op_vec2: {
- uint32_t comps[3] = {
- nir_alu_src_index(ctx, &nir_instr->src[0]),
- nir_alu_src_index(ctx, &nir_instr->src[1]),
- };
- emit_create_vector(ctx, dest, 2, comps);
- return;
+
+ /* Op-specific fixup */
+ switch (instr->op) {
+ case nir_op_fmul:
+ alu.src[2] = BIR_INDEX_ZERO; /* FMA */
+ alu.src_types[2] = alu.src_types[1];
break;
- }
- case nir_op_vec3: {
- uint32_t comps[3] = {
- nir_alu_src_index(ctx, &nir_instr->src[0]),
- nir_alu_src_index(ctx, &nir_instr->src[1]),
- nir_alu_src_index(ctx, &nir_instr->src[2]),
- };
- emit_create_vector(ctx, dest, 3, comps);
- return;
+ case nir_op_fsat:
+ alu.outmod = BIFROST_SAT; /* FMOV */
break;
- }
- case nir_op_vec4: {
- uint32_t comps[4] = {
- nir_alu_src_index(ctx, &nir_instr->src[0]),
- nir_alu_src_index(ctx, &nir_instr->src[1]),
- nir_alu_src_index(ctx, &nir_instr->src[2]),
- nir_alu_src_index(ctx, &nir_instr->src[3]),
- };
- emit_create_vector(ctx, dest, 4, comps);
- return;
+ case nir_op_fneg:
+ alu.src_neg[0] = true; /* FMOV */
break;
- }
- case nir_op_fdiv: {
- unsigned src0 = nir_alu_src_index_scalar(ctx, nir_instr, 0);
- unsigned src1 = nir_alu_src_index_scalar(ctx, nir_instr, 1);
- uint32_t mir_temp_location = alloc_mir_temp(ctx);
- {
- struct bifrost_instruction instr = {
- .op = op_frcp_fast_f32,
- .dest_components = 1,
- .ssa_args = {
- .dest = mir_temp_location,
- .src0 = src1,
- .src1 = SSA_INVALID_VALUE,
- .src2 = SSA_INVALID_VALUE,
- .src3 = SSA_INVALID_VALUE,
- },
- };
- emit_mir_instruction(ctx, instr);
- }
-
- struct bifrost_instruction instr = {
- .op = op_fmul_f32,
- .dest_components = 1,
- .ssa_args = {
- .dest = dest,
- .src0 = src0,
- .src1 = src1,
- .src2 = SSA_INVALID_VALUE,
- .src3 = SSA_INVALID_VALUE,
- },
- .src_modifiers = src_modifiers,
- };
-
- emit_mir_instruction(ctx, instr);
- return;
+ case nir_op_fabs:
+ alu.src_abs[0] = true; /* FMOV */
break;
- }
- case nir_op_umin:
- case nir_op_imin:
+ case nir_op_fsub:
+ alu.src_neg[1] = true; /* FADD */
+ break;
+ case nir_op_fmax:
+ case nir_op_imax:
case nir_op_umax:
- case nir_op_imax: {
- unsigned src0 = nir_alu_src_index_scalar(ctx, nir_instr, 0);
- unsigned src1 = nir_alu_src_index_scalar(ctx, nir_instr, 1);
- struct bifrost_instruction instr = {
- .op = op_csel_i32,
- .dest_components = 1,
- .ssa_args = {
- .dest = dest,
- .src0 = src0,
- .src1 = src1,
- .src2 = src0,
- .src3 = src1,
- },
- .src_modifiers = src_modifiers,
- .literal_args[0] = 0, /* XXX: Comparison operator */
- };
-
- emit_mir_instruction(ctx, instr);
- return;
+ alu.op.minmax = BI_MINMAX_MAX; /* MINMAX */
break;
- }
- case nir_op_umin3:
- case nir_op_imin3:
- case nir_op_umax3:
- case nir_op_imax3: {
- unsigned src0 = nir_alu_src_index_scalar(ctx, nir_instr, 0);
- unsigned src1 = nir_alu_src_index_scalar(ctx, nir_instr, 1);
- unsigned src2 = nir_alu_src_index_scalar(ctx, nir_instr, 2);
-
- unsigned op = 0;
- if (nir_instr->op == nir_op_umin3)
- op = op_umin3_i32;
- else if (nir_instr->op == nir_op_imin3)
- op = op_imin3_i32;
- else if (nir_instr->op == nir_op_umax3)
- op = op_umax3_i32;
- else if (nir_instr->op == nir_op_imax3)
- op = op_imax3_i32;
- struct bifrost_instruction instr = {
- .op = op,
- .dest_components = 1,
- .ssa_args = {
- .dest = dest,
- .src0 = src0,
- .src1 = src1,
- .src2 = src2,
- .src3 = SSA_INVALID_VALUE,
- },
- .src_modifiers = src_modifiers,
- };
-
- emit_mir_instruction(ctx, instr);
-
- return;
+ case nir_op_frcp:
+ alu.op.special = BI_SPECIAL_FRCP;
break;
- }
- case nir_op_ine: {
- uint32_t movi = emit_movi(ctx, ~0U);
- unsigned src0 = nir_alu_src_index(ctx, &nir_instr->src[0]);
- unsigned src1 = nir_alu_src_index(ctx, &nir_instr->src[1]);
- struct bifrost_instruction instr = {
- .op = op_csel_i32,
- .dest_components = 1,
- .ssa_args = {
- .dest = dest,
- .src0 = src0,
- .src1 = src1,
- .src2 = movi,
- .src3 = SSA_FIXED_CONST_0,
- },
- .src_modifiers = src_modifiers,
- .literal_args[0] = CSEL_IEQ, /* XXX: Comparison operator */
- };
-
- emit_mir_instruction(ctx, instr);
- return;
+ case nir_op_frsq:
+ alu.op.special = BI_SPECIAL_FRSQ;
+ break;
+ BI_CASE_CMP(nir_op_flt)
+ BI_CASE_CMP(nir_op_ilt)
+ BI_CASE_CMP(nir_op_fge)
+ BI_CASE_CMP(nir_op_ige)
+ BI_CASE_CMP(nir_op_feq)
+ BI_CASE_CMP(nir_op_ieq)
+ BI_CASE_CMP(nir_op_fne)
+ BI_CASE_CMP(nir_op_ine)
+ alu.op.compare = bi_cond_for_nir(instr->op, false);
+ break;
+ case nir_op_fround_even:
+ alu.op.round = BI_ROUND_MODE;
+ alu.roundmode = BIFROST_RTE;
+ break;
+ case nir_op_fceil:
+ alu.op.round = BI_ROUND_MODE;
+ alu.roundmode = BIFROST_RTP;
+ break;
+ case nir_op_ffloor:
+ alu.op.round = BI_ROUND_MODE;
+ alu.roundmode = BIFROST_RTN;
+ break;
+ case nir_op_ftrunc:
+ alu.op.round = BI_ROUND_MODE;
+ alu.roundmode = BIFROST_RTZ;
break;
- }
default:
- printf("Unhandled ALU op %s\n", nir_op_infos[nir_instr->op].name);
- return;
+ break;
}
- unsigned src0 = nir_alu_src_index_scalar(ctx, nir_instr, 0);
- unsigned src1 = argument_count >= 2 ? nir_alu_src_index_scalar(ctx, nir_instr, 1) : SSA_INVALID_VALUE;
- unsigned src2 = argument_count >= 3 ? nir_alu_src_index_scalar(ctx, nir_instr, 2) : SSA_INVALID_VALUE;
- unsigned src3 = argument_count >= 4 ? nir_alu_src_index_scalar(ctx, nir_instr, 3) : SSA_INVALID_VALUE;
-
- struct bifrost_instruction instr = {
- .op = op,
- .dest_components = 1,
- .ssa_args = {
- .dest = dest,
- .src0 = src0,
- .src1 = src1,
- .src2 = src2,
- .src3 = src3,
- },
- .src_modifiers = src_modifiers,
- };
+ if (alu.type == BI_CSEL) {
+ /* Default to csel3 */
+ alu.csel_cond = BI_COND_NE;
+ alu.src[1] = BIR_INDEX_ZERO;
+ alu.src_types[1] = alu.src_types[0];
+
+ bi_fuse_csel_cond(&alu, instr->src[0],
+ &constants_left, &constant_shift);
+ }
- emit_mir_instruction(ctx, instr);
+ bi_emit(ctx, alu);
}
static void
-emit_instr(struct compiler_context *ctx, struct nir_instr *instr)
+emit_instr(bi_context *ctx, struct nir_instr *instr)
{
switch (instr->type) {
case nir_instr_type_load_const:
emit_load_const(ctx, nir_instr_as_load_const(instr));
break;
+
case nir_instr_type_intrinsic:
emit_intrinsic(ctx, nir_instr_as_intrinsic(instr));
break;
+
case nir_instr_type_alu:
emit_alu(ctx, nir_instr_as_alu(instr));
break;
+
+#if 0
case nir_instr_type_tex:
- printf("Unhandled NIR inst tex\n");
+ emit_tex(ctx, nir_instr_as_tex(instr));
break;
+#endif
+
case nir_instr_type_jump:
- printf("Unhandled NIR inst jump\n");
+ emit_jump(ctx, nir_instr_as_jump(instr));
break;
+
case nir_instr_type_ssa_undef:
- printf("Unhandled NIR inst ssa_undef\n");
+ /* Spurious */
break;
+
default:
- printf("Unhandled instruction type\n");
+ //unreachable("Unhandled instruction type");
break;
}
-
}
-static bifrost_block *
-emit_block(struct compiler_context *ctx, nir_block *block)
+
+
+static bi_block *
+create_empty_block(bi_context *ctx)
{
- bifrost_block *this_block = calloc(sizeof(bifrost_block), 1);
- list_addtail(&this_block->link, &ctx->blocks);
+ bi_block *blk = rzalloc(ctx, bi_block);
- ++ctx->block_count;
+ blk->base.predecessors = _mesa_set_create(blk,
+ _mesa_hash_pointer,
+ _mesa_key_pointer_equal);
- /* Add this block to be a successor to the previous block */
- if (ctx->current_block)
- bifrost_block_add_successor(ctx->current_block, this_block);
+ blk->base.name = ctx->block_name_count++;
- /* Set up current block */
- list_inithead(&this_block->instructions);
- ctx->current_block = this_block;
+ return blk;
+}
+
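+/* Splits off a fresh block so later instructions cannot be scheduled past
+ * this point; used to keep ATEST/BLEND ordered */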
+static void
+bi_schedule_barrier(bi_context *ctx)
+{
+ bi_block *temp = ctx->after_block;
+ ctx->after_block = create_empty_block(ctx);
+ list_addtail(&ctx->after_block->base.link, &ctx->blocks);
+ list_inithead(&ctx->after_block->base.instructions);
+ pan_block_add_successor(&ctx->current_block->base, &ctx->after_block->base);
+ ctx->current_block = ctx->after_block;
+ ctx->after_block = temp;
+}
+
+static bi_block *
+emit_block(bi_context *ctx, nir_block *block)
+{
+ if (ctx->after_block) {
+ ctx->current_block = ctx->after_block;
+ ctx->after_block = NULL;
+ } else {
+ ctx->current_block = create_empty_block(ctx);
+ }
+
+ list_addtail(&ctx->current_block->base.link, &ctx->blocks);
+ list_inithead(&ctx->current_block->base.instructions);
nir_foreach_instr(instr, block) {
emit_instr(ctx, instr);
++ctx->instruction_count;
}
-#ifdef BI_DEBUG
- print_mir_block(this_block, false);
-#endif
- return this_block;
+ return ctx->current_block;
}
-void
-emit_if(struct compiler_context *ctx, nir_if *nir_inst);
+/* Emits an unconditional branch to the end of the current block, returning a
+ * pointer so the user can fill in details */
-static struct bifrost_block *
-emit_cf_list(struct compiler_context *ctx, struct exec_list *list)
+static bi_instruction *
+bi_emit_branch(bi_context *ctx)
{
- struct bifrost_block *start_block = NULL;
+ bi_instruction branch = {
+ .type = BI_BRANCH,
+ .branch = {
+ .cond = BI_COND_ALWAYS
+ }
+ };
+
+ return bi_emit(ctx, branch);
+}
+
+/* Sets a condition for a branch by examining the NIR condition. If we're
+ * familiar with the condition, we unwrap it to fold it into the branch
+ * instruction. Otherwise, we consume the condition directly. We
+ * generally use 1-bit booleans, which lets us use small types for
+ * the conditions.
+ */
+
+static void
+bi_set_branch_cond(bi_instruction *branch, nir_src *cond, bool invert)
+{
+ /* TODO: Try to unwrap instead of always bailing */
+ branch->src[0] = bir_src_index(cond);
+ branch->src[1] = BIR_INDEX_ZERO;
+ branch->src_types[0] = branch->src_types[1] = nir_type_uint16;
+ branch->branch.cond = invert ? BI_COND_EQ : BI_COND_NE;
+}
+
+static void
+emit_if(bi_context *ctx, nir_if *nif)
+{
+ bi_block *before_block = ctx->current_block;
+
+ /* Speculatively emit the branch, but we can't fill it in until later */
+ bi_instruction *then_branch = bi_emit_branch(ctx);
+ bi_set_branch_cond(then_branch, &nif->condition, true);
+
+ /* Emit the two subblocks. */
+ bi_block *then_block = emit_cf_list(ctx, &nif->then_list);
+ bi_block *end_then_block = ctx->current_block;
+
+ /* Emit a jump from the end of the then block to the end of the else */
+ bi_instruction *then_exit = bi_emit_branch(ctx);
+
+ /* Emit second block, and check if it's empty */
+
+ int count_in = ctx->instruction_count;
+ bi_block *else_block = emit_cf_list(ctx, &nif->else_list);
+ bi_block *end_else_block = ctx->current_block;
+ ctx->after_block = create_empty_block(ctx);
+
+ /* Now that we have the subblocks emitted, fix up the branches */
+
+ assert(then_block);
+ assert(else_block);
+
+ if (ctx->instruction_count == count_in) {
+ /* The else block is empty, so don't emit an exit jump */
+ bi_remove_instruction(then_exit);
+ then_branch->branch.target = ctx->after_block;
+ } else {
+ then_branch->branch.target = else_block;
+ then_exit->branch.target = ctx->after_block;
+ pan_block_add_successor(&end_then_block->base, &then_exit->branch.target->base);
+ }
+
+ /* Wire up the successors */
+
+ pan_block_add_successor(&before_block->base, &then_branch->branch.target->base); /* then_branch */
+
+ pan_block_add_successor(&before_block->base, &then_block->base); /* fallthrough */
+ pan_block_add_successor(&end_else_block->base, &ctx->after_block->base); /* fallthrough */
+}
+
+static void
+emit_loop(bi_context *ctx, nir_loop *nloop)
+{
+ /* Remember where we are */
+ bi_block *start_block = ctx->current_block;
+
+ bi_block *saved_break = ctx->break_block;
+ bi_block *saved_continue = ctx->continue_block;
+
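+ /* The continue block doubles as the loop header, so emission resumes there */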
+ ctx->continue_block = create_empty_block(ctx);
+ ctx->break_block = create_empty_block(ctx);
+ ctx->after_block = ctx->continue_block;
+
+ /* Emit the body itself */
+ emit_cf_list(ctx, &nloop->body);
+
+ /* Branch back to the loop header */
+ bi_instruction *br_back = bi_emit_branch(ctx);
+ br_back->branch.target = ctx->continue_block;
+ pan_block_add_successor(&start_block->base, &ctx->continue_block->base);
+ pan_block_add_successor(&ctx->current_block->base, &ctx->continue_block->base);
+
+ ctx->after_block = ctx->break_block;
+
+ /* Pop off */
+ ctx->break_block = saved_break;
+ ctx->continue_block = saved_continue;
+ ++ctx->loop_count;
+}
+
+static bi_block *
+emit_cf_list(bi_context *ctx, struct exec_list *list)
+{
+ bi_block *start_block = NULL;
+
foreach_list_typed(nir_cf_node, node, node, list) {
switch (node->type) {
case nir_cf_node_block: {
- bifrost_block *block = emit_block(ctx, nir_cf_node_as_block(node));
+ bi_block *block = emit_block(ctx, nir_cf_node_as_block(node));
if (!start_block)
start_block = block;
break;
}

case nir_cf_node_if:
emit_if(ctx, nir_cf_node_as_if(node));
break;
- default:
case nir_cf_node_loop:
- case nir_cf_node_function:
- assert(0);
+ emit_loop(ctx, nir_cf_node_as_loop(node));
break;
+
+ default:
+ unreachable("Unknown control flow");
}
}
return start_block;
}
-void
-emit_if(struct compiler_context *ctx, nir_if *nir_inst)
+static int
+glsl_type_size(const struct glsl_type *type, bool bindless)
{
+ return glsl_count_attribute_slots(type, false);
+}
- // XXX: Conditional branch instruction can do a variety of comparisons with the sources
- // Merge the source instruction `ine` with our conditional branch
- {
- uint32_t movi = emit_movi(ctx, ~0U);
- struct bifrost_instruction instr = {
- .op = op_branch,
- .dest_components = 0,
- .ssa_args = {
- .dest = SSA_INVALID_VALUE,
- .src0 = nir_src_index(ctx, &nir_inst->condition),
- .src1 = movi,
- .src2 = SSA_INVALID_VALUE,
- .src3 = SSA_INVALID_VALUE,
- },
- .src_modifiers = 0,
- .literal_args[0] = BR_COND_EQ, /* XXX: Comparison Arg type */
- .literal_args[1] = 0, /* XXX: Branch target */
- };
+static void
+bi_optimize_nir(nir_shader *nir)
+{
+ bool progress;
+ unsigned lower_flrp = 16 | 32 | 64;
- emit_mir_instruction(ctx, instr);
- }
+ NIR_PASS(progress, nir, nir_lower_regs_to_ssa);
+ NIR_PASS(progress, nir, nir_lower_idiv, nir_lower_idiv_fast);
- bifrost_instruction *true_branch = mir_last_instr_in_block(ctx->current_block);
+ nir_lower_tex_options lower_tex_options = {
+ .lower_txs_lod = true,
+ .lower_txp = ~0,
+ .lower_tex_without_implicit_lod = true,
+ .lower_txd = true,
+ };
- bifrost_block *true_block = emit_cf_list(ctx, &nir_inst->then_list);
+ NIR_PASS(progress, nir, nir_lower_tex, &lower_tex_options);
+ NIR_PASS(progress, nir, nir_lower_alu_to_scalar, NULL, NULL);
+ NIR_PASS(progress, nir, nir_lower_load_const_to_scalar);
- {
- struct bifrost_instruction instr = {
- .op = op_branch,
- .dest_components = 0,
- .ssa_args = {
- .dest = SSA_INVALID_VALUE,
- .src0 = SSA_INVALID_VALUE,
- .src1 = SSA_INVALID_VALUE,
- .src2 = SSA_INVALID_VALUE,
- .src3 = SSA_INVALID_VALUE,
- },
- .src_modifiers = 0,
- .literal_args[0] = BR_ALWAYS, /* XXX: ALWAYS */
- .literal_args[1] = 0, /* XXX: Branch target */
- };
+ do {
+ progress = false;
- emit_mir_instruction(ctx, instr);
- }
- bifrost_instruction *true_exit_branch = mir_last_instr_in_block(ctx->current_block);
+ NIR_PASS(progress, nir, nir_lower_var_copies);
+ NIR_PASS(progress, nir, nir_lower_vars_to_ssa);
- unsigned false_idx = ctx->block_count;
- unsigned inst_count = ctx->instruction_count;
+ NIR_PASS(progress, nir, nir_copy_prop);
+ NIR_PASS(progress, nir, nir_opt_remove_phis);
+ NIR_PASS(progress, nir, nir_opt_dce);
+ NIR_PASS(progress, nir, nir_opt_dead_cf);
+ NIR_PASS(progress, nir, nir_opt_cse);
+ NIR_PASS(progress, nir, nir_opt_peephole_select, 64, false, true);
+ NIR_PASS(progress, nir, nir_opt_algebraic);
+ NIR_PASS(progress, nir, nir_opt_constant_folding);
- bifrost_block *false_block = emit_cf_list(ctx, &nir_inst->else_list);
+ if (lower_flrp != 0) {
+ bool lower_flrp_progress = false;
+ NIR_PASS(lower_flrp_progress,
+ nir,
+ nir_lower_flrp,
+ lower_flrp,
+ false /* always_precise */,
+ nir->options->lower_ffma);
+ if (lower_flrp_progress) {
+ NIR_PASS(progress, nir,
+ nir_opt_constant_folding);
+ progress = true;
+ }
- unsigned if_footer_idx = ctx->block_count;
- assert(true_block);
- assert(false_block);
+ /* Nothing should rematerialize any flrps, so we only
+ * need to do this lowering once.
+ */
+ lower_flrp = 0;
+ }
+ NIR_PASS(progress, nir, nir_opt_undef);
+ NIR_PASS(progress, nir, nir_opt_loop_unroll,
+ nir_var_shader_in |
+ nir_var_shader_out |
+ nir_var_function_temp);
+ } while (progress);
- if (ctx->instruction_count == inst_count) {
- // If the else branch didn't have anything in it then we can remove the dead jump
- mir_remove_instr(true_exit_branch);
- } else {
- true_exit_branch->literal_args[1] = if_footer_idx;
- }
+ NIR_PASS(progress, nir, nir_opt_algebraic_late);
+ NIR_PASS(progress, nir, nir_lower_bool_to_int32);
+ NIR_PASS(progress, nir, bifrost_nir_lower_algebraic_late);
+ NIR_PASS(progress, nir, nir_lower_alu_to_scalar, NULL, NULL);
+ NIR_PASS(progress, nir, nir_lower_load_const_to_scalar);
- true_branch->literal_args[1] = false_idx;
+ /* Take us out of SSA */
+ NIR_PASS(progress, nir, nir_lower_locals_to_regs);
+ NIR_PASS(progress, nir, nir_move_vec_src_uses_to_dest);
+ NIR_PASS(progress, nir, nir_convert_from_ssa, true);
}
-int
-bifrost_compile_shader_nir(nir_shader *nir, struct bifrost_program *program)
+void
+bifrost_compile_shader_nir(nir_shader *nir, panfrost_program *program, unsigned product_id)
{
- struct compiler_context ictx = {
- .nir = nir,
- .stage = nir->info.stage,
- };
-
- struct compiler_context *ctx = &ictx;
-
- ctx->mir_temp = 0;
-
- /* Initialize at a global (not block) level hash tables */
- ctx->ssa_constants = _mesa_hash_table_u64_create(NULL);
- ctx->hash_to_temp = _mesa_hash_table_u64_create(NULL);
-
- /* Assign actual uniform location, skipping over samplers */
- ctx->uniform_nir_to_bi = _mesa_hash_table_u64_create(NULL);
+ bi_context *ctx = rzalloc(NULL, bi_context);
+ ctx->nir = nir;
+ ctx->stage = nir->info.stage;
+ ctx->quirks = bifrost_get_quirks(product_id);
+ list_inithead(&ctx->blocks);
- nir_foreach_variable(var, &nir->uniforms) {
- if (glsl_get_base_type(var->type) == GLSL_TYPE_SAMPLER) continue;
+ /* Lower gl_Position pre-optimisation, but after lowering vars to ssa
+ * (so we don't accidentally duplicate the epilogue since mesa/st has
+ * messed with our I/O quite a bit already) */
- for (int col = 0; col < glsl_get_matrix_columns(var->type); ++col) {
- int id = ctx->uniform_count++;
- _mesa_hash_table_u64_insert(ctx->uniform_nir_to_bi, var->data.driver_location + col + 1, (void *) ((uintptr_t) (id + 1)));
- }
- }
+ NIR_PASS_V(nir, nir_lower_vars_to_ssa);
if (ctx->stage == MESA_SHADER_VERTEX) {
- ctx->varying_nir_to_bi = _mesa_hash_table_u64_create(NULL);
- nir_foreach_variable(var, &nir->outputs) {
- if (var->data.location < VARYING_SLOT_VAR0) {
- if (var->data.location == VARYING_SLOT_POS)
- ctx->varying_count++;
- _mesa_hash_table_u64_insert(ctx->varying_nir_to_bi, var->data.driver_location + 1, (void *) ((uintptr_t) (1)));
-
- continue;
- }
-
- for (int col = 0; col < glsl_get_matrix_columns(var->type); ++col) {
- for (int comp = 0; comp < 4; ++comp) {
- int id = comp + ctx->varying_count++;
- _mesa_hash_table_u64_insert(ctx->varying_nir_to_bi, var->data.driver_location + col + comp + 1, (void *) ((uintptr_t) (id + 1)));
- }
- }
- }
-
- } else if (ctx->stage == MESA_SHADER_FRAGMENT) {
- ctx->outputs_nir_to_bi = _mesa_hash_table_u64_create(NULL);
- nir_foreach_variable(var, &nir->outputs) {
- if (var->data.location >= FRAG_RESULT_DATA0 && var->data.location <= FRAG_RESULT_DATA7) {
- int id = ctx->outputs_count++;
- printf("Driver location: %d with id %d\n", var->data.location + 1, id);
- _mesa_hash_table_u64_insert(ctx->outputs_nir_to_bi, var->data.location + 1, (void *) ((uintptr_t) (id + 1)));
- }
- }
+ NIR_PASS_V(nir, nir_lower_viewport_transform);
+ NIR_PASS_V(nir, nir_lower_point_size, 1.0, 1024.0);
}
- /* Optimisation passes */
- optimize_nir(nir);
+ NIR_PASS_V(nir, nir_split_var_copies);
+ NIR_PASS_V(nir, nir_lower_global_vars_to_local);
+ NIR_PASS_V(nir, nir_lower_var_copies);
+ NIR_PASS_V(nir, nir_lower_vars_to_ssa);
+ NIR_PASS_V(nir, nir_lower_io, nir_var_all, glsl_type_size, 0);
+ NIR_PASS_V(nir, nir_lower_ssbo);
-#ifdef BI_DEBUG
+ bi_optimize_nir(nir);
nir_print_shader(nir, stdout);
-#endif
- /* Generate machine IR for shader */
- nir_foreach_function(func, nir) {
- nir_builder _b;
- ctx->b = &_b;
- nir_builder_init(ctx->b, func->impl);
+ panfrost_nir_assign_sysvals(&ctx->sysvals, nir);
+ program->sysval_count = ctx->sysvals.sysval_count;
+ memcpy(program->sysvals, ctx->sysvals.sysvals, sizeof(ctx->sysvals.sysvals[0]) * ctx->sysvals.sysval_count);
- list_inithead(&ctx->blocks);
- ctx->block_count = 0;
- ctx->func = func;
+ nir_foreach_function(func, nir) {
+ if (!func->impl)
+ continue;
+ ctx->impl = func->impl;
emit_cf_list(ctx, &func->impl->body);
-
- break; // XXX: Once we support multi function shaders then implement
+ break; /* TODO: Multi-function shaders */
}
- util_dynarray_init(&program->compiled, NULL);
-
- // MIR pre-RA optimizations
+ bi_foreach_block(ctx, _block) {
+ bi_block *block = (bi_block *) _block;
+ bi_lower_combine(ctx, block);
+ }
bool progress = false;
do {
progress = false;
- mir_foreach_block(ctx, block) {
- // XXX: Not yet working
-// progress |= bifrost_opt_branch_fusion(ctx, block);
+
+ bi_foreach_block(ctx, _block) {
+ bi_block *block = (bi_block *) _block;
+ progress |= bi_opt_dead_code_eliminate(ctx, block);
}
- } while (progress);
+ } while (progress);
- bifrost_schedule_program(ctx);
+ bi_print_shader(ctx, stdout);
+ bi_schedule(ctx);
+ bi_register_allocate(ctx);
+ bi_print_shader(ctx, stdout);
+ bi_pack(ctx, &program->compiled);
+ disassemble_bifrost(stdout, program->compiled.data, program->compiled.size, true);
-#ifdef BI_DEBUG
- nir_print_shader(nir, stdout);
- disassemble_bifrost(program->compiled.data, program->compiled.size, false);
-#endif
- return 0;
+ ralloc_free(ctx);
}