};
} midgard_branch;
+static bool
+midgard_is_branch_unit(unsigned unit)
+{
+ return (unit == ALU_ENAB_BRANCH) || (unit == ALU_ENAB_BR_COMPACT);
+}
+
/* Generic in-memory data type representing a single logical instruction, rather
* than a single instruction group. This is the preferred form for code gen.
 * Multiple midgard_instructions will later be combined during scheduling,
return u;
}
+static midgard_vector_alu_src
+vector_alu_from_unsigned(unsigned u)
+{
+ midgard_vector_alu_src s;
+ memcpy(&s, &u, sizeof(s));
+ return s;
+}
+
/* Inputs a NIR ALU source, with modifiers attached if necessary, and outputs
* the corresponding Midgard source */
static midgard_vector_alu_src
-vector_alu_modifiers(nir_alu_src *src)
+vector_alu_modifiers(nir_alu_src *src, bool is_int)
{
if (!src) return blank_alu_src;
midgard_vector_alu_src alu_src = {
- .abs = src->abs,
- .negate = src->negate,
.rep_low = 0,
.rep_high = 0,
.half = 0, /* TODO */
.swizzle = SWIZZLE_FROM_ARRAY(src->swizzle)
};
+ if (is_int) {
+ /* TODO: sign-extend/zero-extend */
+ alu_src.mod = midgard_int_normal;
+
+ /* These should have been lowered away */
+ assert(!(src->abs || src->negate));
+ } else {
+ alu_src.mod = (src->abs << 0) | (src->negate << 1);
+ }
+
return alu_src;
}
+static bool
+mir_nontrivial_mod(midgard_vector_alu_src src, bool is_int, unsigned mask)
+{
+ /* abs or neg */
+ if (!is_int && src.mod) return true;
+
+ /* swizzle */
+ for (unsigned c = 0; c < 4; ++c) {
+ if (!(mask & (1 << c))) continue;
+ if (((src.swizzle >> (2*c)) & 3) != c) return true;
+ }
+
+ return false;
+}
+
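
To make the new encoding concrete: a small standalone sketch (not part of the patch; names prefixed ex_ are hypothetical, the real flags are the MIDGARD_FLOAT_MOD_* bits used later in this patch) of the float modifier packing done by vector_alu_modifiers and the per-channel swizzle test performed by mir_nontrivial_mod above.

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-ins: abs lives in bit 0, negate in bit 1 of the mod field */
#define EX_FLOAT_MOD_ABS (1 << 0)
#define EX_FLOAT_MOD_NEG (1 << 1)

/* The swizzle packs one 2-bit source selector per destination channel, so
 * only channels present in the mask are checked, as in mir_nontrivial_mod */
static bool
ex_swizzle_is_identity(unsigned swizzle, unsigned mask)
{
        for (unsigned c = 0; c < 4; ++c) {
                if (!(mask & (1 << c))) continue;       /* channel not written */
                if (((swizzle >> (2 * c)) & 3) != c)    /* reads another channel */
                        return false;
        }

        return true;
}

int
main(void)
{
        /* abs set, negate clear, as vector_alu_modifiers packs it for floats */
        unsigned mod = (1 << 0) | (0 << 1);

        unsigned xyzw = (0 << 0) | (1 << 2) | (2 << 4) | (3 << 6);
        unsigned yyyy = (1 << 0) | (1 << 2) | (1 << 4) | (1 << 6);

        printf("abs %d, neg %d\n", !!(mod & EX_FLOAT_MOD_ABS), !!(mod & EX_FLOAT_MOD_NEG));
        printf("xyzw trivial: %d\n", ex_swizzle_is_identity(xyzw, 0xF));
        printf("yyyy trivial: %d\n", ex_swizzle_is_identity(yyyy, 0xF));
        return 0;
}
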
/* 'Intrinsic' move for misc aliasing uses independent of actual NIR ALU code */
static midgard_instruction
},
.alu = {
.op = midgard_alu_op_fmov,
- .reg_mode = midgard_reg_mode_full,
+ .reg_mode = midgard_reg_mode_32,
.dest_override = midgard_dest_override_none,
.mask = 0xFF,
.src1 = vector_alu_srco_unsigned(zero_alu_src),
/* List of midgard_instructions emitted for the current block */
midgard_block *current_block;
- /* The index corresponding to the current loop, e.g. for breaks/contineus */
- int current_loop;
+ /* The current "depth" of the loop, for disambiguating breaks/continues
+ * when using nested loops */
+ int current_loop_depth;
/* Constants which have been loaded, for later inlining */
struct hash_table_u64 *ssa_constants;
return list_first_entry(&(ins->link), midgard_instruction, link);
}
-static midgard_block *
-mir_next_block(struct midgard_block *blk)
-{
- return list_first_entry(&(blk->link), midgard_block, link);
-}
-
-
#define mir_foreach_block(ctx, v) list_for_each_entry(struct midgard_block, v, &ctx->blocks, link)
#define mir_foreach_block_from(ctx, from, v) list_for_each_entry_from(struct midgard_block, v, from, &ctx->blocks, link)
#define mir_foreach_instr_in_block_safe(block, v) list_for_each_entry_safe(struct midgard_instruction, v, &block->instructions, link)
#define mir_foreach_instr_in_block_safe_rev(block, v) list_for_each_entry_safe_rev(struct midgard_instruction, v, &block->instructions, link)
#define mir_foreach_instr_in_block_from(block, v, from) list_for_each_entry_from(struct midgard_instruction, v, from, &block->instructions, link)
+#define mir_foreach_instr_in_block_from_rev(block, v, from) list_for_each_entry_from_rev(struct midgard_instruction, v, from, &block->instructions, link)
static midgard_instruction *
switch (ins->type) {
case TAG_ALU_4: {
midgard_alu_op op = ins->alu.op;
- const char *name = alu_opcode_names[op];
+ const char *name = alu_opcode_props[op].name;
if (ins->unit)
printf("%d.", ins->unit);
printf("}\n");
}
-
-
static void
attach_constants(compiler_context *ctx, midgard_instruction *ins, void *constants, int name)
{
nir_ssa_def_rewrite_uses(&alu->dest.dest.ssa, nir_src_for_ssa(sum));
}
+/* Lower csel with mixed condition channels to multiple csel instructions. For
+ * context, the csel ops on Midgard are vector in *outputs*, but not in
+ * *conditions*. So, if the condition is e.g. yyyy, a single op can select a
+ * vec4. But if the condition is e.g. xyzw, four ops are needed as the ISA
+ * can't cope with the divergent channels. */
+
+static void
+midgard_nir_lower_mixed_csel_body(nir_builder *b, nir_alu_instr *alu)
+{
+ if (alu->op != nir_op_bcsel)
+ return;
+
+ b->cursor = nir_before_instr(&alu->instr);
+
+ /* Must be run before registering */
+ assert(alu->dest.dest.is_ssa);
+
+ /* Check for mixed condition */
+
+ unsigned comp = alu->src[0].swizzle[0];
+ unsigned nr_components = alu->dest.dest.ssa.num_components;
+
+ bool mixed = false;
+
+ for (unsigned c = 1; c < nr_components; ++c)
+ mixed |= (alu->src[0].swizzle[c] != comp);
+
+ if (!mixed)
+ return;
+
+ /* We're mixed, so lower */
+
+ assert(nr_components <= 4);
+ nir_ssa_def *results[4];
+
+ nir_ssa_def *cond = nir_ssa_for_alu_src(b, alu, 0);
+ nir_ssa_def *choice0 = nir_ssa_for_alu_src(b, alu, 1);
+ nir_ssa_def *choice1 = nir_ssa_for_alu_src(b, alu, 2);
+
+ for (unsigned c = 0; c < nr_components; ++c) {
+ results[c] = nir_bcsel(b,
+ nir_channel(b, cond, c),
+ nir_channel(b, choice0, c),
+ nir_channel(b, choice1, c));
+ }
+
+ /* Replace with our scalarized version */
+
+ nir_ssa_def *result = nir_vec(b, results, nr_components);
+ nir_ssa_def_rewrite_uses(&alu->dest.dest.ssa, nir_src_for_ssa(result));
+}
+
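
A minimal scalar model of what the lowering above buys us (illustrative only, not part of the patch; all ex_ names are hypothetical):

#include <stdbool.h>

struct ex_vec4 { float v[4]; };

/* The hardware csel broadcasts a single condition channel (e.g. cond.yyyy)
 * across the whole vector, so a divergent condition swizzle (e.g. cond.xyzw)
 * has to be split into one select per channel, exactly the nir_bcsel per
 * channel emitted by midgard_nir_lower_mixed_csel_body */
static struct ex_vec4
ex_mixed_bcsel(const bool cond[4], struct ex_vec4 a, struct ex_vec4 b)
{
        struct ex_vec4 r;

        for (unsigned c = 0; c < 4; ++c)
                r.v[c] = cond[c] ? a.v[c] : b.v[c];

        return r;
}

int
main(void)
{
        bool cond[4] = { true, false, true, false };    /* mixed condition */
        struct ex_vec4 a = { { 1, 1, 1, 1 } };
        struct ex_vec4 b = { { 2, 2, 2, 2 } };
        struct ex_vec4 r = ex_mixed_bcsel(cond, a, b);

        return (r.v[0] == 1 && r.v[1] == 2) ? 0 : 1;
}
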
static int
midgard_nir_sysval_for_intrinsic(nir_intrinsic_instr *instr)
{
return progress;
}
+static bool
+midgard_nir_lower_mixed_csel(nir_shader *shader)
+{
+ bool progress = false;
+
+ nir_foreach_function(function, shader) {
+ if (!function->impl) continue;
+
+ nir_builder _b;
+ nir_builder *b = &_b;
+ nir_builder_init(b, function->impl);
+
+ nir_foreach_block(block, function->impl) {
+ nir_foreach_instr_safe(instr, block) {
+ if (instr->type != nir_instr_type_alu) continue;
+
+ nir_alu_instr *alu = nir_instr_as_alu(instr);
+ midgard_nir_lower_mixed_csel_body(b, alu);
+
+ progress |= true;
+ }
+ }
+
+ nir_metadata_preserve(function->impl, nir_metadata_block_index | nir_metadata_dominance);
+
+ }
+
+ return progress;
+}
+
static void
optimise_nir(nir_shader *nir)
{
bool progress;
+ unsigned lower_flrp =
+ (nir->options->lower_flrp16 ? 16 : 0) |
+ (nir->options->lower_flrp32 ? 32 : 0) |
+ (nir->options->lower_flrp64 ? 64 : 0);
NIR_PASS(progress, nir, nir_lower_regs_to_ssa);
NIR_PASS(progress, nir, midgard_nir_lower_fdot2);
+ NIR_PASS(progress, nir, midgard_nir_lower_mixed_csel);
nir_lower_tex_options lower_tex_options = {
.lower_rect = true
NIR_PASS(progress, nir, nir_opt_peephole_select, 64, false, true);
NIR_PASS(progress, nir, nir_opt_algebraic);
NIR_PASS(progress, nir, nir_opt_constant_folding);
+
+ if (lower_flrp != 0) {
+ bool lower_flrp_progress;
+ NIR_PASS(lower_flrp_progress,
+ nir,
+ nir_lower_flrp,
+ lower_flrp,
+ false /* always_precise */,
+ nir->options->lower_ffma);
+ if (lower_flrp_progress) {
+ NIR_PASS(progress, nir,
+ nir_opt_constant_folding);
+ progress = true;
+ }
+
+ /* Nothing should rematerialize any flrps, so we only
+ * need to do this lowering once.
+ */
+ lower_flrp = 0;
+ }
+
NIR_PASS(progress, nir, nir_opt_undef);
NIR_PASS(progress, nir, nir_opt_loop_unroll,
nir_var_shader_in |
/* Channel count is off-by-one to fit in two-bits (0 channel makes no
* sense) */
- unsigned channel_count = GET_CHANNEL_COUNT(alu_opcode_props[alu->op]);
+ unsigned channel_count = GET_CHANNEL_COUNT(alu_opcode_props[alu->op].props);
/* If there is a fixed channel count, construct the appropriate mask */
},
.alu = {
.op = midgard_alu_op_iand,
- .reg_mode = midgard_reg_mode_full,
+ .reg_mode = midgard_reg_mode_32,
.dest_override = midgard_dest_override_none,
.mask = (0x3 << 6), /* w */
.src1 = vector_alu_srco_unsigned(alu_src),
},
.alu = {
.op = midgard_alu_op_imov,
- .reg_mode = midgard_reg_mode_full,
+ .reg_mode = midgard_reg_mode_32,
.dest_override = midgard_dest_override_none,
.mask = (0x3 << 6), /* w */
.src1 = vector_alu_srco_unsigned(zero_alu_src),
op = midgard_alu_op_##_op; \
break;
+static bool
+nir_is_fzero_constant(nir_src src)
+{
+ if (!nir_src_is_const(src))
+ return false;
+
+ for (unsigned c = 0; c < nir_src_num_components(src); ++c) {
+ if (nir_src_comp_as_float(src, c) != 0.0)
+ return false;
+ }
+
+ return true;
+}
+
static void
emit_alu(compiler_context *ctx, nir_alu_instr *instr)
{
ALU_CASE(isub, isub);
ALU_CASE(imul, imul);
ALU_CASE(iabs, iabs);
-
- /* XXX: Use fmov, not imov, since imov was causing major
- * issues with texture precision? XXX research */
ALU_CASE(imov, imov);
ALU_CASE(feq32, feq);
ALU_CASE(iand, iand);
ALU_CASE(ior, ior);
ALU_CASE(ixor, ixor);
- ALU_CASE(inot, inot);
+ ALU_CASE(inot, inand);
ALU_CASE(ishl, ishl);
ALU_CASE(ishr, iasr);
ALU_CASE(ushr, ilsr);
break;
}
+ /* For a few special csel cases not handled by NIR, we can turn the csel
+ * into a bitwise op. Otherwise, we emit the condition and do a real csel */
+
case nir_op_b32csel: {
- op = midgard_alu_op_fcsel;
+ if (nir_is_fzero_constant(instr->src[2].src)) {
+ /* (b ? v : 0) = (b & v) */
+ op = midgard_alu_op_iand;
+ nr_inputs = 2;
+ } else if (nir_is_fzero_constant(instr->src[1].src)) {
+ /* (b ? 0 : v) = (!b ? v : 0) = (~b & v) = (v & ~b) */
+ op = midgard_alu_op_iandnot;
+ nr_inputs = 2;
+ instr->src[1] = instr->src[0];
+ instr->src[0] = instr->src[2];
+ } else {
+ /* Midgard features both fcsel and icsel, depending on
+ * the type of the arguments/output. However, as long
+ * as we're careful we can _always_ use icsel and
+ * _never_ need fcsel, since the latter does additional
+ * floating-point-specific processing whereas the
+ * former just moves bits on the wire. It's not obvious
+ * why these are separate opcodes, save for the ability
+ * to do things like sat/pos/abs/neg for free */
- /* csel works as a two-arg in Midgard, since the condition is hardcoded in r31.w */
- nr_inputs = 2;
+ op = midgard_alu_op_icsel;
- /* Figure out which component the condition is in */
+ /* csel works as a two-arg in Midgard, since the condition is hardcoded in r31.w */
+ nr_inputs = 2;
- unsigned comp = instr->src[0].swizzle[0];
+ /* Figure out which component the condition is in */
- /* Make sure NIR isn't throwing a mixed condition at us */
+ unsigned comp = instr->src[0].swizzle[0];
- for (unsigned c = 1; c < nr_components; ++c)
- assert(instr->src[0].swizzle[c] == comp);
+ /* Make sure NIR isn't throwing a mixed condition at us */
- /* Emit the condition into r31.w */
- emit_condition(ctx, &instr->src[0].src, false, comp);
+ for (unsigned c = 1; c < nr_components; ++c)
+ assert(instr->src[0].swizzle[c] == comp);
- /* The condition is the first argument; move the other
- * arguments up one to be a binary instruction for
- * Midgard */
+ /* Emit the condition into r31.w */
+ emit_condition(ctx, &instr->src[0].src, false, comp);
- memmove(instr->src, instr->src + 1, 2 * sizeof(nir_alu_src));
+ /* The condition is the first argument; move the other
+ * arguments up one to be a binary instruction for
+ * Midgard */
+
+ memmove(instr->src, instr->src + 1, 2 * sizeof(nir_alu_src));
+ }
break;
}
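
The bitwise shortcut above leans on NIR's 32-bit booleans being all-ones or all-zero; a small sketch of the identities it uses (illustrative only, hypothetical names):

#include <assert.h>
#include <stdint.h>

/* With NIR-style 32-bit booleans (~0 for true, 0 for false), selecting
 * against zero degenerates into a bitwise mask */
static uint32_t
ex_bcsel(uint32_t b, uint32_t v0, uint32_t v1)
{
        return (b & v0) | (~b & v1);
}

int
main(void)
{
        uint32_t t = ~0u, f = 0u, v = 0x12345678u;

        assert(ex_bcsel(t, v, 0) == (t & v));    /* (b ? v : 0) == (b & v)  -> iand    */
        assert(ex_bcsel(f, 0, v) == (v & ~f));   /* (b ? 0 : v) == (v & ~b) -> iandnot */
        return 0;
}
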
return;
}
+ /* Midgard can perform certain modifiers on the output of an ALU op */
+ midgard_outmod outmod =
+ instr->dest.saturate ? midgard_outmod_sat : midgard_outmod_none;
+
+ /* fmax(a, 0.0) can turn into a .pos modifier as an optimization */
+
+ if (instr->op == nir_op_fmax) {
+ if (nir_is_fzero_constant(instr->src[0].src)) {
+ op = midgard_alu_op_fmov;
+ nr_inputs = 1;
+ outmod = midgard_outmod_pos;
+ instr->src[0] = instr->src[1];
+ } else if (nir_is_fzero_constant(instr->src[1].src)) {
+ op = midgard_alu_op_fmov;
+ nr_inputs = 1;
+ outmod = midgard_outmod_pos;
+ }
+ }
+
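
For reference, the .pos output modifier behaves as a clamp to non-negative, which is why fmax against zero collapses to a plain fmov; a tiny sketch (illustrative only, hypothetical names):

#include <assert.h>

/* Hypothetical model of the .pos output modifier: clamp to non-negative,
 * which is exactly fmax(x, 0.0) */
static float
ex_outmod_pos(float x)
{
        return x > 0.0f ? x : 0.0f;
}

int
main(void)
{
        assert(ex_outmod_pos(-2.5f) == 0.0f);    /* fmax(-2.5, 0.0) */
        assert(ex_outmod_pos(1.5f) == 1.5f);     /* fmax( 1.5, 0.0) */
        return 0;
}
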
/* Fetch unit, quirks, etc information */
- unsigned opcode_props = alu_opcode_props[op];
+ unsigned opcode_props = alu_opcode_props[op].props;
bool quirk_flipped_r24 = opcode_props & QUIRK_FLIPPED_R24;
- /* Initialise fields common between scalar/vector instructions */
- midgard_outmod outmod = instr->dest.saturate ? midgard_outmod_sat : midgard_outmod_none;
-
/* src0 will always exist afaik, but src1 will not for 1-argument
* instructions. The latter can only be fetched if the instruction
* needs it, or else we may segfault. */
assert(0);
}
+ bool is_int = midgard_is_integer_op(op);
+
midgard_vector_alu alu = {
.op = op,
- .reg_mode = midgard_reg_mode_full,
+ .reg_mode = midgard_reg_mode_32,
.dest_override = midgard_dest_override_none,
.outmod = outmod,
/* Writemask only valid for non-SSA NIR */
.mask = expand_writemask((1 << nr_components) - 1),
- .src1 = vector_alu_srco_unsigned(vector_alu_modifiers(nirmods[0])),
- .src2 = vector_alu_srco_unsigned(vector_alu_modifiers(nirmods[1])),
+ .src1 = vector_alu_srco_unsigned(vector_alu_modifiers(nirmods[0], is_int)),
+ .src2 = vector_alu_srco_unsigned(vector_alu_modifiers(nirmods[1], is_int)),
};
/* Apply writemask if non-SSA, keeping in mind that we can't write to components that don't exist */
ins.has_constants = true;
ins.constants[0] = 0.0f;
ins.alu.src2 = vector_alu_srco_unsigned(blank_alu_src_xxxx);
+ } else if (instr->op == nir_op_inot) {
+ /* ~b = ~(b & b), so duplicate the source */
+ ins.ssa_args.src1 = ins.ssa_args.src0;
+ ins.alu.src2 = ins.alu.src1;
}
if ((opcode_props & UNITS_ALL) == UNIT_VLUT) {
for (int j = 0; j < 4; ++j)
nirmods[0]->swizzle[j] = original_swizzle[i]; /* Pull from the correct component */
- ins.alu.src1 = vector_alu_srco_unsigned(vector_alu_modifiers(nirmods[0]));
+ ins.alu.src1 = vector_alu_srco_unsigned(vector_alu_modifiers(nirmods[0], is_int));
emit_mir_instruction(ctx, ins);
}
} else {
emit_mir_instruction(ctx, ins);
}
- /* vadd.u2f hr2, abs(hr2), #0 */
+ /* vadd.u2f hr2, zext(hr2), #0 */
midgard_vector_alu_src alu_src = blank_alu_src;
- alu_src.abs = true;
+ alu_src.mod = midgard_int_zero_extend;
alu_src.half = true;
midgard_instruction u2f = {
},
.alu = {
.op = midgard_alu_op_u2f,
- .reg_mode = midgard_reg_mode_half,
+ .reg_mode = midgard_reg_mode_16,
.dest_override = midgard_dest_override_none,
.mask = 0xF,
.src1 = vector_alu_srco_unsigned(alu_src),
/* vmul.fmul.sat r1, hr2, #0.00392151 */
- alu_src.abs = false;
+ alu_src.mod = 0;
midgard_instruction fmul = {
.type = TAG_ALU_4,
},
.alu = {
.op = midgard_alu_op_fmul,
- .reg_mode = midgard_reg_mode_full,
+ .reg_mode = midgard_reg_mode_32,
.dest_override = midgard_dest_override_none,
.outmod = midgard_outmod_sat,
.mask = 0xFF,
emit_mir_instruction(ctx, ins);
}
- //midgard_pin_output(ctx, index, REGISTER_TEXTURE_BASE + in_reg);
-
break;
}
/* Emit a branch out of the loop */
struct midgard_instruction br = v_branch(false, false);
br.branch.target_type = TARGET_BREAK;
- br.branch.target_break = ctx->current_loop;
+ br.branch.target_break = ctx->current_loop_depth;
emit_mir_instruction(ctx, br);
DBG("break..\n");
if (reg >= 0) {
assert(reg < maxreg);
+ assert(g);
int r = ra_get_node_reg(g, reg);
ctx->work_registers = MAX2(ctx->work_registers, r);
return r;
return succ;
}
+/* Once registers have been decided via register allocation
+ * (allocate_registers), we need to rewrite the MIR to use registers instead of
+ * SSA */
+
static void
+install_registers(compiler_context *ctx, struct ra_graph *g)
+{
+ mir_foreach_block(ctx, block) {
+ mir_foreach_instr_in_block(block, ins) {
+ if (ins->compact_branch) continue;
+
+ ssa_args args = ins->ssa_args;
+
+ switch (ins->type) {
+ case TAG_ALU_4:
+ ins->registers.src1_reg = dealias_register(ctx, g, args.src0, ctx->temp_count);
+
+ ins->registers.src2_imm = args.inline_constant;
+
+ if (args.inline_constant) {
+ /* Encode inline 16-bit constant as a vector by default */
+
+ ins->registers.src2_reg = ins->inline_constant >> 11;
+
+ int lower_11 = ins->inline_constant & ((1 << 12) - 1);
+
+ uint16_t imm = ((lower_11 >> 8) & 0x7) | ((lower_11 & 0xFF) << 3);
+ ins->alu.src2 = imm << 2;
+ } else {
+ ins->registers.src2_reg = dealias_register(ctx, g, args.src1, ctx->temp_count);
+ }
+
+ ins->registers.out_reg = dealias_register(ctx, g, args.dest, ctx->temp_count);
+
+ break;
+
+ case TAG_LOAD_STORE_4: {
+ if (OP_IS_STORE_VARY(ins->load_store.op)) {
+ /* TODO: use ssa_args for store_vary */
+ ins->load_store.reg = 0;
+ } else {
+ bool has_dest = args.dest >= 0;
+ int ssa_arg = has_dest ? args.dest : args.src0;
+
+ ins->load_store.reg = dealias_register(ctx, g, ssa_arg, ctx->temp_count);
+ }
+
+ break;
+ }
+
+ default:
+ break;
+ }
+ }
+ }
+
+}
+
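
The inline-constant path in install_registers splits the 16-bit immediate across the src2 register field and a rearranged immediate; a standalone sketch mirroring exactly the bit shuffling written above (illustrative only, hypothetical names):

#include <stdint.h>
#include <stdio.h>

/* Hypothetical container for the two fields the patch writes */
struct ex_packed_constant {
        unsigned src2_reg;
        unsigned alu_src2;
};

static struct ex_packed_constant
ex_pack_inline_constant(int inline_constant)
{
        struct ex_packed_constant p;

        /* Upper bits of the constant go in the src2 register field */
        p.src2_reg = inline_constant >> 11;

        /* Low bits are rearranged into the immediate; note the mask below is
         * 12 bits wide, exactly as written in install_registers */
        int lower_11 = inline_constant & ((1 << 12) - 1);
        uint16_t imm = ((lower_11 >> 8) & 0x7) | ((lower_11 & 0xFF) << 3);

        p.alu_src2 = imm << 2;
        return p;
}

int
main(void)
{
        struct ex_packed_constant p = ex_pack_inline_constant(0x1234);
        printf("src2_reg %u, alu.src2 0x%x\n", p.src2_reg, p.alu_src2);
        return 0;
}
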
+/* This routine performs the actual register allocation. It should be succeeded
+ * by install_registers */
+
+static struct ra_graph *
allocate_registers(compiler_context *ctx)
{
/* First, initialize the RA */
print_mir_block(block);
}
+ /* No register allocation to do with no SSA */
+
+ if (!ctx->temp_count)
+ return NULL;
+
/* Let's actually do register allocation */
int nodes = ctx->temp_count;
struct ra_graph *g = ra_alloc_interference_graph(regs, nodes);
mir_foreach_instr_in_block(block, ins) {
if (ins->compact_branch) continue;
+ /* Dest is < 0 for store_vary instructions, which break
+ * the usual SSA conventions. Liveness analysis doesn't
+ * make sense on these instructions, so skip them to
+ * avoid memory corruption */
+
+ if (ins->ssa_args.dest < 0) continue;
+
if (ins->ssa_args.dest < SSA_FIXED_MINIMUM) {
/* If this destination is not yet live, it is now since we just wrote it */
free(live_start);
free(live_end);
- mir_foreach_block(ctx, block) {
- mir_foreach_instr_in_block(block, ins) {
- if (ins->compact_branch) continue;
-
- ssa_args args = ins->ssa_args;
-
- switch (ins->type) {
- case TAG_ALU_4:
- ins->registers.src1_reg = dealias_register(ctx, g, args.src0, nodes);
-
- ins->registers.src2_imm = args.inline_constant;
-
- if (args.inline_constant) {
- /* Encode inline 16-bit constant as a vector by default */
-
- ins->registers.src2_reg = ins->inline_constant >> 11;
-
- int lower_11 = ins->inline_constant & ((1 << 12) - 1);
-
- uint16_t imm = ((lower_11 >> 8) & 0x7) | ((lower_11 & 0xFF) << 3);
- ins->alu.src2 = imm << 2;
- } else {
- ins->registers.src2_reg = dealias_register(ctx, g, args.src1, nodes);
- }
-
- ins->registers.out_reg = dealias_register(ctx, g, args.dest, nodes);
-
- break;
-
- case TAG_LOAD_STORE_4: {
- if (OP_IS_STORE_VARY(ins->load_store.op)) {
- /* TODO: use ssa_args for store_vary */
- ins->load_store.reg = 0;
- } else {
- bool has_dest = args.dest >= 0;
- int ssa_arg = has_dest ? args.dest : args.src0;
-
- ins->load_store.reg = dealias_register(ctx, g, ssa_arg, nodes);
- }
-
- break;
- }
-
- default:
- break;
- }
- }
- }
+ return g;
}
/* Midgard IR only knows vector ALU types, but we sometimes need to actually
}
static unsigned
-vector_to_scalar_source(unsigned u)
+vector_to_scalar_source(unsigned u, bool is_int)
{
midgard_vector_alu_src v;
memcpy(&v, &u, sizeof(v));
+ /* TODO: Integers */
+
midgard_scalar_alu_src s = {
- .abs = v.abs,
- .negate = v.negate,
.full = !v.half,
.component = (v.swizzle & 3) << 1
};
+ if (is_int) {
+ /* TODO */
+ } else {
+ s.abs = v.mod & MIDGARD_FLOAT_MOD_ABS;
+ s.negate = v.mod & MIDGARD_FLOAT_MOD_NEG;
+ }
+
unsigned o;
memcpy(&o, &s, sizeof(s));
static midgard_scalar_alu
vector_to_scalar_alu(midgard_vector_alu v, midgard_instruction *ins)
{
+ bool is_int = midgard_is_integer_op(v.op);
+
/* The output component is from the mask */
midgard_scalar_alu s = {
.op = v.op,
- .src1 = vector_to_scalar_source(v.src1),
- .src2 = vector_to_scalar_source(v.src2),
+ .src1 = vector_to_scalar_source(v.src1, is_int),
+ .src2 = vector_to_scalar_source(v.src2, is_int),
.unknown = 0,
.outmod = v.outmod,
.output_full = 1, /* TODO: Half */
if (!unit) {
int op = ains->alu.op;
- int units = alu_opcode_props[op];
+ int units = alu_opcode_props[op].props;
/* TODO: Promotion of scalars to vectors */
int vector = ((!is_single_component_mask(ains->alu.mask)) || ((units & UNITS_SCALAR) == 0)) && (units & UNITS_ANY_VECTOR);
static void
schedule_program(compiler_context *ctx)
{
- allocate_registers(ctx);
+ /* We run RA prior to scheduling */
+ struct ra_graph *g = allocate_registers(ctx);
+ install_registers(ctx, g);
mir_foreach_block(ctx, block) {
schedule_block(ctx, block);
int op = ins->alu.op;
if (ins->ssa_args.src0 == SSA_FIXED_REGISTER(REGISTER_CONSTANT)) {
- /* Flip based on op. Fallthrough intentional */
-
switch (op) {
- /* These ops require an operational change to flip their arguments TODO */
+ /* These ops require an operational change to flip
+ * their arguments TODO */
case midgard_alu_op_flt:
case midgard_alu_op_fle:
case midgard_alu_op_ilt:
case midgard_alu_op_ile:
case midgard_alu_op_fcsel:
case midgard_alu_op_icsel:
- case midgard_alu_op_isub:
- DBG("Missed non-commutative flip (%s)\n", alu_opcode_names[op]);
+ DBG("Missed non-commutative flip (%s)\n", alu_opcode_props[op].name);
+ default:
break;
+ }
- /* These ops are commutative and Just Flip */
- case midgard_alu_op_fne:
- case midgard_alu_op_fadd:
- case midgard_alu_op_fmul:
- case midgard_alu_op_fmin:
- case midgard_alu_op_fmax:
- case midgard_alu_op_iadd:
- case midgard_alu_op_imul:
- case midgard_alu_op_feq:
- case midgard_alu_op_ieq:
- case midgard_alu_op_ine:
- case midgard_alu_op_iand:
- case midgard_alu_op_ior:
- case midgard_alu_op_ixor:
+ if (alu_opcode_props[op].props & OP_COMMUTES) {
/* Flip the SSA numbers */
ins->ssa_args.src0 = ins->ssa_args.src1;
ins->ssa_args.src1 = SSA_FIXED_REGISTER(REGISTER_CONSTANT);
src_temp = ins->alu.src2;
ins->alu.src2 = ins->alu.src1;
ins->alu.src1 = src_temp;
-
- default:
- break;
}
}
if (scaled_constant != iconstants[component])
continue;
} else {
- scaled_constant = _mesa_float_to_half((float) ins->constants[component]);
+ float original = (float) ins->constants[component];
+ scaled_constant = _mesa_float_to_half(original);
+
+ /* Check for loss of precision. If this is
+ * mediump, we don't care, but for a highp
+ * shader, we need to pay attention. NIR
+ * doesn't yet tell us which mode we're in!
+ * Practically this prevents most constants
+ * from being inlined, sadly. */
+
+ float fp32 = _mesa_half_to_float(scaled_constant);
+
+ if (fp32 != original)
+ continue;
}
/* We don't know how to handle these with a constant */
- if (src->abs || src->negate || src->half || src->rep_low || src->rep_high) {
+ if (src->mod || src->half || src->rep_low || src->rep_high) {
DBG("Bailing inline constant...\n");
continue;
}
}
}
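
The precision guard above only inlines a constant when the fp16 round trip reproduces the fp32 value bit-exactly; a minimal sketch, assuming Mesa's half-float helpers from util/half_float.h (the same _mesa_float_to_half / _mesa_half_to_float used in the hunk):

#include <stdbool.h>
#include <stdint.h>

#include "util/half_float.h"

/* Only inline a constant when the fp16 round trip is bit-exact, since we
 * can't yet tell a mediump shader from a highp one */
static bool
ex_constant_fits_fp16(float original)
{
        uint16_t half = _mesa_float_to_half(original);
        float roundtrip = _mesa_half_to_float(half);

        return roundtrip == original;
}

int
main(void)
{
        /* 0.5 survives the fp16 round trip exactly; 0.1 does not */
        return (ex_constant_fits_fp16(0.5f) && !ex_constant_fits_fp16(0.1f)) ? 0 : 1;
}
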
-#define AS_SRC(to, u) \
- int q##to = ins->alu.src2; \
- midgard_vector_alu_src *to = (midgard_vector_alu_src *) &q##to;
+/* Basic dead code elimination on the MIR itself, which cleans up e.g. the
+ * texture pipeline */
-/* Removing unused moves is necessary to clean up the texture pipeline results.
- *
- * To do so, we find moves in the MIR. We check if their destination is live later. If it's not, the move is redundant. */
+static bool
+midgard_opt_dead_code_eliminate(compiler_context *ctx, midgard_block *block)
+{
+ bool progress = false;
+
+ mir_foreach_instr_in_block_safe(block, ins) {
+ if (ins->type != TAG_ALU_4) continue;
+ if (ins->compact_branch) continue;
+
+ if (ins->ssa_args.dest >= SSA_FIXED_MINIMUM) continue;
+ if (midgard_is_pinned(ctx, ins->ssa_args.dest)) continue;
+ if (is_live_after(ctx, block, ins, ins->ssa_args.dest)) continue;
+
+ mir_remove_instruction(ins);
+ progress = true;
+ }
+
+ return progress;
+}
+
+static bool
+midgard_opt_copy_prop(compiler_context *ctx, midgard_block *block)
+{
+ bool progress = false;
+
+ mir_foreach_instr_in_block_safe(block, ins) {
+ if (ins->type != TAG_ALU_4) continue;
+ if (!OP_IS_MOVE(ins->alu.op)) continue;
+
+ unsigned from = ins->ssa_args.src1;
+ unsigned to = ins->ssa_args.dest;
+
+ /* We only work on pure SSA */
+
+ if (to >= SSA_FIXED_MINIMUM) continue;
+ if (from >= SSA_FIXED_MINIMUM) continue;
+ if (to >= ctx->func->impl->ssa_alloc) continue;
+ if (from >= ctx->func->impl->ssa_alloc) continue;
+
+ /* Also, if the move has side effects, we're helpless */
+
+ midgard_vector_alu_src src =
+ vector_alu_from_unsigned(ins->alu.src2);
+ unsigned mask = squeeze_writemask(ins->alu.mask);
+ bool is_int = midgard_is_integer_op(ins->alu.op);
+
+ if (mir_nontrivial_mod(src, is_int, mask)) continue;
+ if (ins->alu.outmod != midgard_outmod_none) continue;
+
+ mir_foreach_instr_in_block_from(block, v, mir_next_op(ins)) {
+ if (v->ssa_args.src0 == to) {
+ v->ssa_args.src0 = from;
+ progress = true;
+ }
+
+ if (v->ssa_args.src1 == to && !v->ssa_args.inline_constant) {
+ v->ssa_args.src1 = from;
+ progress = true;
+ }
+ }
+ }
+
+ return progress;
+}
+
+static bool
+midgard_opt_copy_prop_tex(compiler_context *ctx, midgard_block *block)
+{
+ bool progress = false;
+
+ mir_foreach_instr_in_block_safe(block, ins) {
+ if (ins->type != TAG_ALU_4) continue;
+ if (!OP_IS_MOVE(ins->alu.op)) continue;
+
+ unsigned from = ins->ssa_args.src1;
+ unsigned to = ins->ssa_args.dest;
+
+ /* Make sure it's simple enough for us to handle */
+
+ if (from >= SSA_FIXED_MINIMUM) continue;
+ if (from >= ctx->func->impl->ssa_alloc) continue;
+ if (to < SSA_FIXED_REGISTER(REGISTER_TEXTURE_BASE)) continue;
+ if (to > SSA_FIXED_REGISTER(REGISTER_TEXTURE_BASE + 1)) continue;
+
+ bool eliminated = false;
+
+ mir_foreach_instr_in_block_from_rev(block, v, mir_prev_op(ins)) {
+ /* The texture registers are not SSA so be careful.
+ * Conservatively, just stop if we hit anything that
+ * isn't an ALU op (such as a texture op, even if it
+ * may not write to where we are) */
+
+ if (v->type != TAG_ALU_4)
+ break;
+
+ if (v->ssa_args.dest == from) {
+ /* We don't want to track partial writes ... */
+ if (v->alu.mask == 0xF) {
+ v->ssa_args.dest = to;
+ eliminated = true;
+ }
+
+ break;
+ }
+ }
+
+ if (eliminated)
+ mir_remove_instruction(ins);
+
+ progress |= eliminated;
+ }
+
+ return progress;
+}
+
+/* We don't really understand the imov/fmov split, so always use fmov (but let
+ * it be imov in the IR so we don't do unsafe floating point "optimizations"
+ * and break things) */
static void
-midgard_eliminate_orphan_moves(compiler_context *ctx, midgard_block *block)
+midgard_imov_workaround(compiler_context *ctx, midgard_block *block)
{
mir_foreach_instr_in_block_safe(block, ins) {
if (ins->type != TAG_ALU_4) continue;
+ if (ins->alu.op != midgard_alu_op_imov) continue;
- if (ins->alu.op != midgard_alu_op_fmov) continue;
+ ins->alu.op = midgard_alu_op_fmov;
+ ins->alu.outmod = midgard_outmod_none;
- if (ins->ssa_args.dest >= SSA_FIXED_MINIMUM) continue;
+ /* Remove flags that don't make sense */
- if (midgard_is_pinned(ctx, ins->ssa_args.dest)) continue;
+ midgard_vector_alu_src s =
+ vector_alu_from_unsigned(ins->alu.src2);
- if (is_live_after(ctx, block, ins, ins->ssa_args.dest)) continue;
+ s.mod = 0;
- mir_remove_instruction(ins);
+ ins->alu.src2 = vector_alu_srco_unsigned(s);
}
}
},
.alu = {
.op = midgard_alu_op_fmul,
- .reg_mode = midgard_reg_mode_full,
+ .reg_mode = midgard_reg_mode_32,
.dest_override = midgard_dest_override_lower,
.mask = 0xFF,
.src1 = vector_alu_srco_unsigned(blank_alu_src),
},
.alu = {
.op = midgard_alu_op_f2u8,
- .reg_mode = midgard_reg_mode_half,
+ .reg_mode = midgard_reg_mode_16,
.dest_override = midgard_dest_override_lower,
.outmod = midgard_outmod_pos,
.mask = 0xF,
},
.alu = {
.op = midgard_alu_op_imov,
- .reg_mode = midgard_reg_mode_quarter,
+ .reg_mode = midgard_reg_mode_8,
.dest_override = midgard_dest_override_none,
.mask = 0xFF,
.src1 = vector_alu_srco_unsigned(blank_alu_src),
actualise_ssa_to_alias(ctx);
midgard_emit_store(ctx, this_block);
- midgard_eliminate_orphan_moves(ctx, this_block);
midgard_pair_load_store(ctx, this_block);
+ midgard_imov_workaround(ctx, this_block);
/* Append fragment shader epilogue (value writeout) */
if (ctx->stage == MESA_SHADER_FRAGMENT) {
/* Remember where we are */
midgard_block *start_block = ctx->current_block;
- /* Allocate a loop number for this. TODO: Nested loops. Instead of a
- * single current_loop variable, maybe we need a stack */
-
- int loop_idx = ++ctx->current_loop;
+ /* Allocate a loop number, growing the current inner loop depth */
+ int loop_idx = ++ctx->current_loop_depth;
/* Get index from before the body so we can loop back later */
int start_idx = ctx->block_count;
br_back.branch.target_block = start_idx;
emit_mir_instruction(ctx, br_back);
- /* Mark down that branch in the graph */
- midgard_block_add_successor(ctx->current_block, start_block);
+ /* Mark down that branch in the graph. Note that we're really branching
+ * to the block *after* we started in. TODO: Why doesn't the branch
+ * itself have an off-by-one then...? */
+ midgard_block_add_successor(ctx->current_block, start_block->successors[0]);
/* Find the index of the block about to follow us (note: we don't add
* one; blocks are 0-indexed so we get a fencepost problem) */
ins->branch.target_block = break_block_idx;
}
}
+
+ /* Now that we've finished emitting the loop, free up the depth again
+ * so we play nice with recursion amid nested loops */
+ --ctx->current_loop_depth;
}
static midgard_block *
util_dynarray_init(compiled, NULL);
+ /* MIR-level optimizations */
+
+ bool progress = false;
+
+ do {
+ progress = false;
+
+ mir_foreach_block(ctx, block) {
+ progress |= midgard_opt_copy_prop(ctx, block);
+ progress |= midgard_opt_copy_prop_tex(ctx, block);
+ progress |= midgard_opt_dead_code_eliminate(ctx, block);
+ }
+ } while (progress);
+
/* Schedule! */
schedule_program(ctx);