#include "main/mtypes.h"
#include "compiler/glsl/glsl_to_nir.h"
-#include "mesa/state_tracker/st_glsl_types.h"
#include "compiler/nir_types.h"
#include "main/imports.h"
#include "compiler/nir/nir_builder.h"
};
} midgard_branch;
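+/* Returns whether the given unit is the branch unit, in either its extended
+ * or compact encoding */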
+static bool
+midgard_is_branch_unit(unsigned unit)
+{
+ return (unit == ALU_ENAB_BRANCH) || (unit == ALU_ENAB_BR_COMPACT);
+}
+
/* Generic in-memory data type representing a single logical instruction, rather
 * than a single instruction group. This is the preferred form for code gen.
 * Multiple midgard_instructions will later be combined during scheduling,
/* I.e. (1 << alu_bit) */
int unit;
+ /* When emitting a bundle, should this instruction have a break forced
+ * before it? Used for r31 writes, which are valid only within a single
+ * bundle and *need* to happen as early as possible. This is a hack;
+ * TODO: remove once we have a real scheduler */
+ bool precede_break;
+
bool has_constants;
float constants[4];
uint16_t inline_constant;
/* Number of quadwords _actually_ emitted, as determined after scheduling */
unsigned quadword_count;
- struct midgard_block *next_fallthrough;
+ /* Successors: always one forward (the block after us), and maybe
+ * one backward (for a backward branch). There is no need for a
+ * second forward successor, since graph traversal will get there
+ * eventually anyway */
+ struct midgard_block *successors[2];
+ unsigned nr_successors;
+
+ /* The successors pointers form a graph, and in the case of
+ * complex control flow, this graph has cycles. To aid
+ * traversal during liveness analysis, we have a visited
+ * flag for passes to use as they see fit, provided they
+ * clean up afterwards */
+ bool visited;
} midgard_block;
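+/* Adds a successor edge, respecting the invariant that a block has at most
+ * two successors (see midgard_block above) */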
+static void
+midgard_block_add_successor(midgard_block *block, midgard_block *successor)
+{
+ block->successors[block->nr_successors++] = successor;
+ assert(block->nr_successors <= ARRAY_SIZE(block->successors));
+}
+
/* Helpers to generate midgard_instructions using macro magic, since every
* driver seems to do it that way */
return u;
}
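+/* Unpacks a 32-bit source field back into a midgard_vector_alu_src (the
+ * inverse of vector_alu_srco_unsigned), so individual fields can be
+ * inspected */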
+static midgard_vector_alu_src
+vector_alu_from_unsigned(unsigned u)
+{
+ midgard_vector_alu_src s;
+ memcpy(&s, &u, sizeof(s));
+ return s;
+}
+
/* Inputs a NIR ALU source, with modifiers attached if necessary, and outputs
* the corresponding Midgard source */
static midgard_vector_alu_src
-vector_alu_modifiers(nir_alu_src *src)
+vector_alu_modifiers(nir_alu_src *src, bool is_int)
{
if (!src) return blank_alu_src;
midgard_vector_alu_src alu_src = {
- .abs = src->abs,
- .negate = src->negate,
.rep_low = 0,
.rep_high = 0,
.half = 0, /* TODO */
.swizzle = SWIZZLE_FROM_ARRAY(src->swizzle)
};
+ if (is_int) {
+ /* TODO: sign-extend/zero-extend */
+ alu_src.mod = midgard_int_normal;
+
+ /* These should have been lowered away */
+ assert(!(src->abs || src->negate));
+ } else {
+ alu_src.mod = (src->abs << 0) | (src->negate << 1);
+ }
+
return alu_src;
}
},
.alu = {
.op = midgard_alu_op_fmov,
- .reg_mode = midgard_reg_mode_full,
+ .reg_mode = midgard_reg_mode_32,
.dest_override = midgard_dest_override_none,
.mask = 0xFF,
.src1 = vector_alu_srco_unsigned(zero_alu_src),
* don't support half-floats -- this requires changes in other parts of the
* compiler -- therefore the 16-bit versions are commented out. */
-//M_LOAD(load_attr_16);
-M_LOAD(load_attr_32);
-//M_LOAD(load_vary_16);
-M_LOAD(load_vary_32);
-//M_LOAD(load_uniform_16);
-M_LOAD(load_uniform_32);
-M_LOAD(load_color_buffer_8);
-//M_STORE(store_vary_16);
-M_STORE(store_vary_32);
-M_STORE(store_cubemap_coords);
+//M_LOAD(ld_attr_16);
+M_LOAD(ld_attr_32);
+//M_LOAD(ld_vary_16);
+M_LOAD(ld_vary_32);
+//M_LOAD(ld_uniform_16);
+M_LOAD(ld_uniform_32);
+M_LOAD(ld_color_buffer_8);
+//M_STORE(st_vary_16);
+M_STORE(st_vary_32);
+M_STORE(st_cubemap_coords);
static midgard_instruction
v_alu_br_compact_cond(midgard_jmp_writeout_op op, unsigned tag, signed offset, unsigned cond)
/* List of midgard_instructions emitted for the current block */
midgard_block *current_block;
- /* The index corresponding to the current loop, e.g. for breaks/contineus */
- int current_loop;
+ /* The current "depth" of the loop, for disambiguating breaks/continues
+ * when using nested loops */
+ int current_loop_depth;
/* Constants which have been loaded, for later inlining */
struct hash_table_u64 *ssa_constants;
return list_first_entry(&(ins->link), midgard_instruction, link);
}
-static midgard_block *
-mir_next_block(struct midgard_block *blk)
-{
- return list_first_entry(&(blk->link), midgard_block, link);
-}
-
-
#define mir_foreach_block(ctx, v) list_for_each_entry(struct midgard_block, v, &ctx->blocks, link)
#define mir_foreach_block_from(ctx, from, v) list_for_each_entry_from(struct midgard_block, v, from, &ctx->blocks, link)
#define mir_foreach_instr_in_block_safe(block, v) list_for_each_entry_safe(struct midgard_instruction, v, &block->instructions, link)
#define mir_foreach_instr_in_block_safe_rev(block, v) list_for_each_entry_safe_rev(struct midgard_instruction, v, &block->instructions, link)
#define mir_foreach_instr_in_block_from(block, v, from) list_for_each_entry_from(struct midgard_instruction, v, from, &block->instructions, link)
+#define mir_foreach_instr_in_block_from_rev(block, v, from) list_for_each_entry_from_rev(struct midgard_instruction, v, from, &block->instructions, link)
static midgard_instruction *
switch (ins->type) {
case TAG_ALU_4: {
midgard_alu_op op = ins->alu.op;
- const char *name = alu_opcode_names[op];
+ const char *name = alu_opcode_props[op].name;
if (ins->unit)
printf("%d.", ins->unit);
printf("}\n");
}
-
-
static void
attach_constants(compiler_context *ctx, midgard_instruction *ins, void *constants, int name)
{
}
static int
-glsl_type_size(const struct glsl_type *type)
+glsl_type_size(const struct glsl_type *type, bool bindless)
{
return glsl_count_attribute_slots(type, false);
}
-static int
-uniform_type_size(const struct glsl_type *type)
-{
- return st_glsl_storage_type_size(type, false);
-}
-
/* Lower fdot2 to a vector multiplication followed by channel addition */
static void
midgard_nir_lower_fdot2_body(nir_builder *b, nir_alu_instr *alu)
optimise_nir(nir_shader *nir)
{
bool progress;
+ unsigned lower_flrp =
+ (nir->options->lower_flrp16 ? 16 : 0) |
+ (nir->options->lower_flrp32 ? 32 : 0) |
+ (nir->options->lower_flrp64 ? 64 : 0);
NIR_PASS(progress, nir, nir_lower_regs_to_ssa);
NIR_PASS(progress, nir, midgard_nir_lower_fdot2);
do {
progress = false;
- NIR_PASS(progress, nir, midgard_nir_lower_algebraic);
NIR_PASS(progress, nir, nir_lower_var_copies);
NIR_PASS(progress, nir, nir_lower_vars_to_ssa);
NIR_PASS(progress, nir, nir_opt_peephole_select, 64, false, true);
NIR_PASS(progress, nir, nir_opt_algebraic);
NIR_PASS(progress, nir, nir_opt_constant_folding);
+
+ if (lower_flrp != 0) {
+ bool lower_flrp_progress = false;
+ NIR_PASS(lower_flrp_progress,
+ nir,
+ nir_lower_flrp,
+ lower_flrp,
+ false /* always_precise */,
+ nir->options->lower_ffma);
+ if (lower_flrp_progress) {
+ NIR_PASS(progress, nir,
+ nir_opt_constant_folding);
+ progress = true;
+ }
+
+ /* Nothing should rematerialize any flrps, so we only
+ * need to do this lowering once.
+ */
+ lower_flrp = 0;
+ }
+
NIR_PASS(progress, nir, nir_opt_undef);
NIR_PASS(progress, nir, nir_opt_loop_unroll,
nir_var_shader_in |
} while (progress);
NIR_PASS(progress, nir, nir_opt_algebraic_late);
+
+ /* We implement booleans as 32-bit 0/~0 */
+ NIR_PASS(progress, nir, nir_lower_bool_to_int32);
+
+ /* Now that booleans are lowered, we can run our late opts */
NIR_PASS(progress, nir, midgard_nir_lower_algebraic_late);
/* Lower mods for float ops only. Integer ops don't support modifiers
NIR_PASS(progress, nir, nir_copy_prop);
NIR_PASS(progress, nir, nir_opt_dce);
- /* We implement booleans as 32-bit 0/~0 */
- NIR_PASS(progress, nir, nir_lower_bool_to_int32);
-
/* Take us out of SSA */
NIR_PASS(progress, nir, nir_lower_locals_to_regs);
NIR_PASS(progress, nir, nir_convert_from_ssa, true);
{
nir_ssa_def def = instr->def;
- float *v = ralloc_array(NULL, float, 4);
- memcpy(v, &instr->value.f32, 4 * sizeof(float));
+ float *v = rzalloc_array(NULL, float, 4);
+ nir_const_load_to_arr(v, instr, f32);
_mesa_hash_table_u64_insert(ctx->ssa_constants, def.index + 1, v);
}
/* Channel count is off-by-one to fit in two bits (a zero-channel op makes no
 * sense) */
- unsigned channel_count = GET_CHANNEL_COUNT(alu_opcode_props[alu->op]);
+ unsigned channel_count = GET_CHANNEL_COUNT(alu_opcode_props[alu->op].props);
/* If there is a fixed channel count, construct the appropriate mask */
{
if (src->is_ssa)
return src->ssa->index;
- else
+ else {
+ assert(!src->reg.indirect);
return ctx->func->impl->ssa_alloc + src->reg.reg->index;
+ }
}
static unsigned
{
if (dst->is_ssa)
return dst->ssa.index;
- else
+ else {
+ assert(!dst->reg.indirect);
return ctx->func->impl->ssa_alloc + dst->reg.reg->index;
+ }
}
static unsigned
return nir_src_index(ctx, &src->src);
}
-/* Midgard puts conditionals in r31.w; move an arbitrary source (the output of
- * a conditional test) into that register */
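+/* Checks whether a swizzle reads more than one distinct component across the
+ * first nr_components channels, i.e. whether the source is genuinely a
+ * vector rather than a replicated scalar */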
+static bool
+nir_is_non_scalar_swizzle(nir_alu_src *src, unsigned nr_components)
+{
+ unsigned comp = src->swizzle[0];
+
+ for (unsigned c = 1; c < nr_components; ++c) {
+ if (src->swizzle[c] != comp)
+ return true;
+ }
+
+ return false;
+}
+
+/* Midgard puts scalar conditionals in r31.w; move an arbitrary source (the
+ * output of a conditional test) into that register */
static void
-emit_condition(compiler_context *ctx, nir_src *src, bool for_branch)
+emit_condition(compiler_context *ctx, nir_src *src, bool for_branch, unsigned component)
{
- /* XXX: Force component correct */
int condition = nir_src_index(ctx, src);
+ /* Source modifier that swizzles the desired component into w */
+
+ const midgard_vector_alu_src alu_src = {
+ .swizzle = SWIZZLE(component, component, component, component),
+ };
+
+ /* There is no boolean move instruction. Instead, we simulate a move by
+ * ANDing the condition with itself to get it into r31.w */
+
+ midgard_instruction ins = {
+ .type = TAG_ALU_4,
+
+ /* We need to set the conditional as close as possible */
+ .precede_break = true,
+ .unit = for_branch ? UNIT_SMUL : UNIT_SADD,
+
+ .ssa_args = {
+
+ .src0 = condition,
+ .src1 = condition,
+ .dest = SSA_FIXED_REGISTER(31),
+ },
+ .alu = {
+ .op = midgard_alu_op_iand,
+ .reg_mode = midgard_reg_mode_32,
+ .dest_override = midgard_dest_override_none,
+ .mask = (0x3 << 6), /* w */
+ .src1 = vector_alu_srco_unsigned(alu_src),
+ .src2 = vector_alu_srco_unsigned(alu_src)
+ },
+ };
+
+ emit_mir_instruction(ctx, ins);
+}
+
+/* Or, for mixed conditions (with csel_v), here's a vector version using all of
+ * r31 instead */
+
+static void
+emit_condition_mixed(compiler_context *ctx, nir_alu_src *src, unsigned nr_comp)
+{
+ int condition = nir_src_index(ctx, &src->src);
+
+ /* Source modifier carrying the condition's full swizzle */
+
+ const midgard_vector_alu_src alu_src = {
+ .swizzle = SWIZZLE_FROM_ARRAY(src->swizzle),
+ };
+
/* There is no boolean move instruction. Instead, we simulate a move by
* ANDing the condition with itself to get it into r31.w */
midgard_instruction ins = {
.type = TAG_ALU_4,
- .unit = for_branch ? UNIT_SMUL : UNIT_SADD, /* TODO: DEDUCE THIS */
+ .precede_break = true,
.ssa_args = {
.src0 = condition,
.src1 = condition,
},
.alu = {
.op = midgard_alu_op_iand,
- .reg_mode = midgard_reg_mode_full,
+ .reg_mode = midgard_reg_mode_32,
+ .dest_override = midgard_dest_override_none,
+ .mask = expand_writemask((1 << nr_comp) - 1),
+ .src1 = vector_alu_srco_unsigned(alu_src),
+ .src2 = vector_alu_srco_unsigned(alu_src)
+ },
+ };
+
+ emit_mir_instruction(ctx, ins);
+}
+
+/* Likewise, indirect offsets are put in r27.w. TODO: Allow componentwise
+ * pinning to eliminate this move in all known cases */
+
+static void
+emit_indirect_offset(compiler_context *ctx, nir_src *src)
+{
+ int offset = nir_src_index(ctx, src);
+
+ midgard_instruction ins = {
+ .type = TAG_ALU_4,
+ .ssa_args = {
+ .src0 = SSA_UNUSED_1,
+ .src1 = offset,
+ .dest = SSA_FIXED_REGISTER(REGISTER_OFFSET),
+ },
+ .alu = {
+ .op = midgard_alu_op_imov,
+ .reg_mode = midgard_reg_mode_32,
.dest_override = midgard_dest_override_none,
.mask = (0x3 << 6), /* w */
- .src1 = vector_alu_srco_unsigned(blank_alu_src_xxxx),
+ .src1 = vector_alu_srco_unsigned(zero_alu_src),
.src2 = vector_alu_srco_unsigned(blank_alu_src_xxxx)
},
};
case nir_op_##nir: \
op = midgard_alu_op_##_op; \
break;
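+/* Checks whether a NIR source is a compile-time constant equal to 0.0 in
+ * every component, enabling the fmax(a, 0.0) -> .pos fusion below */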
+static bool
+nir_is_fzero_constant(nir_src src)
+{
+ if (!nir_src_is_const(src))
+ return false;
+
+ for (unsigned c = 0; c < nir_src_num_components(src); ++c) {
+ if (nir_src_comp_as_float(src, c) != 0.0)
+ return false;
+ }
+
+ return true;
+}
static void
emit_alu(compiler_context *ctx, nir_alu_instr *instr)
ALU_CASE(fmax, fmax);
ALU_CASE(imin, imin);
ALU_CASE(imax, imax);
+ ALU_CASE(umin, umin);
+ ALU_CASE(umax, umax);
ALU_CASE(fmov, fmov);
ALU_CASE(ffloor, ffloor);
ALU_CASE(fround_even, froundeven);
ALU_CASE(isub, isub);
ALU_CASE(imul, imul);
ALU_CASE(iabs, iabs);
-
- /* XXX: Use fmov, not imov, since imov was causing major
- * issues with texture precision? XXX research */
- ALU_CASE(imov, fmov);
+ ALU_CASE(imov, imov);
ALU_CASE(feq32, feq);
ALU_CASE(fne32, fne);
ALU_CASE(iand, iand);
ALU_CASE(ior, ior);
ALU_CASE(ixor, ixor);
- ALU_CASE(inot, inot);
+ ALU_CASE(inot, inand);
ALU_CASE(ishl, ishl);
ALU_CASE(ishr, iasr);
ALU_CASE(ushr, ilsr);
}
case nir_op_b32csel: {
- op = midgard_alu_op_fcsel;
+ /* Midgard features both fcsel and icsel, depending on
+ * the type of the arguments/output. However, as long
+ * as we're careful we can _always_ use icsel and
+ * _never_ need fcsel, since the latter does additional
+ * floating-point-specific processing whereas the
+ * former just moves bits on the wire. It's not obvious
+ * why these are separate opcodes, save for the ability
+ * to do things like sat/pos/abs/neg for free */
+
+ bool mixed = nir_is_non_scalar_swizzle(&instr->src[0], nr_components);
+ op = mixed ? midgard_alu_op_icsel_v : midgard_alu_op_icsel;
/* csel works as a two-arg in Midgard, since the condition is hardcoded in r31.w */
nr_inputs = 2;
- emit_condition(ctx, &instr->src[0].src, false);
+ /* Emit the condition into r31 */
+
+ if (mixed)
+ emit_condition_mixed(ctx, &instr->src[0], nr_components);
+ else
+ emit_condition(ctx, &instr->src[0].src, false, instr->src[0].swizzle[0]);
/* The condition is the first argument; move the other
* arguments up one to be a binary instruction for
return;
}
+ /* Midgard can perform certain modifiers on output of an ALU op */
+ midgard_outmod outmod =
+ midgard_is_integer_out_op(op) ? midgard_outmod_int :
+ instr->dest.saturate ? midgard_outmod_sat : midgard_outmod_none;
+
+ /* fmax(a, 0.0) can turn into a .pos modifier as an optimization */
+
+ if (instr->op == nir_op_fmax) {
+ if (nir_is_fzero_constant(instr->src[0].src)) {
+ op = midgard_alu_op_fmov;
+ nr_inputs = 1;
+ outmod = midgard_outmod_pos;
+ instr->src[0] = instr->src[1];
+ } else if (nir_is_fzero_constant(instr->src[1].src)) {
+ op = midgard_alu_op_fmov;
+ nr_inputs = 1;
+ outmod = midgard_outmod_pos;
+ }
+ }
+
/* Fetch unit, quirks, etc information */
- unsigned opcode_props = alu_opcode_props[op];
+ unsigned opcode_props = alu_opcode_props[op].props;
bool quirk_flipped_r24 = opcode_props & QUIRK_FLIPPED_R24;
- /* Initialise fields common between scalar/vector instructions */
- midgard_outmod outmod = instr->dest.saturate ? midgard_outmod_sat : midgard_outmod_none;
-
/* src0 will always exist afaik, but src1 will not for 1-argument
* instructions. The latter can only be fetched if the instruction
* needs it, or else we may segfault. */
assert(0);
}
+ bool is_int = midgard_is_integer_op(op);
+
midgard_vector_alu alu = {
.op = op,
- .reg_mode = midgard_reg_mode_full,
+ .reg_mode = midgard_reg_mode_32,
.dest_override = midgard_dest_override_none,
.outmod = outmod,
/* Writemask only valid for non-SSA NIR */
.mask = expand_writemask((1 << nr_components) - 1),
- .src1 = vector_alu_srco_unsigned(vector_alu_modifiers(nirmods[0])),
- .src2 = vector_alu_srco_unsigned(vector_alu_modifiers(nirmods[1])),
+ .src1 = vector_alu_srco_unsigned(vector_alu_modifiers(nirmods[0], is_int)),
+ .src2 = vector_alu_srco_unsigned(vector_alu_modifiers(nirmods[1], is_int)),
};
/* Apply writemask if non-SSA, keeping in mind that we can't write to components that don't exist */
ins.has_constants = true;
ins.constants[0] = 0.0f;
ins.alu.src2 = vector_alu_srco_unsigned(blank_alu_src_xxxx);
+ } else if (instr->op == nir_op_inot) {
+ /* ~b = ~(b & b), so duplicate the source */
+ ins.ssa_args.src1 = ins.ssa_args.src0;
+ ins.alu.src2 = ins.alu.src1;
}
if ((opcode_props & UNITS_ALL) == UNIT_VLUT) {
for (int j = 0; j < 4; ++j)
nirmods[0]->swizzle[j] = original_swizzle[i]; /* Pull from the correct component */
- ins.alu.src1 = vector_alu_srco_unsigned(vector_alu_modifiers(nirmods[0]));
+ ins.alu.src1 = vector_alu_srco_unsigned(vector_alu_modifiers(nirmods[0], is_int));
emit_mir_instruction(ctx, ins);
}
} else {
#undef ALU_CASE
static void
-emit_uniform_read(compiler_context *ctx, unsigned dest, unsigned offset)
+emit_uniform_read(compiler_context *ctx, unsigned dest, unsigned offset, nir_src *indirect_offset)
{
/* TODO: half-floats */
- if (offset < ctx->uniform_cutoff) {
- /* Fast path: For the first 16 uniform,
- * accesses are 0-cycle, since they're
- * just a register fetch in the usual
- * case. So, we alias the registers
- * while we're still in SSA-space */
+ if (!indirect_offset && offset < ctx->uniform_cutoff) {
+ /* Fast path: For the first 16 uniforms, direct accesses are
+ * 0-cycle, since they're just a register fetch in the usual
+ * case. So, we alias the registers while we're still in
+ * SSA-space */
int reg_slot = 23 - offset;
alias_ssa(ctx, dest, SSA_FIXED_REGISTER(reg_slot));
} else {
- /* Otherwise, read from the 'special'
- * UBO to access higher-indexed
- * uniforms, at a performance cost */
+ /* Otherwise, read from the 'special' UBO to access
+ * higher-indexed uniforms, at a performance cost. More
+ * generally, we're emitting a UBO read instruction. */
- midgard_instruction ins = m_load_uniform_32(dest, offset);
+ midgard_instruction ins = m_ld_uniform_32(dest, offset);
/* TODO: Don't split */
ins.load_store.varying_parameters = (offset & 7) << 7;
ins.load_store.address = offset >> 3;
- ins.load_store.unknown = 0x1E00; /* xxx: what is this? */
+ if (indirect_offset) {
+ emit_indirect_offset(ctx, indirect_offset);
+ ins.load_store.unknown = 0x8700; /* xxx: what is this? */
+ } else {
+ ins.load_store.unknown = 0x1E00; /* xxx: what is this? */
+ }
+
emit_mir_instruction(ctx, ins);
}
}
/* Sysvals are prefix uniforms */
unsigned uniform = ((uintptr_t) val) - 1;
- emit_uniform_read(ctx, dest, uniform);
+ /* Emit the read itself -- this is never indirect */
+ emit_uniform_read(ctx, dest, uniform, NULL);
}
static void
emit_intrinsic(compiler_context *ctx, nir_intrinsic_instr *instr)
{
- nir_const_value *const_offset;
unsigned offset, reg;
switch (instr->intrinsic) {
case nir_intrinsic_discard_if:
- emit_condition(ctx, &instr->src[0], true);
+ emit_condition(ctx, &instr->src[0], true, COMPONENT_X);
/* fallthrough */
case nir_intrinsic_load_uniform:
case nir_intrinsic_load_input:
- const_offset = nir_src_as_const_value(instr->src[0]);
- assert (const_offset && "no indirect inputs");
+ offset = nir_intrinsic_base(instr);
+
+ bool direct = nir_src_is_const(instr->src[0]);
- offset = nir_intrinsic_base(instr) + const_offset->u32[0];
+ if (direct) {
+ offset += nir_src_as_uint(instr->src[0]);
+ }
reg = nir_dest_index(ctx, &instr->dest);
if (instr->intrinsic == nir_intrinsic_load_uniform && !ctx->is_blend) {
- emit_uniform_read(ctx, reg, ctx->sysval_count + offset);
+ emit_uniform_read(ctx, reg, ctx->sysval_count + offset, !direct ? &instr->src[0] : NULL);
} else if (ctx->stage == MESA_SHADER_FRAGMENT && !ctx->is_blend) {
/* XXX: Half-floats? */
/* TODO: swizzle, mask */
- midgard_instruction ins = m_load_vary_32(reg, offset);
+ midgard_instruction ins = m_ld_vary_32(reg, offset);
midgard_varying_parameter p = {
.is_varying = 1,
memcpy(&u, &p, sizeof(p));
ins.load_store.varying_parameters = u;
- ins.load_store.unknown = 0x1e9e; /* xxx: what is this? */
+ if (direct) {
+ /* We have the offset totally ready */
+ ins.load_store.unknown = 0x1e9e; /* xxx: what is this? */
+ } else {
+ /* We have it partially ready, but we need to
+ * add in the dynamic index, moved to r27.w */
+ emit_indirect_offset(ctx, &instr->src[0]);
+ ins.load_store.unknown = 0x79e; /* xxx: what is this? */
+ }
+
emit_mir_instruction(ctx, ins);
} else if (ctx->is_blend && instr->intrinsic == nir_intrinsic_load_uniform) {
/* Constant encoded as a pinned constant */
} else if (out->data.location == VARYING_SLOT_COL1) {
/* Destination color must be read from framebuffer */
- midgard_instruction ins = m_load_color_buffer_8(reg, 0);
+ midgard_instruction ins = m_ld_color_buffer_8(reg, 0);
ins.load_store.swizzle = 0; /* xxxx */
/* Read each component sequentially */
emit_mir_instruction(ctx, ins);
}
- /* vadd.u2f hr2, abs(hr2), #0 */
+ /* vadd.u2f hr2, zext(hr2), #0 */
midgard_vector_alu_src alu_src = blank_alu_src;
- alu_src.abs = true;
+ alu_src.mod = midgard_int_zero_extend;
alu_src.half = true;
midgard_instruction u2f = {
},
.alu = {
.op = midgard_alu_op_u2f,
- .reg_mode = midgard_reg_mode_half,
+ .reg_mode = midgard_reg_mode_16,
.dest_override = midgard_dest_override_none,
.mask = 0xF,
.src1 = vector_alu_srco_unsigned(alu_src),
/* vmul.fmul.sat r1, hr2, #0.00392151 */
- alu_src.abs = false;
+ alu_src.mod = 0;
midgard_instruction fmul = {
.type = TAG_ALU_4,
},
.alu = {
.op = midgard_alu_op_fmul,
- .reg_mode = midgard_reg_mode_full,
+ .reg_mode = midgard_reg_mode_32,
.dest_override = midgard_dest_override_none,
.outmod = midgard_outmod_sat,
.mask = 0xFF,
assert(0);
}
} else if (ctx->stage == MESA_SHADER_VERTEX) {
- midgard_instruction ins = m_load_attr_32(reg, offset);
+ midgard_instruction ins = m_ld_attr_32(reg, offset);
ins.load_store.unknown = 0x1E1E; /* XXX: What is this? */
ins.load_store.mask = (1 << instr->num_components) - 1;
emit_mir_instruction(ctx, ins);
break;
case nir_intrinsic_store_output:
- const_offset = nir_src_as_const_value(instr->src[1]);
- assert(const_offset && "no indirect outputs");
+ assert(nir_src_is_const(instr->src[1]) && "no indirect outputs");
- offset = nir_intrinsic_base(instr) + const_offset->u32[0];
+ offset = nir_intrinsic_base(instr) + nir_src_as_uint(instr->src[1]);
reg = nir_src_index(ctx, &instr->src[0]);
attach_constants(ctx, &ins, constant_value, reg + 1);
emit_mir_instruction(ctx, ins);
- midgard_instruction st = m_store_vary_32(SSA_FIXED_REGISTER(0), offset);
+ midgard_instruction st = m_st_vary_32(SSA_FIXED_REGISTER(0), offset);
st.load_store.unknown = 0x1E9E; /* XXX: What is this? */
emit_mir_instruction(ctx, st);
} else {
midgard_instruction move = v_fmov(index, alu_src, SSA_FIXED_REGISTER(27));
emit_mir_instruction(ctx, move);
- midgard_instruction st = m_store_cubemap_coords(reg, 0);
+ midgard_instruction st = m_st_cubemap_coords(reg, 0);
st.load_store.unknown = 0x24; /* XXX: What is this? */
st.load_store.mask = 0x3; /* xy? */
st.load_store.swizzle = alu_src.swizzle;
emit_mir_instruction(ctx, ins);
}
- //midgard_pin_output(ctx, index, REGISTER_TEXTURE_BASE + in_reg);
-
break;
}
/* Emit a branch out of the loop */
struct midgard_instruction br = v_branch(false, false);
br.branch.target_type = TARGET_BREAK;
- br.branch.target_break = ctx->current_loop;
+ br.branch.target_break = ctx->current_loop_depth;
emit_mir_instruction(ctx, br);
DBG("break..\n");
if (reg >= 0) {
assert(reg < maxreg);
+ assert(g);
int r = ra_get_node_reg(g, reg);
ctx->work_registers = MAX2(ctx->work_registers, r);
return r;
return false;
}
+/* Determine if a variable is live in the successors of a block */
+static bool
+is_live_after_successors(compiler_context *ctx, midgard_block *bl, int src)
+{
+ for (unsigned i = 0; i < bl->nr_successors; ++i) {
+ midgard_block *succ = bl->successors[i];
+
+ /* If we already visited, the value we're seeking
+ * isn't down this path (or we would have
+ * short-circuited already) */
+
+ if (succ->visited) continue;
+
+ /* Otherwise (it's visited *now*), check the block */
+
+ succ->visited = true;
+
+ mir_foreach_instr_in_block(succ, ins) {
+ if (midgard_is_live_in_instr(ins, src))
+ return true;
+ }
+
+ /* ...and also, check *its* successors */
+ if (is_live_after_successors(ctx, succ, src))
+ return true;
+
+ }
+
+ /* Welp. We're really not live. */
+
+ return false;
+}
+
static bool
is_live_after(compiler_context *ctx, midgard_block *block, midgard_instruction *start, int src)
{
/* Check the rest of the block for liveness */
+
mir_foreach_instr_in_block_from(block, ins, mir_next_op(start)) {
if (midgard_is_live_in_instr(ins, src))
return true;
}
- /* Check the rest of the blocks for liveness */
- mir_foreach_block_from(ctx, mir_next_block(block), b) {
- mir_foreach_instr_in_block(b, ins) {
- if (midgard_is_live_in_instr(ins, src))
- return true;
- }
- }
+ /* Check the rest of the blocks for liveness recursively */
- /* TODO: How does control flow interact in complex shaders? */
+ bool succ = is_live_after_successors(ctx, block, src);
- return false;
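+ /* Clear the visited flags set by the traversal, so later liveness
+ * queries start fresh */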
+ mir_foreach_block(ctx, block) {
+ block->visited = false;
+ }
+
+ return succ;
}
+/* Once registers have been decided via register allocation
+ * (allocate_registers), we need to rewrite the MIR to use registers instead of
+ * SSA */
+
static void
+install_registers(compiler_context *ctx, struct ra_graph *g)
+{
+ mir_foreach_block(ctx, block) {
+ mir_foreach_instr_in_block(block, ins) {
+ if (ins->compact_branch) continue;
+
+ ssa_args args = ins->ssa_args;
+
+ switch (ins->type) {
+ case TAG_ALU_4:
+ ins->registers.src1_reg = dealias_register(ctx, g, args.src0, ctx->temp_count);
+
+ ins->registers.src2_imm = args.inline_constant;
+
+ if (args.inline_constant) {
+ /* Encode inline 16-bit constant as a vector by default */
+
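+ /* The 16-bit constant does not fit in one field:
+ * the upper bits travel in the src2 register slot,
+ * while the low bits are rearranged into the src2
+ * modifier field below */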
+ ins->registers.src2_reg = ins->inline_constant >> 11;
+
+ int lower_11 = ins->inline_constant & ((1 << 12) - 1);
+
+ uint16_t imm = ((lower_11 >> 8) & 0x7) | ((lower_11 & 0xFF) << 3);
+ ins->alu.src2 = imm << 2;
+ } else {
+ ins->registers.src2_reg = dealias_register(ctx, g, args.src1, ctx->temp_count);
+ }
+
+ ins->registers.out_reg = dealias_register(ctx, g, args.dest, ctx->temp_count);
+
+ break;
+
+ case TAG_LOAD_STORE_4: {
+ if (OP_IS_STORE_VARY(ins->load_store.op)) {
+ /* TODO: use ssa_args for st_vary */
+ ins->load_store.reg = 0;
+ } else {
+ bool has_dest = args.dest >= 0;
+ int ssa_arg = has_dest ? args.dest : args.src0;
+
+ ins->load_store.reg = dealias_register(ctx, g, ssa_arg, ctx->temp_count);
+ }
+
+ break;
+ }
+
+ default:
+ break;
+ }
+ }
+ }
+
+}
+
+/* This routine performs the actual register allocation. It should be
+ * followed by install_registers */
+
+static struct ra_graph *
allocate_registers(compiler_context *ctx)
{
/* First, initialize the RA */
print_mir_block(block);
}
+ /* No register allocation to do with no SSA */
+
+ if (!ctx->temp_count)
+ return NULL;
+
/* Let's actually do register allocation */
int nodes = ctx->temp_count;
struct ra_graph *g = ra_alloc_interference_graph(regs, nodes);
mir_foreach_instr_in_block(block, ins) {
if (ins->compact_branch) continue;
+ /* Dest is < 0 for st_vary instructions, which break
+ * the usual SSA conventions. Liveness analysis doesn't
+ * make sense on these instructions, so skip them to
+ * avoid memory corruption */
+
+ if (ins->ssa_args.dest < 0) continue;
+
if (ins->ssa_args.dest < SSA_FIXED_MINIMUM) {
/* If this destination is not yet live, it is now since we just wrote it */
free(live_start);
free(live_end);
- mir_foreach_block(ctx, block) {
- mir_foreach_instr_in_block(block, ins) {
- if (ins->compact_branch) continue;
-
- ssa_args args = ins->ssa_args;
-
- switch (ins->type) {
- case TAG_ALU_4:
- ins->registers.src1_reg = dealias_register(ctx, g, args.src0, nodes);
-
- ins->registers.src2_imm = args.inline_constant;
-
- if (args.inline_constant) {
- /* Encode inline 16-bit constant as a vector by default */
-
- ins->registers.src2_reg = ins->inline_constant >> 11;
-
- int lower_11 = ins->inline_constant & ((1 << 12) - 1);
-
- uint16_t imm = ((lower_11 >> 8) & 0x7) | ((lower_11 & 0xFF) << 3);
- ins->alu.src2 = imm << 2;
- } else {
- ins->registers.src2_reg = dealias_register(ctx, g, args.src1, nodes);
- }
-
- ins->registers.out_reg = dealias_register(ctx, g, args.dest, nodes);
-
- break;
-
- case TAG_LOAD_STORE_4: {
- if (OP_IS_STORE_VARY(ins->load_store.op)) {
- /* TODO: use ssa_args for store_vary */
- ins->load_store.reg = 0;
- } else {
- bool has_dest = args.dest >= 0;
- int ssa_arg = has_dest ? args.dest : args.src0;
-
- ins->load_store.reg = dealias_register(ctx, g, ssa_arg, nodes);
- }
-
- break;
- }
-
- default:
- break;
- }
- }
- }
+ return g;
}
/* Midgard IR only knows vector ALU types, but we sometimes need to actually
}
static unsigned
-vector_to_scalar_source(unsigned u)
+vector_to_scalar_source(unsigned u, bool is_int)
{
midgard_vector_alu_src v;
memcpy(&v, &u, sizeof(v));
+ /* TODO: Integers */
+
midgard_scalar_alu_src s = {
- .abs = v.abs,
- .negate = v.negate,
.full = !v.half,
.component = (v.swizzle & 3) << 1
};
+ if (is_int) {
+ /* TODO */
+ } else {
+ s.abs = v.mod & MIDGARD_FLOAT_MOD_ABS;
+ s.negate = v.mod & MIDGARD_FLOAT_MOD_NEG;
+ }
+
unsigned o;
memcpy(&o, &s, sizeof(s));
static midgard_scalar_alu
vector_to_scalar_alu(midgard_vector_alu v, midgard_instruction *ins)
{
+ bool is_int = midgard_is_integer_op(v.op);
+
/* The output component is from the mask */
midgard_scalar_alu s = {
.op = v.op,
- .src1 = vector_to_scalar_source(v.src1),
- .src2 = vector_to_scalar_source(v.src2),
+ .src1 = vector_to_scalar_source(v.src1, is_int),
+ .src2 = vector_to_scalar_source(v.src2, is_int),
.unknown = 0,
.outmod = v.outmod,
.output_full = 1, /* TODO: Half */
/* Ensure that the chain can continue */
if (ains->type != TAG_ALU_4) break;
+ /* If there's already something in the bundle and we
+ * have weird scheduler constraints, break now */
+ if (ains->precede_break && index) break;
+
/* According to the presentation "The ARM
* Mali-T880 Mobile GPU" from HotChips 27,
* there are two pipeline stages. Branching
if (!unit) {
int op = ains->alu.op;
- int units = alu_opcode_props[op];
+ int units = alu_opcode_props[op].props;
/* TODO: Promotion of scalars to vectors */
int vector = ((!is_single_component_mask(ains->alu.mask)) || ((units & UNITS_SCALAR) == 0)) && (units & UNITS_ANY_VECTOR);
static void
schedule_program(compiler_context *ctx)
{
- allocate_registers(ctx);
+ /* We run RA prior to scheduling */
+ struct ra_graph *g = allocate_registers(ctx);
+ install_registers(ctx, g);
mir_foreach_block(ctx, block) {
schedule_block(ctx, block);
/* If there is already a constant here, we can do nothing */
if (alu->has_constants) continue;
+ /* It makes no sense to inline constants on a branch */
+ if (alu->compact_branch || alu->prepacked_branch) continue;
+
CONDITIONAL_ATTACH(src0);
if (!alu->has_constants) {
int op = ins->alu.op;
if (ins->ssa_args.src0 == SSA_FIXED_REGISTER(REGISTER_CONSTANT)) {
- /* Flip based on op. Fallthrough intentional */
-
switch (op) {
- /* These ops require an operational change to flip their arguments TODO */
+ /* These ops require an operational change to flip
+ * their arguments TODO */
case midgard_alu_op_flt:
case midgard_alu_op_fle:
case midgard_alu_op_ilt:
case midgard_alu_op_ile:
case midgard_alu_op_fcsel:
case midgard_alu_op_icsel:
- case midgard_alu_op_isub:
- DBG("Missed non-commutative flip (%s)\n", alu_opcode_names[op]);
+ DBG("Missed non-commutative flip (%s)\n", alu_opcode_props[op].name);
+ default:
break;
+ }
- /* These ops are commutative and Just Flip */
- case midgard_alu_op_fne:
- case midgard_alu_op_fadd:
- case midgard_alu_op_fmul:
- case midgard_alu_op_fmin:
- case midgard_alu_op_fmax:
- case midgard_alu_op_iadd:
- case midgard_alu_op_imul:
- case midgard_alu_op_feq:
- case midgard_alu_op_ieq:
- case midgard_alu_op_ine:
- case midgard_alu_op_iand:
- case midgard_alu_op_ior:
- case midgard_alu_op_ixor:
+ if (alu_opcode_props[op].props & OP_COMMUTES) {
/* Flip the SSA numbers */
ins->ssa_args.src0 = ins->ssa_args.src1;
ins->ssa_args.src1 = SSA_FIXED_REGISTER(REGISTER_CONSTANT);
src_temp = ins->alu.src2;
ins->alu.src2 = ins->alu.src1;
ins->alu.src1 = src_temp;
-
- default:
- break;
}
}
/* Scale constant appropriately, if we can legally */
uint16_t scaled_constant = 0;
- /* XXX: Check legality */
if (midgard_is_integer_op(op)) {
- /* TODO: Inline integer */
- continue;
-
unsigned int *iconstants = (unsigned int *) ins->constants;
scaled_constant = (uint16_t) iconstants[component];
if (scaled_constant != iconstants[component])
continue;
} else {
- scaled_constant = _mesa_float_to_half((float) ins->constants[component]);
+ float original = (float) ins->constants[component];
+ scaled_constant = _mesa_float_to_half(original);
+
+ /* Check for loss of precision. If this is
+ * mediump, we don't care, but for a highp
+ * shader, we need to pay attention. NIR
+ * doesn't yet tell us which mode we're in!
+ * Practically this prevents most constants
+ * from being inlined, sadly. */
+
+ float fp32 = _mesa_half_to_float(scaled_constant);
+
+ if (fp32 != original)
+ continue;
}
/* We don't know how to handle these with a constant */
- if (src->abs || src->negate || src->half || src->rep_low || src->rep_high) {
+ if (src->mod || src->half || src->rep_low || src->rep_high) {
DBG("Bailing inline constant...\n");
continue;
}
}
}
-#define AS_SRC(to, u) \
- int q##to = ins->alu.src2; \
- midgard_vector_alu_src *to = (midgard_vector_alu_src *) &q##to;
-
-/* Removing unused moves is necessary to clean up the texture pipeline results.
- *
- * To do so, we find moves in the MIR. We check if their destination is live later. If it's not, the move is redundant. */
+/* Basic dead code elimination on the MIR itself, which cleans up e.g. the
+ * texture pipeline */
-static void
-midgard_eliminate_orphan_moves(compiler_context *ctx, midgard_block *block)
+static bool
+midgard_opt_dead_code_eliminate(compiler_context *ctx, midgard_block *block)
{
+ bool progress = false;
+
mir_foreach_instr_in_block_safe(block, ins) {
if (ins->type != TAG_ALU_4) continue;
-
- if (ins->alu.op != midgard_alu_op_fmov) continue;
+ if (ins->compact_branch) continue;
if (ins->ssa_args.dest >= SSA_FIXED_MINIMUM) continue;
-
if (midgard_is_pinned(ctx, ins->ssa_args.dest)) continue;
-
if (is_live_after(ctx, block, ins, ins->ssa_args.dest)) continue;
mir_remove_instruction(ins);
+ progress = true;
}
+
+ return progress;
+}
+
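+/* A source modifier is nontrivial if it would change the value being moved:
+ * a float abs/neg, or a swizzle that is not the identity on any enabled
+ * channel of the writemask */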
+static bool
+mir_nontrivial_mod(midgard_vector_alu_src src, bool is_int, unsigned mask)
+{
+ /* abs or neg */
+ if (!is_int && src.mod) return true;
+
+ /* swizzle */
+ for (unsigned c = 0; c < 4; ++c) {
+ if (!(mask & (1 << c))) continue;
+ if (((src.swizzle >> (2*c)) & 3) != c) return true;
+ }
+
+ return false;
+}
+
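+/* Trivial copy propagation for pure-SSA moves: rewrites later readers of the
+ * move's destination to read its source directly, leaving the move dead for
+ * DCE to clean up */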
+static bool
+midgard_opt_copy_prop(compiler_context *ctx, midgard_block *block)
+{
+ bool progress = false;
+
+ mir_foreach_instr_in_block_safe(block, ins) {
+ if (ins->type != TAG_ALU_4) continue;
+ if (!OP_IS_MOVE(ins->alu.op)) continue;
+
+ unsigned from = ins->ssa_args.src1;
+ unsigned to = ins->ssa_args.dest;
+
+ /* We only work on pure SSA */
+
+ if (to >= SSA_FIXED_MINIMUM) continue;
+ if (from >= SSA_FIXED_MINIMUM) continue;
+ if (to >= ctx->func->impl->ssa_alloc) continue;
+ if (from >= ctx->func->impl->ssa_alloc) continue;
+
+ /* Constant propagation is not handled here, either */
+ if (ins->ssa_args.inline_constant) continue;
+ if (ins->has_constants) continue;
+
+ /* Also, if the move has side effects, we're helpless */
+
+ midgard_vector_alu_src src =
+ vector_alu_from_unsigned(ins->alu.src2);
+ unsigned mask = squeeze_writemask(ins->alu.mask);
+ bool is_int = midgard_is_integer_op(ins->alu.op);
+
+ if (mir_nontrivial_mod(src, is_int, mask)) continue;
+ if (ins->alu.outmod != midgard_outmod_none) continue;
+
+ mir_foreach_instr_in_block_from(block, v, mir_next_op(ins)) {
+ if (v->ssa_args.src0 == to) {
+ v->ssa_args.src0 = from;
+ progress = true;
+ }
+
+ if (v->ssa_args.src1 == to && !v->ssa_args.inline_constant) {
+ v->ssa_args.src1 = from;
+ progress = true;
+ }
+ }
+ }
+
+ return progress;
+}
+
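+/* A backwards variant for moves into the fixed (non-SSA) texture registers:
+ * rather than rewriting readers, rewrite the instruction that produced the
+ * value to write the texture register directly, then delete the move */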
+static bool
+midgard_opt_copy_prop_tex(compiler_context *ctx, midgard_block *block)
+{
+ bool progress = false;
+
+ mir_foreach_instr_in_block_safe(block, ins) {
+ if (ins->type != TAG_ALU_4) continue;
+ if (!OP_IS_MOVE(ins->alu.op)) continue;
+
+ unsigned from = ins->ssa_args.src1;
+ unsigned to = ins->ssa_args.dest;
+
+ /* Make sure it's simple enough for us to handle */
+
+ if (from >= SSA_FIXED_MINIMUM) continue;
+ if (from >= ctx->func->impl->ssa_alloc) continue;
+ if (to < SSA_FIXED_REGISTER(REGISTER_TEXTURE_BASE)) continue;
+ if (to > SSA_FIXED_REGISTER(REGISTER_TEXTURE_BASE + 1)) continue;
+
+ bool eliminated = false;
+
+ mir_foreach_instr_in_block_from_rev(block, v, mir_prev_op(ins)) {
+ /* The texture registers are not SSA, so be careful.
+ * Conservatively, just stop if we hit anything other
+ * than an ALU op (such as a texture op, even one that
+ * may not write) before reaching the writer */
+
+ if (v->type != TAG_ALU_4)
+ break;
+
+ if (v->ssa_args.dest == from) {
+ /* We don't want to track partial writes ... */
+ if (v->alu.mask == 0xF) {
+ v->ssa_args.dest = to;
+ eliminated = true;
+ }
+
+ break;
+ }
+ }
+
+ if (eliminated)
+ mir_remove_instruction(ins);
+
+ progress |= eliminated;
+ }
+
+ return progress;
}
/* The following passes reorder MIR instructions to enable better scheduling */
if (c->type != TAG_LOAD_STORE_4) continue;
+ /* Stores cannot be reordered, since they have
+ * dependencies. For the same reason, indirect
+ * loads cannot be reordered as their index is
+ * loaded in r27.w */
+
if (OP_IS_STORE(c->load_store.op)) continue;
+ /* It appears the 0x800 bit is set whenever a
+ * load is direct, unset when it is indirect.
+ * Skip indirect loads. */
+
+ if (!(c->load_store.unknown & 0x800)) continue;
+
/* We found one! Move it up to pair and remove it from the old location */
mir_insert_instruction_before(ins, *c);
midgard_instruction mov = v_fmov(idx, blank_alu_src, SSA_FIXED_REGISTER(REGISTER_VARYING_BASE + high_varying_register));
- midgard_instruction st = m_store_vary_32(SSA_FIXED_REGISTER(high_varying_register), varying);
+ midgard_instruction st = m_st_vary_32(SSA_FIXED_REGISTER(high_varying_register), varying);
st.load_store.unknown = 0x1E9E; /* XXX: What is this? */
mir_insert_instruction_before(mir_next_op(ins), st);
emit_leftover_move(ctx);
}
-/* Vertex shaders do not write gl_Position as is; instead, they write a
- * transformed screen space position as a varying. See section 12.5 "Coordinate
- * Transformation" of the ES 3.2 full specification for details.
- *
- * This transformation occurs early on, as NIR and prior to optimisation, in
- * order to take advantage of NIR optimisation passes of the transform itself.
- * */
-
-static void
-write_transformed_position(nir_builder *b, nir_src input_point_src)
-{
- nir_ssa_def *input_point = nir_ssa_for_src(b, input_point_src, 4);
- nir_ssa_def *scale = nir_load_viewport_scale(b);
- nir_ssa_def *offset = nir_load_viewport_offset(b);
-
- /* World space to normalised device coordinates to screen space */
-
- nir_ssa_def *w_recip = nir_frcp(b, nir_channel(b, input_point, 3));
- nir_ssa_def *ndc_point = nir_fmul(b, nir_channels(b, input_point, 0x7), w_recip);
- nir_ssa_def *screen = nir_fadd(b, nir_fmul(b, ndc_point, scale), offset);
-
- /* gl_Position will be written out in screenspace xyz, with w set to
- * the reciprocal we computed earlier. The transformed w component is
- * then used for perspective-correct varying interpolation. The
- * transformed w component must preserve its original sign; this is
- * used in depth clipping computations */
-
- nir_ssa_def *screen_space = nir_vec4(b,
- nir_channel(b, screen, 0),
- nir_channel(b, screen, 1),
- nir_channel(b, screen, 2),
- w_recip);
-
- /* Finally, write out the transformed values to the varying */
-
- nir_intrinsic_instr *store;
- store = nir_intrinsic_instr_create(b->shader, nir_intrinsic_store_output);
- store->num_components = 4;
- nir_intrinsic_set_base(store, 0);
- nir_intrinsic_set_write_mask(store, 0xf);
- store->src[0].ssa = screen_space;
- store->src[0].is_ssa = true;
- store->src[1] = nir_src_for_ssa(nir_imm_int(b, 0));
- nir_builder_instr_insert(b, &store->instr);
-}
-
-static void
-transform_position_writes(nir_shader *shader)
-{
- nir_foreach_function(func, shader) {
- nir_foreach_block(block, func->impl) {
- nir_foreach_instr_safe(instr, block) {
- if (instr->type != nir_instr_type_intrinsic) continue;
-
- nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);
- nir_variable *out = NULL;
-
- switch (intr->intrinsic) {
- case nir_intrinsic_store_output:
- /* already had i/o lowered.. lookup the matching output var: */
- nir_foreach_variable(var, &shader->outputs) {
- int drvloc = var->data.driver_location;
-
- if (nir_intrinsic_base(intr) == drvloc) {
- out = var;
- break;
- }
- }
-
- break;
-
- default:
- break;
- }
-
- if (!out) continue;
-
- if (out->data.mode != nir_var_shader_out)
- continue;
-
- if (out->data.location != VARYING_SLOT_POS)
- continue;
-
- nir_builder b;
- nir_builder_init(&b, func->impl);
- b.cursor = nir_before_instr(instr);
-
- write_transformed_position(&b, intr->src[0]);
- nir_instr_remove(instr);
- }
- }
- }
-}
-
static void
emit_fragment_epilogue(compiler_context *ctx)
{
},
.alu = {
.op = midgard_alu_op_fmul,
- .reg_mode = midgard_reg_mode_full,
+ .reg_mode = midgard_reg_mode_32,
.dest_override = midgard_dest_override_lower,
.mask = 0xFF,
.src1 = vector_alu_srco_unsigned(blank_alu_src),
},
.alu = {
.op = midgard_alu_op_f2u8,
- .reg_mode = midgard_reg_mode_half,
+ .reg_mode = midgard_reg_mode_16,
.dest_override = midgard_dest_override_lower,
.outmod = midgard_outmod_pos,
.mask = 0xF,
},
.alu = {
.op = midgard_alu_op_imov,
- .reg_mode = midgard_reg_mode_quarter,
+ .reg_mode = midgard_reg_mode_8,
.dest_override = midgard_dest_override_none,
.mask = 0xFF,
.src1 = vector_alu_srco_unsigned(blank_alu_src),
static midgard_block *
emit_block(compiler_context *ctx, nir_block *block)
{
- midgard_block *this_block = malloc(sizeof(midgard_block));
+ midgard_block *this_block = calloc(1, sizeof(midgard_block));
list_addtail(&this_block->link, &ctx->blocks);
this_block->is_scheduled = false;
ctx->texture_index[0] = -1;
ctx->texture_index[1] = -1;
+ /* Add us as a successor to the block we are following */
+ if (ctx->current_block)
+ midgard_block_add_successor(ctx->current_block, this_block);
+
/* Set up current block */
list_inithead(&this_block->instructions);
ctx->current_block = this_block;
actualise_ssa_to_alias(ctx);
midgard_emit_store(ctx, this_block);
- midgard_eliminate_orphan_moves(ctx, this_block);
midgard_pair_load_store(ctx, this_block);
/* Append fragment shader epilogue (value writeout) */
}
}
- /* Fallthrough save */
- this_block->next_fallthrough = ctx->previous_source_block;
-
if (block == nir_start_block(ctx->func->impl))
ctx->initial_block = this_block;
{
/* Conditional branches expect the condition in r31.w; emit a move for
* that in the _previous_ block (which is the current block). */
- emit_condition(ctx, &nif->condition, true);
+ emit_condition(ctx, &nif->condition, true, COMPONENT_X);
/* Speculatively emit the branch, but we can't fill it in until later */
EMIT(branch, true, true);
/* Remember where we are */
midgard_block *start_block = ctx->current_block;
- /* Allocate a loop number for this. TODO: Nested loops. Instead of a
- * single current_loop variable, maybe we need a stack */
-
- int loop_idx = ++ctx->current_loop;
+ /* Allocate a loop number, growing the current inner loop depth */
+ int loop_idx = ++ctx->current_loop_depth;
/* Get index from before the body so we can loop back later */
int start_idx = ctx->block_count;
br_back.branch.target_block = start_idx;
emit_mir_instruction(ctx, br_back);
+ /* Mark down that branch in the graph. Note that we're really branching
+ * to the block *after* we started in. TODO: Why doesn't the branch
+ * itself have an off-by-one then...? */
+ midgard_block_add_successor(ctx->current_block, start_block->successors[0]);
+
/* Find the index of the block about to follow us (note: we don't add
* one; blocks are 0-indexed so we get a fencepost problem) */
int break_block_idx = ctx->block_count;
* now that we can allocate a block number for them */
list_for_each_entry_from(struct midgard_block, block, start_block, &ctx->blocks, link) {
- if (midgard_debug & MIDGARD_DBG_SHADERS)
- print_mir_block(block);
mir_foreach_instr_in_block(block, ins) {
if (ins->type != TAG_ALU_4) continue;
if (!ins->compact_branch) continue;
ins->branch.target_block = break_block_idx;
}
}
+
+ /* Now that we've finished emitting the loop, free up the depth again
+ * so we play nice with recursion amid nested loops */
+ --ctx->current_loop_depth;
}
static midgard_block *
nir_assign_var_locations(&nir->outputs, &nir->num_outputs, glsl_type_size);
nir_assign_var_locations(&nir->inputs, &nir->num_inputs, glsl_type_size);
- nir_assign_var_locations(&nir->uniforms, &nir->num_uniforms, uniform_type_size);
+ nir_assign_var_locations(&nir->uniforms, &nir->num_uniforms, glsl_type_size);
/* Initialize at a global (not block) level hash tables */
nir_foreach_variable(var, varyings) {
unsigned loc = var->data.driver_location;
- program->varyings[loc] = var->data.location;
+ unsigned sz = glsl_type_size(var->type, FALSE);
+
+ for (int c = 0; c < sz; ++c) {
+ program->varyings[loc + c] = var->data.location;
+ }
}
- /* Lower vars -- not I/O -- before epilogue */
+ /* Lower gl_Position pre-optimisation */
+
+ if (ctx->stage == MESA_SHADER_VERTEX)
+ NIR_PASS_V(nir, nir_lower_viewport_transform);
NIR_PASS_V(nir, nir_lower_var_copies);
NIR_PASS_V(nir, nir_lower_vars_to_ssa);
NIR_PASS_V(nir, nir_lower_var_copies);
NIR_PASS_V(nir, nir_lower_vars_to_ssa);
- NIR_PASS_V(nir, nir_lower_io, nir_var_uniform, uniform_type_size, 0);
- NIR_PASS_V(nir, nir_lower_io, nir_var_all & ~nir_var_uniform, glsl_type_size, 0);
-
- /* Append vertex epilogue before optimisation, so the epilogue itself
- * is optimised */
-
- if (ctx->stage == MESA_SHADER_VERTEX)
- transform_position_writes(nir);
+ NIR_PASS_V(nir, nir_lower_io, nir_var_all, glsl_type_size, 0);
/* Optimisation passes */
util_dynarray_init(compiled, NULL);
+ /* MIR-level optimizations */
+
+ bool progress = false;
+
+ do {
+ progress = false;
+
+ mir_foreach_block(ctx, block) {
+ progress |= midgard_opt_copy_prop(ctx, block);
+ progress |= midgard_opt_copy_prop_tex(ctx, block);
+ progress |= midgard_opt_dead_code_eliminate(ctx, block);
+ }
+ } while (progress);
+
/* Schedule! */
schedule_program(ctx);