#include "main/imports.h"
#include "compiler/nir/nir_builder.h"
#include "util/half_float.h"
+#include "util/u_math.h"
#include "util/u_debug.h"
#include "util/u_dynarray.h"
#include "util/list.h"
* driver seems to do it that way */
#define EMIT(op, ...) emit_mir_instruction(ctx, v_##op(__VA_ARGS__));
-#define SWIZZLE_XYZW SWIZZLE(COMPONENT_X, COMPONENT_Y, COMPONENT_Z, COMPONENT_W)
#define M_LOAD_STORE(name, rname, uname) \
static midgard_instruction m_##name(unsigned ssa, unsigned address) { \
midgard_instruction i = { \
.type = TAG_LOAD_STORE_4, \
+ .mask = 0xF, \
.ssa_args = { \
.rname = ssa, \
.uname = -1, \
}, \
.load_store = { \
.op = midgard_op_##name, \
- .mask = 0xF, \
.swizzle = SWIZZLE_XYZW, \
.address = address \
} \
#define M_LOAD(name) M_LOAD_STORE(name, dest, src0)
#define M_STORE(name) M_LOAD_STORE(name, src0, dest)
-const midgard_vector_alu_src blank_alu_src = {
- .swizzle = SWIZZLE(COMPONENT_X, COMPONENT_Y, COMPONENT_Z, COMPONENT_W),
-};
-
-const midgard_vector_alu_src blank_alu_src_xxxx = {
- .swizzle = SWIZZLE(COMPONENT_X, COMPONENT_X, COMPONENT_X, COMPONENT_X),
-};
-
-const midgard_scalar_alu_src blank_scalar_alu_src = {
- .full = true
-};
-
-/* Used for encoding the unused source of 1-op instructions */
-const midgard_vector_alu_src zero_alu_src = { 0 };
-
/* Inputs a NIR ALU source, with modifiers attached if necessary, and outputs
* the corresponding Midgard source */
static midgard_vector_alu_src
-vector_alu_modifiers(nir_alu_src *src, bool is_int)
+vector_alu_modifiers(nir_alu_src *src, bool is_int, unsigned broadcast_count,
+ bool half, bool sext)
{
if (!src) return blank_alu_src;
+ /* Figure out how many components there are so we can adjust the
+ * swizzle. Specifically we want to broadcast the last channel so
+ * things like ball2/3 work
+ */
+
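+ /* e.g. for ball3, an identity xyzw swizzle becomes xyzz, so the
+ * unused fourth lane just repeats the last checked channel */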
+ if (broadcast_count) {
+ uint8_t last_component = src->swizzle[broadcast_count - 1];
+
+ for (unsigned c = broadcast_count; c < NIR_MAX_VEC_COMPONENTS; ++c) {
+ src->swizzle[c] = last_component;
+ }
+ }
+
midgard_vector_alu_src alu_src = {
.rep_low = 0,
.rep_high = 0,
- .half = 0, /* TODO */
+ .half = half,
.swizzle = SWIZZLE_FROM_ARRAY(src->swizzle)
};
if (is_int) {
- /* TODO: sign-extend/zero-extend */
alu_src.mod = midgard_int_normal;
+ /* Sign/zero-extend if needed */
+
+ if (half) {
+ alu_src.mod = sext ?
+ midgard_int_sign_extend
+ : midgard_int_zero_extend;
+ }
+
/* These should have been lowered away */
assert(!(src->abs || src->negate));
} else {
return alu_src;
}
-/* 'Intrinsic' move for misc aliasing uses independent of actual NIR ALU code */
-
-static midgard_instruction
-v_fmov(unsigned src, midgard_vector_alu_src mod, unsigned dest)
-{
- midgard_instruction ins = {
- .type = TAG_ALU_4,
- .ssa_args = {
- .src0 = SSA_UNUSED_1,
- .src1 = src,
- .dest = dest,
- },
- .alu = {
- .op = midgard_alu_op_fmov,
- .reg_mode = midgard_reg_mode_32,
- .dest_override = midgard_dest_override_none,
- .mask = 0xFF,
- .src1 = vector_alu_srco_unsigned(zero_alu_src),
- .src2 = vector_alu_srco_unsigned(mod)
- },
- };
-
- return ins;
-}
-
/* load/store instructions have both 32-bit and 16-bit variants, depending on
* whether we are using vectors composed of highp or mediump. At the moment, we
* don't support half-floats -- this requires changes in other parts of the
}
}
-static void
-midgard_nir_assign_sysval_body(compiler_context *ctx, nir_instr *instr)
+static unsigned
+nir_dest_index(compiler_context *ctx, nir_dest *dst)
+{
+ if (dst->is_ssa)
+ return dst->ssa.index;
+ else {
+ assert(!dst->reg.indirect);
+ return ctx->func->impl->ssa_alloc + dst->reg.reg->index;
+ }
+}
+
+static int sysval_for_instr(compiler_context *ctx, nir_instr *instr,
+ unsigned *dest)
{
+ nir_intrinsic_instr *intr;
+ nir_dest *dst = NULL;
+ nir_tex_instr *tex;
int sysval = -1;
- if (instr->type == nir_instr_type_intrinsic) {
- nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);
+ switch (instr->type) {
+ case nir_instr_type_intrinsic:
+ intr = nir_instr_as_intrinsic(instr);
sysval = midgard_nir_sysval_for_intrinsic(intr);
+ dst = &intr->dest;
+ break;
+ case nir_instr_type_tex:
+ tex = nir_instr_as_tex(instr);
+ if (tex->op != nir_texop_txs)
+ break;
+
+ sysval = PAN_SYSVAL(TEXTURE_SIZE,
+ PAN_TXS_SYSVAL_ID(tex->texture_index,
+ nir_tex_instr_dest_size(tex) -
+ (tex->is_array ? 1 : 0),
+ tex->is_array));
+ dst = &tex->dest;
+ break;
+ default:
+ break;
}
+ if (dest && dst)
+ *dest = nir_dest_index(ctx, dst);
+
+ return sysval;
+}
+
+static void
+midgard_nir_assign_sysval_body(compiler_context *ctx, nir_instr *instr)
+{
+ int sysval;
+
+ sysval = sysval_for_instr(ctx, instr, NULL);
if (sysval < 0)
return;
NIR_PASS(progress, nir, nir_lower_regs_to_ssa);
NIR_PASS(progress, nir, midgard_nir_lower_fdot2);
+ NIR_PASS(progress, nir, nir_lower_idiv);
- nir_lower_tex_options lower_tex_options = {
- .lower_rect = true
+ nir_lower_tex_options lower_tex_1st_pass_options = {
+ .lower_rect = true,
+ .lower_txp = ~0
};
- NIR_PASS(progress, nir, nir_lower_tex, &lower_tex_options);
+ nir_lower_tex_options lower_tex_2nd_pass_options = {
+ .lower_txs_lod = true,
+ };
+
+ NIR_PASS(progress, nir, nir_lower_tex, &lower_tex_1st_pass_options);
+ NIR_PASS(progress, nir, nir_lower_tex, &lower_tex_2nd_pass_options);
do {
progress = false;
nir_var_shader_out |
nir_var_function_temp);
- /* TODO: Enable vectorize when merged upstream */
- // NIR_PASS(progress, nir, nir_opt_vectorize);
+ NIR_PASS(progress, nir, nir_opt_vectorize);
} while (progress);
/* Must be run at the end to prevent creation of fsin/fcos ops */
}
}
-static unsigned
-nir_dest_index(compiler_context *ctx, nir_dest *dst)
-{
- if (dst->is_ssa)
- return dst->ssa.index;
- else {
- assert(!dst->reg.indirect);
- return ctx->func->impl->ssa_alloc + dst->reg.reg->index;
- }
-}
-
static unsigned
nir_alu_src_index(compiler_context *ctx, nir_alu_src *src)
{
/* We need to set the conditional as close as possible */
.precede_break = true,
.unit = for_branch ? UNIT_SMUL : UNIT_SADD,
+ .mask = 1 << COMPONENT_W,
.ssa_args = {
-
.src0 = condition,
.src1 = condition,
.dest = SSA_FIXED_REGISTER(31),
},
+
.alu = {
.op = midgard_alu_op_iand,
+ .outmod = midgard_outmod_int_wrap,
.reg_mode = midgard_reg_mode_32,
.dest_override = midgard_dest_override_none,
- .mask = (0x3 << 6), /* w */
.src1 = vector_alu_srco_unsigned(alu_src),
.src2 = vector_alu_srco_unsigned(alu_src)
},
midgard_instruction ins = {
.type = TAG_ALU_4,
.precede_break = true,
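+ /* mask_of(n) = (1 << n) - 1, i.e. a writemask of the low n components */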
+ .mask = mask_of(nr_comp),
.ssa_args = {
.src0 = condition,
.src1 = condition,
},
.alu = {
.op = midgard_alu_op_iand,
+ .outmod = midgard_outmod_int_wrap,
.reg_mode = midgard_reg_mode_32,
.dest_override = midgard_dest_override_none,
- .mask = expand_writemask((1 << nr_comp) - 1),
.src1 = vector_alu_srco_unsigned(alu_src),
.src2 = vector_alu_srco_unsigned(alu_src)
},
midgard_instruction ins = {
.type = TAG_ALU_4,
+ .mask = 1 << COMPONENT_W,
.ssa_args = {
.src0 = SSA_UNUSED_1,
.src1 = offset,
},
.alu = {
.op = midgard_alu_op_imov,
+ .outmod = midgard_outmod_int_wrap,
.reg_mode = midgard_reg_mode_32,
.dest_override = midgard_dest_override_none,
- .mask = (0x3 << 6), /* w */
.src1 = vector_alu_srco_unsigned(zero_alu_src),
.src2 = vector_alu_srco_unsigned(blank_alu_src_xxxx)
},
#define ALU_CASE(nir, _op) \
case nir_op_##nir: \
op = midgard_alu_op_##_op; \
+ assert(src_bitsize == dst_bitsize); \
break;
+
+#define ALU_CASE_BCAST(nir, _op, count) \
+ case nir_op_##nir: \
+ op = midgard_alu_op_##_op; \
+ broadcast_swizzle = count; \
+ assert(src_bitsize == dst_bitsize); \
+ break;
static bool
nir_is_fzero_constant(nir_src src)
{
return true;
}
+/* Analyze the sizes of the inputs to determine which reg mode to run in. Ops
+ * needing special treatment override this anyway. */
+
+static midgard_reg_mode
+reg_mode_for_nir(nir_alu_instr *instr)
+{
+ unsigned src_bitsize = nir_src_bit_size(instr->src[0].src);
+
+ switch (src_bitsize) {
+ case 8:
+ return midgard_reg_mode_8;
+ case 16:
+ return midgard_reg_mode_16;
+ case 32:
+ return midgard_reg_mode_32;
+ case 64:
+ return midgard_reg_mode_64;
+ default:
+ unreachable("Invalid bit size");
+ }
+}
+
static void
emit_alu(compiler_context *ctx, nir_alu_instr *instr)
{
bool is_ssa = instr->dest.dest.is_ssa;
unsigned dest = nir_dest_index(ctx, &instr->dest.dest);
- unsigned nr_components = is_ssa ? instr->dest.dest.ssa.num_components : instr->dest.dest.reg.reg->num_components;
+ unsigned nr_components = nir_dest_num_components(instr->dest.dest);
unsigned nr_inputs = nir_op_infos[instr->op].num_inputs;
/* Most Midgard ALU ops have a 1:1 correspondence to NIR ops; these are
unsigned op;
+ /* Number of components valid to check for the instruction (the rest
+ * will be forced to the last), or 0 to use as-is. Relevant as
+ * ball-type instructions have a channel count in NIR but are all vec4
+ * in Midgard */
+
+ unsigned broadcast_swizzle = 0;
+
+ /* What register mode should we operate in? */
+ midgard_reg_mode reg_mode =
+ reg_mode_for_nir(instr);
+
+ /* Do we need a destination override? Used for inline
+ * type conversion */
+
+ midgard_dest_override dest_override =
+ midgard_dest_override_none;
+
+ /* Should we use a smaller (half-width) source, and if so, should we
+ * sign-extend it? */
+
+ bool half_1 = false, sext_1 = false;
+ bool half_2 = false, sext_2 = false;
+
+ unsigned src_bitsize = nir_src_bit_size(instr->src[0].src);
+ unsigned dst_bitsize = nir_dest_bit_size(instr->dest.dest);
+
switch (instr->op) {
ALU_CASE(fadd, fadd);
ALU_CASE(fmul, fmul);
ALU_CASE(iadd, iadd);
ALU_CASE(isub, isub);
ALU_CASE(imul, imul);
- ALU_CASE(iabs, iabs);
+
+ /* Zero shoved as second-arg */
+ ALU_CASE(iabs, iabsdiff);
+
ALU_CASE(mov, imov);
ALU_CASE(feq32, feq);
ALU_CASE(fexp2, fexp2);
ALU_CASE(flog2, flog2);
- ALU_CASE(f2i32, f2i);
- ALU_CASE(f2u32, f2u);
- ALU_CASE(i2f32, i2f);
- ALU_CASE(u2f32, u2f);
+ ALU_CASE(f2i32, f2i_rtz);
+ ALU_CASE(f2u32, f2u_rtz);
+ ALU_CASE(i2f32, i2f_rtz);
+ ALU_CASE(u2f32, u2f_rtz);
+
+ ALU_CASE(f2i16, f2i_rtz);
+ ALU_CASE(f2u16, f2u_rtz);
+ ALU_CASE(i2f16, i2f_rtz);
+ ALU_CASE(u2f16, u2f_rtz);
ALU_CASE(fsin, fsin);
ALU_CASE(fcos, fcos);
+ /* Second op implicit #0 */
+ ALU_CASE(inot, inor);
ALU_CASE(iand, iand);
ALU_CASE(ior, ior);
ALU_CASE(ixor, ixor);
- ALU_CASE(inot, inand);
ALU_CASE(ishl, ishl);
ALU_CASE(ishr, iasr);
ALU_CASE(ushr, ilsr);
- ALU_CASE(b32all_fequal2, fball_eq);
- ALU_CASE(b32all_fequal3, fball_eq);
+ ALU_CASE_BCAST(b32all_fequal2, fball_eq, 2);
+ ALU_CASE_BCAST(b32all_fequal3, fball_eq, 3);
ALU_CASE(b32all_fequal4, fball_eq);
- ALU_CASE(b32any_fnequal2, fbany_neq);
- ALU_CASE(b32any_fnequal3, fbany_neq);
+ ALU_CASE_BCAST(b32any_fnequal2, fbany_neq, 2);
+ ALU_CASE_BCAST(b32any_fnequal3, fbany_neq, 3);
ALU_CASE(b32any_fnequal4, fbany_neq);
- ALU_CASE(b32all_iequal2, iball_eq);
- ALU_CASE(b32all_iequal3, iball_eq);
+ ALU_CASE_BCAST(b32all_iequal2, iball_eq, 2);
+ ALU_CASE_BCAST(b32all_iequal3, iball_eq, 3);
ALU_CASE(b32all_iequal4, iball_eq);
- ALU_CASE(b32any_inequal2, ibany_neq);
- ALU_CASE(b32any_inequal3, ibany_neq);
+ ALU_CASE_BCAST(b32any_inequal2, ibany_neq, 2);
+ ALU_CASE_BCAST(b32any_inequal3, ibany_neq, 3);
ALU_CASE(b32any_inequal4, ibany_neq);
/* Source mods will be shoved in later */
ALU_CASE(fneg, fmov);
ALU_CASE(fsat, fmov);
+ /* For size conversion, we use a move. Ideally we would squash these
+ * ops together; maybe that has to happen later in NIR as part of
+ * propagation...? An earlier algebraic pass ensured we step down by
+ * exactly one size. If stepping down, we use a dest override to
+ * reduce the size; if stepping up, we use a larger-sized move with a
+ * half source and a sign/zero-extension modifier */
+
+ case nir_op_i2i8:
+ case nir_op_i2i16:
+ case nir_op_i2i32:
+ /* If we end up upscaling, we'll need a sign-extend on the
+ * operand (the second argument) */
+
+ sext_2 = true;
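+ /* fallthrough -- the unsigned cases below share the imov and
+ * size-conversion handling */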
+ case nir_op_u2u8:
+ case nir_op_u2u16:
+ case nir_op_u2u32: {
+ op = midgard_alu_op_imov;
+
+ if (dst_bitsize == (src_bitsize * 2)) {
+ /* Converting up */
+ half_2 = true;
+
+ /* Use a greater register mode */
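+ /* (the reg_mode enum is ordered by size -- 8, 16, 32, 64 -- so
+ * ++ widens by exactly one step) */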
+ reg_mode++;
+ } else if (src_bitsize == (dst_bitsize * 2)) {
+ /* Converting down */
+ dest_override = midgard_dest_override_lower;
+ }
+
+ break;
+ }
+
+ case nir_op_f2f16: {
+ assert(src_bitsize == 32);
+
+ op = midgard_alu_op_fmov;
+ dest_override = midgard_dest_override_lower;
+ break;
+ }
+
+ case nir_op_f2f32: {
+ assert(src_bitsize == 16);
+
+ op = midgard_alu_op_fmov;
+ half_2 = true;
+ reg_mode++;
+ break;
+ }
+
/* For greater-or-equal, we lower to less-or-equal and flip the
* arguments */
}
/* Midgard can perform certain modifiers on output of an ALU op */
- midgard_outmod outmod =
- midgard_is_integer_out_op(op) ? midgard_outmod_int :
- instr->dest.saturate ? midgard_outmod_sat : midgard_outmod_none;
+ unsigned outmod;
- if (instr->op == nir_op_fsat)
- outmod = midgard_outmod_sat;
+ if (midgard_is_integer_out_op(op)) {
+ outmod = midgard_outmod_int_wrap;
+ } else {
+ bool sat = instr->dest.saturate || instr->op == nir_op_fsat;
+ outmod = sat ? midgard_outmod_sat : midgard_outmod_none;
+ }
/* fmax(a, 0.0) can turn into a .pos modifier as an optimization */
bool is_int = midgard_is_integer_op(op);
+ ins.mask = mask_of(nr_components);
+
midgard_vector_alu alu = {
.op = op,
- .reg_mode = midgard_reg_mode_32,
- .dest_override = midgard_dest_override_none,
+ .reg_mode = reg_mode,
+ .dest_override = dest_override,
.outmod = outmod,
- /* Writemask only valid for non-SSA NIR */
- .mask = expand_writemask((1 << nr_components) - 1),
-
- .src1 = vector_alu_srco_unsigned(vector_alu_modifiers(nirmods[0], is_int)),
- .src2 = vector_alu_srco_unsigned(vector_alu_modifiers(nirmods[1], is_int)),
+ .src1 = vector_alu_srco_unsigned(vector_alu_modifiers(nirmods[0], is_int, broadcast_swizzle, half_1, sext_1)),
+ .src2 = vector_alu_srco_unsigned(vector_alu_modifiers(nirmods[1], is_int, broadcast_swizzle, half_2, sext_2)),
};
/* Apply writemask if non-SSA, keeping in mind that we can't write to components that don't exist */
if (!is_ssa)
- alu.mask &= expand_writemask(instr->dest.write_mask);
+ ins.mask &= instr->dest.write_mask;
ins.alu = alu;
}
ins.alu.src2 = vector_alu_srco_unsigned(blank_alu_src_xxxx);
- } else if (instr->op == nir_op_f2b32 || instr->op == nir_op_i2b32) {
+ } else if (nr_inputs == 1 && !quirk_flipped_r24) {
+ /* Lots of instructions need a 0 plonked in */
ins.ssa_args.inline_constant = false;
ins.ssa_args.src1 = SSA_FIXED_REGISTER(REGISTER_CONSTANT);
ins.has_constants = true;
uint8_t original_swizzle[4];
memcpy(original_swizzle, nirmods[0]->swizzle, sizeof(nirmods[0]->swizzle));
+ unsigned orig_mask = ins.mask;
for (int i = 0; i < nr_components; ++i) {
- ins.alu.mask = (0x3) << (2 * i); /* Mask the associated component */
+ /* Mask the associated component, dropping the
+ * instruction if needed */
+
+ ins.mask = 1 << i;
+ ins.mask &= orig_mask;
+
+ if (!ins.mask)
+ continue;
for (int j = 0; j < 4; ++j)
nirmods[0]->swizzle[j] = original_swizzle[i]; /* Pull from the correct component */
- ins.alu.src1 = vector_alu_srco_unsigned(vector_alu_modifiers(nirmods[0], is_int));
+ ins.alu.src1 = vector_alu_srco_unsigned(vector_alu_modifiers(nirmods[0], is_int, broadcast_swizzle, half_1, false));
emit_mir_instruction(ctx, ins);
}
} else {
#undef ALU_CASE
+/* Uniforms and UBOs use a shared code path, as uniforms are just (slightly
+ * optimized) versions of UBO #0 */
+
static void
-emit_uniform_read(compiler_context *ctx, unsigned dest, unsigned offset, nir_src *indirect_offset)
+emit_ubo_read(
+ compiler_context *ctx,
+ unsigned dest,
+ unsigned offset,
+ nir_src *indirect_offset,
+ unsigned index)
{
/* TODO: half-floats */
- if (!indirect_offset && offset < ctx->uniform_cutoff) {
+ if (!indirect_offset && offset < ctx->uniform_cutoff && index == 0) {
/* Fast path: For the first 16 uniforms, direct accesses are
* 0-cycle, since they're just a register fetch in the usual
* case. So, we alias the registers while we're still in
if (indirect_offset) {
emit_indirect_offset(ctx, indirect_offset);
- ins.load_store.unknown = 0x8700; /* xxx: what is this? */
+ ins.load_store.unknown = 0x8700 | index; /* xxx: what is this? */
} else {
- ins.load_store.unknown = 0x1E00; /* xxx: what is this? */
+ ins.load_store.unknown = 0x1E00 | index; /* xxx: what is this? */
}
+ /* TODO respect index */
+
emit_mir_instruction(ctx, ins);
}
}
static void
-emit_sysval_read(compiler_context *ctx, nir_intrinsic_instr *instr)
-{
- /* First, pull out the destination */
- unsigned dest = nir_dest_index(ctx, &instr->dest);
+emit_varying_read(
+ compiler_context *ctx,
+ unsigned dest, unsigned offset,
+ unsigned nr_comp, unsigned component,
+ nir_src *indirect_offset, nir_alu_type type)
+{
+ /* XXX: Half-floats? */
+ /* TODO: swizzle, mask */
+
+ midgard_instruction ins = m_ld_vary_32(dest, offset);
+ ins.mask = mask_of(nr_comp);
+ ins.load_store.swizzle = SWIZZLE_XYZW >> (2 * component);
+
+ midgard_varying_parameter p = {
+ .is_varying = 1,
+ .interpolation = midgard_interp_default,
+ .flat = /*var->data.interpolation == INTERP_MODE_FLAT*/ 0
+ };
- /* Now, figure out which uniform this is */
- int sysval = midgard_nir_sysval_for_intrinsic(instr);
- void *val = _mesa_hash_table_u64_search(ctx->sysval_to_id, sysval);
+ unsigned u;
+ memcpy(&u, &p, sizeof(p));
+ ins.load_store.varying_parameters = u;
- /* Sysvals are prefix uniforms */
- unsigned uniform = ((uintptr_t) val) - 1;
+ if (indirect_offset) {
+ /* We need to add in the dynamic index, moved to r27.w */
+ emit_indirect_offset(ctx, indirect_offset);
+ ins.load_store.unknown = 0x79e; /* xxx: what is this? */
+ } else {
+ /* Just a direct load */
+ ins.load_store.unknown = 0x1e9e; /* xxx: what is this? */
+ }
- /* Emit the read itself -- this is never indirect */
- emit_uniform_read(ctx, dest, uniform, NULL);
-}
+ /* Use the type appropriate load */
+ switch (type) {
+ case nir_type_uint:
+ case nir_type_bool:
+ ins.load_store.op = midgard_op_ld_vary_32u;
+ break;
+ case nir_type_int:
+ ins.load_store.op = midgard_op_ld_vary_32i;
+ break;
+ case nir_type_float:
+ ins.load_store.op = midgard_op_ld_vary_32;
+ break;
+ default:
+ unreachable("Attempted to load unknown type");
+ break;
+ }
-/* Reads RGBA8888 value from the tilebuffer and converts to a RGBA32F register,
- * using scalar ops functional on earlier Midgard generations. Newer Midgard
- * generations have faster vectorized reads. This operation is for blend
- * shaders in particular; reading the tilebuffer from the fragment shader
- * remains an open problem. */
+ emit_mir_instruction(ctx, ins);
+}
static void
-emit_fb_read_blend_scalar(compiler_context *ctx, unsigned reg)
+emit_sysval_read(compiler_context *ctx, nir_instr *instr)
{
- midgard_instruction ins = m_ld_color_buffer_8(reg, 0);
- ins.load_store.swizzle = 0; /* xxxx */
-
- /* Read each component sequentially */
-
- for (unsigned c = 0; c < 4; ++c) {
- ins.load_store.mask = (1 << c);
- ins.load_store.unknown = c;
- emit_mir_instruction(ctx, ins);
- }
-
- /* vadd.u2f hr2, zext(hr2), #0 */
-
- midgard_vector_alu_src alu_src = blank_alu_src;
- alu_src.mod = midgard_int_zero_extend;
- alu_src.half = true;
-
- midgard_instruction u2f = {
- .type = TAG_ALU_4,
- .ssa_args = {
- .src0 = reg,
- .src1 = SSA_UNUSED_0,
- .dest = reg,
- .inline_constant = true
- },
- .alu = {
- .op = midgard_alu_op_u2f,
- .reg_mode = midgard_reg_mode_16,
- .dest_override = midgard_dest_override_none,
- .mask = 0xF,
- .src1 = vector_alu_srco_unsigned(alu_src),
- .src2 = vector_alu_srco_unsigned(blank_alu_src),
- }
- };
-
- emit_mir_instruction(ctx, u2f);
-
- /* vmul.fmul.sat r1, hr2, #0.00392151 */
-
- alu_src.mod = 0;
+ unsigned dest;
+ /* Figure out which uniform this is */
+ int sysval = sysval_for_instr(ctx, instr, &dest);
+ void *val = _mesa_hash_table_u64_search(ctx->sysval_to_id, sysval);
- midgard_instruction fmul = {
- .type = TAG_ALU_4,
- .inline_constant = _mesa_float_to_half(1.0 / 255.0),
- .ssa_args = {
- .src0 = reg,
- .dest = reg,
- .src1 = SSA_UNUSED_0,
- .inline_constant = true
- },
- .alu = {
- .op = midgard_alu_op_fmul,
- .reg_mode = midgard_reg_mode_32,
- .dest_override = midgard_dest_override_none,
- .outmod = midgard_outmod_sat,
- .mask = 0xFF,
- .src1 = vector_alu_srco_unsigned(alu_src),
- .src2 = vector_alu_srco_unsigned(blank_alu_src),
- }
- };
+ /* Sysvals are prefix uniforms */
+ unsigned uniform = ((uintptr_t) val) - 1;
- emit_mir_instruction(ctx, fmul);
+ /* Emit the read itself -- this is never indirect */
+ emit_ubo_read(ctx, dest, uniform, NULL, 0);
}
static void
emit_intrinsic(compiler_context *ctx, nir_intrinsic_instr *instr)
{
- unsigned offset, reg;
+ unsigned offset = 0, reg;
switch (instr->intrinsic) {
case nir_intrinsic_discard_if:
}
case nir_intrinsic_load_uniform:
- case nir_intrinsic_load_input:
- offset = nir_intrinsic_base(instr);
+ case nir_intrinsic_load_ubo:
+ case nir_intrinsic_load_input: {
+ bool is_uniform = instr->intrinsic == nir_intrinsic_load_uniform;
+ bool is_ubo = instr->intrinsic == nir_intrinsic_load_ubo;
+
+ /* Get the base type of the intrinsic */
+ /* TODO: Infer type? Does it matter? */
+ nir_alu_type t =
+ is_ubo ? nir_type_uint : nir_intrinsic_type(instr);
+ t = nir_alu_type_get_base_type(t);
+
+ if (!is_ubo) {
+ offset = nir_intrinsic_base(instr);
+ }
- bool direct = nir_src_is_const(instr->src[0]);
+ unsigned nr_comp = nir_intrinsic_dest_components(instr);
- if (direct) {
- offset += nir_src_as_uint(instr->src[0]);
- }
+ nir_src *src_offset = nir_get_io_offset_src(instr);
- reg = nir_dest_index(ctx, &instr->dest);
+ bool direct = nir_src_is_const(*src_offset);
- if (instr->intrinsic == nir_intrinsic_load_uniform && !ctx->is_blend) {
- emit_uniform_read(ctx, reg, ctx->sysval_count + offset, !direct ? &instr->src[0] : NULL);
- } else if (ctx->stage == MESA_SHADER_FRAGMENT && !ctx->is_blend) {
- /* XXX: Half-floats? */
- /* TODO: swizzle, mask */
+ if (direct)
+ offset += nir_src_as_uint(*src_offset);
- midgard_instruction ins = m_ld_vary_32(reg, offset);
+ /* We may need to apply a fractional offset */
+ int component = instr->intrinsic == nir_intrinsic_load_input ?
+ nir_intrinsic_component(instr) : 0;
+ reg = nir_dest_index(ctx, &instr->dest);
- midgard_varying_parameter p = {
- .is_varying = 1,
- .interpolation = midgard_interp_default,
- .flat = /*var->data.interpolation == INTERP_MODE_FLAT*/ 0
- };
+ if (is_uniform && !ctx->is_blend) {
+ emit_ubo_read(ctx, reg, ctx->sysval_count + offset, !direct ? &instr->src[0] : NULL, 0);
+ } else if (is_ubo) {
+ nir_src index = instr->src[0];
- unsigned u;
- memcpy(&u, &p, sizeof(p));
- ins.load_store.varying_parameters = u;
+ /* We don't yet support indirect UBOs. For indirect
+ * block numbers (if that's possible), we don't know
+ * enough about the hardware yet. For indirect sources,
+ * we know what we need, but we still need NIR support
+ * for lowering correctly with respect to 128-bit
+ * reads */
- if (direct) {
- /* We have the offset totally ready */
- ins.load_store.unknown = 0x1e9e; /* xxx: what is this? */
- } else {
- /* We have it partially ready, but we need to
- * add in the dynamic index, moved to r27.w */
- emit_indirect_offset(ctx, &instr->src[0]);
- ins.load_store.unknown = 0x79e; /* xxx: what is this? */
- }
+ assert(nir_src_is_const(index));
+ assert(nir_src_is_const(*src_offset));
- emit_mir_instruction(ctx, ins);
+ /* TODO: Alignment */
+ assert((offset & 0xF) == 0);
+
+ uint32_t uindex = nir_src_as_uint(index) + 1;
+ emit_ubo_read(ctx, reg, offset / 16, NULL, uindex);
+ } else if (ctx->stage == MESA_SHADER_FRAGMENT && !ctx->is_blend) {
+ emit_varying_read(ctx, reg, offset, nr_comp, component, !direct ? &instr->src[0] : NULL, t);
} else if (ctx->is_blend) {
/* For blend shaders, load the input color, which is
* preloaded to r0 */
- midgard_instruction move = v_fmov(reg, blank_alu_src, SSA_FIXED_REGISTER(0));
+ midgard_instruction move = v_mov(reg, blank_alu_src, SSA_FIXED_REGISTER(0));
emit_mir_instruction(ctx, move);
} else if (ctx->stage == MESA_SHADER_VERTEX) {
midgard_instruction ins = m_ld_attr_32(reg, offset);
ins.load_store.unknown = 0x1E1E; /* XXX: What is this? */
- ins.load_store.mask = (1 << instr->num_components) - 1;
+ ins.mask = mask_of(nr_comp);
+
+ /* Use the type appropriate load */
+ switch (t) {
+ case nir_type_uint:
+ case nir_type_bool:
+ ins.load_store.op = midgard_op_ld_attr_32u;
+ break;
+ case nir_type_int:
+ ins.load_store.op = midgard_op_ld_attr_32i;
+ break;
+ case nir_type_float:
+ ins.load_store.op = midgard_op_ld_attr_32;
+ break;
+ default:
+ unreachable("Attempted to load unknown type");
+ break;
+ }
+
emit_mir_instruction(ctx, ins);
} else {
DBG("Unknown load\n");
}
break;
+ }
- case nir_intrinsic_load_output:
- assert(nir_src_is_const(instr->src[0]));
- reg = nir_dest_index(ctx, &instr->dest);
+ /* Reads a 128-bit value raw off the tilebuffer during blending, tasty */
- if (ctx->is_blend) {
- /* TODO: MRT */
- emit_fb_read_blend_scalar(ctx, reg);
- } else {
- DBG("Unknown output load\n");
- assert(0);
- }
+ case nir_intrinsic_load_raw_output_pan:
+ reg = nir_dest_index(ctx, &instr->dest);
+ assert(ctx->is_blend);
+ midgard_instruction ins = m_ld_color_buffer_8(reg, 0);
+ emit_mir_instruction(ctx, ins);
break;
case nir_intrinsic_load_blend_const_color_rgba: {
/* Blend constants are embedded directly in the shader and
* patched in, so we use some magic routing */
- midgard_instruction ins = v_fmov(SSA_FIXED_REGISTER(REGISTER_CONSTANT), blank_alu_src, reg);
+ midgard_instruction ins = v_mov(SSA_FIXED_REGISTER(REGISTER_CONSTANT), blank_alu_src, reg);
ins.has_constants = true;
ins.has_blend_constant = true;
emit_mir_instruction(ctx, ins);
* framebuffer writeout dance. TODO: Defer
* writes */
- midgard_instruction move = v_fmov(reg, blank_alu_src, SSA_FIXED_REGISTER(0));
+ midgard_instruction move = v_mov(reg, blank_alu_src, SSA_FIXED_REGISTER(0));
emit_mir_instruction(ctx, move);
/* Save the index we're writing to for later reference
ctx->fragment_output = reg;
} else if (ctx->stage == MESA_SHADER_VERTEX) {
/* Varyings are written into one of two special
- * varying register, r26 or r27. The register itself is selected as the register
- * in the st_vary instruction, minus the base of 26. E.g. write into r27 and then call st_vary(1)
- *
- * Normally emitting fmov's is frowned upon,
- * but due to unique constraints of
- * REGISTER_VARYING, fmov emission + a
- * dedicated cleanup pass is the only way to
- * guarantee correctness when considering some
- * (common) edge cases XXX: FIXME */
-
- /* If this varying corresponds to a constant (why?!),
- * emit that now since it won't get picked up by
- * hoisting (since there is no corresponding move
- * emitted otherwise) */
-
- void *constant_value = _mesa_hash_table_u64_search(ctx->ssa_constants, reg + 1);
-
- if (constant_value) {
- /* Special case: emit the varying write
- * directly to r26 (looks funny in asm but it's
- * fine) and emit the store _now_. Possibly
- * slightly slower, but this is a really stupid
- * special case anyway (why on earth would you
- * have a constant varying? Your own fault for
- * slightly worse perf :P) */
-
- midgard_instruction ins = v_fmov(SSA_FIXED_REGISTER(REGISTER_CONSTANT), blank_alu_src, SSA_FIXED_REGISTER(26));
- attach_constants(ctx, &ins, constant_value, reg + 1);
- emit_mir_instruction(ctx, ins);
-
- midgard_instruction st = m_st_vary_32(SSA_FIXED_REGISTER(0), offset);
- st.load_store.unknown = 0x1E9E; /* XXX: What is this? */
- emit_mir_instruction(ctx, st);
- } else {
- /* Do not emit the varying yet -- instead, just mark down that we need to later */
+ * varying registers, r26 or r27. The register itself is
+ * selected as the register in the st_vary instruction,
+ * minus the base of 26. E.g. write into r27 and then
+ * call st_vary(1) */
- _mesa_hash_table_u64_insert(ctx->ssa_varyings, reg + 1, (void *) ((uintptr_t) (offset + 1)));
- }
+ midgard_instruction ins = v_mov(reg, blank_alu_src, SSA_FIXED_REGISTER(26));
+ emit_mir_instruction(ctx, ins);
+
+ /* We should have been vectorized, though we don't
+ * currently check that st_vary is emitted only once
+ * per slot (this is relevant, since there's not a mask
+ * parameter available on the store [set to 0 by the
+ * blob]). We do respect the component by adjusting the
+ * swizzle. */
+
+ unsigned component = nir_intrinsic_component(instr);
+
+ midgard_instruction st = m_st_vary_32(SSA_FIXED_REGISTER(0), offset);
+ st.load_store.unknown = 0x1E9E; /* XXX: What is this? */
+ st.load_store.swizzle = SWIZZLE_XYZW << (2*component);
+ emit_mir_instruction(ctx, st);
} else {
DBG("Unknown store\n");
assert(0);
break;
+ /* Special case of store_output for lowered blend shaders */
+ case nir_intrinsic_store_raw_output_pan:
+ assert (ctx->stage == MESA_SHADER_FRAGMENT);
+ reg = nir_src_index(ctx, &instr->src[0]);
+
+ midgard_instruction move = v_mov(reg, blank_alu_src, SSA_FIXED_REGISTER(0));
+ emit_mir_instruction(ctx, move);
+ ctx->fragment_output = reg;
+
+ break;
+
case nir_intrinsic_load_alpha_ref_float:
assert(instr->dest.is_ssa);
case nir_intrinsic_load_viewport_scale:
case nir_intrinsic_load_viewport_offset:
- emit_sysval_read(ctx, instr);
+ emit_sysval_read(ctx, &instr->instr);
break;
default:
midgard_tex_format(enum glsl_sampler_dim dim)
{
switch (dim) {
+ case GLSL_SAMPLER_DIM_1D:
+ case GLSL_SAMPLER_DIM_BUF:
+ return MALI_TEX_1D;
+
case GLSL_SAMPLER_DIM_2D:
case GLSL_SAMPLER_DIM_EXTERNAL:
- return TEXTURE_2D;
+ return MALI_TEX_2D;
case GLSL_SAMPLER_DIM_3D:
- return TEXTURE_3D;
+ return MALI_TEX_3D;
case GLSL_SAMPLER_DIM_CUBE:
- return TEXTURE_CUBE;
+ return MALI_TEX_CUBE;
default:
DBG("Unknown sampler dim type\n");
}
}
+/* Tries to attach an explicit LOD / bias as a constant. Returns whether this
+ * was successful */
+
+static bool
+pan_attach_constant_bias(
+ compiler_context *ctx,
+ nir_src lod,
+ midgard_texture_word *word)
+{
+ /* To attach as constant, it has to *be* constant */
+
+ if (!nir_src_is_const(lod))
+ return false;
+
+ float f = nir_src_as_float(lod);
+
+ /* Break into fixed-point */
+ signed lod_int = f;
+ float lod_frac = f - lod_int;
+
+ /* Carry over negative fractions */
+ if (lod_frac < 0.0) {
+ lod_int--;
+ lod_frac += 1.0;
+ }
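+ /* e.g. an LOD of -0.5 splits into lod_int = -1, lod_frac = 0.5, so
+ * the fraction stays non-negative for the unsigned bias byte below */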
+
+ /* Encode */
+ word->bias = float_to_ubyte(lod_frac);
+ word->bias_int = lod_int;
+
+ return true;
+}
+
+static enum mali_sampler_type
+midgard_sampler_type(nir_alu_type t)
+{
+ switch (nir_alu_type_get_base_type(t)) {
+ case nir_type_float:
+ return MALI_SAMPLER_FLOAT;
+ case nir_type_int:
+ return MALI_SAMPLER_SIGNED;
+ case nir_type_uint:
+ return MALI_SAMPLER_UNSIGNED;
+ default:
+ unreachable("Unknown sampler type");
+ }
+}
+
static void
-emit_tex(compiler_context *ctx, nir_tex_instr *instr)
+emit_texop_native(compiler_context *ctx, nir_tex_instr *instr,
+ unsigned midgard_texop)
{
/* TODO */
//assert (!instr->sampler);
//assert (!instr->texture_array_size);
- assert (instr->op == nir_texop_tex);
/* Allocate registers via a round robin scheme to alternate between the two registers */
int reg = ctx->texture_op_count & 1;
int texture_index = instr->texture_index;
int sampler_index = texture_index;
- for (unsigned i = 0; i < instr->num_srcs; ++i) {
- switch (instr->src[i].src_type) {
- case nir_tex_src_coord: {
- int index = nir_src_index(ctx, &instr->src[i].src);
+ /* No helper to build texture words -- we do it all here */
+ midgard_instruction ins = {
+ .type = TAG_TEXTURE_4,
+ .mask = 0xF,
+ .texture = {
+ .op = midgard_texop,
+ .format = midgard_tex_format(instr->sampler_dim),
+ .texture_handle = texture_index,
+ .sampler_handle = sampler_index,
+
+ /* TODO: Regalloc it in */
+ .swizzle = SWIZZLE_XYZW,
+
+ /* TODO: half */
+ .in_reg_full = 1,
+ .out_full = 1,
- midgard_vector_alu_src alu_src = blank_alu_src;
+ .sampler_type = midgard_sampler_type(instr->dest_type),
+ }
+ };
- int reg = SSA_FIXED_REGISTER(REGISTER_TEXTURE_BASE + in_reg);
+ for (unsigned i = 0; i < instr->num_srcs; ++i) {
+ int reg = SSA_FIXED_REGISTER(REGISTER_TEXTURE_BASE + in_reg);
+ int index = nir_src_index(ctx, &instr->src[i].src);
+ int nr_comp = nir_src_num_components(instr->src[i].src);
+ midgard_vector_alu_src alu_src = blank_alu_src;
+ switch (instr->src[i].src_type) {
+ case nir_tex_src_coord: {
if (instr->sampler_dim == GLSL_SAMPLER_DIM_CUBE) {
+ /* texelFetch is undefined on samplerCube */
+ assert(midgard_texop != TEXTURE_OP_TEXEL_FETCH);
+
/* For cubemaps, we need to load coords into
* special r27, and then use a special ld/st op
- * to copy into the texture register */
+ * to select the face and copy the xy into the
+ * texture register */
alu_src.swizzle = SWIZZLE(COMPONENT_X, COMPONENT_Y, COMPONENT_Z, COMPONENT_X);
- midgard_instruction move = v_fmov(index, alu_src, SSA_FIXED_REGISTER(27));
+ midgard_instruction move = v_mov(index, alu_src, SSA_FIXED_REGISTER(27));
emit_mir_instruction(ctx, move);
midgard_instruction st = m_st_cubemap_coords(reg, 0);
st.load_store.unknown = 0x24; /* XXX: What is this? */
- st.load_store.mask = 0x3; /* xy? */
+ st.mask = 0x3; /* xy */
st.load_store.swizzle = alu_src.swizzle;
emit_mir_instruction(ctx, st);
+ ins.texture.in_reg_swizzle = swizzle_of(2);
} else {
- alu_src.swizzle = SWIZZLE(COMPONENT_X, COMPONENT_Y, COMPONENT_X, COMPONENT_X);
-
- midgard_instruction ins = v_fmov(index, alu_src, reg);
- emit_mir_instruction(ctx, ins);
+ ins.texture.in_reg_swizzle = alu_src.swizzle = swizzle_of(nr_comp);
+
+ midgard_instruction mov = v_mov(index, alu_src, reg);
+ mov.mask = mask_of(nr_comp);
+ emit_mir_instruction(ctx, mov);
+
+ if (midgard_texop == TEXTURE_OP_TEXEL_FETCH) {
+ /* Texel fetch opcodes care about the
+ * values of z and w, so we actually
+ * need to spill into a second register
+ * for a texel fetch with register bias
+ * (for non-2D). TODO: Implement that
+ */
+
+ assert(instr->sampler_dim == GLSL_SAMPLER_DIM_2D);
+
+ midgard_instruction zero = v_mov(index, alu_src, reg);
+ zero.ssa_args.inline_constant = true;
+ zero.ssa_args.src1 = SSA_FIXED_REGISTER(REGISTER_CONSTANT);
+ zero.has_constants = true;
+ zero.mask = ~mov.mask;
+ emit_mir_instruction(ctx, zero);
+
+ ins.texture.in_reg_swizzle = SWIZZLE_XYZZ;
+ } else {
+ /* Non-texel fetch doesn't need that
+ * nonsense. However we do use the Z
+ * for array indexing */
+ bool is_3d = instr->sampler_dim == GLSL_SAMPLER_DIM_3D;
+ ins.texture.in_reg_swizzle = is_3d ? SWIZZLE_XYZZ : SWIZZLE_XYXZ;
+ }
}
break;
}
- default: {
- DBG("Unknown source type\n");
- //assert(0);
- break;
- }
- }
- }
+ case nir_tex_src_bias:
+ case nir_tex_src_lod: {
+ /* Try as a constant if we can */
- /* No helper to build texture words -- we do it all here */
- midgard_instruction ins = {
- .type = TAG_TEXTURE_4,
- .texture = {
- .op = TEXTURE_OP_NORMAL,
- .format = midgard_tex_format(instr->sampler_dim),
- .texture_handle = texture_index,
- .sampler_handle = sampler_index,
+ bool is_txf = midgard_texop == TEXTURE_OP_TEXEL_FETCH;
+ if (!is_txf && pan_attach_constant_bias(ctx, instr->src[i].src, &ins.texture))
+ break;
- /* TODO: Don't force xyzw */
- .swizzle = SWIZZLE(COMPONENT_X, COMPONENT_Y, COMPONENT_Z, COMPONENT_W),
- .mask = 0xF,
+ /* Otherwise we use a register. To keep RA simple, we
+ * put the bias/LOD into the w component of the input
+ * source, which is otherwise in xy */
- /* TODO: half */
- //.in_reg_full = 1,
- .out_full = 1,
+ alu_src.swizzle = SWIZZLE_XXXX;
- .filter = 1,
+ midgard_instruction mov = v_mov(index, alu_src, reg);
+ mov.mask = 1 << COMPONENT_W;
+ emit_mir_instruction(ctx, mov);
- /* Always 1 */
- .unknown7 = 1,
+ ins.texture.lod_register = true;
- /* Assume we can continue; hint it out later */
- .cont = 1,
+ midgard_tex_register_select sel = {
+ .select = in_reg,
+ .full = 1,
+
+ /* w */
+ .component_lo = 1,
+ .component_hi = 1
+ };
+
+ uint8_t packed;
+ memcpy(&packed, &sel, sizeof(packed));
+ ins.texture.bias = packed;
+
+ break;
+ }
+
+ default:
+ unreachable("Unknown texture source type\n");
}
- };
+ }
/* Set registers to read and write from the same place */
ins.texture.in_reg_select = in_reg;
ins.texture.out_reg_select = out_reg;
- /* TODO: Dynamic swizzle input selection, half-swizzles? */
- if (instr->sampler_dim == GLSL_SAMPLER_DIM_3D) {
- ins.texture.in_reg_swizzle_right = COMPONENT_X;
- ins.texture.in_reg_swizzle_left = COMPONENT_Y;
- //ins.texture.in_reg_swizzle_third = COMPONENT_Z;
- } else {
- ins.texture.in_reg_swizzle_left = COMPONENT_X;
- ins.texture.in_reg_swizzle_right = COMPONENT_Y;
- //ins.texture.in_reg_swizzle_third = COMPONENT_X;
- }
-
emit_mir_instruction(ctx, ins);
/* Simultaneously alias the destination and emit a move for it. The move will be eliminated if possible */
alias_ssa(ctx, o_index, SSA_FIXED_REGISTER(o_reg));
ctx->texture_index[reg] = o_index;
- midgard_instruction ins2 = v_fmov(SSA_FIXED_REGISTER(o_reg), blank_alu_src, o_index);
+ midgard_instruction ins2 = v_mov(SSA_FIXED_REGISTER(o_reg), blank_alu_src, o_index);
emit_mir_instruction(ctx, ins2);
/* Used for .cont and .last hinting */
ctx->texture_op_count++;
}
+static void
+emit_tex(compiler_context *ctx, nir_tex_instr *instr)
+{
+ /* Fixup op, since only textureLod is permitted in VS but NIR can give
+ * generic tex in some cases (which confuses the hardware) */
+
+ bool is_vertex = ctx->stage == MESA_SHADER_VERTEX;
+
+ if (is_vertex && instr->op == nir_texop_tex)
+ instr->op = nir_texop_txl;
+
+ switch (instr->op) {
+ case nir_texop_tex:
+ case nir_texop_txb:
+ emit_texop_native(ctx, instr, TEXTURE_OP_NORMAL);
+ break;
+ case nir_texop_txl:
+ emit_texop_native(ctx, instr, TEXTURE_OP_LOD);
+ break;
+ case nir_texop_txf:
+ emit_texop_native(ctx, instr, TEXTURE_OP_TEXEL_FETCH);
+ break;
+ case nir_texop_txs:
+ emit_sysval_read(ctx, &instr->instr);
+ break;
+ default:
+ unreachable("Unhanlded texture op");
+ }
+}
+
static void
emit_jump(compiler_context *ctx, nir_jump_instr *instr)
{
}
}
-/* Midgard IR only knows vector ALU types, but we sometimes need to actually
- * use scalar ALU instructions, for functional or performance reasons. To do
- * this, we just demote vector ALU payloads to scalar. */
-
-static int
-component_from_mask(unsigned mask)
-{
- for (int c = 0; c < 4; ++c) {
- if (mask & (3 << (2 * c)))
- return c;
- }
-
- assert(0);
- return 0;
-}
-
-static bool
-is_single_component_mask(unsigned mask)
-{
- int components = 0;
-
- for (int c = 0; c < 4; ++c)
- if (mask & (3 << (2 * c)))
- components++;
-
- return components == 1;
-}
-
-/* Create a mask of accessed components from a swizzle to figure out vector
- * dependencies */
-
-static unsigned
-swizzle_to_access_mask(unsigned swizzle)
-{
- unsigned component_mask = 0;
-
- for (int i = 0; i < 4; ++i) {
- unsigned c = (swizzle >> (2 * i)) & 3;
- component_mask |= (1 << c);
- }
-
- return component_mask;
-}
-
-static unsigned
-vector_to_scalar_source(unsigned u, bool is_int)
-{
- midgard_vector_alu_src v;
- memcpy(&v, &u, sizeof(v));
-
- /* TODO: Integers */
-
- midgard_scalar_alu_src s = {
- .full = !v.half,
- .component = (v.swizzle & 3) << 1
- };
-
- if (is_int) {
- /* TODO */
- } else {
- s.abs = v.mod & MIDGARD_FLOAT_MOD_ABS;
- s.negate = v.mod & MIDGARD_FLOAT_MOD_NEG;
- }
-
- unsigned o;
- memcpy(&o, &s, sizeof(s));
-
- return o & ((1 << 6) - 1);
-}
-
-static midgard_scalar_alu
-vector_to_scalar_alu(midgard_vector_alu v, midgard_instruction *ins)
-{
- bool is_int = midgard_is_integer_op(v.op);
-
- /* The output component is from the mask */
- midgard_scalar_alu s = {
- .op = v.op,
- .src1 = vector_to_scalar_source(v.src1, is_int),
- .src2 = vector_to_scalar_source(v.src2, is_int),
- .unknown = 0,
- .outmod = v.outmod,
- .output_full = 1, /* TODO: Half */
- .output_component = component_from_mask(v.mask) << 1,
- };
-
- /* Inline constant is passed along rather than trying to extract it
- * from v */
-
- if (ins->ssa_args.inline_constant) {
- uint16_t imm = 0;
- int lower_11 = ins->inline_constant & ((1 << 12) - 1);
- imm |= (lower_11 >> 9) & 3;
- imm |= (lower_11 >> 6) & 4;
- imm |= (lower_11 >> 2) & 0x38;
- imm |= (lower_11 & 63) << 6;
-
- s.src2 = imm;
- }
-
- return s;
-}
-
-/* Midgard prefetches instruction types, so during emission we need to
- * lookahead too. Unless this is the last instruction, in which we return 1. Or
- * if this is the second to last and the last is an ALU, then it's also 1... */
-
-#define IS_ALU(tag) (tag == TAG_ALU_4 || tag == TAG_ALU_8 || \
- tag == TAG_ALU_12 || tag == TAG_ALU_16)
-
-#define EMIT_AND_COUNT(type, val) util_dynarray_append(emission, type, val); \
- bytes_emitted += sizeof(type)
-
-static void
-emit_binary_vector_instruction(midgard_instruction *ains,
- uint16_t *register_words, int *register_words_count,
- uint64_t *body_words, size_t *body_size, int *body_words_count,
- size_t *bytes_emitted)
-{
- memcpy(®ister_words[(*register_words_count)++], &ains->registers, sizeof(ains->registers));
- *bytes_emitted += sizeof(midgard_reg_info);
-
- body_size[*body_words_count] = sizeof(midgard_vector_alu);
- memcpy(&body_words[(*body_words_count)++], &ains->alu, sizeof(ains->alu));
- *bytes_emitted += sizeof(midgard_vector_alu);
-}
-
-/* Checks for an SSA data hazard between two adjacent instructions, keeping in
- * mind that we are a vector architecture and we can write to different
- * components simultaneously */
-
-static bool
-can_run_concurrent_ssa(midgard_instruction *first, midgard_instruction *second)
-{
- /* Each instruction reads some registers and writes to a register. See
- * where the first writes */
-
- /* Figure out where exactly we wrote to */
- int source = first->ssa_args.dest;
- int source_mask = first->type == TAG_ALU_4 ? squeeze_writemask(first->alu.mask) : 0xF;
-
- /* As long as the second doesn't read from the first, we're okay */
- if (second->ssa_args.src0 == source) {
- if (first->type == TAG_ALU_4) {
- /* Figure out which components we just read from */
-
- int q = second->alu.src1;
- midgard_vector_alu_src *m = (midgard_vector_alu_src *) &q;
-
- /* Check if there are components in common, and fail if so */
- if (swizzle_to_access_mask(m->swizzle) & source_mask)
- return false;
- } else
- return false;
-
- }
-
- if (second->ssa_args.src1 == source)
- return false;
-
- /* Otherwise, it's safe in that regard. Another data hazard is both
- * writing to the same place, of course */
-
- if (second->ssa_args.dest == source) {
- /* ...but only if the components overlap */
- int dest_mask = second->type == TAG_ALU_4 ? squeeze_writemask(second->alu.mask) : 0xF;
-
- if (dest_mask & source_mask)
- return false;
- }
-
- /* ...That's it */
- return true;
-}
-
-static bool
-midgard_has_hazard(
- midgard_instruction **segment, unsigned segment_size,
- midgard_instruction *ains)
-{
- for (int s = 0; s < segment_size; ++s)
- if (!can_run_concurrent_ssa(segment[s], ains))
- return true;
-
- return false;
-
-
-}
-
-/* Schedules, but does not emit, a single basic block. After scheduling, the
- * final tag and size of the block are known, which are necessary for branching
- * */
-
-static midgard_bundle
-schedule_bundle(compiler_context *ctx, midgard_block *block, midgard_instruction *ins, int *skip)
-{
- int instructions_emitted = 0, instructions_consumed = -1;
- midgard_bundle bundle = { 0 };
-
- uint8_t tag = ins->type;
-
- /* Default to the instruction's tag */
- bundle.tag = tag;
-
- switch (ins->type) {
- case TAG_ALU_4: {
- uint32_t control = 0;
- size_t bytes_emitted = sizeof(control);
-
- /* TODO: Constant combining */
- int index = 0, last_unit = 0;
-
- /* Previous instructions, for the purpose of parallelism */
- midgard_instruction *segment[4] = {0};
- int segment_size = 0;
-
- instructions_emitted = -1;
- midgard_instruction *pins = ins;
-
- for (;;) {
- midgard_instruction *ains = pins;
-
- /* Advance instruction pointer */
- if (index) {
- ains = mir_next_op(pins);
- pins = ains;
- }
-
- /* Out-of-work condition */
- if ((struct list_head *) ains == &block->instructions)
- break;
-
- /* Ensure that the chain can continue */
- if (ains->type != TAG_ALU_4) break;
-
- /* If there's already something in the bundle and we
- * have weird scheduler constraints, break now */
- if (ains->precede_break && index) break;
-
- /* According to the presentation "The ARM
- * Mali-T880 Mobile GPU" from HotChips 27,
- * there are two pipeline stages. Branching
- * position determined experimentally. Lines
- * are executed in parallel:
- *
- * [ VMUL ] [ SADD ]
- * [ VADD ] [ SMUL ] [ LUT ] [ BRANCH ]
- *
- * Verify that there are no ordering dependencies here.
- *
- * TODO: Allow for parallelism!!!
- */
-
- /* Pick a unit for it if it doesn't force a particular unit */
-
- int unit = ains->unit;
-
- if (!unit) {
- int op = ains->alu.op;
- int units = alu_opcode_props[op].props;
-
- /* TODO: Promotion of scalars to vectors */
- int vector = ((!is_single_component_mask(ains->alu.mask)) || ((units & UNITS_SCALAR) == 0)) && (units & UNITS_ANY_VECTOR);
-
- if (!vector)
- assert(units & UNITS_SCALAR);
-
- if (vector) {
- if (last_unit >= UNIT_VADD) {
- if (units & UNIT_VLUT)
- unit = UNIT_VLUT;
- else
- break;
- } else {
- if ((units & UNIT_VMUL) && !(control & UNIT_VMUL))
- unit = UNIT_VMUL;
- else if ((units & UNIT_VADD) && !(control & UNIT_VADD))
- unit = UNIT_VADD;
- else if (units & UNIT_VLUT)
- unit = UNIT_VLUT;
- else
- break;
- }
- } else {
- if (last_unit >= UNIT_VADD) {
- if ((units & UNIT_SMUL) && !(control & UNIT_SMUL))
- unit = UNIT_SMUL;
- else if (units & UNIT_VLUT)
- unit = UNIT_VLUT;
- else
- break;
- } else {
- if ((units & UNIT_SADD) && !(control & UNIT_SADD) && !midgard_has_hazard(segment, segment_size, ains))
- unit = UNIT_SADD;
- else if (units & UNIT_SMUL)
- unit = ((units & UNIT_VMUL) && !(control & UNIT_VMUL)) ? UNIT_VMUL : UNIT_SMUL;
- else if ((units & UNIT_VADD) && !(control & UNIT_VADD))
- unit = UNIT_VADD;
- else
- break;
- }
- }
-
- assert(unit & units);
- }
-
- /* Late unit check, this time for encoding (not parallelism) */
- if (unit <= last_unit) break;
-
- /* Clear the segment */
- if (last_unit < UNIT_VADD && unit >= UNIT_VADD)
- segment_size = 0;
-
- if (midgard_has_hazard(segment, segment_size, ains))
- break;
-
- /* We're good to go -- emit the instruction */
- ains->unit = unit;
-
- segment[segment_size++] = ains;
-
- /* Only one set of embedded constants per
- * bundle possible; if we have more, we must
- * break the chain early, unfortunately */
-
- if (ains->has_constants) {
- if (bundle.has_embedded_constants) {
- /* The blend constant needs to be
- * alone, since it conflicts with
- * everything by definition*/
-
- if (ains->has_blend_constant || bundle.has_blend_constant)
- break;
-
- /* ...but if there are already
- * constants but these are the
- * *same* constants, we let it
- * through */
-
- if (memcmp(bundle.constants, ains->constants, sizeof(bundle.constants)))
- break;
- } else {
- bundle.has_embedded_constants = true;
- memcpy(bundle.constants, ains->constants, sizeof(bundle.constants));
-
- /* If this is a blend shader special constant, track it for patching */
- bundle.has_blend_constant |= ains->has_blend_constant;
- }
- }
-
- if (ains->unit & UNITS_ANY_VECTOR) {
- emit_binary_vector_instruction(ains, bundle.register_words,
- &bundle.register_words_count, bundle.body_words,
- bundle.body_size, &bundle.body_words_count, &bytes_emitted);
- } else if (ains->compact_branch) {
- /* All of r0 has to be written out
- * along with the branch writeout.
- * (slow!) */
-
- if (ains->writeout) {
- if (index == 0) {
- midgard_instruction ins = v_fmov(0, blank_alu_src, SSA_FIXED_REGISTER(0));
- ins.unit = UNIT_VMUL;
-
- control |= ins.unit;
-
- emit_binary_vector_instruction(&ins, bundle.register_words,
- &bundle.register_words_count, bundle.body_words,
- bundle.body_size, &bundle.body_words_count, &bytes_emitted);
- } else {
- /* Analyse the group to see if r0 is written in full, on-time, without hanging dependencies*/
- bool written_late = false;
- bool components[4] = { 0 };
- uint16_t register_dep_mask = 0;
- uint16_t written_mask = 0;
-
- midgard_instruction *qins = ins;
- for (int t = 0; t < index; ++t) {
- if (qins->registers.out_reg != 0) {
- /* Mark down writes */
-
- written_mask |= (1 << qins->registers.out_reg);
- } else {
- /* Mark down the register dependencies for errata check */
-
- if (qins->registers.src1_reg < 16)
- register_dep_mask |= (1 << qins->registers.src1_reg);
-
- if (qins->registers.src2_reg < 16)
- register_dep_mask |= (1 << qins->registers.src2_reg);
-
- int mask = qins->alu.mask;
-
- for (int c = 0; c < 4; ++c)
- if (mask & (0x3 << (2 * c)))
- components[c] = true;
-
- /* ..but if the writeout is too late, we have to break up anyway... for some reason */
-
- if (qins->unit == UNIT_VLUT)
- written_late = true;
- }
-
- /* Advance instruction pointer */
- qins = mir_next_op(qins);
- }
-
-
- /* ERRATA (?): In a bundle ending in a fragment writeout, the register dependencies of r0 cannot be written within this bundle (discovered in -bshading:shading=phong) */
- if (register_dep_mask & written_mask) {
- DBG("ERRATA WORKAROUND: Breakup for writeout dependency masks %X vs %X (common %X)\n", register_dep_mask, written_mask, register_dep_mask & written_mask);
- break;
- }
-
- if (written_late)
- break;
-
- /* If even a single component is not written, break it up (conservative check). */
- bool breakup = false;
-
- for (int c = 0; c < 4; ++c)
- if (!components[c])
- breakup = true;
-
- if (breakup)
- break;
-
- /* Otherwise, we're free to proceed */
- }
- }
-
- if (ains->unit == ALU_ENAB_BRANCH) {
- bundle.body_size[bundle.body_words_count] = sizeof(midgard_branch_extended);
- memcpy(&bundle.body_words[bundle.body_words_count++], &ains->branch_extended, sizeof(midgard_branch_extended));
- bytes_emitted += sizeof(midgard_branch_extended);
- } else {
- bundle.body_size[bundle.body_words_count] = sizeof(ains->br_compact);
- memcpy(&bundle.body_words[bundle.body_words_count++], &ains->br_compact, sizeof(ains->br_compact));
- bytes_emitted += sizeof(ains->br_compact);
- }
- } else {
- memcpy(&bundle.register_words[bundle.register_words_count++], &ains->registers, sizeof(ains->registers));
- bytes_emitted += sizeof(midgard_reg_info);
-
- bundle.body_size[bundle.body_words_count] = sizeof(midgard_scalar_alu);
- bundle.body_words_count++;
- bytes_emitted += sizeof(midgard_scalar_alu);
- }
-
- /* Defer marking until after writing to allow for break */
- control |= ains->unit;
- last_unit = ains->unit;
- ++instructions_emitted;
- ++index;
- }
-
- /* Bubble up the number of instructions for skipping */
- instructions_consumed = index - 1;
-
- int padding = 0;
-
- /* Pad ALU op to nearest word */
-
- if (bytes_emitted & 15) {
- padding = 16 - (bytes_emitted & 15);
- bytes_emitted += padding;
- }
-
- /* Constants must always be quadwords */
- if (bundle.has_embedded_constants)
- bytes_emitted += 16;
-
- /* Size ALU instruction for tag */
- bundle.tag = (TAG_ALU_4) + (bytes_emitted / 16) - 1;
- bundle.padding = padding;
- bundle.control = bundle.tag | control;
-
- break;
- }
-
- case TAG_LOAD_STORE_4: {
- /* Load store instructions have two words at once. If
- * we only have one queued up, we need to NOP pad.
- * Otherwise, we store both in succession to save space
- * and cycles -- letting them go in parallel -- skip
- * the next. The usefulness of this optimisation is
- * greatly dependent on the quality of the instruction
- * scheduler.
- */
-
- midgard_instruction *next_op = mir_next_op(ins);
-
- if ((struct list_head *) next_op != &block->instructions && next_op->type == TAG_LOAD_STORE_4) {
- /* As the two operate concurrently, make sure
- * they are not dependent */
-
- if (can_run_concurrent_ssa(ins, next_op) || true) {
- /* Skip ahead, since it's redundant with the pair */
- instructions_consumed = 1 + (instructions_emitted++);
- }
- }
-
- break;
- }
-
- default:
- /* Texture ops default to single-op-per-bundle scheduling */
- break;
- }
-
- /* Copy the instructions into the bundle */
- bundle.instruction_count = instructions_emitted + 1;
-
- int used_idx = 0;
-
- midgard_instruction *uins = ins;
- for (int i = 0; used_idx < bundle.instruction_count; ++i) {
- bundle.instructions[used_idx++] = *uins;
- uins = mir_next_op(uins);
- }
-
- *skip = (instructions_consumed == -1) ? instructions_emitted : instructions_consumed;
-
- return bundle;
-}
-
-static int
-quadword_size(int tag)
-{
- switch (tag) {
- case TAG_ALU_4:
- return 1;
-
- case TAG_ALU_8:
- return 2;
-
- case TAG_ALU_12:
- return 3;
-
- case TAG_ALU_16:
- return 4;
-
- case TAG_LOAD_STORE_4:
- return 1;
-
- case TAG_TEXTURE_4:
- return 1;
-
- default:
- assert(0);
- return 0;
- }
-}
-
-/* Schedule a single block by iterating its instruction to create bundles.
- * While we go, tally about the bundle sizes to compute the block size. */
-
-static void
-schedule_block(compiler_context *ctx, midgard_block *block)
-{
- util_dynarray_init(&block->bundles, NULL);
-
- block->quadword_count = 0;
-
- mir_foreach_instr_in_block(block, ins) {
- int skip;
- midgard_bundle bundle = schedule_bundle(ctx, block, ins, &skip);
- util_dynarray_append(&block->bundles, midgard_bundle, bundle);
-
- if (bundle.has_blend_constant) {
- /* TODO: Multiblock? */
- int quadwords_within_block = block->quadword_count + quadword_size(bundle.tag) - 1;
- ctx->blend_constant_offset = quadwords_within_block * 0x10;
- }
-
- while(skip--)
- ins = mir_next_op(ins);
-
- block->quadword_count += quadword_size(bundle.tag);
- }
-
- block->is_scheduled = true;
-}
-
-static void
-schedule_program(compiler_context *ctx)
-{
- /* We run RA prior to scheduling */
- struct ra_graph *g = allocate_registers(ctx);
- install_registers(ctx, g);
-
- mir_foreach_block(ctx, block) {
- schedule_block(ctx, block);
- }
-}
-
-/* After everything is scheduled, emit whole bundles at a time */
-
-static void
-emit_binary_bundle(compiler_context *ctx, midgard_bundle *bundle, struct util_dynarray *emission, int next_tag)
-{
- int lookahead = next_tag << 4;
-
- switch (bundle->tag) {
- case TAG_ALU_4:
- case TAG_ALU_8:
- case TAG_ALU_12:
- case TAG_ALU_16: {
- /* Actually emit each component */
- util_dynarray_append(emission, uint32_t, bundle->control | lookahead);
-
- for (int i = 0; i < bundle->register_words_count; ++i)
- util_dynarray_append(emission, uint16_t, bundle->register_words[i]);
-
- /* Emit body words based on the instructions bundled */
- for (int i = 0; i < bundle->instruction_count; ++i) {
- midgard_instruction *ins = &bundle->instructions[i];
-
- if (ins->unit & UNITS_ANY_VECTOR) {
- memcpy(util_dynarray_grow(emission, sizeof(midgard_vector_alu)), &ins->alu, sizeof(midgard_vector_alu));
- } else if (ins->compact_branch) {
- /* Dummy move, XXX DRY */
- if ((i == 0) && ins->writeout) {
- midgard_instruction ins = v_fmov(0, blank_alu_src, SSA_FIXED_REGISTER(0));
- memcpy(util_dynarray_grow(emission, sizeof(midgard_vector_alu)), &ins.alu, sizeof(midgard_vector_alu));
- }
-
- if (ins->unit == ALU_ENAB_BR_COMPACT) {
- memcpy(util_dynarray_grow(emission, sizeof(ins->br_compact)), &ins->br_compact, sizeof(ins->br_compact));
- } else {
- memcpy(util_dynarray_grow(emission, sizeof(ins->branch_extended)), &ins->branch_extended, sizeof(ins->branch_extended));
- }
- } else {
- /* Scalar */
- midgard_scalar_alu scalarised = vector_to_scalar_alu(ins->alu, ins);
- memcpy(util_dynarray_grow(emission, sizeof(scalarised)), &scalarised, sizeof(scalarised));
- }
- }
-
- /* Emit padding (all zero) */
- memset(util_dynarray_grow(emission, bundle->padding), 0, bundle->padding);
-
- /* Tack on constants */
-
- if (bundle->has_embedded_constants) {
- util_dynarray_append(emission, float, bundle->constants[0]);
- util_dynarray_append(emission, float, bundle->constants[1]);
- util_dynarray_append(emission, float, bundle->constants[2]);
- util_dynarray_append(emission, float, bundle->constants[3]);
- }
-
- break;
- }
-
- case TAG_LOAD_STORE_4: {
- /* One or two composing instructions */
-
- uint64_t current64, next64 = LDST_NOP;
-
- memcpy(¤t64, &bundle->instructions[0].load_store, sizeof(current64));
-
- if (bundle->instruction_count == 2)
- memcpy(&next64, &bundle->instructions[1].load_store, sizeof(next64));
-
- midgard_load_store instruction = {
- .type = bundle->tag,
- .next_type = next_tag,
- .word1 = current64,
- .word2 = next64
- };
-
- util_dynarray_append(emission, midgard_load_store, instruction);
-
- break;
- }
-
- case TAG_TEXTURE_4: {
- /* Texture instructions are easy, since there is no
- * pipelining nor VLIW to worry about. We may need to set the .last flag */
-
- midgard_instruction *ins = &bundle->instructions[0];
-
- ins->texture.type = TAG_TEXTURE_4;
- ins->texture.next_type = next_tag;
-
- ctx->texture_op_count--;
-
- if (!ctx->texture_op_count) {
- ins->texture.cont = 0;
- ins->texture.last = 1;
- }
-
- util_dynarray_append(emission, midgard_texture_word, ins->texture);
- break;
- }
-
- default:
- DBG("Unknown midgard instruction type\n");
- assert(0);
- break;
- }
-}
-
/* ALU instructions can inline or embed constants, which decreases register
* pressure and saves space. */
unsigned scratch = alu->ssa_args.dest;
if (entry) {
- midgard_instruction ins = v_fmov(SSA_FIXED_REGISTER(REGISTER_CONSTANT), blank_alu_src, scratch);
+ midgard_instruction ins = v_mov(SSA_FIXED_REGISTER(REGISTER_CONSTANT), blank_alu_src, scratch);
attach_constants(ctx, &ins, entry, alu->ssa_args.src1 + 1);
/* Force a break XXX Defer r31 writes */
/* Blend constants must not be inlined by definition */
if (ins->has_blend_constant) continue;
+ /* We can inline 32-bit (sometimes) or 16-bit (usually) */
+ bool is_16 = ins->alu.reg_mode == midgard_reg_mode_16;
+ bool is_32 = ins->alu.reg_mode == midgard_reg_mode_32;
+
+ if (!(is_16 || is_32))
+ continue;
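+
+ /* The inline slot is only 16 bits wide: 16-bit operands can use it
+ * directly, and 32-bit operands only when the value survives the
+ * narrowing performed below; other register modes cannot use it. */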
+
/* src1 cannot be an inline constant due to encoding
* restrictions. So, if possible we try to flip the arguments
* in that case */
/* Scale constant appropriately, if we can legally */
uint16_t scaled_constant = 0;
- if (midgard_is_integer_op(op)) {
+ if (midgard_is_integer_op(op) || is_16) {
unsigned int *iconstants = (unsigned int *) ins->constants;
scaled_constant = (uint16_t) iconstants[component];
uint32_t value = cons[component];
bool is_vector = false;
- unsigned mask = effective_writemask(&ins->alu);
+ unsigned mask = effective_writemask(&ins->alu, ins->mask);
for (int c = 1; c < 4; ++c) {
/* We only care if this component is actually used */
static void
map_ssa_to_alias(compiler_context *ctx, int *ref)
{
+ /* Negative refs deliberately mark unused sources, so there is nothing to map */
+ if (*ref < 0)
+ return;
+
unsigned int alias = (uintptr_t) _mesa_hash_table_u64_search(ctx->ssa_to_alias, *ref + 1);
if (alias) {
return progress;
}
+/* Dead code elimination for branches at the end of a block - only one branch
+ * per block is legal semantically */
+
+static void
+midgard_opt_cull_dead_branch(compiler_context *ctx, midgard_block *block)
+{
+ bool branched = false;
+
+ mir_foreach_instr_in_block_safe(block, ins) {
+ if (!midgard_is_branch_unit(ins->unit)) continue;
+
+ /* We ignore prepacked branches since the fragment epilogue is
+ * just generally special */
+ if (ins->prepacked_branch) continue;
+
+ /* Discards are similarly special and may not correspond to the
+ * end of a block */
+
+ if (ins->branch.target_type == TARGET_DISCARD) continue;
+
+ if (branched) {
+ /* We already branched, so this is dead */
+ mir_remove_instruction(ins);
+ }
+
+ branched = true;
+ }
+}
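+
+/* Purely illustrative: after lowering nested control flow, a block can end in
+ *    br.always  block_4
+ *    br.always  block_7
+ * where the second branch can never run, so it is culled here. */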
+
static bool
mir_nontrivial_mod(midgard_vector_alu_src src, bool is_int, unsigned mask)
{
/* abs or neg */
if (!is_int && src.mod) return true;
+ /* For integers, only a shift mod is nontrivial in isolation */
+ if (is_int && src.mod == midgard_int_shift) return true;
+
+ /* size-conversion */
+ if (src.half) return true;
+
/* swizzle */
for (unsigned c = 0; c < 4; ++c) {
if (!(mask & (1 << c))) continue;
return false;
}
+static bool
+mir_nontrivial_source2_mod(midgard_instruction *ins)
+{
+ bool is_int = midgard_is_integer_op(ins->alu.op);
+
+ midgard_vector_alu_src src2 =
+ vector_alu_from_unsigned(ins->alu.src2);
+
+ return mir_nontrivial_mod(src2, is_int, ins->mask);
+}
+
+static bool
+mir_nontrivial_outmod(midgard_instruction *ins)
+{
+ bool is_int = midgard_is_integer_op(ins->alu.op);
+ unsigned mod = ins->alu.outmod;
+
+ /* Type conversion is a sort of outmod */
+ if (ins->alu.dest_override != midgard_dest_override_none)
+ return true;
+
+ if (is_int)
+ return mod != midgard_outmod_int_wrap;
+ else
+ return mod != midgard_outmod_none;
+}
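+
+/* Taken together: copy-prop below may only fold away a mov whose source
+ * modifiers are trivial and whose output modifier is the identity
+ * (midgard_outmod_none for floats, midgard_outmod_int_wrap for ints), since
+ * anything else changes the value being propagated. */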
+
static bool
midgard_opt_copy_prop(compiler_context *ctx, midgard_block *block)
{
if (ins->ssa_args.inline_constant) continue;
if (ins->has_constants) continue;
- /* Also, if the move has side effects, we're helpless */
+ if (mir_nontrivial_source2_mod(ins)) continue;
+ if (mir_nontrivial_outmod(ins)) continue;
- midgard_vector_alu_src src =
- vector_alu_from_unsigned(ins->alu.src2);
- unsigned mask = squeeze_writemask(ins->alu.mask);
- bool is_int = midgard_is_integer_op(ins->alu.op);
+ /* We're clear -- rewrite */
+ mir_rewrite_index_src(ctx, to, from);
+ mir_remove_instruction(ins);
+ progress |= true;
+ }
- if (mir_nontrivial_mod(src, is_int, mask)) continue;
- if (ins->alu.outmod != midgard_outmod_none) continue;
+ return progress;
+}
- mir_foreach_instr_in_block_from(block, v, mir_next_op(ins)) {
- if (v->ssa_args.src0 == to) {
- v->ssa_args.src0 = from;
- progress = true;
- }
+/* fmov.pos is an idiom for fpos. Propagate the .pos up to the source so that
+ * the move can then be propagated away entirely */
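+/* Illustrative MIR (pseudo-syntax):
+ *    fadd      r1, r2, r3
+ *    fmov.pos  r0, r1
+ * becomes
+ *    fadd.pos  r1, r2, r3
+ *    fmov      r0, r1
+ * after which copy-prop can delete the now-plain mov; .pos here roughly
+ * clamps the result to be non-negative. */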
- if (v->ssa_args.src1 == to && !v->ssa_args.inline_constant) {
- v->ssa_args.src1 = from;
- progress = true;
- }
- }
+static bool
+mir_compose_float_outmod(midgard_outmod_float *outmod, midgard_outmod_float comp)
+{
+ /* Nothing to do */
+ if (comp == midgard_outmod_none)
+ return true;
+
+ if (*outmod == midgard_outmod_none) {
+ *outmod = comp;
+ return true;
}
- return progress;
+ /* TODO: Compose rules */
+ return false;
}
static bool
-midgard_opt_copy_prop_tex(compiler_context *ctx, midgard_block *block)
+midgard_opt_pos_propagate(compiler_context *ctx, midgard_block *block)
{
bool progress = false;
mir_foreach_instr_in_block_safe(block, ins) {
if (ins->type != TAG_ALU_4) continue;
- if (!OP_IS_MOVE(ins->alu.op)) continue;
+ if (ins->alu.op != midgard_alu_op_fmov) continue;
+ if (ins->alu.outmod != midgard_outmod_pos) continue;
- unsigned from = ins->ssa_args.src1;
- unsigned to = ins->ssa_args.dest;
-
- /* Make sure it's simple enough for us to handle */
+ /* TODO: Registers? */
+ unsigned src = ins->ssa_args.src1;
+ if (src >= ctx->func->impl->ssa_alloc) continue;
+ assert(!mir_has_multiple_writes(ctx, src));
- if (from >= SSA_FIXED_MINIMUM) continue;
- if (from >= ctx->func->impl->ssa_alloc) continue;
- if (to < SSA_FIXED_REGISTER(REGISTER_TEXTURE_BASE)) continue;
- if (to > SSA_FIXED_REGISTER(REGISTER_TEXTURE_BASE + 1)) continue;
-
- bool eliminated = false;
+ /* There might be a source modifier, too */
+ if (mir_nontrivial_source2_mod(ins)) continue;
+ /* Backpropagate the modifier */
mir_foreach_instr_in_block_from_rev(block, v, mir_prev_op(ins)) {
- /* The texture registers are not SSA so be careful.
- * Conservatively, just stop if we hit a texture op
- * (even if it may not write) to where we are */
+ if (v->type != TAG_ALU_4) continue;
+ if (v->ssa_args.dest != src) continue;
- if (v->type != TAG_ALU_4)
- break;
+ /* Can we even take a float outmod? */
+ if (midgard_is_integer_out_op(v->alu.op)) continue;
- if (v->ssa_args.dest == from) {
- /* We don't want to track partial writes ... */
- if (v->alu.mask == 0xF) {
- v->ssa_args.dest = to;
- eliminated = true;
- }
+ midgard_outmod_float temp = v->alu.outmod;
+ progress |= mir_compose_float_outmod(&temp, ins->alu.outmod);
- break;
- }
- }
+ /* Throw in the towel.. */
+ if (!progress) break;
- if (eliminated)
- mir_remove_instruction(ins);
+ /* Otherwise, transfer the modifier */
+ v->alu.outmod = temp;
+ ins->alu.outmod = midgard_outmod_none;
- progress |= eliminated;
+ break;
+ }
}
return progress;
}
}
-/* Emit varying stores late */
-
-static void
-midgard_emit_store(compiler_context *ctx, midgard_block *block) {
- /* Iterate in reverse to get the final write, rather than the first */
-
- mir_foreach_instr_in_block_safe_rev(block, ins) {
- /* Check if what we just wrote needs a store */
- int idx = ins->ssa_args.dest;
- uintptr_t varying = ((uintptr_t) _mesa_hash_table_u64_search(ctx->ssa_varyings, idx + 1));
-
- if (!varying) continue;
-
- varying -= 1;
-
- /* We need to store to the appropriate varying, so emit the
- * move/store */
-
- /* TODO: Integrate with special purpose RA (and scheduler?) */
- bool high_varying_register = false;
-
- midgard_instruction mov = v_fmov(idx, blank_alu_src, SSA_FIXED_REGISTER(REGISTER_VARYING_BASE + high_varying_register));
-
- midgard_instruction st = m_st_vary_32(SSA_FIXED_REGISTER(high_varying_register), varying);
- st.load_store.unknown = 0x1E9E; /* XXX: What is this? */
-
- mir_insert_instruction_before(mir_next_op(ins), st);
- mir_insert_instruction_before(mir_next_op(ins), mov);
-
- /* We no longer need to store this varying */
- _mesa_hash_table_u64_remove(ctx->ssa_varyings, idx + 1);
- }
-}
-
/* If there are leftovers after the below pass, emit actual fmov
* instructions for the slow-but-correct path */
int mapped = base;
map_ssa_to_alias(ctx, &mapped);
- EMIT(fmov, mapped, blank_alu_src, base);
+ EMIT(mov, mapped, blank_alu_src, base);
}
}
void *constant_value = _mesa_hash_table_u64_search(ctx->ssa_constants, ctx->fragment_output + 1);
if (constant_value) {
- midgard_instruction ins = v_fmov(SSA_FIXED_REGISTER(REGISTER_CONSTANT), blank_alu_src, SSA_FIXED_REGISTER(0));
+ midgard_instruction ins = v_mov(SSA_FIXED_REGISTER(REGISTER_CONSTANT), blank_alu_src, SSA_FIXED_REGISTER(0));
attach_constants(ctx, &ins, constant_value, ctx->fragment_output + 1);
emit_mir_instruction(ctx, ins);
}
EMIT(alu_br_compact_cond, midgard_jmp_writeout_op_writeout, TAG_ALU_4, -1, midgard_condition_always);
}
-/* For the blend epilogue, we need to convert the blended fragment vec4 (stored
- * in r0) to a RGBA8888 value by scaling and type converting. We then output it
- * with the int8 analogue to the fragment epilogue */
-
-static void
-emit_blend_epilogue(compiler_context *ctx)
-{
- /* vmul.fmul.none.fulllow hr48, r0, #255 */
-
- midgard_instruction scale = {
- .type = TAG_ALU_4,
- .unit = UNIT_VMUL,
- .inline_constant = _mesa_float_to_half(255.0),
- .ssa_args = {
- .src0 = SSA_FIXED_REGISTER(0),
- .src1 = SSA_UNUSED_0,
- .dest = SSA_FIXED_REGISTER(24),
- .inline_constant = true
- },
- .alu = {
- .op = midgard_alu_op_fmul,
- .reg_mode = midgard_reg_mode_32,
- .dest_override = midgard_dest_override_lower,
- .mask = 0xFF,
- .src1 = vector_alu_srco_unsigned(blank_alu_src),
- .src2 = vector_alu_srco_unsigned(blank_alu_src),
- }
- };
-
- emit_mir_instruction(ctx, scale);
-
- /* vadd.f2u8.pos.low hr0, hr48, #0 */
-
- midgard_vector_alu_src alu_src = blank_alu_src;
- alu_src.half = true;
-
- midgard_instruction f2u8 = {
- .type = TAG_ALU_4,
- .ssa_args = {
- .src0 = SSA_FIXED_REGISTER(24),
- .src1 = SSA_UNUSED_0,
- .dest = SSA_FIXED_REGISTER(0),
- .inline_constant = true
- },
- .alu = {
- .op = midgard_alu_op_f2u8,
- .reg_mode = midgard_reg_mode_16,
- .dest_override = midgard_dest_override_lower,
- .outmod = midgard_outmod_pos,
- .mask = 0xF,
- .src1 = vector_alu_srco_unsigned(alu_src),
- .src2 = vector_alu_srco_unsigned(blank_alu_src),
- }
- };
-
- emit_mir_instruction(ctx, f2u8);
-
- /* vmul.imov.quarter r0, r0, r0 */
-
- midgard_instruction imov_8 = {
- .type = TAG_ALU_4,
- .ssa_args = {
- .src0 = SSA_UNUSED_1,
- .src1 = SSA_FIXED_REGISTER(0),
- .dest = SSA_FIXED_REGISTER(0),
- },
- .alu = {
- .op = midgard_alu_op_imov,
- .reg_mode = midgard_reg_mode_8,
- .dest_override = midgard_dest_override_none,
- .mask = 0xFF,
- .src1 = vector_alu_srco_unsigned(blank_alu_src),
- .src2 = vector_alu_srco_unsigned(blank_alu_src),
- }
- };
-
- /* Emit branch epilogue with the 8-bit move as the source */
-
- emit_mir_instruction(ctx, imov_8);
- EMIT(alu_br_compact_cond, midgard_jmp_writeout_op_writeout, TAG_ALU_4, 0, midgard_condition_always);
-
- emit_mir_instruction(ctx, imov_8);
- EMIT(alu_br_compact_cond, midgard_jmp_writeout_op_writeout, TAG_ALU_4, -1, midgard_condition_always);
-}
-
static midgard_block *
emit_block(compiler_context *ctx, nir_block *block)
{
/* Perform heavy lifting for aliasing */
actualise_ssa_to_alias(ctx);
- midgard_emit_store(ctx, this_block);
midgard_pair_load_store(ctx, this_block);
/* Append fragment shader epilogue (value writeout) */
if (ctx->stage == MESA_SHADER_FRAGMENT) {
if (block == nir_impl_last_block(ctx->func->impl)) {
- if (ctx->is_blend)
- emit_blend_epilogue(ctx);
- else
- emit_fragment_epilogue(ctx);
+ emit_fragment_epilogue(ctx);
}
}
/* TODO: Decide this at runtime */
ctx->uniform_cutoff = 8;
- /* Assign var locations early, so the epilogue can use them if necessary */
-
- nir_assign_var_locations(&nir->outputs, &nir->num_outputs, glsl_type_size);
- nir_assign_var_locations(&nir->inputs, &nir->num_inputs, glsl_type_size);
- nir_assign_var_locations(&nir->uniforms, &nir->num_uniforms, glsl_type_size);
-
/* Initialize at a global (not block) level hash tables */
ctx->ssa_constants = _mesa_hash_table_u64_create(NULL);
- ctx->ssa_varyings = _mesa_hash_table_u64_create(NULL);
ctx->ssa_to_alias = _mesa_hash_table_u64_create(NULL);
ctx->hash_to_temp = _mesa_hash_table_u64_create(NULL);
ctx->sysval_to_id = _mesa_hash_table_u64_create(NULL);
struct exec_list *varyings =
ctx->stage == MESA_SHADER_VERTEX ? &nir->outputs : &nir->inputs;
+ unsigned max_varying = 0;
nir_foreach_variable(var, varyings) {
unsigned loc = var->data.driver_location;
unsigned sz = glsl_type_size(var->type, FALSE);
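+ /* Arrays and matrices span several slots, so give each slot its own
+ * consecutive location */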
for (int c = 0; c < sz; ++c) {
- program->varyings[loc + c] = var->data.location;
+ program->varyings[loc + c] = var->data.location + c;
+ max_varying = MAX2(max_varying, loc + c);
}
}
- /* Lower gl_Position pre-optimisation */
+ /* Lower gl_Position pre-optimisation, but after lowering vars to ssa
+ * (so we don't accidentally duplicate the epilogue since mesa/st has
+ * messed with our I/O quite a bit already) */
+
+ NIR_PASS_V(nir, nir_lower_vars_to_ssa);
if (ctx->stage == MESA_SHADER_VERTEX)
NIR_PASS_V(nir, nir_lower_viewport_transform);
memcpy(program->sysvals, ctx->sysvals, sizeof(ctx->sysvals[0]) * ctx->sysval_count);
program->attribute_count = (ctx->stage == MESA_SHADER_VERTEX) ? nir->num_inputs : 0;
- program->varying_count = (ctx->stage == MESA_SHADER_VERTEX) ? nir->num_outputs : ((ctx->stage == MESA_SHADER_FRAGMENT) ? nir->num_inputs : 0);
+ program->varying_count = max_varying + 1; /* Fencepost off-by-one */
nir_foreach_function(func, nir) {
if (!func->impl)
progress = false;
mir_foreach_block(ctx, block) {
+ progress |= midgard_opt_pos_propagate(ctx, block);
progress |= midgard_opt_copy_prop(ctx, block);
- progress |= midgard_opt_copy_prop_tex(ctx, block);
progress |= midgard_opt_dead_code_eliminate(ctx, block);
}
} while (progress);
+ /* Nested control-flow can result in dead branches at the end of the
+ * block. This messes with our analysis and is just dead code, so cull
+ * them */
+ mir_foreach_block(ctx, block) {
+ midgard_opt_cull_dead_branch(ctx, block);
+ }
+
/* Schedule! */
schedule_program(ctx);
mir_foreach_block(ctx, block) {
util_dynarray_foreach(&block->bundles, midgard_bundle, bundle) {
for (int c = 0; c < bundle->instruction_count; ++c) {
- midgard_instruction *ins = &bundle->instructions[c];
+ midgard_instruction *ins = bundle->instructions[c];
if (!midgard_is_branch_unit(ins->unit)) continue;
/* Determine the block we're jumping to */
int target_number = ins->branch.target_block;
- /* Report the destination tag. Discards don't need this */
+ /* Report the destination tag */
int dest_tag = is_discard ? 0 : midgard_get_first_tag_from_block(ctx, target_number);
- /* Count up the number of quadwords we're jumping over. That is, the number of quadwords in each of the blocks between (br_block_idx, target_number) */
+ /* Count up the number of quadwords we're
+ * jumping over, i.e. the quadwords in every
+ * block between br_block_idx and target_number */
+
int quadword_offset = 0;
if (is_discard) {
int current_bundle = 0;
+ /* Midgard prefetches instruction types, so during emission we
+ * need to look ahead. Unless this is the last instruction, in
+ * which case we return 1. Or if this is the second to last and the
+ * last is an ALU, then it's also 1... */
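+ /* e.g. for bundles [TEX, LDST, ALU]: TEX looks ahead to the load/store
+ * tag, LDST reports 1 because the next bundle is the trailing ALU, and
+ * the final ALU, being last, also reports 1. */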
+
mir_foreach_block(ctx, block) {
- util_dynarray_foreach(&block->bundles, midgard_bundle, bundle) {
+ mir_foreach_bundle_in_block(block, bundle) {
int lookahead = 1;
if (current_bundle + 1 < bundle_count) {