#include "main/imports.h"
#include "compiler/nir/nir_builder.h"
#include "util/half_float.h"
+#include "util/u_math.h"
#include "util/u_debug.h"
#include "util/u_dynarray.h"
#include "util/list.h"
* driver seems to do it that way */
#define EMIT(op, ...) emit_mir_instruction(ctx, v_##op(__VA_ARGS__));
-#define SWIZZLE_XYZW SWIZZLE(COMPONENT_X, COMPONENT_Y, COMPONENT_Z, COMPONENT_W)
#define M_LOAD_STORE(name, rname, uname) \
static midgard_instruction m_##name(unsigned ssa, unsigned address) { \
* the corresponding Midgard source */
static midgard_vector_alu_src
-vector_alu_modifiers(nir_alu_src *src, bool is_int)
+vector_alu_modifiers(nir_alu_src *src, bool is_int, unsigned broadcast_count)
{
if (!src) return blank_alu_src;
+ /* If a broadcast count is given, adjust the swizzle so the last
+ * valid channel is broadcast to the remaining channels, letting
+ * reduced ops like ball2/3 behave as vec4
+ */
+
+ if (broadcast_count) {
+ uint8_t last_component = src->swizzle[broadcast_count - 1];
+
+ for (unsigned c = broadcast_count; c < NIR_MAX_VEC_COMPONENTS; ++c) {
+ src->swizzle[c] = last_component;
+ }
+ }
+
midgard_vector_alu_src alu_src = {
.rep_low = 0,
.rep_high = 0,
}
}
-static void
-midgard_nir_assign_sysval_body(compiler_context *ctx, nir_instr *instr)
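+/* Maps a NIR destination to a compiler-internal index: SSA values keep
+ * their own index, while registers are offset past the SSA space */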
+static unsigned
+nir_dest_index(compiler_context *ctx, nir_dest *dst)
{
+ if (dst->is_ssa)
+ return dst->ssa.index;
+ else {
+ assert(!dst->reg.indirect);
+ return ctx->func->impl->ssa_alloc + dst->reg.reg->index;
+ }
+}
+
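+/* Returns the sysval loaded by an instruction, or -1 if it is not a
+ * sysval load. Optionally writes the NIR index of the destination to
+ * *dest */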
+static int
+sysval_for_instr(compiler_context *ctx, nir_instr *instr,
+ unsigned *dest)
+{
+ nir_intrinsic_instr *intr;
+ nir_dest *dst = NULL;
+ nir_tex_instr *tex;
int sysval = -1;
- if (instr->type == nir_instr_type_intrinsic) {
- nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);
+ switch (instr->type) {
+ case nir_instr_type_intrinsic:
+ intr = nir_instr_as_intrinsic(instr);
sysval = midgard_nir_sysval_for_intrinsic(intr);
+ dst = &intr->dest;
+ break;
+ case nir_instr_type_tex:
+ tex = nir_instr_as_tex(instr);
+ if (tex->op != nir_texop_txs)
+ break;
+
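+ /* Pack the texture handle, dimensionality (minus the array
+ * component) and arrayness into the sysval ID */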
+ sysval = PAN_SYSVAL(TEXTURE_SIZE,
+ PAN_TXS_SYSVAL_ID(tex->texture_index,
+ nir_tex_instr_dest_size(tex) -
+ (tex->is_array ? 1 : 0),
+ tex->is_array));
+ dst = &tex->dest;
+ break;
+ default:
+ break;
}
+ if (dest && dst)
+ *dest = nir_dest_index(ctx, dst);
+
+ return sysval;
+}
+
+static void
+midgard_nir_assign_sysval_body(compiler_context *ctx, nir_instr *instr)
+{
+ int sysval;
+
+ sysval = sysval_for_instr(ctx, instr, NULL);
if (sysval < 0)
return;
NIR_PASS(progress, nir, midgard_nir_lower_fdot2);
NIR_PASS(progress, nir, nir_lower_idiv);
- nir_lower_tex_options lower_tex_options = {
- .lower_rect = true
+ nir_lower_tex_options lower_tex_1st_pass_options = {
+ .lower_rect = true,
+ .lower_txp = ~0
};
- NIR_PASS(progress, nir, nir_lower_tex, &lower_tex_options);
+ nir_lower_tex_options lower_tex_2nd_pass_options = {
+ .lower_txs_lod = true,
+ };
+
+ NIR_PASS(progress, nir, nir_lower_tex, &lower_tex_1st_pass_options);
+ NIR_PASS(progress, nir, nir_lower_tex, &lower_tex_2nd_pass_options);
do {
progress = false;
nir_var_shader_out |
nir_var_function_temp);
- /* TODO: Enable vectorize when merged upstream */
- // NIR_PASS(progress, nir, nir_opt_vectorize);
+ NIR_PASS(progress, nir, nir_opt_vectorize);
} while (progress);
/* Must be run at the end to prevent creation of fsin/fcos ops */
}
}
-static unsigned
-nir_dest_index(compiler_context *ctx, nir_dest *dst)
-{
- if (dst->is_ssa)
- return dst->ssa.index;
- else {
- assert(!dst->reg.indirect);
- return ctx->func->impl->ssa_alloc + dst->reg.reg->index;
- }
-}
-
static unsigned
nir_alu_src_index(compiler_context *ctx, nir_alu_src *src)
{
.unit = for_branch ? UNIT_SMUL : UNIT_SADD,
.ssa_args = {
-
.src0 = condition,
.src1 = condition,
.dest = SSA_FIXED_REGISTER(31),
},
+
.alu = {
.op = midgard_alu_op_iand,
- .outmod = midgard_outmod_int,
+ .outmod = midgard_outmod_int_wrap,
.reg_mode = midgard_reg_mode_32,
.dest_override = midgard_dest_override_none,
.mask = (0x3 << 6), /* w */
},
.alu = {
.op = midgard_alu_op_iand,
- .outmod = midgard_outmod_int,
+ .outmod = midgard_outmod_int_wrap,
.reg_mode = midgard_reg_mode_32,
.dest_override = midgard_dest_override_none,
- .mask = expand_writemask((1 << nr_comp) - 1),
+ .mask = expand_writemask(mask_of(nr_comp)),
.src1 = vector_alu_srco_unsigned(alu_src),
.src2 = vector_alu_srco_unsigned(alu_src)
},
},
.alu = {
.op = midgard_alu_op_imov,
- .outmod = midgard_outmod_int,
+ .outmod = midgard_outmod_int_wrap,
.reg_mode = midgard_reg_mode_32,
.dest_override = midgard_dest_override_none,
.mask = (0x3 << 6), /* w */
case nir_op_##nir: \
op = midgard_alu_op_##_op; \
break;
+
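+/* Analogous to ALU_CASE, for ops checking a reduced channel count; sets
+ * broadcast_swizzle so the last valid channel gets replicated */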
+#define ALU_CASE_BCAST(nir, _op, count) \
+ case nir_op_##nir: \
+ op = midgard_alu_op_##_op; \
+ broadcast_swizzle = count; \
+ break;
static bool
nir_is_fzero_constant(nir_src src)
{
unsigned op;
+ /* Number of components valid to check for the instruction (the rest
+ * will be forced to the last component), or 0 to use the swizzle
+ * as-is. Relevant because ball-type instructions have a channel
+ * count in NIR but are always vec4 on Midgard */
+
+ unsigned broadcast_swizzle = 0;
+
switch (instr->op) {
ALU_CASE(fadd, fadd);
ALU_CASE(fmul, fmul);
ALU_CASE(fexp2, fexp2);
ALU_CASE(flog2, flog2);
- ALU_CASE(f2i32, f2i);
- ALU_CASE(f2u32, f2u);
- ALU_CASE(i2f32, i2f);
- ALU_CASE(u2f32, u2f);
+ ALU_CASE(f2i32, f2i_rtz);
+ ALU_CASE(f2u32, f2u_rtz);
+ ALU_CASE(i2f32, i2f_rtz);
+ ALU_CASE(u2f32, u2f_rtz);
ALU_CASE(fsin, fsin);
ALU_CASE(fcos, fcos);
+ /* inot lowers to inor with an implicit #0 as the second source */
+ ALU_CASE(inot, inor);
ALU_CASE(iand, iand);
ALU_CASE(ior, ior);
ALU_CASE(ixor, ixor);
- ALU_CASE(inot, inand);
ALU_CASE(ishl, ishl);
ALU_CASE(ishr, iasr);
ALU_CASE(ushr, ilsr);
- ALU_CASE(b32all_fequal2, fball_eq);
- ALU_CASE(b32all_fequal3, fball_eq);
+ ALU_CASE_BCAST(b32all_fequal2, fball_eq, 2);
+ ALU_CASE_BCAST(b32all_fequal3, fball_eq, 3);
ALU_CASE(b32all_fequal4, fball_eq);
- ALU_CASE(b32any_fnequal2, fbany_neq);
- ALU_CASE(b32any_fnequal3, fbany_neq);
+ ALU_CASE_BCAST(b32any_fnequal2, fbany_neq, 2);
+ ALU_CASE_BCAST(b32any_fnequal3, fbany_neq, 3);
ALU_CASE(b32any_fnequal4, fbany_neq);
- ALU_CASE(b32all_iequal2, iball_eq);
- ALU_CASE(b32all_iequal3, iball_eq);
+ ALU_CASE_BCAST(b32all_iequal2, iball_eq, 2);
+ ALU_CASE_BCAST(b32all_iequal3, iball_eq, 3);
ALU_CASE(b32all_iequal4, iball_eq);
- ALU_CASE(b32any_inequal2, ibany_neq);
- ALU_CASE(b32any_inequal3, ibany_neq);
+ ALU_CASE_BCAST(b32any_inequal2, ibany_neq, 2);
+ ALU_CASE_BCAST(b32any_inequal3, ibany_neq, 3);
ALU_CASE(b32any_inequal4, ibany_neq);
/* Source mods will be shoved in later */
}
/* Midgard can perform certain modifiers on output of an ALU op */
- midgard_outmod outmod =
- midgard_is_integer_out_op(op) ? midgard_outmod_int :
- instr->dest.saturate ? midgard_outmod_sat : midgard_outmod_none;
+ unsigned outmod;
- if (instr->op == nir_op_fsat)
- outmod = midgard_outmod_sat;
+ if (midgard_is_integer_out_op(op)) {
+ outmod = midgard_outmod_int_wrap;
+ } else {
+ bool sat = instr->dest.saturate || instr->op == nir_op_fsat;
+ outmod = sat ? midgard_outmod_sat : midgard_outmod_none;
+ }
/* fmax(a, 0.0) can turn into a .pos modifier as an optimization */
.outmod = outmod,
/* Writemask only valid for non-SSA NIR */
- .mask = expand_writemask((1 << nr_components) - 1),
+ .mask = expand_writemask(mask_of(nr_components)),
- .src1 = vector_alu_srco_unsigned(vector_alu_modifiers(nirmods[0], is_int)),
- .src2 = vector_alu_srco_unsigned(vector_alu_modifiers(nirmods[1], is_int)),
+ .src1 = vector_alu_srco_unsigned(vector_alu_modifiers(nirmods[0], is_int, broadcast_swizzle)),
+ .src2 = vector_alu_srco_unsigned(vector_alu_modifiers(nirmods[1], is_int, broadcast_swizzle)),
};
/* Apply writemask if non-SSA, keeping in mind that we can't write to components that don't exist */
memcpy(original_swizzle, nirmods[0]->swizzle, sizeof(nirmods[0]->swizzle));
for (int i = 0; i < nr_components; ++i) {
- ins.alu.mask = (0x3) << (2 * i); /* Mask the associated component */
+ /* Mask the associated component, dropping the
+ * instruction if needed */
+
+ ins.alu.mask = (0x3) << (2 * i);
+ ins.alu.mask &= alu.mask;
+
+ if (!ins.alu.mask)
+ continue;
for (int j = 0; j < 4; ++j)
nirmods[0]->swizzle[j] = original_swizzle[i]; /* Pull from the correct component */
- ins.alu.src1 = vector_alu_srco_unsigned(vector_alu_modifiers(nirmods[0], is_int));
+ ins.alu.src1 = vector_alu_srco_unsigned(vector_alu_modifiers(nirmods[0], is_int, broadcast_swizzle));
emit_mir_instruction(ctx, ins);
}
} else {
#undef ALU_CASE
+/* Uniforms and UBOs use a shared code path, as uniforms are just (slightly
+ * optimized) versions of UBO #0 */
+
static void
-emit_uniform_read(compiler_context *ctx, unsigned dest, unsigned offset, nir_src *indirect_offset)
+emit_ubo_read(
+ compiler_context *ctx,
+ unsigned dest,
+ unsigned offset,
+ nir_src *indirect_offset,
+ unsigned index)
{
/* TODO: half-floats */
- if (!indirect_offset && offset < ctx->uniform_cutoff) {
+ if (!indirect_offset && offset < ctx->uniform_cutoff && index == 0) {
/* Fast path: For the first 16 uniforms, direct accesses are
* 0-cycle, since they're just a register fetch in the usual
* case. So, we alias the registers while we're still in
if (indirect_offset) {
emit_indirect_offset(ctx, indirect_offset);
- ins.load_store.unknown = 0x8700; /* xxx: what is this? */
+ ins.load_store.unknown = 0x8700 | index; /* xxx: what is this? */
} else {
- ins.load_store.unknown = 0x1E00; /* xxx: what is this? */
+ ins.load_store.unknown = 0x1E00 | index; /* xxx: what is this? */
}
+
emit_mir_instruction(ctx, ins);
}
}
static void
-emit_sysval_read(compiler_context *ctx, nir_intrinsic_instr *instr)
+emit_varying_read(
+ compiler_context *ctx,
+ unsigned dest, unsigned offset,
+ unsigned nr_comp, unsigned component,
+ nir_src *indirect_offset, nir_alu_type type)
{
- /* First, pull out the destination */
- unsigned dest = nir_dest_index(ctx, &instr->dest);
+ /* XXX: Half-floats? */
+ /* TODO: swizzle, mask */
+
+ midgard_instruction ins = m_ld_vary_32(dest, offset);
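+ /* Mask off unused components and start the swizzle at the
+ * requested component */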
+ ins.load_store.mask = mask_of(nr_comp);
+ ins.load_store.swizzle = SWIZZLE_XYZW >> (2 * component);
+
+ midgard_varying_parameter p = {
+ .is_varying = 1,
+ .interpolation = midgard_interp_default,
+ .flat = /*var->data.interpolation == INTERP_MODE_FLAT*/ 0
+ };
+
+ unsigned u;
+ memcpy(&u, &p, sizeof(p));
+ ins.load_store.varying_parameters = u;
- /* Now, figure out which uniform this is */
- int sysval = midgard_nir_sysval_for_intrinsic(instr);
+ if (indirect_offset) {
+ /* We need to add in the dynamic index, moved to r27.w */
+ emit_indirect_offset(ctx, indirect_offset);
+ ins.load_store.unknown = 0x79e; /* xxx: what is this? */
+ } else {
+ /* Just a direct load */
+ ins.load_store.unknown = 0x1e9e; /* xxx: what is this? */
+ }
+
+ /* Use the type appropriate load */
+ switch (type) {
+ case nir_type_uint:
+ case nir_type_bool:
+ ins.load_store.op = midgard_op_ld_vary_32u;
+ break;
+ case nir_type_int:
+ ins.load_store.op = midgard_op_ld_vary_32i;
+ break;
+ case nir_type_float:
+ ins.load_store.op = midgard_op_ld_vary_32;
+ break;
+ default:
+ unreachable("Attempted to load unknown type");
+ break;
+ }
+
+ emit_mir_instruction(ctx, ins);
+}
+
+static void
+emit_sysval_read(compiler_context *ctx, nir_instr *instr)
+{
+ unsigned dest;
+ /* Figure out which uniform this is */
+ int sysval = sysval_for_instr(ctx, instr, &dest);
void *val = _mesa_hash_table_u64_search(ctx->sysval_to_id, sysval);
/* Sysvals are prefix uniforms */
unsigned uniform = ((uintptr_t) val) - 1;
/* Emit the read itself -- this is never indirect */
- emit_uniform_read(ctx, dest, uniform, NULL);
+ emit_ubo_read(ctx, dest, uniform, NULL, 0);
}
/* Reads RGBA8888 value from the tilebuffer and converts to a RGBA32F register,
.inline_constant = true
},
.alu = {
- .op = midgard_alu_op_u2f,
+ .op = midgard_alu_op_u2f_rtz,
.reg_mode = midgard_reg_mode_16,
.dest_override = midgard_dest_override_none,
.mask = 0xF,
static void
emit_intrinsic(compiler_context *ctx, nir_intrinsic_instr *instr)
{
- unsigned offset, reg;
+ unsigned offset = 0, reg;
switch (instr->intrinsic) {
case nir_intrinsic_discard_if:
}
case nir_intrinsic_load_uniform:
- case nir_intrinsic_load_input:
- offset = nir_intrinsic_base(instr);
+ case nir_intrinsic_load_ubo:
+ case nir_intrinsic_load_input: {
+ bool is_uniform = instr->intrinsic == nir_intrinsic_load_uniform;
+ bool is_ubo = instr->intrinsic == nir_intrinsic_load_ubo;
+
+ /* Get the base type of the intrinsic */
+ /* TODO: Infer type? Does it matter? */
+ nir_alu_type t =
+ is_ubo ? nir_type_uint : nir_intrinsic_type(instr);
+ t = nir_alu_type_get_base_type(t);
+
+ if (!is_ubo) {
+ offset = nir_intrinsic_base(instr);
+ }
unsigned nr_comp = nir_intrinsic_dest_components(instr);
- bool direct = nir_src_is_const(instr->src[0]);
- if (direct) {
- offset += nir_src_as_uint(instr->src[0]);
- }
+ nir_src *src_offset = nir_get_io_offset_src(instr);
+
+ bool direct = nir_src_is_const(*src_offset);
+
+ if (direct)
+ offset += nir_src_as_uint(*src_offset);
/* We may need to apply a fractional offset */
int component = instr->intrinsic == nir_intrinsic_load_input ?
nir_intrinsic_component(instr) : 0;
reg = nir_dest_index(ctx, &instr->dest);
- if (instr->intrinsic == nir_intrinsic_load_uniform && !ctx->is_blend) {
- emit_uniform_read(ctx, reg, ctx->sysval_count + offset, !direct ? &instr->src[0] : NULL);
- } else if (ctx->stage == MESA_SHADER_FRAGMENT && !ctx->is_blend) {
- /* XXX: Half-floats? */
- /* TODO: swizzle, mask */
+ if (is_uniform && !ctx->is_blend) {
+ emit_ubo_read(ctx, reg, ctx->sysval_count + offset, !direct ? &instr->src[0] : NULL, 0);
+ } else if (is_ubo) {
+ nir_src index = instr->src[0];
- midgard_instruction ins = m_ld_vary_32(reg, offset);
- ins.load_store.mask = (1 << nr_comp) - 1;
- ins.load_store.swizzle = SWIZZLE_XYZW >> (2 * component);
+ /* We don't yet support indirect UBOs. For indirect
+ * block numbers (if that's even possible), we don't
+ * know enough about the hardware yet. For indirect
+ * sources, we know what's required, but NIR needs
+ * lowering support to handle them correctly with
+ * respect to 128-bit reads */
- midgard_varying_parameter p = {
- .is_varying = 1,
- .interpolation = midgard_interp_default,
- .flat = /*var->data.interpolation == INTERP_MODE_FLAT*/ 0
- };
+ assert(nir_src_is_const(index));
+ assert(nir_src_is_const(*src_offset));
- unsigned u;
- memcpy(&u, &p, sizeof(p));
- ins.load_store.varying_parameters = u;
+ /* TODO: Alignment */
+ assert((offset & 0xF) == 0);
- if (direct) {
- /* We have the offset totally ready */
- ins.load_store.unknown = 0x1e9e; /* xxx: what is this? */
- } else {
- /* We have it partially ready, but we need to
- * add in the dynamic index, moved to r27.w */
- emit_indirect_offset(ctx, &instr->src[0]);
- ins.load_store.unknown = 0x79e; /* xxx: what is this? */
- }
-
- emit_mir_instruction(ctx, ins);
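+ /* Uniforms occupy UBO slot 0 internally, so user UBO
+ * indices are shifted up by one */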
+ uint32_t uindex = nir_src_as_uint(index) + 1;
+ emit_ubo_read(ctx, reg, offset / 16, NULL, uindex);
+ } else if (ctx->stage == MESA_SHADER_FRAGMENT && !ctx->is_blend) {
+ emit_varying_read(ctx, reg, offset, nr_comp, component, !direct ? &instr->src[0] : NULL, t);
} else if (ctx->is_blend) {
/* For blend shaders, load the input color, which is
* preloaded to r0 */
- midgard_instruction move = v_fmov(reg, blank_alu_src, SSA_FIXED_REGISTER(0));
+ midgard_instruction move = v_mov(reg, blank_alu_src, SSA_FIXED_REGISTER(0));
emit_mir_instruction(ctx, move);
} else if (ctx->stage == MESA_SHADER_VERTEX) {
midgard_instruction ins = m_ld_attr_32(reg, offset);
ins.load_store.unknown = 0x1E1E; /* XXX: What is this? */
- ins.load_store.mask = (1 << nr_comp) - 1;
+ ins.load_store.mask = mask_of(nr_comp);
+
+ /* Use the type appropriate load */
+ switch (t) {
+ case nir_type_uint:
+ case nir_type_bool:
+ ins.load_store.op = midgard_op_ld_attr_32u;
+ break;
+ case nir_type_int:
+ ins.load_store.op = midgard_op_ld_attr_32i;
+ break;
+ case nir_type_float:
+ ins.load_store.op = midgard_op_ld_attr_32;
+ break;
+ default:
+ unreachable("Attempted to load unknown type");
+ break;
+ }
+
emit_mir_instruction(ctx, ins);
} else {
DBG("Unknown load\n");
}
break;
+ }
case nir_intrinsic_load_output:
assert(nir_src_is_const(instr->src[0]));
/* Blend constants are embedded directly in the shader and
* patched in, so we use some magic routing */
- midgard_instruction ins = v_fmov(SSA_FIXED_REGISTER(REGISTER_CONSTANT), blank_alu_src, reg);
+ midgard_instruction ins = v_mov(SSA_FIXED_REGISTER(REGISTER_CONSTANT), blank_alu_src, reg);
ins.has_constants = true;
ins.has_blend_constant = true;
emit_mir_instruction(ctx, ins);
* framebuffer writeout dance. TODO: Defer
* writes */
- midgard_instruction move = v_fmov(reg, blank_alu_src, SSA_FIXED_REGISTER(0));
+ midgard_instruction move = v_mov(reg, blank_alu_src, SSA_FIXED_REGISTER(0));
emit_mir_instruction(ctx, move);
/* Save the index we're writing to for later reference
* minus the base of 26. E.g. write into r27 and then
* call st_vary(1) */
- midgard_instruction ins = v_fmov(reg, blank_alu_src, SSA_FIXED_REGISTER(26));
+ midgard_instruction ins = v_mov(reg, blank_alu_src, SSA_FIXED_REGISTER(26));
emit_mir_instruction(ctx, ins);
- /* We should have been vectorized. That also lets us
- * ignore the mask. because the mask component on
- * st_vary is (as far as I can tell) ignored [the blob
- * sets it to zero] */
- assert(nir_intrinsic_component(instr) == 0);
+ /* We should have been vectorized, though we don't
+ * currently check that st_vary is emitted only once
+ * per slot (this is relevant, since there's not a mask
+ * parameter available on the store [set to 0 by the
+ * blob]). We do respect the component by adjusting the
+ * swizzle. */
+
+ unsigned component = nir_intrinsic_component(instr);
midgard_instruction st = m_st_vary_32(SSA_FIXED_REGISTER(0), offset);
st.load_store.unknown = 0x1E9E; /* XXX: What is this? */
+ st.load_store.swizzle = SWIZZLE_XYZW << (2 * component);
emit_mir_instruction(ctx, st);
} else {
DBG("Unknown store\n");
case nir_intrinsic_load_viewport_scale:
case nir_intrinsic_load_viewport_offset:
- emit_sysval_read(ctx, instr);
+ emit_sysval_read(ctx, &instr->instr);
break;
default:
midgard_tex_format(enum glsl_sampler_dim dim)
{
switch (dim) {
+ case GLSL_SAMPLER_DIM_1D:
+ case GLSL_SAMPLER_DIM_BUF:
+ return MALI_TEX_1D;
+
case GLSL_SAMPLER_DIM_2D:
case GLSL_SAMPLER_DIM_EXTERNAL:
- return TEXTURE_2D;
+ return MALI_TEX_2D;
case GLSL_SAMPLER_DIM_3D:
- return TEXTURE_3D;
+ return MALI_TEX_3D;
case GLSL_SAMPLER_DIM_CUBE:
- return TEXTURE_CUBE;
+ return MALI_TEX_CUBE;
default:
DBG("Unknown sampler dim type\n");
}
}
+/* Tries to attach an explicit LOD / bias as a constant. Returns whether this
+ * was successful */
+
+static bool
+pan_attach_constant_bias(
+ compiler_context *ctx,
+ nir_src lod,
+ midgard_texture_word *word)
+{
+ /* To attach as constant, it has to *be* constant */
+
+ if (!nir_src_is_const(lod))
+ return false;
+
+ float f = nir_src_as_float(lod);
+
+ /* Break into fixed-point */
+ signed lod_int = f;
+ float lod_frac = f - lod_int;
+
+ /* Carry over negative fractions */
+ if (lod_frac < 0.0) {
+ lod_int--;
+ lod_frac += 1.0;
+ }
+
+ /* Encode */
+ word->bias = float_to_ubyte(lod_frac);
+ word->bias_int = lod_int;
+
+ return true;
+}
+
+static enum mali_sampler_type
+midgard_sampler_type(nir_alu_type t)
+{
+ switch (nir_alu_type_get_base_type(t)) {
+ case nir_type_float:
+ return MALI_SAMPLER_FLOAT;
+ case nir_type_int:
+ return MALI_SAMPLER_SIGNED;
+ case nir_type_uint:
+ return MALI_SAMPLER_UNSIGNED;
+ default:
+ unreachable("Unknown sampler type");
+ }
+}
+
static void
-emit_tex(compiler_context *ctx, nir_tex_instr *instr)
+emit_texop_native(compiler_context *ctx, nir_tex_instr *instr,
+ unsigned midgard_texop)
{
/* TODO */
//assert (!instr->sampler);
//assert (!instr->texture_array_size);
- assert (instr->op == nir_texop_tex);
/* Allocate registers via a round robin scheme to alternate between the two registers */
int reg = ctx->texture_op_count & 1;
int texture_index = instr->texture_index;
int sampler_index = texture_index;
- for (unsigned i = 0; i < instr->num_srcs; ++i) {
- switch (instr->src[i].src_type) {
- case nir_tex_src_coord: {
- int index = nir_src_index(ctx, &instr->src[i].src);
+ /* No helper to build texture words -- we do it all here */
+ midgard_instruction ins = {
+ .type = TAG_TEXTURE_4,
+ .texture = {
+ .op = midgard_texop,
+ .format = midgard_tex_format(instr->sampler_dim),
+ .texture_handle = texture_index,
+ .sampler_handle = sampler_index,
- midgard_vector_alu_src alu_src = blank_alu_src;
+ /* TODO: Regalloc it in */
+ .swizzle = SWIZZLE_XYZW,
+ .mask = 0xF,
- int reg = SSA_FIXED_REGISTER(REGISTER_TEXTURE_BASE + in_reg);
+ /* TODO: half */
+ .in_reg_full = 1,
+ .out_full = 1,
+
+ .sampler_type = midgard_sampler_type(instr->dest_type),
+ }
+ };
+
+ for (unsigned i = 0; i < instr->num_srcs; ++i) {
+ int reg = SSA_FIXED_REGISTER(REGISTER_TEXTURE_BASE + in_reg);
+ int index = nir_src_index(ctx, &instr->src[i].src);
+ int nr_comp = nir_src_num_components(instr->src[i].src);
+ midgard_vector_alu_src alu_src = blank_alu_src;
+ switch (instr->src[i].src_type) {
+ case nir_tex_src_coord: {
if (instr->sampler_dim == GLSL_SAMPLER_DIM_CUBE) {
+ /* texelFetch is undefined on samplerCube */
+ assert(midgard_texop != TEXTURE_OP_TEXEL_FETCH);
+
/* For cubemaps, we need to load coords into
* special r27, and then use a special ld/st op
- * to copy into the texture register */
+ * to select the face and copy the xy into the
+ * texture register */
alu_src.swizzle = SWIZZLE(COMPONENT_X, COMPONENT_Y, COMPONENT_Z, COMPONENT_X);
- midgard_instruction move = v_fmov(index, alu_src, SSA_FIXED_REGISTER(27));
+ midgard_instruction move = v_mov(index, alu_src, SSA_FIXED_REGISTER(27));
emit_mir_instruction(ctx, move);
midgard_instruction st = m_st_cubemap_coords(reg, 0);
st.load_store.unknown = 0x24; /* XXX: What is this? */
- st.load_store.mask = 0x3; /* xy? */
+ st.load_store.mask = 0x3; /* xy */
st.load_store.swizzle = alu_src.swizzle;
emit_mir_instruction(ctx, st);
+ ins.texture.in_reg_swizzle = swizzle_of(2);
} else {
- alu_src.swizzle = SWIZZLE(COMPONENT_X, COMPONENT_Y, COMPONENT_X, COMPONENT_X);
-
- midgard_instruction ins = v_fmov(index, alu_src, reg);
- emit_mir_instruction(ctx, ins);
+ ins.texture.in_reg_swizzle = alu_src.swizzle = swizzle_of(nr_comp);
+
+ midgard_instruction mov = v_mov(index, alu_src, reg);
+ mov.alu.mask = expand_writemask(mask_of(nr_comp));
+ emit_mir_instruction(ctx, mov);
+
+ if (midgard_texop == TEXTURE_OP_TEXEL_FETCH) {
+ /* Texel fetch opcodes care about the
+ * values of z and w, so we actually
+ * need to spill into a second register
+ * for a texel fetch with register bias
+ * (for non-2D). TODO: Implement that
+ */
+
+ assert(instr->sampler_dim == GLSL_SAMPLER_DIM_2D);
+
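+ /* Fill the remaining components with an (implicitly
+ * zero) inline constant */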
+ midgard_instruction zero = v_mov(index, alu_src, reg);
+ zero.ssa_args.inline_constant = true;
+ zero.ssa_args.src1 = SSA_FIXED_REGISTER(REGISTER_CONSTANT);
+ zero.has_constants = true;
+ zero.alu.mask = ~mov.alu.mask;
+ emit_mir_instruction(ctx, zero);
+
+ ins.texture.in_reg_swizzle = SWIZZLE_XYZZ;
+ } else {
+ /* Non-texel fetch doesn't need that
+ * nonsense. However we do use the Z
+ * for array indexing */
+ bool is_3d = instr->sampler_dim == GLSL_SAMPLER_DIM_3D;
+ ins.texture.in_reg_swizzle = is_3d ? SWIZZLE_XYZZ : SWIZZLE_XYXZ;
+ }
}
break;
}
- default: {
- DBG("Unknown source type\n");
- //assert(0);
- break;
- }
- }
- }
+ case nir_tex_src_bias:
+ case nir_tex_src_lod: {
+ /* Try as a constant if we can */
- /* No helper to build texture words -- we do it all here */
- midgard_instruction ins = {
- .type = TAG_TEXTURE_4,
- .texture = {
- .op = TEXTURE_OP_NORMAL,
- .format = midgard_tex_format(instr->sampler_dim),
- .texture_handle = texture_index,
- .sampler_handle = sampler_index,
+ bool is_txf = midgard_texop == TEXTURE_OP_TEXEL_FETCH;
+ if (!is_txf && pan_attach_constant_bias(ctx, instr->src[i].src, &ins.texture))
+ break;
- /* TODO: Don't force xyzw */
- .swizzle = SWIZZLE(COMPONENT_X, COMPONENT_Y, COMPONENT_Z, COMPONENT_W),
- .mask = 0xF,
+ /* Otherwise we use a register. To keep RA simple, we
+ * put the bias/LOD into the w component of the input
+ * source, which is otherwise in xy */
- /* TODO: half */
- //.in_reg_full = 1,
- .out_full = 1,
+ alu_src.swizzle = SWIZZLE_XXXX;
- .filter = 1,
+ midgard_instruction mov = v_mov(index, alu_src, reg);
+ mov.alu.mask = expand_writemask(1 << COMPONENT_W);
+ emit_mir_instruction(ctx, mov);
- /* Always 1 */
- .unknown7 = 1,
+ ins.texture.lod_register = true;
- /* Assume we can continue; hint it out later */
- .cont = 1,
+ midgard_tex_register_select sel = {
+ .select = in_reg,
+ .full = 1,
+
+ /* w */
+ .component_lo = 1,
+ .component_hi = 1
+ };
+
+ uint8_t packed;
+ memcpy(&packed, &sel, sizeof(packed));
+ ins.texture.bias = packed;
+
+ break;
+ }
+
+ default:
+ unreachable("Unknown texture source type\n");
}
- };
+ }
/* Set registers to read and write from the same place */
ins.texture.in_reg_select = in_reg;
ins.texture.out_reg_select = out_reg;
- /* TODO: Dynamic swizzle input selection, half-swizzles? */
- if (instr->sampler_dim == GLSL_SAMPLER_DIM_3D) {
- ins.texture.in_reg_swizzle_right = COMPONENT_X;
- ins.texture.in_reg_swizzle_left = COMPONENT_Y;
- //ins.texture.in_reg_swizzle_third = COMPONENT_Z;
- } else {
- ins.texture.in_reg_swizzle_left = COMPONENT_X;
- ins.texture.in_reg_swizzle_right = COMPONENT_Y;
- //ins.texture.in_reg_swizzle_third = COMPONENT_X;
- }
-
emit_mir_instruction(ctx, ins);
/* Simultaneously alias the destination and emit a move for it. The move will be eliminated if possible */
alias_ssa(ctx, o_index, SSA_FIXED_REGISTER(o_reg));
ctx->texture_index[reg] = o_index;
- midgard_instruction ins2 = v_fmov(SSA_FIXED_REGISTER(o_reg), blank_alu_src, o_index);
+ midgard_instruction ins2 = v_mov(SSA_FIXED_REGISTER(o_reg), blank_alu_src, o_index);
emit_mir_instruction(ctx, ins2);
/* Used for .cont and .last hinting */
ctx->texture_op_count++;
}
+static void
+emit_tex(compiler_context *ctx, nir_tex_instr *instr)
+{
+ /* Fixup op, since only textureLod is permitted in VS but NIR can give
+ * generic tex in some cases (which confuses the hardware) */
+
+ bool is_vertex = ctx->stage == MESA_SHADER_VERTEX;
+
+ if (is_vertex && instr->op == nir_texop_tex)
+ instr->op = nir_texop_txl;
+
+ switch (instr->op) {
+ case nir_texop_tex:
+ case nir_texop_txb:
+ emit_texop_native(ctx, instr, TEXTURE_OP_NORMAL);
+ break;
+ case nir_texop_txl:
+ emit_texop_native(ctx, instr, TEXTURE_OP_LOD);
+ break;
+ case nir_texop_txf:
+ emit_texop_native(ctx, instr, TEXTURE_OP_TEXEL_FETCH);
+ break;
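+ /* txs reads the texture size, which is handled via the
+ * sysval mechanism rather than an actual texture op */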
+ case nir_texop_txs:
+ emit_sysval_read(ctx, &instr->instr);
+ break;
+ default:
+ unreachable("Unhanlded texture op");
+ }
+}
+
static void
emit_jump(compiler_context *ctx, nir_jump_instr *instr)
{
unsigned scratch = alu->ssa_args.dest;
if (entry) {
- midgard_instruction ins = v_fmov(SSA_FIXED_REGISTER(REGISTER_CONSTANT), blank_alu_src, scratch);
+ midgard_instruction ins = v_mov(SSA_FIXED_REGISTER(REGISTER_CONSTANT), blank_alu_src, scratch);
attach_constants(ctx, &ins, entry, alu->ssa_args.src1 + 1);
/* Force a break XXX Defer r31 writes */
return progress;
}
+/* Dead code elimination for branches at the end of a block - only one branch
+ * per block is legal semantically */
+
+static void
+midgard_opt_cull_dead_branch(compiler_context *ctx, midgard_block *block)
+{
+ bool branched = false;
+
+ mir_foreach_instr_in_block_safe(block, ins) {
+ if (!midgard_is_branch_unit(ins->unit)) continue;
+
+ /* We ignore prepacked branches since the fragment epilogue is
+ * just generally special */
+ if (ins->prepacked_branch) continue;
+
+ /* Discards are similarly special and may not correspond to the
+ * end of a block */
+
+ if (ins->branch.target_type == TARGET_DISCARD) continue;
+
+ if (branched) {
+ /* We already branched, so this is dead */
+ mir_remove_instruction(ins);
+ }
+
+ branched = true;
+ }
+}
+
static bool
mir_nontrivial_mod(midgard_vector_alu_src src, bool is_int, unsigned mask)
{
return mir_nontrivial_mod(src2, is_int, mask);
}
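+/* The identity output modifier is wrap for integer ops and none for
+ * float ops; any other outmod is semantically meaningful */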
+static bool
+mir_nontrivial_outmod(midgard_instruction *ins)
+{
+ bool is_int = midgard_is_integer_op(ins->alu.op);
+ unsigned mod = ins->alu.outmod;
+
+ if (is_int)
+ return mod != midgard_outmod_int_wrap;
+ else
+ return mod != midgard_outmod_none;
+}
+
static bool
midgard_opt_copy_prop(compiler_context *ctx, midgard_block *block)
{
if (ins->has_constants) continue;
if (mir_nontrivial_source2_mod(ins)) continue;
- if (ins->alu.outmod != midgard_outmod_none) continue;
+ if (mir_nontrivial_outmod(ins)) continue;
/* We're clear -- rewrite */
mir_rewrite_index_src(ctx, to, from);
* the move can be propagated away entirely */
static bool
-mir_compose_outmod(midgard_outmod *outmod, midgard_outmod comp)
+mir_compose_float_outmod(midgard_outmod_float *outmod, midgard_outmod_float comp)
{
/* Nothing to do */
if (comp == midgard_outmod_none)
/* TODO: Registers? */
unsigned src = ins->ssa_args.src1;
if (src >= ctx->func->impl->ssa_alloc) continue;
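+ /* Composing outmods is only sound if the source has a single
+ * static write */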
+ assert(!mir_has_multiple_writes(ctx, src));
/* There might be a source modifier, too */
if (mir_nontrivial_source2_mod(ins)) continue;
if (v->type != TAG_ALU_4) continue;
if (v->ssa_args.dest != src) continue;
- midgard_outmod temp = v->alu.outmod;
- progress |= mir_compose_outmod(&temp, ins->alu.outmod);
+ /* Can we even take a float outmod? */
+ if (midgard_is_integer_out_op(v->alu.op)) continue;
+
+ midgard_outmod_float temp = v->alu.outmod;
+ progress |= mir_compose_float_outmod(&temp, ins->alu.outmod);
/* Throw in the towel.. */
if (!progress) break;
int mapped = base;
map_ssa_to_alias(ctx, &mapped);
- EMIT(fmov, mapped, blank_alu_src, base);
+ EMIT(mov, mapped, blank_alu_src, base);
}
}
void *constant_value = _mesa_hash_table_u64_search(ctx->ssa_constants, ctx->fragment_output + 1);
if (constant_value) {
- midgard_instruction ins = v_fmov(SSA_FIXED_REGISTER(REGISTER_CONSTANT), blank_alu_src, SSA_FIXED_REGISTER(0));
+ midgard_instruction ins = v_mov(SSA_FIXED_REGISTER(REGISTER_CONSTANT), blank_alu_src, SSA_FIXED_REGISTER(0));
attach_constants(ctx, &ins, constant_value, ctx->fragment_output + 1);
emit_mir_instruction(ctx, ins);
}
emit_mir_instruction(ctx, scale);
- /* vadd.f2u8.pos.low hr0, hr48, #0 */
+ /* vadd.f2u_rte.pos.low hr0, hr48, #0 */
midgard_vector_alu_src alu_src = blank_alu_src;
alu_src.half = true;
- midgard_instruction f2u8 = {
+ midgard_instruction f2u_rte = {
.type = TAG_ALU_4,
.ssa_args = {
.src0 = SSA_FIXED_REGISTER(24),
.inline_constant = true
},
.alu = {
- .op = midgard_alu_op_f2u8,
+ .op = midgard_alu_op_f2u_rte,
.reg_mode = midgard_reg_mode_16,
.dest_override = midgard_dest_override_lower,
.outmod = midgard_outmod_pos,
}
};
- emit_mir_instruction(ctx, f2u8);
-
- /* vmul.imov.quarter r0, r0, r0 */
-
- midgard_instruction imov_8 = {
- .type = TAG_ALU_4,
- .ssa_args = {
- .src0 = SSA_UNUSED_1,
- .src1 = SSA_FIXED_REGISTER(0),
- .dest = SSA_FIXED_REGISTER(0),
- },
- .alu = {
- .op = midgard_alu_op_imov,
- .reg_mode = midgard_reg_mode_8,
- .dest_override = midgard_dest_override_none,
- .outmod = midgard_outmod_int,
- .mask = 0xFF,
- .src1 = vector_alu_srco_unsigned(blank_alu_src),
- .src2 = vector_alu_srco_unsigned(blank_alu_src),
- }
- };
-
- /* Emit branch epilogue with the 8-bit move as the source */
+ emit_mir_instruction(ctx, f2u_rte);
- emit_mir_instruction(ctx, imov_8);
EMIT(alu_br_compact_cond, midgard_jmp_writeout_op_writeout, TAG_ALU_4, 0, midgard_condition_always);
-
- emit_mir_instruction(ctx, imov_8);
EMIT(alu_br_compact_cond, midgard_jmp_writeout_op_writeout, TAG_ALU_4, -1, midgard_condition_always);
}
unsigned loc = var->data.driver_location;
unsigned sz = glsl_type_size(var->type, FALSE);
- for (int c = loc; c < (loc + sz); ++c) {
- program->varyings[c] = var->data.location;
- max_varying = MAX2(max_varying, c);
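+ /* Assign a location per slot for varyings spanning
+ * multiple slots */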
+ for (int c = 0; c < sz; ++c) {
+ program->varyings[loc + c] = var->data.location + c;
+ max_varying = MAX2(max_varying, loc + c);
}
}
}
} while (progress);
+ /* Nested control-flow can result in dead branches at the end of the
+ * block. This messes with our analysis and is just dead code, so cull
+ * them */
+ mir_foreach_block(ctx, block) {
+ midgard_opt_cull_dead_branch(ctx, block);
+ }
+
/* Schedule! */
schedule_program(ctx);
* last is an ALU, then it's also 1... */
mir_foreach_block(ctx, block) {
- util_dynarray_foreach(&block->bundles, midgard_bundle, bundle) {
+ mir_foreach_bundle_in_block(block, bundle) {
int lookahead = 1;
if (current_bundle + 1 < bundle_count) {