static midgard_instruction m_##name(unsigned ssa, unsigned address) { \
midgard_instruction i = { \
.type = TAG_LOAD_STORE_4, \
+ .mask = 0xF, \
.ssa_args = { \
.rname = ssa, \
.uname = -1, \
}, \
.load_store = { \
.op = midgard_op_##name, \
- .mask = 0xF, \
.swizzle = SWIZZLE_XYZW, \
.address = address \
} \
* the corresponding Midgard source */
static midgard_vector_alu_src
-vector_alu_modifiers(nir_alu_src *src, bool is_int, unsigned broadcast_count)
+vector_alu_modifiers(nir_alu_src *src, bool is_int, unsigned broadcast_count,
+ bool half, bool sext)
{
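+ /* The new parameters: `half` requests a half-width (shrunken) source;
+ * `sext` selects sign- over zero-extension when an integer source is
+ * read at half width */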
if (!src) return blank_alu_src;
midgard_vector_alu_src alu_src = {
.rep_low = 0,
.rep_high = 0,
- .half = 0, /* TODO */
+ .half = half,
.swizzle = SWIZZLE_FROM_ARRAY(src->swizzle)
};
if (is_int) {
- /* TODO: sign-extend/zero-extend */
alu_src.mod = midgard_int_normal;
+ /* Sign/zero-extend if needed */
+
+ if (half) {
+ alu_src.mod = sext ?
+ midgard_int_sign_extend
+ : midgard_int_zero_extend;
+ }
+
/* These should have been lowered away */
assert(!(src->abs || src->negate));
} else {
/* We need to set the conditional as close to its use as possible */
.precede_break = true,
.unit = for_branch ? UNIT_SMUL : UNIT_SADD,
+ .mask = 1 << COMPONENT_W,
.ssa_args = {
.src0 = condition,
.outmod = midgard_outmod_int_wrap,
.reg_mode = midgard_reg_mode_32,
.dest_override = midgard_dest_override_none,
- .mask = (0x3 << 6), /* w */
.src1 = vector_alu_srco_unsigned(alu_src),
.src2 = vector_alu_srco_unsigned(alu_src)
},
midgard_instruction ins = {
.type = TAG_ALU_4,
.precede_break = true,
+ .mask = mask_of(nr_comp),
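+ /* mask_of(n) expands to (1 << n) - 1, i.e. the low n components */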
.ssa_args = {
.src0 = condition,
.src1 = condition,
.outmod = midgard_outmod_int_wrap,
.reg_mode = midgard_reg_mode_32,
.dest_override = midgard_dest_override_none,
- .mask = expand_writemask(mask_of(nr_comp)),
.src1 = vector_alu_srco_unsigned(alu_src),
.src2 = vector_alu_srco_unsigned(alu_src)
},
midgard_instruction ins = {
.type = TAG_ALU_4,
+ .mask = 1 << COMPONENT_W,
.ssa_args = {
.src0 = SSA_UNUSED_1,
.src1 = offset,
.outmod = midgard_outmod_int_wrap,
.reg_mode = midgard_reg_mode_32,
.dest_override = midgard_dest_override_none,
- .mask = (0x3 << 6), /* w */
.src1 = vector_alu_srco_unsigned(zero_alu_src),
.src2 = vector_alu_srco_unsigned(blank_alu_src_xxxx)
},
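/* Mask encoding note: the old .alu.mask used two bits per 32-bit
 * component (one per 16-bit half), so (0x3 << 6) selected w; the new
 * top-level .mask is one bit per component, so w is just
 * 1 << COMPONENT_W, presumably re-expanded to the hardware form when
 * the ALU word is packed. */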
#define ALU_CASE(nir, _op) \
case nir_op_##nir: \
op = midgard_alu_op_##_op; \
+ assert(src_bitsize == dst_bitsize); \
break;
#define ALU_CASE_BCAST(nir, _op, count) \
case nir_op_##nir: \
op = midgard_alu_op_##_op; \
broadcast_swizzle = count; \
+ assert(src_bitsize == dst_bitsize); \
break;
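/* assert(src_bitsize == dst_bitsize) encodes that plain ALU_CASE ops
 * never convert sizes; size-converting ops get explicit cases in the
 * switch instead */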
static bool
nir_is_fzero_constant(nir_src src)
return true;
}
-/* Analyze the sizes of inputs/outputs to determine which reg mode to run an
- * ALU instruction in. Right now, we just consider the size of the first
- * argument. This will fail for upconverting, for instance (TODO) */
+/* Analyze the sizes of the inputs to determine which reg mode to use. Ops
+ * that need special treatment override this anyway. */
static midgard_reg_mode
reg_mode_for_nir(nir_alu_instr *instr)
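/* Sketch of the mapping, assuming consecutive reg mode enum values (as
 * relied on by the reg_mode++ below): 8/16/32/64-bit sources map to
 * midgard_reg_mode_8/16/32/64 via nir_src_bit_size(instr->src[0].src) */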
unsigned broadcast_swizzle = 0;
+ /* What register mode should we operate in? */
+ midgard_reg_mode reg_mode =
+ reg_mode_for_nir(instr);
+
/* Do we need a destination override? Used for inline
* type conversion */
midgard_dest_override dest_override =
midgard_dest_override_none;
+ /* Should we use a half-size source for either operand, and if so, sign- or zero-extend it? */
+
+ bool half_1 = false, sext_1 = false;
+ bool half_2 = false, sext_2 = false;
+
+ unsigned src_bitsize = nir_src_bit_size(instr->src[0].src);
+ unsigned dst_bitsize = nir_dest_bit_size(instr->dest.dest);
+
switch (instr->op) {
ALU_CASE(fadd, fadd);
ALU_CASE(fmul, fmul);
ALU_CASE(i2f32, i2f_rtz);
ALU_CASE(u2f32, u2f_rtz);
+ ALU_CASE(f2i16, f2i_rtz);
+ ALU_CASE(f2u16, f2u_rtz);
+ ALU_CASE(i2f16, i2f_rtz);
+ ALU_CASE(u2f16, u2f_rtz);
+
ALU_CASE(fsin, fsin);
ALU_CASE(fcos, fcos);
/* For size conversion, we use a move. Ideally though we would squash
* these ops together; maybe that has to happen later in NIR as part of
* propagation...? An earlier algebraic pass ensured we step down by
- * only / exactly one size */
+ * only / exactly one size. If stepping down, we use a dest override to
+ * reduce the size; if stepping up, we use a larger-sized move with a
+ * half source and a sign/zero-extension modifier */
+ case nir_op_i2i8:
+ case nir_op_i2i16:
+ case nir_op_i2i32:
+ /* If we end up upscaling, we'll need a sign-extend on the
+ * operand (the second argument) */
+
+ sext_2 = true;
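+ /* fallthrough: the unsigned cases below share the imov path */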
case nir_op_u2u8:
case nir_op_u2u16:
- case nir_op_i2i8:
- case nir_op_i2i16: {
+ case nir_op_u2u32: {
op = midgard_alu_op_imov;
+
+ if (dst_bitsize == (src_bitsize * 2)) {
+ /* Converting up */
+ half_2 = true;
+
+ /* Use a greater register mode */
+ reg_mode++;
+ } else if (src_bitsize == (dst_bitsize * 2)) {
+ /* Converting down */
+ dest_override = midgard_dest_override_lower;
+ }
+
+ break;
+ }
+
+ case nir_op_f2f16: {
+ assert(src_bitsize == 32);
+
+ op = midgard_alu_op_fmov;
dest_override = midgard_dest_override_lower;
break;
}
+ case nir_op_f2f32: {
+ assert(src_bitsize == 16);
+
+ op = midgard_alu_op_fmov;
+ half_2 = true;
+ reg_mode++;
+ break;
+ }
+
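+ /* Worked example: u2u32 of a 16-bit source takes the "converting up"
+ * path: reg_mode bumps 16 -> 32 and src2 becomes a half source with
+ * zero-extension (sext_2 stays false). i2i16 of a 32-bit source takes
+ * the "converting down" path: reg_mode stays 32 and the dest override
+ * lowers the result to 16 bits. */
+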
/* For greater-or-equal, we lower to less-or-equal and flip the
* arguments */
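/* e.g. a >= b is emitted as b <= a */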
bool is_int = midgard_is_integer_op(op);
+ ins.mask = mask_of(nr_components);
+
midgard_vector_alu alu = {
.op = op,
- .reg_mode = reg_mode_for_nir(instr),
+ .reg_mode = reg_mode,
.dest_override = dest_override,
.outmod = outmod,
- /* Writemask only valid for non-SSA NIR */
- .mask = expand_writemask(mask_of(nr_components)),
-
- .src1 = vector_alu_srco_unsigned(vector_alu_modifiers(nirmods[0], is_int, broadcast_swizzle)),
- .src2 = vector_alu_srco_unsigned(vector_alu_modifiers(nirmods[1], is_int, broadcast_swizzle)),
+ .src1 = vector_alu_srco_unsigned(vector_alu_modifiers(nirmods[0], is_int, broadcast_swizzle, half_1, sext_1)),
+ .src2 = vector_alu_srco_unsigned(vector_alu_modifiers(nirmods[1], is_int, broadcast_swizzle, half_2, sext_2)),
};
/* Apply writemask if non-SSA, keeping in mind that we can't write to components that don't exist */
if (!is_ssa)
- alu.mask &= expand_writemask(instr->dest.write_mask);
+ ins.mask &= instr->dest.write_mask;
ins.alu = alu;
uint8_t original_swizzle[4];
memcpy(original_swizzle, nirmods[0]->swizzle, sizeof(nirmods[0]->swizzle));
+ unsigned orig_mask = ins.mask;
for (int i = 0; i < nr_components; ++i) {
/* Mask the associated component, dropping the
* instruction if needed */
- ins.alu.mask = (0x3) << (2 * i);
- ins.alu.mask &= alu.mask;
+ ins.mask = 1 << i;
+ ins.mask &= orig_mask;
- if (!ins.alu.mask)
+ if (!ins.mask)
continue;
for (int j = 0; j < 4; ++j)
nirmods[0]->swizzle[j] = original_swizzle[i]; /* Pull from the correct component */
- ins.alu.src1 = vector_alu_srco_unsigned(vector_alu_modifiers(nirmods[0], is_int, broadcast_swizzle));
+ ins.alu.src1 = vector_alu_srco_unsigned(vector_alu_modifiers(nirmods[0], is_int, broadcast_swizzle, half_1, false));
emit_mir_instruction(ctx, ins);
}
} else {
/* TODO: swizzle, mask */
midgard_instruction ins = m_ld_vary_32(dest, offset);
- ins.load_store.mask = mask_of(nr_comp);
+ ins.mask = mask_of(nr_comp);
ins.load_store.swizzle = SWIZZLE_XYZW >> (2 * component);
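/* e.g. component == 1 gives (SWIZZLE_XYZW >> 2) == yzwx, pulling the
 * varying's y into lane x of the destination */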
midgard_varying_parameter p = {
emit_ubo_read(ctx, dest, uniform, NULL, 0);
}
-/* Reads RGBA8888 value from the tilebuffer and converts to a RGBA32F register,
- * using scalar ops functional on earlier Midgard generations. Newer Midgard
- * generations have faster vectorized reads. This operation is for blend
- * shaders in particular; reading the tilebuffer from the fragment shader
- * remains an open problem. */
-
-static void
-emit_fb_read_blend_scalar(compiler_context *ctx, unsigned reg)
-{
- midgard_instruction ins = m_ld_color_buffer_8(reg, 0);
- ins.load_store.swizzle = 0; /* xxxx */
-
- /* Read each component sequentially */
-
- for (unsigned c = 0; c < 4; ++c) {
- ins.load_store.mask = (1 << c);
- ins.load_store.unknown = c;
- emit_mir_instruction(ctx, ins);
- }
-
- /* vadd.u2f hr2, zext(hr2), #0 */
-
- midgard_vector_alu_src alu_src = blank_alu_src;
- alu_src.mod = midgard_int_zero_extend;
- alu_src.half = true;
-
- midgard_instruction u2f = {
- .type = TAG_ALU_4,
- .ssa_args = {
- .src0 = reg,
- .src1 = SSA_UNUSED_0,
- .dest = reg,
- .inline_constant = true
- },
- .alu = {
- .op = midgard_alu_op_u2f_rtz,
- .reg_mode = midgard_reg_mode_16,
- .dest_override = midgard_dest_override_none,
- .mask = 0xF,
- .src1 = vector_alu_srco_unsigned(alu_src),
- .src2 = vector_alu_srco_unsigned(blank_alu_src),
- }
- };
-
- emit_mir_instruction(ctx, u2f);
-
- /* vmul.fmul.sat r1, hr2, #0.00392151 */
-
- alu_src.mod = 0;
-
- midgard_instruction fmul = {
- .type = TAG_ALU_4,
- .inline_constant = _mesa_float_to_half(1.0 / 255.0),
- .ssa_args = {
- .src0 = reg,
- .dest = reg,
- .src1 = SSA_UNUSED_0,
- .inline_constant = true
- },
- .alu = {
- .op = midgard_alu_op_fmul,
- .reg_mode = midgard_reg_mode_32,
- .dest_override = midgard_dest_override_none,
- .outmod = midgard_outmod_sat,
- .mask = 0xFF,
- .src1 = vector_alu_srco_unsigned(alu_src),
- .src2 = vector_alu_srco_unsigned(blank_alu_src),
- }
- };
-
- emit_mir_instruction(ctx, fmul);
-}
-
static void
emit_intrinsic(compiler_context *ctx, nir_intrinsic_instr *instr)
{
} else if (ctx->stage == MESA_SHADER_VERTEX) {
midgard_instruction ins = m_ld_attr_32(reg, offset);
ins.load_store.unknown = 0x1E1E; /* XXX: What is this? */
- ins.load_store.mask = mask_of(nr_comp);
+ ins.mask = mask_of(nr_comp);
/* Use the type appropriate load */
switch (t) {
break;
}
- /* Reads off the tilebuffer during blending, tasty */
+ /* Reads a 128-bit value raw off the tilebuffer during blending, tasty */
+
case nir_intrinsic_load_raw_output_pan:
reg = nir_dest_index(ctx, &instr->dest);
assert(ctx->is_blend);
- emit_fb_read_blend_scalar(ctx, reg);
+
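+ /* The macro default .mask = 0xF selects all four 32-bit components,
+ * the full 128-bit raw value, so no per-component loop is needed */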
+ midgard_instruction ins = m_ld_color_buffer_8(reg, 0);
+ emit_mir_instruction(ctx, ins);
break;
case nir_intrinsic_load_blend_const_color_rgba: {
/* No helper to build texture words -- we do it all here */
midgard_instruction ins = {
.type = TAG_TEXTURE_4,
+ .mask = 0xF,
.texture = {
.op = midgard_texop,
.format = midgard_tex_format(instr->sampler_dim),
/* TODO: Regalloc it in */
.swizzle = SWIZZLE_XYZW,
- .mask = 0xF,
/* TODO: half */
.in_reg_full = 1,
midgard_instruction st = m_st_cubemap_coords(reg, 0);
st.load_store.unknown = 0x24; /* XXX: What is this? */
- st.load_store.mask = 0x3; /* xy */
+ st.mask = 0x3; /* xy */
st.load_store.swizzle = alu_src.swizzle;
emit_mir_instruction(ctx, st);
ins.texture.in_reg_swizzle = alu_src.swizzle = swizzle_of(nr_comp);
midgard_instruction mov = v_mov(index, alu_src, reg);
- mov.alu.mask = expand_writemask(mask_of(nr_comp));
+ mov.mask = mask_of(nr_comp);
emit_mir_instruction(ctx, mov);
if (midgard_texop == TEXTURE_OP_TEXEL_FETCH) {
zero.ssa_args.inline_constant = true;
zero.ssa_args.src1 = SSA_FIXED_REGISTER(REGISTER_CONSTANT);
zero.has_constants = true;
- zero.alu.mask = ~mov.alu.mask;
+ zero.mask = ~mov.mask;
emit_mir_instruction(ctx, zero);
ins.texture.in_reg_swizzle = SWIZZLE_XYZZ;
alu_src.swizzle = SWIZZLE_XXXX;
midgard_instruction mov = v_mov(index, alu_src, reg);
- mov.alu.mask = expand_writemask(1 << COMPONENT_W);
+ mov.mask = 1 << COMPONENT_W;
emit_mir_instruction(ctx, mov);
ins.texture.lod_register = true;
uint32_t value = cons[component];
bool is_vector = false;
- unsigned mask = effective_writemask(&ins->alu);
+ unsigned mask = effective_writemask(&ins->alu, ins->mask);
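+ /* effective_writemask now takes the instruction-level mask as an
+ * argument, since the mask no longer lives in the ALU word itself */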
for (int c = 1; c < 4; ++c) {
/* We only care if this component is actually used */
/* abs or neg */
if (!is_int && src.mod) return true;
+ /* Of the integer mods, only shift is nontrivial in isolation */
+ if (is_int && src.mod == midgard_int_shift) return true;
+
+ /* size-conversion */
+ if (src.half) return true;
+
/* swizzle */
for (unsigned c = 0; c < 4; ++c) {
if (!(mask & (1 << c))) continue;
static bool
mir_nontrivial_source2_mod(midgard_instruction *ins)
{
- unsigned mask = squeeze_writemask(ins->alu.mask);
bool is_int = midgard_is_integer_op(ins->alu.op);
midgard_vector_alu_src src2 =
vector_alu_from_unsigned(ins->alu.src2);
- return mir_nontrivial_mod(src2, is_int, mask);
+ return mir_nontrivial_mod(src2, is_int, ins->mask);
}
static bool
if (v->ssa_args.dest == from) {
/* We don't want to track partial writes: a partial write leaves the remaining components of the old destination live */
- if (v->alu.mask == 0xF) {
+ if (v->mask == 0xF) {
v->ssa_args.dest = to;
eliminated = true;
}