static midgard_instruction m_##name(unsigned ssa, unsigned address) { \
midgard_instruction i = { \
.type = TAG_LOAD_STORE_4, \
+ .mask = 0xF, \
.ssa_args = { \
.rname = ssa, \
.uname = -1, \
}, \
.load_store = { \
.op = midgard_op_##name, \
- .mask = 0xF, \
.swizzle = SWIZZLE_XYZW, \
.address = address \
} \
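/* A minimal sketch of mask_of(), assuming the conventional
 * "low N bits set" definition used by the unified per-instruction mask:
 *
 *     #define mask_of(nr_comp) ((1 << (nr_comp)) - 1)
 *
 * mask_of(3) == 0x7 (xyz); the 0xF above is a full xyzw write. */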
/* We need to set the conditional as close as possible to the branch */
.precede_break = true,
.unit = for_branch ? UNIT_SMUL : UNIT_SADD,
+ .mask = 1 << COMPONENT_W,
.ssa_args = {
.src0 = condition,
.outmod = midgard_outmod_int_wrap,
.reg_mode = midgard_reg_mode_32,
.dest_override = midgard_dest_override_none,
- .mask = (0x3 << 6), /* w */
.src1 = vector_alu_srco_unsigned(alu_src),
.src2 = vector_alu_srco_unsigned(alu_src)
},
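/* Why the encodings differ: the old ALU writemask packed two bits per
 * 32-bit component, so w was (0x3 << 6) == 0xC0, while the unified mask
 * is one bit per component, making the same write (1 << COMPONENT_W)
 * == 0x8. A sketch of expand_writemask() under that assumption:
 *
 *     static unsigned
 *     expand_writemask(unsigned mask)
 *     {
 *             unsigned o = 0;
 *
 *             for (unsigned c = 0; c < 4; ++c)
 *                     if (mask & (1 << c))
 *                             o |= (3 << (2 * c));
 *
 *             return o;
 *     }
 */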
midgard_instruction ins = {
.type = TAG_ALU_4,
.precede_break = true,
+ .mask = mask_of(nr_comp),
.ssa_args = {
.src0 = condition,
.src1 = condition,
.outmod = midgard_outmod_int_wrap,
.reg_mode = midgard_reg_mode_32,
.dest_override = midgard_dest_override_none,
- .mask = expand_writemask(mask_of(nr_comp)),
.src1 = vector_alu_srco_unsigned(alu_src),
.src2 = vector_alu_srco_unsigned(alu_src)
},
midgard_instruction ins = {
.type = TAG_ALU_4,
+ .mask = 1 << COMPONENT_W,
.ssa_args = {
.src0 = SSA_UNUSED_1,
.src1 = offset,
.outmod = midgard_outmod_int_wrap,
.reg_mode = midgard_reg_mode_32,
.dest_override = midgard_dest_override_none,
- .mask = (0x3 << 6), /* w */
.src1 = vector_alu_srco_unsigned(zero_alu_src),
.src2 = vector_alu_srco_unsigned(blank_alu_src_xxxx)
},
#define ALU_CASE(nir, _op) \
case nir_op_##nir: \
op = midgard_alu_op_##_op; \
+ assert(src_bitsize == dst_bitsize); \
break;
#define ALU_CASE_BCAST(nir, _op, count) \
case nir_op_##nir: \
op = midgard_alu_op_##_op; \
broadcast_swizzle = count; \
+ assert(src_bitsize == dst_bitsize); \
break;
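/* For concreteness, ALU_CASE(fadd, fadd) expands to:
 *
 *     case nir_op_fadd:
 *             op = midgard_alu_op_fadd;
 *             assert(src_bitsize == dst_bitsize);
 *             break;
 *
 * so every op routed through ALU_CASE now rejects implicit size
 * conversions; conversions get explicit cases below. */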
static bool
nir_is_fzero_constant(nir_src src)
bool half_1 = false, sext_1 = false;
bool half_2 = false, sext_2 = false;
+ unsigned src_bitsize = nir_src_bit_size(instr->src[0].src);
+ unsigned dst_bitsize = nir_dest_bit_size(instr->dest.dest);
+
switch (instr->op) {
ALU_CASE(fadd, fadd);
ALU_CASE(fmul, fmul);
ALU_CASE(i2f32, i2f_rtz);
ALU_CASE(u2f32, u2f_rtz);
+ ALU_CASE(f2i16, f2i_rtz);
+ ALU_CASE(f2u16, f2u_rtz);
+ ALU_CASE(i2f16, i2f_rtz);
+ ALU_CASE(u2f16, u2f_rtz);
+
ALU_CASE(fsin, fsin);
ALU_CASE(fcos, fcos);
case nir_op_u2u32: {
op = midgard_alu_op_imov;
- unsigned src_bitsize = nir_src_bit_size(instr->src[0].src);
- unsigned dst_bitsize = nir_dest_bit_size(instr->dest.dest);
-
-
if (dst_bitsize == (src_bitsize * 2)) {
/* Converting up */
half_2 = true;
break;
}
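/* Worked example: u2u32 with a 16-bit source satisfies
 * dst_bitsize == src_bitsize * 2 (32 == 16 * 2), so the source is
 * flagged half-width (half_2) and the hardware widens it on read. */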
+ case nir_op_f2f16: {
+ assert(src_bitsize == 32);
+
+ op = midgard_alu_op_fmov;
+ dest_override = midgard_dest_override_lower;
+ break;
+ }
+
+ case nir_op_f2f32: {
+ assert(src_bitsize == 16);
+
+ op = midgard_alu_op_fmov;
+ half_2 = true;
+ reg_mode++;
+ break;
+ }
+
+
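/* The two directions are asymmetric: f2f16 stays a 32-bit op and only
 * lowers the destination to the bottom halves, while f2f32 tags the
 * 16-bit source as half and bumps reg_mode -- assuming the reg_mode
 * enum orders 8/16/32/64 adjacently, reg_mode++ is the 16-to-32 step. */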
/* For greater-or-equal, we lower to less-or-equal and flip the
 * arguments (a >= b becomes b <= a) */
bool is_int = midgard_is_integer_op(op);
+ ins.mask = mask_of(nr_components);
+
midgard_vector_alu alu = {
.op = op,
.reg_mode = reg_mode,
.dest_override = dest_override,
.outmod = outmod,
- /* Writemask only valid for non-SSA NIR */
- .mask = expand_writemask(mask_of(nr_components)),
-
.src1 = vector_alu_srco_unsigned(vector_alu_modifiers(nirmods[0], is_int, broadcast_swizzle, half_1, sext_1)),
.src2 = vector_alu_srco_unsigned(vector_alu_modifiers(nirmods[1], is_int, broadcast_swizzle, half_2, sext_2)),
};
/* Apply writemask if non-SSA, keeping in mind that we can't write to components that don't exist */
if (!is_ssa)
- alu.mask &= expand_writemask(instr->dest.write_mask);
+ ins.mask &= instr->dest.write_mask;
ins.alu = alu;
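/* e.g. a NIR register write with write_mask == 0x5 (x and z) narrows
 * ins.mask from 0xF to 0x5, so y and w are never written back. */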
uint8_t original_swizzle[4];
memcpy(original_swizzle, nirmods[0]->swizzle, sizeof(nirmods[0]->swizzle));
+ unsigned orig_mask = ins.mask;
for (int i = 0; i < nr_components; ++i) {
/* Mask the associated component, dropping the
* instruction if needed */
- ins.alu.mask = (0x3) << (2 * i);
- ins.alu.mask &= alu.mask;
+ ins.mask = 1 << i;
+ ins.mask &= orig_mask;
- if (!ins.alu.mask)
+ if (!ins.mask)
continue;
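/* Worked example, orig_mask == 0x7 on a vec4 op:
 *   i == 0: ins.mask = 0x1 & 0x7 = 0x1 -> emit the x copy
 *   i == 1: ins.mask = 0x2 & 0x7 = 0x2 -> emit the y copy
 *   i == 2: ins.mask = 0x4 & 0x7 = 0x4 -> emit the z copy
 *   i == 3: ins.mask = 0x8 & 0x7 = 0   -> dead component, skipped */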
for (int j = 0; j < 4; ++j)
/* TODO: swizzle, mask */
midgard_instruction ins = m_ld_vary_32(dest, offset);
- ins.load_store.mask = mask_of(nr_comp);
+ ins.mask = mask_of(nr_comp);
ins.load_store.swizzle = SWIZZLE_XYZW >> (2 * component);
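/* Each swizzle lane is two bits, so the shift rotates the read window
 * down by `component`. Assuming the usual identity encoding
 * SWIZZLE_XYZW == 0xE4 (0b11100100), component == 1 gives
 * 0xE4 >> 2 == 0x39 (0b00111001): lane 0 reads y, lane 1 z, lane 2 w. */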
midgard_varying_parameter p = {
emit_ubo_read(ctx, dest, uniform, NULL, 0);
}
-/* Reads RGBA8888 value from the tilebuffer and converts to a RGBA32F register,
- * using scalar ops functional on earlier Midgard generations. Newer Midgard
- * generations have faster vectorized reads. This operation is for blend
- * shaders in particular; reading the tilebuffer from the fragment shader
- * remains an open problem. */
-
-static void
-emit_fb_read_blend_scalar(compiler_context *ctx, unsigned reg)
-{
- midgard_instruction ins = m_ld_color_buffer_8(reg, 0);
- ins.load_store.swizzle = 0; /* xxxx */
-
- /* Read each component sequentially */
-
- for (unsigned c = 0; c < 4; ++c) {
- ins.load_store.mask = (1 << c);
- ins.load_store.unknown = c;
- emit_mir_instruction(ctx, ins);
- }
-}
-
static void
emit_intrinsic(compiler_context *ctx, nir_intrinsic_instr *instr)
{
} else if (ctx->stage == MESA_SHADER_VERTEX) {
midgard_instruction ins = m_ld_attr_32(reg, offset);
ins.load_store.unknown = 0x1E1E; /* XXX: What is this? */
- ins.load_store.mask = mask_of(nr_comp);
+ ins.mask = mask_of(nr_comp);
/* Use the type appropriate load */
switch (t) {
break;
}
- /* Reads off the tilebuffer during blending, tasty */
+ /* Reads a 128-bit value raw off the tilebuffer during blending, tasty */
+
case nir_intrinsic_load_raw_output_pan:
reg = nir_dest_index(ctx, &instr->dest);
assert(ctx->is_blend);
- emit_fb_read_blend_scalar(ctx, reg);
+
+ midgard_instruction ins = m_ld_color_buffer_8(reg, 0);
+ emit_mir_instruction(ctx, ins);
break;
case nir_intrinsic_load_blend_const_color_rgba: {
/* No helper to build texture words -- we do it all here */
midgard_instruction ins = {
.type = TAG_TEXTURE_4,
+ .mask = 0xF,
.texture = {
.op = midgard_texop,
.format = midgard_tex_format(instr->sampler_dim),
/* TODO: Regalloc it in */
.swizzle = SWIZZLE_XYZW,
- .mask = 0xF,
/* TODO: half */
.in_reg_full = 1,
midgard_instruction st = m_st_cubemap_coords(reg, 0);
st.load_store.unknown = 0x24; /* XXX: What is this? */
- st.load_store.mask = 0x3; /* xy */
+ st.mask = 0x3; /* xy */
st.load_store.swizzle = alu_src.swizzle;
emit_mir_instruction(ctx, st);
ins.texture.in_reg_swizzle = alu_src.swizzle = swizzle_of(nr_comp);
midgard_instruction mov = v_mov(index, alu_src, reg);
- mov.alu.mask = expand_writemask(mask_of(nr_comp));
+ mov.mask = mask_of(nr_comp);
emit_mir_instruction(ctx, mov);
if (midgard_texop == TEXTURE_OP_TEXEL_FETCH) {
zero.ssa_args.inline_constant = true;
zero.ssa_args.src1 = SSA_FIXED_REGISTER(REGISTER_CONSTANT);
zero.has_constants = true;
- zero.alu.mask = ~mov.alu.mask;
+ zero.mask = ~mov.mask;
emit_mir_instruction(ctx, zero);
ins.texture.in_reg_swizzle = SWIZZLE_XYZZ;
alu_src.swizzle = SWIZZLE_XXXX;
midgard_instruction mov = v_mov(index, alu_src, reg);
- mov.alu.mask = expand_writemask(1 << COMPONENT_W);
+ mov.mask = 1 << COMPONENT_W;
emit_mir_instruction(ctx, mov);
ins.texture.lod_register = true;
emit_mir_instruction(ctx, ins);
- /* Simultaneously alias the destination and emit a move for it. The move will be eliminated if possible */
-
int o_reg = REGISTER_TEXTURE_BASE + out_reg, o_index = nir_dest_index(ctx, &instr->dest);
- alias_ssa(ctx, o_index, SSA_FIXED_REGISTER(o_reg));
- ctx->texture_index[reg] = o_index;
-
midgard_instruction ins2 = v_mov(SSA_FIXED_REGISTER(o_reg), blank_alu_src, o_index);
emit_mir_instruction(ctx, ins2);
/* Blend constants must not be inlined by definition */
if (ins->has_blend_constant) continue;
+ /* We can inline 32-bit constants (sometimes) or 16-bit constants (usually) */
+ bool is_16 = ins->alu.reg_mode == midgard_reg_mode_16;
+ bool is_32 = ins->alu.reg_mode == midgard_reg_mode_32;
+
+ if (!(is_16 || is_32))
+ continue;
+
/* src1 cannot be an inline constant due to encoding
* restrictions. So, if possible we try to flip the arguments
* in that case */
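/* A hedged sketch of that flip; op_is_commutative() is a hypothetical
 * stand-in for however the opcode table exposes commutativity, and a
 * real flip must also swap the per-source swizzle/modifier words:
 *
 *     if (ins->ssa_args.src0 == SSA_FIXED_REGISTER(REGISTER_CONSTANT) &&
 *         op_is_commutative(ins->alu.op)) {
 *             unsigned tmp = ins->ssa_args.src0;
 *             ins->ssa_args.src0 = ins->ssa_args.src1;
 *             ins->ssa_args.src1 = tmp;
 *     }
 */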
/* Scale constant appropriately, if we can legally */
uint16_t scaled_constant = 0;
- if (midgard_is_integer_op(op)) {
+ if (midgard_is_integer_op(op) || is_16) {
unsigned int *iconstants = (unsigned int *) ins->constants;
scaled_constant = (uint16_t) iconstants[component];
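/* The 32->16 truncation is only legal if the value round-trips:
 *   iconstants[component] == 0x00000042 -> scaled_constant == 0x0042, OK
 *   iconstants[component] == 0x00011170 -> scaled_constant == 0x1170,
 *   which no longer equals the original, so inlining must be skipped. */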
uint32_t value = cons[component];
bool is_vector = false;
- unsigned mask = effective_writemask(&ins->alu);
+ unsigned mask = effective_writemask(&ins->alu, ins->mask);
for (int c = 1; c < 4; ++c) {
/* We only care if this component is actually used */
static bool
mir_nontrivial_source2_mod(midgard_instruction *ins)
{
- unsigned mask = squeeze_writemask(ins->alu.mask);
bool is_int = midgard_is_integer_op(ins->alu.op);
midgard_vector_alu_src src2 =
vector_alu_from_unsigned(ins->alu.src2);
- return mir_nontrivial_mod(src2, is_int, mask);
+ return mir_nontrivial_mod(src2, is_int, ins->mask);
}
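/* For reference, an illustrative sketch of the check this feeds --
 * field names are assumptions, not the upstream implementation. A
 * source is trivial only if it carries no modifier and its swizzle is
 * the identity on every component the instruction actually writes:
 *
 *     static bool
 *     mir_nontrivial_mod(midgard_vector_alu_src src, bool is_int, unsigned mask)
 *     {
 *             if (src.mod)
 *                     return true;
 *
 *             for (unsigned c = 0; c < 4; ++c) {
 *                     if (!(mask & (1 << c))) continue;
 *                     if (((src.swizzle >> (2 * c)) & 3) != c)
 *                             return true;
 *             }
 *
 *             return false;
 *     }
 */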
static bool
return progress;
}
-static bool
-midgard_opt_copy_prop_tex(compiler_context *ctx, midgard_block *block)
-{
- bool progress = false;
-
- mir_foreach_instr_in_block_safe(block, ins) {
- if (ins->type != TAG_ALU_4) continue;
- if (!OP_IS_MOVE(ins->alu.op)) continue;
-
- unsigned from = ins->ssa_args.src1;
- unsigned to = ins->ssa_args.dest;
-
- /* Make sure it's simple enough for us to handle */
-
- if (from >= SSA_FIXED_MINIMUM) continue;
- if (from >= ctx->func->impl->ssa_alloc) continue;
- if (to < SSA_FIXED_REGISTER(REGISTER_TEXTURE_BASE)) continue;
- if (to > SSA_FIXED_REGISTER(REGISTER_TEXTURE_BASE + 1)) continue;
-
- bool eliminated = false;
-
- mir_foreach_instr_in_block_from_rev(block, v, mir_prev_op(ins)) {
- /* The texture registers are not SSA so be careful.
- * Conservatively, just stop if we hit a texture op
- * (even if it may not write) to where we are */
-
- if (v->type != TAG_ALU_4)
- break;
-
- if (v->ssa_args.dest == from) {
- /* We don't want to track partial writes ... */
- if (v->alu.mask == 0xF) {
- v->ssa_args.dest = to;
- eliminated = true;
- }
-
- break;
- }
- }
-
- if (eliminated)
- mir_remove_instruction(ins);
-
- progress |= eliminated;
- }
-
- return progress;
-}
-
/* The following passes reorder MIR instructions to enable better scheduling */
static void
.stage = nir->info.stage,
.is_blend = is_blend,
- .blend_constant_offset = -1,
+ .blend_constant_offset = 0,
.alpha_ref = program->alpha_ref
};
mir_foreach_block(ctx, block) {
progress |= midgard_opt_pos_propagate(ctx, block);
progress |= midgard_opt_copy_prop(ctx, block);
- progress |= midgard_opt_copy_prop_tex(ctx, block);
progress |= midgard_opt_dead_code_eliminate(ctx, block);
}
} while (progress);