#include "main/imports.h"
#include "compiler/nir/nir_builder.h"
#include "util/half_float.h"
+#include "util/u_math.h"
#include "util/u_debug.h"
#include "util/u_dynarray.h"
#include "util/list.h"
* driver seems to do it that way */
#define EMIT(op, ...) emit_mir_instruction(ctx, v_##op(__VA_ARGS__));
-#define SWIZZLE_XXXX SWIZZLE(COMPONENT_X, COMPONENT_X, COMPONENT_X, COMPONENT_X)
-#define SWIZZLE_XYXX SWIZZLE(COMPONENT_X, COMPONENT_Y, COMPONENT_X, COMPONENT_X)
-#define SWIZZLE_XYZX SWIZZLE(COMPONENT_X, COMPONENT_Y, COMPONENT_Z, COMPONENT_X)
-#define SWIZZLE_XYZW SWIZZLE(COMPONENT_X, COMPONENT_Y, COMPONENT_Z, COMPONENT_W)
-#define SWIZZLE_XYZZ SWIZZLE(COMPONENT_X, COMPONENT_Y, COMPONENT_Z, COMPONENT_Z)
-#define SWIZZLE_XYXZ SWIZZLE(COMPONENT_X, COMPONENT_Y, COMPONENT_X, COMPONENT_Z)
-#define SWIZZLE_WWWW SWIZZLE(COMPONENT_W, COMPONENT_W, COMPONENT_W, COMPONENT_W)
-
-static inline unsigned
-swizzle_of(unsigned comp)
-{
- switch (comp) {
- case 1:
- return SWIZZLE_XXXX;
- case 2:
- return SWIZZLE_XYXX;
- case 3:
- return SWIZZLE_XYZX;
- case 4:
- return SWIZZLE_XYZW;
- default:
- unreachable("Invalid component count");
- }
-}
-
-static inline unsigned
-mask_of(unsigned nr_comp)
-{
- return (1 << nr_comp) - 1;
-}
#define M_LOAD_STORE(name, rname, uname) \
static midgard_instruction m_##name(unsigned ssa, unsigned address) { \
midgard_instruction i = { \
.type = TAG_LOAD_STORE_4, \
+ .mask = 0xF, \
.ssa_args = { \
.rname = ssa, \
.uname = -1, \
}, \
.load_store = { \
.op = midgard_op_##name, \
- .mask = 0xF, \
.swizzle = SWIZZLE_XYZW, \
.address = address \
} \
* the corresponding Midgard source */
static midgard_vector_alu_src
-vector_alu_modifiers(nir_alu_src *src, bool is_int, unsigned broadcast_count)
+vector_alu_modifiers(nir_alu_src *src, bool is_int, unsigned broadcast_count,
+ bool half, bool sext)
{
if (!src) return blank_alu_src;
midgard_vector_alu_src alu_src = {
.rep_low = 0,
.rep_high = 0,
- .half = 0, /* TODO */
+ .half = half,
.swizzle = SWIZZLE_FROM_ARRAY(src->swizzle)
};
if (is_int) {
- /* TODO: sign-extend/zero-extend */
alu_src.mod = midgard_int_normal;
+ /* Sign/zero-extend if needed */
+
+ if (half) {
+ alu_src.mod = sext ?
+ midgard_int_sign_extend
+ : midgard_int_zero_extend;
+ }
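+ /* e.g. in 32-bit register mode, a half source reads only 16
+ * bits and extends them to full width according to sext */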
+
/* These should have been lowered away */
assert(!(src->abs || src->negate));
} else {
/* We need to set the conditional as close as possible */
.precede_break = true,
.unit = for_branch ? UNIT_SMUL : UNIT_SADD,
+ .mask = 1 << COMPONENT_W,
.ssa_args = {
.src0 = condition,
.outmod = midgard_outmod_int_wrap,
.reg_mode = midgard_reg_mode_32,
.dest_override = midgard_dest_override_none,
- .mask = (0x3 << 6), /* w */
.src1 = vector_alu_srco_unsigned(alu_src),
.src2 = vector_alu_srco_unsigned(alu_src)
},
midgard_instruction ins = {
.type = TAG_ALU_4,
.precede_break = true,
+ .mask = mask_of(nr_comp),
.ssa_args = {
.src0 = condition,
.src1 = condition,
.outmod = midgard_outmod_int_wrap,
.reg_mode = midgard_reg_mode_32,
.dest_override = midgard_dest_override_none,
- .mask = expand_writemask(mask_of(nr_comp)),
.src1 = vector_alu_srco_unsigned(alu_src),
.src2 = vector_alu_srco_unsigned(alu_src)
},
midgard_instruction ins = {
.type = TAG_ALU_4,
+ .mask = 1 << COMPONENT_W,
.ssa_args = {
.src0 = SSA_UNUSED_1,
.src1 = offset,
.outmod = midgard_outmod_int_wrap,
.reg_mode = midgard_reg_mode_32,
.dest_override = midgard_dest_override_none,
- .mask = (0x3 << 6), /* w */
.src1 = vector_alu_srco_unsigned(zero_alu_src),
.src2 = vector_alu_srco_unsigned(blank_alu_src_xxxx)
},
#define ALU_CASE(nir, _op) \
case nir_op_##nir: \
op = midgard_alu_op_##_op; \
+ assert(src_bitsize == dst_bitsize); \
break;
#define ALU_CASE_BCAST(nir, _op, count) \
case nir_op_##nir: \
op = midgard_alu_op_##_op; \
broadcast_swizzle = count; \
+ assert(src_bitsize == dst_bitsize); \
break;
static bool
nir_is_fzero_constant(nir_src src)
return true;
}
+/* Analyze the sizes of the inputs to determine the register mode. Ops that
+ * need special treatment override this anyway. */
+
+static midgard_reg_mode
+reg_mode_for_nir(nir_alu_instr *instr)
+{
+ unsigned src_bitsize = nir_src_bit_size(instr->src[0].src);
+
+ switch (src_bitsize) {
+ case 8:
+ return midgard_reg_mode_8;
+ case 16:
+ return midgard_reg_mode_16;
+ case 32:
+ return midgard_reg_mode_32;
+ case 64:
+ return midgard_reg_mode_64;
+ default:
+ unreachable("Invalid bit size");
+ }
+}
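+
+/* e.g. an op on 32-bit sources runs in midgard_reg_mode_32; the
+ * conversion cases in emit_alu below adjust this when sizes differ */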
+
static void
emit_alu(compiler_context *ctx, nir_alu_instr *instr)
{
bool is_ssa = instr->dest.dest.is_ssa;
unsigned dest = nir_dest_index(ctx, &instr->dest.dest);
- unsigned nr_components = is_ssa ? instr->dest.dest.ssa.num_components : instr->dest.dest.reg.reg->num_components;
+ unsigned nr_components = nir_dest_num_components(instr->dest.dest);
unsigned nr_inputs = nir_op_infos[instr->op].num_inputs;
/* Most Midgard ALU ops have a 1:1 correspondence to NIR ops; these are
unsigned broadcast_swizzle = 0;
+ /* What register mode should we operate in? */
+ midgard_reg_mode reg_mode =
+ reg_mode_for_nir(instr);
+
+ /* Do we need a destination override? Used for inline
+ * type conversion */
+
+ midgard_dest_override dest_override =
+ midgard_dest_override_none;
+
+ /* Should we read a half-size source and sign- or zero-extend it? */
+
+ bool half_1 = false, sext_1 = false;
+ bool half_2 = false, sext_2 = false;
+
+ unsigned src_bitsize = nir_src_bit_size(instr->src[0].src);
+ unsigned dst_bitsize = nir_dest_bit_size(instr->dest.dest);
+
switch (instr->op) {
ALU_CASE(fadd, fadd);
ALU_CASE(fmul, fmul);
ALU_CASE(i2f32, i2f_rtz);
ALU_CASE(u2f32, u2f_rtz);
+ ALU_CASE(f2i16, f2i_rtz);
+ ALU_CASE(f2u16, f2u_rtz);
+ ALU_CASE(i2f16, i2f_rtz);
+ ALU_CASE(u2f16, u2f_rtz);
+
ALU_CASE(fsin, fsin);
ALU_CASE(fcos, fcos);
ALU_CASE(fneg, fmov);
ALU_CASE(fsat, fmov);
+ /* For size conversion, we use a move. Ideally though we would squash
+ * these ops together; maybe that has to happen later in NIR as part of
+ * propagation...? An earlier algebraic pass ensured we step down by
+ * exactly one size. If stepping down, we use a dest override to
+ * reduce the size; if stepping up, we use a larger-sized move with a
+ * half source and a sign/zero-extension modifier */
+
+ case nir_op_i2i8:
+ case nir_op_i2i16:
+ case nir_op_i2i32:
+ /* If we end up upscaling, we'll need a sign-extend on the
+ * operand (the second argument) */
+
+ sext_2 = true;
+ /* fallthrough */
+ case nir_op_u2u8:
+ case nir_op_u2u16:
+ case nir_op_u2u32: {
+ op = midgard_alu_op_imov;
+
+ if (dst_bitsize == (src_bitsize * 2)) {
+ /* Converting up */
+ half_2 = true;
+
+ /* Use a greater register mode */
+ reg_mode++;
+ } else if (src_bitsize == (dst_bitsize * 2)) {
+ /* Converting down */
+ dest_override = midgard_dest_override_lower;
+ }
+
+ break;
+ }
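+
+ /* e.g. u2u16 of an 8-bit source: dst_bitsize (16) == src_bitsize
+ * (8) * 2, so we flag a half source and step reg_mode_8 up to
+ * reg_mode_16; u2u8 of a 16-bit source instead takes the
+ * dest_override path */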
+
+ case nir_op_f2f16: {
+ assert(src_bitsize == 32);
+
+ op = midgard_alu_op_fmov;
+ dest_override = midgard_dest_override_lower;
+ break;
+ }
+
+ case nir_op_f2f32: {
+ assert(src_bitsize == 16);
+
+ op = midgard_alu_op_fmov;
+ half_2 = true;
+ reg_mode++;
+ break;
+ }
+
/* For greater-or-equal, we lower to less-or-equal and flip the
* arguments */
bool is_int = midgard_is_integer_op(op);
+ ins.mask = mask_of(nr_components);
+
midgard_vector_alu alu = {
.op = op,
- .reg_mode = midgard_reg_mode_32,
- .dest_override = midgard_dest_override_none,
+ .reg_mode = reg_mode,
+ .dest_override = dest_override,
.outmod = outmod,
- /* Writemask only valid for non-SSA NIR */
- .mask = expand_writemask(mask_of(nr_components)),
-
- .src1 = vector_alu_srco_unsigned(vector_alu_modifiers(nirmods[0], is_int, broadcast_swizzle)),
- .src2 = vector_alu_srco_unsigned(vector_alu_modifiers(nirmods[1], is_int, broadcast_swizzle)),
+ .src1 = vector_alu_srco_unsigned(vector_alu_modifiers(nirmods[0], is_int, broadcast_swizzle, half_1, sext_1)),
+ .src2 = vector_alu_srco_unsigned(vector_alu_modifiers(nirmods[1], is_int, broadcast_swizzle, half_2, sext_2)),
};
/* Apply writemask if non-SSA, keeping in mind that we can't write to components that don't exist */
if (!is_ssa)
- alu.mask &= expand_writemask(instr->dest.write_mask);
+ ins.mask &= instr->dest.write_mask;
ins.alu = alu;
uint8_t original_swizzle[4];
memcpy(original_swizzle, nirmods[0]->swizzle, sizeof(nirmods[0]->swizzle));
+ unsigned orig_mask = ins.mask;
for (int i = 0; i < nr_components; ++i) {
/* Mask the associated component, dropping the
* instruction if needed */
- ins.alu.mask = (0x3) << (2 * i);
- ins.alu.mask &= alu.mask;
+ ins.mask = 1 << i;
+ ins.mask &= orig_mask;
- if (!ins.alu.mask)
+ if (!ins.mask)
continue;
for (int j = 0; j < 4; ++j)
nirmods[0]->swizzle[j] = original_swizzle[i]; /* Pull from the correct component */
- ins.alu.src1 = vector_alu_srco_unsigned(vector_alu_modifiers(nirmods[0], is_int, broadcast_swizzle));
+ ins.alu.src1 = vector_alu_srco_unsigned(vector_alu_modifiers(nirmods[0], is_int, broadcast_swizzle, half_1, false));
emit_mir_instruction(ctx, ins);
}
} else {
compiler_context *ctx,
unsigned dest, unsigned offset,
unsigned nr_comp, unsigned component,
- nir_src *indirect_offset)
+ nir_src *indirect_offset, nir_alu_type type)
{
/* XXX: Half-floats? */
/* TODO: swizzle, mask */
midgard_instruction ins = m_ld_vary_32(dest, offset);
- ins.load_store.mask = mask_of(nr_comp);
+ ins.mask = mask_of(nr_comp);
ins.load_store.swizzle = SWIZZLE_XYZW >> (2 * component);
midgard_varying_parameter p = {
ins.load_store.unknown = 0x1e9e; /* xxx: what is this? */
}
+ /* Use the type appropriate load */
+ switch (type) {
+ case nir_type_uint:
+ case nir_type_bool:
+ ins.load_store.op = midgard_op_ld_vary_32u;
+ break;
+ case nir_type_int:
+ ins.load_store.op = midgard_op_ld_vary_32i;
+ break;
+ case nir_type_float:
+ ins.load_store.op = midgard_op_ld_vary_32;
+ break;
+ default:
+ unreachable("Attempted to load unknown type");
+ break;
+ }
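+ /* (presumably the typed variants keep integer varyings from
+ * being munged by floating-point interpolation) */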
+
emit_mir_instruction(ctx, ins);
}
emit_ubo_read(ctx, dest, uniform, NULL, 0);
}
-/* Reads RGBA8888 value from the tilebuffer and converts to a RGBA32F register,
- * using scalar ops functional on earlier Midgard generations. Newer Midgard
- * generations have faster vectorized reads. This operation is for blend
- * shaders in particular; reading the tilebuffer from the fragment shader
- * remains an open problem. */
-
-static void
-emit_fb_read_blend_scalar(compiler_context *ctx, unsigned reg)
-{
- midgard_instruction ins = m_ld_color_buffer_8(reg, 0);
- ins.load_store.swizzle = 0; /* xxxx */
-
- /* Read each component sequentially */
-
- for (unsigned c = 0; c < 4; ++c) {
- ins.load_store.mask = (1 << c);
- ins.load_store.unknown = c;
- emit_mir_instruction(ctx, ins);
- }
-
- /* vadd.u2f hr2, zext(hr2), #0 */
-
- midgard_vector_alu_src alu_src = blank_alu_src;
- alu_src.mod = midgard_int_zero_extend;
- alu_src.half = true;
-
- midgard_instruction u2f = {
- .type = TAG_ALU_4,
- .ssa_args = {
- .src0 = reg,
- .src1 = SSA_UNUSED_0,
- .dest = reg,
- .inline_constant = true
- },
- .alu = {
- .op = midgard_alu_op_u2f_rtz,
- .reg_mode = midgard_reg_mode_16,
- .dest_override = midgard_dest_override_none,
- .mask = 0xF,
- .src1 = vector_alu_srco_unsigned(alu_src),
- .src2 = vector_alu_srco_unsigned(blank_alu_src),
- }
- };
-
- emit_mir_instruction(ctx, u2f);
-
- /* vmul.fmul.sat r1, hr2, #0.00392151 */
-
- alu_src.mod = 0;
-
- midgard_instruction fmul = {
- .type = TAG_ALU_4,
- .inline_constant = _mesa_float_to_half(1.0 / 255.0),
- .ssa_args = {
- .src0 = reg,
- .dest = reg,
- .src1 = SSA_UNUSED_0,
- .inline_constant = true
- },
- .alu = {
- .op = midgard_alu_op_fmul,
- .reg_mode = midgard_reg_mode_32,
- .dest_override = midgard_dest_override_none,
- .outmod = midgard_outmod_sat,
- .mask = 0xFF,
- .src1 = vector_alu_srco_unsigned(alu_src),
- .src2 = vector_alu_srco_unsigned(blank_alu_src),
- }
- };
-
- emit_mir_instruction(ctx, fmul);
-}
-
static void
emit_intrinsic(compiler_context *ctx, nir_intrinsic_instr *instr)
{
bool is_uniform = instr->intrinsic == nir_intrinsic_load_uniform;
bool is_ubo = instr->intrinsic == nir_intrinsic_load_ubo;
+ /* Get the base type of the intrinsic */
+ /* TODO: Infer type? Does it matter? */
+ nir_alu_type t =
+ is_ubo ? nir_type_uint : nir_intrinsic_type(instr);
+ t = nir_alu_type_get_base_type(t);
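+ /* e.g. nir_type_float32 reduces to plain nir_type_float here */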
+
if (!is_ubo) {
offset = nir_intrinsic_base(instr);
}
uint32_t uindex = nir_src_as_uint(index) + 1;
emit_ubo_read(ctx, reg, offset / 16, NULL, uindex);
} else if (ctx->stage == MESA_SHADER_FRAGMENT && !ctx->is_blend) {
- emit_varying_read(ctx, reg, offset, nr_comp, component, !direct ? &instr->src[0] : NULL);
+ emit_varying_read(ctx, reg, offset, nr_comp, component, !direct ? &instr->src[0] : NULL, t);
} else if (ctx->is_blend) {
/* For blend shaders, load the input color, which is
* preloaded to r0 */
} else if (ctx->stage == MESA_SHADER_VERTEX) {
midgard_instruction ins = m_ld_attr_32(reg, offset);
ins.load_store.unknown = 0x1E1E; /* XXX: What is this? */
- ins.load_store.mask = mask_of(nr_comp);
+ ins.mask = mask_of(nr_comp);
+
+ /* Use the type appropriate load */
+ switch (t) {
+ case nir_type_uint:
+ case nir_type_bool:
+ ins.load_store.op = midgard_op_ld_attr_32u;
+ break;
+ case nir_type_int:
+ ins.load_store.op = midgard_op_ld_attr_32i;
+ break;
+ case nir_type_float:
+ ins.load_store.op = midgard_op_ld_attr_32;
+ break;
+ default:
+ unreachable("Attempted to load unknown type");
+ break;
+ }
+
emit_mir_instruction(ctx, ins);
} else {
DBG("Unknown load\n");
break;
}
- case nir_intrinsic_load_output:
- assert(nir_src_is_const(instr->src[0]));
- reg = nir_dest_index(ctx, &instr->dest);
- if (ctx->is_blend) {
- /* TODO: MRT */
- emit_fb_read_blend_scalar(ctx, reg);
- } else {
- DBG("Unknown output load\n");
- assert(0);
- }
+ /* Reads a 128-bit value raw off the tilebuffer during blending, tasty */
+ case nir_intrinsic_load_raw_output_pan:
+ reg = nir_dest_index(ctx, &instr->dest);
+ assert(ctx->is_blend);
+ midgard_instruction ins = m_ld_color_buffer_8(reg, 0);
+ emit_mir_instruction(ctx, ins);
break;
case nir_intrinsic_load_blend_const_color_rgba: {
break;
+ /* Special case of store_output for lowered blend shaders */
+ case nir_intrinsic_store_raw_output_pan:
+ assert(ctx->stage == MESA_SHADER_FRAGMENT);
+ reg = nir_src_index(ctx, &instr->src[0]);
+
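+ /* Move the raw value into fixed r0, which the fragment writeout
+ * is wired to read */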
+ midgard_instruction move = v_mov(reg, blank_alu_src, SSA_FIXED_REGISTER(0));
+ emit_mir_instruction(ctx, move);
+ ctx->fragment_output = reg;
+
+ break;
+
case nir_intrinsic_load_alpha_ref_float:
assert(instr->dest.is_ssa);
}
}
+/* Tries to attach an explicit LOD / bias as a constant. Returns whether this
+ * was successful */
+
+static bool
+pan_attach_constant_bias(
+ compiler_context *ctx,
+ nir_src lod,
+ midgard_texture_word *word)
+{
+ /* To attach as constant, it has to *be* constant */
+
+ if (!nir_src_is_const(lod))
+ return false;
+
+ float f = nir_src_as_float(lod);
+
+ /* Break into fixed-point */
+ signed lod_int = f;
+ float lod_frac = f - lod_int;
+
+ /* Carry over negative fractions */
+ if (lod_frac < 0.0) {
+ lod_int--;
+ lod_frac += 1.0;
+ }
+
+ /* Encode */
+ word->bias = float_to_ubyte(lod_frac);
+ word->bias_int = lod_int;
+
+ return true;
+}
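+
+/* Worked example: lod = -0.5 truncates to lod_int = 0 with lod_frac =
+ * -0.5; the carry gives lod_int = -1, lod_frac = 0.5, so bias_int = -1
+ * and bias ~= 128 (0.5 scaled to an unsigned byte) */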
+
+static enum mali_sampler_type
+midgard_sampler_type(nir_alu_type t)
+{
+ switch (nir_alu_type_get_base_type(t)) {
+ case nir_type_float:
+ return MALI_SAMPLER_FLOAT;
+ case nir_type_int:
+ return MALI_SAMPLER_SIGNED;
+ case nir_type_uint:
+ return MALI_SAMPLER_UNSIGNED;
+ default:
+ unreachable("Unknown sampler type");
+ }
+}
+
static void
emit_texop_native(compiler_context *ctx, nir_tex_instr *instr,
unsigned midgard_texop)
int texture_index = instr->texture_index;
int sampler_index = texture_index;
- unsigned position_swizzle = 0;
+ /* No helper to build texture words -- we do it all here */
+ midgard_instruction ins = {
+ .type = TAG_TEXTURE_4,
+ .mask = 0xF,
+ .texture = {
+ .op = midgard_texop,
+ .format = midgard_tex_format(instr->sampler_dim),
+ .texture_handle = texture_index,
+ .sampler_handle = sampler_index,
+
+ /* TODO: Regalloc it in */
+ .swizzle = SWIZZLE_XYZW,
+
+ /* TODO: half */
+ .in_reg_full = 1,
+ .out_full = 1,
+
+ .sampler_type = midgard_sampler_type(instr->dest_type),
+ }
+ };
for (unsigned i = 0; i < instr->num_srcs; ++i) {
int reg = SSA_FIXED_REGISTER(REGISTER_TEXTURE_BASE + in_reg);
switch (instr->src[i].src_type) {
case nir_tex_src_coord: {
if (instr->sampler_dim == GLSL_SAMPLER_DIM_CUBE) {
+ /* texelFetch is undefined on samplerCube */
+ assert(midgard_texop != TEXTURE_OP_TEXEL_FETCH);
+
/* For cubemaps, we need to load coords into
* special r27, and then use a special ld/st op
* to select the face and copy the xy into the
midgard_instruction st = m_st_cubemap_coords(reg, 0);
st.load_store.unknown = 0x24; /* XXX: What is this? */
- st.load_store.mask = 0x3; /* xy */
+ st.mask = 0x3; /* xy */
st.load_store.swizzle = alu_src.swizzle;
emit_mir_instruction(ctx, st);
- position_swizzle = swizzle_of(2);
+ ins.texture.in_reg_swizzle = swizzle_of(2);
} else {
- position_swizzle = alu_src.swizzle = swizzle_of(nr_comp);
+ ins.texture.in_reg_swizzle = alu_src.swizzle = swizzle_of(nr_comp);
- midgard_instruction ins = v_mov(index, alu_src, reg);
- ins.alu.mask = expand_writemask(mask_of(nr_comp));
- emit_mir_instruction(ctx, ins);
+ midgard_instruction mov = v_mov(index, alu_src, reg);
+ mov.mask = mask_of(nr_comp);
+ emit_mir_instruction(ctx, mov);
if (midgard_texop == TEXTURE_OP_TEXEL_FETCH) {
/* Texel fetch opcodes care about the
zero.ssa_args.inline_constant = true;
zero.ssa_args.src1 = SSA_FIXED_REGISTER(REGISTER_CONSTANT);
zero.has_constants = true;
- zero.alu.mask = ~ins.alu.mask;
+ zero.mask = ~mov.mask;
emit_mir_instruction(ctx, zero);
- position_swizzle = SWIZZLE_XYZZ;
+ ins.texture.in_reg_swizzle = SWIZZLE_XYZZ;
} else {
- /* To the hardware, z is depth, w is array
- * layer. To NIR, z is array layer for a 2D
- * array */
-
- if (instr->sampler_dim == GLSL_SAMPLER_DIM_2D)
- position_swizzle = SWIZZLE_XYXZ;
+ /* Non-texel fetch doesn't need that
+ * nonsense. However, we do use the Z
+ * component for array indexing: NIR
+ * keeps the 2D array layer in z, but
+ * the hardware wants it in w */
+ bool is_3d = instr->sampler_dim == GLSL_SAMPLER_DIM_3D;
+ ins.texture.in_reg_swizzle = is_3d ? SWIZZLE_XYZZ : SWIZZLE_XYXZ;
}
}
case nir_tex_src_bias:
case nir_tex_src_lod: {
- /* To keep RA simple, we put the bias/LOD into the w
- * component of the input source, which is otherwise in xy */
+ /* Try as a constant if we can */
+
+ bool is_txf = midgard_texop == TEXTURE_OP_TEXEL_FETCH;
+ if (!is_txf && pan_attach_constant_bias(ctx, instr->src[i].src, &ins.texture))
+ break;
+
+ /* Otherwise we use a register. To keep RA simple, we
+ * put the bias/LOD into the w component of the input
+ * source, which is otherwise in xy */
alu_src.swizzle = SWIZZLE_XXXX;
- midgard_instruction ins = v_mov(index, alu_src, reg);
- ins.alu.mask = expand_writemask(1 << COMPONENT_W);
- emit_mir_instruction(ctx, ins);
+ midgard_instruction mov = v_mov(index, alu_src, reg);
+ mov.mask = 1 << COMPONENT_W;
+ emit_mir_instruction(ctx, mov);
+
+ ins.texture.lod_register = true;
+
+ midgard_tex_register_select sel = {
+ .select = in_reg,
+ .full = 1,
+
+ /* w */
+ .component_lo = 1,
+ .component_hi = 1
+ };
+
+ uint8_t packed;
+ memcpy(&packed, &sel, sizeof(packed));
+ ins.texture.bias = packed;
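+ /* With lod_register set, the 8-bit bias field instead carries
+ * this packed register select; component_lo/hi = 1 together pick
+ * lane 3 (w), matching the mov above */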
+
break;
};
}
}
- /* No helper to build texture words -- we do it all here */
- midgard_instruction ins = {
- .type = TAG_TEXTURE_4,
- .texture = {
- .op = midgard_texop,
- .format = midgard_tex_format(instr->sampler_dim),
- .texture_handle = texture_index,
- .sampler_handle = sampler_index,
-
- /* TODO: Regalloc it in */
- .swizzle = SWIZZLE_XYZW,
- .mask = 0xF,
-
- /* TODO: half */
- .in_reg_full = 1,
- .in_reg_swizzle = position_swizzle,
- .out_full = 1,
-
- /* Always 1 */
- .unknown7 = 1,
- }
- };
-
/* Set registers to read and write from the same place */
ins.texture.in_reg_select = in_reg;
ins.texture.out_reg_select = out_reg;
- /* Setup bias/LOD if necessary. Only register mode support right now.
- * TODO: Immediate mode for performance gains */
-
- bool needs_lod =
- instr->op == nir_texop_txb ||
- instr->op == nir_texop_txl ||
- instr->op == nir_texop_txf;
-
- if (needs_lod) {
- ins.texture.lod_register = true;
-
- midgard_tex_register_select sel = {
- .select = in_reg,
- .full = 1,
-
- /* w */
- .component_lo = 1,
- .component_hi = 1
- };
-
- uint8_t packed;
- memcpy(&packed, &sel, sizeof(packed));
- ins.texture.bias = packed;
- }
-
emit_mir_instruction(ctx, ins);
/* Simultaneously alias the destination and emit a move for it. The move will be eliminated if possible */
uint32_t value = cons[component];
bool is_vector = false;
- unsigned mask = effective_writemask(&ins->alu);
+ unsigned mask = effective_writemask(&ins->alu, ins->mask);
for (int c = 1; c < 4; ++c) {
/* We only care if this component is actually used */
/* abs or neg */
if (!is_int && src.mod) return true;
+ /* Of the int mods, only shift matters in isolation */
+ if (is_int && src.mod == midgard_int_shift) return true;
+
+ /* size-conversion */
+ if (src.half) return true;
+
/* swizzle */
for (unsigned c = 0; c < 4; ++c) {
if (!(mask & (1 << c))) continue;
static bool
mir_nontrivial_source2_mod(midgard_instruction *ins)
{
- unsigned mask = squeeze_writemask(ins->alu.mask);
bool is_int = midgard_is_integer_op(ins->alu.op);
midgard_vector_alu_src src2 =
vector_alu_from_unsigned(ins->alu.src2);
- return mir_nontrivial_mod(src2, is_int, mask);
+ return mir_nontrivial_mod(src2, is_int, ins->mask);
}
static bool
bool is_int = midgard_is_integer_op(ins->alu.op);
unsigned mod = ins->alu.outmod;
+ /* Type conversion is a sort of outmod */
+ if (ins->alu.dest_override != midgard_dest_override_none)
+ return true;
+
if (is_int)
return mod != midgard_outmod_int_wrap;
else
if (v->ssa_args.dest == from) {
/* We don't want to track partial writes ... */
- if (v->alu.mask == 0xF) {
+ if (v->mask == 0xF) {
v->ssa_args.dest = to;
eliminated = true;
}
EMIT(alu_br_compact_cond, midgard_jmp_writeout_op_writeout, TAG_ALU_4, -1, midgard_condition_always);
}
-/* For the blend epilogue, we need to convert the blended fragment vec4 (stored
- * in r0) to a RGBA8888 value by scaling and type converting. We then output it
- * with the int8 analogue to the fragment epilogue */
-
-static void
-emit_blend_epilogue(compiler_context *ctx)
-{
- /* vmul.fmul.none.fulllow hr48, r0, #255 */
-
- midgard_instruction scale = {
- .type = TAG_ALU_4,
- .unit = UNIT_VMUL,
- .inline_constant = _mesa_float_to_half(255.0),
- .ssa_args = {
- .src0 = SSA_FIXED_REGISTER(0),
- .src1 = SSA_UNUSED_0,
- .dest = SSA_FIXED_REGISTER(24),
- .inline_constant = true
- },
- .alu = {
- .op = midgard_alu_op_fmul,
- .reg_mode = midgard_reg_mode_32,
- .dest_override = midgard_dest_override_lower,
- .mask = 0xFF,
- .src1 = vector_alu_srco_unsigned(blank_alu_src),
- .src2 = vector_alu_srco_unsigned(blank_alu_src),
- }
- };
-
- emit_mir_instruction(ctx, scale);
-
- /* vadd.f2u_rte.pos.low hr0, hr48, #0 */
-
- midgard_vector_alu_src alu_src = blank_alu_src;
- alu_src.half = true;
-
- midgard_instruction f2u_rte = {
- .type = TAG_ALU_4,
- .ssa_args = {
- .src0 = SSA_FIXED_REGISTER(24),
- .src1 = SSA_UNUSED_0,
- .dest = SSA_FIXED_REGISTER(0),
- .inline_constant = true
- },
- .alu = {
- .op = midgard_alu_op_f2u_rte,
- .reg_mode = midgard_reg_mode_16,
- .dest_override = midgard_dest_override_lower,
- .outmod = midgard_outmod_pos,
- .mask = 0xF,
- .src1 = vector_alu_srco_unsigned(alu_src),
- .src2 = vector_alu_srco_unsigned(blank_alu_src),
- }
- };
-
- emit_mir_instruction(ctx, f2u_rte);
-
- EMIT(alu_br_compact_cond, midgard_jmp_writeout_op_writeout, TAG_ALU_4, 0, midgard_condition_always);
- EMIT(alu_br_compact_cond, midgard_jmp_writeout_op_writeout, TAG_ALU_4, -1, midgard_condition_always);
-}
-
static midgard_block *
emit_block(compiler_context *ctx, nir_block *block)
{
/* Append fragment shader epilogue (value writeout) */
if (ctx->stage == MESA_SHADER_FRAGMENT) {
if (block == nir_impl_last_block(ctx->func->impl)) {
- if (ctx->is_blend)
- emit_blend_epilogue(ctx);
- else
- emit_fragment_epilogue(ctx);
+ emit_fragment_epilogue(ctx);
}
}