M_LOAD(ld_color_buffer_8);
//M_STORE(st_vary_16);
M_STORE(st_vary_32);
-M_STORE(st_cubemap_coords);
+M_LOAD(st_cubemap_coords);
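The M_STORE -> M_LOAD flip matters for the cubemap rewrite further down. Both
macros presumably expand from one M_LOAD_STORE generator in which a load binds
its first argument as the destination node while a store binds it as a source.
A rough sketch under that assumption (field set abbreviated, not the verbatim
macro):

#define M_LOAD_STORE(name, store)                                \
        static midgard_instruction m_##name(unsigned ssa,        \
                                            unsigned address) {  \
                midgard_instruction i = {                        \
                        .type = TAG_LOAD_STORE_4,                \
                        .mask = 0xF,                             \
                        .ssa_args = {                            \
                                .dest = -1,                      \
                                .src0 = -1,                      \
                                .src1 = -1,                      \
                        },                                       \
                        .load_store = {                          \
                                .op = midgard_op_##name,         \
                                .address = address,              \
                        },                                       \
                };                                               \
                                                                 \
                /* Loads write the node; stores read it */       \
                if (store)                                       \
                        i.ssa_args.src0 = ssa;                   \
                else                                             \
                        i.ssa_args.dest = ssa;                   \
                                                                 \
                return i;                                        \
        }

#define M_LOAD(name)  M_LOAD_STORE(name, false)
#define M_STORE(name) M_LOAD_STORE(name, true)

So m_st_cubemap_coords(temp, 0) below names temp as a destination, with the
coordinate source attached separately through ssa_args.src0.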
static midgard_branch_extended
midgard_create_branch_extended(midgard_condition cond,
midgard_jmp_writeout_op op,
unsigned dest_tag,
signed quadword_offset)
{
- /* For unclear reasons, the condition code is repeated 8 times */
+ /* The condition code is actually a LUT describing a function to
+ * combine multiple condition codes. However, we only support a single
+ * condition code at the moment, so we just duplicate it across all
+ * eight 2-bit slots of the field. */
+
uint16_t duplicated_cond =
(cond << 14) |
(cond << 12) |
}
}
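The unrolled ORs above fill each of the eight 2-bit LUT slots with the same
code. An equivalent loop form, as a purely illustrative helper:

#include <stdint.h>

/* Replicate a 2-bit condition code across the 16-bit LUT field */
static inline uint16_t
duplicate_cond(unsigned cond)
{
        uint16_t duplicated = 0;

        for (unsigned slot = 0; slot < 16; slot += 2)
                duplicated |= (uint16_t) (cond << slot);

        return duplicated;
}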
-static unsigned
-nir_dest_index(compiler_context *ctx, nir_dest *dst)
-{
- if (dst->is_ssa)
- return dst->ssa.index;
- else {
- assert(!dst->reg.indirect);
- return ctx->func->impl->ssa_alloc + dst->reg.reg->index;
- }
-}
-
static void
emit_load_const(compiler_context *ctx, nir_load_const_instr *instr)
{
nir_ssa_def def = instr->def;

float *v = rzalloc_array(NULL, float, 4);
nir_const_load_to_arr(v, instr, f32);
- _mesa_hash_table_u64_insert(ctx->ssa_constants, def.index + 1, v);
-}
-static unsigned
-nir_src_index(compiler_context *ctx, nir_src *src)
-{
- if (src->is_ssa)
- return src->ssa->index;
- else {
- assert(!src->reg.indirect);
- return ctx->func->impl->ssa_alloc + src->reg.reg->index;
- }
+ /* Shifted for SSA, +1 for off-by-one */
+ _mesa_hash_table_u64_insert(ctx->ssa_constants, (def.index << 1) + 1, v);
}
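The shift pairs with the IS_REG test used later in this series (copy-prop now
checks src & IS_REG): SSA defs and NIR registers share a single index space,
tagged by bit 0. A sketch of the implied packing -- the helper names here are
illustrative, the real logic presumably lives in the reworked
nir_dest_index/nir_src_index:

#define IS_REG (1)

/* SSA definitions land on even indices... */
static unsigned
index_for_ssa_def(const nir_ssa_def *def)
{
        return (def->index << 1) | 0;
}

/* ...NIR registers on odd ones, so the two never collide */
static unsigned
index_for_register(const nir_register *reg)
{
        return (reg->index << 1) | IS_REG;
}

The extra +1 on the hash-table key is unchanged from before, presumably so a
valid index of 0 never becomes a zero key.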
-static unsigned
-nir_alu_src_index(compiler_context *ctx, nir_alu_src *src)
+/* Normally constants are embedded implicitly, but for I/O and such we have to
+ * explicitly emit a move with the constant source */
+
+static void
+emit_explicit_constant(compiler_context *ctx, unsigned node, unsigned to)
{
- return nir_src_index(ctx, &src->src);
+ void *constant_value = _mesa_hash_table_u64_search(ctx->ssa_constants, node + 1);
+
+ if (constant_value) {
+ midgard_instruction ins = v_mov(SSA_FIXED_REGISTER(REGISTER_CONSTANT), blank_alu_src, to);
+ attach_constants(ctx, &ins, constant_value, node + 1);
+ emit_mir_instruction(ctx, ins);
+ }
}
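Typical use, mirroring the st_vary hunk below: when an instruction cannot
embed a constant operand, materialize the constant into its node first and
then consume the node as usual.

/* Store a varying whose source may be a constant: if reg names a
 * constant, this first emits "mov reg, #const"; otherwise it is a
 * no-op and the store reads reg directly. */
emit_explicit_constant(ctx, reg, reg);

midgard_instruction st = m_st_vary_32(reg, offset);
emit_mir_instruction(ctx, st);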
static void
emit_alu(compiler_context *ctx, nir_alu_instr *instr)
{
+ /* Derivatives end up emitted on the texture pipe, not the ALUs. This
+ * is handled elsewhere */
+
+ if (instr->op == nir_op_fddx || instr->op == nir_op_fddy) {
+ midgard_emit_derivatives(ctx, instr);
+ return;
+ }
+
bool is_ssa = instr->dest.dest.is_ssa;
unsigned dest = nir_dest_index(ctx, &instr->dest.dest);
ALU_CASE(fsin, fsin);
ALU_CASE(fcos, fcos);
- /* Second op implicit #0 */
- ALU_CASE(inot, inor);
+ /* We'll set invert */
+ ALU_CASE(inot, imov);
ALU_CASE(iand, iand);
ALU_CASE(ior, ior);
ALU_CASE(ixor, ixor);
ins.constants[0] = 0.0f;
ins.alu.src2 = vector_alu_srco_unsigned(blank_alu_src_xxxx);
} else if (instr->op == nir_op_inot) {
- /* ~b = ~(b & b), so duplicate the source */
- ins.ssa_args.src1 = ins.ssa_args.src0;
- ins.alu.src2 = ins.alu.src1;
+ ins.invert = true;
}
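The invert flag set here is consumed by midgard_lower_invert, added to the
pass loop near the end of this diff. One plausible lowering for the mov case,
sketched only (the real pass may differ in details): since b | b == b, an
inor of a source with itself computes ~b.

/* Sketch: rewrite "imov.invert dst, src" as "inor dst, src, src" */
static void
lower_inverted_move(midgard_instruction *ins)
{
        assert(ins->invert && OP_IS_MOVE(ins->alu.op));

        ins->alu.op = midgard_alu_op_inor;

        /* Moves carry their source in src1; duplicate it into src0 */
        ins->ssa_args.src0 = ins->ssa_args.src1;
        ins->alu.src1 = ins->alu.src2;

        ins->invert = false;
}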
if ((opcode_props & UNITS_ALL) == UNIT_VLUT) {
struct midgard_instruction discard = v_branch(conditional, false);
discard.branch.target_type = TARGET_DISCARD;
emit_mir_instruction(ctx, discard);
-
- ctx->can_discard = true;
break;
}
/* For blend shaders, load the input color, which is
* preloaded to r0 */
- midgard_instruction move = v_mov(reg, blank_alu_src, SSA_FIXED_REGISTER(0));
+ midgard_instruction move = v_mov(SSA_FIXED_REGISTER(0), blank_alu_src, reg);
emit_mir_instruction(ctx, move);
} else if (ctx->stage == MESA_SHADER_VERTEX) {
midgard_instruction ins = m_ld_attr_32(reg, offset);
ctx->fragment_output = reg;
} else if (ctx->stage == MESA_SHADER_VERTEX) {
- /* Varyings are written into one of two special
- * varying register, r26 or r27. The register itself is
- * selected as the register in the st_vary instruction,
- * minus the base of 26. E.g. write into r27 and then
- * call st_vary(1) */
-
- midgard_instruction ins = v_mov(reg, blank_alu_src, SSA_FIXED_REGISTER(26));
- emit_mir_instruction(ctx, ins);
-
/* We should have been vectorized, though we don't
* currently check that st_vary is emitted only once
* per slot (this is relevant, since there's not a mask
* parameter available on the store [set to 0 by the
* blob]). We do respect the component by adjusting the
- * swizzle. */
+ * swizzle. If this is a constant source, we'll need to
+ * emit that explicitly. */
+
+ emit_explicit_constant(ctx, reg, reg);
unsigned component = nir_intrinsic_component(instr);
- midgard_instruction st = m_st_vary_32(SSA_FIXED_REGISTER(0), offset);
+ midgard_instruction st = m_st_vary_32(reg, offset);
st.load_store.unknown = 0x1E9E; /* XXX: What is this? */
st.load_store.swizzle = SWIZZLE_XYZW << (2*component);
emit_mir_instruction(ctx, st);
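A worked example of the component shift above, assuming the usual
2-bits-per-lane swizzle encoding (X=0 through W=3):

/* component = 2, i.e. this varying occupies .zw of its slot:
 *
 *   SWIZZLE_XYZW      = 0xE4   (lane k reads source component k)
 *   SWIZZLE_XYZW << 4 = 0x40   in the 8-bit swizzle field,
 *                              so lane z reads x and lane w reads y
 */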
float ref_value = ctx->alpha_ref;
+ /* See emit_load_const */
float *v = ralloc_array(NULL, float, 4);
memcpy(v, &ref_value, sizeof(float));
- _mesa_hash_table_u64_insert(ctx->ssa_constants, instr->dest.ssa.index + 1, v);
+ _mesa_hash_table_u64_insert(ctx->ssa_constants, (instr->dest.ssa.index << 1) + 1, v);
break;
case nir_intrinsic_load_viewport_scale:
//assert (!instr->sampler);
//assert (!instr->texture_array_size);
- /* Allocate registers via a round robin scheme to alternate between the two registers */
- int reg = ctx->texture_op_count & 1;
- int in_reg = reg, out_reg = reg;
-
int texture_index = instr->texture_index;
int sampler_index = texture_index;
midgard_instruction ins = {
.type = TAG_TEXTURE_4,
.mask = 0xF,
+ .ssa_args = {
+ .dest = nir_dest_index(ctx, &instr->dest),
+ .src0 = -1,
+ .src1 = -1,
+ },
.texture = {
.op = midgard_texop,
.format = midgard_tex_format(instr->sampler_dim),
.texture_handle = texture_index,
.sampler_handle = sampler_index,
-
- /* TODO: Regalloc it in */
.swizzle = SWIZZLE_XYZW,
+ .in_reg_swizzle = SWIZZLE_XYZW,
/* TODO: half */
.in_reg_full = 1,
};
for (unsigned i = 0; i < instr->num_srcs; ++i) {
- int reg = SSA_FIXED_REGISTER(REGISTER_TEXTURE_BASE + in_reg);
int index = nir_src_index(ctx, &instr->src[i].src);
- int nr_comp = nir_src_num_components(instr->src[i].src);
midgard_vector_alu_src alu_src = blank_alu_src;
switch (instr->src[i].src_type) {
case nir_tex_src_coord: {
+ emit_explicit_constant(ctx, index, index);
+
+ /* Texel fetch coordinates use all four elements
+ * (xyz/index) regardless of texture dimensionality,
+ * which means it's necessary to zero the unused
+ * components to keep everything happy */
+
+ if (midgard_texop == TEXTURE_OP_TEXEL_FETCH) {
+ unsigned old_index = index;
+
+ index = make_compiler_temp(ctx);
+
+ /* mov index, old_index */
+ midgard_instruction mov = v_mov(old_index, blank_alu_src, index);
+ mov.mask = 0x3;
+ emit_mir_instruction(ctx, mov);
+
+ /* mov index.zw, #0 */
+ mov = v_mov(SSA_FIXED_REGISTER(REGISTER_CONSTANT),
+ blank_alu_src, index);
+ mov.has_constants = true;
+ mov.mask = (1 << COMPONENT_Z) | (1 << COMPONENT_W);
+ emit_mir_instruction(ctx, mov);
+ }
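+
+ /* Concretely, for a 2D texelFetch of (u, v) this builds
+ * temp = (u, v, 0, 0): the first mov fills xy, the
+ * second zeroes zw. */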
+
if (instr->sampler_dim == GLSL_SAMPLER_DIM_CUBE) {
/* texelFetch is undefined on samplerCube */
assert(midgard_texop != TEXTURE_OP_TEXEL_FETCH);
- /* For cubemaps, we need to load coords into
- * special r27, and then use a special ld/st op
- * to select the face and copy the xy into the
+ /* For cubemaps, we use a special ld/st op to
+ * select the face and copy the xy into the
* texture register */
- alu_src.swizzle = SWIZZLE(COMPONENT_X, COMPONENT_Y, COMPONENT_Z, COMPONENT_X);
- midgard_instruction move = v_mov(index, alu_src, SSA_FIXED_REGISTER(27));
- emit_mir_instruction(ctx, move);
-
- midgard_instruction st = m_st_cubemap_coords(reg, 0);
+ unsigned temp = make_compiler_temp(ctx);
+
+ midgard_instruction st = m_st_cubemap_coords(temp, 0);
+ st.ssa_args.src0 = index;
st.load_store.unknown = 0x24; /* XXX: What is this? */
st.mask = 0x3; /* xy */
st.load_store.swizzle = alu_src.swizzle;
emit_mir_instruction(ctx, st);
- ins.texture.in_reg_swizzle = swizzle_of(2);
+ ins.ssa_args.src0 = temp;
} else {
- ins.texture.in_reg_swizzle = alu_src.swizzle = swizzle_of(nr_comp);
-
- midgard_instruction mov = v_mov(index, alu_src, reg);
- mov.mask = mask_of(nr_comp);
- emit_mir_instruction(ctx, mov);
-
- if (midgard_texop == TEXTURE_OP_TEXEL_FETCH) {
- /* Texel fetch opcodes care about the
- * values of z and w, so we actually
- * need to spill into a second register
- * for a texel fetch with register bias
- * (for non-2D). TODO: Implement that
- */
-
- assert(instr->sampler_dim == GLSL_SAMPLER_DIM_2D);
-
- midgard_instruction zero = v_mov(index, alu_src, reg);
- zero.ssa_args.inline_constant = true;
- zero.ssa_args.src1 = SSA_FIXED_REGISTER(REGISTER_CONSTANT);
- zero.has_constants = true;
- zero.mask = ~mov.mask;
- emit_mir_instruction(ctx, zero);
- ins.texture.in_reg_swizzle = SWIZZLE_XYZZ;
- } else {
- /* Non-texel fetch doesn't need that
- * nonsense. However we do use the Z
- * for array indexing */
- bool is_3d = instr->sampler_dim == GLSL_SAMPLER_DIM_3D;
- ins.texture.in_reg_swizzle = is_3d ? SWIZZLE_XYZZ : SWIZZLE_XYXZ;
- }
+ ins.ssa_args.src0 = index;
+ }
+ if (instr->sampler_dim == GLSL_SAMPLER_DIM_2D) {
+ /* Array component in w but NIR wants it in z */
+ ins.texture.in_reg_swizzle = SWIZZLE_XYZZ;
}
break;
if (!is_txf && pan_attach_constant_bias(ctx, instr->src[i].src, &ins.texture))
break;
- /* Otherwise we use a register. To keep RA simple, we
- * put the bias/LOD into the w component of the input
- * source, which is otherwise in xy */
-
- alu_src.swizzle = SWIZZLE_XXXX;
-
- midgard_instruction mov = v_mov(index, alu_src, reg);
- mov.mask = 1 << COMPONENT_W;
- emit_mir_instruction(ctx, mov);
-
ins.texture.lod_register = true;
-
- midgard_tex_register_select sel = {
- .select = in_reg,
- .full = 1,
-
- /* w */
- .component_lo = 1,
- .component_hi = 1
- };
-
- uint8_t packed;
- memcpy(&packed, &sel, sizeof(packed));
- ins.texture.bias = packed;
+ ins.ssa_args.src1 = index;
+ emit_explicit_constant(ctx, index, index);
break;
};
}
}
- /* Set registers to read and write from the same place */
- ins.texture.in_reg_select = in_reg;
- ins.texture.out_reg_select = out_reg;
-
emit_mir_instruction(ctx, ins);
- int o_reg = REGISTER_TEXTURE_BASE + out_reg, o_index = nir_dest_index(ctx, &instr->dest);
- midgard_instruction ins2 = v_mov(SSA_FIXED_REGISTER(o_reg), blank_alu_src, o_index);
- emit_mir_instruction(ctx, ins2);
-
/* Used for .cont and .last hinting */
ctx->texture_op_count++;
}
/* We don't know how to handle these with a constant */
- if (src->mod || src->half || src->rep_low || src->rep_high) {
+ if (mir_nontrivial_source2_mod_simple(ins) || src->rep_low || src->rep_high) {
DBG("Bailing inline constant...\n");
continue;
}
}
}
-/* Basic dead code elimination on the MIR itself, which cleans up e.g. the
- * texture pipeline */
-
-static bool
-midgard_opt_dead_code_eliminate(compiler_context *ctx, midgard_block *block)
-{
- bool progress = false;
-
- mir_foreach_instr_in_block_safe(block, ins) {
- if (ins->type != TAG_ALU_4) continue;
- if (ins->compact_branch) continue;
-
- if (ins->ssa_args.dest >= SSA_FIXED_MINIMUM) continue;
- if (mir_is_live_after(ctx, block, ins, ins->ssa_args.dest)) continue;
-
- mir_remove_instruction(ins);
- progress = true;
- }
-
- return progress;
-}
-
/* Dead code elimination for branches at the end of a block - only one branch
* per block is legal semantically */
}
}
-static bool
-mir_nontrivial_mod(midgard_vector_alu_src src, bool is_int, unsigned mask)
-{
- /* abs or neg */
- if (!is_int && src.mod) return true;
-
- /* Other int mods don't matter in isolation */
- if (is_int && src.mod == midgard_int_shift) return true;
-
- /* size-conversion */
- if (src.half) return true;
-
- /* swizzle */
- for (unsigned c = 0; c < 4; ++c) {
- if (!(mask & (1 << c))) continue;
- if (((src.swizzle >> (2*c)) & 3) != c) return true;
- }
-
- return false;
-}
-
-static bool
-mir_nontrivial_source2_mod(midgard_instruction *ins)
-{
- bool is_int = midgard_is_integer_op(ins->alu.op);
-
- midgard_vector_alu_src src2 =
- vector_alu_from_unsigned(ins->alu.src2);
-
- return mir_nontrivial_mod(src2, is_int, ins->mask);
-}
-
-static bool
-mir_nontrivial_outmod(midgard_instruction *ins)
-{
- bool is_int = midgard_is_integer_op(ins->alu.op);
- unsigned mod = ins->alu.outmod;
-
- /* Type conversion is a sort of outmod */
- if (ins->alu.dest_override != midgard_dest_override_none)
- return true;
-
- if (is_int)
- return mod != midgard_outmod_int_wrap;
- else
- return mod != midgard_outmod_none;
-}
-
-static bool
-midgard_opt_copy_prop(compiler_context *ctx, midgard_block *block)
-{
- bool progress = false;
-
- mir_foreach_instr_in_block_safe(block, ins) {
- if (ins->type != TAG_ALU_4) continue;
- if (!OP_IS_MOVE(ins->alu.op)) continue;
-
- unsigned from = ins->ssa_args.src1;
- unsigned to = ins->ssa_args.dest;
-
- /* We only work on pure SSA */
-
- if (to >= SSA_FIXED_MINIMUM) continue;
- if (from >= SSA_FIXED_MINIMUM) continue;
- if (to >= ctx->func->impl->ssa_alloc) continue;
- if (from >= ctx->func->impl->ssa_alloc) continue;
-
- /* Constant propagation is not handled here, either */
- if (ins->ssa_args.inline_constant) continue;
- if (ins->has_constants) continue;
-
- if (mir_nontrivial_source2_mod(ins)) continue;
- if (mir_nontrivial_outmod(ins)) continue;
-
- /* We're clear -- rewrite */
- mir_rewrite_index_src(ctx, to, from);
- mir_remove_instruction(ins);
- progress |= true;
- }
-
- return progress;
-}
-
/* fmov.pos is an idiom for fpos. Propagate the .pos up to the source, so then
* the move can be propagated away entirely */
/* TODO: Registers? */
unsigned src = ins->ssa_args.src1;
- if (src >= ctx->func->impl->ssa_alloc) continue;
+ if (src & IS_REG) continue;
assert(!mir_has_multiple_writes(ctx, src));
/* There might be a source modifier, too */
static void
emit_fragment_epilogue(compiler_context *ctx)
{
- /* Special case: writing out constants requires us to include the move
- * explicitly now, so shove it into r0 */
-
- void *constant_value = _mesa_hash_table_u64_search(ctx->ssa_constants, ctx->fragment_output + 1);
-
- if (constant_value) {
- midgard_instruction ins = v_mov(SSA_FIXED_REGISTER(REGISTER_CONSTANT), blank_alu_src, SSA_FIXED_REGISTER(0));
- attach_constants(ctx, &ins, constant_value, ctx->fragment_output + 1);
- emit_mir_instruction(ctx, ins);
- }
+ emit_explicit_constant(ctx, ctx->fragment_output, SSA_FIXED_REGISTER(0));
/* Perform the actual fragment writeout. We have two writeout/branch
* instructions, forming a loop until writeout is successful as per the
.nir = nir,
.screen = screen,
.stage = nir->info.stage,
+ .temp_alloc = 0,
.is_blend = is_blend,
.blend_constant_offset = 0,
program->sysval_count = ctx->sysval_count;
memcpy(program->sysvals, ctx->sysvals, sizeof(ctx->sysvals[0]) * ctx->sysval_count);
- program->attribute_count = (ctx->stage == MESA_SHADER_VERTEX) ? nir->num_inputs : 0;
- program->varying_count = max_varying + 1; /* Fencepost off-by-one */
-
nir_foreach_function(func, nir) {
if (!func->impl)
continue;
progress |= midgard_opt_pos_propagate(ctx, block);
progress |= midgard_opt_copy_prop(ctx, block);
progress |= midgard_opt_dead_code_eliminate(ctx, block);
+ progress |= midgard_opt_combine_projection(ctx, block);
+ progress |= midgard_opt_varying_projection(ctx, block);
}
} while (progress);
+ mir_foreach_block(ctx, block) {
+ midgard_lower_invert(ctx, block);
+ midgard_lower_derivatives(ctx, block);
+ }
+
/* Nested control-flow can result in dead branches at the end of the
* block. This messes with our analysis and is just dead code, so cull
* them */
midgard_opt_cull_dead_branch(ctx, block);
}
+ /* Ensure we were lowered */
+ mir_foreach_instr_global(ctx, ins) {
+ assert(!ins->invert);
+ }
+
/* Schedule! */
schedule_program(ctx);
/* Deal with off-by-one related to the fencepost problem */
program->work_register_count = ctx->work_registers + 1;
-
- program->can_discard = ctx->can_discard;
program->uniform_cutoff = ctx->uniform_cutoff;
program->blend_patch_offset = ctx->blend_constant_offset;