return progress;
}
+/* Filter callback (nir_instr_filter_cb) used with nir_lower_alu_to_scalar:
+ * returns true for ALU instructions that must be scalarized because they
+ * effectively compute at 64-bit width.
+ *
+ * NOTE(review): nir_instr_as_alu() is called without checking instr->type —
+ * assumes the lowering pass only invokes this filter on ALU instructions;
+ * confirm against nir_lower_alu_to_scalar's contract. */
+static bool
+mdg_is_64(const nir_instr *instr, const void *_unused)
+{
+ const nir_alu_instr *alu = nir_instr_as_alu(instr);
+
+ /* Any op producing a 64-bit destination is 64-bit work outright. */
+ if (nir_dest_bit_size(alu->dest.dest) == 64)
+ return true;
+
+ switch (alu->op) {
+ /* High-multiply ops compute at double the destination bit size
+ * internally (e.g. 32-bit umul_high needs a 64-bit product), so
+ * treat them as 64-bit and force them scalar as well. */
+ case nir_op_umul_high:
+ case nir_op_imul_high:
+ return true;
+ default:
+ return false;
+ }
+}
+
/* Flushes undefined values to zero */
static void
NIR_PASS(progress, nir, nir_opt_vectorize);
} while (progress);
+ NIR_PASS_V(nir, nir_lower_alu_to_scalar, mdg_is_64, NULL);
+
/* Run after opts so it can hit more */
if (!is_blend)
NIR_PASS(progress, nir, nir_fuse_io_16);
/* Should we swap arguments? */
bool flip_src12 = false;
- unsigned src_bitsize = nir_src_bit_size(instr->src[0].src);
- unsigned dst_bitsize = nir_dest_bit_size(*dest);
+ ASSERTED unsigned src_bitsize = nir_src_bit_size(instr->src[0].src);
+ ASSERTED unsigned dst_bitsize = nir_dest_bit_size(*dest);
enum midgard_roundmode roundmode = MIDGARD_RTE;
ALU_CASE(iadd, iadd);
ALU_CASE(isub, isub);
ALU_CASE(imul, imul);
+ ALU_CASE(imul_high, imul);
+ ALU_CASE(umul_high, imul);
/* Zero shoved as second-arg */
ALU_CASE(iabs, iabsdiff);
ALU_CASE(mov, imov);
ALU_CASE_CMP(feq32, feq, false);
- ALU_CASE_CMP(fne32, fne, false);
+ ALU_CASE_CMP(fneu32, fne, false);
ALU_CASE_CMP(flt32, flt, false);
ALU_CASE_CMP(ieq32, ieq, true);
ALU_CASE_CMP(ine32, ine, true);
unsigned outmod = 0;
bool is_int = midgard_is_integer_op(op);
- if (midgard_is_integer_out_op(op)) {
+ if (instr->op == nir_op_umul_high || instr->op == nir_op_imul_high) {
+ outmod = midgard_outmod_int_high;
+ } else if (midgard_is_integer_out_op(op)) {
outmod = midgard_outmod_int_wrap;
} else if (instr->op == nir_op_fsat) {
outmod = midgard_outmod_sat;
mir_set_offset(ctx, &ins, offset, is_shared);
mir_set_intr_mask(instr, &ins, is_read);
+ /* Set a valid swizzle for masked out components */
+ assert(ins.mask);
+ unsigned first_component = __builtin_ffs(ins.mask) - 1;
+
+ for (unsigned i = 0; i < ARRAY_SIZE(ins.swizzle[0]); ++i) {
+ if (!(ins.mask & (1 << i)))
+ ins.swizzle[0][i] = first_component;
+ }
+
emit_mir_instruction(ctx, ins);
}
}
static void
-emit_msaa_builtin(compiler_context *ctx, nir_intrinsic_instr *instr)
+emit_special(compiler_context *ctx, nir_intrinsic_instr *instr, unsigned idx)
{
unsigned reg = nir_dest_index(&instr->dest);
midgard_instruction ld = m_ld_color_buffer_32u(reg, 0);
ld.op = midgard_op_ld_color_buffer_32u_old;
- ld.load_store.address = 97;
+ ld.load_store.address = idx;
ld.load_store.arg_2 = 0x1E;
for (int i = 0; i < 4; ++i)
else if (combined)
rt = MIDGARD_ZS_RT;
else
- assert(0);
+ unreachable("bad rt");
unsigned reg_z = ~0, reg_s = ~0;
if (combined) {
emit_vertex_builtin(ctx, instr);
break;
+ case nir_intrinsic_load_sample_mask_in:
+ emit_special(ctx, instr, 96);
+ break;
+
case nir_intrinsic_load_sample_id:
- emit_msaa_builtin(ctx, instr);
+ emit_special(ctx, instr, 97);
break;
case nir_intrinsic_memory_barrier_buffer:
break;
}
+ /* High implies computing at a higher bitsize, e.g umul_high of 32-bit
+ * requires computing at 64-bit */
+ if (midgard_is_integer_out_op(ins->op) && ins->outmod == midgard_outmod_int_high) {
+ max_bitsize *= 2;
+ assert(max_bitsize <= 64);
+ }
+
return max_bitsize;
}
/* Initialize at a global (not block) level hash tables */
ctx->ssa_constants = _mesa_hash_table_u64_create(NULL);
- ctx->hash_to_temp = _mesa_hash_table_u64_create(NULL);
/* Lower gl_Position pre-optimisation, but after lowering vars to ssa
* (so we don't accidentally duplicate the epilogue since mesa/st has
/* Assign sysvals and counts, now that we're sure
* (post-optimisation) */
- panfrost_nir_assign_sysvals(&ctx->sysvals, nir);
+ panfrost_nir_assign_sysvals(&ctx->sysvals, ctx, nir);
program->sysval_count = ctx->sysvals.sysval_count;
memcpy(program->sysvals, ctx->sysvals.sysvals, sizeof(ctx->sysvals.sysvals[0]) * ctx->sysvals.sysval_count);