append_flrp_to_dead_list(dead_flrp, alu);
}
+/**
+ * Replace flrp(a, b, c) with ffma(a, (1 - c), bc)
+ *
+ * The (1 - c) and (b * c) values are emitted as separate instructions in
+ * the hope that they can be reused by other lowered flrps that share the
+ * same sources (see the caller's selection logic).
+ */
+static void
+replace_with_single_ffma(struct nir_builder *bld, struct u_vector *dead_flrp,
+ struct nir_alu_instr *alu)
+{
+ nir_ssa_def *const a = nir_ssa_for_alu_src(bld, alu, 0);
+ nir_ssa_def *const b = nir_ssa_for_alu_src(bld, alu, 1);
+ nir_ssa_def *const c = nir_ssa_for_alu_src(bld, alu, 2);
+
+ /* Each instruction in the replacement sequence inherits the "exact"
+  * flag of the original flrp so precise-marked shaders stay precise.
+  */
+ nir_ssa_def *const neg_c = nir_fneg(bld, c);
+ nir_instr_as_alu(neg_c->parent_instr)->exact = alu->exact;
+
+ /* nir_imm_floatN_t (rather than nir_imm_float) makes the 1.0 immediate
+  * match the bit size of c, so non-32-bit flrps lower correctly.
+  */
+ nir_ssa_def *const one_minus_c =
+ nir_fadd(bld, nir_imm_floatN_t(bld, 1.0f, c->bit_size), neg_c);
+ nir_instr_as_alu(one_minus_c->parent_instr)->exact = alu->exact;
+
+ nir_ssa_def *const b_times_c = nir_fmul(bld, b, c);
+ nir_instr_as_alu(b_times_c->parent_instr)->exact = alu->exact;
+
+ nir_ssa_def *const final_ffma = nir_ffma(bld, a, one_minus_c, b_times_c);
+ nir_instr_as_alu(final_ffma->parent_instr)->exact = alu->exact;
+
+ nir_ssa_def_rewrite_uses(&alu->dest.dest.ssa, nir_src_for_ssa(final_ffma));
+
+ /* DO NOT REMOVE the original flrp yet. Many of the lowering choices are
+ * based on other uses of the sources. Removing the flrp may cause the
+ * last flrp in a sequence to make a different, incorrect choice.
+ */
+ append_flrp_to_dead_list(dead_flrp, alu);
+}
+
/**
* Replace flrp(a, b, c) with a(1-c) + bc.
*/
nir_instr_as_alu(neg_c->parent_instr)->exact = alu->exact;
nir_ssa_def *const one_minus_c =
- nir_fadd(bld, nir_imm_float(bld, 1.0f), neg_c);
+ nir_fadd(bld, nir_imm_floatN_t(bld, 1.0f, c->bit_size), neg_c);
nir_instr_as_alu(one_minus_c->parent_instr)->exact = alu->exact;
nir_ssa_def *const first_product = nir_fmul(bld, a, one_minus_c);
}
/**
- * Replace flrp(a, b, c) with (b*c ± c) + a
+ * Replace flrp(a, b, c) with (b*c ± c) + a => b*c + (a ± c)
+ *
+ * \note: This only works if a = ±1.
*/
static void
replace_with_expanded_ffma_and_add(struct nir_builder *bld,
nir_ssa_def *const b = nir_ssa_for_alu_src(bld, alu, 1);
nir_ssa_def *const c = nir_ssa_for_alu_src(bld, alu, 2);
- nir_ssa_def *const b_times_c = nir_fadd(bld, b, c);
+ nir_ssa_def *const b_times_c = nir_fmul(bld, b, c);
nir_instr_as_alu(b_times_c->parent_instr)->exact = alu->exact;
nir_ssa_def *inner_sum;
nir_ssa_def *const neg_c = nir_fneg(bld, c);
nir_instr_as_alu(neg_c->parent_instr)->exact = alu->exact;
- inner_sum = nir_fadd(bld, b_times_c, neg_c);
+ inner_sum = nir_fadd(bld, a, neg_c);
} else {
- inner_sum = nir_fadd(bld, b_times_c, c);
+ inner_sum = nir_fadd(bld, a, c);
}
nir_instr_as_alu(inner_sum->parent_instr)->exact = alu->exact;
- nir_ssa_def *const outer_sum = nir_fadd(bld, inner_sum, a);
+ nir_ssa_def *const outer_sum = nir_fadd(bld, inner_sum, b_times_c);
nir_instr_as_alu(outer_sum->parent_instr)->exact = alu->exact;
nir_ssa_def_rewrite_uses(&alu->dest.dest.ssa, nir_src_for_ssa(outer_sum));
return true;
}
+/**
+ * Counts of similar types of nir_op_flrp instructions
+ *
+ * If a similar instruction fits into more than one category, it will only be
+ * counted once. The assumption is that no other instruction will have all
+ * sources the same, or CSE would have removed one of the instructions.
+ */
+struct similar_flrp_stats {
+ unsigned src2; /* Other flrps sharing only src2 with this one. */
+ unsigned src0_and_src2; /* Other flrps sharing both src0 and src2. */
+ unsigned src1_and_src2; /* Other flrps sharing both src1 and src2. */
+};
+
+/**
+ * Collect counts of similar FLRP instructions.
+ *
+ * This function only cares about similar instructions that have src2 in
+ * common.
+ */
+static void
+get_similar_flrp_stats(nir_alu_instr *alu, struct similar_flrp_stats *st)
+{
+ memset(st, 0, sizeof(*st));
+
+ /* Walk every use of this flrp's src2 value; any other flrp found there
+  * is counted in exactly one category (src0+src2, src1+src2, or src2
+  * only), with the more-shared categories taking priority.
+  */
+ nir_foreach_use(other_use, alu->src[2].src.ssa) {
+ /* Is the use also a flrp? */
+ nir_instr *const other_instr = other_use->parent_instr;
+ if (other_instr->type != nir_instr_type_alu)
+ continue;
+
+ /* Eh-hem... don't match the instruction with itself. */
+ if (other_instr == &alu->instr)
+ continue;
+
+ nir_alu_instr *const other_alu = nir_instr_as_alu(other_instr);
+ if (other_alu->op != nir_op_flrp)
+ continue;
+
+ /* Does the other flrp use source 2 from the first flrp as its source 2
+ * as well?
+ */
+ if (!nir_alu_srcs_equal(alu, other_alu, 2, 2))
+ continue;
+
+ if (nir_alu_srcs_equal(alu, other_alu, 0, 0))
+ st->src0_and_src2++;
+ else if (nir_alu_srcs_equal(alu, other_alu, 1, 1))
+ st->src1_and_src2++;
+ else
+ st->src2++;
+ }
+}
+
static void
convert_flrp_instruction(nir_builder *bld,
struct u_vector *dead_flrp,
replace_with_strict_ffma(bld, dead_flrp, alu);
return;
}
+
+ /*
+ * - If FMA is supported and other flrp(x, _, t) exists:
+ *
+ * fma(y, t, fma(-x, t, x))
+ *
+ * The hope is that the inner FMA calculation will be shared with the
+ * other lowered flrp. This results in two FMA instructions for the
+ * first flrp and one FMA instruction for each additional flrp. It
+ * also means that the live range for x might be complete after the
+ * inner ffma instead of after the last flrp.
+ */
+ struct similar_flrp_stats st;
+
+ get_similar_flrp_stats(alu, &st);
+ if (st.src0_and_src2 > 0) {
+ replace_with_strict_ffma(bld, dead_flrp, alu);
+ return;
+ }
+
+ /*
+ * - If FMA is supported and another flrp(_, y, t) exists:
+ *
+ * fma(x, (1 - t), yt)
+ *
+ * The hope is that the (1 - t) and the yt will be shared with the
+ *    other lowered flrp. This results in 3 instructions for the first
+ * flrp and 1 for each additional flrp.
+ */
+ if (st.src1_and_src2 > 0) {
+ replace_with_single_ffma(bld, dead_flrp, alu);
+ return;
+ }
} else {
if (always_precise) {
replace_with_strict(bld, dead_flrp, alu);
return;
}
+
+ /*
+ * - If FMA is not supported and another flrp(x, _, t) exists:
+ *
+ * x(1 - t) + yt
+ *
+ * The hope is that the x(1 - t) will be shared with the other lowered
+ * flrp. This results in 4 instructions for the first flrp and 2 for
+ * each additional flrp.
+ *
+ * - If FMA is not supported and another flrp(_, y, t) exists:
+ *
+ * x(1 - t) + yt
+ *
+ * The hope is that the (1 - t) and the yt will be shared with the
+ * other lowered flrp. This results in 4 instructions for the first
+ * flrp and 2 for each additional flrp.
+ */
+ struct similar_flrp_stats st;
+
+ get_similar_flrp_stats(alu, &st);
+ if (st.src0_and_src2 > 0 || st.src1_and_src2 > 0) {
+ replace_with_strict(bld, dead_flrp, alu);
+ return;
+ }
+ }
+
+ /*
+ * - If t is constant:
+ *
+ * x(1 - t) + yt
+ *
+ * The cost is three instructions without FMA or two instructions with
+ * FMA. This is the same cost as the imprecise lowering, but it gives
+ * the instruction scheduler a little more freedom.
+ *
+ * There is no need to handle t = 0.5 specially. nir_opt_algebraic
+ * already has optimizations to convert 0.5x + 0.5y to 0.5(x + y).
+ */
+ if (alu->src[2].src.ssa->parent_instr->type == nir_instr_type_load_const) {
+ replace_with_strict(bld, dead_flrp, alu);
+ return;
}
/*