+static nir_ssa_def *
+lower_2f(nir_builder *b, nir_ssa_def *x, unsigned dest_bit_size,
+ bool src_is_signed)
+{
+ nir_ssa_def *x_sign = NULL;
+
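+   /* Do the conversion on the magnitude and re-apply the sign at the end:
+    * the final fmul by +/-1 is exact. iabs(INT64_MIN) wraps to the same
+    * bit pattern, but that still works out because everything below treats
+    * x as unsigned, so the magnitude 2^63 is read back correctly.
+    */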
+ if (src_is_signed) {
+ x_sign = nir_bcsel(b, COND_LOWER_CMP(b, ilt, x, nir_imm_int64(b, 0)),
+ nir_imm_floatN_t(b, -1, dest_bit_size),
+ nir_imm_floatN_t(b, 1, dest_bit_size));
+ x = COND_LOWER_OP(b, iabs, x);
+ }
+
+ nir_ssa_def *exp = COND_LOWER_OP(b, ufind_msb, x);
+ unsigned significand_bits;
+
+ switch (dest_bit_size) {
+ case 32:
+ significand_bits = 23;
+ break;
+ case 16:
+ significand_bits = 10;
+ break;
+ default:
+ unreachable("Invalid dest_bit_size");
+ }
+
+   /* We keep one bit more than the significand can represent (counting
+    * the implicit leading 1) and let the u2f conversion do the
+    * round-to-nearest-even, with a pre-rounding fixup below.
+    */
+ nir_ssa_def *discard =
+ nir_imax(b, nir_isub(b, exp, nir_imm_int(b, significand_bits + 1)),
+ nir_imm_int(b, 0));
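+   /* E.g. for a 32-bit destination and x = 2^40 + 1: exp = 40 and
+    * discard = 40 - 24 = 16, so we shift out 16 bits and keep a 25-bit
+    * value: 24 significant bits for f32 plus one rounding bit.
+    */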
+
+   /* Part of the "round to nearest" has to be taken care of before we
+    * discard the low bits, and that's what this pre-rounding iadd is for:
+    * when the discarded remainder is past the halfway point, we add
+    * (1 << discard) - 1 so the carry reaches the kept bits, while an exact
+    * halfway remainder is left alone. "Round to nearest even" is then
+    * handled by u2f: the shifted value either fits in the significand
+    * field (no rounding required) or carries one extra bit that lets the
+    * conversion op break the tie to even.
+    */
+   nir_ssa_def *d1 = nir_iadd(b, discard, nir_imm_int(b, 1));
+   nir_ssa_def *rem =
+      COND_LOWER_OP(b, isub, x,
+                    COND_LOWER_OP(b, ishl,
+                                  COND_LOWER_OP(b, ushr, x, d1), d1));
+   nir_ssa_def *half = COND_LOWER_OP(b, ishl, nir_imm_int64(b, 1), discard);
+   nir_ssa_def *add =
+      COND_LOWER_OP(b, bcsel, COND_LOWER_CMP(b, ult, half, rem),
+                    COND_LOWER_OP(b, isub, half, nir_imm_int64(b, 1)),
+                    nir_imm_int64(b, 0));
+   nir_ssa_def *rounded_x = COND_LOWER_OP(b, iadd, x, add);
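+   /* Worked example with discard = 2, i.e. half = 4 and rem = the low
+    * three bits of x:
+    *   rem = 0b001: below half, nothing added, the remainder is dropped.
+    *   rem = 0b101: past half, adding 0b011 carries into the kept bits,
+    *                so u2f rounds up.
+    *   rem = 0b100: exactly half, nothing added; u2f sees the lone
+    *                rounding bit and breaks the tie to even.
+    */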
+
+   /* For unsigned sources the pre-rounding iadd can wrap around; in that
+    * case x is within one rounding step of 2^64, so clamping to UINT64_MAX
+    * still yields the correctly rounded result. Signed values can't
+    * overflow because we've already taken their absolute value, which is
+    * at most 2^63.
+    */
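+   /* E.g. for a 32-bit destination and x = 2^64 - 1: discard = 39, the
+    * added (1 << 39) - 1 wraps, and the clamped UINT64_MAX is still
+    * rounded to 2^64 by the steps below.
+    */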
+ if (!src_is_signed) {
+ nir_ssa_def *overflow = COND_LOWER_CMP(b, ult, rounded_x, x);
+ rounded_x = COND_LOWER_OP(b, bcsel, overflow,
+ nir_imm_int64(b, UINT64_MAX), rounded_x);
+ }
+
+ nir_ssa_def *significand = COND_LOWER_OP(b, ushr, rounded_x, discard);
+ significand = COND_LOWER_CAST(b, u2u32, significand);
+
+ nir_ssa_def *res;
+
+ if (dest_bit_size == 32)
+ res = nir_fmul(b, nir_u2f32(b, significand),
+ nir_fexp2(b, nir_u2f32(b, discard)));
+ else
+ res = nir_fmul(b, nir_u2f16(b, significand),
+ nir_fexp2(b, nir_u2f16(b, discard)));
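+   /* res = significand * 2^discard restores the discarded scale. The power
+    * of two is exact in the destination type whenever it is finite, and if
+    * fexp2 overflows to infinity the exact result is out of range too, so
+    * infinity is the correctly rounded answer anyway.
+    */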
+
+ if (src_is_signed)
+ res = nir_fmul(b, res, x_sign);
+
+ return res;
+}
+
+static nir_ssa_def *
+lower_f2(nir_builder *b, nir_ssa_def *x, bool dst_is_signed)
+{
+ assert(x->bit_size == 16 || x->bit_size == 32);
+ nir_ssa_def *x_sign = NULL;
+
+ if (dst_is_signed)
+ x_sign = nir_fsign(b, x);
+ else
+ x = nir_fmin(b, x, nir_imm_floatN_t(b, UINT64_MAX, x->bit_size));
+
+ x = nir_ftrunc(b, x);
+
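+   /* For signed destinations, clamp to the representable range while still
+    * in the float domain, then work on the magnitude: the hi/lo split
+    * below only has to handle non-negative values, and the sign is
+    * re-applied with ineg at the end.
+    */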
+ if (dst_is_signed) {
+ x = nir_fmin(b, x, nir_imm_floatN_t(b, INT64_MAX, x->bit_size));
+ x = nir_fmax(b, x, nir_imm_floatN_t(b, INT64_MIN, x->bit_size));
+ x = nir_fabs(b, x);
+ }
+
+   nir_ssa_def *res;
+   if (x->bit_size < 32) {
+      /* 2^32 is not representable as f16, and every finite f16 value fits
+       * in 32 bits anyway, so skip the hi/lo split and zero the top half.
+       */
+      res = nir_pack_64_2x32_split(b, nir_f2u32(b, x), nir_imm_int(b, 0));
+   } else {
+      nir_ssa_def *div = nir_imm_floatN_t(b, 1ULL << 32, x->bit_size);
+      nir_ssa_def *res_hi = nir_f2u32(b, nir_fdiv(b, x, div));
+      nir_ssa_def *res_lo = nir_f2u32(b, nir_frem(b, x, div));
+      res = nir_pack_64_2x32_split(b, res_lo, res_hi);
+   }
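+   /* Worked example for the hi/lo split: x = 2^33 + 7168 is exact in f32;
+    * res_hi = f2u32(x / 2^32) = 2, res_lo = f2u32(frem(x, 2^32)) = 7168,
+    * so the pack yields the 64-bit value 2 * 2^32 + 7168.
+    */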
+
+ if (dst_is_signed)
+      res = nir_bcsel(b, nir_flt(b, x_sign,
+                                 nir_imm_floatN_t(b, 0, x->bit_size)),
+                      nir_ineg(b, res), res);
+
+ return res;
+}
+