if (d == 0) {
return nir_imm_intN_t(b, 0, n->bit_size);
} else if (util_is_power_of_two_or_zero64(d)) {
- return nir_ushr(b, n, nir_imm_int(b, util_logbase2_64(d)));
+ return nir_ushr_imm(b, n, util_logbase2_64(d));
} else {
struct util_fast_udiv_info m =
util_compute_fast_udiv_info(d, n->bit_size, n->bit_size);
if (m.pre_shift)
- n = nir_ushr(b, n, nir_imm_int(b, m.pre_shift));
+ n = nir_ushr_imm(b, n, m.pre_shift);
if (m.increment)
n = nir_uadd_sat(b, n, nir_imm_intN_t(b, m.increment, n->bit_size));
n = nir_umul_high(b, n, nir_imm_intN_t(b, m.multiplier, n->bit_size));
if (m.post_shift)
- n = nir_ushr(b, n, nir_imm_int(b, m.post_shift));
+ n = nir_ushr_imm(b, n, m.post_shift);
return n;
}
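/*
 * Illustrative sketch, not part of the patch: the scalar arithmetic that the
 * NIR emitted above computes for an unsigned divide by a constant, assuming
 * 32-bit operands.  The helper name is hypothetical; the parameters mirror
 * the fields of struct util_fast_udiv_info (pre_shift, increment, multiplier,
 * post_shift).
 */
#include <stdint.h>

static uint32_t
fast_udiv32_sketch(uint32_t n, unsigned pre_shift, uint32_t increment,
                   uint32_t multiplier, unsigned post_shift)
{
   n >>= pre_shift;                                   /* nir_ushr_imm  */
   /* saturating add, as nir_uadd_sat does */
   n = (n > UINT32_MAX - increment) ? UINT32_MAX : n + increment;
   n = (uint32_t)(((uint64_t)n * multiplier) >> 32);  /* nir_umul_high */
   return n >> post_shift;                            /* nir_ushr_imm  */
}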
static nir_ssa_def *
build_idiv(nir_builder *b, nir_ssa_def *n, int64_t d)
{
+ uint64_t abs_d = d < 0 ? -d : d;
+
if (d == 0) {
return nir_imm_intN_t(b, 0, n->bit_size);
} else if (d == 1) {
return n;
} else if (d == -1) {
return nir_ineg(b, n);
- } else if (util_is_power_of_two_or_zero64(d)) {
- uint64_t abs_d = d < 0 ? -d : d;
- nir_ssa_def *uq = nir_ishr(b, n, nir_imm_int(b, util_logbase2_64(abs_d)));
+ } else if (util_is_power_of_two_or_zero64(abs_d)) {
+ nir_ssa_def *uq = nir_ushr_imm(b, nir_iabs(b, n), util_logbase2_64(abs_d));
nir_ssa_def *n_neg = nir_ilt(b, n, nir_imm_intN_t(b, 0, n->bit_size));
nir_ssa_def *neg = d < 0 ? nir_inot(b, n_neg) : n_neg;
return nir_bcsel(b, neg, nir_ineg(b, uq), uq);
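/*
 * Illustrative sketch, not part of the patch: what the power-of-two branch
 * above computes, assuming 32-bit operands.  Dividing by +/-2^k is |n| >> k,
 * negated when n and d have opposite signs; doing the unsigned shift on |n|
 * gives the round-toward-zero behaviour of C integer division.  The helper
 * name is hypothetical; d == +/-1 is handled earlier in build_idiv.
 */
#include <stdbool.h>
#include <stdint.h>

static int32_t
idiv_pow2_sketch(int32_t n, int32_t d)   /* d is +/- a power of two, d != 0 */
{
   uint32_t abs_d = d < 0 ? -(uint32_t)d : (uint32_t)d;
   unsigned log2_d = 31 - __builtin_clz(abs_d);              /* util_logbase2 */
   uint32_t uq = (n < 0 ? -(uint32_t)n : (uint32_t)n) >> log2_d;
   bool n_neg = n < 0;                                        /* nir_ilt       */
   bool neg = d < 0 ? !n_neg : n_neg;                         /* nir_inot      */
   return (int32_t)(neg ? 0u - uq : uq);   /* nir_bcsel; negation wraps like nir_ineg */
}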
if (d < 0 && m.multiplier > 0)
res = nir_isub(b, res, n);
if (m.shift)
- res = nir_ishr(b, res, nir_imm_int(b, m.shift));
- res = nir_iadd(b, res, nir_ushr(b, res, nir_imm_int(b, n->bit_size - 1)));
+ res = nir_ishr_imm(b, res, m.shift);
+ res = nir_iadd(b, res, nir_ushr_imm(b, res, n->bit_size - 1));
return res;
}
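/*
 * Illustrative sketch, not part of the patch: the tail of the general signed
 * path above, assuming 32-bit operands and an arithmetic >> on signed values
 * (as GCC/Clang provide).  After the multiply-high and the optional +/- n
 * correction, the arithmetic shift rounds toward negative infinity; adding
 * the sign bit (res >>u 31) bumps negative results up by one so the quotient
 * rounds toward zero, like C division.  The helper name is hypothetical; the
 * shift comes from struct util_fast_sdiv_info.
 */
#include <stdint.h>

static int32_t
fast_sdiv32_tail_sketch(int32_t res, unsigned shift)
{
   res >>= shift;                          /* nir_ishr_imm            */
   res += (uint32_t)res >> 31;             /* nir_ushr_imm + nir_iadd */
   return res;
}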
if (progress) {
nir_metadata_preserve(impl, nir_metadata_block_index |
nir_metadata_dominance);
+ } else {
+ nir_metadata_preserve(impl, nir_metadata_all);
}
return progress;
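/*
 * Illustrative sketch, not part of the patch: the usual shape of the NIR
 * per-impl function around the hunk above, assuming nir.h.  If the pass
 * rewrote anything, only block indices and dominance are known to still be
 * valid; if it made no change, all metadata is still valid, so preserving
 * nir_metadata_all avoids recomputing it later.  The function name and loop
 * body are hypothetical.
 */
#include "nir.h"

static bool
lower_div_impl_sketch(nir_function_impl *impl)
{
   bool progress = false;

   nir_foreach_block(block, impl) {
      nir_foreach_instr_safe(instr, block) {
         /* ...rewrite matching udiv/idiv instructions, set progress = true... */
      }
   }

   if (progress) {
      nir_metadata_preserve(impl, nir_metadata_block_index |
                                  nir_metadata_dominance);
   } else {
      nir_metadata_preserve(impl, nir_metadata_all);
   }

   return progress;
}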