X-Git-Url: https://git.libre-soc.org/?a=blobdiff_plain;f=src%2Fcompiler%2Fnir%2Fnir_lower_int64.c;h=e18a54812259f8d3ee4f5fdc6d1b382e8e18c1a9;hb=274f3815a5efaf1b82f6c2d29122f7aac2fc9a19;hp=4de905b68917fe70a8946f5bf27e99b66683bcef;hpb=424ac809bfac60ed8482abfb7ef8c8aa88877b55;p=mesa.git

diff --git a/src/compiler/nir/nir_lower_int64.c b/src/compiler/nir/nir_lower_int64.c
index 4de905b6891..e18a5481225 100644
--- a/src/compiler/nir/nir_lower_int64.c
+++ b/src/compiler/nir/nir_lower_int64.c
@@ -24,6 +24,375 @@
 #include "nir.h"
 #include "nir_builder.h"
 
+static nir_ssa_def *
+lower_b2i64(nir_builder *b, nir_ssa_def *x)
+{
+   return nir_pack_64_2x32_split(b, nir_b2i32(b, x), nir_imm_int(b, 0));
+}
+
+static nir_ssa_def *
+lower_i2b(nir_builder *b, nir_ssa_def *x)
+{
+   return nir_ine(b, nir_ior(b, nir_unpack_64_2x32_split_x(b, x),
+                                nir_unpack_64_2x32_split_y(b, x)),
+                     nir_imm_int(b, 0));
+}
+
+static nir_ssa_def *
+lower_i2i8(nir_builder *b, nir_ssa_def *x)
+{
+   return nir_i2i8(b, nir_unpack_64_2x32_split_x(b, x));
+}
+
+static nir_ssa_def *
+lower_i2i16(nir_builder *b, nir_ssa_def *x)
+{
+   return nir_i2i16(b, nir_unpack_64_2x32_split_x(b, x));
+}
+
+
+static nir_ssa_def *
+lower_i2i32(nir_builder *b, nir_ssa_def *x)
+{
+   return nir_unpack_64_2x32_split_x(b, x);
+}
+
+static nir_ssa_def *
+lower_i2i64(nir_builder *b, nir_ssa_def *x)
+{
+   nir_ssa_def *x32 = x->bit_size == 32 ? x : nir_i2i32(b, x);
+   return nir_pack_64_2x32_split(b, x32, nir_ishr(b, x32, nir_imm_int(b, 31)));
+}
+
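As a reference for the helpers above and below: every lowering in this file models a 64-bit value as a pair of 32-bit words, built with nir_pack_64_2x32_split and taken apart with nir_unpack_64_2x32_split_x/_y. The following standalone C sketch (names invented for illustration, not part of the patch) shows that representation and the two widening conversions just added:

    #include <stdint.h>

    /* Model: a 64-bit value is a {lo, hi} pair of 32-bit words. */
    static uint64_t pack_64(uint32_t lo, uint32_t hi)
    {
       return ((uint64_t)hi << 32) | lo;
    }

    /* i2i64: the high word is the sign bit of the source replicated 32 times,
     * which is what the nir_ishr by 31 in lower_i2i64 produces. */
    static uint64_t i2i64(int32_t x)
    {
       return pack_64((uint32_t)x, x < 0 ? 0xffffffffu : 0u);
    }

    /* u2u64: the high word is simply zero. */
    static uint64_t u2u64(uint32_t x)
    {
       return pack_64(x, 0);
    }
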
+static nir_ssa_def *
+lower_u2u8(nir_builder *b, nir_ssa_def *x)
+{
+   return nir_u2u8(b, nir_unpack_64_2x32_split_x(b, x));
+}
+
+static nir_ssa_def *
+lower_u2u16(nir_builder *b, nir_ssa_def *x)
+{
+   return nir_u2u16(b, nir_unpack_64_2x32_split_x(b, x));
+}
+
+static nir_ssa_def *
+lower_u2u32(nir_builder *b, nir_ssa_def *x)
+{
+   return nir_unpack_64_2x32_split_x(b, x);
+}
+
+static nir_ssa_def *
+lower_u2u64(nir_builder *b, nir_ssa_def *x)
+{
+   nir_ssa_def *x32 = x->bit_size == 32 ? x : nir_u2u32(b, x);
+   return nir_pack_64_2x32_split(b, x32, nir_imm_int(b, 0));
+}
+
+static nir_ssa_def *
+lower_bcsel64(nir_builder *b, nir_ssa_def *cond, nir_ssa_def *x, nir_ssa_def *y)
+{
+   nir_ssa_def *x_lo = nir_unpack_64_2x32_split_x(b, x);
+   nir_ssa_def *x_hi = nir_unpack_64_2x32_split_y(b, x);
+   nir_ssa_def *y_lo = nir_unpack_64_2x32_split_x(b, y);
+   nir_ssa_def *y_hi = nir_unpack_64_2x32_split_y(b, y);
+
+   return nir_pack_64_2x32_split(b, nir_bcsel(b, cond, x_lo, y_lo),
+                                    nir_bcsel(b, cond, x_hi, y_hi));
+}
+
+static nir_ssa_def *
+lower_inot64(nir_builder *b, nir_ssa_def *x)
+{
+   nir_ssa_def *x_lo = nir_unpack_64_2x32_split_x(b, x);
+   nir_ssa_def *x_hi = nir_unpack_64_2x32_split_y(b, x);
+
+   return nir_pack_64_2x32_split(b, nir_inot(b, x_lo), nir_inot(b, x_hi));
+}
+
+static nir_ssa_def *
+lower_iand64(nir_builder *b, nir_ssa_def *x, nir_ssa_def *y)
+{
+   nir_ssa_def *x_lo = nir_unpack_64_2x32_split_x(b, x);
+   nir_ssa_def *x_hi = nir_unpack_64_2x32_split_y(b, x);
+   nir_ssa_def *y_lo = nir_unpack_64_2x32_split_x(b, y);
+   nir_ssa_def *y_hi = nir_unpack_64_2x32_split_y(b, y);
+
+   return nir_pack_64_2x32_split(b, nir_iand(b, x_lo, y_lo),
+                                    nir_iand(b, x_hi, y_hi));
+}
+
+static nir_ssa_def *
+lower_ior64(nir_builder *b, nir_ssa_def *x, nir_ssa_def *y)
+{
+   nir_ssa_def *x_lo = nir_unpack_64_2x32_split_x(b, x);
+   nir_ssa_def *x_hi = nir_unpack_64_2x32_split_y(b, x);
+   nir_ssa_def *y_lo = nir_unpack_64_2x32_split_x(b, y);
+   nir_ssa_def *y_hi = nir_unpack_64_2x32_split_y(b, y);
+
+   return nir_pack_64_2x32_split(b, nir_ior(b, x_lo, y_lo),
+                                    nir_ior(b, x_hi, y_hi));
+}
+
+static nir_ssa_def *
+lower_ixor64(nir_builder *b, nir_ssa_def *x, nir_ssa_def *y)
+{
+   nir_ssa_def *x_lo = nir_unpack_64_2x32_split_x(b, x);
+   nir_ssa_def *x_hi = nir_unpack_64_2x32_split_y(b, x);
+   nir_ssa_def *y_lo = nir_unpack_64_2x32_split_x(b, y);
+   nir_ssa_def *y_hi = nir_unpack_64_2x32_split_y(b, y);
+
+   return nir_pack_64_2x32_split(b, nir_ixor(b, x_lo, y_lo),
+                                    nir_ixor(b, x_hi, y_hi));
+}
+
+static nir_ssa_def *
+lower_ishl64(nir_builder *b, nir_ssa_def *x, nir_ssa_def *y)
+{
+   /* Implemented as
+    *
+    * uint64_t lshift(uint64_t x, int c)
+    * {
+    *    if (c == 0) return x;
+    *
+    *    uint32_t lo = LO(x), hi = HI(x);
+    *
+    *    if (c < 32) {
+    *       uint32_t lo_shifted = lo << c;
+    *       uint32_t hi_shifted = hi << c;
+    *       uint32_t lo_shifted_hi = lo >> abs(32 - c);
+    *       return pack_64(lo_shifted, hi_shifted | lo_shifted_hi);
+    *    } else {
+    *       uint32_t lo_shifted_hi = lo << abs(32 - c);
+    *       return pack_64(0, lo_shifted_hi);
+    *    }
+    * }
+    */
+   nir_ssa_def *x_lo = nir_unpack_64_2x32_split_x(b, x);
+   nir_ssa_def *x_hi = nir_unpack_64_2x32_split_y(b, x);
+
+   nir_ssa_def *reverse_count = nir_iabs(b, nir_iadd(b, y, nir_imm_int(b, -32)));
+   nir_ssa_def *lo_shifted = nir_ishl(b, x_lo, y);
+   nir_ssa_def *hi_shifted = nir_ishl(b, x_hi, y);
+   nir_ssa_def *lo_shifted_hi = nir_ushr(b, x_lo, reverse_count);
+
+   nir_ssa_def *res_if_lt_32 =
+      nir_pack_64_2x32_split(b, lo_shifted,
+                                nir_ior(b, hi_shifted, lo_shifted_hi));
+   nir_ssa_def *res_if_ge_32 =
+      nir_pack_64_2x32_split(b, nir_imm_int(b, 0),
+                                nir_ishl(b, x_lo, reverse_count));
+
+   return nir_bcsel(b,
+                    nir_ieq(b, y, nir_imm_int(b, 0)), x,
+                    nir_bcsel(b, nir_uge(b, y, nir_imm_int(b, 32)),
+                              res_if_ge_32, res_if_lt_32));
+}
+
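The shift lowerings here and in the next two helpers share one shape: because the pass cannot branch per invocation, both the c < 32 and the c >= 32 results are computed unconditionally and selected with bcsel, and reverse_count = |32 - c| serves as the cross-word shift count in either case. A standalone C rendering of the left-shift case, assuming the shift count has already been reduced to the 0..63 range (illustrative only, not part of the patch):

    #include <stdint.h>

    static uint64_t lshift64(uint64_t x, unsigned c)   /* c in 0..63 */
    {
       uint32_t lo = (uint32_t)x, hi = (uint32_t)(x >> 32);

       if (c == 0)
          return x;

       if (c < 32) {
          uint32_t lo_shifted    = lo << c;
          uint32_t hi_shifted    = hi << c;
          uint32_t lo_shifted_hi = lo >> (32 - c);   /* bits that cross into the high word */
          return ((uint64_t)(hi_shifted | lo_shifted_hi) << 32) | lo_shifted;
       } else {
          /* The low word supplies every bit of the high word. */
          return (uint64_t)(lo << (c - 32)) << 32;
       }
    }
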
+static nir_ssa_def *
+lower_ishr64(nir_builder *b, nir_ssa_def *x, nir_ssa_def *y)
+{
+   /* Implemented as
+    *
+    * uint64_t arshift(uint64_t x, int c)
+    * {
+    *    if (c == 0) return x;
+    *
+    *    uint32_t lo = LO(x);
+    *    int32_t  hi = HI(x);
+    *
+    *    if (c < 32) {
+    *       uint32_t lo_shifted = lo >> c;
+    *       uint32_t hi_shifted = hi >> c;
+    *       uint32_t hi_shifted_lo = hi << abs(32 - c);
+    *       return pack_64(hi_shifted, hi_shifted_lo | lo_shifted);
+    *    } else {
+    *       uint32_t hi_shifted = hi >> 31;
+    *       uint32_t hi_shifted_lo = hi >> abs(32 - c);
+    *       return pack_64(hi_shifted, hi_shifted_lo);
+    *    }
+    * }
+    */
+   nir_ssa_def *x_lo = nir_unpack_64_2x32_split_x(b, x);
+   nir_ssa_def *x_hi = nir_unpack_64_2x32_split_y(b, x);
+
+   nir_ssa_def *reverse_count = nir_iabs(b, nir_iadd(b, y, nir_imm_int(b, -32)));
+   nir_ssa_def *lo_shifted = nir_ushr(b, x_lo, y);
+   nir_ssa_def *hi_shifted = nir_ishr(b, x_hi, y);
+   nir_ssa_def *hi_shifted_lo = nir_ishl(b, x_hi, reverse_count);
+
+   nir_ssa_def *res_if_lt_32 =
+      nir_pack_64_2x32_split(b, nir_ior(b, lo_shifted, hi_shifted_lo),
+                                hi_shifted);
+   nir_ssa_def *res_if_ge_32 =
+      nir_pack_64_2x32_split(b, nir_ishr(b, x_hi, reverse_count),
+                                nir_ishr(b, x_hi, nir_imm_int(b, 31)));
+
+   return nir_bcsel(b,
+                    nir_ieq(b, y, nir_imm_int(b, 0)), x,
+                    nir_bcsel(b, nir_uge(b, y, nir_imm_int(b, 32)),
+                              res_if_ge_32, res_if_lt_32));
+}
+
+static nir_ssa_def *
+lower_ushr64(nir_builder *b, nir_ssa_def *x, nir_ssa_def *y)
+{
+   /* Implemented as
+    *
+    * uint64_t rshift(uint64_t x, int c)
+    * {
+    *    if (c == 0) return x;
+    *
+    *    uint32_t lo = LO(x), hi = HI(x);
+    *
+    *    if (c < 32) {
+    *       uint32_t lo_shifted = lo >> c;
+    *       uint32_t hi_shifted = hi >> c;
+    *       uint32_t hi_shifted_lo = hi << abs(32 - c);
+    *       return pack_64(hi_shifted, hi_shifted_lo | lo_shifted);
+    *    } else {
+    *       uint32_t hi_shifted_lo = hi >> abs(32 - c);
+    *       return pack_64(0, hi_shifted_lo);
+    *    }
+    * }
+    */
+
+   nir_ssa_def *x_lo = nir_unpack_64_2x32_split_x(b, x);
+   nir_ssa_def *x_hi = nir_unpack_64_2x32_split_y(b, x);
+
+   nir_ssa_def *reverse_count = nir_iabs(b, nir_iadd(b, y, nir_imm_int(b, -32)));
+   nir_ssa_def *lo_shifted = nir_ushr(b, x_lo, y);
+   nir_ssa_def *hi_shifted = nir_ushr(b, x_hi, y);
+   nir_ssa_def *hi_shifted_lo = nir_ishl(b, x_hi, reverse_count);
+
+   nir_ssa_def *res_if_lt_32 =
+      nir_pack_64_2x32_split(b, nir_ior(b, lo_shifted, hi_shifted_lo),
+                                hi_shifted);
+   nir_ssa_def *res_if_ge_32 =
+      nir_pack_64_2x32_split(b, nir_ushr(b, x_hi, reverse_count),
+                                nir_imm_int(b, 0));
+
+   return nir_bcsel(b,
+                    nir_ieq(b, y, nir_imm_int(b, 0)), x,
+                    nir_bcsel(b, nir_uge(b, y, nir_imm_int(b, 32)),
+                              res_if_ge_32, res_if_lt_32));
+}
+
+static nir_ssa_def *
+lower_iadd64(nir_builder *b, nir_ssa_def *x, nir_ssa_def *y)
+{
+   nir_ssa_def *x_lo = nir_unpack_64_2x32_split_x(b, x);
+   nir_ssa_def *x_hi = nir_unpack_64_2x32_split_y(b, x);
+   nir_ssa_def *y_lo = nir_unpack_64_2x32_split_x(b, y);
+   nir_ssa_def *y_hi = nir_unpack_64_2x32_split_y(b, y);
+
+   nir_ssa_def *res_lo = nir_iadd(b, x_lo, y_lo);
+   nir_ssa_def *carry = nir_b2i32(b, nir_ult(b, res_lo, x_lo));
+   nir_ssa_def *res_hi = nir_iadd(b, carry, nir_iadd(b, x_hi, y_hi));
+
+   return nir_pack_64_2x32_split(b, res_lo, res_hi);
+}
+
+static nir_ssa_def *
+lower_isub64(nir_builder *b, nir_ssa_def *x, nir_ssa_def *y)
+{
+   nir_ssa_def *x_lo = nir_unpack_64_2x32_split_x(b, x);
+   nir_ssa_def *x_hi = nir_unpack_64_2x32_split_y(b, x);
+   nir_ssa_def *y_lo = nir_unpack_64_2x32_split_x(b, y);
+   nir_ssa_def *y_hi = nir_unpack_64_2x32_split_y(b, y);
+
+   nir_ssa_def *res_lo = nir_isub(b, x_lo, y_lo);
+   nir_ssa_def *borrow = nir_ineg(b, nir_b2i32(b, nir_ult(b, x_lo, y_lo)));
+   nir_ssa_def *res_hi = nir_iadd(b, nir_isub(b, x_hi, y_hi), borrow);
+
+   return nir_pack_64_2x32_split(b, res_lo, res_hi);
+}
+
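The carry in lower_iadd64 and the borrow in lower_isub64 above come from a standard unsigned wrap-around test: the low-word sum is smaller than one of its operands exactly when the addition carried, and x_lo < y_lo exactly when the subtraction borrows. A small self-contained C check of the addition case (illustrative only, not part of the patch):

    #include <assert.h>
    #include <stdint.h>

    static uint64_t add64(uint64_t x, uint64_t y)
    {
       uint32_t x_lo = (uint32_t)x, x_hi = (uint32_t)(x >> 32);
       uint32_t y_lo = (uint32_t)y, y_hi = (uint32_t)(y >> 32);

       uint32_t res_lo = x_lo + y_lo;
       uint32_t carry  = res_lo < x_lo;      /* 1 iff the low-word add wrapped */
       uint32_t res_hi = x_hi + y_hi + carry;

       return ((uint64_t)res_hi << 32) | res_lo;
    }

    int main(void)
    {
       assert(add64(0xffffffffu, 1) == 0x100000000ull);            /* carry propagates */
       assert(add64(0x1ffffffffull, 0x300000001ull) == 0x500000000ull);
       return 0;
    }
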
+static nir_ssa_def *
+lower_ineg64(nir_builder *b, nir_ssa_def *x)
+{
+   /* Since isub is the same number of instructions (with better dependencies)
+    * as iadd, subtraction is actually more efficient for ineg than the usual
+    * 2's complement "flip the bits and add one".
+    */
+   return lower_isub64(b, nir_imm_int64(b, 0), x);
+}
+
+static nir_ssa_def *
+lower_iabs64(nir_builder *b, nir_ssa_def *x)
+{
+   nir_ssa_def *x_hi = nir_unpack_64_2x32_split_y(b, x);
+   nir_ssa_def *x_is_neg = nir_ilt(b, x_hi, nir_imm_int(b, 0));
+   return nir_bcsel(b, x_is_neg, nir_ineg(b, x), x);
+}
+
+static nir_ssa_def *
+lower_int64_compare(nir_builder *b, nir_op op, nir_ssa_def *x, nir_ssa_def *y)
+{
+   nir_ssa_def *x_lo = nir_unpack_64_2x32_split_x(b, x);
+   nir_ssa_def *x_hi = nir_unpack_64_2x32_split_y(b, x);
+   nir_ssa_def *y_lo = nir_unpack_64_2x32_split_x(b, y);
+   nir_ssa_def *y_hi = nir_unpack_64_2x32_split_y(b, y);
+
+   switch (op) {
+   case nir_op_ieq:
+      return nir_iand(b, nir_ieq(b, x_hi, y_hi), nir_ieq(b, x_lo, y_lo));
+   case nir_op_ine:
+      return nir_ior(b, nir_ine(b, x_hi, y_hi), nir_ine(b, x_lo, y_lo));
+   case nir_op_ult:
+      return nir_ior(b, nir_ult(b, x_hi, y_hi),
+                        nir_iand(b, nir_ieq(b, x_hi, y_hi),
+                                    nir_ult(b, x_lo, y_lo)));
+   case nir_op_ilt:
+      return nir_ior(b, nir_ilt(b, x_hi, y_hi),
+                        nir_iand(b, nir_ieq(b, x_hi, y_hi),
+                                    nir_ult(b, x_lo, y_lo)));
+      break;
+   case nir_op_uge:
+      /* Lower as !(x < y) in the hopes of better CSE */
+      return nir_inot(b, lower_int64_compare(b, nir_op_ult, x, y));
+   case nir_op_ige:
+      /* Lower as !(x < y) in the hopes of better CSE */
+      return nir_inot(b, lower_int64_compare(b, nir_op_ilt, x, y));
+   default:
+      unreachable("Invalid comparison");
+   }
+}
+
+static nir_ssa_def *
+lower_umax64(nir_builder *b, nir_ssa_def *x, nir_ssa_def *y)
+{
+   return nir_bcsel(b, lower_int64_compare(b, nir_op_ult, x, y), y, x);
+}
+
+static nir_ssa_def *
+lower_imax64(nir_builder *b, nir_ssa_def *x, nir_ssa_def *y)
+{
+   return nir_bcsel(b, lower_int64_compare(b, nir_op_ilt, x, y), y, x);
+}
+
+static nir_ssa_def *
+lower_umin64(nir_builder *b, nir_ssa_def *x, nir_ssa_def *y)
+{
+   return nir_bcsel(b, lower_int64_compare(b, nir_op_ult, x, y), x, y);
+}
+
+static nir_ssa_def *
+lower_imin64(nir_builder *b, nir_ssa_def *x, nir_ssa_def *y)
+{
+   return nir_bcsel(b, lower_int64_compare(b, nir_op_ilt, x, y), x, y);
+}
+
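lower_int64_compare decides each comparison on the high words and falls back to the low words only on a tie; note that the low-word comparison is unsigned even for ilt/ige, since in two's complement the low word carries no sign information. A compact C equivalent, assuming the usual two's-complement representation (illustrative only, not part of the patch):

    #include <stdbool.h>
    #include <stdint.h>

    static bool ult64(uint64_t x, uint64_t y)
    {
       uint32_t x_lo = (uint32_t)x, x_hi = (uint32_t)(x >> 32);
       uint32_t y_lo = (uint32_t)y, y_hi = (uint32_t)(y >> 32);
       return x_hi < y_hi || (x_hi == y_hi && x_lo < y_lo);
    }

    static bool ilt64(int64_t x, int64_t y)
    {
       /* Same structure; only the high-word comparison becomes signed. */
       uint32_t x_lo = (uint32_t)x, y_lo = (uint32_t)y;
       int32_t  x_hi = (int32_t)((uint64_t)x >> 32);
       int32_t  y_hi = (int32_t)((uint64_t)y >> 32);
       return x_hi < y_hi || (x_hi == y_hi && x_lo < y_lo);
    }
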
+static nir_ssa_def *
+lower_mul_2x32_64(nir_builder *b, nir_ssa_def *x, nir_ssa_def *y,
+                  bool sign_extend)
+{
+   nir_ssa_def *res_hi = sign_extend ? nir_imul_high(b, x, y)
+                                     : nir_umul_high(b, x, y);
+
+   return nir_pack_64_2x32_split(b, nir_imul(b, x, y), res_hi);
+}
+
 static nir_ssa_def *
 lower_imul64(nir_builder *b, nir_ssa_def *x, nir_ssa_def *y)
 {
@@ -32,12 +401,70 @@ lower_imul64(nir_builder *b, nir_ssa_def *x, nir_ssa_def *y)
    nir_ssa_def *y_lo = nir_unpack_64_2x32_split_x(b, y);
    nir_ssa_def *y_hi = nir_unpack_64_2x32_split_y(b, y);
 
-   nir_ssa_def *res_lo = nir_imul(b, x_lo, y_lo);
-   nir_ssa_def *res_hi = nir_iadd(b, nir_umul_high(b, x_lo, y_lo),
+   nir_ssa_def *mul_lo = nir_umul_2x32_64(b, x_lo, y_lo);
+   nir_ssa_def *res_hi = nir_iadd(b, nir_unpack_64_2x32_split_y(b, mul_lo),
                          nir_iadd(b, nir_imul(b, x_lo, y_hi),
                                   nir_imul(b, x_hi, y_lo)));
 
-   return nir_pack_64_2x32_split(b, res_lo, res_hi);
+   return nir_pack_64_2x32_split(b, nir_unpack_64_2x32_split_x(b, mul_lo),
+                                 res_hi);
+}
+
+static nir_ssa_def *
+lower_mul_high64(nir_builder *b, nir_ssa_def *x, nir_ssa_def *y,
+                 bool sign_extend)
+{
+   nir_ssa_def *x32[4], *y32[4];
+   x32[0] = nir_unpack_64_2x32_split_x(b, x);
+   x32[1] = nir_unpack_64_2x32_split_y(b, x);
+   if (sign_extend) {
+      x32[2] = x32[3] = nir_ishr(b, x32[1], nir_imm_int(b, 31));
+   } else {
+      x32[2] = x32[3] = nir_imm_int(b, 0);
+   }
+
+   y32[0] = nir_unpack_64_2x32_split_x(b, y);
+   y32[1] = nir_unpack_64_2x32_split_y(b, y);
+   if (sign_extend) {
+      y32[2] = y32[3] = nir_ishr(b, y32[1], nir_imm_int(b, 31));
+   } else {
+      y32[2] = y32[3] = nir_imm_int(b, 0);
+   }
+
+   nir_ssa_def *res[8] = { NULL, };
+
+   /* Yes, the following generates a pile of code.  However, we throw res[0]
+    * and res[1] away in the end and, if we're in the umul case, four of our
+    * eight dword operands will be constant zero and opt_algebraic will clean
+    * this up nicely.
+    */
+   for (unsigned i = 0; i < 4; i++) {
+      nir_ssa_def *carry = NULL;
+      for (unsigned j = 0; j < 4; j++) {
+         /* The maximum values of x32[i] and y32[i] are UINT32_MAX so the
+          * maximum value of tmp is UINT32_MAX * UINT32_MAX.  The maximum
+          * value that will fit in tmp is
+          *
+          *    UINT64_MAX = UINT32_MAX << 32 + UINT32_MAX
+          *               = UINT32_MAX * (UINT32_MAX + 1) + UINT32_MAX
+          *               = UINT32_MAX * UINT32_MAX + 2 * UINT32_MAX
+          *
+          * so we're guaranteed that we can add in two more 32-bit values
+          * without overflowing tmp.
+          */
+         nir_ssa_def *tmp = nir_umul_2x32_64(b, x32[i], y32[j]);
+
+         if (res[i + j])
+            tmp = nir_iadd(b, tmp, nir_u2u64(b, res[i + j]));
+         if (carry)
+            tmp = nir_iadd(b, tmp, carry);
+         res[i + j] = nir_u2u32(b, tmp);
+         carry = nir_ushr(b, tmp, nir_imm_int(b, 32));
+      }
+      res[i + 4] = nir_u2u32(b, carry);
+   }
+
+   return nir_pack_64_2x32_split(b, res[2], res[3]);
 }
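The accumulation loop in lower_mul_high64 is plain schoolbook multiplication in base 2^32. A C version specialised to the two digits a 64-bit operand actually has may make the digit arithmetic easier to follow; the NIR code widens both operands to four digits so that the same loop serves both imul_high and umul_high (illustrative only, not part of the patch):

    #include <assert.h>
    #include <stdint.h>

    /* High 64 bits of an unsigned 64x64-bit multiply, using 32-bit digits. */
    static uint64_t umulh64(uint64_t x, uint64_t y)
    {
       uint32_t x32[2] = { (uint32_t)x, (uint32_t)(x >> 32) };
       uint32_t y32[2] = { (uint32_t)y, (uint32_t)(y >> 32) };
       uint32_t res[4] = { 0, 0, 0, 0 };

       for (unsigned i = 0; i < 2; i++) {
          uint32_t carry = 0;
          for (unsigned j = 0; j < 2; j++) {
             /* Partial product plus a previous digit plus a carry never
              * overflows 64 bits, as the comment in the patch argues. */
             uint64_t tmp = (uint64_t)x32[i] * y32[j] + res[i + j] + carry;
             res[i + j] = (uint32_t)tmp;
             carry = (uint32_t)(tmp >> 32);
          }
          res[i + 2] = carry;
       }

       return ((uint64_t)res[3] << 32) | res[2];   /* digits 2 and 3 */
    }

    int main(void)
    {
       assert(umulh64(~0ull, ~0ull) == ~0ull - 1);   /* 0xfffffffffffffffe */
       return 0;
    }
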
 
 static nir_ssa_def *
@@ -48,7 +475,7 @@ lower_isign64(nir_builder *b, nir_ssa_def *x)
 
    nir_ssa_def *is_non_zero = nir_i2b(b, nir_ior(b, x_lo, x_hi));
    nir_ssa_def *res_hi = nir_ishr(b, x_hi, nir_imm_int(b, 31));
-   nir_ssa_def *res_lo = nir_ior(b, res_hi, nir_b2i(b, is_non_zero));
+   nir_ssa_def *res_lo = nir_ior(b, res_hi, nir_b2i32(b, is_non_zero));
 
    return nir_pack_64_2x32_split(b, res_lo, res_hi);
 }
@@ -66,9 +493,8 @@ lower_udiv64_mod64(nir_builder *b, nir_ssa_def *n, nir_ssa_def *d,
    nir_ssa_def *d_lo = nir_unpack_64_2x32_split_x(b, d);
    nir_ssa_def *d_hi = nir_unpack_64_2x32_split_y(b, d);
 
-   nir_const_value v = { .u32 = { 0, 0, 0, 0 } };
-   nir_ssa_def *q_lo = nir_build_imm(b, n->num_components, 32, v);
-   nir_ssa_def *q_hi = nir_build_imm(b, n->num_components, 32, v);
+   nir_ssa_def *q_lo = nir_imm_zero(b, n->num_components, 32);
+   nir_ssa_def *q_hi = nir_imm_zero(b, n->num_components, 32);
 
    nir_ssa_def *n_hi_before_if = n_hi;
    nir_ssa_def *q_hi_before_if = q_hi;
@@ -86,7 +512,7 @@ lower_udiv64_mod64(nir_builder *b, nir_ssa_def *n, nir_ssa_def *d,
        * this is always true within the if statement.
        */
       if (n->num_components == 1)
-         need_high_div = nir_imm_int(b, NIR_TRUE);
+         need_high_div = nir_imm_true(b);
 
       nir_ssa_def *log2_d_lo = nir_ufind_msb(b, d_lo);
 
@@ -179,20 +605,83 @@ lower_imod64(nir_builder *b, nir_ssa_def *n, nir_ssa_def *d)
 {
    nir_ssa_def *n_hi = nir_unpack_64_2x32_split_y(b, n);
    nir_ssa_def *d_hi = nir_unpack_64_2x32_split_y(b, d);
+   nir_ssa_def *n_is_neg = nir_ilt(b, n_hi, nir_imm_int(b, 0));
+   nir_ssa_def *d_is_neg = nir_ilt(b, d_hi, nir_imm_int(b, 0));
 
-   nir_ssa_def *negate = nir_ine(b, nir_ilt(b, n_hi, nir_imm_int(b, 0)),
-                                 nir_ilt(b, d_hi, nir_imm_int(b, 0)));
    nir_ssa_def *q, *r;
    lower_udiv64_mod64(b, nir_iabs(b, n), nir_iabs(b, d), &q, &r);
-   return nir_bcsel(b, negate, nir_ineg(b, r), r);
+
+   nir_ssa_def *rem = nir_bcsel(b, n_is_neg, nir_ineg(b, r), r);
+
+   return nir_bcsel(b, nir_ieq(b, r, nir_imm_int64(b, 0)), nir_imm_int64(b, 0),
+                    nir_bcsel(b, nir_ieq(b, n_is_neg, d_is_neg), rem,
+                              nir_iadd(b, rem, d)));
 }
 
-static nir_lower_int64_options
-opcode_to_options_mask(nir_op opcode)
+static nir_ssa_def *
+lower_irem64(nir_builder *b, nir_ssa_def *n, nir_ssa_def *d)
+{
+   nir_ssa_def *n_hi = nir_unpack_64_2x32_split_y(b, n);
+   nir_ssa_def *n_is_neg = nir_ilt(b, n_hi, nir_imm_int(b, 0));
+
+   nir_ssa_def *q, *r;
+   lower_udiv64_mod64(b, nir_iabs(b, n), nir_iabs(b, d), &q, &r);
+   return nir_bcsel(b, n_is_neg, nir_ineg(b, r), r);
+}
+
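lower_imod64 and lower_irem64 implement the two signed-division sign conventions: imod follows the GLSL/SPIR-V rule, in which a non-zero result takes the sign of the divisor, while irem keeps the C rule, in which the result takes the sign of the dividend. A short C illustration of the intended results (illustrative only, not part of the patch):

    #include <assert.h>
    #include <stdint.h>

    /* irem: magnitude of |n| % |d|, sign of the dividend (C's % operator). */
    static int64_t irem64(int64_t n, int64_t d) { return n % d; }

    /* imod: shift a non-zero remainder so that it takes the divisor's sign. */
    static int64_t imod64(int64_t n, int64_t d)
    {
       int64_t r = n % d;
       return (r != 0 && (r < 0) != (d < 0)) ? r + d : r;
    }

    int main(void)
    {
       assert(irem64(-7, 3) == -1 && imod64(-7, 3) == 2);
       assert(irem64(7, -3) == 1 && imod64(7, -3) == -2);
       assert(irem64(-7, -3) == -1 && imod64(-7, -3) == -1);
       return 0;
    }
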
+static nir_ssa_def *
+lower_extract(nir_builder *b, nir_op op, nir_ssa_def *x, nir_ssa_def *c)
+{
+   assert(op == nir_op_extract_u8 || op == nir_op_extract_i8 ||
+          op == nir_op_extract_u16 || op == nir_op_extract_i16);
+
+   const int chunk = nir_src_as_uint(nir_src_for_ssa(c));
+   const int chunk_bits =
+      (op == nir_op_extract_u8 || op == nir_op_extract_i8) ? 8 : 16;
+   const int num_chunks_in_32 = 32 / chunk_bits;
+
+   nir_ssa_def *extract32;
+   if (chunk < num_chunks_in_32) {
+      extract32 = nir_build_alu(b, op, nir_unpack_64_2x32_split_x(b, x),
+                                       nir_imm_int(b, chunk),
+                                       NULL, NULL);
+   } else {
+      extract32 = nir_build_alu(b, op, nir_unpack_64_2x32_split_y(b, x),
+                                       nir_imm_int(b, chunk - num_chunks_in_32),
+                                       NULL, NULL);
+   }
+
+   if (op == nir_op_extract_i8 || op == nir_op_extract_i16)
+      return lower_i2i64(b, extract32);
+   else
+      return lower_u2u64(b, extract32);
+}
+
+static nir_ssa_def *
+lower_ufind_msb64(nir_builder *b, nir_ssa_def *x)
+{
+
+   nir_ssa_def *x_lo = nir_unpack_64_2x32_split_x(b, x);
+   nir_ssa_def *x_hi = nir_unpack_64_2x32_split_y(b, x);
+   nir_ssa_def *lo_count = nir_ufind_msb(b, x_lo);
+   nir_ssa_def *hi_count = nir_ufind_msb(b, x_hi);
+   nir_ssa_def *valid_hi_bits = nir_ine(b, x_hi, nir_imm_int(b, 0));
+   nir_ssa_def *hi_res = nir_iadd(b, nir_imm_intN_t(b, 32, 32), hi_count);
+   return nir_bcsel(b, valid_hi_bits, hi_res, lo_count);
+}
+
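ufind_msb returns the index of the highest set bit, or -1 when the input is zero, so the 64-bit variant can simply prefer the high word (offset by 32) whenever it is non-zero and otherwise return the low word's result, which is already -1 for a zero input. A C sketch (illustrative only, not part of the patch):

    #include <assert.h>
    #include <stdint.h>

    /* Bit index (31..0) of the most significant set bit, or -1 if w == 0. */
    static int32_t ufind_msb32(uint32_t w)
    {
       for (int32_t i = 31; i >= 0; i--) {
          if (w & (1u << i))
             return i;
       }
       return -1;
    }

    static int32_t ufind_msb64(uint64_t x)
    {
       uint32_t lo = (uint32_t)x, hi = (uint32_t)(x >> 32);
       return hi != 0 ? 32 + ufind_msb32(hi) : ufind_msb32(lo);
    }

    int main(void)
    {
       assert(ufind_msb64(0) == -1);
       assert(ufind_msb64(1) == 0);
       assert(ufind_msb64(0x100000000ull) == 32);
       return 0;
    }
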
+nir_lower_int64_options
+nir_lower_int64_op_to_options_mask(nir_op opcode)
 {
    switch (opcode) {
    case nir_op_imul:
       return nir_lower_imul64;
+   case nir_op_imul_2x32_64:
+   case nir_op_umul_2x32_64:
+      return nir_lower_imul_2x32_64;
+   case nir_op_imul_high:
+   case nir_op_umul_high:
+      return nir_lower_imul_high64;
    case nir_op_isign:
       return nir_lower_isign64;
    case nir_op_udiv:
@@ -201,14 +690,63 @@ opcode_to_options_mask(nir_op opcode)
    case nir_op_imod:
    case nir_op_irem:
       return nir_lower_divmod64;
+   case nir_op_b2i64:
+   case nir_op_i2b1:
+   case nir_op_i2i8:
+   case nir_op_i2i16:
+   case nir_op_i2i32:
+   case nir_op_i2i64:
+   case nir_op_u2u8:
+   case nir_op_u2u16:
+   case nir_op_u2u32:
+   case nir_op_u2u64:
+   case nir_op_bcsel:
+      return nir_lower_mov64;
+   case nir_op_ieq:
+   case nir_op_ine:
+   case nir_op_ult:
+   case nir_op_ilt:
+   case nir_op_uge:
+   case nir_op_ige:
+      return nir_lower_icmp64;
+   case nir_op_iadd:
+   case nir_op_isub:
+      return nir_lower_iadd64;
+   case nir_op_imin:
+   case nir_op_imax:
+   case nir_op_umin:
+   case nir_op_umax:
+      return nir_lower_minmax64;
+   case nir_op_iabs:
+      return nir_lower_iabs64;
+   case nir_op_ineg:
+      return nir_lower_ineg64;
+   case nir_op_iand:
+   case nir_op_ior:
+   case nir_op_ixor:
+   case nir_op_inot:
+      return nir_lower_logic64;
+   case nir_op_ishl:
+   case nir_op_ishr:
+   case nir_op_ushr:
+      return nir_lower_shift64;
+   case nir_op_extract_u8:
+   case nir_op_extract_i8:
+   case nir_op_extract_u16:
+   case nir_op_extract_i16:
+      return nir_lower_extract64;
+   case nir_op_ufind_msb:
+      return nir_lower_ufind_msb64;
    default:
       return 0;
    }
 }
 
 static nir_ssa_def *
-lower_int64_alu_instr(nir_builder *b, nir_alu_instr *alu)
+lower_int64_alu_instr(nir_builder *b, nir_instr *instr, void *_state)
 {
+   nir_alu_instr *alu = nir_instr_as_alu(instr);
+
    nir_ssa_def *src[4];
    for (unsigned i = 0; i < nir_op_infos[alu->op].num_inputs; i++)
       src[i] = nir_ssa_for_alu_src(b, alu, i);
 
@@ -216,6 +754,14 @@ lower_int64_alu_instr(nir_builder *b, nir_alu_instr *alu)
    switch (alu->op) {
    case nir_op_imul:
       return lower_imul64(b, src[0], src[1]);
+   case nir_op_imul_2x32_64:
+      return lower_mul_2x32_64(b, src[0], src[1], true);
+   case nir_op_umul_2x32_64:
+      return lower_mul_2x32_64(b, src[0], src[1], false);
+   case nir_op_imul_high:
+      return lower_mul_high64(b, src[0], src[1], true);
+   case nir_op_umul_high:
+      return lower_mul_high64(b, src[0], src[1], false);
    case nir_op_isign:
       return lower_isign64(b, src[0]);
    case nir_op_udiv:
@@ -226,53 +772,144 @@ lower_int64_alu_instr(nir_builder *b, nir_alu_instr *alu)
       return lower_umod64(b, src[0], src[1]);
    case nir_op_imod:
       return lower_imod64(b, src[0], src[1]);
+   case nir_op_irem:
+      return lower_irem64(b, src[0], src[1]);
+   case nir_op_b2i64:
+      return lower_b2i64(b, src[0]);
+   case nir_op_i2b1:
+      return lower_i2b(b, src[0]);
+   case nir_op_i2i8:
+      return lower_i2i8(b, src[0]);
+   case nir_op_i2i16:
+      return lower_i2i16(b, src[0]);
+   case nir_op_i2i32:
+      return lower_i2i32(b, src[0]);
+   case nir_op_i2i64:
+      return lower_i2i64(b, src[0]);
+   case nir_op_u2u8:
+      return lower_u2u8(b, src[0]);
+   case nir_op_u2u16:
+      return lower_u2u16(b, src[0]);
+   case nir_op_u2u32:
+      return lower_u2u32(b, src[0]);
+   case nir_op_u2u64:
+      return lower_u2u64(b, src[0]);
+   case nir_op_bcsel:
+      return lower_bcsel64(b, src[0], src[1], src[2]);
+   case nir_op_ieq:
+   case nir_op_ine:
+   case nir_op_ult:
+   case nir_op_ilt:
+   case nir_op_uge:
+   case nir_op_ige:
+      return lower_int64_compare(b, alu->op, src[0], src[1]);
+   case nir_op_iadd:
+      return lower_iadd64(b, src[0], src[1]);
+   case nir_op_isub:
+      return lower_isub64(b, src[0], src[1]);
+   case nir_op_imin:
+      return lower_imin64(b, src[0], src[1]);
+   case nir_op_imax:
+      return lower_imax64(b, src[0], src[1]);
+   case nir_op_umin:
+      return lower_umin64(b, src[0], src[1]);
+   case nir_op_umax:
+      return lower_umax64(b, src[0], src[1]);
+   case nir_op_iabs:
+      return lower_iabs64(b, src[0]);
+   case nir_op_ineg:
+      return lower_ineg64(b, src[0]);
+   case nir_op_iand:
+      return lower_iand64(b, src[0], src[1]);
+   case nir_op_ior:
+      return lower_ior64(b, src[0], src[1]);
+   case nir_op_ixor:
+      return lower_ixor64(b, src[0], src[1]);
+   case nir_op_inot:
+      return lower_inot64(b, src[0]);
+   case nir_op_ishl:
+      return lower_ishl64(b, src[0], src[1]);
+   case nir_op_ishr:
+      return lower_ishr64(b, src[0], src[1]);
+   case nir_op_ushr:
+      return lower_ushr64(b, src[0], src[1]);
+   case nir_op_extract_u8:
+   case nir_op_extract_i8:
+   case nir_op_extract_u16:
+   case nir_op_extract_i16:
+      return lower_extract(b, alu->op, src[0], src[1]);
+   case nir_op_ufind_msb:
+      return lower_ufind_msb64(b, src[0]);
+      break;
    default:
       unreachable("Invalid ALU opcode to lower");
    }
 }
 
 static bool
-lower_int64_impl(nir_function_impl *impl, nir_lower_int64_options options)
+should_lower_int64_alu_instr(const nir_instr *instr, const void *_options)
 {
-   nir_builder b;
-   nir_builder_init(&b, impl);
-
-   bool progress = false;
-   nir_foreach_block(block, impl) {
-      nir_foreach_instr_safe(instr, block) {
-         if (instr->type != nir_instr_type_alu)
-            continue;
-
-         nir_alu_instr *alu = nir_instr_as_alu(instr);
-         assert(alu->dest.dest.is_ssa);
-         if (alu->dest.dest.ssa.bit_size != 64)
-            continue;
+   const nir_lower_int64_options options =
+      *(const nir_lower_int64_options *)_options;
 
-         if (!(options & opcode_to_options_mask(alu->op)))
-            continue;
+   if (instr->type != nir_instr_type_alu)
+      return false;
 
-         b.cursor = nir_before_instr(instr);
+   const nir_alu_instr *alu = nir_instr_as_alu(instr);
 
-         nir_ssa_def *lowered = lower_int64_alu_instr(&b, alu);
-         nir_ssa_def_rewrite_uses(&alu->dest.dest.ssa,
-                                  nir_src_for_ssa(lowered));
-         nir_instr_remove(&alu->instr);
-         progress = true;
-      }
+   switch (alu->op) {
+   case nir_op_i2b1:
+   case nir_op_i2i8:
+   case nir_op_i2i16:
+   case nir_op_i2i32:
+   case nir_op_u2u8:
+   case nir_op_u2u16:
+   case nir_op_u2u32:
+      assert(alu->src[0].src.is_ssa);
+      if (alu->src[0].src.ssa->bit_size != 64)
+         return false;
+      break;
+   case nir_op_bcsel:
+      assert(alu->src[1].src.is_ssa);
+      assert(alu->src[2].src.is_ssa);
+      assert(alu->src[1].src.ssa->bit_size ==
+             alu->src[2].src.ssa->bit_size);
+      if (alu->src[1].src.ssa->bit_size != 64)
+         return false;
+      break;
+   case nir_op_ieq:
+   case nir_op_ine:
+   case nir_op_ult:
+   case nir_op_ilt:
+   case nir_op_uge:
+   case nir_op_ige:
+      assert(alu->src[0].src.is_ssa);
+      assert(alu->src[1].src.is_ssa);
+      assert(alu->src[0].src.ssa->bit_size ==
+             alu->src[1].src.ssa->bit_size);
+      if (alu->src[0].src.ssa->bit_size != 64)
+         return false;
+      break;
+   case nir_op_ufind_msb:
+      assert(alu->src[0].src.is_ssa);
+      if (alu->src[0].src.ssa->bit_size != 64)
+         return false;
+      break;
+   default:
+      assert(alu->dest.dest.is_ssa);
+      if (alu->dest.dest.ssa.bit_size != 64)
+         return false;
+      break;
    }
 
-   return progress;
+   return (options & nir_lower_int64_op_to_options_mask(alu->op)) != 0;
 }
 
 bool
 nir_lower_int64(nir_shader *shader, nir_lower_int64_options options)
 {
-   bool progress = false;
-
-   nir_foreach_function(function, shader) {
-      if (function->impl)
-         progress |= lower_int64_impl(function->impl, options);
-   }
-
-   return progress;
+   return nir_shader_lower_instructions(shader,
+                                        should_lower_int64_alu_instr,
+                                        lower_int64_alu_instr,
+                                        &options);
 }