nir: Add a nir_tex_instr_has_implicit_derivatives helper
diff --git a/src/compiler/nir/nir_lower_int64.c b/src/compiler/nir/nir_lower_int64.c
index 575aa8395815ef020868bcb832f145e404845b16..72745134308cb6c3648788d06c1acf67b81a737a 100644
--- a/src/compiler/nir/nir_lower_int64.c
+++ b/src/compiler/nir/nir_lower_int64.c
@@ -146,6 +146,138 @@ lower_ixor64(nir_builder *b, nir_ssa_def *x, nir_ssa_def *y)
                                     nir_ixor(b, x_hi, y_hi));
 }
 
+static nir_ssa_def *
+lower_ishl64(nir_builder *b, nir_ssa_def *x, nir_ssa_def *y)
+{
+   /* Implemented as
+    *
+    * uint64_t lshift(uint64_t x, int c)
+    * {
+    *    if (c == 0) return x;
+    *
+    *    uint32_t lo = LO(x), hi = HI(x);
+    *
+    *    if (c < 32) {
+    *       uint32_t lo_shifted = lo << c;
+    *       uint32_t hi_shifted = hi << c;
+    *       uint32_t lo_shifted_hi = lo >> abs(32 - c);
+    *       return pack_64(lo_shifted, hi_shifted | lo_shifted_hi);
+    *    } else {
+    *       uint32_t lo_shifted_hi = lo << abs(32 - c);
+    *       return pack_64(0, lo_shifted_hi);
+    *    }
+    * }
+    */
+   nir_ssa_def *x_lo = nir_unpack_64_2x32_split_x(b, x);
+   nir_ssa_def *x_hi = nir_unpack_64_2x32_split_y(b, x);
+
+   nir_ssa_def *reverse_count = nir_iabs(b, nir_iadd(b, y, nir_imm_int(b, -32)));
+   nir_ssa_def *lo_shifted = nir_ishl(b, x_lo, y);
+   nir_ssa_def *hi_shifted = nir_ishl(b, x_hi, y);
+   nir_ssa_def *lo_shifted_hi = nir_ushr(b, x_lo, reverse_count);
+
+   nir_ssa_def *res_if_lt_32 =
+      nir_pack_64_2x32_split(b, lo_shifted,
+                                nir_ior(b, hi_shifted, lo_shifted_hi));
+   nir_ssa_def *res_if_ge_32 =
+      nir_pack_64_2x32_split(b, nir_imm_int(b, 0),
+                                nir_ishl(b, x_lo, reverse_count));
+
+   return nir_bcsel(b,
+                    nir_ieq(b, y, nir_imm_int(b, 0)), x,
+                    nir_bcsel(b, nir_uge(b, y, nir_imm_int(b, 32)),
+                                 res_if_ge_32, res_if_lt_32));
+}
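
The builder code is a line-for-line transcription of the comment above. As a sanity check, here is a minimal standalone C sketch of the same split-shift identity, verified against a native 64-bit shift (the helper name is illustrative, not part of NIR):

#include <assert.h>
#include <stdint.h>

/* Same decomposition as lower_ishl64; rc plays the role of
 * reverse_count = abs(32 - c).
 */
static uint64_t
lshift64_by_parts(uint64_t x, unsigned c)
{
   uint32_t lo = (uint32_t)x, hi = (uint32_t)(x >> 32);
   unsigned rc = c > 32 ? c - 32 : 32 - c;

   if (c == 0)
      return x;
   if (c < 32)
      return (uint64_t)((hi << c) | (lo >> rc)) << 32 | (uint32_t)(lo << c);
   return (uint64_t)(lo << rc) << 32;   /* low word becomes zero */
}

int main(void)
{
   for (unsigned c = 0; c < 64; c++)
      assert(lshift64_by_parts(0x0123456789abcdefull, c) ==
             0x0123456789abcdefull << c);
   return 0;
}
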
+
+static nir_ssa_def *
+lower_ishr64(nir_builder *b, nir_ssa_def *x, nir_ssa_def *y)
+{
+   /* Implemented as
+    *
+    * uint64_t arshift(uint64_t x, int c)
+    * {
+    *    if (c == 0) return x;
+    *
+    *    uint32_t lo = LO(x);
+    *    int32_t  hi = HI(x);
+    *
+    *    if (c < 32) {
+    *       uint32_t lo_shifted = lo >> c;
+    *       uint32_t hi_shifted = hi >> c;
+    *       uint32_t hi_shifted_lo = hi << abs(32 - c);
+    *       return pack_64(hi_shifted_lo | lo_shifted, hi_shifted);
+    *    } else {
+    *       uint32_t hi_shifted = hi >> 31;
+    *       uint32_t hi_shifted_lo = hi >> abs(32 - c);
+    *       return pack_64(hi_shifted_lo, hi_shifted);
+    *    }
+    * }
+    */
+   nir_ssa_def *x_lo = nir_unpack_64_2x32_split_x(b, x);
+   nir_ssa_def *x_hi = nir_unpack_64_2x32_split_y(b, x);
+
+   nir_ssa_def *reverse_count = nir_iabs(b, nir_iadd(b, y, nir_imm_int(b, -32)));
+   nir_ssa_def *lo_shifted = nir_ushr(b, x_lo, y);
+   nir_ssa_def *hi_shifted = nir_ishr(b, x_hi, y);
+   nir_ssa_def *hi_shifted_lo = nir_ishl(b, x_hi, reverse_count);
+
+   nir_ssa_def *res_if_lt_32 =
+      nir_pack_64_2x32_split(b, nir_ior(b, lo_shifted, hi_shifted_lo),
+                                hi_shifted);
+   nir_ssa_def *res_if_ge_32 =
+      nir_pack_64_2x32_split(b, nir_ishr(b, x_hi, reverse_count),
+                                nir_ishr(b, x_hi, nir_imm_int(b, 31)));
+
+   return nir_bcsel(b,
+                    nir_ieq(b, y, nir_imm_int(b, 0)), x,
+                    nir_bcsel(b, nir_uge(b, y, nir_imm_int(b, 32)),
+                                 res_if_ge_32, res_if_lt_32));
+}
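
lower_ishr64 reuses the same reverse_count = abs(32 - c) trick; the extra wrinkle is sign propagation, which the shift-by-31 handles in the c >= 32 branch. A hedged C model (it assumes two's-complement conversions and an arithmetic >> on signed values, as gcc and clang implement them):

#include <assert.h>
#include <stdint.h>

/* Same decomposition as lower_ishr64; rc is reverse_count = abs(32 - c). */
static uint64_t
arshift64_by_parts(uint64_t x, unsigned c)
{
   uint32_t lo = (uint32_t)x;
   int32_t  hi = (int32_t)(x >> 32);
   unsigned rc = c > 32 ? c - 32 : 32 - c;

   if (c == 0)
      return x;
   if (c < 32)
      return (uint64_t)(uint32_t)(hi >> c) << 32 |
             (uint32_t)(((uint32_t)hi << rc) | (lo >> c));
   return (uint64_t)(uint32_t)(hi >> 31) << 32 | (uint32_t)(hi >> rc);
}

int main(void)
{
   for (unsigned c = 0; c < 64; c++)
      assert(arshift64_by_parts(0xdeadbeef01234567ull, c) ==
             (uint64_t)((int64_t)0xdeadbeef01234567ull >> c));
   return 0;
}
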
+
+static nir_ssa_def *
+lower_ushr64(nir_builder *b, nir_ssa_def *x, nir_ssa_def *y)
+{
+   /* Implemented as
+    *
+    * uint64_t rshift(uint64_t x, int c)
+    * {
+    *    if (c == 0) return x;
+    *
+    *    uint32_t lo = LO(x), hi = HI(x);
+    *
+    *    if (c < 32) {
+    *       uint32_t lo_shifted = lo >> c;
+    *       uint32_t hi_shifted = hi >> c;
+    *       uint32_t hi_shifted_lo = hi << abs(32 - c);
+    *       return pack_64(hi_shifted_lo | lo_shifted, hi_shifted);
+    *    } else {
+    *       uint32_t hi_shifted_lo = hi >> abs(32 - c);
+    *       return pack_64(hi_shifted_lo, 0);
+    *    }
+    * }
+    */
+
+   nir_ssa_def *x_lo = nir_unpack_64_2x32_split_x(b, x);
+   nir_ssa_def *x_hi = nir_unpack_64_2x32_split_y(b, x);
+
+   nir_ssa_def *reverse_count = nir_iabs(b, nir_iadd(b, y, nir_imm_int(b, -32)));
+   nir_ssa_def *lo_shifted = nir_ushr(b, x_lo, y);
+   nir_ssa_def *hi_shifted = nir_ushr(b, x_hi, y);
+   nir_ssa_def *hi_shifted_lo = nir_ishl(b, x_hi, reverse_count);
+
+   nir_ssa_def *res_if_lt_32 =
+      nir_pack_64_2x32_split(b, nir_ior(b, lo_shifted, hi_shifted_lo),
+                                hi_shifted);
+   nir_ssa_def *res_if_ge_32 =
+      nir_pack_64_2x32_split(b, nir_ushr(b, x_hi, reverse_count),
+                                nir_imm_int(b, 0));
+
+   return nir_bcsel(b,
+                    nir_ieq(b, y, nir_imm_int(b, 0)), x,
+                    nir_bcsel(b, nir_uge(b, y, nir_imm_int(b, 32)),
+                                 res_if_ge_32, res_if_lt_32));
+}
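
lower_ushr64 is the same skeleton as lower_ishr64 with zero fill instead of sign fill, which is why res_if_ge_32 packs a literal 0 into the high word. Two native-shift checks make the contrast concrete (again assuming the usual two's-complement behavior of gcc and clang):

#include <assert.h>
#include <stdint.h>

int main(void)
{
   uint64_t x = 0xffffffff00000000ull;   /* hi = 0xffffffff, lo = 0 */

   /* c >= 32: the low result word is hi >> (c - 32) in both passes;
    * the high word is 0 for ushr but a copy of the sign bit for ishr.
    */
   assert(x >> 40 == 0x0000000000ffffffull);   /* ushr: zero fill */
   assert(((int64_t)x >> 40) == -1);           /* ishr: sign fill */
   return 0;
}
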
+
 static nir_ssa_def *
 lower_iadd64(nir_builder *b, nir_ssa_def *x, nir_ssa_def *y)
 {
@@ -251,6 +383,16 @@ lower_imin64(nir_builder *b, nir_ssa_def *x, nir_ssa_def *y)
    return nir_bcsel(b, lower_int64_compare(b, nir_op_ilt, x, y), x, y);
 }
 
+static nir_ssa_def *
+lower_mul_2x32_64(nir_builder *b, nir_ssa_def *x, nir_ssa_def *y,
+                  bool sign_extend)
+{
+   nir_ssa_def *res_hi = sign_extend ? nir_imul_high(b, x, y)
+                                     : nir_umul_high(b, x, y);
+
+   return nir_pack_64_2x32_split(b, nir_imul(b, x, y), res_hi);
+}
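
This helper leans on the fact that a full 32x32 -> 64 multiply is exactly the 32-bit low product packed with the matching mul_high result. A quick C check of that identity (the signed case models imul_high by widening both operands first):

#include <assert.h>
#include <stdint.h>

int main(void)
{
   uint32_t x = 0xdeadbeefu, y = 0x12345678u;
   uint32_t lo = x * y;                                /* nir_imul      */
   uint32_t hi = (uint32_t)(((uint64_t)x * y) >> 32);  /* nir_umul_high */
   assert(((uint64_t)hi << 32 | lo) == (uint64_t)x * y);

   int32_t sx = -559038737, sy = 305419896;    /* 0xdeadbeef, 0x12345678 */
   uint32_t slo = (uint32_t)sx * (uint32_t)sy; /* low bits match imul */
   uint32_t shi =
      (uint32_t)((uint64_t)((int64_t)sx * sy) >> 32);  /* nir_imul_high */
   assert(((uint64_t)shi << 32 | slo) == (uint64_t)((int64_t)sx * sy));
   return 0;
}
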
+
 static nir_ssa_def *
 lower_imul64(nir_builder *b, nir_ssa_def *x, nir_ssa_def *y)
 {
@@ -259,12 +401,13 @@ lower_imul64(nir_builder *b, nir_ssa_def *x, nir_ssa_def *y)
    nir_ssa_def *x_lo = nir_unpack_64_2x32_split_x(b, x);
    nir_ssa_def *x_hi = nir_unpack_64_2x32_split_y(b, x);
    nir_ssa_def *y_lo = nir_unpack_64_2x32_split_x(b, y);
    nir_ssa_def *y_hi = nir_unpack_64_2x32_split_y(b, y);
 
-   nir_ssa_def *res_lo = nir_imul(b, x_lo, y_lo);
-   nir_ssa_def *res_hi = nir_iadd(b, nir_umul_high(b, x_lo, y_lo),
+   nir_ssa_def *mul_lo = nir_umul_2x32_64(b, x_lo, y_lo);
+   nir_ssa_def *res_hi = nir_iadd(b, nir_unpack_64_2x32_split_y(b, mul_lo),
                          nir_iadd(b, nir_imul(b, x_lo, y_hi),
                                      nir_imul(b, x_hi, y_lo)));
 
-   return nir_pack_64_2x32_split(b, res_lo, res_hi);
+   return nir_pack_64_2x32_split(b, nir_unpack_64_2x32_split_x(b, mul_lo),
+                                 res_hi);
 }
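
The reworked lower_imul64 emits one real 32x32 -> 64 multiply for the low partial product and folds the two cross terms into its high word, all mod 2^32; the x_hi * y_hi term contributes only at bit 64 and above, so it drops out entirely. The identity, sketched in C:

#include <assert.h>
#include <stdint.h>

/* Low 64 bits of a 64x64 multiply from 32-bit pieces, mirroring
 * lower_imul64.
 */
static uint64_t
mul64_by_parts(uint64_t x, uint64_t y)
{
   uint32_t x_lo = (uint32_t)x, x_hi = (uint32_t)(x >> 32);
   uint32_t y_lo = (uint32_t)y, y_hi = (uint32_t)(y >> 32);

   uint64_t mul_lo = (uint64_t)x_lo * y_lo;        /* umul_2x32_64 */
   uint32_t res_hi = (uint32_t)(mul_lo >> 32) +
                     x_lo * y_hi + x_hi * y_lo;    /* wraps mod 2^32 */

   return (uint64_t)res_hi << 32 | (uint32_t)mul_lo;
}

int main(void)
{
   assert(mul64_by_parts(0x0123456789abcdefull, 0xfedcba9876543210ull) ==
          0x0123456789abcdefull * 0xfedcba9876543210ull);
   return 0;
}
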
 
 static nir_ssa_def *
@@ -309,9 +452,8 @@ lower_mul_high64(nir_builder *b, nir_ssa_def *x, nir_ssa_def *y,
           * so we're guaranteed that we can add in two more 32-bit values
           * without overflowing tmp.
           */
-         nir_ssa_def *tmp =
-            nir_pack_64_2x32_split(b, nir_imul(b, x32[i], y32[j]),
-                                      nir_umul_high(b, x32[i], y32[j]));
+         nir_ssa_def *tmp = nir_umul_2x32_64(b, x32[i], y32[j]);
+
          if (res[i + j])
             tmp = nir_iadd(b, tmp, nir_u2u64(b, res[i + j]));
          if (carry)
@@ -351,9 +493,8 @@ lower_udiv64_mod64(nir_builder *b, nir_ssa_def *n, nir_ssa_def *d,
    nir_ssa_def *d_lo = nir_unpack_64_2x32_split_x(b, d);
    nir_ssa_def *d_hi = nir_unpack_64_2x32_split_y(b, d);
 
-   nir_const_value v = { .u32 = { 0, 0, 0, 0 } };
-   nir_ssa_def *q_lo = nir_build_imm(b, n->num_components, 32, v);
-   nir_ssa_def *q_hi = nir_build_imm(b, n->num_components, 32, v);
+   nir_ssa_def *q_lo = nir_imm_zero(b, n->num_components, 32);
+   nir_ssa_def *q_hi = nir_imm_zero(b, n->num_components, 32);
 
    nir_ssa_def *n_hi_before_if = n_hi;
    nir_ssa_def *q_hi_before_if = q_hi;
@@ -488,12 +629,43 @@ lower_irem64(nir_builder *b, nir_ssa_def *n, nir_ssa_def *d)
    return nir_bcsel(b, n_is_neg, nir_ineg(b, r), r);
 }
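
For context, nir_op_irem matches C's % operator: the result takes the sign of the numerator, which is why the lowering computes an unsigned remainder and conditionally negates it at the end:

#include <assert.h>

int main(void)
{
   assert(-7 % 3 == -1);    /* sign of the numerator, like nir_op_irem */
   assert( 7 % -3 ==  1);
   assert(-7 % -3 == -1);
   return 0;
}
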
 
-static nir_lower_int64_options
-opcode_to_options_mask(nir_op opcode)
+static nir_ssa_def *
+lower_extract(nir_builder *b, nir_op op, nir_ssa_def *x, nir_ssa_def *c)
+{
+   assert(op == nir_op_extract_u8 || op == nir_op_extract_i8 ||
+          op == nir_op_extract_u16 || op == nir_op_extract_i16);
+
+   const int chunk = nir_src_as_uint(nir_src_for_ssa(c));
+   const int chunk_bits =
+      (op == nir_op_extract_u8 || op == nir_op_extract_i8) ? 8 : 16;
+   const int num_chunks_in_32 = 32 / chunk_bits;
+
+   nir_ssa_def *extract32;
+   if (chunk < num_chunks_in_32) {
+      extract32 = nir_build_alu(b, op, nir_unpack_64_2x32_split_x(b, x),
+                                   nir_imm_int(b, chunk),
+                                   NULL, NULL);
+   } else {
+      extract32 = nir_build_alu(b, op, nir_unpack_64_2x32_split_y(b, x),
+                                   nir_imm_int(b, chunk - num_chunks_in_32),
+                                   NULL, NULL);
+   }
+
+   if (op == nir_op_extract_i8 || op == nir_op_extract_i16)
+      return lower_i2i64(b, extract32);
+   else
+      return lower_u2u64(b, extract32);
+}
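
lower_extract never shifts across the 32-bit boundary: it picks whichever half contains the requested chunk and emits an ordinary 32-bit extract on it. A hedged C model of the extract_u8 case (the helper name is illustrative):

#include <assert.h>
#include <stdint.h>

static uint64_t
extract_u8_from_64(uint64_t x, unsigned chunk)   /* chunk in [0, 7] */
{
   /* chunk < 4 lives in the low dword; otherwise use chunk - 4 in the
    * high dword, exactly as the lowering selects its 32-bit source.
    */
   uint32_t half = chunk < 4 ? (uint32_t)x : (uint32_t)(x >> 32);
   unsigned c32 = chunk & 3;
   return (half >> (c32 * 8)) & 0xff;   /* 32-bit extract_u8, zero-extended */
}

int main(void)
{
   uint64_t v = 0x0123456789abcdefull;
   for (unsigned c = 0; c < 8; c++)
      assert(extract_u8_from_64(v, c) == ((v >> (c * 8)) & 0xff));
   return 0;
}
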
+
+nir_lower_int64_options
+nir_lower_int64_op_to_options_mask(nir_op opcode)
 {
    switch (opcode) {
    case nir_op_imul:
       return nir_lower_imul64;
+   case nir_op_imul_2x32_64:
+   case nir_op_umul_2x32_64:
+      return nir_lower_imul_2x32_64;
    case nir_op_imul_high:
    case nir_op_umul_high:
       return nir_lower_imul_high64;
@@ -505,14 +677,61 @@ opcode_to_options_mask(nir_op opcode)
    case nir_op_imod:
    case nir_op_irem:
       return nir_lower_divmod64;
+   case nir_op_b2i64:
+   case nir_op_i2b1:
+   case nir_op_i2i8:
+   case nir_op_i2i16:
+   case nir_op_i2i32:
+   case nir_op_i2i64:
+   case nir_op_u2u8:
+   case nir_op_u2u16:
+   case nir_op_u2u32:
+   case nir_op_u2u64:
+   case nir_op_bcsel:
+      return nir_lower_mov64;
+   case nir_op_ieq:
+   case nir_op_ine:
+   case nir_op_ult:
+   case nir_op_ilt:
+   case nir_op_uge:
+   case nir_op_ige:
+      return nir_lower_icmp64;
+   case nir_op_iadd:
+   case nir_op_isub:
+      return nir_lower_iadd64;
+   case nir_op_imin:
+   case nir_op_imax:
+   case nir_op_umin:
+   case nir_op_umax:
+      return nir_lower_minmax64;
+   case nir_op_iabs:
+      return nir_lower_iabs64;
+   case nir_op_ineg:
+      return nir_lower_ineg64;
+   case nir_op_iand:
+   case nir_op_ior:
+   case nir_op_ixor:
+   case nir_op_inot:
+      return nir_lower_logic64;
+   case nir_op_ishl:
+   case nir_op_ishr:
+   case nir_op_ushr:
+      return nir_lower_shift64;
+   case nir_op_extract_u8:
+   case nir_op_extract_i8:
+   case nir_op_extract_u16:
+   case nir_op_extract_i16:
+      return nir_lower_extract64;
    default:
       return 0;
    }
 }
 
 static nir_ssa_def *
-lower_int64_alu_instr(nir_builder *b, nir_alu_instr *alu)
+lower_int64_alu_instr(nir_builder *b, nir_instr *instr, void *_state)
 {
+   nir_alu_instr *alu = nir_instr_as_alu(instr);
+
    nir_ssa_def *src[4];
    for (unsigned i = 0; i < nir_op_infos[alu->op].num_inputs; i++)
       src[i] = nir_ssa_for_alu_src(b, alu, i);
@@ -520,6 +735,10 @@ lower_int64_alu_instr(nir_builder *b, nir_alu_instr *alu)
    switch (alu->op) {
    case nir_op_imul:
       return lower_imul64(b, src[0], src[1]);
+   case nir_op_imul_2x32_64:
+      return lower_mul_2x32_64(b, src[0], src[1], true);
+   case nir_op_umul_2x32_64:
+      return lower_mul_2x32_64(b, src[0], src[1], false);
    case nir_op_imul_high:
       return lower_mul_high64(b, src[0], src[1], true);
    case nir_op_umul_high:
@@ -536,56 +755,134 @@ lower_int64_alu_instr(nir_builder *b, nir_alu_instr *alu)
       return lower_imod64(b, src[0], src[1]);
    case nir_op_irem:
       return lower_irem64(b, src[0], src[1]);
+   case nir_op_b2i64:
+      return lower_b2i64(b, src[0]);
+   case nir_op_i2b1:
+      return lower_i2b(b, src[0]);
+   case nir_op_i2i8:
+      return lower_i2i8(b, src[0]);
+   case nir_op_i2i16:
+      return lower_i2i16(b, src[0]);
+   case nir_op_i2i32:
+      return lower_i2i32(b, src[0]);
+   case nir_op_i2i64:
+      return lower_i2i64(b, src[0]);
+   case nir_op_u2u8:
+      return lower_u2u8(b, src[0]);
+   case nir_op_u2u16:
+      return lower_u2u16(b, src[0]);
+   case nir_op_u2u32:
+      return lower_u2u32(b, src[0]);
+   case nir_op_u2u64:
+      return lower_u2u64(b, src[0]);
+   case nir_op_bcsel:
+      return lower_bcsel64(b, src[0], src[1], src[2]);
+   case nir_op_ieq:
+   case nir_op_ine:
+   case nir_op_ult:
+   case nir_op_ilt:
+   case nir_op_uge:
+   case nir_op_ige:
+      return lower_int64_compare(b, alu->op, src[0], src[1]);
+   case nir_op_iadd:
+      return lower_iadd64(b, src[0], src[1]);
+   case nir_op_isub:
+      return lower_isub64(b, src[0], src[1]);
+   case nir_op_imin:
+      return lower_imin64(b, src[0], src[1]);
+   case nir_op_imax:
+      return lower_imax64(b, src[0], src[1]);
+   case nir_op_umin:
+      return lower_umin64(b, src[0], src[1]);
+   case nir_op_umax:
+      return lower_umax64(b, src[0], src[1]);
+   case nir_op_iabs:
+      return lower_iabs64(b, src[0]);
+   case nir_op_ineg:
+      return lower_ineg64(b, src[0]);
+   case nir_op_iand:
+      return lower_iand64(b, src[0], src[1]);
+   case nir_op_ior:
+      return lower_ior64(b, src[0], src[1]);
+   case nir_op_ixor:
+      return lower_ixor64(b, src[0], src[1]);
+   case nir_op_inot:
+      return lower_inot64(b, src[0]);
+   case nir_op_ishl:
+      return lower_ishl64(b, src[0], src[1]);
+   case nir_op_ishr:
+      return lower_ishr64(b, src[0], src[1]);
+   case nir_op_ushr:
+      return lower_ushr64(b, src[0], src[1]);
+   case nir_op_extract_u8:
+   case nir_op_extract_i8:
+   case nir_op_extract_u16:
+   case nir_op_extract_i16:
+      return lower_extract(b, alu->op, src[0], src[1]);
    default:
       unreachable("Invalid ALU opcode to lower");
    }
 }
 
 static bool
-lower_int64_impl(nir_function_impl *impl, nir_lower_int64_options options)
+should_lower_int64_alu_instr(const nir_instr *instr, const void *_options)
 {
-   nir_builder b;
-   nir_builder_init(&b, impl);
-
-   bool progress = false;
-   nir_foreach_block(block, impl) {
-      nir_foreach_instr_safe(instr, block) {
-         if (instr->type != nir_instr_type_alu)
-            continue;
+   const nir_lower_int64_options options =
+      *(const nir_lower_int64_options *)_options;
 
-         nir_alu_instr *alu = nir_instr_as_alu(instr);
-         assert(alu->dest.dest.is_ssa);
-         if (alu->dest.dest.ssa.bit_size != 64)
-            continue;
+   if (instr->type != nir_instr_type_alu)
+      return false;
 
-         if (!(options & opcode_to_options_mask(alu->op)))
-            continue;
+   const nir_alu_instr *alu = nir_instr_as_alu(instr);
 
-         b.cursor = nir_before_instr(instr);
-
-         nir_ssa_def *lowered = lower_int64_alu_instr(&b, alu);
-         nir_ssa_def_rewrite_uses(&alu->dest.dest.ssa,
-                                  nir_src_for_ssa(lowered));
-         nir_instr_remove(&alu->instr);
-         progress = true;
-      }
+   switch (alu->op) {
+   case nir_op_i2b1:
+   case nir_op_i2i8:
+   case nir_op_i2i16:
+   case nir_op_i2i32:
+   case nir_op_u2u8:
+   case nir_op_u2u16:
+   case nir_op_u2u32:
+      assert(alu->src[0].src.is_ssa);
+      if (alu->src[0].src.ssa->bit_size != 64)
+         return false;
+      break;
+   case nir_op_bcsel:
+      assert(alu->src[1].src.is_ssa);
+      assert(alu->src[2].src.is_ssa);
+      assert(alu->src[1].src.ssa->bit_size ==
+             alu->src[2].src.ssa->bit_size);
+      if (alu->src[1].src.ssa->bit_size != 64)
+         return false;
+      break;
+   case nir_op_ieq:
+   case nir_op_ine:
+   case nir_op_ult:
+   case nir_op_ilt:
+   case nir_op_uge:
+   case nir_op_ige:
+      assert(alu->src[0].src.is_ssa);
+      assert(alu->src[1].src.is_ssa);
+      assert(alu->src[0].src.ssa->bit_size ==
+             alu->src[1].src.ssa->bit_size);
+      if (alu->src[0].src.ssa->bit_size != 64)
+         return false;
+      break;
+   default:
+      assert(alu->dest.dest.is_ssa);
+      if (alu->dest.dest.ssa.bit_size != 64)
+         return false;
+      break;
    }
 
-   if (progress)
-      nir_metadata_preserve(impl, nir_metadata_none);
-
-   return progress;
+   return (options & nir_lower_int64_op_to_options_mask(alu->op)) != 0;
 }
 
 bool
 nir_lower_int64(nir_shader *shader, nir_lower_int64_options options)
 {
-   bool progress = false;
-
-   nir_foreach_function(function, shader) {
-      if (function->impl)
-         progress |= lower_int64_impl(function->impl, options);
-   }
-
-   return progress;
+   return nir_shader_lower_instructions(shader,
+                                        should_lower_int64_alu_instr,
+                                        lower_int64_alu_instr,
+                                        &options);
 }
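
Callers are unchanged by the rewrite; a backend still ORs together the option bits it needs. A hypothetical call site, assuming a driver that lacks native 64-bit shifts, multiplies, and division:

#include "nir.h"

/* Hypothetical driver helper: request lowering only for the 64-bit ops
 * this backend cannot do natively.
 */
static bool
backend_lower_int64(nir_shader *shader)
{
   return nir_lower_int64(shader, nir_lower_shift64 |
                                  nir_lower_imul64 |
                                  nir_lower_divmod64);
}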