diff --git a/src/compiler/spirv/vtn_glsl450.c b/src/compiler/spirv/vtn_glsl450.c
index dd38cc95ccf70709bdc193acfe622c3d4860d3ed..753e74cf73cb93ac660eecd81e6a791a2df0296d 100644
--- a/src/compiler/spirv/vtn_glsl450.c
+++ b/src/compiler/spirv/vtn_glsl450.c
  *
  */
 
+#include <math.h>
+
+#include "nir/nir_builtin_builder.h"
+
 #include "vtn_private.h"
 #include "GLSL.std.450.h"
 
 static nir_ssa_def *
 build_mat2_det(nir_builder *b, nir_ssa_def *col[2])
 {
-   unsigned swiz[4] = {1, 0, 0, 0};
-   nir_ssa_def *p = nir_fmul(b, col[0], nir_swizzle(b, col[1], swiz, 2, true));
+   unsigned swiz[2] = {1, 0 };
+   nir_ssa_def *p = nir_fmul(b, col[0], nir_swizzle(b, col[1], swiz, 2));
    return nir_fsub(b, nir_channel(b, p, 0), nir_channel(b, p, 1));
 }
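
For reference, build_mat2_det above and build_mat3_det below are the usual cofactor expansions written with swizzles over column vectors; restating them (standard linear algebra, not something introduced by this change):

    \det(c^0, c^1) = c^0_x c^1_y - c^0_y c^1_x
    \det(c^0, c^1, c^2) = \sum_{i=0}^{2} c^0_i \left(c^1_{i+1} c^2_{i+2} - c^1_{i+2} c^2_{i+1}\right), \quad \text{indices taken mod 3}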
 
 static nir_ssa_def *
 build_mat3_det(nir_builder *b, nir_ssa_def *col[3])
 {
-   unsigned yzx[4] = {1, 2, 0, 0};
-   unsigned zxy[4] = {2, 0, 1, 0};
+   unsigned yzx[3] = {1, 2, 0 };
+   unsigned zxy[3] = {2, 0, 1 };
 
    nir_ssa_def *prod0 =
       nir_fmul(b, col[0],
-               nir_fmul(b, nir_swizzle(b, col[1], yzx, 3, true),
-                           nir_swizzle(b, col[2], zxy, 3, true)));
+               nir_fmul(b, nir_swizzle(b, col[1], yzx, 3),
+                           nir_swizzle(b, col[2], zxy, 3)));
    nir_ssa_def *prod1 =
       nir_fmul(b, col[0],
-               nir_fmul(b, nir_swizzle(b, col[1], zxy, 3, true),
-                           nir_swizzle(b, col[2], yzx, 3, true)));
+               nir_fmul(b, nir_swizzle(b, col[1], zxy, 3),
+                           nir_swizzle(b, col[2], yzx, 3)));
 
    nir_ssa_def *diff = nir_fsub(b, prod0, prod1);
 
@@ -72,9 +76,9 @@ build_mat4_det(nir_builder *b, nir_ssa_def **col)
          swiz[j] = j + (j >= i);
 
       nir_ssa_def *subcol[3];
-      subcol[0] = nir_swizzle(b, col[1], swiz, 3, true);
-      subcol[1] = nir_swizzle(b, col[2], swiz, 3, true);
-      subcol[2] = nir_swizzle(b, col[3], swiz, 3, true);
+      subcol[0] = nir_swizzle(b, col[1], swiz, 3);
+      subcol[1] = nir_swizzle(b, col[2], swiz, 3);
+      subcol[2] = nir_swizzle(b, col[3], swiz, 3);
 
       subdet[i] = build_mat3_det(b, subcol);
    }
@@ -101,7 +105,7 @@ build_mat_det(struct vtn_builder *b, struct vtn_ssa_value *src)
    case 3: return build_mat3_det(&b->nb, cols);
    case 4: return build_mat4_det(&b->nb, cols);
    default:
-      unreachable("Invalid matrix size");
+      vtn_fail("Invalid matrix size");
    }
 }
 
@@ -126,7 +130,7 @@ build_mat_subdet(struct nir_builder *b, struct vtn_ssa_value *src,
       for (unsigned j = 0; j < size; j++) {
          if (j != col) {
             subcol[j - (j > col)] = nir_swizzle(b, src->elems[j]->def,
-                                                swiz, size - 1, true);
+                                                swiz, size - 1);
          }
       }
 
@@ -167,33 +171,13 @@ matrix_inverse(struct vtn_builder *b, struct vtn_ssa_value *src)
    return val;
 }
 
-static nir_ssa_def*
-build_length(nir_builder *b, nir_ssa_def *vec)
-{
-   switch (vec->num_components) {
-   case 1: return nir_fsqrt(b, nir_fmul(b, vec, vec));
-   case 2: return nir_fsqrt(b, nir_fdot2(b, vec, vec));
-   case 3: return nir_fsqrt(b, nir_fdot3(b, vec, vec));
-   case 4: return nir_fsqrt(b, nir_fdot4(b, vec, vec));
-   default:
-      unreachable("Invalid number of components");
-   }
-}
-
-static inline nir_ssa_def *
-build_fclamp(nir_builder *b,
-             nir_ssa_def *x, nir_ssa_def *min_val, nir_ssa_def *max_val)
-{
-   return nir_fmin(b, nir_fmax(b, x, min_val), max_val);
-}
-
 /**
  * Return e^x.
  */
 static nir_ssa_def *
 build_exp(nir_builder *b, nir_ssa_def *x)
 {
-   return nir_fexp2(b, nir_fmul(b, x, nir_imm_float(b, M_LOG2E)));
+   return nir_fexp2(b, nir_fmul_imm(b, x, M_LOG2E));
 }
 
 /**
@@ -202,7 +186,7 @@ build_exp(nir_builder *b, nir_ssa_def *x)
 static nir_ssa_def *
 build_log(nir_builder *b, nir_ssa_def *x)
 {
-   return nir_fmul(b, nir_flog2(b, x), nir_imm_float(b, 1.0 / M_LOG2E));
+   return nir_fmul_imm(b, nir_flog2(b, x), 1.0 / M_LOG2E);
 }
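
Both helpers lean on the standard change-of-base identities, restated here for context:

    e^{x} = 2^{\,x \log_2 e} \qquad \ln x = \frac{\log_2 x}{\log_2 e} = \log_2 x \cdot \frac{1}{\log_2 e}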
 
 /**
@@ -218,17 +202,36 @@ build_log(nir_builder *b, nir_ssa_def *x)
 static nir_ssa_def *
 build_asin(nir_builder *b, nir_ssa_def *x, float p0, float p1)
 {
+   if (x->bit_size == 16) {
+      /* The polynomial approximation isn't precise enough to meet half-float
+       * precision requirements. Alternatively, we could implement this using
+       * the formula:
+       *
+       * asin(x) = atan2(x, sqrt(1 - x*x))
+       *
+       * But that is very expensive, so instead we just do the polynomial
+       * approximation in 32-bit math and then we convert the result back to
+       * 16-bit.
+       */
+      return nir_f2f16(b, build_asin(b, nir_f2f32(b, x), p0, p1));
+   }
+
+   nir_ssa_def *one = nir_imm_floatN_t(b, 1.0f, x->bit_size);
    nir_ssa_def *abs_x = nir_fabs(b, x);
+
+   nir_ssa_def *p0_plus_xp1 = nir_fadd_imm(b, nir_fmul_imm(b, abs_x, p1), p0);
+
+   nir_ssa_def *expr_tail =
+      nir_fadd_imm(b, nir_fmul(b, abs_x,
+                                  nir_fadd_imm(b, nir_fmul(b, abs_x,
+                                                               p0_plus_xp1),
+                                                  M_PI_4f - 1.0f)),
+                      M_PI_2f);
+
    return nir_fmul(b, nir_fsign(b, x),
-                   nir_fsub(b, nir_imm_float(b, M_PI_2f),
-                            nir_fmul(b, nir_fsqrt(b, nir_fsub(b, nir_imm_float(b, 1.0f), abs_x)),
-                                     nir_fadd(b, nir_imm_float(b, M_PI_2f),
-                                              nir_fmul(b, abs_x,
-                                                       nir_fadd(b, nir_imm_float(b, M_PI_4f - 1.0f),
-                                                                nir_fmul(b, abs_x,
-                                                                         nir_fadd(b, nir_imm_float(b, p0),
-                                                                                  nir_fmul(b, abs_x,
-                                                                                           nir_imm_float(b, p1))))))))));
+                      nir_fsub(b, nir_imm_floatN_t(b, M_PI_2f, x->bit_size),
+                                  nir_fmul(b, nir_fsqrt(b, nir_fsub(b, one, abs_x)),
+                                                           expr_tail)));
 }
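
Written out as a single expression, the approximation that build_asin assembles from these multiply-adds is:

    \operatorname{asin}(x) \approx \operatorname{sign}(x)\left(\frac{\pi}{2} - \sqrt{1 - |x|}\,\left(\frac{\pi}{2} + |x|\left(\left(\frac{\pi}{4} - 1\right) + |x|\,(p_0 + |x|\,p_1)\right)\right)\right)

with p_0 and p_1 supplied by the callers (e.g. the Acos case further down).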
 
 /**
@@ -248,8 +251,10 @@ build_fsum(nir_builder *b, nir_ssa_def **xs, int terms)
 static nir_ssa_def *
 build_atan(nir_builder *b, nir_ssa_def *y_over_x)
 {
+   const uint32_t bit_size = y_over_x->bit_size;
+
    nir_ssa_def *abs_y_over_x = nir_fabs(b, y_over_x);
-   nir_ssa_def *one = nir_imm_float(b, 1.0f);
+   nir_ssa_def *one = nir_imm_floatN_t(b, 1.0f, bit_size);
 
    /*
     * range-reduction, first step:
@@ -276,12 +281,12 @@ build_atan(nir_builder *b, nir_ssa_def *y_over_x)
    nir_ssa_def *x_11 = nir_fmul(b, x_9, x_2);
 
    nir_ssa_def *polynomial_terms[] = {
-      nir_fmul(b, x,    nir_imm_float(b,  0.9999793128310355f)),
-      nir_fmul(b, x_3,  nir_imm_float(b, -0.3326756418091246f)),
-      nir_fmul(b, x_5,  nir_imm_float(b,  0.1938924977115610f)),
-      nir_fmul(b, x_7,  nir_imm_float(b, -0.1173503194786851f)),
-      nir_fmul(b, x_9,  nir_imm_float(b,  0.0536813784310406f)),
-      nir_fmul(b, x_11, nir_imm_float(b, -0.0121323213173444f)),
+      nir_fmul_imm(b, x,     0.9999793128310355f),
+      nir_fmul_imm(b, x_3,  -0.3326756418091246f),
+      nir_fmul_imm(b, x_5,   0.1938924977115610f),
+      nir_fmul_imm(b, x_7,  -0.1173503194786851f),
+      nir_fmul_imm(b, x_9,   0.0536813784310406f),
+      nir_fmul_imm(b, x_11, -0.0121323213173444f),
    };
 
    nir_ssa_def *tmp =
@@ -289,11 +294,8 @@ build_atan(nir_builder *b, nir_ssa_def *y_over_x)
 
    /* range-reduction fixup */
    tmp = nir_fadd(b, tmp,
-                  nir_fmul(b,
-                           nir_b2f(b, nir_flt(b, one, abs_y_over_x)),
-                           nir_fadd(b, nir_fmul(b, tmp,
-                                                nir_imm_float(b, -2.0f)),
-                                       nir_imm_float(b, M_PI_2f))));
+                  nir_fmul(b, nir_b2f(b, nir_flt(b, one, abs_y_over_x), bit_size),
+                           nir_fadd_imm(b, nir_fmul_imm(b, tmp, -2.0f), M_PI_2f)));
 
    /* sign fixup */
    return nir_fmul(b, tmp, nir_fsign(b, y_over_x));
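
Taken together, the polynomial terms and the range-reduction fixup above compute

    \operatorname{atan}(r) \approx 0.9999793\,r - 0.3326756\,r^3 + 0.1938925\,r^5 - 0.1173503\,r^7 + 0.0536814\,r^9 - 0.0121323\,r^{11} \quad \text{for } |r| \le 1

    \operatorname{atan}(r) = \frac{\pi}{2} - \operatorname{atan}(1/r) \quad \text{for } r > 1

assuming the (elided) first range-reduction step has already mapped |y_over_x| into [0, 1]; the fixup tmp + (|y_over_x| > 1) * (pi/2 - 2*tmp) then turns the polynomial's result for the reduced argument back into atan of the original ratio, and the sign fixup restores the sign of y_over_x.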
@@ -302,8 +304,11 @@ build_atan(nir_builder *b, nir_ssa_def *y_over_x)
 static nir_ssa_def *
 build_atan2(nir_builder *b, nir_ssa_def *y, nir_ssa_def *x)
 {
-   nir_ssa_def *zero = nir_imm_float(b, 0);
-   nir_ssa_def *one = nir_imm_float(b, 1);
+   assert(y->bit_size == x->bit_size);
+   const uint32_t bit_size = x->bit_size;
+
+   nir_ssa_def *zero = nir_imm_floatN_t(b, 0, bit_size);
+   nir_ssa_def *one = nir_imm_floatN_t(b, 1, bit_size);
 
    /* If we're on the left half-plane rotate the coordinates π/2 clock-wise
     * for the y=0 discontinuity to end up aligned with the vertical
@@ -333,9 +338,10 @@ build_atan2(nir_builder *b, nir_ssa_def *y, nir_ssa_def *x)
     * floating point representations with at least the dynamic range of ATI's
     * 24-bit representation.
     */
-   nir_ssa_def *huge = nir_imm_float(b, 1e18f);
+   const double huge_val = bit_size >= 32 ? 1e18 : 16384;
+   nir_ssa_def *huge = nir_imm_floatN_t(b, huge_val, bit_size);
    nir_ssa_def *scale = nir_bcsel(b, nir_fge(b, nir_fabs(b, t), huge),
-                                  nir_imm_float(b, 0.25), one);
+                                  nir_imm_floatN_t(b, 0.25, bit_size), one);
    nir_ssa_def *rcp_scaled_t = nir_frcp(b, nir_fmul(b, t, scale));
    nir_ssa_def *s_over_t = nir_fmul(b, nir_fmul(b, s, scale), rcp_scaled_t);
 
@@ -362,9 +368,9 @@ build_atan2(nir_builder *b, nir_ssa_def *y, nir_ssa_def *x)
    /* Calculate the arctangent and fix up the result if we had flipped the
     * coordinate system.
     */
-   nir_ssa_def *arc = nir_fadd(b, nir_fmul(b, nir_b2f(b, flip),
-                                           nir_imm_float(b, M_PI_2f)),
-                               build_atan(b, tan));
+   nir_ssa_def *arc =
+      nir_fadd(b, nir_fmul_imm(b, nir_b2f(b, flip, bit_size), M_PI_2f),
+                  build_atan(b, tan));
 
    /* Rather convoluted calculation of the sign of the result.  When x < 0 we
     * cannot use fsign because we need to be able to distinguish between
@@ -379,41 +385,9 @@ build_atan2(nir_builder *b, nir_ssa_def *y, nir_ssa_def *x)
                     nir_fneg(b, arc), arc);
 }
 
-static nir_ssa_def *
-build_frexp(nir_builder *b, nir_ssa_def *x, nir_ssa_def **exponent)
-{
-   nir_ssa_def *abs_x = nir_fabs(b, x);
-   nir_ssa_def *zero = nir_imm_float(b, 0.0f);
-
-   /* Single-precision floating-point values are stored as
-    *   1 sign bit;
-    *   8 exponent bits;
-    *   23 mantissa bits.
-    *
-    * An exponent shift of 23 will shift the mantissa out, leaving only the
-    * exponent and sign bit (which itself may be zero, if the absolute value
-    * was taken before the bitcast and shift.
-    */
-   nir_ssa_def *exponent_shift = nir_imm_int(b, 23);
-   nir_ssa_def *exponent_bias = nir_imm_int(b, -126);
-
-   nir_ssa_def *sign_mantissa_mask = nir_imm_int(b, 0x807fffffu);
-
-   /* Exponent of floating-point values in the range [0.5, 1.0). */
-   nir_ssa_def *exponent_value = nir_imm_int(b, 0x3f000000u);
-
-   nir_ssa_def *is_not_zero = nir_fne(b, abs_x, zero);
-
-   *exponent =
-      nir_iadd(b, nir_ushr(b, abs_x, exponent_shift),
-                  nir_bcsel(b, is_not_zero, exponent_bias, zero));
-
-   return nir_ior(b, nir_iand(b, x, sign_mantissa_mask),
-                     nir_bcsel(b, is_not_zero, exponent_value, zero));
-}
-
 static nir_op
-vtn_nir_alu_op_for_spirv_glsl_opcode(enum GLSLstd450 opcode)
+vtn_nir_alu_op_for_spirv_glsl_opcode(struct vtn_builder *b,
+                                     enum GLSLstd450 opcode)
 {
    switch (opcode) {
    case GLSLstd450Round:         return nir_op_fround_even;
@@ -433,9 +407,11 @@ vtn_nir_alu_op_for_spirv_glsl_opcode(enum GLSLstd450 opcode)
    case GLSLstd450Log2:          return nir_op_flog2;
    case GLSLstd450Sqrt:          return nir_op_fsqrt;
    case GLSLstd450InverseSqrt:   return nir_op_frsq;
+   case GLSLstd450NMin:          return nir_op_fmin;
    case GLSLstd450FMin:          return nir_op_fmin;
    case GLSLstd450UMin:          return nir_op_umin;
    case GLSLstd450SMin:          return nir_op_imin;
+   case GLSLstd450NMax:          return nir_op_fmax;
    case GLSLstd450FMax:          return nir_op_fmax;
    case GLSLstd450UMax:          return nir_op_umax;
    case GLSLstd450SMax:          return nir_op_imax;
@@ -452,19 +428,21 @@ vtn_nir_alu_op_for_spirv_glsl_opcode(enum GLSLstd450 opcode)
    case GLSLstd450PackSnorm2x16:    return nir_op_pack_snorm_2x16;
    case GLSLstd450PackUnorm2x16:    return nir_op_pack_unorm_2x16;
    case GLSLstd450PackHalf2x16:     return nir_op_pack_half_2x16;
-   case GLSLstd450PackDouble2x32:   return nir_op_pack_double_2x32;
+   case GLSLstd450PackDouble2x32:   return nir_op_pack_64_2x32;
    case GLSLstd450UnpackSnorm4x8:   return nir_op_unpack_snorm_4x8;
    case GLSLstd450UnpackUnorm4x8:   return nir_op_unpack_unorm_4x8;
    case GLSLstd450UnpackSnorm2x16:  return nir_op_unpack_snorm_2x16;
    case GLSLstd450UnpackUnorm2x16:  return nir_op_unpack_unorm_2x16;
    case GLSLstd450UnpackHalf2x16:   return nir_op_unpack_half_2x16;
-   case GLSLstd450UnpackDouble2x32: return nir_op_unpack_double_2x32;
+   case GLSLstd450UnpackDouble2x32: return nir_op_unpack_64_2x32;
 
    default:
-      unreachable("No NIR equivalent");
+      vtn_fail("No NIR equivalent");
    }
 }
 
+#define NIR_IMM_FP(n, v) (nir_imm_floatN_t(n, v, src[0]->bit_size))
+
 static void
 handle_glsl450_alu(struct vtn_builder *b, enum GLSLstd450 entrypoint,
                    const uint32_t *w, unsigned count)
@@ -479,15 +457,20 @@ handle_glsl450_alu(struct vtn_builder *b, enum GLSLstd450 entrypoint,
    /* Collect the various SSA sources */
    unsigned num_inputs = count - 5;
    nir_ssa_def *src[3] = { NULL, };
-   for (unsigned i = 0; i < num_inputs; i++)
+   for (unsigned i = 0; i < num_inputs; i++) {
+      /* These are handled specially below */
+      if (vtn_untyped_value(b, w[i + 5])->value_type == vtn_value_type_pointer)
+         continue;
+
       src[i] = vtn_ssa_value(b, w[i + 5])->def;
+   }
 
    switch (entrypoint) {
    case GLSLstd450Radians:
-      val->ssa->def = nir_fmul(nb, src[0], nir_imm_float(nb, 0.01745329251));
+      val->ssa->def = nir_radians(nb, src[0]);
       return;
    case GLSLstd450Degrees:
-      val->ssa->def = nir_fmul(nb, src[0], nir_imm_float(nb, 57.2957795131));
+      val->ssa->def = nir_degrees(nb, src[0]);
       return;
    case GLSLstd450Tan:
       val->ssa->def = nir_fdiv(nb, nir_fsin(nb, src[0]),
@@ -498,15 +481,15 @@ handle_glsl450_alu(struct vtn_builder *b, enum GLSLstd450 entrypoint,
       nir_ssa_def *sign = nir_fsign(nb, src[0]);
       nir_ssa_def *abs = nir_fabs(nb, src[0]);
       val->ssa->def = nir_fmul(nb, sign, nir_ffract(nb, abs));
-      nir_store_deref_var(nb, vtn_nir_deref(b, w[6]),
-                          nir_fmul(nb, sign, nir_ffloor(nb, abs)), 0xf);
+      nir_store_deref(nb, vtn_nir_deref(b, w[6]),
+                      nir_fmul(nb, sign, nir_ffloor(nb, abs)), 0xf);
       return;
    }
 
    case GLSLstd450ModfStruct: {
       nir_ssa_def *sign = nir_fsign(nb, src[0]);
       nir_ssa_def *abs = nir_fabs(nb, src[0]);
-      assert(glsl_type_is_struct(val->ssa->type));
+      vtn_assert(glsl_type_is_struct_or_ifc(val->ssa->type));
       val->ssa->elems[0]->def = nir_fmul(nb, sign, nir_ffract(nb, abs));
       val->ssa->elems[1]->def = nir_fmul(nb, sign, nir_ffloor(nb, abs));
       return;
@@ -517,13 +500,13 @@ handle_glsl450_alu(struct vtn_builder *b, enum GLSLstd450 entrypoint,
       return;
 
    case GLSLstd450Length:
-      val->ssa->def = build_length(nb, src[0]);
+      val->ssa->def = nir_fast_length(nb, src[0]);
       return;
    case GLSLstd450Distance:
-      val->ssa->def = build_length(nb, nir_fsub(nb, src[0], src[1]));
+      val->ssa->def = nir_fast_distance(nb, src[0], src[1]);
       return;
    case GLSLstd450Normalize:
-      val->ssa->def = nir_fdiv(nb, src[0], build_length(nb, src[0]));
+      val->ssa->def = nir_fast_normalize(nb, src[0]);
       return;
 
    case GLSLstd450Exp:
@@ -535,51 +518,37 @@ handle_glsl450_alu(struct vtn_builder *b, enum GLSLstd450 entrypoint,
       return;
 
    case GLSLstd450FClamp:
-      val->ssa->def = build_fclamp(nb, src[0], src[1], src[2]);
+   case GLSLstd450NClamp:
+      val->ssa->def = nir_fclamp(nb, src[0], src[1], src[2]);
       return;
    case GLSLstd450UClamp:
-      val->ssa->def = nir_umin(nb, nir_umax(nb, src[0], src[1]), src[2]);
+      val->ssa->def = nir_uclamp(nb, src[0], src[1], src[2]);
       return;
    case GLSLstd450SClamp:
-      val->ssa->def = nir_imin(nb, nir_imax(nb, src[0], src[1]), src[2]);
+      val->ssa->def = nir_iclamp(nb, src[0], src[1], src[2]);
       return;
 
    case GLSLstd450Cross: {
-      unsigned yzx[4] = { 1, 2, 0, 0 };
-      unsigned zxy[4] = { 2, 0, 1, 0 };
-      val->ssa->def =
-         nir_fsub(nb, nir_fmul(nb, nir_swizzle(nb, src[0], yzx, 3, true),
-                                   nir_swizzle(nb, src[1], zxy, 3, true)),
-                      nir_fmul(nb, nir_swizzle(nb, src[0], zxy, 3, true),
-                                   nir_swizzle(nb, src[1], yzx, 3, true)));
+      val->ssa->def = nir_cross3(nb, src[0], src[1]);
       return;
    }
 
    case GLSLstd450SmoothStep: {
-      /* t = clamp((x - edge0) / (edge1 - edge0), 0, 1) */
-      nir_ssa_def *t =
-         build_fclamp(nb, nir_fdiv(nb, nir_fsub(nb, src[2], src[0]),
-                                       nir_fsub(nb, src[1], src[0])),
-                          nir_imm_float(nb, 0.0), nir_imm_float(nb, 1.0));
-      /* result = t * t * (3 - 2 * t) */
-      val->ssa->def =
-         nir_fmul(nb, t, nir_fmul(nb, t,
-            nir_fsub(nb, nir_imm_float(nb, 3.0),
-                         nir_fmul(nb, nir_imm_float(nb, 2.0), t))));
+      val->ssa->def = nir_smoothstep(nb, src[0], src[1], src[2]);
       return;
    }
 
    case GLSLstd450FaceForward:
       val->ssa->def =
          nir_bcsel(nb, nir_flt(nb, nir_fdot(nb, src[2], src[1]),
-                                   nir_imm_float(nb, 0.0)),
+                                   NIR_IMM_FP(nb, 0.0)),
                        src[0], nir_fneg(nb, src[0]));
       return;
 
    case GLSLstd450Reflect:
       /* I - 2 * dot(N, I) * N */
       val->ssa->def =
-         nir_fsub(nb, src[0], nir_fmul(nb, nir_imm_float(nb, 2.0),
+         nir_fsub(nb, src[0], nir_fmul(nb, NIR_IMM_FP(nb, 2.0),
                               nir_fmul(nb, nir_fdot(nb, src[0], src[1]),
                                            src[1])));
       return;
@@ -589,8 +558,22 @@ handle_glsl450_alu(struct vtn_builder *b, enum GLSLstd450 entrypoint,
       nir_ssa_def *N = src[1];
       nir_ssa_def *eta = src[2];
       nir_ssa_def *n_dot_i = nir_fdot(nb, N, I);
-      nir_ssa_def *one = nir_imm_float(nb, 1.0);
-      nir_ssa_def *zero = nir_imm_float(nb, 0.0);
+      nir_ssa_def *one = NIR_IMM_FP(nb, 1.0);
+      nir_ssa_def *zero = NIR_IMM_FP(nb, 0.0);
+      /* According to the SPIR-V and GLSL specs, eta is always a float
+       * regardless of the type of the other operands. However in practice it
+       * seems that if you try to pass it a float then glslang will just
+       * promote it to a double and generate invalid SPIR-V. In order to
+       * support a hypothetical fixed version of glslang we’ll promote eta to
+       * double if the other operands are double also.
+       */
+      if (I->bit_size != eta->bit_size) {
+         nir_op conversion_op =
+            nir_type_conversion_op(nir_type_float | eta->bit_size,
+                                   nir_type_float | I->bit_size,
+                                   nir_rounding_mode_undef);
+         eta = nir_build_alu(nb, conversion_op, eta, NULL, NULL, NULL);
+      }
       /* k = 1.0 - eta * eta * (1.0 - dot(N, I) * dot(N, I)) */
       nir_ssa_def *k =
          nir_fsub(nb, one, nir_fmul(nb, eta, nir_fmul(nb, eta,
@@ -607,17 +590,17 @@ handle_glsl450_alu(struct vtn_builder *b, enum GLSLstd450 entrypoint,
    case GLSLstd450Sinh:
       /* 0.5 * (e^x - e^(-x)) */
       val->ssa->def =
-         nir_fmul(nb, nir_imm_float(nb, 0.5f),
-                      nir_fsub(nb, build_exp(nb, src[0]),
-                                   build_exp(nb, nir_fneg(nb, src[0]))));
+         nir_fmul_imm(nb, nir_fsub(nb, build_exp(nb, src[0]),
+                                       build_exp(nb, nir_fneg(nb, src[0]))),
+                          0.5f);
       return;
 
    case GLSLstd450Cosh:
       /* 0.5 * (e^x + e^(-x)) */
       val->ssa->def =
-         nir_fmul(nb, nir_imm_float(nb, 0.5f),
-                      nir_fadd(nb, build_exp(nb, src[0]),
-                                   build_exp(nb, nir_fneg(nb, src[0]))));
+         nir_fmul_imm(nb, nir_fadd(nb, build_exp(nb, src[0]),
+                                       build_exp(nb, nir_fneg(nb, src[0]))),
+                          0.5f);
       return;
 
    case GLSLstd450Tanh: {
@@ -628,30 +611,38 @@ handle_glsl450_alu(struct vtn_builder *b, enum GLSLstd450 entrypoint,
        * We clamp x to (-inf, +10] to avoid precision problems.  When x > 10,
        * e^2x is so much larger than 1.0 that 1.0 gets flushed to zero in the
        * computation e^2x +/- 1 so it can be ignored.
+       *
+       * For 16-bit precision we clamp x to (-inf, +4.2]: the largest fp16
+       * value is only 65,504, which e^(2x) exceeds well before x reaches the
+       * fp32 clamp of 10, and for x > 4.2, tanh(x) returns 1.0 in fp16 anyway.
        */
-      nir_ssa_def *x = nir_fmin(nb, src[0], nir_imm_float(nb, 10));
-      nir_ssa_def *exp2x = build_exp(nb, nir_fmul(nb, x, nir_imm_float(nb, 2)));
-      val->ssa->def = nir_fdiv(nb, nir_fsub(nb, exp2x, nir_imm_float(nb, 1)),
-                                   nir_fadd(nb, exp2x, nir_imm_float(nb, 1)));
+      const uint32_t bit_size = src[0]->bit_size;
+      const double clamped_x = bit_size > 16 ? 10.0 : 4.2;
+      nir_ssa_def *x = nir_fmin(nb, src[0],
+                                    nir_imm_floatN_t(nb, clamped_x, bit_size));
+      nir_ssa_def *exp2x = build_exp(nb, nir_fmul_imm(nb, x, 2.0));
+      val->ssa->def = nir_fdiv(nb, nir_fadd_imm(nb, exp2x, -1.0),
+                                   nir_fadd_imm(nb, exp2x, 1.0));
       return;
    }
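
The e^(2x) form used in this case follows from multiplying the defining ratio through by e^x / e^x:

    \tanh(x) = \frac{e^{x} - e^{-x}}{e^{x} + e^{-x}} = \frac{e^{2x} - 1}{e^{2x} + 1}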
 
    case GLSLstd450Asinh:
       val->ssa->def = nir_fmul(nb, nir_fsign(nb, src[0]),
          build_log(nb, nir_fadd(nb, nir_fabs(nb, src[0]),
-                       nir_fsqrt(nb, nir_fadd(nb, nir_fmul(nb, src[0], src[0]),
-                                                  nir_imm_float(nb, 1.0f))))));
+                       nir_fsqrt(nb, nir_fadd_imm(nb, nir_fmul(nb, src[0], src[0]),
+                                                      1.0f)))));
       return;
    case GLSLstd450Acosh:
       val->ssa->def = build_log(nb, nir_fadd(nb, src[0],
-         nir_fsqrt(nb, nir_fsub(nb, nir_fmul(nb, src[0], src[0]),
-                                    nir_imm_float(nb, 1.0f)))));
+         nir_fsqrt(nb, nir_fadd_imm(nb, nir_fmul(nb, src[0], src[0]),
+                                        -1.0f))));
       return;
    case GLSLstd450Atanh: {
-      nir_ssa_def *one = nir_imm_float(nb, 1.0);
-      val->ssa->def = nir_fmul(nb, nir_imm_float(nb, 0.5f),
-         build_log(nb, nir_fdiv(nb, nir_fadd(nb, one, src[0]),
-                                    nir_fsub(nb, one, src[0]))));
+      nir_ssa_def *one = nir_imm_floatN_t(nb, 1.0, src[0]->bit_size);
+      val->ssa->def =
+         nir_fmul_imm(nb, build_log(nb, nir_fdiv(nb, nir_fadd(nb, src[0], one),
+                                        nir_fsub(nb, one, src[0]))),
+                          0.5f);
       return;
    }
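
These three cases are direct translations of the standard logarithmic identities; the Asinh case multiplies by sign(x) and applies the identity to |x| so the odd symmetry stays exact:

    \operatorname{asinh}(x) = \operatorname{sign}(x)\,\ln\!\left(|x| + \sqrt{x^2 + 1}\right) \qquad \operatorname{acosh}(x) = \ln\!\left(x + \sqrt{x^2 - 1}\right) \qquad \operatorname{atanh}(x) = \tfrac{1}{2}\,\ln\frac{1 + x}{1 - x}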
 
@@ -660,8 +651,9 @@ handle_glsl450_alu(struct vtn_builder *b, enum GLSLstd450 entrypoint,
       return;
 
    case GLSLstd450Acos:
-      val->ssa->def = nir_fsub(nb, nir_imm_float(nb, M_PI_2f),
-                               build_asin(nb, src[0], 0.08132463, -0.02363318));
+      val->ssa->def =
+         nir_fsub(nb, nir_imm_floatN_t(nb, M_PI_2f, src[0]->bit_size),
+                      build_asin(nb, src[0], 0.08132463, -0.02363318));
       return;
 
    case GLSLstd450Atan:
@@ -673,22 +665,23 @@ handle_glsl450_alu(struct vtn_builder *b, enum GLSLstd450 entrypoint,
       return;
 
    case GLSLstd450Frexp: {
-      nir_ssa_def *exponent;
-      val->ssa->def = build_frexp(nb, src[0], &exponent);
-      nir_store_deref_var(nb, vtn_nir_deref(b, w[6]), exponent, 0xf);
+      nir_ssa_def *exponent = nir_frexp_exp(nb, src[0]);
+      val->ssa->def = nir_frexp_sig(nb, src[0]);
+      nir_store_deref(nb, vtn_nir_deref(b, w[6]), exponent, 0xf);
       return;
    }
 
    case GLSLstd450FrexpStruct: {
-      assert(glsl_type_is_struct(val->ssa->type));
-      val->ssa->elems[0]->def = build_frexp(nb, src[0],
-                                            &val->ssa->elems[1]->def);
+      vtn_assert(glsl_type_is_struct_or_ifc(val->ssa->type));
+      val->ssa->elems[0]->def = nir_frexp_sig(nb, src[0]);
+      val->ssa->elems[1]->def = nir_frexp_exp(nb, src[0]);
       return;
    }
 
    default:
       val->ssa->def =
-         nir_build_alu(&b->nb, vtn_nir_alu_op_for_spirv_glsl_opcode(entrypoint),
+         nir_build_alu(&b->nb,
+                       vtn_nir_alu_op_for_spirv_glsl_opcode(b, entrypoint),
                        src[0], src[1], src[2], NULL);
       return;
    }
@@ -707,45 +700,73 @@ handle_glsl450_interpolation(struct vtn_builder *b, enum GLSLstd450 opcode,
    nir_intrinsic_op op;
    switch (opcode) {
    case GLSLstd450InterpolateAtCentroid:
-      op = nir_intrinsic_interp_var_at_centroid;
+      op = nir_intrinsic_interp_deref_at_centroid;
       break;
    case GLSLstd450InterpolateAtSample:
-      op = nir_intrinsic_interp_var_at_sample;
+      op = nir_intrinsic_interp_deref_at_sample;
       break;
    case GLSLstd450InterpolateAtOffset:
-      op = nir_intrinsic_interp_var_at_offset;
+      op = nir_intrinsic_interp_deref_at_offset;
       break;
    default:
-      unreachable("Invalid opcode");
+      vtn_fail("Invalid opcode");
    }
 
    nir_intrinsic_instr *intrin = nir_intrinsic_instr_create(b->nb.shader, op);
 
-   nir_deref_var *deref = vtn_nir_deref(b, w[5]);
-   intrin->variables[0] = nir_deref_var_clone(deref, intrin);
+   struct vtn_pointer *ptr =
+      vtn_value(b, w[5], vtn_value_type_pointer)->pointer;
+   nir_deref_instr *deref = vtn_pointer_to_deref(b, ptr);
+
+   /* If the value we are interpolating has an index into a vector then
+    * interpolate the vector and index the result of that instead. This is
+    * necessary because the index will get generated as a series of nir_bcsel
+    * instructions so it would no longer be an input variable.
+    */
+   const bool vec_array_deref = deref->deref_type == nir_deref_type_array &&
+      glsl_type_is_vector(nir_deref_instr_parent(deref)->type);
+
+   nir_deref_instr *vec_deref = NULL;
+   if (vec_array_deref) {
+      vec_deref = deref;
+      deref = nir_deref_instr_parent(deref);
+   }
+   intrin->src[0] = nir_src_for_ssa(&deref->dest.ssa);
 
    switch (opcode) {
    case GLSLstd450InterpolateAtCentroid:
       break;
    case GLSLstd450InterpolateAtSample:
    case GLSLstd450InterpolateAtOffset:
-      intrin->src[0] = nir_src_for_ssa(vtn_ssa_value(b, w[6])->def);
+      intrin->src[1] = nir_src_for_ssa(vtn_ssa_value(b, w[6])->def);
       break;
    default:
-      unreachable("Invalid opcode");
+      vtn_fail("Invalid opcode");
    }
 
-   intrin->num_components = glsl_get_vector_elements(dest_type);
+   intrin->num_components = glsl_get_vector_elements(deref->type);
    nir_ssa_dest_init(&intrin->instr, &intrin->dest,
-                     glsl_get_vector_elements(dest_type),
-                     glsl_get_bit_size(dest_type), NULL);
-   val->ssa->def = &intrin->dest.ssa;
+                     glsl_get_vector_elements(deref->type),
+                     glsl_get_bit_size(deref->type), NULL);
 
    nir_builder_instr_insert(&b->nb, &intrin->instr);
+
+   if (vec_array_deref) {
+      assert(vec_deref);
+      if (nir_src_is_const(vec_deref->arr.index)) {
+         val->ssa->def = vtn_vector_extract(b, &intrin->dest.ssa,
+                                            nir_src_as_uint(vec_deref->arr.index));
+      } else {
+         val->ssa->def = vtn_vector_extract_dynamic(b, &intrin->dest.ssa,
+                                                    vec_deref->arr.index.ssa);
+      }
+   } else {
+      val->ssa->def = &intrin->dest.ssa;
+   }
 }
 
 bool
-vtn_handle_glsl450_instruction(struct vtn_builder *b, uint32_t ext_opcode,
+vtn_handle_glsl450_instruction(struct vtn_builder *b, SpvOp ext_opcode,
                                const uint32_t *w, unsigned count)
 {
    switch ((enum GLSLstd450)ext_opcode) {
@@ -766,7 +787,7 @@ vtn_handle_glsl450_instruction(struct vtn_builder *b, uint32_t ext_opcode,
    case GLSLstd450InterpolateAtCentroid:
    case GLSLstd450InterpolateAtSample:
    case GLSLstd450InterpolateAtOffset:
-      handle_glsl450_interpolation(b, ext_opcode, w, count);
+      handle_glsl450_interpolation(b, (enum GLSLstd450)ext_opcode, w, count);
       break;
 
    default: