gallivm/nir: support passing image index into image code.
src/gallium/auxiliary/gallivm/lp_bld_nir.c
index f2fb76aeba24540e5f1febf19c317f9d0d4f66fb..8daa0e09d8d1740e092db4d38428f2d77a431623 100644
@@ -116,6 +116,9 @@ static unsigned glsl_sampler_to_pipe(int sampler_dim, bool is_array)
    case GLSL_SAMPLER_DIM_3D:
       pipe_target = PIPE_TEXTURE_3D;
       break;
+   case GLSL_SAMPLER_DIM_MS:
+      pipe_target = is_array ? PIPE_TEXTURE_2D_ARRAY : PIPE_TEXTURE_2D;
+      break;
    case GLSL_SAMPLER_DIM_CUBE:
       pipe_target = is_array ? PIPE_TEXTURE_CUBE_ARRAY : PIPE_TEXTURE_CUBE;
       break;
@@ -278,7 +281,7 @@ static LLVMValueRef get_alu_src(struct lp_build_nir_context *bld_base,
          value = LLVMBuildExtractValue(gallivm->builder, value,
                                        src.swizzle[0], "");
       } else if (src_components == 1 && num_components > 1) {
-         LLVMValueRef values[] = {value, value, value, value};
+         LLVMValueRef values[] = {value, value, value, value, value, value, value, value, value, value, value, value, value, value, value, value};
          value = lp_nir_array_build_gather_values(builder, values, num_components);
       } else {
          LLVMValueRef arr = LLVMGetUndef(LLVMArrayType(LLVMTypeOf(LLVMBuildExtractValue(builder, value, 0, "")), num_components));
@@ -350,8 +353,13 @@ static LLVMValueRef split_64bit(struct lp_build_nir_context *bld_base,
    LLVMValueRef shuffles2[LP_MAX_VECTOR_WIDTH/32];
    int len = bld_base->base.type.length * 2;
    for (unsigned i = 0; i < bld_base->base.type.length; i++) {
+#if UTIL_ARCH_LITTLE_ENDIAN
       shuffles[i] = lp_build_const_int32(gallivm, i * 2);
       shuffles2[i] = lp_build_const_int32(gallivm, (i * 2) + 1);
+#else
+      shuffles[i] = lp_build_const_int32(gallivm, (i * 2) + 1);
+      shuffles2[i] = lp_build_const_int32(gallivm, (i * 2));
+#endif
    }
 
    src = LLVMBuildBitCast(gallivm->builder, src, LLVMVectorType(LLVMInt32TypeInContext(gallivm->context), len), "");
@@ -375,8 +383,13 @@ merge_64bit(struct lp_build_nir_context *bld_base,
    assert(len <= (2 * (LP_MAX_VECTOR_WIDTH/32)));
 
    for (i = 0; i < bld_base->base.type.length * 2; i+=2) {
+#if UTIL_ARCH_LITTLE_ENDIAN
       shuffles[i] = lp_build_const_int32(gallivm, i / 2);
       shuffles[i + 1] = lp_build_const_int32(gallivm, i / 2 + bld_base->base.type.length);
+#else
+      shuffles[i] = lp_build_const_int32(gallivm, i / 2 + bld_base->base.type.length);
+      shuffles[i + 1] = lp_build_const_int32(gallivm, i / 2);
+#endif
    }
    return LLVMBuildShuffleVector(builder, input, input2, LLVMConstVector(shuffles, len), "");
 }
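
The UTIL_ARCH_LITTLE_ENDIAN blocks above select which 32-bit lane of the bitcast vector holds the low and high halves of each 64-bit element. A minimal scalar sketch of that lane mapping (illustration only; lo_lane/hi_lane are made-up names, not gallivm helpers):

   /* Lane of the 32-bit view that holds the low/high half of 64-bit element i. */
   static unsigned lo_lane(unsigned i, bool little_endian)
   {
      return little_endian ? 2 * i : 2 * i + 1;
   }
   static unsigned hi_lane(unsigned i, bool little_endian)
   {
      return little_endian ? 2 * i + 1 : 2 * i;
   }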
@@ -385,6 +398,36 @@ static LLVMValueRef
 do_int_divide(struct lp_build_nir_context *bld_base,
               bool is_unsigned, unsigned src_bit_size,
               LLVMValueRef src, LLVMValueRef src2)
+{
+   struct gallivm_state *gallivm = bld_base->base.gallivm;
+   LLVMBuilderRef builder = gallivm->builder;
+   struct lp_build_context *int_bld = get_int_bld(bld_base, is_unsigned, src_bit_size);
+   struct lp_build_context *mask_bld = get_int_bld(bld_base, true, src_bit_size);
+   LLVMValueRef div_mask = lp_build_cmp(mask_bld, PIPE_FUNC_EQUAL, src2,
+                                        mask_bld->zero);
+
+   if (!is_unsigned) {
+      /* INT_MIN (0x80000000) / -1 (0xffffffff) causes sigfpe, seen with blender. */
+      div_mask = LLVMBuildAnd(builder, div_mask, lp_build_const_int_vec(gallivm, int_bld->type, 0x7fffffff), "");
+   }
+   LLVMValueRef divisor = LLVMBuildOr(builder,
+                                      div_mask,
+                                      src2, "");
+   LLVMValueRef result = lp_build_div(int_bld, src, divisor);
+
+   if (!is_unsigned) {
+      LLVMValueRef not_div_mask = LLVMBuildNot(builder, div_mask, "");
+      return LLVMBuildAnd(builder, not_div_mask, result, "");
+   } else
+      /* udiv by zero is guaranteed to return 0xffffffff, at least with d3d10;
+       * may as well do the same for idiv. */
+      return LLVMBuildOr(builder, div_mask, result, "");
+}
+
+static LLVMValueRef
+do_int_mod(struct lp_build_nir_context *bld_base,
+           bool is_unsigned, unsigned src_bit_size,
+           LLVMValueRef src, LLVMValueRef src2)
 {
    struct gallivm_state *gallivm = bld_base->base.gallivm;
    LLVMBuilderRef builder = gallivm->builder;
@@ -394,9 +437,7 @@ do_int_divide(struct lp_build_nir_context *bld_base,
    LLVMValueRef divisor = LLVMBuildOr(builder,
                                       div_mask,
                                       src2, "");
-   LLVMValueRef result = lp_build_div(int_bld, src, divisor);
-   /* udiv by zero is guaranteed to return 0xffffffff at least with d3d10
-    * may as well do same for idiv */
+   LLVMValueRef result = lp_build_mod(int_bld, src, divisor);
    return LLVMBuildOr(builder, div_mask, result, "");
 }
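
do_int_divide and do_int_mod above guard against faulting divides by patching zero divisors before the division and fixing up the result afterwards. A minimal scalar sketch of the unsigned path (illustration only; safe_udiv is a made-up name, not part of the patch):

   /* When the divisor is zero, force it to all-ones so the divide cannot trap,
    * then OR the mask back in so the lane returns 0xffffffff, matching d3d10. */
   static uint32_t safe_udiv(uint32_t a, uint32_t b)
   {
      uint32_t div_mask = (b == 0) ? 0xffffffffu : 0u;
      uint32_t divisor  = div_mask | b;      /* 0 becomes 0xffffffff */
      return div_mask | (a / divisor);       /* division by zero yields 0xffffffff */
   }

The signed path additionally clears the top bit of div_mask so a zero divisor becomes 0x7fffffff rather than -1, avoiding the INT_MIN / -1 trap, and ANDs the result with the inverted mask instead of ORing.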
 
@@ -474,9 +515,13 @@ static LLVMValueRef do_alu_action(struct lp_build_nir_context *bld_base,
       result = lp_build_cos(&bld_base->base, src[0]);
       break;
    case nir_op_fddx:
+   case nir_op_fddx_coarse:
+   case nir_op_fddx_fine:
       result = lp_build_ddx(&bld_base->base, src[0]);
       break;
    case nir_op_fddy:
+   case nir_op_fddy_coarse:
+   case nir_op_fddy_fine:
       result = lp_build_ddy(&bld_base->base, src[0]);
       break;
    case nir_op_fdiv:
@@ -645,8 +690,7 @@ static LLVMValueRef do_alu_action(struct lp_build_nir_context *bld_base,
                            src[0], src[1]);
       break;
    case nir_op_irem:
-      result = lp_build_mod(get_int_bld(bld_base, false, src_bit_size[0]),
-                            src[0], src[1]);
+      result = do_int_mod(bld_base, false, src_bit_size[0], src[0], src[1]);
       break;
    case nir_op_ishl: {
       struct lp_build_context *uint_bld = get_int_bld(bld_base, true, src_bit_size[0]);
@@ -676,7 +720,7 @@ static LLVMValueRef do_alu_action(struct lp_build_nir_context *bld_base,
    case nir_op_isub:
       result = lp_build_sub(get_int_bld(bld_base, false, src_bit_size[0]),
                             src[0], src[1]);
-
+      break;
    case nir_op_ixor:
       result = lp_build_xor(get_int_bld(bld_base, false, src_bit_size[0]),
                             src[0], src[1]);
@@ -742,7 +786,7 @@ static LLVMValueRef do_alu_action(struct lp_build_nir_context *bld_base,
       result = lp_build_min(get_int_bld(bld_base, true, src_bit_size[0]), src[0], src[1]);
       break;
    case nir_op_umod:
-      result = lp_build_mod(get_int_bld(bld_base, true, src_bit_size[0]), src[0], src[1]);
+      result = do_int_mod(bld_base, true, src_bit_size[0], src[0], src[1]);
       break;
    case nir_op_umul_high: {
       LLVMValueRef hi_bits;
@@ -778,6 +822,8 @@ static void visit_alu(struct lp_build_nir_context *bld_base, const nir_alu_instr
    case nir_op_vec2:
    case nir_op_vec3:
    case nir_op_vec4:
+   case nir_op_vec8:
+   case nir_op_vec16:
       src_components = 1;
       break;
    case nir_op_pack_half_2x16:
@@ -800,7 +846,7 @@ static void visit_alu(struct lp_build_nir_context *bld_base, const nir_alu_instr
    }
 
    LLVMValueRef result[NIR_MAX_VEC_COMPONENTS];
-   if (instr->op == nir_op_vec4 || instr->op == nir_op_vec3 || instr->op == nir_op_vec2) {
+   if (instr->op == nir_op_vec4 || instr->op == nir_op_vec3 || instr->op == nir_op_vec2 || instr->op == nir_op_vec8 || instr->op == nir_op_vec16) {
       for (unsigned i = 0; i < nir_op_infos[instr->op].num_inputs; i++) {
          result[i] = cast_type(bld_base, src[i], nir_op_infos[instr->op].input_types[i], src_bit_size[i]);
       }
@@ -829,7 +875,7 @@ static void visit_load_const(struct lp_build_nir_context *bld_base,
    LLVMValueRef result[NIR_MAX_VEC_COMPONENTS];
    struct lp_build_context *int_bld = get_int_bld(bld_base, true, instr->def.bit_size);
    for (unsigned i = 0; i < instr->def.num_components; i++)
-      result[i] = lp_build_const_int_vec(bld_base->base.gallivm, int_bld->type, instr->value[i].u64);
+      result[i] = lp_build_const_int_vec(bld_base->base.gallivm, int_bld->type, instr->def.bit_size == 32 ? instr->value[i].u32 : instr->value[i].u64);
    assign_ssa_dest(bld_base, &instr->def, result);
 }
 
@@ -913,6 +959,7 @@ static void visit_load_var(struct lp_build_nir_context *bld_base,
    nir_variable_mode mode = deref->mode;
    unsigned const_index;
    LLVMValueRef indir_index;
+   LLVMValueRef indir_vertex_index = NULL;
    unsigned vertex_index = 0;
    unsigned nc = nir_dest_num_components(instr->dest);
    unsigned bit_size = nir_dest_bit_size(instr->dest);
@@ -921,12 +968,19 @@ static void visit_load_var(struct lp_build_nir_context *bld_base,
          var->data.mode == nir_var_shader_in;
       bool gs_in = bld_base->shader->info.stage == MESA_SHADER_GEOMETRY &&
          var->data.mode == nir_var_shader_in;
+      bool tcs_in = bld_base->shader->info.stage == MESA_SHADER_TESS_CTRL &&
+         var->data.mode == nir_var_shader_in;
+      bool tcs_out = bld_base->shader->info.stage == MESA_SHADER_TESS_CTRL &&
+         var->data.mode == nir_var_shader_out && !var->data.patch;
+      bool tes_in = bld_base->shader->info.stage == MESA_SHADER_TESS_EVAL &&
+         var->data.mode == nir_var_shader_in && !var->data.patch;
+
       mode = var->data.mode;
 
-      get_deref_offset(bld_base, deref, vs_in, gs_in ? &vertex_index : NULL, NULL,
+      get_deref_offset(bld_base, deref, vs_in, gs_in ? &vertex_index : NULL, (tcs_in || tcs_out || tes_in) ? &indir_vertex_index : NULL,
                        &const_index, &indir_index);
    }
-   bld_base->load_var(bld_base, mode, nc, bit_size, var, vertex_index, const_index, indir_index, result);
+   bld_base->load_var(bld_base, mode, nc, bit_size, var, vertex_index, indir_vertex_index, const_index, indir_index, result);
 }
 
 static void
@@ -940,11 +994,14 @@ visit_store_var(struct lp_build_nir_context *bld_base,
    unsigned bit_size = nir_src_bit_size(instr->src[1]);
    LLVMValueRef src = get_src(bld_base, instr->src[1]);
    unsigned const_index = 0;
-   LLVMValueRef indir_index;
-   if (var)
-      get_deref_offset(bld_base, deref, false, NULL, NULL,
+   LLVMValueRef indir_index, indir_vertex_index = NULL;
+   if (var) {
+      bool tcs_out = bld_base->shader->info.stage == MESA_SHADER_TESS_CTRL &&
+         var->data.mode == nir_var_shader_out && !var->data.patch;
+      get_deref_offset(bld_base, deref, false, NULL, tcs_out ? &indir_vertex_index : NULL,
                        &const_index, &indir_index);
-   bld_base->store_var(bld_base, mode, bit_size, instr->num_components, writemask, const_index, var, src);
+   }
+   bld_base->store_var(bld_base, mode, instr->num_components, bit_size, var, writemask, indir_vertex_index, const_index, indir_index, src);
 }
 
 static void visit_load_ubo(struct lp_build_nir_context *bld_base,
@@ -1020,6 +1077,10 @@ static void visit_load_image(struct lp_build_nir_context *bld_base,
    LLVMValueRef coords[5];
    struct lp_img_params params;
    const struct glsl_type *type = glsl_without_array(var->type);
+   unsigned const_index;
+   LLVMValueRef indir_index;
+   get_deref_offset(bld_base, deref, false, NULL, NULL,
+                    &const_index, &indir_index);
 
    memset(&params, 0, sizeof(params));
    params.target = glsl_sampler_to_pipe(glsl_get_sampler_dim(type), glsl_sampler_type_is_array(type));
@@ -1031,7 +1092,10 @@ static void visit_load_image(struct lp_build_nir_context *bld_base,
    params.coords = coords;
    params.outdata = result;
    params.img_op = LP_IMG_LOAD;
-   params.image_index = var->data.binding;
+   if (glsl_get_sampler_dim(type) == GLSL_SAMPLER_DIM_MS)
+      params.ms_index = get_src(bld_base, instr->src[2]);
+   params.image_index = var->data.binding + (indir_index ? 0 : const_index);
+   params.image_index_offset = indir_index;
    bld_base->image_op(bld_base, &params);
 }
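
The pattern introduced here and repeated in the store, atomic, and size hunks below is the core of this commit: the constant part of the deref offset is folded into the image/texture index, and any dynamic part is passed on separately. Condensed from the hunks above and below (all names taken from the diff):

   get_deref_offset(bld_base, deref, false, NULL, NULL, &const_index, &indir_index);
   params.image_index        = var->data.binding + (indir_index ? 0 : const_index);
   params.image_index_offset = indir_index;   /* NULL when the index is constant */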
 
@@ -1047,6 +1111,10 @@ static void visit_store_image(struct lp_build_nir_context *bld_base,
    LLVMValueRef coords[5];
    struct lp_img_params params;
    const struct glsl_type *type = glsl_without_array(var->type);
+   unsigned const_index;
+   LLVMValueRef indir_index;
+   get_deref_offset(bld_base, deref, false, NULL, NULL,
+                    &const_index, &indir_index);
 
    memset(&params, 0, sizeof(params));
    params.target = glsl_sampler_to_pipe(glsl_get_sampler_dim(type), glsl_sampler_type_is_array(type));
@@ -1060,8 +1128,11 @@ static void visit_store_image(struct lp_build_nir_context *bld_base,
       params.indata[i] = LLVMBuildExtractValue(builder, in_val, i, "");
       params.indata[i] = LLVMBuildBitCast(builder, params.indata[i], bld_base->base.vec_type, "");
    }
+   if (glsl_get_sampler_dim(type) == GLSL_SAMPLER_DIM_MS)
+      params.ms_index = get_src(bld_base, instr->src[2]);
    params.img_op = LP_IMG_STORE;
-   params.image_index = var->data.binding;
+   params.image_index = var->data.binding + (indir_index ? 0 : const_index);
+   params.image_index_offset = indir_index;
 
    if (params.target == PIPE_TEXTURE_1D_ARRAY)
       coords[2] = coords[1];
@@ -1081,6 +1152,10 @@ static void visit_atomic_image(struct lp_build_nir_context *bld_base,
    LLVMValueRef in_val = get_src(bld_base, instr->src[3]);
    LLVMValueRef coords[5];
    const struct glsl_type *type = glsl_without_array(var->type);
+   unsigned const_index;
+   LLVMValueRef indir_index;
+   get_deref_offset(bld_base, deref, false, NULL, NULL,
+                    &const_index, &indir_index);
 
    memset(&params, 0, sizeof(params));
 
@@ -1122,6 +1197,8 @@ static void visit_atomic_image(struct lp_build_nir_context *bld_base,
    if (params.target == PIPE_TEXTURE_1D_ARRAY)
       coords[2] = coords[1];
    params.coords = coords;
+   if (glsl_get_sampler_dim(type) == GLSL_SAMPLER_DIM_MS)
+      params.ms_index = get_src(bld_base, instr->src[2]);
    if (instr->intrinsic == nir_intrinsic_image_deref_atomic_comp_swap) {
       LLVMValueRef cas_val = get_src(bld_base, instr->src[4]);
       params.indata[0] = in_val;
@@ -1131,7 +1208,8 @@ static void visit_atomic_image(struct lp_build_nir_context *bld_base,
 
    params.outdata = result;
    params.img_op = (instr->intrinsic == nir_intrinsic_image_deref_atomic_comp_swap) ? LP_IMG_ATOMIC_CAS : LP_IMG_ATOMIC;
-   params.image_index = var->data.binding;
+   params.image_index = var->data.binding + (indir_index ? 0 : const_index);
+   params.image_index_offset = indir_index;
 
    bld_base->image_op(bld_base, &params);
 }
@@ -1144,9 +1222,37 @@ static void visit_image_size(struct lp_build_nir_context *bld_base,
    nir_deref_instr *deref = nir_instr_as_deref(instr->src[0].ssa->parent_instr);
    nir_variable *var = nir_deref_instr_get_variable(deref);
    struct lp_sampler_size_query_params params = { 0 };
-   params.texture_unit = var->data.binding;
-   params.target = glsl_sampler_to_pipe(glsl_get_sampler_dim(var->type), glsl_sampler_type_is_array(var->type));
+   unsigned const_index;
+   LLVMValueRef indir_index;
+   const struct glsl_type *type = glsl_without_array(var->type);
+   get_deref_offset(bld_base, deref, false, NULL, NULL,
+                    &const_index, &indir_index);
+   params.texture_unit = var->data.binding + (indir_index ? 0 : const_index);
+   params.texture_unit_offset = indir_index;
+   params.target = glsl_sampler_to_pipe(glsl_get_sampler_dim(type), glsl_sampler_type_is_array(type));
+   params.sizes_out = result;
+
+   bld_base->image_size(bld_base, &params);
+}
+
+static void visit_image_samples(struct lp_build_nir_context *bld_base,
+                                nir_intrinsic_instr *instr,
+                                LLVMValueRef result[NIR_MAX_VEC_COMPONENTS])
+{
+   nir_deref_instr *deref = nir_instr_as_deref(instr->src[0].ssa->parent_instr);
+   nir_variable *var = nir_deref_instr_get_variable(deref);
+   struct lp_sampler_size_query_params params = { 0 };
+   unsigned const_index;
+   LLVMValueRef indir_index;
+   const struct glsl_type *type = glsl_without_array(var->type);
+   get_deref_offset(bld_base, deref, false, NULL, NULL,
+                    &const_index, &indir_index);
+
+   params.texture_unit = var->data.binding + (indir_index ? 0 : const_index);
+   params.texture_unit_offset = indir_index;
+   params.target = glsl_sampler_to_pipe(glsl_get_sampler_dim(type), glsl_sampler_type_is_array(type));
    params.sizes_out = result;
+   params.samples_only = true;
 
    bld_base->image_size(bld_base, &params);
 }
@@ -1247,6 +1353,35 @@ static void visit_global_atomic(struct lp_build_nir_context *bld_base,
    bld_base->atomic_global(bld_base, instr->intrinsic, addr_bitsize, addr, val, val2, &result[0]);
 }
 
+static void visit_interp(struct lp_build_nir_context *bld_base,
+                         nir_intrinsic_instr *instr,
+                         LLVMValueRef result[NIR_MAX_VEC_COMPONENTS])
+{
+   struct gallivm_state *gallivm = bld_base->base.gallivm;
+   LLVMBuilderRef builder = gallivm->builder;
+   nir_deref_instr *deref = nir_instr_as_deref(instr->src[0].ssa->parent_instr);
+   unsigned num_components = nir_dest_num_components(instr->dest);
+   nir_variable *var = nir_deref_instr_get_variable(deref);
+   unsigned const_index;
+   LLVMValueRef indir_index;
+   LLVMValueRef offsets[2] = { NULL, NULL };
+   get_deref_offset(bld_base, deref, false, NULL, NULL,
+                    &const_index, &indir_index);
+   bool centroid = instr->intrinsic == nir_intrinsic_interp_deref_at_centroid;
+   bool sample = false;
+   if (instr->intrinsic == nir_intrinsic_interp_deref_at_offset) {
+      for (unsigned i = 0; i < 2; i++) {
+         offsets[i] = LLVMBuildExtractValue(builder, get_src(bld_base, instr->src[1]), i, "");
+         offsets[i] = cast_type(bld_base, offsets[i], nir_type_float, 32);
+      }
+   } else if (instr->intrinsic == nir_intrinsic_interp_deref_at_sample) {
+      offsets[0] = get_src(bld_base, instr->src[1]);
+      offsets[0] = cast_type(bld_base, offsets[0], nir_type_int, 32);
+      sample = true;
+   }
+   bld_base->interp_at(bld_base, num_components, var, centroid, sample, const_index, indir_index, offsets, result);
+}
+
 static void visit_intrinsic(struct lp_build_nir_context *bld_base,
                             nir_intrinsic_instr *instr)
 {
@@ -1283,8 +1418,18 @@ static void visit_intrinsic(struct lp_build_nir_context *bld_base,
    case nir_intrinsic_load_draw_id:
    case nir_intrinsic_load_local_group_size:
    case nir_intrinsic_load_work_dim:
+   case nir_intrinsic_load_tess_coord:
+   case nir_intrinsic_load_tess_level_outer:
+   case nir_intrinsic_load_tess_level_inner:
+   case nir_intrinsic_load_patch_vertices_in:
+   case nir_intrinsic_load_sample_id:
+   case nir_intrinsic_load_sample_pos:
+   case nir_intrinsic_load_sample_mask_in:
       bld_base->sysval_intrin(bld_base, instr, result);
       break;
+   case nir_intrinsic_load_helper_invocation:
+      bld_base->helper_invocation(bld_base, &result[0]);
+      break;
    case nir_intrinsic_discard_if:
    case nir_intrinsic_discard:
       visit_discard(bld_base, instr);
@@ -1328,6 +1473,9 @@ static void visit_intrinsic(struct lp_build_nir_context *bld_base,
    case nir_intrinsic_image_deref_size:
       visit_image_size(bld_base, instr, result);
       break;
+   case nir_intrinsic_image_deref_samples:
+      visit_image_samples(bld_base, instr, result);
+      break;
    case nir_intrinsic_load_shared:
       visit_shared_load(bld_base, instr, result);
       break;
@@ -1346,10 +1494,15 @@ static void visit_intrinsic(struct lp_build_nir_context *bld_base,
    case nir_intrinsic_shared_atomic_comp_swap:
       visit_shared_atomic(bld_base, instr, result);
       break;
-   case nir_intrinsic_barrier:
+   case nir_intrinsic_control_barrier:
       visit_barrier(bld_base);
       break;
+   case nir_intrinsic_group_memory_barrier:
    case nir_intrinsic_memory_barrier:
+   case nir_intrinsic_memory_barrier_shared:
+   case nir_intrinsic_memory_barrier_buffer:
+   case nir_intrinsic_memory_barrier_image:
+   case nir_intrinsic_memory_barrier_tcs_patch:
       break;
    case nir_intrinsic_load_kernel_input:
       visit_load_kernel_input(bld_base, instr, result);
@@ -1372,6 +1525,16 @@ static void visit_intrinsic(struct lp_build_nir_context *bld_base,
    case nir_intrinsic_global_atomic_comp_swap:
       visit_global_atomic(bld_base, instr, result);
       break;
+   case nir_intrinsic_vote_all:
+   case nir_intrinsic_vote_any:
+   case nir_intrinsic_vote_ieq:
+      bld_base->vote(bld_base, cast_type(bld_base, get_src(bld_base, instr->src[0]), nir_type_int, 32), instr, result);
+      break;
+   case nir_intrinsic_interp_deref_at_offset:
+   case nir_intrinsic_interp_deref_at_centroid:
+   case nir_intrinsic_interp_deref_at_sample:
+      visit_interp(bld_base, instr, result);
+      break;
    default:
       assert(0);
       break;
@@ -1383,15 +1546,18 @@ static void visit_intrinsic(struct lp_build_nir_context *bld_base,
 
 static void visit_txs(struct lp_build_nir_context *bld_base, nir_tex_instr *instr)
 {
-   struct lp_sampler_size_query_params params;
+   struct lp_sampler_size_query_params params = { 0 };
    LLVMValueRef sizes_out[NIR_MAX_VEC_COMPONENTS];
    LLVMValueRef explicit_lod = NULL;
-
+   LLVMValueRef texture_unit_offset = NULL;
    for (unsigned i = 0; i < instr->num_srcs; i++) {
       switch (instr->src[i].src_type) {
       case nir_tex_src_lod:
          explicit_lod = cast_type(bld_base, get_src(bld_base, instr->src[i].src), nir_type_int, 32);
          break;
+      case nir_tex_src_texture_offset:
+         texture_unit_offset = get_src(bld_base, instr->src[i].src);
+         break;
       default:
          break;
       }
@@ -1402,6 +1568,8 @@ static void visit_txs(struct lp_build_nir_context *bld_base, nir_tex_instr *inst
    params.explicit_lod = explicit_lod;
    params.is_sviewinfo = TRUE;
    params.sizes_out = sizes_out;
+   params.samples_only = (instr->op == nir_texop_texture_samples);
+   params.texture_unit_offset = texture_unit_offset;
 
    if (instr->op == nir_texop_query_levels)
       params.explicit_lod = bld_base->uint_bld.zero;
@@ -1433,12 +1601,13 @@ static void visit_tex(struct lp_build_nir_context *bld_base, nir_tex_instr *inst
    LLVMBuilderRef builder = gallivm->builder;
    LLVMValueRef coords[5];
    LLVMValueRef offsets[3] = { NULL };
-   LLVMValueRef explicit_lod = NULL, projector = NULL;
+   LLVMValueRef explicit_lod = NULL, projector = NULL, ms_index = NULL;
    struct lp_sampler_params params;
    struct lp_derivatives derivs;
    unsigned sample_key = 0;
    nir_deref_instr *texture_deref_instr = NULL;
    nir_deref_instr *sampler_deref_instr = NULL;
+   LLVMValueRef texture_unit_offset = NULL;
    LLVMValueRef texel[NIR_MAX_VEC_COMPONENTS];
    unsigned lod_src = 0;
    LLVMValueRef coord_undef = LLVMGetUndef(bld_base->base.int_vec_type);
@@ -1446,15 +1615,16 @@ static void visit_tex(struct lp_build_nir_context *bld_base, nir_tex_instr *inst
    memset(&params, 0, sizeof(params));
    enum lp_sampler_lod_property lod_property = LP_SAMPLER_LOD_SCALAR;
 
-   if (instr->op == nir_texop_txs || instr->op == nir_texop_query_levels) {
+   if (instr->op == nir_texop_txs || instr->op == nir_texop_query_levels || instr->op == nir_texop_texture_samples) {
       visit_txs(bld_base, instr);
       return;
    }
    if (instr->op == nir_texop_txf || instr->op == nir_texop_txf_ms)
       sample_key |= LP_SAMPLER_OP_FETCH << LP_SAMPLER_OP_TYPE_SHIFT;
-   else if (instr->op == nir_texop_tg4)
+   else if (instr->op == nir_texop_tg4) {
       sample_key |= LP_SAMPLER_OP_GATHER << LP_SAMPLER_OP_TYPE_SHIFT;
-   else if (instr->op == nir_texop_lod)
+      sample_key |= (instr->component << LP_SAMPLER_GATHER_COMP_SHIFT);
+   } else if (instr->op == nir_texop_lod)
       sample_key |= LP_SAMPLER_OP_LODQ << LP_SAMPLER_OP_TYPE_SHIFT;
    for (unsigned i = 0; i < instr->num_srcs; i++) {
       switch (instr->src[i].src_type) {
@@ -1536,15 +1706,25 @@ static void visit_tex(struct lp_build_nir_context *bld_base, nir_tex_instr *inst
          LLVMValueRef offset_val = get_src(bld_base, instr->src[i].src);
          sample_key |= LP_SAMPLER_OFFSETS;
          if (offset_cnt == 1)
-            offsets[0] = offset_val;
+            offsets[0] = cast_type(bld_base, offset_val, nir_type_int, 32);
          else {
-            for (unsigned chan = 0; chan < offset_cnt; ++chan)
+            for (unsigned chan = 0; chan < offset_cnt; ++chan) {
                offsets[chan] = LLVMBuildExtractValue(builder, offset_val,
                                                      chan, "");
+               offsets[chan] = cast_type(bld_base, offsets[chan], nir_type_int, 32);
+            }
          }
          break;
       }
       case nir_tex_src_ms_index:
+         sample_key |= LP_SAMPLER_FETCH_MS;
+         ms_index = cast_type(bld_base, get_src(bld_base, instr->src[i].src), nir_type_int, 32);
+         break;
+
+      case nir_tex_src_texture_offset:
+         texture_unit_offset = get_src(bld_base, instr->src[i].src);
+         break;
+      case nir_tex_src_sampler_offset:
          break;
       default:
          assert(0);
@@ -1602,10 +1782,12 @@ static void visit_tex(struct lp_build_nir_context *bld_base, nir_tex_instr *inst
    params.sample_key = sample_key;
    params.offsets = offsets;
    params.texture_index = base_index;
+   params.texture_index_offset = texture_unit_offset;
    params.sampler_index = base_index;
    params.coords = coords;
    params.texel = texel;
    params.lod = explicit_lod;
+   params.ms_index = ms_index;
    bld_base->tex(bld_base, &params);
    assign_dest(bld_base, &instr->dest, texel);
 }
@@ -1779,7 +1961,7 @@ bool lp_build_nir_llvm(
    nir_convert_from_ssa(nir, true);
    nir_lower_locals_to_regs(nir);
    nir_remove_dead_derefs(nir);
-   nir_remove_dead_variables(nir, nir_var_function_temp);
+   nir_remove_dead_variables(nir, nir_var_function_temp, NULL);
 
    nir_foreach_variable(variable, &nir->outputs)
       handle_shader_output_decl(bld_base, nir, variable);
@@ -1815,6 +1997,10 @@ void lp_build_opt_nir(struct nir_shader *nir)
       progress = false;
       NIR_PASS_V(nir, nir_opt_constant_folding);
       NIR_PASS_V(nir, nir_opt_algebraic);
+      NIR_PASS_V(nir, nir_lower_pack);
+
+      nir_lower_tex_options options = { .lower_tex_without_implicit_lod = true };
+      NIR_PASS_V(nir, nir_lower_tex, &options);
    } while (progress);
    nir_lower_bool_to_int32(nir);
 }