gallivm: optimize lp_build_minify for sse
[mesa.git] / src / gallium / auxiliary / gallivm / lp_bld_sample_soa.c
index 9f781c55bc5dcb6326270b9047cd9db3366184d0..e8c04d1e6c53343a9a9503202b1dcd49b378dcf4 100644 (file)
@@ -73,7 +73,6 @@
  */
 static void
 lp_build_sample_texel_soa(struct lp_build_sample_context *bld,
-                          unsigned sampler_unit,
                           LLVMValueRef width,
                           LLVMValueRef height,
                           LLVMValueRef depth,
@@ -696,7 +695,6 @@ lp_build_sample_comparefunc(struct lp_build_sample_context *bld,
  */
 static void
 lp_build_sample_image_nearest(struct lp_build_sample_context *bld,
-                              unsigned sampler_unit,
                               LLVMValueRef size,
                               LLVMValueRef row_stride_vec,
                               LLVMValueRef img_stride_vec,
@@ -764,7 +762,7 @@ lp_build_sample_image_nearest(struct lp_build_sample_context *bld,
    /*
     * Get texture colors.
     */
-   lp_build_sample_texel_soa(bld, sampler_unit,
+   lp_build_sample_texel_soa(bld,
                              width_vec, height_vec, depth_vec,
                              x, y, z,
                              row_stride_vec, img_stride_vec,
@@ -824,14 +822,22 @@ lp_build_masklerp2d(struct lp_build_context *bld,
    return lp_build_lerp(bld, weight1, val0, val1, 0);
 }
 
+/*
+ * This is a fair amount of code for something OpenGL merely recommends
+ * but does not require.
+ */
+#define ACCURATE_CUBE_CORNERS 1
+
 /**
  * Generate code to sample a mipmap level with linear filtering.
  * If sampling a cube texture, r = cube face in [0,5].
+ * If linear_mask is present, only pixels having their mask set
+ * will receive linear filtering; the rest will use nearest.
  */
 static void
 lp_build_sample_image_linear(struct lp_build_sample_context *bld,
-                             unsigned sampler_unit,
                              LLVMValueRef size,
+                             LLVMValueRef linear_mask,
                              LLVMValueRef row_stride_vec,
                              LLVMValueRef img_stride_vec,
                              LLVMValueRef data_ptr,
@@ -840,6 +846,9 @@ lp_build_sample_image_linear(struct lp_build_sample_context *bld,
                              const LLVMValueRef *offsets,
                              LLVMValueRef colors_out[4])
 {
+   LLVMBuilderRef builder = bld->gallivm->builder;
+   struct lp_build_context *ivec_bld = &bld->int_coord_bld;
+   struct lp_build_context *coord_bld = &bld->coord_bld;
    const unsigned dims = bld->dims;
    LLVMValueRef width_vec;
    LLVMValueRef height_vec;
@@ -848,10 +857,20 @@ lp_build_sample_image_linear(struct lp_build_sample_context *bld,
    LLVMValueRef flt_width_vec;
    LLVMValueRef flt_height_vec;
    LLVMValueRef flt_depth_vec;
-   LLVMValueRef x0, y0 = NULL, z0 = NULL, x1, y1 = NULL, z1 = NULL;
+   LLVMValueRef fall_off[4], have_corners;
+   LLVMValueRef z1 = NULL;
+   LLVMValueRef z00 = NULL, z01 = NULL, z10 = NULL, z11 = NULL;
+   LLVMValueRef x00 = NULL, x01 = NULL, x10 = NULL, x11 = NULL;
+   LLVMValueRef y00 = NULL, y01 = NULL, y10 = NULL, y11 = NULL;
    LLVMValueRef s_fpart, t_fpart = NULL, r_fpart = NULL;
+   LLVMValueRef xs[4], ys[4], zs[4];
    LLVMValueRef neighbors[2][2][4];
-   int chan;
+   int chan, texel_index;
+   boolean seamless_cube_filter, accurate_cube_corners;
+
+   seamless_cube_filter = bld->static_texture_state->target == PIPE_TEXTURE_CUBE &&
+                          bld->static_sampler_state->seamless_cube_map;
+   accurate_cube_corners = ACCURATE_CUBE_CORNERS && seamless_cube_filter;
 
    lp_build_extract_image_sizes(bld,
                                 &bld->int_size_bld,
@@ -870,54 +889,246 @@ lp_build_sample_image_linear(struct lp_build_sample_context *bld,
    /*
     * Compute integer texcoords.
     */
-   lp_build_sample_wrap_linear(bld, coords[0], width_vec,
-                               flt_width_vec, offsets[0],
-                               bld->static_texture_state->pot_width,
-                               bld->static_sampler_state->wrap_s,
-                               &x0, &x1, &s_fpart);
-   lp_build_name(x0, "tex.x0.wrapped");
-   lp_build_name(x1, "tex.x1.wrapped");
 
-   if (dims >= 2) {
-      lp_build_sample_wrap_linear(bld, coords[1], height_vec,
-                                  flt_height_vec, offsets[1],
-                                  bld->static_texture_state->pot_height,
-                                  bld->static_sampler_state->wrap_t,
-                                  &y0, &y1, &t_fpart);
-      lp_build_name(y0, "tex.y0.wrapped");
-      lp_build_name(y1, "tex.y1.wrapped");
+   if (!seamless_cube_filter) {
+      lp_build_sample_wrap_linear(bld, coords[0], width_vec,
+                                  flt_width_vec, offsets[0],
+                                  bld->static_texture_state->pot_width,
+                                  bld->static_sampler_state->wrap_s,
+                                  &x00, &x01, &s_fpart);
+      lp_build_name(x00, "tex.x0.wrapped");
+      lp_build_name(x01, "tex.x1.wrapped");
+      x10 = x00;
+      x11 = x01;
 
-      if (dims == 3) {
-         lp_build_sample_wrap_linear(bld, coords[2], depth_vec,
-                                     flt_depth_vec, offsets[2],
-                                     bld->static_texture_state->pot_depth,
-                                     bld->static_sampler_state->wrap_r,
-                                     &z0, &z1, &r_fpart);
-         lp_build_name(z0, "tex.z0.wrapped");
-         lp_build_name(z1, "tex.z1.wrapped");
+      if (dims >= 2) {
+         lp_build_sample_wrap_linear(bld, coords[1], height_vec,
+                                     flt_height_vec, offsets[1],
+                                     bld->static_texture_state->pot_height,
+                                     bld->static_sampler_state->wrap_t,
+                                     &y00, &y10, &t_fpart);
+         lp_build_name(y00, "tex.y0.wrapped");
+         lp_build_name(y10, "tex.y1.wrapped");
+         y01 = y00;
+         y11 = y10;
+
+         if (dims == 3) {
+            lp_build_sample_wrap_linear(bld, coords[2], depth_vec,
+                                        flt_depth_vec, offsets[2],
+                                        bld->static_texture_state->pot_depth,
+                                        bld->static_sampler_state->wrap_r,
+                                        &z00, &z1, &r_fpart);
+            z01 = z10 = z11 = z00;
+            lp_build_name(z00, "tex.z0.wrapped");
+            lp_build_name(z1, "tex.z1.wrapped");
+         }
+      }
+      if (bld->static_texture_state->target == PIPE_TEXTURE_CUBE ||
+          bld->static_texture_state->target == PIPE_TEXTURE_1D_ARRAY ||
+          bld->static_texture_state->target == PIPE_TEXTURE_2D_ARRAY) {
+         z00 = z01 = z10 = z11 = z1 = coords[2];  /* cube face or layer */
+         lp_build_name(z00, "tex.z0.layer");
+         lp_build_name(z1, "tex.z1.layer");
       }
    }
-   if (bld->static_texture_state->target == PIPE_TEXTURE_CUBE ||
-       bld->static_texture_state->target == PIPE_TEXTURE_1D_ARRAY ||
-       bld->static_texture_state->target == PIPE_TEXTURE_2D_ARRAY) {
-      z0 = z1 = coords[2];  /* cube face or layer */
-      lp_build_name(z0, "tex.z0.layer");
-      lp_build_name(z1, "tex.z1.layer");
-   }
+   else {
+      struct lp_build_if_state edge_if;
+      LLVMTypeRef int1t;
+      LLVMValueRef new_faces[4], new_xcoords[4][2], new_ycoords[4][2];
+      LLVMValueRef coord, have_edge, have_corner;
+      LLVMValueRef fall_off_ym_notxm, fall_off_ym_notxp, fall_off_x, fall_off_y;
+      LLVMValueRef fall_off_yp_notxm, fall_off_yp_notxp;
+      LLVMValueRef x0, x1, y0, y1, y0_clamped, y1_clamped;
+      LLVMValueRef face = coords[2];
+      LLVMValueRef half = lp_build_const_vec(bld->gallivm, coord_bld->type, 0.5f);
+      LLVMValueRef length_minus_one = lp_build_sub(ivec_bld, width_vec, ivec_bld->one);
+      /* XXX drop height calcs. Could (should) do this without seamless filtering too */
+      height_vec = width_vec;
+      flt_height_vec = flt_width_vec;
+
+      /* XXX the overflow logic is actually sort of duplicated with trilinear,
+       * since an overflow in one mip should also have a corresponding overflow
+       * in another.
+       */
+      /* should always have normalized coords, and offsets are undefined */
+      assert(bld->static_sampler_state->normalized_coords);
+      coord = lp_build_mul(coord_bld, coords[0], flt_width_vec);
+      /* instead of clamp, build mask if overflowed */
+      coord = lp_build_sub(coord_bld, coord, half);
+      /* convert to int, compute lerp weight */
+      /* not ideal with AVX (and no AVX2) */
+      lp_build_ifloor_fract(coord_bld, coord, &x0, &s_fpart);
+      x1 = lp_build_add(ivec_bld, x0, ivec_bld->one);
+      coord = lp_build_mul(coord_bld, coords[1], flt_height_vec);
+      coord = lp_build_sub(coord_bld, coord, half);
+      lp_build_ifloor_fract(coord_bld, coord, &y0, &t_fpart);
+      y1 = lp_build_add(ivec_bld, y0, ivec_bld->one);
+
+      fall_off[0] = lp_build_cmp(ivec_bld, PIPE_FUNC_LESS, x0, ivec_bld->zero);
+      fall_off[1] = lp_build_cmp(ivec_bld, PIPE_FUNC_GREATER, x1, length_minus_one);
+      fall_off[2] = lp_build_cmp(ivec_bld, PIPE_FUNC_LESS, y0, ivec_bld->zero);
+      fall_off[3] = lp_build_cmp(ivec_bld, PIPE_FUNC_GREATER, y1, length_minus_one);
 
+      fall_off_x = lp_build_or(ivec_bld, fall_off[0], fall_off[1]);
+      fall_off_y = lp_build_or(ivec_bld, fall_off[2], fall_off[3]);
+      have_edge = lp_build_or(ivec_bld, fall_off_x, fall_off_y);
+      have_edge = lp_build_any_true_range(ivec_bld, ivec_bld->type.length, have_edge);
+
+      /* needed for accurate corner filtering branch later, rely on 0 init */
+      int1t = LLVMInt1TypeInContext(bld->gallivm->context);
+      have_corners = lp_build_alloca(bld->gallivm, int1t, "have_corner");
+
+      for (texel_index = 0; texel_index < 4; texel_index++) {
+         xs[texel_index] = lp_build_alloca(bld->gallivm, ivec_bld->vec_type, "xs");
+         ys[texel_index] = lp_build_alloca(bld->gallivm, ivec_bld->vec_type, "ys");
+         zs[texel_index] = lp_build_alloca(bld->gallivm, ivec_bld->vec_type, "zs");
+      }
+
+      lp_build_if(&edge_if, bld->gallivm, have_edge);
+
+      have_corner = lp_build_and(ivec_bld, fall_off_x, fall_off_y);
+      have_corner = lp_build_any_true_range(ivec_bld, ivec_bld->type.length, have_corner);
+      LLVMBuildStore(builder, have_corner, have_corners);
+
+      /*
+       * Need to feed clamped values here for cheap corner handling,
+       * but only for the y coord (when a texel falls off both edges we
+       * treat it as falling off the x one only) - this should be sufficient.
+       */
+      y0_clamped = lp_build_max(ivec_bld, y0, ivec_bld->zero);
+      y1_clamped = lp_build_min(ivec_bld, y1, length_minus_one);
+
+      /*
+       * Get all possible new coords.
+       */
+      lp_build_cube_new_coords(ivec_bld, face,
+                               x0, x1, y0_clamped, y1_clamped,
+                               length_minus_one,
+                               new_faces, new_xcoords, new_ycoords);
+
+      /* handle fall off x-, x+ direction */
+      /* determine new coords, face (the two fall_off vars can't both be true at the same time) */
+      x00 = lp_build_select(ivec_bld, fall_off[0], new_xcoords[0][0], x0);
+      y00 = lp_build_select(ivec_bld, fall_off[0], new_ycoords[0][0], y0_clamped);
+      x10 = lp_build_select(ivec_bld, fall_off[0], new_xcoords[0][1], x0);
+      y10 = lp_build_select(ivec_bld, fall_off[0], new_ycoords[0][1], y1_clamped);
+      x01 = lp_build_select(ivec_bld, fall_off[1], new_xcoords[1][0], x1);
+      y01 = lp_build_select(ivec_bld, fall_off[1], new_ycoords[1][0], y0_clamped);
+      x11 = lp_build_select(ivec_bld, fall_off[1], new_xcoords[1][1], x1);
+      y11 = lp_build_select(ivec_bld, fall_off[1], new_ycoords[1][1], y1_clamped);
+
+      z00 = z10 = lp_build_select(ivec_bld, fall_off[0], new_faces[0], face);
+      z01 = z11 = lp_build_select(ivec_bld, fall_off[1], new_faces[1], face);
+
+      /* handle fall off y-, y+ direction */
+      /*
+       * Cheap corner logic: just hack things up so a texel doesn't fall
+       * off both sides (which means filter weights will be wrong, but we'll only
+       * use valid texels in the filter).
+       * This means, however, that (y) coords must additionally be clamped (see above).
+       * This corner handling should be fully OpenGL (but not d3d10) compliant.
+       */
+      fall_off_ym_notxm = lp_build_andnot(ivec_bld, fall_off[2], fall_off[0]);
+      fall_off_ym_notxp = lp_build_andnot(ivec_bld, fall_off[2], fall_off[1]);
+      fall_off_yp_notxm = lp_build_andnot(ivec_bld, fall_off[3], fall_off[0]);
+      fall_off_yp_notxp = lp_build_andnot(ivec_bld, fall_off[3], fall_off[1]);
+
+      x00 = lp_build_select(ivec_bld, fall_off_ym_notxm, new_xcoords[2][0], x00);
+      y00 = lp_build_select(ivec_bld, fall_off_ym_notxm, new_ycoords[2][0], y00);
+      x01 = lp_build_select(ivec_bld, fall_off_ym_notxp, new_xcoords[2][1], x01);
+      y01 = lp_build_select(ivec_bld, fall_off_ym_notxp, new_ycoords[2][1], y01);
+      x10 = lp_build_select(ivec_bld, fall_off_yp_notxm, new_xcoords[3][0], x10);
+      y10 = lp_build_select(ivec_bld, fall_off_yp_notxm, new_ycoords[3][0], y10);
+      x11 = lp_build_select(ivec_bld, fall_off_yp_notxp, new_xcoords[3][1], x11);
+      y11 = lp_build_select(ivec_bld, fall_off_yp_notxp, new_ycoords[3][1], y11);
+
+      z00 = lp_build_select(ivec_bld, fall_off_ym_notxm, new_faces[2], z00);
+      z01 = lp_build_select(ivec_bld, fall_off_ym_notxp, new_faces[2], z01);
+      z10 = lp_build_select(ivec_bld, fall_off_yp_notxm, new_faces[3], z10);
+      z11 = lp_build_select(ivec_bld, fall_off_yp_notxp, new_faces[3], z11);
+
+      LLVMBuildStore(builder, x00, xs[0]);
+      LLVMBuildStore(builder, x01, xs[1]);
+      LLVMBuildStore(builder, x10, xs[2]);
+      LLVMBuildStore(builder, x11, xs[3]);
+      LLVMBuildStore(builder, y00, ys[0]);
+      LLVMBuildStore(builder, y01, ys[1]);
+      LLVMBuildStore(builder, y10, ys[2]);
+      LLVMBuildStore(builder, y11, ys[3]);
+      LLVMBuildStore(builder, z00, zs[0]);
+      LLVMBuildStore(builder, z01, zs[1]);
+      LLVMBuildStore(builder, z10, zs[2]);
+      LLVMBuildStore(builder, z11, zs[3]);
+
+      lp_build_else(&edge_if);
+
+      LLVMBuildStore(builder, x0, xs[0]);
+      LLVMBuildStore(builder, x1, xs[1]);
+      LLVMBuildStore(builder, x0, xs[2]);
+      LLVMBuildStore(builder, x1, xs[3]);
+      LLVMBuildStore(builder, y0, ys[0]);
+      LLVMBuildStore(builder, y0, ys[1]);
+      LLVMBuildStore(builder, y1, ys[2]);
+      LLVMBuildStore(builder, y1, ys[3]);
+      LLVMBuildStore(builder, face, zs[0]);
+      LLVMBuildStore(builder, face, zs[1]);
+      LLVMBuildStore(builder, face, zs[2]);
+      LLVMBuildStore(builder, face, zs[3]);
+
+      lp_build_endif(&edge_if);
+
+      x00 = LLVMBuildLoad(builder, xs[0], "");
+      x01 = LLVMBuildLoad(builder, xs[1], "");
+      x10 = LLVMBuildLoad(builder, xs[2], "");
+      x11 = LLVMBuildLoad(builder, xs[3], "");
+      y00 = LLVMBuildLoad(builder, ys[0], "");
+      y01 = LLVMBuildLoad(builder, ys[1], "");
+      y10 = LLVMBuildLoad(builder, ys[2], "");
+      y11 = LLVMBuildLoad(builder, ys[3], "");
+      z00 = LLVMBuildLoad(builder, zs[0], "");
+      z01 = LLVMBuildLoad(builder, zs[1], "");
+      z10 = LLVMBuildLoad(builder, zs[2], "");
+      z11 = LLVMBuildLoad(builder, zs[3], "");
+   }
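
For reference, here is a minimal scalar sketch of the per-pixel computation the
vector code above performs (the edge_fall_off helper and its signature are made
up for illustration and are not part of gallivm), assuming normalized coords and
a square cube face of 'size' texels:

#include <math.h>

/* illustrative only - mirrors lp_build_ifloor_fract plus the fall_off compares */
static void
edge_fall_off(float s, int size, int *x0, int *x1, float *weight,
              int *falls_off_lo, int *falls_off_hi)
{
   float u = s * (float)size - 0.5f;   /* to texel space, centered on the texel */
   *x0 = (int)floorf(u);               /* left/lower texel */
   *x1 = *x0 + 1;                      /* right/upper texel */
   *weight = u - floorf(u);            /* lerp weight (fractional part) */
   *falls_off_lo = *x0 < 0;            /* that texel lies on an adjacent face */
   *falls_off_hi = *x1 > size - 1;
}
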
+
+   if (linear_mask) {
+      /*
+       * Whack filter weights into place. Whichever texel had more weight is
+       * the one that would have been selected by nearest filtering, hence
+       * just use 100% weight for it.
+       */
+      struct lp_build_context *c_bld = &bld->coord_bld;
+      LLVMValueRef w1_mask, w1_weight;
+      LLVMValueRef half = lp_build_const_vec(bld->gallivm, c_bld->type, 0.5f);
+
+      w1_mask = lp_build_cmp(c_bld, PIPE_FUNC_GREATER, s_fpart, half);
+      /* this select is really just a "and" */
+      w1_weight = lp_build_select(c_bld, w1_mask, c_bld->one, c_bld->zero);
+      s_fpart = lp_build_select(c_bld, linear_mask, s_fpart, w1_weight);
+      if (dims >= 2) {
+         w1_mask = lp_build_cmp(c_bld, PIPE_FUNC_GREATER, t_fpart, half);
+         w1_weight = lp_build_select(c_bld, w1_mask, c_bld->one, c_bld->zero);
+         t_fpart = lp_build_select(c_bld, linear_mask, t_fpart, w1_weight);
+         if (dims == 3) {
+            w1_mask = lp_build_cmp(c_bld, PIPE_FUNC_GREATER, r_fpart, half);
+            w1_weight = lp_build_select(c_bld, w1_mask, c_bld->one, c_bld->zero);
+            r_fpart = lp_build_select(c_bld, linear_mask, r_fpart, w1_weight);
+         }
+      }
+   }
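
The weight trick in the linear_mask block above can be illustrated with a tiny
scalar sketch (the helper below is hypothetical, not part of the patch): pixels
flagged as nearest get their lerp weight snapped to 0 or 1, so the linear path
returns exactly the texel nearest filtering would have picked.

/* illustrative only */
static float
snap_weight_for_nearest(float fpart, int wants_linear)
{
   /* the texel holding more than half the weight is the nearest one */
   float nearest_weight = (fpart > 0.5f) ? 1.0f : 0.0f;
   return wants_linear ? fpart : nearest_weight;
}
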
 
    /*
     * Get texture colors.
     */
    /* get x0/x1 texels */
-   lp_build_sample_texel_soa(bld, sampler_unit,
+   lp_build_sample_texel_soa(bld,
                              width_vec, height_vec, depth_vec,
-                             x0, y0, z0,
+                             x00, y00, z00,
                              row_stride_vec, img_stride_vec,
                              data_ptr, mipoffsets, neighbors[0][0]);
-   lp_build_sample_texel_soa(bld, sampler_unit,
+   lp_build_sample_texel_soa(bld,
                              width_vec, height_vec, depth_vec,
-                             x1, y0, z0,
+                             x01, y01, z01,
                              row_stride_vec, img_stride_vec,
                              data_ptr, mipoffsets, neighbors[0][1]);
 
@@ -943,20 +1154,125 @@ lp_build_sample_image_linear(struct lp_build_sample_context *bld,
    }
    else {
       /* 2D/3D texture */
-      LLVMValueRef colors0[4];
+      struct lp_build_if_state corner_if;
+      LLVMValueRef colors0[4], colorss[4];
 
       /* get x0/x1 texels at y1 */
-      lp_build_sample_texel_soa(bld, sampler_unit,
+      lp_build_sample_texel_soa(bld,
                                 width_vec, height_vec, depth_vec,
-                                x0, y1, z0,
+                                x10, y10, z10,
                                 row_stride_vec, img_stride_vec,
                                 data_ptr, mipoffsets, neighbors[1][0]);
-      lp_build_sample_texel_soa(bld, sampler_unit,
+      lp_build_sample_texel_soa(bld,
                                 width_vec, height_vec, depth_vec,
-                                x1, y1, z0,
+                                x11, y11, z11,
                                 row_stride_vec, img_stride_vec,
                                 data_ptr, mipoffsets, neighbors[1][1]);
 
+      /*
+       * To avoid having to duplicate the linear_mask / fetch code, use
+       * another branch here (on the corner condition, though the edge
+       * condition would work as well).
+       */
+      if (accurate_cube_corners) {
+         LLVMValueRef w00, w01, w10, w11, wx0, wy0;
+         LLVMValueRef c_weight, c00, c01, c10, c11;
+         LLVMValueRef have_corner, one_third, tmp;
+
+         colorss[0] = lp_build_alloca(bld->gallivm, coord_bld->vec_type, "cs");
+         colorss[1] = lp_build_alloca(bld->gallivm, coord_bld->vec_type, "cs");
+         colorss[2] = lp_build_alloca(bld->gallivm, coord_bld->vec_type, "cs");
+         colorss[3] = lp_build_alloca(bld->gallivm, coord_bld->vec_type, "cs");
+
+         have_corner = LLVMBuildLoad(builder, have_corners, "");
+
+         lp_build_if(&corner_if, bld->gallivm, have_corner);
+
+         /*
+          * We can't use the standard 2d lerp as we need per-element weights
+          * in case of corners, so just calculate the bilinear result as
+          * w00*s00 + w01*s01 + w10*s10 + w11*s11.
+          * (This is actually less work than a 2d lerp, 7 vs. 9 instructions,
+          * but calculating the weights needs another 6, so overall it's probably
+          * no slower than a 2d lerp for 4 channels since the weights only need
+          * to be calculated once - of course fixing up the weights has additional cost.)
+          */
+         wx0 = lp_build_sub(coord_bld, coord_bld->one, s_fpart);
+         wy0 = lp_build_sub(coord_bld, coord_bld->one, t_fpart);
+         w00 = lp_build_mul(coord_bld, wx0, wy0);
+         w01 = lp_build_mul(coord_bld, s_fpart, wy0);
+         w10 = lp_build_mul(coord_bld, wx0, t_fpart);
+         w11 = lp_build_mul(coord_bld, s_fpart, t_fpart);
+
+         /* find corner weight */
+         c00 = lp_build_and(ivec_bld, fall_off[0], fall_off[2]);
+         c_weight = lp_build_select(coord_bld, c00, w00, coord_bld->zero);
+         c01 = lp_build_and(ivec_bld, fall_off[1], fall_off[2]);
+         c_weight = lp_build_select(coord_bld, c01, w01, c_weight);
+         c10 = lp_build_and(ivec_bld, fall_off[0], fall_off[3]);
+         c_weight = lp_build_select(coord_bld, c10, w10, c_weight);
+         c11 = lp_build_and(ivec_bld, fall_off[1], fall_off[3]);
+         c_weight = lp_build_select(coord_bld, c11, w11, c_weight);
+
+         /*
+          * add 1/3 of the corner weight to each of the 3 other samples
+          * and null out corner weight
+          */
+         one_third = lp_build_const_vec(bld->gallivm, coord_bld->type, 1.0f/3.0f);
+         c_weight = lp_build_mul(coord_bld, c_weight, one_third);
+         w00 = lp_build_add(coord_bld, w00, c_weight);
+         c00 = LLVMBuildBitCast(builder, c00, coord_bld->vec_type, "");
+         w00 = lp_build_andnot(coord_bld, w00, c00);
+         w01 = lp_build_add(coord_bld, w01, c_weight);
+         c01 = LLVMBuildBitCast(builder, c01, coord_bld->vec_type, "");
+         w01 = lp_build_andnot(coord_bld, w01, c01);
+         w10 = lp_build_add(coord_bld, w10, c_weight);
+         c10 = LLVMBuildBitCast(builder, c10, coord_bld->vec_type, "");
+         w10 = lp_build_andnot(coord_bld, w10, c10);
+         w11 = lp_build_add(coord_bld, w11, c_weight);
+         c11 = LLVMBuildBitCast(builder, c11, coord_bld->vec_type, "");
+         w11 = lp_build_andnot(coord_bld, w11, c11);
+
+         if (bld->static_sampler_state->compare_mode == PIPE_TEX_COMPARE_NONE) {
+            for (chan = 0; chan < 4; chan++) {
+               colors0[chan] = lp_build_mul(coord_bld, w00, neighbors[0][0][chan]);
+               tmp = lp_build_mul(coord_bld, w01, neighbors[0][1][chan]);
+               colors0[chan] = lp_build_add(coord_bld, tmp, colors0[chan]);
+               tmp = lp_build_mul(coord_bld, w10, neighbors[1][0][chan]);
+               colors0[chan] = lp_build_add(coord_bld, tmp, colors0[chan]);
+               tmp = lp_build_mul(coord_bld, w11, neighbors[1][1][chan]);
+               colors0[chan] = lp_build_add(coord_bld, tmp, colors0[chan]);
+            }
+         }
+         else {
+            LLVMValueRef cmpval00, cmpval01, cmpval10, cmpval11;
+            cmpval00 = lp_build_sample_comparefunc(bld, coords[4], neighbors[0][0][0]);
+            cmpval01 = lp_build_sample_comparefunc(bld, coords[4], neighbors[0][1][0]);
+            cmpval10 = lp_build_sample_comparefunc(bld, coords[4], neighbors[1][0][0]);
+            cmpval11 = lp_build_sample_comparefunc(bld, coords[4], neighbors[1][1][0]);
+            /* inputs to interpolation are just masks, so simply add the masked weights together */
+            cmpval00 = LLVMBuildBitCast(builder, cmpval00, coord_bld->vec_type, "");
+            cmpval01 = LLVMBuildBitCast(builder, cmpval01, coord_bld->vec_type, "");
+            cmpval10 = LLVMBuildBitCast(builder, cmpval10, coord_bld->vec_type, "");
+            cmpval11 = LLVMBuildBitCast(builder, cmpval11, coord_bld->vec_type, "");
+            colors0[0] = lp_build_and(coord_bld, w00, cmpval00);
+            tmp = lp_build_and(coord_bld, w01, cmpval01);
+            colors0[0] = lp_build_add(coord_bld, tmp, colors0[0]);
+            tmp = lp_build_and(coord_bld, w10, cmpval10);
+            colors0[0] = lp_build_add(coord_bld, tmp, colors0[0]);
+            tmp = lp_build_and(coord_bld, w11, cmpval11);
+            colors0[0] = lp_build_add(coord_bld, tmp, colors0[0]);
+            colors0[1] = colors0[2] = colors0[3] = colors0[0];
+         }
+
+         LLVMBuildStore(builder, colors0[0], colorss[0]);
+         LLVMBuildStore(builder, colors0[1], colorss[1]);
+         LLVMBuildStore(builder, colors0[2], colorss[2]);
+         LLVMBuildStore(builder, colors0[3], colorss[3]);
+
+         lp_build_else(&corner_if);
+      }
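
The accurate corner handling above boils down to the following scalar sketch
(filter_with_corner is a made-up helper for illustration): the texel falling off
two edges gets weight zero, its bilinear weight is split evenly among the three
valid texels, and the result is the plain weighted sum of the four samples.

/* illustrative only */
static float
filter_with_corner(const float w_in[4], const float texel[4], int corner)
{
   float w[4], result = 0.0f;
   float extra = w_in[corner] / 3.0f;   /* 1/3 of the corner weight per sample */
   int i;
   for (i = 0; i < 4; i++)
      w[i] = (i == corner) ? 0.0f : w_in[i] + extra;
   for (i = 0; i < 4; i++)
      result += w[i] * texel[i];
   return result;
}
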
+
       if (bld->static_sampler_state->compare_mode == PIPE_TEX_COMPARE_NONE) {
          /* Bilinear interpolate the four samples from the 2D image / 3D slice */
          for (chan = 0; chan < 4; chan++) {
@@ -980,29 +1296,43 @@ lp_build_sample_image_linear(struct lp_build_sample_context *bld,
          colors0[1] = colors0[2] = colors0[3] = colors0[0];
       }
 
+      if (accurate_cube_corners) {
+         LLVMBuildStore(builder, colors0[0], colorss[0]);
+         LLVMBuildStore(builder, colors0[1], colorss[1]);
+         LLVMBuildStore(builder, colors0[2], colorss[2]);
+         LLVMBuildStore(builder, colors0[3], colorss[3]);
+
+         lp_build_endif(&corner_if);
+
+         colors0[0] = LLVMBuildLoad(builder, colorss[0], "");
+         colors0[1] = LLVMBuildLoad(builder, colorss[1], "");
+         colors0[2] = LLVMBuildLoad(builder, colorss[2], "");
+         colors0[3] = LLVMBuildLoad(builder, colorss[3], "");
+      }
+
       if (dims == 3) {
          LLVMValueRef neighbors1[2][2][4];
          LLVMValueRef colors1[4];
 
          /* get x0/x1/y0/y1 texels at z1 */
-         lp_build_sample_texel_soa(bld, sampler_unit,
+         lp_build_sample_texel_soa(bld,
                                    width_vec, height_vec, depth_vec,
-                                   x0, y0, z1,
+                                   x00, y00, z1,
                                    row_stride_vec, img_stride_vec,
                                    data_ptr, mipoffsets, neighbors1[0][0]);
-         lp_build_sample_texel_soa(bld, sampler_unit,
+         lp_build_sample_texel_soa(bld,
                                    width_vec, height_vec, depth_vec,
-                                   x1, y0, z1,
+                                   x01, y01, z1,
                                    row_stride_vec, img_stride_vec,
                                    data_ptr, mipoffsets, neighbors1[0][1]);
-         lp_build_sample_texel_soa(bld, sampler_unit,
+         lp_build_sample_texel_soa(bld,
                                    width_vec, height_vec, depth_vec,
-                                   x0, y1, z1,
+                                   x10, y10, z1,
                                    row_stride_vec, img_stride_vec,
                                    data_ptr, mipoffsets, neighbors1[1][0]);
-         lp_build_sample_texel_soa(bld, sampler_unit,
+         lp_build_sample_texel_soa(bld,
                                    width_vec, height_vec, depth_vec,
-                                   x1, y1, z1,
+                                   x11, y11, z1,
                                    row_stride_vec, img_stride_vec,
                                    data_ptr, mipoffsets, neighbors1[1][1]);
 
@@ -1053,13 +1383,12 @@ lp_build_sample_image_linear(struct lp_build_sample_context *bld,
 
 /**
  * Sample the texture/mipmap using given image filter and mip filter.
- * data0_ptr and data1_ptr point to the two mipmap levels to sample
- * from.  width0/1_vec, height0/1_vec, depth0/1_vec indicate their sizes.
+ * ilevel0 and ilevel1 indicate the two mipmap levels to sample
+ * from (vectors or scalars).
  * If we're using nearest miplevel sampling the '1' values will be null/unused.
  */
 static void
 lp_build_sample_mipmap(struct lp_build_sample_context *bld,
-                       unsigned sampler_unit,
                        unsigned img_filter,
                        unsigned mip_filter,
                        LLVMValueRef *coords,
@@ -1087,7 +1416,7 @@ lp_build_sample_mipmap(struct lp_build_sample_context *bld,
    lp_build_mipmap_level_sizes(bld, ilevel0,
                                &size0,
                                &row_stride0_vec, &img_stride0_vec);
-   if (bld->num_lods == 1) {
+   if (bld->num_mips == 1) {
       data_ptr0 = lp_build_get_mipmap_level(bld, ilevel0);
    }
    else {
@@ -1096,16 +1425,14 @@ lp_build_sample_mipmap(struct lp_build_sample_context *bld,
       mipoff0 = lp_build_get_mip_offsets(bld, ilevel0);
    }
    if (img_filter == PIPE_TEX_FILTER_NEAREST) {
-      lp_build_sample_image_nearest(bld, sampler_unit,
-                                    size0,
+      lp_build_sample_image_nearest(bld, size0,
                                     row_stride0_vec, img_stride0_vec,
                                     data_ptr0, mipoff0, coords, offsets,
                                     colors0);
    }
    else {
       assert(img_filter == PIPE_TEX_FILTER_LINEAR);
-      lp_build_sample_image_linear(bld, sampler_unit,
-                                   size0,
+      lp_build_sample_image_linear(bld, size0, NULL,
                                    row_stride0_vec, img_stride0_vec,
                                    data_ptr0, mipoff0, coords, offsets,
                                    colors0);
@@ -1123,7 +1450,7 @@ lp_build_sample_mipmap(struct lp_build_sample_context *bld,
       /* need_lerp = lod_fpart > 0 */
       if (bld->num_lods == 1) {
          need_lerp = LLVMBuildFCmp(builder, LLVMRealUGT,
-                                   lod_fpart, bld->levelf_bld.zero,
+                                   lod_fpart, bld->lodf_bld.zero,
                                    "need_lerp");
       }
       else {
@@ -1131,28 +1458,28 @@ lp_build_sample_mipmap(struct lp_build_sample_context *bld,
           * We'll do mip filtering if any of the quads (or individual
           * pixel in case of per-pixel lod) need it.
           * It might be better to split the vectors here and only fetch/filter
-          * quads which need it.
-          */
-         /*
-          * We unfortunately need to clamp lod_fpart here since we can get
-          * negative values which would screw up filtering if not all
-          * lod_fpart values have same sign.
+          * quads which need it (if there's one lod per quad).
           */
-         lod_fpart = lp_build_max(&bld->levelf_bld, lod_fpart,
-                                  bld->levelf_bld.zero);
-         need_lerp = lp_build_compare(bld->gallivm, bld->levelf_bld.type,
+         need_lerp = lp_build_compare(bld->gallivm, bld->lodf_bld.type,
                                       PIPE_FUNC_GREATER,
-                                      lod_fpart, bld->levelf_bld.zero);
-         need_lerp = lp_build_any_true_range(&bld->leveli_bld, bld->num_lods, need_lerp);
+                                      lod_fpart, bld->lodf_bld.zero);
+         need_lerp = lp_build_any_true_range(&bld->lodi_bld, bld->num_lods, need_lerp);
       }
 
       lp_build_if(&if_ctx, bld->gallivm, need_lerp);
       {
+         /*
+          * We unfortunately need to clamp lod_fpart here since we can get
+          * negative values which would screw up filtering if not all
+          * lod_fpart values have same sign.
+          */
+         lod_fpart = lp_build_max(&bld->lodf_bld, lod_fpart,
+                                  bld->lodf_bld.zero);
          /* sample the second mipmap level */
          lp_build_mipmap_level_sizes(bld, ilevel1,
                                      &size1,
                                      &row_stride1_vec, &img_stride1_vec);
-         if (bld->num_lods == 1) {
+         if (bld->num_mips == 1) {
             data_ptr1 = lp_build_get_mipmap_level(bld, ilevel1);
          }
          else {
@@ -1160,15 +1487,13 @@ lp_build_sample_mipmap(struct lp_build_sample_context *bld,
             mipoff1 = lp_build_get_mip_offsets(bld, ilevel1);
          }
          if (img_filter == PIPE_TEX_FILTER_NEAREST) {
-            lp_build_sample_image_nearest(bld, sampler_unit,
-                                          size1,
+            lp_build_sample_image_nearest(bld, size1,
                                           row_stride1_vec, img_stride1_vec,
                                           data_ptr1, mipoff1, coords, offsets,
                                           colors1);
          }
          else {
-            lp_build_sample_image_linear(bld, sampler_unit,
-                                         size1,
+            lp_build_sample_image_linear(bld, size1, NULL,
                                          row_stride1_vec, img_stride1_vec,
                                          data_ptr1, mipoff1, coords, offsets,
                                          colors1);
@@ -1178,7 +1503,124 @@ lp_build_sample_mipmap(struct lp_build_sample_context *bld,
 
          if (bld->num_lods != bld->coord_type.length)
             lod_fpart = lp_build_unpack_broadcast_aos_scalars(bld->gallivm,
-                                                              bld->levelf_bld.type,
+                                                              bld->lodf_bld.type,
+                                                              bld->texel_bld.type,
+                                                              lod_fpart);
+
+         for (chan = 0; chan < 4; chan++) {
+            colors0[chan] = lp_build_lerp(&bld->texel_bld, lod_fpart,
+                                          colors0[chan], colors1[chan],
+                                          0);
+            LLVMBuildStore(builder, colors0[chan], colors_out[chan]);
+         }
+      }
+      lp_build_endif(&if_ctx);
+   }
+}
+
+
+/**
+ * Sample the texture/mipmap using the given mip filter, and using
+ * both nearest and linear filtering at the same time depending
+ * on linear_mask.
+ * lod can be per quad but linear_mask is always per pixel.
+ * ilevel0 and ilevel1 indicate the two mipmap levels to sample
+ * from (vectors or scalars).
+ * If we're using nearest miplevel sampling the '1' values will be null/unused.
+ */
+static void
+lp_build_sample_mipmap_both(struct lp_build_sample_context *bld,
+                            LLVMValueRef linear_mask,
+                            unsigned mip_filter,
+                            LLVMValueRef *coords,
+                            const LLVMValueRef *offsets,
+                            LLVMValueRef ilevel0,
+                            LLVMValueRef ilevel1,
+                            LLVMValueRef lod_fpart,
+                            LLVMValueRef lod_positive,
+                            LLVMValueRef *colors_out)
+{
+   LLVMBuilderRef builder = bld->gallivm->builder;
+   LLVMValueRef size0 = NULL;
+   LLVMValueRef size1 = NULL;
+   LLVMValueRef row_stride0_vec = NULL;
+   LLVMValueRef row_stride1_vec = NULL;
+   LLVMValueRef img_stride0_vec = NULL;
+   LLVMValueRef img_stride1_vec = NULL;
+   LLVMValueRef data_ptr0 = NULL;
+   LLVMValueRef data_ptr1 = NULL;
+   LLVMValueRef mipoff0 = NULL;
+   LLVMValueRef mipoff1 = NULL;
+   LLVMValueRef colors0[4], colors1[4];
+   unsigned chan;
+
+   /* sample the first mipmap level */
+   lp_build_mipmap_level_sizes(bld, ilevel0,
+                               &size0,
+                               &row_stride0_vec, &img_stride0_vec);
+   if (bld->num_mips == 1) {
+      data_ptr0 = lp_build_get_mipmap_level(bld, ilevel0);
+   }
+   else {
+      /* This path should work for num_lods 1 too but is slightly less efficient */
+      data_ptr0 = bld->base_ptr;
+      mipoff0 = lp_build_get_mip_offsets(bld, ilevel0);
+   }
+
+   lp_build_sample_image_linear(bld, size0, linear_mask,
+                                row_stride0_vec, img_stride0_vec,
+                                data_ptr0, mipoff0, coords, offsets,
+                                colors0);
+
+   /* Store the first level's colors in the output variables */
+   for (chan = 0; chan < 4; chan++) {
+       LLVMBuildStore(builder, colors0[chan], colors_out[chan]);
+   }
+
+   if (mip_filter == PIPE_TEX_MIPFILTER_LINEAR) {
+      struct lp_build_if_state if_ctx;
+      LLVMValueRef need_lerp;
+
+      /*
+       * We'll do mip filtering if any of the quads (or individual
+       * pixels in case of per-pixel lod) need it.
+       * Note we use lod_positive here, not lod_fpart, since it may be the same
+       * condition as the one used in the outer "if" in the caller, hence llvm
+       * should be able to merge the branches in this case.
+       */
+      need_lerp = lp_build_any_true_range(&bld->lodi_bld, bld->num_lods, lod_positive);
+
+      lp_build_if(&if_ctx, bld->gallivm, need_lerp);
+      {
+         /*
+          * We unfortunately need to clamp lod_fpart here since we can get
+          * negative values which would screw up filtering if not all
+          * lod_fpart values have same sign.
+          */
+         lod_fpart = lp_build_max(&bld->lodf_bld, lod_fpart,
+                                  bld->lodf_bld.zero);
+         /* sample the second mipmap level */
+         lp_build_mipmap_level_sizes(bld, ilevel1,
+                                     &size1,
+                                     &row_stride1_vec, &img_stride1_vec);
+         if (bld->num_mips == 1) {
+            data_ptr1 = lp_build_get_mipmap_level(bld, ilevel1);
+         }
+         else {
+            data_ptr1 = bld->base_ptr;
+            mipoff1 = lp_build_get_mip_offsets(bld, ilevel1);
+         }
+
+         lp_build_sample_image_linear(bld, size1, linear_mask,
+                                      row_stride1_vec, img_stride1_vec,
+                                      data_ptr1, mipoff1, coords, offsets,
+                                      colors1);
+
+         /* interpolate samples from the two mipmap levels */
+
+         if (bld->num_lods != bld->coord_type.length)
+            lod_fpart = lp_build_unpack_broadcast_aos_scalars(bld->gallivm,
+                                                              bld->lodf_bld.type,
                                                               bld->texel_bld.type,
                                                               lod_fpart);
 
@@ -1239,7 +1681,7 @@ lp_build_sample_common(struct lp_build_sample_context *bld,
                        const struct lp_derivatives *derivs, /* optional */
                        LLVMValueRef lod_bias, /* optional */
                        LLVMValueRef explicit_lod, /* optional */
-                       LLVMValueRef *lod_ipart,
+                       LLVMValueRef *lod_pos_or_zero,
                        LLVMValueRef *lod_fpart,
                        LLVMValueRef *ilevel0,
                        LLVMValueRef *ilevel1)
@@ -1249,6 +1691,8 @@ lp_build_sample_common(struct lp_build_sample_context *bld,
    const unsigned mag_filter = bld->static_sampler_state->mag_img_filter;
    const unsigned target = bld->static_texture_state->target;
    LLVMValueRef first_level, cube_rho = NULL;
+   LLVMValueRef lod_ipart = NULL;
+   struct lp_derivatives cube_derivs;
 
    /*
    printf("%s mip %d  min %d  mag %d\n", __FUNCTION__,
@@ -1265,7 +1709,8 @@ lp_build_sample_common(struct lp_build_sample_context *bld,
                       mip_filter != PIPE_TEX_MIPFILTER_NONE) &&
                       !bld->static_sampler_state->min_max_lod_equal &&
                       !explicit_lod);
-      lp_build_cube_lookup(bld, coords, derivs, &cube_rho, need_derivs);
+      lp_build_cube_lookup(bld, coords, derivs, &cube_rho, &cube_derivs, need_derivs);
+      derivs = &cube_derivs;
    }
    else if (target == PIPE_TEXTURE_1D_ARRAY ||
             target == PIPE_TEXTURE_2D_ARRAY) {
@@ -1309,9 +1754,16 @@ lp_build_sample_common(struct lp_build_sample_context *bld,
                             coords[0], coords[1], coords[2], cube_rho,
                             derivs, lod_bias, explicit_lod,
                             mip_filter,
-                            lod_ipart, lod_fpart);
+                            &lod_ipart, lod_fpart, lod_pos_or_zero);
    } else {
-      *lod_ipart = bld->leveli_bld.zero;
+      lod_ipart = bld->lodi_bld.zero;
+      *lod_pos_or_zero = bld->lodi_bld.zero;
+   }
+
+   if (bld->num_lods != bld->num_mips) {
+      /* only makes sense if there's just a single mip level */
+      assert(bld->num_mips == 1);
+      lod_ipart = lp_build_extract_range(bld->gallivm, lod_ipart, 0, 1);
    }
 
    /*
@@ -1328,8 +1780,8 @@ lp_build_sample_common(struct lp_build_sample_context *bld,
           * We should be able to set ilevel0 = const(0) but that causes
           * bad x86 code to be emitted.
           */
-         assert(*lod_ipart);
-         lp_build_nearest_mip_level(bld, texture_index, *lod_ipart, ilevel0, NULL);
+         assert(lod_ipart);
+         lp_build_nearest_mip_level(bld, texture_index, lod_ipart, ilevel0, NULL);
       }
       else {
          first_level = bld->dynamic_state->first_level(bld->dynamic_state,
@@ -1339,14 +1791,14 @@ lp_build_sample_common(struct lp_build_sample_context *bld,
       }
       break;
    case PIPE_TEX_MIPFILTER_NEAREST:
-      assert(*lod_ipart);
-      lp_build_nearest_mip_level(bld, texture_index, *lod_ipart, ilevel0, NULL);
+      assert(lod_ipart);
+      lp_build_nearest_mip_level(bld, texture_index, lod_ipart, ilevel0, NULL);
       break;
    case PIPE_TEX_MIPFILTER_LINEAR:
-      assert(*lod_ipart);
+      assert(lod_ipart);
       assert(*lod_fpart);
       lp_build_linear_mip_levels(bld, texture_index,
-                                 *lod_ipart, lod_fpart,
+                                 lod_ipart, lod_fpart,
                                  ilevel0, ilevel1);
       break;
    }
@@ -1423,7 +1875,7 @@ lp_build_clamp_border_color(struct lp_build_sample_context *bld,
                 * Border color was stored as int, hence need min/max clamp
                 * only if chan has less than 32 bits..
                 */
-               unsigned chan_size = format_desc->channel[chan].size < 32;
+               unsigned chan_size = format_desc->channel[chan].size;
                if (chan_size < 32) {
                   min_clamp = lp_build_const_int_vec(gallivm, vec4_type,
                                                      0 - (1 << (chan_size - 1)));
@@ -1451,7 +1903,7 @@ lp_build_clamp_border_color(struct lp_build_sample_context *bld,
                 * Border color was stored as uint, hence never need min
                 * clamp, and only need max clamp if chan has less than 32 bits.
                 */
-               unsigned chan_size = format_desc->channel[chan].size < 32;
+               unsigned chan_size = format_desc->channel[chan].size;
                if (chan_size < 32) {
                   max_clamp = lp_build_const_int_vec(gallivm, vec4_type,
                                                      (1 << chan_size) - 1);
@@ -1466,6 +1918,7 @@ lp_build_clamp_border_color(struct lp_build_sample_context *bld,
       /* mixed plain formats (or different pure size) */
       switch (format_desc->format) {
       case PIPE_FORMAT_B10G10R10A2_UINT:
+      case PIPE_FORMAT_R10G10B10A2_UINT:
       {
          unsigned max10 = (1 << 10) - 1;
          max_clamp = lp_build_const_aos(gallivm, vec4_type, max10, max10,
@@ -1580,13 +2033,12 @@ lp_build_sample_general(struct lp_build_sample_context *bld,
                         unsigned sampler_unit,
                         LLVMValueRef *coords,
                         const LLVMValueRef *offsets,
-                        LLVMValueRef lod_ipart,
+                        LLVMValueRef lod_positive,
                         LLVMValueRef lod_fpart,
                         LLVMValueRef ilevel0,
                         LLVMValueRef ilevel1,
                         LLVMValueRef *colors_out)
 {
-   struct lp_build_context *int_bld = &bld->int_bld;
    LLVMBuilderRef builder = bld->gallivm->builder;
    const struct lp_static_sampler_state *sampler_state = bld->static_sampler_state;
    const unsigned mip_filter = sampler_state->min_mip_filter;
@@ -1622,55 +2074,97 @@ lp_build_sample_general(struct lp_build_sample_context *bld,
 
    if (min_filter == mag_filter) {
       /* no need to distinguish between minification and magnification */
-      lp_build_sample_mipmap(bld, sampler_unit,
-                             min_filter, mip_filter,
+      lp_build_sample_mipmap(bld, min_filter, mip_filter,
                              coords, offsets,
                              ilevel0, ilevel1, lod_fpart,
                              texels);
    }
    else {
-      /* Emit conditional to choose min image filter or mag image filter
-       * depending on the lod being > 0 or <= 0, respectively.
-       */
-      struct lp_build_if_state if_ctx;
-      LLVMValueRef minify;
-
       /*
-       * XXX this should to all lods into account, if some are min
-       * some max probably could hack up the coords/weights in the linear
-       * path with selects to work for nearest.
-       * If that's just two quads sitting next to each other it seems
-       * quite ok to do the same filtering method on both though, at
-       * least unless we have explicit lod (and who uses different
-       * min/mag filter with that?)
+       * Could also get rid of the if-logic and always use mipmap_both, for
+       * both the single-lod and multi-lod cases, if nothing really uses this.
        */
-      if (bld->num_lods > 1)
-         lod_ipart = LLVMBuildExtractElement(builder, lod_ipart,
-                                             lp_build_const_int32(bld->gallivm, 0), "");
+      if (bld->num_lods == 1) {
+         /* Emit conditional to choose min image filter or mag image filter
+          * depending on the lod being > 0 or <= 0, respectively.
+          */
+         struct lp_build_if_state if_ctx;
+
+         lod_positive = LLVMBuildTrunc(builder, lod_positive,
+                                       LLVMInt1TypeInContext(bld->gallivm->context), "");
+
+         lp_build_if(&if_ctx, bld->gallivm, lod_positive);
+         {
+            /* Use the minification filter */
+            lp_build_sample_mipmap(bld, min_filter, mip_filter,
+                                   coords, offsets,
+                                   ilevel0, ilevel1, lod_fpart,
+                                   texels);
+         }
+         lp_build_else(&if_ctx);
+         {
+            /* Use the magnification filter */
+            lp_build_sample_mipmap(bld, mag_filter, PIPE_TEX_MIPFILTER_NONE,
+                                   coords, offsets,
+                                   ilevel0, NULL, NULL,
+                                   texels);
+         }
+         lp_build_endif(&if_ctx);
+      }
+      else {
+         LLVMValueRef need_linear, linear_mask;
+         unsigned mip_filter_for_nearest;
+         struct lp_build_if_state if_ctx;
 
-      /* minify = lod >= 0.0 */
-      minify = LLVMBuildICmp(builder, LLVMIntSGE,
-                             lod_ipart, int_bld->zero, "");
+         if (min_filter == PIPE_TEX_FILTER_LINEAR) {
+            linear_mask = lod_positive;
+            mip_filter_for_nearest = PIPE_TEX_MIPFILTER_NONE;
+         }
+         else {
+            linear_mask = lp_build_not(&bld->lodi_bld, lod_positive);
+            mip_filter_for_nearest = mip_filter;
+         }
+         need_linear = lp_build_any_true_range(&bld->lodi_bld, bld->num_lods,
+                                               linear_mask);
+
+         if (bld->num_lods != bld->coord_type.length) {
+            linear_mask = lp_build_unpack_broadcast_aos_scalars(bld->gallivm,
+                                                                bld->lodi_type,
+                                                                bld->int_coord_type,
+                                                                linear_mask);
+         }
 
-      lp_build_if(&if_ctx, bld->gallivm, minify);
-      {
-         /* Use the minification filter */
-         lp_build_sample_mipmap(bld, sampler_unit,
-                                min_filter, mip_filter,
-                                coords, offsets,
-                                ilevel0, ilevel1, lod_fpart,
-                                texels);
-      }
-      lp_build_else(&if_ctx);
-      {
-         /* Use the magnification filter */
-         lp_build_sample_mipmap(bld, sampler_unit,
-                                mag_filter, PIPE_TEX_MIPFILTER_NONE,
-                                coords, offsets,
-                                ilevel0, NULL, NULL,
-                                texels);
+         lp_build_if(&if_ctx, bld->gallivm, need_linear);
+         {
+            /*
+             * Do sampling with both filters simultaneously. This means using
+             * a linear filter and doing some tricks (with weights) for the pixels
+             * which need nearest filter.
+             * Note that it's probably rare some pixels need nearest and some
+             * linear filter but the fixups required for the nearest pixels
+             * aren't all that complicated so just always run a combined path
+             * if at least some pixels require linear.
+             */
+            lp_build_sample_mipmap_both(bld, linear_mask, mip_filter,
+                                        coords, offsets,
+                                        ilevel0, ilevel1,
+                                        lod_fpart, lod_positive,
+                                        texels);
+         }
+         lp_build_else(&if_ctx);
+         {
+            /*
+             * All pixels require just nearest filtering, which is way
+             * cheaper than linear, hence do a separate path for that.
+             */
+            lp_build_sample_mipmap(bld, PIPE_TEX_FILTER_NEAREST,
+                                   mip_filter_for_nearest,
+                                   coords, offsets,
+                                   ilevel0, ilevel1, lod_fpart,
+                                   texels);
+         }
+         lp_build_endif(&if_ctx);
       }
-      lp_build_endif(&if_ctx);
    }
 
    for (chan = 0; chan < 4; ++chan) {
@@ -1696,7 +2190,7 @@ lp_build_fetch_texel(struct lp_build_sample_context *bld,
                      const LLVMValueRef *offsets,
                      LLVMValueRef *colors_out)
 {
-   struct lp_build_context *perquadi_bld = &bld->leveli_bld;
+   struct lp_build_context *perquadi_bld = &bld->lodi_bld;
    struct lp_build_context *int_coord_bld = &bld->int_coord_bld;
    unsigned dims = bld->dims, chan;
    unsigned target = bld->static_texture_state->target;
@@ -1710,7 +2204,7 @@ lp_build_fetch_texel(struct lp_build_sample_context *bld,
    out_of_bounds = int_coord_bld->zero;
 
    if (explicit_lod && bld->static_texture_state->target != PIPE_BUFFER) {
-      if (bld->num_lods != int_coord_bld->type.length) {
+      if (bld->num_mips != int_coord_bld->type.length) {
          ilevel = lp_build_pack_aos_scalars(bld->gallivm, int_coord_bld->type,
                                             perquadi_bld->type, explicit_lod, 0);
       }
@@ -1721,7 +2215,7 @@ lp_build_fetch_texel(struct lp_build_sample_context *bld,
                                  out_of_bound_ret_zero ? &out_of_bounds : NULL);
    }
    else {
-      assert(bld->num_lods == 1);
+      assert(bld->num_mips == 1);
       if (bld->static_texture_state->target != PIPE_BUFFER) {
          ilevel = bld->dynamic_state->first_level(bld->dynamic_state,
                                                   bld->gallivm, texture_unit);
@@ -1860,7 +2354,7 @@ lp_build_sample_soa(struct gallivm_state *gallivm,
    unsigned target = static_texture_state->target;
    unsigned dims = texture_dims(target);
    unsigned num_quads = type.length / 4;
-   unsigned mip_filter, i;
+   unsigned mip_filter, min_img_filter, mag_img_filter, i;
    struct lp_build_sample_context bld;
    struct lp_static_sampler_state derived_sampler_state = *static_sampler_state;
    LLVMTypeRef i32t = LLVMInt32TypeInContext(gallivm->context);
@@ -1872,6 +2366,19 @@ lp_build_sample_soa(struct gallivm_state *gallivm,
       debug_printf("Sample from %s\n", util_format_name(fmt));
    }
 
+   if (static_texture_state->format == PIPE_FORMAT_NONE) {
+      /*
+       * If there's nothing bound, format is NONE, and we must return
+       * all zero as mandated by d3d10 in this case.
+       */
+      unsigned chan;
+      LLVMValueRef zero = lp_build_const_vec(gallivm, type, 0.0F);
+      for (chan = 0; chan < 4; chan++) {
+         texel_out[chan] = zero;
+      }
+      return;
+   }
+
    assert(type.floating);
 
    /* Setup our build context */
@@ -1923,6 +2430,27 @@ lp_build_sample_soa(struct gallivm_state *gallivm,
       debug_printf("  .min_mip_filter = %u\n", derived_sampler_state.min_mip_filter);
    }
 
+   if (static_texture_state->target == PIPE_TEXTURE_CUBE ||
+       static_texture_state->target == PIPE_TEXTURE_CUBE_ARRAY)
+   {
+      /*
+       * Seamless filtering ignores wrap modes.
+       * Setting to CLAMP_TO_EDGE is correct for nearest filtering; for
+       * bilinear it's not correct, but way better than using, for instance, repeat.
+       * Note we even set this for non-seamless. Technically GL allows any wrap
+       * mode, which made sense when supporting true borders (can get seamless
+       * effect with border and CLAMP_TO_BORDER), but gallium doesn't support
+       * borders and d3d9 requires wrap modes to be ignored and it's a pain to fix
+       * up the sampler state (as it makes it texture dependent).
+       */
+      derived_sampler_state.wrap_s = PIPE_TEX_WRAP_CLAMP_TO_EDGE;
+      derived_sampler_state.wrap_t = PIPE_TEX_WRAP_CLAMP_TO_EDGE;
+   }
+
+   min_img_filter = derived_sampler_state.min_img_filter;
+   mag_img_filter = derived_sampler_state.mag_img_filter;
+
+
    /*
     * This is all a bit complicated different paths are chosen for performance
     * reasons.
@@ -1941,31 +2469,65 @@ lp_build_sample_soa(struct gallivm_state *gallivm,
     * There are other situations where at least the multiple int lods could be
     * avoided like min and max lod being equal.
     */
-   if (explicit_lod && lod_property == LP_SAMPLER_LOD_PER_ELEMENT &&
-       ((is_fetch && target != PIPE_BUFFER) ||
-        (!is_fetch && mip_filter != PIPE_TEX_MIPFILTER_NONE)))
+   bld.num_mips = bld.num_lods = 1;
+
+   if ((gallivm_debug & GALLIVM_DEBUG_NO_QUAD_LOD) &&
+       (gallivm_debug & GALLIVM_DEBUG_NO_RHO_APPROX) &&
+       (static_texture_state->target == PIPE_TEXTURE_CUBE) &&
+       (!is_fetch && mip_filter != PIPE_TEX_MIPFILTER_NONE)) {
+      /*
+       * Special case for using per-pixel lod even with implicit lod,
+       * which is generally never required (it's ok by the APIs) except to please
+       * some (somewhat broken imho) tests (because per-pixel face selection
+       * can cause derivatives to differ for pixels outside the primitive
+       * due to the major axis division, even if the pre-project derivatives
+       * look normal).
+       */
+      bld.num_mips = type.length;
       bld.num_lods = type.length;
+   }
+   else if (lod_property == LP_SAMPLER_LOD_PER_ELEMENT ||
+       (explicit_lod || lod_bias || derivs)) {
+      if ((is_fetch && target != PIPE_BUFFER) ||
+          (!is_fetch && mip_filter != PIPE_TEX_MIPFILTER_NONE)) {
+         bld.num_mips = type.length;
+         bld.num_lods = type.length;
+      }
+      else if (!is_fetch && min_img_filter != mag_img_filter) {
+         bld.num_mips = 1;
+         bld.num_lods = type.length;
+      }
+   }
    /* TODO: for true scalar_lod should only use 1 lod value */
-   else if ((is_fetch && explicit_lod && target != PIPE_BUFFER ) ||
+   else if ((is_fetch && explicit_lod && target != PIPE_BUFFER) ||
             (!is_fetch && mip_filter != PIPE_TEX_MIPFILTER_NONE)) {
+      bld.num_mips = num_quads;
       bld.num_lods = num_quads;
    }
-   else {
-      bld.num_lods = 1;
+   else if (!is_fetch && min_img_filter != mag_img_filter) {
+      bld.num_mips = 1;
+      bld.num_lods = num_quads;
    }
 
-   bld.levelf_type = type;
+
+   bld.lodf_type = type;
    /* we want native vector size to be able to use our intrinsics */
    if (bld.num_lods != type.length) {
-      bld.levelf_type.length = type.length > 4 ? ((type.length + 15) / 16) * 4 : 1;
+      /* TODO: this currently always has to be per-quad or per-element */
+      bld.lodf_type.length = type.length > 4 ? ((type.length + 15) / 16) * 4 : 1;
+   }
+   bld.lodi_type = lp_int_type(bld.lodf_type);
+   bld.levelf_type = bld.lodf_type;
+   if (bld.num_mips == 1) {
+      bld.levelf_type.length = 1;
    }
    bld.leveli_type = lp_int_type(bld.levelf_type);
    bld.float_size_type = bld.float_size_in_type;
    /* Note: size vectors may not be native. They contain minified w/h/d/_ values,
     * with per-element lod that is w0/h0/d0/_/w1/h1/d1/_/... so up to 8x4f32 */
-   if (bld.num_lods > 1) {
-      bld.float_size_type.length = bld.num_lods == type.length ?
-                                      bld.num_lods * bld.float_size_in_type.length :
+   if (bld.num_mips > 1) {
+      bld.float_size_type.length = bld.num_mips == type.length ?
+                                      bld.num_mips * bld.float_size_in_type.length :
                                       type.length;
    }
    bld.int_size_type = lp_int_type(bld.float_size_type);
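A worked example may help here (a sketch assuming type.length == 8, i.e. two quads per vector, and float_size_in_type.length == 4 for w/h/d/_):

   /*
    *   single lod:      num_mips = num_lods = 1
    *                    lodf_type.length = 4 (padded up to native width),
    *                    levelf_type.length = 1, float_size_type.length = 4
    *   per-quad lod:    num_mips = num_lods = 2
    *                    lodf_type.length = 4, levelf_type.length = 4,
    *                    float_size_type.length = 8 (w0 h0 d0 _ w1 h1 d1 _)
    *   per-element lod: num_mips = num_lods = 8
    *                    lodf_type.length = 8, levelf_type.length = 8,
    *                    float_size_type.length = 32 (8 x w/h/d/_)
    */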
@@ -1982,6 +2544,8 @@ lp_build_sample_soa(struct gallivm_state *gallivm,
    lp_build_context_init(&bld.texel_bld, gallivm, bld.texel_type);
    lp_build_context_init(&bld.levelf_bld, gallivm, bld.levelf_type);
    lp_build_context_init(&bld.leveli_bld, gallivm, bld.leveli_type);
+   lp_build_context_init(&bld.lodf_bld, gallivm, bld.lodf_type);
+   lp_build_context_init(&bld.lodi_bld, gallivm, bld.lodi_type);
 
    /* Get the dynamic state */
    tex_width = dynamic_state->width(dynamic_state, gallivm, texture_index);
@@ -2031,56 +2595,65 @@ lp_build_sample_soa(struct gallivm_state *gallivm,
    }
 
    else {
-      LLVMValueRef lod_ipart = NULL, lod_fpart = NULL;
+      LLVMValueRef lod_fpart = NULL, lod_positive = NULL;
       LLVMValueRef ilevel0 = NULL, ilevel1 = NULL;
       boolean use_aos = util_format_fits_8unorm(bld.format_desc) &&
-                        lp_is_simple_wrap_mode(static_sampler_state->wrap_s) &&
-                        lp_is_simple_wrap_mode(static_sampler_state->wrap_t) &&
                         /* not sure this is strictly needed or simply impossible */
-                        static_sampler_state->compare_mode == PIPE_TEX_COMPARE_NONE;
+                        derived_sampler_state.compare_mode == PIPE_TEX_COMPARE_NONE &&
+                        lp_is_simple_wrap_mode(derived_sampler_state.wrap_s);
+
+      use_aos &= bld.num_lods <= num_quads ||
+                 derived_sampler_state.min_img_filter ==
+                    derived_sampler_state.mag_img_filter;
+      if (dims > 1) {
+         use_aos &= lp_is_simple_wrap_mode(derived_sampler_state.wrap_t);
+         if (dims > 2) {
+            use_aos &= lp_is_simple_wrap_mode(derived_sampler_state.wrap_r);
+         }
+      }
+      if (static_texture_state->target == PIPE_TEXTURE_CUBE &&
+          derived_sampler_state.seamless_cube_map &&
+          (derived_sampler_state.min_img_filter == PIPE_TEX_FILTER_LINEAR ||
+           derived_sampler_state.mag_img_filter == PIPE_TEX_FILTER_LINEAR)) {
+         /* theoretically possible with AoS filtering but not implemented (complex!) */
+         use_aos = 0;
+      }
 
       if ((gallivm_debug & GALLIVM_DEBUG_PERF) &&
           !use_aos && util_format_fits_8unorm(bld.format_desc)) {
          debug_printf("%s: using floating point linear filtering for %s\n",
                       __FUNCTION__, bld.format_desc->short_name);
-         debug_printf("  min_img %d  mag_img %d  mip %d  wraps %d  wrapt %d\n",
-                      static_sampler_state->min_img_filter,
-                      static_sampler_state->mag_img_filter,
-                      static_sampler_state->min_mip_filter,
-                      static_sampler_state->wrap_s,
-                      static_sampler_state->wrap_t);
+         debug_printf("  min_img %d  mag_img %d  mip %d  target %d  seamless %d"
+                      "  wraps %d  wrapt %d  wrapr %d\n",
+                      derived_sampler_state.min_img_filter,
+                      derived_sampler_state.mag_img_filter,
+                      derived_sampler_state.min_mip_filter,
+                      static_texture_state->target,
+                      derived_sampler_state.seamless_cube_map,
+                      derived_sampler_state.wrap_s,
+                      derived_sampler_state.wrap_t,
+                      derived_sampler_state.wrap_r);
       }
 
       lp_build_sample_common(&bld, texture_index, sampler_index,
                              newcoords,
                              derivs, lod_bias, explicit_lod,
-                             &lod_ipart, &lod_fpart,
+                             &lod_positive, &lod_fpart,
                              &ilevel0, &ilevel1);
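Note that lod_ipart has been replaced by lod_positive in this call; judging from its use further down for choosing between minification and magnification, it is presumably a per-lod "lod > 0" flag, with the integer mip level itself carried only in ilevel0/ilevel1 (an assumed reading, not spelled out in this hunk):

      /* assumed semantics (sketch):
       *    lod_positive != 0  ->  lod > 0, take the min_img_filter path
       *    lod_positive == 0  ->  lod <= 0, take the mag_img_filter path
       * the integer level(s) to sample are returned via ilevel0/ilevel1.
       */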
 
       /*
        * we only try 8-wide sampling with soa as it appears to
-       * be a loss with aos with AVX (but it should work).
+       * be a loss with aos with AVX (but it should work, except for
+       * conformance when min_filter != mag_filter and num_lods > 1).
        * (It should be faster if we'd support avx2)
        */
       if (num_quads == 1 || !use_aos) {
-
-         if (num_quads > 1) {
-            if (mip_filter == PIPE_TEX_MIPFILTER_NONE) {
-               LLVMValueRef index0 = lp_build_const_int32(gallivm, 0);
-               /*
-                * These parameters are the same for all quads,
-                * could probably simplify.
-                */
-               lod_ipart = LLVMBuildExtractElement(builder, lod_ipart, index0, "");
-               ilevel0 = LLVMBuildExtractElement(builder, ilevel0, index0, "");
-            }
-         }
          if (use_aos) {
             /* do sampling/filtering with fixed pt arithmetic */
             lp_build_sample_aos(&bld, sampler_index,
                                 newcoords[0], newcoords[1],
                                 newcoords[2],
-                                offsets, lod_ipart, lod_fpart,
+                                offsets, lod_positive, lod_fpart,
                                 ilevel0, ilevel1,
                                 texel_out);
          }
@@ -2088,7 +2661,7 @@ lp_build_sample_soa(struct gallivm_state *gallivm,
          else {
             lp_build_sample_general(&bld, sampler_index,
                                     newcoords, offsets,
-                                    lod_ipart, lod_fpart,
+                                    lod_positive, lod_fpart,
                                     ilevel0, ilevel1,
                                     texel_out);
          }
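What follows (in the next hunk) is the fallback that splits wider-than-4 sampling into 4-wide AoS batches; conceptually each batch is carved out with lp_build_extract_range (a sketch, matching the per-quad loop further down):

         /* for quad i, pull out its 4 coords and sample with bld4:
          *    s4 = lp_build_extract_range(gallivm, newcoords[0], 4 * i, 4);
          *    t4 = lp_build_extract_range(gallivm, newcoords[1], 4 * i, 4);
          */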
@@ -2128,28 +2701,43 @@ lp_build_sample_soa(struct gallivm_state *gallivm,
          bld4.int_size_in_type = lp_int_type(bld4.float_size_in_type);
          bld4.texel_type = bld.texel_type;
          bld4.texel_type.length = 4;
-         bld4.levelf_type = type4;
-         /* we want native vector size to be able to use our intrinsics */
-         bld4.levelf_type.length = 1;
-         bld4.leveli_type = lp_int_type(bld4.levelf_type);
 
-         if (explicit_lod && lod_property == LP_SAMPLER_LOD_PER_ELEMENT &&
-             ((is_fetch && target != PIPE_BUFFER) ||
-              (!is_fetch && mip_filter != PIPE_TEX_MIPFILTER_NONE)))
+         bld4.num_mips = bld4.num_lods = 1;
+         if ((gallivm_debug & GALLIVM_DEBUG_NO_QUAD_LOD) &&
+             (gallivm_debug & GALLIVM_DEBUG_NO_RHO_APPROX) &&
+             (static_texture_state->target == PIPE_TEXTURE_CUBE) &&
+             (!is_fetch && mip_filter != PIPE_TEX_MIPFILTER_NONE)) {
+            bld4.num_mips = type4.length;
             bld4.num_lods = type4.length;
-         else
-            bld4.num_lods = 1;
+         }
+         if (lod_property == LP_SAMPLER_LOD_PER_ELEMENT &&
+             (explicit_lod || lod_bias || derivs)) {
+            if ((is_fetch && target != PIPE_BUFFER) ||
+                (!is_fetch && mip_filter != PIPE_TEX_MIPFILTER_NONE)) {
+               bld4.num_mips = type4.length;
+               bld4.num_lods = type4.length;
+            }
+            else if (!is_fetch && min_img_filter != mag_img_filter) {
+               bld4.num_mips = 1;
+               bld4.num_lods = type4.length;
+            }
+         }
 
-         bld4.levelf_type = type4;
          /* we want native vector size to be able to use our intrinsics */
+         bld4.lodf_type = type4;
          if (bld4.num_lods != type4.length) {
+            bld4.lodf_type.length = 1;
+         }
+         bld4.lodi_type = lp_int_type(bld4.lodf_type);
+         bld4.levelf_type = type4;
+         if (bld4.num_mips != type4.length) {
             bld4.levelf_type.length = 1;
          }
          bld4.leveli_type = lp_int_type(bld4.levelf_type);
          bld4.float_size_type = bld4.float_size_in_type;
-         if (bld4.num_lods > 1) {
-            bld4.float_size_type.length = bld4.num_lods == type4.length ?
-                                            bld4.num_lods * bld4.float_size_in_type.length :
+         if (bld4.num_mips > 1) {
+            bld4.float_size_type.length = bld4.num_mips == type4.length ?
+                                            bld4.num_mips * bld4.float_size_in_type.length :
                                             type4.length;
          }
          bld4.int_size_type = lp_int_type(bld4.float_size_type);
@@ -2166,10 +2754,12 @@ lp_build_sample_soa(struct gallivm_state *gallivm,
          lp_build_context_init(&bld4.texel_bld, gallivm, bld4.texel_type);
          lp_build_context_init(&bld4.levelf_bld, gallivm, bld4.levelf_type);
          lp_build_context_init(&bld4.leveli_bld, gallivm, bld4.leveli_type);
+         lp_build_context_init(&bld4.lodf_bld, gallivm, bld4.lodf_type);
+         lp_build_context_init(&bld4.lodi_bld, gallivm, bld4.lodi_type);
 
          for (i = 0; i < num_quads; i++) {
             LLVMValueRef s4, t4, r4;
-            LLVMValueRef lod_ipart4, lod_fpart4 = NULL;
+            LLVMValueRef lod_positive4, lod_fpart4 = NULL;
             LLVMValueRef ilevel04, ilevel14 = NULL;
             LLVMValueRef offsets4[4] = { NULL };
             unsigned num_lods = bld4.num_lods;
@@ -2187,8 +2777,9 @@ lp_build_sample_soa(struct gallivm_state *gallivm,
                   }
                }
             }
-            lod_ipart4 = lp_build_extract_range(gallivm, lod_ipart, num_lods * i, num_lods);
-            ilevel04 = lp_build_extract_range(gallivm, ilevel0, num_lods * i, num_lods);
+            lod_positive4 = lp_build_extract_range(gallivm, lod_positive, num_lods * i, num_lods);
+            ilevel04 = bld.num_mips == 1 ? ilevel0 :
+                          lp_build_extract_range(gallivm, ilevel0, num_lods * i, num_lods);
             if (mip_filter == PIPE_TEX_MIPFILTER_LINEAR) {
                ilevel14 = lp_build_extract_range(gallivm, ilevel1, num_lods * i, num_lods);
                lod_fpart4 = lp_build_extract_range(gallivm, lod_fpart, num_lods * i, num_lods);
@@ -2198,7 +2789,7 @@ lp_build_sample_soa(struct gallivm_state *gallivm,
                /* do sampling/filtering with fixed pt arithmetic */
                lp_build_sample_aos(&bld4, sampler_index,
                                    s4, t4, r4, offsets4,
-                                   lod_ipart4, lod_fpart4,
+                                   lod_positive4, lod_fpart4,
                                    ilevel04, ilevel14,
                                    texelout4);
             }
@@ -2214,7 +2805,7 @@ lp_build_sample_soa(struct gallivm_state *gallivm,
 
                lp_build_sample_general(&bld4, sampler_index,
                                        newcoords4, offsets4,
-                                       lod_ipart4, lod_fpart4,
+                                       lod_positive4, lod_fpart4,
                                        ilevel04, ilevel14,
                                        texelout4);
             }
@@ -2265,6 +2856,19 @@ lp_build_size_query_soa(struct gallivm_state *gallivm,
    unsigned num_lods = 1;
    struct lp_build_context bld_int_vec4;
 
+   if (static_state->format == PIPE_FORMAT_NONE) {
+      /*
+       * If there's nothing bound the format is NONE, and in this case
+       * we must return all zeros, as mandated by d3d10.
+       */
+      unsigned chan;
+      LLVMValueRef zero = lp_build_const_vec(gallivm, int_type, 0.0F);
+      for (chan = 0; chan < 4; chan++) {
+         sizes_out[chan] = zero;
+      }
+      return;
+   }
+
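As an aside, lp_build_const_vec takes a double and converts it to the given element type, so passing 0.0F above still yields an all-zero integer vector; to the best of my knowledge the same constant could be obtained with:

      LLVMValueRef zero = lp_build_zero(gallivm, int_type);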
    /*
     * Do some sanity verification about bound texture and shader dcl target.
     * Not entirely sure what's possible but assume array/non-array
@@ -2336,7 +2940,7 @@ lp_build_size_query_soa(struct gallivm_state *gallivm,
                                     lp_build_const_int32(gallivm, 2), "");
    }
 
-   size = lp_build_minify(&bld_int_vec4, size, lod);
+   size = lp_build_minify(&bld_int_vec4, size, lod, TRUE);
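For reference, lp_build_minify computes the mip-level size as roughly max(base_size >> level, 1) per component; the new trailing boolean presumably indicates that the lod is uniform (scalar/broadcast), enabling a cheaper vectorized shift path (parameter meaning assumed, not spelled out in this hunk):

   /* sketch of the helper's effect, not its implementation:
    *    size = MAX2(base_size >> level, 1);   per component
    */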
 
    if (has_array)
       size = LLVMBuildInsertElement(gallivm->builder, size,