i965/miptree: Set level_x/y in create_for_dri_image
[mesa.git] / src / mesa / drivers / dri / i965 / intel_mipmap_tree.c
index 616bddb852e3f7b21aa753de6ef20daa1edbdb5c..0d989d5b0639bebed0a87e7e3a1314af2a436193 100644 (file)
@@ -27,8 +27,8 @@
 #include <GL/internal/dri_interface.h>
 
 #include "intel_batchbuffer.h"
+#include "intel_image.h"
 #include "intel_mipmap_tree.h"
-#include "intel_resolve_map.h"
 #include "intel_tex.h"
 #include "intel_blit.h"
 #include "intel_fbo.h"
@@ -49,7 +49,8 @@
 #define FILE_DEBUG_FLAG DEBUG_MIPTREE
 
 static void *intel_miptree_map_raw(struct brw_context *brw,
-                                   struct intel_mipmap_tree *mt);
+                                   struct intel_mipmap_tree *mt,
+                                   GLbitfield mode);
 
 static void intel_miptree_unmap_raw(struct intel_mipmap_tree *mt);
 
@@ -64,7 +65,7 @@ intel_miptree_alloc_mcs(struct brw_context *brw,
  */
 static enum intel_msaa_layout
 compute_msaa_layout(struct brw_context *brw, mesa_format format,
-                    bool disable_aux_buffers)
+                    uint32_t layout_flags)
 {
    /* Prior to Gen7, all MSAA surfaces used IMS layout. */
    if (brw->gen < 7)
@@ -90,7 +91,7 @@ compute_msaa_layout(struct brw_context *brw, mesa_format format,
        */
       if (brw->gen == 7 && _mesa_get_format_datatype(format) == GL_INT) {
          return INTEL_MSAA_LAYOUT_UMS;
-      } else if (disable_aux_buffers) {
+      } else if (layout_flags & MIPTREE_LAYOUT_DISABLE_AUX) {
          /* We can't use the CMS layout because it uses an aux buffer, the MCS
           * buffer. So fallback to UMS, which is identical to CMS without the
           * MCS. */
@@ -101,69 +102,8 @@ compute_msaa_layout(struct brw_context *brw, mesa_format format,
    }
 }
 
-
-/**
- * For single-sampled render targets ("non-MSRT"), the MCS buffer is a
- * scaled-down bitfield representation of the color buffer which is capable of
- * recording when blocks of the color buffer are equal to the clear value.
- * This function returns the block size that will be used by the MCS buffer
- * corresponding to a certain color miptree.
- *
- * From the Ivy Bridge PRM, Vol2 Part1 11.7 "MCS Buffer for Render Target(s)",
- * beneath the "Fast Color Clear" bullet (p327):
- *
- *     The following table describes the RT alignment
- *
- *                       Pixels  Lines
- *         TiledY RT CL
- *             bpp
- *              32          8      4
- *              64          4      4
- *             128          2      4
- *         TiledX RT CL
- *             bpp
- *              32         16      2
- *              64          8      2
- *             128          4      2
- *
- * This alignment has the following uses:
- *
- * - For figuring out the size of the MCS buffer.  Each 4k tile in the MCS
- *   buffer contains 128 blocks horizontally and 256 blocks vertically.
- *
- * - For figuring out alignment restrictions for a fast clear operation.  Fast
- *   clear operations must always clear aligned multiples of 16 blocks
- *   horizontally and 32 blocks vertically.
- *
- * - For scaling down the coordinates sent through the render pipeline during
- *   a fast clear.  X coordinates must be scaled down by 8 times the block
- *   width, and Y coordinates by 16 times the block height.
- *
- * - For scaling down the coordinates sent through the render pipeline during
- *   a "Render Target Resolve" operation.  X coordinates must be scaled down
- *   by half the block width, and Y coordinates by half the block height.
- */
-void
-intel_get_non_msrt_mcs_alignment(const struct intel_mipmap_tree *mt,
-                                 unsigned *width_px, unsigned *height)
-{
-   switch (mt->tiling) {
-   default:
-      unreachable("Non-MSRT MCS requires X or Y tiling");
-      /* In release builds, fall through */
-   case I915_TILING_Y:
-      *width_px = 32 / mt->cpp;
-      *height = 4;
-      break;
-   case I915_TILING_X:
-      *width_px = 64 / mt->cpp;
-      *height = 2;
-   }
-}
-
-bool
-intel_tiling_supports_non_msrt_mcs(const struct brw_context *brw,
-                                   unsigned tiling)
+static bool
+intel_tiling_supports_ccs(const struct brw_context *brw, unsigned tiling)
 {
    /* From the Ivy Bridge PRM, Vol2 Part1 11.7 "MCS Buffer for Render
     * Target(s)", beneath the "Fast Color Clear" bullet (p326):
@@ -201,17 +141,14 @@ intel_tiling_supports_non_msrt_mcs(const struct brw_context *brw,
  *     - MCS and Lossless compression is supported for TiledY/TileYs/TileYf
  *     non-MSRTs only.
  */
-bool
-intel_miptree_supports_non_msrt_fast_clear(struct brw_context *brw,
-                                           const struct intel_mipmap_tree *mt)
+static bool
+intel_miptree_supports_ccs(struct brw_context *brw,
+                           const struct intel_mipmap_tree *mt)
 {
    /* MCS support does not exist prior to Gen7 */
    if (brw->gen < 7)
       return false;
 
-   if (mt->disable_aux_buffers)
-      return false;
-
    /* This function applies only to non-multisampled render targets. */
    if (mt->num_samples > 1)
       return false;
@@ -261,36 +198,41 @@ intel_miptree_supports_non_msrt_fast_clear(struct brw_context *brw,
    if (brw->gen < 8 && (mip_mapped || arrayed))
       return false;
 
-   /* Not implemented yet. */
-   if (mip_mapped) {
-      perf_debug("Multi-LOD fast clear - giving up (%dx%dx%d).\n",
-                 mt->logical_width0, mt->logical_height0, mt->last_level);
-      return false;
-   }
-
-   /* Not implemented yet. */
-   if (arrayed) {
-      perf_debug("Layered fast clear - giving up. (%dx%d%d)\n",
-                 mt->logical_width0, mt->logical_height0,
-                 mt->physical_depth0);
-      return false;
-   }
-
    /* There's no point in using an MCS buffer if the surface isn't in a
     * renderable format.
     */
-   if (!brw->format_supported_as_render_target[mt->format])
+   if (!brw->mesa_format_supports_render[mt->format])
       return false;
 
    if (brw->gen >= 9) {
       mesa_format linear_format = _mesa_get_srgb_format_linear(mt->format);
-      const uint32_t brw_format = brw_format_for_mesa_format(linear_format);
-      return isl_format_supports_lossless_compression(&brw->screen->devinfo,
-                                                      brw_format);
+      const enum isl_format isl_format =
+         brw_isl_format_for_mesa_format(linear_format);
+      return isl_format_supports_ccs_e(&brw->screen->devinfo, isl_format);
    } else
       return true;
 }
 
+static bool
+intel_miptree_supports_hiz(struct brw_context *brw,
+                           struct intel_mipmap_tree *mt)
+{
+   if (!brw->has_hiz)
+      return false;
+
+   switch (mt->format) {
+   case MESA_FORMAT_Z_FLOAT32:
+   case MESA_FORMAT_Z32_FLOAT_S8X24_UINT:
+   case MESA_FORMAT_Z24_UNORM_X8_UINT:
+   case MESA_FORMAT_Z24_UNORM_S8_UINT:
+   case MESA_FORMAT_Z_UNORM16:
+      return true;
+   default:
+      return false;
+   }
+}
+
+
 /* On Gen9 support for color buffer compression was extended to single
  * sampled surfaces. This is a helper considering both auxiliary buffer
  * type and number of samples telling if the given miptree represents
@@ -318,9 +260,9 @@ intel_miptree_is_lossless_compressed(const struct brw_context *brw,
    return mt->num_samples <= 1;
 }
 
-bool
-intel_miptree_supports_lossless_compressed(struct brw_context *brw,
-                                           const struct intel_mipmap_tree *mt)
+static bool
+intel_miptree_supports_ccs_e(struct brw_context *brw,
+                             const struct intel_mipmap_tree *mt)
 {
    /* For now compression is only enabled for integer formats even though
     * there exist supported floating point formats also. This is a heuristic
@@ -332,8 +274,7 @@ intel_miptree_supports_lossless_compressed(struct brw_context *brw,
    if (_mesa_get_format_datatype(mt->format) == GL_FLOAT)
       return false;
 
-   /* Fast clear mechanism and lossless compression go hand in hand. */
-   if (!intel_miptree_supports_non_msrt_fast_clear(brw, mt))
+   if (!intel_miptree_supports_ccs(brw, mt))
       return false;
 
    /* Fast clear can be also used to clear srgb surfaces by using equivalent
@@ -360,6 +301,27 @@ intel_depth_format_for_depthstencil_format(mesa_format format) {
    }
 }
 
+static bool
+create_mapping_table(GLenum target, unsigned first_level, unsigned last_level,
+                     unsigned depth0, struct intel_mipmap_level *table)
+{
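+   /* For GL_TEXTURE_3D the number of slices shrinks with each level, while
+    * array and cube targets keep depth0 slices at every level.
+    */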
+   for (unsigned level = first_level; level <= last_level; level++) {
+      const unsigned d =
+         target == GL_TEXTURE_3D ? minify(depth0, level) : depth0;
+
+      table[level].slice = calloc(d, sizeof(*table[0].slice));
+      if (!table[level].slice)
+         goto unwind;
+   }
+
+   return true;
+
+unwind:
+   for (unsigned level = first_level; level <= last_level; level++)
+      free(table[level].slice);
+
+   return false;
+}
 
 /**
  * @param for_bo Indicates that the caller is
@@ -397,22 +359,23 @@ intel_miptree_create_layout(struct brw_context *brw,
    mt->logical_width0 = width0;
    mt->logical_height0 = height0;
    mt->logical_depth0 = depth0;
-   mt->fast_clear_state = INTEL_FAST_CLEAR_STATE_RESOLVED;
-   mt->disable_aux_buffers = (layout_flags & MIPTREE_LAYOUT_DISABLE_AUX) != 0;
-   mt->no_ccs = true;
    mt->is_scanout = (layout_flags & MIPTREE_LAYOUT_FOR_SCANOUT) != 0;
-   exec_list_make_empty(&mt->hiz_map);
+   mt->aux_usage = ISL_AUX_USAGE_NONE;
+   mt->supports_fast_clear = false;
+   mt->aux_state = NULL;
    mt->cpp = _mesa_get_format_bytes(format);
    mt->num_samples = num_samples;
    mt->compressed = _mesa_is_format_compressed(format);
    mt->msaa_layout = INTEL_MSAA_LAYOUT_NONE;
    mt->refcount = 1;
 
+   if (brw->gen == 6 && format == MESA_FORMAT_S_UINT8)
+      layout_flags |= MIPTREE_LAYOUT_GEN6_HIZ_STENCIL;
+
    int depth_multiply = 1;
    if (num_samples > 1) {
       /* Adjust width/height/depth for MSAA */
-      mt->msaa_layout = compute_msaa_layout(brw, format,
-                                            mt->disable_aux_buffers);
+      mt->msaa_layout = compute_msaa_layout(brw, format, layout_flags);
       if (mt->msaa_layout == INTEL_MSAA_LAYOUT_IMS) {
          /* From the Ivybridge PRM, Volume 1, Part 1, page 108:
           * "If the surface is multisampled and it is a depth or stencil
@@ -500,6 +463,12 @@ intel_miptree_create_layout(struct brw_context *brw,
       }
    }
 
+   if (!create_mapping_table(target, first_level, last_level, depth0,
+                             mt->level)) {
+      free(mt);
+      return NULL;
+   }
+
    /* Set array_layout to ALL_SLICES_AT_EACH_LOD when array_spacing_lod0 can
     * be used. array_spacing_lod0 is only used for non-IMS MSAA surfaces on
     * Gen 7 and 8. On Gen 8 and 9 this layout is not available but it is still
@@ -535,12 +504,10 @@ intel_miptree_create_layout(struct brw_context *brw,
    if (!(layout_flags & MIPTREE_LAYOUT_FOR_BO) &&
        _mesa_get_format_base_format(format) == GL_DEPTH_STENCIL &&
        (brw->must_use_separate_stencil ||
-       (brw->has_separate_stencil &&
-         intel_miptree_wants_hiz_buffer(brw, mt)))) {
+       (brw->has_separate_stencil && intel_miptree_supports_hiz(brw, mt)))) {
       uint32_t stencil_flags = MIPTREE_LAYOUT_ACCELERATED_UPLOAD;
       if (brw->gen == 6) {
-         stencil_flags |= MIPTREE_LAYOUT_FORCE_ALL_SLICE_AT_LOD |
-                          MIPTREE_LAYOUT_TILING_ANY;
+         stencil_flags |= MIPTREE_LAYOUT_TILING_ANY;
       }
 
       mt->stencil_mt = intel_miptree_create(brw,
@@ -572,8 +539,8 @@ intel_miptree_create_layout(struct brw_context *brw,
       }
    }
 
-   if (layout_flags & MIPTREE_LAYOUT_FORCE_ALL_SLICE_AT_LOD)
-      mt->array_layout = ALL_SLICES_AT_EACH_LOD;
+   if (layout_flags & MIPTREE_LAYOUT_GEN6_HIZ_STENCIL)
+      mt->array_layout = GEN6_HIZ_STENCIL;
 
    /*
     * Obey HALIGN_16 constraints for Gen8 and Gen9 buffers which are
@@ -586,7 +553,7 @@ intel_miptree_create_layout(struct brw_context *brw,
     *  7   |      ?         |        ?
     *  6   |      ?         |        ?
     */
-   if (intel_miptree_supports_non_msrt_fast_clear(brw, mt)) {
+   if (intel_miptree_supports_ccs(brw, mt)) {
       if (brw->gen >= 9 || (brw->gen == 8 && num_samples <= 1))
          layout_flags |= MIPTREE_LAYOUT_FORCE_HALIGN16;
    } else if (brw->gen >= 9 && num_samples > 1) {
@@ -601,15 +568,48 @@ intel_miptree_create_layout(struct brw_context *brw,
              (layout_flags & MIPTREE_LAYOUT_FORCE_HALIGN16) == 0);
    }
 
-   brw_miptree_layout(brw, mt, layout_flags);
-
-   if (mt->disable_aux_buffers)
-      assert(mt->msaa_layout != INTEL_MSAA_LAYOUT_CMS);
+   if (!brw_miptree_layout(brw, mt, layout_flags)) {
+      intel_miptree_release(&mt);
+      return NULL;
+   }
 
    return mt;
 }
 
 
+/**
+ * Choose the aux usage for this miptree.  This function must be called fairly
+ * late in the miptree create process after we have a tiling.
+ */
+static void
+intel_miptree_choose_aux_usage(struct brw_context *brw,
+                               struct intel_mipmap_tree *mt)
+{
+   assert(mt->aux_usage == ISL_AUX_USAGE_NONE);
+
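+   /* At most one aux usage is picked: MCS for compressed MSAA layouts,
+    * CCS_E or CCS_D for single-sampled color (CCS_E only on Gen9+ and not
+    * for scanout surfaces), and HiZ for depth formats.
+    */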
+   if (mt->msaa_layout == INTEL_MSAA_LAYOUT_CMS) {
+      mt->aux_usage = ISL_AUX_USAGE_MCS;
+   } else if (intel_tiling_supports_ccs(brw, mt->tiling) &&
+              intel_miptree_supports_ccs(brw, mt)) {
+      if (!unlikely(INTEL_DEBUG & DEBUG_NO_RBC) &&
+          brw->gen >= 9 && !mt->is_scanout &&
+          intel_miptree_supports_ccs_e(brw, mt)) {
+         mt->aux_usage = ISL_AUX_USAGE_CCS_E;
+      } else {
+         mt->aux_usage = ISL_AUX_USAGE_CCS_D;
+      }
+   } else if (intel_miptree_supports_hiz(brw, mt)) {
+      mt->aux_usage = ISL_AUX_USAGE_HIZ;
+   }
+
+   /* We can do fast-clear on all auxiliary surface types that are
+    * allocated through the normal texture creation paths.
+    */
+   if (mt->aux_usage != ISL_AUX_USAGE_NONE)
+      mt->supports_fast_clear = true;
+}
+
+
 /**
  * Choose an appropriate uncompressed format for a requested
  * compressed format, if unsupported.
@@ -649,32 +649,137 @@ intel_lower_compressed_format(struct brw_context *brw, mesa_format format)
    }
 }
 
-/* This function computes Yf/Ys tiled bo size, alignment and pitch. */
-static unsigned long
-intel_get_yf_ys_bo_size(struct intel_mipmap_tree *mt, unsigned *alignment,
-                        unsigned long *pitch)
+/** \brief Assert that the level and layer are valid for the miptree. */
+void
+intel_miptree_check_level_layer(const struct intel_mipmap_tree *mt,
+                                uint32_t level,
+                                uint32_t layer)
+{
+   (void) mt;
+   (void) level;
+   (void) layer;
+
+   assert(level >= mt->first_level);
+   assert(level <= mt->last_level);
+
+   if (mt->surf.size > 0)
+      assert(layer < (mt->surf.dim == ISL_SURF_DIM_3D ?
+                         minify(mt->surf.phys_level0_sa.depth, level) :
+                         mt->surf.phys_level0_sa.array_len));
+   else
+      assert(layer < mt->level[level].depth);
+}
+
+static enum isl_aux_state **
+create_aux_state_map(struct intel_mipmap_tree *mt,
+                     enum isl_aux_state initial)
+{
+   const uint32_t levels = mt->last_level + 1;
+
+   uint32_t total_slices = 0;
+   for (uint32_t level = 0; level < levels; level++)
+      total_slices += mt->level[level].depth;
+
+   const size_t per_level_array_size = levels * sizeof(enum isl_aux_state *);
+
+   /* We're going to allocate a single chunk of data for both the per-level
+    * reference array and the arrays of aux_state.  This makes cleanup
+    * significantly easier.
+    */
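+   /* The resulting layout is:
+    *
+    *    [ per-level pointer array | level 0 states | level 1 states | ... ]
+    *
+    * with per_level_arr[level] pointing into the trailing state arrays.
+    */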
+   const size_t total_size = per_level_array_size +
+                             total_slices * sizeof(enum isl_aux_state);
+   void *data = malloc(total_size);
+   if (data == NULL)
+      return NULL;
+
+   enum isl_aux_state **per_level_arr = data;
+   enum isl_aux_state *s = data + per_level_array_size;
+   for (uint32_t level = 0; level < levels; level++) {
+      per_level_arr[level] = s;
+      for (uint32_t a = 0; a < mt->level[level].depth; a++)
+         *(s++) = initial;
+   }
+   assert((void *)s == data + total_size);
+
+   return per_level_arr;
+}
+
+static void
+free_aux_state_map(enum isl_aux_state **state)
+{
+   free(state);
+}
+
+static struct intel_mipmap_tree *
+make_surface(struct brw_context *brw, GLenum target, mesa_format format,
+             unsigned first_level, unsigned last_level,
+             unsigned width0, unsigned height0, unsigned depth0,
+             unsigned num_samples, enum isl_tiling isl_tiling,
+             isl_surf_usage_flags_t isl_usage_flags, uint32_t alloc_flags,
+             struct brw_bo *bo)
 {
-   uint32_t tile_width, tile_height;
-   unsigned long stride, size, aligned_y;
+   struct intel_mipmap_tree *mt = calloc(sizeof(*mt), 1);
+   if (!mt)
+      return NULL;
+
+   if (!create_mapping_table(target, first_level, last_level, depth0,
+                             mt->level)) {
+      free(mt);
+      return NULL;
+   }
+
+   if (target == GL_TEXTURE_CUBE_MAP ||
+       target == GL_TEXTURE_CUBE_MAP_ARRAY)
+      isl_usage_flags |= ISL_SURF_USAGE_CUBE_BIT;
+
+   DBG("%s: %s %s %ux %u:%u:%u %d..%d <-- %p\n",
+        __func__,
+       _mesa_enum_to_string(target),
+       _mesa_get_format_name(format),
+       num_samples, width0, height0, depth0,
+       first_level, last_level, mt);
+
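+   /* ISL keeps 3D depth and array length separate: a 3D surface carries
+    * depth0 in .depth with .array_len == 1, every other target does the
+    * opposite.
+    */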
+   struct isl_surf_init_info init_info = {
+      .dim = get_isl_surf_dim(target),
+      .format = translate_tex_format(brw, format, false),
+      .width = width0,
+      .height = height0,
+      .depth = target == GL_TEXTURE_3D ? depth0 : 1,
+      .levels = last_level - first_level + 1,
+      .array_len = target == GL_TEXTURE_3D ? 1 : depth0,
+      .samples = MAX2(num_samples, 1),
+      .usage = isl_usage_flags,
+      .tiling_flags = 1u << isl_tiling
+   };
 
-   assert(mt->tr_mode != INTEL_MIPTREE_TRMODE_NONE);
-   intel_get_tile_dims(mt->tiling, mt->tr_mode, mt->cpp,
-                       &tile_width, &tile_height);
+   if (!isl_surf_init_s(&brw->isl_dev, &mt->surf, &init_info))
+      goto fail;
 
-   aligned_y = ALIGN(mt->total_height, tile_height);
-   stride = mt->total_width * mt->cpp;
-   stride = ALIGN(stride, tile_width);
-   size = stride * aligned_y;
+   assert(mt->surf.size % mt->surf.row_pitch == 0);
 
-   if (mt->tr_mode == INTEL_MIPTREE_TRMODE_YF) {
-      assert(size % 4096 == 0);
-      *alignment = 4096;
+   if (!bo) {
+      mt->bo = brw_bo_alloc_tiled(brw->bufmgr, "isl-miptree",
+                                  mt->surf.size,
+                                  isl_tiling_to_bufmgr_tiling(isl_tiling),
+                                  mt->surf.row_pitch, alloc_flags);
+      if (!mt->bo)
+         goto fail;
    } else {
-      assert(size % (64 * 1024) == 0);
-      *alignment = 64 * 1024;
+      mt->bo = bo;
    }
-   *pitch = stride;
-   return size;
+
+   mt->first_level = first_level;
+   mt->last_level = last_level;
+   mt->target = target;
+   mt->format = format;
+   mt->refcount = 1;
+   mt->aux_state = NULL;
+
+   return mt;
+
+fail:
+   intel_miptree_release(&mt);
+   return NULL;
 }
 
 static struct intel_mipmap_tree *
@@ -689,6 +794,13 @@ miptree_create(struct brw_context *brw,
                GLuint num_samples,
                uint32_t layout_flags)
 {
+   if (brw->gen == 6 && format == MESA_FORMAT_S_UINT8)
+      return make_surface(brw, target, format, first_level, last_level,
+                          width0, height0, depth0, num_samples, ISL_TILING_W,
+                          ISL_SURF_USAGE_STENCIL_BIT |
+                          ISL_SURF_USAGE_TEXTURE_BIT,
+                          BO_ALLOC_FOR_RENDER, NULL);
+
    struct intel_mipmap_tree *mt;
    mesa_format tex_format = format;
    mesa_format etc_format = MESA_FORMAT_NONE;
@@ -703,13 +815,8 @@ miptree_create(struct brw_context *brw,
                                     first_level, last_level, width0,
                                     height0, depth0, num_samples,
                                     layout_flags);
-   /*
-    * pitch == 0 || height == 0  indicates the null texture
-    */
-   if (!mt || !mt->total_width || !mt->total_height) {
-      intel_miptree_release(&mt);
+   if (!mt)
       return NULL;
-   }
 
    if (mt->tiling == (I915_TILING_Y | I915_TILING_X))
       mt->tiling = I915_TILING_Y;
@@ -717,33 +824,27 @@ miptree_create(struct brw_context *brw,
    if (layout_flags & MIPTREE_LAYOUT_ACCELERATED_UPLOAD)
       alloc_flags |= BO_ALLOC_FOR_RENDER;
 
-   unsigned long pitch;
    mt->etc_format = etc_format;
 
-   if (mt->tr_mode != INTEL_MIPTREE_TRMODE_NONE) {
-      unsigned alignment = 0;
-      unsigned long size;
-      size = intel_get_yf_ys_bo_size(mt, &alignment, &pitch);
-      assert(size);
-      mt->bo = drm_intel_bo_alloc_for_render(brw->bufmgr, "miptree",
-                                             size, alignment);
+   if (format == MESA_FORMAT_S_UINT8) {
+      /* Align to size of W tile, 64x64. */
+      mt->bo = brw_bo_alloc_tiled_2d(brw->bufmgr, "miptree",
+                                     ALIGN(mt->total_width, 64),
+                                     ALIGN(mt->total_height, 64),
+                                     mt->cpp, mt->tiling, &mt->pitch,
+                                     alloc_flags);
    } else {
-      if (format == MESA_FORMAT_S_UINT8) {
-         /* Align to size of W tile, 64x64. */
-         mt->bo = drm_intel_bo_alloc_tiled(brw->bufmgr, "miptree",
-                                           ALIGN(mt->total_width, 64),
-                                           ALIGN(mt->total_height, 64),
-                                           mt->cpp, &mt->tiling, &pitch,
-                                           alloc_flags);
-      } else {
-         mt->bo = drm_intel_bo_alloc_tiled(brw->bufmgr, "miptree",
-                                           mt->total_width, mt->total_height,
-                                           mt->cpp, &mt->tiling, &pitch,
-                                           alloc_flags);
-      }
+      mt->bo = brw_bo_alloc_tiled_2d(brw->bufmgr, "miptree",
+                                     mt->total_width, mt->total_height,
+                                     mt->cpp, mt->tiling, &mt->pitch,
+                                     alloc_flags);
    }
 
-   mt->pitch = pitch;
+   if (layout_flags & MIPTREE_LAYOUT_FOR_SCANOUT)
+      mt->bo->cache_coherent = false;
+
+   if (!(layout_flags & MIPTREE_LAYOUT_DISABLE_AUX))
+      intel_miptree_choose_aux_usage(brw, mt);
 
    return mt;
 }
@@ -772,7 +873,6 @@ intel_miptree_create(struct brw_context *brw,
     */
    if (brw->gen < 6 && mt->bo->size >= brw->max_gtt_map_object_size &&
        mt->tiling == I915_TILING_Y) {
-      unsigned long pitch = mt->pitch;
       const uint32_t alloc_flags =
          (layout_flags & MIPTREE_LAYOUT_ACCELERATED_UPLOAD) ?
          BO_ALLOC_FOR_RENDER : 0;
@@ -780,11 +880,10 @@ intel_miptree_create(struct brw_context *brw,
                  mt->total_width, mt->total_height);
 
       mt->tiling = I915_TILING_X;
-      drm_intel_bo_unreference(mt->bo);
-      mt->bo = drm_intel_bo_alloc_tiled(brw->bufmgr, "miptree",
-                                  mt->total_width, mt->total_height, mt->cpp,
-                                  &mt->tiling, &pitch, alloc_flags);
-      mt->pitch = pitch;
+      brw_bo_unreference(mt->bo);
+      mt->bo = brw_bo_alloc_tiled_2d(brw->bufmgr, "miptree",
+                                     mt->total_width, mt->total_height, mt->cpp,
+                                     mt->tiling, &mt->pitch, alloc_flags);
    }
 
    mt->offset = 0;
@@ -803,29 +902,14 @@ intel_miptree_create(struct brw_context *brw,
       }
    }
 
-   /* If this miptree is capable of supporting fast color clears, set
-    * fast_clear_state appropriately to ensure that fast clears will occur.
-    * Allocation of the MCS miptree will be deferred until the first fast
-    * clear actually occurs or when compressed single sampled buffer is
-    * written by the GPU for the first time.
+   /* Since CCS_E can compress more than just clear color, we create the CCS
+    * for it up-front.  For CCS_D which only compresses clears, we create the
+    * CCS on-demand when a clear occurs that wants one.
     */
-   if (intel_tiling_supports_non_msrt_mcs(brw, mt->tiling) &&
-       intel_miptree_supports_non_msrt_fast_clear(brw, mt)) {
-      mt->no_ccs = false;
-      assert(brw->gen < 8 || mt->halign == 16 || num_samples <= 1);
-
-      /* On Gen9+ clients are not currently capable of consuming compressed
-       * single-sampled buffers. Disabling compression allows us to skip
-       * resolves.
-       */
-      const bool lossless_compression_disabled = INTEL_DEBUG & DEBUG_NO_RBC;
-      const bool is_lossless_compressed =
-         unlikely(!lossless_compression_disabled) &&
-         brw->gen >= 9 && !mt->is_scanout &&
-         intel_miptree_supports_lossless_compressed(brw, mt);
-
-      if (is_lossless_compressed) {
-         intel_miptree_alloc_non_msrt_mcs(brw, mt, is_lossless_compressed);
+   if (mt->aux_usage == ISL_AUX_USAGE_CCS_E) {
+      if (!intel_miptree_alloc_ccs(brw, mt)) {
+         intel_miptree_release(&mt);
+         return NULL;
       }
    }
 
@@ -834,7 +918,7 @@ intel_miptree_create(struct brw_context *brw,
 
 struct intel_mipmap_tree *
 intel_miptree_create_for_bo(struct brw_context *brw,
-                            drm_intel_bo *bo,
+                            struct brw_bo *bo,
                             mesa_format format,
                             uint32_t offset,
                             uint32_t width,
@@ -845,9 +929,24 @@ intel_miptree_create_for_bo(struct brw_context *brw,
 {
    struct intel_mipmap_tree *mt;
    uint32_t tiling, swizzle;
-   GLenum target;
+   const GLenum target = depth > 1 ? GL_TEXTURE_2D_ARRAY : GL_TEXTURE_2D;
+
+   if (brw->gen == 6 && format == MESA_FORMAT_S_UINT8) {
+      mt = make_surface(brw, target, MESA_FORMAT_S_UINT8,
+                        0, 0, width, height, depth, 1, ISL_TILING_W,
+                        ISL_SURF_USAGE_STENCIL_BIT |
+                        ISL_SURF_USAGE_TEXTURE_BIT,
+                        BO_ALLOC_FOR_RENDER, bo);
+      if (!mt)
+         return NULL;
+
+      assert(bo->size >= mt->surf.size);
+
+      brw_bo_reference(bo);
+      return mt;
+   }
 
-   drm_intel_bo_get_tiling(bo, &tiling, &swizzle);
+   brw_bo_get_tiling(bo, &tiling, &swizzle);
 
    /* Nothing will be able to use this miptree with the BO if the offset isn't
     * aligned.
@@ -860,8 +959,6 @@ intel_miptree_create_for_bo(struct brw_context *brw,
     */
    assert(pitch >= 0);
 
-   target = depth > 1 ? GL_TEXTURE_2D_ARRAY : GL_TEXTURE_2D;
-
    /* The BO already has a tiling format and we shouldn't confuse the lower
     * layers by making it try to find a tiling format again.
     */
@@ -876,12 +973,132 @@ intel_miptree_create_for_bo(struct brw_context *brw,
    if (!mt)
       return NULL;
 
-   drm_intel_bo_reference(bo);
+   brw_bo_reference(bo);
    mt->bo = bo;
    mt->pitch = pitch;
    mt->offset = offset;
    mt->tiling = tiling;
 
+   if (!(layout_flags & MIPTREE_LAYOUT_DISABLE_AUX)) {
+      intel_miptree_choose_aux_usage(brw, mt);
+
+      /* Since CCS_E can compress more than just clear color, we create the
+       * CCS for it up-front.  For CCS_D which only compresses clears, we
+       * create the CCS on-demand when a clear occurs that wants one.
+       */
+      if (mt->aux_usage == ISL_AUX_USAGE_CCS_E) {
+         if (!intel_miptree_alloc_ccs(brw, mt)) {
+            intel_miptree_release(&mt);
+            return NULL;
+         }
+      }
+   }
+
+   return mt;
+}
+
+static struct intel_mipmap_tree *
+miptree_create_for_planar_image(struct brw_context *brw,
+                                __DRIimage *image, GLenum target)
+{
+   struct intel_image_format *f = image->planar_format;
+   struct intel_mipmap_tree *planar_mt;
+
+   for (int i = 0; i < f->nplanes; i++) {
+      const int index = f->planes[i].buffer_index;
+      const uint32_t dri_format = f->planes[i].dri_format;
+      const mesa_format format = driImageFormatToGLFormat(dri_format);
+      const uint32_t width = image->width >> f->planes[i].width_shift;
+      const uint32_t height = image->height >> f->planes[i].height_shift;
+
+      /* Disable creation of the texture's aux buffers because the driver
+       * exposes no EGL API to manage them. That is, there is no API for
+       * resolving the aux buffer's content to the main buffer nor for
+       * invalidating the aux buffer's content.
+       */
+      struct intel_mipmap_tree *mt =
+         intel_miptree_create_for_bo(brw, image->bo, format,
+                                     image->offsets[index],
+                                     width, height, 1,
+                                     image->strides[index],
+                                     MIPTREE_LAYOUT_DISABLE_AUX);
+      if (mt == NULL)
+         return NULL;
+
+      mt->target = target;
+      mt->total_width = width;
+      mt->total_height = height;
+
+      if (i == 0)
+         planar_mt = mt;
+      else
+         planar_mt->plane[i - 1] = mt;
+   }
+
+   return planar_mt;
+}
+
+struct intel_mipmap_tree *
+intel_miptree_create_for_dri_image(struct brw_context *brw,
+                                   __DRIimage *image, GLenum target)
+{
+   if (image->planar_format && image->planar_format->nplanes > 0)
+      return miptree_create_for_planar_image(brw, image, target);
+
+   mesa_format format = image->format;
+
+   if (!brw->ctx.TextureFormatSupported[format]) {
+      /* The texture storage paths in core Mesa detect if the driver does not
+       * support the user-requested format, and then searches for a
+       * fallback format. The DRIimage code bypasses core Mesa, though. So we
+       * do the fallbacks here for important formats.
+       *
+       * We must support DRM_FOURCC_XBGR8888 textures because the Android
+       * framework produces HAL_PIXEL_FORMAT_RGBX8888 winsys surfaces, which
+       * the Chrome OS compositor consumes as dma_buf EGLImages.
+       */
+      format = _mesa_format_fallback_rgbx_to_rgba(format);
+   }
+
+   if (!brw->ctx.TextureFormatSupported[format])
+      return NULL;
+
+   /* Disable creation of the texture's aux buffers because the driver exposes
+    * no EGL API to manage them. That is, there is no API for resolving the aux
+    * buffer's content to the main buffer nor for invalidating the aux buffer's
+    * content.
+    */
+   struct intel_mipmap_tree *mt =
+      intel_miptree_create_for_bo(brw, image->bo, format,
+                                  image->offset, image->width, image->height, 1,
+                                  image->pitch,
+                                  MIPTREE_LAYOUT_DISABLE_AUX);
+   if (mt == NULL)
+      return NULL;
+
+   mt->target = target;
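+   /* Record the intra-tile offset of the image (tile_x/tile_y) in the level
+    * and slice offsets so that intel_miptree_get_image_offset() accounts for
+    * it, and grow total_width/height by the same amount.
+    */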
+   mt->level[0].level_x = image->tile_x;
+   mt->level[0].level_y = image->tile_y;
+   mt->level[0].slice[0].x_offset = image->tile_x;
+   mt->level[0].slice[0].y_offset = image->tile_y;
+   mt->total_width += image->tile_x;
+   mt->total_height += image->tile_y;
+
+   /* From "OES_EGL_image" error reporting. We report GL_INVALID_OPERATION
+    * for EGL images from non-tile-aligned surfaces in gen4 hw and earlier,
+    * which has trouble resolving back to the destination image due to
+    * alignment issues.
+    */
+   if (!brw->has_surface_tile_offset) {
+      uint32_t draw_x, draw_y;
+      intel_miptree_get_tile_offsets(mt, 0, 0, &draw_x, &draw_y);
+
+      if (draw_x != 0 || draw_y != 0) {
+         _mesa_error(&brw->ctx, GL_INVALID_OPERATION, __func__);
+         intel_miptree_release(&mt);
+         return NULL;
+      }
+   }
+
    return mt;
 }
 
@@ -898,7 +1115,7 @@ intel_miptree_create_for_bo(struct brw_context *brw,
 void
 intel_update_winsys_renderbuffer_miptree(struct brw_context *intel,
                                          struct intel_renderbuffer *irb,
-                                         drm_intel_bo *bo,
+                                         struct brw_bo *bo,
                                          uint32_t width, uint32_t height,
                                          uint32_t pitch)
 {
@@ -926,16 +1143,6 @@ intel_update_winsys_renderbuffer_miptree(struct brw_context *intel,
    if (!singlesample_mt)
       goto fail;
 
-   /* If this miptree is capable of supporting fast color clears, set
-    * mcs_state appropriately to ensure that fast clears will occur.
-    * Allocation of the MCS miptree will be deferred until the first fast
-    * clear actually occurs.
-    */
-   if (intel_tiling_supports_non_msrt_mcs(intel, singlesample_mt->tiling) &&
-       intel_miptree_supports_non_msrt_fast_clear(intel, singlesample_mt)) {
-      singlesample_mt->fast_clear_state = INTEL_FAST_CLEAR_STATE_RESOLVED;
-   }
-
    if (num_samples == 0) {
       intel_miptree_release(&irb->mt);
       irb->mt = singlesample_mt;
@@ -990,7 +1197,7 @@ intel_miptree_create_for_renderbuffer(struct brw_context *brw,
    if (!mt)
       goto fail;
 
-   if (intel_miptree_wants_hiz_buffer(brw, mt)) {
+   if (mt->aux_usage == ISL_AUX_USAGE_HIZ) {
       ok = intel_miptree_alloc_hiz(brw, mt);
       if (!ok)
          goto fail;
@@ -1020,6 +1227,16 @@ intel_miptree_reference(struct intel_mipmap_tree **dst,
    *dst = src;
 }
 
+static void
+intel_miptree_aux_buffer_free(struct intel_miptree_aux_buffer *aux_buf)
+{
+   if (aux_buf == NULL)
+      return;
+
+   brw_bo_unreference(aux_buf->bo);
+
+   free(aux_buf);
+}
 
 void
 intel_miptree_release(struct intel_mipmap_tree **mt)
@@ -1033,21 +1250,12 @@ intel_miptree_release(struct intel_mipmap_tree **mt)
 
       DBG("%s deleting %p\n", __func__, *mt);
 
-      drm_intel_bo_unreference((*mt)->bo);
+      brw_bo_unreference((*mt)->bo);
       intel_miptree_release(&(*mt)->stencil_mt);
       intel_miptree_release(&(*mt)->r8stencil_mt);
-      if ((*mt)->hiz_buf) {
-         if ((*mt)->hiz_buf->mt)
-            intel_miptree_release(&(*mt)->hiz_buf->mt);
-         else
-            drm_intel_bo_unreference((*mt)->hiz_buf->aux_base.bo);
-         free((*mt)->hiz_buf);
-      }
-      if ((*mt)->mcs_buf) {
-         drm_intel_bo_unreference((*mt)->mcs_buf->bo);
-         free((*mt)->mcs_buf);
-      }
-      intel_resolve_map_clear(&(*mt)->hiz_map);
+      intel_miptree_aux_buffer_free((*mt)->hiz_buf);
+      intel_miptree_aux_buffer_free((*mt)->mcs_buf);
+      free_aux_state_map((*mt)->aux_state);
 
       intel_miptree_release(&(*mt)->plane[0]);
       intel_miptree_release(&(*mt)->plane[1]);
@@ -1131,6 +1339,21 @@ intel_miptree_match_image(struct intel_mipmap_tree *mt,
    if (mt->target == GL_TEXTURE_CUBE_MAP)
       depth = 6;
 
+   if (mt->surf.size > 0) {
+      if (level >= mt->surf.levels)
+         return false;
+
+      const unsigned level_depth =
+         mt->surf.dim == ISL_SURF_DIM_3D ?
+            minify(mt->surf.logical_level0_px.depth, level) :
+            mt->surf.logical_level0_px.array_len;
+
+      return width == minify(mt->surf.logical_level0_px.width, level) &&
+             height == minify(mt->surf.logical_level0_px.height, level) &&
+             depth == level_depth &&
+             MAX2(image->NumSamples, 1) == mt->surf.samples;
+   }
+
    int level_depth = mt->level[level].depth;
    if (mt->num_samples > 1) {
       switch (mt->msaa_layout) {
@@ -1173,9 +1396,8 @@ intel_miptree_set_level_info(struct intel_mipmap_tree *mt,
    DBG("%s level %d, depth %d, offset %d,%d\n", __func__,
        level, d, x, y);
 
-   assert(mt->level[level].slice == NULL);
+   assert(mt->level[level].slice);
 
-   mt->level[level].slice = calloc(d, sizeof(*mt->level[0].slice));
    mt->level[level].slice[0].x_offset = mt->level[level].level_x;
    mt->level[level].slice[0].y_offset = mt->level[level].level_y;
 }
@@ -1205,6 +1427,25 @@ intel_miptree_get_image_offset(const struct intel_mipmap_tree *mt,
                               GLuint level, GLuint slice,
                               GLuint *x, GLuint *y)
 {
+   if (mt->surf.size > 0) {
+      uint32_t x_offset_sa, y_offset_sa;
+
+      /* The given level is relative to level zero while the miptree may
+       * represent just a subset of all levels starting from 'first_level'.
+       */
+      assert(level >= mt->first_level);
+      level -= mt->first_level;
+
+      const unsigned z = mt->surf.dim == ISL_SURF_DIM_3D ? slice : 0;
+      slice = mt->surf.dim == ISL_SURF_DIM_3D ? 0 : slice;
+      isl_surf_get_image_offset_sa(&mt->surf, level, slice, z,
+                                   &x_offset_sa, &y_offset_sa);
+
+      *x = x_offset_sa;
+      *y = y_offset_sa;
+      return;
+   }
+
    assert(slice < mt->level[level].depth);
 
    *x = mt->level[level].slice[slice].x_offset;
@@ -1218,53 +1459,24 @@ intel_miptree_get_image_offset(const struct intel_mipmap_tree *mt,
  * and tile_h is set to 1.
  */
 void
-intel_get_tile_dims(uint32_t tiling, uint32_t tr_mode, uint32_t cpp,
+intel_get_tile_dims(uint32_t tiling, uint32_t cpp,
                     uint32_t *tile_w, uint32_t *tile_h)
 {
-   if (tr_mode == INTEL_MIPTREE_TRMODE_NONE) {
-      switch (tiling) {
-      case I915_TILING_X:
-         *tile_w = 512;
-         *tile_h = 8;
-         break;
-      case I915_TILING_Y:
-         *tile_w = 128;
-         *tile_h = 32;
-         break;
-      case I915_TILING_NONE:
-         *tile_w = cpp;
-         *tile_h = 1;
-         break;
-      default:
-         unreachable("not reached");
-      }
-   } else {
-      uint32_t aspect_ratio = 1;
-      assert(_mesa_is_pow_two(cpp));
-
-      switch (cpp) {
-      case 1:
-         *tile_h = 64;
-         break;
-      case 2:
-      case 4:
-         *tile_h = 32;
-         break;
-      case 8:
-      case 16:
-         *tile_h = 16;
-         break;
-      default:
-         unreachable("not reached");
-      }
-
-      if (cpp == 2 || cpp == 8)
-         aspect_ratio = 2;
-
-      if (tr_mode == INTEL_MIPTREE_TRMODE_YS)
-         *tile_h *= 4;
-
-      *tile_w = *tile_h * aspect_ratio * cpp;
+   switch (tiling) {
+   case I915_TILING_X:
+      *tile_w = 512;
+      *tile_h = 8;
+      break;
+   case I915_TILING_Y:
+      *tile_w = 128;
+      *tile_h = 32;
+      break;
+   case I915_TILING_NONE:
+      *tile_w = cpp;
+      *tile_h = 1;
+      break;
+   default:
+      unreachable("not reached");
    }
 }
 
@@ -1275,12 +1487,12 @@ intel_get_tile_dims(uint32_t tiling, uint32_t tr_mode, uint32_t cpp,
  * untiled, the masks are set to 0.
  */
 void
-intel_get_tile_masks(uint32_t tiling, uint32_t tr_mode, uint32_t cpp,
+intel_get_tile_masks(uint32_t tiling, uint32_t cpp,
                      uint32_t *mask_x, uint32_t *mask_y)
 {
    uint32_t tile_w_bytes, tile_h;
 
-   intel_get_tile_dims(tiling, tr_mode, cpp, &tile_w_bytes, &tile_h);
+   intel_get_tile_dims(tiling, cpp, &tile_w_bytes, &tile_h);
 
    *mask_x = tile_w_bytes / cpp - 1;
    *mask_y = tile_h - 1;
@@ -1334,7 +1546,7 @@ intel_miptree_get_tile_offsets(const struct intel_mipmap_tree *mt,
    uint32_t x, y;
    uint32_t mask_x, mask_y;
 
-   intel_get_tile_masks(mt->tiling, mt->tr_mode, mt->cpp, &mask_x, &mask_y);
+   intel_get_tile_masks(mt->tiling, mt->cpp, &mask_x, &mask_y);
    intel_miptree_get_image_offset(mt, level, slice, &x, &y);
 
    *tile_x = x & mask_x;
@@ -1345,26 +1557,26 @@ intel_miptree_get_tile_offsets(const struct intel_mipmap_tree *mt,
 
 static void
 intel_miptree_copy_slice_sw(struct brw_context *brw,
-                            struct intel_mipmap_tree *dst_mt,
                             struct intel_mipmap_tree *src_mt,
-                            int level,
-                            int slice,
-                            int width,
-                            int height)
+                            unsigned src_level, unsigned src_layer,
+                            struct intel_mipmap_tree *dst_mt,
+                            unsigned dst_level, unsigned dst_layer,
+                            unsigned width, unsigned height)
 {
    void *src, *dst;
    ptrdiff_t src_stride, dst_stride;
-   int cpp = dst_mt->cpp;
+   const unsigned cpp = dst_mt->surf.size > 0 ?
+      (isl_format_get_layout(dst_mt->surf.format)->bpb / 8) : dst_mt->cpp;
 
    intel_miptree_map(brw, src_mt,
-                     level, slice,
+                     src_level, src_layer,
                      0, 0,
                      width, height,
                      GL_MAP_READ_BIT | BRW_MAP_DIRECT_BIT,
                      &src, &src_stride);
 
    intel_miptree_map(brw, dst_mt,
-                     level, slice,
+                     dst_level, dst_layer,
                      0, 0,
                      width, height,
                      GL_MAP_WRITE_BIT | GL_MAP_INVALIDATE_RANGE_BIT |
@@ -1390,8 +1602,8 @@ intel_miptree_copy_slice_sw(struct brw_context *brw,
       }
    }
 
-   intel_miptree_unmap(brw, dst_mt, level, slice);
-   intel_miptree_unmap(brw, src_mt, level, slice);
+   intel_miptree_unmap(brw, dst_mt, dst_level, dst_layer);
+   intel_miptree_unmap(brw, src_mt, src_level, src_layer);
 
    /* Don't forget to copy the stencil data over, too.  We could have skipped
     * passing BRW_MAP_DIRECT_BIT, but that would have meant intel_miptree_map
@@ -1400,31 +1612,43 @@ intel_miptree_copy_slice_sw(struct brw_context *brw,
     */
    if (dst_mt->stencil_mt) {
       assert(src_mt->stencil_mt);
-      intel_miptree_copy_slice_sw(brw, dst_mt->stencil_mt, src_mt->stencil_mt,
-                                  level, slice, width, height);
+      intel_miptree_copy_slice_sw(brw,
+                                  src_mt->stencil_mt, src_level, src_layer,
+                                  dst_mt->stencil_mt, dst_level, dst_layer,
+                                  width, height);
    }
 }
 
-static void
+void
 intel_miptree_copy_slice(struct brw_context *brw,
-                        struct intel_mipmap_tree *dst_mt,
-                        struct intel_mipmap_tree *src_mt,
-                        int level,
-                        int face,
-                        int depth)
+                         struct intel_mipmap_tree *src_mt,
+                         unsigned src_level, unsigned src_layer,
+                         struct intel_mipmap_tree *dst_mt,
+                         unsigned dst_level, unsigned dst_layer)
 
 {
    mesa_format format = src_mt->format;
-   uint32_t width = minify(src_mt->physical_width0, level - src_mt->first_level);
-   uint32_t height = minify(src_mt->physical_height0, level - src_mt->first_level);
-   int slice;
-
-   if (face > 0)
-      slice = face;
-   else
-      slice = depth;
+   uint32_t width, height;
+
+   if (src_mt->surf.size > 0) {
+      width = minify(src_mt->surf.phys_level0_sa.width,
+                     src_level - src_mt->first_level);
+      height = minify(src_mt->surf.phys_level0_sa.height,
+                      src_level - src_mt->first_level);
+
+      if (src_mt->surf.dim == ISL_SURF_DIM_3D)
+         assert(src_layer < minify(src_mt->surf.phys_level0_sa.depth,
+                                   src_level - src_mt->first_level));
+      else
+         assert(src_layer < src_mt->surf.phys_level0_sa.array_len);
+   } else {
+      width = minify(src_mt->physical_width0,
+                     src_level - src_mt->first_level);
+      height = minify(src_mt->physical_height0,
+                      src_level - src_mt->first_level);
+      assert(src_layer < src_mt->level[src_level].depth);
+   }
 
-   assert(depth < src_mt->level[level].depth);
    assert(src_mt->format == dst_mt->format);
 
    if (dst_mt->compressed) {
@@ -1440,15 +1664,17 @@ intel_miptree_copy_slice(struct brw_context *brw,
     */
    if (src_mt->stencil_mt) {
       intel_miptree_copy_slice_sw(brw,
-                                  dst_mt, src_mt,
-                                  level, slice,
+                                  src_mt, src_level, src_layer,
+                                  dst_mt, dst_level, dst_layer,
                                   width, height);
       return;
    }
 
    uint32_t dst_x, dst_y, src_x, src_y;
-   intel_miptree_get_image_offset(dst_mt, level, slice, &dst_x, &dst_y);
-   intel_miptree_get_image_offset(src_mt, level, slice, &src_x, &src_y);
+   intel_miptree_get_image_offset(dst_mt, dst_level, dst_layer,
+                                  &dst_x, &dst_y);
+   intel_miptree_get_image_offset(src_mt, src_level, src_layer,
+                                  &src_x, &src_y);
 
    DBG("validate blit mt %s %p %d,%d/%d -> mt %s %p %d,%d/%d (%dx%d)\n",
        _mesa_get_format_name(src_mt->format),
@@ -1458,13 +1684,15 @@ intel_miptree_copy_slice(struct brw_context *brw,
        width, height);
 
    if (!intel_miptree_blit(brw,
-                           src_mt, level, slice, 0, 0, false,
-                           dst_mt, level, slice, 0, 0, false,
+                           src_mt, src_level, src_layer, 0, 0, false,
+                           dst_mt, dst_level, dst_layer, 0, 0, false,
                            width, height, GL_COPY)) {
       perf_debug("miptree validate blit for %s failed\n",
                  _mesa_get_format_name(format));
 
-      intel_miptree_copy_slice_sw(brw, dst_mt, src_mt, level, slice,
+      intel_miptree_copy_slice_sw(brw,
+                                  src_mt, src_level, src_layer,
+                                  dst_mt, dst_level, dst_layer,
                                   width, height);
    }
 }
@@ -1487,17 +1715,28 @@ intel_miptree_copy_teximage(struct brw_context *brw,
    struct intel_texture_object *intel_obj =
       intel_texture_object(intelImage->base.Base.TexObject);
    int level = intelImage->base.Base.Level;
-   int face = intelImage->base.Base.Face;
-
-   GLuint depth;
-   if (intel_obj->base.Target == GL_TEXTURE_1D_ARRAY)
-      depth = intelImage->base.Base.Height;
-   else
-      depth = intelImage->base.Base.Depth;
+   const unsigned face = intelImage->base.Base.Face;
+   unsigned start_layer, end_layer;
+
+   if (intel_obj->base.Target == GL_TEXTURE_1D_ARRAY) {
+      assert(face == 0);
+      assert(intelImage->base.Base.Height);
+      start_layer = 0;
+      end_layer = intelImage->base.Base.Height - 1;
+   } else if (face > 0) {
+      start_layer = face;
+      end_layer = face;
+   } else {
+      assert(intelImage->base.Base.Depth);
+      start_layer = 0;
+      end_layer = intelImage->base.Base.Depth - 1;
+   }
 
    if (!invalidate) {
-      for (int slice = 0; slice < depth; slice++) {
-         intel_miptree_copy_slice(brw, dst_mt, src_mt, level, face, slice);
+      for (unsigned i = start_layer; i <= end_layer; i++) {
+         intel_miptree_copy_slice(brw,
+                                  src_mt, level, i,
+                                  dst_mt, level, i);
       }
    }
 
@@ -1522,64 +1761,45 @@ intel_miptree_init_mcs(struct brw_context *brw,
     *
     * Note: the clear value for MCS buffers is all 1's, so we memset to 0xff.
     */
-   const int ret = brw_bo_map_gtt(brw, mt->mcs_buf->bo, "miptree");
-   if (unlikely(ret)) {
+   void *map = brw_bo_map(brw, mt->mcs_buf->bo, MAP_WRITE);
+   if (unlikely(map == NULL)) {
       fprintf(stderr, "Failed to map mcs buffer into GTT\n");
-      drm_intel_bo_unreference(mt->mcs_buf->bo);
+      brw_bo_unreference(mt->mcs_buf->bo);
       free(mt->mcs_buf);
       return;
    }
-   void *data = mt->mcs_buf->bo->virtual;
+   void *data = map;
    memset(data, init_value, mt->mcs_buf->size);
-   drm_intel_bo_unmap(mt->mcs_buf->bo);
+   brw_bo_unmap(mt->mcs_buf->bo);
 }
 
 static struct intel_miptree_aux_buffer *
-intel_mcs_miptree_buf_create(struct brw_context *brw,
-                             struct intel_mipmap_tree *mt,
-                             mesa_format format,
-                             unsigned mcs_width,
-                             unsigned mcs_height,
-                             uint32_t layout_flags)
+intel_alloc_aux_buffer(struct brw_context *brw,
+                       const char *name,
+                       const struct isl_surf *aux_surf,
+                       uint32_t alloc_flags,
+                       struct intel_mipmap_tree *mt)
 {
    struct intel_miptree_aux_buffer *buf = calloc(sizeof(*buf), 1);
-   struct intel_mipmap_tree *temp_mt;
-
    if (!buf)
       return NULL;
 
-   /* From the Ivy Bridge PRM, Vol4 Part1 p76, "MCS Base Address":
-    *
-    *     "The MCS surface must be stored as Tile Y."
+   buf->size = aux_surf->size;
+   buf->pitch = aux_surf->row_pitch;
+   buf->qpitch = isl_surf_get_array_pitch_sa_rows(aux_surf);
+
+   /* ISL has a stricter set of alignment rules than the drm allocator.
+    * Therefore one can pass the ISL dimensions in terms of bytes instead of
+    * trying to recalculate them based on different format block sizes.
     */
-   layout_flags |= MIPTREE_LAYOUT_TILING_Y;
-   temp_mt = miptree_create(brw,
-                            mt->target,
-                            format,
-                            mt->first_level,
-                            mt->last_level,
-                            mcs_width,
-                            mcs_height,
-                            mt->logical_depth0,
-                            0 /* num_samples */,
-                            layout_flags);
-   if (!temp_mt) {
+   buf->bo = brw_bo_alloc_tiled(brw->bufmgr, name, buf->size,
+                                I915_TILING_Y, buf->pitch, alloc_flags);
+   if (!buf->bo) {
       free(buf);
       return NULL;
    }
 
-   buf->bo = temp_mt->bo;
-   buf->offset = temp_mt->offset;
-   buf->size = temp_mt->total_height * temp_mt->pitch;
-   buf->pitch = temp_mt->pitch;
-   buf->qpitch = temp_mt->qpitch;
-
-   /* Just hang on to the BO which backs the AUX buffer; the rest of the miptree
-    * structure should go away. We use miptree create simply as a means to make
-    * sure all the constraints for the buffer are satisfied.
-    */
-   drm_intel_bo_reference(temp_mt->bo);
-   intel_miptree_release(&temp_mt);
+   buf->surf = *aux_surf;
 
    return buf;
 }
@@ -1591,118 +1811,93 @@ intel_miptree_alloc_mcs(struct brw_context *brw,
 {
    assert(brw->gen >= 7); /* MCS only used on Gen7+ */
    assert(mt->mcs_buf == NULL);
-   assert(!mt->disable_aux_buffers);
+   assert(mt->aux_usage == ISL_AUX_USAGE_MCS);
 
-   /* Choose the correct format for the MCS buffer.  All that really matters
-    * is that we allocate the right buffer size, since we'll always be
-    * accessing this miptree using MCS-specific hardware mechanisms, which
-    * infer the correct format based on num_samples.
-    */
-   mesa_format format;
-   switch (num_samples) {
-   case 2:
-   case 4:
-      /* 8 bits/pixel are required for MCS data when using 4x MSAA (2 bits for
-       * each sample).
-       */
-      format = MESA_FORMAT_R_UNORM8;
-      break;
-   case 8:
-      /* 32 bits/pixel are required for MCS data when using 8x MSAA (3 bits
-       * for each sample, plus 8 padding bits).
-       */
-      format = MESA_FORMAT_R_UINT32;
-      break;
-   case 16:
-      /* 64 bits/pixel are required for MCS data when using 16x MSAA (4 bits
-       * for each sample).
-       */
-      format = MESA_FORMAT_RG_UINT32;
-      break;
-   default:
-      unreachable("Unrecognized sample count in intel_miptree_alloc_mcs");
-   };
+   /* Multisampled miptrees only support a single level. */
+   assert(mt->first_level == 0);
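+   /* The MCS is memset to 0xff below (the all-ones clear value), so every
+    * slice starts out in the CLEAR state.
+    */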
+   enum isl_aux_state **aux_state =
+      create_aux_state_map(mt, ISL_AUX_STATE_CLEAR);
+   if (!aux_state)
+      return false;
 
-   mt->mcs_buf =
-      intel_mcs_miptree_buf_create(brw, mt,
-                                   format,
-                                   mt->logical_width0,
-                                   mt->logical_height0,
-                                   MIPTREE_LAYOUT_ACCELERATED_UPLOAD);
-   if (!mt->mcs_buf)
+   struct isl_surf temp_main_surf;
+   struct isl_surf temp_mcs_surf;
+
+   /* First create an ISL representation of the main color surface and let
+    * ISL calculate the equivalent MCS surface for it.
+    */
+   intel_miptree_get_isl_surf(brw, mt, &temp_main_surf);
+   MAYBE_UNUSED bool ok =
+      isl_surf_get_mcs_surf(&brw->isl_dev, &temp_main_surf, &temp_mcs_surf);
+   assert(ok);
+
+   /* The buffer needs to be initialised, which requires mapping it into cpu
+    * space for writing immediately. Therefore do not use the gpu access
+    * flag, which can cause an unnecessary delay if the backing pages happen
+    * to still be in use by the GPU.
+    */
+   const uint32_t alloc_flags = 0;
+   mt->mcs_buf = intel_alloc_aux_buffer(brw, "mcs-miptree",
+                                        &temp_mcs_surf, alloc_flags, mt);
+   if (!mt->mcs_buf) {
+      free(aux_state);
       return false;
+   }
+
+   mt->aux_state = aux_state;
 
    intel_miptree_init_mcs(brw, mt, 0xFF);
-   mt->fast_clear_state = INTEL_FAST_CLEAR_STATE_CLEAR;
 
    return true;
 }
 
-
 bool
-intel_miptree_alloc_non_msrt_mcs(struct brw_context *brw,
-                                 struct intel_mipmap_tree *mt,
-                                 bool is_lossless_compressed)
+intel_miptree_alloc_ccs(struct brw_context *brw,
+                        struct intel_mipmap_tree *mt)
 {
    assert(mt->mcs_buf == NULL);
-   assert(!mt->disable_aux_buffers);
-   assert(!mt->no_ccs);
-
-   /* The format of the MCS buffer is opaque to the driver; all that matters
-    * is that we get its size and pitch right.  We'll pretend that the format
-    * is R32.  Since an MCS tile covers 128 blocks horizontally, and a Y-tiled
-    * R32 buffer is 32 pixels across, we'll need to scale the width down by
-    * the block width and then a further factor of 4.  Since an MCS tile
-    * covers 256 blocks vertically, and a Y-tiled R32 buffer is 32 rows high,
-    * we'll need to scale the height down by the block height and then a
-    * further factor of 8.
-    */
-   const mesa_format format = MESA_FORMAT_R_UINT32;
-   unsigned block_width_px;
-   unsigned block_height;
-   intel_get_non_msrt_mcs_alignment(mt, &block_width_px, &block_height);
-   unsigned width_divisor = block_width_px * 4;
-   unsigned height_divisor = block_height * 8;
-
-   /* The Skylake MCS is twice as tall as the Broadwell MCS.
-    *
-    * In pre-Skylake, each bit in the MCS contained the state of 2 cachelines
-    * in the main surface. In Skylake, it's two bits.  The extra bit
-    * doubles the MCS height, not width, because in Skylake the MCS is always
-    * Y-tiled.
+   assert(mt->aux_usage == ISL_AUX_USAGE_CCS_E ||
+          mt->aux_usage == ISL_AUX_USAGE_CCS_D);
+
+   struct isl_surf temp_main_surf;
+   struct isl_surf temp_ccs_surf;
+
+   /* First create an ISL representation of the main color surface and let
+    * ISL calculate the equivalent CCS surface for it.
     */
-   if (brw->gen >= 9)
-      height_divisor /= 2;
+   intel_miptree_get_isl_surf(brw, mt, &temp_main_surf);
+   if (!isl_surf_get_ccs_surf(&brw->isl_dev, &temp_main_surf, &temp_ccs_surf))
+      return false;
+
+   assert(temp_ccs_surf.size &&
+          (temp_ccs_surf.size % temp_ccs_surf.row_pitch == 0));
 
-   unsigned mcs_width =
-      ALIGN(mt->logical_width0, width_divisor) / width_divisor;
-   unsigned mcs_height =
-      ALIGN(mt->logical_height0, height_divisor) / height_divisor;
-   assert(mt->logical_depth0 == 1);
+   enum isl_aux_state **aux_state =
+      create_aux_state_map(mt, ISL_AUX_STATE_PASS_THROUGH);
+   if (!aux_state)
+      return false;
 
-   uint32_t layout_flags =
-      (brw->gen >= 8) ? MIPTREE_LAYOUT_FORCE_HALIGN16 : 0;
    /* In case of compression the mcs buffer needs to be initialised, which
     * requires mapping it into cpu space for writing immediately. Therefore
     * do not use the gpu access flag, which can cause an unnecessary delay
     * if the backing pages happen to still be in use by the GPU.
     */
-   if (!is_lossless_compressed)
-      layout_flags |= MIPTREE_LAYOUT_ACCELERATED_UPLOAD;
-
-   mt->mcs_buf = intel_mcs_miptree_buf_create(brw, mt,
-                                              format,
-                                              mcs_width,
-                                              mcs_height,
-                                              layout_flags);
-   if (!mt->mcs_buf)
+   const uint32_t alloc_flags =
+      mt->aux_usage == ISL_AUX_USAGE_CCS_E ? 0 : BO_ALLOC_FOR_RENDER;
+   mt->mcs_buf = intel_alloc_aux_buffer(brw, "ccs-miptree",
+                                        &temp_ccs_surf, alloc_flags, mt);
+   if (!mt->mcs_buf) {
+      free(aux_state);
       return false;
+   }
+
+   mt->aux_state = aux_state;
 
    /* From Gen9 onwards single-sampled (non-msrt) auxiliary buffers are
     * used for lossless compression, which requires initialisation similar
     * to multi-sample compression.
     */
-   if (is_lossless_compressed) {
+   if (mt->aux_usage == ISL_AUX_USAGE_CCS_E) {
       /* Hardware sets the auxiliary buffer to all zeroes when it does full
        * resolve. Initialize it accordingly in case the first renderer is
        * the CPU (or some other non-compression-aware party).
@@ -1713,7 +1908,6 @@ intel_miptree_alloc_non_msrt_mcs(struct brw_context *brw,
        *    Software needs to initialize MCS with zeros."
        */
       intel_miptree_init_mcs(brw, mt, 0);
-      mt->fast_clear_state = INTEL_FAST_CLEAR_STATE_RESOLVED;
       mt->msaa_layout = INTEL_MSAA_LAYOUT_CMS;
    }
 
@@ -1753,289 +1947,40 @@ intel_miptree_level_enable_hiz(struct brw_context *brw,
    return true;
 }
 
-
-/**
- * Helper for intel_miptree_alloc_hiz() that determines the required hiz
- * buffer dimensions and allocates a bo for the hiz buffer.
- */
-static struct intel_miptree_hiz_buffer *
-intel_gen7_hiz_buf_create(struct brw_context *brw,
-                          struct intel_mipmap_tree *mt)
-{
-   unsigned z_width = mt->logical_width0;
-   unsigned z_height = mt->logical_height0;
-   const unsigned z_depth = MAX2(mt->logical_depth0, 1);
-   unsigned hz_width, hz_height;
-   struct intel_miptree_hiz_buffer *buf = calloc(sizeof(*buf), 1);
-
-   if (!buf)
-      return NULL;
-
-   /* Gen7 PRM Volume 2, Part 1, 11.5.3 "Hierarchical Depth Buffer" documents
-    * adjustments required for Z_Height and Z_Width based on multisampling.
-    */
-   switch (mt->num_samples) {
-   case 0:
-   case 1:
-      break;
-   case 2:
-   case 4:
-      z_width *= 2;
-      z_height *= 2;
-      break;
-   case 8:
-      z_width *= 4;
-      z_height *= 2;
-      break;
-   default:
-      unreachable("unsupported sample count");
-   }
-
-   const unsigned vertical_align = 8; /* 'j' in the docs */
-   const unsigned H0 = z_height;
-   const unsigned h0 = ALIGN(H0, vertical_align);
-   const unsigned h1 = ALIGN(minify(H0, 1), vertical_align);
-   const unsigned Z0 = z_depth;
-
-   /* HZ_Width (bytes) = ceiling(Z_Width / 16) * 16 */
-   hz_width = ALIGN(z_width, 16);
-
-   if (mt->target == GL_TEXTURE_3D) {
-      unsigned H_i = H0;
-      unsigned Z_i = Z0;
-      hz_height = 0;
-      for (unsigned level = mt->first_level; level <= mt->last_level; ++level) {
-         unsigned h_i = ALIGN(H_i, vertical_align);
-         /* sum(i=0 to m; h_i * max(1, floor(Z_Depth/2**i))) */
-         hz_height += h_i * Z_i;
-         H_i = minify(H_i, 1);
-         Z_i = minify(Z_i, 1);
-      }
-      /* HZ_Height =
-       *    (1/2) * sum(i=0 to m; h_i * max(1, floor(Z_Depth/2**i)))
-       */
-      hz_height = DIV_ROUND_UP(hz_height, 2);
-   } else {
-      const unsigned hz_qpitch = h0 + h1 + (12 * vertical_align);
-      /* HZ_Height (rows) = Ceiling ( ( Q_pitch * Z_depth/2) /8 ) * 8 */
-      hz_height = DIV_ROUND_UP(hz_qpitch * Z0, 2 * 8) * 8;
-   }
-
-   unsigned long pitch;
-   uint32_t tiling = I915_TILING_Y;
-   buf->aux_base.bo = drm_intel_bo_alloc_tiled(brw->bufmgr, "hiz",
-                                               hz_width, hz_height, 1,
-                                               &tiling, &pitch,
-                                               BO_ALLOC_FOR_RENDER);
-   if (!buf->aux_base.bo) {
-      free(buf);
-      return NULL;
-   } else if (tiling != I915_TILING_Y) {
-      drm_intel_bo_unreference(buf->aux_base.bo);
-      free(buf);
-      return NULL;
-   }
-
-   buf->aux_base.size = hz_width * hz_height;
-   buf->aux_base.pitch = pitch;
-
-   return buf;
-}
-
-
-/**
- * Helper for intel_miptree_alloc_hiz() that determines the required hiz
- * buffer dimensions and allocates a bo for the hiz buffer.
- */
-static struct intel_miptree_hiz_buffer *
-intel_gen8_hiz_buf_create(struct brw_context *brw,
-                          struct intel_mipmap_tree *mt)
-{
-   unsigned z_width = mt->logical_width0;
-   unsigned z_height = mt->logical_height0;
-   const unsigned z_depth = MAX2(mt->logical_depth0, 1);
-   unsigned hz_width, hz_height;
-   struct intel_miptree_hiz_buffer *buf = calloc(sizeof(*buf), 1);
-
-   if (!buf)
-      return NULL;
-
-   /* Gen7 PRM Volume 2, Part 1, 11.5.3 "Hierarchical Depth Buffer" documents
-    * adjustments required for Z_Height and Z_Width based on multisampling.
-    */
-   if (brw->gen < 9) {
-      switch (mt->num_samples) {
-      case 0:
-      case 1:
-         break;
-      case 2:
-      case 4:
-         z_width *= 2;
-         z_height *= 2;
-         break;
-      case 8:
-         z_width *= 4;
-         z_height *= 2;
-         break;
-      default:
-         unreachable("unsupported sample count");
-      }
-   }
-
-   const unsigned vertical_align = 8; /* 'j' in the docs */
-   const unsigned H0 = z_height;
-   const unsigned h0 = ALIGN(H0, vertical_align);
-   const unsigned h1 = ALIGN(minify(H0, 1), vertical_align);
-   const unsigned Z0 = z_depth;
-
-   /* HZ_Width (bytes) = ceiling(Z_Width / 16) * 16 */
-   hz_width = ALIGN(z_width, 16);
-
-   unsigned H_i = H0;
-   unsigned Z_i = Z0;
-   unsigned sum_h_i = 0;
-   unsigned hz_height_3d_sum = 0;
-   for (unsigned level = mt->first_level; level <= mt->last_level; ++level) {
-      unsigned i = level - mt->first_level;
-      unsigned h_i = ALIGN(H_i, vertical_align);
-      /* sum(i=2 to m; h_i) */
-      if (i >= 2) {
-         sum_h_i += h_i;
-      }
-      /* sum(i=0 to m; h_i * max(1, floor(Z_Depth/2**i))) */
-      hz_height_3d_sum += h_i * Z_i;
-      H_i = minify(H_i, 1);
-      Z_i = minify(Z_i, 1);
-   }
-   /* HZ_QPitch = h0 + max(h1, sum(i=2 to m; h_i)) */
-   buf->aux_base.qpitch = h0 + MAX2(h1, sum_h_i);
-
-   if (mt->target == GL_TEXTURE_3D) {
-      /* (1/2) * sum(i=0 to m; h_i * max(1, floor(Z_Depth/2**i))) */
-      hz_height = DIV_ROUND_UP(hz_height_3d_sum, 2);
-   } else {
-      /* HZ_Height (rows) = ceiling( (HZ_QPitch/2)/8) *8 * Z_Depth */
-      hz_height = DIV_ROUND_UP(buf->aux_base.qpitch, 2 * 8) * 8 * Z0;
-   }
-
-   unsigned long pitch;
-   uint32_t tiling = I915_TILING_Y;
-   buf->aux_base.bo = drm_intel_bo_alloc_tiled(brw->bufmgr, "hiz",
-                                               hz_width, hz_height, 1,
-                                               &tiling, &pitch,
-                                               BO_ALLOC_FOR_RENDER);
-   if (!buf->aux_base.bo) {
-      free(buf);
-      return NULL;
-   } else if (tiling != I915_TILING_Y) {
-      drm_intel_bo_unreference(buf->aux_base.bo);
-      free(buf);
-      return NULL;
-   }
-
-   buf->aux_base.size = hz_width * hz_height;
-   buf->aux_base.pitch = pitch;
-
-   return buf;
-}
-
-
-static struct intel_miptree_hiz_buffer *
-intel_hiz_miptree_buf_create(struct brw_context *brw,
-                             struct intel_mipmap_tree *mt)
-{
-   struct intel_miptree_hiz_buffer *buf = calloc(sizeof(*buf), 1);
-   uint32_t layout_flags = MIPTREE_LAYOUT_ACCELERATED_UPLOAD;
-
-   if (brw->gen == 6)
-      layout_flags |= MIPTREE_LAYOUT_FORCE_ALL_SLICE_AT_LOD;
-
-   if (!buf)
-      return NULL;
-
-   layout_flags |= MIPTREE_LAYOUT_TILING_ANY;
-   buf->mt = intel_miptree_create(brw,
-                                  mt->target,
-                                  mt->format,
-                                  mt->first_level,
-                                  mt->last_level,
-                                  mt->logical_width0,
-                                  mt->logical_height0,
-                                  mt->logical_depth0,
-                                  mt->num_samples,
-                                  layout_flags);
-   if (!buf->mt) {
-      free(buf);
-      return NULL;
-   }
-
-   buf->aux_base.bo = buf->mt->bo;
-   buf->aux_base.size = buf->mt->total_height * buf->mt->pitch;
-   buf->aux_base.pitch = buf->mt->pitch;
-   buf->aux_base.qpitch = buf->mt->qpitch;
-
-   return buf;
-}
-
-bool
-intel_miptree_wants_hiz_buffer(struct brw_context *brw,
-                               struct intel_mipmap_tree *mt)
-{
-   if (!brw->has_hiz)
-      return false;
-
-   if (mt->hiz_buf != NULL)
-      return false;
-
-   if (mt->disable_aux_buffers)
-      return false;
-
-   switch (mt->format) {
-   case MESA_FORMAT_Z_FLOAT32:
-   case MESA_FORMAT_Z32_FLOAT_S8X24_UINT:
-   case MESA_FORMAT_Z24_UNORM_X8_UINT:
-   case MESA_FORMAT_Z24_UNORM_S8_UINT:
-   case MESA_FORMAT_Z_UNORM16:
-      return true;
-   default:
-      return false;
-   }
-}
-
 bool
 intel_miptree_alloc_hiz(struct brw_context *brw,
                        struct intel_mipmap_tree *mt)
 {
    assert(mt->hiz_buf == NULL);
-   assert(!mt->disable_aux_buffers);
+   assert(mt->aux_usage == ISL_AUX_USAGE_HIZ);
 
-   if (brw->gen == 7) {
-      mt->hiz_buf = intel_gen7_hiz_buf_create(brw, mt);
-   } else if (brw->gen >= 8) {
-      mt->hiz_buf = intel_gen8_hiz_buf_create(brw, mt);
-   } else {
-      mt->hiz_buf = intel_hiz_miptree_buf_create(brw, mt);
-   }
-
-   if (!mt->hiz_buf)
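+   /* A freshly allocated HiZ buffer has undefined contents, so every slice
+    * starts out as AUX_INVALID.  intel_miptree_prepare_hiz_access() will
+    * perform a HiZ resolve (really an ambiguate) the first time a slice is
+    * used with HiZ enabled.
+    */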
+   enum isl_aux_state **aux_state =
+      create_aux_state_map(mt, ISL_AUX_STATE_AUX_INVALID);
+   if (!aux_state)
       return false;
 
-   /* Mark that all slices need a HiZ resolve. */
-   for (unsigned level = mt->first_level; level <= mt->last_level; ++level) {
-      if (!intel_miptree_level_enable_hiz(brw, mt, level))
-         continue;
+   struct isl_surf temp_main_surf;
+   struct isl_surf temp_hiz_surf;
 
-      for (unsigned layer = 0; layer < mt->level[level].depth; ++layer) {
-         struct intel_resolve_map *m = malloc(sizeof(struct intel_resolve_map));
-         exec_node_init(&m->link);
-         m->level = level;
-         m->layer = layer;
-         m->need = BLORP_HIZ_OP_HIZ_RESOLVE;
+   intel_miptree_get_isl_surf(brw, mt, &temp_main_surf);
+   MAYBE_UNUSED bool ok =
+      isl_surf_get_hiz_surf(&brw->isl_dev, &temp_main_surf, &temp_hiz_surf);
+   assert(ok);
 
-         exec_list_push_tail(&mt->hiz_map, &m->link);
-      }
+   const uint32_t alloc_flags = BO_ALLOC_FOR_RENDER;
+   mt->hiz_buf = intel_alloc_aux_buffer(brw, "hiz-miptree",
+                                        &temp_hiz_surf, alloc_flags, mt);
+
+   if (!mt->hiz_buf) {
+      free(aux_state);
+      return false;
    }
 
+   for (unsigned level = mt->first_level; level <= mt->last_level; ++level)
+      intel_miptree_level_enable_hiz(brw, mt, level);
+
+   mt->aux_state = aux_state;
+
    return true;
 }
 
@@ -2087,189 +2032,658 @@ intel_miptree_sample_with_hiz(struct brw_context *brw,
  * Does the miptree slice have hiz enabled?
  */
 bool
-intel_miptree_level_has_hiz(struct intel_mipmap_tree *mt, uint32_t level)
+intel_miptree_level_has_hiz(const struct intel_mipmap_tree *mt, uint32_t level)
 {
    intel_miptree_check_level_layer(mt, level, 0);
    return mt->level[level].has_hiz;
 }
 
-void
-intel_miptree_slice_set_needs_hiz_resolve(struct intel_mipmap_tree *mt,
-                                         uint32_t level,
-                                         uint32_t layer)
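+/* Returns true if any slice in the given level/layer range is in a state
+ * other than PASS_THROUGH, i.e. if a consumer that does not understand the
+ * CCS/MCS (or the fast clear color) would still need a resolve first.
+ */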
+bool
+intel_miptree_has_color_unresolved(const struct intel_mipmap_tree *mt,
+                                   unsigned start_level, unsigned num_levels,
+                                   unsigned start_layer, unsigned num_layers)
 {
-   if (!intel_miptree_level_has_hiz(mt, level))
-      return;
+   assert(_mesa_is_format_color_format(mt->format));
 
-   intel_resolve_map_set(&mt->hiz_map,
-                        level, layer, BLORP_HIZ_OP_HIZ_RESOLVE);
-}
+   if (!mt->mcs_buf)
+      return false;
 
+   /* Clamp the level range to fit the miptree */
+   assert(start_level + num_levels >= start_level);
+   const uint32_t last_level =
+      MIN2(mt->last_level, start_level + num_levels - 1);
+   start_level = MAX2(mt->first_level, start_level);
+   num_levels = last_level - start_level + 1;
+
+   for (uint32_t level = start_level; level <= last_level; level++) {
+      const uint32_t level_layers = MIN2(num_layers, mt->level[level].depth);
+      for (unsigned a = 0; a < level_layers; a++) {
+         enum isl_aux_state aux_state =
+            intel_miptree_get_aux_state(mt, level, start_layer + a);
+         assert(aux_state != ISL_AUX_STATE_AUX_INVALID);
+         if (aux_state != ISL_AUX_STATE_PASS_THROUGH)
+            return true;
+      }
+   }
 
-void
-intel_miptree_slice_set_needs_depth_resolve(struct intel_mipmap_tree *mt,
-                                            uint32_t level,
-                                            uint32_t layer)
+   return false;
+}
+
+static void
+intel_miptree_check_color_resolve(const struct brw_context *brw,
+                                  const struct intel_mipmap_tree *mt,
+                                  unsigned level, unsigned layer)
 {
-   if (!intel_miptree_level_has_hiz(mt, level))
+
+   if (!mt->mcs_buf)
       return;
 
-   intel_resolve_map_set(&mt->hiz_map,
-                        level, layer, BLORP_HIZ_OP_DEPTH_RESOLVE);
+   /* Fast color clear is supported for mipmapped surfaces only on Gen8+. */
+   assert(brw->gen >= 8 ||
+          (level == 0 && mt->first_level == 0 && mt->last_level == 0));
+
+   /* Compression of arrayed msaa surfaces is supported. */
+   if (mt->num_samples > 1)
+      return;
+
+   /* Fast color clear is supported for non-msaa arrays only on Gen8+. */
+   assert(brw->gen >= 8 || (layer == 0 && mt->logical_depth0 == 1));
+
+   (void)level;
+   (void)layer;
 }
 
-void
-intel_miptree_set_all_slices_need_depth_resolve(struct intel_mipmap_tree *mt,
-                                                uint32_t level)
+static enum blorp_fast_clear_op
+get_ccs_d_resolve_op(enum isl_aux_state aux_state,
+                     bool ccs_supported, bool fast_clear_supported)
 {
-   uint32_t layer;
-   uint32_t end_layer = mt->level[level].depth;
+   assert(ccs_supported == fast_clear_supported);
+
+   switch (aux_state) {
+   case ISL_AUX_STATE_CLEAR:
+   case ISL_AUX_STATE_COMPRESSED_CLEAR:
+      if (!ccs_supported)
+         return BLORP_FAST_CLEAR_OP_RESOLVE_FULL;
+      else
+         return BLORP_FAST_CLEAR_OP_NONE;
 
-   for (layer = 0; layer < end_layer; layer++) {
-      intel_miptree_slice_set_needs_depth_resolve(mt, level, layer);
+   case ISL_AUX_STATE_PASS_THROUGH:
+      return BLORP_FAST_CLEAR_OP_NONE;
+
+   case ISL_AUX_STATE_RESOLVED:
+   case ISL_AUX_STATE_AUX_INVALID:
+   case ISL_AUX_STATE_COMPRESSED_NO_CLEAR:
+      break;
    }
+
+   unreachable("Invalid aux state for CCS_D");
 }
 
-static bool
-intel_miptree_slice_resolve(struct brw_context *brw,
-                           struct intel_mipmap_tree *mt,
-                           uint32_t level,
-                           uint32_t layer,
-                           enum blorp_hiz_op need)
+static enum blorp_fast_clear_op
+get_ccs_e_resolve_op(enum isl_aux_state aux_state,
+                     bool ccs_supported, bool fast_clear_supported)
 {
-   intel_miptree_check_level_layer(mt, level, layer);
+   switch (aux_state) {
+   case ISL_AUX_STATE_CLEAR:
+   case ISL_AUX_STATE_COMPRESSED_CLEAR:
+      if (!ccs_supported)
+         return BLORP_FAST_CLEAR_OP_RESOLVE_FULL;
+      else if (!fast_clear_supported)
+         return BLORP_FAST_CLEAR_OP_RESOLVE_PARTIAL;
+      else
+         return BLORP_FAST_CLEAR_OP_NONE;
 
-   struct intel_resolve_map *item =
-        intel_resolve_map_get(&mt->hiz_map, level, layer);
+   case ISL_AUX_STATE_COMPRESSED_NO_CLEAR:
+      if (!ccs_supported)
+         return BLORP_FAST_CLEAR_OP_RESOLVE_FULL;
+      else
+         return BLORP_FAST_CLEAR_OP_NONE;
 
-   if (!item || item->need != need)
-      return false;
+   case ISL_AUX_STATE_PASS_THROUGH:
+      return BLORP_FAST_CLEAR_OP_NONE;
 
-   intel_hiz_exec(brw, mt, level, layer, need);
-   intel_resolve_map_remove(item);
-   return true;
+   case ISL_AUX_STATE_RESOLVED:
+   case ISL_AUX_STATE_AUX_INVALID:
+      break;
+   }
+
+   unreachable("Invalid aux state for CCS_E");
 }
 
-bool
-intel_miptree_slice_resolve_hiz(struct brw_context *brw,
-                               struct intel_mipmap_tree *mt,
-                               uint32_t level,
-                               uint32_t layer)
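+/* Resolve a single slice of a CCS color surface, if needed, before an access
+ * that may or may not understand the CCS.  As a rough example for CCS_E:
+ * sampling a slice in the COMPRESSED_CLEAR state through an incompatible
+ * format view (aux_supported == false) requires a full resolve, while a
+ * compatible view that merely cannot consume the clear color
+ * (fast_clear_supported == false) only needs a partial resolve.
+ */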
+static void
+intel_miptree_prepare_ccs_access(struct brw_context *brw,
+                                 struct intel_mipmap_tree *mt,
+                                 uint32_t level, uint32_t layer,
+                                 bool aux_supported,
+                                 bool fast_clear_supported)
 {
-   return intel_miptree_slice_resolve(brw, mt, level, layer,
-                                     BLORP_HIZ_OP_HIZ_RESOLVE);
+   enum isl_aux_state aux_state = intel_miptree_get_aux_state(mt, level, layer);
+
+   enum blorp_fast_clear_op resolve_op;
+   if (intel_miptree_is_lossless_compressed(brw, mt)) {
+      resolve_op = get_ccs_e_resolve_op(aux_state, aux_supported,
+                                        fast_clear_supported);
+   } else {
+      resolve_op = get_ccs_d_resolve_op(aux_state, aux_supported,
+                                        fast_clear_supported);
+   }
+
+   if (resolve_op != BLORP_FAST_CLEAR_OP_NONE) {
+      intel_miptree_check_color_resolve(brw, mt, level, layer);
+      brw_blorp_resolve_color(brw, mt, level, layer, resolve_op);
+
+      switch (resolve_op) {
+      case BLORP_FAST_CLEAR_OP_RESOLVE_FULL:
+         /* The CCS full resolve operation destroys the CCS and sets it to the
+          * pass-through state.  (You can also think of this as being both a
+          * resolve and an ambiguate in one operation.)
+          */
+         intel_miptree_set_aux_state(brw, mt, level, layer, 1,
+                                     ISL_AUX_STATE_PASS_THROUGH);
+         break;
+
+      case BLORP_FAST_CLEAR_OP_RESOLVE_PARTIAL:
+         intel_miptree_set_aux_state(brw, mt, level, layer, 1,
+                                     ISL_AUX_STATE_COMPRESSED_NO_CLEAR);
+         break;
+
+      default:
+         unreachable("Invalid resolve op");
+      }
+   }
 }
 
-bool
-intel_miptree_slice_resolve_depth(struct brw_context *brw,
-                                 struct intel_mipmap_tree *mt,
-                                 uint32_t level,
-                                 uint32_t layer)
+static void
+intel_miptree_finish_ccs_write(struct brw_context *brw,
+                               struct intel_mipmap_tree *mt,
+                               uint32_t level, uint32_t layer,
+                               bool written_with_ccs)
 {
-   return intel_miptree_slice_resolve(brw, mt, level, layer,
-                                     BLORP_HIZ_OP_DEPTH_RESOLVE);
+   enum isl_aux_state aux_state = intel_miptree_get_aux_state(mt, level, layer);
+
+   if (intel_miptree_is_lossless_compressed(brw, mt)) {
+      switch (aux_state) {
+      case ISL_AUX_STATE_CLEAR:
+         assert(written_with_ccs);
+         intel_miptree_set_aux_state(brw, mt, level, layer, 1,
+                                     ISL_AUX_STATE_COMPRESSED_CLEAR);
+         break;
+
+      case ISL_AUX_STATE_COMPRESSED_CLEAR:
+      case ISL_AUX_STATE_COMPRESSED_NO_CLEAR:
+         assert(written_with_ccs);
+         break; /* Nothing to do */
+
+      case ISL_AUX_STATE_PASS_THROUGH:
+         if (written_with_ccs) {
+            intel_miptree_set_aux_state(brw, mt, level, layer, 1,
+                                        ISL_AUX_STATE_COMPRESSED_NO_CLEAR);
+         } else {
+            /* Nothing to do */
+         }
+         break;
+
+      case ISL_AUX_STATE_RESOLVED:
+      case ISL_AUX_STATE_AUX_INVALID:
+         unreachable("Invalid aux state for CCS_E");
+      }
+   } else {
+      /* CCS_D is a bit simpler */
+      switch (aux_state) {
+      case ISL_AUX_STATE_CLEAR:
+         assert(written_with_ccs);
+         intel_miptree_set_aux_state(brw, mt, level, layer, 1,
+                                     ISL_AUX_STATE_COMPRESSED_CLEAR);
+         break;
+
+      case ISL_AUX_STATE_COMPRESSED_CLEAR:
+         assert(written_with_ccs);
+         break; /* Nothing to do */
+
+      case ISL_AUX_STATE_PASS_THROUGH:
+         /* Nothing to do */
+         break;
+
+      case ISL_AUX_STATE_COMPRESSED_NO_CLEAR:
+      case ISL_AUX_STATE_RESOLVED:
+      case ISL_AUX_STATE_AUX_INVALID:
+         unreachable("Invalid aux state for CCS_D");
+      }
+   }
 }
 
-static bool
-intel_miptree_all_slices_resolve(struct brw_context *brw,
-                                struct intel_mipmap_tree *mt,
-                                enum blorp_hiz_op need)
-{
-   bool did_resolve = false;
+static void
+intel_miptree_finish_mcs_write(struct brw_context *brw,
+                               struct intel_mipmap_tree *mt,
+                               uint32_t level, uint32_t layer,
+                               bool written_with_aux)
+{
+   switch (intel_miptree_get_aux_state(mt, level, layer)) {
+   case ISL_AUX_STATE_CLEAR:
+      assert(written_with_aux);
+      intel_miptree_set_aux_state(brw, mt, level, layer, 1,
+                                  ISL_AUX_STATE_COMPRESSED_CLEAR);
+      break;
+
+   case ISL_AUX_STATE_COMPRESSED_CLEAR:
+      assert(written_with_aux);
+      break; /* Nothing to do */
+
+   case ISL_AUX_STATE_COMPRESSED_NO_CLEAR:
+   case ISL_AUX_STATE_RESOLVED:
+   case ISL_AUX_STATE_PASS_THROUGH:
+   case ISL_AUX_STATE_AUX_INVALID:
+      unreachable("Invalid aux state for MCS");
+   }
+}
+
+static void
+intel_miptree_prepare_hiz_access(struct brw_context *brw,
+                                 struct intel_mipmap_tree *mt,
+                                 uint32_t level, uint32_t layer,
+                                 bool hiz_supported, bool fast_clear_supported)
+{
+   enum blorp_hiz_op hiz_op = BLORP_HIZ_OP_NONE;
+   switch (intel_miptree_get_aux_state(mt, level, layer)) {
+   case ISL_AUX_STATE_CLEAR:
+   case ISL_AUX_STATE_COMPRESSED_CLEAR:
+      if (!hiz_supported || !fast_clear_supported)
+         hiz_op = BLORP_HIZ_OP_DEPTH_RESOLVE;
+      break;
+
+   case ISL_AUX_STATE_COMPRESSED_NO_CLEAR:
+      if (!hiz_supported)
+         hiz_op = BLORP_HIZ_OP_DEPTH_RESOLVE;
+      break;
 
-   foreach_list_typed_safe(struct intel_resolve_map, map, link, &mt->hiz_map) {
-      if (map->need != need)
-        continue;
+   case ISL_AUX_STATE_PASS_THROUGH:
+   case ISL_AUX_STATE_RESOLVED:
+      break;
 
-      intel_hiz_exec(brw, mt, map->level, map->layer, need);
-      intel_resolve_map_remove(map);
-      did_resolve = true;
+   case ISL_AUX_STATE_AUX_INVALID:
+      if (hiz_supported)
+         hiz_op = BLORP_HIZ_OP_HIZ_RESOLVE;
+      break;
    }
 
-   return did_resolve;
+   if (hiz_op != BLORP_HIZ_OP_NONE) {
+      intel_hiz_exec(brw, mt, level, layer, 1, hiz_op);
+
+      switch (hiz_op) {
+      case BLORP_HIZ_OP_DEPTH_RESOLVE:
+         intel_miptree_set_aux_state(brw, mt, level, layer, 1,
+                                     ISL_AUX_STATE_RESOLVED);
+         break;
+
+      case BLORP_HIZ_OP_HIZ_RESOLVE:
+         /* The HiZ resolve operation is actually an ambiguate */
+         intel_miptree_set_aux_state(brw, mt, level, layer, 1,
+                                     ISL_AUX_STATE_PASS_THROUGH);
+         break;
+
+      default:
+         unreachable("Invalid HiZ op");
+      }
+   }
 }
 
-bool
-intel_miptree_all_slices_resolve_hiz(struct brw_context *brw,
-                                    struct intel_mipmap_tree *mt)
+static void
+intel_miptree_finish_hiz_write(struct brw_context *brw,
+                               struct intel_mipmap_tree *mt,
+                               uint32_t level, uint32_t layer,
+                               bool written_with_hiz)
+{
+   switch (intel_miptree_get_aux_state(mt, level, layer)) {
+   case ISL_AUX_STATE_CLEAR:
+      assert(written_with_hiz);
+      intel_miptree_set_aux_state(brw, mt, level, layer, 1,
+                                  ISL_AUX_STATE_COMPRESSED_CLEAR);
+      break;
+
+   case ISL_AUX_STATE_COMPRESSED_NO_CLEAR:
+   case ISL_AUX_STATE_COMPRESSED_CLEAR:
+      assert(written_with_hiz);
+      break; /* Nothing to do */
+
+   case ISL_AUX_STATE_RESOLVED:
+      if (written_with_hiz) {
+         intel_miptree_set_aux_state(brw, mt, level, layer, 1,
+                                     ISL_AUX_STATE_COMPRESSED_NO_CLEAR);
+      } else {
+         intel_miptree_set_aux_state(brw, mt, level, layer, 1,
+                                     ISL_AUX_STATE_AUX_INVALID);
+      }
+      break;
+
+   case ISL_AUX_STATE_PASS_THROUGH:
+      if (written_with_hiz) {
+         intel_miptree_set_aux_state(brw, mt, level, layer, 1,
+                                     ISL_AUX_STATE_COMPRESSED_NO_CLEAR);
+      }
+      break;
+
+   case ISL_AUX_STATE_AUX_INVALID:
+      assert(!written_with_hiz);
+      break;
+   }
+}
+
+static inline uint32_t
+miptree_level_range_length(const struct intel_mipmap_tree *mt,
+                           uint32_t start_level, uint32_t num_levels)
 {
-   return intel_miptree_all_slices_resolve(brw, mt,
-                                          BLORP_HIZ_OP_HIZ_RESOLVE);
+   assert(start_level >= mt->first_level);
+   assert(start_level <= mt->last_level);
+
+   if (num_levels == INTEL_REMAINING_LAYERS)
+      num_levels = mt->last_level - start_level + 1;
+   /* Check for overflow */
+   assert(start_level + num_levels >= start_level);
+   assert(start_level + num_levels <= mt->last_level + 1);
+
+   return num_levels;
 }
 
-bool
-intel_miptree_all_slices_resolve_depth(struct brw_context *brw,
-                                      struct intel_mipmap_tree *mt)
+static inline uint32_t
+miptree_layer_range_length(const struct intel_mipmap_tree *mt, uint32_t level,
+                           uint32_t start_layer, uint32_t num_layers)
 {
-   return intel_miptree_all_slices_resolve(brw, mt,
-                                          BLORP_HIZ_OP_DEPTH_RESOLVE);
+   assert(level <= mt->last_level);
+   uint32_t total_num_layers;
+
+   if (mt->surf.size > 0)
+      total_num_layers = mt->surf.dim == ISL_SURF_DIM_3D ?
+         minify(mt->surf.phys_level0_sa.depth, level) :
+         mt->surf.phys_level0_sa.array_len;
+   else
+      total_num_layers = mt->level[level].depth;
+
+   assert(start_layer < total_num_layers);
+   if (num_layers == INTEL_REMAINING_LAYERS)
+      num_layers = total_num_layers - start_layer;
+   /* Check for overflow */
+   assert(start_layer + num_layers >= start_layer);
+   assert(start_layer + num_layers <= total_num_layers);
+
+   return num_layers;
 }
 
-static void
-intel_miptree_check_color_resolve(const struct intel_mipmap_tree *mt,
-                                  unsigned level, unsigned layer)
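+/* Flush out any aux data (HiZ, CCS, fast clears) that the upcoming access
+ * cannot understand.  For instance, a caller about to read level 2, layer 0
+ * of a color miptree with the CPU, which understands neither CCS nor fast
+ * clears, would roughly do:
+ *
+ *    intel_miptree_prepare_access(brw, mt, 2, 1, 0, 1, false, false);
+ *
+ * after which that slice of a CCS surface is back in the PASS_THROUGH state.
+ */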
+void
+intel_miptree_prepare_access(struct brw_context *brw,
+                             struct intel_mipmap_tree *mt,
+                             uint32_t start_level, uint32_t num_levels,
+                             uint32_t start_layer, uint32_t num_layers,
+                             bool aux_supported, bool fast_clear_supported)
 {
-   if (mt->no_ccs || !mt->mcs_buf)
-      return;
+   num_levels = miptree_level_range_length(mt, start_level, num_levels);
 
-   /* Fast color clear is not supported for mipmapped surfaces. */
-   assert(level == 0 && mt->first_level == 0 && mt->last_level == 0);
+   if (_mesa_is_format_color_format(mt->format)) {
+      if (!mt->mcs_buf)
+         return;
 
-   /* Compression of arrayed msaa surfaces is supported. */
-   if (mt->num_samples > 1)
-      return;
+      if (mt->num_samples > 1) {
+         /* Nothing to do for MSAA */
+         assert(aux_supported && fast_clear_supported);
+      } else {
+         for (uint32_t l = 0; l < num_levels; l++) {
+            const uint32_t level = start_level + l;
+            const uint32_t level_layers =
+               miptree_layer_range_length(mt, level, start_layer, num_layers);
+            for (uint32_t a = 0; a < level_layers; a++) {
+               intel_miptree_prepare_ccs_access(brw, mt, level,
+                                                start_layer + a, aux_supported,
+                                                fast_clear_supported);
+            }
+         }
+      }
+   } else if (mt->format == MESA_FORMAT_S_UINT8) {
+      /* Nothing to do for stencil */
+   } else {
+      if (!mt->hiz_buf)
+         return;
+
+      for (uint32_t l = 0; l < num_levels; l++) {
+         const uint32_t level = start_level + l;
+         if (!intel_miptree_level_has_hiz(mt, level))
+            continue;
+
+         const uint32_t level_layers =
+            miptree_layer_range_length(mt, level, start_layer, num_layers);
+         for (uint32_t a = 0; a < level_layers; a++) {
+            intel_miptree_prepare_hiz_access(brw, mt, level, start_layer + a,
+                                             aux_supported,
+                                             fast_clear_supported);
+         }
+      }
+   }
+}
 
-   /* Fast color clear is not supported for non-msaa arrays. */
-   assert(layer == 0 && mt->logical_depth0 == 1);
+void
+intel_miptree_finish_write(struct brw_context *brw,
+                           struct intel_mipmap_tree *mt, uint32_t level,
+                           uint32_t start_layer, uint32_t num_layers,
+                           bool written_with_aux)
+{
+   num_layers = miptree_layer_range_length(mt, level, start_layer, num_layers);
 
-   (void)level;
-   (void)layer;
+   if (_mesa_is_format_color_format(mt->format)) {
+      if (!mt->mcs_buf)
+         return;
+
+      if (mt->num_samples > 1) {
+         for (uint32_t a = 0; a < num_layers; a++) {
+            intel_miptree_finish_mcs_write(brw, mt, level, start_layer + a,
+                                           written_with_aux);
+         }
+      } else {
+         for (uint32_t a = 0; a < num_layers; a++) {
+            intel_miptree_finish_ccs_write(brw, mt, level, start_layer + a,
+                                           written_with_aux);
+         }
+      }
+   } else if (mt->format == MESA_FORMAT_S_UINT8) {
+      /* Nothing to do for stencil */
+   } else {
+      if (!intel_miptree_level_has_hiz(mt, level))
+         return;
+
+      for (uint32_t a = 0; a < num_layers; a++) {
+         intel_miptree_finish_hiz_write(brw, mt, level, start_layer + a,
+                                        written_with_aux);
+      }
+   }
 }
 
-bool
-intel_miptree_resolve_color(struct brw_context *brw,
-                            struct intel_mipmap_tree *mt, unsigned level,
-                            unsigned start_layer, unsigned num_layers,
-                            int flags)
+enum isl_aux_state
+intel_miptree_get_aux_state(const struct intel_mipmap_tree *mt,
+                            uint32_t level, uint32_t layer)
 {
-   intel_miptree_check_color_resolve(mt, level, start_layer);
+   intel_miptree_check_level_layer(mt, level, layer);
 
-   /* From gen9 onwards there is new compression scheme for single sampled
-    * surfaces called "lossless compressed". These don't need to be always
-    * resolved.
-    */
-   if ((flags & INTEL_MIPTREE_IGNORE_CCS_E) &&
-       intel_miptree_is_lossless_compressed(brw, mt))
+   if (_mesa_is_format_color_format(mt->format)) {
+      assert(mt->mcs_buf != NULL);
+      assert(mt->num_samples <= 1 || mt->msaa_layout == INTEL_MSAA_LAYOUT_CMS);
+   } else if (mt->format == MESA_FORMAT_S_UINT8) {
+      unreachable("Cannot get aux state for stencil");
+   } else {
+      assert(intel_miptree_level_has_hiz(mt, level));
+   }
+
+   return mt->aux_state[level][layer];
+}
+
+void
+intel_miptree_set_aux_state(struct brw_context *brw,
+                            struct intel_mipmap_tree *mt, uint32_t level,
+                            uint32_t start_layer, uint32_t num_layers,
+                            enum isl_aux_state aux_state)
+{
+   num_layers = miptree_layer_range_length(mt, level, start_layer, num_layers);
+
+   if (_mesa_is_format_color_format(mt->format)) {
+      assert(mt->mcs_buf != NULL);
+      assert(mt->num_samples <= 1 || mt->msaa_layout == INTEL_MSAA_LAYOUT_CMS);
+   } else if (mt->format == MESA_FORMAT_S_UINT8) {
+      unreachable("Cannot get aux state for stencil");
+   } else {
+      assert(intel_miptree_level_has_hiz(mt, level));
+   }
+
+   for (unsigned a = 0; a < num_layers; a++)
+      mt->aux_state[level][start_layer + a] = aux_state;
+}
+
+/* On Gen9 color buffers may be compressed by the hardware (lossless
+ * compression). There are, however, format restrictions, and care needs to
+ * be taken that the sampler engine is capable of re-interpreting a buffer
+ * with a format different from the one it was originally written with.
+ *
+ * For example, SRGB formats are not compressible and the sampler engine isn't
+ * capable of treating RGBA_UNORM as SRGB_ALPHA. In such a case the underlying
+ * color buffer needs to be resolved so that the sampling surface can be
+ * sampled as non-compressed (i.e., without the auxiliary MCS buffer being
+ * set).
+ */
+static bool
+can_texture_with_ccs(struct brw_context *brw,
+                     struct intel_mipmap_tree *mt,
+                     mesa_format view_format)
+{
+   if (!intel_miptree_is_lossless_compressed(brw, mt))
       return false;
 
-   switch (mt->fast_clear_state) {
-   case INTEL_FAST_CLEAR_STATE_RESOLVED:
-      /* No resolve needed */
+   enum isl_format isl_mt_format = brw_isl_format_for_mesa_format(mt->format);
+   enum isl_format isl_view_format = brw_isl_format_for_mesa_format(view_format);
+
+   if (!isl_formats_are_ccs_e_compatible(&brw->screen->devinfo,
+                                         isl_mt_format, isl_view_format)) {
+      perf_debug("Incompatible sampling format (%s) for rbc (%s)\n",
+                 _mesa_get_format_name(view_format),
+                 _mesa_get_format_name(mt->format));
       return false;
-   case INTEL_FAST_CLEAR_STATE_UNRESOLVED:
-   case INTEL_FAST_CLEAR_STATE_CLEAR:
-      /* For now arrayed fast clear is not supported. */
-      assert(num_layers == 1);
-
-      /* Fast color clear resolves only make sense for non-MSAA buffers. */
-      if (mt->msaa_layout == INTEL_MSAA_LAYOUT_NONE ||
-          intel_miptree_is_lossless_compressed(brw, mt)) {
-         brw_blorp_resolve_color(brw, mt, level, start_layer);
-         return true;
+   }
+
+   return true;
+}
+
+static void
+intel_miptree_prepare_texture_slices(struct brw_context *brw,
+                                     struct intel_mipmap_tree *mt,
+                                     mesa_format view_format,
+                                     uint32_t start_level, uint32_t num_levels,
+                                     uint32_t start_layer, uint32_t num_layers,
+                                     bool *aux_supported_out)
+{
+   bool aux_supported, clear_supported;
+   if (_mesa_is_format_color_format(mt->format)) {
+      if (mt->num_samples > 1) {
+         aux_supported = clear_supported = true;
       } else {
-         return false;
+         aux_supported = can_texture_with_ccs(brw, mt, view_format);
+
+         /* Clear color is specified as ints or floats and the conversion is
+          * done by the sampler.  If we have a texture view, we would have to
+          * perform the clear color conversion manually.  Just disable clear
+          * color.
+          */
+         clear_supported = aux_supported && (mt->format == view_format);
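+         /* For instance, sampling through a view whose format differs from
+          * mt->format keeps aux_supported (provided the formats are CCS_E
+          * compatible) but drops clear_supported, since the stored clear
+          * color is encoded for mt->format rather than for view_format.
+          */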
       }
-   default:
-      unreachable("Invalid fast clear state");
+   } else if (mt->format == MESA_FORMAT_S_UINT8) {
+      aux_supported = clear_supported = false;
+   } else {
+      aux_supported = clear_supported = intel_miptree_sample_with_hiz(brw, mt);
    }
+
+   intel_miptree_prepare_access(brw, mt, start_level, num_levels,
+                                start_layer, num_layers,
+                                aux_supported, clear_supported);
+   if (aux_supported_out)
+      *aux_supported_out = aux_supported;
+}
+
+void
+intel_miptree_prepare_texture(struct brw_context *brw,
+                              struct intel_mipmap_tree *mt,
+                              mesa_format view_format,
+                              bool *aux_supported_out)
+{
+   intel_miptree_prepare_texture_slices(brw, mt, view_format,
+                                        0, INTEL_REMAINING_LEVELS,
+                                        0, INTEL_REMAINING_LAYERS,
+                                        aux_supported_out);
+}
+
+void
+intel_miptree_prepare_image(struct brw_context *brw,
+                            struct intel_mipmap_tree *mt)
+{
+   /* The data port doesn't understand any compression */
+   intel_miptree_prepare_access(brw, mt, 0, INTEL_REMAINING_LEVELS,
+                                0, INTEL_REMAINING_LAYERS, false, false);
+}
+
+void
+intel_miptree_prepare_fb_fetch(struct brw_context *brw,
+                               struct intel_mipmap_tree *mt, uint32_t level,
+                               uint32_t start_layer, uint32_t num_layers)
+{
+   intel_miptree_prepare_texture_slices(brw, mt, mt->format, level, 1,
+                                        start_layer, num_layers, NULL);
+}
+
+void
+intel_miptree_prepare_render(struct brw_context *brw,
+                             struct intel_mipmap_tree *mt, uint32_t level,
+                             uint32_t start_layer, uint32_t layer_count,
+                             bool srgb_enabled)
+{
+   /* If FRAMEBUFFER_SRGB is used on Gen9+ then we need to resolve any of
+    * the single-sampled color renderbuffers because the CCS buffer isn't
+    * supported for SRGB formats. This only matters if FRAMEBUFFER_SRGB is
+    * enabled because otherwise the surface state will be programmed with
+    * the linear equivalent format anyway.
+    */
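+   /* For example, an sRGB renderbuffer such as MESA_FORMAT_B8G8R8A8_SRGB
+    * rendered with GL_FRAMEBUFFER_SRGB enabled takes this path; with it
+    * disabled, the surface state uses the B8G8R8A8_UNORM equivalent and no
+    * resolve is needed.
+    */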
+   if (brw->gen == 9 && srgb_enabled && mt->num_samples <= 1 &&
+       _mesa_get_srgb_format_linear(mt->format) != mt->format) {
+
+      /* Lossless compression is not supported for SRGB formats, so it
+       * should be impossible to get here with such surfaces.
+       */
+      assert(!intel_miptree_is_lossless_compressed(brw, mt));
+      intel_miptree_prepare_access(brw, mt, level, 1, start_layer, layer_count,
+                                   false, false);
+   }
+}
+
+void
+intel_miptree_finish_render(struct brw_context *brw,
+                            struct intel_mipmap_tree *mt, uint32_t level,
+                            uint32_t start_layer, uint32_t layer_count)
+{
+   assert(_mesa_is_format_color_format(mt->format));
+   intel_miptree_finish_write(brw, mt, level, start_layer, layer_count,
+                              mt->mcs_buf != NULL);
 }
 
 void
-intel_miptree_all_slices_resolve_color(struct brw_context *brw,
-                                       struct intel_mipmap_tree *mt,
-                                       int flags)
+intel_miptree_prepare_depth(struct brw_context *brw,
+                            struct intel_mipmap_tree *mt, uint32_t level,
+                            uint32_t start_layer, uint32_t layer_count)
 {
-   intel_miptree_resolve_color(brw, mt, 0, 0, 1, flags);
+   intel_miptree_prepare_access(brw, mt, level, 1, start_layer, layer_count,
+                                mt->hiz_buf != NULL, mt->hiz_buf != NULL);
+}
+
+void
+intel_miptree_finish_depth(struct brw_context *brw,
+                           struct intel_mipmap_tree *mt, uint32_t level,
+                           uint32_t start_layer, uint32_t layer_count,
+                           bool depth_written)
+{
+   if (depth_written) {
+      intel_miptree_finish_write(brw, mt, level, start_layer, layer_count,
+                                 mt->hiz_buf != NULL);
+   }
 }
 
 /**
@@ -2279,6 +2693,8 @@ intel_miptree_all_slices_resolve_color(struct brw_context *brw,
  * Fast color clears are unsafe with shared buffers, so we need to resolve and
  * then discard the MCS buffer, if present.  We also set the no_ccs flag to
  * ensure that no MCS buffer gets allocated in the future.
+ *
+ * HiZ is similarly unsafe with shared buffers.
  */
 void
 intel_miptree_make_shareable(struct brw_context *brw,
@@ -2289,12 +2705,41 @@ intel_miptree_make_shareable(struct brw_context *brw,
     * pixel data is stored.  Fortunately this code path should never be
     * reached for multisample buffers.
     */
-   assert(mt->msaa_layout == INTEL_MSAA_LAYOUT_NONE);
+   assert(mt->msaa_layout == INTEL_MSAA_LAYOUT_NONE || mt->num_samples <= 1);
+
+   intel_miptree_prepare_access(brw, mt, 0, INTEL_REMAINING_LEVELS,
+                                0, INTEL_REMAINING_LAYERS, false, false);
 
    if (mt->mcs_buf) {
-      intel_miptree_all_slices_resolve_color(brw, mt, 0);
-      mt->no_ccs = true;
+      brw_bo_unreference(mt->mcs_buf->bo);
+      free(mt->mcs_buf);
+      mt->mcs_buf = NULL;
+
+      /* Any pending MCS/CCS operations are no longer needed. Trying to
+       * execute any will likely crash due to the missing aux buffer. So let's
+       * delete all pending ops.
+       */
+      free(mt->aux_state);
+      mt->aux_state = NULL;
    }
+
+   if (mt->hiz_buf) {
+      intel_miptree_aux_buffer_free(mt->hiz_buf);
+      mt->hiz_buf = NULL;
+
+      for (uint32_t l = mt->first_level; l <= mt->last_level; ++l) {
+         mt->level[l].has_hiz = false;
+      }
+
+      /* Any pending HiZ operations are no longer needed. Trying to execute
+       * any will likely crash due to the missing aux buffer. So let's delete
+       * all pending ops.
+       */
+      free(mt->aux_state);
+      mt->aux_state = NULL;
+   }
+
+   mt->aux_usage = ISL_AUX_USAGE_NONE;
 }
 
 
@@ -2359,27 +2804,57 @@ intel_miptree_updownsample(struct brw_context *brw,
                            struct intel_mipmap_tree *src,
                            struct intel_mipmap_tree *dst)
 {
+   unsigned src_w, src_h, dst_w, dst_h;
+
+   if (src->surf.size > 0) {
+      src_w = src->surf.logical_level0_px.width;
+      src_h = src->surf.logical_level0_px.height;
+   } else {
+      src_w = src->logical_width0;
+      src_h = src->logical_height0;
+   }
+
+   if (dst->surf.size > 0) {
+      dst_w = dst->surf.logical_level0_px.width;
+      dst_h = dst->surf.logical_level0_px.height;
+   } else {
+      dst_w = dst->logical_width0;
+      dst_h = dst->logical_height0;
+   }
+
    brw_blorp_blit_miptrees(brw,
                            src, 0 /* level */, 0 /* layer */,
                            src->format, SWIZZLE_XYZW,
                            dst, 0 /* level */, 0 /* layer */, dst->format,
-                           0, 0,
-                           src->logical_width0, src->logical_height0,
-                           0, 0,
-                           dst->logical_width0, dst->logical_height0,
+                           0, 0, src_w, src_h,
+                           0, 0, dst_w, dst_h,
                            GL_NEAREST, false, false /*mirror x, y*/,
                            false, false);
 
    if (src->stencil_mt) {
+      if (src->stencil_mt->surf.size > 0) {
+         src_w = src->stencil_mt->surf.logical_level0_px.width;
+         src_h = src->stencil_mt->surf.logical_level0_px.height;
+      } else {
+         src_w = src->stencil_mt->logical_width0;
+         src_h = src->stencil_mt->logical_height0;
+      }
+
+      if (dst->stencil_mt->surf.size > 0) {
+         dst_w = dst->stencil_mt->surf.logical_level0_px.width;
+         dst_h = dst->stencil_mt->surf.logical_level0_px.height;
+      } else {
+         dst_w = dst->stencil_mt->logical_width0;
+         dst_h = dst->stencil_mt->logical_height0;
+      }
+
       brw_blorp_blit_miptrees(brw,
                               src->stencil_mt, 0 /* level */, 0 /* layer */,
                               src->stencil_mt->format, SWIZZLE_XYZW,
                               dst->stencil_mt, 0 /* level */, 0 /* layer */,
                               dst->stencil_mt->format,
-                              0, 0,
-                              src->logical_width0, src->logical_height0,
-                              0, 0,
-                              dst->logical_width0, dst->logical_height0,
+                              0, 0, src_w, src_h,
+                              0, 0, dst_w, dst_h,
                               GL_NEAREST, false, false /*mirror x, y*/,
                               false, false /* decode/encode srgb */);
    }
@@ -2399,7 +2874,7 @@ intel_update_r8stencil(struct brw_context *brw,
       const uint32_t r8stencil_flags =
          MIPTREE_LAYOUT_ACCELERATED_UPLOAD | MIPTREE_LAYOUT_TILING_Y |
          MIPTREE_LAYOUT_DISABLE_AUX;
-      assert(brw->gen > 6); /* Handle MIPTREE_LAYOUT_FORCE_ALL_SLICE_AT_LOD */
+      assert(brw->gen > 6); /* Handle MIPTREE_LAYOUT_GEN6_HIZ_STENCIL */
       mt->r8stencil_mt = intel_miptree_create(brw,
                                               src->target,
                                               MESA_FORMAT_R_UINT8,
@@ -2417,25 +2892,14 @@ intel_update_r8stencil(struct brw_context *brw,
 
    for (int level = src->first_level; level <= src->last_level; level++) {
       const unsigned depth = src->level[level].depth;
-      const int layers_per_blit =
-         (dst->msaa_layout == INTEL_MSAA_LAYOUT_UMS ||
-          dst->msaa_layout == INTEL_MSAA_LAYOUT_CMS) ?
-         dst->num_samples : 1;
 
       for (unsigned layer = 0; layer < depth; layer++) {
-         brw_blorp_blit_miptrees(brw,
+         brw_blorp_copy_miptrees(brw,
                                  src, level, layer,
-                                 src->format, SWIZZLE_X,
-                                 dst, level, layers_per_blit * layer,
-                                 MESA_FORMAT_R_UNORM8,
-                                 0, 0,
+                                 dst, level, layer,
+                                 0, 0, 0, 0,
                                  minify(src->logical_width0, level),
-                                 minify(src->logical_height0, level),
-                                 0, 0,
-                                 minify(dst->logical_width0, level),
-                                 minify(dst->logical_height0, level),
-                                 GL_NEAREST, false, false /*mirror x, y*/,
-                                 false, false /* decode/encode srgb */);
+                                 minify(src->logical_height0, level));
       }
    }
 
@@ -2444,30 +2908,22 @@ intel_update_r8stencil(struct brw_context *brw,
 }
 
 static void *
-intel_miptree_map_raw(struct brw_context *brw, struct intel_mipmap_tree *mt)
+intel_miptree_map_raw(struct brw_context *brw,
+                      struct intel_mipmap_tree *mt,
+                      GLbitfield mode)
 {
-   /* CPU accesses to color buffers don't understand fast color clears, so
-    * resolve any pending fast color clears before we map.
-    */
-   intel_miptree_all_slices_resolve_color(brw, mt, 0);
-
-   drm_intel_bo *bo = mt->bo;
+   struct brw_bo *bo = mt->bo;
 
-   if (drm_intel_bo_references(brw->batch.bo, bo))
+   if (brw_batch_references(&brw->batch, bo))
       intel_batchbuffer_flush(brw);
 
-   if (mt->tiling != I915_TILING_NONE)
-      brw_bo_map_gtt(brw, bo, "miptree");
-   else
-      brw_bo_map(brw, bo, true, "miptree");
-
-   return bo->virtual;
+   return brw_bo_map(brw, bo, mode);
 }
 
 static void
 intel_miptree_unmap_raw(struct intel_mipmap_tree *mt)
 {
-   drm_intel_bo_unmap(mt->bo);
+   brw_bo_unmap(mt->bo);
 }
 
 static void
@@ -2492,7 +2948,7 @@ intel_miptree_map_gtt(struct brw_context *brw,
    y /= bh;
    x /= bw;
 
-   base = intel_miptree_map_raw(brw, mt) + mt->offset;
+   base = intel_miptree_map_raw(brw, mt, map->mode) + mt->offset;
 
    if (base == NULL)
       map->ptr = NULL;
@@ -2546,18 +3002,16 @@ intel_miptree_map_blit(struct brw_context *brw,
     * temporary buffer back out.
     */
    if (!(map->mode & GL_MAP_INVALIDATE_RANGE_BIT)) {
-      if (!intel_miptree_blit(brw,
-                              mt, level, slice,
-                              map->x, map->y, false,
-                              map->linear_mt, 0, 0,
-                              0, 0, false,
-                              map->w, map->h, GL_COPY)) {
+      if (!intel_miptree_copy(brw,
+                              mt, level, slice, map->x, map->y,
+                              map->linear_mt, 0, 0, 0, 0,
+                              map->w, map->h)) {
          fprintf(stderr, "Failed to blit\n");
          goto fail;
       }
    }
 
-   map->ptr = intel_miptree_map_raw(brw, map->linear_mt);
+   map->ptr = intel_miptree_map_raw(brw, map->linear_mt, map->mode);
 
    DBG("%s: %d,%d %dx%d from mt %p (%s) %d,%d = %p/%d\n", __func__,
        map->x, map->y, map->w, map->h,
@@ -2584,12 +3038,10 @@ intel_miptree_unmap_blit(struct brw_context *brw,
    intel_miptree_unmap_raw(map->linear_mt);
 
    if (map->mode & GL_MAP_WRITE_BIT) {
-      bool ok = intel_miptree_blit(brw,
-                                   map->linear_mt, 0, 0,
-                                   0, 0, false,
-                                   mt, level, slice,
-                                   map->x, map->y, false,
-                                   map->w, map->h, GL_COPY);
+      bool ok = intel_miptree_copy(brw,
+                                   map->linear_mt, 0, 0, 0, 0,
+                                   mt, level, slice, map->x, map->y,
+                                   map->w, map->h);
       WARN_ONCE(!ok, "Failed to blit from linear temporary mapping");
    }
 
@@ -2621,7 +3073,7 @@ intel_miptree_map_movntdqa(struct brw_context *brw,
    image_x += map->x;
    image_y += map->y;
 
-   void *src = intel_miptree_map_raw(brw, mt);
+   void *src = intel_miptree_map_raw(brw, mt, map->mode);
    if (!src)
       return;
 
@@ -2689,15 +3141,21 @@ intel_miptree_map_s8(struct brw_context *brw,
     * temporary buffer back out.
     */
    if (!(map->mode & GL_MAP_INVALIDATE_RANGE_BIT)) {
+      /* ISL uses the stencil pitch value expected by hardware, whereas the
+       * traditional miptree uses half of that. Below, the value is supplied
+       * to intel_offset_S8(), which expects the legacy interpretation.
+       */
+      const unsigned pitch = mt->surf.size > 0 ?
+                             mt->surf.row_pitch / 2 : mt->pitch;
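+      /* For example, an ISL row_pitch of 128 bytes corresponds to a legacy
+       * miptree pitch of 64, which is what intel_offset_S8() operates on.
+       */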
       uint8_t *untiled_s8_map = map->ptr;
-      uint8_t *tiled_s8_map = intel_miptree_map_raw(brw, mt);
+      uint8_t *tiled_s8_map = intel_miptree_map_raw(brw, mt, GL_MAP_READ_BIT);
       unsigned int image_x, image_y;
 
       intel_miptree_get_image_offset(mt, level, slice, &image_x, &image_y);
 
       for (uint32_t y = 0; y < map->h; y++) {
         for (uint32_t x = 0; x < map->w; x++) {
-           ptrdiff_t offset = intel_offset_S8(mt->pitch,
+           ptrdiff_t offset = intel_offset_S8(pitch,
                                               x + image_x + map->x,
                                               y + image_y + map->y,
                                               brw->has_swizzling);
@@ -2725,15 +3183,21 @@ intel_miptree_unmap_s8(struct brw_context *brw,
                       unsigned int slice)
 {
    if (map->mode & GL_MAP_WRITE_BIT) {
+      /* ISL uses the stencil pitch value expected by hardware, whereas the
+       * traditional miptree uses half of that. Below, the value is supplied
+       * to intel_offset_S8(), which expects the legacy interpretation.
+       */
+      const unsigned pitch = mt->surf.size > 0 ?
+                             mt->surf.row_pitch / 2 : mt->pitch;
       unsigned int image_x, image_y;
       uint8_t *untiled_s8_map = map->ptr;
-      uint8_t *tiled_s8_map = intel_miptree_map_raw(brw, mt);
+      uint8_t *tiled_s8_map = intel_miptree_map_raw(brw, mt, GL_MAP_WRITE_BIT);
 
       intel_miptree_get_image_offset(mt, level, slice, &image_x, &image_y);
 
       for (uint32_t y = 0; y < map->h; y++) {
         for (uint32_t x = 0; x < map->w; x++) {
-           ptrdiff_t offset = intel_offset_S8(mt->pitch,
+           ptrdiff_t offset = intel_offset_S8(pitch,
                                               image_x + x + map->x,
                                               image_y + y + map->y,
                                               brw->has_swizzling);
@@ -2782,7 +3246,7 @@ intel_miptree_unmap_etc(struct brw_context *brw,
    image_x += map->x;
    image_y += map->y;
 
-   uint8_t *dst = intel_miptree_map_raw(brw, mt)
+   uint8_t *dst = intel_miptree_map_raw(brw, mt, GL_MAP_WRITE_BIT)
                 + image_y * mt->pitch
                 + image_x * mt->cpp;
 
@@ -2832,9 +3296,15 @@ intel_miptree_map_depthstencil(struct brw_context *brw,
     * temporary buffer back out.
     */
    if (!(map->mode & GL_MAP_INVALIDATE_RANGE_BIT)) {
+      /* ISL uses the stencil pitch value expected by hardware, whereas the
+       * traditional miptree uses half of that. Below, the value is supplied
+       * to intel_offset_S8(), which expects the legacy interpretation.
+       */
+      const unsigned s_pitch = s_mt->surf.size > 0 ?
+                               s_mt->surf.row_pitch / 2 : s_mt->pitch;
       uint32_t *packed_map = map->ptr;
-      uint8_t *s_map = intel_miptree_map_raw(brw, s_mt);
-      uint32_t *z_map = intel_miptree_map_raw(brw, z_mt);
+      uint8_t *s_map = intel_miptree_map_raw(brw, s_mt, GL_MAP_READ_BIT);
+      uint32_t *z_map = intel_miptree_map_raw(brw, z_mt, GL_MAP_READ_BIT);
       unsigned int s_image_x, s_image_y;
       unsigned int z_image_x, z_image_y;
 
@@ -2846,7 +3316,7 @@ intel_miptree_map_depthstencil(struct brw_context *brw,
       for (uint32_t y = 0; y < map->h; y++) {
         for (uint32_t x = 0; x < map->w; x++) {
            int map_x = map->x + x, map_y = map->y + y;
-           ptrdiff_t s_offset = intel_offset_S8(s_mt->pitch,
+           ptrdiff_t s_offset = intel_offset_S8(s_pitch,
                                                 map_x + s_image_x,
                                                 map_y + s_image_y,
                                                 brw->has_swizzling);
@@ -2893,9 +3363,15 @@ intel_miptree_unmap_depthstencil(struct brw_context *brw,
    bool map_z32f_x24s8 = mt->format == MESA_FORMAT_Z_FLOAT32;
 
    if (map->mode & GL_MAP_WRITE_BIT) {
+      /* ISL uses the stencil pitch value expected by hardware, whereas the
+       * traditional miptree uses half of that. Below, the value is supplied
+       * to intel_offset_S8(), which expects the legacy interpretation.
+       */
+      const unsigned s_pitch = s_mt->surf.size > 0 ?
+                               s_mt->surf.row_pitch / 2 : s_mt->pitch;
       uint32_t *packed_map = map->ptr;
-      uint8_t *s_map = intel_miptree_map_raw(brw, s_mt);
-      uint32_t *z_map = intel_miptree_map_raw(brw, z_mt);
+      uint8_t *s_map = intel_miptree_map_raw(brw, s_mt, GL_MAP_WRITE_BIT);
+      uint32_t *z_map = intel_miptree_map_raw(brw, z_mt, GL_MAP_WRITE_BIT);
       unsigned int s_image_x, s_image_y;
       unsigned int z_image_x, z_image_y;
 
@@ -2906,7 +3382,7 @@ intel_miptree_unmap_depthstencil(struct brw_context *brw,
 
       for (uint32_t y = 0; y < map->h; y++) {
         for (uint32_t x = 0; x < map->w; x++) {
-           ptrdiff_t s_offset = intel_offset_S8(s_mt->pitch,
+           ptrdiff_t s_offset = intel_offset_S8(s_pitch,
                                                 x + s_image_x + map->x,
                                                 y + s_image_y + map->y,
                                                 brw->has_swizzling);
@@ -3006,11 +3482,9 @@ use_intel_mipree_map_blit(struct brw_context *brw,
 {
    if (brw->has_llc &&
       /* It's probably not worth swapping to the blit ring because of
-       * all the overhead involved. But, we must use blitter for the
-       * surfaces with INTEL_MIPTREE_TRMODE_{YF,YS}.
+       * all the overhead involved.
        */
-       (!(mode & GL_MAP_WRITE_BIT) ||
-        mt->tr_mode != INTEL_MIPTREE_TRMODE_NONE) &&
+       !(mode & GL_MAP_WRITE_BIT) &&
        !mt->compressed &&
        (mt->tiling == I915_TILING_X ||
         /* Prior to Sandybridge, the blitter can't handle Y tiling */
@@ -3064,10 +3538,8 @@ intel_miptree_map(struct brw_context *brw,
       return;
    }
 
-   intel_miptree_slice_resolve_depth(brw, mt, level, slice);
-   if (map->mode & GL_MAP_WRITE_BIT) {
-      intel_miptree_slice_set_needs_hiz_resolve(mt, level, slice);
-   }
+   intel_miptree_access_raw(brw, mt, level, slice,
+                            map->mode & GL_MAP_WRITE_BIT);
 
    if (mt->format == MESA_FORMAT_S_UINT8) {
       intel_miptree_map_s8(brw, mt, map, level, slice);
@@ -3085,8 +3557,6 @@ intel_miptree_map(struct brw_context *brw,
       intel_miptree_map_movntdqa(brw, mt, map, level, slice);
 #endif
    } else {
-      /* intel_miptree_map_gtt() doesn't support surfaces with Yf/Ys tiling. */
-      assert(mt->tr_mode == INTEL_MIPTREE_TRMODE_NONE);
       intel_miptree_map_gtt(brw, mt, map, level, slice);
    }
 
@@ -3160,8 +3630,11 @@ get_isl_surf_dim(GLenum target)
 
 enum isl_dim_layout
 get_isl_dim_layout(const struct gen_device_info *devinfo, uint32_t tiling,
-                   GLenum target)
+                   GLenum target, enum miptree_array_layout array_layout)
 {
+   if (array_layout == GEN6_HIZ_STENCIL)
+      return ISL_DIM_LAYOUT_GEN6_STENCIL_HIZ;
+
    switch (target) {
    case GL_TEXTURE_1D:
    case GL_TEXTURE_1D_ARRAY:
@@ -3201,16 +3674,7 @@ intel_miptree_get_isl_tiling(const struct intel_mipmap_tree *mt)
       case I915_TILING_X:
          return ISL_TILING_X;
       case I915_TILING_Y:
-         switch (mt->tr_mode) {
-         case INTEL_MIPTREE_TRMODE_NONE:
             return ISL_TILING_Y0;
-         case INTEL_MIPTREE_TRMODE_YF:
-            return ISL_TILING_Yf;
-         case INTEL_MIPTREE_TRMODE_YS:
-            return ISL_TILING_Ys;
-         default:
-            unreachable("Invalid tiled resource mode");
-         }
       default:
          unreachable("Invalid tiling mode");
       }
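With the Yf/Ys tiled-resource cases gone, the tiled half of intel_miptree_get_isl_tiling() reduces to a plain mapping on mt->tiling. Only the cases visible in this hunk are shown below; anything outside the hunk is unchanged by this patch:

      switch (mt->tiling) {
      case I915_TILING_X:
         return ISL_TILING_X;
      case I915_TILING_Y:
         return ISL_TILING_Y0;
      default:
         unreachable("Invalid tiling mode");
      }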
@@ -3224,7 +3688,8 @@ intel_miptree_get_isl_surf(struct brw_context *brw,
 {
    surf->dim = get_isl_surf_dim(mt->target);
    surf->dim_layout = get_isl_dim_layout(&brw->screen->devinfo,
-                                         mt->tiling, mt->target);
+                                         mt->tiling, mt->target,
+                                         mt->array_layout);
 
    if (mt->num_samples > 1) {
       switch (mt->msaa_layout) {
@@ -3294,7 +3759,7 @@ intel_miptree_get_isl_surf(struct brw_context *brw,
       surf->phys_level0_sa.array_len = mt->physical_depth0;
    }
 
-   surf->levels = mt->last_level + 1;
+   surf->levels = mt->last_level - mt->first_level + 1;
    surf->samples = MAX2(mt->num_samples, 1);
 
    surf->size = 0; /* TODO */
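
The surf->levels fix matters whenever a miptree does not start at mip level 0. A quick worked example:

      /* A miptree holding mip levels 2..5 (first_level = 2, last_level = 5):
       *   old: mt->last_level + 1                   = 6  (over-counts by two)
       *   new: mt->last_level - mt->first_level + 1 = 4  (levels actually stored)
       */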
@@ -3303,6 +3768,7 @@ intel_miptree_get_isl_surf(struct brw_context *brw,
    switch (surf->dim_layout) {
    case ISL_DIM_LAYOUT_GEN4_2D:
    case ISL_DIM_LAYOUT_GEN4_3D:
+   case ISL_DIM_LAYOUT_GEN6_STENCIL_HIZ:
       if (brw->gen >= 9) {
          surf->array_pitch_el_rows = mt->qpitch;
       } else {
@@ -3322,6 +3788,7 @@ intel_miptree_get_isl_surf(struct brw_context *brw,
       surf->array_pitch_span = ISL_ARRAY_PITCH_SPAN_FULL;
       break;
    case ALL_SLICES_AT_EACH_LOD:
+   case GEN6_HIZ_STENCIL:
       surf->array_pitch_span = ISL_ARRAY_PITCH_SPAN_COMPACT;
       break;
    default:
@@ -3345,7 +3812,7 @@ intel_miptree_get_isl_surf(struct brw_context *brw,
       break;
    default:
       surf->usage = ISL_SURF_USAGE_TEXTURE_BIT;
-      if (brw->format_supported_as_render_target[mt->format])
+      if (brw->mesa_format_supports_render[mt->format])
          surf->usage = ISL_SURF_USAGE_RENDER_TARGET_BIT;
       break;
    }
@@ -3354,130 +3821,15 @@ intel_miptree_get_isl_surf(struct brw_context *brw,
       surf->usage |= ISL_SURF_USAGE_CUBE_BIT;
 }
 
-/* WARNING: THE SURFACE CREATED BY THIS FUNCTION IS NOT COMPLETE AND CANNOT BE
- * USED FOR ANY REAL CALCULATIONS.  THE ONLY VALID USE OF SUCH A SURFACE IS TO
- * PASS IT INTO isl_surf_fill_state.
- */
-void
-intel_miptree_get_aux_isl_surf(struct brw_context *brw,
-                               const struct intel_mipmap_tree *mt,
-                               struct isl_surf *surf,
-                               enum isl_aux_usage *usage)
+enum isl_aux_usage
+intel_miptree_get_aux_isl_usage(const struct brw_context *brw,
+                                const struct intel_mipmap_tree *mt)
 {
-   uint32_t aux_pitch, aux_qpitch;
-   if (mt->mcs_buf) {
-      aux_pitch = mt->mcs_buf->pitch;
-      aux_qpitch = mt->mcs_buf->qpitch;
-
-      if (mt->num_samples > 1) {
-         assert(mt->msaa_layout == INTEL_MSAA_LAYOUT_CMS);
-         *usage = ISL_AUX_USAGE_MCS;
-      } else if (intel_miptree_is_lossless_compressed(brw, mt)) {
-         assert(brw->gen >= 9);
-         *usage = ISL_AUX_USAGE_CCS_E;
-      } else if (!mt->no_ccs) {
-         *usage = ISL_AUX_USAGE_CCS_D;
-      } else {
-         unreachable("Invalid MCS miptree");
-      }
-   } else if (mt->hiz_buf) {
-      if (mt->hiz_buf->mt) {
-         aux_pitch = mt->hiz_buf->mt->pitch;
-         aux_qpitch = mt->hiz_buf->mt->qpitch;
-      } else {
-         aux_pitch = mt->hiz_buf->aux_base.pitch;
-         aux_qpitch = mt->hiz_buf->aux_base.qpitch;
-      }
+   if (mt->hiz_buf)
+      return ISL_AUX_USAGE_HIZ;
 
-      *usage = ISL_AUX_USAGE_HIZ;
-   } else {
-      *usage = ISL_AUX_USAGE_NONE;
-      return;
-   }
-
-   /* Start with a copy of the original surface. */
-   intel_miptree_get_isl_surf(brw, mt, surf);
-
-   /* Figure out the format and tiling of the auxiliary surface */
-   switch (*usage) {
-   case ISL_AUX_USAGE_NONE:
-      unreachable("Invalid auxiliary usage");
-
-   case ISL_AUX_USAGE_HIZ:
-      isl_surf_get_hiz_surf(&brw->isl_dev, surf, surf);
-      break;
-
-   case ISL_AUX_USAGE_MCS:
-      /*
-       * From the SKL PRM:
-       *    "When Auxiliary Surface Mode is set to AUX_CCS_D or AUX_CCS_E,
-       *    HALIGN 16 must be used."
-       */
-      if (brw->gen >= 9)
-         assert(mt->halign == 16);
-
-      isl_surf_get_mcs_surf(&brw->isl_dev, surf, surf);
-      break;
-
-   case ISL_AUX_USAGE_CCS_D:
-   case ISL_AUX_USAGE_CCS_E:
-      /*
-       * From the BDW PRM, Volume 2d, page 260 (RENDER_SURFACE_STATE):
-       *
-       *    "When MCS is enabled for non-MSRT, HALIGN_16 must be used"
-       *
-       * From the hardware spec for GEN9:
-       *
-       *    "When Auxiliary Surface Mode is set to AUX_CCS_D or AUX_CCS_E,
-       *    HALIGN 16 must be used."
-       */
-      assert(mt->num_samples <= 1);
-      if (brw->gen >= 8)
-         assert(mt->halign == 16);
-
-      isl_surf_get_ccs_surf(&brw->isl_dev, surf, surf);
-      break;
-   }
-
-   /* We want the pitch of the actual aux buffer. */
-   surf->row_pitch = aux_pitch;
-
-   /* Auxiliary surfaces in ISL have compressed formats and array_pitch_el_rows
-    * is in elements.  This doesn't match intel_mipmap_tree::qpitch which is
-    * in elements of the primary color surface so we have to divide by the
-    * compression block height.
-    */
-   surf->array_pitch_el_rows =
-      aux_qpitch / isl_format_get_layout(surf->format)->bh;
-}
-
-union isl_color_value
-intel_miptree_get_isl_clear_color(struct brw_context *brw,
-                                  const struct intel_mipmap_tree *mt)
-{
-   union isl_color_value clear_color;
-
-   if (_mesa_get_format_base_format(mt->format) == GL_DEPTH_COMPONENT) {
-      clear_color.i32[0] = mt->depth_clear_value;
-      clear_color.i32[1] = 0;
-      clear_color.i32[2] = 0;
-      clear_color.i32[3] = 0;
-   } else if (brw->gen >= 9) {
-      clear_color.i32[0] = mt->gen9_fast_clear_color.i[0];
-      clear_color.i32[1] = mt->gen9_fast_clear_color.i[1];
-      clear_color.i32[2] = mt->gen9_fast_clear_color.i[2];
-      clear_color.i32[3] = mt->gen9_fast_clear_color.i[3];
-   } else if (_mesa_is_format_integer(mt->format)) {
-      clear_color.i32[0] = (mt->fast_clear_color_value & (1u << 31)) != 0;
-      clear_color.i32[1] = (mt->fast_clear_color_value & (1u << 30)) != 0;
-      clear_color.i32[2] = (mt->fast_clear_color_value & (1u << 29)) != 0;
-      clear_color.i32[3] = (mt->fast_clear_color_value & (1u << 28)) != 0;
-   } else {
-      clear_color.f32[0] = (mt->fast_clear_color_value & (1u << 31)) != 0;
-      clear_color.f32[1] = (mt->fast_clear_color_value & (1u << 30)) != 0;
-      clear_color.f32[2] = (mt->fast_clear_color_value & (1u << 29)) != 0;
-      clear_color.f32[3] = (mt->fast_clear_color_value & (1u << 28)) != 0;
-   }
+   if (!mt->mcs_buf)
+      return ISL_AUX_USAGE_NONE;
 
-   return clear_color;
+   return mt->aux_usage;
 }
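
A hypothetical caller sketch showing how the new helper is meant to be used when deciding whether auxiliary surface state needs to be emitted; the surrounding code is illustrative only, not part of this patch:

      enum isl_aux_usage aux_usage = intel_miptree_get_aux_isl_usage(brw, mt);
      if (aux_usage != ISL_AUX_USAGE_NONE) {
         /* HiZ, MCS or CCS is present; program the auxiliary surface too. */
      }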