Revert "i965: Make the miptree clear color setter take a gl_color_union"
diff --git a/src/mesa/drivers/dri/i965/intel_mipmap_tree.c b/src/mesa/drivers/dri/i965/intel_mipmap_tree.c
index 82f5a814a1ed2ad81a1e60244b4556fb3e9af39b..506cf73e62cd8dcd26fe77f419138ba89786f7be 100644
--- a/src/mesa/drivers/dri/i965/intel_mipmap_tree.c
+++ b/src/mesa/drivers/dri/i965/intel_mipmap_tree.c
@@ -45,6 +45,9 @@
 #include "main/texcompress_etc.h"
 #include "main/teximage.h"
 #include "main/streaming-load-memcpy.h"
+
+#include "util/format_srgb.h"
+
 #include "x86/common_x86_asm.h"
 
 #define FILE_DEBUG_FLAG DEBUG_MIPTREE
@@ -55,10 +58,6 @@ static void *intel_miptree_map_raw(struct brw_context *brw,
 
 static void intel_miptree_unmap_raw(struct intel_mipmap_tree *mt);
 
-static bool
-intel_miptree_alloc_aux(struct brw_context *brw,
-                        struct intel_mipmap_tree *mt);
-
 static bool
 intel_miptree_supports_mcs(struct brw_context *brw,
                            const struct intel_mipmap_tree *mt)
@@ -73,6 +72,10 @@ intel_miptree_supports_mcs(struct brw_context *brw,
    if (devinfo->gen < 7)
       return false;
 
+   /* See isl_surf_get_mcs_surf for details. */
+   if (mt->surf.samples == 16 && mt->surf.logical_level0_px.width > 8192)
+      return false;
+
    /* In Gen7, IMS layout is only used for depth and stencil buffers. */
    switch (_mesa_get_format_base_format(mt->format)) {
    case GL_DEPTH_COMPONENT:
@@ -783,7 +786,12 @@ intel_miptree_create(struct brw_context *brw,
 
    mt->offset = 0;
 
-   if (!intel_miptree_alloc_aux(brw, mt)) {
+   /* Create any auxiliary surface up-front, with one exception: CCS_D can
+    * only compress clear color, so we wait until an actual fast-clear to
+    * allocate it.
+    */
+   if (mt->aux_usage != ISL_AUX_USAGE_CCS_D &&
+       !intel_miptree_alloc_aux(brw, mt)) {
       intel_miptree_release(&mt);
       return NULL;
    }
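
Since intel_miptree_alloc_aux() is now non-static (see further down in this
patch), a fast-clear path can allocate the deferred CCS_D surface on demand.
A minimal sketch of such a caller, assuming a hypothetical helper name and the
declarations from intel_mipmap_tree.h:

/* Hypothetical helper, not part of this patch: lazily allocate the CCS_D
 * auxiliary surface the first time a fast-clear actually needs it.
 */
static bool
ensure_ccs_d_aux_for_fast_clear(struct brw_context *brw,
                                struct intel_mipmap_tree *mt)
{
   /* Only CCS_D defers allocation; everything else was created up-front. */
   if (mt->aux_usage != ISL_AUX_USAGE_CCS_D || mt->aux_buf != NULL)
      return true;

   return intel_miptree_alloc_aux(brw, mt);
}
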
@@ -800,11 +808,11 @@ intel_miptree_create_for_bo(struct brw_context *brw,
                             uint32_t height,
                             uint32_t depth,
                             int pitch,
+                            enum isl_tiling tiling,
                             enum intel_miptree_create_flags flags)
 {
    const struct gen_device_info *devinfo = &brw->screen->devinfo;
    struct intel_mipmap_tree *mt;
-   uint32_t tiling, swizzle;
    const GLenum target = depth > 1 ? GL_TEXTURE_2D_ARRAY : GL_TEXTURE_2D;
    const GLenum base_format = _mesa_get_format_base_format(format);
 
@@ -816,7 +824,7 @@ intel_miptree_create_for_bo(struct brw_context *brw,
                         devinfo->gen >= 6 ? depth_only_format : format,
                         0, 0, width, height, depth, 1, ISL_TILING_Y0_BIT,
                         ISL_SURF_USAGE_DEPTH_BIT | ISL_SURF_USAGE_TEXTURE_BIT,
-                        BO_ALLOC_BUSY, pitch, bo);
+                        0, pitch, bo);
       if (!mt)
          return NULL;
 
@@ -832,7 +840,7 @@ intel_miptree_create_for_bo(struct brw_context *brw,
                         ISL_TILING_W_BIT,
                         ISL_SURF_USAGE_STENCIL_BIT |
                         ISL_SURF_USAGE_TEXTURE_BIT,
-                        BO_ALLOC_BUSY, pitch, bo);
+                        0, pitch, bo);
       if (!mt)
          return NULL;
 
@@ -842,12 +850,10 @@ intel_miptree_create_for_bo(struct brw_context *brw,
       return mt;
    }
 
-   brw_bo_get_tiling(bo, &tiling, &swizzle);
-
    /* Nothing will be able to use this miptree with the BO if the offset isn't
     * aligned.
     */
-   if (tiling != I915_TILING_NONE)
+   if (tiling != ISL_TILING_LINEAR)
       assert(offset % 4096 == 0);
 
    /* miptrees can't handle negative pitch.  If you need flipping of images,
@@ -862,7 +868,7 @@ intel_miptree_create_for_bo(struct brw_context *brw,
 
    mt = make_surface(brw, target, format,
                      0, 0, width, height, depth, 1,
-                     1lu << isl_tiling_from_i915_tiling(tiling),
+                     1lu << tiling,
                      ISL_SURF_USAGE_RENDER_TARGET_BIT |
                      ISL_SURF_USAGE_TEXTURE_BIT,
                      0, pitch, bo);
@@ -876,7 +882,12 @@ intel_miptree_create_for_bo(struct brw_context *brw,
    if (!(flags & MIPTREE_CREATE_NO_AUX)) {
       intel_miptree_choose_aux_usage(brw, mt);
 
-      if (!intel_miptree_alloc_aux(brw, mt)) {
+      /* Create any auxiliary surface up-front, with one exception: CCS_D can
+       * only compress clear color, so we wait until an actual fast-clear to
+       * allocate it.
+       */
+      if (mt->aux_usage != ISL_AUX_USAGE_CCS_D &&
+          !intel_miptree_alloc_aux(brw, mt)) {
          intel_miptree_release(&mt);
          return NULL;
       }
@@ -887,7 +898,8 @@ intel_miptree_create_for_bo(struct brw_context *brw,
 
 static struct intel_mipmap_tree *
 miptree_create_for_planar_image(struct brw_context *brw,
-                                __DRIimage *image, GLenum target)
+                                __DRIimage *image, GLenum target,
+                                enum isl_tiling tiling)
 {
    const struct intel_image_format *f = image->planar_format;
    struct intel_mipmap_tree *planar_mt = NULL;
@@ -909,6 +921,7 @@ miptree_create_for_planar_image(struct brw_context *brw,
                                      image->offsets[index],
                                      width, height, 1,
                                      image->strides[index],
+                                     tiling,
                                      MIPTREE_CREATE_NO_AUX);
       if (mt == NULL)
          return NULL;
@@ -945,7 +958,7 @@ create_ccs_buf_for_image(struct brw_context *brw,
    assert(mt->last_level == 0);
 
    /* We shouldn't already have a CCS */
-   assert(!mt->mcs_buf);
+   assert(!mt->aux_buf);
 
    if (!isl_surf_get_ccs_surf(&brw->isl_dev, &mt->surf, &temp_ccs_surf,
                               image->aux_pitch))
@@ -954,25 +967,39 @@ create_ccs_buf_for_image(struct brw_context *brw,
    assert(image->aux_offset < image->bo->size);
    assert(temp_ccs_surf.size <= image->bo->size - image->aux_offset);
 
-   mt->mcs_buf = calloc(sizeof(*mt->mcs_buf), 1);
-   if (mt->mcs_buf == NULL)
+   mt->aux_buf = calloc(sizeof(*mt->aux_buf), 1);
+   if (mt->aux_buf == NULL)
       return false;
 
    mt->aux_state = create_aux_state_map(mt, initial_state);
    if (!mt->aux_state) {
-      free(mt->mcs_buf);
-      mt->mcs_buf = NULL;
+      free(mt->aux_buf);
+      mt->aux_buf = NULL;
       return false;
    }
 
-   mt->mcs_buf->bo = image->bo;
+   /* On gen10+ we start using extra space in the aux buffer to store the
+    * indirect clear color. However, if we imported an image from the window
+    * system with CCS, we don't have that extra space at the end of the aux
+    * buffer. So create a separate BO here to hold the clear color.
+    */
+   if (brw->isl_dev.ss.clear_color_state_size > 0) {
+      mt->aux_buf->clear_color_bo =
+         brw_bo_alloc_tiled(brw->bufmgr, "clear_color_bo",
+                            brw->isl_dev.ss.clear_color_state_size,
+                            I915_TILING_NONE, 0, BO_ALLOC_ZEROED);
+      if (!mt->aux_buf->clear_color_bo) {
+         free(mt->aux_buf);
+         mt->aux_buf = NULL;
+         return false;
+      }
+   }
+
+   mt->aux_buf->bo = image->bo;
    brw_bo_reference(image->bo);
 
-   mt->mcs_buf->offset = image->aux_offset;
-   mt->mcs_buf->size = image->bo->size - image->aux_offset;
-   mt->mcs_buf->pitch = image->aux_pitch;
-   mt->mcs_buf->qpitch = 0;
-   mt->mcs_buf->surf = temp_ccs_surf;
+   mt->aux_buf->offset = image->aux_offset;
+   mt->aux_buf->surf = temp_ccs_surf;
 
    return true;
 }
@@ -983,8 +1010,17 @@ intel_miptree_create_for_dri_image(struct brw_context *brw,
                                    mesa_format format,
                                    bool is_winsys_image)
 {
+   uint32_t bo_tiling, bo_swizzle;
+   brw_bo_get_tiling(image->bo, &bo_tiling, &bo_swizzle);
+
+   const struct isl_drm_modifier_info *mod_info =
+      isl_drm_modifier_get_info(image->modifier);
+
+   const enum isl_tiling tiling =
+      mod_info ? mod_info->tiling : isl_tiling_from_i915_tiling(bo_tiling);
+
    if (image->planar_format && image->planar_format->nplanes > 1)
-      return miptree_create_for_planar_image(brw, image, target);
+      return miptree_create_for_planar_image(brw, image, target, tiling);
 
    if (image->planar_format)
       assert(image->planar_format->planes[0].dri_format == image->dri_format);
@@ -1005,9 +1041,6 @@ intel_miptree_create_for_dri_image(struct brw_context *brw,
    if (!brw->ctx.TextureFormatSupported[format])
       return NULL;
 
-   const struct isl_drm_modifier_info *mod_info =
-      isl_drm_modifier_get_info(image->modifier);
-
    enum intel_miptree_create_flags mt_create_flags = 0;
 
    /* If this image comes in from a window system, we have different
@@ -1033,7 +1066,7 @@ intel_miptree_create_for_dri_image(struct brw_context *brw,
    struct intel_mipmap_tree *mt =
       intel_miptree_create_for_bo(brw, image->bo, format,
                                   image->offset, image->width, image->height, 1,
-                                  image->pitch, mt_create_flags);
+                                  image->pitch, tiling, mt_create_flags);
    if (mt == NULL)
       return NULL;
 
@@ -1201,6 +1234,7 @@ intel_miptree_aux_buffer_free(struct intel_miptree_aux_buffer *aux_buf)
       return;
 
    brw_bo_unreference(aux_buf->bo);
+   brw_bo_unreference(aux_buf->clear_color_bo);
 
    free(aux_buf);
 }
@@ -1220,8 +1254,7 @@ intel_miptree_release(struct intel_mipmap_tree **mt)
       brw_bo_unreference((*mt)->bo);
       intel_miptree_release(&(*mt)->stencil_mt);
       intel_miptree_release(&(*mt)->r8stencil_mt);
-      intel_miptree_aux_buffer_free((*mt)->hiz_buf);
-      intel_miptree_aux_buffer_free((*mt)->mcs_buf);
+      intel_miptree_aux_buffer_free((*mt)->aux_buf);
       free_aux_state_map((*mt)->aux_state);
 
       intel_miptree_release(&(*mt)->plane[0]);
@@ -1298,7 +1331,8 @@ intel_miptree_match_image(struct intel_mipmap_tree *mt,
    if (mt->etc_format != MESA_FORMAT_NONE)
       mt_format = mt->etc_format;
 
-   if (image->TexFormat != mt_format)
+   if (_mesa_get_srgb_format_linear(image->TexFormat) !=
+       _mesa_get_srgb_format_linear(mt_format))
       return false;
 
    intel_get_image_dims(image, &width, &height, &depth);
@@ -1537,7 +1571,8 @@ intel_miptree_copy_slice(struct brw_context *brw,
    assert(src_layer < get_num_phys_layers(&src_mt->surf,
                                           src_level - src_mt->first_level));
 
-   assert(src_mt->format == dst_mt->format);
+   assert(_mesa_get_srgb_format_linear(src_mt->format) ==
+          _mesa_get_srgb_format_linear(dst_mt->format));
 
    if (dst_mt->compressed) {
       unsigned int i, j;
@@ -1574,7 +1609,7 @@ intel_miptree_copy_slice(struct brw_context *brw,
    if (!intel_miptree_blit(brw,
                            src_mt, src_level, src_layer, 0, 0, false,
                            dst_mt, dst_level, dst_layer, 0, 0, false,
-                           width, height, GL_COPY)) {
+                           width, height, COLOR_LOGICOP_COPY)) {
       perf_debug("miptree validate blit for %s failed\n",
                  _mesa_get_format_name(format));
 
@@ -1625,157 +1660,87 @@ intel_miptree_copy_teximage(struct brw_context *brw,
    intel_obj->needs_validate = true;
 }
 
-static void
-intel_miptree_init_mcs(struct brw_context *brw,
-                       struct intel_mipmap_tree *mt,
-                       int init_value)
-{
-   assert(mt->mcs_buf != NULL);
-
-   /* From the Ivy Bridge PRM, Vol 2 Part 1 p326:
-    *
-    *     When MCS buffer is enabled and bound to MSRT, it is required that it
-    *     is cleared prior to any rendering.
-    *
-    * Since we don't use the MCS buffer for any purpose other than rendering,
-    * it makes sense to just clear it immediately upon allocation.
-    *
-    * Note: the clear value for MCS buffers is all 1's, so we memset to 0xff.
-    */
-   void *map = brw_bo_map(brw, mt->mcs_buf->bo, MAP_WRITE);
-   if (unlikely(map == NULL)) {
-      fprintf(stderr, "Failed to map mcs buffer into GTT\n");
-      brw_bo_unreference(mt->mcs_buf->bo);
-      free(mt->mcs_buf);
-      return;
-   }
-   void *data = map;
-   memset(data, init_value, mt->mcs_buf->size);
-   brw_bo_unmap(mt->mcs_buf->bo);
-}
-
 static struct intel_miptree_aux_buffer *
 intel_alloc_aux_buffer(struct brw_context *brw,
-                       const char *name,
                        const struct isl_surf *aux_surf,
-                       uint32_t alloc_flags,
-                       struct intel_mipmap_tree *mt)
+                       bool wants_memset,
+                       uint8_t memset_value)
 {
    struct intel_miptree_aux_buffer *buf = calloc(sizeof(*buf), 1);
    if (!buf)
       return false;
 
-   buf->size = aux_surf->size;
-   buf->pitch = aux_surf->row_pitch;
-   buf->qpitch = isl_surf_get_array_pitch_sa_rows(aux_surf);
+   uint64_t size = aux_surf->size;
+
+   const bool has_indirect_clear = brw->isl_dev.ss.clear_color_state_size > 0;
+   if (has_indirect_clear) {
+      /* On CNL+, instead of setting the clear color in the SURFACE_STATE, we
+       * will set a pointer to a dword somewhere that contains the color. So,
+       * allocate the space for the clear color value here on the aux buffer.
+       */
+      buf->clear_color_offset = size;
+      size += brw->isl_dev.ss.clear_color_state_size;
+   }
+
+   /* If the buffer needs to be initialized (which requires mapping it to CPU
+    * space for writing immediately), do not use the GPU-access flag, which
+    * can cause an unnecessary delay if the backing pages happen to have just
+    * been used by the GPU.
+    */
+   const bool alloc_zeroed = wants_memset && memset_value == 0;
+   const bool needs_memset =
+      !alloc_zeroed && (wants_memset || has_indirect_clear);
+   const uint32_t alloc_flags =
+      alloc_zeroed ? BO_ALLOC_ZEROED : (needs_memset ? 0 : BO_ALLOC_BUSY);
 
    /* ISL has a stricter set of alignment rules than the drm allocator.
     * Therefore one can pass the ISL dimensions in terms of bytes instead of
     * trying to recalculate based on different format block sizes.
     */
-   buf->bo = brw_bo_alloc_tiled(brw->bufmgr, name, buf->size,
-                                I915_TILING_Y, buf->pitch, alloc_flags);
+   buf->bo = brw_bo_alloc_tiled(brw->bufmgr, "aux-miptree", size,
+                                I915_TILING_Y, aux_surf->row_pitch,
+                                alloc_flags);
    if (!buf->bo) {
       free(buf);
       return NULL;
    }
 
-   buf->surf = *aux_surf;
-
-   return buf;
-}
-
-static bool
-intel_miptree_alloc_mcs(struct brw_context *brw,
-                        struct intel_mipmap_tree *mt,
-                        GLuint num_samples)
-{
-   assert(brw->screen->devinfo.gen >= 7); /* MCS only used on Gen7+ */
-   assert(mt->mcs_buf == NULL);
-   assert(mt->aux_usage == ISL_AUX_USAGE_MCS);
+   /* Initialize the bo to the desired value */
+   if (needs_memset) {
+      assert(!(alloc_flags & BO_ALLOC_BUSY));
 
-   /* Multisampled miptrees are only supported for single level. */
-   assert(mt->first_level == 0);
-   enum isl_aux_state **aux_state =
-      create_aux_state_map(mt, ISL_AUX_STATE_CLEAR);
-   if (!aux_state)
-      return false;
+      void *map = brw_bo_map(brw, buf->bo, MAP_WRITE | MAP_RAW);
+      if (map == NULL) {
+         intel_miptree_aux_buffer_free(buf);
+         return NULL;
+      }
 
-   struct isl_surf temp_mcs_surf;
+      /* Memset the aux_surf portion of the BO. */
+      if (wants_memset)
+         memset(map, memset_value, aux_surf->size);
 
-   MAYBE_UNUSED bool ok =
-      isl_surf_get_mcs_surf(&brw->isl_dev, &mt->surf, &temp_mcs_surf);
-   assert(ok);
+      /* Zero the indirect clear color to match ::fast_clear_color. */
+      if (has_indirect_clear) {
+         memset((char *)map + buf->clear_color_offset, 0,
+                brw->isl_dev.ss.clear_color_state_size);
+      }
 
-   /* Buffer needs to be initialised requiring the buffer to be immediately
-    * mapped to cpu space for writing. Therefore do not use the gpu access
-    * flag which can cause an unnecessary delay if the backing pages happened
-    * to be just used by the GPU.
-    */
-   const uint32_t alloc_flags = 0;
-   mt->mcs_buf = intel_alloc_aux_buffer(brw, "mcs-miptree",
-                                        &temp_mcs_surf, alloc_flags, mt);
-   if (!mt->mcs_buf) {
-      free(aux_state);
-      return false;
+      brw_bo_unmap(buf->bo);
    }
 
-   mt->aux_state = aux_state;
-
-   intel_miptree_init_mcs(brw, mt, 0xFF);
-
-   return true;
-}
-
-bool
-intel_miptree_alloc_ccs(struct brw_context *brw,
-                        struct intel_mipmap_tree *mt)
-{
-   assert(mt->mcs_buf == NULL);
-   assert(mt->aux_usage == ISL_AUX_USAGE_CCS_E ||
-          mt->aux_usage == ISL_AUX_USAGE_CCS_D);
-
-   struct isl_surf temp_ccs_surf;
-
-   if (!isl_surf_get_ccs_surf(&brw->isl_dev, &mt->surf, &temp_ccs_surf, 0))
-      return false;
-
-   assert(temp_ccs_surf.size &&
-          (temp_ccs_surf.size % temp_ccs_surf.row_pitch == 0));
-
-   enum isl_aux_state **aux_state =
-      create_aux_state_map(mt, ISL_AUX_STATE_PASS_THROUGH);
-   if (!aux_state)
-      return false;
-
-   /* When CCS_E is used, we need to ensure that the CCS starts off in a valid
-    * state.  From the Sky Lake PRM, "MCS Buffer for Render Target(s)":
-    *
-    *    "If Software wants to enable Color Compression without Fast clear,
-    *    Software needs to initialize MCS with zeros."
-    *
-    * A CCS value of 0 indicates that the corresponding block is in the
-    * pass-through state which is what we want.
-    *
-    * For CCS_D, on the other hand, we don't care as we're about to perform a
-    * fast-clear operation.  In that case, being hot in caches more useful.
-    */
-   const uint32_t alloc_flags = mt->aux_usage == ISL_AUX_USAGE_CCS_E ?
-                                BO_ALLOC_ZEROED : BO_ALLOC_BUSY;
-   mt->mcs_buf = intel_alloc_aux_buffer(brw, "ccs-miptree",
-                                        &temp_ccs_surf, alloc_flags, mt);
-   if (!mt->mcs_buf) {
-      free(aux_state);
-      return false;
+   if (has_indirect_clear) {
+      buf->clear_color_bo = buf->bo;
+      brw_bo_reference(buf->clear_color_bo);
    }
 
-   mt->aux_state = aux_state;
+   buf->surf = *aux_surf;
 
-   return true;
+   return buf;
 }
 
+
 /**
- * Helper for intel_miptree_alloc_hiz() that sets
+ * Helper for intel_miptree_alloc_aux() that sets
  * \c mt->level[level].has_hiz. Return true if and only if
  * \c has_hiz was set.
  */
@@ -1786,7 +1751,7 @@ intel_miptree_level_enable_hiz(struct brw_context *brw,
 {
    const struct gen_device_info *devinfo = &brw->screen->devinfo;
 
-   assert(mt->hiz_buf);
+   assert(mt->aux_buf);
    assert(mt->surf.size > 0);
 
    if (devinfo->gen >= 8 || devinfo->is_haswell) {
@@ -1810,41 +1775,6 @@ intel_miptree_level_enable_hiz(struct brw_context *brw,
    return true;
 }
 
-bool
-intel_miptree_alloc_hiz(struct brw_context *brw,
-                       struct intel_mipmap_tree *mt)
-{
-   assert(mt->hiz_buf == NULL);
-   assert(mt->aux_usage == ISL_AUX_USAGE_HIZ);
-
-   enum isl_aux_state **aux_state =
-      create_aux_state_map(mt, ISL_AUX_STATE_AUX_INVALID);
-   if (!aux_state)
-      return false;
-
-   struct isl_surf temp_hiz_surf;
-
-   MAYBE_UNUSED bool ok =
-      isl_surf_get_hiz_surf(&brw->isl_dev, &mt->surf, &temp_hiz_surf);
-   assert(ok);
-
-   const uint32_t alloc_flags = BO_ALLOC_BUSY;
-   mt->hiz_buf = intel_alloc_aux_buffer(brw, "hiz-miptree",
-                                        &temp_hiz_surf, alloc_flags, mt);
-
-   if (!mt->hiz_buf) {
-      free(aux_state);
-      return false;
-   }
-
-   for (unsigned level = mt->first_level; level <= mt->last_level; ++level)
-      intel_miptree_level_enable_hiz(brw, mt, level);
-
-   mt->aux_state = aux_state;
-
-   return true;
-}
-
 
 /**
  * Allocate the initial aux surface for a miptree based on mt->aux_usage
@@ -1853,42 +1783,105 @@ intel_miptree_alloc_hiz(struct brw_context *brw,
  * create the auxiliary surfaces up-front.  CCS_D, on the other hand, can only
  * compress clear color so we wait until an actual fast-clear to allocate it.
  */
-static bool
+bool
 intel_miptree_alloc_aux(struct brw_context *brw,
                         struct intel_mipmap_tree *mt)
 {
+   assert(mt->aux_buf == NULL);
+
+   /* Get the aux buf allocation parameters for this miptree. */
+   enum isl_aux_state initial_state;
+   uint8_t memset_value;
+   struct isl_surf aux_surf;
+   bool aux_surf_ok;
+
    switch (mt->aux_usage) {
    case ISL_AUX_USAGE_NONE:
-      return true;
-
+      aux_surf.size = 0;
+      aux_surf_ok = true;
+      break;
    case ISL_AUX_USAGE_HIZ:
       assert(!_mesa_is_format_color_format(mt->format));
-      if (!intel_miptree_alloc_hiz(brw, mt))
-         return false;
-      return true;
 
+      initial_state = ISL_AUX_STATE_AUX_INVALID;
+      aux_surf_ok = isl_surf_get_hiz_surf(&brw->isl_dev, &mt->surf, &aux_surf);
+      assert(aux_surf_ok);
+      break;
    case ISL_AUX_USAGE_MCS:
       assert(_mesa_is_format_color_format(mt->format));
-      assert(mt->surf.samples > 1);
-      if (!intel_miptree_alloc_mcs(brw, mt, mt->surf.samples))
-         return false;
-      return true;
+      assert(brw->screen->devinfo.gen >= 7); /* MCS only used on Gen7+ */
 
-   case ISL_AUX_USAGE_CCS_D:
-      /* Since CCS_D can only compress clear color so we wait until an actual
-       * fast-clear to allocate it.
+      /* From the Ivy Bridge PRM, Vol 2 Part 1 p326:
+       *
+       *     When MCS buffer is enabled and bound to MSRT, it is required that
+       *     it is cleared prior to any rendering.
+       *
+       * Since we don't use the MCS buffer for any purpose other than
+       * rendering, it makes sense to just clear it immediately upon
+       * allocation.
+       *
+       * Note: the clear value for MCS buffers is all 1's, so we memset to
+       * 0xff.
        */
-      return true;
-
+      initial_state = ISL_AUX_STATE_CLEAR;
+      memset_value = 0xFF;
+      aux_surf_ok = isl_surf_get_mcs_surf(&brw->isl_dev, &mt->surf, &aux_surf);
+      assert(aux_surf_ok);
+      break;
+   case ISL_AUX_USAGE_CCS_D:
    case ISL_AUX_USAGE_CCS_E:
       assert(_mesa_is_format_color_format(mt->format));
-      assert(mt->surf.samples == 1);
-      if (!intel_miptree_alloc_ccs(brw, mt))
-         return false;
+
+      /* When CCS_E is used, we need to ensure that the CCS starts off in a
+       * valid state.  From the Sky Lake PRM, "MCS Buffer for Render
+       * Target(s)":
+       *
+       *    "If Software wants to enable Color Compression without Fast
+       *    clear, Software needs to initialize MCS with zeros."
+       *
+       * A CCS value of 0 indicates that the corresponding block is in the
+       * pass-through state which is what we want.
+       *
+       * For CCS_D, do the same thing. On gen9+, this avoids having any
+       * undefined bits in the aux buffer.
+       */
+      initial_state = ISL_AUX_STATE_PASS_THROUGH;
+      memset_value = 0;
+      aux_surf_ok =
+         isl_surf_get_ccs_surf(&brw->isl_dev, &mt->surf, &aux_surf, 0);
+      break;
+   }
+
+   /* Ensure we have a valid aux_surf. */
+   if (!aux_surf_ok)
+      return false;
+
+   /* No work is needed for a zero-sized auxiliary buffer. */
+   if (aux_surf.size == 0)
       return true;
+
+   /* Create the aux_state for the auxiliary buffer. */
+   mt->aux_state = create_aux_state_map(mt, initial_state);
+   if (mt->aux_state == NULL)
+      return false;
+
+   /* Allocate the auxiliary buffer. */
+   const bool needs_memset = initial_state != ISL_AUX_STATE_AUX_INVALID;
+   mt->aux_buf = intel_alloc_aux_buffer(brw, &aux_surf, needs_memset,
+                                        memset_value);
+   if (mt->aux_buf == NULL) {
+      free_aux_state_map(mt->aux_state);
+      mt->aux_state = NULL;
+      return false;
+   }
+
+   /* Perform aux_usage-specific initialization. */
+   if (mt->aux_usage == ISL_AUX_USAGE_HIZ) {
+      for (unsigned level = mt->first_level; level <= mt->last_level; ++level)
+         intel_miptree_level_enable_hiz(brw, mt, level);
    }
 
-   unreachable("Invalid aux usage");
+   return true;
 }
 
 
@@ -1901,14 +1894,11 @@ intel_miptree_sample_with_hiz(struct brw_context *brw,
 {
    const struct gen_device_info *devinfo = &brw->screen->devinfo;
 
-   /* It's unclear how well supported sampling from the hiz buffer is on GEN8,
-    * so keep things conservative for now and never enable it unless we're SKL+.
-    */
-   if (devinfo->gen < 9) {
+   if (!devinfo->has_sample_with_hiz) {
       return false;
    }
 
-   if (!mt->hiz_buf) {
+   if (!mt->aux_buf) {
       return false;
    }
 
@@ -1988,7 +1978,7 @@ intel_miptree_has_color_unresolved(const struct intel_mipmap_tree *mt,
 {
    assert(_mesa_is_format_color_format(mt->format));
 
-   if (!mt->mcs_buf)
+   if (!mt->aux_buf)
       return false;
 
    /* Clamp the level range to fit the miptree */
@@ -2015,7 +2005,7 @@ intel_miptree_check_color_resolve(const struct brw_context *brw,
                                   const struct intel_mipmap_tree *mt,
                                   unsigned level, unsigned layer)
 {
-   if (!mt->mcs_buf)
+   if (!mt->aux_buf)
       return;
 
    /* Fast color clear is supported for mipmapped surfaces only on Gen8+. */
@@ -2036,7 +2026,7 @@ intel_miptree_check_color_resolve(const struct brw_context *brw,
    (void)layer;
 }
 
-static enum blorp_fast_clear_op
+static enum isl_aux_op
 get_ccs_d_resolve_op(enum isl_aux_state aux_state,
                      enum isl_aux_usage aux_usage,
                      bool fast_clear_supported)
@@ -2051,12 +2041,12 @@ get_ccs_d_resolve_op(enum isl_aux_state aux_state,
    case ISL_AUX_STATE_CLEAR:
    case ISL_AUX_STATE_PARTIAL_CLEAR:
       if (!ccs_supported)
-         return BLORP_FAST_CLEAR_OP_RESOLVE_FULL;
+         return ISL_AUX_OP_FULL_RESOLVE;
       else
-         return BLORP_FAST_CLEAR_OP_NONE;
+         return ISL_AUX_OP_NONE;
 
    case ISL_AUX_STATE_PASS_THROUGH:
-      return BLORP_FAST_CLEAR_OP_NONE;
+      return ISL_AUX_OP_NONE;
 
    case ISL_AUX_STATE_RESOLVED:
    case ISL_AUX_STATE_AUX_INVALID:
@@ -2068,7 +2058,7 @@ get_ccs_d_resolve_op(enum isl_aux_state aux_state,
    unreachable("Invalid aux state for CCS_D");
 }
 
-static enum blorp_fast_clear_op
+static enum isl_aux_op
 get_ccs_e_resolve_op(enum isl_aux_state aux_state,
                      enum isl_aux_usage aux_usage,
                      bool fast_clear_supported)
@@ -2085,28 +2075,28 @@ get_ccs_e_resolve_op(enum isl_aux_state aux_state,
    case ISL_AUX_STATE_CLEAR:
    case ISL_AUX_STATE_PARTIAL_CLEAR:
       if (fast_clear_supported)
-         return BLORP_FAST_CLEAR_OP_NONE;
+         return ISL_AUX_OP_NONE;
       else if (aux_usage == ISL_AUX_USAGE_CCS_E)
-         return BLORP_FAST_CLEAR_OP_RESOLVE_PARTIAL;
+         return ISL_AUX_OP_PARTIAL_RESOLVE;
       else
-         return BLORP_FAST_CLEAR_OP_RESOLVE_FULL;
+         return ISL_AUX_OP_FULL_RESOLVE;
 
    case ISL_AUX_STATE_COMPRESSED_CLEAR:
       if (aux_usage != ISL_AUX_USAGE_CCS_E)
-         return BLORP_FAST_CLEAR_OP_RESOLVE_FULL;
+         return ISL_AUX_OP_FULL_RESOLVE;
       else if (!fast_clear_supported)
-         return BLORP_FAST_CLEAR_OP_RESOLVE_PARTIAL;
+         return ISL_AUX_OP_PARTIAL_RESOLVE;
       else
-         return BLORP_FAST_CLEAR_OP_NONE;
+         return ISL_AUX_OP_NONE;
 
    case ISL_AUX_STATE_COMPRESSED_NO_CLEAR:
       if (aux_usage != ISL_AUX_USAGE_CCS_E)
-         return BLORP_FAST_CLEAR_OP_RESOLVE_FULL;
+         return ISL_AUX_OP_FULL_RESOLVE;
       else
-         return BLORP_FAST_CLEAR_OP_NONE;
+         return ISL_AUX_OP_NONE;
 
    case ISL_AUX_STATE_PASS_THROUGH:
-      return BLORP_FAST_CLEAR_OP_NONE;
+      return ISL_AUX_OP_NONE;
 
    case ISL_AUX_STATE_RESOLVED:
    case ISL_AUX_STATE_AUX_INVALID:
@@ -2125,7 +2115,7 @@ intel_miptree_prepare_ccs_access(struct brw_context *brw,
 {
    enum isl_aux_state aux_state = intel_miptree_get_aux_state(mt, level, layer);
 
-   enum blorp_fast_clear_op resolve_op;
+   enum isl_aux_op resolve_op;
    if (mt->aux_usage == ISL_AUX_USAGE_CCS_E) {
       resolve_op = get_ccs_e_resolve_op(aux_state, aux_usage,
                                         fast_clear_supported);
@@ -2135,12 +2125,12 @@ intel_miptree_prepare_ccs_access(struct brw_context *brw,
                                         fast_clear_supported);
    }
 
-   if (resolve_op != BLORP_FAST_CLEAR_OP_NONE) {
+   if (resolve_op != ISL_AUX_OP_NONE) {
       intel_miptree_check_color_resolve(brw, mt, level, layer);
       brw_blorp_resolve_color(brw, mt, level, layer, resolve_op);
 
       switch (resolve_op) {
-      case BLORP_FAST_CLEAR_OP_RESOLVE_FULL:
+      case ISL_AUX_OP_FULL_RESOLVE:
          /* The CCS full resolve operation destroys the CCS and sets it to the
           * pass-through state.  (You can also think of this as being both a
           * resolve and an ambiguate in one operation.)
@@ -2149,7 +2139,7 @@ intel_miptree_prepare_ccs_access(struct brw_context *brw,
                                      ISL_AUX_STATE_PASS_THROUGH);
          break;
 
-      case BLORP_FAST_CLEAR_OP_RESOLVE_PARTIAL:
+      case ISL_AUX_OP_PARTIAL_RESOLVE:
          intel_miptree_set_aux_state(brw, mt, level, layer, 1,
                                      ISL_AUX_STATE_COMPRESSED_NO_CLEAR);
          break;
@@ -2298,17 +2288,17 @@ intel_miptree_prepare_hiz_access(struct brw_context *brw,
 {
    assert(aux_usage == ISL_AUX_USAGE_NONE || aux_usage == ISL_AUX_USAGE_HIZ);
 
-   enum blorp_hiz_op hiz_op = BLORP_HIZ_OP_NONE;
+   enum isl_aux_op hiz_op = ISL_AUX_OP_NONE;
    switch (intel_miptree_get_aux_state(mt, level, layer)) {
    case ISL_AUX_STATE_CLEAR:
    case ISL_AUX_STATE_COMPRESSED_CLEAR:
       if (aux_usage != ISL_AUX_USAGE_HIZ || !fast_clear_supported)
-         hiz_op = BLORP_HIZ_OP_DEPTH_RESOLVE;
+         hiz_op = ISL_AUX_OP_FULL_RESOLVE;
       break;
 
    case ISL_AUX_STATE_COMPRESSED_NO_CLEAR:
       if (aux_usage != ISL_AUX_USAGE_HIZ)
-         hiz_op = BLORP_HIZ_OP_DEPTH_RESOLVE;
+         hiz_op = ISL_AUX_OP_FULL_RESOLVE;
       break;
 
    case ISL_AUX_STATE_PASS_THROUGH:
@@ -2317,23 +2307,23 @@ intel_miptree_prepare_hiz_access(struct brw_context *brw,
 
    case ISL_AUX_STATE_AUX_INVALID:
       if (aux_usage == ISL_AUX_USAGE_HIZ)
-         hiz_op = BLORP_HIZ_OP_HIZ_RESOLVE;
+         hiz_op = ISL_AUX_OP_AMBIGUATE;
       break;
 
    case ISL_AUX_STATE_PARTIAL_CLEAR:
       unreachable("Invalid HiZ state");
    }
 
-   if (hiz_op != BLORP_HIZ_OP_NONE) {
+   if (hiz_op != ISL_AUX_OP_NONE) {
       intel_hiz_exec(brw, mt, level, layer, 1, hiz_op);
 
       switch (hiz_op) {
-      case BLORP_HIZ_OP_DEPTH_RESOLVE:
+      case ISL_AUX_OP_FULL_RESOLVE:
          intel_miptree_set_aux_state(brw, mt, level, layer, 1,
                                      ISL_AUX_STATE_RESOLVED);
          break;
 
-      case BLORP_HIZ_OP_HIZ_RESOLVE:
+      case ISL_AUX_OP_AMBIGUATE:
          /* The HiZ resolve operation is actually an ambiguate */
          intel_miptree_set_aux_state(brw, mt, level, layer, 1,
                                      ISL_AUX_STATE_PASS_THROUGH);
@@ -2407,7 +2397,7 @@ intel_miptree_prepare_access(struct brw_context *brw,
       break;
 
    case ISL_AUX_USAGE_MCS:
-      assert(mt->mcs_buf);
+      assert(mt->aux_buf);
       assert(start_level == 0 && num_levels == 1);
       const uint32_t level_layers =
          miptree_layer_range_length(mt, 0, start_layer, num_layers);
@@ -2419,7 +2409,7 @@ intel_miptree_prepare_access(struct brw_context *brw,
 
    case ISL_AUX_USAGE_CCS_D:
    case ISL_AUX_USAGE_CCS_E:
-      if (!mt->mcs_buf)
+      if (!mt->aux_buf)
          return;
 
       for (uint32_t l = 0; l < num_levels; l++) {
@@ -2435,7 +2425,7 @@ intel_miptree_prepare_access(struct brw_context *brw,
       break;
 
    case ISL_AUX_USAGE_HIZ:
-      assert(mt->hiz_buf);
+      assert(mt->aux_buf);
       for (uint32_t l = 0; l < num_levels; l++) {
          const uint32_t level = start_level + l;
          if (!intel_miptree_level_has_hiz(mt, level))
@@ -2469,7 +2459,7 @@ intel_miptree_finish_write(struct brw_context *brw,
       break;
 
    case ISL_AUX_USAGE_MCS:
-      assert(mt->mcs_buf);
+      assert(mt->aux_buf);
       for (uint32_t a = 0; a < num_layers; a++) {
          intel_miptree_finish_mcs_write(brw, mt, start_layer + a,
                                         aux_usage);
@@ -2478,7 +2468,7 @@ intel_miptree_finish_write(struct brw_context *brw,
 
    case ISL_AUX_USAGE_CCS_D:
    case ISL_AUX_USAGE_CCS_E:
-      if (!mt->mcs_buf)
+      if (!mt->aux_buf)
          return;
 
       for (uint32_t a = 0; a < num_layers; a++) {
@@ -2509,7 +2499,7 @@ intel_miptree_get_aux_state(const struct intel_mipmap_tree *mt,
    intel_miptree_check_level_layer(mt, level, layer);
 
    if (_mesa_is_format_color_format(mt->format)) {
-      assert(mt->mcs_buf != NULL);
+      assert(mt->aux_buf != NULL);
       assert(mt->surf.samples == 1 ||
              mt->surf.msaa_layout == ISL_MSAA_LAYOUT_ARRAY);
    } else if (mt->format == MESA_FORMAT_S_UINT8) {
@@ -2530,7 +2520,7 @@ intel_miptree_set_aux_state(struct brw_context *brw,
    num_layers = miptree_layer_range_length(mt, level, start_layer, num_layers);
 
    if (_mesa_is_format_color_format(mt->format)) {
-      assert(mt->mcs_buf != NULL);
+      assert(mt->aux_buf != NULL);
       assert(mt->surf.samples == 1 ||
              mt->surf.msaa_layout == ISL_MSAA_LAYOUT_ARRAY);
    } else if (mt->format == MESA_FORMAT_S_UINT8) {
@@ -2566,9 +2556,8 @@ can_texture_with_ccs(struct brw_context *brw,
    if (mt->aux_usage != ISL_AUX_USAGE_CCS_E)
       return false;
 
-   /* TODO: Replace with format_ccs_e_compat_with_miptree for better perf. */
-   if (!isl_formats_are_ccs_e_compatible(&brw->screen->devinfo,
-                                         mt->surf.format, view_format)) {
+   if (!format_ccs_e_compat_with_miptree(&brw->screen->devinfo,
+                                         mt, view_format)) {
       perf_debug("Incompatible sampling format (%s) for rbc (%s)\n",
                  isl_format_get_layout(view_format)->name,
                  _mesa_get_format_name(mt->format));
@@ -2594,7 +2583,7 @@ intel_miptree_texture_aux_usage(struct brw_context *brw,
 
    case ISL_AUX_USAGE_CCS_D:
    case ISL_AUX_USAGE_CCS_E:
-      if (!mt->mcs_buf) {
+      if (!mt->aux_buf) {
          assert(mt->aux_usage == ISL_AUX_USAGE_CCS_D);
          return ISL_AUX_USAGE_NONE;
       }
@@ -2640,10 +2629,9 @@ intel_miptree_prepare_texture(struct brw_context *brw,
                               struct intel_mipmap_tree *mt,
                               enum isl_format view_format,
                               uint32_t start_level, uint32_t num_levels,
-                              uint32_t start_layer, uint32_t num_layers,
-                              bool disable_aux)
+                              uint32_t start_layer, uint32_t num_layers)
 {
-   enum isl_aux_usage aux_usage = disable_aux ? ISL_AUX_USAGE_NONE :
+   enum isl_aux_usage aux_usage =
       intel_miptree_texture_aux_usage(brw, mt, view_format);
    bool clear_supported = aux_usage != ISL_AUX_USAGE_NONE;
 
@@ -2673,36 +2661,42 @@ enum isl_aux_usage
 intel_miptree_render_aux_usage(struct brw_context *brw,
                                struct intel_mipmap_tree *mt,
                                enum isl_format render_format,
-                               bool blend_enabled)
+                               bool blend_enabled,
+                               bool draw_aux_disabled)
 {
+   struct gen_device_info *devinfo = &brw->screen->devinfo;
+
+   if (draw_aux_disabled)
+      return ISL_AUX_USAGE_NONE;
+
    switch (mt->aux_usage) {
    case ISL_AUX_USAGE_MCS:
-      assert(mt->mcs_buf);
+      assert(mt->aux_buf);
       return ISL_AUX_USAGE_MCS;
 
    case ISL_AUX_USAGE_CCS_D:
-      return mt->mcs_buf ? ISL_AUX_USAGE_CCS_D : ISL_AUX_USAGE_NONE;
-
-   case ISL_AUX_USAGE_CCS_E: {
-      /* If the format supports CCS_E and is compatible with the miptree,
-       * then we can use it.
-       */
-      if (format_ccs_e_compat_with_miptree(&brw->screen->devinfo,
-                                           mt, render_format))
-         return ISL_AUX_USAGE_CCS_E;
-
-      /* Otherwise, we have to fall back to CCS_D */
+   case ISL_AUX_USAGE_CCS_E:
+      if (!mt->aux_buf) {
+         assert(mt->aux_usage == ISL_AUX_USAGE_CCS_D);
+         return ISL_AUX_USAGE_NONE;
+      }
 
-      /* gen9 hardware technically supports non-0/1 clear colors with sRGB
+      /* gen9+ hardware technically supports non-0/1 clear colors with sRGB
        * formats.  However, there are issues with blending where it doesn't
        * properly apply the sRGB curve to the clear color when blending.
        */
-      if (blend_enabled && isl_format_is_srgb(render_format) &&
+      if (devinfo->gen >= 9 && blend_enabled &&
+          isl_format_is_srgb(render_format) &&
           !isl_color_value_is_zero_one(mt->fast_clear_color, render_format))
          return ISL_AUX_USAGE_NONE;
 
+      if (mt->aux_usage == ISL_AUX_USAGE_CCS_E &&
+          format_ccs_e_compat_with_miptree(&brw->screen->devinfo,
+                                           mt, render_format))
+         return ISL_AUX_USAGE_CCS_E;
+
+      /* Otherwise, we have to fall back to CCS_D */
       return ISL_AUX_USAGE_CCS_D;
-   }
 
    default:
       return ISL_AUX_USAGE_NONE;
@@ -2713,11 +2707,8 @@ void
 intel_miptree_prepare_render(struct brw_context *brw,
                              struct intel_mipmap_tree *mt, uint32_t level,
                              uint32_t start_layer, uint32_t layer_count,
-                             enum isl_format render_format,
-                             bool blend_enabled)
+                             enum isl_aux_usage aux_usage)
 {
-   enum isl_aux_usage aux_usage =
-      intel_miptree_render_aux_usage(brw, mt, render_format, blend_enabled);
    intel_miptree_prepare_access(brw, mt, level, 1, start_layer, layer_count,
                                 aux_usage, aux_usage != ISL_AUX_USAGE_NONE);
 }
@@ -2726,13 +2717,10 @@ void
 intel_miptree_finish_render(struct brw_context *brw,
                             struct intel_mipmap_tree *mt, uint32_t level,
                             uint32_t start_layer, uint32_t layer_count,
-                            enum isl_format render_format,
-                            bool blend_enabled)
+                            enum isl_aux_usage aux_usage)
 {
    assert(_mesa_is_format_color_format(mt->format));
 
-   enum isl_aux_usage aux_usage =
-      intel_miptree_render_aux_usage(brw, mt, render_format, blend_enabled);
    intel_miptree_finish_write(brw, mt, level, start_layer, layer_count,
                               aux_usage);
 }
@@ -2743,7 +2731,7 @@ intel_miptree_prepare_depth(struct brw_context *brw,
                             uint32_t start_layer, uint32_t layer_count)
 {
    intel_miptree_prepare_access(brw, mt, level, 1, start_layer, layer_count,
-                                mt->aux_usage, mt->hiz_buf != NULL);
+                                mt->aux_usage, mt->aux_buf != NULL);
 }
 
 void
@@ -2754,7 +2742,7 @@ intel_miptree_finish_depth(struct brw_context *brw,
 {
    if (depth_written) {
       intel_miptree_finish_write(brw, mt, level, start_layer, layer_count,
-                                 mt->hiz_buf != NULL);
+                                 mt->aux_buf != NULL);
    }
 }
 
@@ -2778,7 +2766,7 @@ intel_miptree_prepare_external(struct brw_context *brw,
       assert(mt->surf.logical_level0_px.depth == 1);
       assert(mt->surf.logical_level0_px.array_len == 1);
       assert(mt->surf.samples == 1);
-      assert(mt->mcs_buf != NULL);
+      assert(mt->aux_buf != NULL);
 
       aux_usage = mod_info->aux_usage;
       supports_fast_clear = mod_info->supports_clear_color;
@@ -2789,6 +2777,25 @@ intel_miptree_prepare_external(struct brw_context *brw,
                                 aux_usage, supports_fast_clear);
 }
 
+void
+intel_miptree_finish_external(struct brw_context *brw,
+                              struct intel_mipmap_tree *mt)
+{
+   if (!mt->aux_buf)
+      return;
+
+   /* We don't know the actual aux state of the aux surface.  The previous
+    * owner could have given it to us in a number of different states.
+    * Because we don't know the aux state, we reset the aux state to the
+    * least common denominator of possible valid states.
+    */
+   enum isl_aux_state default_aux_state =
+      isl_drm_modifier_get_default_aux_state(mt->drm_modifier);
+   assert(mt->last_level == mt->first_level);
+   intel_miptree_set_aux_state(brw, mt, 0, 0, INTEL_REMAINING_LAYERS,
+                               default_aux_state);
+}
+
 /**
  * Make it possible to share the BO backing the given miptree with another
  * process or another miptree.
@@ -2815,32 +2822,15 @@ intel_miptree_make_shareable(struct brw_context *brw,
                                 0, INTEL_REMAINING_LAYERS,
                                 ISL_AUX_USAGE_NONE, false);
 
-   if (mt->mcs_buf) {
-      brw_bo_unreference(mt->mcs_buf->bo);
-      free(mt->mcs_buf);
-      mt->mcs_buf = NULL;
-
-      /* Any pending MCS/CCS operations are no longer needed. Trying to
-       * execute any will likely crash due to the missing aux buffer. So let's
-       * delete all pending ops.
-       */
-      free(mt->aux_state);
-      mt->aux_state = NULL;
-      brw->ctx.NewDriverState |= BRW_NEW_AUX_STATE;
-   }
-
-   if (mt->hiz_buf) {
-      intel_miptree_aux_buffer_free(mt->hiz_buf);
-      mt->hiz_buf = NULL;
+   if (mt->aux_buf) {
+      intel_miptree_aux_buffer_free(mt->aux_buf);
+      mt->aux_buf = NULL;
 
+      /* Make future calls to intel_miptree_level_has_hiz() return false. */
       for (uint32_t l = mt->first_level; l <= mt->last_level; ++l) {
          mt->level[l].has_hiz = false;
       }
 
-      /* Any pending HiZ operations are no longer needed. Trying to execute
-       * any will likely crash due to the missing aux buffer. So let's delete
-       * all pending ops.
-       */
       free(mt->aux_state);
       mt->aux_state = NULL;
       brw->ctx.NewDriverState |= BRW_NEW_AUX_STATE;
@@ -2996,7 +2986,7 @@ intel_update_r8stencil(struct brw_context *brw,
       }
    }
 
-   brw_render_cache_set_check_flush(brw, dst->bo);
+   brw_cache_flush_for_read(brw, dst->bo);
    src->r8stencil_needs_update = false;
 }
 
@@ -3019,6 +3009,15 @@ intel_miptree_unmap_raw(struct intel_mipmap_tree *mt)
    brw_bo_unmap(mt->bo);
 }
 
+static void
+intel_miptree_unmap_gtt(struct brw_context *brw,
+                        struct intel_mipmap_tree *mt,
+                        struct intel_miptree_map *map,
+                        unsigned int level, unsigned int slice)
+{
+   intel_miptree_unmap_raw(mt);
+}
+
 static void
 intel_miptree_map_gtt(struct brw_context *brw,
                      struct intel_mipmap_tree *mt,
@@ -3064,12 +3063,30 @@ intel_miptree_map_gtt(struct brw_context *brw,
        map->x, map->y, map->w, map->h,
        mt, _mesa_get_format_name(mt->format),
        x, y, map->ptr, map->stride);
+
+   map->unmap = intel_miptree_unmap_gtt;
 }
 
 static void
-intel_miptree_unmap_gtt(struct intel_mipmap_tree *mt)
+intel_miptree_unmap_blit(struct brw_context *brw,
+                        struct intel_mipmap_tree *mt,
+                        struct intel_miptree_map *map,
+                        unsigned int level,
+                        unsigned int slice)
 {
-   intel_miptree_unmap_raw(mt);
+   struct gl_context *ctx = &brw->ctx;
+
+   intel_miptree_unmap_raw(map->linear_mt);
+
+   if (map->mode & GL_MAP_WRITE_BIT) {
+      bool ok = intel_miptree_copy(brw,
+                                   map->linear_mt, 0, 0, 0, 0,
+                                   mt, level, slice, map->x, map->y,
+                                   map->w, map->h);
+      WARN_ONCE(!ok, "Failed to blit from linear temporary mapping");
+   }
+
+   intel_miptree_release(&map->linear_mt);
 }
 
 static void
@@ -3113,6 +3130,7 @@ intel_miptree_map_blit(struct brw_context *brw,
        mt, _mesa_get_format_name(mt->format),
        level, slice, map->ptr, map->stride);
 
+   map->unmap = intel_miptree_unmap_blit;
    return;
 
 fail:
@@ -3121,32 +3139,22 @@ fail:
    map->stride = 0;
 }
 
-static void
-intel_miptree_unmap_blit(struct brw_context *brw,
-                        struct intel_mipmap_tree *mt,
-                        struct intel_miptree_map *map,
-                        unsigned int level,
-                        unsigned int slice)
-{
-   struct gl_context *ctx = &brw->ctx;
-
-   intel_miptree_unmap_raw(map->linear_mt);
-
-   if (map->mode & GL_MAP_WRITE_BIT) {
-      bool ok = intel_miptree_copy(brw,
-                                   map->linear_mt, 0, 0, 0, 0,
-                                   mt, level, slice, map->x, map->y,
-                                   map->w, map->h);
-      WARN_ONCE(!ok, "Failed to blit from linear temporary mapping");
-   }
-
-   intel_miptree_release(&map->linear_mt);
-}
-
 /**
  * "Map" a buffer by copying it to an untiled temporary using MOVNTDQA.
  */
 #if defined(USE_SSE41)
+static void
+intel_miptree_unmap_movntdqa(struct brw_context *brw,
+                             struct intel_mipmap_tree *mt,
+                             struct intel_miptree_map *map,
+                             unsigned int level,
+                             unsigned int slice)
+{
+   _mesa_align_free(map->buffer);
+   map->buffer = NULL;
+   map->ptr = NULL;
+}
+
 static void
 intel_miptree_map_movntdqa(struct brw_context *brw,
                            struct intel_mipmap_tree *mt,
@@ -3204,20 +3212,40 @@ intel_miptree_map_movntdqa(struct brw_context *brw,
    }
 
    intel_miptree_unmap_raw(mt);
+
+   map->unmap = intel_miptree_unmap_movntdqa;
 }
+#endif
 
 static void
-intel_miptree_unmap_movntdqa(struct brw_context *brw,
-                             struct intel_mipmap_tree *mt,
-                             struct intel_miptree_map *map,
-                             unsigned int level,
-                             unsigned int slice)
+intel_miptree_unmap_s8(struct brw_context *brw,
+                      struct intel_mipmap_tree *mt,
+                      struct intel_miptree_map *map,
+                      unsigned int level,
+                      unsigned int slice)
 {
-   _mesa_align_free(map->buffer);
-   map->buffer = NULL;
-   map->ptr = NULL;
+   if (map->mode & GL_MAP_WRITE_BIT) {
+      unsigned int image_x, image_y;
+      uint8_t *untiled_s8_map = map->ptr;
+      uint8_t *tiled_s8_map = intel_miptree_map_raw(brw, mt, GL_MAP_WRITE_BIT);
+
+      intel_miptree_get_image_offset(mt, level, slice, &image_x, &image_y);
+
+      for (uint32_t y = 0; y < map->h; y++) {
+        for (uint32_t x = 0; x < map->w; x++) {
+           ptrdiff_t offset = intel_offset_S8(mt->surf.row_pitch,
+                                              image_x + x + map->x,
+                                              image_y + y + map->y,
+                                              brw->has_swizzling);
+           tiled_s8_map[offset] = untiled_s8_map[y * map->w + x];
+        }
+      }
+
+      intel_miptree_unmap_raw(mt);
+   }
+
+   free(map->buffer);
 }
-#endif
 
 static void
 intel_miptree_map_s8(struct brw_context *brw,
@@ -3262,57 +3290,8 @@ intel_miptree_map_s8(struct brw_context *brw,
          map->x, map->y, map->w, map->h,
          mt, map->ptr, map->stride);
    }
-}
-
-static void
-intel_miptree_unmap_s8(struct brw_context *brw,
-                      struct intel_mipmap_tree *mt,
-                      struct intel_miptree_map *map,
-                      unsigned int level,
-                      unsigned int slice)
-{
-   if (map->mode & GL_MAP_WRITE_BIT) {
-      unsigned int image_x, image_y;
-      uint8_t *untiled_s8_map = map->ptr;
-      uint8_t *tiled_s8_map = intel_miptree_map_raw(brw, mt, GL_MAP_WRITE_BIT);
-
-      intel_miptree_get_image_offset(mt, level, slice, &image_x, &image_y);
-
-      for (uint32_t y = 0; y < map->h; y++) {
-        for (uint32_t x = 0; x < map->w; x++) {
-           ptrdiff_t offset = intel_offset_S8(mt->surf.row_pitch,
-                                              image_x + x + map->x,
-                                              image_y + y + map->y,
-                                              brw->has_swizzling);
-           tiled_s8_map[offset] = untiled_s8_map[y * map->w + x];
-        }
-      }
-
-      intel_miptree_unmap_raw(mt);
-   }
 
-   free(map->buffer);
-}
-
-static void
-intel_miptree_map_etc(struct brw_context *brw,
-                      struct intel_mipmap_tree *mt,
-                      struct intel_miptree_map *map,
-                      unsigned int level,
-                      unsigned int slice)
-{
-   assert(mt->etc_format != MESA_FORMAT_NONE);
-   if (mt->etc_format == MESA_FORMAT_ETC1_RGB8) {
-      assert(mt->format == MESA_FORMAT_R8G8B8X8_UNORM);
-   }
-
-   assert(map->mode & GL_MAP_WRITE_BIT);
-   assert(map->mode & GL_MAP_INVALIDATE_RANGE_BIT);
-
-   map->stride = _mesa_format_row_stride(mt->etc_format, map->w);
-   map->buffer = malloc(_mesa_format_image_size(mt->etc_format,
-                                                map->w, map->h, 1));
-   map->ptr = map->buffer;
+   map->unmap = intel_miptree_unmap_s8;
 }
 
 static void
@@ -3346,8 +3325,30 @@ intel_miptree_unmap_etc(struct brw_context *brw,
    free(map->buffer);
 }
 
+static void
+intel_miptree_map_etc(struct brw_context *brw,
+                      struct intel_mipmap_tree *mt,
+                      struct intel_miptree_map *map,
+                      unsigned int level,
+                      unsigned int slice)
+{
+   assert(mt->etc_format != MESA_FORMAT_NONE);
+   if (mt->etc_format == MESA_FORMAT_ETC1_RGB8) {
+      assert(mt->format == MESA_FORMAT_R8G8B8X8_UNORM);
+   }
+
+   assert(map->mode & GL_MAP_WRITE_BIT);
+   assert(map->mode & GL_MAP_INVALIDATE_RANGE_BIT);
+
+   map->stride = _mesa_format_row_stride(mt->etc_format, map->w);
+   map->buffer = malloc(_mesa_format_image_size(mt->etc_format,
+                                                map->w, map->h, 1));
+   map->ptr = map->buffer;
+   map->unmap = intel_miptree_unmap_etc;
+}
+
 /**
- * Mapping function for packed depth/stencil miptrees backed by real separate
+ * Mapping functions for packed depth/stencil miptrees backed by real separate
  * miptrees for depth and stencil.
  *
  * On gen7, and to support HiZ pre-gen7, we have to have the stencil buffer
@@ -3358,30 +3359,20 @@ intel_miptree_unmap_etc(struct brw_context *brw,
  * copying the data between the actual backing store and the temporary.
  */
 static void
-intel_miptree_map_depthstencil(struct brw_context *brw,
-                              struct intel_mipmap_tree *mt,
-                              struct intel_miptree_map *map,
-                              unsigned int level, unsigned int slice)
+intel_miptree_unmap_depthstencil(struct brw_context *brw,
+                                struct intel_mipmap_tree *mt,
+                                struct intel_miptree_map *map,
+                                unsigned int level,
+                                unsigned int slice)
 {
    struct intel_mipmap_tree *z_mt = mt;
    struct intel_mipmap_tree *s_mt = mt->stencil_mt;
    bool map_z32f_x24s8 = mt->format == MESA_FORMAT_Z_FLOAT32;
-   int packed_bpp = map_z32f_x24s8 ? 8 : 4;
 
-   map->stride = map->w * packed_bpp;
-   map->buffer = map->ptr = malloc(map->stride * map->h);
-   if (!map->buffer)
-      return;
-
-   /* One of either READ_BIT or WRITE_BIT or both is set.  READ_BIT implies no
-    * INVALIDATE_RANGE_BIT.  WRITE_BIT needs the original values read in unless
-    * invalidate is set, since we'll be writing the whole rectangle from our
-    * temporary buffer back out.
-    */
-   if (!(map->mode & GL_MAP_INVALIDATE_RANGE_BIT)) {
+   if (map->mode & GL_MAP_WRITE_BIT) {
       uint32_t *packed_map = map->ptr;
-      uint8_t *s_map = intel_miptree_map_raw(brw, s_mt, GL_MAP_READ_BIT);
-      uint32_t *z_map = intel_miptree_map_raw(brw, z_mt, GL_MAP_READ_BIT);
+      uint8_t *s_map = intel_miptree_map_raw(brw, s_mt, GL_MAP_WRITE_BIT);
+      uint32_t *z_map = intel_miptree_map_raw(brw, z_mt, GL_MAP_WRITE_BIT);
       unsigned int s_image_x, s_image_y;
       unsigned int z_image_x, z_image_y;
 
@@ -3392,22 +3383,21 @@ intel_miptree_map_depthstencil(struct brw_context *brw,
 
       for (uint32_t y = 0; y < map->h; y++) {
         for (uint32_t x = 0; x < map->w; x++) {
-           int map_x = map->x + x, map_y = map->y + y;
            ptrdiff_t s_offset = intel_offset_S8(s_mt->surf.row_pitch,
-                                                map_x + s_image_x,
-                                                map_y + s_image_y,
+                                                x + s_image_x + map->x,
+                                                y + s_image_y + map->y,
                                                 brw->has_swizzling);
-           ptrdiff_t z_offset = ((map_y + z_image_y) *
+           ptrdiff_t z_offset = ((y + z_image_y + map->y) *
                                   (z_mt->surf.row_pitch / 4) +
-                                 (map_x + z_image_x));
-           uint8_t s = s_map[s_offset];
-           uint32_t z = z_map[z_offset];
+                                 (x + z_image_x + map->x));
 
            if (map_z32f_x24s8) {
-              packed_map[(y * map->w + x) * 2 + 0] = z;
-              packed_map[(y * map->w + x) * 2 + 1] = s;
+              z_map[z_offset] = packed_map[(y * map->w + x) * 2 + 0];
+              s_map[s_offset] = packed_map[(y * map->w + x) * 2 + 1];
            } else {
-              packed_map[y * map->w + x] = (s << 24) | (z & 0x00ffffff);
+              uint32_t packed = packed_map[y * map->w + x];
+              s_map[s_offset] = packed >> 24;
+              z_map[z_offset] = packed;
            }
         }
       }
@@ -3415,34 +3405,43 @@ intel_miptree_map_depthstencil(struct brw_context *brw,
       intel_miptree_unmap_raw(s_mt);
       intel_miptree_unmap_raw(z_mt);
 
-      DBG("%s: %d,%d %dx%d from z mt %p %d,%d, s mt %p %d,%d = %p/%d\n",
+      DBG("%s: %d,%d %dx%d from z mt %p (%s) %d,%d, s mt %p %d,%d = %p/%d\n",
          __func__,
          map->x, map->y, map->w, map->h,
-         z_mt, map->x + z_image_x, map->y + z_image_y,
+         z_mt, _mesa_get_format_name(z_mt->format),
+         map->x + z_image_x, map->y + z_image_y,
          s_mt, map->x + s_image_x, map->y + s_image_y,
          map->ptr, map->stride);
-   } else {
-      DBG("%s: %d,%d %dx%d from mt %p = %p/%d\n", __func__,
-         map->x, map->y, map->w, map->h,
-         mt, map->ptr, map->stride);
    }
+
+   free(map->buffer);
 }
 
 static void
-intel_miptree_unmap_depthstencil(struct brw_context *brw,
-                                struct intel_mipmap_tree *mt,
-                                struct intel_miptree_map *map,
-                                unsigned int level,
-                                unsigned int slice)
+intel_miptree_map_depthstencil(struct brw_context *brw,
+                              struct intel_mipmap_tree *mt,
+                              struct intel_miptree_map *map,
+                              unsigned int level, unsigned int slice)
 {
    struct intel_mipmap_tree *z_mt = mt;
    struct intel_mipmap_tree *s_mt = mt->stencil_mt;
    bool map_z32f_x24s8 = mt->format == MESA_FORMAT_Z_FLOAT32;
+   int packed_bpp = map_z32f_x24s8 ? 8 : 4;
 
-   if (map->mode & GL_MAP_WRITE_BIT) {
+   map->stride = map->w * packed_bpp;
+   map->buffer = map->ptr = malloc(map->stride * map->h);
+   if (!map->buffer)
+      return;
+
+   /* One of either READ_BIT or WRITE_BIT or both is set.  READ_BIT implies no
+    * INVALIDATE_RANGE_BIT.  WRITE_BIT needs the original values read in unless
+    * invalidate is set, since we'll be writing the whole rectangle from our
+    * temporary buffer back out.
+    */
+   if (!(map->mode & GL_MAP_INVALIDATE_RANGE_BIT)) {
       uint32_t *packed_map = map->ptr;
-      uint8_t *s_map = intel_miptree_map_raw(brw, s_mt, GL_MAP_WRITE_BIT);
-      uint32_t *z_map = intel_miptree_map_raw(brw, z_mt, GL_MAP_WRITE_BIT);
+      uint8_t *s_map = intel_miptree_map_raw(brw, s_mt, GL_MAP_READ_BIT);
+      uint32_t *z_map = intel_miptree_map_raw(brw, z_mt, GL_MAP_READ_BIT);
       unsigned int s_image_x, s_image_y;
       unsigned int z_image_x, z_image_y;
 
@@ -3453,21 +3452,22 @@ intel_miptree_unmap_depthstencil(struct brw_context *brw,
 
       for (uint32_t y = 0; y < map->h; y++) {
         for (uint32_t x = 0; x < map->w; x++) {
+           int map_x = map->x + x, map_y = map->y + y;
            ptrdiff_t s_offset = intel_offset_S8(s_mt->surf.row_pitch,
-                                                x + s_image_x + map->x,
-                                                y + s_image_y + map->y,
+                                                map_x + s_image_x,
+                                                map_y + s_image_y,
                                                 brw->has_swizzling);
-           ptrdiff_t z_offset = ((y + z_image_y + map->y) *
+           ptrdiff_t z_offset = ((map_y + z_image_y) *
                                   (z_mt->surf.row_pitch / 4) +
-                                 (x + z_image_x + map->x));
+                                 (map_x + z_image_x));
+           uint8_t s = s_map[s_offset];
+           uint32_t z = z_map[z_offset];
 
            if (map_z32f_x24s8) {
-              z_map[z_offset] = packed_map[(y * map->w + x) * 2 + 0];
-              s_map[s_offset] = packed_map[(y * map->w + x) * 2 + 1];
+              packed_map[(y * map->w + x) * 2 + 0] = z;
+              packed_map[(y * map->w + x) * 2 + 1] = s;
            } else {
-              uint32_t packed = packed_map[y * map->w + x];
-              s_map[s_offset] = packed >> 24;
-              z_map[z_offset] = packed;
+              packed_map[y * map->w + x] = (s << 24) | (z & 0x00ffffff);
            }
         }
       }
@@ -3475,16 +3475,19 @@ intel_miptree_unmap_depthstencil(struct brw_context *brw,
       intel_miptree_unmap_raw(s_mt);
       intel_miptree_unmap_raw(z_mt);
 
-      DBG("%s: %d,%d %dx%d from z mt %p (%s) %d,%d, s mt %p %d,%d = %p/%d\n",
+      DBG("%s: %d,%d %dx%d from z mt %p %d,%d, s mt %p %d,%d = %p/%d\n",
          __func__,
          map->x, map->y, map->w, map->h,
-         z_mt, _mesa_get_format_name(z_mt->format),
-         map->x + z_image_x, map->y + z_image_y,
+         z_mt, map->x + z_image_x, map->y + z_image_y,
          s_mt, map->x + s_image_x, map->y + s_image_y,
          map->ptr, map->stride);
+   } else {
+      DBG("%s: %d,%d %dx%d from mt %p = %p/%d\n", __func__,
+         map->x, map->y, map->w, map->h,
+         mt, map->ptr, map->stride);
    }
 
-   free(map->buffer);
+   map->unmap = intel_miptree_unmap_depthstencil;
 }
 
 /**
@@ -3656,22 +3659,8 @@ intel_miptree_unmap(struct brw_context *brw,
    DBG("%s: mt %p (%s) level %d slice %d\n", __func__,
        mt, _mesa_get_format_name(mt->format), level, slice);
 
-   if (mt->format == MESA_FORMAT_S_UINT8) {
-      intel_miptree_unmap_s8(brw, mt, map, level, slice);
-   } else if (mt->etc_format != MESA_FORMAT_NONE &&
-              !(map->mode & BRW_MAP_DIRECT_BIT)) {
-      intel_miptree_unmap_etc(brw, mt, map, level, slice);
-   } else if (mt->stencil_mt && !(map->mode & BRW_MAP_DIRECT_BIT)) {
-      intel_miptree_unmap_depthstencil(brw, mt, map, level, slice);
-   } else if (map->linear_mt) {
-      intel_miptree_unmap_blit(brw, mt, map, level, slice);
-#if defined(USE_SSE41)
-   } else if (map->buffer && cpu_has_sse4_1) {
-      intel_miptree_unmap_movntdqa(brw, mt, map, level, slice);
-#endif
-   } else {
-      intel_miptree_unmap_gtt(mt);
-   }
+   if (map->unmap)
+      map->unmap(brw, mt, map, level, slice);
 
    intel_miptree_release_map(mt, level, slice);
 }
@@ -3732,15 +3721,59 @@ get_isl_dim_layout(const struct gen_device_info *devinfo,
    unreachable("Invalid texture target");
 }
 
-enum isl_aux_usage
-intel_miptree_get_aux_isl_usage(const struct brw_context *brw,
-                                const struct intel_mipmap_tree *mt)
+bool
+intel_miptree_set_clear_color(struct brw_context *brw,
+                              struct intel_mipmap_tree *mt,
+                              union isl_color_value clear_color)
+{
+   if (memcmp(&mt->fast_clear_color, &clear_color, sizeof(clear_color)) != 0) {
+      mt->fast_clear_color = clear_color;
+      brw->ctx.NewDriverState |= BRW_NEW_AUX_STATE;
+      return true;
+   }
+   return false;
+}
+
+bool
+intel_miptree_set_depth_clear_value(struct brw_context *brw,
+                                    struct intel_mipmap_tree *mt,
+                                    float clear_value)
 {
-   if (mt->hiz_buf)
-      return ISL_AUX_USAGE_HIZ;
+   if (mt->fast_clear_color.f32[0] != clear_value) {
+      mt->fast_clear_color.f32[0] = clear_value;
+      brw->ctx.NewDriverState |= BRW_NEW_AUX_STATE;
+      return true;
+   }
+   return false;
+}
 
-   if (!mt->mcs_buf)
-      return ISL_AUX_USAGE_NONE;
+union isl_color_value
+intel_miptree_get_clear_color(const struct gen_device_info *devinfo,
+                              const struct intel_mipmap_tree *mt,
+                              enum isl_format view_format, bool sampling,
+                              struct brw_bo **clear_color_bo,
+                              uint32_t *clear_color_offset)
+{
+   assert(mt->aux_buf);
 
-   return mt->aux_usage;
+   if (devinfo->gen == 10 && isl_format_is_srgb(view_format) && sampling) {
+      /* The gen10 sampler doesn't gamma-correct the clear color. In this case,
+       * we switch to using the inline clear color and do the sRGB color
+       * conversion process defined in the OpenGL spec. The red, green, and
+       * blue channels take part in gamma correction, while the alpha channel
+       * is unchanged.
+       */
+      union isl_color_value srgb_decoded_value = mt->fast_clear_color;
+      for (unsigned i = 0; i < 3; i++) {
+         srgb_decoded_value.f32[i] =
+            util_format_srgb_to_linear_float(mt->fast_clear_color.f32[i]);
+      }
+      *clear_color_bo = NULL;
+      *clear_color_offset = 0;
+      return srgb_decoded_value;
+   } else {
+      *clear_color_bo = mt->aux_buf->clear_color_bo;
+      *clear_color_offset = mt->aux_buf->clear_color_offset;
+      return mt->fast_clear_color;
+   }
 }
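
For reference, util_format_srgb_to_linear_float() (from util/format_srgb.h,
newly included at the top of this patch) applies the standard sRGB decode to
each channel. A minimal per-channel sketch of the math it is assumed to
perform:

#include <math.h>

/* Standard sRGB EOTF for one channel in [0, 1]; a sketch of what
 * util_format_srgb_to_linear_float() is assumed to compute.
 */
static inline float
srgb_channel_to_linear(float cs)
{
   return (cs <= 0.04045f) ? cs / 12.92f
                           : powf((cs + 0.055f) / 1.055f, 2.4f);
}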