i965: Enable fast clears for multi-lod
[mesa.git] / src / mesa / drivers / dri / i965 / intel_mipmap_tree.c
index 1f4c59b6cc3773a3273c383394a5d372be562c78..a9b350e25e84747ebb67adb4aa5753cbec7c7ba3 100644 (file)
@@ -101,68 +101,9 @@ compute_msaa_layout(struct brw_context *brw, mesa_format format,
    }
 }
 
-
-/**
- * For single-sampled render targets ("non-MSRT"), the MCS buffer is a
- * scaled-down bitfield representation of the color buffer which is capable of
- * recording when blocks of the color buffer are equal to the clear value.
- * This function returns the block size that will be used by the MCS buffer
- * corresponding to a certain color miptree.
- *
- * From the Ivy Bridge PRM, Vol2 Part1 11.7 "MCS Buffer for Render Target(s)",
- * beneath the "Fast Color Clear" bullet (p327):
- *
- *     The following table describes the RT alignment
- *
- *                       Pixels  Lines
- *         TiledY RT CL
- *             bpp
- *              32          8      4
- *              64          4      4
- *             128          2      4
- *         TiledX RT CL
- *             bpp
- *              32         16      2
- *              64          8      2
- *             128          4      2
- *
- * This alignment has the following uses:
- *
- * - For figuring out the size of the MCS buffer.  Each 4k tile in the MCS
- *   buffer contains 128 blocks horizontally and 256 blocks vertically.
- *
- * - For figuring out alignment restrictions for a fast clear operation.  Fast
- *   clear operations must always clear aligned multiples of 16 blocks
- *   horizontally and 32 blocks vertically.
- *
- * - For scaling down the coordinates sent through the render pipeline during
- *   a fast clear.  X coordinates must be scaled down by 8 times the block
- *   width, and Y coordinates by 16 times the block height.
- *
- * - For scaling down the coordinates sent through the render pipeline during
- *   a "Render Target Resolve" operation.  X coordinates must be scaled down
- *   by half the block width, and Y coordinates by half the block height.
- */
-void
-intel_get_non_msrt_mcs_alignment(struct intel_mipmap_tree *mt,
-                                 unsigned *width_px, unsigned *height)
-{
-   switch (mt->tiling) {
-   default:
-      unreachable("Non-MSRT MCS requires X or Y tiling");
-      /* In release builds, fall through */
-   case I915_TILING_Y:
-      *width_px = 32 / mt->cpp;
-      *height = 4;
-      break;
-   case I915_TILING_X:
-      *width_px = 64 / mt->cpp;
-      *height = 2;
-   }
-}
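
For reference, the block sizes returned by this removed helper fed the old, hand-rolled MCS sizing in intel_miptree_alloc_non_msrt_mcs (removed further down in this diff), where width_divisor = block_width_px * 4 and height_divisor = block_height * 8. A minimal standalone sketch of that arithmetic - hypothetical names, not driver API - with a worked 1920x1080, 32bpp, Y-tiled example:

#include <assert.h>
#include <stdio.h>

#define ALIGN(v, a) (((v) + (a) - 1) & ~((a) - 1))

/* Old-style non-MSRT MCS sizing for a Y-tiled color surface (pre-ISL path).
 * Gen9 halves the height divisor because the SKL MCS is twice as tall.
 */
static void
old_non_msrt_mcs_size(unsigned cpp, unsigned width, unsigned height,
                      int gen, unsigned *mcs_w, unsigned *mcs_h)
{
   const unsigned block_width_px = 32 / cpp;     /* Y-tiled row of the table */
   const unsigned block_height = 4;
   const unsigned width_divisor = block_width_px * 4;
   unsigned height_divisor = block_height * 8;

   if (gen >= 9)
      height_divisor /= 2;

   *mcs_w = ALIGN(width, width_divisor) / width_divisor;
   *mcs_h = ALIGN(height, height_divisor) / height_divisor;
}

int main(void)
{
   unsigned w, h;
   /* 1920x1080, cpp = 4 (32bpp), Y-tiled, gen8: both divisors are 32. */
   old_non_msrt_mcs_size(4, 1920, 1080, 8, &w, &h);
   assert(w == 60 && h == 34);   /* ALIGN(1080, 32) = 1088 -> 34 rows */
   printf("gen8 MCS: %ux%u R32 pixels\n", w, h);
   return 0;
}
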
-
-static bool
-intel_tiling_supports_non_msrt_mcs(struct brw_context *brw, unsigned tiling)
+bool
+intel_tiling_supports_non_msrt_mcs(const struct brw_context *brw,
+                                   unsigned tiling)
 {
    /* From the Ivy Bridge PRM, Vol2 Part1 11.7 "MCS Buffer for Render
     * Target(s)", beneath the "Fast Color Clear" bullet (p326):
@@ -200,9 +141,9 @@ intel_tiling_supports_non_msrt_mcs(struct brw_context *brw, unsigned tiling)
  *     - MCS and Lossless compression is supported for TiledY/TileYs/TileYf
  *     non-MSRTs only.
  */
-static bool
+bool
 intel_miptree_supports_non_msrt_fast_clear(struct brw_context *brw,
-                                           struct intel_mipmap_tree *mt)
+                                           const struct intel_mipmap_tree *mt)
 {
    /* MCS support does not exist prior to Gen7 */
    if (brw->gen < 7)
@@ -225,32 +166,40 @@ intel_miptree_supports_non_msrt_fast_clear(struct brw_context *brw,
 
    if (mt->cpp != 4 && mt->cpp != 8 && mt->cpp != 16)
       return false;
-   if (mt->first_level != 0 || mt->last_level != 0) {
-      if (brw->gen >= 8) {
-         perf_debug("Multi-LOD fast clear - giving up (%dx%dx%d).\n",
-                    mt->logical_width0, mt->logical_height0, mt->last_level);
-      }
 
-      return false;
-   }
+   const bool mip_mapped = mt->first_level != 0 || mt->last_level != 0;
+   const bool arrayed = mt->physical_depth0 != 1;
 
-   /* Check for layered surfaces. */
-   if (mt->physical_depth0 != 1) {
+   if (arrayed) {
        /* Multisample surfaces with the CMS layout are not layered surfaces,
         * yet still have physical_depth0 > 1. Assert that we don't
         * accidentally reject a multisampled surface here. We should have
         * rejected it earlier by explicitly checking the sample count.
         */
       assert(mt->num_samples <= 1);
+   }
 
-      if (brw->gen >= 8) {
-         perf_debug("Layered fast clear - giving up. (%dx%d%d)\n",
-                    mt->logical_width0, mt->logical_height0,
-                    mt->physical_depth0);
-      }
-
+   /* Handle the hardware restrictions...
+    *
+    * All GENs have the following restriction: "MCS buffer for non-MSRT is
+    * supported only for RT formats 32bpp, 64bpp, and 128bpp."
+    *
+    * From the HSW PRM Volume 7: 3D-Media-GPGPU, page 652: (Color Clear of
+    * Non-MultiSampler Render Target Restrictions) Support is for
+    * non-mip-mapped and non-array surface types only.
+    *
+    * From the BDW PRM Volume 7: 3D-Media-GPGPU, page 649: (Color Clear of
+    * Non-MultiSampler Render Target Restriction). Mip-mapped and arrayed
+    * surfaces are supported with MCS buffer layout with these alignments in
+    * the RT space: Horizontal Alignment = 256 and Vertical Alignment = 128.
+    *
+    * From the SKL PRM Volume 7: 3D-Media-GPGPU, page 632: (Color Clear of
+    * Non-MultiSampler Render Target Restriction). Mip-mapped and arrayed
+    * surfaces are supported with MCS buffer layout with these alignments in
+    * the RT space: Horizontal Alignment = 128 and Vertical Alignment = 64.
+    */
+   if (brw->gen < 8 && (mip_mapped || arrayed))
       return false;
-   }
 
    /* There's no point in using an MCS buffer if the surface isn't in a
     * renderable format.
@@ -261,11 +210,64 @@ intel_miptree_supports_non_msrt_fast_clear(struct brw_context *brw,
    if (brw->gen >= 9) {
       mesa_format linear_format = _mesa_get_srgb_format_linear(mt->format);
       const uint32_t brw_format = brw_format_for_mesa_format(linear_format);
-      return brw_losslessly_compressible_format(brw, brw_format);
+      return isl_format_supports_lossless_compression(&brw->screen->devinfo,
+                                                      brw_format);
    } else
       return true;
 }
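
The gen checks above boil down to the PRM rules quoted in the comment: Gen7 allows the non-MSRT MCS path only for non-mip-mapped, non-arrayed surfaces, while Gen8 and Gen9 also allow mip-mapped and arrayed surfaces provided the RT-space alignments are 256x128 and 128x64 respectively. A minimal sketch of that rule table - hypothetical names, not driver API:

#include <stdbool.h>
#include <stdio.h>

/* Whether a mip-mapped or arrayed color surface may use the non-MSRT MCS
 * path on a given gen, and the RT-space alignment it then has to satisfy.
 */
struct fast_clear_rule {
   bool mip_or_array_ok;
   unsigned halign, valign;   /* required RT-space alignment, if allowed */
};

static struct fast_clear_rule
non_msrt_fast_clear_rule(int gen)
{
   if (gen < 8)                                       /* IVB/HSW */
      return (struct fast_clear_rule){ false, 0, 0 };
   if (gen == 8)                                      /* BDW */
      return (struct fast_clear_rule){ true, 256, 128 };
   return (struct fast_clear_rule){ true, 128, 64 };  /* SKL+ */
}

int main(void)
{
   for (int gen = 7; gen <= 9; gen++) {
      const struct fast_clear_rule r = non_msrt_fast_clear_rule(gen);
      printf("gen%d: mip/array fast clear %s (halign %u, valign %u)\n",
             gen, r.mip_or_array_ok ? "allowed" : "not allowed",
             r.halign, r.valign);
   }
   return 0;
}
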
 
+/* On Gen9, support for color buffer compression was extended to single
+ * sampled surfaces. This helper considers both the auxiliary buffer type
+ * and the number of samples to tell whether the given miptree represents
+ * the new single sampled case - also called lossless compression.
+ */
+bool
+intel_miptree_is_lossless_compressed(const struct brw_context *brw,
+                                     const struct intel_mipmap_tree *mt)
+{
+   /* Only available from Gen9 onwards. */
+   if (brw->gen < 9)
+      return false;
+
+   /* Compression always requires auxiliary buffer. */
+   if (!mt->mcs_buf)
+      return false;
+
+   /* Single sample compression is represented by re-using the msaa
+    * compression layout type: "Compressed Multisampled Surfaces".
+    */
+   if (mt->msaa_layout != INTEL_MSAA_LAYOUT_CMS)
+      return false;
+
+   /* And finally distinguish between msaa and single sample case. */
+   return mt->num_samples <= 1;
+}
+
+bool
+intel_miptree_supports_lossless_compressed(struct brw_context *brw,
+                                           const struct intel_mipmap_tree *mt)
+{
+   /* For now compression is only enabled for integer formats even though
+    * there are also supported floating point formats. This is a heuristic
+    * decision based on current public benchmarks: in none of the cases did
+    * these formats provide any improvement, but a few cases were seen to
+    * regress. Hence they are left to be enabled in the future when they are
+    * known to improve things.
+    */
+   if (_mesa_get_format_datatype(mt->format) == GL_FLOAT)
+      return false;
+
+   /* Fast clear mechanism and lossless compression go hand in hand. */
+   if (!intel_miptree_supports_non_msrt_fast_clear(brw, mt))
+      return false;
+
+   /* Fast clear can also be used to clear srgb surfaces by using the
+    * equivalent linear format. This trick, however, can't be extended to
+    * lossless compression, and therefore a check is needed to see if the
+    * format really is linear.
+    */
+   return _mesa_get_srgb_format_linear(mt->format) == mt->format;
+}
 
 /**
  * Determine depth format corresponding to a depth+stencil format,
@@ -310,25 +312,8 @@ intel_miptree_create_layout(struct brw_context *brw,
        _mesa_get_format_name(format),
        first_level, last_level, depth0, mt);
 
-   if (target == GL_TEXTURE_1D_ARRAY) {
-      /* For a 1D Array texture the OpenGL API will treat the height0
-       * parameter as the number of array slices. For Intel hardware, we treat
-       * the 1D array as a 2D Array with a height of 1.
-       *
-       * So, when we first come through this path to create a 1D Array
-       * texture, height0 stores the number of slices, and depth0 is 1. In
-       * this case, we want to swap height0 and depth0.
-       *
-       * Since some miptrees will be created based on the base miptree, we may
-       * come through this path and see height0 as 1 and depth0 being the
-       * number of slices. In this case we don't need to do the swap.
-       */
-      assert(height0 == 1 || depth0 == 1);
-      if (height0 > 1) {
-         depth0 = height0;
-         height0 = 1;
-      }
-   }
+   if (target == GL_TEXTURE_1D_ARRAY)
+      assert(height0 == 1);
 
    mt->target = target;
    mt->format = format;
@@ -337,15 +322,18 @@ intel_miptree_create_layout(struct brw_context *brw,
    mt->logical_width0 = width0;
    mt->logical_height0 = height0;
    mt->logical_depth0 = depth0;
-   mt->fast_clear_state = INTEL_FAST_CLEAR_STATE_NO_MCS;
    mt->disable_aux_buffers = (layout_flags & MIPTREE_LAYOUT_DISABLE_AUX) != 0;
+   mt->no_ccs = true;
+   mt->is_scanout = (layout_flags & MIPTREE_LAYOUT_FOR_SCANOUT) != 0;
    exec_list_make_empty(&mt->hiz_map);
+   exec_list_make_empty(&mt->color_resolve_map);
    mt->cpp = _mesa_get_format_bytes(format);
    mt->num_samples = num_samples;
    mt->compressed = _mesa_is_format_compressed(format);
    mt->msaa_layout = INTEL_MSAA_LAYOUT_NONE;
    mt->refcount = 1;
 
+   int depth_multiply = 1;
    if (num_samples > 1) {
       /* Adjust width/height/depth for MSAA */
       mt->msaa_layout = compute_msaa_layout(brw, format,
@@ -432,7 +420,8 @@ intel_miptree_create_layout(struct brw_context *brw,
          }
       } else {
          /* Non-interleaved */
-         depth0 *= num_samples;
+         depth_multiply = num_samples;
+         depth0 *= depth_multiply;
       }
    }
 
@@ -461,10 +450,8 @@ intel_miptree_create_layout(struct brw_context *brw,
       }
    }
 
-   if (target == GL_TEXTURE_CUBE_MAP) {
-      assert(depth0 == 1);
-      depth0 = 6;
-   }
+   if (target == GL_TEXTURE_CUBE_MAP)
+      assert(depth0 == 6 * depth_multiply);
 
    mt->physical_width0 = width0;
    mt->physical_height0 = height0;
@@ -496,6 +483,7 @@ intel_miptree_create_layout(struct brw_context *brw,
         intel_miptree_release(&mt);
         return NULL;
       }
+      mt->stencil_mt->r8stencil_needs_update = true;
 
       /* Fix up the Z miptree format for how we're splitting out separate
        * stencil.  Gen7 expects there to be no stencil bits in its depth buffer.
@@ -529,8 +517,13 @@ intel_miptree_create_layout(struct brw_context *brw,
    } else if (brw->gen >= 9 && num_samples > 1) {
       layout_flags |= MIPTREE_LAYOUT_FORCE_HALIGN16;
    } else {
+      const UNUSED bool is_lossless_compressed_aux =
+         brw->gen >= 9 && num_samples == 1 &&
+         mt->format == MESA_FORMAT_R_UINT32;
+
       /* For now, nothing else has this requirement */
-      assert((layout_flags & MIPTREE_LAYOUT_FORCE_HALIGN16) == 0);
+      assert(is_lossless_compressed_aux ||
+             (layout_flags & MIPTREE_LAYOUT_FORCE_HALIGN16) == 0);
    }
 
    brw_miptree_layout(brw, mt, layout_flags);
@@ -630,7 +623,6 @@ miptree_create(struct brw_context *brw,
 
    etc_format = (format != tex_format) ? tex_format : MESA_FORMAT_NONE;
 
-   assert((layout_flags & MIPTREE_LAYOUT_DISABLE_AUX) == 0);
    assert((layout_flags & MIPTREE_LAYOUT_FOR_BO) == 0);
    mt = intel_miptree_create_layout(brw, target, format,
                                     first_level, last_level, width0,
@@ -739,12 +731,27 @@ intel_miptree_create(struct brw_context *brw,
    /* If this miptree is capable of supporting fast color clears, set
     * fast_clear_state appropriately to ensure that fast clears will occur.
     * Allocation of the MCS miptree will be deferred until the first fast
-    * clear actually occurs.
+    * clear actually occurs or when a compressed single sampled buffer is
+    * written by the GPU for the first time.
     */
    if (intel_tiling_supports_non_msrt_mcs(brw, mt->tiling) &&
        intel_miptree_supports_non_msrt_fast_clear(brw, mt)) {
-      mt->fast_clear_state = INTEL_FAST_CLEAR_STATE_RESOLVED;
+      mt->no_ccs = false;
       assert(brw->gen < 8 || mt->halign == 16 || num_samples <= 1);
+
+      /* On Gen9+ clients are not currently capable of consuming compressed
+       * single-sampled buffers. Disabling compression allows us to skip
+       * resolves.
+       */
+      const bool lossless_compression_disabled = INTEL_DEBUG & DEBUG_NO_RBC;
+      const bool is_lossless_compressed =
+         unlikely(!lossless_compression_disabled) &&
+         brw->gen >= 9 && !mt->is_scanout &&
+         intel_miptree_supports_lossless_compressed(brw, mt);
+
+      if (is_lossless_compressed) {
+         intel_miptree_alloc_non_msrt_mcs(brw, mt, is_lossless_compressed);
+      }
    }
 
    return mt;
@@ -840,7 +847,7 @@ intel_update_winsys_renderbuffer_miptree(struct brw_context *intel,
                                                  height,
                                                  1,
                                                  pitch,
-                                                 0);
+                                                 MIPTREE_LAYOUT_FOR_SCANOUT);
    if (!singlesample_mt)
       goto fail;
 
@@ -851,7 +858,7 @@ intel_update_winsys_renderbuffer_miptree(struct brw_context *intel,
     */
    if (intel_tiling_supports_non_msrt_mcs(intel, singlesample_mt->tiling) &&
        intel_miptree_supports_non_msrt_fast_clear(intel, singlesample_mt)) {
-      singlesample_mt->fast_clear_state = INTEL_FAST_CLEAR_STATE_RESOLVED;
+      singlesample_mt->no_ccs = false;
    }
 
    if (num_samples == 0) {
@@ -899,8 +906,8 @@ intel_miptree_create_for_renderbuffer(struct brw_context *brw,
    bool ok;
    GLenum target = num_samples > 1 ? GL_TEXTURE_2D_MULTISAMPLE : GL_TEXTURE_2D;
    const uint32_t layout_flags = MIPTREE_LAYOUT_ACCELERATED_UPLOAD |
-                                 MIPTREE_LAYOUT_TILING_ANY;
-
+                                 MIPTREE_LAYOUT_TILING_ANY |
+                                 MIPTREE_LAYOUT_FOR_SCANOUT;
 
    mt = intel_miptree_create(brw, target, format, 0, 0,
                              width, height, depth, num_samples,
@@ -953,15 +960,23 @@ intel_miptree_release(struct intel_mipmap_tree **mt)
 
       drm_intel_bo_unreference((*mt)->bo);
       intel_miptree_release(&(*mt)->stencil_mt);
+      intel_miptree_release(&(*mt)->r8stencil_mt);
       if ((*mt)->hiz_buf) {
          if ((*mt)->hiz_buf->mt)
             intel_miptree_release(&(*mt)->hiz_buf->mt);
          else
-            drm_intel_bo_unreference((*mt)->hiz_buf->bo);
+            drm_intel_bo_unreference((*mt)->hiz_buf->aux_base.bo);
          free((*mt)->hiz_buf);
       }
-      intel_miptree_release(&(*mt)->mcs_mt);
+      if ((*mt)->mcs_buf) {
+         drm_intel_bo_unreference((*mt)->mcs_buf->bo);
+         free((*mt)->mcs_buf);
+      }
       intel_resolve_map_clear(&(*mt)->hiz_map);
+      intel_resolve_map_clear(&(*mt)->color_resolve_map);
+
+      intel_miptree_release(&(*mt)->plane[0]);
+      intel_miptree_release(&(*mt)->plane[1]);
 
       for (i = 0; i < MAX_TEXTURE_LEVELS; i++) {
         free((*mt)->level[i].slice);
@@ -984,10 +999,20 @@ intel_get_image_dims(struct gl_texture_image *image,
        * as a 2D Array with a height of 1. So, here we want to swap image
        * height and depth.
        */
+      assert(image->Depth == 1);
       *width = image->Width;
       *height = 1;
       *depth = image->Height;
       break;
+   case GL_TEXTURE_CUBE_MAP:
+      /* For Cube maps, the mesa/main api layer gives us a depth of 1 even
+       * though we really have 6 slices.
+       */
+      assert(image->Depth == 1);
+      *width = image->Width;
+      *height = image->Height;
+      *depth = 6;
+      break;
    default:
       *width = image->Width;
       *height = image->Height;
@@ -1177,12 +1202,9 @@ intel_get_tile_dims(uint32_t tiling, uint32_t tr_mode, uint32_t cpp,
  */
 void
 intel_get_tile_masks(uint32_t tiling, uint32_t tr_mode, uint32_t cpp,
-                     bool map_stencil_as_y_tiled,
                      uint32_t *mask_x, uint32_t *mask_y)
 {
    uint32_t tile_w_bytes, tile_h;
-   if (map_stencil_as_y_tiled)
-      tiling = I915_TILING_Y;
 
    intel_get_tile_dims(tiling, tr_mode, cpp, &tile_w_bytes, &tile_h);
 
@@ -1197,26 +1219,12 @@ intel_get_tile_masks(uint32_t tiling, uint32_t tr_mode, uint32_t cpp,
  */
 uint32_t
 intel_miptree_get_aligned_offset(const struct intel_mipmap_tree *mt,
-                                 uint32_t x, uint32_t y,
-                                 bool map_stencil_as_y_tiled)
+                                 uint32_t x, uint32_t y)
 {
    int cpp = mt->cpp;
    uint32_t pitch = mt->pitch;
    uint32_t tiling = mt->tiling;
 
-   if (map_stencil_as_y_tiled) {
-      tiling = I915_TILING_Y;
-
-      /* When mapping a W-tiled stencil buffer as Y-tiled, each 64-high W-tile
-       * gets transformed into a 32-high Y-tile.  Accordingly, the pitch of
-       * the resulting surface is twice the pitch of the original miptree,
-       * since each row in the Y-tiled view corresponds to two rows in the
-       * actual W-tiled surface.  So we need to correct the pitch before
-       * computing the offsets.
-       */
-      pitch *= 2;
-   }
-
    switch (tiling) {
    default:
       unreachable("not reached");
@@ -1252,13 +1260,13 @@ intel_miptree_get_tile_offsets(const struct intel_mipmap_tree *mt,
    uint32_t x, y;
    uint32_t mask_x, mask_y;
 
-   intel_get_tile_masks(mt->tiling, mt->tr_mode, mt->cpp, false, &mask_x, &mask_y);
+   intel_get_tile_masks(mt->tiling, mt->tr_mode, mt->cpp, &mask_x, &mask_y);
    intel_miptree_get_image_offset(mt, level, slice, &x, &y);
 
    *tile_x = x & mask_x;
    *tile_y = y & mask_y;
 
-   return intel_miptree_get_aligned_offset(mt, x & ~mask_x, y & ~mask_y, false);
+   return intel_miptree_get_aligned_offset(mt, x & ~mask_x, y & ~mask_y);
 }
 
 static void
@@ -1423,13 +1431,92 @@ intel_miptree_copy_teximage(struct brw_context *brw,
    intel_obj->needs_validate = true;
 }
 
+static void
+intel_miptree_init_mcs(struct brw_context *brw,
+                       struct intel_mipmap_tree *mt,
+                       int init_value)
+{
+   assert(mt->mcs_buf != NULL);
+
+   /* From the Ivy Bridge PRM, Vol 2 Part 1 p326:
+    *
+    *     When MCS buffer is enabled and bound to MSRT, it is required that it
+    *     is cleared prior to any rendering.
+    *
+    * Since we don't use the MCS buffer for any purpose other than rendering,
+    * it makes sense to just clear it immediately upon allocation.
+    *
+    * Note: the clear value for MCS buffers is all 1's, so we memset to 0xff.
+    */
+   const int ret = brw_bo_map_gtt(brw, mt->mcs_buf->bo, "miptree");
+   if (unlikely(ret)) {
+      fprintf(stderr, "Failed to map mcs buffer into GTT\n");
+      drm_intel_bo_unreference(mt->mcs_buf->bo);
+      free(mt->mcs_buf);
+      return;
+   }
+   void *data = mt->mcs_buf->bo->virtual;
+   memset(data, init_value, mt->mcs_buf->size);
+   drm_intel_bo_unmap(mt->mcs_buf->bo);
+}
+
+static struct intel_miptree_aux_buffer *
+intel_mcs_miptree_buf_create(struct brw_context *brw,
+                             struct intel_mipmap_tree *mt,
+                             mesa_format format,
+                             unsigned mcs_width,
+                             unsigned mcs_height,
+                             uint32_t layout_flags)
+{
+   struct intel_miptree_aux_buffer *buf = calloc(sizeof(*buf), 1);
+   struct intel_mipmap_tree *temp_mt;
+
+   if (!buf)
+      return NULL;
+
+   /* From the Ivy Bridge PRM, Vol4 Part1 p76, "MCS Base Address":
+    *
+    *     "The MCS surface must be stored as Tile Y."
+    */
+   layout_flags |= MIPTREE_LAYOUT_TILING_Y;
+   temp_mt = miptree_create(brw,
+                            mt->target,
+                            format,
+                            mt->first_level,
+                            mt->last_level,
+                            mcs_width,
+                            mcs_height,
+                            mt->logical_depth0,
+                            0 /* num_samples */,
+                            layout_flags);
+   if (!temp_mt) {
+      free(buf);
+      return NULL;
+   }
+
+   buf->bo = temp_mt->bo;
+   buf->offset = temp_mt->offset;
+   buf->size = temp_mt->total_height * temp_mt->pitch;
+   buf->pitch = temp_mt->pitch;
+   buf->qpitch = temp_mt->qpitch;
+
+   /* Just hang on to the BO which backs the AUX buffer; the rest of the miptree
+    * structure should go away. We use miptree create simply as a means to make
+    * sure all the constraints for the buffer are satisfied.
+    */
+   drm_intel_bo_reference(temp_mt->bo);
+   intel_miptree_release(&temp_mt);
+
+   return buf;
+}
+
 static bool
 intel_miptree_alloc_mcs(struct brw_context *brw,
                         struct intel_mipmap_tree *mt,
                         GLuint num_samples)
 {
    assert(brw->gen >= 7); /* MCS only used on Gen7+ */
-   assert(mt->mcs_mt == NULL);
+   assert(mt->mcs_buf == NULL);
    assert(!mt->disable_aux_buffers);
 
    /* Choose the correct format for the MCS buffer.  All that really matters
@@ -1462,99 +1549,104 @@ intel_miptree_alloc_mcs(struct brw_context *brw,
       unreachable("Unrecognized sample count in intel_miptree_alloc_mcs");
    };
 
-   /* From the Ivy Bridge PRM, Vol4 Part1 p76, "MCS Base Address":
-    *
-    *     "The MCS surface must be stored as Tile Y."
-    */
-   const uint32_t mcs_flags = MIPTREE_LAYOUT_ACCELERATED_UPLOAD |
-                              MIPTREE_LAYOUT_TILING_Y;
-   mt->mcs_mt = miptree_create(brw,
-                               mt->target,
-                               format,
-                               mt->first_level,
-                               mt->last_level,
-                               mt->logical_width0,
-                               mt->logical_height0,
-                               mt->logical_depth0,
-                               0 /* num_samples */,
-                               mcs_flags);
+   mt->mcs_buf =
+      intel_mcs_miptree_buf_create(brw, mt,
+                                   format,
+                                   mt->logical_width0,
+                                   mt->logical_height0,
+                                   MIPTREE_LAYOUT_ACCELERATED_UPLOAD);
+   if (!mt->mcs_buf)
+      return false;
 
-   /* From the Ivy Bridge PRM, Vol 2 Part 1 p326:
-    *
-    *     When MCS buffer is enabled and bound to MSRT, it is required that it
-    *     is cleared prior to any rendering.
-    *
-    * Since we don't use the MCS buffer for any purpose other than rendering,
-    * it makes sense to just clear it immediately upon allocation.
-    *
-    * Note: the clear value for MCS buffers is all 1's, so we memset to 0xff.
-    */
-   void *data = intel_miptree_map_raw(brw, mt->mcs_mt);
-   memset(data, 0xff, mt->mcs_mt->total_height * mt->mcs_mt->pitch);
-   intel_miptree_unmap_raw(mt->mcs_mt);
-   mt->fast_clear_state = INTEL_FAST_CLEAR_STATE_CLEAR;
+   intel_miptree_init_mcs(brw, mt, 0xFF);
+
+   /* Multisampled miptrees support only a single level. */
+   assert(mt->first_level == 0);
+   intel_miptree_set_fast_clear_state(brw, mt, mt->first_level, 0,
+                                      mt->logical_depth0,
+                                      INTEL_FAST_CLEAR_STATE_CLEAR);
 
-   return mt->mcs_mt;
+   return true;
 }
 
 
 bool
 intel_miptree_alloc_non_msrt_mcs(struct brw_context *brw,
-                                 struct intel_mipmap_tree *mt)
+                                 struct intel_mipmap_tree *mt,
+                                 bool is_lossless_compressed)
 {
-   assert(mt->mcs_mt == NULL);
+   assert(mt->mcs_buf == NULL);
    assert(!mt->disable_aux_buffers);
+   assert(!mt->no_ccs);
+
+   struct isl_surf temp_main_surf;
+   struct isl_surf temp_ccs_surf;
 
-   /* The format of the MCS buffer is opaque to the driver; all that matters
-    * is that we get its size and pitch right.  We'll pretend that the format
-    * is R32.  Since an MCS tile covers 128 blocks horizontally, and a Y-tiled
-    * R32 buffer is 32 pixels across, we'll need to scale the width down by
-    * the block width and then a further factor of 4.  Since an MCS tile
-    * covers 256 blocks vertically, and a Y-tiled R32 buffer is 32 rows high,
-    * we'll need to scale the height down by the block height and then a
-    * further factor of 8.
+   /* First create an ISL representation of the main color surface and let
+    * ISL calculate the equivalent CCS surface against it.
+    */
-   const mesa_format format = MESA_FORMAT_R_UINT32;
-   unsigned block_width_px;
-   unsigned block_height;
-   intel_get_non_msrt_mcs_alignment(mt, &block_width_px, &block_height);
-   unsigned width_divisor = block_width_px * 4;
-   unsigned height_divisor = block_height * 8;
-
-   /* The Skylake MCS is twice as tall as the Broadwell MCS.
-    *
-    * In pre-Skylake, each bit in the MCS contained the state of 2 cachelines
-    * in the main surface. In Skylake, it's two bits.  The extra bit
-    * doubles the MCS height, not width, because in Skylake the MCS is always
-    * Y-tiled.
+   intel_miptree_get_isl_surf(brw, mt, &temp_main_surf);
+   if (!isl_surf_get_ccs_surf(&brw->isl_dev, &temp_main_surf, &temp_ccs_surf))
+      return false;
+
+   assert(temp_ccs_surf.size &&
+          (temp_ccs_surf.size % temp_ccs_surf.row_pitch == 0));
+
+   struct intel_miptree_aux_buffer *buf = calloc(sizeof(*buf), 1);
+   if (!buf)
+      return false;
+
+   buf->size = temp_ccs_surf.size;
+   buf->pitch = temp_ccs_surf.row_pitch;
+   buf->qpitch = isl_surf_get_array_pitch_sa_rows(&temp_ccs_surf);
+
+   /* In case of compression the mcs buffer needs to be initialised, which
+    * requires the buffer to be mapped to cpu space for writing immediately.
+    * Therefore do not use the gpu access flag, which can cause an unnecessary
+    * delay if the backing pages happened to be just used by the GPU.
+    */
-   if (brw->gen >= 9)
-      height_divisor /= 2;
-
-   unsigned mcs_width =
-      ALIGN(mt->logical_width0, width_divisor) / width_divisor;
-   unsigned mcs_height =
-      ALIGN(mt->logical_height0, height_divisor) / height_divisor;
-   assert(mt->logical_depth0 == 1);
-   uint32_t layout_flags = MIPTREE_LAYOUT_ACCELERATED_UPLOAD |
-                           MIPTREE_LAYOUT_TILING_Y;
-   if (brw->gen >= 8) {
-      layout_flags |= MIPTREE_LAYOUT_FORCE_HALIGN16;
+   const uint32_t alloc_flags =
+      is_lossless_compressed ? 0 : BO_ALLOC_FOR_RENDER;
+   uint32_t tiling = I915_TILING_Y;
+   unsigned long pitch;
+
+   /* ISL has a stricter set of alignment rules than the drm allocator.
+    * Therefore one can pass the ISL dimensions in terms of bytes instead of
+    * trying to recalculate based on different format block sizes.
+    */
+   buf->bo = drm_intel_bo_alloc_tiled(brw->bufmgr, "ccs-miptree",
+                                      buf->pitch, buf->size / buf->pitch,
+                                      1, &tiling, &pitch, alloc_flags);
+   if (buf->bo) {
+      assert(pitch == buf->pitch);
+      assert(tiling == I915_TILING_Y);
+   } else {
+      free(buf);
+      return false;
    }
-   mt->mcs_mt = miptree_create(brw,
-                               mt->target,
-                               format,
-                               mt->first_level,
-                               mt->last_level,
-                               mcs_width,
-                               mcs_height,
-                               mt->logical_depth0,
-                               0 /* num_samples */,
-                               layout_flags);
 
-   return mt->mcs_mt;
-}
+   mt->mcs_buf = buf;
 
+   /* From Gen9 onwards single-sampled (non-msrt) auxiliary buffers are
+    * used for lossless compression, which requires similar initialisation
+    * to multi-sample compression.
+    */
+   if (is_lossless_compressed) {
+      /* Hardware sets the auxiliary buffer to all zeroes when it does full
+       * resolve. Initialize it accordingly in case the first renderer is
+       * the cpu (or some other non-compression-aware party).
+       *
+       * This is also explicitly stated in the spec (MCS Buffer for Render
+       * Target(s)):
+       *   "If Software wants to enable Color Compression without Fast clear,
+       *    Software needs to initialize MCS with zeros."
+       */
+      intel_miptree_init_mcs(brw, mt, 0);
+      mt->msaa_layout = INTEL_MSAA_LAYOUT_CMS;
+   }
+
+   return true;
+}
 
 /**
  * Helper for intel_miptree_alloc_hiz() that sets
@@ -1594,7 +1686,7 @@ intel_miptree_level_enable_hiz(struct brw_context *brw,
  * Helper for intel_miptree_alloc_hiz() that determines the required hiz
  * buffer dimensions and allocates a bo for the hiz buffer.
  */
-static struct intel_miptree_aux_buffer *
+static struct intel_miptree_hiz_buffer *
 intel_gen7_hiz_buf_create(struct brw_context *brw,
                           struct intel_mipmap_tree *mt)
 {
@@ -1602,7 +1694,7 @@ intel_gen7_hiz_buf_create(struct brw_context *brw,
    unsigned z_height = mt->logical_height0;
    const unsigned z_depth = MAX2(mt->logical_depth0, 1);
    unsigned hz_width, hz_height;
-   struct intel_miptree_aux_buffer *buf = calloc(sizeof(*buf), 1);
+   struct intel_miptree_hiz_buffer *buf = calloc(sizeof(*buf), 1);
 
    if (!buf)
       return NULL;
@@ -1653,32 +1745,27 @@ intel_gen7_hiz_buf_create(struct brw_context *brw,
       hz_height = DIV_ROUND_UP(hz_height, 2);
    } else {
       const unsigned hz_qpitch = h0 + h1 + (12 * vertical_align);
-      if (mt->target == GL_TEXTURE_CUBE_MAP_ARRAY ||
-          mt->target == GL_TEXTURE_CUBE_MAP) {
-         /* HZ_Height (rows) = Ceiling ( ( Q_pitch * Z_depth * 6/2) /8 ) * 8 */
-         hz_height = DIV_ROUND_UP(hz_qpitch * Z0 * 6, 2 * 8) * 8;
-      } else {
-         /* HZ_Height (rows) = Ceiling ( ( Q_pitch * Z_depth/2) /8 ) * 8 */
-         hz_height = DIV_ROUND_UP(hz_qpitch * Z0, 2 * 8) * 8;
-      }
+      /* HZ_Height (rows) = Ceiling ( ( Q_pitch * Z_depth/2) /8 ) * 8 */
+      hz_height = DIV_ROUND_UP(hz_qpitch * Z0, 2 * 8) * 8;
    }
 
    unsigned long pitch;
    uint32_t tiling = I915_TILING_Y;
-   buf->bo = drm_intel_bo_alloc_tiled(brw->bufmgr, "hiz",
-                                      hz_width, hz_height, 1,
-                                      &tiling, &pitch,
-                                      BO_ALLOC_FOR_RENDER);
-   if (!buf->bo) {
+   buf->aux_base.bo = drm_intel_bo_alloc_tiled(brw->bufmgr, "hiz",
+                                               hz_width, hz_height, 1,
+                                               &tiling, &pitch,
+                                               BO_ALLOC_FOR_RENDER);
+   if (!buf->aux_base.bo) {
       free(buf);
       return NULL;
    } else if (tiling != I915_TILING_Y) {
-      drm_intel_bo_unreference(buf->bo);
+      drm_intel_bo_unreference(buf->aux_base.bo);
       free(buf);
       return NULL;
    }
 
-   buf->pitch = pitch;
+   buf->aux_base.size = hz_width * hz_height;
+   buf->aux_base.pitch = pitch;
 
    return buf;
 }
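
The non-3D branch above applies the documented formula HZ_Height = ceil((HZ_QPitch * Z_Depth / 2) / 8) * 8 with HZ_QPitch = h0 + h1 + 12 * vertical_align. A small standalone check of that arithmetic, using made-up sample inputs rather than values taken from the driver:

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
   /* Hypothetical inputs: aligned LOD0/LOD1 heights, vertical alignment and
    * depth of a 2D depth surface.
    */
   const unsigned h0 = 1088, h1 = 544, vertical_align = 8, Z0 = 1;

   const unsigned hz_qpitch = h0 + h1 + 12 * vertical_align;          /* 1728 */
   const unsigned hz_height = DIV_ROUND_UP(hz_qpitch * Z0, 2 * 8) * 8;

   /* 1728 / 16 = 108, rounded up stays 108, times 8 = 864 rows of HiZ. */
   printf("hz_qpitch = %u, hz_height = %u\n", hz_qpitch, hz_height);
   return 0;
}
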
@@ -1688,7 +1775,7 @@ intel_gen7_hiz_buf_create(struct brw_context *brw,
  * Helper for intel_miptree_alloc_hiz() that determines the required hiz
  * buffer dimensions and allocates a bo for the hiz buffer.
  */
-static struct intel_miptree_aux_buffer *
+static struct intel_miptree_hiz_buffer *
 intel_gen8_hiz_buf_create(struct brw_context *brw,
                           struct intel_mipmap_tree *mt)
 {
@@ -1696,7 +1783,7 @@ intel_gen8_hiz_buf_create(struct brw_context *brw,
    unsigned z_height = mt->logical_height0;
    const unsigned z_depth = MAX2(mt->logical_depth0, 1);
    unsigned hz_width, hz_height;
-   struct intel_miptree_aux_buffer *buf = calloc(sizeof(*buf), 1);
+   struct intel_miptree_hiz_buffer *buf = calloc(sizeof(*buf), 1);
 
    if (!buf)
       return NULL;
@@ -1749,51 +1836,43 @@ intel_gen8_hiz_buf_create(struct brw_context *brw,
       Z_i = minify(Z_i, 1);
    }
    /* HZ_QPitch = h0 + max(h1, sum(i=2 to m; h_i)) */
-   buf->qpitch = h0 + MAX2(h1, sum_h_i);
+   buf->aux_base.qpitch = h0 + MAX2(h1, sum_h_i);
 
    if (mt->target == GL_TEXTURE_3D) {
       /* (1/2) * sum(i=0 to m; h_i * max(1, floor(Z_Depth/2**i))) */
       hz_height = DIV_ROUND_UP(hz_height_3d_sum, 2);
    } else {
       /* HZ_Height (rows) = ceiling( (HZ_QPitch/2)/8) *8 * Z_Depth */
-      hz_height = DIV_ROUND_UP(buf->qpitch, 2 * 8) * 8 * Z0;
-      if (mt->target == GL_TEXTURE_CUBE_MAP_ARRAY ||
-          mt->target == GL_TEXTURE_CUBE_MAP) {
-         /* HZ_Height (rows) = ceiling( (HZ_QPitch/2)/8) *8 * 6 * Z_Depth
-          *
-          * We can can just take our hz_height calculation from above, and
-          * multiply by 6 for the cube map and cube map array types.
-          */
-         hz_height *= 6;
-      }
+      hz_height = DIV_ROUND_UP(buf->aux_base.qpitch, 2 * 8) * 8 * Z0;
    }
 
    unsigned long pitch;
    uint32_t tiling = I915_TILING_Y;
-   buf->bo = drm_intel_bo_alloc_tiled(brw->bufmgr, "hiz",
-                                      hz_width, hz_height, 1,
-                                      &tiling, &pitch,
-                                      BO_ALLOC_FOR_RENDER);
-   if (!buf->bo) {
+   buf->aux_base.bo = drm_intel_bo_alloc_tiled(brw->bufmgr, "hiz",
+                                               hz_width, hz_height, 1,
+                                               &tiling, &pitch,
+                                               BO_ALLOC_FOR_RENDER);
+   if (!buf->aux_base.bo) {
       free(buf);
       return NULL;
    } else if (tiling != I915_TILING_Y) {
-      drm_intel_bo_unreference(buf->bo);
+      drm_intel_bo_unreference(buf->aux_base.bo);
       free(buf);
       return NULL;
    }
 
-   buf->pitch = pitch;
+   buf->aux_base.size = hz_width * hz_height;
+   buf->aux_base.pitch = pitch;
 
    return buf;
 }
 
 
-static struct intel_miptree_aux_buffer *
+static struct intel_miptree_hiz_buffer *
 intel_hiz_miptree_buf_create(struct brw_context *brw,
                              struct intel_mipmap_tree *mt)
 {
-   struct intel_miptree_aux_buffer *buf = calloc(sizeof(*buf), 1);
+   struct intel_miptree_hiz_buffer *buf = calloc(sizeof(*buf), 1);
    uint32_t layout_flags = MIPTREE_LAYOUT_ACCELERATED_UPLOAD;
 
    if (brw->gen == 6)
@@ -1818,9 +1897,10 @@ intel_hiz_miptree_buf_create(struct brw_context *brw,
       return NULL;
    }
 
-   buf->bo = buf->mt->bo;
-   buf->pitch = buf->mt->pitch;
-   buf->qpitch = buf->mt->qpitch;
+   buf->aux_base.bo = buf->mt->bo;
+   buf->aux_base.size = buf->mt->total_height * buf->mt->pitch;
+   buf->aux_base.pitch = buf->mt->pitch;
+   buf->aux_base.qpitch = buf->mt->qpitch;
 
    return buf;
 }
@@ -1878,7 +1958,7 @@ intel_miptree_alloc_hiz(struct brw_context *brw,
          exec_node_init(&m->link);
          m->level = level;
          m->layer = layer;
-         m->need = GEN6_HIZ_OP_HIZ_RESOLVE;
+         m->need = BLORP_HIZ_OP_HIZ_RESOLVE;
 
          exec_list_push_tail(&mt->hiz_map, &m->link);
       }
@@ -1887,6 +1967,50 @@ intel_miptree_alloc_hiz(struct brw_context *brw,
    return true;
 }
 
+/**
+ * Can the miptree be sampled using the hiz buffer?
+ */
+bool
+intel_miptree_sample_with_hiz(struct brw_context *brw,
+                              struct intel_mipmap_tree *mt)
+{
+   /* It's unclear how well supported sampling from the hiz buffer is on GEN8,
+    * so keep things conservative for now and never enable it unless we're SKL+.
+    */
+   if (brw->gen < 9) {
+      return false;
+   }
+
+   if (!mt->hiz_buf) {
+      return false;
+   }
+
+   /* It seems the hardware won't fall back to the depth buffer if some of the
+    * mipmap levels aren't available in the HiZ buffer. So we need all levels
+    * of the texture to be HiZ enabled.
+    */
+   for (unsigned level = mt->first_level; level <= mt->last_level; ++level) {
+      if (!intel_miptree_level_has_hiz(mt, level))
+         return false;
+   }
+
+   /* If compressed multisampling is enabled, then we use it for the auxiliary
+    * buffer instead.
+    *
+    * From the BDW PRM (Volume 2d: Command Reference: Structures
+    *                   RENDER_SURFACE_STATE.AuxiliarySurfaceMode):
+    *
+    *  "If this field is set to AUX_HIZ, Number of Multisamples must be
+    *   MULTISAMPLECOUNT_1, and Surface Type cannot be SURFTYPE_3D."
+    *
+    * There is no such blurb for 1D textures, but there is sufficient evidence
+    * that this is broken on SKL+.
+    */
+   return (mt->num_samples <= 1 &&
+           mt->target != GL_TEXTURE_3D &&
+           mt->target != GL_TEXTURE_1D /* gen9+ restriction */);
+}
+
 /**
  * Does the miptree slice have hiz enabled?
  */
@@ -1906,7 +2030,7 @@ intel_miptree_slice_set_needs_hiz_resolve(struct intel_mipmap_tree *mt,
       return;
 
    intel_resolve_map_set(&mt->hiz_map,
-                        level, layer, GEN6_HIZ_OP_HIZ_RESOLVE);
+                        level, layer, BLORP_HIZ_OP_HIZ_RESOLVE);
 }
 
 
@@ -1919,7 +2043,7 @@ intel_miptree_slice_set_needs_depth_resolve(struct intel_mipmap_tree *mt,
       return;
 
    intel_resolve_map_set(&mt->hiz_map,
-                        level, layer, GEN6_HIZ_OP_DEPTH_RESOLVE);
+                        level, layer, BLORP_HIZ_OP_DEPTH_RESOLVE);
 }
 
 void
@@ -1939,7 +2063,7 @@ intel_miptree_slice_resolve(struct brw_context *brw,
                            struct intel_mipmap_tree *mt,
                            uint32_t level,
                            uint32_t layer,
-                           enum gen6_hiz_op need)
+                           enum blorp_hiz_op need)
 {
    intel_miptree_check_level_layer(mt, level, layer);
 
@@ -1961,7 +2085,7 @@ intel_miptree_slice_resolve_hiz(struct brw_context *brw,
                                uint32_t layer)
 {
    return intel_miptree_slice_resolve(brw, mt, level, layer,
-                                     GEN6_HIZ_OP_HIZ_RESOLVE);
+                                     BLORP_HIZ_OP_HIZ_RESOLVE);
 }
 
 bool
@@ -1971,13 +2095,13 @@ intel_miptree_slice_resolve_depth(struct brw_context *brw,
                                  uint32_t layer)
 {
    return intel_miptree_slice_resolve(brw, mt, level, layer,
-                                     GEN6_HIZ_OP_DEPTH_RESOLVE);
+                                     BLORP_HIZ_OP_DEPTH_RESOLVE);
 }
 
 static bool
 intel_miptree_all_slices_resolve(struct brw_context *brw,
                                 struct intel_mipmap_tree *mt,
-                                enum gen6_hiz_op need)
+                                enum blorp_hiz_op need)
 {
    bool did_resolve = false;
 
@@ -1998,7 +2122,7 @@ intel_miptree_all_slices_resolve_hiz(struct brw_context *brw,
                                     struct intel_mipmap_tree *mt)
 {
    return intel_miptree_all_slices_resolve(brw, mt,
-                                          GEN6_HIZ_OP_HIZ_RESOLVE);
+                                          BLORP_HIZ_OP_HIZ_RESOLVE);
 }
 
 bool
@@ -2006,40 +2130,187 @@ intel_miptree_all_slices_resolve_depth(struct brw_context *brw,
                                       struct intel_mipmap_tree *mt)
 {
    return intel_miptree_all_slices_resolve(brw, mt,
-                                          GEN6_HIZ_OP_DEPTH_RESOLVE);
+                                          BLORP_HIZ_OP_DEPTH_RESOLVE);
+}
+
+enum intel_fast_clear_state
+intel_miptree_get_fast_clear_state(const struct intel_mipmap_tree *mt,
+                                   unsigned level, unsigned layer)
+{
+   intel_miptree_check_level_layer(mt, level, layer);
+
+   const struct intel_resolve_map *item =
+      intel_resolve_map_const_get(&mt->color_resolve_map, level, layer);
+
+   if (!item)
+      return INTEL_FAST_CLEAR_STATE_RESOLVED;
+
+   return item->fast_clear_state;
 }
 
+static void
+intel_miptree_check_color_resolve(const struct brw_context *brw,
+                                  const struct intel_mipmap_tree *mt,
+                                  unsigned level, unsigned layer)
+{
+   if (mt->no_ccs || !mt->mcs_buf)
+      return;
+
+   /* Fast color clear is supported for mipmapped surfaces only on Gen8+. */
+   assert(brw->gen >= 8 ||
+          (level == 0 && mt->first_level == 0 && mt->last_level == 0));
+
+   /* Compression of arrayed msaa surfaces is supported. */
+   if (mt->num_samples > 1)
+      return;
+
+   /* Fast color clear is supported for non-msaa arrays only on Gen8+. */
+   assert(brw->gen >= 8 || (layer == 0 && mt->logical_depth0 == 1));
+
+   (void)level;
+   (void)layer;
+}
+
+void
+intel_miptree_set_fast_clear_state(const struct brw_context *brw,
+                                   struct intel_mipmap_tree *mt,
+                                   unsigned level,
+                                   unsigned first_layer,
+                                   unsigned num_layers,
+                                   enum intel_fast_clear_state new_state)
+{
+   /* Setting the state to resolved means removing the item from the list
+    * altogether.
+    */
+   assert(new_state != INTEL_FAST_CLEAR_STATE_RESOLVED);
+
+   intel_miptree_check_color_resolve(brw, mt, level, first_layer);
+
+   assert(first_layer + num_layers <= mt->physical_depth0);
+
+   for (unsigned i = 0; i < num_layers; i++)
+      intel_resolve_map_set(&mt->color_resolve_map, level,
+                            first_layer + i, new_state);
+}
+
+bool
+intel_miptree_has_color_unresolved(const struct intel_mipmap_tree *mt,
+                                   unsigned start_level, unsigned num_levels,
+                                   unsigned start_layer, unsigned num_layers)
+{
+   return intel_resolve_map_find_any(&mt->color_resolve_map,
+                                     start_level, num_levels,
+                                     start_layer, num_layers) != NULL;
+}
 
 void
+intel_miptree_used_for_rendering(const struct brw_context *brw,
+                                 struct intel_mipmap_tree *mt, unsigned level,
+                                 unsigned start_layer, unsigned num_layers)
+{
+   const bool is_lossless_compressed =
+      intel_miptree_is_lossless_compressed(brw, mt);
+
+   for (unsigned i = 0; i < num_layers; ++i) {
+      const enum intel_fast_clear_state fast_clear_state =
+         intel_miptree_get_fast_clear_state(mt, level, start_layer + i);
+
+      /* If the buffer was previously in fast clear state, change it to
+       * unresolved state, since it won't be guaranteed to be clear after
+       * rendering occurs.
+       */
+      if (is_lossless_compressed ||
+          fast_clear_state == INTEL_FAST_CLEAR_STATE_CLEAR) {
+         intel_miptree_set_fast_clear_state(
+            brw, mt, level, start_layer + i, 1,
+            INTEL_FAST_CLEAR_STATE_UNRESOLVED);
+      }
+   }
+}
+
+static bool
+intel_miptree_needs_color_resolve(const struct brw_context *brw,
+                                  const struct intel_mipmap_tree *mt,
+                                  int flags)
+{
+   if (mt->no_ccs)
+      return false;
+
+   const bool is_lossless_compressed =
+      intel_miptree_is_lossless_compressed(brw, mt);
+
+   /* From gen9 onwards there is a new compression scheme for single sampled
+    * surfaces called "lossless compressed". These don't always need to be
+    * resolved.
+    */
+   if ((flags & INTEL_MIPTREE_IGNORE_CCS_E) && is_lossless_compressed)
+      return false;
+
+   /* Fast color clear resolves only make sense for non-MSAA buffers. */
+   if (mt->msaa_layout != INTEL_MSAA_LAYOUT_NONE && !is_lossless_compressed)
+      return false;
+
+   return true;
+}
+
+bool
 intel_miptree_resolve_color(struct brw_context *brw,
-                            struct intel_mipmap_tree *mt,
+                            struct intel_mipmap_tree *mt, unsigned level,
+                            unsigned start_layer, unsigned num_layers,
                             int flags)
 {
-   (void)flags;
+   intel_miptree_check_color_resolve(brw, mt, level, start_layer);
 
-   switch (mt->fast_clear_state) {
-   case INTEL_FAST_CLEAR_STATE_NO_MCS:
-   case INTEL_FAST_CLEAR_STATE_RESOLVED:
-      /* No resolve needed */
-      break;
-   case INTEL_FAST_CLEAR_STATE_UNRESOLVED:
-   case INTEL_FAST_CLEAR_STATE_CLEAR:
-      /* Fast color clear resolves only make sense for non-MSAA buffers. */
-      if (mt->msaa_layout == INTEL_MSAA_LAYOUT_NONE)
-         brw_meta_resolve_color(brw, mt);
-      break;
+   if (!intel_miptree_needs_color_resolve(brw, mt, flags))
+      return false;
+
+   /* Arrayed fast clear is only supported for gen8+. */
+   assert(brw->gen >= 8 || num_layers == 1);
+
+   bool resolved = false;
+   for (unsigned i = 0; i < num_layers; ++i) {
+      intel_miptree_check_level_layer(mt, level, start_layer + i);
+
+      struct intel_resolve_map *item =
+         intel_resolve_map_get(&mt->color_resolve_map, level,
+                               start_layer + i);
+
+      if (item) {
+         assert(item->fast_clear_state != INTEL_FAST_CLEAR_STATE_RESOLVED);
+
+         brw_blorp_resolve_color(brw, mt, level, start_layer + i);
+         intel_resolve_map_remove(item);
+         resolved = true;
+      }
    }
+
+   return resolved;
 }
 
+void
+intel_miptree_all_slices_resolve_color(struct brw_context *brw,
+                                       struct intel_mipmap_tree *mt,
+                                       int flags)
+{
+   if (!intel_miptree_needs_color_resolve(brw, mt, flags))
+      return;
+
+   foreach_list_typed_safe(struct intel_resolve_map, map, link,
+                           &mt->color_resolve_map) {
+      assert(map->fast_clear_state != INTEL_FAST_CLEAR_STATE_RESOLVED);
+
+      brw_blorp_resolve_color(brw, mt, map->level, map->layer);
+      intel_resolve_map_remove(map);
+   }
+}
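
Taken together, the functions above replace the single mt->fast_clear_state field with a per-(level, layer) map: an absent entry means resolved, a fast clear inserts a CLEAR entry, rendering downgrades it to UNRESOLVED, and a color resolve removes it again. A toy, array-backed analogue of those transitions - everything here is hypothetical and only mirrors the state machine, not the driver's exec_list-based resolve map:

#include <assert.h>

enum toy_state { TOY_RESOLVED, TOY_UNRESOLVED, TOY_CLEAR };

#define TOY_LEVELS 4
#define TOY_LAYERS 8

/* TOY_RESOLVED (0) plays the role of "no entry in the list". */
struct toy_resolve_map {
   enum toy_state state[TOY_LEVELS][TOY_LAYERS];
};

static void toy_set(struct toy_resolve_map *m, unsigned level, unsigned layer,
                    enum toy_state s)
{
   m->state[level][layer] = s;
}

static enum toy_state toy_get(const struct toy_resolve_map *m,
                              unsigned level, unsigned layer)
{
   return m->state[level][layer];
}

int main(void)
{
   struct toy_resolve_map map = {{{0}}};       /* everything starts resolved */

   toy_set(&map, 1, 2, TOY_CLEAR);             /* fast clear of level 1, layer 2 */
   assert(toy_get(&map, 1, 2) == TOY_CLEAR);

   toy_set(&map, 1, 2, TOY_UNRESOLVED);        /* subsequently rendered to */
   assert(toy_get(&map, 1, 2) == TOY_UNRESOLVED);

   toy_set(&map, 1, 2, TOY_RESOLVED);          /* color resolve drops the entry */
   assert(toy_get(&map, 1, 2) == TOY_RESOLVED);
   return 0;
}
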
 
 /**
  * Make it possible to share the BO backing the given miptree with another
  * process or another miptree.
  *
  * Fast color clears are unsafe with shared buffers, so we need to resolve and
- * then discard the MCS buffer, if present.  We also set the fast_clear_state
- * to INTEL_FAST_CLEAR_STATE_NO_MCS to ensure that no MCS buffer gets
- * allocated in the future.
+ * then discard the MCS buffer, if present.  We also set the no_ccs flag to
+ * ensure that no MCS buffer gets allocated in the future.
  */
 void
 intel_miptree_make_shareable(struct brw_context *brw,
@@ -2050,12 +2321,11 @@ intel_miptree_make_shareable(struct brw_context *brw,
     * pixel data is stored.  Fortunately this code path should never be
     * reached for multisample buffers.
     */
-   assert(mt->msaa_layout == INTEL_MSAA_LAYOUT_NONE);
+   assert(mt->msaa_layout == INTEL_MSAA_LAYOUT_NONE || mt->num_samples <= 1);
 
-   if (mt->mcs_mt) {
-      intel_miptree_resolve_color(brw, mt, 0);
-      intel_miptree_release(&mt->mcs_mt);
-      mt->fast_clear_state = INTEL_FAST_CLEAR_STATE_NO_MCS;
+   if (mt->mcs_buf) {
+      intel_miptree_all_slices_resolve_color(brw, mt, 0);
+      mt->no_ccs = true;
    }
 }
 
@@ -2121,47 +2391,97 @@ intel_miptree_updownsample(struct brw_context *brw,
                            struct intel_mipmap_tree *src,
                            struct intel_mipmap_tree *dst)
 {
-   if (brw->gen < 8) {
-      brw_blorp_blit_miptrees(brw,
-                              src, 0 /* level */, 0 /* layer */, src->format,
-                              dst, 0 /* level */, 0 /* layer */, dst->format,
-                              0, 0,
-                              src->logical_width0, src->logical_height0,
-                              0, 0,
-                              dst->logical_width0, dst->logical_height0,
-                              GL_NEAREST, false, false /*mirror x, y*/);
-   } else if (src->format == MESA_FORMAT_S_UINT8) {
-      brw_meta_stencil_updownsample(brw, src, dst);
-   } else {
-      brw_meta_updownsample(brw, src, dst);
-   }
+   brw_blorp_blit_miptrees(brw,
+                           src, 0 /* level */, 0 /* layer */,
+                           src->format, SWIZZLE_XYZW,
+                           dst, 0 /* level */, 0 /* layer */, dst->format,
+                           0, 0,
+                           src->logical_width0, src->logical_height0,
+                           0, 0,
+                           dst->logical_width0, dst->logical_height0,
+                           GL_NEAREST, false, false /*mirror x, y*/,
+                           false, false);
 
    if (src->stencil_mt) {
-      if (brw->gen >= 8) {
-         brw_meta_stencil_updownsample(brw, src->stencil_mt, dst);
-         return;
-      }
-
       brw_blorp_blit_miptrees(brw,
                               src->stencil_mt, 0 /* level */, 0 /* layer */,
-                              src->stencil_mt->format,
+                              src->stencil_mt->format, SWIZZLE_XYZW,
                               dst->stencil_mt, 0 /* level */, 0 /* layer */,
                               dst->stencil_mt->format,
                               0, 0,
                               src->logical_width0, src->logical_height0,
                               0, 0,
                               dst->logical_width0, dst->logical_height0,
-                              GL_NEAREST, false, false /*mirror x, y*/);
+                              GL_NEAREST, false, false /*mirror x, y*/,
+                              false, false /* decode/encode srgb */);
    }
 }
 
-void *
+void
+intel_update_r8stencil(struct brw_context *brw,
+                       struct intel_mipmap_tree *mt)
+{
+   assert(brw->gen >= 7);
+   struct intel_mipmap_tree *src =
+      mt->format == MESA_FORMAT_S_UINT8 ? mt : mt->stencil_mt;
+   if (!src || brw->gen >= 8 || !src->r8stencil_needs_update)
+      return;
+
+   if (!mt->r8stencil_mt) {
+      const uint32_t r8stencil_flags =
+         MIPTREE_LAYOUT_ACCELERATED_UPLOAD | MIPTREE_LAYOUT_TILING_Y |
+         MIPTREE_LAYOUT_DISABLE_AUX;
+      assert(brw->gen > 6); /* Handle MIPTREE_LAYOUT_FORCE_ALL_SLICE_AT_LOD */
+      mt->r8stencil_mt = intel_miptree_create(brw,
+                                              src->target,
+                                              MESA_FORMAT_R_UINT8,
+                                              src->first_level,
+                                              src->last_level,
+                                              src->logical_width0,
+                                              src->logical_height0,
+                                              src->logical_depth0,
+                                              src->num_samples,
+                                              r8stencil_flags);
+      assert(mt->r8stencil_mt);
+   }
+
+   struct intel_mipmap_tree *dst = mt->r8stencil_mt;
+
+   for (int level = src->first_level; level <= src->last_level; level++) {
+      const unsigned depth = src->level[level].depth;
+      const int layers_per_blit =
+         (dst->msaa_layout == INTEL_MSAA_LAYOUT_UMS ||
+          dst->msaa_layout == INTEL_MSAA_LAYOUT_CMS) ?
+         dst->num_samples : 1;
+
+      for (unsigned layer = 0; layer < depth; layer++) {
+         brw_blorp_blit_miptrees(brw,
+                                 src, level, layer,
+                                 src->format, SWIZZLE_X,
+                                 dst, level, layers_per_blit * layer,
+                                 MESA_FORMAT_R_UNORM8,
+                                 0, 0,
+                                 minify(src->logical_width0, level),
+                                 minify(src->logical_height0, level),
+                                 0, 0,
+                                 minify(dst->logical_width0, level),
+                                 minify(dst->logical_height0, level),
+                                 GL_NEAREST, false, false /*mirror x, y*/,
+                                 false, false /* decode/encode srgb */);
+      }
+   }
+
+   brw_render_cache_set_check_flush(brw, dst->bo);
+   src->r8stencil_needs_update = false;
+}
+
+static void *
 intel_miptree_map_raw(struct brw_context *brw, struct intel_mipmap_tree *mt)
 {
    /* CPU accesses to color buffers don't understand fast color clears, so
     * resolve any pending fast color clears before we map.
     */
-   intel_miptree_resolve_color(brw, mt, 0);
+   intel_miptree_all_slices_resolve_color(brw, mt, 0);
 
    drm_intel_bo *bo = mt->bo;
 
@@ -2176,7 +2496,7 @@ intel_miptree_map_raw(struct brw_context *brw, struct intel_mipmap_tree *mt)
    return bo->virtual;
 }
 
-void
+static void
 intel_miptree_unmap_raw(struct intel_mipmap_tree *mt)
 {
    drm_intel_bo_unmap(mt->bo);
@@ -2336,6 +2656,9 @@ intel_miptree_map_movntdqa(struct brw_context *brw,
    void *src = intel_miptree_map_raw(brw, mt);
    if (!src)
       return;
+
+   src += mt->offset;
+
    src += image_y * mt->pitch;
    src += image_x * mt->cpp;
 
@@ -2443,8 +2766,8 @@ intel_miptree_unmap_s8(struct brw_context *brw,
       for (uint32_t y = 0; y < map->h; y++) {
         for (uint32_t x = 0; x < map->w; x++) {
            ptrdiff_t offset = intel_offset_S8(mt->pitch,
-                                              x + map->x,
-                                              y + map->y,
+                                              image_x + x + map->x,
+                                              image_y + y + map->y,
                                               brw->has_swizzling);
            tiled_s8_map[offset] = untiled_s8_map[y * map->w + x];
         }
@@ -2699,12 +3022,6 @@ static bool
 can_blit_slice(struct intel_mipmap_tree *mt,
                unsigned int level, unsigned int slice)
 {
-   uint32_t image_x;
-   uint32_t image_y;
-   intel_miptree_get_image_offset(mt, level, slice, &image_x, &image_y);
-   if (image_x >= 32768 || image_y >= 32768)
-      return false;
-
    /* See intel_miptree_blit() for details on the 32k pitch limit. */
    if (mt->pitch >= 32768)
       return false;
@@ -2847,3 +3164,352 @@ intel_miptree_unmap(struct brw_context *brw,
 
    intel_miptree_release_map(mt, level, slice);
 }
+
+enum isl_surf_dim
+get_isl_surf_dim(GLenum target)
+{
+   switch (target) {
+   case GL_TEXTURE_1D:
+   case GL_TEXTURE_1D_ARRAY:
+      return ISL_SURF_DIM_1D;
+
+   case GL_TEXTURE_2D:
+   case GL_TEXTURE_2D_ARRAY:
+   case GL_TEXTURE_RECTANGLE:
+   case GL_TEXTURE_CUBE_MAP:
+   case GL_TEXTURE_CUBE_MAP_ARRAY:
+   case GL_TEXTURE_2D_MULTISAMPLE:
+   case GL_TEXTURE_2D_MULTISAMPLE_ARRAY:
+   case GL_TEXTURE_EXTERNAL_OES:
+      return ISL_SURF_DIM_2D;
+
+   case GL_TEXTURE_3D:
+      return ISL_SURF_DIM_3D;
+   }
+
+   unreachable("Invalid texture target");
+}
+
+enum isl_dim_layout
+get_isl_dim_layout(const struct gen_device_info *devinfo, uint32_t tiling,
+                   GLenum target)
+{
+   switch (target) {
+   case GL_TEXTURE_1D:
+   case GL_TEXTURE_1D_ARRAY:
+      return (devinfo->gen >= 9 && tiling == I915_TILING_NONE ?
+              ISL_DIM_LAYOUT_GEN9_1D : ISL_DIM_LAYOUT_GEN4_2D);
+
+   case GL_TEXTURE_2D:
+   case GL_TEXTURE_2D_ARRAY:
+   case GL_TEXTURE_RECTANGLE:
+   case GL_TEXTURE_2D_MULTISAMPLE:
+   case GL_TEXTURE_2D_MULTISAMPLE_ARRAY:
+   case GL_TEXTURE_EXTERNAL_OES:
+      return ISL_DIM_LAYOUT_GEN4_2D;
+
+   case GL_TEXTURE_CUBE_MAP:
+   case GL_TEXTURE_CUBE_MAP_ARRAY:
+      return (devinfo->gen == 4 ? ISL_DIM_LAYOUT_GEN4_3D :
+              ISL_DIM_LAYOUT_GEN4_2D);
+
+   case GL_TEXTURE_3D:
+      return (devinfo->gen >= 9 ?
+              ISL_DIM_LAYOUT_GEN4_2D : ISL_DIM_LAYOUT_GEN4_3D);
+   }
+
+   unreachable("Invalid texture target");
+}
+
+enum isl_tiling
+intel_miptree_get_isl_tiling(const struct intel_mipmap_tree *mt)
+{
+   if (mt->format == MESA_FORMAT_S_UINT8) {
+      return ISL_TILING_W;
+   } else {
+      switch (mt->tiling) {
+      case I915_TILING_NONE:
+         return ISL_TILING_LINEAR;
+      case I915_TILING_X:
+         return ISL_TILING_X;
+      case I915_TILING_Y:
+         switch (mt->tr_mode) {
+         case INTEL_MIPTREE_TRMODE_NONE:
+            return ISL_TILING_Y0;
+         case INTEL_MIPTREE_TRMODE_YF:
+            return ISL_TILING_Yf;
+         case INTEL_MIPTREE_TRMODE_YS:
+            return ISL_TILING_Ys;
+         default:
+            unreachable("Invalid tiled resource mode");
+         }
+      default:
+         unreachable("Invalid tiling mode");
+      }
+   }
+}
+
+void
+intel_miptree_get_isl_surf(struct brw_context *brw,
+                           const struct intel_mipmap_tree *mt,
+                           struct isl_surf *surf)
+{
+   surf->dim = get_isl_surf_dim(mt->target);
+   surf->dim_layout = get_isl_dim_layout(&brw->screen->devinfo,
+                                         mt->tiling, mt->target);
+
+   if (mt->num_samples > 1) {
+      switch (mt->msaa_layout) {
+      case INTEL_MSAA_LAYOUT_IMS:
+         surf->msaa_layout = ISL_MSAA_LAYOUT_INTERLEAVED;
+         break;
+      case INTEL_MSAA_LAYOUT_UMS:
+      case INTEL_MSAA_LAYOUT_CMS:
+         surf->msaa_layout = ISL_MSAA_LAYOUT_ARRAY;
+         break;
+      default:
+         unreachable("Invalid MSAA layout");
+      }
+   } else {
+      surf->msaa_layout = ISL_MSAA_LAYOUT_NONE;
+   }
+
+   surf->tiling = intel_miptree_get_isl_tiling(mt);
+
+   if (mt->format == MESA_FORMAT_S_UINT8) {
+      /* The ISL definition of row_pitch matches the surface state pitch field
+       * a bit better than intel_mipmap_tree.  In particular, ISL incorporates
+       * the factor of 2 for W-tiling in row_pitch.
+       */
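+      /* For example, a (hypothetical) W-tiled stencil miptree with
+       * mt->pitch == 128 bytes would be presented to ISL with
+       * row_pitch == 256.
+       */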
+      surf->row_pitch = 2 * mt->pitch;
+   } else {
+      surf->row_pitch = mt->pitch;
+   }
+
+   surf->format = translate_tex_format(brw, mt->format, false);
+
+   if (brw->gen >= 9) {
+      if (surf->dim == ISL_SURF_DIM_1D && surf->tiling == ISL_TILING_LINEAR) {
+         /* For gen9 1-D surfaces, intel_mipmap_tree has a bogus alignment. */
+         surf->image_alignment_el = isl_extent3d(64, 1, 1);
+      } else {
+         /* On gen9+, intel_mipmap_tree stores the horizontal and vertical
+          * alignment in terms of surface elements like we want.
+          */
+         surf->image_alignment_el = isl_extent3d(mt->halign, mt->valign, 1);
+      }
+   } else {
+      /* On earlier gens it's stored in pixels. */
+      unsigned bw, bh;
+      _mesa_get_format_block_size(mt->format, &bw, &bh);
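+      /* For example, a hypothetical DXT1 miptree (4x4 blocks) with
+       * mt->halign == 16 and mt->valign == 16 pixels ends up with an
+       * alignment of 4x4 surface elements here.
+       */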
+      surf->image_alignment_el =
+         isl_extent3d(mt->halign / bw, mt->valign / bh, 1);
+   }
+
+   surf->logical_level0_px.width = mt->logical_width0;
+   surf->logical_level0_px.height = mt->logical_height0;
+   if (surf->dim == ISL_SURF_DIM_3D) {
+      surf->logical_level0_px.depth = mt->logical_depth0;
+      surf->logical_level0_px.array_len = 1;
+   } else {
+      surf->logical_level0_px.depth = 1;
+      surf->logical_level0_px.array_len = mt->logical_depth0;
+   }
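+   /* ISL keeps depth and array length separate: a 2D array texture with six
+    * layers has array_len == 6 and depth == 1, while a 3D texture that is 32
+    * texels deep has depth == 32 and array_len == 1.  The same split applies
+    * to the physical level-0 extent below.
+    */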
+
+   surf->phys_level0_sa.width = mt->physical_width0;
+   surf->phys_level0_sa.height = mt->physical_height0;
+   if (surf->dim == ISL_SURF_DIM_3D) {
+      surf->phys_level0_sa.depth = mt->physical_depth0;
+      surf->phys_level0_sa.array_len = 1;
+   } else {
+      surf->phys_level0_sa.depth = 1;
+      surf->phys_level0_sa.array_len = mt->physical_depth0;
+   }
+
+   surf->levels = mt->last_level + 1;
+   surf->samples = MAX2(mt->num_samples, 1);
+
+   surf->size = 0; /* TODO */
+   surf->alignment = 0; /* TODO */
+
+   switch (surf->dim_layout) {
+   case ISL_DIM_LAYOUT_GEN4_2D:
+   case ISL_DIM_LAYOUT_GEN4_3D:
+      if (brw->gen >= 9) {
+         surf->array_pitch_el_rows = mt->qpitch;
+      } else {
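+         /* Here mt->qpitch counts rows of the main surface, while ISL wants
+          * rows of compression-block elements, hence the division by the
+          * block height.  E.g. a hypothetical DXT1 miptree with
+          * mt->qpitch == 64 yields array_pitch_el_rows == 16.
+          */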
+         unsigned bw, bh;
+         _mesa_get_format_block_size(mt->format, &bw, &bh);
+         assert(mt->qpitch % bh == 0);
+         surf->array_pitch_el_rows = mt->qpitch / bh;
+      }
+      break;
+   case ISL_DIM_LAYOUT_GEN9_1D:
+      surf->array_pitch_el_rows = 1;
+      break;
+   }
+
+   switch (mt->array_layout) {
+   case ALL_LOD_IN_EACH_SLICE:
+      surf->array_pitch_span = ISL_ARRAY_PITCH_SPAN_FULL;
+      break;
+   case ALL_SLICES_AT_EACH_LOD:
+      surf->array_pitch_span = ISL_ARRAY_PITCH_SPAN_COMPACT;
+      break;
+   default:
+      unreachable("Invalid array layout");
+   }
+
+   GLenum base_format = _mesa_get_format_base_format(mt->format);
+   switch (base_format) {
+   case GL_DEPTH_COMPONENT:
+      surf->usage = ISL_SURF_USAGE_DEPTH_BIT | ISL_SURF_USAGE_TEXTURE_BIT;
+      break;
+   case GL_STENCIL_INDEX:
+      surf->usage = ISL_SURF_USAGE_STENCIL_BIT;
+      if (brw->gen >= 8)
+         surf->usage |= ISL_SURF_USAGE_TEXTURE_BIT;
+      break;
+   case GL_DEPTH_STENCIL:
+      /* In this case we only texture from the depth part */
+      surf->usage = ISL_SURF_USAGE_DEPTH_BIT | ISL_SURF_USAGE_STENCIL_BIT |
+                    ISL_SURF_USAGE_TEXTURE_BIT;
+      break;
+   default:
+      surf->usage = ISL_SURF_USAGE_TEXTURE_BIT;
+      if (brw->format_supported_as_render_target[mt->format])
+         surf->usage = ISL_SURF_USAGE_RENDER_TARGET_BIT;
+      break;
+   }
+
+   if (_mesa_is_cube_map_texture(mt->target))
+      surf->usage |= ISL_SURF_USAGE_CUBE_BIT;
+}
+
+/* WARNING: THE SURFACE CREATED BY THIS FUNCTION IS NOT COMPLETE AND CANNOT BE
+ * USED FOR ANY REAL CALCULATIONS.  THE ONLY VALID USE OF SUCH A SURFACE IS TO
+ * PASS IT INTO isl_surf_fill_state.
+ */
+void
+intel_miptree_get_aux_isl_surf(struct brw_context *brw,
+                               const struct intel_mipmap_tree *mt,
+                               struct isl_surf *surf,
+                               enum isl_aux_usage *usage)
+{
+   uint32_t aux_pitch, aux_qpitch;
+   if (mt->mcs_buf) {
+      aux_pitch = mt->mcs_buf->pitch;
+      aux_qpitch = mt->mcs_buf->qpitch;
+
+      if (mt->num_samples > 1) {
+         assert(mt->msaa_layout == INTEL_MSAA_LAYOUT_CMS);
+         *usage = ISL_AUX_USAGE_MCS;
+      } else if (intel_miptree_is_lossless_compressed(brw, mt)) {
+         assert(brw->gen >= 9);
+         *usage = ISL_AUX_USAGE_CCS_E;
+      } else if (!mt->no_ccs) {
+         *usage = ISL_AUX_USAGE_CCS_D;
+      } else {
+         unreachable("Invalid MCS miptree");
+      }
+   } else if (mt->hiz_buf) {
+      if (mt->hiz_buf->mt) {
+         aux_pitch = mt->hiz_buf->mt->pitch;
+         aux_qpitch = mt->hiz_buf->mt->qpitch;
+      } else {
+         aux_pitch = mt->hiz_buf->aux_base.pitch;
+         aux_qpitch = mt->hiz_buf->aux_base.qpitch;
+      }
+
+      *usage = ISL_AUX_USAGE_HIZ;
+   } else {
+      *usage = ISL_AUX_USAGE_NONE;
+      return;
+   }
+
+   /* Start with a copy of the original surface. */
+   intel_miptree_get_isl_surf(brw, mt, surf);
+
+   /* Figure out the format and tiling of the auxiliary surface */
+   switch (*usage) {
+   case ISL_AUX_USAGE_NONE:
+      unreachable("Invalid auxiliary usage");
+
+   case ISL_AUX_USAGE_HIZ:
+      isl_surf_get_hiz_surf(&brw->isl_dev, surf, surf);
+      break;
+
+   case ISL_AUX_USAGE_MCS:
+      /*
+       * From the SKL PRM:
+       *    "When Auxiliary Surface Mode is set to AUX_CCS_D or AUX_CCS_E,
+       *    HALIGN 16 must be used."
+       */
+      if (brw->gen >= 9)
+         assert(mt->halign == 16);
+
+      isl_surf_get_mcs_surf(&brw->isl_dev, surf, surf);
+      break;
+
+   case ISL_AUX_USAGE_CCS_D:
+   case ISL_AUX_USAGE_CCS_E:
+      /*
+       * From the BDW PRM, Volume 2d, page 260 (RENDER_SURFACE_STATE):
+       *
+       *    "When MCS is enabled for non-MSRT, HALIGN_16 must be used"
+       *
+       * From the hardware spec for GEN9:
+       *
+       *    "When Auxiliary Surface Mode is set to AUX_CCS_D or AUX_CCS_E,
+       *    HALIGN 16 must be used."
+       */
+      assert(mt->num_samples <= 1);
+      if (brw->gen >= 8)
+         assert(mt->halign == 16);
+
+      isl_surf_get_ccs_surf(&brw->isl_dev, surf, surf);
+      break;
+   }
+
+   /* We want the pitch of the actual aux buffer. */
+   surf->row_pitch = aux_pitch;
+
+   /* Auxiliary surfaces in ISL have compressed formats and array_pitch_el_rows
+    * is in elements.  This doesn't match intel_mipmap_tree::qpitch which is
+    * in elements of the primary color surface so we have to divide by the
+    * compression block height.
+    */
+   surf->array_pitch_el_rows =
+      aux_qpitch / isl_format_get_layout(surf->format)->bh;
+}
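+
+/* A minimal sketch of the intended use of the pair produced above, per the
+ * warning on this function (state, surf and view are illustrative
+ * placeholders, not names from this patch):
+ *
+ *    struct isl_surf aux_surf;
+ *    enum isl_aux_usage aux_usage;
+ *    intel_miptree_get_aux_isl_surf(brw, mt, &aux_surf, &aux_usage);
+ *
+ *    isl_surf_fill_state(&brw->isl_dev, state,
+ *                        .surf = &surf, .view = &view,
+ *                        .aux_surf = &aux_surf, .aux_usage = aux_usage);
+ */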
+
+union isl_color_value
+intel_miptree_get_isl_clear_color(struct brw_context *brw,
+                                  const struct intel_mipmap_tree *mt)
+{
+   union isl_color_value clear_color;
+
+   if (_mesa_get_format_base_format(mt->format) == GL_DEPTH_COMPONENT) {
+      clear_color.i32[0] = mt->depth_clear_value;
+      clear_color.i32[1] = 0;
+      clear_color.i32[2] = 0;
+      clear_color.i32[3] = 0;
+   } else if (brw->gen >= 9) {
+      clear_color.i32[0] = mt->gen9_fast_clear_color.i[0];
+      clear_color.i32[1] = mt->gen9_fast_clear_color.i[1];
+      clear_color.i32[2] = mt->gen9_fast_clear_color.i[2];
+      clear_color.i32[3] = mt->gen9_fast_clear_color.i[3];
+   } else if (_mesa_is_format_integer(mt->format)) {
+      clear_color.i32[0] = (mt->fast_clear_color_value & (1u << 31)) != 0;
+      clear_color.i32[1] = (mt->fast_clear_color_value & (1u << 30)) != 0;
+      clear_color.i32[2] = (mt->fast_clear_color_value & (1u << 29)) != 0;
+      clear_color.i32[3] = (mt->fast_clear_color_value & (1u << 28)) != 0;
+   } else {
+      clear_color.f32[0] = (mt->fast_clear_color_value & (1u << 31)) != 0;
+      clear_color.f32[1] = (mt->fast_clear_color_value & (1u << 30)) != 0;
+      clear_color.f32[2] = (mt->fast_clear_color_value & (1u << 29)) != 0;
+      clear_color.f32[3] = (mt->fast_clear_color_value & (1u << 28)) != 0;
+   }
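+   /* On the pre-gen9 paths above, each channel of the clear color can only
+    * be 0 or 1: bits 31..28 of fast_clear_color_value select channels 0..3.
+    * For example, a (hypothetical) value of 0x90000000 decodes to
+    * (1, 0, 0, 1).
+    */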
+
+   return clear_color;
+}