iris: use the images_used mask in resolve pass
[mesa.git] / src / gallium / drivers / iris / iris_resolve.c
index 20b25c7b76b96dbeabc948b7e91e03ae4d6a14c7..a46d7bf45b33aaddb416a67e8fd5eb5111de1e3c 100644 (file)
 #include "util/hash_table.h"
 #include "util/set.h"
 #include "iris_context.h"
+#include "compiler/nir/nir.h"
 
-static void
-resolve_sampler_views(struct iris_batch *batch,
-                      struct iris_shader_state *shs)
+/**
+ * Disable auxiliary buffers if a renderbuffer is also bound as a texture
+ * or shader image.  Such a binding creates a self-dependency: rendering
+ * and sampling may concurrently read or write the CCS buffer, producing
+ * incorrect pixels.
+ */
+static bool
+disable_rb_aux_buffer(struct iris_context *ice,
+                      bool *draw_aux_buffer_disabled,
+                      struct iris_resource *tex_res,
+                      unsigned min_level, unsigned num_levels,
+                      const char *usage)
 {
-   for (int i = 0; i < shs->num_textures; i++) {
-      struct iris_sampler_view *isv = shs->textures[i];
-      if (!isv)
+   struct pipe_framebuffer_state *cso_fb = &ice->state.framebuffer;
+   bool found = false;
+
+   /* We only need to worry about color compression and fast clears. */
+   if (tex_res->aux.usage != ISL_AUX_USAGE_CCS_D &&
+       tex_res->aux.usage != ISL_AUX_USAGE_CCS_E)
+      return false;
+
+   for (unsigned i = 0; i < cso_fb->nr_cbufs; i++) {
+      struct iris_surface *surf = (void *) cso_fb->cbufs[i];
+      if (!surf)
          continue;
 
+      struct iris_resource *rb_res = (void *) surf->base.texture;
+
+      if (rb_res->bo == tex_res->bo &&
+          surf->base.u.tex.level >= min_level &&
+          surf->base.u.tex.level < min_level + num_levels) {
+         found = draw_aux_buffer_disabled[i] = true;
+      }
+   }
+
+   if (found) {
+      perf_debug(&ice->dbg,
+                 "Disabling CCS because a renderbuffer is also bound %s.\n",
+                 usage);
+   }
+
+   return found;
+}
+
+static void
+resolve_sampler_views(struct iris_context *ice,
+                      struct iris_batch *batch,
+                      struct iris_shader_state *shs,
+                      const struct shader_info *info,
+                      bool *draw_aux_buffer_disabled,
+                      bool consider_framebuffer)
+{
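+   /* Only walk sampler views that are both bound and actually read by the
+    * shader: intersect the bound mask with NIR's textures_used bitfield. */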
+   uint32_t views = info ? (shs->bound_sampler_views & info->textures_used) : 0;
+
+   while (views) {
+      const int i = u_bit_scan(&views);
+      struct iris_sampler_view *isv = shs->textures[i];
       struct iris_resource *res = (void *) isv->base.texture;
 
-      // XXX: aux tracking
+      if (res->base.target != PIPE_BUFFER) {
+         if (consider_framebuffer) {
+            disable_rb_aux_buffer(ice, draw_aux_buffer_disabled,
+                                  res, isv->view.base_level, isv->view.levels,
+                                  "for sampling");
+         }
+
+         iris_resource_prepare_texture(ice, batch, res, isv->view.format,
+                                       isv->view.base_level, isv->view.levels,
+                                       isv->view.base_array_layer,
+                                       isv->view.array_len);
+      }
+
+      iris_cache_flush_for_read(batch, res->bo);
+   }
+}
+
+static void
+resolve_image_views(struct iris_context *ice,
+                    struct iris_batch *batch,
+                    struct iris_shader_state *shs,
+                    const struct shader_info *info,
+                    bool *draw_aux_buffer_disabled,
+                    bool consider_framebuffer)
+{
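+   /* Intersect the bound image views with NIR's images_used bitfield, so
+    * we only resolve images the shader can actually access. */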
+   uint32_t views = info ? (shs->bound_image_views & info->images_used) : 0;
+
+   while (views) {
+      const int i = u_bit_scan(&views);
+      struct pipe_image_view *pview = &shs->image[i].base;
+      struct iris_resource *res = (void *) pview->resource;
+
+      if (res->base.target != PIPE_BUFFER) {
+         if (consider_framebuffer) {
+            disable_rb_aux_buffer(ice, draw_aux_buffer_disabled,
+                                  res, pview->u.tex.level, 1,
+                                  "as a shader image");
+         }
+
+         unsigned num_layers =
+            pview->u.tex.last_layer - pview->u.tex.first_layer + 1;
+
+         /* The data port doesn't understand any compression */
+         iris_resource_prepare_access(ice, batch, res,
+                                      pview->u.tex.level, 1,
+                                      pview->u.tex.first_layer, num_layers,
+                                      ISL_AUX_USAGE_NONE, false);
+      }
+
       iris_cache_flush_for_read(batch, res->bo);
    }
 }
 
 /**
  * \brief Resolve buffers before drawing.
  *
@@ -59,38 +157,101 @@ resolve_sampler_views(struct iris_batch *batch,
  */
 void
 iris_predraw_resolve_inputs(struct iris_context *ice,
-                            struct iris_batch *batch)
+                            struct iris_batch *batch,
+                            bool *draw_aux_buffer_disabled,
+                            gl_shader_stage stage,
+                            bool consider_framebuffer)
 {
-   for (gl_shader_stage stage = 0; stage < MESA_SHADER_STAGES; stage++) {
-      struct iris_shader_state *shs = &ice->state.shaders[stage];
-      resolve_sampler_views(batch, shs);
-   }
+   struct iris_shader_state *shs = &ice->state.shaders[stage];
+   const struct shader_info *info = iris_get_shader_info(ice, stage);
 
-   // XXX: storage images
+   uint64_t dirty = (IRIS_DIRTY_BINDINGS_VS << stage) |
+                    (consider_framebuffer ? IRIS_DIRTY_BINDINGS_FS : 0);
+
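+   /* Skip the resolve walk entirely unless this stage's bindings (or the
+    * FS bindings, when the framebuffer matters) have been flagged dirty. */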
+   if (ice->state.dirty & dirty) {
+      resolve_sampler_views(ice, batch, shs, info, draw_aux_buffer_disabled,
+                            consider_framebuffer);
+      resolve_image_views(ice, batch, shs, info, draw_aux_buffer_disabled,
+                          consider_framebuffer);
+   }
 }
 
 void
 iris_predraw_resolve_framebuffer(struct iris_context *ice,
-                                 struct iris_batch *batch)
+                                 struct iris_batch *batch,
+                                 bool *draw_aux_buffer_disabled)
 {
    struct pipe_framebuffer_state *cso_fb = &ice->state.framebuffer;
-   struct pipe_surface *zs_surf = cso_fb->zsbuf;
+   struct iris_screen *screen = (void *) ice->ctx.screen;
+   struct gen_device_info *devinfo = &screen->devinfo;
+   struct iris_uncompiled_shader *ish =
+      ice->shaders.uncompiled[MESA_SHADER_FRAGMENT];
+   const nir_shader *nir = ish->nir;
 
-   if (zs_surf) {
-      // XXX: HiZ resolves
+   if (ice->state.dirty & IRIS_DIRTY_DEPTH_BUFFER) {
+      struct pipe_surface *zs_surf = cso_fb->zsbuf;
+
+      if (zs_surf) {
+         struct iris_resource *z_res, *s_res;
+         iris_get_depth_stencil_resources(zs_surf->texture, &z_res, &s_res);
+         unsigned num_layers =
+            zs_surf->u.tex.last_layer - zs_surf->u.tex.first_layer + 1;
+
+         if (z_res) {
+            iris_resource_prepare_depth(ice, batch, z_res,
+                                        zs_surf->u.tex.level,
+                                        zs_surf->u.tex.first_layer,
+                                        num_layers);
+            iris_cache_flush_for_depth(batch, z_res->bo);
+         }
+
+         if (s_res) {
+            iris_cache_flush_for_depth(batch, s_res->bo);
+         }
+      }
    }
 
-   for (unsigned i = 0; i < cso_fb->nr_cbufs; i++) {
-      struct iris_surface *surf = (void *) cso_fb->cbufs[i];
-      if (!surf)
-         continue;
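+   /* outputs_read != 0 means the FS reads the framebuffer (fb fetch).
+    * Gen8 implements this by texturing from the color buffers, so they
+    * must first be resolved to a sampleable state. */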
+   if (devinfo->gen == 8 && nir->info.outputs_read != 0) {
+      for (unsigned i = 0; i < cso_fb->nr_cbufs; i++) {
+         if (cso_fb->cbufs[i]) {
+            struct iris_surface *surf = (void *) cso_fb->cbufs[i];
+            struct iris_resource *res = (void *) cso_fb->cbufs[i]->texture;
 
-      struct iris_resource *res = (void *) surf->base.texture;
+            iris_resource_prepare_texture(ice, batch, res, surf->view.format,
+                                          surf->view.base_level, 1,
+                                          surf->view.base_array_layer,
+                                          surf->view.array_len);
+         }
+      }
+   }
+
+   if (ice->state.dirty & (IRIS_DIRTY_BINDINGS_FS | IRIS_DIRTY_BLEND_STATE)) {
+      for (unsigned i = 0; i < cso_fb->nr_cbufs; i++) {
+         struct iris_surface *surf = (void *) cso_fb->cbufs[i];
+         if (!surf)
+            continue;
+
+         struct iris_resource *res = (void *) surf->base.texture;
 
-      // XXX: aux tracking
+         enum isl_aux_usage aux_usage =
+            iris_resource_render_aux_usage(ice, res, surf->view.format,
+                                           ice->state.blend_enables & (1u << i),
+                                           draw_aux_buffer_disabled[i]);
 
-      iris_cache_flush_for_render(batch, res->bo, surf->view.format,
-                                  ISL_AUX_USAGE_NONE);
+         if (ice->state.draw_aux_usage[i] != aux_usage) {
+            ice->state.draw_aux_usage[i] = aux_usage;
+            /* XXX: Need to track which bindings to make dirty */
+            ice->state.dirty |= IRIS_ALL_DIRTY_BINDINGS;
+         }
+
+         iris_resource_prepare_render(ice, batch, res, surf->view.base_level,
+                                      surf->view.base_array_layer,
+                                      surf->view.array_len,
+                                      aux_usage);
+
+         iris_cache_flush_for_render(batch, res->bo, surf->view.format,
+                                     aux_usage);
+      }
    }
 }
 
@@ -111,39 +272,65 @@ iris_postdraw_update_resolve_tracking(struct iris_context *ice,
                                       struct iris_batch *batch)
 {
    struct pipe_framebuffer_state *cso_fb = &ice->state.framebuffer;
-   struct pipe_surface *zs_surf = cso_fb->zsbuf;
 
    // XXX: front buffer drawing?
 
+   bool may_have_resolved_depth =
+      ice->state.dirty & (IRIS_DIRTY_DEPTH_BUFFER |
+                          IRIS_DIRTY_WM_DEPTH_STENCIL);
+
+   struct pipe_surface *zs_surf = cso_fb->zsbuf;
    if (zs_surf) {
       struct iris_resource *z_res, *s_res;
       iris_get_depth_stencil_resources(zs_surf->texture, &z_res, &s_res);
+      unsigned num_layers =
+         zs_surf->u.tex.last_layer - zs_surf->u.tex.first_layer + 1;
 
       if (z_res) {
-         // XXX: aux tracking
+         if (may_have_resolved_depth) {
+            iris_resource_finish_depth(ice, z_res, zs_surf->u.tex.level,
+                                       zs_surf->u.tex.first_layer, num_layers,
+                                       ice->state.depth_writes_enabled);
+         }
 
          if (ice->state.depth_writes_enabled)
             iris_depth_cache_add_bo(batch, z_res->bo);
       }
 
       if (s_res) {
-         // XXX: aux tracking
+         if (may_have_resolved_depth && ice->state.stencil_writes_enabled) {
+            iris_resource_finish_write(ice, s_res, zs_surf->u.tex.level,
+                                       zs_surf->u.tex.first_layer, num_layers,
+                                       s_res->aux.usage);
+         }
 
          if (ice->state.stencil_writes_enabled)
-            iris_depth_cache_add_bo(batch, z_res->bo);
+            iris_depth_cache_add_bo(batch, s_res->bo);
       }
    }
 
+   bool may_have_resolved_color =
+      ice->state.dirty & (IRIS_DIRTY_BINDINGS_FS | IRIS_DIRTY_BLEND_STATE);
+
    for (unsigned i = 0; i < cso_fb->nr_cbufs; i++) {
       struct iris_surface *surf = (void *) cso_fb->cbufs[i];
       if (!surf)
          continue;
 
       struct iris_resource *res = (void *) surf->base.texture;
+      enum isl_aux_usage aux_usage = ice->state.draw_aux_usage[i];
 
-      // XXX: aux tracking
       iris_render_cache_add_bo(batch, res->bo, surf->view.format,
-                               ISL_AUX_USAGE_NONE);
+                               aux_usage);
+
+      if (may_have_resolved_color) {
+         union pipe_surface_desc *desc = &surf->base.u;
+         unsigned num_layers =
+            desc->tex.last_layer - desc->tex.first_layer + 1;
+         iris_resource_finish_render(ice, res, desc->tex.level,
+                                     desc->tex.first_layer, num_layers,
+                                     aux_usage);
+      }
    }
 }
 
@@ -153,11 +340,9 @@ iris_postdraw_update_resolve_tracking(struct iris_context *ice,
 void
 iris_cache_sets_clear(struct iris_batch *batch)
 {
-   struct hash_entry *render_entry;
    hash_table_foreach(batch->cache.render, render_entry)
       _mesa_hash_table_remove(batch->cache.render, render_entry);
 
-   struct set_entry *depth_entry;
    set_foreach(batch->cache.depth, depth_entry)
       _mesa_set_remove(batch->cache.depth, depth_entry);
 }
@@ -178,11 +363,13 @@ void
 iris_flush_depth_and_render_caches(struct iris_batch *batch)
 {
    iris_emit_pipe_control_flush(batch,
+                                "cache tracker: render-to-texture",
                                 PIPE_CONTROL_DEPTH_CACHE_FLUSH |
                                 PIPE_CONTROL_RENDER_TARGET_FLUSH |
                                 PIPE_CONTROL_CS_STALL);
 
    iris_emit_pipe_control_flush(batch,
+                                "cache tracker: render-to-texture",
                                 PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE |
                                 PIPE_CONTROL_CONST_CACHE_INVALIDATE);
 
@@ -193,8 +380,8 @@ void
 iris_cache_flush_for_read(struct iris_batch *batch,
                           struct iris_bo *bo)
 {
-   if (_mesa_hash_table_search(batch->cache.render, bo) ||
-       _mesa_set_search(batch->cache.depth, bo))
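+   /* bo->hash holds a precomputed hash of the BO pointer; the pre-hashed
+    * lookups let us skip rehashing the key on every cache query. */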
+   if (_mesa_hash_table_search_pre_hashed(batch->cache.render, bo->hash, bo) ||
+       _mesa_set_search_pre_hashed(batch->cache.depth, bo->hash, bo))
       iris_flush_depth_and_render_caches(batch);
 }
 
@@ -210,7 +397,7 @@ iris_cache_flush_for_render(struct iris_batch *batch,
                             enum isl_format format,
                             enum isl_aux_usage aux_usage)
 {
-   if (_mesa_set_search(batch->cache.depth, bo))
+   if (_mesa_set_search_pre_hashed(batch->cache.depth, bo->hash, bo))
       iris_flush_depth_and_render_caches(batch);
 
    /* Check to see if this bo has been used by a previous rendering operation
@@ -236,7 +423,8 @@ iris_cache_flush_for_render(struct iris_batch *batch,
     * and flush on format changes too.  We can always relax this later if we
     * find it to be a performance problem.
     */
-   struct hash_entry *entry = _mesa_hash_table_search(batch->cache.render, bo);
+   struct hash_entry *entry =
+      _mesa_hash_table_search_pre_hashed(batch->cache.render, bo->hash, bo);
    if (entry && entry->data != format_aux_tuple(format, aux_usage))
       iris_flush_depth_and_render_caches(batch);
 }
@@ -248,7 +436,8 @@ iris_render_cache_add_bo(struct iris_batch *batch,
                          enum isl_aux_usage aux_usage)
 {
 #ifndef NDEBUG
-   struct hash_entry *entry = _mesa_hash_table_search(batch->cache.render, bo);
+   struct hash_entry *entry =
+      _mesa_hash_table_search_pre_hashed(batch->cache.render, bo->hash, bo);
    if (entry) {
       /* Otherwise, someone didn't do a flush_for_render and that would be
        * very bad indeed.
@@ -257,20 +446,670 @@ iris_render_cache_add_bo(struct iris_batch *batch,
    }
 #endif
 
-   _mesa_hash_table_insert(batch->cache.render, bo,
-                           format_aux_tuple(format, aux_usage));
+   _mesa_hash_table_insert_pre_hashed(batch->cache.render, bo->hash, bo,
+                                      format_aux_tuple(format, aux_usage));
 }
 
 void
 iris_cache_flush_for_depth(struct iris_batch *batch,
                            struct iris_bo *bo)
 {
-   if (_mesa_hash_table_search(batch->cache.render, bo))
+   if (_mesa_hash_table_search_pre_hashed(batch->cache.render, bo->hash, bo))
       iris_flush_depth_and_render_caches(batch);
 }
 
 void
 iris_depth_cache_add_bo(struct iris_batch *batch, struct iris_bo *bo)
 {
-   _mesa_set_add(batch->cache.depth, bo);
+   _mesa_set_add_pre_hashed(batch->cache.depth, bo->hash, bo);
+}
+
+static void
+iris_resolve_color(struct iris_context *ice,
+                   struct iris_batch *batch,
+                   struct iris_resource *res,
+                   unsigned level, unsigned layer,
+                   enum isl_aux_op resolve_op)
+{
+   //DBG("%s to mt %p level %u layer %u\n", __FUNCTION__, mt, level, layer);
+
+   struct blorp_surf surf;
+   iris_blorp_surf_for_resource(&ice->vtbl, &batch->screen->isl_dev, &surf,
+                                &res->base, res->aux.usage, level, true);
+
+   iris_batch_maybe_flush(batch, 1500);
+
+   /* Ivybridge PRM Vol 2, Part 1, "11.7 MCS Buffer for Render Target(s)":
+    *
+    *    "Any transition from any value in {Clear, Render, Resolve} to a
+    *     different value in {Clear, Render, Resolve} requires end of pipe
+    *     synchronization."
+    *
+    * In other words, fast clear ops are not properly synchronized with
+    * other drawing.  We need to use a PIPE_CONTROL to ensure that the
+    * contents of the previous draw hit the render target before we resolve
+    * and again afterwards to ensure that the resolve is complete before we
+    * do any more regular drawing.
+    */
+   iris_emit_end_of_pipe_sync(batch, "color resolve: pre-flush",
+                              PIPE_CONTROL_RENDER_TARGET_FLUSH);
+
+   struct blorp_batch blorp_batch;
+   blorp_batch_init(&ice->blorp, &blorp_batch, batch, 0);
+   /* On Gen >= 12, a stencil buffer with lossless compression must be
+    * resolved with the WM_HZ_OP packet.
+    */
+   if (res->aux.usage == ISL_AUX_USAGE_STC_CCS) {
+      blorp_hiz_stencil_op(&blorp_batch, &surf, level, layer,
+                           1, resolve_op);
+   } else {
+      blorp_ccs_resolve(&blorp_batch, &surf, level, layer, 1,
+                        isl_format_srgb_to_linear(res->surf.format),
+                        resolve_op);
+   }
+   blorp_batch_finish(&blorp_batch);
+
+   /* See comment above */
+   iris_emit_end_of_pipe_sync(batch, "color resolve: post-flush",
+                              PIPE_CONTROL_RENDER_TARGET_FLUSH);
+}
+
+static void
+iris_mcs_partial_resolve(struct iris_context *ice,
+                         struct iris_batch *batch,
+                         struct iris_resource *res,
+                         uint32_t start_layer,
+                         uint32_t num_layers)
+{
+   //DBG("%s to mt %p layers %u-%u\n", __FUNCTION__, mt,
+       //start_layer, start_layer + num_layers - 1);
+
+   assert(isl_aux_usage_has_mcs(res->aux.usage));
+
+   struct blorp_surf surf;
+   iris_blorp_surf_for_resource(&ice->vtbl, &batch->screen->isl_dev, &surf,
+                                &res->base, res->aux.usage, 0, true);
+
+   struct blorp_batch blorp_batch;
+   blorp_batch_init(&ice->blorp, &blorp_batch, batch, 0);
+   blorp_mcs_partial_resolve(&blorp_batch, &surf,
+                             isl_format_srgb_to_linear(res->surf.format),
+                             start_layer, num_layers);
+   blorp_batch_finish(&blorp_batch);
+}
+
+
+/**
+ * Return true if the format that will be used to access the resource is
+ * CCS_E-compatible with the resource's linear/non-sRGB format.
+ *
+ * Why use the linear format?  Although the resource may be specified with
+ * an sRGB format, use of that color space/format can be toggled.  Since our
+ * HW tends to support more linear formats than sRGB ones, we check CCS_E
+ * compatibility against the linear variant.
+ */
+static bool
+format_ccs_e_compat_with_resource(const struct gen_device_info *devinfo,
+                                  const struct iris_resource *res,
+                                  enum isl_format access_format)
+{
+   assert(res->aux.usage == ISL_AUX_USAGE_CCS_E);
+
+   enum isl_format isl_format = isl_format_srgb_to_linear(res->surf.format);
+   return isl_formats_are_ccs_e_compatible(devinfo, isl_format, access_format);
+}
+
+bool
+iris_sample_with_depth_aux(const struct gen_device_info *devinfo,
+                           const struct iris_resource *res)
+{
+   switch (res->aux.usage) {
+   case ISL_AUX_USAGE_HIZ:
+      if (devinfo->has_sample_with_hiz)
+         break;
+      return false;
+   case ISL_AUX_USAGE_HIZ_CCS:
+      return false;
+   case ISL_AUX_USAGE_HIZ_CCS_WT:
+      break;
+   default:
+      return false;
+   }
+
+   /* It seems the hardware won't fall back to the depth buffer if some of
+    * the mipmap levels aren't available in the HiZ buffer.  So we need all
+    * levels of the texture to be HiZ enabled.
+    */
+   for (unsigned level = 0; level < res->surf.levels; ++level) {
+      if (!iris_resource_level_has_hiz(res, level))
+         return false;
+   }
+
+   /* If compressed multisampling is enabled, then we use it for the auxiliary
+    * buffer instead.
+    *
+    * From the BDW PRM (Volume 2d: Command Reference: Structures
+    *                   RENDER_SURFACE_STATE.AuxiliarySurfaceMode):
+    *
+    *  "If this field is set to AUX_HIZ, Number of Multisamples must be
+    *   MULTISAMPLECOUNT_1, and Surface Type cannot be SURFTYPE_3D."
+    *
+    * There is no such blurb for 1D textures, but there is sufficient evidence
+    * that this is broken on SKL+.
+    */
+   // XXX: i965 disables this for arrays too, is that reasonable?
+   return res->surf.samples == 1 && res->surf.dim == ISL_SURF_DIM_2D;
+}
+
+/**
+ * Perform a HiZ or depth resolve operation.
+ *
+ * For an overview of HiZ ops, see the following sections of the Sandy Bridge
+ * PRM, Volume 1, Part 2:
+ *   - 7.5.3.1 Depth Buffer Clear
+ *   - 7.5.3.2 Depth Buffer Resolve
+ *   - 7.5.3.3 Hierarchical Depth Buffer Resolve
+ */
+void
+iris_hiz_exec(struct iris_context *ice,
+              struct iris_batch *batch,
+              struct iris_resource *res,
+              unsigned int level, unsigned int start_layer,
+              unsigned int num_layers, enum isl_aux_op op,
+              bool update_clear_depth)
+{
+   assert(iris_resource_level_has_hiz(res, level));
+   assert(op != ISL_AUX_OP_NONE);
+   UNUSED const char *name = NULL;
+
+   switch (op) {
+   case ISL_AUX_OP_FULL_RESOLVE:
+      name = "depth resolve";
+      break;
+   case ISL_AUX_OP_AMBIGUATE:
+      name = "hiz ambiguate";
+      break;
+   case ISL_AUX_OP_FAST_CLEAR:
+      name = "depth clear";
+      break;
+   case ISL_AUX_OP_PARTIAL_RESOLVE:
+   case ISL_AUX_OP_NONE:
+      unreachable("Invalid HiZ op");
+   }
+
+   //DBG("%s %s to mt %p level %d layers %d-%d\n",
+       //__func__, name, mt, level, start_layer, start_layer + num_layers - 1);
+
+   /* The following stalls and flushes are only documented to be required
+    * for HiZ clear operations.  However, they also seem to be required for
+    * resolve operations.
+    *
+    * From the Ivybridge PRM, volume 2, "Depth Buffer Clear":
+    *
+    *   "If other rendering operations have preceded this clear, a
+    *    PIPE_CONTROL with depth cache flush enabled, Depth Stall bit
+    *    enabled must be issued before the rectangle primitive used for
+    *    the depth buffer clear operation."
+    *
+    * Same applies for Gen8 and Gen9.
+    *
+    * In addition, from the Ivybridge PRM, volume 2, 1.10.4.1
+    * PIPE_CONTROL, Depth Cache Flush Enable:
+    *
+    *   "This bit must not be set when Depth Stall Enable bit is set in
+    *    this packet."
+    *
+    * This is confirmed to hold in practice: Haswell gets immediate GPU hangs.
+    *
+    * Therefore issue two pipe control flushes, one for cache flush and
+    * another for depth stall.
+    */
+   iris_emit_pipe_control_flush(batch,
+                                "hiz op: pre-flushes (1/2)",
+                                PIPE_CONTROL_DEPTH_CACHE_FLUSH |
+                                PIPE_CONTROL_CS_STALL);
+
+   iris_emit_pipe_control_flush(batch, "hiz op: pre-flushes (2/2)",
+                                PIPE_CONTROL_DEPTH_STALL);
+
+   assert(isl_aux_usage_has_hiz(res->aux.usage) && res->aux.bo);
+
+   iris_batch_maybe_flush(batch, 1500);
+
+   struct blorp_surf surf;
+   iris_blorp_surf_for_resource(&ice->vtbl, &batch->screen->isl_dev, &surf,
+                                &res->base, res->aux.usage, level, true);
+
+   struct blorp_batch blorp_batch;
+   enum blorp_batch_flags flags = 0;
+   flags |= update_clear_depth ? 0 : BLORP_BATCH_NO_UPDATE_CLEAR_COLOR;
+   blorp_batch_init(&ice->blorp, &blorp_batch, batch, flags);
+   blorp_hiz_op(&blorp_batch, &surf, level, start_layer, num_layers, op);
+   blorp_batch_finish(&blorp_batch);
+
+   /* The following stalls and flushes are only documented to be required
+    * for HiZ clear operations.  However, they also seem to be required for
+    * resolve operations.
+    *
+    * From the Broadwell PRM, volume 7, "Depth Buffer Clear":
+    *
+    *    "Depth buffer clear pass using any of the methods (WM_STATE,
+    *     3DSTATE_WM or 3DSTATE_WM_HZ_OP) must be followed by a
+    *     PIPE_CONTROL command with DEPTH_STALL bit and Depth FLUSH bits
+    *     "set" before starting to render.  DepthStall and DepthFlush are
+    *     not needed between consecutive depth clear passes nor is it
+    *     required if the depth clear pass was done with
+    *     'full_surf_clear' bit set in the 3DSTATE_WM_HZ_OP."
+    *
+    * TODO: As the spec says, this could be conditional.
+    */
+   iris_emit_pipe_control_flush(batch,
+                                "hiz op: post flush",
+                                PIPE_CONTROL_DEPTH_CACHE_FLUSH |
+                                PIPE_CONTROL_DEPTH_STALL);
+}
+
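+/* HiZ may be enabled for only a subset of miplevels, so it requires a
+ * per-level check; every other aux usage covers the whole surface. */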
+static bool
+level_has_aux(const struct iris_resource *res, uint32_t level)
+{
+   return isl_aux_usage_has_hiz(res->aux.usage) ?
+          iris_resource_level_has_hiz(res, level) :
+          res->aux.usage != ISL_AUX_USAGE_NONE;
+}
+
+/**
+ * Does the given miplevel of the resource have HiZ enabled?
+ */
+bool
+iris_resource_level_has_hiz(const struct iris_resource *res, uint32_t level)
+{
+   iris_resource_check_level_layer(res, level, 0);
+   return res->aux.has_hiz & 1 << level;
+}
+
+/** \brief Assert that the level and layer are valid for the resource. */
+void
+iris_resource_check_level_layer(UNUSED const struct iris_resource *res,
+                                UNUSED uint32_t level, UNUSED uint32_t layer)
+{
+   assert(level < res->surf.levels);
+   assert(layer < util_num_layers(&res->base, level));
+}
+
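+/* For example, with res->surf.levels == 10, the helper below yields:
+ *
+ *    miptree_level_range_length(res, 3, INTEL_REMAINING_LEVELS) == 7
+ *    miptree_level_range_length(res, 3, 4)                      == 4
+ */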
+static inline uint32_t
+miptree_level_range_length(const struct iris_resource *res,
+                           uint32_t start_level, uint32_t num_levels)
+{
+   assert(start_level < res->surf.levels);
+
+   if (num_levels == INTEL_REMAINING_LEVELS)
+      num_levels = res->surf.levels - start_level;
+
+   /* Check for overflow */
+   assert(start_level + num_levels >= start_level);
+   assert(start_level + num_levels <= res->surf.levels);
+
+   return num_levels;
+}
+
+static inline uint32_t
+miptree_layer_range_length(const struct iris_resource *res, uint32_t level,
+                           uint32_t start_layer, uint32_t num_layers)
+{
+   assert(level <= res->base.last_level);
+
+   const uint32_t total_num_layers = iris_get_num_logical_layers(res, level);
+   assert(start_layer < total_num_layers);
+   if (num_layers == INTEL_REMAINING_LAYERS)
+      num_layers = total_num_layers - start_layer;
+   /* Check for overflow */
+   assert(start_layer + num_layers >= start_layer);
+   assert(start_layer + num_layers <= total_num_layers);
+
+   return num_layers;
+}
+
+bool
+iris_has_color_unresolved(const struct iris_resource *res,
+                          unsigned start_level, unsigned num_levels,
+                          unsigned start_layer, unsigned num_layers)
+{
+   if (!res->aux.bo)
+      return false;
+
+   /* Clamp the level range to fit the resource */
+   num_levels = miptree_level_range_length(res, start_level, num_levels);
+
+   for (uint32_t l = 0; l < num_levels; l++) {
+      const uint32_t level = start_level + l;
+      const uint32_t level_layers =
+         miptree_layer_range_length(res, level, start_layer, num_layers);
+      for (unsigned a = 0; a < level_layers; a++) {
+         enum isl_aux_state aux_state =
+            iris_resource_get_aux_state(res, level, start_layer + a);
+         assert(aux_state != ISL_AUX_STATE_AUX_INVALID);
+         if (aux_state != ISL_AUX_STATE_PASS_THROUGH)
+            return true;
+      }
+   }
+
+   return false;
+}
+
+void
+iris_resource_prepare_access(struct iris_context *ice,
+                             struct iris_batch *batch,
+                             struct iris_resource *res,
+                             uint32_t start_level, uint32_t num_levels,
+                             uint32_t start_layer, uint32_t num_layers,
+                             enum isl_aux_usage aux_usage,
+                             bool fast_clear_supported)
+{
+   const uint32_t clamped_levels =
+      miptree_level_range_length(res, start_level, num_levels);
+   for (uint32_t l = 0; l < clamped_levels; l++) {
+      const uint32_t level = start_level + l;
+      if (!level_has_aux(res, level))
+         continue;
+
+      const uint32_t level_layers =
+         miptree_layer_range_length(res, level, start_layer, num_layers);
+      for (uint32_t a = 0; a < level_layers; a++) {
+         const uint32_t layer = start_layer + a;
+         const enum isl_aux_state aux_state =
+            iris_resource_get_aux_state(res, level, layer);
+         const enum isl_aux_op aux_op =
+            isl_aux_prepare_access(aux_state, aux_usage, fast_clear_supported);
+
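+         /* Execute whatever resolve ISL requested, using the mechanism
+          * that matches the surface's aux type, then record the state. */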
+         if (aux_op == ISL_AUX_OP_NONE) {
+            /* Nothing to do here. */
+         } else if (isl_aux_usage_has_mcs(res->aux.usage)) {
+            assert(aux_op == ISL_AUX_OP_PARTIAL_RESOLVE);
+            iris_mcs_partial_resolve(ice, batch, res, layer, 1);
+         } else if (isl_aux_usage_has_hiz(res->aux.usage)) {
+            iris_hiz_exec(ice, batch, res, level, layer, 1, aux_op, false);
+         } else {
+            assert(isl_aux_usage_has_ccs(res->aux.usage));
+            iris_resolve_color(ice, batch, res, level, layer, aux_op);
+         }
+
+         const enum isl_aux_state new_state =
+            isl_aux_state_transition_aux_op(aux_state, res->aux.usage, aux_op);
+         iris_resource_set_aux_state(ice, res, level, layer, 1, new_state);
+      }
+   }
+}
+
+void
+iris_resource_finish_write(struct iris_context *ice,
+                           struct iris_resource *res, uint32_t level,
+                           uint32_t start_layer, uint32_t num_layers,
+                           enum isl_aux_usage aux_usage)
+{
+   if (!level_has_aux(res, level))
+      return;
+
+   const uint32_t level_layers =
+      miptree_layer_range_length(res, level, start_layer, num_layers);
+
+   for (uint32_t a = 0; a < level_layers; a++) {
+      const uint32_t layer = start_layer + a;
+      const enum isl_aux_state aux_state =
+         iris_resource_get_aux_state(res, level, layer);
+      const enum isl_aux_state new_aux_state =
+         isl_aux_state_transition_write(aux_state, aux_usage, false);
+      iris_resource_set_aux_state(ice, res, level, layer, 1, new_aux_state);
+   }
+}
+
+enum isl_aux_state
+iris_resource_get_aux_state(const struct iris_resource *res,
+                            uint32_t level, uint32_t layer)
+{
+   iris_resource_check_level_layer(res, level, layer);
+
+   if (res->surf.usage & ISL_SURF_USAGE_DEPTH_BIT) {
+      assert(iris_resource_level_has_hiz(res, level));
+   } else {
+      assert(res->surf.samples == 1 ||
+             res->surf.msaa_layout == ISL_MSAA_LAYOUT_ARRAY);
+   }
+
+   return res->aux.state[level][layer];
+}
+
+void
+iris_resource_set_aux_state(struct iris_context *ice,
+                            struct iris_resource *res, uint32_t level,
+                            uint32_t start_layer, uint32_t num_layers,
+                            enum isl_aux_state aux_state)
+{
+   num_layers = miptree_layer_range_length(res, level, start_layer, num_layers);
+
+   if (res->surf.usage & ISL_SURF_USAGE_DEPTH_BIT) {
+      assert(iris_resource_level_has_hiz(res, level));
+   } else {
+      assert(res->surf.samples == 1 ||
+             res->surf.msaa_layout == ISL_MSAA_LAYOUT_ARRAY);
+   }
+
+   for (unsigned a = 0; a < num_layers; a++) {
+      if (res->aux.state[level][start_layer + a] != aux_state) {
+         res->aux.state[level][start_layer + a] = aux_state;
+         /* XXX: Need to track which bindings to make dirty */
+         ice->state.dirty |= IRIS_ALL_DIRTY_BINDINGS;
+      }
+   }
+}
+
+/* On Gen9, color buffers may be compressed by the hardware (lossless
+ * compression).  There are, however, format restrictions, and care needs to
+ * be taken that the sampler engine is capable of re-interpreting the buffer
+ * with a format different from the one it was originally written with.
+ *
+ * For example, SRGB formats are not compressible and the sampler engine isn't
+ * capable of treating RGBA_UNORM as SRGB_ALPHA. In such a case the underlying
+ * color buffer needs to be resolved so that the sampling surface can be
+ * sampled as non-compressed (i.e., without the auxiliary MCS buffer being
+ * set).
+ */
+static bool
+can_texture_with_ccs(const struct gen_device_info *devinfo,
+                     struct pipe_debug_callback *dbg,
+                     const struct iris_resource *res,
+                     enum isl_format view_format)
+{
+   if (res->aux.usage != ISL_AUX_USAGE_CCS_E)
+      return false;
+
+   if (!format_ccs_e_compat_with_resource(devinfo, res, view_format)) {
+      const struct isl_format_layout *res_fmtl =
+         isl_format_get_layout(res->surf.format);
+      const struct isl_format_layout *view_fmtl =
+         isl_format_get_layout(view_format);
+
+      perf_debug(dbg, "Incompatible sampling format (%s) for CCS (%s)\n",
+                 view_fmtl->name, res_fmtl->name);
+
+      return false;
+   }
+
+   return true;
+}
+
+enum isl_aux_usage
+iris_resource_texture_aux_usage(struct iris_context *ice,
+                                const struct iris_resource *res,
+                                enum isl_format view_format)
+{
+   struct iris_screen *screen = (void *) ice->ctx.screen;
+   struct gen_device_info *devinfo = &screen->devinfo;
+
+   switch (res->aux.usage) {
+   case ISL_AUX_USAGE_HIZ:
+      if (iris_sample_with_depth_aux(devinfo, res))
+         return ISL_AUX_USAGE_HIZ;
+      break;
+
+   case ISL_AUX_USAGE_HIZ_CCS:
+      assert(!iris_sample_with_depth_aux(devinfo, res));
+      return ISL_AUX_USAGE_NONE;
+
+   case ISL_AUX_USAGE_HIZ_CCS_WT:
+      if (iris_sample_with_depth_aux(devinfo, res))
+         return ISL_AUX_USAGE_HIZ_CCS_WT;
+      break;
+
+   case ISL_AUX_USAGE_MCS:
+   case ISL_AUX_USAGE_MCS_CCS:
+   case ISL_AUX_USAGE_STC_CCS:
+      return res->aux.usage;
+
+   case ISL_AUX_USAGE_CCS_D:
+   case ISL_AUX_USAGE_CCS_E:
+      /* If we don't have any unresolved color, report an aux usage of
+       * ISL_AUX_USAGE_NONE.  This way, texturing won't even look at the
+       * aux surface and we can save some bandwidth.
+       */
+      if (!iris_has_color_unresolved(res, 0, INTEL_REMAINING_LEVELS,
+                                     0, INTEL_REMAINING_LAYERS))
+         return ISL_AUX_USAGE_NONE;
+
+      if (can_texture_with_ccs(devinfo, &ice->dbg, res, view_format))
+         return ISL_AUX_USAGE_CCS_E;
+      break;
+
+   default:
+      break;
+   }
+
+   return ISL_AUX_USAGE_NONE;
+}
+
+static bool
+isl_formats_are_fast_clear_compatible(enum isl_format a, enum isl_format b)
+{
+   /* On gen8 and earlier, the hardware was only capable of handling 0/1 clear
+    * values so sRGB curve application was a no-op for all fast-clearable
+    * formats.
+    *
+    * On gen9+, the hardware supports arbitrary clear values.  For sRGB clear
+    * values, the hardware interprets the floats, not as what would be
+    * returned from the sampler (or written by the shader), but as being
+    * between format conversion and sRGB curve application.  This means that
+    * we can switch between sRGB and UNORM without having to whack the clear
+    * color.
+    */
+   return isl_format_srgb_to_linear(a) == isl_format_srgb_to_linear(b);
+}
+
+void
+iris_resource_prepare_texture(struct iris_context *ice,
+                              struct iris_batch *batch,
+                              struct iris_resource *res,
+                              enum isl_format view_format,
+                              uint32_t start_level, uint32_t num_levels,
+                              uint32_t start_layer, uint32_t num_layers)
+{
+   enum isl_aux_usage aux_usage =
+      iris_resource_texture_aux_usage(ice, res, view_format);
+
+   bool clear_supported = isl_aux_usage_has_fast_clears(aux_usage);
+
+   /* Clear color is specified as ints or floats and the conversion is done by
+    * the sampler.  If we have a texture view, we would have to perform the
+    * clear color conversion manually.  Just disable clear color.
+    */
+   if (!isl_formats_are_fast_clear_compatible(res->surf.format, view_format))
+      clear_supported = false;
+
+   iris_resource_prepare_access(ice, batch, res, start_level, num_levels,
+                                start_layer, num_layers,
+                                aux_usage, clear_supported);
+}
+
+enum isl_aux_usage
+iris_resource_render_aux_usage(struct iris_context *ice,
+                               struct iris_resource *res,
+                               enum isl_format render_format,
+                               bool blend_enabled,
+                               bool draw_aux_disabled)
+{
+   struct iris_screen *screen = (void *) ice->ctx.screen;
+   struct gen_device_info *devinfo = &screen->devinfo;
+
+   if (draw_aux_disabled)
+      return ISL_AUX_USAGE_NONE;
+
+   switch (res->aux.usage) {
+   case ISL_AUX_USAGE_MCS:
+   case ISL_AUX_USAGE_MCS_CCS:
+      return res->aux.usage;
+
+   case ISL_AUX_USAGE_CCS_D:
+   case ISL_AUX_USAGE_CCS_E:
+      /* Gen9+ hardware technically supports non-0/1 clear colors with sRGB
+       * formats.  However, the hardware doesn't properly apply the sRGB
+       * curve to the clear color when blending.
+       */
+      if (devinfo->gen >= 9 && blend_enabled &&
+          isl_format_is_srgb(render_format) &&
+          !isl_color_value_is_zero_one(res->aux.clear_color, render_format))
+         return ISL_AUX_USAGE_NONE;
+
+      if (res->aux.usage == ISL_AUX_USAGE_CCS_E &&
+          format_ccs_e_compat_with_resource(devinfo, res, render_format))
+         return ISL_AUX_USAGE_CCS_E;
+
+      /* Otherwise, we try to fall back to CCS_D */
+      if (isl_format_supports_ccs_d(devinfo, render_format))
+         return ISL_AUX_USAGE_CCS_D;
+
+   default:
+      return ISL_AUX_USAGE_NONE;
+   }
+}
+
+void
+iris_resource_prepare_render(struct iris_context *ice,
+                             struct iris_batch *batch,
+                             struct iris_resource *res, uint32_t level,
+                             uint32_t start_layer, uint32_t layer_count,
+                             enum isl_aux_usage aux_usage)
+{
+   iris_resource_prepare_access(ice, batch, res, level, 1, start_layer,
+                                layer_count, aux_usage,
+                                isl_aux_usage_has_fast_clears(aux_usage));
+}
+
+void
+iris_resource_finish_render(struct iris_context *ice,
+                            struct iris_resource *res, uint32_t level,
+                            uint32_t start_layer, uint32_t layer_count,
+                            enum isl_aux_usage aux_usage)
+{
+   iris_resource_finish_write(ice, res, level, start_layer, layer_count,
+                              aux_usage);
+}
+
+void
+iris_resource_prepare_depth(struct iris_context *ice,
+                            struct iris_batch *batch,
+                            struct iris_resource *res, uint32_t level,
+                            uint32_t start_layer, uint32_t layer_count)
+{
+   iris_resource_prepare_access(ice, batch, res, level, 1, start_layer,
+                                layer_count, res->aux.usage, !!res->aux.bo);
+}
+
+void
+iris_resource_finish_depth(struct iris_context *ice,
+                           struct iris_resource *res, uint32_t level,
+                           uint32_t start_layer, uint32_t layer_count,
+                           bool depth_written)
+{
+   if (depth_written) {
+      iris_resource_finish_write(ice, res, level, start_layer, layer_count,
+                                 res->aux.usage);
+   }
 }