iris: Pin HiZ buffers when rendering.
diff --git a/src/gallium/drivers/iris/iris_state.c b/src/gallium/drivers/iris/iris_state.c
index 0a2a5997ab80065668e9b33148bcbd58ff75bcd9..45d90e42515f8c87f6e8ffbb8dcb10649e0e5cc4 100644
--- a/src/gallium/drivers/iris/iris_state.c
+++ b/src/gallium/drivers/iris/iris_state.c
@@ -617,6 +617,41 @@ init_state_base_address(struct iris_batch *batch)
    }
 }
 
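+/**
+ * Program the L3 cache partitioning by packing the URB, read-only, DC, and
+ * "all" allocations from the given gen_l3_config (plus the SLM enable bit)
+ * into L3CNTLREG and writing it with an MI_LOAD_REGISTER_IMM.
+ */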
+static void
+iris_emit_l3_config(struct iris_batch *batch, const struct gen_l3_config *cfg,
+                    bool has_slm, bool wants_dc_cache)
+{
+   uint32_t reg_val;
+   iris_pack_state(GENX(L3CNTLREG), &reg_val, reg) {
+      reg.SLMEnable = has_slm;
+#if GEN_GEN == 11
+      /* WA_1406697149: Bit 9 "Error Detection Behavior Control" must be set
+       * in L3CNTLREG register. The default setting of the bit is not the
+       * desirable behavior.
+       */
+      reg.ErrorDetectionBehaviorControl = true;
+#endif
+      reg.URBAllocation = cfg->n[GEN_L3P_URB];
+      reg.ROAllocation = cfg->n[GEN_L3P_RO];
+      reg.DCAllocation = cfg->n[GEN_L3P_DC];
+      reg.AllAllocation = cfg->n[GEN_L3P_ALL];
+   }
+   iris_emit_lri(batch, L3CNTLREG, reg_val);
+}
+
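+/**
+ * Emit the default L3 configuration for a context, enabling SLM only for
+ * compute and requesting a DC cache partition in either case.
+ */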
+static void
+iris_emit_default_l3_config(struct iris_batch *batch,
+                            const struct gen_device_info *devinfo,
+                            bool compute)
+{
+   bool wants_dc_cache = true;
+   bool has_slm = compute;
+   const struct gen_l3_weights w =
+      gen_get_default_l3_weights(devinfo, wants_dc_cache, has_slm);
+   const struct gen_l3_config *cfg = gen_get_l3_config(devinfo, w);
+   iris_emit_l3_config(batch, cfg, has_slm, wants_dc_cache);
+}
+
 /**
  * Upload the initial GPU state for a render context.
  *
@@ -634,10 +669,11 @@ iris_init_render_context(struct iris_screen *screen,
 
    emit_pipeline_select(batch, _3D);
 
+   iris_emit_default_l3_config(batch, devinfo, false);
+
    init_state_base_address(batch);
 
 #if GEN_GEN >= 9
-   // XXX: INSTPM on Gen8
    iris_pack_state(GENX(CS_DEBUG_MODE2), &reg_val, reg) {
       reg.CONSTANT_BUFFERAddressOffsetDisable = true;
       reg.CONSTANT_BUFFERAddressOffsetDisableMask = true;
@@ -705,11 +741,11 @@ iris_init_render_context(struct iris_screen *screen,
    iris_emit_cmd(batch, GENX(3DSTATE_WM_HZ_OP), foo);
 
    /* No polygon stippling offsets are necessary. */
-   // XXX: may need to set an offset for origin-UL framebuffers
+   /* TODO: may need to set an offset for origin-UL framebuffers */
    iris_emit_cmd(batch, GENX(3DSTATE_POLY_STIPPLE_OFFSET), foo);
 
    /* Set a static partitioning of the push constant area. */
-   // XXX: this may be a bad idea...could starve the push ringbuffers...
+   /* TODO: this may be a bad idea...could starve the push ringbuffers... */
    for (int i = 0; i <= MESA_SHADER_FRAGMENT; i++) {
       iris_emit_cmd(batch, GENX(3DSTATE_PUSH_CONSTANT_ALLOC_VS), alloc) {
          alloc._3DCommandSubOpcode = 18 + i;
@@ -729,29 +765,7 @@ iris_init_compute_context(struct iris_screen *screen,
 
    emit_pipeline_select(batch, GPGPU);
 
-   const bool has_slm = true;
-   const bool wants_dc_cache = true;
-
-   const struct gen_l3_weights w =
-      gen_get_default_l3_weights(devinfo, wants_dc_cache, has_slm);
-   const struct gen_l3_config *cfg = gen_get_l3_config(devinfo, w);
-
-   uint32_t reg_val;
-   iris_pack_state(GENX(L3CNTLREG), &reg_val, reg) {
-      reg.SLMEnable = has_slm;
-#if GEN_GEN == 11
-      /* WA_1406697149: Bit 9 "Error Detection Behavior Control" must be set
-       * in L3CNTLREG register. The default setting of the bit is not the
-       * desirable behavior.
-       */
-      reg.ErrorDetectionBehaviorControl = true;
-#endif
-      reg.URBAllocation = cfg->n[GEN_L3P_URB];
-      reg.ROAllocation = cfg->n[GEN_L3P_RO];
-      reg.DCAllocation = cfg->n[GEN_L3P_DC];
-      reg.AllAllocation = cfg->n[GEN_L3P_ALL];
-   }
-   iris_emit_lri(batch, L3CNTLREG, reg_val);
+   iris_emit_default_l3_config(batch, devinfo, true);
 
    init_state_base_address(batch);
 
@@ -786,9 +800,6 @@ struct iris_depth_buffer_state {
 struct iris_genx_state {
    struct iris_vertex_buffer_state vertex_buffers[33];
 
-   /** The number of bound vertex buffers. */
-   uint64_t bound_vertex_buffers;
-
    struct iris_depth_buffer_state depth_buffer;
 
    uint32_t so_buffers[4 * GENX(3DSTATE_SO_BUFFER_length)];
@@ -825,6 +836,9 @@ struct iris_blend_state {
 
    /** Bitfield of whether blending is enabled for RT[i] - for aux resolves */
    uint8_t blend_enables;
+
+   /** Bitfield of whether color writes are enabled for RT[i] */
+   uint8_t color_write_enables;
 };
 
 static enum pipe_blendfactor
@@ -854,6 +868,7 @@ iris_create_blend_state(struct pipe_context *ctx,
    uint32_t *blend_entry = cso->blend_state + GENX(BLEND_STATE_length);
 
    cso->blend_enables = 0;
+   cso->color_write_enables = 0;
    STATIC_ASSERT(BRW_MAX_DRAW_BUFFERS <= 8);
 
    cso->alpha_to_coverage = state->alpha_to_coverage;
@@ -880,6 +895,9 @@ iris_create_blend_state(struct pipe_context *ctx,
       if (rt->blend_enable)
          cso->blend_enables |= 1u << i;
 
+      if (rt->colormask)
+         cso->color_write_enables |= 1u << i;
+
       iris_pack_state(GENX(BLEND_STATE_ENTRY), blend_entry, be) {
          be.LogicOpEnable = state->logicop_enable;
          be.LogicOpFunction = state->logicop_func;
@@ -956,6 +974,25 @@ iris_bind_blend_state(struct pipe_context *ctx, void *state)
    ice->state.dirty |= ice->state.dirty_for_nos[IRIS_NOS_BLEND];
 }
 
+/**
+ * Return true if the FS writes to any color outputs which are not disabled
+ * via color masking.
+ */
+static bool
+has_writeable_rt(const struct iris_blend_state *cso_blend,
+                 const struct shader_info *fs_info)
+{
+   if (!fs_info)
+      return false;
+
+   unsigned rt_outputs = fs_info->outputs_written >> FRAG_RESULT_DATA0;
+
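+   /* gl_FragColor (FRAG_RESULT_COLOR) is broadcast to every color buffer,
+    * so treat it as writing all render targets.
+    */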
+   if (fs_info->outputs_written & BITFIELD64_BIT(FRAG_RESULT_COLOR))
+      rt_outputs = (1 << BRW_MAX_DRAW_BUFFERS) - 1;
+
+   return cso_blend->color_write_enables & rt_outputs;
+}
+
 /**
  * Gallium CSO for depth, stencil, and alpha testing state.
  */
@@ -1124,21 +1161,6 @@ iris_create_rasterizer_state(struct pipe_context *ctx,
    struct iris_rasterizer_state *cso =
       malloc(sizeof(struct iris_rasterizer_state));
 
-#if 0
-   point_quad_rasterization -> SBE?
-
-   not necessary?
-   {
-      poly_smooth
-      bottom_edge_rule
-
-      offset_units_unscaled - cap not exposed
-   }
-   #endif
-
-   // XXX: it may make more sense just to store the pipe_rasterizer_state,
-   // we're copying a lot of booleans here.  But we don't need all of them...
-
    cso->multisample = state->multisample;
    cso->force_persample_interp = state->force_persample_interp;
    cso->clip_halfz = state->clip_halfz;
@@ -1170,7 +1192,8 @@ iris_create_rasterizer_state(struct pipe_context *ctx,
          state->line_smooth ? _10pixels : _05pixels;
       sf.LastPixelEnable = state->line_last_pixel;
       sf.LineWidth = line_width;
-      sf.SmoothPointEnable = state->point_smooth || state->multisample;
+      sf.SmoothPointEnable = (state->point_smooth || state->multisample) &&
+                             !state->point_quad_rasterization;
       sf.PointWidthSource = state->point_size_per_vertex ? Vertex : State;
       sf.PointWidth = state->point_size;
 
@@ -1195,7 +1218,7 @@ iris_create_rasterizer_state(struct pipe_context *ctx,
       rr.GlobalDepthOffsetConstant = state->offset_units * 2;
       rr.GlobalDepthOffsetScale = state->offset_scale;
       rr.GlobalDepthOffsetClamp = state->offset_clamp;
-      rr.SmoothPointEnable = state->point_smooth || state->multisample;
+      rr.SmoothPointEnable = state->point_smooth;
       rr.AntialiasingEnable = state->line_smooth;
       rr.ScissorRectangleEnable = state->scissor;
 #if GEN_GEN >= 9
@@ -1204,7 +1227,7 @@ iris_create_rasterizer_state(struct pipe_context *ctx,
 #else
       rr.ViewportZClipTestEnable = (state->depth_clip_near || state->depth_clip_far);
 #endif
-      //rr.ConservativeRasterizationEnable = not yet supported by Gallium...
+      /* TODO: ConservativeRasterizationEnable */
    }
 
    iris_pack_command(GENX(3DSTATE_CLIP), cso->clip, cl) {
@@ -1512,7 +1535,7 @@ fill_buffer_surface_state(struct isl_device *isl_dev,
                           unsigned size)
 {
    const struct isl_format_layout *fmtl = isl_format_get_layout(format);
-   const unsigned cpp = fmtl->bpb / 8;
+   const unsigned cpp = format == ISL_FORMAT_RAW ? 1 : fmtl->bpb / 8;
 
    /* The ARB_texture_buffer_specification says:
     *
@@ -1541,16 +1564,27 @@ fill_buffer_surface_state(struct isl_device *isl_dev,
                          .mocs = mocs(bo));
 }
 
+#define SURFACE_STATE_ALIGNMENT 64
+
 /**
- * Allocate a SURFACE_STATE structure.
+ * Allocate several contiguous SURFACE_STATE structures, one for each
+ * supported auxiliary surface mode.
  */
 static void *
 alloc_surface_states(struct u_upload_mgr *mgr,
-                     struct iris_state_ref *ref)
+                     struct iris_state_ref *ref,
+                     unsigned aux_usages)
 {
    const unsigned surf_size = 4 * GENX(RENDER_SURFACE_STATE_length);
 
-   void *map = upload_state(mgr, ref, surf_size, 64);
+   /* If surf_size ever differs from SURFACE_STATE_ALIGNMENT, this will need
+    * to align each state's pointer explicitly.
+    */
+   STATIC_ASSERT(surf_size == SURFACE_STATE_ALIGNMENT);
+
+   assert(aux_usages != 0);
+
+   void *map =
+      upload_state(mgr, ref, util_bitcount(aux_usages) * surf_size,
+                   SURFACE_STATE_ALIGNMENT);
 
    ref->offset += iris_bo_offset_from_base_address(iris_resource_bo(ref->res));
 
@@ -1561,7 +1595,8 @@ static void
 fill_surface_state(struct isl_device *isl_dev,
                    void *map,
                    struct iris_resource *res,
-                   struct isl_view *view)
+                   struct isl_view *view,
+                   unsigned aux_usage)
 {
    struct isl_surf_fill_state_info f = {
       .surf = &res->surf,
@@ -1570,6 +1605,13 @@ fill_surface_state(struct isl_device *isl_dev,
       .address = res->bo->gtt_offset,
    };
 
+   if (aux_usage != ISL_AUX_USAGE_NONE) {
+      f.aux_surf = &res->aux.surf;
+      f.aux_usage = aux_usage;
+      f.aux_address = res->aux.bo->gtt_offset + res->aux.offset;
+      // XXX: clear color
+   }
+
    isl_surf_fill_state_s(isl_dev, map, &f);
 }
 
@@ -1596,11 +1638,6 @@ iris_create_sampler_view(struct pipe_context *ctx,
    pipe_reference_init(&isv->base.reference, 1);
    pipe_resource_reference(&isv->base.texture, tex);
 
-   void *map = alloc_surface_states(ice->state.surface_uploader,
-                                    &isv->surface_state);
-   if (!unlikely(map))
-      return NULL;
-
    if (util_format_is_depth_or_stencil(tmpl->format)) {
       struct iris_resource *zres, *sres;
       const struct util_format_description *desc =
@@ -1613,6 +1650,12 @@ iris_create_sampler_view(struct pipe_context *ctx,
 
    isv->res = (struct iris_resource *) tex;
 
+   void *map = alloc_surface_states(ice->state.surface_uploader,
+                                    &isv->surface_state,
+                                    isv->res->aux.possible_usages);
+   if (!unlikely(map))
+      return NULL;
+
    isl_surf_usage_flags_t usage = ISL_SURF_USAGE_TEXTURE_BIT;
 
    if (isv->base.target == PIPE_TEXTURE_CUBE ||
@@ -1642,7 +1685,15 @@ iris_create_sampler_view(struct pipe_context *ctx,
       isv->view.array_len =
          tmpl->u.tex.last_layer - tmpl->u.tex.first_layer + 1;
 
-      fill_surface_state(&screen->isl_dev, map, isv->res, &isv->view);
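+      /* Fill out one SURFACE_STATE per possible auxiliary usage, so that
+       * binding table entries can later select the right one by offset.
+       */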
+      unsigned aux_modes = isv->res->aux.possible_usages;
+      while (aux_modes) {
+         enum isl_aux_usage aux_usage = u_bit_scan(&aux_modes);
+
+         fill_surface_state(&screen->isl_dev, map, isv->res, &isv->view,
+                            aux_usage);
+
+         map += SURFACE_STATE_ALIGNMENT;
+      }
    } else {
       fill_buffer_surface_state(&screen->isl_dev, isv->res->bo, map,
                                 isv->view.format, tmpl->u.buf.offset,
@@ -1732,15 +1783,53 @@ iris_create_surface(struct pipe_context *ctx,
 
 
    void *map = alloc_surface_states(ice->state.surface_uploader,
-                                    &surf->surface_state);
+                                    &surf->surface_state,
+                                    res->aux.possible_usages);
    if (!unlikely(map))
       return NULL;
 
-   fill_surface_state(&screen->isl_dev, map, res, &surf->view);
+   unsigned aux_modes = res->aux.possible_usages;
+   while (aux_modes) {
+      enum isl_aux_usage aux_usage = u_bit_scan(&aux_modes);
+
+      fill_surface_state(&screen->isl_dev, map, res, &surf->view, aux_usage);
+
+      map += SURFACE_STATE_ALIGNMENT;
+   }
 
    return psurf;
 }
 
+#if GEN_GEN < 9
+static void
+fill_default_image_param(struct brw_image_param *param)
+{
+   memset(param, 0, sizeof(*param));
+   /* Set the swizzling shifts to all-ones to effectively disable swizzling --
+    * See emit_address_calculation() in brw_fs_surface_builder.cpp for a more
+    * detailed explanation of these parameters.
+    */
+   param->swizzling[0] = 0xff;
+   param->swizzling[1] = 0xff;
+}
+
+static void
+fill_buffer_image_param(struct brw_image_param *param,
+                        enum pipe_format pfmt,
+                        unsigned size)
+{
+   const unsigned cpp = util_format_get_blocksize(pfmt);
+
+   fill_default_image_param(param);
+   param->size[0] = size / cpp;
+   param->stride[0] = cpp;
+}
+#else
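+/* Gen9+ does not need brw_image_param data for image access, so these
+ * helpers compile away.
+ */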
+#define isl_surf_fill_image_param(x, ...)
+#define fill_default_image_param(x, ...)
+#define fill_buffer_image_param(x, ...)
+#endif
+
 /**
  * The pipe->set_shader_images() driver hook.
  */
@@ -1771,24 +1860,38 @@ iris_set_shader_images(struct pipe_context *ctx,
          // XXX: these are not retained forever, use a separate uploader?
          void *map =
             alloc_surface_states(ice->state.surface_uploader,
-                                 &shs->image[start_slot + i].surface_state);
+                                 &shs->image[start_slot + i].surface_state,
+                                 1 << ISL_AUX_USAGE_NONE);
          if (!unlikely(map)) {
             pipe_resource_reference(&shs->image[start_slot + i].res, NULL);
             return;
          }
 
          isl_surf_usage_flags_t usage = ISL_SURF_USAGE_STORAGE_BIT;
-         enum isl_format isl_format =
+         enum isl_format isl_fmt =
             iris_format_for_usage(devinfo, img->format, usage).fmt;
 
-         if (img->shader_access & PIPE_IMAGE_ACCESS_READ)
-            isl_format = isl_lower_storage_image_format(devinfo, isl_format);
+         bool untyped_fallback = false;
+
+         if (img->shader_access & PIPE_IMAGE_ACCESS_READ) {
+            /* On Gen8, try to use typed surface reads (which support a
+             * limited number of formats), and if not possible, fall back
+             * to untyped reads.
+             */
+            untyped_fallback = GEN_GEN == 8 &&
+               !isl_has_matching_typed_storage_image_format(devinfo, isl_fmt);
+
+            if (untyped_fallback)
+               isl_fmt = ISL_FORMAT_RAW;
+            else
+               isl_fmt = isl_lower_storage_image_format(devinfo, isl_fmt);
+         }
 
          shs->image[start_slot + i].access = img->shader_access;
 
          if (res->base.target != PIPE_BUFFER) {
             struct isl_view view = {
-               .format = isl_format,
+               .format = isl_fmt,
                .base_level = img->u.tex.level,
                .levels = 1,
                .base_array_layer = img->u.tex.first_layer,
@@ -1797,20 +1900,46 @@ iris_set_shader_images(struct pipe_context *ctx,
                .usage = usage,
             };
 
-            fill_surface_state(&screen->isl_dev, map, res, &view);
+            if (untyped_fallback) {
+               fill_buffer_surface_state(&screen->isl_dev, res->bo, map,
+                                         isl_fmt, 0, res->bo->size);
+            } else {
+               /* Images don't support compression */
+               unsigned aux_modes = 1 << ISL_AUX_USAGE_NONE;
+               while (aux_modes) {
+                  enum isl_aux_usage usage = u_bit_scan(&aux_modes);
+
+                  fill_surface_state(&screen->isl_dev, map, res, &view, usage);
+
+                  map += SURFACE_STATE_ALIGNMENT;
+               }
+            }
+
+            isl_surf_fill_image_param(&screen->isl_dev,
+                                      &shs->image[start_slot + i].param,
+                                      &res->surf, &view);
          } else {
             fill_buffer_surface_state(&screen->isl_dev, res->bo, map,
-                                      isl_format, img->u.buf.offset,
+                                      isl_fmt, img->u.buf.offset,
                                       img->u.buf.size);
+            fill_buffer_image_param(&shs->image[start_slot + i].param,
+                                    img->format, img->u.buf.size);
          }
       } else {
          pipe_resource_reference(&shs->image[start_slot + i].res, NULL);
          pipe_resource_reference(&shs->image[start_slot + i].surface_state.res,
                                  NULL);
+         fill_default_image_param(&shs->image[start_slot + i].param);
       }
    }
 
    ice->state.dirty |= IRIS_DIRTY_BINDINGS_VS << stage;
+
+   /* Broadwell also needs brw_image_params re-uploaded */
+   if (GEN_GEN < 9) {
+      ice->state.dirty |= IRIS_DIRTY_CONSTANTS_VS << stage;
+      shs->cbuf0_needs_upload = true;
+   }
 }
 
 
@@ -2139,6 +2268,12 @@ iris_set_framebuffer_state(struct pipe_context *ctx,
          info.mocs = mocs(zres->bo);
 
          view.format = zres->surf.format;
+
+         if (iris_resource_level_has_hiz(zres, view.base_level)) {
+            info.hiz_usage = ISL_AUX_USAGE_HIZ;
+            info.hiz_surf = &zres->aux.surf;
+            info.hiz_address = zres->aux.bo->gtt_offset;
+         }
       }
 
       if (stencil_res) {
@@ -2289,7 +2424,16 @@ upload_uniforms(struct iris_context *ice,
       uint32_t sysval = shader->system_values[i];
       uint32_t value = 0;
 
-      if (BRW_PARAM_BUILTIN_IS_CLIP_PLANE(sysval)) {
+      if (BRW_PARAM_DOMAIN(sysval) == BRW_PARAM_DOMAIN_IMAGE) {
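+         /* Pull the value from the brw_image_param stored for this image;
+          * the sysval encodes both the image index and the dword offset.
+          */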
+         unsigned img = BRW_PARAM_IMAGE_IDX(sysval);
+         unsigned offset = BRW_PARAM_IMAGE_OFFSET(sysval);
+         struct brw_image_param *param = &shs->image[img].param;
+
+         assert(offset < sizeof(struct brw_image_param));
+         value = ((uint32_t *) param)[offset];
+      } else if (sysval == BRW_PARAM_BUILTIN_ZERO) {
+         value = 0;
+      } else if (BRW_PARAM_BUILTIN_IS_CLIP_PLANE(sysval)) {
          int plane = BRW_PARAM_BUILTIN_CLIP_PLANE_IDX(sysval);
          int comp  = BRW_PARAM_BUILTIN_CLIP_PLANE_COMP(sysval);
          value = fui(ice->state.clip_planes.ucp[plane][comp]);
@@ -2410,13 +2554,13 @@ iris_set_vertex_buffers(struct pipe_context *ctx,
 
       assert(!buffer->is_user_buffer);
 
-      ice->state.bound_vertex_buffers |= 1ull << (start_slot + i);
-
       pipe_resource_reference(&state->resource, buffer->buffer.resource);
       struct iris_resource *res = (void *) state->resource;
 
-      if (res)
+      if (res) {
+         ice->state.bound_vertex_buffers |= 1ull << (start_slot + i);
          res->bind_history |= PIPE_BIND_VERTEX_BUFFER;
+      }
 
       iris_pack_state(GENX(VERTEX_BUFFER_STATE), state->state, vb) {
          vb.VertexBufferIndex = start_slot + i;
@@ -3095,8 +3239,8 @@ iris_populate_fs_key(const struct iris_context *ice,
 
    key->coherent_fb_fetch = true;
 
-   // XXX: key->force_dual_color_blend for unigine
-   // XXX: respect hint for high_quality_derivatives:1;
+   /* TODO: support key->force_dual_color_blend for Unigine */
+   /* TODO: Respect glHint for key->high_quality_derivatives */
 }
 
 static void
@@ -3105,13 +3249,6 @@ iris_populate_cs_key(const struct iris_context *ice,
 {
 }
 
-#if 0
-   // XXX: these need to go in INIT_THREAD_DISPATCH_FIELDS
-   pkt.SamplerCount =                                                     \
-      DIV_ROUND_UP(CLAMP(stage_state->sampler_count, 0, 16), 4);          \
-
-#endif
-
 static uint64_t
 KSP(const struct iris_compiled_shader *shader)
 {
@@ -3119,9 +3256,12 @@ KSP(const struct iris_compiled_shader *shader)
    return iris_bo_offset_from_base_address(res->bo) + shader->assembly.offset;
 }
 
-// Gen11 workaround table #2056 WABTPPrefetchDisable suggests to disable
-// prefetching of binding tables in A0 and B0 steppings.  XXX: Revisit
-// this WA on C0 stepping.
+/* Gen11 workaround table #2056 WABTPPrefetchDisable suggests disabling
+ * prefetching of binding tables in A0 and B0 steppings.  XXX: Revisit
+ * this WA on C0 stepping.
+ *
+ * TODO: Fill out SamplerCount for prefetching?
+ */
 
 #define INIT_THREAD_DISPATCH_FIELDS(pkt, prefix, stage)                   \
    pkt.KernelStartPointer = KSP(shader);                                  \
@@ -3288,15 +3428,13 @@ iris_store_fs_state(struct iris_context *ice,
 
    iris_pack_command(GENX(3DSTATE_PS), ps_state, ps) {
       ps.VectorMaskEnable = true;
-      //ps.SamplerCount = ...
       // XXX: WABTPPrefetchDisable, see above, drop at C0
       ps.BindingTableEntryCount = GEN_GEN == 11 ? 0 :
          prog_data->binding_table.size_bytes / 4;
       ps.FloatingPointMode = prog_data->use_alt_mode;
       ps.MaximumNumberofThreadsPerPSD = 64 - (GEN_GEN == 8 ? 2 : 1);
 
-      ps.PushConstantEnable = shader->num_system_values > 0 ||
-                              prog_data->ubo_ranges[0].length > 0;
+      ps.PushConstantEnable = prog_data->ubo_ranges[0].length > 0;
 
       /* From the documentation for this packet:
        * "If the PS kernel does not need the Position XY Offsets to
@@ -3346,9 +3484,7 @@ iris_store_fs_state(struct iris_context *ice,
    iris_pack_command(GENX(3DSTATE_PS_EXTRA), psx_state, psx) {
       psx.PixelShaderValid = true;
       psx.PixelShaderComputedDepthMode = wm_prog_data->computed_depth_mode;
-      // XXX: alpha test / alpha to coverage :/
-      psx.PixelShaderKillsPixel = wm_prog_data->uses_kill ||
-                                  wm_prog_data->uses_omask;
+      psx.PixelShaderKillsPixel = wm_prog_data->uses_kill;
       psx.AttributeEnable = wm_prog_data->num_varying_inputs != 0;
       psx.PixelShaderUsesSourceDepth = wm_prog_data->uses_src_depth;
       psx.PixelShaderUsesSourceW = wm_prog_data->uses_src_w;
@@ -3533,6 +3669,14 @@ use_null_fb_surface(struct iris_batch *batch, struct iris_context *ice)
    return ice->state.null_fb.offset;
 }
 
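+/**
+ * Return the byte offset within the block of SURFACE_STATEs allocated by
+ * alloc_surface_states() for the state corresponding to a given aux usage.
+ * States are packed in bit order of res->aux.possible_usages.
+ */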
+static uint32_t
+surf_state_offset_for_aux(struct iris_resource *res,
+                          enum isl_aux_usage aux_usage)
+{
+   return SURFACE_STATE_ALIGNMENT *
+          util_bitcount(res->aux.possible_usages & ((1 << aux_usage) - 1));
+}
+
 /**
  * Add a surface to the validation list, as well as the buffer containing
  * the corresponding SURFACE_STATE.
@@ -3542,23 +3686,39 @@ use_null_fb_surface(struct iris_batch *batch, struct iris_context *ice)
 static uint32_t
 use_surface(struct iris_batch *batch,
             struct pipe_surface *p_surf,
-            bool writeable)
+            bool writeable,
+            enum isl_aux_usage aux_usage)
 {
    struct iris_surface *surf = (void *) p_surf;
+   struct iris_resource *res = (void *) p_surf->texture;
 
    iris_use_pinned_bo(batch, iris_resource_bo(p_surf->texture), writeable);
    iris_use_pinned_bo(batch, iris_resource_bo(surf->surface_state.res), false);
 
-   return surf->surface_state.offset;
+   if (res->aux.bo)
+      iris_use_pinned_bo(batch, res->aux.bo, writeable);
+
+   return surf->surface_state.offset +
+          surf_state_offset_for_aux(res, aux_usage);
 }
 
 static uint32_t
-use_sampler_view(struct iris_batch *batch, struct iris_sampler_view *isv)
+use_sampler_view(struct iris_context *ice,
+                 struct iris_batch *batch,
+                 struct iris_sampler_view *isv)
 {
+   // XXX: ASTC hacks
+   enum isl_aux_usage aux_usage =
+      iris_resource_texture_aux_usage(ice, isv->res, isv->view.format, 0);
+
    iris_use_pinned_bo(batch, isv->res->bo, false);
    iris_use_pinned_bo(batch, iris_resource_bo(isv->surface_state.res), false);
 
-   return isv->surface_state.offset;
+   if (isv->res->aux.bo)
+      iris_use_pinned_bo(batch, isv->res->aux.bo, false);
+
+   return isv->surface_state.offset +
+          surf_state_offset_for_aux(isv->res, aux_usage);
 }
 
 static uint32_t
@@ -3597,12 +3757,16 @@ use_image(struct iris_batch *batch, struct iris_context *ice,
    if (!shs->image[i].res)
       return use_null_surface(batch, ice);
 
+   struct iris_resource *res = (void *) shs->image[i].res;
    struct iris_state_ref *surf_state = &shs->image[i].surface_state;
+   bool write = shs->image[i].access & PIPE_IMAGE_ACCESS_WRITE;
 
-   iris_use_pinned_bo(batch, iris_resource_bo(shs->image[i].res),
-                      shs->image[i].access & PIPE_IMAGE_ACCESS_WRITE);
+   iris_use_pinned_bo(batch, res->bo, write);
    iris_use_pinned_bo(batch, iris_resource_bo(surf_state->res), false);
 
+   if (res->aux.bo)
+      iris_use_pinned_bo(batch, res->aux.bo, write);
+
    return surf_state->offset;
 }
 
@@ -3662,9 +3826,13 @@ iris_populate_binding_table(struct iris_context *ice,
       /* Note that cso_fb->nr_cbufs == fs_key->nr_color_regions. */
       if (cso_fb->nr_cbufs) {
          for (unsigned i = 0; i < cso_fb->nr_cbufs; i++) {
-            uint32_t addr =
-               cso_fb->cbufs[i] ? use_surface(batch, cso_fb->cbufs[i], true)
-                                : use_null_fb_surface(batch, ice);
+            uint32_t addr;
+            if (cso_fb->cbufs[i]) {
+               addr = use_surface(batch, cso_fb->cbufs[i], true,
+                                  ice->state.draw_aux_usage[i]);
+            } else {
+               addr = use_null_fb_surface(batch, ice);
+            }
             push_bt_entry(addr);
          }
       } else {
@@ -3673,11 +3841,13 @@ iris_populate_binding_table(struct iris_context *ice,
       }
    }
 
-   bt_assert(texture_start, info->num_textures > 0);
+   unsigned num_textures = util_last_bit(info->textures_used);
+
+   bt_assert(texture_start, num_textures > 0);
 
-   for (int i = 0; i < info->num_textures; i++) {
+   for (int i = 0; i < num_textures; i++) {
       struct iris_sampler_view *view = shs->textures[i];
-      uint32_t addr = view ? use_sampler_view(batch, view)
+      uint32_t addr = view ? use_sampler_view(ice, batch, view)
                            : use_null_surface(batch, ice);
       push_bt_entry(addr);
    }
@@ -3689,11 +3859,9 @@ iris_populate_binding_table(struct iris_context *ice,
       push_bt_entry(addr);
    }
 
-   const int num_ubos = iris_get_shader_num_ubos(ice, stage);
+   bt_assert(ubo_start, shader->num_cbufs > 0);
 
-   bt_assert(ubo_start, num_ubos > 0);
-
-   for (int i = 0; i < num_ubos; i++) {
+   for (int i = 0; i < shader->num_cbufs; i++) {
       uint32_t addr = use_const_buffer(batch, ice, &shs->constbuf[i]);
       push_bt_entry(addr);
    }
@@ -3713,7 +3881,7 @@ iris_populate_binding_table(struct iris_context *ice,
    }
 
 #if 0
-      // XXX: not implemented yet
+      /* XXX: YUV surfaces not implemented yet */
       bt_assert(plane_start[1], ...);
       bt_assert(plane_start[2], ...);
 #endif
@@ -3856,10 +4024,19 @@ iris_restore_render_saved_bos(struct iris_context *ice,
          iris_get_depth_stencil_resources(cso_fb->zsbuf->texture,
                                           &zres, &sres);
          if (zres) {
+            iris_cache_flush_for_depth(batch, zres->bo);
+
             iris_use_pinned_bo(batch, zres->bo,
                                ice->state.depth_writes_enabled);
+            if (zres->aux.bo) {
+               iris_use_pinned_bo(batch, zres->aux.bo,
+                                  ice->state.depth_writes_enabled);
+            }
          }
+
          if (sres) {
+            iris_cache_flush_for_depth(batch, sres->bo);
+
             iris_use_pinned_bo(batch, sres->bo,
                                ice->state.stencil_writes_enabled);
          }
@@ -4065,7 +4242,14 @@ iris_upload_dirty_render_state(struct iris_context *ice,
       struct pipe_framebuffer_state *cso_fb = &ice->state.framebuffer;
       struct iris_depth_stencil_alpha_state *cso_zsa = ice->state.cso_zsa;
       const int header_dwords = GENX(BLEND_STATE_length);
-      const int rt_dwords = cso_fb->nr_cbufs * GENX(BLEND_STATE_ENTRY_length);
+
+      /* Always write at least one BLEND_STATE - the final RT message will
+       * reference BLEND_STATE[0] even if there aren't color writes.  There
+       * may still be alpha testing, computed depth, and so on.
+       */
+      const int rt_dwords =
+         MAX2(cso_fb->nr_cbufs, 1) * GENX(BLEND_STATE_ENTRY_length);
+
       uint32_t blend_offset;
       uint32_t *blend_map =
          stream_state(batch, ice->state.dynamic_uploader,
@@ -4250,7 +4434,7 @@ iris_upload_dirty_render_state(struct iris_context *ice,
 
    if (dirty & IRIS_DIRTY_SAMPLE_MASK) {
       iris_emit_cmd(batch, GENX(3DSTATE_SAMPLE_MASK), ms) {
-         ms.SampleMask = MAX2(ice->state.sample_mask, 1);
+         ms.SampleMask = ice->state.sample_mask;
       }
    }
 
@@ -4363,6 +4547,10 @@ iris_upload_dirty_render_state(struct iris_context *ice,
             wm.EarlyDepthStencilControl = EDSC_PREPS;
          else if (wm_prog_data->has_side_effects)
             wm.EarlyDepthStencilControl = EDSC_PSEXEC;
+
+         /* We could skip this bit if color writes are enabled. */
+         if (wm_prog_data->has_side_effects || wm_prog_data->uses_kill)
+            wm.ForceThreadDispatchEnable = ForceON;
       }
       iris_emit_merge(batch, cso->wm, dynamic_wm, ARRAY_SIZE(cso->wm));
    }
@@ -4374,9 +4562,12 @@ iris_upload_dirty_render_state(struct iris_context *ice,
    if (dirty & IRIS_DIRTY_PS_BLEND) {
       struct iris_blend_state *cso_blend = ice->state.cso_blend;
       struct iris_depth_stencil_alpha_state *cso_zsa = ice->state.cso_zsa;
+      const struct shader_info *fs_info =
+         iris_get_shader_info(ice, MESA_SHADER_FRAGMENT);
+
       uint32_t dynamic_pb[GENX(3DSTATE_PS_BLEND_length)];
       iris_pack_command(GENX(3DSTATE_PS_BLEND), &dynamic_pb, pb) {
-         pb.HasWriteableRT = true; // XXX: comes from somewhere :(
+         pb.HasWriteableRT = has_writeable_rt(cso_blend, fs_info);
          pb.AlphaTestEnable = cso_zsa->alpha.enabled;
       }
 
@@ -4425,6 +4616,10 @@ iris_upload_dirty_render_state(struct iris_context *ice,
          if (zres) {
             iris_use_pinned_bo(batch, zres->bo,
                                ice->state.depth_writes_enabled);
+            if (zres->aux.bo) {
+               iris_use_pinned_bo(batch, zres->aux.bo,
+                                  ice->state.depth_writes_enabled);
+            }
          }
 
          if (sres) {
@@ -4481,7 +4676,8 @@ iris_upload_dirty_render_state(struct iris_context *ice,
 
                high_bits = res->bo->gtt_offset >> 32ull;
                if (high_bits != ice->state.last_vbo_high_bits[i]) {
-                  flush_flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE;
+                  flush_flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE |
+                                 PIPE_CONTROL_CS_STALL;
                   ice->state.last_vbo_high_bits[i] = high_bits;
                }
 
@@ -4556,7 +4752,7 @@ iris_upload_dirty_render_state(struct iris_context *ice,
       }
    }
 
-   // XXX: Gen8 - PMA fix
+   /* TODO: Gen8 PMA fix */
 }
 
 static void
@@ -4606,7 +4802,8 @@ iris_upload_render_state(struct iris_context *ice,
       /* The VF cache key only uses 32-bits, see vertex buffer comment above */
       uint16_t high_bits = bo->gtt_offset >> 32ull;
       if (high_bits != ice->state.last_index_bo_high_bits) {
-         iris_emit_pipe_control_flush(batch, PIPE_CONTROL_VF_CACHE_INVALIDATE);
+         iris_emit_pipe_control_flush(batch, PIPE_CONTROL_VF_CACHE_INVALIDATE |
+                                             PIPE_CONTROL_CS_STALL);
          ice->state.last_index_bo_high_bits = high_bits;
       }
    }
@@ -4660,7 +4857,7 @@ iris_upload_render_state(struct iris_context *ice,
       struct iris_stream_output_target *so =
          (void *) draw->count_from_stream_output;
 
-      // XXX: avoid if possible
+      /* XXX: Replace with actual cache tracking */
       iris_emit_pipe_control_flush(batch, PIPE_CONTROL_CS_STALL);
 
       iris_emit_cmd(batch, GENX(MI_LOAD_REGISTER_MEM), lrm) {
@@ -4769,18 +4966,14 @@ iris_upload_compute_state(struct iris_context *ice,
          vfe.NumberofURBEntries = 2;
          vfe.URBEntryAllocationSize = 2;
 
-         // XXX: Use Indirect Payload Storage?
          vfe.CURBEAllocationSize =
             ALIGN(cs_prog_data->push.per_thread.regs * cs_prog_data->threads +
                   cs_prog_data->push.cross_thread.regs, 2);
       }
    }
 
-   // XXX: hack iris_set_constant_buffers to upload these thread counts
-   // XXX: along with regular uniforms for compute shaders, somehow.
-
+   /* TODO: Combine subgroup-id with cbuf0 so we can push regular uniforms */
    uint32_t curbe_data_offset = 0;
-   // TODO: Move subgroup-id into uniforms ubo so we can push uniforms
    assert(cs_prog_data->push.cross_thread.dwords == 0 &&
           cs_prog_data->push.per_thread.dwords == 1 &&
           cs_prog_data->base.param[0] == BRW_PARAM_BUILTIN_SUBGROUP_ID);
@@ -4891,8 +5084,8 @@ iris_destroy_state(struct iris_context *ice)
       const int i = u_bit_scan64(&bound_vbs);
       pipe_resource_reference(&genx->vertex_buffers[i].resource, NULL);
    }
+   free(ice->state.genx);
 
-   // XXX: unreference resources/surfaces.
    for (unsigned i = 0; i < ice->state.framebuffer.nr_cbufs; i++) {
       pipe_surface_reference(&ice->state.framebuffer.cbufs[i], NULL);
    }
@@ -4901,9 +5094,28 @@ iris_destroy_state(struct iris_context *ice)
    for (int stage = 0; stage < MESA_SHADER_STAGES; stage++) {
       struct iris_shader_state *shs = &ice->state.shaders[stage];
       pipe_resource_reference(&shs->sampler_table.res, NULL);
+      for (int i = 0; i < PIPE_MAX_CONSTANT_BUFFERS; i++) {
+         pipe_resource_reference(&shs->constbuf[i].data.res, NULL);
+         pipe_resource_reference(&shs->constbuf[i].surface_state.res, NULL);
+      }
+      for (int i = 0; i < PIPE_MAX_SHADER_IMAGES; i++) {
+         pipe_resource_reference(&shs->image[i].res, NULL);
+         pipe_resource_reference(&shs->image[i].surface_state.res, NULL);
+      }
+      for (int i = 0; i < PIPE_MAX_SHADER_BUFFERS; i++) {
+         pipe_resource_reference(&shs->ssbo[i], NULL);
+         pipe_resource_reference(&shs->ssbo_surface_state[i].res, NULL);
+      }
+      for (int i = 0; i < IRIS_MAX_TEXTURE_SAMPLERS; i++) {
+         pipe_sampler_view_reference((struct pipe_sampler_view **)
+                                     &shs->textures[i], NULL);
+      }
    }
-   free(ice->state.genx);
 
+   pipe_resource_reference(&ice->state.grid_size.res, NULL);
+   pipe_resource_reference(&ice->state.grid_surf_state.res, NULL);
+
+   pipe_resource_reference(&ice->state.null_fb.res, NULL);
    pipe_resource_reference(&ice->state.unbound_tex.res, NULL);
 
    pipe_resource_reference(&ice->state.last_res.cc_vp, NULL);