iris: Fix compute scratch pinning
[mesa.git] / src / gallium / drivers / iris / iris_state.c
index 0a1127440f166965a5e1b886540a41e39b5e201b..a8d644eeae6b5bb3b0188321ac73861c018799fe 100644 (file)
 #include "intel/common/gen_sample_positions.h"
 #include "iris_batch.h"
 #include "iris_context.h"
+#include "iris_defines.h"
 #include "iris_pipe.h"
 #include "iris_resource.h"
 
@@ -160,7 +161,14 @@ __gen_combine_address(struct iris_batch *batch, void *location,
 #include "genxml/gen_macros.h"
 #include "genxml/genX_bits.h"
 
-#define MOCS_WB (2 << 1)
+#define MOCS_PTE (1 << 1)
+#define MOCS_WB  (2 << 1)
+
+static uint32_t
+mocs(struct iris_bo *bo)
+{
+   return bo && bo->external ? MOCS_PTE : MOCS_WB;
+}
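A hedged aside on the helper above: the two values are gen9+ MOCS table indices, and the meanings sketched below are an assumption about the kernel's MOCS setup rather than something this patch states.

   /* Sketch, not part of the patch:
    *   MOCS_PTE (index 1) - cacheability follows the kernel's page-table entry;
    *                        chosen for bo->external buffers (scanout, imported/
    *                        exported), where another client controls caching.
    *   MOCS_WB  (index 2) - LLC/eLLC write-back; used for driver-internal BOs.
    * Callers below pass the backing BO, e.g. mocs(res->bo), or NULL for MOCS_WB.
    */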
 
 /**
  * Statically assert that PIPE_* enums match the hardware packets.
@@ -485,22 +493,86 @@ _iris_emit_lri(struct iris_batch *batch, uint32_t reg, uint32_t val)
 }
 #define iris_emit_lri(b, r, v) _iris_emit_lri(b, GENX(r##_num), v)
 
-/**
- * Upload the initial GPU state for a render context.
- *
- * This sets some invariant state that needs to be programmed a particular
- * way, but we never actually change.
- */
 static void
-iris_init_render_context(struct iris_screen *screen,
-                         struct iris_batch *batch,
-                         struct iris_vtable *vtbl,
-                         struct pipe_debug_callback *dbg)
+_iris_emit_lrr(struct iris_batch *batch, uint32_t dst, uint32_t src)
 {
-   uint32_t reg_val;
+   iris_emit_cmd(batch, GENX(MI_LOAD_REGISTER_REG), lrr) {
+      lrr.SourceRegisterAddress = src;
+      lrr.DestinationRegisterAddress = dst;
+   }
+}
+
+static void
+emit_pipeline_select(struct iris_batch *batch, uint32_t pipeline)
+{
+#if GEN_GEN >= 8 && GEN_GEN < 10
+   /* From the Broadwell PRM, Volume 2a: Instructions, PIPELINE_SELECT:
+    *
+    *   Software must clear the COLOR_CALC_STATE Valid field in
+    *   3DSTATE_CC_STATE_POINTERS command prior to send a PIPELINE_SELECT
+    *   with Pipeline Select set to GPGPU.
+    *
+    * The internal hardware docs recommend the same workaround for Gen9
+    * hardware too.
+    */
+   if (pipeline == GPGPU)
+      iris_emit_cmd(batch, GENX(3DSTATE_CC_STATE_POINTERS), t);
+#endif
+
+
+   /* From "BXML » GT » MI » vol1a GPU Overview » [Instruction]
+    * PIPELINE_SELECT [DevBWR+]":
+    *
+    *    "Project: DEVSNB+
+    *
+    *     Software must ensure all the write caches are flushed through a
+    *     stalling PIPE_CONTROL command followed by another PIPE_CONTROL
+    *     command to invalidate read only caches prior to programming
+    *     MI_PIPELINE_SELECT command to change the Pipeline Select Mode."
+    */
+    iris_emit_pipe_control_flush(batch,
+                                 PIPE_CONTROL_RENDER_TARGET_FLUSH |
+                                 PIPE_CONTROL_DEPTH_CACHE_FLUSH |
+                                 PIPE_CONTROL_DATA_CACHE_FLUSH |
+                                 PIPE_CONTROL_CS_STALL);
+
+    iris_emit_pipe_control_flush(batch,
+                                 PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE |
+                                 PIPE_CONTROL_CONST_CACHE_INVALIDATE |
+                                 PIPE_CONTROL_STATE_CACHE_INVALIDATE |
+                                 PIPE_CONTROL_INSTRUCTION_INVALIDATE);
 
-   iris_init_batch(batch, screen, vtbl, dbg, I915_EXEC_RENDER);
+   iris_emit_cmd(batch, GENX(PIPELINE_SELECT), sel) {
+#if GEN_GEN >= 9
+      sel.MaskBits = 3;
+#endif
+      sel.PipelineSelection = pipeline;
+   }
+}
 
+UNUSED static void
+init_glk_barrier_mode(struct iris_batch *batch, uint32_t value)
+{
+#if GEN_GEN == 9
+   /* Project: DevGLK
+    *
+    *    "This chicken bit works around a hardware issue with barrier
+    *     logic encountered when switching between GPGPU and 3D pipelines.
+    *     To workaround the issue, this mode bit should be set after a
+    *     pipeline is selected."
+    */
+   uint32_t reg_val;
+   iris_pack_state(GENX(SLICE_COMMON_ECO_CHICKEN1), &reg_val, reg) {
+      reg.GLKBarrierMode = value;
+      reg.GLKBarrierModeMask = 1;
+   }
+   iris_emit_lri(batch, SLICE_COMMON_ECO_CHICKEN1, reg_val);
+#endif
+}
+
+static void
+init_state_base_address(struct iris_batch *batch)
+{
    flush_for_state_base_change(batch);
 
    /* We program most base addresses once at context initialization time.
@@ -511,15 +583,12 @@ iris_init_render_context(struct iris_screen *screen,
     * updated occasionally.  See iris_binder.c for the details there.
     */
    iris_emit_cmd(batch, GENX(STATE_BASE_ADDRESS), sba) {
-   #if 0
-   // XXX: MOCS is stupid for this.
-      sba.GeneralStateMemoryObjectControlState            = MOCS_WB;
-      sba.StatelessDataPortAccessMemoryObjectControlState = MOCS_WB;
-      sba.DynamicStateMemoryObjectControlState            = MOCS_WB;
-      sba.IndirectObjectMemoryObjectControlState          = MOCS_WB;
-      sba.InstructionMemoryObjectControlState             = MOCS_WB;
-      sba.BindlessSurfaceStateMemoryObjectControlState    = MOCS_WB;
-   #endif
+      sba.GeneralStateMOCS            = MOCS_WB;
+      sba.StatelessDataPortAccessMOCS = MOCS_WB;
+      sba.DynamicStateMOCS            = MOCS_WB;
+      sba.IndirectObjectMOCS          = MOCS_WB;
+      sba.InstructionMOCS             = MOCS_WB;
+      sba.BindlessSurfaceStateMOCS    = MOCS_WB;
 
       sba.GeneralStateBaseAddressModifyEnable   = true;
       sba.DynamicStateBaseAddressModifyEnable   = true;
@@ -539,6 +608,26 @@ iris_init_render_context(struct iris_screen *screen,
       sba.InstructionBufferSize    = 0xfffff;
       sba.DynamicStateBufferSize   = 0xfffff;
    }
+}
+
+/**
+ * Upload the initial GPU state for a render context.
+ *
+ * This sets some invariant state that needs to be programmed a particular
+ * way, but we never actually change.
+ */
+static void
+iris_init_render_context(struct iris_screen *screen,
+                         struct iris_batch *batch,
+                         struct iris_vtable *vtbl,
+                         struct pipe_debug_callback *dbg)
+{
+   UNUSED const struct gen_device_info *devinfo = &screen->devinfo;
+   uint32_t reg_val;
+
+   emit_pipeline_select(batch, _3D);
+
+   init_state_base_address(batch);
 
    // XXX: INSTPM on Gen8
    iris_pack_state(GENX(CS_DEBUG_MODE2), &reg_val, reg) {
@@ -555,6 +644,9 @@ iris_init_render_context(struct iris_screen *screen,
       reg.PartialResolveDisableInVCMask = true;
    }
    iris_emit_lri(batch, CACHE_MODE_1, reg_val);
+
+   if (devinfo->is_geminilake)
+      init_glk_barrier_mode(batch, GLK_BARRIER_MODE_3D_HULL);
 #endif
 
 #if GEN_GEN == 11
@@ -616,60 +708,48 @@ iris_init_compute_context(struct iris_screen *screen,
                           struct iris_vtable *vtbl,
                           struct pipe_debug_callback *dbg)
 {
-   iris_init_batch(batch, screen, vtbl, dbg, I915_EXEC_RENDER);
+   UNUSED const struct gen_device_info *devinfo = &screen->devinfo;
 
-   /* XXX: PIPE_CONTROLs */
+   emit_pipeline_select(batch, GPGPU);
 
-   iris_emit_cmd(batch, GENX(PIPELINE_SELECT), sel) {
-#if GEN_GEN >= 9
-      sel.MaskBits = 3;
-#endif
-      sel.PipelineSelection = GPGPU;
-   }
+   const bool has_slm = true;
+   const bool wants_dc_cache = true;
 
-   iris_emit_cmd(batch, GENX(STATE_BASE_ADDRESS), sba) {
-   #if 0
-   // XXX: MOCS is stupid for this.
-      sba.GeneralStateMemoryObjectControlState            = MOCS_WB;
-      sba.StatelessDataPortAccessMemoryObjectControlState = MOCS_WB;
-      sba.SurfaceStateMemoryObjectControlState            = MOCS_WB;
-      sba.DynamicStateMemoryObjectControlState            = MOCS_WB;
-      sba.IndirectObjectMemoryObjectControlState          = MOCS_WB;
-      sba.InstructionMemoryObjectControlState             = MOCS_WB;
-      sba.BindlessSurfaceStateMemoryObjectControlState    = MOCS_WB;
-   #endif
+   const struct gen_l3_weights w =
+      gen_get_default_l3_weights(devinfo, wants_dc_cache, has_slm);
+   const struct gen_l3_config *cfg = gen_get_l3_config(devinfo, w);
 
-      sba.GeneralStateBaseAddressModifyEnable   = true;
-      sba.SurfaceStateBaseAddressModifyEnable   = true;
-      sba.DynamicStateBaseAddressModifyEnable   = true;
-      sba.IndirectObjectBaseAddressModifyEnable = true;
-      sba.InstructionBaseAddressModifyEnable    = true;
-      sba.GeneralStateBufferSizeModifyEnable    = true;
-      sba.DynamicStateBufferSizeModifyEnable    = true;
-      sba.BindlessSurfaceStateBaseAddressModifyEnable = true;
-      sba.IndirectObjectBufferSizeModifyEnable  = true;
-      sba.InstructionBuffersizeModifyEnable     = true;
+   uint32_t reg_val;
+   iris_pack_state(GENX(L3CNTLREG), &reg_val, reg) {
+      reg.SLMEnable = has_slm;
+#if GEN_GEN == 11
+      /* WA_1406697149: Bit 9 "Error Detection Behavior Control" must be set
+       * in L3CNTLREG register. The default setting of the bit is not the
+       * desirable behavior.
+       */
+      reg.ErrorDetectionBehaviorControl = true;
+#endif
+      reg.URBAllocation = cfg->n[GEN_L3P_URB];
+      reg.ROAllocation = cfg->n[GEN_L3P_RO];
+      reg.DCAllocation = cfg->n[GEN_L3P_DC];
+      reg.AllAllocation = cfg->n[GEN_L3P_ALL];
+   }
+   iris_emit_lri(batch, L3CNTLREG, reg_val);
 
-      sba.InstructionBaseAddress  = ro_bo(NULL, IRIS_MEMZONE_SHADER_START);
-      sba.SurfaceStateBaseAddress = ro_bo(NULL, IRIS_MEMZONE_SURFACE_START);
-      sba.DynamicStateBaseAddress = ro_bo(NULL, IRIS_MEMZONE_DYNAMIC_START);
+   init_state_base_address(batch);
 
-      sba.GeneralStateBufferSize   = 0xfffff;
-      sba.IndirectObjectBufferSize = 0xfffff;
-      sba.InstructionBufferSize    = 0xfffff;
-      sba.DynamicStateBufferSize   = 0xfffff;
-   }
+#if GEN_GEN == 9
+   if (devinfo->is_geminilake)
+      init_glk_barrier_mode(batch, GLK_BARRIER_MODE_GPGPU);
+#endif
 }
 
 struct iris_vertex_buffer_state {
-   /** The 3DSTATE_VERTEX_BUFFERS hardware packet. */
-   uint32_t vertex_buffers[1 + 33 * GENX(VERTEX_BUFFER_STATE_length)];
+   /** The VERTEX_BUFFER_STATE hardware structure. */
+   uint32_t state[GENX(VERTEX_BUFFER_STATE_length)];
 
    /** The resource to source vertex data from. */
-   struct pipe_resource *resources[33];
-
-   /** The number of bound vertex buffers. */
-   unsigned num_buffers;
+   struct pipe_resource *resource;
 };
 
 struct iris_depth_buffer_state {
@@ -687,14 +767,14 @@ struct iris_depth_buffer_state {
  * packets which vary by generation.
  */
 struct iris_genx_state {
-   /** SF_CLIP_VIEWPORT */
-   uint32_t sf_cl_vp[GENX(SF_CLIP_VIEWPORT_length) * IRIS_MAX_VIEWPORTS];
+   struct iris_vertex_buffer_state vertex_buffers[33];
+
+   /** Bitfield of which vertex buffers are bound (non-NULL). */
+   uint64_t bound_vertex_buffers;
 
-   struct iris_vertex_buffer_state vertex_buffers;
    struct iris_depth_buffer_state depth_buffer;
 
    uint32_t so_buffers[4 * GENX(3DSTATE_SO_BUFFER_length)];
-   uint32_t streamout[4 * GENX(3DSTATE_STREAMOUT_length)];
 };
 
 /**
@@ -725,8 +805,25 @@ struct iris_blend_state {
                         BRW_MAX_DRAW_BUFFERS * GENX(BLEND_STATE_ENTRY_length)];
 
    bool alpha_to_coverage; /* for shader key */
+
+   /** Bitfield of whether blending is enabled for RT[i] - for aux resolves */
+   uint8_t blend_enables;
 };
 
+static enum pipe_blendfactor
+fix_blendfactor(enum pipe_blendfactor f, bool alpha_to_one)
+{
+   if (alpha_to_one) {
+      if (f == PIPE_BLENDFACTOR_SRC1_ALPHA)
+         return PIPE_BLENDFACTOR_ONE;
+
+      if (f == PIPE_BLENDFACTOR_INV_SRC1_ALPHA)
+         return PIPE_BLENDFACTOR_ZERO;
+   }
+
+   return f;
+}
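A short worked example of the fold above; the rationale (alpha-to-one forces the shader's alpha to 1.0, which dual-source factors would otherwise miss) is an assumption, but the mapping itself is exactly what the function does.

   /* With alpha_to_one set:
    *   fix_blendfactor(PIPE_BLENDFACTOR_SRC1_ALPHA,     true) == PIPE_BLENDFACTOR_ONE
    *   fix_blendfactor(PIPE_BLENDFACTOR_INV_SRC1_ALPHA, true) == PIPE_BLENDFACTOR_ZERO
    * i.e. blending behaves as if src1.alpha were 1.0.  Every other factor, and
    * everything when alpha_to_one is false, passes through unchanged.
    */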
+
 /**
  * The pipe->create_blend_state() driver hook.
  *
@@ -737,61 +834,88 @@ iris_create_blend_state(struct pipe_context *ctx,
                         const struct pipe_blend_state *state)
 {
    struct iris_blend_state *cso = malloc(sizeof(struct iris_blend_state));
-   uint32_t *blend_state = cso->blend_state;
+   uint32_t *blend_entry = cso->blend_state + GENX(BLEND_STATE_length);
+
+   cso->blend_enables = 0;
+   STATIC_ASSERT(BRW_MAX_DRAW_BUFFERS <= 8);
 
    cso->alpha_to_coverage = state->alpha_to_coverage;
 
+   bool indep_alpha_blend = false;
+
+   for (int i = 0; i < BRW_MAX_DRAW_BUFFERS; i++) {
+      const struct pipe_rt_blend_state *rt =
+         &state->rt[state->independent_blend_enable ? i : 0];
+
+      enum pipe_blendfactor src_rgb =
+         fix_blendfactor(rt->rgb_src_factor, state->alpha_to_one);
+      enum pipe_blendfactor src_alpha =
+         fix_blendfactor(rt->alpha_src_factor, state->alpha_to_one);
+      enum pipe_blendfactor dst_rgb =
+         fix_blendfactor(rt->rgb_dst_factor, state->alpha_to_one);
+      enum pipe_blendfactor dst_alpha =
+         fix_blendfactor(rt->alpha_dst_factor, state->alpha_to_one);
+
+      if (rt->rgb_func != rt->alpha_func ||
+          src_rgb != src_alpha || dst_rgb != dst_alpha)
+         indep_alpha_blend = true;
+
+      if (rt->blend_enable)
+         cso->blend_enables |= 1u << i;
+
+      iris_pack_state(GENX(BLEND_STATE_ENTRY), blend_entry, be) {
+         be.LogicOpEnable = state->logicop_enable;
+         be.LogicOpFunction = state->logicop_func;
+
+         be.PreBlendSourceOnlyClampEnable = false;
+         be.ColorClampRange = COLORCLAMP_RTFORMAT;
+         be.PreBlendColorClampEnable = true;
+         be.PostBlendColorClampEnable = true;
+
+         be.ColorBufferBlendEnable = rt->blend_enable;
+
+         be.ColorBlendFunction          = rt->rgb_func;
+         be.AlphaBlendFunction          = rt->alpha_func;
+         be.SourceBlendFactor           = src_rgb;
+         be.SourceAlphaBlendFactor      = src_alpha;
+         be.DestinationBlendFactor      = dst_rgb;
+         be.DestinationAlphaBlendFactor = dst_alpha;
+
+         be.WriteDisableRed   = !(rt->colormask & PIPE_MASK_R);
+         be.WriteDisableGreen = !(rt->colormask & PIPE_MASK_G);
+         be.WriteDisableBlue  = !(rt->colormask & PIPE_MASK_B);
+         be.WriteDisableAlpha = !(rt->colormask & PIPE_MASK_A);
+      }
+      blend_entry += GENX(BLEND_STATE_ENTRY_length);
+   }
+
    iris_pack_command(GENX(3DSTATE_PS_BLEND), cso->ps_blend, pb) {
       /* pb.HasWriteableRT is filled in at draw time. */
       /* pb.AlphaTestEnable is filled in at draw time. */
       pb.AlphaToCoverageEnable = state->alpha_to_coverage;
-      pb.IndependentAlphaBlendEnable = state->independent_blend_enable;
+      pb.IndependentAlphaBlendEnable = indep_alpha_blend;
 
       pb.ColorBufferBlendEnable = state->rt[0].blend_enable;
 
-      pb.SourceBlendFactor           = state->rt[0].rgb_src_factor;
-      pb.SourceAlphaBlendFactor      = state->rt[0].alpha_func;
-      pb.DestinationBlendFactor      = state->rt[0].rgb_dst_factor;
-      pb.DestinationAlphaBlendFactor = state->rt[0].alpha_dst_factor;
+      pb.SourceBlendFactor =
+         fix_blendfactor(state->rt[0].rgb_src_factor, state->alpha_to_one);
+      pb.SourceAlphaBlendFactor =
+         fix_blendfactor(state->rt[0].alpha_src_factor, state->alpha_to_one);
+      pb.DestinationBlendFactor =
+         fix_blendfactor(state->rt[0].rgb_dst_factor, state->alpha_to_one);
+      pb.DestinationAlphaBlendFactor =
+         fix_blendfactor(state->rt[0].alpha_dst_factor, state->alpha_to_one);
    }
 
-   iris_pack_state(GENX(BLEND_STATE), blend_state, bs) {
+   iris_pack_state(GENX(BLEND_STATE), cso->blend_state, bs) {
       bs.AlphaToCoverageEnable = state->alpha_to_coverage;
-      bs.IndependentAlphaBlendEnable = state->independent_blend_enable;
+      bs.IndependentAlphaBlendEnable = indep_alpha_blend;
       bs.AlphaToOneEnable = state->alpha_to_one;
       bs.AlphaToCoverageDitherEnable = state->alpha_to_coverage;
       bs.ColorDitherEnable = state->dither;
       /* bl.AlphaTestEnable and bs.AlphaTestFunction are filled in later. */
    }
 
-   blend_state += GENX(BLEND_STATE_length);
-
-   for (int i = 0; i < BRW_MAX_DRAW_BUFFERS; i++) {
-      iris_pack_state(GENX(BLEND_STATE_ENTRY), blend_state, be) {
-         be.LogicOpEnable = state->logicop_enable;
-         be.LogicOpFunction = state->logicop_func;
-
-         be.PreBlendSourceOnlyClampEnable = false;
-         be.ColorClampRange = COLORCLAMP_RTFORMAT;
-         be.PreBlendColorClampEnable = true;
-         be.PostBlendColorClampEnable = true;
-
-         be.ColorBufferBlendEnable = state->rt[i].blend_enable;
-
-         be.ColorBlendFunction          = state->rt[i].rgb_func;
-         be.AlphaBlendFunction          = state->rt[i].alpha_func;
-         be.SourceBlendFactor           = state->rt[i].rgb_src_factor;
-         be.SourceAlphaBlendFactor      = state->rt[i].alpha_func;
-         be.DestinationBlendFactor      = state->rt[i].rgb_dst_factor;
-         be.DestinationAlphaBlendFactor = state->rt[i].alpha_dst_factor;
-
-         be.WriteDisableRed   = !(state->rt[i].colormask & PIPE_MASK_R);
-         be.WriteDisableGreen = !(state->rt[i].colormask & PIPE_MASK_G);
-         be.WriteDisableBlue  = !(state->rt[i].colormask & PIPE_MASK_B);
-         be.WriteDisableAlpha = !(state->rt[i].colormask & PIPE_MASK_A);
-      }
-      blend_state += GENX(BLEND_STATE_ENTRY_length);
-   }
 
    return cso;
 }
@@ -805,7 +929,11 @@ static void
 iris_bind_blend_state(struct pipe_context *ctx, void *state)
 {
    struct iris_context *ice = (struct iris_context *) ctx;
-   ice->state.cso_blend = state;
+   struct iris_blend_state *cso = state;
+
+   ice->state.cso_blend = cso;
+   ice->state.blend_enables = cso ? cso->blend_enables : 0;
+
    ice->state.dirty |= IRIS_DIRTY_PS_BLEND;
    ice->state.dirty |= IRIS_DIRTY_BLEND_STATE;
    ice->state.dirty |= ice->state.dirty_for_nos[IRIS_NOS_BLEND];
@@ -921,6 +1049,7 @@ struct iris_rasterizer_state {
    uint32_t wm[GENX(3DSTATE_WM_length)];
    uint32_t line_stipple[GENX(3DSTATE_LINE_STIPPLE_length)];
 
+   uint8_t num_clip_plane_consts;
    bool clip_halfz; /* for CC_VIEWPORT */
    bool depth_clip_near; /* for CC_VIEWPORT */
    bool depth_clip_far; /* for CC_VIEWPORT */
@@ -928,7 +1057,7 @@ struct iris_rasterizer_state {
    bool flatshade_first; /* for stream output */
    bool clamp_fragment_color; /* for shader state */
    bool light_twoside; /* for shader state */
-   bool rasterizer_discard; /* for 3DSTATE_STREAMOUT */
+   bool rasterizer_discard; /* for 3DSTATE_STREAMOUT and 3DSTATE_CLIP */
    bool half_pixel_center; /* for 3DSTATE_MULTISAMPLE */
    bool line_stipple_enable;
    bool poly_stipple_enable;
@@ -984,7 +1113,6 @@ iris_create_rasterizer_state(struct pipe_context *ctx,
    not necessary?
    {
       poly_smooth
-      force_persample_interp - ?
       bottom_edge_rule
 
       offset_units_unscaled - cap not exposed
@@ -1010,6 +1138,11 @@ iris_create_rasterizer_state(struct pipe_context *ctx,
    cso->line_stipple_enable = state->line_stipple_enable;
    cso->poly_stipple_enable = state->poly_stipple_enable;
 
+   if (state->clip_plane_enable != 0)
+      cso->num_clip_plane_consts = util_logbase2(state->clip_plane_enable) + 1;
+   else
+      cso->num_clip_plane_consts = 0;
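A small illustration of the count computed above:

   /* Example: clip_plane_enable = 0x5 (planes 0 and 2 enabled)
    *   util_logbase2(0x5) + 1 == 3, so constants for planes 0..2 get uploaded,
    *   including the disabled plane 1; the enable bitmask programmed into
    *   3DSTATE_CLIP still controls which distances are actually tested.
    */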
+
    float line_width = get_line_width(state);
 
    iris_pack_command(GENX(3DSTATE_SF), cso->sf, sf) {
@@ -1020,7 +1153,7 @@ iris_create_rasterizer_state(struct pipe_context *ctx,
          state->line_smooth ? _10pixels : _05pixels;
       sf.LastPixelEnable = state->line_last_pixel;
       sf.LineWidth = line_width;
-      sf.SmoothPointEnable = state->point_smooth;
+      sf.SmoothPointEnable = state->point_smooth || state->multisample;
       sf.PointWidthSource = state->point_size_per_vertex ? Vertex : State;
       sf.PointWidth = state->point_size;
 
@@ -1045,7 +1178,7 @@ iris_create_rasterizer_state(struct pipe_context *ctx,
       rr.GlobalDepthOffsetConstant = state->offset_units * 2;
       rr.GlobalDepthOffsetScale = state->offset_scale;
       rr.GlobalDepthOffsetClamp = state->offset_clamp;
-      rr.SmoothPointEnable = state->point_smooth;
+      rr.SmoothPointEnable = state->point_smooth || state->multisample;
       rr.AntialiasingEnable = state->line_smooth;
       rr.ScissorRectangleEnable = state->scissor;
       rr.ViewportZNearClipTestEnable = state->depth_clip_near;
@@ -1057,13 +1190,11 @@ iris_create_rasterizer_state(struct pipe_context *ctx,
       /* cl.NonPerspectiveBarycentricEnable is filled in at draw time from
        * the FS program; cl.ForceZeroRTAIndexEnable is filled in from the FB.
        */
-      cl.StatisticsEnable = true;
       cl.EarlyCullEnable = true;
       cl.UserClipDistanceClipTestEnableBitmask = state->clip_plane_enable;
       cl.ForceUserClipDistanceClipTestEnableBitmask = true;
       cl.APIMode = state->clip_halfz ? APIMODE_D3D : APIMODE_OGL;
       cl.GuardbandClipTestEnable = true;
-      cl.ClipMode = CLIPMODE_NORMAL;
       cl.ClipEnable = true;
       cl.ViewportXYClipTestEnable = state->point_tri_clip;
       cl.MinimumPointWidth = 0.125;
@@ -1085,7 +1216,6 @@ iris_create_rasterizer_state(struct pipe_context *ctx,
       wm.LineAntialiasingRegionWidth = _10pixels;
       wm.LineEndCapAntialiasingRegionWidth = _05pixels;
       wm.PointRasterizationRule = RASTRULE_UPPER_RIGHT;
-      wm.StatisticsEnable = true;
       wm.LineStippleEnable = state->line_stipple_enable;
       wm.PolygonStippleEnable = state->poly_stipple_enable;
    }
@@ -1125,14 +1255,19 @@ iris_bind_rasterizer_state(struct pipe_context *ctx, void *state)
       if (cso_changed(line_stipple_enable) || cso_changed(poly_stipple_enable))
          ice->state.dirty |= IRIS_DIRTY_WM;
 
-      if (cso_changed(rasterizer_discard) || cso_changed(flatshade_first))
+      if (cso_changed(rasterizer_discard))
+         ice->state.dirty |= IRIS_DIRTY_STREAMOUT | IRIS_DIRTY_CLIP;
+
+      if (cso_changed(flatshade_first))
          ice->state.dirty |= IRIS_DIRTY_STREAMOUT;
 
       if (cso_changed(depth_clip_near) || cso_changed(depth_clip_far) ||
           cso_changed(clip_halfz))
          ice->state.dirty |= IRIS_DIRTY_CC_VIEWPORT;
 
-      if (cso_changed(sprite_coord_enable) || cso_changed(light_twoside))
+      if (cso_changed(sprite_coord_enable) ||
+          cso_changed(sprite_coord_mode) ||
+          cso_changed(light_twoside))
          ice->state.dirty |= IRIS_DIRTY_SBE;
    }
 
@@ -1282,7 +1417,6 @@ iris_bind_sampler_states(struct pipe_context *ctx,
    struct iris_shader_state *shs = &ice->state.shaders[stage];
 
    assert(start + count <= IRIS_MAX_TEXTURE_SAMPLERS);
-   shs->num_samplers = MAX2(shs->num_samplers, start + count);
 
    for (int i = 0; i < count; i++) {
       shs->samplers[start + i] = states[i];
@@ -1348,6 +1482,76 @@ fmt_swizzle(const struct iris_format_info *fmt, enum pipe_swizzle swz)
    }
 }
 
+static void
+fill_buffer_surface_state(struct isl_device *isl_dev,
+                          struct iris_bo *bo,
+                          void *map,
+                          enum isl_format format,
+                          unsigned offset,
+                          unsigned size)
+{
+   const struct isl_format_layout *fmtl = isl_format_get_layout(format);
+   const unsigned cpp = fmtl->bpb / 8;
+
+   /* The ARB_texture_buffer_specification says:
+    *
+    *    "The number of texels in the buffer texture's texel array is given by
+    *
+    *       floor(<buffer_size> / (<components> * sizeof(<base_type>)),
+    *
+    *     where <buffer_size> is the size of the buffer object, in basic
+    *     machine units and <components> and <base_type> are the element count
+    *     and base data type for elements, as specified in Table X.1.  The
+    *     number of texels in the texel array is then clamped to the
+    *     implementation-dependent limit MAX_TEXTURE_BUFFER_SIZE_ARB."
+    *
+    * We need to clamp the size in bytes to MAX_TEXTURE_BUFFER_SIZE * stride,
+    * so that when ISL divides by stride to obtain the number of texels, that
+    * texel count is clamped to MAX_TEXTURE_BUFFER_SIZE.
+    */
+   unsigned final_size =
+      MIN3(size, bo->size - offset, IRIS_MAX_TEXTURE_BUFFER_SIZE * cpp);
+
+   isl_buffer_fill_state(isl_dev, map,
+                         .address = bo->gtt_offset + offset,
+                         .size_B = final_size,
+                         .format = format,
+                         .stride_B = cpp,
+                         .mocs = mocs(bo));
+}
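A rough worked example of the clamp above; treating IRIS_MAX_TEXTURE_BUFFER_SIZE as 1 << 27 texels is an assumption for illustration, not a value taken from this patch.

   /* format = R32G32B32A32_FLOAT -> fmtl->bpb = 128, so cpp = 16 bytes/texel.
    * If the app binds a 3 GiB range, final_size = MIN3(3 GiB,
    * bo->size - offset, (1 << 27) * 16) = 2 GiB, and ISL's texel count
    * (final_size / stride_B) lands exactly on the 1 << 27 limit.
    */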
+
+/**
+ * Allocate a SURFACE_STATE structure.
+ */
+static void *
+alloc_surface_states(struct u_upload_mgr *mgr,
+                     struct iris_state_ref *ref)
+{
+   const unsigned surf_size = 4 * GENX(RENDER_SURFACE_STATE_length);
+
+   void *map = upload_state(mgr, ref, surf_size, 64);
+
+   ref->offset += iris_bo_offset_from_base_address(iris_resource_bo(ref->res));
+
+   return map;
+}
+
+static void
+fill_surface_state(struct isl_device *isl_dev,
+                   void *map,
+                   struct iris_resource *res,
+                   struct isl_view *view)
+{
+   struct isl_surf_fill_state_info f = {
+      .surf = &res->surf,
+      .view = view,
+      .mocs = mocs(res->bo),
+      .address = res->bo->gtt_offset,
+   };
+
+   isl_surf_fill_state_s(isl_dev, map, &f);
+}
+
 /**
  * The pipe->create_sampler_view() driver hook.
  */
@@ -1371,14 +1575,11 @@ iris_create_sampler_view(struct pipe_context *ctx,
    pipe_reference_init(&isv->base.reference, 1);
    pipe_resource_reference(&isv->base.texture, tex);
 
-   void *map = upload_state(ice->state.surface_uploader, &isv->surface_state,
-                            4 * GENX(RENDER_SURFACE_STATE_length), 64);
+   void *map = alloc_surface_states(ice->state.surface_uploader,
+                                    &isv->surface_state);
    if (!unlikely(map))
       return NULL;
 
-   struct iris_bo *state_bo = iris_resource_bo(isv->surface_state.res);
-   isv->surface_state.offset += iris_bo_offset_from_base_address(state_bo);
-
    if (util_format_is_depth_or_stencil(tmpl->format)) {
       struct iris_resource *zres, *sres;
       const struct util_format_description *desc =
@@ -1391,9 +1592,11 @@ iris_create_sampler_view(struct pipe_context *ctx,
 
    isv->res = (struct iris_resource *) tex;
 
-   isl_surf_usage_flags_t usage =
-      ISL_SURF_USAGE_TEXTURE_BIT |
-      (isv->res->surf.usage & ISL_SURF_USAGE_CUBE_BIT);
+   isl_surf_usage_flags_t usage = ISL_SURF_USAGE_TEXTURE_BIT;
+
+   if (isv->base.target == PIPE_TEXTURE_CUBE ||
+       isv->base.target == PIPE_TEXTURE_CUBE_ARRAY)
+      usage |= ISL_SURF_USAGE_CUBE_BIT;
 
    const struct iris_format_info fmt =
       iris_format_for_usage(devinfo, tmpl->format, usage);
@@ -1413,30 +1616,16 @@ iris_create_sampler_view(struct pipe_context *ctx,
    if (tmpl->target != PIPE_BUFFER) {
       isv->view.base_level = tmpl->u.tex.first_level;
       isv->view.levels = tmpl->u.tex.last_level - tmpl->u.tex.first_level + 1;
+      // XXX: do I need to port f9fd0cf4790cb2a530e75d1a2206dbb9d8af7cb2?
       isv->view.base_array_layer = tmpl->u.tex.first_layer;
       isv->view.array_len =
          tmpl->u.tex.last_layer - tmpl->u.tex.first_layer + 1;
 
-      isl_surf_fill_state(&screen->isl_dev, map,
-                          .surf = &isv->res->surf, .view = &isv->view,
-                          .mocs = MOCS_WB,
-                          .address = isv->res->bo->gtt_offset);
-                          // .aux_surf =
-                          // .clear_color = clear_color,
+      fill_surface_state(&screen->isl_dev, map, isv->res, &isv->view);
    } else {
-      // XXX: what to do about isv->view?  other drivers don't use it for bufs
-      const struct isl_format_layout *fmtl =
-         isl_format_get_layout(isv->view.format);
-      const unsigned cpp = fmtl->bpb / 8;
-
-      isl_buffer_fill_state(&screen->isl_dev, map,
-                            .address = isv->res->bo->gtt_offset +
-                                       tmpl->u.buf.offset,
-                            // XXX: buffer_texture_range_size from i965?
-                            .size_B = tmpl->u.buf.size,
-                            .format = isv->view.format,
-                            .stride_B = cpp,
-                            .mocs = MOCS_WB);
+      fill_buffer_surface_state(&screen->isl_dev, isv->res->bo, map,
+                                isv->view.format, tmpl->u.buf.offset,
+                                tmpl->u.buf.size);
    }
 
    return &isv->base;
@@ -1521,20 +1710,12 @@ iris_create_surface(struct pipe_context *ctx,
       return psurf;
 
 
-   void *map = upload_state(ice->state.surface_uploader, &surf->surface_state,
-                            4 * GENX(RENDER_SURFACE_STATE_length), 64);
+   void *map = alloc_surface_states(ice->state.surface_uploader,
+                                    &surf->surface_state);
    if (!unlikely(map))
       return NULL;
 
-   struct iris_bo *state_bo = iris_resource_bo(surf->surface_state.res);
-   surf->surface_state.offset += iris_bo_offset_from_base_address(state_bo);
-
-   isl_surf_fill_state(&screen->isl_dev, map,
-                       .surf = &res->surf, .view = &surf->view,
-                       .mocs = MOCS_WB,
-                       .address = res->bo->gtt_offset);
-                       // .aux_surf =
-                       // .clear_color = clear_color,
+   fill_surface_state(&screen->isl_dev, map, res, &surf->view);
 
    return psurf;
 }
@@ -1554,27 +1735,27 @@ iris_set_shader_images(struct pipe_context *ctx,
    gl_shader_stage stage = stage_from_pipe(p_stage);
    struct iris_shader_state *shs = &ice->state.shaders[stage];
 
+   shs->bound_image_views &= ~u_bit_consecutive(start_slot, count);
+
    for (unsigned i = 0; i < count; i++) {
       if (p_images && p_images[i].resource) {
          const struct pipe_image_view *img = &p_images[i];
          struct iris_resource *res = (void *) img->resource;
          pipe_resource_reference(&shs->image[start_slot + i].res, &res->base);
 
+         shs->bound_image_views |= 1 << (start_slot + i);
+
+         res->bind_history |= PIPE_BIND_SHADER_IMAGE;
+
          // XXX: these are not retained forever, use a separate uploader?
          void *map =
-            upload_state(ice->state.surface_uploader,
-                         &shs->image[start_slot + i].surface_state,
-                         4 * GENX(RENDER_SURFACE_STATE_length), 64);
+            alloc_surface_states(ice->state.surface_uploader,
+                                 &shs->image[start_slot + i].surface_state);
          if (!unlikely(map)) {
             pipe_resource_reference(&shs->image[start_slot + i].res, NULL);
             return;
          }
 
-         struct iris_bo *surf_state_bo =
-            iris_resource_bo(shs->image[start_slot + i].surface_state.res);
-         shs->image[start_slot + i].surface_state.offset +=
-            iris_bo_offset_from_base_address(surf_state_bo);
-
          isl_surf_usage_flags_t usage = ISL_SURF_USAGE_STORAGE_BIT;
          enum isl_format isl_format =
             iris_format_for_usage(devinfo, img->format, usage).fmt;
@@ -1595,25 +1776,11 @@ iris_set_shader_images(struct pipe_context *ctx,
                .usage = usage,
             };
 
-            isl_surf_fill_state(&screen->isl_dev, map,
-                                .surf = &res->surf, .view = &view,
-                                .mocs = MOCS_WB,
-                                .address = res->bo->gtt_offset);
-                                // .aux_surf =
-                                // .clear_color = clear_color,
+            fill_surface_state(&screen->isl_dev, map, res, &view);
          } else {
-            // XXX: what to do about view?  other drivers don't use it for bufs
-            const struct isl_format_layout *fmtl =
-               isl_format_get_layout(isl_format);
-            const unsigned cpp = fmtl->bpb / 8;
-
-            isl_buffer_fill_state(&screen->isl_dev, map,
-                                  .address = res->bo->gtt_offset,
-                                  // XXX: buffer_texture_range_size from i965?
-                                  .size_B = res->base.width0,
-                                  .format = isl_format,
-                                  .stride_B = cpp,
-                                  .mocs = MOCS_WB);
+            fill_buffer_surface_state(&screen->isl_dev, res->bo, map,
+                                      isl_format, img->u.buf.offset,
+                                      img->u.buf.size);
          }
       } else {
          pipe_resource_reference(&shs->image[start_slot + i].res, NULL);
@@ -1639,18 +1806,18 @@ iris_set_sampler_views(struct pipe_context *ctx,
    gl_shader_stage stage = stage_from_pipe(p_stage);
    struct iris_shader_state *shs = &ice->state.shaders[stage];
 
-   unsigned i;
-   for (i = 0; i < count; i++) {
-      pipe_sampler_view_reference((struct pipe_sampler_view **)
-                                  &shs->textures[i], views[i]);
-   }
-   for (; i < shs->num_textures; i++) {
+   shs->bound_sampler_views &= ~u_bit_consecutive(start, count);
+
+   for (unsigned i = 0; i < count; i++) {
       pipe_sampler_view_reference((struct pipe_sampler_view **)
-                                  &shs->textures[i], NULL);
+                                  &shs->textures[start + i], views[i]);
+      struct iris_sampler_view *view = (void *) views[i];
+      if (view) {
+         view->res->bind_history |= PIPE_BIND_SAMPLER_VIEW;
+         shs->bound_sampler_views |= 1 << (start + i);
+      }
    }
 
-   shs->num_textures = count;
-
    ice->state.dirty |= (IRIS_DIRTY_BINDINGS_VS << stage);
 }
 
@@ -1679,11 +1846,17 @@ iris_surface_destroy(struct pipe_context *ctx, struct pipe_surface *p_surf)
    free(surf);
 }
 
-// XXX: actually implement user clip planes
 static void
 iris_set_clip_state(struct pipe_context *ctx,
                     const struct pipe_clip_state *state)
 {
+   struct iris_context *ice = (struct iris_context *) ctx;
+   struct iris_shader_state *shs = &ice->state.shaders[MESA_SHADER_VERTEX];
+
+   memcpy(&ice->state.clip_planes, state, sizeof(*state));
+
+   ice->state.dirty |= IRIS_DIRTY_CONSTANTS_VS;
+   shs->cbuf0_needs_upload = true;
 }
 
 /**
@@ -1723,12 +1896,27 @@ static void
 iris_set_scissor_states(struct pipe_context *ctx,
                         unsigned start_slot,
                         unsigned num_scissors,
-                        const struct pipe_scissor_state *states)
+                        const struct pipe_scissor_state *rects)
 {
    struct iris_context *ice = (struct iris_context *) ctx;
 
    for (unsigned i = 0; i < num_scissors; i++) {
-      ice->state.scissors[start_slot + i] = states[i];
+      if (rects[i].minx == rects[i].maxx || rects[i].miny == rects[i].maxy) {
+         /* If the scissor was out of bounds and got clamped to 0 width/height
+          * at the bounds, the subtraction of 1 from maximums could produce a
+          * negative number and thus not clip anything.  Instead, just provide
+          * a min > max scissor inside the bounds, which produces the expected
+          * no rendering.
+          */
+         ice->state.scissors[start_slot + i] = (struct pipe_scissor_state) {
+            .minx = 1, .maxx = 0, .miny = 1, .maxy = 0,
+         };
+      } else {
+         ice->state.scissors[start_slot + i] = (struct pipe_scissor_state) {
+            .minx = rects[i].minx,     .miny = rects[i].miny,
+            .maxx = rects[i].maxx - 1, .maxy = rects[i].maxy - 1,
+         };
+      }
    }
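A hedged numeric illustration of the two cases above:

   /* rects[i] = { .minx = 0, .maxx = 0 } (clamped to zero width):
    *    stored as { .minx = 1, .maxx = 0 }, so max < min and nothing draws.
    * rects[i] = { .minx = 0, .miny = 0, .maxx = 64, .maxy = 32 }:
    *    stored as { 0, 0, 63, 31 }, since the hardware maximums are inclusive.
    */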
 
    ice->state.dirty |= IRIS_DIRTY_SCISSOR_RECT;
@@ -1754,7 +1942,6 @@ viewport_extent(const struct pipe_viewport_state *state, int axis, float sign)
    return copysignf(state->scale[axis], sign) + state->translate[axis];
 }
 
-#if 0
 static void
 calculate_guardband_size(uint32_t fb_width, uint32_t fb_height,
                          float m00, float m11, float m30, float m31,
@@ -1834,7 +2021,6 @@ calculate_guardband_size(uint32_t fb_width, uint32_t fb_height,
       *ymax = 0.0f;
    }
 }
-#endif
 
 /**
  * The pipe->set_viewport_states() driver hook.
@@ -1850,36 +2036,8 @@ iris_set_viewport_states(struct pipe_context *ctx,
                          const struct pipe_viewport_state *states)
 {
    struct iris_context *ice = (struct iris_context *) ctx;
-   struct iris_genx_state *genx = ice->state.genx;
-   uint32_t *vp_map = &genx->sf_cl_vp[start_slot];
 
-   for (unsigned i = 0; i < count; i++) {
-      const struct pipe_viewport_state *state = &states[i];
-
-      memcpy(&ice->state.viewports[start_slot + i], state, sizeof(*state));
-
-      iris_pack_state(GENX(SF_CLIP_VIEWPORT), vp_map, vp) {
-         vp.ViewportMatrixElementm00 = state->scale[0];
-         vp.ViewportMatrixElementm11 = state->scale[1];
-         vp.ViewportMatrixElementm22 = state->scale[2];
-         vp.ViewportMatrixElementm30 = state->translate[0];
-         vp.ViewportMatrixElementm31 = state->translate[1];
-         vp.ViewportMatrixElementm32 = state->translate[2];
-         /* XXX: in i965 this is computed based on the drawbuffer size,
-          * but we don't have that here...
-          */
-         vp.XMinClipGuardband = -1.0;
-         vp.XMaxClipGuardband = 1.0;
-         vp.YMinClipGuardband = -1.0;
-         vp.YMaxClipGuardband = 1.0;
-         vp.XMinViewPort = viewport_extent(state, 0, -1.0f);
-         vp.XMaxViewPort = viewport_extent(state, 0,  1.0f) - 1;
-         vp.YMinViewPort = viewport_extent(state, 1, -1.0f);
-         vp.YMaxViewPort = viewport_extent(state, 1,  1.0f) - 1;
-      }
-
-      vp_map += GENX(SF_CLIP_VIEWPORT_length);
-   }
+   memcpy(&ice->state.viewports[start_slot], states, sizeof(*states) * count);
 
    ice->state.dirty |= IRIS_DIRTY_SF_CL_VIEWPORT;
 
@@ -1919,6 +2077,10 @@ iris_set_framebuffer_state(struct pipe_context *ctx,
       ice->state.dirty |= IRIS_DIRTY_CLIP;
    }
 
+   if (cso->width != state->width || cso->height != state->height) {
+      ice->state.dirty |= IRIS_DIRTY_SF_CL_VIEWPORT;
+   }
+
    util_copy_framebuffer_state(cso, state);
    cso->samples = samples;
 
@@ -1932,10 +2094,7 @@ iris_set_framebuffer_state(struct pipe_context *ctx,
       .swizzle = ISL_SWIZZLE_IDENTITY,
    };
 
-   struct isl_depth_stencil_hiz_emit_info info = {
-      .view = &view,
-      .mocs = MOCS_WB,
-   };
+   struct isl_depth_stencil_hiz_emit_info info = { .view = &view };
 
    if (cso->zsbuf) {
       iris_get_depth_stencil_resources(cso->zsbuf->texture, &zres,
@@ -1951,7 +2110,7 @@ iris_set_framebuffer_state(struct pipe_context *ctx,
 
          info.depth_surf = &zres->surf;
          info.depth_address = zres->bo->gtt_offset;
-         info.hiz_usage = ISL_AUX_USAGE_NONE;
+         info.mocs = mocs(zres->bo);
 
          view.format = zres->surf.format;
       }
@@ -1960,8 +2119,10 @@ iris_set_framebuffer_state(struct pipe_context *ctx,
          view.usage |= ISL_SURF_USAGE_STENCIL_BIT;
          info.stencil_surf = &stencil_res->surf;
          info.stencil_address = stencil_res->bo->gtt_offset;
-         if (!zres)
+         if (!zres) {
             view.format = stencil_res->surf.format;
+            info.mocs = mocs(stencil_res->bo);
+         }
       }
    }
 
@@ -1971,7 +2132,10 @@ iris_set_framebuffer_state(struct pipe_context *ctx,
    void *null_surf_map =
       upload_state(ice->state.surface_uploader, &ice->state.null_fb,
                    4 * GENX(RENDER_SURFACE_STATE_length), 64);
-   isl_null_fill_state(&screen->isl_dev, null_surf_map, isl_extent3d(cso->width, cso->height, cso->layers ? cso->layers : 1));
+   isl_null_fill_state(&screen->isl_dev, null_surf_map,
+                       isl_extent3d(MAX2(cso->width, 1),
+                                    MAX2(cso->height, 1),
+                                    cso->layers ? cso->layers : 1));
    ice->state.null_fb.offset +=
       iris_bo_offset_from_base_address(iris_resource_bo(ice->state.null_fb.res));
 
@@ -1995,12 +2159,42 @@ iris_set_framebuffer_state(struct pipe_context *ctx,
     *    be set in this packet."
     */
    // XXX: does this need to happen at 3DSTATE_BTP_PS time?
-   iris_emit_pipe_control_flush(&ice->render_batch,
+   iris_emit_pipe_control_flush(&ice->batches[IRIS_BATCH_RENDER],
                                 PIPE_CONTROL_RENDER_TARGET_FLUSH |
                                 PIPE_CONTROL_STALL_AT_SCOREBOARD);
 #endif
 }
 
+static void
+upload_ubo_surf_state(struct iris_context *ice,
+                      struct iris_const_buffer *cbuf,
+                      unsigned buffer_size)
+{
+   struct pipe_context *ctx = &ice->ctx;
+   struct iris_screen *screen = (struct iris_screen *) ctx->screen;
+
+   // XXX: these are not retained forever, use a separate uploader?
+   void *map =
+      upload_state(ice->state.surface_uploader, &cbuf->surface_state,
+                   4 * GENX(RENDER_SURFACE_STATE_length), 64);
+   if (!unlikely(map)) {
+      pipe_resource_reference(&cbuf->data.res, NULL);
+      return;
+   }
+
+   struct iris_resource *res = (void *) cbuf->data.res;
+   struct iris_bo *surf_bo = iris_resource_bo(cbuf->surface_state.res);
+   cbuf->surface_state.offset += iris_bo_offset_from_base_address(surf_bo);
+
+   isl_buffer_fill_state(&screen->isl_dev, map,
+                         .address = res->bo->gtt_offset + cbuf->data.offset,
+                         .size_B = MIN2(buffer_size,
+                                        res->bo->size - cbuf->data.offset),
+                         .format = ISL_FORMAT_R32G32B32A32_FLOAT,
+                         .stride_B = 1,
+                         .mocs = mocs(res->bo))
+}
+
 /**
  * The pipe->set_constant_buffer() driver hook.
  *
@@ -2013,44 +2207,34 @@ iris_set_constant_buffer(struct pipe_context *ctx,
                          const struct pipe_constant_buffer *input)
 {
    struct iris_context *ice = (struct iris_context *) ctx;
-   struct iris_screen *screen = (struct iris_screen *)ctx->screen;
    gl_shader_stage stage = stage_from_pipe(p_stage);
    struct iris_shader_state *shs = &ice->state.shaders[stage];
    struct iris_const_buffer *cbuf = &shs->constbuf[index];
 
-   if (input && (input->buffer || input->user_buffer)) {
-      if (input->user_buffer) {
-         u_upload_data(ctx->const_uploader, 0, input->buffer_size, 32,
-                       input->user_buffer, &cbuf->data.offset,
-                       &cbuf->data.res);
-      } else {
-         pipe_resource_reference(&cbuf->data.res, input->buffer);
-      }
+   if (input && input->buffer) {
+      assert(index > 0);
 
-      // XXX: these are not retained forever, use a separate uploader?
-      void *map =
-         upload_state(ice->state.surface_uploader, &cbuf->surface_state,
-                      4 * GENX(RENDER_SURFACE_STATE_length), 64);
-      if (!unlikely(map)) {
-         pipe_resource_reference(&cbuf->data.res, NULL);
-         return;
-      }
+      pipe_resource_reference(&cbuf->data.res, input->buffer);
+      cbuf->data.offset = input->buffer_offset;
 
       struct iris_resource *res = (void *) cbuf->data.res;
-      struct iris_bo *surf_bo = iris_resource_bo(cbuf->surface_state.res);
-      cbuf->surface_state.offset += iris_bo_offset_from_base_address(surf_bo);
-
-      isl_buffer_fill_state(&screen->isl_dev, map,
-                            .address = res->bo->gtt_offset + cbuf->data.offset,
-                            .size_B = input->buffer_size,
-                            .format = ISL_FORMAT_R32G32B32A32_FLOAT,
-                            .stride_B = 1,
-                            .mocs = MOCS_WB)
+      res->bind_history |= PIPE_BIND_CONSTANT_BUFFER;
+
+      upload_ubo_surf_state(ice, cbuf, input->buffer_size);
    } else {
       pipe_resource_reference(&cbuf->data.res, NULL);
       pipe_resource_reference(&cbuf->surface_state.res, NULL);
    }
 
+   if (index == 0) {
+      if (input)
+         memcpy(&shs->cbuf0, input, sizeof(shs->cbuf0));
+      else
+         memset(&shs->cbuf0, 0, sizeof(shs->cbuf0));
+
+      shs->cbuf0_needs_upload = true;
+   }
+
    ice->state.dirty |= IRIS_DIRTY_CONSTANTS_VS << stage;
    // XXX: maybe not necessary all the time...?
    // XXX: we need 3DS_BTP to commit these changes, and if we fell back to
@@ -2058,6 +2242,56 @@ iris_set_constant_buffer(struct pipe_context *ctx,
    ice->state.dirty |= IRIS_DIRTY_BINDINGS_VS << stage;
 }
 
+static void
+upload_uniforms(struct iris_context *ice,
+                gl_shader_stage stage)
+{
+   struct iris_shader_state *shs = &ice->state.shaders[stage];
+   struct iris_const_buffer *cbuf = &shs->constbuf[0];
+   struct iris_compiled_shader *shader = ice->shaders.prog[stage];
+
+   unsigned upload_size = shader->num_system_values * sizeof(uint32_t) +
+                          shs->cbuf0.buffer_size;
+
+   if (upload_size == 0)
+      return;
+
+   uint32_t *map =
+      upload_state(ice->ctx.const_uploader, &cbuf->data, upload_size, 64);
+
+   for (int i = 0; i < shader->num_system_values; i++) {
+      uint32_t sysval = shader->system_values[i];
+      uint32_t value = 0;
+
+      if (BRW_PARAM_BUILTIN_IS_CLIP_PLANE(sysval)) {
+         int plane = BRW_PARAM_BUILTIN_CLIP_PLANE_IDX(sysval);
+         int comp  = BRW_PARAM_BUILTIN_CLIP_PLANE_COMP(sysval);
+         value = fui(ice->state.clip_planes.ucp[plane][comp]);
+      } else if (sysval == BRW_PARAM_BUILTIN_PATCH_VERTICES_IN) {
+         if (stage == MESA_SHADER_TESS_CTRL) {
+            value = ice->state.vertices_per_patch;
+         } else {
+            assert(stage == MESA_SHADER_TESS_EVAL);
+            const struct shader_info *tcs_info =
+               iris_get_shader_info(ice, MESA_SHADER_TESS_CTRL);
+            assert(tcs_info);
+
+            value = tcs_info->tess.tcs_vertices_out;
+         }
+      } else {
+         assert(!"unhandled system value");
+      }
+
+      *map++ = value;
+   }
+
+   if (shs->cbuf0.user_buffer) {
+      memcpy(map, shs->cbuf0.user_buffer, shs->cbuf0.buffer_size);
+   }
+
+   upload_ubo_surf_state(ice, cbuf, upload_size);
+}
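A sketch of the constant buffer 0 layout this produces; the two-clip-plane scenario is illustrative only.

   /* Two user clip planes enabled (8 system values) plus a 32-byte app UBO:
    *   dwords 0..7  - plane 0 .xyzw, plane 1 .xyzw   (filled by the loop above)
    *   bytes 32..63 - shs->cbuf0.user_buffer contents, copied verbatim
    *   upload_size  = 8 * sizeof(uint32_t) + 32 = 64 bytes
    * upload_ubo_surf_state() then builds the SURFACE_STATE over that range.
    */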
+
 /**
  * The pipe->set_shader_buffers() driver hook.
  *
@@ -2081,6 +2315,8 @@ iris_set_shader_buffers(struct pipe_context *ctx,
          struct iris_resource *res = (void *) buffer->buffer;
          pipe_resource_reference(&shs->ssbo[start_slot + i], &res->base);
 
+         res->bind_history |= PIPE_BIND_SHADER_BUFFER;
+
          // XXX: these are not retained forever, use a separate uploader?
          void *map =
             upload_state(ice->state.surface_uploader,
@@ -2099,10 +2335,12 @@ iris_set_shader_buffers(struct pipe_context *ctx,
          isl_buffer_fill_state(&screen->isl_dev, map,
                                .address =
                                   res->bo->gtt_offset + buffer->buffer_offset,
-                               .size_B = buffer->buffer_size,
+                               .size_B =
+                                  MIN2(buffer->buffer_size,
+                                       res->bo->size - buffer->buffer_offset),
                                .format = ISL_FORMAT_RAW,
                                .stride_B = 1,
-                               .mocs = MOCS_WB);
+                               .mocs = mocs(res->bo));
       } else {
          pipe_resource_reference(&shs->ssbo[start_slot + i], NULL);
          pipe_resource_reference(&shs->ssbo_surface_state[start_slot + i].res,
@@ -2119,13 +2357,6 @@ iris_delete_state(struct pipe_context *ctx, void *state)
    free(state);
 }
 
-static void
-iris_free_vertex_buffers(struct iris_vertex_buffer_state *cso)
-{
-   for (unsigned i = 0; i < cso->num_buffers; i++)
-      pipe_resource_reference(&cso->resources[i], NULL);
-}
-
 /**
  * The pipe->set_vertex_buffers() driver hook.
  *
@@ -2137,46 +2368,43 @@ iris_set_vertex_buffers(struct pipe_context *ctx,
                         const struct pipe_vertex_buffer *buffers)
 {
    struct iris_context *ice = (struct iris_context *) ctx;
-   struct iris_vertex_buffer_state *cso = &ice->state.genx->vertex_buffers;
-
-   iris_free_vertex_buffers(&ice->state.genx->vertex_buffers);
+   struct iris_genx_state *genx = ice->state.genx;
 
-   if (!buffers)
-      count = 0;
+   ice->state.bound_vertex_buffers &= ~u_bit_consecutive64(start_slot, count);
 
-   cso->num_buffers = count;
+   for (unsigned i = 0; i < count; i++) {
+      const struct pipe_vertex_buffer *buffer = buffers ? &buffers[i] : NULL;
+      struct iris_vertex_buffer_state *state =
+         &genx->vertex_buffers[start_slot + i];
 
-   iris_pack_command(GENX(3DSTATE_VERTEX_BUFFERS), cso->vertex_buffers, vb) {
-      vb.DWordLength = 4 * MAX2(cso->num_buffers, 1) - 1;
-   }
+      if (!buffer) {
+         pipe_resource_reference(&state->resource, NULL);
+         continue;
+      }
 
-   uint32_t *vb_pack_dest = &cso->vertex_buffers[1];
+      assert(!buffer->is_user_buffer);
 
-   if (count == 0) {
-      iris_pack_state(GENX(VERTEX_BUFFER_STATE), vb_pack_dest, vb) {
-         vb.VertexBufferIndex = start_slot;
-         vb.NullVertexBuffer = true;
-         vb.AddressModifyEnable = true;
-      }
-   }
+      ice->state.bound_vertex_buffers |= 1ull << (start_slot + i);
 
-   for (unsigned i = 0; i < count; i++) {
-      assert(!buffers[i].is_user_buffer);
+      pipe_resource_reference(&state->resource, buffer->buffer.resource);
+      struct iris_resource *res = (void *) state->resource;
 
-      pipe_resource_reference(&cso->resources[i], buffers[i].buffer.resource);
-      struct iris_resource *res = (void *) cso->resources[i];
+      if (res)
+         res->bind_history |= PIPE_BIND_VERTEX_BUFFER;
 
-      iris_pack_state(GENX(VERTEX_BUFFER_STATE), vb_pack_dest, vb) {
+      iris_pack_state(GENX(VERTEX_BUFFER_STATE), state->state, vb) {
          vb.VertexBufferIndex = start_slot + i;
-         vb.MOCS = MOCS_WB;
          vb.AddressModifyEnable = true;
-         vb.BufferPitch = buffers[i].stride;
-         vb.BufferSize = res->bo->size;
-         vb.BufferStartingAddress =
-            ro_bo(NULL, res->bo->gtt_offset + buffers[i].buffer_offset);
+         vb.BufferPitch = buffer->stride;
+         if (res) {
+            vb.BufferSize = res->bo->size;
+            vb.BufferStartingAddress =
+               ro_bo(NULL, res->bo->gtt_offset + (int) buffer->buffer_offset);
+            vb.MOCS = mocs(res->bo);
+         } else {
+            vb.NullVertexBuffer = true;
+         }
       }
-
-      vb_pack_dest += GENX(VERTEX_BUFFER_STATE_length);
    }
 
    ice->state.dirty |= IRIS_DIRTY_VERTEX_BUFFERS;
@@ -2295,18 +2523,6 @@ iris_bind_vertex_elements_state(struct pipe_context *ctx, void *state)
    ice->state.dirty |= IRIS_DIRTY_VERTEX_ELEMENTS;
 }
 
-/**
- * Gallium CSO for stream output (transform feedback) targets.
- */
-struct iris_stream_output_target {
-   struct pipe_stream_output_target base;
-
-   uint32_t so_buffer[GENX(3DSTATE_SO_BUFFER_length)];
-
-   /** Storage holding the offset where we're writing in the buffer */
-   struct iris_state_ref offset;
-};
-
 /**
  * The pipe->create_stream_output_target() driver hook.
  *
@@ -2317,36 +2533,24 @@ struct iris_stream_output_target {
  */
 static struct pipe_stream_output_target *
 iris_create_stream_output_target(struct pipe_context *ctx,
-                                 struct pipe_resource *res,
+                                 struct pipe_resource *p_res,
                                  unsigned buffer_offset,
                                  unsigned buffer_size)
 {
+   struct iris_resource *res = (void *) p_res;
    struct iris_stream_output_target *cso = calloc(1, sizeof(*cso));
    if (!cso)
       return NULL;
 
+   res->bind_history |= PIPE_BIND_STREAM_OUTPUT;
+
    pipe_reference_init(&cso->base.reference, 1);
-   pipe_resource_reference(&cso->base.buffer, res);
+   pipe_resource_reference(&cso->base.buffer, p_res);
    cso->base.buffer_offset = buffer_offset;
    cso->base.buffer_size = buffer_size;
    cso->base.context = ctx;
 
-   upload_state(ctx->stream_uploader, &cso->offset, 4 * sizeof(uint32_t), 4);
-
-   iris_pack_command(GENX(3DSTATE_SO_BUFFER), cso->so_buffer, sob) {
-      sob.SurfaceBaseAddress =
-         rw_bo(NULL, iris_resource_bo(res)->gtt_offset + buffer_offset);
-      sob.SOBufferEnable = true;
-      sob.StreamOffsetWriteEnable = true;
-      sob.StreamOutputBufferOffsetAddressEnable = true;
-      sob.MOCS = MOCS_WB; // XXX: MOCS
-
-      sob.SurfaceSize = MAX2(buffer_size / 4, 1) - 1;
-
-      /* .SOBufferIndex, .StreamOffset, and .StreamOutputBufferOffsetAddress
-       * are filled in later when we have stream IDs.
-       */
-   }
+   upload_state(ctx->stream_uploader, &cso->offset, sizeof(uint32_t), 4);
 
    return &cso->base;
 }
@@ -2384,6 +2588,14 @@ iris_set_stream_output_targets(struct pipe_context *ctx,
    if (ice->state.streamout_active != active) {
       ice->state.streamout_active = active;
       ice->state.dirty |= IRIS_DIRTY_STREAMOUT;
+
+      /* We only emit 3DSTATE_SO_DECL_LIST when streamout is active, because
+       * it's a non-pipelined command.  If we're switching streamout on, we
+       * may have missed emitting it earlier, so do so now.  (We're already
+       * taking a stall to update 3DSTATE_SO_BUFFERS anyway...)
+       */
+      if (active)
+         ice->state.dirty |= IRIS_DIRTY_SO_DECL_LIST;
    }
 
    for (int i = 0; i < 4; i++) {
@@ -2405,6 +2617,7 @@ iris_set_stream_output_targets(struct pipe_context *ctx,
       }
 
       struct iris_stream_output_target *tgt = (void *) targets[i];
+      struct iris_resource *res = (void *) tgt->base.buffer;
 
       /* Note that offsets[i] will either be 0, causing us to zero
        * the value in the buffer, or 0xFFFFFFFF, which happens to mean
@@ -2412,16 +2625,21 @@ iris_set_stream_output_targets(struct pipe_context *ctx,
        */
       assert(offsets[i] == 0 || offsets[i] == 0xFFFFFFFF);
 
-      uint32_t dynamic[GENX(3DSTATE_SO_BUFFER_length)];
-      iris_pack_state(GENX(3DSTATE_SO_BUFFER), dynamic, dyns) {
-         dyns.SOBufferIndex = i;
-         dyns.StreamOffset = offsets[i];
-         dyns.StreamOutputBufferOffsetAddress =
-            rw_bo(NULL, iris_resource_bo(tgt->offset.res)->gtt_offset + tgt->offset.offset + i * sizeof(uint32_t));
-      }
-
-      for (uint32_t j = 0; j < GENX(3DSTATE_SO_BUFFER_length); j++) {
-         so_buffers[j] = tgt->so_buffer[j] | dynamic[j];
+      iris_pack_command(GENX(3DSTATE_SO_BUFFER), so_buffers, sob) {
+         sob.SurfaceBaseAddress =
+            rw_bo(NULL, res->bo->gtt_offset + tgt->base.buffer_offset);
+         sob.SOBufferEnable = true;
+         sob.StreamOffsetWriteEnable = true;
+         sob.StreamOutputBufferOffsetAddressEnable = true;
+         sob.MOCS = mocs(res->bo);
+
+         sob.SurfaceSize = MAX2(tgt->base.buffer_size / 4, 1) - 1;
+
+         sob.SOBufferIndex = i;
+         sob.StreamOffset = offsets[i];
+         sob.StreamOutputBufferOffsetAddress =
+            rw_bo(NULL, iris_resource_bo(tgt->offset.res)->gtt_offset +
+                        tgt->offset.offset);
       }
    }
 
@@ -2637,8 +2855,6 @@ iris_emit_sbe_swiz(struct iris_batch *batch,
 
    /* XXX: this should be generated when putting programs in place */
 
-   // XXX: raster->sprite_coord_enable
-
    for (int fs_attr = 0; fs_attr < VARYING_SLOT_MAX; fs_attr++) {
       const int input_index = wm_prog_data->urb_setup[fs_attr];
       if (input_index < 0 || input_index >= 16)
@@ -2783,26 +2999,19 @@ iris_emit_sbe(struct iris_batch *batch, const struct iris_context *ice)
 
 /* ------------------------------------------------------------------- */
 
-/**
- * Set sampler-related program key fields based on the current state.
- */
-static void
-iris_populate_sampler_key(const struct iris_context *ice,
-                          struct brw_sampler_prog_key_data *key)
-{
-   for (int i = 0; i < MAX_SAMPLERS; i++) {
-      key->swizzles[i] = 0x688; /* XYZW */
-   }
-}
-
 /**
  * Populate VS program key fields based on the current state.
  */
 static void
 iris_populate_vs_key(const struct iris_context *ice,
+                     const struct shader_info *info,
                      struct brw_vs_prog_key *key)
 {
-   iris_populate_sampler_key(ice, &key->tex);
+   const struct iris_rasterizer_state *cso_rast = ice->state.cso_rast;
+
+   if (info->clip_distance_array_size == 0 &&
+       (info->outputs_written & (VARYING_BIT_POS | VARYING_BIT_CLIP_VERTEX)))
+      key->nr_userclip_plane_consts = cso_rast->num_clip_plane_consts;
 }
 
 /**
@@ -2812,7 +3021,6 @@ static void
 iris_populate_tcs_key(const struct iris_context *ice,
                       struct brw_tcs_prog_key *key)
 {
-   iris_populate_sampler_key(ice, &key->tex);
 }
 
 /**
@@ -2822,7 +3030,6 @@ static void
 iris_populate_tes_key(const struct iris_context *ice,
                       struct brw_tes_prog_key *key)
 {
-   iris_populate_sampler_key(ice, &key->tex);
 }
 
 /**
@@ -2832,7 +3039,6 @@ static void
 iris_populate_gs_key(const struct iris_context *ice,
                      struct brw_gs_prog_key *key)
 {
-   iris_populate_sampler_key(ice, &key->tex);
 }
 
 /**
@@ -2842,9 +3048,6 @@ static void
 iris_populate_fs_key(const struct iris_context *ice,
                      struct brw_wm_prog_key *key)
 {
-   iris_populate_sampler_key(ice, &key->tex);
-
-   /* XXX: dirty flags? */
    const struct pipe_framebuffer_state *fb = &ice->state.framebuffer;
    const struct iris_depth_stencil_alpha_state *zsa = ice->state.cso_zsa;
    const struct iris_rasterizer_state *rast = ice->state.cso_rast;
@@ -2865,8 +3068,6 @@ iris_populate_fs_key(const struct iris_context *ice,
 
    key->coherent_fb_fetch = true;
 
-   // XXX: uint64_t input_slots_valid; - for >16 inputs
-
    // XXX: key->force_dual_color_blend for unigine
    // XXX: respect hint for high_quality_derivatives:1;
 }
@@ -2875,15 +3076,12 @@ static void
 iris_populate_cs_key(const struct iris_context *ice,
                      struct brw_cs_prog_key *key)
 {
-   iris_populate_sampler_key(ice, &key->tex);
 }
 
 #if 0
    // XXX: these need to go in INIT_THREAD_DISPATCH_FIELDS
    pkt.SamplerCount =                                                     \
       DIV_ROUND_UP(CLAMP(stage_state->sampler_count, 0, 16), 4);          \
-   pkt.PerThreadScratchSpace = prog_data->total_scratch == 0 ? 0 :        \
-      ffs(stage_state->per_thread_scratch) - 11;                          \
 
 #endif
 
@@ -2898,7 +3096,7 @@ KSP(const struct iris_compiled_shader *shader)
 // prefetching of binding tables in A0 and B0 steppings.  XXX: Revisit
 // this WA on C0 stepping.
 
-#define INIT_THREAD_DISPATCH_FIELDS(pkt, prefix)                          \
+#define INIT_THREAD_DISPATCH_FIELDS(pkt, prefix, stage)                   \
    pkt.KernelStartPointer = KSP(shader);                                  \
    pkt.BindingTableEntryCount = GEN_GEN == 11 ? 0 :                       \
       prog_data->binding_table.size_bytes / 4;                            \
@@ -2910,20 +3108,29 @@ KSP(const struct iris_compiled_shader *shader)
    pkt.prefix##URBEntryReadOffset = 0;                                    \
                                                                           \
    pkt.StatisticsEnable = true;                                           \
-   pkt.Enable           = true;
+   pkt.Enable           = true;                                           \
+                                                                          \
+   if (prog_data->total_scratch) {                                        \
+      struct iris_bo *bo =                                                \
+         iris_get_scratch_space(ice, prog_data->total_scratch, stage);    \
+      uint32_t scratch_addr = bo->gtt_offset;                             \
+      pkt.PerThreadScratchSpace = ffs(prog_data->total_scratch) - 11;     \
+      pkt.ScratchSpaceBasePointer = rw_bo(NULL, scratch_addr);            \
+   }
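A minimal sketch of the scratch-size encoding used by the INIT_THREAD_DISPATCH_FIELDS macro above, assuming total_scratch is a power of two in the documented 1KB..2MB range (0 = 1KB, ..., 11 = 2MB); the helper name is hypothetical:

#include <assert.h>
#include <strings.h>   /* ffs() */

static int
encode_per_thread_scratch(unsigned total_scratch)
{
   /* ffs() returns the 1-based index of the lowest set bit, so for a power
    * of two this is log2(total_scratch) + 1; subtracting 11 maps
    * 1KB -> 0, 2KB -> 1, ..., 2MB -> 11.
    */
   return ffs(total_scratch) - 11;
}

int main(void)
{
   assert(encode_per_thread_scratch(1024) == 0);
   assert(encode_per_thread_scratch(2 * 1024 * 1024) == 11);
   return 0;
}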
 
 /**
  * Encode most of 3DSTATE_VS based on the compiled shader.
  */
 static void
-iris_store_vs_state(const struct gen_device_info *devinfo,
+iris_store_vs_state(struct iris_context *ice,
+                    const struct gen_device_info *devinfo,
                     struct iris_compiled_shader *shader)
 {
    struct brw_stage_prog_data *prog_data = shader->prog_data;
    struct brw_vue_prog_data *vue_prog_data = (void *) prog_data;
 
    iris_pack_command(GENX(3DSTATE_VS), shader->derived_data, vs) {
-      INIT_THREAD_DISPATCH_FIELDS(vs, Vertex);
+      INIT_THREAD_DISPATCH_FIELDS(vs, Vertex, MESA_SHADER_VERTEX);
       vs.MaximumNumberofThreads = devinfo->max_vs_threads - 1;
       vs.SIMD8DispatchEnable = true;
       vs.UserClipDistanceCullTestEnableBitmask =
@@ -2935,7 +3142,8 @@ iris_store_vs_state(const struct gen_device_info *devinfo,
  * Encode most of 3DSTATE_HS based on the compiled shader.
  */
 static void
-iris_store_tcs_state(const struct gen_device_info *devinfo,
+iris_store_tcs_state(struct iris_context *ice,
+                     const struct gen_device_info *devinfo,
                      struct iris_compiled_shader *shader)
 {
    struct brw_stage_prog_data *prog_data = shader->prog_data;
@@ -2943,7 +3151,7 @@ iris_store_tcs_state(const struct gen_device_info *devinfo,
    struct brw_tcs_prog_data *tcs_prog_data = (void *) prog_data;
 
    iris_pack_command(GENX(3DSTATE_HS), shader->derived_data, hs) {
-      INIT_THREAD_DISPATCH_FIELDS(hs, Vertex);
+      INIT_THREAD_DISPATCH_FIELDS(hs, Vertex, MESA_SHADER_TESS_CTRL);
 
       hs.InstanceCount = tcs_prog_data->instances - 1;
       hs.MaximumNumberofThreads = devinfo->max_tcs_threads - 1;
@@ -2955,7 +3163,8 @@ iris_store_tcs_state(const struct gen_device_info *devinfo,
  * Encode 3DSTATE_TE and most of 3DSTATE_DS based on the compiled shader.
  */
 static void
-iris_store_tes_state(const struct gen_device_info *devinfo,
+iris_store_tes_state(struct iris_context *ice,
+                     const struct gen_device_info *devinfo,
                      struct iris_compiled_shader *shader)
 {
    struct brw_stage_prog_data *prog_data = shader->prog_data;
@@ -2975,7 +3184,7 @@ iris_store_tes_state(const struct gen_device_info *devinfo,
    }
 
    iris_pack_command(GENX(3DSTATE_DS), ds_state, ds) {
-      INIT_THREAD_DISPATCH_FIELDS(ds, Patch);
+      INIT_THREAD_DISPATCH_FIELDS(ds, Patch, MESA_SHADER_TESS_EVAL);
 
       ds.DispatchMode = DISPATCH_MODE_SIMD8_SINGLE_PATCH;
       ds.MaximumNumberofThreads = devinfo->max_tes_threads - 1;
@@ -2992,7 +3201,8 @@ iris_store_tes_state(const struct gen_device_info *devinfo,
  * Encode most of 3DSTATE_GS based on the compiled shader.
  */
 static void
-iris_store_gs_state(const struct gen_device_info *devinfo,
+iris_store_gs_state(struct iris_context *ice,
+                    const struct gen_device_info *devinfo,
                     struct iris_compiled_shader *shader)
 {
    struct brw_stage_prog_data *prog_data = shader->prog_data;
@@ -3000,7 +3210,7 @@ iris_store_gs_state(const struct gen_device_info *devinfo,
    struct brw_gs_prog_data *gs_prog_data = (void *) prog_data;
 
    iris_pack_command(GENX(3DSTATE_GS), shader->derived_data, gs) {
-      INIT_THREAD_DISPATCH_FIELDS(gs, Vertex);
+      INIT_THREAD_DISPATCH_FIELDS(gs, Vertex, MESA_SHADER_GEOMETRY);
 
       gs.OutputVertexSize = gs_prog_data->output_vertex_size_hwords * 2 - 1;
       gs.OutputTopology = gs_prog_data->output_topology;
@@ -3039,7 +3249,8 @@ iris_store_gs_state(const struct gen_device_info *devinfo,
  * Encode most of 3DSTATE_PS and 3DSTATE_PS_EXTRA based on the shader.
  */
 static void
-iris_store_fs_state(const struct gen_device_info *devinfo,
+iris_store_fs_state(struct iris_context *ice,
+                    const struct gen_device_info *devinfo,
                     struct iris_compiled_shader *shader)
 {
    struct brw_stage_prog_data *prog_data = shader->prog_data;
@@ -3057,7 +3268,7 @@ iris_store_fs_state(const struct gen_device_info *devinfo,
       ps.FloatingPointMode = prog_data->use_alt_mode;
       ps.MaximumNumberofThreadsPerPSD = 64 - (GEN_GEN == 8 ? 2 : 1);
 
-      ps.PushConstantEnable = prog_data->nr_params > 0 ||
+      ps.PushConstantEnable = shader->num_system_values > 0 ||
                               prog_data->ubo_ranges[0].length > 0;
 
       /* From the documentation for this packet:
@@ -3094,12 +3305,23 @@ iris_store_fs_state(const struct gen_device_info *devinfo,
          KSP(shader) + brw_wm_prog_data_prog_offset(wm_prog_data, ps, 1);
       ps.KernelStartPointer2 =
          KSP(shader) + brw_wm_prog_data_prog_offset(wm_prog_data, ps, 2);
+
+      if (prog_data->total_scratch) {
+         struct iris_bo *bo =
+            iris_get_scratch_space(ice, prog_data->total_scratch,
+                                   MESA_SHADER_FRAGMENT);
+         uint32_t scratch_addr = bo->gtt_offset;
+         ps.PerThreadScratchSpace = ffs(prog_data->total_scratch) - 11;
+         ps.ScratchSpaceBasePointer = rw_bo(NULL, scratch_addr);
+      }
    }
 
    iris_pack_command(GENX(3DSTATE_PS_EXTRA), psx_state, psx) {
       psx.PixelShaderValid = true;
       psx.PixelShaderComputedDepthMode = wm_prog_data->computed_depth_mode;
-      psx.PixelShaderKillsPixel = wm_prog_data->uses_kill;
+      // XXX: alpha test / alpha to coverage :/
+      psx.PixelShaderKillsPixel = wm_prog_data->uses_kill ||
+                                  wm_prog_data->uses_omask;
       psx.AttributeEnable = wm_prog_data->num_varying_inputs != 0;
       psx.PixelShaderUsesSourceDepth = wm_prog_data->uses_src_depth;
       psx.PixelShaderUsesSourceW = wm_prog_data->uses_src_w;
@@ -3127,7 +3349,8 @@ iris_store_fs_state(const struct gen_device_info *devinfo,
  * This must match the data written by the iris_store_xs_state() functions.
  */
 static void
-iris_store_cs_state(const struct gen_device_info *devinfo,
+iris_store_cs_state(struct iris_context *ice,
+                    const struct gen_device_info *devinfo,
                     struct iris_compiled_shader *shader)
 {
    struct brw_stage_prog_data *prog_data = shader->prog_data;
@@ -3172,28 +3395,31 @@ iris_derived_program_state_size(enum iris_program_cache_id cache_id)
  * get most of the state packet without having to reconstruct it.
  */
 static void
-iris_store_derived_program_state(const struct gen_device_info *devinfo,
+iris_store_derived_program_state(struct iris_context *ice,
                                  enum iris_program_cache_id cache_id,
                                  struct iris_compiled_shader *shader)
 {
+   struct iris_screen *screen = (void *) ice->ctx.screen;
+   const struct gen_device_info *devinfo = &screen->devinfo;
+
    switch (cache_id) {
    case IRIS_CACHE_VS:
-      iris_store_vs_state(devinfo, shader);
+      iris_store_vs_state(ice, devinfo, shader);
       break;
    case IRIS_CACHE_TCS:
-      iris_store_tcs_state(devinfo, shader);
+      iris_store_tcs_state(ice, devinfo, shader);
       break;
    case IRIS_CACHE_TES:
-      iris_store_tes_state(devinfo, shader);
+      iris_store_tes_state(ice, devinfo, shader);
       break;
    case IRIS_CACHE_GS:
-      iris_store_gs_state(devinfo, shader);
+      iris_store_gs_state(ice, devinfo, shader);
       break;
    case IRIS_CACHE_FS:
-      iris_store_fs_state(devinfo, shader);
+      iris_store_fs_state(ice, devinfo, shader);
       break;
    case IRIS_CACHE_CS:
-      iris_store_cs_state(devinfo, shader);
+      iris_store_cs_state(ice, devinfo, shader);
    case IRIS_CACHE_BLORP:
       break;
    default:
@@ -3266,6 +3492,10 @@ use_null_surface(struct iris_batch *batch, struct iris_context *ice)
 static uint32_t
 use_null_fb_surface(struct iris_batch *batch, struct iris_context *ice)
 {
+   /* If set_framebuffer_state() was never called, fall back to 1x1x1 */
+   if (!ice->state.null_fb.res)
+      return use_null_surface(batch, ice);
+
    struct iris_bo *state_bo = iris_resource_bo(ice->state.null_fb.res);
 
    iris_use_pinned_bo(batch, state_bo, false);
@@ -3348,8 +3578,13 @@ use_image(struct iris_batch *batch, struct iris_context *ice,
 
 #define push_bt_entry(addr) \
    assert(addr >= binder_addr); \
+   assert(s < prog_data->binding_table.size_bytes / sizeof(uint32_t)); \
    if (!pin_only) bt_map[s++] = (addr) - binder_addr;
 
+#define bt_assert(section, exists)                           \
+   if (!pin_only) assert(prog_data->binding_table.section == \
+                         ((exists) ? s : 0xd0d0d0d0))
+
 /**
  * Populate the binding table for a given shader stage.
  *
@@ -3361,14 +3596,14 @@ static void
 iris_populate_binding_table(struct iris_context *ice,
                             struct iris_batch *batch,
                             gl_shader_stage stage,
-                            bool pin_only,
-                            struct iris_state_ref *grid_size_surf)
+                            bool pin_only)
 {
    const struct iris_binder *binder = &ice->state.binder;
    struct iris_compiled_shader *shader = ice->shaders.prog[stage];
    if (!shader)
       return;
 
+   UNUSED struct brw_stage_prog_data *prog_data = shader->prog_data;
    struct iris_shader_state *shs = &ice->state.shaders[stage];
    uint32_t binder_addr = binder->bo->gtt_offset;
 
@@ -3385,12 +3620,11 @@ iris_populate_binding_table(struct iris_context *ice,
 
    if (stage == MESA_SHADER_COMPUTE) {
       /* surface for gl_NumWorkGroups */
-      assert(grid_size_surf || pin_only);
-      if (grid_size_surf) {
-         struct iris_bo *bo = iris_resource_bo(grid_size_surf->res);
-         iris_use_pinned_bo(batch, bo, false);
-         push_bt_entry(grid_size_surf->offset);
-      }
+      struct iris_state_ref *grid_data = &ice->state.grid_size;
+      struct iris_state_ref *grid_state = &ice->state.grid_surf_state;
+      iris_use_pinned_bo(batch, iris_resource_bo(grid_data->res),  false);
+      iris_use_pinned_bo(batch, iris_resource_bo(grid_state->res), false);
+      push_bt_entry(grid_state->offset);
    }
 
    if (stage == MESA_SHADER_FRAGMENT) {
@@ -3409,16 +3643,17 @@ iris_populate_binding_table(struct iris_context *ice,
       }
    }
 
-   //assert(prog_data->binding_table.texture_start ==
-          //(ice->state.num_textures[stage] ? s : 0xd0d0d0d0));
+   bt_assert(texture_start, info->num_textures > 0);
 
-   for (int i = 0; i < shs->num_textures; i++) {
+   for (int i = 0; i < info->num_textures; i++) {
       struct iris_sampler_view *view = shs->textures[i];
       uint32_t addr = view ? use_sampler_view(batch, view)
                            : use_null_surface(batch, ice);
       push_bt_entry(addr);
    }
 
+   bt_assert(image_start, info->num_images > 0);
+
    for (int i = 0; i < info->num_images; i++) {
       uint32_t addr = use_image(batch, ice, shs, i);
       push_bt_entry(addr);
@@ -3426,11 +3661,15 @@ iris_populate_binding_table(struct iris_context *ice,
 
    const int num_ubos = iris_get_shader_num_ubos(ice, stage);
 
+   bt_assert(ubo_start, num_ubos > 0);
+
    for (int i = 0; i < num_ubos; i++) {
       uint32_t addr = use_const_buffer(batch, ice, &shs->constbuf[i]);
       push_bt_entry(addr);
    }
 
+   bt_assert(ssbo_start, info->num_abos + info->num_ssbos > 0);
+
    /* XXX: st is wasting 16 binding table slots for ABOs.  Should add a cap
     * for changing nir_lower_atomics_to_ssbos setting and buffer_base offset
     * in st_atom_storagebuf.c so it'll compact them into one range, with
@@ -3445,8 +3684,8 @@ iris_populate_binding_table(struct iris_context *ice,
 
 #if 0
       // XXX: not implemented yet
-      assert(prog_data->binding_table.plane_start[1] == 0xd0d0d0d0);
-      assert(prog_data->binding_table.plane_start[2] == 0xd0d0d0d0);
+      bt_assert(plane_start[1], ...);
+      bt_assert(plane_start[2], ...);
 #endif
 }
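For reference, a rough sketch of the sequential layout this function walks for a fragment shader (render targets, then textures, images, UBOs, and finally SSBOs/ABOs); in the real prog_data tables a section that does not exist is marked 0xd0d0d0d0 instead of being given a start slot, which the bt_assert() checks above rely on. Struct and function names below are illustrative only:

#include <stdint.h>

struct bt_layout {
   uint32_t rt_start, texture_start, image_start, ubo_start, ssbo_start;
};

/* Assigns section starts in the same order the binding table entries are
 * pushed above, assuming every section is present.
 */
static struct bt_layout
compute_bt_layout(uint32_t num_rts, uint32_t num_textures,
                  uint32_t num_images, uint32_t num_ubos)
{
   struct bt_layout l;
   uint32_t s = 0;
   l.rt_start      = s; s += num_rts;
   l.texture_start = s; s += num_textures;
   l.image_start   = s; s += num_images;
   l.ubo_start     = s; s += num_ubos;
   l.ssbo_start    = s;
   return l;
}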
 
@@ -3481,7 +3720,7 @@ iris_restore_render_saved_bos(struct iris_context *ice,
                               struct iris_batch *batch,
                               const struct pipe_draw_info *draw)
 {
-   // XXX: whack IRIS_SHADER_DIRTY_BINDING_TABLE on new batch
+   struct iris_genx_state *genx = ice->state.genx;
 
    const uint64_t clean = ~ice->state.dirty;
 
@@ -3505,6 +3744,19 @@ iris_restore_render_saved_bos(struct iris_context *ice,
       iris_use_optional_res(batch, ice->state.last_res.scissor, false);
    }
 
+   if (ice->state.streamout_active && (clean & IRIS_DIRTY_SO_BUFFERS)) {
+      for (int i = 0; i < 4; i++) {
+         struct iris_stream_output_target *tgt =
+            (void *) ice->state.so_target[i];
+         if (tgt) {
+            iris_use_pinned_bo(batch, iris_resource_bo(tgt->base.buffer),
+                               true);
+            iris_use_pinned_bo(batch, iris_resource_bo(tgt->offset.res),
+                               true);
+         }
+      }
+   }
+
    for (int stage = 0; stage <= MESA_SHADER_FRAGMENT; stage++) {
       if (!(clean & (IRIS_DIRTY_CONSTANTS_VS << stage)))
          continue;
@@ -3536,7 +3788,7 @@ iris_restore_render_saved_bos(struct iris_context *ice,
    for (int stage = 0; stage <= MESA_SHADER_FRAGMENT; stage++) {
       if (clean & (IRIS_DIRTY_BINDINGS_VS << stage)) {
          /* Re-pin any buffers referred to by the binding table. */
-         iris_populate_binding_table(ice, batch, stage, true, NULL);
+         iris_populate_binding_table(ice, batch, stage, true);
       }
    }
 
@@ -3550,12 +3802,19 @@ iris_restore_render_saved_bos(struct iris_context *ice,
    for (int stage = 0; stage <= MESA_SHADER_FRAGMENT; stage++) {
       if (clean & (IRIS_DIRTY_VS << stage)) {
          struct iris_compiled_shader *shader = ice->shaders.prog[stage];
+
          if (shader) {
             struct iris_bo *bo = iris_resource_bo(shader->assembly.res);
             iris_use_pinned_bo(batch, bo, false);
-         }
 
-         // XXX: scratch buffer
+            struct brw_stage_prog_data *prog_data = shader->prog_data;
+
+            if (prog_data->total_scratch > 0) {
+               struct iris_bo *bo =
+                  iris_get_scratch_space(ice, prog_data->total_scratch, stage);
+               iris_use_pinned_bo(batch, bo, true);
+            }
+         }
       }
    }
 
@@ -3566,11 +3825,14 @@ iris_restore_render_saved_bos(struct iris_context *ice,
          struct iris_resource *zres, *sres;
          iris_get_depth_stencil_resources(cso_fb->zsbuf->texture,
                                           &zres, &sres);
-         // XXX: might not be writable...
-         if (zres)
-            iris_use_pinned_bo(batch, zres->bo, true);
-         if (sres)
-            iris_use_pinned_bo(batch, sres->bo, true);
+         if (zres) {
+            iris_use_pinned_bo(batch, zres->bo,
+                               ice->state.depth_writes_enabled);
+         }
+         if (sres) {
+            iris_use_pinned_bo(batch, sres->bo,
+                               ice->state.stencil_writes_enabled);
+         }
       }
    }
 
@@ -3583,10 +3845,11 @@ iris_restore_render_saved_bos(struct iris_context *ice,
    }
 
    if (clean & IRIS_DIRTY_VERTEX_BUFFERS) {
-      struct iris_vertex_buffer_state *cso = &ice->state.genx->vertex_buffers;
-      for (unsigned i = 0; i < cso->num_buffers; i++) {
-         struct iris_resource *res = (void *) cso->resources[i];
-         iris_use_pinned_bo(batch, res->bo, false);
+      uint64_t bound = ice->state.bound_vertex_buffers;
+      while (bound) {
+         const int i = u_bit_scan64(&bound);
+         struct pipe_resource *res = genx->vertex_buffers[i].resource;
+         iris_use_pinned_bo(batch, iris_resource_bo(res), false);
       }
    }
 }
@@ -3622,7 +3885,7 @@ iris_restore_compute_saved_bos(struct iris_context *ice,
 
    if (clean & IRIS_DIRTY_BINDINGS_CS) {
       /* Re-pin any buffers referred to by the binding table. */
-      iris_populate_binding_table(ice, batch, stage, true, NULL);
+      iris_populate_binding_table(ice, batch, stage, true);
    }
 
    struct pipe_resource *sampler_res = shs->sampler_table.res;
@@ -3631,12 +3894,19 @@ iris_restore_compute_saved_bos(struct iris_context *ice,
 
    if (clean & IRIS_DIRTY_CS) {
       struct iris_compiled_shader *shader = ice->shaders.prog[stage];
+
       if (shader) {
          struct iris_bo *bo = iris_resource_bo(shader->assembly.res);
          iris_use_pinned_bo(batch, bo, false);
-      }
 
-      // XXX: scratch buffer
+         struct brw_stage_prog_data *prog_data = shader->prog_data;
+
+         if (prog_data->total_scratch > 0) {
+            struct iris_bo *bo =
+               iris_get_scratch_space(ice, prog_data->total_scratch, stage);
+            iris_use_pinned_bo(batch, bo, true);
+         }
+      }
    }
 }
 
@@ -3653,7 +3923,7 @@ iris_update_surface_base_address(struct iris_batch *batch,
    flush_for_state_base_change(batch);
 
    iris_emit_cmd(batch, GENX(STATE_BASE_ADDRESS), sba) {
-      // XXX: sba.SurfaceStateMemoryObjectControlState = MOCS_WB;
+      sba.SurfaceStateMOCS = MOCS_WB;
       sba.SurfaceStateBaseAddressModifyEnable = true;
       sba.SurfaceStateBaseAddress = ro_bo(binder->bo, 0);
    }
@@ -3668,7 +3938,7 @@ iris_upload_dirty_render_state(struct iris_context *ice,
 {
    const uint64_t dirty = ice->state.dirty;
 
-   if (!dirty)
+   if (!(dirty & IRIS_ALL_DIRTY_FOR_RENDER))
       return;
 
    struct iris_genx_state *genx = ice->state.genx;
@@ -3709,18 +3979,53 @@ iris_upload_dirty_render_state(struct iris_context *ice,
    }
 
    if (dirty & IRIS_DIRTY_SF_CL_VIEWPORT) {
+      struct pipe_framebuffer_state *cso_fb = &ice->state.framebuffer;
+      uint32_t sf_cl_vp_address;
+      uint32_t *vp_map =
+         stream_state(batch, ice->state.dynamic_uploader,
+                      &ice->state.last_res.sf_cl_vp,
+                      4 * ice->state.num_viewports *
+                      GENX(SF_CLIP_VIEWPORT_length), 64, &sf_cl_vp_address);
+
+      for (unsigned i = 0; i < ice->state.num_viewports; i++) {
+         const struct pipe_viewport_state *state = &ice->state.viewports[i];
+         float gb_xmin, gb_xmax, gb_ymin, gb_ymax;
+
+         float vp_xmin = viewport_extent(state, 0, -1.0f);
+         float vp_xmax = viewport_extent(state, 0,  1.0f);
+         float vp_ymin = viewport_extent(state, 1, -1.0f);
+         float vp_ymax = viewport_extent(state, 1,  1.0f);
+
+         calculate_guardband_size(cso_fb->width, cso_fb->height,
+                                  state->scale[0], state->scale[1],
+                                  state->translate[0], state->translate[1],
+                                  &gb_xmin, &gb_xmax, &gb_ymin, &gb_ymax);
+
+         iris_pack_state(GENX(SF_CLIP_VIEWPORT), vp_map, vp) {
+            vp.ViewportMatrixElementm00 = state->scale[0];
+            vp.ViewportMatrixElementm11 = state->scale[1];
+            vp.ViewportMatrixElementm22 = state->scale[2];
+            vp.ViewportMatrixElementm30 = state->translate[0];
+            vp.ViewportMatrixElementm31 = state->translate[1];
+            vp.ViewportMatrixElementm32 = state->translate[2];
+            vp.XMinClipGuardband = gb_xmin;
+            vp.XMaxClipGuardband = gb_xmax;
+            vp.YMinClipGuardband = gb_ymin;
+            vp.YMaxClipGuardband = gb_ymax;
+            vp.XMinViewPort = MAX2(vp_xmin, 0);
+            vp.XMaxViewPort = MIN2(vp_xmax, cso_fb->width) - 1;
+            vp.YMinViewPort = MAX2(vp_ymin, 0);
+            vp.YMaxViewPort = MIN2(vp_ymax, cso_fb->height) - 1;
+         }
+
+         vp_map += GENX(SF_CLIP_VIEWPORT_length);
+      }
+
       iris_emit_cmd(batch, GENX(3DSTATE_VIEWPORT_STATE_POINTERS_SF_CLIP), ptr) {
-         ptr.SFClipViewportPointer =
-            emit_state(batch, ice->state.dynamic_uploader,
-                       &ice->state.last_res.sf_cl_vp,
-                       genx->sf_cl_vp, 4 * GENX(SF_CLIP_VIEWPORT_length) *
-                       ice->state.num_viewports, 64);
+         ptr.SFClipViewportPointer = sf_cl_vp_address;
       }
    }
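A back-of-the-envelope sketch of the viewport mapping encoded in SF_CLIP_VIEWPORT above: the matrix elements scale and translate NDC per axis, so the XMin/XMax/YMin/YMaxViewPort extents are the positions of NDC -1 and +1 after that transform, clamped to the framebuffer. The helper below is an assumption about what viewport_extent() boils down to, for illustration only:

struct viewport { float scale[3], translate[3]; };

/* Screen-space position of an NDC coordinate along one axis, mirroring
 * ViewportMatrixElementm00/m30 (x), m11/m31 (y), and m22/m32 (z) above.
 */
static float
ndc_to_screen(const struct viewport *vp, int axis, float ndc)
{
   return vp->scale[axis] * ndc + vp->translate[axis];
}

/* e.g. scale[0] = width/2 and translate[0] = width/2 map NDC -1..+1 to
 * 0..width, so the X extents become 0 and width before clamping.
 */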
 
-   /* XXX: L3 State */
-
-   // XXX: this is only flagged at setup, we assume a static configuration
    if (dirty & IRIS_DIRTY_URB) {
       iris_upload_urb_config(ice, batch);
    }
@@ -3821,6 +4126,9 @@ iris_upload_dirty_render_state(struct iris_context *ice,
       if (!shader)
          continue;
 
+      if (shs->cbuf0_needs_upload)
+         upload_uniforms(ice, stage);
+
       struct brw_stage_prog_data *prog_data = (void *) shader->prog_data;
 
       iris_emit_cmd(batch, GENX(3DSTATE_CONSTANT_VS), pkt) {
@@ -3871,7 +4179,7 @@ iris_upload_dirty_render_state(struct iris_context *ice,
 
    for (int stage = 0; stage <= MESA_SHADER_FRAGMENT; stage++) {
       if (dirty & (IRIS_DIRTY_BINDINGS_VS << stage)) {
-         iris_populate_binding_table(ice, batch, stage, false, NULL);
+         iris_populate_binding_table(ice, batch, stage, false);
       }
    }
 
@@ -3983,6 +4291,9 @@ iris_upload_dirty_render_state(struct iris_context *ice,
 
       uint32_t dynamic_clip[GENX(3DSTATE_CLIP_length)];
       iris_pack_command(GENX(3DSTATE_CLIP), &dynamic_clip, cl) {
+         cl.StatisticsEnable = ice->state.statistics_counters_enabled;
+         cl.ClipMode = cso_rast->rasterizer_discard ? CLIPMODE_REJECT_ALL
+                                                    : CLIPMODE_NORMAL;
          if (wm_prog_data->barycentric_interp_modes &
              BRW_BARYCENTRIC_NONPERSPECTIVE_BITS)
             cl.NonPerspectiveBarycentricEnable = true;
@@ -4001,12 +4312,13 @@ iris_upload_dirty_render_state(struct iris_context *ice,
 
    }
 
-   /* XXX: FS program updates needs to flag IRIS_DIRTY_WM */
    if (dirty & IRIS_DIRTY_WM) {
       struct iris_rasterizer_state *cso = ice->state.cso_rast;
       uint32_t dynamic_wm[GENX(3DSTATE_WM_length)];
 
       iris_pack_command(GENX(3DSTATE_WM), &dynamic_wm, wm) {
+         wm.StatisticsEnable = ice->state.statistics_counters_enabled;
+
          wm.BarycentricInterpolationMode =
             wm_prog_data->barycentric_interp_modes;
 
@@ -4067,9 +4379,18 @@ iris_upload_dirty_render_state(struct iris_context *ice,
       iris_batch_emit(batch, cso_z->packets, sizeof(cso_z->packets));
 
       if (cso_fb->zsbuf) {
-         struct iris_resource *zres = (void *) cso_fb->zsbuf->texture;
-         // XXX: depth might not be writable...
-         iris_use_pinned_bo(batch, zres->bo, true);
+         struct iris_resource *zres, *sres;
+         iris_get_depth_stencil_resources(cso_fb->zsbuf->texture,
+                                          &zres, &sres);
+         if (zres) {
+            iris_use_pinned_bo(batch, zres->bo,
+                               ice->state.depth_writes_enabled);
+         }
+
+         if (sres) {
+            iris_use_pinned_bo(batch, sres->bo,
+                               ice->state.stencil_writes_enabled);
+         }
       }
    }
 
@@ -4094,16 +4415,65 @@ iris_upload_dirty_render_state(struct iris_context *ice,
    }
 
    if (dirty & IRIS_DIRTY_VERTEX_BUFFERS) {
-      struct iris_vertex_buffer_state *cso = &ice->state.genx->vertex_buffers;
-      const unsigned vb_dwords = GENX(VERTEX_BUFFER_STATE_length);
+      int count = util_bitcount64(ice->state.bound_vertex_buffers);
+
+      if (count) {
+         /* The VF cache designers cut corners, and made the cache key's
+          * <VertexBufferIndex, Memory Address> tuple only consider the bottom
+          * 32 bits of the address.  If you have two vertex buffers which get
+          * placed exactly 4 GiB apart and use them in back-to-back draw calls,
+          * you can get collisions (even within a single batch).
+          *
+          * So, we need to do a VF cache invalidate if the buffer for a VB
+          * slot changes [48:32] address bits from the previous time.
+          */
+         unsigned flush_flags = 0;
 
-      if (cso->num_buffers > 0) {
-         iris_batch_emit(batch, cso->vertex_buffers, sizeof(uint32_t) *
-                         (1 + vb_dwords * cso->num_buffers));
+         uint64_t bound = ice->state.bound_vertex_buffers;
+         while (bound) {
+            const int i = u_bit_scan64(&bound);
+            uint16_t high_bits = 0;
 
-         for (unsigned i = 0; i < cso->num_buffers; i++) {
-            struct iris_resource *res = (void *) cso->resources[i];
-            iris_use_pinned_bo(batch, res->bo, false);
+            struct iris_resource *res =
+               (void *) genx->vertex_buffers[i].resource;
+            if (res) {
+               iris_use_pinned_bo(batch, res->bo, false);
+
+               high_bits = res->bo->gtt_offset >> 32ull;
+               if (high_bits != ice->state.last_vbo_high_bits[i]) {
+                  flush_flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE;
+                  ice->state.last_vbo_high_bits[i] = high_bits;
+               }
+
+               /* If the buffer was written to by streamout, we may need
+                * to stall so those writes land and become visible to the
+                * vertex fetcher.
+                *
+                * TODO: This may stall more than necessary.
+                */
+               if (res->bind_history & PIPE_BIND_STREAM_OUTPUT)
+                  flush_flags |= PIPE_CONTROL_CS_STALL;
+            }
+         }
+
+         if (flush_flags)
+            iris_emit_pipe_control_flush(batch, flush_flags);
+
+         const unsigned vb_dwords = GENX(VERTEX_BUFFER_STATE_length);
+
+         uint32_t *map =
+            iris_get_command_space(batch, 4 * (1 + vb_dwords * count));
+         _iris_pack_command(batch, GENX(3DSTATE_VERTEX_BUFFERS), map, vb) {
+            vb.DWordLength = (vb_dwords * count + 1) - 2;
+         }
+         map += 1;
+
+         bound = ice->state.bound_vertex_buffers;
+         while (bound) {
+            const int i = u_bit_scan64(&bound);
+            memcpy(map, genx->vertex_buffers[i].state,
+                   sizeof(uint32_t) * vb_dwords);
+            map += vb_dwords;
          }
       }
    }
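A small standalone sketch of the 4 GiB aliasing hazard described in the comment above: two addresses that differ only in bits [47:32] produce the same 32-bit VF cache key, so tracking the high bits per VB slot (as the loop above does) is what triggers the invalidate. Function and variable names are illustrative:

#include <assert.h>
#include <stdint.h>

/* Returns 1 and updates the tracked high bits when an invalidate is needed. */
static int
needs_vf_invalidate(uint64_t new_addr, uint16_t *last_high_bits)
{
   uint16_t high_bits = new_addr >> 32;
   if (high_bits != *last_high_bits) {
      *last_high_bits = high_bits;
      return 1;
   }
   return 0;
}

int main(void)
{
   uint16_t last = 0;
   uint64_t a = 0x0000000010000000ull;
   uint64_t b = a + (1ull << 32);            /* exactly 4 GiB apart */
   assert((uint32_t)a == (uint32_t)b);       /* identical low-32-bit cache keys */
   needs_vf_invalidate(a, &last);
   assert(needs_vf_invalidate(b, &last));    /* high bits changed -> invalidate */
   return 0;
}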
@@ -4161,6 +4531,11 @@ iris_upload_render_state(struct iris_context *ice,
     */
    iris_use_pinned_bo(batch, ice->state.binder.bo, false);
 
+   if (!batch->contains_draw) {
+      iris_restore_render_saved_bos(ice, batch, draw);
+      batch->contains_draw = true;
+   }
+
    iris_upload_dirty_render_state(ice, batch, draw);
 
    if (draw->index_size > 0) {
@@ -4171,6 +4546,9 @@ iris_upload_render_state(struct iris_context *ice,
                        draw->count * draw->index_size, 4, draw->index.user,
                        &offset, &ice->state.last_res.index_buffer);
       } else {
+         struct iris_resource *res = (void *) draw->index.resource;
+         res->bind_history |= PIPE_BIND_INDEX_BUFFER;
+
          pipe_resource_reference(&ice->state.last_res.index_buffer,
                                  draw->index.resource);
          offset = 0;
@@ -4180,10 +4558,17 @@ iris_upload_render_state(struct iris_context *ice,
 
       iris_emit_cmd(batch, GENX(3DSTATE_INDEX_BUFFER), ib) {
          ib.IndexFormat = draw->index_size >> 1;
-         ib.MOCS = MOCS_WB;
+         ib.MOCS = mocs(bo);
          ib.BufferSize = bo->size;
          ib.BufferStartingAddress = ro_bo(bo, offset);
       }
+
+      /* The VF cache key only uses 32 bits; see vertex buffer comment above */
+      uint16_t high_bits = bo->gtt_offset >> 32ull;
+      if (high_bits != ice->state.last_index_bo_high_bits) {
+         iris_emit_pipe_control_flush(batch, PIPE_CONTROL_VF_CACHE_INVALIDATE);
+         ice->state.last_index_bo_high_bits = high_bits;
+      }
    }
 
 #define _3DPRIM_END_OFFSET          0x2420
@@ -4231,31 +4616,50 @@ iris_upload_render_state(struct iris_context *ice,
             lri.DataDWord = 0;
          }
       }
+   } else if (draw->count_from_stream_output) {
+      struct iris_stream_output_target *so =
+         (void *) draw->count_from_stream_output;
+
+      // XXX: avoid if possible
+      iris_emit_pipe_control_flush(batch, PIPE_CONTROL_CS_STALL);
+
+      iris_emit_cmd(batch, GENX(MI_LOAD_REGISTER_MEM), lrm) {
+         lrm.RegisterAddress = CS_GPR(0);
+         lrm.MemoryAddress =
+            ro_bo(iris_resource_bo(so->offset.res), so->offset.offset);
+      }
+      iris_math_div32_gpr0(ice, batch, so->stride);
+      _iris_emit_lrr(batch, _3DPRIM_VERTEX_COUNT, CS_GPR(0));
+
+      _iris_emit_lri(batch, _3DPRIM_START_VERTEX, 0);
+      _iris_emit_lri(batch, _3DPRIM_BASE_VERTEX, 0);
+      _iris_emit_lri(batch, _3DPRIM_START_INSTANCE, 0);
+      _iris_emit_lri(batch, _3DPRIM_INSTANCE_COUNT, draw->instance_count);
    }
 
    iris_emit_cmd(batch, GENX(3DPRIMITIVE), prim) {
-      prim.StartInstanceLocation = draw->start_instance;
-      prim.InstanceCount = draw->instance_count;
-      prim.VertexCountPerInstance = draw->count;
       prim.VertexAccessType = draw->index_size > 0 ? RANDOM : SEQUENTIAL;
+      prim.PredicateEnable =
+         ice->state.predicate == IRIS_PREDICATE_STATE_USE_BIT;
 
-      // XXX: this is probably bonkers.
-      prim.StartVertexLocation = draw->start;
-
-      prim.IndirectParameterEnable = draw->indirect != NULL;
-
-      if (draw->index_size) {
-         prim.BaseVertexLocation += draw->index_bias;
+      if (draw->indirect || draw->count_from_stream_output) {
+         prim.IndirectParameterEnable = true;
       } else {
-         prim.StartVertexLocation += draw->index_bias;
-      }
+         prim.StartInstanceLocation = draw->start_instance;
+         prim.InstanceCount = draw->instance_count;
+         prim.VertexCountPerInstance = draw->count;
 
-      //prim.BaseVertexLocation = ...;
-   }
+         // XXX: this is probably bonkers.
+         prim.StartVertexLocation = draw->start;
 
-   if (!batch->contains_draw) {
-      iris_restore_render_saved_bos(ice, batch, draw);
-      batch->contains_draw = true;
+         if (draw->index_size) {
+            prim.BaseVertexLocation += draw->index_bias;
+         } else {
+            prim.StartVertexLocation += draw->index_bias;
+         }
+
+         //prim.BaseVertexLocation = ...;
+      }
    }
 }
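A minimal arithmetic sketch of the count_from_stream_output path above, where the vertex count is the streamout write offset (loaded into CS_GPR(0)) divided by the target's stride via iris_math_div32_gpr0(). The helper below just restates that division, with units left as whatever the offset and stride are expressed in; it is an illustration, not the driver code:

#include <assert.h>
#include <stdint.h>

static uint32_t
so_vertex_count(uint32_t write_offset, uint32_t stride)
{
   return write_offset / stride;
}

int main(void)
{
   /* e.g. 36 units written with a per-vertex stride of 12 -> replay 3 vertices */
   assert(so_vertex_count(36, 12) == 3);
   return 0;
}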
 
@@ -4274,44 +4678,18 @@ iris_upload_compute_state(struct iris_context *ice,
    struct brw_stage_prog_data *prog_data = shader->prog_data;
    struct brw_cs_prog_data *cs_prog_data = (void *) prog_data;
 
-   struct pipe_resource *grid_size_res = NULL;
-   uint32_t grid_size_offset;
-   if (grid->indirect) {
-      grid_size_res = grid->indirect;
-      grid_size_offset = grid->indirect_offset;
-   } else {
-      uint32_t *grid_size_map =
-         stream_state(batch, ice->state.surface_uploader, &grid_size_res, 12, 4,
-                      &grid_size_offset);
-      grid_size_map[0] = grid->grid[0];
-      grid_size_map[1] = grid->grid[1];
-      grid_size_map[2] = grid->grid[2];
-      struct iris_bo *grid_size_bo = iris_resource_bo(grid_size_res);
-      grid_size_offset -= iris_bo_offset_from_base_address(grid_size_bo);
-   }
-
-   struct iris_state_ref grid_size_surf;
-   memset(&grid_size_surf, 0, sizeof(grid_size_surf));
-   void *grid_surf_state_map =
-      upload_state(ice->state.surface_uploader,
-                   &grid_size_surf,
-                   4 * GENX(RENDER_SURFACE_STATE_length), 64);
-   assert(grid_surf_state_map);
-   struct iris_bo *grid_size_bo = iris_resource_bo(grid_size_res);
-   iris_use_pinned_bo(batch, grid_size_bo, false);
-   grid_size_surf.offset +=
-      iris_bo_offset_from_base_address(iris_resource_bo(grid_size_surf.res));
-   isl_buffer_fill_state(&screen->isl_dev, grid_surf_state_map,
-                         .address =
-                            grid_size_bo->gtt_offset + grid_size_offset,
-                         .size_B = 12,
-                         .format = ISL_FORMAT_RAW,
-                         .stride_B = 1,
-                         .mocs = MOCS_WB);
+   /* Always pin the binder.  If we're emitting new binding table pointers,
+    * we need it.  If not, we're probably inheriting old tables via the
+    * context, and need it anyway.  Since true zero-bindings cases are
+    * practically non-existent, just pin it and avoid last_res tracking.
+    */
+   iris_use_pinned_bo(batch, ice->state.binder.bo, false);
 
-   if (dirty & IRIS_DIRTY_BINDINGS_CS || grid_size_res)
-      iris_populate_binding_table(ice, batch, MESA_SHADER_COMPUTE, false,
-                                  &grid_size_surf);
+   if ((dirty & IRIS_DIRTY_CONSTANTS_CS) && shs->cbuf0_needs_upload)
+      upload_uniforms(ice, MESA_SHADER_COMPUTE);
+
+   if (dirty & IRIS_DIRTY_BINDINGS_CS)
+      iris_populate_binding_table(ice, batch, MESA_SHADER_COMPUTE, false);
 
    iris_use_optional_res(batch, shs->sampler_table.res, false);
    iris_use_pinned_bo(batch, iris_resource_bo(shader->assembly.res), false);
@@ -4319,43 +4697,45 @@ iris_upload_compute_state(struct iris_context *ice,
    if (ice->state.need_border_colors)
       iris_use_pinned_bo(batch, ice->state.border_color_pool.bo, false);
 
-   /* The MEDIA_VFE_STATE documentation for Gen8+ says:
-    *
-    *   "A stalling PIPE_CONTROL is required before MEDIA_VFE_STATE unless
-    *    the only bits that are changed are scoreboard related: Scoreboard
-    *    Enable, Scoreboard Type, Scoreboard Mask, Scoreboard * Delta. For
-    *    these scoreboard related states, a MEDIA_STATE_FLUSH is sufficient."
-    */
-   iris_emit_pipe_control_flush(batch, PIPE_CONTROL_CS_STALL);
-
-   iris_emit_cmd(batch, GENX(MEDIA_VFE_STATE), vfe) {
-      if (prog_data->total_scratch) {
-         /* Per Thread Scratch Space is in the range [0, 11] where
-          * 0 = 1k, 1 = 2k, 2 = 4k, ..., 11 = 2M.
-          */
-         // XXX: vfe.ScratchSpaceBasePointer
-         //vfe.PerThreadScratchSpace =
-            //ffs(stage_state->per_thread_scratch) - 11;
-      }
+   if (dirty & IRIS_DIRTY_CS) {
+      /* The MEDIA_VFE_STATE documentation for Gen8+ says:
+       *
+       *   "A stalling PIPE_CONTROL is required before MEDIA_VFE_STATE unless
+       *    the only bits that are changed are scoreboard related: Scoreboard
+       *    Enable, Scoreboard Type, Scoreboard Mask, Scoreboard Delta.  For
+       *    these scoreboard related states, a MEDIA_STATE_FLUSH is
+       *    sufficient."
+       */
+      iris_emit_pipe_control_flush(batch, PIPE_CONTROL_CS_STALL);
+
+      iris_emit_cmd(batch, GENX(MEDIA_VFE_STATE), vfe) {
+         if (prog_data->total_scratch) {
+            struct iris_bo *bo =
+               iris_get_scratch_space(ice, prog_data->total_scratch,
+                                      MESA_SHADER_COMPUTE);
+            vfe.PerThreadScratchSpace = ffs(prog_data->total_scratch) - 11;
+            vfe.ScratchSpaceBasePointer = rw_bo(bo, 0);
+         }
 
-      vfe.MaximumNumberofThreads =
-         devinfo->max_cs_threads * screen->subslice_total - 1;
+         vfe.MaximumNumberofThreads =
+            devinfo->max_cs_threads * screen->subslice_total - 1;
 #if GEN_GEN < 11
-      vfe.ResetGatewayTimer =
-         Resettingrelativetimerandlatchingtheglobaltimestamp;
+         vfe.ResetGatewayTimer =
+            Resettingrelativetimerandlatchingtheglobaltimestamp;
 #endif
 
-      vfe.NumberofURBEntries = 2;
-      vfe.URBEntryAllocationSize = 2;
+         vfe.NumberofURBEntries = 2;
+         vfe.URBEntryAllocationSize = 2;
 
-      // XXX: Use Indirect Payload Storage?
-      vfe.CURBEAllocationSize =
-         ALIGN(cs_prog_data->push.per_thread.regs * cs_prog_data->threads +
-               cs_prog_data->push.cross_thread.regs, 2);
+         // XXX: Use Indirect Payload Storage?
+         vfe.CURBEAllocationSize =
+            ALIGN(cs_prog_data->push.per_thread.regs * cs_prog_data->threads +
+                  cs_prog_data->push.cross_thread.regs, 2);
+      }
    }
 
-   // XXX: hack iris_set_constant_buffers to upload compute shader constants
-   // XXX: differently...?
+   // XXX: hack iris_set_constant_buffers to upload these thread counts
+   // XXX: along with regular uniforms for compute shaders, somehow.
 
    uint32_t curbe_data_offset = 0;
    // TODO: Move subgroup-id into uniforms ubo so we can push uniforms
@@ -4370,35 +4750,40 @@ iris_upload_compute_state(struct iris_context *ice,
    assert(curbe_data_map);
    memset(curbe_data_map, 0x5a, ALIGN(cs_prog_data->push.total.size, 64));
    iris_fill_cs_push_const_buffer(cs_prog_data, curbe_data_map);
-   iris_emit_cmd(batch, GENX(MEDIA_CURBE_LOAD), curbe) {
-     curbe.CURBETotalDataLength =
-        ALIGN(cs_prog_data->push.total.size, 64);
-     curbe.CURBEDataStartAddress = curbe_data_offset;
+
+   if (dirty & IRIS_DIRTY_CONSTANTS_CS) {
+      iris_emit_cmd(batch, GENX(MEDIA_CURBE_LOAD), curbe) {
+         curbe.CURBETotalDataLength =
+            ALIGN(cs_prog_data->push.total.size, 64);
+         curbe.CURBEDataStartAddress = curbe_data_offset;
+      }
    }
 
-   struct pipe_resource *desc_res = NULL;
-   uint32_t desc[GENX(INTERFACE_DESCRIPTOR_DATA_length)];
+   if (dirty & (IRIS_DIRTY_SAMPLER_STATES_CS |
+                IRIS_DIRTY_BINDINGS_CS |
+                IRIS_DIRTY_CONSTANTS_CS |
+                IRIS_DIRTY_CS)) {
+      struct pipe_resource *desc_res = NULL;
+      uint32_t desc[GENX(INTERFACE_DESCRIPTOR_DATA_length)];
 
-   iris_pack_state(GENX(INTERFACE_DESCRIPTOR_DATA), desc, idd) {
-      idd.SamplerStatePointer = shs->sampler_table.offset;
-      idd.BindingTablePointer = binder->bt_offset[MESA_SHADER_COMPUTE];
-      idd.ConstantURBEntryReadLength = cs_prog_data->push.per_thread.regs;
-      idd.CrossThreadConstantDataReadLength =
-         cs_prog_data->push.cross_thread.regs;
-   }
+      iris_pack_state(GENX(INTERFACE_DESCRIPTOR_DATA), desc, idd) {
+         idd.SamplerStatePointer = shs->sampler_table.offset;
+         idd.BindingTablePointer = binder->bt_offset[MESA_SHADER_COMPUTE];
+      }
 
-   for (int i = 0; i < GENX(INTERFACE_DESCRIPTOR_DATA_length); i++)
-      desc[i] |= ((uint32_t *) shader->derived_data)[i];
+      for (int i = 0; i < GENX(INTERFACE_DESCRIPTOR_DATA_length); i++)
+         desc[i] |= ((uint32_t *) shader->derived_data)[i];
 
-   iris_emit_cmd(batch, GENX(MEDIA_INTERFACE_DESCRIPTOR_LOAD), load) {
-      load.InterfaceDescriptorTotalLength =
-         GENX(INTERFACE_DESCRIPTOR_DATA_length) * sizeof(uint32_t);
-      load.InterfaceDescriptorDataStartAddress =
-         emit_state(batch, ice->state.dynamic_uploader,
-                    &desc_res, desc, sizeof(desc), 32);
-   }
+      iris_emit_cmd(batch, GENX(MEDIA_INTERFACE_DESCRIPTOR_LOAD), load) {
+         load.InterfaceDescriptorTotalLength =
+            GENX(INTERFACE_DESCRIPTOR_DATA_length) * sizeof(uint32_t);
+         load.InterfaceDescriptorDataStartAddress =
+            emit_state(batch, ice->state.dynamic_uploader,
+                       &desc_res, desc, sizeof(desc), 32);
+      }
 
-   pipe_resource_reference(&desc_res, NULL);
+      pipe_resource_reference(&desc_res, NULL);
+   }
 
    uint32_t group_size = grid->block[0] * grid->block[1] * grid->block[2];
    uint32_t remainder = group_size & (cs_prog_data->simd_size - 1);
@@ -4414,18 +4799,19 @@ iris_upload_compute_state(struct iris_context *ice,
 #define GPGPU_DISPATCHDIMZ 0x2508
 
    if (grid->indirect) {
-      struct iris_bo *bo = iris_resource_bo(grid_size_res);
+      struct iris_state_ref *grid_size = &ice->state.grid_size;
+      struct iris_bo *bo = iris_resource_bo(grid_size->res);
       iris_emit_cmd(batch, GENX(MI_LOAD_REGISTER_MEM), lrm) {
          lrm.RegisterAddress = GPGPU_DISPATCHDIMX;
-         lrm.MemoryAddress = ro_bo(bo, grid_size_offset + 0);
+         lrm.MemoryAddress = ro_bo(bo, grid_size->offset + 0);
       }
       iris_emit_cmd(batch, GENX(MI_LOAD_REGISTER_MEM), lrm) {
          lrm.RegisterAddress = GPGPU_DISPATCHDIMY;
-         lrm.MemoryAddress = ro_bo(bo, grid_size_offset + 4);
+         lrm.MemoryAddress = ro_bo(bo, grid_size->offset + 4);
       }
       iris_emit_cmd(batch, GENX(MI_LOAD_REGISTER_MEM), lrm) {
          lrm.RegisterAddress = GPGPU_DISPATCHDIMZ;
-         lrm.MemoryAddress = ro_bo(bo, grid_size_offset + 8);
+         lrm.MemoryAddress = ro_bo(bo, grid_size->offset + 8);
       }
    }
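For reference, a sketch of the indirect dispatch buffer layout consumed above: three packed uint32 group counts, read by the MI_LOAD_REGISTER_MEM commands at byte offsets 0, 4, and 8 into GPGPU_DISPATCHDIMX/Y/Z. The struct is illustrative, not a driver type:

#include <stdint.h>

struct dispatch_indirect_args {
   uint32_t num_groups_x;   /* offset 0 -> GPGPU_DISPATCHDIMX */
   uint32_t num_groups_y;   /* offset 4 -> GPGPU_DISPATCHDIMY */
   uint32_t num_groups_z;   /* offset 8 -> GPGPU_DISPATCHDIMZ */
};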
 
@@ -4456,7 +4842,13 @@ iris_upload_compute_state(struct iris_context *ice,
 static void
 iris_destroy_state(struct iris_context *ice)
 {
-   iris_free_vertex_buffers(&ice->state.genx->vertex_buffers);
+   struct iris_genx_state *genx = ice->state.genx;
+
+   uint64_t bound_vbs = ice->state.bound_vertex_buffers;
+   while (bound_vbs) {
+      const int i = u_bit_scan64(&bound_vbs);
+      pipe_resource_reference(&genx->vertex_buffers[i].resource, NULL);
+   }
 
    // XXX: unreference resources/surfaces.
    for (unsigned i = 0; i < ice->state.framebuffer.nr_cbufs; i++) {
@@ -4470,6 +4862,8 @@ iris_destroy_state(struct iris_context *ice)
    }
    free(ice->state.genx);
 
+   pipe_resource_reference(&ice->state.unbound_tex.res, NULL);
+
    pipe_resource_reference(&ice->state.last_res.cc_vp, NULL);
    pipe_resource_reference(&ice->state.last_res.sf_cl_vp, NULL);
    pipe_resource_reference(&ice->state.last_res.color_calc, NULL);
@@ -4480,6 +4874,21 @@ iris_destroy_state(struct iris_context *ice)
 
 /* ------------------------------------------------------------------- */
 
+static void
+iris_load_register_reg32(struct iris_batch *batch, uint32_t dst,
+                         uint32_t src)
+{
+   _iris_emit_lrr(batch, dst, src);
+}
+
+static void
+iris_load_register_reg64(struct iris_batch *batch, uint32_t dst,
+                         uint32_t src)
+{
+   _iris_emit_lrr(batch, dst, src);
+   _iris_emit_lrr(batch, dst + 4, src + 4);
+}
+
 static void
 iris_load_register_imm32(struct iris_batch *batch, uint32_t reg,
                          uint32_t val)
@@ -4623,8 +5032,7 @@ get_post_sync_flags(enum pipe_control_flags flags)
    return flags;
 }
 
-// XXX: compute support
-#define IS_COMPUTE_PIPELINE(batch) (batch->engine != I915_EXEC_RENDER)
+#define IS_COMPUTE_PIPELINE(batch) (batch->name == IRIS_BATCH_COMPUTE)
 
 /**
  * Emit a series of PIPE_CONTROL commands, taking into account any
@@ -4695,7 +5103,7 @@ iris_emit_raw_pipe_control(struct iris_batch *batch, uint32_t flags,
     * We do these now because they may add post-sync operations or CS stalls.
     */
 
-   if (flags & PIPE_CONTROL_VF_CACHE_INVALIDATE) {
+   if (GEN_GEN < 11 && flags & PIPE_CONTROL_VF_CACHE_INVALIDATE) {
       /* Project: BDW, SKL+ (stopping at CNL) / Argument: VF Invalidate
        *
        * "'Post Sync Operation' must be enabled to 'Write Immediate Data' or
@@ -5021,14 +5429,9 @@ genX(init_state)(struct iris_context *ice)
    ctx->bind_vertex_elements_state = iris_bind_vertex_elements_state;
    ctx->delete_blend_state = iris_delete_state;
    ctx->delete_depth_stencil_alpha_state = iris_delete_state;
-   ctx->delete_fs_state = iris_delete_state;
    ctx->delete_rasterizer_state = iris_delete_state;
    ctx->delete_sampler_state = iris_delete_state;
    ctx->delete_vertex_elements_state = iris_delete_state;
-   ctx->delete_tcs_state = iris_delete_state;
-   ctx->delete_tes_state = iris_delete_state;
-   ctx->delete_gs_state = iris_delete_state;
-   ctx->delete_vs_state = iris_delete_state;
    ctx->set_blend_color = iris_set_blend_color;
    ctx->set_clip_state = iris_set_clip_state;
    ctx->set_constant_buffer = iris_set_constant_buffer;
@@ -5058,6 +5461,8 @@ genX(init_state)(struct iris_context *ice)
    ice->vtbl.update_surface_base_address = iris_update_surface_base_address;
    ice->vtbl.upload_compute_state = iris_upload_compute_state;
    ice->vtbl.emit_raw_pipe_control = iris_emit_raw_pipe_control;
+   ice->vtbl.load_register_reg32 = iris_load_register_reg32;
+   ice->vtbl.load_register_reg64 = iris_load_register_reg64;
    ice->vtbl.load_register_imm32 = iris_load_register_imm32;
    ice->vtbl.load_register_imm64 = iris_load_register_imm64;
    ice->vtbl.load_register_mem32 = iris_load_register_mem32;
@@ -5079,6 +5484,8 @@ genX(init_state)(struct iris_context *ice)
 
    ice->state.dirty = ~0ull;
 
+   ice->state.statistics_counters_enabled = true;
+
    ice->state.sample_mask = 0xffff;
    ice->state.num_viewports = 1;
    ice->state.genx = calloc(1, sizeof(struct iris_genx_state));
@@ -5090,4 +5497,11 @@ genX(init_state)(struct iris_context *ice)
    isl_null_fill_state(&screen->isl_dev, null_surf_map, isl_extent3d(1, 1, 1));
    ice->state.unbound_tex.offset +=
       iris_bo_offset_from_base_address(iris_resource_bo(ice->state.unbound_tex.res));
+
+   /* Default all scissor rectangles to be empty regions. */
+   for (int i = 0; i < IRIS_MAX_VIEWPORTS; i++) {
+      ice->state.scissors[i] = (struct pipe_scissor_state) {
+         .minx = 1, .maxx = 0, .miny = 1, .maxy = 0,
+      };
+   }
 }