i965: Explicitly handle swizzles for MESA_FORMAT_R_SRGB8
[mesa.git] / src / mesa / drivers / dri / i965 / brw_wm_surface_state.c
index c95fb3739b3816c1dabdf3f5a97e93df06b07f63..4daa0e2add1380cae252098d67c8d262af7bbbbd 100644 (file)
 #include "brw_defines.h"
 #include "brw_wm.h"
 
-enum {
-   INTEL_RENDERBUFFER_LAYERED = 1 << 0,
-   INTEL_AUX_BUFFER_DISABLED = 1 << 1,
-};
-
-uint32_t tex_mocs[] = {
+uint32_t wb_mocs[] = {
    [7] = GEN7_MOCS_L3,
    [8] = BDW_MOCS_WB,
    [9] = SKL_MOCS_WB,
+   [10] = CNL_MOCS_WB,
+   [11] = ICL_MOCS_WB,
 };
 
-uint32_t rb_mocs[] = {
+uint32_t pte_mocs[] = {
    [7] = GEN7_MOCS_L3,
    [8] = BDW_MOCS_PTE,
    [9] = SKL_MOCS_PTE,
+   [10] = CNL_MOCS_PTE,
+   [11] = ICL_MOCS_PTE,
 };
 
+uint32_t
+brw_get_bo_mocs(const struct gen_device_info *devinfo, struct brw_bo *bo)
+{
+   return (bo && bo->external ? pte_mocs : wb_mocs)[devinfo->gen];
+}
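
For reference, a minimal usage sketch of the new MOCS helper (the wrapper name below is made up for illustration; bo->external and the MOCS constants come from the existing i965 code). External buffers, such as scanout or imported BOs, keep whatever caching mode the kernel programmed into the page tables, while driver-internal BOs get plain write-back:

/* Hypothetical usage sketch -- not part of the patch. */
static uint32_t
example_mocs_for_miptree(const struct brw_context *brw,
                         const struct intel_mipmap_tree *mt)
{
   const struct gen_device_info *devinfo = &brw->screen->devinfo;

   /* The tables above are only populated for gen7..gen11. */
   assert(devinfo->gen >= 7 && devinfo->gen <= 11);

   /* bo->external is set for buffers shared outside the driver; those
    * must honour the PTE caching bits instead of forcing write-back.
    */
   return brw_get_bo_mocs(devinfo, mt->bo);
}
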
+
+static void
+get_isl_surf(struct brw_context *brw, struct intel_mipmap_tree *mt,
+             GLenum target, struct isl_view *view,
+             uint32_t *tile_x, uint32_t *tile_y,
+             uint32_t *offset, struct isl_surf *surf)
+{
+   *surf = mt->surf;
+
+   const struct gen_device_info *devinfo = &brw->screen->devinfo;
+   const enum isl_dim_layout dim_layout =
+      get_isl_dim_layout(devinfo, mt->surf.tiling, target);
+
+   surf->dim = get_isl_surf_dim(target);
+
+   if (surf->dim_layout == dim_layout)
+      return;
+
+   /* The layout of the specified texture target is not compatible with the
+    * actual layout of the miptree structure in memory -- you're entering
+    * dangerous territory.  This can only possibly work if you only intended
+    * to access a single level and slice of the texture, and the hardware
+    * supports the tile offset feature in order to allow non-tile-aligned
+    * base offsets, since we'll have to point the hardware to the first
+    * texel of the level instead of relying on the usual base level/layer
+    * controls.
+    */
+   assert(devinfo->has_surface_tile_offset);
+   assert(view->levels == 1 && view->array_len == 1);
+   assert(*tile_x == 0 && *tile_y == 0);
+
+   *offset += intel_miptree_get_tile_offsets(mt, view->base_level,
+                                             view->base_array_layer,
+                                             tile_x, tile_y);
+
+   /* Minify the logical dimensions of the texture. */
+   const unsigned l = view->base_level - mt->first_level;
+   surf->logical_level0_px.width = minify(surf->logical_level0_px.width, l);
+   surf->logical_level0_px.height = surf->dim <= ISL_SURF_DIM_1D ? 1 :
+      minify(surf->logical_level0_px.height, l);
+   surf->logical_level0_px.depth = surf->dim <= ISL_SURF_DIM_2D ? 1 :
+      minify(surf->logical_level0_px.depth, l);
+
+   /* Only the base level and layer can be addressed with the overridden
+    * layout.
+    */
+   surf->logical_level0_px.array_len = 1;
+   surf->levels = 1;
+   surf->dim_layout = dim_layout;
+
+   /* The requested slice of the texture is now at the base level and
+    * layer.
+    */
+   view->base_level = 0;
+   view->base_array_layer = 0;
+}
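
To make the minification step above concrete, here is a small sketch assuming Mesa's minify() macro (MAX2(1, x >> level)); the helper name is invented for illustration:

/* Hypothetical sketch: extent of the overridden single-level surface when
 * the view's base level sits `level` levels below mt->first_level.
 */
static void
example_minified_extent(unsigned w0, unsigned h0, unsigned level,
                        unsigned *w, unsigned *h)
{
   *w = minify(w0, level);   /* e.g. minify(64, 3) == 8 */
   *h = minify(h0, level);   /* e.g. minify(16, 3) == 2 */
   /* Depth and array length collapse to 1, since only the base level and
    * layer can be addressed with the overridden layout.
    */
}
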
+
 static void
 brw_emit_surface_state(struct brw_context *brw,
-                       struct intel_mipmap_tree *mt, uint32_t flags,
+                       struct intel_mipmap_tree *mt,
                        GLenum target, struct isl_view view,
-                       uint32_t mocs, uint32_t *surf_offset, int surf_index,
-                       unsigned read_domains, unsigned write_domains)
+                       enum isl_aux_usage aux_usage,
+                       uint32_t *surf_offset, int surf_index,
+                       unsigned reloc_flags)
 {
-   uint32_t tile_x = mt->level[0].slice[0].x_offset;
-   uint32_t tile_y = mt->level[0].slice[0].y_offset;
+   const struct gen_device_info *devinfo = &brw->screen->devinfo;
+   uint32_t tile_x = mt->level[0].level_x;
+   uint32_t tile_y = mt->level[0].level_y;
    uint32_t offset = mt->offset;
 
    struct isl_surf surf;
-   intel_miptree_get_isl_surf(brw, mt, &surf);
 
-   surf.dim = get_isl_surf_dim(target);
-
-   const enum isl_dim_layout dim_layout =
-      get_isl_dim_layout(&brw->screen->devinfo, mt->tiling, target);
-
-   if (surf.dim_layout != dim_layout) {
-      /* The layout of the specified texture target is not compatible with the
-       * actual layout of the miptree structure in memory -- You're entering
-       * dangerous territory, this can only possibly work if you only intended
-       * to access a single level and slice of the texture, and the hardware
-       * supports the tile offset feature in order to allow non-tile-aligned
-       * base offsets, since we'll have to point the hardware to the first
-       * texel of the level instead of relying on the usual base level/layer
-       * controls.
-       */
-      assert(brw->has_surface_tile_offset);
-      assert(view.levels == 1 && view.array_len == 1);
-      assert(tile_x == 0 && tile_y == 0);
-
-      offset += intel_miptree_get_tile_offsets(mt, view.base_level,
-                                               view.base_array_layer,
-                                               &tile_x, &tile_y);
-
-      /* Minify the logical dimensions of the texture. */
-      const unsigned l = view.base_level - mt->first_level;
-      surf.logical_level0_px.width = minify(surf.logical_level0_px.width, l);
-      surf.logical_level0_px.height = surf.dim <= ISL_SURF_DIM_1D ? 1 :
-         minify(surf.logical_level0_px.height, l);
-      surf.logical_level0_px.depth = surf.dim <= ISL_SURF_DIM_2D ? 1 :
-         minify(surf.logical_level0_px.depth, l);
-
-      /* Only the base level and layer can be addressed with the overridden
-       * layout.
-       */
-      surf.logical_level0_px.array_len = 1;
-      surf.levels = 1;
-      surf.dim_layout = dim_layout;
-
-      /* The requested slice of the texture is now at the base level and
-       * layer.
-       */
-      view.base_level = 0;
-      view.base_array_layer = 0;
-   }
+   get_isl_surf(brw, mt, target, &view, &tile_x, &tile_y, &offset, &surf);
 
    union isl_color_value clear_color = { .u32 = { 0, 0, 0, 0 } };
 
-   struct brw_bo *aux_bo;
-   struct isl_surf *aux_surf = NULL, aux_surf_s;
+   struct brw_bo *aux_bo = NULL;
+   struct isl_surf *aux_surf = NULL;
    uint64_t aux_offset = 0;
-   enum isl_aux_usage aux_usage = ISL_AUX_USAGE_NONE;
-   if ((mt->mcs_buf || intel_miptree_sample_with_hiz(brw, mt)) &&
-       !(flags & INTEL_AUX_BUFFER_DISABLED)) {
-      intel_miptree_get_aux_isl_surf(brw, mt, &aux_surf_s, &aux_usage);
-      aux_surf = &aux_surf_s;
-
-      if (mt->mcs_buf) {
-         aux_bo = mt->mcs_buf->bo;
-         aux_offset = mt->mcs_buf->bo->offset64 + mt->mcs_buf->offset;
-      } else {
-         aux_bo = mt->hiz_buf->aux_base.bo;
-         aux_offset = mt->hiz_buf->aux_base.bo->offset64;
-      }
+   struct brw_bo *clear_bo = NULL;
+   uint32_t clear_offset = 0;
+
+   if (aux_usage != ISL_AUX_USAGE_NONE) {
+      aux_surf = &mt->aux_buf->surf;
+      aux_bo = mt->aux_buf->bo;
+      aux_offset = mt->aux_buf->offset;
 
       /* We only really need a clear color if we also have an auxiliary
        * surface.  Without one, it does nothing.
        */
-      clear_color = intel_miptree_get_isl_clear_color(brw, mt);
+      clear_color =
+         intel_miptree_get_clear_color(devinfo, mt, view.format,
+                                       view.usage & ISL_SURF_USAGE_TEXTURE_BIT,
+                                       &clear_bo, &clear_offset);
    }
 
    void *state = brw_state_batch(brw,
@@ -162,74 +178,91 @@ brw_emit_surface_state(struct brw_context *brw,
                                  surf_offset);
 
    isl_surf_fill_state(&brw->isl_dev, state, .surf = &surf, .view = &view,
-                       .address = mt->bo->offset64 + offset,
+                       .address = brw_state_reloc(&brw->batch,
+                                                  *surf_offset + brw->isl_dev.ss.addr_offset,
+                                                  mt->bo, offset, reloc_flags),
                        .aux_surf = aux_surf, .aux_usage = aux_usage,
                        .aux_address = aux_offset,
-                       .mocs = mocs, .clear_color = clear_color,
+                       .mocs = brw_get_bo_mocs(devinfo, mt->bo),
+                       .clear_color = clear_color,
+                       .use_clear_address = clear_bo != NULL,
+                       .clear_address = clear_offset,
                        .x_offset_sa = tile_x, .y_offset_sa = tile_y);
-
-   brw_emit_reloc(&brw->batch, *surf_offset + brw->isl_dev.ss.addr_offset,
-                  mt->bo, offset, read_domains, write_domains);
-
    if (aux_surf) {
       /* On gen7 and prior, the upper 20 bits of surface state DWORD 6 are the
        * upper 20 bits of the GPU address of the MCS buffer; the lower 12 bits
        * contain other control information.  Since buffer addresses are always
        * on 4k boundaries (and thus have their lower 12 bits zero), we can use
        * an ordinary reloc to do the necessary address translation.
+       *
+       * FIXME: move to the point of assignment.
        */
       assert((aux_offset & 0xfff) == 0);
-      uint32_t *aux_addr = state + brw->isl_dev.ss.aux_addr_offset;
-      brw_emit_reloc(&brw->batch,
-                     *surf_offset + brw->isl_dev.ss.aux_addr_offset,
-                     aux_bo, *aux_addr - aux_bo->offset64,
-                     read_domains, write_domains);
+
+      if (devinfo->gen >= 8) {
+         uint64_t *aux_addr = state + brw->isl_dev.ss.aux_addr_offset;
+         *aux_addr = brw_state_reloc(&brw->batch,
+                                     *surf_offset +
+                                     brw->isl_dev.ss.aux_addr_offset,
+                                     aux_bo, *aux_addr,
+                                     reloc_flags);
+      } else {
+         uint32_t *aux_addr = state + brw->isl_dev.ss.aux_addr_offset;
+         *aux_addr = brw_state_reloc(&brw->batch,
+                                     *surf_offset +
+                                     brw->isl_dev.ss.aux_addr_offset,
+                                     aux_bo, *aux_addr,
+                                     reloc_flags);
+      }
+   }
+
+   if (clear_bo != NULL) {
+      /* Make sure the offset is aligned with a cacheline. */
+      assert((clear_offset & 0x3f) == 0);
+      uint64_t *clear_address =
+            state + brw->isl_dev.ss.clear_color_state_offset;
+      *clear_address = brw_state_reloc(&brw->batch,
+                                       *surf_offset +
+                                       brw->isl_dev.ss.clear_color_state_offset,
+                                       clear_bo, *clear_address, reloc_flags);
    }
 }
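
A small sketch of the gen7 DWORD 6 packing described in the comment above (illustrative name, exact control-bit layout left to isl): because the aux buffer is 4k-aligned, adding the relocated address to the control bits isl already wrote is equivalent to OR-ing them together.

/* Hypothetical sketch -- not part of the patch. */
static uint32_t
example_pack_gen7_aux_dword(uint64_t aux_address, uint32_t control_bits)
{
   /* MCS/HiZ buffers are 4k-aligned, so the low 12 bits are free for the
    * control fields of SURFACE_STATE DWORD 6.
    */
   assert((aux_address & 0xfff) == 0);
   assert((control_bits & ~0xfffu) == 0);
   return (uint32_t)aux_address | control_bits;
}
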
 
-uint32_t
-brw_update_renderbuffer_surface(struct brw_context *brw,
-                                struct gl_renderbuffer *rb,
-                                uint32_t flags, unsigned unit /* unused */,
-                                uint32_t surf_index)
+static uint32_t
+gen6_update_renderbuffer_surface(struct brw_context *brw,
+                                 struct gl_renderbuffer *rb,
+                                 unsigned unit,
+                                 uint32_t surf_index)
 {
    struct gl_context *ctx = &brw->ctx;
    struct intel_renderbuffer *irb = intel_renderbuffer(rb);
    struct intel_mipmap_tree *mt = irb->mt;
 
-   if (brw->gen < 9) {
-      assert(!(flags & INTEL_AUX_BUFFER_DISABLED));
-   }
-
    assert(brw_render_target_supported(brw, rb));
 
    mesa_format rb_format = _mesa_get_render_format(ctx, intel_rb_format(irb));
-   if (unlikely(!brw->format_supported_as_render_target[rb_format])) {
+   if (unlikely(!brw->mesa_format_supports_render[rb_format])) {
       _mesa_problem(ctx, "%s: renderbuffer format %s unsupported\n",
                     __func__, _mesa_get_format_name(rb_format));
    }
-
-   const unsigned layer_multiplier =
-      (irb->mt->msaa_layout == INTEL_MSAA_LAYOUT_UMS ||
-       irb->mt->msaa_layout == INTEL_MSAA_LAYOUT_CMS) ?
-      MAX2(irb->mt->num_samples, 1) : 1;
+   enum isl_format isl_format = brw->mesa_to_isl_render_format[rb_format];
 
    struct isl_view view = {
-      .format = brw->render_target_format[rb_format],
+      .format = isl_format,
       .base_level = irb->mt_level - irb->mt->first_level,
       .levels = 1,
-      .base_array_layer = irb->mt_layer / layer_multiplier,
+      .base_array_layer = irb->mt_layer,
       .array_len = MAX2(irb->layer_count, 1),
       .swizzle = ISL_SWIZZLE_IDENTITY,
       .usage = ISL_SURF_USAGE_RENDER_TARGET_BIT,
    };
 
    uint32_t offset;
-   brw_emit_surface_state(brw, mt, flags, mt->target, view,
-                          rb_mocs[brw->gen],
+   brw_emit_surface_state(brw, mt, mt->target, view,
+                          brw->draw_aux_usage[unit],
                           &offset, surf_index,
-                          I915_GEM_DOMAIN_RENDER,
-                          I915_GEM_DOMAIN_RENDER);
+                          RELOC_WRITE);
    return offset;
 }
 
@@ -264,12 +297,12 @@ translate_tex_target(GLenum target)
 }
 
 uint32_t
-brw_get_surface_tiling_bits(uint32_t tiling)
+brw_get_surface_tiling_bits(enum isl_tiling tiling)
 {
    switch (tiling) {
-   case I915_TILING_X:
+   case ISL_TILING_X:
       return BRW_SURFACE_TILED;
-   case I915_TILING_Y:
+   case ISL_TILING_Y0:
       return BRW_SURFACE_TILED | BRW_SURFACE_TILED_Y;
    default:
       return 0;
@@ -387,9 +420,19 @@ brw_get_texture_swizzle(const struct gl_context *ctx,
       }
       break;
    case GL_RED:
+      if (img->TexFormat == MESA_FORMAT_R_SRGB8) {
+         swizzles[0] = SWIZZLE_X;
+         swizzles[1] = SWIZZLE_ZERO;
+         swizzles[2] = SWIZZLE_ZERO;
+         swizzles[3] = SWIZZLE_ONE;
+         break;
+      }
+      /* fallthrough */
    case GL_RG:
    case GL_RGB:
-      if (_mesa_get_format_bits(img->TexFormat, GL_ALPHA_BITS) > 0)
+      if (_mesa_get_format_bits(img->TexFormat, GL_ALPHA_BITS) > 0 ||
+          img->TexFormat == MESA_FORMAT_RGB_DXT1 ||
+          img->TexFormat == MESA_FORMAT_SRGB_DXT1)
          swizzles[3] = SWIZZLE_ONE;
       break;
    }
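
The new GL_RED case is this commit's headline change: since i965 appears to back MESA_FORMAT_R_SRGB8 with a luminance-style sRGB surface format rather than a true single-channel one, the sampled red value would otherwise show up in green and blue as well. A minimal sketch of the swizzle the case produces, assuming Mesa's MAKE_SWIZZLE4/SWIZZLE_* helpers (the function name is invented):

/* Hypothetical sketch: GL_RED semantics (r, 0, 0, 1) regardless of what
 * the underlying surface format returns in its other channels.
 */
static GLuint
example_r_srgb8_swizzle(void)
{
   return MAKE_SWIZZLE4(SWIZZLE_X, SWIZZLE_ZERO, SWIZZLE_ZERO, SWIZZLE_ONE);
}
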
@@ -421,105 +464,15 @@ swizzle_to_scs(GLenum swizzle, bool need_green_to_blue)
    return (need_green_to_blue && scs == HSW_SCS_GREEN) ? HSW_SCS_BLUE : scs;
 }
 
-static unsigned
-brw_find_matching_rb(const struct gl_framebuffer *fb,
-                     const struct intel_mipmap_tree *mt)
-{
-   for (unsigned i = 0; i < fb->_NumColorDrawBuffers; i++) {
-      const struct intel_renderbuffer *irb =
-         intel_renderbuffer(fb->_ColorDrawBuffers[i]);
-
-      if (irb && irb->mt == mt)
-         return i;
-   }
-
-   return fb->_NumColorDrawBuffers;
-}
-
-static inline bool
-brw_texture_view_sane(const struct brw_context *brw,
-                      const struct intel_mipmap_tree *mt,
-                      const struct isl_view *view)
-{
-   /* There are special cases only for lossless compression. */
-   if (!intel_miptree_is_lossless_compressed(brw, mt))
-      return true;
-
-   if (isl_format_supports_ccs_e(&brw->screen->devinfo, view->format))
-      return true;
-
-   /* Logic elsewhere needs to take care to resolve the color buffer prior
-    * to sampling it as non-compressed.
-    */
-   if (intel_miptree_has_color_unresolved(mt, view->base_level, view->levels,
-                                          view->base_array_layer,
-                                          view->array_len))
-      return false;
-
-   const struct gl_framebuffer *fb = brw->ctx.DrawBuffer;
-   const unsigned rb_index = brw_find_matching_rb(fb, mt);
-
-   if (rb_index == fb->_NumColorDrawBuffers)
-      return true;
-
-   /* Underlying surface is compressed but it is sampled using a format that
-    * the sampling engine doesn't support as compressed. Compression must be
-    * disabled for both sampling engine and data port in case the same surface
-    * is used also as render target.
-    */
-   return brw->draw_aux_buffer_disabled[rb_index];
-}
-
-static bool
-brw_disable_aux_surface(const struct brw_context *brw,
-                        const struct intel_mipmap_tree *mt,
-                        const struct isl_view *view)
-{
-   /* Nothing to disable. */
-   if (!mt->mcs_buf)
-      return false;
-
-   const bool is_unresolved = intel_miptree_has_color_unresolved(
-                                 mt, view->base_level, view->levels,
-                                 view->base_array_layer, view->array_len);
-
-   /* There are special cases only for lossless compression. */
-   if (!intel_miptree_is_lossless_compressed(brw, mt))
-      return !is_unresolved;
-
-   const struct gl_framebuffer *fb = brw->ctx.DrawBuffer;
-   const unsigned rb_index = brw_find_matching_rb(fb, mt);
-
-   /* If we are drawing into this with compression enabled, then we must also
-    * enable compression when texturing from it regardless of
-    * fast_clear_state.  If we don't then, after the first draw call with
-    * this setup, there will be data in the CCS which won't get picked up by
-    * subsequent texturing operations as required by ARB_texture_barrier.
-    * Since we don't want to re-emit the binding table or do a resolve
-    * operation every draw call, the easiest thing to do is just enable
-    * compression on the texturing side.  This is completely safe to do
-    * since, if compressed texturing weren't allowed, we would have disabled
-    * compression of render targets in whatever_that_function_is_called().
-    */
-   if (rb_index < fb->_NumColorDrawBuffers) {
-      if (brw->draw_aux_buffer_disabled[rb_index]) {
-         assert(!is_unresolved);
-      }
-
-      return brw->draw_aux_buffer_disabled[rb_index];
-   }
-
-   return !is_unresolved;
-}
-
-void
-brw_update_texture_surface(struct gl_context *ctx,
+static void brw_update_texture_surface(struct gl_context *ctx,
                            unsigned unit,
                            uint32_t *surf_offset,
                            bool for_gather,
+                           bool for_txf,
                            uint32_t plane)
 {
    struct brw_context *brw = brw_context(ctx);
+   const struct gen_device_info *devinfo = &brw->screen->devinfo;
    struct gl_texture_object *obj = ctx->Texture.Unit[unit]._Current;
 
    if (obj->Target == GL_TEXTURE_BUFFER) {
@@ -539,9 +492,14 @@ brw_update_texture_surface(struct gl_context *ctx,
       /* If this is a view with restricted NumLayers, then our effective depth
        * is not just the miptree depth.
        */
-      const unsigned view_num_layers =
-         (obj->Immutable && obj->Target != GL_TEXTURE_3D) ? obj->NumLayers :
-                                                            mt->logical_depth0;
+      unsigned view_num_layers;
+      if (obj->Immutable && obj->Target != GL_TEXTURE_3D) {
+         view_num_layers = obj->NumLayers;
+      } else {
+         view_num_layers = mt->surf.dim == ISL_SURF_DIM_3D ?
+                              mt->surf.logical_level0_px.depth :
+                              mt->surf.logical_level0_px.array_len;
+      }
 
       /* Handling GL_ALPHA as a surface format override breaks 1.30+ style
        * texturing functions that return a float, as our code generation always
@@ -554,19 +512,34 @@ brw_update_texture_surface(struct gl_context *ctx,
       const unsigned swizzle = (unlikely(alpha_depth) ? SWIZZLE_XYZW :
                                 brw_get_texture_swizzle(&brw->ctx, obj));
 
-      mesa_format mesa_fmt = plane == 0 ? intel_obj->_Format : mt->format;
-      unsigned format = translate_tex_format(brw, mesa_fmt,
-                                             sampler->sRGBDecode);
+      mesa_format mesa_fmt;
+      if (firstImage->_BaseFormat == GL_DEPTH_STENCIL ||
+          firstImage->_BaseFormat == GL_DEPTH_COMPONENT) {
+         /* The format from intel_obj may be a combined depth stencil format
+          * when we just want depth.  Pull it from the miptree instead.  This
+          * is safe because texture views aren't allowed on depth/stencil.
+          */
+         mesa_fmt = mt->format;
+      } else if (mt->etc_format != MESA_FORMAT_NONE) {
+         mesa_fmt = mt->format;
+      } else if (plane > 0) {
+         mesa_fmt = mt->format;
+      } else {
+         mesa_fmt = intel_obj->_Format;
+      }
+      enum isl_format format = translate_tex_format(brw, mesa_fmt,
+                                                    for_txf ? GL_DECODE_EXT :
+                                                    sampler->sRGBDecode);
 
       /* Implement gen6 and gen7 gather work-around */
       bool need_green_to_blue = false;
       if (for_gather) {
-         if (brw->gen == 7 && (format == ISL_FORMAT_R32G32_FLOAT ||
-                               format == ISL_FORMAT_R32G32_SINT ||
-                               format == ISL_FORMAT_R32G32_UINT)) {
+         if (devinfo->gen == 7 && (format == ISL_FORMAT_R32G32_FLOAT ||
+                                   format == ISL_FORMAT_R32G32_SINT ||
+                                   format == ISL_FORMAT_R32G32_UINT)) {
             format = ISL_FORMAT_R32G32_FLOAT_LD;
-            need_green_to_blue = brw->is_haswell;
-         } else if (brw->gen == 6) {
+            need_green_to_blue = devinfo->is_haswell;
+         } else if (devinfo->gen == 6) {
             /* Sandybridge's gather4 message is broken for integer formats.
              * To work around this, we pretend the surface is UNORM for
              * 8 or 16-bit formats, and emit shader instructions to recover
@@ -597,14 +570,14 @@ brw_update_texture_surface(struct gl_context *ctx,
       }
 
       if (obj->StencilSampling && firstImage->_BaseFormat == GL_DEPTH_STENCIL) {
-         if (brw->gen <= 7) {
+         if (devinfo->gen <= 7) {
             assert(mt->r8stencil_mt && !mt->stencil_mt->r8stencil_needs_update);
             mt = mt->r8stencil_mt;
          } else {
             mt = mt->stencil_mt;
          }
          format = ISL_FORMAT_R8_UINT;
-      } else if (brw->gen <= 7 && mt->format == MESA_FORMAT_S_UINT8) {
+      } else if (devinfo->gen <= 7 && mt->format == MESA_FORMAT_S_UINT8) {
          assert(mt->r8stencil_mt && !mt->r8stencil_needs_update);
          mt = mt->r8stencil_mt;
          format = ISL_FORMAT_R8_UINT;
@@ -627,18 +600,23 @@ brw_update_texture_surface(struct gl_context *ctx,
          .usage = ISL_SURF_USAGE_TEXTURE_BIT,
       };
 
+      /* On Ivy Bridge and earlier, we handle texture swizzle with shader
+       * code.  The actual surface swizzle should be identity.
+       */
+      if (devinfo->gen <= 7 && !devinfo->is_haswell)
+         view.swizzle = ISL_SWIZZLE_IDENTITY;
+
       if (obj->Target == GL_TEXTURE_CUBE_MAP ||
           obj->Target == GL_TEXTURE_CUBE_MAP_ARRAY)
          view.usage |= ISL_SURF_USAGE_CUBE_BIT;
 
-      assert(brw_texture_view_sane(brw, mt, &view));
+      enum isl_aux_usage aux_usage =
+         intel_miptree_texture_aux_usage(brw, mt, format,
+                                         brw->gen9_astc5x5_wa_tex_mask);
 
-      const int flags = brw_disable_aux_surface(brw, mt, &view) ?
-                           INTEL_AUX_BUFFER_DISABLED : 0;
-      brw_emit_surface_state(brw, mt, flags, mt->target, view,
-                             tex_mocs[brw->gen],
+      brw_emit_surface_state(brw, mt, mt->target, view, aux_usage,
                              surf_offset, surf_index,
-                             I915_GEM_DOMAIN_SAMPLER, 0);
+                             0);
    }
 }
 
@@ -650,47 +628,35 @@ brw_emit_buffer_surface_state(struct brw_context *brw,
                               unsigned surface_format,
                               unsigned buffer_size,
                               unsigned pitch,
-                              bool rw)
+                              unsigned reloc_flags)
 {
+   const struct gen_device_info *devinfo = &brw->screen->devinfo;
    uint32_t *dw = brw_state_batch(brw,
                                   brw->isl_dev.ss.size,
                                   brw->isl_dev.ss.align,
                                   out_offset);
 
    isl_buffer_fill_state(&brw->isl_dev, dw,
-                         .address = (bo ? bo->offset64 : 0) + buffer_offset,
-                         .size = buffer_size,
+                         .address = !bo ? buffer_offset :
+                                    brw_state_reloc(&brw->batch,
+                                                    *out_offset + brw->isl_dev.ss.addr_offset,
+                                                    bo, buffer_offset,
+                                                    reloc_flags),
+                         .size_B = buffer_size,
                          .format = surface_format,
-                         .stride = pitch,
-                         .mocs = tex_mocs[brw->gen]);
-
-   if (bo) {
-      brw_emit_reloc(&brw->batch, *out_offset + brw->isl_dev.ss.addr_offset,
-                     bo, buffer_offset,
-                     I915_GEM_DOMAIN_SAMPLER,
-                     (rw ? I915_GEM_DOMAIN_SAMPLER : 0));
-   }
+                         .stride_B = pitch,
+                         .mocs = brw_get_bo_mocs(devinfo, bo));
 }
 
-void
-brw_update_buffer_texture_surface(struct gl_context *ctx,
-                                  unsigned unit,
-                                  uint32_t *surf_offset)
+static unsigned
+buffer_texture_range_size(struct brw_context *brw,
+                          struct gl_texture_object *obj)
 {
-   struct brw_context *brw = brw_context(ctx);
-   struct gl_texture_object *tObj = ctx->Texture.Unit[unit]._Current;
-   struct intel_buffer_object *intel_obj =
-      intel_buffer_object(tObj->BufferObject);
-   uint32_t size = tObj->BufferSize;
-   struct brw_bo *bo = NULL;
-   mesa_format format = tObj->_BufferObjectFormat;
-   uint32_t brw_format = brw_isl_format_for_mesa_format(format);
-   int texel_size = _mesa_get_format_bytes(format);
-
-   if (intel_obj) {
-      size = MIN2(size, intel_obj->Base.Size);
-      bo = intel_bufferobj_buffer(brw, intel_obj, tObj->BufferOffset, size);
-   }
+   assert(obj->Target == GL_TEXTURE_BUFFER);
+   const unsigned texel_size = _mesa_get_format_bytes(obj->_BufferObjectFormat);
+   const unsigned buffer_size = (!obj->BufferObject ? 0 :
+                                 obj->BufferObject->Size);
+   const unsigned buffer_offset = MIN2(buffer_size, obj->BufferOffset);
 
    /* The ARB_texture_buffer_specification says:
     *
@@ -708,57 +674,41 @@ brw_update_buffer_texture_surface(struct gl_context *ctx,
     * so that when ISL divides by stride to obtain the number of texels, that
     * texel count is clamped to MAX_TEXTURE_BUFFER_SIZE.
     */
-   size = MIN2(size, ctx->Const.MaxTextureBufferSize * (unsigned) texel_size);
+   return MIN3((unsigned)obj->BufferSize,
+               buffer_size - buffer_offset,
+               brw->ctx.Const.MaxTextureBufferSize * texel_size);
+}
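
A worked example of the clamp above with made-up numbers: a 1 MiB buffer object, a 256 KiB offset, RGBA32F texels (16 bytes each), and a MaxTextureBufferSize of 2^27 texels. When the whole buffer is attached (glTexBuffer rather than glTexBufferRange), obj->BufferSize is -1 and wraps to UINT_MAX in the cast, so the other two terms do the clamping:

/* Illustrative numbers only -- not part of the patch. */
const unsigned texel_size  = 16;               /* RGBA32F                    */
const unsigned buffer_size = 1024 * 1024;      /* BufferObject->Size         */
const unsigned offset      = 256 * 1024;       /* obj->BufferOffset          */
const unsigned hw_cap      = (1u << 27) * 16;  /* MaxTextureBufferSize * 16  */
/* MIN3(UINT_MAX, buffer_size - offset, hw_cap) == 786432, i.e. only the
 * 768 KiB that remain past the offset are exposed to the shader.
 */
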
+
+void
+brw_update_buffer_texture_surface(struct gl_context *ctx,
+                                  unsigned unit,
+                                  uint32_t *surf_offset)
+{
+   struct brw_context *brw = brw_context(ctx);
+   struct gl_texture_object *tObj = ctx->Texture.Unit[unit]._Current;
+   struct intel_buffer_object *intel_obj =
+      intel_buffer_object(tObj->BufferObject);
+   const unsigned size = buffer_texture_range_size(brw, tObj);
+   struct brw_bo *bo = NULL;
+   mesa_format format = tObj->_BufferObjectFormat;
+   const enum isl_format isl_format = brw_isl_format_for_mesa_format(format);
+   int texel_size = _mesa_get_format_bytes(format);
 
-   if (brw_format == 0 && format != MESA_FORMAT_RGBA_FLOAT32) {
+   if (intel_obj)
+      bo = intel_bufferobj_buffer(brw, intel_obj, tObj->BufferOffset, size,
+                                  false);
+
+   if (isl_format == ISL_FORMAT_UNSUPPORTED) {
       _mesa_problem(NULL, "bad format %s for texture buffer\n",
                    _mesa_get_format_name(format));
    }
 
    brw_emit_buffer_surface_state(brw, surf_offset, bo,
                                  tObj->BufferOffset,
-                                 brw_format,
+                                 isl_format,
                                  size,
                                  texel_size,
-                                 false /* rw */);
-}
-
-/**
- * Create the constant buffer surface.  Vertex/fragment shader constants will be
- * read from this buffer with Data Port Read instructions/messages.
- */
-void
-brw_create_constant_surface(struct brw_context *brw,
-                           struct brw_bo *bo,
-                           uint32_t offset,
-                           uint32_t size,
-                           uint32_t *out_offset)
-{
-   brw_emit_buffer_surface_state(brw, out_offset, bo, offset,
-                                 ISL_FORMAT_R32G32B32A32_FLOAT,
-                                 size, 1, false);
-}
-
-/**
- * Create the buffer surface. Shader buffer variables will be
- * read from / write to this buffer with Data Port Read/Write
- * instructions/messages.
- */
-void
-brw_create_buffer_surface(struct brw_context *brw,
-                          struct brw_bo *bo,
-                          uint32_t offset,
-                          uint32_t size,
-                          uint32_t *out_offset)
-{
-   /* Use a raw surface so we can reuse existing untyped read/write/atomic
-    * messages. We need these specifically for the fragment shader since they
-    * include a pixel mask header that we need to ensure correct behavior
-    * with helper invocations, which cannot write to the buffer.
-    */
-   brw_emit_buffer_surface_state(brw, out_offset, bo, offset,
-                                 ISL_FORMAT_RAW,
-                                 size, 1, true);
+                                 0);
 }
 
 /**
@@ -777,7 +727,8 @@ brw_update_sol_surface(struct brw_context *brw,
    uint32_t offset_bytes = 4 * offset_dwords;
    struct brw_bo *bo = intel_bufferobj_buffer(brw, intel_bo,
                                              offset_bytes,
-                                             buffer_obj->Size - offset_bytes);
+                                             buffer_obj->Size - offset_bytes,
+                                             true);
    uint32_t *surf = brw_state_batch(brw, 6 * 4, 32, out_offset);
    uint32_t pitch_minus_1 = 4*stride_dwords - 1;
    size_t size_dwords = buffer_obj->Size / 4;
@@ -830,17 +781,14 @@ brw_update_sol_surface(struct brw_context *brw,
       BRW_SURFACE_MIPMAPLAYOUT_BELOW << BRW_SURFACE_MIPLAYOUT_SHIFT |
       surface_format << BRW_SURFACE_FORMAT_SHIFT |
       BRW_SURFACE_RC_READ_WRITE;
-   surf[1] = bo->offset64 + offset_bytes; /* reloc */
+   surf[1] = brw_state_reloc(&brw->batch,
+                             *out_offset + 4, bo, offset_bytes, RELOC_WRITE);
    surf[2] = (width << BRW_SURFACE_WIDTH_SHIFT |
              height << BRW_SURFACE_HEIGHT_SHIFT);
    surf[3] = (depth << BRW_SURFACE_DEPTH_SHIFT |
               pitch_minus_1 << BRW_SURFACE_PITCH_SHIFT);
    surf[4] = 0;
    surf[5] = 0;
-
-   /* Emit relocation to surface contents. */
-   brw_emit_reloc(&brw->batch, *out_offset + 4, bo, offset_bytes,
-                  I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER);
 }
 
 /* Creates a new WM constant buffer reflecting the current fragment program's
@@ -854,7 +802,9 @@ brw_upload_wm_pull_constants(struct brw_context *brw)
 {
    struct brw_stage_state *stage_state = &brw->wm.base;
    /* BRW_NEW_FRAGMENT_PROGRAM */
-   struct brw_program *fp = (struct brw_program *) brw->fragment_program;
+   struct brw_program *fp =
+      (struct brw_program *) brw->programs[MESA_SHADER_FRAGMENT];
+
    /* BRW_NEW_FS_PROG_DATA */
    struct brw_stage_prog_data *prog_data = brw->wm.base.prog_data;
 
@@ -868,7 +818,6 @@ const struct brw_tracked_state brw_wm_pull_constants = {
    .dirty = {
       .mesa = _NEW_PROGRAM_CONSTANTS,
       .brw = BRW_NEW_BATCH |
-             BRW_NEW_BLORP |
              BRW_NEW_FRAGMENT_PROGRAM |
              BRW_NEW_FS_PROG_DATA,
    },
@@ -884,71 +833,53 @@ const struct brw_tracked_state brw_wm_pull_constants = {
  * hardware discard the target 0 color output..
  */
 static void
-brw_emit_null_surface_state(struct brw_context *brw,
-                            unsigned width,
-                            unsigned height,
-                            unsigned samples,
-                            uint32_t *out_offset)
+emit_null_surface_state(struct brw_context *brw,
+                        const struct gl_framebuffer *fb,
+                        uint32_t *out_offset)
 {
-   /* From the Sandy bridge PRM, Vol4 Part1 p71 (Surface Type: Programming
-    * Notes):
-    *
-    *     A null surface will be used in instances where an actual surface is
-    *     not bound. When a write message is generated to a null surface, no
-    *     actual surface is written to. When a read message (including any
-    *     sampling engine message) is generated to a null surface, the result
-    *     is all zeros. Note that a null surface type is allowed to be used
-    *     with all messages, even if it is not specificially indicated as
-    *     supported. All of the remaining fields in surface state are ignored
-    *     for null surfaces, with the following exceptions:
+   const struct gen_device_info *devinfo = &brw->screen->devinfo;
+   uint32_t *surf = brw_state_batch(brw,
+                                    brw->isl_dev.ss.size,
+                                    brw->isl_dev.ss.align,
+                                    out_offset);
+
+   /* Use the fb dimensions or 1x1x1 */
+   const unsigned width   = fb ? _mesa_geometric_width(fb)   : 1;
+   const unsigned height  = fb ? _mesa_geometric_height(fb)  : 1;
+   const unsigned samples = fb ? _mesa_geometric_samples(fb) : 1;
+
+   if (devinfo->gen != 6 || samples <= 1) {
+      isl_null_fill_state(&brw->isl_dev, surf,
+                          isl_extent3d(width, height, 1));
+      return;
+   }
+
+   /* On Gen6, null render targets seem to cause GPU hangs when multisampling.
+    * So work around this problem by rendering into a dummy color buffer.
     *
-    *     - [DevSNB+]: Width, Height, Depth, and LOD fields must match the
-    *       depth buffer’s corresponding state for all render target surfaces,
-    *       including null.
+    * To decrease the amount of memory needed by the workaround buffer, we
+    * set its pitch to 128 bytes (the width of a Y tile).  This means that
+    * the amount of memory needed for the workaround buffer is
+    * (width_in_tiles + height_in_tiles - 1) tiles.
     *
-    *     - Surface Format must be R8G8B8A8_UNORM.
+    * Note that since the workaround buffer will be interpreted by the
+    * hardware as an interleaved multisampled buffer, we need to compute
+    * width_in_tiles and height_in_tiles by dividing the width and height
+    * by 16 rather than the normal Y-tile size of 32.
     */
-   unsigned surface_type = BRW_SURFACE_NULL;
-   struct brw_bo *bo = NULL;
-   unsigned pitch_minus_1 = 0;
-   uint32_t multisampling_state = 0;
-   uint32_t *surf = brw_state_batch(brw, 6 * 4, 32, out_offset);
-
-   if (samples > 1) {
-      /* On Gen6, null render targets seem to cause GPU hangs when
-       * multisampling.  So work around this problem by rendering into dummy
-       * color buffer.
-       *
-       * To decrease the amount of memory needed by the workaround buffer, we
-       * set its pitch to 128 bytes (the width of a Y tile).  This means that
-       * the amount of memory needed for the workaround buffer is
-       * (width_in_tiles + height_in_tiles - 1) tiles.
-       *
-       * Note that since the workaround buffer will be interpreted by the
-       * hardware as an interleaved multisampled buffer, we need to compute
-       * width_in_tiles and height_in_tiles by dividing the width and height
-       * by 16 rather than the normal Y-tile size of 32.
-       */
-      unsigned width_in_tiles = ALIGN(width, 16) / 16;
-      unsigned height_in_tiles = ALIGN(height, 16) / 16;
-      unsigned size_needed = (width_in_tiles + height_in_tiles - 1) * 4096;
-      brw_get_scratch_bo(brw, &brw->wm.multisampled_null_render_target_bo,
-                         size_needed);
-      bo = brw->wm.multisampled_null_render_target_bo;
-      surface_type = BRW_SURFACE_2D;
-      pitch_minus_1 = 127;
-      multisampling_state = brw_get_surface_num_multisamples(samples);
-   }
+   unsigned width_in_tiles = ALIGN(width, 16) / 16;
+   unsigned height_in_tiles = ALIGN(height, 16) / 16;
+   unsigned pitch_minus_1 = 127;
+   unsigned size_needed = (width_in_tiles + height_in_tiles - 1) * 4096;
+   brw_get_scratch_bo(brw, &brw->wm.multisampled_null_render_target_bo,
+                      size_needed);
 
-   surf[0] = (surface_type << BRW_SURFACE_TYPE_SHIFT |
+   surf[0] = (BRW_SURFACE_2D << BRW_SURFACE_TYPE_SHIFT |
              ISL_FORMAT_B8G8R8A8_UNORM << BRW_SURFACE_FORMAT_SHIFT);
-   if (brw->gen < 6) {
-      surf[0] |= (1 << BRW_SURFACE_WRITEDISABLE_R_SHIFT |
-                 1 << BRW_SURFACE_WRITEDISABLE_G_SHIFT |
-                 1 << BRW_SURFACE_WRITEDISABLE_B_SHIFT |
-                 1 << BRW_SURFACE_WRITEDISABLE_A_SHIFT);
-   }
-   surf[1] = bo ? bo->offset64 : 0;
+   surf[1] = brw_state_reloc(&brw->batch, *out_offset + 4,
+                             brw->wm.multisampled_null_render_target_bo,
+                             0, RELOC_WRITE);
+
    surf[2] = ((width - 1) << BRW_SURFACE_WIDTH_SHIFT |
               (height - 1) << BRW_SURFACE_HEIGHT_SHIFT);
 
@@ -959,13 +890,8 @@ brw_emit_null_surface_state(struct brw_context *brw,
     */
    surf[3] = (BRW_SURFACE_TILED | BRW_SURFACE_TILED_Y |
               pitch_minus_1 << BRW_SURFACE_PITCH_SHIFT);
-   surf[4] = multisampling_state;
+   surf[4] = BRW_SURFACE_MULTISAMPLECOUNT_4;
    surf[5] = 0;
-
-   if (bo) {
-      brw_emit_reloc(&brw->batch, *out_offset + 4, bo, 0,
-                     I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER);
-   }
 }
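
To make the workaround-buffer arithmetic above concrete, a small sketch with an assumed 1920x1080 framebuffer (ALIGN is Mesa's alignment macro; the helper name is invented):

/* Hypothetical sketch of the gen6 dummy-RT allocation size. */
static unsigned
example_gen6_null_rt_size(unsigned width, unsigned height)
{
   const unsigned width_in_tiles  = ALIGN(width, 16) / 16;
   const unsigned height_in_tiles = ALIGN(height, 16) / 16;

   /* 1920x1080 -> (120 + 68 - 1) * 4096 = 765952 bytes (~748 KiB), far
    * less than a real 4x-MSAA 1920x1080 color buffer would need.
    */
   return (width_in_tiles + height_in_tiles - 1) * 4096;
}
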
 
 /**
@@ -976,24 +902,22 @@ brw_emit_null_surface_state(struct brw_context *brw,
 static uint32_t
 gen4_update_renderbuffer_surface(struct brw_context *brw,
                                  struct gl_renderbuffer *rb,
-                                 uint32_t flags, unsigned unit,
+                                 unsigned unit,
                                  uint32_t surf_index)
 {
+   const struct gen_device_info *devinfo = &brw->screen->devinfo;
    struct gl_context *ctx = &brw->ctx;
    struct intel_renderbuffer *irb = intel_renderbuffer(rb);
    struct intel_mipmap_tree *mt = irb->mt;
    uint32_t *surf;
    uint32_t tile_x, tile_y;
-   uint32_t format = 0;
+   enum isl_format format;
    uint32_t offset;
    /* _NEW_BUFFERS */
    mesa_format rb_format = _mesa_get_render_format(ctx, intel_rb_format(irb));
    /* BRW_NEW_FS_PROG_DATA */
 
-   assert(!(flags & INTEL_RENDERBUFFER_LAYERED));
-   assert(!(flags & INTEL_AUX_BUFFER_DISABLED));
-
-   if (rb->TexImage && !brw->has_surface_tile_offset) {
+   if (rb->TexImage && !devinfo->has_surface_tile_offset) {
       intel_renderbuffer_get_tile_offsets(irb, &tile_x, &tile_y);
 
       if (tile_x != 0 || tile_y != 0) {
@@ -1004,14 +928,15 @@ gen4_update_renderbuffer_surface(struct brw_context *brw,
          * miptree and render into that.
          */
         intel_renderbuffer_move_to_temp(brw, irb, false);
-        mt = irb->mt;
+        assert(irb->align_wa_mt);
+        mt = irb->align_wa_mt;
       }
    }
 
    surf = brw_state_batch(brw, 6 * 4, 32, &offset);
 
-   format = brw->render_target_format[rb_format];
-   if (unlikely(!brw->format_supported_as_render_target[rb_format])) {
+   format = brw->mesa_to_isl_render_format[rb_format];
+   if (unlikely(!brw->mesa_format_supports_render[rb_format])) {
       _mesa_problem(ctx, "%s: renderbuffer format %s unsupported\n",
                     __func__, _mesa_get_format_name(rb_format));
    }
@@ -1021,18 +946,22 @@ gen4_update_renderbuffer_surface(struct brw_context *brw,
 
    /* reloc */
    assert(mt->offset % mt->cpp == 0);
-   surf[1] = (intel_renderbuffer_get_tile_offsets(irb, &tile_x, &tile_y) +
-             mt->bo->offset64 + mt->offset);
+   surf[1] = brw_state_reloc(&brw->batch, offset + 4, mt->bo,
+                             mt->offset +
+                             intel_renderbuffer_get_tile_offsets(irb,
+                                                                 &tile_x,
+                                                                 &tile_y),
+                             RELOC_WRITE);
 
    surf[2] = ((rb->Width - 1) << BRW_SURFACE_WIDTH_SHIFT |
              (rb->Height - 1) << BRW_SURFACE_HEIGHT_SHIFT);
 
-   surf[3] = (brw_get_surface_tiling_bits(mt->tiling) |
-             (mt->pitch - 1) << BRW_SURFACE_PITCH_SHIFT);
+   surf[3] = (brw_get_surface_tiling_bits(mt->surf.tiling) |
+             (mt->surf.row_pitch_B - 1) << BRW_SURFACE_PITCH_SHIFT);
 
-   surf[4] = brw_get_surface_num_multisamples(mt->num_samples);
+   surf[4] = brw_get_surface_num_multisamples(mt->surf.samples);
 
-   assert(brw->has_surface_tile_offset || (tile_x == 0 && tile_y == 0));
+   assert(devinfo->has_surface_tile_offset || (tile_x == 0 && tile_y == 0));
    /* Note that the low bits of these fields are missing, so
     * there's the possibility of getting in trouble.
     */
@@ -1040,90 +969,79 @@ gen4_update_renderbuffer_surface(struct brw_context *brw,
    assert(tile_y % 2 == 0);
    surf[5] = ((tile_x / 4) << BRW_SURFACE_X_OFFSET_SHIFT |
              (tile_y / 2) << BRW_SURFACE_Y_OFFSET_SHIFT |
-             (mt->valign == 4 ? BRW_SURFACE_VERTICAL_ALIGN_ENABLE : 0));
+             (mt->surf.image_alignment_el.height == 4 ?
+                  BRW_SURFACE_VERTICAL_ALIGN_ENABLE : 0));
 
-   if (brw->gen < 6) {
+   if (devinfo->gen < 6) {
       /* _NEW_COLOR */
       if (!ctx->Color.ColorLogicOpEnabled && !ctx->Color._AdvancedBlendMode &&
           (ctx->Color.BlendEnabled & (1 << unit)))
         surf[0] |= BRW_SURFACE_BLEND_ENABLED;
 
-      if (!ctx->Color.ColorMask[unit][0])
+      if (!GET_COLORMASK_BIT(ctx->Color.ColorMask, unit, 0))
         surf[0] |= 1 << BRW_SURFACE_WRITEDISABLE_R_SHIFT;
-      if (!ctx->Color.ColorMask[unit][1])
+      if (!GET_COLORMASK_BIT(ctx->Color.ColorMask, unit, 1))
         surf[0] |= 1 << BRW_SURFACE_WRITEDISABLE_G_SHIFT;
-      if (!ctx->Color.ColorMask[unit][2])
+      if (!GET_COLORMASK_BIT(ctx->Color.ColorMask, unit, 2))
         surf[0] |= 1 << BRW_SURFACE_WRITEDISABLE_B_SHIFT;
 
       /* As mentioned above, disable writes to the alpha component when the
        * renderbuffer is XRGB.
        */
       if (ctx->DrawBuffer->Visual.alphaBits == 0 ||
-         !ctx->Color.ColorMask[unit][3]) {
+         !GET_COLORMASK_BIT(ctx->Color.ColorMask, unit, 3)) {
         surf[0] |= 1 << BRW_SURFACE_WRITEDISABLE_A_SHIFT;
       }
    }
 
-   brw_emit_reloc(&brw->batch, offset + 4, mt->bo, surf[1] - mt->bo->offset64,
-                  I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER);
-
    return offset;
 }
 
-/**
- * Construct SURFACE_STATE objects for renderbuffers/draw buffers.
- */
-void
-brw_update_renderbuffer_surfaces(struct brw_context *brw,
-                                 const struct gl_framebuffer *fb,
-                                 uint32_t render_target_start,
-                                 uint32_t *surf_offset)
+static void
+update_renderbuffer_surfaces(struct brw_context *brw)
 {
-   GLuint i;
-   const unsigned int w = _mesa_geometric_width(fb);
-   const unsigned int h = _mesa_geometric_height(fb);
-   const unsigned int s = _mesa_geometric_samples(fb);
+   const struct gen_device_info *devinfo = &brw->screen->devinfo;
+   const struct gl_context *ctx = &brw->ctx;
+
+   /* _NEW_BUFFERS | _NEW_COLOR */
+   const struct gl_framebuffer *fb = ctx->DrawBuffer;
+
+   /* Render targets always start at binding table index 0. */
+   const unsigned rt_start = 0;
+
+   uint32_t *surf_offsets = brw->wm.base.surf_offset;
 
    /* Update surfaces for drawing buffers */
    if (fb->_NumColorDrawBuffers >= 1) {
-      for (i = 0; i < fb->_NumColorDrawBuffers; i++) {
-         const uint32_t surf_index = render_target_start + i;
-         const int flags = (_mesa_geometric_layers(fb) > 0 ?
-                              INTEL_RENDERBUFFER_LAYERED : 0) |
-                           (brw->draw_aux_buffer_disabled[i] ?
-                              INTEL_AUX_BUFFER_DISABLED : 0);
-
-        if (intel_renderbuffer(fb->_ColorDrawBuffers[i])) {
-            surf_offset[surf_index] =
-               brw->vtbl.update_renderbuffer_surface(
-                  brw, fb->_ColorDrawBuffers[i], flags, i, surf_index);
+      for (unsigned i = 0; i < fb->_NumColorDrawBuffers; i++) {
+         struct gl_renderbuffer *rb = fb->_ColorDrawBuffers[i];
+
+        if (intel_renderbuffer(rb)) {
+            surf_offsets[rt_start + i] = devinfo->gen >= 6 ?
+               gen6_update_renderbuffer_surface(brw, rb, i, rt_start + i) :
+               gen4_update_renderbuffer_surface(brw, rb, i, rt_start + i);
         } else {
-            brw->vtbl.emit_null_surface_state(brw, w, h, s,
-               &surf_offset[surf_index]);
+            emit_null_surface_state(brw, fb, &surf_offsets[rt_start + i]);
         }
       }
    } else {
-      const uint32_t surf_index = render_target_start;
-      brw->vtbl.emit_null_surface_state(brw, w, h, s,
-         &surf_offset[surf_index]);
+      emit_null_surface_state(brw, fb, &surf_offsets[rt_start]);
    }
-}
-
-static void
-update_renderbuffer_surfaces(struct brw_context *brw)
-{
-   const struct gl_context *ctx = &brw->ctx;
 
-   /* BRW_NEW_FS_PROG_DATA */
-   const struct brw_wm_prog_data *wm_prog_data =
-      brw_wm_prog_data(brw->wm.base.prog_data);
+   /* The PIPE_CONTROL command description says:
+    *
+    * "Whenever a Binding Table Index (BTI) used by a Render Taget Message
+    *  points to a different RENDER_SURFACE_STATE, SW must issue a Render
+    *  Target Cache Flush by enabling this bit. When render target flush
+    *  is set due to new association of BTI, PS Scoreboard Stall bit must
+    *  be set in this packet."
+    */
+   if (devinfo->gen >= 11) {
+      brw_emit_pipe_control_flush(brw,
+                                  PIPE_CONTROL_RENDER_TARGET_FLUSH |
+                                  PIPE_CONTROL_STALL_AT_SCOREBOARD);
+   }
 
-   /* _NEW_BUFFERS | _NEW_COLOR */
-   const struct gl_framebuffer *fb = ctx->DrawBuffer;
-   brw_update_renderbuffer_surfaces(
-      brw, fb,
-      wm_prog_data->binding_table.render_target_start,
-      brw->wm.base.surf_offset);
    brw->ctx.NewDriverState |= BRW_NEW_SURFACES;
 }
 
@@ -1131,9 +1049,7 @@ const struct brw_tracked_state brw_renderbuffer_surfaces = {
    .dirty = {
       .mesa = _NEW_BUFFERS |
               _NEW_COLOR,
-      .brw = BRW_NEW_BATCH |
-             BRW_NEW_BLORP |
-             BRW_NEW_FS_PROG_DATA,
+      .brw = BRW_NEW_BATCH,
    },
    .emit = update_renderbuffer_surfaces,
 };
@@ -1142,7 +1058,7 @@ const struct brw_tracked_state gen6_renderbuffer_surfaces = {
    .dirty = {
       .mesa = _NEW_BUFFERS,
       .brw = BRW_NEW_BATCH |
-             BRW_NEW_BLORP,
+             BRW_NEW_AUX_STATE,
    },
    .emit = update_renderbuffer_surfaces,
 };
@@ -1156,9 +1072,8 @@ update_renderbuffer_read_surfaces(struct brw_context *brw)
    const struct brw_wm_prog_data *wm_prog_data =
       brw_wm_prog_data(brw->wm.base.prog_data);
 
-   /* BRW_NEW_FRAGMENT_PROGRAM */
-   if (!ctx->Extensions.MESA_shader_framebuffer_fetch &&
-       brw->fragment_program && brw->fragment_program->info.outputs_read) {
+   if (wm_prog_data->has_render_target_reads &&
+       !ctx->Extensions.EXT_shader_framebuffer_fetch) {
       /* _NEW_BUFFERS */
       const struct gl_framebuffer *fb = ctx->DrawBuffer;
 
@@ -1170,7 +1085,7 @@ update_renderbuffer_read_surfaces(struct brw_context *brw)
          uint32_t *surf_offset = &brw->wm.base.surf_offset[surf_index];
 
          if (irb) {
-            const unsigned format = brw->render_target_format[
+            const enum isl_format format = brw->mesa_to_isl_render_format[
                _mesa_get_render_format(ctx, intel_rb_format(irb))];
             assert(isl_format_supports_sampling(&brw->screen->devinfo,
                                                 format));
@@ -1190,37 +1105,28 @@ update_renderbuffer_read_surfaces(struct brw_context *brw)
                irb->mt->target == GL_TEXTURE_1D_ARRAY ? GL_TEXTURE_2D_ARRAY :
                irb->mt->target;
 
-            /* intel_renderbuffer::mt_layer is expressed in sample units for
-             * the UMS and CMS multisample layouts, but
-             * intel_renderbuffer::layer_count is expressed in units of whole
-             * logical layers regardless of the multisample layout.
-             */
-            const unsigned mt_layer_unit =
-               (irb->mt->msaa_layout == INTEL_MSAA_LAYOUT_UMS ||
-                irb->mt->msaa_layout == INTEL_MSAA_LAYOUT_CMS) ?
-               MAX2(irb->mt->num_samples, 1) : 1;
-
             const struct isl_view view = {
                .format = format,
                .base_level = irb->mt_level - irb->mt->first_level,
                .levels = 1,
-               .base_array_layer = irb->mt_layer / mt_layer_unit,
+               .base_array_layer = irb->mt_layer,
                .array_len = irb->layer_count,
                .swizzle = ISL_SWIZZLE_IDENTITY,
                .usage = ISL_SURF_USAGE_TEXTURE_BIT,
             };
 
-            const int flags = brw->draw_aux_buffer_disabled[i] ?
-                                 INTEL_AUX_BUFFER_DISABLED : 0;
-            brw_emit_surface_state(brw, irb->mt, flags, target, view,
-                                   tex_mocs[brw->gen],
+            enum isl_aux_usage aux_usage =
+               intel_miptree_texture_aux_usage(brw, irb->mt, format,
+                                               brw->gen9_astc5x5_wa_tex_mask);
+            if (brw->draw_aux_usage[i] == ISL_AUX_USAGE_NONE)
+               aux_usage = ISL_AUX_USAGE_NONE;
+
+            brw_emit_surface_state(brw, irb->mt, target, view, aux_usage,
                                    surf_offset, surf_index,
-                                   I915_GEM_DOMAIN_SAMPLER, 0);
+                                   0);
 
          } else {
-            brw->vtbl.emit_null_surface_state(
-               brw, _mesa_geometric_width(fb), _mesa_geometric_height(fb),
-               _mesa_geometric_samples(fb), surf_offset);
+            emit_null_surface_state(brw, fb, surf_offset);
          }
       }
 
@@ -1232,12 +1138,20 @@ const struct brw_tracked_state brw_renderbuffer_read_surfaces = {
    .dirty = {
       .mesa = _NEW_BUFFERS,
       .brw = BRW_NEW_BATCH |
-             BRW_NEW_FRAGMENT_PROGRAM |
+             BRW_NEW_AUX_STATE |
              BRW_NEW_FS_PROG_DATA,
    },
    .emit = update_renderbuffer_read_surfaces,
 };
 
+static bool
+is_depth_texture(struct intel_texture_object *iobj)
+{
+   GLenum base_format = _mesa_get_format_base_format(iobj->_Format);
+   return base_format == GL_DEPTH_COMPONENT ||
+          (base_format == GL_DEPTH_STENCIL && !iobj->base.StencilSampling);
+}
+
 static void
 update_stage_texture_surfaces(struct brw_context *brw,
                               const struct gl_program *prog,
@@ -1263,10 +1177,35 @@ update_stage_texture_surfaces(struct brw_context *brw,
 
       if (prog->SamplersUsed & (1 << s)) {
          const unsigned unit = prog->SamplerUnits[s];
+         const bool used_by_txf = prog->info.textures_used_by_txf & (1 << s);
+         struct gl_texture_object *obj = ctx->Texture.Unit[unit]._Current;
+         struct intel_texture_object *iobj = intel_texture_object(obj);
 
          /* _NEW_TEXTURE */
-         if (ctx->Texture.Unit[unit]._Current) {
-            brw_update_texture_surface(ctx, unit, surf_offset + s, for_gather, plane);
+         if (!obj)
+            continue;
+
+         if ((prog->ShadowSamplers & (1 << s)) && !is_depth_texture(iobj)) {
+            /* A programming note for the sample_c message says:
+             *
+             *    "The Surface Format of the associated surface must be
+             *     indicated as supporting shadow mapping as indicated in the
+             *     surface format table."
+             *
+             * Accessing non-depth textures via a sampler*Shadow type is
+             * undefined.  GLSL 4.50 page 162 says:
+             *
+             *    "If a shadow texture call is made to a sampler that does not
+             *     represent a depth texture, then results are undefined."
+             *
+             * We give them a null surface (zeros) for undefined.  We've seen
+             * GPU hangs with color buffers and sample_c, so we try and avoid
+             * those with this hack.
+             */
+            emit_null_surface_state(brw, NULL, surf_offset + s);
+         } else {
+            brw_update_texture_surface(ctx, unit, surf_offset + s, for_gather,
+                                       used_by_txf, plane);
          }
       }
    }
@@ -1279,18 +1218,20 @@ update_stage_texture_surfaces(struct brw_context *brw,
 static void
 brw_update_texture_surfaces(struct brw_context *brw)
 {
+   const struct gen_device_info *devinfo = &brw->screen->devinfo;
+
    /* BRW_NEW_VERTEX_PROGRAM */
-   struct gl_program *vs = (struct gl_program *) brw->vertex_program;
+   struct gl_program *vs = brw->programs[MESA_SHADER_VERTEX];
 
    /* BRW_NEW_TESS_PROGRAMS */
-   struct gl_program *tcs = (struct gl_program *) brw->tess_ctrl_program;
-   struct gl_program *tes = (struct gl_program *) brw->tess_eval_program;
+   struct gl_program *tcs = brw->programs[MESA_SHADER_TESS_CTRL];
+   struct gl_program *tes = brw->programs[MESA_SHADER_TESS_EVAL];
 
    /* BRW_NEW_GEOMETRY_PROGRAM */
-   struct gl_program *gs = (struct gl_program *) brw->geometry_program;
+   struct gl_program *gs = brw->programs[MESA_SHADER_GEOMETRY];
 
    /* BRW_NEW_FRAGMENT_PROGRAM */
-   struct gl_program *fs = (struct gl_program *) brw->fragment_program;
+   struct gl_program *fs = brw->programs[MESA_SHADER_FRAGMENT];
 
    /* _NEW_TEXTURE */
    update_stage_texture_surfaces(brw, vs, &brw->vs.base, false, 0);
@@ -1302,16 +1243,16 @@ brw_update_texture_surfaces(struct brw_context *brw)
    /* emit alternate set of surface state for gather. this
     * allows the surface format to be overriden for only the
     * gather4 messages. */
-   if (brw->gen < 8) {
-      if (vs && vs->nir->info.uses_texture_gather)
+   if (devinfo->gen < 8) {
+      if (vs && vs->info.uses_texture_gather)
          update_stage_texture_surfaces(brw, vs, &brw->vs.base, true, 0);
-      if (tcs && tcs->nir->info.uses_texture_gather)
+      if (tcs && tcs->info.uses_texture_gather)
          update_stage_texture_surfaces(brw, tcs, &brw->tcs.base, true, 0);
-      if (tes && tes->nir->info.uses_texture_gather)
+      if (tes && tes->info.uses_texture_gather)
          update_stage_texture_surfaces(brw, tes, &brw->tes.base, true, 0);
-      if (gs && gs->nir->info.uses_texture_gather)
+      if (gs && gs->info.uses_texture_gather)
          update_stage_texture_surfaces(brw, gs, &brw->gs.base, true, 0);
-      if (fs && fs->nir->info.uses_texture_gather)
+      if (fs && fs->info.uses_texture_gather)
          update_stage_texture_surfaces(brw, fs, &brw->wm.base, true, 0);
    }
 
@@ -1327,7 +1268,7 @@ const struct brw_tracked_state brw_texture_surfaces = {
    .dirty = {
       .mesa = _NEW_TEXTURE,
       .brw = BRW_NEW_BATCH |
-             BRW_NEW_BLORP |
+             BRW_NEW_AUX_STATE |
              BRW_NEW_FRAGMENT_PROGRAM |
              BRW_NEW_FS_PROG_DATA |
              BRW_NEW_GEOMETRY_PROGRAM |
@@ -1345,8 +1286,10 @@ const struct brw_tracked_state brw_texture_surfaces = {
 static void
 brw_update_cs_texture_surfaces(struct brw_context *brw)
 {
+   const struct gen_device_info *devinfo = &brw->screen->devinfo;
+
    /* BRW_NEW_COMPUTE_PROGRAM */
-   struct gl_program *cs = (struct gl_program *) brw->compute_program;
+   struct gl_program *cs = brw->programs[MESA_SHADER_COMPUTE];
 
    /* _NEW_TEXTURE */
    update_stage_texture_surfaces(brw, cs, &brw->cs.base, false, 0);
@@ -1355,8 +1298,8 @@ brw_update_cs_texture_surfaces(struct brw_context *brw)
     * allows the surface format to be overriden for only the
     * gather4 messages.
     */
-   if (brw->gen < 8) {
-      if (cs && cs->nir->info.uses_texture_gather)
+   if (devinfo->gen < 8) {
+      if (cs && cs->info.uses_texture_gather)
          update_stage_texture_surfaces(brw, cs, &brw->cs.base, true, 0);
    }
 
@@ -1367,12 +1310,43 @@ const struct brw_tracked_state brw_cs_texture_surfaces = {
    .dirty = {
       .mesa = _NEW_TEXTURE,
       .brw = BRW_NEW_BATCH |
-             BRW_NEW_BLORP |
-             BRW_NEW_COMPUTE_PROGRAM,
+             BRW_NEW_COMPUTE_PROGRAM |
+             BRW_NEW_AUX_STATE,
    },
    .emit = brw_update_cs_texture_surfaces,
 };
 
+static void
+upload_buffer_surface(struct brw_context *brw,
+                      struct gl_buffer_binding *binding,
+                      uint32_t *out_offset,
+                      enum isl_format format,
+                      unsigned reloc_flags)
+{
+   struct gl_context *ctx = &brw->ctx;
+
+   if (binding->BufferObject == ctx->Shared->NullBufferObj) {
+      emit_null_surface_state(brw, NULL, out_offset);
+   } else {
+      ptrdiff_t size = binding->BufferObject->Size - binding->Offset;
+      if (!binding->AutomaticSize)
+         size = MIN2(size, binding->Size);
+
+      if (size == 0) {
+         emit_null_surface_state(brw, NULL, out_offset);
+         return;
+      }
+
+      struct intel_buffer_object *iobj =
+         intel_buffer_object(binding->BufferObject);
+      struct brw_bo *bo =
+         intel_bufferobj_buffer(brw, iobj, binding->Offset, size,
+                                (reloc_flags & RELOC_WRITE) != 0);
+
+      brw_emit_buffer_surface_state(brw, out_offset, bo, binding->Offset,
+                                    format, size, 1, reloc_flags);
+   }
+}
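
As a worked illustration of the size clamping in the helper above (the numbers are assumed, not taken from the commit): a 1024-byte buffer object bound at offset 256 exposes 768 bytes when AutomaticSize is set, and an explicit 512-byte binding size clamps that further:

   ptrdiff_t size = 1024 /* BufferObject->Size */ - 256 /* binding->Offset */;
   /* size == 768 with AutomaticSize; an explicit binding->Size of 512 wins: */
   size = MIN2(size, 512);   /* size == 512 */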
 
 void
 brw_upload_ubo_surfaces(struct brw_context *brw, struct gl_program *prog,
@@ -1381,61 +1355,42 @@ brw_upload_ubo_surfaces(struct brw_context *brw, struct gl_program *prog,
 {
    struct gl_context *ctx = &brw->ctx;
 
-   if (!prog)
+   if (!prog || (prog->info.num_ubos == 0 &&
+                 prog->info.num_ssbos == 0 &&
+                 prog->info.num_abos == 0))
       return;
 
    uint32_t *ubo_surf_offsets =
       &stage_state->surf_offset[prog_data->binding_table.ubo_start];
 
    for (int i = 0; i < prog->info.num_ubos; i++) {
-      struct gl_uniform_buffer_binding *binding =
+      struct gl_buffer_binding *binding =
          &ctx->UniformBufferBindings[prog->sh.UniformBlocks[i]->Binding];
-
-      if (binding->BufferObject == ctx->Shared->NullBufferObj) {
-         brw->vtbl.emit_null_surface_state(brw, 1, 1, 1, &ubo_surf_offsets[i]);
-      } else {
-         struct intel_buffer_object *intel_bo =
-            intel_buffer_object(binding->BufferObject);
-         GLsizeiptr size = binding->BufferObject->Size - binding->Offset;
-         if (!binding->AutomaticSize)
-            size = MIN2(size, binding->Size);
-         struct brw_bo *bo =
-            intel_bufferobj_buffer(brw, intel_bo,
-                                   binding->Offset,
-                                   size);
-         brw_create_constant_surface(brw, bo, binding->Offset,
-                                     size,
-                                     &ubo_surf_offsets[i]);
-      }
+      upload_buffer_surface(brw, binding, &ubo_surf_offsets[i],
+                            ISL_FORMAT_R32G32B32A32_FLOAT, 0);
    }
 
-   uint32_t *ssbo_surf_offsets =
+   uint32_t *abo_surf_offsets =
       &stage_state->surf_offset[prog_data->binding_table.ssbo_start];
+   uint32_t *ssbo_surf_offsets = abo_surf_offsets + prog->info.num_abos;
+
+   for (int i = 0; i < prog->info.num_abos; i++) {
+      struct gl_buffer_binding *binding =
+         &ctx->AtomicBufferBindings[prog->sh.AtomicBuffers[i]->Binding];
+      upload_buffer_surface(brw, binding, &abo_surf_offsets[i],
+                            ISL_FORMAT_RAW, RELOC_WRITE);
+   }
 
    for (int i = 0; i < prog->info.num_ssbos; i++) {
-      struct gl_shader_storage_buffer_binding *binding =
+      struct gl_buffer_binding *binding =
          &ctx->ShaderStorageBufferBindings[prog->sh.ShaderStorageBlocks[i]->Binding];
 
-      if (binding->BufferObject == ctx->Shared->NullBufferObj) {
-         brw->vtbl.emit_null_surface_state(brw, 1, 1, 1, &ssbo_surf_offsets[i]);
-      } else {
-         struct intel_buffer_object *intel_bo =
-            intel_buffer_object(binding->BufferObject);
-         GLsizeiptr size = binding->BufferObject->Size - binding->Offset;
-         if (!binding->AutomaticSize)
-            size = MIN2(size, binding->Size);
-         struct brw_bo *bo =
-            intel_bufferobj_buffer(brw, intel_bo,
-                                   binding->Offset,
-                                   size);
-         brw_create_buffer_surface(brw, bo, binding->Offset,
-                                   size,
-                                   &ssbo_surf_offsets[i]);
-      }
+      upload_buffer_surface(brw, binding, &ssbo_surf_offsets[i],
+                            ISL_FORMAT_RAW, RELOC_WRITE);
    }
 
-   if (prog->info.num_ubos || prog->info.num_ssbos)
-      brw->ctx.NewDriverState |= BRW_NEW_SURFACES;
+   stage_state->push_constants_dirty = true;
+   brw->ctx.NewDriverState |= BRW_NEW_SURFACES;
 }
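
With this change, atomic counter buffers no longer get a dedicated abo_start block; they share the SSBO section of the binding table, with SSBO surfaces following after num_abos entries. Spelled out from the pointer arithmetic above (illustrative helpers, not part of the commit):

   /* Illustrative only: surface index of the i-th ABO / SSBO for a stage. */
   static inline uint32_t
   abo_surf_index(const struct brw_stage_prog_data *pd, int i)
   {
      return pd->binding_table.ssbo_start + i;
   }

   static inline uint32_t
   ssbo_surf_index(const struct gl_program *prog,
                   const struct brw_stage_prog_data *pd, int i)
   {
      return pd->binding_table.ssbo_start + prog->info.num_abos + i;
   }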
 
 static void
@@ -1443,7 +1398,7 @@ brw_upload_wm_ubo_surfaces(struct brw_context *brw)
 {
    struct gl_context *ctx = &brw->ctx;
    /* _NEW_PROGRAM */
-   struct gl_program *prog = ctx->_Shader->_CurrentFragmentProgram;
+   struct gl_program *prog = ctx->FragmentProgram._Current;
 
    /* BRW_NEW_FS_PROG_DATA */
    brw_upload_ubo_surfaces(brw, prog, &brw->wm.base, brw->wm.base.prog_data);
@@ -1453,7 +1408,6 @@ const struct brw_tracked_state brw_wm_ubo_surfaces = {
    .dirty = {
       .mesa = _NEW_PROGRAM,
       .brw = BRW_NEW_BATCH |
-             BRW_NEW_BLORP |
              BRW_NEW_FS_PROG_DATA |
              BRW_NEW_UNIFORM_BUFFER,
    },
@@ -1476,92 +1430,17 @@ const struct brw_tracked_state brw_cs_ubo_surfaces = {
    .dirty = {
       .mesa = _NEW_PROGRAM,
       .brw = BRW_NEW_BATCH |
-             BRW_NEW_BLORP |
              BRW_NEW_CS_PROG_DATA |
              BRW_NEW_UNIFORM_BUFFER,
    },
    .emit = brw_upload_cs_ubo_surfaces,
 };
 
-void
-brw_upload_abo_surfaces(struct brw_context *brw,
-                        const struct gl_program *prog,
-                        struct brw_stage_state *stage_state,
-                        struct brw_stage_prog_data *prog_data)
-{
-   struct gl_context *ctx = &brw->ctx;
-   uint32_t *surf_offsets =
-      &stage_state->surf_offset[prog_data->binding_table.abo_start];
-
-   if (prog->info.num_abos) {
-      for (unsigned i = 0; i < prog->info.num_abos; i++) {
-         struct gl_atomic_buffer_binding *binding =
-            &ctx->AtomicBufferBindings[prog->sh.AtomicBuffers[i]->Binding];
-         struct intel_buffer_object *intel_bo =
-            intel_buffer_object(binding->BufferObject);
-         struct brw_bo *bo = intel_bufferobj_buffer(
-            brw, intel_bo, binding->Offset, intel_bo->Base.Size - binding->Offset);
-
-         brw_emit_buffer_surface_state(brw, &surf_offsets[i], bo,
-                                       binding->Offset, ISL_FORMAT_RAW,
-                                       bo->size - binding->Offset, 1, true);
-      }
-
-      brw->ctx.NewDriverState |= BRW_NEW_SURFACES;
-   }
-}
-
-static void
-brw_upload_wm_abo_surfaces(struct brw_context *brw)
-{
-   /* _NEW_PROGRAM */
-   const struct gl_program *wm = brw->fragment_program;
-
-   if (wm) {
-      /* BRW_NEW_FS_PROG_DATA */
-      brw_upload_abo_surfaces(brw, wm, &brw->wm.base, brw->wm.base.prog_data);
-   }
-}
-
-const struct brw_tracked_state brw_wm_abo_surfaces = {
-   .dirty = {
-      .mesa = _NEW_PROGRAM,
-      .brw = BRW_NEW_ATOMIC_BUFFER |
-             BRW_NEW_BLORP |
-             BRW_NEW_BATCH |
-             BRW_NEW_FS_PROG_DATA,
-   },
-   .emit = brw_upload_wm_abo_surfaces,
-};
-
-static void
-brw_upload_cs_abo_surfaces(struct brw_context *brw)
-{
-   /* _NEW_PROGRAM */
-   const struct gl_program *cp = brw->compute_program;
-
-   if (cp) {
-      /* BRW_NEW_CS_PROG_DATA */
-      brw_upload_abo_surfaces(brw, cp, &brw->cs.base, brw->cs.base.prog_data);
-   }
-}
-
-const struct brw_tracked_state brw_cs_abo_surfaces = {
-   .dirty = {
-      .mesa = _NEW_PROGRAM,
-      .brw = BRW_NEW_ATOMIC_BUFFER |
-             BRW_NEW_BLORP |
-             BRW_NEW_BATCH |
-             BRW_NEW_CS_PROG_DATA,
-   },
-   .emit = brw_upload_cs_abo_surfaces,
-};
-
 static void
 brw_upload_cs_image_surfaces(struct brw_context *brw)
 {
    /* _NEW_PROGRAM */
-   const struct gl_program *cp = brw->compute_program;
+   const struct gl_program *cp = brw->programs[MESA_SHADER_COMPUTE];
 
    if (cp) {
       /* BRW_NEW_CS_PROG_DATA, BRW_NEW_IMAGE_UNITS, _NEW_TEXTURE */
@@ -1574,8 +1453,8 @@ const struct brw_tracked_state brw_cs_image_surfaces = {
    .dirty = {
       .mesa = _NEW_TEXTURE | _NEW_PROGRAM,
       .brw = BRW_NEW_BATCH |
-             BRW_NEW_BLORP |
              BRW_NEW_CS_PROG_DATA |
+             BRW_NEW_AUX_STATE |
              BRW_NEW_IMAGE_UNITS
    },
    .emit = brw_upload_cs_image_surfaces,
@@ -1585,8 +1464,8 @@ static uint32_t
 get_image_format(struct brw_context *brw, mesa_format format, GLenum access)
 {
    const struct gen_device_info *devinfo = &brw->screen->devinfo;
-   uint32_t hw_format = brw_isl_format_for_mesa_format(format);
-   if (access == GL_WRITE_ONLY) {
+   enum isl_format hw_format = brw_isl_format_for_mesa_format(format);
+   if (access == GL_WRITE_ONLY || access == GL_NONE) {
       return hw_format;
    } else if (isl_has_matching_typed_storage_image_format(devinfo, hw_format)) {
       /* Typed surface reads support a very limited subset of the shader
@@ -1605,11 +1484,9 @@ get_image_format(struct brw_context *brw, mesa_format format, GLenum access)
 static void
 update_default_image_param(struct brw_context *brw,
                            struct gl_image_unit *u,
-                           unsigned surface_idx,
                            struct brw_image_param *param)
 {
    memset(param, 0, sizeof(*param));
-   param->surface_idx = surface_idx;
    /* Set the swizzling shifts to all-ones to effectively disable swizzling --
     * See emit_address_calculation() in brw_fs_surface_builder.cpp for a more
     * detailed explanation of these parameters.
@@ -1621,153 +1498,97 @@ update_default_image_param(struct brw_context *brw,
 static void
 update_buffer_image_param(struct brw_context *brw,
                           struct gl_image_unit *u,
-                          unsigned surface_idx,
                           struct brw_image_param *param)
 {
-   struct gl_buffer_object *obj = u->TexObj->BufferObject;
-   const uint32_t size = MIN2((uint32_t)u->TexObj->BufferSize, obj->Size);
-   update_default_image_param(brw, u, surface_idx, param);
+   const unsigned size = buffer_texture_range_size(brw, u->TexObj);
+   update_default_image_param(brw, u, param);
 
    param->size[0] = size / _mesa_get_format_bytes(u->_ActualFormat);
    param->stride[0] = _mesa_get_format_bytes(u->_ActualFormat);
 }
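
A concrete reading of the two parameters computed above, with assumed numbers: a buffer image whose accessible range is 4096 bytes and whose format is 16 bytes per texel (e.g. RGBA32F) ends up with:

   param->stride[0] = 16;          /* bytes between consecutive texels */
   param->size[0]   = 4096 / 16;   /* 256 addressable texels */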
 
-static void
-update_texture_image_param(struct brw_context *brw,
-                           struct gl_image_unit *u,
-                           unsigned surface_idx,
-                           struct brw_image_param *param)
-{
-   struct intel_mipmap_tree *mt = intel_texture_object(u->TexObj)->mt;
-
-   update_default_image_param(brw, u, surface_idx, param);
-
-   param->size[0] = minify(mt->logical_width0, u->Level);
-   param->size[1] = minify(mt->logical_height0, u->Level);
-   param->size[2] = (!u->Layered ? 1 :
-                     u->TexObj->Target == GL_TEXTURE_CUBE_MAP ? 6 :
-                     u->TexObj->Target == GL_TEXTURE_3D ?
-                     minify(mt->logical_depth0, u->Level) :
-                     mt->logical_depth0);
-
-   intel_miptree_get_image_offset(mt, u->Level, u->_Layer,
-                                  &param->offset[0],
-                                  &param->offset[1]);
-
-   param->stride[0] = mt->cpp;
-   param->stride[1] = mt->pitch / mt->cpp;
-   param->stride[2] =
-      brw_miptree_get_horizontal_slice_pitch(brw, mt, u->Level);
-   param->stride[3] =
-      brw_miptree_get_vertical_slice_pitch(brw, mt, u->Level);
-
-   if (mt->tiling == I915_TILING_X) {
-      /* An X tile is a rectangular block of 512x8 bytes. */
-      param->tiling[0] = _mesa_logbase2(512 / mt->cpp);
-      param->tiling[1] = _mesa_logbase2(8);
-
-      if (brw->has_swizzling) {
-         /* Right shifts required to swizzle bits 9 and 10 of the memory
-          * address with bit 6.
-          */
-         param->swizzling[0] = 3;
-         param->swizzling[1] = 4;
-      }
-   } else if (mt->tiling == I915_TILING_Y) {
-      /* The layout of a Y-tiled surface in memory isn't really fundamentally
-       * different to the layout of an X-tiled surface, we simply pretend that
-       * the surface is broken up in a number of smaller 16Bx32 tiles, each
-       * one arranged in X-major order just like is the case for X-tiling.
-       */
-      param->tiling[0] = _mesa_logbase2(16 / mt->cpp);
-      param->tiling[1] = _mesa_logbase2(32);
-
-      if (brw->has_swizzling) {
-         /* Right shift required to swizzle bit 9 of the memory address with
-          * bit 6.
-          */
-         param->swizzling[0] = 3;
-      }
-   }
-
-   /* 3D textures are arranged in 2D in memory with 2^lod slices per row.  The
-    * address calculation algorithm (emit_address_calculation() in
-    * brw_fs_surface_builder.cpp) handles this as a sort of tiling with
-    * modulus equal to the LOD.
-    */
-   param->tiling[2] = (u->TexObj->Target == GL_TEXTURE_3D ? u->Level :
-                       0);
-}
-
 static void
 update_image_surface(struct brw_context *brw,
                      struct gl_image_unit *u,
                      GLenum access,
-                     unsigned surface_idx,
                      uint32_t *surf_offset,
                      struct brw_image_param *param)
 {
    if (_mesa_is_image_unit_valid(&brw->ctx, u)) {
       struct gl_texture_object *obj = u->TexObj;
       const unsigned format = get_image_format(brw, u->_ActualFormat, access);
+      const bool written = (access != GL_READ_ONLY && access != GL_NONE);
 
       if (obj->Target == GL_TEXTURE_BUFFER) {
-         struct intel_buffer_object *intel_obj =
-            intel_buffer_object(obj->BufferObject);
          const unsigned texel_size = (format == ISL_FORMAT_RAW ? 1 :
                                       _mesa_get_format_bytes(u->_ActualFormat));
+         const unsigned buffer_size = buffer_texture_range_size(brw, obj);
+         struct brw_bo *const bo = !obj->BufferObject ? NULL :
+            intel_bufferobj_buffer(brw, intel_buffer_object(obj->BufferObject),
+                                   obj->BufferOffset, buffer_size, written);
 
          brw_emit_buffer_surface_state(
-            brw, surf_offset, intel_obj->buffer, obj->BufferOffset,
-            format, intel_obj->Base.Size, texel_size,
-            access != GL_READ_ONLY);
+            brw, surf_offset, bo, obj->BufferOffset,
+            format, buffer_size, texel_size,
+            written ? RELOC_WRITE : 0);
 
-         update_buffer_image_param(brw, u, surface_idx, param);
+         update_buffer_image_param(brw, u, param);
 
       } else {
          struct intel_texture_object *intel_obj = intel_texture_object(obj);
          struct intel_mipmap_tree *mt = intel_obj->mt;
 
+         unsigned base_layer, num_layers;
+         if (u->Layered) {
+            if (obj->Target == GL_TEXTURE_3D) {
+               base_layer = 0;
+               num_layers = minify(mt->surf.logical_level0_px.depth, u->Level);
+            } else {
+               assert(obj->Immutable || obj->MinLayer == 0);
+               base_layer = obj->MinLayer;
+               num_layers = obj->Immutable ?
+                                obj->NumLayers :
+                                mt->surf.logical_level0_px.array_len;
+            }
+         } else {
+            base_layer = obj->MinLayer + u->_Layer;
+            num_layers = 1;
+         }
+
+         struct isl_view view = {
+            .format = format,
+            .base_level = obj->MinLevel + u->Level,
+            .levels = 1,
+            .base_array_layer = base_layer,
+            .array_len = num_layers,
+            .swizzle = ISL_SWIZZLE_IDENTITY,
+            .usage = ISL_SURF_USAGE_STORAGE_BIT,
+         };
+
          if (format == ISL_FORMAT_RAW) {
             brw_emit_buffer_surface_state(
                brw, surf_offset, mt->bo, mt->offset,
                format, mt->bo->size - mt->offset, 1 /* pitch */,
-               access != GL_READ_ONLY);
+               written ? RELOC_WRITE : 0);
 
          } else {
-            const unsigned num_layers = (!u->Layered ? 1 :
-                                         obj->Target == GL_TEXTURE_CUBE_MAP ? 6 :
-                                         mt->logical_depth0);
-
-            struct isl_view view = {
-               .format = format,
-               .base_level = obj->MinLevel + u->Level,
-               .levels = 1,
-               .base_array_layer = obj->MinLayer + u->_Layer,
-               .array_len = num_layers,
-               .swizzle = ISL_SWIZZLE_IDENTITY,
-               .usage = ISL_SURF_USAGE_STORAGE_BIT,
-            };
-
             const int surf_index = surf_offset - &brw->wm.base.surf_offset[0];
-            const bool unresolved = intel_miptree_has_color_unresolved(
-                                       mt, view.base_level, view.levels,
-                                       view.base_array_layer, view.array_len);
-            const int flags = unresolved ? 0 : INTEL_AUX_BUFFER_DISABLED;
-            brw_emit_surface_state(brw, mt, flags, mt->target, view,
-                                   tex_mocs[brw->gen],
+            assert(!intel_miptree_has_color_unresolved(mt,
+                                                       view.base_level, 1,
+                                                       view.base_array_layer,
+                                                       view.array_len));
+            brw_emit_surface_state(brw, mt, mt->target, view,
+                                   ISL_AUX_USAGE_NONE,
                                    surf_offset, surf_index,
-                                   I915_GEM_DOMAIN_SAMPLER,
-                                   access == GL_READ_ONLY ? 0 :
-                                             I915_GEM_DOMAIN_SAMPLER);
+                                   written ? RELOC_WRITE : 0);
          }
 
-         update_texture_image_param(brw, u, surface_idx, param);
+         isl_surf_fill_image_param(&brw->isl_dev, param, &mt->surf, &view);
       }
 
    } else {
-      brw->vtbl.emit_null_surface_state(brw, 1, 1, 1, surf_offset);
-      update_default_image_param(brw, u, surface_idx, param);
+      emit_null_surface_state(brw, NULL, surf_offset);
+      update_default_image_param(brw, u, param);
    }
 }
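
The base_layer/num_layers selection introduced above can be read with a small example (assumed values): for a texture view created with MinLayer = 4, a non-layered binding of unit layer 2 addresses exactly one slice, while a layered binding of the same immutable object with NumLayers = 8 addresses the whole view:

   /* Non-layered binding: one slice, offset by the view's MinLayer. */
   base_layer = 4 /* obj->MinLayer */ + 2 /* u->_Layer */;   /* == 6 */
   num_layers = 1;

   /* Layered binding of an immutable object: the whole view. */
   base_layer = 4;   /* obj->MinLayer */
   num_layers = 8;   /* obj->NumLayers */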
 
@@ -1786,9 +1607,8 @@ brw_upload_image_surfaces(struct brw_context *brw,
          const unsigned surf_idx = prog_data->binding_table.image_start + i;
 
          update_image_surface(brw, u, prog->sh.ImageAccess[i],
-                              surf_idx,
                               &stage_state->surf_offset[surf_idx],
-                              &prog_data->image_param[i]);
+                              &stage_state->image_param[i]);
       }
 
       brw->ctx.NewDriverState |= BRW_NEW_SURFACES;
@@ -1804,7 +1624,7 @@ static void
 brw_upload_wm_image_surfaces(struct brw_context *brw)
 {
    /* BRW_NEW_FRAGMENT_PROGRAM */
-   const struct gl_program *wm = brw->fragment_program;
+   const struct gl_program *wm = brw->programs[MESA_SHADER_FRAGMENT];
 
    if (wm) {
       /* BRW_NEW_FS_PROG_DATA, BRW_NEW_IMAGE_UNITS, _NEW_TEXTURE */
@@ -1817,7 +1637,7 @@ const struct brw_tracked_state brw_wm_image_surfaces = {
    .dirty = {
       .mesa = _NEW_TEXTURE,
       .brw = BRW_NEW_BATCH |
-             BRW_NEW_BLORP |
+             BRW_NEW_AUX_STATE |
              BRW_NEW_FRAGMENT_PROGRAM |
              BRW_NEW_FS_PROG_DATA |
              BRW_NEW_IMAGE_UNITS
@@ -1825,20 +1645,6 @@ const struct brw_tracked_state brw_wm_image_surfaces = {
    .emit = brw_upload_wm_image_surfaces,
 };
 
-void
-gen4_init_vtable_surface_functions(struct brw_context *brw)
-{
-   brw->vtbl.update_renderbuffer_surface = gen4_update_renderbuffer_surface;
-   brw->vtbl.emit_null_surface_state = brw_emit_null_surface_state;
-}
-
-void
-gen6_init_vtable_surface_functions(struct brw_context *brw)
-{
-   gen4_init_vtable_surface_functions(brw);
-   brw->vtbl.update_renderbuffer_surface = brw_update_renderbuffer_surface;
-}
-
 static void
 brw_upload_cs_work_groups_surface(struct brw_context *brw)
 {
@@ -1859,12 +1665,12 @@ brw_upload_cs_work_groups_surface(struct brw_context *brw)
 
       if (brw->compute.num_work_groups_bo == NULL) {
          bo = NULL;
-         intel_upload_data(brw,
-                           (void *)brw->compute.num_work_groups,
-                           3 * sizeof(GLuint),
-                           sizeof(GLuint),
-                           &bo,
-                           &bo_offset);
+         brw_upload_data(&brw->upload,
+                         (void *)brw->compute.num_work_groups,
+                         3 * sizeof(GLuint),
+                         sizeof(GLuint),
+                         &bo,
+                         &bo_offset);
       } else {
          bo = brw->compute.num_work_groups_bo;
          bo_offset = brw->compute.num_work_groups_offset;
@@ -1873,15 +1679,15 @@ brw_upload_cs_work_groups_surface(struct brw_context *brw)
       brw_emit_buffer_surface_state(brw, surf_offset,
                                     bo, bo_offset,
                                     ISL_FORMAT_RAW,
-                                    3 * sizeof(GLuint), 1, true);
+                                    3 * sizeof(GLuint), 1,
+                                    RELOC_WRITE);
       brw->ctx.NewDriverState |= BRW_NEW_SURFACES;
    }
 }
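
The surface emitted here is a 12-byte RAW buffer (three GLuints) holding the dispatch dimensions. A hedged sketch of the direct-dispatch case, with hypothetical counts, showing how the values would travel through the upload buffer before the surface state is emitted:

   GLuint num_groups[3] = { 64, 64, 1 };   /* hypothetical glDispatchCompute counts */
   struct brw_bo *bo = NULL;
   uint32_t bo_offset;
   brw_upload_data(&brw->upload, num_groups, 3 * sizeof(GLuint),
                   sizeof(GLuint), &bo, &bo_offset);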
 
 const struct brw_tracked_state brw_cs_work_groups_surface = {
    .dirty = {
-      .brw = BRW_NEW_BLORP |
-             BRW_NEW_CS_PROG_DATA |
+      .brw = BRW_NEW_CS_PROG_DATA |
              BRW_NEW_CS_WORK_GROUPS
    },
    .emit = brw_upload_cs_work_groups_surface,