i965/program_cache: Cast the key to char * before adding key_size
[mesa.git] / src / mesa / drivers / dri / i965 / brw_misc_state.c
index 0c43d2b4b2be60c15c6a4343c7d0d9e5af4daa73..b752294250e3effc4d6b3e33c9c404fe9f1584e8 100644
--- a/src/mesa/drivers/dri/i965/brw_misc_state.c
+++ b/src/mesa/drivers/dri/i965/brw_misc_state.c
@@ -54,7 +54,9 @@
 static void
 upload_pipelined_state_pointers(struct brw_context *brw)
 {
-   if (brw->gen == 5) {
+   const struct gen_device_info *devinfo = &brw->screen->devinfo;
+
+   if (devinfo->gen == 5) {
       /* Need to flush before changing clip max threads for errata. */
       BEGIN_BATCH(1);
       OUT_BATCH(MI_FLUSH);
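
The recurring change throughout this diff is replacing the duplicated per-context generation fields (brw->gen, brw->is_g4x, brw->is_haswell, ...) with lookups through the screen's gen_device_info. A minimal sketch of the pattern, using only the struct names visible in the diff; the helper function itself is hypothetical:

   /* Hypothetical helper illustrating the devinfo pattern used in every
    * hunk below: fetch gen_device_info once from the screen and use it
    * for all generation/platform checks.
    */
   static bool
   needs_gen5_errata_flush(struct brw_context *brw)
   {
      const struct gen_device_info *devinfo = &brw->screen->devinfo;

      return devinfo->gen == 5;
   }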
@@ -63,21 +65,15 @@ upload_pipelined_state_pointers(struct brw_context *brw)
 
    BEGIN_BATCH(7);
    OUT_BATCH(_3DSTATE_PIPELINED_POINTERS << 16 | (7 - 2));
-   OUT_RELOC(brw->batch.bo, I915_GEM_DOMAIN_INSTRUCTION, 0,
-            brw->vs.base.state_offset);
+   OUT_RELOC(brw->batch.state.bo, 0, brw->vs.base.state_offset);
    if (brw->ff_gs.prog_active)
-      OUT_RELOC(brw->batch.bo, I915_GEM_DOMAIN_INSTRUCTION, 0,
-               brw->ff_gs.state_offset | 1);
+      OUT_RELOC(brw->batch.state.bo, 0, brw->ff_gs.state_offset | 1);
    else
       OUT_BATCH(0);
-   OUT_RELOC(brw->batch.bo, I915_GEM_DOMAIN_INSTRUCTION, 0,
-            brw->clip.state_offset | 1);
-   OUT_RELOC(brw->batch.bo, I915_GEM_DOMAIN_INSTRUCTION, 0,
-            brw->sf.state_offset);
-   OUT_RELOC(brw->batch.bo, I915_GEM_DOMAIN_INSTRUCTION, 0,
-            brw->wm.base.state_offset);
-   OUT_RELOC(brw->batch.bo, I915_GEM_DOMAIN_INSTRUCTION, 0,
-            brw->cc.state_offset);
+   OUT_RELOC(brw->batch.state.bo, 0, brw->clip.state_offset | 1);
+   OUT_RELOC(brw->batch.state.bo, 0, brw->sf.state_offset);
+   OUT_RELOC(brw->batch.state.bo, 0, brw->wm.base.state_offset);
+   OUT_RELOC(brw->batch.state.bo, 0, brw->cc.state_offset);
    ADVANCE_BATCH();
 
    brw->ctx.NewDriverState |= BRW_NEW_PSP;
@@ -140,6 +136,7 @@ static bool
 rebase_depth_stencil(struct brw_context *brw, struct intel_renderbuffer *irb,
                      bool invalidate)
 {
+   const struct gen_device_info *devinfo = &brw->screen->devinfo;
    struct gl_context *ctx = &brw->ctx;
    uint32_t tile_mask_x = 0, tile_mask_y = 0;
 
@@ -160,7 +157,7 @@ rebase_depth_stencil(struct brw_context *brw, struct intel_renderbuffer *irb,
    bool rebase = tile_x & 7 || tile_y & 7;
 
    /* We didn't even have intra-tile offsets before g45. */
-   rebase |= (!brw->has_surface_tile_offset && (tile_x || tile_y));
+   rebase |= (!devinfo->has_surface_tile_offset && (tile_x || tile_y));
 
    if (rebase) {
       perf_debug("HW workaround: blitting depth level %d to a temporary "
@@ -201,6 +198,7 @@ void
 brw_workaround_depthstencil_alignment(struct brw_context *brw,
                                       GLbitfield clear_mask)
 {
+   const struct gen_device_info *devinfo = &brw->screen->devinfo;
    struct gl_context *ctx = &brw->ctx;
    struct gl_framebuffer *fb = ctx->DrawBuffer;
    struct intel_renderbuffer *depth_irb = intel_get_renderbuffer(fb, BUFFER_DEPTH);
@@ -221,7 +219,7 @@ brw_workaround_depthstencil_alignment(struct brw_context *brw,
    /* Gen6+ doesn't require the workarounds, since we always program the
     * surface state at the start of the whole surface.
     */
-   if (brw->gen >= 6)
+   if (devinfo->gen >= 6)
       return;
 
    /* Check if depth buffer is in depth/stencil format.  If so, then it's only
@@ -256,141 +254,51 @@ brw_workaround_depthstencil_alignment(struct brw_context *brw,
        rebase_depth_stencil(brw, stencil_irb, invalidate_stencil);
 }
 
-void
-brw_emit_depthbuffer(struct brw_context *brw)
+static void
+brw_emit_depth_stencil_hiz(struct brw_context *brw,
+                           struct intel_renderbuffer *depth_irb,
+                           struct intel_mipmap_tree *depth_mt,
+                           struct intel_renderbuffer *stencil_irb,
+                           struct intel_mipmap_tree *stencil_mt)
 {
-   struct gl_context *ctx = &brw->ctx;
-   struct gl_framebuffer *fb = ctx->DrawBuffer;
-   /* _NEW_BUFFERS */
-   struct intel_renderbuffer *depth_irb = intel_get_renderbuffer(fb, BUFFER_DEPTH);
-   struct intel_renderbuffer *stencil_irb = intel_get_renderbuffer(fb, BUFFER_STENCIL);
-   struct intel_mipmap_tree *depth_mt = intel_renderbuffer_get_mt(depth_irb);
-   struct intel_mipmap_tree *stencil_mt = get_stencil_miptree(stencil_irb);
    uint32_t tile_x = brw->depthstencil.tile_x;
    uint32_t tile_y = brw->depthstencil.tile_y;
-   bool hiz = depth_irb && intel_renderbuffer_has_hiz(depth_irb);
-   bool separate_stencil = false;
    uint32_t depth_surface_type = BRW_SURFACE_NULL;
    uint32_t depthbuffer_format = BRW_DEPTHFORMAT_D32_FLOAT;
    uint32_t depth_offset = 0;
    uint32_t width = 1, height = 1;
-
-   if (stencil_mt) {
-      separate_stencil = stencil_mt->format == MESA_FORMAT_S_UINT8;
-
-      /* Gen7 supports only separate stencil */
-      assert(separate_stencil || brw->gen < 7);
-   }
+   bool tiled_surface = true;
 
    /* If there's a packed depth/stencil bound to stencil only, we need to
     * emit the packed depth/stencil buffer packet.
     */
-   if (!depth_irb && stencil_irb && !separate_stencil) {
+   if (!depth_irb && stencil_irb) {
       depth_irb = stencil_irb;
       depth_mt = stencil_mt;
    }
 
    if (depth_irb && depth_mt) {
-      /* When 3DSTATE_DEPTH_BUFFER.Separate_Stencil_Enable is set, then
-       * 3DSTATE_DEPTH_BUFFER.Surface_Format is not permitted to be a packed
-       * depthstencil format.
-       *
-       * Gens prior to 7 require that HiZ_Enable and Separate_Stencil_Enable be
-       * set to the same value. Gens after 7 implicitly always set
-       * Separate_Stencil_Enable; software cannot disable it.
-       */
-      if ((brw->gen < 7 && hiz) || brw->gen >= 7) {
-         assert(!_mesa_is_format_packed_depth_stencil(depth_mt->format));
-      }
-
-      /* Prior to Gen7, if using separate stencil, hiz must be enabled. */
-      assert(brw->gen >= 7 || !separate_stencil || hiz);
-
-      assert(brw->gen < 6 || depth_mt->surf.tiling == ISL_TILING_Y0);
-      assert(!hiz || depth_mt->surf.tiling == ISL_TILING_Y0);
-
       depthbuffer_format = brw_depthbuffer_format(brw);
       depth_surface_type = BRW_SURFACE_2D;
       depth_offset = brw->depthstencil.depth_offset;
       width = depth_irb->Base.Base.Width;
       height = depth_irb->Base.Base.Height;
-   } else if (separate_stencil) {
-      /*
-       * There exists a separate stencil buffer but no depth buffer.
-       *
-       * The stencil buffer inherits most of its fields from
-       * 3DSTATE_DEPTH_BUFFER: namely the tile walk, surface type, width, and
-       * height.
-       *
-       * The tiled bit must be set. From the Sandybridge PRM, Volume 2, Part 1,
-       * Section 7.5.5.1.1 3DSTATE_DEPTH_BUFFER, Bit 1.27 Tiled Surface:
-       *     [DevGT+]: This field must be set to TRUE.
-       */
-      assert(brw->has_separate_stencil);
-
-      depth_surface_type = BRW_SURFACE_2D;
-      width = stencil_irb->Base.Base.Width;
-      height = stencil_irb->Base.Base.Height;
+      tiled_surface = depth_mt->surf.tiling != ISL_TILING_LINEAR;
    }
 
-   if (depth_mt)
-      brw_render_cache_set_check_flush(brw, depth_mt->bo);
-   if (stencil_mt)
-      brw_render_cache_set_check_flush(brw, stencil_mt->bo);
-
-   brw->vtbl.emit_depth_stencil_hiz(brw, depth_mt, depth_offset,
-                                    depthbuffer_format, depth_surface_type,
-                                    stencil_mt, hiz, separate_stencil,
-                                    width, height, tile_x, tile_y);
-}
-
-uint32_t
-brw_convert_depth_value(mesa_format format, float value)
-{
-   switch (format) {
-   case MESA_FORMAT_Z_FLOAT32:
-      return float_as_int(value);
-   case MESA_FORMAT_Z_UNORM16:
-      return value * ((1u << 16) - 1);
-   case MESA_FORMAT_Z24_UNORM_X8_UINT:
-      return value * ((1u << 24) - 1);
-   default:
-      unreachable("Invalid depth format");
-   }
-}
-
-void
-brw_emit_depth_stencil_hiz(struct brw_context *brw,
-                           struct intel_mipmap_tree *depth_mt,
-                           uint32_t depth_offset, uint32_t depthbuffer_format,
-                           uint32_t depth_surface_type,
-                           struct intel_mipmap_tree *stencil_mt,
-                           bool hiz, bool separate_stencil,
-                           uint32_t width, uint32_t height,
-                           uint32_t tile_x, uint32_t tile_y)
-{
-   (void)hiz;
-   (void)separate_stencil;
-   (void)stencil_mt;
-
-   assert(!hiz);
-   assert(!separate_stencil);
-
-   const unsigned len = (brw->is_g4x || brw->gen == 5) ? 6 : 5;
+   const struct gen_device_info *devinfo = &brw->screen->devinfo;
+   const unsigned len = (devinfo->is_g4x || devinfo->gen == 5) ? 6 : 5;
 
    BEGIN_BATCH(len);
    OUT_BATCH(_3DSTATE_DEPTH_BUFFER << 16 | (len - 2));
-   OUT_BATCH((depth_mt ? depth_mt->surf.row_pitch - 1 : 0) |
+   OUT_BATCH((depth_mt ? depth_mt->surf.row_pitch_B - 1 : 0) |
              (depthbuffer_format << 18) |
              (BRW_TILEWALK_YMAJOR << 26) |
-             ((depth_mt ? depth_mt->surf.tiling != ISL_TILING_LINEAR : 1)
-              << 27) |
+             (tiled_surface << 27) |
              (depth_surface_type << 29));
 
    if (depth_mt) {
-      OUT_RELOC(depth_mt->bo,
-               I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER,
-               depth_offset);
+      OUT_RELOC(depth_mt->bo, RELOC_WRITE, depth_offset);
    } else {
       OUT_BATCH(0);
    }
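
For readers less familiar with the gen4/5 packet layout, the second dword built above packs row pitch, depth format, tile walk, tiling and surface type into fixed bit positions. A worked example with hypothetical values (a Y-tiled 32-bit float depth buffer with a 4096-byte row pitch), using only macros that already appear in this function:

   /* Illustration only: DW1 of 3DSTATE_DEPTH_BUFFER for a hypothetical
    * Y-tiled MESA_FORMAT_Z_FLOAT32 surface with a 4096-byte row pitch.
    */
   const uint32_t dw1 = (4096 - 1) |                       /* row pitch in bytes, minus one */
                        (BRW_DEPTHFORMAT_D32_FLOAT << 18) |
                        (BRW_TILEWALK_YMAJOR << 26) |
                        (1 << 27) |                         /* tiled surface */
                        (BRW_SURFACE_2D << 29);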
@@ -399,21 +307,159 @@ brw_emit_depth_stencil_hiz(struct brw_context *brw,
              ((height + tile_y - 1) << 19));
    OUT_BATCH(0);
 
-   if (brw->is_g4x || brw->gen >= 5)
+   if (devinfo->is_g4x || devinfo->gen >= 5)
       OUT_BATCH(tile_x | (tile_y << 16));
    else
       assert(tile_x == 0 && tile_y == 0);
 
-   if (brw->gen >= 6)
+   if (devinfo->gen >= 6)
       OUT_BATCH(0);
 
    ADVANCE_BATCH();
 }
 
+void
+brw_emit_depthbuffer(struct brw_context *brw)
+{
+   const struct gen_device_info *devinfo = &brw->screen->devinfo;
+   struct gl_context *ctx = &brw->ctx;
+   struct gl_framebuffer *fb = ctx->DrawBuffer;
+   /* _NEW_BUFFERS */
+   struct intel_renderbuffer *depth_irb = intel_get_renderbuffer(fb, BUFFER_DEPTH);
+   struct intel_renderbuffer *stencil_irb = intel_get_renderbuffer(fb, BUFFER_STENCIL);
+   struct intel_mipmap_tree *depth_mt = intel_renderbuffer_get_mt(depth_irb);
+   struct intel_mipmap_tree *stencil_mt = get_stencil_miptree(stencil_irb);
+
+   if (depth_mt)
+      brw_cache_flush_for_depth(brw, depth_mt->bo);
+   if (stencil_mt)
+      brw_cache_flush_for_depth(brw, stencil_mt->bo);
+
+   if (devinfo->gen < 6) {
+      brw_emit_depth_stencil_hiz(brw, depth_irb, depth_mt,
+                                 stencil_irb, stencil_mt);
+      return;
+   }
+
+   /* Skip repeated NULL depth/stencil emits (think 2D rendering). */
+   if (!depth_mt && !stencil_mt && brw->no_depth_or_stencil) {
+      assert(brw->hw_ctx);
+      return;
+   }
+
+   brw_emit_depth_stall_flushes(brw);
+
+   const unsigned ds_dwords = brw->isl_dev.ds.size / 4;
+   intel_batchbuffer_begin(brw, ds_dwords);
+   uint32_t *ds_map = brw->batch.map_next;
+   const uint32_t ds_offset = (char *)ds_map - (char *)brw->batch.batch.map;
+
+   struct isl_view view = {
+      /* Some nice defaults */
+      .base_level = 0,
+      .levels = 1,
+      .base_array_layer = 0,
+      .array_len = 1,
+      .swizzle = ISL_SWIZZLE_IDENTITY,
+   };
+
+   struct isl_depth_stencil_hiz_emit_info info = {
+      .view = &view,
+   };
+
+   if (depth_mt) {
+      view.usage |= ISL_SURF_USAGE_DEPTH_BIT;
+      info.depth_surf = &depth_mt->surf;
+
+      info.depth_address =
+         brw_batch_reloc(&brw->batch,
+                         ds_offset + brw->isl_dev.ds.depth_offset,
+                         depth_mt->bo, depth_mt->offset, RELOC_WRITE);
+
+      info.mocs = brw_get_bo_mocs(devinfo, depth_mt->bo);
+      view.base_level = depth_irb->mt_level - depth_irb->mt->first_level;
+      view.base_array_layer = depth_irb->mt_layer;
+      view.array_len = MAX2(depth_irb->layer_count, 1);
+      view.format = depth_mt->surf.format;
+
+      info.hiz_usage = depth_mt->aux_usage;
+      if (!intel_renderbuffer_has_hiz(depth_irb)) {
+         /* Just because a miptree has ISL_AUX_USAGE_HIZ does not mean that
+          * all miplevels of that miptree are guaranteed to support HiZ.  See
+          * intel_miptree_level_enable_hiz for details.
+          */
+         info.hiz_usage = ISL_AUX_USAGE_NONE;
+      }
+
+      if (info.hiz_usage == ISL_AUX_USAGE_HIZ) {
+         info.hiz_surf = &depth_mt->aux_buf->surf;
+
+         uint32_t hiz_offset = 0;
+         if (devinfo->gen == 6) {
+            /* HiZ surfaces on Sandy Bridge technically don't support
+             * mip-mapping.  However, we can fake it by offsetting to the
+             * first slice of LOD0 in the HiZ surface.
+             */
+            isl_surf_get_image_offset_B_tile_sa(&depth_mt->aux_buf->surf,
+                                                view.base_level, 0, 0,
+                                                &hiz_offset, NULL, NULL);
+         }
+
+         info.hiz_address =
+            brw_batch_reloc(&brw->batch,
+                            ds_offset + brw->isl_dev.ds.hiz_offset,
+                            depth_mt->aux_buf->bo,
+                            depth_mt->aux_buf->offset + hiz_offset,
+                            RELOC_WRITE);
+      }
+
+      info.depth_clear_value = depth_mt->fast_clear_color.f32[0];
+   }
+
+   if (stencil_mt) {
+      view.usage |= ISL_SURF_USAGE_STENCIL_BIT;
+      info.stencil_surf = &stencil_mt->surf;
+
+      if (!depth_mt) {
+         info.mocs = brw_get_bo_mocs(devinfo, stencil_mt->bo);
+         view.base_level = stencil_irb->mt_level - stencil_irb->mt->first_level;
+         view.base_array_layer = stencil_irb->mt_layer;
+         view.array_len = MAX2(stencil_irb->layer_count, 1);
+         view.format = stencil_mt->surf.format;
+      }
+
+      uint32_t stencil_offset = 0;
+      if (devinfo->gen == 6) {
+         /* Stencil surfaces on Sandy Bridge technically don't support
+          * mip-mapping.  However, we can fake it by offsetting to the
+          * first slice of LOD0 in the stencil surface.
+          */
+         isl_surf_get_image_offset_B_tile_sa(&stencil_mt->surf,
+                                             view.base_level, 0, 0,
+                                             &stencil_offset, NULL, NULL);
+      }
+
+      info.stencil_address =
+         brw_batch_reloc(&brw->batch,
+                         ds_offset + brw->isl_dev.ds.stencil_offset,
+                         stencil_mt->bo,
+                         stencil_mt->offset + stencil_offset,
+                         RELOC_WRITE);
+   }
+
+   isl_emit_depth_stencil_hiz_s(&brw->isl_dev, ds_map, &info);
+
+   brw->batch.map_next += ds_dwords;
+   intel_batchbuffer_advance(brw);
+
+   brw->no_depth_or_stencil = !depth_mt && !stencil_mt;
+}
+
 const struct brw_tracked_state brw_depthbuffer = {
    .dirty = {
       .mesa = _NEW_BUFFERS,
-      .brw = BRW_NEW_BATCH |
+      .brw = BRW_NEW_AUX_STATE |
+             BRW_NEW_BATCH |
              BRW_NEW_BLORP,
    },
    .emit = brw_emit_depthbuffer,
@@ -422,11 +468,12 @@ const struct brw_tracked_state brw_depthbuffer = {
 void
 brw_emit_select_pipeline(struct brw_context *brw, enum brw_pipeline pipeline)
 {
-   const bool is_965 = brw->gen == 4 && !brw->is_g4x;
+   const struct gen_device_info *devinfo = &brw->screen->devinfo;
+   const bool is_965 = devinfo->gen == 4 && !devinfo->is_g4x;
    const uint32_t _3DSTATE_PIPELINE_SELECT =
       is_965 ? CMD_PIPELINE_SELECT_965 : CMD_PIPELINE_SELECT_GM45;
 
-   if (brw->gen >= 8 && brw->gen < 10) {
+   if (devinfo->gen >= 8 && devinfo->gen < 10) {
       /* From the Broadwell PRM, Volume 2a: Instructions, PIPELINE_SELECT:
        *
        *   Software must clear the COLOR_CALC_STATE Valid field in
@@ -446,7 +493,7 @@ brw_emit_select_pipeline(struct brw_context *brw, enum brw_pipeline pipeline)
       }
    }
 
-   if (brw->gen >= 6) {
+   if (devinfo->gen >= 6) {
       /* From "BXML » GT » MI » vol1a GPU Overview » [Instruction]
        * PIPELINE_SELECT [DevBWR+]":
        *
@@ -458,21 +505,19 @@ brw_emit_select_pipeline(struct brw_context *brw, enum brw_pipeline pipeline)
        *   MI_PIPELINE_SELECT command to change the Pipeline Select Mode.
        */
       const unsigned dc_flush =
-         brw->gen >= 7 ? PIPE_CONTROL_DATA_CACHE_FLUSH : 0;
+         devinfo->gen >= 7 ? PIPE_CONTROL_DATA_CACHE_FLUSH : 0;
 
       brw_emit_pipe_control_flush(brw,
                                   PIPE_CONTROL_RENDER_TARGET_FLUSH |
                                   PIPE_CONTROL_DEPTH_CACHE_FLUSH |
                                   dc_flush |
-                                  PIPE_CONTROL_NO_WRITE |
                                   PIPE_CONTROL_CS_STALL);
 
       brw_emit_pipe_control_flush(brw,
                                   PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE |
                                   PIPE_CONTROL_CONST_CACHE_INVALIDATE |
                                   PIPE_CONTROL_STATE_CACHE_INVALIDATE |
-                                  PIPE_CONTROL_INSTRUCTION_INVALIDATE |
-                                  PIPE_CONTROL_NO_WRITE);
+                                  PIPE_CONTROL_INSTRUCTION_INVALIDATE);
 
    } else {
       /* From "BXML » GT » MI » vol1a GPU Overview » [Instruction]
@@ -491,11 +536,11 @@ brw_emit_select_pipeline(struct brw_context *brw, enum brw_pipeline pipeline)
    /* Select the pipeline */
    BEGIN_BATCH(1);
    OUT_BATCH(_3DSTATE_PIPELINE_SELECT << 16 |
-             (brw->gen >= 9 ? (3 << 8) : 0) |
+             (devinfo->gen >= 9 ? (3 << 8) : 0) |
              (pipeline == BRW_COMPUTE_PIPELINE ? 2 : 0));
    ADVANCE_BATCH();
 
-   if (brw->gen == 7 && !brw->is_haswell &&
+   if (devinfo->gen == 7 && !devinfo->is_haswell &&
        pipeline == BRW_RENDER_PIPELINE) {
       /* From "BXML » GT » MI » vol1a GPU Overview » [Instruction]
        * PIPELINE_SELECT [DevBWR+]":
@@ -518,6 +563,21 @@ brw_emit_select_pipeline(struct brw_context *brw, enum brw_pipeline pipeline)
       OUT_BATCH(0);
       ADVANCE_BATCH();
    }
+
+   if (devinfo->is_geminilake) {
+      /* Project: DevGLK
+       *
+       * "This chicken bit works around a hardware issue with barrier logic
+       *  encountered when switching between GPGPU and 3D pipelines.  To
+       *  workaround the issue, this mode bit should be set after a pipeline
+       *  is selected."
+       */
+      const unsigned barrier_mode =
+         pipeline == BRW_RENDER_PIPELINE ? GLK_SCEC_BARRIER_MODE_3D_HULL
+                                         : GLK_SCEC_BARRIER_MODE_GPGPU;
+      brw_load_register_imm32(brw, SLICE_COMMON_ECO_CHICKEN1,
+                              barrier_mode | GLK_SCEC_BARRIER_MODE_MASK);
+   }
 }
 
 /**
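
The Geminilake workaround above writes an MMIO register from the command streamer. brw_load_register_imm32() is not part of this diff; as an assumption about that helper, it amounts to a three-dword MI_LOAD_REGISTER_IMM, roughly:

   /* Assumed expansion of brw_load_register_imm32(brw, reg, imm):
    * a single MI_LOAD_REGISTER_IMM that writes `imm` into the MMIO
    * register at offset `reg` (here SLICE_COMMON_ECO_CHICKEN1).
    */
   BEGIN_BATCH(3);
   OUT_BATCH(MI_LOAD_REGISTER_IMM | (3 - 2));
   OUT_BATCH(reg);
   OUT_BATCH(imm);
   ADVANCE_BATCH();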
@@ -526,12 +586,13 @@ brw_emit_select_pipeline(struct brw_context *brw, enum brw_pipeline pipeline)
 void
 brw_upload_invariant_state(struct brw_context *brw)
 {
-   const bool is_965 = brw->gen == 4 && !brw->is_g4x;
+   const struct gen_device_info *devinfo = &brw->screen->devinfo;
+   const bool is_965 = devinfo->gen == 4 && !devinfo->is_g4x;
 
    brw_emit_select_pipeline(brw, BRW_RENDER_PIPELINE);
    brw->last_pipeline = BRW_RENDER_PIPELINE;
 
-   if (brw->gen >= 8) {
+   if (devinfo->gen >= 8) {
       BEGIN_BATCH(3);
       OUT_BATCH(CMD_STATE_SIP << 16 | (3 - 2));
       OUT_BATCH(0);
@@ -553,23 +614,8 @@ brw_upload_invariant_state(struct brw_context *brw)
       OUT_BATCH(0);
       ADVANCE_BATCH();
    }
-
-   const uint32_t _3DSTATE_VF_STATISTICS =
-      is_965 ? GEN4_3DSTATE_VF_STATISTICS : GM45_3DSTATE_VF_STATISTICS;
-   BEGIN_BATCH(1);
-   OUT_BATCH(_3DSTATE_VF_STATISTICS << 16 | 1);
-   ADVANCE_BATCH();
 }
 
-const struct brw_tracked_state brw_invariant_state = {
-   .dirty = {
-      .mesa = 0,
-      .brw = BRW_NEW_BLORP |
-             BRW_NEW_CONTEXT,
-   },
-   .emit = brw_upload_invariant_state
-};
-
 /**
  * Define the base addresses which some state is referenced from.
  *
@@ -583,6 +629,8 @@ const struct brw_tracked_state brw_invariant_state = {
 void
 brw_upload_state_base_address(struct brw_context *brw)
 {
+   const struct gen_device_info *devinfo = &brw->screen->devinfo;
+
    if (brw->batch.state_base_address_emitted)
       return;
 
@@ -595,9 +643,9 @@ brw_upload_state_base_address(struct brw_context *brw)
     * maybe this isn't required for us in particular.
     */
 
-   if (brw->gen >= 6) {
+   if (devinfo->gen >= 6) {
       const unsigned dc_flush =
-         brw->gen >= 7 ? PIPE_CONTROL_DATA_CACHE_FLUSH : 0;
+         devinfo->gen >= 7 ? PIPE_CONTROL_DATA_CACHE_FLUSH : 0;
 
       /* Emit a render target cache flush.
        *
@@ -626,9 +674,15 @@ brw_upload_state_base_address(struct brw_context *brw)
                                 dc_flush);
    }
 
-   if (brw->gen >= 8) {
-      uint32_t mocs_wb = brw->gen >= 9 ? SKL_MOCS_WB : BDW_MOCS_WB;
-      int pkt_len = brw->gen >= 9 ? 19 : 16;
+   if (devinfo->gen >= 8) {
+      /* STATE_BASE_ADDRESS has issues with 48-bit address spaces.  If the
+       * address + size as seen by STATE_BASE_ADDRESS overflows 48 bits,
+       * the GPU appears to treat all accesses to the buffer as being out
+       * of bounds and returns zero.  To work around this, we pin all SBAs
+       * to the bottom 4GB.
+       */
+      uint32_t mocs_wb = devinfo->gen >= 9 ? SKL_MOCS_WB : BDW_MOCS_WB;
+      int pkt_len = devinfo->gen >= 10 ? 22 : (devinfo->gen >= 9 ? 19 : 16);
 
       BEGIN_BATCH(pkt_len);
       OUT_BATCH(CMD_STATE_BASE_ADDRESS << 16 | (pkt_len - 2));
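
The 48-bit overflow described in the new comment is easy to reproduce with hypothetical numbers (nothing below comes from the driver itself); it is what motivates the RELOC_32BIT flag on the relocations in the following hunk:

   /* Hypothetical illustration of the overflow: a buffer placed just
    * below the 48-bit limit whose base + size crosses 1ull << 48 would
    * read back as zeroes, hence pinning these BOs under 4 GB.
    */
   const uint64_t base = 0xffffff000000ull;    /* 16 MiB below 1ull << 48 */
   const uint64_t size = 64ull * 1024 * 1024;  /* 64 MiB */
   const bool overflows = base + size > (1ull << 48);   /* true */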
@@ -637,35 +691,35 @@ brw_upload_state_base_address(struct brw_context *brw)
       OUT_BATCH(0);
       OUT_BATCH(mocs_wb << 16);
       /* Surface state base address: */
-      OUT_RELOC64(brw->batch.bo, I915_GEM_DOMAIN_SAMPLER, 0,
-                  mocs_wb << 4 | 1);
+      OUT_RELOC64(brw->batch.state.bo, RELOC_32BIT, mocs_wb << 4 | 1);
       /* Dynamic state base address: */
-      OUT_RELOC64(brw->batch.bo,
-                  I915_GEM_DOMAIN_RENDER | I915_GEM_DOMAIN_INSTRUCTION, 0,
-                  mocs_wb << 4 | 1);
+      OUT_RELOC64(brw->batch.state.bo, RELOC_32BIT, mocs_wb << 4 | 1);
       /* Indirect object base address: MEDIA_OBJECT data */
       OUT_BATCH(mocs_wb << 4 | 1);
       OUT_BATCH(0);
       /* Instruction base address: shader kernels (incl. SIP) */
-      OUT_RELOC64(brw->cache.bo, I915_GEM_DOMAIN_INSTRUCTION, 0,
-                  mocs_wb << 4 | 1);
-
+      OUT_RELOC64(brw->cache.bo, RELOC_32BIT, mocs_wb << 4 | 1);
       /* General state buffer size */
       OUT_BATCH(0xfffff001);
       /* Dynamic state buffer size */
-      OUT_BATCH(ALIGN(brw->batch.bo->size, 4096) | 1);
+      OUT_BATCH(ALIGN(MAX_STATE_SIZE, 4096) | 1);
       /* Indirect object upper bound */
       OUT_BATCH(0xfffff001);
       /* Instruction access upper bound */
       OUT_BATCH(ALIGN(brw->cache.bo->size, 4096) | 1);
-      if (brw->gen >= 9) {
+      if (devinfo->gen >= 9) {
+         OUT_BATCH(1);
+         OUT_BATCH(0);
+         OUT_BATCH(0);
+      }
+      if (devinfo->gen >= 10) {
          OUT_BATCH(1);
          OUT_BATCH(0);
          OUT_BATCH(0);
       }
       ADVANCE_BATCH();
-   } else if (brw->gen >= 6) {
-      uint8_t mocs = brw->gen == 7 ? GEN7_MOCS_L3 : 0;
+   } else if (devinfo->gen >= 6) {
+      uint8_t mocs = devinfo->gen == 7 ? GEN7_MOCS_L3 : 0;
 
        BEGIN_BATCH(10);
        OUT_BATCH(CMD_STATE_BASE_ADDRESS << 16 | (10 - 2));
@@ -676,7 +730,7 @@ brw_upload_state_base_address(struct brw_context *brw)
        * BINDING_TABLE_STATE
        * SURFACE_STATE
        */
-       OUT_RELOC(brw->batch.bo, I915_GEM_DOMAIN_SAMPLER, 0, 1);
+       OUT_RELOC(brw->batch.state.bo, 0, 1);
         /* Dynamic state base address:
         * SAMPLER_STATE
         * SAMPLER_BORDER_COLOR_STATE
@@ -687,12 +741,12 @@ brw_upload_state_base_address(struct brw_context *brw)
         * Push constants (when INSTPM: CONSTANT_BUFFER Address Offset
         * Disable is clear, which we rely on)
         */
-       OUT_RELOC(brw->batch.bo, (I915_GEM_DOMAIN_RENDER |
-                                  I915_GEM_DOMAIN_INSTRUCTION), 0, 1);
+       OUT_RELOC(brw->batch.state.bo, 0, 1);
 
        OUT_BATCH(1); /* Indirect object base address: MEDIA_OBJECT data */
-       OUT_RELOC(brw->cache.bo, I915_GEM_DOMAIN_INSTRUCTION, 0,
-                1); /* Instruction base address: shader kernels (incl. SIP) */
+
+       /* Instruction base address: shader kernels (incl. SIP) */
+       OUT_RELOC(brw->cache.bo, 0, 1);
 
        OUT_BATCH(1); /* General state upper bound */
        /* Dynamic state upper bound.  Although the documentation says that
@@ -704,15 +758,13 @@ brw_upload_state_base_address(struct brw_context *brw)
        OUT_BATCH(1); /* Indirect object upper bound */
        OUT_BATCH(1); /* Instruction access upper bound */
        ADVANCE_BATCH();
-   } else if (brw->gen == 5) {
+   } else if (devinfo->gen == 5) {
        BEGIN_BATCH(8);
        OUT_BATCH(CMD_STATE_BASE_ADDRESS << 16 | (8 - 2));
        OUT_BATCH(1); /* General state base address */
-       OUT_RELOC(brw->batch.bo, I915_GEM_DOMAIN_SAMPLER, 0,
-                1); /* Surface state base address */
+       OUT_RELOC(brw->batch.state.bo, 0, 1); /* Surface state base address */
        OUT_BATCH(1); /* Indirect object base address */
-       OUT_RELOC(brw->cache.bo, I915_GEM_DOMAIN_INSTRUCTION, 0,
-                1); /* Instruction base address */
+       OUT_RELOC(brw->cache.bo, 0, 1); /* Instruction base address */
        OUT_BATCH(0xfffff001); /* General state upper bound */
        OUT_BATCH(1); /* Indirect object upper bound */
        OUT_BATCH(1); /* Instruction access upper bound */
@@ -721,15 +773,14 @@ brw_upload_state_base_address(struct brw_context *brw)
        BEGIN_BATCH(6);
        OUT_BATCH(CMD_STATE_BASE_ADDRESS << 16 | (6 - 2));
        OUT_BATCH(1); /* General state base address */
-       OUT_RELOC(brw->batch.bo, I915_GEM_DOMAIN_SAMPLER, 0,
-                1); /* Surface state base address */
+       OUT_RELOC(brw->batch.state.bo, 0, 1); /* Surface state base address */
        OUT_BATCH(1); /* Indirect object base address */
        OUT_BATCH(1); /* General state upper bound */
        OUT_BATCH(1); /* Indirect object upper bound */
        ADVANCE_BATCH();
    }
 
-   if (brw->gen >= 6) {
+   if (devinfo->gen >= 6) {
       brw_emit_pipe_control_flush(brw,
                                   PIPE_CONTROL_INSTRUCTION_INVALIDATE |
                                   PIPE_CONTROL_STATE_CACHE_INVALIDATE |