i965: store workaround_bo offset
[mesa.git] src/mesa/drivers/dri/i965/brw_draw.c
index fe7e8c478faa3c9c9afa349b02f09b6b8dfe02fc..815dc2bc969d10b73fefc21bc3b0e76006593731 100644 (file)
@@ -25,6 +25,7 @@
 
 #include <sys/errno.h>
 
+#include "main/arrayobj.h"
 #include "main/blend.h"
 #include "main/context.h"
 #include "main/condrender.h"
@@ -34,8 +35,9 @@
 #include "main/macros.h"
 #include "main/transformfeedback.h"
 #include "main/framebuffer.h"
+#include "main/varray.h"
 #include "tnl/tnl.h"
-#include "vbo/vbo_context.h"
+#include "vbo/vbo.h"
 #include "swrast/swrast.h"
 #include "swrast_setup/swrast_setup.h"
 #include "drivers/common/meta.h"
@@ -157,8 +159,12 @@ static void
 brw_emit_prim(struct brw_context *brw,
               const struct _mesa_prim *prim,
               uint32_t hw_prim,
+              bool is_indexed,
+              GLuint num_instances, GLuint base_instance,
               struct brw_transform_feedback_object *xfb_obj,
-              unsigned stream)
+              unsigned stream,
+              bool is_indirect,
+              GLsizeiptr indirect_offset)
 {
    const struct gen_device_info *devinfo = &brw->screen->devinfo;
    int verts_per_instance;
@@ -171,7 +177,7 @@ brw_emit_prim(struct brw_context *brw,
    int start_vertex_location = prim->start;
    int base_vertex_location = prim->basevertex;
 
-   if (prim->indexed) {
+   if (is_indexed) {
       vertex_access_type = devinfo->gen >= 7 ?
          GEN7_3DPRIM_VERTEXBUFFER_ACCESS_RANDOM :
          GEN4_3DPRIM_VERTEXBUFFER_ACCESS_RANDOM;
@@ -191,7 +197,7 @@ brw_emit_prim(struct brw_context *brw,
       verts_per_instance = prim->count;
 
    /* If nothing to emit, just return. */
-   if (verts_per_instance == 0 && !prim->is_indirect && !xfb_obj)
+   if (verts_per_instance == 0 && !is_indirect && !xfb_obj)
       return;
 
    /* If we're set to always flush, do it before and after the primitive emit.
@@ -212,7 +218,7 @@ brw_emit_prim(struct brw_context *brw,
       BEGIN_BATCH(9);
       OUT_BATCH(MI_LOAD_REGISTER_IMM | (9 - 2));
       OUT_BATCH(GEN7_3DPRIM_INSTANCE_COUNT);
-      OUT_BATCH(prim->num_instances);
+      OUT_BATCH(num_instances);
       OUT_BATCH(GEN7_3DPRIM_START_VERTEX);
       OUT_BATCH(0);
       OUT_BATCH(GEN7_3DPRIM_BASE_VERTEX);
@@ -220,29 +226,29 @@ brw_emit_prim(struct brw_context *brw,
       OUT_BATCH(GEN7_3DPRIM_START_INSTANCE);
       OUT_BATCH(0);
       ADVANCE_BATCH();
-   } else if (prim->is_indirect) {
+   } else if (is_indirect) {
       struct gl_buffer_object *indirect_buffer = brw->ctx.DrawIndirectBuffer;
       struct brw_bo *bo = intel_bufferobj_buffer(brw,
             intel_buffer_object(indirect_buffer),
-            prim->indirect_offset, 5 * sizeof(GLuint), false);
+            indirect_offset, 5 * sizeof(GLuint), false);
 
       indirect_flag = GEN7_3DPRIM_INDIRECT_PARAMETER_ENABLE;
 
       brw_load_register_mem(brw, GEN7_3DPRIM_VERTEX_COUNT, bo,
-                            prim->indirect_offset + 0);
+                            indirect_offset + 0);
       brw_load_register_mem(brw, GEN7_3DPRIM_INSTANCE_COUNT, bo,
-                            prim->indirect_offset + 4);
+                            indirect_offset + 4);
 
       brw_load_register_mem(brw, GEN7_3DPRIM_START_VERTEX, bo,
-                            prim->indirect_offset + 8);
-      if (prim->indexed) {
+                            indirect_offset + 8);
+      if (is_indexed) {
          brw_load_register_mem(brw, GEN7_3DPRIM_BASE_VERTEX, bo,
-                               prim->indirect_offset + 12);
+                               indirect_offset + 12);
          brw_load_register_mem(brw, GEN7_3DPRIM_START_INSTANCE, bo,
-                               prim->indirect_offset + 16);
+                               indirect_offset + 16);
       } else {
          brw_load_register_mem(brw, GEN7_3DPRIM_START_INSTANCE, bo,
-                               prim->indirect_offset + 12);
+                               indirect_offset + 12);
          brw_load_register_imm32(brw, GEN7_3DPRIM_BASE_VERTEX, 0);
       }
    } else {
@@ -265,8 +271,8 @@ brw_emit_prim(struct brw_context *brw,
    }
    OUT_BATCH(verts_per_instance);
    OUT_BATCH(start_vertex_location);
-   OUT_BATCH(prim->num_instances);
-   OUT_BATCH(prim->base_instance);
+   OUT_BATCH(num_instances);
+   OUT_BATCH(base_instance);
    OUT_BATCH(base_vertex_location);
    ADVANCE_BATCH();
 
@@ -276,55 +282,85 @@ brw_emit_prim(struct brw_context *brw,
 
 
 static void
-brw_merge_inputs(struct brw_context *brw,
-                 const struct gl_vertex_array *arrays[])
+brw_clear_buffers(struct brw_context *brw)
 {
-   const struct gen_device_info *devinfo = &brw->screen->devinfo;
-   const struct gl_context *ctx = &brw->ctx;
-   GLuint i;
-
-   for (i = 0; i < brw->vb.nr_buffers; i++) {
+   for (unsigned i = 0; i < brw->vb.nr_buffers; ++i) {
       brw_bo_unreference(brw->vb.buffers[i].bo);
       brw->vb.buffers[i].bo = NULL;
    }
    brw->vb.nr_buffers = 0;
 
-   for (i = 0; i < VERT_ATTRIB_MAX; i++) {
-      brw->vb.inputs[i].buffer = -1;
-      brw->vb.inputs[i].glarray = arrays[i];
+   for (unsigned i = 0; i < brw->vb.nr_enabled; ++i) {
+      brw->vb.enabled[i]->buffer = -1;
+   }
+#ifndef NDEBUG
+   for (unsigned i = 0; i < VERT_ATTRIB_MAX; i++) {
+      assert(brw->vb.inputs[i].buffer == -1);
    }
+#endif
+}
 
-   if (devinfo->gen < 8 && !devinfo->is_haswell) {
-      uint64_t mask = ctx->VertexProgram._Current->info.inputs_read;
-      /* Prior to Haswell, the hardware can't natively support GL_FIXED or
-       * 2_10_10_10_REV vertex formats.  Set appropriate workaround flags.
-       */
-      while (mask) {
-         uint8_t wa_flags = 0;
 
-         i = u_bit_scan64(&mask);
+static uint8_t get_wa_flags(const struct gl_vertex_format *glformat)
+{
+   uint8_t wa_flags = 0;
+
+   switch (glformat->Type) {
+   case GL_FIXED:
+      wa_flags = glformat->Size;
+      break;
+
+   case GL_INT_2_10_10_10_REV:
+      wa_flags |= BRW_ATTRIB_WA_SIGN;
+      /* fallthrough */
 
-         switch (brw->vb.inputs[i].glarray->Type) {
+   case GL_UNSIGNED_INT_2_10_10_10_REV:
+      if (glformat->Format == GL_BGRA)
+         wa_flags |= BRW_ATTRIB_WA_BGRA;
 
-         case GL_FIXED:
-            wa_flags = brw->vb.inputs[i].glarray->Size;
-            break;
+      if (glformat->Normalized)
+         wa_flags |= BRW_ATTRIB_WA_NORMALIZE;
+      else if (!glformat->Integer)
+         wa_flags |= BRW_ATTRIB_WA_SCALE;
 
-         case GL_INT_2_10_10_10_REV:
-            wa_flags |= BRW_ATTRIB_WA_SIGN;
-            /* fallthough */
+      break;
+   }
 
-         case GL_UNSIGNED_INT_2_10_10_10_REV:
-            if (brw->vb.inputs[i].glarray->Format == GL_BGRA)
-               wa_flags |= BRW_ATTRIB_WA_BGRA;
+   return wa_flags;
+}
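
As a sketch (illustrative, not part of the patch): for a normalized BGRA
GL_INT_2_10_10_10_REV attribute, the helper above combines all three
workaround bits, while GL_FIXED encodes only the component count:

   struct gl_vertex_format fmt = {
      .Type = GL_INT_2_10_10_10_REV,
      .Format = GL_BGRA,
      .Normalized = GL_TRUE,   /* remaining fields zeroed for the example */
   };
   assert(get_wa_flags(&fmt) ==
          (BRW_ATTRIB_WA_SIGN | BRW_ATTRIB_WA_BGRA | BRW_ATTRIB_WA_NORMALIZE));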
 
-            if (brw->vb.inputs[i].glarray->Normalized)
-               wa_flags |= BRW_ATTRIB_WA_NORMALIZE;
-            else if (!brw->vb.inputs[i].glarray->Integer)
-               wa_flags |= BRW_ATTRIB_WA_SCALE;
 
-            break;
+static void
+brw_merge_inputs(struct brw_context *brw)
+{
+   const struct gen_device_info *devinfo = &brw->screen->devinfo;
+   const struct gl_context *ctx = &brw->ctx;
+
+   if (devinfo->gen < 8 && !devinfo->is_haswell) {
+      /* Prior to Haswell, the hardware can't natively support GL_FIXED or
+       * 2_10_10_10_REV vertex formats.  Set appropriate workaround flags.
+       */
+      const struct gl_vertex_array_object *vao = ctx->Array._DrawVAO;
+      const uint64_t vs_inputs = ctx->VertexProgram._Current->info.inputs_read;
+      assert((vs_inputs & ~((uint64_t)VERT_BIT_ALL)) == 0);
+
+      unsigned vaomask = vs_inputs & _mesa_draw_array_bits(ctx);
+      while (vaomask) {
+         const gl_vert_attrib i = u_bit_scan(&vaomask);
+         const uint8_t wa_flags =
+            get_wa_flags(_mesa_draw_array_format(vao, i));
+
+         if (brw->vb.attrib_wa_flags[i] != wa_flags) {
+            brw->vb.attrib_wa_flags[i] = wa_flags;
+            brw->ctx.NewDriverState |= BRW_NEW_VS_ATTRIB_WORKAROUNDS;
          }
+      }
+
+      unsigned currmask = vs_inputs & _mesa_draw_current_bits(ctx);
+      while (currmask) {
+         const gl_vert_attrib i = u_bit_scan(&currmask);
+         const uint8_t wa_flags =
+            get_wa_flags(_mesa_draw_current_format(ctx, i));
 
          if (brw->vb.attrib_wa_flags[i] != wa_flags) {
             brw->vb.attrib_wa_flags[i] = wa_flags;
@@ -341,6 +377,7 @@ brw_merge_inputs(struct brw_context *brw,
  */
 static bool
 intel_disable_rb_aux_buffer(struct brw_context *brw,
+                            bool *draw_aux_buffer_disabled,
                             struct intel_mipmap_tree *tex_mt,
                             unsigned min_level, unsigned num_levels,
                             const char *usage)
@@ -360,7 +397,7 @@ intel_disable_rb_aux_buffer(struct brw_context *brw,
       if (irb && irb->mt->bo == tex_mt->bo &&
           irb->mt_level >= min_level &&
           irb->mt_level < min_level + num_levels) {
-         found = brw->draw_aux_buffer_disabled[i] = true;
+         found = draw_aux_buffer_disabled[i] = true;
       }
    }
 
@@ -372,6 +409,68 @@ intel_disable_rb_aux_buffer(struct brw_context *brw,
    return found;
 }
 
+/** Implement the ASTC 5x5 sampler workaround
+ *
+ * Gen9 sampling hardware has a bug where an ASTC 5x5 compressed surface
+ * cannot live in the sampler cache at the same time as an aux compressed
+ * surface.  In order to work around the bug we have to stall rendering with a
+ * CS and pixel scoreboard stall (implicit in the CS stall) and invalidate the
+ * texture cache whenever one of ASTC 5x5 or aux compressed may be in the
+ * sampler cache and we're about to render with something which samples from
+ * the other.
+ *
+ * In the case of a single shader which textures from both ASTC 5x5 and
+ * a texture which is CCS or HiZ compressed, we have to resolve the aux
+ * compressed texture prior to rendering.  This second part is handled in
+ * brw_predraw_resolve_inputs() below.
+ *
+ * We have observed this issue to affect CCS and HiZ sampling but whether or
+ * not it also affects MCS is unknown.  Because MCS has no concept of a
+ * resolve (and doing one would be stupid expensive), we choose to simply
+ * ignore the possibility and hope for the best.
+ */
+static void
+gen9_apply_astc5x5_wa_flush(struct brw_context *brw,
+                            enum gen9_astc5x5_wa_tex_type curr_mask)
+{
+   assert(brw->screen->devinfo.gen == 9);
+
+   if (((brw->gen9_astc5x5_wa_tex_mask & GEN9_ASTC5X5_WA_TEX_TYPE_ASTC5x5) &&
+        (curr_mask & GEN9_ASTC5X5_WA_TEX_TYPE_AUX)) ||
+       ((brw->gen9_astc5x5_wa_tex_mask & GEN9_ASTC5X5_WA_TEX_TYPE_AUX) &&
+        (curr_mask & GEN9_ASTC5X5_WA_TEX_TYPE_ASTC5x5))) {
+      brw_emit_pipe_control_flush(brw, PIPE_CONTROL_CS_STALL);
+      brw_emit_pipe_control_flush(brw, PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE);
+   }
+
+   brw->gen9_astc5x5_wa_tex_mask = curr_mask;
+}
+
+static enum gen9_astc5x5_wa_tex_type
+gen9_astc5x5_wa_bits(mesa_format format, enum isl_aux_usage aux_usage)
+{
+   if (aux_usage != ISL_AUX_USAGE_NONE &&
+       aux_usage != ISL_AUX_USAGE_MCS)
+      return GEN9_ASTC5X5_WA_TEX_TYPE_AUX;
+
+   if (format == MESA_FORMAT_RGBA_ASTC_5x5 ||
+       format == MESA_FORMAT_SRGB8_ALPHA8_ASTC_5x5)
+      return GEN9_ASTC5X5_WA_TEX_TYPE_ASTC5x5;
+
+   return 0;
+}
+
+/* Helper for the gen9 ASTC 5x5 workaround.  This version exists for BLORP's
+ * use-cases where only a single texture is bound.
+ */
+void
+gen9_apply_single_tex_astc5x5_wa(struct brw_context *brw,
+                                 mesa_format format,
+                                 enum isl_aux_usage aux_usage)
+{
+   gen9_apply_astc5x5_wa_flush(brw, gen9_astc5x5_wa_bits(format, aux_usage));
+}
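
How the flush tracking above behaves across consecutive draws (a sketch, not
part of the patch): the stall and invalidate are emitted only when the mask
flips between the ASTC 5x5 and aux-compressed texture types.

   gen9_apply_astc5x5_wa_flush(brw, GEN9_ASTC5X5_WA_TEX_TYPE_AUX);
   /* no flush: the previous mask contained no ASTC 5x5 bit */
   gen9_apply_astc5x5_wa_flush(brw, GEN9_ASTC5X5_WA_TEX_TYPE_ASTC5x5);
   /* CS stall + texture cache invalidate: AUX -> ASTC 5x5 transition */
   gen9_apply_astc5x5_wa_flush(brw, GEN9_ASTC5X5_WA_TEX_TYPE_ASTC5x5);
   /* no flush: mask unchanged */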
+
 static void
 mark_textures_used_for_txf(BITSET_WORD *used_for_txf,
                            const struct gl_program *prog)
@@ -379,7 +478,7 @@ mark_textures_used_for_txf(BITSET_WORD *used_for_txf,
    if (!prog)
       return;
 
-   unsigned mask = prog->SamplersUsed & prog->info.textures_used_by_txf;
+   uint32_t mask = prog->info.textures_used_by_txf;
    while (mask) {
       int s = u_bit_scan(&mask);
       BITSET_SET(used_for_txf, prog->SamplerUnits[s]);
@@ -393,14 +492,12 @@ mark_textures_used_for_txf(BITSET_WORD *used_for_txf,
  * enabled depth texture, and flush the render cache for any dirty textures.
  */
 void
-brw_predraw_resolve_inputs(struct brw_context *brw, bool rendering)
+brw_predraw_resolve_inputs(struct brw_context *brw, bool rendering,
+                           bool *draw_aux_buffer_disabled)
 {
    struct gl_context *ctx = &brw->ctx;
    struct intel_texture_object *tex_obj;
 
-   memset(brw->draw_aux_buffer_disabled, 0,
-          sizeof(brw->draw_aux_buffer_disabled));
-
    BITSET_DECLARE(used_for_txf, MAX_COMBINED_TEXTURE_IMAGE_UNITS);
    memset(used_for_txf, 0, sizeof(used_for_txf));
    if (rendering) {
@@ -413,8 +510,30 @@ brw_predraw_resolve_inputs(struct brw_context *brw, bool rendering)
       mark_textures_used_for_txf(used_for_txf, ctx->ComputeProgram._Current);
    }
 
-   /* Resolve depth buffer and render cache of each enabled texture. */
    int maxEnabledUnit = ctx->Texture._MaxEnabledTexImageUnit;
+
+   enum gen9_astc5x5_wa_tex_type astc5x5_wa_bits = 0;
+   if (brw->screen->devinfo.gen == 9) {
+      /* In order to properly implement the ASTC 5x5 workaround for an
+       * arbitrary draw or dispatch call, we have to walk the entire list of
+       * textures looking for ASTC 5x5.  If there is any ASTC 5x5 in this draw
+       * call, all aux compressed textures must be resolved and have aux
+       * compression disabled while sampling.
+       */
+      for (int i = 0; i <= maxEnabledUnit; i++) {
+         if (!ctx->Texture.Unit[i]._Current)
+            continue;
+         tex_obj = intel_texture_object(ctx->Texture.Unit[i]._Current);
+         if (!tex_obj || !tex_obj->mt)
+            continue;
+
+         astc5x5_wa_bits |= gen9_astc5x5_wa_bits(tex_obj->_Format,
+                                                 tex_obj->mt->aux_usage);
+      }
+      gen9_apply_astc5x5_wa_flush(brw, astc5x5_wa_bits);
+   }
+
+   /* Resolve depth buffer and render cache of each enabled texture. */
    for (int i = 0; i <= maxEnabledUnit; i++) {
       if (!ctx->Texture.Unit[i]._Current)
         continue;
@@ -440,14 +559,16 @@ brw_predraw_resolve_inputs(struct brw_context *brw, bool rendering)
          num_layers = INTEL_REMAINING_LAYERS;
       }
 
-      const bool disable_aux = rendering &&
-         intel_disable_rb_aux_buffer(brw, tex_obj->mt, min_level, num_levels,
+      if (rendering) {
+         intel_disable_rb_aux_buffer(brw, draw_aux_buffer_disabled,
+                                     tex_obj->mt, min_level, num_levels,
                                      "for sampling");
+      }
 
       intel_miptree_prepare_texture(brw, tex_obj->mt, view_format,
                                     min_level, num_levels,
                                     min_layer, num_layers,
-                                    disable_aux);
+                                    astc5x5_wa_bits);
 
       /* If any programs are using it with texelFetch, we may need to also do
        * a prepare with an sRGB format to ensure texelFetch works "properly".
@@ -459,7 +580,7 @@ brw_predraw_resolve_inputs(struct brw_context *brw, bool rendering)
             intel_miptree_prepare_texture(brw, tex_obj->mt, txf_format,
                                           min_level, num_levels,
                                           min_layer, num_layers,
-                                          disable_aux);
+                                          astc5x5_wa_bits);
          }
       }
 
@@ -469,6 +590,11 @@ brw_predraw_resolve_inputs(struct brw_context *brw, bool rendering)
           tex_obj->mt->format == MESA_FORMAT_S_UINT8) {
          intel_update_r8stencil(brw, tex_obj->mt);
       }
+
+      if (intel_miptree_has_etc_shadow(brw, tex_obj->mt) &&
+          tex_obj->mt->shadow_needs_update) {
+         intel_miptree_update_etc_shadow_levels(brw, tex_obj->mt);
+      }
    }
 
    /* Resolve color for each active shader image. */
@@ -483,7 +609,8 @@ brw_predraw_resolve_inputs(struct brw_context *brw, bool rendering)
 
             if (tex_obj && tex_obj->mt) {
                if (rendering) {
-                  intel_disable_rb_aux_buffer(brw, tex_obj->mt, 0, ~0,
+                  intel_disable_rb_aux_buffer(brw, draw_aux_buffer_disabled,
+                                              tex_obj->mt, 0, ~0,
                                               "as a shader image");
                }
 
@@ -497,7 +624,8 @@ brw_predraw_resolve_inputs(struct brw_context *brw, bool rendering)
 }
 
 static void
-brw_predraw_resolve_framebuffer(struct brw_context *brw)
+brw_predraw_resolve_framebuffer(struct brw_context *brw,
+                                bool *draw_aux_buffer_disabled)
 {
    struct gl_context *ctx = &brw->ctx;
    struct intel_renderbuffer *depth_irb;
@@ -512,7 +640,7 @@ brw_predraw_resolve_framebuffer(struct brw_context *brw)
    }
 
    /* Resolve color buffers for non-coherent framebuffer fetch. */
-   if (!ctx->Extensions.MESA_shader_framebuffer_fetch &&
+   if (!ctx->Extensions.EXT_shader_framebuffer_fetch &&
        ctx->FragmentProgram._Current &&
        ctx->FragmentProgram._Current->info.outputs_read) {
       const struct gl_framebuffer *fb = ctx->DrawBuffer;
@@ -530,7 +658,7 @@ brw_predraw_resolve_framebuffer(struct brw_context *brw)
             intel_miptree_prepare_texture(brw, irb->mt, irb->mt->surf.format,
                                           irb->mt_level, 1,
                                           irb->mt_layer, irb->layer_count,
-                                          false);
+                                          brw->gen9_astc5x5_wa_tex_mask);
          }
       }
    }
@@ -549,11 +677,16 @@ brw_predraw_resolve_framebuffer(struct brw_context *brw)
       bool blend_enabled = ctx->Color.BlendEnabled & (1 << i);
       enum isl_aux_usage aux_usage =
          intel_miptree_render_aux_usage(brw, irb->mt, isl_format,
-                                        blend_enabled);
+                                        blend_enabled,
+                                        draw_aux_buffer_disabled[i]);
+      if (brw->draw_aux_usage[i] != aux_usage) {
+         brw->ctx.NewDriverState |= BRW_NEW_AUX_STATE;
+         brw->draw_aux_usage[i] = aux_usage;
+      }
 
       intel_miptree_prepare_render(brw, irb->mt, irb->mt_level,
                                    irb->mt_layer, irb->layer_count,
-                                   isl_format, blend_enabled);
+                                   aux_usage);
 
       brw_cache_flush_for_render(brw, irb->mt->bo,
                                  isl_format, aux_usage);
@@ -566,6 +699,9 @@ brw_predraw_resolve_framebuffer(struct brw_context *brw)
  * If the depth buffer was written to and if it has an accompanying HiZ
  * buffer, then mark that it needs a depth resolve.
  *
+ * If the stencil buffer was written to then mark that it may need to be
+ * copied to an R8 texture.
+ *
  * If the color buffer is a multisample window system buffer, then
  * mark that it needs a downsample.
  *
@@ -609,8 +745,15 @@ brw_postdraw_set_buffers_need_resolve(struct brw_context *brw)
          brw_depth_cache_add_bo(brw, depth_irb->mt->bo);
    }
 
-   if (stencil_irb && brw->stencil_write_enabled)
-      brw_depth_cache_add_bo(brw, stencil_irb->mt->bo);
+   if (stencil_irb && brw->stencil_write_enabled) {
+      struct intel_mipmap_tree *stencil_mt =
+         stencil_irb->mt->stencil_mt != NULL ?
+         stencil_irb->mt->stencil_mt : stencil_irb->mt;
+      brw_depth_cache_add_bo(brw, stencil_mt->bo);
+      intel_miptree_finish_write(brw, stencil_mt, stencil_irb->mt_level,
+                                 stencil_irb->mt_layer,
+                                 stencil_irb->layer_count, ISL_AUX_USAGE_NONE);
+   }
 
    for (unsigned i = 0; i < fb->_NumColorDrawBuffers; i++) {
       struct intel_renderbuffer *irb =
@@ -622,16 +765,13 @@ brw_postdraw_set_buffers_need_resolve(struct brw_context *brw)
       mesa_format mesa_format =
          _mesa_get_render_format(ctx, intel_rb_format(irb));
       enum isl_format isl_format = brw_isl_format_for_mesa_format(mesa_format);
-      bool blend_enabled = ctx->Color.BlendEnabled & (1 << i);
-      enum isl_aux_usage aux_usage =
-         intel_miptree_render_aux_usage(brw, irb->mt, isl_format,
-                                        blend_enabled);
+      enum isl_aux_usage aux_usage = brw->draw_aux_usage[i];
 
       brw_render_cache_add_bo(brw, irb->mt->bo, isl_format, aux_usage);
 
       intel_miptree_finish_render(brw, irb->mt, irb->mt_level,
                                   irb->mt_layer, irb->layer_count,
-                                  isl_format, blend_enabled);
+                                  aux_usage);
    }
 }
 
@@ -687,7 +827,6 @@ brw_postdraw_reconcile_align_wa_slices(struct brw_context *brw)
 
 static void
 brw_prepare_drawing(struct gl_context *ctx,
-                    const struct gl_vertex_array *arrays[],
                     const struct _mesa_index_buffer *ib,
                     bool index_bounds_valid,
                     GLuint min_index,
@@ -712,15 +851,15 @@ brw_prepare_drawing(struct gl_context *ctx,
     * index.
     */
    brw->wm.base.sampler_count =
-      util_last_bit(ctx->FragmentProgram._Current->SamplersUsed);
+      util_last_bit(ctx->FragmentProgram._Current->info.textures_used);
    brw->gs.base.sampler_count = ctx->GeometryProgram._Current ?
-      util_last_bit(ctx->GeometryProgram._Current->SamplersUsed) : 0;
+      util_last_bit(ctx->GeometryProgram._Current->info.textures_used) : 0;
    brw->tes.base.sampler_count = ctx->TessEvalProgram._Current ?
-      util_last_bit(ctx->TessEvalProgram._Current->SamplersUsed) : 0;
+      util_last_bit(ctx->TessEvalProgram._Current->info.textures_used) : 0;
    brw->tcs.base.sampler_count = ctx->TessCtrlProgram._Current ?
-      util_last_bit(ctx->TessCtrlProgram._Current->SamplersUsed) : 0;
+      util_last_bit(ctx->TessCtrlProgram._Current->info.textures_used) : 0;
    brw->vs.base.sampler_count =
-      util_last_bit(ctx->VertexProgram._Current->SamplersUsed);
+      util_last_bit(ctx->VertexProgram._Current->info.textures_used);
 
    intel_prepare_render(brw);
 
@@ -734,12 +873,14 @@ brw_prepare_drawing(struct gl_context *ctx,
     * and finalizing textures but before setting up any hardware state for
     * this draw call.
     */
-   brw_predraw_resolve_inputs(brw, true);
-   brw_predraw_resolve_framebuffer(brw);
+   bool draw_aux_buffer_disabled[MAX_DRAW_BUFFERS] = { };
+   brw_predraw_resolve_inputs(brw, true, draw_aux_buffer_disabled);
+   brw_predraw_resolve_framebuffer(brw, draw_aux_buffer_disabled);
 
    /* Bind all inputs, derive varying and size information:
     */
-   brw_merge_inputs(brw, arrays);
+   brw_clear_buffers(brw);
+   brw_merge_inputs(brw);
 
    brw->ib.ib = ib;
    brw->ctx.NewDriverState |= BRW_NEW_INDICES;
@@ -766,6 +907,76 @@ brw_finish_drawing(struct gl_context *ctx)
       brw_bo_unreference(brw->draw.draw_params_count_bo);
       brw->draw.draw_params_count_bo = NULL;
    }
+
+   if (brw->draw.draw_params_bo) {
+      brw_bo_unreference(brw->draw.draw_params_bo);
+      brw->draw.draw_params_bo = NULL;
+   }
+
+   if (brw->draw.derived_draw_params_bo) {
+      brw_bo_unreference(brw->draw.derived_draw_params_bo);
+      brw->draw.derived_draw_params_bo = NULL;
+   }
+}
+
+/**
+ * Implement workarounds for preemption:
+ *    - WaDisableMidObjectPreemptionForGSLineStripAdj
+ *    - WaDisableMidObjectPreemptionForTrifanOrPolygon
+ *    - WaDisableMidObjectPreemptionForLineLoop
+ *    - WA#0798
+ */
+static void
+gen9_emit_preempt_wa(struct brw_context *brw,
+                     const struct _mesa_prim *prim, GLuint num_instances)
+{
+   bool object_preemption = true;
+   ASSERTED const struct gen_device_info *devinfo = &brw->screen->devinfo;
+
+   /* Only apply these workarounds for gen9 */
+   assert(devinfo->gen == 9);
+
+   /* WaDisableMidObjectPreemptionForGSLineStripAdj
+    *
+    *    WA: Disable mid-draw preemption when draw-call is a linestrip_adj and
+    *    GS is enabled.
+    */
+   if (brw->primitive == _3DPRIM_LINESTRIP_ADJ && brw->gs.enabled)
+      object_preemption = false;
+
+   /* WaDisableMidObjectPreemptionForTrifanOrPolygon
+    *
+    *    TriFan miscompare in Execlist Preemption test. Cut index that is on a
+    *    previous context. End the previous, the resume another context with a
+    *    tri-fan or polygon, and the vertex count is corrupted. If we prempt
+    *    again we will cause corruption.
+    *
+    *    WA: Disable mid-draw preemption when draw-call has a tri-fan.
+    */
+   if (brw->primitive == _3DPRIM_TRIFAN)
+      object_preemption = false;
+
+   /* WaDisableMidObjectPreemptionForLineLoop
+    *
+    *    VF Stats Counters Missing a vertex when preemption enabled.
+    *
+    *    WA: Disable mid-draw preemption when the draw uses a lineloop
+    *    topology.
+    */
+   if (brw->primitive == _3DPRIM_LINELOOP)
+      object_preemption = false;
+
+   /* WA#0798
+    *
+    *    VF is corrupting GAFS data when preempted on an instance boundary and
+    *    replayed with instancing enabled.
+    *
+    *    WA: Disable preemption when using instancing.
+    */
+   if (num_instances > 1)
+      object_preemption = false;
+
+   brw_enable_obj_preemption(brw, object_preemption);
 }
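
Condensed to a single predicate, the policy above reads as follows (a sketch
using the same names as the hunk above, not part of the patch):

   bool allow_preempt =
      !(brw->primitive == _3DPRIM_LINESTRIP_ADJ && brw->gs.enabled) &&
      brw->primitive != _3DPRIM_TRIFAN &&
      brw->primitive != _3DPRIM_LINELOOP &&
      num_instances <= 1;
   brw_enable_obj_preemption(brw, allow_preempt);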
 
 /* May fail if out of video memory for texture or vbo upload, or on
@@ -773,16 +984,18 @@ brw_finish_drawing(struct gl_context *ctx)
  */
 static void
 brw_draw_single_prim(struct gl_context *ctx,
-                     const struct gl_vertex_array *arrays[],
                      const struct _mesa_prim *prim,
                      unsigned prim_id,
+                     bool is_indexed,
+                     GLuint num_instances, GLuint base_instance,
                      struct brw_transform_feedback_object *xfb_obj,
                      unsigned stream,
-                     struct gl_buffer_object *indirect)
+                     GLsizeiptr indirect_offset)
 {
    struct brw_context *brw = brw_context(ctx);
    const struct gen_device_info *devinfo = &brw->screen->devinfo;
-   bool fail_next = false;
+   bool fail_next;
+   bool is_indirect = brw->draw.draw_indirect_data != NULL;
 
    /* Flag BRW_NEW_DRAW_CALL on every draw.  This allows us to have
     * atoms that happen on every draw call.
@@ -792,19 +1005,20 @@ brw_draw_single_prim(struct gl_context *ctx,
    /* Flush the batch if the batch/state buffers are nearly full.  We can
     * grow them if needed, but this is not free, so we'd like to avoid it.
     */
-   intel_batchbuffer_require_space(brw, 1500, RENDER_RING);
+   intel_batchbuffer_require_space(brw, 1500);
    brw_require_statebuffer_space(brw, 2400);
    intel_batchbuffer_save_state(brw);
+   fail_next = intel_batchbuffer_saved_state_is_empty(brw);
 
-   if (brw->num_instances != prim->num_instances ||
+   if (brw->num_instances != num_instances ||
        brw->basevertex != prim->basevertex ||
-       brw->baseinstance != prim->base_instance) {
-      brw->num_instances = prim->num_instances;
+       brw->baseinstance != base_instance) {
+      brw->num_instances = num_instances;
       brw->basevertex = prim->basevertex;
-      brw->baseinstance = prim->base_instance;
+      brw->baseinstance = base_instance;
       if (prim_id > 0) { /* For i == 0 we just did this before the loop */
          brw->ctx.NewDriverState |= BRW_NEW_VERTICES;
-         brw_merge_inputs(brw, arrays);
+         brw_clear_buffers(brw);
       }
    }
 
@@ -813,35 +1027,35 @@ brw_draw_single_prim(struct gl_context *ctx,
     * always flag if the shader uses one of the values. For direct draws,
     * we only flag if the values change.
     */
-   const int new_basevertex =
-      prim->indexed ? prim->basevertex : prim->start;
-   const int new_baseinstance = prim->base_instance;
+   const int new_firstvertex =
+      is_indexed ? prim->basevertex : prim->start;
+   const int new_baseinstance = base_instance;
    const struct brw_vs_prog_data *vs_prog_data =
       brw_vs_prog_data(brw->vs.base.prog_data);
    if (prim_id > 0) {
       const bool uses_draw_parameters =
-         vs_prog_data->uses_basevertex ||
+         vs_prog_data->uses_firstvertex ||
          vs_prog_data->uses_baseinstance;
 
-      if ((uses_draw_parameters && prim->is_indirect) ||
-          (vs_prog_data->uses_basevertex &&
-           brw->draw.params.gl_basevertex != new_basevertex) ||
+      if ((uses_draw_parameters && is_indirect) ||
+          (vs_prog_data->uses_firstvertex &&
+           brw->draw.params.firstvertex != new_firstvertex) ||
           (vs_prog_data->uses_baseinstance &&
            brw->draw.params.gl_baseinstance != new_baseinstance))
          brw->ctx.NewDriverState |= BRW_NEW_VERTICES;
    }
 
-   brw->draw.params.gl_basevertex = new_basevertex;
+   brw->draw.params.firstvertex = new_firstvertex;
    brw->draw.params.gl_baseinstance = new_baseinstance;
    brw_bo_unreference(brw->draw.draw_params_bo);
 
-   if (prim->is_indirect) {
+   if (is_indirect) {
       /* Point draw_params_bo at the indirect buffer. */
       brw->draw.draw_params_bo =
          intel_buffer_object(ctx->DrawIndirectBuffer)->buffer;
       brw_bo_reference(brw->draw.draw_params_bo);
       brw->draw.draw_params_offset =
-         prim->indirect_offset + (prim->indexed ? 12 : 8);
+         indirect_offset + (is_indexed ? 12 : 8);
    } else {
       /* Set draw_params_bo to NULL so brw_prepare_vertices knows it
        * has to upload gl_BaseVertex and such if they're needed.
@@ -851,17 +1065,21 @@ brw_draw_single_prim(struct gl_context *ctx,
    }
 
    /* gl_DrawID always needs its own vertex buffer since it's not part of
-    * the indirect parameter buffer. If the program uses gl_DrawID we need
-    * to flag BRW_NEW_VERTICES. For the first iteration, we don't have
-    * valid vs_prog_data, but we always flag BRW_NEW_VERTICES before
-    * the loop.
+    * the indirect parameter buffer. Same for is_indexed_draw, which shares
+    * the buffer with gl_DrawID. If the program uses gl_DrawID, we need to
+    * flag BRW_NEW_VERTICES. For the first iteration, we don't have valid
+    * vs_prog_data, but we always flag BRW_NEW_VERTICES before the loop.
     */
-   brw->draw.gl_drawid = prim->draw_id;
-   brw_bo_unreference(brw->draw.draw_id_bo);
-   brw->draw.draw_id_bo = NULL;
    if (prim_id > 0 && vs_prog_data->uses_drawid)
       brw->ctx.NewDriverState |= BRW_NEW_VERTICES;
 
+   brw->draw.derived_params.gl_drawid = prim->draw_id;
+   brw->draw.derived_params.is_indexed_draw = is_indexed ? ~0 : 0;
+
+   brw_bo_unreference(brw->draw.derived_draw_params_bo);
+   brw->draw.derived_draw_params_bo = NULL;
+   brw->draw.derived_draw_params_offset = 0;
+
    if (devinfo->gen < 6)
       brw_set_prim(brw, prim);
    else
@@ -879,7 +1097,12 @@ retry:
       brw_upload_render_state(brw);
    }
 
-   brw_emit_prim(brw, prim, brw->primitive, xfb_obj, stream);
+   if (devinfo->gen == 9)
+      gen9_emit_preempt_wa(brw, prim, num_instances);
+
+   brw_emit_prim(brw, prim, brw->primitive, is_indexed, num_instances,
+                 base_instance, xfb_obj, stream, is_indirect,
+                 indirect_offset);
 
    brw->batch.no_wrap = false;
 
@@ -906,6 +1129,8 @@ retry:
    return;
 }
 
+
+
 void
 brw_draw_prims(struct gl_context *ctx,
                const struct _mesa_prim *prims,
@@ -914,13 +1139,13 @@ brw_draw_prims(struct gl_context *ctx,
                GLboolean index_bounds_valid,
                GLuint min_index,
                GLuint max_index,
+               GLuint num_instances,
+               GLuint base_instance,
                struct gl_transform_feedback_object *gl_xfb_obj,
-               unsigned stream,
-               struct gl_buffer_object *indirect)
+               unsigned stream)
 {
    unsigned i;
    struct brw_context *brw = brw_context(ctx);
-   const struct gl_vertex_array **arrays = ctx->Array._DrawArrays;
    int predicate_state = brw->predicate.state;
    struct brw_transform_feedback_object *xfb_obj =
       (struct brw_transform_feedback_object *) gl_xfb_obj;
@@ -929,7 +1154,8 @@ brw_draw_prims(struct gl_context *ctx,
       return;
 
    /* Handle primitive restart if needed */
-   if (brw_handle_primitive_restart(ctx, prims, nr_prims, ib, indirect)) {
+   if (brw_handle_primitive_restart(ctx, prims, nr_prims, ib, num_instances,
+                                    base_instance)) {
       /* The draw was handled, so we can exit now */
       return;
    }
@@ -942,8 +1168,8 @@ brw_draw_prims(struct gl_context *ctx,
                  _mesa_enum_to_string(ctx->RenderMode));
       _swsetup_Wakeup(ctx);
       _tnl_wakeup(ctx);
-      _tnl_draw_prims(ctx, prims, nr_prims, ib,
-                      index_bounds_valid, min_index, max_index, NULL, 0, NULL);
+      _tnl_draw(ctx, prims, nr_prims, ib, index_bounds_valid, min_index,
+                max_index, num_instances, base_instance, NULL, 0);
       return;
    }
 
@@ -951,15 +1177,14 @@ brw_draw_prims(struct gl_context *ctx,
     * get the minimum and maximum of their index buffer so we know what range
     * to upload.
     */
-   if (!index_bounds_valid && !vbo_all_varyings_in_vbos(arrays)) {
+   if (!index_bounds_valid && _mesa_draw_user_array_bits(ctx) != 0) {
       perf_debug("Scanning index buffer to compute index buffer bounds.  "
                  "Use glDrawRangeElements() to avoid this.\n");
       vbo_get_minmax_indices(ctx, prims, ib, &min_index, &max_index, nr_prims);
       index_bounds_valid = true;
    }
 
-   brw_prepare_drawing(ctx, arrays, ib, index_bounds_valid, min_index,
-                       max_index);
+   brw_prepare_drawing(ctx, ib, index_bounds_valid, min_index, max_index);
    /* Try drawing with the hardware, but don't do anything else if we can't
     * manage it.  swrast doesn't support our featureset, so we can't fall back
     * to it.
@@ -996,8 +1221,10 @@ brw_draw_prims(struct gl_context *ctx,
          brw->predicate.state = BRW_PREDICATE_STATE_USE_BIT;
       }
 
-      brw_draw_single_prim(ctx, arrays, &prims[i], i, xfb_obj, stream,
-                           indirect);
+      brw_draw_single_prim(ctx, &prims[i], i, ib != NULL, num_instances,
+                           base_instance, xfb_obj, stream,
+                           brw->draw.draw_indirect_offset +
+                           brw->draw.draw_indirect_stride * i);
    }
 
    brw_finish_drawing(ctx);
@@ -1028,13 +1255,13 @@ brw_draw_indirect_prims(struct gl_context *ctx,
       return;
    }
 
+   brw->draw.draw_indirect_stride = stride;
+   brw->draw.draw_indirect_offset = indirect_offset;
+
    prim[0].begin = 1;
    prim[draw_count - 1].end = 1;
-   for (i = 0; i < draw_count; ++i, indirect_offset += stride) {
+   for (i = 0; i < draw_count; ++i) {
       prim[i].mode = mode;
-      prim[i].indexed = ib != NULL;
-      prim[i].indirect_offset = indirect_offset;
-      prim[i].is_indirect = 1;
       prim[i].draw_id = i;
    }
 
@@ -1045,24 +1272,26 @@ brw_draw_indirect_prims(struct gl_context *ctx,
       brw->draw.draw_params_count_offset = indirect_params_offset;
    }
 
-   brw_draw_prims(ctx, prim, draw_count,
-                  ib, false, 0, ~0,
-                  NULL, 0,
-                  indirect_data);
+   brw->draw.draw_indirect_data = indirect_data;
+
+   brw_draw_prims(ctx, prim, draw_count, ib, false, 0, ~0, 0, 0, NULL, 0);
 
+   brw->draw.draw_indirect_data = NULL;
    free(prim);
 }
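
With the stride and offset now stashed on brw->draw, the parameter-block
location for sub-draw i is recomputed at draw time instead of being stored in
each _mesa_prim (a sketch matching the brw_draw_single_prim() call site
earlier in this patch):

   GLsizeiptr offset = brw->draw.draw_indirect_offset +
                       brw->draw.draw_indirect_stride * i;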
 
 void
-brw_draw_init(struct brw_context *brw)
+brw_init_draw_functions(struct dd_function_table *functions)
 {
-   struct gl_context *ctx = &brw->ctx;
-
    /* Register our drawing function:
     */
-   vbo_set_draw_func(ctx, brw_draw_prims);
-   vbo_set_indirect_draw_func(ctx, brw_draw_indirect_prims);
+   functions->Draw = brw_draw_prims;
+   functions->DrawIndirect = brw_draw_indirect_prims;
+}
 
+void
+brw_draw_init(struct brw_context *brw)
+{
    for (int i = 0; i < VERT_ATTRIB_MAX; i++)
       brw->vb.inputs[i].buffer = -1;
    brw->vb.nr_buffers = 0;