diff --git a/src/mesa/drivers/dri/i965/brw_draw.c b/src/mesa/drivers/dri/i965/brw_draw.c
index 34afc26ccbdb6d75e8cda951f468556ae9c8dab1..b91597a9f5dbbdec0c9d050155aff7cd0c7cd768 100644
--- a/src/mesa/drivers/dri/i965/brw_draw.c
+++ b/src/mesa/drivers/dri/i965/brw_draw.c
 #include "brw_defines.h"
 #include "brw_context.h"
 #include "brw_state.h"
+#include "brw_vs.h"
 
 #include "intel_batchbuffer.h"
 #include "intel_buffers.h"
 #include "intel_fbo.h"
 #include "intel_mipmap_tree.h"
-#include "intel_regions.h"
 #include "intel_buffer_objects.h"
 
 #define FILE_DEBUG_FLAG DEBUG_PRIMS
 
-const GLuint prim_to_hw_prim[GL_TRIANGLE_STRIP_ADJACENCY+1] = {
+static const GLuint prim_to_hw_prim[GL_TRIANGLE_STRIP_ADJACENCY+1] = {
    _3DPRIM_POINTLIST,
    _3DPRIM_LINELIST,
    _3DPRIM_LINELOOP,
@@ -87,6 +87,17 @@ static const GLenum reduced_prim[GL_POLYGON+1] = {
    GL_TRIANGLES
 };
 
+uint32_t
+get_hw_prim_for_gl_prim(int mode)
+{
+   if (mode >= BRW_PRIM_OFFSET)
+      return mode - BRW_PRIM_OFFSET;
+   else {
+      assert(mode < ARRAY_SIZE(prim_to_hw_prim));
+      return prim_to_hw_prim[mode];
+   }
+}
+
 
 /* When the primitive changes, set a state bit and re-validate.  Not
  * the nicest and would rather deal with this by having all the
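The get_hw_prim_for_gl_prim() helper added above accepts two encodings: plain GL
primitive enums, which index prim_to_hw_prim[], and values biased by
BRW_PRIM_OFFSET, which already encode a hardware primitive and are passed
through with the bias removed. A minimal sketch of both paths (illustrative
only, not part of this patch):

    /* GL enum path: table lookup. */
    uint32_t hw0 = get_hw_prim_for_gl_prim(GL_TRIANGLES);

    /* Pre-translated path: internal callers can hand the driver a hardware
     * primitive directly by biasing it with BRW_PRIM_OFFSET. */
    uint32_t hw1 = get_hw_prim_for_gl_prim(BRW_PRIM_OFFSET + _3DPRIM_TRILIST);

    assert(hw0 == hw1); /* both yield _3DPRIM_TRILIST */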
@@ -97,7 +108,7 @@ static void brw_set_prim(struct brw_context *brw,
                          const struct _mesa_prim *prim)
 {
    struct gl_context *ctx = &brw->ctx;
-   uint32_t hw_prim = prim_to_hw_prim[prim->mode];
+   uint32_t hw_prim = get_hw_prim_for_gl_prim(prim->mode);
 
    DBG("PRIM: %s\n", _mesa_lookup_enum_by_nr(prim->mode));
 
@@ -118,11 +129,11 @@ static void brw_set_prim(struct brw_context *brw,
 
    if (hw_prim != brw->primitive) {
       brw->primitive = hw_prim;
-      brw->state.dirty.brw |= BRW_NEW_PRIMITIVE;
+      brw->ctx.NewDriverState |= BRW_NEW_PRIMITIVE;
 
       if (reduced_prim[prim->mode] != brw->reduced_primitive) {
         brw->reduced_primitive = reduced_prim[prim->mode];
-        brw->state.dirty.brw |= BRW_NEW_REDUCED_PRIMITIVE;
+        brw->ctx.NewDriverState |= BRW_NEW_REDUCED_PRIMITIVE;
       }
    }
 }
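Throughout this patch, driver-private dirty bits move from brw->state.dirty.brw
into the core-Mesa ctx->NewDriverState bitfield, as in the hunk above. The
flag/upload/clear protocol itself is unchanged; roughly (a sketch of the
pattern, not code from this patch):

    /* Flag: code that invalidates render state sets a driver bit. */
    brw->ctx.NewDriverState |= BRW_NEW_PRIMITIVE;

    /* Upload: re-run the state atoms whose dirty masks intersect it. */
    if (brw->ctx.NewDriverState)
       brw_upload_render_state(brw);

    /* Clear: only once the primitive is known to fit in the batch. */
    brw_render_state_finished(brw);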
@@ -134,11 +145,11 @@ static void gen6_set_prim(struct brw_context *brw,
 
    DBG("PRIM: %s\n", _mesa_lookup_enum_by_nr(prim->mode));
 
-   hw_prim = prim_to_hw_prim[prim->mode];
+   hw_prim = get_hw_prim_for_gl_prim(prim->mode);
 
    if (hw_prim != brw->primitive) {
       brw->primitive = hw_prim;
-      brw->state.dirty.brw |= BRW_NEW_PRIMITIVE;
+      brw->ctx.NewDriverState |= BRW_NEW_PRIMITIVE;
    }
 }
 
@@ -168,15 +179,15 @@ static void brw_emit_prim(struct brw_context *brw,
 {
    int verts_per_instance;
    int vertex_access_type;
-   int start_vertex_location;
-   int base_vertex_location;
    int indirect_flag;
+   int predicate_enable;
 
    DBG("PRIM: %s %d %d\n", _mesa_lookup_enum_by_nr(prim->mode),
        prim->start, prim->count);
 
-   start_vertex_location = prim->start;
-   base_vertex_location = prim->basevertex;
+   int start_vertex_location = prim->start;
+   int base_vertex_location = prim->basevertex;
+
    if (prim->indexed) {
       vertex_access_type = brw->gen >= 7 ?
          GEN7_3DPRIM_VERTEXBUFFER_ACCESS_RANDOM :
@@ -250,10 +261,14 @@ static void brw_emit_prim(struct brw_context *brw,
       indirect_flag = 0;
    }
 
-
    if (brw->gen >= 7) {
+      if (brw->predicate.state == BRW_PREDICATE_STATE_USE_BIT)
+         predicate_enable = GEN7_3DPRIM_PREDICATE_ENABLE;
+      else
+         predicate_enable = 0;
+
       BEGIN_BATCH(7);
-      OUT_BATCH(CMD_3D_PRIM << 16 | (7 - 2) | indirect_flag);
+      OUT_BATCH(CMD_3D_PRIM << 16 | (7 - 2) | indirect_flag | predicate_enable);
       OUT_BATCH(hw_prim | vertex_access_type);
    } else {
       BEGIN_BATCH(6);
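GEN7_3DPRIM_PREDICATE_ENABLE tells the command streamer to drop the 3DPRIMITIVE
whenever the MI_PREDICATE result bit says conditional rendering failed. The
driver-side state machine looks roughly like this (a hedged sketch of the enum
in brw_context.h; only USE_BIT is referenced in this hunk):

    enum brw_predicate_state {
       /* No predication in effect: render normally. */
       BRW_PREDICATE_STATE_RENDER,
       /* The CPU already knows the query result is false: skip draws. */
       BRW_PREDICATE_STATE_DONT_RENDER,
       /* The result was loaded into the MI_PREDICATE registers: emit
        * predicated 3DPRIMITIVEs and let the hardware decide per draw. */
       BRW_PREDICATE_STATE_USE_BIT,
    };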
@@ -268,9 +283,6 @@ static void brw_emit_prim(struct brw_context *brw,
    OUT_BATCH(base_vertex_location);
    ADVANCE_BATCH();
 
-   /* Only used on Sandybridge; harmless to set elsewhere. */
-   brw->batch.need_workaround_flush = true;
-
    if (brw->always_flush_cache) {
       intel_batchbuffer_emit_mi_flush(brw);
    }
@@ -280,6 +292,7 @@ static void brw_emit_prim(struct brw_context *brw,
 static void brw_merge_inputs( struct brw_context *brw,
                       const struct gl_client_array *arrays[])
 {
+   const struct gl_context *ctx = &brw->ctx;
    GLuint i;
 
    for (i = 0; i < brw->vb.nr_buffers; i++) {
@@ -292,39 +305,45 @@ static void brw_merge_inputs( struct brw_context *brw,
       brw->vb.inputs[i].buffer = -1;
       brw->vb.inputs[i].glarray = arrays[i];
    }
-}
 
-/*
- * \brief Resolve buffers before drawing.
- *
- * Resolve the depth buffer's HiZ buffer, resolve the depth buffer of each
- * enabled depth texture, and flush the render cache for any dirty textures.
- *
- * (In the future, this will also perform MSAA resolves).
- */
-static void
-brw_predraw_resolve_buffers(struct brw_context *brw)
-{
-   struct gl_context *ctx = &brw->ctx;
-   struct intel_renderbuffer *depth_irb;
-   struct intel_texture_object *tex_obj;
-
-   /* Resolve the depth buffer's HiZ buffer. */
-   depth_irb = intel_get_renderbuffer(ctx->DrawBuffer, BUFFER_DEPTH);
-   if (depth_irb)
-      intel_renderbuffer_resolve_hiz(brw, depth_irb);
-
-   /* Resolve depth buffer and render cache of each enabled texture. */
-   int maxEnabledUnit = ctx->Texture._MaxEnabledTexImageUnit;
-   for (int i = 0; i <= maxEnabledUnit; i++) {
-      if (!ctx->Texture.Unit[i]._ReallyEnabled)
-        continue;
-      tex_obj = intel_texture_object(ctx->Texture.Unit[i]._Current);
-      if (!tex_obj || !tex_obj->mt)
-        continue;
-      intel_miptree_all_slices_resolve_depth(brw, tex_obj->mt);
-      intel_miptree_resolve_color(brw, tex_obj->mt);
-      brw_render_cache_set_check_flush(brw, tex_obj->mt->region->bo);
+   if (brw->gen < 8 && !brw->is_haswell) {
+      struct gl_program *vp = &ctx->VertexProgram._Current->Base;
+      /* Prior to Haswell, the hardware can't natively support GL_FIXED or
+       * 2_10_10_10_REV vertex formats.  Set appropriate workaround flags.
+       */
+      for (i = 0; i < VERT_ATTRIB_MAX; i++) {
+         if (!(vp->InputsRead & BITFIELD64_BIT(i)))
+            continue;
+
+         uint8_t wa_flags = 0;
+
+         switch (brw->vb.inputs[i].glarray->Type) {
+
+         case GL_FIXED:
+            wa_flags = brw->vb.inputs[i].glarray->Size;
+            break;
+
+         case GL_INT_2_10_10_10_REV:
+            wa_flags |= BRW_ATTRIB_WA_SIGN;
+            /* fallthrough */
+
+         case GL_UNSIGNED_INT_2_10_10_10_REV:
+            if (brw->vb.inputs[i].glarray->Format == GL_BGRA)
+               wa_flags |= BRW_ATTRIB_WA_BGRA;
+
+            if (brw->vb.inputs[i].glarray->Normalized)
+               wa_flags |= BRW_ATTRIB_WA_NORMALIZE;
+            else if (!brw->vb.inputs[i].glarray->Integer)
+               wa_flags |= BRW_ATTRIB_WA_SCALE;
+
+            break;
+         }
+
+         if (brw->vb.attrib_wa_flags[i] != wa_flags) {
+            brw->vb.attrib_wa_flags[i] = wa_flags;
+            brw->ctx.NewDriverState |= BRW_NEW_VS_ATTRIB_WORKAROUNDS;
+         }
+      }
    }
 }
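The wa_flags byte packs everything the vertex shader compiler needs to emulate
an unsupported format: for GL_FIXED the low bits hold the component count (how
many 16.16 channels the VS must rescale), and for the 2_10_10_10 formats the
BRW_ATTRIB_WA_* bits request sign extension, BGRA swizzling, normalization, or
scaling in the shader. A worked example, assuming the flag definitions in
brw_vs.h:

    /* A normalized GL_INT_2_10_10_10_REV, GL_BGRA attribute: */
    uint8_t wa_flags = BRW_ATTRIB_WA_SIGN        /* sign-extend 10/10/10/2 */
                     | BRW_ATTRIB_WA_BGRA        /* swap R/B in the shader */
                     | BRW_ATTRIB_WA_NORMALIZE;  /* rescale to [-1, 1] */

    /* A three-component GL_FIXED attribute: the array size itself is the
     * flag value, telling the VS how many channels to convert. */
    uint8_t fixed_flags = 3;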
 
@@ -360,12 +379,12 @@ static void brw_postdraw_set_buffers_need_resolve(struct brw_context *brw)
       back_irb->need_downsample = true;
    if (depth_irb && ctx->Depth.Mask) {
       intel_renderbuffer_att_set_needs_depth_resolve(depth_att);
-      brw_render_cache_set_add_bo(brw, depth_irb->mt->region->bo);
+      brw_render_cache_set_add_bo(brw, depth_irb->mt->bo);
    }
 
    if (ctx->Extensions.ARB_stencil_texturing &&
        stencil_irb && ctx->Stencil._WriteEnabled) {
-      brw_render_cache_set_add_bo(brw, stencil_irb->mt->region->bo);
+      brw_render_cache_set_add_bo(brw, stencil_irb->mt->bo);
    }
 
    for (int i = 0; i < fb->_NumColorDrawBuffers; i++) {
@@ -373,14 +392,14 @@ static void brw_postdraw_set_buffers_need_resolve(struct brw_context *brw)
          intel_renderbuffer(fb->_ColorDrawBuffers[i]);
 
       if (irb)
-         brw_render_cache_set_add_bo(brw, irb->mt->region->bo);
+         brw_render_cache_set_add_bo(brw, irb->mt->bo);
    }
 }
 
 /* May fail if out of video memory for texture or vbo upload, or on
  * fallback conditions.
  */
-static bool brw_try_draw_prims( struct gl_context *ctx,
+static void brw_try_draw_prims( struct gl_context *ctx,
                                     const struct gl_client_array *arrays[],
                                     const struct _mesa_prim *prims,
                                     GLuint nr_prims,
@@ -390,7 +409,6 @@ static bool brw_try_draw_prims( struct gl_context *ctx,
                                     struct gl_buffer_object *indirect)
 {
    struct brw_context *brw = brw_context(ctx);
-   bool retval = true;
    GLuint i;
    bool fail_next = false;
 
@@ -419,35 +437,30 @@ static bool brw_try_draw_prims( struct gl_context *ctx,
 
    intel_prepare_render(brw);
 
-   /* This workaround has to happen outside of brw_upload_state() because it
-    * may flush the batchbuffer for a blit, affecting the state flags.
+   /* This workaround has to happen outside of brw_upload_render_state()
+    * because it may flush the batchbuffer for a blit, affecting the state
+    * flags.
     */
    brw_workaround_depthstencil_alignment(brw, 0);
 
-   /* Resolves must occur after updating renderbuffers, updating context state,
-    * and finalizing textures but before setting up any hardware state for
-    * this draw call.
-    */
-   brw_predraw_resolve_buffers(brw);
-
    /* Bind all inputs, derive varying and size information:
     */
    brw_merge_inputs( brw, arrays );
 
    brw->ib.ib = ib;
-   brw->state.dirty.brw |= BRW_NEW_INDICES;
+   brw->ctx.NewDriverState |= BRW_NEW_INDICES;
 
    brw->vb.min_index = min_index;
    brw->vb.max_index = max_index;
-   brw->state.dirty.brw |= BRW_NEW_VERTICES;
+   brw->ctx.NewDriverState |= BRW_NEW_VERTICES;
 
    for (i = 0; i < nr_prims; i++) {
       int estimated_max_prim_size;
+      const int sampler_state_size = 16;
 
       estimated_max_prim_size = 512; /* batchbuffer commands */
-      estimated_max_prim_size += (BRW_MAX_TEX_UNIT *
-                                 (sizeof(struct brw_sampler_state) +
-                                  sizeof(struct gen5_sampler_default_color)));
+      estimated_max_prim_size += BRW_MAX_TEX_UNIT *
+         (sampler_state_size + sizeof(struct gen5_sampler_default_color));
       estimated_max_prim_size += 1024; /* gen6 VS push constants */
       estimated_max_prim_size += 1024; /* gen6 WM push constants */
       estimated_max_prim_size += 512; /* misc. pad */
@@ -459,30 +472,51 @@ static bool brw_try_draw_prims( struct gl_context *ctx,
       intel_batchbuffer_require_space(brw, estimated_max_prim_size, RENDER_RING);
       intel_batchbuffer_save_state(brw);
 
-      if (brw->num_instances != prims[i].num_instances) {
+      if (brw->num_instances != prims[i].num_instances ||
+          brw->basevertex != prims[i].basevertex) {
          brw->num_instances = prims[i].num_instances;
-         brw->state.dirty.brw |= BRW_NEW_VERTICES;
-         brw_merge_inputs(brw, arrays);
-      }
-      if (brw->basevertex != prims[i].basevertex) {
          brw->basevertex = prims[i].basevertex;
-         brw->state.dirty.brw |= BRW_NEW_VERTICES;
-         brw_merge_inputs(brw, arrays);
+         if (i > 0) { /* For i == 0 we just did this before the loop */
+            brw->ctx.NewDriverState |= BRW_NEW_VERTICES;
+            brw_merge_inputs(brw, arrays);
+         }
       }
+
+      brw->draw.gl_basevertex =
+         prims[i].indexed ? prims[i].basevertex : prims[i].start;
+
+      drm_intel_bo_unreference(brw->draw.draw_params_bo);
+
+      if (prims[i].is_indirect) {
+         /* Point draw_params_bo at the indirect buffer. */
+         brw->draw.draw_params_bo =
+            intel_buffer_object(ctx->DrawIndirectBuffer)->buffer;
+         drm_intel_bo_reference(brw->draw.draw_params_bo);
+         brw->draw.draw_params_offset =
+            prims[i].indirect_offset + (prims[i].indexed ? 12 : 8);
+      } else {
+         /* Set draw_params_bo to NULL so brw_prepare_vertices knows it
+          * has to upload gl_BaseVertex and such if they're needed.
+          */
+         brw->draw.draw_params_bo = NULL;
+         brw->draw.draw_params_offset = 0;
+      }
+
       if (brw->gen < 6)
         brw_set_prim(brw, &prims[i]);
       else
         gen6_set_prim(brw, &prims[i]);
 
 retry:
-      /* Note that before the loop, brw->state.dirty.brw was set to != 0, and
+
+      /* Note that before the loop, brw->ctx.NewDriverState was set to nonzero, and
        * that the state updated in the loop outside of this block is that in
        * *_set_prim or intel_batchbuffer_flush(), which only impacts
-       * brw->state.dirty.brw.
+       * brw->ctx.NewDriverState.
        */
-      if (brw->state.dirty.brw) {
+      if (brw->ctx.NewDriverState) {
         brw->no_batch_wrap = true;
-        brw_upload_state(brw);
+        brw_upload_render_state(brw);
       }
 
       brw_emit_prim(brw, &prims[i], brw->primitive);
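The +12/+8 offsets in the hunk above come straight from the layout of the GL
indirect draw commands: the value the VS needs (baseVertex, or 'first' for
non-indexed draws) sits at byte 12 of DrawElementsIndirectCommand and byte 8 of
DrawArraysIndirectCommand. For reference (layouts per the ARB_draw_indirect
spec):

    typedef struct {
       GLuint count;
       GLuint instanceCount;
       GLuint firstIndex;
       GLint  baseVertex;    /* byte offset 12: what gl_BaseVertex reads */
       GLuint baseInstance;
    } DrawElementsIndirectCommand;

    typedef struct {
       GLuint count;
       GLuint instanceCount;
       GLuint first;         /* byte offset 8: base for non-indexed draws */
       GLuint baseInstance;
    } DrawArraysIndirectCommand;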
@@ -496,25 +530,18 @@ retry:
            fail_next = true;
            goto retry;
         } else {
-           if (intel_batchbuffer_flush(brw) == -ENOSPC) {
-              static bool warned = false;
-
-              if (!warned) {
-                 fprintf(stderr, "i965: Single primitive emit exceeded"
-                         "available aperture space\n");
-                 warned = true;
-              }
-
-              retval = false;
-           }
+            int ret = intel_batchbuffer_flush(brw);
+            WARN_ONCE(ret == -ENOSPC,
+                      "i965: Single primitive emit exceeded "
+                      "available aperture space\n");
         }
       }
 
       /* Now that we know we haven't run out of aperture space, we can safely
        * reset the dirty bits.
        */
-      if (brw->state.dirty.brw)
-         brw_clear_dirty_bits(brw);
+      if (brw->ctx.NewDriverState)
+         brw_render_state_finished(brw);
    }
 
    if (brw->always_flush_batch)
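WARN_ONCE replaces the open-coded static-flag pattern the old -ENOSPC path
used. A minimal sketch of such a macro (the real definition lives in
brw_context.h; this is an approximation, not the exact text):

    #define WARN_ONCE(cond, fmt...) do {        \
       if (unlikely(cond)) {                    \
          static bool _warned = false;          \
          if (!_warned) {                       \
             fprintf(stderr, "WARNING: ");      \
             fprintf(stderr, fmt);              \
             _warned = true;                    \
          }                                     \
       }                                        \
    } while (0)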
@@ -523,7 +550,7 @@ retry:
    brw_state_cache_check_size(brw);
    brw_postdraw_set_buffers_need_resolve(brw);
 
-   return retval;
+   return;
 }
 
 void brw_draw_prims( struct gl_context *ctx,
@@ -541,7 +568,7 @@ void brw_draw_prims( struct gl_context *ctx,
 
    assert(unused_tfb_object == NULL);
 
-   if (!_mesa_check_conditional_render(ctx))
+   if (!brw_check_conditional_render(brw))
       return;
 
    /* Handle primitive restart if needed */
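Switching from _mesa_check_conditional_render() to
brw_check_conditional_render() means an unresolved occlusion query no longer
has to stall the CPU: when the result is already known the helper answers
directly, and when the predicate bit is armed it returns true and lets the
predicated 3DPRIMITIVEs above do the skipping. A sketch of the expected shape,
assuming the helper this series adds in brw_conditional_render.c:

    bool
    brw_check_conditional_render(struct brw_context *brw)
    {
       switch (brw->predicate.state) {
       case BRW_PREDICATE_STATE_DONT_RENDER:
          return false;   /* query result known: skip the draw */
       case BRW_PREDICATE_STATE_USE_BIT:
          return true;    /* GPU predication handles it per primitive */
       default:
          /* Fall back to the core check, which may wait on the query. */
          return _mesa_check_conditional_render(&brw->ctx);
       }
    }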