i965/fs: Lower 32x32 bit multiplication on BXT.
diff --git a/src/mesa/drivers/dri/i965/brw_draw.c b/src/mesa/drivers/dri/i965/brw_draw.c
index b28eaf24918bfdf07bb0c0a4321704474b88a2de..9113d0f5a8edb81910bbbc3f62912c4a2717dc63 100644
--- a/src/mesa/drivers/dri/i965/brw_draw.c
+++ b/src/mesa/drivers/dri/i965/brw_draw.c
@@ -46,6 +46,7 @@
 #include "brw_defines.h"
 #include "brw_context.h"
 #include "brw_state.h"
+#include "brw_vs.h"
 
 #include "intel_batchbuffer.h"
 #include "intel_buffers.h"
@@ -91,8 +92,13 @@ get_hw_prim_for_gl_prim(int mode)
 {
    if (mode >= BRW_PRIM_OFFSET)
       return mode - BRW_PRIM_OFFSET;
-   else
+   else {
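+      /* mode is a GL primitive enum at this point; anything outside the
+       * table would read past the end of prim_to_hw_prim.
+       */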
+      assert(mode < ARRAY_SIZE(prim_to_hw_prim));
       return prim_to_hw_prim[mode];
+   }
 }
 
 
@@ -101,13 +104,13 @@ get_hw_prim_for_gl_prim(int mode)
  * programs be immune to the active primitive (i.e. cope with all
  * possibilities).  That may not be realistic however.
  */
-static void brw_set_prim(struct brw_context *brw,
-                         const struct _mesa_prim *prim)
+static void
+brw_set_prim(struct brw_context *brw, const struct _mesa_prim *prim)
 {
    struct gl_context *ctx = &brw->ctx;
    uint32_t hw_prim = get_hw_prim_for_gl_prim(prim->mode);
 
-   DBG("PRIM: %s\n", _mesa_lookup_enum_by_nr(prim->mode));
+   DBG("PRIM: %s\n", _mesa_enum_to_string(prim->mode));
 
    /* Slight optimization to avoid the GS program when not needed:
     */
@@ -126,27 +129,24 @@ static void brw_set_prim(struct brw_context *brw,
 
    if (hw_prim != brw->primitive) {
       brw->primitive = hw_prim;
-      brw->state.dirty.brw |= BRW_NEW_PRIMITIVE;
+      brw->ctx.NewDriverState |= BRW_NEW_PRIMITIVE;
 
       if (reduced_prim[prim->mode] != brw->reduced_primitive) {
         brw->reduced_primitive = reduced_prim[prim->mode];
-        brw->state.dirty.brw |= BRW_NEW_REDUCED_PRIMITIVE;
+        brw->ctx.NewDriverState |= BRW_NEW_REDUCED_PRIMITIVE;
       }
    }
 }
 
-static void gen6_set_prim(struct brw_context *brw,
-                          const struct _mesa_prim *prim)
+static void
+gen6_set_prim(struct brw_context *brw, const struct _mesa_prim *prim)
 {
-   uint32_t hw_prim;
-
-   DBG("PRIM: %s\n", _mesa_lookup_enum_by_nr(prim->mode));
-
-   hw_prim = get_hw_prim_for_gl_prim(prim->mode);
+   DBG("PRIM: %s\n", _mesa_enum_to_string(prim->mode));
 
+   const uint32_t hw_prim = get_hw_prim_for_gl_prim(prim->mode);
    if (hw_prim != brw->primitive) {
       brw->primitive = hw_prim;
-      brw->state.dirty.brw |= BRW_NEW_PRIMITIVE;
+      brw->ctx.NewDriverState |= BRW_NEW_PRIMITIVE;
    }
 }
 
@@ -159,7 +159,8 @@ static void gen6_set_prim(struct brw_context *brw,
  * quads so that those dangling vertices won't get drawn when we convert to
  * trifans/tristrips.
  */
-static GLuint trim(GLenum prim, GLuint length)
+static GLuint
+trim(GLenum prim, GLuint length)
 {
    if (prim == GL_QUAD_STRIP)
       return length > 3 ? (length - length % 2) : 0;
@@ -170,25 +171,35 @@ static GLuint trim(GLenum prim, GLuint length)
 }
 
 
-static void brw_emit_prim(struct brw_context *brw,
-                         const struct _mesa_prim *prim,
-                         uint32_t hw_prim)
+static void
+brw_emit_prim(struct brw_context *brw,
+              const struct _mesa_prim *prim,
+              uint32_t hw_prim)
 {
    int verts_per_instance;
    int vertex_access_type;
    int indirect_flag;
 
-   DBG("PRIM: %s %d %d\n", _mesa_lookup_enum_by_nr(prim->mode),
+   DBG("PRIM: %s %d %d\n", _mesa_enum_to_string(prim->mode),
        prim->start, prim->count);
 
+   int start_vertex_location = prim->start;
+   int base_vertex_location = prim->basevertex;
+
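+   /* Fold in the offsets established at index/vertex upload time so the
+    * 3DPRIMITIVE below addresses the data where it actually landed.
+    */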
    if (prim->indexed) {
       vertex_access_type = brw->gen >= 7 ?
          GEN7_3DPRIM_VERTEXBUFFER_ACCESS_RANDOM :
          GEN4_3DPRIM_VERTEXBUFFER_ACCESS_RANDOM;
+      start_vertex_location += brw->ib.start_vertex_offset;
+      base_vertex_location += brw->vb.start_vertex_bias;
    } else {
       vertex_access_type = brw->gen >= 7 ?
          GEN7_3DPRIM_VERTEXBUFFER_ACCESS_SEQUENTIAL :
          GEN4_3DPRIM_VERTEXBUFFER_ACCESS_SEQUENTIAL;
+      start_vertex_location += brw->vb.start_vertex_bias;
    }
 
    /* We only need to trim the primitive count on pre-Gen6. */
@@ -206,9 +214,8 @@ static void brw_emit_prim(struct brw_context *brw,
     * and missed flushes of the render cache as it heads to other parts of
     * the GPU besides the draw code.
     */
-   if (brw->always_flush_cache) {
-      intel_batchbuffer_emit_mi_flush(brw);
-   }
+   if (brw->always_flush_cache)
+      brw_emit_mi_flush(brw);
 
    /* If indirect, emit a bunch of loads from the indirect BO. */
    if (prim->is_indirect) {
@@ -246,41 +253,44 @@ static void brw_emit_prim(struct brw_context *brw,
          OUT_BATCH(0);
          ADVANCE_BATCH();
       }
-   }
-   else {
+   } else {
       indirect_flag = 0;
    }
 
+   BEGIN_BATCH(brw->gen >= 7 ? 7 : 6);
 
    if (brw->gen >= 7) {
-      BEGIN_BATCH(7);
-      OUT_BATCH(CMD_3D_PRIM << 16 | (7 - 2) | indirect_flag);
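+      /* When conditional rendering resolved to a hardware predicate,
+       * predicate the 3DPRIMITIVE itself instead of skipping it on the CPU.
+       */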
+      const int predicate_enable =
+         (brw->predicate.state == BRW_PREDICATE_STATE_USE_BIT)
+         ? GEN7_3DPRIM_PREDICATE_ENABLE : 0;
+
+      OUT_BATCH(CMD_3D_PRIM << 16 | (7 - 2) | indirect_flag | predicate_enable);
       OUT_BATCH(hw_prim | vertex_access_type);
    } else {
-      BEGIN_BATCH(6);
       OUT_BATCH(CMD_3D_PRIM << 16 | (6 - 2) |
                 hw_prim << GEN4_3DPRIM_TOPOLOGY_TYPE_SHIFT |
                 vertex_access_type);
    }
    OUT_BATCH(verts_per_instance);
-   OUT_BATCH(brw->draw.start_vertex_location);
+   OUT_BATCH(start_vertex_location);
    OUT_BATCH(prim->num_instances);
    OUT_BATCH(prim->base_instance);
-   OUT_BATCH(brw->draw.base_vertex_location);
+   OUT_BATCH(base_vertex_location);
    ADVANCE_BATCH();
 
-   /* Only used on Sandybridge; harmless to set elsewhere. */
-   brw->batch.need_workaround_flush = true;
-
-   if (brw->always_flush_cache) {
-      intel_batchbuffer_emit_mi_flush(brw);
-   }
+   if (brw->always_flush_cache)
+      brw_emit_mi_flush(brw);
 }
 
 
-static void brw_merge_inputs( struct brw_context *brw,
-                      const struct gl_client_array *arrays[])
+static void
+brw_merge_inputs(struct brw_context *brw,
+                 const struct gl_client_array *arrays[])
 {
+   const struct gl_context *ctx = &brw->ctx;
    GLuint i;
 
    for (i = 0; i < brw->vb.nr_buffers; i++) {
@@ -293,6 +300,49 @@ static void brw_merge_inputs( struct brw_context *brw,
       brw->vb.inputs[i].buffer = -1;
       brw->vb.inputs[i].glarray = arrays[i];
    }
+
+   if (brw->gen < 8 && !brw->is_haswell) {
+      struct gl_program *vp = &ctx->VertexProgram._Current->Base;
+      /* Prior to Haswell, the hardware can't natively support GL_FIXED or
+       * 2_10_10_10_REV vertex formats.  Set appropriate workaround flags.
+       */
+      for (i = 0; i < VERT_ATTRIB_MAX; i++) {
+         if (!(vp->InputsRead & BITFIELD64_BIT(i)))
+            continue;
+
+         uint8_t wa_flags = 0;
+
+         switch (brw->vb.inputs[i].glarray->Type) {
+
+         case GL_FIXED:
+            wa_flags = brw->vb.inputs[i].glarray->Size;
+            break;
+
+         case GL_INT_2_10_10_10_REV:
+            wa_flags |= BRW_ATTRIB_WA_SIGN;
+            /* fallthrough */
+
+         case GL_UNSIGNED_INT_2_10_10_10_REV:
+            if (brw->vb.inputs[i].glarray->Format == GL_BGRA)
+               wa_flags |= BRW_ATTRIB_WA_BGRA;
+
+            if (brw->vb.inputs[i].glarray->Normalized)
+               wa_flags |= BRW_ATTRIB_WA_NORMALIZE;
+            else if (!brw->vb.inputs[i].glarray->Integer)
+               wa_flags |= BRW_ATTRIB_WA_SCALE;
+
+            break;
+         }
+
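+         /* Only flag fresh state when the workaround set for this attribute
+          * actually changed since the last draw.
+          */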
+         if (brw->vb.attrib_wa_flags[i] != wa_flags) {
+            brw->vb.attrib_wa_flags[i] = wa_flags;
+            brw->ctx.NewDriverState |= BRW_NEW_VS_ATTRIB_WORKAROUNDS;
+         }
+      }
+   }
 }
 
 /**
@@ -307,7 +354,8 @@ static void brw_merge_inputs( struct brw_context *brw,
  * Also mark any render targets which will be textured as needing a render
  * cache flush.
  */
-static void brw_postdraw_set_buffers_need_resolve(struct brw_context *brw)
+static void
+brw_postdraw_set_buffers_need_resolve(struct brw_context *brw)
 {
    struct gl_context *ctx = &brw->ctx;
    struct gl_framebuffer *fb = ctx->DrawBuffer;
@@ -347,22 +395,22 @@ static void brw_postdraw_set_buffers_need_resolve(struct brw_context *brw)
 /* May fail if out of video memory for texture or vbo upload, or on
  * fallback conditions.
  */
-static bool brw_try_draw_prims( struct gl_context *ctx,
-                                    const struct gl_client_array *arrays[],
-                                    const struct _mesa_prim *prims,
-                                    GLuint nr_prims,
-                                    const struct _mesa_index_buffer *ib,
-                                    GLuint min_index,
-                                    GLuint max_index,
-                                    struct gl_buffer_object *indirect)
+static void
+brw_try_draw_prims(struct gl_context *ctx,
+                   const struct gl_client_array *arrays[],
+                   const struct _mesa_prim *prims,
+                   GLuint nr_prims,
+                   const struct _mesa_index_buffer *ib,
+                   GLuint min_index,
+                   GLuint max_index,
+                   struct gl_buffer_object *indirect)
 {
    struct brw_context *brw = brw_context(ctx);
-   bool retval = true;
    GLuint i;
    bool fail_next = false;
 
    if (ctx->NewState)
-      _mesa_update_state( ctx );
+      _mesa_update_state(ctx);
 
    /* Find the highest sampler unit used by each shader program.  A bit-count
     * won't work since ARB programs use the texture unit number as the sampler
@@ -382,25 +430,26 @@ static bool brw_try_draw_prims( struct gl_context *ctx,
     * software fallback will segfault if it attempts to access any
     * texture level other than level 0.
     */
-   brw_validate_textures( brw );
+   brw_validate_textures(brw);
 
    intel_prepare_render(brw);
 
-   /* This workaround has to happen outside of brw_upload_state() because it
-    * may flush the batchbuffer for a blit, affecting the state flags.
+   /* This workaround has to happen outside of brw_upload_render_state()
+    * because it may flush the batchbuffer for a blit, affecting the state
+    * flags.
     */
    brw_workaround_depthstencil_alignment(brw, 0);
 
    /* Bind all inputs, derive varying and size information:
     */
-   brw_merge_inputs( brw, arrays );
+   brw_merge_inputs(brw, arrays);
 
    brw->ib.ib = ib;
-   brw->state.dirty.brw |= BRW_NEW_INDICES;
+   brw->ctx.NewDriverState |= BRW_NEW_INDICES;
 
    brw->vb.min_index = min_index;
    brw->vb.max_index = max_index;
-   brw->state.dirty.brw |= BRW_NEW_VERTICES;
+   brw->ctx.NewDriverState |= BRW_NEW_VERTICES;
 
    for (i = 0; i < nr_prims; i++) {
       int estimated_max_prim_size;
@@ -425,14 +474,13 @@ static bool brw_try_draw_prims( struct gl_context *ctx,
          brw->num_instances = prims[i].num_instances;
          brw->basevertex = prims[i].basevertex;
          if (i > 0) { /* For i == 0 we just did this before the loop */
-            brw->state.dirty.brw |= BRW_NEW_VERTICES;
+            brw->ctx.NewDriverState |= BRW_NEW_VERTICES;
             brw_merge_inputs(brw, arrays);
          }
       }
 
-      brw->draw.indexed = prims[i].indexed;
-      brw->draw.start_vertex_location = prims[i].start;
-      brw->draw.base_vertex_location = prims[i].basevertex;
+      brw->draw.gl_basevertex =
+         prims[i].indexed ? prims[i].basevertex : prims[i].start;
 
       drm_intel_bo_unreference(brw->draw.draw_params_bo);
 
@@ -458,14 +506,14 @@ static bool brw_try_draw_prims( struct gl_context *ctx,
 
 retry:
 
-      /* Note that before the loop, brw->state.dirty.brw was set to != 0, and
+      /* Note that before the loop, brw->ctx.NewDriverState was nonzero, and
        * that the state updated in the loop outside of this block is that in
        * *_set_prim or intel_batchbuffer_flush(), which only impacts
-       * brw->state.dirty.brw.
+       * brw->ctx.NewDriverState.
        */
-      if (brw->state.dirty.brw) {
+      if (brw->ctx.NewDriverState) {
         brw->no_batch_wrap = true;
-        brw_upload_state(brw);
+        brw_upload_render_state(brw);
       }
 
       brw_emit_prim(brw, &prims[i], brw->primitive);
@@ -479,25 +527,18 @@ retry:
            fail_next = true;
            goto retry;
         } else {
-           if (intel_batchbuffer_flush(brw) == -ENOSPC) {
-              static bool warned = false;
-
-              if (!warned) {
-                 fprintf(stderr, "i965: Single primitive emit exceeded"
-                         "available aperture space\n");
-                 warned = true;
-              }
-
-              retval = false;
-           }
+            int ret = intel_batchbuffer_flush(brw);
+            WARN_ONCE(ret == -ENOSPC,
+                      "i965: Single primitive emit exceeded "
+                      "available aperture space\n");
         }
       }
 
       /* Now that we know we haven't run out of aperture space, we can safely
        * reset the dirty bits.
        */
-      if (brw->state.dirty.brw)
-         brw_clear_dirty_bits(brw);
+      if (brw->ctx.NewDriverState)
+         brw_render_state_finished(brw);
    }
 
    if (brw->always_flush_batch)
@@ -506,30 +547,26 @@ retry:
    brw_state_cache_check_size(brw);
    brw_postdraw_set_buffers_need_resolve(brw);
 
-   return retval;
+   return;
 }
 
-void brw_draw_prims( struct gl_context *ctx,
-                    const struct _mesa_prim *prims,
-                    GLuint nr_prims,
-                    const struct _mesa_index_buffer *ib,
-                    GLboolean index_bounds_valid,
-                    GLuint min_index,
-                    GLuint max_index,
-                    struct gl_transform_feedback_object *unused_tfb_object,
-                    struct gl_buffer_object *indirect )
+void
+brw_draw_prims(struct gl_context *ctx,
+               const struct _mesa_prim *prims,
+               GLuint nr_prims,
+               const struct _mesa_index_buffer *ib,
+               GLboolean index_bounds_valid,
+               GLuint min_index,
+               GLuint max_index,
+               struct gl_transform_feedback_object *unused_tfb_object,
+               struct gl_buffer_object *indirect)
 {
    struct brw_context *brw = brw_context(ctx);
    const struct gl_client_array **arrays = ctx->Array._DrawArrays;
 
    assert(unused_tfb_object == NULL);
 
-   if (ctx->Query.CondRenderQuery) {
-      perf_debug("Conditional rendering is implemented in software and may "
-                 "stall.  This should be fixed in the driver.\n");
-   }
-
-   if (!_mesa_check_conditional_render(ctx))
+   if (!brw_check_conditional_render(brw))
       return;
 
    /* Handle primitive restart if needed */
@@ -543,7 +580,7 @@ void brw_draw_prims( struct gl_context *ctx,
     */
    if (ctx->RenderMode != GL_RENDER) {
       perf_debug("%s render mode not supported in hardware\n",
-                 _mesa_lookup_enum_by_nr(ctx->RenderMode));
+                 _mesa_enum_to_string(ctx->RenderMode));
       _swsetup_Wakeup(ctx);
       _tnl_wakeup(ctx);
       _tnl_draw_prims(ctx, prims, nr_prims, ib,
@@ -565,26 +602,28 @@ void brw_draw_prims( struct gl_context *ctx,
     * manage it.  swrast doesn't support our featureset, so we can't fall back
     * to it.
     */
-   brw_try_draw_prims(ctx, arrays, prims, nr_prims, ib, min_index, max_index, indirect);
+   brw_try_draw_prims(ctx, arrays, prims, nr_prims, ib, min_index, max_index,
+                      indirect);
 }
 
-void brw_draw_init( struct brw_context *brw )
+void
+brw_draw_init(struct brw_context *brw)
 {
    struct gl_context *ctx = &brw->ctx;
    struct vbo_context *vbo = vbo_context(ctx);
-   int i;
 
    /* Register our drawing function:
     */
    vbo->draw_prims = brw_draw_prims;
 
-   for (i = 0; i < VERT_ATTRIB_MAX; i++)
+   for (int i = 0; i < VERT_ATTRIB_MAX; i++)
       brw->vb.inputs[i].buffer = -1;
    brw->vb.nr_buffers = 0;
    brw->vb.nr_enabled = 0;
 }
 
-void brw_draw_destroy( struct brw_context *brw )
+void
+brw_draw_destroy(struct brw_context *brw)
 {
    int i;