i965: store workaround_bo offset
diff --git a/src/mesa/drivers/dri/i965/brw_compute.c b/src/mesa/drivers/dri/i965/brw_compute.c
index d9f181a1cbfffb264289b308868f7d0b86e54471..852db6dd64b192a6676849fb5da6e2f159b47c95 100644
--- a/src/mesa/drivers/dri/i965/brw_compute.c
+++ b/src/mesa/drivers/dri/i965/brw_compute.c
 #include "brw_defines.h"
 
 
-static void
-brw_emit_gpgpu_walker(struct brw_context *brw)
-{
-   const struct brw_cs_prog_data *prog_data = brw->cs.prog_data;
-
-   const GLuint *num_groups = brw->compute.num_work_groups;
-   uint32_t indirect_flag;
-
-   if (brw->compute.num_work_groups_bo == NULL) {
-      indirect_flag = 0;
-   } else {
-      GLintptr indirect_offset = brw->compute.num_work_groups_offset;
-      drm_intel_bo *bo = brw->compute.num_work_groups_bo;
-
-      indirect_flag = GEN7_GPGPU_INDIRECT_PARAMETER_ENABLE;
-
-      brw_load_register_mem(brw, GEN7_GPGPU_DISPATCHDIMX, bo,
-                            I915_GEM_DOMAIN_VERTEX, 0,
-                            indirect_offset + 0);
-      brw_load_register_mem(brw, GEN7_GPGPU_DISPATCHDIMY, bo,
-                            I915_GEM_DOMAIN_VERTEX, 0,
-                            indirect_offset + 4);
-      brw_load_register_mem(brw, GEN7_GPGPU_DISPATCHDIMZ, bo,
-                            I915_GEM_DOMAIN_VERTEX, 0,
-                            indirect_offset + 8);
-   }
-
-   const unsigned simd_size = prog_data->simd_size;
-   unsigned group_size = prog_data->local_size[0] *
-      prog_data->local_size[1] * prog_data->local_size[2];
-   unsigned thread_width_max =
-      (group_size + simd_size - 1) / simd_size;
-
-   uint32_t right_mask = 0xffffffffu >> (32 - simd_size);
-   const unsigned right_non_aligned = group_size & (simd_size - 1);
-   if (right_non_aligned != 0)
-      right_mask >>= (simd_size - right_non_aligned);
-
-   uint32_t dwords = brw->gen < 8 ? 11 : 15;
-   BEGIN_BATCH(dwords);
-   OUT_BATCH(GPGPU_WALKER << 16 | (dwords - 2) | indirect_flag);
-   OUT_BATCH(0);
-   if (brw->gen >= 8) {
-      OUT_BATCH(0);                     /* Indirect Data Length */
-      OUT_BATCH(0);                     /* Indirect Data Start Address */
-   }
-   assert(thread_width_max <= brw->max_cs_threads);
-   OUT_BATCH(SET_FIELD(simd_size / 16, GPGPU_WALKER_SIMD_SIZE) |
-             SET_FIELD(thread_width_max - 1, GPGPU_WALKER_THREAD_WIDTH_MAX));
-   OUT_BATCH(0);                        /* Thread Group ID Starting X */
-   if (brw->gen >= 8)
-      OUT_BATCH(0);                     /* MBZ */
-   OUT_BATCH(num_groups[0]);            /* Thread Group ID X Dimension */
-   OUT_BATCH(0);                        /* Thread Group ID Starting Y */
-   if (brw->gen >= 8)
-      OUT_BATCH(0);                     /* MBZ */
-   OUT_BATCH(num_groups[1]);            /* Thread Group ID Y Dimension */
-   OUT_BATCH(0);                        /* Thread Group ID Starting/Resume Z */
-   OUT_BATCH(num_groups[2]);            /* Thread Group ID Z Dimension */
-   OUT_BATCH(right_mask);               /* Right Execution Mask */
-   OUT_BATCH(0xffffffff);               /* Bottom Execution Mask */
-   ADVANCE_BATCH();
-
-   BEGIN_BATCH(2);
-   OUT_BATCH(MEDIA_STATE_FLUSH << 16 | (2 - 2));
-   OUT_BATCH(0);
-   ADVANCE_BATCH();
-}
-
-
 static void
 brw_dispatch_compute_common(struct gl_context *ctx)
 {
    struct brw_context *brw = brw_context(ctx);
-   int estimated_buffer_space_needed;
-   bool fail_next = false;
+   bool fail_next;
 
    if (!_mesa_check_conditional_render(ctx))
       return;
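
For context on the walker emission removed above (and reached through brw->vtbl.emit_compute_walker further down): the right-execution-mask arithmetic in the removed brw_emit_gpgpu_walker masks off the SIMD channels that the last, possibly partial, thread of a workgroup does not use, and thread_width_max is the workgroup size divided by the SIMD width, rounded up. A standalone restatement of that arithmetic, with worked numbers of our own choosing (illustrative only, not driver code):

```c
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Restatement of the mask math from the removed brw_emit_gpgpu_walker:
 * a workgroup of group_size invocations runs as ceil(group_size/simd_size)
 * threads, and the rightmost thread has only (group_size % simd_size)
 * active channels when the group size is not a multiple of the SIMD width,
 * so its unused high channels must be masked off in the walker packet.
 */
static uint32_t
right_execution_mask(unsigned simd_size, unsigned group_size)
{
   uint32_t mask = 0xffffffffu >> (32 - simd_size);
   const unsigned remainder = group_size & (simd_size - 1);
   if (remainder != 0)
      mask >>= simd_size - remainder;
   return mask;
}

int
main(void)
{
   /* SIMD16 shader, 8x5x1 = 40 invocations per group:
    * thread_width_max = ceil(40/16) = 3 threads, and the last thread has
    * only 8 live channels, so the right mask is 0x00ff.
    */
   assert(right_execution_mask(16, 40) == 0x00ff);
   /* SIMD8 shader, 64 invocations: every thread fully populated. */
   assert(right_execution_mask(8, 64) == 0xff);
   printf("right mask: 0x%04x\n", right_execution_mask(16, 40));
   return 0;
}
```
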
@@ -119,46 +48,35 @@ brw_dispatch_compute_common(struct gl_context *ctx)
 
    brw_validate_textures(brw);
 
-   const int sampler_state_size = 16; /* 16 bytes */
-   estimated_buffer_space_needed = 512; /* batchbuffer commands */
-   estimated_buffer_space_needed += (BRW_MAX_TEX_UNIT *
-                                     (sampler_state_size +
-                                      sizeof(struct gen5_sampler_default_color)));
-   estimated_buffer_space_needed += 1024; /* push constants */
-   estimated_buffer_space_needed += 512; /* misc. pad */
-
-   /* Flush the batch if it's approaching full, so that we don't wrap while
-    * we've got validated state that needs to be in the same batch as the
-    * primitives.
+   brw_predraw_resolve_inputs(brw, false, NULL);
+
+   /* Flush the batch if the batch/state buffers are nearly full.  We can
+    * grow them if needed, but this is not free, so we'd like to avoid it.
     */
-   intel_batchbuffer_require_space(brw, estimated_buffer_space_needed,
-                                   RENDER_RING);
+   intel_batchbuffer_require_space(brw, 600);
+   brw_require_statebuffer_space(brw, 2500);
    intel_batchbuffer_save_state(brw);
+   fail_next = intel_batchbuffer_saved_state_is_empty(brw);
 
  retry:
-   brw->no_batch_wrap = true;
+   brw->batch.no_wrap = true;
    brw_upload_compute_state(brw);
 
-   brw_emit_gpgpu_walker(brw);
+   brw->vtbl.emit_compute_walker(brw);
 
-   brw->no_batch_wrap = false;
+   brw->batch.no_wrap = false;
 
-   if (dri_bufmgr_check_aperture_space(&brw->batch.bo, 1)) {
+   if (!brw_batch_has_aperture_space(brw, 0)) {
       if (!fail_next) {
          intel_batchbuffer_reset_to_saved(brw);
          intel_batchbuffer_flush(brw);
          fail_next = true;
          goto retry;
       } else {
-         if (intel_batchbuffer_flush(brw) == -ENOSPC) {
-            static bool warned = false;
-
-            if (!warned) {
-               fprintf(stderr, "i965: Single compute shader dispatch "
-                       "exceeded available aperture space\n");
-               warned = true;
-            }
-         }
+         int ret = intel_batchbuffer_flush(brw);
+         WARN_ONCE(ret == -ENOSPC,
+                   "i965: Single compute shader dispatch "
+                   "exceeded available aperture space\n");
       }
    }
 
@@ -170,7 +88,7 @@ brw_dispatch_compute_common(struct gl_context *ctx)
    if (brw->always_flush_batch)
       intel_batchbuffer_flush(brw);
 
-   brw_state_cache_check_size(brw);
+   brw_program_cache_check_size(brw);
 
    /* Note: since compute shaders can't write to framebuffers, there's no need
     * to call brw_postdraw_set_buffers_need_resolve().
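
The hunk above keeps the save-and-retry idiom around possible batch overflow: snapshot the batch state, emit the compute state and walker with wrapping disabled, and if the kernel would lack aperture space for the resulting buffer list, roll back, flush the earlier work, and replay the dispatch once into an empty batch (giving up immediately when the snapshot was already empty, since a retry could only rebuild the same oversized submission). A minimal, driver-agnostic sketch of that control flow; every name here is a placeholder, not the i965 helper it stands in for:

```c
#include <stdbool.h>
#include <stdio.h>

/* Toy batch buffer: just enough state to show the save/retry flow. */
struct fake_batch {
   int used;            /* commands emitted since the last flush */
   int saved;           /* snapshot taken before the dispatch */
   int aperture_limit;  /* pretend aperture budget */
};

static void save_state(struct fake_batch *b)            { b->saved = b->used; }
static bool saved_is_empty(const struct fake_batch *b)  { return b->saved == 0; }
static void reset_to_saved(struct fake_batch *b)        { b->used = b->saved; }
static void flush(struct fake_batch *b)                 { printf("flush %d\n", b->used); b->used = 0; }
static void emit_dispatch(struct fake_batch *b)         { b->used += 40; }
static bool has_aperture_space(const struct fake_batch *b) { return b->used <= b->aperture_limit; }

static void
dispatch_with_retry(struct fake_batch *b)
{
   save_state(b);
   /* If nothing preceded the dispatch, a retry would rebuild the same
    * oversized submission, so allow only one attempt in that case.
    */
   bool fail_next = saved_is_empty(b);

retry:
   emit_dispatch(b);

   if (!has_aperture_space(b)) {
      if (!fail_next) {
         reset_to_saved(b);   /* drop the partial dispatch */
         flush(b);            /* submit the earlier work on its own */
         fail_next = true;
         goto retry;          /* replay into the now-empty batch */
      }
      fprintf(stderr, "dispatch exceeds aperture even in an empty batch\n");
   }
}

int
main(void)
{
   /* 30 units already queued, 40 more for the dispatch, 50 allowed:
    * the first attempt overflows, the retry after a flush succeeds.
    */
   struct fake_batch b = { .used = 30, .saved = 0, .aperture_limit = 50 };
   dispatch_with_retry(&b);
   return 0;
}
```
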
@@ -183,6 +101,7 @@ brw_dispatch_compute(struct gl_context *ctx, const GLuint *num_groups) {
 
    brw->compute.num_work_groups_bo = NULL;
    brw->compute.num_work_groups = num_groups;
+   brw->compute.group_size = NULL;
    ctx->NewDriverState |= BRW_NEW_CS_WORK_GROUPS;
 
    brw_dispatch_compute_common(ctx);
@@ -194,14 +113,30 @@ brw_dispatch_compute_indirect(struct gl_context *ctx, GLintptr indirect)
    struct brw_context *brw = brw_context(ctx);
    static const GLuint indirect_group_counts[3] = { 0, 0, 0 };
    struct gl_buffer_object *indirect_buffer = ctx->DispatchIndirectBuffer;
-   drm_intel_bo *bo =
+   struct brw_bo *bo =
       intel_bufferobj_buffer(brw,
                              intel_buffer_object(indirect_buffer),
-                             indirect, 3 * sizeof(GLuint));
+                             indirect, 3 * sizeof(GLuint), false);
 
    brw->compute.num_work_groups_bo = bo;
    brw->compute.num_work_groups_offset = indirect;
    brw->compute.num_work_groups = indirect_group_counts;
+   brw->compute.group_size = NULL;
+   ctx->NewDriverState |= BRW_NEW_CS_WORK_GROUPS;
+
+   brw_dispatch_compute_common(ctx);
+}
+
+static void
+brw_dispatch_compute_group_size(struct gl_context *ctx,
+                                const GLuint *num_groups,
+                                const GLuint *group_size)
+{
+   struct brw_context *brw = brw_context(ctx);
+
+   brw->compute.num_work_groups_bo = NULL;
+   brw->compute.num_work_groups = num_groups;
+   brw->compute.group_size = group_size;
    ctx->NewDriverState |= BRW_NEW_CS_WORK_GROUPS;
 
    brw_dispatch_compute_common(ctx);
@@ -212,4 +147,5 @@ brw_init_compute_functions(struct dd_function_table *functions)
 {
    functions->DispatchCompute = brw_dispatch_compute;
    functions->DispatchComputeIndirect = brw_dispatch_compute_indirect;
+   functions->DispatchComputeGroupSize = brw_dispatch_compute_group_size;
 }
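
The three hooks installed above back the GL dispatch entry points one-to-one: glDispatchCompute takes CPU-known group counts, glDispatchComputeIndirect makes the GPU read the counts from the bound GL_DISPATCH_INDIRECT_BUFFER, and the ARB_compute_variable_group_size variant additionally supplies the local group size at dispatch time. A sketch of the application-side calls that reach each hook; it assumes a current GL 4.3 context exposing that extension, a compiled and bound compute program, and a loader such as GLEW providing the entry points, with error handling omitted:

```c
#include <GL/glew.h>   /* or any loader exposing the GL 4.3 compute entry points */

static void
dispatch_examples(GLuint indirect_bo)
{
   /* DispatchCompute: work-group counts known on the CPU. */
   glDispatchCompute(4, 4, 1);

   /* DispatchComputeIndirect: three consecutive GLuints are read by the
    * GPU from the bound buffer at the given byte offset.
    */
   glBindBuffer(GL_DISPATCH_INDIRECT_BUFFER, indirect_bo);
   glDispatchComputeIndirect(0);

   /* DispatchComputeGroupSize: the local size is chosen here rather than
    * in the shader, which must declare "layout(local_size_variable) in;".
    */
   glDispatchComputeGroupSizeARB(4, 4, 1, 8, 8, 1);
}
```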