i965/gen7: Use predicated rendering for indirect compute
authorJordan Justen <jordan.l.justen@intel.com>
Mon, 15 Feb 2016 23:17:15 +0000 (15:17 -0800)
committerJordan Justen <jordan.l.justen@intel.com>
Wed, 17 Feb 2016 17:25:47 +0000 (09:25 -0800)
On gen7 (Ivy Bridge, Haswell), we will get a GPU hang if an indirect
dispatch is used and one of the dimensions is 0.

Therefore we use predicated rendering on the GPGPU_WALKER command to
handle this case.
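
The MI_PREDICATE sequence added below loads each dispatch dimension
into MI_PREDICATE_SRC0, compares it against zero in SRC1, ORs the
results together, and finally inverts the result. A rough C sketch of
the intended logic (illustration only, not driver code; the helper
name is made up):

   #include <stdbool.h>
   #include <stdint.h>

   /* Hypothetical helper, not part of this patch: the walker should
    * only run when every dimension of the indirect dispatch is
    * non-zero.
    */
   static bool
   should_run_walker(uint32_t x, uint32_t y, uint32_t z)
   {
      bool any_zero = (x == 0) || (y == 0) || (z == 0);
      return !any_zero;   /* the final "predicate = !predicate" step */
   }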

Fixes piglit test: spec/arb_compute_shader/zero-dispatch-size

From the ARB_compute_shader spec, under DispatchCompute:

"If the work group count in any dimension is zero, no work groups are
 dispatched."

And then for DispatchComputeIndirect:

... "is equivalent (assuming no errors are generated) to calling
DispatchCompute with <num_groups_x>, <num_groups_y> and
<num_groups_z>" ...
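
For illustration only (not part of this patch), an application can hit
this path with something like the following, assuming a compute
program is already bound (buffer and array names here are made up):

   GLuint indirect_bo;
   const GLuint num_groups[3] = { 4, 0, 1 };   /* y == 0 */

   glGenBuffers(1, &indirect_bo);
   glBindBuffer(GL_DISPATCH_INDIRECT_BUFFER, indirect_bo);
   glBufferData(GL_DISPATCH_INDIRECT_BUFFER, sizeof(num_groups),
                num_groups, GL_STATIC_DRAW);

   /* Equivalent to glDispatchCompute(4, 0, 1): no work groups may be
    * dispatched, and on gen7 this must not hang the GPU.
    */
   glDispatchComputeIndirect(0);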

Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=94100
Signed-off-by: Jordan Justen <jordan.l.justen@intel.com>
Reviewed-by: Ben Widawsky <benjamin.widawsky@intel.com>
Reviewed-by: Kenneth Graunke <kenneth@whitecape.org>
Tested-by: Ilia Mirkin <imirkin@alum.mit.edu>
src/mesa/drivers/dri/i965/brw_compute.c
src/mesa/drivers/dri/i965/brw_defines.h

index d9f181a1cbfffb264289b308868f7d0b86e54471..0d2aca5b78addfc320de46ba12582fa8e18485b8 100644 (file)
 #include "brw_defines.h"
 
 
+static void
+prepare_indirect_gpgpu_walker(struct brw_context *brw)
+{
+   GLintptr indirect_offset = brw->compute.num_work_groups_offset;
+   drm_intel_bo *bo = brw->compute.num_work_groups_bo;
+
+   brw_load_register_mem(brw, GEN7_GPGPU_DISPATCHDIMX, bo,
+                         I915_GEM_DOMAIN_VERTEX, 0,
+                         indirect_offset + 0);
+   brw_load_register_mem(brw, GEN7_GPGPU_DISPATCHDIMY, bo,
+                         I915_GEM_DOMAIN_VERTEX, 0,
+                         indirect_offset + 4);
+   brw_load_register_mem(brw, GEN7_GPGPU_DISPATCHDIMZ, bo,
+                         I915_GEM_DOMAIN_VERTEX, 0,
+                         indirect_offset + 8);
+
+   if (brw->gen > 7)
+      return;
+
+   /* Clear upper 32-bits of SRC0 and all 64-bits of SRC1 */
+   BEGIN_BATCH(7);
+   OUT_BATCH(MI_LOAD_REGISTER_IMM | (7 - 2));
+   OUT_BATCH(MI_PREDICATE_SRC0 + 4);
+   OUT_BATCH(0u);
+   OUT_BATCH(MI_PREDICATE_SRC1 + 0);
+   OUT_BATCH(0u);
+   OUT_BATCH(MI_PREDICATE_SRC1 + 4);
+   OUT_BATCH(0u);
+   ADVANCE_BATCH();
+
+   /* Load compute_dispatch_indirect_x_size into SRC0 */
+   brw_load_register_mem(brw, MI_PREDICATE_SRC0, bo,
+                         I915_GEM_DOMAIN_INSTRUCTION, 0,
+                         indirect_offset + 0);
+
+   /* predicate = (compute_dispatch_indirect_x_size == 0); */
+   BEGIN_BATCH(1);
+   OUT_BATCH(GEN7_MI_PREDICATE |
+             MI_PREDICATE_LOADOP_LOAD |
+             MI_PREDICATE_COMBINEOP_SET |
+             MI_PREDICATE_COMPAREOP_SRCS_EQUAL);
+   ADVANCE_BATCH();
+
+   /* Load compute_dispatch_indirect_y_size into SRC0 */
+   brw_load_register_mem(brw, MI_PREDICATE_SRC0, bo,
+                         I915_GEM_DOMAIN_INSTRUCTION, 0,
+                         indirect_offset + 4);
+
+   /* predicate |= (compute_dispatch_indirect_y_size == 0); */
+   BEGIN_BATCH(1);
+   OUT_BATCH(GEN7_MI_PREDICATE |
+             MI_PREDICATE_LOADOP_LOAD |
+             MI_PREDICATE_COMBINEOP_OR |
+             MI_PREDICATE_COMPAREOP_SRCS_EQUAL);
+   ADVANCE_BATCH();
+
+   /* Load compute_dispatch_indirect_z_size into SRC0 */
+   brw_load_register_mem(brw, MI_PREDICATE_SRC0, bo,
+                         I915_GEM_DOMAIN_INSTRUCTION, 0,
+                         indirect_offset + 8);
+
+   /* predicate |= (compute_dispatch_indirect_z_size == 0); */
+   BEGIN_BATCH(1);
+   OUT_BATCH(GEN7_MI_PREDICATE |
+             MI_PREDICATE_LOADOP_LOAD |
+             MI_PREDICATE_COMBINEOP_OR |
+             MI_PREDICATE_COMPAREOP_SRCS_EQUAL);
+   ADVANCE_BATCH();
+
+   /* predicate = !predicate; */
+   BEGIN_BATCH(1);
+   OUT_BATCH(GEN7_MI_PREDICATE |
+             MI_PREDICATE_LOADOP_LOADINV |
+             MI_PREDICATE_COMBINEOP_OR |
+             MI_PREDICATE_COMPAREOP_FALSE);
+   ADVANCE_BATCH();
+}
+
 static void
 brw_emit_gpgpu_walker(struct brw_context *brw)
 {
@@ -45,20 +123,10 @@ brw_emit_gpgpu_walker(struct brw_context *brw)
    if (brw->compute.num_work_groups_bo == NULL) {
       indirect_flag = 0;
    } else {
-      GLintptr indirect_offset = brw->compute.num_work_groups_offset;
-      drm_intel_bo *bo = brw->compute.num_work_groups_bo;
-
-      indirect_flag = GEN7_GPGPU_INDIRECT_PARAMETER_ENABLE;
-
-      brw_load_register_mem(brw, GEN7_GPGPU_DISPATCHDIMX, bo,
-                            I915_GEM_DOMAIN_VERTEX, 0,
-                            indirect_offset + 0);
-      brw_load_register_mem(brw, GEN7_GPGPU_DISPATCHDIMY, bo,
-                            I915_GEM_DOMAIN_VERTEX, 0,
-                            indirect_offset + 4);
-      brw_load_register_mem(brw, GEN7_GPGPU_DISPATCHDIMZ, bo,
-                            I915_GEM_DOMAIN_VERTEX, 0,
-                            indirect_offset + 8);
+      indirect_flag =
+         GEN7_GPGPU_INDIRECT_PARAMETER_ENABLE |
+         (brw->gen == 7 ? GEN7_GPGPU_PREDICATE_ENABLE : 0);
+      prepare_indirect_gpgpu_walker(brw);
    }
 
    const unsigned simd_size = prog_data->simd_size;
index b1fa559129bd315b8900dc32117124bc71893d8e..60b696cfb98de3b578c7a577b0d12e5476ce1557 100644 (file)
@@ -2938,6 +2938,7 @@ enum brw_wm_barycentric_interp_mode {
 #define GPGPU_WALKER                            0x7105
 /* GEN7 DW0 */
 # define GEN7_GPGPU_INDIRECT_PARAMETER_ENABLE   (1 << 10)
+# define GEN7_GPGPU_PREDICATE_ENABLE            (1 << 8)
 /* GEN8+ DW2 */
 # define GPGPU_WALKER_INDIRECT_LENGTH_SHIFT     0
 # define GPGPU_WALKER_INDIRECT_LENGTH_MASK      INTEL_MASK(15, 0)