i965/fs: Handle fixed HW GRF subnr in reg_offset().
diff --git a/src/mesa/drivers/dri/i965/gen8_ps_state.c b/src/mesa/drivers/dri/i965/gen8_ps_state.c
index a88f109c691a6c1bf4c39b6644eaf086cc515f0d..0e7d258e1aa19f6fee396e2f95c512c429ebcf4b 100644
--- a/src/mesa/drivers/dri/i965/gen8_ps_state.c
+++ b/src/mesa/drivers/dri/i965/gen8_ps_state.c
 #include "program/program.h"
 #include "brw_state.h"
 #include "brw_defines.h"
+#include "brw_wm.h"
 #include "intel_batchbuffer.h"
 
 void
 gen8_upload_ps_extra(struct brw_context *brw,
-                     const struct gl_fragment_program *fp,
-                     const struct brw_wm_prog_data *prog_data,
-                     bool multisampled_fbo)
+                     const struct brw_wm_prog_data *prog_data)
 {
-   struct gl_context *ctx = &brw->ctx;
    uint32_t dw1 = 0;
 
    dw1 |= GEN8_PSX_PIXEL_SHADER_VALID;
@@ -45,22 +43,66 @@ gen8_upload_ps_extra(struct brw_context *brw,
    if (prog_data->num_varying_inputs != 0)
       dw1 |= GEN8_PSX_ATTRIBUTE_ENABLE;
 
-   if (fp->Base.InputsRead & VARYING_BIT_POS)
-      dw1 |= GEN8_PSX_USES_SOURCE_DEPTH | GEN8_PSX_USES_SOURCE_W;
+   if (prog_data->uses_src_depth)
+      dw1 |= GEN8_PSX_USES_SOURCE_DEPTH;
+
+   if (prog_data->uses_src_w)
+      dw1 |= GEN8_PSX_USES_SOURCE_W;
 
-   if (multisampled_fbo &&
-       _mesa_get_min_invocations_per_fragment(ctx, fp, false) > 1)
+   if (prog_data->persample_dispatch)
       dw1 |= GEN8_PSX_SHADER_IS_PER_SAMPLE;
 
-   if (fp->Base.SystemValuesRead & SYSTEM_BIT_SAMPLE_MASK_IN)
-      dw1 |= GEN8_PSX_SHADER_USES_INPUT_COVERAGE_MASK;
+   if (prog_data->uses_sample_mask) {
+      if (brw->gen >= 9)
+         dw1 |= BRW_PSICMS_INNER << GEN9_PSX_SHADER_NORMAL_COVERAGE_MASK_SHIFT;
+      else
+         dw1 |= GEN8_PSX_SHADER_USES_INPUT_COVERAGE_MASK;
+   }
 
    if (prog_data->uses_omask)
       dw1 |= GEN8_PSX_OMASK_TO_RENDER_TARGET;
 
-   if (_mesa_active_fragment_shader_has_atomic_ops(&brw->ctx))
+   if (brw->gen >= 9 && prog_data->pulls_bary)
+      dw1 |= GEN9_PSX_SHADER_PULLS_BARY;
+
+   /* The stricter cross-primitive coherency guarantees that the hardware
+    * gives us with the "Accesses UAV" bit set for at least one shader stage
+    * and the "UAV coherency required" bit set on the 3DPRIMITIVE command are
+    * redundant within the current image, atomic counter and SSBO GL APIs,
+    * which all have very loose ordering and coherency requirements and
+    * generally rely on the application to insert explicit barriers when a
+    * shader invocation is expected to see the memory writes performed by the
+    * invocations of some previous primitive.  Regardless of the value of "UAV
+    * coherency required", the "Accesses UAV" bits will implicitly cause a DC
+    * flush (useless in most cases) when the lowermost stage with the bit set
+    * finishes execution.
+    *
+    * It would be nice to disable it, but in some cases we can't because on
+    * Gen8+ it also has an influence on rasterization via the PS UAV-only
+    * signal (which could be set independently from the coherency mechanism in
+    * the 3DSTATE_WM command on Gen7), and because in some cases it will
+    * determine whether the hardware skips execution of the fragment shader or
+    * not via the ThreadDispatchEnable signal.  However, if we know that
+    * GEN8_PS_BLEND_HAS_WRITEABLE_RT is going to be set and
+    * GEN8_PSX_PIXEL_SHADER_NO_RT_WRITE is not set, it shouldn't make any
+    * difference, so we may just disable it here.
+    *
+    * Gen8 hardware tries to compute ThreadDispatchEnable for us but doesn't
+    * take into account KillPixels when no depth or stencil writes are enabled.
+    * In order for occlusion queries to work correctly with no attachments, we
+    * need to force-enable here.
+    *
+    * BRW_NEW_FS_PROG_DATA | BRW_NEW_FRAGMENT_PROGRAM | _NEW_BUFFERS | _NEW_COLOR
+    */
+   if ((prog_data->has_side_effects || prog_data->uses_kill) &&
+       !brw_color_buffer_write_enabled(brw))
       dw1 |= GEN8_PSX_SHADER_HAS_UAV;
 
+   if (prog_data->computed_stencil) {
+      assert(brw->gen >= 9);
+      dw1 |= GEN9_PSX_SHADER_COMPUTES_STENCIL;
+   }
+
    BEGIN_BATCH(2);
    OUT_BATCH(_3DSTATE_PS_EXTRA << 16 | (2 - 2));
    OUT_BATCH(dw1);
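
To make the new UAV condition easier to follow, here is a small standalone sketch, not part of the patch, that restates it as a pure predicate and works through the occlusion-query case mentioned in the comment. The struct and helper names are hypothetical; only the flag names mirror the prog_data fields used in the hunk above.

#include <stdbool.h>

struct ps_extra_inputs {
   bool has_side_effects;     /* prog_data->has_side_effects */
   bool uses_kill;            /* prog_data->uses_kill */
   bool color_writes_enabled; /* brw_color_buffer_write_enabled(brw) */
};

/* Mirrors the condition above: only force the "Accesses UAV" bit when no
 * color buffer write is enabled; with writable render targets the bit
 * makes no difference and only costs the implicit DC flush.
 */
static bool
needs_uav_bit(const struct ps_extra_inputs *in)
{
   return (in->has_side_effects || in->uses_kill) &&
          !in->color_writes_enabled;
}

int
main(void)
{
   /* Occlusion query with no attachments and a shader that may discard:
    * Gen8 hardware would not derive ThreadDispatchEnable from KillPixels
    * alone, so the bit is forced and the query counts correctly.
    */
   struct ps_extra_inputs occlusion_query_case = {
      .has_side_effects = false,
      .uses_kill = true,
      .color_writes_enabled = false,
   };
   return needs_uav_bit(&occlusion_query_case) ? 0 : 1;
}
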
@@ -70,24 +112,18 @@ gen8_upload_ps_extra(struct brw_context *brw,
 static void
 upload_ps_extra(struct brw_context *brw)
 {
-   /* BRW_NEW_FRAGMENT_PROGRAM */
-   const struct brw_fragment_program *fp =
-      brw_fragment_program_const(brw->fragment_program);
    /* BRW_NEW_FS_PROG_DATA */
    const struct brw_wm_prog_data *prog_data = brw->wm.prog_data;
-   /* BRW_NEW_NUM_SAMPLES */
-   const bool multisampled_fbo = brw->num_samples > 1;
 
-   gen8_upload_ps_extra(brw, &fp->program, prog_data, multisampled_fbo);
+   gen8_upload_ps_extra(brw, prog_data);
 }
 
 const struct brw_tracked_state gen8_ps_extra = {
    .dirty = {
-      .mesa  = 0,
-      .brw   = BRW_NEW_CONTEXT |
-               BRW_NEW_FRAGMENT_PROGRAM |
-               BRW_NEW_FS_PROG_DATA |
-               BRW_NEW_NUM_SAMPLES,
+      .mesa  = _NEW_BUFFERS | _NEW_COLOR,
+      .brw   = BRW_NEW_BLORP |
+               BRW_NEW_CONTEXT |
+               BRW_NEW_FS_PROG_DATA,
    },
    .emit = upload_ps_extra,
 };
@@ -115,6 +151,12 @@ upload_wm_state(struct brw_context *brw)
    dw1 |= brw->wm.prog_data->barycentric_interp_modes <<
       GEN7_WM_BARYCENTRIC_INTERPOLATION_MODE_SHIFT;
 
+   /* BRW_NEW_FS_PROG_DATA */
+   if (brw->wm.prog_data->early_fragment_tests)
+      dw1 |= GEN7_WM_EARLY_DS_CONTROL_PREPS;
+   else if (brw->wm.prog_data->has_side_effects)
+      dw1 |= GEN7_WM_EARLY_DS_CONTROL_PSEXEC;
+
    BEGIN_BATCH(2);
    OUT_BATCH(_3DSTATE_WM << 16 | (2 - 2));
    OUT_BATCH(dw1);
@@ -125,7 +167,8 @@ const struct brw_tracked_state gen8_wm_state = {
    .dirty = {
       .mesa  = _NEW_LINE |
                _NEW_POLYGON,
-      .brw   = BRW_NEW_CONTEXT |
+      .brw   = BRW_NEW_BLORP |
+               BRW_NEW_CONTEXT |
                BRW_NEW_FS_PROG_DATA,
    },
    .emit = upload_wm_state,
@@ -133,12 +176,10 @@ const struct brw_tracked_state gen8_wm_state = {
 
 void
 gen8_upload_ps_state(struct brw_context *brw,
-                     const struct gl_fragment_program *fp,
                      const struct brw_stage_state *stage_state,
                      const struct brw_wm_prog_data *prog_data,
                      uint32_t fast_clear_op)
 {
-   struct gl_context *ctx = &brw->ctx;
    uint32_t dw3 = 0, dw6 = 0, dw7 = 0, ksp0, ksp2 = 0;
 
    /* Initialize the execution mask with VMask.  Otherwise, derivatives are
@@ -149,7 +190,7 @@ gen8_upload_ps_state(struct brw_context *brw,
 
    const unsigned sampler_count =
       DIV_ROUND_UP(CLAMP(stage_state->sampler_count, 0, 16), 4);
-   dw3 |= SET_FIELD(sampler_count, GEN7_PS_SAMPLER_COUNT); 
+   dw3 |= SET_FIELD(sampler_count, GEN7_PS_SAMPLER_COUNT);
 
    /* BRW_NEW_FS_PROG_DATA */
    dw3 |=
@@ -192,38 +233,19 @@ gen8_upload_ps_state(struct brw_context *brw,
 
    dw6 |= fast_clear_op;
 
-   /* _NEW_MULTISAMPLE
-    * In case of non 1x per sample shading, only one of SIMD8 and SIMD16
-    * should be enabled. We do 'SIMD16 only' dispatch if a SIMD16 shader
-    * is successfully compiled. In majority of the cases that bring us
-    * better performance than 'SIMD8 only' dispatch.
-    */
-   int min_invocations_per_fragment =
-      _mesa_get_min_invocations_per_fragment(ctx, fp, false);
-   assert(min_invocations_per_fragment >= 1);
+   if (prog_data->dispatch_8)
+      dw6 |= GEN7_PS_8_DISPATCH_ENABLE;
 
-   if (prog_data->prog_offset_16 || prog_data->no_8) {
+   if (prog_data->dispatch_16)
       dw6 |= GEN7_PS_16_DISPATCH_ENABLE;
-      if (!prog_data->no_8 && min_invocations_per_fragment == 1) {
-         dw6 |= GEN7_PS_8_DISPATCH_ENABLE;
-         dw7 |= (prog_data->base.dispatch_grf_start_reg <<
-                 GEN7_PS_DISPATCH_START_GRF_SHIFT_0);
-         dw7 |= (prog_data->dispatch_grf_start_reg_16 <<
-                 GEN7_PS_DISPATCH_START_GRF_SHIFT_2);
-         ksp0 = stage_state->prog_offset;
-         ksp2 = stage_state->prog_offset + prog_data->prog_offset_16;
-      } else {
-         dw7 |= (prog_data->dispatch_grf_start_reg_16 <<
-                 GEN7_PS_DISPATCH_START_GRF_SHIFT_0);
-
-         ksp0 = stage_state->prog_offset + prog_data->prog_offset_16;
-      }
-   } else {
-      dw6 |= GEN7_PS_8_DISPATCH_ENABLE;
-      dw7 |= (prog_data->base.dispatch_grf_start_reg <<
-              GEN7_PS_DISPATCH_START_GRF_SHIFT_0);
-      ksp0 = stage_state->prog_offset;
-   }
+
+   dw7 |= prog_data->base.dispatch_grf_start_reg <<
+          GEN7_PS_DISPATCH_START_GRF_SHIFT_0;
+   dw7 |= prog_data->dispatch_grf_start_reg_2 <<
+          GEN7_PS_DISPATCH_START_GRF_SHIFT_2;
+
+   ksp0 = stage_state->prog_offset;
+   ksp2 = stage_state->prog_offset + prog_data->prog_offset_2;
 
    BEGIN_BATCH(12);
    OUT_BATCH(_3DSTATE_PS << 16 | (12 - 2));
@@ -233,7 +255,7 @@ gen8_upload_ps_state(struct brw_context *brw,
    if (prog_data->base.total_scratch) {
       OUT_RELOC64(stage_state->scratch_bo,
                   I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER,
-                  ffs(prog_data->base.total_scratch) - 11);
+                  ffs(stage_state->per_thread_scratch) - 11);
    } else {
       OUT_BATCH(0);
       OUT_BATCH(0);
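
One detail worth spelling out in the hunk above: the scratch relocation now uses ffs(stage_state->per_thread_scratch) - 11 rather than deriving the field from total_scratch. The "- 11" bias means the batch field stores the per-thread scratch size as log2(bytes) - 10, i.e. a power-of-two size with 1KB mapping to 0. Below is a minimal standalone sketch of that encoding, assuming per_thread_scratch is a power of two of at least 1KB (which the expression requires); the helper name is hypothetical.

#include <assert.h>
#include <stdint.h>
#include <strings.h>   /* ffs() */

/* Encode a per-thread scratch size the same way the expression above
 * does: ffs(bytes) - 11 == log2(bytes) - 10 for power-of-two sizes.
 */
static uint32_t
encode_scratch_space(uint32_t per_thread_scratch)
{
   assert(per_thread_scratch >= 1024);                           /* at least 1KB */
   assert((per_thread_scratch & (per_thread_scratch - 1)) == 0); /* power of two */
   return ffs(per_thread_scratch) - 11;
}

int
main(void)
{
   assert(encode_scratch_space(1024) == 0);              /* 1KB -> 0  */
   assert(encode_scratch_space(2048) == 1);              /* 2KB -> 1  */
   assert(encode_scratch_space(4096) == 2);              /* 4KB -> 2  */
   assert(encode_scratch_space(2 * 1024 * 1024) == 11);  /* 2MB -> 11 */
   return 0;
}
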
@@ -252,14 +274,14 @@ upload_ps_state(struct brw_context *brw)
 {
    /* BRW_NEW_FS_PROG_DATA */
    const struct brw_wm_prog_data *prog_data = brw->wm.prog_data;
-   gen8_upload_ps_state(brw, brw->fragment_program, &brw->wm.base, prog_data,
-                        brw->wm.fast_clear_op);
+   gen8_upload_ps_state(brw, &brw->wm.base, prog_data, brw->wm.fast_clear_op);
 }
 
 const struct brw_tracked_state gen8_ps_state = {
    .dirty = {
       .mesa  = _NEW_MULTISAMPLE,
       .brw   = BRW_NEW_BATCH |
+               BRW_NEW_BLORP |
                BRW_NEW_FRAGMENT_PROGRAM |
                BRW_NEW_FS_PROG_DATA,
    },