i965/blorp: Use ISL to compute image offsets
diff --git a/src/intel/vulkan/gen7_cmd_buffer.c b/src/intel/vulkan/gen7_cmd_buffer.c
index 32bae027725bb51d09e8961733b14e976d359dac..61778aa25b106433d5919c079fba26ffb54d87d2 100644
--- a/src/intel/vulkan/gen7_cmd_buffer.c
+++ b/src/intel/vulkan/gen7_cmd_buffer.c
 #include "genxml/gen_macros.h"
 #include "genxml/genX_pack.h"
 
-#if GEN_GEN == 7 && !GEN_IS_HASWELL
-void
-gen7_cmd_buffer_emit_descriptor_pointers(struct anv_cmd_buffer *cmd_buffer,
-                                         uint32_t stages)
-{
-   static const uint32_t sampler_state_opcodes[] = {
-      [MESA_SHADER_VERTEX]                      = 43,
-      [MESA_SHADER_TESS_CTRL]                   = 44, /* HS */
-      [MESA_SHADER_TESS_EVAL]                   = 45, /* DS */
-      [MESA_SHADER_GEOMETRY]                    = 46,
-      [MESA_SHADER_FRAGMENT]                    = 47,
-      [MESA_SHADER_COMPUTE]                     = 0,
-   };
-
-   static const uint32_t binding_table_opcodes[] = {
-      [MESA_SHADER_VERTEX]                      = 38,
-      [MESA_SHADER_TESS_CTRL]                   = 39,
-      [MESA_SHADER_TESS_EVAL]                   = 40,
-      [MESA_SHADER_GEOMETRY]                    = 41,
-      [MESA_SHADER_FRAGMENT]                    = 42,
-      [MESA_SHADER_COMPUTE]                     = 0,
-   };
-
-   anv_foreach_stage(s, stages) {
-      if (cmd_buffer->state.samplers[s].alloc_size > 0) {
-         anv_batch_emit(&cmd_buffer->batch,
-                        GENX(3DSTATE_SAMPLER_STATE_POINTERS_VS), ssp) {
-            ssp._3DCommandSubOpcode = sampler_state_opcodes[s];
-            ssp.PointertoVSSamplerState = cmd_buffer->state.samplers[s].offset;
-         }
-      }
-
-      /* Always emit binding table pointers if we're asked to, since on SKL
-       * this is what flushes push constants. */
-      anv_batch_emit(&cmd_buffer->batch,
-                     GENX(3DSTATE_BINDING_TABLE_POINTERS_VS), btp) {
-         btp._3DCommandSubOpcode = binding_table_opcodes[s];
-         btp.PointertoVSBindingTable = cmd_buffer->state.binding_tables[s].offset;
-      }
-   }
-}
-
-uint32_t
-gen7_cmd_buffer_flush_descriptor_sets(struct anv_cmd_buffer *cmd_buffer)
-{
-   VkShaderStageFlags dirty = cmd_buffer->state.descriptors_dirty &
-                              cmd_buffer->state.pipeline->active_stages;
-
-   VkResult result = VK_SUCCESS;
-   anv_foreach_stage(s, dirty) {
-      result = anv_cmd_buffer_emit_samplers(cmd_buffer, s,
-                                            &cmd_buffer->state.samplers[s]);
-      if (result != VK_SUCCESS)
-         break;
-      result = anv_cmd_buffer_emit_binding_table(cmd_buffer, s,
-                                                 &cmd_buffer->state.binding_tables[s]);
-      if (result != VK_SUCCESS)
-         break;
-   }
-
-   if (result != VK_SUCCESS) {
-      assert(result == VK_ERROR_OUT_OF_DEVICE_MEMORY);
-
-      result = anv_cmd_buffer_new_binding_table_block(cmd_buffer);
-      assert(result == VK_SUCCESS);
-
-      /* Re-emit state base addresses so we get the new surface state base
-       * address before we start emitting binding tables etc.
-       */
-      anv_cmd_buffer_emit_state_base_address(cmd_buffer);
-
-      /* Re-emit all active binding tables */
-      dirty |= cmd_buffer->state.pipeline->active_stages;
-      anv_foreach_stage(s, dirty) {
-         result = anv_cmd_buffer_emit_samplers(cmd_buffer, s,
-                                               &cmd_buffer->state.samplers[s]);
-         if (result != VK_SUCCESS)
-            return result;
-         result = anv_cmd_buffer_emit_binding_table(cmd_buffer, s,
-                                                    &cmd_buffer->state.binding_tables[s]);
-         if (result != VK_SUCCESS)
-            return result;
-      }
-   }
-
-   cmd_buffer->state.descriptors_dirty &= ~dirty;
-
-   return dirty;
-}
-#endif /* GEN_GEN == 7 && !GEN_IS_HASWELL */
-
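For context: the removed gen7_cmd_buffer_emit_descriptor_pointers() leans on the fact that the per-stage 3DSTATE_BINDING_TABLE_POINTERS_* (and SAMPLER_STATE_POINTERS_*) commands share one DWord layout and differ only in their sub-opcode. A minimal sketch of that trick, with stage and bt_state standing in for the loop variable and binding-table state above:

    /* Pack the VS variant, then override the sub-opcode (38 + stage on
     * IVB, per the removed table) to retarget the same layout at
     * HS/DS/GS/PS.
     */
    anv_batch_emit(&cmd_buffer->batch,
                   GENX(3DSTATE_BINDING_TABLE_POINTERS_VS), btp) {
       btp._3DCommandSubOpcode     = binding_table_opcodes[stage];
       btp.PointertoVSBindingTable = bt_state.offset;
    }
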
 static inline int64_t
 clamp_int64(int64_t x, int64_t min, int64_t max)
 {
@@ -234,12 +143,6 @@ flush_compute_descriptor_set(struct anv_cmd_buffer *cmd_buffer)
    const struct brw_cs_prog_data *cs_prog_data = get_cs_prog_data(pipeline);
    const struct brw_stage_prog_data *prog_data = &cs_prog_data->base;
 
-   unsigned local_id_dwords = cs_prog_data->local_invocation_id_regs * 8;
-   unsigned push_constant_data_size =
-      (prog_data->nr_params + local_id_dwords) * 4;
-   unsigned reg_aligned_constant_size = ALIGN(push_constant_data_size, 32);
-   unsigned push_constant_regs = reg_aligned_constant_size / 32;
-
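The dropped lines sized the compute push constants by hand: one GRF register holds 32 bytes (8 dwords), the local invocation IDs occupy local_invocation_id_regs whole registers, and the uniforms contribute nr_params dwords. Restated for reference (the compiler now precomputes this, split into cross-thread and per-thread blocks, in cs_prog_data->push):

    unsigned local_id_dwords = cs_prog_data->local_invocation_id_regs * 8;
    unsigned bytes = (prog_data->nr_params + local_id_dwords) * 4;
    unsigned regs  = ALIGN(bytes, 32) / 32;   /* 32 bytes per GRF */
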
    if (push_state.alloc_size) {
       anv_batch_emit(&cmd_buffer->batch, GENX(MEDIA_CURBE_LOAD), curbe) {
          curbe.CURBETotalDataLength    = push_state.alloc_size;
@@ -247,15 +150,7 @@ flush_compute_descriptor_set(struct anv_cmd_buffer *cmd_buffer)
       }
    }
 
-   assert(prog_data->total_shared <= 64 * 1024);
-   uint32_t slm_size = 0;
-   if (prog_data->total_shared > 0) {
-      /* slm_size is in 4k increments, but must be a power of 2. */
-      slm_size = 4 * 1024;
-      while (slm_size < prog_data->total_shared)
-         slm_size <<= 1;
-      slm_size /= 4 * 1024;
-   }
+   const uint32_t slm_size = encode_slm_size(GEN_GEN, prog_data->total_shared);
 
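The open-coded loop replaced here encoded the shared-local-memory allocation as a power-of-two number of 4 KB chunks (total_shared is capped at 64 KB on this hardware). A sketch of that gen7 encoding; the shared encode_slm_size() helper presumably also handles later generations, whose encodings differ:

    static uint32_t
    gen7_slm_chunks(uint32_t bytes)    /* hypothetical name */
    {
       if (bytes == 0)
          return 0;
       uint32_t size = 4 * 1024;       /* minimum allocation: one 4 KB chunk */
       while (size < bytes)
          size <<= 1;                  /* round up to a power of two... */
       return size / (4 * 1024);       /* ...and report it as a chunk count */
    }
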
    struct anv_state state =
       anv_state_pool_emit(&device->dynamic_state_pool,
@@ -264,14 +159,17 @@ flush_compute_descriptor_set(struct anv_cmd_buffer *cmd_buffer)
                           .BindingTablePointer = surfaces.offset,
                           .SamplerStatePointer = samplers.offset,
                           .ConstantURBEntryReadLength =
-                             push_constant_regs,
-#if !GEN_IS_HASWELL
+                             cs_prog_data->push.per_thread.regs,
+#if GEN_IS_HASWELL
+                          .CrossThreadConstantDataReadLength =
+                             cs_prog_data->push.cross_thread.regs,
+#else
                           .ConstantURBEntryReadOffset = 0,
 #endif
                           .BarrierEnable = cs_prog_data->uses_barrier,
                           .SharedLocalMemorySize = slm_size,
                           .NumberofThreadsinGPGPUThreadGroup =
-                             pipeline->cs_thread_width_max);
+                             cs_prog_data->threads);
 
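The Haswell-only CrossThreadConstantDataReadLength field mirrors the new split in brw_cs_prog_data: one cross-thread constant block shared by every thread in the group, plus a per-thread block (local invocation IDs and similar) read at a per-thread offset. Under that split, the total CURBE footprint presumably comes out as:

    /* Sketch, assuming the brw_cs_prog_data field names used above. */
    unsigned total_curbe_regs =
       cs_prog_data->push.cross_thread.regs +
       cs_prog_data->threads * cs_prog_data->push.per_thread.regs;
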
    const uint32_t size = GENX(INTERFACE_DESCRIPTOR_DATA_length) * sizeof(uint32_t);
    anv_batch_emit(&cmd_buffer->batch,
@@ -283,108 +181,15 @@ flush_compute_descriptor_set(struct anv_cmd_buffer *cmd_buffer)
    return VK_SUCCESS;
 }
 
-void
-genX(cmd_buffer_config_l3)(struct anv_cmd_buffer *cmd_buffer, bool enable_slm)
-{
-   /* References for GL state:
-    *
-    * - commits e307cfa..228d5a3
-    * - src/mesa/drivers/dri/i965/gen7_l3_state.c
-    */
-
-   uint32_t l3cr2_slm, l3cr2_noslm;
-   anv_pack_struct(&l3cr2_noslm, GENX(L3CNTLREG2),
-                   .URBAllocation = 24,
-                   .ROAllocation = 0,
-                   .DCAllocation = 16);
-   anv_pack_struct(&l3cr2_slm, GENX(L3CNTLREG2),
-                   .SLMEnable = 1,
-                   .URBAllocation = 16,
-                   .URBLowBandwidth = 1,
-                   .ROAllocation = 0,
-                   .DCAllocation = 8);
-   const uint32_t l3cr2_val = enable_slm ? l3cr2_slm : l3cr2_noslm;
-   bool changed = cmd_buffer->state.current_l3_config != l3cr2_val;
-
-   if (changed) {
-      /* According to the hardware docs, the L3 partitioning can only be
-       * changed while the pipeline is completely drained and the caches are
-       * flushed, which involves a first PIPE_CONTROL flush which stalls the
-       * pipeline...
-       */
-      anv_batch_emit(&cmd_buffer->batch, GENX(PIPE_CONTROL), pc) {
-         pc.DCFlushEnable              = true;
-         pc.CommandStreamerStallEnable = true;
-         pc.PostSyncOperation          = NoWrite;
-      }
-
-      /* ...followed by a second pipelined PIPE_CONTROL that initiates
-       * invalidation of the relevant caches. Note that because RO
-       * invalidation happens at the top of the pipeline (i.e. right away as
-       * the PIPE_CONTROL command is processed by the CS) we cannot combine it
-       * with the previous stalling flush as the hardware documentation
-       * suggests, because that would cause the CS to stall on previous
-       * rendering *after* RO invalidation and wouldn't prevent the RO caches
-       * from being polluted by concurrent rendering before the stall
-       * completes. This intentionally doesn't implement the SKL+ hardware
-       * workaround suggesting to enable CS stall on PIPE_CONTROLs with the
-       * texture cache invalidation bit set for GPGPU workloads because the
-       * previous and subsequent PIPE_CONTROLs already guarantee that there is
-       * no concurrent GPGPU kernel execution (see SKL HSD 2132585).
-       */
-      anv_batch_emit(&cmd_buffer->batch, GENX(PIPE_CONTROL), pc) {
-         pc.TextureCacheInvalidationEnable   = true;
-         pc.ConstantCacheInvalidationEnable  = true;
-         pc.InstructionCacheInvalidateEnable = true;
-         pc.StateCacheInvalidationEnable     = true;
-         pc.PostSyncOperation                = NoWrite;
-      }
-
-      /* Now send a third stalling flush to make sure that invalidation is
-       * complete when the L3 configuration registers are modified.
-       */
-      anv_batch_emit(&cmd_buffer->batch, GENX(PIPE_CONTROL), pc) {
-         pc.DCFlushEnable              = true;
-         pc.CommandStreamerStallEnable = true;
-         pc.PostSyncOperation          = NoWrite;
-      }
-
-      anv_finishme("write GEN7_L3SQCREG1");
-      anv_batch_emit(&cmd_buffer->batch, GENX(MI_LOAD_REGISTER_IMM), lri) {
-         lri.RegisterOffset   = GENX(L3CNTLREG2_num);
-         lri.DataDWord        = l3cr2_val;
-      }
-
-      uint32_t l3cr3_slm, l3cr3_noslm;
-      anv_pack_struct(&l3cr3_noslm, GENX(L3CNTLREG3),
-                      .ISAllocation = 8,
-                      .CAllocation = 4,
-                      .TAllocation = 8);
-      anv_pack_struct(&l3cr3_slm, GENX(L3CNTLREG3),
-                      .ISAllocation = 8,
-                      .CAllocation = 8,
-                      .TAllocation = 8);
-      const uint32_t l3cr3_val = enable_slm ? l3cr3_slm : l3cr3_noslm;
-      anv_batch_emit(&cmd_buffer->batch, GENX(MI_LOAD_REGISTER_IMM), lri) {
-         lri.RegisterOffset   = GENX(L3CNTLREG3_num);
-         lri.DataDWord        = l3cr3_val;
-      }
-
-      cmd_buffer->state.current_l3_config = l3cr2_val;
-   }
-}
-
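With this removal, the two hard-coded Ivybridge partitionings (SLM vs. no SLM) give way to the call-site change below, where genX(cmd_buffer_config_l3)() takes the pipeline itself. A hedged sketch of the likely new shape, with the gen_l3_config type, the urb.l3_config field, and the emit helper all assumed rather than taken from this diff:

    void
    genX(cmd_buffer_config_l3)(struct anv_cmd_buffer *cmd_buffer,
                               const struct anv_pipeline *pipeline)
    {
       /* Assumed: the pipeline carries an L3 configuration chosen at build
        * time from its actual needs (SLM use, URB size, etc.).
        */
       const struct gen_l3_config *cfg = pipeline->urb.l3_config;
       if (cfg == cmd_buffer->state.current_l3_config)
          return;

       /* The stall / invalidate / stall PIPE_CONTROL sequence and the
        * MI_LOAD_REGISTER_IMM writes from the removed code still apply;
        * only the source of the register values changes.
        */
       emit_l3_registers(cmd_buffer, cfg);   /* hypothetical helper */
       cmd_buffer->state.current_l3_config = cfg;
    }
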
 void
 genX(cmd_buffer_flush_compute_state)(struct anv_cmd_buffer *cmd_buffer)
 {
    struct anv_pipeline *pipeline = cmd_buffer->state.compute_pipeline;
-   const struct brw_cs_prog_data *cs_prog_data = get_cs_prog_data(pipeline);
    MAYBE_UNUSED VkResult result;
 
    assert(pipeline->active_stages == VK_SHADER_STAGE_COMPUTE_BIT);
 
-   bool needs_slm = cs_prog_data->base.total_shared > 0;
-   genX(cmd_buffer_config_l3)(cmd_buffer, needs_slm);
+   genX(cmd_buffer_config_l3)(cmd_buffer, pipeline);
 
    genX(flush_pipeline_select_gpgpu)(cmd_buffer);
 
@@ -400,6 +205,8 @@ genX(cmd_buffer_flush_compute_state)(struct anv_cmd_buffer *cmd_buffer)
    }
 
    cmd_buffer->state.compute_dirty = 0;
+
+   genX(cmd_buffer_apply_pipe_flushes)(cmd_buffer);
 }
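
The new genX(cmd_buffer_apply_pipe_flushes)() call suggests PIPE_CONTROL work is now accumulated as pending bits on the command buffer and emitted at a few well-defined points instead of inline at each state change. A rough sketch of that pattern, with the field and bit names assumed:

    /* Callers request flushes by setting bits... */
    cmd_buffer->state.pending_pipe_bits |= ANV_PIPE_CS_STALL_BIT;

    /* ...and apply_pipe_flushes() later folds everything that accumulated
     * into a single PIPE_CONTROL.
     */
    if (cmd_buffer->state.pending_pipe_bits) {
       anv_batch_emit(&cmd_buffer->batch, GENX(PIPE_CONTROL), pc) {
          pc.CommandStreamerStallEnable =
             cmd_buffer->state.pending_pipe_bits & ANV_PIPE_CS_STALL_BIT;
       }
       cmd_buffer->state.pending_pipe_bits = 0;
    }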
 
 void
@@ -448,7 +255,7 @@ genX(cmd_buffer_flush_dynamic_state)(struct anv_cmd_buffer *cmd_buffer)
          .BlendConstantColorBlue = cmd_buffer->state.dynamic.blend_constants[2],
          .BlendConstantColorAlpha = cmd_buffer->state.dynamic.blend_constants[3],
          .StencilReferenceValue = d->stencil_reference.front & 0xff,
-         .BackFaceStencilReferenceValue = d->stencil_reference.back & 0xff,
+         .BackfaceStencilReferenceValue = d->stencil_reference.back & 0xff,
       };
       GENX(COLOR_CALC_STATE_pack)(NULL, cc_state.map, &cc);
       if (!cmd_buffer->device->info.has_llc)