anv: Rework fences
[mesa.git] / src / intel / vulkan / genX_pipeline_util.h
index 679fb5768351370674b307c9ff2da97615e6718c..129ae94aebaf2ab9189c0d6ecb227935e8e77358 100644 (file)
  * IN THE SOFTWARE.
  */
 
+#ifndef GENX_PIPELINE_UTIL_H
+#define GENX_PIPELINE_UTIL_H
+
+#include "common/gen_l3_config.h"
+#include "common/gen_sample_positions.h"
 #include "vk_format_info.h"
-#include "genX_multisample.h"
 
 static uint32_t
 vertex_element_comp_control(enum isl_format format, unsigned comp)
@@ -52,26 +56,14 @@ vertex_element_comp_control(enum isl_format format, unsigned comp)
 
 static void
 emit_vertex_input(struct anv_pipeline *pipeline,
-                  const VkPipelineVertexInputStateCreateInfo *info,
-                  const struct anv_graphics_pipeline_create_info *extra)
+                  const VkPipelineVertexInputStateCreateInfo *info)
 {
    const struct brw_vs_prog_data *vs_prog_data = get_vs_prog_data(pipeline);
 
-   uint32_t elements;
-   if (extra && extra->disable_vs) {
-      /* If the VS is disabled, just assume the user knows what they're
-       * doing and apply the layout blindly.  This can only come from
-       * meta, so this *should* be safe.
-       */
-      elements = 0;
-      for (uint32_t i = 0; i < info->vertexAttributeDescriptionCount; i++)
-         elements |= (1 << info->pVertexAttributeDescriptions[i].location);
-   } else {
-      /* Pull inputs_read out of the VS prog data */
-      uint64_t inputs_read = vs_prog_data->inputs_read;
-      assert((inputs_read & ((1 << VERT_ATTRIB_GENERIC0) - 1)) == 0);
-      elements = inputs_read >> VERT_ATTRIB_GENERIC0;
-   }
+   /* Pull inputs_read out of the VS prog data */
+   const uint64_t inputs_read = vs_prog_data->inputs_read;
+   assert((inputs_read & ((1 << VERT_ATTRIB_GENERIC0) - 1)) == 0);
+   const uint32_t elements = inputs_read >> VERT_ATTRIB_GENERIC0;
 
 #if GEN_GEN >= 8
    /* On BDW+, we only need to allocate space for base ids.  Setting up
@@ -134,8 +126,8 @@ emit_vertex_input(struct anv_pipeline *pipeline,
        * VERTEX_BUFFER_STATE which we emit later.
        */
       anv_batch_emit(&pipeline->batch, GENX(3DSTATE_VF_INSTANCING), vfi) {
-         vfi.InstancingEnable = pipeline->instancing_enable[desc->binding],
-         vfi.VertexElementIndex = slot,
+         vfi.InstancingEnable = pipeline->instancing_enable[desc->binding];
+         vfi.VertexElementIndex = slot;
          /* Vulkan so far doesn't have an instance divisor, so
           * this is always 1 (ignored if not instancing). */
          vfi.InstanceDataStepRate = 1;
@@ -187,12 +179,129 @@ emit_vertex_input(struct anv_pipeline *pipeline,
 #endif
 }
 
-static inline void
-emit_urb_setup(struct anv_pipeline *pipeline)
+/* Partition the URB between push constants, the VS, and the GS, then emit
+ * the resulting 3DSTATE_URB_{VS,HS,DS,GS} packets into `batch`.
+ *
+ * vs_size / gs_size are the per-entry URB allocation sizes in 64-byte units
+ * (each forced to 1 below when its stage is not in `active_stages`).  The
+ * URB is laid out as [push constants][VS entries][GS entries]; HS and DS are
+ * given a starting address but zero entries.
+ */
+void
+genX(emit_urb_setup)(struct anv_device *device, struct anv_batch *batch,
+                     VkShaderStageFlags active_stages,
+                     unsigned vs_size, unsigned gs_size,
+                     const struct gen_l3_config *l3_config)
 {
-#if GEN_GEN == 7 && !GEN_IS_HASWELL
-   struct anv_device *device = pipeline->device;
+   if (!(active_stages & VK_SHADER_STAGE_VERTEX_BIT))
+      vs_size = 1;
+
+   if (!(active_stages & VK_SHADER_STAGE_GEOMETRY_BIT))
+      gs_size = 1;
+
+   unsigned vs_entry_size_bytes = vs_size * 64;
+   unsigned gs_entry_size_bytes = gs_size * 64;
+
+   /* From p35 of the Ivy Bridge PRM (section 1.7.1: 3DSTATE_URB_GS):
+    *
+    *     VS Number of URB Entries must be divisible by 8 if the VS URB Entry
+    *     Allocation Size is less than 9 512-bit URB entries.
+    *
+    * Similar text exists for GS.
+    */
+   unsigned vs_granularity = (vs_size < 9) ? 8 : 1;
+   unsigned gs_granularity = (gs_size < 9) ? 8 : 1;
+
+   /* URB allocations must be done in 8k chunks. */
+   unsigned chunk_size_bytes = 8192;
+
+   /* Determine the size of the URB in chunks. */
+   const unsigned total_urb_size =
+      gen_get_l3_config_urb_size(&device->info, l3_config);
+   const unsigned urb_chunks = total_urb_size * 1024 / chunk_size_bytes;
+
+   /* Reserve space for push constants */
+   unsigned push_constant_kb;
+   if (device->info.gen >= 8)
+      push_constant_kb = 32;
+   else if (device->info.is_haswell)
+      push_constant_kb = device->info.gt == 3 ? 32 : 16;
+   else
+      push_constant_kb = 16;
+
+   unsigned push_constant_bytes = push_constant_kb * 1024;
+   unsigned push_constant_chunks =
+      push_constant_bytes / chunk_size_bytes;
+
+   /* Initially, assign each stage the minimum amount of URB space it needs,
+    * and make a note of how much additional space it "wants" (the amount of
+    * additional space it could actually make use of).
+    */
+
+   /* VS has a lower limit on the number of URB entries */
+   unsigned vs_chunks =
+      ALIGN(device->info.urb.min_vs_entries * vs_entry_size_bytes,
+            chunk_size_bytes) / chunk_size_bytes;
+   unsigned vs_wants =
+      ALIGN(device->info.urb.max_vs_entries * vs_entry_size_bytes,
+            chunk_size_bytes) / chunk_size_bytes - vs_chunks;
+
+   unsigned gs_chunks = 0;
+   unsigned gs_wants = 0;
+   if (active_stages & VK_SHADER_STAGE_GEOMETRY_BIT) {
+      /* There are two constraints on the minimum amount of URB space we can
+       * allocate:
+       *
+       * (1) We need room for at least 2 URB entries, since we always operate
+       * the GS in DUAL_OBJECT mode.
+       *
+       * (2) We can't allocate less than nr_gs_entries_granularity.
+       */
+      gs_chunks = ALIGN(MAX2(gs_granularity, 2) * gs_entry_size_bytes,
+                        chunk_size_bytes) / chunk_size_bytes;
+      gs_wants =
+         ALIGN(device->info.urb.max_gs_entries * gs_entry_size_bytes,
+               chunk_size_bytes) / chunk_size_bytes - gs_chunks;
+   }
+
+   /* There should always be enough URB space to satisfy the minimum
+    * requirements of each stage.
+    */
+   unsigned total_needs = push_constant_chunks + vs_chunks + gs_chunks;
+   assert(total_needs <= urb_chunks);
+
+   /* Mete out remaining space (if any) in proportion to "wants". */
+   unsigned total_wants = vs_wants + gs_wants;
+   unsigned remaining_space = urb_chunks - total_needs;
+   if (remaining_space > total_wants)
+      remaining_space = total_wants;
+   if (remaining_space > 0) {
+      /* NOTE(review): rounding here can make vs_additional exceed vs_wants
+       * only via round-to-nearest; the over-allocation assert below guards
+       * the sum — presumably intentional, confirm against i965's URB code.
+       */
+      unsigned vs_additional = (unsigned)
+         round(vs_wants * (((double) remaining_space) / total_wants));
+      vs_chunks += vs_additional;
+      remaining_space -= vs_additional;
+      gs_chunks += remaining_space;
+   }
+
+   /* Sanity check that we haven't over-allocated. */
+   assert(push_constant_chunks + vs_chunks + gs_chunks <= urb_chunks);
 
+   /* Finally, compute the number of entries that can fit in the space
+    * allocated to each stage.
+    */
+   unsigned nr_vs_entries = vs_chunks * chunk_size_bytes / vs_entry_size_bytes;
+   unsigned nr_gs_entries = gs_chunks * chunk_size_bytes / gs_entry_size_bytes;
+
+   /* Since we rounded up when computing *_wants, this may be slightly more
+    * than the maximum allowed amount, so correct for that.
+    */
+   nr_vs_entries = MIN2(nr_vs_entries, device->info.urb.max_vs_entries);
+   nr_gs_entries = MIN2(nr_gs_entries, device->info.urb.max_gs_entries);
+
+   /* Ensure that we program a multiple of the granularity. */
+   nr_vs_entries = ROUND_DOWN_TO(nr_vs_entries, vs_granularity);
+   nr_gs_entries = ROUND_DOWN_TO(nr_gs_entries, gs_granularity);
+
+   /* Finally, sanity check to make sure we have at least the minimum number
+    * of entries needed for each stage.
+    */
+   assert(nr_vs_entries >= device->info.urb.min_vs_entries);
+   if (active_stages & VK_SHADER_STAGE_GEOMETRY_BIT)
+      assert(nr_gs_entries >= 2);
+
+#if GEN_GEN == 7 && !GEN_IS_HASWELL
    /* From the IVB PRM Vol. 2, Part 1, Section 3.2.1:
     *
     *    "A PIPE_CONTROL with Post-Sync Operation set to 1h and a depth stall
@@ -201,23 +310,54 @@ emit_urb_setup(struct anv_pipeline *pipeline)
     *    3DSTATE_SAMPLER_STATE_POINTER_VS command.  Only one PIPE_CONTROL
     *    needs to be sent before any combination of VS associated 3DSTATE."
     */
-   anv_batch_emit(&pipeline->batch, GEN7_PIPE_CONTROL, pc) {
+   anv_batch_emit(batch, GEN7_PIPE_CONTROL, pc) {
       pc.DepthStallEnable  = true;
       pc.PostSyncOperation = WriteImmediateData;
       pc.Address           = (struct anv_address) { &device->workaround_bo, 0 };
    }
 #endif
 
-   for (int i = MESA_SHADER_VERTEX; i <= MESA_SHADER_GEOMETRY; i++) {
-      anv_batch_emit(&pipeline->batch, GENX(3DSTATE_URB_VS), urb) {
-         urb._3DCommandSubOpcode       = 48 + i;
-         urb.VSURBStartingAddress      = pipeline->urb.start[i];
-         urb.VSURBEntryAllocationSize  = pipeline->urb.size[i] - 1;
-         urb.VSNumberofURBEntries      = pipeline->urb.entries[i];
-      }
+   /* Lay out the URB in the following order:
+    * - push constants
+    * - VS
+    * - GS
+    */
+   anv_batch_emit(batch, GENX(3DSTATE_URB_VS), urb) {
+      urb.VSURBStartingAddress      = push_constant_chunks;
+      urb.VSURBEntryAllocationSize  = vs_size - 1;
+      urb.VSNumberofURBEntries      = nr_vs_entries;
+   }
+
+   anv_batch_emit(batch, GENX(3DSTATE_URB_HS), urb) {
+      urb.HSURBStartingAddress      = push_constant_chunks;
+   }
+
+   anv_batch_emit(batch, GENX(3DSTATE_URB_DS), urb) {
+      urb.DSURBStartingAddress      = push_constant_chunks;
+   }
+
+   anv_batch_emit(batch, GENX(3DSTATE_URB_GS), urb) {
+      urb.GSURBStartingAddress      = push_constant_chunks + vs_chunks;
+      urb.GSURBEntryAllocationSize  = gs_size - 1;
+      urb.GSNumberofURBEntries      = nr_gs_entries;
    }
 }
 
+/* Pipeline-emission wrapper: pull each active stage's URB entry size out of
+ * its prog data (0 when the stage is absent from active_stages) and forward
+ * to genX(emit_urb_setup) with the pipeline's device, batch, and L3 config.
+ */
+static inline void
+emit_urb_setup(struct anv_pipeline *pipeline)
+{
+   unsigned vs_entry_size =
+      (pipeline->active_stages & VK_SHADER_STAGE_VERTEX_BIT) ?
+      get_vs_prog_data(pipeline)->base.urb_entry_size : 0;
+   unsigned gs_entry_size =
+      (pipeline->active_stages & VK_SHADER_STAGE_GEOMETRY_BIT) ?
+      get_gs_prog_data(pipeline)->base.urb_entry_size : 0;
+
+   genX(emit_urb_setup)(pipeline->device, &pipeline->batch,
+                        pipeline->active_stages, vs_entry_size, gs_entry_size,
+                        pipeline->urb.l3_config);
+}
+
 static void
 emit_3dstate_sbe(struct anv_pipeline *pipeline)
 {
@@ -237,44 +377,12 @@ emit_3dstate_sbe(struct anv_pipeline *pipeline)
       .PointSpriteTextureCoordinateOrigin = UPPERLEFT,
       .NumberofSFOutputAttributes = wm_prog_data->num_varying_inputs,
       .ConstantInterpolationEnable = wm_prog_data->flat_inputs,
+   };
 
 #if GEN_GEN >= 9
-      .Attribute0ActiveComponentFormat = ACF_XYZW,
-      .Attribute1ActiveComponentFormat = ACF_XYZW,
-      .Attribute2ActiveComponentFormat = ACF_XYZW,
-      .Attribute3ActiveComponentFormat = ACF_XYZW,
-      .Attribute4ActiveComponentFormat = ACF_XYZW,
-      .Attribute5ActiveComponentFormat = ACF_XYZW,
-      .Attribute6ActiveComponentFormat = ACF_XYZW,
-      .Attribute7ActiveComponentFormat = ACF_XYZW,
-      .Attribute8ActiveComponentFormat = ACF_XYZW,
-      .Attribute9ActiveComponentFormat = ACF_XYZW,
-      .Attribute10ActiveComponentFormat = ACF_XYZW,
-      .Attribute11ActiveComponentFormat = ACF_XYZW,
-      .Attribute12ActiveComponentFormat = ACF_XYZW,
-      .Attribute13ActiveComponentFormat = ACF_XYZW,
-      .Attribute14ActiveComponentFormat = ACF_XYZW,
-      .Attribute15ActiveComponentFormat = ACF_XYZW,
-      /* wow, much field, very attribute */
-      .Attribute16ActiveComponentFormat = ACF_XYZW,
-      .Attribute17ActiveComponentFormat = ACF_XYZW,
-      .Attribute18ActiveComponentFormat = ACF_XYZW,
-      .Attribute19ActiveComponentFormat = ACF_XYZW,
-      .Attribute20ActiveComponentFormat = ACF_XYZW,
-      .Attribute21ActiveComponentFormat = ACF_XYZW,
-      .Attribute22ActiveComponentFormat = ACF_XYZW,
-      .Attribute23ActiveComponentFormat = ACF_XYZW,
-      .Attribute24ActiveComponentFormat = ACF_XYZW,
-      .Attribute25ActiveComponentFormat = ACF_XYZW,
-      .Attribute26ActiveComponentFormat = ACF_XYZW,
-      .Attribute27ActiveComponentFormat = ACF_XYZW,
-      .Attribute28ActiveComponentFormat = ACF_XYZW,
-      .Attribute29ActiveComponentFormat = ACF_XYZW,
-      .Attribute28ActiveComponentFormat = ACF_XYZW,
-      .Attribute29ActiveComponentFormat = ACF_XYZW,
-      .Attribute30ActiveComponentFormat = ACF_XYZW,
+   for (unsigned i = 0; i < 32; i++)
+      sbe.AttributeActiveComponentFormat[i] = ACF_XYZW;
 #endif
-   };
 
 #if GEN_GEN >= 8
    /* On Broadwell, they broke 3DSTATE_SBE into two packets */
@@ -368,14 +476,13 @@ emit_rs_state(struct anv_pipeline *pipeline,
               const VkPipelineRasterizationStateCreateInfo *rs_info,
               const VkPipelineMultisampleStateCreateInfo *ms_info,
               const struct anv_render_pass *pass,
-              const struct anv_subpass *subpass,
-              const struct anv_graphics_pipeline_create_info *extra)
+              const struct anv_subpass *subpass)
 {
    struct GENX(3DSTATE_SF) sf = {
       GENX(3DSTATE_SF_header),
    };
 
-   sf.ViewportTransformEnable = !(extra && extra->use_rectlist);
+   sf.ViewportTransformEnable = true;
    sf.StatisticsEnable = true;
    sf.TriangleStripListProvokingVertexSelect = 0;
    sf.LineStripListProvokingVertexSelect = 0;
@@ -408,7 +515,7 @@ emit_rs_state(struct anv_pipeline *pipeline,
    raster.CullMode = vk_to_gen_cullmode[rs_info->cullMode];
    raster.FrontFaceFillMode = vk_to_gen_fillmode[rs_info->polygonMode];
    raster.BackFaceFillMode = vk_to_gen_fillmode[rs_info->polygonMode];
-   raster.ScissorRectangleEnable = !(extra && extra->use_rectlist);
+   raster.ScissorRectangleEnable = true;
 
 #if GEN_GEN >= 9
    /* GEN9+ splits ViewportZClipTestEnable into near and far enable bits */
@@ -493,16 +600,16 @@ emit_ms_state(struct anv_pipeline *pipeline,
 
       switch (samples) {
       case 1:
-         SAMPLE_POS_1X(ms.Sample);
+         GEN_SAMPLE_POS_1X(ms.Sample);
          break;
       case 2:
-         SAMPLE_POS_2X(ms.Sample);
+         GEN_SAMPLE_POS_2X(ms.Sample);
          break;
       case 4:
-         SAMPLE_POS_4X(ms.Sample);
+         GEN_SAMPLE_POS_4X(ms.Sample);
          break;
       case 8:
-         SAMPLE_POS_8X(ms.Sample);
+         GEN_SAMPLE_POS_8X(ms.Sample);
          break;
       default:
          break;
@@ -633,10 +740,7 @@ emit_ds_state(struct anv_pipeline *pipeline,
    };
 
    VkImageAspectFlags aspects = 0;
-   if (pass->attachments == NULL) {
-      /* This comes from meta.  Assume we have verything. */
-      aspects = VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT;
-   } else if (subpass->depth_stencil_attachment != VK_ATTACHMENT_UNUSED) {
+   if (subpass->depth_stencil_attachment != VK_ATTACHMENT_UNUSED) {
       VkFormat depth_stencil_format =
          pass->attachments[subpass->depth_stencil_attachment].format;
       aspects = vk_format_aspects(depth_stencil_format);
@@ -700,11 +804,15 @@ emit_cb_state(struct anv_pipeline *pipeline,
       blend_state.Entry[i].WriteDisableBlue = true;
    }
 
-   struct anv_pipeline_bind_map *map =
-      &pipeline->bindings[MESA_SHADER_FRAGMENT];
+   uint32_t surface_count = 0;
+   struct anv_pipeline_bind_map *map;
+   if (anv_pipeline_has_stage(pipeline, MESA_SHADER_FRAGMENT)) {
+      map = &pipeline->shaders[MESA_SHADER_FRAGMENT]->bind_map;
+      surface_count = map->surface_count;
+   }
 
    bool has_writeable_rt = false;
-   for (unsigned i = 0; i < map->surface_count; i++) {
+   for (unsigned i = 0; i < surface_count; i++) {
       struct anv_pipeline_binding *binding = &map->surface_to_descriptor[i];
 
       /* All color attachments are at the beginning of the binding table */
@@ -808,13 +916,12 @@ emit_cb_state(struct anv_pipeline *pipeline,
 static void
 emit_3dstate_clip(struct anv_pipeline *pipeline,
                   const VkPipelineViewportStateCreateInfo *vp_info,
-                  const VkPipelineRasterizationStateCreateInfo *rs_info,
-                  const struct anv_graphics_pipeline_create_info *extra)
+                  const VkPipelineRasterizationStateCreateInfo *rs_info)
 {
    const struct brw_wm_prog_data *wm_prog_data = get_wm_prog_data(pipeline);
    (void) wm_prog_data;
    anv_batch_emit(&pipeline->batch, GENX(3DSTATE_CLIP), clip) {
-      clip.ClipEnable               = !(extra && extra->use_rectlist);
+      clip.ClipEnable               = true;
       clip.EarlyCullEnable          = true;
       clip.APIMode                  = APIMODE_D3D,
       clip.ViewportXYClipTestEnable = true;
@@ -827,7 +934,7 @@ emit_3dstate_clip(struct anv_pipeline *pipeline,
 
       clip.MinimumPointWidth = 0.125;
       clip.MaximumPointWidth = 255.875;
-      clip.MaximumVPIndex    = vp_info->viewportCount - 1;
+      clip.MaximumVPIndex    = (vp_info ? vp_info->viewportCount : 1) - 1;
 
 #if GEN_GEN == 7
       clip.FrontWinding            = vk_to_gen_front_face[rs_info->frontFace];
@@ -848,3 +955,5 @@ emit_3dstate_streamout(struct anv_pipeline *pipeline,
       so.RenderingDisable = rs_info->rasterizerDiscardEnable;
    }
 }
+
+#endif /* GENX_PIPELINE_UTIL_H */