anv: move BaseVertexID/BaseInstanceID vertex buffer index to 31
diff --git a/src/intel/vulkan/genX_cmd_buffer.c b/src/intel/vulkan/genX_cmd_buffer.c
index 47d3322e4856618ba6aaa62a2bac64c41ecd4966..d3fc95ea185db4d02d6bf41902c3c7249492f2e5 100644
--- a/src/intel/vulkan/genX_cmd_buffer.c
+++ b/src/intel/vulkan/genX_cmd_buffer.c
@@ -55,8 +55,6 @@ genX(cmd_buffer_emit_state_base_address)(struct anv_cmd_buffer *cmd_buffer)
 {
    struct anv_device *device = cmd_buffer->device;
 
-/* XXX: Do we need this on more than just BDW? */
-#if (GEN_GEN >= 8)
    /* Emit a render target cache flush.
     *
     * This isn't documented anywhere in the PRM.  However, it seems to be
@@ -65,9 +63,10 @@ genX(cmd_buffer_emit_state_base_address)(struct anv_cmd_buffer *cmd_buffer)
     * clear depth, reset state base address, and then go render stuff.
     */
    anv_batch_emit(&cmd_buffer->batch, GENX(PIPE_CONTROL), pc) {
+      pc.DCFlushEnable = true;
       pc.RenderTargetCacheFlushEnable = true;
+      pc.CommandStreamerStallEnable = true;
    }
-#endif
 
    anv_batch_emit(&cmd_buffer->batch, GENX(STATE_BASE_ADDRESS), sba) {
       sba.GeneralStateBaseAddress = (struct anv_address) { NULL, 0 };
@@ -148,6 +147,8 @@ genX(cmd_buffer_emit_state_base_address)(struct anv_cmd_buffer *cmd_buffer)
     */
    anv_batch_emit(&cmd_buffer->batch, GENX(PIPE_CONTROL), pc) {
       pc.TextureCacheInvalidationEnable = true;
+      pc.ConstantCacheInvalidationEnable = true;
+      pc.StateCacheInvalidationEnable = true;
    }
 }
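
Note (not part of the patch): a summary of the resulting sequence.  With the
three hunks above applied, genX(cmd_buffer_emit_state_base_address)() brackets
STATE_BASE_ADDRESS like this on every gen, not just gen8+:

   /* Summary only; see the hunks above for the actual emission.  Flush first,
    * re-program the base addresses, then invalidate the caches that may hold
    * data addressed relative to the old bases.
    */
   PIPE_CONTROL       { RenderTargetCacheFlushEnable, DCFlushEnable,
                        CommandStreamerStallEnable }
   STATE_BASE_ADDRESS { ... }
   PIPE_CONTROL       { TextureCacheInvalidationEnable,
                        ConstantCacheInvalidationEnable,
                        StateCacheInvalidationEnable }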
 
@@ -303,13 +304,93 @@ need_input_attachment_state(const struct anv_render_pass_attachment *att)
    if (!(att->usage & VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT))
       return false;
 
-   /* We only allocate input attachment states for color and depth surfaces.
-    * Stencil doesn't allow compression so we can just use the texture surface
-    * state from the view
+   /* We only allocate input attachment states for color surfaces. Compression
+    * is not yet enabled for depth textures, and stencil doesn't allow
+    * compression, so we can just use the texture surface state from the view.
     */
-   return vk_format_is_color(att->format) || vk_format_has_depth(att->format);
+   return vk_format_is_color(att->format);
 }
 
+static enum isl_aux_usage
+layout_to_hiz_usage(VkImageLayout layout, uint8_t samples)
+{
+   switch (layout) {
+   case VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL:
+      return ISL_AUX_USAGE_HIZ;
+   case VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL:
+   case VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL:
+      if (anv_can_sample_with_hiz(GEN_GEN, samples))
+         return ISL_AUX_USAGE_HIZ;
+      /* Fall-through */
+   case VK_IMAGE_LAYOUT_GENERAL:
+      /* This buffer could be used as a source or destination in a transfer
+       * operation. Transfer operations currently don't perform HiZ-enabled
+       * reads and writes.
+       */
+   default:
+      return ISL_AUX_USAGE_NONE;
+   }
+}
+
+/* Transitions a HiZ-enabled depth buffer from one layout to another. Unless
+ * the initial layout is undefined, the HiZ buffer and depth buffer will
+ * represent the same data at the end of this operation.
+ */
+static void
+transition_depth_buffer(struct anv_cmd_buffer *cmd_buffer,
+                        const struct anv_image *image,
+                        VkImageLayout initial_layout,
+                        VkImageLayout final_layout)
+{
+   assert(image);
+
+   if (image->aux_usage != ISL_AUX_USAGE_HIZ || final_layout == initial_layout)
+      return;
+
+   const bool hiz_enabled = layout_to_hiz_usage(initial_layout, image->samples) ==
+                            ISL_AUX_USAGE_HIZ;
+   const bool enable_hiz = layout_to_hiz_usage(final_layout, image->samples) ==
+                           ISL_AUX_USAGE_HIZ;
+
+   enum blorp_hiz_op hiz_op;
+   if (initial_layout == VK_IMAGE_LAYOUT_UNDEFINED) {
+      /* The aux HiZ buffer was already initialized at BindImageMemory time,
+       * so no HiZ resolve or clear is needed here just to keep the GPU from
+       * hanging. This initial layout also indicates that the user doesn't
+       * care about the data currently in the buffer, so resolves are not
+       * necessary except for the special case noted below.
+       */
+      hiz_op = BLORP_HIZ_OP_NONE;
+   } else if (hiz_enabled && !enable_hiz) {
+      hiz_op = BLORP_HIZ_OP_DEPTH_RESOLVE;
+   } else if (!hiz_enabled && enable_hiz) {
+      hiz_op = BLORP_HIZ_OP_HIZ_RESOLVE;
+   } else {
+      assert(hiz_enabled == enable_hiz);
+      /* If the same buffer will be used, no resolves are necessary except for
+       * the special case noted below.
+       */
+      hiz_op = BLORP_HIZ_OP_NONE;
+   }
+
+   if (hiz_op != BLORP_HIZ_OP_NONE)
+      anv_gen8_hiz_op_resolve(cmd_buffer, image, hiz_op);
+
+   /* When an image can be sampled with HiZ enabled, all shader sampling
+    * loads its data through the HiZ buffer. Therefore, when transitioning to
+    * the general layout - which currently routes all writes to the depth
+    * buffer - we must ensure that the HiZ buffer stays consistent with the
+    * depth buffer by performing an additional HiZ resolve if the operation
+    * required by this transition was not already a HiZ resolve.
+    */
+   if (final_layout == VK_IMAGE_LAYOUT_GENERAL &&
+       anv_can_sample_with_hiz(GEN_GEN, image->samples) &&
+       hiz_op != BLORP_HIZ_OP_HIZ_RESOLVE) {
+      anv_gen8_hiz_op_resolve(cmd_buffer, image, BLORP_HIZ_OP_HIZ_RESOLVE);
+   }
+}
+
+
 /**
  * Setup anv_cmd_state::attachments for vkCmdBeginRenderPass.
  */
@@ -430,6 +511,7 @@ genX(cmd_buffer_setup_attachments)(struct anv_cmd_buffer *cmd_buffer,
             }
          }
 
+         state->attachments[i].current_layout = att->initial_layout;
          state->attachments[i].pending_clear_aspects = clear_aspects;
          if (clear_aspects)
             state->attachments[i].clear_value = begin->pClearValues[i];
@@ -446,6 +528,7 @@ genX(cmd_buffer_setup_attachments)(struct anv_cmd_buffer *cmd_buffer,
 
             struct isl_view view = iview->isl;
             view.usage |= ISL_SURF_USAGE_RENDER_TARGET_BIT;
+            view.swizzle = anv_swizzle_for_render(view.swizzle);
             isl_surf_fill_state(isl_dev,
                                 state->attachments[i].color_rt_state.map,
                                 .surf = &iview->image->color_surface.isl,
@@ -459,23 +542,21 @@ genX(cmd_buffer_setup_attachments)(struct anv_cmd_buffer *cmd_buffer,
                                   state->attachments[i].aux_usage,
                                   state->attachments[i].color_rt_state);
          } else {
-            state->attachments[i].aux_usage = iview->image->aux_usage;
+            if (iview->image->aux_usage == ISL_AUX_USAGE_HIZ) {
+               state->attachments[i].aux_usage =
+                  layout_to_hiz_usage(att->initial_layout, iview->image->samples);
+            } else {
+               state->attachments[i].aux_usage = ISL_AUX_USAGE_NONE;
+            }
             state->attachments[i].input_aux_usage = ISL_AUX_USAGE_NONE;
          }
 
          if (need_input_attachment_state(&pass->attachments[i])) {
-            const struct isl_surf *surf;
-            if (att_aspects == VK_IMAGE_ASPECT_COLOR_BIT) {
-               surf = &iview->image->color_surface.isl;
-            } else {
-               surf = &iview->image->depth_surface.isl;
-            }
-
             struct isl_view view = iview->isl;
             view.usage |= ISL_SURF_USAGE_TEXTURE_BIT;
             isl_surf_fill_state(isl_dev,
                                 state->attachments[i].input_att_state.map,
-                                .surf = surf,
+                                .surf = &iview->image->color_surface.isl,
                                 .view = &view,
                                 .aux_surf = &iview->image->aux_surface.isl,
                                 .aux_usage = state->attachments[i].input_aux_usage,
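
Note (not part of the patch): a worked example of the resolve choice above,
for a HiZ-enabled depth image, derived purely from layout_to_hiz_usage() and
transition_depth_buffer() in this hunk:

   /* initial: DEPTH_STENCIL_ATTACHMENT_OPTIMAL -> ISL_AUX_USAGE_HIZ
    * final:   SHADER_READ_ONLY_OPTIMAL         -> ISL_AUX_USAGE_NONE
    *          (hardware that cannot sample with HiZ falls through to default)
    *
    *    hiz_enabled && !enable_hiz  =>  BLORP_HIZ_OP_DEPTH_RESOLVE
    *
    * On hardware that can sample with HiZ, both layouts map to
    * ISL_AUX_USAGE_HIZ and no resolve is emitted for this transition.
    */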
@@ -866,6 +947,13 @@ void genX(CmdPipelineBarrier)(
    for (uint32_t i = 0; i < imageMemoryBarrierCount; i++) {
       src_flags |= pImageMemoryBarriers[i].srcAccessMask;
       dst_flags |= pImageMemoryBarriers[i].dstAccessMask;
+      ANV_FROM_HANDLE(anv_image, image, pImageMemoryBarriers[i].image);
+      if (pImageMemoryBarriers[i].subresourceRange.aspectMask &
+          VK_IMAGE_ASPECT_DEPTH_BIT) {
+         transition_depth_buffer(cmd_buffer, image,
+                                 pImageMemoryBarriers[i].oldLayout,
+                                 pImageMemoryBarriers[i].newLayout);
+      }
    }
 
    enum anv_pipe_bits pipe_bits = 0;
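
Note (not part of the patch): illustrative application-side usage only; the
image and command buffer handles are the application's own.  A barrier like
the one below is what the new loop above hands to transition_depth_buffer();
with these layouts it exercises the depth-resolve path from the worked example
above on hardware that cannot sample with HiZ:

   VkImageMemoryBarrier barrier = {
      .sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,
      .srcAccessMask = VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT,
      .dstAccessMask = VK_ACCESS_SHADER_READ_BIT,
      .oldLayout = VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL,
      .newLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL,
      .srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED,
      .dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED,
      .image = depth_image,                   /* application-owned VkImage */
      .subresourceRange = {
         .aspectMask = VK_IMAGE_ASPECT_DEPTH_BIT,
         .baseMipLevel = 0,   .levelCount = 1,
         .baseArrayLayer = 0, .layerCount = 1,
      },
   };
   vkCmdPipelineBarrier(cmd_buf,
                        VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT,
                        VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT,
                        0,          /* dependencyFlags */
                        0, NULL,    /* memory barriers */
                        0, NULL,    /* buffer memory barriers */
                        1, &barrier);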
@@ -1091,9 +1179,9 @@ emit_binding_table(struct anv_cmd_buffer *cmd_buffer,
 
       case VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT:
          assert(stage == MESA_SHADER_FRAGMENT);
-         if (desc->image_view->aspect_mask == VK_IMAGE_ASPECT_STENCIL_BIT) {
-            /* For stencil input attachments, we treat it like any old texture
-             * that a user may have bound.
+         if (desc->image_view->aspect_mask != VK_IMAGE_ASPECT_COLOR_BIT) {
+            /* For depth and stencil input attachments, we treat them like any
+             * old texture that a user may have bound.
              */
             surface_state = desc->image_view->sampler_surface_state;
             assert(surface_state.alloc_size);
@@ -1101,9 +1189,9 @@ emit_binding_table(struct anv_cmd_buffer *cmd_buffer,
                                   desc->image_view->image->aux_usage,
                                   surface_state);
          } else {
-            /* For depth and color input attachments, we create the surface
-             * state at vkBeginRenderPass time so that we can include aux
-             * and clear color information.
+            /* For color input attachments, we create the surface state at
+             * vkBeginRenderPass time so that we can include aux and clear
+             * color information.
              */
             assert(binding->input_attachment_index < subpass->input_count);
             const unsigned subpass_att = binding->input_attachment_index;
@@ -1507,7 +1595,7 @@ emit_base_vertex_instance_bo(struct anv_cmd_buffer *cmd_buffer,
 
    GENX(VERTEX_BUFFER_STATE_pack)(&cmd_buffer->batch, p + 1,
       &(struct GENX(VERTEX_BUFFER_STATE)) {
-         .VertexBufferIndex = 32, /* Reserved for this */
+         .VertexBufferIndex = ANV_SVGS_VB_INDEX, /* Reserved for this */
          .AddressModifyEnable = true,
          .BufferPitch = 0,
 #if (GEN_GEN >= 8)
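
Note (not part of the patch): ANV_SVGS_VB_INDEX is a driver-private constant
defined in anv_private.h; per the commit subject the slot it names is vertex
buffer index 31.  The sketch below only illustrates the intent -- a named slot
reserved for the BaseVertexID/BaseInstanceID buffer instead of the bare
literal 32 used before:

   /* Sketch only -- see anv_private.h for the real definition. */
   #define ANV_SVGS_VB_INDEX 31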
@@ -2105,12 +2193,7 @@ cmd_buffer_emit_depth_stencil(struct anv_cmd_buffer *cmd_buffer)
             depth_stencil_surface_type(image->depth_surface.isl.dim);
          db.DepthWriteEnable              = true;
          db.StencilWriteEnable            = has_stencil;
-
-         if (cmd_buffer->state.pass->subpass_count == 1) {
-            db.HierarchicalDepthBufferEnable = has_hiz;
-         } else {
-            anv_finishme("Multiple-subpass HiZ not implemented");
-         }
+         db.HierarchicalDepthBufferEnable = has_hiz;
 
          db.SurfaceFormat = isl_surf_get_depth_format(&device->isl_dev,
                                                       &image->depth_surface.isl);
@@ -2237,10 +2320,7 @@ cmd_buffer_emit_depth_stencil(struct anv_cmd_buffer *cmd_buffer)
    anv_batch_emit(&cmd_buffer->batch, GENX(3DSTATE_CLEAR_PARAMS), cp) {
       if (has_hiz) {
          cp.DepthClearValueValid = true;
-         const uint32_t ds =
-            cmd_buffer->state.subpass->depth_stencil_attachment;
-         cp.DepthClearValue =
-            cmd_buffer->state.attachments[ds].clear_value.depthStencil.depth;
+         cp.DepthClearValue = ANV_HZ_FC_VAL;
       }
    }
 }
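
Note (not part of the patch): ANV_HZ_FC_VAL is a driver-wide constant from
anv_private.h (1.0f, if memory serves).  As I read the change, HiZ fast depth
clears are only performed for that one value, so when HiZ is in use
3DSTATE_CLEAR_PARAMS can be programmed with the constant directly instead of
looking up the per-attachment clear value:

   /* Sketch only -- the real constant lives in anv_private.h. */
   #define ANV_HZ_FC_VAL 1.0f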
@@ -2253,9 +2333,22 @@ genX(cmd_buffer_set_subpass)(struct anv_cmd_buffer *cmd_buffer,
 
    cmd_buffer->state.dirty |= ANV_CMD_DIRTY_RENDER_TARGETS;
 
+   const struct anv_image_view *iview =
+      anv_cmd_buffer_get_depth_stencil_view(cmd_buffer);
+
+   if (iview && iview->image->aux_usage == ISL_AUX_USAGE_HIZ) {
+      const uint32_t ds = subpass->depth_stencil_attachment;
+      transition_depth_buffer(cmd_buffer, iview->image,
+                              cmd_buffer->state.attachments[ds].current_layout,
+                              cmd_buffer->state.subpass->depth_stencil_layout);
+      cmd_buffer->state.attachments[ds].current_layout =
+         cmd_buffer->state.subpass->depth_stencil_layout;
+      cmd_buffer->state.attachments[ds].aux_usage =
+         layout_to_hiz_usage(cmd_buffer->state.subpass->depth_stencil_layout,
+                             iview->image->samples);
+   }
+
    cmd_buffer_emit_depth_stencil(cmd_buffer);
-   genX(cmd_buffer_emit_hz_op)(cmd_buffer, BLORP_HIZ_OP_HIZ_RESOLVE);
-   genX(cmd_buffer_emit_hz_op)(cmd_buffer, BLORP_HIZ_OP_DEPTH_CLEAR);
 
    anv_cmd_buffer_clear_subpass(cmd_buffer);
 }
@@ -2287,6 +2380,20 @@ void genX(CmdNextSubpass)(
 
    assert(cmd_buffer->level == VK_COMMAND_BUFFER_LEVEL_PRIMARY);
 
+   const struct anv_image_view *iview =
+      anv_cmd_buffer_get_depth_stencil_view(cmd_buffer);
+
+   if (iview && iview->image->aux_usage == ISL_AUX_USAGE_HIZ) {
+      const uint32_t ds = cmd_buffer->state.subpass->depth_stencil_attachment;
+
+      if (cmd_buffer->state.subpass - cmd_buffer->state.pass->subpasses ==
+          cmd_buffer->state.pass->attachments[ds].last_subpass_idx) {
+         transition_depth_buffer(cmd_buffer, iview->image,
+                                 cmd_buffer->state.attachments[ds].current_layout,
+                                 cmd_buffer->state.pass->attachments[ds].final_layout);
+      }
+   }
+
    anv_cmd_buffer_resolve_subpass(cmd_buffer);
    genX(cmd_buffer_set_subpass)(cmd_buffer, cmd_buffer->state.subpass + 1);
 }
@@ -2296,7 +2403,20 @@ void genX(CmdEndRenderPass)(
 {
    ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
 
-   genX(cmd_buffer_emit_hz_op)(cmd_buffer, BLORP_HIZ_OP_DEPTH_RESOLVE);
+   const struct anv_image_view *iview =
+      anv_cmd_buffer_get_depth_stencil_view(cmd_buffer);
+
+   if (iview && iview->image->aux_usage == ISL_AUX_USAGE_HIZ) {
+      const uint32_t ds = cmd_buffer->state.subpass->depth_stencil_attachment;
+
+      if (cmd_buffer->state.subpass - cmd_buffer->state.pass->subpasses ==
+          cmd_buffer->state.pass->attachments[ds].last_subpass_idx) {
+         transition_depth_buffer(cmd_buffer, iview->image,
+                                 cmd_buffer->state.attachments[ds].current_layout,
+                                 cmd_buffer->state.pass->attachments[ds].final_layout);
+      }
+   }
+
    anv_cmd_buffer_resolve_subpass(cmd_buffer);
 
 #ifndef NDEBUG