anv: Set driver version to Mesa version
diff --git a/src/intel/vulkan/anv_blorp.c b/src/intel/vulkan/anv_blorp.c
index 16f1692ff53cf27231f457d1b44daf0e5600fc25..45cbbb8690083405ddc8748bafaf2019cf8aae1c 100644
--- a/src/intel/vulkan/anv_blorp.c
+++ b/src/intel/vulkan/anv_blorp.c
@@ -233,7 +233,8 @@ void anv_CmdCopyImage(
          layer_count = pRegions[r].extent.depth;
       } else {
          dst_base_layer = pRegions[r].dstSubresource.baseArrayLayer;
-         layer_count = pRegions[r].dstSubresource.layerCount;
+         layer_count =
+            anv_get_layerCount(dst_image, &pRegions[r].dstSubresource);
       }
 
       unsigned src_base_layer;
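
For reference, anv_get_layerCount is assumed to be a small helper, most likely a macro since this file hands it both VkImageSubresourceLayers and VkImageSubresourceRange, that turns VK_REMAINING_ARRAY_LAYERS into a concrete count. A minimal sketch under that assumption (the array_size field name is illustrative):

/* Hypothetical stand-in for anv_get_layerCount: resolve
 * VK_REMAINING_ARRAY_LAYERS against the image's array size so callers
 * always see a real layer count. */
#define layer_count_of(image, subres)                        \
   ((subres)->layerCount == VK_REMAINING_ARRAY_LAYERS ?      \
    (image)->array_size - (subres)->baseArrayLayer :         \
    (subres)->layerCount)

With an 8-layer image and a subresource starting at layer 2 with layerCount = VK_REMAINING_ARRAY_LAYERS, this yields 6 layers.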
@@ -241,7 +242,8 @@ void anv_CmdCopyImage(
          src_base_layer = pRegions[r].srcOffset.z;
       } else {
          src_base_layer = pRegions[r].srcSubresource.baseArrayLayer;
-         assert(pRegions[r].srcSubresource.layerCount == layer_count);
+         assert(layer_count ==
+                anv_get_layerCount(src_image, &pRegions[r].srcSubresource));
       }
 
       assert(pRegions[r].srcSubresource.aspectMask ==
@@ -313,7 +315,8 @@ copy_buffer_to_image(struct anv_cmd_buffer *cmd_buffer,
          anv_sanitize_image_extent(anv_image->type, pRegions[r].imageExtent);
       if (anv_image->type != VK_IMAGE_TYPE_3D) {
          image.offset.z = pRegions[r].imageSubresource.baseArrayLayer;
-         extent.depth = pRegions[r].imageSubresource.layerCount;
+         extent.depth =
+            anv_get_layerCount(anv_image, &pRegions[r].imageSubresource);
       }
 
       const enum isl_format buffer_format =
@@ -467,7 +470,7 @@ void anv_CmdBlitImage(
          dst_end = pRegions[r].dstOffsets[1].z;
       } else {
          dst_start = dst_res->baseArrayLayer;
-         dst_end = dst_start + dst_res->layerCount;
+         dst_end = dst_start + anv_get_layerCount(dst_image, dst_res);
       }
 
       unsigned src_start, src_end;
@@ -477,7 +480,7 @@ void anv_CmdBlitImage(
          src_end = pRegions[r].srcOffsets[1].z;
       } else {
          src_start = src_res->baseArrayLayer;
-         src_end = src_start + src_res->layerCount;
+         src_end = src_start + anv_get_layerCount(src_image, src_res);
       }
 
       bool flip_z = flip_coords(&src_start, &src_end, &dst_start, &dst_end);
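
The flip_z handling just above copes with blit regions whose source or destination Z range is given back-to-front. A sketch of what flip_coords is assumed to do: sort each range into increasing order and report whether the blit must mirror, i.e. whether exactly one of the two ranges was reversed.

#include <stdbool.h>

/* Hypothetical sketch of flip_coords: normalize both ranges and report
 * whether a mirror is needed (exactly one range was reversed). */
static bool
flip_coords_sketch(unsigned *src0, unsigned *src1,
                   unsigned *dst0, unsigned *dst1)
{
   bool flip = false;

   if (*src0 > *src1) {
      unsigned tmp = *src0; *src0 = *src1; *src1 = tmp;
      flip = !flip;
   }

   if (*dst0 > *dst1) {
      unsigned tmp = *dst0; *dst0 = *dst1; *dst1 = tmp;
      flip = !flip;
   }

   return flip;
}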
@@ -683,10 +686,15 @@ void anv_CmdUpdateBuffer(
     * little data at the top to build its linked list.
     */
    const uint32_t max_update_size =
-      cmd_buffer->device->dynamic_state_block_pool.block_size - 64;
+      cmd_buffer->device->dynamic_state_pool.block_size - 64;
 
    assert(max_update_size < MAX_SURFACE_DIM * 4);
 
+   /* We're about to read data that was written from the CPU.  Flush the
+    * texture cache so we don't get anything stale.
+    */
+   cmd_buffer->state.pending_pipe_bits |= ANV_PIPE_TEXTURE_CACHE_INVALIDATE_BIT;
+
    while (dataSize) {
       const uint32_t copy_size = MIN2(dataSize, max_update_size);
 
@@ -695,12 +703,14 @@ void anv_CmdUpdateBuffer(
 
       memcpy(tmp_data.map, pData, copy_size);
 
+      anv_state_flush(cmd_buffer->device, tmp_data);
+
       int bs = 16;
       bs = gcd_pow2_u64(bs, dstOffset);
       bs = gcd_pow2_u64(bs, copy_size);
 
       do_buffer_copy(&batch,
-                     &cmd_buffer->device->dynamic_state_block_pool.bo,
+                     &cmd_buffer->device->dynamic_state_pool.block_pool.bo,
                      tmp_data.offset,
                      dst_buffer->bo, dst_buffer->offset + dstOffset,
                      copy_size / bs, 1, bs);
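
The bs computation in this hunk picks the texel size for the staging copy: do_buffer_copy() is presumably expressing the raw byte copy as a 1D image copy of copy_size / bs texels, so bs must be a power of two, at most 16 bytes (one RGBA32 texel), that divides both the destination byte offset and the copy size. A sketch of the behaviour assumed of gcd_pow2_u64 (largest power of two dividing both arguments):

#include <assert.h>
#include <stdint.h>

/* Largest power of two dividing both a and b.  Zero is divisible by
 * anything, so if one argument is zero the answer is the lowest set bit of
 * the other; both zero is disallowed.  Sketch only. */
static uint64_t
gcd_pow2_sketch(uint64_t a, uint64_t b)
{
   assert(a != 0 || b != 0);
   uint64_t bits = a | b;
   return bits & -bits;   /* isolate the lowest set bit */
}

With dstOffset = 36 and copy_size = 24, for example, bs drops from 16 to 4, and the copy is issued as six 4-byte texels.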
@@ -989,6 +999,25 @@ clear_color_attachment(struct anv_cmd_buffer *cmd_buffer,
    union isl_color_value clear_color =
       vk_to_isl_color(attachment->clearValue.color);
 
+   /* If multiview is enabled we ignore baseArrayLayer and layerCount */
+   if (subpass->view_mask) {
+      uint32_t view_idx;
+      for_each_bit(view_idx, subpass->view_mask) {
+         for (uint32_t r = 0; r < rectCount; ++r) {
+            const VkOffset2D offset = pRects[r].rect.offset;
+            const VkExtent2D extent = pRects[r].rect.extent;
+            blorp_clear_attachments(batch, binding_table,
+                                    ISL_FORMAT_UNSUPPORTED, pass_att->samples,
+                                    view_idx, 1,
+                                    offset.x, offset.y,
+                                    offset.x + extent.width,
+                                    offset.y + extent.height,
+                                    true, clear_color, false, 0.0f, 0, 0);
+         }
+      }
+      return;
+   }
+
    for (uint32_t r = 0; r < rectCount; ++r) {
       const VkOffset2D offset = pRects[r].rect.offset;
       const VkExtent2D extent = pRects[r].rect.extent;
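
With multiview enabled, each set bit of subpass->view_mask names one view, and the clear above is replayed once per bit with that view index as the single layer, which is why the API-supplied baseArrayLayer/layerCount are ignored. A sketch of the set-bit walk the for_each_bit() macro is assumed to provide, using the GCC/Clang count-trailing-zeros builtin:

#include <stdint.h>

/* Visit each set bit of view_mask from lowest to highest; the bit index is
 * the view (and hence the framebuffer layer) to clear.  Sketch only. */
static void
for_each_view_sketch(uint32_t view_mask,
                     void (*clear_view)(uint32_t view, void *data),
                     void *data)
{
   while (view_mask) {
      uint32_t view = __builtin_ctz(view_mask);   /* lowest set bit */
      clear_view(view, data);
      view_mask &= view_mask - 1;                 /* drop that bit */
   }
}

A view_mask of 0x5 visits views 0 and 2.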
@@ -1037,6 +1066,28 @@ clear_depth_stencil_attachment(struct anv_cmd_buffer *cmd_buffer,
    if (result != VK_SUCCESS)
       return;
 
+   /* If multiview is enabled we ignore baseArrayLayer and layerCount */
+   if (subpass->view_mask) {
+      uint32_t view_idx;
+      for_each_bit(view_idx, subpass->view_mask) {
+         for (uint32_t r = 0; r < rectCount; ++r) {
+            const VkOffset2D offset = pRects[r].rect.offset;
+            const VkExtent2D extent = pRects[r].rect.extent;
+            VkClearDepthStencilValue value = attachment->clearValue.depthStencil;
+            blorp_clear_attachments(batch, binding_table,
+                                    depth_format, pass_att->samples,
+                                    view_idx, 1,
+                                    offset.x, offset.y,
+                                    offset.x + extent.width,
+                                    offset.y + extent.height,
+                                    false, color_value,
+                                    clear_depth, value.depth,
+                                    clear_stencil ? 0xff : 0, value.stencil);
+         }
+      }
+      return;
+   }
+
    for (uint32_t r = 0; r < rectCount; ++r) {
       const VkOffset2D offset = pRects[r].rect.offset;
       const VkExtent2D extent = pRects[r].rect.extent;
@@ -1090,80 +1141,6 @@ enum subpass_stage {
    SUBPASS_STAGE_RESOLVE,
 };
 
-static bool
-attachment_needs_flush(struct anv_cmd_buffer *cmd_buffer,
-                       struct anv_render_pass_attachment *att,
-                       enum subpass_stage stage)
-{
-   struct anv_render_pass *pass = cmd_buffer->state.pass;
-   const uint32_t subpass_idx = anv_get_subpass_id(&cmd_buffer->state);
-
-   /* We handle this subpass specially based on the current stage */
-   enum anv_subpass_usage usage = att->subpass_usage[subpass_idx];
-   switch (stage) {
-   case SUBPASS_STAGE_LOAD:
-      if (usage & (ANV_SUBPASS_USAGE_INPUT | ANV_SUBPASS_USAGE_RESOLVE_SRC))
-         return true;
-      break;
-
-   case SUBPASS_STAGE_DRAW:
-      if (usage & ANV_SUBPASS_USAGE_RESOLVE_SRC)
-         return true;
-      break;
-
-   default:
-      break;
-   }
-
-   for (uint32_t s = subpass_idx + 1; s < pass->subpass_count; s++) {
-      usage = att->subpass_usage[s];
-
-      /* If this attachment is going to be used as an input in this or any
-       * future subpass, then we need to flush its cache and invalidate the
-       * texture cache.
-       */
-      if (att->subpass_usage[s] & ANV_SUBPASS_USAGE_INPUT)
-         return true;
-
-      if (usage & (ANV_SUBPASS_USAGE_DRAW | ANV_SUBPASS_USAGE_RESOLVE_DST)) {
-         /* We found another subpass that draws to this attachment.  We'll
-          * wait to resolve until then.
-          */
-         return false;
-      }
-   }
-
-   return false;
-}
-
-static void
-anv_cmd_buffer_flush_attachments(struct anv_cmd_buffer *cmd_buffer,
-                                 enum subpass_stage stage)
-{
-   struct anv_subpass *subpass = cmd_buffer->state.subpass;
-   struct anv_render_pass *pass = cmd_buffer->state.pass;
-
-   for (uint32_t i = 0; i < subpass->color_count; ++i) {
-      uint32_t att = subpass->color_attachments[i].attachment;
-      assert(att < pass->attachment_count);
-      if (attachment_needs_flush(cmd_buffer, &pass->attachments[att], stage)) {
-         cmd_buffer->state.pending_pipe_bits |=
-            ANV_PIPE_TEXTURE_CACHE_INVALIDATE_BIT |
-            ANV_PIPE_RENDER_TARGET_CACHE_FLUSH_BIT;
-      }
-   }
-
-   if (subpass->depth_stencil_attachment.attachment != VK_ATTACHMENT_UNUSED) {
-      uint32_t att = subpass->depth_stencil_attachment.attachment;
-      assert(att < pass->attachment_count);
-      if (attachment_needs_flush(cmd_buffer, &pass->attachments[att], stage)) {
-         cmd_buffer->state.pending_pipe_bits |=
-            ANV_PIPE_TEXTURE_CACHE_INVALIDATE_BIT |
-            ANV_PIPE_DEPTH_CACHE_FLUSH_BIT;
-      }
-   }
-}
-
 static bool
 subpass_needs_clear(const struct anv_cmd_buffer *cmd_buffer)
 {
@@ -1172,14 +1149,19 @@ subpass_needs_clear(const struct anv_cmd_buffer *cmd_buffer)
 
    for (uint32_t i = 0; i < cmd_state->subpass->color_count; ++i) {
       uint32_t a = cmd_state->subpass->color_attachments[i].attachment;
+      if (a == VK_ATTACHMENT_UNUSED)
+         continue;
+
+      assert(a < cmd_state->pass->attachment_count);
       if (cmd_state->attachments[a].pending_clear_aspects) {
          return true;
       }
    }
 
-   if (ds != VK_ATTACHMENT_UNUSED &&
-       cmd_state->attachments[ds].pending_clear_aspects) {
-      return true;
+   if (ds != VK_ATTACHMENT_UNUSED) {
+      assert(ds < cmd_state->pass->attachment_count);
+      if (cmd_state->attachments[ds].pending_clear_aspects)
+         return true;
    }
 
    return false;
@@ -1211,6 +1193,10 @@ anv_cmd_buffer_clear_subpass(struct anv_cmd_buffer *cmd_buffer)
    struct anv_framebuffer *fb = cmd_buffer->state.framebuffer;
    for (uint32_t i = 0; i < cmd_state->subpass->color_count; ++i) {
       const uint32_t a = cmd_state->subpass->color_attachments[i].attachment;
+      if (a == VK_ATTACHMENT_UNUSED)
+         continue;
+
+      assert(a < cmd_state->pass->attachment_count);
       struct anv_attachment_state *att_state = &cmd_state->attachments[a];
 
       if (!att_state->pending_clear_aspects)
@@ -1270,6 +1256,7 @@ anv_cmd_buffer_clear_subpass(struct anv_cmd_buffer *cmd_buffer)
    }
 
    const uint32_t ds = cmd_state->subpass->depth_stencil_attachment.attachment;
+   assert(ds == VK_ATTACHMENT_UNUSED || ds < cmd_state->pass->attachment_count);
 
    if (ds != VK_ATTACHMENT_UNUSED &&
        cmd_state->attachments[ds].pending_clear_aspects) {
@@ -1348,8 +1335,6 @@ anv_cmd_buffer_clear_subpass(struct anv_cmd_buffer *cmd_buffer)
    }
 
    blorp_batch_finish(&batch);
-
-   anv_cmd_buffer_flush_attachments(cmd_buffer, SUBPASS_STAGE_LOAD);
 }
 
 static void
@@ -1407,10 +1392,11 @@ void anv_CmdResolveImage(
    for (uint32_t r = 0; r < regionCount; r++) {
       assert(pRegions[r].srcSubresource.aspectMask ==
              pRegions[r].dstSubresource.aspectMask);
-      assert(pRegions[r].srcSubresource.layerCount ==
-             pRegions[r].dstSubresource.layerCount);
+      assert(anv_get_layerCount(src_image, &pRegions[r].srcSubresource) ==
+             anv_get_layerCount(dst_image, &pRegions[r].dstSubresource));
 
-      const uint32_t layer_count = pRegions[r].dstSubresource.layerCount;
+      const uint32_t layer_count =
+         anv_get_layerCount(dst_image, &pRegions[r].dstSubresource);
 
       for (uint32_t layer = 0; layer < layer_count; layer++) {
          resolve_image(&batch,
@@ -1428,6 +1414,73 @@ void anv_CmdResolveImage(
    blorp_batch_finish(&batch);
 }
 
+void
+anv_image_ccs_clear(struct anv_cmd_buffer *cmd_buffer,
+                    const struct anv_image *image,
+                    const struct isl_view *view,
+                    const VkImageSubresourceRange *subresourceRange)
+{
+   assert(image->type == VK_IMAGE_TYPE_3D || image->extent.depth == 1);
+
+   struct blorp_batch batch;
+   blorp_batch_init(&cmd_buffer->device->blorp, &batch, cmd_buffer, 0);
+
+   struct blorp_surf surf;
+   get_blorp_surf_for_anv_image(image, VK_IMAGE_ASPECT_COLOR_BIT,
+                                image->aux_usage, &surf);
+
+   /* From the Sky Lake PRM Vol. 7, "Render Target Fast Clear":
+    *
+    *    "After Render target fast clear, pipe-control with color cache
+    *    write-flush must be issued before sending any DRAW commands on
+    *    that render target."
+    *
+    * This comment is a bit cryptic and doesn't really tell you what's going
+    * or what's really needed.  It appears that fast clear ops are not
+    * properly synchronized with other drawing.  This means that we cannot
+    * have a fast clear operation in the pipe at the same time as other
+    * regular drawing operations.  We need to use a PIPE_CONTROL to ensure
+    * that the contents of the previous draw hit the render target before we
+    * resolve and then use a second PIPE_CONTROL after the resolve to ensure
+    * that it is completed before any additional drawing occurs.
+    */
+   cmd_buffer->state.pending_pipe_bits |=
+      ANV_PIPE_RENDER_TARGET_CACHE_FLUSH_BIT | ANV_PIPE_CS_STALL_BIT;
+
+   const uint32_t level_count =
+      view ? view->levels : anv_get_levelCount(image, subresourceRange);
+   for (uint32_t l = 0; l < level_count; l++) {
+      const uint32_t level =
+         (view ? view->base_level : subresourceRange->baseMipLevel) + l;
+
+      const VkExtent3D extent = {
+         .width = anv_minify(image->extent.width, level),
+         .height = anv_minify(image->extent.height, level),
+         .depth = anv_minify(image->extent.depth, level),
+      };
+
+      /* Blorp likes to treat 2D_ARRAY and 3D the same. */
+      uint32_t blorp_base_layer, blorp_layer_count;
+      if (view) {
+         blorp_base_layer = view->base_array_layer;
+         blorp_layer_count = view->array_len;
+      } else if (image->type == VK_IMAGE_TYPE_3D) {
+         blorp_base_layer = 0;
+         blorp_layer_count = extent.depth;
+      } else {
+         blorp_base_layer = subresourceRange->baseArrayLayer;
+         blorp_layer_count = anv_get_layerCount(image, subresourceRange);
+      }
+
+      blorp_fast_clear(&batch, &surf, surf.surf->format,
+                       level, blorp_base_layer, blorp_layer_count,
+                       0, 0, extent.width, extent.height);
+   }
+
+   cmd_buffer->state.pending_pipe_bits |=
+      ANV_PIPE_RENDER_TARGET_CACHE_FLUSH_BIT | ANV_PIPE_CS_STALL_BIT;
+}
+
 static void
 ccs_resolve_attachment(struct anv_cmd_buffer *cmd_buffer,
                        struct blorp_batch *batch,
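
In anv_image_ccs_clear above, each level's fast-clear extent is derived with anv_minify, which is assumed to be the usual mip-chain halving clamped to one. A sketch:

#include <stdint.h>

/* Per-level size: halve once per mip level, never below 1 (and 0 stays 0).
 * Sketch of the behaviour assumed of anv_minify. */
static uint32_t
minify_sketch(uint32_t base, uint32_t level)
{
   if (base == 0)
      return 0;
   uint32_t value = base >> level;
   return value > 0 ? value : 1;
}

A 256x256 image gives 32x32 at level 3 and stays at 1x1 from level 8 onward.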
@@ -1574,13 +1627,23 @@ anv_cmd_buffer_resolve_subpass(struct anv_cmd_buffer *cmd_buffer)
    blorp_batch_init(&cmd_buffer->device->blorp, &batch, cmd_buffer, 0);
 
    for (uint32_t i = 0; i < subpass->color_count; ++i) {
-      ccs_resolve_attachment(cmd_buffer, &batch,
-                             subpass->color_attachments[i].attachment);
-   }
+      const uint32_t att = subpass->color_attachments[i].attachment;
+      if (att == VK_ATTACHMENT_UNUSED)
+         continue;
 
-   anv_cmd_buffer_flush_attachments(cmd_buffer, SUBPASS_STAGE_DRAW);
+      assert(att < cmd_buffer->state.pass->attachment_count);
+      ccs_resolve_attachment(cmd_buffer, &batch, att);
+   }
 
    if (subpass->has_resolve) {
+      /* We are about to do some MSAA resolves.  We need to flush so that the
+       * results of writes to the MSAA color attachments show up in the sampler
+       * when we blit to the single-sampled resolve target.
+       */
+      cmd_buffer->state.pending_pipe_bits |=
+         ANV_PIPE_TEXTURE_CACHE_INVALIDATE_BIT |
+         ANV_PIPE_RENDER_TARGET_CACHE_FLUSH_BIT;
+
       for (uint32_t i = 0; i < subpass->color_count; ++i) {
          uint32_t src_att = subpass->color_attachments[i].attachment;
          uint32_t dst_att = subpass->resolve_attachments[i].attachment;
@@ -1588,6 +1651,9 @@ anv_cmd_buffer_resolve_subpass(struct anv_cmd_buffer *cmd_buffer)
          if (dst_att == VK_ATTACHMENT_UNUSED)
             continue;
 
+         assert(src_att < cmd_buffer->state.pass->attachment_count);
+         assert(dst_att < cmd_buffer->state.pass->attachment_count);
+
          if (cmd_buffer->state.attachments[dst_att].pending_clear_aspects) {
             /* From the Vulkan 1.0 spec:
              *
@@ -1618,8 +1684,6 @@ anv_cmd_buffer_resolve_subpass(struct anv_cmd_buffer *cmd_buffer)
 
          ccs_resolve_attachment(cmd_buffer, &batch, dst_att);
       }
-
-      anv_cmd_buffer_flush_attachments(cmd_buffer, SUBPASS_STAGE_RESOLVE);
    }
 
    blorp_batch_finish(&batch);