anv: add no reloc flags on empty and simple bo paths.
[mesa.git] / src / intel / vulkan / anv_batch_chain.c
index 2c3b7b3cad1f21d1d4af4446bd7d53a5141a94f4..823345eeec19a855da2f1a6451919155bb9b8a35 100644 (file)
@@ -31,6 +31,7 @@
 
 #include "genxml/gen8_pack.h"
 #include "genxml/genX_bits.h"
+#include "perf/gen_perf.h"
 
 #include "util/debug.h"
 
@@ -408,9 +409,8 @@ static void
 anv_batch_bo_start(struct anv_batch_bo *bbo, struct anv_batch *batch,
                    size_t batch_padding)
 {
-   batch->start_addr = (struct anv_address) { .bo = bbo->bo, };
-   batch->next = batch->start = bbo->bo->map;
-   batch->end = bbo->bo->map + bbo->bo->size - batch_padding;
+   anv_batch_set_storage(batch, (struct anv_address) { .bo = bbo->bo, },
+                         bbo->bo->map, bbo->bo->size - batch_padding);
    batch->relocs = &bbo->relocs;
    anv_reloc_list_clear(&bbo->relocs);
 }
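
For readers following the refactor: the three assignments removed above are folded into
anv_batch_set_storage(). The helper's body is not part of this diff; a minimal sketch of
what it presumably does, inferred only from the lines it replaces (not copied from the
helper this series actually introduces), is:

    /* Sketch only -- inferred from the removed assignments above. */
    static inline void
    anv_batch_set_storage(struct anv_batch *batch, struct anv_address addr,
                          void *map, size_t size)
    {
       batch->start_addr = addr;          /* anv_address of the backing BO     */
       batch->next = batch->start = map;  /* CPU pointer to the mapped BO      */
       batch->end = map + size;           /* caller already subtracted padding */
    }
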
@@ -928,7 +928,7 @@ anv_cmd_buffer_end_batch_buffer(struct anv_cmd_buffer *cmd_buffer)
       const uint32_t length = cmd_buffer->batch.next - cmd_buffer->batch.start;
       if (!cmd_buffer->device->can_chain_batches) {
          cmd_buffer->exec_mode = ANV_CMD_BUFFER_EXEC_MODE_GROW_AND_EMIT;
-      } else if (cmd_buffer->device->physical->use_softpin) {
+      } else if (cmd_buffer->device->physical->use_call_secondary) {
          cmd_buffer->exec_mode = ANV_CMD_BUFFER_EXEC_MODE_CALL_AND_RETURN;
          /* If the secondary command buffer begins & ends in the same BO and
           * its length is less than the length of CS prefetch, add some NOOPs
@@ -1102,6 +1102,8 @@ struct anv_execbuf {
 
    const VkAllocationCallbacks *             alloc;
    VkSystemAllocationScope                   alloc_scope;
+
+   int                                       perf_query_pass;
 };
 
 static void
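
The new perf_query_pass member records which pass of a multi-pass performance query this
submission executes; judging from the "submit->perf_query_pass >= 0" test added further
down, a negative value means no pass is attached. A purely illustrative helper capturing
that convention (hypothetical, not part of this patch) could look like:

    /* Hypothetical helper: the ">= 0 means a pass is attached" convention is
     * taken from the has_perf_query check added later in this patch. */
    static inline bool
    execbuf_has_perf_query_pass(const struct anv_execbuf *execbuf)
    {
       return execbuf->perf_query_pass >= 0;
    }
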
@@ -1375,6 +1377,9 @@ static bool
 relocate_cmd_buffer(struct anv_cmd_buffer *cmd_buffer,
                     struct anv_execbuf *exec)
 {
+   if (cmd_buffer->perf_query_pool)
+      return false;
+
    if (!exec->has_relocs)
       return true;
 
@@ -1631,7 +1636,7 @@ setup_empty_execbuf(struct anv_execbuf *execbuf, struct anv_device *device)
       .buffer_count = execbuf->bo_count,
       .batch_start_offset = 0,
       .batch_len = 8, /* GEN7_MI_BATCH_BUFFER_END and NOOP */
-      .flags = I915_EXEC_HANDLE_LUT | I915_EXEC_RENDER,
+      .flags = I915_EXEC_HANDLE_LUT | I915_EXEC_RENDER | I915_EXEC_NO_RELOC,
       .rsvd1 = device->context_id,
       .rsvd2 = 0,
    };
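
A note on the flag itself, which applies both to this empty-execbuf path and to the
simple-BO path further down: I915_EXEC_NO_RELOC tells i915 that the offset field of every
drm_i915_gem_exec_object2 already holds the address userspace assumed when it built the
batch, so the kernel can skip relocation processing whenever the objects have not moved.
A generic sketch of that uAPI contract (not anv code; gem_handle and presumed_address are
hypothetical parameters):

    #include <stdint.h>
    #include <drm/i915_drm.h>

    /* With I915_EXEC_NO_RELOC the offsets handed to the kernel must match the
     * addresses baked into the batch contents; EXEC_OBJECT_PINNED additionally
     * asks the kernel not to move the object (the softpin case). */
    static void
    fill_no_reloc_execbuf(struct drm_i915_gem_exec_object2 *obj,
                          struct drm_i915_gem_execbuffer2 *execbuf,
                          uint32_t gem_handle, uint64_t presumed_address)
    {
       *obj = (struct drm_i915_gem_exec_object2) {
          .handle = gem_handle,
          .offset = presumed_address,
          .flags  = EXEC_OBJECT_PINNED,
       };
       *execbuf = (struct drm_i915_gem_execbuffer2) {
          .buffers_ptr  = (uintptr_t) obj,
          .buffer_count = 1,
          .flags = I915_EXEC_HANDLE_LUT | I915_EXEC_RENDER | I915_EXEC_NO_RELOC,
       };
    }
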
@@ -1672,8 +1677,15 @@ anv_queue_execbuf_locked(struct anv_queue *queue,
    anv_execbuf_init(&execbuf);
    execbuf.alloc = submit->alloc;
    execbuf.alloc_scope = submit->alloc_scope;
+   execbuf.perf_query_pass = submit->perf_query_pass;
 
-   VkResult result;
+   /* Always add the workaround BO as it includes a driver identifier for the
+    * error_state.
+    */
+   VkResult result =
+      anv_execbuf_add_bo(device, &execbuf, device->workaround_bo, NULL, 0);
+   if (result != VK_SUCCESS)
+      goto error;
 
    for (uint32_t i = 0; i < submit->fence_bo_count; i++) {
       int signaled;
@@ -1697,7 +1709,7 @@ anv_queue_execbuf_locked(struct anv_queue *queue,
          .buffer_count = execbuf.bo_count,
          .batch_start_offset = 0,
          .batch_len = submit->simple_bo_size,
-         .flags = I915_EXEC_HANDLE_LUT | I915_EXEC_RENDER,
+         .flags = I915_EXEC_HANDLE_LUT | I915_EXEC_RENDER | I915_EXEC_NO_RELOC,
          .rsvd1 = device->context_id,
          .rsvd2 = 0,
       };
@@ -1708,10 +1720,26 @@ anv_queue_execbuf_locked(struct anv_queue *queue,
    if (result != VK_SUCCESS)
       goto error;
 
+   const bool has_perf_query =
+      submit->perf_query_pass >= 0 &&
+      submit->cmd_buffer &&
+      submit->cmd_buffer->perf_query_pool;
+
    if (unlikely(INTEL_DEBUG & DEBUG_BATCH)) {
       if (submit->cmd_buffer) {
-         struct anv_batch_bo **bo = u_vector_tail(&submit->cmd_buffer->seen_bbos);
+         if (has_perf_query) {
+            struct anv_query_pool *query_pool = submit->cmd_buffer->perf_query_pool;
+            struct anv_bo *pass_batch_bo = query_pool->bo;
+            uint64_t pass_batch_offset =
+               khr_perf_query_preamble_offset(query_pool,
+                                              submit->perf_query_pass);
+
+            gen_print_batch(&device->decoder_ctx,
+                            pass_batch_bo->map + pass_batch_offset, 64,
+                            pass_batch_bo->offset + pass_batch_offset, false);
+         }
 
+         struct anv_batch_bo **bo = u_vector_tail(&submit->cmd_buffer->seen_bbos);
          device->cmd_buffer_being_decoded = submit->cmd_buffer;
          gen_print_batch(&device->decoder_ctx, (*bo)->bo->map,
                          (*bo)->bo->size, (*bo)->bo->offset, false);
@@ -1742,6 +1770,49 @@ anv_queue_execbuf_locked(struct anv_queue *queue,
    if (submit->need_out_fence)
       execbuf.execbuf.flags |= I915_EXEC_FENCE_OUT;
 
+   if (has_perf_query) {
+      struct anv_query_pool *query_pool = submit->cmd_buffer->perf_query_pool;
+      assert(submit->perf_query_pass < query_pool->n_passes);
+      struct gen_perf_query_info *query_info =
+         query_pool->pass_query[submit->perf_query_pass];
+
+      /* Some performance queries only use the pipeline statistics HW; no OA
+       * is involved in that case, so there is no need to reconfigure.
+       */
+      if (likely((INTEL_DEBUG & DEBUG_NO_OACONFIG) == 0) &&
+          (query_info->kind == GEN_PERF_QUERY_TYPE_OA ||
+           query_info->kind == GEN_PERF_QUERY_TYPE_RAW)) {
+         int ret = gen_ioctl(device->perf_fd, I915_PERF_IOCTL_CONFIG,
+                             (void *)(uintptr_t) query_info->oa_metrics_set_id);
+         if (ret < 0) {
+            result = anv_device_set_lost(device,
+                                         "i915-perf config failed: %s",
+                                         strerror(errno));
+         }
+      }
+
+      struct anv_bo *pass_batch_bo = query_pool->bo;
+
+      struct drm_i915_gem_exec_object2 query_pass_object = {
+         .handle = pass_batch_bo->gem_handle,
+         .offset = pass_batch_bo->offset,
+         .flags  = pass_batch_bo->flags,
+      };
+      struct drm_i915_gem_execbuffer2 query_pass_execbuf = {
+         .buffers_ptr = (uintptr_t) &query_pass_object,
+         .buffer_count = 1,
+         .batch_start_offset = khr_perf_query_preamble_offset(query_pool,
+                                                              submit->perf_query_pass),
+         .flags = I915_EXEC_HANDLE_LUT | I915_EXEC_RENDER,
+         .rsvd1 = device->context_id,
+      };
+
+      int ret = queue->device->no_hw ? 0 :
+         anv_gem_execbuffer(queue->device, &query_pass_execbuf);
+      if (ret)
+         result = anv_queue_set_lost(queue, "execbuf2 failed: %m");
+   }
+
    int ret = queue->device->no_hw ? 0 :
       anv_gem_execbuffer(queue->device, &execbuf.execbuf);
    if (ret)
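
For context on the per-pass submission added above: before the main execbuf, the OA unit
is switched to the metric set required by the selected pass (unless DEBUG_NO_OACONFIG is
set), and a tiny preamble batch stored in the query pool BO is submitted on its own at
khr_perf_query_preamble_offset(). The I915_PERF_IOCTL_CONFIG call passes the
configuration id as the ioctl argument itself, which is why the diff casts it to a
pointer. A standalone sketch of that call, assuming a perf_fd obtained from
DRM_IOCTL_I915_PERF_OPEN and a metric set id previously registered with
DRM_IOCTL_I915_PERF_ADD_CONFIG:

    #include <stdint.h>
    #include <sys/ioctl.h>
    #include <drm/i915_drm.h>

    /* Sketch, not anv code: select the active OA metric set on an already
     * open i915-perf stream. Returns the raw ioctl result (negative on
     * failure, with errno set). */
    static int
    select_oa_metric_set(int perf_fd, uint64_t metrics_set_id)
    {
       return ioctl(perf_fd, I915_PERF_IOCTL_CONFIG,
                    (void *)(uintptr_t) metrics_set_id);
    }
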