anv: For pinned BOs, skip relocations, but track bo usage
[mesa.git] / src/intel/vulkan/anv_batch_chain.c
index 9def174b429d48b4305963a94a2920e16c927687..fd738d0a03a57f123f24d4fbb59dc0754908ad07 100644 (file)
@@ -75,11 +75,24 @@ anv_reloc_list_init_clone(struct anv_reloc_list *list,
       return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
    }
 
+   list->deps = _mesa_set_create(NULL, _mesa_hash_pointer,
+                                 _mesa_key_pointer_equal);
+
+   if (!list->deps) {
+      vk_free(alloc, list->relocs);
+      vk_free(alloc, list->reloc_bos);
+      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
+   }
+
    if (other_list) {
       memcpy(list->relocs, other_list->relocs,
              list->array_length * sizeof(*list->relocs));
       memcpy(list->reloc_bos, other_list->reloc_bos,
              list->array_length * sizeof(*list->reloc_bos));
+      struct set_entry *entry;
+      set_foreach(other_list->deps, entry) {
+         _mesa_set_add_pre_hashed(list->deps, entry->hash, entry->key);
+      }
    }
 
    return VK_SUCCESS;
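
A minimal sketch of the pointer-set clone idiom the hunk above relies on, with
a hypothetical helper name (copy_deps is not anv API).  Keys are anv_bo
pointers, so pointer hashing and equality suffice, and
_mesa_set_add_pre_hashed reuses the hash already stored in each entry:

    static struct set *
    copy_deps(struct set *src)
    {
       struct set *dst = _mesa_set_create(NULL, _mesa_hash_pointer,
                                          _mesa_key_pointer_equal);
       if (!dst)
          return NULL;

       struct set_entry *entry;
       set_foreach(src, entry) {
          /* Re-insert under the stored hash; the pointer key is not rehashed. */
          _mesa_set_add_pre_hashed(dst, entry->hash, entry->key);
       }
       return dst;
    }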
@@ -98,6 +111,7 @@ anv_reloc_list_finish(struct anv_reloc_list *list,
 {
    vk_free(alloc, list->relocs);
    vk_free(alloc, list->reloc_bos);
+   _mesa_set_destroy(list->deps, NULL);
 }
 
 static VkResult
@@ -148,8 +162,10 @@ anv_reloc_list_add(struct anv_reloc_list *list,
    struct drm_i915_gem_relocation_entry *entry;
    int index;
 
-   const uint32_t domain =
-      (target_bo->flags & EXEC_OBJECT_WRITE) ? I915_GEM_DOMAIN_RENDER : 0;
+   if (target_bo->flags & EXEC_OBJECT_PINNED) {
+      _mesa_set_add(list->deps, target_bo);
+      return VK_SUCCESS;
+   }
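
Why the early return is safe: EXEC_OBJECT_PINNED means the kernel executes
the BO at the address userspace already assigned, so the 64-bit address baked
into the batch is final and no drm_i915_gem_relocation_entry is needed.  The
BO only has to reach the execbuf object list, which is what the deps set
records.  A hedged sketch of the direct write this enables; on Gen8+ the
address must be in canonical form, the same sign-extension trick as the
write_reloc helper removed further down:

    static inline uint64_t
    canonical_address(uint64_t v)
    {
       /* GraphicsAddress[63:48] must equal bit 47 ("canonical form"). */
       const int shift = 63 - 47;
       return (uint64_t)(((int64_t)v << shift) >> shift);
    }

    /* ...and then simply:
     * *(uint64_t *)p = canonical_address(target_bo->offset + delta);
     */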
 
    VkResult result = anv_reloc_list_grow(list, alloc, 1);
    if (result != VK_SUCCESS)
@@ -163,8 +179,8 @@ anv_reloc_list_add(struct anv_reloc_list *list,
    entry->delta = delta;
    entry->offset = offset;
    entry->presumed_offset = target_bo->offset;
-   entry->read_domains = domain;
-   entry->write_domain = domain;
+   entry->read_domains = 0;
+   entry->write_domain = 0;
    VG(VALGRIND_CHECK_MEM_IS_DEFINED(entry, sizeof(*entry)));
 
    return VK_SUCCESS;
@@ -188,6 +204,12 @@ anv_reloc_list_append(struct anv_reloc_list *list,
       list->relocs[i + list->num_relocs].offset += offset;
 
    list->num_relocs += other->num_relocs;
+
+   struct set_entry *entry;
+   set_foreach(other->deps, entry) {
+      _mesa_set_add_pre_hashed(list->deps, entry->hash, entry->key);
+   }
+
    return VK_SUCCESS;
 }
 
@@ -341,6 +363,7 @@ anv_batch_bo_start(struct anv_batch_bo *bbo, struct anv_batch *batch,
    batch->end = bbo->bo.map + bbo->bo.size - batch_padding;
    batch->relocs = &bbo->relocs;
    bbo->relocs.num_relocs = 0;
+   _mesa_set_clear(bbo->relocs.deps, NULL);
 }
 
 static void
@@ -444,7 +467,7 @@ anv_batch_bo_list_clone(const struct list_head *list,
  * Functions related to anv_batch_bo
  *-----------------------------------------------------------------------*/
 
-static inline struct anv_batch_bo *
+static struct anv_batch_bo *
 anv_cmd_buffer_current_batch_bo(struct anv_cmd_buffer *cmd_buffer)
 {
    return LIST_ENTRY(struct anv_batch_bo, cmd_buffer->batch_bos.prev, link);
@@ -455,7 +478,7 @@ anv_cmd_buffer_surface_base_address(struct anv_cmd_buffer *cmd_buffer)
 {
    struct anv_state *bt_block = u_vector_head(&cmd_buffer->bt_block_states);
    return (struct anv_address) {
-      .bo = &cmd_buffer->device->surface_state_pool.block_pool.bo,
+      .bo = &anv_binding_table_pool(cmd_buffer->device)->block_pool.bo,
       .offset = bt_block->offset,
    };
 }
@@ -622,7 +645,8 @@ struct anv_state
 anv_cmd_buffer_alloc_binding_table(struct anv_cmd_buffer *cmd_buffer,
                                    uint32_t entries, uint32_t *state_offset)
 {
-   struct anv_state_pool *state_pool = &cmd_buffer->device->surface_state_pool;
+   struct anv_device *device = cmd_buffer->device;
+   struct anv_state_pool *state_pool = &device->surface_state_pool;
    struct anv_state *bt_block = u_vector_head(&cmd_buffer->bt_block_states);
    struct anv_state state;
 
@@ -632,12 +656,19 @@ anv_cmd_buffer_alloc_binding_table(struct anv_cmd_buffer *cmd_buffer,
       return (struct anv_state) { 0 };
 
    state.offset = cmd_buffer->bt_next;
-   state.map = state_pool->block_pool.map + bt_block->offset + state.offset;
+   state.map = anv_binding_table_pool(device)->block_pool.map +
+      bt_block->offset + state.offset;
 
    cmd_buffer->bt_next += state.alloc_size;
 
-   assert(bt_block->offset < 0);
-   *state_offset = -bt_block->offset;
+   if (device->instance->physicalDevice.use_softpin) {
+      assert(bt_block->offset >= 0);
+      *state_offset = device->surface_state_pool.block_pool.start_address -
+         device->binding_table_pool.block_pool.start_address - bt_block->offset;
+   } else {
+      assert(bt_block->offset < 0);
+      *state_offset = -bt_block->offset;
+   }
 
    return state;
 }
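
A worked example of the softpin offset math with hypothetical pool bases (the
real values are the pool layout constants in anv_private.h): say the
binding-table pool starts at 0x000100000000, the surface-state pool at
0x000180000000, and bt_block->offset is 0x2000.  Then

    *state_offset = 0x180000000 - 0x100000000 - 0x2000 = 0x7fffe000

A surface state at pool-relative offset S is written into the binding table
as S + 0x7fffe000; assuming Surface State Base Address is programmed to the
current binding-table block (0x100002000 here), which is what this delta
implies, the hardware resolves the entry to 0x180000000 + S, the surface
state's actual address.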
@@ -661,15 +692,13 @@ anv_cmd_buffer_alloc_dynamic_state(struct anv_cmd_buffer *cmd_buffer,
 VkResult
 anv_cmd_buffer_new_binding_table_block(struct anv_cmd_buffer *cmd_buffer)
 {
-   struct anv_state_pool *state_pool = &cmd_buffer->device->surface_state_pool;
-
    struct anv_state *bt_block = u_vector_add(&cmd_buffer->bt_block_states);
    if (bt_block == NULL) {
       anv_batch_set_error(&cmd_buffer->batch, VK_ERROR_OUT_OF_HOST_MEMORY);
       return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
    }
 
-   *bt_block = anv_state_pool_alloc_back(state_pool);
+   *bt_block = anv_binding_table_pool_alloc(cmd_buffer->device);
    cmd_buffer->bt_next = 0;
 
    return VK_SUCCESS;
@@ -709,9 +738,10 @@ anv_cmd_buffer_init_batch_bo_chain(struct anv_cmd_buffer *cmd_buffer)
 
    *(struct anv_batch_bo **)u_vector_add(&cmd_buffer->seen_bbos) = batch_bo;
 
+   /* u_vector requires power-of-two-sized elements */
+   unsigned pow2_state_size = util_next_power_of_two(sizeof(struct anv_state));
    success = u_vector_init(&cmd_buffer->bt_block_states,
-                           sizeof(struct anv_state),
-                           8 * sizeof(struct anv_state));
+                           pow2_state_size, 8 * pow2_state_size);
    if (!success)
       goto fail_seen_bbos;
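
util_next_power_of_two rounds up to the nearest power of two, so if
sizeof(struct anv_state) were, say, 24 bytes, the element size would become
32 and the initial vector 256 bytes.  A sketch of the usual bit-walk (mesa's
real helper is branch-free, but the result is the same for x >= 1):

    static inline unsigned
    next_pow2(unsigned x)
    {
       unsigned p = 1;
       while (p < x)   /* smallest power of two >= x */
          p <<= 1;
       return p;
    }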
 
@@ -742,7 +772,7 @@ anv_cmd_buffer_fini_batch_bo_chain(struct anv_cmd_buffer *cmd_buffer)
 {
    struct anv_state *bt_block;
    u_vector_foreach(bt_block, &cmd_buffer->bt_block_states)
-      anv_state_pool_free(&cmd_buffer->device->surface_state_pool, *bt_block);
+      anv_binding_table_pool_free(cmd_buffer->device, *bt_block);
    u_vector_finish(&cmd_buffer->bt_block_states);
 
    anv_reloc_list_finish(&cmd_buffer->surface_relocs, &cmd_buffer->pool->alloc);
@@ -774,12 +804,13 @@ anv_cmd_buffer_reset_batch_bo_chain(struct anv_cmd_buffer *cmd_buffer)
 
    while (u_vector_length(&cmd_buffer->bt_block_states) > 1) {
       struct anv_state *bt_block = u_vector_remove(&cmd_buffer->bt_block_states);
-      anv_state_pool_free(&cmd_buffer->device->surface_state_pool, *bt_block);
+      anv_binding_table_pool_free(cmd_buffer->device, *bt_block);
    }
    assert(u_vector_length(&cmd_buffer->bt_block_states) == 1);
    cmd_buffer->bt_next = 0;
 
    cmd_buffer->surface_relocs.num_relocs = 0;
+   _mesa_set_clear(cmd_buffer->surface_relocs.deps, NULL);
    cmd_buffer->last_ss_pool_center = 0;
 
    /* Reset the list of seen buffers */
@@ -848,7 +879,7 @@ anv_cmd_buffer_end_batch_buffer(struct anv_cmd_buffer *cmd_buffer)
    }
 }
 
-static inline VkResult
+static VkResult
 anv_cmd_buffer_add_seen_bbos(struct anv_cmd_buffer *cmd_buffer,
                              struct list_head *list)
 {
@@ -959,6 +990,11 @@ struct anv_execbuf {
 
    /* Allocated length of the 'objects' and 'bos' arrays */
    uint32_t                                  array_length;
+
+   uint32_t                                  fence_count;
+   uint32_t                                  fence_array_length;
+   struct drm_i915_gem_exec_fence *          fences;
+   struct anv_syncobj **                     syncobjs;
 };
 
 static void
@@ -973,6 +1009,17 @@ anv_execbuf_finish(struct anv_execbuf *exec,
 {
    vk_free(alloc, exec->objects);
    vk_free(alloc, exec->bos);
+   vk_free(alloc, exec->fences);
+   vk_free(alloc, exec->syncobjs);
+}
+
+static int
+_compare_bo_handles(const void *_bo1, const void *_bo2)
+{
+   struct anv_bo * const *bo1 = _bo1;
+   struct anv_bo * const *bo2 = _bo2;
+
+   return (*bo1)->gem_handle - (*bo2)->gem_handle;
 }
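
Sorting by GEM handle gives the dependency BOs a stable order before they are
re-added below; iteration order of the pointer set depends on where entries
hash, so the sort keeps execbuf object lists deterministic from run to run.
The bare subtraction assumes handles stay below 2^31; an overflow-proof
variant (hypothetical, not in anv) would compare instead:

    static int
    _compare_bo_handles_safe(const void *_bo1, const void *_bo2)
    {
       struct anv_bo * const *bo1 = _bo1;
       struct anv_bo * const *bo2 = _bo2;

       /* Returns -1/0/1 without risking signed overflow. */
       return ((*bo1)->gem_handle > (*bo2)->gem_handle) -
              ((*bo1)->gem_handle < (*bo2)->gem_handle);
    }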
 
 static VkResult
@@ -1058,11 +1105,67 @@ anv_execbuf_add_bo(struct anv_execbuf *exec,
          if (result != VK_SUCCESS)
             return result;
       }
+
+      const uint32_t entries = relocs->deps->entries;
+      struct anv_bo **bos =
+         vk_alloc(alloc, entries * sizeof(*bos),
+                  8, VK_SYSTEM_ALLOCATION_SCOPE_COMMAND);
+      if (bos == NULL)
+         return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
+
+      struct set_entry *entry;
+      struct anv_bo **bo = bos;
+      set_foreach(relocs->deps, entry) {
+         *bo++ = (void *)entry->key;
+      }
+
+      qsort(bos, entries, sizeof(struct anv_bo*), _compare_bo_handles);
+
+      VkResult result = VK_SUCCESS;
+      for (bo = bos; bo < bos + entries; bo++) {
+         result = anv_execbuf_add_bo(exec, *bo, NULL, extra_flags, alloc);
+         if (result != VK_SUCCESS)
+            break;
+      }
+
+      vk_free(alloc, bos);
+
+      if (result != VK_SUCCESS)
+         return result;
    }
 
    return VK_SUCCESS;
 }
 
+static VkResult
+anv_execbuf_add_syncobj(struct anv_execbuf *exec,
+                        uint32_t handle, uint32_t flags,
+                        const VkAllocationCallbacks *alloc)
+{
+   assert(flags != 0);
+
+   if (exec->fence_count >= exec->fence_array_length) {
+      uint32_t new_len = MAX2(exec->fence_array_length * 2, 64);
+
+      exec->fences = vk_realloc(alloc, exec->fences,
+                                new_len * sizeof(*exec->fences),
+                                8, VK_SYSTEM_ALLOCATION_SCOPE_COMMAND);
+      if (exec->fences == NULL)
+         return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
+
+      exec->fence_array_length = new_len;
+   }
+
+   exec->fences[exec->fence_count] = (struct drm_i915_gem_exec_fence) {
+      .handle = handle,
+      .flags = flags,
+   };
+
+   exec->fence_count++;
+
+   return VK_SUCCESS;
+}
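
For reference, the kernel UAPI this array feeds (include/uapi/drm/i915_drm.h);
each entry names a syncobj handle plus wait and/or signal flags:

    struct drm_i915_gem_exec_fence {
       __u32 handle;   /* drm_syncobj handle */
       __u32 flags;    /* I915_EXEC_FENCE_WAIT and/or I915_EXEC_FENCE_SIGNAL */
    };

The array is attached to the submission further down by setting
I915_EXEC_FENCE_ARRAY and reusing the otherwise-unused cliprects fields of
drm_i915_gem_execbuffer2.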
+
 static void
 anv_cmd_buffer_process_relocs(struct anv_cmd_buffer *cmd_buffer,
                               struct anv_reloc_list *list)
@@ -1071,32 +1174,6 @@ anv_cmd_buffer_process_relocs(struct anv_cmd_buffer *cmd_buffer,
       list->relocs[i].target_handle = list->reloc_bos[i]->index;
 }
 
-static void
-write_reloc(const struct anv_device *device, void *p, uint64_t v, bool flush)
-{
-   unsigned reloc_size = 0;
-   if (device->info.gen >= 8) {
-      /* From the Broadwell PRM Vol. 2a, MI_LOAD_REGISTER_MEM::MemoryAddress:
-       *
-       *    "This field specifies the address of the memory location where the
-       *    register value specified in the DWord above will read from. The
-       *    address specifies the DWord location of the data. Range =
-       *    GraphicsVirtualAddress[63:2] for a DWord register GraphicsAddress
-       *    [63:48] are ignored by the HW and assumed to be in correct
-       *    canonical form [63:48] == [47]."
-       */
-      const int shift = 63 - 47;
-      reloc_size = sizeof(uint64_t);
-      *(uint64_t *)p = (((int64_t)v) << shift) >> shift;
-   } else {
-      reloc_size = sizeof(uint32_t);
-      *(uint32_t *)p = v;
-   }
-
-   if (flush && !device->info.has_llc)
-      anv_flush_range(p, reloc_size);
-}
-
 static void
 adjust_relocations_from_state_pool(struct anv_state_pool *pool,
                                    struct anv_reloc_list *relocs,
@@ -1341,8 +1418,7 @@ setup_execbuf_for_cmd_buffer(struct anv_execbuf *execbuf,
       .num_cliprects = 0,
       .DR1 = 0,
       .DR4 = 0,
-      .flags = I915_EXEC_HANDLE_LUT | I915_EXEC_RENDER |
-               I915_EXEC_CONSTANTS_REL_GENERAL,
+      .flags = I915_EXEC_HANDLE_LUT | I915_EXEC_RENDER,
       .rsvd1 = cmd_buffer->device->context_id,
       .rsvd2 = 0,
    };
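
Dropping I915_EXEC_CONSTANTS_REL_GENERAL should be behavior-neutral: in
i915_drm.h it is defined as (0<<16), the hardware default, so OR-ing it in
never changed the flags word.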
@@ -1390,22 +1466,48 @@ setup_execbuf_for_cmd_buffer(struct anv_execbuf *execbuf,
    return VK_SUCCESS;
 }
 
+static VkResult
+setup_empty_execbuf(struct anv_execbuf *execbuf, struct anv_device *device)
+{
+   VkResult result = anv_execbuf_add_bo(execbuf, &device->trivial_batch_bo,
+                                        NULL, 0, &device->alloc);
+   if (result != VK_SUCCESS)
+      return result;
+
+   execbuf->execbuf = (struct drm_i915_gem_execbuffer2) {
+      .buffers_ptr = (uintptr_t) execbuf->objects,
+      .buffer_count = execbuf->bo_count,
+      .batch_start_offset = 0,
+      .batch_len = 8, /* GEN7_MI_BATCH_BUFFER_END and NOOP */
+      .flags = I915_EXEC_HANDLE_LUT | I915_EXEC_RENDER,
+      .rsvd1 = device->context_id,
+      .rsvd2 = 0,
+   };
+
+   return VK_SUCCESS;
+}
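
A hedged sketch of what trivial_batch_bo presumably contains: one
MI_BATCH_BUFFER_END dword padded with an MI_NOOP, which is where
batch_len = 8 comes from (encodings per the gen command reference; verify
against genxml before relying on them):

    const uint32_t trivial_batch[2] = {
       0x05000000,   /* MI_BATCH_BUFFER_END: opcode 0x0A << 23 */
       0x00000000,   /* MI_NOOP */
    };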
+
 VkResult
 anv_cmd_buffer_execbuf(struct anv_device *device,
                        struct anv_cmd_buffer *cmd_buffer,
                        const VkSemaphore *in_semaphores,
                        uint32_t num_in_semaphores,
                        const VkSemaphore *out_semaphores,
-                       uint32_t num_out_semaphores)
+                       uint32_t num_out_semaphores,
+                       VkFence _fence)
 {
+   ANV_FROM_HANDLE(anv_fence, fence, _fence);
+
    struct anv_execbuf execbuf;
    anv_execbuf_init(&execbuf);
 
+   int in_fence = -1;
    VkResult result = VK_SUCCESS;
    for (uint32_t i = 0; i < num_in_semaphores; i++) {
       ANV_FROM_HANDLE(anv_semaphore, semaphore, in_semaphores[i]);
-      assert(semaphore->temporary.type == ANV_SEMAPHORE_TYPE_NONE);
-      struct anv_semaphore_impl *impl = &semaphore->permanent;
+      struct anv_semaphore_impl *impl =
+         semaphore->temporary.type != ANV_SEMAPHORE_TYPE_NONE ?
+         &semaphore->temporary : &semaphore->permanent;
 
       switch (impl->type) {
       case ANV_SEMAPHORE_TYPE_BO:
@@ -1414,15 +1516,54 @@ anv_cmd_buffer_execbuf(struct anv_device *device,
          if (result != VK_SUCCESS)
             return result;
          break;
+
+      case ANV_SEMAPHORE_TYPE_SYNC_FILE:
+         if (in_fence == -1) {
+            in_fence = impl->fd;
+         } else {
+            int merge = anv_gem_sync_file_merge(device, in_fence, impl->fd);
+            if (merge == -1)
+               return vk_error(VK_ERROR_INVALID_EXTERNAL_HANDLE);
+
+            close(impl->fd);
+            close(in_fence);
+            in_fence = merge;
+         }
+
+         impl->fd = -1;
+         break;
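
anv_gem_sync_file_merge presumably wraps the kernel's sync-file merge ioctl;
a sketch of that mechanism (struct and ioctl from linux/sync_file.h), which
lets any number of sync-file waits collapse into the single fd that
I915_EXEC_FENCE_IN accepts:

    struct sync_merge_data data = { .name = "anv merge", .fd2 = fd2 };
    if (ioctl(fd1, SYNC_IOC_MERGE, &data) == -1)
       return -1;      /* errno has the reason */
    return data.fence; /* new fd that waits on both fd1 and fd2 */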
+
+      case ANV_SEMAPHORE_TYPE_DRM_SYNCOBJ:
+         result = anv_execbuf_add_syncobj(&execbuf, impl->syncobj,
+                                          I915_EXEC_FENCE_WAIT,
+                                          &device->alloc);
+         if (result != VK_SUCCESS)
+            return result;
+         break;
+
       default:
          break;
       }
    }
 
+   bool need_out_fence = false;
    for (uint32_t i = 0; i < num_out_semaphores; i++) {
       ANV_FROM_HANDLE(anv_semaphore, semaphore, out_semaphores[i]);
-      assert(semaphore->temporary.type == ANV_SEMAPHORE_TYPE_NONE);
-      struct anv_semaphore_impl *impl = &semaphore->permanent;
+
+      /* Under most circumstances, out semaphores won't be temporary.  However,
+       * the spec does allow it for opaque_fd.  From the Vulkan 1.0.53 spec:
+       *
+       *    "If the import is temporary, the implementation must restore the
+       *    semaphore to its prior permanent state after submitting the next
+       *    semaphore wait operation."
+       *
+       * The spec says nothing whatsoever about signal operations on
+       * temporarily imported semaphores so it appears they are allowed.
+       * There are also CTS tests that require this to work.
+       */
+      struct anv_semaphore_impl *impl =
+         semaphore->temporary.type != ANV_SEMAPHORE_TYPE_NONE ?
+         &semaphore->temporary : &semaphore->permanent;
 
       switch (impl->type) {
       case ANV_SEMAPHORE_TYPE_BO:
@@ -1431,17 +1572,139 @@ anv_cmd_buffer_execbuf(struct anv_device *device,
          if (result != VK_SUCCESS)
             return result;
          break;
+
+      case ANV_SEMAPHORE_TYPE_SYNC_FILE:
+         need_out_fence = true;
+         break;
+
+      case ANV_SEMAPHORE_TYPE_DRM_SYNCOBJ:
+         result = anv_execbuf_add_syncobj(&execbuf, impl->syncobj,
+                                          I915_EXEC_FENCE_SIGNAL,
+                                          &device->alloc);
+         if (result != VK_SUCCESS)
+            return result;
+         break;
+
       default:
          break;
       }
    }
 
-   result = setup_execbuf_for_cmd_buffer(&execbuf, cmd_buffer);
+   if (fence) {
+      /* Under most circumstances, out fences won't be temporary.  However,
+       * the spec does allow it for opaque_fd.  From the Vulkan 1.0.53 spec,
+       * in its analogous language for semaphores:
+       *
+       *    "If the import is temporary, the implementation must restore the
+       *    semaphore to its prior permanent state after submitting the next
+       *    semaphore wait operation."
+       *
+       * The spec says nothing about signal operations on temporarily imported
+       * fences either, so they appear to be allowed; there are also CTS tests
+       * that require this to work.
+       */
+      struct anv_fence_impl *impl =
+         fence->temporary.type != ANV_FENCE_TYPE_NONE ?
+         &fence->temporary : &fence->permanent;
+
+      switch (impl->type) {
+      case ANV_FENCE_TYPE_BO:
+         result = anv_execbuf_add_bo(&execbuf, &impl->bo.bo, NULL,
+                                     EXEC_OBJECT_WRITE, &device->alloc);
+         if (result != VK_SUCCESS)
+            return result;
+         break;
+
+      case ANV_FENCE_TYPE_SYNCOBJ:
+         result = anv_execbuf_add_syncobj(&execbuf, impl->syncobj,
+                                          I915_EXEC_FENCE_SIGNAL,
+                                          &device->alloc);
+         if (result != VK_SUCCESS)
+            return result;
+         break;
+
+      default:
+         unreachable("Invalid fence type");
+      }
+   }
+
+   if (cmd_buffer)
+      result = setup_execbuf_for_cmd_buffer(&execbuf, cmd_buffer);
+   else
+      result = setup_empty_execbuf(&execbuf, device);
+
    if (result != VK_SUCCESS)
       return result;
 
+   if (execbuf.fence_count > 0) {
+      assert(device->instance->physicalDevice.has_syncobj);
+      execbuf.execbuf.flags |= I915_EXEC_FENCE_ARRAY;
+      execbuf.execbuf.num_cliprects = execbuf.fence_count;
+      execbuf.execbuf.cliprects_ptr = (uintptr_t) execbuf.fences;
+   }
+
+   if (in_fence != -1) {
+      execbuf.execbuf.flags |= I915_EXEC_FENCE_IN;
+      execbuf.execbuf.rsvd2 |= (uint32_t)in_fence;
+   }
+
+   if (need_out_fence)
+      execbuf.execbuf.flags |= I915_EXEC_FENCE_OUT;
+
    result = anv_device_execbuf(device, &execbuf.execbuf, execbuf.bos);
 
+   /* Execbuf does not consume the in_fence.  It's our job to close it. */
+   if (in_fence != -1)
+      close(in_fence);
+
+   for (uint32_t i = 0; i < num_in_semaphores; i++) {
+      ANV_FROM_HANDLE(anv_semaphore, semaphore, in_semaphores[i]);
+      /* From the Vulkan 1.0.53 spec:
+       *
+       *    "If the import is temporary, the implementation must restore the
+       *    semaphore to its prior permanent state after submitting the next
+       *    semaphore wait operation."
+       *
+       * This has to happen after the execbuf in case we close any syncobjs in
+       * the process.
+       */
+      anv_semaphore_reset_temporary(device, semaphore);
+   }
+
+   if (fence && fence->permanent.type == ANV_FENCE_TYPE_BO) {
+      /* BO fences can't be shared, so they can't be temporary. */
+      assert(fence->temporary.type == ANV_FENCE_TYPE_NONE);
+
+      /* Once the execbuf has returned, we need to set the fence state to
+       * SUBMITTED.  We can't do this before calling execbuf because
+       * anv_GetFenceStatus does not take the global device lock before checking
+       * fence->state.
+       *
+       * We set the fence state to SUBMITTED regardless of whether or not the
+       * execbuf succeeds because we need to ensure that vkWaitForFences() and
+       * vkGetFenceStatus() return a valid result (VK_ERROR_DEVICE_LOST or
+       * VK_SUCCESS) in a finite amount of time even if execbuf fails.
+       */
+      fence->permanent.bo.state = ANV_BO_FENCE_STATE_SUBMITTED;
+   }
+
+   if (result == VK_SUCCESS && need_out_fence) {
+      int out_fence = execbuf.execbuf.rsvd2 >> 32;
+      for (uint32_t i = 0; i < num_out_semaphores; i++) {
+         ANV_FROM_HANDLE(anv_semaphore, semaphore, out_semaphores[i]);
+         /* Out fences can't have temporary state because that would imply
+          * that we imported a sync file and are trying to signal it.
+          */
+         assert(semaphore->temporary.type == ANV_SEMAPHORE_TYPE_NONE);
+         struct anv_semaphore_impl *impl = &semaphore->permanent;
+
+         if (impl->type == ANV_SEMAPHORE_TYPE_SYNC_FILE) {
+            assert(impl->fd == -1);
+            impl->fd = dup(out_fence);
+         }
+      }
+      close(out_fence);
+   }
+
    anv_execbuf_finish(&execbuf, &device->alloc);
 
    return result;
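
A note on the rsvd2 packing above: with I915_EXEC_FENCE_IN the kernel reads
the input sync-file fd from the low 32 bits of rsvd2, and with
I915_EXEC_FENCE_OUT it writes the new output fd into the high 32 bits, hence
the `rsvd2 |= (uint32_t)in_fence` on the way in and the `rsvd2 >> 32` on the
way out.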