+
+UNUSED static struct anv_bo *
+anv_bo_cache_lookup(struct anv_bo_cache *cache, uint32_t gem_handle)
+{
+ pthread_mutex_lock(&cache->mutex);
+
+ struct anv_cached_bo *bo = anv_bo_cache_lookup_locked(cache, gem_handle);
+
+ pthread_mutex_unlock(&cache->mutex);
+
+ return bo ? &bo->bo : NULL;
+}
+
/* The subset of drm_i915_gem_exec_object2 flags the BO cache knows how to
 * store on a cached BO and merge across repeated imports (see how
 * anv_bo_cache_import() combines them).  Callers of the cache alloc/import
 * entry points assert that no flag outside this mask is passed in.
 */
#define ANV_BO_CACHE_SUPPORTED_FLAGS \
   (EXEC_OBJECT_WRITE | \
    EXEC_OBJECT_ASYNC | \
    EXEC_OBJECT_SUPPORTS_48B_ADDRESS | \
    EXEC_OBJECT_PINNED)
+
+VkResult
+anv_bo_cache_alloc(struct anv_device *device,
+ struct anv_bo_cache *cache,
+ uint64_t size, uint64_t bo_flags,
+ struct anv_bo **bo_out)
+{
+ assert(bo_flags == (bo_flags & ANV_BO_CACHE_SUPPORTED_FLAGS));
+
+ struct anv_cached_bo *bo =
+ vk_alloc(&device->alloc, sizeof(struct anv_cached_bo), 8,
+ VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
+ if (!bo)
+ return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
+
+ bo->refcount = 1;
+
+ /* The kernel is going to give us whole pages anyway */
+ size = align_u64(size, 4096);
+
+ VkResult result = anv_bo_init_new(&bo->bo, device, size);
+ if (result != VK_SUCCESS) {
+ vk_free(&device->alloc, bo);
+ return result;
+ }
+
+ bo->bo.flags = bo_flags;
+
+ if (!anv_vma_alloc(device, &bo->bo)) {
+ anv_gem_close(device, bo->bo.gem_handle);
+ vk_free(&device->alloc, bo);
+ return vk_errorf(device->instance, NULL,
+ VK_ERROR_OUT_OF_DEVICE_MEMORY,
+ "failed to allocate virtual address for BO");
+ }
+
+ assert(bo->bo.gem_handle);
+
+ pthread_mutex_lock(&cache->mutex);
+
+ _mesa_hash_table_insert(cache->bo_map,
+ (void *)(uintptr_t)bo->bo.gem_handle, bo);
+
+ pthread_mutex_unlock(&cache->mutex);
+
+ *bo_out = &bo->bo;
+
+ return VK_SUCCESS;
+}
+
+VkResult
+anv_bo_cache_import(struct anv_device *device,
+ struct anv_bo_cache *cache,
+ int fd, uint64_t bo_flags,
+ struct anv_bo **bo_out)
+{
+ assert(bo_flags == (bo_flags & ANV_BO_CACHE_SUPPORTED_FLAGS));
+
+ pthread_mutex_lock(&cache->mutex);
+
+ uint32_t gem_handle = anv_gem_fd_to_handle(device, fd);
+ if (!gem_handle) {
+ pthread_mutex_unlock(&cache->mutex);
+ return vk_error(VK_ERROR_INVALID_EXTERNAL_HANDLE_KHR);
+ }
+
+ struct anv_cached_bo *bo = anv_bo_cache_lookup_locked(cache, gem_handle);
+ if (bo) {
+ /* We have to be careful how we combine flags so that it makes sense.
+ * Really, though, if we get to this case and it actually matters, the
+ * client has imported a BO twice in different ways and they get what
+ * they have coming.
+ */
+ uint64_t new_flags = 0;
+ new_flags |= (bo->bo.flags | bo_flags) & EXEC_OBJECT_WRITE;
+ new_flags |= (bo->bo.flags & bo_flags) & EXEC_OBJECT_ASYNC;
+ new_flags |= (bo->bo.flags & bo_flags) & EXEC_OBJECT_SUPPORTS_48B_ADDRESS;
+ new_flags |= (bo->bo.flags | bo_flags) & EXEC_OBJECT_PINNED;
+
+ /* It's theoretically possible for a BO to get imported such that it's
+ * both pinned and not pinned. The only way this can happen is if it
+ * gets imported as both a semaphore and a memory object and that would
+ * be an application error. Just fail out in that case.
+ */
+ if ((bo->bo.flags & EXEC_OBJECT_PINNED) !=
+ (bo_flags & EXEC_OBJECT_PINNED)) {
+ pthread_mutex_unlock(&cache->mutex);
+ return vk_errorf(device->instance, NULL,
+ VK_ERROR_INVALID_EXTERNAL_HANDLE,
+ "The same BO was imported two different ways");
+ }
+
+ /* It's also theoretically possible that someone could export a BO from
+ * one heap and import it into another or to import the same BO into two
+ * different heaps. If this happens, we could potentially end up both
+ * allowing and disallowing 48-bit addresses. There's not much we can
+ * do about it if we're pinning so we just throw an error and hope no
+ * app is actually that stupid.
+ */
+ if ((new_flags & EXEC_OBJECT_PINNED) &&
+ (bo->bo.flags & EXEC_OBJECT_SUPPORTS_48B_ADDRESS) !=
+ (bo_flags & EXEC_OBJECT_SUPPORTS_48B_ADDRESS)) {
+ return vk_errorf(device->instance, NULL,
+ VK_ERROR_INVALID_EXTERNAL_HANDLE,
+ "The same BO was imported on two different heaps");
+ }
+
+ bo->bo.flags = new_flags;
+
+ __sync_fetch_and_add(&bo->refcount, 1);
+ } else {
+ off_t size = lseek(fd, 0, SEEK_END);
+ if (size == (off_t)-1) {
+ anv_gem_close(device, gem_handle);
+ pthread_mutex_unlock(&cache->mutex);
+ return vk_error(VK_ERROR_INVALID_EXTERNAL_HANDLE_KHR);
+ }
+
+ bo = vk_alloc(&device->alloc, sizeof(struct anv_cached_bo), 8,
+ VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
+ if (!bo) {
+ anv_gem_close(device, gem_handle);
+ pthread_mutex_unlock(&cache->mutex);
+ return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
+ }
+
+ bo->refcount = 1;
+
+ anv_bo_init(&bo->bo, gem_handle, size);
+ bo->bo.flags = bo_flags;
+
+ if (!anv_vma_alloc(device, &bo->bo)) {
+ anv_gem_close(device, bo->bo.gem_handle);
+ pthread_mutex_unlock(&cache->mutex);
+ vk_free(&device->alloc, bo);
+ return vk_errorf(device->instance, NULL,
+ VK_ERROR_OUT_OF_DEVICE_MEMORY,
+ "failed to allocate virtual address for BO");
+ }
+
+ _mesa_hash_table_insert(cache->bo_map, (void *)(uintptr_t)gem_handle, bo);
+ }
+
+ pthread_mutex_unlock(&cache->mutex);
+ *bo_out = &bo->bo;
+
+ return VK_SUCCESS;
+}
+
+VkResult
+anv_bo_cache_export(struct anv_device *device,
+ struct anv_bo_cache *cache,
+ struct anv_bo *bo_in, int *fd_out)
+{
+ assert(anv_bo_cache_lookup(cache, bo_in->gem_handle) == bo_in);
+ struct anv_cached_bo *bo = (struct anv_cached_bo *)bo_in;
+
+ int fd = anv_gem_handle_to_fd(device, bo->bo.gem_handle);
+ if (fd < 0)
+ return vk_error(VK_ERROR_TOO_MANY_OBJECTS);
+
+ *fd_out = fd;
+
+ return VK_SUCCESS;
+}
+
/* Atomically decrement *counter, unless doing so would take it from 1 to 0.
 *
 * Returns true if the counter was decremented, false if it was exactly one
 * (in which case it is left unchanged).  Implemented as a compare-and-swap
 * retry loop so concurrent updaters cannot race the 1 -> 0 transition.
 */
static bool
atomic_dec_not_one(uint32_t *counter)
{
   for (uint32_t cur = *counter;;) {
      if (cur == 1)
         return false;

      const uint32_t prev =
         __sync_val_compare_and_swap(counter, cur, cur - 1);
      if (prev == cur)
         return true;

      /* Someone else changed the counter; retry against the fresh value. */
      cur = prev;
   }
}
+
/* Drop one reference to \p bo_in, destroying the BO when the last
 * reference goes away.
 *
 * Fast path: if the refcount is provably greater than one, decrement it
 * without taking the cache mutex and return.  Slow path: take the mutex,
 * perform the final decrement under it, and if we really are last, unmap
 * the BO, release its virtual address range, remove it from the cache map,
 * and close the GEM handle — all before unlocking, so a concurrent import
 * can never observe a recycled handle mid-teardown.
 */
void
anv_bo_cache_release(struct anv_device *device,
                     struct anv_bo_cache *cache,
                     struct anv_bo *bo_in)
{
   assert(anv_bo_cache_lookup(cache, bo_in->gem_handle) == bo_in);
   struct anv_cached_bo *bo = (struct anv_cached_bo *)bo_in;

   /* Try to decrement the counter but don't go below one. If this succeeds
    * then the refcount has been decremented and we are not the last
    * reference.
    */
   if (atomic_dec_not_one(&bo->refcount))
      return;

   pthread_mutex_lock(&cache->mutex);

   /* We are probably the last reference since our attempt to decrement above
    * failed. However, we can't actually know until we are inside the mutex.
    * Otherwise, someone could import the BO between the decrement and our
    * taking the mutex.
    */
   if (unlikely(__sync_sub_and_fetch(&bo->refcount, 1) > 0)) {
      /* Turns out we're not the last reference. Unlock and bail. */
      pthread_mutex_unlock(&cache->mutex);
      return;
   }

   /* Refcount hit zero under the mutex: we own teardown.  Remove the map
    * entry first so lookups can no longer hand out this BO.
    */
   struct hash_entry *entry =
      _mesa_hash_table_search(cache->bo_map,
                              (const void *)(uintptr_t)bo->bo.gem_handle);
   assert(entry);
   _mesa_hash_table_remove(cache->bo_map, entry);

   /* Only unmap if the BO was ever CPU-mapped. */
   if (bo->bo.map)
      anv_gem_munmap(bo->bo.map, bo->bo.size);

   anv_vma_free(device, &bo->bo);

   anv_gem_close(device, bo->bo.gem_handle);

   /* Don't unlock until we've actually closed the BO. The whole point of
    * the BO cache is to ensure that we correctly handle races with creating
    * and releasing GEM handles and we don't want to let someone import the BO
    * again between mutex unlock and closing the GEM handle.
    */
   pthread_mutex_unlock(&cache->mutex);

   vk_free(&device->alloc, bo);
}