+ struct anv_bo *bo = anv_device_lookup_bo(device, gem_handle);
+ if (bo->refcount > 0) {
+ /* VK_EXT_external_memory_host doesn't require handling importing the
+ * same pointer twice at the same time, but we don't get in the way. If
+ * the kernel gives us the same gem_handle, only succeed if the flags match.
+ */
+ assert(bo->gem_handle == gem_handle);
+ if (bo_flags != bo->flags) {
+ pthread_mutex_unlock(&cache->mutex);
+ return vk_errorf(device, NULL, VK_ERROR_INVALID_EXTERNAL_HANDLE,
+ "same host pointer imported two different ways");
+ }
+
+ if (bo->has_client_visible_address !=
+ ((alloc_flags & ANV_BO_ALLOC_CLIENT_VISIBLE_ADDRESS) != 0)) {
+ pthread_mutex_unlock(&cache->mutex);
+ return vk_errorf(device, NULL, VK_ERROR_INVALID_EXTERNAL_HANDLE,
+ "The same BO was imported with and without buffer "
+ "device address");
+ }
+
+ if (client_address && client_address != gen_48b_address(bo->offset)) {
+ pthread_mutex_unlock(&cache->mutex);
+ return vk_errorf(device, NULL, VK_ERROR_INVALID_EXTERNAL_HANDLE,
+ "The same BO was imported at two different "
+ "addresses");
+ }
+
+ __sync_fetch_and_add(&bo->refcount, 1);
+ } else {
+ struct anv_bo new_bo = {
+ .gem_handle = gem_handle,
+ .refcount = 1,
+ .offset = -1,
+ .size = size,
+ .map = host_ptr,
+ .flags = bo_flags,
+ .is_external = true,
+ .from_host_ptr = true,
+ .has_client_visible_address =
+ (alloc_flags & ANV_BO_ALLOC_CLIENT_VISIBLE_ADDRESS) != 0,
+ };
+
+ assert(client_address == gen_48b_address(client_address));
+ if (new_bo.flags & EXEC_OBJECT_PINNED) {
+ assert(new_bo._ccs_size == 0);
+ new_bo.offset = anv_vma_alloc(device, new_bo.size,
+ anv_device_get_bo_align(device,
+ alloc_flags),
+ alloc_flags, client_address);
+ if (new_bo.offset == 0) {
+ anv_gem_close(device, new_bo.gem_handle);
+ pthread_mutex_unlock(&cache->mutex);
+ return vk_errorf(device, NULL, VK_ERROR_OUT_OF_DEVICE_MEMORY,
+ "failed to allocate virtual address for BO");
+ }
+ } else {
+ assert(!new_bo.has_client_visible_address);
+ }
+
+ *bo = new_bo;
+ }
+
+ pthread_mutex_unlock(&cache->mutex);
+ *bo_out = bo;
+
+ return VK_SUCCESS;
+}
+
+VkResult
+anv_device_import_bo(struct anv_device *device,
+ int fd,
+ enum anv_bo_alloc_flags alloc_flags,
+ uint64_t client_address,
+ struct anv_bo **bo_out)
+{
+ assert(!(alloc_flags & (ANV_BO_ALLOC_MAPPED |
+ ANV_BO_ALLOC_SNOOPED |
+ ANV_BO_ALLOC_FIXED_ADDRESS)));
+
+ /* We can't do implicit CCS with an aux table on shared memory */
+ if (!device->physical->has_implicit_ccs || device->info.has_aux_map)
+ assert(!(alloc_flags & ANV_BO_ALLOC_IMPLICIT_CCS));
+
+ struct anv_bo_cache *cache = &device->bo_cache;
+ const uint32_t bo_flags =
+ anv_bo_alloc_flags_to_bo_flags(device, alloc_flags);
+ assert(bo_flags == (bo_flags & ANV_BO_CACHE_SUPPORTED_FLAGS));
+
+ pthread_mutex_lock(&cache->mutex);