return VK_SUCCESS;
}
+VkResult
+anv_bo_cache_import_host_ptr(struct anv_device *device,
+ struct anv_bo_cache *cache,
+ void *host_ptr, uint32_t size,
+ uint64_t bo_flags, struct anv_bo **bo_out)
+{
+ assert(bo_flags == (bo_flags & ANV_BO_CACHE_SUPPORTED_FLAGS));
+ assert((bo_flags & ANV_BO_EXTERNAL) == 0);
+
+ uint32_t gem_handle = anv_gem_userptr(device, host_ptr, size);
+ if (!gem_handle)
+ return vk_error(VK_ERROR_INVALID_EXTERNAL_HANDLE);
+
+ pthread_mutex_lock(&cache->mutex);
+
+ struct anv_cached_bo *bo = anv_bo_cache_lookup_locked(cache, gem_handle);
+ if (bo) {
+ /* VK_EXT_external_memory_host doesn't require us to support importing
+ * the same pointer twice at the same time, but we don't get in the way
+ * either. If the kernel hands back the same gem_handle, only succeed
+ * when the flags match.
+ */
+ if (bo_flags != bo->bo.flags) {
+ pthread_mutex_unlock(&cache->mutex);
+ return vk_errorf(device->instance, NULL,
+ VK_ERROR_INVALID_EXTERNAL_HANDLE,
+ "same host pointer imported two different ways");
+ }
+ __sync_fetch_and_add(&bo->refcount, 1);
+ } else {
+ bo = vk_alloc(&device->alloc, sizeof(struct anv_cached_bo), 8,
+ VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
+ if (!bo) {
+ anv_gem_close(device, gem_handle);
+ pthread_mutex_unlock(&cache->mutex);
+ return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
+ }
+
+ bo->refcount = 1;
+
+ anv_bo_init(&bo->bo, gem_handle, size);
+ bo->bo.flags = bo_flags;
+
+ if (!anv_vma_alloc(device, &bo->bo)) {
+ anv_gem_close(device, bo->bo.gem_handle);
+ pthread_mutex_unlock(&cache->mutex);
+ vk_free(&device->alloc, bo);
+ return vk_errorf(device->instance, NULL,
+ VK_ERROR_OUT_OF_DEVICE_MEMORY,
+ "failed to allocate virtual address for BO");
+ }
+
+ _mesa_hash_table_insert(cache->bo_map, (void *)(uintptr_t)gem_handle, bo);
+ }
+
+ pthread_mutex_unlock(&cache->mutex);
+ *bo_out = &bo->bo;
+
+ return VK_SUCCESS;
+}
+
VkResult
anv_bo_cache_import(struct anv_device *device,
struct anv_bo_cache *cache,
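For context, `anv_gem_userptr` wraps the i915 userptr ioctl, which turns an existing page-aligned CPU allocation into a GEM handle without copying. A minimal sketch of such a wrapper, assuming the `drm/i915_drm.h` UAPI (the driver's real helper lives in anv_gem.c and may differ in detail):

```c
#include <stdint.h>
#include <sys/ioctl.h>
#include <drm/i915_drm.h>

/* Sketch: turn a page-aligned CPU allocation into a GEM handle.
 * Returns 0 on failure, the convention the cache code above checks. */
static uint32_t
gem_userptr_sketch(int drm_fd, void *mem, uint64_t size)
{
   struct drm_i915_gem_userptr userptr = {
      .user_ptr = (uintptr_t)mem,
      .user_size = size,
      .flags = 0,
   };

   if (ioctl(drm_fd, DRM_IOCTL_I915_GEM_USERPTR, &userptr) == -1)
      return 0;

   return userptr.handle;
}
```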
break;
}
+ case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTERNAL_MEMORY_HOST_PROPERTIES_EXT: {
+ VkPhysicalDeviceExternalMemoryHostPropertiesEXT *props =
+ (VkPhysicalDeviceExternalMemoryHostPropertiesEXT *) ext;
+ /* Userptr needs page-aligned host memory. */
+ props->minImportedHostPointerAlignment = 4096;
+ break;
+ }
+
case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_ID_PROPERTIES: {
VkPhysicalDeviceIDProperties *id_props =
(VkPhysicalDeviceIDProperties *)ext;
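From the application's side, this value is read by chaining VkPhysicalDeviceExternalMemoryHostPropertiesEXT into vkGetPhysicalDeviceProperties2. A short sketch, assuming a valid `physical_device` handle:

```c
VkPhysicalDeviceExternalMemoryHostPropertiesEXT host_props = {
   .sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTERNAL_MEMORY_HOST_PROPERTIES_EXT,
};
VkPhysicalDeviceProperties2 props2 = {
   .sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROPERTIES_2,
   .pNext = &host_props,
};
vkGetPhysicalDeviceProperties2(physical_device, &props2);
/* This driver reports 4096 (one page), per the case above. */
VkDeviceSize min_align = host_props.minImportedHostPointerAlignment;
```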
mem->map = NULL;
mem->map_size = 0;
mem->ahw = NULL;
+ mem->host_ptr = NULL;
uint64_t bo_flags = 0;
goto success;
}
+ const VkImportMemoryHostPointerInfoEXT *host_ptr_info =
+ vk_find_struct_const(pAllocateInfo->pNext,
+ IMPORT_MEMORY_HOST_POINTER_INFO_EXT);
+ if (host_ptr_info && host_ptr_info->handleType) {
+ if (host_ptr_info->handleType ==
+ VK_EXTERNAL_MEMORY_HANDLE_TYPE_HOST_MAPPED_FOREIGN_MEMORY_BIT_EXT) {
+ result = vk_error(VK_ERROR_INVALID_EXTERNAL_HANDLE);
+ goto fail;
+ }
+
+ assert(host_ptr_info->handleType ==
+ VK_EXTERNAL_MEMORY_HANDLE_TYPE_HOST_ALLOCATION_BIT_EXT);
+
+ result = anv_bo_cache_import_host_ptr(
+ device, &device->bo_cache, host_ptr_info->pHostPointer,
+ pAllocateInfo->allocationSize, bo_flags, &mem->bo);
+
+ if (result != VK_SUCCESS)
+ goto fail;
+
+ mem->host_ptr = host_ptr_info->pHostPointer;
+ goto success;
+ }
+
/* Regular allocate (not importing memory). */
if (export_info && export_info->handleTypes)
}
}
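For reference, the application-side counterpart of this import path chains a VkImportMemoryHostPointerInfoEXT into VkMemoryAllocateInfo; both the pointer and the allocation size must honor minImportedHostPointerAlignment. A sketch, where `device` and `memory_type_index` are assumed to come from the surrounding application code:

```c
#include <stdlib.h>
#include <vulkan/vulkan.h>

/* Hypothetical application snippet: import an aligned CPU buffer as
 * device memory. */
VkDeviceSize size = 4096;                 /* multiple of the min alignment */
void *host_mem = aligned_alloc(4096, size);

VkImportMemoryHostPointerInfoEXT import_info = {
   .sType = VK_STRUCTURE_TYPE_IMPORT_MEMORY_HOST_POINTER_INFO_EXT,
   .handleType = VK_EXTERNAL_MEMORY_HANDLE_TYPE_HOST_ALLOCATION_BIT_EXT,
   .pHostPointer = host_mem,
};
VkMemoryAllocateInfo alloc_info = {
   .sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO,
   .pNext = &import_info,
   .allocationSize = size,
   .memoryTypeIndex = memory_type_index,  /* from the query below */
};
VkDeviceMemory memory;
VkResult res = vkAllocateMemory(device, &alloc_info, NULL, &memory);
```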
+VkResult anv_GetMemoryHostPointerPropertiesEXT(
+ VkDevice _device,
+ VkExternalMemoryHandleTypeFlagBits handleType,
+ const void* pHostPointer,
+ VkMemoryHostPointerPropertiesEXT* pMemoryHostPointerProperties)
+{
+ ANV_FROM_HANDLE(anv_device, device, _device);
+
+ assert(pMemoryHostPointerProperties->sType ==
+ VK_STRUCTURE_TYPE_MEMORY_HOST_POINTER_PROPERTIES_EXT);
+
+ switch (handleType) {
+ case VK_EXTERNAL_MEMORY_HANDLE_TYPE_HOST_ALLOCATION_BIT_EXT: {
+ struct anv_physical_device *pdevice = &device->instance->physicalDevice;
+
+ /* Host memory can be imported as any memory type. */
+ pMemoryHostPointerProperties->memoryTypeBits =
+ (1ull << pdevice->memory.type_count) - 1;
+
+ return VK_SUCCESS;
+ }
+ default:
+ return VK_ERROR_INVALID_EXTERNAL_HANDLE;
+ }
+}
+
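Applications typically call this entry point before importing, to pick a valid memoryTypeIndex; as an extension entry point it has to be fetched through vkGetDeviceProcAddr. A sketch, with `device` and `host_mem` assumed from the surrounding code:

```c
PFN_vkGetMemoryHostPointerPropertiesEXT get_host_ptr_props =
   (PFN_vkGetMemoryHostPointerPropertiesEXT)
      vkGetDeviceProcAddr(device, "vkGetMemoryHostPointerPropertiesEXT");

VkMemoryHostPointerPropertiesEXT props = {
   .sType = VK_STRUCTURE_TYPE_MEMORY_HOST_POINTER_PROPERTIES_EXT,
};
get_host_ptr_props(device,
                   VK_EXTERNAL_MEMORY_HANDLE_TYPE_HOST_ALLOCATION_BIT_EXT,
                   host_mem, &props);
/* Any set bit in props.memoryTypeBits is a valid memoryTypeIndex; on
 * this driver every type is allowed. */
```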
void anv_FreeMemory(
VkDevice _device,
VkDeviceMemory _mem,
return VK_SUCCESS;
}
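+ /* Host-pointer memory is already CPU-visible; mapping just offsets
+ * into the imported allocation, with no anv_gem_mmap required.
+ */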
+ if (mem->host_ptr) {
+ *ppData = mem->host_ptr + offset;
+ return VK_SUCCESS;
+ }
+
if (size == VK_WHOLE_SIZE)
size = mem->bo->size - offset;
{
ANV_FROM_HANDLE(anv_device_memory, mem, _memory);
- if (mem == NULL)
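+ /* Host-pointer memory was never mapped through anv_gem_mmap, so there
+ * is nothing to munmap.
+ */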
+ if (mem == NULL || mem->host_ptr)
return;
anv_gem_munmap(mem->map, mem->map_size);
Extension('VK_EXT_display_control', 1, 'VK_USE_PLATFORM_DISPLAY_KHR'),
Extension('VK_EXT_display_surface_counter', 1, 'VK_USE_PLATFORM_DISPLAY_KHR'),
Extension('VK_EXT_external_memory_dma_buf', 1, True),
+ Extension('VK_EXT_external_memory_host', 1, True),
Extension('VK_EXT_global_priority', 1,
'device->has_context_priority'),
Extension('VK_EXT_inline_uniform_block', 1, True),
struct anv_bo_cache *cache,
uint64_t size, uint64_t bo_flags,
struct anv_bo **bo);
+VkResult anv_bo_cache_import_host_ptr(struct anv_device *device,
+ struct anv_bo_cache *cache,
+ void *host_ptr, uint32_t size,
+ uint64_t bo_flags, struct anv_bo **bo_out);
VkResult anv_bo_cache_import(struct anv_device *device,
struct anv_bo_cache *cache,
int fd, uint64_t bo_flags,
* which we must release when memory is freed.
*/
struct AHardwareBuffer * ahw;
+
+ /* If set, this memory comes from a host pointer. */
+ void * host_ptr;
};
/**