X-Git-Url: https://git.libre-soc.org/?a=blobdiff_plain;f=src%2Fintel%2Fvulkan%2Fanv_allocator.c;h=bd9289c1876bdbcc03efe28becafc2f6fb090667;hb=98dc179c1e094ab42346b23fe046ebb719b66ed4;hp=a39a1715d53f15b7200a087d17b20ca69eaa72c4;hpb=b781c85c79944ccc0a6b0e49daae574672c6dd26;p=mesa.git diff --git a/src/intel/vulkan/anv_allocator.c b/src/intel/vulkan/anv_allocator.c index a39a1715d53..bd9289c1876 100644 --- a/src/intel/vulkan/anv_allocator.c +++ b/src/intel/vulkan/anv_allocator.c @@ -109,11 +109,8 @@ struct anv_mmap_cleanup { void *map; size_t size; - uint32_t gem_handle; }; -#define ANV_MMAP_CLEANUP_INIT ((struct anv_mmap_cleanup){0}) - static inline uint32_t ilog2_round_up(uint32_t value) { @@ -360,57 +357,6 @@ anv_free_list_pop(union anv_free_list *list, return NULL; } -/* All pointers in the ptr_free_list are assumed to be page-aligned. This - * means that the bottom 12 bits should all be zero. - */ -#define PFL_COUNT(x) ((uintptr_t)(x) & 0xfff) -#define PFL_PTR(x) ((void *)((uintptr_t)(x) & ~(uintptr_t)0xfff)) -#define PFL_PACK(ptr, count) ({ \ - (void *)(((uintptr_t)(ptr) & ~(uintptr_t)0xfff) | ((count) & 0xfff)); \ -}) - -static bool -anv_ptr_free_list_pop(void **list, void **elem) -{ - void *current = *list; - while (PFL_PTR(current) != NULL) { - void **next_ptr = PFL_PTR(current); - void *new_ptr = VG_NOACCESS_READ(next_ptr); - unsigned new_count = PFL_COUNT(current) + 1; - void *new = PFL_PACK(new_ptr, new_count); - void *old = __sync_val_compare_and_swap(list, current, new); - if (old == current) { - *elem = PFL_PTR(current); - return true; - } - current = old; - } - - return false; -} - -static void -anv_ptr_free_list_push(void **list, void *elem) -{ - void *old, *current; - void **next_ptr = elem; - - /* The pointer-based free list requires that the pointer be - * page-aligned. This is because we use the bottom 12 bits of the - * pointer to store a counter to solve the ABA concurrency problem. - */ - assert(((uintptr_t)elem & 0xfff) == 0); - - old = *list; - do { - current = old; - VG_NOACCESS_WRITE(next_ptr, PFL_PTR(current)); - unsigned new_count = PFL_COUNT(current) + 1; - void *new = PFL_PACK(elem, new_count); - old = __sync_val_compare_and_swap(list, current, new); - } while (old != current); -} - static VkResult anv_block_pool_expand_range(struct anv_block_pool *pool, uint32_t center_bo_offset, uint32_t size); @@ -419,25 +365,22 @@ VkResult anv_block_pool_init(struct anv_block_pool *pool, struct anv_device *device, uint64_t start_address, - uint32_t initial_size, - uint64_t bo_flags) + uint32_t initial_size) { VkResult result; pool->device = device; - pool->bo_flags = bo_flags; + pool->use_softpin = device->instance->physicalDevice.use_softpin; pool->nbos = 0; pool->size = 0; pool->center_bo_offset = 0; pool->start_address = gen_canonical_address(start_address); pool->map = NULL; - /* This pointer will always point to the first BO in the list */ - pool->bo = &pool->bos[0]; - - anv_bo_init(pool->bo, 0, 0); - - if (!(pool->bo_flags & EXEC_OBJECT_PINNED)) { + if (pool->use_softpin) { + pool->bo = NULL; + pool->fd = -1; + } else { /* Just make it 2GB up-front. The Linux kernel won't actually back it * with pages until we either map and fault on one of them or we use * userptr and send a chunk of it off to the GPU. 
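[Annotation] The free list deleted above packs a 12-bit counter into the low bits of each page-aligned pointer so the compare-and-swap in pop/push can tell a recycled list head apart from the original and thereby sidestep the ABA problem. For reference, a standalone round-trip of that packing scheme (macro names lower-cased, test harness and assertions are mine, not part of the patch):

#include <assert.h>
#include <stdint.h>
#include <stdlib.h>

/* Same packing as the removed PFL_* macros: the element pointer is
 * page-aligned, so its low 12 bits are free to hold a counter bumped on
 * every push/pop.  Two list heads pointing at the same element but
 * produced by different operations then compare unequal, which is what
 * defeats ABA for the CAS loop. */
static inline void *pfl_pack(void *ptr, uintptr_t count)
{
   return (void *)(((uintptr_t)ptr & ~(uintptr_t)0xfff) | (count & 0xfff));
}

static inline void *pfl_ptr(void *packed)
{
   return (void *)((uintptr_t)packed & ~(uintptr_t)0xfff);
}

static inline uintptr_t pfl_count(void *packed)
{
   return (uintptr_t)packed & 0xfff;
}

int main(void)
{
   /* aligned_alloc gives us the page alignment the scheme depends on. */
   void *page = aligned_alloc(4096, 4096);
   if (page == NULL)
      return 1;

   void *packed = pfl_pack(page, 42);
   assert(pfl_ptr(packed) == page);
   assert(pfl_count(packed) == 42);

   free(page);
   return 0;
}

The patch replaces this hand-rolled list with util_sparse_array_free_list, which solves the same recycling problem without requiring page-aligned elements.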
@@ -445,8 +388,13 @@ anv_block_pool_init(struct anv_block_pool *pool, pool->fd = os_create_anonymous_file(BLOCK_POOL_MEMFD_SIZE, "block pool"); if (pool->fd == -1) return vk_error(VK_ERROR_INITIALIZATION_FAILED); - } else { - pool->fd = -1; + + pool->wrapper_bo = (struct anv_bo) { + .refcount = 1, + .offset = -1, + .is_wrapper = true, + }; + pool->bo = &pool->wrapper_bo; } if (!u_vector_init(&pool->mmap_cleanups, @@ -475,7 +423,7 @@ anv_block_pool_init(struct anv_block_pool *pool, fail_mmap_cleanups: u_vector_finish(&pool->mmap_cleanups); fail_fd: - if (!(pool->bo_flags & EXEC_OBJECT_PINNED)) + if (pool->fd >= 0) close(pool->fd); return result; @@ -484,21 +432,18 @@ anv_block_pool_init(struct anv_block_pool *pool, void anv_block_pool_finish(struct anv_block_pool *pool) { - struct anv_mmap_cleanup *cleanup; - const bool use_softpin = !!(pool->bo_flags & EXEC_OBJECT_PINNED); - - u_vector_foreach(cleanup, &pool->mmap_cleanups) { - if (use_softpin) - anv_gem_munmap(cleanup->map, cleanup->size); - else - munmap(cleanup->map, cleanup->size); - - if (cleanup->gem_handle) - anv_gem_close(pool->device, cleanup->gem_handle); + anv_block_pool_foreach_bo(bo, pool) { + if (bo->map) + anv_gem_munmap(bo->map, bo->size); + anv_gem_close(pool->device, bo->gem_handle); } + struct anv_mmap_cleanup *cleanup; + u_vector_foreach(cleanup, &pool->mmap_cleanups) + munmap(cleanup->map, cleanup->size); u_vector_finish(&pool->mmap_cleanups); - if (!(pool->bo_flags & EXEC_OBJECT_PINNED)) + + if (pool->fd >= 0) close(pool->fd); } @@ -506,78 +451,17 @@ static VkResult anv_block_pool_expand_range(struct anv_block_pool *pool, uint32_t center_bo_offset, uint32_t size) { - void *map; - uint32_t gem_handle; - struct anv_mmap_cleanup *cleanup; - const bool use_softpin = !!(pool->bo_flags & EXEC_OBJECT_PINNED); - /* Assert that we only ever grow the pool */ assert(center_bo_offset >= pool->back_state.end); assert(size - center_bo_offset >= pool->state.end); /* Assert that we don't go outside the bounds of the memfd */ assert(center_bo_offset <= BLOCK_POOL_MEMFD_CENTER); - assert(use_softpin || + assert(pool->use_softpin || size - center_bo_offset <= BLOCK_POOL_MEMFD_SIZE - BLOCK_POOL_MEMFD_CENTER); - cleanup = u_vector_add(&pool->mmap_cleanups); - if (!cleanup) - return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY); - - *cleanup = ANV_MMAP_CLEANUP_INIT; - - uint32_t newbo_size = size - pool->size; - if (use_softpin) { - gem_handle = anv_gem_create(pool->device, newbo_size); - map = anv_gem_mmap(pool->device, gem_handle, 0, newbo_size, 0); - if (map == MAP_FAILED) - return vk_errorf(pool->device->instance, pool->device, - VK_ERROR_MEMORY_MAP_FAILED, "gem mmap failed: %m"); - assert(center_bo_offset == 0); - } else { - /* Just leak the old map until we destroy the pool. We can't munmap it - * without races or imposing locking on the block allocate fast path. On - * the whole the leaked maps adds up to less than the size of the - * current map. MAP_POPULATE seems like the right thing to do, but we - * should try to get some numbers. - */ - map = mmap(NULL, size, PROT_READ | PROT_WRITE, - MAP_SHARED | MAP_POPULATE, pool->fd, - BLOCK_POOL_MEMFD_CENTER - center_bo_offset); - if (map == MAP_FAILED) - return vk_errorf(pool->device->instance, pool->device, - VK_ERROR_MEMORY_MAP_FAILED, "mmap failed: %m"); - - /* Now that we mapped the new memory, we can write the new - * center_bo_offset back into pool and update pool->map. 
*/ - pool->center_bo_offset = center_bo_offset; - pool->map = map + center_bo_offset; - gem_handle = anv_gem_userptr(pool->device, map, size); - if (gem_handle == 0) { - munmap(map, size); - return vk_errorf(pool->device->instance, pool->device, - VK_ERROR_TOO_MANY_OBJECTS, "userptr failed: %m"); - } - } - - cleanup->map = map; - cleanup->size = use_softpin ? newbo_size : size; - cleanup->gem_handle = gem_handle; - - /* Regular objects are created I915_CACHING_CACHED on LLC platforms and - * I915_CACHING_NONE on non-LLC platforms. However, userptr objects are - * always created as I915_CACHING_CACHED, which on non-LLC means - * snooped. - * - * On platforms that support softpin, we are not going to use userptr - * anymore, but we still want to rely on the snooped states. So make sure - * everything is set to I915_CACHING_CACHED. - */ - if (!pool->device->info.has_llc) - anv_gem_set_caching(pool->device, gem_handle, I915_CACHING_CACHED); - - /* For block pool BOs we have to be a bit careful about where we place them + /* For state pool BOs we have to be a bit careful about where we place them * in the GTT. There are two documented workarounds for state base address * placement : Wa32bitGeneralStateOffset and Wa32bitInstructionBaseOffset * which state that those two base addresses do not support 48-bit @@ -600,37 +484,79 @@ anv_block_pool_expand_range(struct anv_block_pool *pool, * BO to some particular location of our choosing, but that's significantly * more work than just not setting a flag. So, we explicitly DO NOT set * the EXEC_OBJECT_SUPPORTS_48B_ADDRESS flag and the kernel does all of the - * hard work for us. + * hard work for us. When using softpin, we're in control and the fixed + * addresses we choose are fine for base addresses. */ - struct anv_bo *bo; - uint32_t bo_size; - uint64_t bo_offset; + enum anv_bo_alloc_flags bo_alloc_flags = 0; + if (!pool->use_softpin) + bo_alloc_flags |= ANV_BO_ALLOC_32BIT_ADDRESS; + + uint64_t bo_flags = 0; + if (pool->device->instance->physicalDevice.has_exec_capture) + bo_flags |= EXEC_OBJECT_CAPTURE; + + if (pool->use_softpin) { + uint32_t new_bo_size = size - pool->size; + struct anv_bo *new_bo; + VkResult result = anv_device_alloc_bo(pool->device, new_bo_size, + bo_alloc_flags | + ANV_BO_ALLOC_FIXED_ADDRESS | + ANV_BO_ALLOC_MAPPED | + ANV_BO_ALLOC_SNOOPED, + &new_bo); + if (result != VK_SUCCESS) + return result; - assert(pool->nbos < ANV_MAX_BLOCK_POOL_BOS); + assert(center_bo_offset == 0); - if (use_softpin) { - /* With softpin, we add a new BO to the pool, and set its offset to right - * where the previous BO ends (the end of the pool). - */ - bo = &pool->bos[pool->nbos++]; - bo_size = newbo_size; - bo_offset = pool->start_address + pool->size; + new_bo->offset = pool->start_address + pool->size; + pool->bos[pool->nbos++] = new_bo; + + /* This pointer will always point to the first BO in the list */ + pool->bo = pool->bos[0]; } else { - /* Without softpin, we just need one BO, and we already have a pointer to - * it. Simply "allocate" it from our array if we didn't do it before. - * The offset doesn't matter since we are not pinning the BO anyway. + /* Just leak the old map until we destroy the pool. We can't munmap it + * without races or imposing locking on the block allocate fast path. On + * the whole the leaked maps adds up to less than the size of the + * current map. MAP_POPULATE seems like the right thing to do, but we + * should try to get some numbers. 
*/ - if (pool->nbos == 0) - pool->nbos++; - bo = pool->bo; - bo_size = size; - bo_offset = 0; + void *map = mmap(NULL, size, PROT_READ | PROT_WRITE, + MAP_SHARED | MAP_POPULATE, pool->fd, + BLOCK_POOL_MEMFD_CENTER - center_bo_offset); + if (map == MAP_FAILED) + return vk_errorf(pool->device->instance, pool->device, + VK_ERROR_MEMORY_MAP_FAILED, "mmap failed: %m"); + + struct anv_bo *new_bo; + VkResult result = anv_device_import_bo_from_host_ptr(pool->device, + map, size, + bo_alloc_flags, + &new_bo); + if (result != VK_SUCCESS) { + munmap(map, size); + return result; + } + + struct anv_mmap_cleanup *cleanup = u_vector_add(&pool->mmap_cleanups); + if (!cleanup) { + munmap(map, size); + anv_device_release_bo(pool->device, new_bo); + return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY); + } + cleanup->map = map; + cleanup->size = size; + + /* Now that we mapped the new memory, we can write the new + * center_bo_offset back into pool and update pool->map. */ + pool->center_bo_offset = center_bo_offset; + pool->map = map + center_bo_offset; + + pool->bos[pool->nbos++] = new_bo; + pool->wrapper_bo.map = new_bo; } - anv_bo_init(bo, gem_handle, bo_size); - bo->offset = bo_offset; - bo->flags = pool->bo_flags; - bo->map = map; + assert(pool->nbos < ANV_MAX_BLOCK_POOL_BOS); pool->size = size; return VK_SUCCESS; @@ -645,7 +571,7 @@ anv_block_pool_expand_range(struct anv_block_pool *pool, void* anv_block_pool_map(struct anv_block_pool *pool, int32_t offset) { - if (pool->bo_flags & EXEC_OBJECT_PINNED) { + if (pool->use_softpin) { struct anv_bo *bo = NULL; int32_t bo_offset = 0; anv_block_pool_foreach_bo(iter_bo, pool) { @@ -776,8 +702,6 @@ anv_block_pool_grow(struct anv_block_pool *pool, struct anv_block_state *state) result = anv_block_pool_expand_range(pool, center_bo_offset, size); - pool->bo->flags = pool->bo_flags; - done: pthread_mutex_unlock(&pool->device->mutex); @@ -813,7 +737,7 @@ anv_block_pool_alloc_new(struct anv_block_pool *pool, if (state.next + block_size <= state.end) { return state.next; } else if (state.next <= state.end) { - if (pool->bo_flags & EXEC_OBJECT_PINNED && state.next < state.end) { + if (pool->use_softpin && state.next < state.end) { /* We need to grow the block pool, but still have some leftover * space that can't be used by that particular allocation. So we * add that as a "padding", and return it. 
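[Annotation] The softpin flow in the hunks above has two halves: anv_block_pool_expand_range() grows the pool by pinning a brand-new BO immediately after the bytes already backed, and anv_block_pool_map() resolves a pool offset by walking that BO list. A rough, self-contained sketch of both pieces, assuming simplified fake_* types invented here (back-growth, locking, and error handling omitted):

#include <assert.h>
#include <stddef.h>
#include <stdint.h>

/* Simplified stand-in for the softpin block pool: BOs placed back to
 * back at fixed addresses, growing only at the end. */
struct fake_bo {
   uint64_t offset; /* pinned GPU address of this BO */
   uint32_t size;
   char    *map;    /* CPU mapping of this BO        */
};

struct fake_pool {
   uint64_t       start_address;
   uint32_t       size;           /* bytes currently backed */
   unsigned       nbos;
   struct fake_bo bos[64];
};

/* Growing mirrors the softpin branch of anv_block_pool_expand_range():
 * the new BO is pinned right after the bytes already backed, so the
 * pool stays contiguous and nothing ever moves. */
static void
fake_pool_append(struct fake_pool *pool, struct fake_bo bo)
{
   bo.offset = pool->start_address + pool->size;
   pool->bos[pool->nbos++] = bo;
   pool->size += bo.size;
}

/* Lookup mirrors the softpin path of anv_block_pool_map(): walk the
 * BOs, advancing a running offset, until the BO containing the
 * requested offset is found.  (The real pool can also grow backwards
 * from its center; that part is ignored here.) */
static void *
fake_pool_map(struct fake_pool *pool, uint32_t offset)
{
   uint32_t bo_offset = 0;
   for (unsigned i = 0; i < pool->nbos; i++) {
      if (offset < bo_offset + pool->bos[i].size)
         return pool->bos[i].map + (offset - bo_offset);
      bo_offset += pool->bos[i].size;
   }
   assert(!"offset past the end of the pool");
   return NULL;
}

The non-softpin path keeps the old single-BO behaviour behind the new wrapper_bo, so existing users that hold pool->bo keep working while the softpin path switches to real per-chunk BOs.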
@@ -890,13 +814,11 @@ VkResult anv_state_pool_init(struct anv_state_pool *pool, struct anv_device *device, uint64_t start_address, - uint32_t block_size, - uint64_t bo_flags) + uint32_t block_size) { VkResult result = anv_block_pool_init(&pool->block_pool, device, start_address, - block_size * 16, - bo_flags); + block_size * 16); if (result != VK_SUCCESS) return result; @@ -1341,18 +1263,17 @@ anv_state_stream_alloc(struct anv_state_stream *stream, return state; } -struct bo_pool_bo_link { - struct bo_pool_bo_link *next; - struct anv_bo bo; -}; - void anv_bo_pool_init(struct anv_bo_pool *pool, struct anv_device *device, uint64_t bo_flags) { pool->device = device; pool->bo_flags = bo_flags; - memset(pool->free_list, 0, sizeof(pool->free_list)); + for (unsigned i = 0; i < ARRAY_SIZE(pool->free_list); i++) { + util_sparse_array_free_list_init(&pool->free_list[i], + &device->bo_cache.bo_map, 0, + offsetof(struct anv_bo, free_index)); + } VG(VALGRIND_CREATE_MEMPOOL(pool, 0, false)); } @@ -1361,14 +1282,15 @@ void anv_bo_pool_finish(struct anv_bo_pool *pool) { for (unsigned i = 0; i < ARRAY_SIZE(pool->free_list); i++) { - struct bo_pool_bo_link *link = PFL_PTR(pool->free_list[i]); - while (link != NULL) { - struct bo_pool_bo_link link_copy = VG_NOACCESS_READ(link); - - anv_gem_munmap(link_copy.bo.map, link_copy.bo.size); - anv_vma_free(pool->device, &link_copy.bo); - anv_gem_close(pool->device, link_copy.bo.gem_handle); - link = link_copy.next; + while (1) { + struct anv_bo *bo = + util_sparse_array_free_list_pop_elem(&pool->free_list[i]); + if (bo == NULL) + break; + + /* anv_device_release_bo is going to "free" it */ + VG(VALGRIND_MALLOCLIKE_BLOCK(bo->map, bo->size, 0, 1)); + anv_device_release_bo(pool->device, bo); } } @@ -1376,80 +1298,53 @@ anv_bo_pool_finish(struct anv_bo_pool *pool) } VkResult -anv_bo_pool_alloc(struct anv_bo_pool *pool, struct anv_bo *bo, uint32_t size) +anv_bo_pool_alloc(struct anv_bo_pool *pool, uint32_t size, + struct anv_bo **bo_out) { - VkResult result; - const unsigned size_log2 = size < 4096 ? 12 : ilog2_round_up(size); const unsigned pow2_size = 1 << size_log2; const unsigned bucket = size_log2 - 12; assert(bucket < ARRAY_SIZE(pool->free_list)); - void *next_free_void; - if (anv_ptr_free_list_pop(&pool->free_list[bucket], &next_free_void)) { - struct bo_pool_bo_link *next_free = next_free_void; - *bo = VG_NOACCESS_READ(&next_free->bo); - assert(bo->gem_handle); - assert(bo->map == next_free); - assert(size <= bo->size); - + struct anv_bo *bo = + util_sparse_array_free_list_pop_elem(&pool->free_list[bucket]); + if (bo != NULL) { VG(VALGRIND_MEMPOOL_ALLOC(pool, bo->map, size)); - + *bo_out = bo; return VK_SUCCESS; } - struct anv_bo new_bo; - - result = anv_bo_init_new(&new_bo, pool->device, pow2_size); + VkResult result = anv_device_alloc_bo(pool->device, + pow2_size, + ANV_BO_ALLOC_MAPPED | + ANV_BO_ALLOC_SNOOPED, + &bo); if (result != VK_SUCCESS) return result; - new_bo.flags = pool->bo_flags; - - if (!anv_vma_alloc(pool->device, &new_bo)) - return vk_error(VK_ERROR_OUT_OF_DEVICE_MEMORY); - - assert(new_bo.size == pow2_size); - - new_bo.map = anv_gem_mmap(pool->device, new_bo.gem_handle, 0, pow2_size, 0); - if (new_bo.map == MAP_FAILED) { - anv_gem_close(pool->device, new_bo.gem_handle); - anv_vma_free(pool->device, &new_bo); - return vk_error(VK_ERROR_MEMORY_MAP_FAILED); - } - - /* We are removing the state flushes, so lets make sure that these buffers - * are cached/snooped. 
- */ - if (!pool->device->info.has_llc) { - anv_gem_set_caching(pool->device, new_bo.gem_handle, - I915_CACHING_CACHED); - } - - *bo = new_bo; - + /* We want it to look like it came from this pool */ + VG(VALGRIND_FREELIKE_BLOCK(bo->map, 0)); VG(VALGRIND_MEMPOOL_ALLOC(pool, bo->map, size)); + *bo_out = bo; + return VK_SUCCESS; } void -anv_bo_pool_free(struct anv_bo_pool *pool, const struct anv_bo *bo_in) +anv_bo_pool_free(struct anv_bo_pool *pool, struct anv_bo *bo) { - /* Make a copy in case the anv_bo happens to be storred in the BO */ - struct anv_bo bo = *bo_in; - - VG(VALGRIND_MEMPOOL_FREE(pool, bo.map)); + VG(VALGRIND_MEMPOOL_FREE(pool, bo->map)); - struct bo_pool_bo_link *link = bo.map; - VG_NOACCESS_WRITE(&link->bo, bo); - - assert(util_is_power_of_two_or_zero(bo.size)); - const unsigned size_log2 = ilog2_round_up(bo.size); + assert(util_is_power_of_two_or_zero(bo->size)); + const unsigned size_log2 = ilog2_round_up(bo->size); const unsigned bucket = size_log2 - 12; assert(bucket < ARRAY_SIZE(pool->free_list)); - anv_ptr_free_list_push(&pool->free_list[bucket], link); + assert(util_sparse_array_get(&pool->device->bo_cache.bo_map, + bo->gem_handle) == bo); + util_sparse_array_free_list_push(&pool->free_list[bucket], + &bo->gem_handle, 1); } // Scratch pool @@ -1465,11 +1360,8 @@ anv_scratch_pool_finish(struct anv_device *device, struct anv_scratch_pool *pool { for (unsigned s = 0; s < MESA_SHADER_STAGES; s++) { for (unsigned i = 0; i < 16; i++) { - struct anv_scratch_bo *bo = &pool->bos[i][s]; - if (bo->exists > 0) { - anv_vma_free(device, &bo->bo); - anv_gem_close(device, bo->bo.gem_handle); - } + if (pool->bos[i][s] != NULL) + anv_device_release_bo(device, pool->bos[i][s]); } } } @@ -1484,19 +1376,10 @@ anv_scratch_pool_alloc(struct anv_device *device, struct anv_scratch_pool *pool, unsigned scratch_size_log2 = ffs(per_thread_scratch / 2048); assert(scratch_size_log2 < 16); - struct anv_scratch_bo *bo = &pool->bos[scratch_size_log2][stage]; - - /* We can use "exists" to shortcut and ignore the critical section */ - if (bo->exists) - return &bo->bo; + struct anv_bo *bo = p_atomic_read(&pool->bos[scratch_size_log2][stage]); - pthread_mutex_lock(&device->mutex); - - __sync_synchronize(); - if (bo->exists) { - pthread_mutex_unlock(&device->mutex); - return &bo->bo; - } + if (bo != NULL) + return bo; const struct anv_physical_device *physical_device = &device->instance->physicalDevice; @@ -1555,8 +1438,6 @@ anv_scratch_pool_alloc(struct anv_device *device, struct anv_scratch_pool *pool, uint32_t size = per_thread_scratch * max_threads[stage]; - anv_bo_init_new(&bo->bo, device, size); - /* Even though the Scratch base pointers in 3DSTATE_*S are 64 bits, they * are still relative to the general state base address. When we emit * STATE_BASE_ADDRESS, we set general state base address to 0 and the size @@ -1574,23 +1455,19 @@ anv_scratch_pool_alloc(struct anv_device *device, struct anv_scratch_pool *pool, * * so nothing will ever touch the top page. 
*/ - assert(!(bo->bo.flags & EXEC_OBJECT_SUPPORTS_48B_ADDRESS)); - - if (device->instance->physicalDevice.has_exec_async) - bo->bo.flags |= EXEC_OBJECT_ASYNC; - - if (device->instance->physicalDevice.use_softpin) - bo->bo.flags |= EXEC_OBJECT_PINNED; - - anv_vma_alloc(device, &bo->bo); - - /* Set the exists last because it may be read by other threads */ - __sync_synchronize(); - bo->exists = true; - - pthread_mutex_unlock(&device->mutex); + VkResult result = anv_device_alloc_bo(device, size, + ANV_BO_ALLOC_32BIT_ADDRESS, &bo); + if (result != VK_SUCCESS) + return NULL; /* TODO */ - return &bo->bo; + struct anv_bo *current_bo = + p_atomic_cmpxchg(&pool->bos[scratch_size_log2][stage], NULL, bo); + if (current_bo) { + anv_device_release_bo(device, bo); + return current_bo; + } else { + return bo; + } } VkResult @@ -1614,43 +1491,106 @@ anv_bo_cache_finish(struct anv_bo_cache *cache) pthread_mutex_destroy(&cache->mutex); } -static struct anv_bo * -anv_bo_cache_lookup(struct anv_bo_cache *cache, uint32_t gem_handle) -{ - return util_sparse_array_get(&cache->bo_map, gem_handle); -} - #define ANV_BO_CACHE_SUPPORTED_FLAGS \ (EXEC_OBJECT_WRITE | \ EXEC_OBJECT_ASYNC | \ EXEC_OBJECT_SUPPORTS_48B_ADDRESS | \ - EXEC_OBJECT_PINNED) + EXEC_OBJECT_PINNED | \ + EXEC_OBJECT_CAPTURE) + +static uint32_t +anv_bo_alloc_flags_to_bo_flags(struct anv_device *device, + enum anv_bo_alloc_flags alloc_flags) +{ + struct anv_physical_device *pdevice = &device->instance->physicalDevice; + + uint64_t bo_flags = 0; + if (!(alloc_flags & ANV_BO_ALLOC_32BIT_ADDRESS) && + pdevice->supports_48bit_addresses) + bo_flags |= EXEC_OBJECT_SUPPORTS_48B_ADDRESS; + + if ((alloc_flags & ANV_BO_ALLOC_CAPTURE) && pdevice->has_exec_capture) + bo_flags |= EXEC_OBJECT_CAPTURE; + + if (alloc_flags & ANV_BO_ALLOC_IMPLICIT_WRITE) { + assert(alloc_flags & ANV_BO_ALLOC_IMPLICIT_SYNC); + bo_flags |= EXEC_OBJECT_WRITE; + } + + if (!(alloc_flags & ANV_BO_ALLOC_IMPLICIT_SYNC) && pdevice->has_exec_async) + bo_flags |= EXEC_OBJECT_ASYNC; + + if (pdevice->use_softpin) + bo_flags |= EXEC_OBJECT_PINNED; + + return bo_flags; +} VkResult -anv_bo_cache_alloc(struct anv_device *device, - struct anv_bo_cache *cache, - uint64_t size, uint64_t bo_flags, - bool is_external, - struct anv_bo **bo_out) +anv_device_alloc_bo(struct anv_device *device, + uint64_t size, + enum anv_bo_alloc_flags alloc_flags, + struct anv_bo **bo_out) { + const uint32_t bo_flags = + anv_bo_alloc_flags_to_bo_flags(device, alloc_flags); assert(bo_flags == (bo_flags & ANV_BO_CACHE_SUPPORTED_FLAGS)); /* The kernel is going to give us whole pages anyway */ size = align_u64(size, 4096); - struct anv_bo new_bo; - VkResult result = anv_bo_init_new(&new_bo, device, size); - if (result != VK_SUCCESS) - return result; + uint32_t gem_handle = anv_gem_create(device, size); + if (gem_handle == 0) + return vk_error(VK_ERROR_OUT_OF_DEVICE_MEMORY); + + struct anv_bo new_bo = { + .gem_handle = gem_handle, + .refcount = 1, + .offset = -1, + .size = size, + .flags = bo_flags, + .is_external = (alloc_flags & ANV_BO_ALLOC_EXTERNAL), + }; - new_bo.flags = bo_flags; - new_bo.is_external = is_external; + if (alloc_flags & ANV_BO_ALLOC_MAPPED) { + new_bo.map = anv_gem_mmap(device, new_bo.gem_handle, 0, size, 0); + if (new_bo.map == MAP_FAILED) { + anv_gem_close(device, new_bo.gem_handle); + return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY); + } + } - if (!anv_vma_alloc(device, &new_bo)) { - anv_gem_close(device, new_bo.gem_handle); - return vk_errorf(device->instance, NULL, - VK_ERROR_OUT_OF_DEVICE_MEMORY, - "failed 
to allocate virtual address for BO"); + if (alloc_flags & ANV_BO_ALLOC_SNOOPED) { + assert(alloc_flags & ANV_BO_ALLOC_MAPPED); + /* We don't want to change these defaults if it's going to be shared + * with another process. + */ + assert(!(alloc_flags & ANV_BO_ALLOC_EXTERNAL)); + + /* Regular objects are created I915_CACHING_CACHED on LLC platforms and + * I915_CACHING_NONE on non-LLC platforms. For many internal state + * objects, we'd rather take the snooping overhead than risk forgetting + * a CLFLUSH somewhere. Userptr objects are always created as + * I915_CACHING_CACHED, which on non-LLC means snooped so there's no + * need to do this there. + */ + if (!device->info.has_llc) { + anv_gem_set_caching(device, new_bo.gem_handle, + I915_CACHING_CACHED); + } + } + + if (alloc_flags & ANV_BO_ALLOC_FIXED_ADDRESS) { + new_bo.has_fixed_address = true; + } else { + if (!anv_vma_alloc(device, &new_bo)) { + if (new_bo.map) + anv_gem_munmap(new_bo.map, size); + anv_gem_close(device, new_bo.gem_handle); + return vk_errorf(device->instance, NULL, + VK_ERROR_OUT_OF_DEVICE_MEMORY, + "failed to allocate virtual address for BO"); + } } assert(new_bo.gem_handle); @@ -1658,7 +1598,7 @@ anv_bo_cache_alloc(struct anv_device *device, /* If we just got this gem_handle from anv_bo_init_new then we know no one * else is touching this BO at the moment so we don't need to lock here. */ - struct anv_bo *bo = anv_bo_cache_lookup(cache, new_bo.gem_handle); + struct anv_bo *bo = anv_device_lookup_bo(device, new_bo.gem_handle); *bo = new_bo; *bo_out = bo; @@ -1667,11 +1607,18 @@ anv_bo_cache_alloc(struct anv_device *device, } VkResult -anv_bo_cache_import_host_ptr(struct anv_device *device, - struct anv_bo_cache *cache, - void *host_ptr, uint32_t size, - uint64_t bo_flags, struct anv_bo **bo_out) +anv_device_import_bo_from_host_ptr(struct anv_device *device, + void *host_ptr, uint32_t size, + enum anv_bo_alloc_flags alloc_flags, + struct anv_bo **bo_out) { + assert(!(alloc_flags & (ANV_BO_ALLOC_MAPPED | + ANV_BO_ALLOC_SNOOPED | + ANV_BO_ALLOC_FIXED_ADDRESS))); + + struct anv_bo_cache *cache = &device->bo_cache; + const uint32_t bo_flags = + anv_bo_alloc_flags_to_bo_flags(device, alloc_flags); assert(bo_flags == (bo_flags & ANV_BO_CACHE_SUPPORTED_FLAGS)); uint32_t gem_handle = anv_gem_userptr(device, host_ptr, size); @@ -1680,7 +1627,7 @@ anv_bo_cache_import_host_ptr(struct anv_device *device, pthread_mutex_lock(&cache->mutex); - struct anv_bo *bo = anv_bo_cache_lookup(cache, gem_handle); + struct anv_bo *bo = anv_device_lookup_bo(device, gem_handle); if (bo->refcount > 0) { /* VK_EXT_external_memory_host doesn't require handling importing the * same pointer twice at the same time, but we don't get in the way. 
If @@ -1695,10 +1642,16 @@ anv_bo_cache_import_host_ptr(struct anv_device *device, } __sync_fetch_and_add(&bo->refcount, 1); } else { - struct anv_bo new_bo; - anv_bo_init(&new_bo, gem_handle, size); - new_bo.flags = bo_flags; - new_bo.is_external = true; + struct anv_bo new_bo = { + .gem_handle = gem_handle, + .refcount = 1, + .offset = -1, + .size = size, + .map = host_ptr, + .flags = bo_flags, + .is_external = true, + .from_host_ptr = true, + }; if (!anv_vma_alloc(device, &new_bo)) { anv_gem_close(device, new_bo.gem_handle); @@ -1718,11 +1671,18 @@ anv_bo_cache_import_host_ptr(struct anv_device *device, } VkResult -anv_bo_cache_import(struct anv_device *device, - struct anv_bo_cache *cache, - int fd, uint64_t bo_flags, - struct anv_bo **bo_out) +anv_device_import_bo(struct anv_device *device, + int fd, + enum anv_bo_alloc_flags alloc_flags, + struct anv_bo **bo_out) { + assert(!(alloc_flags & (ANV_BO_ALLOC_MAPPED | + ANV_BO_ALLOC_SNOOPED | + ANV_BO_ALLOC_FIXED_ADDRESS))); + + struct anv_bo_cache *cache = &device->bo_cache; + const uint32_t bo_flags = + anv_bo_alloc_flags_to_bo_flags(device, alloc_flags); assert(bo_flags == (bo_flags & ANV_BO_CACHE_SUPPORTED_FLAGS)); pthread_mutex_lock(&cache->mutex); @@ -1733,7 +1693,7 @@ anv_bo_cache_import(struct anv_device *device, return vk_error(VK_ERROR_INVALID_EXTERNAL_HANDLE); } - struct anv_bo *bo = anv_bo_cache_lookup(cache, gem_handle); + struct anv_bo *bo = anv_device_lookup_bo(device, gem_handle); if (bo->refcount > 0) { /* We have to be careful how we combine flags so that it makes sense. * Really, though, if we get to this case and it actually matters, the @@ -1745,6 +1705,7 @@ anv_bo_cache_import(struct anv_device *device, new_flags |= (bo->flags & bo_flags) & EXEC_OBJECT_ASYNC; new_flags |= (bo->flags & bo_flags) & EXEC_OBJECT_SUPPORTS_48B_ADDRESS; new_flags |= (bo->flags | bo_flags) & EXEC_OBJECT_PINNED; + new_flags |= (bo->flags | bo_flags) & EXEC_OBJECT_CAPTURE; /* It's theoretically possible for a BO to get imported such that it's * both pinned and not pinned. The only way this can happen is if it @@ -1786,10 +1747,14 @@ anv_bo_cache_import(struct anv_device *device, return vk_error(VK_ERROR_INVALID_EXTERNAL_HANDLE); } - struct anv_bo new_bo; - anv_bo_init(&new_bo, gem_handle, size); - new_bo.flags = bo_flags; - new_bo.is_external = true; + struct anv_bo new_bo = { + .gem_handle = gem_handle, + .refcount = 1, + .offset = -1, + .size = size, + .flags = bo_flags, + .is_external = true, + }; if (!anv_vma_alloc(device, &new_bo)) { anv_gem_close(device, new_bo.gem_handle); @@ -1809,11 +1774,10 @@ anv_bo_cache_import(struct anv_device *device, } VkResult -anv_bo_cache_export(struct anv_device *device, - struct anv_bo_cache *cache, - struct anv_bo *bo, int *fd_out) +anv_device_export_bo(struct anv_device *device, + struct anv_bo *bo, int *fd_out) { - assert(anv_bo_cache_lookup(cache, bo->gem_handle) == bo); + assert(anv_device_lookup_bo(device, bo->gem_handle) == bo); /* This BO must have been flagged external in order for us to be able * to export it. 
This is done based on external options passed into @@ -1849,11 +1813,11 @@ atomic_dec_not_one(uint32_t *counter) } void -anv_bo_cache_release(struct anv_device *device, - struct anv_bo_cache *cache, - struct anv_bo *bo) +anv_device_release_bo(struct anv_device *device, + struct anv_bo *bo) { - assert(anv_bo_cache_lookup(cache, bo->gem_handle) == bo); + struct anv_bo_cache *cache = &device->bo_cache; + assert(anv_device_lookup_bo(device, bo->gem_handle) == bo); /* Try to decrement the counter but don't go below one. If this succeeds * then the refcount has been decremented and we are not the last @@ -1876,12 +1840,24 @@ anv_bo_cache_release(struct anv_device *device, } assert(bo->refcount == 0); - if (bo->map) + if (bo->map && !bo->from_host_ptr) anv_gem_munmap(bo->map, bo->size); - anv_vma_free(device, bo); + if (!bo->has_fixed_address) + anv_vma_free(device, bo); + + uint32_t gem_handle = bo->gem_handle; + + /* Memset the BO just in case. The refcount being zero should be enough to + * prevent someone from assuming the data is valid but it's safer to just + * stomp to zero just in case. We explicitly do this *before* we close the + * GEM handle to ensure that if anyone allocates something and gets the + * same GEM handle, the memset has already happen and won't stomp all over + * any data they may write in this BO. + */ + memset(bo, 0, sizeof(*bo)); - anv_gem_close(device, bo->gem_handle); + anv_gem_close(device, gem_handle); /* Don't unlock until we've actually closed the BO. The whole point of * the BO cache is to ensure that we correctly handle races with creating
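[Annotation] The release path above relies on atomic_dec_not_one() (its body lies outside the hunks shown) together with the cache mutex: lock-free decrements never take the refcount below one, so only the holder of the final reference enters the locked teardown that unmaps, releases the VMA, and closes the GEM handle. A hedged sketch of that pattern, using the same __sync builtins the file uses elsewhere; this illustrates the idea and is not necessarily the helper's exact body:

#include <stdbool.h>
#include <stdint.h>

/* Decrement *counter unless it is already at (or below) one.  Returns
 * true if the decrement happened, i.e. we were NOT the last reference
 * and can skip the locked destruction path entirely. */
static bool
atomic_dec_not_one(uint32_t *counter)
{
   uint32_t old = *counter;
   for (;;) {
      if (old <= 1)
         return false;
      uint32_t found = __sync_val_compare_and_swap(counter, old, old - 1);
      if (found == old)
         return true;
      old = found;
   }
}

When this returns false, anv_device_release_bo() takes the device mutex, re-checks the count under the lock, and only then tears the BO down, memsetting the struct before closing the GEM handle so a recycled handle never observes stale data.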