X-Git-Url: https://git.libre-soc.org/?a=blobdiff_plain;f=src%2Fvulkan%2Fanv_allocator.c;h=1f858bbdff4fa886f283dc44fca3b89049944665;hb=ecd1d94d1c74be6481ebc6adde01fe73c7d41331;hp=6c7c85d5e74636a496a12bd2b9b3e9fa8345c2cd;hpb=c55fa89251a1188b312aa09ba260cba7a411a282;p=mesa.git

diff --git a/src/vulkan/anv_allocator.c b/src/vulkan/anv_allocator.c
index 6c7c85d5e74..1f858bbdff4 100644
--- a/src/vulkan/anv_allocator.c
+++ b/src/vulkan/anv_allocator.c
@@ -241,7 +241,7 @@ anv_ptr_free_list_push(void **list, void *elem)
 }
 
 static uint32_t
-anv_block_pool_grow(struct anv_block_pool *pool, uint32_t old_size);
+anv_block_pool_grow(struct anv_block_pool *pool, struct anv_block_state *state);
 
 void
 anv_block_pool_init(struct anv_block_pool *pool,
@@ -252,8 +252,10 @@ anv_block_pool_init(struct anv_block_pool *pool,
    pool->device = device;
    pool->bo.gem_handle = 0;
    pool->bo.offset = 0;
+   pool->bo.size = 0;
    pool->block_size = block_size;
    pool->free_list = ANV_FREE_LIST_EMPTY;
+   pool->back_free_list = ANV_FREE_LIST_EMPTY;
 
    pool->fd = memfd_create("block pool", MFD_CLOEXEC);
    if (pool->fd == -1)
@@ -269,9 +271,13 @@ anv_block_pool_init(struct anv_block_pool *pool,
    anv_vector_init(&pool->mmap_cleanups,
                    round_to_power_of_two(sizeof(struct anv_mmap_cleanup)), 128);
 
-   /* Immediately grow the pool so we'll have a backing bo. */
    pool->state.next = 0;
-   pool->state.end = anv_block_pool_grow(pool, 0);
+   pool->state.end = 0;
+   pool->back_state.next = 0;
+   pool->back_state.end = 0;
+
+   /* Immediately grow the pool so we'll have a backing bo. */
+   pool->state.end = anv_block_pool_grow(pool, &pool->state);
 }
 
 void
@@ -291,18 +297,75 @@ anv_block_pool_finish(struct anv_block_pool *pool)
    close(pool->fd);
 }
 
+#define PAGE_SIZE 4096
+
+/** Grows and re-centers the block pool.
+ *
+ * We grow the block pool in one or both directions in such a way that the
+ * following conditions are met:
+ *
+ *  1) The size of the entire pool is always a power of two.
+ *
+ *  2) The pool can only grow at either end; neither end can get
+ *     shortened.
+ *
+ *  3) At the end of the allocation, we have about twice as much space
+ *     allocated for each end as we have used.  This way the pool doesn't
+ *     grow too far in one direction or the other.
+ *
+ *  4) If _alloc_back() has never been called, then the back portion of
+ *     the pool retains a size of zero.  (This makes it easier for users of
+ *     the block pool that only want a one-sided pool.)
+ *
+ *  5) We have enough space allocated for at least one more block in
+ *     whichever side `state` points to.
+ *
+ *  6) The center of the pool is always aligned to both the block_size of
+ *     the pool and a 4K CPU page.
+ */
 static uint32_t
-anv_block_pool_grow(struct anv_block_pool *pool, uint32_t old_size)
+anv_block_pool_grow(struct anv_block_pool *pool, struct anv_block_state *state)
 {
    size_t size;
    void *map;
-   int gem_handle;
+   uint32_t gem_handle;
    struct anv_mmap_cleanup *cleanup;
 
    pthread_mutex_lock(&pool->device->mutex);
 
+   assert(state == &pool->state || state == &pool->back_state);
+
+   /* Gather a little usage information on the pool.  Since we may have
+    * threads waiting in queue to get some storage while we resize, it's
+    * actually possible that total_used will be larger than old_size.  In
+    * particular, block_pool_alloc() increments state->next prior to
+    * calling block_pool_grow, so this ensures that we get enough space for
+    * whichever side tries to grow the pool.
+    *
+    * We align to a page size because it makes it easier to do our
+    * calculations later in such a way that we stay page-aligned.
+    */
+   uint32_t back_used = align_u32(pool->back_state.next, PAGE_SIZE);
+   uint32_t front_used = align_u32(pool->state.next, PAGE_SIZE);
+   uint32_t total_used = front_used + back_used;
+
+   assert(state == &pool->state || back_used > 0);
+
+   size_t old_size = pool->bo.size;
+
+   if (old_size != 0 &&
+       back_used * 2 <= pool->center_bo_offset &&
+       front_used * 2 <= (old_size - pool->center_bo_offset)) {
+      /* If we're in this case then this isn't the first allocation and we
+       * already have enough space on both sides to hold double what we
+       * have allocated.  There's nothing for us to do.
+       */
+      goto done;
+   }
+
    if (old_size == 0) {
-      size = 32 * pool->block_size;
+      /* This is the first allocation */
+      size = MAX2(32 * pool->block_size, PAGE_SIZE);
    } else {
       size = old_size * 2;
    }
@@ -313,27 +376,63 @@ anv_block_pool_grow(struct anv_block_pool *pool, uint32_t old_size)
     */
    assert(size <= (1u << 31));
 
+   /* We compute a new center_bo_offset such that, when we double the size
+    * of the pool, we maintain the ratio of how much is used by each side.
+    * This way things should remain more-or-less balanced.
+    */
+   uint32_t center_bo_offset;
+   if (back_used == 0) {
+      /* If we're in this case then we have never called alloc_back().  In
+       * this case, we want to keep the offset at 0 to make things as simple
+       * as possible for users that don't care about back allocations.
+       */
+      center_bo_offset = 0;
+   } else {
+      /* Try to "center" the allocation based on how much is currently in
+       * use on each side of the center line.
+       */
+      center_bo_offset = ((uint64_t)size * back_used) / total_used;
+
+      /* Align down to a multiple of both the block size and page size */
+      uint32_t granularity = MAX2(pool->block_size, PAGE_SIZE);
+      assert(util_is_power_of_two(granularity));
+      center_bo_offset &= ~(granularity - 1);
+
+      assert(center_bo_offset >= back_used);
+
+      /* Make sure we don't shrink the back end of the pool */
+      if (center_bo_offset < pool->back_state.end)
+         center_bo_offset = pool->back_state.end;
+
+      /* Make sure that we don't shrink the front end of the pool */
+      if (size - center_bo_offset < pool->state.end)
+         center_bo_offset = size - pool->state.end;
+   }
+
+   assert(center_bo_offset % pool->block_size == 0);
+   assert(center_bo_offset % PAGE_SIZE == 0);
+
+   /* Assert that we only ever grow the pool */
+   assert(center_bo_offset >= pool->back_state.end);
+   assert(size - center_bo_offset >= pool->state.end);
+
    cleanup = anv_vector_add(&pool->mmap_cleanups);
    if (!cleanup)
      goto fail;
    *cleanup = ANV_MMAP_CLEANUP_INIT;
 
-   /* First try to see if mremap can grow the map in place. */
-   map = MAP_FAILED;
-   if (old_size > 0)
-      map = mremap(pool->map, old_size, size, 0);
-   if (map == MAP_FAILED) {
-      /* Just leak the old map until we destroy the pool. We can't munmap it
-       * without races or imposing locking on the block allocate fast path. On
-       * the whole the leaked maps adds up to less than the size of the
-       * current map. MAP_POPULATE seems like the right thing to do, but we
-       * should try to get some numbers.
-       */
-      map = mmap(NULL, size, PROT_READ | PROT_WRITE,
-                 MAP_SHARED | MAP_POPULATE, pool->fd, 0);
-      cleanup->map = map;
-      cleanup->size = size;
-   }
+   /* Just leak the old map until we destroy the pool.  We can't munmap it
+    * without races or imposing locking on the block allocate fast path.  On
+    * the whole the leaked maps add up to less than the size of the
+    * current map.  MAP_POPULATE seems like the right thing to do, but we
+    * should try to get some numbers.
+    */
+   map = mmap(NULL, size, PROT_READ | PROT_WRITE,
+              MAP_SHARED | MAP_POPULATE, pool->fd,
+              BLOCK_POOL_MEMFD_CENTER - center_bo_offset);
+   cleanup->map = map;
+   cleanup->size = size;
 
    if (map == MAP_FAILED)
       goto fail;
 
@@ -342,28 +441,81 @@ anv_block_pool_grow(struct anv_block_pool *pool, uint32_t old_size)
       goto fail;
    cleanup->gem_handle = gem_handle;
 
+   /* Regular objects are created I915_CACHING_CACHED on LLC platforms and
+    * I915_CACHING_NONE on non-LLC platforms.  However, userptr objects are
+    * always created as I915_CACHING_CACHED, which on non-LLC means
+    * snooped.  That can be useful but comes with a bit of overhead.  Since
+    * we're explicitly clflushing and don't want the overhead, we need to
+    * turn it off. */
+   if (!pool->device->info.has_llc) {
+      anv_gem_set_caching(pool->device, gem_handle, I915_CACHING_NONE);
+      anv_gem_set_domain(pool->device, gem_handle,
+                         I915_GEM_DOMAIN_GTT, I915_GEM_DOMAIN_GTT);
+   }
+
    /* Now that we successfull allocated everything, we can write the new
    * values back into pool. */
-   pool->map = map;
+   pool->map = map + center_bo_offset;
+   pool->center_bo_offset = center_bo_offset;
    pool->bo.gem_handle = gem_handle;
    pool->bo.size = size;
    pool->bo.map = map;
    pool->bo.index = 0;
 
+done:
    pthread_mutex_unlock(&pool->device->mutex);
 
-   return size;
+   /* Return the appropriate new size.  This function never actually
+    * updates state->next.  Instead, we let the caller do that because it
+    * needs to do so in order to maintain its concurrency model.
+    */
+   if (state == &pool->state) {
+      return pool->bo.size - pool->center_bo_offset;
+   } else {
+      assert(pool->center_bo_offset > 0);
+      return pool->center_bo_offset;
+   }
 
  fail:
    pthread_mutex_unlock(&pool->device->mutex);
+
    return 0;
 }
 
-uint32_t
+static uint32_t
+anv_block_pool_alloc_new(struct anv_block_pool *pool,
+                         struct anv_block_state *pool_state)
+{
+   struct anv_block_state state, old, new;
+
+   while (1) {
+      state.u64 = __sync_fetch_and_add(&pool_state->u64, pool->block_size);
+      if (state.next < state.end) {
+         assert(pool->map);
+         return state.next;
+      } else if (state.next == state.end) {
+         /* We allocated the first block outside the pool, so we have to
+          * grow it.  pool_state->next acts as a mutex: threads who try to
+          * allocate now will get block indexes above the current limit and
+          * hit futex_wait below. */
+         new.next = state.next + pool->block_size;
+         new.end = anv_block_pool_grow(pool, pool_state);
+         assert(new.end >= new.next && new.end % pool->block_size == 0);
+         old.u64 = __sync_lock_test_and_set(&pool_state->u64, new.u64);
+         if (old.next != state.next)
+            futex_wake(&pool_state->end, INT_MAX);
+         return state.next;
+      } else {
+         futex_wait(&pool_state->end, state.end);
+         continue;
+      }
+   }
+}
+
+int32_t
 anv_block_pool_alloc(struct anv_block_pool *pool)
 {
    int32_t offset;
-   struct anv_block_state state, old, new;
 
    /* Try free list first. */
    if (anv_free_list_pop(&pool->free_list, &pool->map, &offset)) {
@@ -372,33 +524,49 @@ anv_block_pool_alloc(struct anv_block_pool *pool)
       return offset;
    }
 
- restart:
-   state.u64 = __sync_fetch_and_add(&pool->state.u64, pool->block_size);
-   if (state.next < state.end) {
+   return anv_block_pool_alloc_new(pool, &pool->state);
+}
+
+/* Allocates a block out of the back of the block pool.
+ *
+ * This will allocate a block earlier than the "start" of the block pool.
+ * The offsets returned from this function will be negative but will still
+ * be correct relative to the block pool's map pointer.
+ *
+ * If you ever use anv_block_pool_alloc_back, then you will have to do
+ * gymnastics with the block pool's BO when doing relocations.
+ */
+int32_t
+anv_block_pool_alloc_back(struct anv_block_pool *pool)
+{
+   int32_t offset;
+
+   /* Try free list first. */
+   if (anv_free_list_pop(&pool->back_free_list, &pool->map, &offset)) {
+      assert(offset < 0);
       assert(pool->map);
-      return state.next;
-   } else if (state.next == state.end) {
-      /* We allocated the first block outside the pool, we have to grow it.
-       * pool->next_block acts a mutex: threads who try to allocate now will
-       * get block indexes above the current limit and hit futex_wait
-       * below. */
-      new.next = state.next + pool->block_size;
-      new.end = anv_block_pool_grow(pool, state.end);
-      assert(new.end > 0);
-      old.u64 = __sync_lock_test_and_set(&pool->state.u64, new.u64);
-      if (old.next != state.next)
-         futex_wake(&pool->state.end, INT_MAX);
-      return state.next;
-   } else {
-      futex_wait(&pool->state.end, state.end);
-      goto restart;
+      return offset;
    }
+
+   offset = anv_block_pool_alloc_new(pool, &pool->back_state);
+
+   /* The offset we get out of anv_block_pool_alloc_new() is actually the
+    * number of bytes downwards from the middle to the end of the block.
+    * We need to turn it into a (negative) offset from the middle to the
+    * start of the block.
+    */
+   assert(offset >= 0);
+   return -(offset + pool->block_size);
 }
 
 void
-anv_block_pool_free(struct anv_block_pool *pool, uint32_t offset)
+anv_block_pool_free(struct anv_block_pool *pool, int32_t offset)
 {
-   anv_free_list_push(&pool->free_list, pool->map, offset);
+   if (offset < 0) {
+      anv_free_list_push(&pool->back_free_list, pool->map, offset);
+   } else {
+      anv_free_list_push(&pool->free_list, pool->map, offset);
+   }
 }
 
 static void
@@ -507,13 +675,17 @@ anv_state_pool_free(struct anv_state_pool *pool, struct anv_state state)
 }
 
 #define NULL_BLOCK 1
-struct stream_block {
-   uint32_t next;
+struct anv_state_stream_block {
+   /* The next block */
+   struct anv_state_stream_block *next;
 
-   /* The map for the BO at the time the block was givne to us */
-   void *current_map;
+   /* The offset into the block pool at which this block starts */
+   uint32_t offset;
 
 #ifdef HAVE_VALGRIND
+   /* A pointer to the first user-allocated thing in this block.  This is
+    * what valgrind sees as the start of the block.
+    */
    void *_vg_ptr;
 #endif
 };
@@ -526,9 +698,13 @@ anv_state_stream_init(struct anv_state_stream *stream,
                       struct anv_block_pool *block_pool)
 {
    stream->block_pool = block_pool;
-   stream->next = 0;
+   stream->block = NULL;
+
+   /* Ensure that next + whatever > end.  This way the first call to
+    * state_stream_alloc fetches a new block.
+    */
+   stream->next = 1;
    stream->end = 0;
-   stream->current_block = NULL_BLOCK;
 
    VG(VALGRIND_CREATE_MEMPOOL(stream, 0, false));
 }
@@ -536,16 +712,16 @@ anv_state_stream_init(struct anv_state_stream *stream,
 void
 anv_state_stream_finish(struct anv_state_stream *stream)
 {
-   struct stream_block *sb;
-   uint32_t block, next_block;
-
-   block = stream->current_block;
-   while (block != NULL_BLOCK) {
-      sb = stream->block_pool->map + block;
-      next_block = VG_NOACCESS_READ(&sb->next);
-      VG(VALGRIND_MEMPOOL_FREE(stream, VG_NOACCESS_READ(&sb->_vg_ptr)));
-      anv_block_pool_free(stream->block_pool, block);
-      block = next_block;
+   VG(const uint32_t block_size = stream->block_pool->block_size);
+
+   struct anv_state_stream_block *next = stream->block;
+   while (next != NULL) {
+      VG(VALGRIND_MAKE_MEM_DEFINED(next, sizeof(*next)));
+      struct anv_state_stream_block sb = VG_NOACCESS_READ(next);
+      VG(VALGRIND_MEMPOOL_FREE(stream, sb._vg_ptr));
+      VG(VALGRIND_MAKE_MEM_UNDEFINED(next, block_size));
+      anv_block_pool_free(stream->block_pool, sb.offset);
+      next = sb.next;
    }
 
    VG(VALGRIND_DESTROY_MEMPOOL(stream));
@@ -555,29 +731,32 @@ struct anv_state
 anv_state_stream_alloc(struct anv_state_stream *stream,
                        uint32_t size, uint32_t alignment)
 {
-   struct stream_block *sb;
+   struct anv_state_stream_block *sb = stream->block;
+
    struct anv_state state;
-   uint32_t block;
 
    state.offset = align_u32(stream->next, alignment);
    if (state.offset + size > stream->end) {
-      block = anv_block_pool_alloc(stream->block_pool);
-      void *current_map = stream->block_pool->map;
-      sb = current_map + block;
-      VG_NOACCESS_WRITE(&sb->current_map, current_map);
-      VG_NOACCESS_WRITE(&sb->next, stream->current_block);
-      VG(VG_NOACCESS_WRITE(&sb->_vg_ptr, 0));
-      stream->current_block = block;
+      uint32_t block = anv_block_pool_alloc(stream->block_pool);
+      sb = stream->block_pool->map + block;
+
+      VG(VALGRIND_MAKE_MEM_UNDEFINED(sb, sizeof(*sb)));
+      sb->next = stream->block;
+      sb->offset = block;
+      VG(sb->_vg_ptr = NULL);
+      VG(VALGRIND_MAKE_MEM_NOACCESS(sb, stream->block_pool->block_size));
+
+      stream->block = sb;
+      stream->start = block;
       stream->next = block + sizeof(*sb);
      stream->end = block + stream->block_pool->block_size;
+
      state.offset = align_u32(stream->next, alignment);
      assert(state.offset + size <= stream->end);
    }
 
-   sb = stream->block_pool->map + stream->current_block;
-   void *current_map = VG_NOACCESS_READ(&sb->current_map);
-
-   state.map = current_map + state.offset;
+   assert(state.offset > stream->start);
+   state.map = (void *)sb + (state.offset - stream->start);
    state.alloc_size = size;
 
 #ifdef HAVE_VALGRIND
@@ -587,11 +766,12 @@ anv_state_stream_alloc(struct anv_state_stream *stream,
       VG_NOACCESS_WRITE(&sb->_vg_ptr, vg_ptr);
       VALGRIND_MEMPOOL_ALLOC(stream, vg_ptr, size);
    } else {
-      ptrdiff_t vg_offset = vg_ptr - current_map;
-      assert(vg_offset >= stream->current_block &&
-             vg_offset < stream->end);
-      VALGRIND_MEMPOOL_CHANGE(stream, vg_ptr, vg_ptr,
-                              (state.offset + size) - vg_offset);
+      void *state_end = state.map + state.alloc_size;
+      /* This only updates the mempool.  The newly allocated chunk is still
+       * marked as NOACCESS.
+       */
+      VALGRIND_MEMPOOL_CHANGE(stream, vg_ptr, vg_ptr, state_end - vg_ptr);
+      /* Mark the newly allocated chunk as undefined */
+      VALGRIND_MAKE_MEM_UNDEFINED(state.map, state.alloc_size);
    }
 #endif
 
@@ -656,7 +836,7 @@ anv_bo_pool_alloc(struct anv_bo_pool *pool, struct anv_bo *bo)
 
    assert(new_bo.size == pool->bo_size);
 
-   new_bo.map = anv_gem_mmap(pool->device, new_bo.gem_handle, 0, pool->bo_size);
+   new_bo.map = anv_gem_mmap(pool->device, new_bo.gem_handle, 0, pool->bo_size, 0);
 
    if (new_bo.map == NULL) {
       anv_gem_close(pool->device, new_bo.gem_handle);
       return vk_error(VK_ERROR_MEMORY_MAP_FAILED);
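
The center_bo_offset computation in anv_block_pool_grow() above is the subtlest part of the patch. The sketch below is not part of the diff; it is a standalone model written under the assumption that the same rules apply (split the doubled size in proportion to current usage, align the center down to both the block size and the 4K page size, and never let either end shrink). Every example_* name is a hypothetical stand-in.

/* Models how anv_block_pool_grow() picks a new center when the pool doubles. */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define EXAMPLE_PAGE_SIZE 4096u

static uint32_t
example_max_u32(uint32_t a, uint32_t b)
{
   return a > b ? a : b;
}

static uint32_t
example_pick_center(uint32_t size, uint32_t block_size,
                    uint32_t back_used, uint32_t front_used,
                    uint32_t back_end, uint32_t front_end)
{
   if (back_used == 0)
      return 0;                /* one-sided pool: keep the center at 0 */

   uint32_t total_used = back_used + front_used;

   /* Split the doubled size in proportion to current usage. */
   uint32_t center = (uint32_t)(((uint64_t)size * back_used) / total_used);

   /* Align down to a multiple of both the block size and the page size. */
   uint32_t granularity = example_max_u32(block_size, EXAMPLE_PAGE_SIZE);
   center &= ~(granularity - 1);

   /* Never shrink either end of the pool. */
   if (center < back_end)
      center = back_end;
   if (size - center < front_end)
      center = size - front_end;

   return center;
}

int main(void)
{
   /* 1 MiB pool, 32 KiB blocks, back side lightly used, front side heavy. */
   uint32_t center = example_pick_center(1 << 20, 32 * 1024,
                                         64 * 1024, 512 * 1024,
                                         64 * 1024, 512 * 1024);
   printf("new center_bo_offset: %u\n", center);
   assert(center % EXAMPLE_PAGE_SIZE == 0);
   return 0;
}

With the inputs above the sketch prints a center of 98304 bytes (96 KiB): page- and block-aligned, larger than the back end's current usage, and leaving both ends room to keep growing.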
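The lock-free fast path in anv_block_pool_alloc_new() relies on packing the 32-bit next and end fields into a single 64-bit word, so one atomic fetch-and-add both claims a block and takes a consistent snapshot of the limit. The sketch below illustrates only that trick, not the pool itself; it assumes GCC's __sync builtins and a little-endian layout (so the low half of the word is next), and the example_* names are made up.

/* Single-word (next, end) bump allocation, as used by the block pool. */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

union example_block_state {
   struct {
      uint32_t next;   /* offset of the next free block */
      uint32_t end;    /* first offset past the usable range */
   };
   uint64_t u64;
};

int main(void)
{
   union example_block_state pool_state = { .next = 0, .end = 4 * 4096 };
   const uint32_t block_size = 4096;

   /* Fast path: claim a block and snapshot (next, end) in one atomic op. */
   union example_block_state snap;
   snap.u64 = __sync_fetch_and_add(&pool_state.u64, block_size);

   if (snap.next < snap.end) {
      printf("got block at offset %u\n", snap.next);
   } else {
      /* snap.next == snap.end would mean "this thread must grow the pool";
       * snap.next > snap.end would mean "someone else is growing it, wait
       * on the futex until end changes". */
      printf("pool needs to grow\n");
   }

   assert(pool_state.next == block_size);
   return 0;
}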
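anv_block_pool_alloc_back() hands out negative offsets, which only works because the pool's map pointer sits at the logical center of the BO (pool->map = map + center_bo_offset, with the memfd mapped at BLOCK_POOL_MEMFD_CENTER - center_bo_offset). The sketch below mimics that addressing with a plain malloc'd buffer standing in for the centered mapping; it is illustrative only and every name in it is hypothetical.

/* Front blocks at non-negative offsets, back blocks at negative ones. */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int main(void)
{
   const uint32_t block_size = 4096;
   const uint32_t back_size = 2 * block_size;   /* space before the center */
   const uint32_t front_size = 4 * block_size;  /* space after the center */

   char *bo_map = malloc(back_size + front_size);
   if (bo_map == NULL)
      return 1;

   /* "Center" the pool pointer the way the patch does with
    * pool->map = map + center_bo_offset. */
   char *pool_map = bo_map + back_size;

   int32_t front_offset = 0;                      /* like anv_block_pool_alloc() */
   int32_t back_offset = -(int32_t)block_size;    /* like anv_block_pool_alloc_back() */

   memset(pool_map + front_offset, 0xaa, block_size);
   memset(pool_map + back_offset, 0xbb, block_size);

   printf("front block at %+d, back block at %+d (relative to pool->map)\n",
          (int)front_offset, (int)back_offset);

   free(bo_map);
   return 0;
}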