}
static bool
-anv_free_list_pop(union anv_free_list *list, void **map, uint32_t *offset)
+anv_free_list_pop(union anv_free_list *list, void **map, int32_t *offset)
{
- union anv_free_list current, next, old;
+ union anv_free_list current, new, old;
- current = *list;
+ current.u64 = list->u64;
while (current.offset != EMPTY) {
/* We have to add a memory barrier here so that the list head (and
* offset) gets read before we read the map pointer. This way we
*/
__sync_synchronize();
- uint32_t *next_ptr = *map + current.offset;
- next.offset = VG_NOACCESS_READ(next_ptr);
- next.count = current.count + 1;
- old.u64 = __sync_val_compare_and_swap(&list->u64, current.u64, next.u64);
+ int32_t *next_ptr = *map + current.offset;
+ new.offset = VG_NOACCESS_READ(next_ptr);
+ new.count = current.count + 1;
+ old.u64 = __sync_val_compare_and_swap(&list->u64, current.u64, new.u64);
if (old.u64 == current.u64) {
*offset = current.offset;
return true;
}
static void
-anv_free_list_push(union anv_free_list *list, void *map, uint32_t offset)
+anv_free_list_push(union anv_free_list *list, void *map, int32_t offset)
{
union anv_free_list current, old, new;
- uint32_t *next_ptr = map + offset;
+ int32_t *next_ptr = map + offset;
old = *list;
do {
} while (old.u64 != current.u64);
}
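+
+/* Both free-list helpers treat the whole list head as a single 64-bit word
+ * so it can be updated with one compare-and-swap. A sketch of the layout
+ * assumed here (the real definition lives in anv_private.h and may differ
+ * in detail):
+ *
+ *    union anv_free_list {
+ *       struct {
+ *          int32_t offset;   // signed so back-of-pool blocks can be freed
+ *          uint32_t count;   // generation counter guarding against ABA
+ *       };
+ *       uint64_t u64;
+ *    };
+ *
+ * The count field is bumped on every update, so a stale head that happens
+ * to see the same offset again still fails the compare-and-swap.
+ */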
-static int
-anv_block_pool_grow(struct anv_block_pool *pool);
+static uint32_t
+anv_block_pool_grow(struct anv_block_pool *pool, struct anv_block_state *state);
void
anv_block_pool_init(struct anv_block_pool *pool,
struct anv_device *device, uint32_t block_size)
{
- assert(is_power_of_two(block_size));
+ assert(util_is_power_of_two(block_size));
pool->device = device;
pool->bo.gem_handle = 0;
pool->bo.offset = 0;
- pool->size = 0;
+ pool->bo.size = 0;
pool->block_size = block_size;
- pool->next_block = 0;
pool->free_list = ANV_FREE_LIST_EMPTY;
+ pool->back_free_list = ANV_FREE_LIST_EMPTY;
+
+ pool->fd = memfd_create("block pool", MFD_CLOEXEC);
+ if (pool->fd == -1)
+ return;
+
+ /* Just make it 2GB up-front. The Linux kernel won't actually back it
+ * with pages until we either map and fault on one of them or we use
+ * userptr and send a chunk of it off to the GPU.
+ */
+ if (ftruncate(pool->fd, BLOCK_POOL_MEMFD_SIZE) == -1)
+ return;
+
anv_vector_init(&pool->mmap_cleanups,
round_to_power_of_two(sizeof(struct anv_mmap_cleanup)), 128);
+ pool->state.next = 0;
+ pool->state.end = 0;
+ pool->back_state.next = 0;
+ pool->back_state.end = 0;
+
/* Immediately grow the pool so we'll have a backing bo. */
- anv_block_pool_grow(pool);
+ pool->state.end = anv_block_pool_grow(pool, &pool->state);
}
void
close(pool->fd);
}
-static int
-anv_block_pool_grow(struct anv_block_pool *pool)
+#define PAGE_SIZE 4096
+
+/** Grows and re-centers the block pool.
+ *
+ * We grow the block pool in one or both directions in such a way that the
+ * following conditions are met:
+ *
+ * 1) The size of the entire pool is always a power of two.
+ *
+ * 2) The pool can grow at either end, but neither end can ever get
+ * shortened.
+ *
+ * 3) At the end of the allocation, we have about twice as much space
+ * allocated for each end as we have used. This way the pool doesn't
+ * grow too far in one direction or the other.
+ *
+ * 4) If the _alloc_back() has never been called, then the back portion of
+ * the pool retains a size of zero. (This makes it easier for users of
+ * the block pool that only want a one-sided pool.)
+ *
+ * 5) We have enough space allocated for at least one more block in
+ * whichever side `state` points to.
+ *
+ * 6) The center of the pool is always aligned to both the block_size of
+ * the pool and a 4K CPU page.
+ */
+static uint32_t
+anv_block_pool_grow(struct anv_block_pool *pool, struct anv_block_state *state)
{
size_t size;
void *map;
- int gem_handle;
+ uint32_t gem_handle;
struct anv_mmap_cleanup *cleanup;
- if (pool->size == 0) {
- size = 32 * pool->block_size;
+ pthread_mutex_lock(&pool->device->mutex);
+
+ assert(state == &pool->state || state == &pool->back_state);
+
+ /* Gather a little usage information on the pool. Since we may have
+ * threads waiting in queue to get some storage while we resize, it's
+ * actually possible that total_used will be larger than old_size. In
+ * particular, block_pool_alloc() increments state->next prior to
+ * calling block_pool_grow, so this ensures that we get enough space for
+ * whichever side tries to grow the pool.
+ *
+ * We align to a page size because it makes it easier to do our
+ * calculations later in such a way that we stay page-aligned.
+ */
+ uint32_t back_used = align_u32(pool->back_state.next, PAGE_SIZE);
+ uint32_t front_used = align_u32(pool->state.next, PAGE_SIZE);
+ uint32_t total_used = front_used + back_used;
+
+ assert(state == &pool->state || back_used > 0);
+
+ size_t old_size = pool->bo.size;
+
+ if (old_size != 0 &&
+ back_used * 2 <= pool->center_bo_offset &&
+ front_used * 2 <= (old_size - pool->center_bo_offset)) {
+ /* If we're in this case then this isn't the first allocation and we
+ * already have enough space on both sides to hold double what we
+ * have allocated. There's nothing for us to do.
+ */
+ goto done;
+ }
+
+ if (old_size == 0) {
+ /* This is the first allocation */
+ size = MAX2(32 * pool->block_size, PAGE_SIZE);
} else {
- size = pool->size * 2;
+ size = old_size * 2;
}
+ /* We can't have a block pool bigger than 1GB because we use signed
+ * 32-bit offsets in the free list and we don't want overflow. We
+ * should never need a block pool bigger than 1GB anyway.
+ */
+ assert(size <= (1u << 31));
+
+ /* We compute a new center_bo_offset such that, when we double the size
+ * of the pool, we maintain the ratio of how much is used by each side.
+ * This way things should remain more-or-less balanced.
+ */
+ uint32_t center_bo_offset;
+ if (back_used == 0) {
+ /* If we're in this case then we have never called alloc_back(), so we
+ * want to keep the offset at 0 to make things as simple as possible
+ * for users that don't care about back allocations.
+ */
+ center_bo_offset = 0;
+ } else {
+ /* Try to "center" the allocation based on how much is currently in
+ * use on each side of the center line.
+ */
+ center_bo_offset = ((uint64_t)size * back_used) / total_used;
+
+ /* Align down to a multiple of both the block size and page size */
+ uint32_t granularity = MAX2(pool->block_size, PAGE_SIZE);
+ assert(util_is_power_of_two(granularity));
+ center_bo_offset &= ~(granularity - 1);
+
+ assert(center_bo_offset >= back_used);
+
+ /* Make sure we don't shrink the back end of the pool */
+ if (center_bo_offset < pool->back_state.end)
+ center_bo_offset = pool->back_state.end;
+
+ /* Make sure that we don't shrink the front end of the pool */
+ if (size - center_bo_offset < pool->state.end)
+ center_bo_offset = size - pool->state.end;
+ }
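+
+ /* Worked example with illustrative numbers: suppose the pool grows from
+ * 128 KB to 256 KB with 4 KB blocks, back_used = 8192, and
+ * front_used = 24576 (total_used = 32768). Then center_bo_offset =
+ * 262144 * 8192 / 32768 = 65536, already a multiple of the 4 KB
+ * granularity, leaving 64 KB for the back and 192 KB for the front, i.e.
+ * several times what each side currently uses.
+ */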
+
+ assert(center_bo_offset % pool->block_size == 0);
+ assert(center_bo_offset % PAGE_SIZE == 0);
+
+ /* Assert that we only ever grow the pool */
+ assert(center_bo_offset >= pool->back_state.end);
+ assert(size - center_bo_offset >= pool->state.end);
+
cleanup = anv_vector_add(&pool->mmap_cleanups);
if (!cleanup)
- return -1;
+ goto fail;
*cleanup = ANV_MMAP_CLEANUP_INIT;
- if (pool->size == 0)
- pool->fd = memfd_create("block pool", MFD_CLOEXEC);
+ /* Just leak the old map until we destroy the pool. We can't munmap it
+ * without races or imposing locking on the block allocate fast path. On
+ * the whole the leaked maps add up to less than the size of the
+ * current map. MAP_POPULATE seems like the right thing to do, but we
+ * should try to get some numbers.
+ */
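+ /* Map the memfd so that file offset BLOCK_POOL_MEMFD_CENTER (presumably
+ * the midpoint of the memfd) lands at map + center_bo_offset; pool->map is
+ * later set to exactly that byte, so the signed block offsets handed out
+ * by this pool resolve on either side of the center.
+ */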
+ map = mmap(NULL, size, PROT_READ | PROT_WRITE,
+ MAP_SHARED | MAP_POPULATE, pool->fd,
+ BLOCK_POOL_MEMFD_CENTER - center_bo_offset);
+ cleanup->map = map;
+ cleanup->size = size;
- if (pool->fd == -1)
- return -1;
-
- if (ftruncate(pool->fd, size) == -1)
- return -1;
-
- /* First try to see if mremap can grow the map in place. */
- map = MAP_FAILED;
- if (pool->size > 0)
- map = mremap(pool->map, pool->size, size, 0);
- if (map == MAP_FAILED) {
- /* Just leak the old map until we destroy the pool. We can't munmap it
- * without races or imposing locking on the block allocate fast path. On
- * the whole the leaked maps adds up to less than the size of the
- * current map. MAP_POPULATE seems like the right thing to do, but we
- * should try to get some numbers.
- */
- map = mmap(NULL, size, PROT_READ | PROT_WRITE,
- MAP_SHARED | MAP_POPULATE, pool->fd, 0);
- cleanup->map = map;
- cleanup->size = size;
- }
if (map == MAP_FAILED)
- return -1;
+ goto fail;
gem_handle = anv_gem_userptr(pool->device, map, size);
if (gem_handle == 0)
- return -1;
+ goto fail;
cleanup->gem_handle = gem_handle;
+ /* Regular objects are created I915_CACHING_CACHED on LLC platforms and
+ * I915_CACHING_NONE on non-LLC platforms. However, userptr objects are
+ * always created as I915_CACHING_CACHED, which on non-LLC means
+ * snooped. That can be useful but comes with a bit of overhead. Since
+ * we're explicitly clflushing and don't want the overhead, we need to turn
+ * it off. */
+ if (!pool->device->info.has_llc) {
+ anv_gem_set_caching(pool->device, gem_handle, I915_CACHING_NONE);
+ anv_gem_set_domain(pool->device, gem_handle,
+ I915_GEM_DOMAIN_GTT, I915_GEM_DOMAIN_GTT);
+ }
+
   /* Now that we successfully allocated everything, we can write the new
* values back into pool. */
- pool->map = map;
+ pool->map = map + center_bo_offset;
+ pool->center_bo_offset = center_bo_offset;
pool->bo.gem_handle = gem_handle;
pool->bo.size = size;
pool->bo.map = map;
pool->bo.index = 0;
- /* Write size last and after the memory barrier here. We need the memory
- * barrier to make sure map and gem_handle are written before other threads
- * see the new size. A thread could allocate a block and then go try using
- * the old pool->map and access out of bounds. */
+done:
+ pthread_mutex_unlock(&pool->device->mutex);
- __sync_synchronize();
- pool->size = size;
+ /* Return the appropriate new size. This function never actually
+ * updates state->next. Instead, we let the caller do that because it
+ * needs to do so in order to maintain its concurrency model.
+ */
+ if (state == &pool->state) {
+ return pool->bo.size - pool->center_bo_offset;
+ } else {
+ assert(pool->center_bo_offset > 0);
+ return pool->center_bo_offset;
+ }
+
+fail:
+ pthread_mutex_unlock(&pool->device->mutex);
return 0;
}
-uint32_t
+static uint32_t
+anv_block_pool_alloc_new(struct anv_block_pool *pool,
+ struct anv_block_state *pool_state)
+{
+ struct anv_block_state state, old, new;
+
+ while (1) {
+ state.u64 = __sync_fetch_and_add(&pool_state->u64, pool->block_size);
+ if (state.next < state.end) {
+ assert(pool->map);
+ return state.next;
+ } else if (state.next == state.end) {
+ /* We allocated the first block outside the pool, so we have to grow it.
+ * pool_state->next acts as a mutex: threads that try to allocate now will
+ * get block indexes above the current limit and hit futex_wait
+ * below. */
+ new.next = state.next + pool->block_size;
+ new.end = anv_block_pool_grow(pool, pool_state);
+ assert(new.end >= new.next && new.end % pool->block_size == 0);
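+ /* Note: __sync_lock_test_and_set() is used here as an atomic exchange;
+ * the whole 64-bit state is replaced because, after growing, we know
+ * exactly what both next and end should be.
+ */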
+ old.u64 = __sync_lock_test_and_set(&pool_state->u64, new.u64);
+ if (old.next != state.next)
+ futex_wake(&pool_state->end, INT_MAX);
+ return state.next;
+ } else {
+ futex_wait(&pool_state->end, state.end);
+ continue;
+ }
+ }
+}
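+
+/* anv_block_pool_alloc_new() above relies on next and end being packed into
+ * a single 64-bit word, so the fetch-and-add bumps next and reads end in one
+ * atomic operation. A sketch of the layout assumed here (the real definition
+ * lives in anv_private.h and may differ in detail):
+ *
+ *    struct anv_block_state {
+ *       union {
+ *          struct {
+ *             uint32_t next;
+ *             uint32_t end;
+ *          };
+ *          uint64_t u64;
+ *       };
+ *    };
+ */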
+
+int32_t
anv_block_pool_alloc(struct anv_block_pool *pool)
{
- uint32_t offset, block, size;
+ int32_t offset;
/* Try free list first. */
if (anv_free_list_pop(&pool->free_list, &pool->map, &offset)) {
+ assert(offset >= 0);
assert(pool->map);
return offset;
}
- restart:
- size = pool->size;
- block = __sync_fetch_and_add(&pool->next_block, pool->block_size);
- if (block < size) {
+ return anv_block_pool_alloc_new(pool, &pool->state);
+}
+
+/* Allocates a block out of the back of the block pool.
+ *
+ * This will allocate a block earlier than the "start" of the block pool.
+ * The offsets returned from this function will be negative but will still
+ * be correct relative to the block pool's map pointer.
+ *
+ * If you ever use anv_block_pool_alloc_back, then you will have to do
+ * gymnastics with the block pool's BO when doing relocations.
+ */
+int32_t
+anv_block_pool_alloc_back(struct anv_block_pool *pool)
+{
+ int32_t offset;
+
+ /* Try free list first. */
+ if (anv_free_list_pop(&pool->back_free_list, &pool->map, &offset)) {
+ assert(offset < 0);
assert(pool->map);
- return block;
- } else if (block == size) {
- /* We allocated the first block outside the pool, we have to grow it.
- * pool->next_block acts a mutex: threads who try to allocate now will
- * get block indexes above the current limit and hit futex_wait
- * below. */
- int err = anv_block_pool_grow(pool);
- assert(err == 0);
- (void) err;
- futex_wake(&pool->size, INT_MAX);
- } else {
- futex_wait(&pool->size, size);
- __sync_fetch_and_add(&pool->next_block, -pool->block_size);
- goto restart;
+ return offset;
}
- return block;
+ offset = anv_block_pool_alloc_new(pool, &pool->back_state);
+
+ /* The offset we get out of anv_block_pool_alloc_new() is actually the
+ * number of bytes downwards from the middle to the end of the block.
+ * We need to turn it into a (negative) offset from the middle to the
+ * start of the block.
+ */
+ assert(offset >= 0);
+ return -(offset + pool->block_size);
}
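+
+/* Illustrative sketch: because pool->map points at the center of the pool,
+ * the signed offsets returned by both allocators resolve with plain pointer
+ * arithmetic. Assuming a pool that has already been initialized:
+ *
+ *    int32_t front = anv_block_pool_alloc(&pool);       // front >= 0
+ *    int32_t back  = anv_block_pool_alloc_back(&pool);  // back < 0
+ *    void *front_map = pool.map + front;
+ *    void *back_map  = pool.map + back;
+ *
+ *    anv_block_pool_free(&pool, front);
+ *    anv_block_pool_free(&pool, back);
+ *
+ * anv_block_pool_free() below uses the sign of the offset to pick the right
+ * free list.
+ */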
void
-anv_block_pool_free(struct anv_block_pool *pool, uint32_t offset)
+anv_block_pool_free(struct anv_block_pool *pool, int32_t offset)
{
- anv_free_list_push(&pool->free_list, pool->map, offset);
+ if (offset < 0) {
+ anv_free_list_push(&pool->back_free_list, pool->map, offset);
+ } else {
+ anv_free_list_push(&pool->free_list, pool->map, offset);
+ }
}
static void
size_t state_size)
{
/* At least a cache line and must divide the block size. */
- assert(state_size >= 64 && is_power_of_two(state_size));
+ assert(state_size >= 64 && util_is_power_of_two(state_size));
pool->state_size = state_size;
pool->free_list = ANV_FREE_LIST_EMPTY;
anv_fixed_size_state_pool_alloc(struct anv_fixed_size_state_pool *pool,
struct anv_block_pool *block_pool)
{
- uint32_t offset;
+ int32_t offset;
struct anv_block_state block, old, new;
/* Try free list first. */
- if (anv_free_list_pop(&pool->free_list, &block_pool->map, &offset))
+ if (anv_free_list_pop(&pool->free_list, &block_pool->map, &offset)) {
+ assert(offset >= 0);
return offset;
+ }
/* If free list was empty (or somebody raced us and took the items) we
* allocate a new item from the end of the block */
if (block.next < block.end) {
return block.next;
} else if (block.next == block.end) {
- new.next = anv_block_pool_alloc(block_pool);
- new.end = new.next + block_pool->block_size;
- old.u64 = __sync_fetch_and_add(&pool->block.u64, new.u64 - block.u64);
+ offset = anv_block_pool_alloc(block_pool);
+ new.next = offset + pool->state_size;
+ new.end = offset + block_pool->block_size;
+ old.u64 = __sync_lock_test_and_set(&pool->block.u64, new.u64);
if (old.next != block.next)
futex_wake(&pool->block.end, INT_MAX);
- return new.next;
+ return offset;
} else {
futex_wait(&pool->block.end, block.end);
- __sync_fetch_and_add(&pool->block.u64, -pool->state_size);
goto restart;
}
}
void
anv_state_pool_free(struct anv_state_pool *pool, struct anv_state state)
{
- assert(is_power_of_two(state.alloc_size));
+ assert(util_is_power_of_two(state.alloc_size));
unsigned size_log2 = ilog2_round_up(state.alloc_size);
assert(size_log2 >= ANV_MIN_STATE_SIZE_LOG2 &&
size_log2 <= ANV_MAX_STATE_SIZE_LOG2);
}
#define NULL_BLOCK 1
-struct stream_block {
- uint32_t next;
+struct anv_state_stream_block {
+ /* The next block */
+ struct anv_state_stream_block *next;
- /* The map for the BO at the time the block was givne to us */
- void *current_map;
+ /* The offset into the block pool at which this block starts */
+ uint32_t offset;
#ifdef HAVE_VALGRIND
+ /* A pointer to the first user-allocated thing in this block. This is
+ * what valgrind sees as the start of the block.
+ */
void *_vg_ptr;
#endif
};
struct anv_block_pool *block_pool)
{
stream->block_pool = block_pool;
- stream->next = 0;
+ stream->block = NULL;
+
+ /* Ensure that next + whatever > end. This way the first call to
+ * state_stream_alloc fetches a new block.
+ */
+ stream->next = 1;
stream->end = 0;
- stream->current_block = NULL_BLOCK;
VG(VALGRIND_CREATE_MEMPOOL(stream, 0, false));
}
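+
+/* Illustrative sketch of a typical stream lifetime, assuming an
+ * anv_block_pool `block_pool` that was initialized elsewhere:
+ *
+ *    struct anv_state_stream stream;
+ *    anv_state_stream_init(&stream, &block_pool);
+ *
+ *    struct anv_state state = anv_state_stream_alloc(&stream, 64, 32);
+ *    // state.map is CPU-visible; state.offset is relative to the pool map
+ *
+ *    anv_state_stream_finish(&stream);   // returns every block to the pool
+ *
+ * Because next starts out greater than end, the first alloc always takes
+ * the new-block path in anv_state_stream_alloc() below.
+ */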
void
anv_state_stream_finish(struct anv_state_stream *stream)
{
- struct stream_block *sb;
- uint32_t block, next_block;
-
- block = stream->current_block;
- while (block != NULL_BLOCK) {
- sb = stream->block_pool->map + block;
- next_block = VG_NOACCESS_READ(&sb->next);
- VG(VALGRIND_MEMPOOL_FREE(stream, VG_NOACCESS_READ(&sb->_vg_ptr)));
- anv_block_pool_free(stream->block_pool, block);
- block = next_block;
+ VG(const uint32_t block_size = stream->block_pool->block_size);
+
+ struct anv_state_stream_block *next = stream->block;
+ while (next != NULL) {
+ VG(VALGRIND_MAKE_MEM_DEFINED(next, sizeof(*next)));
+ struct anv_state_stream_block sb = VG_NOACCESS_READ(next);
+ VG(VALGRIND_MEMPOOL_FREE(stream, sb._vg_ptr));
+ VG(VALGRIND_MAKE_MEM_UNDEFINED(next, block_size));
+ anv_block_pool_free(stream->block_pool, sb.offset);
+ next = sb.next;
}
VG(VALGRIND_DESTROY_MEMPOOL(stream));
anv_state_stream_alloc(struct anv_state_stream *stream,
uint32_t size, uint32_t alignment)
{
- struct stream_block *sb;
+ struct anv_state_stream_block *sb = stream->block;
+
struct anv_state state;
- uint32_t block;
state.offset = align_u32(stream->next, alignment);
if (state.offset + size > stream->end) {
- block = anv_block_pool_alloc(stream->block_pool);
- void *current_map = stream->block_pool->map;
- sb = current_map + block;
- VG_NOACCESS_WRITE(&sb->current_map, current_map);
- VG_NOACCESS_WRITE(&sb->next, stream->current_block);
- VG(VG_NOACCESS_WRITE(&sb->_vg_ptr, 0));
- stream->current_block = block;
+ uint32_t block = anv_block_pool_alloc(stream->block_pool);
+ sb = stream->block_pool->map + block;
+
+ VG(VALGRIND_MAKE_MEM_UNDEFINED(sb, sizeof(*sb)));
+ sb->next = stream->block;
+ sb->offset = block;
+ VG(sb->_vg_ptr = NULL);
+ VG(VALGRIND_MAKE_MEM_NOACCESS(sb, stream->block_pool->block_size));
+
+ stream->block = sb;
+ stream->start = block;
stream->next = block + sizeof(*sb);
stream->end = block + stream->block_pool->block_size;
+
state.offset = align_u32(stream->next, alignment);
assert(state.offset + size <= stream->end);
}
- sb = stream->block_pool->map + stream->current_block;
- void *current_map = VG_NOACCESS_READ(&sb->current_map);
-
- state.map = current_map + state.offset;
+ assert(state.offset > stream->start);
+ state.map = (void *)sb + (state.offset - stream->start);
state.alloc_size = size;
#ifdef HAVE_VALGRIND
VG_NOACCESS_WRITE(&sb->_vg_ptr, vg_ptr);
VALGRIND_MEMPOOL_ALLOC(stream, vg_ptr, size);
} else {
- ptrdiff_t vg_offset = vg_ptr - current_map;
- assert(vg_offset >= stream->current_block &&
- vg_offset < stream->end);
- VALGRIND_MEMPOOL_CHANGE(stream, vg_ptr, vg_ptr,
- (state.offset + size) - vg_offset);
+ void *state_end = state.map + state.alloc_size;
+ /* This only updates the mempool. The newly allocated chunk is still
+ * marked as NOACCESS. */
+ VALGRIND_MEMPOOL_CHANGE(stream, vg_ptr, vg_ptr, state_end - vg_ptr);
+ /* Mark the newly allocated chunk as undefined */
+ VALGRIND_MAKE_MEM_UNDEFINED(state.map, state.alloc_size);
}
#endif
assert(new_bo.size == pool->bo_size);
- new_bo.map = anv_gem_mmap(pool->device, new_bo.gem_handle, 0, pool->bo_size);
+ new_bo.map = anv_gem_mmap(pool->device, new_bo.gem_handle, 0, pool->bo_size, 0);
if (new_bo.map == NULL) {
anv_gem_close(pool->device, new_bo.gem_handle);
return vk_error(VK_ERROR_MEMORY_MAP_FAILED);