#include <unistd.h>
#include <limits.h>
#include <assert.h>
-#include <linux/memfd.h>
#include <sys/mman.h>
#include "anv_private.h"
#include "util/hash_table.h"
#include "util/simple_mtx.h"
+#include "util/anon_file.h"
#ifdef HAVE_VALGRIND
#define VG_NOACCESS_READ(__ptr) ({ \
#define VG_NOACCESS_WRITE(__ptr, __val) (*(__ptr) = (__val))
#endif
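+/* MAP_POPULATE is a Linux extension; define it to 0 so the mmap() calls
+ * below still compile (the flag simply becomes a no-op) on platforms whose
+ * headers don't provide it.
+ */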
+#ifndef MAP_POPULATE
+#define MAP_POPULATE 0
+#endif
+
/* Design goals:
*
* - Lock free (except when resizing underlying bos)
-/* Allocations are always at least 64 byte aligned, so 1 is an invalid value.
- * We use it to indicate the free list is empty. */
-#define EMPTY 1
-#define EMPTY2 UINT32_MAX
+/* A free list entry is a state-table index, and UINT32_MAX is never a valid
+ * index, so we use it to indicate the free list is empty. */
+#define EMPTY UINT32_MAX
#define PAGE_SIZE 4096
#define ANV_MMAP_CLEANUP_INIT ((struct anv_mmap_cleanup){0})
-#ifndef HAVE_MEMFD_CREATE
-static inline int
-memfd_create(const char *name, unsigned int flags)
-{
- return syscall(SYS_memfd_create, name, flags);
-}
-#endif
-
static inline uint32_t
ilog2_round_up(uint32_t value)
{
table->device = device;
- table->fd = memfd_create("state table", MFD_CLOEXEC);
- if (table->fd == -1)
- return vk_error(VK_ERROR_INITIALIZATION_FAILED);
-
/* Just make it 2GB up-front. The Linux kernel won't actually back it
* with pages until we either map and fault on one of them or we use
* userptr and send a chunk of it off to the GPU.
*/
- if (ftruncate(table->fd, BLOCK_POOL_MEMFD_SIZE) == -1) {
+ table->fd = os_create_anonymous_file(BLOCK_POOL_MEMFD_SIZE, "state table");
+ if (table->fd == -1) {
result = vk_error(VK_ERROR_INITIALIZATION_FAILED);
goto fail_fd;
}
- if (!u_vector_init(&table->mmap_cleanups,
+ if (!u_vector_init(&table->cleanups,
round_to_power_of_two(sizeof(struct anv_state_table_cleanup)),
128)) {
result = vk_error(VK_ERROR_INITIALIZATION_FAILED);
uint32_t initial_size = initial_entries * ANV_STATE_ENTRY_SIZE;
result = anv_state_table_expand_range(table, initial_size);
if (result != VK_SUCCESS)
- goto fail_mmap_cleanups;
+ goto fail_cleanups;
return VK_SUCCESS;
- fail_mmap_cleanups:
- u_vector_finish(&table->mmap_cleanups);
+ fail_cleanups:
+ u_vector_finish(&table->cleanups);
fail_fd:
close(table->fd);
anv_state_table_expand_range(struct anv_state_table *table, uint32_t size)
{
void *map;
- struct anv_mmap_cleanup *cleanup;
+ struct anv_state_table_cleanup *cleanup;
/* Assert that we only ever grow the pool */
assert(size >= table->state.end);
if (size > BLOCK_POOL_MEMFD_SIZE)
return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
- cleanup = u_vector_add(&table->mmap_cleanups);
+ cleanup = u_vector_add(&table->cleanups);
if (!cleanup)
return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
- *cleanup = ANV_MMAP_CLEANUP_INIT;
+ *cleanup = ANV_STATE_TABLE_CLEANUP_INIT;
/* Just leak the old map until we destroy the pool. We can't munmap it
* without races or imposing locking on the block allocate fast path. On
{
struct anv_state_table_cleanup *cleanup;
- u_vector_foreach(cleanup, &table->mmap_cleanups) {
+ u_vector_foreach(cleanup, &table->cleanups) {
if (cleanup->map)
munmap(cleanup->map, cleanup->size);
}
- u_vector_finish(&table->mmap_cleanups);
+ u_vector_finish(&table->cleanups);
close(table->fd);
}
}
void
-anv_free_list_push2(union anv_free_list2 *list,
- struct anv_state_table *table,
- uint32_t first, uint32_t count)
+anv_free_list_push(union anv_free_list *list,
+ struct anv_state_table *table,
+ uint32_t first, uint32_t count)
{
- union anv_free_list2 current, old, new;
+ union anv_free_list current, old, new;
uint32_t last = first;
for (uint32_t i = 1; i < count; i++, last++)
}
struct anv_state *
-anv_free_list_pop2(union anv_free_list2 *list,
- struct anv_state_table *table)
+anv_free_list_pop(union anv_free_list *list,
+ struct anv_state_table *table)
{
- union anv_free_list2 current, new, old;
+ union anv_free_list current, new, old;
current.u64 = list->u64;
- while (current.offset != EMPTY2) {
+ while (current.offset != EMPTY) {
__sync_synchronize();
new.offset = table->map[current.offset].next;
new.count = current.count + 1;
return NULL;
}
-static bool
-anv_free_list_pop(union anv_free_list *list, void **map, int32_t *offset)
-{
- union anv_free_list current, new, old;
-
- current.u64 = list->u64;
- while (current.offset != EMPTY) {
- /* We have to add a memory barrier here so that the list head (and
- * offset) gets read before we read the map pointer. This way we
- * know that the map pointer is valid for the given offset at the
- * point where we read it.
- */
- __sync_synchronize();
-
- int32_t *next_ptr = *map + current.offset;
- new.offset = VG_NOACCESS_READ(next_ptr);
- new.count = current.count + 1;
- old.u64 = __sync_val_compare_and_swap(&list->u64, current.u64, new.u64);
- if (old.u64 == current.u64) {
- *offset = current.offset;
- return true;
- }
- current = old;
- }
-
- return false;
-}
-
-static void
-anv_free_list_push(union anv_free_list *list, void *map, int32_t offset,
- uint32_t size, uint32_t count)
-{
- union anv_free_list current, old, new;
- int32_t *next_ptr = map + offset;
-
- /* If we're returning more than one chunk, we need to build a chain to add
- * to the list. Fortunately, we can do this without any atomics since we
- * own everything in the chain right now. `offset` is left pointing to the
- * head of our chain list while `next_ptr` points to the tail.
- */
- for (uint32_t i = 1; i < count; i++) {
- VG_NOACCESS_WRITE(next_ptr, offset + i * size);
- next_ptr = map + offset + i * size;
- }
-
- old = *list;
- do {
- current = old;
- VG_NOACCESS_WRITE(next_ptr, current.offset);
- new.offset = offset;
- new.count = current.count + 1;
- old.u64 = __sync_val_compare_and_swap(&list->u64, current.u64, new.u64);
- } while (old.u64 != current.u64);
-}
-
/* All pointers in the ptr_free_list are assumed to be page-aligned. This
* means that the bottom 12 bits should all be zero.
*/
pool->device = device;
pool->bo_flags = bo_flags;
+ pool->nbos = 0;
+ pool->size = 0;
+ pool->center_bo_offset = 0;
pool->start_address = gen_canonical_address(start_address);
+ pool->map = NULL;
- anv_bo_init(&pool->bo, 0, 0);
+ /* This pointer will always point to the first BO in the list */
+ pool->bo = &pool->bos[0];
- pool->fd = memfd_create("block pool", MFD_CLOEXEC);
- if (pool->fd == -1)
- return vk_error(VK_ERROR_INITIALIZATION_FAILED);
+ anv_bo_init(pool->bo, 0, 0);
- /* Just make it 2GB up-front. The Linux kernel won't actually back it
- * with pages until we either map and fault on one of them or we use
- * userptr and send a chunk of it off to the GPU.
- */
- if (ftruncate(pool->fd, BLOCK_POOL_MEMFD_SIZE) == -1) {
- result = vk_error(VK_ERROR_INITIALIZATION_FAILED);
- goto fail_fd;
+ if (!(pool->bo_flags & EXEC_OBJECT_PINNED)) {
+ /* Just make it 2GB up-front. The Linux kernel won't actually back it
+ * with pages until we either map and fault on one of them or we use
+ * userptr and send a chunk of it off to the GPU.
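+ *
+ * os_create_anonymous_file() creates the anonymous backing file and sets
+ * its size in a single call, so no separate ftruncate() is needed.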
+ */
+ pool->fd = os_create_anonymous_file(BLOCK_POOL_MEMFD_SIZE, "block pool");
+ if (pool->fd == -1)
+ return vk_error(VK_ERROR_INITIALIZATION_FAILED);
+ } else {
+ pool->fd = -1;
}
if (!u_vector_init(&pool->mmap_cleanups,
if (result != VK_SUCCESS)
goto fail_mmap_cleanups;
+ /* Make the entire pool available to front allocations. If back
+ * allocation needs to use this space, the "ends" will be re-arranged.
+ */
+ pool->state.end = pool->size;
+
return VK_SUCCESS;
fail_mmap_cleanups:
u_vector_finish(&pool->mmap_cleanups);
fail_fd:
- close(pool->fd);
+ if (!(pool->bo_flags & EXEC_OBJECT_PINNED))
+ close(pool->fd);
return result;
}
anv_block_pool_finish(struct anv_block_pool *pool)
{
struct anv_mmap_cleanup *cleanup;
+ const bool use_softpin = !!(pool->bo_flags & EXEC_OBJECT_PINNED);
u_vector_foreach(cleanup, &pool->mmap_cleanups) {
- if (cleanup->map)
+ if (use_softpin)
+ anv_gem_munmap(cleanup->map, cleanup->size);
+ else
munmap(cleanup->map, cleanup->size);
+
if (cleanup->gem_handle)
anv_gem_close(pool->device, cleanup->gem_handle);
}
u_vector_finish(&pool->mmap_cleanups);
-
- close(pool->fd);
+ if (!(pool->bo_flags & EXEC_OBJECT_PINNED))
+ close(pool->fd);
}
static VkResult
void *map;
uint32_t gem_handle;
struct anv_mmap_cleanup *cleanup;
+ const bool use_softpin = !!(pool->bo_flags & EXEC_OBJECT_PINNED);
/* Assert that we only ever grow the pool */
assert(center_bo_offset >= pool->back_state.end);
/* Assert that we don't go outside the bounds of the memfd */
assert(center_bo_offset <= BLOCK_POOL_MEMFD_CENTER);
- assert(size - center_bo_offset <=
+ assert(use_softpin ||
+ size - center_bo_offset <=
BLOCK_POOL_MEMFD_SIZE - BLOCK_POOL_MEMFD_CENTER);
cleanup = u_vector_add(&pool->mmap_cleanups);
*cleanup = ANV_MMAP_CLEANUP_INIT;
- /* Just leak the old map until we destroy the pool. We can't munmap it
- * without races or imposing locking on the block allocate fast path. On
- * the whole the leaked maps adds up to less than the size of the
- * current map. MAP_POPULATE seems like the right thing to do, but we
- * should try to get some numbers.
- */
- map = mmap(NULL, size, PROT_READ | PROT_WRITE,
- MAP_SHARED | MAP_POPULATE, pool->fd,
- BLOCK_POOL_MEMFD_CENTER - center_bo_offset);
- if (map == MAP_FAILED)
- return vk_errorf(pool->device->instance, pool->device,
- VK_ERROR_MEMORY_MAP_FAILED, "mmap failed: %m");
-
- gem_handle = anv_gem_userptr(pool->device, map, size);
- if (gem_handle == 0) {
- munmap(map, size);
- return vk_errorf(pool->device->instance, pool->device,
- VK_ERROR_TOO_MANY_OBJECTS, "userptr failed: %m");
+ uint32_t newbo_size = size - pool->size;
+ if (use_softpin) {
+ gem_handle = anv_gem_create(pool->device, newbo_size);
+ map = anv_gem_mmap(pool->device, gem_handle, 0, newbo_size, 0);
+ if (map == MAP_FAILED)
+ return vk_errorf(pool->device->instance, pool->device,
+ VK_ERROR_MEMORY_MAP_FAILED, "gem mmap failed: %m");
+ assert(center_bo_offset == 0);
+ } else {
+ /* Just leak the old map until we destroy the pool. We can't munmap it
+ * without races or imposing locking on the block allocate fast path. On
+ * the whole the leaked maps add up to less than the size of the
+ * current map. MAP_POPULATE seems like the right thing to do, but we
+ * should try to get some numbers.
+ */
+ map = mmap(NULL, size, PROT_READ | PROT_WRITE,
+ MAP_SHARED | MAP_POPULATE, pool->fd,
+ BLOCK_POOL_MEMFD_CENTER - center_bo_offset);
+ if (map == MAP_FAILED)
+ return vk_errorf(pool->device->instance, pool->device,
+ VK_ERROR_MEMORY_MAP_FAILED, "mmap failed: %m");
+
+ /* Now that we mapped the new memory, we can write the new
+ * center_bo_offset back into pool and update pool->map. */
+ pool->center_bo_offset = center_bo_offset;
+ pool->map = map + center_bo_offset;
+ gem_handle = anv_gem_userptr(pool->device, map, size);
+ if (gem_handle == 0) {
+ munmap(map, size);
+ return vk_errorf(pool->device->instance, pool->device,
+ VK_ERROR_TOO_MANY_OBJECTS, "userptr failed: %m");
+ }
}
cleanup->map = map;
- cleanup->size = size;
+ cleanup->size = use_softpin ? newbo_size : size;
cleanup->gem_handle = gem_handle;
-#if 0
/* Regular objects are created I915_CACHING_CACHED on LLC platforms and
* I915_CACHING_NONE on non-LLC platforms. However, userptr objects are
* always created as I915_CACHING_CACHED, which on non-LLC means
- * snooped. That can be useful but comes with a bit of overheard. Since
- * we're eplicitly clflushing and don't want the overhead we need to turn
- * it off. */
- if (!pool->device->info.has_llc) {
- anv_gem_set_caching(pool->device, gem_handle, I915_CACHING_NONE);
- anv_gem_set_domain(pool->device, gem_handle,
- I915_GEM_DOMAIN_GTT, I915_GEM_DOMAIN_GTT);
- }
-#endif
-
- /* Now that we successfull allocated everything, we can write the new
- * values back into pool. */
- pool->map = map + center_bo_offset;
- pool->center_bo_offset = center_bo_offset;
+ * snooped.
+ *
+ * On platforms that support softpin, we are not going to use userptr
+ * anymore, but we still want to rely on the snooped states. So make sure
+ * everything is set to I915_CACHING_CACHED.
+ */
+ if (!pool->device->info.has_llc)
+ anv_gem_set_caching(pool->device, gem_handle, I915_CACHING_CACHED);
/* For block pool BOs we have to be a bit careful about where we place them
* in the GTT. There are two documented workarounds for state base address
* the EXEC_OBJECT_SUPPORTS_48B_ADDRESS flag and the kernel does all of the
* hard work for us.
*/
- anv_bo_init(&pool->bo, gem_handle, size);
- if (pool->bo_flags & EXEC_OBJECT_PINNED) {
- pool->bo.offset = pool->start_address + BLOCK_POOL_MEMFD_CENTER -
- center_bo_offset;
+ struct anv_bo *bo;
+ uint32_t bo_size;
+ uint64_t bo_offset;
+
+ assert(pool->nbos < ANV_MAX_BLOCK_POOL_BOS);
+
+ if (use_softpin) {
+ /* With softpin, we add a new BO to the pool, and set its offset to right
+ * where the previous BO ends (the end of the pool).
+ */
+ bo = &pool->bos[pool->nbos++];
+ bo_size = newbo_size;
+ bo_offset = pool->start_address + pool->size;
+ } else {
+ /* Without softpin, we just need one BO, and we already have a pointer to
+ * it. Simply "allocate" it from our array if we didn't do it before.
+ * The offset doesn't matter since we are not pinning the BO anyway.
+ */
+ if (pool->nbos == 0)
+ pool->nbos++;
+ bo = pool->bo;
+ bo_size = size;
+ bo_offset = 0;
}
- pool->bo.flags = pool->bo_flags;
- pool->bo.map = map;
+
+ anv_bo_init(bo, gem_handle, bo_size);
+ bo->offset = bo_offset;
+ bo->flags = pool->bo_flags;
+ bo->map = map;
+ pool->size = size;
return VK_SUCCESS;
}
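+/* Returns the BO of a softpin block pool that contains the given pool
+ * offset, and rewrites *offset to be relative to the start of that BO.
+ * Non-softpin pools only ever use a single BO, which is returned as-is.
+ */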
+static struct anv_bo *
+anv_block_pool_get_bo(struct anv_block_pool *pool, int32_t *offset)
+{
+ struct anv_bo *bo, *bo_found = NULL;
+ int32_t cur_offset = 0;
+
+ assert(offset);
+
+ if (!(pool->bo_flags & EXEC_OBJECT_PINNED))
+ return pool->bo;
+
+ anv_block_pool_foreach_bo(bo, pool) {
+ if (*offset < cur_offset + bo->size) {
+ bo_found = bo;
+ break;
+ }
+ cur_offset += bo->size;
+ }
+
+ assert(bo_found != NULL);
+ *offset -= cur_offset;
+
+ return bo_found;
+}
+
/** Returns current memory map of the block pool.
*
* The returned pointer points to the map for the memory at the specified
void*
anv_block_pool_map(struct anv_block_pool *pool, int32_t offset)
{
- return pool->map + offset;
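+ /* With softpin the pool is built out of multiple BOs, each with its own
+ * CPU mapping, so translate the pool-relative offset into a BO-relative
+ * one first.
+ */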
+ if (pool->bo_flags & EXEC_OBJECT_PINNED) {
+ struct anv_bo *bo = anv_block_pool_get_bo(pool, &offset);
+ return bo->map + offset;
+ } else {
+ return pool->map + offset;
+ }
}
/** Grows and re-centers the block pool.
assert(state == &pool->state || back_used > 0);
- uint32_t old_size = pool->bo.size;
+ uint32_t old_size = pool->size;
/* The block pool is always initialized to a nonzero size and this function
* is always called after initialization.
while (size < back_required + front_required)
size *= 2;
- assert(size > pool->bo.size);
+ assert(size > pool->size);
/* We compute a new center_bo_offset such that, when we double the size
* of the pool, we maintain the ratio of how much is used by each side.
result = anv_block_pool_expand_range(pool, center_bo_offset, size);
- pool->bo.flags = pool->bo_flags;
+ pool->bo->flags = pool->bo_flags;
done:
pthread_mutex_unlock(&pool->device->mutex);
* needs to do so in order to maintain its concurrency model.
*/
if (state == &pool->state) {
- return pool->bo.size - pool->center_bo_offset;
+ return pool->size - pool->center_bo_offset;
} else {
assert(pool->center_bo_offset > 0);
return pool->center_bo_offset;
static uint32_t
anv_block_pool_alloc_new(struct anv_block_pool *pool,
struct anv_block_state *pool_state,
- uint32_t block_size)
+ uint32_t block_size, uint32_t *padding)
{
struct anv_block_state state, old, new;
+ /* Most allocations won't generate any padding */
+ if (padding)
+ *padding = 0;
+
while (1) {
state.u64 = __sync_fetch_and_add(&pool_state->u64, block_size);
if (state.next + block_size <= state.end) {
- assert(pool->map);
return state.next;
} else if (state.next <= state.end) {
+ if (pool->bo_flags & EXEC_OBJECT_PINNED && state.next < state.end) {
+ /* We need to grow the block pool, but still have some leftover
+ * space that can't be used by that particular allocation. So we
+ * add that as a "padding", and return it.
+ */
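+ /* For example (hypothetical sizes): if 4KB remain before state.end but
+ * an 8KB block was requested, those 4KB are reported as *padding and
+ * state.next is advanced past them, so the block we return starts exactly
+ * at the old end of the pool.
+ */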
+ uint32_t leftover = state.end - state.next;
+
+ /* If there is some leftover space in the pool, the caller must
+ * deal with it.
+ */
+ assert(leftover == 0 || padding);
+ if (padding)
+ *padding = leftover;
+ state.next += leftover;
+ }
+
/* We allocated the first block outside the pool so we have to grow
* the pool. pool_state->next acts as a mutex: threads who try to
* allocate now will get block indexes above the current limit and
int32_t
anv_block_pool_alloc(struct anv_block_pool *pool,
- uint32_t block_size)
+ uint32_t block_size, uint32_t *padding)
{
- return anv_block_pool_alloc_new(pool, &pool->state, block_size);
+ uint32_t offset;
+
+ offset = anv_block_pool_alloc_new(pool, &pool->state, block_size, padding);
+
+ return offset;
}
/* Allocates a block out of the back of the block pool.
uint32_t block_size)
{
int32_t offset = anv_block_pool_alloc_new(pool, &pool->back_state,
- block_size);
+ block_size, NULL);
/* The offset we get out of anv_block_pool_alloc_new() is actually the
* number of bytes downwards from the middle to the end of the block.
pool->block_size = block_size;
pool->back_alloc_free_list = ANV_FREE_LIST_EMPTY;
for (unsigned i = 0; i < ANV_STATE_BUCKETS; i++) {
- pool->buckets[i].free_list = ANV_FREE_LIST2_EMPTY;
+ pool->buckets[i].free_list = ANV_FREE_LIST_EMPTY;
pool->buckets[i].block.next = 0;
pool->buckets[i].block.end = 0;
}
anv_fixed_size_state_pool_alloc_new(struct anv_fixed_size_state_pool *pool,
struct anv_block_pool *block_pool,
uint32_t state_size,
- uint32_t block_size)
+ uint32_t block_size,
+ uint32_t *padding)
{
struct anv_block_state block, old, new;
uint32_t offset;
+ /* We don't always use anv_block_pool_alloc(), which would set *padding to
+ * zero for us. So if we have a pointer to padding, we must zero it out
+ * ourselves here, to make sure we always return some sensible value.
+ */
+ if (padding)
+ *padding = 0;
+
/* If our state is large, we don't need any sub-allocation from a block.
* Instead, we just grab whole (potentially large) blocks.
*/
if (state_size >= block_size)
- return anv_block_pool_alloc(block_pool, state_size);
+ return anv_block_pool_alloc(block_pool, state_size, padding);
restart:
block.u64 = __sync_fetch_and_add(&pool->block.u64, state_size);
if (block.next < block.end) {
return block.next;
} else if (block.next == block.end) {
- offset = anv_block_pool_alloc(block_pool, block_size);
+ offset = anv_block_pool_alloc(block_pool, block_size, padding);
new.next = offset + state_size;
new.end = offset + block_size;
old.u64 = __sync_lock_test_and_set(&pool->block.u64, new.u64);
uint32_t chunk_offset, uint32_t count,
uint32_t block_size)
{
- if (count == 0)
- return;
+ /* Disallow returning 0 chunks */
+ assert(count != 0);
/* Make sure we always return chunks aligned to the block_size */
assert(chunk_offset % block_size == 0);
uint32_t st_idx;
- VkResult result = anv_state_table_add(&pool->table, &st_idx, count);
+ UNUSED VkResult result = anv_state_table_add(&pool->table, &st_idx, count);
assert(result == VK_SUCCESS);
for (int i = 0; i < count; i++) {
/* update states that were added back to the state table */
}
uint32_t block_bucket = anv_state_pool_get_bucket(block_size);
- anv_free_list_push2(&pool->buckets[block_bucket].free_list,
- &pool->table, st_idx, count);
+ anv_free_list_push(&pool->buckets[block_bucket].free_list,
+ &pool->table, st_idx, count);
+}
+
+/** Returns a chunk of memory back to the state pool.
+ *
+ * Do a two-level split. If chunk_size is bigger than divisor
+ * (pool->block_size), we return as many divisor sized blocks as we can, from
+ * the end of the chunk.
+ *
+ * The remainder is then split into smaller blocks (starting at small_size if
+ * it is non-zero), with larger blocks always being taken from the end of the
+ * chunk.
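+ *
+ * For example (hypothetical sizes), with block_size = 8192: returning a
+ * 20480-byte chunk with small_size = 1024 first gives back two 8192-byte
+ * blocks from the end of the chunk, then splits the remaining 4096 bytes
+ * into four 1024-byte blocks.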
+ */
+static void
+anv_state_pool_return_chunk(struct anv_state_pool *pool,
+ uint32_t chunk_offset, uint32_t chunk_size,
+ uint32_t small_size)
+{
+ uint32_t divisor = pool->block_size;
+ uint32_t nblocks = chunk_size / divisor;
+ uint32_t rest = chunk_size - nblocks * divisor;
+
+ if (nblocks > 0) {
+ /* First return divisor aligned and sized chunks. We start returning
+ * larger blocks from the end of the chunk, since they should already be
+ * aligned to divisor. Also anv_state_pool_return_blocks() only accepts
+ * aligned chunks.
+ */
+ uint32_t offset = chunk_offset + rest;
+ anv_state_pool_return_blocks(pool, offset, nblocks, divisor);
+ }
+
+ chunk_size = rest;
+ divisor /= 2;
+
+ if (small_size > 0 && small_size < divisor)
+ divisor = small_size;
+
+ uint32_t min_size = 1 << ANV_MIN_STATE_SIZE_LOG2;
+
+ /* Just as before, return larger divisor aligned blocks from the end of the
+ * chunk first.
+ */
+ while (chunk_size > 0 && divisor >= min_size) {
+ nblocks = chunk_size / divisor;
+ rest = chunk_size - nblocks * divisor;
+ if (nblocks > 0) {
+ anv_state_pool_return_blocks(pool, chunk_offset + rest,
+ nblocks, divisor);
+ chunk_size = rest;
+ }
+ divisor /= 2;
+ }
}
static struct anv_state
int32_t offset;
/* Try free list first. */
- state = anv_free_list_pop2(&pool->buckets[bucket].free_list,
- &pool->table);
+ state = anv_free_list_pop(&pool->buckets[bucket].free_list,
+ &pool->table);
if (state) {
assert(state->offset >= 0);
goto done;
/* Try to grab a chunk from some larger bucket and split it up */
for (unsigned b = bucket + 1; b < ANV_STATE_BUCKETS; b++) {
- state = anv_free_list_pop2(&pool->buckets[b].free_list, &pool->table);
+ state = anv_free_list_pop(&pool->buckets[b].free_list, &pool->table);
if (state) {
unsigned chunk_size = anv_state_pool_get_bucket_size(b);
int32_t chunk_offset = state->offset;
*/
state->alloc_size = alloc_size;
- /* We've found a chunk that's larger than the requested state size.
+ /* Now return the unused part of the chunk back to the pool as free
+ * blocks
+ *
* There are a couple of options as to what we do with it:
*
* 1) We could fully split the chunk into state.alloc_size sized
* two-level split. If it's bigger than some fixed block_size,
* we split it into block_size sized chunks and return all but
* one of them. Then we split what remains into
- * state.alloc_size sized chunks and return all but one.
+ * state.alloc_size sized chunks and return them.
*
- * We choose option (3).
+ * We choose something close to option (3), which is implemented with
+ * anv_state_pool_return_chunk(). That is done by returning the
+ * remaining of the chunk, with alloc_size as a hint of the size that
+ * we want the smaller chunk split into.
*/
- if (chunk_size > pool->block_size &&
- alloc_size < pool->block_size) {
- assert(chunk_size % pool->block_size == 0);
- /* We don't want to split giant chunks into tiny chunks. Instead,
- * break anything bigger than a block into block-sized chunks and
- * then break it down into bucket-sized chunks from there. Return
- * all but the first block of the chunk to the block bucket.
- */
- uint32_t push_back = (chunk_size / pool->block_size) - 1;
- anv_state_pool_return_blocks(pool, chunk_offset + pool->block_size,
- push_back, pool->block_size);
- chunk_size = pool->block_size;
- }
-
- assert(chunk_size % alloc_size == 0);
- uint32_t push_back = (chunk_size / alloc_size) - 1;
- anv_state_pool_return_blocks(pool, chunk_offset + alloc_size,
- push_back, alloc_size);
+ anv_state_pool_return_chunk(pool, chunk_offset + alloc_size,
+ chunk_size - alloc_size, alloc_size);
goto done;
}
}
+ uint32_t padding;
offset = anv_fixed_size_state_pool_alloc_new(&pool->buckets[bucket],
&pool->block_pool,
alloc_size,
- pool->block_size);
+ pool->block_size,
+ &padding);
/* Every time we allocate a new state, add it to the state table */
uint32_t idx;
- VkResult result = anv_state_table_add(&pool->table, &idx, 1);
+ UNUSED VkResult result = anv_state_table_add(&pool->table, &idx, 1);
assert(result == VK_SUCCESS);
state = anv_state_table_get(&pool->table, idx);
state->alloc_size = alloc_size;
state->map = anv_block_pool_map(&pool->block_pool, offset);
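+ /* Any padding reported by the block pool is the stretch of pool space
+ * immediately before the block we were just handed; return it as free
+ * blocks so the space can be reused.
+ */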
+ if (padding > 0) {
+ uint32_t return_offset = offset - padding;
+ anv_state_pool_return_chunk(pool, return_offset, padding, 0);
+ }
+
done:
return *state;
}
struct anv_state
anv_state_pool_alloc_back(struct anv_state_pool *pool)
{
- struct anv_state state;
- state.alloc_size = pool->block_size;
+ struct anv_state *state;
+ uint32_t alloc_size = pool->block_size;
- if (anv_free_list_pop(&pool->back_alloc_free_list,
- &pool->block_pool.map, &state.offset)) {
- assert(state.offset < 0);
+ state = anv_free_list_pop(&pool->back_alloc_free_list, &pool->table);
+ if (state) {
+ assert(state->offset < 0);
goto done;
}
- state.offset = anv_block_pool_alloc_back(&pool->block_pool,
- pool->block_size);
+ int32_t offset;
+ offset = anv_block_pool_alloc_back(&pool->block_pool,
+ pool->block_size);
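+ /* Register the new state in the state table, just like front allocations,
+ * so that anv_state_pool_free() can push it onto the free list by index.
+ */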
+ uint32_t idx;
+ UNUSED VkResult result = anv_state_table_add(&pool->table, &idx, 1);
+ assert(result == VK_SUCCESS);
+
+ state = anv_state_table_get(&pool->table, idx);
+ state->offset = offset;
+ state->alloc_size = alloc_size;
+ state->map = anv_block_pool_map(&pool->block_pool, state->offset);
done:
- state.map = pool->block_pool.map + state.offset;
- VG(VALGRIND_MEMPOOL_ALLOC(pool, state.map, state.alloc_size));
- return state;
+ VG(VALGRIND_MEMPOOL_ALLOC(pool, state->map, state->alloc_size));
+ return *state;
}
static void
if (state.offset < 0) {
assert(state.alloc_size == pool->block_size);
anv_free_list_push(&pool->back_alloc_free_list,
- pool->block_pool.map, state.offset,
- state.alloc_size, 1);
+ &pool->table, state.idx, 1);
} else {
- anv_free_list_push2(&pool->buckets[bucket].free_list,
- &pool->table, state.idx, 1);
+ anv_free_list_push(&pool->buckets[bucket].free_list,
+ &pool->table, state.idx, 1);
}
}
return vk_error(VK_ERROR_MEMORY_MAP_FAILED);
}
+ /* We are removing the state flushes, so let's make sure that these buffers
+ * are cached/snooped.
+ */
+ if (!pool->device->info.has_llc) {
+ anv_gem_set_caching(pool->device, new_bo.gem_handle,
+ I915_CACHING_CACHED);
+ }
+
*bo = new_bo;
VG(VALGRIND_MEMPOOL_ALLOC(pool, bo->map, size));
return VK_SUCCESS;
}
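+/* Imports an application-supplied host allocation (VK_EXT_external_memory_host)
+ * as a BO by wrapping it in a userptr GEM object. If the kernel returns a
+ * gem_handle that is already in the cache, the existing BO is reused
+ * (provided its flags match) and its reference count is bumped.
+ */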
+VkResult
+anv_bo_cache_import_host_ptr(struct anv_device *device,
+ struct anv_bo_cache *cache,
+ void *host_ptr, uint32_t size,
+ uint64_t bo_flags, struct anv_bo **bo_out)
+{
+ assert(bo_flags == (bo_flags & ANV_BO_CACHE_SUPPORTED_FLAGS));
+ assert((bo_flags & ANV_BO_EXTERNAL) == 0);
+
+ uint32_t gem_handle = anv_gem_userptr(device, host_ptr, size);
+ if (!gem_handle)
+ return vk_error(VK_ERROR_INVALID_EXTERNAL_HANDLE);
+
+ pthread_mutex_lock(&cache->mutex);
+
+ struct anv_cached_bo *bo = anv_bo_cache_lookup_locked(cache, gem_handle);
+ if (bo) {
+ /* VK_EXT_external_memory_host doesn't require handling the same pointer
+ * being imported twice at the same time, but we don't get in the way. If
+ * the kernel gives us the same gem_handle, only succeed if the flags match.
+ */
+ if (bo_flags != bo->bo.flags) {
+ pthread_mutex_unlock(&cache->mutex);
+ return vk_errorf(device->instance, NULL,
+ VK_ERROR_INVALID_EXTERNAL_HANDLE,
+ "same host pointer imported two different ways");
+ }
+ __sync_fetch_and_add(&bo->refcount, 1);
+ } else {
+ bo = vk_alloc(&device->alloc, sizeof(struct anv_cached_bo), 8,
+ VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
+ if (!bo) {
+ anv_gem_close(device, gem_handle);
+ pthread_mutex_unlock(&cache->mutex);
+ return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
+ }
+
+ bo->refcount = 1;
+
+ anv_bo_init(&bo->bo, gem_handle, size);
+ bo->bo.flags = bo_flags;
+
+ if (!anv_vma_alloc(device, &bo->bo)) {
+ anv_gem_close(device, bo->bo.gem_handle);
+ pthread_mutex_unlock(&cache->mutex);
+ vk_free(&device->alloc, bo);
+ return vk_errorf(device->instance, NULL,
+ VK_ERROR_OUT_OF_DEVICE_MEMORY,
+ "failed to allocate virtual address for BO");
+ }
+
+ _mesa_hash_table_insert(cache->bo_map, (void *)(uintptr_t)gem_handle, bo);
+ }
+
+ pthread_mutex_unlock(&cache->mutex);
+ *bo_out = &bo->bo;
+
+ return VK_SUCCESS;
+}
+
VkResult
anv_bo_cache_import(struct anv_device *device,
struct anv_bo_cache *cache,