/* Allocations are always at least 64 byte aligned, so 1 is an invalid value.
* We use it to indicate the free list is empty. */
-#define EMPTY 1
+#define EMPTY UINT32_MAX
+
+#define PAGE_SIZE 4096
struct anv_mmap_cleanup {
void *map;
return 1 << ilog2_round_up(value);
}
-static bool
-anv_free_list_pop(union anv_free_list *list, void **map, int32_t *offset)
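+/* Records one mmap of the state table memfd so that it can be unmapped when
+ * the table is destroyed.
+ */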
+struct anv_state_table_cleanup {
+ void *map;
+ size_t size;
+};
+
+#define ANV_STATE_TABLE_CLEANUP_INIT ((struct anv_state_table_cleanup){0})
+#define ANV_STATE_ENTRY_SIZE (sizeof(struct anv_free_entry))
+
+static VkResult
+anv_state_table_expand_range(struct anv_state_table *table, uint32_t size);
+
+VkResult
+anv_state_table_init(struct anv_state_table *table,
+ struct anv_device *device,
+ uint32_t initial_entries)
{
- union anv_free_list current, new, old;
+ VkResult result;
- current.u64 = list->u64;
- while (current.offset != EMPTY) {
- /* We have to add a memory barrier here so that the list head (and
- * offset) gets read before we read the map pointer. This way we
- * know that the map pointer is valid for the given offset at the
- * point where we read it.
- */
- __sync_synchronize();
+ table->device = device;
- int32_t *next_ptr = *map + current.offset;
- new.offset = VG_NOACCESS_READ(next_ptr);
- new.count = current.count + 1;
- old.u64 = __sync_val_compare_and_swap(&list->u64, current.u64, new.u64);
- if (old.u64 == current.u64) {
- *offset = current.offset;
- return true;
- }
- current = old;
+ table->fd = memfd_create("state table", MFD_CLOEXEC);
+ if (table->fd == -1)
+ return vk_error(VK_ERROR_INITIALIZATION_FAILED);
+
+ /* Just make it 2GB up-front. The Linux kernel won't actually back it
+ * with pages until we either map and fault on one of them or we use
+ * userptr and send a chunk of it off to the GPU.
+ */
+ if (ftruncate(table->fd, BLOCK_POOL_MEMFD_SIZE) == -1) {
+ result = vk_error(VK_ERROR_INITIALIZATION_FAILED);
+ goto fail_fd;
}
- return false;
+ if (!u_vector_init(&table->mmap_cleanups,
+ round_to_power_of_two(sizeof(struct anv_state_table_cleanup)),
+ 128)) {
+ result = vk_error(VK_ERROR_INITIALIZATION_FAILED);
+ goto fail_fd;
+ }
+
+ table->state.next = 0;
+ table->state.end = 0;
+ table->size = 0;
+
+ uint32_t initial_size = initial_entries * ANV_STATE_ENTRY_SIZE;
+ result = anv_state_table_expand_range(table, initial_size);
+ if (result != VK_SUCCESS)
+ goto fail_mmap_cleanups;
+
+ return VK_SUCCESS;
+
+ fail_mmap_cleanups:
+ u_vector_finish(&table->mmap_cleanups);
+ fail_fd:
+ close(table->fd);
+
+ return result;
}
-static void
-anv_free_list_push(union anv_free_list *list, void *map, int32_t offset,
- uint32_t size, uint32_t count)
+static VkResult
+anv_state_table_expand_range(struct anv_state_table *table, uint32_t size)
{
- union anv_free_list current, old, new;
- int32_t *next_ptr = map + offset;
+ void *map;
+   struct anv_state_table_cleanup *cleanup;
+
+   /* Assert that we only ever grow the table */
+ assert(size >= table->state.end);
+
+ /* Make sure that we don't go outside the bounds of the memfd */
+ if (size > BLOCK_POOL_MEMFD_SIZE)
+ return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
+
+ cleanup = u_vector_add(&table->mmap_cleanups);
+ if (!cleanup)
+ return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
+
+   *cleanup = ANV_STATE_TABLE_CLEANUP_INIT;
- /* If we're returning more than one chunk, we need to build a chain to add
- * to the list. Fortunately, we can do this without any atomics since we
- * own everything in the chain right now. `offset` is left pointing to the
- * head of our chain list while `next_ptr` points to the tail.
+ /* Just leak the old map until we destroy the pool. We can't munmap it
+ * without races or imposing locking on the block allocate fast path. On
+    * the whole the leaked maps add up to less than the size of the
+ * current map. MAP_POPULATE seems like the right thing to do, but we
+ * should try to get some numbers.
*/
- for (uint32_t i = 1; i < count; i++) {
- VG_NOACCESS_WRITE(next_ptr, offset + i * size);
- next_ptr = map + offset + i * size;
+ map = mmap(NULL, size, PROT_READ | PROT_WRITE,
+ MAP_SHARED | MAP_POPULATE, table->fd, 0);
+ if (map == MAP_FAILED) {
+ return vk_errorf(table->device->instance, table->device,
+ VK_ERROR_OUT_OF_HOST_MEMORY, "mmap failed: %m");
+ }
+
+ cleanup->map = map;
+ cleanup->size = size;
+
+ table->map = map;
+ table->size = size;
+
+ return VK_SUCCESS;
+}
+
+static VkResult
+anv_state_table_grow(struct anv_state_table *table)
+{
+ VkResult result = VK_SUCCESS;
+
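+   /* Compute how many bytes are needed to back every entry handed out so
+    * far, rounded up to a whole page.
+    */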
+ uint32_t used = align_u32(table->state.next * ANV_STATE_ENTRY_SIZE,
+ PAGE_SIZE);
+ uint32_t old_size = table->size;
+
+   /* The state table is always initialized to a nonzero size and this function
+ * is always called after initialization.
+ */
+ assert(old_size > 0);
+
+ uint32_t required = MAX2(used, old_size);
+ if (used * 2 <= required) {
+      /* If we're in this case then this isn't the first allocation and we
+       * already have enough space to hold double what we have allocated.
+       * There's nothing for us to do.
+ */
+ goto done;
+ }
+
+ uint32_t size = old_size * 2;
+ while (size < required)
+ size *= 2;
+
+ assert(size > table->size);
+
+ result = anv_state_table_expand_range(table, size);
+
+ done:
+ return result;
+}
+
+void
+anv_state_table_finish(struct anv_state_table *table)
+{
+ struct anv_state_table_cleanup *cleanup;
+
+ u_vector_foreach(cleanup, &table->mmap_cleanups) {
+ if (cleanup->map)
+ munmap(cleanup->map, cleanup->size);
}
+ u_vector_finish(&table->mmap_cleanups);
+
+ close(table->fd);
+}
+
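+/** Adds 'count' consecutive entries to the state table.
+ *
+ * On success, *idx is set to the index of the first new entry. If the table
+ * runs out of room, one thread grows it (and wakes any waiters via a futex
+ * on table->state.end) while the other threads wait and retry.
+ */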
+VkResult
+anv_state_table_add(struct anv_state_table *table, uint32_t *idx,
+ uint32_t count)
+{
+ struct anv_block_state state, old, new;
+ VkResult result;
+
+ assert(idx);
+
+ while(1) {
+ state.u64 = __sync_fetch_and_add(&table->state.u64, count);
+ if (state.next + count <= state.end) {
+ assert(table->map);
+ struct anv_free_entry *entry = &table->map[state.next];
+ for (int i = 0; i < count; i++) {
+ entry[i].state.idx = state.next + i;
+ }
+ *idx = state.next;
+ return VK_SUCCESS;
+ } else if (state.next <= state.end) {
+         /* We allocated the first entry beyond the current end of the table,
+          * so we have to grow it. table->state.next acts as a mutex: threads
+          * that try to allocate now will get indexes above the current limit
+          * and hit futex_wait below.
+ */
+ new.next = state.next + count;
+ do {
+ result = anv_state_table_grow(table);
+ if (result != VK_SUCCESS)
+ return result;
+ new.end = table->size / ANV_STATE_ENTRY_SIZE;
+ } while (new.end < new.next);
+
+ old.u64 = __sync_lock_test_and_set(&table->state.u64, new.u64);
+ if (old.next != state.next)
+ futex_wake(&table->state.end, INT_MAX);
+ } else {
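+         /* Some other thread is growing the table right now. Wait until it
+          * publishes the new end (it will futex_wake us) and then retry.
+          */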
+ futex_wait(&table->state.end, state.end, NULL);
+ continue;
+ }
+ }
+}
+
+void
+anv_free_list_push(union anv_free_list *list,
+ struct anv_state_table *table,
+ uint32_t first, uint32_t count)
+{
+ union anv_free_list current, old, new;
+ uint32_t last = first;
+
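+   /* If we're pushing more than one state, build the chain first. We can do
+    * this without any atomics since we still own every entry involved:
+    * 'first' is the head of the chain and 'last' ends up as its tail.
+    */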
+ for (uint32_t i = 1; i < count; i++, last++)
+ table->map[last].next = last + 1;
+
old = *list;
do {
current = old;
- VG_NOACCESS_WRITE(next_ptr, current.offset);
- new.offset = offset;
+ table->map[last].next = current.offset;
+ new.offset = first;
new.count = current.count + 1;
old.u64 = __sync_val_compare_and_swap(&list->u64, current.u64, new.u64);
} while (old.u64 != current.u64);
}
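+/** Pops one state off the free list, or returns NULL if the list is empty.
+ *
+ * The list head packs the entry offset together with a change count in a
+ * single 64-bit word, so the compare-and-swap below also fails if another
+ * thread popped and pushed entries in the meantime (the ABA problem).
+ */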
+struct anv_state *
+anv_free_list_pop(union anv_free_list *list,
+ struct anv_state_table *table)
+{
+ union anv_free_list current, new, old;
+
+ current.u64 = list->u64;
+ while (current.offset != EMPTY) {
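+      /* We have to add a memory barrier here so that the list head (and
+       * offset) gets read before we read the entry it points to in
+       * table->map. This way we know the entry we read really belongs to
+       * the head we are about to compare-and-swap against.
+       */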
+ __sync_synchronize();
+ new.offset = table->map[current.offset].next;
+ new.count = current.count + 1;
+ old.u64 = __sync_val_compare_and_swap(&list->u64, current.u64, new.u64);
+ if (old.u64 == current.u64) {
+ struct anv_free_entry *entry = &table->map[current.offset];
+ return &entry->state;
+ }
+ current = old;
+ }
+
+ return NULL;
+}
+
/* All pointers in the ptr_free_list are assumed to be page-aligned. This
* means that the bottom 12 bits should all be zero.
*/
pool->device = device;
pool->bo_flags = bo_flags;
+ pool->nbos = 0;
+ pool->size = 0;
pool->start_address = gen_canonical_address(start_address);
- anv_bo_init(&pool->bo, 0, 0);
+ /* This pointer will always point to the first BO in the list */
+ pool->bo = &pool->bos[0];
+
+ anv_bo_init(pool->bo, 0, 0);
pool->fd = memfd_create("block pool", MFD_CLOEXEC);
if (pool->fd == -1)
close(pool->fd);
}
-#define PAGE_SIZE 4096
-
static VkResult
anv_block_pool_expand_range(struct anv_block_pool *pool,
uint32_t center_bo_offset, uint32_t size)
#endif
/* Now that we successfull allocated everything, we can write the new
- * values back into pool. */
- pool->map = map + center_bo_offset;
+ * center_bo_offset back into pool. */
pool->center_bo_offset = center_bo_offset;
/* For block pool BOs we have to be a bit careful about where we place them
* the EXEC_OBJECT_SUPPORTS_48B_ADDRESS flag and the kernel does all of the
* hard work for us.
*/
- anv_bo_init(&pool->bo, gem_handle, size);
+ struct anv_bo *bo;
+
+ assert(pool->nbos < ANV_MAX_BLOCK_POOL_BOS);
+
+ /* We just need one BO, and we already have a pointer to it. Let's simply
+ * "allocate" it from our array.
+ */
+ if (pool->nbos == 0)
+ pool->nbos++;
+
+ bo = pool->bo;
+
+ anv_bo_init(bo, gem_handle, size);
if (pool->bo_flags & EXEC_OBJECT_PINNED) {
- pool->bo.offset = pool->start_address + BLOCK_POOL_MEMFD_CENTER -
+ bo->offset = pool->start_address + BLOCK_POOL_MEMFD_CENTER -
center_bo_offset;
}
- pool->bo.flags = pool->bo_flags;
- pool->bo.map = map;
+ bo->flags = pool->bo_flags;
+ bo->map = map;
+ pool->size = size;
return VK_SUCCESS;
}
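+/** Returns the BO that backs the given pool offset.
+ *
+ * For softpin (EXEC_OBJECT_PINNED) pools we walk the BO list to find the BO
+ * containing the offset and rewrite *offset to be relative to that BO.
+ * Otherwise the single pool BO is returned directly.
+ */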
+static struct anv_bo *
+anv_block_pool_get_bo(struct anv_block_pool *pool, int32_t *offset)
+{
+ struct anv_bo *bo, *bo_found = NULL;
+ int32_t cur_offset = 0;
+
+ assert(offset);
+
+ if (!(pool->bo_flags & EXEC_OBJECT_PINNED))
+ return pool->bo;
+
+ anv_block_pool_foreach_bo(bo, pool) {
+ if (*offset < cur_offset + bo->size) {
+ bo_found = bo;
+ break;
+ }
+ cur_offset += bo->size;
+ }
+
+ assert(bo_found != NULL);
+ *offset -= cur_offset;
+
+ return bo_found;
+}
+
+/** Returns current memory map of the block pool.
+ *
+ * The returned pointer points to the map for the memory at the specified
+ * offset. The offset parameter is relative to the "center" of the block pool
+ * rather than the start of the block pool BO map.
+ */
+void*
+anv_block_pool_map(struct anv_block_pool *pool, int32_t offset)
+{
+ struct anv_bo *bo = anv_block_pool_get_bo(pool, &offset);
+ return bo->map + pool->center_bo_offset + offset;
+}
+
/** Grows and re-centers the block pool.
*
* We grow the block pool in one or both directions in such a way that the
assert(state == &pool->state || back_used > 0);
- uint32_t old_size = pool->bo.size;
+ uint32_t old_size = pool->size;
/* The block pool is always initialized to a nonzero size and this function
* is always called after initialization.
while (size < back_required + front_required)
size *= 2;
- assert(size > pool->bo.size);
+ assert(size > pool->size);
/* We compute a new center_bo_offset such that, when we double the size
* of the pool, we maintain the ratio of how much is used by each side.
result = anv_block_pool_expand_range(pool, center_bo_offset, size);
- pool->bo.flags = pool->bo_flags;
+ pool->bo->flags = pool->bo_flags;
done:
pthread_mutex_unlock(&pool->device->mutex);
* needs to do so in order to maintain its concurrency model.
*/
if (state == &pool->state) {
- return pool->bo.size - pool->center_bo_offset;
+ return pool->size - pool->center_bo_offset;
} else {
assert(pool->center_bo_offset > 0);
return pool->center_bo_offset;
static uint32_t
anv_block_pool_alloc_new(struct anv_block_pool *pool,
struct anv_block_state *pool_state,
- uint32_t block_size)
+ uint32_t block_size, uint32_t *padding)
{
struct anv_block_state state, old, new;
+ /* Most allocations won't generate any padding */
+ if (padding)
+ *padding = 0;
+
while (1) {
state.u64 = __sync_fetch_and_add(&pool_state->u64, block_size);
if (state.next + block_size <= state.end) {
- assert(pool->map);
return state.next;
} else if (state.next <= state.end) {
+ if (pool->bo_flags & EXEC_OBJECT_PINNED && state.next < state.end) {
+ /* We need to grow the block pool, but still have some leftover
+ * space that can't be used by that particular allocation. So we
+             * hand that leftover back to the caller as "padding".
+ */
+ uint32_t leftover = state.end - state.next;
+
+            /* If there is leftover space, the caller must have passed a
+             * padding pointer so it can deal with it.
+ */
+ assert(leftover == 0 || padding);
+ if (padding)
+ *padding = leftover;
+ state.next += leftover;
+ }
+
/* We allocated the first block outside the pool so we have to grow
* the pool. pool_state->next acts a mutex: threads who try to
* allocate now will get block indexes above the current limit and
int32_t
anv_block_pool_alloc(struct anv_block_pool *pool,
- uint32_t block_size)
+ uint32_t block_size, uint32_t *padding)
{
- return anv_block_pool_alloc_new(pool, &pool->state, block_size);
+ uint32_t offset;
+
+ offset = anv_block_pool_alloc_new(pool, &pool->state, block_size, padding);
+
+ return offset;
}
/* Allocates a block out of the back of the block pool.
uint32_t block_size)
{
int32_t offset = anv_block_pool_alloc_new(pool, &pool->back_state,
- block_size);
+ block_size, NULL);
/* The offset we get out of anv_block_pool_alloc_new() is actually the
* number of bytes downwards from the middle to the end of the block.
if (result != VK_SUCCESS)
return result;
+ result = anv_state_table_init(&pool->table, device, 64);
+ if (result != VK_SUCCESS) {
+ anv_block_pool_finish(&pool->block_pool);
+ return result;
+ }
+
assert(util_is_power_of_two_or_zero(block_size));
pool->block_size = block_size;
pool->back_alloc_free_list = ANV_FREE_LIST_EMPTY;
anv_state_pool_finish(struct anv_state_pool *pool)
{
VG(VALGRIND_DESTROY_MEMPOOL(pool));
+ anv_state_table_finish(&pool->table);
anv_block_pool_finish(&pool->block_pool);
}
anv_fixed_size_state_pool_alloc_new(struct anv_fixed_size_state_pool *pool,
struct anv_block_pool *block_pool,
uint32_t state_size,
- uint32_t block_size)
+ uint32_t block_size,
+ uint32_t *padding)
{
struct anv_block_state block, old, new;
uint32_t offset;
+ /* We don't always use anv_block_pool_alloc(), which would set *padding to
+ * zero for us. So if we have a pointer to padding, we must zero it out
+ * ourselves here, to make sure we always return some sensible value.
+ */
+ if (padding)
+ *padding = 0;
+
/* If our state is large, we don't need any sub-allocation from a block.
* Instead, we just grab whole (potentially large) blocks.
*/
if (state_size >= block_size)
- return anv_block_pool_alloc(block_pool, state_size);
+ return anv_block_pool_alloc(block_pool, state_size, padding);
restart:
block.u64 = __sync_fetch_and_add(&pool->block.u64, state_size);
if (block.next < block.end) {
return block.next;
} else if (block.next == block.end) {
- offset = anv_block_pool_alloc(block_pool, block_size);
+ offset = anv_block_pool_alloc(block_pool, block_size, padding);
new.next = offset + state_size;
new.end = offset + block_size;
old.u64 = __sync_lock_test_and_set(&pool->block.u64, new.u64);
return 1 << size_log2;
}
+/** Helper to push a chunk into the state table.
+ *
+ * It creates 'count' entries in the state table, updates their sizes,
+ * offsets and maps, and pushes them onto the bucket's free list as "free"
+ * states.
+ */
+static void
+anv_state_pool_return_blocks(struct anv_state_pool *pool,
+ uint32_t chunk_offset, uint32_t count,
+ uint32_t block_size)
+{
+ /* Disallow returning 0 chunks */
+ assert(count != 0);
+
+ /* Make sure we always return chunks aligned to the block_size */
+ assert(chunk_offset % block_size == 0);
+
+ uint32_t st_idx;
+ VkResult result = anv_state_table_add(&pool->table, &st_idx, count);
+ assert(result == VK_SUCCESS);
+ for (int i = 0; i < count; i++) {
+ /* update states that were added back to the state table */
+ struct anv_state *state_i = anv_state_table_get(&pool->table,
+ st_idx + i);
+ state_i->alloc_size = block_size;
+ state_i->offset = chunk_offset + block_size * i;
+ state_i->map = anv_block_pool_map(&pool->block_pool, state_i->offset);
+ }
+
+ uint32_t block_bucket = anv_state_pool_get_bucket(block_size);
+ anv_free_list_push(&pool->buckets[block_bucket].free_list,
+ &pool->table, st_idx, count);
+}
+
+/** Returns a chunk of memory back to the state pool.
+ *
+ * Do a two-level split. If chunk_size is bigger than divisor
+ * (pool->block_size), we return as many divisor sized blocks as we can, from
+ * the end of the chunk.
+ *
+ * The remainder is then split into smaller blocks (starting at small_size if
+ * it is non-zero), with larger blocks always being taken from the end of the
+ * chunk.
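+ *
+ * For example, with block_size = 8192 and small_size = 1024, a 20480-byte
+ * chunk is returned as two 8192-byte blocks taken from the end of the chunk
+ * plus four 1024-byte blocks covering the remaining 4096 bytes.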
+ */
+static void
+anv_state_pool_return_chunk(struct anv_state_pool *pool,
+ uint32_t chunk_offset, uint32_t chunk_size,
+ uint32_t small_size)
+{
+ uint32_t divisor = pool->block_size;
+ uint32_t nblocks = chunk_size / divisor;
+ uint32_t rest = chunk_size - nblocks * divisor;
+
+ if (nblocks > 0) {
+ /* First return divisor aligned and sized chunks. We start returning
+       * larger blocks from the end of the chunk, since they should already be
+ * aligned to divisor. Also anv_state_pool_return_blocks() only accepts
+ * aligned chunks.
+ */
+ uint32_t offset = chunk_offset + rest;
+ anv_state_pool_return_blocks(pool, offset, nblocks, divisor);
+ }
+
+ chunk_size = rest;
+ divisor /= 2;
+
+ if (small_size > 0 && small_size < divisor)
+ divisor = small_size;
+
+ uint32_t min_size = 1 << ANV_MIN_STATE_SIZE_LOG2;
+
+ /* Just as before, return larger divisor aligned blocks from the end of the
+ * chunk first.
+ */
+ while (chunk_size > 0 && divisor >= min_size) {
+ nblocks = chunk_size / divisor;
+ rest = chunk_size - nblocks * divisor;
+ if (nblocks > 0) {
+ anv_state_pool_return_blocks(pool, chunk_offset + rest,
+ nblocks, divisor);
+ chunk_size = rest;
+ }
+ divisor /= 2;
+ }
+}
+
static struct anv_state
anv_state_pool_alloc_no_vg(struct anv_state_pool *pool,
uint32_t size, uint32_t align)
{
uint32_t bucket = anv_state_pool_get_bucket(MAX2(size, align));
- struct anv_state state;
- state.alloc_size = anv_state_pool_get_bucket_size(bucket);
+ struct anv_state *state;
+ uint32_t alloc_size = anv_state_pool_get_bucket_size(bucket);
+ int32_t offset;
/* Try free list first. */
- if (anv_free_list_pop(&pool->buckets[bucket].free_list,
- &pool->block_pool.map, &state.offset)) {
- assert(state.offset >= 0);
+ state = anv_free_list_pop(&pool->buckets[bucket].free_list,
+ &pool->table);
+ if (state) {
+ assert(state->offset >= 0);
goto done;
}
/* Try to grab a chunk from some larger bucket and split it up */
for (unsigned b = bucket + 1; b < ANV_STATE_BUCKETS; b++) {
- int32_t chunk_offset;
- if (anv_free_list_pop(&pool->buckets[b].free_list,
- &pool->block_pool.map, &chunk_offset)) {
+ state = anv_free_list_pop(&pool->buckets[b].free_list, &pool->table);
+ if (state) {
unsigned chunk_size = anv_state_pool_get_bucket_size(b);
+ int32_t chunk_offset = state->offset;
+
+         /* First let's update the state we got to its new size. The offset and map
+ * remain the same.
+ */
+ state->alloc_size = alloc_size;
- /* We've found a chunk that's larger than the requested state size.
+ /* Now return the unused part of the chunk back to the pool as free
+          * blocks.
+ *
* There are a couple of options as to what we do with it:
*
* 1) We could fully split the chunk into state.alloc_size sized
* two-level split. If it's bigger than some fixed block_size,
* we split it into block_size sized chunks and return all but
* one of them. Then we split what remains into
- * state.alloc_size sized chunks and return all but one.
+ * state.alloc_size sized chunks and return them.
*
- * We choose option (3).
+ * We choose something close to option (3), which is implemented with
+ * anv_state_pool_return_chunk(). That is done by returning the
+          * remainder of the chunk, with alloc_size as a hint for the size we
+          * want the smaller chunks split into.
*/
- if (chunk_size > pool->block_size &&
- state.alloc_size < pool->block_size) {
- assert(chunk_size % pool->block_size == 0);
- /* We don't want to split giant chunks into tiny chunks. Instead,
- * break anything bigger than a block into block-sized chunks and
- * then break it down into bucket-sized chunks from there. Return
- * all but the first block of the chunk to the block bucket.
- */
- const uint32_t block_bucket =
- anv_state_pool_get_bucket(pool->block_size);
- anv_free_list_push(&pool->buckets[block_bucket].free_list,
- pool->block_pool.map,
- chunk_offset + pool->block_size,
- pool->block_size,
- (chunk_size / pool->block_size) - 1);
- chunk_size = pool->block_size;
- }
-
- assert(chunk_size % state.alloc_size == 0);
- anv_free_list_push(&pool->buckets[bucket].free_list,
- pool->block_pool.map,
- chunk_offset + state.alloc_size,
- state.alloc_size,
- (chunk_size / state.alloc_size) - 1);
-
- state.offset = chunk_offset;
+ anv_state_pool_return_chunk(pool, chunk_offset + alloc_size,
+ chunk_size - alloc_size, alloc_size);
goto done;
}
}
- state.offset = anv_fixed_size_state_pool_alloc_new(&pool->buckets[bucket],
- &pool->block_pool,
- state.alloc_size,
- pool->block_size);
+ uint32_t padding;
+ offset = anv_fixed_size_state_pool_alloc_new(&pool->buckets[bucket],
+ &pool->block_pool,
+ alloc_size,
+ pool->block_size,
+ &padding);
+   /* Every time we allocate a new state, add it to the state table. */
+ uint32_t idx;
+ VkResult result = anv_state_table_add(&pool->table, &idx, 1);
+ assert(result == VK_SUCCESS);
+
+ state = anv_state_table_get(&pool->table, idx);
+ state->offset = offset;
+ state->alloc_size = alloc_size;
+ state->map = anv_block_pool_map(&pool->block_pool, offset);
+
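+   /* If the block pool allocation left some unusable space in front of the
+    * offset we got, hand it back to the state pool as free states rather
+    * than leaking it.
+    */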
+ if (padding > 0) {
+ uint32_t return_offset = offset - padding;
+ anv_state_pool_return_chunk(pool, return_offset, padding, 0);
+ }
done:
- state.map = pool->block_pool.map + state.offset;
- return state;
+ return *state;
}
struct anv_state
struct anv_state
anv_state_pool_alloc_back(struct anv_state_pool *pool)
{
- struct anv_state state;
- state.alloc_size = pool->block_size;
+ struct anv_state *state;
+ uint32_t alloc_size = pool->block_size;
- if (anv_free_list_pop(&pool->back_alloc_free_list,
- &pool->block_pool.map, &state.offset)) {
- assert(state.offset < 0);
+ state = anv_free_list_pop(&pool->back_alloc_free_list, &pool->table);
+ if (state) {
+ assert(state->offset < 0);
goto done;
}
- state.offset = anv_block_pool_alloc_back(&pool->block_pool,
- pool->block_size);
+ int32_t offset;
+ offset = anv_block_pool_alloc_back(&pool->block_pool,
+ pool->block_size);
+ uint32_t idx;
+ VkResult result = anv_state_table_add(&pool->table, &idx, 1);
+ assert(result == VK_SUCCESS);
+
+ state = anv_state_table_get(&pool->table, idx);
+ state->offset = offset;
+ state->alloc_size = alloc_size;
+ state->map = anv_block_pool_map(&pool->block_pool, state->offset);
done:
- state.map = pool->block_pool.map + state.offset;
- VG(VALGRIND_MEMPOOL_ALLOC(pool, state.map, state.alloc_size));
- return state;
+ VG(VALGRIND_MEMPOOL_ALLOC(pool, state->map, state->alloc_size));
+ return *state;
}
static void
if (state.offset < 0) {
assert(state.alloc_size == pool->block_size);
anv_free_list_push(&pool->back_alloc_free_list,
- pool->block_pool.map, state.offset,
- state.alloc_size, 1);
+ &pool->table, state.idx, 1);
} else {
anv_free_list_push(&pool->buckets[bucket].free_list,
- pool->block_pool.map, state.offset,
- state.alloc_size, 1);
+ &pool->table, state.idx, 1);
}
}
struct bo_pool_bo_link link_copy = VG_NOACCESS_READ(link);
anv_gem_munmap(link_copy.bo.map, link_copy.bo.size);
+ anv_vma_free(pool->device, &link_copy.bo);
anv_gem_close(pool->device, link_copy.bo.gem_handle);
link = link_copy.next;
}
new_bo.flags = pool->bo_flags;
+ if (!anv_vma_alloc(pool->device, &new_bo))
+ return vk_error(VK_ERROR_OUT_OF_DEVICE_MEMORY);
+
assert(new_bo.size == pow2_size);
new_bo.map = anv_gem_mmap(pool->device, new_bo.gem_handle, 0, pow2_size, 0);
if (new_bo.map == MAP_FAILED) {
anv_gem_close(pool->device, new_bo.gem_handle);
+ anv_vma_free(pool->device, &new_bo);
return vk_error(VK_ERROR_MEMORY_MAP_FAILED);
}
for (unsigned s = 0; s < MESA_SHADER_STAGES; s++) {
for (unsigned i = 0; i < 16; i++) {
struct anv_scratch_bo *bo = &pool->bos[i][s];
- if (bo->exists > 0)
+ if (bo->exists > 0) {
+ anv_vma_free(device, &bo->bo);
anv_gem_close(device, bo->bo.gem_handle);
+ }
}
}
}
if (device->instance->physicalDevice.has_exec_async)
bo->bo.flags |= EXEC_OBJECT_ASYNC;
+ if (device->instance->physicalDevice.use_softpin)
+ bo->bo.flags |= EXEC_OBJECT_PINNED;
+
+ anv_vma_alloc(device, &bo->bo);
+
/* Set the exists last because it may be read by other threads */
__sync_synchronize();
bo->exists = true;
VkResult
anv_bo_cache_init(struct anv_bo_cache *cache)
{
- cache->bo_map = _mesa_hash_table_create(NULL, _mesa_hash_pointer,
- _mesa_key_pointer_equal);
+ cache->bo_map = _mesa_pointer_hash_table_create(NULL);
if (!cache->bo_map)
return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
#define ANV_BO_CACHE_SUPPORTED_FLAGS \
(EXEC_OBJECT_WRITE | \
EXEC_OBJECT_ASYNC | \
- EXEC_OBJECT_SUPPORTS_48B_ADDRESS)
+ EXEC_OBJECT_SUPPORTS_48B_ADDRESS | \
+ EXEC_OBJECT_PINNED | \
+ ANV_BO_EXTERNAL)
VkResult
anv_bo_cache_alloc(struct anv_device *device,
bo->bo.flags = bo_flags;
+ if (!anv_vma_alloc(device, &bo->bo)) {
+ anv_gem_close(device, bo->bo.gem_handle);
+ vk_free(&device->alloc, bo);
+ return vk_errorf(device->instance, NULL,
+ VK_ERROR_OUT_OF_DEVICE_MEMORY,
+ "failed to allocate virtual address for BO");
+ }
+
assert(bo->bo.gem_handle);
pthread_mutex_lock(&cache->mutex);
struct anv_bo **bo_out)
{
assert(bo_flags == (bo_flags & ANV_BO_CACHE_SUPPORTED_FLAGS));
+ assert(bo_flags & ANV_BO_EXTERNAL);
pthread_mutex_lock(&cache->mutex);
uint32_t gem_handle = anv_gem_fd_to_handle(device, fd);
if (!gem_handle) {
pthread_mutex_unlock(&cache->mutex);
- return vk_error(VK_ERROR_INVALID_EXTERNAL_HANDLE_KHR);
+ return vk_error(VK_ERROR_INVALID_EXTERNAL_HANDLE);
}
struct anv_cached_bo *bo = anv_bo_cache_lookup_locked(cache, gem_handle);
* client has imported a BO twice in different ways and they get what
* they have coming.
*/
- uint64_t new_flags = 0;
+ uint64_t new_flags = ANV_BO_EXTERNAL;
new_flags |= (bo->bo.flags | bo_flags) & EXEC_OBJECT_WRITE;
new_flags |= (bo->bo.flags & bo_flags) & EXEC_OBJECT_ASYNC;
new_flags |= (bo->bo.flags & bo_flags) & EXEC_OBJECT_SUPPORTS_48B_ADDRESS;
+ new_flags |= (bo->bo.flags | bo_flags) & EXEC_OBJECT_PINNED;
+
+ /* It's theoretically possible for a BO to get imported such that it's
+ * both pinned and not pinned. The only way this can happen is if it
+ * gets imported as both a semaphore and a memory object and that would
+ * be an application error. Just fail out in that case.
+ */
+ if ((bo->bo.flags & EXEC_OBJECT_PINNED) !=
+ (bo_flags & EXEC_OBJECT_PINNED)) {
+ pthread_mutex_unlock(&cache->mutex);
+ return vk_errorf(device->instance, NULL,
+ VK_ERROR_INVALID_EXTERNAL_HANDLE,
+ "The same BO was imported two different ways");
+ }
+
+ /* It's also theoretically possible that someone could export a BO from
+ * one heap and import it into another or to import the same BO into two
+ * different heaps. If this happens, we could potentially end up both
+ * allowing and disallowing 48-bit addresses. There's not much we can
+ * do about it if we're pinning so we just throw an error and hope no
+ * app is actually that stupid.
+ */
+ if ((new_flags & EXEC_OBJECT_PINNED) &&
+ (bo->bo.flags & EXEC_OBJECT_SUPPORTS_48B_ADDRESS) !=
+ (bo_flags & EXEC_OBJECT_SUPPORTS_48B_ADDRESS)) {
+ pthread_mutex_unlock(&cache->mutex);
+ return vk_errorf(device->instance, NULL,
+ VK_ERROR_INVALID_EXTERNAL_HANDLE,
+ "The same BO was imported on two different heaps");
+ }
bo->bo.flags = new_flags;
if (size == (off_t)-1) {
anv_gem_close(device, gem_handle);
pthread_mutex_unlock(&cache->mutex);
- return vk_error(VK_ERROR_INVALID_EXTERNAL_HANDLE_KHR);
+ return vk_error(VK_ERROR_INVALID_EXTERNAL_HANDLE);
}
bo = vk_alloc(&device->alloc, sizeof(struct anv_cached_bo), 8,
anv_bo_init(&bo->bo, gem_handle, size);
bo->bo.flags = bo_flags;
+ if (!anv_vma_alloc(device, &bo->bo)) {
+ anv_gem_close(device, bo->bo.gem_handle);
+ pthread_mutex_unlock(&cache->mutex);
+ vk_free(&device->alloc, bo);
+ return vk_errorf(device->instance, NULL,
+ VK_ERROR_OUT_OF_DEVICE_MEMORY,
+ "failed to allocate virtual address for BO");
+ }
+
_mesa_hash_table_insert(cache->bo_map, (void *)(uintptr_t)gem_handle, bo);
}
assert(anv_bo_cache_lookup(cache, bo_in->gem_handle) == bo_in);
struct anv_cached_bo *bo = (struct anv_cached_bo *)bo_in;
+ /* This BO must have been flagged external in order for us to be able
+ * to export it. This is done based on external options passed into
+ * anv_AllocateMemory.
+ */
+ assert(bo->bo.flags & ANV_BO_EXTERNAL);
+
int fd = anv_gem_handle_to_fd(device, bo->bo.gem_handle);
if (fd < 0)
return vk_error(VK_ERROR_TOO_MANY_OBJECTS);
if (bo->bo.map)
anv_gem_munmap(bo->bo.map, bo->bo.size);
+ anv_vma_free(device, &bo->bo);
+
anv_gem_close(device, bo->bo.gem_handle);
/* Don't unlock until we've actually closed the BO. The whole point of