-/* Allocations are always at least 64 byte aligned, so 1 is an invalid value.
- * We use it to indicate the free list is empty. */
-#define EMPTY 1
-#define EMPTY2 UINT32_MAX
+/* Allocations are always at least 64 byte aligned. The free list now
+ * stores indices into the state table rather than byte offsets, and no
+ * table ever grows to UINT32_MAX entries, so we use that value to
+ * indicate the free list is empty. */
+#define EMPTY UINT32_MAX
#define PAGE_SIZE 4096
}
void
-anv_free_list_push2(union anv_free_list2 *list,
- struct anv_state_table *table,
- uint32_t first, uint32_t count)
+anv_free_list_push(union anv_free_list *list,
+ struct anv_state_table *table,
+ uint32_t first, uint32_t count)
{
- union anv_free_list2 current, old, new;
+ union anv_free_list current, old, new;
uint32_t last = first;
for (uint32_t i = 1; i < count; i++, last++)
}
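
The chain-building loop body and the CAS publish step are elided in the
hunk above. As a standalone sketch of the same lock-free push, assuming a
table whose entries carry a `next` index (names and types here are
illustrative, not the driver's):

    #include <stdint.h>

    union free_list {
       struct {
          uint32_t offset; /* index of the head entry, or EMPTY */
          uint32_t count;  /* version counter, bumped on every update */
       };
       uint64_t u64;
    };

    struct entry { uint32_t next; };

    static void
    free_list_push(union free_list *list, struct entry *map,
                   uint32_t first, uint32_t count)
    {
       union free_list current, old, new;
       uint32_t last = first;

       /* We own entries [first, first + count) until the CAS below
        * succeeds, so the chain can be linked without atomics. */
       for (uint32_t i = 1; i < count; i++, last++)
          map[last].next = first + i;

       old = *list;
       do {
          current = old;
          map[last].next = current.offset; /* splice old head after tail */
          new.offset = first;
          new.count = current.count + 1;
          old.u64 = __sync_val_compare_and_swap(&list->u64,
                                                current.u64, new.u64);
       } while (old.u64 != current.u64);
    }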
struct anv_state *
-anv_free_list_pop2(union anv_free_list2 *list,
- struct anv_state_table *table)
+anv_free_list_pop(union anv_free_list *list,
+ struct anv_state_table *table)
{
- union anv_free_list2 current, new, old;
+ union anv_free_list current, new, old;
current.u64 = list->u64;
- while (current.offset != EMPTY2) {
+ while (current.offset != EMPTY) {
__sync_synchronize();
new.offset = table->map[current.offset].next;
new.count = current.count + 1;
return NULL;
}
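
On the pop side, the version counter in the upper half of the packed word
is what makes the CAS safe: a concurrent pop/push pair can put the same
index back at the head, but never with the same count, so a stale reader
always retries instead of corrupting the list (the classic ABA problem).
A matching sketch with the same illustrative types:

    #include <stdint.h>

    #define EMPTY UINT32_MAX

    union free_list {
       struct { uint32_t offset, count; };
       uint64_t u64;
    };
    struct entry { uint32_t next; };

    /* Returns the index of the popped entry, or EMPTY if none is left. */
    static uint32_t
    free_list_pop(union free_list *list, struct entry *map)
    {
       union free_list current, new, old;

       current.u64 = list->u64;
       while (current.offset != EMPTY) {
          /* Read the head before the entry it names, so the entry we
           * dereference is valid for the head we observed. */
          __sync_synchronize();
          new.offset = map[current.offset].next;
          new.count = current.count + 1;
          old.u64 = __sync_val_compare_and_swap(&list->u64,
                                                current.u64, new.u64);
          if (old.u64 == current.u64)
             return current.offset;
          current = old;
       }
       return EMPTY;
    }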
-static bool
-anv_free_list_pop(union anv_free_list *list, void **map, int32_t *offset)
-{
- union anv_free_list current, new, old;
-
- current.u64 = list->u64;
- while (current.offset != EMPTY) {
- /* We have to add a memory barrier here so that the list head (and
- * offset) gets read before we read the map pointer. This way we
- * know that the map pointer is valid for the given offset at the
- * point where we read it.
- */
- __sync_synchronize();
-
- int32_t *next_ptr = *map + current.offset;
- new.offset = VG_NOACCESS_READ(next_ptr);
- new.count = current.count + 1;
- old.u64 = __sync_val_compare_and_swap(&list->u64, current.u64, new.u64);
- if (old.u64 == current.u64) {
- *offset = current.offset;
- return true;
- }
- current = old;
- }
-
- return false;
-}
-
-static void
-anv_free_list_push(union anv_free_list *list, void *map, int32_t offset,
- uint32_t size, uint32_t count)
-{
- union anv_free_list current, old, new;
- int32_t *next_ptr = map + offset;
-
- /* If we're returning more than one chunk, we need to build a chain to add
- * to the list. Fortunately, we can do this without any atomics since we
- * own everything in the chain right now. `offset` is left pointing to the
- * head of our chain list while `next_ptr` points to the tail.
- */
- for (uint32_t i = 1; i < count; i++) {
- VG_NOACCESS_WRITE(next_ptr, offset + i * size);
- next_ptr = map + offset + i * size;
- }
-
- old = *list;
- do {
- current = old;
- VG_NOACCESS_WRITE(next_ptr, current.offset);
- new.offset = offset;
- new.count = current.count + 1;
- old.u64 = __sync_val_compare_and_swap(&list->u64, current.u64, new.u64);
- } while (old.u64 != current.u64);
-}
-
/* All pointers in the ptr_free_list are assumed to be page-aligned. This
* means that the bottom 12 bits should all be zero.
*/
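
Page alignment is what lets the pointer free list stay lock-free: the
twelve known-zero low bits can carry a small serial number next to the
pointer, so both fit in one CAS-able word and ABA is avoided the same way
as above. One workable encoding (macro names are illustrative):

    #include <stdint.h>

    /* Low 12 bits hold the counter, the rest the page-aligned pointer;
     * the counter wraps at 4096, which is plenty to make a stale head
     * observably different. */
    #define PFL_COUNT(x) ((uintptr_t)(x) & 0xfff)
    #define PFL_PTR(x)   ((void *)((uintptr_t)(x) & ~(uintptr_t)0xfff))
    #define PFL_PACK(ptr, count) \
       ((void *)(((uintptr_t)(ptr) & ~(uintptr_t)0xfff) | ((count) & 0xfff)))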
pool->block_size = block_size;
pool->back_alloc_free_list = ANV_FREE_LIST_EMPTY;
for (unsigned i = 0; i < ANV_STATE_BUCKETS; i++) {
- pool->buckets[i].free_list = ANV_FREE_LIST2_EMPTY;
+ pool->buckets[i].free_list = ANV_FREE_LIST_EMPTY;
pool->buckets[i].block.next = 0;
pool->buckets[i].block.end = 0;
}
}
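
Each bucket serves states of one power-of-two size, starting at the
64-byte minimum alignment noted at the top of the file. A sketch of the
implied size mapping (function name illustrative):

    #include <stdint.h>

    /* Bucket b holds states of 64 << b bytes, so bucket 0 matches the
     * minimum 64-byte alignment and each bucket doubles the last. */
    static uint32_t
    state_pool_bucket_size(unsigned bucket)
    {
       return 64u << bucket;
    }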
uint32_t block_bucket = anv_state_pool_get_bucket(block_size);
- anv_free_list_push2(&pool->buckets[block_bucket].free_list,
- &pool->table, st_idx, count);
+ anv_free_list_push(&pool->buckets[block_bucket].free_list,
+ &pool->table, st_idx, count);
}
static struct anv_state
int32_t offset;
/* Try free list first. */
- state = anv_free_list_pop2(&pool->buckets[bucket].free_list,
- &pool->table);
+ state = anv_free_list_pop(&pool->buckets[bucket].free_list,
+ &pool->table);
if (state) {
assert(state->offset >= 0);
goto done;
/* Try to grab a chunk from some larger bucket and split it up */
for (unsigned b = bucket + 1; b < ANV_STATE_BUCKETS; b++) {
- state = anv_free_list_pop2(&pool->buckets[b].free_list, &pool->table);
+ state = anv_free_list_pop(&pool->buckets[b].free_list, &pool->table);
if (state) {
unsigned chunk_size = anv_state_pool_get_bucket_size(b);
int32_t chunk_offset = state->offset;
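
The split itself is elided above: since bucket sizes are powers of two,
the popped chunk divides exactly into states of the requested size; the
first one satisfies the allocation and the rest are pushed onto the
smaller bucket's free list. The arithmetic, as a self-contained sketch
(names are illustrative):

    #include <assert.h>
    #include <stdint.h>

    /* Split [chunk_offset, chunk_offset + chunk_size) into states of
     * alloc_size bytes. offsets[0] serves the current request; the rest
     * would be returned to the free list of the smaller bucket. */
    static uint32_t
    split_chunk(int32_t chunk_offset, uint32_t chunk_size,
                uint32_t alloc_size, int32_t *offsets, uint32_t max)
    {
       assert(chunk_size % alloc_size == 0);
       uint32_t count = chunk_size / alloc_size;
       assert(count <= max);
       for (uint32_t i = 0; i < count; i++)
          offsets[i] = chunk_offset + (int32_t)(i * alloc_size);
       return count;
    }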
struct anv_state
anv_state_pool_alloc_back(struct anv_state_pool *pool)
{
- struct anv_state state;
- state.alloc_size = pool->block_size;
+ struct anv_state *state;
+ uint32_t alloc_size = pool->block_size;
- if (anv_free_list_pop(&pool->back_alloc_free_list,
- &pool->block_pool.map, &state.offset)) {
- assert(state.offset < 0);
+ state = anv_free_list_pop(&pool->back_alloc_free_list, &pool->table);
+ if (state) {
+ assert(state->offset < 0);
goto done;
}
- state.offset = anv_block_pool_alloc_back(&pool->block_pool,
- pool->block_size);
+ int32_t offset = anv_block_pool_alloc_back(&pool->block_pool,
+                                            pool->block_size);
+ uint32_t idx;
+ UNUSED VkResult result = anv_state_table_add(&pool->table, &idx, 1);
+ assert(result == VK_SUCCESS);
+
+ state = anv_state_table_get(&pool->table, idx);
+ state->offset = offset;
+ state->alloc_size = alloc_size;
+ state->map = pool->block_pool.map + state->offset;
done:
- state.map = pool->block_pool.map + state.offset;
- VG(VALGRIND_MEMPOOL_ALLOC(pool, state.map, state.alloc_size));
- return state;
+ VG(VALGRIND_MEMPOOL_ALLOC(pool, state->map, state->alloc_size));
+ return *state;
}
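
Callers still receive a by-value struct anv_state, but the entry it was
copied from now lives in the state table, which is what lets the free
path below push it back by index. A usage sketch against the driver's
public allocator entry points (anv_state_pool_free as declared in
anv_private.h):

    /* assumes the driver's anv_private.h and <assert.h> are included */
    void
    example(struct anv_state_pool *pool)
    {
       struct anv_state s = anv_state_pool_alloc_back(pool);
       assert(s.offset < 0);     /* back allocations carry negative offsets */
       /* ... write GPU state through s.map ... */
       anv_state_pool_free(pool, s);
    }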
static void
if (state.offset < 0) {
assert(state.alloc_size == pool->block_size);
anv_free_list_push(&pool->back_alloc_free_list,
- pool->block_pool.map, state.offset,
- state.alloc_size, 1);
+ &pool->table, state.idx, 1);
} else {
- anv_free_list_push2(&pool->buckets[bucket].free_list,
- &pool->table, state.idx, 1);
+ anv_free_list_push(&pool->buckets[bucket].free_list,
+ &pool->table, state.idx, 1);
}
}