pool->block_pool = block_pool;
assert(util_is_power_of_two(block_size));
pool->block_size = block_size;
+ pool->back_alloc_free_list = ANV_FREE_LIST_EMPTY;
for (unsigned i = 0; i < ANV_STATE_BUCKETS; i++) {
pool->buckets[i].free_list = ANV_FREE_LIST_EMPTY;
pool->buckets[i].block.next = 0;
@@ ... @@
return state;
}
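+/* Allocate a block-sized state from the back of the block pool.  States
+ * allocated this way have negative offsets, and when freed they are kept
+ * on a dedicated free list rather than in one of the size buckets.
+ */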
+struct anv_state
+anv_state_pool_alloc_back(struct anv_state_pool *pool)
+{
+ struct anv_state state;
+ state.alloc_size = pool->block_size;
+
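+ /* Reuse a previously freed back allocation if one is available. */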
+ if (anv_free_list_pop(&pool->back_alloc_free_list,
+ &pool->block_pool->map, &state.offset)) {
+ assert(state.offset < 0);
+ goto done;
+ }
+
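+ /* Nothing to reuse; allocate a new block from the back of the block pool. */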
+ state.offset = anv_block_pool_alloc_back(pool->block_pool, pool->block_size);
+
+done:
+ state.map = pool->block_pool->map + state.offset;
+ VG(VALGRIND_MEMPOOL_ALLOC(pool, state.map, state.alloc_size));
+ return state;
+}
+
static void
anv_state_pool_free_no_vg(struct anv_state_pool *pool, struct anv_state state)
{
assert(util_is_power_of_two(state.alloc_size));
unsigned size_log2 = ilog2_round_up(state.alloc_size);
assert(size_log2 >= ANV_MIN_STATE_SIZE_LOG2 &&
       size_log2 <= ANV_MAX_STATE_SIZE_LOG2);
unsigned bucket = size_log2 - ANV_MIN_STATE_SIZE_LOG2;
- anv_free_list_push(&pool->buckets[bucket].free_list,
- pool->block_pool->map, state.offset);
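+ /* States with negative offsets came from anv_state_pool_alloc_back and
+ * return to the back-allocation free list rather than a size bucket. */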
+ if (state.offset < 0) {
+ assert(state.alloc_size == pool->block_size);
+ anv_free_list_push(&pool->back_alloc_free_list,
+ pool->block_pool->map, state.offset);
+ } else {
+ anv_free_list_push(&pool->buckets[bucket].free_list,
+ pool->block_pool->map, state.offset);
+ }
}
void
@@ ... @@
/* The size of blocks which will be allocated from the block pool */
uint32_t block_size;
+ /** Free list for "back" allocations */
+ union anv_free_list back_alloc_free_list;
+
struct anv_fixed_size_state_pool buckets[ANV_STATE_BUCKETS];
};
@@ ... @@
void anv_state_pool_finish(struct anv_state_pool *pool);
struct anv_state anv_state_pool_alloc(struct anv_state_pool *pool,
uint32_t state_size, uint32_t alignment);
+struct anv_state anv_state_pool_alloc_back(struct anv_state_pool *pool);
void anv_state_pool_free(struct anv_state_pool *pool, struct anv_state state);
void anv_state_stream_init(struct anv_state_stream *stream,
struct anv_state_pool *state_pool,