{
anv_block_pool_foreach_bo(bo, pool) {
if (bo->map)
- anv_gem_munmap(bo->map, bo->size);
+ anv_gem_munmap(pool->device, bo->map, bo->size);
anv_gem_close(pool->device, bo->gem_handle);
}
VkResult
anv_state_pool_init(struct anv_state_pool *pool,
struct anv_device *device,
- uint64_t start_address,
+ uint64_t base_address,
+ int32_t start_offset,
uint32_t block_size)
{
+ /* We don't want to ever see signed overflow */
+ assert(start_offset < INT32_MAX - (int32_t)BLOCK_POOL_MEMFD_SIZE);
+
VkResult result = anv_block_pool_init(&pool->block_pool, device,
- start_address,
+ base_address + start_offset,
block_size * 16);
if (result != VK_SUCCESS)
return result;
+ pool->start_offset = start_offset;
+
result = anv_state_table_init(&pool->table, device, 64);
if (result != VK_SUCCESS) {
anv_block_pool_finish(&pool->block_pool);
struct anv_state *state_i = anv_state_table_get(&pool->table,
st_idx + i);
state_i->alloc_size = block_size;
- state_i->offset = chunk_offset + block_size * i;
+ state_i->offset = pool->start_offset + chunk_offset + block_size * i;
state_i->map = anv_block_pool_map(&pool->block_pool,
state_i->offset,
state_i->alloc_size);
state = anv_free_list_pop(&pool->buckets[bucket].free_list,
&pool->table);
if (state) {
- assert(state->offset >= 0);
+ assert(state->offset >= pool->start_offset);
goto done;
}
assert(result == VK_SUCCESS);
state = anv_state_table_get(&pool->table, idx);
- state->offset = offset;
+ state->offset = pool->start_offset + offset;
state->alloc_size = alloc_size;
state->map = anv_block_pool_map(&pool->block_pool, offset, alloc_size);
struct anv_state *state;
uint32_t alloc_size = pool->block_size;
+ /* This function is only used with pools where start_offset == 0 */
+ assert(pool->start_offset == 0);
+
state = anv_free_list_pop(&pool->back_alloc_free_list, &pool->table);
if (state) {
- assert(state->offset < 0);
+ assert(state->offset < pool->start_offset);
goto done;
}
assert(result == VK_SUCCESS);
state = anv_state_table_get(&pool->table, idx);
- state->offset = offset;
+ state->offset = pool->start_offset + offset;
state->alloc_size = alloc_size;
state->map = anv_block_pool_map(&pool->block_pool, offset, alloc_size);
assert(util_is_power_of_two_or_zero(state.alloc_size));
unsigned bucket = anv_state_pool_get_bucket(state.alloc_size);
- if (state.offset < 0) {
+ if (state.offset < pool->start_offset) {
assert(state.alloc_size == pool->block_size);
anv_free_list_push(&pool->back_alloc_free_list,
&pool->table, state.idx, 1);
return state;
}
+/* Pre-allocate `count` states of `size` bytes (aligned to `alignment`) from
+ * the parent state pool and park them on a private free list, so that later
+ * anv_state_reserved_pool_alloc() calls cannot fail or contend with the
+ * parent pool's allocator.
+ */
+void
+anv_state_reserved_pool_init(struct anv_state_reserved_pool *pool,
+ struct anv_state_pool *parent,
+ uint32_t count, uint32_t size, uint32_t alignment)
+{
+ pool->pool = parent;
+ pool->reserved_blocks = ANV_FREE_LIST_EMPTY;
+ pool->count = count;
+
+ /* Grab every block from the parent now; they are only handed back to the
+ * parent in anv_state_reserved_pool_finish().
+ */
+ for (unsigned i = 0; i < count; i++) {
+ struct anv_state state = anv_state_pool_alloc(pool->pool, size, alignment);
+ anv_free_list_push(&pool->reserved_blocks, &pool->pool->table, state.idx, 1);
+ }
+}
+
+/* Return every reserved block to the parent state pool.  All states handed
+ * out by anv_state_reserved_pool_alloc() must have been freed back to this
+ * pool first, otherwise the count check below fires.
+ */
+void
+anv_state_reserved_pool_finish(struct anv_state_reserved_pool *pool)
+{
+ struct anv_state *state;
+
+ while ((state = anv_free_list_pop(&pool->reserved_blocks, &pool->pool->table))) {
+ anv_state_pool_free(pool->pool, *state);
+ pool->count--;
+ }
+ /* Every block reserved at init time must have been recovered. */
+ assert(pool->count == 0);
+}
+
+/* Hand out one of the pre-reserved states.
+ *
+ * NOTE(review): the pop result is dereferenced unconditionally, so this
+ * relies on callers never holding more than `count` states at once — an
+ * empty free list would be a NULL dereference here.  Presumably that is the
+ * pool's invariant; confirm against the callers.
+ */
+struct anv_state
+anv_state_reserved_pool_alloc(struct anv_state_reserved_pool *pool)
+{
+ return *anv_free_list_pop(&pool->reserved_blocks, &pool->pool->table);
+}
+
+/* Return a state to the reserved pool's private free list (not to the parent
+ * pool — that only happens in anv_state_reserved_pool_finish()).
+ */
+void
+anv_state_reserved_pool_free(struct anv_state_reserved_pool *pool,
+ struct anv_state state)
+{
+ anv_free_list_push(&pool->reserved_blocks, &pool->pool->table, state.idx, 1);
+}
+
void
anv_bo_pool_init(struct anv_bo_pool *pool, struct anv_device *device)
{
align, alloc_flags, explicit_address);
if (new_bo.offset == 0) {
if (new_bo.map)
- anv_gem_munmap(new_bo.map, size);
+ anv_gem_munmap(device, new_bo.map, size);
anv_gem_close(device, new_bo.gem_handle);
return vk_errorf(device, NULL, VK_ERROR_OUT_OF_DEVICE_MEMORY,
"failed to allocate virtual address for BO");
assert(bo->refcount == 0);
if (bo->map && !bo->from_host_ptr)
- anv_gem_munmap(bo->map, bo->size);
+ anv_gem_munmap(device, bo->map, bo->size);
if (bo->_ccs_size > 0) {
assert(device->physical->has_implicit_ccs);