anv_block_pool_grow(pool);
}
-/** Initializes a block pool that is a slave of another
- *
- * The newly initialized pool is not a block pool on its own but it rather
- * takes a fixed number of blocks from the master pool and hands them out.
- * In some sense, it's nothing more than a glorified free list. However,
- * since it is a block pool, it can be used to back a pool or stream.
- */
-void
-anv_block_pool_init_slave(struct anv_block_pool *pool,
- struct anv_block_pool *master_pool,
- uint32_t num_blocks)
-{
- pool->device = NULL;
-
- /* We don't have backing storage */
- pool->bo.gem_handle = 0;
- pool->bo.offset = 0;
- pool->size = 0;
- pool->next_block = 0;
-
- pool->block_size = master_pool->block_size;
- pool->free_list = ANV_FREE_LIST_EMPTY;
- anv_vector_init(&pool->mmap_cleanups,
- round_to_power_of_two(sizeof(struct anv_mmap_cleanup)), 128);
-
- /* Pull N blocks off the master pool and put them on this pool */
- for (uint32_t i = 0; i < num_blocks; i++) {
- uint32_t block = anv_block_pool_alloc(master_pool);
- pool->map = master_pool->map;
- anv_block_pool_free(pool, block);
- }
-}
-
/* The memfd path lets us create a map for an fd and lets us grow and remap
 * without copying. It breaks valgrind, however, so we have a MAP_ANONYMOUS
* path we can take for valgrind debugging. */
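A minimal sketch of the memfd grow-and-remap idea described above, for
illustration only (not part of this patch; the example_* names, and calling
memfd_create()/mremap() directly instead of through driver wrappers, are
assumptions):

#define _GNU_SOURCE          /* memfd_create() and mremap(); assumes glibc >= 2.27 */
#include <stddef.h>
#include <sys/mman.h>
#include <unistd.h>

/* Create the fd once; its backing starts at size zero and grows on demand. */
static int
example_create_pool_fd(void)
{
   return memfd_create("example block pool", MFD_CLOEXEC);
}

/* Grow the pool without copying: enlarge the backing file with ftruncate(),
 * then ask the kernel to extend (or, failing that, move) the existing
 * mapping.  Returns MAP_FAILED on error.
 */
static void *
example_grow_mapping(int fd, void *old_map, size_t old_size, size_t new_size)
{
   if (ftruncate(fd, new_size) == -1)
      return MAP_FAILED;

   return mremap(old_map, old_size, new_size, MREMAP_MAYMOVE);
}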
int gem_handle;
struct anv_mmap_cleanup *cleanup;
- /* If we don't have a device then we can't resize the pool. This can be
- * the case if the pool is a slave pool.
- */
- if (pool->device == NULL)
- return -1;
-
if (pool->size == 0) {
size = 32 * pool->block_size;
} else {
uint32_t block, next_block;
block = stream->current_block;
- while (block != 1) {
+ while (block != NULL_BLOCK) {
sb = stream->block_pool->map + block;
next_block = VG_NOACCESS_READ(&sb->next);
VG(VALGRIND_FREELIKE_BLOCK(VG_NOACCESS_READ(&sb->_vg_ptr), 0));
if (vg_ptr == NULL) {
vg_ptr = state.map;
VG_NOACCESS_WRITE(&sb->_vg_ptr, vg_ptr);
- VG(VALGRIND_MALLOCLIKE_BLOCK(vg_ptr, size, 0, false));
+ VALGRIND_MALLOCLIKE_BLOCK(vg_ptr, size, 0, false);
} else {
ptrdiff_t vg_offset = vg_ptr - current_map;
assert(vg_offset >= stream->current_block &&
struct bo_pool_bo_link *link = PFL_PTR(pool->free_list);
while (link != NULL) {
struct bo_pool_bo_link link_copy = VG_NOACCESS_READ(link);
+
+ /* The anv_gem_m[un]map() functions are also valgrind-safe so they
+ * act as an alloc/free. In order to avoid a double-free warning, we
+ * need to mark this as malloc'd before we unmap it.
+ */
+ VG(VALGRIND_MALLOCLIKE_BLOCK(link_copy.bo.map, pool->bo_size, 0, false));
+
anv_gem_munmap(link_copy.bo.map, pool->bo_size);
anv_gem_close(pool->device, link_copy.bo.gem_handle);
link = link_copy.next;
return vk_error(VK_ERROR_MEMORY_MAP_FAILED);
}
- VG(VALGRIND_MALLOCLIKE_BLOCK(new_bo.map, pool->bo_size, 0, false));
+ /* We don't need to call VALGRIND_MALLOCLIKE_BLOCK here because gem_mmap
+ * calls it for us. If we really want to be pedantic, we could do a
+ * VALGRIND_FREELIKE_BLOCK right after the mmap, but there's no good
+ * reason.
+ */
*bo = new_bo;
return VK_SUCCESS;
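For reference, the valgrind bookkeeping this patch adjusts follows the usual
MALLOCLIKE/FREELIKE pairing around a raw mapping. A minimal, self-contained
sketch (not driver code; the example_* names are invented):

#include <stdbool.h>
#include <stddef.h>
#include <sys/mman.h>
#include <valgrind/valgrind.h>

/* Tell memcheck to track an mmap'd region like a heap allocation so leaks
 * and stale pointers into it are reported.  rzB = 0 (no redzone) and
 * is_zeroed = false, matching the calls in the code above.
 */
static void *
example_map_tracked(size_t size)
{
   void *map = mmap(NULL, size, PROT_READ | PROT_WRITE,
                    MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
   if (map == MAP_FAILED)
      return NULL;

   VALGRIND_MALLOCLIKE_BLOCK(map, size, 0, false);
   return map;
}

/* The matching "free" must be issued exactly once before the pages go away;
 * issuing it twice for the same block triggers a double-free report, which
 * is why the loop above re-marks each buffer as malloc'd before
 * anv_gem_munmap() (which performs its own "free") tears it down.
 */
static void
example_unmap_tracked(void *map, size_t size)
{
   VALGRIND_FREELIKE_BLOCK(map, 0);
   munmap(map, size);
}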